diff --git a/.mailmap b/.mailmap index a897c16d3baef..be60c13d2ee15 100644 --- a/.mailmap +++ b/.mailmap @@ -88,7 +88,6 @@ Antonio Quartulli Antonio Quartulli Antonio Quartulli Antonio Quartulli -Antonio Quartulli Antonio Quartulli Anup Patel Archit Taneja @@ -282,6 +281,7 @@ Henrik Rydberg Herbert Xu Huacai Chen Huacai Chen +Ike Panhc J. Bruce Fields J. Bruce Fields Jacob Shin @@ -522,6 +522,7 @@ Nadav Amit Nadia Yvette Chambers William Lee Irwin III Naoya Horiguchi Naoya Horiguchi +Natalie Vock Nathan Chancellor Naveen N Rao Naveen N Rao @@ -613,6 +614,8 @@ Richard Leitner Richard Leitner Robert Foss Rocky Liao +Rodrigo Siqueira +Rodrigo Siqueira Roman Gushchin Roman Gushchin Roman Gushchin @@ -689,6 +692,7 @@ Subbaraman Narayanamurthy Subhash Jadavani Sudarshan Rajagopalan Sudeep Holla Sudeep KarkadaNagesha +Sumit Garg Sumit Semwal Surabhi Vishnoi Sven Eckelmann diff --git a/CREDITS b/CREDITS index 53d11a46fd698..d71d42c30044b 100644 --- a/CREDITS +++ b/CREDITS @@ -3233,6 +3233,10 @@ N: Rui Prior E: rprior@inescn.pt D: ATM device driver for NICStAR based cards +N: Roopa Prabhu +E: roopa@nvidia.com +D: Bridge co-maintainer, vxlan and networking contributor + N: Stefan Probst E: sp@caldera.de D: The Linux Support Team Erlangen, 1993-97 diff --git a/Documentation/ABI/testing/sysfs-class-net b/Documentation/ABI/testing/sysfs-class-net index ebf21beba846d..15d7d36a8294b 100644 --- a/Documentation/ABI/testing/sysfs-class-net +++ b/Documentation/ABI/testing/sysfs-class-net @@ -343,7 +343,7 @@ Date: Jan 2021 KernelVersion: 5.12 Contact: netdev@vger.kernel.org Description: - Boolean value to control the threaded mode per device. User could + Integer value to control the threaded mode per device. User could set this value to enable/disable threaded mode for all napi belonging to this device, without the need to do device up/down. @@ -351,4 +351,5 @@ Description: == ================================== 0 threaded mode disabled for this dev 1 threaded mode enabled for this dev + 2 threaded mode enabled, and busy polling enabled. == ================================== diff --git a/Documentation/admin-guide/README.rst b/Documentation/admin-guide/README.rst index eb94526689091..b557cf1c820d2 100644 --- a/Documentation/admin-guide/README.rst +++ b/Documentation/admin-guide/README.rst @@ -176,7 +176,7 @@ Configuring the kernel values without prompting. "make defconfig" Create a ./.config file by using the default - symbol values from either arch/$ARCH/defconfig + symbol values from either arch/$ARCH/configs/defconfig or arch/$ARCH/configs/${PLATFORM}_defconfig, depending on the architecture. diff --git a/Documentation/admin-guide/sysctl/kernel.rst b/Documentation/admin-guide/sysctl/kernel.rst index a43b78b4b6464..dd49a89a62d35 100644 --- a/Documentation/admin-guide/sysctl/kernel.rst +++ b/Documentation/admin-guide/sysctl/kernel.rst @@ -212,6 +212,17 @@ pid>/``). This value defaults to 0. +core_sort_vma +============= + +The default coredump writes VMAs in address order. By setting +``core_sort_vma`` to 1, VMAs will be written from smallest size +to largest size. This is known to break at least elfutils, but +can be handy when dealing with very large (and truncated) +coredumps where the more useful debugging details are included +in the smaller VMAs. 
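A minimal sketch of driving the new knob above, assuming the conventional /proc/sys mapping for sysctl names (kernel.core_sort_vma -> /proc/sys/kernel/core_sort_vma) and root privileges:

    # Toggle the core_sort_vma coredump ordering described above.
    from pathlib import Path

    KNOB = Path("/proc/sys/kernel/core_sort_vma")  # assumed standard sysctl path

    def set_core_sort_vma(by_size: bool) -> None:
        # 0 = VMAs written in address order (the default)
        # 1 = VMAs written smallest-to-largest, handy for truncated cores
        KNOB.write_text("1\n" if by_size else "0\n")

    set_core_sort_vma(True)
    print("core_sort_vma =", KNOB.read_text().strip())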
+ + core_uses_pid ============= diff --git a/Documentation/arch/powerpc/cxl.rst b/Documentation/arch/powerpc/cxl.rst index d2d77057610e4..778adda740d24 100644 --- a/Documentation/arch/powerpc/cxl.rst +++ b/Documentation/arch/powerpc/cxl.rst @@ -18,6 +18,7 @@ Introduction both access system memory directly and with the same effective addresses. + **This driver is deprecated and will be removed in a future release.** Hardware overview ================= @@ -453,7 +454,7 @@ Sysfs Class A cxl sysfs class is added under /sys/class/cxl to facilitate enumeration and tuning of the accelerators. Its layout is - described in Documentation/ABI/testing/sysfs-class-cxl + described in Documentation/ABI/obsolete/sysfs-class-cxl Udev rules diff --git a/Documentation/arch/s390/driver-model.rst b/Documentation/arch/s390/driver-model.rst index ad4bc2dbea435..ad18f129fb0bc 100644 --- a/Documentation/arch/s390/driver-model.rst +++ b/Documentation/arch/s390/driver-model.rst @@ -244,7 +244,7 @@ information about the interrupt from the irb parameter. -------------------- The ccwgroup mechanism is designed to handle devices consisting of multiple ccw -devices, like lcs or ctc. +devices, like qeth or ctc. The ccw driver provides a 'group' attribute. Piping bus ids of ccw devices to this attributes creates a ccwgroup device consisting of these ccw devices (if diff --git a/Documentation/arch/x86/sva.rst b/Documentation/arch/x86/sva.rst index 33cb050059820..6a759984d4712 100644 --- a/Documentation/arch/x86/sva.rst +++ b/Documentation/arch/x86/sva.rst @@ -25,7 +25,7 @@ to cache translations for virtual addresses. The IOMMU driver uses the mmu_notifier() support to keep the device TLB cache and the CPU cache in sync. When an ATS lookup fails for a virtual address, the device should use the PRI in order to request the virtual address to be paged into the -CPU page tables. The device must use ATS again in order the fetch the +CPU page tables. The device must use ATS again in order to fetch the translation before use. Shared Hardware Workqueues @@ -216,7 +216,7 @@ submitting work and processing completions. Single Root I/O Virtualization (SR-IOV) focuses on providing independent hardware interfaces for virtualizing hardware. Hence, it's required to be -almost fully functional interface to software supporting the traditional +an almost fully functional interface to software supporting the traditional BARs, space for interrupts via MSI-X, its own register layout. Virtual Functions (VFs) are assisted by the Physical Function (PF) driver. 
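The driver-model.rst hunk above mentions piping ccw bus IDs into a driver's 'group' attribute to create a ccwgroup device; a minimal sketch of that, with the qeth driver path and the bus IDs as illustrative assumptions:

    # Create a ccwgroup device (e.g. for qeth) by writing a comma-separated
    # list of ccw bus IDs into the driver's 'group' attribute.
    from pathlib import Path

    # Illustrative path; the real driver name and device IDs will vary.
    GROUP = Path("/sys/bus/ccwgroup/drivers/qeth/group")

    def create_ccwgroup(*bus_ids: str) -> None:
        GROUP.write_text(",".join(bus_ids) + "\n")

    create_ccwgroup("0.0.a000", "0.0.a001", "0.0.a002")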
diff --git a/Documentation/devicetree/bindings/arm/rockchip/pmu.yaml b/Documentation/devicetree/bindings/arm/rockchip/pmu.yaml index 932f981265ccb..52016a141227b 100644 --- a/Documentation/devicetree/bindings/arm/rockchip/pmu.yaml +++ b/Documentation/devicetree/bindings/arm/rockchip/pmu.yaml @@ -53,11 +53,17 @@ properties: reg: maxItems: 1 + power-controller: + type: object + + reboot-mode: + type: object + required: - compatible - reg -additionalProperties: true +additionalProperties: false examples: - | diff --git a/Documentation/devicetree/bindings/iio/adc/adi,ad7606.yaml b/Documentation/devicetree/bindings/iio/adc/adi,ad7606.yaml index ab5881d0d017f..52d3f1ce33678 100644 --- a/Documentation/devicetree/bindings/iio/adc/adi,ad7606.yaml +++ b/Documentation/devicetree/bindings/iio/adc/adi,ad7606.yaml @@ -146,6 +146,7 @@ properties: maxItems: 2 pwm-names: + minItems: 1 items: - const: convst1 - const: convst2 diff --git a/Documentation/devicetree/bindings/input/touchscreen/imagis,ist3038c.yaml b/Documentation/devicetree/bindings/input/touchscreen/imagis,ist3038c.yaml index e24cbd9609933..bd8ede3a4ad89 100644 --- a/Documentation/devicetree/bindings/input/touchscreen/imagis,ist3038c.yaml +++ b/Documentation/devicetree/bindings/input/touchscreen/imagis,ist3038c.yaml @@ -19,6 +19,7 @@ properties: - imagis,ist3038 - imagis,ist3038b - imagis,ist3038c + - imagis,ist3038h reg: maxItems: 1 diff --git a/Documentation/devicetree/bindings/mtd/cdns,hp-nfc.yaml b/Documentation/devicetree/bindings/mtd/cdns,hp-nfc.yaml index 0bed37a994c38..e1f4d7c35a885 100644 --- a/Documentation/devicetree/bindings/mtd/cdns,hp-nfc.yaml +++ b/Documentation/devicetree/bindings/mtd/cdns,hp-nfc.yaml @@ -33,6 +33,10 @@ properties: clocks: maxItems: 1 + clock-names: + items: + - const: nf_clk + dmas: maxItems: 1 @@ -51,6 +55,7 @@ required: - reg-names - interrupts - clocks + - clock-names unevaluatedProperties: false @@ -66,7 +71,8 @@ examples: #address-cells = <1>; #size-cells = <0>; interrupts = ; - clocks = <&nf_clk>; + clocks = <&clk>; + clock-names = "nf_clk"; cdns,board-delay-ps = <4830>; nand@0 { diff --git a/Documentation/devicetree/bindings/net/airoha,en7581-eth.yaml b/Documentation/devicetree/bindings/net/airoha,en7581-eth.yaml index c578637c5826d..0fdd112654177 100644 --- a/Documentation/devicetree/bindings/net/airoha,en7581-eth.yaml +++ b/Documentation/devicetree/bindings/net/airoha,en7581-eth.yaml @@ -63,6 +63,14 @@ properties: "#size-cells": const: 0 + airoha,npu: + $ref: /schemas/types.yaml#/definitions/phandle + description: + Phandle to the node used to configure the NPU module. + The Airoha Network Processor Unit (NPU) provides a configuration + interface to implement hardware flow offloading programming Packet + Processor Engine (PPE) flow table. 
+ patternProperties: "^ethernet@[1-4]$": type: object @@ -132,6 +140,8 @@ examples: , ; + airoha,npu = <&npu>; + #address-cells = <1>; #size-cells = <0>; diff --git a/Documentation/devicetree/bindings/net/airoha,en7581-npu.yaml b/Documentation/devicetree/bindings/net/airoha,en7581-npu.yaml new file mode 100644 index 0000000000000..76dd97c3fb400 --- /dev/null +++ b/Documentation/devicetree/bindings/net/airoha,en7581-npu.yaml @@ -0,0 +1,84 @@ +# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/net/airoha,en7581-npu.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Airoha Network Processor Unit for EN7581 SoC + +maintainers: + - Lorenzo Bianconi + +description: + The Airoha Network Processor Unit (NPU) provides a configuration interface + to implement wired and wireless hardware flow offloading programming Packet + Processor Engine (PPE) flow table. + +properties: + compatible: + enum: + - airoha,en7581-npu + + reg: + maxItems: 1 + + interrupts: + items: + - description: mbox host irq line + - description: watchdog0 irq line + - description: watchdog1 irq line + - description: watchdog2 irq line + - description: watchdog3 irq line + - description: watchdog4 irq line + - description: watchdog5 irq line + - description: watchdog6 irq line + - description: watchdog7 irq line + - description: wlan irq line0 + - description: wlan irq line1 + - description: wlan irq line2 + - description: wlan irq line3 + - description: wlan irq line4 + - description: wlan irq line5 + + memory-region: + maxItems: 1 + description: + Memory used to store NPU firmware binary. + +required: + - compatible + - reg + - interrupts + - memory-region + +additionalProperties: false + +examples: + - | + #include + #include + soc { + #address-cells = <2>; + #size-cells = <2>; + + npu@1e900000 { + compatible = "airoha,en7581-npu"; + reg = <0 0x1e900000 0 0x313000>; + interrupts = , + , + , + , + , + , + , + , + , + , + , + , + , + , + ; + memory-region = <&npu_binary>; + }; + }; diff --git a/Documentation/devicetree/bindings/net/amlogic,meson-dwmac.yaml b/Documentation/devicetree/bindings/net/amlogic,meson-dwmac.yaml index 798a4c19f18cf..0cd78d71768c1 100644 --- a/Documentation/devicetree/bindings/net/amlogic,meson-dwmac.yaml +++ b/Documentation/devicetree/bindings/net/amlogic,meson-dwmac.yaml @@ -152,6 +152,12 @@ properties: The second range is is for the Amlogic specific configuration (for example the PRG_ETHERNET register range on Meson8b and newer) + interrupts: + maxItems: 1 + + interrupt-names: + const: macirq + required: - compatible - reg diff --git a/Documentation/devicetree/bindings/net/bluetooth/nxp,88w8987-bt.yaml b/Documentation/devicetree/bindings/net/bluetooth/nxp,88w8987-bt.yaml index 0a2d7baf5db32..d02e9dd847eff 100644 --- a/Documentation/devicetree/bindings/net/bluetooth/nxp,88w8987-bt.yaml +++ b/Documentation/devicetree/bindings/net/bluetooth/nxp,88w8987-bt.yaml @@ -17,6 +17,9 @@ description: maintainers: - Neeraj Sanjay Kale +allOf: + - $ref: bluetooth-controller.yaml# + properties: compatible: enum: @@ -40,10 +43,20 @@ properties: Host-To-Chip power save mechanism is driven by this GPIO connected to BT_WAKE_IN pin of the NXP chipset. + nxp,wakein-pin: + $ref: /schemas/types.yaml#/definitions/uint8 + description: + The GPIO number of the NXP chipset used for BT_WAKE_IN. + + nxp,wakeout-pin: + $ref: /schemas/types.yaml#/definitions/uint8 + description: + The GPIO number of the NXP chipset used for BT_WAKE_OUT. 
+ required: - compatible -additionalProperties: false +unevaluatedProperties: false examples: - | @@ -54,5 +67,8 @@ examples: fw-init-baudrate = <3000000>; firmware-name = "uartuart8987_bt_v0.bin"; device-wakeup-gpios = <&gpio 11 GPIO_ACTIVE_HIGH>; + nxp,wakein-pin = /bits/ 8 <18>; + nxp,wakeout-pin = /bits/ 8 <19>; + local-bd-address = [66 55 44 33 22 11]; }; }; diff --git a/Documentation/devicetree/bindings/net/bluetooth/qualcomm-bluetooth.yaml b/Documentation/devicetree/bindings/net/bluetooth/qualcomm-bluetooth.yaml index a72152f7e29b4..6353a336f382e 100644 --- a/Documentation/devicetree/bindings/net/bluetooth/qualcomm-bluetooth.yaml +++ b/Documentation/devicetree/bindings/net/bluetooth/qualcomm-bluetooth.yaml @@ -19,6 +19,7 @@ properties: - qcom,qca2066-bt - qcom,qca6174-bt - qcom,qca9377-bt + - qcom,wcn3950-bt - qcom,wcn3988-bt - qcom,wcn3990-bt - qcom,wcn3991-bt @@ -138,6 +139,7 @@ allOf: compatible: contains: enum: + - qcom,wcn3950-bt - qcom,wcn3988-bt - qcom,wcn3990-bt - qcom,wcn3991-bt diff --git a/Documentation/devicetree/bindings/net/can/fsl,flexcan.yaml b/Documentation/devicetree/bindings/net/can/fsl,flexcan.yaml index 97dd1a7c5ed26..f81d56f7c12a5 100644 --- a/Documentation/devicetree/bindings/net/can/fsl,flexcan.yaml +++ b/Documentation/devicetree/bindings/net/can/fsl,flexcan.yaml @@ -10,9 +10,6 @@ title: maintainers: - Marc Kleine-Budde -allOf: - - $ref: can-controller.yaml# - properties: compatible: oneOf: @@ -28,6 +25,7 @@ properties: - fsl,vf610-flexcan - fsl,ls1021ar2-flexcan - fsl,lx2160ar1-flexcan + - nxp,s32g2-flexcan - items: - enum: - fsl,imx53-flexcan @@ -43,12 +41,25 @@ properties: - enum: - fsl,ls1028ar1-flexcan - const: fsl,lx2160ar1-flexcan + - items: + - enum: + - nxp,s32g3-flexcan + - const: nxp,s32g2-flexcan + - items: + - enum: + - fsl,imx94-flexcan + - const: fsl,imx95-flexcan reg: maxItems: 1 interrupts: - maxItems: 1 + minItems: 1 + maxItems: 4 + + interrupt-names: + minItems: 1 + maxItems: 4 clocks: maxItems: 2 @@ -70,6 +81,9 @@ properties: xceiver-supply: description: Regulator that powers the CAN transceiver. + phys: + maxItems: 1 + big-endian: $ref: /schemas/types.yaml#/definitions/flag description: | @@ -136,6 +150,41 @@ required: - reg - interrupts +allOf: + - $ref: can-controller.yaml# + - if: + properties: + compatible: + contains: + const: nxp,s32g2-flexcan + then: + properties: + interrupts: + items: + - description: Message Buffer interrupt for mailboxes 0-7 and Enhanced RX FIFO + - description: Device state change + - description: Bus Error detection + - description: Message Buffer interrupt for mailboxes 8-127 + interrupt-names: + items: + - const: mb-0 + - const: state + - const: berr + - const: mb-1 + required: + - interrupt-names + else: + properties: + interrupts: + maxItems: 1 + interrupt-names: false + - if: + required: + - xceiver-supply + then: + properties: + phys: false + additionalProperties: false examples: diff --git a/Documentation/devicetree/bindings/net/can/microchip,mcp251xfd.yaml b/Documentation/devicetree/bindings/net/can/microchip,mcp251xfd.yaml index 2a98b26630cb1..c155c9c6db392 100644 --- a/Documentation/devicetree/bindings/net/can/microchip,mcp251xfd.yaml +++ b/Documentation/devicetree/bindings/net/can/microchip,mcp251xfd.yaml @@ -40,7 +40,7 @@ properties: microchip,rx-int-gpios: description: - GPIO phandle of GPIO connected to to INT1 pin of the MCP251XFD, which + GPIO phandle of GPIO connected to INT1 pin of the MCP251XFD, which signals a pending RX interrupt. 
maxItems: 1 diff --git a/Documentation/devicetree/bindings/net/can/renesas,rcar-canfd.yaml b/Documentation/devicetree/bindings/net/can/renesas,rcar-canfd.yaml index 7c5ac5d2e880b..f6884f6e59e74 100644 --- a/Documentation/devicetree/bindings/net/can/renesas,rcar-canfd.yaml +++ b/Documentation/devicetree/bindings/net/can/renesas,rcar-canfd.yaml @@ -170,7 +170,7 @@ allOf: const: renesas,r8a779h0-canfd then: patternProperties: - "^channel[5-7]$": false + "^channel[4-7]$": false else: if: not: diff --git a/Documentation/devicetree/bindings/net/dsa/brcm,b53.yaml b/Documentation/devicetree/bindings/net/dsa/brcm,b53.yaml index 4c78c546343f5..d6c957a33b482 100644 --- a/Documentation/devicetree/bindings/net/dsa/brcm,b53.yaml +++ b/Documentation/devicetree/bindings/net/dsa/brcm,b53.yaml @@ -16,6 +16,7 @@ properties: compatible: oneOf: - const: brcm,bcm5325 + - const: brcm,bcm53101 - const: brcm,bcm53115 - const: brcm,bcm53125 - const: brcm,bcm53128 @@ -77,6 +78,7 @@ allOf: contains: enum: - brcm,bcm5325 + - brcm,bcm53101 - brcm,bcm53115 - brcm,bcm53125 - brcm,bcm53128 diff --git a/Documentation/devicetree/bindings/net/ethernet-phy.yaml b/Documentation/devicetree/bindings/net/ethernet-phy.yaml index 2c71454ae8e36..824bbe4333b7e 100644 --- a/Documentation/devicetree/bindings/net/ethernet-phy.yaml +++ b/Documentation/devicetree/bindings/net/ethernet-phy.yaml @@ -232,6 +232,12 @@ properties: PHY's that have configurable TX internal delays. If this property is present then the PHY applies the TX delay. + tx-amplitude-100base-tx-percent: + description: + Transmit amplitude gain applied for 100BASE-TX. 100% matches 2V + peak-to-peak specified in ANSI X3.263. When omitted, the PHYs default + will be left as is. + leds: type: object diff --git a/Documentation/devicetree/bindings/net/faraday,ftgmac100.yaml b/Documentation/devicetree/bindings/net/faraday,ftgmac100.yaml index 9bcbacb6640db..55d6a8379025b 100644 --- a/Documentation/devicetree/bindings/net/faraday,ftgmac100.yaml +++ b/Documentation/devicetree/bindings/net/faraday,ftgmac100.yaml @@ -44,6 +44,9 @@ properties: phy-mode: enum: - rgmii + - rgmii-id + - rgmii-rxid + - rgmii-txid - rmii phy-handle: true diff --git a/Documentation/devicetree/bindings/net/fsl,gianfar-mdio.yaml b/Documentation/devicetree/bindings/net/fsl,gianfar-mdio.yaml new file mode 100644 index 0000000000000..03c819bc701be --- /dev/null +++ b/Documentation/devicetree/bindings/net/fsl,gianfar-mdio.yaml @@ -0,0 +1,112 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/net/fsl,gianfar-mdio.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Freescale Gianfar (TSEC) MDIO Device + +description: + This binding describes the MDIO is a bus to which the PHY devices are + connected. For each device that exists on this bus, a child node should be + created. + + Some TSECs are associated with an internal Ten-Bit Interface (TBI) PHY. This + PHY is accessed through the local MDIO bus. These buses are defined similarly + to the mdio buses, except they are compatible with "fsl,gianfar-tbi". The TBI + PHYs underneath them are similar to normal PHYs, but the reg property is + considered instructive, rather than descriptive. The reg property should be + chosen so it doesn't interfere with other PHYs on the bus. + +maintainers: + - J. 
Neuschäfer + +# This is needed to distinguish gianfar.yaml and gianfar-mdio.yaml, because +# both use compatible = "gianfar" (with different device_type values) +select: + oneOf: + - properties: + compatible: + contains: + const: gianfar + device_type: + const: mdio + required: + - device_type + + - properties: + compatible: + contains: + enum: + - fsl,gianfar-tbi + - fsl,gianfar-mdio + - fsl,etsec2-tbi + - fsl,etsec2-mdio + - fsl,ucc-mdio + - ucc_geth_phy + + required: + - compatible + +properties: + compatible: + enum: + - fsl,gianfar-tbi + - fsl,gianfar-mdio + - fsl,etsec2-tbi + - fsl,etsec2-mdio + - fsl,ucc-mdio + - gianfar + - ucc_geth_phy + + reg: + minItems: 1 + items: + - description: + Offset and length of the register set for the device + + - description: + Optionally, the offset and length of the TBIPA register (TBI PHY + address register). If TBIPA register is not specified, the driver + will attempt to infer it from the register set specified (your + mileage may vary). + + device_type: + const: mdio + +required: + - reg + - "#address-cells" + - "#size-cells" + +allOf: + - $ref: mdio.yaml# + + - if: + properties: + compatible: + contains: + const: ucc_geth_phy + then: + required: + - device_type + +unevaluatedProperties: false + +examples: + - | + soc { + #address-cells = <1>; + #size-cells = <1>; + + mdio@24520 { + reg = <0x24520 0x20>; + compatible = "fsl,gianfar-mdio"; + #address-cells = <1>; + #size-cells = <0>; + + ethernet-phy@0 { + reg = <0>; + }; + }; + }; diff --git a/Documentation/devicetree/bindings/net/fsl,gianfar.yaml b/Documentation/devicetree/bindings/net/fsl,gianfar.yaml new file mode 100644 index 0000000000000..f92f284aa05b0 --- /dev/null +++ b/Documentation/devicetree/bindings/net/fsl,gianfar.yaml @@ -0,0 +1,248 @@ +# SPDX-License-Identifier: GPL-2.0 +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/net/fsl,gianfar.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Freescale Three-Speed Ethernet Controller (TSEC), "Gianfar" + +maintainers: + - J. Neuschäfer + +# This is needed to distinguish gianfar.yaml and gianfar-mdio.yaml, because +# both use compatible = "gianfar" (with different device_type values) +select: + oneOf: + - properties: + compatible: + contains: + const: gianfar + device_type: + const: network + required: + - device_type + + - properties: + compatible: + const: fsl,etsec2 + + required: + - compatible + +properties: + compatible: + enum: + - gianfar + - fsl,etsec2 + + device_type: + const: network + + model: + enum: + - FEC + - TSEC + - eTSEC + + reg: + maxItems: 1 + + ranges: true + + "#address-cells": + enum: [ 1, 2 ] + + "#size-cells": + enum: [ 1, 2 ] + + cell-index: + $ref: /schemas/types.yaml#/definitions/uint32 + + interrupts: + minItems: 1 + items: + - description: Transmit interrupt or single combined interrupt + - description: Receive interrupt + - description: Error interrupt + + dma-coherent: true + + fsl,magic-packet: + type: boolean + description: + If present, indicates that the hardware supports waking up via magic packet. + + fsl,wake-on-filer: + type: boolean + description: + If present, indicates that the hardware supports waking up by Filer + General Purpose Interrupt (FGPI) asserted on the Rx int line. This is + an advanced power management capability allowing certain packet types + (user) defined by filer rules to wake up the system. + + bd-stash: + type: boolean + description: + If present, indicates that the hardware supports stashing buffer + descriptors in the L2. 
+ + rx-stash-len: + $ref: /schemas/types.yaml#/definitions/uint32 + description: + Denotes the number of bytes of a received buffer to stash in the L2. + + rx-stash-idx: + $ref: /schemas/types.yaml#/definitions/uint32 + description: + Denotes the index of the first byte from the received buffer to stash in + the L2. + + fsl,num_rx_queues: + $ref: /schemas/types.yaml#/definitions/uint32 + description: Number of receive queues + const: 8 + + fsl,num_tx_queues: + $ref: /schemas/types.yaml#/definitions/uint32 + description: Number of transmit queues + const: 8 + + tbi-handle: + $ref: /schemas/types.yaml#/definitions/phandle + description: Reference (phandle) to the TBI node + +required: + - compatible + - model + +patternProperties: + "^mdio@[0-9a-f]+$": + $ref: /schemas/net/fsl,gianfar-mdio.yaml# + +allOf: + - $ref: ethernet-controller.yaml# + + # eTSEC2 controller nodes have "queue group" subnodes and don't need a "reg" + # property. + - if: + properties: + compatible: + contains: + const: fsl,etsec2 + then: + patternProperties: + "^queue-group@[0-9a-f]+$": + type: object + + properties: + reg: + maxItems: 1 + + interrupts: + items: + - description: Transmit interrupt + - description: Receive interrupt + - description: Error interrupt + + required: + - reg + - interrupts + + additionalProperties: false + else: + required: + - reg + + # TSEC and eTSEC devices require three interrupts + - if: + properties: + model: + contains: + enum: [ TSEC, eTSEC ] + then: + properties: + interrupts: + items: + - description: Transmit interrupt + - description: Receive interrupt + - description: Error interrupt + + + +unevaluatedProperties: false + +examples: + - | + ethernet@24000 { + device_type = "network"; + model = "TSEC"; + compatible = "gianfar"; + reg = <0x24000 0x1000>; + local-mac-address = [ 00 00 00 00 00 00 ]; + interrupts = <29 2>, <30 2>, <34 2>; + interrupt-parent = <&mpic>; + phy-handle = <&phy0>; + }; + + - | + #include + + ethernet@24000 { + compatible = "gianfar"; + reg = <0x24000 0x1000>; + ranges = <0x0 0x24000 0x1000>; + #address-cells = <1>; + #size-cells = <1>; + cell-index = <0>; + device_type = "network"; + model = "eTSEC"; + local-mac-address = [ 00 00 00 00 00 00 ]; + interrupts = <32 IRQ_TYPE_LEVEL_LOW>, + <33 IRQ_TYPE_LEVEL_LOW>, + <34 IRQ_TYPE_LEVEL_LOW>; + interrupt-parent = <&ipic>; + + mdio@520 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "fsl,gianfar-mdio"; + reg = <0x520 0x20>; + }; + }; + + - | + #include + #include + + bus { + #address-cells = <2>; + #size-cells = <2>; + + ethernet { + compatible = "fsl,etsec2"; + ranges; + device_type = "network"; + #address-cells = <2>; + #size-cells = <2>; + interrupt-parent = <&gic>; + model = "eTSEC"; + fsl,magic-packet; + dma-coherent; + + queue-group@2d10000 { + reg = <0x0 0x2d10000 0x0 0x1000>; + interrupts = , + , + ; + }; + + queue-group@2d14000 { + reg = <0x0 0x2d14000 0x0 0x1000>; + interrupts = , + , + ; + }; + }; + }; + +... diff --git a/Documentation/devicetree/bindings/net/fsl-tsec-phy.txt b/Documentation/devicetree/bindings/net/fsl-tsec-phy.txt index 9c9668c1b6a24..b18bb4c997ea3 100644 --- a/Documentation/devicetree/bindings/net/fsl-tsec-phy.txt +++ b/Documentation/devicetree/bindings/net/fsl-tsec-phy.txt @@ -1,88 +1,14 @@ * MDIO IO device -The MDIO is a bus to which the PHY devices are connected. For each -device that exists on this bus, a child node should be created. See -the definition of the PHY node in booting-without-of.txt for an example -of how to define a PHY. 
- -Required properties: - - reg : Offset and length of the register set for the device, and optionally - the offset and length of the TBIPA register (TBI PHY address - register). If TBIPA register is not specified, the driver will - attempt to infer it from the register set specified (your mileage may - vary). - - compatible : Should define the compatible device type for the - mdio. Currently supported strings/devices are: - - "fsl,gianfar-tbi" - - "fsl,gianfar-mdio" - - "fsl,etsec2-tbi" - - "fsl,etsec2-mdio" - - "fsl,ucc-mdio" - - "fsl,fman-mdio" - When device_type is "mdio", the following strings are also considered: - - "gianfar" - - "ucc_geth_phy" - -Example: - - mdio@24520 { - reg = <24520 20>; - compatible = "fsl,gianfar-mdio"; - - ethernet-phy@0 { - ...... - }; - }; +Refer to Documentation/devicetree/bindings/net/fsl,gianfar-mdio.yaml * TBI Internal MDIO bus -As of this writing, every tsec is associated with an internal TBI PHY. -This PHY is accessed through the local MDIO bus. These buses are defined -similarly to the mdio buses, except they are compatible with "fsl,gianfar-tbi". -The TBI PHYs underneath them are similar to normal PHYs, but the reg property -is considered instructive, rather than descriptive. The reg property should -be chosen so it doesn't interfere with other PHYs on the bus. +Refer to Documentation/devicetree/bindings/net/fsl,gianfar-mdio.yaml * Gianfar-compatible ethernet nodes -Properties: - - - device_type : Should be "network" - - model : Model of the device. Can be "TSEC", "eTSEC", or "FEC" - - compatible : Should be "gianfar" - - reg : Offset and length of the register set for the device - - interrupts : For FEC devices, the first interrupt is the device's - interrupt. For TSEC and eTSEC devices, the first interrupt is - transmit, the second is receive, and the third is error. - - phy-handle : See ethernet.txt file in the same directory. - - fixed-link : See fixed-link.txt in the same directory. - - phy-connection-type : See ethernet.txt file in the same directory. - This property is only really needed if the connection is of type - "rgmii-id", as all other connection types are detected by hardware. - - fsl,magic-packet : If present, indicates that the hardware supports - waking up via magic packet. - - fsl,wake-on-filer : If present, indicates that the hardware supports - waking up by Filer General Purpose Interrupt (FGPI) asserted on the - Rx int line. This is an advanced power management capability allowing - certain packet types (user) defined by filer rules to wake up the system. - - bd-stash : If present, indicates that the hardware supports stashing - buffer descriptors in the L2. - - rx-stash-len : Denotes the number of bytes of a received buffer to stash - in the L2. - - rx-stash-idx : Denotes the index of the first byte from the received - buffer to stash in the L2. 
- -Example: - ethernet@24000 { - device_type = "network"; - model = "TSEC"; - compatible = "gianfar"; - reg = <0x24000 0x1000>; - local-mac-address = [ 00 E0 0C 00 73 00 ]; - interrupts = <29 2 30 2 34 2>; - interrupt-parent = <&mpic>; - phy-handle = <&phy0> - }; +Refer to Documentation/devicetree/bindings/net/fsl,gianfar.yaml * Gianfar PTP clock nodes diff --git a/Documentation/devicetree/bindings/net/ieee802154/ca8210.txt b/Documentation/devicetree/bindings/net/ieee802154/ca8210.txt index a1046e636fa14..f1bd07a0097db 100644 --- a/Documentation/devicetree/bindings/net/ieee802154/ca8210.txt +++ b/Documentation/devicetree/bindings/net/ieee802154/ca8210.txt @@ -20,7 +20,7 @@ Example: reg = <0>; spi-max-frequency = <3000000>; spi-cpol; - reset-gpio = <&gpio1 1 GPIO_ACTIVE_HIGH>; + reset-gpio = <&gpio1 1 GPIO_ACTIVE_LOW>; irq-gpio = <&gpio1 2 GPIO_ACTIVE_HIGH>; extclock-enable; extclock-freq = 16000000; diff --git a/Documentation/devicetree/bindings/net/intel,dwmac-plat.yaml b/Documentation/devicetree/bindings/net/intel,dwmac-plat.yaml index 42a0bc94312c3..62c1da36a2b5a 100644 --- a/Documentation/devicetree/bindings/net/intel,dwmac-plat.yaml +++ b/Documentation/devicetree/bindings/net/intel,dwmac-plat.yaml @@ -41,6 +41,12 @@ properties: - const: ptp_ref - const: tx_clk + interrupts: + maxItems: 1 + + interrupt-names: + const: macirq + required: - compatible - clocks diff --git a/Documentation/devicetree/bindings/net/mediatek-dwmac.yaml b/Documentation/devicetree/bindings/net/mediatek-dwmac.yaml index ed9d845f60080..3aab21b8e8de5 100644 --- a/Documentation/devicetree/bindings/net/mediatek-dwmac.yaml +++ b/Documentation/devicetree/bindings/net/mediatek-dwmac.yaml @@ -64,6 +64,12 @@ properties: - const: rmii_internal - const: mac_cg + interrupts: + maxItems: 1 + + interrupt-names: + const: macirq + power-domains: maxItems: 1 diff --git a/Documentation/devicetree/bindings/net/nxp,dwmac-imx.yaml b/Documentation/devicetree/bindings/net/nxp,dwmac-imx.yaml index 87bc4416eadf2..e5db346beca96 100644 --- a/Documentation/devicetree/bindings/net/nxp,dwmac-imx.yaml +++ b/Documentation/devicetree/bindings/net/nxp,dwmac-imx.yaml @@ -56,6 +56,14 @@ properties: - tx - mem + interrupts: + maxItems: 2 + + interrupt-names: + items: + - const: macirq + - const: eth_wake_irq + intf_mode: $ref: /schemas/types.yaml#/definitions/phandle-array items: diff --git a/Documentation/devicetree/bindings/net/realtek,rtl9301-mdio.yaml b/Documentation/devicetree/bindings/net/realtek,rtl9301-mdio.yaml new file mode 100644 index 0000000000000..02e4e33e99698 --- /dev/null +++ b/Documentation/devicetree/bindings/net/realtek,rtl9301-mdio.yaml @@ -0,0 +1,86 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/net/realtek,rtl9301-mdio.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Realtek RTL9300 MDIO Controller + +maintainers: + - Chris Packham + +properties: + compatible: + oneOf: + - items: + - enum: + - realtek,rtl9302b-mdio + - realtek,rtl9302c-mdio + - realtek,rtl9303-mdio + - const: realtek,rtl9301-mdio + - const: realtek,rtl9301-mdio + + '#address-cells': + const: 1 + + '#size-cells': + const: 0 + + reg: + maxItems: 1 + +patternProperties: + '^mdio-bus@[0-3]$': + $ref: mdio.yaml# + + properties: + reg: + maxItems: 1 + + required: + - reg + + patternProperties: + '^ethernet-phy@[a-f0-9]+$': + type: object + $ref: ethernet-phy.yaml# + unevaluatedProperties: false + + unevaluatedProperties: false + +required: + - compatible + - reg + 
+unevaluatedProperties: false + +examples: + - | + mdio-controller@ca00 { + compatible = "realtek,rtl9301-mdio"; + reg = <0xca00 0x200>; + #address-cells = <1>; + #size-cells = <0>; + + mdio-bus@0 { + reg = <0>; + #address-cells = <1>; + #size-cells = <0>; + + ethernet-phy@0 { + compatible = "ethernet-phy-ieee802.3-c45"; + reg = <0>; + }; + }; + + mdio-bus@1 { + reg = <1>; + #address-cells = <1>; + #size-cells = <0>; + + ethernet-phy@0 { + compatible = "ethernet-phy-ieee802.3-c45"; + reg = <0>; + }; + }; + }; diff --git a/Documentation/devicetree/bindings/mfd/realtek,rtl9301-switch.yaml b/Documentation/devicetree/bindings/net/realtek,rtl9301-switch.yaml similarity index 66% rename from Documentation/devicetree/bindings/mfd/realtek,rtl9301-switch.yaml rename to Documentation/devicetree/bindings/net/realtek,rtl9301-switch.yaml index f053303ab1e6b..80eabc1706698 100644 --- a/Documentation/devicetree/bindings/mfd/realtek,rtl9301-switch.yaml +++ b/Documentation/devicetree/bindings/net/realtek,rtl9301-switch.yaml @@ -1,7 +1,7 @@ # SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) %YAML 1.2 --- -$id: http://devicetree.org/schemas/mfd/realtek,rtl9301-switch.yaml# +$id: http://devicetree.org/schemas/net/realtek,rtl9301-switch.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# title: Realtek Switch with Internal CPU @@ -14,6 +14,8 @@ description: number of different peripherals are accessed through a common register block, represented here as a syscon node. +$ref: ethernet-switch.yaml#/$defs/ethernet-ports + properties: compatible: items: @@ -28,12 +30,23 @@ properties: reg: maxItems: 1 + interrupts: + maxItems: 2 + + interrupt-names: + items: + - const: switch + - const: nic + '#address-cells': const: 1 '#size-cells': const: 1 + ethernet-ports: + type: object + patternProperties: 'reboot@[0-9a-f]+$': $ref: /schemas/power/reset/syscon-reboot.yaml# @@ -41,9 +54,14 @@ patternProperties: 'i2c@[0-9a-f]+$': $ref: /schemas/i2c/realtek,rtl9301-i2c.yaml# + 'mdio-controller@[0-9a-f]+$': + $ref: realtek,rtl9301-mdio.yaml# + required: - compatible - reg + - interrupts + - interrupt-names additionalProperties: false @@ -52,6 +70,9 @@ examples: ethernet-switch@1b000000 { compatible = "realtek,rtl9301-switch", "syscon", "simple-mfd"; reg = <0x1b000000 0x10000>; + interrupt-parent = <&intc>; + interrupts = <23>, <24>; + interrupt-names = "switch", "nic"; #address-cells = <1>; #size-cells = <1>; @@ -110,5 +131,45 @@ examples: }; }; }; + + mdio-controller@ca00 { + compatible = "realtek,rtl9301-mdio"; + reg = <0xca00 0x200>; + #address-cells = <1>; + #size-cells = <0>; + + mdio-bus@0 { + reg = <0>; + #address-cells = <1>; + #size-cells = <0>; + + phy1: ethernet-phy@0 { + reg = <0>; + }; + }; + mdio-bus@1 { + reg = <1>; + #address-cells = <1>; + #size-cells = <0>; + + phy2: ethernet-phy@0 { + reg = <0>; + }; + }; + }; + + ethernet-ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + phy-handle = <&phy1>; + }; + port@1 { + reg = <1>; + phy-handle = <&phy2>; + }; + }; }; diff --git a/Documentation/devicetree/bindings/net/rfkill-gpio.yaml b/Documentation/devicetree/bindings/net/rfkill-gpio.yaml index 9630c8466fac0..4a706a41ab38c 100644 --- a/Documentation/devicetree/bindings/net/rfkill-gpio.yaml +++ b/Documentation/devicetree/bindings/net/rfkill-gpio.yaml @@ -32,6 +32,10 @@ properties: shutdown-gpios: maxItems: 1 + default-blocked: + $ref: /schemas/types.yaml#/definitions/flag + description: configure rfkill state as blocked at boot + required: - compatible - radio-type @@ 
-48,4 +52,5 @@ examples: label = "rfkill-pcie-wlan"; radio-type = "wlan"; shutdown-gpios = <&gpio2 25 GPIO_ACTIVE_HIGH>; + default-blocked; }; diff --git a/Documentation/devicetree/bindings/net/rockchip-dwmac.yaml b/Documentation/devicetree/bindings/net/rockchip-dwmac.yaml index f8a576611d6c1..0ac7c4b47d6bf 100644 --- a/Documentation/devicetree/bindings/net/rockchip-dwmac.yaml +++ b/Documentation/devicetree/bindings/net/rockchip-dwmac.yaml @@ -24,6 +24,7 @@ select: - rockchip,rk3366-gmac - rockchip,rk3368-gmac - rockchip,rk3399-gmac + - rockchip,rk3528-gmac - rockchip,rk3568-gmac - rockchip,rk3576-gmac - rockchip,rk3588-gmac @@ -32,9 +33,6 @@ select: required: - compatible -allOf: - - $ref: snps,dwmac.yaml# - properties: compatible: oneOf: @@ -52,14 +50,25 @@ properties: - rockchip,rv1108-gmac - items: - enum: + - rockchip,rk3528-gmac - rockchip,rk3568-gmac - rockchip,rk3576-gmac - rockchip,rk3588-gmac - rockchip,rv1126-gmac - const: snps,dwmac-4.20a + interrupts: + minItems: 1 + maxItems: 2 + + interrupt-names: + minItems: 1 + items: + - const: macirq + - const: eth_wake_irq + clocks: - minItems: 5 + minItems: 4 maxItems: 8 clock-names: @@ -114,6 +123,36 @@ required: - compatible - clocks - clock-names + - rockchip,grf + +allOf: + - $ref: snps,dwmac.yaml# + + - if: + properties: + compatible: + contains: + enum: + - rockchip,rk3576-gmac + - rockchip,rk3588-gmac + then: + required: + - rockchip,php-grf + else: + properties: + rockchip,php-grf: false + + - if: + not: + properties: + compatible: + contains: + enum: + - rockchip,rk3528-gmac + then: + properties: + clocks: + minItems: 5 unevaluatedProperties: false diff --git a/Documentation/devicetree/bindings/net/snps,dwmac.yaml b/Documentation/devicetree/bindings/net/snps,dwmac.yaml index 91e75eb3f329b..78b3030dc56d2 100644 --- a/Documentation/devicetree/bindings/net/snps,dwmac.yaml +++ b/Documentation/devicetree/bindings/net/snps,dwmac.yaml @@ -32,6 +32,7 @@ select: - snps,dwmac-4.20a - snps,dwmac-5.10a - snps,dwmac-5.20 + - snps,dwmac-5.30a - snps,dwxgmac - snps,dwxgmac-2.10 @@ -98,10 +99,13 @@ properties: - snps,dwmac-4.20a - snps,dwmac-5.10a - snps,dwmac-5.20 + - snps,dwmac-5.30a - snps,dwxgmac - snps,dwxgmac-2.10 + - sophgo,sg2044-dwmac - starfive,jh7100-dwmac - starfive,jh7110-dwmac + - tesla,fsd-ethqos - thead,th1520-gmac reg: @@ -126,7 +130,7 @@ properties: clocks: minItems: 1 - maxItems: 8 + maxItems: 10 additionalItems: true items: - description: GMAC main clock @@ -138,7 +142,7 @@ properties: clock-names: minItems: 1 - maxItems: 8 + maxItems: 10 additionalItems: true contains: enum: @@ -490,6 +494,7 @@ properties: snps,en-tx-lpi-clockgating: $ref: /schemas/types.yaml#/definitions/flag + deprecated: true description: Enable gating of the MAC TX clock during TX low-power mode @@ -631,6 +636,7 @@ allOf: - snps,dwmac-4.20a - snps,dwmac-5.10a - snps,dwmac-5.20 + - snps,dwmac-5.30a - snps,dwxgmac - snps,dwxgmac-2.10 - st,spear600-gmac diff --git a/Documentation/devicetree/bindings/net/sophgo,sg2044-dwmac.yaml b/Documentation/devicetree/bindings/net/sophgo,sg2044-dwmac.yaml new file mode 100644 index 0000000000000..4dd2dc9c678b7 --- /dev/null +++ b/Documentation/devicetree/bindings/net/sophgo,sg2044-dwmac.yaml @@ -0,0 +1,126 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/net/sophgo,sg2044-dwmac.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Sophgo SG2044 DWMAC glue layer + +maintainers: + - Inochi Amaoto + +select: + properties: + compatible: + 
contains: + enum: + - sophgo,sg2044-dwmac + required: + - compatible + +properties: + compatible: + items: + - const: sophgo,sg2044-dwmac + - const: snps,dwmac-5.30a + + reg: + maxItems: 1 + + clocks: + items: + - description: GMAC main clock + - description: PTP clock + - description: TX clock + + clock-names: + items: + - const: stmmaceth + - const: ptp_ref + - const: tx + + dma-noncoherent: true + + interrupts: + maxItems: 1 + + interrupt-names: + maxItems: 1 + + resets: + maxItems: 1 + + reset-names: + const: stmmaceth + +required: + - compatible + - reg + - clocks + - clock-names + - interrupts + - interrupt-names + - resets + - reset-names + +allOf: + - $ref: snps,dwmac.yaml# + +unevaluatedProperties: false + +examples: + - | + #include + + ethernet@30006000 { + compatible = "sophgo,sg2044-dwmac", "snps,dwmac-5.30a"; + reg = <0x30006000 0x4000>; + clocks = <&clk 151>, <&clk 152>, <&clk 154>; + clock-names = "stmmaceth", "ptp_ref", "tx"; + interrupt-parent = <&intc>; + interrupts = <296 IRQ_TYPE_LEVEL_HIGH>; + interrupt-names = "macirq"; + resets = <&rst 30>; + reset-names = "stmmaceth"; + snps,multicast-filter-bins = <0>; + snps,perfect-filter-entries = <1>; + snps,aal; + snps,tso; + snps,txpbl = <32>; + snps,rxpbl = <32>; + snps,mtl-rx-config = <&gmac0_mtl_rx_setup>; + snps,mtl-tx-config = <&gmac0_mtl_tx_setup>; + snps,axi-config = <&gmac0_stmmac_axi_setup>; + status = "disabled"; + + gmac0_mtl_rx_setup: rx-queues-config { + snps,rx-queues-to-use = <8>; + snps,rx-sched-wsp; + queue0 {}; + queue1 {}; + queue2 {}; + queue3 {}; + queue4 {}; + queue5 {}; + queue6 {}; + queue7 {}; + }; + + gmac0_mtl_tx_setup: tx-queues-config { + snps,tx-queues-to-use = <8>; + queue0 {}; + queue1 {}; + queue2 {}; + queue3 {}; + queue4 {}; + queue5 {}; + queue6 {}; + queue7 {}; + }; + + gmac0_stmmac_axi_setup: stmmac-axi-config { + snps,blen = <16 8 4 0 0 0 0>; + snps,wr_osr_lmt = <1>; + snps,rd_osr_lmt = <2>; + }; + }; diff --git a/Documentation/devicetree/bindings/net/stm32-dwmac.yaml b/Documentation/devicetree/bindings/net/stm32-dwmac.yaml index 85cea9966a27e..987254900d0da 100644 --- a/Documentation/devicetree/bindings/net/stm32-dwmac.yaml +++ b/Documentation/devicetree/bindings/net/stm32-dwmac.yaml @@ -54,6 +54,16 @@ properties: items: - const: stmmaceth + interrupts: + minItems: 1 + maxItems: 2 + + interrupt-names: + minItems: 1 + items: + - const: macirq + - const: eth_wake_irq + clocks: minItems: 3 items: diff --git a/Documentation/devicetree/bindings/net/tesla,fsd-ethqos.yaml b/Documentation/devicetree/bindings/net/tesla,fsd-ethqos.yaml new file mode 100644 index 0000000000000..dd7481bb16e59 --- /dev/null +++ b/Documentation/devicetree/bindings/net/tesla,fsd-ethqos.yaml @@ -0,0 +1,118 @@ +# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/net/tesla,fsd-ethqos.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: FSD Ethernet Quality of Service + +maintainers: + - Swathi K S + +description: + Tesla ethernet devices based on dwmmac support Gigabit ethernet. 
+ +allOf: + - $ref: snps,dwmac.yaml# + +properties: + compatible: + const: tesla,fsd-ethqos + + reg: + maxItems: 1 + + interrupts: + maxItems: 1 + + interrupt-names: + items: + - const: macirq + + clocks: + minItems: 5 + items: + - description: PTP clock + - description: Master bus clock + - description: Slave bus clock + - description: MAC TX clock + - description: MAC RX clock + - description: Master2 bus clock + - description: Slave2 bus clock + - description: RX MUX clock + - description: PHY RX clock + - description: PERIC RGMII clock + + clock-names: + minItems: 5 + items: + - const: ptp_ref + - const: master_bus + - const: slave_bus + - const: tx + - const: rx + - const: master2_bus + - const: slave2_bus + - const: eqos_rxclk_mux + - const: eqos_phyrxclk + - const: dout_peric_rgmii_clk + + iommus: + maxItems: 1 + + phy-mode: + enum: + - rgmii + - rgmii-id + - rgmii-rxid + - rgmii-txid + +required: + - compatible + - reg + - interrupts + - clocks + - clock-names + - iommus + - phy-mode + +unevaluatedProperties: false + +examples: + - | + #include + #include + soc { + #address-cells = <2>; + #size-cells = <2>; + ethernet1: ethernet@14300000 { + compatible = "tesla,fsd-ethqos"; + reg = <0x0 0x14300000 0x0 0x10000>; + interrupts = ; + interrupt-names = "macirq"; + clocks = <&clock_peric PERIC_EQOS_TOP_IPCLKPORT_CLK_PTP_REF_I>, + <&clock_peric PERIC_EQOS_TOP_IPCLKPORT_ACLK_I>, + <&clock_peric PERIC_EQOS_TOP_IPCLKPORT_HCLK_I>, + <&clock_peric PERIC_EQOS_TOP_IPCLKPORT_RGMII_CLK_I>, + <&clock_peric PERIC_EQOS_TOP_IPCLKPORT_CLK_RX_I>, + <&clock_peric PERIC_BUS_D_PERIC_IPCLKPORT_EQOSCLK>, + <&clock_peric PERIC_BUS_P_PERIC_IPCLKPORT_EQOSCLK>, + <&clock_peric PERIC_EQOS_PHYRXCLK_MUX>, + <&clock_peric PERIC_EQOS_PHYRXCLK>, + <&clock_peric PERIC_DOUT_RGMII_CLK>; + clock-names = "ptp_ref", "master_bus", "slave_bus","tx", + "rx", "master2_bus", "slave2_bus", "eqos_rxclk_mux", + "eqos_phyrxclk","dout_peric_rgmii_clk"; + assigned-clocks = <&clock_peric PERIC_EQOS_PHYRXCLK_MUX>, + <&clock_peric PERIC_EQOS_PHYRXCLK>; + assigned-clock-parents = <&clock_peric PERIC_EQOS_PHYRXCLK>; + pinctrl-names = "default"; + pinctrl-0 = <ð1_tx_clk>, <ð1_tx_data>, <ð1_tx_ctrl>, + <ð1_phy_intr>, <ð1_rx_clk>, <ð1_rx_data>, + <ð1_rx_ctrl>, <ð1_mdio>; + iommus = <&smmu_peric 0x0 0x1>; + phy-mode = "rgmii-id"; + }; + }; + +... diff --git a/Documentation/devicetree/bindings/net/toshiba,visconti-dwmac.yaml b/Documentation/devicetree/bindings/net/toshiba,visconti-dwmac.yaml index 052f636158b3f..f0f32e18fc855 100644 --- a/Documentation/devicetree/bindings/net/toshiba,visconti-dwmac.yaml +++ b/Documentation/devicetree/bindings/net/toshiba,visconti-dwmac.yaml @@ -42,6 +42,12 @@ properties: - const: stmmaceth - const: phy_ref_clk + interrupts: + maxItems: 1 + + interrupt-names: + const: macirq + required: - compatible - reg diff --git a/Documentation/devicetree/bindings/net/wireless/qcom,ath10k.yaml b/Documentation/devicetree/bindings/net/wireless/qcom,ath10k.yaml index aace072e2d52a..f2440d39b7ebc 100644 --- a/Documentation/devicetree/bindings/net/wireless/qcom,ath10k.yaml +++ b/Documentation/devicetree/bindings/net/wireless/qcom,ath10k.yaml @@ -92,20 +92,41 @@ properties: ieee80211-freq-limit: true + qcom,calibration-data: + $ref: /schemas/types.yaml#/definitions/uint8-array + description: + Calibration data + board-specific data as a byte array. The length + can vary between hardware versions. 
+ qcom,ath10k-calibration-data: $ref: /schemas/types.yaml#/definitions/uint8-array + deprecated: true description: Calibration data + board-specific data as a byte array. The length can vary between hardware versions. + qcom,calibration-variant: + $ref: /schemas/types.yaml#/definitions/string + description: + Unique variant identifier of the calibration data in board-2.bin + for designs with colliding bus and device specific ids + qcom,ath10k-calibration-variant: $ref: /schemas/types.yaml#/definitions/string + deprecated: true description: Unique variant identifier of the calibration data in board-2.bin for designs with colliding bus and device specific ids + qcom,pre-calibration-data: + $ref: /schemas/types.yaml#/definitions/uint8-array + description: + Pre-calibration data as a byte array. The length can vary between + hardware versions. + qcom,ath10k-pre-calibration-data: $ref: /schemas/types.yaml#/definitions/uint8-array + deprecated: true description: Pre-calibration data as a byte array. The length can vary between hardware versions. diff --git a/Documentation/devicetree/bindings/net/wireless/qcom,ath11k-pci.yaml b/Documentation/devicetree/bindings/net/wireless/qcom,ath11k-pci.yaml index a4425cf196aba..653b319fee880 100644 --- a/Documentation/devicetree/bindings/net/wireless/qcom,ath11k-pci.yaml +++ b/Documentation/devicetree/bindings/net/wireless/qcom,ath11k-pci.yaml @@ -22,8 +22,15 @@ properties: reg: maxItems: 1 + qcom,calibration-variant: + $ref: /schemas/types.yaml#/definitions/string + description: | + string to uniquely identify variant of the calibration data for designs + with colliding bus and device ids + qcom,ath11k-calibration-variant: $ref: /schemas/types.yaml#/definitions/string + deprecated: true description: | string to uniquely identify variant of the calibration data for designs with colliding bus and device ids @@ -127,7 +134,7 @@ examples: vddrfa1p2-supply = <&vreg_pmu_rfa_1p2>; vddrfa1p8-supply = <&vreg_pmu_rfa_1p7>; - qcom,ath11k-calibration-variant = "LE_X13S"; + qcom,calibration-variant = "LE_X13S"; }; }; }; diff --git a/Documentation/devicetree/bindings/net/wireless/qcom,ath11k.yaml b/Documentation/devicetree/bindings/net/wireless/qcom,ath11k.yaml index a69ffb7b3cb88..c089677702cf1 100644 --- a/Documentation/devicetree/bindings/net/wireless/qcom,ath11k.yaml +++ b/Documentation/devicetree/bindings/net/wireless/qcom,ath11k.yaml @@ -41,8 +41,15 @@ properties: * reg * reg-names + qcom,calibration-variant: + $ref: /schemas/types.yaml#/definitions/string + description: + string to uniquely identify variant of the calibration data in the + board-2.bin for designs with colliding bus and device specific ids + qcom,ath11k-calibration-variant: $ref: /schemas/types.yaml#/definitions/string + deprecated: true description: string to uniquely identify variant of the calibration data in the board-2.bin for designs with colliding bus and device specific ids diff --git a/Documentation/devicetree/bindings/net/wireless/qcom,ath12k-wsi.yaml b/Documentation/devicetree/bindings/net/wireless/qcom,ath12k-wsi.yaml index 318f305405e3b..589960144fe1d 100644 --- a/Documentation/devicetree/bindings/net/wireless/qcom,ath12k-wsi.yaml +++ b/Documentation/devicetree/bindings/net/wireless/qcom,ath12k-wsi.yaml @@ -52,8 +52,15 @@ properties: reg: maxItems: 1 + qcom,calibration-variant: + $ref: /schemas/types.yaml#/definitions/string + description: + String to uniquely identify variant of the calibration data for designs + with colliding bus and device ids + qcom,ath12k-calibration-variant: 
$ref: /schemas/types.yaml#/definitions/string + deprecated: true description: String to uniquely identify variant of the calibration data for designs with colliding bus and device ids @@ -103,7 +110,7 @@ examples: compatible = "pci17cb,1109"; reg = <0x0 0x0 0x0 0x0 0x0>; - qcom,ath12k-calibration-variant = "RDP433_1"; + qcom,calibration-variant = "RDP433_1"; ports { #address-cells = <1>; @@ -139,7 +146,7 @@ examples: compatible = "pci17cb,1109"; reg = <0x0 0x0 0x0 0x0 0x0>; - qcom,ath12k-calibration-variant = "RDP433_2"; + qcom,calibration-variant = "RDP433_2"; qcom,wsi-controller; ports { @@ -176,7 +183,7 @@ examples: compatible = "pci17cb,1109"; reg = <0x0 0x0 0x0 0x0 0x0>; - qcom,ath12k-calibration-variant = "RDP433_3"; + qcom,calibration-variant = "RDP433_3"; ports { #address-cells = <1>; diff --git a/Documentation/filesystems/idmappings.rst b/Documentation/filesystems/idmappings.rst index 77930c77fcfe6..2a206129f8284 100644 --- a/Documentation/filesystems/idmappings.rst +++ b/Documentation/filesystems/idmappings.rst @@ -63,8 +63,8 @@ what id ``k11000`` corresponds to in the second or third idmapping. The straightforward algorithm to use is to apply the inverse of the first idmapping, mapping ``k11000`` up to ``u1000``. Afterwards, we can map ``u1000`` down using either the second idmapping mapping or third idmapping mapping. The second -idmapping would map ``u1000`` down to ``21000``. The third idmapping would map -``u1000`` down to ``u31000``. +idmapping would map ``u1000`` down to ``k21000``. The third idmapping would map +``u1000`` down to ``k31000``. If we were given the same task for the following three idmappings:: diff --git a/Documentation/netlink/genetlink-c.yaml b/Documentation/netlink/genetlink-c.yaml index 9660ffb1ed6ac..96fa1f1522ed1 100644 --- a/Documentation/netlink/genetlink-c.yaml +++ b/Documentation/netlink/genetlink-c.yaml @@ -14,9 +14,10 @@ $defs: pattern: ^[0-9A-Za-z_-]+( - 1)?$ minimum: 0 len-or-limit: - # literal int or limit based on fixed-width type e.g. u8-min, u16-max, etc. + # literal int, const name, or limit based on fixed-width type + # e.g. u8-min, u16-max, etc. type: [ string, integer ] - pattern: ^[su](8|16|32|64)-(min|max)$ + pattern: ^[0-9A-Za-z_-]+$ minimum: 0 # Schema for specs @@ -160,7 +161,7 @@ properties: type: string type: &attr-type enum: [ unused, pad, flag, binary, - uint, sint, u8, u16, u32, u64, s32, s64, + uint, sint, u8, u16, u32, u64, s8, s16, s32, s64, string, nest, indexed-array, nest-type-value ] doc: description: Documentation of the attribute. diff --git a/Documentation/netlink/genetlink-legacy.yaml b/Documentation/netlink/genetlink-legacy.yaml index 16380e12cabe7..a8c5b521937d1 100644 --- a/Documentation/netlink/genetlink-legacy.yaml +++ b/Documentation/netlink/genetlink-legacy.yaml @@ -14,9 +14,10 @@ $defs: pattern: ^[0-9A-Za-z_-]+( - 1)?$ minimum: 0 len-or-limit: - # literal int or limit based on fixed-width type e.g. u8-min, u16-max, etc. + # literal int, const name, or limit based on fixed-width type + # e.g. u8-min, u16-max, etc. type: [ string, integer ] - pattern: ^[su](8|16|32|64)-(min|max)$ + pattern: ^[0-9A-Za-z_-]+$ minimum: 0 # Schema for specs @@ -151,6 +152,9 @@ properties: the right formatting mechanism when displaying values of this type. enum: [ hex, mac, fddi, ipv4, ipv6, uuid ] + struct: + description: Name of the nested struct type. 
+ type: string # End genetlink-legacy attribute-sets: @@ -203,7 +207,7 @@ properties: type: &attr-type description: The netlink attribute type enum: [ unused, pad, flag, binary, bitfield32, - uint, sint, u8, u16, u32, u64, s32, s64, + uint, sint, u8, u16, u32, u64, s8, s16, s32, s64, string, nest, indexed-array, nest-type-value ] doc: description: Documentation of the attribute. diff --git a/Documentation/netlink/genetlink.yaml b/Documentation/netlink/genetlink.yaml index b036227b46f1b..40efbbad76ab8 100644 --- a/Documentation/netlink/genetlink.yaml +++ b/Documentation/netlink/genetlink.yaml @@ -14,9 +14,10 @@ $defs: pattern: ^[0-9A-Za-z_-]+( - 1)?$ minimum: 0 len-or-limit: - # literal int or limit based on fixed-width type e.g. u8-min, u16-max, etc. + # literal int, const name, or limit based on fixed-width type + # e.g. u8-min, u16-max, etc. type: [ string, integer ] - pattern: ^[su](8|16|32|64)-(min|max)$ + pattern: ^[0-9A-Za-z_-]+$ minimum: 0 # Schema for specs @@ -123,7 +124,7 @@ properties: type: string type: &attr-type enum: [ unused, pad, flag, binary, - uint, sint, u8, u16, u32, u64, s32, s64, + uint, sint, u8, u16, u32, u64, s8, s16, s32, s64, string, nest, indexed-array, nest-type-value ] doc: description: Documentation of the attribute. diff --git a/Documentation/netlink/specs/conntrack.yaml b/Documentation/netlink/specs/conntrack.yaml new file mode 100644 index 0000000000000..840dc4504216b --- /dev/null +++ b/Documentation/netlink/specs/conntrack.yaml @@ -0,0 +1,643 @@ +# SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) + +name: conntrack +protocol: netlink-raw +protonum: 12 + +doc: + Netfilter connection tracking subsystem over nfnetlink + +definitions: + - + name: nfgenmsg + type: struct + members: + - + name: nfgen-family + type: u8 + - + name: version + type: u8 + - + name: res-id + byte-order: big-endian + type: u16 + - + name: nf-ct-tcp-flags-mask + type: struct + members: + - + name: flags + type: u8 + enum: nf-ct-tcp-flags + enum-as-flags: true + - + name: mask + type: u8 + enum: nf-ct-tcp-flags + enum-as-flags: true + - + name: nf-ct-tcp-flags + type: flags + entries: + - window-scale + - sack-perm + - close-init + - be-liberal + - unacked + - maxack + - challenge-ack + - simultaneous-open + - + name: nf-ct-tcp-state + type: enum + entries: + - none + - syn-sent + - syn-recv + - established + - fin-wait + - close-wait + - last-ack + - time-wait + - close + - syn-sent2 + - max + - ignore + - retrans + - unack + - timeout-max + - + name: nf-ct-sctp-state + type: enum + entries: + - none + - cloned + - cookie-wait + - cookie-echoed + - established + - shutdown-sent + - shutdown-received + - shutdown-ack-sent + - shutdown-heartbeat-sent + - + name: nf-ct-status + type: flags + entries: + - expected + - seen-reply + - assured + - confirmed + - src-nat + - dst-nat + - seq-adj + - src-nat-done + - dst-nat-done + - dying + - fixed-timeout + - template + - nat-clash + - helper + - offload + - hw-offload + +attribute-sets: + - + name: counter-attrs + attributes: + - + name: packets + type: u64 + byte-order: big-endian + - + name: bytes + type: u64 + byte-order: big-endian + - + name: packets-old + type: u32 + - + name: bytes-old + type: u32 + - + name: pad + type: pad + - + name: tuple-proto-attrs + attributes: + - + name: proto-num + type: u8 + doc: l4 protocol number + - + name: proto-src-port + type: u16 + byte-order: big-endian + doc: l4 source port + - + name: proto-dst-port + type: u16 + byte-order: big-endian + doc: l4 source port + - + name: 
proto-icmp-id + type: u16 + byte-order: big-endian + doc: l4 icmp id + - + name: proto-icmp-type + type: u8 + - + name: proto-icmp-code + type: u8 + - + name: proto-icmpv6-id + type: u16 + byte-order: big-endian + doc: l4 icmp id + - + name: proto-icmpv6-type + type: u8 + - + name: proto-icmpv6-code + type: u8 + - + name: tuple-ip-attrs + attributes: + - + name: ip-v4-src + type: u32 + byte-order: big-endian + display-hint: ipv4 + doc: ipv4 source address + - + name: ip-v4-dst + type: u32 + byte-order: big-endian + display-hint: ipv4 + doc: ipv4 destination address + - + name: ip-v6-src + type: binary + checks: + min-len: 16 + byte-order: big-endian + display-hint: ipv6 + doc: ipv6 source address + - + name: ip-v6-dst + type: binary + checks: + min-len: 16 + byte-order: big-endian + display-hint: ipv6 + doc: ipv6 destination address + - + name: tuple-attrs + attributes: + - + name: tuple-ip + type: nest + nested-attributes: tuple-ip-attrs + doc: conntrack l3 information + - + name: tuple-proto + type: nest + nested-attributes: tuple-proto-attrs + doc: conntrack l4 information + - + name: tuple-zone + type: u16 + byte-order: big-endian + doc: conntrack zone id + - + name: protoinfo-tcp-attrs + attributes: + - + name: tcp-state + type: u8 + enum: nf-ct-tcp-state + doc: tcp connection state + - + name: tcp-wscale-original + type: u8 + doc: window scaling factor in original direction + - + name: tcp-wscale-reply + type: u8 + doc: window scaling factor in reply direction + - + name: tcp-flags-original + type: binary + struct: nf-ct-tcp-flags-mask + - + name: tcp-flags-reply + type: binary + struct: nf-ct-tcp-flags-mask + - + name: protoinfo-dccp-attrs + attributes: + - + name: dccp-state + type: u8 + doc: dccp connection state + - + name: dccp-role + type: u8 + - + name: dccp-handshake-seq + type: u64 + byte-order: big-endian + - + name: dccp-pad + type: pad + - + name: protoinfo-sctp-attrs + attributes: + - + name: sctp-state + type: u8 + doc: sctp connection state + enum: nf-ct-sctp-state + - + name: vtag-original + type: u32 + byte-order: big-endian + - + name: vtag-reply + type: u32 + byte-order: big-endian + - + name: protoinfo-attrs + attributes: + - + name: protoinfo-tcp + type: nest + nested-attributes: protoinfo-tcp-attrs + doc: conntrack tcp state information + - + name: protoinfo-dccp + type: nest + nested-attributes: protoinfo-dccp-attrs + doc: conntrack dccp state information + - + name: protoinfo-sctp + type: nest + nested-attributes: protoinfo-sctp-attrs + doc: conntrack sctp state information + - + name: help-attrs + attributes: + - + name: help-name + type: string + doc: helper name + - + name: nat-proto-attrs + attributes: + - + name: nat-port-min + type: u16 + byte-order: big-endian + - + name: nat-port-max + type: u16 + byte-order: big-endian + - + name: nat-attrs + attributes: + - + name: nat-v4-minip + type: u32 + byte-order: big-endian + - + name: nat-v4-maxip + type: u32 + byte-order: big-endian + - + name: nat-v6-minip + type: binary + - + name: nat-v6-maxip + type: binary + - + name: nat-proto + type: nest + nested-attributes: nat-proto-attrs + - + name: seqadj-attrs + attributes: + - + name: correction-pos + type: u32 + byte-order: big-endian + - + name: offset-before + type: u32 + byte-order: big-endian + - + name: offset-after + type: u32 + byte-order: big-endian + - + name: secctx-attrs + attributes: + - + name: secctx-name + type: string + - + name: synproxy-attrs + attributes: + - + name: isn + type: u32 + byte-order: big-endian + - + name: its + type: u32 + 
byte-order: big-endian + - + name: tsoff + type: u32 + byte-order: big-endian + - + name: conntrack-attrs + attributes: + - + name: tuple-orig + type: nest + nested-attributes: tuple-attrs + doc: conntrack l3+l4 protocol information, original direction + - + name: tuple-reply + type: nest + nested-attributes: tuple-attrs + doc: conntrack l3+l4 protocol information, reply direction + - + name: status + type: u32 + byte-order: big-endian + enum: nf-ct-status + enum-as-flags: true + doc: conntrack flag bits + - + name: protoinfo + type: nest + nested-attributes: protoinfo-attrs + - + name: help + type: nest + nested-attributes: help-attrs + - + name: nat-src + type: nest + nested-attributes: nat-attrs + - + name: timeout + type: u32 + byte-order: big-endian + - + name: mark + type: u32 + byte-order: big-endian + - + name: counters-orig + type: nest + nested-attributes: counter-attrs + - + name: counters-reply + type: nest + nested-attributes: counter-attrs + - + name: use + type: u32 + byte-order: big-endian + - + name: id + type: u32 + byte-order: big-endian + - + name: nat-dst + type: nest + nested-attributes: nat-attrs + - + name: tuple-master + type: nest + nested-attributes: tuple-attrs + - + name: seq-adj-orig + type: nest + nested-attributes: seqadj-attrs + - + name: seq-adj-reply + type: nest + nested-attributes: seqadj-attrs + - + name: secmark + type: binary + doc: obsolete + - + name: zone + type: u16 + byte-order: big-endian + doc: conntrack zone id + - + name: secctx + type: nest + nested-attributes: secctx-attrs + - + name: timestamp + type: u64 + byte-order: big-endian + - + name: mark-mask + type: u32 + byte-order: big-endian + - + name: labels + type: binary + - + name: labels-mask + type: binary + - + name: synproxy + type: nest + nested-attributes: synproxy-attrs + - + name: filter + type: nest + nested-attributes: tuple-attrs + - + name: status-mask + type: u32 + byte-order: big-endian + enum: nf-ct-status + enum-as-flags: true + doc: conntrack flag bits to change + - + name: timestamp-event + type: u64 + byte-order: big-endian + - + name: conntrack-stats-attrs + attributes: + - + name: searched + type: u32 + byte-order: big-endian + doc: obsolete + - + name: found + type: u32 + byte-order: big-endian + - + name: new + type: u32 + byte-order: big-endian + doc: obsolete + - + name: invalid + type: u32 + byte-order: big-endian + doc: obsolete + - + name: ignore + type: u32 + byte-order: big-endian + doc: obsolete + - + name: delete + type: u32 + byte-order: big-endian + doc: obsolete + - + name: delete-list + type: u32 + byte-order: big-endian + doc: obsolete + - + name: insert + type: u32 + byte-order: big-endian + - + name: insert-failed + type: u32 + byte-order: big-endian + - + name: drop + type: u32 + byte-order: big-endian + - + name: early-drop + type: u32 + byte-order: big-endian + - + name: error + type: u32 + byte-order: big-endian + - + name: search-restart + type: u32 + byte-order: big-endian + - + name: clash-resolve + type: u32 + byte-order: big-endian + - + name: chain-toolong + type: u32 + byte-order: big-endian + +operations: + enum-model: directional + list: + - + name: get + doc: get / dump entries + attribute-set: conntrack-attrs + fixed-header: nfgenmsg + do: + request: + value: 0x101 + attributes: + - tuple-orig + - tuple-reply + - zone + reply: + value: 0x100 + attributes: + - tuple-orig + - tuple-reply + - status + - protoinfo + - help + - nat-src + - nat-dst + - timeout + - mark + - counters-orig + - counters-reply + - use + - id + - 
tuple-master + - seq-adj-orig + - seq-adj-reply + - zone + - secctx + - labels + - synproxy + dump: + request: + value: 0x101 + attributes: + - nfgen-family + - mark + - filter + - status + - zone + reply: + value: 0x100 + attributes: + - tuple-orig + - tuple-reply + - status + - protoinfo + - help + - nat-src + - nat-dst + - timeout + - mark + - counters-orig + - counters-reply + - use + - id + - tuple-master + - seq-adj-orig + - seq-adj-reply + - zone + - secctx + - labels + - synproxy + - + name: get-stats + doc: dump pcpu conntrack stats + attribute-set: conntrack-stats-attrs + fixed-header: nfgenmsg + dump: + request: + value: 0x104 + reply: + value: 0x104 + attributes: + - searched + - found + - insert + - insert-failed + - drop + - early-drop + - error + - search-restart + - clash-resolve + - chain-toolong diff --git a/Documentation/netlink/specs/devlink.yaml b/Documentation/netlink/specs/devlink.yaml index 09fbb4c03fc8d..5d39eb68aefb1 100644 --- a/Documentation/netlink/specs/devlink.yaml +++ b/Documentation/netlink/specs/devlink.yaml @@ -820,6 +820,9 @@ attribute-sets: - name: region-direct type: flag + - + name: info-function-uid + type: string - name: dl-dev-stats @@ -1868,6 +1871,8 @@ operations: - info-version-fixed - info-version-running - info-version-stored + - info-board-serial-number + - info-function-uid dump: reply: *info-get-reply diff --git a/Documentation/netlink/specs/netdev.yaml b/Documentation/netlink/specs/netdev.yaml index cbb544bd6c848..6501795595585 100644 --- a/Documentation/netlink/specs/netdev.yaml +++ b/Documentation/netlink/specs/netdev.yaml @@ -70,6 +70,10 @@ definitions: name: tx-checksum doc: L3 checksum HW offload is supported by the driver. + - + name: tx-launch-time-fifo + doc: + Launch time HW offload is supported by the driver. - name: queue-type type: enum @@ -78,6 +82,10 @@ definitions: name: qstats-scope type: flags entries: [ queue ] + - + name: napi-threaded + type: enum + entries: [ disable, enable, busy-poll-enable ] attribute-sets: - @@ -114,6 +122,9 @@ attribute-sets: doc: Bitmask of enabled AF_XDP features. type: u64 enum: xsk-flags + - + name: io-uring-provider-info + attributes: [] - name: page-pool attributes: @@ -171,6 +182,11 @@ attribute-sets: name: dmabuf doc: ID of the dmabuf this page-pool is attached to. type: u32 + - + name: io-uring + doc: io-uring memory provider information. + type: nest + nested-attributes: io-uring-provider-info - name: page-pool-info subset-of: page-pool @@ -268,6 +284,17 @@ attribute-sets: doc: The timeout, in nanoseconds, of how long to suspend irq processing, if event polling finds events type: uint + - + name: threaded + doc: Whether the napi is configured to operate in threaded polling + mode. If this is set to `enable` then the NAPI context operates + in threaded polling mode. If this is set to `busy-poll-enable` + then the NAPI kthread also does busy polling. + type: u32 + enum: napi-threaded + - + name: xsk-info + attributes: [] - name: queue attributes: @@ -286,6 +313,9 @@ attribute-sets: - name: type doc: Queue type as rx, tx. Each queue type defines a separate ID space. + XDP TX queues allocated in the kernel are not linked to NAPIs and + thus not listed. AF_XDP queues will have more information set in + the xsk attribute. type: u32 enum: queue-type - @@ -296,7 +326,16 @@ attribute-sets: name: dmabuf doc: ID of the dmabuf attached to this queue, if any. type: u32 - + - + name: io-uring + doc: io_uring memory provider information. 
+ type: nest + nested-attributes: io-uring-provider-info + - + name: xsk + doc: XSK information for this queue, if any. + type: nest + nested-attributes: xsk-info - name: qstats doc: | @@ -444,6 +483,8 @@ attribute-sets: name: tx-needs-csum doc: | Number of packets that required the device to calculate the checksum. + This counter includes the number of GSO wire packets for which device + calculated the L4 checksum. type: uint - name: tx-hw-gso-packets @@ -572,6 +613,7 @@ operations: - inflight-mem - detach-time - dmabuf + - io-uring dump: reply: *pp-reply config-cond: page-pool @@ -637,6 +679,8 @@ operations: - napi-id - ifindex - dmabuf + - io-uring + - xsk dump: request: attributes: @@ -659,6 +703,7 @@ operations: - defer-hard-irqs - gro-flush-timeout - irq-suspend-timeout + - threaded dump: request: attributes: @@ -711,10 +756,11 @@ operations: - defer-hard-irqs - gro-flush-timeout - irq-suspend-timeout + - threaded kernel-family: - headers: [ "linux/list.h"] - sock-priv: struct list_head + headers: [ "net/netdev_netlink.h"] + sock-priv: struct netdev_nl_sock mcast-groups: list: diff --git a/Documentation/netlink/specs/nl80211.yaml b/Documentation/netlink/specs/nl80211.yaml new file mode 100644 index 0000000000000..1ec49c3562cdc --- /dev/null +++ b/Documentation/netlink/specs/nl80211.yaml @@ -0,0 +1,2000 @@ +# SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) + +name: nl80211 +protocol: genetlink-legacy + +doc: + Netlink API for 802.11 wireless devices + +definitions: + - + name: commands + type: enum + entries: + - unspec + - get-wiphy + - set-wiphy + - new-wiphy + - del-wiphy + - get-interface + - set-interface + - new-interface + - del-interface + - get-key + - set-key + - new-key + - del-key + - get-beacon + - set-beacon + - new-beacon + - del-beacon + - get-station + - set-station + - new-station + - del-station + - get-mpath + - set-mpath + - new-mpath + - del-mpath + - set-bss + - set-reg + - req-set-reg + - get-mesh-config + - set-mesh-config + - set-mgmt-extra-ie + - get-reg + - get-scan + - trigger-scan + - new-scan-results + - scan-aborted + - reg-change + - authenticate + - associate + - deauthenticate + - disassociate + - michael-mic-failure + - reg-beacon-hint + - join-ibss + - leave-ibss + - testmode + - connect + - roam + - disconnect + - set-wiphy-netns + - get-survey + - new-survey-results + - set-pmksa + - del-pmksa + - flush-pmksa + - remain-on-channel + - cancel-remain-on-channel + - set-tx-bitrate-mask + - register-action + - action + - action-tx-status + - set-power-save + - get-power-save + - set-cqm + - notify-cqm + - set-channel + - set-wds-peer + - frame-wait-cancel + - join-mesh + - leave-mesh + - unprot-deauthenticate + - unprot-disassociate + - new-peer-candidate + - get-wowlan + - set-wowlan + - start-sched-scan + - stop-sched-scan + - sched-scan-results + - sched-scan-stopped + - set-rekey-offload + - pmksa-candidate + - tdls-oper + - tdls-mgmt + - unexpected-frame + - probe-client + - register-beacons + - unexpected-4-addr-frame + - set-noack-map + - ch-switch-notify + - start-p2p-device + - stop-p2p-device + - conn-failed + - set-mcast-rate + - set-mac-acl + - radar-detect + - get-protocol-features + - update-ft-ies + - ft-event + - crit-protocol-start + - crit-protocol-stop + - get-coalesce + - set-coalesce + - channel-switch + - vendor + - set-qos-map + - add-tx-ts + - del-tx-ts + - get-mpp + - join-ocb + - leave-ocb + - ch-switch-started-notify + - tdls-channel-switch + - tdls-cancel-channel-switch + - wiphy-reg-change + - 
abort-scan + - start-nan + - stop-nan + - add-nan-function + - del-nan-function + - change-nan-config + - nan-match + - set-multicast-to-unicast + - update-connect-params + - set-pmk + - del-pmk + - port-authorized + - reload-regdb + - external-auth + - sta-opmode-changed + - control-port-frame + - get-ftm-responder-stats + - peer-measurement-start + - peer-measurement-result + - peer-measurement-complete + - notify-radar + - update-owe-info + - probe-mesh-link + - set-tid-config + - unprot-beacon + - control-port-frame-tx-status + - set-sar-specs + - obss-color-collision + - color-change-request + - color-change-started + - color-change-aborted + - color-change-completed + - set-fils-aad + - assoc-comeback + - add-link + - remove-link + - add-link-sta + - modify-link-sta + - remove-link-sta + - set-hw-timestamp + - links-removed + - set-tid-to-link-mapping + - + name: feature-flags + type: flags + entries: + - sk-tx-status + - ht-ibss + - inactivity-timer + - cell-base-reg-hints + - p2p-device-needs-channel + - sae + - low-priority-scan + - scan-flush + - ap-scan + - vif-txpower + - need-obss-scan + - p2p-go-ctwin + - p2p-go-oppps + - reserved + - advertise-chan-limits + - full-ap-client-state + - userspace-mpm + - active-monitor + - ap-mode-chan-width-change + - ds-param-set-ie-in-probes + - wfa-tpc-ie-in-probes + - quiet + - tx-power-insertion + - ackto-estimation + - static-smps + - dynamic-smps + - supports-wmm-admission + - mac-on-create + - tdls-channel-switch + - scan-random-mac-addr + - sched-scan-random-mac-addr + - no-random-mac-addr + - + name: ieee80211-mcs-info + type: struct + members: + - + name: rx-mask + type: binary + len: 10 + - + name: rx-highest + type: u16 + byte-order: little-endian + - + name: tx-params + type: u8 + - + name: reserved + type: binary + len: 3 + - + name: ieee80211-vht-mcs-info + type: struct + members: + - + name: rx-mcs-map + type: u16 + byte-order: little-endian + - + name: rx-highest + type: u16 + byte-order: little-endian + - + name: tx-mcs-map + type: u16 + byte-order: little-endian + - + name: tx-highest + type: u16 + byte-order: little-endian + - + name: ieee80211-ht-cap + type: struct + members: + - + name: cap-info + type: u16 + byte-order: little-endian + - + name: ampdu-params-info + type: u8 + - + name: mcs + type: binary + struct: ieee80211-mcs-info + - + name: extended-ht-cap-info + type: u16 + byte-order: little-endian + - + name: tx-bf-cap-info + type: u32 + byte-order: little-endian + - + name: antenna-selection-info + type: u8 + - + name: channel-type + type: enum + entries: + - no-ht + - ht20 + - ht40minus + - ht40plus + - + name: sta-flag-update + type: struct + members: + - + name: mask + type: u32 + - + name: set + type: u32 + - + name: protocol-features + type: flags + entries: + - split-wiphy-dump + +attribute-sets: + - + name: nl80211-attrs + name-prefix: nl80211-attr- + enum-name: nl80211-attrs + attr-max-name: num-nl80211-attr + attributes: + - + name: wiphy + type: u32 + - + name: wiphy-name + type: string + - + name: ifindex + type: u32 + - + name: ifname + type: string + - + name: iftype + type: u32 + - + name: mac + type: binary + display-hint: mac + - + name: key-data + type: binary + - + name: key-idx + type: u8 + - + name: key-cipher + type: u32 + - + name: key-seq + type: binary + - + name: key-default + type: flag + - + name: beacon-interval + type: u32 + - + name: dtim-period + type: u32 + - + name: beacon-head + type: binary + - + name: beacon-tail + type: binary + - + name: sta-aid + type: u16 + - + name: 
sta-flags + type: binary # TODO: nest + - + name: sta-listen-interval + type: u16 + - + name: sta-supported-rates + type: binary + - + name: sta-vlan + type: u32 + - + name: sta-info + type: binary # TODO: nest + - + name: wiphy-bands + type: nest + nested-attributes: wiphy-bands + - + name: mntr-flags + type: binary # TODO: nest + - + name: mesh-id + type: binary + - + name: sta-plink-action + type: u8 + - + name: mpath-next-hop + type: binary + display-hint: mac + - + name: mpath-info + type: binary # TODO: nest + - + name: bss-cts-prot + type: u8 + - + name: bss-short-preamble + type: u8 + - + name: bss-short-slot-time + type: u8 + - + name: ht-capability + type: binary + - + name: supported-iftypes + type: nest + nested-attributes: supported-iftypes + - + name: reg-alpha2 + type: binary + - + name: reg-rules + type: binary # TODO: nest + - + name: mesh-config + type: binary # TODO: nest + - + name: bss-basic-rates + type: binary + - + name: wiphy-txq-params + type: binary # TODO: nest + - + name: wiphy-freq + type: u32 + - + name: wiphy-channel-type + type: u32 + enum: channel-type + - + name: key-default-mgmt + type: flag + - + name: mgmt-subtype + type: u8 + - + name: ie + type: binary + - + name: max-num-scan-ssids + type: u8 + - + name: scan-frequencies + type: binary # TODO: nest + - + name: scan-ssids + type: binary # TODO: nest + - + name: generation + type: u32 + - + name: bss + type: binary # TODO: nest + - + name: reg-initiator + type: u8 + - + name: reg-type + type: u8 + - + name: supported-commands + type: indexed-array + sub-type: u32 + enum: commands + - + name: frame + type: binary + - + name: ssid + type: binary + - + name: auth-type + type: u32 + - + name: reason-code + type: u16 + - + name: key-type + type: u32 + - + name: max-scan-ie-len + type: u16 + - + name: cipher-suites + type: binary + sub-type: u32 + display-hint: hex + - + name: freq-before + type: binary # TODO: nest + - + name: freq-after + type: binary # TODO: nest + - + name: freq-fixed + type: flag + - + name: wiphy-retry-short + type: u8 + - + name: wiphy-retry-long + type: u8 + - + name: wiphy-frag-threshold + type: u32 + - + name: wiphy-rts-threshold + type: u32 + - + name: timed-out + type: flag + - + name: use-mfp + type: u32 + - + name: sta-flags2 + type: binary + struct: sta-flag-update + - + name: control-port + type: flag + - + name: testdata + type: binary + - + name: privacy + type: flag + - + name: disconnected-by-ap + type: flag + - + name: status-code + type: u16 + - + name: cipher-suites-pairwise + type: binary + - + name: cipher-suite-group + type: u32 + - + name: wpa-versions + type: u32 + - + name: akm-suites + type: binary + - + name: req-ie + type: binary + - + name: resp-ie + type: binary + - + name: prev-bssid + type: binary + - + name: key + type: binary # TODO: nest + - + name: keys + type: binary # TODO: nest + - + name: pid + type: u32 + - + name: 4addr + type: u8 + - + name: survey-info + type: binary # TODO: nest + - + name: pmkid + type: binary + - + name: max-num-pmkids + type: u8 + - + name: duration + type: u32 + - + name: cookie + type: u64 + - + name: wiphy-coverage-class + type: u8 + - + name: tx-rates + type: binary # TODO: nest + - + name: frame-match + type: binary + - + name: ack + type: flag + - + name: ps-state + type: u32 + - + name: cqm + type: binary # TODO: nest + - + name: local-state-change + type: flag + - + name: ap-isolate + type: u8 + - + name: wiphy-tx-power-setting + type: u32 + - + name: wiphy-tx-power-level + type: u32 + - + name: tx-frame-types + 
type: nest + nested-attributes: iftype-attrs + - + name: rx-frame-types + type: nest + nested-attributes: iftype-attrs + - + name: frame-type + type: u16 + - + name: control-port-ethertype + type: flag + - + name: control-port-no-encrypt + type: flag + - + name: support-ibss-rsn + type: flag + - + name: wiphy-antenna-tx + type: u32 + - + name: wiphy-antenna-rx + type: u32 + - + name: mcast-rate + type: u32 + - + name: offchannel-tx-ok + type: flag + - + name: bss-ht-opmode + type: u16 + - + name: key-default-types + type: binary # TODO: nest + - + name: max-remain-on-channel-duration + type: u32 + - + name: mesh-setup + type: binary # TODO: nest + - + name: wiphy-antenna-avail-tx + type: u32 + - + name: wiphy-antenna-avail-rx + type: u32 + - + name: support-mesh-auth + type: flag + - + name: sta-plink-state + type: u8 + - + name: wowlan-triggers + type: binary # TODO: nest + - + name: wowlan-triggers-supported + type: nest + nested-attributes: wowlan-triggers-attrs + - + name: sched-scan-interval + type: u32 + - + name: interface-combinations + type: indexed-array + sub-type: nest + nested-attributes: if-combination-attributes + - + name: software-iftypes + type: nest + nested-attributes: supported-iftypes + - + name: rekey-data + type: binary # TODO: nest + - + name: max-num-sched-scan-ssids + type: u8 + - + name: max-sched-scan-ie-len + type: u16 + - + name: scan-supp-rates + type: binary # TODO: nest + - + name: hidden-ssid + type: u32 + - + name: ie-probe-resp + type: binary + - + name: ie-assoc-resp + type: binary + - + name: sta-wme + type: binary # TODO: nest + - + name: support-ap-uapsd + type: flag + - + name: roam-support + type: flag + - + name: sched-scan-match + type: binary # TODO: nest + - + name: max-match-sets + type: u8 + - + name: pmksa-candidate + type: binary # TODO: nest + - + name: tx-no-cck-rate + type: flag + - + name: tdls-action + type: u8 + - + name: tdls-dialog-token + type: u8 + - + name: tdls-operation + type: u8 + - + name: tdls-support + type: flag + - + name: tdls-external-setup + type: flag + - + name: device-ap-sme + type: u32 + - + name: dont-wait-for-ack + type: flag + - + name: feature-flags + type: u32 + enum: feature-flags + enum-as-flags: True + - + name: probe-resp-offload + type: u32 + - + name: probe-resp + type: binary + - + name: dfs-region + type: u8 + - + name: disable-ht + type: flag + - + name: ht-capability-mask + type: binary + struct: ieee80211-ht-cap + - + name: noack-map + type: u16 + - + name: inactivity-timeout + type: u16 + - + name: rx-signal-dbm + type: u32 + - + name: bg-scan-period + type: u16 + - + name: wdev + type: u64 + - + name: user-reg-hint-type + type: u32 + - + name: conn-failed-reason + type: u32 + - + name: auth-data + type: binary + - + name: vht-capability + type: binary + - + name: scan-flags + type: u32 + - + name: channel-width + type: u32 + - + name: center-freq1 + type: u32 + - + name: center-freq2 + type: u32 + - + name: p2p-ctwindow + type: u8 + - + name: p2p-oppps + type: u8 + - + name: local-mesh-power-mode + type: u32 + - + name: acl-policy + type: u32 + - + name: mac-addrs + type: binary # TODO: nest + - + name: mac-acl-max + type: u32 + - + name: radar-event + type: u32 + - + name: ext-capa + type: binary + - + name: ext-capa-mask + type: binary + - + name: sta-capability + type: u16 + - + name: sta-ext-capability + type: binary + - + name: protocol-features + type: u32 + enum: protocol-features + - + name: split-wiphy-dump + type: flag + - + name: disable-vht + type: flag + - + name: 
vht-capability-mask + type: binary + - + name: mdid + type: u16 + - + name: ie-ric + type: binary + - + name: crit-prot-id + type: u16 + - + name: max-crit-prot-duration + type: u16 + - + name: peer-aid + type: u16 + - + name: coalesce-rule + type: binary # TODO: nest + - + name: ch-switch-count + type: u32 + - + name: ch-switch-block-tx + type: flag + - + name: csa-ies + type: binary # TODO: nest + - + name: cntdwn-offs-beacon + type: binary + - + name: cntdwn-offs-presp + type: binary + - + name: rxmgmt-flags + type: binary + - + name: sta-supported-channels + type: binary + - + name: sta-supported-oper-classes + type: binary + - + name: handle-dfs + type: flag + - + name: support-5-mhz + type: flag + - + name: support-10-mhz + type: flag + - + name: opmode-notif + type: u8 + - + name: vendor-id + type: u32 + - + name: vendor-subcmd + type: u32 + - + name: vendor-data + type: binary + - + name: vendor-events + type: binary + - + name: qos-map + type: binary + - + name: mac-hint + type: binary + display-hint: mac + - + name: wiphy-freq-hint + type: u32 + - + name: max-ap-assoc-sta + type: u32 + - + name: tdls-peer-capability + type: u32 + - + name: socket-owner + type: flag + - + name: csa-c-offsets-tx + type: binary + - + name: max-csa-counters + type: u8 + - + name: tdls-initiator + type: flag + - + name: use-rrm + type: flag + - + name: wiphy-dyn-ack + type: flag + - + name: tsid + type: u8 + - + name: user-prio + type: u8 + - + name: admitted-time + type: u16 + - + name: smps-mode + type: u8 + - + name: oper-class + type: u8 + - + name: mac-mask + type: binary + display-hint: mac + - + name: wiphy-self-managed-reg + type: flag + - + name: ext-features + type: binary + - + name: survey-radio-stats + type: binary + - + name: netns-fd + type: u32 + - + name: sched-scan-delay + type: u32 + - + name: reg-indoor + type: flag + - + name: max-num-sched-scan-plans + type: u32 + - + name: max-scan-plan-interval + type: u32 + - + name: max-scan-plan-iterations + type: u32 + - + name: sched-scan-plans + type: binary # TODO: nest + - + name: pbss + type: flag + - + name: bss-select + type: binary # TODO: nest + - + name: sta-support-p2p-ps + type: u8 + - + name: pad + type: binary + - + name: iftype-ext-capa + type: binary # TODO: nest + - + name: mu-mimo-group-data + type: binary + - + name: mu-mimo-follow-mac-addr + type: binary + display-hint: mac + - + name: scan-start-time-tsf + type: u64 + - + name: scan-start-time-tsf-bssid + type: binary + - + name: measurement-duration + type: u16 + - + name: measurement-duration-mandatory + type: flag + - + name: mesh-peer-aid + type: u16 + - + name: nan-master-pref + type: u8 + - + name: bands + type: u32 + - + name: nan-func + type: binary # TODO: nest + - + name: nan-match + type: binary # TODO: nest + - + name: fils-kek + type: binary + - + name: fils-nonces + type: binary + - + name: multicast-to-unicast-enabled + type: flag + - + name: bssid + type: binary + display-hint: mac + - + name: sched-scan-relative-rssi + type: s8 + - + name: sched-scan-rssi-adjust + type: binary + - + name: timeout-reason + type: u32 + - + name: fils-erp-username + type: binary + - + name: fils-erp-realm + type: binary + - + name: fils-erp-next-seq-num + type: u16 + - + name: fils-erp-rrk + type: binary + - + name: fils-cache-id + type: binary + - + name: pmk + type: binary + - + name: sched-scan-multi + type: flag + - + name: sched-scan-max-reqs + type: u32 + - + name: want-1x-4way-hs + type: flag + - + name: pmkr0-name + type: binary + - + name: port-authorized + type: 
binary + - + name: external-auth-action + type: u32 + - + name: external-auth-support + type: flag + - + name: nss + type: u8 + - + name: ack-signal + type: s32 + - + name: control-port-over-nl80211 + type: flag + - + name: txq-stats + type: nest + nested-attributes: txq-stats-attrs + - + name: txq-limit + type: u32 + - + name: txq-memory-limit + type: u32 + - + name: txq-quantum + type: u32 + - + name: he-capability + type: binary + - + name: ftm-responder + type: binary # TODO: nest + - + name: ftm-responder-stats + type: binary # TODO: nest + - + name: timeout + type: u32 + - + name: peer-measurements + type: binary # TODO: nest + - + name: airtime-weight + type: u16 + - + name: sta-tx-power-setting + type: u8 + - + name: sta-tx-power + type: s16 + - + name: sae-password + type: binary + - + name: twt-responder + type: flag + - + name: he-obss-pd + type: binary # TODO: nest + - + name: wiphy-edmg-channels + type: u8 + - + name: wiphy-edmg-bw-config + type: u8 + - + name: vlan-id + type: u16 + - + name: he-bss-color + type: binary # TODO: nest + - + name: iftype-akm-suites + type: binary # TODO: nest + - + name: tid-config + type: binary # TODO: nest + - + name: control-port-no-preauth + type: flag + - + name: pmk-lifetime + type: u32 + - + name: pmk-reauth-threshold + type: u8 + - + name: receive-multicast + type: flag + - + name: wiphy-freq-offset + type: u32 + - + name: center-freq1-offset + type: u32 + - + name: scan-freq-khz + type: binary # TODO: nest + - + name: he-6ghz-capability + type: binary + - + name: fils-discovery + type: binary # TODO: nest + - + name: unsol-bcast-probe-resp + type: binary # TODO: nest + - + name: s1g-capability + type: binary + - + name: s1g-capability-mask + type: binary + - + name: sae-pwe + type: u8 + - + name: reconnect-requested + type: binary + - + name: sar-spec + type: nest + nested-attributes: sar-attributes + - + name: disable-he + type: flag + - + name: obss-color-bitmap + type: u64 + - + name: color-change-count + type: u8 + - + name: color-change-color + type: u8 + - + name: color-change-elems + type: binary # TODO: nest + - + name: mbssid-config + type: binary # TODO: nest + - + name: mbssid-elems + type: binary # TODO: nest + - + name: radar-background + type: flag + - + name: ap-settings-flags + type: u32 + - + name: eht-capability + type: binary + - + name: disable-eht + type: flag + - + name: mlo-links + type: binary # TODO: nest + - + name: mlo-link-id + type: u8 + - + name: mld-addr + type: binary + display-hint: mac + - + name: mlo-support + type: flag + - + name: max-num-akm-suites + type: binary + - + name: eml-capability + type: u16 + - + name: mld-capa-and-ops + type: u16 + - + name: tx-hw-timestamp + type: u64 + - + name: rx-hw-timestamp + type: u64 + - + name: td-bitmap + type: binary + - + name: punct-bitmap + type: u32 + - + name: max-hw-timestamp-peers + type: u16 + - + name: hw-timestamp-enabled + type: flag + - + name: ema-rnr-elems + type: binary # TODO: nest + - + name: mlo-link-disabled + type: flag + - + name: bss-dump-include-use-data + type: flag + - + name: mlo-ttlm-dlink + type: u16 + - + name: mlo-ttlm-ulink + type: u16 + - + name: assoc-spp-amsdu + type: flag + - + name: wiphy-radios + type: binary # TODO: nest + - + name: wiphy-interface-combinations + type: binary # TODO: nest + - + name: vif-radio-mask + type: u32 + - + name: frame-type-attrs + subset-of: nl80211-attrs + attributes: + - + name: frame-type + - + name: wiphy-bands + name-prefix: nl80211-band- + attr-max-name: num-nl80211-bands + attributes: + - 
+ name: 2ghz + doc: 2.4 GHz ISM band + value: 0 + type: nest + nested-attributes: band-attrs + - + name: 5ghz + doc: around 5 GHz band (4.9 - 5.7 GHz) + type: nest + nested-attributes: band-attrs + - + name: 60ghz + doc: around 60 GHz band (58.32 - 69.12 GHz) + type: nest + nested-attributes: band-attrs + - + name: 6ghz + type: nest + nested-attributes: band-attrs + - + name: s1ghz + type: nest + nested-attributes: band-attrs + - + name: lc + type: nest + nested-attributes: band-attrs + - + name: band-attrs + enum-name: nl80211-band-attr + name-prefix: nl80211-band-attr- + attributes: + - + name: freqs + type: indexed-array + sub-type: nest + nested-attributes: frequency-attrs + - + name: rates + type: indexed-array + sub-type: nest + nested-attributes: bitrate-attrs + - + name: ht-mcs-set + type: binary + struct: ieee80211-mcs-info + - + name: ht-capa + type: u16 + - + name: ht-ampdu-factor + type: u8 + - + name: ht-ampdu-density + type: u8 + - + name: vht-mcs-set + type: binary + struct: ieee80211-vht-mcs-info + - + name: vht-capa + type: u32 + - + name: iftype-data + type: indexed-array + sub-type: nest + nested-attributes: iftype-data-attrs + - + name: edmg-channels + type: binary + - + name: edmg-bw-config + type: binary + - + name: s1g-mcs-nss-set + type: binary + - + name: s1g-capa + type: binary + - + name: bitrate-attrs + name-prefix: nl80211-bitrate-attr- + attributes: + - + name: rate + type: u32 + - + name: 2ghz-shortpreamble + type: flag + - + name: frequency-attrs + name-prefix: nl80211-frequency-attr- + attributes: + - + name: freq + type: u32 + - + name: disabled + type: flag + - + name: no-ir + type: flag + - + name: no-ibss + name-prefix: __nl80211-frequency-attr- + type: flag + - + name: radar + type: flag + - + name: max-tx-power + type: u32 + - + name: dfs-state + type: u32 + - + name: dfs-time + type: binary + - + name: no-ht40-minus + type: binary + - + name: no-ht40-plus + type: binary + - + name: no-80mhz + type: binary + - + name: no-160mhz + type: binary + - + name: dfs-cac-time + type: binary + - + name: indoor-only + type: binary + - + name: ir-concurrent + type: binary + - + name: no-20mhz + type: binary + - + name: no-10mhz + type: binary + - + name: wmm + type: indexed-array + sub-type: nest + nested-attributes: wmm-attrs + - + name: no-he + type: binary + - + name: offset + type: u32 + - + name: 1mhz + type: binary + - + name: 2mhz + type: binary + - + name: 4mhz + type: binary + - + name: 8mhz + type: binary + - + name: 16mhz + type: binary + - + name: no-320mhz + type: binary + - + name: no-eht + type: binary + - + name: psd + type: binary + - + name: dfs-concurrent + type: binary + - + name: no-6ghz-vlp-client + type: binary + - + name: no-6ghz-afc-client + type: binary + - + name: can-monitor + type: binary + - + name: allow-6ghz-vlp-ap + type: binary + - + name: if-combination-attributes + enum-name: nl80211-if-combination-attrs + name-prefix: nl80211-iface-comb- + attr-max-name: max-nl80211-iface-comb + attributes: + - + name: limits + type: indexed-array + sub-type: nest + nested-attributes: iface-limit-attributes + - + name: maxnum + type: u32 + - + name: sta-ap-bi-match + type: flag + - + name: num-channels + type: u32 + - + name: radar-detect-widths + type: u32 + - + name: radar-detect-regions + type: u32 + - + name: bi-min-gcd + type: u32 + - + name: iface-limit-attributes + enum-name: nl80211-iface-limit-attrs + name-prefix: nl80211-iface-limit- + attr-max-name: max-nl80211-iface-limit + attributes: + - + name: max + type: u32 + - + name: types 
+ type: nest + nested-attributes: supported-iftypes + - + name: iftype-data-attrs + name-prefix: nl80211-band-iftype-attr- + attributes: + - + name: iftypes + type: binary + - + name: he-cap-mac + type: binary + - + name: he-cap-phy + type: binary + - + name: he-cap-mcs-set + type: binary + - + name: he-cap-ppe + type: binary + - + name: he-6ghz-capa + type: binary + - + name: vendor-elems + type: binary + - + name: eht-cap-mac + type: binary + - + name: eht-cap-phy + type: binary + - + name: eht-cap-mcs-set + type: binary + - + name: eht-cap-ppe + type: binary + - + name: iftype-attrs + enum-name: nl80211-iftype + name-prefix: nl80211-iftype- + attributes: + - + name: unspecified + type: nest + value: 0 + nested-attributes: frame-type-attrs + - + name: adhoc + type: nest + nested-attributes: frame-type-attrs + - + name: station + type: nest + nested-attributes: frame-type-attrs + - + name: ap + type: nest + nested-attributes: frame-type-attrs + - + name: ap-vlan + type: nest + nested-attributes: frame-type-attrs + - + name: wds + type: nest + nested-attributes: frame-type-attrs + - + name: monitor + type: nest + nested-attributes: frame-type-attrs + - + name: mesh-point + type: nest + nested-attributes: frame-type-attrs + - + name: p2p-client + type: nest + nested-attributes: frame-type-attrs + - + name: p2p-go + type: nest + nested-attributes: frame-type-attrs + - + name: p2p-device + type: nest + nested-attributes: frame-type-attrs + - + name: ocb + type: nest + nested-attributes: frame-type-attrs + - + name: nan + type: nest + nested-attributes: frame-type-attrs + - + name: sar-attributes + enum-name: nl80211-sar-attrs + name-prefix: nl80211-sar-attr- + attributes: + - + name: type + type: u32 + - + name: specs + type: indexed-array + sub-type: nest + nested-attributes: sar-specs + - + name: sar-specs + enum-name: nl80211-sar-specs-attrs + name-prefix: nl80211-sar-attr-specs- + attributes: + - + name: power + type: s32 + - + name: range-index + type: u32 + - + name: start-freq + type: u32 + - + name: end-freq + type: u32 + - + name: supported-iftypes + enum-name: nl80211-iftype + name-prefix: nl80211-iftype- + attributes: + - + name: adhoc + type: flag + - + name: station + type: flag + - + name: ap + type: flag + - + name: ap-vlan + type: flag + - + name: wds + type: flag + - + name: monitor + type: flag + - + name: mesh-point + type: flag + - + name: p2p-client + type: flag + - + name: p2p-go + type: flag + - + name: p2p-device + type: flag + - + name: ocb + type: flag + - + name: nan + type: flag + - + name: txq-stats-attrs + name-prefix: nl80211-txq-stats- + attributes: + - + name: backlog-bytes + type: u32 + - + name: backlog-packets + type: u32 + - + name: flows + type: u32 + - + name: drops + type: u32 + - + name: ecn-marks + type: u32 + - + name: overlimit + type: u32 + - + name: overmemory + type: u32 + - + name: collisions + type: u32 + - + name: tx-bytes + type: u32 + - + name: tx-packets + type: u32 + - + name: max-flows + type: u32 + - + name: wmm-attrs + enum-name: nl80211-wmm-rule + name-prefix: nl80211-wmmr- + attributes: + - + name: cw-min + type: u16 + - + name: cw-max + type: u16 + - + name: aifsn + type: u8 + - + name: txop + type: u16 + - + name: wowlan-triggers-attrs + enum-name: nl80211-wowlan-triggers + name-prefix: nl80211-wowlan-trig- + attr-max-name: max-nl80211-wowlan-trig + attributes: + - + name: any + type: flag + - + name: disconnect + type: flag + - + name: magic-pkt + type: flag + - + name: pkt-pattern + type: flag + - + name: gtk-rekey-supported + 
type: flag + - + name: gtk-rekey-failure + type: flag + - + name: eap-ident-request + type: flag + - + name: 4way-handshake + type: flag + - + name: rfkill-release + type: flag + - + name: wakeup-pkt-80211 + type: flag + - + name: wakeup-pkt-80211-len + type: flag + - + name: wakeup-pkt-8023 + type: flag + - + name: wakeup-pkt-8023-len + type: flag + - + name: tcp-connection + type: flag + - + name: wakeup-tcp-match + type: flag + - + name: wakeup-tcp-connlost + type: flag + - + name: wakeup-tcp-nomoretokens + type: flag + - + name: net-detect + type: flag + - + name: net-detect-results + type: flag + - + name: unprotected-deauth-disassoc + type: flag + +operations: + enum-model: directional + list: + - + name: get-wiphy + doc: | + Get information about a wiphy or dump a list of all wiphys. Requests to dump get-wiphy + should unconditionally include the split-wiphy-dump flag in the request. + attribute-set: nl80211-attrs + do: + request: + value: 1 + attributes: + - wiphy + - wdev + - ifindex + reply: + value: 3 + attributes: &wiphy-reply-attrs + - bands + - cipher-suites + - control-port-ethertype + - ext-capa + - ext-capa-mask + - ext-features + - feature-flags + - generation + - ht-capability-mask + - interface-combinations + - mac + - max-csa-counters + - max-match-sets + - max-num-akm-suites + - max-num-pmkids + - max-num-scan-ssids + - max-num-sched-scan-plans + - max-num-sched-scan-ssids + - max-remain-on-channel-duration + - max-scan-ie-len + - max-scan-plan-interval + - max-scan-plan-iterations + - max-sched-scan-ie-len + - offchannel-tx-ok + - rx-frame-types + - sar-spec + - sched-scan-max-reqs + - software-iftypes + - support-ap-uapsd + - supported-commands + - supported-iftypes + - tdls-external-setup + - tdls-support + - tx-frame-types + - txq-limit + - txq-memory-limit + - txq-quantum + - txq-stats + - vht-capability-mask + - wiphy + - wiphy-antenna-avail-rx + - wiphy-antenna-avail-tx + - wiphy-antenna-rx + - wiphy-antenna-tx + - wiphy-bands + - wiphy-coverage-class + - wiphy-frag-threshold + - wiphy-name + - wiphy-retry-long + - wiphy-retry-short + - wiphy-rts-threshold + - wowlan-triggers-supported + dump: + request: + attributes: + - wiphy + - wdev + - ifindex + - split-wiphy-dump + reply: + attributes: *wiphy-reply-attrs + - + name: get-interface + doc: Get information about an interface or dump a list of all interfaces + attribute-set: nl80211-attrs + do: + request: + value: 5 + attributes: + - ifname + reply: + value: 7 + attributes: &interface-reply-attrs + - ifname + - iftype + - ifindex + - wiphy + - wdev + - mac + - generation + - txq-stats + - 4addr + dump: + request: + attributes: + - ifname + reply: + attributes: *interface-reply-attrs + - + name: get-protocol-features + doc: Get information about supported protocol features + attribute-set: nl80211-attrs + do: + request: + value: 95 + attributes: + - protocol-features + reply: + value: 95 + attributes: + - protocol-features + +mcast-groups: + list: + - + name: config + - + name: scan + - + name: regulatory + - + name: mlme + - + name: vendor + - + name: nan + - + name: testmode diff --git a/Documentation/netlink/specs/ovpn.yaml b/Documentation/netlink/specs/ovpn.yaml new file mode 100644 index 0000000000000..6f131bead88f8 --- /dev/null +++ b/Documentation/netlink/specs/ovpn.yaml @@ -0,0 +1,367 @@ +# SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) +# +# Author: Antonio Quartulli +# +# Copyright (c) 2024-2025, OpenVPN Inc. 
+# + +name: ovpn + +protocol: genetlink + +doc: Netlink protocol to control OpenVPN network devices + +definitions: + - + type: const + name: nonce-tail-size + value: 8 + - + type: enum + name: cipher-alg + entries: [ none, aes-gcm, chacha20-poly1305 ] + - + type: enum + name: del-peer-reason + entries: + - teardown + - userspace + - expired + - transport-error + - transport-disconnect + - + type: enum + name: key-slot + entries: [ primary, secondary ] + +attribute-sets: + - + name: peer + attributes: + - + name: id + type: u32 + doc: >- + The unique ID of the peer in the device context. To be used to identify + peers during operations for a specific device + checks: + max: 0xFFFFFF + - + name: remote-ipv4 + type: u32 + doc: The remote IPv4 address of the peer + byte-order: big-endian + display-hint: ipv4 + - + name: remote-ipv6 + type: binary + doc: The remote IPv6 address of the peer + display-hint: ipv6 + checks: + exact-len: 16 + - + name: remote-ipv6-scope-id + type: u32 + doc: The scope id of the remote IPv6 address of the peer (RFC2553) + - + name: remote-port + type: u16 + doc: The remote port of the peer + byte-order: big-endian + checks: + min: 1 + - + name: socket + type: u32 + doc: The socket to be used to communicate with the peer + - + name: socket-netnsid + type: s32 + doc: The ID of the netns the socket assigned to this peer lives in + - + name: vpn-ipv4 + type: u32 + doc: The IPv4 address assigned to the peer by the server + byte-order: big-endian + display-hint: ipv4 + - + name: vpn-ipv6 + type: binary + doc: The IPv6 address assigned to the peer by the server + display-hint: ipv6 + checks: + exact-len: 16 + - + name: local-ipv4 + type: u32 + doc: The local IPv4 to be used to send packets to the peer (UDP only) + byte-order: big-endian + display-hint: ipv4 + - + name: local-ipv6 + type: binary + doc: The local IPv6 to be used to send packets to the peer (UDP only) + display-hint: ipv6 + checks: + exact-len: 16 + - + name: local-port + type: u16 + doc: The local port to be used to send packets to the peer (UDP only) + byte-order: big-endian + checks: + min: 1 + - + name: keepalive-interval + type: u32 + doc: >- + The number of seconds after which a keep alive message is sent to the + peer + - + name: keepalive-timeout + type: u32 + doc: >- + The number of seconds from the last activity after which the peer is + assumed dead + - + name: del-reason + type: u32 + doc: The reason why a peer was deleted + enum: del-peer-reason + - + name: vpn-rx-bytes + type: uint + doc: Number of bytes received over the tunnel + - + name: vpn-tx-bytes + type: uint + doc: Number of bytes transmitted over the tunnel + - + name: vpn-rx-packets + type: uint + doc: Number of packets received over the tunnel + - + name: vpn-tx-packets + type: uint + doc: Number of packets transmitted over the tunnel + - + name: link-rx-bytes + type: uint + doc: Number of bytes received at the transport level + - + name: link-tx-bytes + type: uint + doc: Number of bytes transmitted at the transport level + - + name: link-rx-packets + type: uint + doc: Number of packets received at the transport level + - + name: link-tx-packets + type: uint + doc: Number of packets transmitted at the transport level + - + name: keyconf + attributes: + - + name: peer-id + type: u32 + doc: >- + The unique ID of the peer in the device context. 
To be used to + identify peers during key operations + checks: + max: 0xFFFFFF + - + name: slot + type: u32 + doc: The slot where the key should be stored + enum: key-slot + - + name: key-id + doc: >- + The unique ID of the key in the peer context. Used to fetch the + correct key upon decryption + type: u32 + checks: + max: 7 + - + name: cipher-alg + type: u32 + doc: The cipher to be used when communicating with the peer + enum: cipher-alg + - + name: encrypt-dir + type: nest + doc: Key material for encrypt direction + nested-attributes: keydir + - + name: decrypt-dir + type: nest + doc: Key material for decrypt direction + nested-attributes: keydir + - + name: keydir + attributes: + - + name: cipher-key + type: binary + doc: The actual key to be used by the cipher + checks: + max-len: 256 + - + name: nonce-tail + type: binary + doc: >- + Random nonce to be concatenated to the packet ID, in order to + obtain the actual cipher IV + checks: + exact-len: nonce-tail-size + - + name: ovpn + attributes: + - + name: ifindex + type: u32 + doc: Index of the ovpn interface to operate on + - + name: peer + type: nest + doc: >- + The peer object containing the attributes of interest for the specific + operation + nested-attributes: peer + - + name: keyconf + type: nest + doc: Peer specific cipher configuration + nested-attributes: keyconf + +operations: + list: + - + name: peer-new + attribute-set: ovpn + flags: [ admin-perm ] + doc: Add a remote peer + do: + pre: ovpn-nl-pre-doit + post: ovpn-nl-post-doit + request: + attributes: + - ifindex + - peer + - + name: peer-set + attribute-set: ovpn + flags: [ admin-perm ] + doc: Modify a remote peer + do: + pre: ovpn-nl-pre-doit + post: ovpn-nl-post-doit + request: + attributes: + - ifindex + - peer + - + name: peer-get + attribute-set: ovpn + flags: [ admin-perm ] + doc: Retrieve data about existing remote peers (or a specific one) + do: + pre: ovpn-nl-pre-doit + post: ovpn-nl-post-doit + request: + attributes: + - ifindex + - peer + reply: + attributes: + - peer + dump: + request: + attributes: + - ifindex + reply: + attributes: + - peer + - + name: peer-del + attribute-set: ovpn + flags: [ admin-perm ] + doc: Delete existing remote peer + do: + pre: ovpn-nl-pre-doit + post: ovpn-nl-post-doit + request: + attributes: + - ifindex + - peer + - + name: peer-del-ntf + doc: Notification about a peer being deleted + notify: peer-get + mcgrp: peers + + - + name: key-new + attribute-set: ovpn + flags: [ admin-perm ] + doc: Add a cipher key for a specific peer + do: + pre: ovpn-nl-pre-doit + post: ovpn-nl-post-doit + request: + attributes: + - ifindex + - keyconf + - + name: key-get + attribute-set: ovpn + flags: [ admin-perm ] + doc: Retrieve non-sensitive data about peer key and cipher + do: + pre: ovpn-nl-pre-doit + post: ovpn-nl-post-doit + request: + attributes: + - ifindex + - keyconf + reply: + attributes: + - keyconf + - + name: key-swap + attribute-set: ovpn + flags: [ admin-perm ] + doc: Swap primary and secondary session keys for a specific peer + do: + pre: ovpn-nl-pre-doit + post: ovpn-nl-post-doit + request: + attributes: + - ifindex + - keyconf + - + name: key-swap-ntf + notify: key-get + doc: >- + Notification about key having exhausted its IV space and requiring + renegotiation + mcgrp: peers + - + name: key-del + attribute-set: ovpn + flags: [ admin-perm ] + doc: Delete cipher key for a specific peer + do: + pre: ovpn-nl-pre-doit + post: ovpn-nl-post-doit + request: + attributes: + - ifindex + - keyconf + +mcast-groups: + list: + - + name: peers 
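The ovpn family above is plain generic netlink, so before any of the peer-* or key-* operations can be issued, userspace first has to resolve the dynamic family ID from the nlctrl controller. A minimal sketch in Python over a raw NETLINK_GENERIC socket (constants are from <linux/netlink.h> and <linux/genetlink.h>; error handling, NLMSG_ERROR parsing and multi-part replies are omitted, and the lookup only succeeds when the ovpn module is loaded)::

    import socket
    import struct

    NETLINK_GENERIC = 16
    GENL_ID_CTRL = 0x10          # the nlctrl controller family
    CTRL_CMD_GETFAMILY = 3
    CTRL_ATTR_FAMILY_ID = 1
    CTRL_ATTR_FAMILY_NAME = 2
    NLM_F_REQUEST = 1

    def nlattr(kind, payload):
        # struct nlattr: u16 len (header included), u16 type, payload
        # padded to a 4-byte boundary
        hdr = struct.pack("=HH", 4 + len(payload), kind)
        return hdr + payload + b"\0" * (-len(payload) % 4)

    def genl_family_id(name):
        # genlmsghdr: u8 cmd, u8 version, u16 reserved
        body = struct.pack("=BBH", CTRL_CMD_GETFAMILY, 1, 0)
        body += nlattr(CTRL_ATTR_FAMILY_NAME, name.encode() + b"\0")
        # nlmsghdr: u32 len, u16 type, u16 flags, u32 seq, u32 pid
        msg = struct.pack("=IHHII", 16 + len(body), GENL_ID_CTRL,
                          NLM_F_REQUEST, 1, 0) + body
        with socket.socket(socket.AF_NETLINK, socket.SOCK_RAW,
                           NETLINK_GENERIC) as sock:
            sock.sendto(msg, (0, 0))
            reply = sock.recv(65536)
        off = 16 + 4                 # skip the reply's nlmsghdr + genlmsghdr
        while off + 4 <= len(reply):
            alen, akind = struct.unpack_from("=HH", reply, off)
            if alen < 4:
                break                # malformed attribute, bail out
            if akind == CTRL_ATTR_FAMILY_ID:
                return struct.unpack_from("=H", reply, off + 4)[0]
            off += (alen + 3) & ~3   # attributes are 4-byte aligned
        return None

    print(genl_family_id("ovpn"))

The returned ID is then used as the nlmsghdr type for the family's own requests; in practice tools generated by ynl do this resolution automatically.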
diff --git a/Documentation/netlink/specs/rt_addr.yaml b/Documentation/netlink/specs/rt_addr.yaml index cbee1cedb177c..5dd5469044c79 100644 --- a/Documentation/netlink/specs/rt_addr.yaml +++ b/Documentation/netlink/specs/rt_addr.yaml @@ -168,6 +168,29 @@ operations: reply: value: 20 attributes: *ifaddr-all + - + name: getmaddrs + doc: Get / dump IPv4/IPv6 multicast addresses. + attribute-set: addr-attrs + fixed-header: ifaddrmsg + do: + request: + value: 58 + attributes: + - ifa-family + - ifa-index + reply: + value: 58 + attributes: &mcaddr-attrs + - ifa-multicast + - ifa-cacheinfo + dump: + request: + value: 58 + attributes: + - ifa-family + reply: + value: 58 + attributes: *mcaddr-attrs mcast-groups: list: diff --git a/Documentation/netlink/specs/rt_link.yaml b/Documentation/netlink/specs/rt_link.yaml index 0d492500c7e57..a50d9d7d882e7 100644 --- a/Documentation/netlink/specs/rt_link.yaml +++ b/Documentation/netlink/specs/rt_link.yaml @@ -770,6 +770,18 @@ definitions: - name: to type: u32 + - + name: ifla-geneve-port-range + type: struct + members: + - + name: low + type: u16 + byte-order: big-endian + - + name: high + type: u16 + byte-order: big-endian - name: ifla-vf-mac type: struct @@ -926,6 +938,12 @@ definitions: entries: - name: none - name: default + - + name: ovpn-mode + type: enum + entries: + - p2p + - mp attribute-sets: - @@ -1148,6 +1166,9 @@ attribute-sets: name: max-pacing-offload-horizon type: uint doc: EDT offload horizon supported by the device (in nsec). + - + name: netns-immutable + type: u8 - name: af-spec-attrs attributes: @@ -1915,6 +1936,10 @@ attribute-sets: - name: inner-proto-inherit type: flag + - + name: port-range + type: binary + struct: ifla-geneve-port-range - name: linkinfo-iptun-attrs name-prefix: ifla-iptun- @@ -2253,6 +2278,13 @@ attribute-sets: - name: tailroom type: u16 + - + name: linkinfo-ovpn-attrs + attributes: + - + name: mode + type: u8 + enum: ovpn-mode sub-messages: - @@ -2303,6 +2335,9 @@ sub-messages: - value: netkit attribute-set: linkinfo-netkit-attrs + - + value: ovpn + attribute-set: linkinfo-ovpn-attrs - name: linkinfo-member-data-msg formats: diff --git a/Documentation/netlink/specs/rt_rule.yaml b/Documentation/netlink/specs/rt_rule.yaml index a9debac3058a0..de0938d365419 100644 --- a/Documentation/netlink/specs/rt_rule.yaml +++ b/Documentation/netlink/specs/rt_rule.yaml @@ -182,6 +182,18 @@ attribute-sets: type: u32 byte-order: big-endian display-hint: hex + - + name: sport-mask + type: u16 + display-hint: hex + - + name: dport-mask + type: u16 + display-hint: hex + - + name: dscp-mask + type: u8 + display-hint: hex operations: enum-model: directional @@ -215,6 +227,9 @@ operations: - dscp - flowlabel - flowlabel-mask + - sport-mask + - dport-mask + - dscp-mask - name: newrule-ntf doc: Notify a rule creation diff --git a/Documentation/networking/batman-adv.rst b/Documentation/networking/batman-adv.rst index 44b9b5cc0e24b..ec53a42499c13 100644 --- a/Documentation/networking/batman-adv.rst +++ b/Documentation/networking/batman-adv.rst @@ -27,7 +27,7 @@ Load the batman-adv module into your kernel:: $ insmod batman-adv.ko The module is now waiting for activation. You must add some interfaces on which -batman-adv can operate. The batman-adv soft-interface can be created using the +batman-adv can operate. 
The batman-adv mesh-interface can be created using the iproute2 tool ``ip``:: $ ip link add name bat0 type batadv diff --git a/Documentation/networking/device_drivers/cable/index.rst b/Documentation/networking/device_drivers/cable/index.rst deleted file mode 100644 index cce3c4392972b..0000000000000 --- a/Documentation/networking/device_drivers/cable/index.rst +++ /dev/null @@ -1,18 +0,0 @@ -.. SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) - -Cable Modem Device Drivers -========================== - -Contents: - -.. toctree:: - :maxdepth: 2 - - sb1000 - -.. only:: subproject and html - - Indices - ======= - - * :ref:`genindex` diff --git a/Documentation/networking/device_drivers/cable/sb1000.rst b/Documentation/networking/device_drivers/cable/sb1000.rst deleted file mode 100644 index c8582ca4034d5..0000000000000 --- a/Documentation/networking/device_drivers/cable/sb1000.rst +++ /dev/null @@ -1,222 +0,0 @@ -.. SPDX-License-Identifier: GPL-2.0 - -=================== -SB100 device driver -=================== - -sb1000 is a module network device driver for the General Instrument (also known -as NextLevel) SURFboard1000 internal cable modem board. This is an ISA card -which is used by a number of cable TV companies to provide cable modem access. -It's a one-way downstream-only cable modem, meaning that your upstream net link -is provided by your regular phone modem. - -This driver was written by Franco Venturi . He deserves -a great deal of thanks for this wonderful piece of code! - -Needed tools -============ - -Support for this device is now a part of the standard Linux kernel. The -driver source code file is drivers/net/sb1000.c. In addition to this -you will need: - -1. The "cmconfig" program. This is a utility which supplements "ifconfig" - to configure the cable modem and network interface (usually called "cm0"); - -2. Several PPP scripts which live in /etc/ppp to make connecting via your - cable modem easy. - - These utilities can be obtained from: - - http://www.jacksonville.net/~fventuri/ - - in Franco's original source code distribution .tar.gz file. Support for - the sb1000 driver can be found at: - - - http://web.archive.org/web/%2E/http://home.adelphia.net/~siglercm/sb1000.html - - http://web.archive.org/web/%2E/http://linuxpower.cx/~cable/ - - along with these utilities. - -3. The standard isapnp tools. These are necessary to configure your SB1000 - card at boot time (or afterwards by hand) since it's a PnP card. - - If you don't have these installed as a standard part of your Linux - distribution, you can find them at: - - http://www.roestock.demon.co.uk/isapnptools/ - - or check your Linux distribution binary CD or their web site. For help with - isapnp, pnpdump, or /etc/isapnp.conf, go to: - - http://www.roestock.demon.co.uk/isapnptools/isapnpfaq.html - -Using the driver -================ - -To make the SB1000 card work, follow these steps: - -1. Run ``make config``, or ``make menuconfig``, or ``make xconfig``, whichever - you prefer, in the top kernel tree directory to set up your kernel - configuration. Make sure to say "Y" to "Prompt for development drivers" - and to say "M" to the sb1000 driver. Also say "Y" or "M" to all the standard - networking questions to get TCP/IP and PPP networking support. - -2. **BEFORE** you build the kernel, edit drivers/net/sb1000.c. Make sure - to redefine the value of READ_DATA_PORT to match the I/O address used - by isapnp to access your PnP cards. 
This is the value of READPORT in - /etc/isapnp.conf or given by the output of pnpdump. - -3. Build and install the kernel and modules as usual. - -4. Boot your new kernel following the usual procedures. - -5. Set up to configure the new SB1000 PnP card by capturing the output - of "pnpdump" to a file and editing this file to set the correct I/O ports, - IRQ, and DMA settings for all your PnP cards. Make sure none of the settings - conflict with one another. Then test this configuration by running the - "isapnp" command with your new config file as the input. Check for - errors and fix as necessary. (As an aside, I use I/O ports 0x110 and - 0x310 and IRQ 11 for my SB1000 card and these work well for me. YMMV.) - Then save the finished config file as /etc/isapnp.conf for proper - configuration on subsequent reboots. - -6. Download the original file sb1000-1.1.2.tar.gz from Franco's site or one of - the others referenced above. As root, unpack it into a temporary directory - and do a ``make cmconfig`` and then ``install -c cmconfig /usr/local/sbin``. - Don't do ``make install`` because it expects to find all the utilities built - and ready for installation, not just cmconfig. - -7. As root, copy all the files under the ppp/ subdirectory in Franco's - tar file into /etc/ppp, being careful not to overwrite any files that are - already in there. Then modify ppp@gi-on to set the correct login name, - phone number, and frequency for the cable modem. Also edit pap-secrets - to specify your login name and password and any site-specific information - you need. - -8. Be sure to modify /etc/ppp/firewall to use ipchains instead of - the older ipfwadm commands from the 2.0.x kernels. There's a neat utility to - convert ipfwadm commands to ipchains commands: - - http://users.dhp.com/~whisper/ipfwadm2ipchains/ - - You may also wish to modify the firewall script to implement a different - firewalling scheme. - -9. Start the PPP connection via the script /etc/ppp/ppp@gi-on. You must be - root to do this. It's better to use a utility like sudo to execute - frequently used commands like this with root permissions if possible. If you - connect successfully the cable modem interface will come up and you'll see a - driver message like this at the console:: - - cm0: sb1000 at (0x110,0x310), csn 1, S/N 0x2a0d16d8, IRQ 11. - sb1000.c:v1.1.2 6/01/98 (fventuri@mediaone.net) - - The "ifconfig" command should show two new interfaces, ppp0 and cm0. - - The command "cmconfig cm0" will give you information about the cable modem - interface. - -10. Try pinging a site via ``ping -c 5 www.yahoo.com``, for example. You should - see packets received. - -11. If you can't get site names (like www.yahoo.com) to resolve into - IP addresses (like 204.71.200.67), be sure your /etc/resolv.conf file - has no syntax errors and has the right nameserver IP addresses in it. - If this doesn't help, try something like ``ping -c 5 204.71.200.67`` to - see if the networking is running but the DNS resolution is where the - problem lies. - -12. If you still have problems, go to the support web sites mentioned above - and read the information and documentation there. - -Common problems -=============== - -1. Packets go out on the ppp0 interface but don't come back on the cm0 - interface. It looks like I'm connected but I can't even ping any - numerical IP addresses. (This happens predominantly on Debian systems due - to a default boot-time configuration script.) 
- -Solution - As root ``echo 0 > /proc/sys/net/ipv4/conf/cm0/rp_filter`` so it - can share the same IP address as the ppp0 interface. Note that this - command should probably be added to the /etc/ppp/cablemodem script - *right*between* the "/sbin/ifconfig" and "/sbin/cmconfig" commands. - You may need to do this to /proc/sys/net/ipv4/conf/ppp0/rp_filter as well. - If you do this to /proc/sys/net/ipv4/conf/default/rp_filter on each reboot - (in rc.local or some such) then any interfaces can share the same IP - addresses. - -2. I get "unresolved symbol" error messages on executing ``insmod sb1000.o``. - -Solution - You probably have a non-matching kernel source tree and - /usr/include/linux and /usr/include/asm header files. Make sure you - install the correct versions of the header files in these two directories. - Then rebuild and reinstall the kernel. - -3. When isapnp runs it reports an error, and my SB1000 card isn't working. - -Solution - There's a problem with later versions of isapnp using the "(CHECK)" - option in the lines that allocate the two I/O addresses for the SB1000 card. - This first popped up on RH 6.0. Delete "(CHECK)" for the SB1000 I/O addresses. - Make sure they don't conflict with any other pieces of hardware first! Then - rerun isapnp and go from there. - -4. I can't execute the /etc/ppp/ppp@gi-on file. - -Solution - As root do ``chmod ug+x /etc/ppp/ppp@gi-on``. - -5. The firewall script isn't working (with 2.2.x and higher kernels). - -Solution - Use the ipfwadm2ipchains script referenced above to convert the - /etc/ppp/firewall script from the deprecated ipfwadm commands to ipchains. - -6. I'm getting *tons* of firewall deny messages in the /var/kern.log, - /var/messages, and/or /var/syslog files, and they're filling up my /var - partition!!! - -Solution - First, tell your ISP that you're receiving DoS (Denial of Service) - and/or portscanning (UDP connection attempts) attacks! Look over the deny - messages to figure out what the attack is and where it's coming from. Next, - edit /etc/ppp/cablemodem and make sure the ",nobroadcast" option is turned on - to the "cmconfig" command (uncomment that line). If you're not receiving these - denied packets on your broadcast interface (IP address xxx.yyy.zzz.255 - typically), then someone is attacking your machine in particular. Be careful - out there.... - -7. Everything seems to work fine but my computer locks up after a while - (and typically during a lengthy download through the cable modem)! - -Solution - You may need to add a short delay in the driver to 'slow down' the - SURFboard because your PC might not be able to keep up with the transfer rate - of the SB1000. To do this, it's probably best to download Franco's - sb1000-1.1.2.tar.gz archive and build and install sb1000.o manually. You'll - want to edit the 'Makefile' and look for the 'SB1000_DELAY' - define. Uncomment those 'CFLAGS' lines (and comment out the default ones) - and try setting the delay to something like 60 microseconds with: - '-DSB1000_DELAY=60'. Then do ``make`` and as root ``make install`` and try - it out. If it still doesn't work or you like playing with the driver, you may - try other numbers. Remember though that the higher the delay, the slower the - driver (which slows down the rest of the PC too when it is actively - used). Thanks to Ed Daiga for this tip! - -Credits -======= - -This README came from Franco Venturi's original README file which is -still supplied with his driver .tar.gz archive. 
I and all other sb1000 users
-owe Franco a tremendous "Thank you!" Additional thanks goes to Carl Patten
-and Ralph Bonnell who are now managing the Linux SB1000 web site, and to
-the SB1000 users who reported and helped debug the common problems listed
-above.
-
-
-	Clemmitt Sigler
-	csigler@vt.edu
diff --git a/Documentation/networking/device_drivers/ethernet/freescale/dpaa2/switch-driver.rst b/Documentation/networking/device_drivers/ethernet/freescale/dpaa2/switch-driver.rst
index 8bf411b857d43..5f3885e56f582 100644
--- a/Documentation/networking/device_drivers/ethernet/freescale/dpaa2/switch-driver.rst
+++ b/Documentation/networking/device_drivers/ethernet/freescale/dpaa2/switch-driver.rst
@@ -70,7 +70,7 @@ the DPSW object that it will probe:
 Besides the configuration of the actual DPSW object, the dpaa2-switch driver
 will need the following DPAA2 objects:

- * 1 DPMCP - A Management Command Portal object is needed for any interraction
+ * 1 DPMCP - A Management Command Portal object is needed for any interaction
   with the MC firmware.

 * 1 DPBP - A Buffer Pool is used for seeding buffers intended for the Rx path
diff --git a/Documentation/networking/device_drivers/ethernet/huawei/hinic3.rst b/Documentation/networking/device_drivers/ethernet/huawei/hinic3.rst
new file mode 100644
index 0000000000000..e3dfd083fa525
--- /dev/null
+++ b/Documentation/networking/device_drivers/ethernet/huawei/hinic3.rst
@@ -0,0 +1,137 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=====================================================================
+Linux kernel driver for Huawei Ethernet Device Driver (hinic3) family
+=====================================================================
+
+Overview
+========
+
+The hinic3 is a network interface card (NIC) for data centers. It supports
+a range of link-speed devices (10GE, 25GE, 100GE, etc.). The hinic3
+devices can have multiple physical forms: LOM (LAN on Motherboard) NIC,
+PCIe standard NIC, OCP (Open Compute Project) NIC, etc.
+
+The hinic3 driver supports the following features:
+
+- IPv4/IPv6 TCP/UDP checksum offload
+- TSO (TCP Segmentation Offload), LRO (Large Receive Offload)
+- RSS (Receive Side Scaling)
+- MSI-X interrupt aggregation configuration and interrupt adaptation
+- SR-IOV (Single Root I/O Virtualization)
+
+Content
+=======
+
+- Supported PCI vendor ID/device IDs
+- Source Code Structure of Hinic3 Driver
+- Management Interface
+
+Supported PCI vendor ID/device IDs
+==================================
+
+19e5:0222 - hinic3 PF/PPF
+19e5:375F - hinic3 VF
+
+Prime Physical Function (PPF) is responsible for the management of the
+whole NIC card, for example, clock synchronization between the NIC and
+the host. Any PF may serve as a PPF. The PPF is selected dynamically.
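+
+For illustration, a PCI driver typically advertises such IDs to the PCI core
+through a ``struct pci_device_id`` table. The sketch below is hypothetical
+and not taken from the hinic3 sources; only the numeric IDs above and the
+``PCI_VENDOR_ID_HUAWEI`` constant (0x19e5, from ``include/linux/pci_ids.h``)
+are real:
+
+.. code-block:: c
+
+    /* Hypothetical sketch: matching the IDs listed above. */
+    #include <linux/module.h>
+    #include <linux/pci.h>
+
+    #define HINIC3_DEV_ID_PF 0x0222 /* PF/PPF */
+    #define HINIC3_DEV_ID_VF 0x375f /* VF */
+
+    static const struct pci_device_id hinic3_pci_table[] = {
+        { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HINIC3_DEV_ID_PF) },
+        { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, HINIC3_DEV_ID_VF) },
+        { /* sentinel */ }
+    };
+    MODULE_DEVICE_TABLE(pci, hinic3_pci_table);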
+
+Source Code Structure of Hinic3 Driver
+======================================
+
+======================== ================================================
+hinic3_pci_id_tbl.h      Supported device IDs
+hinic3_hw_intf.h         Interface between HW and driver
+hinic3_queue_common.[ch] Common structures and methods for NIC queues
+hinic3_common.[ch]       Encapsulation of memory operations in Linux
+hinic3_csr.h             Register definitions in the BAR
+hinic3_hwif.[ch]         Interface for BAR
+hinic3_eqs.[ch]          Interface for AEQs and CEQs
+hinic3_mbox.[ch]         Interface for mailbox
+hinic3_mgmt.[ch]         Management interface based on mailbox and AEQ
+hinic3_wq.[ch]           Work queue data structures and interface
+hinic3_cmdq.[ch]         Command queue is used to post commands to HW
+hinic3_hwdev.[ch]        HW structures and methods abstractions
+hinic3_lld.[ch]          Auxiliary driver adaptation layer
+hinic3_hw_comm.[ch]      Interface for common HW operations
+hinic3_mgmt_interface.h  Interface between firmware and driver
+hinic3_hw_cfg.[ch]       Interface for HW configuration
+hinic3_irq.c             Interrupt request
+hinic3_netdev_ops.c      Operations registered to Linux kernel stack
+hinic3_nic_dev.h         NIC structures and methods abstractions
+hinic3_main.c            Main Linux kernel driver
+hinic3_nic_cfg.[ch]      NIC service configuration
+hinic3_nic_io.[ch]       Management plane interface for TX and RX
+hinic3_rss.[ch]          Interface for Receive Side Scaling (RSS)
+hinic3_rx.[ch]           Interface for receive
+hinic3_tx.[ch]           Interface for transmit
+hinic3_ethtool.c         Interface for ethtool operations (ops)
+hinic3_filter.c          Interface for MAC address filtering
+======================== ================================================
+
+Management Interface
+====================
+
+Asynchronous Event Queue (AEQ)
+------------------------------
+
+AEQ receives high priority events from the HW over a descriptor queue.
+Every descriptor is a fixed size of 64 bytes. AEQ can receive solicited or
+unsolicited events. Every device, VF or PF, can have up to 4 AEQs.
+Every AEQ is associated with a dedicated IRQ. AEQ can receive multiple types
+of events, but in practice the hinic3 driver ignores all events except for
+2 mailbox related events.
+
+Mailbox
+-------
+
+Mailbox is a communication mechanism between the hinic3 driver and the HW.
+Each device has an independent mailbox. The driver can use the mailbox to send
+requests to management. The driver receives mailbox messages, such as responses
+to requests, over the AEQ (using event HINIC3_AEQ_FOR_MBOX). Due to the
+limited size of the mailbox data register, mailbox messages are sent
+segment-by-segment.
+
+Every device can use its mailbox to post requests to firmware. The mailbox
+can also be used to post requests and responses between the PF and its VFs.
+
+Completion Event Queue (CEQ)
+----------------------------
+
+The implementation of CEQ is the same as AEQ. It receives completion events
+from HW over a fixed size descriptor of 32 bits. Every device can have up
+to 32 CEQs. Every CEQ has a dedicated IRQ. CEQ only receives solicited
+events that are responses to requests from the driver. CEQ can receive
+multiple types of events, but in practice the hinic3 driver ignores all
+events except for HINIC3_CMDQ that represents completion of previously
+posted commands on a cmdq.
+
+Command Queue (cmdq)
+--------------------
+
+Every cmdq has a dedicated work queue on which commands are posted.
+Commands on the work queue are fixed-size descriptors of 64 bytes.
+Completion of a command will be indicated using ctrl bits in the
+descriptor that carried the command.
Notification of command completions
+will also be provided via event on CEQ. Every device has 4 command queues
+that are initialized as a set (called cmdqs), each with its own type.
+Hinic3 driver only uses type HINIC3_CMDQ_SYNC.
+
+Work Queues (WQ)
+----------------
+
+Work queues are logical arrays of fixed size WQEs. The array may be spread
+over multiple non-contiguous pages using an indirection table. Work queues are
+used by I/O queues and command queues.
+
+Global function ID
+------------------
+
+Every function, PF or VF, has a unique ordinal identification within the device.
+Many management commands (mbox or cmdq) contain this ID so HW can apply the
+command effect to the right function.
+
+PF is allowed to post management commands to a subordinate VF by specifying the
+VF's ID. A VF must provide its own ID. Anti-spoofing in the HW will cause a
+command from a VF to fail if it contains the wrong ID.
+
diff --git a/Documentation/networking/device_drivers/ethernet/index.rst b/Documentation/networking/device_drivers/ethernet/index.rst
index 6fc1961492b77..f0dd920be12f4 100644
--- a/Documentation/networking/device_drivers/ethernet/index.rst
+++ b/Documentation/networking/device_drivers/ethernet/index.rst
@@ -28,6 +28,7 @@ Contents:
    freescale/gianfar
    google/gve
    huawei/hinic
+   huawei/hinic3
    intel/e100
    intel/e1000
    intel/e1000e
diff --git a/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/counters.rst b/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/counters.rst
index 99d95be4d159f..43d72c8b713b1 100644
--- a/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/counters.rst
+++ b/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/counters.rst
@@ -1082,6 +1082,11 @@ like flow control, FEC and more.
       need to replace the cable/transceiver.
     - Error

+   * - `total_success_recovery_phy`
+     - The total number of successful recovery events of any type during the
+       port's reset cycle.
+     - Error
+
    * - `rx_out_of_buffer`
      - Number of times receive queue had no software buffers allocated for the
        adapter's incoming traffic.
diff --git a/Documentation/networking/device_drivers/index.rst b/Documentation/networking/device_drivers/index.rst
index 0dd30a84ce25c..a254af25b7efc 100644
--- a/Documentation/networking/device_drivers/index.rst
+++ b/Documentation/networking/device_drivers/index.rst
@@ -9,7 +9,6 @@ Contents:
    :maxdepth: 2

    atm/index
-   cable/index
    can/index
    cellular/index
    ethernet/index
diff --git a/Documentation/networking/devlink/bnxt.rst b/Documentation/networking/devlink/bnxt.rst
index a4fb27663cd6b..9a8b3d76d11f4 100644
--- a/Documentation/networking/devlink/bnxt.rst
+++ b/Documentation/networking/devlink/bnxt.rst
@@ -24,6 +24,8 @@ Parameters
      - Permanent
    * - ``enable_remote_dev_reset``
      - Runtime
+   * - ``enable_roce``
+     - Permanent

 The ``bnxt`` driver also implements the following driver-specific
 parameters.
diff --git a/Documentation/networking/devlink/devlink-info.rst b/Documentation/networking/devlink/devlink-info.rst
index 23073bc219d8e..04afceee0c03a 100644
--- a/Documentation/networking/devlink/devlink-info.rst
+++ b/Documentation/networking/devlink/devlink-info.rst
@@ -50,6 +50,11 @@ versions is generally discouraged - here, and via any other Linux API.

      This is usually the serial number of the board, often available in PCI
      *Vital Product Data*.
+   * - ``function.uid``
+     - Function unique identifier.
+
+       Vendor-defined unique identifier of a function.
+
   * - ``fixed``
     - Group for hardware identifiers, and versions of components which are
       not field-updatable.
diff --git a/Documentation/networking/devlink/ice.rst b/Documentation/networking/devlink/ice.rst
index e3972d03cea04..792e9f8c846ab 100644
--- a/Documentation/networking/devlink/ice.rst
+++ b/Documentation/networking/devlink/ice.rst
@@ -69,6 +69,17 @@ Parameters

        To verify that value has been set:
        $ devlink dev param show pci/0000:16:00.0 name tx_scheduling_layers
+   * - ``msix_vec_per_pf_max``
+     - driverinit
+     - Set the max MSI-X that can be used by the PF; the rest can be utilized
+       for SR-IOV. The range is from the min value set in msix_vec_per_pf_min
+       to 2k/number of ports.
+   * - ``msix_vec_per_pf_min``
+     - driverinit
+     - Set the min MSI-X that will be used by the PF. This value informs how
+       many MSI-X vectors will be allocated statically. The range is from 2
+       to the value set in msix_vec_per_pf_max.
+
 .. list-table:: Driver specific parameters implemented
    :widths: 5 5 90
diff --git a/Documentation/networking/devlink/mlx5.rst b/Documentation/networking/devlink/mlx5.rst
index 41618538fc70c..7febe0aecd53f 100644
--- a/Documentation/networking/devlink/mlx5.rst
+++ b/Documentation/networking/devlink/mlx5.rst
@@ -280,6 +280,10 @@ Description of the vnic counters:
   number of packets handled by the VNIC experiencing unexpected steering
   failure (at any point in steering flow owned by the VNIC, including the FDB
   for the eswitch owner).
+- icm_consumption
+  amount of Interconnect Host Memory (ICM) consumed by the vnic in
+  granularity of 4KB. ICM is host memory allocated by SW upon HCA request
+  and is used for storing data structures that control HCA operation.

 User commands examples:
diff --git a/Documentation/networking/devlink/sfc.rst b/Documentation/networking/devlink/sfc.rst
index db64a1bd97336..0398d59ea1848 100644
--- a/Documentation/networking/devlink/sfc.rst
+++ b/Documentation/networking/devlink/sfc.rst
@@ -5,7 +5,7 @@ sfc devlink support
 ===================

 This document describes the devlink features implemented by the ``sfc``
-device driver for the ef100 device.
+device driver for the ef10 and ef100 devices.

 Info versions
 =============
@@ -18,6 +18,10 @@ The ``sfc`` driver reports the following versions
    * - Name
      - Type
      - Description
+   * - ``fw.bundle_id``
+     - stored
+     - Version of the firmware "bundle" image that was last used to update
+       multiple components.
    * - ``fw.mgmt.suc``
      - running
      - For boards where the management function is split between multiple
@@ -55,3 +59,13 @@ The ``sfc`` driver reports the following versions
    * - ``fw.uefi``
      - running
      - UEFI driver version (No UNDI support).
+
+Flash Update
+============
+
+The ``sfc`` driver implements support for flash update using the
+``devlink-flash`` interface. It supports updating the device flash using a
+combined flash image ("bundle") that contains multiple components (on ef10,
+typically ``fw.mgmt``, ``fw.app``, ``fw.exprom`` and ``fw.uefi``).
+
+The driver does not support any overwrite mask flags.
diff --git a/Documentation/networking/devmem.rst b/Documentation/networking/devmem.rst
index d953636453314..eb678ca454968 100644
--- a/Documentation/networking/devmem.rst
+++ b/Documentation/networking/devmem.rst
@@ -256,7 +256,7 @@ Testing
 =======

 More realistic example code can be found in the kernel source under
-``tools/testing/selftests/net/ncdevmem.c``
+``tools/testing/selftests/drivers/net/hw/ncdevmem.c``

 ncdevmem is a devmem TCP netcat. It works very similarly to netcat, but
 receives data directly into a udmabuf.
@@ -268,8 +268,7 @@ ncdevmem has a validation mode as well that expects a repeating pattern of
 incoming data and validates it as such. For example, you can launch
 ncdevmem on the server by::

-    ncdevmem -s -c -f eth1 -d 3 -n 0000:06:00.0 -l \
-             -p 5201 -v 7
+    ncdevmem -s -c -f -l -p 5201 -v 7

 On client side, use regular netcat to send TX data to ncdevmem process
 on the server::
diff --git a/Documentation/networking/ethtool-netlink.rst b/Documentation/networking/ethtool-netlink.rst
index 3770a22945097..b6e9af4d0f1b9 100644
--- a/Documentation/networking/ethtool-netlink.rst
+++ b/Documentation/networking/ethtool-netlink.rst
@@ -1934,7 +1934,7 @@ ETHTOOL_A_RSS_INDIR attribute returns RSS indirection table where each byte
 indicates queue number.
 ETHTOOL_A_RSS_INPUT_XFRM attribute is a bitmap indicating the type of
 transformation applied to the input protocol fields before given to the RSS
-hfunc. Current supported option is symmetric-xor.
+hfunc. Currently supported options are symmetric-xor and symmetric-or-xor.

 PLCA_GET_CFG
 ============
diff --git a/Documentation/networking/ip-sysctl.rst b/Documentation/networking/ip-sysctl.rst
index 363b4950d542a..5c63ab928b97d 100644
--- a/Documentation/networking/ip-sysctl.rst
+++ b/Documentation/networking/ip-sysctl.rst
@@ -705,6 +705,8 @@ tcp_retries2 - INTEGER
	seconds and is a lower bound for the effective timeout.
	TCP will effectively time out at the first RTO which exceeds the
	hypothetical timeout.
+	If tcp_rto_max_ms is decreased, it is recommended to also
+	change tcp_retries2.

	RFC 1122 recommends at least 100 seconds for the timeout,
	which corresponds to a value of at least 8.
@@ -1227,8 +1229,8 @@ tcp_pingpong_thresh - INTEGER
 tcp_rto_min_us - INTEGER
	Minimal TCP retransmission timeout (in microseconds). Note that the
	rto_min route option has the highest precedence for configuring this
-	setting, followed by the TCP_BPF_RTO_MIN socket option, followed by
-	this tcp_rto_min_us sysctl.
+	setting, followed by the TCP_BPF_RTO_MIN and TCP_RTO_MIN_US socket
+	options, followed by this tcp_rto_min_us sysctl.

	The recommended practice is to use a value less or equal to 200000
	microseconds.
@@ -1237,6 +1239,17 @@ tcp_rto_min_us - INTEGER

	Default: 200000

+tcp_rto_max_ms - INTEGER
+	Maximal TCP retransmission timeout (in ms).
+	Note that the TCP_RTO_MAX_MS socket option has higher precedence.
+
+	When changing tcp_rto_max_ms, it is important to understand
+	that tcp_retries2 might need a change.
+
+	Possible Values: 1000 - 120,000
+
+	Default: 120,000
+
 UDP variables
 =============
diff --git a/Documentation/networking/j1939.rst b/Documentation/networking/j1939.rst
index 544bad175aae2..45f02efe3df5f 100644
--- a/Documentation/networking/j1939.rst
+++ b/Documentation/networking/j1939.rst
@@ -66,6 +66,90 @@ the library exclusively, or by the in-kernel system exclusively.
 J1939 concepts
 ==============

+Data Sent to the J1939 Stack
+----------------------------
+
+The data buffers sent to the J1939 stack from user space are not CAN frames
+themselves. Instead, they are payloads that the J1939 stack converts into
+proper CAN frames based on the size of the buffer and the type of transfer. The
+size of the buffer influences how the stack processes the data and determines
+the internal code path used for the transfer.
+
+**Handling of Different Buffer Sizes:**
+
+- **Buffers with a size of 8 bytes or less:**
+
+  - These are handled as simple sessions internally within the stack.
+
+  - The stack converts the buffer directly into a single CAN frame without
+    fragmentation.
+ + - This type of transfer does not require an actual client (receiver) on the + receiving side. + +- **Buffers up to 1785 bytes:** + + - These are automatically handled as J1939 Transport Protocol (TP) transfers. + + - Internally, the stack splits the buffer into multiple 8-byte CAN frames. + + - TP transfers can be unicast or broadcast. + + - **Broadcast TP:** Does not require a receiver on the other side and can be + used in broadcast scenarios. + + - **Unicast TP:** Requires an active receiver (client) on the other side to + acknowledge the transfer. + +- **Buffers from 1786 bytes up to 111 MiB:** + + - These are handled as ISO 11783 Extended Transport Protocol (ETP) transfers. + + - ETP transfers are used for larger payloads and are split into multiple CAN + frames internally. + + - **ETP transfers (unicast):** Require a receiver on the other side to + process the incoming data and acknowledge each step of the transfer. + + - ETP transfers cannot be broadcast like TP transfers, and always require a + receiver for operation. + +**Non-Blocking Operation with `MSG_DONTWAIT`:** + +The J1939 stack supports non-blocking operation when used in combination with +the `MSG_DONTWAIT` flag. In this mode, the stack attempts to take as much data +as the available memory for the socket allows. It returns the amount of data +that was successfully taken, and it is the responsibility of user space to +monitor this value and handle partial transfers. + +- If the stack cannot take the entire buffer, it returns the number of bytes + successfully taken, and user space should handle the remainder. + +- **Error handling:** When using `MSG_DONTWAIT`, the user must rely on the + error queue to detect transfer errors. See the **SO_J1939_ERRQUEUE** section + for details on how to subscribe to error notifications. Without the error + queue, there is no other way for user space to be notified of transfer errors + during non-blocking operations. + +**Behavior and Requirements:** + +- **Simple transfers (<= 8 bytes):** Do not require a receiver on the other + side, making them easy to send without needing address claiming or + coordination with a destination. + +- **Unicast TP/ETP:** Requires a receiver on the other side to complete the + transfer. The receiver must acknowledge the transfer for the session to + proceed successfully. + +- **Broadcast TP:** Allows sending data without a receiver, but only works for + TP transfers. ETP cannot be broadcast and always needs a receiving client. + +These different behaviors depend heavily on the size of the buffer provided to +the stack, and the appropriate transport mechanism (TP or ETP) is selected +based on the payload size. The stack automatically manages the fragmentation +and reassembly of large payloads and ensures that the correct CAN frames are +generated and transmitted for each session. + PGN --- @@ -338,6 +422,459 @@ with ``cmsg_level == SOL_J1939 && cmsg_type == SCM_J1939_DEST_ADDR``, } } +setsockopt(2) +^^^^^^^^^^^^^ + +The ``setsockopt(2)`` function is used to configure various socket-level +options for J1939 communication. The following options are supported: + +``SO_J1939_FILTER`` +~~~~~~~~~~~~~~~~~~~ + +The ``SO_J1939_FILTER`` option is essential when the default behavior of +``bind(2)`` and ``connect(2)`` is insufficient for specific use cases. By +default, ``bind(2)`` and ``connect(2)`` allow a socket to be associated with a +single unicast or broadcast address. 
However, there are scenarios where finer +control over the incoming messages is required, such as filtering by Parameter +Group Number (PGN) rather than by addresses. + +For example, in a system where multiple types of J1939 messages are being +transmitted, a process might only be interested in a subset of those messages, +such as specific PGNs, and not want to receive all messages destined for its +address or broadcast to the bus. + +By applying the ``SO_J1939_FILTER`` option, you can filter messages based on: + +- **Source Address (SA)**: Filter messages coming from specific source + addresses. + +- **Source Name**: Filter messages coming from ECUs with specific NAME + identifiers. + +- **Parameter Group Number (PGN)**: Focus on receiving messages with specific + PGNs, filtering out irrelevant ones. + +This filtering mechanism is particularly useful when: + +- You want to receive a subset of messages based on their PGNs, even if the + address is the same. + +- You need to handle both broadcast and unicast messages but only care about + certain message types or parameters. + +- The ``bind(2)`` and ``connect(2)`` functions only allow binding to a single + address, which might not be sufficient if the process needs to handle multiple + PGNs but does not want to open multiple sockets. + +To remove existing filters, you can pass ``optval == NULL`` or ``optlen == 0`` +to ``setsockopt(2)``. This will clear all currently set filters. If you want to +**update** the set of filters, you must pass the updated filter set to +``setsockopt(2)``, as the new filter set will **replace** the old one entirely. +This behavior ensures that any previous filter configuration is discarded and +only the new set is applied. + +Example of removing all filters: + +.. code-block:: c + + setsockopt(sock, SOL_CAN_J1939, SO_J1939_FILTER, NULL, 0); + +**Maximum number of filters:** The maximum amount of filters that can be +applied using ``SO_J1939_FILTER`` is defined by ``J1939_FILTER_MAX``, which is +set to 512. This means you can configure up to 512 individual filters to match +your specific filtering needs. + +Practical use case: **Monitoring Address Claiming** + +One practical use case is monitoring the J1939 address claiming process by +filtering for specific PGNs related to address claiming. This allows a process +to monitor and handle address claims without processing unrelated messages. + +Example: + +.. code-block:: c + + struct j1939_filter filt[] = { + { + .pgn = J1939_PGN_ADDRESS_CLAIMED, + .pgn_mask = J1939_PGN_PDU1_MAX, + }, { + .pgn = J1939_PGN_REQUEST, + .pgn_mask = J1939_PGN_PDU1_MAX, + }, { + .pgn = J1939_PGN_ADDRESS_COMMANDED, + .pgn_mask = J1939_PGN_MAX, + }, + }; + setsockopt(sock, SOL_CAN_J1939, SO_J1939_FILTER, &filt, sizeof(filt)); + +In this example, the socket will only receive messages with the PGNs related to +address claiming: ``J1939_PGN_ADDRESS_CLAIMED``, ``J1939_PGN_REQUEST``, and +``J1939_PGN_ADDRESS_COMMANDED``. This is particularly useful in scenarios where +you want to monitor and process address claims without being overwhelmed by +other traffic on the J1939 network. + +``SO_J1939_PROMISC`` +~~~~~~~~~~~~~~~~~~~~ + +The ``SO_J1939_PROMISC`` option enables socket-level promiscuous mode. When +this option is enabled, the socket will receive all J1939 traffic, regardless +of any filters set by ``bind()`` or ``connect()``. This is analogous to +enabling promiscuous mode for an Ethernet interface, where all traffic on the +network segment is captured. 
+ +However, **`SO_J1939_FILTER` has a higher priority** compared to +``SO_J1939_PROMISC``. This means that even in promiscuous mode, you can reduce +the number of packets received by applying specific filters with +`SO_J1939_FILTER`. The filters will limit which packets are passed to the +socket, allowing for more refined traffic selection while promiscuous mode is +active. + +The acceptable value size for this option is ``sizeof(int)``, and the value is +only differentiated between `0` and non-zero. A value of `0` disables +promiscuous mode, while any non-zero value enables it. + +This combination can be useful for debugging or monitoring specific types of +traffic while still capturing a broad set of messages. + +Example: + +.. code-block:: c + + int value = 1; + setsockopt(sock, SOL_CAN_J1939, SO_J1939_PROMISC, &value, sizeof(value)); + +In this example, setting ``value`` to any non-zero value (e.g., `1`) enables +promiscuous mode, allowing the socket to receive all J1939 traffic on the +network. + +``SO_BROADCAST`` +~~~~~~~~~~~~~~~~ + +The ``SO_BROADCAST`` option enables the sending and receiving of broadcast +messages. By default, broadcast messages are disabled for J1939 sockets. When +this option is enabled, the socket will be allowed to send and receive +broadcast packets on the J1939 network. + +Due to the nature of the CAN bus as a shared medium, all messages transmitted +on the bus are visible to all participants. In the context of J1939, +broadcasting refers to using a specific destination address field, where the +destination address is set to a value that indicates the message is intended +for all participants (usually a global address such as 0xFF). Enabling the +broadcast option allows the socket to send and receive such broadcast messages. + +The acceptable value size for this option is ``sizeof(int)``, and the value is +only differentiated between `0` and non-zero. A value of `0` disables the +ability to send and receive broadcast messages, while any non-zero value +enables it. + +Example: + +.. code-block:: c + + int value = 1; + setsockopt(sock, SOL_SOCKET, SO_BROADCAST, &value, sizeof(value)); + +In this example, setting ``value`` to any non-zero value (e.g., `1`) enables +the socket to send and receive broadcast messages. + +``SO_J1939_SEND_PRIO`` +~~~~~~~~~~~~~~~~~~~~~~ + +The ``SO_J1939_SEND_PRIO`` option sets the priority of outgoing J1939 messages +for the socket. In J1939, messages can have different priorities, and lower +numerical values indicate higher priority. This option allows the user to +control the priority of messages sent from the socket by adjusting the priority +bits in the CAN identifier. + +The acceptable value **size** for this option is ``sizeof(int)``, and the value +is expected to be in the range of 0 to 7, where `0` is the highest priority, +and `7` is the lowest. By default, the priority is set to `6` if this option is +not explicitly configured. + +Note that the priority values `0` and `1` can only be set if the process has +the `CAP_NET_ADMIN` capability. These are reserved for high-priority traffic +and require administrative privileges. + +Example: + +.. code-block:: c + + int prio = 3; // Priority value between 0 (highest) and 7 (lowest) + setsockopt(sock, SOL_CAN_J1939, SO_J1939_SEND_PRIO, &prio, sizeof(prio)); + +In this example, the priority is set to `3`, meaning the outgoing messages will +be sent with a moderate priority level. 
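+
+The configured value can be read back with ``getsockopt(2)``. A minimal
+sketch, assuming ``sock`` is the J1939 socket from the examples above:
+
+.. code-block:: c
+
+    int prio;
+    socklen_t optlen = sizeof(prio);
+
+    if (getsockopt(sock, SOL_CAN_J1939, SO_J1939_SEND_PRIO,
+                   &prio, &optlen) == 0)
+        printf("send priority: %d\n", prio); /* 3 after the example above */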
+
+``SO_J1939_ERRQUEUE``
+~~~~~~~~~~~~~~~~~~~~~
+
+The ``SO_J1939_ERRQUEUE`` option enables the socket to receive error messages
+from the error queue, providing diagnostic information about transmission
+failures, protocol violations, or other issues that occur during J1939
+communication. Once this option is set, user space is required to handle
+``MSG_ERRQUEUE`` messages.
+
+Setting ``SO_J1939_ERRQUEUE`` to ``0`` will purge any currently present error
+messages in the error queue. When enabled, error messages can be retrieved
+using the ``recvmsg(2)`` system call.
+
+When subscribing to the error queue, the following error events can be
+accessed:
+
+- **``J1939_EE_INFO_TX_ABORT``**: Transmission abort errors.
+- **``J1939_EE_INFO_RX_RTS``**: Reception of RTS (Request to Send) control
+  frames.
+- **``J1939_EE_INFO_RX_DPO``**: Reception of data packets with Data Page Offset
+  (DPO).
+- **``J1939_EE_INFO_RX_ABORT``**: Reception abort errors.
+
+The error queue can be used to correlate errors with specific message transfer
+sessions using the session ID (``tskey``). The session ID is assigned via the
+``SOF_TIMESTAMPING_OPT_ID`` flag, which is set by enabling the
+``SO_TIMESTAMPING`` option.
+
+If ``SO_J1939_ERRQUEUE`` is activated, the user is required to pull messages
+from the error queue, meaning that using plain ``recv(2)`` is not sufficient
+anymore. The user must use ``recvmsg(2)`` with appropriate flags to handle
+error messages. Failure to do so can result in the socket becoming blocked with
+unprocessed error messages in the queue.
+
+It is **recommended** that ``SO_J1939_ERRQUEUE`` be used in combination with
+``SO_TIMESTAMPING`` in most cases. This enables proper error handling along
+with session tracking and timestamping, providing a more detailed analysis of
+message transfers and errors.
+
+The acceptable value **size** for this option is ``sizeof(int)``, and the value
+is only differentiated between ``0`` and non-zero. A value of ``0`` disables
+error queue reception and purges any existing error messages, while any
+non-zero value enables it.
+
+Example:
+
+.. code-block:: c
+
+    int enable = 1; // Enable error queue reception
+    setsockopt(sock, SOL_CAN_J1939, SO_J1939_ERRQUEUE, &enable, sizeof(enable));
+
+    // Enable timestamping with session tracking via tskey
+    int timestamping = SOF_TIMESTAMPING_OPT_ID | SOF_TIMESTAMPING_TX_ACK |
+                       SOF_TIMESTAMPING_TX_SCHED |
+                       SOF_TIMESTAMPING_RX_SOFTWARE | SOF_TIMESTAMPING_OPT_CMSG;
+    setsockopt(sock, SOL_SOCKET, SO_TIMESTAMPING, &timestamping,
+               sizeof(timestamping));
+
+When enabled, error messages can be retrieved using ``recvmsg(2)``. By
+combining ``SO_J1939_ERRQUEUE`` with ``SO_TIMESTAMPING`` (with
+``SOF_TIMESTAMPING_OPT_ID`` and ``SOF_TIMESTAMPING_OPT_CMSG`` enabled), the
+user can track message transfers, retrieve precise timestamps, and correlate
+errors with specific sessions.
+
+For more information on enabling timestamps and session tracking, refer to the
+`SO_TIMESTAMPING` section.
+
+``SO_TIMESTAMPING``
+~~~~~~~~~~~~~~~~~~~
+
+The ``SO_TIMESTAMPING`` option allows the socket to receive timestamps for
+various events related to message transmissions and receptions in J1939. This
+option is often used in combination with ``SO_J1939_ERRQUEUE`` to provide
+detailed diagnostic information, session tracking, and precise timing data for
+message transfers.
+
+In J1939, all payloads provided by user space, regardless of size, are
+processed by the kernel as **sessions**.
This includes both single-frame +messages (up to 8 bytes) and multi-frame protocols such as the Transport +Protocol (TP) and Extended Transport Protocol (ETP). Even for small, +single-frame messages, the kernel creates a session to manage the transmission +and reception. The concept of sessions allows the kernel to manage various +aspects of the protocol, such as reassembling multi-frame messages and tracking +the status of transmissions. + +When receiving extended error messages from the error queue, the error +information is delivered through a `struct sock_extended_err`, accessible via +the control message (``cmsg``) retrieved using the ``recvmsg(2)`` system call. + +There are two typical origins for the extended error messages in J1939: + +1. ``serr->ee_origin == SO_EE_ORIGIN_TIMESTAMPING``: + + In this case, the `serr->ee_info` field will contain one of the following + timestamp types: + + - ``SCM_TSTAMP_SCHED``: This timestamp is valid for Extended Transport + Protocol (ETP) transfers and simple transfers (8 bytes or less). It + indicates when a message or set of frames has been scheduled for + transmission. + + - For simple transfers (8 bytes or less), it marks the point when the + message is queued and ready to be sent onto the CAN bus. + + - For ETP transfers, it is sent after receiving a CTS (Clear to Send) + frame on the sender side, indicating that a new set of frames has been + scheduled for transmission. + + - The Transport Protocol (TP) case is currently not implemented for this + timestamp. + + - On the receiver side, the counterpart to this event for ETP is + represented by the ``J1939_EE_INFO_RX_DPO`` message, which indicates the + reception of a Data Page Offset (DPO) control frame. + + - ``SCM_TSTAMP_ACK``: This timestamp indicates the acknowledgment of the + message or session. + + - For simple transfers (8 bytes or less), it marks when the message has + been sent and an echo confirmation has been received from the CAN + controller, indicating that the frame was transmitted onto the bus. + + - For multi-frame transfers (TP or ETP), it signifies that the entire + session has been acknowledged, typically after receiving the End of + Message Acknowledgment (EOMA) packet. + +2. ``serr->ee_origin == SO_EE_ORIGIN_LOCAL``: + + In this case, the `serr->ee_info` field will contain one of the following + J1939 stack-specific message types: + + - ``J1939_EE_INFO_TX_ABORT``: This message indicates that the transmission + of a message or session was aborted. The cause of the abort can come from + various sources: + + - **CAN stack failure**: The J1939 stack was unable to pass the frame to + the CAN framework for transmission. + + - **Echo failure**: The J1939 stack did not receive an echo confirmation + from the CAN controller, meaning the frame may not have been successfully + transmitted to the CAN bus. + + - **Protocol-level issues**: For multi-frame transfers (TP/ETP), this + could include protocol-related errors, such as an abort signaled by the + receiver or a timeout at the protocol level, which causes the session to + terminate prematurely. + + - The corresponding error code is stored in ``serr->ee_data`` + (``session->err`` on kernel side), providing additional details about + the specific reason for the abort. + + - ``J1939_EE_INFO_RX_RTS``: This message indicates that the J1939 stack has + received a Request to Send (RTS) control frame, signaling the start of a + multi-frame transfer using the Transport Protocol (TP) or Extended + Transport Protocol (ETP). 
+ + - It informs the receiver that the sender is ready to transmit a + multi-frame message and includes details about the total message size + and the number of frames to be sent. + + - Statistics such as ``J1939_NLA_TOTAL_SIZE``, ``J1939_NLA_PGN``, + ``J1939_NLA_SRC_NAME``, and ``J1939_NLA_DEST_NAME`` are provided along + with the ``J1939_EE_INFO_RX_RTS`` message, giving detailed information + about the incoming transfer. + + - ``J1939_EE_INFO_RX_DPO``: This message indicates that the J1939 stack has + received a Data Page Offset (DPO) control frame, which is part of the + Extended Transport Protocol (ETP). + + - The DPO frame signals the continuation of an ETP multi-frame message by + indicating the offset position in the data being transferred. It helps + the receiver manage large data sets by identifying which portion of the + message is being received. + + - It is typically paired with a corresponding ``SCM_TSTAMP_SCHED`` event + on the sender side, which indicates when the next set of frames is + scheduled for transmission. + + - This event includes statistics such as ``J1939_NLA_BYTES_ACKED``, which + tracks the number of bytes acknowledged up to that point in the session. + + - ``J1939_EE_INFO_RX_ABORT``: This message indicates that the reception of a + multi-frame message (Transport Protocol or Extended Transport Protocol) has + been aborted. + + - The abort can be triggered by protocol-level errors such as timeouts, an + unexpected frame, or a specific abort request from the sender. + + - This message signals that the receiver cannot continue processing the + transfer, and the session is terminated. + + - The corresponding error code is stored in ``serr->ee_data`` + (``session->err`` on kernel side ), providing further details about the + reason for the abort, such as protocol violations or timeouts. + + - After receiving this message, the receiver discards the partially received + frames, and the multi-frame session is considered incomplete. + +In both cases, if ``SOF_TIMESTAMPING_OPT_ID`` is enabled, ``serr->ee_data`` +will be set to the session’s unique identifier (``session->tskey``). This +allows user space to track message transfers by their session identifier across +multiple frames or stages. + +In all other cases, ``serr->ee_errno`` will be set to ``ENOMSG``, except for +the ``J1939_EE_INFO_TX_ABORT`` and ``J1939_EE_INFO_RX_ABORT`` cases, where the +kernel sets ``serr->ee_data`` to the error stored in ``session->err``. All +protocol-specific errors are converted to standard kernel error values and +stored in ``session->err``. These error values are unified across system calls +and ``serr->ee_errno``. Some of the known error values are described in the +`Error Codes in the J1939 Stack` section. + +When the `J1939_EE_INFO_RX_RTS` message is provided, it will include the +following statistics for multi-frame messages (TP and ETP): + + - ``J1939_NLA_TOTAL_SIZE``: Total size of the message in the session. + - ``J1939_NLA_PGN``: Parameter Group Number (PGN) identifying the message type. + - ``J1939_NLA_SRC_NAME``: 64-bit name of the source ECU. + - ``J1939_NLA_DEST_NAME``: 64-bit name of the destination ECU. + - ``J1939_NLA_SRC_ADDR``: 8-bit source address of the sending ECU. + - ``J1939_NLA_DEST_ADDR``: 8-bit destination address of the receiving ECU. + +- For other messages (including single-frame messages), only the following + statistic is included: + + - ``J1939_NLA_BYTES_ACKED``: Number of bytes successfully acknowledged in the + session. 
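+
+Putting the above together, a minimal sketch for draining the error queue
+could look like the following. It assumes ``sock`` was configured with
+``SO_J1939_ERRQUEUE`` and ``SO_TIMESTAMPING`` as shown earlier, and that the
+extended error arrives as a ``SOL_CAN_J1939``/``SCM_J1939_ERRQUEUE`` control
+message:
+
+.. code-block:: c
+
+    char control[256];
+    struct msghdr msg = {
+        .msg_control = control,
+        .msg_controllen = sizeof(control),
+    };
+    struct sock_extended_err *serr;
+    struct cmsghdr *cmsg;
+
+    if (recvmsg(sock, &msg, MSG_ERRQUEUE) < 0)
+        return; /* nothing queued, or a real failure */
+
+    for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
+        if (cmsg->cmsg_level != SOL_CAN_J1939 ||
+            cmsg->cmsg_type != SCM_J1939_ERRQUEUE)
+            continue;
+
+        serr = (struct sock_extended_err *)CMSG_DATA(cmsg);
+        if (serr->ee_origin == SO_EE_ORIGIN_TIMESTAMPING &&
+            serr->ee_info == SCM_TSTAMP_ACK)
+            /* with SOF_TIMESTAMPING_OPT_ID, ee_data holds the tskey */
+            printf("session %u acknowledged\n", serr->ee_data);
+        else if (serr->ee_origin == SO_EE_ORIGIN_LOCAL &&
+                 serr->ee_info == J1939_EE_INFO_TX_ABORT)
+            printf("session %u aborted\n", serr->ee_data);
+    }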
+ +The key flags for ``SO_TIMESTAMPING`` include: + +- ``SOF_TIMESTAMPING_OPT_ID``: Enables the use of a unique session identifier + (``tskey``) for each transfer. This identifier helps track message transfers + and errors as distinct sessions in user space. When this option is enabled, + ``serr->ee_data`` will be set to ``session->tskey``. + +- ``SOF_TIMESTAMPING_OPT_CMSG``: Sends timestamp information through control + messages (``struct scm_timestamping``), allowing the application to retrieve + timestamps alongside the data. + +- ``SOF_TIMESTAMPING_TX_SCHED``: Provides the timestamp for when a message is + scheduled for transmission (``SCM_TSTAMP_SCHED``). + +- ``SOF_TIMESTAMPING_TX_ACK``: Provides the timestamp for when a message + transmission is fully acknowledged (``SCM_TSTAMP_ACK``). + +- ``SOF_TIMESTAMPING_RX_SOFTWARE``: Provides timestamps for reception-related + events (e.g., ``J1939_EE_INFO_RX_RTS``, ``J1939_EE_INFO_RX_DPO``, + ``J1939_EE_INFO_RX_ABORT``). + +These flags enable detailed monitoring of message lifecycles, including +transmission scheduling, acknowledgments, reception timestamps, and gathering +detailed statistics about the communication session, especially for multi-frame +payloads like TP and ETP. + +Example: + +.. code-block:: c + + // Enable timestamping with various options, including session tracking and + // statistics + int sock_opt = SOF_TIMESTAMPING_OPT_CMSG | + SOF_TIMESTAMPING_TX_ACK | + SOF_TIMESTAMPING_TX_SCHED | + SOF_TIMESTAMPING_OPT_ID | + SOF_TIMESTAMPING_RX_SOFTWARE; + + setsockopt(sock, SOL_SOCKET, SO_TIMESTAMPING, &sock_opt, sizeof(sock_opt)); + + + Dynamic Addressing ------------------ @@ -458,3 +995,141 @@ Send: }; sendto(sock, dat, sizeof(dat), 0, (const struct sockaddr *)&saddr, sizeof(saddr)); + + +Error Codes in the J1939 Stack +------------------------------ + +This section lists all potential kernel error codes that can be exposed to user +space when interacting with the J1939 stack. It includes both standard error +codes and those derived from protocol-specific abort codes. + +- ``EAGAIN``: Operation would block; retry may succeed. One common reason is + that an active TP or ETP session exists, and an attempt was made to start a + new overlapping TP or ETP session between the same peers. + +- ``ENETDOWN``: Network is down. This occurs when the CAN interface is switched + to the "down" state. + +- ``ENOBUFS``: No buffer space available. This error occurs when the CAN + interface's transmit (TX) queue is full, and no more messages can be queued. + +- ``EOVERFLOW``: Value too large for defined data type. In J1939, this can + happen if the requested data lies outside of the queued buffer. For example, + if a CTS (Clear to Send) requests an offset not available in the kernel buffer + because user space did not provide enough data. + +- ``EBUSY``: Device or resource is busy. For example, this occurs if an + identical session is already active and the stack is unable to recover from + the condition. + +- ``EACCES``: Permission denied. This error can occur, for example, when + attempting to send broadcast messages, but the socket is not configured with + ``SO_BROADCAST``. + +- ``EADDRNOTAVAIL``: Address not available. This error occurs in cases such as: + + - When attempting to use ``getsockname(2)`` to retrieve the peer's address, + but the socket is not connected. + + - When trying to send data to or from a NAME, but address claiming for the + NAME was not performed or detected by the stack. + +- ``EBADFD``: File descriptor in bad state. 
This error can occur if: + + - Attempting to send data to an unbound socket. + + - The socket is bound but has no source name, and the source address is + ``J1939_NO_ADDR``. + + - The ``can_ifindex`` is incorrect. + +- ``EFAULT``: Bad address. Occurs mostly when the stack can't copy from or to a + sockptr, when there is insufficient data from user space, or when the buffer + provided by user space is not large enough for the requested data. + +- ``EINTR``: A signal occurred before any data was transmitted; see ``signal(7)``. + +- ``EINVAL``: Invalid argument passed. For example: + + - ``msg->msg_namelen`` is less than ``J1939_MIN_NAMELEN``. + + - ``addr->can_family`` is not equal to ``AF_CAN``. + + - An incorrect PGN was provided. + +- ``ENODEV``: No such device. This happens when the CAN network device cannot + be found for the provided ``can_ifindex`` or if ``can_ifindex`` is 0. + +- ``ENOMEM``: Out of memory. Typically related to issues with memory allocation + in the stack. + +- ``ENOPROTOOPT``: Protocol not available. This can occur when using + ``getsockopt(2)`` or ``setsockopt(2)`` if the requested socket option is not + available. + +- ``EDESTADDRREQ``: Destination address required. This error occurs: + + - In the case of ``connect(2)``, if the ``struct sockaddr *uaddr`` is ``NULL``. + + - In the case of ``send*(2)``, if there is an attempt to send an ETP message + to a broadcast address. + +- ``EDOM``: Argument out of domain. This error may happen if attempting to send + a TP or ETP message to a PGN that is reserved for control PGNs for TP or ETP + operations. + +- ``EIO``: I/O error. This can occur if the amount of data provided to the + socket for a TP or ETP session does not match the announced amount of data for + the session. + +- ``ENOENT``: No such file or directory. This can happen when the stack + attempts to transfer CTS or EOMA but cannot find a matching receiving socket + anymore. + +- ``ENOIOCTLCMD``: No ioctls are available for the socket layer. + +- ``EPERM``: Operation not permitted. For example, this can occur if a + requested action requires ``CAP_NET_ADMIN`` privileges. + +- ``ENETUNREACH``: Network unreachable. Most likely, this occurs when frames + cannot be transmitted to the CAN bus. + +- ``ETIME``: Timer expired. This can happen if a timeout occurs while + attempting to send a simple message, for example, when an echo message from + the controller is not received. + +- ``EPROTO``: Protocol error. + + - Used for various protocol-level errors in J1939, including: + + - Duplicate sequence number. + + - Unexpected EDPO or ECTS packet. + + - Invalid PGN or offset in EDPO/ECTS. + + - Number of EDPO packets exceeded CTS allowance. + + - Any other protocol-level error. + +- ``EMSGSIZE``: Message too long. + +- ``ENOMSG``: No message available. + +- ``EALREADY``: The ECU is already engaged in one or more connection-managed + sessions and cannot support another. + +- ``EHOSTUNREACH``: A timeout occurred, and the session was aborted. + +- ``EBADMSG``: CTS (Clear to Send) messages were received during an active data + transfer, causing an abort. + +- ``ENOTRECOVERABLE``: The maximum retransmission request limit was reached, + and the session cannot recover. + +- ``ENOTCONN``: An unexpected data transfer packet was received. + +- ``EILSEQ``: A bad sequence number was received, and the software could not + recover. 
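+
+As an illustration of how these codes surface in user space, the result of a
+send on a J1939 socket can be checked against ``errno`` as usual; a short
+sketch reusing ``sock``, ``dat`` and ``saddr`` from the send example above:
+
+.. code-block:: c
+
+    if (sendto(sock, dat, sizeof(dat), 0,
+               (const struct sockaddr *)&saddr, sizeof(saddr)) < 0) {
+        switch (errno) {
+        case EAGAIN:   /* e.g. an overlapping TP/ETP session is active */
+            break;
+        case EACCES:   /* e.g. broadcast without SO_BROADCAST */
+            break;
+        case ENETDOWN: /* the CAN interface is down */
+            break;
+        default:
+            perror("sendto");
+        }
+    }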
+
diff --git a/Documentation/networking/kcm.rst b/Documentation/networking/kcm.rst
index db0f5560ac1c2..71f44d0beaa36 100644
--- a/Documentation/networking/kcm.rst
+++ b/Documentation/networking/kcm.rst
@@ -200,7 +200,7 @@ while. Example use::

   setsockopt(kcmfd, SOL_KCM, KCM_RECV_DISABLE, &val, sizeof(val))

-BFP programs for message delineation
+BPF programs for message delineation
 ------------------------------------

 BPF programs can be compiled using the BPF LLVM backend. For example,
diff --git a/Documentation/networking/mptcp-sysctl.rst b/Documentation/networking/mptcp-sysctl.rst
index 03e1d3610333e..5bfab01eff5a9 100644
--- a/Documentation/networking/mptcp-sysctl.rst
+++ b/Documentation/networking/mptcp-sysctl.rst
@@ -30,6 +30,10 @@ allow_join_initial_addr_port - BOOLEAN

	Default: 1

+available_path_managers - STRING
+	Shows the available path manager choices that are registered. More
+	path managers may be available, but not loaded.
+
 available_schedulers - STRING
	Shows the available scheduler choices that are registered. More
	packet schedulers may be available, but not loaded.
@@ -72,6 +76,23 @@ enabled - BOOLEAN

	Default: 1 (enabled)

+path_manager - STRING
+	Set the default path manager name to use for each new MPTCP
+	socket. In-kernel path management will control subflow
+	connections and address advertisements according to
+	per-namespace values configured over the MPTCP netlink
+	API. Userspace path management puts per-MPTCP-connection subflow
+	connection decisions and address advertisements under the control
+	of a privileged userspace program, at the cost of more netlink
+	traffic to propagate all of the related events and commands.
+
+	This is a per-namespace sysctl.
+
+	* "kernel" - In-kernel path manager
+	* "userspace" - Userspace path manager
+
+	Default: "kernel"
+
 pm_type - INTEGER
	Set the default path manager type to use for each new MPTCP
	socket. In-kernel path management will control subflow
@@ -84,6 +105,8 @@ pm_type - INTEGER

	This is a per-namespace sysctl.

+	Deprecated since v6.15, use path_manager instead.
+
	* 0 - In-kernel path manager
	* 1 - Userspace path manager
diff --git a/Documentation/networking/napi.rst b/Documentation/networking/napi.rst
index f970a2be271a0..0f83142c624d6 100644
--- a/Documentation/networking/napi.rst
+++ b/Documentation/networking/napi.rst
@@ -171,12 +171,43 @@ a channel as an IRQ/NAPI which services queues of a given type. For example,
 a configuration of 1 ``rx``, 1 ``tx`` and 1 ``combined`` channel is expected
 to utilize 3 interrupts, 2 Rx and 2 Tx queues.

+Persistent NAPI config
+----------------------
+
+Drivers often allocate and free NAPI instances dynamically. This leads to loss
+of NAPI-related user configuration each time NAPI instances are reallocated.
+The netif_napi_add_config() API prevents this loss of configuration by
+associating each NAPI instance with a persistent NAPI configuration based on
+a driver-defined index value, like a queue number.
+
+Using this API allows for persistent NAPI IDs (among other settings), which can
+be beneficial to userspace programs using ``SO_INCOMING_NAPI_ID``. See the
+sections below for other NAPI configuration settings.
+
+Drivers should try to use netif_napi_add_config() whenever possible.
+
 User API
 ========

 User interactions with NAPI depend on NAPI instance ID. The instance IDs are
 only visible to the user through the ``SO_INCOMING_NAPI_ID`` socket option.
-It's not currently possible to query IDs used by a given device.
+
+Users can query NAPI IDs for a device or device queue using netlink. This can
+be done programmatically in a user application or by using a script included in
+the kernel source tree: ``tools/net/ynl/pyynl/cli.py``.
+
+For example, using the script to dump all of the queues for a device (which
+will reveal each queue's NAPI ID):
+
+.. code-block:: bash
+
+   $ kernel-source/tools/net/ynl/pyynl/cli.py \
+       --spec Documentation/netlink/specs/netdev.yaml \
+       --dump queue-get \
+       --json='{"ifindex": 2}'
+
+See ``Documentation/netlink/specs/netdev.yaml`` for more details on
+available operations and attributes.

 Software IRQ coalescing
 -----------------------
@@ -232,7 +263,9 @@ are not well known). Busy polling is enabled by either setting ``SO_BUSY_POLL``
 on selected sockets or using the global ``net.core.busy_poll`` and
 ``net.core.busy_read`` sysctls. An io_uring API for NAPI busy polling
-also exists.
+also exists. Threaded polling of NAPI also has a mode to busy poll for
+packets (:ref:`threaded busy polling <threaded_busy_poll>`) using the same
+thread that is used for NAPI processing.

 epoll-based busy polling
 ------------------------
@@ -395,6 +428,69 @@ Therefore, setting ``gro_flush_timeout`` and ``napi_defer_hard_irqs`` is the
 recommended usage, because otherwise setting ``irq-suspend-timeout`` might
 not have any discernible effect.

+.. _threaded_busy_poll:
+
+Threaded NAPI busy polling
+--------------------------
+
+Threaded NAPI allows the packets of each NAPI instance to be processed in a
+dedicated kernel thread. Threaded NAPI busy polling extends this and adds
+support for continuously busy polling the NAPI from that kthread. This can be
+used to enable busy polling independent of the userspace application or the
+API (epoll, io_uring, raw sockets) being used in userspace to process the
+packets.
+
+It can be enabled for each NAPI using the netlink interface, or at the device
+level using the threaded NAPI sysctl.
+
+For example, using the following script:
+
+.. code-block:: bash
+
+   $ kernel-source/tools/net/ynl/pyynl/cli.py \
+       --spec Documentation/netlink/specs/netdev.yaml \
+       --do napi-set \
+       --json='{"id": 66,
+                "threaded": "busy-poll-enable"}'
+
+
+Enabling it for each NAPI allows finer control, so busy polling can be enabled
+only for the set of NIC queues that will receive traffic with low-latency
+requirements.
+
+Depending on application requirements, the user might want to set the affinity
+of the kthread that is busy polling each NAPI. The user might also want to set
+the priority and the scheduler of the thread depending on the latency
+requirements.
+
+For a hard low-latency application, the user might want to dedicate a full core
+to the NAPI polling so the NIC queue descriptors are picked up from the queue
+as soon as they appear. For a more relaxed low-latency requirement, the user
+might want to share the core with other threads.
+
+Once threaded busy polling is enabled for a NAPI, the PID of the kthread can be
+fetched using the netlink interface so the affinity, priority and scheduler
+configuration can be done.
+
+For example, the following script can be used to fetch the PID:
+
+.. code-block:: bash
+
+   $ kernel-source/tools/net/ynl/pyynl/cli.py \
+       --spec Documentation/netlink/specs/netdev.yaml \
+       --do napi-get \
+       --json='{"id": 66}'
+
+This will output something like the following; the pid ``258`` is the PID of
+the kthread that is polling this NAPI.
+
+.. code-block:: bash
+
+   $ {'defer-hard-irqs': 0,
+      'gro-flush-timeout': 0,
+      'id': 66,
+      'ifindex': 2,
+      'irq-suspend-timeout': 0,
+      'pid': 258,
+      'threaded': 'enable'}
+
.. _threaded:

Threaded NAPI
@@ -413,7 +509,18 @@ dependent). The NAPI instance IDs will be assigned in the opposite
 order than the process IDs of the kernel threads.

 Threaded NAPI is controlled by writing 0/1 to the ``threaded`` file in
-netdev's sysfs directory.
+netdev's sysfs directory. It can also be enabled for a specific NAPI using
+the netlink interface.
+
+For example, using the script:
+
+.. code-block:: bash
+
+   $ kernel-source/tools/net/ynl/pyynl/cli.py \
+       --spec Documentation/netlink/specs/netdev.yaml \
+       --do napi-set \
+       --json='{"id": 66,
+                "threaded": 1}'

 .. rubric:: Footnotes
diff --git a/Documentation/networking/net_cachelines/inet_connection_sock.rst b/Documentation/networking/net_cachelines/inet_connection_sock.rst
index 4a15627fc93b8..8fae85ebb7730 100644
--- a/Documentation/networking/net_cachelines/inet_connection_sock.rst
+++ b/Documentation/networking/net_cachelines/inet_connection_sock.rst
@@ -12,11 +12,11 @@ struct inet_sock icsk_inet read_mostly r
 struct request_sock_queue icsk_accept_queue
 struct inet_bind_bucket icsk_bind_hash read_mostly tcp_set_state
 struct inet_bind2_bucket icsk_bind2_hash read_mostly tcp_set_state,inet_put_port
-unsigned_long icsk_timeout read_mostly inet_csk_reset_xmit_timer,tcp_connect
-struct timer_list icsk_retransmit_timer read_mostly inet_csk_reset_xmit_timer,tcp_connect
+struct timer_list icsk_retransmit_timer read_write inet_csk_reset_xmit_timer,tcp_connect
 struct timer_list icsk_delack_timer read_mostly inet_csk_reset_xmit_timer,tcp_connect
 u32 icsk_rto read_write tcp_cwnd_validate,tcp_schedule_loss_probe,tcp_connect_init,tcp_connect,tcp_write_xmit,tcp_push_one
 u32 icsk_rto_min
+u32 icsk_rto_max read_mostly tcp_reset_xmit_timer
 u32 icsk_delack_max
 u32 icsk_pmtu_cookie read_write tcp_sync_mss,tcp_current_mss,tcp_send_syn_data,tcp_connect_init,tcp_connect
 struct tcp_congestion_ops icsk_ca_ops read_write tcp_cwnd_validate,tcp_tso_segs,tcp_ca_dst_init,tcp_connect_init,tcp_connect,tcp_write_xmit
@@ -38,7 +38,6 @@ struct icsk_ack_u8 quick read_write w
 struct icsk_ack_u8 pingpong
 struct icsk_ack_u8 retry write_mostly read_write inet_csk_clear_xmit_timer,tcp_rearm_rto,tcp_event_new_data_sent,tcp_write_xmit,__tcp_send_ack,tcp_send_ack,
 struct icsk_ack_u8 ato read_mostly write_mostly tcp_dec_quickack_mode,tcp_event_ack_sent,__tcp_transmit_skb,__tcp_send_ack,tcp_send_ack
-struct icsk_ack_unsigned_long timeout read_write read_write inet_csk_reset_xmit_timer,tcp_connect
 struct icsk_ack_u32 lrcvtime read_write tcp_finish_connect,tcp_connect,tcp_event_data_sent,__tcp_transmit_skb
 struct icsk_ack_u16 rcv_mss write_mostly read_mostly __tcp_select_window,__tcp_cleanup_rbuf,tcp_initialize_rcv_mss,tcp_connect_init
 struct icsk_mtup_int search_high read_write tcp_mtup_init,tcp_sync_mss,tcp_connect_init,tcp_mtu_check_reprobe,tcp_write_xmit
diff --git a/Documentation/networking/net_cachelines/net_device.rst b/Documentation/networking/net_cachelines/net_device.rst
index 15e31ece675fc..6327e689e8a84 100644
--- a/Documentation/networking/net_cachelines/net_device.rst
+++ b/Documentation/networking/net_cachelines/net_device.rst
@@ -167,7 +167,7 @@ unsigned:1 wol_enabled
 unsigned:1 threaded napi_poll(napi_enable,dev_set_threaded)
 unsigned_long:1 see_all_hwtstamp_requests
 unsigned_long:1 change_proto_down
-unsigned_long:1 netns_local
+unsigned_long:1 netns_immutable
 unsigned_long:1 fcoe_mtu
 struct list_head net_notifier_list
 struct macsec_ops* macsec_ops
diff --git a/Documentation/networking/net_cachelines/netns_ipv4_sysctl.rst
b/Documentation/networking/net_cachelines/netns_ipv4_sysctl.rst index de0263302f16d..6e7b20afd2d49 100644 --- a/Documentation/networking/net_cachelines/netns_ipv4_sysctl.rst +++ b/Documentation/networking/net_cachelines/netns_ipv4_sysctl.rst @@ -86,6 +86,7 @@ u8 sysctl_tcp_sack u8 sysctl_tcp_window_scaling tcp_syn_options,tcp_parse_options u8 sysctl_tcp_timestamps u8 sysctl_tcp_early_retrans read_mostly tcp_schedule_loss_probe(tcp_write_xmit) +u32 sysctl_tcp_rto_max_ms u8 sysctl_tcp_recovery tcp_fastretrans_alert u8 sysctl_tcp_thin_linear_timeouts tcp_retrans_timer(on_thin_streams) u8 sysctl_tcp_slow_start_after_idle unlikely(tcp_cwnd_validate-network-not-starved) diff --git a/Documentation/networking/net_cachelines/snmp.rst b/Documentation/networking/net_cachelines/snmp.rst index 90ca2d92547d4..bc96efc92cf5b 100644 --- a/Documentation/networking/net_cachelines/snmp.rst +++ b/Documentation/networking/net_cachelines/snmp.rst @@ -36,6 +36,7 @@ unsigned_long LINUX_MIB_TIMEWAITRECYCLED unsigned_long LINUX_MIB_TIMEWAITKILLED unsigned_long LINUX_MIB_PAWSACTIVEREJECTED unsigned_long LINUX_MIB_PAWSESTABREJECTED +unsigned_long LINUX_MIB_TSECR_REJECTED unsigned_long LINUX_MIB_DELAYEDACKLOST unsigned_long LINUX_MIB_LISTENOVERFLOWS unsigned_long LINUX_MIB_LISTENDROPS diff --git a/Documentation/networking/net_cachelines/tcp_sock.rst b/Documentation/networking/net_cachelines/tcp_sock.rst index 1f79765072b10..bc9b2131bf7ac 100644 --- a/Documentation/networking/net_cachelines/tcp_sock.rst +++ b/Documentation/networking/net_cachelines/tcp_sock.rst @@ -27,6 +27,7 @@ u32 dsack_dups u32 snd_una read_mostly read_write tcp_wnd_end,tcp_urg_mode,tcp_minshall_check,tcp_cwnd_validate(tx);tcp_ack,tcp_may_update_window,tcp_clean_rtx_queue(write),tcp_ack_tstamp(rx) u32 snd_sml read_write tcp_minshall_check,tcp_minshall_update u32 rcv_tstamp read_mostly tcp_ack +void * tcp_clean_acked read_mostly tcp_ack u32 lsndtime read_write tcp_slow_start_after_idle_check,tcp_event_data_sent u32 last_oow_ack_time u32 compressed_ack_rcv_nxt diff --git a/Documentation/networking/netconsole.rst b/Documentation/networking/netconsole.rst index 94c4680fdf3e7..a0076b542e9c7 100644 --- a/Documentation/networking/netconsole.rst +++ b/Documentation/networking/netconsole.rst @@ -17,6 +17,8 @@ Release prepend support by Breno Leitao , Jul 7 2023 Userdata append support by Matthew Wood , Jan 22 2024 +Sysdata append support by Breno Leitao , Jan 15 2025 + Please send bug reports to Matt Mackall Satyam Sharma , and Cong Wang @@ -45,7 +47,7 @@ following format:: r if present, prepend kernel version (release) to the message src-port source for UDP packets (defaults to 6665) src-ip source IP to use (interface address) - dev network interface (eth0) + dev network interface name (eth0) or MAC address tgt-port port for logging agent (6666) tgt-ip IP address for logging agent tgt-macaddr ethernet MAC address for logging agent (broadcast) @@ -62,6 +64,10 @@ or using IPv6:: insmod netconsole netconsole=@/,@fd00:1:2:3::1/ +or using a MAC address to select the egress interface:: + + linux netconsole=4444@10.0.0.1/22:33:44:55:66:77,9353@10.0.0.2/12:34:56:78:9a:bc + It also supports logging to multiple remote agents by specifying parameters for the multiple agents separated by semicolons and the complete string enclosed in "quotes", thusly:: @@ -238,6 +244,102 @@ Delete `userdata` entries with `rmdir`:: It is recommended to not write user data values with newlines. 
+Task name auto population in userdata +------------------------------------- + +Inside the netconsole configfs hierarchy, there is a file called +`taskname_enabled` under the `userdata` directory. This file is used to enable +or disable the automatic task name population feature. This feature +automatically populates the current task name that is scheduled on the CPU +sending the message. + +To enable task name auto-population:: + + echo 1 > /sys/kernel/config/netconsole/target1/userdata/taskname_enabled + +When this option is enabled, the netconsole messages will include an additional +line in the userdata field with the format `taskname=<task name>`. This allows +the receiver of the netconsole messages to easily find which application was +currently scheduled when that message was generated, providing extra context +for kernel messages and helping to categorize them. + +Example:: + + echo "This is a message" > /dev/kmsg + 12,607,22085407756,-;This is a message + taskname=echo + +In this example, the message was generated while "echo" was the currently +scheduled process. + +Kernel release auto population in userdata +------------------------------------------ + +Within the netconsole configfs hierarchy, there is a file named `release_enabled` +located in the `userdata` directory. This file controls the kernel release +(version) auto-population feature, which appends the kernel release information +to the userdata dictionary in every message sent. + +To enable the release auto-population:: + + echo 1 > /sys/kernel/config/netconsole/target1/userdata/release_enabled + +Example:: + + echo "This is a message" > /dev/kmsg + 12,607,22085407756,-;This is a message + release=6.14.0-rc6-01219-g3c027fbd941d + +.. note:: + + This feature provides the same data as the "release prepend" feature. + However, in this case, the release information is appended to the userdata + dictionary rather than being included in the message header. + + +CPU number auto population in userdata +-------------------------------------- + +Inside the netconsole configfs hierarchy, there is a file called +`cpu_nr` under the `userdata` directory. This file is used to enable or disable +the automatic CPU number population feature. This feature automatically +populates the CPU number that is sending the message. + +To enable the CPU number auto-population:: + + echo 1 > /sys/kernel/config/netconsole/target1/userdata/cpu_nr + +When this option is enabled, the netconsole messages will include an additional +line in the userdata field with the format `cpu=<cpu number>`. This allows the +receiver of the netconsole messages to easily differentiate and demultiplex +messages originating from different CPUs, which is particularly useful when +dealing with parallel log output. + +Example:: + + echo "This is a message" > /dev/kmsg + 12,607,22085407756,-;This is a message + cpu=42 + +In this example, the message was sent by CPU 42. + +.. note:: + + If the user has set a conflicting `cpu` key in the userdata dictionary, + both keys will be reported, with the kernel-populated entry appearing after + the user one.
For example:: + + # User-defined CPU entry + mkdir -p /sys/kernel/config/netconsole/target1/userdata/cpu + echo "1" > /sys/kernel/config/netconsole/target1/userdata/cpu/value + + Output might look like:: + + 12,607,22085407756,-;This is a message + cpu=1 + cpu=42 # kernel-populated value + + Extended console: ================= diff --git a/Documentation/networking/netdevices.rst b/Documentation/networking/netdevices.rst index 1d37038e9fbed..ebb868f50ac2b 100644 --- a/Documentation/networking/netdevices.rst +++ b/Documentation/networking/netdevices.rst @@ -210,49 +210,55 @@ packets is preferred. struct net_device synchronization rules ======================================= ndo_open: - Synchronization: rtnl_lock() semaphore. + Synchronization: rtnl_lock() semaphore. In addition, netdev instance + lock if the driver implements queue management or shaper API. Context: process ndo_stop: - Synchronization: rtnl_lock() semaphore. + Synchronization: rtnl_lock() semaphore. In addition, netdev instance + lock if the driver implements queue management or shaper API. Context: process Note: netif_running() is guaranteed false ndo_do_ioctl: Synchronization: rtnl_lock() semaphore. - Context: process - This is only called by network subsystems internally, - not by user space calling ioctl as it was in before - linux-5.14. + This is only called by network subsystems internally, + not by user space calling ioctl as it was in before + linux-5.14. ndo_siocbond: - Synchronization: rtnl_lock() semaphore. + Synchronization: rtnl_lock() semaphore. In addition, netdev instance + lock if the driver implements queue management or shaper API. Context: process - Used by the bonding driver for the SIOCBOND family of - ioctl commands. + Used by the bonding driver for the SIOCBOND family of + ioctl commands. ndo_siocwandev: - Synchronization: rtnl_lock() semaphore. + Synchronization: rtnl_lock() semaphore. In addition, netdev instance + lock if the driver implements queue management or shaper API. Context: process Used by the drivers/net/wan framework to handle the SIOCWANDEV ioctl with the if_settings structure. ndo_siocdevprivate: - Synchronization: rtnl_lock() semaphore. + Synchronization: rtnl_lock() semaphore. In addition, netdev instance + lock if the driver implements queue management or shaper API. Context: process This is used to implement SIOCDEVPRIVATE ioctl helpers. These should not be added to new drivers, so don't use. ndo_eth_ioctl: - Synchronization: rtnl_lock() semaphore. + Synchronization: rtnl_lock() semaphore. In addition, netdev instance + lock if the driver implements queue management or shaper API. Context: process ndo_get_stats: - Synchronization: rtnl_lock() semaphore, or RCU. + Synchronization: RCU (can be called concurrently with the stats + update path). Context: atomic (can't sleep under RCU) ndo_start_xmit: @@ -284,6 +290,16 @@ ndo_set_rx_mode: Synchronization: netif_addr_lock spinlock. Context: BHs disabled +ndo_setup_tc: + ``TC_SETUP_BLOCK`` and ``TC_SETUP_FT`` are running under NFT locks + (i.e. no ``rtnl_lock`` and no device instance lock). The rest of + ``tc_setup_type`` types run under netdev instance lock if the driver + implements queue management or shaper API. + +Most ndo callbacks not specified in the list above are running +under ``rtnl_lock``. In addition, netdev instance lock is taken as well if +the driver implements queue management or shaper API. 
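To make this contract concrete, here is a hedged sketch of a driver callback (the function name and body are illustrative only, not taken from this document; ``ASSERT_RTNL()`` is the stock rtnl assertion helper):

.. code-block:: c

	/* Illustrative only: most ndo callbacks may assume that the core
	 * has already taken rtnl_lock on their behalf. */
	static int example_ndo_eth_ioctl(struct net_device *dev,
					 struct ifreq *ifr, int cmd)
	{
		ASSERT_RTNL();	/* rtnl_lock is held by the core here */

		/* For drivers implementing queue management or the shaper
		 * API, the netdev instance lock is also held at this point,
		 * so per-device state can be touched without extra locking. */
		return -EOPNOTSUPP;	/* placeholder body */
	}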
+ struct napi_struct synchronization rules ======================================== napi->poll: @@ -298,6 +314,35 @@ napi->poll: softirq will be called with interrupts disabled by netconsole. +struct netdev_queue_mgmt_ops synchronization rules +================================================== + +All queue management ndo callbacks are called while holding the netdev +instance lock. + +RTNL and netdev instance lock +============================= + +Historically, all networking control operations were protected by a single +global lock known as ``rtnl_lock``. There is an ongoing effort to replace this +global lock with separate locks for each network namespace. Additionally, +properties of individual netdevs are increasingly protected by per-netdev locks. + +For device drivers that implement shaping or queue management APIs, all control +operations will be performed under the netdev instance lock. Currently, this +instance lock is acquired within the context of ``rtnl_lock``. Drivers +can also explicitly request the instance lock to be acquired via +``request_ops_lock``. In the future, there will be an option for individual +drivers to opt out of using ``rtnl_lock`` and instead perform their control +operations directly under the netdev instance lock. + +Device drivers are encouraged to rely on the instance lock where possible. + +For the (mostly software) drivers that need to interact with the core stack, +there are two sets of interfaces: ``dev_xxx`` and ``netif_xxx`` (e.g., +``dev_set_mtu`` and ``netif_set_mtu``). The ``dev_xxx`` functions handle +acquiring the instance lock themselves, while the ``netif_xxx`` functions +assume that the driver has already acquired the instance lock. + NETDEV_INTERNAL symbol namespace ================================ diff --git a/Documentation/networking/scaling.rst b/Documentation/networking/scaling.rst index 4eb50bcb9d429..99b6a61e5e31c 100644 --- a/Documentation/networking/scaling.rst +++ b/Documentation/networking/scaling.rst @@ -49,14 +49,21 @@ destination address) and TCP/UDP (source port, destination port) tuples are swapped, the computed hash is the same. This is beneficial in some applications that monitor TCP/IP flows (IDS, firewalls, ...etc) and need both directions of the flow to land on the same Rx queue (and CPU). The -"Symmetric-XOR" is a type of RSS algorithms that achieves this hash -symmetry by XORing the input source and destination fields of the IP -and/or L4 protocols. This, however, results in reduced input entropy and -could potentially be exploited. Specifically, the algorithm XORs the input +"Symmetric-XOR" and "Symmetric-OR-XOR" are types of RSS algorithms that +achieve this hash symmetry by XOR/ORing the input source and destination +fields of the IP and/or L4 protocols. This, however, results in reduced +input entropy and could potentially be exploited. + +Specifically, the "Symmetric-XOR" algorithm XORs the input as follows:: # (SRC_IP ^ DST_IP, SRC_IP ^ DST_IP, SRC_PORT ^ DST_PORT, SRC_PORT ^ DST_PORT) +The "Symmetric-OR-XOR" algorithm, on the other hand, transforms the input as +follows:: + + # (SRC_IP | DST_IP, SRC_IP ^ DST_IP, SRC_PORT | DST_PORT, SRC_PORT ^ DST_PORT) + The result is then fed to the underlying RSS algorithm. Some advanced NICs allow steering packets to queues based on @@ -427,8 +434,10 @@ rps_dev_flow_table. The stack consults a CPU to hardware queue map which is maintained by the NIC driver. This is an auto-generated reverse map of the IRQ affinity table shown by /proc/interrupts.
Drivers can use functions in the cpu_rmap (“CPU affinity reverse map”) kernel library -to populate the map. For each CPU, the corresponding queue in the map is -set to be one whose processing CPU is closest in cache locality. +to populate the map. Alternatively, drivers can delegate the cpu_rmap +management to the kernel by calling netif_enable_cpu_rmap(). For each CPU, +the corresponding queue in the map is set to be one whose processing CPU is +closest in cache locality. Accelerated RFS Configuration diff --git a/Documentation/networking/strparser.rst b/Documentation/networking/strparser.rst index 7f623d1db72aa..8dc6bb04c710b 100644 --- a/Documentation/networking/strparser.rst +++ b/Documentation/networking/strparser.rst @@ -180,7 +180,7 @@ There are seven callbacks: struct contains two fields: offset and full_len. Offset is where the message starts in the skb, and full_len is the the length of the message. skb->len - offset may be greater -then full_len since strparser does not trim the skb. +than full_len since strparser does not trim the skb. :: diff --git a/Documentation/networking/switchdev.rst b/Documentation/networking/switchdev.rst index f355f0166f1b4..2966b7122f050 100644 --- a/Documentation/networking/switchdev.rst +++ b/Documentation/networking/switchdev.rst @@ -137,7 +137,7 @@ would be sub-port 0 on port 1 on switch 1. Port Features ^^^^^^^^^^^^^ -dev->netns_local +dev->netns_immutable If the switchdev driver (and device) only supports offloading of the default network namespace (netns), the driver should set this private flag to prevent diff --git a/Documentation/networking/timestamping.rst b/Documentation/networking/timestamping.rst index 61ef9da10e284..b8fef8101176d 100644 --- a/Documentation/networking/timestamping.rst +++ b/Documentation/networking/timestamping.rst @@ -140,6 +140,14 @@ SOF_TIMESTAMPING_TX_ACK: cumulative acknowledgment. The mechanism ignores SACK and FACK. This flag can be enabled via both socket options and control messages. +SOF_TIMESTAMPING_TX_COMPLETION: + Request tx timestamps on packet tx completion. The completion + timestamp is generated by the kernel when it receives a packet + completion report from the hardware. Hardware may report multiple + packets at once, and completion timestamps reflect the timing of the + report and not actual tx time. This flag can be enabled via both + socket options and control messages. + 1.3.2 Timestamp Reporting ^^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/Documentation/networking/xfrm_device.rst b/Documentation/networking/xfrm_device.rst index 66f6e9a9b59ae..7f24c09f26943 100644 --- a/Documentation/networking/xfrm_device.rst +++ b/Documentation/networking/xfrm_device.rst @@ -126,7 +126,8 @@ been setup for offload, it first calls into xdo_dev_offload_ok() with the skb and the intended offload state to ask the driver if the offload will serviceable. This can check the packet information to be sure the offload can be supported (e.g. IPv4 or IPv6, no IPv4 options, etc) and -return true of false to signify its support. +return true or false to signify its support. In case the driver doesn't implement +this callback, the stack provides reasonable defaults.
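As a hedged sketch of such a callback (the specific check is just one example of a hardware limitation; the function name is illustrative, not taken from this document), a driver might decline to offload IPv4 packets that carry IP options:

.. code-block:: c

	/* Illustrative xdo_dev_offload_ok() implementation: return true if
	 * the skb can be offloaded to this device, false otherwise. */
	static bool example_xdo_dev_offload_ok(struct sk_buff *skb,
					       struct xfrm_state *x)
	{
		/* e.g. hardware that cannot parse IPv4 options */
		if (x->props.family == AF_INET && ip_hdr(skb)->ihl > 5)
			return false;

		return true;
	}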
Crypto offload mode: When ready to send, the driver needs to inspect the Tx packet for the diff --git a/Documentation/networking/xsk-tx-metadata.rst b/Documentation/networking/xsk-tx-metadata.rst index e76b0cfc32f7d..df53a10ccac34 100644 --- a/Documentation/networking/xsk-tx-metadata.rst +++ b/Documentation/networking/xsk-tx-metadata.rst @@ -50,6 +50,10 @@ The flags field enables the particular offload: checksum. ``csum_start`` specifies byte offset of where the checksumming should start and ``csum_offset`` specifies byte offset where the device should store the computed checksum. +- ``XDP_TXMD_FLAGS_LAUNCH_TIME``: requests the device to schedule the + packet for transmission at a pre-determined time called launch time. The + value of launch time is indicated by ``launch_time`` field of + ``union xsk_tx_metadata``. Besides the flags above, in order to trigger the offloads, the first packet's ``struct xdp_desc`` descriptor should set ``XDP_TX_METADATA`` @@ -65,6 +69,63 @@ In this case, when running in ``XDK_COPY`` mode, the TX checksum is calculated on the CPU. Do not enable this option in production because it will negatively affect performance. +Launch Time +=========== + +The value of the requested launch time should be based on the device's PTP +Hardware Clock (PHC) to ensure accuracy. AF_XDP takes a different data path +compared to the ETF queuing discipline, which organizes packets and delays +their transmission. Instead, AF_XDP immediately hands off the packets to +the device driver without rearranging their order or holding them prior to +transmission. Since the driver maintains FIFO behavior and does not perform +packet reordering, a packet with a launch time request will block other +packets in the same Tx Queue until it is sent. Therefore, it is recommended +to allocate a separate queue for scheduling traffic that is intended for +future transmission. + +In scenarios where the launch time offload feature is disabled, the device +driver is expected to disregard the launch time request. For correct +interpretation and meaningful operation, the launch time should never be +set to a value larger than the farthest programmable time in the future +(the horizon). Different devices have different hardware limitations on the +launch time offload feature. + +stmmac driver +------------- + +For stmmac, TSO and launch time (TBS) features are mutually exclusive for +each individual Tx Queue. By default, the driver configures Tx Queue 0 to +support TSO and the rest of the Tx Queues to support TBS. The launch time +hardware offload feature can be enabled or disabled by using the tc-etf +command to call the driver's ndo_setup_tc() callback. + +The value of the launch time that is programmed in the Enhanced Normal +Transmit Descriptors is a 32-bit value, where the most significant 8 bits +represent the time in seconds and the remaining 24 bits represent the time +in 256 ns increments. The programmed launch time is compared against the +PTP time (bits[39:8]) and rolls over after 256 seconds. Therefore, the +horizon of the launch time for dwmac4 and dwxlgmac2 is 128 seconds in the +future. + +igc driver +---------- + +For igc, all four Tx Queues support the launch time feature. The launch +time hardware offload feature can be enabled or disabled by using the +tc-etf command to call the driver's ndo_setup_tc() callback. When entering +TSN mode, the igc driver will reset the device and create a default Qbv +schedule with a 1-second cycle time, with all Tx Queues open at all times.
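Whichever driver ends up servicing the queue, the application requests a launch time the same way: by filling the metadata area in front of the packet. A minimal sketch follows (assumptions: the UMEM was configured with a ``tx_metadata_len`` that reserves headroom for ``struct xsk_tx_metadata`` directly before the packet data, and the helper name is hypothetical):

.. code-block:: c

	#include <linux/if_xdp.h>

	/* Hypothetical helper: request transmission of one AF_XDP frame at
	 * a PHC-based launch time. "pkt" points at the packet data; the
	 * metadata area sits immediately before it in the umem frame. */
	static void request_launch_time(void *pkt, struct xdp_desc *desc,
					__u64 launch_time_ns)
	{
		struct xsk_tx_metadata *meta =
			(struct xsk_tx_metadata *)pkt - 1;

		meta->flags = XDP_TXMD_FLAGS_LAUNCH_TIME;
		meta->request.launch_time = launch_time_ns;

		/* tell the kernel that this descriptor carries metadata */
		desc->options |= XDP_TX_METADATA;
	}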
+ +The value of the launch time that is programmed in the Advanced Transmit +Context Descriptor is a relative offset to the starting time of the Qbv +transmission window of the queue. The Frst flag of the descriptor can be +set to schedule the packet for the next Qbv cycle. Therefore, the horizon +of the launch time for i225 and i226 is the ending time of the next cycle +of the Qbv transmission window of the queue. For example, when the Qbv +cycle time is set to 1 second, the horizon of the launch time ranges +from 1 second to 2 seconds, depending on where the Qbv cycle is currently +running. + Querying Device Capabilities ============================ @@ -74,6 +135,7 @@ Refer to ``xsk-flags`` features bitmask in - ``tx-timestamp``: device supports ``XDP_TXMD_FLAGS_TIMESTAMP`` - ``tx-checksum``: device supports ``XDP_TXMD_FLAGS_CHECKSUM`` +- ``tx-launch-time-fifo``: device supports ``XDP_TXMD_FLAGS_LAUNCH_TIME`` See ``tools/net/ynl/samples/netdev.c`` on how to query this information. diff --git a/Documentation/process/maintainer-netdev.rst b/Documentation/process/maintainer-netdev.rst index e497729525d5b..1ac62dc3a66f6 100644 --- a/Documentation/process/maintainer-netdev.rst +++ b/Documentation/process/maintainer-netdev.rst @@ -311,6 +311,14 @@ to the mailing list, e.g.:: Posting as one thread is discouraged because it confuses patchwork (as of patchwork 2.2.2). +Co-posting selftests +-------------------- + +Selftests should be part of the same series as the code changes. +Specifically, for fixes, both the code change and the related test should go +into the same tree (the tests may lack a Fixes tag, which is expected). +Mixing code changes and test changes in a single commit is discouraged. + Preparing changes ----------------- diff --git a/Documentation/rust/quick-start.rst b/Documentation/rust/quick-start.rst index 4aa50e5fcb8c0..6d2607870ba44 100644 --- a/Documentation/rust/quick-start.rst +++ b/Documentation/rust/quick-start.rst @@ -145,7 +145,7 @@ Rust standard library source **************************** The Rust standard library source is required because the build system will -cross-compile ``core`` and ``alloc``. +cross-compile ``core``. If ``rustup`` is being used, run:: diff --git a/Documentation/rust/testing.rst b/Documentation/rust/testing.rst index 568b71b415a45..180b886e0f1ee 100644 --- a/Documentation/rust/testing.rst +++ b/Documentation/rust/testing.rst @@ -97,7 +97,7 @@ operator are also supported as usual, e.g.: /// ``` /// # use kernel::{spawn_work_item, workqueue}; - /// spawn_work_item!(workqueue::system(), || pr_info!("x"))?; + /// spawn_work_item!(workqueue::system(), || pr_info!("x\n"))?; /// # Ok::<(), Error>(()) /// ``` diff --git a/Documentation/scheduler/sched-rt-group.rst b/Documentation/scheduler/sched-rt-group.rst index 80b05a3009ea2..ab464335d3204 100644 --- a/Documentation/scheduler/sched-rt-group.rst +++ b/Documentation/scheduler/sched-rt-group.rst @@ -102,6 +102,9 @@ The system wide settings are configured under the /proc virtual file system: * sched_rt_period_us takes values from 1 to INT_MAX. * sched_rt_runtime_us takes values from -1 to sched_rt_period_us. * A run time of -1 specifies runtime == period, ie. no limit. + * sched_rt_runtime_us/sched_rt_period_us > 0.05 in order to preserve + bandwidth for the fair dl_server.
For an accurate value, check the average of + runtime/period in /sys/kernel/debug/sched/fair_server/cpuX/ 2.2 Default behaviour diff --git a/Documentation/userspace-api/landlock.rst b/Documentation/userspace-api/landlock.rst index d639c61cb472a..ad587f53fe417 100644 --- a/Documentation/userspace-api/landlock.rst +++ b/Documentation/userspace-api/landlock.rst @@ -8,7 +8,7 @@ Landlock: unprivileged access control ===================================== :Author: Mickaël Salaün -:Date: October 2024 +:Date: January 2025 The goal of Landlock is to enable restriction of ambient rights (e.g. global filesystem or network access) for a set of processes. Because Landlock @@ -329,11 +329,11 @@ non-sandboxed process, we can specify this restriction with A sandboxed process can connect to a non-sandboxed process when its domain is not scoped. If a process's domain is scoped, it can only connect to sockets created by processes in the same scope. -Moreover, If a process is scoped to send signal to a non-scoped process, it can +Moreover, if a process is scoped to send signal to a non-scoped process, it can only send signals to processes in the same scope. A connected datagram socket behaves like a stream socket when its domain is -scoped, meaning if the domain is scoped after the socket is connected , it can +scoped, meaning if the domain is scoped after the socket is connected, it can still :manpage:`send(2)` data just like a stream socket. However, in the same scenario, a non-connected datagram socket cannot send data (with :manpage:`sendto(2)`) outside its scope. diff --git a/MAINTAINERS b/MAINTAINERS index 3864d473f52f2..1e3752956eede 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -124,6 +124,7 @@ F: include/net/ieee80211_radiotap.h F: include/net/iw_handler.h F: include/net/wext.h F: include/uapi/linux/nl80211.h +N: include/uapi/linux/nl80211-.* F: include/uapi/linux/wireless.h F: net/wireless/ @@ -514,7 +515,7 @@ F: drivers/hwmon/adm1029.c ADM8211 WIRELESS DRIVER L: linux-wireless@vger.kernel.org S: Orphan -F: drivers/net/wireless/admtek/adm8211.* +F: drivers/net/wireless/admtek/ ADP1050 HARDWARE MONITOR DRIVER M: Radu Sabau @@ -726,7 +727,7 @@ L: linux-mediatek@lists.infradead.org (moderated for non-subscribers) L: netdev@vger.kernel.org S: Maintained F: Documentation/devicetree/bindings/net/airoha,en7581-eth.yaml -F: drivers/net/ethernet/mediatek/airoha_eth.c +F: drivers/net/ethernet/airoha/ AIROHA PCIE PHY DRIVER M: Lorenzo Bianconi @@ -1046,14 +1047,14 @@ F: drivers/crypto/ccp/hsti.* AMD DISPLAY CORE M: Harry Wentland M: Leo Li -M: Rodrigo Siqueira +R: Rodrigo Siqueira L: amd-gfx@lists.freedesktop.org S: Supported T: git https://gitlab.freedesktop.org/agd5f/linux.git F: drivers/gpu/drm/amd/display/ AMD DISPLAY CORE - DML -M: Chaitanya Dhere +M: Austin Zheng M: Jun Lei S: Supported F: drivers/gpu/drm/amd/display/dc/dml/ @@ -2210,7 +2211,9 @@ F: sound/soc/codecs/ssm3515.c ARM/APPLE MACHINE SUPPORT M: Sven Peter +M: Janne Grunau R: Alyssa Rosenzweig +R: Neal Gompa L: asahi@lists.linux.dev L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained @@ -2235,6 +2238,7 @@ F: Documentation/devicetree/bindings/pci/apple,pcie.yaml F: Documentation/devicetree/bindings/pinctrl/apple,pinctrl.yaml F: Documentation/devicetree/bindings/power/apple* F: Documentation/devicetree/bindings/pwm/apple,s5l-fpwm.yaml +F: Documentation/devicetree/bindings/spi/apple,spi.yaml F: Documentation/devicetree/bindings/watchdog/apple,wdt.yaml F: arch/arm64/boot/dts/apple/ F: drivers/bluetooth/hci_bcm4377.c @@
-2252,6 +2256,7 @@ F: drivers/nvmem/apple-efuses.c F: drivers/pinctrl/pinctrl-apple-gpio.c F: drivers/pwm/pwm-apple.c F: drivers/soc/apple/* +F: drivers/spi/spi-apple.c F: drivers/watchdog/apple_wdt.c F: include/dt-bindings/interrupt-controller/apple-aic.h F: include/dt-bindings/pinctrl/apple.h @@ -2284,7 +2289,7 @@ F: drivers/irqchip/irq-aspeed-i2c-ic.c ARM/ASPEED MACHINE SUPPORT M: Joel Stanley -R: Andrew Jeffery +M: Andrew Jeffery L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) L: linux-aspeed@lists.ozlabs.org (moderated for non-subscribers) S: Supported @@ -2877,7 +2882,7 @@ F: drivers/pinctrl/nxp/ ARM/NXP S32G/S32R DWMAC ETHERNET DRIVER M: Jan Petrous -L: NXP S32 Linux Team +R: s32@nxp.com S: Maintained F: Documentation/devicetree/bindings/net/nxp,s32-dwmac.yaml F: drivers/net/ethernet/stmicro/stmmac/dwmac-s32.c @@ -5774,6 +5779,7 @@ X: drivers/clk/clkdev.c COMMON INTERNET FILE SYSTEM CLIENT (CIFS and SMB3) M: Steve French +M: Steve French R: Paulo Alcantara (DFS, global name space) R: Ronnie Sahlberg (directory leases, sparse files) R: Shyam Prasad N (multichannel) @@ -5855,7 +5861,6 @@ F: Documentation/security/snp-tdx-threat-model.rst CONFIGFS M: Joel Becker -M: Christoph Hellwig S: Supported T: git git://git.infradead.org/users/hch/configfs.git F: fs/configfs/ @@ -5926,6 +5931,17 @@ F: tools/testing/selftests/cgroup/test_cpuset.c F: tools/testing/selftests/cgroup/test_cpuset_prs.sh F: tools/testing/selftests/cgroup/test_cpuset_v1_base.sh +CONTROL GROUP - DEVICE MEMORY CONTROLLER (DMEM) +M: Maarten Lankhorst +M: Maxime Ripard +M: Natalie Vock +L: cgroups@vger.kernel.org +L: dri-devel@lists.freedesktop.org +S: Maintained +T: git https://gitlab.freedesktop.org/drm/misc/kernel.git +F: include/linux/cgroup_dmem.h +F: kernel/cgroup/dmem.c + CONTROL GROUP - MEMORY RESOURCE CONTROLLER (MEMCG) M: Johannes Weiner M: Michal Hocko @@ -6195,7 +6211,7 @@ F: Documentation/process/cve.rst CW1200 WLAN driver S: Orphan -F: drivers/net/wireless/st/cw1200/ +F: drivers/net/wireless/st/ F: include/linux/platform_data/net-cw1200.h CX18 VIDEO4LINUX DRIVER @@ -6878,7 +6894,6 @@ F: kernel/dma/map_benchmark.c F: tools/testing/selftests/dma/ DMA MAPPING HELPERS -M: Christoph Hellwig M: Marek Szyprowski R: Robin Murphy L: iommu@lists.linux.dev @@ -7425,7 +7440,6 @@ F: Documentation/devicetree/bindings/display/panel/novatek,nt36672a.yaml F: drivers/gpu/drm/panel/panel-novatek-nt36672a.c DRM DRIVER FOR NVIDIA GEFORCE/QUADRO GPUS -M: Karol Herbst M: Lyude Paul M: Danilo Krummrich L: dri-devel@lists.freedesktop.org @@ -8579,12 +8593,14 @@ F: Documentation/networking/devlink/etas_es58x.rst F: drivers/net/can/usb/etas_es58x/ ETHERNET BRIDGE -M: Roopa Prabhu M: Nikolay Aleksandrov +M: Ido Schimmel L: bridge@lists.linux.dev L: netdev@vger.kernel.org S: Maintained W: http://www.linuxfoundation.org/en/Net:Bridge +F: include/linux/if_bridge.h +F: include/uapi/linux/if_bridge.h F: include/linux/netfilter_bridge/ F: net/bridge/ @@ -8633,7 +8649,6 @@ F: rust/kernel/net/phy/reg.rs EXEC & BINFMT API, ELF M: Kees Cook -R: Eric Biederman L: linux-mm@kvack.org S: Supported T: git git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux.git for-next/execve @@ -9433,14 +9448,11 @@ F: include/linux/fscrypt.h F: include/uapi/linux/fscrypt.h FSI SUBSYSTEM -M: Jeremy Kerr -M: Joel Stanley -R: Alistar Popple -R: Eddie James +M: Eddie James +R: Ninad Palsule L: linux-fsi@lists.ozlabs.org S: Supported Q: http://patchwork.ozlabs.org/project/linux-fsi/list/ -T: git 
git://git.kernel.org/pub/scm/linux/kernel/git/joel/fsi.git F: drivers/fsi/ F: include/linux/fsi*.h F: include/trace/events/fsi*.h @@ -9821,7 +9833,6 @@ S: Maintained F: drivers/media/usb/go7007/ GOODIX TOUCHSCREEN -M: Bastien Nocera M: Hans de Goede L: linux-input@vger.kernel.org S: Maintained @@ -10689,6 +10700,13 @@ S: Maintained F: Documentation/networking/device_drivers/ethernet/huawei/hinic.rst F: drivers/net/ethernet/huawei/hinic/ +HUAWEI 3RD GEN ETHERNET DRIVER +M: Fan Gong +L: netdev@vger.kernel.org +S: Maintained +F: Documentation/networking/device_drivers/ethernet/huawei/hinic3.rst +F: drivers/net/ethernet/huawei/hinic3/ + HUGETLB SUBSYSTEM M: Muchun Song L: linux-mm@kvack.org @@ -11133,7 +11151,7 @@ S: Maintained F: drivers/i2c/busses/i2c-icy.c IDEAPAD LAPTOP EXTRAS DRIVER -M: Ike Panhc +M: Ike Panhc L: platform-driver-x86@vger.kernel.org S: Maintained W: http://launchpad.net/ideapad-laptop @@ -11864,6 +11882,7 @@ L: platform-driver-x86@vger.kernel.org S: Maintained F: Documentation/ABI/testing/sysfs-platform-intel-pmc F: drivers/platform/x86/intel/pmc/ +F: include/linux/platform_data/x86/intel_pmc_ipc.h INTEL PMIC GPIO DRIVERS M: Andy Shevchenko @@ -12646,7 +12665,9 @@ F: tools/testing/selftests/ KERNEL SMB3 SERVER (KSMBD) M: Namjae Jeon +M: Namjae Jeon M: Steve French +M: Steve French R: Sergey Senozhatsky R: Tom Talpey L: linux-cifs@vger.kernel.org @@ -12815,9 +12836,7 @@ F: fs/kernfs/ F: include/linux/kernfs.h KEXEC -M: Eric Biederman L: kexec@lists.infradead.org -S: Maintained W: http://kernel.org/pub/linux/utils/kernel/kexec/ F: include/linux/kexec.h F: include/uapi/linux/kexec.h @@ -12863,7 +12882,7 @@ F: include/keys/trusted_dcp.h F: security/keys/trusted-keys/trusted_dcp.c KEYS-TRUSTED-TEE -M: Sumit Garg +M: Sumit Garg L: linux-integrity@vger.kernel.org L: keyrings@vger.kernel.org S: Supported @@ -13743,12 +13762,10 @@ F: drivers/hwmon/ltc4282.c LTC4286 HARDWARE MONITOR DRIVER M: Delphine CC Chiu -L: linux-i2c@vger.kernel.org +L: linux-hwmon@vger.kernel.org S: Maintained F: Documentation/devicetree/bindings/hwmon/lltc,ltc4286.yaml F: Documentation/hwmon/ltc4286.rst -F: drivers/hwmon/pmbus/Kconfig -F: drivers/hwmon/pmbus/Makefile F: drivers/hwmon/pmbus/ltc4286.c LTC4306 I2C MULTIPLEXER DRIVER @@ -13896,6 +13913,7 @@ L: netdev@vger.kernel.org S: Maintained F: Documentation/networking/mctp.rst F: drivers/net/mctp/ +F: include/linux/usb/mctp-usb.h F: include/net/mctp.h F: include/net/mctpdevice.h F: include/net/netns/mctp.h @@ -13985,6 +14003,7 @@ MARVELL LIBERTAS WIRELESS DRIVER L: libertas-dev@lists.infradead.org S: Orphan F: drivers/net/wireless/marvell/libertas/ +F: drivers/net/wireless/marvell/libertas_tf/ MARVELL MACCHIATOBIN SUPPORT M: Russell King @@ -15654,7 +15673,7 @@ M: Ajay Singh M: Claudiu Beznea L: linux-wireless@vger.kernel.org S: Supported -F: drivers/net/wireless/microchip/wilc1000/ +F: drivers/net/wireless/microchip/ MICROSEMI MIPS SOCS M: Alexandre Belloni @@ -15682,7 +15701,7 @@ F: include/uapi/linux/cciss*.h MICROSOFT MANA RDMA DRIVER M: Long Li -M: Ajay Sharma +M: Konstantin Taranov L: linux-rdma@vger.kernel.org S: Supported F: drivers/infiniband/hw/mana/ @@ -16440,6 +16459,23 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/wireless/wireless.git T: git git://git.kernel.org/pub/scm/linux/kernel/git/wireless/wireless-next.git F: Documentation/devicetree/bindings/net/wireless/ F: drivers/net/wireless/ +X: drivers/net/wireless/ath/ +X: drivers/net/wireless/broadcom/ +X: drivers/net/wireless/intel/ +X: drivers/net/wireless/intersil/ +X: 
drivers/net/wireless/marvell/ +X: drivers/net/wireless/mediatek/mt76/ +X: drivers/net/wireless/mediatek/mt7601u/ +X: drivers/net/wireless/microchip/ +X: drivers/net/wireless/purelifi/ +X: drivers/net/wireless/quantenna/ +X: drivers/net/wireless/ralink/ +X: drivers/net/wireless/realtek/ +X: drivers/net/wireless/rsi/ +X: drivers/net/wireless/silabs/ +X: drivers/net/wireless/st/ +X: drivers/net/wireless/ti/ +X: drivers/net/wireless/zydas/ NETWORKING [DSA] M: Andrew Lunn @@ -16635,6 +16671,17 @@ F: net/mptcp/ F: tools/testing/selftests/bpf/*/*mptcp*.[ch] F: tools/testing/selftests/net/mptcp/ +NETWORKING [SRv6] +M: Andrea Mayer +L: netdev@vger.kernel.org +S: Maintained +T: git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net.git +F: include/linux/seg6* +F: include/net/seg6* +F: include/uapi/linux/seg6* +F: net/ipv6/seg6* +F: tools/testing/selftests/net/srv6* + NETWORKING [TCP] M: Eric Dumazet M: Neal Cardwell @@ -17663,7 +17710,7 @@ F: Documentation/ABI/testing/sysfs-bus-optee-devices F: drivers/tee/optee/ OP-TEE RANDOM NUMBER GENERATOR (RNG) DRIVER -M: Sumit Garg +M: Sumit Garg L: op-tee@lists.trustedfirmware.org S: Maintained F: drivers/char/hw_random/optee-rng.c @@ -17750,6 +17797,17 @@ F: arch/openrisc/ F: drivers/irqchip/irq-ompic.c F: drivers/irqchip/irq-or1k-* +OPENVPN DATA CHANNEL OFFLOAD +M: Antonio Quartulli +L: openvpn-devel@lists.sourceforge.net (subscribers-only) +L: netdev@vger.kernel.org +S: Supported +T: git https://github.com/OpenVPN/linux-kernel-ovpn.git +F: Documentation/netlink/specs/ovpn.yaml +F: drivers/net/ovpn/ +F: include/uapi/linux/ovpn.h +F: tools/testing/selftests/net/ovpn/ + OPENVSWITCH M: Pravin B Shelar L: netdev@vger.kernel.org @@ -17824,7 +17882,7 @@ M: Christian Lamparter L: linux-wireless@vger.kernel.org S: Maintained W: https://wireless.wiki.kernel.org/en/users/Drivers/p54 -F: drivers/net/wireless/intersil/p54/ +F: drivers/net/wireless/intersil/ PACKET SOCKETS M: Willem de Bruijn @@ -19101,7 +19159,7 @@ PURELIFI PLFXLC DRIVER M: Srinivasan Raju L: linux-wireless@vger.kernel.org S: Supported -F: drivers/net/wireless/purelifi/plfxlc/ +F: drivers/net/wireless/purelifi/ PVRUSB2 VIDEO4LINUX DRIVER M: Mike Isely @@ -19652,12 +19710,11 @@ M: Igor Mitsyanko R: Sergey Matyukevich L: linux-wireless@vger.kernel.org S: Maintained -F: drivers/net/wireless/quantenna +F: drivers/net/wireless/quantenna/ RADEON and AMDGPU DRM DRIVERS M: Alex Deucher M: Christian König -M: Xinhui Pan L: amd-gfx@lists.freedesktop.org S: Supported B: https://gitlab.freedesktop.org/drm/amd/-/issues @@ -19733,7 +19790,7 @@ RALINK RT2X00 WIRELESS LAN DRIVER M: Stanislaw Gruszka L: linux-wireless@vger.kernel.org S: Maintained -F: drivers/net/wireless/ralink/rt2x00/ +F: drivers/net/wireless/ralink/ RAMDISK RAM BLOCK DEVICE DRIVER M: Jens Axboe @@ -19879,7 +19936,7 @@ F: net/rds/ F: tools/testing/selftests/net/rds/ RDT - RESOURCE ALLOCATION -M: Fenghua Yu +M: Tony Luck M: Reinette Chatre L: linux-kernel@vger.kernel.org S: Supported @@ -20330,6 +20387,7 @@ RISC-V ARCHITECTURE M: Paul Walmsley M: Palmer Dabbelt M: Albert Ou +R: Alexandre Ghiti L: linux-riscv@lists.infradead.org S: Supported Q: https://patchwork.kernel.org/project/linux-riscv/list/ @@ -21080,6 +21138,7 @@ F: include/linux/clk/samsung.h SAMSUNG SPI DRIVERS M: Andi Shyti +R: Tudor Ambarus L: linux-spi@vger.kernel.org L: linux-samsung-soc@vger.kernel.org S: Maintained @@ -21490,7 +21549,6 @@ F: include/linux/slimbus.h SFC NETWORK DRIVER M: Edward Cree -M: Martin Habets L: netdev@vger.kernel.org L: linux-net-drivers@amd.com S: 
Maintained @@ -21699,7 +21757,7 @@ SILICON LABS WIRELESS DRIVERS (for WFxxx series) M: Jérôme Pouiller S: Supported F: Documentation/devicetree/bindings/net/wireless/silabs,wfx.yaml -F: drivers/net/wireless/silabs/wfx/ +F: drivers/net/wireless/silabs/ SILICON MOTION SM712 FRAME BUFFER DRIVER M: Sudip Mukherjee @@ -21923,10 +21981,13 @@ F: sound/soc/uniphier/ SOCKET TIMESTAMPING M: Willem de Bruijn +R: Jason Xing S: Maintained F: Documentation/networking/timestamping.rst F: include/linux/net_tstamp.h F: include/uapi/linux/net_tstamp.h +F: tools/testing/selftests/bpf/*/net_timestamping* +F: tools/testing/selftests/net/*timestamp* F: tools/testing/selftests/net/so_txtime.c SOEKRIS NET48XX LED SUPPORT @@ -23273,7 +23334,7 @@ F: include/media/i2c/tw9910.h TEE SUBSYSTEM M: Jens Wiklander -R: Sumit Garg +R: Sumit Garg L: op-tee@lists.trustedfirmware.org S: Maintained F: Documentation/ABI/testing/sysfs-class-tee @@ -24069,7 +24130,6 @@ F: tools/testing/selftests/ftrace/ TRACING MMIO ACCESSES (MMIOTRACE) M: Steven Rostedt M: Masami Hiramatsu -R: Karol Herbst R: Pekka Paalanen L: linux-kernel@vger.kernel.org L: nouveau@lists.freedesktop.org @@ -24159,7 +24219,7 @@ W: http://vtun.sourceforge.net/tun F: Documentation/networking/tuntap.rst F: arch/um/os-Linux/drivers/ F: drivers/net/tap.c -F: drivers/net/tun.c +F: drivers/net/tun* TURBOCHANNEL SUBSYSTEM M: "Maciej W. Rozycki" @@ -26170,6 +26230,13 @@ S: Maintained F: Documentation/input/devices/yealink.rst F: drivers/input/misc/yealink.* +YUNSILICON XSC DRIVERS +M: Honggang Wei +M: Xin Tian +L: netdev@vger.kernel.org +S: Maintained +F: drivers/net/ethernet/yunsilicon/xsc + Z3FOLD COMPRESSED PAGE ALLOCATOR M: Vitaly Wool R: Miaohe Lin @@ -26197,7 +26264,7 @@ F: mm/zbud.c ZD1211RW WIRELESS DRIVER L: linux-wireless@vger.kernel.org S: Orphan -F: drivers/net/wireless/zydas/zd1211rw/ +F: drivers/net/wireless/zydas/ ZD1301 MEDIA DRIVER L: linux-media@vger.kernel.org diff --git a/Makefile b/Makefile index 96407c1d6be16..50694bbbf828e 100644 --- a/Makefile +++ b/Makefile @@ -2,7 +2,7 @@ VERSION = 6 PATCHLEVEL = 14 SUBLEVEL = 0 -EXTRAVERSION = -rc3 +EXTRAVERSION = -rc7 NAME = Baby Opossum Posse # *DOCUMENTATION* @@ -1123,6 +1123,11 @@ endif KBUILD_USERCFLAGS += $(filter -m32 -m64 --target=%, $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS)) KBUILD_USERLDFLAGS += $(filter -m32 -m64 --target=%, $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS)) +# userspace programs are linked via the compiler, use the correct linker +ifeq ($(CONFIG_CC_IS_CLANG)$(CONFIG_LD_IS_LLD),yy) +KBUILD_USERLDFLAGS += --ld-path=$(LD) +endif + # make the checker run with the right architecture CHECKFLAGS += --arch=$(ARCH) diff --git a/arch/arm/boot/dts/broadcom/bcm2711-rpi.dtsi b/arch/arm/boot/dts/broadcom/bcm2711-rpi.dtsi index 6bf4241fe3b73..c78ed064d1667 100644 --- a/arch/arm/boot/dts/broadcom/bcm2711-rpi.dtsi +++ b/arch/arm/boot/dts/broadcom/bcm2711-rpi.dtsi @@ -1,7 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 #include "bcm2835-rpi.dtsi" -#include #include / { @@ -101,7 +100,3 @@ &vchiq { interrupts = ; }; - -&xhci { - power-domains = <&power RPI_POWER_DOMAIN_USB>; -}; diff --git a/arch/arm/boot/dts/broadcom/bcm2711.dtsi b/arch/arm/boot/dts/broadcom/bcm2711.dtsi index e4e42af21ef3a..c06d9f5e53c80 100644 --- a/arch/arm/boot/dts/broadcom/bcm2711.dtsi +++ b/arch/arm/boot/dts/broadcom/bcm2711.dtsi @@ -134,7 +134,7 @@ clocks = <&clocks BCM2835_CLOCK_UART>, <&clocks BCM2835_CLOCK_VPU>; clock-names = "uartclk", "apb_pclk"; - arm,primecell-periphid = <0x00241011>; + arm,primecell-periphid = <0x00341011>; status = 
"disabled"; }; @@ -145,7 +145,7 @@ clocks = <&clocks BCM2835_CLOCK_UART>, <&clocks BCM2835_CLOCK_VPU>; clock-names = "uartclk", "apb_pclk"; - arm,primecell-periphid = <0x00241011>; + arm,primecell-periphid = <0x00341011>; status = "disabled"; }; @@ -156,7 +156,7 @@ clocks = <&clocks BCM2835_CLOCK_UART>, <&clocks BCM2835_CLOCK_VPU>; clock-names = "uartclk", "apb_pclk"; - arm,primecell-periphid = <0x00241011>; + arm,primecell-periphid = <0x00341011>; status = "disabled"; }; @@ -167,7 +167,7 @@ clocks = <&clocks BCM2835_CLOCK_UART>, <&clocks BCM2835_CLOCK_VPU>; clock-names = "uartclk", "apb_pclk"; - arm,primecell-periphid = <0x00241011>; + arm,primecell-periphid = <0x00341011>; status = "disabled"; }; @@ -451,8 +451,6 @@ IRQ_TYPE_LEVEL_LOW)>, ; - /* This only applies to the ARMv7 stub */ - arm,cpu-registers-not-fw-configured; }; cpus: cpus { @@ -610,6 +608,7 @@ #address-cells = <1>; #size-cells = <0>; interrupts = ; + power-domains = <&pm BCM2835_POWER_DOMAIN_USB>; /* DWC2 and this IP block share the same USB PHY, * enabling both at the same time results in lockups. * So keep this node disabled and let the bootloader @@ -1177,6 +1176,7 @@ }; &uart0 { + arm,primecell-periphid = <0x00341011>; interrupts = ; }; diff --git a/arch/arm/boot/dts/broadcom/bcm4709-asus-rt-ac3200.dts b/arch/arm/boot/dts/broadcom/bcm4709-asus-rt-ac3200.dts index 53cb0c58f6d05..3da2daee0c849 100644 --- a/arch/arm/boot/dts/broadcom/bcm4709-asus-rt-ac3200.dts +++ b/arch/arm/boot/dts/broadcom/bcm4709-asus-rt-ac3200.dts @@ -124,19 +124,19 @@ }; port@1 { - label = "lan1"; + label = "lan4"; }; port@2 { - label = "lan2"; + label = "lan3"; }; port@3 { - label = "lan3"; + label = "lan2"; }; port@4 { - label = "lan4"; + label = "lan1"; }; }; }; diff --git a/arch/arm/boot/dts/broadcom/bcm47094-asus-rt-ac5300.dts b/arch/arm/boot/dts/broadcom/bcm47094-asus-rt-ac5300.dts index 6c666dc7ad23e..01ec8c03686a6 100644 --- a/arch/arm/boot/dts/broadcom/bcm47094-asus-rt-ac5300.dts +++ b/arch/arm/boot/dts/broadcom/bcm47094-asus-rt-ac5300.dts @@ -126,11 +126,11 @@ ports { port@0 { - label = "lan4"; + label = "wan"; }; port@1 { - label = "lan3"; + label = "lan1"; }; port@2 { @@ -138,11 +138,11 @@ }; port@3 { - label = "lan1"; + label = "lan3"; }; port@4 { - label = "wan"; + label = "lan4"; }; }; }; diff --git a/arch/arm/boot/dts/nxp/imx/imx6qdl-apalis.dtsi b/arch/arm/boot/dts/nxp/imx/imx6qdl-apalis.dtsi index dffab5aa8b9ca..88be29166c1ae 100644 --- a/arch/arm/boot/dts/nxp/imx/imx6qdl-apalis.dtsi +++ b/arch/arm/boot/dts/nxp/imx/imx6qdl-apalis.dtsi @@ -108,6 +108,11 @@ }; }; + poweroff { + compatible = "regulator-poweroff"; + cpu-supply = <&vgen2_reg>; + }; + reg_module_3v3: regulator-module-3v3 { compatible = "regulator-fixed"; regulator-always-on; @@ -236,10 +241,6 @@ status = "disabled"; }; -&clks { - fsl,pmic-stby-poweroff; -}; - /* Apalis SPI1 */ &ecspi1 { cs-gpios = <&gpio5 25 GPIO_ACTIVE_LOW>; @@ -527,7 +528,6 @@ pmic: pmic@8 { compatible = "fsl,pfuze100"; - fsl,pmic-stby-poweroff; reg = <0x08>; regulators { diff --git a/arch/arm/boot/dts/st/stm32mp151.dtsi b/arch/arm/boot/dts/st/stm32mp151.dtsi index b9a87fbe971d6..0daa8ffe2ff5d 100644 --- a/arch/arm/boot/dts/st/stm32mp151.dtsi +++ b/arch/arm/boot/dts/st/stm32mp151.dtsi @@ -1781,7 +1781,6 @@ st,syscon = <&syscfg 0x4>; snps,mixed-burst; snps,pbl = <2>; - snps,en-tx-lpi-clockgating; snps,axi-config = <&stmmac_axi_config_0>; snps,tso; access-controllers = <&etzpc 94>; diff --git a/arch/arm/mach-davinci/Kconfig b/arch/arm/mach-davinci/Kconfig index 2a8a9fe46586d..3fa15f3422409 100644 --- 
a/arch/arm/mach-davinci/Kconfig +++ b/arch/arm/mach-davinci/Kconfig @@ -27,6 +27,7 @@ config ARCH_DAVINCI_DA830 config ARCH_DAVINCI_DA850 bool "DA850/OMAP-L138/AM18x based system" + select ARCH_DAVINCI_DA8XX select DAVINCI_CP_INTC config ARCH_DAVINCI_DA8XX diff --git a/arch/arm/mach-omap1/Kconfig b/arch/arm/mach-omap1/Kconfig index a643b71e30a35..08ec6bd84ada5 100644 --- a/arch/arm/mach-omap1/Kconfig +++ b/arch/arm/mach-omap1/Kconfig @@ -8,6 +8,7 @@ menuconfig ARCH_OMAP1 select ARCH_OMAP select CLKSRC_MMIO select FORCE_PCI if PCCARD + select GENERIC_IRQ_CHIP select GPIOLIB help Support for older TI OMAP1 (omap7xx, omap15xx or omap16xx) diff --git a/arch/arm/mach-shmobile/headsmp.S b/arch/arm/mach-shmobile/headsmp.S index a956b489b6ea1..2bc7e73a8582d 100644 --- a/arch/arm/mach-shmobile/headsmp.S +++ b/arch/arm/mach-shmobile/headsmp.S @@ -136,6 +136,7 @@ ENDPROC(shmobile_smp_sleep) .long shmobile_smp_arg - 1b .bss + .align 2 .globl shmobile_smp_mpidr shmobile_smp_mpidr: .space NR_CPUS * 4 diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c index 2bec87c3327d2..39fd5df733178 100644 --- a/arch/arm/mm/fault-armv.c +++ b/arch/arm/mm/fault-armv.c @@ -62,7 +62,7 @@ static int do_adjust_pte(struct vm_area_struct *vma, unsigned long address, } static int adjust_pte(struct vm_area_struct *vma, unsigned long address, - unsigned long pfn, struct vm_fault *vmf) + unsigned long pfn, bool need_lock) { spinlock_t *ptl; pgd_t *pgd; @@ -99,12 +99,11 @@ static int adjust_pte(struct vm_area_struct *vma, unsigned long address, if (!pte) return 0; - /* - * If we are using split PTE locks, then we need to take the page - * lock here. Otherwise we are using shared mm->page_table_lock - * which is already locked, thus cannot take it. - */ - if (ptl != vmf->ptl) { + if (need_lock) { + /* + * Use nested version here to indicate that we are already + * holding one similar spinlock. + */ spin_lock_nested(ptl, SINGLE_DEPTH_NESTING); if (unlikely(!pmd_same(pmdval, pmdp_get_lockless(pmd)))) { pte_unmap_unlock(pte, ptl); @@ -114,7 +113,7 @@ static int adjust_pte(struct vm_area_struct *vma, unsigned long address, ret = do_adjust_pte(vma, address, pfn, pte); - if (ptl != vmf->ptl) + if (need_lock) spin_unlock(ptl); pte_unmap(pte); @@ -123,9 +122,10 @@ static int adjust_pte(struct vm_area_struct *vma, unsigned long address, static void make_coherent(struct address_space *mapping, struct vm_area_struct *vma, - unsigned long addr, pte_t *ptep, unsigned long pfn, - struct vm_fault *vmf) + unsigned long addr, pte_t *ptep, unsigned long pfn) { + const unsigned long pmd_start_addr = ALIGN_DOWN(addr, PMD_SIZE); + const unsigned long pmd_end_addr = pmd_start_addr + PMD_SIZE; struct mm_struct *mm = vma->vm_mm; struct vm_area_struct *mpnt; unsigned long offset; @@ -141,6 +141,14 @@ make_coherent(struct address_space *mapping, struct vm_area_struct *vma, */ flush_dcache_mmap_lock(mapping); vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) { + /* + * If we are using split PTE locks, then we need to take the pte + * lock. Otherwise we are using shared mm->page_table_lock which + * is already locked, thus cannot take it. + */ + bool need_lock = IS_ENABLED(CONFIG_SPLIT_PTE_PTLOCKS); + unsigned long mpnt_addr; + /* * If this VMA is not in our MM, we can ignore it. 
* Note that we intentionally mask out the VMA @@ -151,7 +159,12 @@ make_coherent(struct address_space *mapping, struct vm_area_struct *vma, if (!(mpnt->vm_flags & VM_MAYSHARE)) continue; offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT; - aliases += adjust_pte(mpnt, mpnt->vm_start + offset, pfn, vmf); + mpnt_addr = mpnt->vm_start + offset; + + /* Avoid deadlocks by not grabbing the same PTE lock again. */ + if (mpnt_addr >= pmd_start_addr && mpnt_addr < pmd_end_addr) + need_lock = false; + aliases += adjust_pte(mpnt, mpnt_addr, pfn, need_lock); } flush_dcache_mmap_unlock(mapping); if (aliases) @@ -194,7 +207,7 @@ void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma, __flush_dcache_folio(mapping, folio); if (mapping) { if (cache_is_vivt()) - make_coherent(mapping, vma, addr, ptep, pfn, vmf); + make_coherent(mapping, vma, addr, ptep, pfn); else if (vma->vm_flags & VM_EXEC) __flush_icache_all(); } diff --git a/arch/arm64/boot/dts/broadcom/bcm2712.dtsi b/arch/arm64/boot/dts/broadcom/bcm2712.dtsi index 689c82b7f596a..9e610a89a3378 100644 --- a/arch/arm64/boot/dts/broadcom/bcm2712.dtsi +++ b/arch/arm64/boot/dts/broadcom/bcm2712.dtsi @@ -227,7 +227,7 @@ interrupts = ; clocks = <&clk_uart>, <&clk_vpu>; clock-names = "uartclk", "apb_pclk"; - arm,primecell-periphid = <0x00241011>; + arm,primecell-periphid = <0x00341011>; status = "disabled"; }; diff --git a/arch/arm64/boot/dts/freescale/imx8mm-verdin-dahlia.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-verdin-dahlia.dtsi index ce20de2598054..3d0b149681310 100644 --- a/arch/arm64/boot/dts/freescale/imx8mm-verdin-dahlia.dtsi +++ b/arch/arm64/boot/dts/freescale/imx8mm-verdin-dahlia.dtsi @@ -16,10 +16,10 @@ "Headphone Jack", "HPOUTR", "IN2L", "Line In Jack", "IN2R", "Line In Jack", - "Headphone Jack", "MICBIAS", - "IN1L", "Headphone Jack"; + "Microphone Jack", "MICBIAS", + "IN1L", "Microphone Jack"; simple-audio-card,widgets = - "Microphone", "Headphone Jack", + "Microphone", "Microphone Jack", "Headphone", "Headphone Jack", "Line", "Line In Jack"; diff --git a/arch/arm64/boot/dts/freescale/imx8mp-tqma8mpql.dtsi b/arch/arm64/boot/dts/freescale/imx8mp-tqma8mpql.dtsi index 336785a9fba89..3ddc5aaa7c5f0 100644 --- a/arch/arm64/boot/dts/freescale/imx8mp-tqma8mpql.dtsi +++ b/arch/arm64/boot/dts/freescale/imx8mp-tqma8mpql.dtsi @@ -1,7 +1,8 @@ // SPDX-License-Identifier: GPL-2.0-or-later OR MIT /* - * Copyright 2021-2022 TQ-Systems GmbH - * Author: Alexander Stein + * Copyright 2021-2025 TQ-Systems GmbH , + * D-82229 Seefeld, Germany. 
+ * Author: Alexander Stein */ #include "imx8mp.dtsi" @@ -23,15 +24,6 @@ regulator-max-microvolt = <3300000>; regulator-always-on; }; - - /* e-MMC IO, needed for HS modes */ - reg_vcc1v8: regulator-vcc1v8 { - compatible = "regulator-fixed"; - regulator-name = "VCC1V8"; - regulator-min-microvolt = <1800000>; - regulator-max-microvolt = <1800000>; - regulator-always-on; - }; }; &A53_0 { @@ -197,7 +189,7 @@ no-sd; no-sdio; vmmc-supply = <®_vcc3v3>; - vqmmc-supply = <®_vcc1v8>; + vqmmc-supply = <&buck5_reg>; status = "okay"; }; diff --git a/arch/arm64/boot/dts/freescale/imx8mp-verdin-dahlia.dtsi b/arch/arm64/boot/dts/freescale/imx8mp-verdin-dahlia.dtsi index da8902c5f7e5b..1493319aa748d 100644 --- a/arch/arm64/boot/dts/freescale/imx8mp-verdin-dahlia.dtsi +++ b/arch/arm64/boot/dts/freescale/imx8mp-verdin-dahlia.dtsi @@ -28,10 +28,10 @@ "Headphone Jack", "HPOUTR", "IN2L", "Line In Jack", "IN2R", "Line In Jack", - "Headphone Jack", "MICBIAS", - "IN1L", "Headphone Jack"; + "Microphone Jack", "MICBIAS", + "IN1L", "Microphone Jack"; simple-audio-card,widgets = - "Microphone", "Headphone Jack", + "Microphone", "Microphone Jack", "Headphone", "Headphone Jack", "Line", "Line In Jack"; diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi index e0ce804bb1a35..d0314cdf0b92f 100644 --- a/arch/arm64/boot/dts/qcom/sdm845.dtsi +++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi @@ -5163,7 +5163,6 @@ , , ; - dma-coherent; }; anoc_1_tbu: tbu@150c5000 { diff --git a/arch/arm64/boot/dts/rockchip/px30-ringneck-haikou.dts b/arch/arm64/boot/dts/rockchip/px30-ringneck-haikou.dts index e4517f47d519c..1a59e8b1dc46e 100644 --- a/arch/arm64/boot/dts/rockchip/px30-ringneck-haikou.dts +++ b/arch/arm64/boot/dts/rockchip/px30-ringneck-haikou.dts @@ -194,6 +194,13 @@ <3 RK_PB3 RK_FUNC_GPIO &pcfg_pull_none>; }; }; + + uart { + uart5_rts_pin: uart5-rts-pin { + rockchip,pins = + <0 RK_PB5 RK_FUNC_GPIO &pcfg_pull_none>; + }; + }; }; &pwm0 { @@ -222,11 +229,15 @@ }; &uart0 { + pinctrl-names = "default"; + pinctrl-0 = <&uart0_xfer>; status = "okay"; }; &uart5 { - pinctrl-0 = <&uart5_xfer>; + /* Add pinmux for rts-gpios (uart5_rts_pin) */ + pinctrl-names = "default"; + pinctrl-0 = <&uart5_xfer &uart5_rts_pin>; rts-gpios = <&gpio0 RK_PB5 GPIO_ACTIVE_HIGH>; status = "okay"; }; diff --git a/arch/arm64/boot/dts/rockchip/px30-ringneck.dtsi b/arch/arm64/boot/dts/rockchip/px30-ringneck.dtsi index ae050cc6cd050..e80412abec081 100644 --- a/arch/arm64/boot/dts/rockchip/px30-ringneck.dtsi +++ b/arch/arm64/boot/dts/rockchip/px30-ringneck.dtsi @@ -396,6 +396,12 @@ status = "okay"; }; +&uart5 { + /delete-property/ dmas; + /delete-property/ dma-names; + pinctrl-0 = <&uart5_xfer>; +}; + /* Mule UCAN */ &usb_host0_ehci { status = "okay"; diff --git a/arch/arm64/boot/dts/rockchip/rk3328-orangepi-r1-plus-lts.dts b/arch/arm64/boot/dts/rockchip/rk3328-orangepi-r1-plus-lts.dts index 67c246ad8b8c0..ec2ce894da1fc 100644 --- a/arch/arm64/boot/dts/rockchip/rk3328-orangepi-r1-plus-lts.dts +++ b/arch/arm64/boot/dts/rockchip/rk3328-orangepi-r1-plus-lts.dts @@ -17,8 +17,7 @@ &gmac2io { phy-handle = <&yt8531c>; - tx_delay = <0x19>; - rx_delay = <0x05>; + phy-mode = "rgmii-id"; status = "okay"; mdio { diff --git a/arch/arm64/boot/dts/rockchip/rk3328-orangepi-r1-plus.dts b/arch/arm64/boot/dts/rockchip/rk3328-orangepi-r1-plus.dts index 324a8e951f7e4..846b931e16d21 100644 --- a/arch/arm64/boot/dts/rockchip/rk3328-orangepi-r1-plus.dts +++ b/arch/arm64/boot/dts/rockchip/rk3328-orangepi-r1-plus.dts @@ -15,6 +15,7 @@ &gmac2io { phy-handle = 
<&rtl8211e>; + phy-mode = "rgmii"; tx_delay = <0x24>; rx_delay = <0x18>; status = "okay"; diff --git a/arch/arm64/boot/dts/rockchip/rk3328-orangepi-r1-plus.dtsi b/arch/arm64/boot/dts/rockchip/rk3328-orangepi-r1-plus.dtsi index 4f193704e5dc2..09508e324a280 100644 --- a/arch/arm64/boot/dts/rockchip/rk3328-orangepi-r1-plus.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3328-orangepi-r1-plus.dtsi @@ -109,7 +109,6 @@ assigned-clocks = <&cru SCLK_MAC2IO>, <&cru SCLK_MAC2IO_EXT>; assigned-clock-parents = <&gmac_clk>, <&gmac_clk>; clock_in_out = "input"; - phy-mode = "rgmii"; phy-supply = <&vcc_io>; pinctrl-0 = <&rgmiim1_pins>; pinctrl-names = "default"; diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru-chromebook.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-gru-chromebook.dtsi index 988e6ca32fac9..a9ea4b0daa04c 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-gru-chromebook.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3399-gru-chromebook.dtsi @@ -22,11 +22,11 @@ }; /* EC turns on w/ pp900_usb_en */ - pp900_usb: pp900-ap { + pp900_usb: regulator-pp900-ap { }; /* EC turns on w/ pp900_pcie_en */ - pp900_pcie: pp900-ap { + pp900_pcie: regulator-pp900-ap { }; pp3000: regulator-pp3000 { @@ -126,7 +126,7 @@ }; /* Always on; plain and simple */ - pp3000_ap: pp3000_emmc: pp3000 { + pp3000_ap: pp3000_emmc: regulator-pp3000 { }; pp1500_ap_io: regulator-pp1500-ap-io { @@ -160,7 +160,7 @@ }; /* EC turns on w/ pp3300_usb_en_l */ - pp3300_usb: pp3300 { + pp3300_usb: regulator-pp3300 { }; /* gpio is shared with pp1800_pcie and pinctrl is set there */ diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru-scarlet.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-gru-scarlet.dtsi index 19b23b4389658..5e068377a0a28 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-gru-scarlet.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3399-gru-scarlet.dtsi @@ -92,7 +92,7 @@ }; /* EC turns on pp1800_s3_en */ - pp1800_s3: pp1800 { + pp1800_s3: regulator-pp1800 { }; /* pp3300 children, sorted by name */ @@ -109,11 +109,11 @@ }; /* EC turns on pp3300_s0_en */ - pp3300_s0: pp3300 { + pp3300_s0: regulator-pp3300 { }; /* EC turns on pp3300_s3_en */ - pp3300_s3: pp3300 { + pp3300_s3: regulator-pp3300 { }; /* diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi index 6d9e60b01225e..7eca1da78cffa 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi @@ -189,39 +189,39 @@ }; /* EC turns on w/ pp900_ddrpll_en */ - pp900_ddrpll: pp900-ap { + pp900_ddrpll: regulator-pp900-ap { }; /* EC turns on w/ pp900_pll_en */ - pp900_pll: pp900-ap { + pp900_pll: regulator-pp900-ap { }; /* EC turns on w/ pp900_pmu_en */ - pp900_pmu: pp900-ap { + pp900_pmu: regulator-pp900-ap { }; /* EC turns on w/ pp1800_s0_en_l */ - pp1800_ap_io: pp1800_emmc: pp1800_nfc: pp1800_s0: pp1800 { + pp1800_ap_io: pp1800_emmc: pp1800_nfc: pp1800_s0: regulator-pp1800 { }; /* EC turns on w/ pp1800_avdd_en_l */ - pp1800_avdd: pp1800 { + pp1800_avdd: regulator-pp1800 { }; /* EC turns on w/ pp1800_lid_en_l */ - pp1800_lid: pp1800_mic: pp1800 { + pp1800_lid: pp1800_mic: regulator-pp1800 { }; /* EC turns on w/ lpddr_pwr_en */ - pp1800_lpddr: pp1800 { + pp1800_lpddr: regulator-pp1800 { }; /* EC turns on w/ pp1800_pmu_en_l */ - pp1800_pmu: pp1800 { + pp1800_pmu: regulator-pp1800 { }; /* EC turns on w/ pp1800_usb_en_l */ - pp1800_usb: pp1800 { + pp1800_usb: regulator-pp1800 { }; pp3000_sd_slot: regulator-pp3000-sd-slot { @@ -259,11 +259,11 @@ }; /* EC turns on w/ pp3300_trackpad_en_l */ - pp3300_trackpad: 
pp3300-trackpad { + pp3300_trackpad: regulator-pp3300-trackpad { }; /* EC turns on w/ usb_a_en */ - pp5000_usb_a_vbus: pp5000 { + pp5000_usb_a_vbus: regulator-pp5000 { }; ap_rtc_clk: ap-rtc-clk { diff --git a/arch/arm64/boot/dts/rockchip/rk3399-nanopi-r4s.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-nanopi-r4s.dtsi index b1c9bd0e63ef3..8d94d9f91a5c6 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-nanopi-r4s.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3399-nanopi-r4s.dtsi @@ -115,7 +115,7 @@ }; &u2phy1_host { - status = "disabled"; + phy-supply = <&vdd_5v>; }; &uart0 { diff --git a/arch/arm64/boot/dts/rockchip/rk3399-rockpro64.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-rockpro64.dtsi index 69a9d61706495..51c6aa26d8285 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-rockpro64.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3399-rockpro64.dtsi @@ -227,6 +227,16 @@ vin-supply = <&vcc12v_dcin>; }; + vcca_0v9: regulator-vcca-0v9 { + compatible = "regulator-fixed"; + regulator-name = "vcca_0v9"; + regulator-always-on; + regulator-boot-on; + regulator-min-microvolt = <900000>; + regulator-max-microvolt = <900000>; + vin-supply = <&vcc3v3_sys>; + }; + vdd_log: regulator-vdd-log { compatible = "pwm-regulator"; pwms = <&pwm2 0 25000 1>; @@ -312,6 +322,8 @@ }; &hdmi { + avdd-0v9-supply = <&vcca_0v9>; + avdd-1v8-supply = <&vcc1v8_dvp>; ddc-i2c-bus = <&i2c3>; pinctrl-names = "default"; pinctrl-0 = <&hdmi_cec>; @@ -661,6 +673,8 @@ num-lanes = <4>; pinctrl-names = "default"; pinctrl-0 = <&pcie_perst>; + vpcie0v9-supply = <&vcca_0v9>; + vpcie1v8-supply = <&vcca_1v8>; vpcie12v-supply = <&vcc12v_dcin>; vpcie3v3-supply = <&vcc3v3_pcie>; status = "okay"; diff --git a/arch/arm64/boot/dts/rockchip/rk3566-lubancat-1.dts b/arch/arm64/boot/dts/rockchip/rk3566-lubancat-1.dts index 61dd71c259aac..ddf84c2a19cfa 100644 --- a/arch/arm64/boot/dts/rockchip/rk3566-lubancat-1.dts +++ b/arch/arm64/boot/dts/rockchip/rk3566-lubancat-1.dts @@ -512,7 +512,6 @@ &sdmmc0 { max-frequency = <150000000>; - supports-sd; bus-width = <4>; cap-mmc-highspeed; cap-sd-highspeed; diff --git a/arch/arm64/boot/dts/rockchip/rk3588-base.dtsi b/arch/arm64/boot/dts/rockchip/rk3588-base.dtsi index 8cfa30837ce72..c3abdfb04f8f4 100644 --- a/arch/arm64/boot/dts/rockchip/rk3588-base.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3588-base.dtsi @@ -549,10 +549,10 @@ mmu600_pcie: iommu@fc900000 { compatible = "arm,smmu-v3"; reg = <0x0 0xfc900000 0x0 0x200000>; - interrupts = , - , - , - ; + interrupts = , + , + , + ; interrupt-names = "eventq", "gerror", "priq", "cmdq-sync"; #iommu-cells = <1>; }; @@ -560,10 +560,10 @@ mmu600_php: iommu@fcb00000 { compatible = "arm,smmu-v3"; reg = <0x0 0xfcb00000 0x0 0x200000>; - interrupts = , - , - , - ; + interrupts = , + , + , + ; interrupt-names = "eventq", "gerror", "priq", "cmdq-sync"; #iommu-cells = <1>; status = "disabled"; @@ -2668,9 +2668,9 @@ rockchip,hw-tshut-temp = <120000>; rockchip,hw-tshut-mode = <0>; /* tshut mode 0:CRU 1:GPIO */ rockchip,hw-tshut-polarity = <0>; /* tshut polarity 0:LOW 1:HIGH */ - pinctrl-0 = <&tsadc_gpio_func>; - pinctrl-1 = <&tsadc_shut>; - pinctrl-names = "gpio", "otpout"; + pinctrl-0 = <&tsadc_shut_org>; + pinctrl-1 = <&tsadc_gpio_func>; + pinctrl-names = "default", "sleep"; #thermal-sensor-cells = <1>; status = "disabled"; }; diff --git a/arch/arm64/boot/dts/rockchip/rk3588-coolpi-cm5-genbook.dts b/arch/arm64/boot/dts/rockchip/rk3588-coolpi-cm5-genbook.dts index 92f0ed83c9902..bc6b43a771537 100644 --- a/arch/arm64/boot/dts/rockchip/rk3588-coolpi-cm5-genbook.dts +++ 
b/arch/arm64/boot/dts/rockchip/rk3588-coolpi-cm5-genbook.dts @@ -113,7 +113,7 @@ compatible = "regulator-fixed"; regulator-name = "vcc3v3_lcd"; enable-active-high; - gpio = <&gpio1 RK_PC4 GPIO_ACTIVE_HIGH>; + gpio = <&gpio0 RK_PC4 GPIO_ACTIVE_HIGH>; pinctrl-names = "default"; pinctrl-0 = <&lcdpwr_en>; vin-supply = <&vcc3v3_sys>; @@ -241,7 +241,7 @@ &pinctrl { lcd { lcdpwr_en: lcdpwr-en { - rockchip,pins = <1 RK_PC4 RK_FUNC_GPIO &pcfg_pull_down>; + rockchip,pins = <0 RK_PC4 RK_FUNC_GPIO &pcfg_pull_down>; }; bl_en: bl-en { diff --git a/arch/arm64/boot/dts/rockchip/rk3588-extra.dtsi b/arch/arm64/boot/dts/rockchip/rk3588-extra.dtsi index 4a950907ea6f5..840b638af1c24 100644 --- a/arch/arm64/boot/dts/rockchip/rk3588-extra.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3588-extra.dtsi @@ -213,7 +213,6 @@ interrupt-names = "sys", "pmc", "msg", "legacy", "err", "dma0", "dma1", "dma2", "dma3"; max-link-speed = <3>; - iommus = <&mmu600_pcie 0x0000>; num-lanes = <4>; phys = <&pcie30phy>; phy-names = "pcie-phy"; diff --git a/arch/arm64/boot/dts/rockchip/rk3588-jaguar.dts b/arch/arm64/boot/dts/rockchip/rk3588-jaguar.dts index 90f823b2c2191..7f457ab78015a 100644 --- a/arch/arm64/boot/dts/rockchip/rk3588-jaguar.dts +++ b/arch/arm64/boot/dts/rockchip/rk3588-jaguar.dts @@ -503,7 +503,6 @@ non-removable; pinctrl-names = "default"; pinctrl-0 = <&emmc_bus8 &emmc_cmd &emmc_clk &emmc_data_strobe>; - supports-cqe; vmmc-supply = <&vcc_3v3_s3>; vqmmc-supply = <&vcc_1v8_s3>; status = "okay"; diff --git a/arch/arm64/boot/dts/rockchip/rk3588-rock-5-itx.dts b/arch/arm64/boot/dts/rockchip/rk3588-rock-5-itx.dts index 6d68f70284e45..2a0590209462e 100644 --- a/arch/arm64/boot/dts/rockchip/rk3588-rock-5-itx.dts +++ b/arch/arm64/boot/dts/rockchip/rk3588-rock-5-itx.dts @@ -690,10 +690,9 @@ &sdhci { bus-width = <8>; - max-frequency = <200000000>; + max-frequency = <150000000>; mmc-hs400-1_8v; mmc-hs400-enhanced-strobe; - mmc-hs200-1_8v; no-sdio; no-sd; non-removable; diff --git a/arch/arm64/boot/dts/rockchip/rk3588-rock-5b-pcie-ep.dtso b/arch/arm64/boot/dts/rockchip/rk3588-rock-5b-pcie-ep.dtso index 672d748fcc67c..f229cb49da680 100644 --- a/arch/arm64/boot/dts/rockchip/rk3588-rock-5b-pcie-ep.dtso +++ b/arch/arm64/boot/dts/rockchip/rk3588-rock-5b-pcie-ep.dtso @@ -23,3 +23,7 @@ vpcie3v3-supply = <&vcc3v3_pcie30>; status = "okay"; }; + +&mmu600_pcie { + status = "disabled"; +}; diff --git a/arch/arm64/boot/dts/rockchip/rk3588-tiger.dtsi b/arch/arm64/boot/dts/rockchip/rk3588-tiger.dtsi index 81a6a05ce13b6..e8fa449517c27 100644 --- a/arch/arm64/boot/dts/rockchip/rk3588-tiger.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3588-tiger.dtsi @@ -386,7 +386,6 @@ non-removable; pinctrl-names = "default"; pinctrl-0 = <&emmc_bus8 &emmc_cmd &emmc_clk &emmc_data_strobe>; - supports-cqe; vmmc-supply = <&vcc_3v3_s3>; vqmmc-supply = <&vcc_1v8_s3>; status = "okay"; diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig index cb7da44155999..1f25423de3833 100644 --- a/arch/arm64/configs/defconfig +++ b/arch/arm64/configs/defconfig @@ -1551,6 +1551,8 @@ CONFIG_PWM_VISCONTI=m CONFIG_SL28CPLD_INTC=y CONFIG_QCOM_PDC=y CONFIG_QCOM_MPM=y +CONFIG_TI_SCI_INTR_IRQCHIP=y +CONFIG_TI_SCI_INTA_IRQCHIP=y CONFIG_RESET_GPIO=m CONFIG_RESET_IMX7=y CONFIG_RESET_QCOM_AOSS=y diff --git a/arch/arm64/include/asm/el2_setup.h b/arch/arm64/include/asm/el2_setup.h index 25e1626517500..555c613fd2324 100644 --- a/arch/arm64/include/asm/el2_setup.h +++ b/arch/arm64/include/asm/el2_setup.h @@ -16,6 +16,32 @@ #include #include +.macro init_el2_hcr val + mov_q x0, \val + + 
/* + * Compliant CPUs advertise their VHE-onlyness with + * ID_AA64MMFR4_EL1.E2H0 < 0. On such CPUs HCR_EL2.E2H is RES1, but it + * can reset into an UNKNOWN state and might not read as 1 until it has + * been initialized explicitly. + * + * Fruity CPUs seem to have HCR_EL2.E2H set to RAO/WI, but + * don't advertise it (they predate this relaxation). + * + * Initialize HCR_EL2.E2H so that later code can rely upon HCR_EL2.E2H + * indicating whether the CPU is running in E2H mode. + */ + mrs_s x1, SYS_ID_AA64MMFR4_EL1 + sbfx x1, x1, #ID_AA64MMFR4_EL1_E2H0_SHIFT, #ID_AA64MMFR4_EL1_E2H0_WIDTH + cmp x1, #0 + b.ge .LnVHE_\@ + + orr x0, x0, #HCR_E2H +.LnVHE_\@: + msr hcr_el2, x0 + isb +.endm + .macro __init_el2_sctlr mov_q x0, INIT_SCTLR_EL2_MMU_OFF msr sctlr_el2, x0 @@ -244,11 +270,6 @@ .Lskip_gcs_\@: .endm -.macro __init_el2_nvhe_prepare_eret - mov x0, #INIT_PSTATE_EL1 - msr spsr_el2, x0 -.endm - .macro __init_el2_mpam /* Memory Partitioning And Monitoring: disable EL2 traps */ mrs x1, id_aa64pfr0_el1 diff --git a/arch/arm64/include/asm/hugetlb.h b/arch/arm64/include/asm/hugetlb.h index c6dff3e69539b..07fbf5bf85a7e 100644 --- a/arch/arm64/include/asm/hugetlb.h +++ b/arch/arm64/include/asm/hugetlb.h @@ -42,8 +42,8 @@ extern int huge_ptep_set_access_flags(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep, pte_t pte, int dirty); #define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR -extern pte_t huge_ptep_get_and_clear(struct mm_struct *mm, - unsigned long addr, pte_t *ptep); +extern pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, unsigned long sz); #define __HAVE_ARCH_HUGE_PTEP_SET_WRPROTECT extern void huge_ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep); @@ -76,12 +76,22 @@ static inline void flush_hugetlb_tlb_range(struct vm_area_struct *vma, { unsigned long stride = huge_page_size(hstate_vma(vma)); - if (stride == PMD_SIZE) - __flush_tlb_range(vma, start, end, stride, false, 2); - else if (stride == PUD_SIZE) - __flush_tlb_range(vma, start, end, stride, false, 1); - else - __flush_tlb_range(vma, start, end, PAGE_SIZE, false, 0); + switch (stride) { +#ifndef __PAGETABLE_PMD_FOLDED + case PUD_SIZE: + __flush_tlb_range(vma, start, end, PUD_SIZE, false, 1); + break; +#endif + case CONT_PMD_SIZE: + case PMD_SIZE: + __flush_tlb_range(vma, start, end, PMD_SIZE, false, 2); + break; + case CONT_PTE_SIZE: + __flush_tlb_range(vma, start, end, PAGE_SIZE, false, 3); + break; + default: + __flush_tlb_range(vma, start, end, PAGE_SIZE, false, TLBI_TTL_UNKNOWN); + } } #endif /* __ASM_HUGETLB_H */ diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h index 8d94a6c0ed5c4..c2417a424b98d 100644 --- a/arch/arm64/include/asm/kvm_arm.h +++ b/arch/arm64/include/asm/kvm_arm.h @@ -119,7 +119,7 @@ #define TCR_EL2_IRGN0_MASK TCR_IRGN0_MASK #define TCR_EL2_T0SZ_MASK 0x3f #define TCR_EL2_MASK (TCR_EL2_TG0_MASK | TCR_EL2_SH0_MASK | \ - TCR_EL2_ORGN0_MASK | TCR_EL2_IRGN0_MASK | TCR_EL2_T0SZ_MASK) + TCR_EL2_ORGN0_MASK | TCR_EL2_IRGN0_MASK) /* VTCR_EL2 Registers bits */ #define VTCR_EL2_DS TCR_EL2_DS diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 3a7ec98ef1238..d919557af5e50 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -1259,7 +1259,7 @@ int kvm_arm_pvtime_has_attr(struct kvm_vcpu *vcpu, extern unsigned int __ro_after_init kvm_arm_vmid_bits; int __init kvm_arm_vmid_alloc_init(void); void __init kvm_arm_vmid_alloc_free(void); -bool 
kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid); +void kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid); void kvm_arm_vmid_clear_active(void); static inline void kvm_arm_pvtime_vcpu_init(struct kvm_vcpu_arch *vcpu_arch) diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h index bc94e036a26b9..8104aee4f9a08 100644 --- a/arch/arm64/include/asm/tlbflush.h +++ b/arch/arm64/include/asm/tlbflush.h @@ -396,33 +396,35 @@ static inline void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch) #define __flush_tlb_range_op(op, start, pages, stride, \ asid, tlb_level, tlbi_user, lpa2) \ do { \ + typeof(start) __flush_start = start; \ + typeof(pages) __flush_pages = pages; \ int num = 0; \ int scale = 3; \ int shift = lpa2 ? 16 : PAGE_SHIFT; \ unsigned long addr; \ \ - while (pages > 0) { \ + while (__flush_pages > 0) { \ if (!system_supports_tlb_range() || \ - pages == 1 || \ - (lpa2 && start != ALIGN(start, SZ_64K))) { \ - addr = __TLBI_VADDR(start, asid); \ + __flush_pages == 1 || \ + (lpa2 && __flush_start != ALIGN(__flush_start, SZ_64K))) { \ + addr = __TLBI_VADDR(__flush_start, asid); \ __tlbi_level(op, addr, tlb_level); \ if (tlbi_user) \ __tlbi_user_level(op, addr, tlb_level); \ - start += stride; \ - pages -= stride >> PAGE_SHIFT; \ + __flush_start += stride; \ + __flush_pages -= stride >> PAGE_SHIFT; \ continue; \ } \ \ - num = __TLBI_RANGE_NUM(pages, scale); \ + num = __TLBI_RANGE_NUM(__flush_pages, scale); \ if (num >= 0) { \ - addr = __TLBI_VADDR_RANGE(start >> shift, asid, \ + addr = __TLBI_VADDR_RANGE(__flush_start >> shift, asid, \ scale, num, tlb_level); \ __tlbi(r##op, addr); \ if (tlbi_user) \ __tlbi_user(r##op, addr); \ - start += __TLBI_RANGE_PAGES(num, scale) << PAGE_SHIFT; \ - pages -= __TLBI_RANGE_PAGES(num, scale); \ + __flush_start += __TLBI_RANGE_PAGES(num, scale) << PAGE_SHIFT; \ + __flush_pages -= __TLBI_RANGE_PAGES(num, scale);\ } \ scale--; \ } \ diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index 5ab1970ee5436..2ce73525de2c9 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -298,25 +298,8 @@ SYM_INNER_LABEL(init_el2, SYM_L_LOCAL) msr sctlr_el2, x0 isb 0: - mov_q x0, HCR_HOST_NVHE_FLAGS - - /* - * Compliant CPUs advertise their VHE-onlyness with - * ID_AA64MMFR4_EL1.E2H0 < 0. HCR_EL2.E2H can be - * RES1 in that case. Publish the E2H bit early so that - * it can be picked up by the init_el2_state macro. - * - * Fruity CPUs seem to have HCR_EL2.E2H set to RAO/WI, but - * don't advertise it (they predate this relaxation). - */ - mrs_s x1, SYS_ID_AA64MMFR4_EL1 - tbz x1, #(ID_AA64MMFR4_EL1_E2H0_SHIFT + ID_AA64MMFR4_EL1_E2H0_WIDTH - 1), 1f - - orr x0, x0, #HCR_E2H -1: - msr hcr_el2, x0 - isb + init_el2_hcr HCR_HOST_NVHE_FLAGS init_el2_state /* Hypervisor stub */ @@ -339,7 +322,8 @@ SYM_INNER_LABEL(init_el2, SYM_L_LOCAL) msr sctlr_el1, x1 mov x2, xzr 3: - __init_el2_nvhe_prepare_eret + mov x0, #INIT_PSTATE_EL1 + msr spsr_el2, x0 mov w0, #BOOT_CPU_MODE_EL2 orr x0, x0, x2 diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index b8e55a441282f..0160b49243511 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -559,6 +559,16 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) mmu = vcpu->arch.hw_mmu; last_ran = this_cpu_ptr(mmu->last_vcpu_ran); + /* + * Ensure a VMID is allocated for the MMU before programming VTTBR_EL2, + * which happens eagerly in VHE. 
+ * + * Also, the VMID allocator only preserves VMIDs that are active at the + * time of rollover, so KVM might need to grab a new VMID for the MMU if + * this is called from kvm_sched_in(). + */ + kvm_arm_vmid_update(&mmu->vmid); + /* * We guarantee that both TLBs and I-cache are private to each * vcpu. If detecting that a vcpu from the same VM has @@ -1138,18 +1148,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) */ preempt_disable(); - /* - * The VMID allocator only tracks active VMIDs per - * physical CPU, and therefore the VMID allocated may not be - * preserved on VMID roll-over if the task was preempted, - * making a thread's VMID inactive. So we need to call - * kvm_arm_vmid_update() in non-premptible context. - */ - if (kvm_arm_vmid_update(&vcpu->arch.hw_mmu->vmid) && - has_vhe()) - __load_stage2(vcpu->arch.hw_mmu, - vcpu->arch.hw_mmu->arch); - kvm_pmu_flush_hwstate(vcpu); local_irq_disable(); @@ -1980,7 +1978,7 @@ static int kvm_init_vector_slots(void) static void __init cpu_prepare_hyp_mode(int cpu, u32 hyp_va_bits) { struct kvm_nvhe_init_params *params = per_cpu_ptr_nvhe_sym(kvm_init_params, cpu); - unsigned long tcr, ips; + unsigned long tcr; /* * Calculate the raw per-cpu offset without a translation from the @@ -1994,19 +1992,18 @@ static void __init cpu_prepare_hyp_mode(int cpu, u32 hyp_va_bits) params->mair_el2 = read_sysreg(mair_el1); tcr = read_sysreg(tcr_el1); - ips = FIELD_GET(TCR_IPS_MASK, tcr); if (cpus_have_final_cap(ARM64_KVM_HVHE)) { + tcr &= ~(TCR_HD | TCR_HA | TCR_A1 | TCR_T0SZ_MASK); tcr |= TCR_EPD1_MASK; } else { + unsigned long ips = FIELD_GET(TCR_IPS_MASK, tcr); + tcr &= TCR_EL2_MASK; - tcr |= TCR_EL2_RES1; + tcr |= TCR_EL2_RES1 | FIELD_PREP(TCR_EL2_PS_MASK, ips); + if (lpa2_is_enabled()) + tcr |= TCR_EL2_DS; } - tcr &= ~TCR_T0SZ_MASK; tcr |= TCR_T0SZ(hyp_va_bits); - tcr &= ~TCR_EL2_PS_MASK; - tcr |= FIELD_PREP(TCR_EL2_PS_MASK, ips); - if (lpa2_is_enabled()) - tcr |= TCR_EL2_DS; params->tcr_el2 = tcr; params->pgd_pa = kvm_mmu_get_httbr(); diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-init.S b/arch/arm64/kvm/hyp/nvhe/hyp-init.S index fc18662260676..f8af11189572f 100644 --- a/arch/arm64/kvm/hyp/nvhe/hyp-init.S +++ b/arch/arm64/kvm/hyp/nvhe/hyp-init.S @@ -73,8 +73,12 @@ __do_hyp_init: eret SYM_CODE_END(__kvm_hyp_init) +/* + * Initialize EL2 CPU state to sane values. + * + * HCR_EL2.E2H must have been initialized already. + */ SYM_CODE_START_LOCAL(__kvm_init_el2_state) - /* Initialize EL2 CPU state to sane values. */ init_el2_state // Clobbers x0..x2 finalise_el2_state ret @@ -206,9 +210,9 @@ SYM_CODE_START_LOCAL(__kvm_hyp_init_cpu) 2: msr SPsel, #1 // We want to use SP_EL{1,2} - bl __kvm_init_el2_state + init_el2_hcr 0 - __init_el2_nvhe_prepare_eret + bl __kvm_init_el2_state /* Enable MMU, set vectors and stack. 
*/ mov x0, x28 diff --git a/arch/arm64/kvm/hyp/nvhe/psci-relay.c b/arch/arm64/kvm/hyp/nvhe/psci-relay.c index 9c2ce1e0e99a5..c3e196fb8b18f 100644 --- a/arch/arm64/kvm/hyp/nvhe/psci-relay.c +++ b/arch/arm64/kvm/hyp/nvhe/psci-relay.c @@ -218,6 +218,9 @@ asmlinkage void __noreturn __kvm_host_psci_cpu_entry(bool is_cpu_on) if (is_cpu_on) release_boot_args(boot_args); + write_sysreg_el1(INIT_SCTLR_EL1_MMU_OFF, SYS_SCTLR); + write_sysreg(INIT_PSTATE_EL1, SPSR_EL2); + __host_enter(host_ctxt); } diff --git a/arch/arm64/kvm/vmid.c b/arch/arm64/kvm/vmid.c index 806223b7022af..7fe8ba1a2851c 100644 --- a/arch/arm64/kvm/vmid.c +++ b/arch/arm64/kvm/vmid.c @@ -135,11 +135,10 @@ void kvm_arm_vmid_clear_active(void) atomic64_set(this_cpu_ptr(&active_vmids), VMID_ACTIVE_INVALID); } -bool kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid) +void kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid) { unsigned long flags; u64 vmid, old_active_vmid; - bool updated = false; vmid = atomic64_read(&kvm_vmid->id); @@ -157,21 +156,17 @@ bool kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid) if (old_active_vmid != 0 && vmid_gen_match(vmid) && 0 != atomic64_cmpxchg_relaxed(this_cpu_ptr(&active_vmids), old_active_vmid, vmid)) - return false; + return; raw_spin_lock_irqsave(&cpu_vmid_lock, flags); /* Check that our VMID belongs to the current generation. */ vmid = atomic64_read(&kvm_vmid->id); - if (!vmid_gen_match(vmid)) { + if (!vmid_gen_match(vmid)) vmid = new_vmid(kvm_vmid); - updated = true; - } atomic64_set(this_cpu_ptr(&active_vmids), vmid); raw_spin_unlock_irqrestore(&cpu_vmid_lock, flags); - - return updated; } /* diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c index 98a2a0e64e255..b3a7fafe8892d 100644 --- a/arch/arm64/mm/hugetlbpage.c +++ b/arch/arm64/mm/hugetlbpage.c @@ -100,20 +100,11 @@ static int find_num_contig(struct mm_struct *mm, unsigned long addr, static inline int num_contig_ptes(unsigned long size, size_t *pgsize) { - int contig_ptes = 0; + int contig_ptes = 1; *pgsize = size; switch (size) { -#ifndef __PAGETABLE_PMD_FOLDED - case PUD_SIZE: - if (pud_sect_supported()) - contig_ptes = 1; - break; -#endif - case PMD_SIZE: - contig_ptes = 1; - break; case CONT_PMD_SIZE: *pgsize = PMD_SIZE; contig_ptes = CONT_PMDS; @@ -122,6 +113,8 @@ static inline int num_contig_ptes(unsigned long size, size_t *pgsize) *pgsize = PAGE_SIZE; contig_ptes = CONT_PTES; break; + default: + WARN_ON(!__hugetlb_valid_size(size)); } return contig_ptes; @@ -163,24 +156,23 @@ static pte_t get_clear_contig(struct mm_struct *mm, unsigned long pgsize, unsigned long ncontig) { - pte_t orig_pte = __ptep_get(ptep); - unsigned long i; - - for (i = 0; i < ncontig; i++, addr += pgsize, ptep++) { - pte_t pte = __ptep_get_and_clear(mm, addr, ptep); - - /* - * If HW_AFDBM is enabled, then the HW could turn on - * the dirty or accessed bit for any page in the set, - * so check them all. 
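As a standalone illustration of the dirty/accessed folding that both the old loop here and its replacement perform, here is a minimal C sketch; the pte_t type and the bit positions are invented for the example rather than taken from any real architecture:

#include <stdint.h>

typedef uint64_t pte_t;            /* illustrative stand-in */
#define PTE_DIRTY (1ULL << 55)     /* hypothetical dirty bit */
#define PTE_YOUNG (1ULL << 10)     /* hypothetical accessed bit */

/*
 * Clear ncontig contiguous entries and fold each entry's dirty/young
 * bits into the returned value: with HW_AFDBM the hardware may have
 * set them on any entry in the set, so every entry must be checked.
 */
static pte_t get_clear_contig_sketch(pte_t *ptep, unsigned long ncontig)
{
        pte_t folded = ptep[0];

        ptep[0] = 0;
        for (unsigned long i = 1; i < ncontig; i++) {
                folded |= ptep[i] & (PTE_DIRTY | PTE_YOUNG);
                ptep[i] = 0;
        }
        return folded;
}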
- */ - if (pte_dirty(pte)) - orig_pte = pte_mkdirty(orig_pte); - - if (pte_young(pte)) - orig_pte = pte_mkyoung(orig_pte); + pte_t pte, tmp_pte; + bool present; + + pte = __ptep_get_and_clear(mm, addr, ptep); + present = pte_present(pte); + while (--ncontig) { + ptep++; + addr += pgsize; + tmp_pte = __ptep_get_and_clear(mm, addr, ptep); + if (present) { + if (pte_dirty(tmp_pte)) + pte = pte_mkdirty(pte); + if (pte_young(tmp_pte)) + pte = pte_mkyoung(pte); + } } - return orig_pte; + return pte; } static pte_t get_clear_contig_flush(struct mm_struct *mm, @@ -396,18 +388,13 @@ void huge_pte_clear(struct mm_struct *mm, unsigned long addr, __pte_clear(mm, addr, ptep); } -pte_t huge_ptep_get_and_clear(struct mm_struct *mm, - unsigned long addr, pte_t *ptep) +pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, unsigned long sz) { int ncontig; size_t pgsize; - pte_t orig_pte = __ptep_get(ptep); - - if (!pte_cont(orig_pte)) - return __ptep_get_and_clear(mm, addr, ptep); - - ncontig = find_num_contig(mm, addr, ptep, &pgsize); + ncontig = num_contig_ptes(sz, &pgsize); return get_clear_contig(mm, addr, ptep, pgsize, ncontig); } @@ -549,6 +536,8 @@ bool __init arch_hugetlb_valid_size(unsigned long size) pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) { + unsigned long psize = huge_page_size(hstate_vma(vma)); + if (alternative_has_cap_unlikely(ARM64_WORKAROUND_2645198)) { /* * Break-before-make (BBM) is required for all user space mappings @@ -558,7 +547,7 @@ pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr if (pte_user_exec(__ptep_get(ptep))) return huge_ptep_clear_flush(vma, addr, ptep); } - return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep); + return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep, psize); } void huge_ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep, diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index 9c0b8d9558fc4..ccdef53872a0b 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c @@ -279,12 +279,7 @@ void __init arm64_memblock_init(void) if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) { extern u16 memstart_offset_seed; - - /* - * Use the sanitised version of id_aa64mmfr0_el1 so that linear - * map randomization can be enabled by shrinking the IPA space. 
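The randomization comment above hinges on reading ID_AA64MMFR0_EL1.PARANGE. A hedged sketch of that extraction, assuming the architectural encodings where PARANGE values 0 through 6 map to 32/36/40/42/44/48/52 physical address bits:

#include <stdint.h>

/*
 * Extract the unsigned 4-bit PARANGE field (bits [3:0]) and map it to
 * a physical-address width; values above 6 are clamped here for
 * simplicity, which real code would handle per the architecture.
 */
static unsigned int parange_to_pa_bits(uint64_t mmfr0)
{
        static const unsigned int pa_bits[] = { 32, 36, 40, 42, 44, 48, 52 };
        unsigned int parange = mmfr0 & 0xf;

        return parange < 7 ? pa_bits[parange] : 52;
}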
- */ - u64 mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1); + u64 mmfr0 = read_cpuid(ID_AA64MMFR0_EL1); int parange = cpuid_feature_extract_unsigned_field( mmfr0, ID_AA64MMFR0_EL1_PARANGE_SHIFT); s64 range = linear_region_size - diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index b4df5bc5b1b8b..1dfe1a8efdbe4 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -1177,8 +1177,11 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node, struct vmem_altmap *altmap) { WARN_ON((start < VMEMMAP_START) || (end > VMEMMAP_END)); + /* [start, end] should be within one section */ + WARN_ON_ONCE(end - start > PAGES_PER_SECTION * sizeof(struct page)); - if (!IS_ENABLED(CONFIG_ARM64_4K_PAGES)) + if (!IS_ENABLED(CONFIG_ARM64_4K_PAGES) || + (end - start < PAGES_PER_SECTION * sizeof(struct page))) return vmemmap_populate_basepages(start, end, node, altmap); else return vmemmap_populate_hugepages(start, end, node, altmap); diff --git a/arch/loongarch/include/asm/hugetlb.h b/arch/loongarch/include/asm/hugetlb.h index c8e4057734d0d..4dc4b3e04225f 100644 --- a/arch/loongarch/include/asm/hugetlb.h +++ b/arch/loongarch/include/asm/hugetlb.h @@ -36,7 +36,8 @@ static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr, #define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm, - unsigned long addr, pte_t *ptep) + unsigned long addr, pte_t *ptep, + unsigned long sz) { pte_t clear; pte_t pte = ptep_get(ptep); @@ -51,8 +52,9 @@ static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) { pte_t pte; + unsigned long sz = huge_page_size(hstate_vma(vma)); - pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep); + pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep, sz); flush_tlb_page(vma, addr); return pte; } diff --git a/arch/loongarch/kernel/acpi.c b/arch/loongarch/kernel/acpi.c index 382a09a7152c3..1120ac2824f6e 100644 --- a/arch/loongarch/kernel/acpi.c +++ b/arch/loongarch/kernel/acpi.c @@ -249,18 +249,6 @@ static __init int setup_node(int pxm) return acpi_map_pxm_to_node(pxm); } -/* - * Callback for SLIT parsing. pxm_to_node() returns NUMA_NO_NODE for - * I/O localities since SRAT does not list them. I/O localities are - * not supported at this point. 
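The numa_set_distance() check that follows reduces to two rules: a SLIT distance must fit in a byte, and a node's distance to itself must be LOCAL_DISTANCE (10, per the kernel's topology headers and the ACPI convention). A self-contained sketch of that validation:

#include <stdint.h>

#define LOCAL_DISTANCE 10

static int slit_distance_valid(int from, int to, int distance)
{
        if ((uint8_t)distance != distance)
                return 0;   /* does not fit the 0..255 SLIT range */
        if (from == to && distance != LOCAL_DISTANCE)
                return 0;   /* self-distance must be LOCAL_DISTANCE */
        return 1;
}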
- */ -unsigned int numa_distance_cnt; - -static inline unsigned int get_numa_distances_cnt(struct acpi_table_slit *slit) -{ - return slit->locality_count; -} - void __init numa_set_distance(int from, int to, int distance) { if ((u8)distance != distance || (from == to && distance != LOCAL_DISTANCE)) { diff --git a/arch/loongarch/kernel/machine_kexec.c b/arch/loongarch/kernel/machine_kexec.c index 8ae641dc53bb7..f9381800e291c 100644 --- a/arch/loongarch/kernel/machine_kexec.c +++ b/arch/loongarch/kernel/machine_kexec.c @@ -126,14 +126,14 @@ void kexec_reboot(void) /* All secondary cpus go to kexec_smp_wait */ if (smp_processor_id() > 0) { relocated_kexec_smp_wait(NULL); - unreachable(); + BUG(); } #endif do_kexec = (void *)reboot_code_buffer; do_kexec(efi_boot, cmdline_ptr, systable_ptr, start_addr, first_ind_entry); - unreachable(); + BUG(); } diff --git a/arch/loongarch/kernel/setup.c b/arch/loongarch/kernel/setup.c index edcfdfcad7d22..90cb3ca96f085 100644 --- a/arch/loongarch/kernel/setup.c +++ b/arch/loongarch/kernel/setup.c @@ -387,6 +387,9 @@ static void __init check_kernel_sections_mem(void) */ static void __init arch_mem_init(char **cmdline_p) { + /* Recalculate max_low_pfn for "mem=xxx" */ + max_pfn = max_low_pfn = PHYS_PFN(memblock_end_of_DRAM()); + if (usermem) pr_info("User-defined physical RAM map overwrite\n"); diff --git a/arch/loongarch/kernel/smp.c b/arch/loongarch/kernel/smp.c index fbf747447f13f..4b24589c0b565 100644 --- a/arch/loongarch/kernel/smp.c +++ b/arch/loongarch/kernel/smp.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include #include @@ -423,7 +424,7 @@ void loongson_cpu_die(unsigned int cpu) mb(); } -void __noreturn arch_cpu_idle_dead(void) +static void __noreturn idle_play_dead(void) { register uint64_t addr; register void (*init_fn)(void); @@ -447,6 +448,50 @@ void __noreturn arch_cpu_idle_dead(void) BUG(); } +#ifdef CONFIG_HIBERNATION +static void __noreturn poll_play_dead(void) +{ + register uint64_t addr; + register void (*init_fn)(void); + + idle_task_exit(); + __this_cpu_write(cpu_state, CPU_DEAD); + + __smp_mb(); + do { + __asm__ __volatile__("nop\n\t"); + addr = iocsr_read64(LOONGARCH_IOCSR_MBUF0); + } while (addr == 0); + + init_fn = (void *)TO_CACHE(addr); + iocsr_write32(0xffffffff, LOONGARCH_IOCSR_IPI_CLEAR); + + init_fn(); + BUG(); +} +#endif + +static void (*play_dead)(void) = idle_play_dead; + +void __noreturn arch_cpu_idle_dead(void) +{ + play_dead(); + BUG(); /* play_dead() doesn't return */ +} + +#ifdef CONFIG_HIBERNATION +int hibernate_resume_nonboot_cpu_disable(void) +{ + int ret; + + play_dead = poll_play_dead; + ret = suspend_disable_secondary_cpus(); + play_dead = idle_play_dead; + + return ret; +} +#endif + #endif /* diff --git a/arch/loongarch/kvm/exit.c b/arch/loongarch/kvm/exit.c index c1e8ec5b941b2..ea321403644ad 100644 --- a/arch/loongarch/kvm/exit.c +++ b/arch/loongarch/kvm/exit.c @@ -669,6 +669,12 @@ static int kvm_handle_rdwr_fault(struct kvm_vcpu *vcpu, bool write) struct kvm_run *run = vcpu->run; unsigned long badv = vcpu->arch.badv; + /* Inject an ADE exception if the address exceeds the max GPA size */ + if (unlikely(badv >= vcpu->kvm->arch.gpa_size)) { + kvm_queue_exception(vcpu, EXCCODE_ADE, EXSUBCODE_ADEM); + return RESUME_GUEST; + } + ret = kvm_handle_mm_fault(vcpu, badv, write); if (ret) { /* Treat as MMIO */ diff --git a/arch/loongarch/kvm/main.c b/arch/loongarch/kvm/main.c index f6d3242b9234a..b6864d6e5ec8d 100644 --- a/arch/loongarch/kvm/main.c +++ b/arch/loongarch/kvm/main.c @@ -317,6 +317,13 @@ int 
kvm_arch_enable_virtualization_cpu(void) kvm_debug("GCFG:%lx GSTAT:%lx GINTC:%lx GTLBC:%lx", read_csr_gcfg(), read_csr_gstat(), read_csr_gintc(), read_csr_gtlbc()); + /* + * HW Guest CSR registers are lost after CPU suspend and resume. + * Clear last_vcpu so that Guest CSR registers are forced to reload + * from vCPU SW state. + */ + this_cpu_ptr(vmcs)->last_vcpu = NULL; + return 0; } diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c index 20f941af3e9ea..9e1a9b4aa4c6a 100644 --- a/arch/loongarch/kvm/vcpu.c +++ b/arch/loongarch/kvm/vcpu.c @@ -311,7 +311,7 @@ static int kvm_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu) { int ret = RESUME_GUEST; unsigned long estat = vcpu->arch.host_estat; - u32 intr = estat & 0x1fff; /* Ignore NMI */ + u32 intr = estat & CSR_ESTAT_IS; u32 ecode = (estat & CSR_ESTAT_EXC) >> CSR_ESTAT_EXC_SHIFT; vcpu->mode = OUTSIDE_GUEST_MODE; diff --git a/arch/loongarch/kvm/vm.c b/arch/loongarch/kvm/vm.c index b8b3e1972d6ea..edccfc8c9cd80 100644 --- a/arch/loongarch/kvm/vm.c +++ b/arch/loongarch/kvm/vm.c @@ -48,7 +48,11 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) if (kvm_pvtime_supported()) kvm->arch.pv_features |= BIT(KVM_FEATURE_STEAL_TIME); - kvm->arch.gpa_size = BIT(cpu_vabits - 1); + /* + * cpu_vabits means user address space only (a half of total). + * GPA size of VM is the same as the size of user address space. + */ + kvm->arch.gpa_size = BIT(cpu_vabits); kvm->arch.root_level = CONFIG_PGTABLE_LEVELS - 1; kvm->arch.invalid_ptes[0] = 0; kvm->arch.invalid_ptes[1] = (unsigned long)invalid_pte_table; diff --git a/arch/loongarch/mm/mmap.c b/arch/loongarch/mm/mmap.c index 914e82ff3f656..1df9e99582cc6 100644 --- a/arch/loongarch/mm/mmap.c +++ b/arch/loongarch/mm/mmap.c @@ -3,6 +3,7 @@ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited */ #include +#include #include #include #include @@ -63,8 +64,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp, } info.length = len; - info.align_mask = do_color_align ? (PAGE_MASK & SHM_ALIGN_MASK) : 0; info.align_offset = pgoff << PAGE_SHIFT; + if (filp && is_file_hugepages(filp)) + info.align_mask = huge_page_mask_align(filp); + else + info.align_mask = do_color_align ? (PAGE_MASK & SHM_ALIGN_MASK) : 0; if (dir == DOWN) { info.flags = VM_UNMAPPED_AREA_TOPDOWN; diff --git a/arch/m68k/include/asm/sun3_pgalloc.h b/arch/m68k/include/asm/sun3_pgalloc.h index f1ae4ed890db5..80afc3a187249 100644 --- a/arch/m68k/include/asm/sun3_pgalloc.h +++ b/arch/m68k/include/asm/sun3_pgalloc.h @@ -44,8 +44,10 @@ static inline pgd_t * pgd_alloc(struct mm_struct *mm) pgd_t *new_pgd; new_pgd = __pgd_alloc(mm, 0); - memcpy(new_pgd, swapper_pg_dir, PAGE_SIZE); - memset(new_pgd, 0, (PAGE_OFFSET >> PGDIR_SHIFT)); + if (likely(new_pgd != NULL)) { + memcpy(new_pgd, swapper_pg_dir, PAGE_SIZE); + memset(new_pgd, 0, (PAGE_OFFSET >> PGDIR_SHIFT)); + } return new_pgd; } diff --git a/arch/mips/boot/tools/relocs.c b/arch/mips/boot/tools/relocs.c index a88d66c46d7f7..9863e1d5c62e3 100644 --- a/arch/mips/boot/tools/relocs.c +++ b/arch/mips/boot/tools/relocs.c @@ -468,6 +468,8 @@ static void walk_relocs(int (*process)(struct section *sec, Elf_Rel *rel, Elf_Sym *sym, const char *symname)) { int i; + struct section *extab_sec = sec_lookup("__ex_table"); + int extab_index = extab_sec ? 
extab_sec - secs : -1; /* Walk through the relocations */ for (i = 0; i < ehdr.e_shnum; i++) { @@ -480,6 +482,9 @@ static void walk_relocs(int (*process)(struct section *sec, Elf_Rel *rel, if (sec->shdr.sh_type != SHT_REL_TYPE) continue; + if (sec->shdr.sh_info == extab_index) + continue; + sec_symtab = sec->link; sec_applies = &secs[sec->shdr.sh_info]; if (!(sec_applies->shdr.sh_flags & SHF_ALLOC)) diff --git a/arch/mips/include/asm/hugetlb.h b/arch/mips/include/asm/hugetlb.h index d0a86ce83de91..fbc71ddcf0f68 100644 --- a/arch/mips/include/asm/hugetlb.h +++ b/arch/mips/include/asm/hugetlb.h @@ -27,7 +27,8 @@ static inline int prepare_hugepage_range(struct file *file, #define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm, - unsigned long addr, pte_t *ptep) + unsigned long addr, pte_t *ptep, + unsigned long sz) { pte_t clear; pte_t pte = *ptep; @@ -42,13 +43,14 @@ static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) { pte_t pte; + unsigned long sz = huge_page_size(hstate_vma(vma)); /* * clear the huge pte entry firstly, so that the other smp threads will * not get old pte entry after finishing flush_tlb_page and before * setting new huge pte entry */ - pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep); + pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep, sz); flush_tlb_page(vma, addr); return pte; } diff --git a/arch/parisc/include/asm/hugetlb.h b/arch/parisc/include/asm/hugetlb.h index 5b3a5429f71b3..21e9ace177395 100644 --- a/arch/parisc/include/asm/hugetlb.h +++ b/arch/parisc/include/asm/hugetlb.h @@ -10,7 +10,7 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, #define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, - pte_t *ptep); + pte_t *ptep, unsigned long sz); #define __HAVE_ARCH_HUGE_PTEP_CLEAR_FLUSH static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma, diff --git a/arch/parisc/include/uapi/asm/socket.h b/arch/parisc/include/uapi/asm/socket.h index aa9cd4b951fe5..96831c9886065 100644 --- a/arch/parisc/include/uapi/asm/socket.h +++ b/arch/parisc/include/uapi/asm/socket.h @@ -132,16 +132,16 @@ #define SO_PASSPIDFD 0x404A #define SO_PEERPIDFD 0x404B -#define SO_DEVMEM_LINEAR 78 -#define SCM_DEVMEM_LINEAR SO_DEVMEM_LINEAR -#define SO_DEVMEM_DMABUF 79 -#define SCM_DEVMEM_DMABUF SO_DEVMEM_DMABUF -#define SO_DEVMEM_DONTNEED 80 - #define SCM_TS_OPT_ID 0x404C #define SO_RCVPRIORITY 0x404D +#define SO_DEVMEM_LINEAR 0x404E +#define SCM_DEVMEM_LINEAR SO_DEVMEM_LINEAR +#define SO_DEVMEM_DMABUF 0x404F +#define SCM_DEVMEM_DMABUF SO_DEVMEM_DMABUF +#define SO_DEVMEM_DONTNEED 0x4050 + #if !defined(__KERNEL__) #if __BITS_PER_LONG == 64 diff --git a/arch/parisc/mm/hugetlbpage.c b/arch/parisc/mm/hugetlbpage.c index e9d18cf25b792..a94fe546d434f 100644 --- a/arch/parisc/mm/hugetlbpage.c +++ b/arch/parisc/mm/hugetlbpage.c @@ -126,7 +126,7 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, - pte_t *ptep) + pte_t *ptep, unsigned long sz) { pte_t entry; diff --git a/arch/powerpc/configs/ppc6xx_defconfig b/arch/powerpc/configs/ppc6xx_defconfig index ca0c90e958379..e69260b5d67f0 100644 --- a/arch/powerpc/configs/ppc6xx_defconfig +++ b/arch/powerpc/configs/ppc6xx_defconfig @@ -485,7 +485,6 @@ CONFIG_VIA_VELOCITY=m CONFIG_PCMCIA_XIRC2PS=m CONFIG_FDDI=y CONFIG_SKFP=m -CONFIG_NET_SB1000=m CONFIG_BROADCOM_PHY=m 
CONFIG_CICADA_PHY=m CONFIG_DAVICOM_PHY=m diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h index dad2e7980f245..86326587e58de 100644 --- a/arch/powerpc/include/asm/hugetlb.h +++ b/arch/powerpc/include/asm/hugetlb.h @@ -45,7 +45,8 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, #define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm, - unsigned long addr, pte_t *ptep) + unsigned long addr, pte_t *ptep, + unsigned long sz) { return __pte(pte_update(mm, addr, ptep, ~0UL, 0, 1)); } @@ -55,8 +56,9 @@ static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) { pte_t pte; + unsigned long sz = huge_page_size(hstate_vma(vma)); - pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep); + pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep, sz); flush_hugetlb_page(vma, addr); return pte; } diff --git a/arch/riscv/Kconfig.socs b/arch/riscv/Kconfig.socs index 1916cf7ba450e..17606940bb523 100644 --- a/arch/riscv/Kconfig.socs +++ b/arch/riscv/Kconfig.socs @@ -26,6 +26,7 @@ config ARCH_SOPHGO config ARCH_SPACEMIT bool "SpacemiT SoCs" + select PINCTRL help This enables support for SpacemiT SoC platform hardware. diff --git a/arch/riscv/boot/dts/starfive/jh7110-pinfunc.h b/arch/riscv/boot/dts/starfive/jh7110-pinfunc.h index 256de17f52611..ae49c908e7fb3 100644 --- a/arch/riscv/boot/dts/starfive/jh7110-pinfunc.h +++ b/arch/riscv/boot/dts/starfive/jh7110-pinfunc.h @@ -89,7 +89,7 @@ #define GPOUT_SYS_SDIO1_DATA1 59 #define GPOUT_SYS_SDIO1_DATA2 60 #define GPOUT_SYS_SDIO1_DATA3 61 -#define GPOUT_SYS_SDIO1_DATA4 63 +#define GPOUT_SYS_SDIO1_DATA4 62 #define GPOUT_SYS_SDIO1_DATA5 63 #define GPOUT_SYS_SDIO1_DATA6 64 #define GPOUT_SYS_SDIO1_DATA7 65 diff --git a/arch/riscv/boot/dts/starfive/jh7110.dtsi b/arch/riscv/boot/dts/starfive/jh7110.dtsi index 0d8339357bad3..a7aed4a21b655 100644 --- a/arch/riscv/boot/dts/starfive/jh7110.dtsi +++ b/arch/riscv/boot/dts/starfive/jh7110.dtsi @@ -1022,7 +1022,6 @@ snps,force_thresh_dma_mode; snps,axi-config = <&stmmac_axi_setup>; snps,tso; - snps,en-tx-lpi-clockgating; snps,txpbl = <16>; snps,rxpbl = <16>; starfive,syscon = <&aon_syscon 0xc 0x12>; @@ -1053,7 +1052,6 @@ snps,force_thresh_dma_mode; snps,axi-config = <&stmmac_axi_setup>; snps,tso; - snps,en-tx-lpi-clockgating; snps,txpbl = <16>; snps,rxpbl = <16>; starfive,syscon = <&sys_syscon 0x90 0x2>; diff --git a/arch/riscv/include/asm/cmpxchg.h b/arch/riscv/include/asm/cmpxchg.h index 4cadc56220fea..427c41dde6431 100644 --- a/arch/riscv/include/asm/cmpxchg.h +++ b/arch/riscv/include/asm/cmpxchg.h @@ -231,7 +231,7 @@ __arch_cmpxchg(".w", ".w" sc_sfx, ".w" cas_sfx, \ sc_prepend, sc_append, \ cas_prepend, cas_append, \ - __ret, __ptr, (long), __old, __new); \ + __ret, __ptr, (long)(int)(long), __old, __new); \ break; \ case 8: \ __arch_cmpxchg(".d", ".d" sc_sfx, ".d" cas_sfx, \ diff --git a/arch/riscv/include/asm/futex.h b/arch/riscv/include/asm/futex.h index 72be100afa236..90c86b115e008 100644 --- a/arch/riscv/include/asm/futex.h +++ b/arch/riscv/include/asm/futex.h @@ -93,7 +93,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, _ASM_EXTABLE_UACCESS_ERR(1b, 3b, %[r]) \ _ASM_EXTABLE_UACCESS_ERR(2b, 3b, %[r]) \ : [r] "+r" (ret), [v] "=&r" (val), [u] "+m" (*uaddr), [t] "=&r" (tmp) - : [ov] "Jr" (oldval), [nv] "Jr" (newval) + : [ov] "Jr" ((long)(int)oldval), [nv] "Jr" (newval) : "memory"); __disable_user_access(); diff --git a/arch/riscv/include/asm/hugetlb.h 
b/arch/riscv/include/asm/hugetlb.h index faf3624d80577..4461264977684 100644 --- a/arch/riscv/include/asm/hugetlb.h +++ b/arch/riscv/include/asm/hugetlb.h @@ -28,7 +28,8 @@ void set_huge_pte_at(struct mm_struct *mm, #define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR pte_t huge_ptep_get_and_clear(struct mm_struct *mm, - unsigned long addr, pte_t *ptep); + unsigned long addr, pte_t *ptep, + unsigned long sz); #define __HAVE_ARCH_HUGE_PTEP_CLEAR_FLUSH pte_t huge_ptep_clear_flush(struct vm_area_struct *vma, diff --git a/arch/riscv/kernel/cacheinfo.c b/arch/riscv/kernel/cacheinfo.c index 2d40736fc37ce..26b085dbdd073 100644 --- a/arch/riscv/kernel/cacheinfo.c +++ b/arch/riscv/kernel/cacheinfo.c @@ -108,11 +108,11 @@ int populate_cache_leaves(unsigned int cpu) if (!np) return -ENOENT; - if (of_property_read_bool(np, "cache-size")) + if (of_property_present(np, "cache-size")) ci_leaf_init(this_leaf++, CACHE_TYPE_UNIFIED, level); - if (of_property_read_bool(np, "i-cache-size")) + if (of_property_present(np, "i-cache-size")) ci_leaf_init(this_leaf++, CACHE_TYPE_INST, level); - if (of_property_read_bool(np, "d-cache-size")) + if (of_property_present(np, "d-cache-size")) ci_leaf_init(this_leaf++, CACHE_TYPE_DATA, level); prev = np; @@ -125,11 +125,11 @@ int populate_cache_leaves(unsigned int cpu) break; if (level <= levels) break; - if (of_property_read_bool(np, "cache-size")) + if (of_property_present(np, "cache-size")) ci_leaf_init(this_leaf++, CACHE_TYPE_UNIFIED, level); - if (of_property_read_bool(np, "i-cache-size")) + if (of_property_present(np, "i-cache-size")) ci_leaf_init(this_leaf++, CACHE_TYPE_INST, level); - if (of_property_read_bool(np, "d-cache-size")) + if (of_property_present(np, "d-cache-size")) ci_leaf_init(this_leaf++, CACHE_TYPE_DATA, level); levels = level; } diff --git a/arch/riscv/kernel/cpufeature.c b/arch/riscv/kernel/cpufeature.c index c6ba750536c32..40ac72e407b68 100644 --- a/arch/riscv/kernel/cpufeature.c +++ b/arch/riscv/kernel/cpufeature.c @@ -479,7 +479,7 @@ static void __init riscv_resolve_isa(unsigned long *source_isa, if (bit < RISCV_ISA_EXT_BASE) *this_hwcap |= isa2hwcap[bit]; } - } while (loop && memcmp(prev_resolved_isa, resolved_isa, sizeof(prev_resolved_isa))); + } while (loop && !bitmap_equal(prev_resolved_isa, resolved_isa, RISCV_ISA_EXT_MAX)); } static void __init match_isa_ext(const char *name, const char *name_end, unsigned long *bitmap) diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c index f1793630fc518..4fe45daa6281e 100644 --- a/arch/riscv/kernel/setup.c +++ b/arch/riscv/kernel/setup.c @@ -322,8 +322,8 @@ void __init setup_arch(char **cmdline_p) riscv_init_cbo_blocksizes(); riscv_fill_hwcap(); - init_rt_signal_env(); apply_boot_alternatives(); + init_rt_signal_env(); if (IS_ENABLED(CONFIG_RISCV_ISA_ZICBOM) && riscv_isa_extension_available(NULL, ZICBOM)) diff --git a/arch/riscv/kernel/signal.c b/arch/riscv/kernel/signal.c index 94e905eea1dee..08378fea3a111 100644 --- a/arch/riscv/kernel/signal.c +++ b/arch/riscv/kernel/signal.c @@ -215,12 +215,6 @@ static size_t get_rt_frame_size(bool cal_all) if (cal_all || riscv_v_vstate_query(task_pt_regs(current))) total_context_size += riscv_v_sc_size; } - /* - * Preserved a __riscv_ctx_hdr for END signal context header if an - * extension uses __riscv_extra_ext_header - */ - if (total_context_size) - total_context_size += sizeof(struct __riscv_ctx_hdr); frame_size += total_context_size; diff --git a/arch/riscv/kvm/aia_imsic.c b/arch/riscv/kvm/aia_imsic.c index a8085cd8215e3..29ef9c2133a93 100644 --- 
a/arch/riscv/kvm/aia_imsic.c +++ b/arch/riscv/kvm/aia_imsic.c @@ -974,7 +974,6 @@ int kvm_riscv_vcpu_aia_imsic_inject(struct kvm_vcpu *vcpu, if (imsic->vsfile_cpu >= 0) { writel(iid, imsic->vsfile_va + IMSIC_MMIO_SETIPNUM_LE); - kvm_vcpu_kick(vcpu); } else { eix = &imsic->swfile->eix[iid / BITS_PER_TYPE(u64)]; set_bit(iid & (BITS_PER_TYPE(u64) - 1), eix->eip); diff --git a/arch/riscv/kvm/vcpu_sbi_hsm.c b/arch/riscv/kvm/vcpu_sbi_hsm.c index dce667f4b6ab0..3070bb31745de 100644 --- a/arch/riscv/kvm/vcpu_sbi_hsm.c +++ b/arch/riscv/kvm/vcpu_sbi_hsm.c @@ -9,6 +9,7 @@ #include #include #include +#include #include #include @@ -79,12 +80,12 @@ static int kvm_sbi_hsm_vcpu_get_status(struct kvm_vcpu *vcpu) target_vcpu = kvm_get_vcpu_by_id(vcpu->kvm, target_vcpuid); if (!target_vcpu) return SBI_ERR_INVALID_PARAM; - if (!kvm_riscv_vcpu_stopped(target_vcpu)) - return SBI_HSM_STATE_STARTED; - else if (vcpu->stat.generic.blocking) + if (kvm_riscv_vcpu_stopped(target_vcpu)) + return SBI_HSM_STATE_STOPPED; + else if (target_vcpu->stat.generic.blocking) return SBI_HSM_STATE_SUSPENDED; else - return SBI_HSM_STATE_STOPPED; + return SBI_HSM_STATE_STARTED; } static int kvm_sbi_ext_hsm_handler(struct kvm_vcpu *vcpu, struct kvm_run *run, @@ -109,7 +110,7 @@ static int kvm_sbi_ext_hsm_handler(struct kvm_vcpu *vcpu, struct kvm_run *run, } return 0; case SBI_EXT_HSM_HART_SUSPEND: - switch (cp->a0) { + switch (lower_32_bits(cp->a0)) { case SBI_HSM_SUSPEND_RET_DEFAULT: kvm_riscv_vcpu_wfi(vcpu); break; diff --git a/arch/riscv/kvm/vcpu_sbi_replace.c b/arch/riscv/kvm/vcpu_sbi_replace.c index 9c2ab3dfa93aa..5fbf3f94f1e85 100644 --- a/arch/riscv/kvm/vcpu_sbi_replace.c +++ b/arch/riscv/kvm/vcpu_sbi_replace.c @@ -21,7 +21,7 @@ static int kvm_sbi_ext_time_handler(struct kvm_vcpu *vcpu, struct kvm_run *run, u64 next_cycle; if (cp->a6 != SBI_EXT_TIME_SET_TIMER) { - retdata->err_val = SBI_ERR_INVALID_PARAM; + retdata->err_val = SBI_ERR_NOT_SUPPORTED; return 0; } @@ -51,9 +51,10 @@ static int kvm_sbi_ext_ipi_handler(struct kvm_vcpu *vcpu, struct kvm_run *run, struct kvm_cpu_context *cp = &vcpu->arch.guest_context; unsigned long hmask = cp->a0; unsigned long hbase = cp->a1; + unsigned long hart_bit = 0, sentmask = 0; if (cp->a6 != SBI_EXT_IPI_SEND_IPI) { - retdata->err_val = SBI_ERR_INVALID_PARAM; + retdata->err_val = SBI_ERR_NOT_SUPPORTED; return 0; } @@ -62,15 +63,23 @@ static int kvm_sbi_ext_ipi_handler(struct kvm_vcpu *vcpu, struct kvm_run *run, if (hbase != -1UL) { if (tmp->vcpu_id < hbase) continue; - if (!(hmask & (1UL << (tmp->vcpu_id - hbase)))) + hart_bit = tmp->vcpu_id - hbase; + if (hart_bit >= __riscv_xlen) + goto done; + if (!(hmask & (1UL << hart_bit))) continue; } ret = kvm_riscv_vcpu_set_interrupt(tmp, IRQ_VS_SOFT); if (ret < 0) break; + sentmask |= 1UL << hart_bit; kvm_riscv_vcpu_pmu_incr_fw(tmp, SBI_PMU_FW_IPI_RCVD); } +done: + if (hbase != -1UL && (hmask ^ sentmask)) + retdata->err_val = SBI_ERR_INVALID_PARAM; + return ret; } diff --git a/arch/riscv/kvm/vcpu_sbi_system.c b/arch/riscv/kvm/vcpu_sbi_system.c index 5d55e08791fa1..bc0ebba890037 100644 --- a/arch/riscv/kvm/vcpu_sbi_system.c +++ b/arch/riscv/kvm/vcpu_sbi_system.c @@ -4,6 +4,7 @@ */ #include +#include #include #include @@ -19,7 +20,7 @@ static int kvm_sbi_ext_susp_handler(struct kvm_vcpu *vcpu, struct kvm_run *run, switch (funcid) { case SBI_EXT_SUSP_SYSTEM_SUSPEND: - if (cp->a0 != SBI_SUSP_SLEEP_TYPE_SUSPEND_TO_RAM) { + if (lower_32_bits(cp->a0) != SBI_SUSP_SLEEP_TYPE_SUSPEND_TO_RAM) { retdata->err_val = SBI_ERR_INVALID_PARAM; return 0; } diff --git 
a/arch/riscv/mm/hugetlbpage.c b/arch/riscv/mm/hugetlbpage.c index 42314f0939220..b4a78a4b35cff 100644 --- a/arch/riscv/mm/hugetlbpage.c +++ b/arch/riscv/mm/hugetlbpage.c @@ -293,7 +293,7 @@ int huge_ptep_set_access_flags(struct vm_area_struct *vma, pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, - pte_t *ptep) + pte_t *ptep, unsigned long sz) { pte_t orig_pte = ptep_get(ptep); int pte_num; diff --git a/arch/s390/boot/startup.c b/arch/s390/boot/startup.c index 885bd1dd2c82f..9276e0576d0ab 100644 --- a/arch/s390/boot/startup.c +++ b/arch/s390/boot/startup.c @@ -86,7 +86,7 @@ static int cmma_test_essa(void) : [reg1] "=&d" (reg1), [reg2] "=&a" (reg2), [rc] "+&d" (rc), - [tmp] "=&d" (tmp), + [tmp] "+&d" (tmp), "+Q" (get_lowcore()->program_new_psw), "=Q" (old) : [psw_old] "a" (&old), diff --git a/arch/s390/configs/debug_defconfig b/arch/s390/configs/debug_defconfig index 44f01a4bc810f..80bdfbae6e5b4 100644 --- a/arch/s390/configs/debug_defconfig +++ b/arch/s390/configs/debug_defconfig @@ -469,6 +469,7 @@ CONFIG_SCSI_DH_ALUA=m CONFIG_MD=y CONFIG_BLK_DEV_MD=y # CONFIG_MD_BITMAP_FILE is not set +CONFIG_MD_LINEAR=m CONFIG_MD_CLUSTER=m CONFIG_BCACHE=m CONFIG_BLK_DEV_DM=y @@ -874,6 +875,7 @@ CONFIG_RCU_CPU_STALL_TIMEOUT=300 CONFIG_LATENCYTOP=y CONFIG_BOOTTIME_TRACING=y CONFIG_FUNCTION_GRAPH_RETVAL=y +CONFIG_FUNCTION_GRAPH_RETADDR=y CONFIG_FPROBE=y CONFIG_FUNCTION_PROFILER=y CONFIG_STACK_TRACER=y diff --git a/arch/s390/configs/defconfig b/arch/s390/configs/defconfig index 8bcd37edd3c97..449a0e996b963 100644 --- a/arch/s390/configs/defconfig +++ b/arch/s390/configs/defconfig @@ -459,6 +459,7 @@ CONFIG_SCSI_DH_ALUA=m CONFIG_MD=y CONFIG_BLK_DEV_MD=y # CONFIG_MD_BITMAP_FILE is not set +CONFIG_MD_LINEAR=m CONFIG_MD_CLUSTER=m CONFIG_BCACHE=m CONFIG_BLK_DEV_DM=y @@ -825,6 +826,7 @@ CONFIG_RCU_CPU_STALL_TIMEOUT=60 CONFIG_LATENCYTOP=y CONFIG_BOOTTIME_TRACING=y CONFIG_FUNCTION_GRAPH_RETVAL=y +CONFIG_FUNCTION_GRAPH_RETADDR=y CONFIG_FPROBE=y CONFIG_FUNCTION_PROFILER=y CONFIG_STACK_TRACER=y diff --git a/arch/s390/include/asm/hugetlb.h b/arch/s390/include/asm/hugetlb.h index 7c52acaf9f828..663e87220e89f 100644 --- a/arch/s390/include/asm/hugetlb.h +++ b/arch/s390/include/asm/hugetlb.h @@ -25,8 +25,16 @@ void __set_huge_pte_at(struct mm_struct *mm, unsigned long addr, #define __HAVE_ARCH_HUGE_PTEP_GET pte_t huge_ptep_get(struct mm_struct *mm, unsigned long addr, pte_t *ptep); +pte_t __huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, + pte_t *ptep); + #define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR -pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep); +static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm, + unsigned long addr, pte_t *ptep, + unsigned long sz) +{ + return __huge_ptep_get_and_clear(mm, addr, ptep); +} static inline void arch_clear_hugetlb_flags(struct folio *folio) { @@ -48,7 +56,7 @@ static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr, static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) { - return huge_ptep_get_and_clear(vma->vm_mm, address, ptep); + return __huge_ptep_get_and_clear(vma->vm_mm, address, ptep); } #define __HAVE_ARCH_HUGE_PTEP_SET_ACCESS_FLAGS @@ -59,7 +67,7 @@ static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma, int changed = !pte_same(huge_ptep_get(vma->vm_mm, addr, ptep), pte); if (changed) { - huge_ptep_get_and_clear(vma->vm_mm, addr, ptep); + __huge_ptep_get_and_clear(vma->vm_mm, addr, ptep); 
__set_huge_pte_at(vma->vm_mm, addr, ptep, pte); } return changed; @@ -69,7 +77,7 @@ static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma, static inline void huge_ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { - pte_t pte = huge_ptep_get_and_clear(mm, addr, ptep); + pte_t pte = __huge_ptep_get_and_clear(mm, addr, ptep); __set_huge_pte_at(mm, addr, ptep, pte_wrprotect(pte)); } diff --git a/arch/s390/include/asm/irq.h b/arch/s390/include/asm/irq.h index d9e705f4a697e..bde6a496df5f8 100644 --- a/arch/s390/include/asm/irq.h +++ b/arch/s390/include/asm/irq.h @@ -54,7 +54,6 @@ enum interruption_class { IRQIO_C70, IRQIO_TAP, IRQIO_VMR, - IRQIO_LCS, IRQIO_CTC, IRQIO_ADM, IRQIO_CSC, diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c index 63ba6306632ef..e540b022ceb23 100644 --- a/arch/s390/kernel/ftrace.c +++ b/arch/s390/kernel/ftrace.c @@ -266,12 +266,13 @@ void ftrace_graph_func(unsigned long ip, unsigned long parent_ip, struct ftrace_ops *op, struct ftrace_regs *fregs) { unsigned long *parent = &arch_ftrace_regs(fregs)->regs.gprs[14]; + unsigned long sp = arch_ftrace_regs(fregs)->regs.gprs[15]; if (unlikely(ftrace_graph_is_dead())) return; if (unlikely(atomic_read(¤t->tracing_graph_pause))) return; - if (!function_graph_enter_regs(*parent, ip, 0, parent, fregs)) + if (!function_graph_enter_regs(*parent, ip, 0, (unsigned long *)sp, fregs)) *parent = (unsigned long)&return_to_handler; } diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c index ef7be599e1f78..7ca157ffab30e 100644 --- a/arch/s390/kernel/irq.c +++ b/arch/s390/kernel/irq.c @@ -84,7 +84,6 @@ static const struct irq_class irqclass_sub_desc[] = { {.irq = IRQIO_C70, .name = "C70", .desc = "[I/O] 3270"}, {.irq = IRQIO_TAP, .name = "TAP", .desc = "[I/O] Tape"}, {.irq = IRQIO_VMR, .name = "VMR", .desc = "[I/O] Unit Record Devices"}, - {.irq = IRQIO_LCS, .name = "LCS", .desc = "[I/O] LCS"}, {.irq = IRQIO_CTC, .name = "CTC", .desc = "[I/O] CTC"}, {.irq = IRQIO_ADM, .name = "ADM", .desc = "[I/O] EADM Subchannel"}, {.irq = IRQIO_CSC, .name = "CSC", .desc = "[I/O] CHSC Subchannel"}, diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c index 24fee11b030d8..b746213d3110c 100644 --- a/arch/s390/kernel/traps.c +++ b/arch/s390/kernel/traps.c @@ -285,10 +285,10 @@ static void __init test_monitor_call(void) return; asm volatile( " mc 0,0\n" - "0: xgr %0,%0\n" + "0: lhi %[val],0\n" "1:\n" - EX_TABLE(0b,1b) - : "+d" (val)); + EX_TABLE(0b, 1b) + : [val] "+d" (val)); if (!val) panic("Monitor call doesn't work!\n"); } diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c index d9ce199953de9..2e568f175cd41 100644 --- a/arch/s390/mm/hugetlbpage.c +++ b/arch/s390/mm/hugetlbpage.c @@ -188,8 +188,8 @@ pte_t huge_ptep_get(struct mm_struct *mm, unsigned long addr, pte_t *ptep) return __rste_to_pte(pte_val(*ptep)); } -pte_t huge_ptep_get_and_clear(struct mm_struct *mm, - unsigned long addr, pte_t *ptep) +pte_t __huge_ptep_get_and_clear(struct mm_struct *mm, + unsigned long addr, pte_t *ptep) { pte_t pte = huge_ptep_get(mm, addr, ptep); pmd_t *pmdp = (pmd_t *) ptep; diff --git a/arch/s390/purgatory/Makefile b/arch/s390/purgatory/Makefile index bdcf2a3b6c41b..bd39b36e7bd68 100644 --- a/arch/s390/purgatory/Makefile +++ b/arch/s390/purgatory/Makefile @@ -8,7 +8,7 @@ PURGATORY_OBJS = $(addprefix $(obj)/,$(purgatory-y)) $(obj)/sha256.o: $(srctree)/lib/crypto/sha256.c FORCE $(call if_changed_rule,cc_o_c) -CFLAGS_sha256.o := -D__DISABLE_EXPORTS -D__NO_FORTIFY +CFLAGS_sha256.o 
:= -D__NO_FORTIFY $(obj)/mem.o: $(srctree)/arch/s390/lib/mem.S FORCE $(call if_changed_rule,as_o_S) @@ -19,9 +19,11 @@ KBUILD_CFLAGS += -fno-zero-initialized-in-bss -fno-builtin -ffreestanding KBUILD_CFLAGS += -Os -m64 -msoft-float -fno-common KBUILD_CFLAGS += -fno-stack-protector KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING +KBUILD_CFLAGS += -D__DISABLE_EXPORTS KBUILD_CFLAGS += $(CLANG_FLAGS) KBUILD_CFLAGS += $(call cc-option,-fno-PIE) KBUILD_AFLAGS := $(filter-out -DCC_USING_EXPOLINE,$(KBUILD_AFLAGS)) +KBUILD_AFLAGS += -D__DISABLE_EXPORTS # Since we link purgatory with -r unresolved symbols are not checked, so we # also link a purgatory.chk binary without -r to check for unresolved symbols. diff --git a/arch/sparc/include/asm/hugetlb.h b/arch/sparc/include/asm/hugetlb.h index c714ca6a05aa0..e7a9cdd498dca 100644 --- a/arch/sparc/include/asm/hugetlb.h +++ b/arch/sparc/include/asm/hugetlb.h @@ -20,7 +20,7 @@ void __set_huge_pte_at(struct mm_struct *mm, unsigned long addr, #define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, - pte_t *ptep); + pte_t *ptep, unsigned long sz); #define __HAVE_ARCH_HUGE_PTEP_CLEAR_FLUSH static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma, diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c index eee601a0d2cfb..80504148d8a5b 100644 --- a/arch/sparc/mm/hugetlbpage.c +++ b/arch/sparc/mm/hugetlbpage.c @@ -260,7 +260,7 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, } pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, - pte_t *ptep) + pte_t *ptep, unsigned long sz) { unsigned int i, nptes, orig_shift, shift; unsigned long size; diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index be2c311f5118d..0e27ebd7e36a9 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -1341,6 +1341,7 @@ config X86_REBOOTFIXUPS config MICROCODE def_bool y depends on CPU_SUP_AMD || CPU_SUP_INTEL + select CRYPTO_LIB_SHA256 if CPU_SUP_AMD config MICROCODE_INITRD32 def_bool y diff --git a/arch/x86/boot/compressed/pgtable_64.c b/arch/x86/boot/compressed/pgtable_64.c index c882e1f67af01..d8c5de40669d3 100644 --- a/arch/x86/boot/compressed/pgtable_64.c +++ b/arch/x86/boot/compressed/pgtable_64.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 #include "misc.h" #include +#include #include #include #include "pgtable.h" @@ -107,6 +108,7 @@ asmlinkage void configure_5level_paging(struct boot_params *bp, void *pgtable) bool l5_required = false; /* Initialize boot_params. Required for cmdline_find_option_bool(). 
*/ + sanitize_boot_params(bp); boot_params_ptr = bp; /* diff --git a/arch/x86/coco/sev/core.c b/arch/x86/coco/sev/core.c index 82492efc5d949..96c7bc698e6b6 100644 --- a/arch/x86/coco/sev/core.c +++ b/arch/x86/coco/sev/core.c @@ -2853,19 +2853,8 @@ struct snp_msg_desc *snp_msg_alloc(void) if (!mdesc->response) goto e_free_request; - mdesc->certs_data = alloc_shared_pages(SEV_FW_BLOB_MAX_SIZE); - if (!mdesc->certs_data) - goto e_free_response; - - /* initial the input address for guest request */ - mdesc->input.req_gpa = __pa(mdesc->request); - mdesc->input.resp_gpa = __pa(mdesc->response); - mdesc->input.data_gpa = __pa(mdesc->certs_data); - return mdesc; -e_free_response: - free_shared_pages(mdesc->response, sizeof(struct snp_guest_msg)); e_free_request: free_shared_pages(mdesc->request, sizeof(struct snp_guest_msg)); e_unmap: @@ -2885,7 +2874,6 @@ void snp_msg_free(struct snp_msg_desc *mdesc) kfree(mdesc->ctx); free_shared_pages(mdesc->response, sizeof(struct snp_guest_msg)); free_shared_pages(mdesc->request, sizeof(struct snp_guest_msg)); - free_shared_pages(mdesc->certs_data, SEV_FW_BLOB_MAX_SIZE); iounmap((__force void __iomem *)mdesc->secrets); memset(mdesc, 0, sizeof(*mdesc)); @@ -3054,7 +3042,7 @@ static int __handle_guest_request(struct snp_msg_desc *mdesc, struct snp_guest_r * sequence number must be incremented or the VMPCK must be deleted to * prevent reuse of the IV. */ - rc = snp_issue_guest_request(req, &mdesc->input, rio); + rc = snp_issue_guest_request(req, &req->input, rio); switch (rc) { case -ENOSPC: /* @@ -3064,7 +3052,7 @@ static int __handle_guest_request(struct snp_msg_desc *mdesc, struct snp_guest_r * order to increment the sequence number and thus avoid * IV reuse. */ - override_npages = mdesc->input.data_npages; + override_npages = req->input.data_npages; req->exit_code = SVM_VMGEXIT_GUEST_REQUEST; /* @@ -3120,7 +3108,7 @@ static int __handle_guest_request(struct snp_msg_desc *mdesc, struct snp_guest_r } if (override_npages) - mdesc->input.data_npages = override_npages; + req->input.data_npages = override_npages; return rc; } @@ -3158,6 +3146,11 @@ int snp_send_guest_request(struct snp_msg_desc *mdesc, struct snp_guest_req *req */ memcpy(mdesc->request, &mdesc->secret_request, sizeof(mdesc->secret_request)); + /* Initialize the input address for guest request */ + req->input.req_gpa = __pa(mdesc->request); + req->input.resp_gpa = __pa(mdesc->response); + req->input.data_gpa = req->certs_data ? __pa(req->certs_data) : 0; + rc = __handle_guest_request(mdesc, req, rio); if (rc) { if (rc == -EIO && diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c index 94941c5a10ac1..14db5b85114c1 100644 --- a/arch/x86/entry/common.c +++ b/arch/x86/entry/common.c @@ -190,6 +190,7 @@ static __always_inline bool int80_is_external(void) /** * do_int80_emulation - 32-bit legacy syscall C entry from asm + * @regs: syscall arguments in struct pt_regs on the stack. + * * This entry point can be used by 32-bit and 64-bit programs to perform * 32-bit system calls. 
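For orientation, the register convention this entry point decodes from pt_regs is the classic i386 one: eax carries the syscall number (and the return value), while ebx, ecx, edx, esi, edi and ebp carry up to six arguments. A descriptive sketch, with field names chosen for illustration rather than matching the kernel's struct pt_regs layout:

struct int80_abi_sketch {
        unsigned int nr;    /* regs->ax: syscall number, also the return slot */
        unsigned int arg1;  /* regs->bx */
        unsigned int arg2;  /* regs->cx */
        unsigned int arg3;  /* regs->dx */
        unsigned int arg4;  /* regs->si */
        unsigned int arg5;  /* regs->di */
        unsigned int arg6;  /* regs->bp */
};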
Instances of INT $0x80 can be found inline in diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c index 8f218ac0d445c..2092d615333da 100644 --- a/arch/x86/events/core.c +++ b/arch/x86/events/core.c @@ -628,7 +628,7 @@ int x86_pmu_hw_config(struct perf_event *event) if (event->attr.type == event->pmu->type) event->hw.config |= x86_pmu_get_event_config(event); - if (event->attr.sample_period && x86_pmu.limit_period) { + if (!event->attr.freq && x86_pmu.limit_period) { s64 left = event->attr.sample_period; x86_pmu.limit_period(event, &left); if (left > event->attr.sample_period) diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index e86333eee2668..cdb19e3ba3aa3 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -397,34 +397,28 @@ static struct event_constraint intel_lnc_event_constraints[] = { METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_FETCH_LAT, 6), METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_MEM_BOUND, 7), + INTEL_EVENT_CONSTRAINT(0x20, 0xf), + + INTEL_UEVENT_CONSTRAINT(0x012a, 0xf), + INTEL_UEVENT_CONSTRAINT(0x012b, 0xf), INTEL_UEVENT_CONSTRAINT(0x0148, 0x4), INTEL_UEVENT_CONSTRAINT(0x0175, 0x4), INTEL_EVENT_CONSTRAINT(0x2e, 0x3ff), INTEL_EVENT_CONSTRAINT(0x3c, 0x3ff), - /* - * Generally event codes < 0x90 are restricted to counters 0-3. - * The 0x2E and 0x3C are exception, which has no restriction. - */ - INTEL_EVENT_CONSTRAINT_RANGE(0x01, 0x8f, 0xf), - INTEL_UEVENT_CONSTRAINT(0x01a3, 0xf), - INTEL_UEVENT_CONSTRAINT(0x02a3, 0xf), INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4), INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4), INTEL_UEVENT_CONSTRAINT(0x04a4, 0x1), INTEL_UEVENT_CONSTRAINT(0x08a4, 0x1), INTEL_UEVENT_CONSTRAINT(0x10a4, 0x1), INTEL_UEVENT_CONSTRAINT(0x01b1, 0x8), + INTEL_UEVENT_CONSTRAINT(0x01cd, 0x3fc), INTEL_UEVENT_CONSTRAINT(0x02cd, 0x3), - INTEL_EVENT_CONSTRAINT(0xce, 0x1), INTEL_EVENT_CONSTRAINT_RANGE(0xd0, 0xdf, 0xf), - /* - * Generally event codes >= 0x90 are likely to have no restrictions. - * The exception are defined as above. - */ - INTEL_EVENT_CONSTRAINT_RANGE(0x90, 0xfe, 0x3ff), + + INTEL_UEVENT_CONSTRAINT(0x00e0, 0xf), EVENT_CONSTRAINT_END }; @@ -3958,6 +3952,85 @@ static inline bool intel_pmu_has_cap(struct perf_event *event, int idx) return test_bit(idx, (unsigned long *)&intel_cap->capabilities); } +static u64 intel_pmu_freq_start_period(struct perf_event *event) +{ + int type = event->attr.type; + u64 config, factor; + s64 start; + + /* + * The 127 is the lowest possible recommended SAV (sample after value) + * for a 4000 freq (default freq), according to the event list JSON file. + * Also, assume the workload is idle 50% of the time. + */ + factor = 64 * 4000; + if (type != PERF_TYPE_HARDWARE && type != PERF_TYPE_HW_CACHE) + goto end; + + /* + * The estimation of the start period in the freq mode is + * based on the below assumption. + * + * For a cycles or an instructions event, 1GHz of the + * underlying platform, 1 IPC. The workload is idle 50% of the time. + * The start period = 1,000,000,000 * 1 / freq / 2. + * = 500,000,000 / freq + * + * Usually, the branch-related events occur less often than the + * instructions event. According to the Intel event list JSON + * file, the SAV (sample after value) of a branch-related event + * is usually 1/4 of an instruction event. + * The start period of branch-related events = 125,000,000 / freq. + * + * The cache-related events occur even less often. The SAV is usually + * 1/20 of an instruction event. + * The start period of cache-related events = 25,000,000 / freq. 
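To make the arithmetic above concrete, here is a standalone sketch of the same estimate with the rounding and clamping used just below; at the default 4000 Hz frequency it gives 500,000,000 / 4000 - 1 = 124999 for cycles and 125,000,000 / 4000 - 1 = 31249 for branch events:

#include <stdint.h>

static uint64_t freq_start_period_sketch(uint64_t factor, uint64_t freq,
                                         uint64_t max_period)
{
        uint64_t start = (factor + freq - 1) / freq;  /* DIV_ROUND_UP */

        start -= 1;  /* nudge away from powers of two at the default freq */
        if (start > max_period)
                start = max_period;
        return start;
}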
+ */ + config = event->attr.config & PERF_HW_EVENT_MASK; + if (type == PERF_TYPE_HARDWARE) { + switch (config) { + case PERF_COUNT_HW_CPU_CYCLES: + case PERF_COUNT_HW_INSTRUCTIONS: + case PERF_COUNT_HW_BUS_CYCLES: + case PERF_COUNT_HW_STALLED_CYCLES_FRONTEND: + case PERF_COUNT_HW_STALLED_CYCLES_BACKEND: + case PERF_COUNT_HW_REF_CPU_CYCLES: + factor = 500000000; + break; + case PERF_COUNT_HW_BRANCH_INSTRUCTIONS: + case PERF_COUNT_HW_BRANCH_MISSES: + factor = 125000000; + break; + case PERF_COUNT_HW_CACHE_REFERENCES: + case PERF_COUNT_HW_CACHE_MISSES: + factor = 25000000; + break; + default: + goto end; + } + } + + if (type == PERF_TYPE_HW_CACHE) + factor = 25000000; +end: + /* + * Usually, a prime or a number with fewer factors (close to prime) + * is chosen as an SAV, which makes it less likely that the sampling + * period synchronizes with some periodic event in the workload. + * Subtract 1 so that, at least for the default freq, values near + * powers of two are avoided. + */ + start = DIV_ROUND_UP_ULL(factor, event->attr.sample_freq) - 1; + + if (start > x86_pmu.max_period) + start = x86_pmu.max_period; + + if (x86_pmu.limit_period) + x86_pmu.limit_period(event, &start); + + return start; +} + static int intel_pmu_hw_config(struct perf_event *event) { int ret = x86_pmu_hw_config(event); @@ -3969,6 +4042,12 @@ static int intel_pmu_hw_config(struct perf_event *event) if (ret) return ret; + if (event->attr.freq && event->attr.sample_freq) { + event->hw.sample_period = intel_pmu_freq_start_period(event); + event->hw.last_period = event->hw.sample_period; + local64_set(&event->hw.period_left, event->hw.sample_period); + } + if (event->attr.precise_ip) { if ((event->attr.config & INTEL_ARCH_EVENT_MASK) == INTEL_FIXED_VLBR_EVENT) return -EINVAL; diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c index c2e2eae7309c3..f122882ef278f 100644 --- a/arch/x86/events/intel/ds.c +++ b/arch/x86/events/intel/ds.c @@ -1199,7 +1199,7 @@ struct event_constraint intel_lnc_pebs_event_constraints[] = { INTEL_FLAGS_UEVENT_CONSTRAINT(0x100, 0x100000000ULL), /* INST_RETIRED.PREC_DIST */ INTEL_FLAGS_UEVENT_CONSTRAINT(0x0400, 0x800000000ULL), - INTEL_HYBRID_LDLAT_CONSTRAINT(0x1cd, 0x3ff), + INTEL_HYBRID_LDLAT_CONSTRAINT(0x1cd, 0x3fc), INTEL_HYBRID_STLAT_CONSTRAINT(0x2cd, 0x3), INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x11d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_LOADS */ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x12d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_STORES */ diff --git a/arch/x86/events/rapl.c b/arch/x86/events/rapl.c index 4952faf03e82d..6941f4811bec1 100644 --- a/arch/x86/events/rapl.c +++ b/arch/x86/events/rapl.c @@ -879,6 +879,7 @@ static const struct x86_cpu_id rapl_model_match[] __initconst = { X86_MATCH_VFM(INTEL_METEORLAKE_L, &model_skl), X86_MATCH_VFM(INTEL_ARROWLAKE_H, &model_skl), X86_MATCH_VFM(INTEL_ARROWLAKE, &model_skl), + X86_MATCH_VFM(INTEL_ARROWLAKE_U, &model_skl), X86_MATCH_VFM(INTEL_LUNARLAKE_M, &model_skl), {}, }; diff --git a/arch/x86/hyperv/hv_vtl.c b/arch/x86/hyperv/hv_vtl.c index 4e1b1e3b56584..3f4e20d7b724b 100644 --- a/arch/x86/hyperv/hv_vtl.c +++ b/arch/x86/hyperv/hv_vtl.c @@ -30,6 +30,7 @@ void __init hv_vtl_init_platform(void) x86_platform.realmode_init = x86_init_noop; x86_init.irqs.pre_vector_init = x86_init_noop; x86_init.timers.timer_init = x86_init_noop; + x86_init.resources.probe_roms = x86_init_noop; /* Avoid searching for BIOS MP tables */ x86_init.mpparse.find_mptable = x86_init_noop; diff --git a/arch/x86/hyperv/ivm.c b/arch/x86/hyperv/ivm.c index
dd68d9ad9b22c..ec7880271cf98 100644 --- a/arch/x86/hyperv/ivm.c +++ b/arch/x86/hyperv/ivm.c @@ -464,7 +464,6 @@ static int hv_mark_gpa_visibility(u16 count, const u64 pfn[], enum hv_mem_host_visibility visibility) { struct hv_gpa_range_for_visibility *input; - u16 pages_processed; u64 hv_status; unsigned long flags; @@ -493,7 +492,7 @@ static int hv_mark_gpa_visibility(u16 count, const u64 pfn[], memcpy((void *)input->gpa_page_list, pfn, count * sizeof(*pfn)); hv_status = hv_do_rep_hypercall( HVCALL_MODIFY_SPARSE_GPA_PAGE_HOST_VISIBILITY, count, - 0, input, &pages_processed); + 0, input, NULL); local_irq_restore(flags); if (hv_result_success(hv_status)) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 0b7af5902ff75..32ae3aa50c7e3 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -780,6 +780,7 @@ struct kvm_vcpu_arch { u32 pkru; u32 hflags; u64 efer; + u64 host_debugctl; u64 apic_base; struct kvm_lapic *apic; /* kernel irqchip context */ bool load_eoi_exitmap_pending; diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h index 7e8bf78c03d5d..aee26bb8230f8 100644 --- a/arch/x86/include/asm/nospec-branch.h +++ b/arch/x86/include/asm/nospec-branch.h @@ -198,9 +198,8 @@ .endm /* - * Equivalent to -mindirect-branch-cs-prefix; emit the 5 byte jmp/call - * to the retpoline thunk with a CS prefix when the register requires - * a RAX prefix byte to encode. Also see apply_retpolines(). + * Emits a conditional CS prefix that is compatible with + * -mindirect-branch-cs-prefix. */ .macro __CS_PREFIX reg:req .irp rs,r8,r9,r10,r11,r12,r13,r14,r15 @@ -420,20 +419,27 @@ static inline void call_depth_return_thunk(void) {} #ifdef CONFIG_X86_64 +/* + * Emits a conditional CS prefix that is compatible with + * -mindirect-branch-cs-prefix. + */ +#define __CS_PREFIX(reg) \ + ".irp rs,r8,r9,r10,r11,r12,r13,r14,r15\n" \ + ".ifc \\rs," reg "\n" \ + ".byte 0x2e\n" \ + ".endif\n" \ + ".endr\n" + /* * Inline asm uses the %V modifier which is only in newer GCC * which is ensured when CONFIG_MITIGATION_RETPOLINE is defined. */ -# define CALL_NOSPEC \ - ALTERNATIVE_2( \ - ANNOTATE_RETPOLINE_SAFE \ - "call *%[thunk_target]\n", \ - "call __x86_indirect_thunk_%V[thunk_target]\n", \ - X86_FEATURE_RETPOLINE, \ - "lfence;\n" \ - ANNOTATE_RETPOLINE_SAFE \ - "call *%[thunk_target]\n", \ - X86_FEATURE_RETPOLINE_LFENCE) +#ifdef CONFIG_MITIGATION_RETPOLINE +#define CALL_NOSPEC __CS_PREFIX("%V[thunk_target]") \ + "call __x86_indirect_thunk_%V[thunk_target]\n" +#else +#define CALL_NOSPEC "call *%[thunk_target]\n" +#endif # define THUNK_TARGET(addr) [thunk_target] "r" (addr) diff --git a/arch/x86/include/asm/pgtable-2level_types.h b/arch/x86/include/asm/pgtable-2level_types.h index 7f6ccff0ba727..4a12c276b1812 100644 --- a/arch/x86/include/asm/pgtable-2level_types.h +++ b/arch/x86/include/asm/pgtable-2level_types.h @@ -23,17 +23,17 @@ typedef union { #define ARCH_PAGE_TABLE_SYNC_MASK PGTBL_PMD_MODIFIED /* - * traditional i386 two-level paging structure: + * Traditional i386 two-level paging structure: */ #define PGDIR_SHIFT 22 #define PTRS_PER_PGD 1024 - /* - * the i386 is two-level, so we don't really have any - * PMD directory physically. 
+ * The i386 is two-level, so we don't really have any + * PMD directory physically: */ +#define PTRS_PER_PMD 1 #define PTRS_PER_PTE 1024 diff --git a/arch/x86/include/asm/sev.h b/arch/x86/include/asm/sev.h index 1581246491b54..ba7999f66abe6 100644 --- a/arch/x86/include/asm/sev.h +++ b/arch/x86/include/asm/sev.h @@ -203,6 +203,9 @@ struct snp_guest_req { unsigned int vmpck_id; u8 msg_version; u8 msg_type; + + struct snp_req_data input; + void *certs_data; }; /* @@ -263,9 +266,6 @@ struct snp_msg_desc { struct snp_guest_msg secret_request, secret_response; struct snp_secrets_page *secrets; - struct snp_req_data input; - - void *certs_data; struct aesgcm_ctx *ctx; diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c index 11fac09e3a8cb..67e773744edb2 100644 --- a/arch/x86/kernel/amd_nb.c +++ b/arch/x86/kernel/amd_nb.c @@ -143,7 +143,6 @@ bool __init early_is_amd_nb(u32 device) struct resource *amd_get_mmconfig_range(struct resource *res) { - u32 address; u64 base, msr; unsigned int segn_busn_bits; @@ -151,13 +150,11 @@ struct resource *amd_get_mmconfig_range(struct resource *res) boot_cpu_data.x86_vendor != X86_VENDOR_HYGON) return NULL; - /* assume all cpus from fam10h have mmconfig */ - if (boot_cpu_data.x86 < 0x10) + /* Assume CPUs from Fam10h have mmconfig, although not all VMs do */ + if (boot_cpu_data.x86 < 0x10 || + rdmsrl_safe(MSR_FAM10H_MMIO_CONF_BASE, &msr)) return NULL; - address = MSR_FAM10H_MMIO_CONF_BASE; - rdmsrl(address, msr); - /* mmconfig is not enabled */ if (!(msr & FAM10H_MMIO_CONF_ENABLE)) return NULL; diff --git a/arch/x86/kernel/cpu/cacheinfo.c b/arch/x86/kernel/cpu/cacheinfo.c index e6fa03ed9172c..a6c6bccfa8b8d 100644 --- a/arch/x86/kernel/cpu/cacheinfo.c +++ b/arch/x86/kernel/cpu/cacheinfo.c @@ -808,7 +808,7 @@ void init_intel_cacheinfo(struct cpuinfo_x86 *c) cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]); /* If bit 31 is set, this is an unknown format */ - for (j = 0 ; j < 3 ; j++) + for (j = 0 ; j < 4 ; j++) if (regs[j] & (1 << 31)) regs[j] = 0; diff --git a/arch/x86/kernel/cpu/cpuid-deps.c b/arch/x86/kernel/cpu/cpuid-deps.c index 8bd84114c2d96..df838e3bdbe02 100644 --- a/arch/x86/kernel/cpu/cpuid-deps.c +++ b/arch/x86/kernel/cpu/cpuid-deps.c @@ -45,6 +45,7 @@ static const struct cpuid_dep cpuid_deps[] = { { X86_FEATURE_AES, X86_FEATURE_XMM2 }, { X86_FEATURE_SHA_NI, X86_FEATURE_XMM2 }, { X86_FEATURE_GFNI, X86_FEATURE_XMM2 }, + { X86_FEATURE_AVX_VNNI, X86_FEATURE_AVX }, { X86_FEATURE_FMA, X86_FEATURE_AVX }, { X86_FEATURE_VAES, X86_FEATURE_AVX }, { X86_FEATURE_VPCLMULQDQ, X86_FEATURE_AVX }, diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c index 9651275aecd1b..dfec2c61e3547 100644 --- a/arch/x86/kernel/cpu/cyrix.c +++ b/arch/x86/kernel/cpu/cyrix.c @@ -153,8 +153,8 @@ static void geode_configure(void) u8 ccr3; local_irq_save(flags); - /* Suspend on halt power saving and enable #SUSP pin */ - setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x88); + /* Suspend on halt power saving */ + setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x08); ccr3 = getCx86(CX86_CCR3); setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */ diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index 3dce22f00dc34..134368a3f4b1e 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c @@ -635,26 +635,37 @@ static unsigned int intel_size_cache(struct cpuinfo_x86 *c, unsigned int size) } #endif -#define TLB_INST_4K 0x01 -#define TLB_INST_4M 0x02 -#define
TLB_INST_2M_4M 0x03 -#define TLB_INST_ALL 0x05 -#define TLB_INST_1G 0x06 +#define TLB_INST_ALL 0x05 +#define TLB_INST_1G 0x06 -#define TLB_DATA_4K 0x11 -#define TLB_DATA_4M 0x12 -#define TLB_DATA_2M_4M 0x13 -#define TLB_DATA_4K_4M 0x14 +#define TLB_DATA_4K 0x11 +#define TLB_DATA_4M 0x12 +#define TLB_DATA_2M_4M 0x13 +#define TLB_DATA_4K_4M 0x14 -#define TLB_DATA_1G 0x16 +#define TLB_DATA_1G 0x16 +#define TLB_DATA_1G_2M_4M 0x17 -#define TLB_DATA0_4K 0x21 -#define TLB_DATA0_4M 0x22 -#define TLB_DATA0_2M_4M 0x23 +#define TLB_DATA0_4K 0x21 +#define TLB_DATA0_4M 0x22 +#define TLB_DATA0_2M_4M 0x23 -#define STLB_4K 0x41 -#define STLB_4K_2M 0x42 +#define STLB_4K 0x41 +#define STLB_4K_2M 0x42 + +/* + * All of leaf 0x2's one-byte TLB descriptors imply the same number of + * entries for their respective TLB types. The 0x63 descriptor is an + * exception: it implies 4 dTLB entries for 1GB pages and 32 dTLB entries + * for 2MB or 4MB pages. Encode descriptor 0x63 dTLB entry count for + * 2MB/4MB pages here, as its count for dTLB 1GB pages is already in the + * intel_tlb_table[] mapping. + */ +#define TLB_0x63_2M_4M_ENTRIES 32 static const struct _tlb_table intel_tlb_table[] = { { 0x01, TLB_INST_4K, 32, " TLB_INST 4 KByte pages, 4-way set associative" }, @@ -676,7 +687,8 @@ static const struct _tlb_table intel_tlb_table[] = { { 0x5c, TLB_DATA_4K_4M, 128, " TLB_DATA 4 KByte and 4 MByte pages" }, { 0x5d, TLB_DATA_4K_4M, 256, " TLB_DATA 4 KByte and 4 MByte pages" }, { 0x61, TLB_INST_4K, 48, " TLB_INST 4 KByte pages, full associative" }, - { 0x63, TLB_DATA_1G, 4, " TLB_DATA 1 GByte pages, 4-way set associative" }, + { 0x63, TLB_DATA_1G_2M_4M, 4, " TLB_DATA 1 GByte pages, 4-way set associative" + " (plus 32 entries TLB_DATA 2 MByte or 4 MByte pages, not encoded here)" }, { 0x6b, TLB_DATA_4K, 256, " TLB_DATA 4 KByte pages, 8-way associative" }, { 0x6c, TLB_DATA_2M_4M, 128, " TLB_DATA 2 MByte or 4 MByte pages, 8-way associative" }, { 0x6d, TLB_DATA_1G, 16, " TLB_DATA 1 GByte pages, fully associative" }, @@ -776,6 +788,12 @@ static void intel_tlb_lookup(const unsigned char desc) if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries) tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries; break; + case TLB_DATA_1G_2M_4M: + if (tlb_lld_2m[ENTRIES] < TLB_0x63_2M_4M_ENTRIES) + tlb_lld_2m[ENTRIES] = TLB_0x63_2M_4M_ENTRIES; + if (tlb_lld_4m[ENTRIES] < TLB_0x63_2M_4M_ENTRIES) + tlb_lld_4m[ENTRIES] = TLB_0x63_2M_4M_ENTRIES; + fallthrough; case TLB_DATA_1G: if (tlb_lld_1g[ENTRIES] < intel_tlb_table[k].entries) tlb_lld_1g[ENTRIES] = intel_tlb_table[k].entries; @@ -799,7 +817,7 @@ static void intel_detect_tlb(struct cpuinfo_x86 *c) cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]); /* If bit 31 is set, this is an unknown format */ - for (j = 0 ; j < 3 ; j++) + for (j = 0 ; j < 4 ; j++) if (regs[j] & (1 << 31)) regs[j] = 0; diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c index a5dac7f3c0a07..138689b8e1d83 100644 --- a/arch/x86/kernel/cpu/microcode/amd.c +++ b/arch/x86/kernel/cpu/microcode/amd.c @@ -23,14 +23,18 @@ #include #include +#include <linux/bsearch.h> #include #include #include #include #include +#include <crypto/sha2.h> + #include #include +#include <asm/cmdline.h> #include #include #include @@ -145,6 +149,113 @@ ucode_path[] __maybe_unused = "kernel/x86/microcode/AuthenticAMD.bin"; */ static u32 bsp_cpuid_1_eax __ro_after_init; +static bool sha_check = true; + +struct patch_digest { + u32 patch_id; + u8 sha256[SHA256_DIGEST_SIZE]; +}; + +#include "amd_shas.c" + +static int cmp_id(const void *key, const void *elem) +{ + struct patch_digest *pd =
(struct patch_digest *)elem; + u32 patch_id = *(u32 *)key; + + if (patch_id == pd->patch_id) + return 0; + else if (patch_id < pd->patch_id) + return -1; + else + return 1; +} + +static bool need_sha_check(u32 cur_rev) +{ + switch (cur_rev >> 8) { + case 0x80012: return cur_rev <= 0x800126f; break; + case 0x80082: return cur_rev <= 0x800820f; break; + case 0x83010: return cur_rev <= 0x830107c; break; + case 0x86001: return cur_rev <= 0x860010e; break; + case 0x86081: return cur_rev <= 0x8608108; break; + case 0x87010: return cur_rev <= 0x8701034; break; + case 0x8a000: return cur_rev <= 0x8a0000a; break; + case 0xa0010: return cur_rev <= 0xa00107a; break; + case 0xa0011: return cur_rev <= 0xa0011da; break; + case 0xa0012: return cur_rev <= 0xa001243; break; + case 0xa0082: return cur_rev <= 0xa00820e; break; + case 0xa1011: return cur_rev <= 0xa101153; break; + case 0xa1012: return cur_rev <= 0xa10124e; break; + case 0xa1081: return cur_rev <= 0xa108109; break; + case 0xa2010: return cur_rev <= 0xa20102f; break; + case 0xa2012: return cur_rev <= 0xa201212; break; + case 0xa4041: return cur_rev <= 0xa404109; break; + case 0xa5000: return cur_rev <= 0xa500013; break; + case 0xa6012: return cur_rev <= 0xa60120a; break; + case 0xa7041: return cur_rev <= 0xa704109; break; + case 0xa7052: return cur_rev <= 0xa705208; break; + case 0xa7080: return cur_rev <= 0xa708009; break; + case 0xa70c0: return cur_rev <= 0xa70C009; break; + case 0xaa001: return cur_rev <= 0xaa00116; break; + case 0xaa002: return cur_rev <= 0xaa00218; break; + default: break; + } + + pr_info("You should not be seeing this. Please send the following couple of lines to x86-<at>-kernel.org\n"); + pr_info("CPUID(1).EAX: 0x%x, current revision: 0x%x\n", bsp_cpuid_1_eax, cur_rev); + return true; +} + +static bool verify_sha256_digest(u32 patch_id, u32 cur_rev, const u8 *data, unsigned int len) +{ + struct patch_digest *pd = NULL; + u8 digest[SHA256_DIGEST_SIZE]; + struct sha256_state s; + int i; + + if (x86_family(bsp_cpuid_1_eax) < 0x17 || + x86_family(bsp_cpuid_1_eax) > 0x19) + return true; + + if (!need_sha_check(cur_rev)) + return true; + + if (!sha_check) + return true; + + pd = bsearch(&patch_id, phashes, ARRAY_SIZE(phashes), sizeof(struct patch_digest), cmp_id); + if (!pd) { + pr_err("No sha256 digest for patch ID: 0x%x found\n", patch_id); + return false; + } + + sha256_init(&s); + sha256_update(&s, data, len); + sha256_final(&s, digest); + + if (memcmp(digest, pd->sha256, sizeof(digest))) { + pr_err("Patch 0x%x SHA256 digest mismatch!\n", patch_id); + + for (i = 0; i < SHA256_DIGEST_SIZE; i++) + pr_cont("0x%x ", digest[i]); + pr_info("\n"); + + return false; + } + + return true; +} + +static u32 get_patch_level(void) +{ + u32 rev, dummy __always_unused; + + native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy); + + return rev; +} + static union cpuid_1_eax ucode_rev_to_cpuid(unsigned int val) { union zen_patch_rev p; @@ -246,8 +357,7 @@ static bool verify_equivalence_table(const u8 *buf, size_t buf_size) * On success, @sh_psize returns the patch size according to the section header, * to the caller.
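The cmp_id()/bsearch() pairing above is the standard sorted-table lookup; a minimal user-space sketch of the same pattern (table contents are placeholders, not real digest data):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct patch_digest {
        uint32_t patch_id;
        uint8_t sha256[32];
};

/* Must stay sorted by patch_id, or bsearch() silently misses entries. */
static const struct patch_digest phashes[] = {
        { 0x8001227, { 0 } },
        { 0x8301025, { 0 } },
        { 0xa500011, { 0 } },
};

static int cmp_id(const void *key, const void *elem)
{
        uint32_t k = *(const uint32_t *)key;
        uint32_t e = ((const struct patch_digest *)elem)->patch_id;

        return (k > e) - (k < e);
}

int main(void)
{
        uint32_t id = 0x8301025;
        const struct patch_digest *pd;

        pd = bsearch(&id, phashes, sizeof(phashes) / sizeof(phashes[0]),
                     sizeof(phashes[0]), cmp_id);
        printf(pd ? "digest found for 0x%x\n" : "no digest for 0x%x\n", id);
        return 0;
}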
*/ -static bool -__verify_patch_section(const u8 *buf, size_t buf_size, u32 *sh_psize) +static bool __verify_patch_section(const u8 *buf, size_t buf_size, u32 *sh_psize) { u32 p_type, p_size; const u32 *hdr; @@ -484,10 +594,13 @@ static void scan_containers(u8 *ucode, size_t size, struct cont_desc *desc) } } -static bool __apply_microcode_amd(struct microcode_amd *mc, unsigned int psize) +static bool __apply_microcode_amd(struct microcode_amd *mc, u32 *cur_rev, + unsigned int psize) { unsigned long p_addr = (unsigned long)&mc->hdr.data_code; - u32 rev, dummy; + + if (!verify_sha256_digest(mc->hdr.patch_id, *cur_rev, (const u8 *)p_addr, psize)) + return false; native_wrmsrl(MSR_AMD64_PATCH_LOADER, p_addr); @@ -505,47 +618,13 @@ static bool __apply_microcode_amd(struct microcode_amd *mc, unsigned int psize) } /* verify patch application was successful */ - native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy); - - if (rev != mc->hdr.patch_id) + *cur_rev = get_patch_level(); + if (*cur_rev != mc->hdr.patch_id) return false; return true; } -/* - * Early load occurs before we can vmalloc(). So we look for the microcode - * patch container file in initrd, traverse equivalent cpu table, look for a - * matching microcode patch, and update, all in initrd memory in place. - * When vmalloc() is available for use later -- on 64-bit during first AP load, - * and on 32-bit during save_microcode_in_initrd_amd() -- we can call - * load_microcode_amd() to save equivalent cpu table and microcode patches in - * kernel heap memory. - * - * Returns true if container found (sets @desc), false otherwise. - */ -static bool early_apply_microcode(u32 old_rev, void *ucode, size_t size) -{ - struct cont_desc desc = { 0 }; - struct microcode_amd *mc; - - scan_containers(ucode, size, &desc); - - mc = desc.mc; - if (!mc) - return false; - - /* - * Allow application of the same revision to pick up SMT-specific - * changes even if the revision of the other SMT thread is already - * up-to-date. - */ - if (old_rev > mc->hdr.patch_id) - return false; - - return __apply_microcode_amd(mc, desc.psize); -} - static bool get_builtin_microcode(struct cpio_data *cp) { char fw_name[36] = "amd-ucode/microcode_amd.bin"; @@ -583,14 +662,35 @@ static bool __init find_blobs_in_containers(struct cpio_data *ret) return found; } +/* + * Early load occurs before we can vmalloc(). So we look for the microcode + * patch container file in initrd, traverse equivalent cpu table, look for a + * matching microcode patch, and update, all in initrd memory in place. + * When vmalloc() is available for use later -- on 64-bit during first AP load, + * and on 32-bit during save_microcode_in_initrd() -- we can call + * load_microcode_amd() to save equivalent cpu table and microcode patches in + * kernel heap memory.
+ */ void __init load_ucode_amd_bsp(struct early_load_data *ed, unsigned int cpuid_1_eax) { + struct cont_desc desc = { }; + struct microcode_amd *mc; struct cpio_data cp = { }; - u32 dummy; + char buf[4]; + u32 rev; + + if (cmdline_find_option(boot_command_line, "microcode.amd_sha_check", buf, 4)) { + if (!strncmp(buf, "off", 3)) { + sha_check = false; + pr_warn_once("It is a very very bad idea to disable the blobs SHA check!\n"); + add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK); + } + } bsp_cpuid_1_eax = cpuid_1_eax; - native_rdmsr(MSR_AMD64_PATCH_LEVEL, ed->old_rev, dummy); + rev = get_patch_level(); + ed->old_rev = rev; /* Needed in load_microcode_amd() */ ucode_cpu_info[0].cpu_sig.sig = cpuid_1_eax; @@ -598,37 +698,23 @@ void __init load_ucode_amd_bsp(struct early_load_data *ed, unsigned int cpuid_1_ if (!find_blobs_in_containers(&cp)) return; - if (early_apply_microcode(ed->old_rev, cp.data, cp.size)) - native_rdmsr(MSR_AMD64_PATCH_LEVEL, ed->new_rev, dummy); -} - -static enum ucode_state _load_microcode_amd(u8 family, const u8 *data, size_t size); - -static int __init save_microcode_in_initrd(void) -{ - unsigned int cpuid_1_eax = native_cpuid_eax(1); - struct cpuinfo_x86 *c = &boot_cpu_data; - struct cont_desc desc = { 0 }; - enum ucode_state ret; - struct cpio_data cp; - - if (dis_ucode_ldr || c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10) - return 0; - - if (!find_blobs_in_containers(&cp)) - return -EINVAL; - scan_containers(cp.data, cp.size, &desc); - if (!desc.mc) - return -EINVAL; - ret = _load_microcode_amd(x86_family(cpuid_1_eax), desc.data, desc.size); - if (ret > UCODE_UPDATED) - return -EINVAL; + mc = desc.mc; + if (!mc) + return; - return 0; + /* + * Allow application of the same revision to pick up SMT-specific + * changes even if the revision of the other SMT thread is already + * up-to-date. 
+ */ + if (ed->old_rev > mc->hdr.patch_id) + return; + + if (__apply_microcode_amd(mc, &rev, desc.psize)) + ed->new_rev = rev; } -early_initcall(save_microcode_in_initrd); static inline bool patch_cpus_equivalent(struct ucode_patch *p, struct ucode_patch *n, @@ -729,14 +815,9 @@ static void free_cache(void) static struct ucode_patch *find_patch(unsigned int cpu) { struct ucode_cpu_info *uci = ucode_cpu_info + cpu; - u32 rev, dummy __always_unused; u16 equiv_id = 0; - /* fetch rev if not populated yet: */ - if (!uci->cpu_sig.rev) { - rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy); - uci->cpu_sig.rev = rev; - } + uci->cpu_sig.rev = get_patch_level(); if (x86_family(bsp_cpuid_1_eax) < 0x17) { equiv_id = find_equiv_id(&equiv_table, uci->cpu_sig.sig); @@ -759,22 +840,20 @@ void reload_ucode_amd(unsigned int cpu) mc = p->data; - rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy); - + rev = get_patch_level(); if (rev < mc->hdr.patch_id) { - if (__apply_microcode_amd(mc, p->size)) - pr_info_once("reload revision: 0x%08x\n", mc->hdr.patch_id); + if (__apply_microcode_amd(mc, &rev, p->size)) + pr_info_once("reload revision: 0x%08x\n", rev); } } static int collect_cpu_info_amd(int cpu, struct cpu_signature *csig) { - struct cpuinfo_x86 *c = &cpu_data(cpu); struct ucode_cpu_info *uci = ucode_cpu_info + cpu; struct ucode_patch *p; csig->sig = cpuid_eax(0x00000001); - csig->rev = c->microcode; + csig->rev = get_patch_level(); /* * a patch could have been loaded early, set uci->mc so that @@ -815,7 +894,7 @@ static enum ucode_state apply_microcode_amd(int cpu) goto out; } - if (!__apply_microcode_amd(mc_amd, p->size)) { + if (!__apply_microcode_amd(mc_amd, &rev, p->size)) { pr_err("CPU%d: update failed for patch_level=0x%08x\n", cpu, mc_amd->hdr.patch_id); return UCODE_ERROR; @@ -937,8 +1016,7 @@ static int verify_and_add_patch(u8 family, u8 *fw, unsigned int leftover, } /* Scan the blob in @data and add microcode patches to the cache. */ -static enum ucode_state __load_microcode_amd(u8 family, const u8 *data, - size_t size) +static enum ucode_state __load_microcode_amd(u8 family, const u8 *data, size_t size) { u8 *fw = (u8 *)data; size_t offset; @@ -996,7 +1074,7 @@ static enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t siz if (ret != UCODE_OK) return ret; - for_each_node(nid) { + for_each_node_with_cpus(nid) { cpu = cpumask_first(cpumask_of_node(nid)); c = &cpu_data(cpu); @@ -1013,6 +1091,32 @@ static enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t siz return ret; } +static int __init save_microcode_in_initrd(void) +{ + unsigned int cpuid_1_eax = native_cpuid_eax(1); + struct cpuinfo_x86 *c = &boot_cpu_data; + struct cont_desc desc = { 0 }; + enum ucode_state ret; + struct cpio_data cp; + + if (dis_ucode_ldr || c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10) + return 0; + + if (!find_blobs_in_containers(&cp)) + return -EINVAL; + + scan_containers(cp.data, cp.size, &desc); + if (!desc.mc) + return -EINVAL; + + ret = _load_microcode_amd(x86_family(cpuid_1_eax), desc.data, desc.size); + if (ret > UCODE_UPDATED) + return -EINVAL; + + return 0; +} +early_initcall(save_microcode_in_initrd); + /* * AMD microcode firmware naming convention, up to family 15h they are in * the legacy file: diff --git a/arch/x86/kernel/cpu/microcode/amd_shas.c b/arch/x86/kernel/cpu/microcode/amd_shas.c new file mode 100644 index 0000000000000..2a1655b1fdd88 --- /dev/null +++ b/arch/x86/kernel/cpu/microcode/amd_shas.c @@ -0,0 +1,444 @@ +/* Keep 'em sorted. 
*/ +static const struct patch_digest phashes[] = { + { 0x8001227, { + 0x99,0xc0,0x9b,0x2b,0xcc,0x9f,0x52,0x1b, + 0x1a,0x5f,0x1d,0x83,0xa1,0x6c,0xc4,0x46, + 0xe2,0x6c,0xda,0x73,0xfb,0x2d,0x23,0xa8, + 0x77,0xdc,0x15,0x31,0x33,0x4a,0x46,0x18, + } + }, + { 0x8001250, { + 0xc0,0x0b,0x6b,0x19,0xfd,0x5c,0x39,0x60, + 0xd5,0xc3,0x57,0x46,0x54,0xe4,0xd1,0xaa, + 0xa8,0xf7,0x1f,0xa8,0x6a,0x60,0x3e,0xe3, + 0x27,0x39,0x8e,0x53,0x30,0xf8,0x49,0x19, + } + }, + { 0x800126e, { + 0xf3,0x8b,0x2b,0xb6,0x34,0xe3,0xc8,0x2c, + 0xef,0xec,0x63,0x6d,0xc8,0x76,0x77,0xb3, + 0x25,0x5a,0xb7,0x52,0x8c,0x83,0x26,0xe6, + 0x4c,0xbe,0xbf,0xe9,0x7d,0x22,0x6a,0x43, + } + }, + { 0x800126f, { + 0x2b,0x5a,0xf2,0x9c,0xdd,0xd2,0x7f,0xec, + 0xec,0x96,0x09,0x57,0xb0,0x96,0x29,0x8b, + 0x2e,0x26,0x91,0xf0,0x49,0x33,0x42,0x18, + 0xdd,0x4b,0x65,0x5a,0xd4,0x15,0x3d,0x33, + } + }, + { 0x800820d, { + 0x68,0x98,0x83,0xcd,0x22,0x0d,0xdd,0x59, + 0x73,0x2c,0x5b,0x37,0x1f,0x84,0x0e,0x67, + 0x96,0x43,0x83,0x0c,0x46,0x44,0xab,0x7c, + 0x7b,0x65,0x9e,0x57,0xb5,0x90,0x4b,0x0e, + } + }, + { 0x8301025, { + 0xe4,0x7d,0xdb,0x1e,0x14,0xb4,0x5e,0x36, + 0x8f,0x3e,0x48,0x88,0x3c,0x6d,0x76,0xa1, + 0x59,0xc6,0xc0,0x72,0x42,0xdf,0x6c,0x30, + 0x6f,0x0b,0x28,0x16,0x61,0xfc,0x79,0x77, + } + }, + { 0x8301055, { + 0x81,0x7b,0x99,0x1b,0xae,0x2d,0x4f,0x9a, + 0xef,0x13,0xce,0xb5,0x10,0xaf,0x6a,0xea, + 0xe5,0xb0,0x64,0x98,0x10,0x68,0x34,0x3b, + 0x9d,0x7a,0xd6,0x22,0x77,0x5f,0xb3,0x5b, + } + }, + { 0x8301072, { + 0xcf,0x76,0xa7,0x1a,0x49,0xdf,0x2a,0x5e, + 0x9e,0x40,0x70,0xe5,0xdd,0x8a,0xa8,0x28, + 0x20,0xdc,0x91,0xd8,0x2c,0xa6,0xa0,0xb1, + 0x2d,0x22,0x26,0x94,0x4b,0x40,0x85,0x30, + } + }, + { 0x830107a, { + 0x2a,0x65,0x8c,0x1a,0x5e,0x07,0x21,0x72, + 0xdf,0x90,0xa6,0x51,0x37,0xd3,0x4b,0x34, + 0xc4,0xda,0x03,0xe1,0x8a,0x6c,0xfb,0x20, + 0x04,0xb2,0x81,0x05,0xd4,0x87,0xf4,0x0a, + } + }, + { 0x830107b, { + 0xb3,0x43,0x13,0x63,0x56,0xc1,0x39,0xad, + 0x10,0xa6,0x2b,0xcc,0x02,0xe6,0x76,0x2a, + 0x1e,0x39,0x58,0x3e,0x23,0x6e,0xa4,0x04, + 0x95,0xea,0xf9,0x6d,0xc2,0x8a,0x13,0x19, + } + }, + { 0x830107c, { + 0x21,0x64,0xde,0xfb,0x9f,0x68,0x96,0x47, + 0x70,0x5c,0xe2,0x8f,0x18,0x52,0x6a,0xac, + 0xa4,0xd2,0x2e,0xe0,0xde,0x68,0x66,0xc3, + 0xeb,0x1e,0xd3,0x3f,0xbc,0x51,0x1d,0x38, + } + }, + { 0x860010d, { + 0x86,0xb6,0x15,0x83,0xbc,0x3b,0x9c,0xe0, + 0xb3,0xef,0x1d,0x99,0x84,0x35,0x15,0xf7, + 0x7c,0x2a,0xc6,0x42,0xdb,0x73,0x07,0x5c, + 0x7d,0xc3,0x02,0xb5,0x43,0x06,0x5e,0xf8, + } + }, + { 0x8608108, { + 0x14,0xfe,0x57,0x86,0x49,0xc8,0x68,0xe2, + 0x11,0xa3,0xcb,0x6e,0xff,0x6e,0xd5,0x38, + 0xfe,0x89,0x1a,0xe0,0x67,0xbf,0xc4,0xcc, + 0x1b,0x9f,0x84,0x77,0x2b,0x9f,0xaa,0xbd, + } + }, + { 0x8701034, { + 0xc3,0x14,0x09,0xa8,0x9c,0x3f,0x8d,0x83, + 0x9b,0x4c,0xa5,0xb7,0x64,0x8b,0x91,0x5d, + 0x85,0x6a,0x39,0x26,0x1e,0x14,0x41,0xa8, + 0x75,0xea,0xa6,0xf9,0xc9,0xd1,0xea,0x2b, + } + }, + { 0x8a00008, { + 0xd7,0x2a,0x93,0xdc,0x05,0x2f,0xa5,0x6e, + 0x0c,0x61,0x2c,0x07,0x9f,0x38,0xe9,0x8e, + 0xef,0x7d,0x2a,0x05,0x4d,0x56,0xaf,0x72, + 0xe7,0x56,0x47,0x6e,0x60,0x27,0xd5,0x8c, + } + }, + { 0x8a0000a, { + 0x73,0x31,0x26,0x22,0xd4,0xf9,0xee,0x3c, + 0x07,0x06,0xe7,0xb9,0xad,0xd8,0x72,0x44, + 0x33,0x31,0xaa,0x7d,0xc3,0x67,0x0e,0xdb, + 0x47,0xb5,0xaa,0xbc,0xf5,0xbb,0xd9,0x20, + } + }, + { 0xa00104c, { + 0x3c,0x8a,0xfe,0x04,0x62,0xd8,0x6d,0xbe, + 0xa7,0x14,0x28,0x64,0x75,0xc0,0xa3,0x76, + 0xb7,0x92,0x0b,0x97,0x0a,0x8e,0x9c,0x5b, + 0x1b,0xc8,0x9d,0x3a,0x1e,0x81,0x3d,0x3b, + } + }, + { 0xa00104e, { + 0xc4,0x35,0x82,0x67,0xd2,0x86,0xe5,0xb2, + 0xfd,0x69,0x12,0x38,0xc8,0x77,0xba,0xe0, + 
0x70,0xf9,0x77,0x89,0x10,0xa6,0x74,0x4e, + 0x56,0x58,0x13,0xf5,0x84,0x70,0x28,0x0b, + } + }, + { 0xa001053, { + 0x92,0x0e,0xf4,0x69,0x10,0x3b,0xf9,0x9d, + 0x31,0x1b,0xa6,0x99,0x08,0x7d,0xd7,0x25, + 0x7e,0x1e,0x89,0xba,0x35,0x8d,0xac,0xcb, + 0x3a,0xb4,0xdf,0x58,0x12,0xcf,0xc0,0xc3, + } + }, + { 0xa001058, { + 0x33,0x7d,0xa9,0xb5,0x4e,0x62,0x13,0x36, + 0xef,0x66,0xc9,0xbd,0x0a,0xa6,0x3b,0x19, + 0xcb,0xf5,0xc2,0xc3,0x55,0x47,0x20,0xec, + 0x1f,0x7b,0xa1,0x44,0x0e,0x8e,0xa4,0xb2, + } + }, + { 0xa001075, { + 0x39,0x02,0x82,0xd0,0x7c,0x26,0x43,0xe9, + 0x26,0xa3,0xd9,0x96,0xf7,0x30,0x13,0x0a, + 0x8a,0x0e,0xac,0xe7,0x1d,0xdc,0xe2,0x0f, + 0xcb,0x9e,0x8d,0xbc,0xd2,0xa2,0x44,0xe0, + } + }, + { 0xa001078, { + 0x2d,0x67,0xc7,0x35,0xca,0xef,0x2f,0x25, + 0x4c,0x45,0x93,0x3f,0x36,0x01,0x8c,0xce, + 0xa8,0x5b,0x07,0xd3,0xc1,0x35,0x3c,0x04, + 0x20,0xa2,0xfc,0xdc,0xe6,0xce,0x26,0x3e, + } + }, + { 0xa001079, { + 0x43,0xe2,0x05,0x9c,0xfd,0xb7,0x5b,0xeb, + 0x5b,0xe9,0xeb,0x3b,0x96,0xf4,0xe4,0x93, + 0x73,0x45,0x3e,0xac,0x8d,0x3b,0xe4,0xdb, + 0x10,0x31,0xc1,0xe4,0xa2,0xd0,0x5a,0x8a, + } + }, + { 0xa00107a, { + 0x5f,0x92,0xca,0xff,0xc3,0x59,0x22,0x5f, + 0x02,0xa0,0x91,0x3b,0x4a,0x45,0x10,0xfd, + 0x19,0xe1,0x8a,0x6d,0x9a,0x92,0xc1,0x3f, + 0x75,0x78,0xac,0x78,0x03,0x1d,0xdb,0x18, + } + }, + { 0xa001143, { + 0x56,0xca,0xf7,0x43,0x8a,0x4c,0x46,0x80, + 0xec,0xde,0xe5,0x9c,0x50,0x84,0x9a,0x42, + 0x27,0xe5,0x51,0x84,0x8f,0x19,0xc0,0x8d, + 0x0c,0x25,0xb4,0xb0,0x8f,0x10,0xf3,0xf8, + } + }, + { 0xa001144, { + 0x42,0xd5,0x9b,0xa7,0xd6,0x15,0x29,0x41, + 0x61,0xc4,0x72,0x3f,0xf3,0x06,0x78,0x4b, + 0x65,0xf3,0x0e,0xfa,0x9c,0x87,0xde,0x25, + 0xbd,0xb3,0x9a,0xf4,0x75,0x13,0x53,0xdc, + } + }, + { 0xa00115d, { + 0xd4,0xc4,0x49,0x36,0x89,0x0b,0x47,0xdd, + 0xfb,0x2f,0x88,0x3b,0x5f,0xf2,0x8e,0x75, + 0xc6,0x6c,0x37,0x5a,0x90,0x25,0x94,0x3e, + 0x36,0x9c,0xae,0x02,0x38,0x6c,0xf5,0x05, + } + }, + { 0xa001173, { + 0x28,0xbb,0x9b,0xd1,0xa0,0xa0,0x7e,0x3a, + 0x59,0x20,0xc0,0xa9,0xb2,0x5c,0xc3,0x35, + 0x53,0x89,0xe1,0x4c,0x93,0x2f,0x1d,0xc3, + 0xe5,0xf7,0xf3,0xc8,0x9b,0x61,0xaa,0x9e, + } + }, + { 0xa0011a8, { + 0x97,0xc6,0x16,0x65,0x99,0xa4,0x85,0x3b, + 0xf6,0xce,0xaa,0x49,0x4a,0x3a,0xc5,0xb6, + 0x78,0x25,0xbc,0x53,0xaf,0x5d,0xcf,0xf4, + 0x23,0x12,0xbb,0xb1,0xbc,0x8a,0x02,0x2e, + } + }, + { 0xa0011ce, { + 0xcf,0x1c,0x90,0xa3,0x85,0x0a,0xbf,0x71, + 0x94,0x0e,0x80,0x86,0x85,0x4f,0xd7,0x86, + 0xae,0x38,0x23,0x28,0x2b,0x35,0x9b,0x4e, + 0xfe,0xb8,0xcd,0x3d,0x3d,0x39,0xc9,0x6a, + } + }, + { 0xa0011d1, { + 0xdf,0x0e,0xca,0xde,0xf6,0xce,0x5c,0x1e, + 0x4c,0xec,0xd7,0x71,0x83,0xcc,0xa8,0x09, + 0xc7,0xc5,0xfe,0xb2,0xf7,0x05,0xd2,0xc5, + 0x12,0xdd,0xe4,0xf3,0x92,0x1c,0x3d,0xb8, + } + }, + { 0xa0011d3, { + 0x91,0xe6,0x10,0xd7,0x57,0xb0,0x95,0x0b, + 0x9a,0x24,0xee,0xf7,0xcf,0x56,0xc1,0xa6, + 0x4a,0x52,0x7d,0x5f,0x9f,0xdf,0xf6,0x00, + 0x65,0xf7,0xea,0xe8,0x2a,0x88,0xe2,0x26, + } + }, + { 0xa0011d5, { + 0xed,0x69,0x89,0xf4,0xeb,0x64,0xc2,0x13, + 0xe0,0x51,0x1f,0x03,0x26,0x52,0x7d,0xb7, + 0x93,0x5d,0x65,0xca,0xb8,0x12,0x1d,0x62, + 0x0d,0x5b,0x65,0x34,0x69,0xb2,0x62,0x21, + } + }, + { 0xa001223, { + 0xfb,0x32,0x5f,0xc6,0x83,0x4f,0x8c,0xb8, + 0xa4,0x05,0xf9,0x71,0x53,0x01,0x16,0xc4, + 0x83,0x75,0x94,0xdd,0xeb,0x7e,0xb7,0x15, + 0x8e,0x3b,0x50,0x29,0x8a,0x9c,0xcc,0x45, + } + }, + { 0xa001224, { + 0x0e,0x0c,0xdf,0xb4,0x89,0xee,0x35,0x25, + 0xdd,0x9e,0xdb,0xc0,0x69,0x83,0x0a,0xad, + 0x26,0xa9,0xaa,0x9d,0xfc,0x3c,0xea,0xf9, + 0x6c,0xdc,0xd5,0x6d,0x8b,0x6e,0x85,0x4a, + } + }, + { 0xa001227, { + 0xab,0xc6,0x00,0x69,0x4b,0x50,0x87,0xad, + 
0x5f,0x0e,0x8b,0xea,0x57,0x38,0xce,0x1d, + 0x0f,0x75,0x26,0x02,0xf6,0xd6,0x96,0xe9, + 0x87,0xb9,0xd6,0x20,0x27,0x7c,0xd2,0xe0, + } + }, + { 0xa001229, { + 0x7f,0x49,0x49,0x48,0x46,0xa5,0x50,0xa6, + 0x28,0x89,0x98,0xe2,0x9e,0xb4,0x7f,0x75, + 0x33,0xa7,0x04,0x02,0xe4,0x82,0xbf,0xb4, + 0xa5,0x3a,0xba,0x24,0x8d,0x31,0x10,0x1d, + } + }, + { 0xa00122e, { + 0x56,0x94,0xa9,0x5d,0x06,0x68,0xfe,0xaf, + 0xdf,0x7a,0xff,0x2d,0xdf,0x74,0x0f,0x15, + 0x66,0xfb,0x00,0xb5,0x51,0x97,0x9b,0xfa, + 0xcb,0x79,0x85,0x46,0x25,0xb4,0xd2,0x10, + } + }, + { 0xa001231, { + 0x0b,0x46,0xa5,0xfc,0x18,0x15,0xa0,0x9e, + 0xa6,0xdc,0xb7,0xff,0x17,0xf7,0x30,0x64, + 0xd4,0xda,0x9e,0x1b,0xc3,0xfc,0x02,0x3b, + 0xe2,0xc6,0x0e,0x41,0x54,0xb5,0x18,0xdd, + } + }, + { 0xa001234, { + 0x88,0x8d,0xed,0xab,0xb5,0xbd,0x4e,0xf7, + 0x7f,0xd4,0x0e,0x95,0x34,0x91,0xff,0xcc, + 0xfb,0x2a,0xcd,0xf7,0xd5,0xdb,0x4c,0x9b, + 0xd6,0x2e,0x73,0x50,0x8f,0x83,0x79,0x1a, + } + }, + { 0xa001236, { + 0x3d,0x30,0x00,0xb9,0x71,0xba,0x87,0x78, + 0xa8,0x43,0x55,0xc4,0x26,0x59,0xcf,0x9d, + 0x93,0xce,0x64,0x0e,0x8b,0x72,0x11,0x8b, + 0xa3,0x8f,0x51,0xe9,0xca,0x98,0xaa,0x25, + } + }, + { 0xa001238, { + 0x72,0xf7,0x4b,0x0c,0x7d,0x58,0x65,0xcc, + 0x00,0xcc,0x57,0x16,0x68,0x16,0xf8,0x2a, + 0x1b,0xb3,0x8b,0xe1,0xb6,0x83,0x8c,0x7e, + 0xc0,0xcd,0x33,0xf2,0x8d,0xf9,0xef,0x59, + } + }, + { 0xa00820c, { + 0xa8,0x0c,0x81,0xc0,0xa6,0x00,0xe7,0xf3, + 0x5f,0x65,0xd3,0xb9,0x6f,0xea,0x93,0x63, + 0xf1,0x8c,0x88,0x45,0xd7,0x82,0x80,0xd1, + 0xe1,0x3b,0x8d,0xb2,0xf8,0x22,0x03,0xe2, + } + }, + { 0xa10113e, { + 0x05,0x3c,0x66,0xd7,0xa9,0x5a,0x33,0x10, + 0x1b,0xf8,0x9c,0x8f,0xed,0xfc,0xa7,0xa0, + 0x15,0xe3,0x3f,0x4b,0x1d,0x0d,0x0a,0xd5, + 0xfa,0x90,0xc4,0xed,0x9d,0x90,0xaf,0x53, + } + }, + { 0xa101144, { + 0xb3,0x0b,0x26,0x9a,0xf8,0x7c,0x02,0x26, + 0x35,0x84,0x53,0xa4,0xd3,0x2c,0x7c,0x09, + 0x68,0x7b,0x96,0xb6,0x93,0xef,0xde,0xbc, + 0xfd,0x4b,0x15,0xd2,0x81,0xd3,0x51,0x47, + } + }, + { 0xa101148, { + 0x20,0xd5,0x6f,0x40,0x4a,0xf6,0x48,0x90, + 0xc2,0x93,0x9a,0xc2,0xfd,0xac,0xef,0x4f, + 0xfa,0xc0,0x3d,0x92,0x3c,0x6d,0x01,0x08, + 0xf1,0x5e,0xb0,0xde,0xb4,0x98,0xae,0xc4, + } + }, + { 0xa10123e, { + 0x03,0xb9,0x2c,0x76,0x48,0x93,0xc9,0x18, + 0xfb,0x56,0xfd,0xf7,0xe2,0x1d,0xca,0x4d, + 0x1d,0x13,0x53,0x63,0xfe,0x42,0x6f,0xfc, + 0x19,0x0f,0xf1,0xfc,0xa7,0xdd,0x89,0x1b, + } + }, + { 0xa101244, { + 0x71,0x56,0xb5,0x9f,0x21,0xbf,0xb3,0x3c, + 0x8c,0xd7,0x36,0xd0,0x34,0x52,0x1b,0xb1, + 0x46,0x2f,0x04,0xf0,0x37,0xd8,0x1e,0x72, + 0x24,0xa2,0x80,0x84,0x83,0x65,0x84,0xc0, + } + }, + { 0xa101248, { + 0xed,0x3b,0x95,0xa6,0x68,0xa7,0x77,0x3e, + 0xfc,0x17,0x26,0xe2,0x7b,0xd5,0x56,0x22, + 0x2c,0x1d,0xef,0xeb,0x56,0xdd,0xba,0x6e, + 0x1b,0x7d,0x64,0x9d,0x4b,0x53,0x13,0x75, + } + }, + { 0xa108108, { + 0xed,0xc2,0xec,0xa1,0x15,0xc6,0x65,0xe9, + 0xd0,0xef,0x39,0xaa,0x7f,0x55,0x06,0xc6, + 0xf5,0xd4,0x3f,0x7b,0x14,0xd5,0x60,0x2c, + 0x28,0x1e,0x9c,0x59,0x69,0x99,0x4d,0x16, + } + }, + { 0xa20102d, { + 0xf9,0x6e,0xf2,0x32,0xd3,0x0f,0x5f,0x11, + 0x59,0xa1,0xfe,0xcc,0xcd,0x9b,0x42,0x89, + 0x8b,0x89,0x2f,0xb5,0xbb,0x82,0xef,0x23, + 0x8c,0xe9,0x19,0x3e,0xcc,0x3f,0x7b,0xb4, + } + }, + { 0xa201210, { + 0xe8,0x6d,0x51,0x6a,0x8e,0x72,0xf3,0xfe, + 0x6e,0x16,0xbc,0x62,0x59,0x40,0x17,0xe9, + 0x6d,0x3d,0x0e,0x6b,0xa7,0xac,0xe3,0x68, + 0xf7,0x55,0xf0,0x13,0xbb,0x22,0xf6,0x41, + } + }, + { 0xa404107, { + 0xbb,0x04,0x4e,0x47,0xdd,0x5e,0x26,0x45, + 0x1a,0xc9,0x56,0x24,0xa4,0x4c,0x82,0xb0, + 0x8b,0x0d,0x9f,0xf9,0x3a,0xdf,0xc6,0x81, + 0x13,0xbc,0xc5,0x25,0xe4,0xc5,0xc3,0x99, + } + }, + { 0xa500011, { + 
0x23,0x3d,0x70,0x7d,0x03,0xc3,0xc4,0xf4, + 0x2b,0x82,0xc6,0x05,0xda,0x80,0x0a,0xf1, + 0xd7,0x5b,0x65,0x3a,0x7d,0xab,0xdf,0xa2, + 0x11,0x5e,0x96,0x7e,0x71,0xe9,0xfc,0x74, + } + }, + { 0xa601209, { + 0x66,0x48,0xd4,0x09,0x05,0xcb,0x29,0x32, + 0x66,0xb7,0x9a,0x76,0xcd,0x11,0xf3,0x30, + 0x15,0x86,0xcc,0x5d,0x97,0x0f,0xc0,0x46, + 0xe8,0x73,0xe2,0xd6,0xdb,0xd2,0x77,0x1d, + } + }, + { 0xa704107, { + 0xf3,0xc6,0x58,0x26,0xee,0xac,0x3f,0xd6, + 0xce,0xa1,0x72,0x47,0x3b,0xba,0x2b,0x93, + 0x2a,0xad,0x8e,0x6b,0xea,0x9b,0xb7,0xc2, + 0x64,0x39,0x71,0x8c,0xce,0xe7,0x41,0x39, + } + }, + { 0xa705206, { + 0x8d,0xc0,0x76,0xbd,0x58,0x9f,0x8f,0xa4, + 0x12,0x9d,0x21,0xfb,0x48,0x21,0xbc,0xe7, + 0x67,0x6f,0x04,0x18,0xae,0x20,0x87,0x4b, + 0x03,0x35,0xe9,0xbe,0xfb,0x06,0xdf,0xfc, + } + }, + { 0xa708007, { + 0x6b,0x76,0xcc,0x78,0xc5,0x8a,0xa3,0xe3, + 0x32,0x2d,0x79,0xe4,0xc3,0x80,0xdb,0xb2, + 0x07,0xaa,0x3a,0xe0,0x57,0x13,0x72,0x80, + 0xdf,0x92,0x73,0x84,0x87,0x3c,0x73,0x93, + } + }, + { 0xa70c005, { + 0x88,0x5d,0xfb,0x79,0x64,0xd8,0x46,0x3b, + 0x4a,0x83,0x8e,0x77,0x7e,0xcf,0xb3,0x0f, + 0x1f,0x1f,0xf1,0x97,0xeb,0xfe,0x56,0x55, + 0xee,0x49,0xac,0xe1,0x8b,0x13,0xc5,0x13, + } + }, + { 0xaa00116, { + 0xe8,0x4c,0x2c,0x88,0xa1,0xac,0x24,0x63, + 0x65,0xe5,0xaa,0x2d,0x16,0xa9,0xc3,0xf5, + 0xfe,0x1d,0x5e,0x65,0xc7,0xaa,0x92,0x4d, + 0x91,0xee,0x76,0xbb,0x4c,0x66,0x78,0xc9, + } + }, + { 0xaa00212, { + 0xbd,0x57,0x5d,0x0a,0x0a,0x30,0xc1,0x75, + 0x95,0x58,0x5e,0x93,0x02,0x28,0x43,0x71, + 0xed,0x42,0x29,0xc8,0xec,0x34,0x2b,0xb2, + 0x1a,0x65,0x4b,0xfe,0x07,0x0f,0x34,0xa1, + } + }, + { 0xaa00213, { + 0xed,0x58,0xb7,0x76,0x81,0x7f,0xd9,0x3a, + 0x1a,0xff,0x8b,0x34,0xb8,0x4a,0x99,0x0f, + 0x28,0x49,0x6c,0x56,0x2b,0xdc,0xb7,0xed, + 0x96,0xd5,0x9d,0xc1,0x7a,0xd4,0x51,0x9b, + } + }, + { 0xaa00215, { + 0x55,0xd3,0x28,0xcb,0x87,0xa9,0x32,0xe9, + 0x4e,0x85,0x4b,0x7c,0x6b,0xd5,0x7c,0xd4, + 0x1b,0x51,0x71,0x3a,0x0e,0x0b,0xdc,0x9b, + 0x68,0x2f,0x46,0xee,0xfe,0xc6,0x6d,0xef, + } + }, +}; diff --git a/arch/x86/kernel/cpu/microcode/internal.h b/arch/x86/kernel/cpu/microcode/internal.h index 21776c529fa97..5df621752fefa 100644 --- a/arch/x86/kernel/cpu/microcode/internal.h +++ b/arch/x86/kernel/cpu/microcode/internal.h @@ -100,14 +100,12 @@ extern bool force_minrev; #ifdef CONFIG_CPU_SUP_AMD void load_ucode_amd_bsp(struct early_load_data *ed, unsigned int family); void load_ucode_amd_ap(unsigned int family); -int save_microcode_in_initrd_amd(unsigned int family); void reload_ucode_amd(unsigned int cpu); struct microcode_ops *init_amd_microcode(void); void exit_amd_microcode(void); #else /* CONFIG_CPU_SUP_AMD */ static inline void load_ucode_amd_bsp(struct early_load_data *ed, unsigned int family) { } static inline void load_ucode_amd_ap(unsigned int family) { } -static inline int save_microcode_in_initrd_amd(unsigned int family) { return -EINVAL; } static inline void reload_ucode_amd(unsigned int cpu) { } static inline struct microcode_ops *init_amd_microcode(void) { return NULL; } static inline void exit_amd_microcode(void) { } diff --git a/arch/x86/kernel/cpu/sgx/driver.c b/arch/x86/kernel/cpu/sgx/driver.c index 22b65a5f5ec6c..7f8d1e11dbee2 100644 --- a/arch/x86/kernel/cpu/sgx/driver.c +++ b/arch/x86/kernel/cpu/sgx/driver.c @@ -150,13 +150,15 @@ int __init sgx_drv_init(void) u64 xfrm_mask; int ret; - if (!cpu_feature_enabled(X86_FEATURE_SGX_LC)) + if (!cpu_feature_enabled(X86_FEATURE_SGX_LC)) { + pr_info("SGX disabled: SGX launch control CPU feature is not available, /dev/sgx_enclave disabled.\n"); return -ENODEV; + } cpuid_count(SGX_CPUID, 0, 
&eax, &ebx, &ecx, &edx); if (!(eax & 1)) { - pr_err("SGX disabled: SGX1 instruction support not available.\n"); + pr_info("SGX disabled: SGX1 instruction support not available, /dev/sgx_enclave disabled.\n"); return -ENODEV; } @@ -173,8 +175,10 @@ int __init sgx_drv_init(void) } ret = misc_register(&sgx_dev_enclave); - if (ret) + if (ret) { + pr_info("SGX disabled: Unable to register the /dev/sgx_enclave driver (%d).\n", ret); return ret; + } return 0; } diff --git a/arch/x86/kernel/cpu/sgx/ioctl.c b/arch/x86/kernel/cpu/sgx/ioctl.c index b65ab214bdf57..776a20172867e 100644 --- a/arch/x86/kernel/cpu/sgx/ioctl.c +++ b/arch/x86/kernel/cpu/sgx/ioctl.c @@ -64,6 +64,13 @@ static int sgx_encl_create(struct sgx_encl *encl, struct sgx_secs *secs) struct file *backing; long ret; + /* + * ECREATE would detect this too, but checking here also ensures + * that the 'encl_size' calculations below can never overflow. + */ + if (!is_power_of_2(secs->size)) + return -EINVAL; + va_page = sgx_encl_grow(encl, true); if (IS_ERR(va_page)) return PTR_ERR(va_page); diff --git a/arch/x86/kernel/cpu/vmware.c b/arch/x86/kernel/cpu/vmware.c index 00189cdeb775f..cb3f900c46fcc 100644 --- a/arch/x86/kernel/cpu/vmware.c +++ b/arch/x86/kernel/cpu/vmware.c @@ -26,6 +26,7 @@ #include #include #include +#include #include #include #include @@ -429,6 +430,9 @@ static void __init vmware_platform_setup(void) pr_warn("Failed to get TSC freq from the hypervisor\n"); } + if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP) && !efi_enabled(EFI_BOOT)) + x86_init.mpparse.find_mptable = mpparse_find_mptable; + vmware_paravirt_ops_setup(); #ifdef CONFIG_X86_IO_APIC diff --git a/arch/x86/kernel/devicetree.c b/arch/x86/kernel/devicetree.c index 59d23cdf4ed0f..dd8748c45529a 100644 --- a/arch/x86/kernel/devicetree.c +++ b/arch/x86/kernel/devicetree.c @@ -2,6 +2,7 @@ /* * Architecture specific OF callbacks. 
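The SGX ioctl change above guards the 'encl_size' arithmetic with a power-of-two check; the kernel's is_power_of_2() is the classic bit trick, shown here as a stand-alone user-space sketch (illustrative only):

#include <stdbool.h>
#include <stdio.h>

/* n is a power of two iff exactly one bit is set; zero is rejected. */
static bool is_power_of_2(unsigned long long n)
{
        return n != 0 && (n & (n - 1)) == 0;
}

int main(void)
{
        printf("%d %d %d\n", is_power_of_2(4096),   /* 1 */
               is_power_of_2(0),                    /* 0 */
               is_power_of_2(12288));               /* 0 */
        return 0;
}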
*/ +#include #include #include #include @@ -313,6 +314,6 @@ void __init x86_flattree_get_config(void) if (initial_dtb) early_memunmap(dt, map_len); #endif - if (of_have_populated_dt()) + if (acpi_disabled && of_have_populated_dt()) x86_init.mpparse.parse_smp_cfg = x86_dtb_parse_smp_config; } diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c index 385e3a5fc3045..feca4f20b06aa 100644 --- a/arch/x86/kernel/irq.c +++ b/arch/x86/kernel/irq.c @@ -25,8 +25,10 @@ #include #include +#if defined(CONFIG_X86_LOCAL_APIC) || defined(CONFIG_X86_THERMAL_VECTOR) #define CREATE_TRACE_POINTS #include +#endif DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat); EXPORT_PER_CPU_SYMBOL(irq_stat); diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index 8eb3a88707f21..121edf1f2a79a 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -1763,7 +1763,7 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function) entry->ecx = entry->edx = 0; if (!enable_pmu || !kvm_cpu_cap_has(X86_FEATURE_PERFMON_V2)) { - entry->eax = entry->ebx; + entry->eax = entry->ebx = 0; break; } diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index d4ac4a1f8b81b..8160870398b90 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -7460,7 +7460,7 @@ static bool kvm_nx_huge_page_recovery_worker(void *data) return true; } -static void kvm_mmu_start_lpage_recovery(struct once *once) +static int kvm_mmu_start_lpage_recovery(struct once *once) { struct kvm_arch *ka = container_of(once, struct kvm_arch, nx_once); struct kvm *kvm = container_of(ka, struct kvm, arch); @@ -7471,13 +7471,14 @@ static void kvm_mmu_start_lpage_recovery(struct once *once) kvm_nx_huge_page_recovery_worker_kill, kvm, "kvm-nx-lpage-recovery"); - if (!nx_thread) - return; + if (IS_ERR(nx_thread)) + return PTR_ERR(nx_thread); vhost_task_start(nx_thread); /* Make the task visible only once it is fully started. */ WRITE_ONCE(kvm->arch.nx_huge_page_recovery_thread, nx_thread); + return 0; } int kvm_mmu_post_init_vm(struct kvm *kvm) @@ -7485,10 +7486,7 @@ int kvm_mmu_post_init_vm(struct kvm *kvm) if (nx_hugepage_mitigation_hard_disabled) return 0; - call_once(&kvm->arch.nx_once, kvm_mmu_start_lpage_recovery); - if (!kvm->arch.nx_huge_page_recovery_thread) - return -ENOMEM; - return 0; + return call_once(&kvm->arch.nx_once, kvm_mmu_start_lpage_recovery); } void kvm_mmu_pre_destroy_vm(struct kvm *kvm) diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index 0dbb25442ec14..661108d65ee72 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -4590,6 +4590,8 @@ void sev_es_vcpu_reset(struct vcpu_svm *svm) void sev_es_prepare_switch_to_guest(struct vcpu_svm *svm, struct sev_es_save_area *hostsa) { + struct kvm *kvm = svm->vcpu.kvm; + /* * All host state for SEV-ES guests is categorized into three swap types * based on how it is handled by hardware during a world switch: @@ -4613,14 +4615,22 @@ void sev_es_prepare_switch_to_guest(struct vcpu_svm *svm, struct sev_es_save_are /* * If DebugSwap is enabled, debug registers are loaded but NOT saved by - * the CPU (Type-B). If DebugSwap is disabled/unsupported, the CPU both - * saves and loads debug registers (Type-A). + * the CPU (Type-B). If DebugSwap is disabled/unsupported, the CPU does + * not save or load debug registers. Sadly, KVM can't prevent SNP + * guests from lying about DebugSwap on secondary vCPUs, i.e. the + * SEV_FEATURES provided at "AP Create" isn't guaranteed to match what + * the guest has actually enabled (or not!) 
in the VMSA. + * + * If DebugSwap is *possible*, save the masks so that they're restored + * if the guest enables DebugSwap. But for the DRs themselves, do NOT + * rely on the CPU to restore the host values; KVM will restore them as + * needed in common code, via hw_breakpoint_restore(). Note, KVM does + * NOT support virtualizing Breakpoint Extensions, i.e. the mask MSRs + * don't need to be restored per se, KVM just needs to ensure they are + * loaded with the correct values *if* the CPU writes the MSRs. */ - if (sev_vcpu_has_debug_swap(svm)) { - hostsa->dr0 = native_get_debugreg(0); - hostsa->dr1 = native_get_debugreg(1); - hostsa->dr2 = native_get_debugreg(2); - hostsa->dr3 = native_get_debugreg(3); + if (sev_vcpu_has_debug_swap(svm) || + (sev_snp_guest(kvm) && cpu_feature_enabled(X86_FEATURE_DEBUG_SWAP))) { hostsa->dr0_addr_mask = amd_get_dr_addr_mask(0); hostsa->dr1_addr_mask = amd_get_dr_addr_mask(1); hostsa->dr2_addr_mask = amd_get_dr_addr_mask(2); diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index a713c803a3a37..e67de787fc714 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -3165,6 +3165,27 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) kvm_pr_unimpl_wrmsr(vcpu, ecx, data); break; } + + /* + * AMD changed the architectural behavior of bits 5:2. On CPUs + * without BusLockTrap, bits 5:2 control "external pins", but + * on CPUs that support BusLockDetect, bit 2 enables BusLockTrap + * and bits 5:3 are reserved-to-zero. Sadly, old KVM allowed + * the guest to set bits 5:2 despite not actually virtualizing + * Performance-Monitoring/Breakpoint external pins. Drop bits + * 5:2 for backwards compatibility. + */ + data &= ~GENMASK(5, 2); + + /* + * Suppress BTF as KVM doesn't virtualize BTF, but there's no + * way to communicate lack of support to the guest. + */ + if (data & DEBUGCTLMSR_BTF) { + kvm_pr_unimpl_wrmsr(vcpu, MSR_IA32_DEBUGCTLMSR, data); + data &= ~DEBUGCTLMSR_BTF; + } + if (data & DEBUGCTL_RESERVED_BITS) return 1; @@ -4189,6 +4210,18 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu, bool spec_ctrl_in guest_state_enter_irqoff(); + /* + * Set RFLAGS.IF prior to VMRUN, as the host's RFLAGS.IF at the time of + * VMRUN controls whether or not physical IRQs are masked (KVM always + * runs with V_INTR_MASKING_MASK). Toggle RFLAGS.IF here to avoid the + * temptation to do STI+VMRUN+CLI, as AMD CPUs bleed the STI shadow + * into guest state if delivery of an event during VMRUN triggers a + * #VMEXIT, and the guest_state transitions already tell lockdep that + * IRQs are being enabled/disabled. Note! GIF=0 for the entirety of + * this path, so IRQs aren't actually unmasked while running host code. + */ + raw_local_irq_enable(); + amd_clear_divider(); if (sev_es_guest(vcpu->kvm)) @@ -4197,6 +4230,8 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu, bool spec_ctrl_in else __svm_vcpu_run(svm, spec_ctrl_intercepted); + raw_local_irq_disable(); + guest_state_exit_irqoff(); } @@ -4253,6 +4288,16 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu, clgi(); kvm_load_guest_xsave_state(vcpu); + /* + * Hardware only context switches DEBUGCTL if LBR virtualization is + * enabled. Manually load DEBUGCTL if necessary (and restore it after + * VM-Exit), as running with the host's DEBUGCTL can negatively affect + * guest state and can even be fatal, e.g. due to Bus Lock Detect. 
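The pattern the comment above describes, writing the MSR around guest entry/exit only when the current hardware value differs from what the next context needs, in a minimal illustrative form (the names and the fake wrmsr are made up; this is not KVM's code):

#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;

static u64 hw_debugctl;                 /* stands in for the real MSR */

static void fake_wrmsr(u64 val)
{
        hw_debugctl = val;
        printf("wrmsr(DEBUGCTL, 0x%llx)\n", (unsigned long long)val);
}

/* Pay for the (expensive) MSR write only when the value must change. */
static void debugctl_switch(u64 want)
{
        if (hw_debugctl != want)
                fake_wrmsr(want);
}

int main(void)
{
        debugctl_switch(0x1);   /* guest value before entry: one write */
        debugctl_switch(0x1);   /* already loaded: no write */
        debugctl_switch(0x0);   /* host value after exit: one write */
        return 0;
}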
+ */ + if (!(svm->vmcb->control.virt_ext & LBR_CTL_ENABLE_MASK) && + vcpu->arch.host_debugctl != svm->vmcb->save.dbgctl) + update_debugctlmsr(svm->vmcb->save.dbgctl); + kvm_wait_lapic_expire(vcpu); /* @@ -4280,6 +4325,10 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu, if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI)) kvm_before_interrupt(vcpu, KVM_HANDLING_NMI); + if (!(svm->vmcb->control.virt_ext & LBR_CTL_ENABLE_MASK) && + vcpu->arch.host_debugctl != svm->vmcb->save.dbgctl) + update_debugctlmsr(vcpu->arch.host_debugctl); + kvm_load_host_xsave_state(vcpu); stgi(); diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h index 9d7cdb8fbf872..ea44c1da5a7c9 100644 --- a/arch/x86/kvm/svm/svm.h +++ b/arch/x86/kvm/svm/svm.h @@ -584,7 +584,7 @@ static inline bool is_vnmi_enabled(struct vcpu_svm *svm) /* svm.c */ #define MSR_INVALID 0xffffffffU -#define DEBUGCTL_RESERVED_BITS (~(0x3fULL)) +#define DEBUGCTL_RESERVED_BITS (~DEBUGCTLMSR_LBR) extern bool dump_invalid_vmcb; diff --git a/arch/x86/kvm/svm/vmenter.S b/arch/x86/kvm/svm/vmenter.S index 2ed80aea3bb13..0c61153b275f6 100644 --- a/arch/x86/kvm/svm/vmenter.S +++ b/arch/x86/kvm/svm/vmenter.S @@ -170,12 +170,8 @@ SYM_FUNC_START(__svm_vcpu_run) mov VCPU_RDI(%_ASM_DI), %_ASM_DI /* Enter guest mode */ - sti - 3: vmrun %_ASM_AX 4: - cli - /* Pop @svm to RAX while it's the only available register. */ pop %_ASM_AX @@ -340,12 +336,8 @@ SYM_FUNC_START(__svm_sev_es_vcpu_run) mov KVM_VMCB_pa(%rax), %rax /* Enter guest mode */ - sti - 1: vmrun %rax - -2: cli - +2: /* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */ FILL_RETURN_BUFFER %rax, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_VMEXIT diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c index 8a7af02d466e9..ed8a3cb539612 100644 --- a/arch/x86/kvm/vmx/nested.c +++ b/arch/x86/kvm/vmx/nested.c @@ -5084,6 +5084,17 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason, load_vmcs12_host_state(vcpu, vmcs12); + /* + * Process events if an injectable IRQ or NMI is pending, even + * if the event is blocked (RFLAGS.IF is cleared on VM-Exit). + * If an event became pending while L2 was active, KVM needs to + * either inject the event or request an IRQ/NMI window. SMIs + * don't need to be processed as SMM is mutually exclusive with + * non-root mode. INIT/SIPI don't need to be checked as INIT + * is blocked post-VMXON, and SIPIs are ignored. + */ + if (kvm_cpu_has_injectable_intr(vcpu) || vcpu->arch.nmi_pending) + kvm_make_request(KVM_REQ_EVENT, vcpu); return; } diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 6c56d5235f0f3..3b92f893b2392 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -1514,16 +1514,12 @@ void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu, */ void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu) { - struct vcpu_vmx *vmx = to_vmx(vcpu); - if (vcpu->scheduled_out && !kvm_pause_in_guest(vcpu->kvm)) shrink_ple_window(vcpu); vmx_vcpu_load_vmcs(vcpu, cpu, NULL); vmx_vcpu_pi_load(vcpu, cpu); - - vmx->host_debugctlmsr = get_debugctlmsr(); } void vmx_vcpu_put(struct kvm_vcpu *vcpu) @@ -7458,8 +7454,8 @@ fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu, bool force_immediate_exit) } /* MSR_IA32_DEBUGCTLMSR is zeroed on vmexit. 
Restore it if needed */ - if (vmx->host_debugctlmsr) - update_debugctlmsr(vmx->host_debugctlmsr); + if (vcpu->arch.host_debugctl) + update_debugctlmsr(vcpu->arch.host_debugctl); #ifndef CONFIG_X86_64 /* diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h index 8b111ce1087c7..951e44dc9d0ea 100644 --- a/arch/x86/kvm/vmx/vmx.h +++ b/arch/x86/kvm/vmx/vmx.h @@ -340,8 +340,6 @@ struct vcpu_vmx { /* apic deadline value in host tsc */ u64 hv_deadline_tsc; - unsigned long host_debugctlmsr; - /* * Only bits masked by msr_ia32_feature_control_valid_bits can be set in * msr_ia32_feature_control. FEAT_CTL_LOCKED is always included diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 02159c967d29e..4b64ab350bcd4 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -10968,6 +10968,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) set_debugreg(0, 7); } + vcpu->arch.host_debugctl = get_debugctlmsr(); + guest_timing_enter_irqoff(); for (;;) { @@ -12877,11 +12879,11 @@ void kvm_arch_destroy_vm(struct kvm *kvm) mutex_unlock(&kvm->slots_lock); } kvm_unload_vcpu_mmus(kvm); + kvm_destroy_vcpus(kvm); kvm_x86_call(vm_destroy)(kvm); kvm_free_msr_filter(srcu_dereference_check(kvm->arch.msr_filter, &kvm->srcu, 1)); kvm_pic_destroy(kvm); kvm_ioapic_destroy(kvm); - kvm_destroy_vcpus(kvm); kvfree(rcu_dereference_check(kvm->arch.apic_map, 1)); kfree(srcu_dereference_check(kvm->arch.pmu_event_filter, &kvm->srcu, 1)); kvm_mmu_uninit_vm(kvm); diff --git a/block/bio.c b/block/bio.c index f0c416e5931d9..6ac5983ba51e6 100644 --- a/block/bio.c +++ b/block/bio.c @@ -77,7 +77,7 @@ struct bio_slab { struct kmem_cache *slab; unsigned int slab_ref; unsigned int slab_size; - char name[8]; + char name[12]; }; static DEFINE_MUTEX(bio_slab_lock); static DEFINE_XARRAY(bio_slabs); diff --git a/block/blk-merge.c b/block/blk-merge.c index 15cd231d560cb..1d1589c352976 100644 --- a/block/blk-merge.c +++ b/block/blk-merge.c @@ -270,7 +270,7 @@ static bool bvec_split_segs(const struct queue_limits *lim, const struct bio_vec *bv, unsigned *nsegs, unsigned *bytes, unsigned max_segs, unsigned max_bytes) { - unsigned max_len = min(max_bytes, UINT_MAX) - *bytes; + unsigned max_len = max_bytes - *bytes; unsigned len = min(bv->bv_len, max_len); unsigned total_len = 0; unsigned seg_size = 0; @@ -329,7 +329,7 @@ int bio_split_rw_at(struct bio *bio, const struct queue_limits *lim, if (nsegs < lim->max_segments && bytes + bv.bv_len <= max_bytes && - bv.bv_offset + bv.bv_len <= PAGE_SIZE) { + bv.bv_offset + bv.bv_len <= lim->min_segment_size) { nsegs++; bytes += bv.bv_len; } else { @@ -556,11 +556,14 @@ int __blk_rq_map_sg(struct request_queue *q, struct request *rq, { struct req_iterator iter = { .bio = rq->bio, - .iter = rq->bio->bi_iter, }; struct phys_vec vec; int nsegs = 0; + /* the internal flush request may not have bio attached */ + if (iter.bio) + iter.iter = iter.bio->bi_iter; + while (blk_map_iter_next(rq, &iter, &vec)) { *last_sg = blk_next_sg(last_sg, sglist); sg_set_page(*last_sg, phys_to_page(vec.paddr), vec.len, diff --git a/block/blk-settings.c b/block/blk-settings.c index c44dadc35e1ec..b9c6f0ec1c499 100644 --- a/block/blk-settings.c +++ b/block/blk-settings.c @@ -246,6 +246,7 @@ int blk_validate_limits(struct queue_limits *lim) { unsigned int max_hw_sectors; unsigned int logical_block_sectors; + unsigned long seg_size; int err; /* @@ -303,7 +304,7 @@ int blk_validate_limits(struct queue_limits *lim) max_hw_sectors = min_not_zero(lim->max_hw_sectors, lim->max_dev_sectors); if (lim->max_user_sectors) { - if 
(lim->max_user_sectors < PAGE_SIZE / SECTOR_SIZE) + if (lim->max_user_sectors < BLK_MIN_SEGMENT_SIZE / SECTOR_SIZE) return -EINVAL; lim->max_sectors = min(max_hw_sectors, lim->max_user_sectors); } else if (lim->io_opt > (BLK_DEF_MAX_SECTORS_CAP << SECTOR_SHIFT)) { @@ -341,7 +342,7 @@ int blk_validate_limits(struct queue_limits *lim) */ if (!lim->seg_boundary_mask) lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK; - if (WARN_ON_ONCE(lim->seg_boundary_mask < PAGE_SIZE - 1)) + if (WARN_ON_ONCE(lim->seg_boundary_mask < BLK_MIN_SEGMENT_SIZE - 1)) return -EINVAL; /* @@ -362,10 +363,17 @@ int blk_validate_limits(struct queue_limits *lim) */ if (!lim->max_segment_size) lim->max_segment_size = BLK_MAX_SEGMENT_SIZE; - if (WARN_ON_ONCE(lim->max_segment_size < PAGE_SIZE)) + if (WARN_ON_ONCE(lim->max_segment_size < BLK_MIN_SEGMENT_SIZE)) return -EINVAL; } + /* setup min segment size for building new segment in fast path */ + if (lim->seg_boundary_mask > lim->max_segment_size - 1) + seg_size = lim->max_segment_size; + else + seg_size = lim->seg_boundary_mask + 1; + lim->min_segment_size = min_t(unsigned int, seg_size, PAGE_SIZE); + /* * We require drivers to at least do logical block aligned I/O, but * historically could not check for that due to the separate calls diff --git a/block/blk-zoned.c b/block/blk-zoned.c index 761ea662ddc34..0c77244a35c92 100644 --- a/block/blk-zoned.c +++ b/block/blk-zoned.c @@ -410,13 +410,14 @@ static bool disk_insert_zone_wplug(struct gendisk *disk, } } hlist_add_head_rcu(&zwplug->node, &disk->zone_wplugs_hash[idx]); + atomic_inc(&disk->nr_zone_wplugs); spin_unlock_irqrestore(&disk->zone_wplugs_lock, flags); return true; } -static struct blk_zone_wplug *disk_get_zone_wplug(struct gendisk *disk, - sector_t sector) +static struct blk_zone_wplug *disk_get_hashed_zone_wplug(struct gendisk *disk, + sector_t sector) { unsigned int zno = disk_zone_no(disk, sector); unsigned int idx = hash_32(zno, disk->zone_wplugs_hash_bits); @@ -437,6 +438,15 @@ static struct blk_zone_wplug *disk_get_zone_wplug(struct gendisk *disk, return NULL; } +static inline struct blk_zone_wplug *disk_get_zone_wplug(struct gendisk *disk, + sector_t sector) +{ + if (!atomic_read(&disk->nr_zone_wplugs)) + return NULL; + + return disk_get_hashed_zone_wplug(disk, sector); +} + static void disk_free_zone_wplug_rcu(struct rcu_head *rcu_head) { struct blk_zone_wplug *zwplug = @@ -503,6 +513,7 @@ static void disk_remove_zone_wplug(struct gendisk *disk, zwplug->flags |= BLK_ZONE_WPLUG_UNHASHED; spin_lock_irqsave(&disk->zone_wplugs_lock, flags); hlist_del_init_rcu(&zwplug->node); + atomic_dec(&disk->nr_zone_wplugs); spin_unlock_irqrestore(&disk->zone_wplugs_lock, flags); disk_put_zone_wplug(zwplug); } @@ -593,6 +604,11 @@ static void disk_zone_wplug_abort(struct blk_zone_wplug *zwplug) { struct bio *bio; + if (bio_list_empty(&zwplug->bio_list)) + return; + + pr_warn_ratelimited("%s: zone %u: Aborting plugged BIOs\n", + zwplug->disk->disk_name, zwplug->zone_no); while ((bio = bio_list_pop(&zwplug->bio_list))) blk_zone_wplug_bio_io_error(zwplug, bio); } @@ -1040,6 +1056,47 @@ static bool blk_zone_wplug_handle_write(struct bio *bio, unsigned int nr_segs) return true; } +static void blk_zone_wplug_handle_native_zone_append(struct bio *bio) +{ + struct gendisk *disk = bio->bi_bdev->bd_disk; + struct blk_zone_wplug *zwplug; + unsigned long flags; + + /* + * We have native support for zone append operations, so we are not + * going to handle @bio through plugging. 
However, we may already have a + * zone write plug for the target zone if that zone was previously + * partially written using regular writes. In such a case, we risk leaving + * the plug in the disk hash table if the zone is fully written using + * zone append operations. Avoid this by removing the zone write plug. + */ + zwplug = disk_get_zone_wplug(disk, bio->bi_iter.bi_sector); + if (likely(!zwplug)) + return; + + spin_lock_irqsave(&zwplug->lock, flags); + + /* + * We are about to remove the zone write plug. But if the user + * (mistakenly) has issued regular writes together with native zone + * append, we must abort the writes as otherwise the plugged BIOs would + * not be executed by the plug BIO work as disk_get_zone_wplug() will + * return NULL after the plug is removed. Aborting the plugged write + * BIOs is consistent with the fact that these writes will most likely + * fail anyway as there are no ordering guarantees between zone append + * operations and regular write operations. + */ + if (!bio_list_empty(&zwplug->bio_list)) { + pr_warn_ratelimited("%s: zone %u: Invalid mix of zone append and regular writes\n", + disk->disk_name, zwplug->zone_no); + disk_zone_wplug_abort(zwplug); + } + disk_remove_zone_wplug(disk, zwplug); + spin_unlock_irqrestore(&zwplug->lock, flags); + + disk_put_zone_wplug(zwplug); +} + /** * blk_zone_plug_bio - Handle a zone write BIO with zone write plugging * @bio: The BIO being submitted @@ -1096,8 +1153,10 @@ bool blk_zone_plug_bio(struct bio *bio, unsigned int nr_segs) */ switch (bio_op(bio)) { case REQ_OP_ZONE_APPEND: - if (!bdev_emulates_zone_append(bdev)) + if (!bdev_emulates_zone_append(bdev)) { + blk_zone_wplug_handle_native_zone_append(bio); return false; + } fallthrough; case REQ_OP_WRITE: case REQ_OP_WRITE_ZEROES: @@ -1284,6 +1343,7 @@ static int disk_alloc_zone_resources(struct gendisk *disk, { unsigned int i; + atomic_set(&disk->nr_zone_wplugs, 0); disk->zone_wplugs_hash_bits = min(ilog2(pool_size) + 1, BLK_ZONE_WPLUG_MAX_HASH_BITS); @@ -1338,6 +1398,7 @@ static void disk_destroy_zone_wplugs_hash_table(struct gendisk *disk) } } + WARN_ON_ONCE(atomic_read(&disk->nr_zone_wplugs)); kfree(disk->zone_wplugs_hash); disk->zone_wplugs_hash = NULL; disk->zone_wplugs_hash_bits = 0; @@ -1550,11 +1611,12 @@ static int blk_revalidate_seq_zone(struct blk_zone *zone, unsigned int idx, } /* - * We need to track the write pointer of all zones that are not - * empty nor full. So make sure we have a zone write plug for - * such zone if the device has a zone write plug hash table. + * If the device needs zone append emulation, we need to track the + * write pointer of all zones that are neither empty nor full. So make sure + * we have a zone write plug for such a zone if the device has a zone + * write plug hash table. 
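+ * (Devices with native zone append support do not need this tracking + * and are skipped by the check below.)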
*/ - if (!disk->zone_wplugs_hash) + if (!queue_emulates_zone_append(disk->queue) || !disk->zone_wplugs_hash) return 0; disk_zone_wplug_sync_wp_offset(disk, zone); diff --git a/block/blk.h b/block/blk.h index 90fa5f28ccabf..9cf9a0099416d 100644 --- a/block/blk.h +++ b/block/blk.h @@ -14,6 +14,7 @@ struct elevator_type; #define BLK_DEV_MAX_SECTORS (LLONG_MAX >> 9) +#define BLK_MIN_SEGMENT_SIZE 4096 /* Max future timer expiry for timeouts */ #define BLK_MAX_TIMEOUT (5 * HZ) @@ -358,8 +359,12 @@ struct bio *bio_split_zone_append(struct bio *bio, static inline bool bio_may_need_split(struct bio *bio, const struct queue_limits *lim) { - return lim->chunk_sectors || bio->bi_vcnt != 1 || - bio->bi_io_vec->bv_len + bio->bi_io_vec->bv_offset > PAGE_SIZE; + if (lim->chunk_sectors) + return true; + if (bio->bi_vcnt != 1) + return true; + return bio->bi_io_vec->bv_len + bio->bi_io_vec->bv_offset > + lim->min_segment_size; } /** diff --git a/block/partitions/efi.c b/block/partitions/efi.c index 5e9be13a56a82..7acba66eed481 100644 --- a/block/partitions/efi.c +++ b/block/partitions/efi.c @@ -682,7 +682,7 @@ static void utf16_le_to_7bit(const __le16 *in, unsigned int size, u8 *out) out[size] = 0; while (i < size) { - u8 c = le16_to_cpu(in[i]) & 0xff; + u8 c = le16_to_cpu(in[i]) & 0x7f; if (c && !isprint(c)) c = '!'; diff --git a/drivers/accel/amdxdna/amdxdna_mailbox.c b/drivers/accel/amdxdna/amdxdna_mailbox.c index 814b16bb1953f..e5301fac13971 100644 --- a/drivers/accel/amdxdna/amdxdna_mailbox.c +++ b/drivers/accel/amdxdna/amdxdna_mailbox.c @@ -8,6 +8,7 @@ #include #include #include +#include #include #define CREATE_TRACE_POINTS diff --git a/drivers/acpi/acpi_pnp.c b/drivers/acpi/acpi_pnp.c index 01abf26764b00..435ec60a96823 100644 --- a/drivers/acpi/acpi_pnp.c +++ b/drivers/acpi/acpi_pnp.c @@ -120,8 +120,6 @@ static const struct acpi_device_id acpi_pnp_device_ids[] = { {"IBM0071"}, /* smsc-ircc2 */ {"SMCf010"}, - /* sb1000 */ - {"GIC1000"}, /* parport_pc */ {"PNP0400"}, /* Standard LPT Printer Port */ {"PNP0401"}, /* ECP Printer Port */ diff --git a/drivers/acpi/platform_profile.c b/drivers/acpi/platform_profile.c index fc92e43d0fe93..ef9444482db19 100644 --- a/drivers/acpi/platform_profile.c +++ b/drivers/acpi/platform_profile.c @@ -21,9 +21,15 @@ struct platform_profile_handler { struct device dev; int minor; unsigned long choices[BITS_TO_LONGS(PLATFORM_PROFILE_LAST)]; + unsigned long hidden_choices[BITS_TO_LONGS(PLATFORM_PROFILE_LAST)]; const struct platform_profile_ops *ops; }; +struct aggregate_choices_data { + unsigned long aggregate[BITS_TO_LONGS(PLATFORM_PROFILE_LAST)]; + int count; +}; + static const char * const profile_names[] = { [PLATFORM_PROFILE_LOW_POWER] = "low-power", [PLATFORM_PROFILE_COOL] = "cool", @@ -73,7 +79,7 @@ static int _store_class_profile(struct device *dev, void *data) lockdep_assert_held(&profile_lock); handler = to_pprof_handler(dev); - if (!test_bit(*bit, handler->choices)) + if (!test_bit(*bit, handler->choices) && !test_bit(*bit, handler->hidden_choices)) return -EOPNOTSUPP; return handler->ops->profile_set(dev, *bit); @@ -239,21 +245,44 @@ static const struct class platform_profile_class = { /** * _aggregate_choices - Aggregate the available profile choices * @dev: The device - * @data: The available profile choices + * @arg: struct aggregate_choices_data * * Return: 0 on success, -errno on failure */ -static int _aggregate_choices(struct device *dev, void *data) +static int _aggregate_choices(struct device *dev, void *arg) { + unsigned long 
tmp[BITS_TO_LONGS(PLATFORM_PROFILE_LAST)]; + struct aggregate_choices_data *data = arg; struct platform_profile_handler *handler; - unsigned long *aggregate = data; lockdep_assert_held(&profile_lock); handler = to_pprof_handler(dev); - if (test_bit(PLATFORM_PROFILE_LAST, aggregate)) - bitmap_copy(aggregate, handler->choices, PLATFORM_PROFILE_LAST); + bitmap_or(tmp, handler->choices, handler->hidden_choices, PLATFORM_PROFILE_LAST); + if (test_bit(PLATFORM_PROFILE_LAST, data->aggregate)) + bitmap_copy(data->aggregate, tmp, PLATFORM_PROFILE_LAST); else - bitmap_and(aggregate, handler->choices, aggregate, PLATFORM_PROFILE_LAST); + bitmap_and(data->aggregate, tmp, data->aggregate, PLATFORM_PROFILE_LAST); + data->count++; + + return 0; +} + +/** + * _remove_hidden_choices - Remove hidden choices from aggregate data + * @dev: The device + * @arg: struct aggregate_choices_data + * + * Return: 0 on success, -errno on failure + */ +static int _remove_hidden_choices(struct device *dev, void *arg) +{ + struct aggregate_choices_data *data = arg; + struct platform_profile_handler *handler; + + lockdep_assert_held(&profile_lock); + handler = to_pprof_handler(dev); + bitmap_andnot(data->aggregate, handler->choices, + handler->hidden_choices, PLATFORM_PROFILE_LAST); return 0; } @@ -270,22 +299,31 @@ static ssize_t platform_profile_choices_show(struct device *dev, struct device_attribute *attr, char *buf) { - unsigned long aggregate[BITS_TO_LONGS(PLATFORM_PROFILE_LAST)]; + struct aggregate_choices_data data = { + .aggregate = { [0 ... BITS_TO_LONGS(PLATFORM_PROFILE_LAST) - 1] = ~0UL }, + .count = 0, + }; int err; - set_bit(PLATFORM_PROFILE_LAST, aggregate); + set_bit(PLATFORM_PROFILE_LAST, data.aggregate); scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &profile_lock) { err = class_for_each_device(&platform_profile_class, NULL, - aggregate, _aggregate_choices); + &data, _aggregate_choices); if (err) return err; + if (data.count == 1) { + err = class_for_each_device(&platform_profile_class, NULL, + &data, _remove_hidden_choices); + if (err) + return err; + } } /* no profile handler registered any more */ - if (bitmap_empty(aggregate, PLATFORM_PROFILE_LAST)) + if (bitmap_empty(data.aggregate, PLATFORM_PROFILE_LAST)) return -EINVAL; - return _commmon_choices_show(aggregate, buf); + return _commmon_choices_show(data.aggregate, buf); } /** @@ -373,7 +411,10 @@ static ssize_t platform_profile_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { - unsigned long choices[BITS_TO_LONGS(PLATFORM_PROFILE_LAST)]; + struct aggregate_choices_data data = { + .aggregate = { [0 ... 
BITS_TO_LONGS(PLATFORM_PROFILE_LAST) - 1] = ~0UL }, + .count = 0, + }; int ret; int i; @@ -381,13 +422,13 @@ static ssize_t platform_profile_store(struct device *dev, i = sysfs_match_string(profile_names, buf); if (i < 0 || i == PLATFORM_PROFILE_CUSTOM) return -EINVAL; - set_bit(PLATFORM_PROFILE_LAST, choices); + set_bit(PLATFORM_PROFILE_LAST, data.aggregate); scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &profile_lock) { ret = class_for_each_device(&platform_profile_class, NULL, - choices, _aggregate_choices); + &data, _aggregate_choices); if (ret) return ret; - if (!test_bit(i, choices)) + if (!test_bit(i, data.aggregate)) return -EOPNOTSUPP; ret = class_for_each_device(&platform_profile_class, NULL, &i, @@ -417,8 +458,14 @@ static int profile_class_registered(struct device *dev, const void *data) static umode_t profile_class_is_visible(struct kobject *kobj, struct attribute *attr, int idx) { - if (!class_find_device(&platform_profile_class, NULL, NULL, profile_class_registered)) + struct device *dev; + + dev = class_find_device(&platform_profile_class, NULL, NULL, profile_class_registered); + if (!dev) return 0; + + put_device(dev); + return attr->mode; } @@ -447,12 +494,15 @@ EXPORT_SYMBOL_GPL(platform_profile_notify); */ int platform_profile_cycle(void) { + struct aggregate_choices_data data = { + .aggregate = { [0 ... BITS_TO_LONGS(PLATFORM_PROFILE_LAST) - 1] = ~0UL }, + .count = 0, + }; enum platform_profile_option next = PLATFORM_PROFILE_LAST; enum platform_profile_option profile = PLATFORM_PROFILE_LAST; - unsigned long choices[BITS_TO_LONGS(PLATFORM_PROFILE_LAST)]; int err; - set_bit(PLATFORM_PROFILE_LAST, choices); + set_bit(PLATFORM_PROFILE_LAST, data.aggregate); scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &profile_lock) { err = class_for_each_device(&platform_profile_class, NULL, &profile, _aggregate_profiles); @@ -464,14 +514,14 @@ int platform_profile_cycle(void) return -EINVAL; err = class_for_each_device(&platform_profile_class, NULL, - choices, _aggregate_choices); + &data, _aggregate_choices); if (err) return err; /* never iterate into a custom if all drivers supported it */ - clear_bit(PLATFORM_PROFILE_CUSTOM, choices); + clear_bit(PLATFORM_PROFILE_CUSTOM, data.aggregate); - next = find_next_bit_wrap(choices, + next = find_next_bit_wrap(data.aggregate, PLATFORM_PROFILE_LAST, profile + 1); @@ -526,6 +576,14 @@ struct device *platform_profile_register(struct device *dev, const char *name, return ERR_PTR(-EINVAL); } + if (ops->hidden_choices) { + err = ops->hidden_choices(drvdata, pprof->hidden_choices); + if (err) { + dev_err(dev, "platform_profile hidden_choices failed\n"); + return ERR_PTR(err); + } + } + guard(mutex)(&profile_lock); /* create class interface for individual handler */ diff --git a/drivers/android/binderfs.c b/drivers/android/binderfs.c index bc6bae76ccaf1..94c6446604fc9 100644 --- a/drivers/android/binderfs.c +++ b/drivers/android/binderfs.c @@ -274,6 +274,7 @@ static void binderfs_evict_inode(struct inode *inode) mutex_unlock(&binderfs_minors_mutex); if (refcount_dec_and_test(&device->ref)) { + hlist_del_init(&device->hlist); kfree(device->context.name); kfree(device); } diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h index 8e895ae45c86b..c842e2de6ef98 100644 --- a/drivers/ata/ahci.h +++ b/drivers/ata/ahci.h @@ -386,8 +386,12 @@ struct ahci_host_priv { static inline bool ahci_ignore_port(struct ahci_host_priv *hpriv, unsigned int portid) { - return portid >= hpriv->nports || - !(hpriv->mask_port_map & (1 << portid)); + if (portid >= 
hpriv->nports) + return true; + /* mask_port_map not set means that all ports are available */ + if (!hpriv->mask_port_map) + return false; + return !(hpriv->mask_port_map & (1 << portid)); } extern int ahci_ignore_sss; diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c index fdfa7b2662180..e7ace4b10f15b 100644 --- a/drivers/ata/libahci.c +++ b/drivers/ata/libahci.c @@ -541,6 +541,7 @@ void ahci_save_initial_config(struct device *dev, struct ahci_host_priv *hpriv) hpriv->saved_port_map = port_map; } + /* mask_port_map not set means that all ports are available */ if (hpriv->mask_port_map) { dev_warn(dev, "masking port_map 0x%lx -> 0x%lx\n", port_map, diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c index 53b2c7719dc51..91d44302eac92 100644 --- a/drivers/ata/libahci_platform.c +++ b/drivers/ata/libahci_platform.c @@ -651,8 +651,6 @@ struct ahci_host_priv *ahci_platform_get_resources(struct platform_device *pdev, * If no sub-node was found, keep this for device tree * compatibility */ - hpriv->mask_port_map |= BIT(0); - rc = ahci_platform_get_phy(hpriv, 0, dev, dev->of_node); if (rc) goto err_out; diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 63ec2f2184319..d956735e2a764 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -2845,6 +2845,10 @@ int ata_dev_configure(struct ata_device *dev) (id[ATA_ID_SATA_CAPABILITY] & 0xe) == 0x2) dev->quirks |= ATA_QUIRK_NOLPM; + if (dev->quirks & ATA_QUIRK_NO_LPM_ON_ATI && + ata_dev_check_adapter(dev, PCI_VENDOR_ID_ATI)) + dev->quirks |= ATA_QUIRK_NOLPM; + if (ap->flags & ATA_FLAG_NO_LPM) dev->quirks |= ATA_QUIRK_NOLPM; @@ -3897,6 +3901,7 @@ static const char * const ata_quirk_names[] = { [__ATA_QUIRK_MAX_SEC_1024] = "maxsec1024", [__ATA_QUIRK_MAX_TRIM_128M] = "maxtrim128m", [__ATA_QUIRK_NO_NCQ_ON_ATI] = "noncqonati", + [__ATA_QUIRK_NO_LPM_ON_ATI] = "nolpmonati", [__ATA_QUIRK_NO_ID_DEV_LOG] = "noiddevlog", [__ATA_QUIRK_NO_LOG_DIR] = "nologdir", [__ATA_QUIRK_NO_FUA] = "nofua", @@ -4141,18 +4146,17 @@ static const struct ata_dev_quirks_entry __ata_dev_quirks[] = { { "Samsung SSD 850*", NULL, ATA_QUIRK_NO_NCQ_TRIM | ATA_QUIRK_ZERO_AFTER_TRIM }, { "Samsung SSD 860*", NULL, ATA_QUIRK_NO_NCQ_TRIM | - ATA_QUIRK_ZERO_AFTER_TRIM | - ATA_QUIRK_NO_NCQ_ON_ATI }, - { "Samsung SSD 870 QVO*", NULL, ATA_QUIRK_NO_NCQ_TRIM | ATA_QUIRK_ZERO_AFTER_TRIM | ATA_QUIRK_NO_NCQ_ON_ATI | - ATA_QUIRK_NOLPM }, + ATA_QUIRK_NO_LPM_ON_ATI }, { "Samsung SSD 870*", NULL, ATA_QUIRK_NO_NCQ_TRIM | ATA_QUIRK_ZERO_AFTER_TRIM | - ATA_QUIRK_NO_NCQ_ON_ATI }, + ATA_QUIRK_NO_NCQ_ON_ATI | + ATA_QUIRK_NO_LPM_ON_ATI }, { "SAMSUNG*MZ7LH*", NULL, ATA_QUIRK_NO_NCQ_TRIM | ATA_QUIRK_ZERO_AFTER_TRIM | - ATA_QUIRK_NO_NCQ_ON_ATI, }, + ATA_QUIRK_NO_NCQ_ON_ATI | + ATA_QUIRK_NO_LPM_ON_ATI }, { "FCCT*M500*", NULL, ATA_QUIRK_NO_NCQ_TRIM | ATA_QUIRK_ZERO_AFTER_TRIM }, diff --git a/drivers/base/core.c b/drivers/base/core.c index 5a1f051981149..2fde698430dff 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -2079,6 +2079,7 @@ static bool __fw_devlink_relax_cycles(struct fwnode_handle *con_handle, out: sup_handle->flags &= ~FWNODE_FLAG_VISITED; put_device(sup_dev); + put_device(con_dev); put_device(par_dev); return ret; } diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c index d94ef37480bd4..fdc7a0b2af109 100644 --- a/drivers/block/null_blk/main.c +++ b/drivers/block/null_blk/main.c @@ -1549,8 +1549,8 @@ static int null_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob) cmd = 
blk_mq_rq_to_pdu(req); cmd->error = null_process_cmd(cmd, req_op(req), blk_rq_pos(req), blk_rq_sectors(req)); - if (!blk_mq_add_to_batch(req, iob, (__force int) cmd->error, - blk_mq_end_request_batch)) + if (!blk_mq_add_to_batch(req, iob, cmd->error != BLK_STS_OK, + blk_mq_end_request_batch)) blk_mq_end_request(req, cmd->error); nr++; } diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c index 529085181f355..ca9a67b5b537a 100644 --- a/drivers/block/ublk_drv.c +++ b/drivers/block/ublk_drv.c @@ -2715,9 +2715,12 @@ static int ublk_ctrl_set_params(struct ublk_device *ub, if (ph.len > sizeof(struct ublk_params)) ph.len = sizeof(struct ublk_params); - /* parameters can only be changed when device isn't live */ mutex_lock(&ub->mutex); - if (ub->dev_info.state == UBLK_S_DEV_LIVE) { + if (test_bit(UB_STATE_USED, &ub->state)) { + /* + * Parameters can only be changed when device hasn't + * been started yet + */ ret = -EACCES; } else if (copy_from_user(&ub->params, argp, ph.len)) { ret = -EFAULT; diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index 6a61ec35f4260..91cde76a4b3e2 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -1207,11 +1207,12 @@ static int virtblk_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob) while ((vbr = virtqueue_get_buf(vq->vq, &len)) != NULL) { struct request *req = blk_mq_rq_from_pdu(vbr); + u8 status = virtblk_vbr_status(vbr); found++; if (!blk_mq_complete_request_remote(req) && - !blk_mq_add_to_batch(req, iob, virtblk_vbr_status(vbr), - virtblk_complete_batch)) + !blk_mq_add_to_batch(req, iob, status != VIRTIO_BLK_S_OK, + virtblk_complete_batch)) virtblk_request_done(req); } diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig index 4ab32abf0f486..7771edf54fb3f 100644 --- a/drivers/bluetooth/Kconfig +++ b/drivers/bluetooth/Kconfig @@ -56,6 +56,18 @@ config BT_HCIBTUSB_POLL_SYNC Say Y here to enable USB poll_sync for Bluetooth USB devices by default. +config BT_HCIBTUSB_AUTO_ISOC_ALT + bool "Automatically adjust alternate setting for Isoc endpoints" + depends on BT_HCIBTUSB + default y if CHROME_PLATFORMS + help + Say Y here to automatically adjust the alternate setting for + HCI_USER_CHANNEL whenever a SCO link is established. + + When enabled, btusb intercepts the HCI_EV_SYNC_CONN_COMPLETE packets + and configures isoc endpoint alternate setting automatically when + HCI_USER_CHANNEL is in use. 
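To illustrate what this help text describes, below is a minimal sketch of intercepting HCI_EV_SYNC_CONN_COMPLETE and switching the isochronous interface's alternate setting. It is not the btusb implementation: the function name, the calling context, and the concrete alternate-setting numbers are assumptions; real drivers derive the setting from the negotiated SCO air mode and packet size.

#include <linux/usb.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

/* Hypothetical helper, assumed to be called for each received HCI event
 * while the device is in HCI_USER_CHANNEL mode.
 */
static void sketch_auto_isoc_alt(struct usb_interface *isoc_intf,
				 struct sk_buff *skb)
{
	const struct hci_event_hdr *hdr;
	const struct hci_ev_sync_conn_complete *ev;

	if (skb->len < HCI_EVENT_HDR_SIZE + sizeof(*ev))
		return;

	hdr = (const void *)skb->data;
	if (hdr->evt != HCI_EV_SYNC_CONN_COMPLETE)
		return;

	ev = (const void *)(skb->data + HCI_EVENT_HDR_SIZE);
	if (ev->status)
		return;

	/* Transparent air mode (0x03, used for mSBC) needs wider isoc
	 * packets than CVSD; the alternate-setting numbers here are assumed.
	 */
	usb_set_interface(interface_to_usbdev(isoc_intf),
			  isoc_intf->cur_altsetting->desc.bInterfaceNumber,
			  ev->air_mode == 0x03 ? 6 : 1);
}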
+ config BT_HCIBTUSB_BCM bool "Broadcom protocol support" depends on BT_HCIBTUSB diff --git a/drivers/bluetooth/bfusb.c b/drivers/bluetooth/bfusb.c index cab93935cc7f1..0d6ad50da0466 100644 --- a/drivers/bluetooth/bfusb.c +++ b/drivers/bluetooth/bfusb.c @@ -365,9 +365,8 @@ static void bfusb_rx_complete(struct urb *urb) buf += 3; } - if (count < len) { + if (count < len) bt_dev_err(data->hdev, "block extends over URB buffer ranges"); - } if ((hdr & 0xe1) == 0xc1) bfusb_recv_block(data, hdr, buf, len); diff --git a/drivers/bluetooth/btintel.c b/drivers/bluetooth/btintel.c index d2540b28bc7ae..48e2f400957bc 100644 --- a/drivers/bluetooth/btintel.c +++ b/drivers/bluetooth/btintel.c @@ -35,6 +35,11 @@ enum { DSM_SET_RESET_METHOD = 3, }; +#define BTINTEL_BT_DOMAIN 0x12 +#define BTINTEL_SAR_LEGACY 0 +#define BTINTEL_SAR_INC_PWR 1 +#define BTINTEL_SAR_INC_PWR_SUPPORTED 0 + #define CMD_WRITE_BOOT_PARAMS 0xfc0e struct cmd_write_boot_params { __le32 boot_addr; @@ -478,6 +483,7 @@ int btintel_version_info_tlv(struct hci_dev *hdev, case 0x1c: /* Gale Peak (GaP) */ case 0x1d: /* BlazarU (BzrU) */ case 0x1e: /* BlazarI (Bzr) */ + case 0x1f: /* Scorpious Peak */ break; default: bt_dev_err(hdev, "Unsupported Intel hardware variant (0x%x)", @@ -2756,6 +2762,7 @@ static int btintel_set_dsbr(struct hci_dev *hdev, struct intel_version_tlv *ver) /* DSBR command needs to be sent for, * 1. BlazarI or BlazarIW + B0 step product in IML image. * 2. Gale Peak2 or BlazarU in OP image. + * 3. Scorpious Peak in IML image. */ switch (cnvi) { @@ -2771,6 +2778,10 @@ static int btintel_set_dsbr(struct hci_dev *hdev, struct intel_version_tlv *ver) hdev->bus == HCI_USB) break; return 0; + case BTINTEL_CNVI_SCP: + if (ver->img_type == BTINTEL_IMG_IML) + break; + return 0; default: return 0; } @@ -2800,6 +2811,331 @@ static int btintel_set_dsbr(struct hci_dev *hdev, struct intel_version_tlv *ver) return 0; } +#ifdef CONFIG_ACPI +static acpi_status btintel_evaluate_acpi_method(struct hci_dev *hdev, + acpi_string method, + union acpi_object **ptr, + u8 pkg_size) +{ + struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; + union acpi_object *p; + acpi_status status; + acpi_handle handle; + + handle = ACPI_HANDLE(GET_HCIDEV_DEV(hdev)); + if (!handle) { + bt_dev_dbg(hdev, "ACPI-BT: No ACPI support for Bluetooth device"); + return AE_NOT_EXIST; + } + + status = acpi_evaluate_object(handle, method, NULL, &buffer); + + if (ACPI_FAILURE(status)) { + bt_dev_dbg(hdev, "ACPI-BT: ACPI Failure: %s method: %s", + acpi_format_exception(status), method); + return status; + } + + p = buffer.pointer; + + if (p->type != ACPI_TYPE_PACKAGE || p->package.count < pkg_size) { + bt_dev_warn(hdev, "ACPI-BT: Invalid object type: %d or package count: %d", + p->type, p->package.count); + kfree(buffer.pointer); + return AE_ERROR; + } + + *ptr = buffer.pointer; + return 0; +} + +static union acpi_object *btintel_acpi_get_bt_pkg(union acpi_object *buffer) +{ + union acpi_object *domain, *bt_pkg; + int i; + + for (i = 1; i < buffer->package.count; i++) { + bt_pkg = &buffer->package.elements[i]; + domain = &bt_pkg->package.elements[0]; + if (domain->type == ACPI_TYPE_INTEGER && + domain->integer.value == BTINTEL_BT_DOMAIN) + return bt_pkg; + } + return ERR_PTR(-ENOENT); +} + +static int btintel_send_sar_ddc(struct hci_dev *hdev, struct btintel_cp_ddc_write *data, u8 len) +{ + struct sk_buff *skb; + + skb = __hci_cmd_sync(hdev, 0xfc8b, len, data, HCI_CMD_TIMEOUT); + if (IS_ERR(skb)) { + bt_dev_warn(hdev, "Failed to send sar ddc id:0x%4.4x (%ld)", + 
le16_to_cpu(data->id), PTR_ERR(skb)); + return PTR_ERR(skb); + } + kfree_skb(skb); + return 0; +} + +static int btintel_send_edr(struct hci_dev *hdev, struct btintel_cp_ddc_write *cmd, + int id, struct btintel_sar_inc_pwr *sar) +{ + cmd->len = 5; + cmd->id = cpu_to_le16(id); + cmd->data[0] = sar->br >> 3; + cmd->data[1] = sar->edr2 >> 3; + cmd->data[2] = sar->edr3 >> 3; + return btintel_send_sar_ddc(hdev, cmd, 6); +} + +static int btintel_send_le(struct hci_dev *hdev, struct btintel_cp_ddc_write *cmd, + int id, struct btintel_sar_inc_pwr *sar) +{ + cmd->len = 3; + cmd->id = cpu_to_le16(id); + cmd->data[0] = min3(sar->le, sar->le_lr, sar->le_2mhz) >> 3; + return btintel_send_sar_ddc(hdev, cmd, 4); +} + +static int btintel_send_br(struct hci_dev *hdev, struct btintel_cp_ddc_write *cmd, + int id, struct btintel_sar_inc_pwr *sar) +{ + cmd->len = 3; + cmd->id = cpu_to_le16(id); + cmd->data[0] = sar->br >> 3; + return btintel_send_sar_ddc(hdev, cmd, 4); +} + +static int btintel_send_br_mutual(struct hci_dev *hdev, struct btintel_cp_ddc_write *cmd, + int id, struct btintel_sar_inc_pwr *sar) +{ + cmd->len = 3; + cmd->id = cpu_to_le16(id); + cmd->data[0] = sar->br; + return btintel_send_sar_ddc(hdev, cmd, 4); +} + +static int btintel_send_edr2(struct hci_dev *hdev, struct btintel_cp_ddc_write *cmd, + int id, struct btintel_sar_inc_pwr *sar) +{ + cmd->len = 3; + cmd->id = cpu_to_le16(id); + cmd->data[0] = sar->edr2; + return btintel_send_sar_ddc(hdev, cmd, 4); +} + +static int btintel_send_edr3(struct hci_dev *hdev, struct btintel_cp_ddc_write *cmd, + int id, struct btintel_sar_inc_pwr *sar) +{ + cmd->len = 3; + cmd->id = cpu_to_le16(id); + cmd->data[0] = sar->edr3; + return btintel_send_sar_ddc(hdev, cmd, 4); +} + +static int btintel_set_legacy_sar(struct hci_dev *hdev, struct btintel_sar_inc_pwr *sar) +{ + struct btintel_cp_ddc_write *cmd; + u8 buffer[64]; + int ret; + + cmd = (void *)buffer; + ret = btintel_send_br(hdev, cmd, 0x0131, sar); + if (ret) + return ret; + + ret = btintel_send_br(hdev, cmd, 0x0132, sar); + if (ret) + return ret; + + ret = btintel_send_le(hdev, cmd, 0x0133, sar); + if (ret) + return ret; + + ret = btintel_send_edr(hdev, cmd, 0x0137, sar); + if (ret) + return ret; + + ret = btintel_send_edr(hdev, cmd, 0x0138, sar); + if (ret) + return ret; + + ret = btintel_send_edr(hdev, cmd, 0x013b, sar); + if (ret) + return ret; + + ret = btintel_send_edr(hdev, cmd, 0x013c, sar); + + return ret; +} + +static int btintel_set_mutual_sar(struct hci_dev *hdev, struct btintel_sar_inc_pwr *sar) +{ + struct btintel_cp_ddc_write *cmd; + struct sk_buff *skb; + u8 buffer[64]; + bool enable; + int ret; + + cmd = (void *)buffer; + + cmd->len = 3; + cmd->id = cpu_to_le16(0x019e); + + if (sar->revision == BTINTEL_SAR_INC_PWR && + sar->inc_power_mode == BTINTEL_SAR_INC_PWR_SUPPORTED) + cmd->data[0] = 0x01; + else + cmd->data[0] = 0x00; + + ret = btintel_send_sar_ddc(hdev, cmd, 4); + if (ret) + return ret; + + if (sar->revision == BTINTEL_SAR_INC_PWR && + sar->inc_power_mode == BTINTEL_SAR_INC_PWR_SUPPORTED) { + cmd->len = 3; + cmd->id = cpu_to_le16(0x019f); + cmd->data[0] = sar->sar_2400_chain_a; + + ret = btintel_send_sar_ddc(hdev, cmd, 4); + if (ret) + return ret; + } + + ret = btintel_send_br_mutual(hdev, cmd, 0x01a0, sar); + if (ret) + return ret; + + ret = btintel_send_edr2(hdev, cmd, 0x01a1, sar); + if (ret) + return ret; + + ret = btintel_send_edr3(hdev, cmd, 0x01a2, sar); + if (ret) + return ret; + + ret = btintel_send_le(hdev, cmd, 0x01a3, sar); + if (ret) + return ret; + + enable = true; + 
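/* Commit the SAR values configured above: 0xfe25 is the Intel vendor SAR-enable command and carries a single enable byte. */ +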
skb = __hci_cmd_sync(hdev, 0xfe25, 1, &enable, HCI_CMD_TIMEOUT); + if (IS_ERR(skb)) { + bt_dev_warn(hdev, "Failed to send Intel SAR Enable (%ld)", PTR_ERR(skb)); + return PTR_ERR(skb); + } + + kfree_skb(skb); + return 0; +} + +static int btintel_sar_send_to_device(struct hci_dev *hdev, struct btintel_sar_inc_pwr *sar, + struct intel_version_tlv *ver) +{ + u16 cnvi, cnvr; + int ret; + + cnvi = ver->cnvi_top & 0xfff; + cnvr = ver->cnvr_top & 0xfff; + + if (cnvi < BTINTEL_CNVI_BLAZARI && cnvr < BTINTEL_CNVR_FMP2) { + bt_dev_info(hdev, "Applying legacy Bluetooth SAR"); + ret = btintel_set_legacy_sar(hdev, sar); + } else if (cnvi == BTINTEL_CNVI_GAP || cnvr == BTINTEL_CNVR_FMP2) { + bt_dev_info(hdev, "Applying mutual Bluetooth SAR"); + ret = btintel_set_mutual_sar(hdev, sar); + } else { + ret = -EOPNOTSUPP; + } + + return ret; +} + +static int btintel_acpi_set_sar(struct hci_dev *hdev, struct intel_version_tlv *ver) +{ + union acpi_object *bt_pkg, *buffer = NULL; + struct btintel_sar_inc_pwr sar; + acpi_status status; + u8 revision; + int ret; + + status = btintel_evaluate_acpi_method(hdev, "BRDS", &buffer, 2); + if (ACPI_FAILURE(status)) + return -ENOENT; + + bt_pkg = btintel_acpi_get_bt_pkg(buffer); + + if (IS_ERR(bt_pkg)) { + ret = PTR_ERR(bt_pkg); + goto error; + } + + if (!bt_pkg->package.count) { + ret = -EINVAL; + goto error; + } + + revision = buffer->package.elements[0].integer.value; + + if (revision > BTINTEL_SAR_INC_PWR) { + bt_dev_dbg(hdev, "BT_SAR: revision: 0x%2.2x not supported", revision); + ret = -EOPNOTSUPP; + goto error; + } + + memset(&sar, 0, sizeof(sar)); + + if (revision == BTINTEL_SAR_LEGACY && bt_pkg->package.count == 8) { + sar.revision = revision; + sar.bt_sar_bios = bt_pkg->package.elements[1].integer.value; + sar.br = bt_pkg->package.elements[2].integer.value; + sar.edr2 = bt_pkg->package.elements[3].integer.value; + sar.edr3 = bt_pkg->package.elements[4].integer.value; + sar.le = bt_pkg->package.elements[5].integer.value; + sar.le_2mhz = bt_pkg->package.elements[6].integer.value; + sar.le_lr = bt_pkg->package.elements[7].integer.value; + + } else if (revision == BTINTEL_SAR_INC_PWR && bt_pkg->package.count == 10) { + sar.revision = revision; + sar.bt_sar_bios = bt_pkg->package.elements[1].integer.value; + sar.inc_power_mode = bt_pkg->package.elements[2].integer.value; + sar.sar_2400_chain_a = bt_pkg->package.elements[3].integer.value; + sar.br = bt_pkg->package.elements[4].integer.value; + sar.edr2 = bt_pkg->package.elements[5].integer.value; + sar.edr3 = bt_pkg->package.elements[6].integer.value; + sar.le = bt_pkg->package.elements[7].integer.value; + sar.le_2mhz = bt_pkg->package.elements[8].integer.value; + sar.le_lr = bt_pkg->package.elements[9].integer.value; + } else { + ret = -EINVAL; + goto error; + } + + /* Apply only if it is enabled in BIOS */ + if (sar.bt_sar_bios != 1) { + bt_dev_dbg(hdev, "Bluetooth SAR is not enabled"); + ret = -EOPNOTSUPP; + goto error; + } + + ret = btintel_sar_send_to_device(hdev, &sar, ver); +error: + kfree(buffer); + return ret; +} +#endif /* CONFIG_ACPI */ + +static int btintel_set_specific_absorption_rate(struct hci_dev *hdev, + struct intel_version_tlv *ver) +{ +#ifdef CONFIG_ACPI + return btintel_acpi_set_sar(hdev, ver); +#endif + return 0; +} + int btintel_bootloader_setup_tlv(struct hci_dev *hdev, struct intel_version_tlv *ver) { @@ -2877,6 +3213,9 @@ int btintel_bootloader_setup_tlv(struct hci_dev *hdev, hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT); + /* Send sar values to controller */ + 
btintel_set_specific_absorption_rate(hdev, ver); + /* Set PPAG feature */ btintel_set_ppag(hdev, ver); @@ -2919,6 +3258,7 @@ void btintel_set_msft_opcode(struct hci_dev *hdev, u8 hw_variant) case 0x1c: case 0x1d: case 0x1e: + case 0x1f: hci_set_msft_opcode(hdev, 0xFC1E); break; default: @@ -3258,6 +3598,7 @@ static int btintel_setup_combined(struct hci_dev *hdev) case 0x1b: case 0x1d: case 0x1e: + case 0x1f: /* Display version information of TLV type */ btintel_version_info_tlv(hdev, &ver_tlv); diff --git a/drivers/bluetooth/btintel.h b/drivers/bluetooth/btintel.h index fa43eb1378218..2aece3effa4e9 100644 --- a/drivers/bluetooth/btintel.h +++ b/drivers/bluetooth/btintel.h @@ -56,6 +56,10 @@ struct intel_tlv { #define BTINTEL_CNVI_BLAZARIW 0x901 #define BTINTEL_CNVI_GAP 0x910 #define BTINTEL_CNVI_BLAZARU 0x930 +#define BTINTEL_CNVI_SCP 0xA00 + +/* CNVR */ +#define BTINTEL_CNVR_FMP2 0x910 #define BTINTEL_IMG_BOOTLOADER 0x01 /* Bootloader image */ #define BTINTEL_IMG_IML 0x02 /* Intermediate image */ @@ -164,6 +168,26 @@ struct hci_ppag_enable_cmd { #define INTEL_TLV_DEBUG_EXCEPTION 0x02 #define INTEL_TLV_TEST_EXCEPTION 0xDE +struct btintel_cp_ddc_write { + u8 len; + __le16 id; + u8 data[]; +} __packed; + +/* Bluetooth SAR feature (BRDS), Revision 1 */ +struct btintel_sar_inc_pwr { + u8 revision; + u32 bt_sar_bios; /* Mode of SAR control to be used, 1:enabled in bios */ + u32 inc_power_mode; /* Increased power mode */ + u8 sar_2400_chain_a; /* Sar power restriction LB */ + u8 br; + u8 edr2; + u8 edr3; + u8 le; + u8 le_2mhz; + u8 le_lr; +}; + #define INTEL_HW_PLATFORM(cnvx_bt) ((u8)(((cnvx_bt) & 0x0000ff00) >> 8)) #define INTEL_HW_VARIANT(cnvx_bt) ((u8)(((cnvx_bt) & 0x003f0000) >> 16)) #define INTEL_CNVX_TOP_TYPE(cnvx_top) ((cnvx_top) & 0x00000fff) diff --git a/drivers/bluetooth/btintel_pcie.c b/drivers/bluetooth/btintel_pcie.c index 091ffe3e14954..c1e69fcc9c4fa 100644 --- a/drivers/bluetooth/btintel_pcie.c +++ b/drivers/bluetooth/btintel_pcie.c @@ -36,6 +36,7 @@ /* Intel Bluetooth PCIe device id table */ static const struct pci_device_id btintel_pcie_table[] = { { BTINTEL_PCI_DEVICE(0xA876, PCI_ANY_ID) }, + { BTINTEL_PCI_DEVICE(0xE476, PCI_ANY_ID) }, { 0 } }; MODULE_DEVICE_TABLE(pci, btintel_pcie_table); @@ -48,6 +49,19 @@ MODULE_DEVICE_TABLE(pci, btintel_pcie_table); #define BTINTEL_PCIE_HCI_EVT_PKT 0x00000004 #define BTINTEL_PCIE_HCI_ISO_PKT 0x00000005 +#define BTINTEL_PCIE_MAGIC_NUM 0xA5A5A5A5 + +#define BTINTEL_PCIE_BLZR_HWEXP_SIZE 1024 +#define BTINTEL_PCIE_BLZR_HWEXP_DMP_ADDR 0xB00A7C00 + +#define BTINTEL_PCIE_SCP_HWEXP_SIZE 4096 +#define BTINTEL_PCIE_SCP_HWEXP_DMP_ADDR 0xB030F800 + +#define BTINTEL_PCIE_MAGIC_NUM 0xA5A5A5A5 + +#define BTINTEL_PCIE_TRIGGER_REASON_USER_TRIGGER 0x17A2 +#define BTINTEL_PCIE_TRIGGER_REASON_FW_ASSERT 0x1E61 + /* Alive interrupt context */ enum { BTINTEL_PCIE_ROM, @@ -59,6 +73,83 @@ enum { BTINTEL_PCIE_D3 }; +/* Structure for dbgc fragment buffer + * @buf_addr_lsb: LSB of the buffer's physical address + * @buf_addr_msb: MSB of the buffer's physical address + * @buf_size: Total size of the buffer + */ +struct btintel_pcie_dbgc_ctxt_buf { + u32 buf_addr_lsb; + u32 buf_addr_msb; + u32 buf_size; +}; + +/* Structure for dbgc fragment + * @magic_num: 0XA5A5A5A5 + * @ver: For Driver-FW compatibility + * @total_size: Total size of the payload debug info + * @num_buf: Num of allocated debug bufs + * @bufs: All buffer's addresses and sizes + */ +struct btintel_pcie_dbgc_ctxt { + u32 magic_num; + u32 ver; + u32 total_size; + u32 num_buf; + struct 
btintel_pcie_dbgc_ctxt_buf bufs[BTINTEL_PCIE_DBGC_BUFFER_COUNT]; +}; + +/* This function initializes the memory for DBGC buffers and formats the + * DBGC fragment which consists of header info and each DBGC buffer's LSB, MSB and + * size as the payload + */ +static int btintel_pcie_setup_dbgc(struct btintel_pcie_data *data) +{ + struct btintel_pcie_dbgc_ctxt db_frag; + struct data_buf *buf; + int i; + + data->dbgc.count = BTINTEL_PCIE_DBGC_BUFFER_COUNT; + data->dbgc.bufs = devm_kcalloc(&data->pdev->dev, data->dbgc.count, + sizeof(*buf), GFP_KERNEL); + if (!data->dbgc.bufs) + return -ENOMEM; + + data->dbgc.buf_v_addr = dmam_alloc_coherent(&data->pdev->dev, + data->dbgc.count * + BTINTEL_PCIE_DBGC_BUFFER_SIZE, + &data->dbgc.buf_p_addr, + GFP_KERNEL | __GFP_NOWARN); + if (!data->dbgc.buf_v_addr) + return -ENOMEM; + + data->dbgc.frag_v_addr = dmam_alloc_coherent(&data->pdev->dev, + sizeof(struct btintel_pcie_dbgc_ctxt), + &data->dbgc.frag_p_addr, + GFP_KERNEL | __GFP_NOWARN); + if (!data->dbgc.frag_v_addr) + return -ENOMEM; + + data->dbgc.frag_size = sizeof(struct btintel_pcie_dbgc_ctxt); + + db_frag.magic_num = BTINTEL_PCIE_MAGIC_NUM; + db_frag.ver = BTINTEL_PCIE_DBGC_FRAG_VERSION; + db_frag.total_size = BTINTEL_PCIE_DBGC_FRAG_PAYLOAD_SIZE; + db_frag.num_buf = BTINTEL_PCIE_DBGC_FRAG_BUFFER_COUNT; + + for (i = 0; i < data->dbgc.count; i++) { + buf = &data->dbgc.bufs[i]; + buf->data_p_addr = data->dbgc.buf_p_addr + i * BTINTEL_PCIE_DBGC_BUFFER_SIZE; + buf->data = data->dbgc.buf_v_addr + i * BTINTEL_PCIE_DBGC_BUFFER_SIZE; + db_frag.bufs[i].buf_addr_lsb = lower_32_bits(buf->data_p_addr); + db_frag.bufs[i].buf_addr_msb = upper_32_bits(buf->data_p_addr); + db_frag.bufs[i].buf_size = BTINTEL_PCIE_DBGC_BUFFER_SIZE; + } + + memcpy(data->dbgc.frag_v_addr, &db_frag, sizeof(db_frag)); + return 0; +} + static inline void ipc_print_ia_ring(struct hci_dev *hdev, struct ia *ia, u16 queue_num) { @@ -273,6 +364,271 @@ static int btintel_pcie_reset_bt(struct btintel_pcie_data *data) return reg == 0 ? 
0 : -ENODEV; } +static void btintel_pcie_mac_init(struct btintel_pcie_data *data) +{ + u32 reg; + + /* Set MAC_INIT bit to start primary bootloader */ + reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG); + reg &= ~(BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_INIT | + BTINTEL_PCIE_CSR_FUNC_CTRL_BUS_MASTER_DISCON | + BTINTEL_PCIE_CSR_FUNC_CTRL_SW_RESET); + reg |= (BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_ENA | + BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_INIT); + btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG, reg); +} + +static int btintel_pcie_add_dmp_data(struct hci_dev *hdev, const void *data, int size) +{ + struct sk_buff *skb; + int err; + + skb = alloc_skb(size, GFP_ATOMIC); + if (!skb) + return -ENOMEM; + + skb_put_data(skb, data, size); + err = hci_devcd_append(hdev, skb); + if (err) { + bt_dev_err(hdev, "Failed to append data in the coredump"); + return err; + } + + return 0; +} + +static int btintel_pcie_get_mac_access(struct btintel_pcie_data *data) +{ + u32 reg; + int retry = 15; + + reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG); + + reg |= BTINTEL_PCIE_CSR_FUNC_CTRL_STOP_MAC_ACCESS_DIS; + reg |= BTINTEL_PCIE_CSR_FUNC_CTRL_XTAL_CLK_REQ; + if ((reg & BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_ACCESS_STS) == 0) + reg |= BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_ACCESS_REQ; + + btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG, reg); + + do { + reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG); + if (reg & BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_ACCESS_STS) + return 0; + /* Need delay here for Target Access hardware to settle down */ + usleep_range(1000, 1200); + + } while (--retry > 0); + + return -ETIME; +} + +static void btintel_pcie_release_mac_access(struct btintel_pcie_data *data) +{ + u32 reg; + + reg = btintel_pcie_rd_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG); + + if (reg & BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_ACCESS_REQ) + reg &= ~BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_ACCESS_REQ; + + if (reg & BTINTEL_PCIE_CSR_FUNC_CTRL_STOP_MAC_ACCESS_DIS) + reg &= ~BTINTEL_PCIE_CSR_FUNC_CTRL_STOP_MAC_ACCESS_DIS; + + if (reg & BTINTEL_PCIE_CSR_FUNC_CTRL_XTAL_CLK_REQ) + reg &= ~BTINTEL_PCIE_CSR_FUNC_CTRL_XTAL_CLK_REQ; + + btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG, reg); +} + +static void btintel_pcie_copy_tlv(struct sk_buff *skb, enum btintel_pcie_tlv_type type, + void *data, int size) +{ + struct intel_tlv *tlv; + + tlv = skb_put(skb, sizeof(*tlv) + size); + tlv->type = type; + tlv->len = size; + memcpy(tlv->val, data, tlv->len); +} + +static int btintel_pcie_read_dram_buffers(struct btintel_pcie_data *data) +{ + u32 offset, prev_size, wr_ptr_status, dump_size, i; + struct btintel_pcie_dbgc *dbgc = &data->dbgc; + u8 buf_idx, dump_time_len, fw_build; + struct hci_dev *hdev = data->hdev; + struct intel_tlv *tlv; + struct timespec64 now; + struct sk_buff *skb; + struct tm tm_now; + char buf[256]; + u16 hdr_len; + int ret; + + wr_ptr_status = btintel_pcie_rd_dev_mem(data, BTINTEL_PCIE_DBGC_CUR_DBGBUFF_STATUS); + offset = wr_ptr_status & BTINTEL_PCIE_DBG_OFFSET_BIT_MASK; + + buf_idx = BTINTEL_PCIE_DBGC_DBG_BUF_IDX(wr_ptr_status); + if (buf_idx > dbgc->count) { + bt_dev_warn(hdev, "Buffer index is invalid"); + return -EINVAL; + } + + prev_size = buf_idx * BTINTEL_PCIE_DBGC_BUFFER_SIZE; + if (prev_size + offset >= prev_size) + data->dmp_hdr.write_ptr = prev_size + offset; + else + return -EINVAL; + + ktime_get_real_ts64(&now); + time64_to_tm(now.tv_sec, 0, &tm_now); + dump_time_len = snprintf(buf, sizeof(buf), "Dump Time: %02d-%02d-%04ld %02d:%02d:%02d", + tm_now.tm_mday, 
tm_now.tm_mon + 1, tm_now.tm_year + 1900, + tm_now.tm_hour, tm_now.tm_min, tm_now.tm_sec); + + fw_build = snprintf(buf + dump_time_len, sizeof(buf) - dump_time_len, + "Firmware Timestamp: Year %u WW %02u buildtype %u build %u", + 2000 + (data->dmp_hdr.fw_timestamp >> 8), + data->dmp_hdr.fw_timestamp & 0xff, data->dmp_hdr.fw_build_type, + data->dmp_hdr.fw_build_num); + + hdr_len = sizeof(*tlv) + sizeof(data->dmp_hdr.cnvi_bt) + + sizeof(*tlv) + sizeof(data->dmp_hdr.write_ptr) + + sizeof(*tlv) + sizeof(data->dmp_hdr.wrap_ctr) + + sizeof(*tlv) + sizeof(data->dmp_hdr.trigger_reason) + + sizeof(*tlv) + sizeof(data->dmp_hdr.fw_git_sha1) + + sizeof(*tlv) + sizeof(data->dmp_hdr.cnvr_top) + + sizeof(*tlv) + sizeof(data->dmp_hdr.cnvi_top) + + sizeof(*tlv) + dump_time_len + + sizeof(*tlv) + fw_build; + + dump_size = hdr_len + sizeof(hdr_len); + + skb = alloc_skb(dump_size, GFP_KERNEL); + if (!skb) + return -ENOMEM; + + /* Add debug buffers data length to dump size */ + dump_size += BTINTEL_PCIE_DBGC_BUFFER_SIZE * dbgc->count; + + ret = hci_devcd_init(hdev, dump_size); + if (ret) { + bt_dev_err(hdev, "Failed to init devcoredump, err %d", ret); + kfree_skb(skb); + return ret; + } + + skb_put_data(skb, &hdr_len, sizeof(hdr_len)); + + btintel_pcie_copy_tlv(skb, BTINTEL_CNVI_BT, &data->dmp_hdr.cnvi_bt, + sizeof(data->dmp_hdr.cnvi_bt)); + + btintel_pcie_copy_tlv(skb, BTINTEL_WRITE_PTR, &data->dmp_hdr.write_ptr, + sizeof(data->dmp_hdr.write_ptr)); + + data->dmp_hdr.wrap_ctr = btintel_pcie_rd_dev_mem(data, + BTINTEL_PCIE_DBGC_DBGBUFF_WRAP_ARND); + + btintel_pcie_copy_tlv(skb, BTINTEL_WRAP_CTR, &data->dmp_hdr.wrap_ctr, + sizeof(data->dmp_hdr.wrap_ctr)); + + btintel_pcie_copy_tlv(skb, BTINTEL_TRIGGER_REASON, &data->dmp_hdr.trigger_reason, + sizeof(data->dmp_hdr.trigger_reason)); + + btintel_pcie_copy_tlv(skb, BTINTEL_FW_SHA, &data->dmp_hdr.fw_git_sha1, + sizeof(data->dmp_hdr.fw_git_sha1)); + + btintel_pcie_copy_tlv(skb, BTINTEL_CNVR_TOP, &data->dmp_hdr.cnvr_top, + sizeof(data->dmp_hdr.cnvr_top)); + + btintel_pcie_copy_tlv(skb, BTINTEL_CNVI_TOP, &data->dmp_hdr.cnvi_top, + sizeof(data->dmp_hdr.cnvi_top)); + + btintel_pcie_copy_tlv(skb, BTINTEL_DUMP_TIME, buf, dump_time_len); + + btintel_pcie_copy_tlv(skb, BTINTEL_FW_BUILD, buf + dump_time_len, fw_build); + + ret = hci_devcd_append(hdev, skb); + if (ret) + goto exit_err; + + for (i = 0; i < dbgc->count; i++) { + ret = btintel_pcie_add_dmp_data(hdev, dbgc->bufs[i].data, + BTINTEL_PCIE_DBGC_BUFFER_SIZE); + if (ret) + break; + } + +exit_err: + hci_devcd_complete(hdev); + return ret; +} + +static void btintel_pcie_dump_traces(struct hci_dev *hdev) +{ + struct btintel_pcie_data *data = hci_get_drvdata(hdev); + int ret = 0; + + ret = btintel_pcie_get_mac_access(data); + if (ret) { + bt_dev_err(hdev, "Failed to get mac access: (%d)", ret); + return; + } + + ret = btintel_pcie_read_dram_buffers(data); + + btintel_pcie_release_mac_access(data); + + if (ret) + bt_dev_err(hdev, "Failed to dump traces: (%d)", ret); +} + +static void btintel_pcie_dump_hdr(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct btintel_pcie_data *data = hci_get_drvdata(hdev); + u16 len = skb->len; + u16 *hdrlen_ptr; + char buf[80]; + + hdrlen_ptr = skb_put_zero(skb, sizeof(len)); + + snprintf(buf, sizeof(buf), "Controller Name: 0x%X\n", + INTEL_HW_VARIANT(data->dmp_hdr.cnvi_bt)); + skb_put_data(skb, buf, strlen(buf)); + + snprintf(buf, sizeof(buf), "Firmware Build Number: %u\n", + data->dmp_hdr.fw_build_num); + skb_put_data(skb, buf, strlen(buf)); + + snprintf(buf, sizeof(buf), "Driver: %s\n", 
data->dmp_hdr.driver_name); + skb_put_data(skb, buf, strlen(buf)); + + snprintf(buf, sizeof(buf), "Vendor: Intel\n"); + skb_put_data(skb, buf, strlen(buf)); + + *hdrlen_ptr = skb->len - len; +} + +static void btintel_pcie_dump_notify(struct hci_dev *hdev, int state) +{ + struct btintel_pcie_data *data = hci_get_drvdata(hdev); + + switch (state) { + case HCI_DEVCOREDUMP_IDLE: + data->dmp_hdr.state = HCI_DEVCOREDUMP_IDLE; + break; + case HCI_DEVCOREDUMP_ACTIVE: + data->dmp_hdr.state = HCI_DEVCOREDUMP_ACTIVE; + break; + case HCI_DEVCOREDUMP_TIMEOUT: + case HCI_DEVCOREDUMP_ABORT: + case HCI_DEVCOREDUMP_DONE: + data->dmp_hdr.state = HCI_DEVCOREDUMP_IDLE; + break; + } +} + /* This function enables BT function by setting BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_INIT bit in * BTINTEL_PCIE_CSR_FUNC_CTRL_REG register and wait for MSI-X with * BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP0. @@ -329,20 +685,6 @@ static int btintel_pcie_enable_bt(struct btintel_pcie_data *data) return 0; } -/* BIT(0) - ROM, BIT(1) - IML and BIT(3) - OP - * Sometimes during firmware image switching from ROM to IML or IML to OP image, - * the previous image bit is not cleared by firmware when alive interrupt is - * received. Driver needs to take care of these sticky bits when deciding the - * current image running on controller. - * Ex: 0x10 and 0x11 - both represents that controller is running IML - */ -static inline bool btintel_pcie_in_rom(struct btintel_pcie_data *data) -{ - return data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_ROM && - !(data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_IML) && - !(data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_OPFW); -} - static inline bool btintel_pcie_in_op(struct btintel_pcie_data *data) { return data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_OPFW; @@ -393,6 +735,27 @@ static inline char *btintel_pcie_alivectxt_state2str(u32 alive_intr_ctxt) } } +static int btintel_pcie_read_device_mem(struct btintel_pcie_data *data, + void *buf, u32 dev_addr, int len) +{ + int err; + u32 *val = buf; + + /* Get device mac access */ + err = btintel_pcie_get_mac_access(data); + if (err) { + bt_dev_err(data->hdev, "Failed to get mac access %d", err); + return err; + } + + for (; len > 0; len -= 4, dev_addr += 4, val++) + *val = btintel_pcie_rd_dev_mem(data, dev_addr); + + btintel_pcie_release_mac_access(data); + + return 0; +} + /* This function handles the MSI-X interrupt for gp0 cause (bit 0 in * BTINTEL_PCIE_CSR_MSIX_HW_INT_CAUSES) which is sent for boot stage and image response. 
*/ @@ -714,6 +1077,126 @@ static int btintel_pcie_recv_frame(struct btintel_pcie_data *data, return ret; } +static void btintel_pcie_read_hwexp(struct btintel_pcie_data *data) +{ + int len, err, offset, pending; + struct sk_buff *skb; + u8 *buf, prefix[64]; + u32 addr, val; + u16 pkt_len; + + struct tlv { + u8 type; + __le16 len; + u8 val[]; + } __packed; + + struct tlv *tlv; + + switch (data->dmp_hdr.cnvi_top & 0xfff) { + case BTINTEL_CNVI_BLAZARI: + case BTINTEL_CNVI_BLAZARIW: + /* only from step B0 onwards */ + if (INTEL_CNVX_TOP_STEP(data->dmp_hdr.cnvi_top) != 0x01) + return; + len = BTINTEL_PCIE_BLZR_HWEXP_SIZE; /* exception data length */ + addr = BTINTEL_PCIE_BLZR_HWEXP_DMP_ADDR; + break; + case BTINTEL_CNVI_SCP: + len = BTINTEL_PCIE_SCP_HWEXP_SIZE; + addr = BTINTEL_PCIE_SCP_HWEXP_DMP_ADDR; + break; + default: + bt_dev_err(data->hdev, "Unsupported cnvi 0x%8.8x", data->dmp_hdr.cnvi_top); + return; + } + + buf = kzalloc(len, GFP_KERNEL); + if (!buf) + goto exit_on_error; + + btintel_pcie_mac_init(data); + + err = btintel_pcie_read_device_mem(data, buf, addr, len); + if (err) + goto exit_on_error; + + val = get_unaligned_le32(buf); + if (val != BTINTEL_PCIE_MAGIC_NUM) { + bt_dev_err(data->hdev, "Invalid exception dump signature: 0x%8.8x", + val); + goto exit_on_error; + } + + snprintf(prefix, sizeof(prefix), "Bluetooth: %s: ", bt_dev_name(data->hdev)); + + offset = 4; + do { + pending = len - offset; + if (pending < sizeof(*tlv)) + break; + tlv = (struct tlv *)(buf + offset); + + /* If type == 0, then there are no more TLVs to be parsed */ + if (!tlv->type) { + bt_dev_dbg(data->hdev, "Invalid TLV type 0"); + break; + } + pkt_len = le16_to_cpu(tlv->len); + offset += sizeof(*tlv); + pending = len - offset; + if (pkt_len > pending) + break; + + offset += pkt_len; + + /* Only TLVs of type == 1 are HCI events, no need to process other + * TLVs + */ + if (tlv->type != 1) + continue; + + bt_dev_dbg(data->hdev, "TLV packet length: %u", pkt_len); + if (pkt_len > HCI_MAX_EVENT_SIZE) + break; + skb = bt_skb_alloc(pkt_len, GFP_KERNEL); + if (!skb) + goto exit_on_error; + hci_skb_pkt_type(skb) = HCI_EVENT_PKT; + skb_put_data(skb, tlv->val, pkt_len); + + /* copy Intel specific pcie packet type */ + val = BTINTEL_PCIE_HCI_EVT_PKT; + memcpy(skb_push(skb, BTINTEL_PCIE_HCI_TYPE_LEN), &val, + BTINTEL_PCIE_HCI_TYPE_LEN); + + print_hex_dump(KERN_DEBUG, prefix, DUMP_PREFIX_OFFSET, 16, 1, + tlv->val, pkt_len, false); + + btintel_pcie_recv_frame(data, skb); + } while (offset < len); + +exit_on_error: + kfree(buf); +} + +static void btintel_pcie_msix_hw_exp_handler(struct btintel_pcie_data *data) +{ + bt_dev_err(data->hdev, "Received hw exception interrupt"); + + if (test_and_set_bit(BTINTEL_PCIE_CORE_HALTED, &data->flags)) + return; + + if (test_and_set_bit(BTINTEL_PCIE_HWEXP_INPROGRESS, &data->flags)) + return; + + /* Trigger device core dump when there is HW exception */ + if (!test_and_set_bit(BTINTEL_PCIE_COREDUMP_INPROGRESS, &data->flags)) + data->dmp_hdr.trigger_reason = BTINTEL_PCIE_TRIGGER_REASON_FW_ASSERT; + + queue_work(data->workqueue, &data->rx_work); +} + static void btintel_pcie_rx_work(struct work_struct *work) { struct btintel_pcie_data *data = container_of(work, @@ -722,6 +1205,23 @@ static void btintel_pcie_rx_work(struct work_struct *work) int err; struct hci_dev *hdev = data->hdev; + if (test_bit(BTINTEL_PCIE_HWEXP_INPROGRESS, &data->flags)) { + /* Unlike usb products, controller will not send hardware + * exception event on exception. 
Instead, the controller writes the + * hardware event to device memory along with optional debug + * events, raises MSIX and halts. Driver shall read the + * exception event from device memory and pass it to the stack for + * further processing. + */ + btintel_pcie_read_hwexp(data); + clear_bit(BTINTEL_PCIE_HWEXP_INPROGRESS, &data->flags); + } + + if (test_bit(BTINTEL_PCIE_COREDUMP_INPROGRESS, &data->flags)) { + btintel_pcie_dump_traces(data->hdev); + clear_bit(BTINTEL_PCIE_COREDUMP_INPROGRESS, &data->flags); + } + /* Process the sk_buffs in queue and send to the HCI layer */ while ((skb = skb_dequeue(&data->rx_skb_q))) { err = btintel_pcie_recv_frame(data, skb); @@ -840,6 +1340,10 @@ static irqreturn_t btintel_pcie_irq_msix_handler(int irq, void *dev_id) return IRQ_NONE; } + /* This interrupt is raised when there is a hardware exception */ + if (intr_hw & BTINTEL_PCIE_MSIX_HW_INT_CAUSES_HWEXP) + btintel_pcie_msix_hw_exp_handler(data); + /* This interrupt is triggered by the firmware after updating * boot_stage register and image_response register */ @@ -920,7 +1424,8 @@ struct btintel_pcie_causes_list { static struct btintel_pcie_causes_list causes_list[] = { { BTINTEL_PCIE_MSIX_FH_INT_CAUSES_0, BTINTEL_PCIE_CSR_MSIX_FH_INT_MASK, 0x00 }, { BTINTEL_PCIE_MSIX_FH_INT_CAUSES_1, BTINTEL_PCIE_CSR_MSIX_FH_INT_MASK, 0x01 }, - { BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP0, BTINTEL_PCIE_CSR_MSIX_HW_INT_MASK, 0x20 }, + { BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP0, BTINTEL_PCIE_CSR_MSIX_HW_INT_MASK, 0x20 }, + { BTINTEL_PCIE_MSIX_HW_INT_CAUSES_HWEXP, BTINTEL_PCIE_CSR_MSIX_HW_INT_MASK, 0x23 }, }; /* This function configures the interrupt masks for both HW_INT_CAUSES and @@ -1007,6 +1512,11 @@ static void btintel_pcie_init_ci(struct btintel_pcie_data *data, ci->addr_urbdq1 = data->rxq.urbd1s_p_addr; ci->num_urbdq1 = data->rxq.count; ci->urbdq_db_vec = BTINTEL_PCIE_RXQ_NUM; + + ci->dbg_output_mode = 0x01; + ci->dbgc_addr = data->dbgc.frag_p_addr; + ci->dbgc_size = data->dbgc.frag_size; + ci->dbg_preset = 0x00; } static void btintel_pcie_free_txq_bufs(struct btintel_pcie_data *data, @@ -1219,6 +1729,11 @@ static int btintel_pcie_alloc(struct btintel_pcie_data *data) /* Setup Index Array */ btintel_pcie_setup_ia(data, p_addr, v_addr, &data->ia); + /* Setup data buffers for dbgc */ + err = btintel_pcie_setup_dbgc(data); + if (err) + goto exit_error_txq; + /* Setup Context Information */ p_addr += sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES * 4; v_addr += sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES * 4; @@ -1392,6 +1907,7 @@ static void btintel_pcie_release_hdev(struct btintel_pcie_data *data) static int btintel_pcie_setup_internal(struct hci_dev *hdev) { + struct btintel_pcie_data *data = hci_get_drvdata(hdev); const u8 param[1] = { 0xFF }; struct intel_version_tlv ver_tlv; struct sk_buff *skb; @@ -1449,6 +1965,7 @@ static int btintel_pcie_setup_internal(struct hci_dev *hdev) */ switch (INTEL_HW_VARIANT(ver_tlv.cnvi_bt)) { case 0x1e: /* BzrI */ + case 0x1f: /* ScP */ /* Display version information of TLV type */ btintel_version_info_tlv(hdev, &ver_tlv); @@ -1474,6 +1991,23 @@ static int btintel_pcie_setup_internal(struct hci_dev *hdev) break; } + data->dmp_hdr.cnvi_top = ver_tlv.cnvi_top; + data->dmp_hdr.cnvr_top = ver_tlv.cnvr_top; + data->dmp_hdr.fw_timestamp = ver_tlv.timestamp; + data->dmp_hdr.fw_build_type = ver_tlv.build_type; + data->dmp_hdr.fw_build_num = ver_tlv.build_num; + data->dmp_hdr.cnvi_bt = ver_tlv.cnvi_bt; + + if (ver_tlv.img_type == 0x02 || ver_tlv.img_type == 0x03) + data->dmp_hdr.fw_git_sha1 = ver_tlv.git_sha1; + + err = 
hci_devcd_register(hdev, btintel_pcie_dump_traces, btintel_pcie_dump_hdr, + btintel_pcie_dump_notify); + if (err) { + bt_dev_err(hdev, "Failed to register coredump (%d)", err); + goto exit_error; + } + btintel_print_fseq_info(hdev); exit_error: kfree_skb(skb); @@ -1538,6 +2072,7 @@ static int btintel_pcie_setup_hdev(struct btintel_pcie_data *data) goto exit_error; } + data->dmp_hdr.driver_name = KBUILD_MODNAME; return 0; exit_error: @@ -1650,11 +2185,28 @@ static void btintel_pcie_remove(struct pci_dev *pdev) pci_set_drvdata(pdev, NULL); } +#ifdef CONFIG_DEV_COREDUMP +static void btintel_pcie_coredump(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct btintel_pcie_data *data = pci_get_drvdata(pdev); + + if (test_and_set_bit(BTINTEL_PCIE_COREDUMP_INPROGRESS, &data->flags)) + return; + + data->dmp_hdr.trigger_reason = BTINTEL_PCIE_TRIGGER_REASON_USER_TRIGGER; + queue_work(data->workqueue, &data->rx_work); +} +#endif + static struct pci_driver btintel_pcie_driver = { .name = KBUILD_MODNAME, .id_table = btintel_pcie_table, .probe = btintel_pcie_probe, .remove = btintel_pcie_remove, +#ifdef CONFIG_DEV_COREDUMP + .driver.coredump = btintel_pcie_coredump +#endif }; module_pci_driver(btintel_pcie_driver); diff --git a/drivers/bluetooth/btintel_pcie.h b/drivers/bluetooth/btintel_pcie.h index f9aada0543c48..873178019cad0 100644 --- a/drivers/bluetooth/btintel_pcie.h +++ b/drivers/bluetooth/btintel_pcie.h @@ -16,6 +16,8 @@ #define BTINTEL_PCIE_CSR_CI_ADDR_LSB_REG (BTINTEL_PCIE_CSR_BASE + 0x118) #define BTINTEL_PCIE_CSR_CI_ADDR_MSB_REG (BTINTEL_PCIE_CSR_BASE + 0x11C) #define BTINTEL_PCIE_CSR_IMG_RESPONSE_REG (BTINTEL_PCIE_CSR_BASE + 0x12C) +#define BTINTEL_PCIE_PRPH_DEV_ADDR_REG (BTINTEL_PCIE_CSR_BASE + 0x440) +#define BTINTEL_PCIE_PRPH_DEV_RD_REG (BTINTEL_PCIE_CSR_BASE + 0x458) #define BTINTEL_PCIE_CSR_HBUS_TARG_WRPTR (BTINTEL_PCIE_CSR_BASE + 0x460) /* BTINTEL_PCIE_CSR Function Control Register */ @@ -23,6 +25,12 @@ #define BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_INIT (BIT(6)) #define BTINTEL_PCIE_CSR_FUNC_CTRL_FUNC_INIT (BIT(7)) #define BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_ACCESS_STS (BIT(20)) + +#define BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_ACCESS_REQ (BIT(21)) +/* Stop MAC Access disconnection request */ +#define BTINTEL_PCIE_CSR_FUNC_CTRL_STOP_MAC_ACCESS_DIS (BIT(22)) +#define BTINTEL_PCIE_CSR_FUNC_CTRL_XTAL_CLK_REQ (BIT(23)) + #define BTINTEL_PCIE_CSR_FUNC_CTRL_BUS_MASTER_STS (BIT(28)) #define BTINTEL_PCIE_CSR_FUNC_CTRL_BUS_MASTER_DISCON (BIT(29)) #define BTINTEL_PCIE_CSR_FUNC_CTRL_SW_RESET (BIT(31)) @@ -48,6 +56,30 @@ #define BTINTEL_PCIE_CSR_MSIX_IVAR_BASE (BTINTEL_PCIE_CSR_MSIX_BASE + 0x0880) #define BTINTEL_PCIE_CSR_MSIX_IVAR(cause) (BTINTEL_PCIE_CSR_MSIX_IVAR_BASE + (cause)) +/* IOSF Debug Register */ +#define BTINTEL_PCIE_DBGC_BASE_ADDR (0xf3800300) +#define BTINTEL_PCIE_DBGC_CUR_DBGBUFF_STATUS (BTINTEL_PCIE_DBGC_BASE_ADDR + 0x1C) +#define BTINTEL_PCIE_DBGC_DBGBUFF_WRAP_ARND (BTINTEL_PCIE_DBGC_BASE_ADDR + 0x2C) + +#define BTINTEL_PCIE_DBG_IDX_BIT_MASK 0x0F +#define BTINTEL_PCIE_DBGC_DBG_BUF_IDX(data) (((data) >> 24) & BTINTEL_PCIE_DBG_IDX_BIT_MASK) +#define BTINTEL_PCIE_DBG_OFFSET_BIT_MASK 0xFFFFFF + +/* The DRAM buffer count, each buffer size, and + * fragment buffer size + */ +#define BTINTEL_PCIE_DBGC_BUFFER_COUNT 16 +#define BTINTEL_PCIE_DBGC_BUFFER_SIZE (256 * 1024) /* 256 KB */ + +#define BTINTEL_PCIE_DBGC_FRAG_VERSION 1 +#define BTINTEL_PCIE_DBGC_FRAG_BUFFER_COUNT BTINTEL_PCIE_DBGC_BUFFER_COUNT + +/* Magic number(4), version(4), size of payload length(4) */ +#define 
BTINTEL_PCIE_DBGC_FRAG_HEADER_SIZE 12 + +/* Num of alloc Dbg buff (4) + (LSB(4), MSB(4), Size(4)) for each buffer */ +#define BTINTEL_PCIE_DBGC_FRAG_PAYLOAD_SIZE 196 + /* Causes for the FH register interrupts */ enum msix_fh_int_causes { BTINTEL_PCIE_MSIX_FH_INT_CAUSES_0 = BIT(0), /* cause 0 */ @@ -57,6 +89,7 @@ enum msix_fh_int_causes { /* Causes for the HW register interrupts */ enum msix_hw_int_causes { BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP0 = BIT(0), /* cause 32 */ + BTINTEL_PCIE_MSIX_HW_INT_CAUSES_HWEXP = BIT(3), /* cause 35 */ }; /* PCIe device states @@ -69,6 +102,25 @@ enum { BTINTEL_PCIE_STATE_D3_HOT = 2, BTINTEL_PCIE_STATE_D3_COLD = 3, }; + +enum { + BTINTEL_PCIE_CORE_HALTED, + BTINTEL_PCIE_HWEXP_INPROGRESS, + BTINTEL_PCIE_COREDUMP_INPROGRESS +}; + +enum btintel_pcie_tlv_type { + BTINTEL_CNVI_BT, + BTINTEL_WRITE_PTR, + BTINTEL_WRAP_CTR, + BTINTEL_TRIGGER_REASON, + BTINTEL_FW_SHA, + BTINTEL_CNVR_TOP, + BTINTEL_CNVI_TOP, + BTINTEL_DUMP_TIME, + BTINTEL_FW_BUILD, +}; + #define BTINTEL_PCIE_MSIX_NON_AUTO_CLEAR_CAUSE BIT(7) /* Minimum and Maximum number of MSI-X Vector @@ -325,6 +377,37 @@ struct rxq { struct data_buf *bufs; }; +/* Structure for DRAM Buffer + * @count: Number of descriptors + * @buf: Array of data_buf structure + */ +struct btintel_pcie_dbgc { + u16 count; + + void *frag_v_addr; + dma_addr_t frag_p_addr; + u16 frag_size; + + dma_addr_t buf_p_addr; + void *buf_v_addr; + struct data_buf *bufs; +}; + +struct btintel_pcie_dump_header { + const char *driver_name; + u32 cnvi_top; + u32 cnvr_top; + u16 fw_timestamp; + u8 fw_build_type; + u32 fw_build_num; + u32 fw_git_sha1; + u32 cnvi_bt; + u32 write_ptr; + u32 wrap_ctr; + u16 trigger_reason; + int state; +}; + /* struct btintel_pcie_data * @pdev: pci device * @hdev: hdev device @@ -405,6 +488,8 @@ struct btintel_pcie_data { struct txq txq; struct rxq rxq; u32 alive_intr_ctxt; + struct btintel_pcie_dbgc dbgc; + struct btintel_pcie_dump_header dmp_hdr; }; static inline u32 btintel_pcie_rd_reg32(struct btintel_pcie_data *data, @@ -444,3 +529,11 @@ static inline void btintel_pcie_clr_reg_bits(struct btintel_pcie_data *data, r &= ~bits; iowrite32(r, data->base_addr + offset); } + +static inline u32 btintel_pcie_rd_dev_mem(struct btintel_pcie_data *data, + u32 addr) +{ + btintel_pcie_wr_reg32(data, BTINTEL_PCIE_PRPH_DEV_ADDR_REG, addr); + return btintel_pcie_rd_reg32(data, BTINTEL_PCIE_PRPH_DEV_RD_REG); +} + diff --git a/drivers/bluetooth/btmtk.c b/drivers/bluetooth/btmtk.c index 68846c5bd4f79..4390fd571dbd1 100644 --- a/drivers/bluetooth/btmtk.c +++ b/drivers/bluetooth/btmtk.c @@ -1330,13 +1330,6 @@ int btmtk_usb_setup(struct hci_dev *hdev) break; case 0x7922: case 0x7925: - /* Reset the device to ensure it's in the initial state before - * downloading the firmware to ensure. 
- */ - - if (!test_bit(BTMTK_FIRMWARE_LOADED, &btmtk_data->flags)) - btmtk_usb_subsys_reset(hdev, dev_id); - fallthrough; case 0x7961: btmtk_fw_get_filename(fw_bin_name, sizeof(fw_bin_name), dev_id, fw_version, fw_flavor); @@ -1345,12 +1338,9 @@ int btmtk_usb_setup(struct hci_dev *hdev) btmtk_usb_hci_wmt_sync); if (err < 0) { bt_dev_err(hdev, "Failed to set up firmware (%d)", err); - clear_bit(BTMTK_FIRMWARE_LOADED, &btmtk_data->flags); return err; } - set_bit(BTMTK_FIRMWARE_LOADED, &btmtk_data->flags); - /* It's Device EndPoint Reset Option Register */ err = btmtk_usb_uhw_reg_write(hdev, MTK_EP_RST_OPT, MTK_EP_RST_IN_OUT_OPT); diff --git a/drivers/bluetooth/btmtksdio.c b/drivers/bluetooth/btmtksdio.c index bd5464bde174f..edd5eead1e93b 100644 --- a/drivers/bluetooth/btmtksdio.c +++ b/drivers/bluetooth/btmtksdio.c @@ -610,7 +610,8 @@ static void btmtksdio_txrx_work(struct work_struct *work) } while (int_status || time_is_before_jiffies(txrx_timeout)); /* Enable interrupt */ - sdio_writel(bdev->func, C_INT_EN_SET, MTK_REG_CHLPCR, NULL); + if (bdev->func->irq_handler) + sdio_writel(bdev->func, C_INT_EN_SET, MTK_REG_CHLPCR, NULL); sdio_release_host(bdev->func); diff --git a/drivers/bluetooth/btnxpuart.c b/drivers/bluetooth/btnxpuart.c index aa5ec1d444a9d..5091dea762a07 100644 --- a/drivers/bluetooth/btnxpuart.c +++ b/drivers/bluetooth/btnxpuart.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-or-later /* * NXP Bluetooth driver - * Copyright 2023 NXP + * Copyright 2023-2025 NXP */ #include @@ -31,6 +31,7 @@ #define BTNXPUART_SERDEV_OPEN 4 #define BTNXPUART_IR_IN_PROGRESS 5 #define BTNXPUART_FW_DOWNLOAD_ABORT 6 +#define BTNXPUART_FW_DUMP_IN_PROGRESS 7 /* NXP HW err codes */ #define BTNXPUART_IR_HW_ERR 0xb0 @@ -98,14 +99,19 @@ #define PS_STATE_AWAKE 0 #define PS_STATE_SLEEP 1 -/* Bluetooth vendor command : Sleep mode */ +/* NXP Vendor Commands. 
Refer to user manual UM11628 on nxp.com */ +/* Set custom BD Address */ +#define HCI_NXP_SET_BD_ADDR 0xfc22 +/* Set Auto-Sleep mode */ #define HCI_NXP_AUTO_SLEEP_MODE 0xfc23 -/* Bluetooth vendor command : Wakeup method */ +/* Set Wakeup method */ #define HCI_NXP_WAKEUP_METHOD 0xfc53 -/* Bluetooth vendor command : Set operational baudrate */ +/* Set operational baudrate */ #define HCI_NXP_SET_OPER_SPEED 0xfc09 -/* Bluetooth vendor command: Independent Reset */ +/* Independent Reset (Soft Reset) */ #define HCI_NXP_IND_RESET 0xfcfc +/* Bluetooth vendor command: Trigger FW dump */ +#define HCI_NXP_TRIGGER_DUMP 0xfe91 /* Bluetooth Power State : Vendor cmd params */ #define BT_PS_ENABLE 0x02 @@ -162,6 +168,12 @@ struct btnxpuart_data { const char *fw_name_old; }; +enum bootloader_param_change { + not_changed, + cmd_sent, + changed +}; + struct btnxpuart_dev { struct hci_dev *hdev; struct serdev_device *serdev; @@ -177,6 +189,7 @@ struct btnxpuart_dev { u32 fw_v1_sent_bytes; u32 fw_dnld_v3_offset; u32 fw_v3_offset_correction; + u32 fw_v3_prev_sent; u32 fw_v1_expected_len; u32 boot_reg_offset; wait_queue_head_t fw_dnld_done_wait_q; @@ -185,8 +198,8 @@ struct btnxpuart_dev { u32 new_baudrate; u32 current_baudrate; u32 fw_init_baudrate; - bool timeout_changed; - bool baudrate_changed; + enum bootloader_param_change timeout_changed; + enum bootloader_param_change baudrate_changed; bool helper_downloaded; struct ps_data psdata; @@ -204,10 +217,11 @@ struct btnxpuart_dev { #define NXP_NAK_V3 0x7b #define NXP_CRC_ERROR_V3 0x7c -/* Bootloader signature error codes */ -#define NXP_ACK_RX_TIMEOUT 0x0002 /* ACK not received from host */ -#define NXP_HDR_RX_TIMEOUT 0x0003 /* FW Header chunk not received */ -#define NXP_DATA_RX_TIMEOUT 0x0004 /* FW Data chunk not received */ +/* Bootloader signature error codes: Refer to AN12820 from nxp.com */ +#define NXP_CRC_RX_ERROR BIT(0) /* CRC error in previous packet */ +#define NXP_ACK_RX_TIMEOUT BIT(2) /* ACK not received from host */ +#define NXP_HDR_RX_TIMEOUT BIT(3) /* FW Header chunk not received */ +#define NXP_DATA_RX_TIMEOUT BIT(4) /* FW Data chunk not received */ #define HDR_LEN 16 @@ -310,6 +324,35 @@ union nxp_v3_rx_timeout_nak_u { u8 buf[6]; }; +struct nxp_v3_crc_nak { + u8 nak; + u8 crc; +} __packed; + +union nxp_v3_crc_nak_u { + struct nxp_v3_crc_nak pkt; + u8 buf[2]; +}; + +/* FW dump */ +#define NXP_FW_DUMP_SIZE (1024 * 1000) + +struct nxp_fw_dump_hdr { + __le16 seq_num; + __le16 reserved; + __le16 buf_type; + __le16 buf_len; +}; + +union nxp_set_bd_addr_payload { + struct { + u8 param_id; + u8 param_len; + u8 param[6]; + } __packed data; + u8 buf[8]; +}; + static u8 crc8_table[CRC8_TABLE_SIZE]; /* Default configurations */ @@ -447,8 +490,14 @@ static int ps_setup(struct hci_dev *hdev) return PTR_ERR(psdata->h2c_ps_gpio); } - if (!psdata->h2c_ps_gpio) + if (device_property_read_u8(&serdev->dev, "nxp,wakein-pin", &psdata->h2c_wakeup_gpio)) { + psdata->h2c_wakeup_gpio = 0xff; /* 0xff: use default pin/gpio */ + } else if (!psdata->h2c_ps_gpio) { + bt_dev_warn(hdev, "nxp,wakein-pin property without device-wakeup GPIO"); psdata->h2c_wakeup_gpio = 0xff; + } + + device_property_read_u8(&serdev->dev, "nxp,wakeout-pin", &psdata->c2h_wakeup_gpio); psdata->hdev = hdev; INIT_WORK(&psdata->work, ps_work_func); @@ -540,9 +589,11 @@ static int send_wakeup_method_cmd(struct hci_dev *hdev, void *data) pcmd.c2h_wakeupmode = psdata->c2h_wakeupmode; pcmd.c2h_wakeup_gpio = psdata->c2h_wakeup_gpio; + pcmd.h2c_wakeup_gpio = 0xff; switch (psdata->h2c_wakeupmode) { case 
WAKEUP_METHOD_GPIO: pcmd.h2c_wakeupmode = BT_CTRL_WAKEUP_METHOD_GPIO; + pcmd.h2c_wakeup_gpio = psdata->h2c_wakeup_gpio; break; case WAKEUP_METHOD_DTR: pcmd.h2c_wakeupmode = BT_CTRL_WAKEUP_METHOD_DSR; @@ -552,7 +603,6 @@ static int send_wakeup_method_cmd(struct hci_dev *hdev, void *data) pcmd.h2c_wakeupmode = BT_CTRL_WAKEUP_METHOD_BREAK; break; } - pcmd.h2c_wakeup_gpio = 0xff; skb = nxp_drv_send_cmd(hdev, HCI_NXP_WAKEUP_METHOD, sizeof(pcmd), &pcmd); if (IS_ERR(skb)) { @@ -586,8 +636,13 @@ static void ps_init(struct hci_dev *hdev) usleep_range(5000, 10000); psdata->ps_state = PS_STATE_AWAKE; - psdata->c2h_wakeupmode = BT_HOST_WAKEUP_METHOD_NONE; - psdata->c2h_wakeup_gpio = 0xff; + + if (psdata->c2h_wakeup_gpio) { + psdata->c2h_wakeupmode = BT_HOST_WAKEUP_METHOD_GPIO; + } else { + psdata->c2h_wakeupmode = BT_HOST_WAKEUP_METHOD_NONE; + psdata->c2h_wakeup_gpio = 0xff; + } psdata->cur_h2c_wakeupmode = WAKEUP_METHOD_INVALID; if (psdata->h2c_ps_gpio) @@ -618,11 +673,6 @@ static void ps_init(struct hci_dev *hdev) psdata->cur_psmode = PS_MODE_DISABLE; psdata->target_ps_mode = DEFAULT_PS_MODE; - - if (psdata->cur_h2c_wakeupmode != psdata->h2c_wakeupmode) - hci_cmd_sync_queue(hdev, send_wakeup_method_cmd, NULL, NULL); - if (psdata->cur_psmode != psdata->target_ps_mode) - hci_cmd_sync_queue(hdev, send_ps_cmd, NULL, NULL); } /* NXP Firmware Download Feature */ @@ -637,8 +687,8 @@ static int nxp_download_firmware(struct hci_dev *hdev) nxpdev->boot_reg_offset = 0; nxpdev->fw_dnld_v3_offset = 0; nxpdev->fw_v3_offset_correction = 0; - nxpdev->baudrate_changed = false; - nxpdev->timeout_changed = false; + nxpdev->baudrate_changed = not_changed; + nxpdev->timeout_changed = not_changed; nxpdev->helper_downloaded = false; serdev_device_set_baudrate(nxpdev->serdev, HCI_NXP_PRI_BAUDRATE); @@ -651,8 +701,10 @@ static int nxp_download_firmware(struct hci_dev *hdev) &nxpdev->tx_state), msecs_to_jiffies(60000)); - release_firmware(nxpdev->fw); - memset(nxpdev->fw_name, 0, sizeof(nxpdev->fw_name)); + if (nxpdev->fw && strlen(nxpdev->fw_name)) { + release_firmware(nxpdev->fw); + memset(nxpdev->fw_name, 0, sizeof(nxpdev->fw_name)); + } if (err == 0) { bt_dev_err(hdev, "FW Download Timeout. 
offset: %d", @@ -767,6 +819,16 @@ static bool is_fw_downloading(struct btnxpuart_dev *nxpdev) return test_bit(BTNXPUART_FW_DOWNLOADING, &nxpdev->tx_state); } +static bool ind_reset_in_progress(struct btnxpuart_dev *nxpdev) +{ + return test_bit(BTNXPUART_IR_IN_PROGRESS, &nxpdev->tx_state); +} + +static bool fw_dump_in_progress(struct btnxpuart_dev *nxpdev) +{ + return test_bit(BTNXPUART_FW_DUMP_IN_PROGRESS, &nxpdev->tx_state); +} + static bool process_boot_signature(struct btnxpuart_dev *nxpdev) { if (test_bit(BTNXPUART_CHECK_BOOT_SIGNATURE, &nxpdev->tx_state)) { @@ -860,15 +922,14 @@ static int nxp_recv_fw_req_v1(struct hci_dev *hdev, struct sk_buff *skb) len = __le16_to_cpu(req->len); if (!nxp_data->helper_fw_name) { - if (!nxpdev->timeout_changed) { - nxpdev->timeout_changed = nxp_fw_change_timeout(hdev, - len); + if (nxpdev->timeout_changed != changed) { + nxp_fw_change_timeout(hdev, len); + nxpdev->timeout_changed = changed; goto free_skb; } - if (!nxpdev->baudrate_changed) { - nxpdev->baudrate_changed = nxp_fw_change_baudrate(hdev, - len); - if (nxpdev->baudrate_changed) { + if (nxpdev->baudrate_changed != changed) { + if (nxp_fw_change_baudrate(hdev, len)) { + nxpdev->baudrate_changed = changed; serdev_device_set_baudrate(nxpdev->serdev, HCI_NXP_SEC_BAUDRATE); serdev_device_set_flow_control(nxpdev->serdev, true); @@ -1047,32 +1108,35 @@ static void nxp_handle_fw_download_error(struct hci_dev *hdev, struct v3_data_re struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev); __u32 offset = __le32_to_cpu(req->offset); __u16 err = __le16_to_cpu(req->error); - union nxp_v3_rx_timeout_nak_u nak_tx_buf; - - switch (err) { - case NXP_ACK_RX_TIMEOUT: - case NXP_HDR_RX_TIMEOUT: - case NXP_DATA_RX_TIMEOUT: - nak_tx_buf.pkt.nak = NXP_NAK_V3; - nak_tx_buf.pkt.offset = __cpu_to_le32(offset); - nak_tx_buf.pkt.crc = crc8(crc8_table, nak_tx_buf.buf, - sizeof(nak_tx_buf) - 1, 0xff); - serdev_device_write_buf(nxpdev->serdev, nak_tx_buf.buf, - sizeof(nak_tx_buf)); - break; - default: - bt_dev_dbg(hdev, "Unknown bootloader error code: %d", err); - break; - + union nxp_v3_rx_timeout_nak_u timeout_nak_buf; + union nxp_v3_crc_nak_u crc_nak_buf; + + if (err & NXP_CRC_RX_ERROR) { + crc_nak_buf.pkt.nak = NXP_CRC_ERROR_V3; + crc_nak_buf.pkt.crc = crc8(crc8_table, crc_nak_buf.buf, + sizeof(crc_nak_buf) - 1, 0xff); + serdev_device_write_buf(nxpdev->serdev, crc_nak_buf.buf, + sizeof(crc_nak_buf)); + } else if (err & NXP_ACK_RX_TIMEOUT || + err & NXP_HDR_RX_TIMEOUT || + err & NXP_DATA_RX_TIMEOUT) { + timeout_nak_buf.pkt.nak = NXP_NAK_V3; + timeout_nak_buf.pkt.offset = __cpu_to_le32(offset); + timeout_nak_buf.pkt.crc = crc8(crc8_table, timeout_nak_buf.buf, + sizeof(timeout_nak_buf) - 1, 0xff); + serdev_device_write_buf(nxpdev->serdev, timeout_nak_buf.buf, + sizeof(timeout_nak_buf)); + } else { + bt_dev_err(hdev, "Unknown bootloader error code: %d", err); } - } static int nxp_recv_fw_req_v3(struct hci_dev *hdev, struct sk_buff *skb) { struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev); struct v3_data_req *req; - __u16 len; + __u16 len = 0; + __u16 err = 0; __u32 offset; if (!process_boot_signature(nxpdev)) @@ -1082,23 +1146,40 @@ static int nxp_recv_fw_req_v3(struct hci_dev *hdev, struct sk_buff *skb) if (!req || !nxpdev->fw) goto free_skb; - if (!req->error) { + err = __le16_to_cpu(req->error); + + if (!err) { nxp_send_ack(NXP_ACK_V3, hdev); + if (nxpdev->timeout_changed == cmd_sent) + nxpdev->timeout_changed = changed; + if (nxpdev->baudrate_changed == cmd_sent) + nxpdev->baudrate_changed = changed; } else { 
nxp_handle_fw_download_error(hdev, req); + if (nxpdev->timeout_changed == cmd_sent && + err == NXP_CRC_RX_ERROR) { + nxpdev->fw_v3_offset_correction -= nxpdev->fw_v3_prev_sent; + nxpdev->timeout_changed = not_changed; + } + if (nxpdev->baudrate_changed == cmd_sent && + err == NXP_CRC_RX_ERROR) { + nxpdev->fw_v3_offset_correction -= nxpdev->fw_v3_prev_sent; + nxpdev->baudrate_changed = not_changed; + } goto free_skb; } len = __le16_to_cpu(req->len); - if (!nxpdev->timeout_changed) { - nxpdev->timeout_changed = nxp_fw_change_timeout(hdev, len); + if (nxpdev->timeout_changed != changed) { + nxp_fw_change_timeout(hdev, len); + nxpdev->timeout_changed = cmd_sent; goto free_skb; } - if (!nxpdev->baudrate_changed) { - nxpdev->baudrate_changed = nxp_fw_change_baudrate(hdev, len); - if (nxpdev->baudrate_changed) { + if (nxpdev->baudrate_changed != changed) { + if (nxp_fw_change_baudrate(hdev, len)) { + nxpdev->baudrate_changed = cmd_sent; serdev_device_set_baudrate(nxpdev->serdev, HCI_NXP_SEC_BAUDRATE); serdev_device_set_flow_control(nxpdev->serdev, true); @@ -1130,6 +1211,7 @@ static int nxp_recv_fw_req_v3(struct hci_dev *hdev, struct sk_buff *skb) nxpdev->fw_dnld_v3_offset, len); free_skb: + nxpdev->fw_v3_prev_sent = len; kfree_skb(skb); return 0; } @@ -1168,7 +1250,7 @@ static int nxp_set_baudrate_cmd(struct hci_dev *hdev, void *data) static int nxp_check_boot_sign(struct btnxpuart_dev *nxpdev) { serdev_device_set_baudrate(nxpdev->serdev, HCI_NXP_PRI_BAUDRATE); - if (test_bit(BTNXPUART_IR_IN_PROGRESS, &nxpdev->tx_state)) + if (ind_reset_in_progress(nxpdev)) serdev_device_set_flow_control(nxpdev->serdev, false); else serdev_device_set_flow_control(nxpdev->serdev, true); @@ -1197,6 +1279,102 @@ static int nxp_set_ind_reset(struct hci_dev *hdev, void *data) return hci_recv_frame(hdev, skb); } +/* Firmware dump */ +static void nxp_coredump(struct hci_dev *hdev) +{ + struct sk_buff *skb; + u8 pcmd = 2; + + skb = nxp_drv_send_cmd(hdev, HCI_NXP_TRIGGER_DUMP, 1, &pcmd); + if (!IS_ERR(skb)) + kfree_skb(skb); +} + +static void nxp_coredump_hdr(struct hci_dev *hdev, struct sk_buff *skb) +{ + /* Nothing to be added in FW dump header */ +} + +static int nxp_process_fw_dump(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct hci_acl_hdr *acl_hdr = (struct hci_acl_hdr *)skb_pull_data(skb, + sizeof(*acl_hdr)); + struct nxp_fw_dump_hdr *fw_dump_hdr = (struct nxp_fw_dump_hdr *)skb->data; + struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev); + __u16 seq_num = __le16_to_cpu(fw_dump_hdr->seq_num); + __u16 buf_len = __le16_to_cpu(fw_dump_hdr->buf_len); + int err; + + if (seq_num == 0x0001) { + if (test_and_set_bit(BTNXPUART_FW_DUMP_IN_PROGRESS, &nxpdev->tx_state)) { + bt_dev_err(hdev, "FW dump already in progress"); + goto free_skb; + } + bt_dev_warn(hdev, "==== Start FW dump ==="); + err = hci_devcd_init(hdev, NXP_FW_DUMP_SIZE); + if (err < 0) + goto free_skb; + + schedule_delayed_work(&hdev->dump.dump_timeout, + msecs_to_jiffies(20000)); + } + + err = hci_devcd_append(hdev, skb_clone(skb, GFP_ATOMIC)); + if (err < 0) + goto free_skb; + + if (buf_len == 0) { + bt_dev_warn(hdev, "==== FW dump complete ==="); + clear_bit(BTNXPUART_FW_DUMP_IN_PROGRESS, &nxpdev->tx_state); + hci_devcd_complete(hdev); + nxp_set_ind_reset(hdev, NULL); + } + +free_skb: + kfree_skb(skb); + return 0; +} + +static int nxp_recv_acl_pkt(struct hci_dev *hdev, struct sk_buff *skb) +{ + __u16 handle = __le16_to_cpu(hci_acl_hdr(skb)->handle); + + /* FW dump chunks are ACL packets with conn handle 0xfff */ + if ((handle & 0x0FFF) == 0xFFF) + 
return nxp_process_fw_dump(hdev, skb); + else + return hci_recv_frame(hdev, skb); +} + +static int nxp_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr) +{ + union nxp_set_bd_addr_payload pcmd; + int err; + + pcmd.data.param_id = 0xfe; + pcmd.data.param_len = 6; + memcpy(pcmd.data.param, bdaddr, 6); + + /* BD address can be assigned only after first reset command. */ + err = __hci_cmd_sync_status(hdev, HCI_OP_RESET, 0, NULL, + HCI_INIT_TIMEOUT); + if (err) { + bt_dev_err(hdev, + "Reset before setting local-bd-addr failed (%d)", + err); + return err; + } + + err = __hci_cmd_sync_status(hdev, HCI_NXP_SET_BD_ADDR, sizeof(pcmd), + pcmd.buf, HCI_CMD_TIMEOUT); + if (err) { + bt_dev_err(hdev, "Changing device address failed (%d)", err); + return err; + } + + return 0; +} + /* NXP protocol */ static int nxp_setup(struct hci_dev *hdev) { @@ -1216,11 +1394,6 @@ static int nxp_setup(struct hci_dev *hdev) serdev_device_set_baudrate(nxpdev->serdev, nxpdev->fw_init_baudrate); nxpdev->current_baudrate = nxpdev->fw_init_baudrate; - if (nxpdev->current_baudrate != HCI_NXP_SEC_BAUDRATE) { - nxpdev->new_baudrate = HCI_NXP_SEC_BAUDRATE; - hci_cmd_sync_queue(hdev, nxp_set_baudrate_cmd, NULL, NULL); - } - ps_init(hdev); if (test_and_clear_bit(BTNXPUART_IR_IN_PROGRESS, &nxpdev->tx_state)) @@ -1229,6 +1402,22 @@ static int nxp_setup(struct hci_dev *hdev) return 0; } +static int nxp_post_init(struct hci_dev *hdev) +{ + struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev); + struct ps_data *psdata = &nxpdev->psdata; + + if (nxpdev->current_baudrate != HCI_NXP_SEC_BAUDRATE) { + nxpdev->new_baudrate = HCI_NXP_SEC_BAUDRATE; + nxp_set_baudrate_cmd(hdev, NULL); + } + if (psdata->cur_h2c_wakeupmode != psdata->h2c_wakeupmode) + send_wakeup_method_cmd(hdev, NULL); + if (psdata->cur_psmode != psdata->target_ps_mode) + send_ps_cmd(hdev, NULL); + return 0; +} + static void nxp_hw_err(struct hci_dev *hdev, u8 code) { struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev); @@ -1247,25 +1436,44 @@ static int nxp_shutdown(struct hci_dev *hdev) { struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev); struct sk_buff *skb; - u8 *status; u8 pcmd = 0; - if (test_bit(BTNXPUART_IR_IN_PROGRESS, &nxpdev->tx_state)) { + if (ind_reset_in_progress(nxpdev)) { skb = nxp_drv_send_cmd(hdev, HCI_NXP_IND_RESET, 1, &pcmd); - if (IS_ERR(skb)) - return PTR_ERR(skb); - - status = skb_pull_data(skb, 1); - if (status) { - serdev_device_set_flow_control(nxpdev->serdev, false); - set_bit(BTNXPUART_FW_DOWNLOADING, &nxpdev->tx_state); - } - kfree_skb(skb); + serdev_device_set_flow_control(nxpdev->serdev, false); + set_bit(BTNXPUART_FW_DOWNLOADING, &nxpdev->tx_state); + /* HCI_NXP_IND_RESET command may not return any response */ + if (!IS_ERR(skb)) + kfree_skb(skb); + } else if (nxpdev->current_baudrate != nxpdev->fw_init_baudrate) { + nxpdev->new_baudrate = nxpdev->fw_init_baudrate; + nxp_set_baudrate_cmd(hdev, NULL); + } return 0; } +static bool nxp_wakeup(struct hci_dev *hdev) +{ + struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev); + struct ps_data *psdata = &nxpdev->psdata; + + if (psdata->c2h_wakeupmode != BT_HOST_WAKEUP_METHOD_NONE) + return true; + + return false; +} + +static void nxp_reset(struct hci_dev *hdev) +{ + struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev); + + if (!ind_reset_in_progress(nxpdev) && !fw_dump_in_progress(nxpdev)) { + bt_dev_dbg(hdev, "CMD Timeout detected. 
Resetting."); + nxp_set_ind_reset(hdev, NULL); + } +} + static int btnxpuart_queue_skb(struct hci_dev *hdev, struct sk_buff *skb) { struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev); @@ -1286,6 +1494,9 @@ static int nxp_enqueue(struct hci_dev *hdev, struct sk_buff *skb) struct wakeup_cmd_payload wakeup_parm; __le32 baudrate_parm; + if (fw_dump_in_progress(nxpdev)) + return -EBUSY; + /* if vendor commands are received from user space (e.g. hcitool), update * driver flags accordingly and ask driver to re-send the command to FW. * In case the payload for any command does not match expected payload @@ -1454,7 +1665,7 @@ static int btnxpuart_flush(struct hci_dev *hdev) } static const struct h4_recv_pkt nxp_recv_pkts[] = { - { H4_RECV_ACL, .recv = hci_recv_frame }, + { H4_RECV_ACL, .recv = nxp_recv_acl_pkt }, { H4_RECV_SCO, .recv = hci_recv_frame }, { H4_RECV_EVENT, .recv = hci_recv_frame }, { H4_RECV_ISO, .recv = hci_recv_frame }, @@ -1476,11 +1687,13 @@ static size_t btnxpuart_receive_buf(struct serdev_device *serdev, if (IS_ERR(nxpdev->rx_skb)) { int err = PTR_ERR(nxpdev->rx_skb); /* Safe to ignore out-of-sync bootloader signatures */ - if (!is_fw_downloading(nxpdev)) + if (!is_fw_downloading(nxpdev) && + !ind_reset_in_progress(nxpdev)) bt_dev_err(nxpdev->hdev, "Frame reassembly failed (%d)", err); return count; } - if (!is_fw_downloading(nxpdev)) + if (!is_fw_downloading(nxpdev) && + !ind_reset_in_progress(nxpdev)) nxpdev->hdev->stat.byte_rx += count; return count; } @@ -1499,6 +1712,7 @@ static int nxp_serdev_probe(struct serdev_device *serdev) { struct hci_dev *hdev; struct btnxpuart_dev *nxpdev; + bdaddr_t ba = {0}; nxpdev = devm_kzalloc(&serdev->dev, sizeof(*nxpdev), GFP_KERNEL); if (!nxpdev) @@ -1543,11 +1757,21 @@ static int nxp_serdev_probe(struct serdev_device *serdev) hdev->close = btnxpuart_close; hdev->flush = btnxpuart_flush; hdev->setup = nxp_setup; + hdev->post_init = nxp_post_init; hdev->send = nxp_enqueue; hdev->hw_error = nxp_hw_err; hdev->shutdown = nxp_shutdown; + hdev->wakeup = nxp_wakeup; + hdev->reset = nxp_reset; + hdev->set_bdaddr = nxp_set_bdaddr; SET_HCIDEV_DEV(hdev, &serdev->dev); + device_property_read_u8_array(&nxpdev->serdev->dev, + "local-bd-address", + (u8 *)&ba, sizeof(ba)); + if (bacmp(&ba, BDADDR_ANY)) + set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks); + if (hci_register_dev(hdev) < 0) { dev_err(&serdev->dev, "Can't register HCI device\n"); goto probe_fail; @@ -1556,6 +1780,8 @@ static int nxp_serdev_probe(struct serdev_device *serdev) if (ps_setup(hdev)) goto probe_fail; + hci_devcd_register(hdev, nxp_coredump, nxp_coredump_hdr, NULL); + return 0; probe_fail: @@ -1573,16 +1799,15 @@ static void nxp_serdev_remove(struct serdev_device *serdev) clear_bit(BTNXPUART_FW_DOWNLOADING, &nxpdev->tx_state); wake_up_interruptible(&nxpdev->check_boot_sign_wait_q); wake_up_interruptible(&nxpdev->fw_dnld_done_wait_q); - } else { - /* Restore FW baudrate to fw_init_baudrate if changed. - * This will ensure FW baudrate is in sync with - * driver baudrate in case this driver is re-inserted. + } + + if (test_bit(HCI_RUNNING, &hdev->flags)) { + /* Ensure shutdown callback is executed before unregistering, so + * that baudrate is reset to initial value. 
*/ - if (nxpdev->current_baudrate != nxpdev->fw_init_baudrate) { - nxpdev->new_baudrate = nxpdev->fw_init_baudrate; - nxp_set_baudrate_cmd(hdev, NULL); - } + nxp_shutdown(hdev); } + ps_cleanup(nxpdev); hci_unregister_dev(hdev); hci_free_dev(hdev); @@ -1608,6 +1833,17 @@ static int nxp_serdev_resume(struct device *dev) } #endif +#ifdef CONFIG_DEV_COREDUMP +static void nxp_serdev_coredump(struct device *dev) +{ + struct btnxpuart_dev *nxpdev = dev_get_drvdata(dev); + struct hci_dev *hdev = nxpdev->hdev; + + if (hdev->dump.coredump) + hdev->dump.coredump(hdev); +} +#endif + static struct btnxpuart_data w8987_data __maybe_unused = { .helper_fw_name = NULL, .fw_name = FIRMWARE_W8987, @@ -1638,6 +1874,9 @@ static struct serdev_device_driver nxp_serdev_driver = { .name = "btnxpuart", .of_match_table = of_match_ptr(nxpuart_of_match_table), .pm = &nxp_pm_ops, +#ifdef CONFIG_DEV_COREDUMP + .coredump = nxp_serdev_coredump, +#endif }, }; diff --git a/drivers/bluetooth/btqca.c b/drivers/bluetooth/btqca.c index cdf09d9a9ad27..3d6778b95e005 100644 --- a/drivers/bluetooth/btqca.c +++ b/drivers/bluetooth/btqca.c @@ -785,6 +785,7 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate, const char *firmware_name, const char *rampatch_name) { struct qca_fw_config config = {}; + const char *variant = ""; int err; u8 rom_ver = 0; u32 soc_ver; @@ -815,6 +816,10 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate, snprintf(config.fwname, sizeof(config.fwname), "qca/%s", rampatch_name); } else { switch (soc_type) { + case QCA_WCN3950: + snprintf(config.fwname, sizeof(config.fwname), + "qca/cmbtfw%02x.tlv", rom_ver); + break; case QCA_WCN3990: case QCA_WCN3991: case QCA_WCN3998: @@ -880,16 +885,23 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate, } } else { switch (soc_type) { + case QCA_WCN3950: + if (le32_to_cpu(ver.soc_id) == QCA_WCN3950_SOC_ID_T) + variant = "t"; + else if (le32_to_cpu(ver.soc_id) == QCA_WCN3950_SOC_ID_S) + variant = "u"; + + snprintf(config.fwname, sizeof(config.fwname), + "qca/cmnv%02x%s.bin", rom_ver, variant); + break; case QCA_WCN3990: case QCA_WCN3991: case QCA_WCN3998: - if (le32_to_cpu(ver.soc_id) == QCA_WCN3991_SOC_ID) { - snprintf(config.fwname, sizeof(config.fwname), - "qca/crnv%02xu.bin", rom_ver); - } else { - snprintf(config.fwname, sizeof(config.fwname), - "qca/crnv%02x.bin", rom_ver); - } + if (le32_to_cpu(ver.soc_id) == QCA_WCN3991_SOC_ID) + variant = "u"; + + snprintf(config.fwname, sizeof(config.fwname), + "qca/crnv%02x%s.bin", rom_ver, variant); break; case QCA_WCN3988: snprintf(config.fwname, sizeof(config.fwname), @@ -948,6 +960,7 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate, * VsMsftOpCode. */ switch (soc_type) { + case QCA_WCN3950: case QCA_WCN3988: case QCA_WCN3990: case QCA_WCN3991: diff --git a/drivers/bluetooth/btqca.h b/drivers/bluetooth/btqca.h index 9d28c88002257..8f3c1b1c77b3d 100644 --- a/drivers/bluetooth/btqca.h +++ b/drivers/bluetooth/btqca.h @@ -41,6 +41,9 @@ #define QCA_WCN3991_SOC_ID 0x40014320 +#define QCA_WCN3950_SOC_ID_T 0x40074130 +#define QCA_WCN3950_SOC_ID_S 0x40075130 + /* QCA chipset version can be decided by patch and SoC * version, combination with upper 2 bytes from SoC * and lower 2 bytes from patch will be used. 
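The btqca.c hunks above collapse the duplicated snprintf() calls into a single format string plus a variant suffix derived from the SoC ID ("u" for WCN3991, "t" or "u" for the two WCN3950 parts). A minimal standalone sketch of that naming scheme follows; the rom_ver and soc_id values and the pick_variant() helper are hypothetical stand-ins for what the driver reads from the controller, not the driver's actual code:

#include <stdio.h>
#include <stdint.h>

/* SoC IDs as defined in the btqca.h hunk above */
#define QCA_WCN3991_SOC_ID   0x40014320
#define QCA_WCN3950_SOC_ID_T 0x40074130
#define QCA_WCN3950_SOC_ID_S 0x40075130

/* Hypothetical helper: map a SoC ID to its NVM filename suffix */
static const char *pick_variant(uint32_t soc_id)
{
	switch (soc_id) {
	case QCA_WCN3991_SOC_ID:
		return "u";	/* qca/crnv%02xu.bin */
	case QCA_WCN3950_SOC_ID_T:
		return "t";	/* qca/cmnv%02xt.bin */
	case QCA_WCN3950_SOC_ID_S:
		return "u";	/* per the hunk, the "S" part also takes the "u" NVM */
	default:
		return "";	/* plain qca/crnv%02x.bin */
	}
}

int main(void)
{
	char fwname[64];
	uint8_t rom_ver = 0x44;				/* hypothetical patch ROM version */
	uint32_t soc_id = QCA_WCN3950_SOC_ID_S;		/* hypothetical controller ID */

	snprintf(fwname, sizeof(fwname), "qca/cmnv%02x%s.bin",
		 rom_ver, pick_variant(soc_id));
	printf("%s\n", fwname);				/* prints qca/cmnv44u.bin */
	return 0;
}

Folding the variant into one format string means a new SoC ID only adds a case to the selection logic instead of another duplicated snprintf() branch.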
@@ -145,6 +148,7 @@ enum qca_btsoc_type { QCA_INVALID = -1, QCA_AR3002, QCA_ROME, + QCA_WCN3950, QCA_WCN3988, QCA_WCN3990, QCA_WCN3998, diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index 90966dfbd2781..5012b5ff92c8a 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -34,6 +34,7 @@ static bool force_scofix; static bool enable_autosuspend = IS_ENABLED(CONFIG_BT_HCIBTUSB_AUTOSUSPEND); static bool enable_poll_sync = IS_ENABLED(CONFIG_BT_HCIBTUSB_POLL_SYNC); static bool reset = true; +static bool auto_isoc_alt = IS_ENABLED(CONFIG_BT_HCIBTUSB_AUTO_ISOC_ALT); static struct usb_driver btusb_driver; @@ -375,10 +376,38 @@ static const struct usb_device_id quirks_table[] = { BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x0489, 0xe0f3), .driver_info = BTUSB_QCA_WCN6855 | BTUSB_WIDEBAND_SPEECH }, + { USB_DEVICE(0x0489, 0xe100), .driver_info = BTUSB_QCA_WCN6855 | + BTUSB_WIDEBAND_SPEECH }, + { USB_DEVICE(0x0489, 0xe103), .driver_info = BTUSB_QCA_WCN6855 | + BTUSB_WIDEBAND_SPEECH }, + { USB_DEVICE(0x0489, 0xe10a), .driver_info = BTUSB_QCA_WCN6855 | + BTUSB_WIDEBAND_SPEECH }, + { USB_DEVICE(0x0489, 0xe10d), .driver_info = BTUSB_QCA_WCN6855 | + BTUSB_WIDEBAND_SPEECH }, + { USB_DEVICE(0x0489, 0xe11b), .driver_info = BTUSB_QCA_WCN6855 | + BTUSB_WIDEBAND_SPEECH }, + { USB_DEVICE(0x0489, 0xe11c), .driver_info = BTUSB_QCA_WCN6855 | + BTUSB_WIDEBAND_SPEECH }, + { USB_DEVICE(0x0489, 0xe11f), .driver_info = BTUSB_QCA_WCN6855 | + BTUSB_WIDEBAND_SPEECH }, + { USB_DEVICE(0x0489, 0xe141), .driver_info = BTUSB_QCA_WCN6855 | + BTUSB_WIDEBAND_SPEECH }, + { USB_DEVICE(0x0489, 0xe14a), .driver_info = BTUSB_QCA_WCN6855 | + BTUSB_WIDEBAND_SPEECH }, + { USB_DEVICE(0x0489, 0xe14b), .driver_info = BTUSB_QCA_WCN6855 | + BTUSB_WIDEBAND_SPEECH }, + { USB_DEVICE(0x0489, 0xe14d), .driver_info = BTUSB_QCA_WCN6855 | + BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x13d3, 0x3623), .driver_info = BTUSB_QCA_WCN6855 | BTUSB_WIDEBAND_SPEECH }, + { USB_DEVICE(0x13d3, 0x3624), .driver_info = BTUSB_QCA_WCN6855 | + BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x2c7c, 0x0130), .driver_info = BTUSB_QCA_WCN6855 | BTUSB_WIDEBAND_SPEECH }, + { USB_DEVICE(0x2c7c, 0x0131), .driver_info = BTUSB_QCA_WCN6855 | + BTUSB_WIDEBAND_SPEECH }, + { USB_DEVICE(0x2c7c, 0x0132), .driver_info = BTUSB_QCA_WCN6855 | + BTUSB_WIDEBAND_SPEECH }, /* Broadcom BCM2035 */ { USB_DEVICE(0x0a5c, 0x2009), .driver_info = BTUSB_BCM92035 }, @@ -639,6 +668,10 @@ static const struct usb_device_id quirks_table[] = { BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x0489, 0xe102), .driver_info = BTUSB_MEDIATEK | BTUSB_WIDEBAND_SPEECH }, + { USB_DEVICE(0x0489, 0xe152), .driver_info = BTUSB_MEDIATEK | + BTUSB_WIDEBAND_SPEECH }, + { USB_DEVICE(0x0489, 0xe153), .driver_info = BTUSB_MEDIATEK | + BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x04ca, 0x3804), .driver_info = BTUSB_MEDIATEK | BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x04ca, 0x38e4), .driver_info = BTUSB_MEDIATEK | @@ -1085,6 +1118,42 @@ static inline void btusb_free_frags(struct btusb_data *data) spin_unlock_irqrestore(&data->rxlock, flags); } +static void btusb_sco_connected(struct btusb_data *data, struct sk_buff *skb) +{ + struct hci_event_hdr *hdr = (void *) skb->data; + struct hci_ev_sync_conn_complete *ev = + (void *) skb->data + sizeof(*hdr); + struct hci_dev *hdev = data->hdev; + unsigned int notify_air_mode; + + if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT) + return; + + if (skb->len < sizeof(*hdr) || hdr->evt != HCI_EV_SYNC_CONN_COMPLETE) + return; + + if (skb->len != sizeof(*hdr) + sizeof(*ev) || ev->status) + 
return; + + switch (ev->air_mode) { + case BT_CODEC_CVSD: + notify_air_mode = HCI_NOTIFY_ENABLE_SCO_CVSD; + break; + + case BT_CODEC_TRANSPARENT: + notify_air_mode = HCI_NOTIFY_ENABLE_SCO_TRANSP; + break; + + default: + return; + } + + bt_dev_info(hdev, "enabling SCO with air mode %u", ev->air_mode); + data->sco_num = 1; + data->air_mode = notify_air_mode; + schedule_work(&data->work); +} + static int btusb_recv_event(struct btusb_data *data, struct sk_buff *skb) { if (data->intr_interval) { @@ -1092,6 +1161,10 @@ static int btusb_recv_event(struct btusb_data *data, struct sk_buff *skb) schedule_delayed_work(&data->rx_work, 0); } + /* Configure altsetting for HCI_USER_CHANNEL on SCO connected */ + if (auto_isoc_alt && hci_dev_test_flag(data->hdev, HCI_USER_CHANNEL)) + btusb_sco_connected(data, skb); + return data->recv_event(data->hdev, skb); } @@ -2102,7 +2175,8 @@ static int btusb_send_frame(struct hci_dev *hdev, struct sk_buff *skb) return submit_or_queue_tx_urb(hdev, urb); case HCI_SCODATA_PKT: - if (hci_conn_num(hdev, SCO_LINK) < 1) + if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) && + hci_conn_num(hdev, SCO_LINK) < 1) return -ENODEV; urb = alloc_isoc_urb(hdev, skb); @@ -2435,6 +2509,8 @@ static int btusb_setup_csr(struct hci_dev *hdev) set_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks); set_bit(HCI_QUIRK_BROKEN_FILTER_CLEAR_ALL, &hdev->quirks); set_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks); + set_bit(HCI_QUIRK_BROKEN_READ_VOICE_SETTING, &hdev->quirks); + set_bit(HCI_QUIRK_BROKEN_READ_PAGE_SCAN_TYPE, &hdev->quirks); /* Clear the reset quirk since this is not an actual * early Bluetooth 1.1 device from CSR. @@ -2576,7 +2652,8 @@ static int btusb_send_frame_intel(struct hci_dev *hdev, struct sk_buff *skb) return submit_or_queue_tx_urb(hdev, urb); case HCI_SCODATA_PKT: - if (hci_conn_num(hdev, SCO_LINK) < 1) + if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) && + hci_conn_num(hdev, SCO_LINK) < 1) return -ENODEV; urb = alloc_isoc_urb(hdev, skb); @@ -2645,7 +2722,7 @@ static void btusb_mtk_claim_iso_intf(struct btusb_data *data) device_unlock(&btmtk_data->isopkt_intf->dev); if (err < 0) { btmtk_data->isopkt_intf = NULL; - bt_dev_err(data->hdev, "Failed to claim iso interface"); + bt_dev_err(data->hdev, "Failed to claim iso interface: %d", err); return; } @@ -3642,6 +3719,7 @@ static ssize_t force_poll_sync_write(struct file *file, } static const struct file_operations force_poll_sync_fops = { + .owner = THIS_MODULE, .open = simple_open, .read = force_poll_sync_read, .write = force_poll_sync_write, diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c index d2d6ba8d2f8b1..acba83156de9a 100644 --- a/drivers/bluetooth/hci_ldisc.c +++ b/drivers/bluetooth/hci_ldisc.c @@ -102,7 +102,8 @@ static inline struct sk_buff *hci_uart_dequeue(struct hci_uart *hu) if (!skb) { percpu_down_read(&hu->proto_lock); - if (test_bit(HCI_UART_PROTO_READY, &hu->flags)) + if (test_bit(HCI_UART_PROTO_READY, &hu->flags) || + test_bit(HCI_UART_PROTO_INIT, &hu->flags)) skb = hu->proto->dequeue(hu); percpu_up_read(&hu->proto_lock); @@ -124,7 +125,8 @@ int hci_uart_tx_wakeup(struct hci_uart *hu) if (!percpu_down_read_trylock(&hu->proto_lock)) return 0; - if (!test_bit(HCI_UART_PROTO_READY, &hu->flags)) + if (!test_bit(HCI_UART_PROTO_READY, &hu->flags) && + !test_bit(HCI_UART_PROTO_INIT, &hu->flags)) goto no_schedule; set_bit(HCI_UART_TX_WAKEUP, &hu->tx_state); @@ -278,7 +280,8 @@ static int hci_uart_send_frame(struct hci_dev *hdev, struct sk_buff *skb) percpu_down_read(&hu->proto_lock); 
- if (!test_bit(HCI_UART_PROTO_READY, &hu->flags)) { + if (!test_bit(HCI_UART_PROTO_READY, &hu->flags) && + !test_bit(HCI_UART_PROTO_INIT, &hu->flags)) { percpu_up_read(&hu->proto_lock); return -EUNATCH; } @@ -585,7 +588,8 @@ static void hci_uart_tty_wakeup(struct tty_struct *tty) if (tty != hu->tty) return; - if (test_bit(HCI_UART_PROTO_READY, &hu->flags)) + if (test_bit(HCI_UART_PROTO_READY, &hu->flags) || + test_bit(HCI_UART_PROTO_INIT, &hu->flags)) hci_uart_tx_wakeup(hu); } @@ -611,7 +615,8 @@ static void hci_uart_tty_receive(struct tty_struct *tty, const u8 *data, percpu_down_read(&hu->proto_lock); - if (!test_bit(HCI_UART_PROTO_READY, &hu->flags)) { + if (!test_bit(HCI_UART_PROTO_READY, &hu->flags) && + !test_bit(HCI_UART_PROTO_INIT, &hu->flags)) { percpu_up_read(&hu->proto_lock); return; } @@ -707,12 +712,16 @@ static int hci_uart_set_proto(struct hci_uart *hu, int id) hu->proto = p; + set_bit(HCI_UART_PROTO_INIT, &hu->flags); + err = hci_uart_register_dev(hu); if (err) { return err; } set_bit(HCI_UART_PROTO_READY, &hu->flags); + clear_bit(HCI_UART_PROTO_INIT, &hu->flags); + return 0; } diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c index 0ac2168f1dc4f..f2558506a02c7 100644 --- a/drivers/bluetooth/hci_qca.c +++ b/drivers/bluetooth/hci_qca.c @@ -623,6 +623,7 @@ static int qca_open(struct hci_uart *hu) qcadev = serdev_device_get_drvdata(hu->serdev); switch (qcadev->btsoc_type) { + case QCA_WCN3950: case QCA_WCN3988: case QCA_WCN3990: case QCA_WCN3991: @@ -1366,6 +1367,7 @@ static int qca_set_baudrate(struct hci_dev *hdev, uint8_t baudrate) /* Give the controller time to process the request */ switch (qca_soc_type(hu)) { + case QCA_WCN3950: case QCA_WCN3988: case QCA_WCN3990: case QCA_WCN3991: @@ -1452,6 +1454,7 @@ static unsigned int qca_get_speed(struct hci_uart *hu, static int qca_check_speeds(struct hci_uart *hu) { switch (qca_soc_type(hu)) { + case QCA_WCN3950: case QCA_WCN3988: case QCA_WCN3990: case QCA_WCN3991: @@ -1494,6 +1497,7 @@ static int qca_set_speed(struct hci_uart *hu, enum qca_speed_type speed_type) * changing the baudrate of chip and host. 
*/ switch (soc_type) { + case QCA_WCN3950: case QCA_WCN3988: case QCA_WCN3990: case QCA_WCN3991: @@ -1528,6 +1532,7 @@ static int qca_set_speed(struct hci_uart *hu, enum qca_speed_type speed_type) error: switch (soc_type) { + case QCA_WCN3950: case QCA_WCN3988: case QCA_WCN3990: case QCA_WCN3991: @@ -1746,6 +1751,7 @@ static int qca_regulator_init(struct hci_uart *hu) } switch (soc_type) { + case QCA_WCN3950: case QCA_WCN3988: case QCA_WCN3990: case QCA_WCN3991: @@ -1776,6 +1782,7 @@ static int qca_regulator_init(struct hci_uart *hu) qca_set_speed(hu, QCA_INIT_SPEED); switch (soc_type) { + case QCA_WCN3950: case QCA_WCN3988: case QCA_WCN3990: case QCA_WCN3991: @@ -1807,6 +1814,7 @@ static int qca_power_on(struct hci_dev *hdev) return 0; switch (soc_type) { + case QCA_WCN3950: case QCA_WCN3988: case QCA_WCN3990: case QCA_WCN3991: @@ -1891,6 +1899,7 @@ static int qca_setup(struct hci_uart *hu) soc_name = "qca2066"; break; + case QCA_WCN3950: case QCA_WCN3988: case QCA_WCN3990: case QCA_WCN3991: @@ -1925,6 +1934,7 @@ static int qca_setup(struct hci_uart *hu) clear_bit(QCA_SSR_TRIGGERED, &qca->flags); switch (soc_type) { + case QCA_WCN3950: case QCA_WCN3988: case QCA_WCN3990: case QCA_WCN3991: @@ -1958,6 +1968,7 @@ static int qca_setup(struct hci_uart *hu) } switch (soc_type) { + case QCA_WCN3950: case QCA_WCN3988: case QCA_WCN3990: case QCA_WCN3991: @@ -2046,6 +2057,17 @@ static const struct hci_uart_proto qca_proto = { .dequeue = qca_dequeue, }; +static const struct qca_device_data qca_soc_data_wcn3950 __maybe_unused = { + .soc_type = QCA_WCN3950, + .vregs = (struct qca_vreg []) { + { "vddio", 15000 }, + { "vddxo", 60000 }, + { "vddrf", 155000 }, + { "vddch0", 585000 }, + }, + .num_vregs = 4, +}; + static const struct qca_device_data qca_soc_data_wcn3988 __maybe_unused = { .soc_type = QCA_WCN3988, .vregs = (struct qca_vreg []) { @@ -2338,6 +2360,7 @@ static int qca_serdev_probe(struct serdev_device *serdev) qcadev->btsoc_type = QCA_ROME; switch (qcadev->btsoc_type) { + case QCA_WCN3950: case QCA_WCN3988: case QCA_WCN3990: case QCA_WCN3991: @@ -2359,6 +2382,7 @@ static int qca_serdev_probe(struct serdev_device *serdev) switch (qcadev->btsoc_type) { case QCA_WCN6855: case QCA_WCN7850: + case QCA_WCN6750: if (!device_property_present(&serdev->dev, "enable-gpios")) { /* * Backward compatibility with old DT sources. 
If the @@ -2374,11 +2398,11 @@ static int qca_serdev_probe(struct serdev_device *serdev) break; } fallthrough; + case QCA_WCN3950: case QCA_WCN3988: case QCA_WCN3990: case QCA_WCN3991: case QCA_WCN3998: - case QCA_WCN6750: qcadev->bt_power->dev = &serdev->dev; err = qca_init_regulators(qcadev->bt_power, data->vregs, data->num_vregs); @@ -2683,6 +2707,7 @@ static const struct of_device_id qca_bluetooth_of_match[] = { { .compatible = "qcom,qca6174-bt" }, { .compatible = "qcom,qca6390-bt", .data = &qca_soc_data_qca6390}, { .compatible = "qcom,qca9377-bt" }, + { .compatible = "qcom,wcn3950-bt", .data = &qca_soc_data_wcn3950}, { .compatible = "qcom,wcn3988-bt", .data = &qca_soc_data_wcn3988}, { .compatible = "qcom,wcn3990-bt", .data = &qca_soc_data_wcn3990}, { .compatible = "qcom,wcn3991-bt", .data = &qca_soc_data_wcn3991}, diff --git a/drivers/bluetooth/hci_uart.h b/drivers/bluetooth/hci_uart.h index fbf3079b92a53..5ea5dd80e297c 100644 --- a/drivers/bluetooth/hci_uart.h +++ b/drivers/bluetooth/hci_uart.h @@ -90,6 +90,7 @@ struct hci_uart { #define HCI_UART_REGISTERED 1 #define HCI_UART_PROTO_READY 2 #define HCI_UART_NO_SUSPEND_NOTIFIER 3 +#define HCI_UART_PROTO_INIT 4 /* TX states */ #define HCI_UART_SENDING 1 diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c index 7651321d351cc..a51935d37e5d7 100644 --- a/drivers/bluetooth/hci_vhci.c +++ b/drivers/bluetooth/hci_vhci.c @@ -316,7 +316,7 @@ static inline void force_devcd_timeout(struct hci_dev *hdev, unsigned int timeout) { #ifdef CONFIG_DEV_COREDUMP - hdev->dump.timeout = msecs_to_jiffies(timeout * 1000); + hdev->dump.timeout = secs_to_jiffies(timeout); #endif } @@ -416,6 +416,7 @@ static int __vhci_create_device(struct vhci_data *data, __u8 opcode) hdev->wakeup = vhci_wakeup; hdev->setup = vhci_setup; set_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks); + set_bit(HCI_QUIRK_SYNC_FLOWCTL_SUPPORTED, &hdev->quirks); /* bit 6 is for external configuration */ if (opcode & 0x40) @@ -645,7 +646,7 @@ static int vhci_open(struct inode *inode, struct file *file) file->private_data = data; nonseekable_open(inode, file); - schedule_delayed_work(&data->open_timeout, msecs_to_jiffies(1000)); + schedule_delayed_work(&data->open_timeout, secs_to_jiffies(1)); return 0; } diff --git a/drivers/bus/mhi/host/pci_generic.c b/drivers/bus/mhi/host/pci_generic.c index c41119b9079f0..7ffea0f981628 100644 --- a/drivers/bus/mhi/host/pci_generic.c +++ b/drivers/bus/mhi/host/pci_generic.c @@ -1095,8 +1095,9 @@ static void mhi_pci_recovery_work(struct work_struct *work) err_unprepare: mhi_unprepare_after_power_down(mhi_cntrl); err_try_reset: - if (pci_reset_function(pdev)) - dev_err(&pdev->dev, "Recovery failed\n"); + err = pci_try_reset_function(pdev); + if (err) + dev_err(&pdev->dev, "Recovery failed: %d\n", err); } static void health_check(struct timer_list *t) diff --git a/drivers/bus/simple-pm-bus.c b/drivers/bus/simple-pm-bus.c index 5dea31769f9a8..d8e029e7e53f7 100644 --- a/drivers/bus/simple-pm-bus.c +++ b/drivers/bus/simple-pm-bus.c @@ -109,9 +109,29 @@ static int simple_pm_bus_runtime_resume(struct device *dev) return 0; } +static int simple_pm_bus_suspend(struct device *dev) +{ + struct simple_pm_bus *bus = dev_get_drvdata(dev); + + if (!bus) + return 0; + + return pm_runtime_force_suspend(dev); +} + +static int simple_pm_bus_resume(struct device *dev) +{ + struct simple_pm_bus *bus = dev_get_drvdata(dev); + + if (!bus) + return 0; + + return pm_runtime_force_resume(dev); +} + static const struct dev_pm_ops simple_pm_bus_pm_ops = { 
RUNTIME_PM_OPS(simple_pm_bus_runtime_suspend, simple_pm_bus_runtime_resume, NULL) - NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume) + NOIRQ_SYSTEM_SLEEP_PM_OPS(simple_pm_bus_suspend, simple_pm_bus_resume) }; #define ONLY_BUS ((void *) 1) /* Match if the device is only a bus. */ diff --git a/drivers/cdx/cdx.c b/drivers/cdx/cdx.c index c573ed2ee71a8..7811aa7340537 100644 --- a/drivers/cdx/cdx.c +++ b/drivers/cdx/cdx.c @@ -473,8 +473,12 @@ static ssize_t driver_override_show(struct device *dev, struct device_attribute *attr, char *buf) { struct cdx_device *cdx_dev = to_cdx_device(dev); + ssize_t len; - return sysfs_emit(buf, "%s\n", cdx_dev->driver_override); + device_lock(dev); + len = sysfs_emit(buf, "%s\n", cdx_dev->driver_override); + device_unlock(dev); + return len; } static DEVICE_ATTR_RW(driver_override); diff --git a/drivers/char/misc.c b/drivers/char/misc.c index 2cf595d2e10b8..f7dd455dd0dd3 100644 --- a/drivers/char/misc.c +++ b/drivers/char/misc.c @@ -264,8 +264,8 @@ int misc_register(struct miscdevice *misc) device_create_with_groups(&misc_class, misc->parent, dev, misc, misc->groups, "%s", misc->name); if (IS_ERR(misc->this_device)) { + misc_minor_free(misc->minor); if (is_dynamic) { - misc_minor_free(misc->minor); misc->minor = MISC_DYNAMIC_MINOR; } err = PTR_ERR(misc->this_device); diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c index 24442485e73e7..18f92dd44d456 100644 --- a/drivers/char/virtio_console.c +++ b/drivers/char/virtio_console.c @@ -923,14 +923,14 @@ static ssize_t port_fops_splice_write(struct pipe_inode_info *pipe, pipe_lock(pipe); ret = 0; - if (pipe_empty(pipe->head, pipe->tail)) + if (pipe_is_empty(pipe)) goto error_out; ret = wait_port_writable(port, filp->f_flags & O_NONBLOCK); if (ret < 0) goto error_out; - occupancy = pipe_occupancy(pipe->head, pipe->tail); + occupancy = pipe_buf_usage(pipe); buf = alloc_buf(port->portdev->vdev, 0, occupancy); if (!buf) { diff --git a/drivers/clk/qcom/dispcc-sm8750.c b/drivers/clk/qcom/dispcc-sm8750.c index 0358dff91da5d..e9bca179998b9 100644 --- a/drivers/clk/qcom/dispcc-sm8750.c +++ b/drivers/clk/qcom/dispcc-sm8750.c @@ -827,7 +827,6 @@ static struct clk_regmap_div disp_cc_mdss_byte0_div_clk_src = { &disp_cc_mdss_byte0_clk_src.clkr.hw, }, .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, .ops = &clk_regmap_div_ops, }, }; @@ -842,7 +841,6 @@ static struct clk_regmap_div disp_cc_mdss_byte1_div_clk_src = { &disp_cc_mdss_byte1_clk_src.clkr.hw, }, .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, .ops = &clk_regmap_div_ops, }, }; diff --git a/drivers/clk/samsung/clk-gs101.c b/drivers/clk/samsung/clk-gs101.c index 86b39edba1227..08b867ae3ed9d 100644 --- a/drivers/clk/samsung/clk-gs101.c +++ b/drivers/clk/samsung/clk-gs101.c @@ -382,17 +382,9 @@ static const unsigned long cmu_top_clk_regs[] __initconst = { EARLY_WAKEUP_DPU_DEST, EARLY_WAKEUP_CSIS_DEST, EARLY_WAKEUP_SW_TRIG_APM, - EARLY_WAKEUP_SW_TRIG_APM_SET, - EARLY_WAKEUP_SW_TRIG_APM_CLEAR, EARLY_WAKEUP_SW_TRIG_CLUSTER0, - EARLY_WAKEUP_SW_TRIG_CLUSTER0_SET, - EARLY_WAKEUP_SW_TRIG_CLUSTER0_CLEAR, EARLY_WAKEUP_SW_TRIG_DPU, - EARLY_WAKEUP_SW_TRIG_DPU_SET, - EARLY_WAKEUP_SW_TRIG_DPU_CLEAR, EARLY_WAKEUP_SW_TRIG_CSIS, - EARLY_WAKEUP_SW_TRIG_CSIS_SET, - EARLY_WAKEUP_SW_TRIG_CSIS_CLEAR, CLK_CON_MUX_MUX_CLKCMU_BO_BUS, CLK_CON_MUX_MUX_CLKCMU_BUS0_BUS, CLK_CON_MUX_MUX_CLKCMU_BUS1_BUS, diff --git a/drivers/clk/samsung/clk-pll.c b/drivers/clk/samsung/clk-pll.c index 2e94bba6c3966..023a25af73c47 100644 --- 
a/drivers/clk/samsung/clk-pll.c +++ b/drivers/clk/samsung/clk-pll.c @@ -206,6 +206,7 @@ static const struct clk_ops samsung_pll3000_clk_ops = { */ /* Maximum lock time can be 270 * PDIV cycles */ #define PLL35XX_LOCK_FACTOR (270) +#define PLL142XX_LOCK_FACTOR (150) #define PLL35XX_MDIV_MASK (0x3FF) #define PLL35XX_PDIV_MASK (0x3F) @@ -272,7 +273,11 @@ static int samsung_pll35xx_set_rate(struct clk_hw *hw, unsigned long drate, } /* Set PLL lock time. */ - writel_relaxed(rate->pdiv * PLL35XX_LOCK_FACTOR, + if (pll->type == pll_142xx) + writel_relaxed(rate->pdiv * PLL142XX_LOCK_FACTOR, + pll->lock_reg); + else + writel_relaxed(rate->pdiv * PLL35XX_LOCK_FACTOR, pll->lock_reg); /* Change PLL PMS values */ diff --git a/drivers/clocksource/jcore-pit.c b/drivers/clocksource/jcore-pit.c index a3fe98cd38382..82815428f8f92 100644 --- a/drivers/clocksource/jcore-pit.c +++ b/drivers/clocksource/jcore-pit.c @@ -114,6 +114,18 @@ static int jcore_pit_local_init(unsigned cpu) pit->periodic_delta = DIV_ROUND_CLOSEST(NSEC_PER_SEC, HZ * buspd); clockevents_config_and_register(&pit->ced, freq, 1, ULONG_MAX); + enable_percpu_irq(pit->ced.irq, IRQ_TYPE_NONE); + + return 0; +} + +static int jcore_pit_local_teardown(unsigned cpu) +{ + struct jcore_pit *pit = this_cpu_ptr(jcore_pit_percpu); + + pr_info("Local J-Core PIT teardown on cpu %u\n", cpu); + + disable_percpu_irq(pit->ced.irq); return 0; } @@ -168,6 +180,7 @@ static int __init jcore_pit_init(struct device_node *node) return -ENOMEM; } + irq_set_percpu_devid(pit_irq); err = request_percpu_irq(pit_irq, jcore_timer_interrupt, "jcore_pit", jcore_pit_percpu); if (err) { @@ -237,7 +250,7 @@ static int __init jcore_pit_init(struct device_node *node) cpuhp_setup_state(CPUHP_AP_JCORE_TIMER_STARTING, "clockevents/jcore:starting", - jcore_pit_local_init, NULL); + jcore_pit_local_init, jcore_pit_local_teardown); return 0; } diff --git a/drivers/dma/qcom/bam_dma.c b/drivers/dma/qcom/bam_dma.c index c14557efd5770..bbc3276992bb0 100644 --- a/drivers/dma/qcom/bam_dma.c +++ b/drivers/dma/qcom/bam_dma.c @@ -59,9 +59,6 @@ struct bam_desc_hw { #define DESC_FLAG_NWD BIT(12) #define DESC_FLAG_CMD BIT(11) -#define BAM_NDP_REVISION_START 0x20 -#define BAM_NDP_REVISION_END 0x27 - struct bam_async_desc { struct virt_dma_desc vd; @@ -401,7 +398,6 @@ struct bam_device { /* dma start transaction tasklet */ struct tasklet_struct task; - u32 bam_revision; }; /** @@ -445,10 +441,8 @@ static void bam_reset(struct bam_device *bdev) writel_relaxed(val, bam_addr(bdev, 0, BAM_CTRL)); /* set descriptor threshold, start with 4 bytes */ - if (in_range(bdev->bam_revision, BAM_NDP_REVISION_START, - BAM_NDP_REVISION_END)) - writel_relaxed(DEFAULT_CNT_THRSHLD, - bam_addr(bdev, 0, BAM_DESC_CNT_TRSHLD)); + writel_relaxed(DEFAULT_CNT_THRSHLD, + bam_addr(bdev, 0, BAM_DESC_CNT_TRSHLD)); /* Enable default set of h/w workarounds, ie all except BAM_FULL_PIPE */ writel_relaxed(BAM_CNFG_BITS_DEFAULT, bam_addr(bdev, 0, BAM_CNFG_BITS)); @@ -1006,10 +1000,9 @@ static void bam_apply_new_config(struct bam_chan *bchan, maxburst = bchan->slave.src_maxburst; else maxburst = bchan->slave.dst_maxburst; - if (in_range(bdev->bam_revision, BAM_NDP_REVISION_START, - BAM_NDP_REVISION_END)) - writel_relaxed(maxburst, - bam_addr(bdev, 0, BAM_DESC_CNT_TRSHLD)); + + writel_relaxed(maxburst, + bam_addr(bdev, 0, BAM_DESC_CNT_TRSHLD)); } bchan->reconfigure = 0; @@ -1199,11 +1192,10 @@ static int bam_init(struct bam_device *bdev) u32 val; /* read revision and configuration information */ - val = readl_relaxed(bam_addr(bdev, 0, 
BAM_REVISION)); - if (!bdev->num_ees) + if (!bdev->num_ees) { + val = readl_relaxed(bam_addr(bdev, 0, BAM_REVISION)); bdev->num_ees = (val >> NUM_EES_SHIFT) & NUM_EES_MASK; - - bdev->bam_revision = val & REVISION_MASK; + } /* check that configured EE is within range */ if (bdev->ee >= bdev->num_ees) diff --git a/drivers/dma/tegra210-adma.c b/drivers/dma/tegra210-adma.c index 5c6a5b3589873..ce80ac4b1a1b0 100644 --- a/drivers/dma/tegra210-adma.c +++ b/drivers/dma/tegra210-adma.c @@ -83,7 +83,9 @@ struct tegra_adma; * @nr_channels: Number of DMA channels available. * @ch_fifo_size_mask: Mask for FIFO size field. * @sreq_index_offset: Slave channel index offset. + * @max_page: Maximum ADMA Channel Page. * @has_outstanding_reqs: If DMA channel can have outstanding requests. + * @set_global_pg_config: Global page programming. */ struct tegra_adma_chip_data { unsigned int (*adma_get_burst_config)(unsigned int burst_size); @@ -99,6 +101,7 @@ struct tegra_adma_chip_data { unsigned int nr_channels; unsigned int ch_fifo_size_mask; unsigned int sreq_index_offset; + unsigned int max_page; bool has_outstanding_reqs; void (*set_global_pg_config)(struct tegra_adma *tdma); }; @@ -854,6 +857,7 @@ static const struct tegra_adma_chip_data tegra210_chip_data = { .nr_channels = 22, .ch_fifo_size_mask = 0xf, .sreq_index_offset = 2, + .max_page = 0, .has_outstanding_reqs = false, .set_global_pg_config = NULL, }; @@ -871,6 +875,7 @@ static const struct tegra_adma_chip_data tegra186_chip_data = { .nr_channels = 32, .ch_fifo_size_mask = 0x1f, .sreq_index_offset = 4, + .max_page = 4, .has_outstanding_reqs = true, .set_global_pg_config = tegra186_adma_global_page_config, }; diff --git a/drivers/dpll/dpll_core.c b/drivers/dpll/dpll_core.c index 32019dc33cca7..20bdc52f63a50 100644 --- a/drivers/dpll/dpll_core.c +++ b/drivers/dpll/dpll_core.c @@ -443,8 +443,11 @@ static void dpll_pin_prop_free(struct dpll_pin_properties *prop) static int dpll_pin_prop_dup(const struct dpll_pin_properties *src, struct dpll_pin_properties *dst) { + if (WARN_ON(src->freq_supported && !src->freq_supported_num)) + return -EINVAL; + memcpy(dst, src, sizeof(*dst)); - if (src->freq_supported && src->freq_supported_num) { + if (src->freq_supported) { size_t freq_size = src->freq_supported_num * sizeof(*src->freq_supported); dst->freq_supported = kmemdup(src->freq_supported, @@ -505,7 +508,7 @@ dpll_pin_alloc(u64 clock_id, u32 pin_idx, struct module *module, xa_init_flags(&pin->parent_refs, XA_FLAGS_ALLOC); ret = xa_alloc_cyclic(&dpll_pin_xa, &pin->id, pin, xa_limit_32b, &dpll_pin_xa_id, GFP_KERNEL); - if (ret) + if (ret < 0) goto err_xa_alloc; return pin; err_xa_alloc: diff --git a/drivers/edac/qcom_edac.c b/drivers/edac/qcom_edac.c index 04c42c83a2bad..f3da9385ca0d8 100644 --- a/drivers/edac/qcom_edac.c +++ b/drivers/edac/qcom_edac.c @@ -95,7 +95,7 @@ static int qcom_llcc_core_setup(struct llcc_drv_data *drv, struct regmap *llcc_b * Configure interrupt enable registers such that Tag, Data RAM related * interrupts are propagated to interrupt controller for servicing */ - ret = regmap_update_bits(llcc_bcast_regmap, drv->edac_reg_offset->cmn_interrupt_2_enable, + ret = regmap_update_bits(llcc_bcast_regmap, drv->edac_reg_offset->cmn_interrupt_0_enable, TRP0_INTERRUPT_ENABLE, TRP0_INTERRUPT_ENABLE); if (ret) @@ -113,7 +113,7 @@ static int qcom_llcc_core_setup(struct llcc_drv_data *drv, struct regmap *llcc_b if (ret) return ret; - ret = regmap_update_bits(llcc_bcast_regmap, drv->edac_reg_offset->cmn_interrupt_2_enable, + ret = 
regmap_update_bits(llcc_bcast_regmap, drv->edac_reg_offset->cmn_interrupt_0_enable, DRP0_INTERRUPT_ENABLE, DRP0_INTERRUPT_ENABLE); if (ret) diff --git a/drivers/firmware/arm_scmi/vendors/imx/imx-sm-misc.c b/drivers/firmware/arm_scmi/vendors/imx/imx-sm-misc.c index 83b69fc4fba5b..a8915d3b4df51 100644 --- a/drivers/firmware/arm_scmi/vendors/imx/imx-sm-misc.c +++ b/drivers/firmware/arm_scmi/vendors/imx/imx-sm-misc.c @@ -254,8 +254,8 @@ static int scmi_imx_misc_ctrl_set(const struct scmi_protocol_handle *ph, if (num > max_num) return -EINVAL; - ret = ph->xops->xfer_get_init(ph, SCMI_IMX_MISC_CTRL_SET, sizeof(*in), - 0, &t); + ret = ph->xops->xfer_get_init(ph, SCMI_IMX_MISC_CTRL_SET, + sizeof(*in) + num * sizeof(__le32), 0, &t); if (ret) return ret; diff --git a/drivers/firmware/cirrus/Kconfig b/drivers/firmware/cirrus/Kconfig index 0a883091259a2..1fdf948535957 100644 --- a/drivers/firmware/cirrus/Kconfig +++ b/drivers/firmware/cirrus/Kconfig @@ -12,7 +12,6 @@ config FW_CS_DSP_KUNIT_TEST_UTILS config FW_CS_DSP_KUNIT_TEST tristate "KUnit tests for Cirrus Logic cs_dsp" if !KUNIT_ALL_TESTS depends on KUNIT && REGMAP - default KUNIT_ALL_TESTS select FW_CS_DSP select FW_CS_DSP_KUNIT_TEST_UTILS help diff --git a/drivers/firmware/cirrus/cs_dsp.c b/drivers/firmware/cirrus/cs_dsp.c index 5365e9a430007..42433c19eb308 100644 --- a/drivers/firmware/cirrus/cs_dsp.c +++ b/drivers/firmware/cirrus/cs_dsp.c @@ -1609,8 +1609,8 @@ static int cs_dsp_load(struct cs_dsp *dsp, const struct firmware *firmware, goto out_fw; } - ret = regmap_raw_write_async(regmap, reg, buf->buf, - le32_to_cpu(region->len)); + ret = regmap_raw_write(regmap, reg, buf->buf, + le32_to_cpu(region->len)); if (ret != 0) { cs_dsp_err(dsp, "%s.%d: Failed to write %d bytes at %d in %s: %d\n", @@ -1625,12 +1625,6 @@ static int cs_dsp_load(struct cs_dsp *dsp, const struct firmware *firmware, regions++; } - ret = regmap_async_complete(regmap); - if (ret != 0) { - cs_dsp_err(dsp, "Failed to complete async write: %d\n", ret); - goto out_fw; - } - if (pos > firmware->size) cs_dsp_warn(dsp, "%s.%d: %zu bytes at end of file\n", file, regions, pos - firmware->size); @@ -1638,7 +1632,6 @@ static int cs_dsp_load(struct cs_dsp *dsp, const struct firmware *firmware, cs_dsp_debugfs_save_wmfwname(dsp, file); out_fw: - regmap_async_complete(regmap); cs_dsp_buf_free(&buf_list); if (ret == -EOVERFLOW) @@ -2326,8 +2319,8 @@ static int cs_dsp_load_coeff(struct cs_dsp *dsp, const struct firmware *firmware cs_dsp_dbg(dsp, "%s.%d: Writing %d bytes at %x\n", file, blocks, le32_to_cpu(blk->len), reg); - ret = regmap_raw_write_async(regmap, reg, buf->buf, - le32_to_cpu(blk->len)); + ret = regmap_raw_write(regmap, reg, buf->buf, + le32_to_cpu(blk->len)); if (ret != 0) { cs_dsp_err(dsp, "%s.%d: Failed to write to %x in %s: %d\n", @@ -2339,10 +2332,6 @@ static int cs_dsp_load_coeff(struct cs_dsp *dsp, const struct firmware *firmware blocks++; } - ret = regmap_async_complete(regmap); - if (ret != 0) - cs_dsp_err(dsp, "Failed to complete async write: %d\n", ret); - if (pos > firmware->size) cs_dsp_warn(dsp, "%s.%d: %zu bytes at end of file\n", file, blocks, pos - firmware->size); @@ -2350,7 +2339,6 @@ static int cs_dsp_load_coeff(struct cs_dsp *dsp, const struct firmware *firmware cs_dsp_debugfs_save_binname(dsp, file); out_fw: - regmap_async_complete(regmap); cs_dsp_buf_free(&buf_list); if (ret == -EOVERFLOW) @@ -2561,8 +2549,8 @@ static int cs_dsp_adsp2_enable_core(struct cs_dsp *dsp) { int ret; - ret = regmap_update_bits_async(dsp->regmap, dsp->base + ADSP2_CONTROL, - 
ADSP2_SYS_ENA, ADSP2_SYS_ENA); + ret = regmap_update_bits(dsp->regmap, dsp->base + ADSP2_CONTROL, + ADSP2_SYS_ENA, ADSP2_SYS_ENA); if (ret != 0) return ret; diff --git a/drivers/firmware/efi/cper-arm.c b/drivers/firmware/efi/cper-arm.c index fa9c1c3bf168b..f0a63d09d3c49 100644 --- a/drivers/firmware/efi/cper-arm.c +++ b/drivers/firmware/efi/cper-arm.c @@ -311,7 +311,7 @@ void cper_print_proc_arm(const char *pfx, ctx_info = (struct cper_arm_ctx_info *)err_info; max_ctx_type = ARRAY_SIZE(arm_reg_ctx_strs) - 1; for (i = 0; i < proc->context_info_num; i++) { - int size = sizeof(*ctx_info) + ctx_info->size; + int size = ALIGN(sizeof(*ctx_info) + ctx_info->size, 16); printk("%sContext info structure %d:\n", pfx, i); if (len < size) { diff --git a/drivers/firmware/efi/cper-x86.c b/drivers/firmware/efi/cper-x86.c index 438ed9eff6d01..3949d7b5e808f 100644 --- a/drivers/firmware/efi/cper-x86.c +++ b/drivers/firmware/efi/cper-x86.c @@ -325,7 +325,7 @@ void cper_print_proc_ia(const char *pfx, const struct cper_sec_proc_ia *proc) ctx_info = (struct cper_ia_proc_ctx *)err_info; for (i = 0; i < VALID_PROC_CXT_INFO_NUM(proc->validation_bits); i++) { - int size = sizeof(*ctx_info) + ctx_info->reg_arr_size; + int size = ALIGN(sizeof(*ctx_info) + ctx_info->reg_arr_size, 16); int groupsize = 4; printk("%sContext Information Structure %d:\n", pfx, i); diff --git a/drivers/firmware/efi/libstub/randomalloc.c b/drivers/firmware/efi/libstub/randomalloc.c index 5a732018be36d..fd80b2f3233aa 100644 --- a/drivers/firmware/efi/libstub/randomalloc.c +++ b/drivers/firmware/efi/libstub/randomalloc.c @@ -75,6 +75,10 @@ efi_status_t efi_random_alloc(unsigned long size, if (align < EFI_ALLOC_ALIGN) align = EFI_ALLOC_ALIGN; + /* Avoid address 0x0, as it can be mistaken for NULL */ + if (alloc_min == 0) + alloc_min = align; + size = round_up(size, EFI_ALLOC_ALIGN); /* count the suitable slots in each memory map entry */ diff --git a/drivers/firmware/efi/mokvar-table.c b/drivers/firmware/efi/mokvar-table.c index 5ed0602c2f75f..208db29613c63 100644 --- a/drivers/firmware/efi/mokvar-table.c +++ b/drivers/firmware/efi/mokvar-table.c @@ -99,14 +99,13 @@ static struct kobject *mokvar_kobj; */ void __init efi_mokvar_table_init(void) { + struct efi_mokvar_table_entry __aligned(1) *mokvar_entry, *next_entry; efi_memory_desc_t md; void *va = NULL; unsigned long cur_offset = 0; unsigned long offset_limit; - unsigned long map_size = 0; unsigned long map_size_needed = 0; unsigned long size; - struct efi_mokvar_table_entry *mokvar_entry; int err; if (!efi_enabled(EFI_MEMMAP)) @@ -134,48 +133,46 @@ void __init efi_mokvar_table_init(void) */ err = -EINVAL; while (cur_offset + sizeof(*mokvar_entry) <= offset_limit) { - mokvar_entry = va + cur_offset; - map_size_needed = cur_offset + sizeof(*mokvar_entry); - if (map_size_needed > map_size) { - if (va) - early_memunmap(va, map_size); - /* - * Map a little more than the fixed size entry - * header, anticipating some data. It's safe to - * do so as long as we stay within current memory - * descriptor. 
- */ - map_size = min(map_size_needed + 2*EFI_PAGE_SIZE, - offset_limit); - va = early_memremap(efi.mokvar_table, map_size); - if (!va) { - pr_err("Failed to map EFI MOKvar config table pa=0x%lx, size=%lu.\n", - efi.mokvar_table, map_size); - return; - } - mokvar_entry = va + cur_offset; + if (va) + early_memunmap(va, sizeof(*mokvar_entry)); + va = early_memremap(efi.mokvar_table + cur_offset, sizeof(*mokvar_entry)); + if (!va) { + pr_err("Failed to map EFI MOKvar config table pa=0x%lx, size=%zu.\n", + efi.mokvar_table + cur_offset, sizeof(*mokvar_entry)); + return; } - + mokvar_entry = va; +next: /* Check for last sentinel entry */ if (mokvar_entry->name[0] == '\0') { if (mokvar_entry->data_size != 0) break; err = 0; + map_size_needed = cur_offset + sizeof(*mokvar_entry); break; } - /* Sanity check that the name is null terminated */ - size = strnlen(mokvar_entry->name, - sizeof(mokvar_entry->name)); - if (size >= sizeof(mokvar_entry->name)) - break; + /* Enforce that the name is NUL terminated */ + mokvar_entry->name[sizeof(mokvar_entry->name) - 1] = '\0'; /* Advance to the next entry */ - cur_offset = map_size_needed + mokvar_entry->data_size; + size = sizeof(*mokvar_entry) + mokvar_entry->data_size; + cur_offset += size; + + /* + * Don't bother remapping if the current entry header and the + * next one end on the same page. + */ + next_entry = (void *)((unsigned long)mokvar_entry + size); + if (((((unsigned long)(mokvar_entry + 1) - 1) ^ + ((unsigned long)(next_entry + 1) - 1)) & PAGE_MASK) == 0) { + mokvar_entry = next_entry; + goto next; + } } if (va) - early_memunmap(va, map_size); + early_memunmap(va, sizeof(*mokvar_entry)); if (err) { pr_err("EFI MOKvar config table is not valid\n"); return; diff --git a/drivers/firmware/imx/Kconfig b/drivers/firmware/imx/Kconfig index 907cd149c40a8..c964f4924359f 100644 --- a/drivers/firmware/imx/Kconfig +++ b/drivers/firmware/imx/Kconfig @@ -25,6 +25,7 @@ config IMX_SCU config IMX_SCMI_MISC_DRV tristate "IMX SCMI MISC Protocol driver" + depends on ARCH_MXC || COMPILE_TEST default y if ARCH_MXC help The System Controller Management Interface firmware (SCMI FW) is diff --git a/drivers/firmware/imx/imx-scu.c b/drivers/firmware/imx/imx-scu.c index 1dd4362ef9a3f..8c28e25ddc8a6 100644 --- a/drivers/firmware/imx/imx-scu.c +++ b/drivers/firmware/imx/imx-scu.c @@ -280,6 +280,7 @@ static int imx_scu_probe(struct platform_device *pdev) return ret; sc_ipc->fast_ipc = of_device_is_compatible(args.np, "fsl,imx8-mu-scu"); + of_node_put(args.np); num_channel = sc_ipc->fast_ipc ? 
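The reworked MOK variable walk maps one entry header at a time with early_memremap() instead of growing a single large mapping, and only remaps when the next header actually crosses a page boundary. The XOR test used for that check relies on two addresses sharing a page exactly when their page-frame bits are identical; the idea in isolation:

    #include <linux/mm.h>
    #include <linux/types.h>

    /*
     * True if both addresses land on the same page: XOR cancels the
     * bits they share, and masking with PAGE_MASK keeps only the
     * page-frame bits, so a zero result means the same page.
     */
    static bool example_same_page(unsigned long a, unsigned long b)
    {
            return ((a ^ b) & PAGE_MASK) == 0;
    }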
2 : SCU_MU_CHAN_NUM; for (i = 0; i < num_channel; i++) { diff --git a/drivers/firmware/qcom/qcom_qseecom_uefisecapp.c b/drivers/firmware/qcom/qcom_qseecom_uefisecapp.c index 447246bd04be3..98a463e9774bf 100644 --- a/drivers/firmware/qcom/qcom_qseecom_uefisecapp.c +++ b/drivers/firmware/qcom/qcom_qseecom_uefisecapp.c @@ -814,15 +814,6 @@ static int qcom_uefisecapp_probe(struct auxiliary_device *aux_dev, qcuefi->client = container_of(aux_dev, struct qseecom_client, aux_dev); - auxiliary_set_drvdata(aux_dev, qcuefi); - status = qcuefi_set_reference(qcuefi); - if (status) - return status; - - status = efivars_register(&qcuefi->efivars, &qcom_efivar_ops); - if (status) - qcuefi_set_reference(NULL); - memset(&pool_config, 0, sizeof(pool_config)); pool_config.initial_size = SZ_4K; pool_config.policy = QCOM_TZMEM_POLICY_MULTIPLIER; @@ -833,6 +824,15 @@ static int qcom_uefisecapp_probe(struct auxiliary_device *aux_dev, if (IS_ERR(qcuefi->mempool)) return PTR_ERR(qcuefi->mempool); + auxiliary_set_drvdata(aux_dev, qcuefi); + status = qcuefi_set_reference(qcuefi); + if (status) + return status; + + status = efivars_register(&qcuefi->efivars, &qcom_efivar_ops); + if (status) + qcuefi_set_reference(NULL); + return status; } diff --git a/drivers/firmware/qcom/qcom_scm.c b/drivers/firmware/qcom/qcom_scm.c index f0569bb9411f7..fc4d67e4c4a67 100644 --- a/drivers/firmware/qcom/qcom_scm.c +++ b/drivers/firmware/qcom/qcom_scm.c @@ -2301,8 +2301,8 @@ static int qcom_scm_probe(struct platform_device *pdev) __scm->mempool = devm_qcom_tzmem_pool_new(__scm->dev, &pool_config); if (IS_ERR(__scm->mempool)) { - dev_err_probe(__scm->dev, PTR_ERR(__scm->mempool), - "Failed to create the SCM memory pool\n"); + ret = dev_err_probe(__scm->dev, PTR_ERR(__scm->mempool), + "Failed to create the SCM memory pool\n"); goto err; } diff --git a/drivers/gpio/gpio-aggregator.c b/drivers/gpio/gpio-aggregator.c index 65f41cc3eafcc..d668ddb2e81d3 100644 --- a/drivers/gpio/gpio-aggregator.c +++ b/drivers/gpio/gpio-aggregator.c @@ -119,10 +119,15 @@ static ssize_t new_device_store(struct device_driver *driver, const char *buf, struct platform_device *pdev; int res, id; + if (!try_module_get(THIS_MODULE)) + return -ENOENT; + /* kernfs guarantees string termination, so count + 1 is safe */ aggr = kzalloc(sizeof(*aggr) + count + 1, GFP_KERNEL); - if (!aggr) - return -ENOMEM; + if (!aggr) { + res = -ENOMEM; + goto put_module; + } memcpy(aggr->args, buf, count + 1); @@ -161,6 +166,7 @@ static ssize_t new_device_store(struct device_driver *driver, const char *buf, } aggr->pdev = pdev; + module_put(THIS_MODULE); return count; remove_table: @@ -175,6 +181,8 @@ static ssize_t new_device_store(struct device_driver *driver, const char *buf, kfree(aggr->lookups); free_ga: kfree(aggr); +put_module: + module_put(THIS_MODULE); return res; } @@ -203,13 +211,19 @@ static ssize_t delete_device_store(struct device_driver *driver, if (error) return error; + if (!try_module_get(THIS_MODULE)) + return -ENOENT; + mutex_lock(&gpio_aggregator_lock); aggr = idr_remove(&gpio_aggregator_idr, id); mutex_unlock(&gpio_aggregator_lock); - if (!aggr) + if (!aggr) { + module_put(THIS_MODULE); return -ENOENT; + } gpio_aggregator_free(aggr); + module_put(THIS_MODULE); return count; } static DRIVER_ATTR_WO(delete_device); diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c index 2ecee3269a0cc..a7a1cdf7ac66d 100644 --- a/drivers/gpio/gpio-rcar.c +++ b/drivers/gpio/gpio-rcar.c @@ -40,7 +40,7 @@ struct gpio_rcar_info { struct gpio_rcar_priv { void __iomem *base; - 
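The gpio-aggregator change pins the module for the duration of the new_device/delete_device store callbacks, so the driver cannot be unloaded out from under a writer that is still creating or destroying a platform device. The shape of that pattern, reduced to its essentials:

    #include <linux/device/driver.h>
    #include <linux/errno.h>
    #include <linux/module.h>

    static ssize_t example_store(struct device_driver *drv,
                                 const char *buf, size_t count)
    {
            if (!try_module_get(THIS_MODULE))   /* module on its way out */
                    return -ENOENT;

            /* ... create or tear down the backing device here ... */

            module_put(THIS_MODULE);
            return count;
    }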
spinlock_t lock; + raw_spinlock_t lock; struct device *dev; struct gpio_chip gpio_chip; unsigned int irq_parent; @@ -123,7 +123,7 @@ static void gpio_rcar_config_interrupt_input_mode(struct gpio_rcar_priv *p, * "Setting Level-Sensitive Interrupt Input Mode" */ - spin_lock_irqsave(&p->lock, flags); + raw_spin_lock_irqsave(&p->lock, flags); /* Configure positive or negative logic in POSNEG */ gpio_rcar_modify_bit(p, POSNEG, hwirq, !active_high_rising_edge); @@ -142,7 +142,7 @@ static void gpio_rcar_config_interrupt_input_mode(struct gpio_rcar_priv *p, if (!level_trigger) gpio_rcar_write(p, INTCLR, BIT(hwirq)); - spin_unlock_irqrestore(&p->lock, flags); + raw_spin_unlock_irqrestore(&p->lock, flags); } static int gpio_rcar_irq_set_type(struct irq_data *d, unsigned int type) @@ -246,7 +246,7 @@ static void gpio_rcar_config_general_input_output_mode(struct gpio_chip *chip, * "Setting General Input Mode" */ - spin_lock_irqsave(&p->lock, flags); + raw_spin_lock_irqsave(&p->lock, flags); /* Configure positive logic in POSNEG */ gpio_rcar_modify_bit(p, POSNEG, gpio, false); @@ -261,7 +261,7 @@ static void gpio_rcar_config_general_input_output_mode(struct gpio_chip *chip, if (p->info.has_outdtsel && output) gpio_rcar_modify_bit(p, OUTDTSEL, gpio, false); - spin_unlock_irqrestore(&p->lock, flags); + raw_spin_unlock_irqrestore(&p->lock, flags); } static int gpio_rcar_request(struct gpio_chip *chip, unsigned offset) @@ -347,7 +347,7 @@ static int gpio_rcar_get_multiple(struct gpio_chip *chip, unsigned long *mask, return 0; } - spin_lock_irqsave(&p->lock, flags); + raw_spin_lock_irqsave(&p->lock, flags); outputs = gpio_rcar_read(p, INOUTSEL); m = outputs & bankmask; if (m) @@ -356,7 +356,7 @@ static int gpio_rcar_get_multiple(struct gpio_chip *chip, unsigned long *mask, m = ~outputs & bankmask; if (m) val |= gpio_rcar_read(p, INDT) & m; - spin_unlock_irqrestore(&p->lock, flags); + raw_spin_unlock_irqrestore(&p->lock, flags); bits[0] = val; return 0; @@ -367,9 +367,9 @@ static void gpio_rcar_set(struct gpio_chip *chip, unsigned offset, int value) struct gpio_rcar_priv *p = gpiochip_get_data(chip); unsigned long flags; - spin_lock_irqsave(&p->lock, flags); + raw_spin_lock_irqsave(&p->lock, flags); gpio_rcar_modify_bit(p, OUTDT, offset, value); - spin_unlock_irqrestore(&p->lock, flags); + raw_spin_unlock_irqrestore(&p->lock, flags); } static void gpio_rcar_set_multiple(struct gpio_chip *chip, unsigned long *mask, @@ -386,12 +386,12 @@ static void gpio_rcar_set_multiple(struct gpio_chip *chip, unsigned long *mask, if (!bankmask) return; - spin_lock_irqsave(&p->lock, flags); + raw_spin_lock_irqsave(&p->lock, flags); val = gpio_rcar_read(p, OUTDT); val &= ~bankmask; val |= (bankmask & bits[0]); gpio_rcar_write(p, OUTDT, val); - spin_unlock_irqrestore(&p->lock, flags); + raw_spin_unlock_irqrestore(&p->lock, flags); } static int gpio_rcar_direction_output(struct gpio_chip *chip, unsigned offset, @@ -468,7 +468,12 @@ static int gpio_rcar_parse_dt(struct gpio_rcar_priv *p, unsigned int *npins) p->info = *info; ret = of_parse_phandle_with_fixed_args(np, "gpio-ranges", 3, 0, &args); - *npins = ret == 0 ? 
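gpio-rcar converts its register lock from spinlock_t to raw_spinlock_t because these paths run from irqchip callbacks: on PREEMPT_RT a regular spinlock becomes a sleeping lock and must not be taken in hard interrupt context. A minimal sketch of the raw variant:

    #include <linux/io.h>
    #include <linux/spinlock.h>

    static DEFINE_RAW_SPINLOCK(example_lock); /* stays spinning on RT */

    static void example_set_bits(void __iomem *reg, u32 set)
    {
            unsigned long flags;

            raw_spin_lock_irqsave(&example_lock, flags);
            writel(readl(reg) | set, reg);      /* read-modify-write */
            raw_spin_unlock_irqrestore(&example_lock, flags);
    }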
args.args[2] : RCAR_MAX_GPIO_PER_BANK; + if (ret) { + *npins = RCAR_MAX_GPIO_PER_BANK; + } else { + *npins = args.args[2]; + of_node_put(args.np); + } if (*npins == 0 || *npins > RCAR_MAX_GPIO_PER_BANK) { dev_warn(p->dev, "Invalid number of gpio lines %u, using %u\n", @@ -505,7 +510,7 @@ static int gpio_rcar_probe(struct platform_device *pdev) return -ENOMEM; p->dev = dev; - spin_lock_init(&p->lock); + raw_spin_lock_init(&p->lock); /* Get device configuration from DT node */ ret = gpio_rcar_parse_dt(p, &npins); diff --git a/drivers/gpio/gpio-vf610.c b/drivers/gpio/gpio-vf610.c index c4f34a347cb6e..c36a9dbccd4dd 100644 --- a/drivers/gpio/gpio-vf610.c +++ b/drivers/gpio/gpio-vf610.c @@ -36,6 +36,7 @@ struct vf610_gpio_port { struct clk *clk_port; struct clk *clk_gpio; int irq; + spinlock_t lock; /* protect gpio direction registers */ }; #define GPIO_PDOR 0x00 @@ -124,6 +125,7 @@ static int vf610_gpio_direction_input(struct gpio_chip *chip, unsigned int gpio) u32 val; if (port->sdata->have_paddr) { + guard(spinlock_irqsave)(&port->lock); val = vf610_gpio_readl(port->gpio_base + GPIO_PDDR); val &= ~mask; vf610_gpio_writel(val, port->gpio_base + GPIO_PDDR); @@ -142,6 +144,7 @@ static int vf610_gpio_direction_output(struct gpio_chip *chip, unsigned int gpio vf610_gpio_set(chip, gpio, value); if (port->sdata->have_paddr) { + guard(spinlock_irqsave)(&port->lock); val = vf610_gpio_readl(port->gpio_base + GPIO_PDDR); val |= mask; vf610_gpio_writel(val, port->gpio_base + GPIO_PDDR); @@ -297,6 +300,7 @@ static int vf610_gpio_probe(struct platform_device *pdev) return -ENOMEM; port->sdata = device_get_match_data(dev); + spin_lock_init(&port->lock); dual_base = port->sdata->have_dual_base; diff --git a/drivers/gpio/gpiolib-cdev.c b/drivers/gpio/gpiolib-cdev.c index 40f76a90fd7db..107d75558b5a8 100644 --- a/drivers/gpio/gpiolib-cdev.c +++ b/drivers/gpio/gpiolib-cdev.c @@ -2729,8 +2729,9 @@ static int gpio_chrdev_open(struct inode *inode, struct file *file) cdev->gdev = gpio_device_get(gdev); cdev->lineinfo_changed_nb.notifier_call = lineinfo_changed_notify; - ret = atomic_notifier_chain_register(&gdev->line_state_notifier, - &cdev->lineinfo_changed_nb); + scoped_guard(write_lock_irqsave, &gdev->line_state_lock) + ret = raw_notifier_chain_register(&gdev->line_state_notifier, + &cdev->lineinfo_changed_nb); if (ret) goto out_free_bitmap; @@ -2754,8 +2755,9 @@ static int gpio_chrdev_open(struct inode *inode, struct file *file) blocking_notifier_chain_unregister(&gdev->device_notifier, &cdev->device_unregistered_nb); out_unregister_line_notifier: - atomic_notifier_chain_unregister(&gdev->line_state_notifier, - &cdev->lineinfo_changed_nb); + scoped_guard(write_lock_irqsave, &gdev->line_state_lock) + raw_notifier_chain_unregister(&gdev->line_state_notifier, + &cdev->lineinfo_changed_nb); out_free_bitmap: gpio_device_put(gdev); bitmap_free(cdev->watched_lines); @@ -2779,8 +2781,9 @@ static int gpio_chrdev_release(struct inode *inode, struct file *file) blocking_notifier_chain_unregister(&gdev->device_notifier, &cdev->device_unregistered_nb); - atomic_notifier_chain_unregister(&gdev->line_state_notifier, - &cdev->lineinfo_changed_nb); + scoped_guard(write_lock_irqsave, &gdev->line_state_lock) + raw_notifier_chain_unregister(&gdev->line_state_notifier, + &cdev->lineinfo_changed_nb); bitmap_free(cdev->watched_lines); gpio_device_put(gdev); kfree(cdev); diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c index 2e537ee979f3e..44ba1e16ed0d8 100644 --- a/drivers/gpio/gpiolib-of.c +++ 
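The vf610 and gpiolib-cdev hunks lean on the scope-based locking helpers from <linux/cleanup.h>: guard() holds a lock until the enclosing scope ends, while scoped_guard() confines it to a single statement or block, so no explicit unlock (or unlock-on-error path) is needed. A sketch of both forms:

    #include <linux/cleanup.h>
    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(example_lock);
    static u32 example_shadow;

    static void example_set(u32 mask)
    {
            guard(spinlock_irqsave)(&example_lock); /* dropped at return */
            example_shadow |= mask;
    }

    static u32 example_get(void)
    {
            u32 val;

            scoped_guard(spinlock_irqsave, &example_lock) /* this block only */
                    val = example_shadow;
            return val;
    }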
b/drivers/gpio/gpiolib-of.c @@ -203,6 +203,15 @@ static void of_gpio_try_fixup_polarity(const struct device_node *np, */ { "qi,lb60", "rb-gpios", true }, #endif +#if IS_ENABLED(CONFIG_IEEE802154_CA8210) + /* + * According to the datasheet, the NRST pin 27 is an active-low + * signal. However, the device tree schema and admittedly + * the out-of-tree implementations have been used for a long + * time incorrectly by describing reset GPIO as active-high. + */ + { "cascoda,ca8210", "reset-gpio", false }, +#endif #if IS_ENABLED(CONFIG_PCI_LANTIQ) /* * According to the PCI specification, the RST# pin is an diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index ca2f58a2cd45e..0c00ed2ab4315 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -1025,7 +1025,8 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data, } } - ATOMIC_INIT_NOTIFIER_HEAD(&gdev->line_state_notifier); + rwlock_init(&gdev->line_state_lock); + RAW_INIT_NOTIFIER_HEAD(&gdev->line_state_notifier); BLOCKING_INIT_NOTIFIER_HEAD(&gdev->device_notifier); ret = init_srcu_struct(&gdev->srcu); @@ -1056,13 +1057,19 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data, desc->gdev = gdev; - if (gc->get_direction && gpiochip_line_is_valid(gc, desc_index)) { - assign_bit(FLAG_IS_OUT, - &desc->flags, !gc->get_direction(gc, desc_index)); - } else { + /* + * We would typically want to check the return value of + * get_direction() here but we must not check the return value + * and bail-out as pin controllers can have pins configured to + * alternate functions and return -EINVAL. Also: there's no + * need to take the SRCU lock here. + */ + if (gc->get_direction && gpiochip_line_is_valid(gc, desc_index)) + assign_bit(FLAG_IS_OUT, &desc->flags, + !gc->get_direction(gc, desc_index)); + else assign_bit(FLAG_IS_OUT, &desc->flags, !gc->direction_input); - } } ret = of_gpiochip_add(gc); @@ -2701,7 +2708,7 @@ EXPORT_SYMBOL_GPL(gpiod_direction_input); int gpiod_direction_input_nonotify(struct gpio_desc *desc) { - int ret = 0; + int ret = 0, dir; CLASS(gpio_chip_guard, guard)(desc); if (!guard.gc) @@ -2728,13 +2735,18 @@ int gpiod_direction_input_nonotify(struct gpio_desc *desc) if (guard.gc->direction_input) { ret = guard.gc->direction_input(guard.gc, gpio_chip_hwgpio(desc)); - } else if (guard.gc->get_direction && - (guard.gc->get_direction(guard.gc, - gpio_chip_hwgpio(desc)) != 1)) { - gpiod_warn(desc, - "%s: missing direction_input() operation and line is output\n", - __func__); - return -EIO; + } else if (guard.gc->get_direction) { + dir = guard.gc->get_direction(guard.gc, + gpio_chip_hwgpio(desc)); + if (dir < 0) + return dir; + + if (dir != GPIO_LINE_DIRECTION_IN) { + gpiod_warn(desc, + "%s: missing direction_input() operation and line is output\n", + __func__); + return -EIO; + } } if (ret == 0) { clear_bit(FLAG_IS_OUT, &desc->flags); @@ -2748,7 +2760,7 @@ int gpiod_direction_input_nonotify(struct gpio_desc *desc) static int gpiod_direction_output_raw_commit(struct gpio_desc *desc, int value) { - int val = !!value, ret = 0; + int val = !!value, ret = 0, dir; CLASS(gpio_chip_guard, guard)(desc); if (!guard.gc) @@ -2771,12 +2783,18 @@ static int gpiod_direction_output_raw_commit(struct gpio_desc *desc, int value) gpio_chip_hwgpio(desc), val); } else { /* Check that we are in output mode if we can */ - if (guard.gc->get_direction && - guard.gc->get_direction(guard.gc, gpio_chip_hwgpio(desc))) { - gpiod_warn(desc, - "%s: missing direction_output() operation\n", - __func__); - return -EIO; + if 
(guard.gc->get_direction) { + dir = guard.gc->get_direction(guard.gc, + gpio_chip_hwgpio(desc)); + if (dir < 0) + return dir; + + if (dir != GPIO_LINE_DIRECTION_OUT) { + gpiod_warn(desc, + "%s: missing direction_output() operation\n", + __func__); + return -EIO; + } } /* * If we can't actively set the direction, we are some @@ -3129,6 +3147,8 @@ static int gpiod_get_raw_value_commit(const struct gpio_desc *desc) static int gpio_chip_get_multiple(struct gpio_chip *gc, unsigned long *mask, unsigned long *bits) { + lockdep_assert_held(&gc->gpiodev->srcu); + if (gc->get_multiple) return gc->get_multiple(gc, mask, bits); if (gc->get) { @@ -3159,6 +3179,7 @@ int gpiod_get_array_value_complex(bool raw, bool can_sleep, struct gpio_array *array_info, unsigned long *value_bitmap) { + struct gpio_chip *gc; int ret, i = 0; /* @@ -3170,10 +3191,15 @@ int gpiod_get_array_value_complex(bool raw, bool can_sleep, array_size <= array_info->size && (void *)array_info == desc_array + array_info->size) { if (!can_sleep) - WARN_ON(array_info->chip->can_sleep); + WARN_ON(array_info->gdev->can_sleep); - ret = gpio_chip_get_multiple(array_info->chip, - array_info->get_mask, + guard(srcu)(&array_info->gdev->srcu); + gc = srcu_dereference(array_info->gdev->chip, + &array_info->gdev->srcu); + if (!gc) + return -ENODEV; + + ret = gpio_chip_get_multiple(gc, array_info->get_mask, value_bitmap); if (ret) return ret; @@ -3454,6 +3480,8 @@ static void gpiod_set_raw_value_commit(struct gpio_desc *desc, bool value) static void gpio_chip_set_multiple(struct gpio_chip *gc, unsigned long *mask, unsigned long *bits) { + lockdep_assert_held(&gc->gpiodev->srcu); + if (gc->set_multiple) { gc->set_multiple(gc, mask, bits); } else { @@ -3471,6 +3499,7 @@ int gpiod_set_array_value_complex(bool raw, bool can_sleep, struct gpio_array *array_info, unsigned long *value_bitmap) { + struct gpio_chip *gc; int i = 0; /* @@ -3482,14 +3511,19 @@ int gpiod_set_array_value_complex(bool raw, bool can_sleep, array_size <= array_info->size && (void *)array_info == desc_array + array_info->size) { if (!can_sleep) - WARN_ON(array_info->chip->can_sleep); + WARN_ON(array_info->gdev->can_sleep); + + guard(srcu)(&array_info->gdev->srcu); + gc = srcu_dereference(array_info->gdev->chip, + &array_info->gdev->srcu); + if (!gc) + return -ENODEV; if (!raw && !bitmap_empty(array_info->invert_mask, array_size)) bitmap_xor(value_bitmap, value_bitmap, array_info->invert_mask, array_size); - gpio_chip_set_multiple(array_info->chip, array_info->set_mask, - value_bitmap); + gpio_chip_set_multiple(gc, array_info->set_mask, value_bitmap); i = find_first_zero_bit(array_info->set_mask, array_size); if (i == array_size) @@ -4155,8 +4189,9 @@ EXPORT_SYMBOL_GPL(gpiod_set_array_value_cansleep); void gpiod_line_state_notify(struct gpio_desc *desc, unsigned long action) { - atomic_notifier_call_chain(&desc->gdev->line_state_notifier, - action, desc); + guard(read_lock_irqsave)(&desc->gdev->line_state_lock); + + raw_notifier_call_chain(&desc->gdev->line_state_notifier, action, desc); } /** @@ -4751,9 +4786,10 @@ struct gpio_descs *__must_check gpiod_get_array(struct device *dev, { struct gpio_desc *desc; struct gpio_descs *descs; + struct gpio_device *gdev; struct gpio_array *array_info = NULL; - struct gpio_chip *gc; int count, bitmap_size; + unsigned long dflags; size_t descs_size; count = gpiod_count(dev, con_id); @@ -4774,7 +4810,7 @@ struct gpio_descs *__must_check gpiod_get_array(struct device *dev, descs->desc[descs->ndescs] = desc; - gc = gpiod_to_chip(desc); + gdev = 
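Both gpiolib direction paths now propagate a negative return from the chip's .get_direction() instead of folding it into the input/output decision; pin controllers legitimately return -EINVAL for pins muxed to alternate functions, and that must not be mistaken for a direction. Reduced to a helper:

    #include <linux/errno.h>
    #include <linux/gpio/driver.h>

    static int example_require_output(struct gpio_chip *gc, unsigned int offset)
    {
            int dir;

            if (!gc->get_direction)
                    return 0;               /* nothing we can verify */

            dir = gc->get_direction(gc, offset);
            if (dir < 0)                    /* e.g. pin in alternate function */
                    return dir;

            return dir == GPIO_LINE_DIRECTION_OUT ? 0 : -EIO;
    }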
gpiod_to_gpio_device(desc); /* * If pin hardware number of array member 0 is also 0, select * its chip as a candidate for fast bitmap processing path. @@ -4782,8 +4818,8 @@ struct gpio_descs *__must_check gpiod_get_array(struct device *dev, if (descs->ndescs == 0 && gpio_chip_hwgpio(desc) == 0) { struct gpio_descs *array; - bitmap_size = BITS_TO_LONGS(gc->ngpio > count ? - gc->ngpio : count); + bitmap_size = BITS_TO_LONGS(gdev->ngpio > count ? + gdev->ngpio : count); array = krealloc(descs, descs_size + struct_size(array_info, invert_mask, 3 * bitmap_size), @@ -4803,7 +4839,7 @@ struct gpio_descs *__must_check gpiod_get_array(struct device *dev, array_info->desc = descs->desc; array_info->size = count; - array_info->chip = gc; + array_info->gdev = gdev; bitmap_set(array_info->get_mask, descs->ndescs, count - descs->ndescs); bitmap_set(array_info->set_mask, descs->ndescs, @@ -4816,7 +4852,7 @@ struct gpio_descs *__must_check gpiod_get_array(struct device *dev, continue; /* Unmark array members which don't belong to the 'fast' chip */ - if (array_info->chip != gc) { + if (array_info->gdev != gdev) { __clear_bit(descs->ndescs, array_info->get_mask); __clear_bit(descs->ndescs, array_info->set_mask); } @@ -4839,9 +4875,10 @@ struct gpio_descs *__must_check gpiod_get_array(struct device *dev, array_info->set_mask); } } else { + dflags = READ_ONCE(desc->flags); /* Exclude open drain or open source from fast output */ - if (gpiochip_line_is_open_drain(gc, descs->ndescs) || - gpiochip_line_is_open_source(gc, descs->ndescs)) + if (test_bit(FLAG_OPEN_DRAIN, &dflags) || + test_bit(FLAG_OPEN_SOURCE, &dflags)) __clear_bit(descs->ndescs, array_info->set_mask); /* Identify 'fast' pins which require invertion */ @@ -4853,7 +4890,7 @@ struct gpio_descs *__must_check gpiod_get_array(struct device *dev, if (array_info) dev_dbg(dev, "GPIO array info: chip=%s, size=%d, get_mask=%lx, set_mask=%lx, invert_mask=%lx\n", - array_info->chip->label, array_info->size, + array_info->gdev->label, array_info->size, *array_info->get_mask, *array_info->set_mask, *array_info->invert_mask); return descs; diff --git a/drivers/gpio/gpiolib.h b/drivers/gpio/gpiolib.h index 83690f72f7e5c..c129a03e20408 100644 --- a/drivers/gpio/gpiolib.h +++ b/drivers/gpio/gpiolib.h @@ -16,6 +16,7 @@ #include #include #include +#include #include #include @@ -45,6 +46,7 @@ * @list: links gpio_device:s together for traversal * @line_state_notifier: used to notify subscribers about lines being * requested, released or reconfigured + * @line_state_lock: RW-spinlock protecting the line state notifier * @line_state_wq: used to emit line state events from a separate thread in * process context * @device_notifier: used to notify character device wait queues about the GPIO @@ -72,7 +74,8 @@ struct gpio_device { const char *label; void *data; struct list_head list; - struct atomic_notifier_head line_state_notifier; + struct raw_notifier_head line_state_notifier; + rwlock_t line_state_lock; struct workqueue_struct *line_state_wq; struct blocking_notifier_head device_notifier; struct srcu_struct srcu; @@ -114,7 +117,7 @@ extern const char *const gpio_suffixes[]; * * @desc: Array of pointers to the GPIO descriptors * @size: Number of elements in desc - * @chip: Parent GPIO chip + * @gdev: Parent GPIO device * @get_mask: Get mask used in fastpath * @set_mask: Set mask used in fastpath * @invert_mask: Invert mask used in fastpath @@ -126,7 +129,7 @@ extern const char *const gpio_suffixes[]; struct gpio_array { struct gpio_desc **desc; unsigned int size; - 
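Storing the parent gpio_device rather than the gpio_chip in struct gpio_array matters because the chip can be removed while array users still hold descriptors; the fast path therefore re-resolves the chip under SRCU on every call. The access pattern, mirroring the gpiolib internals:

    #include <linux/cleanup.h>
    #include <linux/errno.h>
    #include <linux/srcu.h>

    static int example_with_chip(struct gpio_device *gdev)
    {
            struct gpio_chip *gc;

            guard(srcu)(&gdev->srcu);       /* read-side critical section */

            gc = srcu_dereference(gdev->chip, &gdev->srcu);
            if (!gc)
                    return -ENODEV;         /* chip has been removed */

            /* ... gc is safe to use until the guard is released ... */
            return 0;
    }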
struct gpio_chip *chip; + struct gpio_device *gdev; unsigned long *get_mask; unsigned long *set_mask; unsigned long invert_mask[]; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index d100bb7a137cd..018dfccd771ba 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -1638,6 +1638,13 @@ int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev) if (amdgpu_sriov_vf(adev)) return 0; + /* resizing on Dell G5 SE platforms causes problems with runtime pm */ + if ((amdgpu_runtime_pm != 0) && + adev->pdev->vendor == PCI_VENDOR_ID_ATI && + adev->pdev->device == 0x731f && + adev->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL) + return 0; + /* PCI_EXT_CAP_ID_VNDR extended capability is located at 0x100 */ if (!pci_find_ext_capability(adev->pdev, PCI_EXT_CAP_ID_VNDR)) DRM_WARN("System can't access extended configuration space, please check!!\n"); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 95a05b03f799d..c0ddbe7d6f0bc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -2555,7 +2555,6 @@ static int amdgpu_pmops_freeze(struct device *dev) int r; r = amdgpu_device_suspend(drm_dev, true); - adev->in_s4 = false; if (r) return r; @@ -2567,8 +2566,13 @@ static int amdgpu_pmops_freeze(struct device *dev) static int amdgpu_pmops_thaw(struct device *dev) { struct drm_device *drm_dev = dev_get_drvdata(dev); + struct amdgpu_device *adev = drm_to_adev(drm_dev); + int r; - return amdgpu_device_resume(drm_dev, true); + r = amdgpu_device_resume(drm_dev, true); + adev->in_s4 = false; + + return r; } static int amdgpu_pmops_poweroff(struct device *dev) @@ -2581,6 +2585,9 @@ static int amdgpu_pmops_poweroff(struct device *dev) static int amdgpu_pmops_restore(struct device *dev) { struct drm_device *drm_dev = dev_get_drvdata(dev); + struct amdgpu_device *adev = drm_to_adev(drm_dev); + + adev->in_s4 = false; return amdgpu_device_resume(drm_dev, true); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index 784b03abb3a43..c1f35ded684e8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -1638,22 +1638,19 @@ static ssize_t amdgpu_gfx_set_enforce_isolation(struct device *dev, } mutex_lock(&adev->enforce_isolation_mutex); - for (i = 0; i < num_partitions; i++) { - if (adev->enforce_isolation[i] && !partition_values[i]) { + if (adev->enforce_isolation[i] && !partition_values[i]) /* Going from enabled to disabled */ amdgpu_vmid_free_reserved(adev, AMDGPU_GFXHUB(i)); - amdgpu_mes_set_enforce_isolation(adev, i, false); - } else if (!adev->enforce_isolation[i] && partition_values[i]) { + else if (!adev->enforce_isolation[i] && partition_values[i]) /* Going from disabled to enabled */ amdgpu_vmid_alloc_reserved(adev, AMDGPU_GFXHUB(i)); - amdgpu_mes_set_enforce_isolation(adev, i, true); - } adev->enforce_isolation[i] = partition_values[i]; } - mutex_unlock(&adev->enforce_isolation_mutex); + amdgpu_mes_update_enforce_isolation(adev); + return count; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c index 32b27a1658e78..709c11cbeabd8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c @@ -1681,7 +1681,8 @@ bool amdgpu_mes_suspend_resume_all_supported(struct amdgpu_device *adev) } /* Fix me -- node_id is used to identify the correct MES 
instances in the future */ -int amdgpu_mes_set_enforce_isolation(struct amdgpu_device *adev, uint32_t node_id, bool enable) +static int amdgpu_mes_set_enforce_isolation(struct amdgpu_device *adev, + uint32_t node_id, bool enable) { struct mes_misc_op_input op_input = {0}; int r; @@ -1703,6 +1704,23 @@ int amdgpu_mes_set_enforce_isolation(struct amdgpu_device *adev, uint32_t node_i return r; } +int amdgpu_mes_update_enforce_isolation(struct amdgpu_device *adev) +{ + int i, r = 0; + + if (adev->enable_mes && adev->gfx.enable_cleaner_shader) { + mutex_lock(&adev->enforce_isolation_mutex); + for (i = 0; i < (adev->xcp_mgr ? adev->xcp_mgr->num_xcps : 1); i++) { + if (adev->enforce_isolation[i]) + r |= amdgpu_mes_set_enforce_isolation(adev, i, true); + else + r |= amdgpu_mes_set_enforce_isolation(adev, i, false); + } + mutex_unlock(&adev->enforce_isolation_mutex); + } + return r; +} + #if defined(CONFIG_DEBUG_FS) static int amdgpu_debugfs_mes_event_log_show(struct seq_file *m, void *unused) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h index 2df2444ee892c..e98ea7ede1bab 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h @@ -534,6 +534,6 @@ static inline void amdgpu_mes_unlock(struct amdgpu_mes *mes) bool amdgpu_mes_suspend_resume_all_supported(struct amdgpu_device *adev); -int amdgpu_mes_set_enforce_isolation(struct amdgpu_device *adev, uint32_t node_id, bool enable); +int amdgpu_mes_update_enforce_isolation(struct amdgpu_device *adev); #endif /* __AMDGPU_MES_H__ */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 01ae2f88dec8c..262bd010a283d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -2281,7 +2281,7 @@ int amdgpu_ttm_clear_buffer(struct amdgpu_bo *bo, struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring; struct amdgpu_res_cursor cursor; u64 addr; - int r; + int r = 0; if (!adev->mman.buffer_funcs_enabled) return -EINVAL; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c index b749f1c3f6a9a..0fb88e6d5d54b 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c @@ -528,8 +528,9 @@ static void gmc_v12_0_get_vm_pte(struct amdgpu_device *adev, bo_adev = amdgpu_ttm_adev(bo->tbo.bdev); coherent = bo->flags & AMDGPU_GEM_CREATE_COHERENT; - is_system = (bo->tbo.resource->mem_type == TTM_PL_TT) || - (bo->tbo.resource->mem_type == AMDGPU_PL_PREEMPT); + is_system = bo->tbo.resource && + (bo->tbo.resource->mem_type == TTM_PL_TT || + bo->tbo.resource->mem_type == AMDGPU_PL_PREEMPT); if (bo && bo->flags & AMDGPU_GEM_CREATE_GFX12_DCC) *flags |= AMDGPU_PTE_DCC; diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c index 65f389eb65e5f..f9a4d08eef925 100644 --- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c @@ -1633,6 +1633,10 @@ static int mes_v11_0_hw_init(struct amdgpu_ip_block *ip_block) goto failure; } + r = amdgpu_mes_update_enforce_isolation(adev); + if (r) + goto failure; + out: /* * Disable KIQ ring usage from the driver once MES is enabled. 
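With amdgpu_mes_set_enforce_isolation() now static, all callers go through amdgpu_mes_update_enforce_isolation(), which replays the cached per-partition flags to the MES firmware under the mutex; calling it from hw_init means a GPU reset re-applies whatever the sysfs interface had configured. The shape of that replay, with hypothetical names:

    #include <linux/mutex.h>
    #include <linux/types.h>

    struct example_adev {
            struct mutex isolation_mutex;
            bool enforce_isolation[8];
    };

    int example_push_to_fw(struct example_adev *adev, int part, bool enable);

    /* Push cached software state back to firmware, e.g. after reset. */
    static int example_replay_isolation(struct example_adev *adev, int parts)
    {
            int i, r = 0;

            mutex_lock(&adev->isolation_mutex);
            for (i = 0; i < parts; i++)
                    r |= example_push_to_fw(adev, i,
                                            adev->enforce_isolation[i]);
            mutex_unlock(&adev->isolation_mutex);

            return r;       /* nonzero if any partition failed */
    }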
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c index 901e924e69ad9..0fd0fa6ed5184 100644 --- a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c @@ -1743,6 +1743,10 @@ static int mes_v12_0_hw_init(struct amdgpu_ip_block *ip_block) goto failure; } + r = amdgpu_mes_update_enforce_isolation(adev); + if (r) + goto failure; + out: /* * Disable KIQ ring usage from the driver once MES is enabled. diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c index c633b7ff29438..09fd6ef99b3d1 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c @@ -284,7 +284,7 @@ static int vce_v2_0_stop(struct amdgpu_device *adev) return 0; } - ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_VCN); + ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_VCE); if (!ip_block) return -EINVAL; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index d4593374e7a1e..34c2c42c0f95c 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -1230,11 +1230,13 @@ static int evict_process_queues_cpsch(struct device_queue_manager *dqm, decrement_queue_count(dqm, qpd, q); if (dqm->dev->kfd->shared_resources.enable_mes) { - retval = remove_queue_mes(dqm, q, qpd); - if (retval) { + int err; + + err = remove_queue_mes(dqm, q, qpd); + if (err) { dev_err(dev, "Failed to evict queue %d\n", q->properties.queue_id); - goto out; + retval = err; } } } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c index 2eff37aaf8273..1695dd78ede8e 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c @@ -107,6 +107,8 @@ static void init_mqd(struct mqd_manager *mm, void **mqd, m->cp_hqd_persistent_state = CP_HQD_PERSISTENT_STATE__PRELOAD_REQ_MASK | 0x53 << CP_HQD_PERSISTENT_STATE__PRELOAD_SIZE__SHIFT; + m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT; + m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK; m->cp_mqd_control = 1 << CP_MQD_CONTROL__PRIV_STATE__SHIFT; m->cp_mqd_base_addr_lo = lower_32_bits(addr); @@ -167,10 +169,10 @@ static void update_mqd(struct mqd_manager *mm, void *mqd, m = get_mqd(mqd); - m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT; + m->cp_hqd_pq_control &= ~CP_HQD_PQ_CONTROL__QUEUE_SIZE_MASK; m->cp_hqd_pq_control |= ffs(q->queue_size / sizeof(unsigned int)) - 1 - 1; - m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK; + pr_debug("cp_hqd_pq_control 0x%x\n", m->cp_hqd_pq_control); m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c index 68dbc0399c87a..3c0ae28c5923b 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c @@ -154,6 +154,8 @@ static void init_mqd(struct mqd_manager *mm, void **mqd, m->cp_hqd_persistent_state = CP_HQD_PERSISTENT_STATE__PRELOAD_REQ_MASK | 0x55 << CP_HQD_PERSISTENT_STATE__PRELOAD_SIZE__SHIFT; + m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT; + m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK; m->cp_mqd_control = 1 << CP_MQD_CONTROL__PRIV_STATE__SHIFT; 
m->cp_mqd_base_addr_lo = lower_32_bits(addr); @@ -221,10 +223,9 @@ static void update_mqd(struct mqd_manager *mm, void *mqd, m = get_mqd(mqd); - m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT; + m->cp_hqd_pq_control &= ~CP_HQD_PQ_CONTROL__QUEUE_SIZE_MASK; m->cp_hqd_pq_control |= ffs(q->queue_size / sizeof(unsigned int)) - 1 - 1; - m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK; pr_debug("cp_hqd_pq_control 0x%x\n", m->cp_hqd_pq_control); m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c index 2b72d5b4949b6..565858b9044d4 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c @@ -121,6 +121,8 @@ static void init_mqd(struct mqd_manager *mm, void **mqd, m->cp_hqd_persistent_state = CP_HQD_PERSISTENT_STATE__PRELOAD_REQ_MASK | 0x55 << CP_HQD_PERSISTENT_STATE__PRELOAD_SIZE__SHIFT; + m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT; + m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK; m->cp_mqd_control = 1 << CP_MQD_CONTROL__PRIV_STATE__SHIFT; m->cp_mqd_base_addr_lo = lower_32_bits(addr); @@ -184,10 +186,9 @@ static void update_mqd(struct mqd_manager *mm, void *mqd, m = get_mqd(mqd); - m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT; + m->cp_hqd_pq_control &= ~CP_HQD_PQ_CONTROL__QUEUE_SIZE_MASK; m->cp_hqd_pq_control |= ffs(q->queue_size / sizeof(unsigned int)) - 1 - 1; - m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK; pr_debug("cp_hqd_pq_control 0x%x\n", m->cp_hqd_pq_control); m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c index ff417d5361c42..3014925d95ffc 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c @@ -183,6 +183,9 @@ static void init_mqd(struct mqd_manager *mm, void **mqd, m->cp_hqd_persistent_state = CP_HQD_PERSISTENT_STATE__PRELOAD_REQ_MASK | 0x53 << CP_HQD_PERSISTENT_STATE__PRELOAD_SIZE__SHIFT; + m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT; + m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK; + m->cp_mqd_control = 1 << CP_MQD_CONTROL__PRIV_STATE__SHIFT; m->cp_mqd_base_addr_lo = lower_32_bits(addr); @@ -245,7 +248,7 @@ static void update_mqd(struct mqd_manager *mm, void *mqd, m = get_mqd(mqd); - m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT; + m->cp_hqd_pq_control &= ~CP_HQD_PQ_CONTROL__QUEUE_SIZE_MASK; m->cp_hqd_pq_control |= order_base_2(q->queue_size / 4) - 1; pr_debug("cp_hqd_pq_control 0x%x\n", m->cp_hqd_pq_control); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_queue.c index ecccd7adbab4d..24396a2c77bd0 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_queue.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_queue.c @@ -266,8 +266,8 @@ int kfd_queue_acquire_buffers(struct kfd_process_device *pdd, struct queue_prope /* EOP buffer is not required for all ASICs */ if (properties->eop_ring_buffer_address) { if (properties->eop_ring_buffer_size != topo_dev->node_props.eop_buffer_size) { - pr_debug("queue eop bo size 0x%lx not equal to node eop buf size 0x%x\n", - properties->eop_buf_bo->tbo.base.size, + pr_debug("queue eop bo size 0x%x not equal to node eop buf size 0x%x\n", + 
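Across the v9/v10/v11/v12 MQD managers the fix is the same: the static RPTR_BLOCK_SIZE and UNORD_DISPATCH bits move into init_mqd(), and update_mqd() switches from rewriting cp_hqd_pq_control wholesale to clearing and refilling only the queue-size field, so a resize no longer clobbers bits programmed at queue creation. Generic shape of that split (masks are hypothetical):

    #include <linux/log2.h>
    #include <linux/types.h>

    #define EXAMPLE_QUEUE_SIZE_MASK 0x0000003fu     /* hypothetical field */
    #define EXAMPLE_STATIC_BITS     0x00000500u     /* hypothetical */

    static u32 example_init_control(void)
    {
            return EXAMPLE_STATIC_BITS;     /* programmed once at creation */
    }

    static u32 example_resize_control(u32 control, u32 size_dwords)
    {
            control &= ~EXAMPLE_QUEUE_SIZE_MASK;    /* clear only this field */
            control |= order_base_2(size_dwords) - 1;
            return control;
    }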
properties->eop_ring_buffer_size, topo_dev->node_props.eop_buffer_size); err = -EINVAL; goto out_err_unreserve; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index ac3fd81fecef2..74ad0d1240feb 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -245,6 +245,10 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector); static void handle_hpd_rx_irq(void *param); +static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm, + int bl_idx, + u32 user_brightness); + static bool is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state, struct drm_crtc_state *new_crtc_state); @@ -1618,75 +1622,130 @@ static bool dm_should_disable_stutter(struct pci_dev *pdev) return false; } -static const struct dmi_system_id hpd_disconnect_quirk_table[] = { +struct amdgpu_dm_quirks { + bool aux_hpd_discon; + bool support_edp0_on_dp1; +}; + +static struct amdgpu_dm_quirks quirk_entries = { + .aux_hpd_discon = false, + .support_edp0_on_dp1 = false +}; + +static int edp0_on_dp1_callback(const struct dmi_system_id *id) +{ + quirk_entries.support_edp0_on_dp1 = true; + return 0; +} + +static int aux_hpd_discon_callback(const struct dmi_system_id *id) +{ + quirk_entries.aux_hpd_discon = true; + return 0; +} + +static const struct dmi_system_id dmi_quirk_table[] = { { + .callback = aux_hpd_discon_callback, .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3660"), }, }, { + .callback = aux_hpd_discon_callback, .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3260"), }, }, { + .callback = aux_hpd_discon_callback, .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3460"), }, }, { + .callback = aux_hpd_discon_callback, .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower Plus 7010"), }, }, { + .callback = aux_hpd_discon_callback, .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower 7010"), }, }, { + .callback = aux_hpd_discon_callback, .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF Plus 7010"), }, }, { + .callback = aux_hpd_discon_callback, .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF 7010"), }, }, { + .callback = aux_hpd_discon_callback, .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro Plus 7010"), }, }, { + .callback = aux_hpd_discon_callback, .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro 7010"), }, }, + { + .callback = edp0_on_dp1_callback, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "HP"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP Elite mt645 G8 Mobile Thin Client"), + }, + }, + { + .callback = edp0_on_dp1_callback, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "HP"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook 665 16 inch G11 Notebook PC"), + }, + }, {} /* TODO: refactor this from a fixed table to a dynamic option */ }; -static void retrieve_dmi_info(struct amdgpu_display_manager *dm) +static void retrieve_dmi_info(struct amdgpu_display_manager *dm, struct dc_init_data *init_data) { - const struct dmi_system_id *dmi_id; + int dmi_id; + struct drm_device *dev = dm->ddev; 
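amdgpu_dm merges its quirk lists into a single dmi_quirk_table whose entries carry callbacks; dmi_check_system() invokes the callback of every matching entry and returns the match count, so two independent quirks can both fire during one scan, which the previous dmi_first_match() approach could not express. Skeleton of the mechanism:

    #include <linux/dmi.h>

    static bool example_quirk;

    static int example_callback(const struct dmi_system_id *id)
    {
            example_quirk = true;
            return 0;       /* keep scanning; nonzero stops the walk */
    }

    static const struct dmi_system_id example_table[] = {
            {
                    .callback = example_callback,
                    .matches = {
                            DMI_MATCH(DMI_SYS_VENDOR, "Example Vendor"),
                            DMI_MATCH(DMI_PRODUCT_NAME, "Example Product"),
                    },
            },
            {}      /* terminator */
    };

    /* dmi_check_system(example_table) returns the number of matches. */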
dm->aux_hpd_discon_quirk = false; + init_data->flags.support_edp0_on_dp1 = false; + + dmi_id = dmi_check_system(dmi_quirk_table); + + if (!dmi_id) + return; - dmi_id = dmi_first_match(hpd_disconnect_quirk_table); - if (dmi_id) { + if (quirk_entries.aux_hpd_discon) { dm->aux_hpd_discon_quirk = true; - DRM_INFO("aux_hpd_discon_quirk attached\n"); + drm_info(dev, "aux_hpd_discon_quirk attached\n"); + } + if (quirk_entries.support_edp0_on_dp1) { + init_data->flags.support_edp0_on_dp1 = true; + drm_info(dev, "aux_hpd_discon_quirk attached\n"); } } @@ -1994,7 +2053,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 0, 0)) init_data.num_virtual_links = 1; - retrieve_dmi_info(&adev->dm); + retrieve_dmi_info(&adev->dm, &init_data); if (adev->dm.bb_from_dmub) init_data.bb_from_dmub = adev->dm.bb_from_dmub; @@ -3316,8 +3375,19 @@ static int dm_resume(struct amdgpu_ip_block *ip_block) mutex_unlock(&dm->dc_lock); + /* set the backlight after a reset */ + for (i = 0; i < dm->num_of_edps; i++) { + if (dm->backlight_dev[i]) + amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]); + } + return 0; } + + /* leave display off for S4 sequence */ + if (adev->in_s4) + return 0; + /* Recreate dc_state - DC invalidates it when setting power state to S3. */ dc_state_release(dm_state->context); dm_state->context = dc_state_create(dm->dc, NULL); @@ -4851,6 +4921,7 @@ amdgpu_dm_register_backlight_device(struct amdgpu_dm_connector *aconnector) dm->backlight_dev[aconnector->bl_idx] = backlight_device_register(bl_name, aconnector->base.kdev, dm, &amdgpu_dm_backlight_ops, &props); + dm->brightness[aconnector->bl_idx] = props.brightness; if (IS_ERR(dm->backlight_dev[aconnector->bl_idx])) { DRM_ERROR("DM: Backlight registration failed!\n"); @@ -4918,7 +4989,6 @@ static void setup_backlight_device(struct amdgpu_display_manager *dm, aconnector->bl_idx = bl_idx; amdgpu_dm_update_backlight_caps(dm, bl_idx); - dm->brightness[bl_idx] = AMDGPU_MAX_BL_LEVEL; dm->backlight_link[bl_idx] = link; dm->num_of_edps++; @@ -7240,8 +7310,14 @@ static void amdgpu_dm_connector_funcs_force(struct drm_connector *connector) struct dc_link *dc_link = aconnector->dc_link; struct dc_sink *dc_em_sink = aconnector->dc_em_sink; const struct drm_edid *drm_edid; + struct i2c_adapter *ddc; - drm_edid = drm_edid_read(connector); + if (dc_link && dc_link->aux_mode) + ddc = &aconnector->dm_dp_aux.aux.ddc; + else + ddc = &aconnector->i2c->base; + + drm_edid = drm_edid_read_ddc(connector, ddc); drm_edid_connector_update(connector, drm_edid); if (!drm_edid) { DRM_ERROR("No EDID found on connector: %s.\n", connector->name); @@ -7286,14 +7362,21 @@ static int get_modes(struct drm_connector *connector) static void create_eml_sink(struct amdgpu_dm_connector *aconnector) { struct drm_connector *connector = &aconnector->base; + struct dc_link *dc_link = aconnector->dc_link; struct dc_sink_init_data init_params = { .link = aconnector->dc_link, .sink_signal = SIGNAL_TYPE_VIRTUAL }; const struct drm_edid *drm_edid; const struct edid *edid; + struct i2c_adapter *ddc; + + if (dc_link && dc_link->aux_mode) + ddc = &aconnector->dm_dp_aux.aux.ddc; + else + ddc = &aconnector->i2c->base; - drm_edid = drm_edid_read(connector); + drm_edid = drm_edid_read_ddc(connector, ddc); drm_edid_connector_update(connector, drm_edid); if (!drm_edid) { DRM_ERROR("No EDID found on connector: %s.\n", connector->name); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c index e339c7a8d541c..c0dc232440490 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c @@ -455,6 +455,7 @@ void hdcp_destroy(struct kobject *kobj, struct hdcp_workqueue *hdcp_work) for (i = 0; i < hdcp_work->max_link; i++) { cancel_delayed_work_sync(&hdcp_work[i].callback_dwork); cancel_delayed_work_sync(&hdcp_work[i].watchdog_timer_dwork); + cancel_delayed_work_sync(&hdcp_work[i].property_validate_dwork); } sysfs_remove_bin_file(kobj, &hdcp_work[0].attr); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c index 3390f0d8420a0..a215234151ac3 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c @@ -894,6 +894,15 @@ void amdgpu_dm_hpd_init(struct amdgpu_device *adev) struct drm_device *dev = adev_to_drm(adev); struct drm_connector *connector; struct drm_connector_list_iter iter; + int irq_type; + int i; + + /* First, clear all hpd and hpdrx interrupts */ + for (i = DC_IRQ_SOURCE_HPD1; i <= DC_IRQ_SOURCE_HPD6RX; i++) { + if (!dc_interrupt_set(adev->dm.dc, i, false)) + drm_err(dev, "Failed to clear hpd(rx) source=%d on init\n", + i); + } drm_connector_list_iter_begin(dev, &iter); drm_for_each_connector_iter(connector, &iter) { @@ -907,10 +916,31 @@ void amdgpu_dm_hpd_init(struct amdgpu_device *adev) dc_link = amdgpu_dm_connector->dc_link; + /* + * Get a base driver irq reference for hpd ints for the lifetime + * of dm. Note that only hpd interrupt types are registered with + * base driver; hpd_rx types aren't. IOW, amdgpu_irq_get/put on + * hpd_rx isn't available. DM currently controls hpd_rx + * explicitly with dc_interrupt_set() + */ if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) { - dc_interrupt_set(adev->dm.dc, - dc_link->irq_source_hpd, - true); + irq_type = dc_link->irq_source_hpd - DC_IRQ_SOURCE_HPD1; + /* + * TODO: There's a mismatch between mode_info.num_hpd + * and what bios reports as the # of connectors with hpd + * sources. Since the # of hpd source types registered + * with base driver == mode_info.num_hpd, we have to + * fallback to dc_interrupt_set for the remaining types. 
+ */ + if (irq_type < adev->mode_info.num_hpd) { + if (amdgpu_irq_get(adev, &adev->hpd_irq, irq_type)) + drm_err(dev, "DM_IRQ: Failed get HPD for source=%d)!\n", + dc_link->irq_source_hpd); + } else { + dc_interrupt_set(adev->dm.dc, + dc_link->irq_source_hpd, + true); + } } if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) { @@ -935,6 +965,7 @@ void amdgpu_dm_hpd_fini(struct amdgpu_device *adev) struct drm_device *dev = adev_to_drm(adev); struct drm_connector *connector; struct drm_connector_list_iter iter; + int irq_type; drm_connector_list_iter_begin(dev, &iter); drm_for_each_connector_iter(connector, &iter) { @@ -948,9 +979,18 @@ void amdgpu_dm_hpd_fini(struct amdgpu_device *adev) dc_link = amdgpu_dm_connector->dc_link; if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) { - dc_interrupt_set(adev->dm.dc, - dc_link->irq_source_hpd, - false); + irq_type = dc_link->irq_source_hpd - DC_IRQ_SOURCE_HPD1; + + /* TODO: See same TODO in amdgpu_dm_hpd_init() */ + if (irq_type < adev->mode_info.num_hpd) { + if (amdgpu_irq_put(adev, &adev->hpd_irq, irq_type)) + drm_err(dev, "DM_IRQ: Failed put HPD for source=%d!\n", + dc_link->irq_source_hpd); + } else { + dc_interrupt_set(adev->dm.dc, + dc_link->irq_source_hpd, + false); + } } if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) { diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c index 774cc3f4f3fd9..92472109f84a9 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c @@ -277,8 +277,11 @@ static int amdgpu_dm_plane_validate_dcc(struct amdgpu_device *adev, if (!dcc->enable) return 0; - if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN || - !dc->cap_funcs.get_dcc_compression_cap) + if (adev->family < AMDGPU_FAMILY_GC_12_0_0 && + format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) + return -EINVAL; + + if (!dc->cap_funcs.get_dcc_compression_cap) return -EINVAL; input.format = format; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c index 45858bf1523d8..e140b7a04d724 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c @@ -54,7 +54,8 @@ static bool link_supports_psrsu(struct dc_link *link) if (amdgpu_dc_debug_mask & DC_DISABLE_PSR_SU) return false; - return dc_dmub_check_min_version(dc->ctx->dmub_srv->dmub); + /* Temporarily disable PSR-SU to avoid glitches */ + return false; } /* diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index 520a34a42827b..298668e9729c7 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -1455,7 +1455,8 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx) DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger); /* Invalid input */ - if (!plane_state->dst_rect.width || + if (!plane_state || + !plane_state->dst_rect.width || !plane_state->dst_rect.height || !plane_state->src_rect.width || !plane_state->src_rect.height) { @@ -3388,10 +3389,13 @@ static int get_norm_pix_clk(const struct dc_crtc_timing *timing) break; case COLOR_DEPTH_121212: normalized_pix_clk = (pix_clk * 36) / 24; - break; + break; + case COLOR_DEPTH_141414: + normalized_pix_clk = (pix_clk * 42) / 24; + break; case COLOR_DEPTH_161616: normalized_pix_clk = (pix_clk * 48) / 24; - break; + break; default: ASSERT(0); break; diff 
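The new COLOR_DEPTH_141414 case follows the existing scaling rule in get_norm_pix_clk(): the normalized pixel clock is the nominal clock multiplied by total bits per pixel (three components times the per-component depth) over the 24 bpp baseline, so 12 bpc gives 36/24 = 1.5x, 14 bpc gives 42/24 = 1.75x, and 16 bpc gives 48/24 = 2x. As a one-liner:

    /* Normalized pixel clock: scale by total bpp relative to 24 bpp. */
    static int example_norm_pix_clk(int pix_clk, int bits_per_component)
    {
            return pix_clk * (bits_per_component * 3) / 24;
    }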
--git a/drivers/gpu/drm/amd/display/dc/dce60/dce60_timing_generator.c b/drivers/gpu/drm/amd/display/dc/dce60/dce60_timing_generator.c index e5fb0e8333e43..e691a1cf33567 100644 --- a/drivers/gpu/drm/amd/display/dc/dce60/dce60_timing_generator.c +++ b/drivers/gpu/drm/amd/display/dc/dce60/dce60_timing_generator.c @@ -239,6 +239,7 @@ static const struct timing_generator_funcs dce60_tg_funcs = { dce60_timing_generator_enable_advanced_request, .configure_crc = dce60_configure_crc, .get_crc = dce110_get_crc, + .is_two_pixels_per_container = dce110_is_two_pixels_per_container, }; void dce60_timing_generator_construct( diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c index 67a8e22b1126d..e237ea1185a71 100644 --- a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c +++ b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c @@ -3042,6 +3042,7 @@ static int kv_dpm_hw_init(struct amdgpu_ip_block *ip_block) if (!amdgpu_dpm) return 0; + mutex_lock(&adev->pm.mutex); kv_dpm_setup_asic(adev); ret = kv_dpm_enable(adev); if (ret) @@ -3049,6 +3050,8 @@ static int kv_dpm_hw_init(struct amdgpu_ip_block *ip_block) else adev->pm.dpm_enabled = true; amdgpu_legacy_dpm_compute_clocks(adev); + mutex_unlock(&adev->pm.mutex); + return ret; } @@ -3066,32 +3069,42 @@ static int kv_dpm_suspend(struct amdgpu_ip_block *ip_block) { struct amdgpu_device *adev = ip_block->adev; + cancel_work_sync(&adev->pm.dpm.thermal.work); + if (adev->pm.dpm_enabled) { + mutex_lock(&adev->pm.mutex); + adev->pm.dpm_enabled = false; /* disable dpm */ kv_dpm_disable(adev); /* reset the power state */ adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps; + mutex_unlock(&adev->pm.mutex); } return 0; } static int kv_dpm_resume(struct amdgpu_ip_block *ip_block) { - int ret; + int ret = 0; struct amdgpu_device *adev = ip_block->adev; - if (adev->pm.dpm_enabled) { + if (!amdgpu_dpm) + return 0; + + if (!adev->pm.dpm_enabled) { + mutex_lock(&adev->pm.mutex); /* asic init will reset to the boot state */ kv_dpm_setup_asic(adev); ret = kv_dpm_enable(adev); - if (ret) + if (ret) { adev->pm.dpm_enabled = false; - else + } else { adev->pm.dpm_enabled = true; - if (adev->pm.dpm_enabled) amdgpu_legacy_dpm_compute_clocks(adev); + } + mutex_unlock(&adev->pm.mutex); } - return 0; + return ret; } static bool kv_dpm_is_idle(void *handle) diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c index e861355ebd75b..c7518b13e7879 100644 --- a/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c +++ b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c @@ -1009,9 +1009,12 @@ void amdgpu_dpm_thermal_work_handler(struct work_struct *work) enum amd_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL; int temp, size = sizeof(temp); - if (!adev->pm.dpm_enabled) - return; + mutex_lock(&adev->pm.mutex); + if (!adev->pm.dpm_enabled) { + mutex_unlock(&adev->pm.mutex); + return; + } if (!pp_funcs->read_sensor(adev->powerplay.pp_handle, AMDGPU_PP_SENSOR_GPU_TEMP, (void *)&temp, @@ -1033,4 +1036,5 @@ void amdgpu_dpm_thermal_work_handler(struct work_struct *work) adev->pm.dpm.state = dpm_state; amdgpu_legacy_dpm_compute_clocks(adev->powerplay.pp_handle); + mutex_unlock(&adev->pm.mutex); } diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c index a87dcf0974bc1..d6dfe2599ebea 100644 --- a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c +++ b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c @@ -7786,6 +7786,7 @@ static int 
si_dpm_hw_init(struct amdgpu_ip_block *ip_block) if (!amdgpu_dpm) return 0; + mutex_lock(&adev->pm.mutex); si_dpm_setup_asic(adev); ret = si_dpm_enable(adev); if (ret) @@ -7793,6 +7794,7 @@ static int si_dpm_hw_init(struct amdgpu_ip_block *ip_block) else adev->pm.dpm_enabled = true; amdgpu_legacy_dpm_compute_clocks(adev); + mutex_unlock(&adev->pm.mutex); return ret; } @@ -7810,32 +7812,44 @@ static int si_dpm_suspend(struct amdgpu_ip_block *ip_block) { struct amdgpu_device *adev = ip_block->adev; + cancel_work_sync(&adev->pm.dpm.thermal.work); + if (adev->pm.dpm_enabled) { + mutex_lock(&adev->pm.mutex); + adev->pm.dpm_enabled = false; /* disable dpm */ si_dpm_disable(adev); /* reset the power state */ adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps; + mutex_unlock(&adev->pm.mutex); } + return 0; } static int si_dpm_resume(struct amdgpu_ip_block *ip_block) { - int ret; + int ret = 0; struct amdgpu_device *adev = ip_block->adev; - if (adev->pm.dpm_enabled) { + if (!amdgpu_dpm) + return 0; + + if (!adev->pm.dpm_enabled) { /* asic init will reset to the boot state */ + mutex_lock(&adev->pm.mutex); si_dpm_setup_asic(adev); ret = si_dpm_enable(adev); - if (ret) + if (ret) { adev->pm.dpm_enabled = false; - else + } else { adev->pm.dpm_enabled = true; - if (adev->pm.dpm_enabled) amdgpu_legacy_dpm_compute_clocks(adev); + } + mutex_unlock(&adev->pm.mutex); } - return 0; + + return ret; } static bool si_dpm_is_idle(void *handle) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c index 9b2f4fe1578b8..ddb6444406d28 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c @@ -1895,16 +1895,6 @@ static int smu_v14_0_allow_ih_interrupt(struct smu_context *smu) NULL); } -static int smu_v14_0_process_pending_interrupt(struct smu_context *smu) -{ - int ret = 0; - - if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_ACDC_BIT)) - ret = smu_v14_0_allow_ih_interrupt(smu); - - return ret; -} - int smu_v14_0_enable_thermal_alert(struct smu_context *smu) { int ret = 0; @@ -1916,7 +1906,7 @@ int smu_v14_0_enable_thermal_alert(struct smu_context *smu) if (ret) return ret; - return smu_v14_0_process_pending_interrupt(smu); + return smu_v14_0_allow_ih_interrupt(smu); } int smu_v14_0_disable_thermal_alert(struct smu_context *smu) diff --git a/drivers/gpu/drm/display/drm_dp_mst_topology.c b/drivers/gpu/drm/display/drm_dp_mst_topology.c index 06c91c5b7f7c8..6d09bef671da0 100644 --- a/drivers/gpu/drm/display/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/display/drm_dp_mst_topology.c @@ -4025,6 +4025,22 @@ static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr) return 0; } +static bool primary_mstb_probing_is_done(struct drm_dp_mst_topology_mgr *mgr) +{ + bool probing_done = false; + + mutex_lock(&mgr->lock); + + if (mgr->mst_primary && drm_dp_mst_topology_try_get_mstb(mgr->mst_primary)) { + probing_done = mgr->mst_primary->link_address_sent; + drm_dp_mst_topology_put_mstb(mgr->mst_primary); + } + + mutex_unlock(&mgr->lock); + + return probing_done; +} + static inline bool drm_dp_mst_process_up_req(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_pending_up_req *up_req) @@ -4055,8 +4071,12 @@ drm_dp_mst_process_up_req(struct drm_dp_mst_topology_mgr *mgr, /* TODO: Add missing handler for DP_RESOURCE_STATUS_NOTIFY events */ if (msg->req_type == DP_CONNECTION_STATUS_NOTIFY) { - dowork = drm_dp_mst_handle_conn_stat(mstb, &msg->u.conn_stat); - hotplug = true; + if 
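The kv_dpm and si_dpm suspend paths share one subtlety: cancel_work_sync() on the thermal work runs before pm.mutex is taken, because the work handler itself now grabs pm.mutex; cancelling while holding the lock could deadlock against a handler already blocked on it. The ordering, boiled down to a hypothetical device:

    #include <linux/mutex.h>
    #include <linux/workqueue.h>

    struct example_pm {
            struct work_struct thermal_work;
            struct mutex lock;
            bool dpm_enabled;
    };

    static int example_suspend(struct example_pm *pm)
    {
            /* Outside the mutex: the handler takes pm->lock itself. */
            cancel_work_sync(&pm->thermal_work);

            mutex_lock(&pm->lock);
            pm->dpm_enabled = false;    /* handler checks this under lock */
            /* ... disable the hardware ... */
            mutex_unlock(&pm->lock);
            return 0;
    }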
(!primary_mstb_probing_is_done(mgr)) { + drm_dbg_kms(mgr->dev, "Got CSN before finish topology probing. Skip it.\n"); + } else { + dowork = drm_dp_mst_handle_conn_stat(mstb, &msg->u.conn_stat); + hotplug = true; + } } drm_dp_mst_topology_put_mstb(mstb); @@ -4138,10 +4158,11 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr) drm_dp_send_up_ack_reply(mgr, mst_primary, up_req->msg.req_type, false); + drm_dp_mst_topology_put_mstb(mst_primary); + if (up_req->msg.req_type == DP_CONNECTION_STATUS_NOTIFY) { const struct drm_dp_connection_status_notify *conn_stat = &up_req->msg.u.conn_stat; - bool handle_csn; drm_dbg_kms(mgr->dev, "Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n", conn_stat->port_number, @@ -4150,16 +4171,6 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr) conn_stat->message_capability_status, conn_stat->input_port, conn_stat->peer_device_type); - - mutex_lock(&mgr->probe_lock); - handle_csn = mst_primary->link_address_sent; - mutex_unlock(&mgr->probe_lock); - - if (!handle_csn) { - drm_dbg_kms(mgr->dev, "Got CSN before finish topology probing. Skip it."); - kfree(up_req); - goto out_put_primary; - } } else if (up_req->msg.req_type == DP_RESOURCE_STATUS_NOTIFY) { const struct drm_dp_resource_status_notify *res_stat = &up_req->msg.u.resource_stat; @@ -4174,9 +4185,6 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr) list_add_tail(&up_req->next, &mgr->up_req_list); mutex_unlock(&mgr->up_req_lock); queue_work(system_long_wq, &mgr->up_req_work); - -out_put_primary: - drm_dp_mst_topology_put_mstb(mst_primary); out_clear_reply: reset_msg_rx_state(&mgr->up_req_recv); return ret; diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c index 370dc676e3aa5..fd36b8fd54e9e 100644 --- a/drivers/gpu/drm/drm_atomic_uapi.c +++ b/drivers/gpu/drm/drm_atomic_uapi.c @@ -956,6 +956,10 @@ int drm_atomic_connector_commit_dpms(struct drm_atomic_state *state, if (mode != DRM_MODE_DPMS_ON) mode = DRM_MODE_DPMS_OFF; + + if (connector->dpms == mode) + goto out; + connector->dpms = mode; crtc = connector->state->crtc; diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c index 5f24d6b41cc6d..48b08c9611a7b 100644 --- a/drivers/gpu/drm/drm_connector.c +++ b/drivers/gpu/drm/drm_connector.c @@ -1427,6 +1427,10 @@ EXPORT_SYMBOL(drm_hdmi_connector_get_output_format_name); * callback. For atomic drivers the remapping to the "ACTIVE" property is * implemented in the DRM core. * + * On atomic drivers any DPMS setproperty ioctl where the value does not + * change is completely skipped, otherwise a full atomic commit will occur. + * On legacy drivers the exact behavior is driver specific. + * * Note that this property cannot be set through the MODE_ATOMIC ioctl, * userspace must use "ACTIVE" on the CRTC instead. 
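The drm_atomic_uapi.c hunk above makes a repeated DPMS write with an unchanged value return early instead of building a full atomic commit. A hypothetical libdrm caller shows the effect (fd, conn_id and dpms_prop are assumed to be obtained elsewhere):

	/* powers the display off via a full atomic commit */
	drmModeConnectorSetProperty(fd, conn_id, dpms_prop, DRM_MODE_DPMS_OFF);

	/* same value again: with the change above the kernel now returns
	 * without touching the atomic state at all */
	drmModeConnectorSetProperty(fd, conn_id, dpms_prop, DRM_MODE_DPMS_OFF);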
* diff --git a/drivers/gpu/drm/drm_fbdev_dma.c b/drivers/gpu/drm/drm_fbdev_dma.c index b14b581c059d3..02a516e771927 100644 --- a/drivers/gpu/drm/drm_fbdev_dma.c +++ b/drivers/gpu/drm/drm_fbdev_dma.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: MIT #include <linux/fb.h> +#include <linux/vmalloc.h> #include <drm/drm_drv.h> #include <drm/drm_fbdev_dma.h> @@ -70,37 +71,102 @@ static const struct fb_ops drm_fbdev_dma_fb_ops = { .fb_destroy = drm_fbdev_dma_fb_destroy, }; -FB_GEN_DEFAULT_DEFERRED_DMAMEM_OPS(drm_fbdev_dma, +FB_GEN_DEFAULT_DEFERRED_DMAMEM_OPS(drm_fbdev_dma_shadowed, drm_fb_helper_damage_range, drm_fb_helper_damage_area); -static int drm_fbdev_dma_deferred_fb_mmap(struct fb_info *info, struct vm_area_struct *vma) +static void drm_fbdev_dma_shadowed_fb_destroy(struct fb_info *info) { struct drm_fb_helper *fb_helper = info->par; - struct drm_framebuffer *fb = fb_helper->fb; - struct drm_gem_dma_object *dma = drm_fb_dma_get_gem_obj(fb, 0); + void *shadow = info->screen_buffer; + + if (!fb_helper->dev) + return; - if (!dma->map_noncoherent) - vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); + if (info->fbdefio) + fb_deferred_io_cleanup(info); + drm_fb_helper_fini(fb_helper); + vfree(shadow); - return fb_deferred_io_mmap(info, vma); + drm_client_buffer_vunmap(fb_helper->buffer); + drm_client_framebuffer_delete(fb_helper->buffer); + drm_client_release(&fb_helper->client); + drm_fb_helper_unprepare(fb_helper); + kfree(fb_helper); } -static const struct fb_ops drm_fbdev_dma_deferred_fb_ops = { +static const struct fb_ops drm_fbdev_dma_shadowed_fb_ops = { .owner = THIS_MODULE, .fb_open = drm_fbdev_dma_fb_open, .fb_release = drm_fbdev_dma_fb_release, - __FB_DEFAULT_DEFERRED_OPS_RDWR(drm_fbdev_dma), + FB_DEFAULT_DEFERRED_OPS(drm_fbdev_dma_shadowed), DRM_FB_HELPER_DEFAULT_OPS, - __FB_DEFAULT_DEFERRED_OPS_DRAW(drm_fbdev_dma), - .fb_mmap = drm_fbdev_dma_deferred_fb_mmap, - .fb_destroy = drm_fbdev_dma_fb_destroy, + .fb_destroy = drm_fbdev_dma_shadowed_fb_destroy, }; /* * struct drm_fb_helper */ +static void drm_fbdev_dma_damage_blit_real(struct drm_fb_helper *fb_helper, + struct drm_clip_rect *clip, + struct iosys_map *dst) +{ + struct drm_framebuffer *fb = fb_helper->fb; + size_t offset = clip->y1 * fb->pitches[0]; + size_t len = clip->x2 - clip->x1; + unsigned int y; + void *src; + + switch (drm_format_info_bpp(fb->format, 0)) { + case 1: + offset += clip->x1 / 8; + len = DIV_ROUND_UP(len + clip->x1 % 8, 8); + break; + case 2: + offset += clip->x1 / 4; + len = DIV_ROUND_UP(len + clip->x1 % 4, 4); + break; + case 4: + offset += clip->x1 / 2; + len = DIV_ROUND_UP(len + clip->x1 % 2, 2); + break; + default: + offset += clip->x1 * fb->format->cpp[0]; + len *= fb->format->cpp[0]; + break; + } + + src = fb_helper->info->screen_buffer + offset; + iosys_map_incr(dst, offset); /* go to first pixel within clip rect */ + + for (y = clip->y1; y < clip->y2; y++) { + iosys_map_memcpy_to(dst, 0, src, len); + iosys_map_incr(dst, fb->pitches[0]); + src += fb->pitches[0]; + } +} + +static int drm_fbdev_dma_damage_blit(struct drm_fb_helper *fb_helper, + struct drm_clip_rect *clip) +{ + struct drm_client_buffer *buffer = fb_helper->buffer; + struct iosys_map dst; + + /* + * For fbdev emulation, we only have to protect against fbdev modeset + * operations. Nothing else will involve the client buffer's BO. So it + * is sufficient to acquire struct drm_fb_helper.lock here.
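The bpp switch in the blitter above converts the clip's x-range from pixels into a byte offset and length. Worked through for an assumed 2-bpp clip with x1 = 5 and x2 = 13 (four pixels per byte):

	offset += 5 / 4;				/* x1 lands in byte 1 of the row */
	len = DIV_ROUND_UP((13 - 5) + 5 % 4, 4);	/* = 3 bytes: the partial first byte widens the copy */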
+ */ + mutex_lock(&fb_helper->lock); + + dst = buffer->map; + drm_fbdev_dma_damage_blit_real(fb_helper, clip, &dst); + + mutex_unlock(&fb_helper->lock); + + return 0; +} static int drm_fbdev_dma_helper_fb_dirty(struct drm_fb_helper *helper, struct drm_clip_rect *clip) { @@ -112,6 +178,10 @@ static int drm_fbdev_dma_helper_fb_dirty(struct drm_fb_helper *helper, return 0; if (helper->fb->funcs->dirty) { + ret = drm_fbdev_dma_damage_blit(helper, clip); + if (drm_WARN_ONCE(dev, ret, "Damage blitter failed: ret=%d\n", ret)) + return ret; + ret = helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, clip, 1); if (drm_WARN_ONCE(dev, ret, "Dirty helper failed: ret=%d\n", ret)) return ret; @@ -128,14 +198,80 @@ static const struct drm_fb_helper_funcs drm_fbdev_dma_helper_funcs = { * struct drm_fb_helper */ +static int drm_fbdev_dma_driver_fbdev_probe_tail(struct drm_fb_helper *fb_helper, + struct drm_fb_helper_surface_size *sizes) +{ + struct drm_device *dev = fb_helper->dev; + struct drm_client_buffer *buffer = fb_helper->buffer; + struct drm_gem_dma_object *dma_obj = to_drm_gem_dma_obj(buffer->gem); + struct drm_framebuffer *fb = fb_helper->fb; + struct fb_info *info = fb_helper->info; + struct iosys_map map = buffer->map; + + info->fbops = &drm_fbdev_dma_fb_ops; + + /* screen */ + info->flags |= FBINFO_VIRTFB; /* system memory */ + if (dma_obj->map_noncoherent) + info->flags |= FBINFO_READS_FAST; /* signal caching */ + info->screen_size = sizes->surface_height * fb->pitches[0]; + info->screen_buffer = map.vaddr; + if (!(info->flags & FBINFO_HIDE_SMEM_START)) { + if (!drm_WARN_ON(dev, is_vmalloc_addr(info->screen_buffer))) + info->fix.smem_start = page_to_phys(virt_to_page(info->screen_buffer)); + } + info->fix.smem_len = info->screen_size; + + return 0; +} + +static int drm_fbdev_dma_driver_fbdev_probe_tail_shadowed(struct drm_fb_helper *fb_helper, + struct drm_fb_helper_surface_size *sizes) +{ + struct drm_client_buffer *buffer = fb_helper->buffer; + struct fb_info *info = fb_helper->info; + size_t screen_size = buffer->gem->size; + void *screen_buffer; + int ret; + + /* + * Deferred I/O requires struct page for framebuffer memory, + * which is not guaranteed for all DMA ranges. We thus create + * a shadow buffer in system memory. 
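Once the shadow exists, fb_deferred_io tracks which pages were written and the fb_dirty path above blits only the damaged scanlines into the DMA buffer. A condensed sketch of that copy direction (hypothetical helper, whole-pitch rows, sub-byte formats ignored):

	static void flush_damage(void *shadow, struct iosys_map *scanout,
				 unsigned int pitch, struct drm_clip_rect *clip)
	{
		size_t off = clip->y1 * (size_t)pitch;
		unsigned int y;

		iosys_map_incr(scanout, off);	/* first damaged scanline */
		for (y = clip->y1; y < clip->y2; y++) {
			iosys_map_memcpy_to(scanout, 0, shadow + off, pitch);
			iosys_map_incr(scanout, pitch);
			off += pitch;
		}
	}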
+ */ + screen_buffer = vzalloc(screen_size); + if (!screen_buffer) + return -ENOMEM; + + info->fbops = &drm_fbdev_dma_shadowed_fb_ops; + + /* screen */ + info->flags |= FBINFO_VIRTFB; /* system memory */ + info->flags |= FBINFO_READS_FAST; /* signal caching */ + info->screen_buffer = screen_buffer; + info->fix.smem_len = screen_size; + + fb_helper->fbdefio.delay = HZ / 20; + fb_helper->fbdefio.deferred_io = drm_fb_helper_deferred_io; + + info->fbdefio = &fb_helper->fbdefio; + ret = fb_deferred_io_init(info); + if (ret) + goto err_vfree; + + return 0; + +err_vfree: + vfree(screen_buffer); + return ret; +} + int drm_fbdev_dma_driver_fbdev_probe(struct drm_fb_helper *fb_helper, struct drm_fb_helper_surface_size *sizes) { struct drm_client_dev *client = &fb_helper->client; struct drm_device *dev = fb_helper->dev; - bool use_deferred_io = false; struct drm_client_buffer *buffer; - struct drm_gem_dma_object *dma_obj; struct drm_framebuffer *fb; struct fb_info *info; u32 format; @@ -152,19 +288,9 @@ int drm_fbdev_dma_driver_fbdev_probe(struct drm_fb_helper *fb_helper, sizes->surface_height, format); if (IS_ERR(buffer)) return PTR_ERR(buffer); - dma_obj = to_drm_gem_dma_obj(buffer->gem); fb = buffer->fb; - /* - * Deferred I/O requires struct page for framebuffer memory, - * which is not guaranteed for all DMA ranges. We thus only - * install deferred I/O if we have a framebuffer that requires - * it. - */ - if (fb->funcs->dirty) - use_deferred_io = true; - ret = drm_client_buffer_vmap(buffer, &map); if (ret) { goto err_drm_client_buffer_delete; @@ -185,45 +311,12 @@ int drm_fbdev_dma_driver_fbdev_probe(struct drm_fb_helper *fb_helper, drm_fb_helper_fill_info(info, fb_helper, sizes); - if (use_deferred_io) - info->fbops = &drm_fbdev_dma_deferred_fb_ops; + if (fb->funcs->dirty) + ret = drm_fbdev_dma_driver_fbdev_probe_tail_shadowed(fb_helper, sizes); else - info->fbops = &drm_fbdev_dma_fb_ops; - - /* screen */ - info->flags |= FBINFO_VIRTFB; /* system memory */ - if (dma_obj->map_noncoherent) - info->flags |= FBINFO_READS_FAST; /* signal caching */ - info->screen_size = sizes->surface_height * fb->pitches[0]; - info->screen_buffer = map.vaddr; - if (!(info->flags & FBINFO_HIDE_SMEM_START)) { - if (!drm_WARN_ON(dev, is_vmalloc_addr(info->screen_buffer))) - info->fix.smem_start = page_to_phys(virt_to_page(info->screen_buffer)); - } - info->fix.smem_len = info->screen_size; - - /* - * Only set up deferred I/O if the screen buffer supports - * it. If this disagrees with the previous test for ->dirty, - * mmap on the /dev/fb file might not work correctly. 
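The checks being removed here existed because deferred I/O needs a struct page behind every faulting framebuffer address; memory from vzalloc() always provides one, which is what lets the shadow path install deferred I/O unconditionally. A sketch of the property involved (not from the patch):

	/* resolves to a real struct page for any vzalloc()'ed buffer */
	struct page *page = vmalloc_to_page(info->screen_buffer + offset);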
- */ - if (!is_vmalloc_addr(info->screen_buffer) && info->fix.smem_start) { - unsigned long pfn = info->fix.smem_start >> PAGE_SHIFT; - - if (drm_WARN_ON(dev, !pfn_to_page(pfn))) - use_deferred_io = false; - } - - /* deferred I/O */ - if (use_deferred_io) { - fb_helper->fbdefio.delay = HZ / 20; - fb_helper->fbdefio.deferred_io = drm_fb_helper_deferred_io; - - info->fbdefio = &fb_helper->fbdefio; - ret = fb_deferred_io_init(info); - if (ret) - goto err_drm_fb_helper_release_info; - } + ret = drm_fbdev_dma_driver_fbdev_probe_tail(fb_helper, sizes); + if (ret) + goto err_drm_fb_helper_release_info; return 0; diff --git a/drivers/gpu/drm/drm_panic_qr.rs b/drivers/gpu/drm/drm_panic_qr.rs index bcf248f69252c..6903e2010cb98 100644 --- a/drivers/gpu/drm/drm_panic_qr.rs +++ b/drivers/gpu/drm/drm_panic_qr.rs @@ -545,7 +545,7 @@ impl EncodedMsg<'_> { } self.push(&mut offset, (MODE_STOP, 4)); - let pad_offset = (offset + 7) / 8; + let pad_offset = offset.div_ceil(8); for i in pad_offset..self.version.max_data() { self.data[i] = PADDING[(i & 1) ^ (pad_offset & 1)]; } @@ -659,7 +659,7 @@ struct QrImage<'a> { impl QrImage<'_> { fn new<'a, 'b>(em: &'b EncodedMsg<'b>, qrdata: &'a mut [u8]) -> QrImage<'a> { let width = em.version.width(); - let stride = (width + 7) / 8; + let stride = width.div_ceil(8); let data = qrdata; let mut qr_image = QrImage { @@ -911,16 +911,16 @@ impl QrImage<'_> { /// /// * `url`: The base URL of the QR code. It will be encoded as Binary segment. /// * `data`: A pointer to the binary data, to be encoded. if URL is NULL, it -/// will be encoded as binary segment, otherwise it will be encoded -/// efficiently as a numeric segment, and appended to the URL. +/// will be encoded as binary segment, otherwise it will be encoded +/// efficiently as a numeric segment, and appended to the URL. /// * `data_len`: Length of the data, that needs to be encoded, must be less -/// than data_size. +/// than data_size. /// * `data_size`: Size of data buffer, it should be at least 4071 bytes to hold -/// a V40 QR code. It will then be overwritten with the QR code image. +/// a V40 QR code. It will then be overwritten with the QR code image. /// * `tmp`: A temporary buffer that the QR code encoder will use, to write the -/// segments and ECC. +/// segments and ECC. /// * `tmp_size`: Size of the temporary buffer, it must be at least 3706 bytes -/// long for V40. +/// long for V40. 
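The div_ceil() conversions in the Rust hunks above compute ceiling division; the same cleanup is usually spelled with DIV_ROUND_UP() in the kernel's C code (from <linux/math.h>):

	stride = DIV_ROUND_UP(width, 8);	/* bytes per row of 1-bit pixels,
						 * identical to the old (width + 7) / 8 */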
/// /// # Safety /// diff --git a/drivers/gpu/drm/gma500/mid_bios.c b/drivers/gpu/drm/gma500/mid_bios.c index 7e76790c6a81f..cba97d7db131d 100644 --- a/drivers/gpu/drm/gma500/mid_bios.c +++ b/drivers/gpu/drm/gma500/mid_bios.c @@ -279,6 +279,11 @@ static void mid_get_vbt_data(struct drm_psb_private *dev_priv) 0, PCI_DEVFN(2, 0)); int ret = -1; + if (pci_gfx_root == NULL) { + WARN_ON(1); + return; + } + /* Get the address of the platform config vbt */ pci_read_config_dword(pci_gfx_root, 0xFC, &addr); pci_dev_put(pci_gfx_root); diff --git a/drivers/gpu/drm/hyperv/hyperv_drm_drv.c b/drivers/gpu/drm/hyperv/hyperv_drm_drv.c index f59abfa7622ac..0d49f168a919d 100644 --- a/drivers/gpu/drm/hyperv/hyperv_drm_drv.c +++ b/drivers/gpu/drm/hyperv/hyperv_drm_drv.c @@ -154,6 +154,7 @@ static int hyperv_vmbus_probe(struct hv_device *hdev, return 0; err_free_mmio: + iounmap(hv->vram); vmbus_free_mmio(hv->mem->start, hv->fb_size); err_vmbus_close: vmbus_close(hdev->channel); @@ -172,6 +173,7 @@ static void hyperv_vmbus_remove(struct hv_device *hdev) vmbus_close(hdev->channel); hv_set_drvdata(hdev, NULL); + iounmap(hv->vram); vmbus_free_mmio(hv->mem->start, hv->fb_size); } diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c index c977b74f82f0b..82bf6c654de25 100644 --- a/drivers/gpu/drm/i915/display/icl_dsi.c +++ b/drivers/gpu/drm/i915/display/icl_dsi.c @@ -809,8 +809,8 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder, /* select data lane width */ tmp = intel_de_read(display, TRANS_DDI_FUNC_CTL(display, dsi_trans)); - tmp &= ~DDI_PORT_WIDTH_MASK; - tmp |= DDI_PORT_WIDTH(intel_dsi->lane_count); + tmp &= ~TRANS_DDI_PORT_WIDTH_MASK; + tmp |= TRANS_DDI_PORT_WIDTH(intel_dsi->lane_count); /* select input pipe */ tmp &= ~TRANS_DDI_EDP_INPUT_MASK; diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c index acb986bc1f33a..ff2cf3daa7a2b 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -658,7 +658,6 @@ void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; - bool is_mst = intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST); u32 ctl; if (DISPLAY_VER(dev_priv) >= 11) @@ -678,8 +677,7 @@ void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state TRANS_DDI_PORT_SYNC_MASTER_SELECT_MASK); if (DISPLAY_VER(dev_priv) >= 12) { - if (!intel_dp_mst_is_master_trans(crtc_state) || - (!is_mst && intel_dp_is_uhbr(crtc_state))) { + if (!intel_dp_mst_is_master_trans(crtc_state)) { ctl &= ~(TGL_TRANS_DDI_PORT_MASK | TRANS_DDI_MODE_SELECT_MASK); } @@ -868,7 +866,7 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder, encoder->base.base.id, encoder->base.name); if (!mst_pipe_mask && dp128b132b_pipe_mask) { - struct intel_dp *intel_dp = enc_to_intel_dp(encoder); + struct intel_digital_port *dig_port = enc_to_dig_port(encoder); /* * If we don't have 8b/10b MST, but have more than one @@ -880,7 +878,8 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder, * we don't expect MST to have been enabled at that point, and * can assume it's SST. 
*/ - if (hweight8(dp128b132b_pipe_mask) > 1 || intel_dp->is_mst) + if (hweight8(dp128b132b_pipe_mask) > 1 || + intel_dp_mst_encoder_active_links(dig_port)) mst_pipe_mask = dp128b132b_pipe_mask; } @@ -3134,7 +3133,7 @@ static void intel_ddi_post_disable_dp(struct intel_atomic_state *state, intel_dp_set_power(intel_dp, DP_SET_POWER_D3); if (DISPLAY_VER(dev_priv) >= 12) { - if (is_mst) { + if (is_mst || intel_dp_is_uhbr(old_crtc_state)) { enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder; intel_de_rmw(dev_priv, @@ -3487,7 +3486,7 @@ static void intel_ddi_enable_hdmi(struct intel_atomic_state *state, intel_de_rmw(dev_priv, XELPDP_PORT_BUF_CTL1(dev_priv, port), XELPDP_PORT_WIDTH_MASK | XELPDP_PORT_REVERSAL, port_buf); - buf_ctl |= DDI_PORT_WIDTH(lane_count); + buf_ctl |= DDI_PORT_WIDTH(crtc_state->lane_count); if (DISPLAY_VER(dev_priv) >= 20) buf_ctl |= XE2LPD_DDI_BUF_D2D_LINK_ENABLE; @@ -4153,13 +4152,13 @@ static void intel_ddi_read_func_ctl(struct intel_encoder *encoder, } else if (ddi_mode == TRANS_DDI_MODE_SELECT_DP_MST) { intel_ddi_read_func_ctl_dp_mst(encoder, pipe_config, ddi_func_ctl); } else if (ddi_mode == TRANS_DDI_MODE_SELECT_FDI_OR_128B132B && HAS_DP20(display)) { - struct intel_dp *intel_dp = enc_to_intel_dp(encoder); + struct intel_digital_port *dig_port = enc_to_dig_port(encoder); /* * If this is true, we know we're being called from mst stream * encoder's ->get_config(). */ - if (intel_dp->is_mst) + if (intel_dp_mst_encoder_active_links(dig_port)) intel_ddi_read_func_ctl_dp_mst(encoder, pipe_config, ddi_func_ctl); else intel_ddi_read_func_ctl_dp_sst(encoder, pipe_config, ddi_func_ctl); diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 4271da219b410..c9dcf2bbd4c73 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -6628,12 +6628,30 @@ static int intel_async_flip_check_hw(struct intel_atomic_state *state, struct in static int intel_joiner_add_affected_crtcs(struct intel_atomic_state *state) { struct drm_i915_private *i915 = to_i915(state->base.dev); + const struct intel_plane_state *plane_state; struct intel_crtc_state *crtc_state; + struct intel_plane *plane; struct intel_crtc *crtc; u8 affected_pipes = 0; u8 modeset_pipes = 0; int i; + /* + * Any plane which is in use by the joiner needs its crtc. + * Pull those in first as this will not have happened yet + * if the plane remains disabled according to uapi. 
+ */ + for_each_new_intel_plane_in_state(state, plane, plane_state, i) { + crtc = to_intel_crtc(plane_state->hw.crtc); + if (!crtc) + continue; + + crtc_state = intel_atomic_get_crtc_state(&state->base, crtc); + if (IS_ERR(crtc_state)) + return PTR_ERR(crtc_state); + } + + /* Now pull in all joined crtcs */ for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { affected_pipes |= crtc_state->joiner_pipes; if (intel_crtc_needs_modeset(crtc_state)) @@ -7812,9 +7830,6 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state) intel_program_dpkgc_latency(state); - if (state->modeset) - intel_set_cdclk_post_plane_update(state); - intel_wait_for_vblank_workers(state); /* FIXME: We should call drm_atomic_helper_commit_hw_done() here @@ -7888,6 +7903,8 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state) intel_verify_planes(state); intel_sagv_post_plane_update(state); + if (state->modeset) + intel_set_cdclk_post_plane_update(state); intel_pmdemand_post_plane_update(state); drm_atomic_helper_commit_hw_done(&state->base); diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c index 8b1977cfec503..6696a32cdd3e6 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c +++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c @@ -1563,7 +1563,7 @@ intel_dp_128b132b_link_train(struct intel_dp *intel_dp, if (wait_for(intel_dp_128b132b_intra_hop(intel_dp, crtc_state) == 0, 500)) { lt_err(intel_dp, DP_PHY_DPRX, "128b/132b intra-hop not clear\n"); - return false; + goto out; } if (intel_dp_128b132b_lane_eq(intel_dp, crtc_state) && @@ -1575,6 +1575,19 @@ intel_dp_128b132b_link_train(struct intel_dp *intel_dp, passed ? "passed" : "failed", crtc_state->port_clock, crtc_state->lane_count); +out: + /* + * Ensure that the training pattern does get set to TPS2 even in case + * of a failure, as is the case at the end of a passing link training + * and what is expected by the transcoder. Leaving TPS1 set (and + * disabling the link train mode in DP_TP_CTL later from TPS1 directly) + * would result in a stuck transcoder HW state and flip-done timeouts + * later in the modeset sequence. + */ + if (!passed) + intel_dp_program_link_training_pattern(intel_dp, crtc_state, + DP_PHY_DPRX, DP_TRAINING_PATTERN_2); + return passed; } diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c index a65cf97ad12df..86d6185fda50a 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c @@ -1867,7 +1867,8 @@ intel_dp_mst_encoder_init(struct intel_digital_port *dig_port, int conn_base_id) /* create encoders */ mst_stream_encoders_create(dig_port); ret = drm_dp_mst_topology_mgr_init(&intel_dp->mst_mgr, display->drm, - &intel_dp->aux, 16, 3, conn_base_id); + &intel_dp->aux, 16, + INTEL_NUM_PIPES(display), conn_base_id); if (ret) { intel_dp->mst_mgr.cbs = NULL; return ret; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c index 21274aa9bdddc..c3dabb8579605 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c @@ -164,6 +164,9 @@ static unsigned int tile_row_pages(const struct drm_i915_gem_object *obj) * 4 - Support multiple fault handlers per object depending on object's * backing storage (a.k.a. MMAP_OFFSET). 
* + * 5 - Support multiple partial mmaps(mmap part of BO + unmap a offset, multiple + * times with different size and offset). + * * Restrictions: * * * snoopable objects cannot be accessed via the GTT. It can cause machine @@ -191,7 +194,7 @@ static unsigned int tile_row_pages(const struct drm_i915_gem_object *obj) */ int i915_gem_mmap_gtt_version(void) { - return 4; + return 5; } static inline struct i915_gtt_view diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c index cc05bd9e43b49..3fce5c0001444 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c @@ -3449,10 +3449,10 @@ static inline int guc_lrc_desc_unpin(struct intel_context *ce) */ ret = deregister_context(ce, ce->guc_id.id); if (ret) { - spin_lock(&ce->guc_state.lock); + spin_lock_irqsave(&ce->guc_state.lock, flags); set_context_registered(ce); clr_context_destroyed(ce); - spin_unlock(&ce->guc_state.lock); + spin_unlock_irqrestore(&ce->guc_state.lock, flags); /* * As gt-pm is awake at function entry, intel_wakeref_put_async merely decrements * the wakeref immediately but per function spec usage call this after unlock. diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 765e6c0528fb0..786c727aea454 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -3633,7 +3633,7 @@ enum skl_power_gate { #define DDI_BUF_IS_IDLE (1 << 7) #define DDI_BUF_CTL_TC_PHY_OWNERSHIP REG_BIT(6) #define DDI_A_4_LANES (1 << 4) -#define DDI_PORT_WIDTH(width) (((width) - 1) << 1) +#define DDI_PORT_WIDTH(width) (((width) == 3 ? 4 : ((width) - 1)) << 1) #define DDI_PORT_WIDTH_MASK (7 << 1) #define DDI_PORT_WIDTH_SHIFT 1 #define DDI_INIT_DISPLAY_DETECTED (1 << 0) diff --git a/drivers/gpu/drm/imagination/Makefile b/drivers/gpu/drm/imagination/Makefile index 9bc6a3884c223..3d9d4d40fb806 100644 --- a/drivers/gpu/drm/imagination/Makefile +++ b/drivers/gpu/drm/imagination/Makefile @@ -1,8 +1,6 @@ # SPDX-License-Identifier: GPL-2.0-only OR MIT # Copyright (c) 2023 Imagination Technologies Ltd. 
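A note on the DDI_PORT_WIDTH() fix in the i915_reg.h hunk above: the port-width field is not a plain lanes-minus-one encoding, so the macro now yields (values derived directly from the fixed definition):

	DDI_PORT_WIDTH(1);	/* 0 << 1, x1 */
	DDI_PORT_WIDTH(2);	/* 1 << 1, x2 */
	DDI_PORT_WIDTH(3);	/* 4 << 1, x3 - was wrongly 2 << 1 before */
	DDI_PORT_WIDTH(4);	/* 3 << 1, x4 */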
-subdir-ccflags-y := -I$(src) - powervr-y := \ pvr_ccb.o \ pvr_cccb.o \ diff --git a/drivers/gpu/drm/imagination/pvr_fw_meta.c b/drivers/gpu/drm/imagination/pvr_fw_meta.c index c39beb70c3173..6d13864851fc2 100644 --- a/drivers/gpu/drm/imagination/pvr_fw_meta.c +++ b/drivers/gpu/drm/imagination/pvr_fw_meta.c @@ -527,8 +527,10 @@ pvr_meta_vm_map(struct pvr_device *pvr_dev, struct pvr_fw_object *fw_obj) static void pvr_meta_vm_unmap(struct pvr_device *pvr_dev, struct pvr_fw_object *fw_obj) { - pvr_vm_unmap(pvr_dev->kernel_vm_ctx, fw_obj->fw_mm_node.start, - fw_obj->fw_mm_node.size); + struct pvr_gem_object *pvr_obj = fw_obj->gem; + + pvr_vm_unmap_obj(pvr_dev->kernel_vm_ctx, pvr_obj, + fw_obj->fw_mm_node.start, fw_obj->fw_mm_node.size); } static bool diff --git a/drivers/gpu/drm/imagination/pvr_fw_trace.c b/drivers/gpu/drm/imagination/pvr_fw_trace.c index 73707daa4e52d..5dbb636d7d4ff 100644 --- a/drivers/gpu/drm/imagination/pvr_fw_trace.c +++ b/drivers/gpu/drm/imagination/pvr_fw_trace.c @@ -333,8 +333,8 @@ static int fw_trace_seq_show(struct seq_file *s, void *v) if (sf_id == ROGUE_FW_SF_LAST) return -EINVAL; - timestamp = read_fw_trace(trace_seq_data, 1) | - ((u64)read_fw_trace(trace_seq_data, 2) << 32); + timestamp = ((u64)read_fw_trace(trace_seq_data, 1) << 32) | + read_fw_trace(trace_seq_data, 2); timestamp = (timestamp & ~ROGUE_FWT_TIMESTAMP_TIME_CLRMSK) >> ROGUE_FWT_TIMESTAMP_TIME_SHIFT; diff --git a/drivers/gpu/drm/imagination/pvr_queue.c b/drivers/gpu/drm/imagination/pvr_queue.c index c4f08432882b1..43411be930a21 100644 --- a/drivers/gpu/drm/imagination/pvr_queue.c +++ b/drivers/gpu/drm/imagination/pvr_queue.c @@ -109,12 +109,20 @@ pvr_queue_fence_get_driver_name(struct dma_fence *f) return PVR_DRIVER_NAME; } +static void pvr_queue_fence_release_work(struct work_struct *w) +{ + struct pvr_queue_fence *fence = container_of(w, struct pvr_queue_fence, release_work); + + pvr_context_put(fence->queue->ctx); + dma_fence_free(&fence->base); +} + static void pvr_queue_fence_release(struct dma_fence *f) { struct pvr_queue_fence *fence = container_of(f, struct pvr_queue_fence, base); + struct pvr_device *pvr_dev = fence->queue->ctx->pvr_dev; - pvr_context_put(fence->queue->ctx); - dma_fence_free(f); + queue_work(pvr_dev->sched_wq, &fence->release_work); } static const char * @@ -268,6 +276,7 @@ pvr_queue_fence_init(struct dma_fence *f, pvr_context_get(queue->ctx); fence->queue = queue; + INIT_WORK(&fence->release_work, pvr_queue_fence_release_work); dma_fence_init(&fence->base, fence_ops, &fence_ctx->lock, fence_ctx->id, atomic_inc_return(&fence_ctx->seqno)); @@ -304,8 +313,9 @@ pvr_queue_cccb_fence_init(struct dma_fence *fence, struct pvr_queue *queue) static void pvr_queue_job_fence_init(struct dma_fence *fence, struct pvr_queue *queue) { - pvr_queue_fence_init(fence, queue, &pvr_queue_job_fence_ops, - &queue->job_fence_ctx); + if (!fence->ops) + pvr_queue_fence_init(fence, queue, &pvr_queue_job_fence_ops, + &queue->job_fence_ctx); } /** diff --git a/drivers/gpu/drm/imagination/pvr_queue.h b/drivers/gpu/drm/imagination/pvr_queue.h index e06ced69302fc..93fe9ac9f58cc 100644 --- a/drivers/gpu/drm/imagination/pvr_queue.h +++ b/drivers/gpu/drm/imagination/pvr_queue.h @@ -5,6 +5,7 @@ #define PVR_QUEUE_H #include <drm/gpu_scheduler.h> +#include <linux/workqueue.h> #include "pvr_cccb.h" #include "pvr_device.h" @@ -63,6 +64,9 @@ struct pvr_queue_fence { /** @queue: Queue that created this fence. */ struct pvr_queue *queue; + + /** @release_work: Fence release work structure.
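The pvr_queue.c hunks above push the final fence teardown into process context: the last dma_fence_put() can happen in atomic context, while dropping the queue's context reference may end up sleeping. The general shape of the pattern, sketched with hypothetical names:

	static void my_fence_release_work(struct work_struct *w)
	{
		struct my_fence *f = container_of(w, struct my_fence, release_work);

		my_context_put(f->ctx);	/* may sleep - safe in a worker */
		dma_fence_free(&f->base);
	}

	static void my_fence_release(struct dma_fence *fence)
	{
		struct my_fence *f = container_of(fence, struct my_fence, base);

		/* queue_work() is callable from atomic context */
		queue_work(f->wq, &f->release_work);
	}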
*/ + struct work_struct release_work; }; /** diff --git a/drivers/gpu/drm/imagination/pvr_vm.c b/drivers/gpu/drm/imagination/pvr_vm.c index 363f885a70982..2896fa7501b1c 100644 --- a/drivers/gpu/drm/imagination/pvr_vm.c +++ b/drivers/gpu/drm/imagination/pvr_vm.c @@ -293,8 +293,9 @@ pvr_vm_bind_op_map_init(struct pvr_vm_bind_op *bind_op, static int pvr_vm_bind_op_unmap_init(struct pvr_vm_bind_op *bind_op, - struct pvr_vm_context *vm_ctx, u64 device_addr, - u64 size) + struct pvr_vm_context *vm_ctx, + struct pvr_gem_object *pvr_obj, + u64 device_addr, u64 size) { int err; @@ -318,6 +319,7 @@ pvr_vm_bind_op_unmap_init(struct pvr_vm_bind_op *bind_op, goto err_bind_op_fini; } + bind_op->pvr_obj = pvr_obj; bind_op->vm_ctx = vm_ctx; bind_op->device_addr = device_addr; bind_op->size = size; @@ -597,20 +599,6 @@ pvr_vm_create_context(struct pvr_device *pvr_dev, bool is_userspace_context) return ERR_PTR(err); } -/** - * pvr_vm_unmap_all() - Unmap all mappings associated with a VM context. - * @vm_ctx: Target VM context. - * - * This function ensures that no mappings are left dangling by unmapping them - * all in order of ascending device-virtual address. - */ -void -pvr_vm_unmap_all(struct pvr_vm_context *vm_ctx) -{ - WARN_ON(pvr_vm_unmap(vm_ctx, vm_ctx->gpuvm_mgr.mm_start, - vm_ctx->gpuvm_mgr.mm_range)); -} - /** * pvr_vm_context_release() - Teardown a VM context. * @ref_count: Pointer to reference counter of the VM context. @@ -703,11 +691,7 @@ pvr_vm_lock_extra(struct drm_gpuvm_exec *vm_exec) struct pvr_vm_bind_op *bind_op = vm_exec->extra.priv; struct pvr_gem_object *pvr_obj = bind_op->pvr_obj; - /* Unmap operations don't have an object to lock. */ - if (!pvr_obj) - return 0; - - /* Acquire lock on the GEM being mapped. */ + /* Acquire lock on the GEM object being mapped/unmapped. */ return drm_exec_lock_obj(&vm_exec->exec, gem_from_pvr_gem(pvr_obj)); } @@ -772,8 +756,10 @@ pvr_vm_map(struct pvr_vm_context *vm_ctx, struct pvr_gem_object *pvr_obj, } /** - * pvr_vm_unmap() - Unmap an already mapped section of device-virtual memory. + * pvr_vm_unmap_obj_locked() - Unmap an already mapped section of device-virtual + * memory. * @vm_ctx: Target VM context. + * @pvr_obj: Target PowerVR memory object. * @device_addr: Virtual device address at the start of the target mapping. * @size: Size of the target mapping. * @@ -784,9 +770,13 @@ pvr_vm_map(struct pvr_vm_context *vm_ctx, struct pvr_gem_object *pvr_obj, * * Any error encountered while performing internal operations required to * destroy the mapping (returned from pvr_vm_gpuva_unmap or * pvr_vm_gpuva_remap). + * + * The vm_ctx->lock must be held when calling this function. */ -int -pvr_vm_unmap(struct pvr_vm_context *vm_ctx, u64 device_addr, u64 size) +static int +pvr_vm_unmap_obj_locked(struct pvr_vm_context *vm_ctx, + struct pvr_gem_object *pvr_obj, + u64 device_addr, u64 size) { struct pvr_vm_bind_op bind_op = {0}; struct drm_gpuvm_exec vm_exec = { @@ -799,11 +789,13 @@ pvr_vm_unmap(struct pvr_vm_context *vm_ctx, u64 device_addr, u64 size) }, }; - int err = pvr_vm_bind_op_unmap_init(&bind_op, vm_ctx, device_addr, - size); + int err = pvr_vm_bind_op_unmap_init(&bind_op, vm_ctx, pvr_obj, + device_addr, size); if (err) return err; + pvr_gem_object_get(pvr_obj); + err = drm_gpuvm_exec_lock(&vm_exec); if (err) goto err_cleanup; @@ -818,6 +810,96 @@ pvr_vm_unmap(struct pvr_vm_context *vm_ctx, u64 device_addr, u64 size) return err; } +/** + * pvr_vm_unmap_obj() - Unmap an already mapped section of device-virtual + * memory. + * @vm_ctx: Target VM context. 
+ * @pvr_obj: Target PowerVR memory object. + * @device_addr: Virtual device address at the start of the target mapping. + * @size: Size of the target mapping. + * + * Return: + * * 0 on success, + * * Any error encountered by pvr_vm_unmap_obj_locked. + */ +int +pvr_vm_unmap_obj(struct pvr_vm_context *vm_ctx, struct pvr_gem_object *pvr_obj, + u64 device_addr, u64 size) +{ + int err; + + mutex_lock(&vm_ctx->lock); + err = pvr_vm_unmap_obj_locked(vm_ctx, pvr_obj, device_addr, size); + mutex_unlock(&vm_ctx->lock); + + return err; +} + +/** + * pvr_vm_unmap() - Unmap an already mapped section of device-virtual memory. + * @vm_ctx: Target VM context. + * @device_addr: Virtual device address at the start of the target mapping. + * @size: Size of the target mapping. + * + * Return: + * * 0 on success, + * * Any error encountered by drm_gpuva_find, + * * Any error encountered by pvr_vm_unmap_obj_locked. + */ +int +pvr_vm_unmap(struct pvr_vm_context *vm_ctx, u64 device_addr, u64 size) +{ + struct pvr_gem_object *pvr_obj; + struct drm_gpuva *va; + int err; + + mutex_lock(&vm_ctx->lock); + + va = drm_gpuva_find(&vm_ctx->gpuvm_mgr, device_addr, size); + if (va) { + pvr_obj = gem_to_pvr_gem(va->gem.obj); + err = pvr_vm_unmap_obj_locked(vm_ctx, pvr_obj, + va->va.addr, va->va.range); + } else { + err = -ENOENT; + } + + mutex_unlock(&vm_ctx->lock); + + return err; +} + +/** + * pvr_vm_unmap_all() - Unmap all mappings associated with a VM context. + * @vm_ctx: Target VM context. + * + * This function ensures that no mappings are left dangling by unmapping them + * all in order of ascending device-virtual address. + */ +void +pvr_vm_unmap_all(struct pvr_vm_context *vm_ctx) +{ + mutex_lock(&vm_ctx->lock); + + for (;;) { + struct pvr_gem_object *pvr_obj; + struct drm_gpuva *va; + + va = drm_gpuva_find_first(&vm_ctx->gpuvm_mgr, + vm_ctx->gpuvm_mgr.mm_start, + vm_ctx->gpuvm_mgr.mm_range); + if (!va) + break; + + pvr_obj = gem_to_pvr_gem(va->gem.obj); + + WARN_ON(pvr_vm_unmap_obj_locked(vm_ctx, pvr_obj, + va->va.addr, va->va.range)); + } + + mutex_unlock(&vm_ctx->lock); +} + /* Static data areas are determined by firmware. 
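Callers of the reworked API see one behavioral refinement: pvr_vm_unmap() now resolves the mapping and its backing GEM object itself, under the VM context lock, and reports a clean error when nothing is mapped there. A hypothetical caller:

	int err = pvr_vm_unmap(vm_ctx, device_addr, size);

	if (err == -ENOENT)
		/* no mapping with exactly this device address and size */
		pr_debug("VA [0x%llx, 0x%llx) was not mapped\n",
			 device_addr, device_addr + size);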
*/ static const struct drm_pvr_static_data_area static_data_areas[] = { { diff --git a/drivers/gpu/drm/imagination/pvr_vm.h b/drivers/gpu/drm/imagination/pvr_vm.h index 79406243617c1..b0528dffa7f1b 100644 --- a/drivers/gpu/drm/imagination/pvr_vm.h +++ b/drivers/gpu/drm/imagination/pvr_vm.h @@ -38,6 +38,9 @@ struct pvr_vm_context *pvr_vm_create_context(struct pvr_device *pvr_dev, int pvr_vm_map(struct pvr_vm_context *vm_ctx, struct pvr_gem_object *pvr_obj, u64 pvr_obj_offset, u64 device_addr, u64 size); +int pvr_vm_unmap_obj(struct pvr_vm_context *vm_ctx, + struct pvr_gem_object *pvr_obj, + u64 device_addr, u64 size); int pvr_vm_unmap(struct pvr_vm_context *vm_ctx, u64 device_addr, u64 size); void pvr_vm_unmap_all(struct pvr_vm_context *vm_ctx); diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c index 65d38b25c0707..699b0dd34b18f 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c @@ -813,10 +813,10 @@ static int a6xx_gmu_fw_load(struct a6xx_gmu *gmu) } ver = gmu_read(gmu, REG_A6XX_GMU_CORE_FW_VERSION); - DRM_INFO("Loaded GMU firmware v%u.%u.%u\n", - FIELD_GET(A6XX_GMU_CORE_FW_VERSION_MAJOR__MASK, ver), - FIELD_GET(A6XX_GMU_CORE_FW_VERSION_MINOR__MASK, ver), - FIELD_GET(A6XX_GMU_CORE_FW_VERSION_STEP__MASK, ver)); + DRM_INFO_ONCE("Loaded GMU firmware v%u.%u.%u\n", + FIELD_GET(A6XX_GMU_CORE_FW_VERSION_MAJOR__MASK, ver), + FIELD_GET(A6XX_GMU_CORE_FW_VERSION_MINOR__MASK, ver), + FIELD_GET(A6XX_GMU_CORE_FW_VERSION_STEP__MASK, ver)); return 0; } diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h index 421afacb72480..36cc9dbc00b5c 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h @@ -297,7 +297,7 @@ static const struct dpu_wb_cfg sm8150_wb[] = { { .name = "wb_2", .id = WB_2, .base = 0x65000, .len = 0x2c8, - .features = WB_SDM845_MASK, + .features = WB_SM8250_MASK, .format_list = wb2_formats_rgb, .num_formats = ARRAY_SIZE(wb2_formats_rgb), .clk_ctrl = DPU_CLK_CTRL_WB2, diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h index 641023b102bf5..e8eacdb47967a 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h @@ -304,7 +304,7 @@ static const struct dpu_wb_cfg sc8180x_wb[] = { { .name = "wb_2", .id = WB_2, .base = 0x65000, .len = 0x2c8, - .features = WB_SDM845_MASK, + .features = WB_SM8250_MASK, .format_list = wb2_formats_rgb, .num_formats = ARRAY_SIZE(wb2_formats_rgb), .clk_ctrl = DPU_CLK_CTRL_WB2, diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_3_sm6150.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_3_sm6150.h index 621a2140f675f..d761ed705bac3 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_3_sm6150.h +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_3_sm6150.h @@ -116,14 +116,12 @@ static const struct dpu_lm_cfg sm6150_lm[] = { .sblk = &sdm845_lm_sblk, .pingpong = PINGPONG_0, .dspp = DSPP_0, - .lm_pair = LM_1, }, { .name = "lm_1", .id = LM_1, .base = 0x45000, .len = 0x320, .features = MIXER_QCM2290_MASK, .sblk = &sdm845_lm_sblk, .pingpong = PINGPONG_1, - .lm_pair = LM_0, }, { .name = "lm_2", .id = LM_2, .base = 0x46000, .len = 0x320, diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h index d039b96beb97c..76f60a2df7a89 100644 
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h +++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h @@ -144,7 +144,7 @@ static const struct dpu_wb_cfg sm6125_wb[] = { { .name = "wb_2", .id = WB_2, .base = 0x65000, .len = 0x2c8, - .features = WB_SDM845_MASK, + .features = WB_SM8250_MASK, .format_list = wb2_formats_rgb, .num_formats = ARRAY_SIZE(wb2_formats_rgb), .clk_ctrl = DPU_CLK_CTRL_WB2, diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c index 7191b1a6d41b3..e5dcd41a361f4 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c @@ -1228,8 +1228,6 @@ static int dpu_crtc_reassign_planes(struct drm_crtc *crtc, struct drm_crtc_state done: kfree(states); return ret; - - return 0; } static int dpu_crtc_atomic_check(struct drm_crtc *crtc, diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c index 5172ab4dea995..48e6e8d74c855 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c @@ -2281,6 +2281,9 @@ void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc) } } + if (phys_enc->hw_pp && phys_enc->hw_pp->ops.setup_dither) + phys_enc->hw_pp->ops.setup_dither(phys_enc->hw_pp, NULL); + /* reset the merge 3D HW block */ if (phys_enc->hw_pp && phys_enc->hw_pp->merge_3d) { phys_enc->hw_pp->merge_3d->ops.setup_3d_mode(phys_enc->hw_pp->merge_3d, diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c index 657200401f576..cec6d4e8baec4 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c @@ -52,6 +52,7 @@ static void dpu_hw_dsc_config(struct dpu_hw_dsc *hw_dsc, u32 slice_last_group_size; u32 det_thresh_flatness; bool is_cmd_mode = !(mode & DSC_MODE_VIDEO); + bool input_10_bits = dsc->bits_per_component == 10; DPU_REG_WRITE(c, DSC_COMMON_MODE, mode); @@ -68,7 +69,7 @@ static void dpu_hw_dsc_config(struct dpu_hw_dsc *hw_dsc, data |= (dsc->line_buf_depth << 3); data |= (dsc->simple_422 << 2); data |= (dsc->convert_rgb << 1); - data |= dsc->bits_per_component; + data |= input_10_bits; DPU_REG_WRITE(c, DSC_ENC, data); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c index ad19330de61ab..562a3f4c5238a 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c @@ -272,7 +272,7 @@ static void _setup_mdp_ops(struct dpu_hw_mdp_ops *ops, if (cap & BIT(DPU_MDP_VSYNC_SEL)) ops->setup_vsync_source = dpu_hw_setup_vsync_sel; - else + else if (!(cap & BIT(DPU_MDP_PERIPH_0_REMOVED))) ops->setup_vsync_source = dpu_hw_setup_wd_timer; ops->get_safe_status = dpu_hw_get_safe_status; diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c index 098abc2c0003c..af3e541f60c30 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c @@ -1164,7 +1164,6 @@ int dpu_assign_plane_resources(struct dpu_global_state *global_state, unsigned int num_planes) { unsigned int i; - int ret; for (i = 0; i < num_planes; i++) { struct drm_plane_state *plane_state = states[i]; @@ -1173,13 +1172,13 @@ int dpu_assign_plane_resources(struct dpu_global_state *global_state, !plane_state->visible) continue; - ret = dpu_plane_virtual_assign_resources(crtc, global_state, + int ret = dpu_plane_virtual_assign_resources(crtc, global_state, state, 
plane_state); if (ret) - break; + return ret; } - return ret; + return 0; } static void dpu_plane_flush_csc(struct dpu_plane *pdpu, struct dpu_sw_pipe *pipe) diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c index 24dd37f1682bf..3898850739abb 100644 --- a/drivers/gpu/drm/msm/dp/dp_display.c +++ b/drivers/gpu/drm/msm/dp/dp_display.c @@ -930,16 +930,17 @@ enum drm_mode_status msm_dp_bridge_mode_valid(struct drm_bridge *bridge, return -EINVAL; } - if (mode->clock > DP_MAX_PIXEL_CLK_KHZ) - return MODE_CLOCK_HIGH; - msm_dp_display = container_of(dp, struct msm_dp_display_private, msm_dp_display); link_info = &msm_dp_display->panel->link_info; - if (drm_mode_is_420_only(&dp->connector->display_info, mode) && - msm_dp_display->panel->vsc_sdp_supported) + if ((drm_mode_is_420_only(&dp->connector->display_info, mode) && + msm_dp_display->panel->vsc_sdp_supported) || + msm_dp_wide_bus_available(dp)) mode_pclk_khz /= 2; + if (mode_pclk_khz > DP_MAX_PIXEL_CLK_KHZ) + return MODE_CLOCK_HIGH; + mode_bpp = dp->connector->display_info.bpc * num_components; if (!mode_bpp) mode_bpp = default_bpp; diff --git a/drivers/gpu/drm/msm/dp/dp_drm.c b/drivers/gpu/drm/msm/dp/dp_drm.c index d3e241ea69416..16b7913d1eefa 100644 --- a/drivers/gpu/drm/msm/dp/dp_drm.c +++ b/drivers/gpu/drm/msm/dp/dp_drm.c @@ -257,7 +257,10 @@ static enum drm_mode_status msm_edp_bridge_mode_valid(struct drm_bridge *bridge, return -EINVAL; } - if (mode->clock > DP_MAX_PIXEL_CLK_KHZ) + if (msm_dp_wide_bus_available(dp)) + mode_pclk_khz /= 2; + + if (mode_pclk_khz > DP_MAX_PIXEL_CLK_KHZ) return MODE_CLOCK_HIGH; /* diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c index 031446c87daec..798168180c1ab 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c @@ -83,6 +83,9 @@ struct dsi_pll_7nm { /* protects REG_DSI_7nm_PHY_CMN_CLK_CFG0 register */ spinlock_t postdiv_lock; + /* protects REG_DSI_7nm_PHY_CMN_CLK_CFG1 register */ + spinlock_t pclk_mux_lock; + struct pll_7nm_cached_state cached_state; struct dsi_pll_7nm *slave; @@ -372,22 +375,41 @@ static void dsi_pll_enable_pll_bias(struct dsi_pll_7nm *pll) ndelay(250); } -static void dsi_pll_disable_global_clk(struct dsi_pll_7nm *pll) +static void dsi_pll_cmn_clk_cfg0_write(struct dsi_pll_7nm *pll, u32 val) { + unsigned long flags; + + spin_lock_irqsave(&pll->postdiv_lock, flags); + writel(val, pll->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG0); + spin_unlock_irqrestore(&pll->postdiv_lock, flags); +} + +static void dsi_pll_cmn_clk_cfg1_update(struct dsi_pll_7nm *pll, u32 mask, + u32 val) +{ + unsigned long flags; u32 data; + spin_lock_irqsave(&pll->pclk_mux_lock, flags); data = readl(pll->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1); - writel(data & ~BIT(5), pll->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1); + data &= ~mask; + data |= val & mask; + + writel(data, pll->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1); + spin_unlock_irqrestore(&pll->pclk_mux_lock, flags); +} + +static void dsi_pll_disable_global_clk(struct dsi_pll_7nm *pll) +{ + dsi_pll_cmn_clk_cfg1_update(pll, DSI_7nm_PHY_CMN_CLK_CFG1_CLK_EN, 0); } static void dsi_pll_enable_global_clk(struct dsi_pll_7nm *pll) { - u32 data; + u32 cfg_1 = DSI_7nm_PHY_CMN_CLK_CFG1_CLK_EN | DSI_7nm_PHY_CMN_CLK_CFG1_CLK_EN_SEL; writel(0x04, pll->phy->base + REG_DSI_7nm_PHY_CMN_CTRL_3); - - data = readl(pll->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1); - writel(data | BIT(5) | BIT(4), pll->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1); + 
dsi_pll_cmn_clk_cfg1_update(pll, cfg_1, cfg_1); } static void dsi_pll_phy_dig_reset(struct dsi_pll_7nm *pll) @@ -565,7 +587,6 @@ static int dsi_7nm_pll_restore_state(struct msm_dsi_phy *phy) { struct dsi_pll_7nm *pll_7nm = to_pll_7nm(phy->vco_hw); struct pll_7nm_cached_state *cached = &pll_7nm->cached_state; - void __iomem *phy_base = pll_7nm->phy->base; u32 val; int ret; @@ -574,13 +595,10 @@ static int dsi_7nm_pll_restore_state(struct msm_dsi_phy *phy) val |= cached->pll_out_div; writel(val, pll_7nm->phy->pll_base + REG_DSI_7nm_PHY_PLL_PLL_OUTDIV_RATE); - writel(cached->bit_clk_div | (cached->pix_clk_div << 4), - phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG0); - - val = readl(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG1); - val &= ~0x3; - val |= cached->pll_mux; - writel(val, phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG1); + dsi_pll_cmn_clk_cfg0_write(pll_7nm, + DSI_7nm_PHY_CMN_CLK_CFG0_DIV_CTRL_3_0(cached->bit_clk_div) | + DSI_7nm_PHY_CMN_CLK_CFG0_DIV_CTRL_7_4(cached->pix_clk_div)); + dsi_pll_cmn_clk_cfg1_update(pll_7nm, 0x3, cached->pll_mux); ret = dsi_pll_7nm_vco_set_rate(phy->vco_hw, pll_7nm->vco_current_rate, @@ -599,7 +617,6 @@ static int dsi_7nm_pll_restore_state(struct msm_dsi_phy *phy) static int dsi_7nm_set_usecase(struct msm_dsi_phy *phy) { struct dsi_pll_7nm *pll_7nm = to_pll_7nm(phy->vco_hw); - void __iomem *base = phy->base; u32 data = 0x0; /* internal PLL */ DBG("DSI PLL%d", pll_7nm->phy->id); @@ -618,7 +635,8 @@ static int dsi_7nm_set_usecase(struct msm_dsi_phy *phy) } /* set PLL src */ - writel(data << 2, base + REG_DSI_7nm_PHY_CMN_CLK_CFG1); + dsi_pll_cmn_clk_cfg1_update(pll_7nm, DSI_7nm_PHY_CMN_CLK_CFG1_BITCLK_SEL__MASK, + DSI_7nm_PHY_CMN_CLK_CFG1_BITCLK_SEL(data)); return 0; } @@ -733,7 +751,7 @@ static int pll_7nm_register(struct dsi_pll_7nm *pll_7nm, struct clk_hw **provide pll_by_2_bit, }), 2, 0, pll_7nm->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1, - 0, 1, 0, NULL); + 0, 1, 0, &pll_7nm->pclk_mux_lock); if (IS_ERR(hw)) { ret = PTR_ERR(hw); goto fail; @@ -778,6 +796,7 @@ static int dsi_pll_7nm_init(struct msm_dsi_phy *phy) pll_7nm_list[phy->id] = pll_7nm; spin_lock_init(&pll_7nm->postdiv_lock); + spin_lock_init(&pll_7nm->pclk_mux_lock); pll_7nm->phy = phy; diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h index fee31680a6d54..a650778552017 100644 --- a/drivers/gpu/drm/msm/msm_drv.h +++ b/drivers/gpu/drm/msm/msm_drv.h @@ -537,15 +537,12 @@ static inline int align_pitch(int width, int bpp) static inline unsigned long timeout_to_jiffies(const ktime_t *timeout) { ktime_t now = ktime_get(); - s64 remaining_jiffies; - if (ktime_compare(*timeout, now) < 0) { - remaining_jiffies = 0; - } else { - ktime_t rem = ktime_sub(*timeout, now); - remaining_jiffies = ktime_divns(rem, NSEC_PER_SEC / HZ); - } + if (ktime_compare(*timeout, now) <= 0) + return 0; + ktime_t rem = ktime_sub(*timeout, now); + s64 remaining_jiffies = ktime_divns(rem, NSEC_PER_SEC / HZ); return clamp(remaining_jiffies, 1LL, (s64)INT_MAX); } diff --git a/drivers/gpu/drm/msm/registers/display/dsi_phy_7nm.xml b/drivers/gpu/drm/msm/registers/display/dsi_phy_7nm.xml index d54b72f924493..35f7f40e405b7 100644 --- a/drivers/gpu/drm/msm/registers/display/dsi_phy_7nm.xml +++ b/drivers/gpu/drm/msm/registers/display/dsi_phy_7nm.xml @@ -9,8 +9,15 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd"> - <reg32 offset="0x00010" name="CLK_CFG0"/> - <reg32 offset="0x00014" name="CLK_CFG1"/> + <reg32 offset="0x00010" name="CLK_CFG0"> + <bitfield name="DIV_CTRL_3_0" low="0" high="3" type="uint"/> + <bitfield name="DIV_CTRL_7_4" low="4" high="7" type="uint"/> + </reg32> + <reg32 offset="0x00014" name="CLK_CFG1"> + <bitfield name="CLK_EN" pos="5" type="boolean"/> + <bitfield name="CLK_EN_SEL" pos="4" type="boolean"/> + <bitfield name="BITCLK_SEL" low="2" high="3" type="uint"/> + </reg32> diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig index ce840300578d8..1050a4617fc15 100644 --- a/drivers/gpu/drm/nouveau/Kconfig +++
b/drivers/gpu/drm/nouveau/Kconfig @@ -4,6 +4,7 @@ config DRM_NOUVEAU depends on DRM && PCI && MMU select IOMMU_API select FW_LOADER + select FW_CACHE if PM_SLEEP select DRM_CLIENT_SELECTION select DRM_DISPLAY_DP_HELPER select DRM_DISPLAY_HDMI_HELPER diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c index 8d5c9c74cbb90..eac0d1d2dbda2 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.c +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c @@ -775,7 +775,6 @@ nouveau_connector_force(struct drm_connector *connector) if (!nv_encoder) { NV_ERROR(drm, "can't find encoder to force %s on!\n", connector->name); - connector->status = connector_status_disconnected; return; } diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c index b4da82ddbb6b2..8ea98f06d39af 100644 --- a/drivers/gpu/drm/nouveau/nouveau_svm.c +++ b/drivers/gpu/drm/nouveau/nouveau_svm.c @@ -590,6 +590,7 @@ static int nouveau_atomic_range_fault(struct nouveau_svmm *svmm, unsigned long timeout = jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT); struct mm_struct *mm = svmm->notifier.mm; + struct folio *folio; struct page *page; unsigned long start = args->p.addr; unsigned long notifier_seq; @@ -616,12 +617,16 @@ static int nouveau_atomic_range_fault(struct nouveau_svmm *svmm, ret = -EINVAL; goto out; } + folio = page_folio(page); mutex_lock(&svmm->mutex); if (!mmu_interval_read_retry(&notifier->notifier, notifier_seq)) break; mutex_unlock(&svmm->mutex); + + folio_unlock(folio); + folio_put(folio); } /* Map the page on the GPU. */ @@ -637,8 +642,8 @@ static int nouveau_atomic_range_fault(struct nouveau_svmm *svmm, ret = nvif_object_ioctl(&svmm->vmm->vmm.object, args, size, NULL); mutex_unlock(&svmm->mutex); - unlock_page(page); - put_page(page); + folio_unlock(folio); + folio_put(folio); out: mmu_interval_notifier_remove(&notifier->notifier); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c index a6f410ba60bc9..d393bc540f862 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c @@ -75,7 +75,7 @@ gp10b_pmu_acr = { .bootstrap_multiple_falcons = gp10b_pmu_acr_bootstrap_multiple_falcons, }; -#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC) +#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC) MODULE_FIRMWARE("nvidia/gp10b/pmu/desc.bin"); MODULE_FIRMWARE("nvidia/gp10b/pmu/image.bin"); MODULE_FIRMWARE("nvidia/gp10b/pmu/sig.bin"); diff --git a/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c b/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c index 45d09e6fa667f..7d68a8acfe2ea 100644 --- a/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c +++ b/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c @@ -109,13 +109,13 @@ static int jadard_prepare(struct drm_panel *panel) if (jadard->desc->lp11_to_reset_delay_ms) msleep(jadard->desc->lp11_to_reset_delay_ms); - gpiod_set_value(jadard->reset, 1); + gpiod_set_value(jadard->reset, 0); msleep(5); - gpiod_set_value(jadard->reset, 0); + gpiod_set_value(jadard->reset, 1); msleep(10); - gpiod_set_value(jadard->reset, 1); + gpiod_set_value(jadard->reset, 0); msleep(130); ret = jadard->desc->init(jadard); @@ -1130,7 +1130,7 @@ static int jadard_dsi_probe(struct mipi_dsi_device *dsi) dsi->format = desc->format; dsi->lanes = desc->lanes; - jadard->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW); + jadard->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH); if (IS_ERR(jadard->reset)) { DRM_DEV_ERROR(&dsi->dev, "failed
to get our reset GPIO\n"); return PTR_ERR(jadard->reset); diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c index 05c13102a8cb8..d22889fbfa9c8 100644 --- a/drivers/gpu/drm/radeon/r300.c +++ b/drivers/gpu/drm/radeon/r300.c @@ -359,7 +359,8 @@ int r300_mc_wait_for_idle(struct radeon_device *rdev) return -1; } -static void r300_gpu_init(struct radeon_device *rdev) +/* rs400_gpu_init also calls this! */ +void r300_gpu_init(struct radeon_device *rdev) { uint32_t gb_tile_config, tmp; diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index 1e00f6b99f94b..8f5e07834fcc6 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h @@ -165,6 +165,7 @@ void r200_set_safe_registers(struct radeon_device *rdev); */ extern int r300_init(struct radeon_device *rdev); extern void r300_fini(struct radeon_device *rdev); +extern void r300_gpu_init(struct radeon_device *rdev); extern int r300_suspend(struct radeon_device *rdev); extern int r300_resume(struct radeon_device *rdev); extern int r300_asic_reset(struct radeon_device *rdev, bool hard); diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c index d6c18fd740ec6..13cd0a688a65c 100644 --- a/drivers/gpu/drm/radeon/rs400.c +++ b/drivers/gpu/drm/radeon/rs400.c @@ -256,8 +256,22 @@ int rs400_mc_wait_for_idle(struct radeon_device *rdev) static void rs400_gpu_init(struct radeon_device *rdev) { - /* FIXME: is this correct ? */ - r420_pipes_init(rdev); + /* Earlier code was calling r420_pipes_init and then + * rs400_mc_wait_for_idle(rdev). The problem is that + * at least on my Mobility Radeon Xpress 200M RC410 card + * that ends up in this code path ends up num_gb_pipes == 3 + * while the card seems to have only one pipe. With the + * r420 pipe initialization method. + * + * Problems shown up as HyperZ glitches, see: + * https://bugs.freedesktop.org/show_bug.cgi?id=110897 + * + * Delegating initialization to r300 code seems to work + * and results in proper pipe numbers. The rs400 cards + * are said to be not r400, but r300 kind of cards. + */ + r300_gpu_init(rdev); + if (rs400_mc_wait_for_idle(rdev)) { pr_warn("rs400: Failed to wait MC idle while programming pipes. Bad things might happen. 
%08x\n", RREG32(RADEON_MC_STATUS)); diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h b/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h index c75302ca3427c..f56e77e7f6d02 100644 --- a/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h +++ b/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h @@ -21,7 +21,7 @@ * */ -#if !defined(_GPU_SCHED_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) +#if !defined(_GPU_SCHED_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ) #define _GPU_SCHED_TRACE_H_ #include @@ -106,7 +106,7 @@ TRACE_EVENT(drm_sched_job_wait_dep, __entry->seqno) ); -#endif +#endif /* _GPU_SCHED_TRACE_H_ */ /* This part must be outside protection */ #undef TRACE_INCLUDE_PATH diff --git a/drivers/gpu/drm/tiny/bochs.c b/drivers/gpu/drm/tiny/bochs.c index c67e1f9067859..8706763af8fba 100644 --- a/drivers/gpu/drm/tiny/bochs.c +++ b/drivers/gpu/drm/tiny/bochs.c @@ -335,8 +335,6 @@ static void bochs_hw_setmode(struct bochs_device *bochs, struct drm_display_mode bochs->xres, bochs->yres, bochs->bpp, bochs->yres_virtual); - bochs_hw_blank(bochs, false); - bochs_dispi_write(bochs, VBE_DISPI_INDEX_ENABLE, 0); bochs_dispi_write(bochs, VBE_DISPI_INDEX_BPP, bochs->bpp); bochs_dispi_write(bochs, VBE_DISPI_INDEX_XRES, bochs->xres); @@ -506,6 +504,9 @@ static int bochs_crtc_helper_atomic_check(struct drm_crtc *crtc, static void bochs_crtc_helper_atomic_enable(struct drm_crtc *crtc, struct drm_atomic_state *state) { + struct bochs_device *bochs = to_bochs_device(crtc->dev); + + bochs_hw_blank(bochs, false); } static void bochs_crtc_helper_atomic_disable(struct drm_crtc *crtc, diff --git a/drivers/gpu/drm/vkms/vkms_composer.c b/drivers/gpu/drm/vkms/vkms_composer.c index b20ac17057262..fa269d279e257 100644 --- a/drivers/gpu/drm/vkms/vkms_composer.c +++ b/drivers/gpu/drm/vkms/vkms_composer.c @@ -67,7 +67,7 @@ static u16 lerp_u16(u16 a, u16 b, s64 t) s64 delta = drm_fixp_mul(b_fp - a_fp, t); - return drm_fixp2int(a_fp + delta); + return drm_fixp2int_round(a_fp + delta); } static s64 get_lut_index(const struct vkms_color_lut *lut, u16 channel_value) diff --git a/drivers/gpu/drm/xe/display/xe_plane_initial.c b/drivers/gpu/drm/xe/display/xe_plane_initial.c index 2eb9633f163a7..2a2f250fa495d 100644 --- a/drivers/gpu/drm/xe/display/xe_plane_initial.c +++ b/drivers/gpu/drm/xe/display/xe_plane_initial.c @@ -194,8 +194,6 @@ intel_find_initial_plane_obj(struct intel_crtc *crtc, to_intel_plane(crtc->base.primary); struct intel_plane_state *plane_state = to_intel_plane_state(plane->base.state); - struct intel_crtc_state *crtc_state = - to_intel_crtc_state(crtc->base.state); struct drm_framebuffer *fb; struct i915_vma *vma; @@ -241,14 +239,6 @@ intel_find_initial_plane_obj(struct intel_crtc *crtc, atomic_or(plane->frontbuffer_bit, &to_intel_frontbuffer(fb)->bits); plane_config->vma = vma; - - /* - * Flip to the newly created mapping ASAP, so we can re-use the - * first part of GGTT for WOPCM, prevent flickering, and prevent - * the lookup of sysmem scratch pages. 
- */ - plane->check_plane(crtc_state, plane_state); - plane->async_flip(NULL, plane, crtc_state, plane_state, true); return; nofb: diff --git a/drivers/gpu/drm/xe/regs/xe_engine_regs.h b/drivers/gpu/drm/xe/regs/xe_engine_regs.h index d86219dedde2a..b732c89816dff 100644 --- a/drivers/gpu/drm/xe/regs/xe_engine_regs.h +++ b/drivers/gpu/drm/xe/regs/xe_engine_regs.h @@ -53,7 +53,6 @@ #define RING_CTL(base) XE_REG((base) + 0x3c) #define RING_CTL_SIZE(size) ((size) - PAGE_SIZE) /* in bytes -> pages */ -#define RING_CTL_SIZE(size) ((size) - PAGE_SIZE) /* in bytes -> pages */ #define RING_START_UDW(base) XE_REG((base) + 0x48) diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c index 5d6fb79957b63..9f4f27d1ef4a9 100644 --- a/drivers/gpu/drm/xe/xe_gt.c +++ b/drivers/gpu/drm/xe/xe_gt.c @@ -380,9 +380,7 @@ int xe_gt_init_early(struct xe_gt *gt) if (err) return err; - xe_wa_process_gt(gt); xe_wa_process_oob(gt); - xe_tuning_process_gt(gt); xe_force_wake_init_gt(gt, gt_to_fw(gt)); spin_lock_init(>->global_invl_lock); @@ -474,6 +472,8 @@ static int all_fw_domain_init(struct xe_gt *gt) } xe_gt_mcr_set_implicit_defaults(gt); + xe_wa_process_gt(gt); + xe_tuning_process_gt(gt); xe_reg_sr_apply_mmio(>->reg_sr, gt); err = xe_gt_clock_init(gt); diff --git a/drivers/gpu/drm/xe/xe_guc_ct.c b/drivers/gpu/drm/xe/xe_guc_ct.c index 50c8076b51585..72ad576fc18eb 100644 --- a/drivers/gpu/drm/xe/xe_guc_ct.c +++ b/drivers/gpu/drm/xe/xe_guc_ct.c @@ -1723,9 +1723,11 @@ void xe_guc_ct_snapshot_print(struct xe_guc_ct_snapshot *snapshot, drm_printf(p, "\tg2h outstanding: %d\n", snapshot->g2h_outstanding); - if (snapshot->ctb) - xe_print_blob_ascii85(p, "CTB data", '\n', + if (snapshot->ctb) { + drm_printf(p, "[CTB].length: 0x%zx\n", snapshot->ctb_size); + xe_print_blob_ascii85(p, "[CTB].data", '\n', snapshot->ctb, 0, snapshot->ctb_size); + } } else { drm_puts(p, "CT disabled\n"); } diff --git a/drivers/gpu/drm/xe/xe_guc_log.c b/drivers/gpu/drm/xe/xe_guc_log.c index 2baa4d95571fb..0ca3056d8bd3f 100644 --- a/drivers/gpu/drm/xe/xe_guc_log.c +++ b/drivers/gpu/drm/xe/xe_guc_log.c @@ -208,10 +208,11 @@ void xe_guc_log_snapshot_print(struct xe_guc_log_snapshot *snapshot, struct drm_ drm_printf(p, "GuC timestamp: 0x%08llX [%llu]\n", snapshot->stamp, snapshot->stamp); drm_printf(p, "Log level: %u\n", snapshot->level); + drm_printf(p, "[LOG].length: 0x%zx\n", snapshot->size); remain = snapshot->size; for (i = 0; i < snapshot->num_chunks; i++) { size_t size = min(GUC_LOG_CHUNK_SIZE, remain); - const char *prefix = i ? NULL : "Log data"; + const char *prefix = i ? NULL : "[LOG].data"; char suffix = i == snapshot->num_chunks - 1 ? 
'\n' : 0; xe_print_blob_ascii85(p, prefix, suffix, snapshot->copy[i], 0, size); diff --git a/drivers/gpu/drm/xe/xe_guc_pc.c b/drivers/gpu/drm/xe/xe_guc_pc.c index df7f130fb663f..b995d1d51aed0 100644 --- a/drivers/gpu/drm/xe/xe_guc_pc.c +++ b/drivers/gpu/drm/xe/xe_guc_pc.c @@ -6,6 +6,7 @@ #include "xe_guc_pc.h" #include +#include #include #include @@ -19,6 +20,7 @@ #include "xe_gt.h" #include "xe_gt_idle.h" #include "xe_gt_printk.h" +#include "xe_gt_throttle.h" #include "xe_gt_types.h" #include "xe_guc.h" #include "xe_guc_ct.h" @@ -49,6 +51,9 @@ #define LNL_MERT_FREQ_CAP 800 #define BMG_MERT_FREQ_CAP 2133 +#define SLPC_RESET_TIMEOUT_MS 5 /* roughly 5ms, but no need for precision */ +#define SLPC_RESET_EXTENDED_TIMEOUT_MS 1000 /* To be used only at pc_start */ + /** * DOC: GuC Power Conservation (PC) * @@ -113,9 +118,10 @@ static struct iosys_map *pc_to_maps(struct xe_guc_pc *pc) FIELD_PREP(HOST2GUC_PC_SLPC_REQUEST_MSG_1_EVENT_ARGC, count)) static int wait_for_pc_state(struct xe_guc_pc *pc, - enum slpc_global_state state) + enum slpc_global_state state, + int timeout_ms) { - int timeout_us = 5000; /* rought 5ms, but no need for precision */ + int timeout_us = 1000 * timeout_ms; int slept, wait = 10; xe_device_assert_mem_access(pc_to_xe(pc)); @@ -164,7 +170,8 @@ static int pc_action_query_task_state(struct xe_guc_pc *pc) }; int ret; - if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING)) + if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING, + SLPC_RESET_TIMEOUT_MS)) return -EAGAIN; /* Blocking here to ensure the results are ready before reading them */ @@ -187,7 +194,8 @@ static int pc_action_set_param(struct xe_guc_pc *pc, u8 id, u32 value) }; int ret; - if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING)) + if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING, + SLPC_RESET_TIMEOUT_MS)) return -EAGAIN; ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0); @@ -208,7 +216,8 @@ static int pc_action_unset_param(struct xe_guc_pc *pc, u8 id) struct xe_guc_ct *ct = &pc_to_guc(pc)->ct; int ret; - if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING)) + if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING, + SLPC_RESET_TIMEOUT_MS)) return -EAGAIN; ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0); @@ -440,6 +449,15 @@ u32 xe_guc_pc_get_act_freq(struct xe_guc_pc *pc) return freq; } +static u32 get_cur_freq(struct xe_gt *gt) +{ + u32 freq; + + freq = xe_mmio_read32(>->mmio, RPNSWREQ); + freq = REG_FIELD_GET(REQ_RATIO_MASK, freq); + return decode_freq(freq); +} + /** * xe_guc_pc_get_cur_freq - Get Current requested frequency * @pc: The GuC PC @@ -463,10 +481,7 @@ int xe_guc_pc_get_cur_freq(struct xe_guc_pc *pc, u32 *freq) return -ETIMEDOUT; } - *freq = xe_mmio_read32(>->mmio, RPNSWREQ); - - *freq = REG_FIELD_GET(REQ_RATIO_MASK, *freq); - *freq = decode_freq(*freq); + *freq = get_cur_freq(gt); xe_force_wake_put(gt_to_fw(gt), fw_ref); return 0; @@ -1002,6 +1017,7 @@ int xe_guc_pc_start(struct xe_guc_pc *pc) struct xe_gt *gt = pc_to_gt(pc); u32 size = PAGE_ALIGN(sizeof(struct slpc_shared_data)); unsigned int fw_ref; + ktime_t earlier; int ret; xe_gt_assert(gt, xe_device_uc_enabled(xe)); @@ -1026,14 +1042,25 @@ int xe_guc_pc_start(struct xe_guc_pc *pc) memset(pc->bo->vmap.vaddr, 0, size); slpc_shared_data_write(pc, header.size, size); + earlier = ktime_get(); ret = pc_action_reset(pc); if (ret) goto out; - if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING)) { - xe_gt_err(gt, "GuC PC Start failed\n"); - ret = -EIO; - goto out; + if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING, + 
SLPC_RESET_TIMEOUT_MS)) { + xe_gt_warn(gt, "GuC PC start taking longer than normal [freq = %dMHz (req = %dMHz), perf_limit_reasons = 0x%08X]\n", + xe_guc_pc_get_act_freq(pc), get_cur_freq(gt), + xe_gt_throttle_get_limit_reasons(gt)); + + if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING, + SLPC_RESET_EXTENDED_TIMEOUT_MS)) { + xe_gt_err(gt, "GuC PC Start failed: Dynamic GT frequency control and GT sleep states are now disabled.\n"); + goto out; + } + + xe_gt_warn(gt, "GuC PC excessive start time: %lldms", + ktime_ms_delta(ktime_get(), earlier)); } ret = pc_init_freqs(pc); diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c index 913c74d6e2aeb..1a5fe4822a62e 100644 --- a/drivers/gpu/drm/xe/xe_guc_submit.c +++ b/drivers/gpu/drm/xe/xe_guc_submit.c @@ -1246,9 +1246,11 @@ static void __guc_exec_queue_fini_async(struct work_struct *w) xe_pm_runtime_get(guc_to_xe(guc)); trace_xe_exec_queue_destroy(q); + release_guc_id(guc, q); if (xe_exec_queue_is_lr(q)) cancel_work_sync(&ge->lr_tdr); - release_guc_id(guc, q); + /* Confirm no work left behind accessing device structures */ + cancel_delayed_work_sync(&ge->sched.base.work_tdr); xe_sched_entity_fini(&ge->entity); xe_sched_fini(&ge->sched); diff --git a/drivers/gpu/drm/xe/xe_hmm.c b/drivers/gpu/drm/xe/xe_hmm.c index 0898344678801..c3cc0fa105e84 100644 --- a/drivers/gpu/drm/xe/xe_hmm.c +++ b/drivers/gpu/drm/xe/xe_hmm.c @@ -19,11 +19,10 @@ static u64 xe_npages_in_range(unsigned long start, unsigned long end) return (end - start) >> PAGE_SHIFT; } -/* +/** * xe_mark_range_accessed() - mark a range as accessed, so the core mm * has such information for memory eviction or write back to * hard disk - * @range: the range to mark * @write: if we write to this range, we mark pages in this range * as dirty @@ -43,15 +42,51 @@ static void xe_mark_range_accessed(struct hmm_range *range, bool write) } } -/* +static int xe_alloc_sg(struct xe_device *xe, struct sg_table *st, + struct hmm_range *range, struct rw_semaphore *notifier_sem) +{ + unsigned long i, npages, hmm_pfn; + unsigned long num_chunks = 0; + int ret; + + /* The HMM docs say this is needed. */ + ret = down_read_interruptible(notifier_sem); + if (ret) + return ret; + + if (mmu_interval_read_retry(range->notifier, range->notifier_seq)) { + up_read(notifier_sem); + return -EAGAIN; + } + + npages = xe_npages_in_range(range->start, range->end); + for (i = 0; i < npages;) { + unsigned long len; + + hmm_pfn = range->hmm_pfns[i]; + xe_assert(xe, hmm_pfn & HMM_PFN_VALID); + + len = 1UL << hmm_pfn_to_map_order(hmm_pfn); + + /* If order > 0 the page may extend beyond range->start */ + len -= (hmm_pfn & ~HMM_PFN_FLAGS) & (len - 1); + i += len; + num_chunks++; + } + up_read(notifier_sem); + + return sg_alloc_table(st, num_chunks, GFP_KERNEL); +} + +/** * xe_build_sg() - build a scatter gather table for all the physical pages/pfn * in a hmm_range. dma-map pages if necessary. The dma-address is saved in the sg table * and will be used to program the GPU page table later. - * * @xe: the xe device that will access the dma-address in the sg table * @range: the hmm range that we build the sg table from. range->hmm_pfns[] * has the pfn numbers of pages that back up this hmm address range. * @st: pointer to the sg table. + * @notifier_sem: The xe notifier lock. * @write: whether we write to this range. This decides dma map direction * for system pages. 
If writing, we map it bidirectional; otherwise * DMA_TO_DEVICE @@ -78,43 +113,88 @@ static void xe_mark_range_accessed(struct hmm_range *range, bool write) * Returns 0 if successful; -ENOMEM if it fails to allocate memory */ static int xe_build_sg(struct xe_device *xe, struct hmm_range *range, - struct sg_table *st, bool write) + struct sg_table *st, + struct rw_semaphore *notifier_sem, + bool write) { + unsigned long npages = xe_npages_in_range(range->start, range->end); struct device *dev = xe->drm.dev; - struct page **pages; - u64 i, npages; - int ret; + struct scatterlist *sgl; + struct page *page; + unsigned long i, j; - npages = xe_npages_in_range(range->start, range->end); - pages = kvmalloc_array(npages, sizeof(*pages), GFP_KERNEL); - if (!pages) - return -ENOMEM; + lockdep_assert_held(notifier_sem); - for (i = 0; i < npages; i++) { - pages[i] = hmm_pfn_to_page(range->hmm_pfns[i]); - xe_assert(xe, !is_device_private_page(pages[i])); + i = 0; + for_each_sg(st->sgl, sgl, st->nents, j) { + unsigned long hmm_pfn, size; + + hmm_pfn = range->hmm_pfns[i]; + page = hmm_pfn_to_page(hmm_pfn); + xe_assert(xe, !is_device_private_page(page)); + + size = 1UL << hmm_pfn_to_map_order(hmm_pfn); + size -= page_to_pfn(page) & (size - 1); + i += size; + + if (unlikely(j == st->nents - 1)) { + xe_assert(xe, i >= npages); + if (i > npages) + size -= (i - npages); + + sg_mark_end(sgl); + } else { + xe_assert(xe, i < npages); + } + + sg_set_page(sgl, page, size << PAGE_SHIFT, 0); } - ret = sg_alloc_table_from_pages_segment(st, pages, npages, 0, npages << PAGE_SHIFT, - xe_sg_segment_size(dev), GFP_KERNEL); - if (ret) - goto free_pages; + return dma_map_sgtable(dev, st, write ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE, + DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_NO_KERNEL_MAPPING); +} + +static void xe_hmm_userptr_set_mapped(struct xe_userptr_vma *uvma) +{ + struct xe_userptr *userptr = &uvma->userptr; + struct xe_vm *vm = xe_vma_vm(&uvma->vma); - ret = dma_map_sgtable(dev, st, write ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE, - DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_NO_KERNEL_MAPPING); - if (ret) { - sg_free_table(st); - st = NULL; + lockdep_assert_held_write(&vm->lock); + lockdep_assert_held(&vm->userptr.notifier_lock); + + mutex_lock(&userptr->unmap_mutex); + xe_assert(vm->xe, !userptr->mapped); + userptr->mapped = true; + mutex_unlock(&userptr->unmap_mutex); +} + +void xe_hmm_userptr_unmap(struct xe_userptr_vma *uvma) +{ + struct xe_userptr *userptr = &uvma->userptr; + struct xe_vma *vma = &uvma->vma; + bool write = !xe_vma_read_only(vma); + struct xe_vm *vm = xe_vma_vm(vma); + struct xe_device *xe = vm->xe; + + if (!lockdep_is_held_type(&vm->userptr.notifier_lock, 0) && + !lockdep_is_held_type(&vm->lock, 0) && + !(vma->gpuva.flags & XE_VMA_DESTROYED)) { + /* Don't unmap in exec critical section. */ + xe_vm_assert_held(vm); + /* Don't unmap while mapping the sg. */ + lockdep_assert_held(&vm->lock); } -free_pages: - kvfree(pages); - return ret; + mutex_lock(&userptr->unmap_mutex); + if (userptr->sg && userptr->mapped) + dma_unmap_sgtable(xe->drm.dev, userptr->sg, + write ? 
DMA_BIDIRECTIONAL : DMA_TO_DEVICE, 0); + userptr->mapped = false; + mutex_unlock(&userptr->unmap_mutex); } -/* +/** * xe_hmm_userptr_free_sg() - Free the scatter gather table of userptr - * * @uvma: the userptr vma which hold the scatter gather table * * With function xe_userptr_populate_range, we allocate storage of @@ -124,16 +204,9 @@ static int xe_build_sg(struct xe_device *xe, struct hmm_range *range, void xe_hmm_userptr_free_sg(struct xe_userptr_vma *uvma) { struct xe_userptr *userptr = &uvma->userptr; - struct xe_vma *vma = &uvma->vma; - bool write = !xe_vma_read_only(vma); - struct xe_vm *vm = xe_vma_vm(vma); - struct xe_device *xe = vm->xe; - struct device *dev = xe->drm.dev; - - xe_assert(xe, userptr->sg); - dma_unmap_sgtable(dev, userptr->sg, - write ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE, 0); + xe_assert(xe_vma_vm(&uvma->vma)->xe, userptr->sg); + xe_hmm_userptr_unmap(uvma); sg_free_table(userptr->sg); userptr->sg = NULL; } @@ -166,13 +239,20 @@ int xe_hmm_userptr_populate_range(struct xe_userptr_vma *uvma, { unsigned long timeout = jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT); - unsigned long *pfns, flags = HMM_PFN_REQ_FAULT; + unsigned long *pfns; struct xe_userptr *userptr; struct xe_vma *vma = &uvma->vma; u64 userptr_start = xe_vma_userptr(vma); u64 userptr_end = userptr_start + xe_vma_size(vma); struct xe_vm *vm = xe_vma_vm(vma); - struct hmm_range hmm_range; + struct hmm_range hmm_range = { + .pfn_flags_mask = 0, /* ignore pfns */ + .default_flags = HMM_PFN_REQ_FAULT, + .start = userptr_start, + .end = userptr_end, + .notifier = &uvma->userptr.notifier, + .dev_private_owner = vm->xe, + }; bool write = !xe_vma_read_only(vma); unsigned long notifier_seq; u64 npages; @@ -199,19 +279,14 @@ int xe_hmm_userptr_populate_range(struct xe_userptr_vma *uvma, return -ENOMEM; if (write) - flags |= HMM_PFN_REQ_WRITE; + hmm_range.default_flags |= HMM_PFN_REQ_WRITE; if (!mmget_not_zero(userptr->notifier.mm)) { ret = -EFAULT; goto free_pfns; } - hmm_range.default_flags = flags; hmm_range.hmm_pfns = pfns; - hmm_range.notifier = &userptr->notifier; - hmm_range.start = userptr_start; - hmm_range.end = userptr_end; - hmm_range.dev_private_owner = vm->xe; while (true) { hmm_range.notifier_seq = mmu_interval_read_begin(&userptr->notifier); @@ -238,16 +313,37 @@ int xe_hmm_userptr_populate_range(struct xe_userptr_vma *uvma, if (ret) goto free_pfns; - ret = xe_build_sg(vm->xe, &hmm_range, &userptr->sgt, write); + ret = xe_alloc_sg(vm->xe, &userptr->sgt, &hmm_range, &vm->userptr.notifier_lock); if (ret) goto free_pfns; + ret = down_read_interruptible(&vm->userptr.notifier_lock); + if (ret) + goto free_st; + + if (mmu_interval_read_retry(hmm_range.notifier, hmm_range.notifier_seq)) { + ret = -EAGAIN; + goto out_unlock; + } + + ret = xe_build_sg(vm->xe, &hmm_range, &userptr->sgt, + &vm->userptr.notifier_lock, write); + if (ret) + goto out_unlock; + xe_mark_range_accessed(&hmm_range, write); userptr->sg = &userptr->sgt; + xe_hmm_userptr_set_mapped(uvma); userptr->notifier_seq = hmm_range.notifier_seq; + up_read(&vm->userptr.notifier_lock); + kvfree(pfns); + return 0; +out_unlock: + up_read(&vm->userptr.notifier_lock); +free_st: + sg_free_table(&userptr->sgt); free_pfns: kvfree(pfns); return ret; } - diff --git a/drivers/gpu/drm/xe/xe_hmm.h b/drivers/gpu/drm/xe/xe_hmm.h index 909dc2bdcd97e..0ea98d8e7bbc7 100644 --- a/drivers/gpu/drm/xe/xe_hmm.h +++ b/drivers/gpu/drm/xe/xe_hmm.h @@ -3,9 +3,16 @@ * Copyright © 2024 Intel Corporation */ +#ifndef _XE_HMM_H_ +#define _XE_HMM_H_ + #include struct 
xe_userptr_vma; int xe_hmm_userptr_populate_range(struct xe_userptr_vma *uvma, bool is_mm_mmap_locked); + void xe_hmm_userptr_free_sg(struct xe_userptr_vma *uvma); + +void xe_hmm_userptr_unmap(struct xe_userptr_vma *uvma); +#endif diff --git a/drivers/gpu/drm/xe/xe_irq.c b/drivers/gpu/drm/xe/xe_irq.c index 32f5a67a917b5..08552ee3fb94b 100644 --- a/drivers/gpu/drm/xe/xe_irq.c +++ b/drivers/gpu/drm/xe/xe_irq.c @@ -757,19 +757,7 @@ int xe_irq_install(struct xe_device *xe) xe_irq_postinstall(xe); - err = devm_add_action_or_reset(xe->drm.dev, irq_uninstall, xe); - if (err) - goto free_irq_handler; - - return 0; - -free_irq_handler: - if (xe_device_has_msix(xe)) - xe_irq_msix_free(xe); - else - xe_irq_msi_free(xe); - - return err; + return devm_add_action_or_reset(xe->drm.dev, irq_uninstall, xe); } static void xe_irq_msi_synchronize_irq(struct xe_device *xe) diff --git a/drivers/gpu/drm/xe/xe_oa.c b/drivers/gpu/drm/xe/xe_oa.c index fa873f3d0a9d1..eb6cd91e1e226 100644 --- a/drivers/gpu/drm/xe/xe_oa.c +++ b/drivers/gpu/drm/xe/xe_oa.c @@ -1689,7 +1689,7 @@ static int xe_oa_stream_init(struct xe_oa_stream *stream, stream->oa_buffer.format = &stream->oa->oa_formats[param->oa_format]; stream->sample = param->sample; - stream->periodic = param->period_exponent > 0; + stream->periodic = param->period_exponent >= 0; stream->period_exponent = param->period_exponent; stream->no_preempt = param->no_preempt; stream->wait_num_reports = param->wait_num_reports; @@ -1970,6 +1970,7 @@ int xe_oa_stream_open_ioctl(struct drm_device *dev, u64 data, struct drm_file *f } param.xef = xef; + param.period_exponent = -1; ret = xe_oa_user_extensions(oa, XE_OA_USER_EXTN_FROM_OPEN, data, 0, ¶m); if (ret) return ret; @@ -2024,7 +2025,7 @@ int xe_oa_stream_open_ioctl(struct drm_device *dev, u64 data, struct drm_file *f goto err_exec_q; } - if (param.period_exponent > 0) { + if (param.period_exponent >= 0) { u64 oa_period, oa_freq_hz; /* Requesting samples from OAG buffer is a privileged operation */ diff --git a/drivers/gpu/drm/xe/xe_pm.c b/drivers/gpu/drm/xe/xe_pm.c index c9cc0c091dfdd..89fd2c043136e 100644 --- a/drivers/gpu/drm/xe/xe_pm.c +++ b/drivers/gpu/drm/xe/xe_pm.c @@ -267,6 +267,15 @@ int xe_pm_init_early(struct xe_device *xe) } ALLOW_ERROR_INJECTION(xe_pm_init_early, ERRNO); /* See xe_pci_probe() */ +static u32 vram_threshold_value(struct xe_device *xe) +{ + /* FIXME: D3Cold temporarily disabled by default on BMG */ + if (xe->info.platform == XE_BATTLEMAGE) + return 0; + + return DEFAULT_VRAM_THRESHOLD; +} + /** * xe_pm_init - Initialize Xe Power Management * @xe: xe device instance @@ -277,6 +286,7 @@ ALLOW_ERROR_INJECTION(xe_pm_init_early, ERRNO); /* See xe_pci_probe() */ */ int xe_pm_init(struct xe_device *xe) { + u32 vram_threshold; int err; /* For now suspend/resume is only allowed with GuC */ @@ -290,7 +300,8 @@ int xe_pm_init(struct xe_device *xe) if (err) return err; - err = xe_pm_set_vram_threshold(xe, DEFAULT_VRAM_THRESHOLD); + vram_threshold = vram_threshold_value(xe); + err = xe_pm_set_vram_threshold(xe, vram_threshold); if (err) return err; } diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c index 1ddcc7e79a93e..dc24baa840924 100644 --- a/drivers/gpu/drm/xe/xe_pt.c +++ b/drivers/gpu/drm/xe/xe_pt.c @@ -28,6 +28,8 @@ struct xe_pt_dir { struct xe_pt pt; /** @children: Array of page-table child nodes */ struct xe_ptw *children[XE_PDES]; + /** @staging: Array of page-table staging nodes */ + struct xe_ptw *staging[XE_PDES]; }; #if IS_ENABLED(CONFIG_DRM_XE_DEBUG_VM) @@ -48,9 +50,10 @@ 
static struct xe_pt_dir *as_xe_pt_dir(struct xe_pt *pt) return container_of(pt, struct xe_pt_dir, pt); } -static struct xe_pt *xe_pt_entry(struct xe_pt_dir *pt_dir, unsigned int index) +static struct xe_pt * +xe_pt_entry_staging(struct xe_pt_dir *pt_dir, unsigned int index) { - return container_of(pt_dir->children[index], struct xe_pt, base); + return container_of(pt_dir->staging[index], struct xe_pt, base); } static u64 __xe_pt_empty_pte(struct xe_tile *tile, struct xe_vm *vm, @@ -125,6 +128,7 @@ struct xe_pt *xe_pt_create(struct xe_vm *vm, struct xe_tile *tile, } pt->bo = bo; pt->base.children = level ? as_xe_pt_dir(pt)->children : NULL; + pt->base.staging = level ? as_xe_pt_dir(pt)->staging : NULL; if (vm->xef) xe_drm_client_add_bo(vm->xef->client, pt->bo); @@ -206,8 +210,8 @@ void xe_pt_destroy(struct xe_pt *pt, u32 flags, struct llist_head *deferred) struct xe_pt_dir *pt_dir = as_xe_pt_dir(pt); for (i = 0; i < XE_PDES; i++) { - if (xe_pt_entry(pt_dir, i)) - xe_pt_destroy(xe_pt_entry(pt_dir, i), flags, + if (xe_pt_entry_staging(pt_dir, i)) + xe_pt_destroy(xe_pt_entry_staging(pt_dir, i), flags, deferred); } } @@ -376,8 +380,10 @@ xe_pt_insert_entry(struct xe_pt_stage_bind_walk *xe_walk, struct xe_pt *parent, /* Continue building a non-connected subtree. */ struct iosys_map *map = &parent->bo->vmap; - if (unlikely(xe_child)) + if (unlikely(xe_child)) { parent->base.children[offset] = &xe_child->base; + parent->base.staging[offset] = &xe_child->base; + } xe_pt_write(xe_walk->vm->xe, map, offset, pte); parent->num_live++; @@ -614,6 +620,7 @@ xe_pt_stage_bind(struct xe_tile *tile, struct xe_vma *vma, .ops = &xe_pt_stage_bind_ops, .shifts = xe_normal_pt_shifts, .max_level = XE_PT_HIGHEST_LEVEL, + .staging = true, }, .vm = xe_vma_vm(vma), .tile = tile, @@ -873,7 +880,7 @@ static void xe_pt_cancel_bind(struct xe_vma *vma, } } -static void xe_pt_commit_locks_assert(struct xe_vma *vma) +static void xe_pt_commit_prepare_locks_assert(struct xe_vma *vma) { struct xe_vm *vm = xe_vma_vm(vma); @@ -885,6 +892,16 @@ static void xe_pt_commit_locks_assert(struct xe_vma *vma) xe_vm_assert_held(vm); } +static void xe_pt_commit_locks_assert(struct xe_vma *vma) +{ + struct xe_vm *vm = xe_vma_vm(vma); + + xe_pt_commit_prepare_locks_assert(vma); + + if (xe_vma_is_userptr(vma)) + lockdep_assert_held_read(&vm->userptr.notifier_lock); +} + static void xe_pt_commit(struct xe_vma *vma, struct xe_vm_pgtable_update *entries, u32 num_entries, struct llist_head *deferred) @@ -895,13 +912,17 @@ static void xe_pt_commit(struct xe_vma *vma, for (i = 0; i < num_entries; i++) { struct xe_pt *pt = entries[i].pt; + struct xe_pt_dir *pt_dir; if (!pt->level) continue; + pt_dir = as_xe_pt_dir(pt); for (j = 0; j < entries[i].qwords; j++) { struct xe_pt *oldpte = entries[i].pt_entries[j].pt; + int j_ = j + entries[i].ofs; + pt_dir->children[j_] = pt_dir->staging[j_]; xe_pt_destroy(oldpte, xe_vma_vm(vma)->flags, deferred); } } @@ -913,7 +934,7 @@ static void xe_pt_abort_bind(struct xe_vma *vma, { int i, j; - xe_pt_commit_locks_assert(vma); + xe_pt_commit_prepare_locks_assert(vma); for (i = num_entries - 1; i >= 0; --i) { struct xe_pt *pt = entries[i].pt; @@ -928,10 +949,10 @@ static void xe_pt_abort_bind(struct xe_vma *vma, pt_dir = as_xe_pt_dir(pt); for (j = 0; j < entries[i].qwords; j++) { u32 j_ = j + entries[i].ofs; - struct xe_pt *newpte = xe_pt_entry(pt_dir, j_); + struct xe_pt *newpte = xe_pt_entry_staging(pt_dir, j_); struct xe_pt *oldpte = entries[i].pt_entries[j].pt; - pt_dir->children[j_] = oldpte ? 
&oldpte->base : 0; + pt_dir->staging[j_] = oldpte ? &oldpte->base : 0; xe_pt_destroy(newpte, xe_vma_vm(vma)->flags, NULL); } } @@ -943,7 +964,7 @@ static void xe_pt_commit_prepare_bind(struct xe_vma *vma, { u32 i, j; - xe_pt_commit_locks_assert(vma); + xe_pt_commit_prepare_locks_assert(vma); for (i = 0; i < num_entries; i++) { struct xe_pt *pt = entries[i].pt; @@ -961,10 +982,10 @@ static void xe_pt_commit_prepare_bind(struct xe_vma *vma, struct xe_pt *newpte = entries[i].pt_entries[j].pt; struct xe_pt *oldpte = NULL; - if (xe_pt_entry(pt_dir, j_)) - oldpte = xe_pt_entry(pt_dir, j_); + if (xe_pt_entry_staging(pt_dir, j_)) + oldpte = xe_pt_entry_staging(pt_dir, j_); - pt_dir->children[j_] = &newpte->base; + pt_dir->staging[j_] = &newpte->base; entries[i].pt_entries[j].pt = oldpte; } } @@ -1213,42 +1234,22 @@ static int vma_check_userptr(struct xe_vm *vm, struct xe_vma *vma, return 0; uvma = to_userptr_vma(vma); - notifier_seq = uvma->userptr.notifier_seq; + if (xe_pt_userptr_inject_eagain(uvma)) + xe_vma_userptr_force_invalidate(uvma); - if (uvma->userptr.initial_bind && !xe_vm_in_fault_mode(vm)) - return 0; + notifier_seq = uvma->userptr.notifier_seq; if (!mmu_interval_read_retry(&uvma->userptr.notifier, - notifier_seq) && - !xe_pt_userptr_inject_eagain(uvma)) + notifier_seq)) return 0; - if (xe_vm_in_fault_mode(vm)) { + if (xe_vm_in_fault_mode(vm)) return -EAGAIN; - } else { - spin_lock(&vm->userptr.invalidated_lock); - list_move_tail(&uvma->userptr.invalidate_link, - &vm->userptr.invalidated); - spin_unlock(&vm->userptr.invalidated_lock); - - if (xe_vm_in_preempt_fence_mode(vm)) { - struct dma_resv_iter cursor; - struct dma_fence *fence; - long err; - - dma_resv_iter_begin(&cursor, xe_vm_resv(vm), - DMA_RESV_USAGE_BOOKKEEP); - dma_resv_for_each_fence_unlocked(&cursor, fence) - dma_fence_enable_sw_signaling(fence); - dma_resv_iter_end(&cursor); - - err = dma_resv_wait_timeout(xe_vm_resv(vm), - DMA_RESV_USAGE_BOOKKEEP, - false, MAX_SCHEDULE_TIMEOUT); - XE_WARN_ON(err <= 0); - } - } + /* + * Just continue the operation since exec or rebind worker + * will take care of rebinding. + */ return 0; } @@ -1514,6 +1515,7 @@ static unsigned int xe_pt_stage_unbind(struct xe_tile *tile, struct xe_vma *vma, .ops = &xe_pt_stage_unbind_ops, .shifts = xe_normal_pt_shifts, .max_level = XE_PT_HIGHEST_LEVEL, + .staging = true, }, .tile = tile, .modified_start = xe_vma_start(vma), @@ -1555,7 +1557,7 @@ static void xe_pt_abort_unbind(struct xe_vma *vma, { int i, j; - xe_pt_commit_locks_assert(vma); + xe_pt_commit_prepare_locks_assert(vma); for (i = num_entries - 1; i >= 0; --i) { struct xe_vm_pgtable_update *entry = &entries[i]; @@ -1568,7 +1570,7 @@ static void xe_pt_abort_unbind(struct xe_vma *vma, continue; for (j = entry->ofs; j < entry->ofs + entry->qwords; j++) - pt_dir->children[j] = + pt_dir->staging[j] = entries[i].pt_entries[j - entry->ofs].pt ? 
&entries[i].pt_entries[j - entry->ofs].pt->base : NULL; } @@ -1581,7 +1583,7 @@ xe_pt_commit_prepare_unbind(struct xe_vma *vma, { int i, j; - xe_pt_commit_locks_assert(vma); + xe_pt_commit_prepare_locks_assert(vma); for (i = 0; i < num_entries; ++i) { struct xe_vm_pgtable_update *entry = &entries[i]; @@ -1595,8 +1597,8 @@ xe_pt_commit_prepare_unbind(struct xe_vma *vma, pt_dir = as_xe_pt_dir(pt); for (j = entry->ofs; j < entry->ofs + entry->qwords; j++) { entry->pt_entries[j - entry->ofs].pt = - xe_pt_entry(pt_dir, j); - pt_dir->children[j] = NULL; + xe_pt_entry_staging(pt_dir, j); + pt_dir->staging[j] = NULL; } } } diff --git a/drivers/gpu/drm/xe/xe_pt_walk.c b/drivers/gpu/drm/xe/xe_pt_walk.c index b8b3d2aea4923..be602a763ff32 100644 --- a/drivers/gpu/drm/xe/xe_pt_walk.c +++ b/drivers/gpu/drm/xe/xe_pt_walk.c @@ -74,7 +74,8 @@ int xe_pt_walk_range(struct xe_ptw *parent, unsigned int level, u64 addr, u64 end, struct xe_pt_walk *walk) { pgoff_t offset = xe_pt_offset(addr, level, walk); - struct xe_ptw **entries = parent->children ? parent->children : NULL; + struct xe_ptw **entries = walk->staging ? (parent->staging ?: NULL) : + (parent->children ?: NULL); const struct xe_pt_walk_ops *ops = walk->ops; enum page_walk_action action; struct xe_ptw *child; diff --git a/drivers/gpu/drm/xe/xe_pt_walk.h b/drivers/gpu/drm/xe/xe_pt_walk.h index 5ecc4d2f0f653..5c02c244f7de3 100644 --- a/drivers/gpu/drm/xe/xe_pt_walk.h +++ b/drivers/gpu/drm/xe/xe_pt_walk.h @@ -11,12 +11,14 @@ /** * struct xe_ptw - base class for driver pagetable subclassing. * @children: Pointer to an array of children if any. + * @staging: Pointer to an array of staging if any. * * Drivers could subclass this, and if it's a page-directory, typically * embed an array of xe_ptw pointers. */ struct xe_ptw { struct xe_ptw **children; + struct xe_ptw **staging; }; /** @@ -41,6 +43,8 @@ struct xe_pt_walk { * as shared pagetables. */ bool shared_pt_mode; + /** @staging: Walk staging PT structure */ + bool staging; }; /** diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c index 690330352d4cd..5956631c0d40a 100644 --- a/drivers/gpu/drm/xe/xe_vm.c +++ b/drivers/gpu/drm/xe/xe_vm.c @@ -579,51 +579,26 @@ static void preempt_rebind_work_func(struct work_struct *w) trace_xe_vm_rebind_worker_exit(vm); } -static bool vma_userptr_invalidate(struct mmu_interval_notifier *mni, - const struct mmu_notifier_range *range, - unsigned long cur_seq) +static void __vma_userptr_invalidate(struct xe_vm *vm, struct xe_userptr_vma *uvma) { - struct xe_userptr *userptr = container_of(mni, typeof(*userptr), notifier); - struct xe_userptr_vma *uvma = container_of(userptr, typeof(*uvma), userptr); + struct xe_userptr *userptr = &uvma->userptr; struct xe_vma *vma = &uvma->vma; - struct xe_vm *vm = xe_vma_vm(vma); struct dma_resv_iter cursor; struct dma_fence *fence; long err; - xe_assert(vm->xe, xe_vma_is_userptr(vma)); - trace_xe_vma_userptr_invalidate(vma); - - if (!mmu_notifier_range_blockable(range)) - return false; - - vm_dbg(&xe_vma_vm(vma)->xe->drm, - "NOTIFIER: addr=0x%016llx, range=0x%016llx", - xe_vma_start(vma), xe_vma_size(vma)); - - down_write(&vm->userptr.notifier_lock); - mmu_interval_set_seq(mni, cur_seq); - - /* No need to stop gpu access if the userptr is not yet bound. */ - if (!userptr->initial_bind) { - up_write(&vm->userptr.notifier_lock); - return true; - } - /* * Tell exec and rebind worker they need to repin and rebind this * userptr. 
*/ if (!xe_vm_in_fault_mode(vm) && - !(vma->gpuva.flags & XE_VMA_DESTROYED) && vma->tile_present) { + !(vma->gpuva.flags & XE_VMA_DESTROYED)) { spin_lock(&vm->userptr.invalidated_lock); list_move_tail(&userptr->invalidate_link, &vm->userptr.invalidated); spin_unlock(&vm->userptr.invalidated_lock); } - up_write(&vm->userptr.notifier_lock); - /* * Preempt fences turn into schedule disables, pipeline these. * Note that even in fault mode, we need to wait for binds and @@ -641,11 +616,37 @@ static bool vma_userptr_invalidate(struct mmu_interval_notifier *mni, false, MAX_SCHEDULE_TIMEOUT); XE_WARN_ON(err <= 0); - if (xe_vm_in_fault_mode(vm)) { + if (xe_vm_in_fault_mode(vm) && userptr->initial_bind) { err = xe_vm_invalidate_vma(vma); XE_WARN_ON(err); } + xe_hmm_userptr_unmap(uvma); +} + +static bool vma_userptr_invalidate(struct mmu_interval_notifier *mni, + const struct mmu_notifier_range *range, + unsigned long cur_seq) +{ + struct xe_userptr_vma *uvma = container_of(mni, typeof(*uvma), userptr.notifier); + struct xe_vma *vma = &uvma->vma; + struct xe_vm *vm = xe_vma_vm(vma); + + xe_assert(vm->xe, xe_vma_is_userptr(vma)); + trace_xe_vma_userptr_invalidate(vma); + + if (!mmu_notifier_range_blockable(range)) + return false; + + vm_dbg(&xe_vma_vm(vma)->xe->drm, + "NOTIFIER: addr=0x%016llx, range=0x%016llx", + xe_vma_start(vma), xe_vma_size(vma)); + + down_write(&vm->userptr.notifier_lock); + mmu_interval_set_seq(mni, cur_seq); + + __vma_userptr_invalidate(vm, uvma); + up_write(&vm->userptr.notifier_lock); trace_xe_vma_userptr_invalidate_complete(vma); return true; @@ -655,6 +656,34 @@ static const struct mmu_interval_notifier_ops vma_userptr_notifier_ops = { .invalidate = vma_userptr_invalidate, }; +#if IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT) +/** + * xe_vma_userptr_force_invalidate() - force invalidate a userptr + * @uvma: The userptr vma to invalidate + * + * Perform a forced userptr invalidation for testing purposes. 
+ */ +void xe_vma_userptr_force_invalidate(struct xe_userptr_vma *uvma) +{ + struct xe_vm *vm = xe_vma_vm(&uvma->vma); + + /* Protect against concurrent userptr pinning */ + lockdep_assert_held(&vm->lock); + /* Protect against concurrent notifiers */ + lockdep_assert_held(&vm->userptr.notifier_lock); + /* + * Protect against concurrent instances of this function and + * the critical exec sections + */ + xe_vm_assert_held(vm); + + if (!mmu_interval_read_retry(&uvma->userptr.notifier, + uvma->userptr.notifier_seq)) + uvma->userptr.notifier_seq -= 2; + __vma_userptr_invalidate(vm, uvma); +} +#endif + int xe_vm_userptr_pin(struct xe_vm *vm) { struct xe_userptr_vma *uvma, *next; @@ -666,20 +695,33 @@ int xe_vm_userptr_pin(struct xe_vm *vm) /* Collect invalidated userptrs */ spin_lock(&vm->userptr.invalidated_lock); + xe_assert(vm->xe, list_empty(&vm->userptr.repin_list)); list_for_each_entry_safe(uvma, next, &vm->userptr.invalidated, userptr.invalidate_link) { list_del_init(&uvma->userptr.invalidate_link); - list_move_tail(&uvma->userptr.repin_link, - &vm->userptr.repin_list); + list_add_tail(&uvma->userptr.repin_link, + &vm->userptr.repin_list); } spin_unlock(&vm->userptr.invalidated_lock); - /* Pin and move to temporary list */ + /* Pin and move to bind list */ list_for_each_entry_safe(uvma, next, &vm->userptr.repin_list, userptr.repin_link) { err = xe_vma_userptr_pin_pages(uvma); if (err == -EFAULT) { list_del_init(&uvma->userptr.repin_link); + /* + * We might have already done the pin once, but + * then had to retry before the re-bind happened, due to + * some other condition in the caller, but in the + * meantime the userptr got dinged by the notifier such + * that we need to revalidate here, but this time we hit + * the EFAULT. In such a case make sure we remove + * ourselves from the rebind list to avoid going down in + * flames. 
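The rebind-list removal right after this comment relies on a standard kernel idiom: list_del_init() re-initializes the deleted entry, so a later list_empty() on the entry itself answers "is this entry on any list?". A freestanding re-implementation showing why that test is safe (same semantics as the kernel's list.h, trimmed down):

#include <assert.h>

struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev;
	n->next = h;
	h->prev->next = n;
	h->prev = n;
}

static void list_del_init(struct list_head *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	INIT_LIST_HEAD(n); /* entry now looks like an empty list of its own */
}

static int list_empty(const struct list_head *h) { return h->next == h; }

int main(void)
{
	struct list_head head, item;

	INIT_LIST_HEAD(&head);
	INIT_LIST_HEAD(&item);
	list_add_tail(&item, &head);
	assert(!list_empty(&item)); /* linked: next points into the list */
	list_del_init(&item);
	assert(list_empty(&item));  /* unlinked: membership is testable */
	return 0;
}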
+ */ + if (!list_empty(&uvma->vma.combined_links.rebind)) + list_del_init(&uvma->vma.combined_links.rebind); /* Wait for pending binds */ xe_vm_lock(vm, false); @@ -690,10 +732,10 @@ int xe_vm_userptr_pin(struct xe_vm *vm) err = xe_vm_invalidate_vma(&uvma->vma); xe_vm_unlock(vm); if (err) - return err; + break; } else { - if (err < 0) - return err; + if (err) + break; list_del_init(&uvma->userptr.repin_link); list_move_tail(&uvma->vma.combined_links.rebind, @@ -701,7 +743,19 @@ int xe_vm_userptr_pin(struct xe_vm *vm) } } - return 0; + if (err) { + down_write(&vm->userptr.notifier_lock); + spin_lock(&vm->userptr.invalidated_lock); + list_for_each_entry_safe(uvma, next, &vm->userptr.repin_list, + userptr.repin_link) { + list_del_init(&uvma->userptr.repin_link); + list_move_tail(&uvma->userptr.invalidate_link, + &vm->userptr.invalidated); + } + spin_unlock(&vm->userptr.invalidated_lock); + up_write(&vm->userptr.notifier_lock); + } + return err; } /** @@ -987,6 +1041,7 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm, INIT_LIST_HEAD(&userptr->invalidate_link); INIT_LIST_HEAD(&userptr->repin_link); vma->gpuva.gem.offset = bo_offset_or_userptr; + mutex_init(&userptr->unmap_mutex); err = mmu_interval_notifier_insert(&userptr->notifier, current->mm, @@ -1028,6 +1083,7 @@ static void xe_vma_destroy_late(struct xe_vma *vma) * them anymore */ mmu_interval_notifier_remove(&userptr->notifier); + mutex_destroy(&userptr->unmap_mutex); xe_vm_put(vm); } else if (xe_vma_is_null(vma)) { xe_vm_put(vm); @@ -1066,6 +1122,7 @@ static void xe_vma_destroy(struct xe_vma *vma, struct dma_fence *fence) xe_assert(vm->xe, vma->gpuva.flags & XE_VMA_DESTROYED); spin_lock(&vm->userptr.invalidated_lock); + xe_assert(vm->xe, list_empty(&to_userptr_vma(vma)->userptr.repin_link)); list_del(&to_userptr_vma(vma)->userptr.invalidate_link); spin_unlock(&vm->userptr.invalidated_lock); } else if (!xe_vma_is_null(vma)) { @@ -1752,9 +1809,6 @@ int xe_vm_create_ioctl(struct drm_device *dev, void *data, args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE)) return -EINVAL; - if (XE_IOCTL_DBG(xe, args->extensions)) - return -EINVAL; - if (args->flags & DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE) flags |= XE_VM_FLAG_SCRATCH_PAGE; if (args->flags & DRM_XE_VM_CREATE_FLAG_LR_MODE) @@ -2260,8 +2314,17 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops, break; } case DRM_GPUVA_OP_UNMAP: + xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask); + break; case DRM_GPUVA_OP_PREFETCH: - /* FIXME: Need to skip some prefetch ops */ + vma = gpuva_to_vma(op->base.prefetch.va); + + if (xe_vma_is_userptr(vma)) { + err = xe_vma_userptr_pin_pages(to_userptr_vma(vma)); + if (err) + return err; + } + xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask); break; default: diff --git a/drivers/gpu/drm/xe/xe_vm.h b/drivers/gpu/drm/xe/xe_vm.h index 23adb74428815..b882bfb31bd05 100644 --- a/drivers/gpu/drm/xe/xe_vm.h +++ b/drivers/gpu/drm/xe/xe_vm.h @@ -274,9 +274,17 @@ static inline void vm_dbg(const struct drm_device *dev, const char *format, ...) 
{ /* noop */ } #endif -#endif struct xe_vm_snapshot *xe_vm_snapshot_capture(struct xe_vm *vm); void xe_vm_snapshot_capture_delayed(struct xe_vm_snapshot *snap); void xe_vm_snapshot_print(struct xe_vm_snapshot *snap, struct drm_printer *p); void xe_vm_snapshot_free(struct xe_vm_snapshot *snap); + +#if IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT) +void xe_vma_userptr_force_invalidate(struct xe_userptr_vma *uvma); +#else +static inline void xe_vma_userptr_force_invalidate(struct xe_userptr_vma *uvma) +{ +} +#endif +#endif diff --git a/drivers/gpu/drm/xe/xe_vm_types.h b/drivers/gpu/drm/xe/xe_vm_types.h index 7f9a303e51d89..a4b4091cfd0da 100644 --- a/drivers/gpu/drm/xe/xe_vm_types.h +++ b/drivers/gpu/drm/xe/xe_vm_types.h @@ -59,12 +59,16 @@ struct xe_userptr { struct sg_table *sg; /** @notifier_seq: notifier sequence number */ unsigned long notifier_seq; + /** @unmap_mutex: Mutex protecting dma-unmapping */ + struct mutex unmap_mutex; /** * @initial_bind: user pointer has been bound at least once. * write: vm->userptr.notifier_lock in read mode and vm->resv held. * read: vm->userptr.notifier_lock in write mode or vm->resv held. */ bool initial_bind; + /** @mapped: Whether the @sgt sg-table is dma-mapped. Protected by @unmap_mutex. */ + bool mapped; #if IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT) u32 divisor; #endif @@ -227,8 +231,8 @@ struct xe_vm { * up for revalidation. Protected from access with the * @invalidated_lock. Removing items from the list * additionally requires @lock in write mode, and adding - * items to the list requires the @userptr.notifer_lock in - * write mode. + * items to the list requires either the @userptr.notifier_lock in + * write mode, OR @lock in write mode. */ struct list_head invalidated; } userptr; diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c index 49812a76b7edd..d900dd05c335c 100644 --- a/drivers/hid/hid-apple.c +++ b/drivers/hid/hid-apple.c @@ -378,6 +378,12 @@ static bool apple_is_non_apple_keyboard(struct hid_device *hdev) return false; } +static bool apple_is_omoton_kb066(struct hid_device *hdev) +{ + return hdev->product == USB_DEVICE_ID_APPLE_ALU_WIRELESS_ANSI && + strcmp(hdev->name, "Bluetooth Keyboard") == 0; +} + static inline void apple_setup_key_translation(struct input_dev *input, const struct apple_key_translation *table) { @@ -546,9 +552,6 @@ static int hidinput_apple_event(struct hid_device *hid, struct input_dev *input, } } - if (usage->hid == 0xc0301) /* Omoton KB066 quirk */ - code = KEY_F6; - if (usage->code != code) { input_event_with_scancode(input, usage->type, code, usage->hid, value); @@ -728,7 +731,7 @@ static int apple_input_configured(struct hid_device *hdev, { struct apple_sc *asc = hid_get_drvdata(hdev); - if ((asc->quirks & APPLE_HAS_FN) && !asc->fn_found) { + if (((asc->quirks & APPLE_HAS_FN) && !asc->fn_found) || apple_is_omoton_kb066(hdev)) { hid_info(hdev, "Fn key not found (Apple Wireless Keyboard clone?), disabling Fn key handling\n"); asc->quirks &= ~APPLE_HAS_FN; } diff --git a/drivers/hid/hid-appleir.c b/drivers/hid/hid-appleir.c index 8deded1857254..c45e5aa569d25 100644 --- a/drivers/hid/hid-appleir.c +++ b/drivers/hid/hid-appleir.c @@ -188,7 +188,7 @@ static int appleir_raw_event(struct hid_device *hid, struct hid_report *report, static const u8 flatbattery[] = { 0x25, 0x87, 0xe0 }; unsigned long flags; - if (len != 5) + if (len != 5 || !(hid->claimed & HID_CLAIMED_INPUT)) goto out; if (!memcmp(data, keydown, sizeof(keydown))) { diff --git a/drivers/hid/hid-corsair-void.c 
b/drivers/hid/hid-corsair-void.c index 56e858066c3c3..afbd67aa97192 100644 --- a/drivers/hid/hid-corsair-void.c +++ b/drivers/hid/hid-corsair-void.c @@ -71,11 +71,9 @@ #include #include -#include #include #include #include -#include #include #include #include @@ -120,6 +118,12 @@ enum { CORSAIR_VOID_BATTERY_CHARGING = 5, }; +enum { + CORSAIR_VOID_ADD_BATTERY = 0, + CORSAIR_VOID_REMOVE_BATTERY = 1, + CORSAIR_VOID_UPDATE_BATTERY = 2, +}; + static enum power_supply_property corsair_void_battery_props[] = { POWER_SUPPLY_PROP_STATUS, POWER_SUPPLY_PROP_PRESENT, @@ -155,12 +159,12 @@ struct corsair_void_drvdata { struct power_supply *battery; struct power_supply_desc battery_desc; - struct mutex battery_mutex; struct delayed_work delayed_status_work; struct delayed_work delayed_firmware_work; - struct work_struct battery_remove_work; - struct work_struct battery_add_work; + + unsigned long battery_work_flags; + struct work_struct battery_work; }; /* @@ -260,11 +264,9 @@ static void corsair_void_process_receiver(struct corsair_void_drvdata *drvdata, /* Inform power supply if battery values changed */ if (memcmp(&orig_battery_data, battery_data, sizeof(*battery_data))) { - scoped_guard(mutex, &drvdata->battery_mutex) { - if (drvdata->battery) { - power_supply_changed(drvdata->battery); - } - } + set_bit(CORSAIR_VOID_UPDATE_BATTERY, + &drvdata->battery_work_flags); + schedule_work(&drvdata->battery_work); } } @@ -536,29 +538,11 @@ static void corsair_void_firmware_work_handler(struct work_struct *work) } -static void corsair_void_battery_remove_work_handler(struct work_struct *work) -{ - struct corsair_void_drvdata *drvdata; - - drvdata = container_of(work, struct corsair_void_drvdata, - battery_remove_work); - scoped_guard(mutex, &drvdata->battery_mutex) { - if (drvdata->battery) { - power_supply_unregister(drvdata->battery); - drvdata->battery = NULL; - } - } -} - -static void corsair_void_battery_add_work_handler(struct work_struct *work) +static void corsair_void_add_battery(struct corsair_void_drvdata *drvdata) { - struct corsair_void_drvdata *drvdata; struct power_supply_config psy_cfg = {}; struct power_supply *new_supply; - drvdata = container_of(work, struct corsair_void_drvdata, - battery_add_work); - guard(mutex)(&drvdata->battery_mutex); if (drvdata->battery) return; @@ -583,16 +567,42 @@ static void corsair_void_battery_add_work_handler(struct work_struct *work) drvdata->battery = new_supply; } +static void corsair_void_battery_work_handler(struct work_struct *work) +{ + struct corsair_void_drvdata *drvdata = container_of(work, + struct corsair_void_drvdata, battery_work); + + bool add_battery = test_and_clear_bit(CORSAIR_VOID_ADD_BATTERY, + &drvdata->battery_work_flags); + bool remove_battery = test_and_clear_bit(CORSAIR_VOID_REMOVE_BATTERY, + &drvdata->battery_work_flags); + bool update_battery = test_and_clear_bit(CORSAIR_VOID_UPDATE_BATTERY, + &drvdata->battery_work_flags); + + if (add_battery && !remove_battery) { + corsair_void_add_battery(drvdata); + } else if (remove_battery && !add_battery && drvdata->battery) { + power_supply_unregister(drvdata->battery); + drvdata->battery = NULL; + } + + if (update_battery && drvdata->battery) + power_supply_changed(drvdata->battery); + +} + static void corsair_void_headset_connected(struct corsair_void_drvdata *drvdata) { - schedule_work(&drvdata->battery_add_work); + set_bit(CORSAIR_VOID_ADD_BATTERY, &drvdata->battery_work_flags); + schedule_work(&drvdata->battery_work); schedule_delayed_work(&drvdata->delayed_firmware_work, 
msecs_to_jiffies(100)); } static void corsair_void_headset_disconnected(struct corsair_void_drvdata *drvdata) { - schedule_work(&drvdata->battery_remove_work); + set_bit(CORSAIR_VOID_REMOVE_BATTERY, &drvdata->battery_work_flags); + schedule_work(&drvdata->battery_work); corsair_void_set_unknown_wireless_data(drvdata); corsair_void_set_unknown_batt(drvdata); @@ -678,13 +688,7 @@ static int corsair_void_probe(struct hid_device *hid_dev, drvdata->battery_desc.get_property = corsair_void_battery_get_property; drvdata->battery = NULL; - INIT_WORK(&drvdata->battery_remove_work, - corsair_void_battery_remove_work_handler); - INIT_WORK(&drvdata->battery_add_work, - corsair_void_battery_add_work_handler); - ret = devm_mutex_init(drvdata->dev, &drvdata->battery_mutex); - if (ret) - return ret; + INIT_WORK(&drvdata->battery_work, corsair_void_battery_work_handler); ret = sysfs_create_group(&hid_dev->dev.kobj, &corsair_void_attr_group); if (ret) @@ -721,8 +725,7 @@ static void corsair_void_remove(struct hid_device *hid_dev) struct corsair_void_drvdata *drvdata = hid_get_drvdata(hid_dev); hid_hw_stop(hid_dev); - cancel_work_sync(&drvdata->battery_remove_work); - cancel_work_sync(&drvdata->battery_add_work); + cancel_work_sync(&drvdata->battery_work); if (drvdata->battery) power_supply_unregister(drvdata->battery); diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c index 541d682af15aa..8433306148d57 100644 --- a/drivers/hid/hid-debug.c +++ b/drivers/hid/hid-debug.c @@ -3450,7 +3450,7 @@ static const char *keys[KEY_MAX + 1] = { [KEY_MACRO_RECORD_START] = "MacroRecordStart", [KEY_MACRO_RECORD_STOP] = "MacroRecordStop", [KEY_MARK_WAYPOINT] = "MarkWayPoint", [KEY_MEDIA_REPEAT] = "MediaRepeat", - [KEY_MEDIA_TOP_MENU] = "MediaTopMenu", [KEY_MESSENGER] = "Messanger", + [KEY_MEDIA_TOP_MENU] = "MediaTopMenu", [KEY_MESSENGER] = "Messenger", [KEY_NAV_CHART] = "NavChar", [KEY_NAV_INFO] = "NavInfo", [KEY_NEWS] = "News", [KEY_NEXT_ELEMENT] = "NextElement", [KEY_NEXT_FAVORITE] = "NextFavorite", [KEY_NOTIFICATION_CENTER] = "NotificationCenter", diff --git a/drivers/hid/hid-google-hammer.c b/drivers/hid/hid-google-hammer.c index 0f292b5d3e26d..eb6fd2dc75d0a 100644 --- a/drivers/hid/hid-google-hammer.c +++ b/drivers/hid/hid-google-hammer.c @@ -268,11 +268,13 @@ static void cbas_ec_remove(struct platform_device *pdev) mutex_unlock(&cbas_ec_reglock); } +#ifdef CONFIG_ACPI static const struct acpi_device_id cbas_ec_acpi_ids[] = { { "GOOG000B", 0 }, { } }; MODULE_DEVICE_TABLE(acpi, cbas_ec_acpi_ids); +#endif #ifdef CONFIG_OF static const struct of_device_id cbas_ec_of_match[] = { diff --git a/drivers/hid/hid-nintendo.c b/drivers/hid/hid-nintendo.c index 11ac246176ae1..839d5bcd72b1e 100644 --- a/drivers/hid/hid-nintendo.c +++ b/drivers/hid/hid-nintendo.c @@ -457,13 +457,13 @@ static const struct joycon_ctlr_button_mapping snescon_button_mappings[] = { }; static const struct joycon_ctlr_button_mapping gencon_button_mappings[] = { - { BTN_A, JC_BTN_A, }, - { BTN_B, JC_BTN_B, }, - { BTN_C, JC_BTN_R, }, - { BTN_X, JC_BTN_X, }, /* MD/GEN 6B Only */ - { BTN_Y, JC_BTN_Y, }, /* MD/GEN 6B Only */ - { BTN_Z, JC_BTN_L, }, /* MD/GEN 6B Only */ - { BTN_SELECT, JC_BTN_ZR, }, + { BTN_WEST, JC_BTN_A, }, /* A */ + { BTN_SOUTH, JC_BTN_B, }, /* B */ + { BTN_EAST, JC_BTN_R, }, /* C */ + { BTN_TL, JC_BTN_X, }, /* X MD/GEN 6B Only */ + { BTN_NORTH, JC_BTN_Y, }, /* Y MD/GEN 6B Only */ + { BTN_TR, JC_BTN_L, }, /* Z MD/GEN 6B Only */ + { BTN_SELECT, JC_BTN_ZR, }, /* Mode */ { BTN_START, JC_BTN_PLUS, }, { BTN_MODE, JC_BTN_HOME, }, { BTN_Z, 
JC_BTN_CAP, }, diff --git a/drivers/hid/hid-steam.c b/drivers/hid/hid-steam.c index c9e65e9088b31..10460b7bde1a2 100644 --- a/drivers/hid/hid-steam.c +++ b/drivers/hid/hid-steam.c @@ -1327,11 +1327,11 @@ static void steam_remove(struct hid_device *hdev) return; } + hid_destroy_device(steam->client_hdev); cancel_delayed_work_sync(&steam->mode_switch); cancel_work_sync(&steam->work_connect); cancel_work_sync(&steam->rumble_work); cancel_work_sync(&steam->unregister_work); - hid_destroy_device(steam->client_hdev); steam->client_hdev = NULL; steam->client_opened = 0; if (steam->quirks & STEAM_QUIRK_WIRELESS) { diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c index 75544448c2393..d3912e3f2f13a 100644 --- a/drivers/hid/i2c-hid/i2c-hid-core.c +++ b/drivers/hid/i2c-hid/i2c-hid-core.c @@ -290,7 +290,7 @@ static int i2c_hid_get_report(struct i2c_hid *ihid, ihid->rawbuf, recv_len + sizeof(__le16)); if (error) { dev_err(&ihid->client->dev, - "failed to set a report to device: %d\n", error); + "failed to get a report from device: %d\n", error); return error; } diff --git a/drivers/hid/intel-ish-hid/ishtp-hid-client.c b/drivers/hid/intel-ish-hid/ishtp-hid-client.c index cb04cd1d980bd..6550ad5bfbb53 100644 --- a/drivers/hid/intel-ish-hid/ishtp-hid-client.c +++ b/drivers/hid/intel-ish-hid/ishtp-hid-client.c @@ -832,9 +832,9 @@ static void hid_ishtp_cl_remove(struct ishtp_cl_device *cl_device) hid_ishtp_cl); dev_dbg(ishtp_device(cl_device), "%s\n", __func__); - hid_ishtp_cl_deinit(hid_ishtp_cl); ishtp_put_device(cl_device); ishtp_hid_remove(client_data); + hid_ishtp_cl_deinit(hid_ishtp_cl); hid_ishtp_cl = NULL; diff --git a/drivers/hid/intel-ish-hid/ishtp-hid.c b/drivers/hid/intel-ish-hid/ishtp-hid.c index 00c6f0ebf3563..be2c62fc8251d 100644 --- a/drivers/hid/intel-ish-hid/ishtp-hid.c +++ b/drivers/hid/intel-ish-hid/ishtp-hid.c @@ -261,12 +261,14 @@ int ishtp_hid_probe(unsigned int cur_hid_dev, */ void ishtp_hid_remove(struct ishtp_cl_data *client_data) { + void *data; int i; for (i = 0; i < client_data->num_hid_devices; ++i) { if (client_data->hid_sensor_hubs[i]) { - kfree(client_data->hid_sensor_hubs[i]->driver_data); + data = client_data->hid_sensor_hubs[i]->driver_data; hid_destroy_device(client_data->hid_sensor_hubs[i]); + kfree(data); client_data->hid_sensor_hubs[i] = NULL; } } diff --git a/drivers/hid/intel-thc-hid/intel-quickspi/pci-quickspi.c b/drivers/hid/intel-thc-hid/intel-quickspi/pci-quickspi.c index 4641e818dfa44..6b2c7620be2b1 100644 --- a/drivers/hid/intel-thc-hid/intel-quickspi/pci-quickspi.c +++ b/drivers/hid/intel-thc-hid/intel-quickspi/pci-quickspi.c @@ -909,6 +909,8 @@ static int quickspi_restore(struct device *device) thc_change_ltr_mode(qsdev->thc_hw, THC_LTR_MODE_ACTIVE); + qsdev->state = QUICKSPI_ENABLED; + return 0; } diff --git a/drivers/hid/intel-thc-hid/intel-quickspi/quickspi-protocol.c b/drivers/hid/intel-thc-hid/intel-quickspi/quickspi-protocol.c index 7373238ceb18b..918050af73e55 100644 --- a/drivers/hid/intel-thc-hid/intel-quickspi/quickspi-protocol.c +++ b/drivers/hid/intel-thc-hid/intel-quickspi/quickspi-protocol.c @@ -107,7 +107,7 @@ static int quickspi_get_device_descriptor(struct quickspi_device *qsdev) return 0; } - dev_err_once(qsdev->dev, "Unexpected intput report type: %d\n", input_rep_type); + dev_err_once(qsdev->dev, "Unexpected input report type: %d\n", input_rep_type); return -EINVAL; } diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c index 0f6cd44fff292..6e55a1a2613d3 100644 --- a/drivers/hv/vmbus_drv.c +++ 
b/drivers/hv/vmbus_drv.c @@ -2262,12 +2262,25 @@ void vmbus_free_mmio(resource_size_t start, resource_size_t size) struct resource *iter; mutex_lock(&hyperv_mmio_lock); + + /* + * If all bytes of the MMIO range to be released are within the + * special case fb_mmio shadow region, skip releasing the shadow + * region since no corresponding __request_region() was done + * in vmbus_allocate_mmio(). + */ + if (fb_mmio && start >= fb_mmio->start && + (start + size - 1 <= fb_mmio->end)) + goto skip_shadow_release; + for (iter = hyperv_mmio; iter; iter = iter->sibling) { if ((iter->start >= start + size) || (iter->end <= start)) continue; __release_region(iter, start, size); } + +skip_shadow_release: release_mem_region(start, size); mutex_unlock(&hyperv_mmio_lock); diff --git a/drivers/hwmon/ad7314.c b/drivers/hwmon/ad7314.c index 7802bbf5f9587..59424103f6348 100644 --- a/drivers/hwmon/ad7314.c +++ b/drivers/hwmon/ad7314.c @@ -22,11 +22,13 @@ */ #define AD7314_TEMP_MASK 0x7FE0 #define AD7314_TEMP_SHIFT 5 +#define AD7314_LEADING_ZEROS_MASK BIT(15) /* * ADT7301 and ADT7302 temperature masks */ #define ADT7301_TEMP_MASK 0x3FFF +#define ADT7301_LEADING_ZEROS_MASK (BIT(15) | BIT(14)) enum ad7314_variant { adt7301, @@ -65,12 +67,20 @@ static ssize_t ad7314_temperature_show(struct device *dev, return ret; switch (spi_get_device_id(chip->spi_dev)->driver_data) { case ad7314: + if (ret & AD7314_LEADING_ZEROS_MASK) { + /* Invalid read-out, leading zero part is missing */ + return -EIO; + } data = (ret & AD7314_TEMP_MASK) >> AD7314_TEMP_SHIFT; data = sign_extend32(data, 9); return sprintf(buf, "%d\n", 250 * data); case adt7301: case adt7302: + if (ret & ADT7301_LEADING_ZEROS_MASK) { + /* Invalid read-out, leading zero part is missing */ + return -EIO; + } /* * Documented as a 13 bit twos complement register * with a sign bit - which is a 14 bit 2's complement diff --git a/drivers/hwmon/nct6775-core.c b/drivers/hwmon/nct6775-core.c index fa3351351825b..79bc67ffb9986 100644 --- a/drivers/hwmon/nct6775-core.c +++ b/drivers/hwmon/nct6775-core.c @@ -273,8 +273,8 @@ static const s8 NCT6776_BEEP_BITS[NUM_BEEP_BITS] = { static const u16 NCT6776_REG_TOLERANCE_H[] = { 0x10c, 0x20c, 0x30c, 0x80c, 0x90c, 0xa0c, 0xb0c }; -static const u8 NCT6776_REG_PWM_MODE[] = { 0x04, 0, 0, 0, 0, 0 }; -static const u8 NCT6776_PWM_MODE_MASK[] = { 0x01, 0, 0, 0, 0, 0 }; +static const u8 NCT6776_REG_PWM_MODE[] = { 0x04, 0, 0, 0, 0, 0, 0 }; +static const u8 NCT6776_PWM_MODE_MASK[] = { 0x01, 0, 0, 0, 0, 0, 0 }; static const u16 NCT6776_REG_FAN_MIN[] = { 0x63a, 0x63c, 0x63e, 0x640, 0x642, 0x64a, 0x64c }; diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c index b5352900463fb..0d29c8f97ba7c 100644 --- a/drivers/hwmon/ntc_thermistor.c +++ b/drivers/hwmon/ntc_thermistor.c @@ -181,40 +181,40 @@ static const struct ntc_compensation ncpXXwf104[] = { }; static const struct ntc_compensation ncpXXxh103[] = { - { .temp_c = -40, .ohm = 247565 }, - { .temp_c = -35, .ohm = 181742 }, - { .temp_c = -30, .ohm = 135128 }, - { .temp_c = -25, .ohm = 101678 }, - { .temp_c = -20, .ohm = 77373 }, - { .temp_c = -15, .ohm = 59504 }, - { .temp_c = -10, .ohm = 46222 }, - { .temp_c = -5, .ohm = 36244 }, - { .temp_c = 0, .ohm = 28674 }, - { .temp_c = 5, .ohm = 22878 }, - { .temp_c = 10, .ohm = 18399 }, - { .temp_c = 15, .ohm = 14910 }, - { .temp_c = 20, .ohm = 12169 }, + { .temp_c = -40, .ohm = 195652 }, + { .temp_c = -35, .ohm = 148171 }, + { .temp_c = -30, .ohm = 113347 }, + { .temp_c = -25, .ohm = 87559 }, + { .temp_c = -20, .ohm = 68237 }, 
+ { .temp_c = -15, .ohm = 53650 }, + { .temp_c = -10, .ohm = 42506 }, + { .temp_c = -5, .ohm = 33892 }, + { .temp_c = 0, .ohm = 27219 }, + { .temp_c = 5, .ohm = 22021 }, + { .temp_c = 10, .ohm = 17926 }, + { .temp_c = 15, .ohm = 14674 }, + { .temp_c = 20, .ohm = 12081 }, { .temp_c = 25, .ohm = 10000 }, - { .temp_c = 30, .ohm = 8271 }, - { .temp_c = 35, .ohm = 6883 }, - { .temp_c = 40, .ohm = 5762 }, - { .temp_c = 45, .ohm = 4851 }, - { .temp_c = 50, .ohm = 4105 }, - { .temp_c = 55, .ohm = 3492 }, - { .temp_c = 60, .ohm = 2985 }, - { .temp_c = 65, .ohm = 2563 }, - { .temp_c = 70, .ohm = 2211 }, - { .temp_c = 75, .ohm = 1915 }, - { .temp_c = 80, .ohm = 1666 }, - { .temp_c = 85, .ohm = 1454 }, - { .temp_c = 90, .ohm = 1275 }, - { .temp_c = 95, .ohm = 1121 }, - { .temp_c = 100, .ohm = 990 }, - { .temp_c = 105, .ohm = 876 }, - { .temp_c = 110, .ohm = 779 }, - { .temp_c = 115, .ohm = 694 }, - { .temp_c = 120, .ohm = 620 }, - { .temp_c = 125, .ohm = 556 }, + { .temp_c = 30, .ohm = 8315 }, + { .temp_c = 35, .ohm = 6948 }, + { .temp_c = 40, .ohm = 5834 }, + { .temp_c = 45, .ohm = 4917 }, + { .temp_c = 50, .ohm = 4161 }, + { .temp_c = 55, .ohm = 3535 }, + { .temp_c = 60, .ohm = 3014 }, + { .temp_c = 65, .ohm = 2586 }, + { .temp_c = 70, .ohm = 2228 }, + { .temp_c = 75, .ohm = 1925 }, + { .temp_c = 80, .ohm = 1669 }, + { .temp_c = 85, .ohm = 1452 }, + { .temp_c = 90, .ohm = 1268 }, + { .temp_c = 95, .ohm = 1110 }, + { .temp_c = 100, .ohm = 974 }, + { .temp_c = 105, .ohm = 858 }, + { .temp_c = 110, .ohm = 758 }, + { .temp_c = 115, .ohm = 672 }, + { .temp_c = 120, .ohm = 596 }, + { .temp_c = 125, .ohm = 531 }, }; /* diff --git a/drivers/hwmon/peci/dimmtemp.c b/drivers/hwmon/peci/dimmtemp.c index d6762259dd69c..fbe82d9852e01 100644 --- a/drivers/hwmon/peci/dimmtemp.c +++ b/drivers/hwmon/peci/dimmtemp.c @@ -127,8 +127,6 @@ static int update_thresholds(struct peci_dimmtemp *priv, int dimm_no) return 0; ret = priv->gen_info->read_thresholds(priv, dimm_order, chan_rank, &data); - if (ret == -ENODATA) /* Use default or previous value */ - return 0; if (ret) return ret; @@ -509,11 +507,11 @@ read_thresholds_icx(struct peci_dimmtemp *priv, int dimm_order, int chan_rank, u ret = peci_ep_pci_local_read(priv->peci_dev, 0, 13, 0, 2, 0xd4, ®_val); if (ret || !(reg_val & BIT(31))) - return -ENODATA; /* Use default or previous value */ + return -ENODATA; ret = peci_ep_pci_local_read(priv->peci_dev, 0, 13, 0, 2, 0xd0, ®_val); if (ret) - return -ENODATA; /* Use default or previous value */ + return -ENODATA; /* * Device 26, Offset 224e0: IMC 0 channel 0 -> rank 0 @@ -546,11 +544,11 @@ read_thresholds_spr(struct peci_dimmtemp *priv, int dimm_order, int chan_rank, u ret = peci_ep_pci_local_read(priv->peci_dev, 0, 30, 0, 2, 0xd4, ®_val); if (ret || !(reg_val & BIT(31))) - return -ENODATA; /* Use default or previous value */ + return -ENODATA; ret = peci_ep_pci_local_read(priv->peci_dev, 0, 30, 0, 2, 0xd0, ®_val); if (ret) - return -ENODATA; /* Use default or previous value */ + return -ENODATA; /* * Device 26, Offset 219a8: IMC 0 channel 0 -> rank 0 diff --git a/drivers/hwmon/pmbus/pmbus.c b/drivers/hwmon/pmbus/pmbus.c index 77cf268e7d2d6..920cd5408141a 100644 --- a/drivers/hwmon/pmbus/pmbus.c +++ b/drivers/hwmon/pmbus/pmbus.c @@ -103,6 +103,8 @@ static int pmbus_identify(struct i2c_client *client, if (pmbus_check_byte_register(client, 0, PMBUS_PAGE)) { int page; + info->pages = PMBUS_PAGES; + for (page = 1; page < PMBUS_PAGES; page++) { if (pmbus_set_page(client, page, 0xff) < 0) break; diff --git 
a/drivers/hwmon/xgene-hwmon.c b/drivers/hwmon/xgene-hwmon.c index 1e3bd129a922d..7087197383c96 100644 --- a/drivers/hwmon/xgene-hwmon.c +++ b/drivers/hwmon/xgene-hwmon.c @@ -706,7 +706,7 @@ static int xgene_hwmon_probe(struct platform_device *pdev) goto out; } - if (!ctx->pcc_comm_addr) { + if (IS_ERR_OR_NULL(ctx->pcc_comm_addr)) { dev_err(&pdev->dev, "Failed to ioremap PCC comm region\n"); rc = -ENOMEM; diff --git a/drivers/hwtracing/intel_th/msu.c b/drivers/hwtracing/intel_th/msu.c index 66123d684ac9e..bf99d79a41920 100644 --- a/drivers/hwtracing/intel_th/msu.c +++ b/drivers/hwtracing/intel_th/msu.c @@ -105,23 +105,32 @@ struct msc_iter { /** * struct msc - MSC device representation - * @reg_base: register window base address + * @reg_base: register window base address for the entire MSU + * @msu_base: register window base address for this MSC * @thdev: intel_th_device pointer * @mbuf: MSU buffer, if assigned - * @mbuf_priv MSU buffer's private data, if @mbuf + * @mbuf_priv: MSU buffer's private data, if @mbuf + * @work: a work to stop the trace when the buffer is full * @win_list: list of windows in multiblock mode * @single_sgt: single mode buffer * @cur_win: current window + * @switch_on_unlock: window to switch to when it becomes available * @nr_pages: total number of pages allocated for this buffer * @single_sz: amount of data in single mode * @single_wrap: single mode wrap occurred * @base: buffer's base pointer * @base_addr: buffer's base address + * @orig_addr: MSC0 buffer's base address + * @orig_sz: MSC0 buffer's size * @user_count: number of users of the buffer * @mmap_count: number of mappings * @buf_mutex: mutex to serialize access to buffer-related bits + * @iter_list: list of open file descriptor iterators + * @stop_on_full: stop the trace if the current window is full * @enabled: MSC is enabled * @wrap: wrapping is enabled + * @do_irq: IRQ resource is available, handle interrupts + * @multi_is_broken: multiblock mode enabled (not disabled by PCI drvdata) * @mode: MSC operating mode * @burst_len: write burst length * @index: number of this MSC in the MSU diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c index e9d8d28e055f3..e3def163d5cf7 100644 --- a/drivers/hwtracing/intel_th/pci.c +++ b/drivers/hwtracing/intel_th/pci.c @@ -334,6 +334,21 @@ static const struct pci_device_id intel_th_pci_id_table[] = { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa824), .driver_data = (kernel_ulong_t)&intel_th_2x, }, + { + /* Arrow Lake */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7724), + .driver_data = (kernel_ulong_t)&intel_th_2x, + }, + { + /* Panther Lake-H */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe324), + .driver_data = (kernel_ulong_t)&intel_th_2x, + }, + { + /* Panther Lake-P/U */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe424), + .driver_data = (kernel_ulong_t)&intel_th_2x, + }, { /* Alder Lake CPU */ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x466f), diff --git a/drivers/i2c/busses/i2c-ali1535.c b/drivers/i2c/busses/i2c-ali1535.c index 544c94e86b896..1eac358380405 100644 --- a/drivers/i2c/busses/i2c-ali1535.c +++ b/drivers/i2c/busses/i2c-ali1535.c @@ -485,6 +485,8 @@ MODULE_DEVICE_TABLE(pci, ali1535_ids); static int ali1535_probe(struct pci_dev *dev, const struct pci_device_id *id) { + int ret; + if (ali1535_setup(dev)) { dev_warn(&dev->dev, "ALI1535 not detected, module not inserted.\n"); @@ -496,7 +498,15 @@ static int ali1535_probe(struct pci_dev *dev, const struct pci_device_id *id) snprintf(ali1535_adapter.name, sizeof(ali1535_adapter.name), "SMBus ALI1535 adapter at %04x", 
ali1535_offset); - return i2c_add_adapter(&ali1535_adapter); + ret = i2c_add_adapter(&ali1535_adapter); + if (ret) + goto release_region; + + return 0; + +release_region: + release_region(ali1535_smba, ALI1535_SMB_IOSIZE); + return ret; } static void ali1535_remove(struct pci_dev *dev) diff --git a/drivers/i2c/busses/i2c-ali15x3.c b/drivers/i2c/busses/i2c-ali15x3.c index 4761c72081022..418d11266671e 100644 --- a/drivers/i2c/busses/i2c-ali15x3.c +++ b/drivers/i2c/busses/i2c-ali15x3.c @@ -472,6 +472,8 @@ MODULE_DEVICE_TABLE (pci, ali15x3_ids); static int ali15x3_probe(struct pci_dev *dev, const struct pci_device_id *id) { + int ret; + if (ali15x3_setup(dev)) { dev_err(&dev->dev, "ALI15X3 not detected, module not inserted.\n"); @@ -483,7 +485,15 @@ static int ali15x3_probe(struct pci_dev *dev, const struct pci_device_id *id) snprintf(ali15x3_adapter.name, sizeof(ali15x3_adapter.name), "SMBus ALI15X3 adapter at %04x", ali15x3_smba); - return i2c_add_adapter(&ali15x3_adapter); + ret = i2c_add_adapter(&ali15x3_adapter); + if (ret) + goto release_region; + + return 0; + +release_region: + release_region(ali15x3_smba, ALI15X3_SMB_IOSIZE); + return ret; } static void ali15x3_remove(struct pci_dev *dev) diff --git a/drivers/i2c/busses/i2c-amd-asf-plat.c b/drivers/i2c/busses/i2c-amd-asf-plat.c index 7512614bf4b73..93ebec162c6dd 100644 --- a/drivers/i2c/busses/i2c-amd-asf-plat.c +++ b/drivers/i2c/busses/i2c-amd-asf-plat.c @@ -293,6 +293,7 @@ static irqreturn_t amd_asf_irq_handler(int irq, void *ptr) amd_asf_update_ioport_target(piix4_smba, ASF_SLV_INTR, SMBHSTSTS, true); } + iowrite32(irq, dev->eoi_base); return IRQ_HANDLED; } diff --git a/drivers/i2c/busses/i2c-ls2x.c b/drivers/i2c/busses/i2c-ls2x.c index 8821cac3897b6..b475dd27b7af9 100644 --- a/drivers/i2c/busses/i2c-ls2x.c +++ b/drivers/i2c/busses/i2c-ls2x.c @@ -10,6 +10,7 @@ * Rewritten for mainline by Binbin Zhou */ +#include #include #include #include @@ -26,7 +27,8 @@ #include /* I2C Registers */ -#define I2C_LS2X_PRER 0x0 /* Freq Division Register(16 bits) */ +#define I2C_LS2X_PRER_LO 0x0 /* Freq Division Low Byte Register */ +#define I2C_LS2X_PRER_HI 0x1 /* Freq Division High Byte Register */ #define I2C_LS2X_CTR 0x2 /* Control Register */ #define I2C_LS2X_TXR 0x3 /* Transport Data Register */ #define I2C_LS2X_RXR 0x3 /* Receive Data Register */ @@ -93,6 +95,7 @@ static irqreturn_t ls2x_i2c_isr(int this_irq, void *dev_id) */ static void ls2x_i2c_adjust_bus_speed(struct ls2x_i2c_priv *priv) { + u16 val; struct i2c_timings *t = &priv->i2c_t; struct device *dev = priv->adapter.dev.parent; u32 acpi_speed = i2c_acpi_find_bus_speed(dev); @@ -104,9 +107,14 @@ static void ls2x_i2c_adjust_bus_speed(struct ls2x_i2c_priv *priv) else t->bus_freq_hz = LS2X_I2C_FREQ_STD; - /* Calculate and set i2c frequency. */ - writew(LS2X_I2C_PCLK_FREQ / (5 * t->bus_freq_hz) - 1, - priv->base + I2C_LS2X_PRER); + /* + * According to the chip manual, we can only access the registers as bytes, + * otherwise the high bits will be truncated. + * So set the I2C frequency with a sequential writeb() instead of writew(). 
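
The ls2x hunk above swaps one writew() for two writeb() accesses because the controller latches only byte-wide writes. A minimal standalone sketch of the same low/high split, assuming a hypothetical reg_write8() accessor in place of writeb():

#include <stdint.h>

/* Hypothetical byte-wide register accessor, standing in for writeb(). */
static void reg_write8(volatile uint8_t *addr, uint8_t val)
{
        *addr = val;
}

/* Program a 16-bit prescaler via two adjacent 8-bit registers, low byte
 * at offset 0x0 and high byte at offset 0x1, mirroring the
 * I2C_LS2X_PRER_LO/I2C_LS2X_PRER_HI layout above. */
static void set_prescaler(volatile uint8_t *base, uint16_t div)
{
        reg_write8(base + 0x0, div & 0xff);
        reg_write8(base + 0x1, div >> 8);
}
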
+ */ + val = LS2X_I2C_PCLK_FREQ / (5 * t->bus_freq_hz) - 1; + writeb(FIELD_GET(GENMASK(7, 0), val), priv->base + I2C_LS2X_PRER_LO); + writeb(FIELD_GET(GENMASK(15, 8), val), priv->base + I2C_LS2X_PRER_HI); } static void ls2x_i2c_init(struct ls2x_i2c_priv *priv) diff --git a/drivers/i2c/busses/i2c-npcm7xx.c b/drivers/i2c/busses/i2c-npcm7xx.c index 3ca08b8ef8af3..de713b5747fe5 100644 --- a/drivers/i2c/busses/i2c-npcm7xx.c +++ b/drivers/i2c/busses/i2c-npcm7xx.c @@ -2554,6 +2554,13 @@ static int npcm_i2c_probe_bus(struct platform_device *pdev) if (irq < 0) return irq; + /* + * Disable the interrupt to avoid the interrupt handler being triggered + * incorrectly by the asynchronous interrupt status since the machine + * might do a warm reset during the last smbus/i2c transfer session. + */ + npcm_i2c_int_enable(bus, false); + ret = devm_request_irq(bus->dev, irq, npcm_i2c_bus_irq, 0, dev_name(bus->dev), bus); if (ret) diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c index 92faf03d64cfb..f18c3e74b0762 100644 --- a/drivers/i2c/busses/i2c-omap.c +++ b/drivers/i2c/busses/i2c-omap.c @@ -1048,23 +1048,6 @@ static int omap_i2c_transmit_data(struct omap_i2c_dev *omap, u8 num_bytes, return 0; } -static irqreturn_t -omap_i2c_isr(int irq, void *dev_id) -{ - struct omap_i2c_dev *omap = dev_id; - irqreturn_t ret = IRQ_HANDLED; - u16 mask; - u16 stat; - - stat = omap_i2c_read_reg(omap, OMAP_I2C_STAT_REG); - mask = omap_i2c_read_reg(omap, OMAP_I2C_IE_REG) & ~OMAP_I2C_STAT_NACK; - - if (stat & mask) - ret = IRQ_WAKE_THREAD; - - return ret; -} - static int omap_i2c_xfer_data(struct omap_i2c_dev *omap) { u16 bits; @@ -1095,8 +1078,13 @@ static int omap_i2c_xfer_data(struct omap_i2c_dev *omap) } if (stat & OMAP_I2C_STAT_NACK) { - err |= OMAP_I2C_STAT_NACK; + omap->cmd_err |= OMAP_I2C_STAT_NACK; omap_i2c_ack_stat(omap, OMAP_I2C_STAT_NACK); + + if (!(stat & ~OMAP_I2C_STAT_NACK)) { + err = -EAGAIN; + break; + } } if (stat & OMAP_I2C_STAT_AL) { @@ -1472,7 +1460,7 @@ omap_i2c_probe(struct platform_device *pdev) IRQF_NO_SUSPEND, pdev->name, omap); else r = devm_request_threaded_irq(&pdev->dev, omap->irq, - omap_i2c_isr, omap_i2c_isr_thread, + NULL, omap_i2c_isr_thread, IRQF_NO_SUSPEND | IRQF_ONESHOT, pdev->name, omap); diff --git a/drivers/i2c/busses/i2c-sis630.c b/drivers/i2c/busses/i2c-sis630.c index 3505cf29cedda..a19c3d251804d 100644 --- a/drivers/i2c/busses/i2c-sis630.c +++ b/drivers/i2c/busses/i2c-sis630.c @@ -509,6 +509,8 @@ MODULE_DEVICE_TABLE(pci, sis630_ids); static int sis630_probe(struct pci_dev *dev, const struct pci_device_id *id) { + int ret; + if (sis630_setup(dev)) { dev_err(&dev->dev, "SIS630 compatible bus not detected, " @@ -522,7 +524,15 @@ static int sis630_probe(struct pci_dev *dev, const struct pci_device_id *id) snprintf(sis630_adapter.name, sizeof(sis630_adapter.name), "SMBus SIS630 adapter at %04x", smbus_base + SMB_STS); - return i2c_add_adapter(&sis630_adapter); + ret = i2c_add_adapter(&sis630_adapter); + if (ret) + goto release_region; + + return 0; + +release_region: + release_region(smbus_base + SMB_STS, SIS630_SMB_IOREGION); + return ret; } static void sis630_remove(struct pci_dev *dev) diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c index 35a221e2c11c1..7ad1ad5c8c3f5 100644 --- a/drivers/i2c/i2c-core-base.c +++ b/drivers/i2c/i2c-core-base.c @@ -2506,7 +2506,7 @@ static int i2c_detect_address(struct i2c_client *temp_client, static int i2c_detect(struct i2c_adapter *adapter, struct i2c_driver *driver) { const unsigned short *address_list; - 
struct i2c_client temp_client; + struct i2c_client *temp_client; int i, err = 0; address_list = driver->address_list; @@ -2527,19 +2527,24 @@ static int i2c_detect(struct i2c_adapter *adapter, struct i2c_driver *driver) return 0; /* Set up a temporary client to help detect callback */ - memset(&temp_client, 0, sizeof(temp_client)); - temp_client.adapter = adapter; + temp_client = kzalloc(sizeof(*temp_client), GFP_KERNEL); + if (!temp_client) + return -ENOMEM; + + temp_client->adapter = adapter; for (i = 0; address_list[i] != I2C_CLIENT_END; i += 1) { dev_dbg(&adapter->dev, "found normal entry for adapter %d, addr 0x%02x\n", i2c_adapter_id(adapter), address_list[i]); - temp_client.addr = address_list[i]; - err = i2c_detect_address(&temp_client, driver); + temp_client->addr = address_list[i]; + err = i2c_detect_address(temp_client, driver); if (unlikely(err)) break; } + kfree(temp_client); + return err; } diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c index 118fe1d37c226..0fdb1d1316c44 100644 --- a/drivers/idle/intel_idle.c +++ b/drivers/idle/intel_idle.c @@ -56,6 +56,7 @@ #include #include #include +#include #include #define INTEL_IDLE_VERSION "0.5.1" @@ -1799,6 +1800,9 @@ static void __init intel_idle_init_cstates_acpi(struct cpuidle_driver *drv) if (intel_idle_state_needs_timer_stop(state)) state->flags |= CPUIDLE_FLAG_TIMER_STOP; + if (cx->type > ACPI_STATE_C1 && !boot_cpu_has(X86_FEATURE_NONSTOP_TSC)) + mark_tsc_unstable("TSC halts in idle"); + state->enter = intel_idle; state->enter_s2idle = intel_idle_s2idle; } diff --git a/drivers/iio/adc/ad7192.c b/drivers/iio/adc/ad7192.c index e96a5ae92375d..cfaf8f7e0a07d 100644 --- a/drivers/iio/adc/ad7192.c +++ b/drivers/iio/adc/ad7192.c @@ -1084,7 +1084,7 @@ static int ad7192_update_scan_mode(struct iio_dev *indio_dev, const unsigned lon conf &= ~AD7192_CONF_CHAN_MASK; for_each_set_bit(i, scan_mask, 8) - conf |= FIELD_PREP(AD7192_CONF_CHAN_MASK, i); + conf |= FIELD_PREP(AD7192_CONF_CHAN_MASK, BIT(i)); ret = ad_sd_write_reg(&st->sd, AD7192_REG_CONF, 3, conf); if (ret < 0) diff --git a/drivers/iio/adc/ad7606.c b/drivers/iio/adc/ad7606.c index d8e3c7a43678c..d39354afd5394 100644 --- a/drivers/iio/adc/ad7606.c +++ b/drivers/iio/adc/ad7606.c @@ -1047,7 +1047,7 @@ static int ad7606_read_avail(struct iio_dev *indio_dev, cs = &st->chan_scales[ch]; *vals = (int *)cs->scale_avail; - *length = cs->num_scales; + *length = cs->num_scales * 2; *type = IIO_VAL_INT_PLUS_MICRO; return IIO_AVAIL_LIST; diff --git a/drivers/iio/adc/at91-sama5d2_adc.c b/drivers/iio/adc/at91-sama5d2_adc.c index 8e5aaf15a9215..c3a1dea2aa82e 100644 --- a/drivers/iio/adc/at91-sama5d2_adc.c +++ b/drivers/iio/adc/at91-sama5d2_adc.c @@ -329,7 +329,7 @@ static const struct at91_adc_reg_layout sama7g5_layout = { #define AT91_HWFIFO_MAX_SIZE_STR "128" #define AT91_HWFIFO_MAX_SIZE 128 -#define AT91_SAMA5D2_CHAN_SINGLE(index, num, addr) \ +#define AT91_SAMA_CHAN_SINGLE(index, num, addr, rbits) \ { \ .type = IIO_VOLTAGE, \ .channel = num, \ @@ -337,7 +337,7 @@ static const struct at91_adc_reg_layout sama7g5_layout = { .scan_index = index, \ .scan_type = { \ .sign = 'u', \ - .realbits = 14, \ + .realbits = rbits, \ .storagebits = 16, \ }, \ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \ @@ -350,7 +350,13 @@ static const struct at91_adc_reg_layout sama7g5_layout = { .indexed = 1, \ } -#define AT91_SAMA5D2_CHAN_DIFF(index, num, num2, addr) \ +#define AT91_SAMA5D2_CHAN_SINGLE(index, num, addr) \ + AT91_SAMA_CHAN_SINGLE(index, num, addr, 14) + +#define 
AT91_SAMA7G5_CHAN_SINGLE(index, num, addr) \ + AT91_SAMA_CHAN_SINGLE(index, num, addr, 16) + +#define AT91_SAMA_CHAN_DIFF(index, num, num2, addr, rbits) \ { \ .type = IIO_VOLTAGE, \ .differential = 1, \ @@ -360,7 +366,7 @@ static const struct at91_adc_reg_layout sama7g5_layout = { .scan_index = index, \ .scan_type = { \ .sign = 's', \ - .realbits = 14, \ + .realbits = rbits, \ .storagebits = 16, \ }, \ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \ @@ -373,6 +379,12 @@ static const struct at91_adc_reg_layout sama7g5_layout = { .indexed = 1, \ } +#define AT91_SAMA5D2_CHAN_DIFF(index, num, num2, addr) \ + AT91_SAMA_CHAN_DIFF(index, num, num2, addr, 14) + +#define AT91_SAMA7G5_CHAN_DIFF(index, num, num2, addr) \ + AT91_SAMA_CHAN_DIFF(index, num, num2, addr, 16) + #define AT91_SAMA5D2_CHAN_TOUCH(num, name, mod) \ { \ .type = IIO_POSITIONRELATIVE, \ @@ -666,30 +678,30 @@ static const struct iio_chan_spec at91_sama5d2_adc_channels[] = { }; static const struct iio_chan_spec at91_sama7g5_adc_channels[] = { - AT91_SAMA5D2_CHAN_SINGLE(0, 0, 0x60), - AT91_SAMA5D2_CHAN_SINGLE(1, 1, 0x64), - AT91_SAMA5D2_CHAN_SINGLE(2, 2, 0x68), - AT91_SAMA5D2_CHAN_SINGLE(3, 3, 0x6c), - AT91_SAMA5D2_CHAN_SINGLE(4, 4, 0x70), - AT91_SAMA5D2_CHAN_SINGLE(5, 5, 0x74), - AT91_SAMA5D2_CHAN_SINGLE(6, 6, 0x78), - AT91_SAMA5D2_CHAN_SINGLE(7, 7, 0x7c), - AT91_SAMA5D2_CHAN_SINGLE(8, 8, 0x80), - AT91_SAMA5D2_CHAN_SINGLE(9, 9, 0x84), - AT91_SAMA5D2_CHAN_SINGLE(10, 10, 0x88), - AT91_SAMA5D2_CHAN_SINGLE(11, 11, 0x8c), - AT91_SAMA5D2_CHAN_SINGLE(12, 12, 0x90), - AT91_SAMA5D2_CHAN_SINGLE(13, 13, 0x94), - AT91_SAMA5D2_CHAN_SINGLE(14, 14, 0x98), - AT91_SAMA5D2_CHAN_SINGLE(15, 15, 0x9c), - AT91_SAMA5D2_CHAN_DIFF(16, 0, 1, 0x60), - AT91_SAMA5D2_CHAN_DIFF(17, 2, 3, 0x68), - AT91_SAMA5D2_CHAN_DIFF(18, 4, 5, 0x70), - AT91_SAMA5D2_CHAN_DIFF(19, 6, 7, 0x78), - AT91_SAMA5D2_CHAN_DIFF(20, 8, 9, 0x80), - AT91_SAMA5D2_CHAN_DIFF(21, 10, 11, 0x88), - AT91_SAMA5D2_CHAN_DIFF(22, 12, 13, 0x90), - AT91_SAMA5D2_CHAN_DIFF(23, 14, 15, 0x98), + AT91_SAMA7G5_CHAN_SINGLE(0, 0, 0x60), + AT91_SAMA7G5_CHAN_SINGLE(1, 1, 0x64), + AT91_SAMA7G5_CHAN_SINGLE(2, 2, 0x68), + AT91_SAMA7G5_CHAN_SINGLE(3, 3, 0x6c), + AT91_SAMA7G5_CHAN_SINGLE(4, 4, 0x70), + AT91_SAMA7G5_CHAN_SINGLE(5, 5, 0x74), + AT91_SAMA7G5_CHAN_SINGLE(6, 6, 0x78), + AT91_SAMA7G5_CHAN_SINGLE(7, 7, 0x7c), + AT91_SAMA7G5_CHAN_SINGLE(8, 8, 0x80), + AT91_SAMA7G5_CHAN_SINGLE(9, 9, 0x84), + AT91_SAMA7G5_CHAN_SINGLE(10, 10, 0x88), + AT91_SAMA7G5_CHAN_SINGLE(11, 11, 0x8c), + AT91_SAMA7G5_CHAN_SINGLE(12, 12, 0x90), + AT91_SAMA7G5_CHAN_SINGLE(13, 13, 0x94), + AT91_SAMA7G5_CHAN_SINGLE(14, 14, 0x98), + AT91_SAMA7G5_CHAN_SINGLE(15, 15, 0x9c), + AT91_SAMA7G5_CHAN_DIFF(16, 0, 1, 0x60), + AT91_SAMA7G5_CHAN_DIFF(17, 2, 3, 0x68), + AT91_SAMA7G5_CHAN_DIFF(18, 4, 5, 0x70), + AT91_SAMA7G5_CHAN_DIFF(19, 6, 7, 0x78), + AT91_SAMA7G5_CHAN_DIFF(20, 8, 9, 0x80), + AT91_SAMA7G5_CHAN_DIFF(21, 10, 11, 0x88), + AT91_SAMA7G5_CHAN_DIFF(22, 12, 13, 0x90), + AT91_SAMA7G5_CHAN_DIFF(23, 14, 15, 0x98), IIO_CHAN_SOFT_TIMESTAMP(24), AT91_SAMA5D2_CHAN_TEMP(AT91_SAMA7G5_ADC_TEMP_CHANNEL, "temp", 0xdc), }; diff --git a/drivers/iio/adc/pac1921.c b/drivers/iio/adc/pac1921.c index 90f61c47b1c46..63f5182151565 100644 --- a/drivers/iio/adc/pac1921.c +++ b/drivers/iio/adc/pac1921.c @@ -1198,11 +1198,11 @@ static int pac1921_match_acpi_device(struct iio_dev *indio_dev) label = devm_kstrdup(dev, status->package.elements[0].string.pointer, GFP_KERNEL); + ACPI_FREE(status); if (!label) return -ENOMEM; indio_dev->label = label; - ACPI_FREE(status); return 0; 
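
The pac1921 reorder just above moves ACPI_FREE() ahead of the NULL check, so the temporary ACPI buffer is released on the -ENOMEM path too. The same duplicate-then-release shape in plain C; strdup()/free() stand in for devm_kstrdup()/ACPI_FREE(), and struct dev_info is a hypothetical stand-in for struct iio_dev:

#include <errno.h>
#include <stdlib.h>
#include <string.h>

struct dev_info { char *label; };

static int set_label(struct dev_info *dev, char *tmp_buf)
{
        char *label = strdup(tmp_buf);  /* devm_kstrdup() upstream */

        free(tmp_buf);                  /* freed before the check, so no
                                         * exit path can leak the buffer */
        if (!label)
                return -ENOMEM;

        dev->label = label;
        return 0;
}
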
} diff --git a/drivers/iio/dac/ad3552r.c b/drivers/iio/dac/ad3552r.c index e7206af53af61..7944f5c1d264d 100644 --- a/drivers/iio/dac/ad3552r.c +++ b/drivers/iio/dac/ad3552r.c @@ -410,6 +410,12 @@ static int ad3552r_reset(struct ad3552r_desc *dac) return ret; } + /* Clear reset error flag, see ad3552r manual, rev B table 38. */ + ret = ad3552r_write_reg(dac, AD3552R_REG_ADDR_ERR_STATUS, + AD3552R_MASK_RESET_STATUS); + if (ret) + return ret; + return ad3552r_update_reg_field(dac, AD3552R_REG_ADDR_INTERFACE_CONFIG_A, AD3552R_MASK_ADDR_ASCENSION, diff --git a/drivers/iio/filter/admv8818.c b/drivers/iio/filter/admv8818.c index 848baa6e3bbf5..d85b7d3de8660 100644 --- a/drivers/iio/filter/admv8818.c +++ b/drivers/iio/filter/admv8818.c @@ -574,21 +574,15 @@ static int admv8818_init(struct admv8818_state *st) struct spi_device *spi = st->spi; unsigned int chip_id; - ret = regmap_update_bits(st->regmap, ADMV8818_REG_SPI_CONFIG_A, - ADMV8818_SOFTRESET_N_MSK | - ADMV8818_SOFTRESET_MSK, - FIELD_PREP(ADMV8818_SOFTRESET_N_MSK, 1) | - FIELD_PREP(ADMV8818_SOFTRESET_MSK, 1)); + ret = regmap_write(st->regmap, ADMV8818_REG_SPI_CONFIG_A, + ADMV8818_SOFTRESET_N_MSK | ADMV8818_SOFTRESET_MSK); if (ret) { dev_err(&spi->dev, "ADMV8818 Soft Reset failed.\n"); return ret; } - ret = regmap_update_bits(st->regmap, ADMV8818_REG_SPI_CONFIG_A, - ADMV8818_SDOACTIVE_N_MSK | - ADMV8818_SDOACTIVE_MSK, - FIELD_PREP(ADMV8818_SDOACTIVE_N_MSK, 1) | - FIELD_PREP(ADMV8818_SDOACTIVE_MSK, 1)); + ret = regmap_write(st->regmap, ADMV8818_REG_SPI_CONFIG_A, + ADMV8818_SDOACTIVE_N_MSK | ADMV8818_SDOACTIVE_MSK); if (ret) { dev_err(&spi->dev, "ADMV8818 SDO Enable failed.\n"); return ret; diff --git a/drivers/iio/light/apds9306.c b/drivers/iio/light/apds9306.c index 69a0d609cffc9..5ed7e17f49e76 100644 --- a/drivers/iio/light/apds9306.c +++ b/drivers/iio/light/apds9306.c @@ -108,11 +108,11 @@ static const struct part_id_gts_multiplier apds9306_gts_mul[] = { { .part_id = 0xB1, .max_scale_int = 16, - .max_scale_nano = 3264320, + .max_scale_nano = 326432000, }, { .part_id = 0xB3, .max_scale_int = 14, - .max_scale_nano = 9712000, + .max_scale_nano = 97120000, }, }; diff --git a/drivers/iio/light/hid-sensor-prox.c b/drivers/iio/light/hid-sensor-prox.c index 7ab64f5c623c1..76b76d12b3882 100644 --- a/drivers/iio/light/hid-sensor-prox.c +++ b/drivers/iio/light/hid-sensor-prox.c @@ -49,9 +49,10 @@ static const u32 prox_sensitivity_addresses[] = { #define PROX_CHANNEL(_is_proximity, _channel) \ {\ .type = _is_proximity ? IIO_PROXIMITY : IIO_ATTENTION,\ - .info_mask_separate = _is_proximity ? BIT(IIO_CHAN_INFO_RAW) :\ - BIT(IIO_CHAN_INFO_PROCESSED),\ - .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_OFFSET) |\ + .info_mask_separate = \ + (_is_proximity ? 
BIT(IIO_CHAN_INFO_RAW) :\ + BIT(IIO_CHAN_INFO_PROCESSED)) |\ + BIT(IIO_CHAN_INFO_OFFSET) |\ BIT(IIO_CHAN_INFO_SCALE) |\ BIT(IIO_CHAN_INFO_SAMP_FREQ) |\ BIT(IIO_CHAN_INFO_HYSTERESIS),\ diff --git a/drivers/iio/proximity/hx9023s.c b/drivers/iio/proximity/hx9023s.c index e092a935dbac7..5aa8e5a22f326 100644 --- a/drivers/iio/proximity/hx9023s.c +++ b/drivers/iio/proximity/hx9023s.c @@ -1036,12 +1036,13 @@ static int hx9023s_send_cfg(const struct firmware *fw, struct hx9023s_data *data return -ENOMEM; memcpy(bin->data, fw->data, fw->size); - release_firmware(fw); bin->fw_size = fw->size; bin->fw_ver = bin->data[FW_VER_OFFSET]; bin->reg_count = get_unaligned_le16(bin->data + FW_REG_CNT_OFFSET); + release_firmware(fw); + return hx9023s_bin_load(data, bin); } diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h index b91a85a491d05..502a79136d4df 100644 --- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h +++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h @@ -53,12 +53,6 @@ #define BNXT_RE_MAX_MR_SIZE_HIGH BIT_ULL(39) #define BNXT_RE_MAX_MR_SIZE BNXT_RE_MAX_MR_SIZE_HIGH -#define BNXT_RE_MAX_QPC_COUNT (64 * 1024) -#define BNXT_RE_MAX_MRW_COUNT (64 * 1024) -#define BNXT_RE_MAX_SRQC_COUNT (64 * 1024) -#define BNXT_RE_MAX_CQ_COUNT (64 * 1024) -#define BNXT_RE_MAX_MRW_COUNT_64K (64 * 1024) -#define BNXT_RE_MAX_MRW_COUNT_256K (256 * 1024) /* Number of MRs to reserve for PF, leaving remainder for VFs */ #define BNXT_RE_RESVD_MR_FOR_PF (32 * 1024) @@ -187,7 +181,6 @@ struct bnxt_re_dev { #define BNXT_RE_FLAG_ISSUE_ROCE_STATS 29 struct net_device *netdev; struct auxiliary_device *adev; - struct notifier_block nb; unsigned int version, major, minor; struct bnxt_qplib_chip_ctx *chip_ctx; struct bnxt_en_dev *en_dev; diff --git a/drivers/infiniband/hw/bnxt_re/hw_counters.c b/drivers/infiniband/hw/bnxt_re/hw_counters.c index 3ac47f4e61229..f039aefcaf675 100644 --- a/drivers/infiniband/hw/bnxt_re/hw_counters.c +++ b/drivers/infiniband/hw/bnxt_re/hw_counters.c @@ -348,8 +348,8 @@ int bnxt_re_ib_get_hw_stats(struct ib_device *ibdev, goto done; } bnxt_re_copy_err_stats(rdev, stats, err_s); - if (_is_ext_stats_supported(rdev->dev_attr->dev_cap_flags) && - !rdev->is_virtfn) { + if (bnxt_ext_stats_supported(rdev->chip_ctx, rdev->dev_attr->dev_cap_flags, + rdev->is_virtfn)) { rc = bnxt_re_get_ext_stat(rdev, stats); if (rc) { clear_bit(BNXT_RE_FLAG_ISSUE_ROCE_STATS, diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c index 2de101d6e8255..6f5db32082dd7 100644 --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c @@ -1870,6 +1870,8 @@ int bnxt_re_create_srq(struct ib_srq *ib_srq, srq->qplib_srq.threshold = srq_init_attr->attr.srq_limit; srq->srq_limit = srq_init_attr->attr.srq_limit; srq->qplib_srq.eventq_hw_ring_id = rdev->nqr->nq[0].ring_id; + srq->qplib_srq.sg_info.pgsize = PAGE_SIZE; + srq->qplib_srq.sg_info.pgshft = PAGE_SHIFT; nq = &rdev->nqr->nq[0]; if (udata) { diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c index e9e4da4dd576b..4659a2f733644 100644 --- a/drivers/infiniband/hw/bnxt_re/main.c +++ b/drivers/infiniband/hw/bnxt_re/main.c @@ -396,11 +396,16 @@ static void bnxt_re_dcb_wq_task(struct work_struct *work) static void bnxt_re_async_notifier(void *handle, struct hwrm_async_event_cmpl *cmpl) { - struct bnxt_re_dev *rdev = (struct bnxt_re_dev *)handle; + struct bnxt_re_en_dev_info *en_info = auxiliary_get_drvdata(handle); struct bnxt_re_dcb_work *dcb_work; + 
struct bnxt_re_dev *rdev; u32 data1, data2; u16 event_id; + rdev = en_info->rdev; + if (!rdev) + return; + event_id = le16_to_cpu(cmpl->event_id); data1 = le32_to_cpu(cmpl->event_data1); data2 = le32_to_cpu(cmpl->event_data2); @@ -433,6 +438,8 @@ static void bnxt_re_stop_irq(void *handle, bool reset) int indx; rdev = en_info->rdev; + if (!rdev) + return; rcfw = &rdev->rcfw; if (reset) { @@ -461,6 +468,8 @@ static void bnxt_re_start_irq(void *handle, struct bnxt_msix_entry *ent) int indx, rc; rdev = en_info->rdev; + if (!rdev) + return; msix_ent = rdev->nqr->msix_entries; rcfw = &rdev->rcfw; if (!ent) { @@ -1350,7 +1359,6 @@ static struct bnxt_re_dev *bnxt_re_dev_add(struct auxiliary_device *adev, return NULL; } /* Default values */ - rdev->nb.notifier_call = NULL; rdev->netdev = en_dev->net; rdev->en_dev = en_dev; rdev->adev = adev; @@ -2122,8 +2130,7 @@ static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 op_type) * memory for the function and all child VFs */ rc = bnxt_qplib_alloc_rcfw_channel(&rdev->qplib_res, &rdev->rcfw, - &rdev->qplib_ctx, - BNXT_RE_MAX_QPC_COUNT); + &rdev->qplib_ctx); if (rc) { ibdev_err(&rdev->ibdev, "Failed to allocate RCFW Channel: %#x\n", rc); @@ -2345,15 +2352,6 @@ static int bnxt_re_add_device(struct auxiliary_device *adev, u8 op_type) static void bnxt_re_remove_device(struct bnxt_re_dev *rdev, u8 op_type, struct auxiliary_device *aux_dev) { - if (rdev->nb.notifier_call) { - unregister_netdevice_notifier(&rdev->nb); - rdev->nb.notifier_call = NULL; - } else { - /* If notifier is null, we should have already done a - * clean up before coming here. - */ - return; - } bnxt_re_setup_cc(rdev, false); ib_unregister_device(&rdev->ibdev); bnxt_re_dev_uninit(rdev, op_type); @@ -2433,6 +2431,7 @@ static int bnxt_re_suspend(struct auxiliary_device *adev, pm_message_t state) ibdev_info(&rdev->ibdev, "%s: L2 driver notified to stop en_state 0x%lx", __func__, en_dev->en_state); bnxt_re_remove_device(rdev, BNXT_RE_PRE_RECOVERY_REMOVE, adev); + bnxt_re_update_en_info_rdev(NULL, en_info, adev); mutex_unlock(&bnxt_re_mutex); return 0; diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c index 5336f74297f81..457eecb99f969 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c @@ -1217,8 +1217,6 @@ static void __modify_flags_from_init_state(struct bnxt_qplib_qp *qp) qp->path_mtu = CMDQ_MODIFY_QP_PATH_MTU_MTU_2048; } - qp->modify_flags &= - ~CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID; /* Bono FW require the max_dest_rd_atomic to be >= 1 */ if (qp->max_dest_rd_atomic < 1) qp->max_dest_rd_atomic = 1; diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c index 17e62f22683b1..d230743834282 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c @@ -915,7 +915,6 @@ int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw, void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw) { - kfree(rcfw->qp_tbl); kfree(rcfw->crsqe_tbl); bnxt_qplib_free_hwq(rcfw->res, &rcfw->cmdq.hwq); bnxt_qplib_free_hwq(rcfw->res, &rcfw->creq.hwq); @@ -924,8 +923,7 @@ void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw) int bnxt_qplib_alloc_rcfw_channel(struct bnxt_qplib_res *res, struct bnxt_qplib_rcfw *rcfw, - struct bnxt_qplib_ctx *ctx, - int qp_tbl_sz) + struct bnxt_qplib_ctx *ctx) { struct bnxt_qplib_hwq_attr hwq_attr = {}; struct bnxt_qplib_sg_info sginfo = {}; @@ -969,12 +967,6 @@ int 
bnxt_qplib_alloc_rcfw_channel(struct bnxt_qplib_res *res, if (!rcfw->crsqe_tbl) goto fail; - /* Allocate one extra to hold the QP1 entries */ - rcfw->qp_tbl_size = qp_tbl_sz + 1; - rcfw->qp_tbl = kcalloc(rcfw->qp_tbl_size, sizeof(struct bnxt_qplib_qp_node), - GFP_KERNEL); - if (!rcfw->qp_tbl) - goto fail; spin_lock_init(&rcfw->tbl_lock); rcfw->max_timeout = res->cctx->hwrm_cmd_max_timeout; diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h index 88814cb3aa741..ff873c5f1b259 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h +++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h @@ -262,8 +262,7 @@ static inline void bnxt_qplib_fill_cmdqmsg(struct bnxt_qplib_cmdqmsg *msg, void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw); int bnxt_qplib_alloc_rcfw_channel(struct bnxt_qplib_res *res, struct bnxt_qplib_rcfw *rcfw, - struct bnxt_qplib_ctx *ctx, - int qp_tbl_sz); + struct bnxt_qplib_ctx *ctx); void bnxt_qplib_rcfw_stop_irq(struct bnxt_qplib_rcfw *rcfw, bool kill); void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw); int bnxt_qplib_rcfw_start_irq(struct bnxt_qplib_rcfw *rcfw, int msix_vector, @@ -285,9 +284,10 @@ int bnxt_qplib_deinit_rcfw(struct bnxt_qplib_rcfw *rcfw); int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw, struct bnxt_qplib_ctx *ctx, int is_virtfn); void bnxt_qplib_mark_qp_error(void *qp_handle); + static inline u32 map_qp_id_to_tbl_indx(u32 qid, struct bnxt_qplib_rcfw *rcfw) { /* Last index of the qp_tbl is for QP1 ie. qp_tbl_size - 1*/ - return (qid == 1) ? rcfw->qp_tbl_size - 1 : qid % rcfw->qp_tbl_size - 2; + return (qid == 1) ? rcfw->qp_tbl_size - 1 : (qid % (rcfw->qp_tbl_size - 2)); } #endif /* __BNXT_QPLIB_RCFW_H__ */ diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.c b/drivers/infiniband/hw/bnxt_re/qplib_res.c index 02922a0987ad7..6cd05207ffedd 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_res.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_res.c @@ -871,6 +871,7 @@ int bnxt_qplib_init_res(struct bnxt_qplib_res *res) void bnxt_qplib_free_res(struct bnxt_qplib_res *res) { + kfree(res->rcfw->qp_tbl); bnxt_qplib_free_sgid_tbl(res, &res->sgid_tbl); bnxt_qplib_free_pd_tbl(&res->pd_tbl); bnxt_qplib_free_dpi_tbl(res, &res->dpi_tbl); @@ -878,12 +879,20 @@ void bnxt_qplib_free_res(struct bnxt_qplib_res *res) int bnxt_qplib_alloc_res(struct bnxt_qplib_res *res, struct net_device *netdev) { + struct bnxt_qplib_rcfw *rcfw = res->rcfw; struct bnxt_qplib_dev_attr *dev_attr; int rc; res->netdev = netdev; dev_attr = res->dattr; + /* Allocate one extra to hold the QP1 entries */ + rcfw->qp_tbl_size = max_t(u32, BNXT_RE_MAX_QPC_COUNT + 1, dev_attr->max_qp); + rcfw->qp_tbl = kcalloc(rcfw->qp_tbl_size, sizeof(struct bnxt_qplib_qp_node), + GFP_KERNEL); + if (!rcfw->qp_tbl) + return -ENOMEM; + rc = bnxt_qplib_alloc_sgid_tbl(res, &res->sgid_tbl, dev_attr->max_sgid); if (rc) goto fail; diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.h b/drivers/infiniband/hw/bnxt_re/qplib_res.h index be5d907a036b6..6a13927674b44 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_res.h +++ b/drivers/infiniband/hw/bnxt_re/qplib_res.h @@ -49,6 +49,13 @@ extern const struct bnxt_qplib_gid bnxt_qplib_gid_zero; #define CHIP_NUM_58818 0xd818 #define CHIP_NUM_57608 0x1760 +#define BNXT_RE_MAX_QPC_COUNT (64 * 1024) +#define BNXT_RE_MAX_MRW_COUNT (64 * 1024) +#define BNXT_RE_MAX_SRQC_COUNT (64 * 1024) +#define BNXT_RE_MAX_CQ_COUNT (64 * 1024) +#define BNXT_RE_MAX_MRW_COUNT_64K (64 * 1024) +#define BNXT_RE_MAX_MRW_COUNT_256K (256 * 1024) 
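
The map_qp_id_to_tbl_indx() fix above is a pure operator-precedence repair: '%' binds tighter than '-', so the old qid % rcfw->qp_tbl_size - 2 parsed as (qid % qp_tbl_size) - 2, taking the wrong modulus and underflowing to a huge u32 index whenever qid % qp_tbl_size < 2. A small self-checking sketch, with illustrative numbers only:

#include <assert.h>
#include <stdint.h>

static uint32_t map_qid(uint32_t qid, uint32_t tbl_size)
{
        /* Last slot is reserved for QP1; every other id wraps over the
         * remaining tbl_size - 2 entries. */
        return (qid == 1) ? tbl_size - 1 : qid % (tbl_size - 2);
}

int main(void)
{
        uint32_t tbl_size = 10;

        assert((17 % tbl_size - 2) == 5);   /* old parse: (17 % 10) - 2 */
        assert(map_qid(17, tbl_size) == 1); /* fixed: 17 % (10 - 2)     */
        assert(map_qid(1, tbl_size) == 9);  /* QP1 pinned to last slot  */
        return 0;
}
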
+ #define BNXT_QPLIB_DBR_VALID (0x1UL << 26) #define BNXT_QPLIB_DBR_EPOCH_SHIFT 24 #define BNXT_QPLIB_DBR_TOGGLE_SHIFT 25 @@ -547,6 +554,14 @@ static inline bool _is_ext_stats_supported(u16 dev_cap_flags) CREQ_QUERY_FUNC_RESP_SB_EXT_STATS; } +static inline int bnxt_ext_stats_supported(struct bnxt_qplib_chip_ctx *ctx, + u16 flags, bool virtfn) +{ + /* ext stats supported if cap flag is set AND is a PF OR a Thor2 VF */ + return (_is_ext_stats_supported(flags) && + ((virtfn && bnxt_qplib_is_chip_gen_p7(ctx)) || (!virtfn))); +} + static inline bool _is_hw_retx_supported(u16 dev_cap_flags) { return dev_cap_flags & @@ -592,4 +607,9 @@ static inline bool _is_cq_coalescing_supported(u16 dev_cap_ext_flags2) return dev_cap_ext_flags2 & CREQ_QUERY_FUNC_RESP_SB_CQ_COALESCING_SUPPORTED; } +static inline bool _is_max_srq_ext_supported(u16 dev_cap_ext_flags_2) +{ + return !!(dev_cap_ext_flags_2 & CREQ_QUERY_FUNC_RESP_SB_MAX_SRQ_EXTENDED); +} + #endif /* __BNXT_QPLIB_RES_H__ */ diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c index 4ccd4405355af..f231e886ad9d5 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c @@ -176,6 +176,9 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw) attr->dev_cap_flags = le16_to_cpu(sb->dev_cap_flags); attr->dev_cap_flags2 = le16_to_cpu(sb->dev_cap_ext_flags_2); + if (_is_max_srq_ext_supported(attr->dev_cap_flags2)) + attr->max_srq += le16_to_cpu(sb->max_srq_ext); + bnxt_qplib_query_version(rcfw, attr->fw_ver); for (i = 0; i < MAX_TQM_ALLOC_REQ / 4; i++) { diff --git a/drivers/infiniband/hw/bnxt_re/roce_hsi.h b/drivers/infiniband/hw/bnxt_re/roce_hsi.h index 0ee60fdc18b33..7eceb3e9f4ce5 100644 --- a/drivers/infiniband/hw/bnxt_re/roce_hsi.h +++ b/drivers/infiniband/hw/bnxt_re/roce_hsi.h @@ -2215,11 +2215,12 @@ struct creq_query_func_resp_sb { #define CREQ_QUERY_FUNC_RESP_SB_REQ_RETRANSMISSION_SUPPORT_IQM_MSN_TABLE (0x2UL << 4) #define CREQ_QUERY_FUNC_RESP_SB_REQ_RETRANSMISSION_SUPPORT_LAST \ CREQ_QUERY_FUNC_RESP_SB_REQ_RETRANSMISSION_SUPPORT_IQM_MSN_TABLE + #define CREQ_QUERY_FUNC_RESP_SB_MAX_SRQ_EXTENDED 0x40UL #define CREQ_QUERY_FUNC_RESP_SB_MIN_RNR_RTR_RTS_OPT_SUPPORTED 0x1000UL __le16 max_xp_qp_size; __le16 create_qp_batch_size; __le16 destroy_qp_batch_size; - __le16 reserved16; + __le16 max_srq_ext; __le64 reserved64; }; diff --git a/drivers/infiniband/hw/hns/hns_roce_alloc.c b/drivers/infiniband/hw/hns/hns_roce_alloc.c index 950c133d4220e..6ee911f6885b5 100644 --- a/drivers/infiniband/hw/hns/hns_roce_alloc.c +++ b/drivers/infiniband/hw/hns/hns_roce_alloc.c @@ -175,8 +175,10 @@ void hns_roce_cleanup_bitmap(struct hns_roce_dev *hr_dev) if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC) ida_destroy(&hr_dev->xrcd_ida.ida); - if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ) + if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ) { ida_destroy(&hr_dev->srq_table.srq_ida.ida); + xa_destroy(&hr_dev->srq_table.xa); + } hns_roce_cleanup_qp_table(hr_dev); hns_roce_cleanup_cq_table(hr_dev); ida_destroy(&hr_dev->mr_table.mtpt_ida.ida); diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c index 4106423a1b399..3a5c93c9fb3e6 100644 --- a/drivers/infiniband/hw/hns/hns_roce_cq.c +++ b/drivers/infiniband/hw/hns/hns_roce_cq.c @@ -537,5 +537,6 @@ void hns_roce_cleanup_cq_table(struct hns_roce_dev *hr_dev) for (i = 0; i < HNS_ROCE_CQ_BANK_NUM; i++) ida_destroy(&hr_dev->cq_table.bank[i].ida); + xa_destroy(&hr_dev->cq_table.array); 
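
The hns_roce cleanup hunks nearby pair each xa_init()/ida_init() from setup with a matching destroy on teardown, returning the structures' internal bookkeeping nodes. A condensed kernel-style sketch of that symmetry; demo_table and its helpers are illustrative, not driver code:

#include <linux/idr.h>
#include <linux/xarray.h>

struct demo_table {
        struct xarray xa;
        struct ida ida;
};

static void demo_table_init(struct demo_table *t)
{
        xa_init(&t->xa);
        ida_init(&t->ida);
}

static void demo_table_cleanup(struct demo_table *t)
{
        ida_destroy(&t->ida);
        xa_destroy(&t->xa);     /* frees any radix-tree nodes the xarray
                                 * still holds, as the fixed paths now do */
}
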
mutex_destroy(&hr_dev->cq_table.bank_mutex); } diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.c b/drivers/infiniband/hw/hns/hns_roce_hem.c index 605562122ecce..ca0798224e565 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hem.c +++ b/drivers/infiniband/hw/hns/hns_roce_hem.c @@ -1361,6 +1361,11 @@ static int hem_list_alloc_root_bt(struct hns_roce_dev *hr_dev, return ret; } +/* This is the bottom bt pages number of a 100G MR on 4K OS, assuming + * the bt page size is not expanded by cal_best_bt_pg_sz() + */ +#define RESCHED_LOOP_CNT_THRESHOLD_ON_4K 12800 + /* construct the base address table and link them by address hop config */ int hns_roce_hem_list_request(struct hns_roce_dev *hr_dev, struct hns_roce_hem_list *hem_list, @@ -1369,6 +1374,7 @@ int hns_roce_hem_list_request(struct hns_roce_dev *hr_dev, { const struct hns_roce_buf_region *r; int ofs, end; + int loop; int unit; int ret; int i; @@ -1386,7 +1392,10 @@ int hns_roce_hem_list_request(struct hns_roce_dev *hr_dev, continue; end = r->offset + r->count; - for (ofs = r->offset; ofs < end; ofs += unit) { + for (ofs = r->offset, loop = 1; ofs < end; ofs += unit, loop++) { + if (!(loop % RESCHED_LOOP_CNT_THRESHOLD_ON_4K)) + cond_resched(); + ret = hem_list_alloc_mid_bt(hr_dev, r, unit, ofs, hem_list->mid_bt[i], &hem_list->btm_bt); @@ -1443,9 +1452,14 @@ void *hns_roce_hem_list_find_mtt(struct hns_roce_dev *hr_dev, struct list_head *head = &hem_list->btm_bt; struct hns_roce_hem_item *hem, *temp_hem; void *cpu_base = NULL; + int loop = 1; int nr = 0; list_for_each_entry_safe(hem, temp_hem, head, sibling) { + if (!(loop % RESCHED_LOOP_CNT_THRESHOLD_ON_4K)) + cond_resched(); + loop++; + if (hem_list_page_is_in_range(hem, offset)) { nr = offset - hem->start; cpu_base = hem->addr + nr * BA_BYTE_LEN; diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c index dded339802b33..160e8927d364e 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c @@ -1286,10 +1286,8 @@ static u32 hns_roce_cmdq_tx_timeout(u16 opcode, u32 tx_timeout) return tx_timeout; } -static void hns_roce_wait_csq_done(struct hns_roce_dev *hr_dev, u16 opcode) +static void hns_roce_wait_csq_done(struct hns_roce_dev *hr_dev, u32 tx_timeout) { - struct hns_roce_v2_priv *priv = hr_dev->priv; - u32 tx_timeout = hns_roce_cmdq_tx_timeout(opcode, priv->cmq.tx_timeout); u32 timeout = 0; do { @@ -1299,8 +1297,9 @@ static void hns_roce_wait_csq_done(struct hns_roce_dev *hr_dev, u16 opcode) } while (++timeout < tx_timeout); } -static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev, - struct hns_roce_cmq_desc *desc, int num) +static int __hns_roce_cmq_send_one(struct hns_roce_dev *hr_dev, + struct hns_roce_cmq_desc *desc, + int num, u32 tx_timeout) { struct hns_roce_v2_priv *priv = hr_dev->priv; struct hns_roce_v2_cmq_ring *csq = &priv->cmq.csq; @@ -1309,8 +1308,6 @@ static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev, int ret; int i; - spin_lock_bh(&csq->lock); - tail = csq->head; for (i = 0; i < num; i++) { @@ -1324,22 +1321,17 @@ static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev, atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_CMDS_CNT]); - hns_roce_wait_csq_done(hr_dev, le16_to_cpu(desc->opcode)); + hns_roce_wait_csq_done(hr_dev, tx_timeout); if (hns_roce_cmq_csq_done(hr_dev)) { ret = 0; for (i = 0; i < num; i++) { /* check the result of hardware write back */ - desc[i] = csq->desc[tail++]; + desc_ret = le16_to_cpu(csq->desc[tail++].retval); if (tail == csq->desc_num) tail = 
0; - - desc_ret = le16_to_cpu(desc[i].retval); if (likely(desc_ret == CMD_EXEC_SUCCESS)) continue; - dev_err_ratelimited(hr_dev->dev, - "Cmdq IO error, opcode = 0x%x, return = 0x%x.\n", - desc->opcode, desc_ret); ret = hns_roce_cmd_err_convert_errno(desc_ret); } } else { @@ -1354,14 +1346,54 @@ static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev, ret = -EAGAIN; } - spin_unlock_bh(&csq->lock); - if (ret) atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_CMDS_ERR_CNT]); return ret; } +static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev, + struct hns_roce_cmq_desc *desc, int num) +{ + struct hns_roce_v2_priv *priv = hr_dev->priv; + struct hns_roce_v2_cmq_ring *csq = &priv->cmq.csq; + u16 opcode = le16_to_cpu(desc->opcode); + u32 tx_timeout = hns_roce_cmdq_tx_timeout(opcode, priv->cmq.tx_timeout); + u8 try_cnt = HNS_ROCE_OPC_POST_MB_TRY_CNT; + u32 rsv_tail; + int ret; + int i; + + while (try_cnt) { + try_cnt--; + + spin_lock_bh(&csq->lock); + rsv_tail = csq->head; + ret = __hns_roce_cmq_send_one(hr_dev, desc, num, tx_timeout); + if (opcode == HNS_ROCE_OPC_POST_MB && ret == -ETIME && + try_cnt) { + spin_unlock_bh(&csq->lock); + mdelay(HNS_ROCE_OPC_POST_MB_RETRY_GAP_MSEC); + continue; + } + + for (i = 0; i < num; i++) { + desc[i] = csq->desc[rsv_tail++]; + if (rsv_tail == csq->desc_num) + rsv_tail = 0; + } + spin_unlock_bh(&csq->lock); + break; + } + + if (ret) + dev_err_ratelimited(hr_dev->dev, + "Cmdq IO error, opcode = 0x%x, return = %d.\n", + opcode, ret); + + return ret; +} + static int hns_roce_cmq_send(struct hns_roce_dev *hr_dev, struct hns_roce_cmq_desc *desc, int num) { diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h index cbdbc9edbce6e..91a5665465ffb 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h @@ -230,6 +230,8 @@ enum hns_roce_opcode_type { }; #define HNS_ROCE_OPC_POST_MB_TIMEOUT 35000 +#define HNS_ROCE_OPC_POST_MB_TRY_CNT 8 +#define HNS_ROCE_OPC_POST_MB_RETRY_GAP_MSEC 5 struct hns_roce_cmdq_tx_timeout_map { u16 opcode; u32 tx_timeout; diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c index ae24c81c9812d..cf89a8db4f64c 100644 --- a/drivers/infiniband/hw/hns/hns_roce_main.c +++ b/drivers/infiniband/hw/hns/hns_roce_main.c @@ -183,7 +183,7 @@ static int hns_roce_query_device(struct ib_device *ib_dev, IB_DEVICE_RC_RNR_NAK_GEN; props->max_send_sge = hr_dev->caps.max_sq_sg; props->max_recv_sge = hr_dev->caps.max_rq_sg; - props->max_sge_rd = 1; + props->max_sge_rd = hr_dev->caps.max_sq_sg; props->max_cq = hr_dev->caps.num_cqs; props->max_cqe = hr_dev->caps.max_cqes; props->max_mr = hr_dev->caps.num_mtpts; diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c index 9e2e76c594063..8901c142c1b65 100644 --- a/drivers/infiniband/hw/hns/hns_roce_qp.c +++ b/drivers/infiniband/hw/hns/hns_roce_qp.c @@ -868,12 +868,14 @@ static int alloc_user_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_ib_create_qp *ucmd, struct hns_roce_ib_create_qp_resp *resp) { + bool has_sdb = user_qp_has_sdb(hr_dev, init_attr, udata, resp, ucmd); struct hns_roce_ucontext *uctx = rdma_udata_to_drv_context(udata, struct hns_roce_ucontext, ibucontext); + bool has_rdb = user_qp_has_rdb(hr_dev, init_attr, udata, resp); struct ib_device *ibdev = &hr_dev->ib_dev; int ret; - if (user_qp_has_sdb(hr_dev, init_attr, udata, resp, ucmd)) { + if (has_sdb) { ret = hns_roce_db_map_user(uctx, ucmd->sdb_addr, &hr_qp->sdb); if (ret) { 
ibdev_err(ibdev, @@ -884,7 +886,7 @@ static int alloc_user_qp_db(struct hns_roce_dev *hr_dev, hr_qp->en_flags |= HNS_ROCE_QP_CAP_SQ_RECORD_DB; } - if (user_qp_has_rdb(hr_dev, init_attr, udata, resp)) { + if (has_rdb) { ret = hns_roce_db_map_user(uctx, ucmd->db_addr, &hr_qp->rdb); if (ret) { ibdev_err(ibdev, @@ -898,7 +900,7 @@ static int alloc_user_qp_db(struct hns_roce_dev *hr_dev, return 0; err_sdb: - if (hr_qp->en_flags & HNS_ROCE_QP_CAP_SQ_RECORD_DB) + if (has_sdb) hns_roce_db_unmap_user(uctx, &hr_qp->sdb); err_out: return ret; @@ -1119,24 +1121,23 @@ static int set_qp_param(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, ibucontext); hr_qp->config = uctx->config; ret = set_user_sq_size(hr_dev, &init_attr->cap, hr_qp, ucmd); - if (ret) + if (ret) { ibdev_err(ibdev, "failed to set user SQ size, ret = %d.\n", ret); + return ret; + } ret = set_congest_param(hr_dev, hr_qp, ucmd); - if (ret) - return ret; } else { if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) hr_qp->config = HNS_ROCE_EXSGE_FLAGS; + default_congest_type(hr_dev, hr_qp); ret = set_kernel_sq_size(hr_dev, &init_attr->cap, hr_qp); if (ret) ibdev_err(ibdev, "failed to set kernel SQ size, ret = %d.\n", ret); - - default_congest_type(hr_dev, hr_qp); } return ret; @@ -1219,7 +1220,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, min(udata->outlen, sizeof(resp))); if (ret) { ibdev_err(ibdev, "copy qp resp failed!\n"); - goto err_store; + goto err_flow_ctrl; } } @@ -1602,6 +1603,7 @@ void hns_roce_cleanup_qp_table(struct hns_roce_dev *hr_dev) for (i = 0; i < HNS_ROCE_QP_BANK_NUM; i++) ida_destroy(&hr_dev->qp_table.bank[i].ida); xa_destroy(&hr_dev->qp_table.dip_xa); + xa_destroy(&hr_dev->qp_table_xa); mutex_destroy(&hr_dev->qp_table.bank_mutex); mutex_destroy(&hr_dev->qp_table.scc_mutex); } diff --git a/drivers/infiniband/hw/irdma/hw.c b/drivers/infiniband/hw/irdma/hw.c index ad50b77282f8a..69ce1862eabe0 100644 --- a/drivers/infiniband/hw/irdma/hw.c +++ b/drivers/infiniband/hw/irdma/hw.c @@ -498,8 +498,6 @@ static int irdma_save_msix_info(struct irdma_pci_f *rf) iw_qvlist->num_vectors = rf->msix_count; if (rf->msix_count <= num_online_cpus()) rf->msix_shared = true; - else if (rf->msix_count > num_online_cpus() + 1) - rf->msix_count = num_online_cpus() + 1; pmsix = rf->msix_entries; for (i = 0, ceq_idx = 0; i < rf->msix_count; i++, iw_qvinfo++) { diff --git a/drivers/infiniband/hw/irdma/main.c b/drivers/infiniband/hw/irdma/main.c index 3f13200ff71bc..1ee8969595d3d 100644 --- a/drivers/infiniband/hw/irdma/main.c +++ b/drivers/infiniband/hw/irdma/main.c @@ -206,6 +206,43 @@ static void irdma_lan_unregister_qset(struct irdma_sc_vsi *vsi, ibdev_dbg(&iwdev->ibdev, "WS: LAN free_res for rdma qset failed.\n"); } +static int irdma_init_interrupts(struct irdma_pci_f *rf, struct ice_pf *pf) +{ + int i; + + rf->msix_count = num_online_cpus() + IRDMA_NUM_AEQ_MSIX; + rf->msix_entries = kcalloc(rf->msix_count, sizeof(*rf->msix_entries), + GFP_KERNEL); + if (!rf->msix_entries) + return -ENOMEM; + + for (i = 0; i < rf->msix_count; i++) + if (ice_alloc_rdma_qvector(pf, &rf->msix_entries[i])) + break; + + if (i < IRDMA_MIN_MSIX) { + for (; i > 0; i--) + ice_free_rdma_qvector(pf, &rf->msix_entries[i]); + + kfree(rf->msix_entries); + return -ENOMEM; + } + + rf->msix_count = i; + + return 0; +} + +static void irdma_deinit_interrupts(struct irdma_pci_f *rf, struct ice_pf *pf) +{ + int i; + + for (i = 0; i < rf->msix_count; i++) + ice_free_rdma_qvector(pf, &rf->msix_entries[i]); + + kfree(rf->msix_entries); +} + static 
void irdma_remove(struct auxiliary_device *aux_dev) { struct iidc_auxiliary_dev *iidc_adev = container_of(aux_dev, @@ -216,6 +253,7 @@ static void irdma_remove(struct auxiliary_device *aux_dev) irdma_ib_unregister_device(iwdev); ice_rdma_update_vsi_filter(pf, iwdev->vsi_num, false); + irdma_deinit_interrupts(iwdev->rf, pf); pr_debug("INIT: Gen2 PF[%d] device remove success\n", PCI_FUNC(pf->pdev->devfn)); } @@ -230,9 +268,7 @@ static void irdma_fill_device_info(struct irdma_device *iwdev, struct ice_pf *pf rf->gen_ops.unregister_qset = irdma_lan_unregister_qset; rf->hw.hw_addr = pf->hw.hw_addr; rf->pcidev = pf->pdev; - rf->msix_count = pf->num_rdma_msix; rf->pf_id = pf->hw.pf_id; - rf->msix_entries = &pf->msix_entries[pf->rdma_base_vector]; rf->default_vsi.vsi_idx = vsi->vsi_num; rf->protocol_used = pf->rdma_mode & IIDC_RDMA_PROTOCOL_ROCEV2 ? IRDMA_ROCE_PROTOCOL_ONLY : IRDMA_IWARP_PROTOCOL_ONLY; @@ -281,6 +317,10 @@ static int irdma_probe(struct auxiliary_device *aux_dev, const struct auxiliary_ irdma_fill_device_info(iwdev, pf, vsi); rf = iwdev->rf; + err = irdma_init_interrupts(rf, pf); + if (err) + goto err_init_interrupts; + err = irdma_ctrl_init_hw(rf); if (err) goto err_ctrl_init; @@ -311,6 +351,8 @@ static int irdma_probe(struct auxiliary_device *aux_dev, const struct auxiliary_ err_rt_init: irdma_ctrl_deinit_hw(rf); err_ctrl_init: + irdma_deinit_interrupts(rf, pf); +err_init_interrupts: kfree(iwdev->rf); ib_dealloc_device(&iwdev->ibdev); diff --git a/drivers/infiniband/hw/irdma/main.h b/drivers/infiniband/hw/irdma/main.h index 9f0ed6e844711..ef9a9b79d711d 100644 --- a/drivers/infiniband/hw/irdma/main.h +++ b/drivers/infiniband/hw/irdma/main.h @@ -117,6 +117,9 @@ extern struct auxiliary_driver i40iw_auxiliary_drv; #define IRDMA_IRQ_NAME_STR_LEN (64) +#define IRDMA_NUM_AEQ_MSIX 1 +#define IRDMA_MIN_MSIX 2 + enum init_completion_state { INVALID_STATE = 0, INITIAL_STATE, diff --git a/drivers/infiniband/hw/mana/main.c b/drivers/infiniband/hw/mana/main.c index 67c2d43135a8a..457cea6d99095 100644 --- a/drivers/infiniband/hw/mana/main.c +++ b/drivers/infiniband/hw/mana/main.c @@ -174,7 +174,7 @@ static int mana_gd_allocate_doorbell_page(struct gdma_context *gc, req.resource_type = GDMA_RESOURCE_DOORBELL_PAGE; req.num_resources = 1; - req.alignment = 1; + req.alignment = PAGE_SIZE / MANA_PAGE_SIZE; /* Have GDMA start searching from 0 */ req.allocated_resources = 0; diff --git a/drivers/infiniband/hw/mlx5/ah.c b/drivers/infiniband/hw/mlx5/ah.c index 505bc47fd575d..531a57f9ee7e8 100644 --- a/drivers/infiniband/hw/mlx5/ah.c +++ b/drivers/infiniband/hw/mlx5/ah.c @@ -50,11 +50,12 @@ static __be16 mlx5_ah_get_udp_sport(const struct mlx5_ib_dev *dev, return sport; } -static void create_ib_ah(struct mlx5_ib_dev *dev, struct mlx5_ib_ah *ah, +static int create_ib_ah(struct mlx5_ib_dev *dev, struct mlx5_ib_ah *ah, struct rdma_ah_init_attr *init_attr) { struct rdma_ah_attr *ah_attr = init_attr->ah_attr; enum ib_gid_type gid_type; + int rate_val; if (rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH) { const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr); @@ -67,7 +68,10 @@ static void create_ib_ah(struct mlx5_ib_dev *dev, struct mlx5_ib_ah *ah, ah->av.tclass = grh->traffic_class; } - ah->av.stat_rate_sl = (rdma_ah_get_static_rate(ah_attr) << 4); + rate_val = mlx5r_ib_rate(dev, rdma_ah_get_static_rate(ah_attr)); + if (rate_val < 0) + return rate_val; + ah->av.stat_rate_sl = rate_val << 4; if (ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) { if (init_attr->xmit_slave) @@ -88,6 +92,8 @@ static void 
create_ib_ah(struct mlx5_ib_dev *dev, struct mlx5_ib_ah *ah, ah->av.fl_mlid = rdma_ah_get_path_bits(ah_attr) & 0x7f; ah->av.stat_rate_sl |= (rdma_ah_get_sl(ah_attr) & 0xf); } + + return 0; } int mlx5_ib_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr, @@ -120,8 +126,7 @@ int mlx5_ib_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr, return err; } - create_ib_ah(dev, ah, init_attr); - return 0; + return create_ib_ah(dev, ah, init_attr); } int mlx5_ib_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr) diff --git a/drivers/infiniband/hw/mlx5/counters.c b/drivers/infiniband/hw/mlx5/counters.c index 4f6c1968a2ee3..81cfa74147a18 100644 --- a/drivers/infiniband/hw/mlx5/counters.c +++ b/drivers/infiniband/hw/mlx5/counters.c @@ -546,6 +546,7 @@ static int mlx5_ib_counter_bind_qp(struct rdma_counter *counter, struct ib_qp *qp) { struct mlx5_ib_dev *dev = to_mdev(qp->device); + bool new = false; int err; if (!counter->id) { @@ -560,6 +561,7 @@ static int mlx5_ib_counter_bind_qp(struct rdma_counter *counter, return err; counter->id = MLX5_GET(alloc_q_counter_out, out, counter_set_id); + new = true; } err = mlx5_ib_qp_set_counter(qp, counter); @@ -569,8 +571,10 @@ static int mlx5_ib_counter_bind_qp(struct rdma_counter *counter, return 0; fail_set_counter: - mlx5_ib_counter_dealloc(counter); - counter->id = 0; + if (new) { + mlx5_ib_counter_dealloc(counter); + counter->id = 0; + } return err; } diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c index bb02b6adbf2c2..753faa9ad06a8 100644 --- a/drivers/infiniband/hw/mlx5/mr.c +++ b/drivers/infiniband/hw/mlx5/mr.c @@ -1550,7 +1550,7 @@ static void mlx5_ib_dmabuf_invalidate_cb(struct dma_buf_attachment *attach) dma_resv_assert_held(umem_dmabuf->attach->dmabuf->resv); - if (!umem_dmabuf->sgt) + if (!umem_dmabuf->sgt || !mr) return; mlx5r_umr_update_mr_pas(mr, MLX5_IB_UPD_XLT_ZAP); @@ -1935,7 +1935,8 @@ mlx5_alloc_priv_descs(struct ib_device *device, static void mlx5_free_priv_descs(struct mlx5_ib_mr *mr) { - if (!mr->umem && !mr->data_direct && mr->descs) { + if (!mr->umem && !mr->data_direct && + mr->ibmr.type != IB_MR_TYPE_DM && mr->descs) { struct ib_device *device = mr->ibmr.device; int size = mr->max_descs * mr->desc_size; struct mlx5_ib_dev *dev = to_mdev(device); @@ -2022,11 +2023,16 @@ static int mlx5_revoke_mr(struct mlx5_ib_mr *mr) struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device); struct mlx5_cache_ent *ent = mr->mmkey.cache_ent; bool is_odp = is_odp_mr(mr); + bool is_odp_dma_buf = is_dmabuf_mr(mr) && + !to_ib_umem_dmabuf(mr->umem)->pinned; int ret = 0; if (is_odp) mutex_lock(&to_ib_umem_odp(mr->umem)->umem_mutex); + if (is_odp_dma_buf) + dma_resv_lock(to_ib_umem_dmabuf(mr->umem)->attach->dmabuf->resv, NULL); + if (mr->mmkey.cacheable && !mlx5r_umr_revoke_mr(mr) && !cache_ent_find_and_store(dev, mr)) { ent = mr->mmkey.cache_ent; /* upon storing to a clean temp entry - schedule its cleanup */ @@ -2054,6 +2060,12 @@ static int mlx5_revoke_mr(struct mlx5_ib_mr *mr) mutex_unlock(&to_ib_umem_odp(mr->umem)->umem_mutex); } + if (is_odp_dma_buf) { + if (!ret) + to_ib_umem_dmabuf(mr->umem)->private = NULL; + dma_resv_unlock(to_ib_umem_dmabuf(mr->umem)->attach->dmabuf->resv); + } + return ret; } diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c index f1e23583e6c08..e77c9280c07e4 100644 --- a/drivers/infiniband/hw/mlx5/odp.c +++ b/drivers/infiniband/hw/mlx5/odp.c @@ -242,6 +242,7 @@ static void destroy_unused_implicit_child_mr(struct mlx5_ib_mr *mr) if 
(__xa_cmpxchg(&imr->implicit_children, idx, mr, NULL, GFP_KERNEL) != mr) { xa_unlock(&imr->implicit_children); + mlx5r_deref_odp_mkey(&imr->mmkey); return; } diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index a43eba9d3572c..88724d15705d4 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c @@ -3447,11 +3447,11 @@ static int ib_to_mlx5_rate_map(u8 rate) return 0; } -static int ib_rate_to_mlx5(struct mlx5_ib_dev *dev, u8 rate) +int mlx5r_ib_rate(struct mlx5_ib_dev *dev, u8 rate) { u32 stat_rate_support; - if (rate == IB_RATE_PORT_CURRENT) + if (rate == IB_RATE_PORT_CURRENT || rate == IB_RATE_800_GBPS) return 0; if (rate < IB_RATE_2_5_GBPS || rate > IB_RATE_800_GBPS) @@ -3596,7 +3596,7 @@ static int mlx5_set_path(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, sizeof(grh->dgid.raw)); } - err = ib_rate_to_mlx5(dev, rdma_ah_get_static_rate(ah)); + err = mlx5r_ib_rate(dev, rdma_ah_get_static_rate(ah)); if (err < 0) return err; MLX5_SET(ads, path, stat_rate, err); @@ -4579,6 +4579,8 @@ static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr, set_id = mlx5_ib_get_counters_id(dev, attr->port_num - 1); MLX5_SET(dctc, dctc, counter_set_id, set_id); + + qp->port = attr->port_num; } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) { struct mlx5_ib_modify_qp_resp resp = {}; u32 out[MLX5_ST_SZ_DW(create_dct_out)] = {}; @@ -5074,7 +5076,7 @@ static int mlx5_ib_dct_query_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *mqp, } if (qp_attr_mask & IB_QP_PORT) - qp_attr->port_num = MLX5_GET(dctc, dctc, port); + qp_attr->port_num = mqp->port; if (qp_attr_mask & IB_QP_MIN_RNR_TIMER) qp_attr->min_rnr_timer = MLX5_GET(dctc, dctc, min_rnr_nak); if (qp_attr_mask & IB_QP_AV) { diff --git a/drivers/infiniband/hw/mlx5/qp.h b/drivers/infiniband/hw/mlx5/qp.h index b6ee7c3ee1ca1..2530e7730635f 100644 --- a/drivers/infiniband/hw/mlx5/qp.h +++ b/drivers/infiniband/hw/mlx5/qp.h @@ -56,4 +56,5 @@ int mlx5_core_xrcd_dealloc(struct mlx5_ib_dev *dev, u32 xrcdn); int mlx5_ib_qp_set_counter(struct ib_qp *qp, struct rdma_counter *counter); int mlx5_ib_qp_event_init(void); void mlx5_ib_qp_event_cleanup(void); +int mlx5r_ib_rate(struct mlx5_ib_dev *dev, u8 rate); #endif /* _MLX5_IB_QP_H */ diff --git a/drivers/infiniband/hw/mlx5/umr.c b/drivers/infiniband/hw/mlx5/umr.c index 887fd6fa3ba93..793f3c5c4d012 100644 --- a/drivers/infiniband/hw/mlx5/umr.c +++ b/drivers/infiniband/hw/mlx5/umr.c @@ -231,30 +231,6 @@ void mlx5r_umr_cleanup(struct mlx5_ib_dev *dev) ib_dealloc_pd(dev->umrc.pd); } -static int mlx5r_umr_recover(struct mlx5_ib_dev *dev) -{ - struct umr_common *umrc = &dev->umrc; - struct ib_qp_attr attr; - int err; - - attr.qp_state = IB_QPS_RESET; - err = ib_modify_qp(umrc->qp, &attr, IB_QP_STATE); - if (err) { - mlx5_ib_dbg(dev, "Couldn't modify UMR QP\n"); - goto err; - } - - err = mlx5r_umr_qp_rst2rts(dev, umrc->qp); - if (err) - goto err; - - umrc->state = MLX5_UMR_STATE_ACTIVE; - return 0; - -err: - umrc->state = MLX5_UMR_STATE_ERR; - return err; -} static int mlx5r_umr_post_send(struct ib_qp *ibqp, u32 mkey, struct ib_cqe *cqe, struct mlx5r_umr_wqe *wqe, bool with_data) @@ -302,6 +278,61 @@ static int mlx5r_umr_post_send(struct ib_qp *ibqp, u32 mkey, struct ib_cqe *cqe, return err; } +static int mlx5r_umr_recover(struct mlx5_ib_dev *dev, u32 mkey, + struct mlx5r_umr_context *umr_context, + struct mlx5r_umr_wqe *wqe, bool with_data) +{ + struct umr_common *umrc = &dev->umrc; + struct ib_qp_attr attr; + int err; + + 
mutex_lock(&umrc->lock); + /* Preventing any further WRs to be sent now */ + if (umrc->state != MLX5_UMR_STATE_RECOVER) { + mlx5_ib_warn(dev, "UMR recovery encountered an unexpected state=%d\n", + umrc->state); + umrc->state = MLX5_UMR_STATE_RECOVER; + } + mutex_unlock(&umrc->lock); + + /* Sending a final/barrier WR (the failed one) and wait for its completion. + * This will ensure that all the previous WRs got a completion before + * we set the QP state to RESET. + */ + err = mlx5r_umr_post_send(umrc->qp, mkey, &umr_context->cqe, wqe, + with_data); + if (err) { + mlx5_ib_warn(dev, "UMR recovery post send failed, err %d\n", err); + goto err; + } + + /* Since the QP is in an error state, it will only receive + * IB_WC_WR_FLUSH_ERR. However, as it serves only as a barrier + * we don't care about its status. + */ + wait_for_completion(&umr_context->done); + + attr.qp_state = IB_QPS_RESET; + err = ib_modify_qp(umrc->qp, &attr, IB_QP_STATE); + if (err) { + mlx5_ib_warn(dev, "Couldn't modify UMR QP to RESET, err=%d\n", err); + goto err; + } + + err = mlx5r_umr_qp_rst2rts(dev, umrc->qp); + if (err) { + mlx5_ib_warn(dev, "Couldn't modify UMR QP to RTS, err=%d\n", err); + goto err; + } + + umrc->state = MLX5_UMR_STATE_ACTIVE; + return 0; + +err: + umrc->state = MLX5_UMR_STATE_ERR; + return err; +} + static void mlx5r_umr_done(struct ib_cq *cq, struct ib_wc *wc) { struct mlx5_ib_umr_context *context = @@ -366,9 +397,7 @@ static int mlx5r_umr_post_send_wait(struct mlx5_ib_dev *dev, u32 mkey, mlx5_ib_warn(dev, "reg umr failed (%u). Trying to recover and resubmit the flushed WQEs, mkey = %u\n", umr_context.status, mkey); - mutex_lock(&umrc->lock); - err = mlx5r_umr_recover(dev); - mutex_unlock(&umrc->lock); + err = mlx5r_umr_recover(dev, mkey, &umr_context, wqe, with_data); if (err) mlx5_ib_warn(dev, "couldn't recover UMR, err %d\n", err); diff --git a/drivers/infiniband/sw/rxe/rxe.c b/drivers/infiniband/sw/rxe/rxe.c index 1ba4a0c8726ae..e27478fe9456c 100644 --- a/drivers/infiniband/sw/rxe/rxe.c +++ b/drivers/infiniband/sw/rxe/rxe.c @@ -38,10 +38,8 @@ void rxe_dealloc(struct ib_device *ib_dev) } /* initialize rxe device parameters */ -static void rxe_init_device_param(struct rxe_dev *rxe) +static void rxe_init_device_param(struct rxe_dev *rxe, struct net_device *ndev) { - struct net_device *ndev; - rxe->max_inline_data = RXE_MAX_INLINE_DATA; rxe->attr.vendor_id = RXE_VENDOR_ID; @@ -74,15 +72,9 @@ static void rxe_init_device_param(struct rxe_dev *rxe) rxe->attr.max_pkeys = RXE_MAX_PKEYS; rxe->attr.local_ca_ack_delay = RXE_LOCAL_CA_ACK_DELAY; - ndev = rxe_ib_device_get_netdev(&rxe->ib_dev); - if (!ndev) - return; - addrconf_addr_eui48((unsigned char *)&rxe->attr.sys_image_guid, ndev->dev_addr); - dev_put(ndev); - rxe->max_ucontext = RXE_MAX_UCONTEXT; } @@ -115,18 +107,13 @@ static void rxe_init_port_param(struct rxe_port *port) /* initialize port state, note IB convention that HCA ports are always * numbered from 1 */ -static void rxe_init_ports(struct rxe_dev *rxe) +static void rxe_init_ports(struct rxe_dev *rxe, struct net_device *ndev) { struct rxe_port *port = &rxe->port; - struct net_device *ndev; rxe_init_port_param(port); - ndev = rxe_ib_device_get_netdev(&rxe->ib_dev); - if (!ndev) - return; addrconf_addr_eui48((unsigned char *)&port->port_guid, ndev->dev_addr); - dev_put(ndev); spin_lock_init(&port->port_lock); } @@ -144,12 +131,12 @@ static void rxe_init_pools(struct rxe_dev *rxe) } /* initialize rxe device state */ -static void rxe_init(struct rxe_dev *rxe) +static void rxe_init(struct rxe_dev 
*rxe, struct net_device *ndev) { /* init default device parameters */ - rxe_init_device_param(rxe); + rxe_init_device_param(rxe, ndev); - rxe_init_ports(rxe); + rxe_init_ports(rxe, ndev); rxe_init_pools(rxe); /* init pending mmap list */ @@ -184,7 +171,7 @@ void rxe_set_mtu(struct rxe_dev *rxe, unsigned int ndev_mtu) int rxe_add(struct rxe_dev *rxe, unsigned int mtu, const char *ibdev_name, struct net_device *ndev) { - rxe_init(rxe); + rxe_init(rxe, ndev); rxe_set_mtu(rxe, mtu); return rxe_register_device(rxe, ibdev_name, ndev); diff --git a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c index 9ad8d98562752..53db7c8191e38 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c @@ -97,10 +97,13 @@ static int ipoib_changelink(struct net_device *dev, struct nlattr *tb[], return ret; } -static int ipoib_new_child_link(struct net *src_net, struct net_device *dev, - struct nlattr *tb[], struct nlattr *data[], +static int ipoib_new_child_link(struct net_device *dev, + struct rtnl_newlink_params *params, struct netlink_ext_ack *extack) { + struct net *link_net = rtnl_newlink_link_net(params); + struct nlattr **data = params->data; + struct nlattr **tb = params->tb; struct net_device *pdev; struct ipoib_dev_priv *ppriv; u16 child_pkey; @@ -109,7 +112,7 @@ static int ipoib_new_child_link(struct net *src_net, struct net_device *dev, if (!tb[IFLA_LINK]) return -EINVAL; - pdev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK])); + pdev = __dev_get_by_index(link_net, nla_get_u32(tb[IFLA_LINK])); if (!pdev || pdev->type != ARPHRD_INFINIBAND) return -ENODEV; diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c index 8fe2a51df649e..c33e6f33265ba 100644 --- a/drivers/input/joystick/xpad.c +++ b/drivers/input/joystick/xpad.c @@ -140,6 +140,7 @@ static const struct xpad_device { { 0x044f, 0x0f00, "Thrustmaster Wheel", 0, XTYPE_XBOX }, { 0x044f, 0x0f03, "Thrustmaster Wheel", 0, XTYPE_XBOX }, { 0x044f, 0x0f07, "Thrustmaster, Inc. Controller", 0, XTYPE_XBOX }, + { 0x044f, 0xd01e, "ThrustMaster, Inc. 
ESWAP X 2 ELDEN RING EDITION", 0, XTYPE_XBOXONE }, { 0x044f, 0x0f10, "Thrustmaster Modena GT Wheel", 0, XTYPE_XBOX }, { 0x044f, 0xb326, "Thrustmaster Gamepad GP XID", 0, XTYPE_XBOX360 }, { 0x045e, 0x0202, "Microsoft X-Box pad v1 (US)", 0, XTYPE_XBOX }, @@ -177,6 +178,7 @@ static const struct xpad_device { { 0x06a3, 0x0200, "Saitek Racing Wheel", 0, XTYPE_XBOX }, { 0x06a3, 0x0201, "Saitek Adrenalin", 0, XTYPE_XBOX }, { 0x06a3, 0xf51a, "Saitek P3600", 0, XTYPE_XBOX360 }, + { 0x0738, 0x4503, "Mad Catz Racing Wheel", 0, XTYPE_XBOXONE }, { 0x0738, 0x4506, "Mad Catz 4506 Wireless Controller", 0, XTYPE_XBOX }, { 0x0738, 0x4516, "Mad Catz Control Pad", 0, XTYPE_XBOX }, { 0x0738, 0x4520, "Mad Catz Control Pad Pro", 0, XTYPE_XBOX }, @@ -238,6 +240,7 @@ static const struct xpad_device { { 0x0e6f, 0x0146, "Rock Candy Wired Controller for Xbox One", 0, XTYPE_XBOXONE }, { 0x0e6f, 0x0147, "PDP Marvel Xbox One Controller", 0, XTYPE_XBOXONE }, { 0x0e6f, 0x015c, "PDP Xbox One Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOXONE }, + { 0x0e6f, 0x015d, "PDP Mirror's Edge Official Wired Controller for Xbox One", 0, XTYPE_XBOXONE }, { 0x0e6f, 0x0161, "PDP Xbox One Controller", 0, XTYPE_XBOXONE }, { 0x0e6f, 0x0162, "PDP Xbox One Controller", 0, XTYPE_XBOXONE }, { 0x0e6f, 0x0163, "PDP Xbox One Controller", 0, XTYPE_XBOXONE }, @@ -276,12 +279,15 @@ static const struct xpad_device { { 0x0f0d, 0x0078, "Hori Real Arcade Pro V Kai Xbox One", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOXONE }, { 0x0f0d, 0x00c5, "Hori Fighting Commander ONE", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOXONE }, { 0x0f0d, 0x00dc, "HORIPAD FPS for Nintendo Switch", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, + { 0x0f0d, 0x0151, "Hori Racing Wheel Overdrive for Xbox Series X", 0, XTYPE_XBOXONE }, + { 0x0f0d, 0x0152, "Hori Racing Wheel Overdrive for Xbox Series X", 0, XTYPE_XBOXONE }, { 0x0f30, 0x010b, "Philips Recoil", 0, XTYPE_XBOX }, { 0x0f30, 0x0202, "Joytech Advanced Controller", 0, XTYPE_XBOX }, { 0x0f30, 0x8888, "BigBen XBMiniPad Controller", 0, XTYPE_XBOX }, { 0x102c, 0xff0c, "Joytech Wireless Advanced Controller", 0, XTYPE_XBOX }, { 0x1038, 0x1430, "SteelSeries Stratus Duo", 0, XTYPE_XBOX360 }, { 0x1038, 0x1431, "SteelSeries Stratus Duo", 0, XTYPE_XBOX360 }, + { 0x10f5, 0x7005, "Turtle Beach Recon Controller", 0, XTYPE_XBOXONE }, { 0x11c9, 0x55f0, "Nacon GC-100XF", 0, XTYPE_XBOX360 }, { 0x11ff, 0x0511, "PXN V900", 0, XTYPE_XBOX360 }, { 0x1209, 0x2882, "Ardwiino Controller", 0, XTYPE_XBOX360 }, @@ -306,7 +312,7 @@ static const struct xpad_device { { 0x1689, 0xfe00, "Razer Sabertooth", 0, XTYPE_XBOX360 }, { 0x17ef, 0x6182, "Lenovo Legion Controller for Windows", 0, XTYPE_XBOX360 }, { 0x1949, 0x041a, "Amazon Game Controller", 0, XTYPE_XBOX360 }, - { 0x1a86, 0xe310, "QH Electronics Controller", 0, XTYPE_XBOX360 }, + { 0x1a86, 0xe310, "Legion Go S", 0, XTYPE_XBOX360 }, { 0x1bad, 0x0002, "Harmonix Rock Band Guitar", 0, XTYPE_XBOX360 }, { 0x1bad, 0x0003, "Harmonix Rock Band Drumkit", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 }, { 0x1bad, 0x0130, "Ion Drum Rocker", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 }, @@ -343,6 +349,7 @@ static const struct xpad_device { { 0x1bad, 0xfa01, "MadCatz GamePad", 0, XTYPE_XBOX360 }, { 0x1bad, 0xfd00, "Razer Onza TE", 0, XTYPE_XBOX360 }, { 0x1bad, 0xfd01, "Razer Onza", 0, XTYPE_XBOX360 }, + { 0x1ee9, 0x1590, "ZOTAC Gaming Zone", 0, XTYPE_XBOX360 }, { 0x20d6, 0x2001, "BDA Xbox Series X Wired Controller", 0, XTYPE_XBOXONE }, { 0x20d6, 0x2009, "PowerA Enhanced Wired Controller for Xbox Series X|S", 0, XTYPE_XBOXONE }, { 0x20d6, 0x281f, "PowerA 
Wired Controller For Xbox 360", 0, XTYPE_XBOX360 }, @@ -366,6 +373,7 @@ static const struct xpad_device { { 0x24c6, 0x5510, "Hori Fighting Commander ONE (Xbox 360/PC Mode)", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, { 0x24c6, 0x551a, "PowerA FUSION Pro Controller", 0, XTYPE_XBOXONE }, { 0x24c6, 0x561a, "PowerA FUSION Controller", 0, XTYPE_XBOXONE }, + { 0x24c6, 0x581a, "ThrustMaster XB1 Classic Controller", 0, XTYPE_XBOXONE }, { 0x24c6, 0x5b00, "ThrustMaster Ferrari 458 Racing Wheel", 0, XTYPE_XBOX360 }, { 0x24c6, 0x5b02, "Thrustmaster, Inc. GPX Controller", 0, XTYPE_XBOX360 }, { 0x24c6, 0x5b03, "Thrustmaster Ferrari 458 Racing Wheel", 0, XTYPE_XBOX360 }, @@ -374,10 +382,15 @@ static const struct xpad_device { { 0x2563, 0x058d, "OneXPlayer Gamepad", 0, XTYPE_XBOX360 }, { 0x294b, 0x3303, "Snakebyte GAMEPAD BASE X", 0, XTYPE_XBOXONE }, { 0x294b, 0x3404, "Snakebyte GAMEPAD RGB X", 0, XTYPE_XBOXONE }, + { 0x2993, 0x2001, "TECNO Pocket Go", 0, XTYPE_XBOX360 }, { 0x2dc8, 0x2000, "8BitDo Pro 2 Wired Controller fox Xbox", 0, XTYPE_XBOXONE }, { 0x2dc8, 0x3106, "8BitDo Ultimate Wireless / Pro 2 Wired Controller", 0, XTYPE_XBOX360 }, + { 0x2dc8, 0x3109, "8BitDo Ultimate Wireless Bluetooth", 0, XTYPE_XBOX360 }, { 0x2dc8, 0x310a, "8BitDo Ultimate 2C Wireless Controller", 0, XTYPE_XBOX360 }, + { 0x2dc8, 0x6001, "8BitDo SN30 Pro", 0, XTYPE_XBOX360 }, { 0x2e24, 0x0652, "Hyperkin Duke X-Box One pad", 0, XTYPE_XBOXONE }, + { 0x2e24, 0x1688, "Hyperkin X91 X-Box One pad", 0, XTYPE_XBOXONE }, + { 0x2e95, 0x0504, "SCUF Gaming Controller", MAP_SELECT_BUTTON, XTYPE_XBOXONE }, { 0x31e3, 0x1100, "Wooting One", 0, XTYPE_XBOX360 }, { 0x31e3, 0x1200, "Wooting Two", 0, XTYPE_XBOX360 }, { 0x31e3, 0x1210, "Wooting Lekker", 0, XTYPE_XBOX360 }, @@ -385,11 +398,16 @@ static const struct xpad_device { { 0x31e3, 0x1230, "Wooting Two HE (ARM)", 0, XTYPE_XBOX360 }, { 0x31e3, 0x1300, "Wooting 60HE (AVR)", 0, XTYPE_XBOX360 }, { 0x31e3, 0x1310, "Wooting 60HE (ARM)", 0, XTYPE_XBOX360 }, + { 0x3285, 0x0603, "Nacon Pro Compact controller for Xbox", 0, XTYPE_XBOXONE }, { 0x3285, 0x0607, "Nacon GC-100", 0, XTYPE_XBOX360 }, + { 0x3285, 0x0614, "Nacon Pro Compact", 0, XTYPE_XBOXONE }, { 0x3285, 0x0646, "Nacon Pro Compact", 0, XTYPE_XBOXONE }, + { 0x3285, 0x0662, "Nacon Revolution5 Pro", 0, XTYPE_XBOX360 }, { 0x3285, 0x0663, "Nacon Evol-X", 0, XTYPE_XBOXONE }, { 0x3537, 0x1004, "GameSir T4 Kaleid", 0, XTYPE_XBOX360 }, + { 0x3537, 0x1010, "GameSir G7 SE", 0, XTYPE_XBOXONE }, { 0x3767, 0x0101, "Fanatec Speedster 3 Forceshock Wheel", 0, XTYPE_XBOX }, + { 0x413d, 0x2104, "Black Shark Green Ghost Gamepad", 0, XTYPE_XBOX360 }, { 0xffff, 0xffff, "Chinese-made Xbox Controller", 0, XTYPE_XBOX }, { 0x0000, 0x0000, "Generic X-Box pad", 0, XTYPE_UNKNOWN } }; @@ -488,6 +506,7 @@ static const struct usb_device_id xpad_table[] = { XPAD_XBOX360_VENDOR(0x03f0), /* HP HyperX Xbox 360 controllers */ XPAD_XBOXONE_VENDOR(0x03f0), /* HP HyperX Xbox One controllers */ XPAD_XBOX360_VENDOR(0x044f), /* Thrustmaster Xbox 360 controllers */ + XPAD_XBOXONE_VENDOR(0x044f), /* Thrustmaster Xbox One controllers */ XPAD_XBOX360_VENDOR(0x045e), /* Microsoft Xbox 360 controllers */ XPAD_XBOXONE_VENDOR(0x045e), /* Microsoft Xbox One controllers */ XPAD_XBOX360_VENDOR(0x046d), /* Logitech Xbox 360-style controllers */ @@ -519,8 +538,9 @@ static const struct usb_device_id xpad_table[] = { XPAD_XBOX360_VENDOR(0x1689), /* Razer Onza */ XPAD_XBOX360_VENDOR(0x17ef), /* Lenovo */ XPAD_XBOX360_VENDOR(0x1949), /* Amazon controllers */ - XPAD_XBOX360_VENDOR(0x1a86), /* QH Electronics 
*/ + XPAD_XBOX360_VENDOR(0x1a86), /* Nanjing Qinheng Microelectronics (WCH) */ XPAD_XBOX360_VENDOR(0x1bad), /* Harmonix Rock Band guitar and drums */ + XPAD_XBOX360_VENDOR(0x1ee9), /* ZOTAC Technology Limited */ XPAD_XBOX360_VENDOR(0x20d6), /* PowerA controllers */ XPAD_XBOXONE_VENDOR(0x20d6), /* PowerA controllers */ XPAD_XBOX360_VENDOR(0x2345), /* Machenike Controllers */ @@ -528,17 +548,20 @@ static const struct usb_device_id xpad_table[] = { XPAD_XBOXONE_VENDOR(0x24c6), /* PowerA controllers */ XPAD_XBOX360_VENDOR(0x2563), /* OneXPlayer Gamepad */ XPAD_XBOX360_VENDOR(0x260d), /* Dareu H101 */ - XPAD_XBOXONE_VENDOR(0x294b), /* Snakebyte */ + XPAD_XBOXONE_VENDOR(0x294b), /* Snakebyte */ + XPAD_XBOX360_VENDOR(0x2993), /* TECNO Mobile */ XPAD_XBOX360_VENDOR(0x2c22), /* Qanba Controllers */ - XPAD_XBOX360_VENDOR(0x2dc8), /* 8BitDo Pro 2 Wired Controller */ - XPAD_XBOXONE_VENDOR(0x2dc8), /* 8BitDo Pro 2 Wired Controller for Xbox */ - XPAD_XBOXONE_VENDOR(0x2e24), /* Hyperkin Duke Xbox One pad */ - XPAD_XBOX360_VENDOR(0x2f24), /* GameSir controllers */ + XPAD_XBOX360_VENDOR(0x2dc8), /* 8BitDo Controllers */ + XPAD_XBOXONE_VENDOR(0x2dc8), /* 8BitDo Controllers */ + XPAD_XBOXONE_VENDOR(0x2e24), /* Hyperkin Controllers */ + XPAD_XBOX360_VENDOR(0x2f24), /* GameSir Controllers */ + XPAD_XBOXONE_VENDOR(0x2e95), /* SCUF Gaming Controller */ XPAD_XBOX360_VENDOR(0x31e3), /* Wooting Keyboards */ XPAD_XBOX360_VENDOR(0x3285), /* Nacon GC-100 */ XPAD_XBOXONE_VENDOR(0x3285), /* Nacon Evol-X */ XPAD_XBOX360_VENDOR(0x3537), /* GameSir Controllers */ XPAD_XBOXONE_VENDOR(0x3537), /* GameSir Controllers */ + XPAD_XBOX360_VENDOR(0x413d), /* Black Shark Green Ghost Controller */ { } }; @@ -691,7 +714,9 @@ static const struct xboxone_init_packet xboxone_init_packets[] = { XBOXONE_INIT_PKT(0x045e, 0x0b00, xboxone_s_init), XBOXONE_INIT_PKT(0x045e, 0x0b00, extra_input_packet_init), XBOXONE_INIT_PKT(0x0e6f, 0x0000, xboxone_pdp_led_on), + XBOXONE_INIT_PKT(0x20d6, 0xa01a, xboxone_pdp_led_on), XBOXONE_INIT_PKT(0x0e6f, 0x0000, xboxone_pdp_auth), + XBOXONE_INIT_PKT(0x20d6, 0xa01a, xboxone_pdp_auth), XBOXONE_INIT_PKT(0x24c6, 0x541a, xboxone_rumblebegin_init), XBOXONE_INIT_PKT(0x24c6, 0x542a, xboxone_rumblebegin_init), XBOXONE_INIT_PKT(0x24c6, 0x543a, xboxone_rumblebegin_init), diff --git a/drivers/input/misc/iqs7222.c b/drivers/input/misc/iqs7222.c index 22022d11470db..80b917944b51e 100644 --- a/drivers/input/misc/iqs7222.c +++ b/drivers/input/misc/iqs7222.c @@ -100,11 +100,11 @@ enum iqs7222_reg_key_id { enum iqs7222_reg_grp_id { IQS7222_REG_GRP_STAT, - IQS7222_REG_GRP_FILT, IQS7222_REG_GRP_CYCLE, IQS7222_REG_GRP_GLBL, IQS7222_REG_GRP_BTN, IQS7222_REG_GRP_CHAN, + IQS7222_REG_GRP_FILT, IQS7222_REG_GRP_SLDR, IQS7222_REG_GRP_TPAD, IQS7222_REG_GRP_GPIO, @@ -286,6 +286,7 @@ static const struct iqs7222_event_desc iqs7222_tp_events[] = { struct iqs7222_reg_grp_desc { u16 base; + u16 val_len; int num_row; int num_col; }; @@ -342,6 +343,7 @@ static const struct iqs7222_dev_desc iqs7222_devs[] = { }, [IQS7222_REG_GRP_FILT] = { .base = 0xAC00, + .val_len = 3, .num_row = 1, .num_col = 2, }, @@ -400,6 +402,7 @@ static const struct iqs7222_dev_desc iqs7222_devs[] = { }, [IQS7222_REG_GRP_FILT] = { .base = 0xAC00, + .val_len = 3, .num_row = 1, .num_col = 2, }, @@ -454,6 +457,7 @@ static const struct iqs7222_dev_desc iqs7222_devs[] = { }, [IQS7222_REG_GRP_FILT] = { .base = 0xC400, + .val_len = 3, .num_row = 1, .num_col = 2, }, @@ -496,6 +500,7 @@ static const struct iqs7222_dev_desc iqs7222_devs[] = { }, [IQS7222_REG_GRP_FILT] = { .base = 
0xC400, + .val_len = 3, .num_row = 1, .num_col = 2, }, @@ -543,6 +548,7 @@ static const struct iqs7222_dev_desc iqs7222_devs[] = { }, [IQS7222_REG_GRP_FILT] = { .base = 0xAA00, + .val_len = 3, .num_row = 1, .num_col = 2, }, @@ -600,6 +606,7 @@ static const struct iqs7222_dev_desc iqs7222_devs[] = { }, [IQS7222_REG_GRP_FILT] = { .base = 0xAA00, + .val_len = 3, .num_row = 1, .num_col = 2, }, @@ -656,6 +663,7 @@ static const struct iqs7222_dev_desc iqs7222_devs[] = { }, [IQS7222_REG_GRP_FILT] = { .base = 0xAE00, + .val_len = 3, .num_row = 1, .num_col = 2, }, @@ -712,6 +720,7 @@ static const struct iqs7222_dev_desc iqs7222_devs[] = { }, [IQS7222_REG_GRP_FILT] = { .base = 0xAE00, + .val_len = 3, .num_row = 1, .num_col = 2, }, @@ -768,6 +777,7 @@ static const struct iqs7222_dev_desc iqs7222_devs[] = { }, [IQS7222_REG_GRP_FILT] = { .base = 0xAE00, + .val_len = 3, .num_row = 1, .num_col = 2, }, @@ -1604,7 +1614,7 @@ static int iqs7222_force_comms(struct iqs7222_private *iqs7222) } static int iqs7222_read_burst(struct iqs7222_private *iqs7222, - u16 reg, void *val, u16 num_val) + u16 reg, void *val, u16 val_len) { u8 reg_buf[sizeof(__be16)]; int ret, i; @@ -1619,7 +1629,7 @@ static int iqs7222_read_burst(struct iqs7222_private *iqs7222, { .addr = client->addr, .flags = I2C_M_RD, - .len = num_val * sizeof(__le16), + .len = val_len, .buf = (u8 *)val, }, }; @@ -1675,7 +1685,7 @@ static int iqs7222_read_word(struct iqs7222_private *iqs7222, u16 reg, u16 *val) __le16 val_buf; int error; - error = iqs7222_read_burst(iqs7222, reg, &val_buf, 1); + error = iqs7222_read_burst(iqs7222, reg, &val_buf, sizeof(val_buf)); if (error) return error; @@ -1685,10 +1695,9 @@ static int iqs7222_read_word(struct iqs7222_private *iqs7222, u16 reg, u16 *val) } static int iqs7222_write_burst(struct iqs7222_private *iqs7222, - u16 reg, const void *val, u16 num_val) + u16 reg, const void *val, u16 val_len) { int reg_len = reg > U8_MAX ? sizeof(reg) : sizeof(u8); - int val_len = num_val * sizeof(__le16); int msg_len = reg_len + val_len; int ret, i; struct i2c_client *client = iqs7222->client; @@ -1747,7 +1756,7 @@ static int iqs7222_write_word(struct iqs7222_private *iqs7222, u16 reg, u16 val) { __le16 val_buf = cpu_to_le16(val); - return iqs7222_write_burst(iqs7222, reg, &val_buf, 1); + return iqs7222_write_burst(iqs7222, reg, &val_buf, sizeof(val_buf)); } static int iqs7222_ati_trigger(struct iqs7222_private *iqs7222) @@ -1831,30 +1840,14 @@ static int iqs7222_dev_init(struct iqs7222_private *iqs7222, int dir) /* * Acknowledge reset before writing any registers in case the device - * suffers a spurious reset during initialization. Because this step - * may change the reserved fields of the second filter beta register, - * its cache must be updated. - * - * Writing the second filter beta register, in turn, may clobber the - * system status register. As such, the filter beta register pair is - * written first to protect against this hazard. + * suffers a spurious reset during initialization. 
*/ if (dir == WRITE) { - u16 reg = dev_desc->reg_grps[IQS7222_REG_GRP_FILT].base + 1; - u16 filt_setup; - error = iqs7222_write_word(iqs7222, IQS7222_SYS_SETUP, iqs7222->sys_setup[0] | IQS7222_SYS_SETUP_ACK_RESET); if (error) return error; - - error = iqs7222_read_word(iqs7222, reg, &filt_setup); - if (error) - return error; - - iqs7222->filt_setup[1] &= GENMASK(7, 0); - iqs7222->filt_setup[1] |= (filt_setup & ~GENMASK(7, 0)); } /* @@ -1883,6 +1876,7 @@ static int iqs7222_dev_init(struct iqs7222_private *iqs7222, int dir) int num_col = dev_desc->reg_grps[i].num_col; u16 reg = dev_desc->reg_grps[i].base; __le16 *val_buf; + u16 val_len = dev_desc->reg_grps[i].val_len ? : num_col * sizeof(*val_buf); u16 *val; if (!num_col) @@ -1900,7 +1894,7 @@ static int iqs7222_dev_init(struct iqs7222_private *iqs7222, int dir) switch (dir) { case READ: error = iqs7222_read_burst(iqs7222, reg, - val_buf, num_col); + val_buf, val_len); for (k = 0; k < num_col; k++) val[k] = le16_to_cpu(val_buf[k]); break; @@ -1909,7 +1903,7 @@ static int iqs7222_dev_init(struct iqs7222_private *iqs7222, int dir) for (k = 0; k < num_col; k++) val_buf[k] = cpu_to_le16(val[k]); error = iqs7222_write_burst(iqs7222, reg, - val_buf, num_col); + val_buf, val_len); break; default: @@ -1962,7 +1956,7 @@ static int iqs7222_dev_info(struct iqs7222_private *iqs7222) int error, i; error = iqs7222_read_burst(iqs7222, IQS7222_PROD_NUM, dev_id, - ARRAY_SIZE(dev_id)); + sizeof(dev_id)); if (error) return error; @@ -2915,7 +2909,7 @@ static int iqs7222_report(struct iqs7222_private *iqs7222) __le16 status[IQS7222_MAX_COLS_STAT]; error = iqs7222_read_burst(iqs7222, IQS7222_SYS_STATUS, status, - num_stat); + num_stat * sizeof(*status)); if (error) return error; diff --git a/drivers/input/serio/i8042-acpipnpio.h b/drivers/input/serio/i8042-acpipnpio.h index 127cfdc8668a0..6ed9fc34948cb 100644 --- a/drivers/input/serio/i8042-acpipnpio.h +++ b/drivers/input/serio/i8042-acpipnpio.h @@ -1080,16 +1080,14 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = { DMI_MATCH(DMI_BOARD_VENDOR, "TUXEDO"), DMI_MATCH(DMI_BOARD_NAME, "AURA1501"), }, - .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS | - SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP) + .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE) }, { .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "TUXEDO"), DMI_MATCH(DMI_BOARD_NAME, "EDUBOOK1502"), }, - .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS | - SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP) + .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE) }, { /* Mivvy M310 */ @@ -1159,9 +1157,7 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = { }, /* * A lot of modern Clevo barebones have touchpad and/or keyboard issues - * after suspend fixable with nomux + reset + noloop + nopnp. Luckily, - * none of them have an external PS/2 port so this can safely be set for - * all of them. + * after suspend fixable with the forcenorestore quirk. * Clevo barebones come with board_vendor and/or system_vendor set to * either the very generic string "Notebook" and/or a different value * for each individual reseller. 
The only somewhat universal way to @@ -1171,29 +1167,25 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = { .matches = { DMI_MATCH(DMI_BOARD_NAME, "LAPQC71A"), }, - .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS | - SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP) + .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE) }, { .matches = { DMI_MATCH(DMI_BOARD_NAME, "LAPQC71B"), }, - .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS | - SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP) + .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE) }, { .matches = { DMI_MATCH(DMI_BOARD_NAME, "N140CU"), }, - .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS | - SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP) + .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE) }, { .matches = { DMI_MATCH(DMI_BOARD_NAME, "N141CU"), }, - .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS | - SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP) + .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE) }, { .matches = { @@ -1205,29 +1197,19 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = { .matches = { DMI_MATCH(DMI_BOARD_NAME, "NH5xAx"), }, - .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS | - SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP) + .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE) }, { - /* - * Setting SERIO_QUIRK_NOMUX or SERIO_QUIRK_RESET_ALWAYS makes - * the keyboard very laggy for ~5 seconds after boot and - * sometimes also after resume. - * However both are required for the keyboard to not fail - * completely sometimes after boot or resume. - */ .matches = { DMI_MATCH(DMI_BOARD_NAME, "NHxxRZQ"), }, - .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS | - SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP) + .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE) }, { .matches = { DMI_MATCH(DMI_BOARD_NAME, "NL5xRU"), }, - .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS | - SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP) + .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE) }, /* * At least one modern Clevo barebone has the touchpad connected both @@ -1243,17 +1225,15 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = { .matches = { DMI_MATCH(DMI_BOARD_NAME, "NS50MU"), }, - .driver_data = (void *)(SERIO_QUIRK_NOAUX | SERIO_QUIRK_NOMUX | - SERIO_QUIRK_RESET_ALWAYS | SERIO_QUIRK_NOLOOP | - SERIO_QUIRK_NOPNP) + .driver_data = (void *)(SERIO_QUIRK_NOAUX | + SERIO_QUIRK_FORCENORESTORE) }, { .matches = { DMI_MATCH(DMI_BOARD_NAME, "NS50_70MU"), }, - .driver_data = (void *)(SERIO_QUIRK_NOAUX | SERIO_QUIRK_NOMUX | - SERIO_QUIRK_RESET_ALWAYS | SERIO_QUIRK_NOLOOP | - SERIO_QUIRK_NOPNP) + .driver_data = (void *)(SERIO_QUIRK_NOAUX | + SERIO_QUIRK_FORCENORESTORE) }, { .matches = { @@ -1265,8 +1245,13 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = { .matches = { DMI_MATCH(DMI_BOARD_NAME, "NJ50_70CU"), }, - .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS | - SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP) + .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE) + }, + { + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "P640RE"), + }, + .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE) }, { /* @@ -1277,16 +1262,14 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = { .matches = { DMI_MATCH(DMI_PRODUCT_NAME, "P65xH"), }, - .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS | - SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP) 
+ .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE) }, { /* Clevo P650RS, 650RP6, Sager NP8152-S, and others */ .matches = { DMI_MATCH(DMI_PRODUCT_NAME, "P65xRP"), }, - .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS | - SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP) + .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE) }, { /* @@ -1297,8 +1280,7 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = { .matches = { DMI_MATCH(DMI_PRODUCT_NAME, "P65_P67H"), }, - .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS | - SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP) + .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE) }, { /* @@ -1309,8 +1291,7 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = { .matches = { DMI_MATCH(DMI_PRODUCT_NAME, "P65_67RP"), }, - .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS | - SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP) + .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE) }, { /* @@ -1321,8 +1302,7 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = { .matches = { DMI_MATCH(DMI_PRODUCT_NAME, "P65_67RS"), }, - .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS | - SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP) + .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE) }, { /* @@ -1333,22 +1313,43 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = { .matches = { DMI_MATCH(DMI_PRODUCT_NAME, "P67xRP"), }, - .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS | - SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP) + .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE) }, { .matches = { DMI_MATCH(DMI_BOARD_NAME, "PB50_70DFx,DDx"), }, - .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS | - SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP) + .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE) + }, + { + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "PB51RF"), + }, + .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE) + }, + { + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "PB71RD"), + }, + .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE) + }, + { + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "PC70DR"), + }, + .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE) }, { .matches = { DMI_MATCH(DMI_BOARD_NAME, "PCX0DX"), }, - .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS | - SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP) + .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE) + }, + { + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "PCX0DX_GN20"), + }, + .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE) }, /* See comment on TUXEDO InfinityBook S17 Gen6 / Clevo NS70MU above */ { @@ -1361,15 +1362,13 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = { .matches = { DMI_MATCH(DMI_BOARD_NAME, "X170SM"), }, - .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS | - SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP) + .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE) }, { .matches = { DMI_MATCH(DMI_BOARD_NAME, "X170KM-G"), }, - .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS | - SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP) + .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE) }, { /* diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c index 066dc04003fa8..67264c5b49cb4 100644 --- a/drivers/input/touchscreen/ads7846.c +++ b/drivers/input/touchscreen/ads7846.c @@ -1021,7 +1021,7 @@ static int ads7846_setup_pendown(struct spi_device *spi, if 
(pdata->get_pendown_state) { ts->get_pendown_state = pdata->get_pendown_state; } else { - ts->gpio_pendown = gpiod_get(&spi->dev, "pendown", GPIOD_IN); + ts->gpio_pendown = devm_gpiod_get(&spi->dev, "pendown", GPIOD_IN); if (IS_ERR(ts->gpio_pendown)) { dev_err(&spi->dev, "failed to request pendown GPIO\n"); return PTR_ERR(ts->gpio_pendown); diff --git a/drivers/input/touchscreen/goodix_berlin_core.c b/drivers/input/touchscreen/goodix_berlin_core.c index 3fc03cf0ca23f..7f8cfdd106fae 100644 --- a/drivers/input/touchscreen/goodix_berlin_core.c +++ b/drivers/input/touchscreen/goodix_berlin_core.c @@ -165,7 +165,7 @@ struct goodix_berlin_core { struct device *dev; struct regmap *regmap; struct regulator *avdd; - struct regulator *iovdd; + struct regulator *vddio; struct gpio_desc *reset_gpio; struct touchscreen_properties props; struct goodix_berlin_fw_version fw_version; @@ -248,22 +248,22 @@ static int goodix_berlin_power_on(struct goodix_berlin_core *cd) { int error; - error = regulator_enable(cd->iovdd); + error = regulator_enable(cd->vddio); if (error) { - dev_err(cd->dev, "Failed to enable iovdd: %d\n", error); + dev_err(cd->dev, "Failed to enable vddio: %d\n", error); return error; } - /* Vendor waits 3ms for IOVDD to settle */ + /* Vendor waits 3ms for VDDIO to settle */ usleep_range(3000, 3100); error = regulator_enable(cd->avdd); if (error) { dev_err(cd->dev, "Failed to enable avdd: %d\n", error); - goto err_iovdd_disable; + goto err_vddio_disable; } - /* Vendor waits 15ms for IOVDD to settle */ + /* Vendor waits 15ms for AVDD to settle */ usleep_range(15000, 15100); gpiod_set_value_cansleep(cd->reset_gpio, 0); @@ -283,8 +283,8 @@ static int goodix_berlin_power_on(struct goodix_berlin_core *cd) err_dev_reset: gpiod_set_value_cansleep(cd->reset_gpio, 1); regulator_disable(cd->avdd); -err_iovdd_disable: - regulator_disable(cd->iovdd); +err_vddio_disable: + regulator_disable(cd->vddio); return error; } @@ -292,7 +292,7 @@ static void goodix_berlin_power_off(struct goodix_berlin_core *cd) { gpiod_set_value_cansleep(cd->reset_gpio, 1); regulator_disable(cd->avdd); - regulator_disable(cd->iovdd); + regulator_disable(cd->vddio); } static int goodix_berlin_read_version(struct goodix_berlin_core *cd) @@ -744,10 +744,10 @@ int goodix_berlin_probe(struct device *dev, int irq, const struct input_id *id, return dev_err_probe(dev, PTR_ERR(cd->avdd), "Failed to request avdd regulator\n"); - cd->iovdd = devm_regulator_get(dev, "iovdd"); - if (IS_ERR(cd->iovdd)) - return dev_err_probe(dev, PTR_ERR(cd->iovdd), - "Failed to request iovdd regulator\n"); + cd->vddio = devm_regulator_get(dev, "vddio"); + if (IS_ERR(cd->vddio)) + return dev_err_probe(dev, PTR_ERR(cd->vddio), + "Failed to request vddio regulator\n"); error = goodix_berlin_power_on(cd); if (error) { diff --git a/drivers/input/touchscreen/imagis.c b/drivers/input/touchscreen/imagis.c index abeae9102323c..3c8bbe284b731 100644 --- a/drivers/input/touchscreen/imagis.c +++ b/drivers/input/touchscreen/imagis.c @@ -22,6 +22,7 @@ #define IST3032C_WHOAMI 0x32c #define IST3038C_WHOAMI 0x38c +#define IST3038H_WHOAMI 0x38d #define IST3038B_REG_CHIPID 0x30 #define IST3038B_WHOAMI 0x30380b @@ -428,11 +429,19 @@ static const struct imagis_properties imagis_3038c_data = { .protocol_b = true, }; +static const struct imagis_properties imagis_3038h_data = { + .interrupt_msg_cmd = IST3038C_REG_INTR_MESSAGE, + .touch_coord_cmd = IST3038C_REG_TOUCH_COORD, + .whoami_cmd = IST3038C_REG_CHIPID, + .whoami_val = IST3038H_WHOAMI, +}; + static const struct of_device_id 
imagis_of_match[] = { { .compatible = "imagis,ist3032c", .data = &imagis_3032c_data }, { .compatible = "imagis,ist3038", .data = &imagis_3038_data }, { .compatible = "imagis,ist3038b", .data = &imagis_3038b_data }, { .compatible = "imagis,ist3038c", .data = &imagis_3038c_data }, + { .compatible = "imagis,ist3038h", .data = &imagis_3038h_data }, { }, }; MODULE_DEVICE_TABLE(of, imagis_of_match); diff --git a/drivers/input/touchscreen/wdt87xx_i2c.c b/drivers/input/touchscreen/wdt87xx_i2c.c index 27941245e962f..88d376090e6e6 100644 --- a/drivers/input/touchscreen/wdt87xx_i2c.c +++ b/drivers/input/touchscreen/wdt87xx_i2c.c @@ -1153,11 +1153,13 @@ static const struct i2c_device_id wdt87xx_dev_id[] = { }; MODULE_DEVICE_TABLE(i2c, wdt87xx_dev_id); +#ifdef CONFIG_ACPI static const struct acpi_device_id wdt87xx_acpi_id[] = { { "WDHT0001", 0 }, { } }; MODULE_DEVICE_TABLE(acpi, wdt87xx_acpi_id); +#endif static struct i2c_driver wdt87xx_driver = { .probe = wdt87xx_ts_probe, diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c index b48a72bd7b23d..cd5116d8c3b28 100644 --- a/drivers/iommu/amd/iommu.c +++ b/drivers/iommu/amd/iommu.c @@ -2043,12 +2043,12 @@ static void set_dte_entry(struct amd_iommu *iommu, make_clear_dte(dev_data, dte, &new); if (domain->iop.mode != PAGE_MODE_NONE) - new.data[0] = iommu_virt_to_phys(domain->iop.root); + new.data[0] |= iommu_virt_to_phys(domain->iop.root); new.data[0] |= (domain->iop.mode & DEV_ENTRY_MODE_MASK) << DEV_ENTRY_MODE_SHIFT; - new.data[0] |= DTE_FLAG_IR | DTE_FLAG_IW | DTE_FLAG_V; + new.data[0] |= DTE_FLAG_IR | DTE_FLAG_IW; /* * When SNP is enabled, we can only support TV=1 with non-zero domain ID. diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c index 9f424acf474e9..e540092d664d2 100644 --- a/drivers/iommu/intel/dmar.c +++ b/drivers/iommu/intel/dmar.c @@ -2043,6 +2043,7 @@ int enable_drhd_fault_handling(unsigned int cpu) /* * Enable fault control interrupt. */ + guard(rwsem_read)(&dmar_global_lock); for_each_iommu(iommu, drhd) { u32 fault_status; int ret; diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index cc46098f875b1..bf1f0c8143483 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -3146,7 +3146,14 @@ int __init intel_iommu_init(void) iommu_device_sysfs_add(&iommu->iommu, NULL, intel_iommu_groups, "%s", iommu->name); + /* + * The iommu device probe is protected by the iommu_probe_device_lock. + * Release the dmar_global_lock before entering the device probe path + * to avoid unnecessary lock order splat. 
+ */ + up_read(&dmar_global_lock); iommu_device_register(&iommu->iommu, &intel_iommu_ops, NULL); + down_read(&dmar_global_lock); iommu_pmu_register(iommu); } @@ -4378,9 +4385,6 @@ static int context_setup_pass_through_cb(struct pci_dev *pdev, u16 alias, void * { struct device *dev = data; - if (dev != &pdev->dev) - return 0; - return context_setup_pass_through(dev, PCI_BUS_NUM(alias), alias & 0xff); } diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index 76dce0aac2465..270d7a4d85a6d 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c @@ -44,6 +44,7 @@ static u8 dist_prio_nmi __ro_after_init = GICV3_PRIO_NMI; #define FLAGS_WORKAROUND_GICR_WAKER_MSM8996 (1ULL << 0) #define FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539 (1ULL << 1) #define FLAGS_WORKAROUND_ASR_ERRATUM_8601001 (1ULL << 2) +#define FLAGS_WORKAROUND_INSECURE (1ULL << 3) #define GIC_IRQ_TYPE_PARTITION (GIC_IRQ_TYPE_LPI + 1) @@ -83,6 +84,8 @@ static DEFINE_STATIC_KEY_TRUE(supports_deactivate_key); #define GIC_LINE_NR min(GICD_TYPER_SPIS(gic_data.rdists.gicd_typer), 1020U) #define GIC_ESPI_NR GICD_TYPER_ESPIS(gic_data.rdists.gicd_typer) +static bool nmi_support_forbidden; + /* * There are 16 SGIs, though we only actually use 8 in Linux. The other 8 SGIs * are potentially stolen by the secure side. Some code, especially code dealing @@ -163,21 +166,27 @@ static void __init gic_prio_init(void) { bool ds; - ds = gic_dist_security_disabled(); - if (!ds) { - u32 val; - - val = readl_relaxed(gic_data.dist_base + GICD_CTLR); - val |= GICD_CTLR_DS; - writel_relaxed(val, gic_data.dist_base + GICD_CTLR); + cpus_have_group0 = gic_has_group0(); - ds = gic_dist_security_disabled(); - if (ds) - pr_warn("Broken GIC integration, security disabled"); + ds = gic_dist_security_disabled(); + if ((gic_data.flags & FLAGS_WORKAROUND_INSECURE) && !ds) { + if (cpus_have_group0) { + u32 val; + + val = readl_relaxed(gic_data.dist_base + GICD_CTLR); + val |= GICD_CTLR_DS; + writel_relaxed(val, gic_data.dist_base + GICD_CTLR); + + ds = gic_dist_security_disabled(); + if (ds) + pr_warn("Broken GIC integration, security disabled\n"); + } else { + pr_warn("Broken GIC integration, pNMI forbidden\n"); + nmi_support_forbidden = true; + } } cpus_have_security_disabled = ds; - cpus_have_group0 = gic_has_group0(); /* * How priority values are used by the GIC depends on two things: @@ -209,7 +218,7 @@ static void __init gic_prio_init(void) * be in the non-secure range, we program the non-secure values into * the distributor to match the PMR values we want. 
*/ - if (cpus_have_group0 & !cpus_have_security_disabled) { + if (cpus_have_group0 && !cpus_have_security_disabled) { dist_prio_irq = __gicv3_prio_to_ns(dist_prio_irq); dist_prio_nmi = __gicv3_prio_to_ns(dist_prio_nmi); } @@ -1922,6 +1931,18 @@ static bool gic_enable_quirk_arm64_2941627(void *data) return true; } +static bool gic_enable_quirk_rk3399(void *data) +{ + struct gic_chip_data *d = data; + + if (of_machine_is_compatible("rockchip,rk3399")) { + d->flags |= FLAGS_WORKAROUND_INSECURE; + return true; + } + + return false; +} + static bool rd_set_non_coherent(void *data) { struct gic_chip_data *d = data; @@ -1996,6 +2017,12 @@ static const struct gic_quirk gic_quirks[] = { .property = "dma-noncoherent", .init = rd_set_non_coherent, }, + { + .desc = "GICv3: Insecure RK3399 integration", + .iidr = 0x0000043b, + .mask = 0xff000fff, + .init = gic_enable_quirk_rk3399, + }, { } }; @@ -2004,7 +2031,7 @@ static void gic_enable_nmi_support(void) { int i; - if (!gic_prio_masking_enabled()) + if (!gic_prio_masking_enabled() || nmi_support_forbidden) return; rdist_nmi_refs = kcalloc(gic_data.ppi_nr + SGI_NR, diff --git a/drivers/irqchip/irq-jcore-aic.c b/drivers/irqchip/irq-jcore-aic.c index b9dcc8e78c750..1f613eb7b7f03 100644 --- a/drivers/irqchip/irq-jcore-aic.c +++ b/drivers/irqchip/irq-jcore-aic.c @@ -38,7 +38,7 @@ static struct irq_chip jcore_aic; static void handle_jcore_irq(struct irq_desc *desc) { if (irqd_is_per_cpu(irq_desc_get_irq_data(desc))) - handle_percpu_irq(desc); + handle_percpu_devid_irq(desc); else handle_simple_irq(desc); } diff --git a/drivers/irqchip/qcom-pdc.c b/drivers/irqchip/qcom-pdc.c index 74b2f124116e3..52d77546aacb9 100644 --- a/drivers/irqchip/qcom-pdc.c +++ b/drivers/irqchip/qcom-pdc.c @@ -21,9 +21,11 @@ #include #define PDC_MAX_GPIO_IRQS 256 +#define PDC_DRV_OFFSET 0x10000 /* Valid only on HW version < 3.2 */ #define IRQ_ENABLE_BANK 0x10 +#define IRQ_ENABLE_BANK_MAX (IRQ_ENABLE_BANK + BITS_TO_BYTES(PDC_MAX_GPIO_IRQS)) #define IRQ_i_CFG 0x110 /* Valid only on HW version >= 3.2 */ @@ -46,13 +48,20 @@ struct pdc_pin_region { static DEFINE_RAW_SPINLOCK(pdc_lock); static void __iomem *pdc_base; +static void __iomem *pdc_prev_base; static struct pdc_pin_region *pdc_region; static int pdc_region_cnt; static unsigned int pdc_version; +static bool pdc_x1e_quirk; + +static void pdc_base_reg_write(void __iomem *base, int reg, u32 i, u32 val) +{ + writel_relaxed(val, base + reg + i * sizeof(u32)); +} static void pdc_reg_write(int reg, u32 i, u32 val) { - writel_relaxed(val, pdc_base + reg + i * sizeof(u32)); + pdc_base_reg_write(pdc_base, reg, i, val); } static u32 pdc_reg_read(int reg, u32 i) @@ -60,6 +69,34 @@ static u32 pdc_reg_read(int reg, u32 i) return readl_relaxed(pdc_base + reg + i * sizeof(u32)); } +static void pdc_x1e_irq_enable_write(u32 bank, u32 enable) +{ + void __iomem *base; + + /* Remap the write access to work around a hardware bug on X1E */ + switch (bank) { + case 0 ... 1: + /* Use previous DRV (client) region and shift to bank 3-4 */ + base = pdc_prev_base; + bank += 3; + break; + case 2 ... 
4: + /* Use our own region and shift to bank 0-2 */ + base = pdc_base; + bank -= 2; + break; + case 5: + /* No fixup required for bank 5 */ + base = pdc_base; + break; + default: + WARN_ON(1); + return; + } + + pdc_base_reg_write(base, IRQ_ENABLE_BANK, bank, enable); +} + static void __pdc_enable_intr(int pin_out, bool on) { unsigned long enable; @@ -72,7 +109,11 @@ static void __pdc_enable_intr(int pin_out, bool on) enable = pdc_reg_read(IRQ_ENABLE_BANK, index); __assign_bit(mask, &enable, on); - pdc_reg_write(IRQ_ENABLE_BANK, index, enable); + + if (pdc_x1e_quirk) + pdc_x1e_irq_enable_write(index, enable); + else + pdc_reg_write(IRQ_ENABLE_BANK, index, enable); } else { enable = pdc_reg_read(IRQ_i_CFG, pin_out); __assign_bit(IRQ_i_CFG_IRQ_ENABLE, &enable, on); @@ -324,10 +365,29 @@ static int qcom_pdc_init(struct device_node *node, struct device_node *parent) if (res_size > resource_size(&res)) pr_warn("%pOF: invalid reg size, please fix DT\n", node); + /* + * PDC has multiple DRV regions, each one provides the same set of + * registers for a particular client in the system. Due to a hardware + * bug on X1E, some writes to the IRQ_ENABLE_BANK register must be + * issued inside the previous region. This region belongs to + * a different client and is not described in the device tree. Map the + * region with the expected offset to preserve support for old DTs. + */ + if (of_device_is_compatible(node, "qcom,x1e80100-pdc")) { + pdc_prev_base = ioremap(res.start - PDC_DRV_OFFSET, IRQ_ENABLE_BANK_MAX); + if (!pdc_prev_base) { + pr_err("%pOF: unable to map previous PDC DRV region\n", node); + return -ENXIO; + } + + pdc_x1e_quirk = true; + } + pdc_base = ioremap(res.start, res_size); if (!pdc_base) { pr_err("%pOF: unable to map PDC registers\n", node); - return -ENXIO; + ret = -ENXIO; + goto fail; } pdc_version = pdc_reg_read(PDC_VERSION_REG, 0); @@ -363,6 +423,7 @@ static int qcom_pdc_init(struct device_node *node, struct device_node *parent) fail: kfree(pdc_region); iounmap(pdc_base); + iounmap(pdc_prev_base); return ret; } diff --git a/drivers/leds/leds-st1202.c b/drivers/leds/leds-st1202.c index b691c4886993f..e894b3f9a0f46 100644 --- a/drivers/leds/leds-st1202.c +++ b/drivers/leds/leds-st1202.c @@ -261,8 +261,6 @@ static int st1202_dt_init(struct st1202_chip *chip) int err, reg; for_each_available_child_of_node_scoped(dev_of_node(dev), child) { - struct led_init_data init_data = {}; - err = of_property_read_u32(child, "reg", ®); if (err) return dev_err_probe(dev, err, "Invalid register\n"); @@ -276,15 +274,6 @@ static int st1202_dt_init(struct st1202_chip *chip) led->led_cdev.pattern_set = st1202_led_pattern_set; led->led_cdev.pattern_clear = st1202_led_pattern_clear; led->led_cdev.default_trigger = "pattern"; - - init_data.fwnode = led->fwnode; - init_data.devicename = "st1202"; - init_data.default_label = ":"; - - err = devm_led_classdev_register_ext(dev, &led->led_cdev, &init_data); - if (err < 0) - return dev_err_probe(dev, err, "Failed to register LED class device\n"); - led->led_cdev.brightness_set = st1202_brightness_set; led->led_cdev.brightness_get = st1202_brightness_get; } @@ -368,6 +357,7 @@ static int st1202_probe(struct i2c_client *client) return ret; for (int i = 0; i < ST1202_MAX_LEDS; i++) { + struct led_init_data init_data = {}; led = &chip->leds[i]; led->chip = chip; led->led_num = i; @@ -384,6 +374,15 @@ static int st1202_probe(struct i2c_client *client) if (ret < 0) return dev_err_probe(&client->dev, ret, "Failed to clear LED pattern\n"); + + init_data.fwnode = 
led->fwnode; + init_data.devicename = "st1202"; + init_data.default_label = ":"; + + ret = devm_led_classdev_register_ext(&client->dev, &led->led_cdev, &init_data); + if (ret < 0) + return dev_err_probe(&client->dev, ret, + "Failed to register LED class device\n"); } return 0; diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c index 731467d4ed101..b690905ab89ff 100644 --- a/drivers/md/dm-flakey.c +++ b/drivers/md/dm-flakey.c @@ -426,7 +426,7 @@ static struct bio *clone_bio(struct dm_target *ti, struct flakey_c *fc, struct b if (!clone) return NULL; - bio_init(clone, fc->dev->bdev, bio->bi_inline_vecs, nr_iovecs, bio->bi_opf); + bio_init(clone, fc->dev->bdev, clone->bi_inline_vecs, nr_iovecs, bio->bi_opf); clone->bi_iter.bi_sector = flakey_map_sector(ti, bio->bi_iter.bi_sector); clone->bi_private = bio; diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c index ee9f7cecd78e0..c45464b6576aa 100644 --- a/drivers/md/dm-integrity.c +++ b/drivers/md/dm-integrity.c @@ -3790,20 +3790,18 @@ static void dm_integrity_status(struct dm_target *ti, status_type_t type, break; case STATUSTYPE_TABLE: { - __u64 watermark_percentage = (__u64)(ic->journal_entries - ic->free_sectors_threshold) * 100; - - watermark_percentage += ic->journal_entries / 2; - do_div(watermark_percentage, ic->journal_entries); - arg_count = 3; + arg_count = 1; /* buffer_sectors */ arg_count += !!ic->meta_dev; arg_count += ic->sectors_per_block != 1; arg_count += !!(ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)); arg_count += ic->reset_recalculate_flag; arg_count += ic->discard; - arg_count += ic->mode == 'J'; - arg_count += ic->mode == 'J'; - arg_count += ic->mode == 'B'; - arg_count += ic->mode == 'B'; + arg_count += ic->mode != 'I'; /* interleave_sectors */ + arg_count += ic->mode == 'J'; /* journal_sectors */ + arg_count += ic->mode == 'J'; /* journal_watermark */ + arg_count += ic->mode == 'J'; /* commit_time */ + arg_count += ic->mode == 'B'; /* sectors_per_bit */ + arg_count += ic->mode == 'B'; /* bitmap_flush_interval */ arg_count += !!ic->internal_hash_alg.alg_string; arg_count += !!ic->journal_crypt_alg.alg_string; arg_count += !!ic->journal_mac_alg.alg_string; @@ -3822,10 +3820,15 @@ static void dm_integrity_status(struct dm_target *ti, status_type_t type, DMEMIT(" reset_recalculate"); if (ic->discard) DMEMIT(" allow_discards"); - DMEMIT(" journal_sectors:%u", ic->initial_sectors - SB_SECTORS); - DMEMIT(" interleave_sectors:%u", 1U << ic->sb->log2_interleave_sectors); + if (ic->mode != 'I') + DMEMIT(" interleave_sectors:%u", 1U << ic->sb->log2_interleave_sectors); DMEMIT(" buffer_sectors:%u", 1U << ic->log2_buffer_sectors); if (ic->mode == 'J') { + __u64 watermark_percentage = (__u64)(ic->journal_entries - ic->free_sectors_threshold) * 100; + + watermark_percentage += ic->journal_entries / 2; + do_div(watermark_percentage, ic->journal_entries); + DMEMIT(" journal_sectors:%u", ic->initial_sectors - SB_SECTORS); DMEMIT(" journal_watermark:%u", (unsigned int)watermark_percentage); DMEMIT(" commit_time:%u", ic->autocommit_msec); } diff --git a/drivers/md/dm-vdo/dedupe.c b/drivers/md/dm-vdo/dedupe.c index b6f8e2dc7729f..3f3d29af1be47 100644 --- a/drivers/md/dm-vdo/dedupe.c +++ b/drivers/md/dm-vdo/dedupe.c @@ -2178,6 +2178,7 @@ static int initialize_index(struct vdo *vdo, struct hash_zones *zones) vdo_set_dedupe_index_timeout_interval(vdo_dedupe_index_timeout_interval); vdo_set_dedupe_index_min_timer_interval(vdo_dedupe_index_min_timer_interval); + spin_lock_init(&zones->lock); /* * Since we will 
save up the timeouts that would have been reported but were ratelimited, diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c index 8fc9339b00c72..70bcc3cdf2cd1 100644 --- a/drivers/md/raid0.c +++ b/drivers/md/raid0.c @@ -386,10 +386,8 @@ static int raid0_set_limits(struct mddev *mddev) lim.io_opt = lim.io_min * mddev->raid_disks; lim.features |= BLK_FEAT_ATOMIC_WRITES; err = mddev_stack_rdev_limits(mddev, &lim, MDDEV_STACK_INTEGRITY); - if (err) { - queue_limits_cancel_update(mddev->gendisk->queue); + if (err) return err; - } return queue_limits_set(mddev->gendisk->queue, &lim); } diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 9d57a88dbd261..10ea3af40991d 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -3219,10 +3219,8 @@ static int raid1_set_limits(struct mddev *mddev) lim.max_write_zeroes_sectors = 0; lim.features |= BLK_FEAT_ATOMIC_WRITES; err = mddev_stack_rdev_limits(mddev, &lim, MDDEV_STACK_INTEGRITY); - if (err) { - queue_limits_cancel_update(mddev->gendisk->queue); + if (err) return err; - } return queue_limits_set(mddev->gendisk->queue, &lim); } diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index efe93b9791677..15b9ae5bf84d8 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -4020,10 +4020,8 @@ static int raid10_set_queue_limits(struct mddev *mddev) lim.io_opt = lim.io_min * raid10_nr_stripes(conf); lim.features |= BLK_FEAT_ATOMIC_WRITES; err = mddev_stack_rdev_limits(mddev, &lim, MDDEV_STACK_INTEGRITY); - if (err) { - queue_limits_cancel_update(mddev->gendisk->queue); + if (err) return err; - } return queue_limits_set(mddev->gendisk->queue, &lim); } diff --git a/drivers/media/dvb-frontends/rtl2832_sdr.c b/drivers/media/dvb-frontends/rtl2832_sdr.c index 05254d8717db8..0357624968f1b 100644 --- a/drivers/media/dvb-frontends/rtl2832_sdr.c +++ b/drivers/media/dvb-frontends/rtl2832_sdr.c @@ -1363,6 +1363,7 @@ static int rtl2832_sdr_probe(struct platform_device *pdev) dev->vb_queue.ops = &rtl2832_sdr_vb2_ops; dev->vb_queue.mem_ops = &vb2_vmalloc_memops; dev->vb_queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; + dev->vb_queue.lock = &dev->vb_queue_lock; ret = vb2_queue_init(&dev->vb_queue); if (ret) { dev_err(&pdev->dev, "Could not initialize vb2 queue\n"); @@ -1421,7 +1422,6 @@ static int rtl2832_sdr_probe(struct platform_device *pdev) /* Init video_device structure */ dev->vdev = rtl2832_sdr_template; dev->vdev.queue = &dev->vb_queue; - dev->vdev.queue->lock = &dev->vb_queue_lock; video_set_drvdata(&dev->vdev, dev); /* Register the v4l2_device structure */ diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c index e2a75a52563f5..53f1888cc84f6 100644 --- a/drivers/memory/omap-gpmc.c +++ b/drivers/memory/omap-gpmc.c @@ -2226,26 +2226,6 @@ static int gpmc_probe_generic_child(struct platform_device *pdev, goto err; } - if (of_node_name_eq(child, "nand")) { - /* Warn about older DT blobs with no compatible property */ - if (!of_property_read_bool(child, "compatible")) { - dev_warn(&pdev->dev, - "Incompatible NAND node: missing compatible"); - ret = -EINVAL; - goto err; - } - } - - if (of_node_name_eq(child, "onenand")) { - /* Warn about older DT blobs with no compatible property */ - if (!of_property_read_bool(child, "compatible")) { - dev_warn(&pdev->dev, - "Incompatible OneNAND node: missing compatible"); - ret = -EINVAL; - goto err; - } - } - if (of_match_node(omap_nand_ids, child)) { /* NAND specific setup */ val = 8; diff --git a/drivers/misc/cardreader/rtsx_usb.c b/drivers/misc/cardreader/rtsx_usb.c index 
e0174da5e9fc3..77b0490a1b38d 100644 --- a/drivers/misc/cardreader/rtsx_usb.c +++ b/drivers/misc/cardreader/rtsx_usb.c @@ -286,7 +286,6 @@ static int rtsx_usb_get_status_with_bulk(struct rtsx_ucr *ucr, u16 *status) int rtsx_usb_get_card_status(struct rtsx_ucr *ucr, u16 *status) { int ret; - u8 interrupt_val = 0; u16 *buf; if (!status) @@ -309,20 +308,6 @@ int rtsx_usb_get_card_status(struct rtsx_ucr *ucr, u16 *status) ret = rtsx_usb_get_status_with_bulk(ucr, status); } - rtsx_usb_read_register(ucr, CARD_INT_PEND, &interrupt_val); - /* Cross check presence with interrupts */ - if (*status & XD_CD) - if (!(interrupt_val & XD_INT)) - *status &= ~XD_CD; - - if (*status & SD_CD) - if (!(interrupt_val & SD_INT)) - *status &= ~SD_CD; - - if (*status & MS_CD) - if (!(interrupt_val & MS_INT)) - *status &= ~MS_CD; - /* usb_control_msg may return positive when success */ if (ret < 0) return ret; diff --git a/drivers/misc/eeprom/digsy_mtc_eeprom.c b/drivers/misc/eeprom/digsy_mtc_eeprom.c index 88888485e6f8e..ee58f7ce5bfa9 100644 --- a/drivers/misc/eeprom/digsy_mtc_eeprom.c +++ b/drivers/misc/eeprom/digsy_mtc_eeprom.c @@ -50,7 +50,7 @@ static struct platform_device digsy_mtc_eeprom = { }; static struct gpiod_lookup_table eeprom_spi_gpiod_table = { - .dev_id = "spi_gpio", + .dev_id = "spi_gpio.1", .table = { GPIO_LOOKUP("gpio@b00", GPIO_EEPROM_CLK, "sck", GPIO_ACTIVE_HIGH), diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h index c3a6657dcd4a2..a5f88ec97df75 100644 --- a/drivers/misc/mei/hw-me-regs.h +++ b/drivers/misc/mei/hw-me-regs.h @@ -117,6 +117,8 @@ #define MEI_DEV_ID_LNL_M 0xA870 /* Lunar Lake Point M */ +#define MEI_DEV_ID_PTL_P 0xE470 /* Panther Lake P */ + /* * MEI HW Section */ diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c index 6589635f8ba32..d6ff9d82ae94b 100644 --- a/drivers/misc/mei/pci-me.c +++ b/drivers/misc/mei/pci-me.c @@ -124,6 +124,8 @@ static const struct pci_device_id mei_me_pci_tbl[] = { {MEI_PCI_DEVICE(MEI_DEV_ID_LNL_M, MEI_ME_PCH15_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_PTL_P, MEI_ME_PCH15_CFG)}, + /* required last entry */ {0, } }; diff --git a/drivers/misc/mei/vsc-tp.c b/drivers/misc/mei/vsc-tp.c index 35d349fee7698..7be1649b19725 100644 --- a/drivers/misc/mei/vsc-tp.c +++ b/drivers/misc/mei/vsc-tp.c @@ -502,7 +502,7 @@ static int vsc_tp_probe(struct spi_device *spi) if (ret) return ret; - tp->wakeuphost = devm_gpiod_get(dev, "wakeuphost", GPIOD_IN); + tp->wakeuphost = devm_gpiod_get(dev, "wakeuphostint", GPIOD_IN); if (IS_ERR(tp->wakeuphost)) return PTR_ERR(tp->wakeuphost); diff --git a/drivers/misc/ntsync.c b/drivers/misc/ntsync.c index 055395cde42b6..999026a1ae048 100644 --- a/drivers/misc/ntsync.c +++ b/drivers/misc/ntsync.c @@ -873,6 +873,7 @@ static int setup_wait(struct ntsync_device *dev, { int fds[NTSYNC_MAX_WAIT_COUNT + 1]; const __u32 count = args->count; + size_t size = array_size(count, sizeof(fds[0])); struct ntsync_q *q; __u32 total_count; __u32 i, j; @@ -880,15 +881,14 @@ static int setup_wait(struct ntsync_device *dev, if (args->pad || (args->flags & ~NTSYNC_WAIT_REALTIME)) return -EINVAL; - if (args->count > NTSYNC_MAX_WAIT_COUNT) + if (size >= sizeof(fds)) return -EINVAL; total_count = count; if (args->alert) total_count++; - if (copy_from_user(fds, u64_to_user_ptr(args->objs), - array_size(count, sizeof(*fds)))) + if (copy_from_user(fds, u64_to_user_ptr(args->objs), size)) return -EFAULT; if (args->alert) fds[count] = args->alert; @@ -1208,6 +1208,7 @@ static struct miscdevice ntsync_misc = { .minor = 
MISC_DYNAMIC_MINOR, .name = NTSYNC_NAME, .fops = &ntsync_fops, + .mode = 0666, }; module_misc_device(ntsync_misc); diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c index fc360902729db..24fffc702a948 100644 --- a/drivers/mmc/host/atmel-mci.c +++ b/drivers/mmc/host/atmel-mci.c @@ -2499,8 +2499,10 @@ static int atmci_probe(struct platform_device *pdev) /* Get MCI capabilities and set operations according to it */ atmci_get_cap(host); ret = atmci_configure_dma(host); - if (ret == -EPROBE_DEFER) + if (ret == -EPROBE_DEFER) { + clk_disable_unprepare(host->mck); goto err_dma_probe_defer; + } if (ret == 0) { host->prepare_data = &atmci_prepare_data_dma; host->submit_data = &atmci_submit_data_dma; diff --git a/drivers/mmc/host/sdhci-brcmstb.c b/drivers/mmc/host/sdhci-brcmstb.c index 0ef4d578ade84..48cdcba0f39cc 100644 --- a/drivers/mmc/host/sdhci-brcmstb.c +++ b/drivers/mmc/host/sdhci-brcmstb.c @@ -503,8 +503,15 @@ static int sdhci_brcmstb_suspend(struct device *dev) struct sdhci_host *host = dev_get_drvdata(dev); struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); struct sdhci_brcmstb_priv *priv = sdhci_pltfm_priv(pltfm_host); + int ret; clk_disable_unprepare(priv->base_clk); + if (host->mmc->caps2 & MMC_CAP2_CQE) { + ret = cqhci_suspend(host->mmc); + if (ret) + return ret; + } + return sdhci_pltfm_suspend(dev); } @@ -529,6 +536,9 @@ static int sdhci_brcmstb_resume(struct device *dev) ret = clk_set_rate(priv->base_clk, priv->base_freq_hz); } + if (host->mmc->caps2 & MMC_CAP2_CQE) + ret = cqhci_resume(host->mmc); + return ret; } #endif diff --git a/drivers/mtd/nand/raw/cadence-nand-controller.c b/drivers/mtd/nand/raw/cadence-nand-controller.c index 8d1d710e439dd..6667eea955977 100644 --- a/drivers/mtd/nand/raw/cadence-nand-controller.c +++ b/drivers/mtd/nand/raw/cadence-nand-controller.c @@ -471,6 +471,8 @@ struct cdns_nand_ctrl { struct { void __iomem *virt; dma_addr_t dma; + dma_addr_t iova_dma; + u32 size; } io; int irq; @@ -1835,11 +1837,11 @@ static int cadence_nand_slave_dma_transfer(struct cdns_nand_ctrl *cdns_ctrl, } if (dir == DMA_FROM_DEVICE) { - src_dma = cdns_ctrl->io.dma; + src_dma = cdns_ctrl->io.iova_dma; dst_dma = buf_dma; } else { src_dma = buf_dma; - dst_dma = cdns_ctrl->io.dma; + dst_dma = cdns_ctrl->io.iova_dma; } tx = dmaengine_prep_dma_memcpy(cdns_ctrl->dmac, dst_dma, src_dma, len, @@ -1861,12 +1863,12 @@ static int cadence_nand_slave_dma_transfer(struct cdns_nand_ctrl *cdns_ctrl, dma_async_issue_pending(cdns_ctrl->dmac); wait_for_completion(&finished); - dma_unmap_single(cdns_ctrl->dev, buf_dma, len, dir); + dma_unmap_single(dma_dev->dev, buf_dma, len, dir); return 0; err_unmap: - dma_unmap_single(cdns_ctrl->dev, buf_dma, len, dir); + dma_unmap_single(dma_dev->dev, buf_dma, len, dir); err: dev_dbg(cdns_ctrl->dev, "Fall back to CPU I/O\n"); @@ -2869,6 +2871,7 @@ cadence_nand_irq_cleanup(int irqnum, struct cdns_nand_ctrl *cdns_ctrl) static int cadence_nand_init(struct cdns_nand_ctrl *cdns_ctrl) { dma_cap_mask_t mask; + struct dma_device *dma_dev = cdns_ctrl->dmac->device; int ret; cdns_ctrl->cdma_desc = dma_alloc_coherent(cdns_ctrl->dev, @@ -2904,15 +2907,24 @@ static int cadence_nand_init(struct cdns_nand_ctrl *cdns_ctrl) dma_cap_set(DMA_MEMCPY, mask); if (cdns_ctrl->caps1->has_dma) { - cdns_ctrl->dmac = dma_request_channel(mask, NULL, NULL); - if (!cdns_ctrl->dmac) { - dev_err(cdns_ctrl->dev, - "Unable to get a DMA channel\n"); - ret = -EBUSY; + cdns_ctrl->dmac = dma_request_chan_by_mask(&mask); + if (IS_ERR(cdns_ctrl->dmac)) { + ret = 
dev_err_probe(cdns_ctrl->dev, PTR_ERR(cdns_ctrl->dmac), + "%d: Failed to get a DMA channel\n", ret); goto disable_irq; } } + cdns_ctrl->io.iova_dma = dma_map_resource(dma_dev->dev, cdns_ctrl->io.dma, + cdns_ctrl->io.size, + DMA_BIDIRECTIONAL, 0); + + ret = dma_mapping_error(dma_dev->dev, cdns_ctrl->io.iova_dma); + if (ret) { + dev_err(cdns_ctrl->dev, "Failed to map I/O resource to DMA\n"); + goto dma_release_chnl; + } + nand_controller_init(&cdns_ctrl->controller); INIT_LIST_HEAD(&cdns_ctrl->chips); @@ -2923,18 +2935,22 @@ static int cadence_nand_init(struct cdns_nand_ctrl *cdns_ctrl) if (ret) { dev_err(cdns_ctrl->dev, "Failed to register MTD: %d\n", ret); - goto dma_release_chnl; + goto unmap_dma_resource; } kfree(cdns_ctrl->buf); cdns_ctrl->buf = kzalloc(cdns_ctrl->buf_size, GFP_KERNEL); if (!cdns_ctrl->buf) { ret = -ENOMEM; - goto dma_release_chnl; + goto unmap_dma_resource; } return 0; +unmap_dma_resource: + dma_unmap_resource(dma_dev->dev, cdns_ctrl->io.iova_dma, + cdns_ctrl->io.size, DMA_BIDIRECTIONAL, 0); + dma_release_chnl: if (cdns_ctrl->dmac) dma_release_channel(cdns_ctrl->dmac); @@ -2956,6 +2972,10 @@ static int cadence_nand_init(struct cdns_nand_ctrl *cdns_ctrl) static void cadence_nand_remove(struct cdns_nand_ctrl *cdns_ctrl) { cadence_nand_chips_cleanup(cdns_ctrl); + if (cdns_ctrl->dmac) + dma_unmap_resource(cdns_ctrl->dmac->device->dev, + cdns_ctrl->io.iova_dma, cdns_ctrl->io.size, + DMA_BIDIRECTIONAL, 0); cadence_nand_irq_cleanup(cdns_ctrl->irq, cdns_ctrl); kfree(cdns_ctrl->buf); dma_free_coherent(cdns_ctrl->dev, sizeof(struct cadence_nand_cdma_desc), @@ -3020,7 +3040,9 @@ static int cadence_nand_dt_probe(struct platform_device *ofdev) cdns_ctrl->io.virt = devm_platform_get_and_ioremap_resource(ofdev, 1, &res); if (IS_ERR(cdns_ctrl->io.virt)) return PTR_ERR(cdns_ctrl->io.virt); + cdns_ctrl->io.dma = res->start; + cdns_ctrl->io.size = resource_size(res); dt->clk = devm_clk_get(cdns_ctrl->dev, "nf_clk"); if (IS_ERR(dt->clk)) diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c index d2d2aeee42a7e..6720b547892ba 100644 --- a/drivers/mtd/nand/raw/qcom_nandc.c +++ b/drivers/mtd/nand/raw/qcom_nandc.c @@ -1881,18 +1881,18 @@ static int qcom_param_page_type_exec(struct nand_chip *chip, const struct nand_ nandc->regs->addr0 = 0; nandc->regs->addr1 = 0; - host->cfg0 = FIELD_PREP(CW_PER_PAGE_MASK, 0) | - FIELD_PREP(UD_SIZE_BYTES_MASK, 512) | - FIELD_PREP(NUM_ADDR_CYCLES_MASK, 5) | - FIELD_PREP(SPARE_SIZE_BYTES_MASK, 0); - - host->cfg1 = FIELD_PREP(NAND_RECOVERY_CYCLES_MASK, 7) | - FIELD_PREP(BAD_BLOCK_BYTE_NUM_MASK, 17) | - FIELD_PREP(CS_ACTIVE_BSY, 0) | - FIELD_PREP(BAD_BLOCK_IN_SPARE_AREA, 1) | - FIELD_PREP(WR_RD_BSY_GAP_MASK, 2) | - FIELD_PREP(WIDE_FLASH, 0) | - FIELD_PREP(DEV0_CFG1_ECC_DISABLE, 1); + nandc->regs->cfg0 = cpu_to_le32(FIELD_PREP(CW_PER_PAGE_MASK, 0) | + FIELD_PREP(UD_SIZE_BYTES_MASK, 512) | + FIELD_PREP(NUM_ADDR_CYCLES_MASK, 5) | + FIELD_PREP(SPARE_SIZE_BYTES_MASK, 0)); + + nandc->regs->cfg1 = cpu_to_le32(FIELD_PREP(NAND_RECOVERY_CYCLES_MASK, 7) | + FIELD_PREP(BAD_BLOCK_BYTE_NUM_MASK, 17) | + FIELD_PREP(CS_ACTIVE_BSY, 0) | + FIELD_PREP(BAD_BLOCK_IN_SPARE_AREA, 1) | + FIELD_PREP(WR_RD_BSY_GAP_MASK, 2) | + FIELD_PREP(WIDE_FLASH, 0) | + FIELD_PREP(DEV0_CFG1_ECC_DISABLE, 1)); if (!nandc->props->qpic_version2) nandc->regs->ecc_buf_cfg = cpu_to_le32(ECC_CFG_ECC_DISABLE); diff --git a/drivers/mtd/spi-nor/sst.c b/drivers/mtd/spi-nor/sst.c index b5ad7118c49a2..175211fe6a5ed 100644 --- a/drivers/mtd/spi-nor/sst.c +++ b/drivers/mtd/spi-nor/sst.c @@ 
-174,7 +174,7 @@ static int sst_nor_write_data(struct spi_nor *nor, loff_t to, size_t len, int ret; nor->program_opcode = op; - ret = spi_nor_write_data(nor, to, 1, buf); + ret = spi_nor_write_data(nor, to, len, buf); if (ret < 0) return ret; WARN(ret != len, "While writing %zu byte written %i bytes\n", len, ret); diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index 1fd5acdc73c6a..b29628d46be9b 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -115,6 +115,21 @@ config WIREGUARD_DEBUG Say N here unless you know what you're doing. +config OVPN + tristate "OpenVPN data channel offload" + depends on NET && INET + depends on IPV6 || !IPV6 + select DST_CACHE + select NET_UDP_TUNNEL + select CRYPTO + select CRYPTO_AES + select CRYPTO_GCM + select CRYPTO_CHACHA20POLY1305 + select STREAM_PARSER + help + This module enhances the performance of the OpenVPN userspace software + by offloading the data channel processing to kernelspace. + config EQUALIZER tristate "EQL (serial line load balancing) support" help @@ -518,30 +533,6 @@ source "drivers/net/hippi/Kconfig" source "drivers/net/ipa/Kconfig" -config NET_SB1000 - tristate "General Instruments Surfboard 1000" - depends on ISA && PNP - help - This is a driver for the General Instrument (also known as - NextLevel) SURFboard 1000 internal - cable modem. This is an ISA card which is used by a number of cable - TV companies to provide cable modem access. It's a one-way - downstream-only cable modem, meaning that your upstream net link is - provided by your regular phone modem. - - At present this driver only compiles as a module, so say M here if - you have this card. The module will be called sb1000. Then read - for - information on how to use this module, as it needs special ppp - scripts for establishing a connection. Further documentation - and the necessary scripts can be found at: - - - - - - If you don't have this card, of course say N. 
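A note on the cadence-nandc hunk further above: dma_map_resource() returns an IOVA that must be checked with dma_mapping_error() and, once valid, released with dma_unmap_resource() on every later failure path, which is why the new unmap_dma_resource label is threaded into the unwind chain there. Below is a minimal sketch of that map/check/unwind pattern; example_map_io() and example_next_step() are hypothetical names, not the driver's own code.

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/ioport.h>

/* Stand-in for whatever initialisation follows the mapping. */
static int example_next_step(struct device *dev)
{
	return 0;
}

static int example_map_io(struct device *dev, struct resource *res)
{
	dma_addr_t iova;
	int ret;

	iova = dma_map_resource(dev, res->start, resource_size(res),
				DMA_BIDIRECTIONAL, 0);
	ret = dma_mapping_error(dev, iova);
	if (ret)
		return ret;	/* nothing mapped yet, nothing to unwind */

	ret = example_next_step(dev);
	if (ret)
		/* any failure after the map must undo the mapping */
		dma_unmap_resource(dev, iova, resource_size(res),
				   DMA_BIDIRECTIONAL, 0);

	return ret;
}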
- source "drivers/net/phy/Kconfig" source "drivers/net/pse-pd/Kconfig" diff --git a/drivers/net/Makefile b/drivers/net/Makefile index 13743d0e83b5f..73bc63ecd65ff 100644 --- a/drivers/net/Makefile +++ b/drivers/net/Makefile @@ -11,6 +11,7 @@ obj-$(CONFIG_IPVLAN) += ipvlan/ obj-$(CONFIG_IPVTAP) += ipvlan/ obj-$(CONFIG_DUMMY) += dummy.o obj-$(CONFIG_WIREGUARD) += wireguard/ +obj-$(CONFIG_OVPN) += ovpn/ obj-$(CONFIG_EQUALIZER) += eql.o obj-$(CONFIG_IFB) += ifb.o obj-$(CONFIG_MACSEC) += macsec.o @@ -69,7 +70,6 @@ obj-$(CONFIG_PPPOL2TP) += ppp/ obj-$(CONFIG_PPTP) += ppp/ obj-$(CONFIG_SLIP) += slip/ obj-$(CONFIG_SLHC) += slip/ -obj-$(CONFIG_NET_SB1000) += sb1000.o obj-$(CONFIG_SUNGEM_PHY) += sungem_phy.o obj-$(CONFIG_WAN) += wan/ obj-$(CONFIG_WLAN) += wireless/ diff --git a/drivers/net/amt.c b/drivers/net/amt.c index 98c6205ed19f1..734a0b3242a9b 100644 --- a/drivers/net/amt.c +++ b/drivers/net/amt.c @@ -3099,7 +3099,7 @@ static void amt_link_setup(struct net_device *dev) dev->addr_len = 0; dev->priv_flags |= IFF_NO_QUEUE; dev->lltx = true; - dev->netns_local = true; + dev->netns_immutable = true; dev->features |= NETIF_F_GSO_SOFTWARE; dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM; dev->hw_features |= NETIF_F_FRAGLIST | NETIF_F_RXCSUM; @@ -3161,14 +3161,17 @@ static int amt_validate(struct nlattr *tb[], struct nlattr *data[], return 0; } -static int amt_newlink(struct net *net, struct net_device *dev, - struct nlattr *tb[], struct nlattr *data[], +static int amt_newlink(struct net_device *dev, + struct rtnl_newlink_params *params, struct netlink_ext_ack *extack) { + struct net *link_net = rtnl_newlink_link_net(params); struct amt_dev *amt = netdev_priv(dev); + struct nlattr **data = params->data; + struct nlattr **tb = params->tb; int err = -EINVAL; - amt->net = net; + amt->net = link_net; amt->mode = nla_get_u32(data[IFLA_AMT_MODE]); if (data[IFLA_AMT_MAX_TUNNELS] && @@ -3183,7 +3186,7 @@ static int amt_newlink(struct net *net, struct net_device *dev, amt->hash_buckets = AMT_HSIZE; amt->nr_tunnels = 0; get_random_bytes(&amt->hash_seed, sizeof(amt->hash_seed)); - amt->stream_dev = dev_get_by_index(net, + amt->stream_dev = dev_get_by_index(link_net, nla_get_u32(data[IFLA_AMT_LINK])); if (!amt->stream_dev) { NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_AMT_LINK], diff --git a/drivers/net/bareudp.c b/drivers/net/bareudp.c index 70814303aab86..d1473c5f8eefc 100644 --- a/drivers/net/bareudp.c +++ b/drivers/net/bareudp.c @@ -698,10 +698,13 @@ static void bareudp_dellink(struct net_device *dev, struct list_head *head) unregister_netdevice_queue(dev, head); } -static int bareudp_newlink(struct net *net, struct net_device *dev, - struct nlattr *tb[], struct nlattr *data[], +static int bareudp_newlink(struct net_device *dev, + struct rtnl_newlink_params *params, struct netlink_ext_ack *extack) { + struct net *link_net = rtnl_newlink_link_net(params); + struct nlattr **data = params->data; + struct nlattr **tb = params->tb; struct bareudp_conf conf; int err; @@ -709,7 +712,7 @@ static int bareudp_newlink(struct net *net, struct net_device *dev, if (err) return err; - err = bareudp_configure(net, dev, &conf, extack); + err = bareudp_configure(link_net, dev, &conf, extack); if (err) return err; diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index e45bba240cbcd..da91aa36a9a01 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -90,6 +90,7 @@ #include #endif #include +#include #include #include "bonding_priv.h" @@ -322,9 +323,9 @@ static bool 
bond_sk_check(struct bonding *bond) } } -static bool bond_xdp_check(struct bonding *bond) +bool bond_xdp_check(struct bonding *bond, int mode) { - switch (BOND_MODE(bond)) { + switch (mode) { case BOND_MODE_ROUNDROBIN: case BOND_MODE_ACTIVEBACKUP: return true; @@ -432,9 +433,6 @@ static struct net_device *bond_ipsec_dev(struct xfrm_state *xs) struct bonding *bond; struct slave *slave; - if (!bond_dev) - return NULL; - bond = netdev_priv(bond_dev); if (BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) return NULL; @@ -676,22 +674,16 @@ static void bond_ipsec_free_sa(struct xfrm_state *xs) static bool bond_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs) { struct net_device *real_dev; - bool ok = false; rcu_read_lock(); real_dev = bond_ipsec_dev(xs); - if (!real_dev) - goto out; - - if (!real_dev->xfrmdev_ops || - !real_dev->xfrmdev_ops->xdo_dev_offload_ok || - netif_is_bond_master(real_dev)) - goto out; + if (!real_dev || netif_is_bond_master(real_dev)) { + rcu_read_unlock(); + return false; + } - ok = real_dev->xfrmdev_ops->xdo_dev_offload_ok(skb, xs); -out: rcu_read_unlock(); - return ok; + return true; } /** @@ -858,7 +850,6 @@ static int bond_check_dev_link(struct bonding *bond, struct net_device *slave_dev, int reporting) { const struct net_device_ops *slave_ops = slave_dev->netdev_ops; - int (*ioctl)(struct net_device *, struct ifreq *, int); struct ifreq ifr; struct mii_ioctl_data *mii; @@ -874,8 +865,7 @@ static int bond_check_dev_link(struct bonding *bond, BMSR_LSTATUS : 0; /* Ethtool can't be used, fallback to MII ioctls. */ - ioctl = slave_ops->ndo_eth_ioctl; - if (ioctl) { + if (slave_ops->ndo_eth_ioctl) { /* TODO: set pointer to correct ioctl on a per team member * bases to make this more efficient. that is, once * we determine the correct ioctl, we will always @@ -891,9 +881,10 @@ static int bond_check_dev_link(struct bonding *bond, /* Yes, the mii is overlaid on the ifreq.ifr_ifru */ strscpy_pad(ifr.ifr_name, slave_dev->name, IFNAMSIZ); mii = if_mii(&ifr); - if (ioctl(slave_dev, &ifr, SIOCGMIIPHY) == 0) { + + if (dev_eth_ioctl(slave_dev, &ifr, SIOCGMIIPHY) == 0) { mii->reg_num = MII_BMSR; - if (ioctl(slave_dev, &ifr, SIOCGMIIREG) == 0) + if (dev_eth_ioctl(slave_dev, &ifr, SIOCGMIIREG) == 0) return mii->val_out & BMSR_LSTATUS; } } @@ -1107,8 +1098,13 @@ static void bond_do_fail_over_mac(struct bonding *bond, old_active = bond_get_old_active(bond, new_active); if (old_active) { - bond_hw_addr_copy(tmp_mac, new_active->dev->dev_addr, - new_active->dev->addr_len); + if (bond_hw_addr_equal(old_active->dev->dev_addr, new_active->dev->dev_addr, + new_active->dev->addr_len)) + bond_hw_addr_copy(tmp_mac, new_active->perm_hwaddr, + new_active->dev->addr_len); + else + bond_hw_addr_copy(tmp_mac, new_active->dev->dev_addr, + new_active->dev->addr_len); bond_hw_addr_copy(ss.__data, old_active->dev->dev_addr, old_active->dev->addr_len); @@ -1937,7 +1933,7 @@ void bond_xdp_set_features(struct net_device *bond_dev) ASSERT_RTNL(); - if (!bond_xdp_check(bond) || !bond_has_slaves(bond)) { + if (!bond_xdp_check(bond, BOND_MODE(bond)) || !bond_has_slaves(bond)) { xdp_clear_features_flag(bond_dev); return; } @@ -2551,7 +2547,7 @@ static int __bond_release_one(struct net_device *bond_dev, RCU_INIT_POINTER(bond->current_arp_slave, NULL); - if (!all && (!bond->params.fail_over_mac || + if (!all && (bond->params.fail_over_mac != BOND_FOM_ACTIVE || BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP)) { if (ether_addr_equal_64bits(bond_dev->dev_addr, slave->perm_hwaddr) && bond_has_slaves(bond)) @@ -2648,10 
+2644,13 @@ static int __bond_release_one(struct net_device *bond_dev, dev_set_mac_address(slave_dev, (struct sockaddr *)&ss, NULL); } - if (unregister) + if (unregister) { + netdev_lock_ops(slave_dev); __dev_set_mtu(slave_dev, slave->original_mtu); - else + netdev_unlock_ops(slave_dev); + } else { dev_set_mtu(slave_dev, slave->original_mtu); + } if (!netif_is_bond_master(slave_dev)) slave_dev->priv_flags &= ~IFF_BONDING; @@ -4217,7 +4216,7 @@ static bool bond_flow_ip(struct sk_buff *skb, struct flow_keys *fk, const void * } if (l34 && *ip_proto >= 0) - fk->ports.ports = __skb_flow_get_ports(skb, *nhoff, *ip_proto, data, hlen); + fk->ports.ports = skb_flow_get_ports(skb, *nhoff, *ip_proto, data, hlen); return true; } @@ -5699,7 +5698,7 @@ static int bond_xdp_set(struct net_device *dev, struct bpf_prog *prog, ASSERT_RTNL(); - if (!bond_xdp_check(bond)) { + if (!bond_xdp_check(bond, BOND_MODE(bond))) { BOND_NL_ERR(dev, extack, "No native XDP support for the current bonding mode"); return -EOPNOTSUPP; @@ -6028,7 +6027,7 @@ void bond_setup(struct net_device *bond_dev) bond_dev->lltx = true; /* Don't allow bond devices to change network namespaces. */ - bond_dev->netns_local = true; + bond_dev->netns_immutable = true; /* By default, we declare the bond to be fully * VLAN hardware accelerated capable. Special diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c index 2a6a424806aa6..ac5e402c34bc6 100644 --- a/drivers/net/bonding/bond_netlink.c +++ b/drivers/net/bonding/bond_netlink.c @@ -564,10 +564,12 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[], return 0; } -static int bond_newlink(struct net *src_net, struct net_device *bond_dev, - struct nlattr *tb[], struct nlattr *data[], +static int bond_newlink(struct net_device *bond_dev, + struct rtnl_newlink_params *params, struct netlink_ext_ack *extack) { + struct nlattr **data = params->data; + struct nlattr **tb = params->tb; int err; err = bond_changelink(bond_dev, tb, data, extack); diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c index 327b6ecdc77e0..91893c29b8995 100644 --- a/drivers/net/bonding/bond_options.c +++ b/drivers/net/bonding/bond_options.c @@ -868,6 +868,9 @@ static bool bond_set_xfrm_features(struct bonding *bond) static int bond_option_mode_set(struct bonding *bond, const struct bond_opt_value *newval) { + if (bond->xdp_prog && !bond_xdp_check(bond, newval->value)) + return -EOPNOTSUPP; + if (!bond_mode_uses_arp(newval->value)) { if (bond->params.arp_interval) { netdev_dbg(bond->dev, "%s mode is incompatible with arp monitoring, start mii monitoring\n", @@ -1242,10 +1245,28 @@ static bool slave_can_set_ns_maddr(const struct bonding *bond, struct slave *sla slave->dev->flags & IFF_MULTICAST; } +/** + * slave_set_ns_maddrs - add/del all NS MAC addresses for slave + * @bond: bond device + * @slave: slave device + * @add: add or remove all the NS MAC addresses + * + * This function tries to add or delete all the NS MAC addresses on the slave. + * + * Note: the IPv6 NS target address is the unicast address in a Neighbor + * Solicitation (NS) message. The destination address of an NS message should + * be the solicited-node multicast address of the target, and the destination + * MAC of the NS message is derived from that multicast address.
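+ *
+ * For example, for the target fe80::1 the solicited-node multicast
+ * address is ff02::1:ff00:1, and ndisc_mc_map() turns that into the
+ * multicast MAC 33:33:ff:00:00:01, which is added to (or deleted from)
+ * the slave.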
+ *
+ * This function is called when
+ *   * arp_validate changes
+ *   * a slave is enslaved or released
+ */ static void slave_set_ns_maddrs(struct bonding *bond, struct slave *slave, bool add) { struct in6_addr *targets = bond->params.ns_targets; char slot_maddr[MAX_ADDR_LEN]; + struct in6_addr mcaddr; int i; if (!slave_can_set_ns_maddr(bond, slave)) @@ -1255,7 +1276,8 @@ static void slave_set_ns_maddrs(struct bonding *bond, struct slave *slave, bool if (ipv6_addr_any(&targets[i])) break; - if (!ndisc_mc_map(&targets[i], slot_maddr, slave->dev, 0)) { + addrconf_addr_solict_mult(&targets[i], &mcaddr); + if (!ndisc_mc_map(&mcaddr, slot_maddr, slave->dev, 0)) { if (add) dev_mc_add(slave->dev, slot_maddr); else @@ -1278,23 +1300,43 @@ void bond_slave_ns_maddrs_del(struct bonding *bond, struct slave *slave) slave_set_ns_maddrs(bond, slave, false); } +/** + * slave_set_ns_maddr - set new NS MAC address for slave + * @bond: bond device + * @slave: slave device + * @target: the new IPv6 target + * @slot: the old IPv6 target in the slot + * + * This function tries to replace the old MAC address with the new one on the + * slave. + * + * Note: the target/slot IPv6 address is the unicast address in a Neighbor + * Solicitation (NS) message. The destination address of an NS message should + * be the solicited-node multicast address of the target, and the destination + * MAC of the NS message is derived from that multicast address. + * + * This function is called when + * * an IPv6 NS target is added or removed. + */ static void slave_set_ns_maddr(struct bonding *bond, struct slave *slave, struct in6_addr *target, struct in6_addr *slot) { - char target_maddr[MAX_ADDR_LEN], slot_maddr[MAX_ADDR_LEN]; + char mac_addr[MAX_ADDR_LEN]; + struct in6_addr mcast_addr; if (!bond->params.arp_validate || !slave_can_set_ns_maddr(bond, slave)) return; - /* remove the previous maddr from slave */ + /* remove the previous MAC address from the slave */ + addrconf_addr_solict_mult(slot, &mcast_addr); if (!ipv6_addr_any(slot) && - !ndisc_mc_map(slot, slot_maddr, slave->dev, 0)) - dev_mc_del(slave->dev, slot_maddr); + !ndisc_mc_map(&mcast_addr, mac_addr, slave->dev, 0)) + dev_mc_del(slave->dev, mac_addr); - /* add new maddr on slave if target is set */ + /* add the new MAC address on the slave if target is set */ + addrconf_addr_solict_mult(target, &mcast_addr); if (!ipv6_addr_any(target) && - !ndisc_mc_map(target, target_maddr, slave->dev, 0)) - dev_mc_add(slave->dev, target_maddr); + !ndisc_mc_map(&mcast_addr, mac_addr, slave->dev, 0)) + dev_mc_add(slave->dev, mac_addr); } static void _bond_options_ns_ip6_target_set(struct bonding *bond, int slot, diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c index ed3a589def6b1..90ea3dc0fb109 100644 --- a/drivers/net/caif/caif_serial.c +++ b/drivers/net/caif/caif_serial.c @@ -126,15 +126,6 @@ static inline void debugfs_rx(struct ser_device *ser, const u8 *data, int size) ser->rx_blob.data = ser->rx_data; ser->rx_blob.size = size; } - -static inline void debugfs_tx(struct ser_device *ser, const u8
*data, int size) -{ -} - #endif static void ldisc_receive(struct tty_struct *tty, const u8 *data, diff --git a/drivers/net/caif/caif_virtio.c b/drivers/net/caif/caif_virtio.c index 7fea00c7ca8a6..c60386bf2d1a4 100644 --- a/drivers/net/caif/caif_virtio.c +++ b/drivers/net/caif/caif_virtio.c @@ -745,7 +745,7 @@ static int cfv_probe(struct virtio_device *vdev) if (cfv->vr_rx) vdev->vringh_config->del_vrhs(cfv->vdev); - if (cfv->vdev) + if (cfv->vq_tx) vdev->config->del_vqs(cfv->vdev); free_netdev(netdev); return err; diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c index 399844809bbea..19c86b94a40e4 100644 --- a/drivers/net/can/c_can/c_can_platform.c +++ b/drivers/net/can/c_can/c_can_platform.c @@ -269,30 +269,22 @@ static int c_can_plat_probe(struct platform_device *pdev) /* get the appropriate clk */ clk = devm_clk_get(&pdev->dev, NULL); - if (IS_ERR(clk)) { - ret = PTR_ERR(clk); - goto exit; - } + if (IS_ERR(clk)) + return PTR_ERR(clk); /* get the platform data */ irq = platform_get_irq(pdev, 0); - if (irq < 0) { - ret = irq; - goto exit; - } + if (irq < 0) + return irq; addr = devm_platform_get_and_ioremap_resource(pdev, 0, &mem); - if (IS_ERR(addr)) { - ret = PTR_ERR(addr); - goto exit; - } + if (IS_ERR(addr)) + return PTR_ERR(addr); /* allocate the c_can device */ dev = alloc_c_can_dev(drvdata->msg_obj_num); - if (!dev) { - ret = -ENOMEM; - goto exit; - } + if (!dev) + return -ENOMEM; priv = netdev_priv(dev); switch (drvdata->id) { @@ -324,33 +316,22 @@ static int c_can_plat_probe(struct platform_device *pdev) /* Check if we need custom RAMINIT via syscon. Mostly for TI * platforms. Only supported with DT boot. */ - if (np && of_property_read_bool(np, "syscon-raminit")) { + if (np && of_property_present(np, "syscon-raminit")) { + unsigned int args[2]; u32 id; struct c_can_raminit *raminit = &priv->raminit_sys; ret = -EINVAL; - raminit->syscon = syscon_regmap_lookup_by_phandle(np, - "syscon-raminit"); + raminit->syscon = syscon_regmap_lookup_by_phandle_args(np, + "syscon-raminit", + 2, args); if (IS_ERR(raminit->syscon)) { - /* can fail with -EPROBE_DEFER */ ret = PTR_ERR(raminit->syscon); - free_c_can_dev(dev); - return ret; - } - - if (of_property_read_u32_index(np, "syscon-raminit", 1, - &raminit->reg)) { - dev_err(&pdev->dev, - "couldn't get the RAMINIT reg. 
offset!\n"); goto exit_free_device; } - if (of_property_read_u32_index(np, "syscon-raminit", 2, - &id)) { - dev_err(&pdev->dev, - "couldn't get the CAN instance ID\n"); - goto exit_free_device; - } + raminit->reg = args[0]; + id = args[1]; if (id >= drvdata->raminit_num) { dev_err(&pdev->dev, @@ -396,8 +377,6 @@ static int c_can_plat_probe(struct platform_device *pdev) pm_runtime_disable(priv->device); exit_free_device: free_c_can_dev(dev); -exit: - dev_err(&pdev->dev, "probe failed\n"); return ret; } diff --git a/drivers/net/can/dev/netlink.c b/drivers/net/can/dev/netlink.c index 01aacdcda2606..f1db9b7ffd4d0 100644 --- a/drivers/net/can/dev/netlink.c +++ b/drivers/net/can/dev/netlink.c @@ -624,8 +624,8 @@ static int can_fill_xstats(struct sk_buff *skb, const struct net_device *dev) return -EMSGSIZE; } -static int can_newlink(struct net *src_net, struct net_device *dev, - struct nlattr *tb[], struct nlattr *data[], +static int can_newlink(struct net_device *dev, + struct rtnl_newlink_params *params, struct netlink_ext_ack *extack) { return -EOPNOTSUPP; diff --git a/drivers/net/can/flexcan/flexcan-core.c b/drivers/net/can/flexcan/flexcan-core.c index ac1a860986df6..6d80c341b26fd 100644 --- a/drivers/net/can/flexcan/flexcan-core.c +++ b/drivers/net/can/flexcan/flexcan-core.c @@ -26,6 +26,7 @@ #include #include #include +#include #include #include #include @@ -386,6 +387,16 @@ static const struct flexcan_devtype_data fsl_lx2160a_r1_devtype_data = { FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR, }; +static const struct flexcan_devtype_data nxp_s32g2_devtype_data = { + .quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS | + FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_BROKEN_PERR_STATE | + FLEXCAN_QUIRK_USE_RX_MAILBOX | FLEXCAN_QUIRK_SUPPORT_FD | + FLEXCAN_QUIRK_SUPPORT_ECC | FLEXCAN_QUIRK_NR_IRQ_3 | + FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX | + FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR | + FLEXCAN_QUIRK_SECONDARY_MB_IRQ, +}; + static const struct can_bittiming_const flexcan_bittiming_const = { .name = DRV_NAME, .tseg1_min = 4, @@ -634,18 +645,22 @@ static void flexcan_clks_disable(const struct flexcan_priv *priv) static inline int flexcan_transceiver_enable(const struct flexcan_priv *priv) { - if (!priv->reg_xceiver) - return 0; + if (priv->reg_xceiver) + return regulator_enable(priv->reg_xceiver); + else if (priv->transceiver) + return phy_power_on(priv->transceiver); - return regulator_enable(priv->reg_xceiver); + return 0; } static inline int flexcan_transceiver_disable(const struct flexcan_priv *priv) { - if (!priv->reg_xceiver) - return 0; + if (priv->reg_xceiver) + return regulator_disable(priv->reg_xceiver); + else if (priv->transceiver) + return phy_power_off(priv->transceiver); - return regulator_disable(priv->reg_xceiver); + return 0; } static int flexcan_chip_enable(struct flexcan_priv *priv) @@ -1762,14 +1777,25 @@ static int flexcan_open(struct net_device *dev) goto out_free_irq_boff; } + if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SECONDARY_MB_IRQ) { + err = request_irq(priv->irq_secondary_mb, + flexcan_irq, IRQF_SHARED, dev->name, dev); + if (err) + goto out_free_irq_err; + } + flexcan_chip_interrupts_enable(dev); netif_start_queue(dev); return 0; + out_free_irq_err: + if (priv->devtype_data.quirks & FLEXCAN_QUIRK_NR_IRQ_3) + free_irq(priv->irq_err, dev); out_free_irq_boff: - free_irq(priv->irq_boff, dev); + if (priv->devtype_data.quirks & FLEXCAN_QUIRK_NR_IRQ_3) + free_irq(priv->irq_boff, dev); out_free_irq: free_irq(dev->irq, dev); out_can_rx_offload_disable: @@ -1794,6 +1820,9 
@@ static int flexcan_close(struct net_device *dev) netif_stop_queue(dev); flexcan_chip_interrupts_disable(dev); + if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SECONDARY_MB_IRQ) + free_irq(priv->irq_secondary_mb, dev); + if (priv->devtype_data.quirks & FLEXCAN_QUIRK_NR_IRQ_3) { free_irq(priv->irq_err, dev); free_irq(priv->irq_boff, dev); @@ -2041,6 +2070,7 @@ static const struct of_device_id flexcan_of_match[] = { { .compatible = "fsl,vf610-flexcan", .data = &fsl_vf610_devtype_data, }, { .compatible = "fsl,ls1021ar2-flexcan", .data = &fsl_ls1021a_r2_devtype_data, }, { .compatible = "fsl,lx2160ar1-flexcan", .data = &fsl_lx2160a_r1_devtype_data, }, + { .compatible = "nxp,s32g2-flexcan", .data = &nxp_s32g2_devtype_data, }, { /* sentinel */ }, }; MODULE_DEVICE_TABLE(of, flexcan_of_match); @@ -2061,6 +2091,7 @@ static int flexcan_probe(struct platform_device *pdev) struct net_device *dev; struct flexcan_priv *priv; struct regulator *reg_xceiver; + struct phy *transceiver; struct clk *clk_ipg = NULL, *clk_per = NULL; struct flexcan_regs __iomem *regs; struct flexcan_platform_data *pdata; @@ -2076,6 +2107,11 @@ static int flexcan_probe(struct platform_device *pdev) else if (IS_ERR(reg_xceiver)) return PTR_ERR(reg_xceiver); + transceiver = devm_phy_optional_get(&pdev->dev, NULL); + if (IS_ERR(transceiver)) + return dev_err_probe(&pdev->dev, PTR_ERR(transceiver), + "failed to get phy\n"); + if (pdev->dev.of_node) { of_property_read_u32(pdev->dev.of_node, "clock-frequency", &clock_freq); @@ -2173,6 +2209,10 @@ static int flexcan_probe(struct platform_device *pdev) priv->clk_per = clk_per; priv->clk_src = clk_src; priv->reg_xceiver = reg_xceiver; + priv->transceiver = transceiver; + + if (transceiver) + priv->can.bitrate_max = transceiver->attrs.max_link_rate; if (priv->devtype_data.quirks & FLEXCAN_QUIRK_NR_IRQ_3) { priv->irq_boff = platform_get_irq(pdev, 1); @@ -2187,6 +2227,14 @@ static int flexcan_probe(struct platform_device *pdev) } } + if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SECONDARY_MB_IRQ) { + priv->irq_secondary_mb = platform_get_irq_byname(pdev, "mb-1"); + if (priv->irq_secondary_mb < 0) { + err = priv->irq_secondary_mb; + goto failed_platform_get_irq; + } + } + if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SUPPORT_FD) { priv->can.ctrlmode_supported |= CAN_CTRLMODE_FD | CAN_CTRLMODE_FD_NON_ISO; @@ -2260,14 +2308,19 @@ static int __maybe_unused flexcan_suspend(struct device *device) flexcan_chip_interrupts_disable(dev); + err = flexcan_transceiver_disable(priv); + if (err) + return err; + err = pinctrl_pm_select_sleep_state(device); if (err) return err; } netif_stop_queue(dev); netif_device_detach(dev); + + priv->can.state = CAN_STATE_SLEEPING; } - priv->can.state = CAN_STATE_SLEEPING; return 0; } @@ -2278,7 +2331,6 @@ static int __maybe_unused flexcan_resume(struct device *device) struct flexcan_priv *priv = netdev_priv(dev); int err; - priv->can.state = CAN_STATE_ERROR_ACTIVE; if (netif_running(dev)) { netif_device_attach(dev); netif_start_queue(dev); @@ -2292,12 +2344,20 @@ static int __maybe_unused flexcan_resume(struct device *device) if (err) return err; - err = flexcan_chip_start(dev); + err = flexcan_transceiver_enable(priv); if (err) return err; + err = flexcan_chip_start(dev); + if (err) { + flexcan_transceiver_disable(priv); + return err; + } + flexcan_chip_interrupts_enable(dev); } + + priv->can.state = CAN_STATE_ERROR_ACTIVE; } return 0; diff --git a/drivers/net/can/flexcan/flexcan.h b/drivers/net/can/flexcan/flexcan.h index 4933d8c7439e6..16692a2502eba 100644 --- 
a/drivers/net/can/flexcan/flexcan.h +++ b/drivers/net/can/flexcan/flexcan.h @@ -70,6 +70,10 @@ #define FLEXCAN_QUIRK_SUPPORT_RX_FIFO BIT(16) /* Setup stop mode with ATF SCMI protocol to support wakeup */ #define FLEXCAN_QUIRK_SETUP_STOP_MODE_SCMI BIT(17) +/* Device has two separate interrupt lines for two mailbox ranges, which + * both need to have an interrupt handler registered. + */ +#define FLEXCAN_QUIRK_SECONDARY_MB_IRQ BIT(18) struct flexcan_devtype_data { u32 quirks; /* quirks needed for different IP cores */ @@ -103,10 +107,12 @@ struct flexcan_priv { struct clk *clk_per; struct flexcan_devtype_data devtype_data; struct regulator *reg_xceiver; + struct phy *transceiver; struct flexcan_stop_mode stm; int irq_boff; int irq_err; + int irq_secondary_mb; /* IPC handle when setup stop mode by System Controller firmware(scfw) */ struct imx_sc_ipc *sc_ipc_handle; diff --git a/drivers/net/can/rcar/rcar_canfd.c b/drivers/net/can/rcar/rcar_canfd.c index df1a5d0b37b22..aa3df0d05b853 100644 --- a/drivers/net/can/rcar/rcar_canfd.c +++ b/drivers/net/can/rcar/rcar_canfd.c @@ -787,22 +787,14 @@ static void rcar_canfd_configure_controller(struct rcar_canfd_global *gpriv) } static void rcar_canfd_configure_afl_rules(struct rcar_canfd_global *gpriv, - u32 ch) + u32 ch, u32 rule_entry) { - u32 cfg; - int offset, start, page, num_rules = RCANFD_CHANNEL_NUMRULES; + int offset, page, num_rules = RCANFD_CHANNEL_NUMRULES; + u32 rule_entry_index = rule_entry % 16; u32 ridx = ch + RCANFD_RFFIFO_IDX; - if (ch == 0) { - start = 0; /* Channel 0 always starts from 0th rule */ - } else { - /* Get number of Channel 0 rules and adjust */ - cfg = rcar_canfd_read(gpriv->base, RCANFD_GAFLCFG(ch)); - start = RCANFD_GAFLCFG_GETRNC(gpriv, 0, cfg); - } - /* Enable write access to entry */ - page = RCANFD_GAFL_PAGENUM(start); + page = RCANFD_GAFL_PAGENUM(rule_entry); rcar_canfd_set_bit(gpriv->base, RCANFD_GAFLECTR, (RCANFD_GAFLECTR_AFLPN(gpriv, page) | RCANFD_GAFLECTR_AFLDAE)); @@ -818,13 +810,13 @@ static void rcar_canfd_configure_afl_rules(struct rcar_canfd_global *gpriv, offset = RCANFD_C_GAFL_OFFSET; /* Accept all IDs */ - rcar_canfd_write(gpriv->base, RCANFD_GAFLID(offset, start), 0); + rcar_canfd_write(gpriv->base, RCANFD_GAFLID(offset, rule_entry_index), 0); /* IDE or RTR is not considered for matching */ - rcar_canfd_write(gpriv->base, RCANFD_GAFLM(offset, start), 0); + rcar_canfd_write(gpriv->base, RCANFD_GAFLM(offset, rule_entry_index), 0); /* Any data length accepted */ - rcar_canfd_write(gpriv->base, RCANFD_GAFLP0(offset, start), 0); + rcar_canfd_write(gpriv->base, RCANFD_GAFLP0(offset, rule_entry_index), 0); /* Place the msg in corresponding Rx FIFO entry */ - rcar_canfd_set_bit(gpriv->base, RCANFD_GAFLP1(offset, start), + rcar_canfd_set_bit(gpriv->base, RCANFD_GAFLP1(offset, rule_entry_index), RCANFD_GAFLP1_GAFLFDP(ridx)); /* Disable write access to page */ @@ -1851,6 +1843,7 @@ static int rcar_canfd_probe(struct platform_device *pdev) unsigned long channels_mask = 0; int err, ch_irq, g_irq; int g_err_irq, g_recc_irq; + u32 rule_entry = 0; bool fdmode = true; /* CAN FD only mode - default */ char name[9] = "channelX"; int i; @@ -2023,7 +2016,8 @@ static int rcar_canfd_probe(struct platform_device *pdev) rcar_canfd_configure_tx(gpriv, ch); /* Configure receive rules */ - rcar_canfd_configure_afl_rules(gpriv, ch); + rcar_canfd_configure_afl_rules(gpriv, ch, rule_entry); + rule_entry += RCANFD_CHANNEL_NUMRULES; } /* Configure common interrupts */ diff --git a/drivers/net/can/rockchip/rockchip_canfd-core.c 
b/drivers/net/can/rockchip/rockchip_canfd-core.c index d9a937ba126c3..46201c126703c 100644 --- a/drivers/net/can/rockchip/rockchip_canfd-core.c +++ b/drivers/net/can/rockchip/rockchip_canfd-core.c @@ -236,11 +236,6 @@ static void rkcanfd_chip_fifo_setup(struct rkcanfd_priv *priv) { u32 reg; - /* TXE FIFO */ - reg = rkcanfd_read(priv, RKCANFD_REG_RX_FIFO_CTRL); - reg |= RKCANFD_REG_RX_FIFO_CTRL_RX_FIFO_ENABLE; - rkcanfd_write(priv, RKCANFD_REG_RX_FIFO_CTRL, reg); - /* RX FIFO */ reg = rkcanfd_read(priv, RKCANFD_REG_RX_FIFO_CTRL); reg |= RKCANFD_REG_RX_FIFO_CTRL_RX_FIFO_ENABLE; diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c index b6f4de375df75..3ccac6781b983 100644 --- a/drivers/net/can/usb/gs_usb.c +++ b/drivers/net/can/usb/gs_usb.c @@ -43,6 +43,9 @@ #define USB_XYLANTA_SAINT3_VENDOR_ID 0x16d0 #define USB_XYLANTA_SAINT3_PRODUCT_ID 0x0f30 +#define USB_CANNECTIVITY_VENDOR_ID 0x1209 +#define USB_CANNECTIVITY_PRODUCT_ID 0xca01 + /* Timestamp 32 bit timer runs at 1 MHz (1 µs tick). Worker accounts * for timer overflow (will be after ~71 minutes) */ @@ -1546,6 +1549,8 @@ static const struct usb_device_id gs_usb_table[] = { USB_ABE_CANDEBUGGER_FD_PRODUCT_ID, 0) }, { USB_DEVICE_INTERFACE_NUMBER(USB_XYLANTA_SAINT3_VENDOR_ID, USB_XYLANTA_SAINT3_PRODUCT_ID, 0) }, + { USB_DEVICE_INTERFACE_NUMBER(USB_CANNECTIVITY_VENDOR_ID, + USB_CANNECTIVITY_PRODUCT_ID, 0) }, {} /* Terminating entry */ }; diff --git a/drivers/net/can/usb/ucan.c b/drivers/net/can/usb/ucan.c index 39a63b7313a46..07406daf7c88e 100644 --- a/drivers/net/can/usb/ucan.c +++ b/drivers/net/can/usb/ucan.c @@ -186,7 +186,7 @@ union ucan_ctl_payload { */ struct ucan_ctl_cmd_get_protocol_version cmd_get_protocol_version; - u8 raw[128]; + u8 fw_str[128]; } __packed; enum { @@ -424,18 +424,20 @@ static int ucan_ctrl_command_out(struct ucan_priv *up, UCAN_USB_CTL_PIPE_TIMEOUT); } -static int ucan_device_request_in(struct ucan_priv *up, - u8 cmd, u16 subcmd, u16 datalen) +static void ucan_get_fw_str(struct ucan_priv *up, char *fw_str, size_t size) { - return usb_control_msg(up->udev, - usb_rcvctrlpipe(up->udev, 0), - cmd, - USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, - subcmd, - 0, - up->ctl_msg_buffer, - datalen, - UCAN_USB_CTL_PIPE_TIMEOUT); + int ret; + + ret = usb_control_msg(up->udev, usb_rcvctrlpipe(up->udev, 0), + UCAN_DEVICE_GET_FW_STRING, + USB_DIR_IN | USB_TYPE_VENDOR | + USB_RECIP_DEVICE, + 0, 0, fw_str, size - 1, + UCAN_USB_CTL_PIPE_TIMEOUT); + if (ret > 0) + fw_str[ret] = '\0'; + else + strscpy(fw_str, "unknown", size); } /* Parse the device information structure reported by the device and @@ -1314,7 +1316,6 @@ static int ucan_probe(struct usb_interface *intf, u8 in_ep_addr; u8 out_ep_addr; union ucan_ctl_payload *ctl_msg_buffer; - char firmware_str[sizeof(union ucan_ctl_payload) + 1]; udev = interface_to_usbdev(intf); @@ -1527,17 +1528,6 @@ static int ucan_probe(struct usb_interface *intf, */ ucan_parse_device_info(up, &ctl_msg_buffer->cmd_get_device_info); - /* just print some device information - if available */ - ret = ucan_device_request_in(up, UCAN_DEVICE_GET_FW_STRING, 0, - sizeof(union ucan_ctl_payload)); - if (ret > 0) { - /* copy string while ensuring zero termination */ - strscpy(firmware_str, up->ctl_msg_buffer->raw, - sizeof(union ucan_ctl_payload) + 1); - } else { - strcpy(firmware_str, "unknown"); - } - /* device is compatible, reset it */ ret = ucan_ctrl_command_out(up, UCAN_COMMAND_RESET, 0, 0); if (ret < 0) @@ -1555,7 +1545,10 @@ static int ucan_probe(struct usb_interface *intf, /* 
initialisation complete, log device info */ netdev_info(up->netdev, "registered device\n"); - netdev_info(up->netdev, "firmware string: %s\n", firmware_str); + ucan_get_fw_str(up, up->ctl_msg_buffer->fw_str, + sizeof(up->ctl_msg_buffer->fw_str)); + netdev_info(up->netdev, "firmware string: %s\n", + up->ctl_msg_buffer->fw_str); /* success */ return 0; diff --git a/drivers/net/can/vxcan.c b/drivers/net/can/vxcan.c index ca88119410852..99a78a7571674 100644 --- a/drivers/net/can/vxcan.c +++ b/drivers/net/can/vxcan.c @@ -172,10 +172,13 @@ static void vxcan_setup(struct net_device *dev) /* forward declaration for rtnl_create_link() */ static struct rtnl_link_ops vxcan_link_ops; -static int vxcan_newlink(struct net *peer_net, struct net_device *dev, - struct nlattr *tb[], struct nlattr *data[], +static int vxcan_newlink(struct net_device *dev, + struct rtnl_newlink_params *params, struct netlink_ext_ack *extack) { + struct net *peer_net = rtnl_newlink_peer_net(params); + struct nlattr **data = params->data; + struct nlattr **tb = params->tb; struct vxcan_priv *priv; struct net_device *peer; diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig index 2d10b4d6cfbbe..bb9812b3b0e82 100644 --- a/drivers/net/dsa/Kconfig +++ b/drivers/net/dsa/Kconfig @@ -37,6 +37,7 @@ config NET_DSA_LANTIQ_GSWIP config NET_DSA_MT7530 tristate "MediaTek MT7530 and MT7531 Ethernet switch support" select NET_DSA_TAG_MTK + select REGMAP_IRQ imply NET_DSA_MT7530_MDIO imply NET_DSA_MT7530_MMIO help diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c index 79dc77835681c..61d164ffb3ae9 100644 --- a/drivers/net/dsa/b53/b53_common.c +++ b/drivers/net/dsa/b53/b53_common.c @@ -2409,6 +2409,19 @@ static const struct b53_chip_data b53_switch_chips[] = { .jumbo_pm_reg = B53_JUMBO_PORT_MASK, .jumbo_size_reg = B53_JUMBO_MAX_SIZE, }, + { + .chip_id = BCM53101_DEVICE_ID, + .dev_name = "BCM53101", + .vlans = 4096, + .enabled_ports = 0x11f, + .arl_bins = 4, + .arl_buckets = 512, + .vta_regs = B53_VTA_REGS, + .imp_port = 8, + .duplex_reg = B53_DUPLEX_STAT_GE, + .jumbo_pm_reg = B53_JUMBO_PORT_MASK, + .jumbo_size_reg = B53_JUMBO_MAX_SIZE, + }, { .chip_id = BCM53115_DEVICE_ID, .dev_name = "BCM53115", @@ -2789,6 +2802,7 @@ int b53_switch_detect(struct b53_device *dev) return ret; switch (id32) { + case BCM53101_DEVICE_ID: case BCM53115_DEVICE_ID: case BCM53125_DEVICE_ID: case BCM53128_DEVICE_ID: diff --git a/drivers/net/dsa/b53/b53_mdio.c b/drivers/net/dsa/b53/b53_mdio.c index 31d070bf161a3..43a3b37b731bf 100644 --- a/drivers/net/dsa/b53/b53_mdio.c +++ b/drivers/net/dsa/b53/b53_mdio.c @@ -374,6 +374,7 @@ static void b53_mdio_shutdown(struct mdio_device *mdiodev) static const struct of_device_id b53_of_match[] = { { .compatible = "brcm,bcm5325" }, + { .compatible = "brcm,bcm53101" }, { .compatible = "brcm,bcm53115" }, { .compatible = "brcm,bcm53125" }, { .compatible = "brcm,bcm53128" }, diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h index 9e9b5bc0c5d6a..0166c37a13a7f 100644 --- a/drivers/net/dsa/b53/b53_priv.h +++ b/drivers/net/dsa/b53/b53_priv.h @@ -66,6 +66,7 @@ enum { BCM5395_DEVICE_ID = 0x95, BCM5397_DEVICE_ID = 0x97, BCM5398_DEVICE_ID = 0x98, + BCM53101_DEVICE_ID = 0x53101, BCM53115_DEVICE_ID = 0x53115, BCM53125_DEVICE_ID = 0x53125, BCM53128_DEVICE_ID = 0x53128, @@ -188,6 +189,7 @@ static inline int is531x5(struct b53_device *dev) { return dev->chip_id == BCM53115_DEVICE_ID || dev->chip_id == BCM53125_DEVICE_ID || + dev->chip_id == BCM53101_DEVICE_ID || dev->chip_id == 
BCM53128_DEVICE_ID || dev->chip_id == BCM53134_DEVICE_ID; } diff --git a/drivers/net/dsa/b53/b53_serdes.c b/drivers/net/dsa/b53/b53_serdes.c index 4730982b68400..7460122f6abca 100644 --- a/drivers/net/dsa/b53/b53_serdes.c +++ b/drivers/net/dsa/b53/b53_serdes.c @@ -239,7 +239,6 @@ int b53_serdes_init(struct b53_device *dev, int port) pcs->dev = dev; pcs->lane = lane; pcs->pcs.ops = &b53_pcs_ops; - pcs->pcs.neg_mode = true; return 0; } diff --git a/drivers/net/dsa/microchip/ksz8.c b/drivers/net/dsa/microchip/ksz8.c index da7110d675583..be433b4e2b1ca 100644 --- a/drivers/net/dsa/microchip/ksz8.c +++ b/drivers/net/dsa/microchip/ksz8.c @@ -1625,7 +1625,6 @@ void ksz8_port_setup(struct ksz_device *dev, int port, bool cpu_port) const u16 *regs = dev->info->regs; struct dsa_switch *ds = dev->ds; const u32 *masks; - int queues; u8 member; masks = dev->info->masks; @@ -1633,15 +1632,7 @@ void ksz8_port_setup(struct ksz_device *dev, int port, bool cpu_port) /* enable broadcast storm limit */ ksz_port_cfg(dev, port, P_BCAST_STORM_CTRL, PORT_BROADCAST_STORM, true); - /* For KSZ88x3 enable only one queue by default, otherwise we won't - * be able to get rid of PCP prios on Port 2. - */ - if (ksz_is_ksz88x3(dev)) - queues = 1; - else - queues = dev->info->num_tx_queues; - - ksz8_port_queue_split(dev, port, queues); + ksz8_port_queue_split(dev, port, dev->info->num_tx_queues); /* replace priority */ ksz_port_cfg(dev, port, P_802_1P_CTRL, diff --git a/drivers/net/dsa/microchip/ksz_dcb.c b/drivers/net/dsa/microchip/ksz_dcb.c index 30b4a6186e38f..c3b501997ac94 100644 --- a/drivers/net/dsa/microchip/ksz_dcb.c +++ b/drivers/net/dsa/microchip/ksz_dcb.c @@ -10,7 +10,12 @@ #include "ksz_dcb.h" #include "ksz8.h" -#define KSZ8_REG_PORT_1_CTRL_0 0x10 +/* Port X Control 0 register. + * The datasheet specifies: Port 1 - 0x10, Port 2 - 0x20, Port 3 - 0x30. + * However, the driver uses get_port_addr(), which maps Port 1 to offset 0. + * Therefore, we define the base offset as 0x00 here to align with that logic. + */ +#define KSZ8_REG_PORT_1_CTRL_0 0x00 #define KSZ8_PORT_DIFFSERV_ENABLE BIT(6) #define KSZ8_PORT_802_1P_ENABLE BIT(5) #define KSZ8_PORT_BASED_PRIO_M GENMASK(4, 3) @@ -181,49 +186,6 @@ int ksz_port_get_default_prio(struct dsa_switch *ds, int port) return (data & mask) >> shift; } -/** - * ksz88x3_port_set_default_prio_quirks - Quirks for default priority - * @dev: Pointer to the KSZ switch device structure - * @port: Port number for which to set the default priority - * @prio: Priority value to set - * - * This function implements quirks for setting the default priority on KSZ88x3 - * devices. On Port 2, no other priority providers are working - * except of PCP. So, configuring default priority on Port 2 is not possible. - * On Port 1, it is not possible to configure port priority if PCP - * apptrust on Port 2 is disabled. Since we disable multiple queues on the - * switch to disable PCP on Port 2, we need to ensure that the default priority - * configuration on Port 1 is in agreement with the configuration on Port 2. 
- * - * Return: 0 on success, or a negative error code on failure - */ -static int ksz88x3_port_set_default_prio_quirks(struct ksz_device *dev, int port, - u8 prio) -{ - if (!prio) - return 0; - - if (port == KSZ_PORT_2) { - dev_err(dev->dev, "Port priority configuration is not working on Port 2\n"); - return -EINVAL; - } else if (port == KSZ_PORT_1) { - u8 port2_data; - int ret; - - ret = ksz_pread8(dev, KSZ_PORT_2, KSZ8_REG_PORT_1_CTRL_0, - &port2_data); - if (ret) - return ret; - - if (!(port2_data & KSZ8_PORT_802_1P_ENABLE)) { - dev_err(dev->dev, "Not possible to configure port priority on Port 1 if PCP apptrust on Port 2 is disabled\n"); - return -EINVAL; - } - } - - return 0; -} - /** * ksz_port_set_default_prio - Sets the default priority for a port on a KSZ * switch @@ -239,18 +201,12 @@ static int ksz88x3_port_set_default_prio_quirks(struct ksz_device *dev, int port int ksz_port_set_default_prio(struct dsa_switch *ds, int port, u8 prio) { struct ksz_device *dev = ds->priv; - int reg, shift, ret; + int reg, shift; u8 mask; if (prio >= dev->info->num_ipms) return -EINVAL; - if (ksz_is_ksz88x3(dev)) { - ret = ksz88x3_port_set_default_prio_quirks(dev, port, prio); - if (ret) - return ret; - } - ksz_get_default_port_prio_reg(dev, ®, &mask, &shift); return ksz_prmw8(dev, port, reg, mask, (prio << shift) & mask); @@ -518,155 +474,6 @@ static int ksz_port_set_apptrust_validate(struct ksz_device *dev, int port, return -EINVAL; } -/** - * ksz88x3_port1_apptrust_quirk - Quirk for apptrust configuration on Port 1 - * of KSZ88x3 devices - * @dev: Pointer to the KSZ switch device structure - * @port: Port number for which to set the apptrust selectors - * @reg: Register address for the apptrust configuration - * @port1_data: Data to set for the apptrust configuration - * - * This function implements a quirk for apptrust configuration on Port 1 of - * KSZ88x3 devices. It ensures that apptrust configuration on Port 1 is not - * possible if PCP apptrust on Port 2 is disabled. This is because the Port 2 - * seems to be permanently hardwired to PCP classification, so we need to - * do Port 1 configuration always in agreement with Port 2 configuration. - * - * Return: 0 on success, or a negative error code on failure - */ -static int ksz88x3_port1_apptrust_quirk(struct ksz_device *dev, int port, - int reg, u8 port1_data) -{ - u8 port2_data; - int ret; - - /* If no apptrust is requested for Port 1, no need to care about Port 2 - * configuration. - */ - if (!(port1_data & (KSZ8_PORT_802_1P_ENABLE | KSZ8_PORT_DIFFSERV_ENABLE))) - return 0; - - /* We got request to enable any apptrust on Port 1. To make it possible, - * we need to enable multiple queues on the switch. If we enable - * multiqueue support, PCP classification on Port 2 will be - * automatically activated by HW. - */ - ret = ksz_pread8(dev, KSZ_PORT_2, reg, &port2_data); - if (ret) - return ret; - - /* If KSZ8_PORT_802_1P_ENABLE bit is set on Port 2, the driver showed - * the interest in PCP classification on Port 2. In this case, - * multiqueue support is enabled and we can enable any apptrust on - * Port 1. - * If KSZ8_PORT_802_1P_ENABLE bit is not set on Port 2, the PCP - * classification on Port 2 is still active, but the driver disabled - * multiqueue support and made frame prioritization inactive for - * all ports. In this case, we can't enable any apptrust on Port 1. 
- */ - if (!(port2_data & KSZ8_PORT_802_1P_ENABLE)) { - dev_err(dev->dev, "Not possible to enable any apptrust on Port 1 if PCP apptrust on Port 2 is disabled\n"); - return -EINVAL; - } - - return 0; -} - -/** - * ksz88x3_port2_apptrust_quirk - Quirk for apptrust configuration on Port 2 - * of KSZ88x3 devices - * @dev: Pointer to the KSZ switch device structure - * @port: Port number for which to set the apptrust selectors - * @reg: Register address for the apptrust configuration - * @port2_data: Data to set for the apptrust configuration - * - * This function implements a quirk for apptrust configuration on Port 2 of - * KSZ88x3 devices. It ensures that DSCP apptrust is not working on Port 2 and - * that it is not possible to disable PCP on Port 2. The only way to disable PCP - * on Port 2 is to disable multiple queues on the switch. - * - * Return: 0 on success, or a negative error code on failure - */ -static int ksz88x3_port2_apptrust_quirk(struct ksz_device *dev, int port, - int reg, u8 port2_data) -{ - struct dsa_switch *ds = dev->ds; - u8 port1_data; - int ret; - - /* First validate Port 2 configuration. DiffServ/DSCP is not working - * on this port. - */ - if (port2_data & KSZ8_PORT_DIFFSERV_ENABLE) { - dev_err(dev->dev, "DSCP apptrust is not working on Port 2\n"); - return -EINVAL; - } - - /* If PCP support is requested, we need to enable all queues on the - * switch to make PCP priority working on Port 2. - */ - if (port2_data & KSZ8_PORT_802_1P_ENABLE) - return ksz8_all_queues_split(dev, dev->info->num_tx_queues); - - /* We got request to disable PCP priority on Port 2. - * Now, we need to compare Port 2 configuration with Port 1 - * configuration. - */ - ret = ksz_pread8(dev, KSZ_PORT_1, reg, &port1_data); - if (ret) - return ret; - - /* If Port 1 has any apptrust enabled, we can't disable multiple queues - * on the switch, so we can't disable PCP on Port 2. - */ - if (port1_data & (KSZ8_PORT_802_1P_ENABLE | KSZ8_PORT_DIFFSERV_ENABLE)) { - dev_err(dev->dev, "Not possible to disable PCP on Port 2 if any apptrust is enabled on Port 1\n"); - return -EINVAL; - } - - /* Now we need to ensure that default priority on Port 1 is set to 0 - * otherwise we can't disable multiqueue support on the switch. - */ - ret = ksz_port_get_default_prio(ds, KSZ_PORT_1); - if (ret < 0) { - return ret; - } else if (ret) { - dev_err(dev->dev, "Not possible to disable PCP on Port 2 if non zero default priority is set on Port 1\n"); - return -EINVAL; - } - - /* Port 1 has no apptrust or default priority set and we got request to - * disable PCP on Port 2. We can disable multiqueue support to disable - * PCP on Port 2. - */ - return ksz8_all_queues_split(dev, 1); -} - -/** - * ksz88x3_port_apptrust_quirk - Quirk for apptrust configuration on KSZ88x3 - * devices - * @dev: Pointer to the KSZ switch device structure - * @port: Port number for which to set the apptrust selectors - * @reg: Register address for the apptrust configuration - * @data: Data to set for the apptrust configuration - * - * This function implements a quirk for apptrust configuration on KSZ88x3 - * devices. It ensures that apptrust configuration on Port 1 and - * Port 2 is done in agreement with each other. 
- * - * Return: 0 on success, or a negative error code on failure - */ -static int ksz88x3_port_apptrust_quirk(struct ksz_device *dev, int port, - int reg, u8 data) -{ - if (port == KSZ_PORT_1) - return ksz88x3_port1_apptrust_quirk(dev, port, reg, data); - else if (port == KSZ_PORT_2) - return ksz88x3_port2_apptrust_quirk(dev, port, reg, data); - - return 0; -} - /** * ksz_port_set_apptrust - Sets the apptrust selectors for a port on a KSZ * switch @@ -707,12 +514,6 @@ int ksz_port_set_apptrust(struct dsa_switch *ds, int port, } } - if (ksz_is_ksz88x3(dev)) { - ret = ksz88x3_port_apptrust_quirk(dev, port, reg, data); - if (ret) - return ret; - } - return ksz_prmw8(dev, port, reg, mask, data); } @@ -799,21 +600,5 @@ int ksz_dcb_init_port(struct ksz_device *dev, int port) */ int ksz_dcb_init(struct ksz_device *dev) { - int ret; - - ret = ksz_init_global_dscp_map(dev); - if (ret) - return ret; - - /* Enable 802.1p priority control on Port 2 during switch initialization. - * This setup is critical for the apptrust functionality on Port 1, which - * relies on the priority settings of Port 2. Note: Port 1 is naturally - * configured before Port 2, necessitating this configuration order. - */ - if (ksz_is_ksz88x3(dev)) - return ksz_prmw8(dev, KSZ_PORT_2, KSZ8_REG_PORT_1_CTRL_0, - KSZ8_PORT_802_1P_ENABLE, - KSZ8_PORT_802_1P_ENABLE); - - return 0; + return ksz_init_global_dscp_map(dev); } diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c index 1c83af805209c..d70399bce5b9e 100644 --- a/drivers/net/dsa/mt7530.c +++ b/drivers/net/dsa/mt7530.c @@ -2050,131 +2050,6 @@ mt7530_setup_gpio(struct mt7530_priv *priv) } #endif /* CONFIG_GPIOLIB */ -static irqreturn_t -mt7530_irq_thread_fn(int irq, void *dev_id) -{ - struct mt7530_priv *priv = dev_id; - bool handled = false; - u32 val; - int p; - - mt7530_mutex_lock(priv); - val = mt7530_mii_read(priv, MT7530_SYS_INT_STS); - mt7530_mii_write(priv, MT7530_SYS_INT_STS, val); - mt7530_mutex_unlock(priv); - - for (p = 0; p < MT7530_NUM_PHYS; p++) { - if (BIT(p) & val) { - unsigned int irq; - - irq = irq_find_mapping(priv->irq_domain, p); - handle_nested_irq(irq); - handled = true; - } - } - - return IRQ_RETVAL(handled); -} - -static void -mt7530_irq_mask(struct irq_data *d) -{ - struct mt7530_priv *priv = irq_data_get_irq_chip_data(d); - - priv->irq_enable &= ~BIT(d->hwirq); -} - -static void -mt7530_irq_unmask(struct irq_data *d) -{ - struct mt7530_priv *priv = irq_data_get_irq_chip_data(d); - - priv->irq_enable |= BIT(d->hwirq); -} - -static void -mt7530_irq_bus_lock(struct irq_data *d) -{ - struct mt7530_priv *priv = irq_data_get_irq_chip_data(d); - - mt7530_mutex_lock(priv); -} - -static void -mt7530_irq_bus_sync_unlock(struct irq_data *d) -{ - struct mt7530_priv *priv = irq_data_get_irq_chip_data(d); - - mt7530_mii_write(priv, MT7530_SYS_INT_EN, priv->irq_enable); - mt7530_mutex_unlock(priv); -} - -static struct irq_chip mt7530_irq_chip = { - .name = KBUILD_MODNAME, - .irq_mask = mt7530_irq_mask, - .irq_unmask = mt7530_irq_unmask, - .irq_bus_lock = mt7530_irq_bus_lock, - .irq_bus_sync_unlock = mt7530_irq_bus_sync_unlock, -}; - -static int -mt7530_irq_map(struct irq_domain *domain, unsigned int irq, - irq_hw_number_t hwirq) -{ - irq_set_chip_data(irq, domain->host_data); - irq_set_chip_and_handler(irq, &mt7530_irq_chip, handle_simple_irq); - irq_set_nested_thread(irq, true); - irq_set_noprobe(irq); - - return 0; -} - -static const struct irq_domain_ops mt7530_irq_domain_ops = { - .map = mt7530_irq_map, - .xlate = irq_domain_xlate_onecell, -}; - 
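The open-coded irq_chip, irq_domain and threaded handler removed above (together with the mt7988 variants that follow) are replaced later in this patch by regmap-irq, which derives mask, unmask and ack handling from a register description. A minimal sketch of that registration flow, assuming an existing regmap and a parent IRQ; the names and register offsets below are placeholders, not MT7530's.

#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/property.h>
#include <linux/regmap.h>

static const struct regmap_irq example_irqs[] = {
	REGMAP_IRQ_REG_LINE(0, 32),	/* one bit per source in a 32-bit register */
	REGMAP_IRQ_REG_LINE(1, 32),
};

static const struct regmap_irq_chip example_irq_chip = {
	.name		= "example",
	.status_base	= 0x0c,		/* placeholder register offsets */
	.unmask_base	= 0x10,
	.ack_base	= 0x0c,
	.init_ack_masked = true,
	.irqs		= example_irqs,
	.num_irqs	= ARRAY_SIZE(example_irqs),
	.num_regs	= 1,
};

static int example_add_irqchip(struct device *dev, struct regmap *map,
			       int irq, struct irq_domain **domain)
{
	struct regmap_irq_chip_data *data;
	int ret;

	/* regmap-irq supplies the irq_chip, domain and threaded handler */
	ret = devm_regmap_add_irq_chip_fwnode(dev, dev_fwnode(dev), map, irq,
					      IRQF_ONESHOT, 0,
					      &example_irq_chip, &data);
	if (ret)
		return ret;

	*domain = regmap_irq_get_domain(data);
	return 0;
}

Consumers still resolve hwirqs through the returned domain, which is why mt7530_setup_mdio_irq() survives unchanged below apart from gating on priv->irq_domain instead of priv->irq.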
-static void -mt7988_irq_mask(struct irq_data *d) -{ - struct mt7530_priv *priv = irq_data_get_irq_chip_data(d); - - priv->irq_enable &= ~BIT(d->hwirq); - mt7530_mii_write(priv, MT7530_SYS_INT_EN, priv->irq_enable); -} - -static void -mt7988_irq_unmask(struct irq_data *d) -{ - struct mt7530_priv *priv = irq_data_get_irq_chip_data(d); - - priv->irq_enable |= BIT(d->hwirq); - mt7530_mii_write(priv, MT7530_SYS_INT_EN, priv->irq_enable); -} - -static struct irq_chip mt7988_irq_chip = { - .name = KBUILD_MODNAME, - .irq_mask = mt7988_irq_mask, - .irq_unmask = mt7988_irq_unmask, -}; - -static int -mt7988_irq_map(struct irq_domain *domain, unsigned int irq, - irq_hw_number_t hwirq) -{ - irq_set_chip_data(irq, domain->host_data); - irq_set_chip_and_handler(irq, &mt7988_irq_chip, handle_simple_irq); - irq_set_nested_thread(irq, true); - irq_set_noprobe(irq); - - return 0; -} - -static const struct irq_domain_ops mt7988_irq_domain_ops = { - .map = mt7988_irq_map, - .xlate = irq_domain_xlate_onecell, -}; - static void mt7530_setup_mdio_irq(struct mt7530_priv *priv) { @@ -2191,49 +2066,72 @@ mt7530_setup_mdio_irq(struct mt7530_priv *priv) } } +static const struct regmap_irq mt7530_irqs[] = { + REGMAP_IRQ_REG_LINE(0, 32), /* PHY0_LC */ + REGMAP_IRQ_REG_LINE(1, 32), /* PHY1_LC */ + REGMAP_IRQ_REG_LINE(2, 32), /* PHY2_LC */ + REGMAP_IRQ_REG_LINE(3, 32), /* PHY3_LC */ + REGMAP_IRQ_REG_LINE(4, 32), /* PHY4_LC */ + REGMAP_IRQ_REG_LINE(5, 32), /* PHY5_LC */ + REGMAP_IRQ_REG_LINE(6, 32), /* PHY6_LC */ + REGMAP_IRQ_REG_LINE(16, 32), /* MAC_PC */ + REGMAP_IRQ_REG_LINE(17, 32), /* BMU */ + REGMAP_IRQ_REG_LINE(18, 32), /* MIB */ + REGMAP_IRQ_REG_LINE(22, 32), /* ARL_COL_FULL_COL */ + REGMAP_IRQ_REG_LINE(23, 32), /* ARL_COL_FULL */ + REGMAP_IRQ_REG_LINE(24, 32), /* ARL_TBL_ERR */ + REGMAP_IRQ_REG_LINE(25, 32), /* ARL_PKT_QERR */ + REGMAP_IRQ_REG_LINE(26, 32), /* ARL_EQ_ERR */ + REGMAP_IRQ_REG_LINE(27, 32), /* ARL_PKT_BC */ + REGMAP_IRQ_REG_LINE(28, 32), /* ARL_SEC_IG1X */ + REGMAP_IRQ_REG_LINE(29, 32), /* ARL_SEC_VLAN */ + REGMAP_IRQ_REG_LINE(30, 32), /* ARL_SEC_TAG */ + REGMAP_IRQ_REG_LINE(31, 32), /* ACL */ +}; + +static const struct regmap_irq_chip mt7530_regmap_irq_chip = { + .name = KBUILD_MODNAME, + .status_base = MT7530_SYS_INT_STS, + .unmask_base = MT7530_SYS_INT_EN, + .ack_base = MT7530_SYS_INT_STS, + .init_ack_masked = true, + .irqs = mt7530_irqs, + .num_irqs = ARRAY_SIZE(mt7530_irqs), + .num_regs = 1, +}; + static int mt7530_setup_irq(struct mt7530_priv *priv) { + struct regmap_irq_chip_data *irq_data; struct device *dev = priv->dev; struct device_node *np = dev->of_node; - int ret; + int irq, ret; if (!of_property_read_bool(np, "interrupt-controller")) { dev_info(dev, "no interrupt support\n"); return 0; } - priv->irq = of_irq_get(np, 0); - if (priv->irq <= 0) { - dev_err(dev, "failed to get parent IRQ: %d\n", priv->irq); - return priv->irq ? : -EINVAL; - } - - if (priv->id == ID_MT7988 || priv->id == ID_EN7581) - priv->irq_domain = irq_domain_add_linear(np, MT7530_NUM_PHYS, - &mt7988_irq_domain_ops, - priv); - else - priv->irq_domain = irq_domain_add_linear(np, MT7530_NUM_PHYS, - &mt7530_irq_domain_ops, - priv); - - if (!priv->irq_domain) { - dev_err(dev, "failed to create IRQ domain\n"); - return -ENOMEM; + irq = of_irq_get(np, 0); + if (irq <= 0) { + dev_err(dev, "failed to get parent IRQ: %d\n", irq); + return irq ? 
: -EINVAL; } /* This register must be set for MT7530 to properly fire interrupts */ if (priv->id == ID_MT7530 || priv->id == ID_MT7621) mt7530_set(priv, MT7530_TOP_SIG_CTRL, TOP_SIG_CTRL_NORMAL); - ret = request_threaded_irq(priv->irq, NULL, mt7530_irq_thread_fn, - IRQF_ONESHOT, KBUILD_MODNAME, priv); - if (ret) { - irq_domain_remove(priv->irq_domain); - dev_err(dev, "failed to request IRQ: %d\n", ret); + ret = devm_regmap_add_irq_chip_fwnode(dev, dev_fwnode(dev), + priv->regmap, irq, + IRQF_ONESHOT, + 0, &mt7530_regmap_irq_chip, + &irq_data); + if (ret) return ret; - } + + priv->irq_domain = regmap_irq_get_domain(irq_data); return 0; } @@ -2253,26 +2151,6 @@ mt7530_free_mdio_irq(struct mt7530_priv *priv) } } -static void -mt7530_free_irq_common(struct mt7530_priv *priv) -{ - free_irq(priv->irq, priv); - irq_domain_remove(priv->irq_domain); -} - -static void -mt7530_free_irq(struct mt7530_priv *priv) -{ - struct device_node *mnp, *np = priv->dev->of_node; - - mnp = of_get_child_by_name(np, "mdio"); - if (!mnp) - mt7530_free_mdio_irq(priv); - of_node_put(mnp); - - mt7530_free_irq_common(priv); -} - static int mt7530_setup_mdio(struct mt7530_priv *priv) { @@ -2307,13 +2185,13 @@ mt7530_setup_mdio(struct mt7530_priv *priv) bus->parent = dev; bus->phy_mask = ~ds->phys_mii_mask; - if (priv->irq && !mnp) + if (priv->irq_domain && !mnp) mt7530_setup_mdio_irq(priv); ret = devm_of_mdiobus_register(dev, bus, mnp); if (ret) { dev_err(dev, "failed to register MDIO bus: %d\n", ret); - if (priv->irq && !mnp) + if (priv->irq_domain && !mnp) mt7530_free_mdio_irq(priv); } @@ -2586,12 +2464,18 @@ mt7531_setup_common(struct dsa_switch *ds) /* Allow mirroring frames received on the local port (monitor port). */ mt7530_set(priv, MT753X_AGC, LOCAL_EN); + /* Enable Special Tag for rx frames */ + if (priv->id == ID_EN7581) + mt7530_write(priv, MT753X_CPORT_SPTAG_CFG, + CPORT_SW2FE_STAG_EN | CPORT_FE2SW_STAG_EN); + /* Flush the FDB table */ ret = mt7530_fdb_cmd(priv, MT7530_FDB_FLUSH, NULL); if (ret < 0) return ret; - return 0; + /* Setup VLAN ID 0 for VLAN-unaware bridges */ + return mt7530_setup_vlan0(priv); } static int @@ -2687,11 +2571,6 @@ mt7531_setup(struct dsa_switch *ds) if (ret) return ret; - /* Setup VLAN ID 0 for VLAN-unaware bridges */ - ret = mt7530_setup_vlan0(priv); - if (ret) - return ret; - ds->assisted_learning_on_cpu_port = true; ds->mtu_enforcement_ingress = true; @@ -2957,28 +2836,61 @@ static void mt753x_phylink_mac_link_up(struct phylink_config *config, mcr |= PMCR_FORCE_RX_FC_EN; } - if (mode == MLO_AN_PHY && phydev && phy_init_eee(phydev, false) >= 0) { - switch (speed) { - case SPEED_1000: - case SPEED_2500: - mcr |= PMCR_FORCE_EEE1G; - break; - case SPEED_100: - mcr |= PMCR_FORCE_EEE100; - break; - } - } - mt7530_set(priv, MT753X_PMCR_P(dp->index), mcr); } +static void mt753x_phylink_mac_disable_tx_lpi(struct phylink_config *config) +{ + struct dsa_port *dp = dsa_phylink_to_port(config); + struct mt7530_priv *priv = dp->ds->priv; + + mt7530_clear(priv, MT753X_PMCR_P(dp->index), + PMCR_FORCE_EEE1G | PMCR_FORCE_EEE100); +} + +static int mt753x_phylink_mac_enable_tx_lpi(struct phylink_config *config, + u32 timer, bool tx_clock_stop) +{ + struct dsa_port *dp = dsa_phylink_to_port(config); + struct mt7530_priv *priv = dp->ds->priv; + u32 val; + + /* If the timer is zero, then set LPI_MODE_EN, which allows the + * system to enter LPI mode immediately rather than waiting for + * the LPI threshold. 
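+ * Timer values that do not fit in LPI_THRESH_MASK are clamped to the
+ * field maximum; mt753x_set_mac_eee() bounds tx_lpi_timer to 0xFFF for
+ * the same reason.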
+ */ + if (!timer) + val = LPI_MODE_EN; + else if (FIELD_FIT(LPI_THRESH_MASK, timer)) + val = FIELD_PREP(LPI_THRESH_MASK, timer); + else + val = LPI_THRESH_MASK; + + mt7530_rmw(priv, MT753X_PMEEECR_P(dp->index), + LPI_THRESH_MASK | LPI_MODE_EN, val); + + mt7530_set(priv, MT753X_PMCR_P(dp->index), + PMCR_FORCE_EEE1G | PMCR_FORCE_EEE100); + + return 0; +} + static void mt753x_phylink_get_caps(struct dsa_switch *ds, int port, struct phylink_config *config) { struct mt7530_priv *priv = ds->priv; + u32 eeecr; config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE; + config->lpi_capabilities = MAC_100FD | MAC_1000FD | MAC_2500FD; + + eeecr = mt7530_read(priv, MT753X_PMEEECR_P(port)); + /* tx_lpi_timer should be in microseconds. The time units for + * LPI threshold are unspecified. + */ + config->lpi_timer_default = FIELD_GET(LPI_THRESH_MASK, eeecr); + priv->info->mac_port_get_caps(ds, port, config); } @@ -3063,24 +2975,21 @@ mt753x_setup(struct dsa_switch *ds) return ret; ret = mt7530_setup_mdio(priv); - if (ret && priv->irq) - mt7530_free_irq_common(priv); if (ret) return ret; /* Initialise the PCS devices */ for (i = 0; i < priv->ds->num_ports; i++) { priv->pcs[i].pcs.ops = priv->info->pcs_ops; - priv->pcs[i].pcs.neg_mode = true; priv->pcs[i].priv = priv; priv->pcs[i].port = i; } - if (priv->create_sgmii) { + if (priv->create_sgmii) ret = priv->create_sgmii(priv); - if (ret && priv->irq) - mt7530_free_irq(priv); - } + + if (ret && priv->irq_domain) + mt7530_free_mdio_irq(priv); return ret; } @@ -3088,18 +2997,9 @@ mt753x_setup(struct dsa_switch *ds) static int mt753x_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_keee *e) { - struct mt7530_priv *priv = ds->priv; - u32 set, mask = LPI_THRESH_MASK | LPI_MODE_EN; - if (e->tx_lpi_timer > 0xFFF) return -EINVAL; - set = LPI_THRESH_SET(e->tx_lpi_timer); - if (!e->tx_lpi_enabled) - /* Force LPI Mode without a delay */ - set |= LPI_MODE_EN; - mt7530_rmw(priv, MT753X_PMEEECR_P(port), mask, set); - return 0; } @@ -3238,6 +3138,8 @@ static const struct phylink_mac_ops mt753x_phylink_mac_ops = { .mac_config = mt753x_phylink_mac_config, .mac_link_down = mt753x_phylink_mac_link_down, .mac_link_up = mt753x_phylink_mac_link_up, + .mac_disable_tx_lpi = mt753x_phylink_mac_disable_tx_lpi, + .mac_enable_tx_lpi = mt753x_phylink_mac_enable_tx_lpi, }; const struct mt753x_info mt753x_table[] = { @@ -3331,8 +3233,8 @@ EXPORT_SYMBOL_GPL(mt7530_probe_common); void mt7530_remove_common(struct mt7530_priv *priv) { - if (priv->irq) - mt7530_free_irq(priv); + if (priv->irq_domain) + mt7530_free_mdio_irq(priv); dsa_unregister_switch(priv->ds); diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h index 448200689f492..c3ea403d7acfd 100644 --- a/drivers/net/dsa/mt7530.h +++ b/drivers/net/dsa/mt7530.h @@ -627,6 +627,10 @@ enum mt7531_xtal_fsel { #define MT7531_GPIO12_RG_RXD3_MASK GENMASK(19, 16) #define MT7531_EXT_P_MDIO_12 (2 << 16) +#define MT753X_CPORT_SPTAG_CFG 0x7c10 +#define CPORT_SW2FE_STAG_EN BIT(1) +#define CPORT_FE2SW_STAG_EN BIT(0) + /* Registers for LED GPIO control (MT7530 only) * All registers follow this pattern: * [ 2: 0] port 0 @@ -815,9 +819,7 @@ struct mt753x_info { * @p5_mode: Holding the current mode of port 5 of the MT7530 switch * @p5_sgmii: Flag for distinguishing if port 5 of the MT7531 switch * has got SGMII - * @irq: IRQ number of the switch * @irq_domain: IRQ domain of the switch irq_chip - * @irq_enable: IRQ enable bits, synced to SYS_INT_EN * @create_sgmii: Pointer to function creating SGMII PCS instance(s) * 
@active_cpu_ports: Holding the active CPU ports * @mdiodev: The pointer to the MDIO device structure @@ -842,9 +844,7 @@ struct mt7530_priv { struct mt753x_pcs pcs[MT7530_NUM_PORTS]; /* protect among processes for registers access*/ struct mutex reg_mutex; - int irq; struct irq_domain *irq_domain; - u32 irq_enable; int (*create_sgmii)(struct mt7530_priv *priv); u8 active_cpu_ports; struct mdio_device *mdiodev; diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 68d1e891752b8..901929f96b380 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -2208,13 +2208,11 @@ mv88e6xxx_port_vlan_prepare(struct dsa_switch *ds, int port, return err; } -static int mv88e6xxx_port_db_load_purge(struct mv88e6xxx_chip *chip, int port, - const unsigned char *addr, u16 vid, - u8 state) +static int mv88e6xxx_port_db_get(struct mv88e6xxx_chip *chip, + const unsigned char *addr, u16 vid, + u16 *fid, struct mv88e6xxx_atu_entry *entry) { - struct mv88e6xxx_atu_entry entry; struct mv88e6xxx_vtu_entry vlan; - u16 fid; int err; /* Ports have two private address databases: one for when the port is @@ -2225,7 +2223,7 @@ static int mv88e6xxx_port_db_load_purge(struct mv88e6xxx_chip *chip, int port, * VLAN ID into the port's database used for VLAN-unaware bridging. */ if (vid == 0) { - fid = MV88E6XXX_FID_BRIDGED; + *fid = MV88E6XXX_FID_BRIDGED; } else { err = mv88e6xxx_vtu_get(chip, vid, &vlan); if (err) @@ -2235,14 +2233,39 @@ static int mv88e6xxx_port_db_load_purge(struct mv88e6xxx_chip *chip, int port, if (!vlan.valid) return -EOPNOTSUPP; - fid = vlan.fid; + *fid = vlan.fid; } - entry.state = 0; - ether_addr_copy(entry.mac, addr); - eth_addr_dec(entry.mac); + entry->state = 0; + ether_addr_copy(entry->mac, addr); + eth_addr_dec(entry->mac); + + return mv88e6xxx_g1_atu_getnext(chip, *fid, entry); +} + +static bool mv88e6xxx_port_db_find(struct mv88e6xxx_chip *chip, + const unsigned char *addr, u16 vid) +{ + struct mv88e6xxx_atu_entry entry; + u16 fid; + int err; + + err = mv88e6xxx_port_db_get(chip, addr, vid, &fid, &entry); + if (err) + return false; + + return entry.state && ether_addr_equal(entry.mac, addr); +} + +static int mv88e6xxx_port_db_load_purge(struct mv88e6xxx_chip *chip, int port, + const unsigned char *addr, u16 vid, + u8 state) +{ + struct mv88e6xxx_atu_entry entry; + u16 fid; + int err; - err = mv88e6xxx_g1_atu_getnext(chip, fid, &entry); + err = mv88e6xxx_port_db_get(chip, addr, vid, &fid, &entry); if (err) return err; @@ -2846,6 +2869,13 @@ static int mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port, mv88e6xxx_reg_lock(chip); err = mv88e6xxx_port_db_load_purge(chip, port, addr, vid, MV88E6XXX_G1_ATU_DATA_STATE_UC_STATIC); + if (err) + goto out; + + if (!mv88e6xxx_port_db_find(chip, addr, vid)) + err = -ENOSPC; + +out: mv88e6xxx_reg_unlock(chip); return err; @@ -3644,6 +3674,21 @@ static int mv88e6xxx_stats_setup(struct mv88e6xxx_chip *chip) return mv88e6xxx_g1_stats_clear(chip); } +static int mv88e6320_setup_errata(struct mv88e6xxx_chip *chip) +{ + u16 dummy; + int err; + + /* Workaround for erratum + * 3.3 RGMII timing may be out of spec when transmit delay is enabled + */ + err = mv88e6xxx_port_hidden_write(chip, 0, 0xf, 0x7, 0xe000); + if (err) + return err; + + return mv88e6xxx_port_hidden_read(chip, 0, 0xf, 0x7, &dummy); +} + /* Check if the errata has already been applied. 
*/ static bool mv88e6390_setup_errata_applied(struct mv88e6xxx_chip *chip) { @@ -5100,6 +5145,7 @@ static const struct mv88e6xxx_ops mv88e6290_ops = { static const struct mv88e6xxx_ops mv88e6320_ops = { /* MV88E6XXX_FAMILY_6320 */ + .setup_errata = mv88e6320_setup_errata, .ieee_pri_map = mv88e6085_g1_ieee_pri_map, .ip_pri_map = mv88e6085_g1_ip_pri_map, .irl_init_all = mv88e6352_g2_irl_init_all, @@ -5115,6 +5161,7 @@ static const struct mv88e6xxx_ops mv88e6320_ops = { .port_set_rgmii_delay = mv88e6320_port_set_rgmii_delay, .port_set_speed_duplex = mv88e6185_port_set_speed_duplex, .port_tag_remap = mv88e6095_port_tag_remap, + .port_set_policy = mv88e6352_port_set_policy, .port_set_frame_mode = mv88e6351_port_set_frame_mode, .port_set_ucast_flood = mv88e6352_port_set_ucast_flood, .port_set_mcast_flood = mv88e6352_port_set_mcast_flood, @@ -5139,8 +5186,10 @@ static const struct mv88e6xxx_ops mv88e6320_ops = { .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait, .hardware_reset_post = mv88e6xxx_g2_eeprom_wait, .reset = mv88e6352_g1_reset, - .vtu_getnext = mv88e6185_g1_vtu_getnext, - .vtu_loadpurge = mv88e6185_g1_vtu_loadpurge, + .vtu_getnext = mv88e6352_g1_vtu_getnext, + .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, + .stu_getnext = mv88e6352_g1_stu_getnext, + .stu_loadpurge = mv88e6352_g1_stu_loadpurge, .gpio_ops = &mv88e6352_gpio_ops, .avb_ops = &mv88e6352_avb_ops, .ptp_ops = &mv88e6352_ptp_ops, @@ -5149,6 +5198,7 @@ static const struct mv88e6xxx_ops mv88e6320_ops = { static const struct mv88e6xxx_ops mv88e6321_ops = { /* MV88E6XXX_FAMILY_6320 */ + .setup_errata = mv88e6320_setup_errata, .ieee_pri_map = mv88e6085_g1_ieee_pri_map, .ip_pri_map = mv88e6085_g1_ip_pri_map, .irl_init_all = mv88e6352_g2_irl_init_all, @@ -5164,6 +5214,7 @@ static const struct mv88e6xxx_ops mv88e6321_ops = { .port_set_rgmii_delay = mv88e6320_port_set_rgmii_delay, .port_set_speed_duplex = mv88e6185_port_set_speed_duplex, .port_tag_remap = mv88e6095_port_tag_remap, + .port_set_policy = mv88e6352_port_set_policy, .port_set_frame_mode = mv88e6351_port_set_frame_mode, .port_set_ucast_flood = mv88e6352_port_set_ucast_flood, .port_set_mcast_flood = mv88e6352_port_set_mcast_flood, @@ -5187,8 +5238,10 @@ static const struct mv88e6xxx_ops mv88e6321_ops = { .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait, .hardware_reset_post = mv88e6xxx_g2_eeprom_wait, .reset = mv88e6352_g1_reset, - .vtu_getnext = mv88e6185_g1_vtu_getnext, - .vtu_loadpurge = mv88e6185_g1_vtu_loadpurge, + .vtu_getnext = mv88e6352_g1_vtu_getnext, + .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, + .stu_getnext = mv88e6352_g1_stu_getnext, + .stu_loadpurge = mv88e6352_g1_stu_loadpurge, .gpio_ops = &mv88e6352_gpio_ops, .avb_ops = &mv88e6352_avb_ops, .ptp_ops = &mv88e6352_ptp_ops, @@ -5788,7 +5841,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .global1_addr = 0x1b, .global2_addr = 0x1c, .age_time_coeff = 3750, - .atu_move_port_mask = 0x1f, + .atu_move_port_mask = 0xf, .g1_irqs = 9, .g2_irqs = 10, .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_BANK1, @@ -6206,9 +6259,11 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .num_databases = 4096, .num_macs = 8192, .num_ports = 7, - .num_internal_phys = 5, + .num_internal_phys = 2, + .internal_phys_offset = 3, .num_gpio = 15, .max_vid = 4095, + .max_sid = 63, .port_base_addr = 0x10, .phy_base_addr = 0x0, .global1_addr = 0x1b, @@ -6232,9 +6287,11 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .num_databases = 4096, .num_macs = 8192, .num_ports = 7, - .num_internal_phys = 5, + .num_internal_phys = 2, 
+ .internal_phys_offset = 3, .num_gpio = 15, .max_vid = 4095, + .max_sid = 63, .port_base_addr = 0x10, .phy_base_addr = 0x0, .global1_addr = 0x1b, @@ -6244,6 +6301,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .g2_irqs = 10, .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_BANK1, .atu_move_port_mask = 0xf, + .pvt = true, .multi_chip = true, .edsa_support = MV88E6XXX_EDSA_SUPPORTED, .ptp_support = true, @@ -6266,7 +6324,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .global1_addr = 0x1b, .global2_addr = 0x1c, .age_time_coeff = 3750, - .atu_move_port_mask = 0x1f, + .atu_move_port_mask = 0xf, .g1_irqs = 9, .g2_irqs = 10, .stats_type = STATS_TYPE_BANK0 | STATS_TYPE_BANK1, @@ -6614,6 +6672,13 @@ static int mv88e6xxx_port_mdb_add(struct dsa_switch *ds, int port, mv88e6xxx_reg_lock(chip); err = mv88e6xxx_port_db_load_purge(chip, port, mdb->addr, mdb->vid, MV88E6XXX_G1_ATU_DATA_STATE_MC_STATIC); + if (err) + goto out; + + if (!mv88e6xxx_port_db_find(chip, mdb->addr, mdb->vid)) + err = -ENOSPC; + +out: mv88e6xxx_reg_unlock(chip); return err; diff --git a/drivers/net/dsa/mv88e6xxx/pcs-6185.c b/drivers/net/dsa/mv88e6xxx/pcs-6185.c index 75ed1fa500a5c..af7e06d265f7b 100644 --- a/drivers/net/dsa/mv88e6xxx/pcs-6185.c +++ b/drivers/net/dsa/mv88e6xxx/pcs-6185.c @@ -138,7 +138,6 @@ static int mv88e6185_pcs_init(struct mv88e6xxx_chip *chip, int port) mpcs->chip = chip; mpcs->port = port; mpcs->phylink_pcs.ops = &mv88e6185_phylink_pcs_ops; - mpcs->phylink_pcs.neg_mode = true; irq = mv88e6xxx_serdes_irq_mapping(chip, port); if (irq) { diff --git a/drivers/net/dsa/mv88e6xxx/pcs-6352.c b/drivers/net/dsa/mv88e6xxx/pcs-6352.c index 143fe21d18349..36993400837ea 100644 --- a/drivers/net/dsa/mv88e6xxx/pcs-6352.c +++ b/drivers/net/dsa/mv88e6xxx/pcs-6352.c @@ -275,7 +275,6 @@ static struct marvell_c22_pcs *marvell_c22_pcs_alloc(struct device *dev, mpcs->mdio.bus = bus; mpcs->mdio.addr = addr; mpcs->phylink_pcs.ops = &marvell_c22_pcs_ops; - mpcs->phylink_pcs.neg_mode = true; return mpcs; } diff --git a/drivers/net/dsa/mv88e6xxx/pcs-639x.c b/drivers/net/dsa/mv88e6xxx/pcs-639x.c index 59f63d6beec85..5db17c0b77f5b 100644 --- a/drivers/net/dsa/mv88e6xxx/pcs-639x.c +++ b/drivers/net/dsa/mv88e6xxx/pcs-639x.c @@ -565,9 +565,7 @@ static int mv88e6390_pcs_init(struct mv88e6xxx_chip *chip, int port) return -ENOMEM; mpcs->sgmii_pcs.ops = &mv88e639x_sgmii_pcs_ops; - mpcs->sgmii_pcs.neg_mode = true; mpcs->xg_pcs.ops = &mv88e6390_xg_pcs_ops; - mpcs->xg_pcs.neg_mode = true; if (chip->info->prod_num == MV88E6XXX_PORT_SWITCH_ID_PROD_6190X || chip->info->prod_num == MV88E6XXX_PORT_SWITCH_ID_PROD_6390X) @@ -945,9 +943,7 @@ static int mv88e6393x_pcs_init(struct mv88e6xxx_chip *chip, int port) return -ENOMEM; mpcs->sgmii_pcs.ops = &mv88e6393x_sgmii_pcs_ops; - mpcs->sgmii_pcs.neg_mode = true; mpcs->xg_pcs.ops = &mv88e6393x_xg_pcs_ops; - mpcs->xg_pcs.neg_mode = true; mpcs->supports_5g = true; err = mv88e6393x_erratum_4_6(mpcs); diff --git a/drivers/net/dsa/qca/qca8k-8xxx.c b/drivers/net/dsa/qca/qca8k-8xxx.c index e8cb4da15dbe4..a36b8b07030e3 100644 --- a/drivers/net/dsa/qca/qca8k-8xxx.c +++ b/drivers/net/dsa/qca/qca8k-8xxx.c @@ -1634,7 +1634,6 @@ static void qca8k_setup_pcs(struct qca8k_priv *priv, struct qca8k_pcs *qpcs, int port) { qpcs->pcs.ops = &qca8k_pcs_ops; - qpcs->pcs.neg_mode = true; /* We don't have interrupts for link changes, so we need to poll */ qpcs->pcs.poll = true; diff --git a/drivers/net/dsa/realtek/Kconfig b/drivers/net/dsa/realtek/Kconfig index 6989972eebc30..d6eb6713e5f6b 100644 --- 
a/drivers/net/dsa/realtek/Kconfig +++ b/drivers/net/dsa/realtek/Kconfig @@ -43,4 +43,10 @@ config NET_DSA_REALTEK_RTL8366RB help Select to enable support for Realtek RTL8366RB. +config NET_DSA_REALTEK_RTL8366RB_LEDS + bool + depends on (LEDS_CLASS=y || LEDS_CLASS=NET_DSA_REALTEK_RTL8366RB) + depends on NET_DSA_REALTEK_RTL8366RB + default NET_DSA_REALTEK_RTL8366RB + endif diff --git a/drivers/net/dsa/realtek/Makefile b/drivers/net/dsa/realtek/Makefile index 35491dc20d6d6..17367bcba496c 100644 --- a/drivers/net/dsa/realtek/Makefile +++ b/drivers/net/dsa/realtek/Makefile @@ -12,4 +12,7 @@ endif obj-$(CONFIG_NET_DSA_REALTEK_RTL8366RB) += rtl8366.o rtl8366-objs := rtl8366-core.o rtl8366rb.o +ifdef CONFIG_NET_DSA_REALTEK_RTL8366RB_LEDS +rtl8366-objs += rtl8366rb-leds.o +endif obj-$(CONFIG_NET_DSA_REALTEK_RTL8365MB) += rtl8365mb.o diff --git a/drivers/net/dsa/realtek/rtl8366rb-leds.c b/drivers/net/dsa/realtek/rtl8366rb-leds.c new file mode 100644 index 0000000000000..99c890681ae60 --- /dev/null +++ b/drivers/net/dsa/realtek/rtl8366rb-leds.c @@ -0,0 +1,177 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include +#include +#include "rtl83xx.h" +#include "rtl8366rb.h" + +static inline u32 rtl8366rb_led_group_port_mask(u8 led_group, u8 port) +{ + switch (led_group) { + case 0: + return FIELD_PREP(RTL8366RB_LED_0_X_CTRL_MASK, BIT(port)); + case 1: + return FIELD_PREP(RTL8366RB_LED_0_X_CTRL_MASK, BIT(port)); + case 2: + return FIELD_PREP(RTL8366RB_LED_0_X_CTRL_MASK, BIT(port)); + case 3: + return FIELD_PREP(RTL8366RB_LED_0_X_CTRL_MASK, BIT(port)); + default: + return 0; + } +} + +static int rb8366rb_get_port_led(struct rtl8366rb_led *led) +{ + struct realtek_priv *priv = led->priv; + u8 led_group = led->led_group; + u8 port_num = led->port_num; + int ret; + u32 val; + + ret = regmap_read(priv->map, RTL8366RB_LED_X_X_CTRL_REG(led_group), + &val); + if (ret) { + dev_err(priv->dev, "error reading LED on port %d group %d\n", + led_group, port_num); + return ret; + } + + return !!(val & rtl8366rb_led_group_port_mask(led_group, port_num)); +} + +static int rb8366rb_set_port_led(struct rtl8366rb_led *led, bool enable) +{ + struct realtek_priv *priv = led->priv; + u8 led_group = led->led_group; + u8 port_num = led->port_num; + int ret; + + ret = regmap_update_bits(priv->map, + RTL8366RB_LED_X_X_CTRL_REG(led_group), + rtl8366rb_led_group_port_mask(led_group, + port_num), + enable ? 
0xffff : 0); + if (ret) { + dev_err(priv->dev, "error updating LED on port %d group %d\n", + led_group, port_num); + return ret; + } + + /* Change the LED group to manual controlled LEDs if required */ + ret = rb8366rb_set_ledgroup_mode(priv, led_group, + RTL8366RB_LEDGROUP_FORCE); + + if (ret) { + dev_err(priv->dev, "error updating LED GROUP group %d\n", + led_group); + return ret; + } + + return 0; +} + +static int +rtl8366rb_cled_brightness_set_blocking(struct led_classdev *ldev, + enum led_brightness brightness) +{ + struct rtl8366rb_led *led = container_of(ldev, struct rtl8366rb_led, + cdev); + + return rb8366rb_set_port_led(led, brightness == LED_ON); +} + +static int rtl8366rb_setup_led(struct realtek_priv *priv, struct dsa_port *dp, + struct fwnode_handle *led_fwnode) +{ + struct rtl8366rb *rb = priv->chip_data; + struct led_init_data init_data = { }; + enum led_default_state state; + struct rtl8366rb_led *led; + u32 led_group; + int ret; + + ret = fwnode_property_read_u32(led_fwnode, "reg", &led_group); + if (ret) + return ret; + + if (led_group >= RTL8366RB_NUM_LEDGROUPS) { + dev_warn(priv->dev, "Invalid LED reg %d defined for port %d", + led_group, dp->index); + return -EINVAL; + } + + led = &rb->leds[dp->index][led_group]; + led->port_num = dp->index; + led->led_group = led_group; + led->priv = priv; + + state = led_init_default_state_get(led_fwnode); + switch (state) { + case LEDS_DEFSTATE_ON: + led->cdev.brightness = 1; + rb8366rb_set_port_led(led, 1); + break; + case LEDS_DEFSTATE_KEEP: + led->cdev.brightness = + rb8366rb_get_port_led(led); + break; + case LEDS_DEFSTATE_OFF: + default: + led->cdev.brightness = 0; + rb8366rb_set_port_led(led, 0); + } + + led->cdev.max_brightness = 1; + led->cdev.brightness_set_blocking = + rtl8366rb_cled_brightness_set_blocking; + init_data.fwnode = led_fwnode; + init_data.devname_mandatory = true; + + init_data.devicename = kasprintf(GFP_KERNEL, "Realtek-%d:0%d:%d", + dp->ds->index, dp->index, led_group); + if (!init_data.devicename) + return -ENOMEM; + + ret = devm_led_classdev_register_ext(priv->dev, &led->cdev, &init_data); + if (ret) { + dev_warn(priv->dev, "Failed to init LED %d for port %d", + led_group, dp->index); + return ret; + } + + return 0; +} + +int rtl8366rb_setup_leds(struct realtek_priv *priv) +{ + struct dsa_switch *ds = &priv->ds; + struct device_node *leds_np; + struct dsa_port *dp; + int ret = 0; + + dsa_switch_for_each_port(dp, ds) { + if (!dp->dn) + continue; + + leds_np = of_get_child_by_name(dp->dn, "leds"); + if (!leds_np) { + dev_dbg(priv->dev, "No leds defined for port %d", + dp->index); + continue; + } + + for_each_child_of_node_scoped(leds_np, led_np) { + ret = rtl8366rb_setup_led(priv, dp, + of_fwnode_handle(led_np)); + if (ret) + break; + } + + of_node_put(leds_np); + if (ret) + return ret; + } + return 0; +} diff --git a/drivers/net/dsa/realtek/rtl8366rb.c b/drivers/net/dsa/realtek/rtl8366rb.c index 4c4a95d4380ce..f54771cab56d4 100644 --- a/drivers/net/dsa/realtek/rtl8366rb.c +++ b/drivers/net/dsa/realtek/rtl8366rb.c @@ -27,11 +27,7 @@ #include "realtek-smi.h" #include "realtek-mdio.h" #include "rtl83xx.h" - -#define RTL8366RB_PORT_NUM_CPU 5 -#define RTL8366RB_NUM_PORTS 6 -#define RTL8366RB_PHY_NO_MAX 4 -#define RTL8366RB_PHY_ADDR_MAX 31 +#include "rtl8366rb.h" /* Switch Global Configuration register */ #define RTL8366RB_SGCR 0x0000 @@ -176,39 +172,6 @@ */ #define RTL8366RB_VLAN_INGRESS_CTRL2_REG 0x037f -/* LED control registers */ -/* The LED blink rate is global; it is used by all triggers in all groups. 
*/ -#define RTL8366RB_LED_BLINKRATE_REG 0x0430 -#define RTL8366RB_LED_BLINKRATE_MASK 0x0007 -#define RTL8366RB_LED_BLINKRATE_28MS 0x0000 -#define RTL8366RB_LED_BLINKRATE_56MS 0x0001 -#define RTL8366RB_LED_BLINKRATE_84MS 0x0002 -#define RTL8366RB_LED_BLINKRATE_111MS 0x0003 -#define RTL8366RB_LED_BLINKRATE_222MS 0x0004 -#define RTL8366RB_LED_BLINKRATE_446MS 0x0005 - -/* LED trigger event for each group */ -#define RTL8366RB_LED_CTRL_REG 0x0431 -#define RTL8366RB_LED_CTRL_OFFSET(led_group) \ - (4 * (led_group)) -#define RTL8366RB_LED_CTRL_MASK(led_group) \ - (0xf << RTL8366RB_LED_CTRL_OFFSET(led_group)) - -/* The RTL8366RB_LED_X_X registers are used to manually set the LED state only - * when the corresponding LED group in RTL8366RB_LED_CTRL_REG is - * RTL8366RB_LEDGROUP_FORCE. Otherwise, it is ignored. - */ -#define RTL8366RB_LED_0_1_CTRL_REG 0x0432 -#define RTL8366RB_LED_2_3_CTRL_REG 0x0433 -#define RTL8366RB_LED_X_X_CTRL_REG(led_group) \ - ((led_group) <= 1 ? \ - RTL8366RB_LED_0_1_CTRL_REG : \ - RTL8366RB_LED_2_3_CTRL_REG) -#define RTL8366RB_LED_0_X_CTRL_MASK GENMASK(5, 0) -#define RTL8366RB_LED_X_1_CTRL_MASK GENMASK(11, 6) -#define RTL8366RB_LED_2_X_CTRL_MASK GENMASK(5, 0) -#define RTL8366RB_LED_X_3_CTRL_MASK GENMASK(11, 6) - #define RTL8366RB_MIB_COUNT 33 #define RTL8366RB_GLOBAL_MIB_COUNT 1 #define RTL8366RB_MIB_COUNTER_PORT_OFFSET 0x0050 @@ -244,7 +207,6 @@ #define RTL8366RB_PORT_STATUS_AN_MASK 0x0080 #define RTL8366RB_NUM_VLANS 16 -#define RTL8366RB_NUM_LEDGROUPS 4 #define RTL8366RB_NUM_VIDS 4096 #define RTL8366RB_PRIORITYMAX 7 #define RTL8366RB_NUM_FIDS 8 @@ -351,46 +313,6 @@ #define RTL8366RB_GREEN_FEATURE_TX BIT(0) #define RTL8366RB_GREEN_FEATURE_RX BIT(2) -enum rtl8366_ledgroup_mode { - RTL8366RB_LEDGROUP_OFF = 0x0, - RTL8366RB_LEDGROUP_DUP_COL = 0x1, - RTL8366RB_LEDGROUP_LINK_ACT = 0x2, - RTL8366RB_LEDGROUP_SPD1000 = 0x3, - RTL8366RB_LEDGROUP_SPD100 = 0x4, - RTL8366RB_LEDGROUP_SPD10 = 0x5, - RTL8366RB_LEDGROUP_SPD1000_ACT = 0x6, - RTL8366RB_LEDGROUP_SPD100_ACT = 0x7, - RTL8366RB_LEDGROUP_SPD10_ACT = 0x8, - RTL8366RB_LEDGROUP_SPD100_10_ACT = 0x9, - RTL8366RB_LEDGROUP_FIBER = 0xa, - RTL8366RB_LEDGROUP_AN_FAULT = 0xb, - RTL8366RB_LEDGROUP_LINK_RX = 0xc, - RTL8366RB_LEDGROUP_LINK_TX = 0xd, - RTL8366RB_LEDGROUP_MASTER = 0xe, - RTL8366RB_LEDGROUP_FORCE = 0xf, - - __RTL8366RB_LEDGROUP_MODE_MAX -}; - -struct rtl8366rb_led { - u8 port_num; - u8 led_group; - struct realtek_priv *priv; - struct led_classdev cdev; -}; - -/** - * struct rtl8366rb - RTL8366RB-specific data - * @max_mtu: per-port max MTU setting - * @pvid_enabled: if PVID is set for respective port - * @leds: per-port and per-ledgroup led info - */ -struct rtl8366rb { - unsigned int max_mtu[RTL8366RB_NUM_PORTS]; - bool pvid_enabled[RTL8366RB_NUM_PORTS]; - struct rtl8366rb_led leds[RTL8366RB_NUM_PORTS][RTL8366RB_NUM_LEDGROUPS]; -}; - static struct rtl8366_mib_counter rtl8366rb_mib_counters[] = { { 0, 0, 4, "IfInOctets" }, { 0, 4, 4, "EtherStatsOctets" }, @@ -831,9 +753,10 @@ static int rtl8366rb_jam_table(const struct rtl8366rb_jam_tbl_entry *jam_table, return 0; } -static int rb8366rb_set_ledgroup_mode(struct realtek_priv *priv, - u8 led_group, - enum rtl8366_ledgroup_mode mode) +/* This code is used also with LEDs disabled */ +int rb8366rb_set_ledgroup_mode(struct realtek_priv *priv, + u8 led_group, + enum rtl8366_ledgroup_mode mode) { int ret; u32 val; @@ -850,144 +773,7 @@ static int rb8366rb_set_ledgroup_mode(struct realtek_priv *priv, return 0; } -static inline u32 rtl8366rb_led_group_port_mask(u8 led_group, u8 port) -{ - 
switch (led_group) { - case 0: - return FIELD_PREP(RTL8366RB_LED_0_X_CTRL_MASK, BIT(port)); - case 1: - return FIELD_PREP(RTL8366RB_LED_0_X_CTRL_MASK, BIT(port)); - case 2: - return FIELD_PREP(RTL8366RB_LED_0_X_CTRL_MASK, BIT(port)); - case 3: - return FIELD_PREP(RTL8366RB_LED_0_X_CTRL_MASK, BIT(port)); - default: - return 0; - } -} - -static int rb8366rb_get_port_led(struct rtl8366rb_led *led) -{ - struct realtek_priv *priv = led->priv; - u8 led_group = led->led_group; - u8 port_num = led->port_num; - int ret; - u32 val; - - ret = regmap_read(priv->map, RTL8366RB_LED_X_X_CTRL_REG(led_group), - &val); - if (ret) { - dev_err(priv->dev, "error reading LED on port %d group %d\n", - led_group, port_num); - return ret; - } - - return !!(val & rtl8366rb_led_group_port_mask(led_group, port_num)); -} - -static int rb8366rb_set_port_led(struct rtl8366rb_led *led, bool enable) -{ - struct realtek_priv *priv = led->priv; - u8 led_group = led->led_group; - u8 port_num = led->port_num; - int ret; - - ret = regmap_update_bits(priv->map, - RTL8366RB_LED_X_X_CTRL_REG(led_group), - rtl8366rb_led_group_port_mask(led_group, - port_num), - enable ? 0xffff : 0); - if (ret) { - dev_err(priv->dev, "error updating LED on port %d group %d\n", - led_group, port_num); - return ret; - } - - /* Change the LED group to manual controlled LEDs if required */ - ret = rb8366rb_set_ledgroup_mode(priv, led_group, - RTL8366RB_LEDGROUP_FORCE); - - if (ret) { - dev_err(priv->dev, "error updating LED GROUP group %d\n", - led_group); - return ret; - } - - return 0; -} - -static int -rtl8366rb_cled_brightness_set_blocking(struct led_classdev *ldev, - enum led_brightness brightness) -{ - struct rtl8366rb_led *led = container_of(ldev, struct rtl8366rb_led, - cdev); - - return rb8366rb_set_port_led(led, brightness == LED_ON); -} - -static int rtl8366rb_setup_led(struct realtek_priv *priv, struct dsa_port *dp, - struct fwnode_handle *led_fwnode) -{ - struct rtl8366rb *rb = priv->chip_data; - struct led_init_data init_data = { }; - enum led_default_state state; - struct rtl8366rb_led *led; - u32 led_group; - int ret; - - ret = fwnode_property_read_u32(led_fwnode, "reg", &led_group); - if (ret) - return ret; - - if (led_group >= RTL8366RB_NUM_LEDGROUPS) { - dev_warn(priv->dev, "Invalid LED reg %d defined for port %d", - led_group, dp->index); - return -EINVAL; - } - - led = &rb->leds[dp->index][led_group]; - led->port_num = dp->index; - led->led_group = led_group; - led->priv = priv; - - state = led_init_default_state_get(led_fwnode); - switch (state) { - case LEDS_DEFSTATE_ON: - led->cdev.brightness = 1; - rb8366rb_set_port_led(led, 1); - break; - case LEDS_DEFSTATE_KEEP: - led->cdev.brightness = - rb8366rb_get_port_led(led); - break; - case LEDS_DEFSTATE_OFF: - default: - led->cdev.brightness = 0; - rb8366rb_set_port_led(led, 0); - } - - led->cdev.max_brightness = 1; - led->cdev.brightness_set_blocking = - rtl8366rb_cled_brightness_set_blocking; - init_data.fwnode = led_fwnode; - init_data.devname_mandatory = true; - - init_data.devicename = kasprintf(GFP_KERNEL, "Realtek-%d:0%d:%d", - dp->ds->index, dp->index, led_group); - if (!init_data.devicename) - return -ENOMEM; - - ret = devm_led_classdev_register_ext(priv->dev, &led->cdev, &init_data); - if (ret) { - dev_warn(priv->dev, "Failed to init LED %d for port %d", - led_group, dp->index); - return ret; - } - - return 0; -} - +/* This code is used also with LEDs disabled */ static int rtl8366rb_setup_all_leds_off(struct realtek_priv *priv) { int ret = 0; @@ -1008,38 +794,6 @@ static 
int rtl8366rb_setup_all_leds_off(struct realtek_priv *priv) return ret; } -static int rtl8366rb_setup_leds(struct realtek_priv *priv) -{ - struct dsa_switch *ds = &priv->ds; - struct device_node *leds_np; - struct dsa_port *dp; - int ret = 0; - - dsa_switch_for_each_port(dp, ds) { - if (!dp->dn) - continue; - - leds_np = of_get_child_by_name(dp->dn, "leds"); - if (!leds_np) { - dev_dbg(priv->dev, "No leds defined for port %d", - dp->index); - continue; - } - - for_each_child_of_node_scoped(leds_np, led_np) { - ret = rtl8366rb_setup_led(priv, dp, - of_fwnode_handle(led_np)); - if (ret) - break; - } - - of_node_put(leds_np); - if (ret) - return ret; - } - return 0; -} - static int rtl8366rb_setup(struct dsa_switch *ds) { struct realtek_priv *priv = ds->priv; diff --git a/drivers/net/dsa/realtek/rtl8366rb.h b/drivers/net/dsa/realtek/rtl8366rb.h new file mode 100644 index 0000000000000..685ff3275faa1 --- /dev/null +++ b/drivers/net/dsa/realtek/rtl8366rb.h @@ -0,0 +1,107 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ + +#ifndef _RTL8366RB_H +#define _RTL8366RB_H + +#include "realtek.h" + +#define RTL8366RB_PORT_NUM_CPU 5 +#define RTL8366RB_NUM_PORTS 6 +#define RTL8366RB_PHY_NO_MAX 4 +#define RTL8366RB_NUM_LEDGROUPS 4 +#define RTL8366RB_PHY_ADDR_MAX 31 + +/* LED control registers */ +/* The LED blink rate is global; it is used by all triggers in all groups. */ +#define RTL8366RB_LED_BLINKRATE_REG 0x0430 +#define RTL8366RB_LED_BLINKRATE_MASK 0x0007 +#define RTL8366RB_LED_BLINKRATE_28MS 0x0000 +#define RTL8366RB_LED_BLINKRATE_56MS 0x0001 +#define RTL8366RB_LED_BLINKRATE_84MS 0x0002 +#define RTL8366RB_LED_BLINKRATE_111MS 0x0003 +#define RTL8366RB_LED_BLINKRATE_222MS 0x0004 +#define RTL8366RB_LED_BLINKRATE_446MS 0x0005 + +/* LED trigger event for each group */ +#define RTL8366RB_LED_CTRL_REG 0x0431 +#define RTL8366RB_LED_CTRL_OFFSET(led_group) \ + (4 * (led_group)) +#define RTL8366RB_LED_CTRL_MASK(led_group) \ + (0xf << RTL8366RB_LED_CTRL_OFFSET(led_group)) + +/* The RTL8366RB_LED_X_X registers are used to manually set the LED state only + * when the corresponding LED group in RTL8366RB_LED_CTRL_REG is + * RTL8366RB_LEDGROUP_FORCE. Otherwise, it is ignored. + */ +#define RTL8366RB_LED_0_1_CTRL_REG 0x0432 +#define RTL8366RB_LED_2_3_CTRL_REG 0x0433 +#define RTL8366RB_LED_X_X_CTRL_REG(led_group) \ + ((led_group) <= 1 ? 
\
+	 RTL8366RB_LED_0_1_CTRL_REG : \
+	 RTL8366RB_LED_2_3_CTRL_REG)
+#define RTL8366RB_LED_0_X_CTRL_MASK	GENMASK(5, 0)
+#define RTL8366RB_LED_X_1_CTRL_MASK	GENMASK(11, 6)
+#define RTL8366RB_LED_2_X_CTRL_MASK	GENMASK(5, 0)
+#define RTL8366RB_LED_X_3_CTRL_MASK	GENMASK(11, 6)
+
+enum rtl8366_ledgroup_mode {
+	RTL8366RB_LEDGROUP_OFF			= 0x0,
+	RTL8366RB_LEDGROUP_DUP_COL		= 0x1,
+	RTL8366RB_LEDGROUP_LINK_ACT		= 0x2,
+	RTL8366RB_LEDGROUP_SPD1000		= 0x3,
+	RTL8366RB_LEDGROUP_SPD100		= 0x4,
+	RTL8366RB_LEDGROUP_SPD10		= 0x5,
+	RTL8366RB_LEDGROUP_SPD1000_ACT		= 0x6,
+	RTL8366RB_LEDGROUP_SPD100_ACT		= 0x7,
+	RTL8366RB_LEDGROUP_SPD10_ACT		= 0x8,
+	RTL8366RB_LEDGROUP_SPD100_10_ACT	= 0x9,
+	RTL8366RB_LEDGROUP_FIBER		= 0xa,
+	RTL8366RB_LEDGROUP_AN_FAULT		= 0xb,
+	RTL8366RB_LEDGROUP_LINK_RX		= 0xc,
+	RTL8366RB_LEDGROUP_LINK_TX		= 0xd,
+	RTL8366RB_LEDGROUP_MASTER		= 0xe,
+	RTL8366RB_LEDGROUP_FORCE		= 0xf,
+
+	__RTL8366RB_LEDGROUP_MODE_MAX
+};
+
+#if IS_ENABLED(CONFIG_NET_DSA_REALTEK_RTL8366RB_LEDS)
+
+struct rtl8366rb_led {
+	u8 port_num;
+	u8 led_group;
+	struct realtek_priv *priv;
+	struct led_classdev cdev;
+};
+
+int rtl8366rb_setup_leds(struct realtek_priv *priv);
+
+#else
+
+static inline int rtl8366rb_setup_leds(struct realtek_priv *priv)
+{
+	return 0;
+}
+
+#endif /* IS_ENABLED(CONFIG_NET_DSA_REALTEK_RTL8366RB_LEDS) */
+
+/**
+ * struct rtl8366rb - RTL8366RB-specific data
+ * @max_mtu: per-port max MTU setting
+ * @pvid_enabled: if PVID is set for respective port
+ * @leds: per-port and per-ledgroup led info
+ */
+struct rtl8366rb {
+	unsigned int max_mtu[RTL8366RB_NUM_PORTS];
+	bool pvid_enabled[RTL8366RB_NUM_PORTS];
+#if IS_ENABLED(CONFIG_NET_DSA_REALTEK_RTL8366RB_LEDS)
+	struct rtl8366rb_led leds[RTL8366RB_NUM_PORTS][RTL8366RB_NUM_LEDGROUPS];
+#endif
+};
+
+/* This code is used also with LEDs disabled */
+int rb8366rb_set_ledgroup_mode(struct realtek_priv *priv,
+			       u8 led_group,
+			       enum rtl8366_ledgroup_mode mode);
+
+#endif /* _RTL8366RB_H */
diff --git a/drivers/net/dsa/rzn1_a5psw.c b/drivers/net/dsa/rzn1_a5psw.c
index 66974379334a4..31ea8130a495f 100644
--- a/drivers/net/dsa/rzn1_a5psw.c
+++ b/drivers/net/dsa/rzn1_a5psw.c
@@ -1248,18 +1248,16 @@ static int a5psw_probe(struct platform_device *pdev)
 	if (ret)
 		goto clk_disable;
 
-	mdio = of_get_child_by_name(dev->of_node, "mdio");
-	if (of_device_is_available(mdio)) {
+	mdio = of_get_available_child_by_name(dev->of_node, "mdio");
+	if (mdio) {
 		ret = a5psw_probe_mdio(a5psw, mdio);
+		of_node_put(mdio);
 		if (ret) {
-			of_node_put(mdio);
 			dev_err(dev, "Failed to register MDIO: %d\n", ret);
 			goto hclk_disable;
 		}
 	}
 
-	of_node_put(mdio);
-
 	ds = &a5psw->ds;
 	ds->dev = dev;
 	ds->num_ports = A5PSW_PORTS_NUM;
diff --git a/drivers/net/dsa/sja1105/sja1105_ethtool.c b/drivers/net/dsa/sja1105/sja1105_ethtool.c
index 2ea64b1d026d7..84d7d3f66bd03 100644
--- a/drivers/net/dsa/sja1105/sja1105_ethtool.c
+++ b/drivers/net/dsa/sja1105/sja1105_ethtool.c
@@ -571,6 +571,9 @@ void sja1105_get_ethtool_stats(struct dsa_switch *ds, int port, u64 *data)
 		max_ctr = __MAX_SJA1105PQRS_PORT_COUNTER;
 
 	for (i = 0; i < max_ctr; i++) {
+		if (!strlen(sja1105_port_counters[i].name))
+			continue;
+
 		rc = sja1105_port_counter_read(priv, port, i, &data[k++]);
 		if (rc) {
 			dev_err(ds->dev,
@@ -596,8 +599,12 @@ void sja1105_get_strings(struct dsa_switch *ds, int port,
 	else
 		max_ctr = __MAX_SJA1105PQRS_PORT_COUNTER;
 
-	for (i = 0; i < max_ctr; i++)
+	for (i = 0; i < max_ctr; i++) {
+		if (!strlen(sja1105_port_counters[i].name))
+			continue;
+
 		ethtool_puts(&data, sja1105_port_counters[i].name);
+	}
 }
 
 int sja1105_get_sset_count(struct dsa_switch *ds, 
int port, int sset) diff --git a/drivers/net/dsa/sja1105/sja1105_mdio.c b/drivers/net/dsa/sja1105/sja1105_mdio.c index 84b7169f29748..8d535c033cef7 100644 --- a/drivers/net/dsa/sja1105/sja1105_mdio.c +++ b/drivers/net/dsa/sja1105/sja1105_mdio.c @@ -468,13 +468,10 @@ int sja1105_mdiobus_register(struct dsa_switch *ds) if (rc) return rc; - mdio_node = of_get_child_by_name(switch_node, "mdios"); + mdio_node = of_get_available_child_by_name(switch_node, "mdios"); if (!mdio_node) return 0; - if (!of_device_is_available(mdio_node)) - goto out_put_mdio_node; - if (regs->mdio_100base_tx != SJA1105_RSV_ADDR) { rc = sja1105_mdiobus_base_tx_register(priv, mdio_node); if (rc) @@ -487,7 +484,6 @@ int sja1105_mdiobus_register(struct dsa_switch *ds) goto err_free_base_tx_mdiobus; } -out_put_mdio_node: of_node_put(mdio_node); return 0; diff --git a/drivers/net/dsa/sja1105/sja1105_ptp.c b/drivers/net/dsa/sja1105/sja1105_ptp.c index a1f4ca6ad888f..08b45fdd1d248 100644 --- a/drivers/net/dsa/sja1105/sja1105_ptp.c +++ b/drivers/net/dsa/sja1105/sja1105_ptp.c @@ -61,17 +61,21 @@ enum sja1105_ptp_clk_mode { int sja1105_hwtstamp_set(struct dsa_switch *ds, int port, struct ifreq *ifr) { struct sja1105_private *priv = ds->priv; + unsigned long hwts_tx_en, hwts_rx_en; struct hwtstamp_config config; if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) return -EFAULT; + hwts_tx_en = priv->hwts_tx_en; + hwts_rx_en = priv->hwts_rx_en; + switch (config.tx_type) { case HWTSTAMP_TX_OFF: - priv->hwts_tx_en &= ~BIT(port); + hwts_tx_en &= ~BIT(port); break; case HWTSTAMP_TX_ON: - priv->hwts_tx_en |= BIT(port); + hwts_tx_en |= BIT(port); break; default: return -ERANGE; @@ -79,15 +83,21 @@ int sja1105_hwtstamp_set(struct dsa_switch *ds, int port, struct ifreq *ifr) switch (config.rx_filter) { case HWTSTAMP_FILTER_NONE: - priv->hwts_rx_en &= ~BIT(port); + hwts_rx_en &= ~BIT(port); break; - default: - priv->hwts_rx_en |= BIT(port); + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: + hwts_rx_en |= BIT(port); break; + default: + return -ERANGE; } if (copy_to_user(ifr->ifr_data, &config, sizeof(config))) return -EFAULT; + + priv->hwts_tx_en = hwts_tx_en; + priv->hwts_rx_en = hwts_rx_en; + return 0; } diff --git a/drivers/net/dsa/sja1105/sja1105_static_config.c b/drivers/net/dsa/sja1105/sja1105_static_config.c index 3d790f8c6f4da..ffece8a400a66 100644 --- a/drivers/net/dsa/sja1105/sja1105_static_config.c +++ b/drivers/net/dsa/sja1105/sja1105_static_config.c @@ -1917,8 +1917,10 @@ int sja1105_table_delete_entry(struct sja1105_table *table, int i) if (i > table->entry_count) return -ERANGE; - memmove(entries + i * entry_size, entries + (i + 1) * entry_size, - (table->entry_count - i) * entry_size); + if (i + 1 < table->entry_count) { + memmove(entries + i * entry_size, entries + (i + 1) * entry_size, + (table->entry_count - i - 1) * entry_size); + } table->entry_count--; diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c index 005d79975f3bd..a4938c6a5ebb5 100644 --- a/drivers/net/dummy.c +++ b/drivers/net/dummy.c @@ -38,6 +38,7 @@ #include #include #include +#include #include #include diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig index 977b42bc1e8c1..0dcc72d75311a 100644 --- a/drivers/net/ethernet/Kconfig +++ b/drivers/net/ethernet/Kconfig @@ -20,6 +20,7 @@ source "drivers/net/ethernet/actions/Kconfig" source "drivers/net/ethernet/adaptec/Kconfig" source "drivers/net/ethernet/aeroflex/Kconfig" source "drivers/net/ethernet/agere/Kconfig" +source "drivers/net/ethernet/airoha/Kconfig" source 
"drivers/net/ethernet/alacritech/Kconfig" source "drivers/net/ethernet/allwinner/Kconfig" source "drivers/net/ethernet/alteon/Kconfig" @@ -82,6 +83,7 @@ source "drivers/net/ethernet/i825xx/Kconfig" source "drivers/net/ethernet/ibm/Kconfig" source "drivers/net/ethernet/intel/Kconfig" source "drivers/net/ethernet/xscale/Kconfig" +source "drivers/net/ethernet/yunsilicon/Kconfig" config JME tristate "JMicron(R) PCI-Express Gigabit Ethernet support" diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile index 99fa180dedb80..1a1b54ce202c0 100644 --- a/drivers/net/ethernet/Makefile +++ b/drivers/net/ethernet/Makefile @@ -10,6 +10,7 @@ obj-$(CONFIG_NET_VENDOR_ADAPTEC) += adaptec/ obj-$(CONFIG_GRETH) += aeroflex/ obj-$(CONFIG_NET_VENDOR_ADI) += adi/ obj-$(CONFIG_NET_VENDOR_AGERE) += agere/ +obj-$(CONFIG_NET_VENDOR_AIROHA) += airoha/ obj-$(CONFIG_NET_VENDOR_ALACRITECH) += alacritech/ obj-$(CONFIG_NET_VENDOR_ALLWINNER) += allwinner/ obj-$(CONFIG_NET_VENDOR_ALTEON) += alteon/ @@ -51,6 +52,7 @@ obj-$(CONFIG_NET_VENDOR_INTEL) += intel/ obj-$(CONFIG_NET_VENDOR_I825XX) += i825xx/ obj-$(CONFIG_NET_VENDOR_MICROSOFT) += microsoft/ obj-$(CONFIG_NET_VENDOR_XSCALE) += xscale/ +obj-$(CONFIG_NET_VENDOR_YUNSILICON) += yunsilicon/ obj-$(CONFIG_JME) += jme.o obj-$(CONFIG_KORINA) += korina.o obj-$(CONFIG_LANTIQ_ETOP) += lantiq_etop.o diff --git a/drivers/net/ethernet/actions/owl-emac.c b/drivers/net/ethernet/actions/owl-emac.c index 115f48b3342cc..0a08da799255b 100644 --- a/drivers/net/ethernet/actions/owl-emac.c +++ b/drivers/net/ethernet/actions/owl-emac.c @@ -1325,15 +1325,10 @@ static int owl_emac_mdio_init(struct net_device *netdev) struct device_node *mdio_node; int ret; - mdio_node = of_get_child_by_name(dev->of_node, "mdio"); + mdio_node = of_get_available_child_by_name(dev->of_node, "mdio"); if (!mdio_node) return -ENODEV; - if (!of_device_is_available(mdio_node)) { - ret = -ENODEV; - goto err_put_node; - } - priv->mii = devm_mdiobus_alloc(dev); if (!priv->mii) { ret = -ENOMEM; diff --git a/drivers/net/ethernet/adi/adin1110.c b/drivers/net/ethernet/adi/adin1110.c index 68fad5575fd4f..30f9d271e5953 100644 --- a/drivers/net/ethernet/adi/adin1110.c +++ b/drivers/net/ethernet/adi/adin1110.c @@ -1599,7 +1599,7 @@ static int adin1110_probe_netdevs(struct adin1110_priv *priv) netdev->netdev_ops = &adin1110_netdev_ops; netdev->ethtool_ops = &adin1110_ethtool_ops; netdev->priv_flags |= IFF_UNICAST_FLT; - netdev->netns_local = true; + netdev->netns_immutable = true; port_priv->phydev = get_phy_device(priv->mii_bus, i + 1, false); if (IS_ERR(port_priv->phydev)) { diff --git a/drivers/net/ethernet/airoha/Kconfig b/drivers/net/ethernet/airoha/Kconfig new file mode 100644 index 0000000000000..1a4cf6a259f67 --- /dev/null +++ b/drivers/net/ethernet/airoha/Kconfig @@ -0,0 +1,27 @@ +# SPDX-License-Identifier: GPL-2.0-only +config NET_VENDOR_AIROHA + bool "Airoha devices" + depends on ARCH_AIROHA || COMPILE_TEST + help + If you have a Airoha SoC with ethernet, say Y. + +if NET_VENDOR_AIROHA + +config NET_AIROHA_NPU + tristate "Airoha NPU support" + select WANT_DEV_COREDUMP + select REGMAP_MMIO + help + This driver supports Airoha Network Processor (NPU) available + on the Airoha Soc family. + +config NET_AIROHA + tristate "Airoha SoC Gigabit Ethernet support" + depends on NET_DSA || !NET_DSA + select NET_AIROHA_NPU + select PAGE_POOL + help + This driver supports the gigabit ethernet MACs in the + Airoha SoC family. 
+ +endif #NET_VENDOR_AIROHA diff --git a/drivers/net/ethernet/airoha/Makefile b/drivers/net/ethernet/airoha/Makefile new file mode 100644 index 0000000000000..94468053e34be --- /dev/null +++ b/drivers/net/ethernet/airoha/Makefile @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# Airoha for the Mediatek SoCs built-in ethernet macs +# + +obj-$(CONFIG_NET_AIROHA) += airoha-eth.o +airoha-eth-y := airoha_eth.o airoha_ppe.o +airoha-eth-$(CONFIG_DEBUG_FS) += airoha_ppe_debugfs.o +obj-$(CONFIG_NET_AIROHA_NPU) += airoha_npu.o diff --git a/drivers/net/ethernet/mediatek/airoha_eth.c b/drivers/net/ethernet/airoha/airoha_eth.c similarity index 66% rename from drivers/net/ethernet/mediatek/airoha_eth.c rename to drivers/net/ethernet/airoha/airoha_eth.c index 09f448f291240..c0a642568ac11 100644 --- a/drivers/net/ethernet/mediatek/airoha_eth.c +++ b/drivers/net/ethernet/airoha/airoha_eth.c @@ -3,925 +3,30 @@ * Copyright (c) 2024 AIROHA Inc * Author: Lorenzo Bianconi */ -#include -#include -#include -#include #include #include #include -#include #include #include -#include +#include #include #include #include -#define AIROHA_MAX_NUM_GDM_PORTS 1 -#define AIROHA_MAX_NUM_QDMA 2 -#define AIROHA_MAX_NUM_RSTS 3 -#define AIROHA_MAX_NUM_XSI_RSTS 5 -#define AIROHA_MAX_MTU 2000 -#define AIROHA_MAX_PACKET_SIZE 2048 -#define AIROHA_NUM_QOS_CHANNELS 4 -#define AIROHA_NUM_QOS_QUEUES 8 -#define AIROHA_NUM_TX_RING 32 -#define AIROHA_NUM_RX_RING 32 -#define AIROHA_NUM_NETDEV_TX_RINGS (AIROHA_NUM_TX_RING + \ - AIROHA_NUM_QOS_CHANNELS) -#define AIROHA_FE_MC_MAX_VLAN_TABLE 64 -#define AIROHA_FE_MC_MAX_VLAN_PORT 16 -#define AIROHA_NUM_TX_IRQ 2 -#define HW_DSCP_NUM 2048 -#define IRQ_QUEUE_LEN(_n) ((_n) ? 1024 : 2048) -#define TX_DSCP_NUM 1024 -#define RX_DSCP_NUM(_n) \ - ((_n) == 2 ? 128 : \ - (_n) == 11 ? 128 : \ - (_n) == 15 ? 128 : \ - (_n) == 0 ? 1024 : 16) - -#define PSE_RSV_PAGES 128 -#define PSE_QUEUE_RSV_PAGES 64 - -#define QDMA_METER_IDX(_n) ((_n) & 0xff) -#define QDMA_METER_GROUP(_n) (((_n) >> 8) & 0x3) - -/* FE */ -#define PSE_BASE 0x0100 -#define CSR_IFC_BASE 0x0200 -#define CDM1_BASE 0x0400 -#define GDM1_BASE 0x0500 -#define PPE1_BASE 0x0c00 - -#define CDM2_BASE 0x1400 -#define GDM2_BASE 0x1500 - -#define GDM3_BASE 0x1100 -#define GDM4_BASE 0x2500 - -#define GDM_BASE(_n) \ - ((_n) == 4 ? GDM4_BASE : \ - (_n) == 3 ? GDM3_BASE : \ - (_n) == 2 ? 
GDM2_BASE : GDM1_BASE) - -#define REG_FE_DMA_GLO_CFG 0x0000 -#define FE_DMA_GLO_L2_SPACE_MASK GENMASK(7, 4) -#define FE_DMA_GLO_PG_SZ_MASK BIT(3) - -#define REG_FE_RST_GLO_CFG 0x0004 -#define FE_RST_GDM4_MBI_ARB_MASK BIT(3) -#define FE_RST_GDM3_MBI_ARB_MASK BIT(2) -#define FE_RST_CORE_MASK BIT(0) - -#define REG_FE_WAN_MAC_H 0x0030 -#define REG_FE_LAN_MAC_H 0x0040 - -#define REG_FE_MAC_LMIN(_n) ((_n) + 0x04) -#define REG_FE_MAC_LMAX(_n) ((_n) + 0x08) - -#define REG_FE_CDM1_OQ_MAP0 0x0050 -#define REG_FE_CDM1_OQ_MAP1 0x0054 -#define REG_FE_CDM1_OQ_MAP2 0x0058 -#define REG_FE_CDM1_OQ_MAP3 0x005c - -#define REG_FE_PCE_CFG 0x0070 -#define PCE_DPI_EN_MASK BIT(2) -#define PCE_KA_EN_MASK BIT(1) -#define PCE_MC_EN_MASK BIT(0) - -#define REG_FE_PSE_QUEUE_CFG_WR 0x0080 -#define PSE_CFG_PORT_ID_MASK GENMASK(27, 24) -#define PSE_CFG_QUEUE_ID_MASK GENMASK(20, 16) -#define PSE_CFG_WR_EN_MASK BIT(8) -#define PSE_CFG_OQRSV_SEL_MASK BIT(0) - -#define REG_FE_PSE_QUEUE_CFG_VAL 0x0084 -#define PSE_CFG_OQ_RSV_MASK GENMASK(13, 0) - -#define PSE_FQ_CFG 0x008c -#define PSE_FQ_LIMIT_MASK GENMASK(14, 0) - -#define REG_FE_PSE_BUF_SET 0x0090 -#define PSE_SHARE_USED_LTHD_MASK GENMASK(31, 16) -#define PSE_ALLRSV_MASK GENMASK(14, 0) - -#define REG_PSE_SHARE_USED_THD 0x0094 -#define PSE_SHARE_USED_MTHD_MASK GENMASK(31, 16) -#define PSE_SHARE_USED_HTHD_MASK GENMASK(15, 0) - -#define REG_GDM_MISC_CFG 0x0148 -#define GDM2_RDM_ACK_WAIT_PREF_MASK BIT(9) -#define GDM2_CHN_VLD_MODE_MASK BIT(5) - -#define REG_FE_CSR_IFC_CFG CSR_IFC_BASE -#define FE_IFC_EN_MASK BIT(0) - -#define REG_FE_VIP_PORT_EN 0x01f0 -#define REG_FE_IFC_PORT_EN 0x01f4 - -#define REG_PSE_IQ_REV1 (PSE_BASE + 0x08) -#define PSE_IQ_RES1_P2_MASK GENMASK(23, 16) - -#define REG_PSE_IQ_REV2 (PSE_BASE + 0x0c) -#define PSE_IQ_RES2_P5_MASK GENMASK(15, 8) -#define PSE_IQ_RES2_P4_MASK GENMASK(7, 0) - -#define REG_FE_VIP_EN(_n) (0x0300 + ((_n) << 3)) -#define PATN_FCPU_EN_MASK BIT(7) -#define PATN_SWP_EN_MASK BIT(6) -#define PATN_DP_EN_MASK BIT(5) -#define PATN_SP_EN_MASK BIT(4) -#define PATN_TYPE_MASK GENMASK(3, 1) -#define PATN_EN_MASK BIT(0) - -#define REG_FE_VIP_PATN(_n) (0x0304 + ((_n) << 3)) -#define PATN_DP_MASK GENMASK(31, 16) -#define PATN_SP_MASK GENMASK(15, 0) - -#define REG_CDM1_VLAN_CTRL CDM1_BASE -#define CDM1_VLAN_MASK GENMASK(31, 16) - -#define REG_CDM1_FWD_CFG (CDM1_BASE + 0x08) -#define CDM1_VIP_QSEL_MASK GENMASK(24, 20) - -#define REG_CDM1_CRSN_QSEL(_n) (CDM1_BASE + 0x10 + ((_n) << 2)) -#define CDM1_CRSN_QSEL_REASON_MASK(_n) \ - GENMASK(4 + (((_n) % 4) << 3), (((_n) % 4) << 3)) - -#define REG_CDM2_FWD_CFG (CDM2_BASE + 0x08) -#define CDM2_OAM_QSEL_MASK GENMASK(31, 27) -#define CDM2_VIP_QSEL_MASK GENMASK(24, 20) - -#define REG_CDM2_CRSN_QSEL(_n) (CDM2_BASE + 0x10 + ((_n) << 2)) -#define CDM2_CRSN_QSEL_REASON_MASK(_n) \ - GENMASK(4 + (((_n) % 4) << 3), (((_n) % 4) << 3)) - -#define REG_GDM_FWD_CFG(_n) GDM_BASE(_n) -#define GDM_DROP_CRC_ERR BIT(23) -#define GDM_IP4_CKSUM BIT(22) -#define GDM_TCP_CKSUM BIT(21) -#define GDM_UDP_CKSUM BIT(20) -#define GDM_UCFQ_MASK GENMASK(15, 12) -#define GDM_BCFQ_MASK GENMASK(11, 8) -#define GDM_MCFQ_MASK GENMASK(7, 4) -#define GDM_OCFQ_MASK GENMASK(3, 0) - -#define REG_GDM_INGRESS_CFG(_n) (GDM_BASE(_n) + 0x10) -#define GDM_INGRESS_FC_EN_MASK BIT(1) -#define GDM_STAG_EN_MASK BIT(0) - -#define REG_GDM_LEN_CFG(_n) (GDM_BASE(_n) + 0x14) -#define GDM_SHORT_LEN_MASK GENMASK(13, 0) -#define GDM_LONG_LEN_MASK GENMASK(29, 16) - -#define REG_FE_CPORT_CFG (GDM1_BASE + 0x40) -#define FE_CPORT_PAD BIT(26) -#define FE_CPORT_PORT_XFC_MASK 
BIT(25) -#define FE_CPORT_QUEUE_XFC_MASK BIT(24) - -#define REG_FE_GDM_MIB_CLEAR(_n) (GDM_BASE(_n) + 0xf0) -#define FE_GDM_MIB_RX_CLEAR_MASK BIT(1) -#define FE_GDM_MIB_TX_CLEAR_MASK BIT(0) - -#define REG_FE_GDM1_MIB_CFG (GDM1_BASE + 0xf4) -#define FE_STRICT_RFC2819_MODE_MASK BIT(31) -#define FE_GDM1_TX_MIB_SPLIT_EN_MASK BIT(17) -#define FE_GDM1_RX_MIB_SPLIT_EN_MASK BIT(16) -#define FE_TX_MIB_ID_MASK GENMASK(15, 8) -#define FE_RX_MIB_ID_MASK GENMASK(7, 0) - -#define REG_FE_GDM_TX_OK_PKT_CNT_L(_n) (GDM_BASE(_n) + 0x104) -#define REG_FE_GDM_TX_OK_BYTE_CNT_L(_n) (GDM_BASE(_n) + 0x10c) -#define REG_FE_GDM_TX_ETH_PKT_CNT_L(_n) (GDM_BASE(_n) + 0x110) -#define REG_FE_GDM_TX_ETH_BYTE_CNT_L(_n) (GDM_BASE(_n) + 0x114) -#define REG_FE_GDM_TX_ETH_DROP_CNT(_n) (GDM_BASE(_n) + 0x118) -#define REG_FE_GDM_TX_ETH_BC_CNT(_n) (GDM_BASE(_n) + 0x11c) -#define REG_FE_GDM_TX_ETH_MC_CNT(_n) (GDM_BASE(_n) + 0x120) -#define REG_FE_GDM_TX_ETH_RUNT_CNT(_n) (GDM_BASE(_n) + 0x124) -#define REG_FE_GDM_TX_ETH_LONG_CNT(_n) (GDM_BASE(_n) + 0x128) -#define REG_FE_GDM_TX_ETH_E64_CNT_L(_n) (GDM_BASE(_n) + 0x12c) -#define REG_FE_GDM_TX_ETH_L64_CNT_L(_n) (GDM_BASE(_n) + 0x130) -#define REG_FE_GDM_TX_ETH_L127_CNT_L(_n) (GDM_BASE(_n) + 0x134) -#define REG_FE_GDM_TX_ETH_L255_CNT_L(_n) (GDM_BASE(_n) + 0x138) -#define REG_FE_GDM_TX_ETH_L511_CNT_L(_n) (GDM_BASE(_n) + 0x13c) -#define REG_FE_GDM_TX_ETH_L1023_CNT_L(_n) (GDM_BASE(_n) + 0x140) - -#define REG_FE_GDM_RX_OK_PKT_CNT_L(_n) (GDM_BASE(_n) + 0x148) -#define REG_FE_GDM_RX_FC_DROP_CNT(_n) (GDM_BASE(_n) + 0x14c) -#define REG_FE_GDM_RX_RC_DROP_CNT(_n) (GDM_BASE(_n) + 0x150) -#define REG_FE_GDM_RX_OVERFLOW_DROP_CNT(_n) (GDM_BASE(_n) + 0x154) -#define REG_FE_GDM_RX_ERROR_DROP_CNT(_n) (GDM_BASE(_n) + 0x158) -#define REG_FE_GDM_RX_OK_BYTE_CNT_L(_n) (GDM_BASE(_n) + 0x15c) -#define REG_FE_GDM_RX_ETH_PKT_CNT_L(_n) (GDM_BASE(_n) + 0x160) -#define REG_FE_GDM_RX_ETH_BYTE_CNT_L(_n) (GDM_BASE(_n) + 0x164) -#define REG_FE_GDM_RX_ETH_DROP_CNT(_n) (GDM_BASE(_n) + 0x168) -#define REG_FE_GDM_RX_ETH_BC_CNT(_n) (GDM_BASE(_n) + 0x16c) -#define REG_FE_GDM_RX_ETH_MC_CNT(_n) (GDM_BASE(_n) + 0x170) -#define REG_FE_GDM_RX_ETH_CRC_ERR_CNT(_n) (GDM_BASE(_n) + 0x174) -#define REG_FE_GDM_RX_ETH_FRAG_CNT(_n) (GDM_BASE(_n) + 0x178) -#define REG_FE_GDM_RX_ETH_JABBER_CNT(_n) (GDM_BASE(_n) + 0x17c) -#define REG_FE_GDM_RX_ETH_RUNT_CNT(_n) (GDM_BASE(_n) + 0x180) -#define REG_FE_GDM_RX_ETH_LONG_CNT(_n) (GDM_BASE(_n) + 0x184) -#define REG_FE_GDM_RX_ETH_E64_CNT_L(_n) (GDM_BASE(_n) + 0x188) -#define REG_FE_GDM_RX_ETH_L64_CNT_L(_n) (GDM_BASE(_n) + 0x18c) -#define REG_FE_GDM_RX_ETH_L127_CNT_L(_n) (GDM_BASE(_n) + 0x190) -#define REG_FE_GDM_RX_ETH_L255_CNT_L(_n) (GDM_BASE(_n) + 0x194) -#define REG_FE_GDM_RX_ETH_L511_CNT_L(_n) (GDM_BASE(_n) + 0x198) -#define REG_FE_GDM_RX_ETH_L1023_CNT_L(_n) (GDM_BASE(_n) + 0x19c) - -#define REG_PPE1_TB_HASH_CFG (PPE1_BASE + 0x250) -#define PPE1_SRAM_TABLE_EN_MASK BIT(0) -#define PPE1_SRAM_HASH1_EN_MASK BIT(8) -#define PPE1_DRAM_TABLE_EN_MASK BIT(16) -#define PPE1_DRAM_HASH1_EN_MASK BIT(24) - -#define REG_FE_GDM_TX_OK_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x280) -#define REG_FE_GDM_TX_OK_BYTE_CNT_H(_n) (GDM_BASE(_n) + 0x284) -#define REG_FE_GDM_TX_ETH_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x288) -#define REG_FE_GDM_TX_ETH_BYTE_CNT_H(_n) (GDM_BASE(_n) + 0x28c) - -#define REG_FE_GDM_RX_OK_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x290) -#define REG_FE_GDM_RX_OK_BYTE_CNT_H(_n) (GDM_BASE(_n) + 0x294) -#define REG_FE_GDM_RX_ETH_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x298) -#define REG_FE_GDM_RX_ETH_BYTE_CNT_H(_n) (GDM_BASE(_n) + 
0x29c) -#define REG_FE_GDM_TX_ETH_E64_CNT_H(_n) (GDM_BASE(_n) + 0x2b8) -#define REG_FE_GDM_TX_ETH_L64_CNT_H(_n) (GDM_BASE(_n) + 0x2bc) -#define REG_FE_GDM_TX_ETH_L127_CNT_H(_n) (GDM_BASE(_n) + 0x2c0) -#define REG_FE_GDM_TX_ETH_L255_CNT_H(_n) (GDM_BASE(_n) + 0x2c4) -#define REG_FE_GDM_TX_ETH_L511_CNT_H(_n) (GDM_BASE(_n) + 0x2c8) -#define REG_FE_GDM_TX_ETH_L1023_CNT_H(_n) (GDM_BASE(_n) + 0x2cc) -#define REG_FE_GDM_RX_ETH_E64_CNT_H(_n) (GDM_BASE(_n) + 0x2e8) -#define REG_FE_GDM_RX_ETH_L64_CNT_H(_n) (GDM_BASE(_n) + 0x2ec) -#define REG_FE_GDM_RX_ETH_L127_CNT_H(_n) (GDM_BASE(_n) + 0x2f0) -#define REG_FE_GDM_RX_ETH_L255_CNT_H(_n) (GDM_BASE(_n) + 0x2f4) -#define REG_FE_GDM_RX_ETH_L511_CNT_H(_n) (GDM_BASE(_n) + 0x2f8) -#define REG_FE_GDM_RX_ETH_L1023_CNT_H(_n) (GDM_BASE(_n) + 0x2fc) - -#define REG_GDM2_CHN_RLS (GDM2_BASE + 0x20) -#define MBI_RX_AGE_SEL_MASK GENMASK(26, 25) -#define MBI_TX_AGE_SEL_MASK GENMASK(18, 17) - -#define REG_GDM3_FWD_CFG GDM3_BASE -#define GDM3_PAD_EN_MASK BIT(28) - -#define REG_GDM4_FWD_CFG GDM4_BASE -#define GDM4_PAD_EN_MASK BIT(28) -#define GDM4_SPORT_OFFSET0_MASK GENMASK(11, 8) - -#define REG_GDM4_SRC_PORT_SET (GDM4_BASE + 0x23c) -#define GDM4_SPORT_OFF2_MASK GENMASK(19, 16) -#define GDM4_SPORT_OFF1_MASK GENMASK(15, 12) -#define GDM4_SPORT_OFF0_MASK GENMASK(11, 8) - -#define REG_IP_FRAG_FP 0x2010 -#define IP_ASSEMBLE_PORT_MASK GENMASK(24, 21) -#define IP_ASSEMBLE_NBQ_MASK GENMASK(20, 16) -#define IP_FRAGMENT_PORT_MASK GENMASK(8, 5) -#define IP_FRAGMENT_NBQ_MASK GENMASK(4, 0) - -#define REG_MC_VLAN_EN 0x2100 -#define MC_VLAN_EN_MASK BIT(0) - -#define REG_MC_VLAN_CFG 0x2104 -#define MC_VLAN_CFG_CMD_DONE_MASK BIT(31) -#define MC_VLAN_CFG_TABLE_ID_MASK GENMASK(21, 16) -#define MC_VLAN_CFG_PORT_ID_MASK GENMASK(11, 8) -#define MC_VLAN_CFG_TABLE_SEL_MASK BIT(4) -#define MC_VLAN_CFG_RW_MASK BIT(0) - -#define REG_MC_VLAN_DATA 0x2108 - -#define REG_CDM5_RX_OQ1_DROP_CNT 0x29d4 - -/* QDMA */ -#define REG_QDMA_GLOBAL_CFG 0x0004 -#define GLOBAL_CFG_RX_2B_OFFSET_MASK BIT(31) -#define GLOBAL_CFG_DMA_PREFERENCE_MASK GENMASK(30, 29) -#define GLOBAL_CFG_CPU_TXR_RR_MASK BIT(28) -#define GLOBAL_CFG_DSCP_BYTE_SWAP_MASK BIT(27) -#define GLOBAL_CFG_PAYLOAD_BYTE_SWAP_MASK BIT(26) -#define GLOBAL_CFG_MULTICAST_MODIFY_FP_MASK BIT(25) -#define GLOBAL_CFG_OAM_MODIFY_MASK BIT(24) -#define GLOBAL_CFG_RESET_MASK BIT(23) -#define GLOBAL_CFG_RESET_DONE_MASK BIT(22) -#define GLOBAL_CFG_MULTICAST_EN_MASK BIT(21) -#define GLOBAL_CFG_IRQ1_EN_MASK BIT(20) -#define GLOBAL_CFG_IRQ0_EN_MASK BIT(19) -#define GLOBAL_CFG_LOOPCNT_EN_MASK BIT(18) -#define GLOBAL_CFG_RD_BYPASS_WR_MASK BIT(17) -#define GLOBAL_CFG_QDMA_LOOPBACK_MASK BIT(16) -#define GLOBAL_CFG_LPBK_RXQ_SEL_MASK GENMASK(13, 8) -#define GLOBAL_CFG_CHECK_DONE_MASK BIT(7) -#define GLOBAL_CFG_TX_WB_DONE_MASK BIT(6) -#define GLOBAL_CFG_MAX_ISSUE_NUM_MASK GENMASK(5, 4) -#define GLOBAL_CFG_RX_DMA_BUSY_MASK BIT(3) -#define GLOBAL_CFG_RX_DMA_EN_MASK BIT(2) -#define GLOBAL_CFG_TX_DMA_BUSY_MASK BIT(1) -#define GLOBAL_CFG_TX_DMA_EN_MASK BIT(0) - -#define REG_FWD_DSCP_BASE 0x0010 -#define REG_FWD_BUF_BASE 0x0014 - -#define REG_HW_FWD_DSCP_CFG 0x0018 -#define HW_FWD_DSCP_PAYLOAD_SIZE_MASK GENMASK(29, 28) -#define HW_FWD_DSCP_SCATTER_LEN_MASK GENMASK(17, 16) -#define HW_FWD_DSCP_MIN_SCATTER_LEN_MASK GENMASK(15, 0) - -#define REG_INT_STATUS(_n) \ - (((_n) == 4) ? 0x0730 : \ - ((_n) == 3) ? 0x0724 : \ - ((_n) == 2) ? 0x0720 : \ - ((_n) == 1) ? 0x0024 : 0x0020) - -#define REG_INT_ENABLE(_n) \ - (((_n) == 4) ? 0x0750 : \ - ((_n) == 3) ? 0x0744 : \ - ((_n) == 2) ? 
0x0740 : \ - ((_n) == 1) ? 0x002c : 0x0028) - -/* QDMA_CSR_INT_ENABLE1 */ -#define RX15_COHERENT_INT_MASK BIT(31) -#define RX14_COHERENT_INT_MASK BIT(30) -#define RX13_COHERENT_INT_MASK BIT(29) -#define RX12_COHERENT_INT_MASK BIT(28) -#define RX11_COHERENT_INT_MASK BIT(27) -#define RX10_COHERENT_INT_MASK BIT(26) -#define RX9_COHERENT_INT_MASK BIT(25) -#define RX8_COHERENT_INT_MASK BIT(24) -#define RX7_COHERENT_INT_MASK BIT(23) -#define RX6_COHERENT_INT_MASK BIT(22) -#define RX5_COHERENT_INT_MASK BIT(21) -#define RX4_COHERENT_INT_MASK BIT(20) -#define RX3_COHERENT_INT_MASK BIT(19) -#define RX2_COHERENT_INT_MASK BIT(18) -#define RX1_COHERENT_INT_MASK BIT(17) -#define RX0_COHERENT_INT_MASK BIT(16) -#define TX7_COHERENT_INT_MASK BIT(15) -#define TX6_COHERENT_INT_MASK BIT(14) -#define TX5_COHERENT_INT_MASK BIT(13) -#define TX4_COHERENT_INT_MASK BIT(12) -#define TX3_COHERENT_INT_MASK BIT(11) -#define TX2_COHERENT_INT_MASK BIT(10) -#define TX1_COHERENT_INT_MASK BIT(9) -#define TX0_COHERENT_INT_MASK BIT(8) -#define CNT_OVER_FLOW_INT_MASK BIT(7) -#define IRQ1_FULL_INT_MASK BIT(5) -#define IRQ1_INT_MASK BIT(4) -#define HWFWD_DSCP_LOW_INT_MASK BIT(3) -#define HWFWD_DSCP_EMPTY_INT_MASK BIT(2) -#define IRQ0_FULL_INT_MASK BIT(1) -#define IRQ0_INT_MASK BIT(0) - -#define TX_DONE_INT_MASK(_n) \ - ((_n) ? IRQ1_INT_MASK | IRQ1_FULL_INT_MASK \ - : IRQ0_INT_MASK | IRQ0_FULL_INT_MASK) - -#define INT_TX_MASK \ - (IRQ1_INT_MASK | IRQ1_FULL_INT_MASK | \ - IRQ0_INT_MASK | IRQ0_FULL_INT_MASK) - -#define INT_IDX0_MASK \ - (TX0_COHERENT_INT_MASK | TX1_COHERENT_INT_MASK | \ - TX2_COHERENT_INT_MASK | TX3_COHERENT_INT_MASK | \ - TX4_COHERENT_INT_MASK | TX5_COHERENT_INT_MASK | \ - TX6_COHERENT_INT_MASK | TX7_COHERENT_INT_MASK | \ - RX0_COHERENT_INT_MASK | RX1_COHERENT_INT_MASK | \ - RX2_COHERENT_INT_MASK | RX3_COHERENT_INT_MASK | \ - RX4_COHERENT_INT_MASK | RX7_COHERENT_INT_MASK | \ - RX8_COHERENT_INT_MASK | RX9_COHERENT_INT_MASK | \ - RX15_COHERENT_INT_MASK | INT_TX_MASK) - -/* QDMA_CSR_INT_ENABLE2 */ -#define RX15_NO_CPU_DSCP_INT_MASK BIT(31) -#define RX14_NO_CPU_DSCP_INT_MASK BIT(30) -#define RX13_NO_CPU_DSCP_INT_MASK BIT(29) -#define RX12_NO_CPU_DSCP_INT_MASK BIT(28) -#define RX11_NO_CPU_DSCP_INT_MASK BIT(27) -#define RX10_NO_CPU_DSCP_INT_MASK BIT(26) -#define RX9_NO_CPU_DSCP_INT_MASK BIT(25) -#define RX8_NO_CPU_DSCP_INT_MASK BIT(24) -#define RX7_NO_CPU_DSCP_INT_MASK BIT(23) -#define RX6_NO_CPU_DSCP_INT_MASK BIT(22) -#define RX5_NO_CPU_DSCP_INT_MASK BIT(21) -#define RX4_NO_CPU_DSCP_INT_MASK BIT(20) -#define RX3_NO_CPU_DSCP_INT_MASK BIT(19) -#define RX2_NO_CPU_DSCP_INT_MASK BIT(18) -#define RX1_NO_CPU_DSCP_INT_MASK BIT(17) -#define RX0_NO_CPU_DSCP_INT_MASK BIT(16) -#define RX15_DONE_INT_MASK BIT(15) -#define RX14_DONE_INT_MASK BIT(14) -#define RX13_DONE_INT_MASK BIT(13) -#define RX12_DONE_INT_MASK BIT(12) -#define RX11_DONE_INT_MASK BIT(11) -#define RX10_DONE_INT_MASK BIT(10) -#define RX9_DONE_INT_MASK BIT(9) -#define RX8_DONE_INT_MASK BIT(8) -#define RX7_DONE_INT_MASK BIT(7) -#define RX6_DONE_INT_MASK BIT(6) -#define RX5_DONE_INT_MASK BIT(5) -#define RX4_DONE_INT_MASK BIT(4) -#define RX3_DONE_INT_MASK BIT(3) -#define RX2_DONE_INT_MASK BIT(2) -#define RX1_DONE_INT_MASK BIT(1) -#define RX0_DONE_INT_MASK BIT(0) - -#define RX_DONE_INT_MASK \ - (RX0_DONE_INT_MASK | RX1_DONE_INT_MASK | \ - RX2_DONE_INT_MASK | RX3_DONE_INT_MASK | \ - RX4_DONE_INT_MASK | RX7_DONE_INT_MASK | \ - RX8_DONE_INT_MASK | RX9_DONE_INT_MASK | \ - RX15_DONE_INT_MASK) -#define INT_IDX1_MASK \ - (RX_DONE_INT_MASK | \ - RX0_NO_CPU_DSCP_INT_MASK | 
RX1_NO_CPU_DSCP_INT_MASK | \ - RX2_NO_CPU_DSCP_INT_MASK | RX3_NO_CPU_DSCP_INT_MASK | \ - RX4_NO_CPU_DSCP_INT_MASK | RX7_NO_CPU_DSCP_INT_MASK | \ - RX8_NO_CPU_DSCP_INT_MASK | RX9_NO_CPU_DSCP_INT_MASK | \ - RX15_NO_CPU_DSCP_INT_MASK) - -/* QDMA_CSR_INT_ENABLE5 */ -#define TX31_COHERENT_INT_MASK BIT(31) -#define TX30_COHERENT_INT_MASK BIT(30) -#define TX29_COHERENT_INT_MASK BIT(29) -#define TX28_COHERENT_INT_MASK BIT(28) -#define TX27_COHERENT_INT_MASK BIT(27) -#define TX26_COHERENT_INT_MASK BIT(26) -#define TX25_COHERENT_INT_MASK BIT(25) -#define TX24_COHERENT_INT_MASK BIT(24) -#define TX23_COHERENT_INT_MASK BIT(23) -#define TX22_COHERENT_INT_MASK BIT(22) -#define TX21_COHERENT_INT_MASK BIT(21) -#define TX20_COHERENT_INT_MASK BIT(20) -#define TX19_COHERENT_INT_MASK BIT(19) -#define TX18_COHERENT_INT_MASK BIT(18) -#define TX17_COHERENT_INT_MASK BIT(17) -#define TX16_COHERENT_INT_MASK BIT(16) -#define TX15_COHERENT_INT_MASK BIT(15) -#define TX14_COHERENT_INT_MASK BIT(14) -#define TX13_COHERENT_INT_MASK BIT(13) -#define TX12_COHERENT_INT_MASK BIT(12) -#define TX11_COHERENT_INT_MASK BIT(11) -#define TX10_COHERENT_INT_MASK BIT(10) -#define TX9_COHERENT_INT_MASK BIT(9) -#define TX8_COHERENT_INT_MASK BIT(8) - -#define INT_IDX4_MASK \ - (TX8_COHERENT_INT_MASK | TX9_COHERENT_INT_MASK | \ - TX10_COHERENT_INT_MASK | TX11_COHERENT_INT_MASK | \ - TX12_COHERENT_INT_MASK | TX13_COHERENT_INT_MASK | \ - TX14_COHERENT_INT_MASK | TX15_COHERENT_INT_MASK | \ - TX16_COHERENT_INT_MASK | TX17_COHERENT_INT_MASK | \ - TX18_COHERENT_INT_MASK | TX19_COHERENT_INT_MASK | \ - TX20_COHERENT_INT_MASK | TX21_COHERENT_INT_MASK | \ - TX22_COHERENT_INT_MASK | TX23_COHERENT_INT_MASK | \ - TX24_COHERENT_INT_MASK | TX25_COHERENT_INT_MASK | \ - TX26_COHERENT_INT_MASK | TX27_COHERENT_INT_MASK | \ - TX28_COHERENT_INT_MASK | TX29_COHERENT_INT_MASK | \ - TX30_COHERENT_INT_MASK | TX31_COHERENT_INT_MASK) - -#define REG_TX_IRQ_BASE(_n) ((_n) ? 0x0048 : 0x0050) - -#define REG_TX_IRQ_CFG(_n) ((_n) ? 0x004c : 0x0054) -#define TX_IRQ_THR_MASK GENMASK(27, 16) -#define TX_IRQ_DEPTH_MASK GENMASK(11, 0) - -#define REG_IRQ_CLEAR_LEN(_n) ((_n) ? 0x0064 : 0x0058) -#define IRQ_CLEAR_LEN_MASK GENMASK(7, 0) - -#define REG_IRQ_STATUS(_n) ((_n) ? 0x0068 : 0x005c) -#define IRQ_ENTRY_LEN_MASK GENMASK(27, 16) -#define IRQ_HEAD_IDX_MASK GENMASK(11, 0) - -#define REG_TX_RING_BASE(_n) \ - (((_n) < 8) ? 0x0100 + ((_n) << 5) : 0x0b00 + (((_n) - 8) << 5)) - -#define REG_TX_RING_BLOCKING(_n) \ - (((_n) < 8) ? 0x0104 + ((_n) << 5) : 0x0b04 + (((_n) - 8) << 5)) - -#define TX_RING_IRQ_BLOCKING_MAP_MASK BIT(6) -#define TX_RING_IRQ_BLOCKING_CFG_MASK BIT(4) -#define TX_RING_IRQ_BLOCKING_TX_DROP_EN_MASK BIT(2) -#define TX_RING_IRQ_BLOCKING_MAX_TH_TXRING_EN_MASK BIT(1) -#define TX_RING_IRQ_BLOCKING_MIN_TH_TXRING_EN_MASK BIT(0) - -#define REG_TX_CPU_IDX(_n) \ - (((_n) < 8) ? 0x0108 + ((_n) << 5) : 0x0b08 + (((_n) - 8) << 5)) - -#define TX_RING_CPU_IDX_MASK GENMASK(15, 0) - -#define REG_TX_DMA_IDX(_n) \ - (((_n) < 8) ? 0x010c + ((_n) << 5) : 0x0b0c + (((_n) - 8) << 5)) - -#define TX_RING_DMA_IDX_MASK GENMASK(15, 0) - -#define IRQ_RING_IDX_MASK GENMASK(20, 16) -#define IRQ_DESC_IDX_MASK GENMASK(15, 0) - -#define REG_RX_RING_BASE(_n) \ - (((_n) < 16) ? 0x0200 + ((_n) << 5) : 0x0e00 + (((_n) - 16) << 5)) - -#define REG_RX_RING_SIZE(_n) \ - (((_n) < 16) ? 0x0204 + ((_n) << 5) : 0x0e04 + (((_n) - 16) << 5)) - -#define RX_RING_THR_MASK GENMASK(31, 16) -#define RX_RING_SIZE_MASK GENMASK(15, 0) - -#define REG_RX_CPU_IDX(_n) \ - (((_n) < 16) ? 
0x0208 + ((_n) << 5) : 0x0e08 + (((_n) - 16) << 5)) - -#define RX_RING_CPU_IDX_MASK GENMASK(15, 0) - -#define REG_RX_DMA_IDX(_n) \ - (((_n) < 16) ? 0x020c + ((_n) << 5) : 0x0e0c + (((_n) - 16) << 5)) - -#define REG_RX_DELAY_INT_IDX(_n) \ - (((_n) < 16) ? 0x0210 + ((_n) << 5) : 0x0e10 + (((_n) - 16) << 5)) - -#define RX_DELAY_INT_MASK GENMASK(15, 0) - -#define RX_RING_DMA_IDX_MASK GENMASK(15, 0) - -#define REG_INGRESS_TRTCM_CFG 0x0070 -#define INGRESS_TRTCM_EN_MASK BIT(31) -#define INGRESS_TRTCM_MODE_MASK BIT(30) -#define INGRESS_SLOW_TICK_RATIO_MASK GENMASK(29, 16) -#define INGRESS_FAST_TICK_MASK GENMASK(15, 0) - -#define REG_QUEUE_CLOSE_CFG(_n) (0x00a0 + ((_n) & 0xfc)) -#define TXQ_DISABLE_CHAN_QUEUE_MASK(_n, _m) BIT((_m) + (((_n) & 0x3) << 3)) - -#define REG_TXQ_DIS_CFG_BASE(_n) ((_n) ? 0x20a0 : 0x00a0) -#define REG_TXQ_DIS_CFG(_n, _m) (REG_TXQ_DIS_CFG_BASE((_n)) + (_m) << 2) - -#define REG_CNTR_CFG(_n) (0x0400 + ((_n) << 3)) -#define CNTR_EN_MASK BIT(31) -#define CNTR_ALL_CHAN_EN_MASK BIT(30) -#define CNTR_ALL_QUEUE_EN_MASK BIT(29) -#define CNTR_ALL_DSCP_RING_EN_MASK BIT(28) -#define CNTR_SRC_MASK GENMASK(27, 24) -#define CNTR_DSCP_RING_MASK GENMASK(20, 16) -#define CNTR_CHAN_MASK GENMASK(7, 3) -#define CNTR_QUEUE_MASK GENMASK(2, 0) - -#define REG_CNTR_VAL(_n) (0x0404 + ((_n) << 3)) - -#define REG_LMGR_INIT_CFG 0x1000 -#define LMGR_INIT_START BIT(31) -#define LMGR_SRAM_MODE_MASK BIT(30) -#define HW_FWD_PKTSIZE_OVERHEAD_MASK GENMASK(27, 20) -#define HW_FWD_DESC_NUM_MASK GENMASK(16, 0) - -#define REG_FWD_DSCP_LOW_THR 0x1004 -#define FWD_DSCP_LOW_THR_MASK GENMASK(17, 0) - -#define REG_EGRESS_RATE_METER_CFG 0x100c -#define EGRESS_RATE_METER_EN_MASK BIT(31) -#define EGRESS_RATE_METER_EQ_RATE_EN_MASK BIT(17) -#define EGRESS_RATE_METER_WINDOW_SZ_MASK GENMASK(16, 12) -#define EGRESS_RATE_METER_TIMESLICE_MASK GENMASK(10, 0) - -#define REG_EGRESS_TRTCM_CFG 0x1010 -#define EGRESS_TRTCM_EN_MASK BIT(31) -#define EGRESS_TRTCM_MODE_MASK BIT(30) -#define EGRESS_SLOW_TICK_RATIO_MASK GENMASK(29, 16) -#define EGRESS_FAST_TICK_MASK GENMASK(15, 0) - -#define TRTCM_PARAM_RW_MASK BIT(31) -#define TRTCM_PARAM_RW_DONE_MASK BIT(30) -#define TRTCM_PARAM_TYPE_MASK GENMASK(29, 28) -#define TRTCM_METER_GROUP_MASK GENMASK(27, 26) -#define TRTCM_PARAM_INDEX_MASK GENMASK(23, 17) -#define TRTCM_PARAM_RATE_TYPE_MASK BIT(16) - -#define REG_TRTCM_CFG_PARAM(_n) ((_n) + 0x4) -#define REG_TRTCM_DATA_LOW(_n) ((_n) + 0x8) -#define REG_TRTCM_DATA_HIGH(_n) ((_n) + 0xc) - -#define REG_TXWRR_MODE_CFG 0x1020 -#define TWRR_WEIGHT_SCALE_MASK BIT(31) -#define TWRR_WEIGHT_BASE_MASK BIT(3) - -#define REG_TXWRR_WEIGHT_CFG 0x1024 -#define TWRR_RW_CMD_MASK BIT(31) -#define TWRR_RW_CMD_DONE BIT(30) -#define TWRR_CHAN_IDX_MASK GENMASK(23, 19) -#define TWRR_QUEUE_IDX_MASK GENMASK(18, 16) -#define TWRR_VALUE_MASK GENMASK(15, 0) - -#define REG_PSE_BUF_USAGE_CFG 0x1028 -#define PSE_BUF_ESTIMATE_EN_MASK BIT(29) - -#define REG_CHAN_QOS_MODE(_n) (0x1040 + ((_n) << 2)) -#define CHAN_QOS_MODE_MASK(_n) GENMASK(2 + ((_n) << 2), (_n) << 2) - -#define REG_GLB_TRTCM_CFG 0x1080 -#define GLB_TRTCM_EN_MASK BIT(31) -#define GLB_TRTCM_MODE_MASK BIT(30) -#define GLB_SLOW_TICK_RATIO_MASK GENMASK(29, 16) -#define GLB_FAST_TICK_MASK GENMASK(15, 0) - -#define REG_TXQ_CNGST_CFG 0x10a0 -#define TXQ_CNGST_DROP_EN BIT(31) -#define TXQ_CNGST_DEI_DROP_EN BIT(30) - -#define REG_SLA_TRTCM_CFG 0x1150 -#define SLA_TRTCM_EN_MASK BIT(31) -#define SLA_TRTCM_MODE_MASK BIT(30) -#define SLA_SLOW_TICK_RATIO_MASK GENMASK(29, 16) -#define SLA_FAST_TICK_MASK GENMASK(15, 0) - -/* CTRL 
*/ -#define QDMA_DESC_DONE_MASK BIT(31) -#define QDMA_DESC_DROP_MASK BIT(30) /* tx: drop - rx: overflow */ -#define QDMA_DESC_MORE_MASK BIT(29) /* more SG elements */ -#define QDMA_DESC_DEI_MASK BIT(25) -#define QDMA_DESC_NO_DROP_MASK BIT(24) -#define QDMA_DESC_LEN_MASK GENMASK(15, 0) -/* DATA */ -#define QDMA_DESC_NEXT_ID_MASK GENMASK(15, 0) -/* TX MSG0 */ -#define QDMA_ETH_TXMSG_MIC_IDX_MASK BIT(30) -#define QDMA_ETH_TXMSG_SP_TAG_MASK GENMASK(29, 14) -#define QDMA_ETH_TXMSG_ICO_MASK BIT(13) -#define QDMA_ETH_TXMSG_UCO_MASK BIT(12) -#define QDMA_ETH_TXMSG_TCO_MASK BIT(11) -#define QDMA_ETH_TXMSG_TSO_MASK BIT(10) -#define QDMA_ETH_TXMSG_FAST_MASK BIT(9) -#define QDMA_ETH_TXMSG_OAM_MASK BIT(8) -#define QDMA_ETH_TXMSG_CHAN_MASK GENMASK(7, 3) -#define QDMA_ETH_TXMSG_QUEUE_MASK GENMASK(2, 0) -/* TX MSG1 */ -#define QDMA_ETH_TXMSG_NO_DROP BIT(31) -#define QDMA_ETH_TXMSG_METER_MASK GENMASK(30, 24) /* 0x7f no meters */ -#define QDMA_ETH_TXMSG_FPORT_MASK GENMASK(23, 20) -#define QDMA_ETH_TXMSG_NBOQ_MASK GENMASK(19, 15) -#define QDMA_ETH_TXMSG_HWF_MASK BIT(14) -#define QDMA_ETH_TXMSG_HOP_MASK BIT(13) -#define QDMA_ETH_TXMSG_PTP_MASK BIT(12) -#define QDMA_ETH_TXMSG_ACNT_G1_MASK GENMASK(10, 6) /* 0x1f do not count */ -#define QDMA_ETH_TXMSG_ACNT_G0_MASK GENMASK(5, 0) /* 0x3f do not count */ - -/* RX MSG1 */ -#define QDMA_ETH_RXMSG_DEI_MASK BIT(31) -#define QDMA_ETH_RXMSG_IP6_MASK BIT(30) -#define QDMA_ETH_RXMSG_IP4_MASK BIT(29) -#define QDMA_ETH_RXMSG_IP4F_MASK BIT(28) -#define QDMA_ETH_RXMSG_L4_VALID_MASK BIT(27) -#define QDMA_ETH_RXMSG_L4F_MASK BIT(26) -#define QDMA_ETH_RXMSG_SPORT_MASK GENMASK(25, 21) -#define QDMA_ETH_RXMSG_CRSN_MASK GENMASK(20, 16) -#define QDMA_ETH_RXMSG_PPE_ENTRY_MASK GENMASK(15, 0) - -struct airoha_qdma_desc { - __le32 rsv; - __le32 ctrl; - __le32 addr; - __le32 data; - __le32 msg0; - __le32 msg1; - __le32 msg2; - __le32 msg3; -}; - -/* CTRL0 */ -#define QDMA_FWD_DESC_CTX_MASK BIT(31) -#define QDMA_FWD_DESC_RING_MASK GENMASK(30, 28) -#define QDMA_FWD_DESC_IDX_MASK GENMASK(27, 16) -#define QDMA_FWD_DESC_LEN_MASK GENMASK(15, 0) -/* CTRL1 */ -#define QDMA_FWD_DESC_FIRST_IDX_MASK GENMASK(15, 0) -/* CTRL2 */ -#define QDMA_FWD_DESC_MORE_PKT_NUM_MASK GENMASK(2, 0) - -struct airoha_qdma_fwd_desc { - __le32 addr; - __le32 ctrl0; - __le32 ctrl1; - __le32 ctrl2; - __le32 msg0; - __le32 msg1; - __le32 rsv0; - __le32 rsv1; -}; - -enum { - QDMA_INT_REG_IDX0, - QDMA_INT_REG_IDX1, - QDMA_INT_REG_IDX2, - QDMA_INT_REG_IDX3, - QDMA_INT_REG_IDX4, - QDMA_INT_REG_MAX -}; - -enum { - XSI_PCIE0_PORT, - XSI_PCIE1_PORT, - XSI_USB_PORT, - XSI_AE_PORT, - XSI_ETH_PORT, -}; - -enum { - XSI_PCIE0_VIP_PORT_MASK = BIT(22), - XSI_PCIE1_VIP_PORT_MASK = BIT(23), - XSI_USB_VIP_PORT_MASK = BIT(25), - XSI_ETH_VIP_PORT_MASK = BIT(24), -}; - -enum { - DEV_STATE_INITIALIZED, -}; - -enum { - CDM_CRSN_QSEL_Q1 = 1, - CDM_CRSN_QSEL_Q5 = 5, - CDM_CRSN_QSEL_Q6 = 6, - CDM_CRSN_QSEL_Q15 = 15, -}; - -enum { - CRSN_08 = 0x8, - CRSN_21 = 0x15, /* KA */ - CRSN_22 = 0x16, /* hit bind and force route to CPU */ - CRSN_24 = 0x18, - CRSN_25 = 0x19, -}; - -enum { - FE_PSE_PORT_CDM1, - FE_PSE_PORT_GDM1, - FE_PSE_PORT_GDM2, - FE_PSE_PORT_GDM3, - FE_PSE_PORT_PPE1, - FE_PSE_PORT_CDM2, - FE_PSE_PORT_CDM3, - FE_PSE_PORT_CDM4, - FE_PSE_PORT_PPE2, - FE_PSE_PORT_GDM4, - FE_PSE_PORT_CDM5, - FE_PSE_PORT_DROP = 0xf, -}; - -enum tx_sched_mode { - TC_SCH_WRR8, - TC_SCH_SP, - TC_SCH_WRR7, - TC_SCH_WRR6, - TC_SCH_WRR5, - TC_SCH_WRR4, - TC_SCH_WRR3, - TC_SCH_WRR2, -}; - -enum trtcm_param_type { - TRTCM_MISC_MODE, /* meter_en, pps_mode, tick_sel */ - 
TRTCM_TOKEN_RATE_MODE, - TRTCM_BUCKETSIZE_SHIFT_MODE, - TRTCM_BUCKET_COUNTER_MODE, -}; - -enum trtcm_mode_type { - TRTCM_COMMIT_MODE, - TRTCM_PEAK_MODE, -}; - -enum trtcm_param { - TRTCM_TICK_SEL = BIT(0), - TRTCM_PKT_MODE = BIT(1), - TRTCM_METER_MODE = BIT(2), -}; - -#define MIN_TOKEN_SIZE 4096 -#define MAX_TOKEN_SIZE_OFFSET 17 -#define TRTCM_TOKEN_RATE_MASK GENMASK(23, 6) -#define TRTCM_TOKEN_RATE_FRACTION_MASK GENMASK(5, 0) - -struct airoha_queue_entry { - union { - void *buf; - struct sk_buff *skb; - }; - dma_addr_t dma_addr; - u16 dma_len; -}; - -struct airoha_queue { - struct airoha_qdma *qdma; - - /* protect concurrent queue accesses */ - spinlock_t lock; - struct airoha_queue_entry *entry; - struct airoha_qdma_desc *desc; - u16 head; - u16 tail; - - int queued; - int ndesc; - int free_thr; - int buf_size; - - struct napi_struct napi; - struct page_pool *page_pool; -}; - -struct airoha_tx_irq_queue { - struct airoha_qdma *qdma; - - struct napi_struct napi; - - int size; - u32 *q; -}; - -struct airoha_hw_stats { - /* protect concurrent hw_stats accesses */ - spinlock_t lock; - struct u64_stats_sync syncp; - - /* get_stats64 */ - u64 rx_ok_pkts; - u64 tx_ok_pkts; - u64 rx_ok_bytes; - u64 tx_ok_bytes; - u64 rx_multicast; - u64 rx_errors; - u64 rx_drops; - u64 tx_drops; - u64 rx_crc_error; - u64 rx_over_errors; - /* ethtool stats */ - u64 tx_broadcast; - u64 tx_multicast; - u64 tx_len[7]; - u64 rx_broadcast; - u64 rx_fragment; - u64 rx_jabber; - u64 rx_len[7]; -}; - -struct airoha_qdma { - struct airoha_eth *eth; - void __iomem *regs; - - /* protect concurrent irqmask accesses */ - spinlock_t irq_lock; - u32 irqmask[QDMA_INT_REG_MAX]; - int irq; +#include "airoha_regs.h" +#include "airoha_eth.h" - struct airoha_tx_irq_queue q_tx_irq[AIROHA_NUM_TX_IRQ]; - - struct airoha_queue q_tx[AIROHA_NUM_TX_RING]; - struct airoha_queue q_rx[AIROHA_NUM_RX_RING]; - - /* descriptor and packet buffers for qdma hw forward */ - struct { - void *desc; - void *q; - } hfwd; -}; - -struct airoha_gdm_port { - struct airoha_qdma *qdma; - struct net_device *dev; - int id; - - struct airoha_hw_stats stats; - - DECLARE_BITMAP(qos_sq_bmap, AIROHA_NUM_QOS_CHANNELS); - - /* qos stats counters */ - u64 cpu_tx_packets; - u64 fwd_tx_packets; -}; - -struct airoha_eth { - struct device *dev; - - unsigned long state; - void __iomem *fe_regs; - - struct reset_control_bulk_data rsts[AIROHA_MAX_NUM_RSTS]; - struct reset_control_bulk_data xsi_rsts[AIROHA_MAX_NUM_XSI_RSTS]; - - struct net_device *napi_dev; - - struct airoha_qdma qdma[AIROHA_MAX_NUM_QDMA]; - struct airoha_gdm_port *ports[AIROHA_MAX_NUM_GDM_PORTS]; -}; - -static u32 airoha_rr(void __iomem *base, u32 offset) +u32 airoha_rr(void __iomem *base, u32 offset) { return readl(base + offset); } -static void airoha_wr(void __iomem *base, u32 offset, u32 val) +void airoha_wr(void __iomem *base, u32 offset, u32 val) { writel(val, base + offset); } -static u32 airoha_rmw(void __iomem *base, u32 offset, u32 mask, u32 val) +u32 airoha_rmw(void __iomem *base, u32 offset, u32 mask, u32 val) { val |= (airoha_rr(base, offset) & ~mask); airoha_wr(base, offset, val); @@ -929,28 +34,6 @@ static u32 airoha_rmw(void __iomem *base, u32 offset, u32 mask, u32 val) return val; } -#define airoha_fe_rr(eth, offset) \ - airoha_rr((eth)->fe_regs, (offset)) -#define airoha_fe_wr(eth, offset, val) \ - airoha_wr((eth)->fe_regs, (offset), (val)) -#define airoha_fe_rmw(eth, offset, mask, val) \ - airoha_rmw((eth)->fe_regs, (offset), (mask), (val)) -#define airoha_fe_set(eth, offset, val) \ - 
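With the static qualifier dropped, airoha_rr()/airoha_wr()/airoha_rmw() become the shared accessor layer for the new airoha_ppe.c and airoha_npu.c files, and the set/clear wrappers are just degenerate read-modify-write calls: set passes an empty clear mask, clear passes an empty set value. Typical usage, taken from the open/stop paths later in this patch:

	/* enable TX/RX DMA on ndo_open ... */
	airoha_qdma_set(qdma, REG_QDMA_GLOBAL_CFG,
			GLOBAL_CFG_TX_DMA_EN_MASK | GLOBAL_CFG_RX_DMA_EN_MASK);

	/* ... and disable it again when the last user is gone */
	airoha_qdma_clear(qdma, REG_QDMA_GLOBAL_CFG,
			  GLOBAL_CFG_TX_DMA_EN_MASK | GLOBAL_CFG_RX_DMA_EN_MASK);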
airoha_rmw((eth)->fe_regs, (offset), 0, (val)) -#define airoha_fe_clear(eth, offset, val) \ - airoha_rmw((eth)->fe_regs, (offset), (val), 0) - -#define airoha_qdma_rr(qdma, offset) \ - airoha_rr((qdma)->regs, (offset)) -#define airoha_qdma_wr(qdma, offset, val) \ - airoha_wr((qdma)->regs, (offset), (val)) -#define airoha_qdma_rmw(qdma, offset, mask, val) \ - airoha_rmw((qdma)->regs, (offset), (mask), (val)) -#define airoha_qdma_set(qdma, offset, val) \ - airoha_rmw((qdma)->regs, (offset), 0, (val)) -#define airoha_qdma_clear(qdma, offset, val) \ - airoha_rmw((qdma)->regs, (offset), (val), 0) - static void airoha_qdma_set_irqmask(struct airoha_qdma *qdma, int index, u32 clear, u32 set) { @@ -1021,30 +104,23 @@ static void airoha_set_gdm_port_fwd_cfg(struct airoha_eth *eth, u32 addr, FIELD_PREP(GDM_UCFQ_MASK, val)); } -static int airoha_set_gdm_port(struct airoha_eth *eth, int port, bool enable) +static int airoha_set_vip_for_gdm_port(struct airoha_gdm_port *port, + bool enable) { - u32 val = enable ? FE_PSE_PORT_PPE1 : FE_PSE_PORT_DROP; - u32 vip_port, cfg_addr; + struct airoha_eth *eth = port->qdma->eth; + u32 vip_port; - switch (port) { - case XSI_PCIE0_PORT: + switch (port->id) { + case 3: + /* FIXME: handle XSI_PCIE1_PORT */ vip_port = XSI_PCIE0_VIP_PORT_MASK; - cfg_addr = REG_GDM_FWD_CFG(3); - break; - case XSI_PCIE1_PORT: - vip_port = XSI_PCIE1_VIP_PORT_MASK; - cfg_addr = REG_GDM_FWD_CFG(3); - break; - case XSI_USB_PORT: - vip_port = XSI_USB_VIP_PORT_MASK; - cfg_addr = REG_GDM_FWD_CFG(4); break; - case XSI_ETH_PORT: + case 4: + /* FIXME: handle XSI_USB_PORT */ vip_port = XSI_ETH_VIP_PORT_MASK; - cfg_addr = REG_GDM_FWD_CFG(4); break; default: - return -EINVAL; + return 0; } if (enable) { @@ -1055,51 +131,17 @@ static int airoha_set_gdm_port(struct airoha_eth *eth, int port, bool enable) airoha_fe_clear(eth, REG_FE_IFC_PORT_EN, vip_port); } - airoha_set_gdm_port_fwd_cfg(eth, cfg_addr, val); - return 0; } -static int airoha_set_gdm_ports(struct airoha_eth *eth, bool enable) -{ - const int port_list[] = { - XSI_PCIE0_PORT, - XSI_PCIE1_PORT, - XSI_USB_PORT, - XSI_ETH_PORT - }; - int i, err; - - for (i = 0; i < ARRAY_SIZE(port_list); i++) { - err = airoha_set_gdm_port(eth, port_list[i], enable); - if (err) - goto error; - } - - return 0; - -error: - for (i--; i >= 0; i--) - airoha_set_gdm_port(eth, port_list[i], false); - - return err; -} - static void airoha_fe_maccr_init(struct airoha_eth *eth) { int p; - for (p = 1; p <= ARRAY_SIZE(eth->ports); p++) { + for (p = 1; p <= ARRAY_SIZE(eth->ports); p++) airoha_fe_set(eth, REG_GDM_FWD_CFG(p), GDM_TCP_CKSUM | GDM_UDP_CKSUM | GDM_IP4_CKSUM | GDM_DROP_CRC_ERR); - airoha_set_gdm_port_fwd_cfg(eth, REG_GDM_FWD_CFG(p), - FE_PSE_PORT_CDM1); - airoha_fe_rmw(eth, REG_GDM_LEN_CFG(p), - GDM_SHORT_LEN_MASK | GDM_LONG_LEN_MASK, - FIELD_PREP(GDM_SHORT_LEN_MASK, 60) | - FIELD_PREP(GDM_LONG_LEN_MASK, 4004)); - } airoha_fe_rmw(eth, REG_CDM1_VLAN_CTRL, CDM1_VLAN_MASK, FIELD_PREP(CDM1_VLAN_MASK, 0x8100)); @@ -1547,7 +589,7 @@ static int airoha_qdma_get_gdm_port(struct airoha_eth *eth, sport = FIELD_GET(QDMA_ETH_RXMSG_SPORT_MASK, msg1); switch (sport) { - case 0x10 ... 0x13: + case 0x10 ... 0x14: port = 0; break; case 0x2 ... 
0x4: @@ -1571,10 +613,12 @@ static int airoha_qdma_rx_process(struct airoha_queue *q, int budget) while (done < budget) { struct airoha_queue_entry *e = &q->entry[q->tail]; struct airoha_qdma_desc *desc = &q->desc[q->tail]; + u32 hash, reason, msg1 = le32_to_cpu(desc->msg1); dma_addr_t dma_addr = le32_to_cpu(desc->addr); + struct page *page = virt_to_head_page(e->buf); u32 desc_ctrl = le32_to_cpu(desc->ctrl); - struct sk_buff *skb; - int len, p; + struct airoha_gdm_port *port; + int data_len, len, p; if (!(desc_ctrl & QDMA_DESC_DONE_MASK)) break; @@ -1592,32 +636,74 @@ static int airoha_qdma_rx_process(struct airoha_queue *q, int budget) dma_sync_single_for_cpu(eth->dev, dma_addr, SKB_WITH_OVERHEAD(q->buf_size), dir); + data_len = q->skb ? q->buf_size + : SKB_WITH_OVERHEAD(q->buf_size); + if (data_len < len) + goto free_frag; + p = airoha_qdma_get_gdm_port(eth, desc); - if (p < 0 || !eth->ports[p]) { - page_pool_put_full_page(q->page_pool, - virt_to_head_page(e->buf), - true); - continue; + if (p < 0 || !eth->ports[p]) + goto free_frag; + + port = eth->ports[p]; + if (!q->skb) { /* first buffer */ + q->skb = napi_build_skb(e->buf, q->buf_size); + if (!q->skb) + goto free_frag; + + __skb_put(q->skb, len); + skb_mark_for_recycle(q->skb); + q->skb->dev = port->dev; + q->skb->protocol = eth_type_trans(q->skb, port->dev); + q->skb->ip_summed = CHECKSUM_UNNECESSARY; + skb_record_rx_queue(q->skb, qid); + } else { /* scattered frame */ + struct skb_shared_info *shinfo = skb_shinfo(q->skb); + int nr_frags = shinfo->nr_frags; + + if (nr_frags >= ARRAY_SIZE(shinfo->frags)) + goto free_frag; + + skb_add_rx_frag(q->skb, nr_frags, page, + e->buf - page_address(page), len, + q->buf_size); } - skb = napi_build_skb(e->buf, q->buf_size); - if (!skb) { - page_pool_put_full_page(q->page_pool, - virt_to_head_page(e->buf), - true); - break; + if (FIELD_GET(QDMA_DESC_MORE_MASK, desc_ctrl)) + continue; + + if (netdev_uses_dsa(port->dev)) { + /* PPE module requires untagged packets to work + * properly and it provides DSA port index via the + * DMA descriptor. Report DSA tag to the DSA stack + * via skb dst info. 
+ */ + u32 sptag = FIELD_GET(QDMA_ETH_RXMSG_SPTAG, + le32_to_cpu(desc->msg0)); + + if (sptag < ARRAY_SIZE(port->dsa_meta) && + port->dsa_meta[sptag]) + skb_dst_set_noref(q->skb, + &port->dsa_meta[sptag]->dst); } - skb_reserve(skb, 2); - __skb_put(skb, len); - skb_mark_for_recycle(skb); - skb->dev = eth->ports[p]->dev; - skb->protocol = eth_type_trans(skb, skb->dev); - skb->ip_summed = CHECKSUM_UNNECESSARY; - skb_record_rx_queue(skb, qid); - napi_gro_receive(&q->napi, skb); + hash = FIELD_GET(AIROHA_RXD4_FOE_ENTRY, msg1); + if (hash != AIROHA_RXD4_FOE_ENTRY) + skb_set_hash(q->skb, jhash_1word(hash, 0), + PKT_HASH_TYPE_L4); + + reason = FIELD_GET(AIROHA_RXD4_PPE_CPU_REASON, msg1); + if (reason == PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED) + airoha_ppe_check_skb(eth->ppe, hash); done++; + napi_gro_receive(&q->napi, q->skb); + q->skb = NULL; + continue; +free_frag: + page_pool_put_full_page(q->page_pool, page, true); + dev_kfree_skb(q->skb); + q->skb = NULL; } airoha_qdma_fill_rx_queue(q); @@ -1692,6 +778,7 @@ static int airoha_qdma_init_rx_queue(struct airoha_queue *q, FIELD_PREP(RX_RING_THR_MASK, thr)); airoha_qdma_rmw(qdma, REG_RX_DMA_IDX(qid), RX_RING_DMA_IDX_MASK, FIELD_PREP(RX_RING_DMA_IDX_MASK, q->head)); + airoha_qdma_set(qdma, REG_RX_SCATTER_CFG(qid), RX_RING_SG_EN_MASK); airoha_qdma_fill_rx_queue(q); @@ -2091,7 +1178,6 @@ static int airoha_qdma_hw_init(struct airoha_qdma *qdma) } airoha_qdma_wr(qdma, REG_QDMA_GLOBAL_CFG, - GLOBAL_CFG_RX_2B_OFFSET_MASK | FIELD_PREP(GLOBAL_CFG_DMA_PREFERENCE_MASK, 3) | GLOBAL_CFG_CPU_TXR_RR_MASK | GLOBAL_CFG_PAYLOAD_BYTE_SWAP_MASK | @@ -2235,6 +1321,10 @@ static int airoha_hw_init(struct platform_device *pdev, return err; } + err = airoha_ppe_init(eth); + if (err) + return err; + set_bit(DEV_STATE_INITIALIZED, &eth->state); return 0; @@ -2441,12 +1531,12 @@ static void airoha_update_hw_stats(struct airoha_gdm_port *port) static int airoha_dev_open(struct net_device *dev) { + int err, len = ETH_HLEN + dev->mtu + ETH_FCS_LEN; struct airoha_gdm_port *port = netdev_priv(dev); struct airoha_qdma *qdma = port->qdma; - int err; netif_tx_start_all_queues(dev); - err = airoha_set_gdm_ports(qdma->eth, true); + err = airoha_set_vip_for_gdm_port(port, true); if (err) return err; @@ -2457,9 +1547,15 @@ static int airoha_dev_open(struct net_device *dev) airoha_fe_clear(qdma->eth, REG_GDM_INGRESS_CFG(port->id), GDM_STAG_EN_MASK); + airoha_fe_rmw(qdma->eth, REG_GDM_LEN_CFG(port->id), + GDM_SHORT_LEN_MASK | GDM_LONG_LEN_MASK, + FIELD_PREP(GDM_SHORT_LEN_MASK, 60) | + FIELD_PREP(GDM_LONG_LEN_MASK, len)); + airoha_qdma_set(qdma, REG_QDMA_GLOBAL_CFG, GLOBAL_CFG_TX_DMA_EN_MASK | GLOBAL_CFG_RX_DMA_EN_MASK); + atomic_inc(&qdma->users); return 0; } @@ -2471,20 +1567,24 @@ static int airoha_dev_stop(struct net_device *dev) int i, err; netif_tx_disable(dev); - err = airoha_set_gdm_ports(qdma->eth, false); + err = airoha_set_vip_for_gdm_port(port, false); if (err) return err; - airoha_qdma_clear(qdma, REG_QDMA_GLOBAL_CFG, - GLOBAL_CFG_TX_DMA_EN_MASK | - GLOBAL_CFG_RX_DMA_EN_MASK); + for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) + netdev_tx_reset_subqueue(dev, i); - for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) { - if (!qdma->q_tx[i].ndesc) - continue; + if (atomic_dec_and_test(&qdma->users)) { + airoha_qdma_clear(qdma, REG_QDMA_GLOBAL_CFG, + GLOBAL_CFG_TX_DMA_EN_MASK | + GLOBAL_CFG_RX_DMA_EN_MASK); - airoha_qdma_cleanup_tx_queue(&qdma->q_tx[i]); - netdev_tx_reset_subqueue(dev, i); + for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) { + if (!qdma->q_tx[i].ndesc) + continue; + +
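The reworked RX loop above implements multi-descriptor frame assembly: the first buffer becomes the skb head via napi_build_skb(), later buffers are attached as page frags, and the frame is only handed to GRO once a descriptor arrives without the MORE bit set. A condensed restatement of that control flow (not additional driver code):

	if (!q->skb) {			/* first segment: build the head */
		q->skb = napi_build_skb(e->buf, q->buf_size);
		__skb_put(q->skb, len);
	} else {			/* continuation: append as a frag */
		skb_add_rx_frag(q->skb, nr_frags, page,
				e->buf - page_address(page), len, q->buf_size);
	}
	if (FIELD_GET(QDMA_DESC_MORE_MASK, desc_ctrl))
		continue;		/* more segments pending */
	napi_gro_receive(&q->napi, q->skb);	/* frame complete */
	q->skb = NULL;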
airoha_qdma_cleanup_tx_queue(&qdma->q_tx[i]); + } } return 0; @@ -2504,12 +1604,82 @@ static int airoha_dev_set_macaddr(struct net_device *dev, void *p) return 0; } +static void airoha_set_gdm2_loopback(struct airoha_gdm_port *port) +{ + u32 pse_port = port->id == 3 ? FE_PSE_PORT_GDM3 : FE_PSE_PORT_GDM4; + struct airoha_eth *eth = port->qdma->eth; + u32 chan = port->id == 3 ? 4 : 0; + + /* Forward the traffic to the proper GDM port */ + airoha_set_gdm_port_fwd_cfg(eth, REG_GDM_FWD_CFG(2), pse_port); + airoha_fe_clear(eth, REG_GDM_FWD_CFG(2), GDM_STRIP_CRC); + + /* Enable GDM2 loopback */ + airoha_fe_wr(eth, REG_GDM_TXCHN_EN(2), 0xffffffff); + airoha_fe_wr(eth, REG_GDM_RXCHN_EN(2), 0xffff); + airoha_fe_rmw(eth, REG_GDM_LPBK_CFG(2), + LPBK_CHAN_MASK | LPBK_MODE_MASK | LPBK_EN_MASK, + FIELD_PREP(LPBK_CHAN_MASK, chan) | LPBK_EN_MASK); + airoha_fe_rmw(eth, REG_GDM_LEN_CFG(2), + GDM_SHORT_LEN_MASK | GDM_LONG_LEN_MASK, + FIELD_PREP(GDM_SHORT_LEN_MASK, 60) | + FIELD_PREP(GDM_LONG_LEN_MASK, AIROHA_MAX_MTU)); + + /* Disable VIP and IFC for GDM2 */ + airoha_fe_clear(eth, REG_FE_VIP_PORT_EN, BIT(2)); + airoha_fe_clear(eth, REG_FE_IFC_PORT_EN, BIT(2)); + + if (port->id == 3) { + /* FIXME: handle XSI_PCE1_PORT */ + airoha_fe_wr(eth, REG_PPE_DFT_CPORT0(0), 0x5500); + airoha_fe_rmw(eth, REG_FE_WAN_PORT, + WAN1_EN_MASK | WAN1_MASK | WAN0_MASK, + FIELD_PREP(WAN0_MASK, HSGMII_LAN_PCIE0_SRCPORT)); + airoha_fe_rmw(eth, + REG_SP_DFT_CPORT(HSGMII_LAN_PCIE0_SRCPORT >> 3), + SP_CPORT_PCIE0_MASK, + FIELD_PREP(SP_CPORT_PCIE0_MASK, + FE_PSE_PORT_CDM2)); + } else { + /* FIXME: handle XSI_USB_PORT */ + airoha_fe_rmw(eth, REG_SRC_PORT_FC_MAP6, + FC_ID_OF_SRC_PORT24_MASK, + FIELD_PREP(FC_ID_OF_SRC_PORT24_MASK, 2)); + airoha_fe_rmw(eth, REG_FE_WAN_PORT, + WAN1_EN_MASK | WAN1_MASK | WAN0_MASK, + FIELD_PREP(WAN0_MASK, HSGMII_LAN_ETH_SRCPORT)); + airoha_fe_rmw(eth, + REG_SP_DFT_CPORT(HSGMII_LAN_ETH_SRCPORT >> 3), + SP_CPORT_ETH_MASK, + FIELD_PREP(SP_CPORT_ETH_MASK, FE_PSE_PORT_CDM2)); + } +} + static int airoha_dev_init(struct net_device *dev) { struct airoha_gdm_port *port = netdev_priv(dev); + struct airoha_eth *eth = port->qdma->eth; + u32 pse_port; airoha_set_macaddr(port, dev->dev_addr); + switch (port->id) { + case 3: + case 4: + /* If GDM2 is active we can't enable loopback */ + if (!eth->ports[1]) + airoha_set_gdm2_loopback(port); + fallthrough; + case 2: + pse_port = FE_PSE_PORT_PPE2; + break; + default: + pse_port = FE_PSE_PORT_PPE1; + break; + } + + airoha_set_gdm_port_fwd_cfg(eth, REG_GDM_FWD_CFG(port->id), pse_port); + return 0; } @@ -2535,6 +1705,20 @@ static void airoha_dev_get_stats64(struct net_device *dev, } while (u64_stats_fetch_retry(&port->stats.syncp, start)); } +static int airoha_dev_change_mtu(struct net_device *dev, int mtu) +{ + struct airoha_gdm_port *port = netdev_priv(dev); + struct airoha_eth *eth = port->qdma->eth; + u32 len = ETH_HLEN + mtu + ETH_FCS_LEN; + + airoha_fe_rmw(eth, REG_GDM_LEN_CFG(port->id), + GDM_LONG_LEN_MASK, + FIELD_PREP(GDM_LONG_LEN_MASK, len)); + WRITE_ONCE(dev->mtu, mtu); + + return 0; +} + static u16 airoha_dev_select_queue(struct net_device *dev, struct sk_buff *skb, struct net_device *sb_dev) { @@ -2553,26 +1737,71 @@ static u16 airoha_dev_select_queue(struct net_device *dev, struct sk_buff *skb, return queue < dev->num_tx_queues ?
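Since both GDM ports on one QDMA block share the global DMA enable, the new atomic users counter makes ndo_open/ndo_stop refcounted: the first open enables TX/RX DMA and only the last stop tears it down. The pattern, extracted from the airoha_dev_open()/airoha_dev_stop() hunks above:

	/* airoha_dev_open() */
	atomic_inc(&qdma->users);

	/* airoha_dev_stop() */
	if (atomic_dec_and_test(&qdma->users))
		airoha_qdma_clear(qdma, REG_QDMA_GLOBAL_CFG,
				  GLOBAL_CFG_TX_DMA_EN_MASK |
				  GLOBAL_CFG_RX_DMA_EN_MASK);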
queue : 0; } +static u32 airoha_get_dsa_tag(struct sk_buff *skb, struct net_device *dev) +{ +#if IS_ENABLED(CONFIG_NET_DSA) + struct ethhdr *ehdr; + u8 xmit_tpid; + u16 tag; + + if (!netdev_uses_dsa(dev)) + return 0; + + if (dev->dsa_ptr->tag_ops->proto != DSA_TAG_PROTO_MTK) + return 0; + + if (skb_cow_head(skb, 0)) + return 0; + + ehdr = (struct ethhdr *)skb->data; + tag = be16_to_cpu(ehdr->h_proto); + xmit_tpid = tag >> 8; + + switch (xmit_tpid) { + case MTK_HDR_XMIT_TAGGED_TPID_8100: + ehdr->h_proto = cpu_to_be16(ETH_P_8021Q); + tag &= ~(MTK_HDR_XMIT_TAGGED_TPID_8100 << 8); + break; + case MTK_HDR_XMIT_TAGGED_TPID_88A8: + ehdr->h_proto = cpu_to_be16(ETH_P_8021AD); + tag &= ~(MTK_HDR_XMIT_TAGGED_TPID_88A8 << 8); + break; + default: + /* PPE module requires untagged DSA packets to work properly, + * so move DSA tag to DMA descriptor. + */ + memmove(skb->data + MTK_HDR_LEN, skb->data, 2 * ETH_ALEN); + __skb_pull(skb, MTK_HDR_LEN); + break; + } + + return tag; +#else + return 0; +#endif +} + static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb, struct net_device *dev) { - struct skb_shared_info *sinfo = skb_shinfo(skb); struct airoha_gdm_port *port = netdev_priv(dev); - u32 msg0, msg1, len = skb_headlen(skb); struct airoha_qdma *qdma = port->qdma; - u32 nr_frags = 1 + sinfo->nr_frags; + u32 nr_frags, tag, msg0, msg1, len; struct netdev_queue *txq; struct airoha_queue *q; - void *data = skb->data; + void *data; int i, qid; u16 index; u8 fport; qid = skb_get_queue_mapping(skb) % ARRAY_SIZE(qdma->q_tx); + tag = airoha_get_dsa_tag(skb, dev); + msg0 = FIELD_PREP(QDMA_ETH_TXMSG_CHAN_MASK, qid / AIROHA_NUM_QOS_QUEUES) | FIELD_PREP(QDMA_ETH_TXMSG_QUEUE_MASK, - qid % AIROHA_NUM_QOS_QUEUES); + qid % AIROHA_NUM_QOS_QUEUES) | + FIELD_PREP(QDMA_ETH_TXMSG_SP_TAG_MASK, tag); if (skb->ip_summed == CHECKSUM_PARTIAL) msg0 |= FIELD_PREP(QDMA_ETH_TXMSG_TCO_MASK, 1) | FIELD_PREP(QDMA_ETH_TXMSG_UCO_MASK, 1) | @@ -2583,8 +1812,9 @@ static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb, if (skb_cow_head(skb, 0)) goto error; - if (sinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) { - __be16 csum = cpu_to_be16(sinfo->gso_size); + if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | + SKB_GSO_TCPV6)) { + __be16 csum = cpu_to_be16(skb_shinfo(skb)->gso_size); tcp_hdr(skb)->check = (__force __sum16)csum; msg0 |= FIELD_PREP(QDMA_ETH_TXMSG_TSO_MASK, 1); @@ -2602,6 +1832,8 @@ static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb, spin_lock_bh(&q->lock); txq = netdev_get_tx_queue(dev, qid); + nr_frags = 1 + skb_shinfo(skb)->nr_frags; + if (q->queued + nr_frags > q->ndesc) { /* not enough space in the queue */ netif_tx_stop_queue(txq); @@ -2609,11 +1841,14 @@ static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb, return NETDEV_TX_BUSY; } + len = skb_headlen(skb); + data = skb->data; index = q->head; + for (i = 0; i < nr_frags; i++) { struct airoha_qdma_desc *desc = &q->desc[index]; struct airoha_queue_entry *e = &q->entry[index]; - skb_frag_t *frag = &sinfo->frags[i]; + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; dma_addr_t addr; u32 val; @@ -3038,6 +2273,47 @@ static int airoha_tc_htb_alloc_leaf_queue(struct airoha_gdm_port *port, return 0; } +static int airoha_dev_setup_tc_block(struct airoha_gdm_port *port, + struct flow_block_offload *f) +{ + flow_setup_cb_t *cb = airoha_ppe_setup_tc_block_cb; + static LIST_HEAD(block_cb_list); + struct flow_block_cb *block_cb; + + if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) + return -EOPNOTSUPP; + + f->driver_block_list = &block_cb_list; + switch (f->command) { + 
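airoha_get_dsa_tag() above strips or converts the 4-byte MTK DSA header so the PPE sees an untagged frame, and the raw tag then travels out of band in the TX descriptor. A condensed view of how it lands in msg0 (restating the xmit hunk above):

	tag = airoha_get_dsa_tag(skb, dev);	/* 0 if not a DSA conduit */
	msg0 = FIELD_PREP(QDMA_ETH_TXMSG_CHAN_MASK,
			  qid / AIROHA_NUM_QOS_QUEUES) |
	       FIELD_PREP(QDMA_ETH_TXMSG_QUEUE_MASK,
			  qid % AIROHA_NUM_QOS_QUEUES) |
	       FIELD_PREP(QDMA_ETH_TXMSG_SP_TAG_MASK, tag);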
case FLOW_BLOCK_BIND: + block_cb = flow_block_cb_lookup(f->block, cb, port->dev); + if (block_cb) { + flow_block_cb_incref(block_cb); + return 0; + } + block_cb = flow_block_cb_alloc(cb, port->dev, port->dev, NULL); + if (IS_ERR(block_cb)) + return PTR_ERR(block_cb); + + flow_block_cb_incref(block_cb); + flow_block_cb_add(block_cb, f); + list_add_tail(&block_cb->driver_list, &block_cb_list); + return 0; + case FLOW_BLOCK_UNBIND: + block_cb = flow_block_cb_lookup(f->block, cb, port->dev); + if (!block_cb) + return -ENOENT; + + if (!flow_block_cb_decref(block_cb)) { + flow_block_cb_remove(block_cb, f); + list_del(&block_cb->driver_list); + } + return 0; + default: + return -EOPNOTSUPP; + } +} + static void airoha_tc_remove_htb_queue(struct airoha_gdm_port *port, int queue) { struct net_device *dev = port->dev; @@ -3121,6 +2397,9 @@ static int airoha_dev_tc_setup(struct net_device *dev, enum tc_setup_type type, return airoha_tc_setup_qdisc_ets(port, type_data); case TC_SETUP_QDISC_HTB: return airoha_tc_setup_qdisc_htb(port, type_data); + case TC_SETUP_BLOCK: + case TC_SETUP_FT: + return airoha_dev_setup_tc_block(port, type_data); default: return -EOPNOTSUPP; } @@ -3130,6 +2409,7 @@ static const struct net_device_ops airoha_netdev_ops = { .ndo_init = airoha_dev_init, .ndo_open = airoha_dev_open, .ndo_stop = airoha_dev_stop, + .ndo_change_mtu = airoha_dev_change_mtu, .ndo_select_queue = airoha_dev_select_queue, .ndo_start_xmit = airoha_dev_xmit, .ndo_get_stats64 = airoha_dev_get_stats64, @@ -3143,13 +2423,45 @@ static const struct ethtool_ops airoha_ethtool_ops = { .get_rmon_stats = airoha_ethtool_get_rmon_stats, }; -static int airoha_alloc_gdm_port(struct airoha_eth *eth, struct device_node *np) +static int airoha_metadata_dst_alloc(struct airoha_gdm_port *port) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(port->dsa_meta); i++) { + struct metadata_dst *md_dst; + + md_dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX, + GFP_KERNEL); + if (!md_dst) + return -ENOMEM; + + md_dst->u.port_info.port_id = i; + port->dsa_meta[i] = md_dst; + } + + return 0; +} + +static void airoha_metadata_dst_free(struct airoha_gdm_port *port) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(port->dsa_meta); i++) { + if (!port->dsa_meta[i]) + continue; + + metadata_dst_free(port->dsa_meta[i]); + } +} + +static int airoha_alloc_gdm_port(struct airoha_eth *eth, + struct device_node *np, int index) { const __be32 *id_ptr = of_get_property(np, "reg", NULL); struct airoha_gdm_port *port; struct airoha_qdma *qdma; struct net_device *dev; - int err, index; + int err, p; u32 id; if (!id_ptr) { @@ -3158,14 +2470,14 @@ static int airoha_alloc_gdm_port(struct airoha_eth *eth, struct device_node *np) } id = be32_to_cpup(id_ptr); - index = id - 1; + p = id - 1; if (!id || id > ARRAY_SIZE(eth->ports)) { dev_err(eth->dev, "invalid gdm port id: %d\n", id); return -EINVAL; } - if (eth->ports[index]) { + if (eth->ports[p]) { dev_err(eth->dev, "duplicate gdm port id: %d\n", id); return -EINVAL; } @@ -3188,6 +2500,7 @@ static int airoha_alloc_gdm_port(struct airoha_eth *eth, struct device_node *np) NETIF_F_SG | NETIF_F_TSO | NETIF_F_HW_TC; dev->features |= dev->hw_features; + dev->vlan_features = dev->hw_features; dev->dev.of_node = np; dev->irq = qdma->irq; SET_NETDEV_DEV(dev, eth->dev); @@ -3213,7 +2526,11 @@ static int airoha_alloc_gdm_port(struct airoha_eth *eth, struct device_node *np) port->qdma = qdma; port->dev = dev; port->id = id; - eth->ports[index] = port; + eth->ports[p] = port; + + err = airoha_metadata_dst_alloc(port); + if (err) + 
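The per-port dsa_meta[] table filled in by airoha_metadata_dst_alloc() in this hunk gives RX a pre-built METADATA_HW_PORT_MUX dst for each possible switch port; the RX path attaches it with skb_dst_set_noref() so the DSA conduit code can demultiplex the frame. A hypothetical reader on the skb side, only to show what that makes visible (sketch, helper name assumed):

	#include <net/dst_metadata.h>

	/* hypothetical: recover the switch port index stashed on RX */
	static int airoha_skb_dsa_port(const struct sk_buff *skb)
	{
		struct metadata_dst *md = skb_metadata_dst(skb);

		if (!md || md->type != METADATA_HW_PORT_MUX)
			return -1;

		return md->u.port_info.port_id;
	}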
return err; return register_netdev(dev); } @@ -3281,6 +2598,7 @@ static int airoha_probe(struct platform_device *pdev) for (i = 0; i < ARRAY_SIZE(eth->qdma); i++) airoha_qdma_start_napi(&eth->qdma[i]); + i = 0; for_each_child_of_node(pdev->dev.of_node, np) { if (!of_device_is_compatible(np, "airoha,eth-mac")) continue; @@ -3288,7 +2606,7 @@ static int airoha_probe(struct platform_device *pdev) if (!of_device_is_available(np)) continue; - err = airoha_alloc_gdm_port(eth, np); + err = airoha_alloc_gdm_port(eth, np, i++); if (err) { of_node_put(np); goto error_napi_stop; @@ -3307,8 +2625,10 @@ static int airoha_probe(struct platform_device *pdev) for (i = 0; i < ARRAY_SIZE(eth->ports); i++) { struct airoha_gdm_port *port = eth->ports[i]; - if (port && port->dev->reg_state == NETREG_REGISTERED) + if (port && port->dev->reg_state == NETREG_REGISTERED) { unregister_netdev(port->dev); + airoha_metadata_dst_free(port); + } } free_netdev(eth->napi_dev); platform_set_drvdata(pdev, NULL); @@ -3334,9 +2654,11 @@ static void airoha_remove(struct platform_device *pdev) airoha_dev_stop(port->dev); unregister_netdev(port->dev); + airoha_metadata_dst_free(port); } free_netdev(eth->napi_dev); + airoha_ppe_deinit(eth); platform_set_drvdata(pdev, NULL); } diff --git a/drivers/net/ethernet/airoha/airoha_eth.h b/drivers/net/ethernet/airoha/airoha_eth.h new file mode 100644 index 0000000000000..60690b685710c --- /dev/null +++ b/drivers/net/ethernet/airoha/airoha_eth.h @@ -0,0 +1,552 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2024 AIROHA Inc + * Author: Lorenzo Bianconi + */ + +#ifndef AIROHA_ETH_H +#define AIROHA_ETH_H + +#include +#include +#include +#include +#include +#include +#include + +#define AIROHA_MAX_NUM_GDM_PORTS 4 +#define AIROHA_MAX_NUM_QDMA 2 +#define AIROHA_MAX_DSA_PORTS 7 +#define AIROHA_MAX_NUM_RSTS 3 +#define AIROHA_MAX_NUM_XSI_RSTS 5 +#define AIROHA_MAX_MTU 9216 +#define AIROHA_MAX_PACKET_SIZE 2048 +#define AIROHA_NUM_QOS_CHANNELS 4 +#define AIROHA_NUM_QOS_QUEUES 8 +#define AIROHA_NUM_TX_RING 32 +#define AIROHA_NUM_RX_RING 32 +#define AIROHA_NUM_NETDEV_TX_RINGS (AIROHA_NUM_TX_RING + \ + AIROHA_NUM_QOS_CHANNELS) +#define AIROHA_FE_MC_MAX_VLAN_TABLE 64 +#define AIROHA_FE_MC_MAX_VLAN_PORT 16 +#define AIROHA_NUM_TX_IRQ 2 +#define HW_DSCP_NUM 2048 +#define IRQ_QUEUE_LEN(_n) ((_n) ? 1024 : 2048) +#define TX_DSCP_NUM 1024 +#define RX_DSCP_NUM(_n) \ + ((_n) == 2 ? 128 : \ + (_n) == 11 ? 128 : \ + (_n) == 15 ? 128 : \ + (_n) == 0 ?
1024 : 16) + +#define PSE_RSV_PAGES 128 +#define PSE_QUEUE_RSV_PAGES 64 + +#define QDMA_METER_IDX(_n) ((_n) & 0xff) +#define QDMA_METER_GROUP(_n) (((_n) >> 8) & 0x3) + +#define PPE_NUM 2 +#define PPE1_SRAM_NUM_ENTRIES (8 * 1024) +#define PPE_SRAM_NUM_ENTRIES (2 * PPE1_SRAM_NUM_ENTRIES) +#define PPE_DRAM_NUM_ENTRIES (16 * 1024) +#define PPE_NUM_ENTRIES (PPE_SRAM_NUM_ENTRIES + PPE_DRAM_NUM_ENTRIES) +#define PPE_HASH_MASK (PPE_NUM_ENTRIES - 1) +#define PPE_ENTRY_SIZE 80 +#define PPE_RAM_NUM_ENTRIES_SHIFT(_n) (__ffs((_n) >> 10)) + +#define MTK_HDR_LEN 4 +#define MTK_HDR_XMIT_TAGGED_TPID_8100 1 +#define MTK_HDR_XMIT_TAGGED_TPID_88A8 2 + +enum { + QDMA_INT_REG_IDX0, + QDMA_INT_REG_IDX1, + QDMA_INT_REG_IDX2, + QDMA_INT_REG_IDX3, + QDMA_INT_REG_IDX4, + QDMA_INT_REG_MAX +}; + +enum { + HSGMII_LAN_PCIE0_SRCPORT = 0x16, + HSGMII_LAN_PCIE1_SRCPORT, + HSGMII_LAN_ETH_SRCPORT, + HSGMII_LAN_USB_SRCPORT, +}; + +enum { + XSI_PCIE0_VIP_PORT_MASK = BIT(22), + XSI_PCIE1_VIP_PORT_MASK = BIT(23), + XSI_USB_VIP_PORT_MASK = BIT(25), + XSI_ETH_VIP_PORT_MASK = BIT(24), +}; + +enum { + DEV_STATE_INITIALIZED, +}; + +enum { + CDM_CRSN_QSEL_Q1 = 1, + CDM_CRSN_QSEL_Q5 = 5, + CDM_CRSN_QSEL_Q6 = 6, + CDM_CRSN_QSEL_Q15 = 15, +}; + +enum { + CRSN_08 = 0x8, + CRSN_21 = 0x15, /* KA */ + CRSN_22 = 0x16, /* hit bind and force route to CPU */ + CRSN_24 = 0x18, + CRSN_25 = 0x19, +}; + +enum { + FE_PSE_PORT_CDM1, + FE_PSE_PORT_GDM1, + FE_PSE_PORT_GDM2, + FE_PSE_PORT_GDM3, + FE_PSE_PORT_PPE1, + FE_PSE_PORT_CDM2, + FE_PSE_PORT_CDM3, + FE_PSE_PORT_CDM4, + FE_PSE_PORT_PPE2, + FE_PSE_PORT_GDM4, + FE_PSE_PORT_CDM5, + FE_PSE_PORT_DROP = 0xf, +}; + +enum tx_sched_mode { + TC_SCH_WRR8, + TC_SCH_SP, + TC_SCH_WRR7, + TC_SCH_WRR6, + TC_SCH_WRR5, + TC_SCH_WRR4, + TC_SCH_WRR3, + TC_SCH_WRR2, +}; + +enum trtcm_param_type { + TRTCM_MISC_MODE, /* meter_en, pps_mode, tick_sel */ + TRTCM_TOKEN_RATE_MODE, + TRTCM_BUCKETSIZE_SHIFT_MODE, + TRTCM_BUCKET_COUNTER_MODE, +}; + +enum trtcm_mode_type { + TRTCM_COMMIT_MODE, + TRTCM_PEAK_MODE, +}; + +enum trtcm_param { + TRTCM_TICK_SEL = BIT(0), + TRTCM_PKT_MODE = BIT(1), + TRTCM_METER_MODE = BIT(2), +}; + +#define MIN_TOKEN_SIZE 4096 +#define MAX_TOKEN_SIZE_OFFSET 17 +#define TRTCM_TOKEN_RATE_MASK GENMASK(23, 6) +#define TRTCM_TOKEN_RATE_FRACTION_MASK GENMASK(5, 0) + +struct airoha_queue_entry { + union { + void *buf; + struct sk_buff *skb; + }; + dma_addr_t dma_addr; + u16 dma_len; +}; + +struct airoha_queue { + struct airoha_qdma *qdma; + + /* protect concurrent queue accesses */ + spinlock_t lock; + struct airoha_queue_entry *entry; + struct airoha_qdma_desc *desc; + u16 head; + u16 tail; + + int queued; + int ndesc; + int free_thr; + int buf_size; + + struct napi_struct napi; + struct page_pool *page_pool; + struct sk_buff *skb; +}; + +struct airoha_tx_irq_queue { + struct airoha_qdma *qdma; + + struct napi_struct napi; + + int size; + u32 *q; +}; + +struct airoha_hw_stats { + /* protect concurrent hw_stats accesses */ + spinlock_t lock; + struct u64_stats_sync syncp; + + /* get_stats64 */ + u64 rx_ok_pkts; + u64 tx_ok_pkts; + u64 rx_ok_bytes; + u64 tx_ok_bytes; + u64 rx_multicast; + u64 rx_errors; + u64 rx_drops; + u64 tx_drops; + u64 rx_crc_error; + u64 rx_over_errors; + /* ethtool stats */ + u64 tx_broadcast; + u64 tx_multicast; + u64 tx_len[7]; + u64 rx_broadcast; + u64 rx_fragment; + u64 rx_jabber; + u64 rx_len[7]; +}; + +enum { + PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED = 0x0f, +}; + +enum { + AIROHA_FOE_STATE_INVALID, + AIROHA_FOE_STATE_UNBIND, + AIROHA_FOE_STATE_BIND, + AIROHA_FOE_STATE_FIN +}; + 
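The FOE state machine above (INVALID/UNBIND/BIND/FIN) is encoded in the top bits of each entry's ib1 word; with the AIROHA_FOE_IB1_BIND_STATE mask defined a little further down, a bound-entry test is a one-liner (illustrative helper, not part of the patch):

	static bool airoha_foe_entry_is_bound(const struct airoha_foe_entry *e)
	{
		return FIELD_GET(AIROHA_FOE_IB1_BIND_STATE, e->ib1) ==
		       AIROHA_FOE_STATE_BIND;
	}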
+enum { + PPE_PKT_TYPE_IPV4_HNAPT = 0, + PPE_PKT_TYPE_IPV4_ROUTE = 1, + PPE_PKT_TYPE_BRIDGE = 2, + PPE_PKT_TYPE_IPV4_DSLITE = 3, + PPE_PKT_TYPE_IPV6_ROUTE_3T = 4, + PPE_PKT_TYPE_IPV6_ROUTE_5T = 5, + PPE_PKT_TYPE_IPV6_6RD = 7, +}; + +#define AIROHA_FOE_MAC_SMAC_ID GENMASK(20, 16) +#define AIROHA_FOE_MAC_PPPOE_ID GENMASK(15, 0) + +struct airoha_foe_mac_info_common { + u16 vlan1; + u16 etype; + + u32 dest_mac_hi; + + u16 vlan2; + u16 dest_mac_lo; + + u32 src_mac_hi; +}; + +struct airoha_foe_mac_info { + struct airoha_foe_mac_info_common common; + + u16 pppoe_id; + u16 src_mac_lo; +}; + +#define AIROHA_FOE_IB1_UNBIND_PREBIND BIT(24) +#define AIROHA_FOE_IB1_UNBIND_PACKETS GENMASK(23, 8) +#define AIROHA_FOE_IB1_UNBIND_TIMESTAMP GENMASK(7, 0) + +#define AIROHA_FOE_IB1_BIND_STATIC BIT(31) +#define AIROHA_FOE_IB1_BIND_UDP BIT(30) +#define AIROHA_FOE_IB1_BIND_STATE GENMASK(29, 28) +#define AIROHA_FOE_IB1_BIND_PACKET_TYPE GENMASK(27, 25) +#define AIROHA_FOE_IB1_BIND_TTL BIT(24) +#define AIROHA_FOE_IB1_BIND_TUNNEL_DECAP BIT(23) +#define AIROHA_FOE_IB1_BIND_PPPOE BIT(22) +#define AIROHA_FOE_IB1_BIND_VPM GENMASK(21, 20) +#define AIROHA_FOE_IB1_BIND_VLAN_LAYER GENMASK(19, 16) +#define AIROHA_FOE_IB1_BIND_KEEPALIVE BIT(15) +#define AIROHA_FOE_IB1_BIND_TIMESTAMP GENMASK(14, 0) + +#define AIROHA_FOE_IB2_DSCP GENMASK(31, 24) +#define AIROHA_FOE_IB2_PORT_AG GENMASK(23, 13) +#define AIROHA_FOE_IB2_PCP BIT(12) +#define AIROHA_FOE_IB2_MULTICAST BIT(11) +#define AIROHA_FOE_IB2_FAST_PATH BIT(10) +#define AIROHA_FOE_IB2_PSE_QOS BIT(9) +#define AIROHA_FOE_IB2_PSE_PORT GENMASK(8, 5) +#define AIROHA_FOE_IB2_NBQ GENMASK(4, 0) + +#define AIROHA_FOE_ACTDP GENMASK(31, 24) +#define AIROHA_FOE_SHAPER_ID GENMASK(23, 16) +#define AIROHA_FOE_CHANNEL GENMASK(15, 11) +#define AIROHA_FOE_QID GENMASK(10, 8) +#define AIROHA_FOE_DPI BIT(7) +#define AIROHA_FOE_TUNNEL BIT(6) +#define AIROHA_FOE_TUNNEL_ID GENMASK(5, 0) + +struct airoha_foe_bridge { + u32 dest_mac_hi; + + u16 src_mac_hi; + u16 dest_mac_lo; + + u32 src_mac_lo; + + u32 ib2; + + u32 rsv[5]; + + u32 data; + + struct airoha_foe_mac_info l2; +}; + +struct airoha_foe_ipv4_tuple { + u32 src_ip; + u32 dest_ip; + union { + struct { + u16 dest_port; + u16 src_port; + }; + struct { + u8 protocol; + u8 _pad[3]; /* fill with 0xa5a5a5 */ + }; + u32 ports; + }; +}; + +struct airoha_foe_ipv4 { + struct airoha_foe_ipv4_tuple orig_tuple; + + u32 ib2; + + struct airoha_foe_ipv4_tuple new_tuple; + + u32 rsv[2]; + + u32 data; + + struct airoha_foe_mac_info l2; +}; + +struct airoha_foe_ipv4_dslite { + struct airoha_foe_ipv4_tuple ip4; + + u32 ib2; + + u8 flow_label[3]; + u8 priority; + + u32 rsv[4]; + + u32 data; + + struct airoha_foe_mac_info l2; +}; + +struct airoha_foe_ipv6 { + u32 src_ip[4]; + u32 dest_ip[4]; + + union { + struct { + u16 dest_port; + u16 src_port; + }; + struct { + u8 protocol; + u8 pad[3]; + }; + u32 ports; + }; + + u32 data; + + u32 ib2; + + struct airoha_foe_mac_info_common l2; +}; + +struct airoha_foe_entry { + union { + struct { + u32 ib1; + union { + struct airoha_foe_bridge bridge; + struct airoha_foe_ipv4 ipv4; + struct airoha_foe_ipv4_dslite dslite; + struct airoha_foe_ipv6 ipv6; + DECLARE_FLEX_ARRAY(u32, d); + }; + }; + u8 data[PPE_ENTRY_SIZE]; + }; +}; + +struct airoha_flow_data { + struct ethhdr eth; + + union { + struct { + __be32 src_addr; + __be32 dst_addr; + } v4; + + struct { + struct in6_addr src_addr; + struct in6_addr dst_addr; + } v6; + }; + + __be16 src_port; + __be16 dst_port; + + struct { + struct { + u16 id; + __be16 proto; + } hdr[2]; + u8 num; + 
} vlan; + struct { + u16 sid; + u8 num; + } pppoe; +}; + +struct airoha_flow_table_entry { + struct hlist_node list; + + struct airoha_foe_entry data; + u32 hash; + + struct rhash_head node; + unsigned long cookie; +}; + +struct airoha_qdma { + struct airoha_eth *eth; + void __iomem *regs; + + /* protect concurrent irqmask accesses */ + spinlock_t irq_lock; + u32 irqmask[QDMA_INT_REG_MAX]; + int irq; + + atomic_t users; + + struct airoha_tx_irq_queue q_tx_irq[AIROHA_NUM_TX_IRQ]; + + struct airoha_queue q_tx[AIROHA_NUM_TX_RING]; + struct airoha_queue q_rx[AIROHA_NUM_RX_RING]; + + /* descriptor and packet buffers for qdma hw forward */ + struct { + void *desc; + void *q; + } hfwd; +}; + +struct airoha_gdm_port { + struct airoha_qdma *qdma; + struct net_device *dev; + int id; + + struct airoha_hw_stats stats; + + DECLARE_BITMAP(qos_sq_bmap, AIROHA_NUM_QOS_CHANNELS); + + /* qos stats counters */ + u64 cpu_tx_packets; + u64 fwd_tx_packets; + + struct metadata_dst *dsa_meta[AIROHA_MAX_DSA_PORTS]; +}; + +#define AIROHA_RXD4_PPE_CPU_REASON GENMASK(20, 16) +#define AIROHA_RXD4_FOE_ENTRY GENMASK(15, 0) + +struct airoha_ppe { + struct airoha_eth *eth; + + void *foe; + dma_addr_t foe_dma; + + struct hlist_head *foe_flow; + u16 foe_check_time[PPE_NUM_ENTRIES]; + + struct dentry *debugfs_dir; +}; + +struct airoha_eth { + struct device *dev; + + unsigned long state; + void __iomem *fe_regs; + + struct airoha_npu __rcu *npu; + + struct airoha_ppe *ppe; + struct rhashtable flow_table; + + struct reset_control_bulk_data rsts[AIROHA_MAX_NUM_RSTS]; + struct reset_control_bulk_data xsi_rsts[AIROHA_MAX_NUM_XSI_RSTS]; + + struct net_device *napi_dev; + + struct airoha_qdma qdma[AIROHA_MAX_NUM_QDMA]; + struct airoha_gdm_port *ports[AIROHA_MAX_NUM_GDM_PORTS]; +}; + +u32 airoha_rr(void __iomem *base, u32 offset); +void airoha_wr(void __iomem *base, u32 offset, u32 val); +u32 airoha_rmw(void __iomem *base, u32 offset, u32 mask, u32 val); + +#define airoha_fe_rr(eth, offset) \ + airoha_rr((eth)->fe_regs, (offset)) +#define airoha_fe_wr(eth, offset, val) \ + airoha_wr((eth)->fe_regs, (offset), (val)) +#define airoha_fe_rmw(eth, offset, mask, val) \ + airoha_rmw((eth)->fe_regs, (offset), (mask), (val)) +#define airoha_fe_set(eth, offset, val) \ + airoha_rmw((eth)->fe_regs, (offset), 0, (val)) +#define airoha_fe_clear(eth, offset, val) \ + airoha_rmw((eth)->fe_regs, (offset), (val), 0) + +#define airoha_qdma_rr(qdma, offset) \ + airoha_rr((qdma)->regs, (offset)) +#define airoha_qdma_wr(qdma, offset, val) \ + airoha_wr((qdma)->regs, (offset), (val)) +#define airoha_qdma_rmw(qdma, offset, mask, val) \ + airoha_rmw((qdma)->regs, (offset), (mask), (val)) +#define airoha_qdma_set(qdma, offset, val) \ + airoha_rmw((qdma)->regs, (offset), 0, (val)) +#define airoha_qdma_clear(qdma, offset, val) \ + airoha_rmw((qdma)->regs, (offset), (val), 0) + +void airoha_ppe_check_skb(struct airoha_ppe *ppe, u16 hash); +int airoha_ppe_setup_tc_block_cb(enum tc_setup_type type, void *type_data, + void *cb_priv); +int airoha_ppe_init(struct airoha_eth *eth); +void airoha_ppe_deinit(struct airoha_eth *eth); +struct airoha_foe_entry *airoha_ppe_foe_get_entry(struct airoha_ppe *ppe, + u32 hash); + +#ifdef CONFIG_DEBUG_FS +int airoha_ppe_debugfs_init(struct airoha_ppe *ppe); +#else +static inline int airoha_ppe_debugfs_init(struct airoha_ppe *ppe) +{ + return 0; +} +#endif + +#endif /* AIROHA_ETH_H */ diff --git a/drivers/net/ethernet/airoha/airoha_npu.c b/drivers/net/ethernet/airoha/airoha_npu.c new file mode 100644 index 
0000000000000..7a5710f9ccf6a --- /dev/null +++ b/drivers/net/ethernet/airoha/airoha_npu.c @@ -0,0 +1,520 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2025 AIROHA Inc + * Author: Lorenzo Bianconi + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "airoha_npu.h" + +#define NPU_EN7581_FIRMWARE_DATA "airoha/en7581_npu_data.bin" +#define NPU_EN7581_FIRMWARE_RV32 "airoha/en7581_npu_rv32.bin" +#define NPU_EN7581_FIRMWARE_RV32_MAX_SIZE 0x200000 +#define NPU_EN7581_FIRMWARE_DATA_MAX_SIZE 0x10000 +#define NPU_DUMP_SIZE 512 + +#define REG_NPU_LOCAL_SRAM 0x0 + +#define NPU_PC_BASE_ADDR 0x305000 +#define REG_PC_DBG(_n) (0x305000 + ((_n) * 0x100)) + +#define NPU_CLUSTER_BASE_ADDR 0x306000 + +#define REG_CR_BOOT_TRIGGER (NPU_CLUSTER_BASE_ADDR + 0x000) +#define REG_CR_BOOT_CONFIG (NPU_CLUSTER_BASE_ADDR + 0x004) +#define REG_CR_BOOT_BASE(_n) (NPU_CLUSTER_BASE_ADDR + 0x020 + ((_n) << 2)) + +#define NPU_MBOX_BASE_ADDR 0x30c000 + +#define REG_CR_MBOX_INT_STATUS (NPU_MBOX_BASE_ADDR + 0x000) +#define MBOX_INT_STATUS_MASK BIT(8) + +#define REG_CR_MBOX_INT_MASK(_n) (NPU_MBOX_BASE_ADDR + 0x004 + ((_n) << 2)) +#define REG_CR_MBQ0_CTRL(_n) (NPU_MBOX_BASE_ADDR + 0x030 + ((_n) << 2)) +#define REG_CR_MBQ8_CTRL(_n) (NPU_MBOX_BASE_ADDR + 0x0b0 + ((_n) << 2)) +#define REG_CR_NPU_MIB(_n) (NPU_MBOX_BASE_ADDR + 0x140 + ((_n) << 2)) + +#define NPU_TIMER_BASE_ADDR 0x310100 +#define REG_WDT_TIMER_CTRL(_n) (NPU_TIMER_BASE_ADDR + ((_n) * 0x100)) +#define WDT_EN_MASK BIT(25) +#define WDT_INTR_MASK BIT(21) + +enum { + NPU_OP_SET = 1, + NPU_OP_SET_NO_WAIT, + NPU_OP_GET, + NPU_OP_GET_NO_WAIT, +}; + +enum { + NPU_FUNC_WIFI, + NPU_FUNC_TUNNEL, + NPU_FUNC_NOTIFY, + NPU_FUNC_DBA, + NPU_FUNC_TR471, + NPU_FUNC_PPE, +}; + +enum { + NPU_MBOX_ERROR, + NPU_MBOX_SUCCESS, +}; + +enum { + PPE_FUNC_SET_WAIT, + PPE_FUNC_SET_WAIT_HWNAT_INIT, + PPE_FUNC_SET_WAIT_HWNAT_DEINIT, + PPE_FUNC_SET_WAIT_API, +}; + +enum { + PPE2_SRAM_SET_ENTRY, + PPE_SRAM_SET_ENTRY, + PPE_SRAM_SET_VAL, + PPE_SRAM_RESET_VAL, +}; + +enum { + QDMA_WAN_ETHER = 1, + QDMA_WAN_PON_XDSL, +}; + +#define MBOX_MSG_FUNC_ID GENMASK(14, 11) +#define MBOX_MSG_STATIC_BUF BIT(5) +#define MBOX_MSG_STATUS GENMASK(4, 2) +#define MBOX_MSG_DONE BIT(1) +#define MBOX_MSG_WAIT_RSP BIT(0) + +#define PPE_TYPE_L2B_IPV4 2 +#define PPE_TYPE_L2B_IPV4_IPV6 3 + +struct ppe_mbox_data { + u32 func_type; + u32 func_id; + union { + struct { + u8 cds; + u8 xpon_hal_api; + u8 wan_xsi; + u8 ct_joyme4; + int ppe_type; + int wan_mode; + int wan_sel; + } init_info; + struct { + int func_id; + u32 size; + u32 data; + } set_info; + }; +}; + +static int airoha_npu_send_msg(struct airoha_npu *npu, int func_id, + void *p, int size) +{ + u16 core = 0; /* FIXME */ + u32 val, offset = core << 4; + dma_addr_t dma_addr; + void *addr; + int ret; + + addr = kmemdup(p, size, GFP_ATOMIC); + if (!addr) + return -ENOMEM; + + dma_addr = dma_map_single(npu->dev, addr, size, DMA_TO_DEVICE); + ret = dma_mapping_error(npu->dev, dma_addr); + if (ret) + goto out; + + spin_lock_bh(&npu->cores[core].lock); + + regmap_write(npu->regmap, REG_CR_MBQ0_CTRL(0) + offset, dma_addr); + regmap_write(npu->regmap, REG_CR_MBQ0_CTRL(1) + offset, size); + regmap_read(npu->regmap, REG_CR_MBQ0_CTRL(2) + offset, &val); + regmap_write(npu->regmap, REG_CR_MBQ0_CTRL(2) + offset, val + 1); + val = FIELD_PREP(MBOX_MSG_FUNC_ID, func_id) | MBOX_MSG_WAIT_RSP; + regmap_write(npu->regmap, REG_CR_MBQ0_CTRL(3) + offset, val); + + ret = regmap_read_poll_timeout_atomic(npu->regmap, + REG_CR_MBQ0_CTRL(3) + offset, 
+ val, (val & MBOX_MSG_DONE), + 100, 100 * MSEC_PER_SEC); + if (!ret && FIELD_GET(MBOX_MSG_STATUS, val) != NPU_MBOX_SUCCESS) + ret = -EINVAL; + + spin_unlock_bh(&npu->cores[core].lock); + + dma_unmap_single(npu->dev, dma_addr, size, DMA_TO_DEVICE); +out: + kfree(addr); + + return ret; +} + +static int airoha_npu_run_firmware(struct device *dev, void __iomem *base, + struct reserved_mem *rmem) +{ + const struct firmware *fw; + void __iomem *addr; + int ret; + + ret = request_firmware(&fw, NPU_EN7581_FIRMWARE_RV32, dev); + if (ret) + return ret == -ENOENT ? -EPROBE_DEFER : ret; + + if (fw->size > NPU_EN7581_FIRMWARE_RV32_MAX_SIZE) { + dev_err(dev, "%s: fw size too big (%zu)\n", + NPU_EN7581_FIRMWARE_RV32, fw->size); + ret = -E2BIG; + goto out; + } + + addr = devm_ioremap(dev, rmem->base, rmem->size); + if (!addr) { + ret = -ENOMEM; + goto out; + } + + memcpy_toio(addr, fw->data, fw->size); + release_firmware(fw); + + ret = request_firmware(&fw, NPU_EN7581_FIRMWARE_DATA, dev); + if (ret) + return ret == -ENOENT ? -EPROBE_DEFER : ret; + + if (fw->size > NPU_EN7581_FIRMWARE_DATA_MAX_SIZE) { + dev_err(dev, "%s: fw size too big (%zu)\n", + NPU_EN7581_FIRMWARE_DATA, fw->size); + ret = -E2BIG; + goto out; + } + + memcpy_toio(base + REG_NPU_LOCAL_SRAM, fw->data, fw->size); +out: + release_firmware(fw); + + return ret; +} + +static irqreturn_t airoha_npu_mbox_handler(int irq, void *npu_instance) +{ + struct airoha_npu *npu = npu_instance; + + /* clear mbox interrupt status */ + regmap_write(npu->regmap, REG_CR_MBOX_INT_STATUS, + MBOX_INT_STATUS_MASK); + + /* acknowledge npu */ + regmap_update_bits(npu->regmap, REG_CR_MBQ8_CTRL(3), + MBOX_MSG_STATUS | MBOX_MSG_DONE, MBOX_MSG_DONE); + + return IRQ_HANDLED; +} + +static void airoha_npu_wdt_work(struct work_struct *work) +{ + struct airoha_npu_core *core; + struct airoha_npu *npu; + void *dump; + u32 val[3]; + int c; + + core = container_of(work, struct airoha_npu_core, wdt_work); + npu = core->npu; + + dump = vzalloc(NPU_DUMP_SIZE); + if (!dump) + return; + + c = core - &npu->cores[0]; + regmap_bulk_read(npu->regmap, REG_PC_DBG(c), val, ARRAY_SIZE(val)); + snprintf(dump, NPU_DUMP_SIZE, "PC: %08x SP: %08x LR: %08x\n", + val[0], val[1], val[2]); + + dev_coredumpv(npu->dev, dump, NPU_DUMP_SIZE, GFP_KERNEL); +} + +static irqreturn_t airoha_npu_wdt_handler(int irq, void *core_instance) +{ + struct airoha_npu_core *core = core_instance; + struct airoha_npu *npu = core->npu; + int c = core - &npu->cores[0]; + u32 val; + + regmap_set_bits(npu->regmap, REG_WDT_TIMER_CTRL(c), WDT_INTR_MASK); + if (!regmap_read(npu->regmap, REG_WDT_TIMER_CTRL(c), &val) && + FIELD_GET(WDT_EN_MASK, val)) + schedule_work(&core->wdt_work); + + return IRQ_HANDLED; +} + +static int airoha_npu_ppe_init(struct airoha_npu *npu) +{ + struct ppe_mbox_data ppe_data = { + .func_type = NPU_OP_SET, + .func_id = PPE_FUNC_SET_WAIT_HWNAT_INIT, + .init_info = { + .ppe_type = PPE_TYPE_L2B_IPV4_IPV6, + .wan_mode = QDMA_WAN_ETHER, + }, + }; + + return airoha_npu_send_msg(npu, NPU_FUNC_PPE, &ppe_data, + sizeof(struct ppe_mbox_data)); +} + +static int airoha_npu_ppe_deinit(struct airoha_npu *npu) +{ + struct ppe_mbox_data ppe_data = { + .func_type = NPU_OP_SET, + .func_id = PPE_FUNC_SET_WAIT_HWNAT_DEINIT, + }; + + return airoha_npu_send_msg(npu, NPU_FUNC_PPE, &ppe_data, + sizeof(struct ppe_mbox_data)); +} + +static int airoha_npu_ppe_flush_sram_entries(struct airoha_npu *npu, + dma_addr_t foe_addr, + int sram_num_entries) +{ + struct ppe_mbox_data ppe_data = { + .func_type = NPU_OP_SET, +
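The host-to-NPU handshake used by all the PPE calls above is a single mailbox queue: DMA-map the message, write its address and size into MBQ0, bump the sequence word, kick the control word with the function id plus WAIT_RSP, then poll that same word for MSG_DONE before checking the embedded status field. A condensed restatement of airoha_npu_send_msg():

	regmap_write(npu->regmap, REG_CR_MBQ0_CTRL(0), dma_addr);  /* payload */
	regmap_write(npu->regmap, REG_CR_MBQ0_CTRL(1), size);
	val = FIELD_PREP(MBOX_MSG_FUNC_ID, func_id) | MBOX_MSG_WAIT_RSP;
	regmap_write(npu->regmap, REG_CR_MBQ0_CTRL(3), val);       /* kick */
	ret = regmap_read_poll_timeout_atomic(npu->regmap, REG_CR_MBQ0_CTRL(3),
					      val, (val & MBOX_MSG_DONE),
					      100, 100 * MSEC_PER_SEC);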
.func_id = PPE_FUNC_SET_WAIT_API, + .set_info = { + .func_id = PPE_SRAM_RESET_VAL, + .data = foe_addr, + .size = sram_num_entries, + }, + }; + + return airoha_npu_send_msg(npu, NPU_FUNC_PPE, &ppe_data, + sizeof(struct ppe_mbox_data)); +} + +static int airoha_npu_foe_commit_entry(struct airoha_npu *npu, + dma_addr_t foe_addr, + u32 entry_size, u32 hash, bool ppe2) +{ + struct ppe_mbox_data ppe_data = { + .func_type = NPU_OP_SET, + .func_id = PPE_FUNC_SET_WAIT_API, + .set_info = { + .data = foe_addr, + .size = entry_size, + }, + }; + int err; + + ppe_data.set_info.func_id = ppe2 ? PPE2_SRAM_SET_ENTRY + : PPE_SRAM_SET_ENTRY; + + err = airoha_npu_send_msg(npu, NPU_FUNC_PPE, &ppe_data, + sizeof(struct ppe_mbox_data)); + if (err) + return err; + + ppe_data.set_info.func_id = PPE_SRAM_SET_VAL; + ppe_data.set_info.data = hash; + ppe_data.set_info.size = sizeof(u32); + + return airoha_npu_send_msg(npu, NPU_FUNC_PPE, &ppe_data, + sizeof(struct ppe_mbox_data)); +} + +struct airoha_npu *airoha_npu_get(struct device *dev) +{ + struct platform_device *pdev; + struct device_node *np; + struct airoha_npu *npu; + + np = of_parse_phandle(dev->of_node, "airoha,npu", 0); + if (!np) + return ERR_PTR(-ENODEV); + + pdev = of_find_device_by_node(np); + if (!pdev) { + dev_err(dev, "cannot find device node %s\n", np->name); + of_node_put(np); + return ERR_PTR(-ENODEV); + } + of_node_put(np); + + if (!try_module_get(THIS_MODULE)) { + dev_err(dev, "failed to get the device driver module\n"); + npu = ERR_PTR(-ENODEV); + goto error_pdev_put; + } + + npu = platform_get_drvdata(pdev); + if (!npu) { + npu = ERR_PTR(-ENODEV); + goto error_module_put; + } + + if (!device_link_add(dev, &pdev->dev, DL_FLAG_AUTOREMOVE_SUPPLIER)) { + dev_err(&pdev->dev, + "failed to create device link to consumer %s\n", + dev_name(dev)); + npu = ERR_PTR(-EINVAL); + goto error_module_put; + } + + return npu; + +error_module_put: + module_put(THIS_MODULE); +error_pdev_put: + platform_device_put(pdev); + + return npu; +} +EXPORT_SYMBOL_GPL(airoha_npu_get); + +void airoha_npu_put(struct airoha_npu *npu) +{ + module_put(THIS_MODULE); + put_device(npu->dev); +} +EXPORT_SYMBOL_GPL(airoha_npu_put); + +static const struct of_device_id of_airoha_npu_match[] = { + { .compatible = "airoha,en7581-npu" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, of_airoha_npu_match); + +static const struct regmap_config regmap_config = { + .name = "npu", + .reg_bits = 32, + .val_bits = 32, + .reg_stride = 4, + .disable_locking = true, +}; + +static int airoha_npu_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct reserved_mem *rmem; + struct airoha_npu *npu; + struct device_node *np; + void __iomem *base; + int i, irq, err; + + base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(base)) + return PTR_ERR(base); + + npu = devm_kzalloc(dev, sizeof(*npu), GFP_KERNEL); + if (!npu) + return -ENOMEM; + + npu->dev = dev; + npu->ops.ppe_init = airoha_npu_ppe_init; + npu->ops.ppe_deinit = airoha_npu_ppe_deinit; + npu->ops.ppe_flush_sram_entries = airoha_npu_ppe_flush_sram_entries; + npu->ops.ppe_foe_commit_entry = airoha_npu_foe_commit_entry; + + npu->regmap = devm_regmap_init_mmio(dev, base, &regmap_config); + if (IS_ERR(npu->regmap)) + return PTR_ERR(npu->regmap); + + np = of_parse_phandle(dev->of_node, "memory-region", 0); + if (!np) + return -ENODEV; + + rmem = of_reserved_mem_lookup(np); + of_node_put(np); + + if (!rmem) + return -ENODEV; + + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; + + err = devm_request_irq(dev, irq,
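airoha_npu_get() is the consumer-side entry point: it resolves the "airoha,npu" phandle, pins the NPU module and adds a device link so the NPU cannot unbind underneath the ethernet driver. A consumer would use it roughly like this (sketch; eth->npu is the __rcu pointer added to struct airoha_eth by this patch, and the error handling is trimmed):

	struct airoha_npu *npu = airoha_npu_get(eth->dev);

	if (IS_ERR(npu))
		return PTR_ERR(npu);

	rcu_assign_pointer(eth->npu, npu);
	/* ... later, on teardown ... */
	airoha_npu_put(npu);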
airoha_npu_mbox_handler, + IRQF_SHARED, "airoha-npu-mbox", npu); + if (err) + return err; + + for (i = 0; i < ARRAY_SIZE(npu->cores); i++) { + struct airoha_npu_core *core = &npu->cores[i]; + + spin_lock_init(&core->lock); + core->npu = npu; + + irq = platform_get_irq(pdev, i + 1); + if (irq < 0) + return irq; + + err = devm_request_irq(dev, irq, airoha_npu_wdt_handler, + IRQF_SHARED, "airoha-npu-wdt", core); + if (err) + return err; + + INIT_WORK(&core->wdt_work, airoha_npu_wdt_work); + } + + err = dma_set_coherent_mask(dev, DMA_BIT_MASK(32)); + if (err) + return err; + + err = airoha_npu_run_firmware(dev, base, rmem); + if (err) + return dev_err_probe(dev, err, "failed to run npu firmware\n"); + + regmap_write(npu->regmap, REG_CR_NPU_MIB(10), + rmem->base + NPU_EN7581_FIRMWARE_RV32_MAX_SIZE); + regmap_write(npu->regmap, REG_CR_NPU_MIB(11), 0x40000); /* SRAM 256K */ + regmap_write(npu->regmap, REG_CR_NPU_MIB(12), 0); + regmap_write(npu->regmap, REG_CR_NPU_MIB(21), 1); + msleep(100); + + /* setting booting address */ + for (i = 0; i < NPU_NUM_CORES; i++) + regmap_write(npu->regmap, REG_CR_BOOT_BASE(i), rmem->base); + usleep_range(1000, 2000); + + /* enable NPU cores */ + /* do not start core3 since it is used for WiFi offloading */ + regmap_write(npu->regmap, REG_CR_BOOT_CONFIG, 0xf7); + regmap_write(npu->regmap, REG_CR_BOOT_TRIGGER, 0x1); + msleep(100); + + platform_set_drvdata(pdev, npu); + + return 0; +} + +static void airoha_npu_remove(struct platform_device *pdev) +{ + struct airoha_npu *npu = platform_get_drvdata(pdev); + int i; + + for (i = 0; i < ARRAY_SIZE(npu->cores); i++) + cancel_work_sync(&npu->cores[i].wdt_work); +} + +static struct platform_driver airoha_npu_driver = { + .probe = airoha_npu_probe, + .remove = airoha_npu_remove, + .driver = { + .name = "airoha-npu", + .of_match_table = of_airoha_npu_match, + }, +}; +module_platform_driver(airoha_npu_driver); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Lorenzo Bianconi "); +MODULE_DESCRIPTION("Airoha Network Processor Unit driver"); diff --git a/drivers/net/ethernet/airoha/airoha_npu.h b/drivers/net/ethernet/airoha/airoha_npu.h new file mode 100644 index 0000000000000..a2b8ae4d94739 --- /dev/null +++ b/drivers/net/ethernet/airoha/airoha_npu.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2025 AIROHA Inc + * Author: Lorenzo Bianconi + */ + +#define NPU_NUM_CORES 8 + +struct airoha_npu { + struct device *dev; + struct regmap *regmap; + + struct airoha_npu_core { + struct airoha_npu *npu; + /* protect concurrent npu memory accesses */ + spinlock_t lock; + struct work_struct wdt_work; + } cores[NPU_NUM_CORES]; + + struct { + int (*ppe_init)(struct airoha_npu *npu); + int (*ppe_deinit)(struct airoha_npu *npu); + int (*ppe_flush_sram_entries)(struct airoha_npu *npu, + dma_addr_t foe_addr, + int sram_num_entries); + int (*ppe_foe_commit_entry)(struct airoha_npu *npu, + dma_addr_t foe_addr, + u32 entry_size, u32 hash, + bool ppe2); + } ops; +}; + +struct airoha_npu *airoha_npu_get(struct device *dev); +void airoha_npu_put(struct airoha_npu *npu); diff --git a/drivers/net/ethernet/airoha/airoha_ppe.c b/drivers/net/ethernet/airoha/airoha_ppe.c new file mode 100644 index 0000000000000..8b55e871352d3 --- /dev/null +++ b/drivers/net/ethernet/airoha/airoha_ppe.c @@ -0,0 +1,910 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2025 AIROHA Inc + * Author: Lorenzo Bianconi + */ + +#include +#include +#include +#include +#include + +#include "airoha_npu.h" +#include "airoha_regs.h" +#include 
"airoha_eth.h" + +static DEFINE_MUTEX(flow_offload_mutex); +static DEFINE_SPINLOCK(ppe_lock); + +static const struct rhashtable_params airoha_flow_table_params = { + .head_offset = offsetof(struct airoha_flow_table_entry, node), + .key_offset = offsetof(struct airoha_flow_table_entry, cookie), + .key_len = sizeof(unsigned long), + .automatic_shrinking = true, +}; + +static bool airoha_ppe2_is_enabled(struct airoha_eth *eth) +{ + return airoha_fe_rr(eth, REG_PPE_GLO_CFG(1)) & PPE_GLO_CFG_EN_MASK; +} + +static u32 airoha_ppe_get_timestamp(struct airoha_ppe *ppe) +{ + u16 timestamp = airoha_fe_rr(ppe->eth, REG_FE_FOE_TS); + + return FIELD_GET(AIROHA_FOE_IB1_BIND_TIMESTAMP, timestamp); +} + +static void airoha_ppe_hw_init(struct airoha_ppe *ppe) +{ + u32 sram_tb_size, sram_num_entries, dram_num_entries; + struct airoha_eth *eth = ppe->eth; + int i; + + sram_tb_size = PPE_SRAM_NUM_ENTRIES * sizeof(struct airoha_foe_entry); + dram_num_entries = PPE_RAM_NUM_ENTRIES_SHIFT(PPE_DRAM_NUM_ENTRIES); + + for (i = 0; i < PPE_NUM; i++) { + int p; + + airoha_fe_wr(eth, REG_PPE_TB_BASE(i), + ppe->foe_dma + sram_tb_size); + + airoha_fe_rmw(eth, REG_PPE_BND_AGE0(i), + PPE_BIND_AGE0_DELTA_NON_L4 | + PPE_BIND_AGE0_DELTA_UDP, + FIELD_PREP(PPE_BIND_AGE0_DELTA_NON_L4, 1) | + FIELD_PREP(PPE_BIND_AGE0_DELTA_UDP, 12)); + airoha_fe_rmw(eth, REG_PPE_BND_AGE1(i), + PPE_BIND_AGE1_DELTA_TCP_FIN | + PPE_BIND_AGE1_DELTA_TCP, + FIELD_PREP(PPE_BIND_AGE1_DELTA_TCP_FIN, 1) | + FIELD_PREP(PPE_BIND_AGE1_DELTA_TCP, 7)); + + airoha_fe_rmw(eth, REG_PPE_TB_HASH_CFG(i), + PPE_SRAM_TABLE_EN_MASK | + PPE_SRAM_HASH1_EN_MASK | + PPE_DRAM_TABLE_EN_MASK | + PPE_SRAM_HASH0_MODE_MASK | + PPE_SRAM_HASH1_MODE_MASK | + PPE_DRAM_HASH0_MODE_MASK | + PPE_DRAM_HASH1_MODE_MASK, + FIELD_PREP(PPE_SRAM_TABLE_EN_MASK, 1) | + FIELD_PREP(PPE_SRAM_HASH1_EN_MASK, 1) | + FIELD_PREP(PPE_SRAM_HASH1_MODE_MASK, 1) | + FIELD_PREP(PPE_DRAM_HASH1_MODE_MASK, 3)); + + airoha_fe_rmw(eth, REG_PPE_TB_CFG(i), + PPE_TB_CFG_SEARCH_MISS_MASK | + PPE_TB_ENTRY_SIZE_MASK, + FIELD_PREP(PPE_TB_CFG_SEARCH_MISS_MASK, 3) | + FIELD_PREP(PPE_TB_ENTRY_SIZE_MASK, 0)); + + airoha_fe_wr(eth, REG_PPE_HASH_SEED(i), PPE_HASH_SEED); + + for (p = 0; p < ARRAY_SIZE(eth->ports); p++) + airoha_fe_rmw(eth, REG_PPE_MTU(i, p), + FP0_EGRESS_MTU_MASK | + FP1_EGRESS_MTU_MASK, + FIELD_PREP(FP0_EGRESS_MTU_MASK, + AIROHA_MAX_MTU) | + FIELD_PREP(FP1_EGRESS_MTU_MASK, + AIROHA_MAX_MTU)); + } + + if (airoha_ppe2_is_enabled(eth)) { + sram_num_entries = + PPE_RAM_NUM_ENTRIES_SHIFT(PPE1_SRAM_NUM_ENTRIES); + airoha_fe_rmw(eth, REG_PPE_TB_CFG(0), + PPE_SRAM_TB_NUM_ENTRY_MASK | + PPE_DRAM_TB_NUM_ENTRY_MASK, + FIELD_PREP(PPE_SRAM_TB_NUM_ENTRY_MASK, + sram_num_entries) | + FIELD_PREP(PPE_DRAM_TB_NUM_ENTRY_MASK, + dram_num_entries)); + airoha_fe_rmw(eth, REG_PPE_TB_CFG(1), + PPE_SRAM_TB_NUM_ENTRY_MASK | + PPE_DRAM_TB_NUM_ENTRY_MASK, + FIELD_PREP(PPE_SRAM_TB_NUM_ENTRY_MASK, + sram_num_entries) | + FIELD_PREP(PPE_DRAM_TB_NUM_ENTRY_MASK, + dram_num_entries)); + } else { + sram_num_entries = + PPE_RAM_NUM_ENTRIES_SHIFT(PPE_SRAM_NUM_ENTRIES); + airoha_fe_rmw(eth, REG_PPE_TB_CFG(0), + PPE_SRAM_TB_NUM_ENTRY_MASK | + PPE_DRAM_TB_NUM_ENTRY_MASK, + FIELD_PREP(PPE_SRAM_TB_NUM_ENTRY_MASK, + sram_num_entries) | + FIELD_PREP(PPE_DRAM_TB_NUM_ENTRY_MASK, + dram_num_entries)); + } +} + +static void airoha_ppe_flow_mangle_eth(const struct flow_action_entry *act, void *eth) +{ + void *dest = eth + act->mangle.offset; + const void *src = &act->mangle.val; + + if (act->mangle.offset > 8) + return; + + if (act->mangle.mask == 0xffff) { + 
src += 2; + dest += 2; + } + + memcpy(dest, src, act->mangle.mask ? 2 : 4); +} + +static int airoha_ppe_flow_mangle_ports(const struct flow_action_entry *act, + struct airoha_flow_data *data) +{ + u32 val = be32_to_cpu((__force __be32)act->mangle.val); + + switch (act->mangle.offset) { + case 0: + if ((__force __be32)act->mangle.mask == ~cpu_to_be32(0xffff)) + data->dst_port = cpu_to_be16(val); + else + data->src_port = cpu_to_be16(val >> 16); + break; + case 2: + data->dst_port = cpu_to_be16(val); + break; + default: + return -EINVAL; + } + + return 0; +} + +static int airoha_ppe_flow_mangle_ipv4(const struct flow_action_entry *act, + struct airoha_flow_data *data) +{ + __be32 *dest; + + switch (act->mangle.offset) { + case offsetof(struct iphdr, saddr): + dest = &data->v4.src_addr; + break; + case offsetof(struct iphdr, daddr): + dest = &data->v4.dst_addr; + break; + default: + return -EINVAL; + } + + memcpy(dest, &act->mangle.val, sizeof(u32)); + + return 0; +} + +static int airoha_get_dsa_port(struct net_device **dev) +{ +#if IS_ENABLED(CONFIG_NET_DSA) + struct dsa_port *dp = dsa_port_from_netdev(*dev); + + if (IS_ERR(dp)) + return -ENODEV; + + *dev = dsa_port_to_conduit(dp); + return dp->index; +#else + return -ENODEV; +#endif +} + +static int airoha_ppe_foe_entry_prepare(struct airoha_foe_entry *hwe, + struct net_device *dev, int type, + struct airoha_flow_data *data, + int l4proto) +{ + int dsa_port = airoha_get_dsa_port(&dev); + struct airoha_foe_mac_info_common *l2; + u32 qdata, ports_pad, val; + + memset(hwe, 0, sizeof(*hwe)); + + val = FIELD_PREP(AIROHA_FOE_IB1_BIND_STATE, AIROHA_FOE_STATE_BIND) | + FIELD_PREP(AIROHA_FOE_IB1_BIND_PACKET_TYPE, type) | + FIELD_PREP(AIROHA_FOE_IB1_BIND_UDP, l4proto == IPPROTO_UDP) | + FIELD_PREP(AIROHA_FOE_IB1_BIND_VLAN_LAYER, data->vlan.num) | + FIELD_PREP(AIROHA_FOE_IB1_BIND_VPM, data->vlan.num) | + AIROHA_FOE_IB1_BIND_TTL; + hwe->ib1 = val; + + val = FIELD_PREP(AIROHA_FOE_IB2_PORT_AG, 0x1f) | + AIROHA_FOE_IB2_PSE_QOS; + if (dsa_port >= 0) + val |= FIELD_PREP(AIROHA_FOE_IB2_NBQ, dsa_port); + + if (dev) { + struct airoha_gdm_port *port = netdev_priv(dev); + u8 pse_port; + + if (dsa_port >= 0) + pse_port = port->id == 4 ? 
FE_PSE_PORT_GDM4 : port->id; + else + pse_port = 2; /* uplink relies on GDM2 loopback */ + val |= FIELD_PREP(AIROHA_FOE_IB2_PSE_PORT, pse_port); + } + + if (is_multicast_ether_addr(data->eth.h_dest)) + val |= AIROHA_FOE_IB2_MULTICAST; + + ports_pad = 0xa5a5a500 | (l4proto & 0xff); + if (type == PPE_PKT_TYPE_IPV4_ROUTE) + hwe->ipv4.orig_tuple.ports = ports_pad; + if (type == PPE_PKT_TYPE_IPV6_ROUTE_3T) + hwe->ipv6.ports = ports_pad; + + qdata = FIELD_PREP(AIROHA_FOE_SHAPER_ID, 0x7f); + if (type == PPE_PKT_TYPE_BRIDGE) { + hwe->bridge.dest_mac_hi = get_unaligned_be32(data->eth.h_dest); + hwe->bridge.dest_mac_lo = + get_unaligned_be16(data->eth.h_dest + 4); + hwe->bridge.src_mac_hi = + get_unaligned_be16(data->eth.h_source); + hwe->bridge.src_mac_lo = + get_unaligned_be32(data->eth.h_source + 2); + hwe->bridge.data = qdata; + hwe->bridge.ib2 = val; + l2 = &hwe->bridge.l2.common; + } else if (type >= PPE_PKT_TYPE_IPV6_ROUTE_3T) { + hwe->ipv6.data = qdata; + hwe->ipv6.ib2 = val; + l2 = &hwe->ipv6.l2; + } else { + hwe->ipv4.data = qdata; + hwe->ipv4.ib2 = val; + l2 = &hwe->ipv4.l2.common; + } + + l2->dest_mac_hi = get_unaligned_be32(data->eth.h_dest); + l2->dest_mac_lo = get_unaligned_be16(data->eth.h_dest + 4); + if (type <= PPE_PKT_TYPE_IPV4_DSLITE) { + l2->src_mac_hi = get_unaligned_be32(data->eth.h_source); + hwe->ipv4.l2.src_mac_lo = + get_unaligned_be16(data->eth.h_source + 4); + } else { + l2->src_mac_hi = FIELD_PREP(AIROHA_FOE_MAC_SMAC_ID, 0xf); + } + + if (data->vlan.num) { + l2->etype = dsa_port >= 0 ? BIT(dsa_port) : 0; + l2->vlan1 = data->vlan.hdr[0].id; + if (data->vlan.num == 2) + l2->vlan2 = data->vlan.hdr[1].id; + } else if (dsa_port >= 0) { + l2->etype = BIT(15) | BIT(dsa_port); + } else if (type >= PPE_PKT_TYPE_IPV6_ROUTE_3T) { + l2->etype = ETH_P_IPV6; + } else { + l2->etype = ETH_P_IP; + } + + return 0; +} + +static int airoha_ppe_foe_entry_set_ipv4_tuple(struct airoha_foe_entry *hwe, + struct airoha_flow_data *data, + bool egress) +{ + int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe->ib1); + struct airoha_foe_ipv4_tuple *t; + + switch (type) { + case PPE_PKT_TYPE_IPV4_HNAPT: + if (egress) { + t = &hwe->ipv4.new_tuple; + break; + } + fallthrough; + case PPE_PKT_TYPE_IPV4_DSLITE: + case PPE_PKT_TYPE_IPV4_ROUTE: + t = &hwe->ipv4.orig_tuple; + break; + default: + WARN_ON_ONCE(1); + return -EINVAL; + } + + t->src_ip = be32_to_cpu(data->v4.src_addr); + t->dest_ip = be32_to_cpu(data->v4.dst_addr); + + if (type != PPE_PKT_TYPE_IPV4_ROUTE) { + t->src_port = be16_to_cpu(data->src_port); + t->dest_port = be16_to_cpu(data->dst_port); + } + + return 0; +} + +static int airoha_ppe_foe_entry_set_ipv6_tuple(struct airoha_foe_entry *hwe, + struct airoha_flow_data *data) + +{ + int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe->ib1); + u32 *src, *dest; + + switch (type) { + case PPE_PKT_TYPE_IPV6_ROUTE_5T: + case PPE_PKT_TYPE_IPV6_6RD: + hwe->ipv6.src_port = be16_to_cpu(data->src_port); + hwe->ipv6.dest_port = be16_to_cpu(data->dst_port); + fallthrough; + case PPE_PKT_TYPE_IPV6_ROUTE_3T: + src = hwe->ipv6.src_ip; + dest = hwe->ipv6.dest_ip; + break; + default: + WARN_ON_ONCE(1); + return -EINVAL; + } + + ipv6_addr_be32_to_cpu(src, data->v6.src_addr.s6_addr32); + ipv6_addr_be32_to_cpu(dest, data->v6.dst_addr.s6_addr32); + + return 0; +} + +static u32 airoha_ppe_foe_get_entry_hash(struct airoha_foe_entry *hwe) +{ + int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe->ib1); + u32 hash, hv1, hv2, hv3; + + switch (type) { + case PPE_PKT_TYPE_IPV4_ROUTE: + case 
PPE_PKT_TYPE_IPV4_HNAPT: + hv1 = hwe->ipv4.orig_tuple.ports; + hv2 = hwe->ipv4.orig_tuple.dest_ip; + hv3 = hwe->ipv4.orig_tuple.src_ip; + break; + case PPE_PKT_TYPE_IPV6_ROUTE_3T: + case PPE_PKT_TYPE_IPV6_ROUTE_5T: + hv1 = hwe->ipv6.src_ip[3] ^ hwe->ipv6.dest_ip[3]; + hv1 ^= hwe->ipv6.ports; + + hv2 = hwe->ipv6.src_ip[2] ^ hwe->ipv6.dest_ip[2]; + hv2 ^= hwe->ipv6.dest_ip[0]; + + hv3 = hwe->ipv6.src_ip[1] ^ hwe->ipv6.dest_ip[1]; + hv3 ^= hwe->ipv6.src_ip[0]; + break; + case PPE_PKT_TYPE_IPV4_DSLITE: + case PPE_PKT_TYPE_IPV6_6RD: + default: + WARN_ON_ONCE(1); + return PPE_HASH_MASK; + } + + hash = (hv1 & hv2) | ((~hv1) & hv3); + hash = (hash >> 24) | ((hash & 0xffffff) << 8); + hash ^= hv1 ^ hv2 ^ hv3; + hash ^= hash >> 16; + hash &= PPE_NUM_ENTRIES - 1; + + return hash; +} + +struct airoha_foe_entry *airoha_ppe_foe_get_entry(struct airoha_ppe *ppe, + u32 hash) +{ + if (hash < PPE_SRAM_NUM_ENTRIES) { + u32 *hwe = ppe->foe + hash * sizeof(struct airoha_foe_entry); + struct airoha_eth *eth = ppe->eth; + bool ppe2; + u32 val; + int i; + + ppe2 = airoha_ppe2_is_enabled(ppe->eth) && + hash >= PPE1_SRAM_NUM_ENTRIES; + airoha_fe_wr(ppe->eth, REG_PPE_RAM_CTRL(ppe2), + FIELD_PREP(PPE_SRAM_CTRL_ENTRY_MASK, hash) | + PPE_SRAM_CTRL_REQ_MASK); + if (read_poll_timeout_atomic(airoha_fe_rr, val, + val & PPE_SRAM_CTRL_ACK_MASK, + 10, 100, false, eth, + REG_PPE_RAM_CTRL(ppe2))) + return NULL; + + for (i = 0; i < sizeof(struct airoha_foe_entry) / 4; i++) + hwe[i] = airoha_fe_rr(eth, + REG_PPE_RAM_ENTRY(ppe2, i)); + } + + return ppe->foe + hash * sizeof(struct airoha_foe_entry); +} + +static bool airoha_ppe_foe_compare_entry(struct airoha_flow_table_entry *e, + struct airoha_foe_entry *hwe) +{ + int type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, e->data.ib1); + int len; + + if ((hwe->ib1 ^ e->data.ib1) & AIROHA_FOE_IB1_BIND_UDP) + return false; + + if (type > PPE_PKT_TYPE_IPV4_DSLITE) + len = offsetof(struct airoha_foe_entry, ipv6.data); + else + len = offsetof(struct airoha_foe_entry, ipv4.ib2); + + return !memcmp(&e->data.d, &hwe->d, len - sizeof(hwe->ib1)); +} + +static int airoha_ppe_foe_commit_entry(struct airoha_ppe *ppe, + struct airoha_foe_entry *e, + u32 hash) +{ + struct airoha_foe_entry *hwe = ppe->foe + hash * sizeof(*hwe); + u32 ts = airoha_ppe_get_timestamp(ppe); + struct airoha_eth *eth = ppe->eth; + + memcpy(&hwe->d, &e->d, sizeof(*hwe) - sizeof(hwe->ib1)); + wmb(); + + e->ib1 &= ~AIROHA_FOE_IB1_BIND_TIMESTAMP; + e->ib1 |= FIELD_PREP(AIROHA_FOE_IB1_BIND_TIMESTAMP, ts); + hwe->ib1 = e->ib1; + + if (hash < PPE_SRAM_NUM_ENTRIES) { + dma_addr_t addr = ppe->foe_dma + hash * sizeof(*hwe); + bool ppe2 = airoha_ppe2_is_enabled(eth) && + hash >= PPE1_SRAM_NUM_ENTRIES; + struct airoha_npu *npu; + int err = -ENODEV; + + rcu_read_lock(); + npu = rcu_dereference(eth->npu); + if (npu) + err = npu->ops.ppe_foe_commit_entry(npu, addr, + sizeof(*hwe), hash, + ppe2); + rcu_read_unlock(); + + return err; + } + + return 0; +} + +static void airoha_ppe_foe_insert_entry(struct airoha_ppe *ppe, u32 hash) +{ + struct airoha_flow_table_entry *e; + struct airoha_foe_entry *hwe; + struct hlist_node *n; + u32 index, state; + + spin_lock_bh(&ppe_lock); + + hwe = airoha_ppe_foe_get_entry(ppe, hash); + if (!hwe) + goto unlock; + + state = FIELD_GET(AIROHA_FOE_IB1_BIND_STATE, hwe->ib1); + if (state == AIROHA_FOE_STATE_BIND) + goto unlock; + + index = airoha_ppe_foe_get_entry_hash(hwe); + hlist_for_each_entry_safe(e, n, &ppe->foe_flow[index], list) { + if (airoha_ppe_foe_compare_entry(e, hwe)) { + 
airoha_ppe_foe_commit_entry(ppe, &e->data, hash); + e->hash = hash; + break; + } + } +unlock: + spin_unlock_bh(&ppe_lock); +} + +static int airoha_ppe_foe_flow_commit_entry(struct airoha_ppe *ppe, + struct airoha_flow_table_entry *e) +{ + u32 hash = airoha_ppe_foe_get_entry_hash(&e->data); + + e->hash = 0xffff; + + spin_lock_bh(&ppe_lock); + hlist_add_head(&e->list, &ppe->foe_flow[hash]); + spin_unlock_bh(&ppe_lock); + + return 0; +} + +static void airoha_ppe_foe_flow_remove_entry(struct airoha_ppe *ppe, + struct airoha_flow_table_entry *e) +{ + spin_lock_bh(&ppe_lock); + + hlist_del_init(&e->list); + if (e->hash != 0xffff) { + e->data.ib1 &= ~AIROHA_FOE_IB1_BIND_STATE; + e->data.ib1 |= FIELD_PREP(AIROHA_FOE_IB1_BIND_STATE, + AIROHA_FOE_STATE_INVALID); + airoha_ppe_foe_commit_entry(ppe, &e->data, e->hash); + e->hash = 0xffff; + } + + spin_unlock_bh(&ppe_lock); +} + +static int airoha_ppe_flow_offload_replace(struct airoha_gdm_port *port, + struct flow_cls_offload *f) +{ + struct flow_rule *rule = flow_cls_offload_flow_rule(f); + struct airoha_eth *eth = port->qdma->eth; + struct airoha_flow_table_entry *e; + struct airoha_flow_data data = {}; + struct net_device *odev = NULL; + struct flow_action_entry *act; + struct airoha_foe_entry hwe; + int err, i, offload_type; + u16 addr_type = 0; + u8 l4proto = 0; + + if (rhashtable_lookup(ð->flow_table, &f->cookie, + airoha_flow_table_params)) + return -EEXIST; + + if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META)) + return -EOPNOTSUPP; + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) { + struct flow_match_control match; + + flow_rule_match_control(rule, &match); + addr_type = match.key->addr_type; + if (flow_rule_has_control_flags(match.mask->flags, + f->common.extack)) + return -EOPNOTSUPP; + } else { + return -EOPNOTSUPP; + } + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) { + struct flow_match_basic match; + + flow_rule_match_basic(rule, &match); + l4proto = match.key->ip_proto; + } else { + return -EOPNOTSUPP; + } + + switch (addr_type) { + case 0: + offload_type = PPE_PKT_TYPE_BRIDGE; + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { + struct flow_match_eth_addrs match; + + flow_rule_match_eth_addrs(rule, &match); + memcpy(data.eth.h_dest, match.key->dst, ETH_ALEN); + memcpy(data.eth.h_source, match.key->src, ETH_ALEN); + } else { + return -EOPNOTSUPP; + } + break; + case FLOW_DISSECTOR_KEY_IPV4_ADDRS: + offload_type = PPE_PKT_TYPE_IPV4_HNAPT; + break; + case FLOW_DISSECTOR_KEY_IPV6_ADDRS: + offload_type = PPE_PKT_TYPE_IPV6_ROUTE_5T; + break; + default: + return -EOPNOTSUPP; + } + + flow_action_for_each(i, act, &rule->action) { + switch (act->id) { + case FLOW_ACTION_MANGLE: + if (offload_type == PPE_PKT_TYPE_BRIDGE) + return -EOPNOTSUPP; + + if (act->mangle.htype == FLOW_ACT_MANGLE_HDR_TYPE_ETH) + airoha_ppe_flow_mangle_eth(act, &data.eth); + break; + case FLOW_ACTION_REDIRECT: + odev = act->dev; + break; + case FLOW_ACTION_CSUM: + break; + case FLOW_ACTION_VLAN_PUSH: + if (data.vlan.num == 2 || + act->vlan.proto != htons(ETH_P_8021Q)) + return -EOPNOTSUPP; + + data.vlan.hdr[data.vlan.num].id = act->vlan.vid; + data.vlan.hdr[data.vlan.num].proto = act->vlan.proto; + data.vlan.num++; + break; + case FLOW_ACTION_VLAN_POP: + break; + case FLOW_ACTION_PPPOE_PUSH: + break; + default: + return -EOPNOTSUPP; + } + } + + if (!is_valid_ether_addr(data.eth.h_source) || + !is_valid_ether_addr(data.eth.h_dest)) + return -EINVAL; + + err = airoha_ppe_foe_entry_prepare(&hwe, odev, offload_type, + &data, 
l4proto); + if (err) + return err; + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) { + struct flow_match_ports ports; + + if (offload_type == PPE_PKT_TYPE_BRIDGE) + return -EOPNOTSUPP; + + flow_rule_match_ports(rule, &ports); + data.src_port = ports.key->src; + data.dst_port = ports.key->dst; + } else if (offload_type != PPE_PKT_TYPE_BRIDGE) { + return -EOPNOTSUPP; + } + + if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) { + struct flow_match_ipv4_addrs addrs; + + flow_rule_match_ipv4_addrs(rule, &addrs); + data.v4.src_addr = addrs.key->src; + data.v4.dst_addr = addrs.key->dst; + airoha_ppe_foe_entry_set_ipv4_tuple(&hwe, &data, false); + } + + if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) { + struct flow_match_ipv6_addrs addrs; + + flow_rule_match_ipv6_addrs(rule, &addrs); + + data.v6.src_addr = addrs.key->src; + data.v6.dst_addr = addrs.key->dst; + airoha_ppe_foe_entry_set_ipv6_tuple(&hwe, &data); + } + + flow_action_for_each(i, act, &rule->action) { + if (act->id != FLOW_ACTION_MANGLE) + continue; + + if (offload_type == PPE_PKT_TYPE_BRIDGE) + return -EOPNOTSUPP; + + switch (act->mangle.htype) { + case FLOW_ACT_MANGLE_HDR_TYPE_TCP: + case FLOW_ACT_MANGLE_HDR_TYPE_UDP: + err = airoha_ppe_flow_mangle_ports(act, &data); + break; + case FLOW_ACT_MANGLE_HDR_TYPE_IP4: + err = airoha_ppe_flow_mangle_ipv4(act, &data); + break; + case FLOW_ACT_MANGLE_HDR_TYPE_ETH: + /* handled earlier */ + break; + default: + return -EOPNOTSUPP; + } + + if (err) + return err; + } + + if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) { + err = airoha_ppe_foe_entry_set_ipv4_tuple(&hwe, &data, true); + if (err) + return err; + } + + e = kzalloc(sizeof(*e), GFP_KERNEL); + if (!e) + return -ENOMEM; + + e->cookie = f->cookie; + memcpy(&e->data, &hwe, sizeof(e->data)); + + err = airoha_ppe_foe_flow_commit_entry(eth->ppe, e); + if (err) + goto free_entry; + + err = rhashtable_insert_fast(ð->flow_table, &e->node, + airoha_flow_table_params); + if (err < 0) + goto remove_foe_entry; + + return 0; + +remove_foe_entry: + airoha_ppe_foe_flow_remove_entry(eth->ppe, e); +free_entry: + kfree(e); + + return err; +} + +static int airoha_ppe_flow_offload_destroy(struct airoha_gdm_port *port, + struct flow_cls_offload *f) +{ + struct airoha_eth *eth = port->qdma->eth; + struct airoha_flow_table_entry *e; + + e = rhashtable_lookup(ð->flow_table, &f->cookie, + airoha_flow_table_params); + if (!e) + return -ENOENT; + + airoha_ppe_foe_flow_remove_entry(eth->ppe, e); + rhashtable_remove_fast(ð->flow_table, &e->node, + airoha_flow_table_params); + kfree(e); + + return 0; +} + +static int airoha_ppe_flow_offload_cmd(struct airoha_gdm_port *port, + struct flow_cls_offload *f) +{ + switch (f->command) { + case FLOW_CLS_REPLACE: + return airoha_ppe_flow_offload_replace(port, f); + case FLOW_CLS_DESTROY: + return airoha_ppe_flow_offload_destroy(port, f); + default: + break; + } + + return -EOPNOTSUPP; +} + +static int airoha_ppe_flush_sram_entries(struct airoha_ppe *ppe, + struct airoha_npu *npu) +{ + int i, sram_num_entries = PPE_SRAM_NUM_ENTRIES; + struct airoha_foe_entry *hwe = ppe->foe; + + if (airoha_ppe2_is_enabled(ppe->eth)) + sram_num_entries = sram_num_entries / 2; + + for (i = 0; i < sram_num_entries; i++) + memset(&hwe[i], 0, sizeof(*hwe)); + + return npu->ops.ppe_flush_sram_entries(npu, ppe->foe_dma, + PPE_SRAM_NUM_ENTRIES); +} + +static struct airoha_npu *airoha_ppe_npu_get(struct airoha_eth *eth) +{ + struct airoha_npu *npu = airoha_npu_get(eth->dev); + + if (IS_ERR(npu)) { + request_module("airoha-npu"); + npu = 
airoha_npu_get(eth->dev); + } + + return npu; +} + +static int airoha_ppe_offload_setup(struct airoha_eth *eth) +{ + struct airoha_npu *npu = airoha_ppe_npu_get(eth); + int err; + + if (IS_ERR(npu)) + return PTR_ERR(npu); + + err = npu->ops.ppe_init(npu); + if (err) + goto error_npu_put; + + airoha_ppe_hw_init(eth->ppe); + err = airoha_ppe_flush_sram_entries(eth->ppe, npu); + if (err) + goto error_npu_put; + + rcu_assign_pointer(eth->npu, npu); + synchronize_rcu(); + + return 0; + +error_npu_put: + airoha_npu_put(npu); + + return err; +} + +int airoha_ppe_setup_tc_block_cb(enum tc_setup_type type, void *type_data, + void *cb_priv) +{ + struct flow_cls_offload *cls = type_data; + struct net_device *dev = cb_priv; + struct airoha_gdm_port *port = netdev_priv(dev); + struct airoha_eth *eth = port->qdma->eth; + int err = 0; + + if (!tc_can_offload(dev) || type != TC_SETUP_CLSFLOWER) + return -EOPNOTSUPP; + + mutex_lock(&flow_offload_mutex); + + if (!eth->npu) + err = airoha_ppe_offload_setup(eth); + if (!err) + err = airoha_ppe_flow_offload_cmd(port, cls); + + mutex_unlock(&flow_offload_mutex); + + return err; +} + +void airoha_ppe_check_skb(struct airoha_ppe *ppe, u16 hash) +{ + u16 now, diff; + + if (hash > PPE_HASH_MASK) + return; + + now = (u16)jiffies; + diff = now - ppe->foe_check_time[hash]; + if (diff < HZ / 10) + return; + + ppe->foe_check_time[hash] = now; + airoha_ppe_foe_insert_entry(ppe, hash); +} + +int airoha_ppe_init(struct airoha_eth *eth) +{ + struct airoha_ppe *ppe; + int foe_size, err; + + ppe = devm_kzalloc(eth->dev, sizeof(*ppe), GFP_KERNEL); + if (!ppe) + return -ENOMEM; + + foe_size = PPE_NUM_ENTRIES * sizeof(struct airoha_foe_entry); + ppe->foe = dmam_alloc_coherent(eth->dev, foe_size, &ppe->foe_dma, + GFP_KERNEL); + if (!ppe->foe) + return -ENOMEM; + + ppe->eth = eth; + eth->ppe = ppe; + + ppe->foe_flow = devm_kzalloc(eth->dev, + PPE_NUM_ENTRIES * sizeof(*ppe->foe_flow), + GFP_KERNEL); + if (!ppe->foe_flow) + return -ENOMEM; + + err = rhashtable_init(ð->flow_table, &airoha_flow_table_params); + if (err) + return err; + + err = airoha_ppe_debugfs_init(ppe); + if (err) + rhashtable_destroy(ð->flow_table); + + return err; +} + +void airoha_ppe_deinit(struct airoha_eth *eth) +{ + struct airoha_npu *npu; + + rcu_read_lock(); + npu = rcu_dereference(eth->npu); + if (npu) { + npu->ops.ppe_deinit(npu); + airoha_npu_put(npu); + } + rcu_read_unlock(); + + rhashtable_destroy(ð->flow_table); + debugfs_remove(eth->ppe->debugfs_dir); +} diff --git a/drivers/net/ethernet/airoha/airoha_ppe_debugfs.c b/drivers/net/ethernet/airoha/airoha_ppe_debugfs.c new file mode 100644 index 0000000000000..3cdc6fd53fc75 --- /dev/null +++ b/drivers/net/ethernet/airoha/airoha_ppe_debugfs.c @@ -0,0 +1,181 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2025 AIROHA Inc + * Author: Lorenzo Bianconi + */ + +#include "airoha_eth.h" + +static void airoha_debugfs_ppe_print_tuple(struct seq_file *m, + void *src_addr, void *dest_addr, + u16 *src_port, u16 *dest_port, + bool ipv6) +{ + __be32 n_addr[IPV6_ADDR_WORDS]; + + if (ipv6) { + ipv6_addr_cpu_to_be32(n_addr, src_addr); + seq_printf(m, "%pI6", n_addr); + } else { + seq_printf(m, "%pI4h", src_addr); + } + if (src_port) + seq_printf(m, ":%d", *src_port); + + seq_puts(m, "->"); + + if (ipv6) { + ipv6_addr_cpu_to_be32(n_addr, dest_addr); + seq_printf(m, "%pI6", n_addr); + } else { + seq_printf(m, "%pI4h", dest_addr); + } + if (dest_port) + seq_printf(m, ":%d", *dest_port); +} + +static int airoha_ppe_debugfs_foe_show(struct seq_file *m, void 
*private, + bool bind) +{ + static const char *const ppe_type_str[] = { + [PPE_PKT_TYPE_IPV4_HNAPT] = "IPv4 5T", + [PPE_PKT_TYPE_IPV4_ROUTE] = "IPv4 3T", + [PPE_PKT_TYPE_BRIDGE] = "L2B", + [PPE_PKT_TYPE_IPV4_DSLITE] = "DS-LITE", + [PPE_PKT_TYPE_IPV6_ROUTE_3T] = "IPv6 3T", + [PPE_PKT_TYPE_IPV6_ROUTE_5T] = "IPv6 5T", + [PPE_PKT_TYPE_IPV6_6RD] = "6RD", + }; + static const char *const ppe_state_str[] = { + [AIROHA_FOE_STATE_INVALID] = "INV", + [AIROHA_FOE_STATE_UNBIND] = "UNB", + [AIROHA_FOE_STATE_BIND] = "BND", + [AIROHA_FOE_STATE_FIN] = "FIN", + }; + struct airoha_ppe *ppe = m->private; + int i; + + for (i = 0; i < PPE_NUM_ENTRIES; i++) { + const char *state_str, *type_str = "UNKNOWN"; + void *src_addr = NULL, *dest_addr = NULL; + u16 *src_port = NULL, *dest_port = NULL; + struct airoha_foe_mac_info_common *l2; + unsigned char h_source[ETH_ALEN] = {}; + unsigned char h_dest[ETH_ALEN]; + struct airoha_foe_entry *hwe; + u32 type, state, ib2, data; + bool ipv6 = false; + + hwe = airoha_ppe_foe_get_entry(ppe, i); + if (!hwe) + continue; + + state = FIELD_GET(AIROHA_FOE_IB1_BIND_STATE, hwe->ib1); + if (!state) + continue; + + if (bind && state != AIROHA_FOE_STATE_BIND) + continue; + + state_str = ppe_state_str[state % ARRAY_SIZE(ppe_state_str)]; + type = FIELD_GET(AIROHA_FOE_IB1_BIND_PACKET_TYPE, hwe->ib1); + if (type < ARRAY_SIZE(ppe_type_str) && ppe_type_str[type]) + type_str = ppe_type_str[type]; + + seq_printf(m, "%05x %s %7s", i, state_str, type_str); + + switch (type) { + case PPE_PKT_TYPE_IPV4_HNAPT: + case PPE_PKT_TYPE_IPV4_DSLITE: + src_port = &hwe->ipv4.orig_tuple.src_port; + dest_port = &hwe->ipv4.orig_tuple.dest_port; + fallthrough; + case PPE_PKT_TYPE_IPV4_ROUTE: + src_addr = &hwe->ipv4.orig_tuple.src_ip; + dest_addr = &hwe->ipv4.orig_tuple.dest_ip; + break; + case PPE_PKT_TYPE_IPV6_ROUTE_5T: + src_port = &hwe->ipv6.src_port; + dest_port = &hwe->ipv6.dest_port; + fallthrough; + case PPE_PKT_TYPE_IPV6_ROUTE_3T: + case PPE_PKT_TYPE_IPV6_6RD: + src_addr = &hwe->ipv6.src_ip; + dest_addr = &hwe->ipv6.dest_ip; + ipv6 = true; + break; + default: + break; + } + + if (src_addr && dest_addr) { + seq_puts(m, " orig="); + airoha_debugfs_ppe_print_tuple(m, src_addr, dest_addr, + src_port, dest_port, ipv6); + } + + switch (type) { + case PPE_PKT_TYPE_IPV4_HNAPT: + case PPE_PKT_TYPE_IPV4_DSLITE: + src_port = &hwe->ipv4.new_tuple.src_port; + dest_port = &hwe->ipv4.new_tuple.dest_port; + fallthrough; + case PPE_PKT_TYPE_IPV4_ROUTE: + src_addr = &hwe->ipv4.new_tuple.src_ip; + dest_addr = &hwe->ipv4.new_tuple.dest_ip; + seq_puts(m, " new="); + airoha_debugfs_ppe_print_tuple(m, src_addr, dest_addr, + src_port, dest_port, + ipv6); + break; + default: + break; + } + + if (type >= PPE_PKT_TYPE_IPV6_ROUTE_3T) { + data = hwe->ipv6.data; + ib2 = hwe->ipv6.ib2; + l2 = &hwe->ipv6.l2; + } else { + data = hwe->ipv4.data; + ib2 = hwe->ipv4.ib2; + l2 = &hwe->ipv4.l2.common; + *((__be16 *)&h_source[4]) = + cpu_to_be16(hwe->ipv4.l2.src_mac_lo); + } + + *((__be32 *)h_dest) = cpu_to_be32(l2->dest_mac_hi); + *((__be16 *)&h_dest[4]) = cpu_to_be16(l2->dest_mac_lo); + *((__be32 *)h_source) = cpu_to_be32(l2->src_mac_hi); + + seq_printf(m, " eth=%pM->%pM etype=%04x data=%08x" + " vlan=%d,%d ib1=%08x ib2=%08x\n", + h_source, h_dest, l2->etype, data, + l2->vlan1, l2->vlan2, hwe->ib1, ib2); + } + + return 0; +} + +static int airoha_ppe_debugfs_foe_all_show(struct seq_file *m, void *private) +{ + return airoha_ppe_debugfs_foe_show(m, private, false); +} +DEFINE_SHOW_ATTRIBUTE(airoha_ppe_debugfs_foe_all); + +static int 
airoha_ppe_debugfs_foe_bind_show(struct seq_file *m, void *private)
+{
+	return airoha_ppe_debugfs_foe_show(m, private, true);
+}
+DEFINE_SHOW_ATTRIBUTE(airoha_ppe_debugfs_foe_bind);
+
+int airoha_ppe_debugfs_init(struct airoha_ppe *ppe)
+{
+	ppe->debugfs_dir = debugfs_create_dir("ppe", NULL);
+	debugfs_create_file("entries", 0444, ppe->debugfs_dir, ppe,
+			    &airoha_ppe_debugfs_foe_all_fops);
+	debugfs_create_file("bind", 0444, ppe->debugfs_dir, ppe,
+			    &airoha_ppe_debugfs_foe_bind_fops);
+
+	return 0;
+}
diff --git a/drivers/net/ethernet/airoha/airoha_regs.h b/drivers/net/ethernet/airoha/airoha_regs.h
new file mode 100644
index 0000000000000..8146cde4e8ba3
--- /dev/null
+++ b/drivers/net/ethernet/airoha/airoha_regs.h
@@ -0,0 +1,803 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2024 AIROHA Inc
+ * Author: Lorenzo Bianconi <lorenzo@kernel.org>
+ */
+
+#ifndef AIROHA_REGS_H
+#define AIROHA_REGS_H
+
+#include <linux/types.h>
+
+/* FE */
+#define PSE_BASE			0x0100
+#define CSR_IFC_BASE			0x0200
+#define CDM1_BASE			0x0400
+#define GDM1_BASE			0x0500
+#define PPE1_BASE			0x0c00
+#define PPE2_BASE			0x1c00
+
+#define CDM2_BASE			0x1400
+#define GDM2_BASE			0x1500
+
+#define GDM3_BASE			0x1100
+#define GDM4_BASE			0x2500
+
+#define GDM_BASE(_n)			\
+	((_n) == 4 ? GDM4_BASE :	\
+	 (_n) == 3 ? GDM3_BASE :	\
+	 (_n) == 2 ? GDM2_BASE : GDM1_BASE)
+
+#define REG_FE_DMA_GLO_CFG		0x0000
+#define FE_DMA_GLO_L2_SPACE_MASK	GENMASK(7, 4)
+#define FE_DMA_GLO_PG_SZ_MASK		BIT(3)
+
+#define REG_FE_RST_GLO_CFG		0x0004
+#define FE_RST_GDM4_MBI_ARB_MASK	BIT(3)
+#define FE_RST_GDM3_MBI_ARB_MASK	BIT(2)
+#define FE_RST_CORE_MASK		BIT(0)
+
+#define REG_FE_FOE_TS			0x0010
+
+#define REG_FE_WAN_PORT			0x0024
+#define WAN1_EN_MASK			BIT(16)
+#define WAN1_MASK			GENMASK(12, 8)
+#define WAN0_MASK			GENMASK(4, 0)
+
+#define REG_FE_WAN_MAC_H		0x0030
+#define REG_FE_LAN_MAC_H		0x0040
+
+#define REG_FE_MAC_LMIN(_n)		((_n) + 0x04)
+#define REG_FE_MAC_LMAX(_n)		((_n) + 0x08)
+
+#define REG_FE_CDM1_OQ_MAP0		0x0050
+#define REG_FE_CDM1_OQ_MAP1		0x0054
+#define REG_FE_CDM1_OQ_MAP2		0x0058
+#define REG_FE_CDM1_OQ_MAP3		0x005c
+
+#define REG_FE_PCE_CFG			0x0070
+#define PCE_DPI_EN_MASK			BIT(2)
+#define PCE_KA_EN_MASK			BIT(1)
+#define PCE_MC_EN_MASK			BIT(0)
+
+#define REG_FE_PSE_QUEUE_CFG_WR		0x0080
+#define PSE_CFG_PORT_ID_MASK		GENMASK(27, 24)
+#define PSE_CFG_QUEUE_ID_MASK		GENMASK(20, 16)
+#define PSE_CFG_WR_EN_MASK		BIT(8)
+#define PSE_CFG_OQRSV_SEL_MASK		BIT(0)
+
+#define REG_FE_PSE_QUEUE_CFG_VAL	0x0084
+#define PSE_CFG_OQ_RSV_MASK		GENMASK(13, 0)
+
+#define PSE_FQ_CFG			0x008c
+#define PSE_FQ_LIMIT_MASK		GENMASK(14, 0)
+
+#define REG_FE_PSE_BUF_SET		0x0090
+#define PSE_SHARE_USED_LTHD_MASK	GENMASK(31, 16)
+#define PSE_ALLRSV_MASK			GENMASK(14, 0)
+
+#define REG_PSE_SHARE_USED_THD		0x0094
+#define PSE_SHARE_USED_MTHD_MASK	GENMASK(31, 16)
+#define PSE_SHARE_USED_HTHD_MASK	GENMASK(15, 0)
+
+#define REG_GDM_MISC_CFG		0x0148
+#define GDM2_RDM_ACK_WAIT_PREF_MASK	BIT(9)
+#define GDM2_CHN_VLD_MODE_MASK		BIT(5)
+
+#define REG_FE_CSR_IFC_CFG		CSR_IFC_BASE
+#define FE_IFC_EN_MASK			BIT(0)
+
+#define REG_FE_VIP_PORT_EN		0x01f0
+#define REG_FE_IFC_PORT_EN		0x01f4
+
+#define REG_PSE_IQ_REV1			(PSE_BASE + 0x08)
+#define PSE_IQ_RES1_P2_MASK		GENMASK(23, 16)
+
+#define REG_PSE_IQ_REV2			(PSE_BASE + 0x0c)
+#define PSE_IQ_RES2_P5_MASK		GENMASK(15, 8)
+#define PSE_IQ_RES2_P4_MASK		GENMASK(7, 0)
+
+#define REG_FE_VIP_EN(_n)		(0x0300 + ((_n) << 3))
+#define PATN_FCPU_EN_MASK		BIT(7)
+#define PATN_SWP_EN_MASK		BIT(6)
+#define PATN_DP_EN_MASK			BIT(5)
+#define PATN_SP_EN_MASK			BIT(4)
+#define PATN_TYPE_MASK			GENMASK(3, 1)
+#define PATN_EN_MASK			BIT(0)
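
The FE/PPE register fields in this header are declared as GENMASK()/BIT() masks and are always manipulated through the FIELD_PREP()/FIELD_GET() helpers rather than open-coded shifts, as in the airoha_fe_rmw() calls in airoha_ppe_hw_init() above. A minimal standalone sketch of that read-modify-write idiom (the masks mirror the REG_PPE_TB_CFG definitions later in this header; the ppe_tb_cfg_update() helper is illustrative only, not part of the driver):

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define PPE_TB_CFG_SEARCH_MISS_MASK	GENMASK(5, 4)
#define PPE_TB_ENTRY_SIZE_MASK		BIT(3)

/* Clear the affected fields of the current register value, then OR in
 * the encoded replacements - the same pattern airoha_fe_rmw() applies
 * when airoha_ppe_hw_init() programs SEARCH_MISS = 3 and ENTRY_SIZE = 0.
 */
static u32 ppe_tb_cfg_update(u32 old)
{
	u32 clr = PPE_TB_CFG_SEARCH_MISS_MASK | PPE_TB_ENTRY_SIZE_MASK;
	u32 set = FIELD_PREP(PPE_TB_CFG_SEARCH_MISS_MASK, 3) |
		  FIELD_PREP(PPE_TB_ENTRY_SIZE_MASK, 0);

	return (old & ~clr) | set;
}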
+ +#define REG_FE_VIP_PATN(_n) (0x0304 + ((_n) << 3)) +#define PATN_DP_MASK GENMASK(31, 16) +#define PATN_SP_MASK GENMASK(15, 0) + +#define REG_CDM1_VLAN_CTRL CDM1_BASE +#define CDM1_VLAN_MASK GENMASK(31, 16) + +#define REG_CDM1_FWD_CFG (CDM1_BASE + 0x08) +#define CDM1_VIP_QSEL_MASK GENMASK(24, 20) + +#define REG_CDM1_CRSN_QSEL(_n) (CDM1_BASE + 0x10 + ((_n) << 2)) +#define CDM1_CRSN_QSEL_REASON_MASK(_n) \ + GENMASK(4 + (((_n) % 4) << 3), (((_n) % 4) << 3)) + +#define REG_CDM2_FWD_CFG (CDM2_BASE + 0x08) +#define CDM2_OAM_QSEL_MASK GENMASK(31, 27) +#define CDM2_VIP_QSEL_MASK GENMASK(24, 20) + +#define REG_CDM2_CRSN_QSEL(_n) (CDM2_BASE + 0x10 + ((_n) << 2)) +#define CDM2_CRSN_QSEL_REASON_MASK(_n) \ + GENMASK(4 + (((_n) % 4) << 3), (((_n) % 4) << 3)) + +#define REG_GDM_FWD_CFG(_n) GDM_BASE(_n) +#define GDM_DROP_CRC_ERR BIT(23) +#define GDM_IP4_CKSUM BIT(22) +#define GDM_TCP_CKSUM BIT(21) +#define GDM_UDP_CKSUM BIT(20) +#define GDM_STRIP_CRC BIT(16) +#define GDM_UCFQ_MASK GENMASK(15, 12) +#define GDM_BCFQ_MASK GENMASK(11, 8) +#define GDM_MCFQ_MASK GENMASK(7, 4) +#define GDM_OCFQ_MASK GENMASK(3, 0) + +#define REG_GDM_INGRESS_CFG(_n) (GDM_BASE(_n) + 0x10) +#define GDM_INGRESS_FC_EN_MASK BIT(1) +#define GDM_STAG_EN_MASK BIT(0) + +#define REG_GDM_LEN_CFG(_n) (GDM_BASE(_n) + 0x14) +#define GDM_SHORT_LEN_MASK GENMASK(13, 0) +#define GDM_LONG_LEN_MASK GENMASK(29, 16) + +#define REG_GDM_LPBK_CFG(_n) (GDM_BASE(_n) + 0x1c) +#define LPBK_GAP_MASK GENMASK(31, 24) +#define LPBK_LEN_MASK GENMASK(23, 10) +#define LPBK_CHAN_MASK GENMASK(8, 4) +#define LPBK_MODE_MASK GENMASK(3, 1) +#define LPBK_EN_MASK BIT(0) + +#define REG_GDM_TXCHN_EN(_n) (GDM_BASE(_n) + 0x24) +#define REG_GDM_RXCHN_EN(_n) (GDM_BASE(_n) + 0x28) + +#define REG_FE_CPORT_CFG (GDM1_BASE + 0x40) +#define FE_CPORT_PAD BIT(26) +#define FE_CPORT_PORT_XFC_MASK BIT(25) +#define FE_CPORT_QUEUE_XFC_MASK BIT(24) + +#define REG_FE_GDM_MIB_CLEAR(_n) (GDM_BASE(_n) + 0xf0) +#define FE_GDM_MIB_RX_CLEAR_MASK BIT(1) +#define FE_GDM_MIB_TX_CLEAR_MASK BIT(0) + +#define REG_FE_GDM1_MIB_CFG (GDM1_BASE + 0xf4) +#define FE_STRICT_RFC2819_MODE_MASK BIT(31) +#define FE_GDM1_TX_MIB_SPLIT_EN_MASK BIT(17) +#define FE_GDM1_RX_MIB_SPLIT_EN_MASK BIT(16) +#define FE_TX_MIB_ID_MASK GENMASK(15, 8) +#define FE_RX_MIB_ID_MASK GENMASK(7, 0) + +#define REG_FE_GDM_TX_OK_PKT_CNT_L(_n) (GDM_BASE(_n) + 0x104) +#define REG_FE_GDM_TX_OK_BYTE_CNT_L(_n) (GDM_BASE(_n) + 0x10c) +#define REG_FE_GDM_TX_ETH_PKT_CNT_L(_n) (GDM_BASE(_n) + 0x110) +#define REG_FE_GDM_TX_ETH_BYTE_CNT_L(_n) (GDM_BASE(_n) + 0x114) +#define REG_FE_GDM_TX_ETH_DROP_CNT(_n) (GDM_BASE(_n) + 0x118) +#define REG_FE_GDM_TX_ETH_BC_CNT(_n) (GDM_BASE(_n) + 0x11c) +#define REG_FE_GDM_TX_ETH_MC_CNT(_n) (GDM_BASE(_n) + 0x120) +#define REG_FE_GDM_TX_ETH_RUNT_CNT(_n) (GDM_BASE(_n) + 0x124) +#define REG_FE_GDM_TX_ETH_LONG_CNT(_n) (GDM_BASE(_n) + 0x128) +#define REG_FE_GDM_TX_ETH_E64_CNT_L(_n) (GDM_BASE(_n) + 0x12c) +#define REG_FE_GDM_TX_ETH_L64_CNT_L(_n) (GDM_BASE(_n) + 0x130) +#define REG_FE_GDM_TX_ETH_L127_CNT_L(_n) (GDM_BASE(_n) + 0x134) +#define REG_FE_GDM_TX_ETH_L255_CNT_L(_n) (GDM_BASE(_n) + 0x138) +#define REG_FE_GDM_TX_ETH_L511_CNT_L(_n) (GDM_BASE(_n) + 0x13c) +#define REG_FE_GDM_TX_ETH_L1023_CNT_L(_n) (GDM_BASE(_n) + 0x140) + +#define REG_FE_GDM_RX_OK_PKT_CNT_L(_n) (GDM_BASE(_n) + 0x148) +#define REG_FE_GDM_RX_FC_DROP_CNT(_n) (GDM_BASE(_n) + 0x14c) +#define REG_FE_GDM_RX_RC_DROP_CNT(_n) (GDM_BASE(_n) + 0x150) +#define REG_FE_GDM_RX_OVERFLOW_DROP_CNT(_n) (GDM_BASE(_n) + 0x154) +#define REG_FE_GDM_RX_ERROR_DROP_CNT(_n) 
(GDM_BASE(_n) + 0x158) +#define REG_FE_GDM_RX_OK_BYTE_CNT_L(_n) (GDM_BASE(_n) + 0x15c) +#define REG_FE_GDM_RX_ETH_PKT_CNT_L(_n) (GDM_BASE(_n) + 0x160) +#define REG_FE_GDM_RX_ETH_BYTE_CNT_L(_n) (GDM_BASE(_n) + 0x164) +#define REG_FE_GDM_RX_ETH_DROP_CNT(_n) (GDM_BASE(_n) + 0x168) +#define REG_FE_GDM_RX_ETH_BC_CNT(_n) (GDM_BASE(_n) + 0x16c) +#define REG_FE_GDM_RX_ETH_MC_CNT(_n) (GDM_BASE(_n) + 0x170) +#define REG_FE_GDM_RX_ETH_CRC_ERR_CNT(_n) (GDM_BASE(_n) + 0x174) +#define REG_FE_GDM_RX_ETH_FRAG_CNT(_n) (GDM_BASE(_n) + 0x178) +#define REG_FE_GDM_RX_ETH_JABBER_CNT(_n) (GDM_BASE(_n) + 0x17c) +#define REG_FE_GDM_RX_ETH_RUNT_CNT(_n) (GDM_BASE(_n) + 0x180) +#define REG_FE_GDM_RX_ETH_LONG_CNT(_n) (GDM_BASE(_n) + 0x184) +#define REG_FE_GDM_RX_ETH_E64_CNT_L(_n) (GDM_BASE(_n) + 0x188) +#define REG_FE_GDM_RX_ETH_L64_CNT_L(_n) (GDM_BASE(_n) + 0x18c) +#define REG_FE_GDM_RX_ETH_L127_CNT_L(_n) (GDM_BASE(_n) + 0x190) +#define REG_FE_GDM_RX_ETH_L255_CNT_L(_n) (GDM_BASE(_n) + 0x194) +#define REG_FE_GDM_RX_ETH_L511_CNT_L(_n) (GDM_BASE(_n) + 0x198) +#define REG_FE_GDM_RX_ETH_L1023_CNT_L(_n) (GDM_BASE(_n) + 0x19c) + +#define REG_PPE_GLO_CFG(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x200) +#define PPE_GLO_CFG_BUSY_MASK BIT(31) +#define PPE_GLO_CFG_FLOW_DROP_UPDATE_MASK BIT(9) +#define PPE_GLO_CFG_PSE_HASH_OFS_MASK BIT(6) +#define PPE_GLO_CFG_PPE_BSWAP_MASK BIT(5) +#define PPE_GLO_CFG_TTL_DROP_MASK BIT(4) +#define PPE_GLO_CFG_IP4_CS_DROP_MASK BIT(3) +#define PPE_GLO_CFG_IP4_L4_CS_DROP_MASK BIT(2) +#define PPE_GLO_CFG_EN_MASK BIT(0) + +#define REG_PPE_PPE_FLOW_CFG(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x204) +#define PPE_FLOW_CFG_IP6_HASH_GRE_KEY_MASK BIT(20) +#define PPE_FLOW_CFG_IP4_HASH_GRE_KEY_MASK BIT(19) +#define PPE_FLOW_CFG_IP4_HASH_FLOW_LABEL_MASK BIT(18) +#define PPE_FLOW_CFG_IP4_NAT_FRAG_MASK BIT(17) +#define PPE_FLOW_CFG_IP_PROTO_BLACKLIST_MASK BIT(16) +#define PPE_FLOW_CFG_IP4_DSLITE_MASK BIT(14) +#define PPE_FLOW_CFG_IP4_NAPT_MASK BIT(13) +#define PPE_FLOW_CFG_IP4_NAT_MASK BIT(12) +#define PPE_FLOW_CFG_IP6_6RD_MASK BIT(10) +#define PPE_FLOW_CFG_IP6_5T_ROUTE_MASK BIT(9) +#define PPE_FLOW_CFG_IP6_3T_ROUTE_MASK BIT(8) +#define PPE_FLOW_CFG_IP4_UDP_FRAG_MASK BIT(7) +#define PPE_FLOW_CFG_IP4_TCP_FRAG_MASK BIT(6) + +#define REG_PPE_IP_PROTO_CHK(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x208) +#define PPE_IP_PROTO_CHK_IPV4_MASK GENMASK(15, 0) +#define PPE_IP_PROTO_CHK_IPV6_MASK GENMASK(31, 16) + +#define REG_PPE_TB_CFG(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x21c) +#define PPE_SRAM_TB_NUM_ENTRY_MASK GENMASK(26, 24) +#define PPE_TB_CFG_KEEPALIVE_MASK GENMASK(13, 12) +#define PPE_TB_CFG_AGE_TCP_FIN_MASK BIT(11) +#define PPE_TB_CFG_AGE_UDP_MASK BIT(10) +#define PPE_TB_CFG_AGE_TCP_MASK BIT(9) +#define PPE_TB_CFG_AGE_UNBIND_MASK BIT(8) +#define PPE_TB_CFG_AGE_NON_L4_MASK BIT(7) +#define PPE_TB_CFG_AGE_PREBIND_MASK BIT(6) +#define PPE_TB_CFG_SEARCH_MISS_MASK GENMASK(5, 4) +#define PPE_TB_ENTRY_SIZE_MASK BIT(3) +#define PPE_DRAM_TB_NUM_ENTRY_MASK GENMASK(2, 0) + +#define REG_PPE_TB_BASE(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x220) + +#define REG_PPE_BIND_RATE(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x228) +#define PPE_BIND_RATE_L2B_BIND_MASK GENMASK(31, 16) +#define PPE_BIND_RATE_BIND_MASK GENMASK(15, 0) + +#define REG_PPE_BIND_LIMIT0(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x22c) +#define PPE_BIND_LIMIT0_HALF_MASK GENMASK(29, 16) +#define PPE_BIND_LIMIT0_QUARTER_MASK GENMASK(13, 0) + +#define REG_PPE_BIND_LIMIT1(_n) (((_n) ? 
PPE2_BASE : PPE1_BASE) + 0x230) +#define PPE_BIND_LIMIT1_NON_L4_MASK GENMASK(23, 16) +#define PPE_BIND_LIMIT1_FULL_MASK GENMASK(13, 0) + +#define REG_PPE_BND_AGE0(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x23c) +#define PPE_BIND_AGE0_DELTA_NON_L4 GENMASK(30, 16) +#define PPE_BIND_AGE0_DELTA_UDP GENMASK(14, 0) + +#define REG_PPE_UNBIND_AGE(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x238) +#define PPE_UNBIND_AGE_MIN_PACKETS_MASK GENMASK(31, 16) +#define PPE_UNBIND_AGE_DELTA_MASK GENMASK(7, 0) + +#define REG_PPE_BND_AGE1(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x240) +#define PPE_BIND_AGE1_DELTA_TCP_FIN GENMASK(30, 16) +#define PPE_BIND_AGE1_DELTA_TCP GENMASK(14, 0) + +#define REG_PPE_HASH_SEED(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x244) +#define PPE_HASH_SEED 0x12345678 + +#define REG_PPE_DFT_CPORT0(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x248) + +#define REG_PPE_DFT_CPORT1(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x24c) + +#define REG_PPE_TB_HASH_CFG(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x250) +#define PPE_DRAM_HASH1_MODE_MASK GENMASK(31, 28) +#define PPE_DRAM_HASH1_EN_MASK BIT(24) +#define PPE_DRAM_HASH0_MODE_MASK GENMASK(23, 20) +#define PPE_DRAM_TABLE_EN_MASK BIT(16) +#define PPE_SRAM_HASH1_MODE_MASK GENMASK(15, 12) +#define PPE_SRAM_HASH1_EN_MASK BIT(8) +#define PPE_SRAM_HASH0_MODE_MASK GENMASK(7, 4) +#define PPE_SRAM_TABLE_EN_MASK BIT(0) + +#define REG_PPE_MTU_BASE(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x304) +#define REG_PPE_MTU(_m, _n) (REG_PPE_MTU_BASE(_m) + ((_n) << 2)) +#define FP1_EGRESS_MTU_MASK GENMASK(29, 16) +#define FP0_EGRESS_MTU_MASK GENMASK(13, 0) + +#define REG_PPE_RAM_CTRL(_n) (((_n) ? PPE2_BASE : PPE1_BASE) + 0x31c) +#define PPE_SRAM_CTRL_ACK_MASK BIT(31) +#define PPE_SRAM_CTRL_DUAL_SUCESS_MASK BIT(30) +#define PPE_SRAM_CTRL_ENTRY_MASK GENMASK(23, 8) +#define PPE_SRAM_WR_DUAL_DIRECTION_MASK BIT(2) +#define PPE_SRAM_CTRL_WR_MASK BIT(1) +#define PPE_SRAM_CTRL_REQ_MASK BIT(0) + +#define REG_PPE_RAM_BASE(_n) (((_n) ? 
PPE2_BASE : PPE1_BASE) + 0x320) +#define REG_PPE_RAM_ENTRY(_m, _n) (REG_PPE_RAM_BASE(_m) + ((_n) << 2)) + +#define REG_FE_GDM_TX_OK_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x280) +#define REG_FE_GDM_TX_OK_BYTE_CNT_H(_n) (GDM_BASE(_n) + 0x284) +#define REG_FE_GDM_TX_ETH_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x288) +#define REG_FE_GDM_TX_ETH_BYTE_CNT_H(_n) (GDM_BASE(_n) + 0x28c) + +#define REG_FE_GDM_RX_OK_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x290) +#define REG_FE_GDM_RX_OK_BYTE_CNT_H(_n) (GDM_BASE(_n) + 0x294) +#define REG_FE_GDM_RX_ETH_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x298) +#define REG_FE_GDM_RX_ETH_BYTE_CNT_H(_n) (GDM_BASE(_n) + 0x29c) +#define REG_FE_GDM_TX_ETH_E64_CNT_H(_n) (GDM_BASE(_n) + 0x2b8) +#define REG_FE_GDM_TX_ETH_L64_CNT_H(_n) (GDM_BASE(_n) + 0x2bc) +#define REG_FE_GDM_TX_ETH_L127_CNT_H(_n) (GDM_BASE(_n) + 0x2c0) +#define REG_FE_GDM_TX_ETH_L255_CNT_H(_n) (GDM_BASE(_n) + 0x2c4) +#define REG_FE_GDM_TX_ETH_L511_CNT_H(_n) (GDM_BASE(_n) + 0x2c8) +#define REG_FE_GDM_TX_ETH_L1023_CNT_H(_n) (GDM_BASE(_n) + 0x2cc) +#define REG_FE_GDM_RX_ETH_E64_CNT_H(_n) (GDM_BASE(_n) + 0x2e8) +#define REG_FE_GDM_RX_ETH_L64_CNT_H(_n) (GDM_BASE(_n) + 0x2ec) +#define REG_FE_GDM_RX_ETH_L127_CNT_H(_n) (GDM_BASE(_n) + 0x2f0) +#define REG_FE_GDM_RX_ETH_L255_CNT_H(_n) (GDM_BASE(_n) + 0x2f4) +#define REG_FE_GDM_RX_ETH_L511_CNT_H(_n) (GDM_BASE(_n) + 0x2f8) +#define REG_FE_GDM_RX_ETH_L1023_CNT_H(_n) (GDM_BASE(_n) + 0x2fc) + +#define REG_GDM2_CHN_RLS (GDM2_BASE + 0x20) +#define MBI_RX_AGE_SEL_MASK GENMASK(26, 25) +#define MBI_TX_AGE_SEL_MASK GENMASK(18, 17) + +#define REG_GDM3_FWD_CFG GDM3_BASE +#define GDM3_PAD_EN_MASK BIT(28) + +#define REG_GDM4_FWD_CFG GDM4_BASE +#define GDM4_PAD_EN_MASK BIT(28) +#define GDM4_SPORT_OFFSET0_MASK GENMASK(11, 8) + +#define REG_GDM4_SRC_PORT_SET (GDM4_BASE + 0x23c) +#define GDM4_SPORT_OFF2_MASK GENMASK(19, 16) +#define GDM4_SPORT_OFF1_MASK GENMASK(15, 12) +#define GDM4_SPORT_OFF0_MASK GENMASK(11, 8) + +#define REG_IP_FRAG_FP 0x2010 +#define IP_ASSEMBLE_PORT_MASK GENMASK(24, 21) +#define IP_ASSEMBLE_NBQ_MASK GENMASK(20, 16) +#define IP_FRAGMENT_PORT_MASK GENMASK(8, 5) +#define IP_FRAGMENT_NBQ_MASK GENMASK(4, 0) + +#define REG_MC_VLAN_EN 0x2100 +#define MC_VLAN_EN_MASK BIT(0) + +#define REG_MC_VLAN_CFG 0x2104 +#define MC_VLAN_CFG_CMD_DONE_MASK BIT(31) +#define MC_VLAN_CFG_TABLE_ID_MASK GENMASK(21, 16) +#define MC_VLAN_CFG_PORT_ID_MASK GENMASK(11, 8) +#define MC_VLAN_CFG_TABLE_SEL_MASK BIT(4) +#define MC_VLAN_CFG_RW_MASK BIT(0) + +#define REG_MC_VLAN_DATA 0x2108 + +#define REG_SP_DFT_CPORT(_n) (0x20e0 + ((_n) << 2)) +#define SP_CPORT_PCIE1_MASK GENMASK(31, 28) +#define SP_CPORT_PCIE0_MASK GENMASK(27, 24) +#define SP_CPORT_USB_MASK GENMASK(7, 4) +#define SP_CPORT_ETH_MASK GENMASK(7, 4) + +#define REG_SRC_PORT_FC_MAP6 0x2298 +#define FC_ID_OF_SRC_PORT27_MASK GENMASK(28, 24) +#define FC_ID_OF_SRC_PORT26_MASK GENMASK(20, 16) +#define FC_ID_OF_SRC_PORT25_MASK GENMASK(12, 8) +#define FC_ID_OF_SRC_PORT24_MASK GENMASK(4, 0) + +#define REG_CDM5_RX_OQ1_DROP_CNT 0x29d4 + +/* QDMA */ +#define REG_QDMA_GLOBAL_CFG 0x0004 +#define GLOBAL_CFG_RX_2B_OFFSET_MASK BIT(31) +#define GLOBAL_CFG_DMA_PREFERENCE_MASK GENMASK(30, 29) +#define GLOBAL_CFG_CPU_TXR_RR_MASK BIT(28) +#define GLOBAL_CFG_DSCP_BYTE_SWAP_MASK BIT(27) +#define GLOBAL_CFG_PAYLOAD_BYTE_SWAP_MASK BIT(26) +#define GLOBAL_CFG_MULTICAST_MODIFY_FP_MASK BIT(25) +#define GLOBAL_CFG_OAM_MODIFY_MASK BIT(24) +#define GLOBAL_CFG_RESET_MASK BIT(23) +#define GLOBAL_CFG_RESET_DONE_MASK BIT(22) +#define GLOBAL_CFG_MULTICAST_EN_MASK BIT(21) +#define GLOBAL_CFG_IRQ1_EN_MASK 
BIT(20) +#define GLOBAL_CFG_IRQ0_EN_MASK BIT(19) +#define GLOBAL_CFG_LOOPCNT_EN_MASK BIT(18) +#define GLOBAL_CFG_RD_BYPASS_WR_MASK BIT(17) +#define GLOBAL_CFG_QDMA_LOOPBACK_MASK BIT(16) +#define GLOBAL_CFG_LPBK_RXQ_SEL_MASK GENMASK(13, 8) +#define GLOBAL_CFG_CHECK_DONE_MASK BIT(7) +#define GLOBAL_CFG_TX_WB_DONE_MASK BIT(6) +#define GLOBAL_CFG_MAX_ISSUE_NUM_MASK GENMASK(5, 4) +#define GLOBAL_CFG_RX_DMA_BUSY_MASK BIT(3) +#define GLOBAL_CFG_RX_DMA_EN_MASK BIT(2) +#define GLOBAL_CFG_TX_DMA_BUSY_MASK BIT(1) +#define GLOBAL_CFG_TX_DMA_EN_MASK BIT(0) + +#define REG_FWD_DSCP_BASE 0x0010 +#define REG_FWD_BUF_BASE 0x0014 + +#define REG_HW_FWD_DSCP_CFG 0x0018 +#define HW_FWD_DSCP_PAYLOAD_SIZE_MASK GENMASK(29, 28) +#define HW_FWD_DSCP_SCATTER_LEN_MASK GENMASK(17, 16) +#define HW_FWD_DSCP_MIN_SCATTER_LEN_MASK GENMASK(15, 0) + +#define REG_INT_STATUS(_n) \ + (((_n) == 4) ? 0x0730 : \ + ((_n) == 3) ? 0x0724 : \ + ((_n) == 2) ? 0x0720 : \ + ((_n) == 1) ? 0x0024 : 0x0020) + +#define REG_INT_ENABLE(_n) \ + (((_n) == 4) ? 0x0750 : \ + ((_n) == 3) ? 0x0744 : \ + ((_n) == 2) ? 0x0740 : \ + ((_n) == 1) ? 0x002c : 0x0028) + +/* QDMA_CSR_INT_ENABLE1 */ +#define RX15_COHERENT_INT_MASK BIT(31) +#define RX14_COHERENT_INT_MASK BIT(30) +#define RX13_COHERENT_INT_MASK BIT(29) +#define RX12_COHERENT_INT_MASK BIT(28) +#define RX11_COHERENT_INT_MASK BIT(27) +#define RX10_COHERENT_INT_MASK BIT(26) +#define RX9_COHERENT_INT_MASK BIT(25) +#define RX8_COHERENT_INT_MASK BIT(24) +#define RX7_COHERENT_INT_MASK BIT(23) +#define RX6_COHERENT_INT_MASK BIT(22) +#define RX5_COHERENT_INT_MASK BIT(21) +#define RX4_COHERENT_INT_MASK BIT(20) +#define RX3_COHERENT_INT_MASK BIT(19) +#define RX2_COHERENT_INT_MASK BIT(18) +#define RX1_COHERENT_INT_MASK BIT(17) +#define RX0_COHERENT_INT_MASK BIT(16) +#define TX7_COHERENT_INT_MASK BIT(15) +#define TX6_COHERENT_INT_MASK BIT(14) +#define TX5_COHERENT_INT_MASK BIT(13) +#define TX4_COHERENT_INT_MASK BIT(12) +#define TX3_COHERENT_INT_MASK BIT(11) +#define TX2_COHERENT_INT_MASK BIT(10) +#define TX1_COHERENT_INT_MASK BIT(9) +#define TX0_COHERENT_INT_MASK BIT(8) +#define CNT_OVER_FLOW_INT_MASK BIT(7) +#define IRQ1_FULL_INT_MASK BIT(5) +#define IRQ1_INT_MASK BIT(4) +#define HWFWD_DSCP_LOW_INT_MASK BIT(3) +#define HWFWD_DSCP_EMPTY_INT_MASK BIT(2) +#define IRQ0_FULL_INT_MASK BIT(1) +#define IRQ0_INT_MASK BIT(0) + +#define TX_DONE_INT_MASK(_n) \ + ((_n) ? 
IRQ1_INT_MASK | IRQ1_FULL_INT_MASK \ + : IRQ0_INT_MASK | IRQ0_FULL_INT_MASK) + +#define INT_TX_MASK \ + (IRQ1_INT_MASK | IRQ1_FULL_INT_MASK | \ + IRQ0_INT_MASK | IRQ0_FULL_INT_MASK) + +#define INT_IDX0_MASK \ + (TX0_COHERENT_INT_MASK | TX1_COHERENT_INT_MASK | \ + TX2_COHERENT_INT_MASK | TX3_COHERENT_INT_MASK | \ + TX4_COHERENT_INT_MASK | TX5_COHERENT_INT_MASK | \ + TX6_COHERENT_INT_MASK | TX7_COHERENT_INT_MASK | \ + RX0_COHERENT_INT_MASK | RX1_COHERENT_INT_MASK | \ + RX2_COHERENT_INT_MASK | RX3_COHERENT_INT_MASK | \ + RX4_COHERENT_INT_MASK | RX7_COHERENT_INT_MASK | \ + RX8_COHERENT_INT_MASK | RX9_COHERENT_INT_MASK | \ + RX15_COHERENT_INT_MASK | INT_TX_MASK) + +/* QDMA_CSR_INT_ENABLE2 */ +#define RX15_NO_CPU_DSCP_INT_MASK BIT(31) +#define RX14_NO_CPU_DSCP_INT_MASK BIT(30) +#define RX13_NO_CPU_DSCP_INT_MASK BIT(29) +#define RX12_NO_CPU_DSCP_INT_MASK BIT(28) +#define RX11_NO_CPU_DSCP_INT_MASK BIT(27) +#define RX10_NO_CPU_DSCP_INT_MASK BIT(26) +#define RX9_NO_CPU_DSCP_INT_MASK BIT(25) +#define RX8_NO_CPU_DSCP_INT_MASK BIT(24) +#define RX7_NO_CPU_DSCP_INT_MASK BIT(23) +#define RX6_NO_CPU_DSCP_INT_MASK BIT(22) +#define RX5_NO_CPU_DSCP_INT_MASK BIT(21) +#define RX4_NO_CPU_DSCP_INT_MASK BIT(20) +#define RX3_NO_CPU_DSCP_INT_MASK BIT(19) +#define RX2_NO_CPU_DSCP_INT_MASK BIT(18) +#define RX1_NO_CPU_DSCP_INT_MASK BIT(17) +#define RX0_NO_CPU_DSCP_INT_MASK BIT(16) +#define RX15_DONE_INT_MASK BIT(15) +#define RX14_DONE_INT_MASK BIT(14) +#define RX13_DONE_INT_MASK BIT(13) +#define RX12_DONE_INT_MASK BIT(12) +#define RX11_DONE_INT_MASK BIT(11) +#define RX10_DONE_INT_MASK BIT(10) +#define RX9_DONE_INT_MASK BIT(9) +#define RX8_DONE_INT_MASK BIT(8) +#define RX7_DONE_INT_MASK BIT(7) +#define RX6_DONE_INT_MASK BIT(6) +#define RX5_DONE_INT_MASK BIT(5) +#define RX4_DONE_INT_MASK BIT(4) +#define RX3_DONE_INT_MASK BIT(3) +#define RX2_DONE_INT_MASK BIT(2) +#define RX1_DONE_INT_MASK BIT(1) +#define RX0_DONE_INT_MASK BIT(0) + +#define RX_DONE_INT_MASK \ + (RX0_DONE_INT_MASK | RX1_DONE_INT_MASK | \ + RX2_DONE_INT_MASK | RX3_DONE_INT_MASK | \ + RX4_DONE_INT_MASK | RX7_DONE_INT_MASK | \ + RX8_DONE_INT_MASK | RX9_DONE_INT_MASK | \ + RX15_DONE_INT_MASK) +#define INT_IDX1_MASK \ + (RX_DONE_INT_MASK | \ + RX0_NO_CPU_DSCP_INT_MASK | RX1_NO_CPU_DSCP_INT_MASK | \ + RX2_NO_CPU_DSCP_INT_MASK | RX3_NO_CPU_DSCP_INT_MASK | \ + RX4_NO_CPU_DSCP_INT_MASK | RX7_NO_CPU_DSCP_INT_MASK | \ + RX8_NO_CPU_DSCP_INT_MASK | RX9_NO_CPU_DSCP_INT_MASK | \ + RX15_NO_CPU_DSCP_INT_MASK) + +/* QDMA_CSR_INT_ENABLE5 */ +#define TX31_COHERENT_INT_MASK BIT(31) +#define TX30_COHERENT_INT_MASK BIT(30) +#define TX29_COHERENT_INT_MASK BIT(29) +#define TX28_COHERENT_INT_MASK BIT(28) +#define TX27_COHERENT_INT_MASK BIT(27) +#define TX26_COHERENT_INT_MASK BIT(26) +#define TX25_COHERENT_INT_MASK BIT(25) +#define TX24_COHERENT_INT_MASK BIT(24) +#define TX23_COHERENT_INT_MASK BIT(23) +#define TX22_COHERENT_INT_MASK BIT(22) +#define TX21_COHERENT_INT_MASK BIT(21) +#define TX20_COHERENT_INT_MASK BIT(20) +#define TX19_COHERENT_INT_MASK BIT(19) +#define TX18_COHERENT_INT_MASK BIT(18) +#define TX17_COHERENT_INT_MASK BIT(17) +#define TX16_COHERENT_INT_MASK BIT(16) +#define TX15_COHERENT_INT_MASK BIT(15) +#define TX14_COHERENT_INT_MASK BIT(14) +#define TX13_COHERENT_INT_MASK BIT(13) +#define TX12_COHERENT_INT_MASK BIT(12) +#define TX11_COHERENT_INT_MASK BIT(11) +#define TX10_COHERENT_INT_MASK BIT(10) +#define TX9_COHERENT_INT_MASK BIT(9) +#define TX8_COHERENT_INT_MASK BIT(8) + +#define INT_IDX4_MASK \ + (TX8_COHERENT_INT_MASK | TX9_COHERENT_INT_MASK | \ + 
TX10_COHERENT_INT_MASK | TX11_COHERENT_INT_MASK | \ + TX12_COHERENT_INT_MASK | TX13_COHERENT_INT_MASK | \ + TX14_COHERENT_INT_MASK | TX15_COHERENT_INT_MASK | \ + TX16_COHERENT_INT_MASK | TX17_COHERENT_INT_MASK | \ + TX18_COHERENT_INT_MASK | TX19_COHERENT_INT_MASK | \ + TX20_COHERENT_INT_MASK | TX21_COHERENT_INT_MASK | \ + TX22_COHERENT_INT_MASK | TX23_COHERENT_INT_MASK | \ + TX24_COHERENT_INT_MASK | TX25_COHERENT_INT_MASK | \ + TX26_COHERENT_INT_MASK | TX27_COHERENT_INT_MASK | \ + TX28_COHERENT_INT_MASK | TX29_COHERENT_INT_MASK | \ + TX30_COHERENT_INT_MASK | TX31_COHERENT_INT_MASK) + +#define REG_TX_IRQ_BASE(_n) ((_n) ? 0x0048 : 0x0050) + +#define REG_TX_IRQ_CFG(_n) ((_n) ? 0x004c : 0x0054) +#define TX_IRQ_THR_MASK GENMASK(27, 16) +#define TX_IRQ_DEPTH_MASK GENMASK(11, 0) + +#define REG_IRQ_CLEAR_LEN(_n) ((_n) ? 0x0064 : 0x0058) +#define IRQ_CLEAR_LEN_MASK GENMASK(7, 0) + +#define REG_IRQ_STATUS(_n) ((_n) ? 0x0068 : 0x005c) +#define IRQ_ENTRY_LEN_MASK GENMASK(27, 16) +#define IRQ_HEAD_IDX_MASK GENMASK(11, 0) + +#define REG_TX_RING_BASE(_n) \ + (((_n) < 8) ? 0x0100 + ((_n) << 5) : 0x0b00 + (((_n) - 8) << 5)) + +#define REG_TX_RING_BLOCKING(_n) \ + (((_n) < 8) ? 0x0104 + ((_n) << 5) : 0x0b04 + (((_n) - 8) << 5)) + +#define TX_RING_IRQ_BLOCKING_MAP_MASK BIT(6) +#define TX_RING_IRQ_BLOCKING_CFG_MASK BIT(4) +#define TX_RING_IRQ_BLOCKING_TX_DROP_EN_MASK BIT(2) +#define TX_RING_IRQ_BLOCKING_MAX_TH_TXRING_EN_MASK BIT(1) +#define TX_RING_IRQ_BLOCKING_MIN_TH_TXRING_EN_MASK BIT(0) + +#define REG_TX_CPU_IDX(_n) \ + (((_n) < 8) ? 0x0108 + ((_n) << 5) : 0x0b08 + (((_n) - 8) << 5)) + +#define TX_RING_CPU_IDX_MASK GENMASK(15, 0) + +#define REG_TX_DMA_IDX(_n) \ + (((_n) < 8) ? 0x010c + ((_n) << 5) : 0x0b0c + (((_n) - 8) << 5)) + +#define TX_RING_DMA_IDX_MASK GENMASK(15, 0) + +#define IRQ_RING_IDX_MASK GENMASK(20, 16) +#define IRQ_DESC_IDX_MASK GENMASK(15, 0) + +#define REG_RX_RING_BASE(_n) \ + (((_n) < 16) ? 0x0200 + ((_n) << 5) : 0x0e00 + (((_n) - 16) << 5)) + +#define REG_RX_RING_SIZE(_n) \ + (((_n) < 16) ? 0x0204 + ((_n) << 5) : 0x0e04 + (((_n) - 16) << 5)) + +#define RX_RING_THR_MASK GENMASK(31, 16) +#define RX_RING_SIZE_MASK GENMASK(15, 0) + +#define REG_RX_CPU_IDX(_n) \ + (((_n) < 16) ? 0x0208 + ((_n) << 5) : 0x0e08 + (((_n) - 16) << 5)) + +#define RX_RING_CPU_IDX_MASK GENMASK(15, 0) + +#define REG_RX_DMA_IDX(_n) \ + (((_n) < 16) ? 0x020c + ((_n) << 5) : 0x0e0c + (((_n) - 16) << 5)) + +#define REG_RX_DELAY_INT_IDX(_n) \ + (((_n) < 16) ? 0x0210 + ((_n) << 5) : 0x0e10 + (((_n) - 16) << 5)) + +#define REG_RX_SCATTER_CFG(_n) \ + (((_n) < 16) ? 0x0214 + ((_n) << 5) : 0x0e14 + (((_n) - 16) << 5)) + +#define RX_DELAY_INT_MASK GENMASK(15, 0) + +#define RX_RING_DMA_IDX_MASK GENMASK(15, 0) + +#define RX_RING_SG_EN_MASK BIT(0) + +#define REG_INGRESS_TRTCM_CFG 0x0070 +#define INGRESS_TRTCM_EN_MASK BIT(31) +#define INGRESS_TRTCM_MODE_MASK BIT(30) +#define INGRESS_SLOW_TICK_RATIO_MASK GENMASK(29, 16) +#define INGRESS_FAST_TICK_MASK GENMASK(15, 0) + +#define REG_QUEUE_CLOSE_CFG(_n) (0x00a0 + ((_n) & 0xfc)) +#define TXQ_DISABLE_CHAN_QUEUE_MASK(_n, _m) BIT((_m) + (((_n) & 0x3) << 3)) + +#define REG_TXQ_DIS_CFG_BASE(_n) ((_n) ? 
0x20a0 : 0x00a0) +#define REG_TXQ_DIS_CFG(_n, _m) (REG_TXQ_DIS_CFG_BASE((_n)) + (_m) << 2) + +#define REG_CNTR_CFG(_n) (0x0400 + ((_n) << 3)) +#define CNTR_EN_MASK BIT(31) +#define CNTR_ALL_CHAN_EN_MASK BIT(30) +#define CNTR_ALL_QUEUE_EN_MASK BIT(29) +#define CNTR_ALL_DSCP_RING_EN_MASK BIT(28) +#define CNTR_SRC_MASK GENMASK(27, 24) +#define CNTR_DSCP_RING_MASK GENMASK(20, 16) +#define CNTR_CHAN_MASK GENMASK(7, 3) +#define CNTR_QUEUE_MASK GENMASK(2, 0) + +#define REG_CNTR_VAL(_n) (0x0404 + ((_n) << 3)) + +#define REG_LMGR_INIT_CFG 0x1000 +#define LMGR_INIT_START BIT(31) +#define LMGR_SRAM_MODE_MASK BIT(30) +#define HW_FWD_PKTSIZE_OVERHEAD_MASK GENMASK(27, 20) +#define HW_FWD_DESC_NUM_MASK GENMASK(16, 0) + +#define REG_FWD_DSCP_LOW_THR 0x1004 +#define FWD_DSCP_LOW_THR_MASK GENMASK(17, 0) + +#define REG_EGRESS_RATE_METER_CFG 0x100c +#define EGRESS_RATE_METER_EN_MASK BIT(31) +#define EGRESS_RATE_METER_EQ_RATE_EN_MASK BIT(17) +#define EGRESS_RATE_METER_WINDOW_SZ_MASK GENMASK(16, 12) +#define EGRESS_RATE_METER_TIMESLICE_MASK GENMASK(10, 0) + +#define REG_EGRESS_TRTCM_CFG 0x1010 +#define EGRESS_TRTCM_EN_MASK BIT(31) +#define EGRESS_TRTCM_MODE_MASK BIT(30) +#define EGRESS_SLOW_TICK_RATIO_MASK GENMASK(29, 16) +#define EGRESS_FAST_TICK_MASK GENMASK(15, 0) + +#define TRTCM_PARAM_RW_MASK BIT(31) +#define TRTCM_PARAM_RW_DONE_MASK BIT(30) +#define TRTCM_PARAM_TYPE_MASK GENMASK(29, 28) +#define TRTCM_METER_GROUP_MASK GENMASK(27, 26) +#define TRTCM_PARAM_INDEX_MASK GENMASK(23, 17) +#define TRTCM_PARAM_RATE_TYPE_MASK BIT(16) + +#define REG_TRTCM_CFG_PARAM(_n) ((_n) + 0x4) +#define REG_TRTCM_DATA_LOW(_n) ((_n) + 0x8) +#define REG_TRTCM_DATA_HIGH(_n) ((_n) + 0xc) + +#define REG_TXWRR_MODE_CFG 0x1020 +#define TWRR_WEIGHT_SCALE_MASK BIT(31) +#define TWRR_WEIGHT_BASE_MASK BIT(3) + +#define REG_TXWRR_WEIGHT_CFG 0x1024 +#define TWRR_RW_CMD_MASK BIT(31) +#define TWRR_RW_CMD_DONE BIT(30) +#define TWRR_CHAN_IDX_MASK GENMASK(23, 19) +#define TWRR_QUEUE_IDX_MASK GENMASK(18, 16) +#define TWRR_VALUE_MASK GENMASK(15, 0) + +#define REG_PSE_BUF_USAGE_CFG 0x1028 +#define PSE_BUF_ESTIMATE_EN_MASK BIT(29) + +#define REG_CHAN_QOS_MODE(_n) (0x1040 + ((_n) << 2)) +#define CHAN_QOS_MODE_MASK(_n) GENMASK(2 + ((_n) << 2), (_n) << 2) + +#define REG_GLB_TRTCM_CFG 0x1080 +#define GLB_TRTCM_EN_MASK BIT(31) +#define GLB_TRTCM_MODE_MASK BIT(30) +#define GLB_SLOW_TICK_RATIO_MASK GENMASK(29, 16) +#define GLB_FAST_TICK_MASK GENMASK(15, 0) + +#define REG_TXQ_CNGST_CFG 0x10a0 +#define TXQ_CNGST_DROP_EN BIT(31) +#define TXQ_CNGST_DEI_DROP_EN BIT(30) + +#define REG_SLA_TRTCM_CFG 0x1150 +#define SLA_TRTCM_EN_MASK BIT(31) +#define SLA_TRTCM_MODE_MASK BIT(30) +#define SLA_SLOW_TICK_RATIO_MASK GENMASK(29, 16) +#define SLA_FAST_TICK_MASK GENMASK(15, 0) + +/* CTRL */ +#define QDMA_DESC_DONE_MASK BIT(31) +#define QDMA_DESC_DROP_MASK BIT(30) /* tx: drop - rx: overflow */ +#define QDMA_DESC_MORE_MASK BIT(29) /* more SG elements */ +#define QDMA_DESC_DEI_MASK BIT(25) +#define QDMA_DESC_NO_DROP_MASK BIT(24) +#define QDMA_DESC_LEN_MASK GENMASK(15, 0) +/* DATA */ +#define QDMA_DESC_NEXT_ID_MASK GENMASK(15, 0) +/* TX MSG0 */ +#define QDMA_ETH_TXMSG_MIC_IDX_MASK BIT(30) +#define QDMA_ETH_TXMSG_SP_TAG_MASK GENMASK(29, 14) +#define QDMA_ETH_TXMSG_ICO_MASK BIT(13) +#define QDMA_ETH_TXMSG_UCO_MASK BIT(12) +#define QDMA_ETH_TXMSG_TCO_MASK BIT(11) +#define QDMA_ETH_TXMSG_TSO_MASK BIT(10) +#define QDMA_ETH_TXMSG_FAST_MASK BIT(9) +#define QDMA_ETH_TXMSG_OAM_MASK BIT(8) +#define QDMA_ETH_TXMSG_CHAN_MASK GENMASK(7, 3) +#define QDMA_ETH_TXMSG_QUEUE_MASK GENMASK(2, 0) 
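
The QDMA_ETH_TXMSG_* fields just listed are packed into the little-endian msg0/msg1 words of struct airoha_qdma_desc, defined at the end of this header. A hedged sketch of how a TX msg0 word could be composed with full checksum offload for a given hardware channel/queue (mask values copied from above; the qdma_txmsg0() helper is illustrative, not a driver function):

#include <asm/byteorder.h>
#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define QDMA_ETH_TXMSG_ICO_MASK		BIT(13)
#define QDMA_ETH_TXMSG_UCO_MASK		BIT(12)
#define QDMA_ETH_TXMSG_TCO_MASK		BIT(11)
#define QDMA_ETH_TXMSG_CHAN_MASK	GENMASK(7, 3)
#define QDMA_ETH_TXMSG_QUEUE_MASK	GENMASK(2, 0)

/* Encode channel/queue and request IP/UDP/TCP checksum offload, then
 * convert to the little-endian layout the descriptor ring expects.
 */
static __le32 qdma_txmsg0(u8 chan, u8 queue)
{
	u32 msg = FIELD_PREP(QDMA_ETH_TXMSG_CHAN_MASK, chan) |
		  FIELD_PREP(QDMA_ETH_TXMSG_QUEUE_MASK, queue) |
		  QDMA_ETH_TXMSG_ICO_MASK | QDMA_ETH_TXMSG_UCO_MASK |
		  QDMA_ETH_TXMSG_TCO_MASK;

	return cpu_to_le32(msg);
}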
+/* TX MSG1 */ +#define QDMA_ETH_TXMSG_NO_DROP BIT(31) +#define QDMA_ETH_TXMSG_METER_MASK GENMASK(30, 24) /* 0x7f no meters */ +#define QDMA_ETH_TXMSG_FPORT_MASK GENMASK(23, 20) +#define QDMA_ETH_TXMSG_NBOQ_MASK GENMASK(19, 15) +#define QDMA_ETH_TXMSG_HWF_MASK BIT(14) +#define QDMA_ETH_TXMSG_HOP_MASK BIT(13) +#define QDMA_ETH_TXMSG_PTP_MASK BIT(12) +#define QDMA_ETH_TXMSG_ACNT_G1_MASK GENMASK(10, 6) /* 0x1f do not count */ +#define QDMA_ETH_TXMSG_ACNT_G0_MASK GENMASK(5, 0) /* 0x3f do not count */ + +/* RX MSG0 */ +#define QDMA_ETH_RXMSG_SPTAG GENMASK(21, 14) +/* RX MSG1 */ +#define QDMA_ETH_RXMSG_DEI_MASK BIT(31) +#define QDMA_ETH_RXMSG_IP6_MASK BIT(30) +#define QDMA_ETH_RXMSG_IP4_MASK BIT(29) +#define QDMA_ETH_RXMSG_IP4F_MASK BIT(28) +#define QDMA_ETH_RXMSG_L4_VALID_MASK BIT(27) +#define QDMA_ETH_RXMSG_L4F_MASK BIT(26) +#define QDMA_ETH_RXMSG_SPORT_MASK GENMASK(25, 21) +#define QDMA_ETH_RXMSG_CRSN_MASK GENMASK(20, 16) +#define QDMA_ETH_RXMSG_PPE_ENTRY_MASK GENMASK(15, 0) + +struct airoha_qdma_desc { + __le32 rsv; + __le32 ctrl; + __le32 addr; + __le32 data; + __le32 msg0; + __le32 msg1; + __le32 msg2; + __le32 msg3; +}; + +/* CTRL0 */ +#define QDMA_FWD_DESC_CTX_MASK BIT(31) +#define QDMA_FWD_DESC_RING_MASK GENMASK(30, 28) +#define QDMA_FWD_DESC_IDX_MASK GENMASK(27, 16) +#define QDMA_FWD_DESC_LEN_MASK GENMASK(15, 0) +/* CTRL1 */ +#define QDMA_FWD_DESC_FIRST_IDX_MASK GENMASK(15, 0) +/* CTRL2 */ +#define QDMA_FWD_DESC_MORE_PKT_NUM_MASK GENMASK(2, 0) + +struct airoha_qdma_fwd_desc { + __le32 addr; + __le32 ctrl0; + __le32 ctrl1; + __le32 ctrl2; + __le32 msg0; + __le32 msg1; + __le32 rsv0; + __le32 rsv1; +}; + +#endif /* AIROHA_REGS_H */ diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c index c1295dfad0d07..70fa3adb49342 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c @@ -5,9 +5,6 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt -#ifdef CONFIG_RFS_ACCEL -#include -#endif /* CONFIG_RFS_ACCEL */ #include #include #include @@ -162,30 +159,6 @@ int ena_xmit_common(struct ena_adapter *adapter, return 0; } -static int ena_init_rx_cpu_rmap(struct ena_adapter *adapter) -{ -#ifdef CONFIG_RFS_ACCEL - u32 i; - int rc; - - adapter->netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(adapter->num_io_queues); - if (!adapter->netdev->rx_cpu_rmap) - return -ENOMEM; - for (i = 0; i < adapter->num_io_queues; i++) { - int irq_idx = ENA_IO_IRQ_IDX(i); - - rc = irq_cpu_rmap_add(adapter->netdev->rx_cpu_rmap, - pci_irq_vector(adapter->pdev, irq_idx)); - if (rc) { - free_irq_cpu_rmap(adapter->netdev->rx_cpu_rmap); - adapter->netdev->rx_cpu_rmap = NULL; - return rc; - } - } -#endif /* CONFIG_RFS_ACCEL */ - return 0; -} - static void ena_init_io_rings_common(struct ena_adapter *adapter, struct ena_ring *ring, u16 qid) { @@ -1596,7 +1569,7 @@ static int ena_enable_msix(struct ena_adapter *adapter) adapter->num_io_queues = irq_cnt - ENA_ADMIN_MSIX_VEC; } - if (ena_init_rx_cpu_rmap(adapter)) + if (netif_enable_cpu_rmap(adapter->netdev, adapter->num_io_queues)) netif_warn(adapter, probe, adapter->netdev, "Failed to map IRQs to CPUs\n"); @@ -1742,16 +1715,13 @@ static void ena_free_io_irq(struct ena_adapter *adapter) struct ena_irq *irq; int i; -#ifdef CONFIG_RFS_ACCEL - if (adapter->msix_vecs >= 1) { - free_irq_cpu_rmap(adapter->netdev->rx_cpu_rmap); - adapter->netdev->rx_cpu_rmap = NULL; - } -#endif /* CONFIG_RFS_ACCEL */ - for (i = ENA_IO_IRQ_FIRST_IDX; i < ENA_MAX_MSIX_VEC(io_queue_count); i++) { + struct ena_napi 
*ena_napi; + irq = &adapter->irq_tbl[i]; irq_set_affinity_hint(irq->vector, NULL); + ena_napi = irq->data; + netif_napi_set_irq(&ena_napi->napi, -1); free_irq(irq->vector, irq->data); } } @@ -4131,13 +4101,6 @@ static void __ena_shutoff(struct pci_dev *pdev, bool shutdown) ena_dev = adapter->ena_dev; netdev = adapter->netdev; -#ifdef CONFIG_RFS_ACCEL - if ((adapter->msix_vecs >= 1) && (netdev->rx_cpu_rmap)) { - free_irq_cpu_rmap(netdev->rx_cpu_rmap); - netdev->rx_cpu_rmap = NULL; - } - -#endif /* CONFIG_RFS_ACCEL */ /* Make sure timer and reset routine won't be called after * freeing device resources. */ diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c index 0671a066913bc..9d35ac348ebe3 100644 --- a/drivers/net/ethernet/amd/au1000_eth.c +++ b/drivers/net/ethernet/amd/au1000_eth.c @@ -571,7 +571,7 @@ static struct db_dest *au1000_GetFreeDB(struct au1000_private *aup) return pDB; } -void au1000_ReleaseDB(struct au1000_private *aup, struct db_dest *pDB) +static void au1000_ReleaseDB(struct au1000_private *aup, struct db_dest *pDB) { struct db_dest *pDBfree = aup->pDBfree; if (pDBfree) diff --git a/drivers/net/ethernet/apm/xgene-v2/main.c b/drivers/net/ethernet/apm/xgene-v2/main.c index 2a91c84aebdb0..d7ca847d44c7c 100644 --- a/drivers/net/ethernet/apm/xgene-v2/main.c +++ b/drivers/net/ethernet/apm/xgene-v2/main.c @@ -9,8 +9,6 @@ #include "main.h" -static const struct acpi_device_id xge_acpi_match[]; - static int xge_get_resources(struct xge_pdata *pdata) { struct platform_device *pdev; @@ -731,7 +729,7 @@ MODULE_DEVICE_TABLE(acpi, xge_acpi_match); static struct platform_driver xge_driver = { .driver = { .name = "xgene-enet-v2", - .acpi_match_table = ACPI_PTR(xge_acpi_match), + .acpi_match_table = xge_acpi_match, }, .probe = xge_probe, .remove = xge_remove, diff --git a/drivers/net/ethernet/apm/xgene-v2/mdio.c b/drivers/net/ethernet/apm/xgene-v2/mdio.c index eba06831aec2d..6a17045a5f626 100644 --- a/drivers/net/ethernet/apm/xgene-v2/mdio.c +++ b/drivers/net/ethernet/apm/xgene-v2/mdio.c @@ -97,7 +97,6 @@ void xge_mdio_remove(struct net_device *ndev) int xge_mdio_config(struct net_device *ndev) { - __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; struct xge_pdata *pdata = netdev_priv(ndev); struct device *dev = &pdata->pdev->dev; struct mii_bus *mdio_bus; @@ -137,17 +136,12 @@ int xge_mdio_config(struct net_device *ndev) goto err; } - linkmode_set_bit_array(phy_10_100_features_array, - ARRAY_SIZE(phy_10_100_features_array), - mask); - linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, mask); - linkmode_set_bit(ETHTOOL_LINK_MODE_AUI_BIT, mask); - linkmode_set_bit(ETHTOOL_LINK_MODE_MII_BIT, mask); - linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mask); - linkmode_set_bit(ETHTOOL_LINK_MODE_BNC_BIT, mask); - - linkmode_andnot(phydev->supported, phydev->supported, mask); - linkmode_copy(phydev->advertising, phydev->supported); + phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT); + phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Full_BIT); + phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT); + phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Full_BIT); + phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT); + pdata->phy_speed = SPEED_UNKNOWN; return 0; diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c index 86607b79c09f7..cc3b1631c9053 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c +++ 
b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c @@ -6,8 +6,14 @@ * Keyur Chudgar */ -#include -#include +#include +#include +#include +#include +#include +#include +#include + #include "xgene_enet_main.h" #include "xgene_enet_hw.h" #include "xgene_enet_xgmac.h" diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_drvinfo.c b/drivers/net/ethernet/aquantia/atlantic/aq_drvinfo.c index 414b2e448d593..787ea91802e75 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_drvinfo.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_drvinfo.c @@ -113,19 +113,9 @@ static const struct hwmon_ops aq_hwmon_ops = { .read_string = aq_hwmon_read_string, }; -static u32 aq_hwmon_temp_config[] = { - HWMON_T_INPUT | HWMON_T_LABEL, - HWMON_T_INPUT | HWMON_T_LABEL, - 0, -}; - -static const struct hwmon_channel_info aq_hwmon_temp = { - .type = hwmon_temp, - .config = aq_hwmon_temp_config, -}; - static const struct hwmon_channel_info * const aq_hwmon_info[] = { - &aq_hwmon_temp, + HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT | HWMON_T_LABEL, + HWMON_T_INPUT | HWMON_T_LABEL), NULL, }; diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h index f5901f8e39077..f6b990b7f5b47 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h @@ -226,7 +226,6 @@ struct __packed offload_info { struct offload_port_info ports; struct offload_ka_info kas; struct offload_rr_info rrs; - u8 buf[]; }; struct __packed hw_atl_utils_fw_rpc { diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c index c571614b1d501..5133284767709 100644 --- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c +++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c @@ -2688,7 +2688,7 @@ static int atl1c_probe(struct pci_dev *pdev, const struct pci_device_id *ent) adapter->mii.mdio_write = atl1c_mdio_write; adapter->mii.phy_id_mask = 0x1f; adapter->mii.reg_num_mask = MDIO_CTRL_REG_MASK; - dev_set_threaded(netdev, true); + dev_set_threaded(netdev, NETDEV_NAPI_THREADED_ENABLE); for (i = 0; i < adapter->rx_queue_count; ++i) netif_napi_add(netdev, &adapter->rrd_ring[i].napi, atl1c_clean_rx); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 7b8b5b39c7bbe..934ba9425857d 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -54,7 +54,10 @@ #include #include #include +#include #include +#include +#include #include "bnxt_hsi.h" #include "bnxt.h" @@ -76,6 +79,7 @@ #define BNXT_DEF_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_HW | \ NETIF_MSG_TX_ERR) +MODULE_IMPORT_NS("NETDEV_INTERNAL"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Broadcom NetXtreme network driver"); @@ -485,6 +489,17 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev) txr = &bp->tx_ring[bp->tx_ring_map[i]]; prod = txr->tx_prod; +#if (MAX_SKB_FRAGS > TX_MAX_FRAGS) + if (skb_shinfo(skb)->nr_frags > TX_MAX_FRAGS) { + netdev_warn_once(dev, "SKB has too many (%d) fragments, max supported is %d. 
SKB will be linearized.\n", + skb_shinfo(skb)->nr_frags, TX_MAX_FRAGS); + if (skb_linearize(skb)) { + dev_kfree_skb_any(skb); + dev_core_stats_tx_dropped_inc(dev); + return NETDEV_TX_OK; + } + } +#endif free_size = bnxt_tx_avail(bp, txr); if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) { /* We must have raced with NAPI cleanup */ @@ -564,7 +579,7 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev) TX_BD_FLAGS_LHINT_512_AND_SMALLER | TX_BD_FLAGS_COAL_NOW | TX_BD_FLAGS_PACKET_END | - (2 << TX_BD_FLAGS_BD_CNT_SHIFT)); + TX_BD_CNT(2)); if (skb->ip_summed == CHECKSUM_PARTIAL) tx_push1->tx_bd_hsize_lflags = @@ -639,7 +654,7 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev) dma_unmap_addr_set(tx_buf, mapping, mapping); flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD | - ((last_frag + 2) << TX_BD_FLAGS_BD_CNT_SHIFT); + TX_BD_CNT(last_frag + 2); txbd->tx_bd_haddr = cpu_to_le64(mapping); txbd->tx_bd_opaque = SET_TX_OPAQUE(bp, txr, prod, 2 + last_frag); @@ -2038,6 +2053,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, struct rx_cmp_ext *rxcmp1; u32 tmp_raw_cons = *raw_cons; u16 cons, prod, cp_cons = RING_CMP(tmp_raw_cons); + struct skb_shared_info *sinfo; struct bnxt_sw_rx_bd *rx_buf; unsigned int len; u8 *data_ptr, agg_bufs, cmp_type; @@ -2164,6 +2180,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, false); if (!frag_len) goto oom_next_rx; + } xdp_active = true; } @@ -2173,6 +2190,12 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, rc = 1; goto next_rx; } + if (xdp_buff_has_frags(&xdp)) { + sinfo = xdp_get_shared_info_from_buff(&xdp); + agg_bufs = sinfo->nr_frags; + } else { + agg_bufs = 0; + } } if (len <= bp->rx_copybreak) { @@ -2210,7 +2233,8 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, if (!skb) goto oom_next_rx; } else { - skb = bnxt_xdp_build_skb(bp, skb, agg_bufs, rxr->page_pool, &xdp, rxcmp1); + skb = bnxt_xdp_build_skb(bp, skb, agg_bufs, + rxr->page_pool, &xdp); if (!skb) { /* we should be able to free the old skb here */ bnxt_xdp_buff_frags_free(rxr, &xdp); @@ -3314,74 +3338,81 @@ static int bnxt_poll_p5(struct napi_struct *napi, int budget) return work_done; } -static void bnxt_free_tx_skbs(struct bnxt *bp) +static void bnxt_free_one_tx_ring_skbs(struct bnxt *bp, + struct bnxt_tx_ring_info *txr, int idx) { int i, max_idx; struct pci_dev *pdev = bp->pdev; - if (!bp->tx_ring) - return; - max_idx = bp->tx_nr_pages * TX_DESC_CNT; - for (i = 0; i < bp->tx_nr_rings; i++) { - struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; - int j; - if (!txr->tx_buf_ring) + for (i = 0; i < max_idx;) { + struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[i]; + struct sk_buff *skb; + int j, last; + + if (idx < bp->tx_nr_rings_xdp && + tx_buf->action == XDP_REDIRECT) { + dma_unmap_single(&pdev->dev, + dma_unmap_addr(tx_buf, mapping), + dma_unmap_len(tx_buf, len), + DMA_TO_DEVICE); + xdp_return_frame(tx_buf->xdpf); + tx_buf->action = 0; + tx_buf->xdpf = NULL; + i++; continue; + } - for (j = 0; j < max_idx;) { - struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j]; - struct sk_buff *skb; - int k, last; - - if (i < bp->tx_nr_rings_xdp && - tx_buf->action == XDP_REDIRECT) { - dma_unmap_single(&pdev->dev, - dma_unmap_addr(tx_buf, mapping), - dma_unmap_len(tx_buf, len), - DMA_TO_DEVICE); - xdp_return_frame(tx_buf->xdpf); - tx_buf->action = 0; - tx_buf->xdpf = NULL; - j++; - continue; - } + skb = tx_buf->skb; + if (!skb) { + i++; + 
continue; + } - skb = tx_buf->skb; - if (!skb) { - j++; - continue; - } + tx_buf->skb = NULL; - tx_buf->skb = NULL; + if (tx_buf->is_push) { + dev_kfree_skb(skb); + i += 2; + continue; + } - if (tx_buf->is_push) { - dev_kfree_skb(skb); - j += 2; - continue; - } + dma_unmap_single(&pdev->dev, + dma_unmap_addr(tx_buf, mapping), + skb_headlen(skb), + DMA_TO_DEVICE); - dma_unmap_single(&pdev->dev, - dma_unmap_addr(tx_buf, mapping), - skb_headlen(skb), - DMA_TO_DEVICE); + last = tx_buf->nr_frags; + i += 2; + for (j = 0; j < last; j++, i++) { + int ring_idx = i & bp->tx_ring_mask; + skb_frag_t *frag = &skb_shinfo(skb)->frags[j]; - last = tx_buf->nr_frags; - j += 2; - for (k = 0; k < last; k++, j++) { - int ring_idx = j & bp->tx_ring_mask; - skb_frag_t *frag = &skb_shinfo(skb)->frags[k]; - - tx_buf = &txr->tx_buf_ring[ring_idx]; - dma_unmap_page( - &pdev->dev, - dma_unmap_addr(tx_buf, mapping), - skb_frag_size(frag), DMA_TO_DEVICE); - } - dev_kfree_skb(skb); + tx_buf = &txr->tx_buf_ring[ring_idx]; + dma_unmap_page(&pdev->dev, + dma_unmap_addr(tx_buf, mapping), + skb_frag_size(frag), DMA_TO_DEVICE); } - netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i)); + dev_kfree_skb(skb); + } + netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, idx)); +} + +static void bnxt_free_tx_skbs(struct bnxt *bp) +{ + int i; + + if (!bp->tx_ring) + return; + + for (i = 0; i < bp->tx_nr_rings; i++) { + struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; + + if (!txr->tx_buf_ring) + continue; + + bnxt_free_one_tx_ring_skbs(bp, txr, i); } } @@ -5236,8 +5267,10 @@ static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool all) { int i; - /* Under rtnl_lock and all our NAPIs have been disabled. It's - * safe to delete the hash table. + netdev_assert_locked(bp->dev); + + /* Under netdev instance lock and all our NAPIs have been disabled. + * It's safe to delete the hash table. 
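+ * (aRFS filter insertion runs only from the RX/NAPI path.)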
*/ for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) { struct hlist_head *head; @@ -5565,6 +5598,8 @@ int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size, if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) flags |= FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT | FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT; + if (bp->fw_cap & BNXT_FW_CAP_NPAR_1_2) + flags |= FUNC_DRV_RGTR_REQ_FLAGS_NPAR_1_2_SUPPORT; req->flags = cpu_to_le32(flags); req->ver_maj_8b = DRV_VER_MAJ; req->ver_min_8b = DRV_VER_MIN; @@ -6935,6 +6970,30 @@ static void bnxt_hwrm_ring_grp_free(struct bnxt *bp) hwrm_req_drop(bp, req); } +static void bnxt_set_rx_ring_params_p5(struct bnxt *bp, u32 ring_type, + struct hwrm_ring_alloc_input *req, + struct bnxt_ring_struct *ring) +{ + struct bnxt_ring_grp_info *grp_info = &bp->grp_info[ring->grp_idx]; + u32 enables = RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID | + RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID; + + if (ring_type == HWRM_RING_ALLOC_AGG) { + req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG; + req->rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id); + req->rx_buf_size = cpu_to_le16(BNXT_RX_PAGE_SIZE); + enables |= RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID; + } else { + req->rx_buf_size = cpu_to_le16(bp->rx_buf_use_size); + if (NET_IP_ALIGN == 2) + req->flags = + cpu_to_le16(RING_ALLOC_REQ_FLAGS_RX_SOP_PAD); + } + req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx); + req->nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id); + req->enables |= cpu_to_le32(enables); +} + static int hwrm_ring_alloc_send_msg(struct bnxt *bp, struct bnxt_ring_struct *ring, u32 ring_type, u32 map_index) @@ -6986,37 +7045,13 @@ static int hwrm_ring_alloc_send_msg(struct bnxt *bp, break; } case HWRM_RING_ALLOC_RX: - req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX; - req->length = cpu_to_le32(bp->rx_ring_mask + 1); - if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { - u16 flags = 0; - - /* Association of rx ring with stats context */ - grp_info = &bp->grp_info[ring->grp_idx]; - req->rx_buf_size = cpu_to_le16(bp->rx_buf_use_size); - req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx); - req->enables |= cpu_to_le32( - RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID); - if (NET_IP_ALIGN == 2) - flags = RING_ALLOC_REQ_FLAGS_RX_SOP_PAD; - req->flags = cpu_to_le16(flags); - } - break; case HWRM_RING_ALLOC_AGG: - if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { - req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG; - /* Association of agg ring with rx ring */ - grp_info = &bp->grp_info[ring->grp_idx]; - req->rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id); - req->rx_buf_size = cpu_to_le16(BNXT_RX_PAGE_SIZE); - req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx); - req->enables |= cpu_to_le32( - RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID | - RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID); - } else { - req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX; - } - req->length = cpu_to_le32(bp->rx_agg_ring_mask + 1); + req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX; + req->length = (ring_type == HWRM_RING_ALLOC_RX) ? 
+ cpu_to_le32(bp->rx_ring_mask + 1) : + cpu_to_le32(bp->rx_agg_ring_mask + 1); + if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) + bnxt_set_rx_ring_params_p5(bp, ring_type, req, ring); break; case HWRM_RING_ALLOC_CMPL: req->ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL; @@ -7197,6 +7232,39 @@ static int bnxt_hwrm_rx_agg_ring_alloc(struct bnxt *bp, return 0; } +static int bnxt_hwrm_cp_ring_alloc_p5(struct bnxt *bp, + struct bnxt_cp_ring_info *cpr) +{ + const u32 type = HWRM_RING_ALLOC_CMPL; + struct bnxt_napi *bnapi = cpr->bnapi; + struct bnxt_ring_struct *ring; + u32 map_idx = bnapi->index; + int rc; + + ring = &cpr->cp_ring_struct; + ring->handle = BNXT_SET_NQ_HDL(cpr); + rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx); + if (rc) + return rc; + bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id); + bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons); + return 0; +} + +static int bnxt_hwrm_tx_ring_alloc(struct bnxt *bp, + struct bnxt_tx_ring_info *txr, u32 tx_idx) +{ + struct bnxt_ring_struct *ring = &txr->tx_ring_struct; + const u32 type = HWRM_RING_ALLOC_TX; + int rc; + + rc = hwrm_ring_alloc_send_msg(bp, ring, type, tx_idx); + if (rc) + return rc; + bnxt_set_db(bp, &txr->tx_db, type, tx_idx, ring->fw_ring_id); + return 0; +} + static int bnxt_hwrm_ring_alloc(struct bnxt *bp) { bool agg_rings = !!(bp->flags & BNXT_FLAG_AGG_RINGS); @@ -7233,33 +7301,17 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp) } } - type = HWRM_RING_ALLOC_TX; for (i = 0; i < bp->tx_nr_rings; i++) { struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; - struct bnxt_ring_struct *ring; - u32 map_idx; if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { - struct bnxt_cp_ring_info *cpr2 = txr->tx_cpr; - struct bnxt_napi *bnapi = txr->bnapi; - u32 type2 = HWRM_RING_ALLOC_CMPL; - - ring = &cpr2->cp_ring_struct; - ring->handle = BNXT_SET_NQ_HDL(cpr2); - map_idx = bnapi->index; - rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx); + rc = bnxt_hwrm_cp_ring_alloc_p5(bp, txr->tx_cpr); if (rc) goto err_out; - bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx, - ring->fw_ring_id); - bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons); } - ring = &txr->tx_ring_struct; - map_idx = i; - rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx); + rc = bnxt_hwrm_tx_ring_alloc(bp, txr, i); if (rc) goto err_out; - bnxt_set_db(bp, &txr->tx_db, type, map_idx, ring->fw_ring_id); } for (i = 0; i < bp->rx_nr_rings; i++) { @@ -7272,20 +7324,9 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp) if (!agg_rings) bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { - struct bnxt_cp_ring_info *cpr2 = rxr->rx_cpr; - struct bnxt_napi *bnapi = rxr->bnapi; - u32 type2 = HWRM_RING_ALLOC_CMPL; - struct bnxt_ring_struct *ring; - u32 map_idx = bnapi->index; - - ring = &cpr2->cp_ring_struct; - ring->handle = BNXT_SET_NQ_HDL(cpr2); - rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx); + rc = bnxt_hwrm_cp_ring_alloc_p5(bp, rxr->rx_cpr); if (rc) goto err_out; - bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx, - ring->fw_ring_id); - bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons); } } @@ -7353,6 +7394,23 @@ static int hwrm_ring_free_send_msg(struct bnxt *bp, return 0; } +static void bnxt_hwrm_tx_ring_free(struct bnxt *bp, + struct bnxt_tx_ring_info *txr, + bool close_path) +{ + struct bnxt_ring_struct *ring = &txr->tx_ring_struct; + u32 cmpl_ring_id; + + if (ring->fw_ring_id == INVALID_HW_RING_ID) + return; + + cmpl_ring_id = close_path ? 
bnxt_cp_ring_for_tx(bp, txr) : + INVALID_HW_RING_ID; + hwrm_ring_free_send_msg(bp, ring, RING_FREE_REQ_RING_TYPE_TX, + cmpl_ring_id); + ring->fw_ring_id = INVALID_HW_RING_ID; +} + static void bnxt_hwrm_rx_ring_free(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, bool close_path) @@ -7397,6 +7455,33 @@ static void bnxt_hwrm_rx_agg_ring_free(struct bnxt *bp, bp->grp_info[grp_idx].agg_fw_ring_id = INVALID_HW_RING_ID; } +static void bnxt_hwrm_cp_ring_free(struct bnxt *bp, + struct bnxt_cp_ring_info *cpr) +{ + struct bnxt_ring_struct *ring; + + ring = &cpr->cp_ring_struct; + if (ring->fw_ring_id == INVALID_HW_RING_ID) + return; + + hwrm_ring_free_send_msg(bp, ring, RING_FREE_REQ_RING_TYPE_L2_CMPL, + INVALID_HW_RING_ID); + ring->fw_ring_id = INVALID_HW_RING_ID; +} + +static void bnxt_clear_one_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr) +{ + struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; + int i, size = ring->ring_mem.page_size; + + cpr->cp_raw_cons = 0; + cpr->toggle = 0; + + for (i = 0; i < bp->cp_nr_pages; i++) + if (cpr->cp_desc_ring[i]) + memset(cpr->cp_desc_ring[i], 0, size); +} + static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path) { u32 type; @@ -7405,20 +7490,8 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path) if (!bp->bnapi) return; - for (i = 0; i < bp->tx_nr_rings; i++) { - struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; - struct bnxt_ring_struct *ring = &txr->tx_ring_struct; - - if (ring->fw_ring_id != INVALID_HW_RING_ID) { - u32 cmpl_ring_id = bnxt_cp_ring_for_tx(bp, txr); - - hwrm_ring_free_send_msg(bp, ring, - RING_FREE_REQ_RING_TYPE_TX, - close_path ? cmpl_ring_id : - INVALID_HW_RING_ID); - ring->fw_ring_id = INVALID_HW_RING_ID; - } - } + for (i = 0; i < bp->tx_nr_rings; i++) + bnxt_hwrm_tx_ring_free(bp, &bp->tx_ring[i], close_path); bnxt_cancel_dim(bp); for (i = 0; i < bp->rx_nr_rings; i++) { @@ -7442,17 +7515,9 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path) struct bnxt_ring_struct *ring; int j; - for (j = 0; j < cpr->cp_ring_count && cpr->cp_ring_arr; j++) { - struct bnxt_cp_ring_info *cpr2 = &cpr->cp_ring_arr[j]; + for (j = 0; j < cpr->cp_ring_count && cpr->cp_ring_arr; j++) + bnxt_hwrm_cp_ring_free(bp, &cpr->cp_ring_arr[j]); - ring = &cpr2->cp_ring_struct; - if (ring->fw_ring_id == INVALID_HW_RING_ID) - continue; - hwrm_ring_free_send_msg(bp, ring, - RING_FREE_REQ_RING_TYPE_L2_CMPL, - INVALID_HW_RING_ID); - ring->fw_ring_id = INVALID_HW_RING_ID; - } ring = &cpr->cp_ring_struct; if (ring->fw_ring_id != INVALID_HW_RING_ID) { hwrm_ring_free_send_msg(bp, ring, type, @@ -8365,6 +8430,7 @@ static int bnxt_hwrm_func_qcfg(struct bnxt *bp) switch (resp->port_partition_type) { case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0: + case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_2: case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5: case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0: bp->port_partition_type = resp->port_partition_type; @@ -9529,6 +9595,8 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp) bp->fw_cap |= BNXT_FW_CAP_HOT_RESET_IF; if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_FW_LIVEPATCH_SUPPORTED)) bp->fw_cap |= BNXT_FW_CAP_LIVEPATCH; + if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_NPAR_1_2_SUPPORTED) + bp->fw_cap |= BNXT_FW_CAP_NPAR_1_2; if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_DFLT_VLAN_TPID_PCP_SUPPORTED)) bp->fw_cap |= BNXT_FW_CAP_DFLT_VLAN_TPID_PCP; if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_BS_V2_SUPPORTED) @@ -11237,6 +11305,155 @@ int bnxt_reserve_rings(struct bnxt *bp, bool 
irq_re_init) return 0; } +static void bnxt_tx_queue_stop(struct bnxt *bp, int idx) +{ + struct bnxt_tx_ring_info *txr; + struct netdev_queue *txq; + struct bnxt_napi *bnapi; + int i; + + bnapi = bp->bnapi[idx]; + bnxt_for_each_napi_tx(i, bnapi, txr) { + WRITE_ONCE(txr->dev_state, BNXT_DEV_STATE_CLOSING); + synchronize_net(); + + if (!(bnapi->flags & BNXT_NAPI_FLAG_XDP)) { + txq = netdev_get_tx_queue(bp->dev, txr->txq_index); + if (txq) { + __netif_tx_lock_bh(txq); + netif_tx_stop_queue(txq); + __netif_tx_unlock_bh(txq); + } + } + + if (!bp->tph_mode) + continue; + + bnxt_hwrm_tx_ring_free(bp, txr, true); + bnxt_hwrm_cp_ring_free(bp, txr->tx_cpr); + bnxt_free_one_tx_ring_skbs(bp, txr, txr->txq_index); + bnxt_clear_one_cp_ring(bp, txr->tx_cpr); + } +} + +static int bnxt_tx_queue_start(struct bnxt *bp, int idx) +{ + struct bnxt_tx_ring_info *txr; + struct netdev_queue *txq; + struct bnxt_napi *bnapi; + int rc, i; + + bnapi = bp->bnapi[idx]; + /* All rings have been reserved and previously allocated. + * Reallocating with the same parameters should never fail. + */ + bnxt_for_each_napi_tx(i, bnapi, txr) { + if (!bp->tph_mode) + goto start_tx; + + rc = bnxt_hwrm_cp_ring_alloc_p5(bp, txr->tx_cpr); + if (rc) + return rc; + + rc = bnxt_hwrm_tx_ring_alloc(bp, txr, false); + if (rc) + return rc; + + txr->tx_prod = 0; + txr->tx_cons = 0; + txr->tx_hw_cons = 0; +start_tx: + WRITE_ONCE(txr->dev_state, 0); + synchronize_net(); + + if (bnapi->flags & BNXT_NAPI_FLAG_XDP) + continue; + + txq = netdev_get_tx_queue(bp->dev, txr->txq_index); + if (txq) + netif_tx_start_queue(txq); + } + + return 0; +} + +static void bnxt_irq_affinity_notify(struct irq_affinity_notify *notify, + const cpumask_t *mask) +{ + struct bnxt_irq *irq; + u16 tag; + int err; + + irq = container_of(notify, struct bnxt_irq, affinity_notify); + + if (!irq->bp->tph_mode) + return; + + cpumask_copy(irq->cpu_mask, mask); + + if (irq->ring_nr >= irq->bp->rx_nr_rings) + return; + + if (pcie_tph_get_cpu_st(irq->bp->pdev, TPH_MEM_TYPE_VM, + cpumask_first(irq->cpu_mask), &tag)) + return; + + if (pcie_tph_set_st_entry(irq->bp->pdev, irq->msix_nr, tag)) + return; + + netdev_lock(irq->bp->dev); + if (netif_running(irq->bp->dev)) { + err = netdev_rx_queue_restart(irq->bp->dev, irq->ring_nr); + if (err) + netdev_err(irq->bp->dev, + "RX queue restart failed: err=%d\n", err); + } + netdev_unlock(irq->bp->dev); +} + +static void bnxt_irq_affinity_release(struct kref *ref) +{ + struct irq_affinity_notify *notify = + container_of(ref, struct irq_affinity_notify, kref); + struct bnxt_irq *irq; + + irq = container_of(notify, struct bnxt_irq, affinity_notify); + + if (!irq->bp->tph_mode) + return; + + if (pcie_tph_set_st_entry(irq->bp->pdev, irq->msix_nr, 0)) { + netdev_err(irq->bp->dev, + "Setting ST=0 for MSIX entry %d failed\n", + irq->msix_nr); + return; + } +} + +static void bnxt_release_irq_notifier(struct bnxt_irq *irq) +{ + irq_set_affinity_notifier(irq->vector, NULL); +} + +static void bnxt_register_irq_notifier(struct bnxt *bp, struct bnxt_irq *irq) +{ + struct irq_affinity_notify *notify; + + irq->bp = bp; + + /* Nothing to do if TPH is not enabled */ + if (!bp->tph_mode) + return; + + /* Register IRQ affinity notifier */ + notify = &irq->affinity_notify; + notify->irq = irq->vector; + notify->notify = bnxt_irq_affinity_notify; + notify->release = bnxt_irq_affinity_release; + + irq_set_affinity_notifier(irq->vector, notify); +} + static void bnxt_free_irq(struct bnxt *bp) { struct bnxt_irq *irq; @@ -11259,11 +11476,18 @@ static void 
bnxt_free_irq(struct bnxt *bp) free_cpumask_var(irq->cpu_mask); irq->have_cpumask = 0; } + + bnxt_release_irq_notifier(irq); + free_irq(irq->vector, bp->bnapi[i]); } irq->requested = 0; } + + /* Disable TPH support */ + pcie_disable_tph(bp->pdev); + bp->tph_mode = 0; } static int bnxt_request_irq(struct bnxt *bp) @@ -11283,6 +11507,12 @@ static int bnxt_request_irq(struct bnxt *bp) #ifdef CONFIG_RFS_ACCEL rmap = bp->dev->rx_cpu_rmap; #endif + + /* Enable TPH support as part of IRQ request */ + rc = pcie_enable_tph(bp->pdev, PCI_TPH_ST_IV_MODE); + if (!rc) + bp->tph_mode = PCI_TPH_ST_IV_MODE; + for (i = 0, j = 0; i < bp->cp_nr_rings; i++) { int map_idx = bnxt_cp_num_to_irq_num(bp, i); struct bnxt_irq *irq = &bp->irq_tbl[map_idx]; @@ -11301,13 +11531,16 @@ static int bnxt_request_irq(struct bnxt *bp) if (rc) break; - netif_napi_set_irq(&bp->bnapi[i]->napi, irq->vector); + netif_napi_set_irq_locked(&bp->bnapi[i]->napi, irq->vector); irq->requested = 1; if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) { int numa_node = dev_to_node(&bp->pdev->dev); + u16 tag; irq->have_cpumask = 1; + irq->msix_nr = map_idx; + irq->ring_nr = i; cpumask_set_cpu(cpumask_local_spread(i, numa_node), irq->cpu_mask); rc = irq_update_affinity_hint(irq->vector, irq->cpu_mask); @@ -11317,6 +11550,16 @@ static int bnxt_request_irq(struct bnxt *bp) irq->vector); break; } + + bnxt_register_irq_notifier(bp, irq); + + /* Init ST table entry */ + if (pcie_tph_get_cpu_st(irq->bp->pdev, TPH_MEM_TYPE_VM, + cpumask_first(irq->cpu_mask), + &tag)) + continue; + + pcie_tph_set_st_entry(irq->bp->pdev, irq->msix_nr, tag); } } return rc; @@ -11337,9 +11580,9 @@ static void bnxt_del_napi(struct bnxt *bp) for (i = 0; i < bp->cp_nr_rings; i++) { struct bnxt_napi *bnapi = bp->bnapi[i]; - __netif_napi_del(&bnapi->napi); + __netif_napi_del_locked(&bnapi->napi); } - /* We called __netif_napi_del(), we need + /* We called __netif_napi_del_locked(), we need * to respect an RCU grace period before freeing napi structures. 
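+ * The synchronize_net() call below provides that grace period.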
*/ synchronize_net(); @@ -11358,12 +11601,12 @@ static void bnxt_init_napi(struct bnxt *bp) cp_nr_rings--; for (i = 0; i < cp_nr_rings; i++) { bnapi = bp->bnapi[i]; - netif_napi_add_config(bp->dev, &bnapi->napi, poll_fn, - bnapi->index); + netif_napi_add_config_locked(bp->dev, &bnapi->napi, poll_fn, + bnapi->index); } if (BNXT_CHIP_TYPE_NITRO_A0(bp)) { bnapi = bp->bnapi[cp_nr_rings]; - netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll_nitroa0); + netif_napi_add_locked(bp->dev, &bnapi->napi, bnxt_poll_nitroa0); } } @@ -11384,7 +11627,7 @@ static void bnxt_disable_napi(struct bnxt *bp) cpr->sw_stats->tx.tx_resets++; if (bnapi->in_reset) cpr->sw_stats->rx.rx_resets++; - napi_disable(&bnapi->napi); + napi_disable_locked(&bnapi->napi); } } @@ -11406,7 +11649,7 @@ static void bnxt_enable_napi(struct bnxt *bp) INIT_WORK(&cpr->dim.work, bnxt_dim_work); cpr->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; } - napi_enable(&bnapi->napi); + napi_enable_locked(&bnapi->napi); } } @@ -12077,6 +12320,7 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up) struct hwrm_func_drv_if_change_input *req; bool fw_reset = !bp->irq_tbl; bool resc_reinit = false; + bool caps_change = false; int rc, retry = 0; u32 flags = 0; @@ -12132,8 +12376,11 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up) set_bit(BNXT_STATE_ABORT_ERR, &bp->state); return -ENODEV; } - if (resc_reinit || fw_reset) { - if (fw_reset) { + if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_CAPS_CHANGE) + caps_change = true; + + if (resc_reinit || fw_reset || caps_change) { + if (fw_reset || caps_change) { set_bit(BNXT_STATE_FW_RESET_DET, &bp->state); if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) bnxt_ulp_irq_stop(bp); @@ -12569,7 +12816,6 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) return rc; } -/* rtnl_lock held */ int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) { int rc = 0; @@ -12580,14 +12826,14 @@ int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) rc = __bnxt_open_nic(bp, irq_re_init, link_re_init); if (rc) { netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc); - dev_close(bp->dev); + netif_close(bp->dev); } return rc; } -/* rtnl_lock held, open the NIC half way by allocating all resources, but - * NAPI, IRQ, and TX are not enabled. This is mainly used for offline - * self tests. +/* netdev instance lock held, open the NIC half way by allocating all + * resources, but NAPI, IRQ, and TX are not enabled. This is mainly used + * for offline self tests. */ int bnxt_half_open_nic(struct bnxt *bp) { @@ -12618,12 +12864,12 @@ int bnxt_half_open_nic(struct bnxt *bp) half_open_err: bnxt_free_skbs(bp); bnxt_free_mem(bp, true); - dev_close(bp->dev); + netif_close(bp->dev); return rc; } -/* rtnl_lock held, this call can only be made after a previous successful - * call to bnxt_half_open_nic(). +/* netdev instance lock held, this call can only be made after a previous + * successful call to bnxt_half_open_nic(). */ void bnxt_half_close_nic(struct bnxt *bp) { @@ -12732,10 +12978,11 @@ void bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) { /* If we get here, it means firmware reset is in progress * while we are trying to close. We can safely proceed with - * the close because we are holding rtnl_lock(). Some firmware - * messages may fail as we proceed to close. We set the - * ABORT_ERR flag here so that the FW reset thread will later - * abort when it gets the rtnl_lock() and sees the flag. 
+ * the close because we are holding netdev instance lock. + * Some firmware messages may fail as we proceed to close. + * We set the ABORT_ERR flag here so that the FW reset thread + * will later abort when it gets the netdev instance lock + * and sees the flag. */ netdev_warn(bp->dev, "FW reset in progress during close, FW reset will be aborted\n"); set_bit(BNXT_STATE_ABORT_ERR, &bp->state); @@ -12826,7 +13073,7 @@ static int bnxt_hwrm_port_phy_write(struct bnxt *bp, u16 phy_addr, u16 reg, return hwrm_req_send(bp, req); } -/* rtnl_lock held */ +/* netdev instance lock held */ static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { struct mii_ioctl_data *mdio = if_mii(ifr); @@ -13745,30 +13992,31 @@ static void bnxt_timer(struct timer_list *t) mod_timer(&bp->timer, jiffies + bp->current_interval); } -static void bnxt_rtnl_lock_sp(struct bnxt *bp) +static void bnxt_lock_sp(struct bnxt *bp) { /* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK * set. If the device is being closed, bnxt_close() may be holding - * rtnl() and waiting for BNXT_STATE_IN_SP_TASK to clear. So we - * must clear BNXT_STATE_IN_SP_TASK before holding rtnl(). + * netdev instance lock and waiting for BNXT_STATE_IN_SP_TASK to clear. + * So we must clear BNXT_STATE_IN_SP_TASK before holding netdev + * instance lock. */ clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); - rtnl_lock(); + netdev_lock(bp->dev); } -static void bnxt_rtnl_unlock_sp(struct bnxt *bp) +static void bnxt_unlock_sp(struct bnxt *bp) { set_bit(BNXT_STATE_IN_SP_TASK, &bp->state); - rtnl_unlock(); + netdev_unlock(bp->dev); } /* Only called from bnxt_sp_task() */ static void bnxt_reset(struct bnxt *bp, bool silent) { - bnxt_rtnl_lock_sp(bp); + bnxt_lock_sp(bp); if (test_bit(BNXT_STATE_OPEN, &bp->state)) bnxt_reset_task(bp, silent); - bnxt_rtnl_unlock_sp(bp); + bnxt_unlock_sp(bp); } /* Only called from bnxt_sp_task() */ @@ -13776,9 +14024,9 @@ static void bnxt_rx_ring_reset(struct bnxt *bp) { int i; - bnxt_rtnl_lock_sp(bp); + bnxt_lock_sp(bp); if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { - bnxt_rtnl_unlock_sp(bp); + bnxt_unlock_sp(bp); return; } /* Disable and flush TPA before resetting the RX ring */ @@ -13817,7 +14065,7 @@ static void bnxt_rx_ring_reset(struct bnxt *bp) } if (bp->flags & BNXT_FLAG_TPA) bnxt_set_tpa(bp, true); - bnxt_rtnl_unlock_sp(bp); + bnxt_unlock_sp(bp); } static void bnxt_fw_fatal_close(struct bnxt *bp) @@ -13873,7 +14121,7 @@ static bool is_bnxt_fw_ok(struct bnxt *bp) return false; } -/* rtnl_lock is acquired before calling this function */ +/* netdev instance lock is acquired before calling this function */ static void bnxt_force_fw_reset(struct bnxt *bp) { struct bnxt_fw_health *fw_health = bp->fw_health; @@ -13916,9 +14164,9 @@ void bnxt_fw_exception(struct bnxt *bp) netdev_warn(bp->dev, "Detected firmware fatal condition, initiating reset\n"); set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state); bnxt_ulp_stop(bp); - bnxt_rtnl_lock_sp(bp); + bnxt_lock_sp(bp); bnxt_force_fw_reset(bp); - bnxt_rtnl_unlock_sp(bp); + bnxt_unlock_sp(bp); } /* Returns the number of registered VFs, or 1 if VF configuration is pending, or @@ -13948,7 +14196,7 @@ static int bnxt_get_registered_vfs(struct bnxt *bp) void bnxt_fw_reset(struct bnxt *bp) { bnxt_ulp_stop(bp); - bnxt_rtnl_lock_sp(bp); + bnxt_lock_sp(bp); if (test_bit(BNXT_STATE_OPEN, &bp->state) && !test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) { struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; @@ -13971,7 +14219,7 @@ void bnxt_fw_reset(struct bnxt *bp) netdev_err(bp->dev, 
"Firmware reset aborted, rc = %d\n", n); clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); - dev_close(bp->dev); + netif_close(bp->dev); goto fw_reset_exit; } else if (n > 0) { u16 vf_tmo_dsecs = n * 10; @@ -13994,7 +14242,7 @@ void bnxt_fw_reset(struct bnxt *bp) bnxt_queue_fw_reset_work(bp, tmo); } fw_reset_exit: - bnxt_rtnl_unlock_sp(bp); + bnxt_unlock_sp(bp); } static void bnxt_chk_missed_irq(struct bnxt *bp) @@ -14193,7 +14441,7 @@ static void bnxt_sp_task(struct work_struct *work) static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, int *max_cp); -/* Under rtnl_lock */ +/* Under netdev instance lock */ int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs, int tx_xdp) { @@ -14586,7 +14834,7 @@ static void bnxt_fw_reset_abort(struct bnxt *bp, int rc) if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF) bnxt_dl_health_fw_status_update(bp, false); bp->fw_reset_state = 0; - dev_close(bp->dev); + netif_close(bp->dev); } static void bnxt_fw_reset_task(struct work_struct *work) @@ -14621,10 +14869,10 @@ static void bnxt_fw_reset_task(struct work_struct *work) return; } bp->fw_reset_timestamp = jiffies; - rtnl_lock(); + netdev_lock(bp->dev); if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) { bnxt_fw_reset_abort(bp, rc); - rtnl_unlock(); + netdev_unlock(bp->dev); goto ulp_start; } bnxt_fw_reset_close(bp); @@ -14635,7 +14883,7 @@ static void bnxt_fw_reset_task(struct work_struct *work) bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV; tmo = bp->fw_reset_min_dsecs * HZ / 10; } - rtnl_unlock(); + netdev_unlock(bp->dev); bnxt_queue_fw_reset_work(bp, tmo); return; } @@ -14709,7 +14957,7 @@ static void bnxt_fw_reset_task(struct work_struct *work) bp->fw_reset_state = BNXT_FW_RESET_STATE_OPENING; fallthrough; case BNXT_FW_RESET_STATE_OPENING: - while (!rtnl_trylock()) { + while (!netdev_trylock(bp->dev)) { bnxt_queue_fw_reset_work(bp, HZ / 10); return; } @@ -14717,7 +14965,7 @@ static void bnxt_fw_reset_task(struct work_struct *work) if (rc) { netdev_err(bp->dev, "bnxt_open() failed during FW reset\n"); bnxt_fw_reset_abort(bp, rc); - rtnl_unlock(); + netdev_unlock(bp->dev); goto ulp_start; } @@ -14736,13 +14984,13 @@ static void bnxt_fw_reset_task(struct work_struct *work) bnxt_dl_health_fw_recovery_done(bp); bnxt_dl_health_fw_status_update(bp, true); } - rtnl_unlock(); + netdev_unlock(bp->dev); bnxt_ulp_start(bp, 0); bnxt_reenable_sriov(bp); - rtnl_lock(); + netdev_lock(bp->dev); bnxt_vf_reps_alloc(bp); bnxt_vf_reps_open(bp); - rtnl_unlock(); + netdev_unlock(bp->dev); break; } return; @@ -14755,9 +15003,9 @@ static void bnxt_fw_reset_task(struct work_struct *work) netdev_err(bp->dev, "fw_health_status 0x%x\n", sts); } fw_reset_abort: - rtnl_lock(); + netdev_lock(bp->dev); bnxt_fw_reset_abort(bp, rc); - rtnl_unlock(); + netdev_unlock(bp->dev); ulp_start: bnxt_ulp_start(bp, rc); } @@ -14849,13 +15097,14 @@ static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev) return rc; } -/* rtnl_lock held */ static int bnxt_change_mac_addr(struct net_device *dev, void *p) { struct sockaddr *addr = p; struct bnxt *bp = netdev_priv(dev); int rc = 0; + netdev_assert_locked(dev); + if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; @@ -14876,11 +15125,12 @@ static int bnxt_change_mac_addr(struct net_device *dev, void *p) return rc; } -/* rtnl_lock held */ static int bnxt_change_mtu(struct net_device *dev, int new_mtu) { struct bnxt *bp = netdev_priv(dev); + netdev_assert_locked(dev); + if (netif_running(dev)) bnxt_close_nic(bp, true, false); @@ 
-15375,6 +15625,9 @@ static void bnxt_get_queue_stats_rx(struct net_device *dev, int i, struct bnxt_cp_ring_info *cpr; u64 *sw; + if (!bp->bnapi) + return; + cpr = &bp->bnapi[i]->cp_ring; sw = cpr->stats.sw_stats; @@ -15398,6 +15651,9 @@ static void bnxt_get_queue_stats_tx(struct net_device *dev, int i, struct bnxt_napi *bnapi; u64 *sw; + if (!bp->tx_ring) + return; + bnapi = bp->tx_ring[bp->tx_ring_map[i]].bnapi; sw = bnapi->cp_ring.stats.sw_stats; @@ -15439,6 +15695,9 @@ static int bnxt_queue_mem_alloc(struct net_device *dev, void *qmem, int idx) struct bnxt_ring_struct *ring; int rc; + if (!bp->rx_ring) + return -ENETDOWN; + rxr = &bp->rx_ring[idx]; clone = qmem; memcpy(clone, rxr, sizeof(*rxr)); @@ -15521,6 +15780,7 @@ static void bnxt_queue_mem_free(struct net_device *dev, void *qmem) struct bnxt_ring_struct *ring; bnxt_free_one_rx_ring_skbs(bp, rxr); + bnxt_free_one_tpa_info(bp, rxr); xdp_rxq_info_unreg(&rxr->xdp_rxq); @@ -15601,6 +15861,7 @@ static int bnxt_queue_start(struct net_device *dev, void *qmem, int idx) struct bnxt_rx_ring_info *rxr, *clone; struct bnxt_cp_ring_info *cpr; struct bnxt_vnic_info *vnic; + struct bnxt_napi *bnapi; int i, rc; rxr = &bp->rx_ring[idx]; @@ -15618,21 +15879,40 @@ static int bnxt_queue_start(struct net_device *dev, void *qmem, int idx) bnxt_copy_rx_ring(bp, rxr, clone); + bnapi = rxr->bnapi; + cpr = &bnapi->cp_ring; + + /* All rings have been reserved and previously allocated. + * Reallocating with the same parameters should never fail. + */ rc = bnxt_hwrm_rx_ring_alloc(bp, rxr); if (rc) - return rc; + goto err_reset; + + if (bp->tph_mode) { + rc = bnxt_hwrm_cp_ring_alloc_p5(bp, rxr->rx_cpr); + if (rc) + goto err_reset; + } + rc = bnxt_hwrm_rx_agg_ring_alloc(bp, rxr); if (rc) - goto err_free_hwrm_rx_ring; + goto err_reset; bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); if (bp->flags & BNXT_FLAG_AGG_RINGS) bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod); - cpr = &rxr->bnapi->cp_ring; - cpr->sw_stats->rx.rx_resets++; + if (bp->flags & BNXT_FLAG_SHARED_RINGS) { + rc = bnxt_tx_queue_start(bp, idx); + if (rc) + goto err_reset; + } + + napi_enable(&bnapi->napi); + bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons); - for (i = 0; i <= BNXT_VNIC_NTUPLE; i++) { + for (i = 0; i < bp->nr_vnics; i++) { vnic = &bp->vnic_info[i]; rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic, true); @@ -15648,8 +15928,12 @@ static int bnxt_queue_start(struct net_device *dev, void *qmem, int idx) return 0; -err_free_hwrm_rx_ring: - bnxt_hwrm_rx_ring_free(bp, rxr, false); +err_reset: + netdev_err(bp->dev, "Unexpected HWRM error during queue start rc: %d\n", + rc); + napi_enable(&bnapi->napi); + bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons); + bnxt_reset_task(bp, true); return rc; } @@ -15657,10 +15941,12 @@ static int bnxt_queue_stop(struct net_device *dev, void *qmem, int idx) { struct bnxt *bp = netdev_priv(dev); struct bnxt_rx_ring_info *rxr; + struct bnxt_cp_ring_info *cpr; struct bnxt_vnic_info *vnic; + struct bnxt_napi *bnapi; int i; - for (i = 0; i <= BNXT_VNIC_NTUPLE; i++) { + for (i = 0; i < bp->nr_vnics; i++) { vnic = &bp->vnic_info[i]; vnic->mru = 0; bnxt_hwrm_vnic_update(bp, vnic, @@ -15669,14 +15955,30 @@ static int bnxt_queue_stop(struct net_device *dev, void *qmem, int idx) /* Make sure NAPI sees that the VNIC is disabled */ synchronize_net(); rxr = &bp->rx_ring[idx]; - cancel_work_sync(&rxr->bnapi->cp_ring.dim.work); + bnapi = rxr->bnapi; + cpr = &bnapi->cp_ring; + cancel_work_sync(&cpr->dim.work); bnxt_hwrm_rx_ring_free(bp, rxr, false); 
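+ /* the aggregation ring, if any, is freed along with its RX ring */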
bnxt_hwrm_rx_agg_ring_free(bp, rxr, false); - rxr->rx_next_cons = 0; page_pool_disable_direct_recycling(rxr->page_pool); if (bnxt_separate_head_pool()) page_pool_disable_direct_recycling(rxr->head_pool); + if (bp->flags & BNXT_FLAG_SHARED_RINGS) + bnxt_tx_queue_stop(bp, idx); + + /* Disable NAPI now after freeing the rings because HWRM_RING_FREE + * completion is handled in NAPI to guarantee no more DMA on that ring + * after seeing the completion. + */ + napi_disable(&bnapi->napi); + + if (bp->tph_mode) { + bnxt_hwrm_cp_ring_free(bp, rxr->rx_cpr); + bnxt_clear_one_cp_ring(bp, rxr->rx_cpr); + } + bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons); + memcpy(qmem, rxr, sizeof(*rxr)); bnxt_init_rx_ring_struct(bp, qmem); @@ -15995,7 +16297,7 @@ int bnxt_restore_pf_fw_resources(struct bnxt *bp) { int rc; - ASSERT_RTNL(); + netdev_ops_assert_locked(bp->dev); bnxt_hwrm_func_qcaps(bp); if (netif_running(bp->dev)) @@ -16008,7 +16310,7 @@ int bnxt_restore_pf_fw_resources(struct bnxt *bp) if (netif_running(bp->dev)) { if (rc) - dev_close(bp->dev); + netif_close(bp->dev); else rc = bnxt_open_nic(bp, true, false); } @@ -16345,6 +16647,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) bp->rss_cap |= BNXT_RSS_CAP_MULTI_RSS_CTX; if (BNXT_SUPPORTS_QUEUE_API(bp)) dev->queue_mgmt_ops = &bnxt_queue_mgmt_ops; + dev->request_ops_lock = true; rc = register_netdev(dev); if (rc) @@ -16395,13 +16698,13 @@ static void bnxt_shutdown(struct pci_dev *pdev) if (!dev) return; - rtnl_lock(); + netdev_lock(dev); bp = netdev_priv(dev); if (!bp) goto shutdown_exit; if (netif_running(dev)) - dev_close(dev); + netif_close(dev); bnxt_ptp_clear(bp); bnxt_clear_int_mode(bp); @@ -16413,7 +16716,7 @@ static void bnxt_shutdown(struct pci_dev *pdev) } shutdown_exit: - rtnl_unlock(); + netdev_unlock(dev); } #ifdef CONFIG_PM_SLEEP @@ -16425,7 +16728,7 @@ static int bnxt_suspend(struct device *device) bnxt_ulp_stop(bp); - rtnl_lock(); + netdev_lock(dev); if (netif_running(dev)) { netif_device_detach(dev); rc = bnxt_close(dev); @@ -16434,7 +16737,7 @@ static int bnxt_suspend(struct device *device) bnxt_ptp_clear(bp); pci_disable_device(bp->pdev); bnxt_free_ctx_mem(bp, false); - rtnl_unlock(); + netdev_unlock(dev); return rc; } @@ -16444,7 +16747,7 @@ static int bnxt_resume(struct device *device) struct bnxt *bp = netdev_priv(dev); int rc = 0; - rtnl_lock(); + netdev_lock(dev); rc = pci_enable_device(bp->pdev); if (rc) { netdev_err(dev, "Cannot re-enable PCI device during resume, err = %d\n", @@ -16487,7 +16790,7 @@ static int bnxt_resume(struct device *device) } resume_exit: - rtnl_unlock(); + netdev_unlock(bp->dev); bnxt_ulp_start(bp, rc); if (!rc) bnxt_reenable_sriov(bp); @@ -16522,7 +16825,7 @@ static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev, bnxt_ulp_stop(bp); - rtnl_lock(); + netdev_lock(netdev); netif_device_detach(netdev); if (test_and_set_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) { @@ -16531,7 +16834,7 @@ static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev, } if (abort || state == pci_channel_io_perm_failure) { - rtnl_unlock(); + netdev_unlock(netdev); return PCI_ERS_RESULT_DISCONNECT; } @@ -16550,7 +16853,7 @@ static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev, if (pci_is_enabled(pdev)) pci_disable_device(pdev); bnxt_free_ctx_mem(bp, false); - rtnl_unlock(); + netdev_unlock(netdev); /* Request a slot reset. 
*/ return PCI_ERS_RESULT_NEED_RESET; @@ -16580,7 +16883,7 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev) test_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state)) msleep(900); - rtnl_lock(); + netdev_lock(netdev); if (pci_enable_device(pdev)) { dev_err(&pdev->dev, @@ -16635,7 +16938,7 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev) reset_exit: clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); bnxt_clear_reservations(bp, true); - rtnl_unlock(); + netdev_unlock(netdev); return result; } @@ -16654,7 +16957,7 @@ static void bnxt_io_resume(struct pci_dev *pdev) int err; netdev_info(bp->dev, "PCI Slot Resume\n"); - rtnl_lock(); + netdev_lock(netdev); err = bnxt_hwrm_func_qcaps(bp); if (!err) { @@ -16667,7 +16970,7 @@ static void bnxt_io_resume(struct pci_dev *pdev) if (!err) netif_device_attach(netdev); - rtnl_unlock(); + netdev_unlock(netdev); bnxt_ulp_start(bp, err); if (!err) bnxt_reenable_sriov(bp); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 2373f423a523e..21726cf565866 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -82,6 +82,12 @@ struct tx_bd { #define TX_OPAQUE_PROD(bp, opq) ((TX_OPAQUE_IDX(opq) + TX_OPAQUE_BDS(opq)) &\ (bp)->tx_ring_mask) +#define TX_BD_CNT(n) (((n) << TX_BD_FLAGS_BD_CNT_SHIFT) & TX_BD_FLAGS_BD_CNT) + +#define TX_MAX_BD_CNT 32 + +#define TX_MAX_FRAGS (TX_MAX_BD_CNT - 2) + struct tx_bd_ext { __le32 tx_bd_hsize_lflags; #define TX_BD_FLAGS_TCP_UDP_CHKSUM (1 << 0) @@ -1234,6 +1240,11 @@ struct bnxt_irq { u8 have_cpumask:1; char name[IFNAMSIZ + BNXT_IRQ_NAME_EXTRA]; cpumask_var_t cpu_mask; + + struct bnxt *bp; + int msix_nr; + int ring_nr; + struct irq_affinity_notify affinity_notify; }; #define HWRM_RING_ALLOC_TX 0x1 @@ -2410,6 +2421,8 @@ struct bnxt { u8 max_q; u8 num_tc; + u8 tph_mode; + unsigned int current_interval; #define BNXT_TIMER_INTERVAL HZ @@ -2492,6 +2505,7 @@ struct bnxt { #define BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V3 BIT_ULL(39) #define BNXT_FW_CAP_VNIC_RE_FLUSH BIT_ULL(40) #define BNXT_FW_CAP_SW_MAX_RESOURCE_LIMITS BIT_ULL(41) + #define BNXT_FW_CAP_NPAR_1_2 BIT_ULL(42) u32 fw_dbg_cap; @@ -2689,6 +2703,7 @@ struct bnxt { #define BNXT_DUMP_LIVE 0 #define BNXT_DUMP_CRASH 1 #define BNXT_DUMP_DRIVER 2 +#define BNXT_DUMP_LIVE_WITH_CTX_L1_CACHE 3 struct bpf_prog *xdp_prog; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c index 7236d8e548ab5..5576e7cf84631 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c @@ -159,8 +159,8 @@ static int bnxt_hwrm_dbg_coredump_list(struct bnxt *bp, return rc; } -static int bnxt_hwrm_dbg_coredump_initiate(struct bnxt *bp, u16 component_id, - u16 segment_id) +static int bnxt_hwrm_dbg_coredump_initiate(struct bnxt *bp, u16 dump_type, + u16 component_id, u16 segment_id) { struct hwrm_dbg_coredump_initiate_input *req; int rc; @@ -172,6 +172,8 @@ static int bnxt_hwrm_dbg_coredump_initiate(struct bnxt *bp, u16 component_id, hwrm_req_timeout(bp, req, bp->hwrm_cmd_max_timeout); req->component_id = cpu_to_le16(component_id); req->segment_id = cpu_to_le16(segment_id); + if (dump_type == BNXT_DUMP_LIVE_WITH_CTX_L1_CACHE) + req->seg_flags = DBG_COREDUMP_INITIATE_REQ_SEG_FLAGS_COLLECT_CTX_L1_CACHE; return hwrm_req_send(bp, req); } @@ -450,7 +452,8 @@ static int __bnxt_get_coredump(struct bnxt *bp, u16 dump_type, void *buf, start = jiffies; - rc = 
bnxt_hwrm_dbg_coredump_initiate(bp, comp_id, seg_id); + rc = bnxt_hwrm_dbg_coredump_initiate(bp, dump_type, comp_id, + seg_id); if (rc) { netdev_err(bp->dev, "Failed to initiate coredump for seg = %d\n", diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c index ef8288fd68f4c..777880594a04c 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c @@ -11,6 +11,7 @@ #include #include #include +#include #include "bnxt_hsi.h" #include "bnxt.h" #include "bnxt_hwrm.h" @@ -439,14 +440,17 @@ static int bnxt_dl_reload_down(struct devlink *dl, bool netns_change, case DEVLINK_RELOAD_ACTION_DRIVER_REINIT: { bnxt_ulp_stop(bp); rtnl_lock(); + netdev_lock(bp->dev); if (bnxt_sriov_cfg(bp)) { NL_SET_ERR_MSG_MOD(extack, "reload is unsupported while VFs are allocated or being configured"); + netdev_unlock(bp->dev); rtnl_unlock(); bnxt_ulp_start(bp, 0); return -EOPNOTSUPP; } if (bp->dev->reg_state == NETREG_UNREGISTERED) { + netdev_unlock(bp->dev); rtnl_unlock(); bnxt_ulp_start(bp, 0); return -ENODEV; @@ -458,7 +462,8 @@ static int bnxt_dl_reload_down(struct devlink *dl, bool netns_change, if (rc) { NL_SET_ERR_MSG_MOD(extack, "Failed to deregister"); if (netif_running(bp->dev)) - dev_close(bp->dev); + netif_close(bp->dev); + netdev_unlock(bp->dev); rtnl_unlock(); break; } @@ -479,7 +484,9 @@ static int bnxt_dl_reload_down(struct devlink *dl, bool netns_change, return -EPERM; } rtnl_lock(); + netdev_lock(bp->dev); if (bp->dev->reg_state == NETREG_UNREGISTERED) { + netdev_unlock(bp->dev); rtnl_unlock(); return -ENODEV; } @@ -493,6 +500,7 @@ static int bnxt_dl_reload_down(struct devlink *dl, bool netns_change, if (rc) { NL_SET_ERR_MSG_MOD(extack, "Failed to activate firmware"); clear_bit(BNXT_STATE_FW_ACTIVATE, &bp->state); + netdev_unlock(bp->dev); rtnl_unlock(); } break; @@ -511,6 +519,8 @@ static int bnxt_dl_reload_up(struct devlink *dl, enum devlink_reload_action acti struct bnxt *bp = bnxt_get_bp_from_dl(dl); int rc = 0; + netdev_assert_locked(bp->dev); + *actions_performed = 0; switch (action) { case DEVLINK_RELOAD_ACTION_DRIVER_REINIT: { @@ -535,6 +545,7 @@ static int bnxt_dl_reload_up(struct devlink *dl, enum devlink_reload_action acti if (!netif_running(bp->dev)) NL_SET_ERR_MSG_MOD(extack, "Device is closed, not waiting for reset notice that will never come"); + netdev_unlock(bp->dev); rtnl_unlock(); while (test_bit(BNXT_STATE_FW_ACTIVATE, &bp->state)) { if (time_after(jiffies, timeout)) { @@ -550,6 +561,7 @@ static int bnxt_dl_reload_up(struct devlink *dl, enum devlink_reload_action acti msleep(50); } rtnl_lock(); + netdev_lock(bp->dev); if (!rc) *actions_performed |= BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT); clear_bit(BNXT_STATE_FW_ACTIVATE, &bp->state); @@ -568,8 +580,9 @@ static int bnxt_dl_reload_up(struct devlink *dl, enum devlink_reload_action acti } *actions_performed |= BIT(action); } else if (netif_running(bp->dev)) { - dev_close(bp->dev); + netif_close(bp->dev); } + netdev_unlock(bp->dev); rtnl_unlock(); if (action == DEVLINK_RELOAD_ACTION_DRIVER_REINIT) bnxt_ulp_start(bp, rc); @@ -666,6 +679,8 @@ static const struct bnxt_dl_nvm_param nvm_params[] = { NVM_OFF_MSIX_VEC_PER_PF_MAX, BNXT_NVM_SHARED_CFG, 10, 4}, {DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MIN, NVM_OFF_MSIX_VEC_PER_PF_MIN, BNXT_NVM_SHARED_CFG, 7, 4}, + {DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE, NVM_OFF_SUPPORT_RDMA, + BNXT_NVM_FUNC_CFG, 1, 1}, {BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK, NVM_OFF_DIS_GRE_VER_CHECK, 
BNXT_NVM_SHARED_CFG, 1, 1}, }; @@ -1010,37 +1025,19 @@ static int bnxt_dl_info_get(struct devlink *dl, struct devlink_info_req *req, } -static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg, - union devlink_param_value *val) +static int __bnxt_hwrm_nvm_req(struct bnxt *bp, + const struct bnxt_dl_nvm_param *nvm, void *msg, + union devlink_param_value *val) { struct hwrm_nvm_get_variable_input *req = msg; - struct bnxt_dl_nvm_param nvm_param; struct hwrm_err_output *resp; union bnxt_nvm_data *data; dma_addr_t data_dma_addr; - int idx = 0, rc, i; - - /* Get/Set NVM CFG parameter is supported only on PFs */ - if (BNXT_VF(bp)) { - hwrm_req_drop(bp, req); - return -EPERM; - } - - for (i = 0; i < ARRAY_SIZE(nvm_params); i++) { - if (nvm_params[i].id == param_id) { - nvm_param = nvm_params[i]; - break; - } - } + int idx = 0, rc; - if (i == ARRAY_SIZE(nvm_params)) { - hwrm_req_drop(bp, req); - return -EOPNOTSUPP; - } - - if (nvm_param.dir_type == BNXT_NVM_PORT_CFG) + if (nvm->dir_type == BNXT_NVM_PORT_CFG) idx = bp->pf.port_id; - else if (nvm_param.dir_type == BNXT_NVM_FUNC_CFG) + else if (nvm->dir_type == BNXT_NVM_FUNC_CFG) idx = bp->pf.fw_fid - BNXT_FIRST_PF_FID; data = hwrm_req_dma_slice(bp, req, sizeof(*data), &data_dma_addr); @@ -1051,23 +1048,23 @@ static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg, } req->dest_data_addr = cpu_to_le64(data_dma_addr); - req->data_len = cpu_to_le16(nvm_param.nvm_num_bits); - req->option_num = cpu_to_le16(nvm_param.offset); + req->data_len = cpu_to_le16(nvm->nvm_num_bits); + req->option_num = cpu_to_le16(nvm->offset); req->index_0 = cpu_to_le16(idx); if (idx) req->dimensions = cpu_to_le16(1); resp = hwrm_req_hold(bp, req); if (req->req_type == cpu_to_le16(HWRM_NVM_SET_VARIABLE)) { - bnxt_copy_to_nvm_data(data, val, nvm_param.nvm_num_bits, - nvm_param.dl_num_bytes); + bnxt_copy_to_nvm_data(data, val, nvm->nvm_num_bits, + nvm->dl_num_bytes); rc = hwrm_req_send(bp, msg); } else { rc = hwrm_req_send_silent(bp, msg); if (!rc) { bnxt_copy_from_nvm_data(val, data, - nvm_param.nvm_num_bits, - nvm_param.dl_num_bytes); + nvm->nvm_num_bits, + nvm->dl_num_bytes); } else { if (resp->cmd_err == NVM_GET_VARIABLE_CMD_ERR_CODE_VAR_NOT_EXIST) @@ -1080,6 +1077,27 @@ static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg, return rc; } +static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg, + union devlink_param_value *val) +{ + struct hwrm_nvm_get_variable_input *req = msg; + const struct bnxt_dl_nvm_param *nvm_param; + int i; + + /* Get/Set NVM CFG parameter is supported only on PFs */ + if (BNXT_VF(bp)) { + hwrm_req_drop(bp, req); + return -EPERM; + } + + for (i = 0; i < ARRAY_SIZE(nvm_params); i++) { + nvm_param = &nvm_params[i]; + if (nvm_param->id == param_id) + return __bnxt_hwrm_nvm_req(bp, nvm_param, msg, val); + } + hwrm_req_drop(bp, req); + return -EOPNOTSUPP; +} + static int bnxt_dl_nvm_param_get(struct devlink *dl, u32 id, struct devlink_param_gset_ctx *ctx) { @@ -1116,6 +1134,32 @@ static int bnxt_dl_nvm_param_set(struct devlink *dl, u32 id, return bnxt_hwrm_nvm_req(bp, id, req, &ctx->val); } +static int bnxt_dl_roce_validate(struct devlink *dl, u32 id, + union devlink_param_value val, + struct netlink_ext_ack *extack) +{ + const struct bnxt_dl_nvm_param nvm_roce_cap = {0, NVM_OFF_RDMA_CAPABLE, + BNXT_NVM_SHARED_CFG, 1, 1}; + struct bnxt *bp = bnxt_get_bp_from_dl(dl); + struct hwrm_nvm_get_variable_input *req; + union devlink_param_value roce_cap; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_NVM_GET_VARIABLE); + if (rc) + 
return rc; + + if (__bnxt_hwrm_nvm_req(bp, &nvm_roce_cap, req, &roce_cap)) { + NL_SET_ERR_MSG_MOD(extack, "Unable to verify if device is RDMA Capable"); + return -EINVAL; + } + if (!roce_cap.vbool) { + NL_SET_ERR_MSG_MOD(extack, "Device does not support RDMA"); + return -EINVAL; + } + return 0; +} + static int bnxt_dl_msix_validate(struct devlink *dl, u32 id, union devlink_param_value val, struct netlink_ext_ack *extack) @@ -1180,6 +1224,10 @@ static const struct devlink_param bnxt_dl_params[] = { BIT(DEVLINK_PARAM_CMODE_PERMANENT), bnxt_dl_nvm_param_get, bnxt_dl_nvm_param_set, bnxt_dl_msix_validate), + DEVLINK_PARAM_GENERIC(ENABLE_ROCE, + BIT(DEVLINK_PARAM_CMODE_PERMANENT), + bnxt_dl_nvm_param_get, bnxt_dl_nvm_param_set, + bnxt_dl_roce_validate), DEVLINK_PARAM_DRIVER(BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK, "gre_ver_check", DEVLINK_PARAM_TYPE_BOOL, BIT(DEVLINK_PARAM_CMODE_PERMANENT), diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h index b8105065367b6..7f45dcd7b2879 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h @@ -41,8 +41,10 @@ static inline void bnxt_dl_set_remote_reset(struct devlink *dl, bool value) #define NVM_OFF_MSIX_VEC_PER_PF_MAX 108 #define NVM_OFF_MSIX_VEC_PER_PF_MIN 114 +#define NVM_OFF_RDMA_CAPABLE 161 #define NVM_OFF_IGNORE_ARI 164 #define NVM_OFF_DIS_GRE_VER_CHECK 171 #define NVM_OFF_ENABLE_SRIOV 401 +#define NVM_OFF_SUPPORT_RDMA 506 #define NVM_OFF_NVM_CFG_VER 602 #define BNXT_NVM_CFG_VER_BITS 8 diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 9c58208395144..48dd5922e4dd8 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -4541,16 +4541,16 @@ static int bnxt_get_module_status(struct bnxt *bp, struct netlink_ext_ack *extac return -EINVAL; } -static int bnxt_get_module_eeprom_by_page(struct net_device *dev, - const struct ethtool_module_eeprom *page_data, - struct netlink_ext_ack *extack) +static int +bnxt_mod_eeprom_by_page_precheck(struct bnxt *bp, + const struct ethtool_module_eeprom *page_data, + struct netlink_ext_ack *extack) { - struct bnxt *bp = netdev_priv(dev); int rc; if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)) { NL_SET_ERR_MSG_MOD(extack, - "Module read not permitted on untrusted VF"); + "Module read/write not permitted on untrusted VF"); return -EPERM; } @@ -4567,6 +4567,19 @@ static int bnxt_get_module_eeprom_by_page(struct net_device *dev, NL_SET_ERR_MSG_MOD(extack, "Firmware not capable for bank selection"); return -EINVAL; } + return 0; +} + +static int bnxt_get_module_eeprom_by_page(struct net_device *dev, + const struct ethtool_module_eeprom *page_data, + struct netlink_ext_ack *extack) +{ + struct bnxt *bp = netdev_priv(dev); + int rc; + + rc = bnxt_mod_eeprom_by_page_precheck(bp, page_data, extack); + if (rc) + return rc; rc = bnxt_read_sfp_module_eeprom_info(bp, page_data->i2c_address << 1, page_data->page, page_data->bank, @@ -4580,6 +4593,62 @@ static int bnxt_get_module_eeprom_by_page(struct net_device *dev, return page_data->length; } +static int bnxt_write_sfp_module_eeprom_info(struct bnxt *bp, + const struct ethtool_module_eeprom *page) +{ + struct hwrm_port_phy_i2c_write_input *req; + int bytes_written = 0; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_I2C_WRITE); + if (rc) + return rc; + + hwrm_req_hold(bp, req); + req->i2c_slave_addr = page->i2c_address << 1;
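+ /* convert the 7-bit I2C address to the 8-bit form used on the wire */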
+ req->page_number = cpu_to_le16(page->page); + req->bank_number = page->bank; + req->port_id = cpu_to_le16(bp->pf.port_id); + req->enables = cpu_to_le32(PORT_PHY_I2C_WRITE_REQ_ENABLES_PAGE_OFFSET | + PORT_PHY_I2C_WRITE_REQ_ENABLES_BANK_NUMBER); + + while (bytes_written < page->length) { + u16 xfer_size; + + xfer_size = min_t(u16, page->length - bytes_written, + BNXT_MAX_PHY_I2C_RESP_SIZE); + req->page_offset = cpu_to_le16(page->offset + bytes_written); + req->data_length = xfer_size; + memcpy(req->data, page->data + bytes_written, xfer_size); + rc = hwrm_req_send(bp, req); + if (rc) + break; + bytes_written += xfer_size; + } + + hwrm_req_drop(bp, req); + return rc; +} + +static int bnxt_set_module_eeprom_by_page(struct net_device *dev, + const struct ethtool_module_eeprom *page_data, + struct netlink_ext_ack *extack) +{ + struct bnxt *bp = netdev_priv(dev); + int rc; + + rc = bnxt_mod_eeprom_by_page_precheck(bp, page_data, extack); + if (rc) + return rc; + + rc = bnxt_write_sfp_module_eeprom_info(bp, page_data); + if (rc) { + NL_SET_ERR_MSG_MOD(extack, "Module's eeprom write failed"); + return rc; + } + return page_data->length; +} + static int bnxt_nway_reset(struct net_device *dev) { int rc = 0; @@ -5077,8 +5146,9 @@ static int bnxt_set_dump(struct net_device *dev, struct ethtool_dump *dump) { struct bnxt *bp = netdev_priv(dev); - if (dump->flag > BNXT_DUMP_DRIVER) { - netdev_info(dev, "Supports only Live(0), Crash(1), Driver(2) dumps.\n"); + if (dump->flag > BNXT_DUMP_LIVE_WITH_CTX_L1_CACHE) { + netdev_info(dev, + "Supports only Live(0), Crash(1), Driver(2), Live with cached context(3) dumps.\n"); return -EINVAL; } @@ -5441,6 +5511,7 @@ const struct ethtool_ops bnxt_ethtool_ops = { .get_module_info = bnxt_get_module_info, .get_module_eeprom = bnxt_get_module_eeprom, .get_module_eeprom_by_page = bnxt_get_module_eeprom_by_page, + .set_module_eeprom_by_page = bnxt_set_module_eeprom_by_page, .nway_reset = bnxt_nway_reset, .set_phys_id = bnxt_set_phys_id, .self_test = bnxt_self_test, diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h index 5f8de16343788..549231703bce7 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h @@ -2,7 +2,7 @@ * * Copyright (c) 2014-2016 Broadcom Corporation * Copyright (c) 2014-2018 Broadcom Limited - * Copyright (c) 2018-2024 Broadcom Inc. + * Copyright (c) 2018-2025 Broadcom Inc. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -438,6 +438,7 @@ struct cmd_nums { #define HWRM_MFG_PRVSN_EXPORT_CERT 0x219UL #define HWRM_STAT_DB_ERROR_QSTATS 0x21aUL #define HWRM_MFG_TESTS 0x21bUL + #define HWRM_MFG_WRITE_CERT_NVM 0x21cUL #define HWRM_PORT_POE_CFG 0x230UL #define HWRM_PORT_POE_QCFG 0x231UL #define HWRM_UDCC_QCAPS 0x258UL @@ -514,6 +515,8 @@ struct cmd_nums { #define HWRM_TFC_TBL_SCOPE_CONFIG_GET 0x39aUL #define HWRM_TFC_RESC_USAGE_QUERY 0x39bUL #define HWRM_TFC_GLOBAL_ID_FREE 0x39cUL + #define HWRM_TFC_TCAM_PRI_UPDATE 0x39dUL + #define HWRM_TFC_HOT_UPGRADE_PROCESS 0x3a0UL #define HWRM_SV 0x400UL #define HWRM_DBG_SERDES_TEST 0xff0eUL #define HWRM_DBG_LOG_BUFFER_FLUSH 0xff0fUL @@ -629,8 +632,8 @@ struct hwrm_err_output { #define HWRM_VERSION_MAJOR 1 #define HWRM_VERSION_MINOR 10 #define HWRM_VERSION_UPDATE 3 -#define HWRM_VERSION_RSVD 85 -#define HWRM_VERSION_STR "1.10.3.85" +#define HWRM_VERSION_RSVD 97 +#define HWRM_VERSION_STR "1.10.3.97" /* hwrm_ver_get_input (size:192b/24B) */ struct hwrm_ver_get_input { @@ -1905,11 +1908,15 @@ struct hwrm_func_qcaps_output { __le32 roce_vf_max_srq; __le32 roce_vf_max_gid; __le32 flags_ext3; - #define FUNC_QCAPS_RESP_FLAGS_EXT3_RM_RSV_WHILE_ALLOC_CAP 0x1UL - #define FUNC_QCAPS_RESP_FLAGS_EXT3_REQUIRE_L2_FILTER 0x2UL - #define FUNC_QCAPS_RESP_FLAGS_EXT3_MAX_ROCE_VFS_SUPPORTED 0x4UL + #define FUNC_QCAPS_RESP_FLAGS_EXT3_RM_RSV_WHILE_ALLOC_CAP 0x1UL + #define FUNC_QCAPS_RESP_FLAGS_EXT3_REQUIRE_L2_FILTER 0x2UL + #define FUNC_QCAPS_RESP_FLAGS_EXT3_MAX_ROCE_VFS_SUPPORTED 0x4UL + #define FUNC_QCAPS_RESP_FLAGS_EXT3_RX_RATE_PROFILE_SEL_SUPPORTED 0x8UL + #define FUNC_QCAPS_RESP_FLAGS_EXT3_BIDI_OPT_SUPPORTED 0x10UL + #define FUNC_QCAPS_RESP_FLAGS_EXT3_MIRROR_ON_ROCE_SUPPORTED 0x20UL __le16 max_roce_vfs; - u8 unused_3[5]; + __le16 max_crypto_rx_flow_filters; + u8 unused_3[3]; u8 valid; }; @@ -1924,7 +1931,7 @@ struct hwrm_func_qcfg_input { u8 unused_0[6]; }; -/* hwrm_func_qcfg_output (size:1280b/160B) */ +/* hwrm_func_qcfg_output (size:1344b/168B) */ struct hwrm_func_qcfg_output { __le16 error_code; __le16 req_type; @@ -2087,14 +2094,18 @@ struct hwrm_func_qcfg_output { __le16 host_mtu; __le16 flags2; #define FUNC_QCFG_RESP_FLAGS2_SRIOV_DSCP_INSERT_ENABLED 0x1UL - u8 unused_4[2]; + __le16 stag_vid; u8 port_kdnet_mode; #define FUNC_QCFG_RESP_PORT_KDNET_MODE_DISABLED 0x0UL #define FUNC_QCFG_RESP_PORT_KDNET_MODE_ENABLED 0x1UL #define FUNC_QCFG_RESP_PORT_KDNET_MODE_LAST FUNC_QCFG_RESP_PORT_KDNET_MODE_ENABLED u8 kdnet_pcie_function; __le16 port_kdnet_fid; - u8 unused_5[2]; + u8 unused_5; + u8 roce_bidi_opt_mode; + #define FUNC_QCFG_RESP_ROCE_BIDI_OPT_MODE_DISABLED 0x1UL + #define FUNC_QCFG_RESP_ROCE_BIDI_OPT_MODE_DEDICATED 0x2UL + #define FUNC_QCFG_RESP_ROCE_BIDI_OPT_MODE_SHARED 0x4UL __le32 num_ktls_tx_key_ctxs; __le32 num_ktls_rx_key_ctxs; u8 lag_id; @@ -2112,7 +2123,8 @@ struct hwrm_func_qcfg_output { __le16 xid_partition_cfg; #define FUNC_QCFG_RESP_XID_PARTITION_CFG_TX_CK 0x1UL #define FUNC_QCFG_RESP_XID_PARTITION_CFG_RX_CK 0x2UL - u8 unused_7; + __le16 mirror_vnic_id; + u8 unused_7[7]; u8 valid; }; @@ -3965,7 +3977,7 @@ struct ts_split_entries { __le32 region_num_entries; u8 tsid; u8 lkup_static_bkt_cnt_exp[2]; - u8 rsvd; + u8 locked; __le32 rsvd2[2]; }; @@ -5483,6 +5495,37 @@ struct hwrm_port_phy_qcaps_output { u8 valid; }; +/* hwrm_port_phy_i2c_write_input (size:832b/104B) */ +struct hwrm_port_phy_i2c_write_input { + __le16 req_type; + __le16 
cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + __le32 enables; + #define PORT_PHY_I2C_WRITE_REQ_ENABLES_PAGE_OFFSET 0x1UL + #define PORT_PHY_I2C_WRITE_REQ_ENABLES_BANK_NUMBER 0x2UL + __le16 port_id; + u8 i2c_slave_addr; + u8 bank_number; + __le16 page_number; + __le16 page_offset; + u8 data_length; + u8 unused_1[7]; + __le32 data[16]; +}; + +/* hwrm_port_phy_i2c_write_output (size:128b/16B) */ +struct hwrm_port_phy_i2c_write_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + /* hwrm_port_phy_i2c_read_input (size:320b/40B) */ struct hwrm_port_phy_i2c_read_input { __le16 req_type; @@ -6610,8 +6653,9 @@ struct hwrm_vnic_alloc_input { __le32 flags; #define VNIC_ALLOC_REQ_FLAGS_DEFAULT 0x1UL #define VNIC_ALLOC_REQ_FLAGS_VIRTIO_NET_FID_VALID 0x2UL + #define VNIC_ALLOC_REQ_FLAGS_VNIC_ID_VALID 0x4UL __le16 virtio_net_fid; - u8 unused_0[2]; + __le16 vnic_id; }; /* hwrm_vnic_alloc_output (size:128b/16B) */ @@ -6710,6 +6754,7 @@ struct hwrm_vnic_cfg_input { #define VNIC_CFG_REQ_ENABLES_QUEUE_ID 0x80UL #define VNIC_CFG_REQ_ENABLES_RX_CSUM_V2_MODE 0x100UL #define VNIC_CFG_REQ_ENABLES_L2_CQE_MODE 0x200UL + #define VNIC_CFG_REQ_ENABLES_RAW_QP_ID 0x400UL __le16 vnic_id; __le16 dflt_ring_grp; __le16 rss_rule; @@ -6729,7 +6774,7 @@ struct hwrm_vnic_cfg_input { #define VNIC_CFG_REQ_L2_CQE_MODE_COMPRESSED 0x1UL #define VNIC_CFG_REQ_L2_CQE_MODE_MIXED 0x2UL #define VNIC_CFG_REQ_L2_CQE_MODE_LAST VNIC_CFG_REQ_L2_CQE_MODE_MIXED - u8 unused0[4]; + __le32 raw_qp_id; }; /* hwrm_vnic_cfg_output (size:128b/16B) */ @@ -7082,6 +7127,15 @@ struct hwrm_vnic_plcmodes_cfg_output { u8 valid; }; +/* hwrm_vnic_plcmodes_cfg_cmd_err (size:64b/8B) */ +struct hwrm_vnic_plcmodes_cfg_cmd_err { + u8 code; + #define VNIC_PLCMODES_CFG_CMD_ERR_CODE_UNKNOWN 0x0UL + #define VNIC_PLCMODES_CFG_CMD_ERR_CODE_INVALID_HDS_THRESHOLD 0x1UL + #define VNIC_PLCMODES_CFG_CMD_ERR_CODE_LAST VNIC_PLCMODES_CFG_CMD_ERR_CODE_INVALID_HDS_THRESHOLD + u8 unused_0[7]; +}; + /* hwrm_vnic_rss_cos_lb_ctx_alloc_input (size:128b/16B) */ struct hwrm_vnic_rss_cos_lb_ctx_alloc_input { __le16 req_type; @@ -7131,15 +7185,16 @@ struct hwrm_ring_alloc_input { __le16 target_id; __le64 resp_addr; __le32 enables; - #define RING_ALLOC_REQ_ENABLES_RING_ARB_CFG 0x2UL - #define RING_ALLOC_REQ_ENABLES_STAT_CTX_ID_VALID 0x8UL - #define RING_ALLOC_REQ_ENABLES_MAX_BW_VALID 0x20UL - #define RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID 0x40UL - #define RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID 0x80UL - #define RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID 0x100UL - #define RING_ALLOC_REQ_ENABLES_SCHQ_ID 0x200UL - #define RING_ALLOC_REQ_ENABLES_MPC_CHNLS_TYPE 0x400UL - #define RING_ALLOC_REQ_ENABLES_STEERING_TAG_VALID 0x800UL + #define RING_ALLOC_REQ_ENABLES_RING_ARB_CFG 0x2UL + #define RING_ALLOC_REQ_ENABLES_STAT_CTX_ID_VALID 0x8UL + #define RING_ALLOC_REQ_ENABLES_MAX_BW_VALID 0x20UL + #define RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID 0x40UL + #define RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID 0x80UL + #define RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID 0x100UL + #define RING_ALLOC_REQ_ENABLES_SCHQ_ID 0x200UL + #define RING_ALLOC_REQ_ENABLES_MPC_CHNLS_TYPE 0x400UL + #define RING_ALLOC_REQ_ENABLES_STEERING_TAG_VALID 0x800UL + #define RING_ALLOC_REQ_ENABLES_RX_RATE_PROFILE_VALID 0x1000UL u8 ring_type; #define RING_ALLOC_REQ_RING_TYPE_L2_CMPL 0x0UL #define RING_ALLOC_REQ_RING_TYPE_TX 0x1UL @@ -7226,7 +7281,11 @@ struct hwrm_ring_alloc_input { #define RING_ALLOC_REQ_MPC_CHNLS_TYPE_RE_CFA 0x3UL #define 
RING_ALLOC_REQ_MPC_CHNLS_TYPE_PRIMATE 0x4UL #define RING_ALLOC_REQ_MPC_CHNLS_TYPE_LAST RING_ALLOC_REQ_MPC_CHNLS_TYPE_PRIMATE - u8 unused_4[2]; + u8 rx_rate_profile_sel; + #define RING_ALLOC_REQ_RX_RATE_PROFILE_SEL_DEFAULT 0x0UL + #define RING_ALLOC_REQ_RX_RATE_PROFILE_SEL_POLL_MODE 0x1UL + #define RING_ALLOC_REQ_RX_RATE_PROFILE_SEL_LAST RING_ALLOC_REQ_RX_RATE_PROFILE_SEL_POLL_MODE + u8 unused_4; __le64 cq_handle; }; @@ -9122,6 +9181,39 @@ struct pcie_ctx_hw_stats { __le64 pcie_recovery_histogram; }; +/* pcie_ctx_hw_stats_v2 (size:4096b/512B) */ +struct pcie_ctx_hw_stats_v2 { + __le64 pcie_pl_signal_integrity; + __le64 pcie_dl_signal_integrity; + __le64 pcie_tl_signal_integrity; + __le64 pcie_link_integrity; + __le64 pcie_tx_traffic_rate; + __le64 pcie_rx_traffic_rate; + __le64 pcie_tx_dllp_statistics; + __le64 pcie_rx_dllp_statistics; + __le64 pcie_equalization_time; + __le32 pcie_ltssm_histogram[4]; + __le64 pcie_recovery_histogram; + __le32 pcie_tl_credit_nph_histogram[8]; + __le32 pcie_tl_credit_ph_histogram[8]; + __le32 pcie_tl_credit_pd_histogram[8]; + __le32 pcie_cmpl_latest_times[4]; + __le32 pcie_cmpl_longest_time; + __le32 pcie_cmpl_shortest_time; + __le32 unused_0[2]; + __le32 pcie_cmpl_latest_headers[4][4]; + __le32 pcie_cmpl_longest_headers[4][4]; + __le32 pcie_cmpl_shortest_headers[4][4]; + __le32 pcie_wr_latency_histogram[12]; + __le32 pcie_wr_latency_all_normal_count; + __le32 unused_1; + __le64 pcie_posted_packet_count; + __le64 pcie_non_posted_packet_count; + __le64 pcie_other_packet_count; + __le64 pcie_blocked_packet_count; + __le64 pcie_cmpl_packet_count; +}; + /* hwrm_stat_generic_qstats_input (size:256b/32B) */ struct hwrm_stat_generic_qstats_input { __le16 req_type; @@ -9317,6 +9409,9 @@ struct hwrm_struct_hdr { #define STRUCT_HDR_STRUCT_ID_LAST STRUCT_HDR_STRUCT_ID_UDCC_RTT_BUCKET_BOUND __le16 len; u8 version; + #define STRUCT_HDR_VERSION_0 0x0UL + #define STRUCT_HDR_VERSION_1 0x1UL + #define STRUCT_HDR_VERSION_LAST STRUCT_HDR_VERSION_1 u8 count; __le16 subtype; __le16 next_offset; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c index 12b6ed51fd884..5ddddd89052ff 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c @@ -946,7 +946,9 @@ void bnxt_sriov_disable(struct bnxt *bp) /* Reclaim all resources for the PF. 
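+ * Runs under both rtnl_lock and the netdev instance lock.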
*/ rtnl_lock(); + netdev_lock(bp->dev); bnxt_restore_pf_fw_resources(bp); + netdev_unlock(bp->dev); rtnl_unlock(); } @@ -956,17 +958,21 @@ int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs) struct bnxt *bp = netdev_priv(dev); rtnl_lock(); + netdev_lock(dev); if (!netif_running(dev)) { netdev_warn(dev, "Reject SRIOV config request since if is down!\n"); + netdev_unlock(dev); rtnl_unlock(); return 0; } if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) { netdev_warn(dev, "Reject SRIOV config request when FW reset is in progress\n"); + netdev_unlock(dev); rtnl_unlock(); return 0; } bp->sriov_cfg = true; + netdev_unlock(dev); rtnl_unlock(); if (pci_vfs_assigned(bp->pdev)) { diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c index e4a7f37036edb..a8e930d5dbb09 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c @@ -112,7 +112,7 @@ int bnxt_register_dev(struct bnxt_en_dev *edev, struct bnxt_ulp *ulp; int rc = 0; - rtnl_lock(); + netdev_lock(dev); mutex_lock(&edev->en_dev_lock); if (!bp->irq_tbl) { rc = -ENODEV; @@ -138,7 +138,7 @@ int bnxt_register_dev(struct bnxt_en_dev *edev, edev->flags |= BNXT_EN_FLAG_MSIX_REQUESTED; exit: mutex_unlock(&edev->en_dev_lock); - rtnl_unlock(); + netdev_unlock(dev); return rc; } EXPORT_SYMBOL(bnxt_register_dev); @@ -151,7 +151,7 @@ void bnxt_unregister_dev(struct bnxt_en_dev *edev) int i = 0; ulp = edev->ulp_tbl; - rtnl_lock(); + netdev_lock(dev); mutex_lock(&edev->en_dev_lock); if (ulp->msix_requested) edev->flags &= ~BNXT_EN_FLAG_MSIX_REQUESTED; @@ -169,7 +169,7 @@ void bnxt_unregister_dev(struct bnxt_en_dev *edev) i++; } mutex_unlock(&edev->en_dev_lock); - rtnl_unlock(); + netdev_unlock(dev); return; } EXPORT_SYMBOL(bnxt_unregister_dev); @@ -309,12 +309,14 @@ void bnxt_ulp_irq_stop(struct bnxt *bp) if (!ulp->msix_requested) return; - ops = rtnl_dereference(ulp->ulp_ops); + netdev_lock(bp->dev); + ops = rcu_dereference(ulp->ulp_ops); if (!ops || !ops->ulp_irq_stop) return; if (test_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) reset = true; ops->ulp_irq_stop(ulp->handle, reset); + netdev_unlock(bp->dev); } } @@ -333,7 +335,8 @@ void bnxt_ulp_irq_restart(struct bnxt *bp, int err) if (!ulp->msix_requested) return; - ops = rtnl_dereference(ulp->ulp_ops); + netdev_lock(bp->dev); + ops = rcu_dereference(ulp->ulp_ops); if (!ops || !ops->ulp_irq_restart) return; @@ -345,6 +348,7 @@ void bnxt_ulp_irq_restart(struct bnxt *bp, int err) bnxt_fill_msix_vecs(bp, ent); } ops->ulp_irq_restart(ulp->handle, ent); + netdev_unlock(bp->dev); kfree(ent); } } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c index 1467b94a6427f..619f0844e7785 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c @@ -257,8 +257,7 @@ bool bnxt_dev_is_vf_rep(struct net_device *dev) /* Called when the parent PF interface is closed: * As the mode transition from SWITCHDEV to LEGACY - * happens under the rtnl_lock() this routine is safe - * under the rtnl_lock() + * happens under the netdev instance lock, this routine is safe */ void bnxt_vf_reps_close(struct bnxt *bp) { @@ -278,8 +277,7 @@ void bnxt_vf_reps_close(struct bnxt *bp) /* Called when the parent PF interface is opened (re-opened): * As the mode transition from SWITCHDEV to LEGACY - * happen under the rtnl_lock() this routine is safe - * under the rtnl_lock() + * happens under the netdev instance lock, this routine is 
safe */ void bnxt_vf_reps_open(struct bnxt *bp) { @@ -348,7 +346,7 @@ void bnxt_vf_reps_destroy(struct bnxt *bp) /* Ensure that parent PF's and VF-reps' RX/TX has been quiesced * before proceeding with VF-rep cleanup. */ - rtnl_lock(); + netdev_lock(bp->dev); if (netif_running(bp->dev)) { bnxt_close_nic(bp, false, false); closed = true; @@ -365,10 +363,10 @@ void bnxt_vf_reps_destroy(struct bnxt *bp) bnxt_open_nic(bp, false, false); bp->eswitch_mode = DEVLINK_ESWITCH_MODE_SWITCHDEV; } - rtnl_unlock(); + netdev_unlock(bp->dev); - /* Need to call vf_reps_destroy() outside of rntl_lock - * as unregister_netdev takes rtnl_lock + /* Need to call vf_reps_destroy() outside of the netdev instance lock + * as unregister_netdev takes it */ __bnxt_vf_reps_destroy(bp); } @@ -376,7 +374,7 @@ void bnxt_vf_reps_destroy(struct bnxt *bp) /* Free the VF-Reps in firmware, during firmware hot-reset processing. * Note that the VF-Rep netdevs are still active (not unregistered) during * this process. As the mode transition from SWITCHDEV to LEGACY happens - * under the rtnl_lock() this routine is safe under the rtnl_lock(). + * under the netdev instance lock, this routine is safe. */ void bnxt_vf_reps_free(struct bnxt *bp) { @@ -413,7 +411,7 @@ static int bnxt_alloc_vf_rep(struct bnxt *bp, struct bnxt_vf_rep *vf_rep, /* Allocate the VF-Reps in firmware, during firmware hot-reset processing. * Note that the VF-Rep netdevs are still active (not unregistered) during * this process. As the mode transition from SWITCHDEV to LEGACY happens - * under the rtnl_lock() this routine is safe under the rtnl_lock(). + * under the netdev instance lock, this routine is safe. */ int bnxt_vf_reps_alloc(struct bnxt *bp) { diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c index e6c64e4bd66c3..e675611777b52 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include "bnxt_hsi.h" #include "bnxt.h" @@ -48,8 +49,7 @@ struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp, tx_buf->page = virt_to_head_page(xdp->data); txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)]; - flags = (len << TX_BD_LEN_SHIFT) | - ((num_frags + 1) << TX_BD_FLAGS_BD_CNT_SHIFT) | + flags = (len << TX_BD_LEN_SHIFT) | TX_BD_CNT(num_frags + 1) | bnxt_lhint_arr[len >> 9]; txbd->tx_bd_len_flags_type = cpu_to_le32(flags); txbd->tx_bd_opaque = SET_TX_OPAQUE(bp, txr, prod, 1 + num_frags); @@ -382,13 +382,14 @@ int bnxt_xdp_xmit(struct net_device *dev, int num_frames, return nxmit; } -/* Under rtnl_lock */ static int bnxt_xdp_set(struct bnxt *bp, struct bpf_prog *prog) { struct net_device *dev = bp->dev; int tx_xdp = 0, tx_cp, rc, tc; struct bpf_prog *old; + netdev_assert_locked(dev); + if (prog && !prog->aux->xdp_has_frags && bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU) { netdev_warn(dev, "MTU %d larger than %d without XDP frag support.\n", @@ -460,23 +461,16 @@ int bnxt_xdp(struct net_device *dev, struct netdev_bpf *xdp) struct sk_buff * bnxt_xdp_build_skb(struct bnxt *bp, struct sk_buff *skb, u8 num_frags, - struct page_pool *pool, struct xdp_buff *xdp, - struct rx_cmp_ext *rxcmp1) + struct page_pool *pool, struct xdp_buff *xdp) { struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp); if (!skb) return NULL; - skb_checksum_none_assert(skb); - if (RX_CMP_L4_CS_OK(rxcmp1)) { - if (bp->dev->features & NETIF_F_RXCSUM) { - skb->ip_summed = CHECKSUM_UNNECESSARY; - skb->csum_level = 
RX_CMP_ENCAP(rxcmp1); - } - } + xdp_update_skb_shared_info(skb, num_frags, sinfo->xdp_frags_size, - BNXT_RX_PAGE_SIZE * sinfo->nr_frags, + BNXT_RX_PAGE_SIZE * num_frags, xdp_buff_is_frag_pfmemalloc(xdp)); return skb; } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h index 0122782400b8a..220285e190fcd 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h @@ -33,6 +33,5 @@ void bnxt_xdp_buff_frags_free(struct bnxt_rx_ring_info *rxr, struct xdp_buff *xdp); struct sk_buff *bnxt_xdp_build_skb(struct bnxt *bp, struct sk_buff *skb, u8 num_frags, struct page_pool *pool, - struct xdp_buff *xdp, - struct rx_cmp_ext *rxcmp1); + struct xdp_buff *xdp); #endif diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index 3e93f957430b2..73d78dcb774d9 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -2,7 +2,7 @@ /* * Broadcom GENET (Gigabit Ethernet) controller driver * - * Copyright (c) 2014-2024 Broadcom + * Copyright (c) 2014-2025 Broadcom */ #define pr_fmt(fmt) "bcmgenet: " fmt @@ -41,15 +41,13 @@ #include "bcmgenet.h" -/* Maximum number of hardware queues, downsized if needed */ -#define GENET_MAX_MQ_CNT 4 - /* Default highest priority queue for multi queue support */ -#define GENET_Q0_PRIORITY 0 +#define GENET_Q1_PRIORITY 0 +#define GENET_Q0_PRIORITY 1 -#define GENET_Q16_RX_BD_CNT \ +#define GENET_Q0_RX_BD_CNT \ (TOTAL_DESC - priv->hw_params->rx_queues * priv->hw_params->rx_bds_per_q) -#define GENET_Q16_TX_BD_CNT \ +#define GENET_Q0_TX_BD_CNT \ (TOTAL_DESC - priv->hw_params->tx_queues * priv->hw_params->tx_bds_per_q) #define RX_BUF_LENGTH 2048 @@ -104,7 +102,7 @@ static inline void dmadesc_set_addr(struct bcmgenet_priv *priv, * the platform is explicitly configured for 64-bits/LPAE. 
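 * (the high address word is written only on hardware with 40-bit DMA support)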
*/ #ifdef CONFIG_PHYS_ADDR_T_64BIT - if (priv->hw_params->flags & GENET_HAS_40BITS) + if (bcmgenet_has_40bits(priv)) bcmgenet_writel(upper_32_bits(addr), d + DMA_DESC_ADDRESS_HI); #endif } @@ -446,33 +444,48 @@ static void bcmgenet_hfb_enable_filter(struct bcmgenet_priv *priv, u32 f_index) u32 offset; u32 reg; - offset = HFB_FLT_ENABLE_V3PLUS + (f_index < 32) * sizeof(u32); - reg = bcmgenet_hfb_reg_readl(priv, offset); - reg |= (1 << (f_index % 32)); - bcmgenet_hfb_reg_writel(priv, reg, offset); - reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL); - reg |= RBUF_HFB_EN; - bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL); + if (GENET_IS_V1(priv) || GENET_IS_V2(priv)) { + reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL); + reg |= (1 << ((f_index % 32) + RBUF_HFB_FILTER_EN_SHIFT)) | + RBUF_HFB_EN; + bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL); + } else { + offset = HFB_FLT_ENABLE_V3PLUS + (f_index < 32) * sizeof(u32); + reg = bcmgenet_hfb_reg_readl(priv, offset); + reg |= (1 << (f_index % 32)); + bcmgenet_hfb_reg_writel(priv, reg, offset); + reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL); + reg |= RBUF_HFB_EN; + bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL); + } } static void bcmgenet_hfb_disable_filter(struct bcmgenet_priv *priv, u32 f_index) { u32 offset, reg, reg1; - offset = HFB_FLT_ENABLE_V3PLUS; - reg = bcmgenet_hfb_reg_readl(priv, offset); - reg1 = bcmgenet_hfb_reg_readl(priv, offset + sizeof(u32)); - if (f_index < 32) { - reg1 &= ~(1 << (f_index % 32)); - bcmgenet_hfb_reg_writel(priv, reg1, offset + sizeof(u32)); - } else { - reg &= ~(1 << (f_index % 32)); - bcmgenet_hfb_reg_writel(priv, reg, offset); - } - if (!reg && !reg1) { + if (GENET_IS_V1(priv) || GENET_IS_V2(priv)) { reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL); - reg &= ~RBUF_HFB_EN; + reg &= ~(1 << ((f_index % 32) + RBUF_HFB_FILTER_EN_SHIFT)); + if (!(reg & RBUF_HFB_FILTER_EN_MASK)) + reg &= ~RBUF_HFB_EN; bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL); + } else { + offset = HFB_FLT_ENABLE_V3PLUS; + reg = bcmgenet_hfb_reg_readl(priv, offset); + reg1 = bcmgenet_hfb_reg_readl(priv, offset + sizeof(u32)); + if (f_index < 32) { + reg1 &= ~(1 << (f_index % 32)); + bcmgenet_hfb_reg_writel(priv, reg1, offset + sizeof(u32)); + } else { + reg &= ~(1 << (f_index % 32)); + bcmgenet_hfb_reg_writel(priv, reg, offset); + } + if (!reg && !reg1) { + reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL); + reg &= ~RBUF_HFB_EN; + bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL); + } } } @@ -482,6 +495,9 @@ static void bcmgenet_hfb_set_filter_rx_queue_mapping(struct bcmgenet_priv *priv, u32 offset; u32 reg; + if (GENET_IS_V1(priv) || GENET_IS_V2(priv)) + return; + offset = f_index / 8; reg = bcmgenet_rdma_readl(priv, DMA_INDEX2RING_0 + offset); reg &= ~(0xF << (4 * (f_index % 8))); @@ -495,9 +511,13 @@ static void bcmgenet_hfb_set_filter_length(struct bcmgenet_priv *priv, u32 offset; u32 reg; - offset = HFB_FLT_LEN_V3PLUS + - ((priv->hw_params->hfb_filter_cnt - 1 - f_index) / 4) * - sizeof(u32); + if (GENET_IS_V1(priv) || GENET_IS_V2(priv)) + offset = HFB_FLT_LEN_V2; + else + offset = HFB_FLT_LEN_V3PLUS; + + offset += sizeof(u32) * + ((priv->hw_params->hfb_filter_cnt - 1 - f_index) / 4); reg = bcmgenet_hfb_reg_readl(priv, offset); reg &= ~(0xFF << (8 * (f_index % 4))); reg |= ((f_length & 0xFF) << (8 * (f_index % 4))); @@ -579,13 +599,13 @@ static void bcmgenet_hfb_create_rxnfc_filter(struct bcmgenet_priv *priv, struct bcmgenet_rxnfc_rule *rule) { struct ethtool_rx_flow_spec *fs = &rule->fs; - u32 offset = 0, f_length = 0, f; + u32 offset = 0, f_length = 0, f, q; u8 val_8, 
mask_8; __be16 val_16; u16 mask_16; size_t size; - f = fs->location; + f = fs->location + 1; if (fs->flow_type & FLOW_MAC_EXT) { bcmgenet_hfb_insert_data(priv, f, 0, &fs->h_ext.h_dest, &fs->m_ext.h_dest, @@ -667,19 +687,16 @@ static void bcmgenet_hfb_create_rxnfc_filter(struct bcmgenet_priv *priv, } bcmgenet_hfb_set_filter_length(priv, f, 2 * f_length); - if (!fs->ring_cookie || fs->ring_cookie == RX_CLS_FLOW_WAKE) { - /* Ring 0 flows can be handled by the default Descriptor Ring - * We'll map them to ring 0, but don't enable the filter - */ - bcmgenet_hfb_set_filter_rx_queue_mapping(priv, f, 0); - rule->state = BCMGENET_RXNFC_STATE_DISABLED; - } else { + if (fs->ring_cookie == RX_CLS_FLOW_WAKE) + q = 0; + else if (fs->ring_cookie == RX_CLS_FLOW_DISC) + q = priv->hw_params->rx_queues + 1; + else /* Other Rx rings are direct mapped here */ - bcmgenet_hfb_set_filter_rx_queue_mapping(priv, f, - fs->ring_cookie); - bcmgenet_hfb_enable_filter(priv, f); - rule->state = BCMGENET_RXNFC_STATE_ENABLED; - } + q = fs->ring_cookie; + bcmgenet_hfb_set_filter_rx_queue_mapping(priv, f, q); + bcmgenet_hfb_enable_filter(priv, f); + rule->state = BCMGENET_RXNFC_STATE_ENABLED; } /* bcmgenet_hfb_clear @@ -690,6 +707,7 @@ static void bcmgenet_hfb_clear_filter(struct bcmgenet_priv *priv, u32 f_index) { u32 base, i; + bcmgenet_hfb_set_filter_length(priv, f_index, 0); base = f_index * priv->hw_params->hfb_filter_size; for (i = 0; i < priv->hw_params->hfb_filter_size; i++) bcmgenet_hfb_writel(priv, 0x0, (base + i) * sizeof(u32)); @@ -699,22 +717,23 @@ static void bcmgenet_hfb_clear(struct bcmgenet_priv *priv) { u32 i; - if (GENET_IS_V1(priv) || GENET_IS_V2(priv)) - return; - - bcmgenet_hfb_reg_writel(priv, 0x0, HFB_CTRL); - bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS); - bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS + 4); - - for (i = DMA_INDEX2RING_0; i <= DMA_INDEX2RING_7; i++) - bcmgenet_rdma_writel(priv, 0x0, i); + bcmgenet_hfb_reg_writel(priv, 0, HFB_CTRL); - for (i = 0; i < (priv->hw_params->hfb_filter_cnt / 4); i++) - bcmgenet_hfb_reg_writel(priv, 0x0, - HFB_FLT_LEN_V3PLUS + i * sizeof(u32)); + if (!GENET_IS_V1(priv) && !GENET_IS_V2(priv)) { + bcmgenet_hfb_reg_writel(priv, 0, + HFB_FLT_ENABLE_V3PLUS); + bcmgenet_hfb_reg_writel(priv, 0, + HFB_FLT_ENABLE_V3PLUS + 4); + for (i = DMA_INDEX2RING_0; i <= DMA_INDEX2RING_7; i++) + bcmgenet_rdma_writel(priv, 0, i); + } for (i = 0; i < priv->hw_params->hfb_filter_cnt; i++) bcmgenet_hfb_clear_filter(priv, i); + + /* Enable filter 0 to send default flow to ring 0 */ + bcmgenet_hfb_set_filter_length(priv, 0, 4); + bcmgenet_hfb_enable_filter(priv, 0); } static void bcmgenet_hfb_init(struct bcmgenet_priv *priv) @@ -722,9 +741,6 @@ static void bcmgenet_hfb_init(struct bcmgenet_priv *priv) int i; INIT_LIST_HEAD(&priv->rxnfc_list); - if (GENET_IS_V1(priv) || GENET_IS_V2(priv)) - return; - for (i = 0; i < MAX_NUM_OF_FS_RULES; i++) { INIT_LIST_HEAD(&priv->rxnfc_rules[i].list); priv->rxnfc_rules[i].state = BCMGENET_RXNFC_STATE_UNUSED; @@ -819,20 +835,16 @@ static int bcmgenet_get_coalesce(struct net_device *dev, unsigned int i; ec->tx_max_coalesced_frames = - bcmgenet_tdma_ring_readl(priv, DESC_INDEX, - DMA_MBUF_DONE_THRESH); + bcmgenet_tdma_ring_readl(priv, 0, DMA_MBUF_DONE_THRESH); ec->rx_max_coalesced_frames = - bcmgenet_rdma_ring_readl(priv, DESC_INDEX, - DMA_MBUF_DONE_THRESH); + bcmgenet_rdma_ring_readl(priv, 0, DMA_MBUF_DONE_THRESH); ec->rx_coalesce_usecs = - bcmgenet_rdma_readl(priv, DMA_RING16_TIMEOUT) * 8192 / 1000; + bcmgenet_rdma_readl(priv, 
DMA_RING0_TIMEOUT) * 8192 / 1000; - for (i = 0; i < priv->hw_params->rx_queues; i++) { + for (i = 0; i <= priv->hw_params->rx_queues; i++) { ring = &priv->rx_rings[i]; ec->use_adaptive_rx_coalesce |= ring->dim.use_dim; } - ring = &priv->rx_rings[DESC_INDEX]; - ec->use_adaptive_rx_coalesce |= ring->dim.use_dim; return 0; } @@ -902,17 +914,13 @@ static int bcmgenet_set_coalesce(struct net_device *dev, /* Program all TX queues with the same values, as there is no * ethtool knob to do coalescing on a per-queue basis */ - for (i = 0; i < priv->hw_params->tx_queues; i++) + for (i = 0; i <= priv->hw_params->tx_queues; i++) bcmgenet_tdma_ring_writel(priv, i, ec->tx_max_coalesced_frames, DMA_MBUF_DONE_THRESH); - bcmgenet_tdma_ring_writel(priv, DESC_INDEX, - ec->tx_max_coalesced_frames, - DMA_MBUF_DONE_THRESH); - for (i = 0; i < priv->hw_params->rx_queues; i++) + for (i = 0; i <= priv->hw_params->rx_queues; i++) bcmgenet_set_ring_rx_coalesce(&priv->rx_rings[i], ec); - bcmgenet_set_ring_rx_coalesce(&priv->rx_rings[DESC_INDEX], ec); return 0; } @@ -1120,7 +1128,7 @@ static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = { STAT_GENET_Q(1), STAT_GENET_Q(2), STAT_GENET_Q(3), - STAT_GENET_Q(16), + STAT_GENET_Q(4), }; #define BCMGENET_STATS_LEN ARRAY_SIZE(bcmgenet_gstrings_stats) @@ -1438,7 +1446,8 @@ static int bcmgenet_insert_flow(struct net_device *dev, } if (cmd->fs.ring_cookie > priv->hw_params->rx_queues && - cmd->fs.ring_cookie != RX_CLS_FLOW_WAKE) { + cmd->fs.ring_cookie != RX_CLS_FLOW_WAKE && + cmd->fs.ring_cookie != RX_CLS_FLOW_DISC) { netdev_err(dev, "rxnfc: Unsupported action (%llu)\n", cmd->fs.ring_cookie); return -EINVAL; @@ -1472,10 +1481,10 @@ static int bcmgenet_insert_flow(struct net_device *dev, loc_rule = &priv->rxnfc_rules[cmd->fs.location]; } if (loc_rule->state == BCMGENET_RXNFC_STATE_ENABLED) - bcmgenet_hfb_disable_filter(priv, cmd->fs.location); + bcmgenet_hfb_disable_filter(priv, cmd->fs.location + 1); if (loc_rule->state != BCMGENET_RXNFC_STATE_UNUSED) { list_del(&loc_rule->list); - bcmgenet_hfb_clear_filter(priv, cmd->fs.location); + bcmgenet_hfb_clear_filter(priv, cmd->fs.location + 1); } loc_rule->state = BCMGENET_RXNFC_STATE_UNUSED; memcpy(&loc_rule->fs, &cmd->fs, @@ -1505,10 +1514,10 @@ static int bcmgenet_delete_flow(struct net_device *dev, } if (rule->state == BCMGENET_RXNFC_STATE_ENABLED) - bcmgenet_hfb_disable_filter(priv, cmd->fs.location); + bcmgenet_hfb_disable_filter(priv, cmd->fs.location + 1); if (rule->state != BCMGENET_RXNFC_STATE_UNUSED) { list_del(&rule->list); - bcmgenet_hfb_clear_filter(priv, cmd->fs.location); + bcmgenet_hfb_clear_filter(priv, cmd->fs.location + 1); } rule->state = BCMGENET_RXNFC_STATE_UNUSED; memset(&rule->fs, 0, sizeof(struct ethtool_rx_flow_spec)); @@ -1651,9 +1660,9 @@ static int bcmgenet_power_down(struct bcmgenet_priv *priv, case GENET_POWER_PASSIVE: /* Power down LED */ - if (priv->hw_params->flags & GENET_HAS_EXT) { + if (bcmgenet_has_ext(priv)) { reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT); - if (GENET_IS_V5(priv) && !priv->ephy_16nm) + if (GENET_IS_V5(priv) && !bcmgenet_has_ephy_16nm(priv)) reg |= EXT_PWR_DOWN_PHY_EN | EXT_PWR_DOWN_PHY_RD | EXT_PWR_DOWN_PHY_SD | @@ -1676,13 +1685,14 @@ static int bcmgenet_power_down(struct bcmgenet_priv *priv, return ret; } -static void bcmgenet_power_up(struct bcmgenet_priv *priv, - enum bcmgenet_power_mode mode) +static int bcmgenet_power_up(struct bcmgenet_priv *priv, + enum bcmgenet_power_mode mode) { + int ret = 0; u32 reg; - if (!(priv->hw_params->flags & GENET_HAS_EXT)) - 
return; + if (!bcmgenet_has_ext(priv)) + return ret; reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT); @@ -1690,7 +1700,7 @@ static void bcmgenet_power_up(struct bcmgenet_priv *priv, case GENET_POWER_PASSIVE: reg &= ~(EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS | EXT_ENERGY_DET_MASK); - if (GENET_IS_V5(priv) && !priv->ephy_16nm) { + if (GENET_IS_V5(priv) && !bcmgenet_has_ephy_16nm(priv)) { reg &= ~(EXT_PWR_DOWN_PHY_EN | EXT_PWR_DOWN_PHY_RD | EXT_PWR_DOWN_PHY_SD | @@ -1718,11 +1728,13 @@ static void bcmgenet_power_up(struct bcmgenet_priv *priv, } break; case GENET_POWER_WOL_MAGIC: - bcmgenet_wol_power_up_cfg(priv, mode); - return; + ret = bcmgenet_wol_power_up_cfg(priv, mode); + break; default: break; } + + return ret; } static struct enet_cb *bcmgenet_get_txcb(struct bcmgenet_priv *priv, @@ -1759,18 +1771,6 @@ static struct enet_cb *bcmgenet_put_txcb(struct bcmgenet_priv *priv, return tx_cb_ptr; } -static inline void bcmgenet_rx_ring16_int_disable(struct bcmgenet_rx_ring *ring) -{ - bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_RXDMA_DONE, - INTRL2_CPU_MASK_SET); -} - -static inline void bcmgenet_rx_ring16_int_enable(struct bcmgenet_rx_ring *ring) -{ - bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_RXDMA_DONE, - INTRL2_CPU_MASK_CLEAR); -} - static inline void bcmgenet_rx_ring_int_disable(struct bcmgenet_rx_ring *ring) { bcmgenet_intrl2_1_writel(ring->priv, @@ -1785,18 +1785,6 @@ static inline void bcmgenet_rx_ring_int_enable(struct bcmgenet_rx_ring *ring) INTRL2_CPU_MASK_CLEAR); } -static inline void bcmgenet_tx_ring16_int_disable(struct bcmgenet_tx_ring *ring) -{ - bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_TXDMA_DONE, - INTRL2_CPU_MASK_SET); -} - -static inline void bcmgenet_tx_ring16_int_enable(struct bcmgenet_tx_ring *ring) -{ - bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_TXDMA_DONE, - INTRL2_CPU_MASK_CLEAR); -} - static inline void bcmgenet_tx_ring_int_enable(struct bcmgenet_tx_ring *ring) { bcmgenet_intrl2_1_writel(ring->priv, 1 << ring->index, @@ -1877,12 +1865,7 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev, struct sk_buff *skb; /* Clear status before servicing to reduce spurious interrupts */ - if (ring->index == DESC_INDEX) - bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_TXDMA_DONE, - INTRL2_CPU_CLEAR); - else - bcmgenet_intrl2_1_writel(priv, (1 << ring->index), - INTRL2_CPU_CLEAR); + bcmgenet_intrl2_1_writel(priv, (1 << ring->index), INTRL2_CPU_CLEAR); /* Compute how many buffers are transmitted since last xmit call */ c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX) @@ -1916,19 +1899,46 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev, ring->packets += pkts_compl; ring->bytes += bytes_compl; - netdev_tx_completed_queue(netdev_get_tx_queue(dev, ring->queue), + netdev_tx_completed_queue(netdev_get_tx_queue(dev, ring->index), pkts_compl, bytes_compl); return txbds_processed; } static unsigned int bcmgenet_tx_reclaim(struct net_device *dev, - struct bcmgenet_tx_ring *ring) + struct bcmgenet_tx_ring *ring, + bool all) { - unsigned int released; + struct bcmgenet_priv *priv = netdev_priv(dev); + struct device *kdev = &priv->pdev->dev; + unsigned int released, drop, wr_ptr; + struct enet_cb *cb_ptr; + struct sk_buff *skb; spin_lock_bh(&ring->lock); released = __bcmgenet_tx_reclaim(dev, ring); + if (all) { + skb = NULL; + drop = (ring->prod_index - ring->c_index) & DMA_C_INDEX_MASK; + released += drop; + ring->prod_index = ring->c_index & DMA_C_INDEX_MASK; + while (drop--) { + cb_ptr = bcmgenet_put_txcb(priv, ring); + skb = 
cb_ptr->skb; + bcmgenet_free_tx_cb(kdev, cb_ptr); + if (skb && cb_ptr == GENET_CB(skb)->first_cb) { + dev_consume_skb_any(skb); + skb = NULL; + } + } + if (skb) + dev_consume_skb_any(skb); + bcmgenet_tdma_ring_writel(priv, ring->index, + ring->prod_index, TDMA_PROD_INDEX); + wr_ptr = ring->write_ptr * WORDS_PER_BD(priv); + bcmgenet_tdma_ring_writel(priv, ring->index, wr_ptr, + TDMA_WRITE_PTR); + } spin_unlock_bh(&ring->lock); return released; @@ -1944,14 +1954,14 @@ static int bcmgenet_tx_poll(struct napi_struct *napi, int budget) spin_lock(&ring->lock); work_done = __bcmgenet_tx_reclaim(ring->priv->dev, ring); if (ring->free_bds > (MAX_SKB_FRAGS + 1)) { - txq = netdev_get_tx_queue(ring->priv->dev, ring->queue); + txq = netdev_get_tx_queue(ring->priv->dev, ring->index); netif_tx_wake_queue(txq); } spin_unlock(&ring->lock); if (work_done == 0) { napi_complete(napi); - ring->int_enable(ring); + bcmgenet_tx_ring_int_enable(ring); return 0; } @@ -1962,14 +1972,11 @@ static int bcmgenet_tx_poll(struct napi_struct *napi, int budget) static void bcmgenet_tx_reclaim_all(struct net_device *dev) { struct bcmgenet_priv *priv = netdev_priv(dev); - int i; - - if (netif_is_multiqueue(dev)) { - for (i = 0; i < priv->hw_params->tx_queues; i++) - bcmgenet_tx_reclaim(dev, &priv->tx_rings[i]); - } + int i = 0; - bcmgenet_tx_reclaim(dev, &priv->tx_rings[DESC_INDEX]); + do { + bcmgenet_tx_reclaim(dev, &priv->tx_rings[i++], true); + } while (i <= priv->hw_params->tx_queues && netif_is_multiqueue(dev)); } /* Reallocate the SKB to put enough headroom in front of it and insert @@ -2057,19 +2064,14 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev) index = skb_get_queue_mapping(skb); /* Mapping strategy: - * queue_mapping = 0, unclassified, packet xmited through ring16 - * queue_mapping = 1, goes to ring 0. (highest priority queue - * queue_mapping = 2, goes to ring 1. - * queue_mapping = 3, goes to ring 2. - * queue_mapping = 4, goes to ring 3. + * queue_mapping = 0, unclassified, packet xmitted through ring 0 + * queue_mapping = 1, goes to ring 1. (highest priority queue) + * queue_mapping = 2, goes to ring 2. + * queue_mapping = 3, goes to ring 3. + * queue_mapping = 4, goes to ring 4. 
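+ * i.e. ring 0 is now the default queue and rings 1-4 are the priority rings.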
*/ - if (index == 0) - index = DESC_INDEX; - else - index -= 1; - ring = &priv->tx_rings[index]; - txq = netdev_get_tx_queue(dev, ring->queue); + txq = netdev_get_tx_queue(dev, index); nr_frags = skb_shinfo(skb)->nr_frags; @@ -2242,15 +2244,8 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring, unsigned int discards; /* Clear status before servicing to reduce spurious interrupts */ - if (ring->index == DESC_INDEX) { - bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_RXDMA_DONE, - INTRL2_CPU_CLEAR); - } else { - mask = 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index); - bcmgenet_intrl2_1_writel(priv, - mask, - INTRL2_CPU_CLEAR); - } + mask = 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index); + bcmgenet_intrl2_1_writel(priv, mask, INTRL2_CPU_CLEAR); p_index = bcmgenet_rdma_ring_readl(priv, ring->index, RDMA_PROD_INDEX); @@ -2399,7 +2394,7 @@ static int bcmgenet_rx_poll(struct napi_struct *napi, int budget) if (work_done < budget) { napi_complete_done(napi, work_done); - ring->int_enable(ring); + bcmgenet_rx_ring_int_enable(ring); } if (ring->dim.use_dim) { @@ -2523,7 +2518,7 @@ static void bcmgenet_link_intr_enable(struct bcmgenet_priv *priv) } else if (priv->ext_phy) { int0_enable |= UMAC_IRQ_LINK_EVENT; } else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) { - if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET) + if (bcmgenet_has_moca_link_det(priv)) int0_enable |= UMAC_IRQ_LINK_EVENT; } bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR); @@ -2588,8 +2583,8 @@ static void init_umac(struct bcmgenet_priv *priv) } /* Enable MDIO interrupts on GENET v3+ */ - if (priv->hw_params->flags & GENET_HAS_MDIO_INTR) - int0_enable |= (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR); + if (bcmgenet_has_mdio_intr(priv)) + int0_enable |= UMAC_IRQ_MDIO_EVENT; bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR); @@ -2639,15 +2634,6 @@ static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv, spin_lock_init(&ring->lock); ring->priv = priv; ring->index = index; - if (index == DESC_INDEX) { - ring->queue = 0; - ring->int_enable = bcmgenet_tx_ring16_int_enable; - ring->int_disable = bcmgenet_tx_ring16_int_disable; - } else { - ring->queue = index + 1; - ring->int_enable = bcmgenet_tx_ring_int_enable; - ring->int_disable = bcmgenet_tx_ring_int_disable; - } ring->cbs = priv->tx_cbs + start_ptr; ring->size = size; ring->clean_ptr = start_ptr; @@ -2658,8 +2644,8 @@ static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv, ring->end_ptr = end_ptr - 1; ring->prod_index = 0; - /* Set flow period for ring != 16 */ - if (index != DESC_INDEX) + /* Set flow period for ring != 0 */ + if (index) flow_period_val = ENET_MAX_MTU_SIZE << 16; bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_PROD_INDEX); @@ -2697,13 +2683,6 @@ static int bcmgenet_init_rx_ring(struct bcmgenet_priv *priv, ring->priv = priv; ring->index = index; - if (index == DESC_INDEX) { - ring->int_enable = bcmgenet_rx_ring16_int_enable; - ring->int_disable = bcmgenet_rx_ring16_int_disable; - } else { - ring->int_enable = bcmgenet_rx_ring_int_enable; - ring->int_disable = bcmgenet_rx_ring_int_disable; - } ring->cbs = priv->rx_cbs + start_ptr; ring->size = size; ring->c_index = 0; @@ -2749,15 +2728,11 @@ static void bcmgenet_enable_tx_napi(struct bcmgenet_priv *priv) unsigned int i; struct bcmgenet_tx_ring *ring; - for (i = 0; i < priv->hw_params->tx_queues; ++i) { + for (i = 0; i <= priv->hw_params->tx_queues; ++i) { ring = &priv->tx_rings[i]; napi_enable(&ring->napi); - ring->int_enable(ring); + 
bcmgenet_tx_ring_int_enable(ring); } - - ring = &priv->tx_rings[DESC_INDEX]; - napi_enable(&ring->napi); - ring->int_enable(ring); } static void bcmgenet_disable_tx_napi(struct bcmgenet_priv *priv) @@ -2765,13 +2740,10 @@ static void bcmgenet_disable_tx_napi(struct bcmgenet_priv *priv) unsigned int i; struct bcmgenet_tx_ring *ring; - for (i = 0; i < priv->hw_params->tx_queues; ++i) { + for (i = 0; i <= priv->hw_params->tx_queues; ++i) { ring = &priv->tx_rings[i]; napi_disable(&ring->napi); } - - ring = &priv->tx_rings[DESC_INDEX]; - napi_disable(&ring->napi); } static void bcmgenet_fini_tx_napi(struct bcmgenet_priv *priv) @@ -2779,82 +2751,104 @@ static void bcmgenet_fini_tx_napi(struct bcmgenet_priv *priv) unsigned int i; struct bcmgenet_tx_ring *ring; - for (i = 0; i < priv->hw_params->tx_queues; ++i) { + for (i = 0; i <= priv->hw_params->tx_queues; ++i) { ring = &priv->tx_rings[i]; netif_napi_del(&ring->napi); } +} - ring = &priv->tx_rings[DESC_INDEX]; - netif_napi_del(&ring->napi); +static int bcmgenet_tdma_disable(struct bcmgenet_priv *priv) +{ + int timeout = 0; + u32 reg, mask; + + reg = bcmgenet_tdma_readl(priv, DMA_CTRL); + mask = (1 << (priv->hw_params->tx_queues + 1)) - 1; + mask = (mask << DMA_RING_BUF_EN_SHIFT) | DMA_EN; + reg &= ~mask; + bcmgenet_tdma_writel(priv, reg, DMA_CTRL); + + /* Check DMA status register to confirm DMA is disabled */ + while (timeout++ < DMA_TIMEOUT_VAL) { + reg = bcmgenet_tdma_readl(priv, DMA_STATUS); + if ((reg & mask) == mask) + return 0; + + udelay(1); + } + + return -ETIMEDOUT; +} + +static int bcmgenet_rdma_disable(struct bcmgenet_priv *priv) +{ + int timeout = 0; + u32 reg, mask; + + reg = bcmgenet_rdma_readl(priv, DMA_CTRL); + mask = (1 << (priv->hw_params->rx_queues + 1)) - 1; + mask = (mask << DMA_RING_BUF_EN_SHIFT) | DMA_EN; + reg &= ~mask; + bcmgenet_rdma_writel(priv, reg, DMA_CTRL); + + /* Check DMA status register to confirm DMA is disabled */ + while (timeout++ < DMA_TIMEOUT_VAL) { + reg = bcmgenet_rdma_readl(priv, DMA_STATUS); + if ((reg & mask) == mask) + return 0; + + udelay(1); + } + + return -ETIMEDOUT; } /* Initialize Tx queues * - * Queues 0-3 are priority-based, each one has 32 descriptors, - * with queue 0 being the highest priority queue. + * Queues 1-4 are priority-based, each one has 32 descriptors, + * with queue 1 being the highest priority queue. * - * Queue 16 is the default Tx queue with - * GENET_Q16_TX_BD_CNT = 256 - 4 * 32 = 128 descriptors. + * Queue 0 is the default Tx queue with + * GENET_Q0_TX_BD_CNT = 256 - 4 * 32 = 128 descriptors. 
* * The transmit control block pool is then partitioned as follows: - * - Tx queue 0 uses tx_cbs[0..31] - * - Tx queue 1 uses tx_cbs[32..63] - * - Tx queue 2 uses tx_cbs[64..95] - * - Tx queue 3 uses tx_cbs[96..127] - * - Tx queue 16 uses tx_cbs[128..255] + * - Tx queue 0 uses tx_cbs[0..127] + * - Tx queue 1 uses tx_cbs[128..159] + * - Tx queue 2 uses tx_cbs[160..191] + * - Tx queue 3 uses tx_cbs[192..223] + * - Tx queue 4 uses tx_cbs[224..255] */ static void bcmgenet_init_tx_queues(struct net_device *dev) { struct bcmgenet_priv *priv = netdev_priv(dev); - u32 i, dma_enable; - u32 dma_ctrl, ring_cfg; - u32 dma_priority[3] = {0, 0, 0}; - - dma_ctrl = bcmgenet_tdma_readl(priv, DMA_CTRL); - dma_enable = dma_ctrl & DMA_EN; - dma_ctrl &= ~DMA_EN; - bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL); - - dma_ctrl = 0; - ring_cfg = 0; + unsigned int start = 0, end = GENET_Q0_TX_BD_CNT; + u32 i, ring_mask, dma_priority[3] = {0, 0, 0}; /* Enable strict priority arbiter mode */ bcmgenet_tdma_writel(priv, DMA_ARBITER_SP, DMA_ARB_CTRL); /* Initialize Tx priority queues */ - for (i = 0; i < priv->hw_params->tx_queues; i++) { - bcmgenet_init_tx_ring(priv, i, priv->hw_params->tx_bds_per_q, - i * priv->hw_params->tx_bds_per_q, - (i + 1) * priv->hw_params->tx_bds_per_q); - ring_cfg |= (1 << i); - dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT)); + for (i = 0; i <= priv->hw_params->tx_queues; i++) { + bcmgenet_init_tx_ring(priv, i, end - start, start, end); + start = end; + end += priv->hw_params->tx_bds_per_q; dma_priority[DMA_PRIO_REG_INDEX(i)] |= - ((GENET_Q0_PRIORITY + i) << DMA_PRIO_REG_SHIFT(i)); + (i ? GENET_Q1_PRIORITY : GENET_Q0_PRIORITY) + << DMA_PRIO_REG_SHIFT(i); } - /* Initialize Tx default queue 16 */ - bcmgenet_init_tx_ring(priv, DESC_INDEX, GENET_Q16_TX_BD_CNT, - priv->hw_params->tx_queues * - priv->hw_params->tx_bds_per_q, - TOTAL_DESC); - ring_cfg |= (1 << DESC_INDEX); - dma_ctrl |= (1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT)); - dma_priority[DMA_PRIO_REG_INDEX(DESC_INDEX)] |= - ((GENET_Q0_PRIORITY + priv->hw_params->tx_queues) << - DMA_PRIO_REG_SHIFT(DESC_INDEX)); - /* Set Tx queue priorities */ bcmgenet_tdma_writel(priv, dma_priority[0], DMA_PRIORITY_0); bcmgenet_tdma_writel(priv, dma_priority[1], DMA_PRIORITY_1); bcmgenet_tdma_writel(priv, dma_priority[2], DMA_PRIORITY_2); - /* Enable Tx queues */ - bcmgenet_tdma_writel(priv, ring_cfg, DMA_RING_CFG); + /* Configure Tx queues as descriptor rings */ + ring_mask = (1 << (priv->hw_params->tx_queues + 1)) - 1; + bcmgenet_tdma_writel(priv, ring_mask, DMA_RING_CFG); - /* Enable Tx DMA */ - if (dma_enable) - dma_ctrl |= DMA_EN; - bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL); + /* Enable Tx rings */ + ring_mask <<= DMA_RING_BUF_EN_SHIFT; + bcmgenet_tdma_writel(priv, ring_mask, DMA_CTRL); } static void bcmgenet_enable_rx_napi(struct bcmgenet_priv *priv) @@ -2862,15 +2856,11 @@ static void bcmgenet_enable_rx_napi(struct bcmgenet_priv *priv) unsigned int i; struct bcmgenet_rx_ring *ring; - for (i = 0; i < priv->hw_params->rx_queues; ++i) { + for (i = 0; i <= priv->hw_params->rx_queues; ++i) { ring = &priv->rx_rings[i]; napi_enable(&ring->napi); - ring->int_enable(ring); + bcmgenet_rx_ring_int_enable(ring); } - - ring = &priv->rx_rings[DESC_INDEX]; - napi_enable(&ring->napi); - ring->int_enable(ring); } static void bcmgenet_disable_rx_napi(struct bcmgenet_priv *priv) @@ -2878,15 +2868,11 @@ static void bcmgenet_disable_rx_napi(struct bcmgenet_priv *priv) unsigned int i; struct bcmgenet_rx_ring *ring; - for (i = 0; i < priv->hw_params->rx_queues; ++i) { + 
for (i = 0; i <= priv->hw_params->rx_queues; ++i) { ring = &priv->rx_rings[i]; napi_disable(&ring->napi); cancel_work_sync(&ring->dim.dim.work); } - - ring = &priv->rx_rings[DESC_INDEX]; - napi_disable(&ring->napi); - cancel_work_sync(&ring->dim.dim.work); } static void bcmgenet_fini_rx_napi(struct bcmgenet_priv *priv) @@ -2894,13 +2880,10 @@ static void bcmgenet_fini_rx_napi(struct bcmgenet_priv *priv) unsigned int i; struct bcmgenet_rx_ring *ring; - for (i = 0; i < priv->hw_params->rx_queues; ++i) { + for (i = 0; i <= priv->hw_params->rx_queues; ++i) { ring = &priv->rx_rings[i]; netif_napi_del(&ring->napi); } - - ring = &priv->rx_rings[DESC_INDEX]; - netif_napi_del(&ring->napi); } /* Initialize Rx queues @@ -2908,57 +2891,32 @@ static void bcmgenet_fini_rx_napi(struct bcmgenet_priv *priv) * Queues 0-15 are priority queues. Hardware Filtering Block (HFB) can be * used to direct traffic to these queues. * - * Queue 16 is the default Rx queue with GENET_Q16_RX_BD_CNT descriptors. + * Queue 0 is also the default Rx queue with GENET_Q0_RX_BD_CNT descriptors. */ static int bcmgenet_init_rx_queues(struct net_device *dev) { struct bcmgenet_priv *priv = netdev_priv(dev); - u32 i; - u32 dma_enable; - u32 dma_ctrl; - u32 ring_cfg; + unsigned int start = 0, end = GENET_Q0_RX_BD_CNT; + u32 i, ring_mask; int ret; - dma_ctrl = bcmgenet_rdma_readl(priv, DMA_CTRL); - dma_enable = dma_ctrl & DMA_EN; - dma_ctrl &= ~DMA_EN; - bcmgenet_rdma_writel(priv, dma_ctrl, DMA_CTRL); - - dma_ctrl = 0; - ring_cfg = 0; - /* Initialize Rx priority queues */ - for (i = 0; i < priv->hw_params->rx_queues; i++) { - ret = bcmgenet_init_rx_ring(priv, i, - priv->hw_params->rx_bds_per_q, - i * priv->hw_params->rx_bds_per_q, - (i + 1) * - priv->hw_params->rx_bds_per_q); + for (i = 0; i <= priv->hw_params->rx_queues; i++) { + ret = bcmgenet_init_rx_ring(priv, i, end - start, start, end); if (ret) return ret; - ring_cfg |= (1 << i); - dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT)); + start = end; + end += priv->hw_params->rx_bds_per_q; } - /* Initialize Rx default queue 16 */ - ret = bcmgenet_init_rx_ring(priv, DESC_INDEX, GENET_Q16_RX_BD_CNT, - priv->hw_params->rx_queues * - priv->hw_params->rx_bds_per_q, - TOTAL_DESC); - if (ret) - return ret; - - ring_cfg |= (1 << DESC_INDEX); - dma_ctrl |= (1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT)); - - /* Enable rings */ - bcmgenet_rdma_writel(priv, ring_cfg, DMA_RING_CFG); + /* Configure Rx queues as descriptor rings */ + ring_mask = (1 << (priv->hw_params->rx_queues + 1)) - 1; + bcmgenet_rdma_writel(priv, ring_mask, DMA_RING_CFG); - /* Configure ring as descriptor ring and re-enable DMA if enabled */ - if (dma_enable) - dma_ctrl |= DMA_EN; - bcmgenet_rdma_writel(priv, dma_ctrl, DMA_CTRL); + /* Enable Rx rings */ + ring_mask <<= DMA_RING_BUF_EN_SHIFT; + bcmgenet_rdma_writel(priv, ring_mask, DMA_CTRL); return 0; } @@ -2966,26 +2924,9 @@ static int bcmgenet_init_rx_queues(struct net_device *dev) static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv) { int ret = 0; - int timeout = 0; - u32 reg; - u32 dma_ctrl; - int i; /* Disable TDMA to stop add more frames in TX DMA */ - reg = bcmgenet_tdma_readl(priv, DMA_CTRL); - reg &= ~DMA_EN; - bcmgenet_tdma_writel(priv, reg, DMA_CTRL); - - /* Check TDMA status register to confirm TDMA is disabled */ - while (timeout++ < DMA_TIMEOUT_VAL) { - reg = bcmgenet_tdma_readl(priv, DMA_STATUS); - if (reg & DMA_DISABLED) - break; - - udelay(1); - } - - if (timeout == DMA_TIMEOUT_VAL) { + if (-ETIMEDOUT == bcmgenet_tdma_disable(priv)) { netdev_warn(priv->dev, 
"Timed out while disabling TX DMA\n"); ret = -ETIMEDOUT; } @@ -2994,39 +2935,11 @@ static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv) usleep_range(10000, 20000); /* Disable RDMA */ - reg = bcmgenet_rdma_readl(priv, DMA_CTRL); - reg &= ~DMA_EN; - bcmgenet_rdma_writel(priv, reg, DMA_CTRL); - - timeout = 0; - /* Check RDMA status register to confirm RDMA is disabled */ - while (timeout++ < DMA_TIMEOUT_VAL) { - reg = bcmgenet_rdma_readl(priv, DMA_STATUS); - if (reg & DMA_DISABLED) - break; - - udelay(1); - } - - if (timeout == DMA_TIMEOUT_VAL) { + if (-ETIMEDOUT == bcmgenet_rdma_disable(priv)) { netdev_warn(priv->dev, "Timed out while disabling RX DMA\n"); ret = -ETIMEDOUT; } - dma_ctrl = 0; - for (i = 0; i < priv->hw_params->rx_queues; i++) - dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT)); - reg = bcmgenet_rdma_readl(priv, DMA_CTRL); - reg &= ~dma_ctrl; - bcmgenet_rdma_writel(priv, reg, DMA_CTRL); - - dma_ctrl = 0; - for (i = 0; i < priv->hw_params->tx_queues; i++) - dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT)); - reg = bcmgenet_tdma_readl(priv, DMA_CTRL); - reg &= ~dma_ctrl; - bcmgenet_tdma_writel(priv, reg, DMA_CTRL); - return ret; } @@ -3038,32 +2951,53 @@ static void bcmgenet_fini_dma(struct bcmgenet_priv *priv) bcmgenet_fini_rx_napi(priv); bcmgenet_fini_tx_napi(priv); - for (i = 0; i < priv->num_tx_bds; i++) - dev_kfree_skb(bcmgenet_free_tx_cb(&priv->pdev->dev, - priv->tx_cbs + i)); - - for (i = 0; i < priv->hw_params->tx_queues; i++) { - txq = netdev_get_tx_queue(priv->dev, priv->tx_rings[i].queue); + for (i = 0; i <= priv->hw_params->tx_queues; i++) { + txq = netdev_get_tx_queue(priv->dev, i); netdev_tx_reset_queue(txq); } - txq = netdev_get_tx_queue(priv->dev, priv->tx_rings[DESC_INDEX].queue); - netdev_tx_reset_queue(txq); - bcmgenet_free_rx_buffers(priv); kfree(priv->rx_cbs); kfree(priv->tx_cbs); } /* init_edma: Initialize DMA control register */ -static int bcmgenet_init_dma(struct bcmgenet_priv *priv) +static int bcmgenet_init_dma(struct bcmgenet_priv *priv, bool flush_rx) { - int ret; - unsigned int i; struct enet_cb *cb; + unsigned int i; + int ret; + u32 reg; netif_dbg(priv, hw, priv->dev, "%s\n", __func__); + /* Disable TX DMA */ + ret = bcmgenet_tdma_disable(priv); + if (ret) { + netdev_err(priv->dev, "failed to halt Tx DMA\n"); + return ret; + } + + /* Disable RX DMA */ + ret = bcmgenet_rdma_disable(priv); + if (ret) { + netdev_err(priv->dev, "failed to halt Rx DMA\n"); + return ret; + } + + /* Flush TX queues */ + bcmgenet_umac_writel(priv, 1, UMAC_TX_FLUSH); + udelay(10); + bcmgenet_umac_writel(priv, 0, UMAC_TX_FLUSH); + + if (flush_rx) { + reg = bcmgenet_rbuf_ctrl_get(priv); + bcmgenet_rbuf_ctrl_set(priv, reg | BIT(0)); + udelay(10); + bcmgenet_rbuf_ctrl_set(priv, reg); + udelay(10); + } + /* Initialize common Rx ring structures */ priv->rx_bds = priv->base + priv->hw_params->rdma_offset; priv->num_rx_bds = TOTAL_DESC; @@ -3113,6 +3047,15 @@ static int bcmgenet_init_dma(struct bcmgenet_priv *priv) /* Initialize Tx queues */ bcmgenet_init_tx_queues(priv->dev); + /* Enable RX/TX DMA */ + reg = bcmgenet_rdma_readl(priv, DMA_CTRL); + reg |= DMA_EN; + bcmgenet_rdma_writel(priv, reg, DMA_CTRL); + + reg = bcmgenet_tdma_readl(priv, DMA_CTRL); + reg |= DMA_EN; + bcmgenet_tdma_writel(priv, reg, DMA_CTRL); + return 0; } @@ -3142,7 +3085,7 @@ static void bcmgenet_irq_task(struct work_struct *work) } -/* bcmgenet_isr1: handle Rx and Tx priority queues */ +/* bcmgenet_isr1: handle Rx and Tx queues */ static irqreturn_t bcmgenet_isr1(int irq, void *dev_id) { struct 
bcmgenet_priv *priv = dev_id; @@ -3161,7 +3104,7 @@ static irqreturn_t bcmgenet_isr1(int irq, void *dev_id) "%s: IRQ=0x%x\n", __func__, status); /* Check Rx priority queue interrupts */ - for (index = 0; index < priv->hw_params->rx_queues; index++) { + for (index = 0; index <= priv->hw_params->rx_queues; index++) { if (!(status & BIT(UMAC_IRQ1_RX_INTR_SHIFT + index))) continue; @@ -3169,20 +3112,20 @@ static irqreturn_t bcmgenet_isr1(int irq, void *dev_id) rx_ring->dim.event_ctr++; if (likely(napi_schedule_prep(&rx_ring->napi))) { - rx_ring->int_disable(rx_ring); + bcmgenet_rx_ring_int_disable(rx_ring); __napi_schedule_irqoff(&rx_ring->napi); } } /* Check Tx priority queue interrupts */ - for (index = 0; index < priv->hw_params->tx_queues; index++) { + for (index = 0; index <= priv->hw_params->tx_queues; index++) { if (!(status & BIT(index))) continue; tx_ring = &priv->tx_rings[index]; if (likely(napi_schedule_prep(&tx_ring->napi))) { - tx_ring->int_disable(tx_ring); + bcmgenet_tx_ring_int_disable(tx_ring); __napi_schedule_irqoff(&tx_ring->napi); } } @@ -3190,12 +3133,10 @@ static irqreturn_t bcmgenet_isr1(int irq, void *dev_id) return IRQ_HANDLED; } -/* bcmgenet_isr0: handle Rx and Tx default queues + other stuff */ +/* bcmgenet_isr0: handle other stuff */ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id) { struct bcmgenet_priv *priv = dev_id; - struct bcmgenet_rx_ring *rx_ring; - struct bcmgenet_tx_ring *tx_ring; unsigned int status; unsigned long flags; @@ -3209,29 +3150,8 @@ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id) netif_dbg(priv, intr, priv->dev, "IRQ=0x%x\n", status); - if (status & UMAC_IRQ_RXDMA_DONE) { - rx_ring = &priv->rx_rings[DESC_INDEX]; - rx_ring->dim.event_ctr++; - - if (likely(napi_schedule_prep(&rx_ring->napi))) { - rx_ring->int_disable(rx_ring); - __napi_schedule_irqoff(&rx_ring->napi); - } - } - - if (status & UMAC_IRQ_TXDMA_DONE) { - tx_ring = &priv->tx_rings[DESC_INDEX]; - - if (likely(napi_schedule_prep(&tx_ring->napi))) { - tx_ring->int_disable(tx_ring); - __napi_schedule_irqoff(&tx_ring->napi); - } - } - - if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) && - status & (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR)) { + if (bcmgenet_has_mdio_intr(priv) && status & UMAC_IRQ_MDIO_EVENT) wake_up(&priv->wq); - } /* all other interested interrupts handled in bottom half */ status &= (UMAC_IRQ_LINK_EVENT | UMAC_IRQ_PHY_DET_R); @@ -3285,56 +3205,6 @@ static void bcmgenet_get_hw_addr(struct bcmgenet_priv *priv, put_unaligned_be16(addr_tmp, &addr[4]); } -/* Returns a reusable dma control register value */ -static u32 bcmgenet_dma_disable(struct bcmgenet_priv *priv, bool flush_rx) -{ - unsigned int i; - u32 reg; - u32 dma_ctrl; - - /* disable DMA */ - dma_ctrl = 1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT) | DMA_EN; - for (i = 0; i < priv->hw_params->tx_queues; i++) - dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT)); - reg = bcmgenet_tdma_readl(priv, DMA_CTRL); - reg &= ~dma_ctrl; - bcmgenet_tdma_writel(priv, reg, DMA_CTRL); - - dma_ctrl = 1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT) | DMA_EN; - for (i = 0; i < priv->hw_params->rx_queues; i++) - dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT)); - reg = bcmgenet_rdma_readl(priv, DMA_CTRL); - reg &= ~dma_ctrl; - bcmgenet_rdma_writel(priv, reg, DMA_CTRL); - - bcmgenet_umac_writel(priv, 1, UMAC_TX_FLUSH); - udelay(10); - bcmgenet_umac_writel(priv, 0, UMAC_TX_FLUSH); - - if (flush_rx) { - reg = bcmgenet_rbuf_ctrl_get(priv); - bcmgenet_rbuf_ctrl_set(priv, reg | BIT(0)); - udelay(10); - bcmgenet_rbuf_ctrl_set(priv, reg); - 
udelay(10); - } - - return dma_ctrl; -} - -static void bcmgenet_enable_dma(struct bcmgenet_priv *priv, u32 dma_ctrl) -{ - u32 reg; - - reg = bcmgenet_rdma_readl(priv, DMA_CTRL); - reg |= dma_ctrl; - bcmgenet_rdma_writel(priv, reg, DMA_CTRL); - - reg = bcmgenet_tdma_readl(priv, DMA_CTRL); - reg |= dma_ctrl; - bcmgenet_tdma_writel(priv, reg, DMA_CTRL); -} - static void bcmgenet_netif_start(struct net_device *dev) { struct bcmgenet_priv *priv = netdev_priv(dev); @@ -3358,7 +3228,6 @@ static void bcmgenet_netif_start(struct net_device *dev) static int bcmgenet_open(struct net_device *dev) { struct bcmgenet_priv *priv = netdev_priv(dev); - unsigned long dma_ctrl; int ret; netif_dbg(priv, ifup, dev, "bcmgenet_open\n"); @@ -3384,22 +3253,16 @@ static int bcmgenet_open(struct net_device *dev) bcmgenet_set_hw_addr(priv, dev->dev_addr); - /* Disable RX/TX DMA and flush TX and RX queues */ - dma_ctrl = bcmgenet_dma_disable(priv, true); + /* HFB init */ + bcmgenet_hfb_init(priv); /* Reinitialize TDMA and RDMA and SW housekeeping */ - ret = bcmgenet_init_dma(priv); + ret = bcmgenet_init_dma(priv, true); if (ret) { netdev_err(dev, "failed to initialize DMA\n"); goto err_clk_disable; } - /* Always enable ring 16 - descriptor ring */ - bcmgenet_enable_dma(priv, dma_ctrl); - - /* HFB init */ - bcmgenet_hfb_init(priv); - ret = request_irq(priv->irq0, bcmgenet_isr0, IRQF_SHARED, dev->name, priv); if (ret < 0) { @@ -3446,19 +3309,21 @@ static void bcmgenet_netif_stop(struct net_device *dev, bool stop_phy) { struct bcmgenet_priv *priv = netdev_priv(dev); - bcmgenet_disable_tx_napi(priv); netif_tx_disable(dev); /* Disable MAC receive */ + bcmgenet_hfb_reg_writel(priv, 0, HFB_CTRL); umac_enable_set(priv, CMD_RX_EN, false); + if (stop_phy) + phy_stop(dev->phydev); + bcmgenet_dma_teardown(priv); /* Disable MAC transmit. TX DMA disabled must be done before this */ umac_enable_set(priv, CMD_TX_EN, false); - if (stop_phy) - phy_stop(dev->phydev); + bcmgenet_disable_tx_napi(priv); bcmgenet_disable_rx_napi(priv); bcmgenet_intr_disable(priv); @@ -3506,16 +3371,11 @@ static void bcmgenet_dump_tx_queue(struct bcmgenet_tx_ring *ring) if (!netif_msg_tx_err(priv)) return; - txq = netdev_get_tx_queue(priv->dev, ring->queue); + txq = netdev_get_tx_queue(priv->dev, ring->index); spin_lock(&ring->lock); - if (ring->index == DESC_INDEX) { - intsts = ~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS); - intmsk = UMAC_IRQ_TXDMA_DONE | UMAC_IRQ_TXDMA_MBDONE; - } else { - intsts = ~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS); - intmsk = 1 << ring->index; - } + intsts = ~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS); + intmsk = 1 << ring->index; c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX); p_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_PROD_INDEX); txq_stopped = netif_tx_queue_stopped(txq); @@ -3529,7 +3389,7 @@ static void bcmgenet_dump_tx_queue(struct bcmgenet_tx_ring *ring) "(sw)c_index: %d (hw)c_index: %d\n" "(sw)clean_p: %d (sw)write_p: %d\n" "(sw)cb_ptr: %d (sw)end_ptr: %d\n", - ring->index, ring->queue, + ring->index, ring->index, txq_stopped ? "stopped" : "active", intsts & intmsk ? 
"enabled" : "disabled", free_bds, ring->size, @@ -3542,25 +3402,20 @@ static void bcmgenet_dump_tx_queue(struct bcmgenet_tx_ring *ring) static void bcmgenet_timeout(struct net_device *dev, unsigned int txqueue) { struct bcmgenet_priv *priv = netdev_priv(dev); - u32 int0_enable = 0; u32 int1_enable = 0; unsigned int q; netif_dbg(priv, tx_err, dev, "bcmgenet_timeout\n"); - for (q = 0; q < priv->hw_params->tx_queues; q++) + for (q = 0; q <= priv->hw_params->tx_queues; q++) bcmgenet_dump_tx_queue(&priv->tx_rings[q]); - bcmgenet_dump_tx_queue(&priv->tx_rings[DESC_INDEX]); bcmgenet_tx_reclaim_all(dev); - for (q = 0; q < priv->hw_params->tx_queues; q++) + for (q = 0; q <= priv->hw_params->tx_queues; q++) int1_enable |= (1 << q); - int0_enable = UMAC_IRQ_TXDMA_DONE; - /* Re-enable TX interrupts if disabled */ - bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR); bcmgenet_intrl2_1_writel(priv, int1_enable, INTRL2_CPU_MASK_CLEAR); netif_trans_update(dev); @@ -3664,16 +3519,13 @@ static struct net_device_stats *bcmgenet_get_stats(struct net_device *dev) struct bcmgenet_rx_ring *rx_ring; unsigned int q; - for (q = 0; q < priv->hw_params->tx_queues; q++) { + for (q = 0; q <= priv->hw_params->tx_queues; q++) { tx_ring = &priv->tx_rings[q]; tx_bytes += tx_ring->bytes; tx_packets += tx_ring->packets; } - tx_ring = &priv->tx_rings[DESC_INDEX]; - tx_bytes += tx_ring->bytes; - tx_packets += tx_ring->packets; - for (q = 0; q < priv->hw_params->rx_queues; q++) { + for (q = 0; q <= priv->hw_params->rx_queues; q++) { rx_ring = &priv->rx_rings[q]; rx_bytes += rx_ring->bytes; @@ -3681,11 +3533,6 @@ static struct net_device_stats *bcmgenet_get_stats(struct net_device *dev) rx_errors += rx_ring->errors; rx_dropped += rx_ring->dropped; } - rx_ring = &priv->rx_rings[DESC_INDEX]; - rx_bytes += rx_ring->bytes; - rx_packets += rx_ring->packets; - rx_errors += rx_ring->errors; - rx_dropped += rx_ring->dropped; dev->stats.tx_bytes = tx_bytes; dev->stats.tx_packets = tx_packets; @@ -3726,128 +3573,109 @@ static const struct net_device_ops bcmgenet_netdev_ops = { .ndo_change_carrier = bcmgenet_change_carrier, }; -/* Array of GENET hardware parameters/characteristics */ -static struct bcmgenet_hw_params bcmgenet_hw_params[] = { - [GENET_V1] = { - .tx_queues = 0, - .tx_bds_per_q = 0, - .rx_queues = 0, - .rx_bds_per_q = 0, - .bp_in_en_shift = 16, - .bp_in_mask = 0xffff, - .hfb_filter_cnt = 16, - .qtag_mask = 0x1F, - .hfb_offset = 0x1000, - .rdma_offset = 0x2000, - .tdma_offset = 0x3000, - .words_per_bd = 2, - }, - [GENET_V2] = { - .tx_queues = 4, - .tx_bds_per_q = 32, - .rx_queues = 0, - .rx_bds_per_q = 0, - .bp_in_en_shift = 16, - .bp_in_mask = 0xffff, - .hfb_filter_cnt = 16, - .qtag_mask = 0x1F, - .tbuf_offset = 0x0600, - .hfb_offset = 0x1000, - .hfb_reg_offset = 0x2000, - .rdma_offset = 0x3000, - .tdma_offset = 0x4000, - .words_per_bd = 2, - .flags = GENET_HAS_EXT, - }, - [GENET_V3] = { - .tx_queues = 4, - .tx_bds_per_q = 32, - .rx_queues = 0, - .rx_bds_per_q = 0, - .bp_in_en_shift = 17, - .bp_in_mask = 0x1ffff, - .hfb_filter_cnt = 48, - .hfb_filter_size = 128, - .qtag_mask = 0x3F, - .tbuf_offset = 0x0600, - .hfb_offset = 0x8000, - .hfb_reg_offset = 0xfc00, - .rdma_offset = 0x10000, - .tdma_offset = 0x11000, - .words_per_bd = 2, - .flags = GENET_HAS_EXT | GENET_HAS_MDIO_INTR | - GENET_HAS_MOCA_LINK_DET, - }, - [GENET_V4] = { - .tx_queues = 4, - .tx_bds_per_q = 32, - .rx_queues = 0, - .rx_bds_per_q = 0, - .bp_in_en_shift = 17, - .bp_in_mask = 0x1ffff, - .hfb_filter_cnt = 48, - .hfb_filter_size = 128, - 
.qtag_mask = 0x3F, - .tbuf_offset = 0x0600, - .hfb_offset = 0x8000, - .hfb_reg_offset = 0xfc00, - .rdma_offset = 0x2000, - .tdma_offset = 0x4000, - .words_per_bd = 3, - .flags = GENET_HAS_40BITS | GENET_HAS_EXT | - GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET, - }, - [GENET_V5] = { - .tx_queues = 4, - .tx_bds_per_q = 32, - .rx_queues = 0, - .rx_bds_per_q = 0, - .bp_in_en_shift = 17, - .bp_in_mask = 0x1ffff, - .hfb_filter_cnt = 48, - .hfb_filter_size = 128, - .qtag_mask = 0x3F, - .tbuf_offset = 0x0600, - .hfb_offset = 0x8000, - .hfb_reg_offset = 0xfc00, - .rdma_offset = 0x2000, - .tdma_offset = 0x4000, - .words_per_bd = 3, - .flags = GENET_HAS_40BITS | GENET_HAS_EXT | - GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET, - }, +/* GENET hardware parameters/characteristics */ +static const struct bcmgenet_hw_params bcmgenet_hw_params_v1 = { + .tx_queues = 0, + .tx_bds_per_q = 0, + .rx_queues = 0, + .rx_bds_per_q = 0, + .bp_in_en_shift = 16, + .bp_in_mask = 0xffff, + .hfb_filter_cnt = 16, + .hfb_filter_size = 64, + .qtag_mask = 0x1F, + .hfb_offset = 0x1000, + .hfb_reg_offset = GENET_RBUF_OFF + RBUF_HFB_CTRL_V1, + .rdma_offset = 0x2000, + .tdma_offset = 0x3000, + .words_per_bd = 2, +}; + +static const struct bcmgenet_hw_params bcmgenet_hw_params_v2 = { + .tx_queues = 4, + .tx_bds_per_q = 32, + .rx_queues = 0, + .rx_bds_per_q = 0, + .bp_in_en_shift = 16, + .bp_in_mask = 0xffff, + .hfb_filter_cnt = 16, + .hfb_filter_size = 64, + .qtag_mask = 0x1F, + .tbuf_offset = 0x0600, + .hfb_offset = 0x1000, + .hfb_reg_offset = 0x2000, + .rdma_offset = 0x3000, + .tdma_offset = 0x4000, + .words_per_bd = 2, +}; + +static const struct bcmgenet_hw_params bcmgenet_hw_params_v3 = { + .tx_queues = 4, + .tx_bds_per_q = 32, + .rx_queues = 0, + .rx_bds_per_q = 0, + .bp_in_en_shift = 17, + .bp_in_mask = 0x1ffff, + .hfb_filter_cnt = 48, + .hfb_filter_size = 128, + .qtag_mask = 0x3F, + .tbuf_offset = 0x0600, + .hfb_offset = 0x8000, + .hfb_reg_offset = 0xfc00, + .rdma_offset = 0x10000, + .tdma_offset = 0x11000, + .words_per_bd = 2, +}; + +static const struct bcmgenet_hw_params bcmgenet_hw_params_v4 = { + .tx_queues = 4, + .tx_bds_per_q = 32, + .rx_queues = 0, + .rx_bds_per_q = 0, + .bp_in_en_shift = 17, + .bp_in_mask = 0x1ffff, + .hfb_filter_cnt = 48, + .hfb_filter_size = 128, + .qtag_mask = 0x3F, + .tbuf_offset = 0x0600, + .hfb_offset = 0x8000, + .hfb_reg_offset = 0xfc00, + .rdma_offset = 0x2000, + .tdma_offset = 0x4000, + .words_per_bd = 3, }; /* Infer hardware parameters from the detected GENET version */ static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv) { - struct bcmgenet_hw_params *params; + const struct bcmgenet_hw_params *params; u32 reg; u8 major; u16 gphy_rev; - if (GENET_IS_V5(priv) || GENET_IS_V4(priv)) { - bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus; - genet_dma_ring_regs = genet_dma_ring_regs_v4; - } else if (GENET_IS_V3(priv)) { + /* default to latest values */ + params = &bcmgenet_hw_params_v4; + bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus; + genet_dma_ring_regs = genet_dma_ring_regs_v4; + if (GENET_IS_V3(priv)) { + params = &bcmgenet_hw_params_v3; bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus; genet_dma_ring_regs = genet_dma_ring_regs_v123; } else if (GENET_IS_V2(priv)) { + params = &bcmgenet_hw_params_v2; bcmgenet_dma_regs = bcmgenet_dma_regs_v2; genet_dma_ring_regs = genet_dma_ring_regs_v123; } else if (GENET_IS_V1(priv)) { + params = &bcmgenet_hw_params_v1; bcmgenet_dma_regs = bcmgenet_dma_regs_v1; genet_dma_ring_regs = genet_dma_ring_regs_v123; } - - /* enum genet_version starts at 1 */ - 
priv->hw_params = &bcmgenet_hw_params[priv->version]; - params = priv->hw_params; + priv->hw_params = params; /* Read GENET HW version */ reg = bcmgenet_sys_readl(priv, SYS_REV_CTRL); major = (reg >> 24 & 0x0f); - if (major == 6) + if (major == 6 || major == 7) major = 5; else if (major == 5) major = 4; @@ -3898,7 +3726,7 @@ static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv) } #ifdef CONFIG_PHYS_ADDR_T_64BIT - if (!(params->flags & GENET_HAS_40BITS)) + if (!bcmgenet_has_40bits(priv)) pr_warn("GENET does not support 40-bits PA\n"); #endif @@ -3923,7 +3751,7 @@ static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv) struct bcmgenet_plat_data { enum bcmgenet_version version; u32 dma_max_burst_length; - bool ephy_16nm; + u32 flags; }; static const struct bcmgenet_plat_data v1_plat_data = { @@ -3934,32 +3762,43 @@ static const struct bcmgenet_plat_data v1_plat_data = { static const struct bcmgenet_plat_data v2_plat_data = { .version = GENET_V2, .dma_max_burst_length = DMA_MAX_BURST_LENGTH, + .flags = GENET_HAS_EXT, }; static const struct bcmgenet_plat_data v3_plat_data = { .version = GENET_V3, .dma_max_burst_length = DMA_MAX_BURST_LENGTH, + .flags = GENET_HAS_EXT | GENET_HAS_MDIO_INTR | + GENET_HAS_MOCA_LINK_DET, }; static const struct bcmgenet_plat_data v4_plat_data = { .version = GENET_V4, .dma_max_burst_length = DMA_MAX_BURST_LENGTH, + .flags = GENET_HAS_40BITS | GENET_HAS_EXT | + GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET, }; static const struct bcmgenet_plat_data v5_plat_data = { .version = GENET_V5, .dma_max_burst_length = DMA_MAX_BURST_LENGTH, + .flags = GENET_HAS_40BITS | GENET_HAS_EXT | + GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET, }; static const struct bcmgenet_plat_data bcm2711_plat_data = { .version = GENET_V5, .dma_max_burst_length = 0x08, + .flags = GENET_HAS_40BITS | GENET_HAS_EXT | + GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET, }; static const struct bcmgenet_plat_data bcm7712_plat_data = { .version = GENET_V5, .dma_max_burst_length = DMA_MAX_BURST_LENGTH, - .ephy_16nm = true, + .flags = GENET_HAS_40BITS | GENET_HAS_EXT | + GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET | + GENET_HAS_EPHY_16NM, }; static const struct of_device_id bcmgenet_match[] = { @@ -4057,7 +3896,7 @@ static int bcmgenet_probe(struct platform_device *pdev) if (pdata) { priv->version = pdata->version; priv->dma_max_burst_length = pdata->dma_max_burst_length; - priv->ephy_16nm = pdata->ephy_16nm; + priv->flags = pdata->flags; } else { priv->version = pd->genet_version; priv->dma_max_burst_length = DMA_MAX_BURST_LENGTH; @@ -4077,7 +3916,7 @@ static int bcmgenet_probe(struct platform_device *pdev) bcmgenet_set_hw_params(priv); err = -EIO; - if (priv->hw_params->flags & GENET_HAS_40BITS) + if (bcmgenet_has_40bits(priv)) err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40)); if (err) err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); @@ -4132,16 +3971,13 @@ static int bcmgenet_probe(struct platform_device *pdev) if (err) goto err_clk_disable; - /* setup number of real queues + 1 (GENET_V1 has 0 hardware queues - * just the ring 16 descriptor based TX - */ + /* setup number of real queues + 1 */ netif_set_real_num_tx_queues(priv->dev, priv->hw_params->tx_queues + 1); netif_set_real_num_rx_queues(priv->dev, priv->hw_params->rx_queues + 1); /* Set default coalescing parameters */ - for (i = 0; i < priv->hw_params->rx_queues; i++) + for (i = 0; i <= priv->hw_params->rx_queues; i++) priv->rx_rings[i].rx_max_coalesced_frames = 1; - 
priv->rx_rings[DESC_INDEX].rx_max_coalesced_frames = 1; /* libphy will determine the link state */ netif_carrier_off(dev); @@ -4205,9 +4041,22 @@ static int bcmgenet_resume_noirq(struct device *d) reg = bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT); if (reg & UMAC_IRQ_WAKE_EVENT) pm_wakeup_event(&priv->pdev->dev, 0); + + /* From WOL-enabled suspend, switch to regular clock */ + if (!bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC)) + return 0; + + /* Failed so fall through to reset MAC */ } - bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_WAKE_EVENT, INTRL2_CPU_CLEAR); + /* If this is an internal GPHY, power it back on now, before UniMAC is + * brought out of reset as absolutely no UniMAC activity is allowed + */ + if (priv->internal_phy) + bcmgenet_power_up(priv, GENET_POWER_PASSIVE); + + /* take MAC out of reset */ + bcmgenet_umac_reset(priv); return 0; } @@ -4217,23 +4066,46 @@ static int bcmgenet_resume(struct device *d) struct net_device *dev = dev_get_drvdata(d); struct bcmgenet_priv *priv = netdev_priv(dev); struct bcmgenet_rxnfc_rule *rule; - unsigned long dma_ctrl; int ret; + u32 reg; if (!netif_running(dev)) return 0; - /* From WOL-enabled suspend, switch to regular clock */ - if (device_may_wakeup(d) && priv->wolopts) - bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC); - - /* If this is an internal GPHY, power it back on now, before UniMAC is - * brought out of reset as absolutely no UniMAC activity is allowed - */ - if (priv->internal_phy) - bcmgenet_power_up(priv, GENET_POWER_PASSIVE); - - bcmgenet_umac_reset(priv); + if (device_may_wakeup(d) && priv->wolopts) { + reg = bcmgenet_umac_readl(priv, UMAC_CMD); + if (reg & CMD_RX_EN) { + /* Successfully exited WoL, just resume data flows */ + list_for_each_entry(rule, &priv->rxnfc_list, list) + if (rule->state == BCMGENET_RXNFC_STATE_ENABLED) + bcmgenet_hfb_enable_filter(priv, + rule->fs.location + 1); + bcmgenet_hfb_enable_filter(priv, 0); + bcmgenet_set_rx_mode(dev); + bcmgenet_enable_rx_napi(priv); + + /* Reinitialize Tx flows */ + bcmgenet_tdma_disable(priv); + bcmgenet_init_tx_queues(priv->dev); + reg = bcmgenet_tdma_readl(priv, DMA_CTRL); + reg |= DMA_EN; + bcmgenet_tdma_writel(priv, reg, DMA_CTRL); + bcmgenet_enable_tx_napi(priv); + + bcmgenet_link_intr_enable(priv); + phy_start_machine(dev->phydev); + + netif_device_attach(dev); + enable_irq(priv->irq1); + return 0; + } + /* MAC was reset so complete bcmgenet_netif_stop() */ + umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, false); + bcmgenet_rdma_disable(priv); + bcmgenet_intr_disable(priv); + bcmgenet_fini_dma(priv); + enable_irq(priv->irq1); + } init_umac(priv); @@ -4254,19 +4126,13 @@ static int bcmgenet_resume(struct device *d) if (rule->state != BCMGENET_RXNFC_STATE_UNUSED) bcmgenet_hfb_create_rxnfc_filter(priv, rule); - /* Disable RX/TX DMA and flush TX queues */ - dma_ctrl = bcmgenet_dma_disable(priv, false); - /* Reinitialize TDMA and RDMA and SW housekeeping */ - ret = bcmgenet_init_dma(priv); + ret = bcmgenet_init_dma(priv, false); if (ret) { netdev_err(dev, "failed to initialize DMA\n"); goto out_clk_disable; } - /* Always enable ring 16 - descriptor ring */ - bcmgenet_enable_dma(priv, dma_ctrl); - if (!device_may_wakeup(d)) phy_resume(dev->phydev); @@ -4287,19 +4153,52 @@ static int bcmgenet_suspend(struct device *d) { struct net_device *dev = dev_get_drvdata(d); struct bcmgenet_priv *priv = netdev_priv(dev); + struct bcmgenet_rxnfc_rule *rule; + u32 reg, hfb_enable = 0; if (!netif_running(dev)) return 0; netif_device_detach(dev); - bcmgenet_netif_stop(dev, true); + if 
(device_may_wakeup(d) && priv->wolopts) { + netif_tx_disable(dev); + + /* Suspend non-wake Rx data flows */ + if (priv->wolopts & WAKE_FILTER) + list_for_each_entry(rule, &priv->rxnfc_list, list) + if (rule->fs.ring_cookie == RX_CLS_FLOW_WAKE && + rule->state == BCMGENET_RXNFC_STATE_ENABLED) + hfb_enable |= 1 << rule->fs.location; + reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL); + if (GENET_IS_V1(priv) || GENET_IS_V2(priv)) { + reg &= ~RBUF_HFB_FILTER_EN_MASK; + reg |= hfb_enable << (RBUF_HFB_FILTER_EN_SHIFT + 1); + } else { + bcmgenet_hfb_reg_writel(priv, hfb_enable << 1, + HFB_FLT_ENABLE_V3PLUS + 4); + } + if (!hfb_enable) + reg &= ~RBUF_HFB_EN; + bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL); - if (!device_may_wakeup(d)) - phy_suspend(dev->phydev); + /* Clear any old filter matches so only new matches wake */ + bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET); + bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR); - /* Disable filtering */ - bcmgenet_hfb_reg_writel(priv, 0, HFB_CTRL); + if (-ETIMEDOUT == bcmgenet_tdma_disable(priv)) + netdev_warn(priv->dev, + "Timed out while disabling TX DMA\n"); + + bcmgenet_disable_tx_napi(priv); + bcmgenet_disable_rx_napi(priv); + disable_irq(priv->irq1); + bcmgenet_tx_reclaim_all(dev); + bcmgenet_fini_tx_napi(priv); + } else { + /* Teardown the interface */ + bcmgenet_netif_stop(dev, true); + } return 0; } diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h index 43b923c48b14f..10c631bbe9649 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Copyright (c) 2014-2024 Broadcom + * Copyright (c) 2014-2025 Broadcom */ #ifndef __BCMGENET_H__ @@ -18,6 +18,9 @@ #include "../unimac.h" +/* Maximum number of hardware queues, downsized if needed */ +#define GENET_MAX_MQ_CNT 4 + /* total number of Buffer Descriptors, same for Rx/Tx */ #define TOTAL_DESC 256 @@ -271,6 +274,8 @@ struct bcmgenet_mib_counters { /* Only valid for GENETv3+ */ #define UMAC_IRQ_MDIO_DONE (1 << 23) #define UMAC_IRQ_MDIO_ERROR (1 << 24) +#define UMAC_IRQ_MDIO_EVENT (UMAC_IRQ_MDIO_DONE | \ + UMAC_IRQ_MDIO_ERROR) /* INTRL2 instance 1 definitions */ #define UMAC_IRQ1_TX_INTR_MASK 0xFFFF @@ -476,6 +481,7 @@ enum bcmgenet_version { #define GENET_HAS_EXT (1 << 1) #define GENET_HAS_MDIO_INTR (1 << 2) #define GENET_HAS_MOCA_LINK_DET (1 << 3) +#define GENET_HAS_EPHY_16NM (1 << 4) /* BCMGENET hardware parameters, keep this structure nicely aligned * since it is going to be used in hot paths @@ -496,7 +502,6 @@ struct bcmgenet_hw_params { u32 rdma_offset; u32 tdma_offset; u32 words_per_bd; - u32 flags; }; struct bcmgenet_skb_cb { @@ -513,7 +518,6 @@ struct bcmgenet_tx_ring { unsigned long packets; unsigned long bytes; unsigned int index; /* ring index */ - unsigned int queue; /* queue index */ struct enet_cb *cbs; /* tx ring buffer control block*/ unsigned int size; /* size of each tx ring */ unsigned int clean_ptr; /* Tx ring clean pointer */ @@ -523,8 +527,6 @@ struct bcmgenet_tx_ring { unsigned int prod_index; /* Tx ring producer index SW copy */ unsigned int cb_ptr; /* Tx ring initial CB ptr */ unsigned int end_ptr; /* Tx ring end CB ptr */ - void (*int_enable)(struct bcmgenet_tx_ring *); - void (*int_disable)(struct bcmgenet_tx_ring *); struct bcmgenet_priv *priv; }; @@ -553,8 +555,6 @@ struct bcmgenet_rx_ring { struct bcmgenet_net_dim dim; u32 rx_max_coalesced_frames; u32 
rx_coalesce_usecs; - void (*int_enable)(struct bcmgenet_rx_ring *); - void (*int_disable)(struct bcmgenet_rx_ring *); struct bcmgenet_priv *priv; }; @@ -583,7 +583,7 @@ struct bcmgenet_priv { struct enet_cb *tx_cbs; unsigned int num_tx_bds; - struct bcmgenet_tx_ring tx_rings[DESC_INDEX + 1]; + struct bcmgenet_tx_ring tx_rings[GENET_MAX_MQ_CNT + 1]; /* receive variables */ void __iomem *rx_bds; @@ -593,10 +593,11 @@ struct bcmgenet_priv { struct bcmgenet_rxnfc_rule rxnfc_rules[MAX_NUM_OF_FS_RULES]; struct list_head rxnfc_list; - struct bcmgenet_rx_ring rx_rings[DESC_INDEX + 1]; + struct bcmgenet_rx_ring rx_rings[GENET_MAX_MQ_CNT + 1]; /* other misc variables */ - struct bcmgenet_hw_params *hw_params; + const struct bcmgenet_hw_params *hw_params; + u32 flags; unsigned autoneg_pause:1; unsigned tx_pause:1; unsigned rx_pause:1; @@ -615,7 +616,6 @@ struct bcmgenet_priv { phy_interface_t phy_interface; int phy_addr; int ext_phy; - bool ephy_16nm; /* Interrupt variables */ struct work_struct bcmgenet_irq_work; @@ -643,13 +643,37 @@ struct bcmgenet_priv { struct clk *clk_wol; u32 wolopts; u8 sopass[SOPASS_MAX]; - bool wol_active; struct bcmgenet_mib_counters mib; struct ethtool_keee eee; }; +static inline bool bcmgenet_has_40bits(struct bcmgenet_priv *priv) +{ + return !!(priv->flags & GENET_HAS_40BITS); +} + +static inline bool bcmgenet_has_ext(struct bcmgenet_priv *priv) +{ + return !!(priv->flags & GENET_HAS_EXT); +} + +static inline bool bcmgenet_has_mdio_intr(struct bcmgenet_priv *priv) +{ + return !!(priv->flags & GENET_HAS_MDIO_INTR); +} + +static inline bool bcmgenet_has_moca_link_det(struct bcmgenet_priv *priv) +{ + return !!(priv->flags & GENET_HAS_MOCA_LINK_DET); +} + +static inline bool bcmgenet_has_ephy_16nm(struct bcmgenet_priv *priv) +{ + return !!(priv->flags & GENET_HAS_EPHY_16NM); +} + #define GENET_IO_MACRO(name, offset) \ static inline u32 bcmgenet_##name##_readl(struct bcmgenet_priv *priv, \ u32 off) \ @@ -702,8 +726,8 @@ void bcmgenet_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol); int bcmgenet_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol); int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv, enum bcmgenet_power_mode mode); -void bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv, - enum bcmgenet_power_mode mode); +int bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv, + enum bcmgenet_power_mode mode); void bcmgenet_eee_enable_set(struct net_device *dev, bool enable, bool tx_lpi_enabled); diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c index 3b082114f2e53..8fb5512882980 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c @@ -2,7 +2,7 @@ /* * Broadcom GENET (Gigabit Ethernet) Wake-on-LAN support * - * Copyright (c) 2014-2024 Broadcom + * Copyright (c) 2014-2025 Broadcom */ #define pr_fmt(fmt) "bcmgenet_wol: " fmt @@ -145,8 +145,7 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv, enum bcmgenet_power_mode mode) { struct net_device *dev = priv->dev; - struct bcmgenet_rxnfc_rule *rule; - u32 reg, hfb_ctrl_reg, hfb_enable = 0; + u32 reg, hfb_ctrl_reg; int retries = 0; if (mode != GENET_POWER_WOL_MAGIC) { @@ -154,18 +153,6 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv, return -EINVAL; } - /* Can't suspend with WoL if MAC is still in reset */ - spin_lock_bh(&priv->reg_lock); - reg = bcmgenet_umac_readl(priv, UMAC_CMD); - if (reg & CMD_SW_RESET) - reg &= ~CMD_SW_RESET; - - /* 
disable RX */ - reg &= ~CMD_RX_EN; - bcmgenet_umac_writel(priv, reg, UMAC_CMD); - spin_unlock_bh(&priv->reg_lock); - mdelay(10); - if (priv->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE)) { reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL); reg |= MPD_EN; @@ -177,13 +164,8 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv, } hfb_ctrl_reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL); - if (priv->wolopts & WAKE_FILTER) { - list_for_each_entry(rule, &priv->rxnfc_list, list) - if (rule->fs.ring_cookie == RX_CLS_FLOW_WAKE) - hfb_enable |= (1 << rule->fs.location); - reg = (hfb_ctrl_reg & ~RBUF_HFB_EN) | RBUF_ACPI_EN; - bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL); - } + reg = hfb_ctrl_reg | RBUF_ACPI_EN; + bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL); /* Do not leave UniMAC in MPD mode only */ retries = bcmgenet_poll_wol_status(priv); @@ -198,15 +180,12 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv, netif_dbg(priv, wol, dev, "MPD WOL-ready status set after %d msec\n", retries); - clk_prepare_enable(priv->clk_wol); - priv->wol_active = 1; + /* Disable phy status updates while suspending */ + mutex_lock(&dev->phydev->lock); + dev->phydev->state = PHY_READY; + mutex_unlock(&dev->phydev->lock); - if (hfb_enable) { - bcmgenet_hfb_reg_writel(priv, hfb_enable, - HFB_FLT_ENABLE_V3PLUS + 4); - hfb_ctrl_reg = RBUF_HFB_EN | RBUF_ACPI_EN; - bcmgenet_hfb_reg_writel(priv, hfb_ctrl_reg, HFB_CTRL); - } + clk_prepare_enable(priv->clk_wol); /* Enable CRC forward */ spin_lock_bh(&priv->reg_lock); @@ -214,13 +193,17 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv, priv->crc_fwd_en = 1; reg |= CMD_CRC_FWD; + /* Can't suspend with WoL if MAC is still in reset */ + if (reg & CMD_SW_RESET) + reg &= ~CMD_SW_RESET; + /* Receiver must be enabled for WOL MP detection */ reg |= CMD_RX_EN; bcmgenet_umac_writel(priv, reg, UMAC_CMD); spin_unlock_bh(&priv->reg_lock); reg = UMAC_IRQ_MPD_R; - if (hfb_enable) + if (hfb_ctrl_reg & RBUF_HFB_EN) reg |= UMAC_IRQ_HFB_SM | UMAC_IRQ_HFB_MM; bcmgenet_intrl2_0_writel(priv, reg, INTRL2_CPU_MASK_CLEAR); @@ -228,40 +211,42 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv, return 0; } -void bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv, - enum bcmgenet_power_mode mode) +int bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv, + enum bcmgenet_power_mode mode) { + struct net_device *dev = priv->dev; u32 reg; if (mode != GENET_POWER_WOL_MAGIC) { netif_err(priv, wol, priv->dev, "invalid mode: %d\n", mode); - return; + return -EINVAL; } - if (!priv->wol_active) - return; /* failed to suspend so skip the rest */ - - priv->wol_active = 0; clk_disable_unprepare(priv->clk_wol); priv->crc_fwd_en = 0; + bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_WAKE_EVENT, + INTRL2_CPU_MASK_SET); + if (bcmgenet_has_mdio_intr(priv)) + bcmgenet_intrl2_0_writel(priv, + UMAC_IRQ_MDIO_EVENT, + INTRL2_CPU_MASK_CLEAR); + /* Disable Magic Packet Detection */ if (priv->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE)) { reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL); if (!(reg & MPD_EN)) - return; /* already reset so skip the rest */ + return -EPERM; /* already reset so skip the rest */ reg &= ~(MPD_EN | MPD_PW_EN); bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL); } - /* Disable WAKE_FILTER Detection */ - if (priv->wolopts & WAKE_FILTER) { - reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL); - if (!(reg & RBUF_ACPI_EN)) - return; /* already reset so skip the rest */ - reg &= ~(RBUF_HFB_EN | RBUF_ACPI_EN); - bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL); - } + /* Disable ACPI mode */ + reg = 
bcmgenet_hfb_reg_readl(priv, HFB_CTRL); + if (!(reg & RBUF_ACPI_EN)) + return -EPERM; /* already reset so skip the rest */ + reg &= ~RBUF_ACPI_EN; + bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL); /* Disable CRC Forward */ spin_lock_bh(&priv->reg_lock); @@ -269,4 +254,14 @@ void bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv, reg &= ~CMD_CRC_FWD; bcmgenet_umac_writel(priv, reg, UMAC_CMD); spin_unlock_bh(&priv->reg_lock); + + /* Resume link status tracking */ + mutex_lock(&dev->phydev->lock); + if (dev->phydev->link) + dev->phydev->state = PHY_RUNNING; + else + dev->phydev->state = PHY_NOLINK; + mutex_unlock(&dev->phydev->lock); + + return 0; } diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c index c4a3698cef66f..71c619d2bea5f 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmmii.c +++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c @@ -2,7 +2,7 @@ /* * Broadcom GENET MDIO routines * - * Copyright (c) 2014-2024 Broadcom + * Copyright (c) 2014-2025 Broadcom */ #include @@ -154,7 +154,7 @@ void bcmgenet_phy_power_set(struct net_device *dev, bool enable) u32 reg = 0; /* EXT_GPHY_CTRL is only valid for GENETv4 and onward */ - if (GENET_IS_V4(priv) || priv->ephy_16nm) { + if (GENET_IS_V4(priv) || bcmgenet_has_ephy_16nm(priv)) { reg = bcmgenet_ext_readl(priv, EXT_GPHY_CTRL); if (enable) { reg &= ~EXT_CK25_DIS; @@ -184,7 +184,7 @@ void bcmgenet_phy_power_set(struct net_device *dev, bool enable) static void bcmgenet_moca_phy_setup(struct bcmgenet_priv *priv) { - if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET) + if (bcmgenet_has_moca_link_det(priv)) fixed_phy_set_link_update(priv->dev->phydev, bcmgenet_fixed_phy_link_update); } diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c index ece6f3b483273..3b9107003b007 100644 --- a/drivers/net/ethernet/brocade/bna/bnad.c +++ b/drivers/net/ethernet/brocade/bna/bnad.c @@ -19,6 +19,7 @@ #include #include #include +#include #include "bnad.h" #include "bna.h" diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h index 5740c98d8c9f0..c9a5c8beb2fa8 100644 --- a/drivers/net/ethernet/cadence/macb.h +++ b/drivers/net/ethernet/cadence/macb.h @@ -951,75 +951,73 @@ struct macb_tx_skb { * device stats by a periodic timer. 
*/ struct macb_stats { - u32 rx_pause_frames; - u32 tx_ok; - u32 tx_single_cols; - u32 tx_multiple_cols; - u32 rx_ok; - u32 rx_fcs_errors; - u32 rx_align_errors; - u32 tx_deferred; - u32 tx_late_cols; - u32 tx_excessive_cols; - u32 tx_underruns; - u32 tx_carrier_errors; - u32 rx_resource_errors; - u32 rx_overruns; - u32 rx_symbol_errors; - u32 rx_oversize_pkts; - u32 rx_jabbers; - u32 rx_undersize_pkts; - u32 sqe_test_errors; - u32 rx_length_mismatch; - u32 tx_pause_frames; + u64 rx_pause_frames; + u64 tx_ok; + u64 tx_single_cols; + u64 tx_multiple_cols; + u64 rx_ok; + u64 rx_fcs_errors; + u64 rx_align_errors; + u64 tx_deferred; + u64 tx_late_cols; + u64 tx_excessive_cols; + u64 tx_underruns; + u64 tx_carrier_errors; + u64 rx_resource_errors; + u64 rx_overruns; + u64 rx_symbol_errors; + u64 rx_oversize_pkts; + u64 rx_jabbers; + u64 rx_undersize_pkts; + u64 sqe_test_errors; + u64 rx_length_mismatch; + u64 tx_pause_frames; }; struct gem_stats { - u32 tx_octets_31_0; - u32 tx_octets_47_32; - u32 tx_frames; - u32 tx_broadcast_frames; - u32 tx_multicast_frames; - u32 tx_pause_frames; - u32 tx_64_byte_frames; - u32 tx_65_127_byte_frames; - u32 tx_128_255_byte_frames; - u32 tx_256_511_byte_frames; - u32 tx_512_1023_byte_frames; - u32 tx_1024_1518_byte_frames; - u32 tx_greater_than_1518_byte_frames; - u32 tx_underrun; - u32 tx_single_collision_frames; - u32 tx_multiple_collision_frames; - u32 tx_excessive_collisions; - u32 tx_late_collisions; - u32 tx_deferred_frames; - u32 tx_carrier_sense_errors; - u32 rx_octets_31_0; - u32 rx_octets_47_32; - u32 rx_frames; - u32 rx_broadcast_frames; - u32 rx_multicast_frames; - u32 rx_pause_frames; - u32 rx_64_byte_frames; - u32 rx_65_127_byte_frames; - u32 rx_128_255_byte_frames; - u32 rx_256_511_byte_frames; - u32 rx_512_1023_byte_frames; - u32 rx_1024_1518_byte_frames; - u32 rx_greater_than_1518_byte_frames; - u32 rx_undersized_frames; - u32 rx_oversize_frames; - u32 rx_jabbers; - u32 rx_frame_check_sequence_errors; - u32 rx_length_field_frame_errors; - u32 rx_symbol_errors; - u32 rx_alignment_errors; - u32 rx_resource_errors; - u32 rx_overruns; - u32 rx_ip_header_checksum_errors; - u32 rx_tcp_checksum_errors; - u32 rx_udp_checksum_errors; + u64 tx_octets; + u64 tx_frames; + u64 tx_broadcast_frames; + u64 tx_multicast_frames; + u64 tx_pause_frames; + u64 tx_64_byte_frames; + u64 tx_65_127_byte_frames; + u64 tx_128_255_byte_frames; + u64 tx_256_511_byte_frames; + u64 tx_512_1023_byte_frames; + u64 tx_1024_1518_byte_frames; + u64 tx_greater_than_1518_byte_frames; + u64 tx_underrun; + u64 tx_single_collision_frames; + u64 tx_multiple_collision_frames; + u64 tx_excessive_collisions; + u64 tx_late_collisions; + u64 tx_deferred_frames; + u64 tx_carrier_sense_errors; + u64 rx_octets; + u64 rx_frames; + u64 rx_broadcast_frames; + u64 rx_multicast_frames; + u64 rx_pause_frames; + u64 rx_64_byte_frames; + u64 rx_65_127_byte_frames; + u64 rx_128_255_byte_frames; + u64 rx_256_511_byte_frames; + u64 rx_512_1023_byte_frames; + u64 rx_1024_1518_byte_frames; + u64 rx_greater_than_1518_byte_frames; + u64 rx_undersized_frames; + u64 rx_oversize_frames; + u64 rx_jabbers; + u64 rx_frame_check_sequence_errors; + u64 rx_length_field_frame_errors; + u64 rx_symbol_errors; + u64 rx_alignment_errors; + u64 rx_resource_errors; + u64 rx_overruns; + u64 rx_ip_header_checksum_errors; + u64 rx_tcp_checksum_errors; + u64 rx_udp_checksum_errors; }; /* Describes the name and offset of an individual statistic register, as @@ -1027,7 +1025,7 @@ struct gem_stats { * this register should 
contribute to. */ struct gem_statistic { - char stat_string[ETH_GSTRING_LEN]; + char stat_string[ETH_GSTRING_LEN] __nonstring; int offset; u32 stat_bits; }; @@ -1279,6 +1277,8 @@ struct macb { struct clk *rx_clk; struct clk *tsu_clk; struct net_device *dev; + /* Protects hw_stats and ethtool_stats */ + spinlock_t stats_lock; union { struct macb_stats macb; struct gem_stats gem; diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index 48496209fb164..1fe8ec37491b1 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -17,8 +17,6 @@ #include #include #include -#include -#include #include #include #include @@ -26,7 +24,6 @@ #include #include #include -#include #include #include #include @@ -853,9 +850,7 @@ static int macb_mii_probe(struct net_device *dev) struct macb *bp = netdev_priv(dev); bp->phylink_sgmii_pcs.ops = &macb_phylink_pcs_ops; - bp->phylink_sgmii_pcs.neg_mode = true; bp->phylink_usx_pcs.ops = &macb_phylink_usx_pcs_ops; - bp->phylink_usx_pcs.neg_mode = true; bp->phylink_config.dev = &dev->dev; bp->phylink_config.type = PHYLINK_NETDEV; @@ -990,8 +985,8 @@ static int macb_mii_init(struct macb *bp) static void macb_update_stats(struct macb *bp) { - u32 *p = &bp->hw_stats.macb.rx_pause_frames; - u32 *end = &bp->hw_stats.macb.tx_pause_frames + 1; + u64 *p = &bp->hw_stats.macb.rx_pause_frames; + u64 *end = &bp->hw_stats.macb.tx_pause_frames + 1; int offset = MACB_PFR; WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4); @@ -1081,15 +1076,18 @@ static void macb_tx_error_task(struct work_struct *work) tx_error_task); bool halt_timeout = false; struct macb *bp = queue->bp; + u32 queue_index; + u32 packets = 0; + u32 bytes = 0; struct macb_tx_skb *tx_skb; struct macb_dma_desc *desc; struct sk_buff *skb; unsigned int tail; unsigned long flags; + queue_index = queue - bp->queues; netdev_vdbg(bp->dev, "macb_tx_error_task: q = %u, t = %u, h = %u\n", - (unsigned int)(queue - bp->queues), - queue->tx_tail, queue->tx_head); + queue_index, queue->tx_tail, queue->tx_head); /* Prevent the queue NAPI TX poll from running, as it calls * macb_tx_complete(), which in turn may call netif_wake_subqueue(). 
@@ -1142,8 +1140,10 @@ static void macb_tx_error_task(struct work_struct *work) skb->data); bp->dev->stats.tx_packets++; queue->stats.tx_packets++; + packets++; bp->dev->stats.tx_bytes += skb->len; queue->stats.tx_bytes += skb->len; + bytes += skb->len; } } else { /* "Buffers exhausted mid-frame" errors may only happen @@ -1160,6 +1160,9 @@ static void macb_tx_error_task(struct work_struct *work) macb_tx_unmap(bp, tx_skb, 0); } + netdev_tx_completed_queue(netdev_get_tx_queue(bp->dev, queue_index), + packets, bytes); + /* Set end of TX queue */ desc = macb_tx_desc(queue, 0); macb_set_addr(bp, desc, 0); @@ -1230,6 +1233,7 @@ static int macb_tx_complete(struct macb_queue *queue, int budget) unsigned int tail; unsigned int head; int packets = 0; + u32 bytes = 0; spin_lock(&queue->tx_ptr_lock); head = queue->tx_head; @@ -1271,6 +1275,7 @@ static int macb_tx_complete(struct macb_queue *queue, int budget) bp->dev->stats.tx_bytes += skb->len; queue->stats.tx_bytes += skb->len; packets++; + bytes += skb->len; } /* Now we can safely release resources */ @@ -1285,6 +1290,9 @@ static int macb_tx_complete(struct macb_queue *queue, int budget) } } + netdev_tx_completed_queue(netdev_get_tx_queue(bp->dev, queue_index), + packets, bytes); + queue->tx_tail = tail; if (__netif_subqueue_stopped(bp->dev, queue_index) && CIRC_CNT(queue->tx_head, queue->tx_tail, @@ -1978,10 +1986,12 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id) if (status & MACB_BIT(ISR_ROVR)) { /* We missed at least one packet */ + spin_lock(&bp->stats_lock); if (macb_is_gem(bp)) bp->hw_stats.gem.rx_overruns++; else bp->hw_stats.macb.rx_overruns++; + spin_unlock(&bp->stats_lock); if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) queue_writel(queue, ISR, MACB_BIT(ISR_ROVR)); @@ -2386,6 +2396,8 @@ static netdev_tx_t macb_start_xmit(struct sk_buff *skb, struct net_device *dev) /* Make newly initialized descriptor visible to hardware */ wmb(); skb_tx_timestamp(skb); + netdev_tx_sent_queue(netdev_get_tx_queue(bp->dev, queue_index), + skb->len); spin_lock_irq(&bp->lock); macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART)); @@ -3021,6 +3033,7 @@ static int macb_close(struct net_device *dev) for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { napi_disable(&queue->napi_rx); napi_disable(&queue->napi_tx); + netdev_tx_reset_queue(netdev_get_tx_queue(dev, q)); } phylink_stop(bp->phylink); @@ -3071,7 +3084,7 @@ static void gem_update_stats(struct macb *bp) unsigned int i, q, idx; unsigned long *stat; - u32 *p = &bp->hw_stats.gem.tx_octets_31_0; + u64 *p = &bp->hw_stats.gem.tx_octets; for (i = 0; i < GEM_STATS_LEN; ++i, ++p) { u32 offset = gem_statistics[i].offset; @@ -3084,7 +3097,7 @@ static void gem_update_stats(struct macb *bp) /* Add GEM_OCTTXH, GEM_OCTRXH */ val = bp->macb_reg_readl(bp, offset + 4); bp->ethtool_stats[i] += ((u64)val) << 32; - *(++p) += val; + *p += ((u64)val) << 32; } } @@ -3094,15 +3107,13 @@ static void gem_update_stats(struct macb *bp) bp->ethtool_stats[idx++] = *stat; } -static struct net_device_stats *gem_get_stats(struct macb *bp) +static void gem_get_stats(struct macb *bp, struct rtnl_link_stats64 *nstat) { struct gem_stats *hwstat = &bp->hw_stats.gem; - struct net_device_stats *nstat = &bp->dev->stats; - - if (!netif_running(bp->dev)) - return nstat; - gem_update_stats(bp); + spin_lock_irq(&bp->stats_lock); + if (netif_running(bp->dev)) + gem_update_stats(bp); nstat->rx_errors = (hwstat->rx_frame_check_sequence_errors + hwstat->rx_alignment_errors + @@ -3131,19 +3142,19 @@ static struct 
net_device_stats *gem_get_stats(struct macb *bp) nstat->tx_aborted_errors = hwstat->tx_excessive_collisions; nstat->tx_carrier_errors = hwstat->tx_carrier_sense_errors; nstat->tx_fifo_errors = hwstat->tx_underrun; - - return nstat; + spin_unlock_irq(&bp->stats_lock); } static void gem_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data) { - struct macb *bp; + struct macb *bp = netdev_priv(dev); - bp = netdev_priv(dev); + spin_lock_irq(&bp->stats_lock); gem_update_stats(bp); memcpy(data, &bp->ethtool_stats, sizeof(u64) * (GEM_STATS_LEN + QUEUE_STATS_LEN * MACB_MAX_QUEUES)); + spin_unlock_irq(&bp->stats_lock); } static int gem_get_sset_count(struct net_device *dev, int sset) @@ -3183,16 +3194,20 @@ static void gem_get_ethtool_strings(struct net_device *dev, u32 sset, u8 *p) } } -static struct net_device_stats *macb_get_stats(struct net_device *dev) +static void macb_get_stats(struct net_device *dev, + struct rtnl_link_stats64 *nstat) { struct macb *bp = netdev_priv(dev); - struct net_device_stats *nstat = &bp->dev->stats; struct macb_stats *hwstat = &bp->hw_stats.macb; - if (macb_is_gem(bp)) - return gem_get_stats(bp); + netdev_stats_to_stats64(nstat, &bp->dev->stats); + if (macb_is_gem(bp)) { + gem_get_stats(bp, nstat); + return; + } /* read stats from hardware */ + spin_lock_irq(&bp->stats_lock); macb_update_stats(bp); /* Convert HW stats into netdevice stats */ @@ -3226,8 +3241,171 @@ static struct net_device_stats *macb_get_stats(struct net_device *dev) nstat->tx_carrier_errors = hwstat->tx_carrier_errors; nstat->tx_fifo_errors = hwstat->tx_underruns; /* Don't know about heartbeat or window errors... */ + spin_unlock_irq(&bp->stats_lock); +} + +static void macb_get_pause_stats(struct net_device *dev, + struct ethtool_pause_stats *pause_stats) +{ + struct macb *bp = netdev_priv(dev); + struct macb_stats *hwstat = &bp->hw_stats.macb; + + spin_lock_irq(&bp->stats_lock); + macb_update_stats(bp); + pause_stats->tx_pause_frames = hwstat->tx_pause_frames; + pause_stats->rx_pause_frames = hwstat->rx_pause_frames; + spin_unlock_irq(&bp->stats_lock); +} + +static void gem_get_pause_stats(struct net_device *dev, + struct ethtool_pause_stats *pause_stats) +{ + struct macb *bp = netdev_priv(dev); + struct gem_stats *hwstat = &bp->hw_stats.gem; + + spin_lock_irq(&bp->stats_lock); + gem_update_stats(bp); + pause_stats->tx_pause_frames = hwstat->tx_pause_frames; + pause_stats->rx_pause_frames = hwstat->rx_pause_frames; + spin_unlock_irq(&bp->stats_lock); +} + +static void macb_get_eth_mac_stats(struct net_device *dev, + struct ethtool_eth_mac_stats *mac_stats) +{ + struct macb *bp = netdev_priv(dev); + struct macb_stats *hwstat = &bp->hw_stats.macb; - return nstat; + spin_lock_irq(&bp->stats_lock); + macb_update_stats(bp); + mac_stats->FramesTransmittedOK = hwstat->tx_ok; + mac_stats->SingleCollisionFrames = hwstat->tx_single_cols; + mac_stats->MultipleCollisionFrames = hwstat->tx_multiple_cols; + mac_stats->FramesReceivedOK = hwstat->rx_ok; + mac_stats->FrameCheckSequenceErrors = hwstat->rx_fcs_errors; + mac_stats->AlignmentErrors = hwstat->rx_align_errors; + mac_stats->FramesWithDeferredXmissions = hwstat->tx_deferred; + mac_stats->LateCollisions = hwstat->tx_late_cols; + mac_stats->FramesAbortedDueToXSColls = hwstat->tx_excessive_cols; + mac_stats->FramesLostDueToIntMACXmitError = hwstat->tx_underruns; + mac_stats->CarrierSenseErrors = hwstat->tx_carrier_errors; + mac_stats->FramesLostDueToIntMACRcvError = hwstat->rx_overruns; + mac_stats->InRangeLengthErrors = 
hwstat->rx_length_mismatch; + mac_stats->FrameTooLongErrors = hwstat->rx_oversize_pkts; + spin_unlock_irq(&bp->stats_lock); +} + +static void gem_get_eth_mac_stats(struct net_device *dev, + struct ethtool_eth_mac_stats *mac_stats) +{ + struct macb *bp = netdev_priv(dev); + struct gem_stats *hwstat = &bp->hw_stats.gem; + + spin_lock_irq(&bp->stats_lock); + gem_update_stats(bp); + mac_stats->FramesTransmittedOK = hwstat->tx_frames; + mac_stats->SingleCollisionFrames = hwstat->tx_single_collision_frames; + mac_stats->MultipleCollisionFrames = + hwstat->tx_multiple_collision_frames; + mac_stats->FramesReceivedOK = hwstat->rx_frames; + mac_stats->FrameCheckSequenceErrors = + hwstat->rx_frame_check_sequence_errors; + mac_stats->AlignmentErrors = hwstat->rx_alignment_errors; + mac_stats->OctetsTransmittedOK = hwstat->tx_octets; + mac_stats->FramesWithDeferredXmissions = hwstat->tx_deferred_frames; + mac_stats->LateCollisions = hwstat->tx_late_collisions; + mac_stats->FramesAbortedDueToXSColls = hwstat->tx_excessive_collisions; + mac_stats->FramesLostDueToIntMACXmitError = hwstat->tx_underrun; + mac_stats->CarrierSenseErrors = hwstat->tx_carrier_sense_errors; + mac_stats->OctetsReceivedOK = hwstat->rx_octets; + mac_stats->MulticastFramesXmittedOK = hwstat->tx_multicast_frames; + mac_stats->BroadcastFramesXmittedOK = hwstat->tx_broadcast_frames; + mac_stats->MulticastFramesReceivedOK = hwstat->rx_multicast_frames; + mac_stats->BroadcastFramesReceivedOK = hwstat->rx_broadcast_frames; + mac_stats->InRangeLengthErrors = hwstat->rx_length_field_frame_errors; + mac_stats->FrameTooLongErrors = hwstat->rx_oversize_frames; + spin_unlock_irq(&bp->stats_lock); +} + +/* TODO: Report SQE test errors when added to phy_stats */ +static void macb_get_eth_phy_stats(struct net_device *dev, + struct ethtool_eth_phy_stats *phy_stats) +{ + struct macb *bp = netdev_priv(dev); + struct macb_stats *hwstat = &bp->hw_stats.macb; + + spin_lock_irq(&bp->stats_lock); + macb_update_stats(bp); + phy_stats->SymbolErrorDuringCarrier = hwstat->rx_symbol_errors; + spin_unlock_irq(&bp->stats_lock); +} + +static void gem_get_eth_phy_stats(struct net_device *dev, + struct ethtool_eth_phy_stats *phy_stats) +{ + struct macb *bp = netdev_priv(dev); + struct gem_stats *hwstat = &bp->hw_stats.gem; + + spin_lock_irq(&bp->stats_lock); + gem_update_stats(bp); + phy_stats->SymbolErrorDuringCarrier = hwstat->rx_symbol_errors; + spin_unlock_irq(&bp->stats_lock); +} + +static void macb_get_rmon_stats(struct net_device *dev, + struct ethtool_rmon_stats *rmon_stats, + const struct ethtool_rmon_hist_range **ranges) +{ + struct macb *bp = netdev_priv(dev); + struct macb_stats *hwstat = &bp->hw_stats.macb; + + spin_lock_irq(&bp->stats_lock); + macb_update_stats(bp); + rmon_stats->undersize_pkts = hwstat->rx_undersize_pkts; + rmon_stats->oversize_pkts = hwstat->rx_oversize_pkts; + rmon_stats->jabbers = hwstat->rx_jabbers; + spin_unlock_irq(&bp->stats_lock); +} + +static const struct ethtool_rmon_hist_range gem_rmon_ranges[] = { + { 64, 64 }, + { 65, 127 }, + { 128, 255 }, + { 256, 511 }, + { 512, 1023 }, + { 1024, 1518 }, + { 1519, 16384 }, + { }, +}; + +static void gem_get_rmon_stats(struct net_device *dev, + struct ethtool_rmon_stats *rmon_stats, + const struct ethtool_rmon_hist_range **ranges) +{ + struct macb *bp = netdev_priv(dev); + struct gem_stats *hwstat = &bp->hw_stats.gem; + + spin_lock_irq(&bp->stats_lock); + gem_update_stats(bp); + rmon_stats->undersize_pkts = hwstat->rx_undersized_frames; + rmon_stats->oversize_pkts = 
hwstat->rx_oversize_frames; + rmon_stats->jabbers = hwstat->rx_jabbers; + rmon_stats->hist[0] = hwstat->rx_64_byte_frames; + rmon_stats->hist[1] = hwstat->rx_65_127_byte_frames; + rmon_stats->hist[2] = hwstat->rx_128_255_byte_frames; + rmon_stats->hist[3] = hwstat->rx_256_511_byte_frames; + rmon_stats->hist[4] = hwstat->rx_512_1023_byte_frames; + rmon_stats->hist[5] = hwstat->rx_1024_1518_byte_frames; + rmon_stats->hist[6] = hwstat->rx_greater_than_1518_byte_frames; + rmon_stats->hist_tx[0] = hwstat->tx_64_byte_frames; + rmon_stats->hist_tx[1] = hwstat->tx_65_127_byte_frames; + rmon_stats->hist_tx[2] = hwstat->tx_128_255_byte_frames; + rmon_stats->hist_tx[3] = hwstat->tx_256_511_byte_frames; + rmon_stats->hist_tx[4] = hwstat->tx_512_1023_byte_frames; + rmon_stats->hist_tx[5] = hwstat->tx_1024_1518_byte_frames; + rmon_stats->hist_tx[6] = hwstat->tx_greater_than_1518_byte_frames; + spin_unlock_irq(&bp->stats_lock); + *ranges = gem_rmon_ranges; } static int macb_get_regs_len(struct net_device *netdev) @@ -3756,6 +3934,10 @@ static const struct ethtool_ops macb_ethtool_ops = { .get_regs = macb_get_regs, .get_link = ethtool_op_get_link, .get_ts_info = ethtool_op_get_ts_info, + .get_pause_stats = macb_get_pause_stats, + .get_eth_mac_stats = macb_get_eth_mac_stats, + .get_eth_phy_stats = macb_get_eth_phy_stats, + .get_rmon_stats = macb_get_rmon_stats, .get_wol = macb_get_wol, .set_wol = macb_set_wol, .get_link_ksettings = macb_get_link_ksettings, @@ -3774,6 +3956,10 @@ static const struct ethtool_ops gem_ethtool_ops = { .get_ethtool_stats = gem_get_ethtool_stats, .get_strings = gem_get_ethtool_strings, .get_sset_count = gem_get_sset_count, + .get_pause_stats = gem_get_pause_stats, + .get_eth_mac_stats = gem_get_eth_mac_stats, + .get_eth_phy_stats = gem_get_eth_phy_stats, + .get_rmon_stats = gem_get_rmon_stats, .get_link_ksettings = macb_get_link_ksettings, .set_link_ksettings = macb_set_link_ksettings, .get_ringparam = macb_get_ringparam, @@ -3910,7 +4096,7 @@ static const struct net_device_ops macb_netdev_ops = { .ndo_stop = macb_close, .ndo_start_xmit = macb_start_xmit, .ndo_set_rx_mode = macb_set_rx_mode, - .ndo_get_stats = macb_get_stats, + .ndo_get_stats64 = macb_get_stats, .ndo_eth_ioctl = macb_ioctl, .ndo_validate_addr = eth_validate_addr, .ndo_change_mtu = macb_change_mtu, @@ -4571,7 +4757,7 @@ static const struct net_device_ops at91ether_netdev_ops = { .ndo_open = at91ether_open, .ndo_stop = at91ether_close, .ndo_start_xmit = at91ether_start_xmit, - .ndo_get_stats = macb_get_stats, + .ndo_get_stats64 = macb_get_stats, .ndo_set_rx_mode = macb_set_rx_mode, .ndo_set_mac_address = eth_mac_addr, .ndo_eth_ioctl = macb_ioctl, @@ -5097,6 +5283,7 @@ static int macb_probe(struct platform_device *pdev) } } spin_lock_init(&bp->lock); + spin_lock_init(&bp->stats_lock); /* setup capabilities */ macb_configure_caps(bp, macb_config); diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c index 9ad49aea2673b..ff8f2f9f9cae1 100644 --- a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c +++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c @@ -49,7 +49,7 @@ static int cn23xx_pf_soft_reset(struct octeon_device *oct) lio_pci_readq(oct, CN23XX_RST_SOFT_RST); lio_pci_writeq(oct, 1, CN23XX_RST_SOFT_RST); - /* Wait for 100ms as Octeon resets. 
*/ + /* Wait for 100ms as Octeon resets */ mdelay(100); if (octeon_read_csr64(oct, CN23XX_SLI_SCRATCH1)) { @@ -61,7 +61,7 @@ static int cn23xx_pf_soft_reset(struct octeon_device *oct) dev_dbg(&oct->pci_dev->dev, "OCTEON[%d]: Reset completed\n", oct->octeon_id); - /* restore the reset value*/ + /* Restore the reset value */ octeon_write_csr64(oct, CN23XX_WIN_WR_MASK_REG, 0xFF); return 0; @@ -121,7 +121,7 @@ u32 cn23xx_pf_get_oq_ticks(struct octeon_device *oct, u32 time_intr_in_us) oqticks_per_us /= 1024; /* time_intr is in microseconds. The next 2 steps gives the oq ticks - * corressponding to time_intr. + * corresponding to time_intr. */ oqticks_per_us *= time_intr_in_us; oqticks_per_us /= 1000; @@ -136,11 +136,11 @@ static void cn23xx_setup_global_mac_regs(struct octeon_device *oct) u64 reg_val; u64 temp; - /* programming SRN and TRS for each MAC(0..3) */ + /* Programming SRN and TRS for each MAC(0..3) */ dev_dbg(&oct->pci_dev->dev, "%s:Using pcie port %d\n", __func__, mac_no); - /* By default, mapping all 64 IOQs to a single MACs */ + /* By default, map all 64 IOQs to a single MAC */ reg_val = octeon_read_csr64(oct, CN23XX_SLI_PKT_MAC_RINFO64(mac_no, pf_num)); @@ -164,7 +164,7 @@ static void cn23xx_setup_global_mac_regs(struct octeon_device *oct) temp = oct->sriov_info.max_vfs & 0xff; reg_val |= (temp << CN23XX_PKT_MAC_CTL_RINFO_NVFS_BIT_POS); - /* write these settings to MAC register */ + /* Write these settings to MAC register */ octeon_write_csr64(oct, CN23XX_SLI_PKT_MAC_RINFO64(mac_no, pf_num), reg_val); @@ -183,10 +183,10 @@ static int cn23xx_reset_io_queues(struct octeon_device *oct) srn = oct->sriov_info.pf_srn; ern = srn + oct->sriov_info.num_pf_rings; - /*As per HRM reg description, s/w cant write 0 to ENB. */ - /*to make the queue off, need to set the RST bit. */ + /* As per HRM reg description, s/w can't write 0 to ENB. */ + /* We need to set the RST bit, to turn the queue off. */ - /* Reset the Enable bit for all the 64 IQs. */ + /* Reset the enable bit for all the 64 IQs. */ for (q_no = srn; q_no < ern; q_no++) { /* set RST bit to 1. This bit applies to both IQ and OQ */ d64 = octeon_read_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no)); @@ -194,7 +194,7 @@ static int cn23xx_reset_io_queues(struct octeon_device *oct) octeon_write_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no), d64); } - /*wait until the RST bit is clear or the RST and quite bits are set*/ + /* Wait until the RST bit is clear or the RST and quiet bits are set */ for (q_no = srn; q_no < ern; q_no++) { u64 reg_val = octeon_read_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no)); @@ -245,15 +245,15 @@ static int cn23xx_pf_setup_global_input_regs(struct octeon_device *oct) if (cn23xx_reset_io_queues(oct)) return -1; - /** Set the MAC_NUM and PVF_NUM in IQ_PKT_CONTROL reg - * for all queues.Only PF can set these bits. + /* Set the MAC_NUM and PVF_NUM in IQ_PKT_CONTROL reg + * for all queues. Only PF can set these bits. * bits 29:30 indicate the MAC num. * bits 32:47 indicate the PVF num. */ for (q_no = 0; q_no < ern; q_no++) { reg_val = (u64)oct->pcie_port << CN23XX_PKT_INPUT_CTL_MAC_NUM_POS; - /* for VF assigned queues. */ + /* For VF assigned queues. */ if (q_no < oct->sriov_info.pf_srn) { vf_num = q_no / oct->sriov_info.rings_per_vf; vf_num += 1; /* VF1, VF2,........ 
*/ @@ -268,7 +268,7 @@ static int cn23xx_pf_setup_global_input_regs(struct octeon_device *oct) reg_val); } - /* Select ES, RO, NS, RDSIZE,DPTR Fomat#0 for + /* Select ES, RO, NS, RDSIZE,DPTR Format#0 for * pf queues */ for (q_no = srn; q_no < ern; q_no++) { @@ -289,7 +289,7 @@ static int cn23xx_pf_setup_global_input_regs(struct octeon_device *oct) octeon_write_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no), reg_val); - /* Set WMARK level for triggering PI_INT */ + /* Set WMARK level to trigger PI_INT */ /* intr_threshold = CN23XX_DEF_IQ_INTR_THRESHOLD & */ intr_threshold = CFG_GET_IQ_INTR_PKT(cn23xx->conf) & CN23XX_PKT_IN_DONE_WMARK_MASK; @@ -354,7 +354,7 @@ static void cn23xx_pf_setup_global_output_regs(struct octeon_device *oct) /* set the ES bit */ reg_val |= (CN23XX_PKT_OUTPUT_CTL_ES); - /* write all the selected settings */ + /* Write all the selected settings */ octeon_write_csr(oct, CN23XX_SLI_OQ_PKT_CONTROL(q_no), reg_val); /* Enabling these interrupt in oct->fn_list.enable_interrupt() @@ -373,7 +373,7 @@ static void cn23xx_pf_setup_global_output_regs(struct octeon_device *oct) /** Setting the water mark level for pko back pressure **/ writeq(0x40, (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_OQ_WMARK); - /** Disabling setting OQs in reset when ring has no dorebells + /* Disabling setting OQs in reset when ring has no doorbells * enabling this will cause of head of line blocking */ /* Do it only for pass1.1. and pass1.2 */ @@ -383,7 +383,7 @@ static void cn23xx_pf_setup_global_output_regs(struct octeon_device *oct) CN23XX_SLI_GBL_CONTROL) | 0x2, (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_GBL_CONTROL); - /** Enable channel-level backpressure */ + /** Enable channel-level backpressure **/ if (oct->pf_num) writeq(0xffffffffffffffffULL, (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_OUT_BP_EN2_W1S); @@ -396,7 +396,7 @@ static int cn23xx_setup_pf_device_regs(struct octeon_device *oct) { cn23xx_enable_error_reporting(oct); - /* program the MAC(0..3)_RINFO before setting up input/output regs */ + /* Program the MAC(0..3)_RINFO before setting up input/output regs */ cn23xx_setup_global_mac_regs(oct); if (cn23xx_pf_setup_global_input_regs(oct)) @@ -410,7 +410,7 @@ static int cn23xx_setup_pf_device_regs(struct octeon_device *oct) octeon_write_csr64(oct, CN23XX_SLI_WINDOW_CTL, CN23XX_SLI_WINDOW_CTL_DEFAULT); - /* set SLI_PKT_IN_JABBER to handle large VXLAN packets */ + /* Set SLI_PKT_IN_JABBER to handle large VXLAN packets */ octeon_write_csr64(oct, CN23XX_SLI_PKT_IN_JABBER, CN23XX_INPUT_JABBER); return 0; } @@ -574,7 +574,7 @@ static int cn23xx_setup_pf_mbox(struct octeon_device *oct) mbox->mbox_read_reg = (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_PKT_PF_VF_MBOX_SIG(q_no, 1); - /*Mail Box Thread creation*/ + /* Mail Box Thread creation */ INIT_DELAYED_WORK(&mbox->mbox_poll_wk.work, cn23xx_pf_mbox_thread); mbox->mbox_poll_wk.ctxptr = (void *)mbox; @@ -626,7 +626,7 @@ static int cn23xx_enable_io_queues(struct octeon_device *oct) ern = srn + oct->num_iqs; for (q_no = srn; q_no < ern; q_no++) { - /* set the corresponding IQ IS_64B bit */ + /* Set the corresponding IQ IS_64B bit */ if (oct->io_qmask.iq64B & BIT_ULL(q_no - srn)) { reg_val = octeon_read_csr64( oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no)); @@ -635,7 +635,7 @@ static int cn23xx_enable_io_queues(struct octeon_device *oct) oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no), reg_val); } - /* set the corresponding IQ ENB bit */ + /* Set the corresponding IQ ENB bit */ if (oct->io_qmask.iq & BIT_ULL(q_no - srn)) { /* IOQs are in reset by default in PEM2 mode, * clearing 
reset bit @@ -681,7 +681,7 @@ static int cn23xx_enable_io_queues(struct octeon_device *oct) } for (q_no = srn; q_no < ern; q_no++) { u32 reg_val; - /* set the corresponding OQ ENB bit */ + /* Set the corresponding OQ ENB bit */ if (oct->io_qmask.oq & BIT_ULL(q_no - srn)) { reg_val = octeon_read_csr( oct, CN23XX_SLI_OQ_PKT_CONTROL(q_no)); @@ -707,7 +707,7 @@ static void cn23xx_disable_io_queues(struct octeon_device *oct) for (q_no = srn; q_no < ern; q_no++) { loop = HZ; - /* start the Reset for a particular ring */ + /* Start the Reset for a particular ring */ WRITE_ONCE(d64, octeon_read_csr64( oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no))); WRITE_ONCE(d64, READ_ONCE(d64) & @@ -740,7 +740,7 @@ static void cn23xx_disable_io_queues(struct octeon_device *oct) loop = HZ; /* Wait until hardware indicates that the particular IQ - * is out of reset.It given that SLI_PKT_RING_RST is + * is out of reset. Given that SLI_PKT_RING_RST is * common for both IQs and OQs */ WRITE_ONCE(d64, octeon_read_csr64( @@ -760,7 +760,7 @@ static void cn23xx_disable_io_queues(struct octeon_device *oct) schedule_timeout_uninterruptible(1); } - /* clear the SLI_PKT(0..63)_CNTS[CNT] reg value */ + /* Clear the SLI_PKT(0..63)_CNTS[CNT] reg value */ WRITE_ONCE(d32, octeon_read_csr( oct, CN23XX_SLI_OQ_PKTS_SENT(q_no))); octeon_write_csr(oct, CN23XX_SLI_OQ_PKTS_SENT(q_no), @@ -793,7 +793,7 @@ static u64 cn23xx_pf_msix_interrupt_handler(void *dev) if (!pkts_sent || (pkts_sent == 0xFFFFFFFFFFFFFFFFULL)) return ret; - /* Write count reg in sli_pkt_cnts to clear these int.*/ + /* Write count reg in sli_pkt_cnts to clear these int. */ if ((pkts_sent & CN23XX_INTR_PO_INT) || (pkts_sent & CN23XX_INTR_PI_INT)) { if (pkts_sent & CN23XX_INTR_PO_INT) @@ -908,7 +908,7 @@ static u32 cn23xx_bar1_idx_read(struct octeon_device *oct, u32 idx) oct, CN23XX_PEM_BAR1_INDEX_REG(oct->pcie_port, idx)); } -/* always call with lock held */ +/* Always call with lock held */ static u32 cn23xx_update_read_index(struct octeon_instr_queue *iq) { u32 new_idx; @@ -919,7 +919,7 @@ static u32 cn23xx_update_read_index(struct octeon_instr_queue *iq) iq->pkt_in_done = pkt_in_done; /* Modulo of the new index with the IQ size will give us - * the new index. The iq->reset_instr_cnt is always zero for + * the new index. The iq->reset_instr_cnt is always zero for * cn23xx, so no extra adjustments are needed. */ new_idx = (iq->octeon_read_index + @@ -934,8 +934,8 @@ static void cn23xx_enable_pf_interrupt(struct octeon_device *oct, u8 intr_flag) struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip; u64 intr_val = 0; - /* Divide the single write to multiple writes based on the flag. */ - /* Enable Interrupt */ + /* Divide the single write to multiple writes based on the flag. 
*/ + /* Enable Interrupts */ if (intr_flag == OCTEON_ALL_INTR) { writeq(cn23xx->intr_mask64, cn23xx->intr_enb_reg64); } else if (intr_flag & OCTEON_OUTPUT_INTR) { @@ -990,7 +990,7 @@ static int cn23xx_get_pf_num(struct octeon_device *oct) ret = 0; - /** Read Function Dependency Link reg to get the function number */ + /* Read Function Dependency Link reg to get the function number */ if (pci_read_config_dword(oct->pci_dev, CN23XX_PCIE_SRIOV_FDL, &fdl_bit) == 0) { oct->pf_num = ((fdl_bit >> CN23XX_PCIE_SRIOV_FDL_BIT_POS) & @@ -1003,13 +1003,13 @@ static int cn23xx_get_pf_num(struct octeon_device *oct) * In this case, read the PF number from the * SLI_PKT0_INPUT_CONTROL reg (written by f/w) */ - pkt0_in_ctl = octeon_read_csr64(oct, - CN23XX_SLI_IQ_PKT_CONTROL64(0)); + pkt0_in_ctl = + octeon_read_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(0)); pfnum = (pkt0_in_ctl >> CN23XX_PKT_INPUT_CTL_PF_NUM_POS) & CN23XX_PKT_INPUT_CTL_PF_NUM_MASK; mac = (octeon_read_csr(oct, CN23XX_SLI_MAC_NUMBER)) & 0xff; - /* validate PF num by reading RINFO; f/w writes RINFO.trs == 1*/ + /* Validate PF num by reading RINFO; f/w writes RINFO.trs == 1 */ d64 = octeon_read_csr64(oct, CN23XX_SLI_PKT_MAC_RINFO64(mac, pfnum)); trs = (int)(d64 >> CN23XX_PKT_MAC_CTL_RINFO_TRS_BIT_POS) & 0xff; @@ -1252,9 +1252,9 @@ int cn23xx_fw_loaded(struct octeon_device *oct) u64 val; /* If there's more than one active PF on this NIC, then that - * implies that the NIC firmware is loaded and running. This check + * implies that the NIC firmware is loaded and running. This check * prevents a rare false negative that might occur if we only relied - * on checking the SCR2_BIT_FW_LOADED flag. The false negative would + * on checking the SCR2_BIT_FW_LOADED flag. The false negative would * happen if the PF driver sees SCR2_BIT_FW_LOADED as cleared even * though the firmware was already loaded but still booting and has yet * to set SCR2_BIT_FW_LOADED. diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.c b/drivers/net/ethernet/cavium/liquidio/octeon_device.c index 6b6cb73482d7f..1753bb87dfbd9 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_device.c +++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.c @@ -1433,22 +1433,6 @@ int octeon_wait_for_ddr_init(struct octeon_device *oct, u32 *timeout) } EXPORT_SYMBOL_GPL(octeon_wait_for_ddr_init); -/* Get the octeon id assigned to the octeon device passed as argument. - * This function is exported to other modules. - * @param dev - octeon device pointer passed as a void *. - * @return octeon device id - */ -int lio_get_device_id(void *dev) -{ - struct octeon_device *octeon_dev = (struct octeon_device *)dev; - u32 i; - - for (i = 0; i < MAX_OCTEON_DEVICES; i++) - if (octeon_device[i] == octeon_dev) - return octeon_dev->octeon_id; - return -1; -} - void lio_enable_irq(struct octeon_droq *droq, struct octeon_instr_queue *iq) { u64 instr_cnt; diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.h b/drivers/net/ethernet/cavium/liquidio/octeon_device.h index d26364c2ac81c..19344b21f8fb9 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_device.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.h @@ -705,13 +705,6 @@ octeon_get_dispatch(struct octeon_device *octeon_dev, u16 opcode, */ struct octeon_device *lio_get_device(u32 octeon_id); -/** Get the octeon id assigned to the octeon device passed as argument. - * This function is exported to other modules. - * @param dev - octeon device pointer passed as a void *. 
- * @return octeon device id - */ -int lio_get_device_id(void *dev); - /** Read windowed register. * @param oct - pointer to the Octeon device. * @param addr - Address of the register to read. diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index c7c2c15a18155..95e6f015a6af0 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -1211,9 +1211,6 @@ struct adapter { struct timer_list flower_stats_timer; struct work_struct flower_stats_work; - /* Ethtool Dump */ - struct ethtool_dump eth_dump; - /* HMA */ struct hma_data hma; @@ -1233,6 +1230,10 @@ struct adapter { /* Ethtool n-tuple */ struct cxgb4_ethtool_filter *ethtool_filters; + + /* Ethtool Dump */ + /* Must be last - ends in a flex-array member. */ + struct ethtool_dump eth_dump; }; /* Support for "sched-class" command to allow a TX Scheduling Class to be diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 2f0b3e389e624..551c279dc14be 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -6538,26 +6538,6 @@ static void cxgb4_xfrm_free_state(struct xfrm_state *x) mutex_unlock(&uld_mutex); } -static bool cxgb4_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x) -{ - struct adapter *adap = netdev2adap(x->xso.dev); - bool ret = false; - - if (!mutex_trylock(&uld_mutex)) { - dev_dbg(adap->pdev_dev, - "crypto uld critical resource is under use\n"); - return ret; - } - if (chcr_offload_state(adap, CXGB4_XFRMDEV_OPS)) - goto out_unlock; - - ret = adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_offload_ok(skb, x); - -out_unlock: - mutex_unlock(&uld_mutex); - return ret; -} - static void cxgb4_advance_esn_state(struct xfrm_state *x) { struct adapter *adap = netdev2adap(x->xso.dev); @@ -6583,7 +6563,6 @@ static const struct xfrmdev_ops cxgb4_xfrmdev_ops = { .xdo_dev_state_add = cxgb4_xfrm_add_state, .xdo_dev_state_delete = cxgb4_xfrm_del_state, .xdo_dev_state_free = cxgb4_xfrm_free_state, - .xdo_dev_offload_ok = cxgb4_ipsec_offload_ok, .xdo_dev_state_advance_esn = cxgb4_advance_esn_state, }; diff --git a/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c b/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c index c7338ac6a5bbe..baba96883f48b 100644 --- a/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c +++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c @@ -71,7 +71,6 @@ static LIST_HEAD(uld_ctx_list); static DEFINE_MUTEX(dev_mutex); -static bool ch_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x); static int ch_ipsec_uld_state_change(void *handle, enum cxgb4_state new_state); static int ch_ipsec_xmit(struct sk_buff *skb, struct net_device *dev); static void *ch_ipsec_uld_add(const struct cxgb4_lld_info *infop); @@ -85,7 +84,6 @@ static const struct xfrmdev_ops ch_ipsec_xfrmdev_ops = { .xdo_dev_state_add = ch_ipsec_xfrm_add_state, .xdo_dev_state_delete = ch_ipsec_xfrm_del_state, .xdo_dev_state_free = ch_ipsec_xfrm_free_state, - .xdo_dev_offload_ok = ch_ipsec_offload_ok, .xdo_dev_state_advance_esn = ch_ipsec_advance_esn_state, }; @@ -323,20 +321,6 @@ static void ch_ipsec_xfrm_free_state(struct xfrm_state *x) module_put(THIS_MODULE); } -static bool ch_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x) -{ - if (x->props.family == AF_INET) { - /* Offload with IP options is not supported yet */ - if (ip_hdr(skb)->ihl > 5) - return 
false; - } else { - /* Offload with IPv6 extension headers is not support yet */ - if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr)) - return false; - } - return true; -} - static void ch_ipsec_advance_esn_state(struct xfrm_state *x) { /* do nothing */ diff --git a/drivers/net/ethernet/cisco/enic/Kconfig b/drivers/net/ethernet/cisco/enic/Kconfig index ad80c0fa96a68..96709875fe4f2 100644 --- a/drivers/net/ethernet/cisco/enic/Kconfig +++ b/drivers/net/ethernet/cisco/enic/Kconfig @@ -6,5 +6,6 @@ config ENIC tristate "Cisco VIC Ethernet NIC Support" depends on PCI + select PAGE_POOL help This enables the support for the Cisco VIC Ethernet card. diff --git a/drivers/net/ethernet/cisco/enic/Makefile b/drivers/net/ethernet/cisco/enic/Makefile index c3b6febfdbe44..a96b8332e6e2a 100644 --- a/drivers/net/ethernet/cisco/enic/Makefile +++ b/drivers/net/ethernet/cisco/enic/Makefile @@ -3,5 +3,5 @@ obj-$(CONFIG_ENIC) := enic.o enic-y := enic_main.o vnic_cq.o vnic_intr.o vnic_wq.o \ enic_res.o enic_dev.o enic_pp.o vnic_dev.o vnic_rq.o vnic_vic.o \ - enic_ethtool.o enic_api.o enic_clsf.o + enic_ethtool.o enic_api.o enic_clsf.o enic_rq.o enic_wq.o diff --git a/drivers/net/ethernet/cisco/enic/cq_desc.h b/drivers/net/ethernet/cisco/enic/cq_desc.h index 462c5435a206b..bfb3f14e89f5d 100644 --- a/drivers/net/ethernet/cisco/enic/cq_desc.h +++ b/drivers/net/ethernet/cisco/enic/cq_desc.h @@ -40,28 +40,7 @@ struct cq_desc { #define CQ_DESC_COMP_NDX_BITS 12 #define CQ_DESC_COMP_NDX_MASK ((1 << CQ_DESC_COMP_NDX_BITS) - 1) -static inline void cq_desc_dec(const struct cq_desc *desc_arg, - u8 *type, u8 *color, u16 *q_number, u16 *completed_index) -{ - const struct cq_desc *desc = desc_arg; - const u8 type_color = desc->type_color; - - *color = (type_color >> CQ_DESC_COLOR_SHIFT) & CQ_DESC_COLOR_MASK; - - /* - * Make sure color bit is read from desc *before* other fields - * are read from desc. Hardware guarantees color bit is last - * bit (byte) written. Adding the rmb() prevents the compiler - * and/or CPU from reordering the reads which would potentially - * result in reading stale values. 
- */ - - rmb(); - - *type = type_color & CQ_DESC_TYPE_MASK; - *q_number = le16_to_cpu(desc->q_number) & CQ_DESC_Q_NUM_MASK; - *completed_index = le16_to_cpu(desc->completed_index) & - CQ_DESC_COMP_NDX_MASK; -} +#define CQ_DESC_32_FI_MASK (BIT(0) | BIT(1)) +#define CQ_DESC_64_FI_MASK (BIT(0) | BIT(1)) #endif /* _CQ_DESC_H_ */ diff --git a/drivers/net/ethernet/cisco/enic/cq_enet_desc.h b/drivers/net/ethernet/cisco/enic/cq_enet_desc.h index d25426470a293..50787cff29db0 100644 --- a/drivers/net/ethernet/cisco/enic/cq_enet_desc.h +++ b/drivers/net/ethernet/cisco/enic/cq_enet_desc.h @@ -17,12 +17,22 @@ struct cq_enet_wq_desc { u8 type_color; }; -static inline void cq_enet_wq_desc_dec(struct cq_enet_wq_desc *desc, - u8 *type, u8 *color, u16 *q_number, u16 *completed_index) -{ - cq_desc_dec((struct cq_desc *)desc, type, - color, q_number, completed_index); -} +/* + * Defines and Capabilities for CMD_CQ_ENTRY_SIZE_SET + */ +#define VNIC_RQ_ALL (~0ULL) + +#define VNIC_RQ_CQ_ENTRY_SIZE_16 0 +#define VNIC_RQ_CQ_ENTRY_SIZE_32 1 +#define VNIC_RQ_CQ_ENTRY_SIZE_64 2 + +#define VNIC_RQ_CQ_ENTRY_SIZE_16_CAPABLE BIT(VNIC_RQ_CQ_ENTRY_SIZE_16) +#define VNIC_RQ_CQ_ENTRY_SIZE_32_CAPABLE BIT(VNIC_RQ_CQ_ENTRY_SIZE_32) +#define VNIC_RQ_CQ_ENTRY_SIZE_64_CAPABLE BIT(VNIC_RQ_CQ_ENTRY_SIZE_64) + +#define VNIC_RQ_CQ_ENTRY_SIZE_ALL_BIT (VNIC_RQ_CQ_ENTRY_SIZE_16_CAPABLE | \ + VNIC_RQ_CQ_ENTRY_SIZE_32_CAPABLE | \ + VNIC_RQ_CQ_ENTRY_SIZE_64_CAPABLE) /* Completion queue descriptor: Ethernet receive queue, 16B */ struct cq_enet_rq_desc { @@ -36,6 +46,45 @@ struct cq_enet_rq_desc { u8 type_color; }; +/* Completion queue descriptor: Ethernet receive queue, 32B */ +struct cq_enet_rq_desc_32 { + __le16 completed_index_flags; + __le16 q_number_rss_type_flags; + __le32 rss_hash; + __le16 bytes_written_flags; + __le16 vlan; + __le16 checksum_fcoe; + u8 flags; + u8 fetch_index_flags; + __le32 time_stamp; + __le16 time_stamp2; + __le16 pie_info; + __le32 pie_info2; + __le16 pie_info3; + u8 pie_info4; + u8 type_color; +}; + +/* Completion queue descriptor: Ethernet receive queue, 64B */ +struct cq_enet_rq_desc_64 { + __le16 completed_index_flags; + __le16 q_number_rss_type_flags; + __le32 rss_hash; + __le16 bytes_written_flags; + __le16 vlan; + __le16 checksum_fcoe; + u8 flags; + u8 fetch_index_flags; + __le32 time_stamp; + __le16 time_stamp2; + __le16 pie_info; + __le32 pie_info2; + __le16 pie_info3; + u8 pie_info4; + u8 reserved[32]; + u8 type_color; +}; + #define CQ_ENET_RQ_DESC_FLAGS_INGRESS_PORT (0x1 << 12) #define CQ_ENET_RQ_DESC_FLAGS_FCOE (0x1 << 13) #define CQ_ENET_RQ_DESC_FLAGS_EOP (0x1 << 14) @@ -88,85 +137,4 @@ struct cq_enet_rq_desc { #define CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT (0x1 << 6) #define CQ_ENET_RQ_DESC_FLAGS_FCS_OK (0x1 << 7) -static inline void cq_enet_rq_desc_dec(struct cq_enet_rq_desc *desc, - u8 *type, u8 *color, u16 *q_number, u16 *completed_index, - u8 *ingress_port, u8 *fcoe, u8 *eop, u8 *sop, u8 *rss_type, - u8 *csum_not_calc, u32 *rss_hash, u16 *bytes_written, u8 *packet_error, - u8 *vlan_stripped, u16 *vlan_tci, u16 *checksum, u8 *fcoe_sof, - u8 *fcoe_fc_crc_ok, u8 *fcoe_enc_error, u8 *fcoe_eof, - u8 *tcp_udp_csum_ok, u8 *udp, u8 *tcp, u8 *ipv4_csum_ok, - u8 *ipv6, u8 *ipv4, u8 *ipv4_fragment, u8 *fcs_ok) -{ - u16 completed_index_flags; - u16 q_number_rss_type_flags; - u16 bytes_written_flags; - - cq_desc_dec((struct cq_desc *)desc, type, - color, q_number, completed_index); - - completed_index_flags = le16_to_cpu(desc->completed_index_flags); - q_number_rss_type_flags = - 
le16_to_cpu(desc->q_number_rss_type_flags); - bytes_written_flags = le16_to_cpu(desc->bytes_written_flags); - - *ingress_port = (completed_index_flags & - CQ_ENET_RQ_DESC_FLAGS_INGRESS_PORT) ? 1 : 0; - *fcoe = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_FCOE) ? - 1 : 0; - *eop = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_EOP) ? - 1 : 0; - *sop = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_SOP) ? - 1 : 0; - - *rss_type = (u8)((q_number_rss_type_flags >> CQ_DESC_Q_NUM_BITS) & - CQ_ENET_RQ_DESC_RSS_TYPE_MASK); - *csum_not_calc = (q_number_rss_type_flags & - CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC) ? 1 : 0; - - *rss_hash = le32_to_cpu(desc->rss_hash); - - *bytes_written = bytes_written_flags & - CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK; - *packet_error = (bytes_written_flags & - CQ_ENET_RQ_DESC_FLAGS_TRUNCATED) ? 1 : 0; - *vlan_stripped = (bytes_written_flags & - CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED) ? 1 : 0; - - /* - * Tag Control Information(16) = user_priority(3) + cfi(1) + vlan(12) - */ - *vlan_tci = le16_to_cpu(desc->vlan); - - if (*fcoe) { - *fcoe_sof = (u8)(le16_to_cpu(desc->checksum_fcoe) & - CQ_ENET_RQ_DESC_FCOE_SOF_MASK); - *fcoe_fc_crc_ok = (desc->flags & - CQ_ENET_RQ_DESC_FCOE_FC_CRC_OK) ? 1 : 0; - *fcoe_enc_error = (desc->flags & - CQ_ENET_RQ_DESC_FCOE_ENC_ERROR) ? 1 : 0; - *fcoe_eof = (u8)((le16_to_cpu(desc->checksum_fcoe) >> - CQ_ENET_RQ_DESC_FCOE_EOF_SHIFT) & - CQ_ENET_RQ_DESC_FCOE_EOF_MASK); - *checksum = 0; - } else { - *fcoe_sof = 0; - *fcoe_fc_crc_ok = 0; - *fcoe_enc_error = 0; - *fcoe_eof = 0; - *checksum = le16_to_cpu(desc->checksum_fcoe); - } - - *tcp_udp_csum_ok = - (desc->flags & CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK) ? 1 : 0; - *udp = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_UDP) ? 1 : 0; - *tcp = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_TCP) ? 1 : 0; - *ipv4_csum_ok = - (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK) ? 1 : 0; - *ipv6 = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV6) ? 1 : 0; - *ipv4 = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4) ? 1 : 0; - *ipv4_fragment = - (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT) ? 1 : 0; - *fcs_ok = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_FCS_OK) ? 
1 : 0; -} - #endif /* _CQ_ENET_DESC_H_ */ diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h index 10b7e02ba4d0e..9c12e967e9f12 100644 --- a/drivers/net/ethernet/cisco/enic/enic.h +++ b/drivers/net/ethernet/cisco/enic/enic.h @@ -17,6 +17,7 @@ #include "vnic_nic.h" #include "vnic_rss.h" #include +#include #define DRV_NAME "enic" #define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver" @@ -30,6 +31,13 @@ #define ENIC_AIC_LARGE_PKT_DIFF 3 +enum ext_cq { + ENIC_RQ_CQ_ENTRY_SIZE_16, + ENIC_RQ_CQ_ENTRY_SIZE_32, + ENIC_RQ_CQ_ENTRY_SIZE_64, + ENIC_RQ_CQ_ENTRY_SIZE_MAX, +}; + struct enic_msix_entry { int requested; char devname[IFNAMSIZ + 8]; @@ -75,6 +83,10 @@ struct enic_rx_coal { #define ENIC_SET_INSTANCE (1 << 3) #define ENIC_SET_HOST (1 << 4) +#define MAX_TSO BIT(16) +#define WQ_ENET_MAX_DESC_LEN BIT(WQ_ENET_LEN_BITS) +#define ENIC_DESC_MAX_SPLITS (MAX_TSO / WQ_ENET_MAX_DESC_LEN + 1) + struct enic_port_profile { u32 set; u8 request; @@ -158,6 +170,7 @@ struct enic_rq_stats { u64 pkt_truncated; /* truncated pkts */ u64 no_skb; /* out of skbs */ u64 desc_skip; /* Rx pkt went into later buffer */ + u64 pp_alloc_fail; /* page pool alloc failure */ }; struct enic_wq { @@ -169,6 +182,7 @@ struct enic_wq { struct enic_rq { struct vnic_rq vrq; struct enic_rq_stats stats; + struct page_pool *pool; } ____cacheline_aligned; /* Per-instance private data structure */ @@ -223,9 +237,9 @@ struct enic { unsigned int cq_avail; unsigned int cq_count; struct enic_rfs_flw_tbl rfs_h; - u32 rx_copybreak; u8 rss_key[ENIC_RSS_LEN]; struct vnic_gen_stats gen_stats; + enum ext_cq ext_cq; }; static inline struct net_device *vnic_get_netdev(struct vnic_dev *vdev) @@ -347,5 +361,6 @@ int enic_is_valid_vf(struct enic *enic, int vf); int enic_is_dynamic(struct enic *enic); void enic_set_ethtool_ops(struct net_device *netdev); int __enic_set_rsskey(struct enic *enic); +void enic_ext_cq(struct enic *enic); #endif /* _ENIC_H_ */ diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c index d607b4f0542ce..529160926a963 100644 --- a/drivers/net/ethernet/cisco/enic/enic_ethtool.c +++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c @@ -222,9 +222,9 @@ static void enic_get_ringparam(struct net_device *netdev, struct enic *enic = netdev_priv(netdev); struct vnic_enet_config *c = &enic->config; - ring->rx_max_pending = ENIC_MAX_RQ_DESCS; + ring->rx_max_pending = c->max_rq_ring; ring->rx_pending = c->rq_desc_count; - ring->tx_max_pending = ENIC_MAX_WQ_DESCS; + ring->tx_max_pending = c->max_wq_ring; ring->tx_pending = c->wq_desc_count; } @@ -252,18 +252,18 @@ static int enic_set_ringparam(struct net_device *netdev, } rx_pending = c->rq_desc_count; tx_pending = c->wq_desc_count; - if (ring->rx_pending > ENIC_MAX_RQ_DESCS || + if (ring->rx_pending > c->max_rq_ring || ring->rx_pending < ENIC_MIN_RQ_DESCS) { netdev_info(netdev, "rx pending (%u) not in range [%u,%u]", ring->rx_pending, ENIC_MIN_RQ_DESCS, - ENIC_MAX_RQ_DESCS); + c->max_rq_ring); return -EINVAL; } - if (ring->tx_pending > ENIC_MAX_WQ_DESCS || + if (ring->tx_pending > c->max_wq_ring || ring->tx_pending < ENIC_MIN_WQ_DESCS) { netdev_info(netdev, "tx pending (%u) not in range [%u,%u]", ring->tx_pending, ENIC_MIN_WQ_DESCS, - ENIC_MAX_WQ_DESCS); + c->max_wq_ring); return -EINVAL; } if (running) @@ -608,43 +608,6 @@ static int enic_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, return ret; } -static int enic_get_tunable(struct net_device *dev, - const struct ethtool_tunable 
*tuna, void *data) -{ - struct enic *enic = netdev_priv(dev); - int ret = 0; - - switch (tuna->id) { - case ETHTOOL_RX_COPYBREAK: - *(u32 *)data = enic->rx_copybreak; - break; - default: - ret = -EINVAL; - break; - } - - return ret; -} - -static int enic_set_tunable(struct net_device *dev, - const struct ethtool_tunable *tuna, - const void *data) -{ - struct enic *enic = netdev_priv(dev); - int ret = 0; - - switch (tuna->id) { - case ETHTOOL_RX_COPYBREAK: - enic->rx_copybreak = *(u32 *)data; - break; - default: - ret = -EINVAL; - break; - } - - return ret; -} - static u32 enic_get_rxfh_key_size(struct net_device *netdev) { return ENIC_RSS_LEN; @@ -727,8 +690,6 @@ static const struct ethtool_ops enic_ethtool_ops = { .get_coalesce = enic_get_coalesce, .set_coalesce = enic_set_coalesce, .get_rxnfc = enic_get_rxnfc, - .get_tunable = enic_get_tunable, - .set_tunable = enic_set_tunable, .get_rxfh_key_size = enic_get_rxfh_key_size, .get_rxfh = enic_get_rxfh, .set_rxfh = enic_set_rxfh, diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c index 49f6cab01ed51..54aa3953bf7b6 100644 --- a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c @@ -58,18 +58,15 @@ #include "enic_dev.h" #include "enic_pp.h" #include "enic_clsf.h" +#include "enic_rq.h" +#include "enic_wq.h" #define ENIC_NOTIFY_TIMER_PERIOD (2 * HZ) -#define WQ_ENET_MAX_DESC_LEN (1 << WQ_ENET_LEN_BITS) -#define MAX_TSO (1 << 16) -#define ENIC_DESC_MAX_SPLITS (MAX_TSO / WQ_ENET_MAX_DESC_LEN + 1) #define PCI_DEVICE_ID_CISCO_VIC_ENET 0x0043 /* ethernet vnic */ #define PCI_DEVICE_ID_CISCO_VIC_ENET_DYN 0x0044 /* enet dynamic vnic */ #define PCI_DEVICE_ID_CISCO_VIC_ENET_VF 0x0071 /* enet SRIOV VF */ -#define RX_COPYBREAK_DEFAULT 256 - /* Supported devices */ static const struct pci_device_id enic_id_table[] = { { PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET) }, @@ -322,54 +319,6 @@ int enic_is_valid_vf(struct enic *enic, int vf) #endif } -static void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf) -{ - struct enic *enic = vnic_dev_priv(wq->vdev); - - if (buf->sop) - dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len, - DMA_TO_DEVICE); - else - dma_unmap_page(&enic->pdev->dev, buf->dma_addr, buf->len, - DMA_TO_DEVICE); - - if (buf->os_buf) - dev_kfree_skb_any(buf->os_buf); -} - -static void enic_wq_free_buf(struct vnic_wq *wq, - struct cq_desc *cq_desc, struct vnic_wq_buf *buf, void *opaque) -{ - struct enic *enic = vnic_dev_priv(wq->vdev); - - enic->wq[wq->index].stats.cq_work++; - enic->wq[wq->index].stats.cq_bytes += buf->len; - enic_free_wq_buf(wq, buf); -} - -static int enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc, - u8 type, u16 q_number, u16 completed_index, void *opaque) -{ - struct enic *enic = vnic_dev_priv(vdev); - - spin_lock(&enic->wq[q_number].lock); - - vnic_wq_service(&enic->wq[q_number].vwq, cq_desc, - completed_index, enic_wq_free_buf, - opaque); - - if (netif_tx_queue_stopped(netdev_get_tx_queue(enic->netdev, q_number)) && - vnic_wq_desc_avail(&enic->wq[q_number].vwq) >= - (MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)) { - netif_wake_subqueue(enic->netdev, q_number); - enic->wq[q_number].stats.wake++; - } - - spin_unlock(&enic->wq[q_number].lock); - - return 0; -} - static bool enic_log_q_error(struct enic *enic) { unsigned int i; @@ -1313,243 +1262,6 @@ static int enic_get_vf_port(struct net_device *netdev, int vf, return -EMSGSIZE; } -static void enic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf 
*buf) -{ - struct enic *enic = vnic_dev_priv(rq->vdev); - - if (!buf->os_buf) - return; - - dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len, - DMA_FROM_DEVICE); - dev_kfree_skb_any(buf->os_buf); - buf->os_buf = NULL; -} - -static int enic_rq_alloc_buf(struct vnic_rq *rq) -{ - struct enic *enic = vnic_dev_priv(rq->vdev); - struct net_device *netdev = enic->netdev; - struct sk_buff *skb; - unsigned int len = netdev->mtu + VLAN_ETH_HLEN; - unsigned int os_buf_index = 0; - dma_addr_t dma_addr; - struct vnic_rq_buf *buf = rq->to_use; - - if (buf->os_buf) { - enic_queue_rq_desc(rq, buf->os_buf, os_buf_index, buf->dma_addr, - buf->len); - - return 0; - } - skb = netdev_alloc_skb_ip_align(netdev, len); - if (!skb) { - enic->rq[rq->index].stats.no_skb++; - return -ENOMEM; - } - - dma_addr = dma_map_single(&enic->pdev->dev, skb->data, len, - DMA_FROM_DEVICE); - if (unlikely(enic_dma_map_check(enic, dma_addr))) { - dev_kfree_skb(skb); - return -ENOMEM; - } - - enic_queue_rq_desc(rq, skb, os_buf_index, - dma_addr, len); - - return 0; -} - -static void enic_intr_update_pkt_size(struct vnic_rx_bytes_counter *pkt_size, - u32 pkt_len) -{ - if (ENIC_LARGE_PKT_THRESHOLD <= pkt_len) - pkt_size->large_pkt_bytes_cnt += pkt_len; - else - pkt_size->small_pkt_bytes_cnt += pkt_len; -} - -static bool enic_rxcopybreak(struct net_device *netdev, struct sk_buff **skb, - struct vnic_rq_buf *buf, u16 len) -{ - struct enic *enic = netdev_priv(netdev); - struct sk_buff *new_skb; - - if (len > enic->rx_copybreak) - return false; - new_skb = netdev_alloc_skb_ip_align(netdev, len); - if (!new_skb) - return false; - dma_sync_single_for_cpu(&enic->pdev->dev, buf->dma_addr, len, - DMA_FROM_DEVICE); - memcpy(new_skb->data, (*skb)->data, len); - *skb = new_skb; - - return true; -} - -static void enic_rq_indicate_buf(struct vnic_rq *rq, - struct cq_desc *cq_desc, struct vnic_rq_buf *buf, - int skipped, void *opaque) -{ - struct enic *enic = vnic_dev_priv(rq->vdev); - struct net_device *netdev = enic->netdev; - struct sk_buff *skb; - struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)]; - struct enic_rq_stats *rqstats = &enic->rq[rq->index].stats; - - u8 type, color, eop, sop, ingress_port, vlan_stripped; - u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof; - u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok; - u8 ipv6, ipv4, ipv4_fragment, fcs_ok, rss_type, csum_not_calc; - u8 packet_error; - u16 q_number, completed_index, bytes_written, vlan_tci, checksum; - u32 rss_hash; - bool outer_csum_ok = true, encap = false; - - rqstats->packets++; - if (skipped) { - rqstats->desc_skip++; - return; - } - - skb = buf->os_buf; - - cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc, - &type, &color, &q_number, &completed_index, - &ingress_port, &fcoe, &eop, &sop, &rss_type, - &csum_not_calc, &rss_hash, &bytes_written, - &packet_error, &vlan_stripped, &vlan_tci, &checksum, - &fcoe_sof, &fcoe_fc_crc_ok, &fcoe_enc_error, - &fcoe_eof, &tcp_udp_csum_ok, &udp, &tcp, - &ipv4_csum_ok, &ipv6, &ipv4, &ipv4_fragment, - &fcs_ok); - - if (packet_error) { - - if (!fcs_ok) { - if (bytes_written > 0) - rqstats->bad_fcs++; - else if (bytes_written == 0) - rqstats->pkt_truncated++; - } - - dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len, - DMA_FROM_DEVICE); - dev_kfree_skb_any(skb); - buf->os_buf = NULL; - - return; - } - - if (eop && bytes_written > 0) { - - /* Good receive - */ - rqstats->bytes += bytes_written; - if (!enic_rxcopybreak(netdev, &skb, buf, bytes_written)) { - buf->os_buf = NULL; - 
dma_unmap_single(&enic->pdev->dev, buf->dma_addr, - buf->len, DMA_FROM_DEVICE); - } - prefetch(skb->data - NET_IP_ALIGN); - - skb_put(skb, bytes_written); - skb->protocol = eth_type_trans(skb, netdev); - skb_record_rx_queue(skb, q_number); - if ((netdev->features & NETIF_F_RXHASH) && rss_hash && - (type == 3)) { - switch (rss_type) { - case CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv4: - case CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6: - case CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6_EX: - skb_set_hash(skb, rss_hash, PKT_HASH_TYPE_L4); - rqstats->l4_rss_hash++; - break; - case CQ_ENET_RQ_DESC_RSS_TYPE_IPv4: - case CQ_ENET_RQ_DESC_RSS_TYPE_IPv6: - case CQ_ENET_RQ_DESC_RSS_TYPE_IPv6_EX: - skb_set_hash(skb, rss_hash, PKT_HASH_TYPE_L3); - rqstats->l3_rss_hash++; - break; - } - } - if (enic->vxlan.vxlan_udp_port_number) { - switch (enic->vxlan.patch_level) { - case 0: - if (fcoe) { - encap = true; - outer_csum_ok = fcoe_fc_crc_ok; - } - break; - case 2: - if ((type == 7) && - (rss_hash & BIT(0))) { - encap = true; - outer_csum_ok = (rss_hash & BIT(1)) && - (rss_hash & BIT(2)); - } - break; - } - } - - /* Hardware does not provide whole packet checksum. It only - * provides pseudo checksum. Since hw validates the packet - * checksum but not provide us the checksum value. use - * CHECSUM_UNNECESSARY. - * - * In case of encap pkt tcp_udp_csum_ok/tcp_udp_csum_ok is - * inner csum_ok. outer_csum_ok is set by hw when outer udp - * csum is correct or is zero. - */ - if ((netdev->features & NETIF_F_RXCSUM) && !csum_not_calc && - tcp_udp_csum_ok && outer_csum_ok && - (ipv4_csum_ok || ipv6)) { - skb->ip_summed = CHECKSUM_UNNECESSARY; - skb->csum_level = encap; - if (encap) - rqstats->csum_unnecessary_encap++; - else - rqstats->csum_unnecessary++; - } - - if (vlan_stripped) { - __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci); - rqstats->vlan_stripped++; - } - skb_mark_napi_id(skb, &enic->napi[rq->index]); - if (!(netdev->features & NETIF_F_GRO)) - netif_receive_skb(skb); - else - napi_gro_receive(&enic->napi[q_number], skb); - if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce) - enic_intr_update_pkt_size(&cq->pkt_size_counter, - bytes_written); - } else { - - /* Buffer overflow - */ - rqstats->pkt_truncated++; - dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len, - DMA_FROM_DEVICE); - dev_kfree_skb_any(skb); - buf->os_buf = NULL; - } -} - -static int enic_rq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc, - u8 type, u16 q_number, u16 completed_index, void *opaque) -{ - struct enic *enic = vnic_dev_priv(vdev); - - vnic_rq_service(&enic->rq[q_number].vrq, cq_desc, - completed_index, VNIC_RQ_RETURN_DESC, - enic_rq_indicate_buf, opaque); - - return 0; -} - static void enic_set_int_moderation(struct enic *enic, struct vnic_rq *rq) { unsigned int intr = enic_msix_rq_intr(enic, rq->index); @@ -1620,12 +1332,10 @@ static int enic_poll(struct napi_struct *napi, int budget) unsigned int work_done, rq_work_done = 0, wq_work_done; int err; - wq_work_done = vnic_cq_service(&enic->cq[cq_wq], wq_work_to_do, - enic_wq_service, NULL); + wq_work_done = enic_wq_cq_service(enic, cq_wq, wq_work_to_do); if (budget > 0) - rq_work_done = vnic_cq_service(&enic->cq[cq_rq], - rq_work_to_do, enic_rq_service, NULL); + rq_work_done = enic_rq_cq_service(enic, cq_rq, rq_work_to_do); /* Accumulate intr event credits for this polling * cycle. 
An intr event is the completion of a @@ -1724,8 +1434,8 @@ static int enic_poll_msix_wq(struct napi_struct *napi, int budget) wq_irq = wq->index; cq = enic_cq_wq(enic, wq_irq); intr = enic_msix_wq_intr(enic, wq_irq); - wq_work_done = vnic_cq_service(&enic->cq[cq], wq_work_to_do, - enic_wq_service, NULL); + + wq_work_done = enic_wq_cq_service(enic, cq, wq_work_to_do); vnic_intr_return_credits(&enic->intr[intr], wq_work_done, 0 /* don't unmask intr */, @@ -1754,8 +1464,7 @@ static int enic_poll_msix_rq(struct napi_struct *napi, int budget) */ if (budget > 0) - work_done = vnic_cq_service(&enic->cq[cq], - work_to_do, enic_rq_service, NULL); + work_done = enic_rq_cq_service(enic, cq, work_to_do); /* Return intr event credits for this polling * cycle. An intr event is the completion of a @@ -1972,6 +1681,17 @@ static int enic_open(struct net_device *netdev) struct enic *enic = netdev_priv(netdev); unsigned int i; int err, ret; + unsigned int max_pkt_len = netdev->mtu + VLAN_ETH_HLEN; + struct page_pool_params pp_params = { + .order = get_order(max_pkt_len), + .pool_size = enic->config.rq_desc_count, + .nid = dev_to_node(&enic->pdev->dev), + .dev = &enic->pdev->dev, + .dma_dir = DMA_FROM_DEVICE, + .max_len = (max_pkt_len > PAGE_SIZE) ? max_pkt_len : PAGE_SIZE, + .netdev = netdev, + .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV, + }; err = enic_request_intr(enic); if (err) { @@ -1989,6 +1709,16 @@ static int enic_open(struct net_device *netdev) } for (i = 0; i < enic->rq_count; i++) { + /* create a page pool for each RQ */ + pp_params.napi = &enic->napi[i]; + pp_params.queue_idx = i; + enic->rq[i].pool = page_pool_create(&pp_params); + if (IS_ERR(enic->rq[i].pool)) { + err = PTR_ERR(enic->rq[i].pool); + enic->rq[i].pool = NULL; + goto err_out_free_rq; + } + /* enable rq before updating rq desc */ vnic_rq_enable(&enic->rq[i].vrq); vnic_rq_fill(&enic->rq[i].vrq, enic_rq_alloc_buf); @@ -2029,8 +1759,11 @@ static int enic_open(struct net_device *netdev) err_out_free_rq: for (i = 0; i < enic->rq_count; i++) { ret = vnic_rq_disable(&enic->rq[i].vrq); - if (!ret) + if (!ret) { vnic_rq_clean(&enic->rq[i].vrq, enic_free_rq_buf); + page_pool_destroy(enic->rq[i].pool); + enic->rq[i].pool = NULL; + } } enic_dev_notify_unset(enic); err_out_free_intr: @@ -2088,8 +1821,11 @@ static int enic_stop(struct net_device *netdev) for (i = 0; i < enic->wq_count; i++) vnic_wq_clean(&enic->wq[i].vwq, enic_free_wq_buf); - for (i = 0; i < enic->rq_count; i++) + for (i = 0; i < enic->rq_count; i++) { vnic_rq_clean(&enic->rq[i].vrq, enic_free_rq_buf); + page_pool_destroy(enic->rq[i].pool); + enic->rq[i].pool = NULL; + } for (i = 0; i < enic->cq_count; i++) vnic_cq_clean(&enic->cq[i]); for (i = 0; i < enic->intr_count; i++) @@ -2405,6 +2141,7 @@ static void enic_reset(struct work_struct *work) enic_init_vnic_resources(enic); enic_set_rss_nic_cfg(enic); enic_dev_set_ig_vlan_rewrite_mode(enic); + enic_ext_cq(enic); enic_open(enic->netdev); /* Allow infiniband to fiddle with the device again */ @@ -2431,6 +2168,7 @@ static void enic_tx_hang_reset(struct work_struct *work) enic_init_vnic_resources(enic); enic_set_rss_nic_cfg(enic); enic_dev_set_ig_vlan_rewrite_mode(enic); + enic_ext_cq(enic); enic_open(enic->netdev); /* Allow infiniband to fiddle with the device again */ @@ -2599,6 +2337,7 @@ static void enic_get_queue_stats_rx(struct net_device *dev, int idx, rxs->hw_drop_overruns = rqstats->pkt_truncated; rxs->csum_unnecessary = rqstats->csum_unnecessary + rqstats->csum_unnecessary_encap; + rxs->alloc_fail = 
rqstats->pp_alloc_fail; } static void enic_get_queue_stats_tx(struct net_device *dev, int idx, @@ -2626,6 +2365,7 @@ static void enic_get_base_stats(struct net_device *dev, rxs->hw_drops = 0; rxs->hw_drop_overruns = 0; rxs->csum_unnecessary = 0; + rxs->alloc_fail = 0; txs->bytes = 0; txs->packets = 0; txs->csum_none = 0; @@ -2803,6 +2543,8 @@ static int enic_dev_init(struct enic *enic) enic_get_res_counts(enic); + enic_ext_cq(enic); + err = enic_alloc_enic_resources(enic); if (err) { dev_err(dev, "Failed to allocate enic resources\n"); @@ -3179,7 +2921,6 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) dev_err(dev, "Cannot register net device, aborting\n"); goto err_out_dev_deinit; } - enic->rx_copybreak = RX_COPYBREAK_DEFAULT; return 0; diff --git a/drivers/net/ethernet/cisco/enic/enic_res.c b/drivers/net/ethernet/cisco/enic/enic_res.c index 1261251998330..bbd3143ed73e7 100644 --- a/drivers/net/ethernet/cisco/enic/enic_res.c +++ b/drivers/net/ethernet/cisco/enic/enic_res.c @@ -59,31 +59,38 @@ int enic_get_vnic_config(struct enic *enic) GET_CONFIG(intr_timer_usec); GET_CONFIG(loop_tag); GET_CONFIG(num_arfs); + GET_CONFIG(max_rq_ring); + GET_CONFIG(max_wq_ring); + GET_CONFIG(max_cq_ring); + + if (!c->max_wq_ring) + c->max_wq_ring = ENIC_MAX_WQ_DESCS_DEFAULT; + if (!c->max_rq_ring) + c->max_rq_ring = ENIC_MAX_RQ_DESCS_DEFAULT; + if (!c->max_cq_ring) + c->max_cq_ring = ENIC_MAX_CQ_DESCS_DEFAULT; c->wq_desc_count = - min_t(u32, ENIC_MAX_WQ_DESCS, - max_t(u32, ENIC_MIN_WQ_DESCS, - c->wq_desc_count)); + min_t(u32, c->max_wq_ring, + max_t(u32, ENIC_MIN_WQ_DESCS, c->wq_desc_count)); c->wq_desc_count &= 0xffffffe0; /* must be aligned to groups of 32 */ c->rq_desc_count = - min_t(u32, ENIC_MAX_RQ_DESCS, - max_t(u32, ENIC_MIN_RQ_DESCS, - c->rq_desc_count)); + min_t(u32, c->max_rq_ring, + max_t(u32, ENIC_MIN_RQ_DESCS, c->rq_desc_count)); c->rq_desc_count &= 0xffffffe0; /* must be aligned to groups of 32 */ if (c->mtu == 0) c->mtu = 1500; - c->mtu = min_t(u16, ENIC_MAX_MTU, - max_t(u16, ENIC_MIN_MTU, - c->mtu)); + c->mtu = min_t(u16, ENIC_MAX_MTU, max_t(u16, ENIC_MIN_MTU, c->mtu)); c->intr_timer_usec = min_t(u32, c->intr_timer_usec, vnic_dev_get_intr_coal_timer_max(enic->vdev)); dev_info(enic_get_dev(enic), - "vNIC MAC addr %pM wq/rq %d/%d mtu %d\n", - enic->mac_addr, c->wq_desc_count, c->rq_desc_count, c->mtu); + "vNIC MAC addr %pM wq/rq %d/%d max wq/rq/cq %d/%d/%d mtu %d\n", + enic->mac_addr, c->wq_desc_count, c->rq_desc_count, + c->max_wq_ring, c->max_rq_ring, c->max_cq_ring, c->mtu); dev_info(enic_get_dev(enic), "vNIC csum tx/rx %s/%s " "tso/lro %s/%s rss %s intr mode %s type %s timer %d usec " @@ -312,6 +319,7 @@ void enic_init_vnic_resources(struct enic *enic) int enic_alloc_vnic_resources(struct enic *enic) { enum vnic_dev_intr_mode intr_mode; + int rq_cq_desc_size; unsigned int i; int err; @@ -326,6 +334,24 @@ int enic_alloc_vnic_resources(struct enic *enic) intr_mode == VNIC_DEV_INTR_MODE_MSIX ? 
"MSI-X" : "unknown"); + switch (enic->ext_cq) { + case ENIC_RQ_CQ_ENTRY_SIZE_16: + rq_cq_desc_size = 16; + break; + case ENIC_RQ_CQ_ENTRY_SIZE_32: + rq_cq_desc_size = 32; + break; + case ENIC_RQ_CQ_ENTRY_SIZE_64: + rq_cq_desc_size = 64; + break; + default: + dev_err(enic_get_dev(enic), + "Unable to determine rq cq desc size: %d", + enic->ext_cq); + err = -ENODEV; + goto err_out; + } + /* Allocate queue resources */ @@ -348,8 +374,8 @@ int enic_alloc_vnic_resources(struct enic *enic) for (i = 0; i < enic->cq_count; i++) { if (i < enic->rq_count) err = vnic_cq_alloc(enic->vdev, &enic->cq[i], i, - enic->config.rq_desc_count, - sizeof(struct cq_enet_rq_desc)); + enic->config.rq_desc_count, + rq_cq_desc_size); else err = vnic_cq_alloc(enic->vdev, &enic->cq[i], i, enic->config.wq_desc_count, @@ -380,6 +406,39 @@ int enic_alloc_vnic_resources(struct enic *enic) err_out_cleanup: enic_free_vnic_resources(enic); - +err_out: return err; } + +/* + * CMD_CQ_ENTRY_SIZE_SET can fail on older hw generations that don't support + * that command + */ +void enic_ext_cq(struct enic *enic) +{ + u64 a0 = CMD_CQ_ENTRY_SIZE_SET, a1 = 0; + int wait = 1000; + int ret; + + spin_lock_bh(&enic->devcmd_lock); + ret = vnic_dev_cmd(enic->vdev, CMD_CAPABILITY, &a0, &a1, wait); + if (ret || a0) { + dev_info(&enic->pdev->dev, + "CMD_CQ_ENTRY_SIZE_SET not supported."); + enic->ext_cq = ENIC_RQ_CQ_ENTRY_SIZE_16; + goto out; + } + a1 &= VNIC_RQ_CQ_ENTRY_SIZE_ALL_BIT; + enic->ext_cq = fls(a1) - 1; + a0 = VNIC_RQ_ALL; + a1 = enic->ext_cq; + ret = vnic_dev_cmd(enic->vdev, CMD_CQ_ENTRY_SIZE_SET, &a0, &a1, wait); + if (ret) { + dev_info(&enic->pdev->dev, "CMD_CQ_ENTRY_SIZE_SET failed."); + enic->ext_cq = ENIC_RQ_CQ_ENTRY_SIZE_16; + } +out: + spin_unlock_bh(&enic->devcmd_lock); + dev_info(&enic->pdev->dev, "CQ entry size set to %d bytes", + 16 << enic->ext_cq); +} diff --git a/drivers/net/ethernet/cisco/enic/enic_res.h b/drivers/net/ethernet/cisco/enic/enic_res.h index b8ee42d297aaf..02dca1ae4a224 100644 --- a/drivers/net/ethernet/cisco/enic/enic_res.h +++ b/drivers/net/ethernet/cisco/enic/enic_res.h @@ -12,10 +12,13 @@ #include "vnic_wq.h" #include "vnic_rq.h" -#define ENIC_MIN_WQ_DESCS 64 -#define ENIC_MAX_WQ_DESCS 4096 -#define ENIC_MIN_RQ_DESCS 64 -#define ENIC_MAX_RQ_DESCS 4096 +#define ENIC_MIN_WQ_DESCS 64 +#define ENIC_MAX_WQ_DESCS_DEFAULT 4096 +#define ENIC_MAX_WQ_DESCS 16384 +#define ENIC_MIN_RQ_DESCS 64 +#define ENIC_MAX_RQ_DESCS 16384 +#define ENIC_MAX_RQ_DESCS_DEFAULT 4096 +#define ENIC_MAX_CQ_DESCS_DEFAULT (64 * 1024) #define ENIC_MIN_MTU ETH_MIN_MTU #define ENIC_MAX_MTU 9000 diff --git a/drivers/net/ethernet/cisco/enic/enic_rq.c b/drivers/net/ethernet/cisco/enic/enic_rq.c new file mode 100644 index 0000000000000..ccbf5c9a21d0f --- /dev/null +++ b/drivers/net/ethernet/cisco/enic/enic_rq.c @@ -0,0 +1,436 @@ +// SPDX-License-Identifier: GPL-2.0-only +// Copyright 2024 Cisco Systems, Inc. All rights reserved. 
+ +#include +#include +#include +#include "enic.h" +#include "enic_res.h" +#include "enic_rq.h" +#include "vnic_rq.h" +#include "cq_enet_desc.h" + +#define ENIC_LARGE_PKT_THRESHOLD 1000 + +static void enic_intr_update_pkt_size(struct vnic_rx_bytes_counter *pkt_size, + u32 pkt_len) +{ + if (pkt_len > ENIC_LARGE_PKT_THRESHOLD) + pkt_size->large_pkt_bytes_cnt += pkt_len; + else + pkt_size->small_pkt_bytes_cnt += pkt_len; +} + +static void enic_rq_cq_desc_dec(void *cq_desc, u8 cq_desc_size, u8 *type, + u8 *color, u16 *q_number, u16 *completed_index) +{ + /* type_color is the last field for all cq structs */ + u8 type_color; + + switch (cq_desc_size) { + case VNIC_RQ_CQ_ENTRY_SIZE_16: { + struct cq_enet_rq_desc *desc = + (struct cq_enet_rq_desc *)cq_desc; + type_color = desc->type_color; + + /* Make sure color bit is read from desc *before* other fields + * are read from desc. Hardware guarantees color bit is last + * bit (byte) written. Adding the rmb() prevents the compiler + * and/or CPU from reordering the reads which would potentially + * result in reading stale values. + */ + rmb(); + + *q_number = le16_to_cpu(desc->q_number_rss_type_flags) & + CQ_DESC_Q_NUM_MASK; + *completed_index = le16_to_cpu(desc->completed_index_flags) & + CQ_DESC_COMP_NDX_MASK; + break; + } + case VNIC_RQ_CQ_ENTRY_SIZE_32: { + struct cq_enet_rq_desc_32 *desc = + (struct cq_enet_rq_desc_32 *)cq_desc; + type_color = desc->type_color; + + /* Make sure color bit is read from desc *before* other fields + * are read from desc. Hardware guarantees color bit is last + * bit (byte) written. Adding the rmb() prevents the compiler + * and/or CPU from reordering the reads which would potentially + * result in reading stale values. + */ + rmb(); + + *q_number = le16_to_cpu(desc->q_number_rss_type_flags) & + CQ_DESC_Q_NUM_MASK; + *completed_index = le16_to_cpu(desc->completed_index_flags) & + CQ_DESC_COMP_NDX_MASK; + *completed_index |= (desc->fetch_index_flags & CQ_DESC_32_FI_MASK) << + CQ_DESC_COMP_NDX_BITS; + break; + } + case VNIC_RQ_CQ_ENTRY_SIZE_64: { + struct cq_enet_rq_desc_64 *desc = + (struct cq_enet_rq_desc_64 *)cq_desc; + type_color = desc->type_color; + + /* Make sure color bit is read from desc *before* other fields + * are read from desc. Hardware guarantees color bit is last + * bit (byte) written. Adding the rmb() prevents the compiler + * and/or CPU from reordering the reads which would potentially + * result in reading stale values. 
+ */ + rmb(); + + *q_number = le16_to_cpu(desc->q_number_rss_type_flags) & + CQ_DESC_Q_NUM_MASK; + *completed_index = le16_to_cpu(desc->completed_index_flags) & + CQ_DESC_COMP_NDX_MASK; + *completed_index |= (desc->fetch_index_flags & CQ_DESC_64_FI_MASK) << + CQ_DESC_COMP_NDX_BITS; + break; + } + } + + *color = (type_color >> CQ_DESC_COLOR_SHIFT) & CQ_DESC_COLOR_MASK; + *type = type_color & CQ_DESC_TYPE_MASK; +} + +static void enic_rq_set_skb_flags(struct vnic_rq *vrq, u8 type, u32 rss_hash, + u8 rss_type, u8 fcoe, u8 fcoe_fc_crc_ok, + u8 vlan_stripped, u8 csum_not_calc, + u8 tcp_udp_csum_ok, u8 ipv6, u8 ipv4_csum_ok, + u16 vlan_tci, struct sk_buff *skb) +{ + struct enic *enic = vnic_dev_priv(vrq->vdev); + struct net_device *netdev = enic->netdev; + struct enic_rq_stats *rqstats = &enic->rq[vrq->index].stats; + bool outer_csum_ok = true, encap = false; + + if ((netdev->features & NETIF_F_RXHASH) && rss_hash && type == 3) { + switch (rss_type) { + case CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv4: + case CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6: + case CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6_EX: + skb_set_hash(skb, rss_hash, PKT_HASH_TYPE_L4); + rqstats->l4_rss_hash++; + break; + case CQ_ENET_RQ_DESC_RSS_TYPE_IPv4: + case CQ_ENET_RQ_DESC_RSS_TYPE_IPv6: + case CQ_ENET_RQ_DESC_RSS_TYPE_IPv6_EX: + skb_set_hash(skb, rss_hash, PKT_HASH_TYPE_L3); + rqstats->l3_rss_hash++; + break; + } + } + if (enic->vxlan.vxlan_udp_port_number) { + switch (enic->vxlan.patch_level) { + case 0: + if (fcoe) { + encap = true; + outer_csum_ok = fcoe_fc_crc_ok; + } + break; + case 2: + if (type == 7 && (rss_hash & BIT(0))) { + encap = true; + outer_csum_ok = (rss_hash & BIT(1)) && + (rss_hash & BIT(2)); + } + break; + } + } + + /* Hardware does not provide the whole packet checksum. It only + * provides a pseudo checksum. Since hw validates the packet + * checksum but does not provide us the checksum value, use + * CHECKSUM_UNNECESSARY. + * + * In case of an encap pkt, tcp_udp_csum_ok is the + * inner csum_ok. outer_csum_ok is set by hw when the outer udp + * csum is correct or is zero. + */ + if ((netdev->features & NETIF_F_RXCSUM) && !csum_not_calc && + tcp_udp_csum_ok && outer_csum_ok && (ipv4_csum_ok || ipv6)) { + skb->ip_summed = CHECKSUM_UNNECESSARY; + skb->csum_level = encap; + if (encap) + rqstats->csum_unnecessary_encap++; + else + rqstats->csum_unnecessary++; + } + + if (vlan_stripped) { + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci); + rqstats->vlan_stripped++; + } +} + +/* + * The cq_enet_rq_desc accesses below use only the first 15 bytes of the CQ + * entry, which are identical for all CQ entry sizes (16, 32 and 64 bytes). + */ +static void cq_enet_rq_desc_dec(struct cq_enet_rq_desc *desc, u8 *ingress_port, + u8 *fcoe, u8 *eop, u8 *sop, u8 *rss_type, + u8 *csum_not_calc, u32 *rss_hash, + u16 *bytes_written, u8 *packet_error, + u8 *vlan_stripped, u16 *vlan_tci, + u16 *checksum, u8 *fcoe_sof, + u8 *fcoe_fc_crc_ok, u8 *fcoe_enc_error, + u8 *fcoe_eof, u8 *tcp_udp_csum_ok, u8 *udp, + u8 *tcp, u8 *ipv4_csum_ok, u8 *ipv6, u8 *ipv4, + u8 *ipv4_fragment, u8 *fcs_ok) +{ + u16 completed_index_flags; + u16 q_number_rss_type_flags; + u16 bytes_written_flags; + + completed_index_flags = le16_to_cpu(desc->completed_index_flags); + q_number_rss_type_flags = + le16_to_cpu(desc->q_number_rss_type_flags); + bytes_written_flags = le16_to_cpu(desc->bytes_written_flags); + + *ingress_port = (completed_index_flags & + CQ_ENET_RQ_DESC_FLAGS_INGRESS_PORT) ? 1 : 0; + *fcoe = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_FCOE) ? 
+ 1 : 0; + *eop = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_EOP) ? + 1 : 0; + *sop = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_SOP) ? + 1 : 0; + + *rss_type = (u8)((q_number_rss_type_flags >> CQ_DESC_Q_NUM_BITS) & + CQ_ENET_RQ_DESC_RSS_TYPE_MASK); + *csum_not_calc = (q_number_rss_type_flags & + CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC) ? 1 : 0; + + *rss_hash = le32_to_cpu(desc->rss_hash); + + *bytes_written = bytes_written_flags & + CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK; + *packet_error = (bytes_written_flags & + CQ_ENET_RQ_DESC_FLAGS_TRUNCATED) ? 1 : 0; + *vlan_stripped = (bytes_written_flags & + CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED) ? 1 : 0; + + /* + * Tag Control Information(16) = user_priority(3) + cfi(1) + vlan(12) + */ + *vlan_tci = le16_to_cpu(desc->vlan); + + if (*fcoe) { + *fcoe_sof = (u8)(le16_to_cpu(desc->checksum_fcoe) & + CQ_ENET_RQ_DESC_FCOE_SOF_MASK); + *fcoe_fc_crc_ok = (desc->flags & + CQ_ENET_RQ_DESC_FCOE_FC_CRC_OK) ? 1 : 0; + *fcoe_enc_error = (desc->flags & + CQ_ENET_RQ_DESC_FCOE_ENC_ERROR) ? 1 : 0; + *fcoe_eof = (u8)((le16_to_cpu(desc->checksum_fcoe) >> + CQ_ENET_RQ_DESC_FCOE_EOF_SHIFT) & + CQ_ENET_RQ_DESC_FCOE_EOF_MASK); + *checksum = 0; + } else { + *fcoe_sof = 0; + *fcoe_fc_crc_ok = 0; + *fcoe_enc_error = 0; + *fcoe_eof = 0; + *checksum = le16_to_cpu(desc->checksum_fcoe); + } + + *tcp_udp_csum_ok = + (desc->flags & CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK) ? 1 : 0; + *udp = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_UDP) ? 1 : 0; + *tcp = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_TCP) ? 1 : 0; + *ipv4_csum_ok = + (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK) ? 1 : 0; + *ipv6 = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV6) ? 1 : 0; + *ipv4 = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4) ? 1 : 0; + *ipv4_fragment = + (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT) ? 1 : 0; + *fcs_ok = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_FCS_OK) ? 
1 : 0; +} + +static bool enic_rq_pkt_error(struct vnic_rq *vrq, u8 packet_error, u8 fcs_ok, + u16 bytes_written) +{ + struct enic *enic = vnic_dev_priv(vrq->vdev); + struct enic_rq_stats *rqstats = &enic->rq[vrq->index].stats; + + if (packet_error) { + if (!fcs_ok) { + if (bytes_written > 0) + rqstats->bad_fcs++; + else if (bytes_written == 0) + rqstats->pkt_truncated++; + } + return true; + } + return false; +} + +int enic_rq_alloc_buf(struct vnic_rq *rq) +{ + struct enic *enic = vnic_dev_priv(rq->vdev); + struct net_device *netdev = enic->netdev; + struct enic_rq *erq = &enic->rq[rq->index]; + struct enic_rq_stats *rqstats = &erq->stats; + unsigned int offset = 0; + unsigned int len = netdev->mtu + VLAN_ETH_HLEN; + unsigned int os_buf_index = 0; + dma_addr_t dma_addr; + struct vnic_rq_buf *buf = rq->to_use; + struct page *page; + unsigned int truesize = len; + + if (buf->os_buf) { + enic_queue_rq_desc(rq, buf->os_buf, os_buf_index, buf->dma_addr, + buf->len); + + return 0; + } + + page = page_pool_dev_alloc(erq->pool, &offset, &truesize); + if (unlikely(!page)) { + rqstats->pp_alloc_fail++; + return -ENOMEM; + } + buf->offset = offset; + buf->truesize = truesize; + dma_addr = page_pool_get_dma_addr(page) + offset; + enic_queue_rq_desc(rq, (void *)page, os_buf_index, dma_addr, len); + + return 0; +} + +void enic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf) +{ + struct enic *enic = vnic_dev_priv(rq->vdev); + struct enic_rq *erq = &enic->rq[rq->index]; + + if (!buf->os_buf) + return; + + page_pool_put_full_page(erq->pool, (struct page *)buf->os_buf, true); + buf->os_buf = NULL; +} + +static void enic_rq_indicate_buf(struct enic *enic, struct vnic_rq *rq, + struct vnic_rq_buf *buf, void *cq_desc, + u8 type, u16 q_number, u16 completed_index) +{ + struct sk_buff *skb; + struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)]; + struct enic_rq_stats *rqstats = &enic->rq[rq->index].stats; + struct napi_struct *napi; + + u8 eop, sop, ingress_port, vlan_stripped; + u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof; + u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok; + u8 ipv6, ipv4, ipv4_fragment, fcs_ok, rss_type, csum_not_calc; + u8 packet_error; + u16 bytes_written, vlan_tci, checksum; + u32 rss_hash; + + rqstats->packets++; + + cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc, &ingress_port, + &fcoe, &eop, &sop, &rss_type, &csum_not_calc, + &rss_hash, &bytes_written, &packet_error, + &vlan_stripped, &vlan_tci, &checksum, &fcoe_sof, + &fcoe_fc_crc_ok, &fcoe_enc_error, &fcoe_eof, + &tcp_udp_csum_ok, &udp, &tcp, &ipv4_csum_ok, &ipv6, + &ipv4, &ipv4_fragment, &fcs_ok); + + if (enic_rq_pkt_error(rq, packet_error, fcs_ok, bytes_written)) + return; + + if (eop && bytes_written > 0) { + /* Good receive + */ + rqstats->bytes += bytes_written; + napi = &enic->napi[rq->index]; + skb = napi_get_frags(napi); + if (unlikely(!skb)) { + net_warn_ratelimited("%s: skb alloc error rq[%d], desc[%d]\n", + enic->netdev->name, rq->index, + completed_index); + rqstats->no_skb++; + return; + } + + prefetch(skb->data - NET_IP_ALIGN); + + dma_sync_single_for_cpu(&enic->pdev->dev, buf->dma_addr, + bytes_written, DMA_FROM_DEVICE); + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, + (struct page *)buf->os_buf, buf->offset, + bytes_written, buf->truesize); + skb_record_rx_queue(skb, q_number); + enic_rq_set_skb_flags(rq, type, rss_hash, rss_type, fcoe, + fcoe_fc_crc_ok, vlan_stripped, + csum_not_calc, tcp_udp_csum_ok, ipv6, + ipv4_csum_ok, vlan_tci, skb); + skb_mark_for_recycle(skb); + 
napi_gro_frags(napi); + if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce) + enic_intr_update_pkt_size(&cq->pkt_size_counter, + bytes_written); + buf->os_buf = NULL; + buf->dma_addr = 0; + buf = buf->next; + } else { + /* Buffer overflow + */ + rqstats->pkt_truncated++; + } +} + +static void enic_rq_service(struct enic *enic, void *cq_desc, u8 type, + u16 q_number, u16 completed_index) +{ + struct enic_rq_stats *rqstats = &enic->rq[q_number].stats; + struct vnic_rq *vrq = &enic->rq[q_number].vrq; + struct vnic_rq_buf *vrq_buf = vrq->to_clean; + int skipped; + + while (1) { + skipped = (vrq_buf->index != completed_index); + if (!skipped) + enic_rq_indicate_buf(enic, vrq, vrq_buf, cq_desc, type, + q_number, completed_index); + else + rqstats->desc_skip++; + + vrq->ring.desc_avail++; + vrq->to_clean = vrq_buf->next; + vrq_buf = vrq_buf->next; + if (!skipped) + break; + } +} + +unsigned int enic_rq_cq_service(struct enic *enic, unsigned int cq_index, + unsigned int work_to_do) +{ + struct vnic_cq *cq = &enic->cq[cq_index]; + void *cq_desc = vnic_cq_to_clean(cq); + u16 q_number, completed_index; + unsigned int work_done = 0; + u8 type, color; + + enic_rq_cq_desc_dec(cq_desc, enic->ext_cq, &type, &color, &q_number, + &completed_index); + + while (color != cq->last_color) { + enic_rq_service(enic, cq_desc, type, q_number, completed_index); + vnic_cq_inc_to_clean(cq); + + if (++work_done >= work_to_do) + break; + + cq_desc = vnic_cq_to_clean(cq); + enic_rq_cq_desc_dec(cq_desc, enic->ext_cq, &type, &color, + &q_number, &completed_index); + } + + return work_done; +} diff --git a/drivers/net/ethernet/cisco/enic/enic_rq.h b/drivers/net/ethernet/cisco/enic/enic_rq.h new file mode 100644 index 0000000000000..98476a7297afb --- /dev/null +++ b/drivers/net/ethernet/cisco/enic/enic_rq.h @@ -0,0 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0-only + * Copyright 2024 Cisco Systems, Inc. All rights reserved. + */ + +unsigned int enic_rq_cq_service(struct enic *enic, unsigned int cq_index, + unsigned int work_to_do); +int enic_rq_alloc_buf(struct vnic_rq *rq); +void enic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf); diff --git a/drivers/net/ethernet/cisco/enic/enic_wq.c b/drivers/net/ethernet/cisco/enic/enic_wq.c new file mode 100644 index 0000000000000..07936f8b42317 --- /dev/null +++ b/drivers/net/ethernet/cisco/enic/enic_wq.c @@ -0,0 +1,117 @@ +// SPDX-License-Identifier: GPL-2.0-only +// Copyright 2025 Cisco Systems, Inc. All rights reserved. + +#include +#include "enic_res.h" +#include "enic.h" +#include "enic_wq.h" + +#define ENET_CQ_DESC_COMP_NDX_BITS 14 +#define ENET_CQ_DESC_COMP_NDX_MASK GENMASK(ENET_CQ_DESC_COMP_NDX_BITS - 1, 0) + +static void enic_wq_cq_desc_dec(const struct cq_desc *desc_arg, bool ext_wq, + u8 *type, u8 *color, u16 *q_number, + u16 *completed_index) +{ + const struct cq_desc *desc = desc_arg; + const u8 type_color = desc->type_color; + + *color = (type_color >> CQ_DESC_COLOR_SHIFT) & CQ_DESC_COLOR_MASK; + + /* + * Make sure color bit is read from desc *before* other fields + * are read from desc. Hardware guarantees color bit is last + * bit (byte) written. Adding the rmb() prevents the compiler + * and/or CPU from reordering the reads which would potentially + * result in reading stale values. 
+ */ + rmb(); + + *type = type_color & CQ_DESC_TYPE_MASK; + *q_number = le16_to_cpu(desc->q_number) & CQ_DESC_Q_NUM_MASK; + + if (ext_wq) + *completed_index = le16_to_cpu(desc->completed_index) & + ENET_CQ_DESC_COMP_NDX_MASK; + else + *completed_index = le16_to_cpu(desc->completed_index) & + CQ_DESC_COMP_NDX_MASK; +} + +void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf) +{ + struct enic *enic = vnic_dev_priv(wq->vdev); + + if (buf->sop) + dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len, + DMA_TO_DEVICE); + else + dma_unmap_page(&enic->pdev->dev, buf->dma_addr, buf->len, + DMA_TO_DEVICE); + + if (buf->os_buf) + dev_kfree_skb_any(buf->os_buf); +} + +static void enic_wq_free_buf(struct vnic_wq *wq, struct cq_desc *cq_desc, + struct vnic_wq_buf *buf, void *opaque) +{ + struct enic *enic = vnic_dev_priv(wq->vdev); + + enic->wq[wq->index].stats.cq_work++; + enic->wq[wq->index].stats.cq_bytes += buf->len; + enic_free_wq_buf(wq, buf); +} + +static void enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc, + u8 type, u16 q_number, u16 completed_index) +{ + struct enic *enic = vnic_dev_priv(vdev); + + spin_lock(&enic->wq[q_number].lock); + + vnic_wq_service(&enic->wq[q_number].vwq, cq_desc, + completed_index, enic_wq_free_buf, NULL); + + if (netif_tx_queue_stopped(netdev_get_tx_queue(enic->netdev, q_number)) + && vnic_wq_desc_avail(&enic->wq[q_number].vwq) >= + (MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)) { + netif_wake_subqueue(enic->netdev, q_number); + enic->wq[q_number].stats.wake++; + } + + spin_unlock(&enic->wq[q_number].lock); +} + +unsigned int enic_wq_cq_service(struct enic *enic, unsigned int cq_index, + unsigned int work_to_do) +{ + struct vnic_cq *cq = &enic->cq[cq_index]; + u16 q_number, completed_index; + unsigned int work_done = 0; + struct cq_desc *cq_desc; + u8 type, color; + bool ext_wq; + + ext_wq = cq->ring.size > ENIC_MAX_WQ_DESCS_DEFAULT; + + cq_desc = (struct cq_desc *)vnic_cq_to_clean(cq); + enic_wq_cq_desc_dec(cq_desc, ext_wq, &type, &color, + &q_number, &completed_index); + + while (color != cq->last_color) { + enic_wq_service(cq->vdev, cq_desc, type, q_number, + completed_index); + + vnic_cq_inc_to_clean(cq); + + if (++work_done >= work_to_do) + break; + + cq_desc = (struct cq_desc *)vnic_cq_to_clean(cq); + enic_wq_cq_desc_dec(cq_desc, ext_wq, &type, &color, + &q_number, &completed_index); + } + + return work_done; +} diff --git a/drivers/net/ethernet/cisco/enic/enic_wq.h b/drivers/net/ethernet/cisco/enic/enic_wq.h new file mode 100644 index 0000000000000..12acb3f2fbc94 --- /dev/null +++ b/drivers/net/ethernet/cisco/enic/enic_wq.h @@ -0,0 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0-only + * Copyright 2025 Cisco Systems, Inc. All rights reserved. 
+ */ + +void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf); +unsigned int enic_wq_cq_service(struct enic *enic, unsigned int cq_index, + unsigned int work_to_do); diff --git a/drivers/net/ethernet/cisco/enic/vnic_cq.h b/drivers/net/ethernet/cisco/enic/vnic_cq.h index eed5bf59e5d2c..0e37f5d5e5272 100644 --- a/drivers/net/ethernet/cisco/enic/vnic_cq.h +++ b/drivers/net/ethernet/cisco/enic/vnic_cq.h @@ -56,45 +56,18 @@ struct vnic_cq { ktime_t prev_ts; }; -static inline unsigned int vnic_cq_service(struct vnic_cq *cq, - unsigned int work_to_do, - int (*q_service)(struct vnic_dev *vdev, struct cq_desc *cq_desc, - u8 type, u16 q_number, u16 completed_index, void *opaque), - void *opaque) +static inline void *vnic_cq_to_clean(struct vnic_cq *cq) { - struct cq_desc *cq_desc; - unsigned int work_done = 0; - u16 q_number, completed_index; - u8 type, color; - - cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs + - cq->ring.desc_size * cq->to_clean); - cq_desc_dec(cq_desc, &type, &color, - &q_number, &completed_index); - - while (color != cq->last_color) { - - if ((*q_service)(cq->vdev, cq_desc, type, - q_number, completed_index, opaque)) - break; - - cq->to_clean++; - if (cq->to_clean == cq->ring.desc_count) { - cq->to_clean = 0; - cq->last_color = cq->last_color ? 0 : 1; - } - - cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs + - cq->ring.desc_size * cq->to_clean); - cq_desc_dec(cq_desc, &type, &color, - &q_number, &completed_index); + return ((u8 *)cq->ring.descs + cq->ring.desc_size * cq->to_clean); +} - work_done++; - if (work_done >= work_to_do) - break; +static inline void vnic_cq_inc_to_clean(struct vnic_cq *cq) +{ + cq->to_clean++; + if (cq->to_clean == cq->ring.desc_count) { + cq->to_clean = 0; + cq->last_color = cq->last_color ? 0 : 1; } - - return work_done; } void vnic_cq_free(struct vnic_cq *cq); diff --git a/drivers/net/ethernet/cisco/enic/vnic_devcmd.h b/drivers/net/ethernet/cisco/enic/vnic_devcmd.h index db56d778877a7..605ef17f967e4 100644 --- a/drivers/net/ethernet/cisco/enic/vnic_devcmd.h +++ b/drivers/net/ethernet/cisco/enic/vnic_devcmd.h @@ -436,6 +436,25 @@ enum vnic_devcmd_cmd { * in: (u16) a2 = unsigned short int port information */ CMD_OVERLAY_OFFLOAD_CFG = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 73), + + /* + * Set extended CQ field in MREGS of RQ (or all RQs) + * for given vNIC + * in: (u64) a0 = RQ selection (VNIC_RQ_ALL for all RQs) + * (u32) a1 = CQ entry size + * VNIC_RQ_CQ_ENTRY_SIZE_16 --> 16 bytes + * VNIC_RQ_CQ_ENTRY_SIZE_32 --> 32 bytes + * VNIC_RQ_CQ_ENTRY_SIZE_64 --> 64 bytes + * + * Capability query: + * out: (u32) a0 = errno, 0:valid cmd + * (u32) a1 = value consisting of supported entries + * bit 0: 16 bytes + * bit 1: 32 bytes + * bit 2: 64 bytes + */ + CMD_CQ_ENTRY_SIZE_SET = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 90), + }; /* CMD_ENABLE2 flags */ diff --git a/drivers/net/ethernet/cisco/enic/vnic_enet.h b/drivers/net/ethernet/cisco/enic/vnic_enet.h index 5acc236069dea..9e8e86262a3fe 100644 --- a/drivers/net/ethernet/cisco/enic/vnic_enet.h +++ b/drivers/net/ethernet/cisco/enic/vnic_enet.h @@ -21,6 +21,11 @@ struct vnic_enet_config { u16 loop_tag; u16 vf_rq_count; u16 num_arfs; + u8 reserved[66]; + u32 max_rq_ring; // MAX RQ ring size + u32 max_wq_ring; // MAX WQ ring size + u32 max_cq_ring; // MAX CQ ring size + u32 rdma_rsvd_lkey; // Reserved (privileged) LKey }; #define VENETF_TSO 0x1 /* TSO enabled */ diff --git a/drivers/net/ethernet/cisco/enic/vnic_rq.h b/drivers/net/ethernet/cisco/enic/vnic_rq.h index 0bc595abc03b5..a1cdd729caece 100644 
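With max_wq_ring/max_rq_ring now reported by firmware through vnic_enet_config, enic_get_vnic_config() clamps the configured descriptor counts to the range [ENIC_MIN_*_DESCS, max_*_ring] and rounds down to a multiple of 32 via the 0xffffffe0 mask. A small standalone sketch of that clamping is below; clamp_ring is a hypothetical helper and the requested sizes are made up.

#include <stdio.h>

#define ENIC_MIN_RQ_DESCS 64

/* Mirrors the min_t/max_t clamp plus the group-of-32 alignment */
static unsigned int clamp_ring(unsigned int want, unsigned int max_ring)
{
	unsigned int v = want;

	if (v < ENIC_MIN_RQ_DESCS)
		v = ENIC_MIN_RQ_DESCS;
	if (v > max_ring)
		v = max_ring;
	return v & 0xffffffe0;	/* must be aligned to groups of 32 */
}

int main(void)
{
	printf("%u\n", clamp_ring(5000, 16384));	/* 4992 */
	printf("%u\n", clamp_ring(10, 16384));		/* 64 */
	printf("%u\n", clamp_ring(8192, 4096));		/* 4096: old cap */
	return 0;
}

When firmware reports zero for a maximum, the driver falls back to the 4096-entry defaults, so older adapters keep their previous limits.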
--- a/drivers/net/ethernet/cisco/enic/vnic_rq.h +++ b/drivers/net/ethernet/cisco/enic/vnic_rq.h @@ -50,7 +50,7 @@ struct vnic_rq_ctrl { (VNIC_RQ_BUF_BLK_ENTRIES(entries) * sizeof(struct vnic_rq_buf)) #define VNIC_RQ_BUF_BLKS_NEEDED(entries) \ DIV_ROUND_UP(entries, VNIC_RQ_BUF_BLK_ENTRIES(entries)) -#define VNIC_RQ_BUF_BLKS_MAX VNIC_RQ_BUF_BLKS_NEEDED(4096) +#define VNIC_RQ_BUF_BLKS_MAX VNIC_RQ_BUF_BLKS_NEEDED(16384) struct vnic_rq_buf { struct vnic_rq_buf *next; @@ -61,6 +61,8 @@ struct vnic_rq_buf { unsigned int index; void *desc; uint64_t wr_id; + unsigned int offset; + unsigned int truesize; }; enum enic_poll_state { diff --git a/drivers/net/ethernet/cisco/enic/vnic_wq.h b/drivers/net/ethernet/cisco/enic/vnic_wq.h index 75c5269110744..3bb4758100ba4 100644 --- a/drivers/net/ethernet/cisco/enic/vnic_wq.h +++ b/drivers/net/ethernet/cisco/enic/vnic_wq.h @@ -62,7 +62,7 @@ struct vnic_wq_buf { (VNIC_WQ_BUF_BLK_ENTRIES(entries) * sizeof(struct vnic_wq_buf)) #define VNIC_WQ_BUF_BLKS_NEEDED(entries) \ DIV_ROUND_UP(entries, VNIC_WQ_BUF_BLK_ENTRIES(entries)) -#define VNIC_WQ_BUF_BLKS_MAX VNIC_WQ_BUF_BLKS_NEEDED(4096) +#define VNIC_WQ_BUF_BLKS_MAX VNIC_WQ_BUF_BLKS_NEEDED(16384) struct vnic_wq { unsigned int index; diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c index 991e3839858b5..1f8067bdd61a6 100644 --- a/drivers/net/ethernet/cortina/gemini.c +++ b/drivers/net/ethernet/cortina/gemini.c @@ -40,6 +40,7 @@ #include #include #include +#include #include "gemini.h" diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c index 27e01d780cd02..75eac18ff2466 100644 --- a/drivers/net/ethernet/dec/tulip/tulip_core.c +++ b/drivers/net/ethernet/dec/tulip/tulip_core.c @@ -1177,7 +1177,6 @@ static void set_rx_mode(struct net_device *dev) iowrite32(csr6, ioaddr + CSR6); } -#ifdef CONFIG_TULIP_MWI static void tulip_mwi_config(struct pci_dev *pdev, struct net_device *dev) { struct tulip_private *tp = netdev_priv(dev); @@ -1251,7 +1250,6 @@ static void tulip_mwi_config(struct pci_dev *pdev, struct net_device *dev) netdev_dbg(dev, "MWI config cacheline=%d, csr0=%08x\n", cache, csr0); } -#endif /* * Chips that have the MRM/reserved bit quirk and the burst quirk. That @@ -1463,10 +1461,9 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) INIT_WORK(&tp->media_work, tulip_tbl[tp->chip_id].media_task); -#ifdef CONFIG_TULIP_MWI - if (!force_csr0 && (tp->flags & HAS_PCI_MWI)) + if (IS_ENABLED(CONFIG_TULIP_MWI) && !force_csr0 && + (tp->flags & HAS_PCI_MWI)) tulip_mwi_config (pdev, dev); -#endif /* Stop the chip's Tx and Rx processes. 
*/ tulip_stop_rxtx(tp); diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h index e48b861e4ce15..270ff9aab3352 100644 --- a/drivers/net/ethernet/emulex/benet/be.h +++ b/drivers/net/ethernet/emulex/benet/be.h @@ -562,7 +562,7 @@ struct be_adapter { struct be_dma_mem mbox_mem_alloced; struct be_mcc_obj mcc_obj; - struct mutex mcc_lock; /* For serializing mcc cmds to BE card */ + spinlock_t mcc_lock; /* For serializing mcc cmds to BE card */ spinlock_t mcc_cq_lock; u16 cfg_num_rx_irqs; /* configured via set-channels */ diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index 61adcebeef010..51b8377edd1d0 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c @@ -575,7 +575,7 @@ int be_process_mcc(struct be_adapter *adapter) /* Wait till no more pending mcc requests are present */ static int be_mcc_wait_compl(struct be_adapter *adapter) { -#define mcc_timeout 12000 /* 12s timeout */ +#define mcc_timeout 120000 /* 12s timeout */ int i, status = 0; struct be_mcc_obj *mcc_obj = &adapter->mcc_obj; @@ -589,7 +589,7 @@ static int be_mcc_wait_compl(struct be_adapter *adapter) if (atomic_read(&mcc_obj->q.used) == 0) break; - usleep_range(500, 1000); + udelay(100); } if (i == mcc_timeout) { dev_err(&adapter->pdev->dev, "FW not responding\n"); @@ -866,7 +866,7 @@ static bool use_mcc(struct be_adapter *adapter) static int be_cmd_lock(struct be_adapter *adapter) { if (use_mcc(adapter)) { - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); return 0; } else { return mutex_lock_interruptible(&adapter->mbox_lock); @@ -877,7 +877,7 @@ static int be_cmd_lock(struct be_adapter *adapter) static void be_cmd_unlock(struct be_adapter *adapter) { if (use_mcc(adapter)) - return mutex_unlock(&adapter->mcc_lock); + return spin_unlock_bh(&adapter->mcc_lock); else return mutex_unlock(&adapter->mbox_lock); } @@ -1047,7 +1047,7 @@ int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr, struct be_cmd_req_mac_query *req; int status; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -1076,7 +1076,7 @@ int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr, } err: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -1088,7 +1088,7 @@ int be_cmd_pmac_add(struct be_adapter *adapter, const u8 *mac_addr, struct be_cmd_req_pmac_add *req; int status; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -1113,7 +1113,7 @@ int be_cmd_pmac_add(struct be_adapter *adapter, const u8 *mac_addr, } err: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); if (base_status(status) == MCC_STATUS_UNAUTHORIZED_REQUEST) status = -EPERM; @@ -1131,7 +1131,7 @@ int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id, u32 dom) if (pmac_id == -1) return 0; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -1151,7 +1151,7 @@ int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id, u32 dom) status = be_mcc_notify_wait(adapter); err: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -1414,7 +1414,7 @@ int be_cmd_rxq_create(struct be_adapter *adapter, struct be_dma_mem *q_mem = &rxq->dma_mem; int status; - mutex_lock(&adapter->mcc_lock); + 
spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -1444,7 +1444,7 @@ int be_cmd_rxq_create(struct be_adapter *adapter, } err: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -1508,7 +1508,7 @@ int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q) struct be_cmd_req_q_destroy *req; int status; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -1525,7 +1525,7 @@ int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q) q->created = false; err: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -1593,7 +1593,7 @@ int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd) struct be_cmd_req_hdr *hdr; int status = 0; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -1621,7 +1621,7 @@ int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd) adapter->stats_cmd_sent = true; err: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -1637,7 +1637,7 @@ int lancer_cmd_get_pport_stats(struct be_adapter *adapter, CMD_SUBSYSTEM_ETH)) return -EPERM; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -1660,7 +1660,7 @@ int lancer_cmd_get_pport_stats(struct be_adapter *adapter, adapter->stats_cmd_sent = true; err: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -1697,7 +1697,7 @@ int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed, struct be_cmd_req_link_status *req; int status; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); if (link_status) *link_status = LINK_DOWN; @@ -1736,7 +1736,7 @@ int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed, } err: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -1747,7 +1747,7 @@ int be_cmd_get_die_temperature(struct be_adapter *adapter) struct be_cmd_req_get_cntl_addnl_attribs *req; int status = 0; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -1762,7 +1762,7 @@ int be_cmd_get_die_temperature(struct be_adapter *adapter) status = be_mcc_notify(adapter); err: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -1811,7 +1811,7 @@ int be_cmd_get_fat_dump(struct be_adapter *adapter, u32 buf_len, void *buf) if (!get_fat_cmd.va) return -ENOMEM; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); while (total_size) { buf_size = min(total_size, (u32)60 * 1024); @@ -1849,9 +1849,9 @@ int be_cmd_get_fat_dump(struct be_adapter *adapter, u32 buf_len, void *buf) log_offset += buf_size; } err: + spin_unlock_bh(&adapter->mcc_lock); dma_free_coherent(&adapter->pdev->dev, get_fat_cmd.size, get_fat_cmd.va, get_fat_cmd.dma); - mutex_unlock(&adapter->mcc_lock); return status; } @@ -1862,7 +1862,7 @@ int be_cmd_get_fw_ver(struct be_adapter *adapter) struct be_cmd_req_get_fw_version *req; int status; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -1885,7 +1885,7 @@ int be_cmd_get_fw_ver(struct be_adapter *adapter) sizeof(adapter->fw_on_flash)); } err: - mutex_unlock(&adapter->mcc_lock); + 
spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -1899,7 +1899,7 @@ static int __be_cmd_modify_eqd(struct be_adapter *adapter, struct be_cmd_req_modify_eq_delay *req; int status = 0, i; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -1922,7 +1922,7 @@ static int __be_cmd_modify_eqd(struct be_adapter *adapter, status = be_mcc_notify(adapter); err: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -1949,7 +1949,7 @@ int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array, struct be_cmd_req_vlan_config *req; int status; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -1971,7 +1971,7 @@ int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array, status = be_mcc_notify_wait(adapter); err: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -1982,7 +1982,7 @@ static int __be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value) struct be_cmd_req_rx_filter *req = mem->va; int status; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -2015,7 +2015,7 @@ static int __be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value) status = be_mcc_notify_wait(adapter); err: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -2046,7 +2046,7 @@ int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc) CMD_SUBSYSTEM_COMMON)) return -EPERM; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -2066,7 +2066,7 @@ int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc) status = be_mcc_notify_wait(adapter); err: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); if (base_status(status) == MCC_STATUS_FEATURE_NOT_SUPPORTED) return -EOPNOTSUPP; @@ -2085,7 +2085,7 @@ int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc) CMD_SUBSYSTEM_COMMON)) return -EPERM; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -2108,7 +2108,7 @@ int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc) } err: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -2189,7 +2189,7 @@ int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable, if (!(be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS)) return 0; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -2214,7 +2214,7 @@ int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable, status = be_mcc_notify_wait(adapter); err: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -2226,7 +2226,7 @@ int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num, struct be_cmd_req_enable_disable_beacon *req; int status; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -2247,7 +2247,7 @@ int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num, status = be_mcc_notify_wait(adapter); err: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -2258,7 +2258,7 @@ int be_cmd_get_beacon_state(struct be_adapter 
*adapter, u8 port_num, u32 *state) struct be_cmd_req_get_beacon_state *req; int status; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -2282,7 +2282,7 @@ int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state) } err: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -2306,7 +2306,7 @@ int be_cmd_read_port_transceiver_data(struct be_adapter *adapter, return -ENOMEM; } - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -2328,7 +2328,7 @@ int be_cmd_read_port_transceiver_data(struct be_adapter *adapter, memcpy(data, resp->page_data + off, len); } err: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma); return status; } @@ -2345,7 +2345,7 @@ static int lancer_cmd_write_object(struct be_adapter *adapter, void *ctxt = NULL; int status; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); adapter->flash_status = 0; wrb = wrb_from_mccq(adapter); @@ -2387,7 +2387,7 @@ static int lancer_cmd_write_object(struct be_adapter *adapter, if (status) goto err_unlock; - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); if (!wait_for_completion_timeout(&adapter->et_cmd_compl, msecs_to_jiffies(60000))) @@ -2406,7 +2406,7 @@ static int lancer_cmd_write_object(struct be_adapter *adapter, return status; err_unlock: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -2460,7 +2460,7 @@ static int lancer_cmd_delete_object(struct be_adapter *adapter, struct be_mcc_wrb *wrb; int status; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -2478,7 +2478,7 @@ static int lancer_cmd_delete_object(struct be_adapter *adapter, status = be_mcc_notify_wait(adapter); err: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -2491,7 +2491,7 @@ int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd, struct lancer_cmd_resp_read_object *resp; int status; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -2525,7 +2525,7 @@ int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd, } err_unlock: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -2537,7 +2537,7 @@ static int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_cmd_write_flashrom *req; int status; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); adapter->flash_status = 0; wrb = wrb_from_mccq(adapter); @@ -2562,7 +2562,7 @@ static int be_cmd_write_flashrom(struct be_adapter *adapter, if (status) goto err_unlock; - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); if (!wait_for_completion_timeout(&adapter->et_cmd_compl, msecs_to_jiffies(40000))) @@ -2573,7 +2573,7 @@ static int be_cmd_write_flashrom(struct be_adapter *adapter, return status; err_unlock: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -2584,7 +2584,7 @@ static int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc, struct be_mcc_wrb *wrb; int status; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -2611,7 +2611,7 @@ 
static int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc, memcpy(flashed_crc, req->crc, 4); err: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -3217,7 +3217,7 @@ int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac, struct be_cmd_req_acpi_wol_magic_config *req; int status; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -3234,7 +3234,7 @@ int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac, status = be_mcc_notify_wait(adapter); err: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -3249,7 +3249,7 @@ int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num, CMD_SUBSYSTEM_LOWLEVEL)) return -EPERM; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -3272,7 +3272,7 @@ int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num, if (status) goto err_unlock; - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); if (!wait_for_completion_timeout(&adapter->et_cmd_compl, msecs_to_jiffies(SET_LB_MODE_TIMEOUT))) @@ -3281,7 +3281,7 @@ int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num, return status; err_unlock: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -3298,7 +3298,7 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num, CMD_SUBSYSTEM_LOWLEVEL)) return -EPERM; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -3324,7 +3324,7 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num, if (status) goto err; - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); wait_for_completion(&adapter->et_cmd_compl); resp = embedded_payload(wrb); @@ -3332,7 +3332,7 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num, return status; err: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -3348,7 +3348,7 @@ int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern, CMD_SUBSYSTEM_LOWLEVEL)) return -EPERM; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -3382,7 +3382,7 @@ int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern, } err: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -3393,7 +3393,7 @@ int be_cmd_get_seeprom_data(struct be_adapter *adapter, struct be_cmd_req_seeprom_read *req; int status; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -3409,7 +3409,7 @@ int be_cmd_get_seeprom_data(struct be_adapter *adapter, status = be_mcc_notify_wait(adapter); err: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -3424,7 +3424,7 @@ int be_cmd_get_phy_info(struct be_adapter *adapter) CMD_SUBSYSTEM_COMMON)) return -EPERM; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -3469,7 +3469,7 @@ int be_cmd_get_phy_info(struct be_adapter *adapter) } dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma); err: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -3479,7 +3479,7 @@ static int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain) 
struct be_cmd_req_set_qos *req; int status; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -3499,7 +3499,7 @@ static int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain) status = be_mcc_notify_wait(adapter); err: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -3611,7 +3611,7 @@ int be_cmd_get_fn_privileges(struct be_adapter *adapter, u32 *privilege, struct be_cmd_req_get_fn_privileges *req; int status; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -3643,7 +3643,7 @@ int be_cmd_get_fn_privileges(struct be_adapter *adapter, u32 *privilege, } err: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -3655,7 +3655,7 @@ int be_cmd_set_fn_privileges(struct be_adapter *adapter, u32 privileges, struct be_cmd_req_set_fn_privileges *req; int status; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -3675,7 +3675,7 @@ int be_cmd_set_fn_privileges(struct be_adapter *adapter, u32 privileges, status = be_mcc_notify_wait(adapter); err: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -3707,7 +3707,7 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac, return -ENOMEM; } - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -3771,7 +3771,7 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac, } out: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); dma_free_coherent(&adapter->pdev->dev, get_mac_list_cmd.size, get_mac_list_cmd.va, get_mac_list_cmd.dma); return status; @@ -3831,7 +3831,7 @@ int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array, if (!cmd.va) return -ENOMEM; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -3853,7 +3853,7 @@ int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array, err: dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma); - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -3889,7 +3889,7 @@ int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid, CMD_SUBSYSTEM_COMMON)) return -EPERM; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -3930,7 +3930,7 @@ int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid, status = be_mcc_notify_wait(adapter); err: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -3944,7 +3944,7 @@ int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid, int status; u16 vid; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -3991,7 +3991,7 @@ int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid, } err: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -4190,7 +4190,7 @@ int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter, struct be_cmd_req_set_ext_fat_caps *req; int status; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -4206,7 +4206,7 @@ int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter, status = be_mcc_notify_wait(adapter); 
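/* Illustrative aside, not part of this patch: with mcc_lock converted from a
 * mutex to a BH-disabling spinlock, nothing may sleep while holding it. That
 * is why be_mcc_wait_compl() above now busy-waits with udelay(100) instead of
 * usleep_range(500, 1000), and why the iteration count grew from 12000 to
 * 120000: 120000 iterations * 100 us = 12 s, so the "12s timeout" comment
 * still holds. A minimal sketch of the resulting poll pattern (hypothetical
 * helper name; the be_process_mcc() completion step is elided):
 */
static int be_mcc_poll_example(struct be_adapter *adapter)
{
	int i;

	for (i = 0; i < 120000; i++) {
		if (atomic_read(&adapter->mcc_obj.q.used) == 0)
			return 0;	/* all queued MCC commands completed */
		udelay(100);		/* must not sleep under spin_lock_bh() */
	}
	return -EIO;			/* firmware unresponsive for 12 s */
}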
err: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -4684,7 +4684,7 @@ int be_cmd_manage_iface(struct be_adapter *adapter, u32 iface, u8 op) if (iface == 0xFFFFFFFF) return -1; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -4701,7 +4701,7 @@ int be_cmd_manage_iface(struct be_adapter *adapter, u32 iface, u8 op) status = be_mcc_notify_wait(adapter); err: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -4735,7 +4735,7 @@ int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg, struct be_cmd_resp_get_iface_list *resp; int status; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -4756,7 +4756,7 @@ int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg, } err: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -4850,7 +4850,7 @@ int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain) if (BEx_chip(adapter)) return 0; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -4868,7 +4868,7 @@ int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain) req->enable = 1; status = be_mcc_notify_wait(adapter); err: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -4941,7 +4941,7 @@ __be_cmd_set_logical_link_config(struct be_adapter *adapter, u32 link_config = 0; int status; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -4969,7 +4969,7 @@ __be_cmd_set_logical_link_config(struct be_adapter *adapter, status = be_mcc_notify_wait(adapter); err: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -5000,8 +5000,7 @@ int be_cmd_set_features(struct be_adapter *adapter) struct be_mcc_wrb *wrb; int status; - if (mutex_lock_interruptible(&adapter->mcc_lock)) - return -1; + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -5039,7 +5038,7 @@ int be_cmd_set_features(struct be_adapter *adapter) dev_info(&adapter->pdev->dev, "Adapter does not support HW error recovery\n"); - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -5053,7 +5052,7 @@ int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload, struct be_cmd_resp_hdr *resp; int status; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -5076,7 +5075,7 @@ int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload, memcpy(wrb_payload, resp, sizeof(*resp) + resp->response_length); be_dws_le_to_cpu(wrb_payload, sizeof(*resp) + resp->response_length); err: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); return status; } EXPORT_SYMBOL(be_roce_mcc_cmd); diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 875fe379eea21..3d2e215921191 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -5667,8 +5667,8 @@ static int be_drv_init(struct be_adapter *adapter) } mutex_init(&adapter->mbox_lock); - mutex_init(&adapter->mcc_lock); mutex_init(&adapter->rx_filter_lock); + spin_lock_init(&adapter->mcc_lock); spin_lock_init(&adapter->mcc_cq_lock); init_completion(&adapter->et_cmd_compl); diff 
--git a/drivers/net/ethernet/engleder/tsnep_main.c b/drivers/net/ethernet/engleder/tsnep_main.c index 0d030cb0b21c7..625245b0845c7 100644 --- a/drivers/net/ethernet/engleder/tsnep_main.c +++ b/drivers/net/ethernet/engleder/tsnep_main.c @@ -221,20 +221,19 @@ static void tsnep_phy_link_status_change(struct net_device *netdev) static int tsnep_phy_loopback(struct tsnep_adapter *adapter, bool enable) { - int retval; - - retval = phy_loopback(adapter->phydev, enable); + int speed; - /* PHY link state change is not signaled if loopback is enabled, it - * would delay a working loopback anyway, let's ensure that loopback - * is working immediately by setting link mode directly - */ - if (!retval && enable) { - netif_carrier_on(adapter->netdev); - tsnep_set_link_mode(adapter); + if (enable) { + if (adapter->phydev->autoneg == AUTONEG_DISABLE && + adapter->phydev->speed == SPEED_100) + speed = SPEED_100; + else + speed = SPEED_1000; + } else { + speed = 0; } - return retval; + return phy_loopback(adapter->phydev, enable, speed); } static int tsnep_phy_open(struct tsnep_adapter *adapter) @@ -852,8 +851,8 @@ static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget) struct skb_shared_hwtstamps hwtstamps; u64 timestamp; - if (skb_shinfo(entry->skb)->tx_flags & - SKBTX_HW_TSTAMP_USE_CYCLES) + if (entry->skb->sk && + READ_ONCE(entry->skb->sk->sk_tsflags) & SOF_TIMESTAMPING_BIND_PHC) timestamp = __le64_to_cpu(entry->desc_wb->counter); else diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c index 6a6fc819dfdee..2106861463e40 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc.c +++ b/drivers/net/ethernet/freescale/enetc/enetc.c @@ -167,6 +167,24 @@ static bool enetc_skb_is_tcp(struct sk_buff *skb) return skb->csum_offset == offsetof(struct tcphdr, check); } +/** + * enetc_unwind_tx_frame() - Unwind the DMA mappings of a multi-buffer Tx frame + * @tx_ring: Pointer to the Tx ring on which the buffer descriptors are located + * @count: Number of Tx buffer descriptors which need to be unmapped + * @i: Index of the last successfully mapped Tx buffer descriptor + */ +static void enetc_unwind_tx_frame(struct enetc_bdr *tx_ring, int count, int i) +{ + while (count--) { + struct enetc_tx_swbd *tx_swbd = &tx_ring->tx_swbd[i]; + + enetc_free_tx_frame(tx_ring, tx_swbd); + if (i == 0) + i = tx_ring->bd_count; + i--; + } +} + static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb) { bool do_vlan, do_onestep_tstamp = false, do_twostep_tstamp = false; @@ -279,9 +297,11 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb) } if (do_onestep_tstamp) { - u32 lo, hi, val; - u64 sec, nsec; + __be32 new_sec_l, new_nsec; + u32 lo, hi, nsec, val; + __be16 new_sec_h; u8 *data; + u64 sec; lo = enetc_rd_hot(hw, ENETC_SICTR0); hi = enetc_rd_hot(hw, ENETC_SICTR1); @@ -295,13 +315,38 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb) /* Update originTimestamp field of Sync packet * - 48 bits seconds field * - 32 bits nanoseconds field + * + * In addition, the UDP checksum needs to be updated + * by software after updating the originTimestamp field, + * otherwise the hardware will calculate the wrong + * checksum when it updates the correction field and + * writes it back to the packet.
*/ data = skb_mac_header(skb); - *(__be16 *)(data + offset2) = - htons((sec >> 32) & 0xffff); - *(__be32 *)(data + offset2 + 2) = - htonl(sec & 0xffffffff); - *(__be32 *)(data + offset2 + 6) = htonl(nsec); + new_sec_h = htons((sec >> 32) & 0xffff); + new_sec_l = htonl(sec & 0xffffffff); + new_nsec = htonl(nsec); + if (udp) { + struct udphdr *uh = udp_hdr(skb); + __be32 old_sec_l, old_nsec; + __be16 old_sec_h; + + old_sec_h = *(__be16 *)(data + offset2); + inet_proto_csum_replace2(&uh->check, skb, old_sec_h, + new_sec_h, false); + + old_sec_l = *(__be32 *)(data + offset2 + 2); + inet_proto_csum_replace4(&uh->check, skb, old_sec_l, + new_sec_l, false); + + old_nsec = *(__be32 *)(data + offset2 + 6); + inet_proto_csum_replace4(&uh->check, skb, old_nsec, + new_nsec, false); + } + + *(__be16 *)(data + offset2) = new_sec_h; + *(__be32 *)(data + offset2 + 2) = new_sec_l; + *(__be32 *)(data + offset2 + 6) = new_nsec; /* Configure single-step register */ val = ENETC_PM0_SINGLE_STEP_EN; @@ -372,25 +417,20 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb) dma_err: dev_err(tx_ring->dev, "DMA map error"); - do { - tx_swbd = &tx_ring->tx_swbd[i]; - enetc_free_tx_frame(tx_ring, tx_swbd); - if (i == 0) - i = tx_ring->bd_count; - i--; - } while (count--); + enetc_unwind_tx_frame(tx_ring, count, i); return 0; } -static void enetc_map_tx_tso_hdr(struct enetc_bdr *tx_ring, struct sk_buff *skb, - struct enetc_tx_swbd *tx_swbd, - union enetc_tx_bd *txbd, int *i, int hdr_len, - int data_len) +static int enetc_map_tx_tso_hdr(struct enetc_bdr *tx_ring, struct sk_buff *skb, + struct enetc_tx_swbd *tx_swbd, + union enetc_tx_bd *txbd, int *i, int hdr_len, + int data_len) { union enetc_tx_bd txbd_tmp; u8 flags = 0, e_flags = 0; dma_addr_t addr; + int count = 1; enetc_clear_tx_bd(&txbd_tmp); addr = tx_ring->tso_headers_dma + *i * TSO_HEADER_SIZE; @@ -433,7 +473,10 @@ static void enetc_map_tx_tso_hdr(struct enetc_bdr *tx_ring, struct sk_buff *skb, /* Write the BD */ txbd_tmp.ext.e_flags = e_flags; *txbd = txbd_tmp; + count++; } + + return count; } static int enetc_map_tx_tso_data(struct enetc_bdr *tx_ring, struct sk_buff *skb, @@ -790,9 +833,9 @@ static int enetc_map_tx_tso_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb /* compute the csum over the L4 header */ csum = enetc_tso_hdr_csum(&tso, skb, hdr, hdr_len, &pos); - enetc_map_tx_tso_hdr(tx_ring, skb, tx_swbd, txbd, &i, hdr_len, data_len); + count += enetc_map_tx_tso_hdr(tx_ring, skb, tx_swbd, txbd, + &i, hdr_len, data_len); bd_data_num = 0; - count++; while (data_len > 0) { int size; @@ -816,8 +859,13 @@ static int enetc_map_tx_tso_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb err = enetc_map_tx_tso_data(tx_ring, skb, tx_swbd, txbd, tso.data, size, size == data_len); - if (err) + if (err) { + if (i == 0) + i = tx_ring->bd_count; + i--; + goto err_map_data; + } data_len -= size; count++; @@ -846,13 +894,7 @@ static int enetc_map_tx_tso_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb dev_err(tx_ring->dev, "DMA map error"); err_chained_bd: - do { - tx_swbd = &tx_ring->tx_swbd[i]; - enetc_free_tx_frame(tx_ring, tx_swbd); - if (i == 0) - i = tx_ring->bd_count; - i--; - } while (count--); + enetc_unwind_tx_frame(tx_ring, count, i); return 0; } @@ -1901,7 +1943,7 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring, enetc_xdp_drop(rx_ring, orig_i, i); tx_ring->stats.xdp_tx_drops++; } else { - tx_ring->stats.xdp_tx += xdp_tx_bd_cnt; + tx_ring->stats.xdp_tx++; rx_ring->xdp.xdp_tx_in_flight += xdp_tx_bd_cnt; 
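/* Illustrative aside, not part of this patch: the one-step timestamp hunk
 * above uses inet_proto_csum_replace2()/replace4() to patch the UDP checksum
 * in place, applying RFC 1624 incremental-update arithmetic rather than
 * recomputing the checksum over the whole datagram. A self-contained sketch
 * of that arithmetic for one 32-bit word (hypothetical helper; pseudo-header
 * and byte-order details elided):
 */
static u16 csum_update32_example(u16 check, u32 old_val, u32 new_val)
{
	u32 sum = (u16)~check;		/* unfold to a one's complement sum */

	sum += (u16)~(old_val >> 16) + (u16)~(old_val & 0xffff);
	sum += (new_val >> 16) + (new_val & 0xffff);
	while (sum >> 16)		/* fold the carries back in */
		sum = (sum & 0xffff) + (sum >> 16);
	return (u16)~sum;		/* fold back to checksum form */
}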
xdp_tx_frm_cnt++; /* The XDP_TX enqueue was successful, so we @@ -3228,6 +3270,9 @@ static int enetc_hwtstamp_set(struct net_device *ndev, struct ifreq *ifr) new_offloads |= ENETC_F_TX_TSTAMP; break; case HWTSTAMP_TX_ONESTEP_SYNC: + if (!enetc_si_is_pf(priv->si)) + return -EOPNOTSUPP; + new_offloads &= ~ENETC_F_TX_TSTAMP_MASK; new_offloads |= ENETC_F_TX_ONESTEP_SYNC_TSTAMP; break; diff --git a/drivers/net/ethernet/freescale/enetc/enetc4_pf.c b/drivers/net/ethernet/freescale/enetc/enetc4_pf.c index fc41078c4f5da..73ac8c6afb3ad 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc4_pf.c +++ b/drivers/net/ethernet/freescale/enetc/enetc4_pf.c @@ -672,7 +672,6 @@ static int enetc4_pf_netdev_create(struct enetc_si *si) err_alloc_msix: err_config_si: err_clk_get: - mutex_destroy(&priv->mm_lock); free_netdev(ndev); return err; @@ -684,6 +683,7 @@ static void enetc4_pf_netdev_destroy(struct enetc_si *si) struct net_device *ndev = si->ndev; unregister_netdev(ndev); + enetc4_link_deinit(priv); enetc_free_msix(priv); free_netdev(ndev); } diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c index bf34b5bb1e358..ece3ae28ba827 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c @@ -832,6 +832,7 @@ static int enetc_set_coalesce(struct net_device *ndev, static int enetc_get_ts_info(struct net_device *ndev, struct kernel_ethtool_ts_info *info) { + struct enetc_ndev_priv *priv = netdev_priv(ndev); int *phc_idx; phc_idx = symbol_get(enetc_phc_index); @@ -852,8 +853,10 @@ static int enetc_get_ts_info(struct net_device *ndev, SOF_TIMESTAMPING_TX_SOFTWARE; info->tx_types = (1 << HWTSTAMP_TX_OFF) | - (1 << HWTSTAMP_TX_ON) | - (1 << HWTSTAMP_TX_ONESTEP_SYNC); + (1 << HWTSTAMP_TX_ON); + + if (enetc_si_is_pf(priv->si)) + info->tx_types |= (1 << HWTSTAMP_TX_ONESTEP_SYNC); info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) | (1 << HWTSTAMP_FILTER_ALL); diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index f7c4ce8e9a265..a86cfebedaa8b 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -1093,6 +1093,29 @@ static void fec_enet_enable_ring(struct net_device *ndev) } } +/* Whack a reset. We should wait for this. + * For the i.MX6SX SoC, enet uses the AXI bus, so we disable + * the MAC instead of resetting it. + */ +static void fec_ctrl_reset(struct fec_enet_private *fep, bool allow_wol) +{ + u32 val; + + if (!allow_wol || !(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) { + if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES || + ((fep->quirks & FEC_QUIRK_NO_HARD_RESET) && fep->link)) { + writel(0, fep->hwp + FEC_ECNTRL); + } else { + writel(FEC_ECR_RESET, fep->hwp + FEC_ECNTRL); + udelay(10); + } + } else { + val = readl(fep->hwp + FEC_ECNTRL); + val |= (FEC_ECR_MAGICEN | FEC_ECR_SLEEP); + writel(val, fep->hwp + FEC_ECNTRL); + } +} + /* * This function is called to start or restart the FEC during a link * change, transmit timeout, or to reconfigure the FEC. The network @@ -1109,17 +1132,7 @@ fec_restart(struct net_device *ndev) if (fep->bufdesc_ex) fec_ptp_save_state(fep); - /* Whack a reset. We should wait for this. - * For i.MX6SX SOC, enet use AXI bus, we use disable MAC - * instead of reset MAC itself.
- */ - if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES || - ((fep->quirks & FEC_QUIRK_NO_HARD_RESET) && fep->link)) { - writel(0, fep->hwp + FEC_ECNTRL); - } else { - writel(1, fep->hwp + FEC_ECNTRL); - udelay(10); - } + fec_ctrl_reset(fep, false); /* * enet-mac reset will reset mac address registers too, @@ -1373,22 +1386,7 @@ fec_stop(struct net_device *ndev) if (fep->bufdesc_ex) fec_ptp_save_state(fep); - /* Whack a reset. We should wait for this. - * For i.MX6SX SOC, enet use AXI bus, we use disable MAC - * instead of reset MAC itself. - */ - if (!(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) { - if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES) { - writel(0, fep->hwp + FEC_ECNTRL); - } else { - writel(FEC_ECR_RESET, fep->hwp + FEC_ECNTRL); - udelay(10); - } - } else { - val = readl(fep->hwp + FEC_ECNTRL); - val |= (FEC_ECR_MAGICEN | FEC_ECR_SLEEP); - writel(val, fep->hwp + FEC_ECNTRL); - } + fec_ctrl_reset(fep, true); writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK); diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c index 7f6b574320716..7e62a4529e6fa 100644 --- a/drivers/net/ethernet/freescale/fec_ptp.c +++ b/drivers/net/ethernet/freescale/fec_ptp.c @@ -30,7 +30,6 @@ #include #include #include -#include #include #include "fec.h" diff --git a/drivers/net/ethernet/freescale/fman/fman_dtsec.c b/drivers/net/ethernet/freescale/fman/fman_dtsec.c index b3e2a596ad2c8..51402dff72c5f 100644 --- a/drivers/net/ethernet/freescale/fman/fman_dtsec.c +++ b/drivers/net/ethernet/freescale/fman/fman_dtsec.c @@ -1446,7 +1446,6 @@ int dtsec_initialization(struct mac_device *mac_dev, goto _return_fm_mac_free; } dtsec->pcs.ops = &dtsec_pcs_ops; - dtsec->pcs.neg_mode = true; dtsec->pcs.poll = true; supported = mac_dev->phylink_config.supported_interfaces; diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index 435138f4699d2..deb35b38c9766 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -1647,20 +1647,11 @@ static void gfar_configure_serdes(struct net_device *dev) */ static int init_phy(struct net_device *dev) { - __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; struct gfar_private *priv = netdev_priv(dev); phy_interface_t interface = priv->interface; struct phy_device *phydev; struct ethtool_keee edata; - linkmode_set_bit_array(phy_10_100_features_array, - ARRAY_SIZE(phy_10_100_features_array), - mask); - linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mask); - linkmode_set_bit(ETHTOOL_LINK_MODE_MII_BIT, mask); - if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT) - linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, mask); - priv->oldlink = 0; priv->oldspeed = 0; priv->oldduplex = -1; @@ -1675,9 +1666,8 @@ static int init_phy(struct net_device *dev) if (interface == PHY_INTERFACE_MODE_SGMII) gfar_configure_serdes(dev); - /* Remove any features not supported by the controller */ - linkmode_and(phydev->supported, phydev->supported, mask); - linkmode_copy(phydev->advertising, phydev->supported); + if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)) + phy_set_max_speed(phydev, SPEED_100); /* Add support for flow control */ phy_support_asym_pause(phydev); diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c index 88510f8227596..affd5a6c44e7b 100644 --- a/drivers/net/ethernet/freescale/ucc_geth.c +++ b/drivers/net/ethernet/freescale/ucc_geth.c @@ -3408,7 +3408,7 @@ static 
int ucc_geth_parse_clock(struct device_node *np, const char *which, return 0; } -struct phylink_mac_ops ugeth_mac_ops = { +static const struct phylink_mac_ops ugeth_mac_ops = { .mac_link_up = ugeth_mac_link_up, .mac_link_down = ugeth_mac_link_down, .mac_config = ugeth_mac_config, diff --git a/drivers/net/ethernet/freescale/ucc_geth.h b/drivers/net/ethernet/freescale/ucc_geth.h index 38789faae706b..84f92f6384e70 100644 --- a/drivers/net/ethernet/freescale/ucc_geth.h +++ b/drivers/net/ethernet/freescale/ucc_geth.h @@ -890,8 +890,6 @@ struct ucc_geth_hardware_statistics { addresses */ #define TX_TIMEOUT (1*HZ) -#define PHY_INIT_TIMEOUT 100000 -#define PHY_CHANGE_TIME 2 /* Fast Ethernet (10/100 Mbps) */ #define UCC_GETH_URFS_INIT 512 /* Rx virtual FIFO size diff --git a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h index 78d2a19593d18..2fab38c8ee78a 100644 --- a/drivers/net/ethernet/google/gve/gve.h +++ b/drivers/net/ethernet/google/gve/gve.h @@ -59,6 +59,8 @@ #define GVE_MAX_RX_BUFFER_SIZE 4096 +#define GVE_XDP_RX_BUFFER_SIZE_DQO 4096 + #define GVE_DEFAULT_RX_BUFFER_OFFSET 2048 #define GVE_PAGE_POOL_SIZE_MULTIPLIER 4 @@ -68,6 +70,9 @@ #define GVE_FLOW_RULE_IDS_CACHE_SIZE \ (GVE_ADMINQ_BUFFER_SIZE / sizeof(((struct gve_adminq_queried_flow_rule *)0)->location)) +#define GVE_RSS_KEY_SIZE 40 +#define GVE_RSS_INDIR_SIZE 128 + #define GVE_XDP_ACTIONS 5 #define GVE_GQ_TX_MIN_PKT_DESC_BYTES 182 @@ -102,7 +107,13 @@ struct gve_rx_desc_queue { /* The page info for a single slot in the RX data queue */ struct gve_rx_slot_page_info { - struct page *page; + /* netmem is used for DQO RDA mode + * page is used in all other modes + */ + union { + struct page *page; + netmem_ref netmem; + }; void *page_address; u32 page_offset; /* offset to write to in page */ unsigned int buf_size; @@ -218,6 +229,11 @@ struct gve_rx_cnts { /* Contains datapath state used to represent an RX queue. 
*/ struct gve_rx_ring { struct gve_priv *gve; + + u16 packet_buffer_size; /* Size of buffer posted to NIC */ + u16 packet_buffer_truesize; /* Total size of RX buffer */ + u16 rx_headroom; + union { /* GQI fields */ struct { @@ -226,7 +242,6 @@ struct gve_rx_ring { /* threshold for posting new buffs and descs */ u32 db_threshold; - u16 packet_buffer_size; u32 qpl_copy_pool_mask; u32 qpl_copy_pool_head; @@ -604,8 +619,6 @@ struct gve_tx_ring { dma_addr_t complq_bus_dqo; /* dma address of the dqo.compl_ring */ struct u64_stats_sync statss; /* sync stats for 32bit archs */ struct xsk_buff_pool *xsk_pool; - u32 xdp_xsk_wakeup; - u32 xdp_xsk_done; u64 xdp_xsk_sent; u64 xdp_xmit; u64 xdp_xmit_errors; @@ -624,10 +637,18 @@ struct gve_notify_block { u32 irq; }; -/* Tracks allowed and current queue settings */ -struct gve_queue_config { +/* Tracks allowed and current rx queue settings */ +struct gve_rx_queue_config { + u16 max_queues; + u16 num_queues; + u16 packet_buffer_size; +}; + +/* Tracks allowed and current tx queue settings */ +struct gve_tx_queue_config { u16 max_queues; - u16 num_queues; /* current */ + u16 num_queues; /* number of TX queues, excluding XDP queues */ + u16 num_xdp_queues; }; /* Tracks the available and used qpl IDs */ @@ -651,11 +672,11 @@ struct gve_ptype_lut { /* Parameters for allocating resources for tx queues */ struct gve_tx_alloc_rings_cfg { - struct gve_queue_config *qcfg; + struct gve_tx_queue_config *qcfg; + + u16 num_xdp_rings; u16 ring_size; - u16 start_idx; - u16 num_rings; bool raw_addressing; /* Allocated resources are returned here */ @@ -665,13 +686,15 @@ struct gve_tx_alloc_rings_cfg { /* Parameters for allocating resources for rx queues */ struct gve_rx_alloc_rings_cfg { /* tx config is also needed to determine QPL ids */ - struct gve_queue_config *qcfg; - struct gve_queue_config *qcfg_tx; + struct gve_rx_queue_config *qcfg_rx; + struct gve_tx_queue_config *qcfg_tx; u16 ring_size; u16 packet_buffer_size; bool raw_addressing; bool enable_header_split; + bool reset_rss; + bool xdp; /* Allocated resources are returned here */ struct gve_rx_ring *rx; @@ -722,6 +745,11 @@ struct gve_flow_rules_cache { u32 rule_ids_cache_num; }; +struct gve_rss_config { + u8 *hash_key; + u32 *hash_lut; +}; + struct gve_priv { struct net_device *dev; struct gve_tx_ring *tx; /* array of tx_cfg.num_queues */ @@ -751,9 +779,8 @@ struct gve_priv { u32 rx_copybreak; /* copy packets smaller than this */ u16 default_num_queues; /* default num queues to set up */ - u16 num_xdp_queues; - struct gve_queue_config tx_cfg; - struct gve_queue_config rx_cfg; + struct gve_tx_queue_config tx_cfg; + struct gve_rx_queue_config rx_cfg; u32 num_ntfy_blks; /* spilt between TX and RX so must be even */ struct gve_registers __iomem *reg_bar0; /* see gve_register.h */ @@ -823,7 +850,6 @@ struct gve_priv { struct gve_ptype_lut *ptype_lut_dqo; /* Must be a power of two. 
*/ - u16 data_buffer_size_dqo; u16 max_rx_buffer_size; /* device limit */ enum gve_queue_format queue_format; @@ -842,6 +868,8 @@ struct gve_priv { u16 rss_key_size; u16 rss_lut_size; + bool cache_rss_config; + struct gve_rss_config rss_config; }; enum gve_service_task_flags_bit { @@ -1024,27 +1052,16 @@ static inline bool gve_is_qpl(struct gve_priv *priv) } /* Returns the number of tx queue page lists */ -static inline u32 gve_num_tx_qpls(const struct gve_queue_config *tx_cfg, - int num_xdp_queues, +static inline u32 gve_num_tx_qpls(const struct gve_tx_queue_config *tx_cfg, bool is_qpl) { if (!is_qpl) return 0; - return tx_cfg->num_queues + num_xdp_queues; -} - -/* Returns the number of XDP tx queue page lists - */ -static inline u32 gve_num_xdp_qpls(struct gve_priv *priv) -{ - if (priv->queue_format != GVE_GQI_QPL_FORMAT) - return 0; - - return priv->num_xdp_queues; + return tx_cfg->num_queues + tx_cfg->num_xdp_queues; } /* Returns the number of rx queue page lists */ -static inline u32 gve_num_rx_qpls(const struct gve_queue_config *rx_cfg, +static inline u32 gve_num_rx_qpls(const struct gve_rx_queue_config *rx_cfg, bool is_qpl) { if (!is_qpl) @@ -1062,7 +1079,8 @@ static inline u32 gve_rx_qpl_id(struct gve_priv *priv, int rx_qid) return priv->tx_cfg.max_queues + rx_qid; } -static inline u32 gve_get_rx_qpl_id(const struct gve_queue_config *tx_cfg, int rx_qid) +static inline u32 gve_get_rx_qpl_id(const struct gve_tx_queue_config *tx_cfg, + int rx_qid) { return tx_cfg->max_queues + rx_qid; } @@ -1072,7 +1090,7 @@ static inline u32 gve_tx_start_qpl_id(struct gve_priv *priv) return gve_tx_qpl_id(priv, 0); } -static inline u32 gve_rx_start_qpl_id(const struct gve_queue_config *tx_cfg) +static inline u32 gve_rx_start_qpl_id(const struct gve_tx_queue_config *tx_cfg) { return gve_get_rx_qpl_id(tx_cfg, 0); } @@ -1103,7 +1121,7 @@ static inline bool gve_is_gqi(struct gve_priv *priv) static inline u32 gve_num_tx_queues(struct gve_priv *priv) { - return priv->tx_cfg.num_queues + priv->num_xdp_queues; + return priv->tx_cfg.num_queues + priv->tx_cfg.num_xdp_queues; } static inline u32 gve_xdp_tx_queue_id(struct gve_priv *priv, u32 queue_id) @@ -1207,7 +1225,8 @@ void gve_free_buffer(struct gve_rx_ring *rx, struct gve_rx_buf_state_dqo *buf_state); int gve_alloc_buffer(struct gve_rx_ring *rx, struct gve_rx_desc_dqo *desc); struct page_pool *gve_rx_create_page_pool(struct gve_priv *priv, - struct gve_rx_ring *rx); + struct gve_rx_ring *rx, + bool xdp); /* Reset */ void gve_schedule_reset(struct gve_priv *priv); @@ -1219,14 +1238,17 @@ int gve_adjust_config(struct gve_priv *priv, struct gve_tx_alloc_rings_cfg *tx_alloc_cfg, struct gve_rx_alloc_rings_cfg *rx_alloc_cfg); int gve_adjust_queues(struct gve_priv *priv, - struct gve_queue_config new_rx_config, - struct gve_queue_config new_tx_config); + struct gve_rx_queue_config new_rx_config, + struct gve_tx_queue_config new_tx_config, + bool reset_rss); /* flow steering rule */ int gve_get_flow_rule_entry(struct gve_priv *priv, struct ethtool_rxnfc *cmd); int gve_get_flow_rule_ids(struct gve_priv *priv, struct ethtool_rxnfc *cmd, u32 *rule_locs); int gve_add_flow_rule(struct gve_priv *priv, struct ethtool_rxnfc *cmd); int gve_del_flow_rule(struct gve_priv *priv, struct ethtool_rxnfc *cmd); int gve_flow_rules_reset(struct gve_priv *priv); +/* RSS config */ +int gve_init_rss_config(struct gve_priv *priv, u16 num_queues); /* report stats handling */ void gve_handle_report_stats(struct gve_priv *priv); /* exported by ethtool.c */ diff --git 
a/drivers/net/ethernet/google/gve/gve_adminq.c b/drivers/net/ethernet/google/gve/gve_adminq.c index aa7d723011d0c..3e8fc33cc11fd 100644 --- a/drivers/net/ethernet/google/gve/gve_adminq.c +++ b/drivers/net/ethernet/google/gve/gve_adminq.c @@ -731,6 +731,7 @@ static void gve_adminq_get_create_rx_queue_cmd(struct gve_priv *priv, .ntfy_id = cpu_to_be32(rx->ntfy_id), .queue_resources_addr = cpu_to_be64(rx->q_resources_bus), .rx_ring_size = cpu_to_be16(priv->rx_desc_cnt), + .packet_buffer_size = cpu_to_be16(rx->packet_buffer_size), }; if (gve_is_gqi(priv)) { @@ -743,7 +744,6 @@ static void gve_adminq_get_create_rx_queue_cmd(struct gve_priv *priv, cpu_to_be64(rx->data.data_bus); cmd->create_rx_queue.index = cpu_to_be32(queue_index); cmd->create_rx_queue.queue_page_list_id = cpu_to_be32(qpl_id); - cmd->create_rx_queue.packet_buffer_size = cpu_to_be16(rx->packet_buffer_size); } else { u32 qpl_id = 0; @@ -756,8 +756,6 @@ static void gve_adminq_get_create_rx_queue_cmd(struct gve_priv *priv, cpu_to_be64(rx->dqo.complq.bus); cmd->create_rx_queue.rx_data_ring_addr = cpu_to_be64(rx->dqo.bufq.bus); - cmd->create_rx_queue.packet_buffer_size = - cpu_to_be16(priv->data_buffer_size_dqo); cmd->create_rx_queue.rx_buff_ring_size = cpu_to_be16(priv->rx_desc_cnt); cmd->create_rx_queue.enable_rsc = @@ -885,6 +883,15 @@ static void gve_set_default_desc_cnt(struct gve_priv *priv, priv->min_rx_desc_cnt = priv->rx_desc_cnt; } +static void gve_set_default_rss_sizes(struct gve_priv *priv) +{ + if (!gve_is_gqi(priv)) { + priv->rss_key_size = GVE_RSS_KEY_SIZE; + priv->rss_lut_size = GVE_RSS_INDIR_SIZE; + priv->cache_rss_config = true; + } +} + static void gve_enable_supported_features(struct gve_priv *priv, u32 supported_features_mask, const struct gve_device_option_jumbo_frames @@ -968,6 +975,10 @@ static void gve_enable_supported_features(struct gve_priv *priv, be16_to_cpu(dev_op_rss_config->hash_key_size); priv->rss_lut_size = be16_to_cpu(dev_op_rss_config->hash_lut_size); + priv->cache_rss_config = false; + dev_dbg(&priv->pdev->dev, + "RSS device option enabled with key size of %u, lut size of %u.\n", + priv->rss_key_size, priv->rss_lut_size); } } @@ -1052,6 +1063,8 @@ int gve_adminq_describe_device(struct gve_priv *priv) /* set default descriptor counts */ gve_set_default_desc_cnt(priv, descriptor); + gve_set_default_rss_sizes(priv); + /* DQO supports LRO. 
*/ if (!gve_is_gqi(priv)) priv->dev->hw_features |= NETIF_F_LRO; @@ -1276,8 +1289,9 @@ int gve_adminq_reset_flow_rules(struct gve_priv *priv) int gve_adminq_configure_rss(struct gve_priv *priv, struct ethtool_rxfh_param *rxfh) { + const u32 *hash_lut_to_config = NULL; + const u8 *hash_key_to_config = NULL; dma_addr_t lut_bus = 0, key_bus = 0; - u16 key_size = 0, lut_size = 0; union gve_adminq_command cmd; __be32 *lut = NULL; u8 hash_alg = 0; @@ -1287,7 +1301,7 @@ int gve_adminq_configure_rss(struct gve_priv *priv, struct ethtool_rxfh_param *r switch (rxfh->hfunc) { case ETH_RSS_HASH_NO_CHANGE: - break; + fallthrough; case ETH_RSS_HASH_TOP: hash_alg = ETH_RSS_HASH_TOP; break; @@ -1296,27 +1310,46 @@ int gve_adminq_configure_rss(struct gve_priv *priv, struct ethtool_rxfh_param *r } if (rxfh->indir) { - lut_size = priv->rss_lut_size; + if (rxfh->indir_size != priv->rss_lut_size) + return -EINVAL; + + hash_lut_to_config = rxfh->indir; + } else if (priv->cache_rss_config) { + hash_lut_to_config = priv->rss_config.hash_lut; + } + + if (hash_lut_to_config) { lut = dma_alloc_coherent(&priv->pdev->dev, - lut_size * sizeof(*lut), + priv->rss_lut_size * sizeof(*lut), &lut_bus, GFP_KERNEL); if (!lut) return -ENOMEM; for (i = 0; i < priv->rss_lut_size; i++) - lut[i] = cpu_to_be32(rxfh->indir[i]); + lut[i] = cpu_to_be32(hash_lut_to_config[i]); } if (rxfh->key) { - key_size = priv->rss_key_size; + if (rxfh->key_size != priv->rss_key_size) { + err = -EINVAL; + goto out; + } + + hash_key_to_config = rxfh->key; + } else if (priv->cache_rss_config) { + hash_key_to_config = priv->rss_config.hash_key; + } + + if (hash_key_to_config) { key = dma_alloc_coherent(&priv->pdev->dev, - key_size, &key_bus, GFP_KERNEL); + priv->rss_key_size, + &key_bus, GFP_KERNEL); if (!key) { err = -ENOMEM; goto out; } - memcpy(key, rxfh->key, key_size); + memcpy(key, hash_key_to_config, priv->rss_key_size); } /* Zero-valued fields in the cmd.configure_rss instruct the device to @@ -1330,8 +1363,10 @@ int gve_adminq_configure_rss(struct gve_priv *priv, struct ethtool_rxfh_param *r BIT(GVE_RSS_HASH_TCPV6) | BIT(GVE_RSS_HASH_UDPV6)), .hash_alg = hash_alg, - .hash_key_size = cpu_to_be16(key_size), - .hash_lut_size = cpu_to_be16(lut_size), + .hash_key_size = + cpu_to_be16((key_bus) ? priv->rss_key_size : 0), + .hash_lut_size = + cpu_to_be16((lut_bus) ? 
priv->rss_lut_size : 0), .hash_key_addr = cpu_to_be64(key_bus), .hash_lut_addr = cpu_to_be64(lut_bus), }; @@ -1341,11 +1376,11 @@ int gve_adminq_configure_rss(struct gve_priv *priv, struct ethtool_rxfh_param *r out: if (lut) dma_free_coherent(&priv->pdev->dev, - lut_size * sizeof(*lut), + priv->rss_lut_size * sizeof(*lut), lut, lut_bus); if (key) dma_free_coherent(&priv->pdev->dev, - key_size, key, key_bus); + priv->rss_key_size, key, key_bus); return err; } @@ -1449,12 +1484,15 @@ static int gve_adminq_process_rss_query(struct gve_priv *priv, rxfh->hfunc = descriptor->hash_alg; rss_info_addr = (void *)(descriptor + 1); - if (rxfh->key) + if (rxfh->key) { + rxfh->key_size = priv->rss_key_size; memcpy(rxfh->key, rss_info_addr, priv->rss_key_size); + } rss_info_addr += priv->rss_key_size; lut = (__be32 *)rss_info_addr; if (rxfh->indir) { + rxfh->indir_size = priv->rss_lut_size; for (i = 0; i < priv->rss_lut_size; i++) rxfh->indir[i] = be32_to_cpu(lut[i]); } diff --git a/drivers/net/ethernet/google/gve/gve_buffer_mgmt_dqo.c b/drivers/net/ethernet/google/gve/gve_buffer_mgmt_dqo.c index 403f0f335ba66..a71883e1d9202 100644 --- a/drivers/net/ethernet/google/gve/gve_buffer_mgmt_dqo.c +++ b/drivers/net/ethernet/google/gve/gve_buffer_mgmt_dqo.c @@ -139,7 +139,8 @@ int gve_alloc_qpl_page_dqo(struct gve_rx_ring *rx, buf_state->page_info.page_offset = 0; buf_state->page_info.page_address = page_address(buf_state->page_info.page); - buf_state->page_info.buf_size = priv->data_buffer_size_dqo; + buf_state->page_info.buf_size = rx->packet_buffer_truesize; + buf_state->page_info.pad = rx->rx_headroom; buf_state->last_single_ref_offset = 0; /* The page already has 1 ref. */ @@ -162,7 +163,7 @@ void gve_free_qpl_page_dqo(struct gve_rx_buf_state_dqo *buf_state) void gve_try_recycle_buf(struct gve_priv *priv, struct gve_rx_ring *rx, struct gve_rx_buf_state_dqo *buf_state) { - const u16 data_buffer_size = priv->data_buffer_size_dqo; + const u16 data_buffer_size = rx->packet_buffer_truesize; int pagecount; /* Can't reuse if we only fit one buffer per page */ @@ -205,38 +206,40 @@ void gve_free_to_page_pool(struct gve_rx_ring *rx, struct gve_rx_buf_state_dqo *buf_state, bool allow_direct) { - struct page *page = buf_state->page_info.page; + netmem_ref netmem = buf_state->page_info.netmem; - if (!page) + if (!netmem) return; - page_pool_put_full_page(page->pp, page, allow_direct); - buf_state->page_info.page = NULL; + page_pool_put_full_netmem(netmem_get_pp(netmem), netmem, allow_direct); + buf_state->page_info.netmem = 0; } static int gve_alloc_from_page_pool(struct gve_rx_ring *rx, struct gve_rx_buf_state_dqo *buf_state) { - struct gve_priv *priv = rx->gve; - struct page *page; + netmem_ref netmem; - buf_state->page_info.buf_size = priv->data_buffer_size_dqo; - page = page_pool_alloc(rx->dqo.page_pool, - &buf_state->page_info.page_offset, - &buf_state->page_info.buf_size, GFP_ATOMIC); + buf_state->page_info.buf_size = rx->packet_buffer_truesize; + netmem = page_pool_alloc_netmem(rx->dqo.page_pool, + &buf_state->page_info.page_offset, + &buf_state->page_info.buf_size, + GFP_ATOMIC); - if (!page) + if (!netmem) return -ENOMEM; - buf_state->page_info.page = page; - buf_state->page_info.page_address = page_address(page); - buf_state->addr = page_pool_get_dma_addr(page); + buf_state->page_info.netmem = netmem; + buf_state->page_info.page_address = netmem_address(netmem); + buf_state->addr = page_pool_get_dma_addr_netmem(netmem); + buf_state->page_info.pad = rx->dqo.page_pool->p.offset; return 0; } struct page_pool 
*gve_rx_create_page_pool(struct gve_priv *priv, - struct gve_rx_ring *rx) + struct gve_rx_ring *rx, + bool xdp) { u32 ntfy_id = gve_rx_idx_to_ntfy(priv, rx->q_num); struct page_pool_params pp = { @@ -247,7 +250,8 @@ struct page_pool *gve_rx_create_page_pool(struct gve_priv *priv, .netdev = priv->dev, .napi = &priv->ntfy_blocks[ntfy_id].napi, .max_len = PAGE_SIZE, - .dma_dir = DMA_FROM_DEVICE, + .dma_dir = xdp ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE, + .offset = xdp ? XDP_PACKET_HEADROOM : 0, }; return page_pool_create(&pp); @@ -269,7 +273,7 @@ void gve_reuse_buffer(struct gve_rx_ring *rx, struct gve_rx_buf_state_dqo *buf_state) { if (rx->dqo.page_pool) { - buf_state->page_info.page = NULL; + buf_state->page_info.netmem = 0; gve_free_buf_state(rx, buf_state); } else { gve_dec_pagecnt_bias(&buf_state->page_info); @@ -301,7 +305,8 @@ int gve_alloc_buffer(struct gve_rx_ring *rx, struct gve_rx_desc_dqo *desc) } desc->buf_id = cpu_to_le16(buf_state - rx->dqo.buf_states); desc->buf_addr = cpu_to_le64(buf_state->addr + - buf_state->page_info.page_offset); + buf_state->page_info.page_offset + + buf_state->page_info.pad); return 0; diff --git a/drivers/net/ethernet/google/gve/gve_ethtool.c b/drivers/net/ethernet/google/gve/gve_ethtool.c index bdfc6e77b2af5..31a21ccf48634 100644 --- a/drivers/net/ethernet/google/gve/gve_ethtool.c +++ b/drivers/net/ethernet/google/gve/gve_ethtool.c @@ -63,8 +63,8 @@ static const char gve_gstrings_rx_stats[][ETH_GSTRING_LEN] = { static const char gve_gstrings_tx_stats[][ETH_GSTRING_LEN] = { "tx_posted_desc[%u]", "tx_completed_desc[%u]", "tx_consumed_desc[%u]", "tx_bytes[%u]", "tx_wake[%u]", "tx_stop[%u]", "tx_event_counter[%u]", - "tx_dma_mapping_error[%u]", "tx_xsk_wakeup[%u]", - "tx_xsk_done[%u]", "tx_xsk_sent[%u]", "tx_xdp_xmit[%u]", "tx_xdp_xmit_errors[%u]" + "tx_dma_mapping_error[%u]", + "tx_xsk_sent[%u]", "tx_xdp_xmit[%u]", "tx_xdp_xmit_errors[%u]" }; static const char gve_gstrings_adminq_stats[][ETH_GSTRING_LEN] = { @@ -417,9 +417,7 @@ gve_get_ethtool_stats(struct net_device *netdev, data[i++] = value; } } - /* XDP xsk counters */ - data[i++] = tx->xdp_xsk_wakeup; - data[i++] = tx->xdp_xsk_done; + /* XDP counters */ do { start = u64_stats_fetch_begin(&priv->tx[ring].statss); data[i] = tx->xdp_xsk_sent; @@ -477,11 +475,12 @@ static int gve_set_channels(struct net_device *netdev, struct ethtool_channels *cmd) { struct gve_priv *priv = netdev_priv(netdev); - struct gve_queue_config new_tx_cfg = priv->tx_cfg; - struct gve_queue_config new_rx_cfg = priv->rx_cfg; + struct gve_tx_queue_config new_tx_cfg = priv->tx_cfg; + struct gve_rx_queue_config new_rx_cfg = priv->rx_cfg; struct ethtool_channels old_settings; int new_tx = cmd->tx_count; int new_rx = cmd->rx_count; + bool reset_rss = false; gve_get_channels(netdev, &old_settings); @@ -492,22 +491,27 @@ static int gve_set_channels(struct net_device *netdev, if (!new_rx || !new_tx) return -EINVAL; - if (priv->num_xdp_queues && - (new_tx != new_rx || (2 * new_tx > priv->tx_cfg.max_queues))) { - dev_err(&priv->pdev->dev, "XDP load failed: The number of configured RX queues should be equal to the number of configured TX queues and the number of configured RX/TX queues should be less than or equal to half the maximum number of RX/TX queues"); - return -EINVAL; - } + if (priv->xdp_prog) { + if (new_tx != new_rx || + (2 * new_tx > priv->tx_cfg.max_queues)) { + dev_err(&priv->pdev->dev, "The number of configured RX queues should be equal to the number of configured TX queues and the number of configured RX/TX queues should be 
less than or equal to half the maximum number of RX/TX queues when XDP program is installed"); + return -EINVAL; + } - if (!netif_running(netdev)) { - priv->tx_cfg.num_queues = new_tx; - priv->rx_cfg.num_queues = new_rx; - return 0; + /* One XDP TX queue per RX queue. */ + new_tx_cfg.num_xdp_queues = new_rx; + } else { + new_tx_cfg.num_xdp_queues = 0; } + if (new_rx != priv->rx_cfg.num_queues && + priv->cache_rss_config && !netif_is_rxfh_configured(netdev)) + reset_rss = true; + new_tx_cfg.num_queues = new_tx; new_rx_cfg.num_queues = new_rx; - return gve_adjust_queues(priv, new_rx_cfg, new_tx_cfg); + return gve_adjust_queues(priv, new_rx_cfg, new_tx_cfg, reset_rss); } static void gve_get_ringparam(struct net_device *netdev, @@ -643,8 +647,7 @@ static int gve_set_tunable(struct net_device *netdev, switch (etuna->id) { case ETHTOOL_RX_COPYBREAK: { - u32 max_copybreak = gve_is_gqi(priv) ? - GVE_DEFAULT_RX_BUFFER_SIZE : priv->data_buffer_size_dqo; + u32 max_copybreak = priv->rx_cfg.packet_buffer_size; len = *(u32 *)value; if (len > max_copybreak) @@ -855,6 +858,25 @@ static u32 gve_get_rxfh_indir_size(struct net_device *netdev) return priv->rss_lut_size; } +static void gve_get_rss_config_cache(struct gve_priv *priv, + struct ethtool_rxfh_param *rxfh) +{ + struct gve_rss_config *rss_config = &priv->rss_config; + + rxfh->hfunc = ETH_RSS_HASH_TOP; + + if (rxfh->key) { + rxfh->key_size = priv->rss_key_size; + memcpy(rxfh->key, rss_config->hash_key, priv->rss_key_size); + } + + if (rxfh->indir) { + rxfh->indir_size = priv->rss_lut_size; + memcpy(rxfh->indir, rss_config->hash_lut, + priv->rss_lut_size * sizeof(*rxfh->indir)); + } +} + static int gve_get_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh) { struct gve_priv *priv = netdev_priv(netdev); @@ -862,18 +884,46 @@ static int gve_get_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rx if (!priv->rss_key_size || !priv->rss_lut_size) return -EOPNOTSUPP; + if (priv->cache_rss_config) { + gve_get_rss_config_cache(priv, rxfh); + return 0; + } + return gve_adminq_query_rss_config(priv, rxfh); } +static void gve_set_rss_config_cache(struct gve_priv *priv, + struct ethtool_rxfh_param *rxfh) +{ + struct gve_rss_config *rss_config = &priv->rss_config; + + if (rxfh->key) + memcpy(rss_config->hash_key, rxfh->key, priv->rss_key_size); + + if (rxfh->indir) + memcpy(rss_config->hash_lut, rxfh->indir, + priv->rss_lut_size * sizeof(*rxfh->indir)); +} + static int gve_set_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh, struct netlink_ext_ack *extack) { struct gve_priv *priv = netdev_priv(netdev); + int err; if (!priv->rss_key_size || !priv->rss_lut_size) return -EOPNOTSUPP; - return gve_adminq_configure_rss(priv, rxfh); + err = gve_adminq_configure_rss(priv, rxfh); + if (err) { + NL_SET_ERR_MSG_MOD(extack, "Fail to configure RSS config"); + return err; + } + + if (priv->cache_rss_config) + gve_set_rss_config_cache(priv, rxfh); + + return 0; } const struct ethtool_ops gve_ethtool_ops = { diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c index 92237fb0b60c1..cb2f9978f45e2 100644 --- a/drivers/net/ethernet/google/gve/gve_main.c +++ b/drivers/net/ethernet/google/gve/gve_main.c @@ -184,6 +184,43 @@ static void gve_free_flow_rule_caches(struct gve_priv *priv) flow_rules_cache->rules_cache = NULL; } +static int gve_alloc_rss_config_cache(struct gve_priv *priv) +{ + struct gve_rss_config *rss_config = &priv->rss_config; + + if (!priv->cache_rss_config) + return 0; + + 
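The ethtool hunks above move gve's RSS state behind an optional in-memory cache: gve_get_rxfh answers from the cache when priv->cache_rss_config is set (avoiding an admin-queue query), and gve_set_rxfh mirrors the new key/indirection table into the cache only after gve_adminq_configure_rss succeeds. A minimal self-contained sketch of that read-through/write-through shape follows; KEY_SIZE and LUT_SIZE are stand-ins for the device-reported rss_key_size/rss_lut_size, and all names here are illustrative, not taken from the patch.

    #include <stdint.h>
    #include <string.h>

    /* Assumed sizes for the sketch; the real driver discovers these
     * from the device at probe time. */
    #define KEY_SIZE 40
    #define LUT_SIZE 128

    struct rss_cache {
            uint8_t  key[KEY_SIZE];
            uint32_t lut[LUT_SIZE];
    };

    /* "get": serve the caller from the cache instead of querying HW. */
    static void rss_get_cached(const struct rss_cache *c,
                               uint8_t *key_out, uint32_t *lut_out)
    {
            if (key_out)
                    memcpy(key_out, c->key, sizeof(c->key));
            if (lut_out)
                    memcpy(lut_out, c->lut, sizeof(c->lut));
    }

    /* "set": only mirror into the cache after the device accepted the
     * config, so later gets stay coherent with hardware state. */
    static void rss_set_cached(struct rss_cache *c,
                               const uint8_t *key, const uint32_t *lut)
    {
            if (key)
                    memcpy(c->key, key, sizeof(c->key));
            if (lut)
                    memcpy(c->lut, lut, sizeof(c->lut));
    }

The same cache is what gve_init_rss_config repopulates with ethtool_rxfh_indir_default() when the queue count changes and the user has not pinned an indirection table.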
rss_config->hash_key = kcalloc(priv->rss_key_size, + sizeof(rss_config->hash_key[0]), + GFP_KERNEL); + if (!rss_config->hash_key) + return -ENOMEM; + + rss_config->hash_lut = kcalloc(priv->rss_lut_size, + sizeof(rss_config->hash_lut[0]), + GFP_KERNEL); + if (!rss_config->hash_lut) + goto free_rss_key_cache; + + return 0; + +free_rss_key_cache: + kfree(rss_config->hash_key); + rss_config->hash_key = NULL; + return -ENOMEM; +} + +static void gve_free_rss_config_cache(struct gve_priv *priv) +{ + struct gve_rss_config *rss_config = &priv->rss_config; + + kfree(rss_config->hash_key); + kfree(rss_config->hash_lut); + + memset(rss_config, 0, sizeof(*rss_config)); +} + static int gve_alloc_counter_array(struct gve_priv *priv) { priv->counter_array = @@ -575,9 +612,12 @@ static int gve_setup_device_resources(struct gve_priv *priv) err = gve_alloc_flow_rule_caches(priv); if (err) return err; - err = gve_alloc_counter_array(priv); + err = gve_alloc_rss_config_cache(priv); if (err) goto abort_with_flow_rule_caches; + err = gve_alloc_counter_array(priv); + if (err) + goto abort_with_rss_config_cache; err = gve_alloc_notify_blocks(priv); if (err) goto abort_with_counter; @@ -611,6 +651,12 @@ static int gve_setup_device_resources(struct gve_priv *priv) } } + err = gve_init_rss_config(priv, priv->rx_cfg.num_queues); + if (err) { + dev_err(&priv->pdev->dev, "Failed to init RSS config"); + goto abort_with_ptype_lut; + } + err = gve_adminq_report_stats(priv, priv->stats_report_len, priv->stats_report_bus, GVE_STATS_REPORT_TIMER_PERIOD); @@ -629,6 +675,8 @@ static int gve_setup_device_resources(struct gve_priv *priv) gve_free_notify_blocks(priv); abort_with_counter: gve_free_counter_array(priv); +abort_with_rss_config_cache: + gve_free_rss_config_cache(priv); abort_with_flow_rule_caches: gve_free_flow_rule_caches(priv); @@ -669,6 +717,7 @@ static void gve_teardown_device_resources(struct gve_priv *priv) priv->ptype_lut_dqo = NULL; gve_free_flow_rule_caches(priv); + gve_free_rss_config_cache(priv); gve_free_counter_array(priv); gve_free_notify_blocks(priv); gve_free_stats_report(priv); @@ -746,30 +795,13 @@ static struct gve_queue_page_list *gve_rx_get_qpl(struct gve_priv *priv, int idx return rx->dqo.qpl; } -static int gve_register_xdp_qpls(struct gve_priv *priv) -{ - int start_id; - int err; - int i; - - start_id = gve_xdp_tx_start_queue_id(priv); - for (i = start_id; i < start_id + gve_num_xdp_qpls(priv); i++) { - err = gve_register_qpl(priv, gve_tx_get_qpl(priv, i)); - /* This failure will trigger a reset - no need to clean up */ - if (err) - return err; - } - return 0; -} - static int gve_register_qpls(struct gve_priv *priv) { int num_tx_qpls, num_rx_qpls; int err; int i; - num_tx_qpls = gve_num_tx_qpls(&priv->tx_cfg, gve_num_xdp_qpls(priv), - gve_is_qpl(priv)); + num_tx_qpls = gve_num_tx_qpls(&priv->tx_cfg, gve_is_qpl(priv)); num_rx_qpls = gve_num_rx_qpls(&priv->rx_cfg, gve_is_qpl(priv)); for (i = 0; i < num_tx_qpls; i++) { @@ -787,30 +819,13 @@ static int gve_register_qpls(struct gve_priv *priv) return 0; } -static int gve_unregister_xdp_qpls(struct gve_priv *priv) -{ - int start_id; - int err; - int i; - - start_id = gve_xdp_tx_start_queue_id(priv); - for (i = start_id; i < start_id + gve_num_xdp_qpls(priv); i++) { - err = gve_unregister_qpl(priv, gve_tx_get_qpl(priv, i)); - /* This failure will trigger a reset - no need to clean */ - if (err) - return err; - } - return 0; -} - static int gve_unregister_qpls(struct gve_priv *priv) { int num_tx_qpls, num_rx_qpls; int err; int i; - num_tx_qpls = 
gve_num_tx_qpls(&priv->tx_cfg, gve_num_xdp_qpls(priv), - gve_is_qpl(priv)); + num_tx_qpls = gve_num_tx_qpls(&priv->tx_cfg, gve_is_qpl(priv)); num_rx_qpls = gve_num_rx_qpls(&priv->rx_cfg, gve_is_qpl(priv)); for (i = 0; i < num_tx_qpls; i++) { @@ -829,27 +844,6 @@ static int gve_unregister_qpls(struct gve_priv *priv) return 0; } -static int gve_create_xdp_rings(struct gve_priv *priv) -{ - int err; - - err = gve_adminq_create_tx_queues(priv, - gve_xdp_tx_start_queue_id(priv), - priv->num_xdp_queues); - if (err) { - netif_err(priv, drv, priv->dev, "failed to create %d XDP tx queues\n", - priv->num_xdp_queues); - /* This failure will trigger a reset - no need to clean - * up - */ - return err; - } - netif_dbg(priv, drv, priv->dev, "created %d XDP tx queues\n", - priv->num_xdp_queues); - - return 0; -} - static int gve_create_rings(struct gve_priv *priv) { int num_tx_queues = gve_num_tx_queues(priv); @@ -905,7 +899,7 @@ static void init_xdp_sync_stats(struct gve_priv *priv) int i; /* Init stats */ - for (i = start_id; i < start_id + priv->num_xdp_queues; i++) { + for (i = start_id; i < start_id + priv->tx_cfg.num_xdp_queues; i++) { int ntfy_idx = gve_tx_idx_to_ntfy(priv, i); u64_stats_init(&priv->tx[i].statss); @@ -930,24 +924,21 @@ static void gve_init_sync_stats(struct gve_priv *priv) static void gve_tx_get_curr_alloc_cfg(struct gve_priv *priv, struct gve_tx_alloc_rings_cfg *cfg) { - int num_xdp_queues = priv->xdp_prog ? priv->rx_cfg.num_queues : 0; - cfg->qcfg = &priv->tx_cfg; cfg->raw_addressing = !gve_is_qpl(priv); cfg->ring_size = priv->tx_desc_cnt; - cfg->start_idx = 0; - cfg->num_rings = priv->tx_cfg.num_queues + num_xdp_queues; + cfg->num_xdp_rings = cfg->qcfg->num_xdp_queues; cfg->tx = priv->tx; } -static void gve_tx_stop_rings(struct gve_priv *priv, int start_id, int num_rings) +static void gve_tx_stop_rings(struct gve_priv *priv, int num_rings) { int i; if (!priv->tx) return; - for (i = start_id; i < start_id + num_rings; i++) { + for (i = 0; i < num_rings; i++) { if (gve_is_gqi(priv)) gve_tx_stop_ring_gqi(priv, i); else @@ -955,12 +946,11 @@ static void gve_tx_stop_rings(struct gve_priv *priv, int start_id, int num_rings } } -static void gve_tx_start_rings(struct gve_priv *priv, int start_id, - int num_rings) +static void gve_tx_start_rings(struct gve_priv *priv, int num_rings) { int i; - for (i = start_id; i < start_id + num_rings; i++) { + for (i = 0; i < num_rings; i++) { if (gve_is_gqi(priv)) gve_tx_start_ring_gqi(priv, i); else @@ -968,28 +958,6 @@ static void gve_tx_start_rings(struct gve_priv *priv, int start_id, } } -static int gve_alloc_xdp_rings(struct gve_priv *priv) -{ - struct gve_tx_alloc_rings_cfg cfg = {0}; - int err = 0; - - if (!priv->num_xdp_queues) - return 0; - - gve_tx_get_curr_alloc_cfg(priv, &cfg); - cfg.start_idx = gve_xdp_tx_start_queue_id(priv); - cfg.num_rings = priv->num_xdp_queues; - - err = gve_tx_alloc_rings_gqi(priv, &cfg); - if (err) - return err; - - gve_tx_start_rings(priv, cfg.start_idx, cfg.num_rings); - init_xdp_sync_stats(priv); - - return 0; -} - static int gve_queues_mem_alloc(struct gve_priv *priv, struct gve_tx_alloc_rings_cfg *tx_alloc_cfg, struct gve_rx_alloc_rings_cfg *rx_alloc_cfg) @@ -1020,26 +988,6 @@ static int gve_queues_mem_alloc(struct gve_priv *priv, return err; } -static int gve_destroy_xdp_rings(struct gve_priv *priv) -{ - int start_id; - int err; - - start_id = gve_xdp_tx_start_queue_id(priv); - err = gve_adminq_destroy_tx_queues(priv, - start_id, - priv->num_xdp_queues); - if (err) { - netif_err(priv, drv, priv->dev, - 
"failed to destroy XDP queues\n"); - /* This failure will trigger a reset - no need to clean up */ - return err; - } - netif_dbg(priv, drv, priv->dev, "destroyed XDP queues\n"); - - return 0; -} - static int gve_destroy_rings(struct gve_priv *priv) { int num_tx_queues = gve_num_tx_queues(priv); @@ -1064,20 +1012,6 @@ static int gve_destroy_rings(struct gve_priv *priv) return 0; } -static void gve_free_xdp_rings(struct gve_priv *priv) -{ - struct gve_tx_alloc_rings_cfg cfg = {0}; - - gve_tx_get_curr_alloc_cfg(priv, &cfg); - cfg.start_idx = gve_xdp_tx_start_queue_id(priv); - cfg.num_rings = priv->num_xdp_queues; - - if (priv->tx) { - gve_tx_stop_rings(priv, cfg.start_idx, cfg.num_rings); - gve_tx_free_rings_gqi(priv, &cfg); - } -} - static void gve_queues_mem_free(struct gve_priv *priv, struct gve_tx_alloc_rings_cfg *tx_cfg, struct gve_rx_alloc_rings_cfg *rx_cfg) @@ -1204,7 +1138,7 @@ static int gve_reg_xdp_info(struct gve_priv *priv, struct net_device *dev) int i, j; u32 tx_qid; - if (!priv->num_xdp_queues) + if (!priv->tx_cfg.num_xdp_queues) return 0; for (i = 0; i < priv->rx_cfg.num_queues; i++) { @@ -1215,8 +1149,14 @@ static int gve_reg_xdp_info(struct gve_priv *priv, struct net_device *dev) napi->napi_id); if (err) goto err; - err = xdp_rxq_info_reg_mem_model(&rx->xdp_rxq, - MEM_TYPE_PAGE_SHARED, NULL); + if (gve_is_qpl(priv)) + err = xdp_rxq_info_reg_mem_model(&rx->xdp_rxq, + MEM_TYPE_PAGE_SHARED, + NULL); + else + err = xdp_rxq_info_reg_mem_model(&rx->xdp_rxq, + MEM_TYPE_PAGE_POOL, + rx->dqo.page_pool); if (err) goto err; rx->xsk_pool = xsk_get_pool_from_qid(dev, i); @@ -1234,7 +1174,7 @@ static int gve_reg_xdp_info(struct gve_priv *priv, struct net_device *dev) } } - for (i = 0; i < priv->num_xdp_queues; i++) { + for (i = 0; i < priv->tx_cfg.num_xdp_queues; i++) { tx_qid = gve_xdp_tx_queue_id(priv, i); priv->tx[tx_qid].xsk_pool = xsk_get_pool_from_qid(dev, i); } @@ -1255,7 +1195,7 @@ static void gve_unreg_xdp_info(struct gve_priv *priv) { int i, tx_qid; - if (!priv->num_xdp_queues) + if (!priv->tx_cfg.num_xdp_queues || !priv->rx || !priv->tx) return; for (i = 0; i < priv->rx_cfg.num_queues; i++) { @@ -1268,7 +1208,7 @@ static void gve_unreg_xdp_info(struct gve_priv *priv) } } - for (i = 0; i < priv->num_xdp_queues; i++) { + for (i = 0; i < priv->tx_cfg.num_xdp_queues; i++) { tx_qid = gve_xdp_tx_queue_id(priv, i); priv->tx[tx_qid].xsk_pool = NULL; } @@ -1285,15 +1225,14 @@ static void gve_drain_page_cache(struct gve_priv *priv) static void gve_rx_get_curr_alloc_cfg(struct gve_priv *priv, struct gve_rx_alloc_rings_cfg *cfg) { - cfg->qcfg = &priv->rx_cfg; + cfg->qcfg_rx = &priv->rx_cfg; cfg->qcfg_tx = &priv->tx_cfg; cfg->raw_addressing = !gve_is_qpl(priv); cfg->enable_header_split = priv->header_split_enabled; cfg->ring_size = priv->rx_desc_cnt; - cfg->packet_buffer_size = gve_is_gqi(priv) ? 
- GVE_DEFAULT_RX_BUFFER_SIZE : - priv->data_buffer_size_dqo; + cfg->packet_buffer_size = priv->rx_cfg.packet_buffer_size; cfg->rx = priv->rx; + cfg->xdp = !!cfg->qcfg_tx->num_xdp_queues; } void gve_get_curr_alloc_cfgs(struct gve_priv *priv, @@ -1366,17 +1305,13 @@ static int gve_queues_start(struct gve_priv *priv, /* Record new configs into priv */ priv->tx_cfg = *tx_alloc_cfg->qcfg; - priv->rx_cfg = *rx_alloc_cfg->qcfg; + priv->tx_cfg.num_xdp_queues = tx_alloc_cfg->num_xdp_rings; + priv->rx_cfg = *rx_alloc_cfg->qcfg_rx; priv->tx_desc_cnt = tx_alloc_cfg->ring_size; priv->rx_desc_cnt = rx_alloc_cfg->ring_size; - if (priv->xdp_prog) - priv->num_xdp_queues = priv->rx_cfg.num_queues; - else - priv->num_xdp_queues = 0; - - gve_tx_start_rings(priv, 0, tx_alloc_cfg->num_rings); - gve_rx_start_rings(priv, rx_alloc_cfg->qcfg->num_queues); + gve_tx_start_rings(priv, gve_num_tx_queues(priv)); + gve_rx_start_rings(priv, rx_alloc_cfg->qcfg_rx->num_queues); gve_init_sync_stats(priv); err = netif_set_real_num_tx_queues(dev, priv->tx_cfg.num_queues); @@ -1390,12 +1325,18 @@ static int gve_queues_start(struct gve_priv *priv, if (err) goto stop_and_free_rings; + if (rx_alloc_cfg->reset_rss) { + err = gve_init_rss_config(priv, priv->rx_cfg.num_queues); + if (err) + goto reset; + } + err = gve_register_qpls(priv); if (err) goto reset; priv->header_split_enabled = rx_alloc_cfg->enable_header_split; - priv->data_buffer_size_dqo = rx_alloc_cfg->packet_buffer_size; + priv->rx_cfg.packet_buffer_size = rx_alloc_cfg->packet_buffer_size; err = gve_create_rings(priv); if (err) @@ -1422,7 +1363,7 @@ static int gve_queues_start(struct gve_priv *priv, /* return the original error */ return err; stop_and_free_rings: - gve_tx_stop_rings(priv, 0, gve_num_tx_queues(priv)); + gve_tx_stop_rings(priv, gve_num_tx_queues(priv)); gve_rx_stop_rings(priv, priv->rx_cfg.num_queues); gve_queues_mem_remove(priv); return err; @@ -1471,7 +1412,7 @@ static int gve_queues_stop(struct gve_priv *priv) gve_unreg_xdp_info(priv); - gve_tx_stop_rings(priv, 0, gve_num_tx_queues(priv)); + gve_tx_stop_rings(priv, gve_num_tx_queues(priv)); gve_rx_stop_rings(priv, priv->rx_cfg.num_queues); priv->interface_down_cnt++; @@ -1501,56 +1442,6 @@ static int gve_close(struct net_device *dev) return 0; } -static int gve_remove_xdp_queues(struct gve_priv *priv) -{ - int err; - - err = gve_destroy_xdp_rings(priv); - if (err) - return err; - - err = gve_unregister_xdp_qpls(priv); - if (err) - return err; - - gve_unreg_xdp_info(priv); - gve_free_xdp_rings(priv); - - priv->num_xdp_queues = 0; - return 0; -} - -static int gve_add_xdp_queues(struct gve_priv *priv) -{ - int err; - - priv->num_xdp_queues = priv->rx_cfg.num_queues; - - err = gve_alloc_xdp_rings(priv); - if (err) - goto err; - - err = gve_reg_xdp_info(priv, priv->dev); - if (err) - goto free_xdp_rings; - - err = gve_register_xdp_qpls(priv); - if (err) - goto free_xdp_rings; - - err = gve_create_xdp_rings(priv); - if (err) - goto free_xdp_rings; - - return 0; - -free_xdp_rings: - gve_free_xdp_rings(priv); -err: - priv->num_xdp_queues = 0; - return err; -} - static void gve_handle_link_status(struct gve_priv *priv, bool link_status) { if (!gve_get_napi_enabled(priv)) @@ -1568,6 +1459,19 @@ static void gve_handle_link_status(struct gve_priv *priv, bool link_status) } } +static int gve_configure_rings_xdp(struct gve_priv *priv, + u16 num_xdp_rings) +{ + struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0}; + struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0}; + + gve_get_curr_alloc_cfgs(priv, &tx_alloc_cfg, 
&rx_alloc_cfg); + tx_alloc_cfg.num_xdp_rings = num_xdp_rings; + + rx_alloc_cfg.xdp = !!num_xdp_rings; + return gve_adjust_config(priv, &tx_alloc_cfg, &rx_alloc_cfg); +} + static int gve_set_xdp(struct gve_priv *priv, struct bpf_prog *prog, struct netlink_ext_ack *extack) { @@ -1580,29 +1484,26 @@ static int gve_set_xdp(struct gve_priv *priv, struct bpf_prog *prog, WRITE_ONCE(priv->xdp_prog, prog); if (old_prog) bpf_prog_put(old_prog); + + /* Update priv XDP queue configuration */ + priv->tx_cfg.num_xdp_queues = priv->xdp_prog ? + priv->rx_cfg.num_queues : 0; return 0; } - gve_turndown(priv); - if (!old_prog && prog) { - // Allocate XDP TX queues if an XDP program is - // being installed - err = gve_add_xdp_queues(priv); - if (err) - goto out; - } else if (old_prog && !prog) { - // Remove XDP TX queues if an XDP program is - // being uninstalled - err = gve_remove_xdp_queues(priv); - if (err) - goto out; - } + if (!old_prog && prog) + err = gve_configure_rings_xdp(priv, priv->rx_cfg.num_queues); + else if (old_prog && !prog) + err = gve_configure_rings_xdp(priv, 0); + + if (err) + goto out; + WRITE_ONCE(priv->xdp_prog, prog); if (old_prog) bpf_prog_put(old_prog); out: - gve_turnup(priv); status = ioread32be(&priv->reg_bar0->device_status); gve_handle_link_status(priv, GVE_DEVICE_STATUS_LINK_STATUS_MASK & status); return err; @@ -1736,6 +1637,7 @@ static int gve_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags) static int verify_xdp_configuration(struct net_device *dev) { struct gve_priv *priv = netdev_priv(dev); + u16 max_xdp_mtu; if (dev->features & NETIF_F_LRO) { netdev_warn(dev, "XDP is not supported when LRO is on.\n"); @@ -1748,7 +1650,11 @@ static int verify_xdp_configuration(struct net_device *dev) return -EOPNOTSUPP; } - if (dev->mtu > GVE_DEFAULT_RX_BUFFER_SIZE - sizeof(struct ethhdr) - GVE_RX_PAD) { + max_xdp_mtu = priv->rx_cfg.packet_buffer_size - sizeof(struct ethhdr); + if (priv->queue_format == GVE_GQI_QPL_FORMAT) + max_xdp_mtu -= GVE_RX_PAD; + + if (dev->mtu > max_xdp_mtu) { netdev_warn(dev, "XDP is not supported for mtu %d.\n", dev->mtu); return -EOPNOTSUPP; @@ -1786,6 +1692,26 @@ static int gve_xdp(struct net_device *dev, struct netdev_bpf *xdp) } } +int gve_init_rss_config(struct gve_priv *priv, u16 num_queues) +{ + struct gve_rss_config *rss_config = &priv->rss_config; + struct ethtool_rxfh_param rxfh = {0}; + u16 i; + + if (!priv->cache_rss_config) + return 0; + + for (i = 0; i < priv->rss_lut_size; i++) + rss_config->hash_lut[i] = + ethtool_rxfh_indir_default(i, num_queues); + + netdev_rss_key_fill(rss_config->hash_key, priv->rss_key_size); + + rxfh.hfunc = ETH_RSS_HASH_TOP; + + return gve_adminq_configure_rss(priv, &rxfh); +} + int gve_flow_rules_reset(struct gve_priv *priv) { if (!priv->max_flow_rules) @@ -1833,12 +1759,12 @@ int gve_adjust_config(struct gve_priv *priv, } int gve_adjust_queues(struct gve_priv *priv, - struct gve_queue_config new_rx_config, - struct gve_queue_config new_tx_config) + struct gve_rx_queue_config new_rx_config, + struct gve_tx_queue_config new_tx_config, + bool reset_rss) { struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0}; struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0}; - int num_xdp_queues; int err; gve_get_curr_alloc_cfgs(priv, &tx_alloc_cfg, &rx_alloc_cfg); @@ -1846,18 +1772,19 @@ int gve_adjust_queues(struct gve_priv *priv, /* Relay the new config from ethtool */ tx_alloc_cfg.qcfg = &new_tx_config; rx_alloc_cfg.qcfg_tx = &new_tx_config; - rx_alloc_cfg.qcfg = &new_rx_config; - tx_alloc_cfg.num_rings = 
new_tx_config.num_queues; - - /* Add dedicated XDP TX queues if enabled. */ - num_xdp_queues = priv->xdp_prog ? new_rx_config.num_queues : 0; - tx_alloc_cfg.num_rings += num_xdp_queues; + rx_alloc_cfg.qcfg_rx = &new_rx_config; + rx_alloc_cfg.reset_rss = reset_rss; if (netif_running(priv->dev)) { err = gve_adjust_config(priv, &tx_alloc_cfg, &rx_alloc_cfg); return err; } /* Set the config for the next up. */ + if (reset_rss) { + err = gve_init_rss_config(priv, new_rx_config.num_queues); + if (err) + return err; + } priv->tx_cfg = new_tx_config; priv->rx_cfg = new_rx_config; @@ -1886,7 +1813,7 @@ static void gve_turndown(struct gve_priv *priv) netif_queue_set_napi(priv->dev, idx, NETDEV_QUEUE_TYPE_TX, NULL); - napi_disable(&block->napi); + napi_disable_locked(&block->napi); } for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) { int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx); @@ -1897,7 +1824,7 @@ static void gve_turndown(struct gve_priv *priv) netif_queue_set_napi(priv->dev, idx, NETDEV_QUEUE_TYPE_RX, NULL); - napi_disable(&block->napi); + napi_disable_locked(&block->napi); } /* Stop tx queues */ @@ -1927,7 +1854,7 @@ static void gve_turnup(struct gve_priv *priv) if (!gve_tx_was_added_to_block(priv, idx)) continue; - napi_enable(&block->napi); + napi_enable_locked(&block->napi); if (idx < priv->tx_cfg.num_queues) netif_queue_set_napi(priv->dev, idx, @@ -1955,7 +1882,7 @@ static void gve_turnup(struct gve_priv *priv) if (!gve_rx_was_added_to_block(priv, idx)) continue; - napi_enable(&block->napi); + napi_enable_locked(&block->napi); netif_queue_set_napi(priv->dev, idx, NETDEV_QUEUE_TYPE_RX, &block->napi); @@ -1974,7 +1901,7 @@ static void gve_turnup(struct gve_priv *priv) napi_schedule(&block->napi); } - if (priv->num_xdp_queues && gve_supports_xdp_xmit(priv)) + if (priv->tx_cfg.num_xdp_queues && gve_supports_xdp_xmit(priv)) xdp_features_set_redirect_target(priv->dev, false); gve_set_napi_enabled(priv); @@ -2330,6 +2257,7 @@ static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device) priv->rx_cfg.num_queues = min_t(int, priv->default_num_queues, priv->rx_cfg.num_queues); } + priv->tx_cfg.num_xdp_queues = 0; dev_info(&priv->pdev->dev, "TX queues %d, RX queues %d\n", priv->tx_cfg.num_queues, priv->rx_cfg.num_queues); @@ -2710,7 +2638,7 @@ static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent) priv->service_task_flags = 0x0; priv->state_flags = 0x0; priv->ethtool_flags = 0x0; - priv->data_buffer_size_dqo = GVE_DEFAULT_RX_BUFFER_SIZE; + priv->rx_cfg.packet_buffer_size = GVE_DEFAULT_RX_BUFFER_SIZE; priv->max_rx_buffer_size = GVE_DEFAULT_RX_BUFFER_SIZE; gve_set_probe_in_progress(priv); @@ -2805,6 +2733,7 @@ static int gve_suspend(struct pci_dev *pdev, pm_message_t state) priv->suspend_cnt++; rtnl_lock(); + netdev_lock(netdev); if (was_up && gve_close(priv->dev)) { /* If the dev was up, attempt to close, if close fails, reset */ gve_reset_and_teardown(priv, was_up); @@ -2813,6 +2742,7 @@ static int gve_suspend(struct pci_dev *pdev, pm_message_t state) gve_teardown_priv_resources(priv); } priv->up_before_suspend = was_up; + netdev_unlock(netdev); rtnl_unlock(); return 0; } @@ -2825,7 +2755,9 @@ static int gve_resume(struct pci_dev *pdev) priv->resume_cnt++; rtnl_lock(); + netdev_lock(netdev); err = gve_reset_recovery(priv, priv->up_before_suspend); + netdev_unlock(netdev); rtnl_unlock(); return err; } diff --git a/drivers/net/ethernet/google/gve/gve_rx.c b/drivers/net/ethernet/google/gve/gve_rx.c index acb73d4d0de6b..90e875c1832f8 100644 --- 
a/drivers/net/ethernet/google/gve/gve_rx.c +++ b/drivers/net/ethernet/google/gve/gve_rx.c @@ -141,12 +141,15 @@ void gve_rx_free_ring_gqi(struct gve_priv *priv, struct gve_rx_ring *rx, netif_dbg(priv, drv, priv->dev, "freed rx ring %d\n", idx); } -static void gve_setup_rx_buffer(struct gve_rx_slot_page_info *page_info, - dma_addr_t addr, struct page *page, __be64 *slot_addr) +static void gve_setup_rx_buffer(struct gve_rx_ring *rx, + struct gve_rx_slot_page_info *page_info, + dma_addr_t addr, struct page *page, + __be64 *slot_addr) { page_info->page = page; page_info->page_offset = 0; page_info->page_address = page_address(page); + page_info->buf_size = rx->packet_buffer_size; *slot_addr = cpu_to_be64(addr); /* The page already has 1 ref */ page_ref_add(page, INT_MAX - 1); @@ -171,7 +174,7 @@ static int gve_rx_alloc_buffer(struct gve_priv *priv, struct device *dev, return err; } - gve_setup_rx_buffer(page_info, dma, page, &data_slot->addr); + gve_setup_rx_buffer(rx, page_info, dma, page, &data_slot->addr); return 0; } @@ -199,7 +202,8 @@ static int gve_rx_prefill_pages(struct gve_rx_ring *rx, struct page *page = rx->data.qpl->pages[i]; dma_addr_t addr = i * PAGE_SIZE; - gve_setup_rx_buffer(&rx->data.page_info[i], addr, page, + gve_setup_rx_buffer(rx, &rx->data.page_info[i], addr, + page, &rx->data.data_ring[i].qpl_offset); continue; } @@ -222,6 +226,7 @@ static int gve_rx_prefill_pages(struct gve_rx_ring *rx, rx->qpl_copy_pool[j].page = page; rx->qpl_copy_pool[j].page_offset = 0; rx->qpl_copy_pool[j].page_address = page_address(page); + rx->qpl_copy_pool[j].buf_size = rx->packet_buffer_size; /* The page already has 1 ref. */ page_ref_add(page, INT_MAX - 1); @@ -283,6 +288,7 @@ int gve_rx_alloc_ring_gqi(struct gve_priv *priv, rx->gve = priv; rx->q_num = idx; + rx->packet_buffer_size = cfg->packet_buffer_size; rx->mask = slots - 1; rx->data.raw_addressing = cfg->raw_addressing; @@ -351,7 +357,6 @@ int gve_rx_alloc_ring_gqi(struct gve_priv *priv, rx->db_threshold = slots / 2; gve_rx_init_ring_state_gqi(rx); - rx->packet_buffer_size = GVE_DEFAULT_RX_BUFFER_SIZE; gve_rx_ctx_clear(&rx->ctx); return 0; @@ -385,12 +390,12 @@ int gve_rx_alloc_rings_gqi(struct gve_priv *priv, int err = 0; int i, j; - rx = kvcalloc(cfg->qcfg->max_queues, sizeof(struct gve_rx_ring), + rx = kvcalloc(cfg->qcfg_rx->max_queues, sizeof(struct gve_rx_ring), GFP_KERNEL); if (!rx) return -ENOMEM; - for (i = 0; i < cfg->qcfg->num_queues; i++) { + for (i = 0; i < cfg->qcfg_rx->num_queues; i++) { err = gve_rx_alloc_ring_gqi(priv, cfg, &rx[i], i); if (err) { netif_err(priv, drv, priv->dev, @@ -419,7 +424,7 @@ void gve_rx_free_rings_gqi(struct gve_priv *priv, if (!rx) return; - for (i = 0; i < cfg->qcfg->num_queues; i++) + for (i = 0; i < cfg->qcfg_rx->num_queues; i++) gve_rx_free_ring_gqi(priv, &rx[i], cfg); kvfree(rx); @@ -590,7 +595,7 @@ static struct sk_buff *gve_rx_copy_to_pool(struct gve_rx_ring *rx, copy_page_info->pad = page_info->pad; skb = gve_rx_add_frags(napi, copy_page_info, - rx->packet_buffer_size, len, ctx); + copy_page_info->buf_size, len, ctx); if (unlikely(!skb)) return NULL; @@ -630,7 +635,8 @@ gve_rx_qpl(struct device *dev, struct net_device *netdev, * device. */ if (page_info->can_flip) { - skb = gve_rx_add_frags(napi, page_info, rx->packet_buffer_size, len, ctx); + skb = gve_rx_add_frags(napi, page_info, page_info->buf_size, + len, ctx); /* No point in recycling if we didn't get the skb */ if (skb) { /* Make sure that the page isn't freed. 
*/ @@ -680,7 +686,7 @@ static struct sk_buff *gve_rx_skb(struct gve_priv *priv, struct gve_rx_ring *rx, skb = gve_rx_raw_addressing(&priv->pdev->dev, netdev, page_info, len, napi, data_slot, - rx->packet_buffer_size, ctx); + page_info->buf_size, ctx); } else { skb = gve_rx_qpl(&priv->pdev->dev, netdev, rx, page_info, len, napi, data_slot); @@ -855,7 +861,7 @@ static void gve_rx(struct gve_rx_ring *rx, netdev_features_t feat, void *old_data; int xdp_act; - xdp_init_buff(&xdp, rx->packet_buffer_size, &rx->xdp_rxq); + xdp_init_buff(&xdp, page_info->buf_size, &rx->xdp_rxq); xdp_prepare_buff(&xdp, page_info->page_address + page_info->page_offset, GVE_RX_PAD, len, false); diff --git a/drivers/net/ethernet/google/gve/gve_rx_dqo.c b/drivers/net/ethernet/google/gve/gve_rx_dqo.c index 8ac0047f1ada1..dcb0545baa50f 100644 --- a/drivers/net/ethernet/google/gve/gve_rx_dqo.c +++ b/drivers/net/ethernet/google/gve/gve_rx_dqo.c @@ -109,10 +109,13 @@ static void gve_rx_reset_ring_dqo(struct gve_priv *priv, int idx) void gve_rx_stop_ring_dqo(struct gve_priv *priv, int idx) { int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx); + struct gve_rx_ring *rx = &priv->rx[idx]; if (!gve_rx_was_added_to_block(priv, idx)) return; + if (rx->dqo.page_pool) + page_pool_disable_direct_recycling(rx->dqo.page_pool); gve_remove_napi(priv, ntfy_idx); gve_rx_remove_from_block(priv, idx); gve_rx_reset_ring_dqo(priv, idx); @@ -221,6 +224,15 @@ int gve_rx_alloc_ring_dqo(struct gve_priv *priv, memset(rx, 0, sizeof(*rx)); rx->gve = priv; rx->q_num = idx; + rx->packet_buffer_size = cfg->packet_buffer_size; + + if (cfg->xdp) { + rx->packet_buffer_truesize = GVE_XDP_RX_BUFFER_SIZE_DQO; + rx->rx_headroom = XDP_PACKET_HEADROOM; + } else { + rx->packet_buffer_truesize = rx->packet_buffer_size; + rx->rx_headroom = 0; + } rx->dqo.num_buf_states = cfg->raw_addressing ? buffer_queue_slots : gve_get_rx_pages_per_qpl_dqo(cfg->ring_size); @@ -251,7 +263,7 @@ int gve_rx_alloc_ring_dqo(struct gve_priv *priv, goto err; if (cfg->raw_addressing) { - pool = gve_rx_create_page_pool(priv, rx); + pool = gve_rx_create_page_pool(priv, rx, cfg->xdp); if (IS_ERR(pool)) goto err; @@ -297,12 +309,12 @@ int gve_rx_alloc_rings_dqo(struct gve_priv *priv, int err; int i; - rx = kvcalloc(cfg->qcfg->max_queues, sizeof(struct gve_rx_ring), + rx = kvcalloc(cfg->qcfg_rx->max_queues, sizeof(struct gve_rx_ring), GFP_KERNEL); if (!rx) return -ENOMEM; - for (i = 0; i < cfg->qcfg->num_queues; i++) { + for (i = 0; i < cfg->qcfg_rx->num_queues; i++) { err = gve_rx_alloc_ring_dqo(priv, cfg, &rx[i], i); if (err) { netif_err(priv, drv, priv->dev, @@ -331,7 +343,7 @@ void gve_rx_free_rings_dqo(struct gve_priv *priv, if (!rx) return; - for (i = 0; i < cfg->qcfg->num_queues; i++) + for (i = 0; i < cfg->qcfg_rx->num_queues; i++) gve_rx_free_ring_dqo(priv, &rx[i], cfg); kvfree(rx); @@ -474,6 +486,25 @@ static int gve_rx_copy_ondemand(struct gve_rx_ring *rx, return 0; } +static void gve_skb_add_rx_frag(struct gve_rx_ring *rx, + struct gve_rx_buf_state_dqo *buf_state, + int num_frags, u16 buf_len) +{ + if (rx->dqo.page_pool) { + skb_add_rx_frag_netmem(rx->ctx.skb_tail, num_frags, + buf_state->page_info.netmem, + buf_state->page_info.page_offset + + buf_state->page_info.pad, buf_len, + buf_state->page_info.buf_size); + } else { + skb_add_rx_frag(rx->ctx.skb_tail, num_frags, + buf_state->page_info.page, + buf_state->page_info.page_offset + + buf_state->page_info.pad, buf_len, + buf_state->page_info.buf_size); + } +} + /* Chains multi skbs for single rx packet. 
* Returns 0 if buffer is appended, -1 otherwise. */ @@ -511,14 +542,34 @@ static int gve_rx_append_frags(struct napi_struct *napi, if (gve_rx_should_trigger_copy_ondemand(rx)) return gve_rx_copy_ondemand(rx, buf_state, buf_len); - skb_add_rx_frag(rx->ctx.skb_tail, num_frags, - buf_state->page_info.page, - buf_state->page_info.page_offset, - buf_len, buf_state->page_info.buf_size); + gve_skb_add_rx_frag(rx, buf_state, num_frags, buf_len); gve_reuse_buffer(rx, buf_state); return 0; } +static void gve_xdp_done_dqo(struct gve_priv *priv, struct gve_rx_ring *rx, + struct xdp_buff *xdp, struct bpf_prog *xprog, + int xdp_act, + struct gve_rx_buf_state_dqo *buf_state) +{ + u64_stats_update_begin(&rx->statss); + switch (xdp_act) { + case XDP_ABORTED: + case XDP_DROP: + default: + rx->xdp_actions[xdp_act]++; + break; + case XDP_TX: + rx->xdp_tx_errors++; + break; + case XDP_REDIRECT: + rx->xdp_redirect_errors++; + break; + } + u64_stats_update_end(&rx->statss); + gve_free_buffer(rx, buf_state); +} + /* Returns 0 if descriptor is completed successfully. * Returns -EINVAL if descriptor is invalid. * Returns -ENOMEM if data cannot be copied to skb. @@ -533,6 +584,7 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx, const bool hsplit = compl_desc->split_header; struct gve_rx_buf_state_dqo *buf_state; struct gve_priv *priv = rx->gve; + struct bpf_prog *xprog; u16 buf_len; u16 hdr_len; @@ -559,7 +611,12 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx, /* Page might have not been used for awhile and was likely last written * by a different thread. */ - prefetch(buf_state->page_info.page); + if (rx->dqo.page_pool) { + if (!netmem_is_net_iov(buf_state->page_info.netmem)) + prefetch(netmem_to_page(buf_state->page_info.netmem)); + } else { + prefetch(buf_state->page_info.page); + } /* Copy the header into the skb in the case of header split */ if (hsplit) { @@ -588,7 +645,8 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx, /* Sync the portion of dma buffer for CPU to read. */ dma_sync_single_range_for_cpu(&priv->pdev->dev, buf_state->addr, - buf_state->page_info.page_offset, + buf_state->page_info.page_offset + + buf_state->page_info.pad, buf_len, DMA_FROM_DEVICE); /* Append to current skb if one exists. 
*/ @@ -600,6 +658,34 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx, return 0; } + xprog = READ_ONCE(priv->xdp_prog); + if (xprog) { + struct xdp_buff xdp; + void *old_data; + int xdp_act; + + xdp_init_buff(&xdp, buf_state->page_info.buf_size, + &rx->xdp_rxq); + xdp_prepare_buff(&xdp, + buf_state->page_info.page_address + + buf_state->page_info.page_offset, + buf_state->page_info.pad, + buf_len, false); + old_data = xdp.data; + xdp_act = bpf_prog_run_xdp(xprog, &xdp); + buf_state->page_info.pad += xdp.data - old_data; + buf_len = xdp.data_end - xdp.data; + if (xdp_act != XDP_PASS) { + gve_xdp_done_dqo(priv, rx, &xdp, xprog, xdp_act, + buf_state); + return 0; + } + + u64_stats_update_begin(&rx->statss); + rx->xdp_actions[XDP_PASS]++; + u64_stats_update_end(&rx->statss); + } + if (eop && buf_len <= priv->rx_copybreak) { rx->ctx.skb_head = gve_rx_copy(priv->dev, napi, &buf_state->page_info, buf_len); @@ -630,9 +716,7 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx, if (rx->dqo.page_pool) skb_mark_for_recycle(rx->ctx.skb_head); - skb_add_rx_frag(rx->ctx.skb_head, 0, buf_state->page_info.page, - buf_state->page_info.page_offset, buf_len, - buf_state->page_info.buf_size); + gve_skb_add_rx_frag(rx, buf_state, 0, buf_len); gve_reuse_buffer(rx, buf_state); return 0; diff --git a/drivers/net/ethernet/google/gve/gve_tx.c b/drivers/net/ethernet/google/gve/gve_tx.c index 4350ebd9c2bd9..1b40bf0c811a3 100644 --- a/drivers/net/ethernet/google/gve/gve_tx.c +++ b/drivers/net/ethernet/google/gve/gve_tx.c @@ -334,27 +334,23 @@ int gve_tx_alloc_rings_gqi(struct gve_priv *priv, struct gve_tx_alloc_rings_cfg *cfg) { struct gve_tx_ring *tx = cfg->tx; + int total_queues; int err = 0; int i, j; - if (cfg->start_idx + cfg->num_rings > cfg->qcfg->max_queues) { + total_queues = cfg->qcfg->num_queues + cfg->num_xdp_rings; + if (total_queues > cfg->qcfg->max_queues) { netif_err(priv, drv, priv->dev, "Cannot alloc more than the max num of Tx rings\n"); return -EINVAL; } - if (cfg->start_idx == 0) { - tx = kvcalloc(cfg->qcfg->max_queues, sizeof(struct gve_tx_ring), - GFP_KERNEL); - if (!tx) - return -ENOMEM; - } else if (!tx) { - netif_err(priv, drv, priv->dev, - "Cannot alloc tx rings from a nonzero start idx without tx array\n"); - return -EINVAL; - } + tx = kvcalloc(cfg->qcfg->max_queues, sizeof(struct gve_tx_ring), + GFP_KERNEL); + if (!tx) + return -ENOMEM; - for (i = cfg->start_idx; i < cfg->start_idx + cfg->num_rings; i++) { + for (i = 0; i < total_queues; i++) { err = gve_tx_alloc_ring_gqi(priv, cfg, &tx[i], i); if (err) { netif_err(priv, drv, priv->dev, @@ -370,8 +366,7 @@ int gve_tx_alloc_rings_gqi(struct gve_priv *priv, cleanup: for (j = 0; j < i; j++) gve_tx_free_ring_gqi(priv, &tx[j], cfg); - if (cfg->start_idx == 0) - kvfree(tx); + kvfree(tx); return err; } @@ -384,13 +379,11 @@ void gve_tx_free_rings_gqi(struct gve_priv *priv, if (!tx) return; - for (i = cfg->start_idx; i < cfg->start_idx + cfg->num_rings; i++) + for (i = 0; i < cfg->qcfg->num_queues + cfg->qcfg->num_xdp_queues; i++) gve_tx_free_ring_gqi(priv, &tx[i], cfg); - if (cfg->start_idx == 0) { - kvfree(tx); - cfg->tx = NULL; - } + kvfree(tx); + cfg->tx = NULL; } /* gve_tx_avail - Calculates the number of slots available in the ring @@ -844,7 +837,7 @@ int gve_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, return -ENETDOWN; qid = gve_xdp_tx_queue_id(priv, - smp_processor_id() % priv->num_xdp_queues); + smp_processor_id() % priv->tx_cfg.num_xdp_queues); tx = &priv->tx[qid]; @@ 
-959,13 +952,9 @@ static int gve_xsk_tx(struct gve_priv *priv, struct gve_tx_ring *tx, spin_lock(&tx->xdp_lock); while (sent < budget) { - if (!gve_can_tx(tx, GVE_TX_START_THRESH)) - goto out; - - if (!xsk_tx_peek_desc(tx->xsk_pool, &desc)) { - tx->xdp_xsk_done = tx->xdp_xsk_wakeup; + if (!gve_can_tx(tx, GVE_TX_START_THRESH) || + !xsk_tx_peek_desc(tx->xsk_pool, &desc)) goto out; - } data = xsk_buff_raw_get_data(tx->xsk_pool, desc.addr); nsegs = gve_tx_fill_xdp(priv, tx, data, desc.len, NULL, true); diff --git a/drivers/net/ethernet/google/gve/gve_tx_dqo.c b/drivers/net/ethernet/google/gve/gve_tx_dqo.c index 394debc62268a..2eba868d80370 100644 --- a/drivers/net/ethernet/google/gve/gve_tx_dqo.c +++ b/drivers/net/ethernet/google/gve/gve_tx_dqo.c @@ -379,27 +379,23 @@ int gve_tx_alloc_rings_dqo(struct gve_priv *priv, struct gve_tx_alloc_rings_cfg *cfg) { struct gve_tx_ring *tx = cfg->tx; + int total_queues; int err = 0; int i, j; - if (cfg->start_idx + cfg->num_rings > cfg->qcfg->max_queues) { + total_queues = cfg->qcfg->num_queues + cfg->num_xdp_rings; + if (total_queues > cfg->qcfg->max_queues) { netif_err(priv, drv, priv->dev, "Cannot alloc more than the max num of Tx rings\n"); return -EINVAL; } - if (cfg->start_idx == 0) { - tx = kvcalloc(cfg->qcfg->max_queues, sizeof(struct gve_tx_ring), - GFP_KERNEL); - if (!tx) - return -ENOMEM; - } else if (!tx) { - netif_err(priv, drv, priv->dev, - "Cannot alloc tx rings from a nonzero start idx without tx array\n"); - return -EINVAL; - } + tx = kvcalloc(cfg->qcfg->max_queues, sizeof(struct gve_tx_ring), + GFP_KERNEL); + if (!tx) + return -ENOMEM; - for (i = cfg->start_idx; i < cfg->start_idx + cfg->num_rings; i++) { + for (i = 0; i < total_queues; i++) { err = gve_tx_alloc_ring_dqo(priv, cfg, &tx[i], i); if (err) { netif_err(priv, drv, priv->dev, @@ -415,8 +411,7 @@ int gve_tx_alloc_rings_dqo(struct gve_priv *priv, err: for (j = 0; j < i; j++) gve_tx_free_ring_dqo(priv, &tx[j], cfg); - if (cfg->start_idx == 0) - kvfree(tx); + kvfree(tx); return err; } @@ -429,13 +424,11 @@ void gve_tx_free_rings_dqo(struct gve_priv *priv, if (!tx) return; - for (i = cfg->start_idx; i < cfg->start_idx + cfg->num_rings; i++) + for (i = 0; i < cfg->qcfg->num_queues + cfg->qcfg->num_xdp_queues; i++) gve_tx_free_ring_dqo(priv, &tx[i], cfg); - if (cfg->start_idx == 0) { - kvfree(tx); - cfg->tx = NULL; - } + kvfree(tx); + cfg->tx = NULL; } /* Returns the number of slots available in the ring */ diff --git a/drivers/net/ethernet/google/gve/gve_utils.c b/drivers/net/ethernet/google/gve/gve_utils.c index 30fef100257e6..ace9b8698021f 100644 --- a/drivers/net/ethernet/google/gve/gve_utils.c +++ b/drivers/net/ethernet/google/gve/gve_utils.c @@ -110,13 +110,13 @@ void gve_add_napi(struct gve_priv *priv, int ntfy_idx, { struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx]; - netif_napi_add(priv->dev, &block->napi, gve_poll); - netif_napi_set_irq(&block->napi, block->irq); + netif_napi_add_locked(priv->dev, &block->napi, gve_poll); + netif_napi_set_irq_locked(&block->napi, block->irq); } void gve_remove_napi(struct gve_priv *priv, int ntfy_idx) { struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx]; - netif_napi_del(&block->napi); + netif_napi_del_locked(&block->napi); } diff --git a/drivers/net/ethernet/hisilicon/hibmcge/Makefile b/drivers/net/ethernet/hisilicon/hibmcge/Makefile index 7ea15f9ef8492..1a9da564b306a 100644 --- a/drivers/net/ethernet/hisilicon/hibmcge/Makefile +++ b/drivers/net/ethernet/hisilicon/hibmcge/Makefile @@ -6,4 +6,4 @@ obj-$(CONFIG_HIBMCGE) 
+= hibmcge.o hibmcge-objs = hbg_main.o hbg_hw.o hbg_mdio.o hbg_irq.o hbg_txrx.o hbg_ethtool.o \ - hbg_debugfs.o hbg_err.o + hbg_debugfs.o hbg_err.o hbg_diagnose.o diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h index b4300d8ea4ad2..f8cdab62bf85c 100644 --- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h +++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h @@ -36,6 +36,8 @@ enum hbg_nic_state { HBG_NIC_STATE_EVENT_HANDLING = 0, HBG_NIC_STATE_RESETTING, HBG_NIC_STATE_RESET_FAIL, + HBG_NIC_STATE_NEED_RESET, /* trigger a reset in scheduled task */ + HBG_NIC_STATE_NP_LINK_FAIL, }; enum hbg_reset_type { @@ -81,6 +83,7 @@ enum hbg_hw_event_type { HBG_HW_EVENT_NONE = 0, HBG_HW_EVENT_INIT, /* driver is loading */ HBG_HW_EVENT_RESET, + HBG_HW_EVENT_CORE_RESET, }; struct hbg_dev_specs { @@ -104,6 +107,7 @@ struct hbg_irq_info { u32 mask; bool re_enable; bool need_print; + bool need_reset; u64 count; void (*irq_handle)(struct hbg_priv *priv, struct hbg_irq_info *info); @@ -142,6 +146,118 @@ struct hbg_user_def { struct ethtool_pauseparam pause_param; }; +struct hbg_stats { + u64 rx_desc_drop; + u64 rx_desc_l2_err_cnt; + u64 rx_desc_pkt_len_err_cnt; + u64 rx_desc_l3l4_err_cnt; + u64 rx_desc_l3_wrong_head_cnt; + u64 rx_desc_l3_csum_err_cnt; + u64 rx_desc_l3_len_err_cnt; + u64 rx_desc_l3_zero_ttl_cnt; + u64 rx_desc_l3_other_cnt; + u64 rx_desc_l4_err_cnt; + u64 rx_desc_l4_wrong_head_cnt; + u64 rx_desc_l4_len_err_cnt; + u64 rx_desc_l4_csum_err_cnt; + u64 rx_desc_l4_zero_port_num_cnt; + u64 rx_desc_l4_other_cnt; + u64 rx_desc_frag_cnt; + u64 rx_desc_ip_ver_err_cnt; + u64 rx_desc_ipv4_pkt_cnt; + u64 rx_desc_ipv6_pkt_cnt; + u64 rx_desc_no_ip_pkt_cnt; + u64 rx_desc_ip_pkt_cnt; + u64 rx_desc_tcp_pkt_cnt; + u64 rx_desc_udp_pkt_cnt; + u64 rx_desc_vlan_pkt_cnt; + u64 rx_desc_icmp_pkt_cnt; + u64 rx_desc_arp_pkt_cnt; + u64 rx_desc_rarp_pkt_cnt; + u64 rx_desc_multicast_pkt_cnt; + u64 rx_desc_broadcast_pkt_cnt; + u64 rx_desc_ipsec_pkt_cnt; + u64 rx_desc_ip_opt_pkt_cnt; + u64 rx_desc_key_not_match_cnt; + + u64 rx_octets_total_ok_cnt; + u64 rx_uc_pkt_cnt; + u64 rx_mc_pkt_cnt; + u64 rx_bc_pkt_cnt; + u64 rx_vlan_pkt_cnt; + u64 rx_octets_bad_cnt; + u64 rx_octets_total_filt_cnt; + u64 rx_filt_pkt_cnt; + u64 rx_trans_pkt_cnt; + u64 rx_framesize_64; + u64 rx_framesize_65_127; + u64 rx_framesize_128_255; + u64 rx_framesize_256_511; + u64 rx_framesize_512_1023; + u64 rx_framesize_1024_1518; + u64 rx_framesize_bt_1518; + u64 rx_fcs_error_cnt; + u64 rx_data_error_cnt; + u64 rx_align_error_cnt; + u64 rx_pause_macctl_frame_cnt; + u64 rx_unknown_macctl_frame_cnt; + /* crc ok, > max_frm_size, < 2max_frm_size */ + u64 rx_frame_long_err_cnt; + /* crc fail, > max_frm_size, < 2max_frm_size */ + u64 rx_jabber_err_cnt; + /* > 2max_frm_size */ + u64 rx_frame_very_long_err_cnt; + /* < 64byte, >= short_runts_thr */ + u64 rx_frame_runt_err_cnt; + /* < short_runts_thr */ + u64 rx_frame_short_err_cnt; + /* PCU: dropped when the RX FIFO is full.*/ + u64 rx_overflow_cnt; + /* GMAC: the count of overflows of the RX FIFO */ + u64 rx_overrun_cnt; + /* PCU: the count of buffer alloc errors in RX */ + u64 rx_bufrq_err_cnt; + /* PCU: the count of write descriptor errors in RX */ + u64 rx_we_err_cnt; + /* GMAC: the count of pkts that contain PAD but length is not 64 */ + u64 rx_lengthfield_err_cnt; + u64 rx_fail_comma_cnt; + + u64 rx_dma_err_cnt; + u64 rx_fifo_less_empty_thrsld_cnt; + + u64 tx_octets_total_ok_cnt; + u64 tx_uc_pkt_cnt; + u64 tx_mc_pkt_cnt; + u64 
tx_bc_pkt_cnt; + u64 tx_vlan_pkt_cnt; + u64 tx_octets_bad_cnt; + u64 tx_trans_pkt_cnt; + u64 tx_pause_frame_cnt; + u64 tx_framesize_64; + u64 tx_framesize_65_127; + u64 tx_framesize_128_255; + u64 tx_framesize_256_511; + u64 tx_framesize_512_1023; + u64 tx_framesize_1024_1518; + u64 tx_framesize_bt_1518; + /* GMAC: the count of times that frames fail to be transmitted + * due to internal errors. + */ + u64 tx_underrun_err_cnt; + u64 tx_add_cs_fail_cnt; + /* PCU: the count of buffer free errors in TX */ + u64 tx_bufrl_err_cnt; + u64 tx_crc_err_cnt; + u64 tx_drop_cnt; + u64 tx_excessive_length_drop_cnt; + + u64 tx_timeout_cnt; + u64 tx_dma_err_cnt; + + u64 np_link_fail_cnt; +}; + struct hbg_priv { struct net_device *netdev; struct pci_dev *pdev; @@ -155,6 +271,12 @@ struct hbg_priv { struct hbg_mac_filter filter; enum hbg_reset_type reset_type; struct hbg_user_def user_def; + struct hbg_stats stats; + unsigned long last_update_stats_time; + struct delayed_work service_task; }; +void hbg_err_reset_task_schedule(struct hbg_priv *priv); +void hbg_np_link_fail_task_schedule(struct hbg_priv *priv); + #endif diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_debugfs.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_debugfs.c index 8473c43d171a9..5e0ba4d5b08d2 100644 --- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_debugfs.c +++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_debugfs.c @@ -67,10 +67,11 @@ static int hbg_dbg_irq_info(struct seq_file *s, void *unused) for (i = 0; i < priv->vectors.info_array_len; i++) { info = &priv->vectors.info_array[i]; seq_printf(s, - "%-20s: enabled: %-5s, logged: %-5s, count: %llu\n", + "%-20s: enabled: %-5s, reset: %-5s, logged: %-5s, count: %llu\n", info->name, str_true_false(hbg_hw_irq_is_enabled(priv, info->mask)), + str_true_false(info->need_reset), str_true_false(info->need_print), info->count); } @@ -114,6 +115,10 @@ static int hbg_dbg_nic_state(struct seq_file *s, void *unused) state_str_true_false(priv, HBG_NIC_STATE_RESET_FAIL)); seq_printf(s, "last reset type: %s\n", reset_type_str[priv->reset_type]); + seq_printf(s, "need reset state: %s\n", + state_str_true_false(priv, HBG_NIC_STATE_NEED_RESET)); + seq_printf(s, "np_link fail state: %s\n", + state_str_true_false(priv, HBG_NIC_STATE_NP_LINK_FAIL)); return 0; } diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_diagnose.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_diagnose.c new file mode 100644 index 0000000000000..d61c03f34ff05 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_diagnose.c @@ -0,0 +1,348 @@ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright (c) 2025 Hisilicon Limited. + +#include +#include +#include "hbg_common.h" +#include "hbg_ethtool.h" +#include "hbg_hw.h" +#include "hbg_diagnose.h" + +#define HBG_MSG_DATA_MAX_NUM 64 + +struct hbg_diagnose_message { + u32 opcode; + u32 status; + u32 data_num; + struct hbg_priv *priv; + + u32 data[HBG_MSG_DATA_MAX_NUM]; +}; + +#define HBG_HW_PUSH_WAIT_TIMEOUT_US (2 * 1000 * 1000) +#define HBG_HW_PUSH_WAIT_INTERVAL_US (1 * 1000) + +enum hbg_push_cmd { + HBG_PUSH_CMD_IRQ = 0, + HBG_PUSH_CMD_STATS, + HBG_PUSH_CMD_LINK, +}; + +struct hbg_push_stats_info { + /* id is used to match the name of the current stats item. + * and is used for pretty print on BMC + */ + u32 id; + u64 offset; +}; + +struct hbg_push_irq_info { + /* id is used to match the name of the current irq. 
+ * and is used for pretty print on BMC + */ + u32 id; + u32 mask; +}; + +#define HBG_PUSH_IRQ_I(name, id) {id, HBG_INT_MSK_##name##_B} +static const struct hbg_push_irq_info hbg_push_irq_list[] = { + HBG_PUSH_IRQ_I(RX, 0), + HBG_PUSH_IRQ_I(TX, 1), + HBG_PUSH_IRQ_I(TX_PKT_CPL, 2), + HBG_PUSH_IRQ_I(MAC_MII_FIFO_ERR, 3), + HBG_PUSH_IRQ_I(MAC_PCS_RX_FIFO_ERR, 4), + HBG_PUSH_IRQ_I(MAC_PCS_TX_FIFO_ERR, 5), + HBG_PUSH_IRQ_I(MAC_APP_RX_FIFO_ERR, 6), + HBG_PUSH_IRQ_I(MAC_APP_TX_FIFO_ERR, 7), + HBG_PUSH_IRQ_I(SRAM_PARITY_ERR, 8), + HBG_PUSH_IRQ_I(TX_AHB_ERR, 9), + HBG_PUSH_IRQ_I(RX_BUF_AVL, 10), + HBG_PUSH_IRQ_I(REL_BUF_ERR, 11), + HBG_PUSH_IRQ_I(TXCFG_AVL, 12), + HBG_PUSH_IRQ_I(TX_DROP, 13), + HBG_PUSH_IRQ_I(RX_DROP, 14), + HBG_PUSH_IRQ_I(RX_AHB_ERR, 15), + HBG_PUSH_IRQ_I(MAC_FIFO_ERR, 16), + HBG_PUSH_IRQ_I(RBREQ_ERR, 17), + HBG_PUSH_IRQ_I(WE_ERR, 18), +}; + +#define HBG_PUSH_STATS_I(name, id) {id, HBG_STATS_FIELD_OFF(name)} +static const struct hbg_push_stats_info hbg_push_stats_list[] = { + HBG_PUSH_STATS_I(rx_desc_drop, 0), + HBG_PUSH_STATS_I(rx_desc_l2_err_cnt, 1), + HBG_PUSH_STATS_I(rx_desc_pkt_len_err_cnt, 2), + HBG_PUSH_STATS_I(rx_desc_l3_wrong_head_cnt, 3), + HBG_PUSH_STATS_I(rx_desc_l3_csum_err_cnt, 4), + HBG_PUSH_STATS_I(rx_desc_l3_len_err_cnt, 5), + HBG_PUSH_STATS_I(rx_desc_l3_zero_ttl_cnt, 6), + HBG_PUSH_STATS_I(rx_desc_l3_other_cnt, 7), + HBG_PUSH_STATS_I(rx_desc_l4_err_cnt, 8), + HBG_PUSH_STATS_I(rx_desc_l4_wrong_head_cnt, 9), + HBG_PUSH_STATS_I(rx_desc_l4_len_err_cnt, 10), + HBG_PUSH_STATS_I(rx_desc_l4_csum_err_cnt, 11), + HBG_PUSH_STATS_I(rx_desc_l4_zero_port_num_cnt, 12), + HBG_PUSH_STATS_I(rx_desc_l4_other_cnt, 13), + HBG_PUSH_STATS_I(rx_desc_frag_cnt, 14), + HBG_PUSH_STATS_I(rx_desc_ip_ver_err_cnt, 15), + HBG_PUSH_STATS_I(rx_desc_ipv4_pkt_cnt, 16), + HBG_PUSH_STATS_I(rx_desc_ipv6_pkt_cnt, 17), + HBG_PUSH_STATS_I(rx_desc_no_ip_pkt_cnt, 18), + HBG_PUSH_STATS_I(rx_desc_ip_pkt_cnt, 19), + HBG_PUSH_STATS_I(rx_desc_tcp_pkt_cnt, 20), + HBG_PUSH_STATS_I(rx_desc_udp_pkt_cnt, 21), + HBG_PUSH_STATS_I(rx_desc_vlan_pkt_cnt, 22), + HBG_PUSH_STATS_I(rx_desc_icmp_pkt_cnt, 23), + HBG_PUSH_STATS_I(rx_desc_arp_pkt_cnt, 24), + HBG_PUSH_STATS_I(rx_desc_rarp_pkt_cnt, 25), + HBG_PUSH_STATS_I(rx_desc_multicast_pkt_cnt, 26), + HBG_PUSH_STATS_I(rx_desc_broadcast_pkt_cnt, 27), + HBG_PUSH_STATS_I(rx_desc_ipsec_pkt_cnt, 28), + HBG_PUSH_STATS_I(rx_desc_ip_opt_pkt_cnt, 29), + HBG_PUSH_STATS_I(rx_desc_key_not_match_cnt, 30), + HBG_PUSH_STATS_I(rx_octets_total_ok_cnt, 31), + HBG_PUSH_STATS_I(rx_uc_pkt_cnt, 32), + HBG_PUSH_STATS_I(rx_mc_pkt_cnt, 33), + HBG_PUSH_STATS_I(rx_bc_pkt_cnt, 34), + HBG_PUSH_STATS_I(rx_vlan_pkt_cnt, 35), + HBG_PUSH_STATS_I(rx_octets_bad_cnt, 36), + HBG_PUSH_STATS_I(rx_octets_total_filt_cnt, 37), + HBG_PUSH_STATS_I(rx_filt_pkt_cnt, 38), + HBG_PUSH_STATS_I(rx_trans_pkt_cnt, 39), + HBG_PUSH_STATS_I(rx_framesize_64, 40), + HBG_PUSH_STATS_I(rx_framesize_65_127, 41), + HBG_PUSH_STATS_I(rx_framesize_128_255, 42), + HBG_PUSH_STATS_I(rx_framesize_256_511, 43), + HBG_PUSH_STATS_I(rx_framesize_512_1023, 44), + HBG_PUSH_STATS_I(rx_framesize_1024_1518, 45), + HBG_PUSH_STATS_I(rx_framesize_bt_1518, 46), + HBG_PUSH_STATS_I(rx_fcs_error_cnt, 47), + HBG_PUSH_STATS_I(rx_data_error_cnt, 48), + HBG_PUSH_STATS_I(rx_align_error_cnt, 49), + HBG_PUSH_STATS_I(rx_frame_long_err_cnt, 50), + HBG_PUSH_STATS_I(rx_jabber_err_cnt, 51), + HBG_PUSH_STATS_I(rx_pause_macctl_frame_cnt, 52), + HBG_PUSH_STATS_I(rx_unknown_macctl_frame_cnt, 53), + HBG_PUSH_STATS_I(rx_frame_very_long_err_cnt, 54), + 
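The HBG_PUSH_IRQ_I/HBG_PUSH_STATS_I tables above pair a BMC-visible id with a field offset into struct hbg_stats (HBG_STATS_FIELD_OFF is presumably an offsetof() wrapper), so one generic loop can read any counter. A self-contained sketch of that table-of-offsets pattern, with hypothetical field names standing in for the real stats:

    #include <stddef.h>
    #include <stdint.h>

    struct stats {
            uint64_t rx_ok_cnt;   /* stand-ins for struct hbg_stats fields */
            uint64_t tx_ok_cnt;
    };

    struct stat_desc {
            uint32_t id;      /* id the BMC uses for pretty-printing */
            size_t offset;    /* byte offset of the counter in struct stats */
    };

    #define STAT_I(field, _id) { (_id), offsetof(struct stats, field) }

    static const struct stat_desc stat_table[] = {
            STAT_I(rx_ok_cnt, 0),
            STAT_I(tx_ok_cnt, 1),
    };

    /* Pack [id, value] pairs; this pairing is why hbg_push_stats() and
     * hbg_push_irq_cnt() size their buffers at ARRAY_SIZE(table) * 2. */
    static size_t pack_stats(const struct stats *s, uint64_t *out)
    {
            size_t i, n = sizeof(stat_table) / sizeof(stat_table[0]);

            for (i = 0; i < n; i++) {
                    out[2 * i] = stat_table[i].id;
                    out[2 * i + 1] = *(const uint64_t *)
                                     ((const char *)s + stat_table[i].offset);
            }
            return 2 * n;
    }

Keeping only ids and offsets in the table means adding a counter is a one-line change, and the wire format stays decoupled from the in-memory struct layout.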
HBG_PUSH_STATS_I(rx_frame_runt_err_cnt, 55), + HBG_PUSH_STATS_I(rx_frame_short_err_cnt, 56), + HBG_PUSH_STATS_I(rx_overflow_cnt, 57), + HBG_PUSH_STATS_I(rx_bufrq_err_cnt, 58), + HBG_PUSH_STATS_I(rx_we_err_cnt, 59), + HBG_PUSH_STATS_I(rx_overrun_cnt, 60), + HBG_PUSH_STATS_I(rx_lengthfield_err_cnt, 61), + HBG_PUSH_STATS_I(rx_fail_comma_cnt, 62), + HBG_PUSH_STATS_I(rx_dma_err_cnt, 63), + HBG_PUSH_STATS_I(rx_fifo_less_empty_thrsld_cnt, 64), + HBG_PUSH_STATS_I(tx_octets_total_ok_cnt, 65), + HBG_PUSH_STATS_I(tx_uc_pkt_cnt, 66), + HBG_PUSH_STATS_I(tx_mc_pkt_cnt, 67), + HBG_PUSH_STATS_I(tx_bc_pkt_cnt, 68), + HBG_PUSH_STATS_I(tx_vlan_pkt_cnt, 69), + HBG_PUSH_STATS_I(tx_octets_bad_cnt, 70), + HBG_PUSH_STATS_I(tx_trans_pkt_cnt, 71), + HBG_PUSH_STATS_I(tx_pause_frame_cnt, 72), + HBG_PUSH_STATS_I(tx_framesize_64, 73), + HBG_PUSH_STATS_I(tx_framesize_65_127, 74), + HBG_PUSH_STATS_I(tx_framesize_128_255, 75), + HBG_PUSH_STATS_I(tx_framesize_256_511, 76), + HBG_PUSH_STATS_I(tx_framesize_512_1023, 77), + HBG_PUSH_STATS_I(tx_framesize_1024_1518, 78), + HBG_PUSH_STATS_I(tx_framesize_bt_1518, 79), + HBG_PUSH_STATS_I(tx_underrun_err_cnt, 80), + HBG_PUSH_STATS_I(tx_add_cs_fail_cnt, 81), + HBG_PUSH_STATS_I(tx_bufrl_err_cnt, 82), + HBG_PUSH_STATS_I(tx_crc_err_cnt, 83), + HBG_PUSH_STATS_I(tx_drop_cnt, 84), + HBG_PUSH_STATS_I(tx_excessive_length_drop_cnt, 85), + HBG_PUSH_STATS_I(tx_dma_err_cnt, 86), +}; + +static int hbg_push_msg_send(struct hbg_priv *priv, + struct hbg_diagnose_message *msg) +{ + u32 header = 0; + u32 i; + + if (msg->data_num == 0) + return 0; + + for (i = 0; i < msg->data_num && i < HBG_MSG_DATA_MAX_NUM; i++) + hbg_reg_write(priv, + HBG_REG_MSG_DATA_BASE_ADDR + i * sizeof(u32), + msg->data[i]); + + hbg_field_modify(header, HBG_REG_MSG_HEADER_OPCODE_M, msg->opcode); + hbg_field_modify(header, HBG_REG_MSG_HEADER_DATA_NUM_M, msg->data_num); + hbg_field_modify(header, HBG_REG_MSG_HEADER_RESP_CODE_M, ETIMEDOUT); + + /* start status */ + hbg_field_modify(header, HBG_REG_MSG_HEADER_STATUS_M, 1); + + /* write header msg to start push */ + hbg_reg_write(priv, HBG_REG_MSG_HEADER_ADDR, header); + + /* wait done */ + readl_poll_timeout(priv->io_base + HBG_REG_MSG_HEADER_ADDR, header, + !FIELD_GET(HBG_REG_MSG_HEADER_STATUS_M, header), + HBG_HW_PUSH_WAIT_INTERVAL_US, + HBG_HW_PUSH_WAIT_TIMEOUT_US); + + msg->status = FIELD_GET(HBG_REG_MSG_HEADER_STATUS_M, header); + return -(int)FIELD_GET(HBG_REG_MSG_HEADER_RESP_CODE_M, header); +} + +static int hbg_push_data(struct hbg_priv *priv, + u32 opcode, u32 *data, u32 data_num) +{ + struct hbg_diagnose_message msg = {0}; + u32 data_left_num; + u32 i, j; + int ret; + + msg.priv = priv; + msg.opcode = opcode; + for (i = 0; i < data_num / HBG_MSG_DATA_MAX_NUM + 1; i++) { + if (i * HBG_MSG_DATA_MAX_NUM >= data_num) + break; + + data_left_num = data_num - i * HBG_MSG_DATA_MAX_NUM; + for (j = 0; j < data_left_num && j < HBG_MSG_DATA_MAX_NUM; j++) + msg.data[j] = data[i * HBG_MSG_DATA_MAX_NUM + j]; + + msg.data_num = j; + ret = hbg_push_msg_send(priv, &msg); + if (ret) + return ret; + } + + return 0; +} + +static int hbg_push_data_u64(struct hbg_priv *priv, u32 opcode, + u64 *data, u32 data_num) +{ + /* The length of u64 is twice that of u32, + * the data_num must be multiplied by 2. 
+ */ + return hbg_push_data(priv, opcode, (u32 *)data, data_num * 2); +} + +static u64 hbg_get_irq_stats(struct hbg_vector *vectors, u32 mask) +{ + u32 i = 0; + + for (i = 0; i < vectors->info_array_len; i++) + if (vectors->info_array[i].mask == mask) + return vectors->info_array[i].count; + + return 0; +} + +static int hbg_push_irq_cnt(struct hbg_priv *priv) +{ + /* An id needs to be added for each data. + * Therefore, the data_num must be multiplied by 2. + */ + u32 data_num = ARRAY_SIZE(hbg_push_irq_list) * 2; + struct hbg_vector *vectors = &priv->vectors; + const struct hbg_push_irq_info *info; + u32 i, j = 0; + u64 *data; + int ret; + + data = kcalloc(data_num, sizeof(u64), GFP_KERNEL); + if (!data) + return -ENOMEM; + + /* An id needs to be added for each data. + * So i + 2 for each loop. + */ + for (i = 0; i < data_num; i += 2) { + info = &hbg_push_irq_list[j++]; + data[i] = info->id; + data[i + 1] = hbg_get_irq_stats(vectors, info->mask); + } + + ret = hbg_push_data_u64(priv, HBG_PUSH_CMD_IRQ, data, data_num); + kfree(data); + return ret; +} + +static int hbg_push_link_status(struct hbg_priv *priv) +{ + u32 link_status[2]; + + /* phy link status */ + link_status[0] = priv->mac.phydev->link; + /* mac link status */ + link_status[1] = hbg_reg_read_field(priv, HBG_REG_AN_NEG_STATE_ADDR, + HBG_REG_AN_NEG_STATE_NP_LINK_OK_B); + + return hbg_push_data(priv, HBG_PUSH_CMD_LINK, + link_status, ARRAY_SIZE(link_status)); +} + +static int hbg_push_stats(struct hbg_priv *priv) +{ + /* An id needs to be added for each data. + * Therefore, the data_num must be multiplied by 2. + */ + u64 data_num = ARRAY_SIZE(hbg_push_stats_list) * 2; + struct hbg_stats *stats = &priv->stats; + const struct hbg_push_stats_info *info; + u32 i, j = 0; + u64 *data; + int ret; + + data = kcalloc(data_num, sizeof(u64), GFP_KERNEL); + if (!data) + return -ENOMEM; + + /* An id needs to be added for each data. + * So i + 2 for each loop. + */ + for (i = 0; i < data_num; i += 2) { + info = &hbg_push_stats_list[j++]; + data[i] = info->id; + data[i + 1] = HBG_STATS_R(stats, info->offset); + } + + ret = hbg_push_data_u64(priv, HBG_PUSH_CMD_STATS, data, data_num); + kfree(data); + return ret; +} + +void hbg_diagnose_message_push(struct hbg_priv *priv) +{ + int ret; + + if (test_bit(HBG_NIC_STATE_RESETTING, &priv->state)) + return; + + /* only 1 is the right value */ + if (hbg_reg_read(priv, HBG_REG_PUSH_REQ_ADDR) != 1) + return; + + ret = hbg_push_irq_cnt(priv); + if (ret) { + dev_err(&priv->pdev->dev, + "failed to push irq cnt, ret = %d\n", ret); + goto push_done; + } + + ret = hbg_push_link_status(priv); + if (ret) { + dev_err(&priv->pdev->dev, + "failed to push link status, ret = %d\n", ret); + goto push_done; + } + + ret = hbg_push_stats(priv); + if (ret) + dev_err(&priv->pdev->dev, + "failed to push stats, ret = %d\n", ret); + +push_done: + hbg_reg_write(priv, HBG_REG_PUSH_REQ_ADDR, 0); +} diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_diagnose.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_diagnose.h new file mode 100644 index 0000000000000..ba04c6d8c03db --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_diagnose.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2025 Hisilicon Limited. 
*/ + +#ifndef __HBG_DIAGNOSE_H +#define __HBG_DIAGNOSE_H + +#include "hbg_common.h" + +void hbg_diagnose_message_push(struct hbg_priv *priv); + +#endif diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c index 4d1f4a33391a8..4e8cb66f601c0 100644 --- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c +++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c @@ -105,6 +105,62 @@ int hbg_reset(struct hbg_priv *priv) return hbg_reset_done(priv, HBG_RESET_TYPE_FUNCTION); } +void hbg_err_reset(struct hbg_priv *priv) +{ + bool running; + + rtnl_lock(); + running = netif_running(priv->netdev); + if (running) + dev_close(priv->netdev); + + hbg_reset(priv); + + /* in hbg_pci_err_detected(), we will detach first, + * so we need to attach before open + */ + if (!netif_device_present(priv->netdev)) + netif_device_attach(priv->netdev); + + if (running) + dev_open(priv->netdev, NULL); + rtnl_unlock(); +} + +static pci_ers_result_t hbg_pci_err_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + + netif_device_detach(netdev); + + if (state == pci_channel_io_perm_failure) + return PCI_ERS_RESULT_DISCONNECT; + + pci_disable_device(pdev); + return PCI_ERS_RESULT_NEED_RESET; +} + +static pci_ers_result_t hbg_pci_err_slot_reset(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct hbg_priv *priv = netdev_priv(netdev); + + if (pci_enable_device(pdev)) { + dev_err(&pdev->dev, + "failed to re-enable PCI device after reset\n"); + return PCI_ERS_RESULT_DISCONNECT; + } + + pci_set_master(pdev); + pci_restore_state(pdev); + pci_save_state(pdev); + + hbg_err_reset(priv); + netif_device_attach(netdev); + return PCI_ERS_RESULT_RECOVERED; +} + static void hbg_pci_err_reset_prepare(struct pci_dev *pdev) { struct net_device *netdev = pci_get_drvdata(pdev); @@ -124,6 +180,8 @@ static void hbg_pci_err_reset_done(struct pci_dev *pdev) } static const struct pci_error_handlers hbg_pci_err_handler = { + .error_detected = hbg_pci_err_detected, + .slot_reset = hbg_pci_err_slot_reset, .reset_prepare = hbg_pci_err_reset_prepare, .reset_done = hbg_pci_err_reset_done, }; diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.h index d7828e446308f..fb9fbe7004e8f 100644 --- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.h +++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.h @@ -9,5 +9,6 @@ void hbg_set_pci_err_handler(struct pci_driver *pdrv); int hbg_reset(struct hbg_priv *priv); int hbg_rebuild(struct hbg_priv *priv); +void hbg_err_reset(struct hbg_priv *priv); #endif diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.c index 00364a438ec2e..8f1107b85fbb0 100644 --- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.c @@ -9,6 +9,136 @@ #include "hbg_ethtool.h" #include "hbg_hw.h" +struct hbg_ethtool_stats { + char name[ETH_GSTRING_LEN]; + unsigned long offset; + u32 reg; /* set to 0 if stats is not updated via dump reg */ +}; + +#define HBG_STATS_I(stats) { #stats, HBG_STATS_FIELD_OFF(stats), 0} +#define HBG_STATS_REG_I(stats, reg) { #stats, HBG_STATS_FIELD_OFF(stats), reg} + +static const struct hbg_ethtool_stats hbg_ethtool_stats_info[] = { + HBG_STATS_I(rx_desc_l2_err_cnt), + HBG_STATS_I(rx_desc_pkt_len_err_cnt), + HBG_STATS_I(rx_desc_l3_wrong_head_cnt), + HBG_STATS_I(rx_desc_l3_csum_err_cnt), 
+ HBG_STATS_I(rx_desc_l3_len_err_cnt), + HBG_STATS_I(rx_desc_l3_zero_ttl_cnt), + HBG_STATS_I(rx_desc_l3_other_cnt), + HBG_STATS_I(rx_desc_l4_wrong_head_cnt), + HBG_STATS_I(rx_desc_l4_len_err_cnt), + HBG_STATS_I(rx_desc_l4_csum_err_cnt), + HBG_STATS_I(rx_desc_l4_zero_port_num_cnt), + HBG_STATS_I(rx_desc_l4_other_cnt), + HBG_STATS_I(rx_desc_ip_ver_err_cnt), + HBG_STATS_I(rx_desc_ipv4_pkt_cnt), + HBG_STATS_I(rx_desc_ipv6_pkt_cnt), + HBG_STATS_I(rx_desc_no_ip_pkt_cnt), + HBG_STATS_I(rx_desc_ip_pkt_cnt), + HBG_STATS_I(rx_desc_tcp_pkt_cnt), + HBG_STATS_I(rx_desc_udp_pkt_cnt), + HBG_STATS_I(rx_desc_vlan_pkt_cnt), + HBG_STATS_I(rx_desc_icmp_pkt_cnt), + HBG_STATS_I(rx_desc_arp_pkt_cnt), + HBG_STATS_I(rx_desc_rarp_pkt_cnt), + HBG_STATS_I(rx_desc_multicast_pkt_cnt), + HBG_STATS_I(rx_desc_broadcast_pkt_cnt), + HBG_STATS_I(rx_desc_ipsec_pkt_cnt), + HBG_STATS_I(rx_desc_ip_opt_pkt_cnt), + HBG_STATS_I(rx_desc_key_not_match_cnt), + + HBG_STATS_REG_I(rx_octets_bad_cnt, HBG_REG_RX_OCTETS_BAD_ADDR), + HBG_STATS_REG_I(rx_octets_total_filt_cnt, + HBG_REG_RX_OCTETS_TOTAL_FILT_ADDR), + HBG_STATS_REG_I(rx_uc_pkt_cnt, HBG_REG_RX_UC_PKTS_ADDR), + HBG_STATS_REG_I(rx_vlan_pkt_cnt, HBG_REG_RX_TAGGED_ADDR), + HBG_STATS_REG_I(rx_filt_pkt_cnt, HBG_REG_RX_FILT_PKT_CNT_ADDR), + HBG_STATS_REG_I(rx_data_error_cnt, HBG_REG_RX_DATA_ERR_ADDR), + HBG_STATS_REG_I(rx_frame_long_err_cnt, HBG_REG_RX_LONG_ERRORS_ADDR), + HBG_STATS_REG_I(rx_jabber_err_cnt, HBG_REG_RX_JABBER_ERRORS_ADDR), + HBG_STATS_REG_I(rx_frame_very_long_err_cnt, + HBG_REG_RX_VERY_LONG_ERR_CNT_ADDR), + HBG_STATS_REG_I(rx_frame_runt_err_cnt, HBG_REG_RX_RUNT_ERR_CNT_ADDR), + HBG_STATS_REG_I(rx_frame_short_err_cnt, HBG_REG_RX_SHORT_ERR_CNT_ADDR), + HBG_STATS_REG_I(rx_overflow_cnt, HBG_REG_RX_OVER_FLOW_CNT_ADDR), + HBG_STATS_REG_I(rx_bufrq_err_cnt, HBG_REG_RX_BUFRQ_ERR_CNT_ADDR), + HBG_STATS_REG_I(rx_we_err_cnt, HBG_REG_RX_WE_ERR_CNT_ADDR), + HBG_STATS_REG_I(rx_overrun_cnt, HBG_REG_RX_OVERRUN_CNT_ADDR), + HBG_STATS_REG_I(rx_lengthfield_err_cnt, + HBG_REG_RX_LENGTHFIELD_ERR_CNT_ADDR), + HBG_STATS_REG_I(rx_fail_comma_cnt, HBG_REG_RX_FAIL_COMMA_CNT_ADDR), + HBG_STATS_I(rx_dma_err_cnt), + HBG_STATS_I(rx_fifo_less_empty_thrsld_cnt), + + HBG_STATS_REG_I(tx_uc_pkt_cnt, HBG_REG_TX_UC_PKTS_ADDR), + HBG_STATS_REG_I(tx_vlan_pkt_cnt, HBG_REG_TX_TAGGED_ADDR), + HBG_STATS_REG_I(tx_octets_bad_cnt, HBG_REG_OCTETS_TRANSMITTED_BAD_ADDR), + + HBG_STATS_REG_I(tx_underrun_err_cnt, HBG_REG_TX_UNDERRUN_ADDR), + HBG_STATS_REG_I(tx_add_cs_fail_cnt, HBG_REG_TX_CS_FAIL_CNT_ADDR), + HBG_STATS_REG_I(tx_bufrl_err_cnt, HBG_REG_TX_BUFRL_ERR_CNT_ADDR), + HBG_STATS_REG_I(tx_crc_err_cnt, HBG_REG_TX_CRC_ERROR_ADDR), + HBG_STATS_REG_I(tx_drop_cnt, HBG_REG_TX_DROP_CNT_ADDR), + HBG_STATS_REG_I(tx_excessive_length_drop_cnt, + HBG_REG_TX_EXCESSIVE_LENGTH_DROP_ADDR), + HBG_STATS_I(tx_dma_err_cnt), + HBG_STATS_I(tx_timeout_cnt), +}; + +static const struct hbg_ethtool_stats hbg_ethtool_rmon_stats_info[] = { + HBG_STATS_I(rx_desc_frag_cnt), + HBG_STATS_REG_I(rx_framesize_64, HBG_REG_RX_PKTS_64OCTETS_ADDR), + HBG_STATS_REG_I(rx_framesize_65_127, + HBG_REG_RX_PKTS_65TO127OCTETS_ADDR), + HBG_STATS_REG_I(rx_framesize_128_255, + HBG_REG_RX_PKTS_128TO255OCTETS_ADDR), + HBG_STATS_REG_I(rx_framesize_256_511, + HBG_REG_RX_PKTS_256TO511OCTETS_ADDR), + HBG_STATS_REG_I(rx_framesize_512_1023, + HBG_REG_RX_PKTS_512TO1023OCTETS_ADDR), + HBG_STATS_REG_I(rx_framesize_1024_1518, + HBG_REG_RX_PKTS_1024TO1518OCTETS_ADDR), + HBG_STATS_REG_I(rx_framesize_bt_1518, + HBG_REG_RX_PKTS_1519TOMAXOCTETS_ADDR), + 
HBG_STATS_REG_I(tx_framesize_64, HBG_REG_TX_PKTS_64OCTETS_ADDR), + HBG_STATS_REG_I(tx_framesize_65_127, + HBG_REG_TX_PKTS_65TO127OCTETS_ADDR), + HBG_STATS_REG_I(tx_framesize_128_255, + HBG_REG_TX_PKTS_128TO255OCTETS_ADDR), + HBG_STATS_REG_I(tx_framesize_256_511, + HBG_REG_TX_PKTS_256TO511OCTETS_ADDR), + HBG_STATS_REG_I(tx_framesize_512_1023, + HBG_REG_TX_PKTS_512TO1023OCTETS_ADDR), + HBG_STATS_REG_I(tx_framesize_1024_1518, + HBG_REG_TX_PKTS_1024TO1518OCTETS_ADDR), + HBG_STATS_REG_I(tx_framesize_bt_1518, + HBG_REG_TX_PKTS_1519TOMAXOCTETS_ADDR), +}; + +static const struct hbg_ethtool_stats hbg_ethtool_mac_stats_info[] = { + HBG_STATS_REG_I(rx_mc_pkt_cnt, HBG_REG_RX_MC_PKTS_ADDR), + HBG_STATS_REG_I(rx_bc_pkt_cnt, HBG_REG_RX_BC_PKTS_ADDR), + HBG_STATS_REG_I(rx_align_error_cnt, HBG_REG_RX_ALIGN_ERRORS_ADDR), + HBG_STATS_REG_I(rx_octets_total_ok_cnt, + HBG_REG_RX_OCTETS_TOTAL_OK_ADDR), + HBG_STATS_REG_I(rx_trans_pkt_cnt, HBG_REG_RX_TRANS_PKG_CNT_ADDR), + HBG_STATS_REG_I(rx_fcs_error_cnt, HBG_REG_RX_FCS_ERRORS_ADDR), + HBG_STATS_REG_I(tx_mc_pkt_cnt, HBG_REG_TX_MC_PKTS_ADDR), + HBG_STATS_REG_I(tx_bc_pkt_cnt, HBG_REG_TX_BC_PKTS_ADDR), + HBG_STATS_REG_I(tx_octets_total_ok_cnt, + HBG_REG_OCTETS_TRANSMITTED_OK_ADDR), + HBG_STATS_REG_I(tx_trans_pkt_cnt, HBG_REG_TX_TRANS_PKG_CNT_ADDR), +}; + +static const struct hbg_ethtool_stats hbg_ethtool_ctrl_stats_info[] = { + HBG_STATS_REG_I(rx_pause_macctl_frame_cnt, + HBG_REG_RX_PAUSE_MACCTL_FRAMCOUNTER_ADDR), + HBG_STATS_REG_I(tx_pause_frame_cnt, HBG_REG_TX_PAUSE_FRAMES_ADDR), + HBG_STATS_REG_I(rx_unknown_macctl_frame_cnt, + HBG_REG_RX_UNKNOWN_MACCTL_FRAMCOUNTER_ADDR), +}; + enum hbg_reg_dump_type { HBG_DUMP_REG_TYPE_SPEC = 0, HBG_DUMP_REG_TYPE_MDIO, @@ -180,6 +310,167 @@ static int hbg_ethtool_reset(struct net_device *netdev, u32 *flags) return hbg_reset(priv); } +static void hbg_update_stats_by_info(struct hbg_priv *priv, + const struct hbg_ethtool_stats *info, + u32 info_len) +{ + const struct hbg_ethtool_stats *stats; + u32 i; + + for (i = 0; i < info_len; i++) { + stats = &info[i]; + if (!stats->reg) + continue; + + HBG_STATS_U(&priv->stats, stats->offset, + hbg_reg_read(priv, stats->reg)); + } +} + +void hbg_update_stats(struct hbg_priv *priv) +{ + hbg_update_stats_by_info(priv, hbg_ethtool_stats_info, + ARRAY_SIZE(hbg_ethtool_stats_info)); + hbg_update_stats_by_info(priv, hbg_ethtool_rmon_stats_info, + ARRAY_SIZE(hbg_ethtool_rmon_stats_info)); + hbg_update_stats_by_info(priv, hbg_ethtool_mac_stats_info, + ARRAY_SIZE(hbg_ethtool_mac_stats_info)); + hbg_update_stats_by_info(priv, hbg_ethtool_ctrl_stats_info, + ARRAY_SIZE(hbg_ethtool_ctrl_stats_info)); +} + +static int hbg_ethtool_get_sset_count(struct net_device *netdev, int stringset) +{ + if (stringset != ETH_SS_STATS) + return -EOPNOTSUPP; + + return ARRAY_SIZE(hbg_ethtool_stats_info); +} + +static void hbg_ethtool_get_strings(struct net_device *netdev, + u32 stringset, u8 *data) +{ + u32 i; + + if (stringset != ETH_SS_STATS) + return; + + for (i = 0; i < ARRAY_SIZE(hbg_ethtool_stats_info); i++) + ethtool_puts(&data, hbg_ethtool_stats_info[i].name); +} + +static void hbg_ethtool_get_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) +{ + struct hbg_priv *priv = netdev_priv(netdev); + u32 i; + + hbg_update_stats(priv); + for (i = 0; i < ARRAY_SIZE(hbg_ethtool_stats_info); i++) + *data++ = HBG_STATS_R(&priv->stats, + hbg_ethtool_stats_info[i].offset); +} + +static void hbg_ethtool_get_pause_stats(struct net_device *netdev, + struct ethtool_pause_stats *epstats) +{ + struct hbg_priv 
*priv = netdev_priv(netdev); + struct hbg_stats *stats = &priv->stats; + + hbg_update_stats(priv); + epstats->rx_pause_frames = stats->rx_pause_macctl_frame_cnt; + epstats->tx_pause_frames = stats->tx_pause_frame_cnt; +} + +static void hbg_ethtool_get_eth_mac_stats(struct net_device *netdev, + struct ethtool_eth_mac_stats *emstats) +{ + struct hbg_priv *priv = netdev_priv(netdev); + struct hbg_stats *stats = &priv->stats; + + hbg_update_stats(priv); + emstats->FramesTransmittedOK = stats->tx_trans_pkt_cnt; + emstats->FramesReceivedOK = stats->rx_trans_pkt_cnt; + emstats->FrameCheckSequenceErrors = stats->rx_fcs_error_cnt; + emstats->AlignmentErrors = stats->rx_align_error_cnt; + emstats->OctetsTransmittedOK = stats->tx_octets_total_ok_cnt; + emstats->OctetsReceivedOK = stats->rx_octets_total_ok_cnt; + + emstats->MulticastFramesXmittedOK = stats->tx_mc_pkt_cnt; + emstats->BroadcastFramesXmittedOK = stats->tx_bc_pkt_cnt; + emstats->MulticastFramesReceivedOK = stats->rx_mc_pkt_cnt; + emstats->BroadcastFramesReceivedOK = stats->rx_bc_pkt_cnt; + emstats->InRangeLengthErrors = stats->rx_fcs_error_cnt + + stats->rx_jabber_err_cnt + + stats->rx_unknown_macctl_frame_cnt + + stats->rx_bufrq_err_cnt + + stats->rx_we_err_cnt; + emstats->OutOfRangeLengthField = stats->rx_frame_short_err_cnt + + stats->rx_frame_runt_err_cnt + + stats->rx_lengthfield_err_cnt + + stats->rx_frame_long_err_cnt + + stats->rx_frame_very_long_err_cnt; + emstats->FrameTooLongErrors = stats->rx_frame_long_err_cnt + + stats->rx_frame_very_long_err_cnt; +} + +static void +hbg_ethtool_get_eth_ctrl_stats(struct net_device *netdev, + struct ethtool_eth_ctrl_stats *ecstats) +{ + struct hbg_priv *priv = netdev_priv(netdev); + struct hbg_stats *s = &priv->stats; + + hbg_update_stats(priv); + ecstats->MACControlFramesTransmitted = s->tx_pause_frame_cnt; + ecstats->MACControlFramesReceived = s->rx_pause_macctl_frame_cnt; + ecstats->UnsupportedOpcodesReceived = s->rx_unknown_macctl_frame_cnt; +} + +static const struct ethtool_rmon_hist_range hbg_rmon_ranges[] = { + { 0, 64 }, + { 65, 127 }, + { 128, 255 }, + { 256, 511 }, + { 512, 1023 }, + { 1024, 1518 }, + { 1519, 4095 }, +}; + +static void +hbg_ethtool_get_rmon_stats(struct net_device *netdev, + struct ethtool_rmon_stats *rmon_stats, + const struct ethtool_rmon_hist_range **ranges) +{ + struct hbg_priv *priv = netdev_priv(netdev); + struct hbg_stats *stats = &priv->stats; + + hbg_update_stats(priv); + rmon_stats->undersize_pkts = stats->rx_frame_short_err_cnt + + stats->rx_frame_runt_err_cnt + + stats->rx_lengthfield_err_cnt; + rmon_stats->oversize_pkts = stats->rx_frame_long_err_cnt + + stats->rx_frame_very_long_err_cnt; + rmon_stats->fragments = stats->rx_desc_frag_cnt; + rmon_stats->hist[0] = stats->rx_framesize_64; + rmon_stats->hist[1] = stats->rx_framesize_65_127; + rmon_stats->hist[2] = stats->rx_framesize_128_255; + rmon_stats->hist[3] = stats->rx_framesize_256_511; + rmon_stats->hist[4] = stats->rx_framesize_512_1023; + rmon_stats->hist[5] = stats->rx_framesize_1024_1518; + rmon_stats->hist[6] = stats->rx_framesize_bt_1518; + + rmon_stats->hist_tx[0] = stats->tx_framesize_64; + rmon_stats->hist_tx[1] = stats->tx_framesize_65_127; + rmon_stats->hist_tx[2] = stats->tx_framesize_128_255; + rmon_stats->hist_tx[3] = stats->tx_framesize_256_511; + rmon_stats->hist_tx[4] = stats->tx_framesize_512_1023; + rmon_stats->hist_tx[5] = stats->tx_framesize_1024_1518; + rmon_stats->hist_tx[6] = stats->tx_framesize_bt_1518; + + *ranges = hbg_rmon_ranges; +} + static const struct ethtool_ops 
hbg_ethtool_ops = {
	.get_link		= ethtool_op_get_link,
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
@@ -190,6 +481,13 @@ static const struct ethtool_ops hbg_ethtool_ops = {
	.set_pauseparam		= hbg_ethtool_set_pauseparam,
	.reset			= hbg_ethtool_reset,
	.nway_reset		= phy_ethtool_nway_reset,
+	.get_sset_count		= hbg_ethtool_get_sset_count,
+	.get_strings		= hbg_ethtool_get_strings,
+	.get_ethtool_stats	= hbg_ethtool_get_stats,
+	.get_pause_stats	= hbg_ethtool_get_pause_stats,
+	.get_eth_mac_stats	= hbg_ethtool_get_eth_mac_stats,
+	.get_eth_ctrl_stats	= hbg_ethtool_get_eth_ctrl_stats,
+	.get_rmon_stats		= hbg_ethtool_get_rmon_stats,
 };
 
 void hbg_ethtool_set_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.h
index 628707ec2686d..e173155b146a8 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.h
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_ethtool.h
@@ -6,6 +6,11 @@
 #include 
 
+#define HBG_STATS_FIELD_OFF(f)		(offsetof(struct hbg_stats, f))
+#define HBG_STATS_R(p, offset)		(*(u64 *)((u8 *)(p) + (offset)))
+#define HBG_STATS_U(p, offset, val)	(HBG_STATS_R(p, offset) += (val))
+
 void hbg_ethtool_set_ops(struct net_device *netdev);
+void hbg_update_stats(struct hbg_priv *priv);
 
 #endif
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c
index e7798f2136450..74a18033b4441 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c
@@ -213,10 +213,20 @@ void hbg_hw_fill_buffer(struct hbg_priv *priv, u32 buffer_dma_addr)
 
 void hbg_hw_adjust_link(struct hbg_priv *priv, u32 speed, u32 duplex)
 {
+	hbg_hw_mac_enable(priv, HBG_STATUS_DISABLE);
+
 	hbg_reg_write_field(priv, HBG_REG_PORT_MODE_ADDR,
 			    HBG_REG_PORT_MODE_M, speed);
 	hbg_reg_write_field(priv, HBG_REG_DUPLEX_TYPE_ADDR,
 			    HBG_REG_DUPLEX_B, duplex);
+
+	hbg_hw_event_notify(priv, HBG_HW_EVENT_CORE_RESET);
+
+	hbg_hw_mac_enable(priv, HBG_STATUS_ENABLE);
+
+	if (!hbg_reg_read_field(priv, HBG_REG_AN_NEG_STATE_ADDR,
+				HBG_REG_AN_NEG_STATE_NP_LINK_OK_B))
+		hbg_np_link_fail_task_schedule(priv);
 }
 
 /* only support uc filter */
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_irq.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_irq.c
index 25dd25f096fe0..e79e9ab3e5308 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_irq.c
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_irq.c
@@ -11,6 +11,9 @@ static void hbg_irq_handle_err(struct hbg_priv *priv,
 	if (irq_info->need_print)
 		dev_err(&priv->pdev->dev,
 			"receive error interrupt: %s\n", irq_info->name);
+
+	if (irq_info->need_reset)
+		hbg_err_reset_task_schedule(priv);
 }
 
 static void hbg_irq_handle_tx(struct hbg_priv *priv,
@@ -25,30 +28,38 @@ static void hbg_irq_handle_rx(struct hbg_priv *priv,
 	napi_schedule(&priv->rx_ring.napi);
 }
 
-#define HBG_TXRX_IRQ_I(name, handle) \
-	{#name, HBG_INT_MSK_##name##_B, false, false, 0, handle}
-#define HBG_ERR_IRQ_I(name, need_print) \
-	{#name, HBG_INT_MSK_##name##_B, true, need_print, 0, hbg_irq_handle_err}
+static void hbg_irq_handle_rx_buf_val(struct hbg_priv *priv,
+				      struct hbg_irq_info *irq_info)
+{
+	priv->stats.rx_fifo_less_empty_thrsld_cnt++;
+}
+
+#define HBG_IRQ_I(name, handle) \
+	{#name, HBG_INT_MSK_##name##_B, false, false, false, 0, handle}
+#define HBG_ERR_IRQ_I(name, need_print, need_reset) \
+	{#name, HBG_INT_MSK_##name##_B, true, need_print, \
+	 need_reset, 0, hbg_irq_handle_err}
 
 static struct hbg_irq_info hbg_irqs[] = {
-
HBG_TXRX_IRQ_I(RX, hbg_irq_handle_rx), - HBG_TXRX_IRQ_I(TX, hbg_irq_handle_tx), - HBG_ERR_IRQ_I(MAC_MII_FIFO_ERR, true), - HBG_ERR_IRQ_I(MAC_PCS_RX_FIFO_ERR, true), - HBG_ERR_IRQ_I(MAC_PCS_TX_FIFO_ERR, true), - HBG_ERR_IRQ_I(MAC_APP_RX_FIFO_ERR, true), - HBG_ERR_IRQ_I(MAC_APP_TX_FIFO_ERR, true), - HBG_ERR_IRQ_I(SRAM_PARITY_ERR, true), - HBG_ERR_IRQ_I(TX_AHB_ERR, true), - HBG_ERR_IRQ_I(RX_BUF_AVL, false), - HBG_ERR_IRQ_I(REL_BUF_ERR, true), - HBG_ERR_IRQ_I(TXCFG_AVL, false), - HBG_ERR_IRQ_I(TX_DROP, false), - HBG_ERR_IRQ_I(RX_DROP, false), - HBG_ERR_IRQ_I(RX_AHB_ERR, true), - HBG_ERR_IRQ_I(MAC_FIFO_ERR, false), - HBG_ERR_IRQ_I(RBREQ_ERR, false), - HBG_ERR_IRQ_I(WE_ERR, false), + HBG_IRQ_I(RX, hbg_irq_handle_rx), + HBG_IRQ_I(TX, hbg_irq_handle_tx), + HBG_ERR_IRQ_I(TX_PKT_CPL, true, true), + HBG_ERR_IRQ_I(MAC_MII_FIFO_ERR, true, true), + HBG_ERR_IRQ_I(MAC_PCS_RX_FIFO_ERR, true, true), + HBG_ERR_IRQ_I(MAC_PCS_TX_FIFO_ERR, true, true), + HBG_ERR_IRQ_I(MAC_APP_RX_FIFO_ERR, true, true), + HBG_ERR_IRQ_I(MAC_APP_TX_FIFO_ERR, true, true), + HBG_ERR_IRQ_I(SRAM_PARITY_ERR, true, false), + HBG_ERR_IRQ_I(TX_AHB_ERR, true, true), + HBG_IRQ_I(RX_BUF_AVL, hbg_irq_handle_rx_buf_val), + HBG_ERR_IRQ_I(REL_BUF_ERR, true, false), + HBG_ERR_IRQ_I(TXCFG_AVL, false, false), + HBG_ERR_IRQ_I(TX_DROP, false, false), + HBG_ERR_IRQ_I(RX_DROP, false, false), + HBG_ERR_IRQ_I(RX_AHB_ERR, true, false), + HBG_ERR_IRQ_I(MAC_FIFO_ERR, true, true), + HBG_ERR_IRQ_I(RBREQ_ERR, true, true), + HBG_ERR_IRQ_I(WE_ERR, true, true), }; static irqreturn_t hbg_irq_handle(int irq_num, void *p) diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c index bb0f25ac97600..2ac5454338e4d 100644 --- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c +++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c @@ -5,7 +5,9 @@ #include #include #include +#include #include "hbg_common.h" +#include "hbg_diagnose.h" #include "hbg_err.h" #include "hbg_ethtool.h" #include "hbg_hw.h" @@ -14,6 +16,9 @@ #include "hbg_txrx.h" #include "hbg_debugfs.h" +#define HBG_SUPPORT_FEATURES (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | \ + NETIF_F_RXCSUM) + static void hbg_all_irq_enable(struct hbg_priv *priv, bool enabled) { struct hbg_irq_info *info; @@ -214,6 +219,10 @@ static void hbg_net_tx_timeout(struct net_device *netdev, unsigned int txqueue) char *buf = ring->tout_log_buf; u32 pos = 0; + priv->stats.tx_timeout_cnt++; + + pos += scnprintf(buf + pos, HBG_TX_TIMEOUT_BUF_LEN - pos, + "tx_timeout cnt: %llu\n", priv->stats.tx_timeout_cnt); pos += scnprintf(buf + pos, HBG_TX_TIMEOUT_BUF_LEN - pos, "ring used num: %u, fifo used num: %u\n", hbg_get_queue_used_num(ring), @@ -226,6 +235,39 @@ static void hbg_net_tx_timeout(struct net_device *netdev, unsigned int txqueue) netdev_info(netdev, "%s", buf); } +static void hbg_net_get_stats(struct net_device *netdev, + struct rtnl_link_stats64 *stats) +{ + struct hbg_priv *priv = netdev_priv(netdev); + struct hbg_stats *h_stats = &priv->stats; + + hbg_update_stats(priv); + dev_get_tstats64(netdev, stats); + + /* fifo empty */ + stats->tx_fifo_errors += h_stats->tx_drop_cnt; + + stats->tx_dropped += h_stats->tx_excessive_length_drop_cnt + + h_stats->tx_drop_cnt; + stats->tx_errors += h_stats->tx_add_cs_fail_cnt + + h_stats->tx_bufrl_err_cnt + + h_stats->tx_underrun_err_cnt + + h_stats->tx_crc_err_cnt; + stats->rx_errors += h_stats->rx_data_error_cnt; + stats->multicast += h_stats->rx_mc_pkt_cnt; + stats->rx_dropped += h_stats->rx_desc_drop; + stats->rx_length_errors += 
h_stats->rx_frame_very_long_err_cnt +
+				   h_stats->rx_frame_long_err_cnt +
+				   h_stats->rx_frame_runt_err_cnt +
+				   h_stats->rx_frame_short_err_cnt +
+				   h_stats->rx_lengthfield_err_cnt;
+	stats->rx_frame_errors += h_stats->rx_desc_l2_err_cnt +
+				  h_stats->rx_desc_l3l4_err_cnt;
+	stats->rx_fifo_errors += h_stats->rx_overflow_cnt +
+				 h_stats->rx_overrun_cnt;
+	stats->rx_crc_errors += h_stats->rx_fcs_error_cnt;
+}
+
 static const struct net_device_ops hbg_netdev_ops = {
 	.ndo_open		= hbg_net_open,
 	.ndo_stop		= hbg_net_stop,
@@ -235,8 +277,62 @@
 	.ndo_change_mtu		= hbg_net_change_mtu,
 	.ndo_tx_timeout		= hbg_net_tx_timeout,
 	.ndo_set_rx_mode	= hbg_net_set_rx_mode,
+	.ndo_get_stats64	= hbg_net_get_stats,
+	.ndo_eth_ioctl		= phy_do_ioctl_running,
 };
 
+static void hbg_service_task(struct work_struct *work)
+{
+	struct hbg_priv *priv = container_of(work, struct hbg_priv,
+					     service_task.work);
+
+	if (test_and_clear_bit(HBG_NIC_STATE_NEED_RESET, &priv->state))
+		hbg_err_reset(priv);
+
+	if (test_and_clear_bit(HBG_NIC_STATE_NP_LINK_FAIL, &priv->state))
+		hbg_fix_np_link_fail(priv);
+
+	hbg_diagnose_message_push(priv);
+
+	/* The type of statistics register is u32.
+	 * To prevent the statistics register from overflowing,
+	 * the driver dumps the statistics every 30 seconds.
+	 */
+	if (time_after(jiffies, priv->last_update_stats_time + 30 * HZ)) {
+		hbg_update_stats(priv);
+		priv->last_update_stats_time = jiffies;
+	}
+
+	schedule_delayed_work(&priv->service_task,
+			      msecs_to_jiffies(MSEC_PER_SEC));
+}
+
+void hbg_err_reset_task_schedule(struct hbg_priv *priv)
+{
+	set_bit(HBG_NIC_STATE_NEED_RESET, &priv->state);
+	schedule_delayed_work(&priv->service_task, 0);
+}
+
+void hbg_np_link_fail_task_schedule(struct hbg_priv *priv)
+{
+	set_bit(HBG_NIC_STATE_NP_LINK_FAIL, &priv->state);
+	schedule_delayed_work(&priv->service_task, 0);
+}
+
+static void hbg_cancel_delayed_work_sync(void *data)
+{
+	cancel_delayed_work_sync(data);
+}
+
+static int hbg_delaywork_init(struct hbg_priv *priv)
+{
+	INIT_DELAYED_WORK(&priv->service_task, hbg_service_task);
+	schedule_delayed_work(&priv->service_task, 0);
+	return devm_add_action_or_reset(&priv->pdev->dev,
+					hbg_cancel_delayed_work_sync,
+					&priv->service_task);
+}
+
 static int hbg_mac_filter_init(struct hbg_priv *priv)
 {
 	struct hbg_dev_specs *dev_specs = &priv->dev_specs;
@@ -291,6 +387,10 @@ static int hbg_init(struct hbg_priv *priv)
 	if (ret)
 		return ret;
 
+	ret = hbg_delaywork_init(priv);
+	if (ret)
+		return ret;
+
 	hbg_debugfs_init(priv);
 	hbg_init_user_def(priv);
 	return 0;
@@ -349,6 +449,9 @@ static int hbg_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (ret)
 		return ret;
 
+	/* set default features */
+	netdev->features |= HBG_SUPPORT_FEATURES;
+	netdev->hw_features |= HBG_SUPPORT_FEATURES;
 	netdev->priv_flags |= IFF_UNICAST_FLT;
 	netdev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c
index db6bc4cfb971f..f29a937ad0879 100644
--- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c
@@ -17,6 +17,8 @@
 #define HBG_MDIO_OP_TIMEOUT_US		(1 * 1000 * 1000)
 #define HBG_MDIO_OP_INTERVAL_US		(5 * 1000)
 
+#define HBG_NP_LINK_FAIL_RETRY_TIMES	5
+
 static void hbg_mdio_set_command(struct hbg_mac *mac, u32 cmd)
 {
 	hbg_reg_write(HBG_MAC_GET_PRIV(mac), HBG_REG_MDIO_COMMAND_ADDR, cmd);
@@ -127,6 +129,26 @@ static void hbg_flowctrl_cfg(struct hbg_priv *priv)
hbg_hw_set_pause_enable(priv, tx_pause, rx_pause); } +void hbg_fix_np_link_fail(struct hbg_priv *priv) +{ + struct device *dev = &priv->pdev->dev; + + if (priv->stats.np_link_fail_cnt >= HBG_NP_LINK_FAIL_RETRY_TIMES) { + dev_err(dev, "failed to fix the MAC link status\n"); + priv->stats.np_link_fail_cnt = 0; + return; + } + + priv->stats.np_link_fail_cnt++; + dev_err(dev, "failed to link between MAC and PHY, try to fix...\n"); + + /* Replace phy_reset() with phy_stop() and phy_start(), + * as suggested by Andrew. + */ + hbg_phy_stop(priv); + hbg_phy_start(priv); +} + static void hbg_phy_adjust_link(struct net_device *netdev) { struct hbg_priv *priv = netdev_priv(netdev); diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.h index febd02a309c79..f3771c1bbd347 100644 --- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.h +++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.h @@ -9,4 +9,6 @@ int hbg_mdio_init(struct hbg_priv *priv); void hbg_phy_start(struct hbg_priv *priv); void hbg_phy_stop(struct hbg_priv *priv); +void hbg_fix_np_link_fail(struct hbg_priv *priv); + #endif diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h index f12efc12f3c54..cc2cc612770d7 100644 --- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h +++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h @@ -18,6 +18,13 @@ #define HBG_REG_TX_FIFO_NUM_ADDR 0x0030 #define HBG_REG_RX_FIFO_NUM_ADDR 0x0034 #define HBG_REG_VLAN_LAYERS_ADDR 0x0038 +#define HBG_REG_PUSH_REQ_ADDR 0x00F0 +#define HBG_REG_MSG_HEADER_ADDR 0x00F4 +#define HBG_REG_MSG_HEADER_OPCODE_M GENMASK(7, 0) +#define HBG_REG_MSG_HEADER_STATUS_M GENMASK(11, 8) +#define HBG_REG_MSG_HEADER_DATA_NUM_M GENMASK(19, 12) +#define HBG_REG_MSG_HEADER_RESP_CODE_M GENMASK(27, 20) +#define HBG_REG_MSG_DATA_BASE_ADDR 0x0100 /* MDIO */ #define HBG_REG_MDIO_BASE 0x8000 @@ -54,12 +61,55 @@ #define HBG_REG_PAUSE_ENABLE_RX_B BIT(0) #define HBG_REG_PAUSE_ENABLE_TX_B BIT(1) #define HBG_REG_AN_NEG_STATE_ADDR (HBG_REG_SGMII_BASE + 0x0058) +#define HBG_REG_AN_NEG_STATE_NP_LINK_OK_B BIT(15) #define HBG_REG_TRANSMIT_CTRL_ADDR (HBG_REG_SGMII_BASE + 0x0060) #define HBG_REG_TRANSMIT_CTRL_PAD_EN_B BIT(7) #define HBG_REG_TRANSMIT_CTRL_CRC_ADD_B BIT(6) #define HBG_REG_TRANSMIT_CTRL_AN_EN_B BIT(5) #define HBG_REG_REC_FILT_CTRL_ADDR (HBG_REG_SGMII_BASE + 0x0064) #define HBG_REG_REC_FILT_CTRL_UC_MATCH_EN_B BIT(0) +#define HBG_REG_RX_OCTETS_TOTAL_OK_ADDR (HBG_REG_SGMII_BASE + 0x0080) +#define HBG_REG_RX_OCTETS_BAD_ADDR (HBG_REG_SGMII_BASE + 0x0084) +#define HBG_REG_RX_UC_PKTS_ADDR (HBG_REG_SGMII_BASE + 0x0088) +#define HBG_REG_RX_MC_PKTS_ADDR (HBG_REG_SGMII_BASE + 0x008C) +#define HBG_REG_RX_BC_PKTS_ADDR (HBG_REG_SGMII_BASE + 0x0090) +#define HBG_REG_RX_PKTS_64OCTETS_ADDR (HBG_REG_SGMII_BASE + 0x0094) +#define HBG_REG_RX_PKTS_65TO127OCTETS_ADDR (HBG_REG_SGMII_BASE + 0x0098) +#define HBG_REG_RX_PKTS_128TO255OCTETS_ADDR (HBG_REG_SGMII_BASE + 0x009C) +#define HBG_REG_RX_PKTS_256TO511OCTETS_ADDR (HBG_REG_SGMII_BASE + 0x00A0) +#define HBG_REG_RX_PKTS_512TO1023OCTETS_ADDR (HBG_REG_SGMII_BASE + 0x00A4) +#define HBG_REG_RX_PKTS_1024TO1518OCTETS_ADDR (HBG_REG_SGMII_BASE + 0x00A8) +#define HBG_REG_RX_PKTS_1519TOMAXOCTETS_ADDR (HBG_REG_SGMII_BASE + 0x00AC) +#define HBG_REG_RX_FCS_ERRORS_ADDR (HBG_REG_SGMII_BASE + 0x00B0) +#define HBG_REG_RX_TAGGED_ADDR (HBG_REG_SGMII_BASE + 0x00B4) +#define HBG_REG_RX_DATA_ERR_ADDR (HBG_REG_SGMII_BASE + 0x00B8) +#define HBG_REG_RX_ALIGN_ERRORS_ADDR 
(HBG_REG_SGMII_BASE + 0x00BC) +#define HBG_REG_RX_LONG_ERRORS_ADDR (HBG_REG_SGMII_BASE + 0x00C0) +#define HBG_REG_RX_JABBER_ERRORS_ADDR (HBG_REG_SGMII_BASE + 0x00C4) +#define HBG_REG_RX_PAUSE_MACCTL_FRAMCOUNTER_ADDR (HBG_REG_SGMII_BASE + 0x00C8) +#define HBG_REG_RX_UNKNOWN_MACCTL_FRAMCOUNTER_ADDR (HBG_REG_SGMII_BASE + 0x00CC) +#define HBG_REG_RX_VERY_LONG_ERR_CNT_ADDR (HBG_REG_SGMII_BASE + 0x00D0) +#define HBG_REG_RX_RUNT_ERR_CNT_ADDR (HBG_REG_SGMII_BASE + 0x00D4) +#define HBG_REG_RX_SHORT_ERR_CNT_ADDR (HBG_REG_SGMII_BASE + 0x00D8) +#define HBG_REG_RX_FILT_PKT_CNT_ADDR (HBG_REG_SGMII_BASE + 0x00E8) +#define HBG_REG_RX_OCTETS_TOTAL_FILT_ADDR (HBG_REG_SGMII_BASE + 0x00EC) +#define HBG_REG_OCTETS_TRANSMITTED_OK_ADDR (HBG_REG_SGMII_BASE + 0x0100) +#define HBG_REG_OCTETS_TRANSMITTED_BAD_ADDR (HBG_REG_SGMII_BASE + 0x0104) +#define HBG_REG_TX_UC_PKTS_ADDR (HBG_REG_SGMII_BASE + 0x0108) +#define HBG_REG_TX_MC_PKTS_ADDR (HBG_REG_SGMII_BASE + 0x010C) +#define HBG_REG_TX_BC_PKTS_ADDR (HBG_REG_SGMII_BASE + 0x0110) +#define HBG_REG_TX_PKTS_64OCTETS_ADDR (HBG_REG_SGMII_BASE + 0x0114) +#define HBG_REG_TX_PKTS_65TO127OCTETS_ADDR (HBG_REG_SGMII_BASE + 0x0118) +#define HBG_REG_TX_PKTS_128TO255OCTETS_ADDR (HBG_REG_SGMII_BASE + 0x011C) +#define HBG_REG_TX_PKTS_256TO511OCTETS_ADDR (HBG_REG_SGMII_BASE + 0x0120) +#define HBG_REG_TX_PKTS_512TO1023OCTETS_ADDR (HBG_REG_SGMII_BASE + 0x0124) +#define HBG_REG_TX_PKTS_1024TO1518OCTETS_ADDR (HBG_REG_SGMII_BASE + 0x0128) +#define HBG_REG_TX_PKTS_1519TOMAXOCTETS_ADDR (HBG_REG_SGMII_BASE + 0x012C) +#define HBG_REG_TX_EXCESSIVE_LENGTH_DROP_ADDR (HBG_REG_SGMII_BASE + 0x014C) +#define HBG_REG_TX_UNDERRUN_ADDR (HBG_REG_SGMII_BASE + 0x0150) +#define HBG_REG_TX_TAGGED_ADDR (HBG_REG_SGMII_BASE + 0x0154) +#define HBG_REG_TX_CRC_ERROR_ADDR (HBG_REG_SGMII_BASE + 0x0158) +#define HBG_REG_TX_PAUSE_FRAMES_ADDR (HBG_REG_SGMII_BASE + 0x015C) #define HBG_REG_LINE_LOOP_BACK_ADDR (HBG_REG_SGMII_BASE + 0x01A8) #define HBG_REG_CF_CRC_STRIP_ADDR (HBG_REG_SGMII_BASE + 0x01B0) #define HBG_REG_CF_CRC_STRIP_B BIT(0) @@ -69,6 +119,9 @@ #define HBG_REG_RECV_CTRL_ADDR (HBG_REG_SGMII_BASE + 0x01E0) #define HBG_REG_RECV_CTRL_STRIP_PAD_EN_B BIT(3) #define HBG_REG_VLAN_CODE_ADDR (HBG_REG_SGMII_BASE + 0x01E8) +#define HBG_REG_RX_OVERRUN_CNT_ADDR (HBG_REG_SGMII_BASE + 0x01EC) +#define HBG_REG_RX_LENGTHFIELD_ERR_CNT_ADDR (HBG_REG_SGMII_BASE + 0x01F4) +#define HBG_REG_RX_FAIL_COMMA_CNT_ADDR (HBG_REG_SGMII_BASE + 0x01F8) #define HBG_REG_STATION_ADDR_LOW_0_ADDR (HBG_REG_SGMII_BASE + 0x0200) #define HBG_REG_STATION_ADDR_HIGH_0_ADDR (HBG_REG_SGMII_BASE + 0x0204) #define HBG_REG_STATION_ADDR_LOW_1_ADDR (HBG_REG_SGMII_BASE + 0x0208) @@ -103,6 +156,7 @@ #define HBG_INT_MSK_MAC_PCS_TX_FIFO_ERR_B BIT(17) #define HBG_INT_MSK_MAC_PCS_RX_FIFO_ERR_B BIT(16) #define HBG_INT_MSK_MAC_MII_FIFO_ERR_B BIT(15) +#define HBG_INT_MSK_TX_PKT_CPL_B BIT(14) #define HBG_INT_MSK_TX_B BIT(1) /* just used in driver */ #define HBG_INT_MSK_RX_B BIT(0) /* just used in driver */ #define HBG_REG_CF_INTRPT_STAT_ADDR (HBG_REG_SGMII_BASE + 0x0434) @@ -111,12 +165,17 @@ #define HBG_REG_RX_BUS_ERR_ADDR_ADDR (HBG_REG_SGMII_BASE + 0x0440) #define HBG_REG_MAX_FRAME_LEN_ADDR (HBG_REG_SGMII_BASE + 0x0444) #define HBG_REG_MAX_FRAME_LEN_M GENMASK(15, 0) +#define HBG_REG_TX_DROP_CNT_ADDR (HBG_REG_SGMII_BASE + 0x0448) +#define HBG_REG_RX_OVER_FLOW_CNT_ADDR (HBG_REG_SGMII_BASE + 0x044C) #define HBG_REG_DEBUG_ST_MCH_ADDR (HBG_REG_SGMII_BASE + 0x0450) #define HBG_REG_FIFO_CURR_STATUS_ADDR (HBG_REG_SGMII_BASE + 0x0454) #define HBG_REG_FIFO_HIST_STATUS_ADDR 
(HBG_REG_SGMII_BASE + 0x0458) #define HBG_REG_CF_CFF_DATA_NUM_ADDR (HBG_REG_SGMII_BASE + 0x045C) #define HBG_REG_CF_CFF_DATA_NUM_ADDR_TX_M GENMASK(8, 0) #define HBG_REG_CF_CFF_DATA_NUM_ADDR_RX_M GENMASK(24, 16) +#define HBG_REG_TX_CS_FAIL_CNT_ADDR (HBG_REG_SGMII_BASE + 0x0460) +#define HBG_REG_RX_TRANS_PKG_CNT_ADDR (HBG_REG_SGMII_BASE + 0x0464) +#define HBG_REG_TX_TRANS_PKG_CNT_ADDR (HBG_REG_SGMII_BASE + 0x0468) #define HBG_REG_CF_TX_PAUSE_ADDR (HBG_REG_SGMII_BASE + 0x0470) #define HBG_REG_TX_CFF_ADDR_0_ADDR (HBG_REG_SGMII_BASE + 0x0488) #define HBG_REG_TX_CFF_ADDR_1_ADDR (HBG_REG_SGMII_BASE + 0x048C) @@ -136,6 +195,9 @@ #define HBG_REG_RX_CTRL_RXBUF_1ST_SKIP_SIZE2_M GENMASK(3, 0) #define HBG_REG_RX_PKT_MODE_ADDR (HBG_REG_SGMII_BASE + 0x04F4) #define HBG_REG_RX_PKT_MODE_PARSE_MODE_M GENMASK(22, 21) +#define HBG_REG_RX_BUFRQ_ERR_CNT_ADDR (HBG_REG_SGMII_BASE + 0x058C) +#define HBG_REG_TX_BUFRL_ERR_CNT_ADDR (HBG_REG_SGMII_BASE + 0x0590) +#define HBG_REG_RX_WE_ERR_CNT_ADDR (HBG_REG_SGMII_BASE + 0x0594) #define HBG_REG_DBG_ST0_ADDR (HBG_REG_SGMII_BASE + 0x05E4) #define HBG_REG_DBG_ST1_ADDR (HBG_REG_SGMII_BASE + 0x05E8) #define HBG_REG_DBG_ST2_ADDR (HBG_REG_SGMII_BASE + 0x05EC) @@ -178,5 +240,48 @@ struct hbg_rx_desc { }; #define HBG_RX_DESC_W2_PKT_LEN_M GENMASK(31, 16) +#define HBG_RX_DESC_W2_PORT_NUM_M GENMASK(15, 12) +#define HBG_RX_DESC_W4_IP_TCP_UDP_M GENMASK(31, 30) +#define HBG_RX_DESC_W4_IPSEC_B BIT(29) +#define HBG_RX_DESC_W4_IP_VERSION_B BIT(28) +#define HBG_RX_DESC_W4_L4_ERR_CODE_M GENMASK(26, 23) +#define HBG_RX_DESC_W4_FRAG_B BIT(22) +#define HBG_RX_DESC_W4_OPT_B BIT(21) +#define HBG_RX_DESC_W4_IP_VERSION_ERR_B BIT(20) +#define HBG_RX_DESC_W4_BRD_CST_B BIT(19) +#define HBG_RX_DESC_W4_MUL_CST_B BIT(18) +#define HBG_RX_DESC_W4_ARP_B BIT(17) +#define HBG_RX_DESC_W4_RARP_B BIT(16) +#define HBG_RX_DESC_W4_ICMP_B BIT(15) +#define HBG_RX_DESC_W4_VLAN_FLAG_B BIT(14) +#define HBG_RX_DESC_W4_DROP_B BIT(13) +#define HBG_RX_DESC_W4_L3_ERR_CODE_M GENMASK(12, 9) +#define HBG_RX_DESC_W4_L2_ERR_B BIT(8) +#define HBG_RX_DESC_W4_IDX_MATCH_B BIT(7) + +enum hbg_l3_err_code { + HBG_L3_OK = 0, + HBG_L3_WRONG_HEAD, + HBG_L3_CSUM_ERR, + HBG_L3_LEN_ERR, + HBG_L3_ZERO_TTL, + HBG_L3_RSVD, +}; + +enum hbg_l4_err_code { + HBG_L4_OK = 0, + HBG_L4_WRONG_HEAD, + HBG_L4_LEN_ERR, + HBG_L4_CSUM_ERR, + HBG_L4_ZERO_PORT_NUM, + HBG_L4_RSVD, +}; + +enum hbg_pkt_type_code { + HBG_NO_IP_PKT = 0, + HBG_IP_PKT, + HBG_TCP_PKT, + HBG_UDP_PKT, +}; #endif diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.c index f4f256a0dfea4..8d814c8f19ea0 100644 --- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.c +++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.c @@ -38,8 +38,14 @@ static int hbg_dma_map(struct hbg_buffer *buffer) buffer->skb_dma = dma_map_single(&priv->pdev->dev, buffer->skb->data, buffer->skb_len, buffer_to_dma_dir(buffer)); - if (unlikely(dma_mapping_error(&priv->pdev->dev, buffer->skb_dma))) + if (unlikely(dma_mapping_error(&priv->pdev->dev, buffer->skb_dma))) { + if (buffer->dir == HBG_DIR_RX) + priv->stats.rx_dma_err_cnt++; + else + priv->stats.tx_dma_err_cnt++; + return -ENOMEM; + } return 0; } @@ -195,6 +201,173 @@ static int hbg_napi_tx_recycle(struct napi_struct *napi, int budget) return packet_done; } +static bool hbg_rx_check_l3l4_error(struct hbg_priv *priv, + struct hbg_rx_desc *desc, + struct sk_buff *skb) +{ + bool rx_checksum_offload = !!(priv->netdev->features & NETIF_F_RXCSUM); + + skb->ip_summed = rx_checksum_offload ? 
+			CHECKSUM_UNNECESSARY : CHECKSUM_NONE;
+
+	if (likely(!FIELD_GET(HBG_RX_DESC_W4_L3_ERR_CODE_M, desc->word4) &&
+		   !FIELD_GET(HBG_RX_DESC_W4_L4_ERR_CODE_M, desc->word4)))
+		return true;
+
+	switch (FIELD_GET(HBG_RX_DESC_W4_L3_ERR_CODE_M, desc->word4)) {
+	case HBG_L3_OK:
+		break;
+	case HBG_L3_WRONG_HEAD:
+		priv->stats.rx_desc_l3_wrong_head_cnt++;
+		return false;
+	case HBG_L3_CSUM_ERR:
+		skb->ip_summed = CHECKSUM_NONE;
+		priv->stats.rx_desc_l3_csum_err_cnt++;
+
+		/* Don't drop packets on csum validation failure,
+		 * as suggested by Jakub
+		 */
+		break;
+	case HBG_L3_LEN_ERR:
+		priv->stats.rx_desc_l3_len_err_cnt++;
+		return false;
+	case HBG_L3_ZERO_TTL:
+		priv->stats.rx_desc_l3_zero_ttl_cnt++;
+		return false;
+	default:
+		priv->stats.rx_desc_l3_other_cnt++;
+		return false;
+	}
+
+	switch (FIELD_GET(HBG_RX_DESC_W4_L4_ERR_CODE_M, desc->word4)) {
+	case HBG_L4_OK:
+		break;
+	case HBG_L4_WRONG_HEAD:
+		priv->stats.rx_desc_l4_wrong_head_cnt++;
+		return false;
+	case HBG_L4_LEN_ERR:
+		priv->stats.rx_desc_l4_len_err_cnt++;
+		return false;
+	case HBG_L4_CSUM_ERR:
+		skb->ip_summed = CHECKSUM_NONE;
+		priv->stats.rx_desc_l4_csum_err_cnt++;
+
+		/* Don't drop packets on csum validation failure,
+		 * as suggested by Jakub
+		 */
+		break;
+	case HBG_L4_ZERO_PORT_NUM:
+		priv->stats.rx_desc_l4_zero_port_num_cnt++;
+		return false;
+	default:
+		priv->stats.rx_desc_l4_other_cnt++;
+		return false;
+	}
+
+	return true;
+}
+
+static void hbg_update_rx_ip_protocol_stats(struct hbg_priv *priv,
+					    struct hbg_rx_desc *desc)
+{
+	if (unlikely(!FIELD_GET(HBG_RX_DESC_W4_IP_TCP_UDP_M, desc->word4))) {
+		priv->stats.rx_desc_no_ip_pkt_cnt++;
+		return;
+	}
+
+	if (unlikely(FIELD_GET(HBG_RX_DESC_W4_IP_VERSION_ERR_B, desc->word4))) {
+		priv->stats.rx_desc_ip_ver_err_cnt++;
+		return;
+	}
+
+	/* 0:ipv4, 1:ipv6 */
+	if (FIELD_GET(HBG_RX_DESC_W4_IP_VERSION_B, desc->word4))
+		priv->stats.rx_desc_ipv6_pkt_cnt++;
+	else
+		priv->stats.rx_desc_ipv4_pkt_cnt++;
+
+	switch (FIELD_GET(HBG_RX_DESC_W4_IP_TCP_UDP_M, desc->word4)) {
+	case HBG_IP_PKT:
+		priv->stats.rx_desc_ip_pkt_cnt++;
+		if (FIELD_GET(HBG_RX_DESC_W4_OPT_B, desc->word4))
+			priv->stats.rx_desc_ip_opt_pkt_cnt++;
+		if (FIELD_GET(HBG_RX_DESC_W4_FRAG_B, desc->word4))
+			priv->stats.rx_desc_frag_cnt++;
+
+		if (FIELD_GET(HBG_RX_DESC_W4_ICMP_B, desc->word4))
+			priv->stats.rx_desc_icmp_pkt_cnt++;
+		else if (FIELD_GET(HBG_RX_DESC_W4_IPSEC_B, desc->word4))
+			priv->stats.rx_desc_ipsec_pkt_cnt++;
+		break;
+	case HBG_TCP_PKT:
+		priv->stats.rx_desc_tcp_pkt_cnt++;
+		break;
+	case HBG_UDP_PKT:
+		priv->stats.rx_desc_udp_pkt_cnt++;
+		break;
+	default:
+		priv->stats.rx_desc_no_ip_pkt_cnt++;
+		break;
+	}
+}
+
+static void hbg_update_rx_protocol_stats(struct hbg_priv *priv,
+					 struct hbg_rx_desc *desc)
+{
+	if (unlikely(!FIELD_GET(HBG_RX_DESC_W4_IDX_MATCH_B, desc->word4))) {
+		priv->stats.rx_desc_key_not_match_cnt++;
+		return;
+	}
+
+	if (FIELD_GET(HBG_RX_DESC_W4_BRD_CST_B, desc->word4))
+		priv->stats.rx_desc_broadcast_pkt_cnt++;
+	else if (FIELD_GET(HBG_RX_DESC_W4_MUL_CST_B, desc->word4))
+		priv->stats.rx_desc_multicast_pkt_cnt++;
+
+	if (FIELD_GET(HBG_RX_DESC_W4_VLAN_FLAG_B, desc->word4))
+		priv->stats.rx_desc_vlan_pkt_cnt++;
+
+	if (FIELD_GET(HBG_RX_DESC_W4_ARP_B, desc->word4)) {
+		priv->stats.rx_desc_arp_pkt_cnt++;
+		return;
+	} else if (FIELD_GET(HBG_RX_DESC_W4_RARP_B, desc->word4)) {
+		priv->stats.rx_desc_rarp_pkt_cnt++;
+		return;
+	}
+
+	hbg_update_rx_ip_protocol_stats(priv, desc);
+}
+
+static bool hbg_rx_pkt_check(struct hbg_priv *priv, struct hbg_rx_desc *desc,
+			     struct sk_buff *skb)
+{
+	if
(unlikely(FIELD_GET(HBG_RX_DESC_W2_PKT_LEN_M, desc->word2) > + priv->dev_specs.max_frame_len)) { + priv->stats.rx_desc_pkt_len_err_cnt++; + return false; + } + + if (unlikely(FIELD_GET(HBG_RX_DESC_W2_PORT_NUM_M, desc->word2) != + priv->dev_specs.mac_id || + FIELD_GET(HBG_RX_DESC_W4_DROP_B, desc->word4))) { + priv->stats.rx_desc_drop++; + return false; + } + + if (unlikely(FIELD_GET(HBG_RX_DESC_W4_L2_ERR_B, desc->word4))) { + priv->stats.rx_desc_l2_err_cnt++; + return false; + } + + if (unlikely(!hbg_rx_check_l3l4_error(priv, desc, skb))) { + priv->stats.rx_desc_l3l4_err_cnt++; + return false; + } + + hbg_update_rx_protocol_stats(priv, desc); + return true; +} + static int hbg_rx_fill_one_buffer(struct hbg_priv *priv) { struct hbg_ring *ring = &priv->rx_ring; @@ -257,8 +430,12 @@ static int hbg_napi_rx_poll(struct napi_struct *napi, int budget) rx_desc = (struct hbg_rx_desc *)buffer->skb->data; pkt_len = FIELD_GET(HBG_RX_DESC_W2_PKT_LEN_M, rx_desc->word2); - hbg_dma_unmap(buffer); + if (unlikely(!hbg_rx_pkt_check(priv, rx_desc, buffer->skb))) { + hbg_buffer_free(buffer); + goto next_buffer; + } + hbg_dma_unmap(buffer); skb_reserve(buffer->skb, HBG_PACKET_HEAD_SIZE + NET_IP_ALIGN); skb_put(buffer->skb, pkt_len); buffer->skb->protocol = eth_type_trans(buffer->skb, diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c index 6c458f037262f..60a586a951a02 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c @@ -266,9 +266,9 @@ static int hns_nic_config_phy_loopback(struct phy_device *phy_dev, u8 en) if (err) goto out; - err = phy_loopback(phy_dev, true); + err = phy_loopback(phy_dev, true, 0); } else { - err = phy_loopback(phy_dev, false); + err = phy_loopback(phy_dev, false, 0); if (err) goto out; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c index 9bbece25552b1..09749e9f73986 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c @@ -3,6 +3,7 @@ #include #include +#include #include "hnae3.h" #include "hns3_debugfs.h" @@ -661,12 +662,14 @@ static void hns3_dump_rx_queue_info(struct hns3_enet_ring *ring, HNS3_RING_RX_RING_PKTNUM_RECORD_REG)); sprintf(result[j++], "%u", ring->rx_copybreak); - sprintf(result[j++], "%s", readl_relaxed(ring->tqp->io_base + - HNS3_RING_EN_REG) ? "on" : "off"); + sprintf(result[j++], "%s", + str_on_off(readl_relaxed(ring->tqp->io_base + + HNS3_RING_EN_REG))); if (hnae3_ae_dev_tqp_txrx_indep_supported(ae_dev)) - sprintf(result[j++], "%s", readl_relaxed(ring->tqp->io_base + - HNS3_RING_RX_EN_REG) ? "on" : "off"); + sprintf(result[j++], "%s", + str_on_off(readl_relaxed(ring->tqp->io_base + + HNS3_RING_RX_EN_REG))); else sprintf(result[j++], "%s", "NA"); @@ -764,12 +767,14 @@ static void hns3_dump_tx_queue_info(struct hns3_enet_ring *ring, sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_PKTNUM_RECORD_REG)); - sprintf(result[j++], "%s", readl_relaxed(ring->tqp->io_base + - HNS3_RING_EN_REG) ? "on" : "off"); + sprintf(result[j++], "%s", + str_on_off(readl_relaxed(ring->tqp->io_base + + HNS3_RING_EN_REG))); if (hnae3_ae_dev_tqp_txrx_indep_supported(ae_dev)) - sprintf(result[j++], "%s", readl_relaxed(ring->tqp->io_base + - HNS3_RING_TX_EN_REG) ? 
"on" : "off"); + sprintf(result[j++], "%s", + str_on_off(readl_relaxed(ring->tqp->io_base + + HNS3_RING_TX_EN_REG))); else sprintf(result[j++], "%s", "NA"); @@ -1030,7 +1035,6 @@ static void hns3_dbg_dev_caps(struct hnae3_handle *h, char *buf, int len, int *pos) { struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev); - const char * const str[] = {"no", "yes"}; unsigned long *caps = ae_dev->caps; u32 i, state; @@ -1039,7 +1043,7 @@ hns3_dbg_dev_caps(struct hnae3_handle *h, char *buf, int len, int *pos) for (i = 0; i < ARRAY_SIZE(hns3_dbg_cap); i++) { state = test_bit(hns3_dbg_cap[i].cap_bit, caps); *pos += scnprintf(buf + *pos, len - *pos, "%s: %s\n", - hns3_dbg_cap[i].name, str[state]); + hns3_dbg_cap[i].name, str_yes_no(state)); } *pos += scnprintf(buf + *pos, len - *pos, "\n"); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c index b771a2daba436..6715222aeb661 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c @@ -3,6 +3,7 @@ #include #include +#include #include #include @@ -1198,7 +1199,7 @@ static int hns3_set_tx_push(struct net_device *netdev, u32 tx_push) return 0; netdev_dbg(netdev, "Changing tx push from %s to %s\n", - old_state ? "on" : "off", tx_push ? "on" : "off"); + str_on_off(old_state), str_on_off(tx_push)); if (tx_push) set_bit(HNS3_NIC_STATE_TX_PUSH_ENABLE, &priv->state); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c index debf143e99401..c464906935946 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c @@ -3,6 +3,7 @@ #include #include +#include #include "hclge_debugfs.h" #include "hclge_err.h" @@ -11,7 +12,6 @@ #include "hclge_tm.h" #include "hnae3.h" -static const char * const state_str[] = { "off", "on" }; static const char * const hclge_mac_state_str[] = { "TO_ADD", "TO_DEL", "ACTIVE" }; @@ -2573,7 +2573,7 @@ static int hclge_dbg_dump_loopback(struct hclge_dev *hdev, char *buf, int len) loopback_en = hnae3_get_bit(le32_to_cpu(req_app->txrx_pad_fcs_loop_en), HCLGE_MAC_APP_LP_B); pos += scnprintf(buf + pos, len - pos, "app loopback: %s\n", - state_str[loopback_en]); + str_on_off(loopback_en)); hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK, true); ret = hclge_cmd_send(&hdev->hw, &desc, 1); @@ -2586,22 +2586,22 @@ static int hclge_dbg_dump_loopback(struct hclge_dev *hdev, char *buf, int len) loopback_en = req_common->enable & HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B; pos += scnprintf(buf + pos, len - pos, "serdes serial loopback: %s\n", - state_str[loopback_en]); + str_on_off(loopback_en)); loopback_en = req_common->enable & HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B ? 1 : 0; pos += scnprintf(buf + pos, len - pos, "serdes parallel loopback: %s\n", - state_str[loopback_en]); + str_on_off(loopback_en)); if (phydev) { loopback_en = phydev->loopback_enabled; pos += scnprintf(buf + pos, len - pos, "phy loopback: %s\n", - state_str[loopback_en]); + str_on_off(loopback_en)); } else if (hnae3_dev_phy_imp_supported(hdev)) { loopback_en = req_common->enable & HCLGE_CMD_GE_PHY_INNER_LOOP_B; pos += scnprintf(buf + pos, len - pos, "phy loopback: %s\n", - state_str[loopback_en]); + str_on_off(loopback_en)); } return 0; @@ -2894,9 +2894,9 @@ static int hclge_dbg_dump_vlan_filter_config(struct hclge_dev *hdev, char *buf, egress = vlan_fe & HCLGE_FILTER_FE_NIC_EGRESS_B ? 
1 : 0; *pos += scnprintf(buf, len, "I_PORT_VLAN_FILTER: %s\n", - state_str[ingress]); + str_on_off(ingress)); *pos += scnprintf(buf + *pos, len - *pos, "E_PORT_VLAN_FILTER: %s\n", - state_str[egress]); + str_on_off(egress)); hclge_dbg_fill_content(content, sizeof(content), vlan_filter_items, NULL, ARRAY_SIZE(vlan_filter_items)); @@ -2915,11 +2915,11 @@ static int hclge_dbg_dump_vlan_filter_config(struct hclge_dev *hdev, char *buf, return ret; j = 0; result[j++] = hclge_dbg_get_func_id_str(str_id, i); - result[j++] = state_str[ingress]; - result[j++] = state_str[egress]; - result[j++] = - test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B, - hdev->ae_dev->caps) ? state_str[bypass] : "NA"; + result[j++] = str_on_off(ingress); + result[j++] = str_on_off(egress); + result[j++] = test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B, + hdev->ae_dev->caps) ? + str_on_off(bypass) : "NA"; hclge_dbg_fill_content(content, sizeof(content), vlan_filter_items, result, ARRAY_SIZE(vlan_filter_items)); @@ -2958,19 +2958,19 @@ static int hclge_dbg_dump_vlan_offload_config(struct hclge_dev *hdev, char *buf, j = 0; result[j++] = hclge_dbg_get_func_id_str(str_id, i); result[j++] = str_pvid; - result[j++] = state_str[vlan_cfg.accept_tag1]; - result[j++] = state_str[vlan_cfg.accept_tag2]; - result[j++] = state_str[vlan_cfg.accept_untag1]; - result[j++] = state_str[vlan_cfg.accept_untag2]; - result[j++] = state_str[vlan_cfg.insert_tag1]; - result[j++] = state_str[vlan_cfg.insert_tag2]; - result[j++] = state_str[vlan_cfg.shift_tag]; - result[j++] = state_str[vlan_cfg.strip_tag1]; - result[j++] = state_str[vlan_cfg.strip_tag2]; - result[j++] = state_str[vlan_cfg.drop_tag1]; - result[j++] = state_str[vlan_cfg.drop_tag2]; - result[j++] = state_str[vlan_cfg.pri_only1]; - result[j++] = state_str[vlan_cfg.pri_only2]; + result[j++] = str_on_off(vlan_cfg.accept_tag1); + result[j++] = str_on_off(vlan_cfg.accept_tag2); + result[j++] = str_on_off(vlan_cfg.accept_untag1); + result[j++] = str_on_off(vlan_cfg.accept_untag2); + result[j++] = str_on_off(vlan_cfg.insert_tag1); + result[j++] = str_on_off(vlan_cfg.insert_tag2); + result[j++] = str_on_off(vlan_cfg.shift_tag); + result[j++] = str_on_off(vlan_cfg.strip_tag1); + result[j++] = str_on_off(vlan_cfg.strip_tag2); + result[j++] = str_on_off(vlan_cfg.drop_tag1); + result[j++] = str_on_off(vlan_cfg.drop_tag2); + result[j++] = str_on_off(vlan_cfg.pri_only1); + result[j++] = str_on_off(vlan_cfg.pri_only2); hclge_dbg_fill_content(content, sizeof(content), vlan_offload_items, result, @@ -3007,14 +3007,13 @@ static int hclge_dbg_dump_ptp_info(struct hclge_dev *hdev, char *buf, int len) pos += scnprintf(buf + pos, len - pos, "phc %s's debug info:\n", ptp->info.name); pos += scnprintf(buf + pos, len - pos, "ptp enable: %s\n", - test_bit(HCLGE_PTP_FLAG_EN, &ptp->flags) ? - "yes" : "no"); + str_yes_no(test_bit(HCLGE_PTP_FLAG_EN, &ptp->flags))); pos += scnprintf(buf + pos, len - pos, "ptp tx enable: %s\n", - test_bit(HCLGE_PTP_FLAG_TX_EN, &ptp->flags) ? - "yes" : "no"); + str_yes_no(test_bit(HCLGE_PTP_FLAG_TX_EN, + &ptp->flags))); pos += scnprintf(buf + pos, len - pos, "ptp rx enable: %s\n", - test_bit(HCLGE_PTP_FLAG_RX_EN, &ptp->flags) ? 
- "yes" : "no"); + str_yes_no(test_bit(HCLGE_PTP_FLAG_RX_EN, + &ptp->flags))); last_rx = jiffies_to_msecs(ptp->last_rx); pos += scnprintf(buf + pos, len - pos, "last rx time: %lu.%lu\n", diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 3f17b3073e50f..92f9b8ec76d9f 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -7875,7 +7875,7 @@ static int hclge_enable_phy_loopback(struct hclge_dev *hdev, if (ret) return ret; - return phy_loopback(phydev, true); + return phy_loopback(phydev, true, 0); } static int hclge_disable_phy_loopback(struct hclge_dev *hdev, @@ -7883,7 +7883,7 @@ static int hclge_disable_phy_loopback(struct hclge_dev *hdev, { int ret; - ret = phy_loopback(phydev, false); + ret = phy_loopback(phydev, false, 0); if (ret) return ret; @@ -8000,7 +8000,7 @@ static int hclge_set_loopback(struct hnae3_handle *handle, ret = hclge_tqp_enable(handle, en); if (ret) dev_err(&hdev->pdev->dev, "failed to %s tqp in loopback, ret = %d\n", - en ? "enable" : "disable", ret); + str_enable_disable(en), ret); return ret; } @@ -11200,9 +11200,9 @@ static void hclge_info_show(struct hclge_dev *hdev) dev_info(dev, "This is %s PF\n", hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main"); dev_info(dev, "DCB %s\n", - handle->kinfo.tc_info.dcb_ets_active ? "enable" : "disable"); + str_enable_disable(handle->kinfo.tc_info.dcb_ets_active)); dev_info(dev, "MQPRIO %s\n", - handle->kinfo.tc_info.mqprio_active ? "enable" : "disable"); + str_enable_disable(handle->kinfo.tc_info.mqprio_active)); dev_info(dev, "Default tx spare buffer size: %u\n", hdev->tx_spare_buf_size); @@ -11976,7 +11976,7 @@ static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable) if (ret) { dev_err(&hdev->pdev->dev, "Set vf %d mac spoof check %s failed, ret=%d\n", - vf, enable ? "on" : "off", ret); + vf, str_on_off(enable), ret); return ret; } @@ -11984,7 +11984,7 @@ static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable) if (ret) dev_err(&hdev->pdev->dev, "Set vf %d vlan spoof check %s failed, ret=%d\n", - vf, enable ? "on" : "off", ret); + vf, str_on_off(enable), ret); return ret; } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c index 80079657afebe..9a456ebf9b7cd 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c @@ -258,7 +258,7 @@ void hclge_mac_start_phy(struct hclge_dev *hdev) if (!phydev) return; - phy_loopback(phydev, false); + phy_loopback(phydev, false, 0); phy_start(phydev); } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c index bab16c2191b2f..59cc9221185f2 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c @@ -2,6 +2,7 @@ // Copyright (c) 2021 Hisilicon Limited. #include +#include #include "hclge_main.h" #include "hnae3.h" @@ -226,7 +227,7 @@ static int hclge_ptp_int_en(struct hclge_dev *hdev, bool en) if (ret) dev_err(&hdev->pdev->dev, "failed to %s ptp interrupt, ret = %d\n", - en ? 
"enable" : "disable", ret); + str_enable_disable(en), ret); return ret; } @@ -483,7 +484,7 @@ int hclge_ptp_init(struct hclge_dev *hdev) ret = hclge_ptp_get_cycle(hdev); if (ret) - return ret; + goto out; } ret = hclge_ptp_int_en(hdev, true); diff --git a/drivers/net/ethernet/hisilicon/hns_mdio.c b/drivers/net/ethernet/hisilicon/hns_mdio.c index a1aa6c1f966ec..6812be8dc64fe 100644 --- a/drivers/net/ethernet/hisilicon/hns_mdio.c +++ b/drivers/net/ethernet/hisilicon/hns_mdio.c @@ -640,7 +640,7 @@ static struct platform_driver hns_mdio_driver = { .driver = { .name = MDIO_DRV_NAME, .of_match_table = hns_mdio_match, - .acpi_match_table = ACPI_PTR(hns_mdio_acpi_match), + .acpi_match_table = hns_mdio_acpi_match, }, }; diff --git a/drivers/net/ethernet/huawei/Kconfig b/drivers/net/ethernet/huawei/Kconfig index c05fce15eb518..7d0feb1da158a 100644 --- a/drivers/net/ethernet/huawei/Kconfig +++ b/drivers/net/ethernet/huawei/Kconfig @@ -16,5 +16,6 @@ config NET_VENDOR_HUAWEI if NET_VENDOR_HUAWEI source "drivers/net/ethernet/huawei/hinic/Kconfig" +source "drivers/net/ethernet/huawei/hinic3/Kconfig" endif # NET_VENDOR_HUAWEI diff --git a/drivers/net/ethernet/huawei/Makefile b/drivers/net/ethernet/huawei/Makefile index 2549ad5afe6d4..59865b8828792 100644 --- a/drivers/net/ethernet/huawei/Makefile +++ b/drivers/net/ethernet/huawei/Makefile @@ -4,3 +4,4 @@ # obj-$(CONFIG_HINIC) += hinic/ +obj-$(CONFIG_HINIC3) += hinic3/ diff --git a/drivers/net/ethernet/huawei/hinic3/Kconfig b/drivers/net/ethernet/huawei/hinic3/Kconfig new file mode 100644 index 0000000000000..70e30a2af6a5a --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/Kconfig @@ -0,0 +1,19 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# Huawei driver configuration +# + +config HINIC3 + tristate "Huawei 3rd generation network adapters (HINIC3) support" + # Fields of HW and management structures are little endian and are + # currently not converted + depends on !CPU_BIG_ENDIAN + depends on X86 || ARM64 || COMPILE_TEST + depends on PCI_MSI && 64BIT + select AUXILIARY_BUS + help + This driver supports HiNIC 3rd gen Network Adapter (HINIC3). + The driver is supported on X86_64 and ARM64 little endian. + + To compile this driver as a module, choose M here. + The module will be called hinic3. diff --git a/drivers/net/ethernet/huawei/hinic3/Makefile b/drivers/net/ethernet/huawei/hinic3/Makefile new file mode 100644 index 0000000000000..509dfbfb0e96e --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/Makefile @@ -0,0 +1,21 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. + +obj-$(CONFIG_HINIC3) += hinic3.o + +hinic3-objs := hinic3_common.o \ + hinic3_hw_cfg.o \ + hinic3_hw_comm.o \ + hinic3_hwdev.o \ + hinic3_hwif.o \ + hinic3_irq.o \ + hinic3_lld.o \ + hinic3_main.o \ + hinic3_mbox.o \ + hinic3_netdev_ops.o \ + hinic3_nic_cfg.o \ + hinic3_nic_io.o \ + hinic3_queue_common.o \ + hinic3_rx.o \ + hinic3_tx.o \ + hinic3_wq.o diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_common.c b/drivers/net/ethernet/huawei/hinic3/hinic3_common.c new file mode 100644 index 0000000000000..0aa42068728cd --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_common.c @@ -0,0 +1,53 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. 
+ +#include +#include + +#include "hinic3_common.h" + +int hinic3_dma_zalloc_coherent_align(struct device *dev, u32 size, u32 align, + gfp_t flag, + struct hinic3_dma_addr_align *mem_align) +{ + dma_addr_t paddr, align_paddr; + void *vaddr, *align_vaddr; + u32 real_size = size; + + vaddr = dma_alloc_coherent(dev, real_size, &paddr, flag); + if (!vaddr) + return -ENOMEM; + + align_paddr = ALIGN(paddr, align); + if (align_paddr == paddr) { + align_vaddr = vaddr; + goto out; + } + + dma_free_coherent(dev, real_size, vaddr, paddr); + + /* realloc memory for align */ + real_size = size + align; + vaddr = dma_alloc_coherent(dev, real_size, &paddr, flag); + if (!vaddr) + return -ENOMEM; + + align_paddr = ALIGN(paddr, align); + align_vaddr = vaddr + (align_paddr - paddr); + +out: + mem_align->real_size = real_size; + mem_align->ori_vaddr = vaddr; + mem_align->ori_paddr = paddr; + mem_align->align_vaddr = align_vaddr; + mem_align->align_paddr = align_paddr; + + return 0; +} + +void hinic3_dma_free_coherent_align(struct device *dev, + struct hinic3_dma_addr_align *mem_align) +{ + dma_free_coherent(dev, mem_align->real_size, + mem_align->ori_vaddr, mem_align->ori_paddr); +} diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_common.h b/drivers/net/ethernet/huawei/hinic3/hinic3_common.h new file mode 100644 index 0000000000000..bb795dace04c2 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_common.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */ + +#ifndef _HINIC3_COMMON_H_ +#define _HINIC3_COMMON_H_ + +#include + +#define HINIC3_MIN_PAGE_SIZE 0x1000 + +struct hinic3_dma_addr_align { + u32 real_size; + + void *ori_vaddr; + dma_addr_t ori_paddr; + + void *align_vaddr; + dma_addr_t align_paddr; +}; + +int hinic3_dma_zalloc_coherent_align(struct device *dev, u32 size, u32 align, + gfp_t flag, + struct hinic3_dma_addr_align *mem_align); +void hinic3_dma_free_coherent_align(struct device *dev, + struct hinic3_dma_addr_align *mem_align); + +#endif diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_hw_cfg.c b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_cfg.c new file mode 100644 index 0000000000000..221545c264049 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_cfg.c @@ -0,0 +1,24 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. + +#include + +#include "hinic3_hw_cfg.h" +#include "hinic3_hwdev.h" +#include "hinic3_hwif.h" +#include "hinic3_mbox.h" + +bool hinic3_support_nic(struct hinic3_hwdev *hwdev) +{ + return hwdev->cfg_mgmt->cap.supp_svcs_bitmap & BIT(HINIC3_SERVICE_T_NIC); +} + +u16 hinic3_func_max_qnum(struct hinic3_hwdev *hwdev) +{ + return hwdev->cfg_mgmt->cap.nic_svc_cap.max_sqs; +} + +u8 hinic3_physical_port_id(struct hinic3_hwdev *hwdev) +{ + return hwdev->cfg_mgmt->cap.port_id; +} diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_hw_cfg.h b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_cfg.h new file mode 100644 index 0000000000000..e017b1ae9f054 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_cfg.h @@ -0,0 +1,53 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. 
*/ + +#ifndef _HINIC3_HW_CFG_H_ +#define _HINIC3_HW_CFG_H_ + +#include +#include + +struct hinic3_hwdev; + +struct hinic3_irq { + u32 irq_id; + u16 msix_entry_idx; + bool allocated; +}; + +struct hinic3_irq_info { + struct hinic3_irq *irq; + u16 num_irq; + /* device max irq number */ + u16 num_irq_hw; + /* protect irq alloc and free */ + struct mutex irq_mutex; +}; + +struct hinic3_nic_service_cap { + u16 max_sqs; +}; + +/* Device capabilities */ +struct hinic3_dev_cap { + /* Bitmasks of services supported by device */ + u16 supp_svcs_bitmap; + /* Physical port */ + u8 port_id; + struct hinic3_nic_service_cap nic_svc_cap; +}; + +struct hinic3_cfg_mgmt_info { + struct hinic3_irq_info irq_info; + struct hinic3_dev_cap cap; +}; + +int hinic3_alloc_irqs(struct hinic3_hwdev *hwdev, u16 num, + struct msix_entry *alloc_arr, u16 *act_num); +void hinic3_free_irq(struct hinic3_hwdev *hwdev, u32 irq_id); + +bool hinic3_support_nic(struct hinic3_hwdev *hwdev); +u16 hinic3_func_max_qnum(struct hinic3_hwdev *hwdev); +u8 hinic3_physical_port_id(struct hinic3_hwdev *hwdev); + +#endif diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_hw_comm.c b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_comm.c new file mode 100644 index 0000000000000..434696ce7dc23 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_comm.c @@ -0,0 +1,32 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. + +#include + +#include "hinic3_hw_comm.h" +#include "hinic3_hwdev.h" +#include "hinic3_hwif.h" +#include "hinic3_mbox.h" + +int hinic3_func_reset(struct hinic3_hwdev *hwdev, u16 func_id, u64 reset_flag) +{ + struct comm_cmd_func_reset func_reset = {}; + struct mgmt_msg_params msg_params = {}; + int err; + + func_reset.func_id = func_id; + func_reset.reset_flag = reset_flag; + + mgmt_msg_params_init_default(&msg_params, &func_reset, + sizeof(func_reset)); + + err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_COMM, + COMM_CMD_FUNC_RESET, &msg_params); + if (err || func_reset.head.status) { + dev_err(hwdev->dev, "Failed to reset func resources, reset_flag 0x%llx, err: %d, status: 0x%x\n", + reset_flag, err, func_reset.head.status); + return -EIO; + } + + return 0; +} diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_hw_comm.h b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_comm.h new file mode 100644 index 0000000000000..c33a1c77da9ce --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_comm.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */ + +#ifndef _HINIC3_HW_COMM_H_ +#define _HINIC3_HW_COMM_H_ + +#include "hinic3_hw_intf.h" + +struct hinic3_hwdev; + +int hinic3_func_reset(struct hinic3_hwdev *hwdev, u16 func_id, u64 reset_flag); + +#endif diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_hw_intf.h b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_intf.h new file mode 100644 index 0000000000000..3edf2820fd3ab --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_hw_intf.h @@ -0,0 +1,113 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. 
*/ + +#ifndef _HINIC3_HW_INTF_H_ +#define _HINIC3_HW_INTF_H_ + +#include <linux/bits.h> +#include <linux/types.h> + +#define MGMT_MSG_CMD_OP_SET 1 +#define MGMT_MSG_CMD_OP_GET 0 + +#define MGMT_STATUS_PF_SET_VF_ALREADY 0x4 +#define MGMT_STATUS_EXIST 0x6 +#define MGMT_STATUS_CMD_UNSUPPORTED 0xFF + +#define MGMT_MSG_POLLING_TIMEOUT 0 + +struct mgmt_msg_head { + u8 status; + u8 version; + u8 rsvd0[6]; +}; + +struct mgmt_msg_params { + const void *buf_in; + u32 in_size; + void *buf_out; + u32 expected_out_size; + u32 timeout_ms; +}; + +/* CMDQ MODULE_TYPE */ +enum mgmt_mod_type { + /* HW communication module */ + MGMT_MOD_COMM = 0, + /* L2NIC module */ + MGMT_MOD_L2NIC = 1, + /* Configuration module */ + MGMT_MOD_CFGM = 7, + MGMT_MOD_HILINK = 14, +}; + +static inline void mgmt_msg_params_init_default(struct mgmt_msg_params *msg_params, + void *inout_buff, u32 buff_size) +{ + msg_params->buf_in = inout_buff; + msg_params->buf_out = inout_buff; + msg_params->in_size = buff_size; + msg_params->expected_out_size = buff_size; + msg_params->timeout_ms = 0; +} + +/* COMM commands between driver and FW */ +enum comm_cmd { + /* Commands for clearing FLR and resources */ + COMM_CMD_FUNC_RESET = 0, + COMM_CMD_FEATURE_NEGO = 1, + COMM_CMD_FLUSH_DOORBELL = 2, + COMM_CMD_START_FLUSH = 3, + COMM_CMD_GET_GLOBAL_ATTR = 5, + COMM_CMD_SET_FUNC_SVC_USED_STATE = 7, + + /* Driver Configuration Commands */ + COMM_CMD_SET_CMDQ_CTXT = 20, + COMM_CMD_SET_VAT = 21, + COMM_CMD_CFG_PAGESIZE = 22, + COMM_CMD_CFG_MSIX_CTRL_REG = 23, + COMM_CMD_SET_CEQ_CTRL_REG = 24, + COMM_CMD_SET_DMA_ATTR = 25, +}; + +enum comm_func_reset_bits { + COMM_FUNC_RESET_BIT_FLUSH = BIT(0), + COMM_FUNC_RESET_BIT_MQM = BIT(1), + COMM_FUNC_RESET_BIT_SMF = BIT(2), + COMM_FUNC_RESET_BIT_PF_BW_CFG = BIT(3), + + COMM_FUNC_RESET_BIT_COMM = BIT(10), + /* clear mbox and aeq; COMM_FUNC_RESET_BIT_COMM must also be set */ + COMM_FUNC_RESET_BIT_COMM_MGMT_CH = BIT(11), + /* clear cmdq and ceq; COMM_FUNC_RESET_BIT_COMM must also be set */ + COMM_FUNC_RESET_BIT_COMM_CMD_CH = BIT(12), + COMM_FUNC_RESET_BIT_NIC = BIT(13), +}; + +struct comm_cmd_func_reset { + struct mgmt_msg_head head; + u16 func_id; + u16 rsvd1[3]; + u64 reset_flag; +}; + +#define COMM_MAX_FEATURE_QWORD 4 +struct comm_cmd_feature_nego { + struct mgmt_msg_head head; + u16 func_id; + u8 opcode; + u8 rsvd; + u64 s_feature[COMM_MAX_FEATURE_QWORD]; +}; + +/* Services supported by HW. HW uses these values when delivering events. + * HW supports multiple services that are not yet supported by the driver + * (e.g. RoCE). + */ +enum hinic3_service_type { + HINIC3_SERVICE_T_NIC = 0, + /* MAX is only used by SW for array sizes. */ + HINIC3_SERVICE_T_MAX = 1, +}; + +#endif diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_hwdev.c b/drivers/net/ethernet/huawei/hinic3/hinic3_hwdev.c new file mode 100644 index 0000000000000..6e8788a649258 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_hwdev.c @@ -0,0 +1,24 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. + +#include "hinic3_hw_comm.h" +#include "hinic3_hwdev.h" +#include "hinic3_hwif.h" +#include "hinic3_mbox.h" +#include "hinic3_mgmt.h" + +int hinic3_init_hwdev(struct pci_dev *pdev) +{ + /* Completed by later submission due to LoC limit. */ + return -EFAULT; +} + +void hinic3_free_hwdev(struct hinic3_hwdev *hwdev) +{ + /* Completed by later submission due to LoC limit. */ +} + +void hinic3_set_api_stop(struct hinic3_hwdev *hwdev) +{ + /* Completed by later submission due to LoC limit. 
*/ +} diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_hwdev.h b/drivers/net/ethernet/huawei/hinic3/hinic3_hwdev.h new file mode 100644 index 0000000000000..718398f1ad7c7 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_hwdev.h @@ -0,0 +1,81 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */ + +#ifndef _HINIC3_HWDEV_H_ +#define _HINIC3_HWDEV_H_ + +#include +#include + +#include "hinic3_hw_intf.h" + +struct hinic3_cmdqs; +struct hinic3_hwif; + +enum hinic3_event_service_type { + HINIC3_EVENT_SRV_COMM = 0, + HINIC3_EVENT_SRV_NIC = 1 +}; + +#define HINIC3_SRV_EVENT_TYPE(svc, type) (((svc) << 16) | (type)) + +/* driver-specific data of pci_dev */ +struct hinic3_pcidev { + struct pci_dev *pdev; + struct hinic3_hwdev *hwdev; + /* Auxiliary devices */ + struct hinic3_adev *hadev[HINIC3_SERVICE_T_MAX]; + + void __iomem *cfg_reg_base; + void __iomem *intr_reg_base; + void __iomem *db_base; + u64 db_dwqe_len; + u64 db_base_phy; + + /* lock for attach/detach uld */ + struct mutex pdev_mutex; + unsigned long state; +}; + +struct hinic3_hwdev { + struct hinic3_pcidev *adapter; + struct pci_dev *pdev; + struct device *dev; + int dev_id; + struct hinic3_hwif *hwif; + struct hinic3_cfg_mgmt_info *cfg_mgmt; + struct hinic3_aeqs *aeqs; + struct hinic3_ceqs *ceqs; + struct hinic3_mbox *mbox; + struct hinic3_cmdqs *cmdqs; + struct workqueue_struct *workq; + /* protect channel init and deinit */ + spinlock_t channel_lock; + u64 features[COMM_MAX_FEATURE_QWORD]; + u32 wq_page_size; + u8 max_cmdq; + ulong func_state; +}; + +struct hinic3_event_info { + /* enum hinic3_event_service_type */ + u16 service; + u16 type; + u8 event_data[104]; +}; + +struct hinic3_adev { + struct auxiliary_device adev; + struct hinic3_hwdev *hwdev; + enum hinic3_service_type svc_type; + + void (*event)(struct auxiliary_device *adev, + struct hinic3_event_info *event); +}; + +int hinic3_init_hwdev(struct pci_dev *pdev); +void hinic3_free_hwdev(struct hinic3_hwdev *hwdev); + +void hinic3_set_api_stop(struct hinic3_hwdev *hwdev); + +#endif diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_hwif.c b/drivers/net/ethernet/huawei/hinic3/hinic3_hwif.c new file mode 100644 index 0000000000000..0865453bf0e7c --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_hwif.c @@ -0,0 +1,21 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. + +#include +#include +#include + +#include "hinic3_common.h" +#include "hinic3_hwdev.h" +#include "hinic3_hwif.h" + +void hinic3_set_msix_state(struct hinic3_hwdev *hwdev, u16 msix_idx, + enum hinic3_msix_state flag) +{ + /* Completed by later submission due to LoC limit. */ +} + +u16 hinic3_global_func_id(struct hinic3_hwdev *hwdev) +{ + return hwdev->hwif->attr.func_global_idx; +} diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_hwif.h b/drivers/net/ethernet/huawei/hinic3/hinic3_hwif.h new file mode 100644 index 0000000000000..513c9680e6b61 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_hwif.h @@ -0,0 +1,58 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. 
*/ + +#ifndef _HINIC3_HWIF_H_ +#define _HINIC3_HWIF_H_ + +#include +#include + +struct hinic3_hwdev; + +enum hinic3_func_type { + HINIC3_FUNC_TYPE_VF = 1, +}; + +struct hinic3_db_area { + unsigned long *db_bitmap_array; + u32 db_max_areas; + /* protect doorbell area alloc and free */ + spinlock_t idx_lock; +}; + +struct hinic3_func_attr { + enum hinic3_func_type func_type; + u16 func_global_idx; + u16 global_vf_id_of_pf; + u16 num_irqs; + u16 num_sq; + u8 port_to_port_idx; + u8 pci_intf_idx; + u8 ppf_idx; + u8 num_aeqs; + u8 num_ceqs; + u8 msix_flex_en; +}; + +static_assert(sizeof(struct hinic3_func_attr) == 20); + +struct hinic3_hwif { + u8 __iomem *cfg_regs_base; + u64 db_base_phy; + u64 db_dwqe_len; + u8 __iomem *db_base; + struct hinic3_db_area db_area; + struct hinic3_func_attr attr; +}; + +enum hinic3_msix_state { + HINIC3_MSIX_ENABLE, + HINIC3_MSIX_DISABLE, +}; + +void hinic3_set_msix_state(struct hinic3_hwdev *hwdev, u16 msix_idx, + enum hinic3_msix_state flag); + +u16 hinic3_global_func_id(struct hinic3_hwdev *hwdev); + +#endif diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_irq.c b/drivers/net/ethernet/huawei/hinic3/hinic3_irq.c new file mode 100644 index 0000000000000..dcd971f31140b --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_irq.c @@ -0,0 +1,47 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. + +#include + +#include "hinic3_hw_comm.h" +#include "hinic3_hwdev.h" +#include "hinic3_hwif.h" +#include "hinic3_nic_dev.h" +#include "hinic3_rx.h" +#include "hinic3_tx.h" + +static int hinic3_poll(struct napi_struct *napi, int budget) +{ + struct hinic3_irq_cfg *irq_cfg = + container_of(napi, struct hinic3_irq_cfg, napi); + struct hinic3_nic_dev *nic_dev; + int tx_pkts, rx_pkts; + + nic_dev = netdev_priv(irq_cfg->netdev); + rx_pkts = hinic3_rx_poll(irq_cfg->rxq, budget); + + tx_pkts = hinic3_tx_poll(irq_cfg->txq, budget); + if (tx_pkts >= budget || rx_pkts >= budget) + return budget; + + napi_complete(napi); + + hinic3_set_msix_state(nic_dev->hwdev, irq_cfg->msix_entry_idx, + HINIC3_MSIX_ENABLE); + + return max(tx_pkts, rx_pkts); +} + +void qp_add_napi(struct hinic3_irq_cfg *irq_cfg) +{ + struct hinic3_nic_dev *nic_dev = netdev_priv(irq_cfg->netdev); + + netif_napi_add(nic_dev->netdev, &irq_cfg->napi, hinic3_poll); + napi_enable(&irq_cfg->napi); +} + +void qp_del_napi(struct hinic3_irq_cfg *irq_cfg) +{ + napi_disable(&irq_cfg->napi); + netif_napi_del(&irq_cfg->napi); +} diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_lld.c b/drivers/net/ethernet/huawei/hinic3/hinic3_lld.c new file mode 100644 index 0000000000000..bd653898ceb0b --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_lld.c @@ -0,0 +1,416 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. 
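+
+/* Low-level PCI driver. probe() maps the BARs, initializes the hwdev and
+ * then exposes each service supported by the device (currently only NIC)
+ * as a device on the auxiliary bus; the service drivers, e.g. the netdev
+ * driver in hinic3_main.c, bind to those auxiliary devices. Roughly:
+ *
+ *	hinic3_probe()
+ *	    hinic3_pci_init()	      enable device, request regions, DMA mask
+ *	    hinic3_probe_func()
+ *	        hinic3_mapping_bar()  ioremap config/interrupt/doorbell BARs
+ *	        hinic3_func_init()    hinic3_init_hwdev() + attach aux devices
+ */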
+ +#include +#include + +#include "hinic3_hw_cfg.h" +#include "hinic3_hwdev.h" +#include "hinic3_lld.h" +#include "hinic3_mgmt.h" + +#define HINIC3_VF_PCI_CFG_REG_BAR 0 +#define HINIC3_PCI_INTR_REG_BAR 2 +#define HINIC3_PCI_DB_BAR 4 + +#define HINIC3_EVENT_POLL_SLEEP_US 1000 +#define HINIC3_EVENT_POLL_TIMEOUT_US 10000000 + +static struct hinic3_adev_device { + const char *name; +} hinic3_adev_devices[HINIC3_SERVICE_T_MAX] = { + [HINIC3_SERVICE_T_NIC] = { + .name = "nic", + }, +}; + +static bool hinic3_adev_svc_supported(struct hinic3_hwdev *hwdev, + enum hinic3_service_type svc_type) +{ + switch (svc_type) { + case HINIC3_SERVICE_T_NIC: + return hinic3_support_nic(hwdev); + default: + break; + } + + return false; +} + +static void hinic3_comm_adev_release(struct device *dev) +{ + struct hinic3_adev *hadev = container_of(dev, struct hinic3_adev, adev.dev); + + kfree(hadev); +} + +static struct hinic3_adev *hinic3_add_one_adev(struct hinic3_hwdev *hwdev, + enum hinic3_service_type svc_type) +{ + struct hinic3_adev *hadev; + const char *svc_name; + int ret; + + hadev = kzalloc(sizeof(*hadev), GFP_KERNEL); + if (!hadev) + return NULL; + + svc_name = hinic3_adev_devices[svc_type].name; + hadev->adev.name = svc_name; + hadev->adev.id = hwdev->dev_id; + hadev->adev.dev.parent = hwdev->dev; + hadev->adev.dev.release = hinic3_comm_adev_release; + hadev->svc_type = svc_type; + hadev->hwdev = hwdev; + + ret = auxiliary_device_init(&hadev->adev); + if (ret) { + dev_err(hwdev->dev, "failed init adev %s %u\n", + svc_name, hwdev->dev_id); + kfree(hadev); + return NULL; + } + + ret = auxiliary_device_add(&hadev->adev); + if (ret) { + dev_err(hwdev->dev, "failed to add adev %s %u\n", + svc_name, hwdev->dev_id); + auxiliary_device_uninit(&hadev->adev); + return NULL; + } + + return hadev; +} + +static void hinic3_del_one_adev(struct hinic3_hwdev *hwdev, + enum hinic3_service_type svc_type) +{ + struct hinic3_pcidev *pci_adapter = hwdev->adapter; + struct hinic3_adev *hadev; + int timeout; + bool state; + + timeout = read_poll_timeout(test_and_set_bit, state, !state, + HINIC3_EVENT_POLL_SLEEP_US, + HINIC3_EVENT_POLL_TIMEOUT_US, + false, svc_type, &pci_adapter->state); + + hadev = pci_adapter->hadev[svc_type]; + auxiliary_device_delete(&hadev->adev); + auxiliary_device_uninit(&hadev->adev); + pci_adapter->hadev[svc_type] = NULL; + if (!timeout) + clear_bit(svc_type, &pci_adapter->state); +} + +static int hinic3_attach_aux_devices(struct hinic3_hwdev *hwdev) +{ + struct hinic3_pcidev *pci_adapter = hwdev->adapter; + enum hinic3_service_type svc_type; + + mutex_lock(&pci_adapter->pdev_mutex); + + for (svc_type = 0; svc_type < HINIC3_SERVICE_T_MAX; svc_type++) { + if (!hinic3_adev_svc_supported(hwdev, svc_type)) + continue; + + pci_adapter->hadev[svc_type] = hinic3_add_one_adev(hwdev, svc_type); + if (!pci_adapter->hadev[svc_type]) + goto err_add_one_adev; + } + mutex_unlock(&pci_adapter->pdev_mutex); + return 0; + +err_add_one_adev: + while (svc_type > 0) { + svc_type--; + if (pci_adapter->hadev[svc_type]) { + hinic3_del_one_adev(hwdev, svc_type); + pci_adapter->hadev[svc_type] = NULL; + } + } + mutex_unlock(&pci_adapter->pdev_mutex); + return -ENOMEM; +} + +static void hinic3_detach_aux_devices(struct hinic3_hwdev *hwdev) +{ + struct hinic3_pcidev *pci_adapter = hwdev->adapter; + int i; + + mutex_lock(&pci_adapter->pdev_mutex); + for (i = 0; i < ARRAY_SIZE(hinic3_adev_devices); i++) { + if (pci_adapter->hadev[i]) + hinic3_del_one_adev(hwdev, i); + } + mutex_unlock(&pci_adapter->pdev_mutex); +} + +struct 
hinic3_hwdev *hinic3_adev_get_hwdev(struct auxiliary_device *adev) +{ + struct hinic3_adev *hadev; + + hadev = container_of(adev, struct hinic3_adev, adev); + return hadev->hwdev; +} + +void hinic3_adev_event_register(struct auxiliary_device *adev, + void (*event_handler)(struct auxiliary_device *adev, + struct hinic3_event_info *event)) +{ + struct hinic3_adev *hadev; + + hadev = container_of(adev, struct hinic3_adev, adev); + hadev->event = event_handler; +} + +void hinic3_adev_event_unregister(struct auxiliary_device *adev) +{ + struct hinic3_adev *hadev; + + hadev = container_of(adev, struct hinic3_adev, adev); + hadev->event = NULL; +} + +static int hinic3_mapping_bar(struct pci_dev *pdev, + struct hinic3_pcidev *pci_adapter) +{ + pci_adapter->cfg_reg_base = pci_ioremap_bar(pdev, + HINIC3_VF_PCI_CFG_REG_BAR); + if (!pci_adapter->cfg_reg_base) { + dev_err(&pdev->dev, "Failed to map configuration regs\n"); + return -ENOMEM; + } + + pci_adapter->intr_reg_base = pci_ioremap_bar(pdev, + HINIC3_PCI_INTR_REG_BAR); + if (!pci_adapter->intr_reg_base) { + dev_err(&pdev->dev, "Failed to map interrupt regs\n"); + goto err_undo_reg_bar; + } + + pci_adapter->db_base_phy = pci_resource_start(pdev, HINIC3_PCI_DB_BAR); + pci_adapter->db_dwqe_len = pci_resource_len(pdev, HINIC3_PCI_DB_BAR); + pci_adapter->db_base = pci_ioremap_bar(pdev, HINIC3_PCI_DB_BAR); + if (!pci_adapter->db_base) { + dev_err(&pdev->dev, "Failed to map doorbell regs\n"); + goto err_undo_intr_bar; + } + + return 0; + +err_undo_intr_bar: + iounmap(pci_adapter->intr_reg_base); + +err_undo_reg_bar: + iounmap(pci_adapter->cfg_reg_base); + + return -ENOMEM; +} + +static void hinic3_unmapping_bar(struct hinic3_pcidev *pci_adapter) +{ + iounmap(pci_adapter->db_base); + iounmap(pci_adapter->intr_reg_base); + iounmap(pci_adapter->cfg_reg_base); +} + +static int hinic3_pci_init(struct pci_dev *pdev) +{ + struct hinic3_pcidev *pci_adapter; + int err; + + pci_adapter = kzalloc(sizeof(*pci_adapter), GFP_KERNEL); + if (!pci_adapter) + return -ENOMEM; + + pci_adapter->pdev = pdev; + mutex_init(&pci_adapter->pdev_mutex); + + pci_set_drvdata(pdev, pci_adapter); + + err = pci_enable_device(pdev); + if (err) { + dev_err(&pdev->dev, "Failed to enable PCI device\n"); + goto err_pci_enable; + } + + err = pci_request_regions(pdev, HINIC3_NIC_DRV_NAME); + if (err) { + dev_err(&pdev->dev, "Failed to request regions\n"); + goto err_pci_regions; + } + + pci_set_master(pdev); + + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (err) { + dev_warn(&pdev->dev, "Couldn't set 64-bit DMA mask\n"); + /* try 32 bit DMA mask if 64 bit fails */ + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (err) { + dev_err(&pdev->dev, "Failed to set DMA mask\n"); + goto err_dma_mask; + } + } + + return 0; + +err_dma_mask: + pci_clear_master(pdev); + pci_release_regions(pdev); + +err_pci_regions: + pci_disable_device(pdev); + +err_pci_enable: + pci_set_drvdata(pdev, NULL); + mutex_destroy(&pci_adapter->pdev_mutex); + kfree(pci_adapter); + + return err; +} + +static void hinic3_pci_deinit(struct pci_dev *pdev) +{ + struct hinic3_pcidev *pci_adapter = pci_get_drvdata(pdev); + + pci_clear_master(pdev); + pci_release_regions(pdev); + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); + mutex_destroy(&pci_adapter->pdev_mutex); + kfree(pci_adapter); +} + +static int hinic3_func_init(struct pci_dev *pdev, struct hinic3_pcidev *pci_adapter) +{ + int err; + + err = hinic3_init_hwdev(pdev); + if (err) { + dev_err(&pdev->dev, "Failed to initialize 
hardware device\n"); + return -EFAULT; + } + + err = hinic3_attach_aux_devices(pci_adapter->hwdev); + if (err) + goto err_attach_aux_devices; + + return 0; + +err_attach_aux_devices: + hinic3_free_hwdev(pci_adapter->hwdev); + + return err; +} + +static void hinic3_func_deinit(struct pci_dev *pdev) +{ + struct hinic3_pcidev *pci_adapter = pci_get_drvdata(pdev); + + hinic3_detach_aux_devices(pci_adapter->hwdev); + hinic3_free_hwdev(pci_adapter->hwdev); +} + +static int hinic3_probe_func(struct hinic3_pcidev *pci_adapter) +{ + struct pci_dev *pdev = pci_adapter->pdev; + int err; + + err = hinic3_mapping_bar(pdev, pci_adapter); + if (err) { + dev_err(&pdev->dev, "Failed to map bar\n"); + goto err_map_bar; + } + + err = hinic3_func_init(pdev, pci_adapter); + if (err) + goto err_func_init; + + return 0; + +err_func_init: + hinic3_unmapping_bar(pci_adapter); + +err_map_bar: + dev_err(&pdev->dev, "PCIe device probe function failed\n"); + return err; +} + +static void hinic3_remove_func(struct hinic3_pcidev *pci_adapter) +{ + struct pci_dev *pdev = pci_adapter->pdev; + + hinic3_func_deinit(pdev); + hinic3_unmapping_bar(pci_adapter); +} + +static int hinic3_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct hinic3_pcidev *pci_adapter; + int err; + + err = hinic3_pci_init(pdev); + if (err) + goto out; + + pci_adapter = pci_get_drvdata(pdev); + err = hinic3_probe_func(pci_adapter); + if (err) + goto err_hinic3_probe_func; + + return 0; + +err_hinic3_probe_func: + hinic3_pci_deinit(pdev); + +out: + dev_err(&pdev->dev, "PCIe device probe failed\n"); + return err; +} + +static void hinic3_remove(struct pci_dev *pdev) +{ + struct hinic3_pcidev *pci_adapter = pci_get_drvdata(pdev); + + hinic3_remove_func(pci_adapter); + hinic3_pci_deinit(pdev); +} + +static const struct pci_device_id hinic3_pci_table[] = { + /* Completed by later submission due to LoC limit. */ + {0, 0} +}; + +MODULE_DEVICE_TABLE(pci, hinic3_pci_table); + +static void hinic3_shutdown(struct pci_dev *pdev) +{ + struct hinic3_pcidev *pci_adapter = pci_get_drvdata(pdev); + + pci_disable_device(pdev); + + if (pci_adapter) + hinic3_set_api_stop(pci_adapter->hwdev); +} + +static struct pci_driver hinic3_driver = { + .name = HINIC3_NIC_DRV_NAME, + .id_table = hinic3_pci_table, + .probe = hinic3_probe, + .remove = hinic3_remove, + .shutdown = hinic3_shutdown, + .sriov_configure = pci_sriov_configure_simple +}; + +int hinic3_lld_init(void) +{ + return pci_register_driver(&hinic3_driver); +} + +void hinic3_lld_exit(void) +{ + pci_unregister_driver(&hinic3_driver); +} diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_lld.h b/drivers/net/ethernet/huawei/hinic3/hinic3_lld.h new file mode 100644 index 0000000000000..322b448034763 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_lld.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. 
*/ + +#ifndef _HINIC3_LLD_H_ +#define _HINIC3_LLD_H_ + +#include <linux/auxiliary_bus.h> + +struct hinic3_event_info; + +#define HINIC3_NIC_DRV_NAME "hinic3" + +int hinic3_lld_init(void); +void hinic3_lld_exit(void); +void hinic3_adev_event_register(struct auxiliary_device *adev, + void (*event_handler)(struct auxiliary_device *adev, + struct hinic3_event_info *event)); +void hinic3_adev_event_unregister(struct auxiliary_device *adev); +struct hinic3_hwdev *hinic3_adev_get_hwdev(struct auxiliary_device *adev); + +#endif diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_main.c b/drivers/net/ethernet/huawei/hinic3/hinic3_main.c new file mode 100644 index 0000000000000..b10def13d78c5 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_main.c @@ -0,0 +1,357 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. + +#include <linux/etherdevice.h> +#include <linux/netdevice.h> + +#include "hinic3_common.h" +#include "hinic3_hw_comm.h" +#include "hinic3_hwdev.h" +#include "hinic3_hwif.h" +#include "hinic3_lld.h" +#include "hinic3_nic_cfg.h" +#include "hinic3_nic_dev.h" +#include "hinic3_nic_io.h" +#include "hinic3_rx.h" +#include "hinic3_tx.h" + +#define HINIC3_NIC_DRV_DESC "Intelligent Network Interface Card Driver" + +#define HINIC3_RX_BUFF_LEN 2048 +#define HINIC3_RX_BUFF_NUM_PER_PAGE 2 +#define HINIC3_LRO_REPLENISH_THLD 256 +#define HINIC3_NIC_DEV_WQ_NAME "hinic3_nic_dev_wq" + +#define HINIC3_SQ_DEPTH 1024 +#define HINIC3_RQ_DEPTH 1024 + +static int hinic3_alloc_txrxqs(struct net_device *netdev) +{ + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); + struct hinic3_hwdev *hwdev = nic_dev->hwdev; + int err; + + err = hinic3_alloc_txqs(netdev); + if (err) { + dev_err(hwdev->dev, "Failed to alloc txqs\n"); + return err; + } + + err = hinic3_alloc_rxqs(netdev); + if (err) { + dev_err(hwdev->dev, "Failed to alloc rxqs\n"); + goto err_alloc_rxqs; + } + + return 0; + +err_alloc_rxqs: + hinic3_free_txqs(netdev); + + return err; +} + +static void hinic3_free_txrxqs(struct net_device *netdev) +{ + hinic3_free_rxqs(netdev); + hinic3_free_txqs(netdev); +} + +static int hinic3_init_nic_dev(struct net_device *netdev, + struct hinic3_hwdev *hwdev) +{ + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); + struct pci_dev *pdev = hwdev->pdev; + u32 page_num; + + nic_dev->netdev = netdev; + SET_NETDEV_DEV(netdev, &pdev->dev); + nic_dev->hwdev = hwdev; + nic_dev->pdev = pdev; + + nic_dev->rx_buff_len = HINIC3_RX_BUFF_LEN; + nic_dev->dma_rx_buff_size = HINIC3_RX_BUFF_NUM_PER_PAGE * nic_dev->rx_buff_len; + page_num = nic_dev->dma_rx_buff_size / HINIC3_MIN_PAGE_SIZE; + nic_dev->page_order = page_num > 0 ? ilog2(page_num) : 0; + nic_dev->lro_replenish_thld = HINIC3_LRO_REPLENISH_THLD; + nic_dev->nic_svc_cap = hwdev->cfg_mgmt->cap.nic_svc_cap; + + return 0; +} + +static int hinic3_sw_init(struct net_device *netdev) +{ + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); + struct hinic3_hwdev *hwdev = nic_dev->hwdev; + int err; + + nic_dev->q_params.sq_depth = HINIC3_SQ_DEPTH; + nic_dev->q_params.rq_depth = HINIC3_RQ_DEPTH; + + /* VF driver always uses a random MAC address. During VM migration to a + * new device, the new device should learn the VM's old MAC rather than + * provide its own MAC. The product design assumes that every VF is + * susceptible to migration so the device avoids offering MAC addresses + * to VFs.
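+	 * The random address generated below is then registered with the
+	 * device through hinic3_set_mac() so that the NIC function table
+	 * knows about it.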
+ */ + eth_hw_addr_random(netdev); + err = hinic3_set_mac(hwdev, netdev->dev_addr, 0, + hinic3_global_func_id(hwdev)); + if (err) { + dev_err(hwdev->dev, "Failed to set default MAC\n"); + return err; + } + + err = hinic3_alloc_txrxqs(netdev); + if (err) { + dev_err(hwdev->dev, "Failed to alloc qps\n"); + goto err_alloc_qps; + } + + return 0; + +err_alloc_qps: + hinic3_del_mac(hwdev, netdev->dev_addr, 0, hinic3_global_func_id(hwdev)); + + return err; +} + +static void hinic3_sw_deinit(struct net_device *netdev) +{ + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); + + hinic3_free_txrxqs(netdev); + hinic3_del_mac(nic_dev->hwdev, netdev->dev_addr, 0, + hinic3_global_func_id(nic_dev->hwdev)); +} + +static void hinic3_assign_netdev_ops(struct net_device *netdev) +{ + hinic3_set_netdev_ops(netdev); +} + +static void netdev_feature_init(struct net_device *netdev) +{ + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); + netdev_features_t cso_fts = 0; + netdev_features_t tso_fts = 0; + netdev_features_t dft_fts; + + dft_fts = NETIF_F_SG | NETIF_F_HIGHDMA; + if (hinic3_test_support(nic_dev, HINIC3_NIC_F_CSUM)) + cso_fts |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM; + if (hinic3_test_support(nic_dev, HINIC3_NIC_F_SCTP_CRC)) + cso_fts |= NETIF_F_SCTP_CRC; + if (hinic3_test_support(nic_dev, HINIC3_NIC_F_TSO)) + tso_fts |= NETIF_F_TSO | NETIF_F_TSO6; + + netdev->features |= dft_fts | cso_fts | tso_fts; +} + +static int hinic3_set_default_hw_feature(struct net_device *netdev) +{ + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); + struct hinic3_hwdev *hwdev = nic_dev->hwdev; + int err; + + err = hinic3_set_nic_feature_to_hw(nic_dev); + if (err) { + dev_err(hwdev->dev, "Failed to set nic features\n"); + return err; + } + + return 0; +} + +static void hinic3_link_status_change(struct net_device *netdev, bool link_status_up) +{ + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); + + if (!HINIC3_CHANNEL_RES_VALID(nic_dev)) + return; + + if (link_status_up) { + if (netif_carrier_ok(netdev)) + return; + + nic_dev->link_status_up = true; + netif_carrier_on(netdev); + netdev_dbg(netdev, "Link is up\n"); + } else { + if (!netif_carrier_ok(netdev)) + return; + + nic_dev->link_status_up = false; + netif_carrier_off(netdev); + netdev_dbg(netdev, "Link is down\n"); + } +} + +static void hinic3_nic_event(struct auxiliary_device *adev, + struct hinic3_event_info *event) +{ + struct hinic3_nic_dev *nic_dev = dev_get_drvdata(&adev->dev); + struct net_device *netdev; + + netdev = nic_dev->netdev; + + switch (HINIC3_SRV_EVENT_TYPE(event->service, event->type)) { + case HINIC3_SRV_EVENT_TYPE(HINIC3_EVENT_SRV_NIC, HINIC3_NIC_EVENT_LINK_UP): + hinic3_link_status_change(netdev, true); + break; + case HINIC3_SRV_EVENT_TYPE(HINIC3_EVENT_SRV_NIC, HINIC3_NIC_EVENT_LINK_DOWN): + hinic3_link_status_change(netdev, false); + break; + default: + break; + } +} + +static int hinic3_nic_probe(struct auxiliary_device *adev, + const struct auxiliary_device_id *id) +{ + struct hinic3_hwdev *hwdev = hinic3_adev_get_hwdev(adev); + struct pci_dev *pdev = hwdev->pdev; + struct hinic3_nic_dev *nic_dev; + struct net_device *netdev; + u16 max_qps, glb_func_id; + int err; + + if (!hinic3_support_nic(hwdev)) { + dev_dbg(&adev->dev, "HW doesn't support nic\n"); + return 0; + } + + hinic3_adev_event_register(adev, hinic3_nic_event); + + glb_func_id = hinic3_global_func_id(hwdev); + err = hinic3_func_reset(hwdev, glb_func_id, COMM_FUNC_RESET_BIT_NIC); + if (err) { + dev_err(&adev->dev, "Failed to reset function\n"); + goto 
err_undo_event_register; + } + + max_qps = hinic3_func_max_qnum(hwdev); + netdev = alloc_etherdev_mq(sizeof(*nic_dev), max_qps); + if (!netdev) { + dev_err(&adev->dev, "Failed to allocate netdev\n"); + err = -ENOMEM; + goto err_undo_event_register; + } + + nic_dev = netdev_priv(netdev); + dev_set_drvdata(&adev->dev, nic_dev); + err = hinic3_init_nic_dev(netdev, hwdev); + if (err) + goto err_undo_netdev_alloc; + + err = hinic3_init_nic_io(nic_dev); + if (err) + goto err_undo_netdev_alloc; + + err = hinic3_sw_init(netdev); + if (err) + goto err_sw_init; + + hinic3_assign_netdev_ops(netdev); + + netdev_feature_init(netdev); + err = hinic3_set_default_hw_feature(netdev); + if (err) + goto err_set_features; + + err = register_netdev(netdev); + if (err) + goto err_register_netdev; + + netif_carrier_off(netdev); + return 0; + +err_register_netdev: + hinic3_update_nic_feature(nic_dev, 0); + hinic3_set_nic_feature_to_hw(nic_dev); + +err_set_features: + hinic3_sw_deinit(netdev); + +err_sw_init: + hinic3_free_nic_io(nic_dev); + +err_undo_netdev_alloc: + free_netdev(netdev); + +err_undo_event_register: + hinic3_adev_event_unregister(adev); + dev_err(&pdev->dev, "NIC service probe failed\n"); + + return err; +} + +static void hinic3_nic_remove(struct auxiliary_device *adev) +{ + struct hinic3_nic_dev *nic_dev = dev_get_drvdata(&adev->dev); + struct net_device *netdev; + + if (!hinic3_support_nic(nic_dev->hwdev)) + return; + + netdev = nic_dev->netdev; + unregister_netdev(netdev); + + hinic3_update_nic_feature(nic_dev, 0); + hinic3_set_nic_feature_to_hw(nic_dev); + hinic3_sw_deinit(netdev); + + hinic3_free_nic_io(nic_dev); + + free_netdev(netdev); +} + +static const struct auxiliary_device_id hinic3_nic_id_table[] = { + { + .name = HINIC3_NIC_DRV_NAME ".nic", + }, + {}, +}; + +static struct auxiliary_driver hinic3_nic_driver = { + .probe = hinic3_nic_probe, + .remove = hinic3_nic_remove, + .suspend = NULL, + .resume = NULL, + .name = "nic", + .id_table = hinic3_nic_id_table, +}; + +static __init int hinic3_nic_lld_init(void) +{ + int err; + + pr_info("%s: %s\n", HINIC3_NIC_DRV_NAME, HINIC3_NIC_DRV_DESC); + + err = hinic3_lld_init(); + if (err) + return err; + + err = auxiliary_driver_register(&hinic3_nic_driver); + if (err) { + hinic3_lld_exit(); + return err; + } + + return 0; +} + +static __exit void hinic3_nic_lld_exit(void) +{ + auxiliary_driver_unregister(&hinic3_nic_driver); + + hinic3_lld_exit(); +} + +module_init(hinic3_nic_lld_init); +module_exit(hinic3_nic_lld_exit); + +MODULE_AUTHOR("Huawei Technologies CO., Ltd"); +MODULE_DESCRIPTION(HINIC3_NIC_DRV_DESC); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_mbox.c b/drivers/net/ethernet/huawei/hinic3/hinic3_mbox.c new file mode 100644 index 0000000000000..e74d1eb097301 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_mbox.c @@ -0,0 +1,16 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. + +#include + +#include "hinic3_common.h" +#include "hinic3_hwdev.h" +#include "hinic3_hwif.h" +#include "hinic3_mbox.h" + +int hinic3_send_mbox_to_mgmt(struct hinic3_hwdev *hwdev, u8 mod, u16 cmd, + const struct mgmt_msg_params *msg_params) +{ + /* Completed by later submission due to LoC limit. 
*/ + return -EFAULT; +} diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_mbox.h b/drivers/net/ethernet/huawei/hinic3/hinic3_mbox.h new file mode 100644 index 0000000000000..d7a6c37b7eff7 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_mbox.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */ + +#ifndef _HINIC3_MBOX_H_ +#define _HINIC3_MBOX_H_ + +#include +#include + +struct hinic3_hwdev; + +int hinic3_send_mbox_to_mgmt(struct hinic3_hwdev *hwdev, u8 mod, u16 cmd, + const struct mgmt_msg_params *msg_params); + +#endif diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt.h b/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt.h new file mode 100644 index 0000000000000..4edabeb321122 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */ + +#ifndef _HINIC3_MGMT_H_ +#define _HINIC3_MGMT_H_ + +#include + +struct hinic3_hwdev; + +void hinic3_flush_mgmt_workq(struct hinic3_hwdev *hwdev); + +#endif diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt_interface.h b/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt_interface.h new file mode 100644 index 0000000000000..c4434efdc7f77 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_mgmt_interface.h @@ -0,0 +1,105 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */ + +#ifndef _HINIC3_MGMT_INTERFACE_H_ +#define _HINIC3_MGMT_INTERFACE_H_ + +#include <linux/bits.h> +#include <linux/if_ether.h> +#include <linux/types.h> + +#include "hinic3_hw_intf.h" + +struct l2nic_cmd_feature_nego { + struct mgmt_msg_head msg_head; + u16 func_id; + u8 opcode; + u8 rsvd; + u64 s_feature[4]; +}; + +enum l2nic_func_tbl_cfg_bitmap { + L2NIC_FUNC_TBL_CFG_INIT = 0, + L2NIC_FUNC_TBL_CFG_RX_BUF_SIZE = 1, + L2NIC_FUNC_TBL_CFG_MTU = 2, +}; + +struct l2nic_func_tbl_cfg { + u16 rx_wqe_buf_size; + u16 mtu; + u32 rsvd[9]; +}; + +struct l2nic_cmd_set_func_tbl { + struct mgmt_msg_head msg_head; + u16 func_id; + u16 rsvd; + u32 cfg_bitmap; + struct l2nic_func_tbl_cfg tbl_cfg; +}; + +struct l2nic_cmd_set_mac { + struct mgmt_msg_head msg_head; + u16 func_id; + u16 vlan_id; + u16 rsvd1; + u8 mac[ETH_ALEN]; +}; + +struct l2nic_cmd_update_mac { + struct mgmt_msg_head msg_head; + u16 func_id; + u16 vlan_id; + u16 rsvd1; + u8 old_mac[ETH_ALEN]; + u16 rsvd2; + u8 new_mac[ETH_ALEN]; +}; + +struct l2nic_cmd_force_pkt_drop { + struct mgmt_msg_head msg_head; + u8 port; + u8 rsvd1[3]; +}; + +/* Commands between NIC and FW */ +enum l2nic_cmd { + /* FUNC CFG */ + L2NIC_CMD_SET_FUNC_TBL = 5, + L2NIC_CMD_SET_VPORT_ENABLE = 6, + L2NIC_CMD_SET_SQ_CI_ATTR = 8, + L2NIC_CMD_CLEAR_QP_RESOURCE = 11, + L2NIC_CMD_FEATURE_NEGO = 15, + L2NIC_CMD_SET_MAC = 21, + L2NIC_CMD_DEL_MAC = 22, + L2NIC_CMD_UPDATE_MAC = 23, + L2NIC_CMD_CFG_RSS = 60, + L2NIC_CMD_CFG_RSS_HASH_KEY = 63, + L2NIC_CMD_CFG_RSS_HASH_ENGINE = 64, + L2NIC_CMD_SET_RSS_CTX_TBL = 65, + L2NIC_CMD_QOS_DCB_STATE = 110, + L2NIC_CMD_FORCE_PKT_DROP = 113, + L2NIC_CMD_MAX = 256, +}; + +enum hinic3_nic_feature_cap { + HINIC3_NIC_F_CSUM = BIT(0), + HINIC3_NIC_F_SCTP_CRC = BIT(1), + HINIC3_NIC_F_TSO = BIT(2), + HINIC3_NIC_F_LRO = BIT(3), + HINIC3_NIC_F_UFO = BIT(4), + HINIC3_NIC_F_RSS = BIT(5), + HINIC3_NIC_F_RX_VLAN_FILTER = BIT(6), + HINIC3_NIC_F_RX_VLAN_STRIP = BIT(7), + HINIC3_NIC_F_TX_VLAN_INSERT = BIT(8), + HINIC3_NIC_F_VXLAN_OFFLOAD = BIT(9), + HINIC3_NIC_F_FDIR = BIT(11),
+ HINIC3_NIC_F_PROMISC = BIT(12), + HINIC3_NIC_F_ALLMULTI = BIT(13), + HINIC3_NIC_F_RATE_LIMIT = BIT(16), +}; + +#define HINIC3_NIC_F_ALL_MASK 0x33bff +#define HINIC3_NIC_DRV_DEFAULT_FEATURE 0x3f03f + +#endif diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_netdev_ops.c b/drivers/net/ethernet/huawei/hinic3/hinic3_netdev_ops.c new file mode 100644 index 0000000000000..4a3243bc92b15 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_netdev_ops.c @@ -0,0 +1,77 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. + +#include +#include + +#include "hinic3_hwif.h" +#include "hinic3_nic_cfg.h" +#include "hinic3_nic_dev.h" +#include "hinic3_nic_io.h" +#include "hinic3_rx.h" +#include "hinic3_tx.h" + +static int hinic3_open(struct net_device *netdev) +{ + /* Completed by later submission due to LoC limit. */ + return -EFAULT; +} + +static int hinic3_close(struct net_device *netdev) +{ + /* Completed by later submission due to LoC limit. */ + return -EFAULT; +} + +static int hinic3_change_mtu(struct net_device *netdev, int new_mtu) +{ + int err; + + err = hinic3_set_port_mtu(netdev, new_mtu); + if (err) { + netdev_err(netdev, "Failed to change port mtu to %d\n", new_mtu); + return err; + } + + netdev_dbg(netdev, "Change mtu from %u to %d\n", netdev->mtu, new_mtu); + WRITE_ONCE(netdev->mtu, new_mtu); + + return 0; +} + +static int hinic3_set_mac_addr(struct net_device *netdev, void *addr) +{ + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); + struct sockaddr *saddr = addr; + int err; + + if (!is_valid_ether_addr(saddr->sa_data)) + return -EADDRNOTAVAIL; + + if (ether_addr_equal(netdev->dev_addr, saddr->sa_data)) + return 0; + + err = hinic3_update_mac(nic_dev->hwdev, netdev->dev_addr, + saddr->sa_data, 0, + hinic3_global_func_id(nic_dev->hwdev)); + + if (err) + return err; + + eth_hw_addr_set(netdev, saddr->sa_data); + + return 0; +} + +static const struct net_device_ops hinic3_netdev_ops = { + .ndo_open = hinic3_open, + .ndo_stop = hinic3_close, + .ndo_change_mtu = hinic3_change_mtu, + .ndo_set_mac_address = hinic3_set_mac_addr, + .ndo_start_xmit = hinic3_xmit_frame, +}; + +void hinic3_set_netdev_ops(struct net_device *netdev) +{ + netdev->netdev_ops = &hinic3_netdev_ops; +} diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.c b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.c new file mode 100644 index 0000000000000..45fd6d0a0d3e7 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.c @@ -0,0 +1,230 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. 
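+
+/* NIC configuration over the management mailbox. The helpers in this file
+ * share one pattern: fill an l2nic command structure, wrap it with
+ * mgmt_msg_params_init_default() (the same buffer carries the request and
+ * the reply), send it with hinic3_send_mbox_to_mgmt(), then check both the
+ * transport error code and the status byte the firmware echoes back in
+ * msg_head. For example, hinic3_force_drop_tx_pkt() below boils down to:
+ *
+ *	struct l2nic_cmd_force_pkt_drop pkt_drop = {};
+ *	struct mgmt_msg_params msg_params = {};
+ *
+ *	pkt_drop.port = hinic3_physical_port_id(hwdev);
+ *	mgmt_msg_params_init_default(&msg_params, &pkt_drop,
+ *				     sizeof(pkt_drop));
+ *	err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_L2NIC,
+ *				       L2NIC_CMD_FORCE_PKT_DROP, &msg_params);
+ */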
+ +#include <linux/if_vlan.h> + +#include "hinic3_hwdev.h" +#include "hinic3_hwif.h" +#include "hinic3_mbox.h" +#include "hinic3_nic_cfg.h" +#include "hinic3_nic_dev.h" +#include "hinic3_nic_io.h" + +static int hinic3_feature_nego(struct hinic3_hwdev *hwdev, u8 opcode, + u64 *s_feature, u16 size) +{ + struct l2nic_cmd_feature_nego feature_nego = {}; + struct mgmt_msg_params msg_params = {}; + int err; + + feature_nego.func_id = hinic3_global_func_id(hwdev); + feature_nego.opcode = opcode; + if (opcode == MGMT_MSG_CMD_OP_SET) + memcpy(feature_nego.s_feature, s_feature, size * sizeof(u64)); + + mgmt_msg_params_init_default(&msg_params, &feature_nego, + sizeof(feature_nego)); + + err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_L2NIC, + L2NIC_CMD_FEATURE_NEGO, &msg_params); + if (err || feature_nego.msg_head.status) { + dev_err(hwdev->dev, "Failed to negotiate nic feature, err:%d, status: 0x%x\n", + err, feature_nego.msg_head.status); + return -EIO; + } + + if (opcode == MGMT_MSG_CMD_OP_GET) + memcpy(s_feature, feature_nego.s_feature, size * sizeof(u64)); + + return 0; +} + +int hinic3_set_nic_feature_to_hw(struct hinic3_nic_dev *nic_dev) +{ + return hinic3_feature_nego(nic_dev->hwdev, MGMT_MSG_CMD_OP_SET, + &nic_dev->nic_io->feature_cap, 1); +} + +bool hinic3_test_support(struct hinic3_nic_dev *nic_dev, + enum hinic3_nic_feature_cap feature_bits) +{ + return (nic_dev->nic_io->feature_cap & feature_bits) == feature_bits; +} + +void hinic3_update_nic_feature(struct hinic3_nic_dev *nic_dev, u64 feature_cap) +{ + nic_dev->nic_io->feature_cap = feature_cap; +} + +static int hinic3_set_function_table(struct hinic3_hwdev *hwdev, u32 cfg_bitmap, + const struct l2nic_func_tbl_cfg *cfg) +{ + struct l2nic_cmd_set_func_tbl cmd_func_tbl = {}; + struct mgmt_msg_params msg_params = {}; + int err; + + cmd_func_tbl.func_id = hinic3_global_func_id(hwdev); + cmd_func_tbl.cfg_bitmap = cfg_bitmap; + cmd_func_tbl.tbl_cfg = *cfg; + + mgmt_msg_params_init_default(&msg_params, &cmd_func_tbl, + sizeof(cmd_func_tbl)); + + err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_L2NIC, + L2NIC_CMD_SET_FUNC_TBL, &msg_params); + if (err || cmd_func_tbl.msg_head.status) { + dev_err(hwdev->dev, + "Failed to set func table, bitmap: 0x%x, err: %d, status: 0x%x\n", + cfg_bitmap, err, cmd_func_tbl.msg_head.status); + return -EFAULT; + } + + return 0; +} + +int hinic3_set_port_mtu(struct net_device *netdev, u16 new_mtu) +{ + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); + struct l2nic_func_tbl_cfg func_tbl_cfg = {}; + struct hinic3_hwdev *hwdev = nic_dev->hwdev; + + func_tbl_cfg.mtu = new_mtu; + return hinic3_set_function_table(hwdev, BIT(L2NIC_FUNC_TBL_CFG_MTU), + &func_tbl_cfg); +} + +static int hinic3_check_mac_info(struct hinic3_hwdev *hwdev, u8 status, u16 vlan_id) +{ + if ((status && status != MGMT_STATUS_EXIST) || + ((vlan_id & BIT(15)) && status == MGMT_STATUS_EXIST)) { + return -EINVAL; + } + + return 0; +} + +int hinic3_set_mac(struct hinic3_hwdev *hwdev, const u8 *mac_addr, u16 vlan_id, u16 func_id) +{ + struct l2nic_cmd_set_mac mac_info = {}; + struct mgmt_msg_params msg_params = {}; + int err; + + if ((vlan_id & HINIC3_VLAN_ID_MASK) >= VLAN_N_VID) { + dev_err(hwdev->dev, "Invalid VLAN number: %d\n", + (vlan_id & HINIC3_VLAN_ID_MASK)); + return -EINVAL; + } + + mac_info.func_id = func_id; + mac_info.vlan_id = vlan_id; + ether_addr_copy(mac_info.mac, mac_addr); + + mgmt_msg_params_init_default(&msg_params, &mac_info, sizeof(mac_info)); + + err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_L2NIC, + L2NIC_CMD_SET_MAC, &msg_params); + if (err || hinic3_check_mac_info(hwdev, mac_info.msg_head.status, + mac_info.vlan_id)) { + dev_err(hwdev->dev, + "Failed to set MAC, err: %d, status: 0x%x\n", + err, mac_info.msg_head.status); + return -EIO; + } + + if (mac_info.msg_head.status == MGMT_STATUS_PF_SET_VF_ALREADY) { + dev_warn(hwdev->dev, "PF has already set the VF MAC, ignoring the set operation\n"); + return 0; + } + + if (mac_info.msg_head.status == MGMT_STATUS_EXIST) { + dev_warn(hwdev->dev, "MAC address already exists, ignoring the set operation\n"); + return 0; + } + + return 0; +} + +int hinic3_del_mac(struct hinic3_hwdev *hwdev, const u8 *mac_addr, u16 vlan_id, u16 func_id) +{ + struct l2nic_cmd_set_mac mac_info = {}; + struct mgmt_msg_params msg_params = {}; + int err; + + if ((vlan_id & HINIC3_VLAN_ID_MASK) >= VLAN_N_VID) { + dev_err(hwdev->dev, "Invalid VLAN number: %d\n", + (vlan_id & HINIC3_VLAN_ID_MASK)); + return -EINVAL; + } + + mac_info.func_id = func_id; + mac_info.vlan_id = vlan_id; + ether_addr_copy(mac_info.mac, mac_addr); + + mgmt_msg_params_init_default(&msg_params, &mac_info, sizeof(mac_info)); + + err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_L2NIC, + L2NIC_CMD_DEL_MAC, &msg_params); + if (err) { + dev_err(hwdev->dev, + "Failed to delete MAC, err: %d, status: 0x%x\n", + err, mac_info.msg_head.status); + return -EIO; + } + + return 0; +} + +int hinic3_update_mac(struct hinic3_hwdev *hwdev, const u8 *old_mac, u8 *new_mac, + u16 vlan_id, u16 func_id) +{ + struct l2nic_cmd_update_mac mac_info = {}; + struct mgmt_msg_params msg_params = {}; + int err; + + if ((vlan_id & HINIC3_VLAN_ID_MASK) >= VLAN_N_VID) { + dev_err(hwdev->dev, "Invalid VLAN number: %d\n", + (vlan_id & HINIC3_VLAN_ID_MASK)); + return -EINVAL; + } + + mac_info.func_id = func_id; + mac_info.vlan_id = vlan_id; + ether_addr_copy(mac_info.old_mac, old_mac); + ether_addr_copy(mac_info.new_mac, new_mac); + + mgmt_msg_params_init_default(&msg_params, &mac_info, sizeof(mac_info)); + + err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_L2NIC, + L2NIC_CMD_UPDATE_MAC, &msg_params); + if (err || hinic3_check_mac_info(hwdev, mac_info.msg_head.status, + mac_info.vlan_id)) { + dev_err(hwdev->dev, + "Failed to update MAC, err: %d, status: 0x%x\n", + err, mac_info.msg_head.status); + return -EIO; + } + return 0; +} + +int hinic3_force_drop_tx_pkt(struct hinic3_hwdev *hwdev) +{ + struct l2nic_cmd_force_pkt_drop pkt_drop = {}; + struct mgmt_msg_params msg_params = {}; + int err; + + pkt_drop.port = hinic3_physical_port_id(hwdev); + + mgmt_msg_params_init_default(&msg_params, &pkt_drop, sizeof(pkt_drop)); + + err = hinic3_send_mbox_to_mgmt(hwdev, MGMT_MOD_L2NIC, + L2NIC_CMD_FORCE_PKT_DROP, &msg_params); + if ((pkt_drop.msg_head.status != MGMT_STATUS_CMD_UNSUPPORTED && + pkt_drop.msg_head.status) || err) { + dev_err(hwdev->dev, + "Failed to force tx packet drop, err: %d, status: 0x%x\n", + err, pkt_drop.msg_head.status); + return -EFAULT; + } + + return pkt_drop.msg_head.status; +} diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.h b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.h new file mode 100644 index 0000000000000..b7ae0f631cf43 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. 
*/ + +#ifndef _HINIC3_NIC_CFG_H_ +#define _HINIC3_NIC_CFG_H_ + +#include + +#include "hinic3_hw_intf.h" +#include "hinic3_mgmt_interface.h" + +struct hinic3_hwdev; +struct hinic3_nic_dev; + +#define HINIC3_MIN_MTU_SIZE 256 +#define HINIC3_MAX_JUMBO_FRAME_SIZE 9600 + +#define HINIC3_VLAN_ID_MASK 0x7FFF + +enum hinic3_nic_event_type { + HINIC3_NIC_EVENT_LINK_DOWN = 0, + HINIC3_NIC_EVENT_LINK_UP = 1, +}; + +int hinic3_set_nic_feature_to_hw(struct hinic3_nic_dev *nic_dev); +bool hinic3_test_support(struct hinic3_nic_dev *nic_dev, + enum hinic3_nic_feature_cap feature_bits); +void hinic3_update_nic_feature(struct hinic3_nic_dev *nic_dev, u64 feature_cap); + +int hinic3_set_port_mtu(struct net_device *netdev, u16 new_mtu); + +int hinic3_set_mac(struct hinic3_hwdev *hwdev, const u8 *mac_addr, u16 vlan_id, u16 func_id); +int hinic3_del_mac(struct hinic3_hwdev *hwdev, const u8 *mac_addr, u16 vlan_id, u16 func_id); +int hinic3_update_mac(struct hinic3_hwdev *hwdev, const u8 *old_mac, u8 *new_mac, u16 vlan_id, + u16 func_id); + +int hinic3_force_drop_tx_pkt(struct hinic3_hwdev *hwdev); + +#endif diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_dev.h b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_dev.h new file mode 100644 index 0000000000000..64a8fb874097d --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_dev.h @@ -0,0 +1,91 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */ + +#ifndef _HINIC3_NIC_DEV_H_ +#define _HINIC3_NIC_DEV_H_ + +#include + +#include "hinic3_hw_cfg.h" +#include "hinic3_mgmt_interface.h" + +enum hinic3_flags { + HINIC3_INTF_UP, + HINIC3_RSS_ENABLE, + HINIC3_CHANGE_RES_INVALID, + HINIC3_RSS_DEFAULT_INDIR, +}; + +#define HINIC3_CHANNEL_RES_VALID(nic_dev) \ + (test_bit(HINIC3_INTF_UP, &(nic_dev)->flags) && \ + !test_bit(HINIC3_CHANGE_RES_INVALID, &(nic_dev)->flags)) + +enum hinic3_rss_hash_type { + HINIC3_RSS_HASH_ENGINE_TYPE_XOR = 0, + HINIC3_RSS_HASH_ENGINE_TYPE_TOEP = 1, +}; + +struct hinic3_rss_type { + u8 tcp_ipv6_ext; + u8 ipv6_ext; + u8 tcp_ipv6; + u8 ipv6; + u8 tcp_ipv4; + u8 ipv4; + u8 udp_ipv6; + u8 udp_ipv4; +}; + +struct hinic3_irq_cfg { + struct net_device *netdev; + u16 msix_entry_idx; + /* provided by OS */ + u32 irq_id; + char irq_name[IFNAMSIZ + 16]; + struct napi_struct napi; + cpumask_t affinity_mask; + struct hinic3_txq *txq; + struct hinic3_rxq *rxq; +}; + +struct hinic3_dyna_txrxq_params { + u16 num_qps; + u32 sq_depth; + u32 rq_depth; + + struct hinic3_dyna_txq_res *txqs_res; + struct hinic3_dyna_rxq_res *rxqs_res; + struct hinic3_irq_cfg *irq_cfg; +}; + +struct hinic3_nic_dev { + struct pci_dev *pdev; + struct net_device *netdev; + struct hinic3_hwdev *hwdev; + struct hinic3_nic_io *nic_io; + + u16 max_qps; + u32 dma_rx_buff_size; + u16 rx_buff_len; + u32 page_order; + u32 lro_replenish_thld; + unsigned long flags; + struct hinic3_nic_service_cap nic_svc_cap; + + struct hinic3_dyna_txrxq_params q_params; + struct hinic3_txq *txqs; + struct hinic3_rxq *rxqs; + + u16 num_qp_irq; + struct msix_entry *qps_msix_entries; + + bool link_status_up; +}; + +void hinic3_set_netdev_ops(struct net_device *netdev); + +/* Temporary prototypes. Functions become static in later submission. 
*/ +void qp_add_napi(struct hinic3_irq_cfg *irq_cfg); +void qp_del_napi(struct hinic3_irq_cfg *irq_cfg); + +#endif diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.c b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.c new file mode 100644 index 0000000000000..34a1f5bd5ac19 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.c @@ -0,0 +1,21 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. + +#include "hinic3_hw_comm.h" +#include "hinic3_hw_intf.h" +#include "hinic3_hwdev.h" +#include "hinic3_hwif.h" +#include "hinic3_nic_cfg.h" +#include "hinic3_nic_dev.h" +#include "hinic3_nic_io.h" + +int hinic3_init_nic_io(struct hinic3_nic_dev *nic_dev) +{ + /* Completed by later submission due to LoC limit. */ + return -EFAULT; +} + +void hinic3_free_nic_io(struct hinic3_nic_dev *nic_dev) +{ + /* Completed by later submission due to LoC limit. */ +} diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.h b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.h new file mode 100644 index 0000000000000..72c0969d73dad --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.h @@ -0,0 +1,118 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */ + +#ifndef _HINIC3_NIC_IO_H_ +#define _HINIC3_NIC_IO_H_ + +#include + +#include "hinic3_wq.h" + +struct hinic3_nic_dev; + +#define HINIC3_SQ_WQEBB_SHIFT 4 +#define HINIC3_RQ_WQEBB_SHIFT 3 +#define HINIC3_SQ_WQEBB_SIZE BIT(HINIC3_SQ_WQEBB_SHIFT) + +/* ******************** RQ_CTRL ******************** */ +enum hinic3_rq_wqe_type { + HINIC3_NORMAL_RQ_WQE = 1, +}; + +/* ******************** SQ_CTRL ******************** */ +#define HINIC3_TX_MSS_DEFAULT 0x3E00 +#define HINIC3_TX_MSS_MIN 0x50 +#define HINIC3_MAX_SQ_SGE 18 + +struct hinic3_io_queue { + struct hinic3_wq wq; + u8 owner; + u16 q_id; + u16 msix_entry_idx; + u8 __iomem *db_addr; + u16 *cons_idx_addr; +} ____cacheline_aligned; + +static inline u16 hinic3_get_sq_local_ci(const struct hinic3_io_queue *sq) +{ + const struct hinic3_wq *wq = &sq->wq; + + return wq->cons_idx & wq->idx_mask; +} + +static inline u16 hinic3_get_sq_local_pi(const struct hinic3_io_queue *sq) +{ + const struct hinic3_wq *wq = &sq->wq; + + return wq->prod_idx & wq->idx_mask; +} + +static inline u16 hinic3_get_sq_hw_ci(const struct hinic3_io_queue *sq) +{ + const struct hinic3_wq *wq = &sq->wq; + + return READ_ONCE(*sq->cons_idx_addr) & wq->idx_mask; +} + +/* ******************** DB INFO ******************** */ +#define DB_INFO_QID_MASK GENMASK(12, 0) +#define DB_INFO_CFLAG_MASK BIT(23) +#define DB_INFO_COS_MASK GENMASK(26, 24) +#define DB_INFO_TYPE_MASK GENMASK(31, 27) +#define DB_INFO_SET(val, member) \ + FIELD_PREP(DB_INFO_##member##_MASK, val) + +#define DB_PI_LOW_MASK 0xFFU +#define DB_PI_HIGH_MASK 0xFFU +#define DB_PI_HI_SHIFT 8 +#define DB_PI_LOW(pi) ((pi) & DB_PI_LOW_MASK) +#define DB_PI_HIGH(pi) (((pi) >> DB_PI_HI_SHIFT) & DB_PI_HIGH_MASK) +#define DB_ADDR(q, pi) ((u64 __iomem *)((q)->db_addr) + DB_PI_LOW(pi)) +#define DB_SRC_TYPE 1 + +/* CFLAG_DATA_PATH */ +#define DB_CFLAG_DP_SQ 0 +#define DB_CFLAG_DP_RQ 1 + +struct hinic3_nic_db { + u32 db_info; + u32 pi_hi; +}; + +static inline void hinic3_write_db(struct hinic3_io_queue *queue, int cos, + u8 cflag, u16 pi) +{ + struct hinic3_nic_db db; + + db.db_info = DB_INFO_SET(DB_SRC_TYPE, TYPE) | DB_INFO_SET(cflag, CFLAG) | + DB_INFO_SET(cos, COS) | DB_INFO_SET(queue->q_id, QID); + db.pi_hi = DB_PI_HIGH(pi); + + 
writeq(*((u64 *)&db), DB_ADDR(queue, pi)); +} + +struct hinic3_nic_io { + struct hinic3_io_queue *sq; + struct hinic3_io_queue *rq; + + u16 num_qps; + u16 max_qps; + + /* Base address for consumer index of all tx queues. Each queue is + * given a full cache line to hold its consumer index. HW updates + * current consumer index as it consumes tx WQEs. + */ + void *ci_vaddr_base; + dma_addr_t ci_dma_base; + + u8 __iomem *sqs_db_addr; + u8 __iomem *rqs_db_addr; + + u16 rx_buff_len; + u64 feature_cap; +}; + +int hinic3_init_nic_io(struct hinic3_nic_dev *nic_dev); +void hinic3_free_nic_io(struct hinic3_nic_dev *nic_dev); + +#endif diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_queue_common.c b/drivers/net/ethernet/huawei/hinic3/hinic3_queue_common.c new file mode 100644 index 0000000000000..8535840d86de0 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_queue_common.c @@ -0,0 +1,67 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. + +#include + +#include "hinic3_hwdev.h" +#include "hinic3_queue_common.h" + +void hinic3_queue_pages_init(struct hinic3_queue_pages *qpages, u32 q_depth, + u32 page_size, u32 elem_size) +{ + u32 elem_per_page; + + elem_per_page = min(page_size / elem_size, q_depth); + + qpages->pages = NULL; + qpages->page_size = page_size; + qpages->num_pages = max(q_depth / elem_per_page, 1); + qpages->elem_size_shift = ilog2(elem_size); + qpages->elem_per_pg_shift = ilog2(elem_per_page); +} + +static void __queue_pages_free(struct hinic3_hwdev *hwdev, + struct hinic3_queue_pages *qpages, u32 pg_cnt) +{ + while (pg_cnt > 0) { + pg_cnt--; + hinic3_dma_free_coherent_align(hwdev->dev, + qpages->pages + pg_cnt); + } + kfree(qpages->pages); + qpages->pages = NULL; +} + +void hinic3_queue_pages_free(struct hinic3_hwdev *hwdev, + struct hinic3_queue_pages *qpages) +{ + __queue_pages_free(hwdev, qpages, qpages->num_pages); +} + +int hinic3_queue_pages_alloc(struct hinic3_hwdev *hwdev, + struct hinic3_queue_pages *qpages, u32 align) +{ + u32 pg_idx; + int err; + + qpages->pages = kcalloc(qpages->num_pages, sizeof(qpages->pages[0]), GFP_KERNEL); + if (!qpages->pages) + return -ENOMEM; + + if (align == 0) + align = qpages->page_size; + + for (pg_idx = 0; pg_idx < qpages->num_pages; pg_idx++) { + err = hinic3_dma_zalloc_coherent_align(hwdev->dev, + qpages->page_size, + align, + GFP_KERNEL, + qpages->pages + pg_idx); + if (err) { + __queue_pages_free(hwdev, qpages, pg_idx); + return err; + } + } + + return 0; +} diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_queue_common.h b/drivers/net/ethernet/huawei/hinic3/hinic3_queue_common.h new file mode 100644 index 0000000000000..799861cc0e9b8 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_queue_common.h @@ -0,0 +1,54 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */ + +#ifndef _HINIC3_QUEUE_COMMON_H_ +#define _HINIC3_QUEUE_COMMON_H_ + +#include + +#include "hinic3_common.h" + +struct hinic3_hwdev; + +struct hinic3_queue_pages { + /* Array of DMA-able pages that actually holds the queue entries. */ + struct hinic3_dma_addr_align *pages; + /* Page size in bytes. */ + u32 page_size; + /* Number of pages, must be power of 2. 
*/ + u16 num_pages; + u8 elem_size_shift; + u8 elem_per_pg_shift; +}; + +void hinic3_queue_pages_init(struct hinic3_queue_pages *qpages, u32 q_depth, + u32 page_size, u32 elem_size); +int hinic3_queue_pages_alloc(struct hinic3_hwdev *hwdev, + struct hinic3_queue_pages *qpages, u32 align); +void hinic3_queue_pages_free(struct hinic3_hwdev *hwdev, + struct hinic3_queue_pages *qpages); + +/* Get pointer to queue entry at the specified index. Index does not have to be + * masked to queue depth, only least significant bits will be used. Also + * provides remaining elements in same page (including the first one) in case + * caller needs multiple entries. + */ +static inline void *get_q_element(const struct hinic3_queue_pages *qpages, + u32 idx, u32 *remaining_in_page) +{ + const struct hinic3_dma_addr_align *page; + u32 page_idx, elem_idx, elem_per_pg, ofs; + u8 shift; + + shift = qpages->elem_per_pg_shift; + page_idx = (idx >> shift) & (qpages->num_pages - 1); + elem_per_pg = 1U << shift; + elem_idx = idx & (elem_per_pg - 1); + if (remaining_in_page) + *remaining_in_page = elem_per_pg - elem_idx; + ofs = elem_idx << qpages->elem_size_shift; + page = qpages->pages + page_idx; + return (char *)page->align_vaddr + ofs; +} + +#endif diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_rx.c b/drivers/net/ethernet/huawei/hinic3/hinic3_rx.c new file mode 100644 index 0000000000000..a3a670c6ce9ba --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_rx.c @@ -0,0 +1,404 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. + +#include +#include +#include +#include + +#include "hinic3_hwdev.h" +#include "hinic3_nic_dev.h" +#include "hinic3_nic_io.h" +#include "hinic3_rx.h" + +#define HINIC3_RX_HDR_SIZE 256 +#define HINIC3_RX_BUFFER_WRITE 16 + +#define HINIC3_RX_TCP_PKT 0x3 +#define HINIC3_RX_UDP_PKT 0x4 +#define HINIC3_RX_SCTP_PKT 0x7 + +#define HINIC3_RX_IPV4_PKT 0 +#define HINIC3_RX_IPV6_PKT 1 +#define HINIC3_RX_INVALID_IP_TYPE 2 + +#define HINIC3_RX_PKT_FORMAT_NON_TUNNEL 0 +#define HINIC3_RX_PKT_FORMAT_VXLAN 1 + +#define HINIC3_LRO_PKT_HDR_LEN_IPV4 66 +#define HINIC3_LRO_PKT_HDR_LEN_IPV6 86 +#define HINIC3_LRO_PKT_HDR_LEN(cqe) \ + (RQ_CQE_OFFOLAD_TYPE_GET((cqe)->offload_type, IP_TYPE) == \ + HINIC3_RX_IPV6_PKT ? HINIC3_LRO_PKT_HDR_LEN_IPV6 : \ + HINIC3_LRO_PKT_HDR_LEN_IPV4) + +int hinic3_alloc_rxqs(struct net_device *netdev) +{ + /* Completed by later submission due to LoC limit. */ + return -EFAULT; +} + +void hinic3_free_rxqs(struct net_device *netdev) +{ + /* Completed by later submission due to LoC limit. 
*/ +} + +static int rx_alloc_mapped_page(struct net_device *netdev, struct hinic3_rx_info *rx_info) +{ + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); + dma_addr_t dma = rx_info->buf_dma_addr; + struct pci_dev *pdev = nic_dev->pdev; + struct page *page = rx_info->page; + + if (likely(dma)) + return 0; + + page = alloc_pages_node(NUMA_NO_NODE, GFP_ATOMIC | __GFP_COMP, + nic_dev->page_order); + if (unlikely(!page)) + return -ENOMEM; + + dma = dma_map_page(&pdev->dev, page, 0, nic_dev->dma_rx_buff_size, + DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(&pdev->dev, dma))) { + __free_pages(page, nic_dev->page_order); + return -ENOMEM; + } + + rx_info->page = page; + rx_info->buf_dma_addr = dma; + rx_info->page_offset = 0; + + return 0; +} + +static void rq_wqe_buff_set(struct hinic3_io_queue *rq, u32 wqe_idx, + dma_addr_t dma_addr, u16 len) +{ + struct hinic3_rq_wqe *rq_wqe; + + rq_wqe = get_q_element(&rq->wq.qpages, wqe_idx, NULL); + rq_wqe->buf_hi_addr = upper_32_bits(dma_addr); + rq_wqe->buf_lo_addr = lower_32_bits(dma_addr); +} + +static u32 hinic3_rx_fill_buffers(struct hinic3_rxq *rxq) +{ + struct net_device *netdev = rxq->netdev; + u32 i, free_wqebbs = rxq->delta - 1; + struct hinic3_nic_dev *nic_dev; + struct hinic3_rx_info *rx_info; + dma_addr_t dma_addr; + int err; + + nic_dev = netdev_priv(netdev); + for (i = 0; i < free_wqebbs; i++) { + rx_info = &rxq->rx_info[rxq->next_to_update]; + + err = rx_alloc_mapped_page(netdev, rx_info); + if (unlikely(err)) + break; + + dma_addr = rx_info->buf_dma_addr + rx_info->page_offset; + rq_wqe_buff_set(rxq->rq, rxq->next_to_update, dma_addr, + nic_dev->rx_buff_len); + rxq->next_to_update = (rxq->next_to_update + 1) & rxq->q_mask; + } + + if (likely(i)) { + hinic3_write_db(rxq->rq, rxq->q_id & 3, DB_CFLAG_DP_RQ, + rxq->next_to_update << HINIC3_NORMAL_RQ_WQE); + rxq->delta -= i; + rxq->next_to_alloc = rxq->next_to_update; + } + + return i; +} + +static void hinic3_reuse_rx_page(struct hinic3_rxq *rxq, + struct hinic3_rx_info *old_rx_info) +{ + struct hinic3_rx_info *new_rx_info; + u16 nta = rxq->next_to_alloc; + + new_rx_info = &rxq->rx_info[nta]; + + /* update, and store next to alloc */ + nta++; + rxq->next_to_alloc = (nta < rxq->q_depth) ?
nta : 0; + + new_rx_info->page = old_rx_info->page; + new_rx_info->page_offset = old_rx_info->page_offset; + new_rx_info->buf_dma_addr = old_rx_info->buf_dma_addr; + + /* sync the buffer for use by the device */ + dma_sync_single_range_for_device(rxq->dev, new_rx_info->buf_dma_addr, + new_rx_info->page_offset, + rxq->buf_len, + DMA_FROM_DEVICE); +} + +static void hinic3_add_rx_frag(struct hinic3_rxq *rxq, + struct hinic3_rx_info *rx_info, + struct sk_buff *skb, u32 size) +{ + struct page *page; + u8 *va; + + page = rx_info->page; + va = (u8 *)page_address(page) + rx_info->page_offset; + net_prefetch(va); + + dma_sync_single_range_for_cpu(rxq->dev, + rx_info->buf_dma_addr, + rx_info->page_offset, + rxq->buf_len, + DMA_FROM_DEVICE); + + if (size <= HINIC3_RX_HDR_SIZE && !skb_is_nonlinear(skb)) { + memcpy(__skb_put(skb, size), va, + ALIGN(size, sizeof(long))); + + /* page is not reserved, we can reuse buffer as-is */ + if (likely(page_to_nid(page) == numa_node_id())) + goto reuse_rx_page; + + /* this page cannot be reused so discard it */ + put_page(page); + goto err_reuse_buffer; + } + + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, + rx_info->page_offset, size, rxq->buf_len); + + /* avoid re-using remote pages */ + if (unlikely(page_to_nid(page) != numa_node_id())) + goto err_reuse_buffer; + + /* if we are the only owner of the page we can reuse it */ + if (unlikely(page_count(page) != 1)) + goto err_reuse_buffer; + + /* flip page offset to other buffer */ + rx_info->page_offset ^= rxq->buf_len; + get_page(page); + +reuse_rx_page: + hinic3_reuse_rx_page(rxq, rx_info); + return; + +err_reuse_buffer: + /* we are not reusing the buffer so unmap it */ + dma_unmap_page(rxq->dev, rx_info->buf_dma_addr, rxq->dma_rx_buff_size, DMA_FROM_DEVICE); +} + +static void packaging_skb(struct hinic3_rxq *rxq, struct sk_buff *skb, + u32 sge_num, u32 pkt_len) +{ + struct hinic3_rx_info *rx_info; + u32 temp_pkt_len = pkt_len; + u32 temp_sge_num = sge_num; + u32 sw_ci; + u32 size; + + sw_ci = rxq->cons_idx & rxq->q_mask; + while (temp_sge_num) { + rx_info = &rxq->rx_info[sw_ci]; + sw_ci = (sw_ci + 1) & rxq->q_mask; + if (unlikely(temp_pkt_len > rxq->buf_len)) { + size = rxq->buf_len; + temp_pkt_len -= rxq->buf_len; + } else { + size = temp_pkt_len; + } + + hinic3_add_rx_frag(rxq, rx_info, skb, size); + + /* clear contents of buffer_info */ + rx_info->buf_dma_addr = 0; + rx_info->page = NULL; + temp_sge_num--; + } +} + +static u32 hinic3_get_sge_num(struct hinic3_rxq *rxq, u32 pkt_len) +{ + u32 sge_num; + + sge_num = pkt_len >> rxq->rx_buff_shift; + sge_num += (pkt_len & (rxq->buf_len - 1)) ? 1 : 0; + + return sge_num; +} + +static struct sk_buff *hinic3_fetch_rx_buffer(struct hinic3_rxq *rxq, + u32 pkt_len) +{ + struct net_device *netdev = rxq->netdev; + struct sk_buff *skb; + u32 sge_num; + + skb = netdev_alloc_skb_ip_align(netdev, HINIC3_RX_HDR_SIZE); + if (unlikely(!skb)) + return NULL; + + sge_num = hinic3_get_sge_num(rxq, pkt_len); + + net_prefetchw(skb->data); + packaging_skb(rxq, skb, sge_num, pkt_len); + + rxq->cons_idx += sge_num; + rxq->delta += sge_num; + + return skb; +} + +static void hinic3_pull_tail(struct sk_buff *skb) +{ + skb_frag_t *frag = &skb_shinfo(skb)->frags[0]; + unsigned int pull_len; + unsigned char *va; + + va = skb_frag_address(frag); + + /* we need the header to contain the greater of either ETH_HLEN or + * 60 bytes if the skb->len is less than 60 for skb_pad. 
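+ * eth_get_headlen() dissects the frame so that only actual protocol + * headers are copied into the linear area.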
+ */ + pull_len = eth_get_headlen(skb->dev, va, HINIC3_RX_HDR_SIZE); + + /* align pull length to size of long to optimize memcpy performance */ + skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long))); + + /* update all of the pointers */ + skb_frag_size_sub(frag, pull_len); + skb_frag_off_add(frag, pull_len); + + skb->data_len -= pull_len; + skb->tail += pull_len; +} + +static void hinic3_rx_csum(struct hinic3_rxq *rxq, u32 offload_type, + u32 status, struct sk_buff *skb) +{ + u32 pkt_fmt = RQ_CQE_OFFOLAD_TYPE_GET(offload_type, TUNNEL_PKT_FORMAT); + u32 pkt_type = RQ_CQE_OFFOLAD_TYPE_GET(offload_type, PKT_TYPE); + u32 ip_type = RQ_CQE_OFFOLAD_TYPE_GET(offload_type, IP_TYPE); + u32 csum_err = RQ_CQE_STATUS_GET(status, CSUM_ERR); + struct net_device *netdev = rxq->netdev; + + if (!(netdev->features & NETIF_F_RXCSUM)) + return; + + if (unlikely(csum_err)) { + /* pkt type is recognized by HW, and csum is wrong */ + skb->ip_summed = CHECKSUM_NONE; + return; + } + + if (ip_type == HINIC3_RX_INVALID_IP_TYPE || + !(pkt_fmt == HINIC3_RX_PKT_FORMAT_NON_TUNNEL || + pkt_fmt == HINIC3_RX_PKT_FORMAT_VXLAN)) { + skb->ip_summed = CHECKSUM_NONE; + return; + } + + switch (pkt_type) { + case HINIC3_RX_TCP_PKT: + case HINIC3_RX_UDP_PKT: + case HINIC3_RX_SCTP_PKT: + skb->ip_summed = CHECKSUM_UNNECESSARY; + break; + default: + skb->ip_summed = CHECKSUM_NONE; + break; + } +} + +static void hinic3_lro_set_gso_params(struct sk_buff *skb, u16 num_lro) +{ + struct ethhdr *eth = (struct ethhdr *)(skb->data); + __be16 proto; + + proto = __vlan_get_protocol(skb, eth->h_proto, NULL); + + skb_shinfo(skb)->gso_size = DIV_ROUND_UP(skb->len - skb_headlen(skb), num_lro); + skb_shinfo(skb)->gso_type = proto == htons(ETH_P_IP) ? SKB_GSO_TCPV4 : SKB_GSO_TCPV6; + skb_shinfo(skb)->gso_segs = num_lro; +} + +static int recv_one_pkt(struct hinic3_rxq *rxq, struct hinic3_rq_cqe *rx_cqe, + u32 pkt_len, u32 vlan_len, u32 status) +{ + struct net_device *netdev = rxq->netdev; + struct sk_buff *skb; + u32 offload_type; + u16 num_lro; + + skb = hinic3_fetch_rx_buffer(rxq, pkt_len); + if (unlikely(!skb)) + return -ENOMEM; + + /* place header in linear portion of buffer */ + if (skb_is_nonlinear(skb)) + hinic3_pull_tail(skb); + + offload_type = rx_cqe->offload_type; + hinic3_rx_csum(rxq, offload_type, status, skb); + + num_lro = RQ_CQE_STATUS_GET(status, NUM_LRO); + if (num_lro) + hinic3_lro_set_gso_params(skb, num_lro); + + skb_record_rx_queue(skb, rxq->q_id); + skb->protocol = eth_type_trans(skb, netdev); + + if (skb_has_frag_list(skb)) { + napi_gro_flush(&rxq->irq_cfg->napi, false); + netif_receive_skb(skb); + } else { + napi_gro_receive(&rxq->irq_cfg->napi, skb); + } + + return 0; +} + +int hinic3_rx_poll(struct hinic3_rxq *rxq, int budget) +{ + struct hinic3_nic_dev *nic_dev = netdev_priv(rxq->netdev); + u32 sw_ci, status, pkt_len, vlan_len; + struct hinic3_rq_cqe *rx_cqe; + u32 num_wqe = 0; + int nr_pkts = 0; + u16 num_lro; + + while (likely(nr_pkts < budget)) { + sw_ci = rxq->cons_idx & rxq->q_mask; + rx_cqe = rxq->cqe_arr + sw_ci; + status = rx_cqe->status; + if (!RQ_CQE_STATUS_GET(status, RXDONE)) + break; + + /* make sure we read rx_done before packet length */ + rmb(); + + vlan_len = rx_cqe->vlan_len; + pkt_len = RQ_CQE_SGE_GET(vlan_len, LEN); + if (recv_one_pkt(rxq, rx_cqe, pkt_len, vlan_len, status)) + break; + + nr_pkts++; + num_lro = RQ_CQE_STATUS_GET(status, NUM_LRO); + if (num_lro) + num_wqe += hinic3_get_sge_num(rxq, pkt_len); + + rx_cqe->status = 0; + + if (num_wqe >= nic_dev->lro_replenish_thld) + break; + } + + 
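+ /* Replenish in batches so the RQ doorbell in hinic3_rx_fill_buffers() + * is not written for every consumed buffer. + */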
if (rxq->delta >= HINIC3_RX_BUFFER_WRITE) + hinic3_rx_fill_buffers(rxq); + + return nr_pkts; +} diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_rx.h b/drivers/net/ethernet/huawei/hinic3/hinic3_rx.h new file mode 100644 index 0000000000000..276b712815ec1 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_rx.h @@ -0,0 +1,91 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */ + +#ifndef _HINIC3_RX_H_ +#define _HINIC3_RX_H_ + +#include +#include + +#define RQ_CQE_OFFOLAD_TYPE_PKT_TYPE_MASK GENMASK(4, 0) +#define RQ_CQE_OFFOLAD_TYPE_IP_TYPE_MASK GENMASK(6, 5) +#define RQ_CQE_OFFOLAD_TYPE_TUNNEL_PKT_FORMAT_MASK GENMASK(11, 8) +#define RQ_CQE_OFFOLAD_TYPE_VLAN_EN_MASK BIT(21) +#define RQ_CQE_OFFOLAD_TYPE_GET(val, member) \ + FIELD_GET(RQ_CQE_OFFOLAD_TYPE_##member##_MASK, val) + +#define RQ_CQE_SGE_VLAN_MASK GENMASK(15, 0) +#define RQ_CQE_SGE_LEN_MASK GENMASK(31, 16) +#define RQ_CQE_SGE_GET(val, member) \ + FIELD_GET(RQ_CQE_SGE_##member##_MASK, val) + +#define RQ_CQE_STATUS_CSUM_ERR_MASK GENMASK(15, 0) +#define RQ_CQE_STATUS_NUM_LRO_MASK GENMASK(23, 16) +#define RQ_CQE_STATUS_RXDONE_MASK BIT(31) +#define RQ_CQE_STATUS_GET(val, member) \ + FIELD_GET(RQ_CQE_STATUS_##member##_MASK, val) + +/* RX Completion information that is provided by HW for a specific RX WQE */ +struct hinic3_rq_cqe { + u32 status; + u32 vlan_len; + u32 offload_type; + u32 rsvd3; + u32 rsvd4; + u32 rsvd5; + u32 rsvd6; + u32 pkt_info; +}; + +struct hinic3_rq_wqe { + u32 buf_hi_addr; + u32 buf_lo_addr; + u32 cqe_hi_addr; + u32 cqe_lo_addr; +}; + +struct hinic3_rx_info { + dma_addr_t buf_dma_addr; + struct page *page; + u32 page_offset; +}; + +struct hinic3_rxq { + struct net_device *netdev; + + u16 q_id; + u32 q_depth; + u32 q_mask; + + u16 buf_len; + u32 rx_buff_shift; + u32 dma_rx_buff_size; + + u32 cons_idx; + u32 delta; + + u32 irq_id; + u16 msix_entry_idx; + + /* cqe_arr and rx_info are arrays of rq_depth elements. Each element is + * statically associated (by index) to a specific rq_wqe. + */ + struct hinic3_rq_cqe *cqe_arr; + struct hinic3_rx_info *rx_info; + + struct hinic3_io_queue *rq; + + struct hinic3_irq_cfg *irq_cfg; + u16 next_to_alloc; + u16 next_to_update; + struct device *dev; /* device for DMA mapping */ + + dma_addr_t cqe_start_paddr; +} ____cacheline_aligned; + +int hinic3_alloc_rxqs(struct net_device *netdev); +void hinic3_free_rxqs(struct net_device *netdev); + +int hinic3_rx_poll(struct hinic3_rxq *rxq, int budget); + +#endif diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_tx.c b/drivers/net/ethernet/huawei/hinic3/hinic3_tx.c new file mode 100644 index 0000000000000..6a91305ac15d0 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_tx.c @@ -0,0 +1,691 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. 
+ +#include +#include +#include +#include + +#include "hinic3_hwdev.h" +#include "hinic3_nic_cfg.h" +#include "hinic3_nic_dev.h" +#include "hinic3_nic_io.h" +#include "hinic3_tx.h" +#include "hinic3_wq.h" + +#define MIN_SKB_LEN 32 + +int hinic3_alloc_txqs(struct net_device *netdev) +{ + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); + struct hinic3_hwdev *hwdev = nic_dev->hwdev; + u16 q_id, num_txqs = nic_dev->max_qps; + struct pci_dev *pdev = nic_dev->pdev; + struct hinic3_txq *txq; + u64 txq_size; + + txq_size = num_txqs * sizeof(*nic_dev->txqs); + if (!txq_size) { + dev_err(hwdev->dev, "Cannot allocate zero size txqs\n"); + return -EINVAL; + } + + nic_dev->txqs = kzalloc(txq_size, GFP_KERNEL); + if (!nic_dev->txqs) + return -ENOMEM; + + for (q_id = 0; q_id < num_txqs; q_id++) { + txq = &nic_dev->txqs[q_id]; + txq->netdev = netdev; + txq->q_id = q_id; + txq->q_depth = nic_dev->q_params.sq_depth; + txq->q_mask = nic_dev->q_params.sq_depth - 1; + txq->dev = &pdev->dev; + } + + return 0; +} + +void hinic3_free_txqs(struct net_device *netdev) +{ + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); + + kfree(nic_dev->txqs); +} + +static void hinic3_set_buf_desc(struct hinic3_sq_bufdesc *buf_descs, + dma_addr_t addr, u32 len) +{ + buf_descs->hi_addr = upper_32_bits(addr); + buf_descs->lo_addr = lower_32_bits(addr); + buf_descs->len = len; +} + +static int hinic3_tx_map_skb(struct net_device *netdev, struct sk_buff *skb, + struct hinic3_txq *txq, + struct hinic3_tx_info *tx_info, + struct hinic3_sq_wqe_combo *wqe_combo) +{ + struct hinic3_sq_wqe_desc *wqe_desc = wqe_combo->ctrl_bd0; + struct hinic3_sq_bufdesc *buf_desc = wqe_combo->bds_head; + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); + struct hinic3_dma_info *dma_info = tx_info->dma_info; + struct pci_dev *pdev = nic_dev->pdev; + skb_frag_t *frag; + u32 j, i; + int err; + + dma_info[0].dma = dma_map_single(&pdev->dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE); + if (dma_mapping_error(&pdev->dev, dma_info[0].dma)) + return -EFAULT; + + dma_info[0].len = skb_headlen(skb); + + wqe_desc->hi_addr = upper_32_bits(dma_info[0].dma); + wqe_desc->lo_addr = lower_32_bits(dma_info[0].dma); + + wqe_desc->ctrl_len = dma_info[0].len; + + for (i = 0; i < skb_shinfo(skb)->nr_frags;) { + frag = &(skb_shinfo(skb)->frags[i]); + if (unlikely(i == wqe_combo->first_bds_num)) + buf_desc = wqe_combo->bds_sec2; + + i++; + dma_info[i].dma = skb_frag_dma_map(&pdev->dev, frag, 0, + skb_frag_size(frag), + DMA_TO_DEVICE); + if (dma_mapping_error(&pdev->dev, dma_info[i].dma)) { + i--; + err = -EFAULT; + goto err_frag_map; + } + dma_info[i].len = skb_frag_size(frag); + + hinic3_set_buf_desc(buf_desc, dma_info[i].dma, + dma_info[i].len); + buf_desc++; + } + + return 0; + +err_frag_map: + for (j = 0; j < i;) { + j++; + dma_unmap_page(&pdev->dev, dma_info[j].dma, + dma_info[j].len, DMA_TO_DEVICE); + } + dma_unmap_single(&pdev->dev, dma_info[0].dma, dma_info[0].len, + DMA_TO_DEVICE); + return err; +} + +static void hinic3_tx_unmap_skb(struct net_device *netdev, + struct sk_buff *skb, + struct hinic3_dma_info *dma_info) +{ + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); + struct pci_dev *pdev = nic_dev->pdev; + int i; + + for (i = 0; i < skb_shinfo(skb)->nr_frags;) { + i++; + dma_unmap_page(&pdev->dev, + dma_info[i].dma, + dma_info[i].len, DMA_TO_DEVICE); + } + + dma_unmap_single(&pdev->dev, dma_info[0].dma, + dma_info[0].len, DMA_TO_DEVICE); +} + +static void hinic3_tx_free_skb(struct net_device *netdev, + struct hinic3_tx_info *tx_info) +{ + 
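+ /* Release the DMA mappings before freeing the skb that owns them. */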
hinic3_tx_unmap_skb(netdev, tx_info->skb, tx_info->dma_info); + dev_kfree_skb_any(tx_info->skb); + tx_info->skb = NULL; +} + +union hinic3_ip { + struct iphdr *v4; + struct ipv6hdr *v6; + unsigned char *hdr; +}; + +union hinic3_l4 { + struct tcphdr *tcp; + struct udphdr *udp; + unsigned char *hdr; +}; + +enum hinic3_l3_type { + HINIC3_L3_UNKNOWN = 0, + HINIC3_L3_IP6_PKT = 1, + HINIC3_L3_IP4_PKT_NO_CSUM = 2, + HINIC3_L3_IP4_PKT_CSUM = 3, +}; + +enum hinic3_l4_offload_type { + HINIC3_L4_OFFLOAD_DISABLE = 0, + HINIC3_L4_OFFLOAD_TCP = 1, + HINIC3_L4_OFFLOAD_SCTP = 2, + HINIC3_L4_OFFLOAD_UDP = 3, +}; + +/* initialize l4 offset and offload */ +static void get_inner_l4_info(struct sk_buff *skb, union hinic3_l4 *l4, + u8 l4_proto, u32 *offset, + enum hinic3_l4_offload_type *l4_offload) +{ + switch (l4_proto) { + case IPPROTO_TCP: + *l4_offload = HINIC3_L4_OFFLOAD_TCP; + /* To match the TSO convention, the payload offset points at the start of the payload */ + *offset = (l4->tcp->doff << TCP_HDR_DATA_OFF_UNIT_SHIFT) + + TRANSPORT_OFFSET(l4->hdr, skb); + break; + + case IPPROTO_UDP: + *l4_offload = HINIC3_L4_OFFLOAD_UDP; + *offset = TRANSPORT_OFFSET(l4->hdr, skb); + break; + default: + *l4_offload = HINIC3_L4_OFFLOAD_DISABLE; + *offset = 0; + } +} + +static int hinic3_tx_csum(struct hinic3_txq *txq, struct hinic3_sq_task *task, + struct sk_buff *skb) +{ + if (skb->ip_summed != CHECKSUM_PARTIAL) + return 0; + + if (skb->encapsulation) { + union hinic3_ip ip; + u8 l4_proto; + + task->pkt_info0 |= SQ_TASK_INFO0_SET(1U, TUNNEL_FLAG); + + ip.hdr = skb_network_header(skb); + if (ip.v4->version == 4) { + l4_proto = ip.v4->protocol; + } else if (ip.v4->version == 6) { + union hinic3_l4 l4; + unsigned char *exthdr; + __be16 frag_off; + + exthdr = ip.hdr + sizeof(*ip.v6); + l4_proto = ip.v6->nexthdr; + l4.hdr = skb_transport_header(skb); + if (l4.hdr != exthdr) + ipv6_skip_exthdr(skb, exthdr - skb->data, + &l4_proto, &frag_off); + } else { + l4_proto = IPPROTO_RAW; + } + + if (l4_proto != IPPROTO_UDP || + ((struct udphdr *)skb_transport_header(skb))->dest != VXLAN_OFFLOAD_PORT_LE) { + /* Unsupported tunnel packet, disable csum offload */ + skb_checksum_help(skb); + return 0; + } + } + + task->pkt_info0 |= SQ_TASK_INFO0_SET(1U, INNER_L4_EN); + + return 1; +} + +static void get_inner_l3_l4_type(struct sk_buff *skb, union hinic3_ip *ip, + union hinic3_l4 *l4, + enum hinic3_l3_type *l3_type, u8 *l4_proto) +{ + unsigned char *exthdr; + __be16 frag_off; + + if (ip->v4->version == 4) { + *l3_type = HINIC3_L3_IP4_PKT_CSUM; + *l4_proto = ip->v4->protocol; + } else if (ip->v4->version == 6) { + *l3_type = HINIC3_L3_IP6_PKT; + exthdr = ip->hdr + sizeof(*ip->v6); + *l4_proto = ip->v6->nexthdr; + if (exthdr != l4->hdr) { + ipv6_skip_exthdr(skb, exthdr - skb->data, + l4_proto, &frag_off); + } + } else { + *l3_type = HINIC3_L3_UNKNOWN; + *l4_proto = 0; + } +} + +static void hinic3_set_tso_info(struct hinic3_sq_task *task, u32 *queue_info, + enum hinic3_l4_offload_type l4_offload, + u32 offset, u32 mss) +{ + if (l4_offload == HINIC3_L4_OFFLOAD_TCP) { + *queue_info |= SQ_CTRL_QUEUE_INFO_SET(1U, TSO); + task->pkt_info0 |= SQ_TASK_INFO0_SET(1U, INNER_L4_EN); + } else if (l4_offload == HINIC3_L4_OFFLOAD_UDP) { + *queue_info |= SQ_CTRL_QUEUE_INFO_SET(1U, UFO); + task->pkt_info0 |= SQ_TASK_INFO0_SET(1U, INNER_L4_EN); + } + + /* enable L3 calculation */ + task->pkt_info0 |= SQ_TASK_INFO0_SET(1U, INNER_L3_EN); + + *queue_info |= SQ_CTRL_QUEUE_INFO_SET(offset >> 1, PLDOFF); + + /* set MSS value */ + *queue_info &= ~SQ_CTRL_QUEUE_INFO_MSS_MASK; + *queue_info |=
SQ_CTRL_QUEUE_INFO_SET(mss, MSS); +} + +static __sum16 csum_magic(union hinic3_ip *ip, unsigned short proto) +{ + return (ip->v4->version == 4) ? + csum_tcpudp_magic(ip->v4->saddr, ip->v4->daddr, 0, proto, 0) : + csum_ipv6_magic(&ip->v6->saddr, &ip->v6->daddr, 0, proto, 0); +} + +static int hinic3_tso(struct hinic3_sq_task *task, u32 *queue_info, + struct sk_buff *skb) +{ + enum hinic3_l4_offload_type l4_offload; + enum hinic3_l3_type l3_type; + union hinic3_ip ip; + union hinic3_l4 l4; + u8 l4_proto; + u32 offset; + int err; + + if (!skb_is_gso(skb)) + return 0; + + err = skb_cow_head(skb, 0); + if (err < 0) + return err; + + if (skb->encapsulation) { + u32 gso_type = skb_shinfo(skb)->gso_type; + /* L3 checksum is always enabled */ + task->pkt_info0 |= SQ_TASK_INFO0_SET(1U, OUT_L3_EN); + task->pkt_info0 |= SQ_TASK_INFO0_SET(1U, TUNNEL_FLAG); + + l4.hdr = skb_transport_header(skb); + ip.hdr = skb_network_header(skb); + + if (gso_type & SKB_GSO_UDP_TUNNEL_CSUM) { + l4.udp->check = ~csum_magic(&ip, IPPROTO_UDP); + task->pkt_info0 |= SQ_TASK_INFO0_SET(1U, OUT_L4_EN); + } + + ip.hdr = skb_inner_network_header(skb); + l4.hdr = skb_inner_transport_header(skb); + } else { + ip.hdr = skb_network_header(skb); + l4.hdr = skb_transport_header(skb); + } + + get_inner_l3_l4_type(skb, &ip, &l4, &l3_type, &l4_proto); + + if (l4_proto == IPPROTO_TCP) + l4.tcp->check = ~csum_magic(&ip, IPPROTO_TCP); + + get_inner_l4_info(skb, &l4, l4_proto, &offset, &l4_offload); + + hinic3_set_tso_info(task, queue_info, l4_offload, offset, + skb_shinfo(skb)->gso_size); + + return 1; +} + +static void hinic3_set_vlan_tx_offload(struct hinic3_sq_task *task, + u16 vlan_tag, u8 vlan_tpid) +{ + /* vlan_tpid: 0=select TPID0 in IPSU, 1=select TPID1 in IPSU + * 2=select TPID2 in IPSU, 3=select TPID3 in IPSU, + * 4=select TPID4 in IPSU + */ + task->vlan_offload = SQ_TASK_INFO3_SET(vlan_tag, VLAN_TAG) | + SQ_TASK_INFO3_SET(vlan_tpid, VLAN_TPID) | + SQ_TASK_INFO3_SET(1U, VLAN_TAG_VALID); +} + +static u32 hinic3_tx_offload(struct sk_buff *skb, struct hinic3_sq_task *task, + u32 *queue_info, struct hinic3_txq *txq) +{ + u32 offload = 0; + int tso_cs_en; + + task->pkt_info0 = 0; + task->ip_identify = 0; + task->rsvd = 0; + task->vlan_offload = 0; + + tso_cs_en = hinic3_tso(task, queue_info, skb); + if (tso_cs_en < 0) { + offload = HINIC3_TX_OFFLOAD_INVALID; + return offload; + } else if (tso_cs_en) { + offload |= HINIC3_TX_OFFLOAD_TSO; + } else { + tso_cs_en = hinic3_tx_csum(txq, task, skb); + if (tso_cs_en) + offload |= HINIC3_TX_OFFLOAD_CSUM; + } + +#define VLAN_INSERT_MODE_MAX 5 + if (unlikely(skb_vlan_tag_present(skb))) { + /* select vlan insert mode by qid, default 802.1Q Tag type */ + hinic3_set_vlan_tx_offload(task, skb_vlan_tag_get(skb), + txq->q_id % VLAN_INSERT_MODE_MAX); + offload |= HINIC3_TX_OFFLOAD_VLAN; + } + + if (unlikely(SQ_CTRL_QUEUE_INFO_GET(*queue_info, PLDOFF) > + SQ_CTRL_MAX_PLDOFF)) { + offload = HINIC3_TX_OFFLOAD_INVALID; + return offload; + } + + return offload; +} + +static int hinic3_maybe_stop_tx(struct hinic3_txq *txq, u16 wqebb_cnt) +{ + if (likely(hinic3_wq_free_wqebbs(&txq->sq->wq) >= wqebb_cnt)) + return 0; + + /* We need to check again in case another CPU has just + * made room available.
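+ * The completion path only wakes a queue it observes as stopped, so + * stop first and then re-check to close the race.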
+ */ + netif_stop_subqueue(txq->netdev, txq->q_id); + + if (likely(hinic3_wq_free_wqebbs(&txq->sq->wq) < wqebb_cnt)) + return -EBUSY; + + netif_start_subqueue(txq->netdev, txq->q_id); + + return 0; +} + +static u16 hinic3_get_and_update_sq_owner(struct hinic3_io_queue *sq, + u16 curr_pi, u16 wqebb_cnt) +{ + u16 owner = sq->owner; + + if (unlikely(curr_pi + wqebb_cnt >= sq->wq.q_depth)) + sq->owner = !sq->owner; + + return owner; +} + +static u16 hinic3_set_wqe_combo(struct hinic3_txq *txq, + struct hinic3_sq_wqe_combo *wqe_combo, + u32 offload, u16 num_sge, u16 *curr_pi) +{ + struct hinic3_sq_bufdesc *second_part_wqebbs_addr; + u16 first_part_wqebbs_num, tmp_pi; + struct hinic3_sq_bufdesc *wqe; + + wqe_combo->ctrl_bd0 = hinic3_wq_get_one_wqebb(&txq->sq->wq, curr_pi); + if (!offload && num_sge == 1) { + wqe_combo->wqe_type = SQ_WQE_COMPACT_TYPE; + return hinic3_get_and_update_sq_owner(txq->sq, *curr_pi, 1); + } + + wqe_combo->wqe_type = SQ_WQE_EXTENDED_TYPE; + + if (offload) { + wqe_combo->task = hinic3_wq_get_one_wqebb(&txq->sq->wq, &tmp_pi); + wqe_combo->task_type = SQ_WQE_TASKSECT_16BYTES; + } else { + wqe_combo->task_type = SQ_WQE_TASKSECT_46BITS; + } + + if (num_sge > 1) { + /* first wqebb contains bd0, and bd size is equal to sq wqebb + * size, so we use (num_sge - 1) as the wanted wqebb_cnt + */ + hinic3_wq_get_multi_wqebbs(&txq->sq->wq, num_sge - 1, &tmp_pi, + &wqe, + &second_part_wqebbs_addr, + &first_part_wqebbs_num); + wqe_combo->bds_head = wqe; + wqe_combo->bds_sec2 = second_part_wqebbs_addr; + wqe_combo->first_bds_num = first_part_wqebbs_num; + } + + return hinic3_get_and_update_sq_owner(txq->sq, *curr_pi, + num_sge + !!offload); +} + +static void hinic3_prepare_sq_ctrl(struct hinic3_sq_wqe_combo *wqe_combo, + u32 queue_info, int nr_descs, u16 owner) +{ + struct hinic3_sq_wqe_desc *wqe_desc = wqe_combo->ctrl_bd0; + + if (wqe_combo->wqe_type == SQ_WQE_COMPACT_TYPE) { + wqe_desc->ctrl_len |= + SQ_CTRL_SET(SQ_NORMAL_WQE, DATA_FORMAT) | + SQ_CTRL_SET(wqe_combo->wqe_type, EXTENDED) | + SQ_CTRL_SET(owner, OWNER); + + /* compact wqe queue_info will be transferred to the chip, so clear it */ + wqe_desc->queue_info = 0; + return; + } + + wqe_desc->ctrl_len |= SQ_CTRL_SET(nr_descs, BUFDESC_NUM) | + SQ_CTRL_SET(wqe_combo->task_type, TASKSECT_LEN) | + SQ_CTRL_SET(SQ_NORMAL_WQE, DATA_FORMAT) | + SQ_CTRL_SET(wqe_combo->wqe_type, EXTENDED) | + SQ_CTRL_SET(owner, OWNER); + + wqe_desc->queue_info = queue_info; + wqe_desc->queue_info |= SQ_CTRL_QUEUE_INFO_SET(1U, UC); + + if (!SQ_CTRL_QUEUE_INFO_GET(wqe_desc->queue_info, MSS)) { + wqe_desc->queue_info |= + SQ_CTRL_QUEUE_INFO_SET(HINIC3_TX_MSS_DEFAULT, MSS); + } else if (SQ_CTRL_QUEUE_INFO_GET(wqe_desc->queue_info, MSS) < + HINIC3_TX_MSS_MIN) { + /* mss should not be less than 80 */ + wqe_desc->queue_info &= ~SQ_CTRL_QUEUE_INFO_MSS_MASK; + wqe_desc->queue_info |= + SQ_CTRL_QUEUE_INFO_SET(HINIC3_TX_MSS_MIN, MSS); + } +} + +static netdev_tx_t hinic3_send_one_skb(struct sk_buff *skb, + struct net_device *netdev, + struct hinic3_txq *txq) +{ + struct hinic3_sq_wqe_combo wqe_combo = {}; + struct hinic3_tx_info *tx_info; + u32 offload, queue_info = 0; + struct hinic3_sq_task task; + u16 wqebb_cnt, num_sge; + u16 saved_wq_prod_idx; + u16 owner, pi = 0; + u8 saved_sq_owner; + int err; + + if (unlikely(skb->len < MIN_SKB_LEN)) { + if (skb_pad(skb, MIN_SKB_LEN - skb->len)) + goto err_tx_skb_pad; + + skb->len = MIN_SKB_LEN; + } + + num_sge = skb_shinfo(skb)->nr_frags + 1; + /* assume normal wqe format + 1 wqebb for task info */ + wqebb_cnt = num_sge + 1; + if
(unlikely(hinic3_maybe_stop_tx(txq, wqebb_cnt))) + return NETDEV_TX_BUSY; + + offload = hinic3_tx_offload(skb, &task, &queue_info, txq); + if (unlikely(offload == HINIC3_TX_OFFLOAD_INVALID)) { + goto tx_drop_pkts; + } else if (!offload) { + wqebb_cnt -= 1; + if (unlikely(num_sge == 1 && + skb->len > HINIC3_COMPACT_WQEE_SKB_MAX_LEN)) + goto tx_drop_pkts; + } + + saved_wq_prod_idx = txq->sq->wq.prod_idx; + saved_sq_owner = txq->sq->owner; + + owner = hinic3_set_wqe_combo(txq, &wqe_combo, offload, num_sge, &pi); + if (offload) + *wqe_combo.task = task; + + tx_info = &txq->tx_info[pi]; + tx_info->skb = skb; + tx_info->wqebb_cnt = wqebb_cnt; + + err = hinic3_tx_map_skb(netdev, skb, txq, tx_info, &wqe_combo); + if (err) { + /* Rollback work queue to reclaim the wqebb we did not use */ + txq->sq->wq.prod_idx = saved_wq_prod_idx; + txq->sq->owner = saved_sq_owner; + goto tx_drop_pkts; + } + + hinic3_prepare_sq_ctrl(&wqe_combo, queue_info, num_sge, owner); + hinic3_write_db(txq->sq, 0, DB_CFLAG_DP_SQ, + hinic3_get_sq_local_pi(txq->sq)); + + return NETDEV_TX_OK; + +tx_drop_pkts: + dev_kfree_skb_any(skb); + +err_tx_skb_pad: + return NETDEV_TX_OK; +} + +netdev_tx_t hinic3_xmit_frame(struct sk_buff *skb, struct net_device *netdev) +{ + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); + u16 q_id = skb_get_queue_mapping(skb); + struct hinic3_txq *txq; + + if (unlikely(!netif_carrier_ok(netdev))) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + if (unlikely(q_id >= nic_dev->q_params.num_qps)) { + txq = &nic_dev->txqs[0]; + goto tx_drop_pkts; + } + txq = &nic_dev->txqs[q_id]; + + return hinic3_send_one_skb(skb, netdev, txq); + +tx_drop_pkts: + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; +} + +static bool is_hw_complete_sq_process(struct hinic3_io_queue *sq) +{ + u16 sw_pi, hw_ci; + + sw_pi = hinic3_get_sq_local_pi(sq); + hw_ci = hinic3_get_sq_hw_ci(sq); + + return sw_pi == hw_ci; +} + +#define HINIC3_FLUSH_QUEUE_POLL_SLEEP_US 10000 +#define HINIC3_FLUSH_QUEUE_POLL_TIMEOUT_US 10000000 +static int hinic3_stop_sq(struct hinic3_txq *txq) +{ + struct hinic3_nic_dev *nic_dev = netdev_priv(txq->netdev); + int err, rc; + + err = read_poll_timeout(hinic3_force_drop_tx_pkt, rc, + is_hw_complete_sq_process(txq->sq) || rc, + HINIC3_FLUSH_QUEUE_POLL_SLEEP_US, + HINIC3_FLUSH_QUEUE_POLL_TIMEOUT_US, + true, nic_dev->hwdev); + if (rc) + return rc; + else + return err; +} + +/* packet transmission should be stopped before calling this function */ +void hinic3_flush_txqs(struct net_device *netdev) +{ + struct hinic3_nic_dev *nic_dev = netdev_priv(netdev); + u16 qid; + int err; + + for (qid = 0; qid < nic_dev->q_params.num_qps; qid++) { + err = hinic3_stop_sq(&nic_dev->txqs[qid]); + if (err) + netdev_err(netdev, "Failed to stop sq%u\n", qid); + } +} + +#define HINIC3_BDS_PER_SQ_WQEBB \ + (HINIC3_SQ_WQEBB_SIZE / sizeof(struct hinic3_sq_bufdesc)) + +int hinic3_tx_poll(struct hinic3_txq *txq, int budget) +{ + struct net_device *netdev = txq->netdev; + u16 hw_ci, sw_ci, q_id = txq->sq->q_id; + struct hinic3_nic_dev *nic_dev; + struct hinic3_tx_info *tx_info; + u16 wqebb_cnt = 0; + int pkts = 0; + + nic_dev = netdev_priv(netdev); + hw_ci = hinic3_get_sq_hw_ci(txq->sq); + dma_rmb(); + sw_ci = hinic3_get_sq_local_ci(txq->sq); + + do { + tx_info = &txq->tx_info[sw_ci]; + + /* Did all wqebb of this wqe complete? 
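+ * hw_ci may point into the middle of a multi-wqebb wqe; free the skb + * only once every wqebb of the wqe has been consumed.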
*/ + if (hw_ci == sw_ci || + ((hw_ci - sw_ci) & txq->q_mask) < tx_info->wqebb_cnt) + break; + + sw_ci = (sw_ci + tx_info->wqebb_cnt) & txq->q_mask; + net_prefetch(&txq->tx_info[sw_ci]); + + wqebb_cnt += tx_info->wqebb_cnt; + pkts++; + + hinic3_tx_free_skb(netdev, tx_info); + } while (likely(pkts < budget)); + + hinic3_wq_put_wqebbs(&txq->sq->wq, wqebb_cnt); + + if (unlikely(__netif_subqueue_stopped(netdev, q_id) && + hinic3_wq_free_wqebbs(&txq->sq->wq) >= 1 && + test_bit(HINIC3_INTF_UP, &nic_dev->flags))) { + struct netdev_queue *netdev_txq = + netdev_get_tx_queue(netdev, q_id); + + __netif_tx_lock(netdev_txq, smp_processor_id()); + /* avoid re-waking subqueue with xmit_frame */ + if (__netif_subqueue_stopped(netdev, q_id)) + netif_wake_subqueue(netdev, q_id); + + __netif_tx_unlock(netdev_txq); + } + + return pkts; +} diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_tx.h b/drivers/net/ethernet/huawei/hinic3/hinic3_tx.h new file mode 100644 index 0000000000000..4f53bbfb20b7a --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_tx.h @@ -0,0 +1,130 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */ + +#ifndef _HINIC3_TX_H_ +#define _HINIC3_TX_H_ + +#include +#include +#include +#include +#include + +#define VXLAN_OFFLOAD_PORT_LE cpu_to_be16(4789) +#define TCP_HDR_DATA_OFF_UNIT_SHIFT 2 +#define TRANSPORT_OFFSET(l4_hdr, skb) ((l4_hdr) - (skb)->data) + +#define HINIC3_COMPACT_WQEE_SKB_MAX_LEN 16383 + +enum sq_wqe_data_format { + SQ_NORMAL_WQE = 0, +}; + +enum sq_wqe_ec_type { + SQ_WQE_COMPACT_TYPE = 0, + SQ_WQE_EXTENDED_TYPE = 1, +}; + +enum sq_wqe_tasksect_len_type { + SQ_WQE_TASKSECT_46BITS = 0, + SQ_WQE_TASKSECT_16BYTES = 1, +}; + +enum hinic3_tx_offload_type { + HINIC3_TX_OFFLOAD_TSO = BIT(0), + HINIC3_TX_OFFLOAD_CSUM = BIT(1), + HINIC3_TX_OFFLOAD_VLAN = BIT(2), + HINIC3_TX_OFFLOAD_INVALID = BIT(3), + HINIC3_TX_OFFLOAD_ESP = BIT(4), +}; + +#define SQ_CTRL_BUFDESC_NUM_MASK GENMASK(26, 19) +#define SQ_CTRL_TASKSECT_LEN_MASK BIT(27) +#define SQ_CTRL_DATA_FORMAT_MASK BIT(28) +#define SQ_CTRL_EXTENDED_MASK BIT(30) +#define SQ_CTRL_OWNER_MASK BIT(31) +#define SQ_CTRL_SET(val, member) \ + FIELD_PREP(SQ_CTRL_##member##_MASK, val) + +#define SQ_CTRL_QUEUE_INFO_PLDOFF_MASK GENMASK(9, 2) +#define SQ_CTRL_QUEUE_INFO_UFO_MASK BIT(10) +#define SQ_CTRL_QUEUE_INFO_TSO_MASK BIT(11) +#define SQ_CTRL_QUEUE_INFO_MSS_MASK GENMASK(26, 13) +#define SQ_CTRL_QUEUE_INFO_UC_MASK BIT(28) + +#define SQ_CTRL_QUEUE_INFO_SET(val, member) \ + FIELD_PREP(SQ_CTRL_QUEUE_INFO_##member##_MASK, val) +#define SQ_CTRL_QUEUE_INFO_GET(val, member) \ + FIELD_GET(SQ_CTRL_QUEUE_INFO_##member##_MASK, val) + +#define SQ_CTRL_MAX_PLDOFF 221 + +#define SQ_TASK_INFO0_TUNNEL_FLAG_MASK BIT(19) +#define SQ_TASK_INFO0_INNER_L4_EN_MASK BIT(24) +#define SQ_TASK_INFO0_INNER_L3_EN_MASK BIT(25) +#define SQ_TASK_INFO0_OUT_L4_EN_MASK BIT(27) +#define SQ_TASK_INFO0_OUT_L3_EN_MASK BIT(28) +#define SQ_TASK_INFO0_SET(val, member) \ + FIELD_PREP(SQ_TASK_INFO0_##member##_MASK, val) + +#define SQ_TASK_INFO3_VLAN_TAG_MASK GENMASK(15, 0) +#define SQ_TASK_INFO3_VLAN_TPID_MASK GENMASK(18, 16) +#define SQ_TASK_INFO3_VLAN_TAG_VALID_MASK BIT(19) +#define SQ_TASK_INFO3_SET(val, member) \ + FIELD_PREP(SQ_TASK_INFO3_##member##_MASK, val) + +struct hinic3_sq_wqe_desc { + u32 ctrl_len; + u32 queue_info; + u32 hi_addr; + u32 lo_addr; +}; + +struct hinic3_sq_task { + u32 pkt_info0; + u32 ip_identify; + u32 rsvd; + u32 vlan_offload; +}; + +struct hinic3_sq_wqe_combo { + struct 
hinic3_sq_wqe_desc *ctrl_bd0; + struct hinic3_sq_task *task; + struct hinic3_sq_bufdesc *bds_head; + struct hinic3_sq_bufdesc *bds_sec2; + u16 first_bds_num; + u32 wqe_type; + u32 task_type; +}; + +struct hinic3_dma_info { + dma_addr_t dma; + u32 len; +}; + +struct hinic3_tx_info { + struct sk_buff *skb; + u16 wqebb_cnt; + struct hinic3_dma_info *dma_info; +}; + +struct hinic3_txq { + struct net_device *netdev; + struct device *dev; + + u16 q_id; + u32 q_mask; + u32 q_depth; + + struct hinic3_tx_info *tx_info; + struct hinic3_io_queue *sq; +} ____cacheline_aligned; + +int hinic3_alloc_txqs(struct net_device *netdev); +void hinic3_free_txqs(struct net_device *netdev); + +netdev_tx_t hinic3_xmit_frame(struct sk_buff *skb, struct net_device *netdev); +int hinic3_tx_poll(struct hinic3_txq *txq, int budget); +void hinic3_flush_txqs(struct net_device *netdev); + +#endif diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_wq.c b/drivers/net/ethernet/huawei/hinic3/hinic3_wq.c new file mode 100644 index 0000000000000..66516dad16c84 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_wq.c @@ -0,0 +1,29 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. + +#include + +#include "hinic3_hwdev.h" +#include "hinic3_wq.h" + +void hinic3_wq_get_multi_wqebbs(struct hinic3_wq *wq, + u16 num_wqebbs, u16 *prod_idx, + struct hinic3_sq_bufdesc **first_wqebb, + struct hinic3_sq_bufdesc **second_part_wqebbs_addr, + u16 *first_part_wqebbs_num) +{ + u32 idx, remaining; + + idx = wq->prod_idx & wq->idx_mask; + wq->prod_idx += num_wqebbs; + *prod_idx = idx; + *first_wqebb = get_q_element(&wq->qpages, idx, &remaining); + if (likely(remaining >= num_wqebbs)) { + *first_part_wqebbs_num = num_wqebbs; + *second_part_wqebbs_addr = NULL; + } else { + *first_part_wqebbs_num = remaining; + idx += remaining; + *second_part_wqebbs_addr = get_q_element(&wq->qpages, idx, NULL); + } +} diff --git a/drivers/net/ethernet/huawei/hinic3/hinic3_wq.h b/drivers/net/ethernet/huawei/hinic3/hinic3_wq.h new file mode 100644 index 0000000000000..d90097eb86924 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic3/hinic3_wq.h @@ -0,0 +1,76 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved. */ + +#ifndef _HINIC3_WQ_H_ +#define _HINIC3_WQ_H_ + +#include + +#include "hinic3_queue_common.h" + +struct hinic3_sq_bufdesc { + /* 31-bits Length, L2NIC only uses length[17:0] */ + u32 len; + u32 rsvd; + u32 hi_addr; + u32 lo_addr; +}; + +/* Work queue is used to submit elements (tx, rx, cmd) to hw. + * Driver is the producer that advances prod_idx. cons_idx is advanced when + * HW reports completions of previously submitted elements. + */ +struct hinic3_wq { + struct hinic3_queue_pages qpages; + /* Unmasked producer/consumer indices that are advanced to natural + * integer overflow regardless of queue depth. + */ + u16 cons_idx; + u16 prod_idx; + + u32 q_depth; + u16 idx_mask; + + /* Work Queue (logical WQEBB array) is mapped to hw via Chip Logical + * Address (CLA) using 1 of 2 levels: + * level 0 - direct mapping of single wq page + * level 1 - indirect mapping of multiple pages via additional page + * table. + * When wq uses level 1, wq_block will hold the allocated indirection + * table. + */ + dma_addr_t wq_block_paddr; + __be64 *wq_block_vaddr; +} ____cacheline_aligned; + +/* Get number of elements in work queue that are in-use. 
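+ * Indices are never masked here; unsigned wrap-around subtraction + * still yields the correct in-use count after overflow.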
*/ +static inline u16 hinic3_wq_get_used(const struct hinic3_wq *wq) +{ + return wq->prod_idx - wq->cons_idx; +} + +static inline u16 hinic3_wq_free_wqebbs(struct hinic3_wq *wq) +{ + /* Don't allow queue to become completely full, report (free - 1). */ + return wq->q_depth - hinic3_wq_get_used(wq) - 1; +} + +static inline void *hinic3_wq_get_one_wqebb(struct hinic3_wq *wq, u16 *pi) +{ + *pi = wq->prod_idx & wq->idx_mask; + wq->prod_idx++; + return get_q_element(&wq->qpages, *pi, NULL); +} + +static inline void hinic3_wq_put_wqebbs(struct hinic3_wq *wq, u16 num_wqebbs) +{ + wq->cons_idx += num_wqebbs; +} + +void hinic3_wq_get_multi_wqebbs(struct hinic3_wq *wq, + u16 num_wqebbs, u16 *prod_idx, + struct hinic3_sq_bufdesc **first_wqebb, + struct hinic3_sq_bufdesc **second_part_wqebbs_addr, + u16 *first_part_wqebbs_num); + +#endif diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c index 25b8a35560042..417dfa18daae3 100644 --- a/drivers/net/ethernet/ibm/emac/core.c +++ b/drivers/net/ethernet/ibm/emac/core.c @@ -2554,17 +2554,12 @@ static int emac_dt_mdio_probe(struct emac_instance *dev) struct mii_bus *bus; int res; - mii_np = of_get_child_by_name(dev->ofdev->dev.of_node, "mdio"); + mii_np = of_get_available_child_by_name(dev->ofdev->dev.of_node, "mdio"); if (!mii_np) { dev_err(&dev->ofdev->dev, "no mdio definition found."); return -ENODEV; } - if (!of_device_is_available(mii_np)) { - res = -ENODEV; - goto put_node; - } - bus = devm_mdiobus_alloc(&dev->ofdev->dev); if (!bus) { res = -ENOMEM; diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 0676fc547b6f4..480606d1245ea 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -4829,6 +4829,18 @@ static void vnic_add_client_data(struct ibmvnic_adapter *adapter, strscpy(vlcd->name, adapter->netdev->name, len); } +static void ibmvnic_print_hex_dump(struct net_device *dev, void *buf, + size_t len) +{ + unsigned char hex_str[16 * 3]; + + for (size_t i = 0; i < len; i += 16) { + hex_dump_to_buffer((unsigned char *)buf + i, len - i, 16, 8, + hex_str, sizeof(hex_str), false); + netdev_dbg(dev, "%s\n", hex_str); + } +} + static int send_login(struct ibmvnic_adapter *adapter) { struct ibmvnic_login_rsp_buffer *login_rsp_buffer; @@ -4939,10 +4951,8 @@ static int send_login(struct ibmvnic_adapter *adapter) vnic_add_client_data(adapter, vlcd); netdev_dbg(adapter->netdev, "Login Buffer:\n"); - for (i = 0; i < (adapter->login_buf_sz - 1) / 8 + 1; i++) { - netdev_dbg(adapter->netdev, "%016lx\n", - ((unsigned long *)(adapter->login_buf))[i]); - } + ibmvnic_print_hex_dump(adapter->netdev, adapter->login_buf, + adapter->login_buf_sz); memset(&crq, 0, sizeof(crq)); crq.login.first = IBMVNIC_CRQ_CMD; @@ -5319,15 +5329,13 @@ static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter) { struct device *dev = &adapter->vdev->dev; struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf; - int i; dma_unmap_single(dev, adapter->ip_offload_tok, sizeof(adapter->ip_offload_buf), DMA_FROM_DEVICE); netdev_dbg(adapter->netdev, "Query IP Offload Buffer:\n"); - for (i = 0; i < (sizeof(adapter->ip_offload_buf) - 1) / 8 + 1; i++) - netdev_dbg(adapter->netdev, "%016lx\n", - ((unsigned long *)(buf))[i]); + ibmvnic_print_hex_dump(adapter->netdev, buf, + sizeof(adapter->ip_offload_buf)); netdev_dbg(adapter->netdev, "ipv4_chksum = %d\n", buf->ipv4_chksum); netdev_dbg(adapter->netdev, "ipv6_chksum = %d\n", buf->ipv6_chksum); @@ -5558,10 +5566,8 @@ 
static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq, netdev->mtu = adapter->req_mtu - ETH_HLEN; netdev_dbg(adapter->netdev, "Login Response Buffer:\n"); - for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) { - netdev_dbg(adapter->netdev, "%016lx\n", - ((unsigned long *)(adapter->login_rsp_buf))[i]); - } + ibmvnic_print_hex_dump(netdev, adapter->login_rsp_buf, + adapter->login_rsp_buf_sz); /* Sanity checks */ if (login->num_txcomp_subcrqs != login_rsp->num_txsubm_subcrqs || diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig index 24ec9a4f1ffa8..1640d2f278330 100644 --- a/drivers/net/ethernet/intel/Kconfig +++ b/drivers/net/ethernet/intel/Kconfig @@ -264,6 +264,7 @@ config I40EVF tristate "Intel(R) Ethernet Adaptive Virtual Function support" select IAVF depends on PCI_MSI + depends on PTP_1588_CLOCK_OPTIONAL help This driver supports virtual functions for Intel XL710, X710, X722, XXV710, and all devices advertising support for @@ -336,7 +337,7 @@ config ICE_SWITCHDEV config ICE_HWTS bool "Support HW cross-timestamp on platforms with PTM support" default y - depends on ICE && X86 + depends on ICE && X86 && PCIE_PTM help Say Y to enable hardware supported cross-timestamping on platforms with PCIe PTM support. The cross-timestamp is available through diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c index d7df2a0ed6290..44249dd91bd67 100644 --- a/drivers/net/ethernet/intel/e1000e/mac.c +++ b/drivers/net/ethernet/intel/e1000e/mac.c @@ -331,8 +331,21 @@ void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw, } /* replace the entire MTA table */ - for (i = hw->mac.mta_reg_count - 1; i >= 0; i--) + for (i = hw->mac.mta_reg_count - 1; i >= 0; i--) { E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, hw->mac.mta_shadow[i]); + + if (IS_ENABLED(CONFIG_PREEMPT_RT)) { + /* + * Do not queue up too many posted writes to prevent + * increased latency for other devices on the + * interconnect. Flush after each 8th posted write, + * to keep additional execution time low while still + * preventing increased latency. + */ + if (!(i % 8) && i) + e1e_flush(); + } + } e1e_flush(); } diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c index e28f1905a4a0f..9f47388eaba53 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c +++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c @@ -2,6 +2,7 @@ /* Copyright(c) 2018 Intel Corporation. */ #include +#include #include #include "i40e_txrx_common.h" #include "i40e_xsk.h" @@ -529,7 +530,8 @@ static void i40e_xmit_pkt_batch(struct i40e_ring *xdp_ring, struct xdp_desc *des dma_addr_t dma; u32 i; - loop_unrolled_for(i = 0; i < PKTS_PER_BATCH; i++) { + unrolled_count(PKTS_PER_BATCH) + for (i = 0; i < PKTS_PER_BATCH; i++) { u32 cmd = I40E_TX_DESC_CMD_ICRC | xsk_is_eop_desc(&desc[i]); dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc[i].addr); diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.h b/drivers/net/ethernet/intel/i40e/i40e_xsk.h index ef156fad52f26..dd16351a7af84 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_xsk.h +++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.h @@ -6,7 +6,7 @@ #include -/* This value should match the pragma in the loop_unrolled_for +/* This value should match the pragma in the unrolled_count() * macro. Why 4? It is strictly empirical. 
It seems to be a good * compromise between the advantage of having simultaneous outstanding * reads to the DMA array that can hide each other's latency and the @@ -14,14 +6,6 @@ */ #define PKTS_PER_BATCH 4 -#ifdef __clang__ -#define loop_unrolled_for _Pragma("clang loop unroll_count(4)") for -#elif __GNUC__ >= 8 -#define loop_unrolled_for _Pragma("GCC unroll 4") for -#else -#define loop_unrolled_for for -#endif - struct i40e_ring; struct i40e_vsi; struct net_device; diff --git a/drivers/net/ethernet/intel/iavf/Makefile b/drivers/net/ethernet/intel/iavf/Makefile index 356ac9faa5bf9..e13720a728ff3 100644 --- a/drivers/net/ethernet/intel/iavf/Makefile +++ b/drivers/net/ethernet/intel/iavf/Makefile @@ -13,3 +13,5 @@ obj-$(CONFIG_IAVF) += iavf.o iavf-y := iavf_main.o iavf_ethtool.o iavf_virtchnl.o iavf_fdir.o \ iavf_adv_rss.o iavf_txrx.o iavf_common.o iavf_adminq.o + +iavf-$(CONFIG_PTP_1588_CLOCK) += iavf_ptp.o diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h index 532a0a595fe8e..9de3e0ba37316 100644 --- a/drivers/net/ethernet/intel/iavf/iavf.h +++ b/drivers/net/ethernet/intel/iavf/iavf.h @@ -41,6 +41,7 @@ #include "iavf_txrx.h" #include "iavf_fdir.h" #include "iavf_adv_rss.h" +#include "iavf_types.h" #include #define DEFAULT_DEBUG_LEVEL_SHIFT 3 @@ -82,7 +83,7 @@ struct iavf_vsi { #define MAXIMUM_ETHERNET_VLAN_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN) -#define IAVF_RX_DESC(R, i) (&(((union iavf_32byte_rx_desc *)((R)->desc))[i])) +#define IAVF_RX_DESC(R, i) (&(((struct iavf_rx_desc *)((R)->desc))[i])) #define IAVF_TX_DESC(R, i) (&(((struct iavf_tx_desc *)((R)->desc))[i])) #define IAVF_TX_CTXTDESC(R, i) \ (&(((struct iavf_tx_context_desc *)((R)->desc))[i])) @@ -271,6 +272,7 @@ struct iavf_adapter { /* Lock to protect accesses to MAC and VLAN lists */ spinlock_t mac_vlan_list_lock; char misc_vector_name[IFNAMSIZ + 9]; + u8 rxdid; int num_active_queues; int num_req_queues; @@ -343,6 +345,17 @@ struct iavf_adapter { #define IAVF_FLAG_AQ_CONFIGURE_QUEUES_BW BIT_ULL(39) #define IAVF_FLAG_AQ_CFG_QUEUES_QUANTA_SIZE BIT_ULL(40) #define IAVF_FLAG_AQ_GET_QOS_CAPS BIT_ULL(41) +#define IAVF_FLAG_AQ_GET_SUPPORTED_RXDIDS BIT_ULL(42) +#define IAVF_FLAG_AQ_GET_PTP_CAPS BIT_ULL(43) +#define IAVF_FLAG_AQ_SEND_PTP_CMD BIT_ULL(44) + + /* AQ messages that must be sent after IAVF_FLAG_AQ_GET_CONFIG, in + * order to negotiate extended capabilities. + */ +#define IAVF_FLAG_AQ_EXTENDED_CAPS \ + (IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS | \ + IAVF_FLAG_AQ_GET_SUPPORTED_RXDIDS | \ + IAVF_FLAG_AQ_GET_PTP_CAPS) /* flags for processing extended capability messages during * __IAVF_INIT_EXTENDED_CAPS.
Each capability exchange requires @@ -354,10 +367,18 @@ struct iavf_adapter { u64 extended_caps; #define IAVF_EXTENDED_CAP_SEND_VLAN_V2 BIT_ULL(0) #define IAVF_EXTENDED_CAP_RECV_VLAN_V2 BIT_ULL(1) +#define IAVF_EXTENDED_CAP_SEND_RXDID BIT_ULL(2) +#define IAVF_EXTENDED_CAP_RECV_RXDID BIT_ULL(3) +#define IAVF_EXTENDED_CAP_SEND_PTP BIT_ULL(4) +#define IAVF_EXTENDED_CAP_RECV_PTP BIT_ULL(5) #define IAVF_EXTENDED_CAPS \ (IAVF_EXTENDED_CAP_SEND_VLAN_V2 | \ - IAVF_EXTENDED_CAP_RECV_VLAN_V2) + IAVF_EXTENDED_CAP_RECV_VLAN_V2 | \ + IAVF_EXTENDED_CAP_SEND_RXDID | \ + IAVF_EXTENDED_CAP_RECV_RXDID | \ + IAVF_EXTENDED_CAP_SEND_PTP | \ + IAVF_EXTENDED_CAP_RECV_PTP) /* Lock to prevent possible clobbering of * current_netdev_promisc_flags @@ -417,12 +438,18 @@ struct iavf_adapter { VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF) #define QOS_ALLOWED(_a) ((_a)->vf_res->vf_cap_flags & \ VIRTCHNL_VF_OFFLOAD_QOS) +#define IAVF_RXDID_ALLOWED(a) \ + ((a)->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC) +#define IAVF_PTP_ALLOWED(a) \ + ((a)->vf_res->vf_cap_flags & VIRTCHNL_VF_CAP_PTP) struct virtchnl_vf_resource *vf_res; /* incl. all VSIs */ struct virtchnl_vsi_resource *vsi_res; /* our LAN VSI */ struct virtchnl_version_info pf_version; #define PF_IS_V11(_a) (((_a)->pf_version.major == 1) && \ ((_a)->pf_version.minor == 1)) struct virtchnl_vlan_caps vlan_v2_caps; + u64 supp_rxdids; + struct iavf_ptp ptp; u16 msg_enable; struct iavf_eth_stats current_stats; struct virtchnl_qos_cap_list *qos_caps; @@ -555,6 +582,10 @@ int iavf_send_vf_config_msg(struct iavf_adapter *adapter); int iavf_get_vf_config(struct iavf_adapter *adapter); int iavf_get_vf_vlan_v2_caps(struct iavf_adapter *adapter); int iavf_send_vf_offload_vlan_v2_msg(struct iavf_adapter *adapter); +int iavf_send_vf_supported_rxdids_msg(struct iavf_adapter *adapter); +int iavf_get_vf_supported_rxdids(struct iavf_adapter *adapter); +int iavf_send_vf_ptp_caps_msg(struct iavf_adapter *adapter); +int iavf_get_vf_ptp_caps(struct iavf_adapter *adapter); void iavf_set_queue_vlan_tag_loc(struct iavf_adapter *adapter); u16 iavf_get_num_vlans_added(struct iavf_adapter *adapter); void iavf_irq_enable(struct iavf_adapter *adapter, bool flush); diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c index 74a1e9fe18212..288bb5b2e72ef 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c +++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c @@ -1808,7 +1808,7 @@ static int iavf_set_rxfh(struct net_device *netdev, static const struct ethtool_ops iavf_ethtool_ops = { .supported_coalesce_params = ETHTOOL_COALESCE_USECS | ETHTOOL_COALESCE_USE_ADAPTIVE, - .cap_rss_sym_xor_supported = true, + .supported_input_xfrm = RXH_XFRM_SYM_XOR, .get_drvinfo = iavf_get_drvinfo, .get_link = ethtool_op_get_link, .get_ringparam = iavf_get_ringparam, diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c index 852e5b62f0a5d..6d7ba4d67a193 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_main.c +++ b/drivers/net/ethernet/intel/iavf/iavf_main.c @@ -2,8 +2,10 @@ /* Copyright(c) 2013 - 2018 Intel Corporation. 
*/ #include +#include "iavf.h" +#include "iavf_ptp.h" #include "iavf_prototype.h" /* All iavf tracepoints are defined by the include below, which must * be included exactly once across the whole kernel with @@ -709,6 +711,47 @@ static void iavf_configure_tx(struct iavf_adapter *adapter) adapter->tx_rings[i].tail = hw->hw_addr + IAVF_QTX_TAIL1(i); } +/** + * iavf_select_rx_desc_format - Select Rx descriptor format + * @adapter: adapter private structure + * + * Select which Rx descriptor format to use, based on availability and + * enabled features. + * + * Return: the desired RXDID to select for a given Rx queue, as defined by + * enum virtchnl_rxdid_format. + */ +static u8 iavf_select_rx_desc_format(const struct iavf_adapter *adapter) +{ + u64 rxdids = adapter->supp_rxdids; + + /* If we did not negotiate VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC, we must + * stick with the default value of the legacy 32 byte format. + */ + if (!IAVF_RXDID_ALLOWED(adapter)) + return VIRTCHNL_RXDID_1_32B_BASE; + + /* Rx timestamping requires the use of flexible NIC descriptors */ + if (iavf_ptp_cap_supported(adapter, VIRTCHNL_1588_PTP_CAP_RX_TSTAMP)) { + if (rxdids & BIT(VIRTCHNL_RXDID_2_FLEX_SQ_NIC)) + return VIRTCHNL_RXDID_2_FLEX_SQ_NIC; + + pci_warn(adapter->pdev, + "Unable to negotiate flexible descriptor format\n"); + } + + /* Warn if the PF does not list support for the default legacy + * descriptor format. This shouldn't happen, as this is the format + * used if VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC is not supported. It is + * likely caused by a bug in the PF implementation failing to indicate + * support for the format. + */ + if (!(rxdids & VIRTCHNL_RXDID_1_32B_BASE_M)) + netdev_warn(adapter->netdev, "PF does not list support for default Rx descriptor format\n"); + + return VIRTCHNL_RXDID_1_32B_BASE; +} + /** * iavf_configure_rx - Configure Receive Unit after Reset * @adapter: board private structure @@ -719,8 +762,12 @@ static void iavf_configure_rx(struct iavf_adapter *adapter) { struct iavf_hw *hw = &adapter->hw; - for (u32 i = 0; i < adapter->num_active_queues; i++) + adapter->rxdid = iavf_select_rx_desc_format(adapter); + + for (u32 i = 0; i < adapter->num_active_queues; i++) { adapter->rx_rings[i].tail = hw->hw_addr + IAVF_QRX_TAIL1(i); + adapter->rx_rings[i].rxdid = adapter->rxdid; + } } /** @@ -1983,7 +2030,7 @@ static int iavf_reinit_interrupt_scheme(struct iavf_adapter *adapter, bool runni static void iavf_finish_config(struct work_struct *work) { struct iavf_adapter *adapter; - bool netdev_released = false; + bool locks_released = false; int pairs, err; adapter = container_of(work, struct iavf_adapter, finish_config); @@ -2012,19 +2059,22 @@ static void iavf_finish_config(struct work_struct *work) netif_set_real_num_tx_queues(adapter->netdev, pairs); if (adapter->netdev->reg_state != NETREG_REGISTERED) { + mutex_unlock(&adapter->crit_lock); netdev_unlock(adapter->netdev); - netdev_released = true; + locks_released = true; err = register_netdevice(adapter->netdev); if (err) { dev_err(&adapter->pdev->dev, "Unable to register netdev (%d)\n", err); /* go back and try again. */ + mutex_lock(&adapter->crit_lock); iavf_free_rss(adapter); iavf_free_misc_irq(adapter); iavf_reset_interrupt_capability(adapter); iavf_change_state(adapter, __IAVF_INIT_CONFIG_ADAPTER); + mutex_unlock(&adapter->crit_lock); goto out; } } @@ -2040,9 +2090,10 @@ static void iavf_finish_config(struct work_struct *work) } out: - mutex_unlock(&adapter->crit_lock); - if (!netdev_released) + if (!locks_released) { +
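+ /* crit_lock and the netdev lock are still held here; drop them in + * reverse acquisition order. + */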
mutex_unlock(&adapter->crit_lock); netdev_unlock(adapter->netdev); + } rtnl_unlock(); } @@ -2071,6 +2122,10 @@ static int iavf_process_aq_command(struct iavf_adapter *adapter) return iavf_send_vf_config_msg(adapter); if (adapter->aq_required & IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS) return iavf_send_vf_offload_vlan_v2_msg(adapter); + if (adapter->aq_required & IAVF_FLAG_AQ_GET_SUPPORTED_RXDIDS) + return iavf_send_vf_supported_rxdids_msg(adapter); + if (adapter->aq_required & IAVF_FLAG_AQ_GET_PTP_CAPS) + return iavf_send_vf_ptp_caps_msg(adapter); if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_QUEUES) { iavf_disable_queues(adapter); return 0; @@ -2235,7 +2290,10 @@ static int iavf_process_aq_command(struct iavf_adapter *adapter) iavf_enable_vlan_insertion_v2(adapter, ETH_P_8021AD); return 0; } - + if (adapter->aq_required & IAVF_FLAG_AQ_SEND_PTP_CMD) { + iavf_virtchnl_send_ptp_cmd(adapter); + return IAVF_SUCCESS; + } if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_STATS) { iavf_request_stats(adapter); return 0; @@ -2599,6 +2657,112 @@ static void iavf_init_recv_offload_vlan_v2_caps(struct iavf_adapter *adapter) iavf_change_state(adapter, __IAVF_INIT_FAILED); } +/** + * iavf_init_send_supported_rxdids - part of querying for supported RXDID + * formats + * @adapter: board private structure + * + * Function processes send of the request for supported RXDIDs to the PF. + * Must clear IAVF_EXTENDED_CAP_RECV_RXDID if the message is not sent, e.g. + * due to the PF not negotiating VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC. + */ +static void iavf_init_send_supported_rxdids(struct iavf_adapter *adapter) +{ + int ret; + + ret = iavf_send_vf_supported_rxdids_msg(adapter); + if (ret == -EOPNOTSUPP) { + /* PF does not support VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC. In this + * case, we did not send the capability exchange message and + * do not expect a response. + */ + adapter->extended_caps &= ~IAVF_EXTENDED_CAP_RECV_RXDID; + } + + /* We sent the message, so move on to the next step */ + adapter->extended_caps &= ~IAVF_EXTENDED_CAP_SEND_RXDID; +} + +/** + * iavf_init_recv_supported_rxdids - part of querying for supported RXDID + * formats + * @adapter: board private structure + * + * Function processes receipt of the supported RXDIDs message from the PF. + **/ +static void iavf_init_recv_supported_rxdids(struct iavf_adapter *adapter) +{ + int ret; + + memset(&adapter->supp_rxdids, 0, sizeof(adapter->supp_rxdids)); + + ret = iavf_get_vf_supported_rxdids(adapter); + if (ret) + goto err; + + /* We've processed the PF response to the + * VIRTCHNL_OP_GET_SUPPORTED_RXDIDS message we sent previously. + */ + adapter->extended_caps &= ~IAVF_EXTENDED_CAP_RECV_RXDID; + return; + +err: + /* We didn't receive a reply. Make sure we try sending again when + * __IAVF_INIT_FAILED attempts to recover. + */ + adapter->extended_caps |= IAVF_EXTENDED_CAP_SEND_RXDID; + iavf_change_state(adapter, __IAVF_INIT_FAILED); +} + +/** + * iavf_init_send_ptp_caps - part of querying for extended PTP capabilities + * @adapter: board private structure + * + * Function processes send of the request for 1588 PTP capabilities to the PF. + * Must clear IAVF_EXTENDED_CAP_RECV_PTP if the message is not sent, e.g. + * due to the PF not negotiating VIRTCHNL_VF_CAP_PTP. + */ +static void iavf_init_send_ptp_caps(struct iavf_adapter *adapter) +{ + if (iavf_send_vf_ptp_caps_msg(adapter) == -EOPNOTSUPP) { + /* PF does not support VIRTCHNL_VF_CAP_PTP. In this case, we + * did not send the capability exchange message and do not + * expect a response.
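+ * Clearing RECV_PTP stops the init state machine from waiting for a + * reply that will never arrive.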
+ */ + adapter->extended_caps &= ~IAVF_EXTENDED_CAP_RECV_PTP; + } + + /* We sent the message, so move on to the next step */ + adapter->extended_caps &= ~IAVF_EXTENDED_CAP_SEND_PTP; +} + +/** + * iavf_init_recv_ptp_caps - part of querying for supported PTP capabilities + * @adapter: board private structure + * + * Function processes receipt of the PTP capabilities supported on this VF. + **/ +static void iavf_init_recv_ptp_caps(struct iavf_adapter *adapter) +{ + memset(&adapter->ptp.hw_caps, 0, sizeof(adapter->ptp.hw_caps)); + + if (iavf_get_vf_ptp_caps(adapter)) + goto err; + + /* We've processed the PF response to the VIRTCHNL_OP_1588_PTP_GET_CAPS + * message we sent previously. + */ + adapter->extended_caps &= ~IAVF_EXTENDED_CAP_RECV_PTP; + return; + +err: + /* We didn't receive a reply. Make sure we try sending again when + * __IAVF_INIT_FAILED attempts to recover. + */ + adapter->extended_caps |= IAVF_EXTENDED_CAP_SEND_PTP; + iavf_change_state(adapter, __IAVF_INIT_FAILED); +} + /** * iavf_init_process_extended_caps - Part of driver startup * @adapter: board private structure @@ -2623,6 +2787,24 @@ static void iavf_init_process_extended_caps(struct iavf_adapter *adapter) return; } + /* Process capability exchange for RXDID formats */ + if (adapter->extended_caps & IAVF_EXTENDED_CAP_SEND_RXDID) { + iavf_init_send_supported_rxdids(adapter); + return; + } else if (adapter->extended_caps & IAVF_EXTENDED_CAP_RECV_RXDID) { + iavf_init_recv_supported_rxdids(adapter); + return; + } + + /* Process capability exchange for PTP features */ + if (adapter->extended_caps & IAVF_EXTENDED_CAP_SEND_PTP) { + iavf_init_send_ptp_caps(adapter); + return; + } else if (adapter->extended_caps & IAVF_EXTENDED_CAP_RECV_PTP) { + iavf_init_recv_ptp_caps(adapter); + return; + } + /* When we reach here, no further extended capabilities exchanges are * necessary, so we finally transition into __IAVF_INIT_CONFIG_ADAPTER */ @@ -2714,6 +2896,9 @@ static void iavf_init_config_adapter(struct iavf_adapter *adapter) if (QOS_ALLOWED(adapter)) adapter->aq_required |= IAVF_FLAG_AQ_GET_QOS_CAPS; + /* Setup initial PTP configuration */ + iavf_ptp_init(adapter); + iavf_schedule_finish_config(adapter); return; @@ -3139,15 +3324,18 @@ static void iavf_reset_task(struct work_struct *work) } adapter->aq_required |= IAVF_FLAG_AQ_GET_CONFIG; - /* always set since VIRTCHNL_OP_GET_VF_RESOURCES has not been - * sent/received yet, so VLAN_V2_ALLOWED() cannot is not reliable here, - * however the VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS won't be sent until - * VIRTCHNL_OP_GET_VF_RESOURCES and VIRTCHNL_VF_OFFLOAD_VLAN_V2 have - * been successfully sent and negotiated - */ - adapter->aq_required |= IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS; adapter->aq_required |= IAVF_FLAG_AQ_MAP_VECTORS; + /* Certain capabilities require an extended negotiation process using + * extra messages that must be processed after getting the VF + * configuration. The related checks such as VLAN_V2_ALLOWED() are not + * reliable here, since the configuration has not yet been negotiated. + * + * Always set these flags, since the related VIRTCHNL messages won't + * be sent until after VIRTCHNL_OP_GET_VF_RESOURCES.
+ */ + adapter->aq_required |= IAVF_FLAG_AQ_EXTENDED_CAPS; + spin_lock_bh(&adapter->mac_vlan_list_lock); /* Delete filter for the current MAC address, it could have @@ -3707,10 +3895,8 @@ static int __iavf_setup_tc(struct net_device *netdev, void *type_data) if (test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section)) return 0; - netdev_lock(netdev); netif_set_real_num_rx_queues(netdev, total_qps); netif_set_real_num_tx_queues(netdev, total_qps); - netdev_unlock(netdev); return ret; } @@ -4375,22 +4561,21 @@ static int iavf_open(struct net_device *netdev) struct iavf_adapter *adapter = netdev_priv(netdev); int err; + netdev_assert_locked(netdev); + if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) { dev_err(&adapter->pdev->dev, "Unable to open device due to PF driver failure.\n"); return -EIO; } - netdev_lock(netdev); while (!mutex_trylock(&adapter->crit_lock)) { /* If we are in __IAVF_INIT_CONFIG_ADAPTER state the crit_lock * is already taken and iavf_open is called from an upper * device's notifier reacting on NETDEV_REGISTER event. * We have to leave here to avoid dead lock. */ - if (adapter->state == __IAVF_INIT_CONFIG_ADAPTER) { - netdev_unlock(netdev); + if (adapter->state == __IAVF_INIT_CONFIG_ADAPTER) return -EBUSY; - } usleep_range(500, 1000); } @@ -4439,7 +4624,6 @@ static int iavf_open(struct net_device *netdev) iavf_irq_enable(adapter, true); mutex_unlock(&adapter->crit_lock); - netdev_unlock(netdev); return 0; @@ -4452,7 +4636,6 @@ static int iavf_open(struct net_device *netdev) iavf_free_all_tx_resources(adapter); err_unlock: mutex_unlock(&adapter->crit_lock); - netdev_unlock(netdev); return err; } @@ -4474,12 +4657,12 @@ static int iavf_close(struct net_device *netdev) u64 aq_to_restore; int status; - netdev_lock(netdev); + netdev_assert_locked(netdev); + mutex_lock(&adapter->crit_lock); if (adapter->state <= __IAVF_DOWN_PENDING) { mutex_unlock(&adapter->crit_lock); - netdev_unlock(netdev); return 0; } @@ -4532,6 +4715,7 @@ static int iavf_close(struct net_device *netdev) if (!status) netdev_warn(netdev, "Device resources not yet released\n"); + netdev_lock(netdev); mutex_lock(&adapter->crit_lock); adapter->aq_required |= aq_to_restore; mutex_unlock(&adapter->crit_lock); @@ -4996,6 +5180,25 @@ static netdev_features_t iavf_fix_features(struct net_device *netdev, return iavf_fix_strip_features(adapter, features); } +static int iavf_hwstamp_get(struct net_device *netdev, + struct kernel_hwtstamp_config *config) +{ + struct iavf_adapter *adapter = netdev_priv(netdev); + + *config = adapter->ptp.hwtstamp_config; + + return 0; +} + +static int iavf_hwstamp_set(struct net_device *netdev, + struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack) +{ + struct iavf_adapter *adapter = netdev_priv(netdev); + + return iavf_ptp_set_ts_config(adapter, config, extack); +} + static int iavf_verify_shaper(struct net_shaper_binding *binding, const struct net_shaper *shaper, @@ -5104,6 +5307,8 @@ static const struct net_device_ops iavf_netdev_ops = { .ndo_set_features = iavf_set_features, .ndo_setup_tc = iavf_setup_tc, .net_shaper_ops = &iavf_shaper_ops, + .ndo_hwtstamp_get = iavf_hwstamp_get, + .ndo_hwtstamp_set = iavf_hwstamp_set, }; /** @@ -5358,6 +5563,10 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /* Setup the wait queue for indicating virtchannel events */ init_waitqueue_head(&adapter->vc_waitqueue); + INIT_LIST_HEAD(&adapter->ptp.aq_cmds); + init_waitqueue_head(&adapter->ptp.phc_time_waitqueue); + mutex_init(&adapter->ptp.aq_cmd_lock); + 
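
The ndo_hwtstamp_get()/ndo_hwtstamp_set() callbacks wired up here are reached when an application issues the SIOCGHWTSTAMP/SIOCSHWTSTAMP ioctls. A minimal sketch of the userspace side (the interface name is a placeholder and error handling is trimmed; the write-back behavior it prints relies on the filter coercion described in iavf_ptp.c below):

    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <net/if.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <linux/net_tstamp.h>
    #include <linux/sockios.h>

    int main(void)
    {
    	struct hwtstamp_config cfg = {
    		.tx_type = HWTSTAMP_TX_OFF,	/* iavf rejects HWTSTAMP_TX_ON */
    		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
    	};
    	struct ifreq ifr = {};
    	int fd = socket(AF_INET, SOCK_DGRAM, 0);

    	if (fd < 0)
    		return 1;
    	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* placeholder netdev */
    	ifr.ifr_data = (void *)&cfg;
    	if (ioctl(fd, SIOCSHWTSTAMP, &ifr)) {
    		perror("SIOCSHWTSTAMP");
    		close(fd);
    		return 1;
    	}
    	/* The kernel copies the config back, so the application sees the
    	 * mode actually in force; for iavf that is HWTSTAMP_FILTER_ALL.
    	 */
    	printf("rx_filter in force: %d\n", cfg.rx_filter);
    	close(fd);
    	return 0;
    }
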
queue_delayed_work(adapter->wq, &adapter->watchdog_task, msecs_to_jiffies(5 * (pdev->devfn & 0x07))); /* Initialization goes on in the work. Do not add more of it below. */ @@ -5514,6 +5723,8 @@ static void iavf_remove(struct pci_dev *pdev) msleep(50); } + iavf_ptp_release(adapter); + iavf_misc_irq_disable(adapter); /* Shut down all the garbage mashers on the detention level */ cancel_work_sync(&adapter->reset_task); diff --git a/drivers/net/ethernet/intel/iavf/iavf_ptp.c b/drivers/net/ethernet/intel/iavf/iavf_ptp.c new file mode 100644 index 0000000000000..b4d5eda2e84fc --- /dev/null +++ b/drivers/net/ethernet/intel/iavf/iavf_ptp.c @@ -0,0 +1,485 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2024 Intel Corporation. */ + +#include "iavf.h" +#include "iavf_ptp.h" + +#define iavf_clock_to_adapter(info) \ + container_of_const(info, struct iavf_adapter, ptp.info) + +/** + * iavf_ptp_disable_rx_tstamp - Disable timestamping in Rx rings + * @adapter: private adapter structure + * + * Disable timestamp reporting for all Rx rings. + */ +static void iavf_ptp_disable_rx_tstamp(struct iavf_adapter *adapter) +{ + for (u32 i = 0; i < adapter->num_active_queues; i++) + adapter->rx_rings[i].flags &= ~IAVF_TXRX_FLAGS_HW_TSTAMP; +} + +/** + * iavf_ptp_enable_rx_tstamp - Enable timestamping in Rx rings + * @adapter: private adapter structure + * + * Enable timestamp reporting for all Rx rings. + */ +static void iavf_ptp_enable_rx_tstamp(struct iavf_adapter *adapter) +{ + for (u32 i = 0; i < adapter->num_active_queues; i++) + adapter->rx_rings[i].flags |= IAVF_TXRX_FLAGS_HW_TSTAMP; +} + +/** + * iavf_ptp_set_timestamp_mode - Set device timestamping mode + * @adapter: private adapter structure + * @config: pointer to kernel_hwtstamp_config + * + * Set the timestamping mode requested from the userspace. + * + * Note: this function always translates Rx timestamp requests for any packet + * category into HWTSTAMP_FILTER_ALL. + * + * Return: 0 on success, negative error code otherwise. + */ +static int iavf_ptp_set_timestamp_mode(struct iavf_adapter *adapter, + struct kernel_hwtstamp_config *config) +{ + /* Reserved for future extensions. */ + if (config->flags) + return -EINVAL; + + switch (config->tx_type) { + case HWTSTAMP_TX_OFF: + break; + case HWTSTAMP_TX_ON: + return -EOPNOTSUPP; + default: + return -ERANGE; + } + + if (config->rx_filter == HWTSTAMP_FILTER_NONE) { + iavf_ptp_disable_rx_tstamp(adapter); + return 0; + } else if (config->rx_filter > HWTSTAMP_FILTER_NTP_ALL) { + return -ERANGE; + } else if (!(iavf_ptp_cap_supported(adapter, + VIRTCHNL_1588_PTP_CAP_RX_TSTAMP))) { + return -EOPNOTSUPP; + } + + config->rx_filter = HWTSTAMP_FILTER_ALL; + iavf_ptp_enable_rx_tstamp(adapter); + + return 0; +} + +/** + * iavf_ptp_set_ts_config - Set timestamping configuration + * @adapter: private adapter structure + * @config: pointer to kernel_hwtstamp_config structure + * @extack: pointer to netlink_ext_ack structure + * + * Program the requested timestamping configuration to the device. + * + * Return: 0 on success, negative error code otherwise. 
+ */ +int iavf_ptp_set_ts_config(struct iavf_adapter *adapter, + struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack) +{ + int err; + + err = iavf_ptp_set_timestamp_mode(adapter, config); + if (err) + return err; + + /* Save successful settings for future reference */ + adapter->ptp.hwtstamp_config = *config; + + return 0; +} + +/** + * iavf_ptp_cap_supported - Check if a PTP capability is supported + * @adapter: private adapter structure + * @cap: the capability bitmask to check + * + * Return: true if every capability set in cap is also set in the enabled + * capabilities reported by the PF, false otherwise. + */ +bool iavf_ptp_cap_supported(const struct iavf_adapter *adapter, u32 cap) +{ + if (!IAVF_PTP_ALLOWED(adapter)) + return false; + + /* Only return true if every bit in cap is set in hw_caps.caps */ + return (adapter->ptp.hw_caps.caps & cap) == cap; +} + +/** + * iavf_allocate_ptp_cmd - Allocate a PTP command message structure + * @v_opcode: the virtchnl opcode + * @msglen: length in bytes of the associated virtchnl structure + * + * Allocates a PTP command message and pre-fills it with the provided message + * length and opcode. + * + * Return: allocated PTP command, or NULL on allocation failure. + */ +static struct iavf_ptp_aq_cmd *iavf_allocate_ptp_cmd(enum virtchnl_ops v_opcode, + u16 msglen) +{ + struct iavf_ptp_aq_cmd *cmd; + + cmd = kzalloc(struct_size(cmd, msg, msglen), GFP_KERNEL); + if (!cmd) + return NULL; + + cmd->v_opcode = v_opcode; + cmd->msglen = msglen; + + return cmd; +} + +/** + * iavf_queue_ptp_cmd - Queue PTP command for sending over virtchnl + * @adapter: private adapter structure + * @cmd: the command structure to send + * + * Queue the given command structure into the PTP virtchnl command queue to + * send to the PF. + */ +static void iavf_queue_ptp_cmd(struct iavf_adapter *adapter, + struct iavf_ptp_aq_cmd *cmd) +{ + mutex_lock(&adapter->ptp.aq_cmd_lock); + list_add_tail(&cmd->list, &adapter->ptp.aq_cmds); + mutex_unlock(&adapter->ptp.aq_cmd_lock); + + adapter->aq_required |= IAVF_FLAG_AQ_SEND_PTP_CMD; + mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0); +} + +/** + * iavf_send_phc_read - Send request to read PHC time + * @adapter: private adapter structure + * + * Send a request to obtain the PTP hardware clock time. This allocates the + * VIRTCHNL_OP_1588_PTP_GET_TIME message and queues it up to send to + * indirectly read the PHC time. + * + * This function does not wait for the reply from the PF. + * + * Return: 0 if success, error code otherwise. + */ +static int iavf_send_phc_read(struct iavf_adapter *adapter) +{ + struct iavf_ptp_aq_cmd *cmd; + + if (!adapter->ptp.clock) + return -EOPNOTSUPP; + + cmd = iavf_allocate_ptp_cmd(VIRTCHNL_OP_1588_PTP_GET_TIME, + sizeof(struct virtchnl_phc_time)); + if (!cmd) + return -ENOMEM; + + iavf_queue_ptp_cmd(adapter, cmd); + + return 0; +} + +/** + * iavf_read_phc_indirect - Indirectly read the PHC time via virtchnl + * @adapter: private adapter structure + * @ts: storage for the timestamp value + * @sts: system timestamp values before and after the read + * + * Used when the device does not have direct register access to the PHC time. + * Indirectly reads the time via the VIRTCHNL_OP_1588_PTP_GET_TIME, and waits + * for the reply from the PF. + * + * Based on some simple measurements using ftrace and phc2sys, this clock + * access method has about a ~110 usec latency even when the system is not + * under load.
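
iavf_allocate_ptp_cmd() and iavf_queue_ptp_cmd() above are the producer side of a classic locked command queue; the watchdog later drains it one command at a time (see iavf_virtchnl_send_ptp_cmd() further down). A reduced userspace model of the same pattern, with a hand-rolled list instead of list_head and hypothetical opcodes:

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct cmd {
    	struct cmd *next;
    	int opcode;
    };

    static struct cmd *head;
    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    /* Producer: append under the lock to preserve submission order. */
    static void queue_cmd(int opcode)
    {
    	struct cmd *c = calloc(1, sizeof(*c));
    	struct cmd **pp;

    	if (!c)
    		return;
    	c->opcode = opcode;
    	pthread_mutex_lock(&lock);
    	for (pp = &head; *pp; pp = &(*pp)->next)
    		;
    	*pp = c;
    	pthread_mutex_unlock(&lock);
    }

    /* Consumer: send at most one command per invocation, mirroring the
     * one-virtchnl-op-in-flight rule the driver observes.
     */
    static void send_one_cmd(void)
    {
    	pthread_mutex_lock(&lock);
    	struct cmd *c = head;

    	if (c) {
    		head = c->next;
    		printf("sending opcode %d\n", c->opcode);
    		free(c);
    	}
    	pthread_mutex_unlock(&lock);
    }

    int main(void)
    {
    	queue_cmd(1);	/* e.g. a GET_TIME request */
    	queue_cmd(2);
    	send_one_cmd();
    	send_one_cmd();
    	return 0;
    }

In the driver the "poke the consumer" step is the aq_required flag plus mod_delayed_work() on the watchdog, rather than an explicit call.
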
In order to achieve acceptable results when using phc2sys with + * the indirect clock access method, it is recommended to use more + * conservative proportional and integration constants with the P/I servo. + * + * Return: 0 if success, error code otherwise. + */ +static int iavf_read_phc_indirect(struct iavf_adapter *adapter, + struct timespec64 *ts, + struct ptp_system_timestamp *sts) +{ + long ret; + int err; + + adapter->ptp.phc_time_ready = false; + + ptp_read_system_prets(sts); + + err = iavf_send_phc_read(adapter); + if (err) + return err; + + ret = wait_event_interruptible_timeout(adapter->ptp.phc_time_waitqueue, + adapter->ptp.phc_time_ready, + HZ); + + ptp_read_system_postts(sts); + + if (ret < 0) + return ret; + else if (!ret) + return -EBUSY; + + *ts = ns_to_timespec64(adapter->ptp.cached_phc_time); + + return 0; +} + +static int iavf_ptp_gettimex64(struct ptp_clock_info *info, + struct timespec64 *ts, + struct ptp_system_timestamp *sts) +{ + struct iavf_adapter *adapter = iavf_clock_to_adapter(info); + + if (!adapter->ptp.clock) + return -EOPNOTSUPP; + + return iavf_read_phc_indirect(adapter, ts, sts); +} + +/** + * iavf_ptp_cache_phc_time - Cache PHC time for performing timestamp extension + * @adapter: private adapter structure + * + * Periodically cache the PHC time in order to allow for timestamp extension. + * This is required because the Tx and Rx timestamps only contain 32bits of + * nanoseconds. Timestamp extension allows calculating the corrected 64bit + * timestamp. This algorithm relies on the cached time being within ~1 second + * of the timestamp. + */ +static void iavf_ptp_cache_phc_time(struct iavf_adapter *adapter) +{ + if (!time_is_before_jiffies(adapter->ptp.cached_phc_updated + HZ)) + return; + + /* The response from virtchnl will store the time into + * cached_phc_time. + */ + iavf_send_phc_read(adapter); +} + +/** + * iavf_ptp_do_aux_work - Perform periodic work required for PTP support + * @info: PTP clock info structure + * + * Handler to take care of periodic work required for PTP operation. This + * includes the following tasks: + * + * 1) updating cached_phc_time + * + * cached_phc_time is used by the Tx and Rx timestamp flows in order to + * perform timestamp extension, by carefully comparing the timestamp + * 32bit nanosecond timestamps and determining the corrected 64bit + * timestamp value to report to userspace. This algorithm only works if + * the cached_phc_time is within ~1 second of the Tx or Rx timestamp + * event. This task periodically reads the PHC time and stores it, to + * ensure that timestamp extension operates correctly. + * + * Returns: time in jiffies until the periodic task should be re-scheduled. + */ +static long iavf_ptp_do_aux_work(struct ptp_clock_info *info) +{ + struct iavf_adapter *adapter = iavf_clock_to_adapter(info); + + iavf_ptp_cache_phc_time(adapter); + + /* Check work about twice a second */ + return msecs_to_jiffies(500); +} + +/** + * iavf_ptp_register_clock - Register a new PTP for userspace + * @adapter: private adapter structure + * + * Allocate and register a new PTP clock device if necessary. + * + * Return: 0 if success, error otherwise. 
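
Every .gettimex64 call above is ultimately driven from userspace through the dynamic POSIX clock that ptp_clock_register() exposes as /dev/ptpN. A minimal reader using the standard fd-to-clockid idiom (the device index is system-dependent; for iavf each read costs a virtchnl round trip, hence the latency noted above):

    #include <fcntl.h>
    #include <stdio.h>
    #include <time.h>
    #include <unistd.h>

    /* Standard idiom for turning a /dev/ptpN fd into a dynamic clockid. */
    #define CLOCKFD 3
    #define FD_TO_CLOCKID(fd) ((clockid_t)((((unsigned int)~(fd)) << 3) | CLOCKFD))

    int main(void)
    {
    	int fd = open("/dev/ptp0", O_RDONLY);	/* index depends on the system */
    	struct timespec ts;

    	if (fd < 0) {
    		perror("open");
    		return 1;
    	}
    	/* Lands in the driver's .gettimex64 callback. */
    	if (clock_gettime(FD_TO_CLOCKID(fd), &ts)) {
    		perror("clock_gettime");
    		close(fd);
    		return 1;
    	}
    	printf("phc time: %lld.%09ld\n", (long long)ts.tv_sec, ts.tv_nsec);
    	close(fd);
    	return 0;
    }
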
+ */ +static int iavf_ptp_register_clock(struct iavf_adapter *adapter) +{ + struct ptp_clock_info *ptp_info = &adapter->ptp.info; + struct device *dev = &adapter->pdev->dev; + struct ptp_clock *clock; + + snprintf(ptp_info->name, sizeof(ptp_info->name), "%s-%s-clk", + KBUILD_MODNAME, dev_name(dev)); + ptp_info->owner = THIS_MODULE; + ptp_info->gettimex64 = iavf_ptp_gettimex64; + ptp_info->do_aux_work = iavf_ptp_do_aux_work; + + clock = ptp_clock_register(ptp_info, dev); + if (IS_ERR(clock)) + return PTR_ERR(clock); + + adapter->ptp.clock = clock; + + dev_dbg(&adapter->pdev->dev, "PTP clock %s registered\n", + adapter->ptp.info.name); + + return 0; +} + +/** + * iavf_ptp_init - Initialize PTP support if capability was negotiated + * @adapter: private adapter structure + * + * Initialize PTP functionality, based on the capabilities that the PF has + * enabled for this VF. + */ +void iavf_ptp_init(struct iavf_adapter *adapter) +{ + int err; + + if (!iavf_ptp_cap_supported(adapter, VIRTCHNL_1588_PTP_CAP_READ_PHC)) { + pci_notice(adapter->pdev, + "Device does not have PTP clock support\n"); + return; + } + + err = iavf_ptp_register_clock(adapter); + if (err) { + pci_err(adapter->pdev, + "Failed to register PTP clock device (%p)\n", + ERR_PTR(err)); + return; + } + + for (int i = 0; i < adapter->num_active_queues; i++) { + struct iavf_ring *rx_ring = &adapter->rx_rings[i]; + + rx_ring->ptp = &adapter->ptp; + } + + ptp_schedule_worker(adapter->ptp.clock, 0); +} + +/** + * iavf_ptp_release - Disable PTP support + * @adapter: private adapter structure + * + * Release all PTP resources that were previously initialized. + */ +void iavf_ptp_release(struct iavf_adapter *adapter) +{ + struct iavf_ptp_aq_cmd *cmd, *tmp; + + if (!adapter->ptp.clock) + return; + + pci_dbg(adapter->pdev, "removing PTP clock %s\n", + adapter->ptp.info.name); + ptp_clock_unregister(adapter->ptp.clock); + adapter->ptp.clock = NULL; + + /* Cancel any remaining uncompleted PTP clock commands */ + mutex_lock(&adapter->ptp.aq_cmd_lock); + list_for_each_entry_safe(cmd, tmp, &adapter->ptp.aq_cmds, list) { + list_del(&cmd->list); + kfree(cmd); + } + adapter->aq_required &= ~IAVF_FLAG_AQ_SEND_PTP_CMD; + mutex_unlock(&adapter->ptp.aq_cmd_lock); + + adapter->ptp.hwtstamp_config.rx_filter = HWTSTAMP_FILTER_NONE; + iavf_ptp_disable_rx_tstamp(adapter); +} + +/** + * iavf_ptp_process_caps - Handle change in PTP capabilities + * @adapter: private adapter structure + * + * Handle any state changes necessary due to change in PTP capabilities, such + * as after a device reset or change in configuration from the PF. + */ +void iavf_ptp_process_caps(struct iavf_adapter *adapter) +{ + bool phc = iavf_ptp_cap_supported(adapter, VIRTCHNL_1588_PTP_CAP_READ_PHC); + + /* Check if the device gained or lost necessary access to support the + * PTP hardware clock. If so, driver must respond appropriately by + * creating or destroying the PTP clock device. 
+ */ + if (adapter->ptp.clock && !phc) + iavf_ptp_release(adapter); + else if (!adapter->ptp.clock && phc) + iavf_ptp_init(adapter); + + /* Check if the device lost access to Rx timestamp incoming packets */ + if (!iavf_ptp_cap_supported(adapter, VIRTCHNL_1588_PTP_CAP_RX_TSTAMP)) { + adapter->ptp.hwtstamp_config.rx_filter = HWTSTAMP_FILTER_NONE; + iavf_ptp_disable_rx_tstamp(adapter); + } +} + +/** + * iavf_ptp_extend_32b_timestamp - Convert a 32b nanoseconds timestamp to 64b + * nanoseconds + * @cached_phc_time: recently cached copy of PHC time + * @in_tstamp: Ingress/egress 32b nanoseconds timestamp value + * + * Hardware captures timestamps which contain only 32 bits of nominal + * nanoseconds, as opposed to the 64bit timestamps that the stack expects. + * + * Extend the 32bit nanosecond timestamp using the following algorithm and + * assumptions: + * + * 1) have a recently cached copy of the PHC time + * 2) assume that the in_tstamp was captured 2^31 nanoseconds (~2.1 + * seconds) before or after the PHC time was captured. + * 3) calculate the delta between the cached time and the timestamp + * 4) if the delta is smaller than 2^31 nanoseconds, then the timestamp was + * captured after the PHC time. In this case, the full timestamp is just + * the cached PHC time plus the delta. + * 5) otherwise, if the delta is larger than 2^31 nanoseconds, then the + * timestamp was captured *before* the PHC time, i.e. because the PHC + * cache was updated after the timestamp was captured by hardware. In this + * case, the full timestamp is the cached time minus the inverse delta. + * + * This algorithm works even if the PHC time was updated after a Tx timestamp + * was requested, but before the Tx timestamp event was reported from + * hardware. + * + * This calculation primarily relies on keeping the cached PHC time up to + * date. If the timestamp was captured more than 2^31 nanoseconds after the + * PHC time, it is possible that the lower 32bits of PHC time have + * overflowed more than once, and we might generate an incorrect timestamp. + * + * This is prevented by (a) periodically updating the cached PHC time once + * a second, and (b) discarding any Tx timestamp packet if it has waited for + * a timestamp for more than one second. + * + * Return: extended timestamp (to 64b). + */ +u64 iavf_ptp_extend_32b_timestamp(u64 cached_phc_time, u32 in_tstamp) +{ + u32 low = lower_32_bits(cached_phc_time); + u32 delta = in_tstamp - low; + u64 ns; + + /* Do not assume that the in_tstamp is always more recent than the + * cached PHC time. If the delta is large, it indicates that the + * in_tstamp was taken in the past, and should be converted + * forward. + */ + if (delta > S32_MAX) + ns = cached_phc_time - (low - in_tstamp); + else + ns = cached_phc_time + delta; + + return ns; +} diff --git a/drivers/net/ethernet/intel/iavf/iavf_ptp.h b/drivers/net/ethernet/intel/iavf/iavf_ptp.h new file mode 100644 index 0000000000000..783b8f287cd95 --- /dev/null +++ b/drivers/net/ethernet/intel/iavf/iavf_ptp.h @@ -0,0 +1,47 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2024 Intel Corporation. 
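
The extension arithmetic is easiest to verify in isolation. Here is a self-contained program mirroring iavf_ptp_extend_32b_timestamp() with fixed-width types, exercising both the forward case and the rollover (backward) case:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Mirrors the rollover handling described above: a 32-bit delta above
     * S32_MAX means the capture predates the cached PHC time.
     */
    static uint64_t extend_32b_timestamp(uint64_t cached_phc_time, uint32_t in_tstamp)
    {
    	uint32_t low = (uint32_t)cached_phc_time;
    	uint32_t delta = in_tstamp - low;

    	if (delta > INT32_MAX)
    		return cached_phc_time - (uint64_t)(low - in_tstamp);
    	return cached_phc_time + delta;
    }

    int main(void)
    {
    	uint64_t phc = 0x500000000ULL;	/* cached PHC time, lower 32 bits zero */

    	/* captured shortly after the cache: extends forward */
    	printf("0x%" PRIx64 "\n", extend_32b_timestamp(phc, 100));
    	/* captured shortly before the cache: extends backward across the
    	 * 32-bit rollover instead of jumping ~4.3 s ahead
    	 */
    	printf("0x%" PRIx64 "\n", extend_32b_timestamp(phc, 0xffffff00));
    	return 0;
    }

Compiled and run, this prints 0x500000064 and 0x4ffffff00: the second timestamp lands 256 ns before the cached time rather than ~4.3 s after it.
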
*/ + +#ifndef _IAVF_PTP_H_ +#define _IAVF_PTP_H_ + +#include "iavf_types.h" + +/* bit indicating whether a 40bit timestamp is valid */ +#define IAVF_PTP_40B_TSTAMP_VALID BIT(24) + +#if IS_ENABLED(CONFIG_PTP_1588_CLOCK) +void iavf_ptp_init(struct iavf_adapter *adapter); +void iavf_ptp_release(struct iavf_adapter *adapter); +void iavf_ptp_process_caps(struct iavf_adapter *adapter); +bool iavf_ptp_cap_supported(const struct iavf_adapter *adapter, u32 cap); +void iavf_virtchnl_send_ptp_cmd(struct iavf_adapter *adapter); +int iavf_ptp_set_ts_config(struct iavf_adapter *adapter, + struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack); +u64 iavf_ptp_extend_32b_timestamp(u64 cached_phc_time, u32 in_tstamp); +#else /* IS_ENABLED(CONFIG_PTP_1588_CLOCK) */ +static inline void iavf_ptp_init(struct iavf_adapter *adapter) { } +static inline void iavf_ptp_release(struct iavf_adapter *adapter) { } +static inline void iavf_ptp_process_caps(struct iavf_adapter *adapter) { } +static inline bool iavf_ptp_cap_supported(const struct iavf_adapter *adapter, + u32 cap) +{ + return false; +} + +static inline void iavf_virtchnl_send_ptp_cmd(struct iavf_adapter *adapter) { } +static inline int iavf_ptp_set_ts_config(struct iavf_adapter *adapter, + struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack) +{ + return -1; +} + +static inline u64 iavf_ptp_extend_32b_timestamp(u64 cached_phc_time, + u32 in_tstamp) +{ + return 0; +} + +#endif /* IS_ENABLED(CONFIG_PTP_1588_CLOCK) */ +#endif /* _IAVF_PTP_H_ */ diff --git a/drivers/net/ethernet/intel/iavf/iavf_trace.h b/drivers/net/ethernet/intel/iavf/iavf_trace.h index 62212011c8070..c5e4d18238866 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_trace.h +++ b/drivers/net/ethernet/intel/iavf/iavf_trace.h @@ -112,7 +112,7 @@ DECLARE_EVENT_CLASS( iavf_rx_template, TP_PROTO(struct iavf_ring *ring, - union iavf_32byte_rx_desc *desc, + struct iavf_rx_desc *desc, struct sk_buff *skb), TP_ARGS(ring, desc, skb), @@ -140,7 +140,7 @@ DECLARE_EVENT_CLASS( DEFINE_EVENT( iavf_rx_template, iavf_clean_rx_irq, TP_PROTO(struct iavf_ring *ring, - union iavf_32byte_rx_desc *desc, + struct iavf_rx_desc *desc, struct sk_buff *skb), TP_ARGS(ring, desc, skb)); @@ -148,7 +148,7 @@ DEFINE_EVENT( DEFINE_EVENT( iavf_rx_template, iavf_clean_rx_irq_rx, TP_PROTO(struct iavf_ring *ring, - union iavf_32byte_rx_desc *desc, + struct iavf_rx_desc *desc, struct sk_buff *skb), TP_ARGS(ring, desc, skb)); diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c index 26b424fd67183..422312b8b54a1 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c +++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c @@ -8,6 +8,26 @@ #include "iavf.h" #include "iavf_trace.h" #include "iavf_prototype.h" +#include "iavf_ptp.h" + +/** + * iavf_is_descriptor_done - tests DD bit in Rx descriptor + * @qw1: quad word 1 from descriptor to get Descriptor Done field from + * @flex: is the descriptor flex or legacy + * + * This function tests the descriptor done bit in specified descriptor. Because + * there are two types of descriptors (legacy and flex) the parameter flex + * is used to distinguish. + * + * Return: true or false based on the state of DD bit in Rx descriptor.
+ */ +static bool iavf_is_descriptor_done(u64 qw1, bool flex) +{ + if (flex) + return FIELD_GET(IAVF_RXD_FLEX_DD_M, qw1); + else + return FIELD_GET(IAVF_RXD_LEGACY_DD_M, qw1); +} static __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size, u32 td_tag) @@ -766,7 +786,7 @@ int iavf_setup_rx_descriptors(struct iavf_ring *rx_ring) u64_stats_init(&rx_ring->syncp); /* Round up to nearest 4K */ - rx_ring->size = rx_ring->count * sizeof(union iavf_32byte_rx_desc); + rx_ring->size = rx_ring->count * sizeof(struct iavf_rx_desc); rx_ring->size = ALIGN(rx_ring->size, 4096); rx_ring->desc = dma_alloc_coherent(fq.pp->p.dev, rx_ring->size, &rx_ring->dma, GFP_KERNEL); @@ -845,7 +865,7 @@ bool iavf_alloc_rx_buffers(struct iavf_ring *rx_ring, u16 cleaned_count) .count = rx_ring->count, }; u16 ntu = rx_ring->next_to_use; - union iavf_rx_desc *rx_desc; + struct iavf_rx_desc *rx_desc; /* do nothing if no valid netdev defined */ if (!rx_ring->netdev || !cleaned_count) @@ -863,7 +883,7 @@ bool iavf_alloc_rx_buffers(struct iavf_ring *rx_ring, u16 cleaned_count) /* Refresh the desc even if buffer_addrs didn't change * because each write-back erases this info. */ - rx_desc->read.pkt_addr = cpu_to_le64(addr); + rx_desc->qw0 = cpu_to_le64(addr); rx_desc++; ntu++; @@ -873,7 +893,7 @@ bool iavf_alloc_rx_buffers(struct iavf_ring *rx_ring, u16 cleaned_count) } /* clear the status bits for the next_to_use descriptor */ - rx_desc->wb.qword1.status_error_len = 0; + rx_desc->qw1 = 0; cleaned_count--; } while (cleaned_count); @@ -896,60 +916,43 @@ bool iavf_alloc_rx_buffers(struct iavf_ring *rx_ring, u16 cleaned_count) } /** - * iavf_rx_checksum - Indicate in skb if hw indicated a good cksum + * iavf_rx_csum - Indicate in skb if hw indicated a good checksum * @vsi: the VSI we care about * @skb: skb currently being received and modified - * @rx_desc: the receive descriptor + * @decoded_pt: decoded ptype information + * @csum_bits: decoded Rx descriptor information **/ -static void iavf_rx_checksum(struct iavf_vsi *vsi, - struct sk_buff *skb, - union iavf_rx_desc *rx_desc) +static void iavf_rx_csum(const struct iavf_vsi *vsi, struct sk_buff *skb, + struct libeth_rx_pt decoded_pt, + struct libeth_rx_csum csum_bits) { - struct libeth_rx_pt decoded; - u32 rx_error, rx_status; bool ipv4, ipv6; - u8 ptype; - u64 qword; skb->ip_summed = CHECKSUM_NONE; - qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); - ptype = FIELD_GET(IAVF_RXD_QW1_PTYPE_MASK, qword); - - decoded = libie_rx_pt_parse(ptype); - if (!libeth_rx_pt_has_checksum(vsi->netdev, decoded)) - return; - - rx_error = FIELD_GET(IAVF_RXD_QW1_ERROR_MASK, qword); - rx_status = FIELD_GET(IAVF_RXD_QW1_STATUS_MASK, qword); - /* did the hardware decode the packet and checksum? 
*/ - if (!(rx_status & BIT(IAVF_RX_DESC_STATUS_L3L4P_SHIFT))) + if (unlikely(!csum_bits.l3l4p)) return; - ipv4 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV4; - ipv6 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV6; + ipv4 = libeth_rx_pt_get_ip_ver(decoded_pt) == LIBETH_RX_PT_OUTER_IPV4; + ipv6 = libeth_rx_pt_get_ip_ver(decoded_pt) == LIBETH_RX_PT_OUTER_IPV6; - if (ipv4 && - (rx_error & (BIT(IAVF_RX_DESC_ERROR_IPE_SHIFT) | - BIT(IAVF_RX_DESC_ERROR_EIPE_SHIFT)))) + if (unlikely(ipv4 && (csum_bits.ipe || csum_bits.eipe))) goto checksum_fail; /* likely incorrect csum if alternate IP extension headers found */ - if (ipv6 && - rx_status & BIT(IAVF_RX_DESC_STATUS_IPV6EXADD_SHIFT)) - /* don't increment checksum err here, non-fatal err */ + if (unlikely(ipv6 && csum_bits.ipv6exadd)) return; /* there was some L4 error, count error and punt packet to the stack */ - if (rx_error & BIT(IAVF_RX_DESC_ERROR_L4E_SHIFT)) + if (unlikely(csum_bits.l4e)) goto checksum_fail; /* handle packets that were not able to be checksummed due * to arrival speed, in this case the stack can compute * the csum. */ - if (rx_error & BIT(IAVF_RX_DESC_ERROR_PPRS_SHIFT)) + if (unlikely(csum_bits.pprs)) return; skb->ip_summed = CHECKSUM_UNNECESSARY; @@ -960,52 +963,196 @@ static void iavf_rx_checksum(struct iavf_vsi *vsi, } /** - * iavf_rx_hash - set the hash value in the skb + * iavf_legacy_rx_csum - Indicate in skb if hw indicated a good checksum + * @vsi: the VSI we care about + * @qw1: quad word 1 + * @decoded_pt: decoded packet type + * + * This function only operates on the VIRTCHNL_RXDID_1_32B_BASE legacy 32byte + * descriptor writeback format. + * + * Return: decoded checksum bits. + **/ +static struct libeth_rx_csum +iavf_legacy_rx_csum(const struct iavf_vsi *vsi, u64 qw1, + const struct libeth_rx_pt decoded_pt) +{ + struct libeth_rx_csum csum_bits = {}; + + if (!libeth_rx_pt_has_checksum(vsi->netdev, decoded_pt)) + return csum_bits; + + csum_bits.ipe = FIELD_GET(IAVF_RXD_LEGACY_IPE_M, qw1); + csum_bits.eipe = FIELD_GET(IAVF_RXD_LEGACY_EIPE_M, qw1); + csum_bits.l4e = FIELD_GET(IAVF_RXD_LEGACY_L4E_M, qw1); + csum_bits.pprs = FIELD_GET(IAVF_RXD_LEGACY_PPRS_M, qw1); + csum_bits.l3l4p = FIELD_GET(IAVF_RXD_LEGACY_L3L4P_M, qw1); + csum_bits.ipv6exadd = FIELD_GET(IAVF_RXD_LEGACY_IPV6EXADD_M, qw1); + + return csum_bits; +} + +/** + * iavf_flex_rx_csum - Indicate in skb if hw indicated a good checksum + * @vsi: the VSI we care about + * @qw1: quad word 1 + * @decoded_pt: decoded packet type + * + * This function only operates on the VIRTCHNL_RXDID_2_FLEX_SQ_NIC flexible + * descriptor writeback format. + * + * Return: decoded checksum bits. 
+ **/ +static struct libeth_rx_csum +iavf_flex_rx_csum(const struct iavf_vsi *vsi, u64 qw1, + const struct libeth_rx_pt decoded_pt) +{ + struct libeth_rx_csum csum_bits = {}; + + if (!libeth_rx_pt_has_checksum(vsi->netdev, decoded_pt)) + return csum_bits; + + csum_bits.ipe = FIELD_GET(IAVF_RXD_FLEX_XSUM_IPE_M, qw1); + csum_bits.eipe = FIELD_GET(IAVF_RXD_FLEX_XSUM_EIPE_M, qw1); + csum_bits.l4e = FIELD_GET(IAVF_RXD_FLEX_XSUM_L4E_M, qw1); + csum_bits.eudpe = FIELD_GET(IAVF_RXD_FLEX_XSUM_EUDPE_M, qw1); + csum_bits.l3l4p = FIELD_GET(IAVF_RXD_FLEX_L3L4P_M, qw1); + csum_bits.ipv6exadd = FIELD_GET(IAVF_RXD_FLEX_IPV6EXADD_M, qw1); + csum_bits.nat = FIELD_GET(IAVF_RXD_FLEX_NAT_M, qw1); + + return csum_bits; +} + +/** + * iavf_legacy_rx_hash - set the hash value in the skb + * @ring: descriptor ring + * @qw0: quad word 0 + * @qw1: quad word 1 + * @skb: skb currently being received and modified + * @decoded_pt: decoded packet type + * + * This function only operates on the VIRTCHNL_RXDID_1_32B_BASE legacy 32byte + * descriptor writeback format. + **/ +static void iavf_legacy_rx_hash(const struct iavf_ring *ring, __le64 qw0, + __le64 qw1, struct sk_buff *skb, + const struct libeth_rx_pt decoded_pt) +{ + const __le64 rss_mask = cpu_to_le64(IAVF_RXD_LEGACY_FLTSTAT_M); + u32 hash; + + if (!libeth_rx_pt_has_hash(ring->netdev, decoded_pt)) + return; + + if ((qw1 & rss_mask) == rss_mask) { + hash = le64_get_bits(qw0, IAVF_RXD_LEGACY_RSS_M); + libeth_rx_pt_set_hash(skb, hash, decoded_pt); + } +} + +/** + * iavf_flex_rx_hash - set the hash value in the skb * @ring: descriptor ring - * @rx_desc: specific descriptor + * @qw1: quad word 1 * @skb: skb currently being received and modified - * @rx_ptype: Rx packet type + * @decoded_pt: decoded packet type + * + * This function only operates on the VIRTCHNL_RXDID_2_FLEX_SQ_NIC flexible + * descriptor writeback format. **/ -static void iavf_rx_hash(struct iavf_ring *ring, - union iavf_rx_desc *rx_desc, - struct sk_buff *skb, - u8 rx_ptype) +static void iavf_flex_rx_hash(const struct iavf_ring *ring, __le64 qw1, + struct sk_buff *skb, + const struct libeth_rx_pt decoded_pt) { - struct libeth_rx_pt decoded; + bool rss_valid; u32 hash; - const __le64 rss_mask = - cpu_to_le64((u64)IAVF_RX_DESC_FLTSTAT_RSS_HASH << - IAVF_RX_DESC_STATUS_FLTSTAT_SHIFT); - decoded = libie_rx_pt_parse(rx_ptype); - if (!libeth_rx_pt_has_hash(ring->netdev, decoded)) + if (!libeth_rx_pt_has_hash(ring->netdev, decoded_pt)) return; - if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) { - hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss); - libeth_rx_pt_set_hash(skb, hash, decoded); + rss_valid = le64_get_bits(qw1, IAVF_RXD_FLEX_RSS_VALID_M); + if (rss_valid) { + hash = le64_get_bits(qw1, IAVF_RXD_FLEX_RSS_HASH_M); + libeth_rx_pt_set_hash(skb, hash, decoded_pt); } } +/** + * iavf_flex_rx_tstamp - Capture Rx timestamp from the descriptor + * @rx_ring: descriptor ring + * @qw2: quad word 2 of descriptor + * @qw3: quad word 3 of descriptor + * @skb: skb currently being received + * + * Read the Rx timestamp value from the descriptor and pass it to the stack. + * + * This function only operates on the VIRTCHNL_RXDID_2_FLEX_SQ_NIC flexible + * descriptor writeback format. 
+ */ +static void iavf_flex_rx_tstamp(const struct iavf_ring *rx_ring, __le64 qw2, + __le64 qw3, struct sk_buff *skb) +{ + u32 tstamp; + u64 ns; + + /* Skip processing if timestamps aren't enabled */ + if (!(rx_ring->flags & IAVF_TXRX_FLAGS_HW_TSTAMP)) + return; + + /* Check if this Rx descriptor has a valid timestamp */ + if (!le64_get_bits(qw2, IAVF_PTP_40B_TSTAMP_VALID)) + return; + + /* the ts_low field only contains the valid bit and sub-nanosecond + * precision, so we don't need to extract it. + */ + tstamp = le64_get_bits(qw3, IAVF_RXD_FLEX_QW3_TSTAMP_HIGH_M); + + ns = iavf_ptp_extend_32b_timestamp(rx_ring->ptp->cached_phc_time, + tstamp); + + *skb_hwtstamps(skb) = (struct skb_shared_hwtstamps) { + .hwtstamp = ns_to_ktime(ns), + }; +} + /** * iavf_process_skb_fields - Populate skb header fields from Rx descriptor * @rx_ring: rx descriptor ring packet is being transacted on * @rx_desc: pointer to the EOP Rx descriptor * @skb: pointer to current skb being populated - * @rx_ptype: the packet type decoded by hardware + * @ptype: the packet type decoded by hardware + * @flex: is the descriptor flex or legacy * * This function checks the ring, descriptor, and packet information in * order to populate the hash, checksum, VLAN, protocol, and * other fields within the skb. **/ -static void -iavf_process_skb_fields(struct iavf_ring *rx_ring, - union iavf_rx_desc *rx_desc, struct sk_buff *skb, - u8 rx_ptype) +static void iavf_process_skb_fields(const struct iavf_ring *rx_ring, + const struct iavf_rx_desc *rx_desc, + struct sk_buff *skb, u32 ptype, + bool flex) { - iavf_rx_hash(rx_ring, rx_desc, skb, rx_ptype); - - iavf_rx_checksum(rx_ring->vsi, skb, rx_desc); + struct libeth_rx_csum csum_bits; + struct libeth_rx_pt decoded_pt; + __le64 qw0 = rx_desc->qw0; + __le64 qw1 = rx_desc->qw1; + __le64 qw2 = rx_desc->qw2; + __le64 qw3 = rx_desc->qw3; + + decoded_pt = libie_rx_pt_parse(ptype); + + if (flex) { + iavf_flex_rx_hash(rx_ring, qw1, skb, decoded_pt); + iavf_flex_rx_tstamp(rx_ring, qw2, qw3, skb); + csum_bits = iavf_flex_rx_csum(rx_ring->vsi, le64_to_cpu(qw1), + decoded_pt); + } else { + iavf_legacy_rx_hash(rx_ring, qw0, qw1, skb, decoded_pt); + csum_bits = iavf_legacy_rx_csum(rx_ring->vsi, le64_to_cpu(qw1), + decoded_pt); + } + iavf_rx_csum(rx_ring->vsi, skb, decoded_pt, csum_bits); skb_record_rx_queue(skb, rx_ring->queue_index); @@ -1092,8 +1239,7 @@ static struct sk_buff *iavf_build_skb(const struct libeth_fqe *rx_buffer, /** * iavf_is_non_eop - process handling of non-EOP buffers * @rx_ring: Rx ring being processed - * @rx_desc: Rx descriptor for current buffer - * @skb: Current socket buffer containing buffer in progress + * @fields: Rx descriptor extracted fields * * This function updates next to clean. If the buffer is an EOP buffer * this function exits returning false, otherwise it will place the @@ -1101,8 +1247,7 @@ static struct sk_buff *iavf_build_skb(const struct libeth_fqe *rx_buffer, * that this is in fact a non-EOP buffer. 
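
The skb_hwtstamps value filled in by iavf_flex_rx_tstamp() surfaces to applications as the third element of struct scm_timestamping when SO_TIMESTAMPING requests raw hardware stamps. A condensed receiver sketch (the port number is illustrative, binding a port below 1024 needs privilege, and Rx hardware timestamping must already have been enabled via SIOCSHWTSTAMP as in the earlier sketch):

    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/socket.h>
    #include <sys/uio.h>
    #include <netinet/in.h>
    #include <linux/net_tstamp.h>
    #include <linux/errqueue.h>

    #ifndef SCM_TIMESTAMPING
    #define SCM_TIMESTAMPING SO_TIMESTAMPING
    #endif

    int main(void)
    {
    	int flags = SOF_TIMESTAMPING_RX_HARDWARE | SOF_TIMESTAMPING_RAW_HARDWARE;
    	struct sockaddr_in addr = {
    		.sin_family = AF_INET,
    		.sin_port = htons(319),	/* PTP event port, for example */
    	};
    	char data[1500], ctrl[CMSG_SPACE(sizeof(struct scm_timestamping))];
    	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
    	struct msghdr msg = {
    		.msg_iov = &iov, .msg_iovlen = 1,
    		.msg_control = ctrl, .msg_controllen = sizeof(ctrl),
    	};
    	int fd = socket(AF_INET, SOCK_DGRAM, 0);

    	if (fd < 0 || bind(fd, (struct sockaddr *)&addr, sizeof(addr)) ||
    	    setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &flags, sizeof(flags)) ||
    	    recvmsg(fd, &msg, 0) < 0)
    		return 1;

    	for (struct cmsghdr *c = CMSG_FIRSTHDR(&msg); c; c = CMSG_NXTHDR(&msg, c)) {
    		if (c->cmsg_level == SOL_SOCKET && c->cmsg_type == SCM_TIMESTAMPING) {
    			struct scm_timestamping ts;

    			memcpy(&ts, CMSG_DATA(c), sizeof(ts));
    			/* ts.ts[2] carries the raw hardware timestamp */
    			printf("hw rx tstamp: %lld.%09ld\n",
    			       (long long)ts.ts[2].tv_sec, ts.ts[2].tv_nsec);
    		}
    	}
    	close(fd);
    	return 0;
    }
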
**/ static bool iavf_is_non_eop(struct iavf_ring *rx_ring, - union iavf_rx_desc *rx_desc, - struct sk_buff *skb) + struct libeth_rqe_info fields) { u32 ntc = rx_ring->next_to_clean + 1; @@ -1113,8 +1258,7 @@ static bool iavf_is_non_eop(struct iavf_ring *rx_ring, prefetch(IAVF_RX_DESC(rx_ring, ntc)); /* if we are the last buffer then there is nothing else to do */ -#define IAVF_RXD_EOF BIT(IAVF_RX_DESC_STATUS_EOF_SHIFT) - if (likely(iavf_test_staterr(rx_desc, IAVF_RXD_EOF))) + if (likely(fields.eop)) return false; rx_ring->rx_stats.non_eop_descs++; @@ -1122,6 +1266,109 @@ static bool iavf_is_non_eop(struct iavf_ring *rx_ring, return true; } +/** + * iavf_extract_legacy_rx_fields - Extract fields from the Rx descriptor + * @rx_ring: rx descriptor ring + * @rx_desc: the descriptor to process + * + * Decode the Rx descriptor and extract relevant information including the + * size, VLAN tag, Rx packet type, end of packet field and RXE field value. + * + * This function only operates on the VIRTCHNL_RXDID_1_32B_BASE legacy 32byte + * descriptor writeback format. + * + * Return: fields extracted from the Rx descriptor. + */ +static struct libeth_rqe_info +iavf_extract_legacy_rx_fields(const struct iavf_ring *rx_ring, + const struct iavf_rx_desc *rx_desc) +{ + u64 qw0 = le64_to_cpu(rx_desc->qw0); + u64 qw1 = le64_to_cpu(rx_desc->qw1); + u64 qw2 = le64_to_cpu(rx_desc->qw2); + struct libeth_rqe_info fields; + bool l2tag1p, l2tag2p; + + fields.eop = FIELD_GET(IAVF_RXD_LEGACY_EOP_M, qw1); + fields.len = FIELD_GET(IAVF_RXD_LEGACY_LENGTH_M, qw1); + + if (!fields.eop) + return fields; + + fields.rxe = FIELD_GET(IAVF_RXD_LEGACY_RXE_M, qw1); + fields.ptype = FIELD_GET(IAVF_RXD_LEGACY_PTYPE_M, qw1); + fields.vlan = 0; + + if (rx_ring->flags & IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1) { + l2tag1p = FIELD_GET(IAVF_RXD_LEGACY_L2TAG1P_M, qw1); + if (l2tag1p) + fields.vlan = FIELD_GET(IAVF_RXD_LEGACY_L2TAG1_M, qw0); + } else if (rx_ring->flags & IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2) { + l2tag2p = FIELD_GET(IAVF_RXD_LEGACY_L2TAG2P_M, qw2); + if (l2tag2p) + fields.vlan = FIELD_GET(IAVF_RXD_LEGACY_L2TAG2_M, qw2); + } + + return fields; +} + +/** + * iavf_extract_flex_rx_fields - Extract fields from the Rx descriptor + * @rx_ring: rx descriptor ring + * @rx_desc: the descriptor to process + * + * Decode the Rx descriptor and extract relevant information including the + * size, VLAN tag, Rx packet type, end of packet field and RXE field value. + * + * This function only operates on the VIRTCHNL_RXDID_2_FLEX_SQ_NIC flexible + * descriptor writeback format. + * + * Return: fields extracted from the Rx descriptor. 
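
Both extractors are pure mask-and-shift operations over the four descriptor qwords. A freestanding model, using userspace stand-ins for the kernel's GENMASK_ULL()/FIELD_GET() macros and mask values matching the flex defines added to iavf_type.h below (the fabricated writeback value is just for demonstration):

    #include <stdint.h>
    #include <stdio.h>

    /* Userspace stand-ins for the kernel helpers. */
    #define GENMASK_ULL(h, l) (((~0ULL) >> (63 - (h))) & ((~0ULL) << (l)))
    #define FIELD_GET_ULL(mask, val) (((val) & (mask)) / ((mask) & -(mask)))

    /* Flex descriptor qw0/qw1 masks, as defined for struct iavf_rx_desc */
    #define RXD_FLEX_PKT_LEN_M GENMASK_ULL(45, 32)
    #define RXD_FLEX_PTYPE_M   GENMASK_ULL(25, 16)
    #define RXD_FLEX_DD_M      (1ULL << 0)
    #define RXD_FLEX_EOP_M     (1ULL << 1)

    int main(void)
    {
    	/* fabricated writeback: len 0x40 and ptype 0x18 in qw0; DD+EOP in qw1 */
    	uint64_t qw0 = (0x40ULL << 32) | (0x18ULL << 16);
    	uint64_t qw1 = RXD_FLEX_DD_M | RXD_FLEX_EOP_M;

    	printf("len=%llu ptype=0x%llx dd=%d eop=%d\n",
    	       (unsigned long long)FIELD_GET_ULL(RXD_FLEX_PKT_LEN_M, qw0),
    	       (unsigned long long)FIELD_GET_ULL(RXD_FLEX_PTYPE_M, qw0),
    	       (int)FIELD_GET_ULL(RXD_FLEX_DD_M, qw1),
    	       (int)FIELD_GET_ULL(RXD_FLEX_EOP_M, qw1));
    	return 0;
    }

The FIELD_GET_ULL stand-in divides by the mask's lowest set bit, which is equivalent to shifting right by the mask's trailing-zero count.
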
+ */ +static struct libeth_rqe_info +iavf_extract_flex_rx_fields(const struct iavf_ring *rx_ring, + const struct iavf_rx_desc *rx_desc) +{ + struct libeth_rqe_info fields = {}; + u64 qw0 = le64_to_cpu(rx_desc->qw0); + u64 qw1 = le64_to_cpu(rx_desc->qw1); + u64 qw2 = le64_to_cpu(rx_desc->qw2); + bool l2tag1p, l2tag2p; + + fields.eop = FIELD_GET(IAVF_RXD_FLEX_EOP_M, qw1); + fields.len = FIELD_GET(IAVF_RXD_FLEX_PKT_LEN_M, qw0); + + if (!fields.eop) + return fields; + + fields.rxe = FIELD_GET(IAVF_RXD_FLEX_RXE_M, qw1); + fields.ptype = FIELD_GET(IAVF_RXD_FLEX_PTYPE_M, qw0); + fields.vlan = 0; + + if (rx_ring->flags & IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1) { + l2tag1p = FIELD_GET(IAVF_RXD_FLEX_L2TAG1P_M, qw1); + if (l2tag1p) + fields.vlan = FIELD_GET(IAVF_RXD_FLEX_L2TAG1_M, qw1); + } else if (rx_ring->flags & IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2) { + l2tag2p = FIELD_GET(IAVF_RXD_FLEX_L2TAG2P_M, qw2); + if (l2tag2p) + fields.vlan = FIELD_GET(IAVF_RXD_FLEX_L2TAG2_2_M, qw2); + } + + return fields; +} + +static struct libeth_rqe_info +iavf_extract_rx_fields(const struct iavf_ring *rx_ring, + const struct iavf_rx_desc *rx_desc, + bool flex) +{ + if (flex) + return iavf_extract_flex_rx_fields(rx_ring, rx_desc); + else + return iavf_extract_legacy_rx_fields(rx_ring, rx_desc); +} + /** * iavf_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf * @rx_ring: rx descriptor ring to transact packets on @@ -1136,18 +1383,17 @@ static bool iavf_is_non_eop(struct iavf_ring *rx_ring, **/ static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget) { + bool flex = rx_ring->rxdid == VIRTCHNL_RXDID_2_FLEX_SQ_NIC; unsigned int total_rx_bytes = 0, total_rx_packets = 0; struct sk_buff *skb = rx_ring->skb; u16 cleaned_count = IAVF_DESC_UNUSED(rx_ring); bool failure = false; while (likely(total_rx_packets < (unsigned int)budget)) { + struct libeth_rqe_info fields; struct libeth_fqe *rx_buffer; - union iavf_rx_desc *rx_desc; - unsigned int size; - u16 vlan_tag = 0; - u8 rx_ptype; - u64 qword; + struct iavf_rx_desc *rx_desc; + u64 qw1; /* return some buffers to hardware, one at a time is too slow */ if (cleaned_count >= IAVF_RX_BUFFER_WRITE) { @@ -1158,35 +1404,32 @@ static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget) rx_desc = IAVF_RX_DESC(rx_ring, rx_ring->next_to_clean); - /* status_error_len will always be zero for unused descriptors - * because it's cleared in cleanup, and overlaps with hdr_addr - * which is always zero because packet split isn't used, if the - * hardware wrote DD then the length will be non-zero - */ - qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); - /* This memory barrier is needed to keep us from reading * any other fields out of the rx_desc until we have * verified the descriptor has been written back. 
*/ dma_rmb(); -#define IAVF_RXD_DD BIT(IAVF_RX_DESC_STATUS_DD_SHIFT) - if (!iavf_test_staterr(rx_desc, IAVF_RXD_DD)) + + qw1 = le64_to_cpu(rx_desc->qw1); + /* If DD field (descriptor done) is unset then other fields are + * not valid + */ + if (!iavf_is_descriptor_done(qw1, flex)) break; - size = FIELD_GET(IAVF_RXD_QW1_LENGTH_PBUF_MASK, qword); + fields = iavf_extract_rx_fields(rx_ring, rx_desc, flex); iavf_trace(clean_rx_irq, rx_ring, rx_desc, skb); rx_buffer = &rx_ring->rx_fqes[rx_ring->next_to_clean]; - if (!libeth_rx_sync_for_cpu(rx_buffer, size)) + if (!libeth_rx_sync_for_cpu(rx_buffer, fields.len)) goto skip_data; /* retrieve a buffer from the ring */ if (skb) - iavf_add_rx_frag(skb, rx_buffer, size); + iavf_add_rx_frag(skb, rx_buffer, fields.len); else - skb = iavf_build_skb(rx_buffer, size); + skb = iavf_build_skb(rx_buffer, fields.len); /* exit if we failed to retrieve a buffer */ if (!skb) { @@ -1197,15 +1440,14 @@ static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget) skip_data: cleaned_count++; - if (iavf_is_non_eop(rx_ring, rx_desc, skb) || unlikely(!skb)) + if (iavf_is_non_eop(rx_ring, fields) || unlikely(!skb)) continue; - /* ERR_MASK will only have valid bits if EOP set, and - * what we are doing here is actually checking - * IAVF_RX_DESC_ERROR_RXE_SHIFT, since it is the zeroth bit in - * the error field + /* RXE field in descriptor is an indication of the MAC errors + * (like CRC, alignment, oversize etc). If it is set, the driver + * drops the packet. */ - if (unlikely(iavf_test_staterr(rx_desc, BIT(IAVF_RXD_QW1_ERROR_SHIFT)))) { + if (unlikely(fields.rxe)) { dev_kfree_skb_any(skb); skb = NULL; continue; @@ -1219,22 +1461,11 @@ static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget) /* probably a little skewed due to removing CRC */ total_rx_bytes += skb->len; - qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); - rx_ptype = FIELD_GET(IAVF_RXD_QW1_PTYPE_MASK, qword); - /* populate checksum, VLAN, and protocol */ - iavf_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype); - - if (qword & BIT(IAVF_RX_DESC_STATUS_L2TAG1P_SHIFT) && - rx_ring->flags & IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1) - vlan_tag = le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1); - if (rx_desc->wb.qword2.ext_status & - cpu_to_le16(BIT(IAVF_RX_DESC_EXT_STATUS_L2TAG2P_SHIFT)) && - rx_ring->flags & IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2) - vlan_tag = le16_to_cpu(rx_desc->wb.qword2.l2tag2_2); + iavf_process_skb_fields(rx_ring, rx_desc, skb, fields.ptype, flex); iavf_trace(clean_rx_irq_rx, rx_ring, rx_desc, skb); - iavf_receive_skb(rx_ring, skb, vlan_tag); + iavf_receive_skb(rx_ring, skb, fields.vlan); skb = NULL; /* update budget accounting */ diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.h b/drivers/net/ethernet/intel/iavf/iavf_txrx.h index f97c702c0802d..79ad554f2d53b 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_txrx.h +++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.h @@ -80,25 +80,6 @@ enum iavf_dyn_idx_t { BIT_ULL(IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \ BIT_ULL(IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP)) -#define iavf_rx_desc iavf_32byte_rx_desc - -/** - * iavf_test_staterr - tests bits in Rx descriptor status and error fields - * @rx_desc: pointer to receive descriptor (in le64 format) - * @stat_err_bits: value to mask - * - * This function does some fast chicanery in order to return the - * value of the mask which is really only used for boolean tests. - * The status_error_len doesn't need to be shifted because it begins - * at offset zero.
- */ -static inline bool iavf_test_staterr(union iavf_rx_desc *rx_desc, - const u64 stat_err_bits) -{ - return !!(rx_desc->wb.qword1.status_error_len & - cpu_to_le64(stat_err_bits)); -} - /* How many Rx Buffers do we bundle into one write to the hardware ? */ #define IAVF_RX_INCREMENT(r, i) \ do { \ @@ -262,6 +243,8 @@ struct iavf_ring { u16 next_to_use; u16 next_to_clean; + u16 rxdid; /* Rx descriptor format */ + u16 flags; #define IAVF_TXR_FLAGS_WB_ON_ITR BIT(0) #define IAVF_TXR_FLAGS_ARM_WB BIT(1) @@ -269,6 +252,7 @@ struct iavf_ring { #define IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1 BIT(3) #define IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2 BIT(4) #define IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2 BIT(5) +#define IAVF_TXRX_FLAGS_HW_TSTAMP BIT(6) /* stats structs */ struct iavf_queue_stats stats; @@ -295,6 +279,8 @@ struct iavf_ring { * for this ring. */ + struct iavf_ptp *ptp; + u32 rx_buf_len; struct net_shaper q_shaper; bool q_shaper_update; diff --git a/drivers/net/ethernet/intel/iavf/iavf_type.h b/drivers/net/ethernet/intel/iavf/iavf_type.h index f6b09e57abcef..f9e1319620f45 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_type.h +++ b/drivers/net/ethernet/intel/iavf/iavf_type.h @@ -178,110 +178,116 @@ struct iavf_hw { char err_str[16]; }; -/* RX Descriptors */ -union iavf_16byte_rx_desc { - struct { - __le64 pkt_addr; /* Packet buffer address */ - __le64 hdr_addr; /* Header buffer address */ - } read; - struct { - struct { - struct { - union { - __le16 mirroring_status; - __le16 fcoe_ctx_id; - } mirr_fcoe; - __le16 l2tag1; - } lo_dword; - union { - __le32 rss; /* RSS Hash */ - __le32 fd_id; /* Flow director filter id */ - __le32 fcoe_param; /* FCoE DDP Context id */ - } hi_dword; - } qword0; - struct { - /* ext status/error/pktype/length */ - __le64 status_error_len; - } qword1; - } wb; /* writeback */ -}; - -union iavf_32byte_rx_desc { - struct { - __le64 pkt_addr; /* Packet buffer address */ - __le64 hdr_addr; /* Header buffer address */ - /* bit 0 of hdr_buffer_addr is DD bit */ - __le64 rsvd1; - __le64 rsvd2; - } read; - struct { - struct { - struct { - union { - __le16 mirroring_status; - __le16 fcoe_ctx_id; - } mirr_fcoe; - __le16 l2tag1; - } lo_dword; - union { - __le32 rss; /* RSS Hash */ - __le32 fcoe_param; /* FCoE DDP Context id */ - /* Flow director filter id in case of - * Programming status desc WB - */ - __le32 fd_id; - } hi_dword; - } qword0; - struct { - /* status/error/pktype/length */ - __le64 status_error_len; - } qword1; - struct { - __le16 ext_status; /* extended status */ - __le16 rsvd; - __le16 l2tag2_1; - __le16 l2tag2_2; - } qword2; - struct { - union { - __le32 flex_bytes_lo; - __le32 pe_status; - } lo_dword; - union { - __le32 flex_bytes_hi; - __le32 fd_id; - } hi_dword; - } qword3; - } wb; /* writeback */ -}; - -enum iavf_rx_desc_status_bits { - /* Note: These are predefined bit offsets */ - IAVF_RX_DESC_STATUS_DD_SHIFT = 0, - IAVF_RX_DESC_STATUS_EOF_SHIFT = 1, - IAVF_RX_DESC_STATUS_L2TAG1P_SHIFT = 2, - IAVF_RX_DESC_STATUS_L3L4P_SHIFT = 3, - IAVF_RX_DESC_STATUS_CRCP_SHIFT = 4, - IAVF_RX_DESC_STATUS_TSYNINDX_SHIFT = 5, /* 2 BITS */ - IAVF_RX_DESC_STATUS_TSYNVALID_SHIFT = 7, - /* Note: Bit 8 is reserved in X710 and XL710 */ - IAVF_RX_DESC_STATUS_EXT_UDP_0_SHIFT = 8, - IAVF_RX_DESC_STATUS_UMBCAST_SHIFT = 9, /* 2 BITS */ - IAVF_RX_DESC_STATUS_FLM_SHIFT = 11, - IAVF_RX_DESC_STATUS_FLTSTAT_SHIFT = 12, /* 2 BITS */ - IAVF_RX_DESC_STATUS_LPBK_SHIFT = 14, - IAVF_RX_DESC_STATUS_IPV6EXADD_SHIFT = 15, - IAVF_RX_DESC_STATUS_RESERVED_SHIFT = 16, /* 2 BITS */ - /* Note: For non-tunnel 
packets INT_UDP_0 is the right status for - * UDP header - */ - IAVF_RX_DESC_STATUS_INT_UDP_0_SHIFT = 18, - IAVF_RX_DESC_STATUS_LAST /* this entry must be last!!! */ -}; - -#define IAVF_RXD_QW1_STATUS_SHIFT 0 -#define IAVF_RXD_QW1_STATUS_MASK ((BIT(IAVF_RX_DESC_STATUS_LAST) - 1) \ - << IAVF_RXD_QW1_STATUS_SHIFT) +/** + * struct iavf_rx_desc - Receive descriptor (both legacy and flexible) + * @qw0: quad word 0 fields: + * Legacy: Descriptor Type; Mirror ID; L2TAG1P (S-TAG); Filter Status + * Flex: Descriptor Type; Mirror ID; UMBCAST; Packet Type; Flexible Flags + * Section 0; Packet Length; Header Length; Split Header Flag; + * Flexible Flags section 1 / Extended Status + * @qw1: quad word 1 fields: + * Legacy: Status Field; Error Field; Packet Type; Packet Length (packet, + * header, Split Header Flag) + * Flex: Status / Error 0 Field; L2TAG1P (S-TAG); Flexible Metadata + * Container #0; Flexible Metadata Container #1 + * @qw2: quad word 2 fields: + * Legacy: Extended Status; 1st L2TAG2P (C-TAG); 2nd L2TAG2P (C-TAG) + * Flex: Status / Error 1 Field; Flexible Flags section 2; Timestamp Low; + * 1st L2TAG2 (C-TAG); 2nd L2TAG2 (C-TAG) + * @qw3: quad word 3 fields: + * Legacy: FD Filter ID / Flexible Bytes + * Flex: Flexible Metadata Container #2; Flexible Metadata Container #3; + * Flexible Metadata Container #4 / Timestamp High 0; Flexible + * Metadata Container #5 / Timestamp High 1; + */ +struct iavf_rx_desc { + aligned_le64 qw0; +/* The hash signature (RSS) */ +#define IAVF_RXD_LEGACY_RSS_M GENMASK_ULL(63, 32) +/* Stripped C-TAG VLAN from the receive packet */ +#define IAVF_RXD_LEGACY_L2TAG1_M GENMASK_ULL(31, 16) +/* Packet type */ +#define IAVF_RXD_FLEX_PTYPE_M GENMASK_ULL(25, 16) +/* Packet length */ +#define IAVF_RXD_FLEX_PKT_LEN_M GENMASK_ULL(45, 32) + + aligned_le64 qw1; +/* Descriptor done indication flag. */ +#define IAVF_RXD_LEGACY_DD_M BIT(0) +/* End of packet. Set to 1 if this descriptor is the last one of the packet */ +#define IAVF_RXD_LEGACY_EOP_M BIT(1) +/* L2 TAG 1 presence indication */ +#define IAVF_RXD_LEGACY_L2TAG1P_M BIT(2) +/* Detectable L3 and L4 integrity check is processed by the HW */ +#define IAVF_RXD_LEGACY_L3L4P_M BIT(3) +/* Set when an IPv6 packet contains a Destination Options Header or a Routing + * Header. + */ +#define IAVF_RXD_LEGACY_IPV6EXADD_M BIT(15) +/* Receive MAC Errors: CRC; Alignment; Oversize; Undersizes; Length error */ +#define IAVF_RXD_LEGACY_RXE_M BIT(19) +/* Checksum reports: + * - IPE: IP checksum error + * - L4E: L4 integrity error + * - EIPE: External IP header (tunneled packets) + */ +#define IAVF_RXD_LEGACY_IPE_M BIT(22) +#define IAVF_RXD_LEGACY_L4E_M BIT(23) +#define IAVF_RXD_LEGACY_EIPE_M BIT(24) +/* Set for packets that skip checksum calculation in pre-parser */ +#define IAVF_RXD_LEGACY_PPRS_M BIT(26) +/* Indicates the content in the Filter Status field */ +#define IAVF_RXD_LEGACY_FLTSTAT_M GENMASK_ULL(13, 12) +/* Packet type */ +#define IAVF_RXD_LEGACY_PTYPE_M GENMASK_ULL(37, 30) +/* Packet length */ +#define IAVF_RXD_LEGACY_LENGTH_M GENMASK_ULL(51, 38) +/* Descriptor done indication flag */ +#define IAVF_RXD_FLEX_DD_M BIT(0) +/* End of packet.
Set to 1 if this descriptor is the last one of the packet */ +#define IAVF_RXD_FLEX_EOP_M BIT(1) +/* Detectable L3 and L4 integrity check is processed by the HW */ +#define IAVF_RXD_FLEX_L3L4P_M BIT(3) +/* Checksum reports: + * - IPE: IP checksum error + * - L4E: L4 integrity error + * - EIPE: External IP header (tunneled packets) + * - EUDPE: External UDP checksum error (tunneled packets) + */ +#define IAVF_RXD_FLEX_XSUM_IPE_M BIT(4) +#define IAVF_RXD_FLEX_XSUM_L4E_M BIT(5) +#define IAVF_RXD_FLEX_XSUM_EIPE_M BIT(6) +#define IAVF_RXD_FLEX_XSUM_EUDPE_M BIT(7) +/* Set when an IPv6 packet contains a Destination Options Header or a Routing + * Header. + */ +#define IAVF_RXD_FLEX_IPV6EXADD_M BIT(9) +/* Receive MAC Errors: CRC; Alignment; Oversize; Undersizes; Length error */ +#define IAVF_RXD_FLEX_RXE_M BIT(10) +/* Indicates that the RSS/HASH result is valid */ +#define IAVF_RXD_FLEX_RSS_VALID_M BIT(12) +/* L2 TAG 1 presence indication */ +#define IAVF_RXD_FLEX_L2TAG1P_M BIT(13) +/* Stripped L2 Tag from the receive packet */ +#define IAVF_RXD_FLEX_L2TAG1_M GENMASK_ULL(31, 16) +/* The hash signature (RSS) */ +#define IAVF_RXD_FLEX_RSS_HASH_M GENMASK_ULL(63, 32) + + aligned_le64 qw2; +/* L2 Tag 2 Presence */ +#define IAVF_RXD_LEGACY_L2TAG2P_M BIT(0) +/* Stripped S-TAG VLAN from the receive packet */ +#define IAVF_RXD_LEGACY_L2TAG2_M GENMASK_ULL(63, 32) +/* Stripped S-TAG VLAN from the receive packet */ +#define IAVF_RXD_FLEX_L2TAG2_2_M GENMASK_ULL(63, 48) +/* The packet is a UDP tunneled packet */ +#define IAVF_RXD_FLEX_NAT_M BIT(4) +/* L2 Tag 2 Presence */ +#define IAVF_RXD_FLEX_L2TAG2P_M BIT(11) + aligned_le64 qw3; +#define IAVF_RXD_FLEX_QW3_TSTAMP_HIGH_M GENMASK_ULL(63, 32) +} __aligned(4 * sizeof(__le64)); +static_assert(sizeof(struct iavf_rx_desc) == 32); #define IAVF_RXD_QW1_STATUS_TSYNINDX_SHIFT IAVF_RX_DESC_STATUS_TSYNINDX_SHIFT #define IAVF_RXD_QW1_STATUS_TSYNINDX_MASK (0x3UL << \ @@ -298,22 +304,6 @@ enum iavf_rx_desc_fltstat_values { IAVF_RX_DESC_FLTSTAT_RSS_HASH = 3, }; -#define IAVF_RXD_QW1_ERROR_SHIFT 19 -#define IAVF_RXD_QW1_ERROR_MASK (0xFFUL << IAVF_RXD_QW1_ERROR_SHIFT) - -enum iavf_rx_desc_error_bits { - /* Note: These are predefined bit offsets */ - IAVF_RX_DESC_ERROR_RXE_SHIFT = 0, - IAVF_RX_DESC_ERROR_RECIPE_SHIFT = 1, - IAVF_RX_DESC_ERROR_HBO_SHIFT = 2, - IAVF_RX_DESC_ERROR_L3L4E_SHIFT = 3, /* 3 BITS */ - IAVF_RX_DESC_ERROR_IPE_SHIFT = 3, - IAVF_RX_DESC_ERROR_L4E_SHIFT = 4, - IAVF_RX_DESC_ERROR_EIPE_SHIFT = 5, - IAVF_RX_DESC_ERROR_OVERSIZE_SHIFT = 6, - IAVF_RX_DESC_ERROR_PPRS_SHIFT = 7 -}; - enum iavf_rx_desc_error_l3l4e_fcoe_masks { IAVF_RX_DESC_ERROR_L3L4E_NONE = 0, IAVF_RX_DESC_ERROR_L3L4E_PROT = 1, @@ -322,13 +312,6 @@ enum iavf_rx_desc_error_l3l4e_fcoe_masks { IAVF_RX_DESC_ERROR_L3L4E_DMAC_WARN = 4 }; -#define IAVF_RXD_QW1_PTYPE_SHIFT 30 -#define IAVF_RXD_QW1_PTYPE_MASK (0xFFULL << IAVF_RXD_QW1_PTYPE_SHIFT) - -#define IAVF_RXD_QW1_LENGTH_PBUF_SHIFT 38 -#define IAVF_RXD_QW1_LENGTH_PBUF_MASK (0x3FFFULL << \ - IAVF_RXD_QW1_LENGTH_PBUF_SHIFT) - #define IAVF_RXD_QW1_LENGTH_HBUF_SHIFT 52 #define IAVF_RXD_QW1_LENGTH_HBUF_MASK (0x7FFULL << \ IAVF_RXD_QW1_LENGTH_HBUF_SHIFT) @@ -347,6 +330,8 @@ enum iavf_rx_desc_ext_status_bits { IAVF_RX_DESC_EXT_STATUS_PELONGB_SHIFT = 11, }; +#define IAVF_RX_DESC_EXT_STATUS_L2TAG2P_M BIT(IAVF_RX_DESC_EXT_STATUS_L2TAG2P_SHIFT) + enum iavf_rx_desc_pe_status_bits { /* Note: These are predefined bit offsets */ IAVF_RX_DESC_PE_STATUS_QPID_SHIFT = 0, /* 18 BITS */ diff --git a/drivers/net/ethernet/intel/iavf/iavf_types.h 
b/drivers/net/ethernet/intel/iavf/iavf_types.h new file mode 100644 index 0000000000000..a095855122bf2 --- /dev/null +++ b/drivers/net/ethernet/intel/iavf/iavf_types.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2024 Intel Corporation. */ + +#ifndef _IAVF_TYPES_H_ +#define _IAVF_TYPES_H_ + +#include "iavf_types.h" + +#include +#include + +/* structure used to queue PTP commands for processing */ +struct iavf_ptp_aq_cmd { + struct list_head list; + enum virtchnl_ops v_opcode:16; + u16 msglen; + u8 msg[] __counted_by(msglen); +}; + +struct iavf_ptp { + wait_queue_head_t phc_time_waitqueue; + struct virtchnl_ptp_caps hw_caps; + struct ptp_clock_info info; + struct ptp_clock *clock; + struct list_head aq_cmds; + u64 cached_phc_time; + unsigned long cached_phc_updated; + /* Lock protecting access to the AQ command list */ + struct mutex aq_cmd_lock; + struct kernel_hwtstamp_config hwtstamp_config; + bool phc_time_ready:1; +}; + +#endif /* _IAVF_TYPES_H_ */ diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c index 15d388b431c51..a6f0e5990be25 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c +++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c @@ -4,6 +4,7 @@ #include #include "iavf.h" +#include "iavf_ptp.h" #include "iavf_prototype.h" /** @@ -144,9 +145,11 @@ int iavf_send_vf_config_msg(struct iavf_adapter *adapter) VIRTCHNL_VF_OFFLOAD_ENCAP | VIRTCHNL_VF_OFFLOAD_TC_U32 | VIRTCHNL_VF_OFFLOAD_VLAN_V2 | + VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC | VIRTCHNL_VF_OFFLOAD_CRC | VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM | VIRTCHNL_VF_OFFLOAD_REQ_QUEUES | + VIRTCHNL_VF_CAP_PTP | VIRTCHNL_VF_OFFLOAD_ADQ | VIRTCHNL_VF_OFFLOAD_USO | VIRTCHNL_VF_OFFLOAD_FDIR_PF | @@ -177,6 +180,54 @@ int iavf_send_vf_offload_vlan_v2_msg(struct iavf_adapter *adapter) NULL, 0); } +int iavf_send_vf_supported_rxdids_msg(struct iavf_adapter *adapter) +{ + adapter->aq_required &= ~IAVF_FLAG_AQ_GET_SUPPORTED_RXDIDS; + + if (!IAVF_RXDID_ALLOWED(adapter)) + return -EOPNOTSUPP; + + adapter->current_op = VIRTCHNL_OP_GET_SUPPORTED_RXDIDS; + + return iavf_send_pf_msg(adapter, VIRTCHNL_OP_GET_SUPPORTED_RXDIDS, + NULL, 0); +} + +/** + * iavf_send_vf_ptp_caps_msg - Send request for PTP capabilities + * @adapter: private adapter structure + * + * Send the VIRTCHNL_OP_1588_PTP_GET_CAPS command to the PF to request the PTP + * capabilities available to this device. This includes the following + * potential access: + * + * * READ_PHC - access to read the PTP hardware clock time + * * RX_TSTAMP - access to request Rx timestamps on all received packets + * + * The PF will reply with the same opcode and a filled out copy of the + * virtchnl_ptp_caps structure which defines the specifics of which features + * are accessible to this device. + * + * Return: 0 if success, error code otherwise.
+ */ +int iavf_send_vf_ptp_caps_msg(struct iavf_adapter *adapter) +{ + struct virtchnl_ptp_caps hw_caps = { + .caps = VIRTCHNL_1588_PTP_CAP_READ_PHC | + VIRTCHNL_1588_PTP_CAP_RX_TSTAMP + }; + + adapter->aq_required &= ~IAVF_FLAG_AQ_GET_PTP_CAPS; + + if (!IAVF_PTP_ALLOWED(adapter)) + return -EOPNOTSUPP; + + adapter->current_op = VIRTCHNL_OP_1588_PTP_GET_CAPS; + + return iavf_send_pf_msg(adapter, VIRTCHNL_OP_1588_PTP_GET_CAPS, + (u8 *)&hw_caps, sizeof(hw_caps)); +} + /** * iavf_validate_num_queues * @adapter: adapter structure @@ -263,6 +314,40 @@ int iavf_get_vf_vlan_v2_caps(struct iavf_adapter *adapter) return err; } +int iavf_get_vf_supported_rxdids(struct iavf_adapter *adapter) +{ + struct iavf_arq_event_info event; + u64 rxdids; + int err; + + event.msg_buf = (u8 *)&rxdids; + event.buf_len = sizeof(rxdids); + + err = iavf_poll_virtchnl_msg(&adapter->hw, &event, + VIRTCHNL_OP_GET_SUPPORTED_RXDIDS); + if (!err) + adapter->supp_rxdids = rxdids; + + return err; +} + +int iavf_get_vf_ptp_caps(struct iavf_adapter *adapter) +{ + struct virtchnl_ptp_caps caps = {}; + struct iavf_arq_event_info event; + int err; + + event.msg_buf = (u8 *)&caps; + event.buf_len = sizeof(caps); + + err = iavf_poll_virtchnl_msg(&adapter->hw, &event, + VIRTCHNL_OP_1588_PTP_GET_CAPS); + if (!err) + adapter->ptp.hw_caps = caps; + + return err; +} + /** * iavf_configure_queues * @adapter: adapter structure @@ -275,6 +360,7 @@ void iavf_configure_queues(struct iavf_adapter *adapter) int pairs = adapter->num_active_queues; struct virtchnl_queue_pair_info *vqpi; u32 i, max_frame; + u8 rx_flags = 0; size_t len; max_frame = LIBIE_MAX_RX_FRM_LEN(adapter->rx_rings->pp->p.offset); @@ -292,6 +378,9 @@ void iavf_configure_queues(struct iavf_adapter *adapter) if (!vqci) return; + if (iavf_ptp_cap_supported(adapter, VIRTCHNL_1588_PTP_CAP_RX_TSTAMP)) + rx_flags |= VIRTCHNL_PTP_RX_TSTAMP; + vqci->vsi_id = adapter->vsi_res->vsi_id; vqci->num_queue_pairs = pairs; vqpi = vqci->qpair; @@ -309,9 +398,12 @@ void iavf_configure_queues(struct iavf_adapter *adapter) vqpi->rxq.dma_ring_addr = adapter->rx_rings[i].dma; vqpi->rxq.max_pkt_size = max_frame; vqpi->rxq.databuffer_size = adapter->rx_rings[i].rx_buf_len; + if (IAVF_RXDID_ALLOWED(adapter)) + vqpi->rxq.rxdid = adapter->rxdid; if (CRC_OFFLOAD_ALLOWED(adapter)) + vqpi->rxq.crc_disable = !!(adapter->netdev->features & NETIF_F_RXFCS); + vqpi->rxq.flags = rx_flags; vqpi++; } @@ -1402,6 +1494,67 @@ void iavf_disable_vlan_insertion_v2(struct iavf_adapter *adapter, u16 tpid) VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2); } +#if IS_ENABLED(CONFIG_PTP_1588_CLOCK) +/** + * iavf_virtchnl_send_ptp_cmd - Send one queued PTP command + * @adapter: adapter private structure + * + * De-queue one PTP command request and send the command message to the PF. + * Clear IAVF_FLAG_AQ_SEND_PTP_CMD if no more messages are left to send. + */ +void iavf_virtchnl_send_ptp_cmd(struct iavf_adapter *adapter) +{ + struct iavf_ptp_aq_cmd *cmd; + int err; + + if (!adapter->ptp.clock) { + /* This shouldn't be possible to hit, since no messages should + * be queued if PTP is not initialized.
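
The VIRTCHNL_OP_GET_SUPPORTED_RXDIDS reply handled above is a plain u64 bitmap indexed by descriptor format ID, so choosing the format to program into vqpi->rxq.rxdid reduces to a bit test. A hedged sketch (the IDs follow the virtchnl numbering; the preference policy shown is illustrative, not the driver's exact selection logic):

    #include <stdint.h>
    #include <stdio.h>

    /* Descriptor format IDs from virtchnl: 1 = legacy 32B, 2 = flex NIC. */
    enum { RXDID_1_32B_BASE = 1, RXDID_2_FLEX_SQ_NIC = 2 };

    /* Prefer the flex format when the PF advertises it, otherwise fall
     * back to the legacy format, which every PF supports.
     */
    static uint8_t pick_rxdid(uint64_t supp_rxdids)
    {
    	if (supp_rxdids & (1ULL << RXDID_2_FLEX_SQ_NIC))
    		return RXDID_2_FLEX_SQ_NIC;
    	return RXDID_1_32B_BASE;
    }

    int main(void)
    {
    	printf("%u\n", pick_rxdid(1ULL << RXDID_2_FLEX_SQ_NIC));	/* 2 */
    	printf("%u\n", pick_rxdid(0));					/* 1 */
    	return 0;
    }
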
+ */ + pci_err(adapter->pdev, "PTP is not initialized\n"); + adapter->aq_required &= ~IAVF_FLAG_AQ_SEND_PTP_CMD; + return; + } + + mutex_lock(&adapter->ptp.aq_cmd_lock); + cmd = list_first_entry_or_null(&adapter->ptp.aq_cmds, + struct iavf_ptp_aq_cmd, list); + if (!cmd) { + /* no further PTP messages to send */ + adapter->aq_required &= ~IAVF_FLAG_AQ_SEND_PTP_CMD; + goto out_unlock; + } + + if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { + /* bail because we already have a command pending */ + pci_err(adapter->pdev, + "Cannot send PTP command %d, command %d pending\n", + cmd->v_opcode, adapter->current_op); + goto out_unlock; + } + + err = iavf_send_pf_msg(adapter, cmd->v_opcode, cmd->msg, cmd->msglen); + if (!err) { + /* Command was sent without errors, so we can remove it from + * the list and discard it. + */ + list_del(&cmd->list); + kfree(cmd); + } else { + /* We failed to send the command, try again next cycle */ + pci_err(adapter->pdev, "Failed to send PTP command %d\n", + cmd->v_opcode); + } + + if (list_empty(&adapter->ptp.aq_cmds)) + /* no further PTP messages to send */ + adapter->aq_required &= ~IAVF_FLAG_AQ_SEND_PTP_CMD; + +out_unlock: + mutex_unlock(&adapter->ptp.aq_cmd_lock); +} +#endif /* IS_ENABLED(CONFIG_PTP_1588_CLOCK) */ + /** * iavf_print_link_message - print link up or down * @adapter: adapter structure @@ -2097,6 +2250,37 @@ static void iavf_activate_fdir_filters(struct iavf_adapter *adapter) adapter->aq_required |= IAVF_FLAG_AQ_ADD_FDIR_FILTER; } +/** + * iavf_virtchnl_ptp_get_time - Respond to VIRTCHNL_OP_1588_PTP_GET_TIME + * @adapter: private adapter structure + * @data: the message from the PF + * @len: length of the message from the PF + * + * Handle the VIRTCHNL_OP_1588_PTP_GET_TIME message from the PF. This message + * is sent by the PF in response to the same op as a request from the VF. + * Extract the 64bit nanoseconds time from the message and store it in + * cached_phc_time. Then, notify any thread that is waiting for the update via + * the wait queue. + */ +static void iavf_virtchnl_ptp_get_time(struct iavf_adapter *adapter, + void *data, u16 len) +{ + struct virtchnl_phc_time *msg = data; + + if (len != sizeof(*msg)) { + dev_err_once(&adapter->pdev->dev, + "Invalid VIRTCHNL_OP_1588_PTP_GET_TIME from PF. 
Got size %u, expected %zu\n", + len, sizeof(*msg)); + return; + } + + adapter->ptp.cached_phc_time = msg->time; + adapter->ptp.cached_phc_updated = jiffies; + adapter->ptp.phc_time_ready = true; + + wake_up(&adapter->ptp.phc_time_waitqueue); +} + /** * iavf_virtchnl_completion * @adapter: adapter structure @@ -2509,6 +2693,25 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter, aq_required; } break; + case VIRTCHNL_OP_GET_SUPPORTED_RXDIDS: + if (msglen != sizeof(u64)) + return; + + adapter->supp_rxdids = *(u64 *)msg; + + break; + case VIRTCHNL_OP_1588_PTP_GET_CAPS: + if (msglen != sizeof(adapter->ptp.hw_caps)) + return; + + adapter->ptp.hw_caps = *(struct virtchnl_ptp_caps *)msg; + + /* process any state change needed due to new capabilities */ + iavf_ptp_process_caps(adapter); + break; + case VIRTCHNL_OP_1588_PTP_GET_TIME: + iavf_virtchnl_ptp_get_time(adapter, msg, msglen); + break; case VIRTCHNL_OP_ENABLE_QUEUES: /* enable transmits */ iavf_irq_enable(adapter, true); diff --git a/drivers/net/ethernet/intel/ice/devlink/devlink.c b/drivers/net/ethernet/intel/ice/devlink/devlink.c index dbdb83567364c..fcb199efbea55 100644 --- a/drivers/net/ethernet/intel/ice/devlink/devlink.c +++ b/drivers/net/ethernet/intel/ice/devlink/devlink.c @@ -1205,6 +1205,25 @@ static int ice_devlink_set_parent(struct devlink_rate *devlink_rate, return status; } +static void ice_set_min_max_msix(struct ice_pf *pf) +{ + struct devlink *devlink = priv_to_devlink(pf); + union devlink_param_value val; + int err; + + err = devl_param_driverinit_value_get(devlink, + DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MIN, + &val); + if (!err) + pf->msix.min = val.vu32; + + err = devl_param_driverinit_value_get(devlink, + DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MAX, + &val); + if (!err) + pf->msix.max = val.vu32; +} + /** * ice_devlink_reinit_up - do reinit of the given PF * @pf: pointer to the PF struct @@ -1220,6 +1239,9 @@ static int ice_devlink_reinit_up(struct ice_pf *pf) return err; } + /* load MSI-X values */ + ice_set_min_max_msix(pf); + err = ice_init_dev(pf); if (err) goto unroll_hw_init; @@ -1533,6 +1555,43 @@ static int ice_devlink_local_fwd_validate(struct devlink *devlink, u32 id, return 0; } +static int +ice_devlink_msix_max_pf_validate(struct devlink *devlink, u32 id, + union devlink_param_value val, + struct netlink_ext_ack *extack) +{ + struct ice_pf *pf = devlink_priv(devlink); + + if (val.vu32 > pf->hw.func_caps.common_cap.num_msix_vectors) + return -EINVAL; + + return 0; +} + +static int +ice_devlink_msix_min_pf_validate(struct devlink *devlink, u32 id, + union devlink_param_value val, + struct netlink_ext_ack *extack) +{ + if (val.vu32 < ICE_MIN_MSIX) + return -EINVAL; + + return 0; +} + +static int ice_devlink_enable_rdma_validate(struct devlink *devlink, u32 id, + union devlink_param_value val, + struct netlink_ext_ack *extack) +{ + struct ice_pf *pf = devlink_priv(devlink); + bool new_state = val.vbool; + + if (new_state && !test_bit(ICE_FLAG_RDMA_ENA, pf->flags)) + return -EOPNOTSUPP; + + return 0; +} + enum ice_param_id { ICE_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX, ICE_DEVLINK_PARAM_ID_TX_SCHED_LAYERS, @@ -1548,6 +1607,17 @@ static const struct devlink_param ice_dvl_rdma_params[] = { ice_devlink_enable_iw_get, ice_devlink_enable_iw_set, ice_devlink_enable_iw_validate), + DEVLINK_PARAM_GENERIC(ENABLE_RDMA, BIT(DEVLINK_PARAM_CMODE_DRIVERINIT), + NULL, NULL, ice_devlink_enable_rdma_validate), +}; + +static const struct devlink_param ice_dvl_msix_params[] = { + 
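+	/* Both parameters use cmode driverinit; assuming standard devlink
+	 * CLI semantics they would be tuned and applied roughly as follows
+	 * (BDF and value are illustrative):
+	 *
+	 *   devlink dev param set pci/0000:00:00.0 \
+	 *           name msix_vec_per_pf_max value 64 cmode driverinit
+	 *   devlink dev reload pci/0000:00:00.0
+	 */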
DEVLINK_PARAM_GENERIC(MSIX_VEC_PER_PF_MAX, + BIT(DEVLINK_PARAM_CMODE_DRIVERINIT), + NULL, NULL, ice_devlink_msix_max_pf_validate), + DEVLINK_PARAM_GENERIC(MSIX_VEC_PER_PF_MIN, + BIT(DEVLINK_PARAM_CMODE_DRIVERINIT), + NULL, NULL, ice_devlink_msix_min_pf_validate), }; static const struct devlink_param ice_dvl_sched_params[] = { @@ -1651,6 +1721,7 @@ void ice_devlink_unregister(struct ice_pf *pf) int ice_devlink_register_params(struct ice_pf *pf) { struct devlink *devlink = priv_to_devlink(pf); + union devlink_param_value value; struct ice_hw *hw = &pf->hw; int status; @@ -1659,10 +1730,39 @@ int ice_devlink_register_params(struct ice_pf *pf) if (status) return status; + status = devl_params_register(devlink, ice_dvl_msix_params, + ARRAY_SIZE(ice_dvl_msix_params)); + if (status) + goto unregister_rdma_params; + if (hw->func_caps.common_cap.tx_sched_topo_comp_mode_en) status = devl_params_register(devlink, ice_dvl_sched_params, ARRAY_SIZE(ice_dvl_sched_params)); + if (status) + goto unregister_msix_params; + + value.vu32 = pf->msix.max; + devl_param_driverinit_value_set(devlink, + DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MAX, + value); + value.vu32 = pf->msix.min; + devl_param_driverinit_value_set(devlink, + DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MIN, + value); + + value.vbool = test_bit(ICE_FLAG_RDMA_ENA, pf->flags); + devl_param_driverinit_value_set(devlink, + DEVLINK_PARAM_GENERIC_ID_ENABLE_RDMA, + value); + + return 0; +unregister_msix_params: + devl_params_unregister(devlink, ice_dvl_msix_params, + ARRAY_SIZE(ice_dvl_msix_params)); +unregister_rdma_params: + devl_params_unregister(devlink, ice_dvl_rdma_params, + ARRAY_SIZE(ice_dvl_rdma_params)); return status; } @@ -1673,6 +1773,8 @@ void ice_devlink_unregister_params(struct ice_pf *pf) devl_params_unregister(devlink, ice_dvl_rdma_params, ARRAY_SIZE(ice_dvl_rdma_params)); + devl_params_unregister(devlink, ice_dvl_msix_params, + ARRAY_SIZE(ice_dvl_msix_params)); if (hw->func_caps.common_cap.tx_sched_topo_comp_mode_en) devl_params_unregister(devlink, ice_dvl_sched_params, diff --git a/drivers/net/ethernet/intel/ice/devlink/health.c b/drivers/net/ethernet/intel/ice/devlink/health.c index ea40f79412590..19c3d37aa768b 100644 --- a/drivers/net/ethernet/intel/ice/devlink/health.c +++ b/drivers/net/ethernet/intel/ice/devlink/health.c @@ -25,10 +25,10 @@ struct ice_health_status { * The below lookup requires to be sorted by code. */ -static const char *const ice_common_port_solutions = +static const char ice_common_port_solutions[] = "Check your cable connection. Change or replace the module or cable. 
Manually set speed and duplex."; -static const char *const ice_port_number_label = "Port Number"; -static const char *const ice_update_nvm_solution = "Update to the latest NVM image."; +static const char ice_port_number_label[] = "Port Number"; +static const char ice_update_nvm_solution[] = "Update to the latest NVM image."; static const struct ice_health_status ice_health_status_lookup[] = { {ICE_AQC_HEALTH_STATUS_ERR_UNKNOWN_MOD_STRICT, "An unsupported module was detected.", diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h index 71e05d30f0fda..fd083647c14ac 100644 --- a/drivers/net/ethernet/intel/ice/ice.h +++ b/drivers/net/ethernet/intel/ice/ice.h @@ -97,9 +97,6 @@ #define ICE_MIN_LAN_OICR_MSIX 1 #define ICE_MIN_MSIX (ICE_MIN_LAN_TXRX_MSIX + ICE_MIN_LAN_OICR_MSIX) #define ICE_FDIR_MSIX 2 -#define ICE_RDMA_NUM_AEQ_MSIX 4 -#define ICE_MIN_RDMA_MSIX 2 -#define ICE_ESWITCH_MSIX 1 #define ICE_NO_VSI 0xffff #define ICE_VSI_MAP_CONTIG 0 #define ICE_VSI_MAP_SCATTER 1 @@ -204,6 +201,7 @@ enum ice_feature { ICE_F_SMA_CTRL, ICE_F_CGU, ICE_F_GNSS, + ICE_F_GCS, ICE_F_ROCE_LAG, ICE_F_SRIOV_LAG, ICE_F_MBX_LIMIT, @@ -478,9 +476,6 @@ struct ice_q_vector { struct ice_ring_container rx; struct ice_ring_container tx; - cpumask_t affinity_mask; - struct irq_affinity_notify affinity_notify; - struct ice_channel *ch; char name[ICE_INT_NAME_STR_LEN]; @@ -542,6 +537,14 @@ struct ice_agg_node { u8 valid; }; +struct ice_pf_msix { + u32 cur; + u32 min; + u32 max; + u32 total; + u32 rest; +}; + struct ice_pf { struct pci_dev *pdev; struct ice_adapter *adapter; @@ -556,13 +559,7 @@ struct ice_pf { /* OS reserved IRQ details */ struct msix_entry *msix_entries; struct ice_irq_tracker irq_tracker; - /* First MSIX vector used by SR-IOV VFs. Calculated by subtracting the - * number of MSIX vectors needed for all SR-IOV VFs from the number of - * MSIX vectors allowed on this PF. - */ - u16 sriov_base_vector; - unsigned long *sriov_irq_bm; /* bitmap to track irq usage */ - u16 sriov_irq_size; /* size of the irq_bm bitmap */ + struct ice_virt_irq_tracker virt_irq_tracker; u16 ctrl_vsi_idx; /* control VSI index in pf->vsi array */ @@ -612,7 +609,7 @@ struct ice_pf { struct msi_map ll_ts_irq; /* LL_TS interrupt MSIX vector */ u16 max_pf_txqs; /* Total Tx queues PF wide */ u16 max_pf_rxqs; /* Total Rx queues PF wide */ - u16 num_lan_msix; /* Total MSIX vectors for base driver */ + struct ice_pf_msix msix; u16 num_lan_tx; /* num LAN Tx queues setup */ u16 num_lan_rx; /* num LAN Rx queues setup */ u16 next_vsi; /* Next free slot in pf->vsi[] - 0-based! 
*/ @@ -1047,10 +1044,5 @@ static inline void ice_clear_rdma_cap(struct ice_pf *pf) clear_bit(ICE_FLAG_RDMA_ENA, pf->flags); } -static inline enum ice_phy_model ice_get_phy_model(const struct ice_hw *hw) -{ - return hw->ptp.phy_model; -} - extern const struct xdp_metadata_ops ice_xdp_md_ops; #endif /* _ICE_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_arfs.c b/drivers/net/ethernet/intel/ice/ice_arfs.c index 7cee365cc7d16..2bc5c7f598444 100644 --- a/drivers/net/ethernet/intel/ice/ice_arfs.c +++ b/drivers/net/ethernet/intel/ice/ice_arfs.c @@ -511,7 +511,7 @@ void ice_init_arfs(struct ice_vsi *vsi) struct hlist_head *arfs_fltr_list; unsigned int i; - if (!vsi || vsi->type != ICE_VSI_PF) + if (!vsi || vsi->type != ICE_VSI_PF || ice_is_arfs_active(vsi)) return; arfs_fltr_list = kcalloc(ICE_MAX_ARFS_LIST, sizeof(*arfs_fltr_list), @@ -570,25 +570,6 @@ void ice_clear_arfs(struct ice_vsi *vsi) vsi->arfs_fltr_cntrs = NULL; } -/** - * ice_free_cpu_rx_rmap - free setup CPU reverse map - * @vsi: the VSI to be forwarded to - */ -void ice_free_cpu_rx_rmap(struct ice_vsi *vsi) -{ - struct net_device *netdev; - - if (!vsi || vsi->type != ICE_VSI_PF) - return; - - netdev = vsi->netdev; - if (!netdev || !netdev->rx_cpu_rmap) - return; - - free_irq_cpu_rmap(netdev->rx_cpu_rmap); - netdev->rx_cpu_rmap = NULL; -} - /** * ice_set_cpu_rx_rmap - setup CPU reverse map for each queue * @vsi: the VSI to be forwarded to @@ -597,7 +578,6 @@ int ice_set_cpu_rx_rmap(struct ice_vsi *vsi) { struct net_device *netdev; struct ice_pf *pf; - int i; if (!vsi || vsi->type != ICE_VSI_PF) return 0; @@ -610,18 +590,7 @@ int ice_set_cpu_rx_rmap(struct ice_vsi *vsi) netdev_dbg(netdev, "Setup CPU RMAP: vsi type 0x%x, ifname %s, q_vectors %d\n", vsi->type, netdev->name, vsi->num_q_vectors); - netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(vsi->num_q_vectors); - if (unlikely(!netdev->rx_cpu_rmap)) - return -EINVAL; - - ice_for_each_q_vector(vsi, i) - if (irq_cpu_rmap_add(netdev->rx_cpu_rmap, - vsi->q_vectors[i]->irq.virq)) { - ice_free_cpu_rx_rmap(vsi); - return -EINVAL; - } - - return 0; + return netif_enable_cpu_rmap(netdev, vsi->num_q_vectors); } /** diff --git a/drivers/net/ethernet/intel/ice/ice_arfs.h b/drivers/net/ethernet/intel/ice/ice_arfs.h index 9669ad9bf7b54..9706293128c3b 100644 --- a/drivers/net/ethernet/intel/ice/ice_arfs.h +++ b/drivers/net/ethernet/intel/ice/ice_arfs.h @@ -45,7 +45,6 @@ int ice_rx_flow_steer(struct net_device *netdev, const struct sk_buff *skb, u16 rxq_idx, u32 flow_id); void ice_clear_arfs(struct ice_vsi *vsi); -void ice_free_cpu_rx_rmap(struct ice_vsi *vsi); void ice_init_arfs(struct ice_vsi *vsi); void ice_sync_arfs_fltrs(struct ice_pf *pf); int ice_set_cpu_rx_rmap(struct ice_vsi *vsi); @@ -56,7 +55,6 @@ ice_is_arfs_using_perfect_flow(struct ice_hw *hw, enum ice_fltr_ptype flow_type); #else static inline void ice_clear_arfs(struct ice_vsi *vsi) { } -static inline void ice_free_cpu_rx_rmap(struct ice_vsi *vsi) { } static inline void ice_init_arfs(struct ice_vsi *vsi) { } static inline void ice_sync_arfs_fltrs(struct ice_pf *pf) { } static inline void ice_remove_arfs(struct ice_pf *pf) { } diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c index b2af8e3586f76..6db4ad8fc70b0 100644 --- a/drivers/net/ethernet/intel/ice/ice_base.c +++ b/drivers/net/ethernet/intel/ice/ice_base.c @@ -147,10 +147,6 @@ static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, u16 v_idx) q_vector->reg_idx = q_vector->irq.index; q_vector->vf_reg_idx = q_vector->irq.index; - /* only 
set affinity_mask if the CPU is online */ - if (cpu_online(v_idx)) - cpumask_set_cpu(v_idx, &q_vector->affinity_mask); - /* This will not be called in the driver load path because the netdev * will not be created yet. All other cases will register the NAPI * handler here (i.e. resume, reset/rebuild, etc.) @@ -276,7 +272,8 @@ static void ice_cfg_xps_tx_ring(struct ice_tx_ring *ring) if (test_and_set_bit(ICE_TX_XPS_INIT_DONE, ring->xps_state)) return; - netif_set_xps_queue(ring->netdev, &ring->q_vector->affinity_mask, + netif_set_xps_queue(ring->netdev, + &ring->q_vector->napi.config->affinity_mask, ring->q_index); } @@ -473,9 +470,6 @@ static int ice_setup_rx_ctx(struct ice_rx_ring *ring) */ if (vsi->type != ICE_VSI_VF) ice_write_qrxflxp_cntxt(hw, pf_q, rxdid, 0x3, true); - else - ice_write_qrxflxp_cntxt(hw, pf_q, ICE_RXDID_LEGACY_1, 0x3, - false); /* Absolute queue number out of 2K needs to be passed */ err = ice_write_rxq_ctx(hw, &rlan_ctx, pf_q); @@ -801,13 +795,11 @@ int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi) return 0; err_out: - while (v_idx--) - ice_free_q_vector(vsi, v_idx); - dev_err(dev, "Failed to allocate %d q_vector for VSI %d, ret=%d\n", - vsi->num_q_vectors, vsi->vsi_num, err); - vsi->num_q_vectors = 0; - return err; + dev_info(dev, "Failed to allocate %d q_vectors for VSI %d, new value %d\n", + vsi->num_q_vectors, vsi->vsi_num, v_idx); + vsi->num_q_vectors = v_idx; + return v_idx ? 0 : err; } /** diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index 7a2a2e8da8fab..59df31c2c83f7 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -186,7 +186,7 @@ static int ice_set_mac_type(struct ice_hw *hw) * ice_is_generic_mac - check if device's mac_type is generic * @hw: pointer to the hardware structure * - * Return: true if mac_type is generic (with SBQ support), false if not + * Return: true if mac_type is ICE_MAC_GENERIC*, false otherwise. */ bool ice_is_generic_mac(struct ice_hw *hw) { @@ -194,120 +194,6 @@ bool ice_is_generic_mac(struct ice_hw *hw) hw->mac_type == ICE_MAC_GENERIC_3K_E825); } -/** - * ice_is_e810 - * @hw: pointer to the hardware structure - * - * returns true if the device is E810 based, false if not. - */ -bool ice_is_e810(struct ice_hw *hw) -{ - return hw->mac_type == ICE_MAC_E810; -} - -/** - * ice_is_e810t - * @hw: pointer to the hardware structure - * - * returns true if the device is E810T based, false if not. - */ -bool ice_is_e810t(struct ice_hw *hw) -{ - switch (hw->device_id) { - case ICE_DEV_ID_E810C_SFP: - switch (hw->subsystem_device_id) { - case ICE_SUBDEV_ID_E810T: - case ICE_SUBDEV_ID_E810T2: - case ICE_SUBDEV_ID_E810T3: - case ICE_SUBDEV_ID_E810T4: - case ICE_SUBDEV_ID_E810T6: - case ICE_SUBDEV_ID_E810T7: - return true; - } - break; - case ICE_DEV_ID_E810C_QSFP: - switch (hw->subsystem_device_id) { - case ICE_SUBDEV_ID_E810T2: - case ICE_SUBDEV_ID_E810T3: - case ICE_SUBDEV_ID_E810T5: - return true; - } - break; - default: - break; - } - - return false; -} - -/** - * ice_is_e822 - Check if a device is E822 family device - * @hw: pointer to the hardware structure - * - * Return: true if the device is E822 based, false if not.
- */ -bool ice_is_e822(struct ice_hw *hw) -{ - switch (hw->device_id) { - case ICE_DEV_ID_E822C_BACKPLANE: - case ICE_DEV_ID_E822C_QSFP: - case ICE_DEV_ID_E822C_SFP: - case ICE_DEV_ID_E822C_10G_BASE_T: - case ICE_DEV_ID_E822C_SGMII: - case ICE_DEV_ID_E822L_BACKPLANE: - case ICE_DEV_ID_E822L_SFP: - case ICE_DEV_ID_E822L_10G_BASE_T: - case ICE_DEV_ID_E822L_SGMII: - return true; - default: - return false; - } -} - -/** - * ice_is_e823 - * @hw: pointer to the hardware structure - * - * returns true if the device is E823-L or E823-C based, false if not. - */ -bool ice_is_e823(struct ice_hw *hw) -{ - switch (hw->device_id) { - case ICE_DEV_ID_E823L_BACKPLANE: - case ICE_DEV_ID_E823L_SFP: - case ICE_DEV_ID_E823L_10G_BASE_T: - case ICE_DEV_ID_E823L_1GBE: - case ICE_DEV_ID_E823L_QSFP: - case ICE_DEV_ID_E823C_BACKPLANE: - case ICE_DEV_ID_E823C_QSFP: - case ICE_DEV_ID_E823C_SFP: - case ICE_DEV_ID_E823C_10G_BASE_T: - case ICE_DEV_ID_E823C_SGMII: - return true; - default: - return false; - } -} - -/** - * ice_is_e825c - Check if a device is E825C family device - * @hw: pointer to the hardware structure - * - * Return: true if the device is E825-C based, false if not. - */ -bool ice_is_e825c(struct ice_hw *hw) -{ - switch (hw->device_id) { - case ICE_DEV_ID_E825C_BACKPLANE: - case ICE_DEV_ID_E825C_QSFP: - case ICE_DEV_ID_E825C_SFP: - case ICE_DEV_ID_E825C_SGMII: - return true; - default: - return false; - } -} - /** * ice_is_pf_c827 - check if pf contains c827 phy * @hw: pointer to the hw struct @@ -2271,7 +2157,8 @@ ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps, caps->nvm_unified_update); break; case ICE_AQC_CAPS_RDMA: - caps->rdma = (number == 1); + if (IS_ENABLED(CONFIG_INFINIBAND_IRDMA)) + caps->rdma = (number == 1); ice_debug(hw, ICE_DBG_INIT, "%s: rdma = %d\n", prefix, caps->rdma); break; case ICE_AQC_CAPS_MAX_MTU: @@ -2408,7 +2295,7 @@ ice_parse_1588_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p, info->tmr_index_owned = ((number & ICE_TS_TMR_IDX_OWND_M) != 0); info->tmr_index_assoc = ((number & ICE_TS_TMR_IDX_ASSOC_M) != 0); - if (!ice_is_e825c(hw)) { + if (hw->mac_type != ICE_MAC_GENERIC_3K_E825) { info->clk_freq = FIELD_GET(ICE_TS_CLK_FREQ_M, number); info->clk_src = ((number & ICE_TS_CLK_SRC_M) != 0); } else { @@ -5764,6 +5651,96 @@ ice_aq_write_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr, return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); } +/** + * ice_get_pca9575_handle - find and return the PCA9575 controller + * @hw: pointer to the hw struct + * @pca9575_handle: GPIO controller's handle + * + * Find and return the GPIO controller's handle in the netlist. + * Once found, the value is cached in the hw structure and subsequent calls + * return the cached value. + * + * Return: 0 on success, -ENXIO when there's no PCA9575 present. + */ +int ice_get_pca9575_handle(struct ice_hw *hw, u16 *pca9575_handle) +{ + struct ice_aqc_get_link_topo *cmd; + struct ice_aq_desc desc; + int err; + u8 idx; + + /* If handle was read previously return cached value */ + if (hw->io_expander_handle) { + *pca9575_handle = hw->io_expander_handle; + return 0; + } + +#define SW_PCA9575_SFP_TOPO_IDX 2 +#define SW_PCA9575_QSFP_TOPO_IDX 1 + + /* Check if the SW IO expander controlling SMA exists in the netlist.
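+	 * Because the handle is cached in hw->io_expander_handle, only the
+	 * first call pays for the admin queue lookup below; for example,
+	 * ice_read_pca9575_reg() further down invokes this helper on every
+	 * register access.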
*/ + if (hw->device_id == ICE_DEV_ID_E810C_SFP) + idx = SW_PCA9575_SFP_TOPO_IDX; + else if (hw->device_id == ICE_DEV_ID_E810C_QSFP) + idx = SW_PCA9575_QSFP_TOPO_IDX; + else + return -ENXIO; + + /* If handle was not detected read it from the netlist */ + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo); + cmd = &desc.params.get_link_topo; + cmd->addr.topo_params.node_type_ctx = + ICE_AQC_LINK_TOPO_NODE_TYPE_GPIO_CTRL; + cmd->addr.topo_params.index = idx; + + err = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); + if (err) + return -ENXIO; + + /* Verify if we found the right IO expander type */ + if (desc.params.get_link_topo.node_part_num != + ICE_AQC_GET_LINK_TOPO_NODE_NR_PCA9575) + return -ENXIO; + + /* If present save the handle and return it */ + hw->io_expander_handle = + le16_to_cpu(desc.params.get_link_topo.addr.handle); + *pca9575_handle = hw->io_expander_handle; + + return 0; +} + +/** + * ice_read_pca9575_reg - read the register from the PCA9575 controller + * @hw: pointer to the hw struct + * @offset: GPIO controller register offset + * @data: pointer to data to be read from the GPIO controller + * + * Return: 0 on success, negative error code otherwise. + */ +int ice_read_pca9575_reg(struct ice_hw *hw, u8 offset, u8 *data) +{ + struct ice_aqc_link_topo_addr link_topo; + __le16 addr; + u16 handle; + int err; + + memset(&link_topo, 0, sizeof(link_topo)); + + err = ice_get_pca9575_handle(hw, &handle); + if (err) + return err; + + link_topo.handle = cpu_to_le16(handle); + link_topo.topo_params.node_type_ctx = + FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_CTX_M, + ICE_AQC_LINK_TOPO_NODE_CTX_PROVIDED); + + addr = cpu_to_le16((u16)offset); + + return ice_aq_read_i2c(hw, link_topo, 0, addr, 1, data, NULL); +} + /** * ice_aq_set_gpio * @hw: pointer to the hw struct diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h index 15ba385437389..9b00aa0ddf10e 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.h +++ b/drivers/net/ethernet/intel/ice/ice_common.h @@ -131,7 +131,6 @@ int ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags, struct ice_sq_cd *cd); bool ice_is_generic_mac(struct ice_hw *hw); -bool ice_is_e810(struct ice_hw *hw); int ice_clear_pf_cfg(struct ice_hw *hw); int ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi, @@ -276,10 +275,6 @@ ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded, void ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded, u64 *prev_stat, u64 *cur_stat); -bool ice_is_e810t(struct ice_hw *hw); -bool ice_is_e822(struct ice_hw *hw); -bool ice_is_e823(struct ice_hw *hw); -bool ice_is_e825c(struct ice_hw *hw); int ice_sched_query_elem(struct ice_hw *hw, u32 node_teid, struct ice_aqc_txsched_elem_data *buf); @@ -306,5 +301,7 @@ int ice_aq_write_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr, u16 bus_addr, __le16 addr, u8 params, const u8 *data, struct ice_sq_cd *cd); +int ice_get_pca9575_handle(struct ice_hw *hw, u16 *pca9575_handle); +int ice_read_pca9575_reg(struct ice_hw *hw, u8 offset, u8 *data); bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw); #endif /* _ICE_COMMON_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_ddp.c b/drivers/net/ethernet/intel/ice/ice_ddp.c index 03988be03729b..69d5b1a28491d 100644 --- a/drivers/net/ethernet/intel/ice/ice_ddp.c +++ b/drivers/net/ethernet/intel/ice/ice_ddp.c @@ -2345,14 +2345,14 @@ ice_get_set_tx_topo(struct ice_hw *hw, u8 *buf, u16 buf_size, cmd->set_flags |= 
ICE_AQC_TX_TOPO_FLAGS_SRC_RAM | ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW; - if (ice_is_e825c(hw)) + if (hw->mac_type == ICE_MAC_GENERIC_3K_E825) desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); } else { ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_tx_topo); cmd->get_flags = ICE_AQC_TX_TOPO_GET_RAM; } - if (!ice_is_e825c(hw)) + if (hw->mac_type != ICE_MAC_GENERIC_3K_E825) desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); diff --git a/drivers/net/ethernet/intel/ice/ice_dpll.c b/drivers/net/ethernet/intel/ice/ice_dpll.c index 8d806d8ad761a..bce3ad6ca2a6f 100644 --- a/drivers/net/ethernet/intel/ice/ice_dpll.c +++ b/drivers/net/ethernet/intel/ice/ice_dpll.c @@ -95,7 +95,7 @@ ice_dpll_pin_freq_set(struct ice_pf *pf, struct ice_dpll_pin *pin, } if (ret) { NL_SET_ERR_MSG_FMT(extack, - "err:%d %s failed to set pin freq:%u on pin:%u\n", + "err:%d %s failed to set pin freq:%u on pin:%u", ret, ice_aq_str(pf->hw.adminq.sq_last_status), freq, pin->idx); @@ -322,7 +322,7 @@ ice_dpll_pin_enable(struct ice_hw *hw, struct ice_dpll_pin *pin, } if (ret) NL_SET_ERR_MSG_FMT(extack, - "err:%d %s failed to enable %s pin:%u\n", + "err:%d %s failed to enable %s pin:%u", ret, ice_aq_str(hw->adminq.sq_last_status), pin_type_name[pin_type], pin->idx); @@ -367,7 +367,7 @@ ice_dpll_pin_disable(struct ice_hw *hw, struct ice_dpll_pin *pin, } if (ret) NL_SET_ERR_MSG_FMT(extack, - "err:%d %s failed to disable %s pin:%u\n", + "err:%d %s failed to disable %s pin:%u", ret, ice_aq_str(hw->adminq.sq_last_status), pin_type_name[pin_type], pin->idx); @@ -479,7 +479,7 @@ ice_dpll_pin_state_update(struct ice_pf *pf, struct ice_dpll_pin *pin, err: if (extack) NL_SET_ERR_MSG_FMT(extack, - "err:%d %s failed to update %s pin:%u\n", + "err:%d %s failed to update %s pin:%u", ret, ice_aq_str(pf->hw.adminq.sq_last_status), pin_type_name[pin_type], pin->idx); @@ -518,7 +518,7 @@ ice_dpll_hw_input_prio_set(struct ice_pf *pf, struct ice_dpll *dpll, (u8)prio); if (ret) NL_SET_ERR_MSG_FMT(extack, - "err:%d %s failed to set pin prio:%u on pin:%u\n", + "err:%d %s failed to set pin prio:%u on pin:%u", ret, ice_aq_str(pf->hw.adminq.sq_last_status), prio, pin->idx); @@ -1004,7 +1004,7 @@ ice_dpll_pin_phase_adjust_set(const struct dpll_pin *pin, void *pin_priv, mutex_unlock(&pf->dplls.lock); if (ret) NL_SET_ERR_MSG_FMT(extack, - "err:%d %s failed to set pin phase_adjust:%d for pin:%u on dpll:%u\n", + "err:%d %s failed to set pin phase_adjust:%d for pin:%u on dpll:%u", ret, ice_aq_str(pf->hw.adminq.sq_last_status), phase_adjust, p->idx, d->dpll_idx); @@ -1362,7 +1362,7 @@ ice_dpll_rclk_state_on_pin_set(const struct dpll_pin *pin, void *pin_priv, &p->freq); if (ret) NL_SET_ERR_MSG_FMT(extack, - "err:%d %s failed to set pin state:%u for pin:%u on parent:%u\n", + "err:%d %s failed to set pin state:%u for pin:%u on parent:%u", ret, ice_aq_str(pf->hw.adminq.sq_last_status), state, p->idx, parent->idx); diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.c b/drivers/net/ethernet/intel/ice/ice_eswitch.c index fb527434b58b1..ed21d7f55ac11 100644 --- a/drivers/net/ethernet/intel/ice/ice_eswitch.c +++ b/drivers/net/ethernet/intel/ice/ice_eswitch.c @@ -38,8 +38,7 @@ static int ice_eswitch_setup_env(struct ice_pf *pf) if (ice_vsi_add_vlan_zero(uplink_vsi)) goto err_vlan_zero; - if (ice_cfg_dflt_vsi(uplink_vsi->port_info, uplink_vsi->idx, true, - ICE_FLTR_RX)) + if (ice_set_dflt_vsi(uplink_vsi)) goto err_def_rx; if (ice_cfg_dflt_vsi(uplink_vsi->port_info, uplink_vsi->idx, true, @@ -50,9 +49,6 @@ static int 
ice_eswitch_setup_env(struct ice_pf *pf) if (vlan_ops->dis_rx_filtering(uplink_vsi)) goto err_vlan_filtering; - if (ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_set_allow_override)) - goto err_override_uplink; - if (ice_vsi_update_local_lb(uplink_vsi, true)) goto err_override_local_lb; @@ -64,8 +60,6 @@ static int ice_eswitch_setup_env(struct ice_pf *pf) err_up: ice_vsi_update_local_lb(uplink_vsi, false); err_override_local_lb: - ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override); -err_override_uplink: vlan_ops->ena_rx_filtering(uplink_vsi); err_vlan_filtering: ice_cfg_dflt_vsi(uplink_vsi->port_info, uplink_vsi->idx, false, @@ -276,7 +270,6 @@ static void ice_eswitch_release_env(struct ice_pf *pf) vlan_ops = ice_get_compat_vsi_vlan_ops(uplink_vsi); ice_vsi_update_local_lb(uplink_vsi, false); - ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override); vlan_ops->ena_rx_filtering(uplink_vsi); ice_cfg_dflt_vsi(uplink_vsi->port_info, uplink_vsi->idx, false, ICE_FLTR_TX); diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c index f241493a6ac88..7c2dc347e4e57 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c @@ -3788,8 +3788,7 @@ ice_get_ts_info(struct net_device *dev, struct kernel_ethtool_ts_info *info) */ static int ice_get_max_txq(struct ice_pf *pf) { - return min3(pf->num_lan_msix, (u16)num_online_cpus(), - (u16)pf->hw.func_caps.common_cap.num_txq); + return min(num_online_cpus(), pf->hw.func_caps.common_cap.num_txq); } /** @@ -3798,8 +3797,7 @@ static int ice_get_max_txq(struct ice_pf *pf) */ static int ice_get_max_rxq(struct ice_pf *pf) { - return min3(pf->num_lan_msix, (u16)num_online_cpus(), - (u16)pf->hw.func_caps.common_cap.num_rxq); + return min(num_online_cpus(), pf->hw.func_caps.common_cap.num_rxq); } /** @@ -3817,8 +3815,7 @@ static u32 ice_get_combined_cnt(struct ice_vsi *vsi) ice_for_each_q_vector(vsi, q_idx) { struct ice_q_vector *q_vector = vsi->q_vectors[q_idx]; - if (q_vector->rx.rx_ring && q_vector->tx.tx_ring) - combined++; + combined += min(q_vector->num_ring_tx, q_vector->num_ring_rx); } return combined; @@ -4773,7 +4770,7 @@ static const struct ethtool_ops ice_ethtool_ops = { .supported_coalesce_params = ETHTOOL_COALESCE_USECS | ETHTOOL_COALESCE_USE_ADAPTIVE | ETHTOOL_COALESCE_RX_USECS_HIGH, - .cap_rss_sym_xor_supported = true, + .supported_input_xfrm = RXH_XFRM_SYM_XOR, .rxfh_per_ctx_key = true, .get_link_ksettings = ice_get_link_ksettings, .set_link_ksettings = ice_set_link_ksettings, diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c index ee9862ddfe15e..1d118171de378 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c @@ -1605,22 +1605,19 @@ void ice_fdir_replay_fltrs(struct ice_pf *pf) */ int ice_fdir_create_dflt_rules(struct ice_pf *pf) { + const enum ice_fltr_ptype dflt_rules[] = { + ICE_FLTR_PTYPE_NONF_IPV4_TCP, ICE_FLTR_PTYPE_NONF_IPV4_UDP, + ICE_FLTR_PTYPE_NONF_IPV6_TCP, ICE_FLTR_PTYPE_NONF_IPV6_UDP, + }; int err; /* Create perfect TCP and UDP rules in hardware. 
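 * With the dflt_rules[] table above, covering another flow type becomes
 * a one-line table entry, e.g. (hypothetical, not part of this patch):
 *
 *	ICE_FLTR_PTYPE_NONF_IPV4_SCTP,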
*/ - err = ice_create_init_fdir_rule(pf, ICE_FLTR_PTYPE_NONF_IPV4_TCP); - if (err) - return err; - - err = ice_create_init_fdir_rule(pf, ICE_FLTR_PTYPE_NONF_IPV4_UDP); - if (err) - return err; + for (int i = 0; i < ARRAY_SIZE(dflt_rules); i++) { + err = ice_create_init_fdir_rule(pf, dflt_rules[i]); - err = ice_create_init_fdir_rule(pf, ICE_FLTR_PTYPE_NONF_IPV6_TCP); - if (err) - return err; - - err = ice_create_init_fdir_rule(pf, ICE_FLTR_PTYPE_NONF_IPV6_UDP); + if (err) + break; + } return err; } diff --git a/drivers/net/ethernet/intel/ice/ice_gnss.c b/drivers/net/ethernet/intel/ice/ice_gnss.c index b2148dbe49b28..6b26290452d48 100644 --- a/drivers/net/ethernet/intel/ice/ice_gnss.c +++ b/drivers/net/ethernet/intel/ice/ice_gnss.c @@ -381,32 +381,23 @@ void ice_gnss_exit(struct ice_pf *pf) } /** - * ice_gnss_is_gps_present - Check if GPS HW is present + * ice_gnss_is_module_present - Check if GNSS HW is present * @hw: pointer to HW struct + * + * Return: true when GNSS is present, false otherwise. */ -bool ice_gnss_is_gps_present(struct ice_hw *hw) +bool ice_gnss_is_module_present(struct ice_hw *hw) { - if (!hw->func_caps.ts_func_info.src_tmr_owned) - return false; + int err; + u8 data; - if (!ice_is_gps_in_netlist(hw)) + if (!hw->func_caps.ts_func_info.src_tmr_owned || + !ice_is_gps_in_netlist(hw)) return false; -#if IS_ENABLED(CONFIG_PTP_1588_CLOCK) - if (ice_is_e810t(hw)) { - int err; - u8 data; - - err = ice_read_pca9575_reg(hw, ICE_PCA9575_P0_IN, &data); - if (err || !!(data & ICE_P0_GNSS_PRSNT_N)) - return false; - } else { - return false; - } -#else - if (!ice_is_e810t(hw)) + err = ice_read_pca9575_reg(hw, ICE_PCA9575_P0_IN, &data); + if (err || !!(data & ICE_P0_GNSS_PRSNT_N)) return false; -#endif /* IS_ENABLED(CONFIG_PTP_1588_CLOCK) */ return true; } diff --git a/drivers/net/ethernet/intel/ice/ice_gnss.h b/drivers/net/ethernet/intel/ice/ice_gnss.h index 75e567ad70594..15daf603ed7bf 100644 --- a/drivers/net/ethernet/intel/ice/ice_gnss.h +++ b/drivers/net/ethernet/intel/ice/ice_gnss.h @@ -37,11 +37,11 @@ struct gnss_serial { #if IS_ENABLED(CONFIG_GNSS) void ice_gnss_init(struct ice_pf *pf); void ice_gnss_exit(struct ice_pf *pf); -bool ice_gnss_is_gps_present(struct ice_hw *hw); +bool ice_gnss_is_module_present(struct ice_hw *hw); #else static inline void ice_gnss_init(struct ice_pf *pf) { } static inline void ice_gnss_exit(struct ice_pf *pf) { } -static inline bool ice_gnss_is_gps_present(struct ice_hw *hw) +static inline bool ice_gnss_is_module_present(struct ice_hw *hw) { return false; } diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h index dc88aea9f4731..aa4bfbcf85d28 100644 --- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h +++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h @@ -541,10 +541,22 @@ #define PFPM_WUS_MAG_M BIT(1) #define PFPM_WUS_MNG_M BIT(3) #define PFPM_WUS_FW_RST_WK_M BIT(31) +#define E830_PRTMAC_TS_TX_MEM_VALID_H 0x001E2020 +#define E830_PRTMAC_TS_TX_MEM_VALID_L 0x001E2000 #define E830_PRTMAC_CL01_PS_QNT 0x001E32A0 #define E830_PRTMAC_CL01_PS_QNT_CL0_M GENMASK(15, 0) #define E830_PRTMAC_CL01_QNT_THR 0x001E3320 #define E830_PRTMAC_CL01_QNT_THR_CL0_M GENMASK(15, 0) +#define E830_PRTTSYN_TXTIME_H(_i) (0x001E5800 + ((_i) * 32)) +#define E830_PRTTSYN_TXTIME_L(_i) (0x001E5000 + ((_i) * 32)) +#define E830_GLPTM_ART_CTL 0x00088B50 +#define E830_GLPTM_ART_CTL_ACTIVE_M BIT(0) +#define E830_GLPTM_ART_TIME_H 0x00088B54 +#define E830_GLPTM_ART_TIME_L 0x00088B58 +#define E830_GLTSYN_PTMTIME_H(_i) (0x00088B48 + 
((_i) * 4)) +#define E830_GLTSYN_PTMTIME_L(_i) (0x00088B40 + ((_i) * 4)) +#define E830_PFPTM_SEM 0x00088B00 +#define E830_PFPTM_SEM_BUSY_M BIT(0) #define VFINT_DYN_CTLN(_i) (0x00003800 + ((_i) * 4)) #define VFINT_DYN_CTLN_CLEARPBA_M BIT(1) #define E830_MBX_PF_IN_FLIGHT_VF_MSGS_THRESH 0x00234000 diff --git a/drivers/net/ethernet/intel/ice/ice_idc.c b/drivers/net/ethernet/intel/ice/ice_idc.c index 145b27f2a4ce8..bab3e81cad5d0 100644 --- a/drivers/net/ethernet/intel/ice/ice_idc.c +++ b/drivers/net/ethernet/intel/ice/ice_idc.c @@ -228,61 +228,34 @@ void ice_get_qos_params(struct ice_pf *pf, struct iidc_qos_params *qos) } EXPORT_SYMBOL_GPL(ice_get_qos_params); -/** - * ice_alloc_rdma_qvectors - Allocate vector resources for RDMA driver - * @pf: board private structure to initialize - */ -static int ice_alloc_rdma_qvectors(struct ice_pf *pf) +int ice_alloc_rdma_qvector(struct ice_pf *pf, struct msix_entry *entry) { - if (ice_is_rdma_ena(pf)) { - int i; - - pf->msix_entries = kcalloc(pf->num_rdma_msix, - sizeof(*pf->msix_entries), - GFP_KERNEL); - if (!pf->msix_entries) - return -ENOMEM; + struct msi_map map = ice_alloc_irq(pf, true); - /* RDMA is the only user of pf->msix_entries array */ - pf->rdma_base_vector = 0; - - for (i = 0; i < pf->num_rdma_msix; i++) { - struct msix_entry *entry = &pf->msix_entries[i]; - struct msi_map map; + if (map.index < 0) + return -ENOMEM; - map = ice_alloc_irq(pf, false); - if (map.index < 0) - break; + entry->entry = map.index; + entry->vector = map.virq; - entry->entry = map.index; - entry->vector = map.virq; - } - } return 0; } +EXPORT_SYMBOL_GPL(ice_alloc_rdma_qvector); /** * ice_free_rdma_qvector - free vector resources reserved for RDMA driver * @pf: board private structure to initialize + * @entry: MSI-X entry to be removed */ -static void ice_free_rdma_qvector(struct ice_pf *pf) +void ice_free_rdma_qvector(struct ice_pf *pf, struct msix_entry *entry) { - int i; - - if (!pf->msix_entries) - return; - - for (i = 0; i < pf->num_rdma_msix; i++) { - struct msi_map map; + struct msi_map map; - map.index = pf->msix_entries[i].entry; - map.virq = pf->msix_entries[i].vector; - ice_free_irq(pf, map); - } - - kfree(pf->msix_entries); - pf->msix_entries = NULL; + map.index = entry->entry; + map.virq = entry->vector; + ice_free_irq(pf, map); } +EXPORT_SYMBOL_GPL(ice_free_rdma_qvector); /** * ice_adev_release - function to be mapped to AUX dev's release op @@ -382,12 +355,6 @@ int ice_init_rdma(struct ice_pf *pf) return -ENOMEM; } - /* Reserve vector resources */ - ret = ice_alloc_rdma_qvectors(pf); - if (ret < 0) { - dev_err(dev, "failed to reserve vectors for RDMA\n"); - goto err_reserve_rdma_qvector; - } pf->rdma_mode |= IIDC_RDMA_PROTOCOL_ROCEV2; ret = ice_plug_aux_dev(pf); if (ret) @@ -395,8 +362,6 @@ int ice_init_rdma(struct ice_pf *pf) return 0; err_plug_aux_dev: - ice_free_rdma_qvector(pf); -err_reserve_rdma_qvector: pf->adev = NULL; xa_erase(&ice_aux_id, pf->aux_idx); return ret; @@ -412,6 +377,5 @@ void ice_deinit_rdma(struct ice_pf *pf) return; ice_unplug_aux_dev(pf); - ice_free_rdma_qvector(pf); xa_erase(&ice_aux_id, pf->aux_idx); } diff --git a/drivers/net/ethernet/intel/ice/ice_irq.c b/drivers/net/ethernet/intel/ice/ice_irq.c index ad82ff7d19957..30801fd375f01 100644 --- a/drivers/net/ethernet/intel/ice/ice_irq.c +++ b/drivers/net/ethernet/intel/ice/ice_irq.c @@ -20,6 +20,19 @@ ice_init_irq_tracker(struct ice_pf *pf, unsigned int max_vectors, xa_init_flags(&pf->irq_tracker.entries, XA_FLAGS_ALLOC); } +static int +ice_init_virt_irq_tracker(struct ice_pf 
*pf, u32 base, u32 num_entries) +{ + pf->virt_irq_tracker.bm = bitmap_zalloc(num_entries, GFP_KERNEL); + if (!pf->virt_irq_tracker.bm) + return -ENOMEM; + + pf->virt_irq_tracker.num_entries = num_entries; + pf->virt_irq_tracker.base = base; + + return 0; +} + /** * ice_deinit_irq_tracker - free xarray tracker * @pf: board private structure @@ -29,6 +42,11 @@ static void ice_deinit_irq_tracker(struct ice_pf *pf) { xa_destroy(&pf->irq_tracker.entries); } +static void ice_deinit_virt_irq_tracker(struct ice_pf *pf) +{ + bitmap_free(pf->virt_irq_tracker.bm); +} + /** * ice_free_irq_res - free a block of resources * @pf: board private structure * @@ -45,7 +63,7 @@ static void ice_free_irq_res(struct ice_pf *pf, u16 index) /** * ice_get_irq_res - get an interrupt resource * @pf: board private structure - * @dyn_only: force entry to be dynamically allocated + * @dyn_allowed: allow entry to be dynamically allocated * * Allocate new irq entry in the free slot of the tracker. Since xarray * is used, always allocate new entry at the lowest possible index. Set * @@ -53,11 +71,12 @@ static void ice_free_irq_res(struct ice_pf *pf, u16 index) * * Returns allocated irq entry or NULL on failure. */ -static struct ice_irq_entry *ice_get_irq_res(struct ice_pf *pf, bool dyn_only) +static struct ice_irq_entry *ice_get_irq_res(struct ice_pf *pf, + bool dyn_allowed) { - struct xa_limit limit = { .max = pf->irq_tracker.num_entries, + struct xa_limit limit = { .max = pf->irq_tracker.num_entries - 1, .min = 0 }; - unsigned int num_static = pf->irq_tracker.num_static; + unsigned int num_static = pf->irq_tracker.num_static - 1; struct ice_irq_entry *entry; unsigned int index; int ret; @@ -66,9 +85,9 @@ static struct ice_irq_entry *ice_get_irq_res(struct ice_pf *pf, bool dyn_only) if (!entry) return NULL; - /* skip preallocated entries if the caller says so */ - if (dyn_only) - limit.min = num_static; + /* use only preallocated entries if the caller says so */ + if (!dyn_allowed) + limit.max = num_static; ret = xa_alloc(&pf->irq_tracker.entries, &index, entry, limit, GFP_KERNEL); @@ -78,161 +97,18 @@ static struct ice_irq_entry *ice_get_irq_res(struct ice_pf *pf, bool dyn_only) entry = NULL; } else { entry->index = index; - entry->dynamic = index >= num_static; + entry->dynamic = index > num_static; } return entry; } -/** - * ice_reduce_msix_usage - Reduce usage of MSI-X vectors - * @pf: board private structure - * @v_remain: number of remaining MSI-X vectors to be distributed - * - * Reduce the usage of MSI-X vectors when entire request cannot be fulfilled. - * pf->num_lan_msix and pf->num_rdma_msix values are set based on number of - * remaining vectors.
- */ -static void ice_reduce_msix_usage(struct ice_pf *pf, int v_remain) -{ - int v_rdma; - - if (!ice_is_rdma_ena(pf)) { - pf->num_lan_msix = v_remain; - return; - } - - /* RDMA needs at least 1 interrupt in addition to AEQ MSIX */ - v_rdma = ICE_RDMA_NUM_AEQ_MSIX + 1; - - if (v_remain < ICE_MIN_LAN_TXRX_MSIX + ICE_MIN_RDMA_MSIX) { - dev_warn(ice_pf_to_dev(pf), "Not enough MSI-X vectors to support RDMA.\n"); - clear_bit(ICE_FLAG_RDMA_ENA, pf->flags); - - pf->num_rdma_msix = 0; - pf->num_lan_msix = ICE_MIN_LAN_TXRX_MSIX; - } else if ((v_remain < ICE_MIN_LAN_TXRX_MSIX + v_rdma) || - (v_remain - v_rdma < v_rdma)) { - /* Support minimum RDMA and give remaining vectors to LAN MSIX - */ - pf->num_rdma_msix = ICE_MIN_RDMA_MSIX; - pf->num_lan_msix = v_remain - ICE_MIN_RDMA_MSIX; - } else { - /* Split remaining MSIX with RDMA after accounting for AEQ MSIX - */ - pf->num_rdma_msix = (v_remain - ICE_RDMA_NUM_AEQ_MSIX) / 2 + - ICE_RDMA_NUM_AEQ_MSIX; - pf->num_lan_msix = v_remain - pf->num_rdma_msix; - } -} - -/** - * ice_ena_msix_range - Request a range of MSIX vectors from the OS - * @pf: board private structure - * - * Compute the number of MSIX vectors wanted and request from the OS. Adjust - * device usage if there are not enough vectors. Return the number of vectors - * reserved or negative on failure. - */ -static int ice_ena_msix_range(struct ice_pf *pf) +#define ICE_RDMA_AEQ_MSIX 1 +static int ice_get_default_msix_amount(struct ice_pf *pf) { - int num_cpus, hw_num_msix, v_other, v_wanted, v_actual; - struct device *dev = ice_pf_to_dev(pf); - int err; - - hw_num_msix = pf->hw.func_caps.common_cap.num_msix_vectors; - num_cpus = num_online_cpus(); - - /* LAN miscellaneous handler */ - v_other = ICE_MIN_LAN_OICR_MSIX; - - /* Flow Director */ - if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) - v_other += ICE_FDIR_MSIX; - - /* switchdev */ - v_other += ICE_ESWITCH_MSIX; - - v_wanted = v_other; - - /* LAN traffic */ - pf->num_lan_msix = num_cpus; - v_wanted += pf->num_lan_msix; - - /* RDMA auxiliary driver */ - if (ice_is_rdma_ena(pf)) { - pf->num_rdma_msix = num_cpus + ICE_RDMA_NUM_AEQ_MSIX; - v_wanted += pf->num_rdma_msix; - } - - if (v_wanted > hw_num_msix) { - int v_remain; - - dev_warn(dev, "not enough device MSI-X vectors. wanted = %d, available = %d\n", - v_wanted, hw_num_msix); - - if (hw_num_msix < ICE_MIN_MSIX) { - err = -ERANGE; - goto exit_err; - } - - v_remain = hw_num_msix - v_other; - if (v_remain < ICE_MIN_LAN_TXRX_MSIX) { - v_other = ICE_MIN_MSIX - ICE_MIN_LAN_TXRX_MSIX; - v_remain = ICE_MIN_LAN_TXRX_MSIX; - } - - ice_reduce_msix_usage(pf, v_remain); - v_wanted = pf->num_lan_msix + pf->num_rdma_msix + v_other; - - dev_notice(dev, "Reducing request to %d MSI-X vectors for LAN traffic.\n", - pf->num_lan_msix); - if (ice_is_rdma_ena(pf)) - dev_notice(dev, "Reducing request to %d MSI-X vectors for RDMA.\n", - pf->num_rdma_msix); - } - - /* actually reserve the vectors */ - v_actual = pci_alloc_irq_vectors(pf->pdev, ICE_MIN_MSIX, v_wanted, - PCI_IRQ_MSIX); - if (v_actual < 0) { - dev_err(dev, "unable to reserve MSI-X vectors\n"); - err = v_actual; - goto exit_err; - } - - if (v_actual < v_wanted) { - dev_warn(dev, "not enough OS MSI-X vectors. 
requested = %d, obtained = %d\n", - v_wanted, v_actual); - - if (v_actual < ICE_MIN_MSIX) { - /* error if we can't get minimum vectors */ - pci_free_irq_vectors(pf->pdev); - err = -ERANGE; - goto exit_err; - } else { - int v_remain = v_actual - v_other; - - if (v_remain < ICE_MIN_LAN_TXRX_MSIX) - v_remain = ICE_MIN_LAN_TXRX_MSIX; - - ice_reduce_msix_usage(pf, v_remain); - - dev_notice(dev, "Enabled %d MSI-X vectors for LAN traffic.\n", - pf->num_lan_msix); - - if (ice_is_rdma_ena(pf)) - dev_notice(dev, "Enabled %d MSI-X vectors for RDMA.\n", - pf->num_rdma_msix); - } - } - - return v_actual; - -exit_err: - pf->num_rdma_msix = 0; - pf->num_lan_msix = 0; - return err; + return ICE_MIN_LAN_OICR_MSIX + num_online_cpus() + + (test_bit(ICE_FLAG_FD_ENA, pf->flags) ? ICE_FDIR_MSIX : 0) + + (ice_is_rdma_ena(pf) ? num_online_cpus() + ICE_RDMA_AEQ_MSIX : 0); } /** @@ -243,6 +119,7 @@ void ice_clear_interrupt_scheme(struct ice_pf *pf) { pci_free_irq_vectors(pf->pdev); ice_deinit_irq_tracker(pf); + ice_deinit_virt_irq_tracker(pf); } /** @@ -252,27 +129,38 @@ void ice_clear_interrupt_scheme(struct ice_pf *pf) int ice_init_interrupt_scheme(struct ice_pf *pf) { int total_vectors = pf->hw.func_caps.common_cap.num_msix_vectors; - int vectors, max_vectors; + int vectors; - vectors = ice_ena_msix_range(pf); + /* load default PF MSI-X range */ + if (!pf->msix.min) + pf->msix.min = ICE_MIN_MSIX; - if (vectors < 0) - return -ENOMEM; + if (!pf->msix.max) + pf->msix.max = min(total_vectors, + ice_get_default_msix_amount(pf)); + + pf->msix.total = total_vectors; + pf->msix.rest = total_vectors - pf->msix.max; if (pci_msix_can_alloc_dyn(pf->pdev)) - max_vectors = total_vectors; + vectors = pf->msix.min; else - max_vectors = vectors; + vectors = pf->msix.max; + + vectors = pci_alloc_irq_vectors(pf->pdev, pf->msix.min, vectors, + PCI_IRQ_MSIX); + if (vectors < 0) + return vectors; - ice_init_irq_tracker(pf, max_vectors, vectors); + ice_init_irq_tracker(pf, pf->msix.max, vectors); - return 0; + return ice_init_virt_irq_tracker(pf, pf->msix.max, pf->msix.rest); } /** * ice_alloc_irq - Allocate new interrupt vector * @pf: board private structure - * @dyn_only: force dynamic allocation of the interrupt + * @dyn_allowed: allow dynamic allocation of the interrupt * * Allocate new interrupt vector for a given owner id. * return struct msi_map with interrupt details and track @@ -285,27 +173,22 @@ int ice_init_interrupt_scheme(struct ice_pf *pf) * interrupt will be allocated with pci_msix_alloc_irq_at. * * Some callers may only support dynamically allocated interrupts. - * This is indicated with dyn_only flag. + * This is indicated with dyn_allowed flag. * * On failure, return map with negative .index. The caller * is expected to check returned map index. 
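 *
 * A caller-side sketch (it mirrors ice_alloc_rdma_qvector() in the
 * ice_idc.c hunk above):
 *
 *	struct msi_map map = ice_alloc_irq(pf, true);
 *
 *	if (map.index < 0)
 *		return -ENOMEM;
 *	entry->entry = map.index;
 *	entry->vector = map.virq;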
* */ -struct msi_map ice_alloc_irq(struct ice_pf *pf, bool dyn_only) +struct msi_map ice_alloc_irq(struct ice_pf *pf, bool dyn_allowed) { - int sriov_base_vector = pf->sriov_base_vector; struct msi_map map = { .index = -ENOENT }; struct device *dev = ice_pf_to_dev(pf); struct ice_irq_entry *entry; - entry = ice_get_irq_res(pf, dyn_only); + entry = ice_get_irq_res(pf, dyn_allowed); if (!entry) return map; - /* fail if we're about to violate SRIOV vectors space */ - if (sriov_base_vector && entry->index >= sriov_base_vector) - goto exit_free_res; - if (pci_msix_can_alloc_dyn(pf->pdev) && entry->dynamic) { map = pci_msix_alloc_irq_at(pf->pdev, entry->index, NULL); if (map.index < 0) @@ -353,26 +236,40 @@ void ice_free_irq(struct ice_pf *pf, struct msi_map map) } /** - * ice_get_max_used_msix_vector - Get the max used interrupt vector - * @pf: board private structure + * ice_virt_get_irqs - get IRQs for the SR-IOV use case + * @pf: pointer to PF structure + * @needed: number of irqs to get * - * Return index of maximum used interrupt vectors with respect to the - * beginning of the MSIX table. Take into account that some interrupts - * may have been dynamically allocated after MSIX was initially enabled. + * This returns the first MSI-X vector index in PF space that is used by this + * VF. This index is used when accessing PF relative registers such as + * GLINT_VECT2FUNC and GLINT_DYN_CTL. + * This will always be the OICR index in the AVF driver so any functionality + * using vf->first_vector_idx for queue configuration will use this index. + */ -int ice_get_max_used_msix_vector(struct ice_pf *pf) +int ice_virt_get_irqs(struct ice_pf *pf, u32 needed) { - unsigned long start, index, max_idx; - void *entry; + int res = bitmap_find_next_zero_area(pf->virt_irq_tracker.bm, + pf->virt_irq_tracker.num_entries, + 0, needed, 0); - /* Treat all preallocated interrupts as used */ - start = pf->irq_tracker.num_static; - max_idx = start - 1; + if (res >= pf->virt_irq_tracker.num_entries) + return -ENOENT; - xa_for_each_start(&pf->irq_tracker.entries, index, entry, start) { - if (index > max_idx) - max_idx = index; - } + bitmap_set(pf->virt_irq_tracker.bm, res, needed); + + /* conversion from number in bitmap to global irq index */ + return res + pf->virt_irq_tracker.base; +} - return max_idx; +/** + * ice_virt_free_irqs - free irqs used by the VF + * @pf: pointer to PF structure + * @index: first index to be freed + * @irqs: number of irqs to free + */ +void ice_virt_free_irqs(struct ice_pf *pf, u32 index, u32 irqs) +{ + bitmap_clear(pf->virt_irq_tracker.bm, index - pf->virt_irq_tracker.base, + irqs); } diff --git a/drivers/net/ethernet/intel/ice/ice_irq.h b/drivers/net/ethernet/intel/ice/ice_irq.h index f35efc08575ef..b2f9dbafd57e9 100644 --- a/drivers/net/ethernet/intel/ice/ice_irq.h +++ b/drivers/net/ethernet/intel/ice/ice_irq.h @@ -15,11 +15,22 @@ struct ice_irq_tracker { u16 num_static; /* preallocated entries */ }; +struct ice_virt_irq_tracker { + unsigned long *bm; /* bitmap to track irq usage */ + u32 num_entries; + /* First MSIX vector used by SR-IOV VFs. Calculated by subtracting the + * number of MSIX vectors needed for all SR-IOV VFs from the number of + * MSIX vectors allowed on this PF. + */
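+	/* Indices returned by ice_virt_get_irqs() are global, i.e. bitmap
+	 * offset plus base; ice_virt_free_irqs() converts back before
+	 * clearing the bits. A usage sketch (variable names assumed):
+	 *
+	 *	first = ice_virt_get_irqs(pf, needed);
+	 *	if (first < 0)
+	 *		return first;
+	 *	...
+	 *	ice_virt_free_irqs(pf, first, needed);
+	 */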
+ */ + u32 base; +}; + int ice_init_interrupt_scheme(struct ice_pf *pf); void ice_clear_interrupt_scheme(struct ice_pf *pf); struct msi_map ice_alloc_irq(struct ice_pf *pf, bool dyn_only); void ice_free_irq(struct ice_pf *pf, struct msi_map map); -int ice_get_max_used_msix_vector(struct ice_pf *pf); +int ice_virt_get_irqs(struct ice_pf *pf, u32 needed); +void ice_virt_free_irqs(struct ice_pf *pf, u32 index, u32 irqs); #endif diff --git a/drivers/net/ethernet/intel/ice/ice_lag.c b/drivers/net/ethernet/intel/ice/ice_lag.c index 1ccb572ce285d..22371011c2492 100644 --- a/drivers/net/ethernet/intel/ice/ice_lag.c +++ b/drivers/net/ethernet/intel/ice/ice_lag.c @@ -1000,6 +1000,28 @@ static void ice_lag_link(struct ice_lag *lag) netdev_info(lag->netdev, "Shared SR-IOV resources in bond are active\n"); } +/** + * ice_lag_config_eswitch - configure eswitch to work with LAG + * @lag: lag info struct + * @netdev: active network interface device struct + * + * Updates all port representors in eswitch to use @netdev for Tx. + * + * Configures the netdev to keep dst metadata (also used in representor Tx). + * This is required for an uplink without switchdev mode configured. + */ +static void ice_lag_config_eswitch(struct ice_lag *lag, + struct net_device *netdev) +{ + struct ice_repr *repr; + unsigned long id; + + xa_for_each(&lag->pf->eswitch.reprs, id, repr) + repr->dst->u.port_info.lower_dev = netdev; + + netif_keep_dst(netdev); +} + /** * ice_lag_unlink - handle unlink event * @lag: LAG info struct @@ -1021,6 +1043,9 @@ static void ice_lag_unlink(struct ice_lag *lag) ice_lag_move_vf_nodes(lag, act_port, pri_port); lag->primary = false; lag->active_port = ICE_LAG_INVALID_PORT; + + /* Config primary's eswitch back to normal operation. */ + ice_lag_config_eswitch(lag, lag->netdev); } else { struct ice_lag *primary_lag; @@ -1419,6 +1444,7 @@ static void ice_lag_monitor_active(struct ice_lag *lag, void *ptr) ice_lag_move_vf_nodes(lag, prim_port, event_port); lag->active_port = event_port; + ice_lag_config_eswitch(lag, event_netdev); return; } @@ -1428,6 +1454,7 @@ static void ice_lag_monitor_active(struct ice_lag *lag, void *ptr) /* new active port */ ice_lag_move_vf_nodes(lag, lag->active_port, event_port); lag->active_port = event_port; + ice_lag_config_eswitch(lag, event_netdev); } else { /* port not set as currently active (e.g. 
new active port * has already claimed the nodes and filters diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h index 1479b45738af1..77ba26538b07b 100644 --- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h +++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h @@ -229,7 +229,7 @@ struct ice_32b_rx_flex_desc_nic { __le16 status_error1; u8 flexi_flags2; u8 ts_low; - __le16 l2tag2_1st; + __le16 raw_csum; __le16 l2tag2_2nd; /* Qword 3 */ @@ -478,10 +478,15 @@ enum ice_tx_desc_len_fields { struct ice_tx_ctx_desc { __le32 tunneling_params; __le16 l2tag2; - __le16 rsvd; + __le16 gcs; __le64 qw1; }; +#define ICE_TX_GCS_DESC_START_M GENMASK(7, 0) +#define ICE_TX_GCS_DESC_OFFSET_M GENMASK(11, 8) +#define ICE_TX_GCS_DESC_TYPE_M GENMASK(14, 12) +#define ICE_TX_GCS_DESC_CSUM_PSH 1 + #define ICE_TXD_CTX_QW1_CMD_S 4 #define ICE_TXD_CTX_QW1_CMD_M (0x7FUL << ICE_TXD_CTX_QW1_CMD_S) diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index 38a1c8372180b..0bcf9d127ac9e 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -157,6 +157,16 @@ static void ice_vsi_set_num_desc(struct ice_vsi *vsi) } } +static u16 ice_get_rxq_count(struct ice_pf *pf) +{ + return min(ice_get_avail_rxq_count(pf), num_online_cpus()); +} + +static u16 ice_get_txq_count(struct ice_pf *pf) +{ + return min(ice_get_avail_txq_count(pf), num_online_cpus()); +} + /** * ice_vsi_set_num_qs - Set number of queues, descriptors and vectors for a VSI * @vsi: the VSI being configured @@ -178,9 +188,7 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi) vsi->alloc_txq = vsi->req_txq; vsi->num_txq = vsi->req_txq; } else { - vsi->alloc_txq = min3(pf->num_lan_msix, - ice_get_avail_txq_count(pf), - (u16)num_online_cpus()); + vsi->alloc_txq = ice_get_txq_count(pf); } pf->num_lan_tx = vsi->alloc_txq; @@ -193,17 +201,13 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi) vsi->alloc_rxq = vsi->req_rxq; vsi->num_rxq = vsi->req_rxq; } else { - vsi->alloc_rxq = min3(pf->num_lan_msix, - ice_get_avail_rxq_count(pf), - (u16)num_online_cpus()); + vsi->alloc_rxq = ice_get_rxq_count(pf); } } pf->num_lan_rx = vsi->alloc_rxq; - vsi->num_q_vectors = min_t(int, pf->num_lan_msix, - max_t(int, vsi->alloc_rxq, - vsi->alloc_txq)); + vsi->num_q_vectors = max(vsi->alloc_rxq, vsi->alloc_txq); break; case ICE_VSI_SF: vsi->alloc_txq = 1; @@ -567,6 +571,8 @@ ice_vsi_alloc_def(struct ice_vsi *vsi, struct ice_channel *ch) return -ENOMEM; } + vsi->irq_dyn_alloc = pci_msix_can_alloc_dyn(vsi->back->pdev); + switch (vsi->type) { case ICE_VSI_PF: case ICE_VSI_SF: @@ -827,7 +833,13 @@ bool ice_is_safe_mode(struct ice_pf *pf) */ bool ice_is_rdma_ena(struct ice_pf *pf) { - return test_bit(ICE_FLAG_RDMA_ENA, pf->flags); + union devlink_param_value value; + int err; + + err = devl_param_driverinit_value_get(priv_to_devlink(pf), + DEVLINK_PARAM_GENERIC_ID_ENABLE_RDMA, + &value); + return err ? 
test_bit(ICE_FLAG_RDMA_ENA, pf->flags) : value.vbool; } /** @@ -1173,12 +1185,11 @@ static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi) static void ice_chnl_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt) { - struct ice_pf *pf = vsi->back; u16 qcount, qmap; u8 offset = 0; int pow; - qcount = min_t(int, vsi->num_rxq, pf->num_lan_msix); + qcount = vsi->num_rxq; pow = order_base_2(qcount); qmap = FIELD_PREP(ICE_AQ_VSI_TC_Q_OFFSET_M, offset); @@ -1420,6 +1431,10 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi) ring->dev = dev; ring->count = vsi->num_rx_desc; ring->cached_phctime = pf->ptp.cached_phc_time; + + if (ice_is_feature_supported(pf, ICE_F_GCS)) + ring->flags |= ICE_RX_FLAGS_RING_GCS; + WRITE_ONCE(vsi->rx_rings[i], ring); } @@ -1764,9 +1779,8 @@ void ice_update_eth_stats(struct ice_vsi *vsi) * @prio: priority for the RXDID for this queue * @ena_ts: true to enable timestamp and false to disable timestamp */ -void -ice_write_qrxflxp_cntxt(struct ice_hw *hw, u16 pf_q, u32 rxdid, u32 prio, - bool ena_ts) +void ice_write_qrxflxp_cntxt(struct ice_hw *hw, u16 pf_q, u32 rxdid, u32 prio, + bool ena_ts) { int regval = rd32(hw, QRXFLXP_CNTXT(pf_q)); @@ -2582,7 +2596,6 @@ void ice_vsi_free_irq(struct ice_vsi *vsi) return; vsi->irqs_ready = false; - ice_free_cpu_rx_rmap(vsi); ice_for_each_q_vector(vsi, i) { int irq_num; @@ -2595,12 +2608,6 @@ void ice_vsi_free_irq(struct ice_vsi *vsi) vsi->q_vectors[i]->num_ring_rx)) continue; - /* clear the affinity notifier in the IRQ descriptor */ - if (!IS_ENABLED(CONFIG_RFS_ACCEL)) - irq_set_affinity_notifier(irq_num, NULL); - - /* clear the affinity_hint in the IRQ descriptor */ - irq_update_affinity_hint(irq_num, NULL); synchronize_irq(irq_num); devm_free_irq(ice_pf_to_dev(pf), irq_num, vsi->q_vectors[i]); } @@ -2755,11 +2762,18 @@ void ice_vsi_set_napi_queues(struct ice_vsi *vsi) void ice_vsi_clear_napi_queues(struct ice_vsi *vsi) { struct net_device *netdev = vsi->netdev; - int q_idx; + int q_idx, v_idx; if (!netdev) return; + /* Clear the NAPI's interrupt number */ + ice_for_each_q_vector(vsi, v_idx) { + struct ice_q_vector *q_vector = vsi->q_vectors[v_idx]; + + netif_napi_set_irq(&q_vector->napi, -1); + } + ice_for_each_txq(vsi, q_idx) netif_queue_set_napi(netdev, q_idx, NETDEV_QUEUE_TYPE_TX, NULL); @@ -3882,15 +3896,17 @@ void ice_init_feature_support(struct ice_pf *pf) ice_set_feature_support(pf, ICE_F_CGU); if (ice_is_clock_mux_in_netlist(&pf->hw)) ice_set_feature_support(pf, ICE_F_SMA_CTRL); - if (ice_gnss_is_gps_present(&pf->hw)) + if (ice_gnss_is_module_present(&pf->hw)) ice_set_feature_support(pf, ICE_F_GNSS); break; default: break; } - if (pf->hw.mac_type == ICE_MAC_E830) + if (pf->hw.mac_type == ICE_MAC_E830) { ice_set_feature_support(pf, ICE_F_MBX_LIMIT); + ice_set_feature_support(pf, ICE_F_GCS); + } } /** @@ -3936,24 +3952,6 @@ void ice_vsi_ctx_clear_antispoof(struct ice_vsi_ctx *ctx) ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S); } -/** - * ice_vsi_ctx_set_allow_override - allow destination override on VSI - * @ctx: pointer to VSI ctx structure - */ -void ice_vsi_ctx_set_allow_override(struct ice_vsi_ctx *ctx) -{ - ctx->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD; -} - -/** - * ice_vsi_ctx_clear_allow_override - turn off destination override on VSI - * @ctx: pointer to VSI ctx structure - */ -void ice_vsi_ctx_clear_allow_override(struct ice_vsi_ctx *ctx) -{ - ctx->info.sec_flags &= ~ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD; -} - /** * ice_vsi_update_local_lb - update sw block in VSI with local loopback 
bit * @vsi: pointer to VSI structure diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h index eabb35834a245..b4c9cb28a016e 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.h +++ b/drivers/net/ethernet/intel/ice/ice_lib.h @@ -105,10 +105,6 @@ ice_vsi_update_security(struct ice_vsi *vsi, void (*fill)(struct ice_vsi_ctx *)) void ice_vsi_ctx_set_antispoof(struct ice_vsi_ctx *ctx); void ice_vsi_ctx_clear_antispoof(struct ice_vsi_ctx *ctx); - -void ice_vsi_ctx_set_allow_override(struct ice_vsi_ctx *ctx); - -void ice_vsi_ctx_clear_allow_override(struct ice_vsi_ctx *ctx); int ice_vsi_update_local_lb(struct ice_vsi *vsi, bool set); int ice_vsi_add_vlan_zero(struct ice_vsi *vsi); int ice_vsi_del_vlan_zero(struct ice_vsi *vsi); diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index c3a0fb97c5ee4..049edeb601043 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -2527,34 +2527,6 @@ int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset) return 0; } -/** - * ice_irq_affinity_notify - Callback for affinity changes - * @notify: context as to what irq was changed - * @mask: the new affinity mask - * - * This is a callback function used by the irq_set_affinity_notifier function - * so that we may register to receive changes to the irq affinity masks. - */ -static void -ice_irq_affinity_notify(struct irq_affinity_notify *notify, - const cpumask_t *mask) -{ - struct ice_q_vector *q_vector = - container_of(notify, struct ice_q_vector, affinity_notify); - - cpumask_copy(&q_vector->affinity_mask, mask); -} - -/** - * ice_irq_affinity_release - Callback for affinity notifier release - * @ref: internal core kernel usage - * - * This is a callback function used by the irq_set_affinity_notifier function - * to inform the current notification subscriber that they will no longer - * receive notifications. 
- */ -static void ice_irq_affinity_release(struct kref __always_unused *ref) {} - /** * ice_vsi_ena_irq - Enable IRQ for the given VSI * @vsi: the VSI being configured @@ -2618,19 +2590,6 @@ static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename) err); goto free_q_irqs; } - - /* register for affinity change notifications */ - if (!IS_ENABLED(CONFIG_RFS_ACCEL)) { - struct irq_affinity_notify *affinity_notify; - - affinity_notify = &q_vector->affinity_notify; - affinity_notify->notify = ice_irq_affinity_notify; - affinity_notify->release = ice_irq_affinity_release; - irq_set_affinity_notifier(irq_num, affinity_notify); - } - - /* assign the mask for this irq */ - irq_update_affinity_hint(irq_num, &q_vector->affinity_mask); } err = ice_set_cpu_rx_rmap(vsi); @@ -2646,9 +2605,6 @@ static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename) free_q_irqs: while (vector--) { irq_num = vsi->q_vectors[vector]->irq.virq; - if (!IS_ENABLED(CONFIG_RFS_ACCEL)) - irq_set_affinity_notifier(irq_num, NULL); - irq_update_affinity_hint(irq_num, NULL); devm_free_irq(dev, irq_num, &vsi->q_vectors[vector]); } return err; @@ -3304,22 +3260,8 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data) if (oicr & PFINT_OICR_TSYN_TX_M) { ena_mask &= ~PFINT_OICR_TSYN_TX_M; - if (ice_pf_state_is_nominal(pf) && - pf->hw.dev_caps.ts_dev_info.ts_ll_int_read) { - struct ice_ptp_tx *tx = &pf->ptp.port.tx; - unsigned long flags; - u8 idx; - - spin_lock_irqsave(&tx->lock, flags); - idx = find_next_bit_wrap(tx->in_use, tx->len, - tx->last_ll_ts_idx_read + 1); - if (idx != tx->len) - ice_ptp_req_tx_single_tstamp(tx, idx); - spin_unlock_irqrestore(&tx->lock, flags); - } else if (ice_ptp_pf_handles_tx_interrupt(pf)) { - set_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread); - ret = IRQ_WAKE_THREAD; - } + + ret = ice_ptp_ts_irq(pf); } if (oicr & PFINT_OICR_TSYN_EVNT_M) { @@ -3689,6 +3631,15 @@ void ice_set_netdev_features(struct net_device *netdev) */ netdev->hw_features |= NETIF_F_RXFCS; + /* Allow core to manage IRQ affinity */ + netif_set_affinity_auto(netdev); + + /* Mutual exclusivity for TSO and GCS is enforced by the set features + * ndo callback.
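+ * Both flags stay advertised in hw_features; ice_set_features() simply + * rejects any request that would enable them together.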
+ */ + if (ice_is_feature_supported(pf, ICE_F_GCS)) + netdev->hw_features |= NETIF_F_HW_CSUM; + netif_set_tso_max_size(netdev, ICE_MAX_TSO_SIZE); } @@ -4066,8 +4017,7 @@ static void ice_set_pf_caps(struct ice_pf *pf) } clear_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags); - if (func_caps->common_cap.ieee_1588 && - !(pf->hw.mac_type == ICE_MAC_E830)) + if (func_caps->common_cap.ieee_1588) set_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags); pf->max_pf_txqs = func_caps->common_cap.num_txq; @@ -5065,16 +5015,16 @@ static int ice_init_devlink(struct ice_pf *pf) return err; ice_devlink_init_regions(pf); - ice_health_init(pf); ice_devlink_register(pf); + ice_health_init(pf); return 0; } static void ice_deinit_devlink(struct ice_pf *pf) { - ice_devlink_unregister(pf); ice_health_deinit(pf); + ice_devlink_unregister(pf); ice_devlink_destroy_regions(pf); ice_devlink_unregister_params(pf); } @@ -5087,6 +5037,12 @@ static int ice_init(struct ice_pf *pf) if (err) return err; + if (pf->hw.mac_type == ICE_MAC_E830) { + err = pci_enable_ptm(pf->pdev, NULL); + if (err) + dev_dbg(ice_pf_to_dev(pf), "PCIe PTM not supported by PCIe bus/controller\n"); + } + err = ice_alloc_vsis(pf); if (err) goto err_alloc_vsis; @@ -5186,11 +5142,12 @@ int ice_load(struct ice_pf *pf) ice_napi_add(vsi); + ice_init_features(pf); + err = ice_init_rdma(pf); if (err) goto err_init_rdma; - ice_init_features(pf); ice_service_task_restart(pf); clear_bit(ICE_DOWN, pf->state); @@ -5198,6 +5155,7 @@ int ice_load(struct ice_pf *pf) return 0; err_init_rdma: + ice_deinit_features(pf); ice_tc_indir_block_unregister(vsi); err_tc_indir_block_register: ice_unregister_netdev(vsi); @@ -5221,8 +5179,8 @@ void ice_unload(struct ice_pf *pf) devl_assert_locked(priv_to_devlink(pf)); - ice_deinit_features(pf); ice_deinit_rdma(pf); + ice_deinit_features(pf); ice_tc_indir_block_unregister(vsi); ice_unregister_netdev(vsi); ice_devlink_destroy_pf_port(pf); @@ -6597,6 +6555,18 @@ ice_set_features(struct net_device *netdev, netdev_features_t features) if (changed & NETIF_F_LOOPBACK) ret = ice_set_loopback(vsi, !!(features & NETIF_F_LOOPBACK)); + /* Due to E830 hardware limitations, TSO (NETIF_F_ALL_TSO) with GCS + * (NETIF_F_HW_CSUM) is not supported. + */ + if (ice_is_feature_supported(pf, ICE_F_GCS) && + ((features & NETIF_F_HW_CSUM) && (features & NETIF_F_ALL_TSO))) { + if (netdev->features & NETIF_F_HW_CSUM) + dev_err(ice_pf_to_dev(pf), "To enable TSO, you must first disable HW checksum.\n"); + else + dev_err(ice_pf_to_dev(pf), "To enable HW checksum, you must first disable TSO.\n"); + return -EIO; + } + return ret; } diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c index e26320ce52ca1..1fd1ae03eb909 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp.c +++ b/drivers/net/ethernet/intel/ice/ice_ptp.c @@ -298,8 +298,8 @@ void ice_ptp_restore_timestamp_mode(struct ice_pf *pf) * @sts: Optional parameter for holding a pair of system timestamps from * the system clock. Will be ignored if NULL is given. 
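+ * + * Return: the 64 bit source clock (PHC) time in nanoseconds. On E830 the + * value comes from a single 64 bit register read, so no lo/hi retry loop + * is needed.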
*/ -static u64 -ice_ptp_read_src_clk_reg(struct ice_pf *pf, struct ptp_system_timestamp *sts) +u64 ice_ptp_read_src_clk_reg(struct ice_pf *pf, + struct ptp_system_timestamp *sts) { struct ice_hw *hw = &pf->hw; u32 hi, lo, lo2; @@ -310,6 +310,15 @@ ice_ptp_read_src_clk_reg(struct ice_pf *pf, struct ptp_system_timestamp *sts) /* Read the system timestamp pre PHC read */ ptp_read_system_prets(sts); + if (hw->mac_type == ICE_MAC_E830) { + u64 clk_time = rd64(hw, E830_GLTSYN_TIME_L(tmr_idx)); + + /* Read the system timestamp post PHC read */ + ptp_read_system_postts(sts); + + return clk_time; + } + lo = rd32(hw, GLTSYN_TIME_L(tmr_idx)); /* Read the system timestamp post PHC read */ @@ -971,28 +980,6 @@ ice_ptp_release_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx) tx->len = 0; } -/** - * ice_ptp_init_tx_eth56g - Initialize tracking for Tx timestamps - * @pf: Board private structure - * @tx: the Tx tracking structure to initialize - * @port: the port this structure tracks - * - * Initialize the Tx timestamp tracker for this port. ETH56G PHYs - * have independent memory blocks for all ports. - * - * Return: 0 for success, -ENOMEM when failed to allocate Tx tracker - */ -static int ice_ptp_init_tx_eth56g(struct ice_pf *pf, struct ice_ptp_tx *tx, - u8 port) -{ - tx->block = port; - tx->offset = 0; - tx->len = INDEX_PER_PORT_ETH56G; - tx->has_ready_bitmap = 1; - - return ice_ptp_alloc_tx_tracker(tx); -} - /** * ice_ptp_init_tx_e82x - Initialize tracking for Tx timestamps * @pf: Board private structure @@ -1003,9 +990,11 @@ static int ice_ptp_init_tx_eth56g(struct ice_pf *pf, struct ice_ptp_tx *tx, * the timestamp block is shared for all ports in the same quad. To avoid * ports using the same timestamp index, logically break the block of * registers into chunks based on the port number. + * + * Return: 0 on success, -ENOMEM when out of memory */ -static int -ice_ptp_init_tx_e82x(struct ice_pf *pf, struct ice_ptp_tx *tx, u8 port) +static int ice_ptp_init_tx_e82x(struct ice_pf *pf, struct ice_ptp_tx *tx, + u8 port) { tx->block = ICE_GET_QUAD_NUM(port); tx->offset = (port % ICE_PORTS_PER_QUAD) * INDEX_PER_PORT_E82X; @@ -1016,24 +1005,27 @@ ice_ptp_init_tx_e82x(struct ice_pf *pf, struct ice_ptp_tx *tx, u8 port) } /** - * ice_ptp_init_tx_e810 - Initialize tracking for Tx timestamps + * ice_ptp_init_tx - Initialize tracking for Tx timestamps * @pf: Board private structure * @tx: the Tx tracking structure to initialize + * @port: the port this structure tracks + * + * Initialize the Tx timestamp tracker for this PF. For all PHYs except E82X, + * each port has its own block of timestamps, independent of the other ports. * - * Initialize the Tx timestamp tracker for this PF. For E810 devices, each - * port has its own block of timestamps, independent of the other ports. + * Return: 0 on success, -ENOMEM when out of memory */ -static int -ice_ptp_init_tx_e810(struct ice_pf *pf, struct ice_ptp_tx *tx) +static int ice_ptp_init_tx(struct ice_pf *pf, struct ice_ptp_tx *tx, u8 port) { - tx->block = pf->hw.port_info->lport; + tx->block = port; tx->offset = 0; - tx->len = INDEX_PER_PORT_E810; + tx->len = INDEX_PER_PORT; + /* The E810 PHY does not provide a timestamp ready bitmap. Instead, * verify new timestamps against cached copy of the last read * timestamp. 
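+ * All other MAC types handled here do provide the ready bitmap, which is + * why has_ready_bitmap is derived from the MAC type below.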
*/ - tx->has_ready_bitmap = 0; + tx->has_ready_bitmap = pf->hw.mac_type != ICE_MAC_E810; return ice_ptp_alloc_tx_tracker(tx); } @@ -1318,20 +1310,21 @@ ice_ptp_port_phy_stop(struct ice_ptp_port *ptp_port) struct ice_hw *hw = &pf->hw; int err; - if (ice_is_e810(hw)) - return 0; - mutex_lock(&ptp_port->ps_lock); - switch (ice_get_phy_model(hw)) { - case ICE_PHY_ETH56G: - err = ice_stop_phy_timer_eth56g(hw, port, true); + switch (hw->mac_type) { + case ICE_MAC_E810: + case ICE_MAC_E830: + err = 0; break; - case ICE_PHY_E82X: + case ICE_MAC_GENERIC: kthread_cancel_delayed_work_sync(&ptp_port->ov_work); err = ice_stop_phy_timer_e82x(hw, port, true); break; + case ICE_MAC_GENERIC_3K_E825: + err = ice_stop_phy_timer_eth56g(hw, port, true); + break; default: err = -ENODEV; } @@ -1361,19 +1354,17 @@ ice_ptp_port_phy_restart(struct ice_ptp_port *ptp_port) unsigned long flags; int err; - if (ice_is_e810(hw)) - return 0; - if (!ptp_port->link_up) return ice_ptp_port_phy_stop(ptp_port); mutex_lock(&ptp_port->ps_lock); - switch (ice_get_phy_model(hw)) { - case ICE_PHY_ETH56G: - err = ice_start_phy_timer_eth56g(hw, port); + switch (hw->mac_type) { + case ICE_MAC_E810: + case ICE_MAC_E830: + err = 0; break; - case ICE_PHY_E82X: + case ICE_MAC_GENERIC: /* Start the PHY timer in Vernier mode */ kthread_cancel_delayed_work_sync(&ptp_port->ov_work); @@ -1398,6 +1389,9 @@ ice_ptp_port_phy_restart(struct ice_ptp_port *ptp_port) kthread_queue_delayed_work(pf->ptp.kworker, &ptp_port->ov_work, 0); break; + case ICE_MAC_GENERIC_3K_E825: + err = ice_start_phy_timer_eth56g(hw, port); + break; default: err = -ENODEV; } @@ -1432,12 +1426,14 @@ void ice_ptp_link_change(struct ice_pf *pf, bool linkup) /* Skip HW writes if reset is in progress */ if (pf->hw.reset_ongoing) return; - switch (ice_get_phy_model(hw)) { - case ICE_PHY_E810: - /* Do not reconfigure E810 PHY */ + + switch (hw->mac_type) { + case ICE_MAC_E810: + case ICE_MAC_E830: + /* Do not reconfigure E810 or E830 PHY */ return; - case ICE_PHY_ETH56G: - case ICE_PHY_E82X: + case ICE_MAC_GENERIC: + case ICE_MAC_GENERIC_3K_E825: ice_ptp_port_phy_restart(ptp_port); return; default: @@ -1465,46 +1461,45 @@ static int ice_ptp_cfg_phy_interrupt(struct ice_pf *pf, bool ena, u32 threshold) ice_ptp_reset_ts_memory(hw); - switch (ice_get_phy_model(hw)) { - case ICE_PHY_ETH56G: { - int port; + switch (hw->mac_type) { + case ICE_MAC_E810: + case ICE_MAC_E830: + return 0; + case ICE_MAC_GENERIC: { + int quad; - for (port = 0; port < hw->ptp.num_lports; port++) { + for (quad = 0; quad < ICE_GET_QUAD_NUM(hw->ptp.num_lports); + quad++) { int err; - err = ice_phy_cfg_intr_eth56g(hw, port, ena, threshold); + err = ice_phy_cfg_intr_e82x(hw, quad, ena, threshold); if (err) { - dev_err(dev, "Failed to configure PHY interrupt for port %d, err %d\n", - port, err); + dev_err(dev, "Failed to configure PHY interrupt for quad %d, err %d\n", + quad, err); return err; } } return 0; } - case ICE_PHY_E82X: { - int quad; + case ICE_MAC_GENERIC_3K_E825: { + int port; - for (quad = 0; quad < ICE_GET_QUAD_NUM(hw->ptp.num_lports); - quad++) { + for (port = 0; port < hw->ptp.num_lports; port++) { int err; - err = ice_phy_cfg_intr_e82x(hw, quad, ena, threshold); + err = ice_phy_cfg_intr_eth56g(hw, port, ena, threshold); if (err) { - dev_err(dev, "Failed to configure PHY interrupt for quad %d, err %d\n", - quad, err); + dev_err(dev, "Failed to configure PHY interrupt for port %d, err %d\n", + port, err); return err; } } return 0; } - case ICE_PHY_E810: - return 0; - case ICE_PHY_UNSUP: + case 
ICE_MAC_UNKNOWN: default: - dev_warn(dev, "%s: Unexpected PHY model %d\n", __func__, - ice_get_phy_model(hw)); return -EOPNOTSUPP; } } @@ -1740,7 +1735,7 @@ static int ice_ptp_write_perout(struct ice_hw *hw, unsigned int chan, /* 0. Reset mode & out_en in AUX_OUT */ wr32(hw, GLTSYN_AUX_OUT(chan, tmr_idx), 0); - if (ice_is_e825c(hw)) { + if (hw->mac_type == ICE_MAC_GENERIC_3K_E825) { int err; /* Enable/disable CGU 1PPS output for E825C */ @@ -1783,6 +1778,7 @@ static int ice_ptp_write_perout(struct ice_hw *hw, unsigned int chan, 8 + chan + (tmr_idx * 4)); wr32(hw, GLGEN_GPIO_CTL(gpio_pin), val); + ice_flush(hw); return 0; } @@ -1824,7 +1820,7 @@ static int ice_ptp_cfg_perout(struct ice_pf *pf, struct ptp_perout_request *rq, return ice_ptp_write_perout(hw, rq->index, gpio_pin, 0, 0); if (strncmp(pf->ptp.pin_desc[pin_desc_idx].name, "1PPS", 64) == 0 && - period != NSEC_PER_SEC && hw->ptp.phy_model == ICE_PHY_E82X) { + period != NSEC_PER_SEC && hw->mac_type == ICE_MAC_GENERIC) { dev_err(ice_pf_to_dev(pf), "1PPS pin supports only 1 s period\n"); return -EOPNOTSUPP; } @@ -1843,9 +1839,10 @@ static int ice_ptp_cfg_perout(struct ice_pf *pf, struct ptp_perout_request *rq, div64_u64_rem(start, period, &phase); /* If we have only phase or start time is in the past, start the timer - * at the next multiple of period, maintaining phase. + * at the next multiple of period, maintaining phase, at least 0.5 seconds + * from now, so we have time to write it to HW. */ - clk = ice_ptp_read_src_clk_reg(pf, NULL); + clk = ice_ptp_read_src_clk_reg(pf, NULL) + NSEC_PER_MSEC * 500; if (rq->flags & PTP_PEROUT_PHASE || start <= clk - prop_delay_ns) start = div64_u64(clk + period - 1, period) * period + phase; @@ -2078,7 +2075,7 @@ ice_ptp_settime64(struct ptp_clock_info *info, const struct timespec64 *ts) /* For Vernier mode on E82X, we need to recalibrate after new settime. * Start with marking timestamps as invalid.
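+ * The offsets are recalculated when ice_ptp_restart_all_phy() runs later + * in this function.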
*/ - if (ice_get_phy_model(hw) == ICE_PHY_E82X) { + if (hw->mac_type == ICE_MAC_GENERIC) { err = ice_ptp_clear_phy_offset_ready_e82x(hw); if (err) dev_warn(ice_pf_to_dev(pf), "Failed to mark timestamps as invalid before settime\n"); @@ -2102,7 +2099,7 @@ ice_ptp_settime64(struct ptp_clock_info *info, const struct timespec64 *ts) ice_ptp_enable_all_perout(pf); /* Recalibrate and re-enable timestamp blocks for E822/E823 */ - if (ice_get_phy_model(hw) == ICE_PHY_E82X) + if (hw->mac_type == ICE_MAC_GENERIC) ice_ptp_restart_all_phy(pf); exit: if (err) { @@ -2180,93 +2177,157 @@ static int ice_ptp_adjtime(struct ptp_clock_info *info, s64 delta) return 0; } +/** + * struct ice_crosststamp_cfg - Device cross timestamp configuration + * @lock_reg: The hardware semaphore lock to use + * @lock_busy: Bit in the semaphore lock indicating the lock is busy + * @ctl_reg: The hardware register to request cross timestamp + * @ctl_active: Bit in the control register to request cross timestamp + * @art_time_l: Lower 32-bits of ART system time + * @art_time_h: Upper 32-bits of ART system time + * @dev_time_l: Lower 32-bits of device time (per timer index) + * @dev_time_h: Upper 32-bits of device time (per timer index) + */ +struct ice_crosststamp_cfg { + /* HW semaphore lock register */ + u32 lock_reg; + u32 lock_busy; + + /* Capture control register */ + u32 ctl_reg; + u32 ctl_active; + + /* Time storage */ + u32 art_time_l; + u32 art_time_h; + u32 dev_time_l[2]; + u32 dev_time_h[2]; +}; + +static const struct ice_crosststamp_cfg ice_crosststamp_cfg_e82x = { + .lock_reg = PFHH_SEM, + .lock_busy = PFHH_SEM_BUSY_M, + .ctl_reg = GLHH_ART_CTL, + .ctl_active = GLHH_ART_CTL_ACTIVE_M, + .art_time_l = GLHH_ART_TIME_L, + .art_time_h = GLHH_ART_TIME_H, + .dev_time_l[0] = GLTSYN_HHTIME_L(0), + .dev_time_h[0] = GLTSYN_HHTIME_H(0), + .dev_time_l[1] = GLTSYN_HHTIME_L(1), + .dev_time_h[1] = GLTSYN_HHTIME_H(1), +}; + #ifdef CONFIG_ICE_HWTS +static const struct ice_crosststamp_cfg ice_crosststamp_cfg_e830 = { + .lock_reg = E830_PFPTM_SEM, + .lock_busy = E830_PFPTM_SEM_BUSY_M, + .ctl_reg = E830_GLPTM_ART_CTL, + .ctl_active = E830_GLPTM_ART_CTL_ACTIVE_M, + .art_time_l = E830_GLPTM_ART_TIME_L, + .art_time_h = E830_GLPTM_ART_TIME_H, + .dev_time_l[0] = E830_GLTSYN_PTMTIME_L(0), + .dev_time_h[0] = E830_GLTSYN_PTMTIME_H(0), + .dev_time_l[1] = E830_GLTSYN_PTMTIME_L(1), + .dev_time_h[1] = E830_GLTSYN_PTMTIME_H(1), +}; + +#endif /* CONFIG_ICE_HWTS */ +/** + * struct ice_crosststamp_ctx - Device cross timestamp context + * @snapshot: snapshot of system clocks for historic interpolation + * @pf: pointer to the PF private structure + * @cfg: pointer to hardware configuration for cross timestamp + */ +struct ice_crosststamp_ctx { + struct system_time_snapshot snapshot; + struct ice_pf *pf; + const struct ice_crosststamp_cfg *cfg; +}; + /** - * ice_ptp_get_syncdevicetime - Get the cross time stamp info + * ice_capture_crosststamp - Capture a device/system cross timestamp * @device: Current device time * @system: System counter value read synchronously with device time - * @ctx: Context provided by timekeeping code + * @__ctx: Context passed from ice_ptp_getcrosststamp * * Read device and system (ART) clock simultaneously and return the corrected * clock values in ns. + * + * Return: zero on success, or a negative error code on failure. 
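+ * + * Called by get_device_system_crosststamp() with the context assembled in + * ice_ptp_getcrosststamp() below.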
*/ -static int -ice_ptp_get_syncdevicetime(ktime_t *device, - struct system_counterval_t *system, - void *ctx) +static int ice_capture_crosststamp(ktime_t *device, + struct system_counterval_t *system, + void *__ctx) { - struct ice_pf *pf = (struct ice_pf *)ctx; - struct ice_hw *hw = &pf->hw; - u32 hh_lock, hh_art_ctl; - int i; + struct ice_crosststamp_ctx *ctx = __ctx; + const struct ice_crosststamp_cfg *cfg; + u32 lock, ctl, ts_lo, ts_hi, tmr_idx; + struct ice_pf *pf; + struct ice_hw *hw; + int err; + u64 ts; -#define MAX_HH_HW_LOCK_TRIES 5 -#define MAX_HH_CTL_LOCK_TRIES 100 + cfg = ctx->cfg; + pf = ctx->pf; + hw = &pf->hw; - for (i = 0; i < MAX_HH_HW_LOCK_TRIES; i++) { - /* Get the HW lock */ - hh_lock = rd32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id)); - if (hh_lock & PFHH_SEM_BUSY_M) { - usleep_range(10000, 15000); - continue; - } - break; - } - if (hh_lock & PFHH_SEM_BUSY_M) { - dev_err(ice_pf_to_dev(pf), "PTP failed to get hh lock\n"); + tmr_idx = hw->func_caps.ts_func_info.tmr_index_assoc; + if (tmr_idx > 1) + return -EINVAL; + + /* Poll until we obtain the cross-timestamp hardware semaphore */ + err = rd32_poll_timeout(hw, cfg->lock_reg, lock, + !(lock & cfg->lock_busy), + 10 * USEC_PER_MSEC, 50 * USEC_PER_MSEC); + if (err) { + dev_err(ice_pf_to_dev(pf), "PTP failed to get cross timestamp lock\n"); return -EBUSY; } + /* Snapshot system time for historic interpolation */ + ktime_get_snapshot(&ctx->snapshot); + /* Program cmd to master timer */ ice_ptp_src_cmd(hw, ICE_PTP_READ_TIME); /* Start the ART and device clock sync sequence */ - hh_art_ctl = rd32(hw, GLHH_ART_CTL); - hh_art_ctl = hh_art_ctl | GLHH_ART_CTL_ACTIVE_M; - wr32(hw, GLHH_ART_CTL, hh_art_ctl); - - for (i = 0; i < MAX_HH_CTL_LOCK_TRIES; i++) { - /* Wait for sync to complete */ - hh_art_ctl = rd32(hw, GLHH_ART_CTL); - if (hh_art_ctl & GLHH_ART_CTL_ACTIVE_M) { - udelay(1); - continue; - } else { - u32 hh_ts_lo, hh_ts_hi, tmr_idx; - u64 hh_ts; - - tmr_idx = hw->func_caps.ts_func_info.tmr_index_assoc; - /* Read ART time */ - hh_ts_lo = rd32(hw, GLHH_ART_TIME_L); - hh_ts_hi = rd32(hw, GLHH_ART_TIME_H); - hh_ts = ((u64)hh_ts_hi << 32) | hh_ts_lo; - system->cycles = hh_ts; - system->cs_id = CSID_X86_ART; - /* Read Device source clock time */ - hh_ts_lo = rd32(hw, GLTSYN_HHTIME_L(tmr_idx)); - hh_ts_hi = rd32(hw, GLTSYN_HHTIME_H(tmr_idx)); - hh_ts = ((u64)hh_ts_hi << 32) | hh_ts_lo; - *device = ns_to_ktime(hh_ts); - break; - } - } + ctl = rd32(hw, cfg->ctl_reg); + ctl |= cfg->ctl_active; + wr32(hw, cfg->ctl_reg, ctl); + /* Poll until hardware completes the capture */ + err = rd32_poll_timeout(hw, cfg->ctl_reg, ctl, !(ctl & cfg->ctl_active), + 5, 20 * USEC_PER_MSEC); + if (err) + goto err_timeout; + + /* Read ART system time */ + ts_lo = rd32(hw, cfg->art_time_l); + ts_hi = rd32(hw, cfg->art_time_h); + ts = ((u64)ts_hi << 32) | ts_lo; + system->cycles = ts; + system->cs_id = CSID_X86_ART; + + /* Read Device source clock time */ + ts_lo = rd32(hw, cfg->dev_time_l[tmr_idx]); + ts_hi = rd32(hw, cfg->dev_time_h[tmr_idx]); + ts = ((u64)ts_hi << 32) | ts_lo; + *device = ns_to_ktime(ts); + +err_timeout: /* Clear the master timer */ ice_ptp_src_cmd(hw, ICE_PTP_NOP); /* Release HW lock */ - hh_lock = rd32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id)); - hh_lock = hh_lock & ~PFHH_SEM_BUSY_M; - wr32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id), hh_lock); - - if (i == MAX_HH_CTL_LOCK_TRIES) - return -ETIMEDOUT; + lock = rd32(hw, cfg->lock_reg); + lock &= ~cfg->lock_busy; + wr32(hw, cfg->lock_reg, lock); - return 0; + return err; } /** 
- * ice_ptp_getcrosststamp_e82x - Capture a device cross timestamp + * ice_ptp_getcrosststamp - Capture a device cross timestamp * @info: the driver's PTP info structure * @cts: The memory to fill the cross timestamp info * @@ -2274,22 +2335,36 @@ ice_ptp_get_syncdevicetime(ktime_t *device, * clock. Fill the cross timestamp information and report it back to the * caller. * - * This is only valid for E822 and E823 devices which have support for - * generating the cross timestamp via PCIe PTM. - * * In order to correctly correlate the ART timestamp back to the TSC time, the * CPU must have X86_FEATURE_TSC_KNOWN_FREQ. + * + * Return: zero on success, or a negative error code on failure. */ -static int -ice_ptp_getcrosststamp_e82x(struct ptp_clock_info *info, - struct system_device_crosststamp *cts) +static int ice_ptp_getcrosststamp(struct ptp_clock_info *info, + struct system_device_crosststamp *cts) { struct ice_pf *pf = ptp_info_to_pf(info); + struct ice_crosststamp_ctx ctx = { + .pf = pf, + }; + + switch (pf->hw.mac_type) { + case ICE_MAC_GENERIC: + case ICE_MAC_GENERIC_3K_E825: + ctx.cfg = &ice_crosststamp_cfg_e82x; + break; +#ifdef CONFIG_ICE_HWTS + case ICE_MAC_E830: + ctx.cfg = &ice_crosststamp_cfg_e830; + break; +#endif /* CONFIG_ICE_HWTS */ + default: + return -EOPNOTSUPP; + } - return get_device_system_crosststamp(ice_ptp_get_syncdevicetime, - pf, NULL, cts); + return get_device_system_crosststamp(ice_capture_crosststamp, &ctx, + &ctx.snapshot, cts); } -#endif /* CONFIG_ICE_HWTS */ /** * ice_ptp_get_ts_config - ioctl interface to read the timestamping config @@ -2550,13 +2625,9 @@ static int ice_ptp_parse_sdp_entries(struct ice_pf *pf, __le16 *entries, */ static void ice_ptp_set_funcs_e82x(struct ice_pf *pf) { -#ifdef CONFIG_ICE_HWTS - if (boot_cpu_has(X86_FEATURE_ART) && - boot_cpu_has(X86_FEATURE_TSC_KNOWN_FREQ)) - pf->ptp.info.getcrosststamp = ice_ptp_getcrosststamp_e82x; + pf->ptp.info.getcrosststamp = ice_ptp_getcrosststamp; -#endif /* CONFIG_ICE_HWTS */ - if (ice_is_e825c(&pf->hw)) { + if (pf->hw.mac_type == ICE_MAC_GENERIC_3K_E825) { pf->ptp.ice_pin_desc = ice_pin_desc_e825c; pf->ptp.info.n_pins = ICE_PIN_DESC_ARR_LEN(ice_pin_desc_e825c); } else { @@ -2622,6 +2693,28 @@ static void ice_ptp_set_funcs_e810(struct ice_pf *pf) } } +/** + * ice_ptp_set_funcs_e830 - Set specialized functions for E830 support + * @pf: Board private structure + * + * Assign functions to the PTP capabilities structure for E830 devices. + * Functions which operate across all device families should be set directly + * in ice_ptp_set_caps. Only add functions here which are distinct for E830 + * devices.
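+ * + * Cross timestamping on E830 additionally requires PCIe PTM, so the + * pcie_ptm_enabled() check below gates the getcrosststamp callback.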
+ */ +static void ice_ptp_set_funcs_e830(struct ice_pf *pf) +{ +#ifdef CONFIG_ICE_HWTS + if (pcie_ptm_enabled(pf->pdev) && boot_cpu_has(X86_FEATURE_ART)) + pf->ptp.info.getcrosststamp = ice_ptp_getcrosststamp; + +#endif /* CONFIG_ICE_HWTS */ + /* Rest of the config is the same as base E810 */ + pf->ptp.ice_pin_desc = ice_pin_desc_e810; + pf->ptp.info.n_pins = ICE_PIN_DESC_ARR_LEN(ice_pin_desc_e810); + ice_ptp_setup_pin_cfg(pf); +} + /** * ice_ptp_set_caps - Set PTP capabilities * @pf: Board private structure @@ -2644,10 +2737,20 @@ static void ice_ptp_set_caps(struct ice_pf *pf) info->enable = ice_ptp_gpio_enable; info->verify = ice_verify_pin; - if (ice_is_e810(&pf->hw)) + switch (pf->hw.mac_type) { + case ICE_MAC_E810: ice_ptp_set_funcs_e810(pf); - else + return; + case ICE_MAC_E830: + ice_ptp_set_funcs_e830(pf); + return; + case ICE_MAC_GENERIC: + case ICE_MAC_GENERIC_3K_E825: ice_ptp_set_funcs_e82x(pf); + return; + default: + return; + } } /** @@ -2757,6 +2860,65 @@ enum ice_tx_tstamp_work ice_ptp_process_ts(struct ice_pf *pf) } } +/** + * ice_ptp_ts_irq - Process the PTP Tx timestamps in IRQ context + * @pf: Board private structure + * + * Return: IRQ_WAKE_THREAD if Tx timestamp read has to be handled in the bottom + * half of the interrupt and IRQ_HANDLED otherwise. + */ +irqreturn_t ice_ptp_ts_irq(struct ice_pf *pf) +{ + struct ice_hw *hw = &pf->hw; + + switch (hw->mac_type) { + case ICE_MAC_E810: + /* E810 devices capable of low latency timestamping with + * interrupt can request a single timestamp in the top half + * and wait for a second LL TS interrupt from the FW when + * it's ready. + */ + if (hw->dev_caps.ts_dev_info.ts_ll_int_read) { + struct ice_ptp_tx *tx = &pf->ptp.port.tx; + u8 idx; + + if (!ice_pf_state_is_nominal(pf)) + return IRQ_HANDLED; + + spin_lock(&tx->lock); + idx = find_next_bit_wrap(tx->in_use, tx->len, + tx->last_ll_ts_idx_read + 1); + if (idx != tx->len) + ice_ptp_req_tx_single_tstamp(tx, idx); + spin_unlock(&tx->lock); + + return IRQ_HANDLED; + } + fallthrough; /* non-LL_TS E810 */ + case ICE_MAC_GENERIC: + case ICE_MAC_GENERIC_3K_E825: + /* All other devices process timestamps in the bottom half due + * to sleeping or polling. + */ + if (!ice_ptp_pf_handles_tx_interrupt(pf)) + return IRQ_HANDLED; + + set_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread); + return IRQ_WAKE_THREAD; + case ICE_MAC_E830: + /* E830 can read timestamps in the top half using rd32() */ + if (ice_ptp_process_ts(pf) == ICE_TX_TSTAMP_WORK_PENDING) { + /* Process outstanding Tx timestamps. If there + * is more work, re-arm the interrupt to trigger again.
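+ * Writing the TSYN_TX bit back into PFINT_OICR and flushing it is what + * re-fires the interrupt.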
+ */ + wr32(hw, PFINT_OICR, PFINT_OICR_TSYN_TX_M); + ice_flush(hw); + } + return IRQ_HANDLED; + default: + return IRQ_HANDLED; + } +} + /** * ice_ptp_maybe_trigger_tx_interrupt - Trigger Tx timestamp interrupt * @pf: Board private structure @@ -2777,7 +2939,7 @@ static void ice_ptp_maybe_trigger_tx_interrupt(struct ice_pf *pf) bool trigger_oicr = false; unsigned int i; - if (ice_is_e810(hw)) + if (!pf->ptp.port.tx.has_ready_bitmap) return; if (!ice_pf_src_tmr_owned(pf)) @@ -2912,14 +3074,12 @@ static int ice_ptp_rebuild_owner(struct ice_pf *pf) */ ice_ptp_flush_all_tx_tracker(pf); - if (!ice_is_e810(hw)) { - /* Enable quad interrupts */ - err = ice_ptp_cfg_phy_interrupt(pf, true, 1); - if (err) - return err; + /* Enable quad interrupts */ + err = ice_ptp_cfg_phy_interrupt(pf, true, 1); + if (err) + return err; - ice_ptp_restart_all_phy(pf); - } + ice_ptp_restart_all_phy(pf); /* Re-enable all periodic outputs and external timestamp events */ ice_ptp_enable_all_perout(pf); @@ -2971,8 +3131,9 @@ void ice_ptp_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type) static bool ice_is_primary(struct ice_hw *hw) { - return ice_is_e825c(hw) && ice_is_dual(hw) ? - !!(hw->dev_caps.nac_topo.mode & ICE_NAC_TOPO_PRIMARY_M) : true; + return hw->mac_type == ICE_MAC_GENERIC_3K_E825 && ice_is_dual(hw) ? + !!(hw->dev_caps.nac_topo.mode & ICE_NAC_TOPO_PRIMARY_M) : + true; } static int ice_ptp_setup_adapter(struct ice_pf *pf) @@ -2990,7 +3151,7 @@ static int ice_ptp_setup_pf(struct ice_pf *pf) struct ice_ptp *ctrl_ptp = ice_get_ctrl_ptp(pf); struct ice_ptp *ptp = &pf->ptp; - if (WARN_ON(!ctrl_ptp) || ice_get_phy_model(&pf->hw) == ICE_PHY_UNSUP) + if (WARN_ON(!ctrl_ptp) || pf->hw.mac_type == ICE_MAC_UNKNOWN) return -ENODEV; INIT_LIST_HEAD(&ptp->port.list_node); @@ -3007,7 +3168,7 @@ static void ice_ptp_cleanup_pf(struct ice_pf *pf) { struct ice_ptp *ptp = &pf->ptp; - if (ice_get_phy_model(&pf->hw) != ICE_PHY_UNSUP) { + if (pf->hw.mac_type != ICE_MAC_UNKNOWN) { mutex_lock(&pf->adapter->ports.lock); list_del(&ptp->port.list_node); mutex_unlock(&pf->adapter->ports.lock); @@ -3127,6 +3288,8 @@ static int ice_ptp_init_work(struct ice_pf *pf, struct ice_ptp *ptp) * ice_ptp_init_port - Initialize PTP port structure * @pf: Board private structure * @ptp_port: PTP port structure + * + * Return: 0 on success, -ENODEV on invalid MAC type, -ENOMEM on failed alloc. */ static int ice_ptp_init_port(struct ice_pf *pf, struct ice_ptp_port *ptp_port) { @@ -3134,16 +3297,14 @@ static int ice_ptp_init_port(struct ice_pf *pf, struct ice_ptp_port *ptp_port) mutex_init(&ptp_port->ps_lock); - switch (ice_get_phy_model(hw)) { - case ICE_PHY_ETH56G: - return ice_ptp_init_tx_eth56g(pf, &ptp_port->tx, - ptp_port->port_num); - case ICE_PHY_E810: - return ice_ptp_init_tx_e810(pf, &ptp_port->tx); - case ICE_PHY_E82X: + switch (hw->mac_type) { + case ICE_MAC_E810: + case ICE_MAC_E830: + case ICE_MAC_GENERIC_3K_E825: + return ice_ptp_init_tx(pf, &ptp_port->tx, ptp_port->port_num); + case ICE_MAC_GENERIC: kthread_init_delayed_work(&ptp_port->ov_work, ice_ptp_wait_for_offsets); - return ice_ptp_init_tx_e82x(pf, &ptp_port->tx, ptp_port->port_num); default: @@ -3162,8 +3323,8 @@ static int ice_ptp_init_port(struct ice_pf *pf, struct ice_ptp_port *ptp_port) */ static void ice_ptp_init_tx_interrupt_mode(struct ice_pf *pf) { - switch (ice_get_phy_model(&pf->hw)) { - case ICE_PHY_E82X: + switch (pf->hw.mac_type) { + case ICE_MAC_GENERIC: /* E822 based PHY has the clock owner process the interrupt * for all ports.
*/ diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.h b/drivers/net/ethernet/intel/ice/ice_ptp.h index a1d0e988c084b..3b769a0cad00d 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp.h +++ b/drivers/net/ethernet/intel/ice/ice_ptp.h @@ -128,8 +128,7 @@ struct ice_ptp_tx { /* Quad and port information for initializing timestamp blocks */ #define INDEX_PER_QUAD 64 #define INDEX_PER_PORT_E82X 16 -#define INDEX_PER_PORT_E810 64 -#define INDEX_PER_PORT_ETH56G 64 +#define INDEX_PER_PORT 64 /** * struct ice_ptp_port - data used to initialize an external port for PTP @@ -304,6 +303,9 @@ s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb); void ice_ptp_req_tx_single_tstamp(struct ice_ptp_tx *tx, u8 idx); void ice_ptp_complete_tx_single_tstamp(struct ice_ptp_tx *tx); enum ice_tx_tstamp_work ice_ptp_process_ts(struct ice_pf *pf); +irqreturn_t ice_ptp_ts_irq(struct ice_pf *pf); +u64 ice_ptp_read_src_clk_reg(struct ice_pf *pf, + struct ptp_system_timestamp *sts); u64 ice_ptp_get_rx_hwts(const union ice_32b_rx_flex_desc *rx_desc, const struct ice_pkt_ctx *pkt_ctx); @@ -342,6 +344,17 @@ static inline bool ice_ptp_process_ts(struct ice_pf *pf) return true; } +static inline irqreturn_t ice_ptp_ts_irq(struct ice_pf *pf) +{ + return IRQ_HANDLED; +} + +static inline u64 ice_ptp_read_src_clk_reg(struct ice_pf *pf, + struct ptp_system_timestamp *sts) +{ + return 0; +} + static inline u64 ice_ptp_get_rx_hwts(const union ice_32b_rx_flex_desc *rx_desc, const struct ice_pkt_ctx *pkt_ctx) diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_consts.h b/drivers/net/ethernet/intel/ice/ice_ptp_consts.h index ac46d11833007..003cdfada3ca8 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp_consts.h +++ b/drivers/net/ethernet/intel/ice/ice_ptp_consts.h @@ -10,70 +10,25 @@ /* Constants defined for the PTP 1588 clock hardware. 
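+ * Each eth56g_phy_res entry carries a resource base address plus the + * per-lane step used to derive the individual lane addresses.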
*/ const struct ice_phy_reg_info_eth56g eth56g_phy_res[NUM_ETH56G_PHY_RES] = { - /* ETH56G_PHY_REG_PTP */ - { - /* base_addr */ - { - 0x092000, - 0x126000, - 0x1BA000, - 0x24E000, - 0x2E2000, - }, - /* step */ - 0x98, + [ETH56G_PHY_REG_PTP] = { + .base_addr = 0x092000, + .step = 0x98, }, - /* ETH56G_PHY_MEM_PTP */ - { - /* base_addr */ - { - 0x093000, - 0x127000, - 0x1BB000, - 0x24F000, - 0x2E3000, - }, - /* step */ - 0x200, + [ETH56G_PHY_MEM_PTP] = { + .base_addr = 0x093000, + .step = 0x200, }, - /* ETH56G_PHY_REG_XPCS */ - { - /* base_addr */ - { - 0x000000, - 0x009400, - 0x128000, - 0x1BC000, - 0x250000, - }, - /* step */ - 0x21000, + [ETH56G_PHY_REG_XPCS] = { + .base_addr = 0x000000, + .step = 0x21000, }, - /* ETH56G_PHY_REG_MAC */ - { - /* base_addr */ - { - 0x085000, - 0x119000, - 0x1AD000, - 0x241000, - 0x2D5000, - }, - /* step */ - 0x1000, + [ETH56G_PHY_REG_MAC] = { + .base_addr = 0x085000, + .step = 0x1000, }, - /* ETH56G_PHY_REG_GPCS */ - { - /* base_addr */ - { - 0x084000, - 0x118000, - 0x1AC000, - 0x240000, - 0x2D4000, - }, - /* step */ - 0x400, + [ETH56G_PHY_REG_GPCS] = { + .base_addr = 0x084000, + .step = 0x400, }, }; diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c index ec91822e92806..89bb8461284ad 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c +++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c @@ -746,7 +746,7 @@ static int ice_init_cgu_e82x(struct ice_hw *hw) int err; /* Disable sticky lock detection so lock err reported is accurate */ - if (ice_is_e825c(hw)) + if (hw->mac_type == ICE_MAC_GENERIC_3K_E825) err = ice_cfg_cgu_pll_dis_sticky_bits_e825c(hw); else err = ice_cfg_cgu_pll_dis_sticky_bits_e82x(hw); @@ -756,7 +756,7 @@ static int ice_init_cgu_e82x(struct ice_hw *hw) /* Configure the CGU PLL using the parameters from the function * capabilities. */ - if (ice_is_e825c(hw)) + if (hw->mac_type == ICE_MAC_GENERIC_3K_E825) err = ice_cfg_cgu_pll_e825c(hw, ts_info->time_ref, (enum ice_clk_src)ts_info->clk_src); else @@ -827,8 +827,9 @@ static u32 ice_ptp_tmr_cmd_to_port_reg(struct ice_hw *hw, /* Certain hardware families share the same register values for the * port register and source timer register. */ - switch (ice_get_phy_model(hw)) { - case ICE_PHY_E810: + switch (hw->mac_type) { + case ICE_MAC_E810: + case ICE_MAC_E830: return ice_ptp_tmr_cmd_to_src_reg(hw, cmd) & TS_CMD_MASK_E810; default: break; @@ -895,6 +896,17 @@ static void ice_ptp_exec_tmr_cmd(struct ice_hw *hw) ice_flush(hw); } +/** + * ice_ptp_cfg_sync_delay - Configure PHC to PHY synchronization delay + * @hw: pointer to HW struct + * @delay: delay between PHC and PHY SYNC command execution in nanoseconds + */ +static void ice_ptp_cfg_sync_delay(const struct ice_hw *hw, u32 delay) +{ + wr32(hw, GLTSYN_SYNC_DLAY, delay); + ice_flush(hw); +} + /* 56G PHY device functions * * The following functions operate on devices with the ETH 56G PHY. 
@@ -998,7 +1010,7 @@ static int ice_phy_res_address_eth56g(struct ice_hw *hw, u8 lane, /* Lanes 4..7 are in fact 0..3 on a second PHY */ lane %= hw->ptp.ports_per_phy; - *addr = eth56g_phy_res[res_type].base[0] + + *addr = eth56g_phy_res[res_type].base_addr + lane * eth56g_phy_res[res_type].step + offset; return 0; @@ -1228,7 +1240,7 @@ static int ice_write_quad_ptp_reg_eth56g(struct ice_hw *hw, u8 port, if (port >= hw->ptp.num_lports) return -EIO; - addr = eth56g_phy_res[ETH56G_PHY_REG_PTP].base[0] + offset; + addr = eth56g_phy_res[ETH56G_PHY_REG_PTP].base_addr + offset; return ice_write_phy_eth56g(hw, port, addr, val); } @@ -1253,7 +1265,7 @@ static int ice_read_quad_ptp_reg_eth56g(struct ice_hw *hw, u8 port, if (port >= hw->ptp.num_lports) return -EIO; - addr = eth56g_phy_res[ETH56G_PHY_REG_PTP].base[0] + offset; + addr = eth56g_phy_res[ETH56G_PHY_REG_PTP].base_addr + offset; return ice_read_phy_eth56g(hw, port, addr, val); } @@ -1576,9 +1588,8 @@ static int ice_read_ptp_tstamp_eth56g(struct ice_hw *hw, u8 port, u8 idx, * lower 8 bits in the low register, and the upper 32 bits in the high * register. */ - *tstamp = FIELD_PREP(TS_PHY_HIGH_M, hi) | - FIELD_PREP(TS_PHY_LOW_M, lo); - + *tstamp = FIELD_PREP(PHY_40B_HIGH_M, hi) | + FIELD_PREP(PHY_40B_LOW_M, lo); return 0; } @@ -2639,18 +2650,17 @@ static void ice_sb_access_ena_eth56g(struct ice_hw *hw, bool enable) } /** - * ice_ptp_init_phc_eth56g - Perform E82X specific PHC initialization + * ice_ptp_init_phc_e825 - Perform E825 specific PHC initialization * @hw: pointer to HW struct * - * Perform PHC initialization steps specific to E82X devices. + * Perform E825-specific PTP hardware clock initialization steps. * - * Return: - * * %0 - success - * * %other - failed to initialize CGU + * Return: 0 on success, negative error code otherwise. */ -static int ice_ptp_init_phc_eth56g(struct ice_hw *hw) +static int ice_ptp_init_phc_e825(struct ice_hw *hw) { ice_sb_access_ena_eth56g(hw, true); + /* Initialize the Clock Generation Unit */ return ice_init_cgu_e82x(hw); } @@ -2729,10 +2739,7 @@ static void ice_ptp_init_phy_e825(struct ice_hw *hw) { struct ice_ptp_hw *ptp = &hw->ptp; struct ice_eth56g_params *params; - u32 phy_rev; - int err; - ptp->phy_model = ICE_PHY_ETH56G; params = &ptp->phy.eth56g; params->onestep_ena = false; params->peer_delay = 0; @@ -2742,9 +2749,6 @@ static void ice_ptp_init_phy_e825(struct ice_hw *hw) ptp->num_lports = params->num_phys * ptp->ports_per_phy; ice_sb_access_ena_eth56g(hw, true); - err = ice_read_phy_eth56g(hw, hw->pf_id, PHY_REG_REVISION, &phy_rev); - if (err || phy_rev != PHY_REVISION_ETH56G) - ptp->phy_model = ICE_PHY_UNSUP; } /* E822 family functions @@ -3219,7 +3223,8 @@ ice_read_phy_tstamp_e82x(struct ice_hw *hw, u8 quad, u8 idx, u64 *tstamp) * lower 8 bits in the low register, and the upper 32 bits in the high * register. */ - *tstamp = FIELD_PREP(TS_PHY_HIGH_M, hi) | FIELD_PREP(TS_PHY_LOW_M, lo); + *tstamp = FIELD_PREP(PHY_40B_HIGH_M, hi) | + FIELD_PREP(PHY_40B_LOW_M, lo); return 0; } @@ -4792,7 +4797,6 @@ int ice_phy_cfg_intr_e82x(struct ice_hw *hw, u8 quad, bool ena, u8 threshold) */ static void ice_ptp_init_phy_e82x(struct ice_ptp_hw *ptp) { - ptp->phy_model = ICE_PHY_E82X; ptp->num_lports = 8; ptp->ports_per_phy = 8; } @@ -4986,7 +4990,8 @@ ice_read_phy_tstamp_e810(struct ice_hw *hw, u8 lport, u8 idx, u64 *tstamp) /* For E810 devices, the timestamp is reported with the lower 32 bits * in the low register, and the upper 8 bits in the high register. 
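+ * The PHY_EXT_40B_* masks compose the two halves into the final 40 bit + * value, mirroring the E830 read path.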
*/ - *tstamp = ((u64)hi) << TS_HIGH_S | ((u64)lo & TS_LOW_M); + *tstamp = FIELD_PREP(PHY_EXT_40B_HIGH_M, hi) | + FIELD_PREP(PHY_EXT_40B_LOW_M, lo); return 0; } @@ -5049,8 +5054,7 @@ static int ice_ptp_init_phc_e810(struct ice_hw *hw) u8 tmr_idx; int err; - /* Ensure synchronization delay is zero */ - wr32(hw, GLTSYN_SYNC_DLAY, 0); + ice_ptp_cfg_sync_delay(hw, ICE_E810_E830_SYNC_DELAY); tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned; err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_ENA(tmr_idx), @@ -5315,68 +5319,6 @@ ice_get_phy_tx_tstamp_ready_e810(struct ice_hw *hw, u8 port, u64 *tstamp_ready) * to access the extended GPIOs available. */ -/** - * ice_get_pca9575_handle - * @hw: pointer to the hw struct - * @pca9575_handle: GPIO controller's handle - * - * Find and return the GPIO controller's handle in the netlist. - * When found - the value will be cached in the hw structure and following calls - * will return cached value - */ -static int -ice_get_pca9575_handle(struct ice_hw *hw, u16 *pca9575_handle) -{ - struct ice_aqc_get_link_topo *cmd; - struct ice_aq_desc desc; - int status; - u8 idx; - - /* If handle was read previously return cached value */ - if (hw->io_expander_handle) { - *pca9575_handle = hw->io_expander_handle; - return 0; - } - - /* If handle was not detected read it from the netlist */ - cmd = &desc.params.get_link_topo; - ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo); - - /* Set node type to GPIO controller */ - cmd->addr.topo_params.node_type_ctx = - (ICE_AQC_LINK_TOPO_NODE_TYPE_M & - ICE_AQC_LINK_TOPO_NODE_TYPE_GPIO_CTRL); - -#define SW_PCA9575_SFP_TOPO_IDX 2 -#define SW_PCA9575_QSFP_TOPO_IDX 1 - - /* Check if the SW IO expander controlling SMA exists in the netlist. */ - if (hw->device_id == ICE_DEV_ID_E810C_SFP) - idx = SW_PCA9575_SFP_TOPO_IDX; - else if (hw->device_id == ICE_DEV_ID_E810C_QSFP) - idx = SW_PCA9575_QSFP_TOPO_IDX; - else - return -EOPNOTSUPP; - - cmd->addr.topo_params.index = idx; - - status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); - if (status) - return -EOPNOTSUPP; - - /* Verify if we found the right IO expander type */ - if (desc.params.get_link_topo.node_part_num != - ICE_AQC_GET_LINK_TOPO_NODE_NR_PCA9575) - return -EOPNOTSUPP; - - /* If present save the handle and return it */ - hw->io_expander_handle = - le16_to_cpu(desc.params.get_link_topo.addr.handle); - *pca9575_handle = hw->io_expander_handle; - - return 0; -} - /** * ice_read_sma_ctrl * @hw: pointer to the hw struct @@ -5441,37 +5383,6 @@ int ice_write_sma_ctrl(struct ice_hw *hw, u8 data) return status; } -/** - * ice_read_pca9575_reg - * @hw: pointer to the hw struct - * @offset: GPIO controller register offset - * @data: pointer to data to be read from the GPIO controller - * - * Read the register from the GPIO controller - */ -int ice_read_pca9575_reg(struct ice_hw *hw, u8 offset, u8 *data) -{ - struct ice_aqc_link_topo_addr link_topo; - __le16 addr; - u16 handle; - int err; - - memset(&link_topo, 0, sizeof(link_topo)); - - err = ice_get_pca9575_handle(hw, &handle); - if (err) - return err; - - link_topo.handle = cpu_to_le16(handle); - link_topo.topo_params.node_type_ctx = - FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_CTX_M, - ICE_AQC_LINK_TOPO_NODE_CTX_PROVIDED); - - addr = cpu_to_le16((u16)offset); - - return ice_aq_read_i2c(hw, link_topo, 0, addr, 1, data, NULL); -} - /** * ice_ptp_read_sdp_ac - read SDP available connections section from NVM * @hw: pointer to the HW struct @@ -5538,18 +5449,138 @@ int ice_ptp_read_sdp_ac(struct ice_hw *hw, __le16 *entries, uint 
*num_entries) */ static void ice_ptp_init_phy_e810(struct ice_ptp_hw *ptp) { - ptp->phy_model = ICE_PHY_E810; ptp->num_lports = 8; ptp->ports_per_phy = 4; init_waitqueue_head(&ptp->phy.e810.atqbal_wq); } +/* E830 functions + * + * The following functions operate on the E830 series devices. + * + */ + +/** + * ice_ptp_init_phc_e830 - Perform E830 specific PHC initialization + * @hw: pointer to HW struct + * + * Perform E830-specific PTP hardware clock initialization steps. + */ +static void ice_ptp_init_phc_e830(const struct ice_hw *hw) +{ + ice_ptp_cfg_sync_delay(hw, ICE_E810_E830_SYNC_DELAY); +} + +/** + * ice_ptp_write_direct_incval_e830 - Prep PHY port increment value change + * @hw: pointer to HW struct + * @incval: The new 40bit increment value to prepare + * + * Prepare the PHY port for a new increment value by programming the PHC + * GLTSYN_INCVAL_L and GLTSYN_INCVAL_H registers. The actual change is + * completed by FW automatically. + */ +static void ice_ptp_write_direct_incval_e830(const struct ice_hw *hw, + u64 incval) +{ + u8 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned; + + wr32(hw, GLTSYN_INCVAL_L(tmr_idx), lower_32_bits(incval)); + wr32(hw, GLTSYN_INCVAL_H(tmr_idx), upper_32_bits(incval)); +} + +/** + * ice_ptp_write_direct_phc_time_e830 - Prepare PHY port with initial time + * @hw: Board private structure + * @time: Time to initialize the PHY port clock to + * + * Program the PHY port ETH_GLTSYN_SHTIME registers in preparation for setting + * the initial clock time. The time will not actually be programmed until the + * driver issues an ICE_PTP_INIT_TIME command. + * + * The time value is the upper 32 bits of the PHY timer, usually in units of + * nominal nanoseconds. + */ +static void ice_ptp_write_direct_phc_time_e830(const struct ice_hw *hw, + u64 time) +{ + u8 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned; + + wr32(hw, GLTSYN_TIME_0(tmr_idx), 0); + wr32(hw, GLTSYN_TIME_L(tmr_idx), lower_32_bits(time)); + wr32(hw, GLTSYN_TIME_H(tmr_idx), upper_32_bits(time)); +} + +/** + * ice_ptp_port_cmd_e830 - Prepare all external PHYs for a timer command + * @hw: pointer to HW struct + * @cmd: Command to be sent to the port + * + * Prepare the external PHYs connected to this device for a timer sync + * command. + * + * Return: 0 on success, negative error code when PHY write failed + */ +static int ice_ptp_port_cmd_e830(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd) +{ + u32 val = ice_ptp_tmr_cmd_to_port_reg(hw, cmd); + + return ice_write_phy_reg_e810(hw, E830_ETH_GLTSYN_CMD, val); +} + +/** + * ice_read_phy_tstamp_e830 - Read a PHY timestamp out of the external PHY + * @hw: pointer to the HW struct + * @idx: the timestamp index to read + * @tstamp: on return, the 40bit timestamp value + * + * Read a 40bit timestamp value out of the timestamp block of the external PHY + * on the E830 device. + */ +static void ice_read_phy_tstamp_e830(const struct ice_hw *hw, u8 idx, + u64 *tstamp) +{ + u32 hi, lo; + + hi = rd32(hw, E830_PRTTSYN_TXTIME_H(idx)); + lo = rd32(hw, E830_PRTTSYN_TXTIME_L(idx)); + + /* For E830 devices, the timestamp is reported with the lower 32 bits + * in the low register, and the upper 8 bits in the high register.
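+ * The valid bits for these timestamps are exposed through + * E830_PRTMAC_TS_TX_MEM_VALID_L/H, read by + * ice_get_phy_tx_tstamp_ready_e830() below.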
+ */ + *tstamp = FIELD_PREP(PHY_EXT_40B_HIGH_M, hi) | + FIELD_PREP(PHY_EXT_40B_LOW_M, lo); +} + +/** + * ice_get_phy_tx_tstamp_ready_e830 - Read Tx memory status register + * @hw: pointer to the HW struct + * @port: the PHY port to read + * @tstamp_ready: contents of the Tx memory status register + */ +static void ice_get_phy_tx_tstamp_ready_e830(const struct ice_hw *hw, u8 port, + u64 *tstamp_ready) +{ + *tstamp_ready = rd32(hw, E830_PRTMAC_TS_TX_MEM_VALID_H); + *tstamp_ready <<= 32; + *tstamp_ready |= rd32(hw, E830_PRTMAC_TS_TX_MEM_VALID_L); +} + +/** + * ice_ptp_init_phy_e830 - initialize PHY parameters + * @ptp: pointer to the PTP HW struct + */ +static void ice_ptp_init_phy_e830(struct ice_ptp_hw *ptp) +{ + ptp->num_lports = 8; + ptp->ports_per_phy = 4; +} + /* Device agnostic functions * - * The following functions implement shared behavior common to both E822 and - * E810 devices, possibly calling a device specific implementation where - * necessary. + * The following functions implement shared behavior common to all devices, + * possibly calling a device specific implementation where necessary. */ /** @@ -5612,14 +5643,22 @@ void ice_ptp_init_hw(struct ice_hw *hw) { struct ice_ptp_hw *ptp = &hw->ptp; - if (ice_is_e822(hw) || ice_is_e823(hw)) - ice_ptp_init_phy_e82x(ptp); - else if (ice_is_e810(hw)) + switch (hw->mac_type) { + case ICE_MAC_E810: ice_ptp_init_phy_e810(ptp); - else if (ice_is_e825c(hw)) + break; + case ICE_MAC_E830: + ice_ptp_init_phy_e830(ptp); + break; + case ICE_MAC_GENERIC: + ice_ptp_init_phy_e82x(ptp); + break; + case ICE_MAC_GENERIC_3K_E825: ice_ptp_init_phy_e825(hw); - else - ptp->phy_model = ICE_PHY_UNSUP; + break; + default: + return; + } } /** @@ -5640,11 +5679,11 @@ void ice_ptp_init_hw(struct ice_hw *hw) static int ice_ptp_write_port_cmd(struct ice_hw *hw, u8 port, enum ice_ptp_tmr_cmd cmd) { - switch (ice_get_phy_model(hw)) { - case ICE_PHY_ETH56G: - return ice_ptp_write_port_cmd_eth56g(hw, port, cmd); - case ICE_PHY_E82X: + switch (hw->mac_type) { + case ICE_MAC_GENERIC: return ice_ptp_write_port_cmd_e82x(hw, port, cmd); + case ICE_MAC_GENERIC_3K_E825: + return ice_ptp_write_port_cmd_eth56g(hw, port, cmd); default: return -EOPNOTSUPP; } @@ -5705,9 +5744,11 @@ static int ice_ptp_port_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd) u32 port; /* PHY models which can program all ports simultaneously */ - switch (ice_get_phy_model(hw)) { - case ICE_PHY_E810: + switch (hw->mac_type) { + case ICE_MAC_E810: return ice_ptp_port_cmd_e810(hw, cmd); + case ICE_MAC_E830: + return ice_ptp_port_cmd_e830(hw, cmd); default: break; } @@ -5778,23 +5819,29 @@ int ice_ptp_init_time(struct ice_hw *hw, u64 time) tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned; /* Source timers */ + /* For E830 we don't need to use shadow registers, it's automatic */ + if (hw->mac_type == ICE_MAC_E830) { + ice_ptp_write_direct_phc_time_e830(hw, time); + return 0; + } + wr32(hw, GLTSYN_SHTIME_L(tmr_idx), lower_32_bits(time)); wr32(hw, GLTSYN_SHTIME_H(tmr_idx), upper_32_bits(time)); wr32(hw, GLTSYN_SHTIME_0(tmr_idx), 0); /* PHY timers */ /* Fill Rx and Tx ports and send msg to PHY */ - switch (ice_get_phy_model(hw)) { - case ICE_PHY_ETH56G: - err = ice_ptp_prep_phy_time_eth56g(hw, - (u32)(time & 0xFFFFFFFF)); - break; - case ICE_PHY_E810: + switch (hw->mac_type) { + case ICE_MAC_E810: err = ice_ptp_prep_phy_time_e810(hw, time & 0xFFFFFFFF); break; - case ICE_PHY_E82X: + case ICE_MAC_GENERIC: err = ice_ptp_prep_phy_time_e82x(hw, time & 0xFFFFFFFF); break; + case ICE_MAC_GENERIC_3K_E825: + err
= ice_ptp_prep_phy_time_eth56g(hw, + (u32)(time & 0xFFFFFFFF)); + break; default: err = -EOPNOTSUPP; } @@ -5826,20 +5873,26 @@ int ice_ptp_write_incval(struct ice_hw *hw, u64 incval) tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned; + /* For E830 we don't need to use shadow registers, it's automatic */ + if (hw->mac_type == ICE_MAC_E830) { + ice_ptp_write_direct_incval_e830(hw, incval); + return 0; + } + /* Shadow Adjust */ wr32(hw, GLTSYN_SHADJ_L(tmr_idx), lower_32_bits(incval)); wr32(hw, GLTSYN_SHADJ_H(tmr_idx), upper_32_bits(incval)); - switch (ice_get_phy_model(hw)) { - case ICE_PHY_ETH56G: - err = ice_ptp_prep_phy_incval_eth56g(hw, incval); - break; - case ICE_PHY_E810: + switch (hw->mac_type) { + case ICE_MAC_E810: err = ice_ptp_prep_phy_incval_e810(hw, incval); break; - case ICE_PHY_E82X: + case ICE_MAC_GENERIC: err = ice_ptp_prep_phy_incval_e82x(hw, incval); break; + case ICE_MAC_GENERIC_3K_E825: + err = ice_ptp_prep_phy_incval_eth56g(hw, incval); + break; default: err = -EOPNOTSUPP; } @@ -5899,16 +5952,19 @@ int ice_ptp_adj_clock(struct ice_hw *hw, s32 adj) wr32(hw, GLTSYN_SHADJ_L(tmr_idx), 0); wr32(hw, GLTSYN_SHADJ_H(tmr_idx), adj); - switch (ice_get_phy_model(hw)) { - case ICE_PHY_ETH56G: - err = ice_ptp_prep_phy_adj_eth56g(hw, adj); - break; - case ICE_PHY_E810: + switch (hw->mac_type) { + case ICE_MAC_E810: err = ice_ptp_prep_phy_adj_e810(hw, adj); break; - case ICE_PHY_E82X: + case ICE_MAC_E830: + /* E830 syncs PHYs automatically after setting GLTSYN_SHADJ */ + return 0; + case ICE_MAC_GENERIC: err = ice_ptp_prep_phy_adj_e82x(hw, adj); break; + case ICE_MAC_GENERIC_3K_E825: + err = ice_ptp_prep_phy_adj_eth56g(hw, adj); + break; default: err = -EOPNOTSUPP; } @@ -5932,13 +5988,16 @@ int ice_ptp_adj_clock(struct ice_hw *hw, s32 adj) */ int ice_read_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx, u64 *tstamp) { - switch (ice_get_phy_model(hw)) { - case ICE_PHY_ETH56G: - return ice_read_ptp_tstamp_eth56g(hw, block, idx, tstamp); - case ICE_PHY_E810: + switch (hw->mac_type) { + case ICE_MAC_E810: return ice_read_phy_tstamp_e810(hw, block, idx, tstamp); - case ICE_PHY_E82X: + case ICE_MAC_E830: + ice_read_phy_tstamp_e830(hw, idx, tstamp); + return 0; + case ICE_MAC_GENERIC: return ice_read_phy_tstamp_e82x(hw, block, idx, tstamp); + case ICE_MAC_GENERIC_3K_E825: + return ice_read_ptp_tstamp_eth56g(hw, block, idx, tstamp); default: return -EOPNOTSUPP; } @@ -5962,13 +6021,13 @@ int ice_read_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx, u64 *tstamp) */ int ice_clear_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx) { - switch (ice_get_phy_model(hw)) { - case ICE_PHY_ETH56G: - return ice_clear_ptp_tstamp_eth56g(hw, block, idx); - case ICE_PHY_E810: + switch (hw->mac_type) { + case ICE_MAC_E810: return ice_clear_phy_tstamp_e810(hw, block, idx); - case ICE_PHY_E82X: + case ICE_MAC_GENERIC: return ice_clear_phy_tstamp_e82x(hw, block, idx); + case ICE_MAC_GENERIC_3K_E825: + return ice_clear_ptp_tstamp_eth56g(hw, block, idx); default: return -EOPNOTSUPP; } @@ -6025,14 +6084,14 @@ static int ice_get_pf_c827_idx(struct ice_hw *hw, u8 *idx) */ void ice_ptp_reset_ts_memory(struct ice_hw *hw) { - switch (ice_get_phy_model(hw)) { - case ICE_PHY_ETH56G: - ice_ptp_reset_ts_memory_eth56g(hw); - break; - case ICE_PHY_E82X: + switch (hw->mac_type) { + case ICE_MAC_GENERIC: ice_ptp_reset_ts_memory_e82x(hw); break; - case ICE_PHY_E810: + case ICE_MAC_GENERIC_3K_E825: + ice_ptp_reset_ts_memory_eth56g(hw); + break; + case ICE_MAC_E810: default: return; } @@ -6054,13 +6113,16 @@ int ice_ptp_init_phc(struct
ice_hw *hw) /* Clear event err indications for auxiliary pins */ (void)rd32(hw, GLTSYN_STAT(src_idx)); - switch (ice_get_phy_model(hw)) { - case ICE_PHY_ETH56G: - return ice_ptp_init_phc_eth56g(hw); - case ICE_PHY_E810: + switch (hw->mac_type) { + case ICE_MAC_E810: return ice_ptp_init_phc_e810(hw); - case ICE_PHY_E82X: + case ICE_MAC_E830: + ice_ptp_init_phc_e830(hw); + return 0; + case ICE_MAC_GENERIC: return ice_ptp_init_phc_e82x(hw); + case ICE_MAC_GENERIC_3K_E825: + return ice_ptp_init_phc_e825(hw); default: return -EOPNOTSUPP; } @@ -6079,17 +6141,19 @@ int ice_ptp_init_phc(struct ice_hw *hw) */ int ice_get_phy_tx_tstamp_ready(struct ice_hw *hw, u8 block, u64 *tstamp_ready) { - switch (ice_get_phy_model(hw)) { - case ICE_PHY_ETH56G: - return ice_get_phy_tx_tstamp_ready_eth56g(hw, block, - tstamp_ready); - case ICE_PHY_E810: + switch (hw->mac_type) { + case ICE_MAC_E810: return ice_get_phy_tx_tstamp_ready_e810(hw, block, tstamp_ready); - case ICE_PHY_E82X: + case ICE_MAC_E830: + ice_get_phy_tx_tstamp_ready_e830(hw, block, tstamp_ready); + return 0; + case ICE_MAC_GENERIC: return ice_get_phy_tx_tstamp_ready_e82x(hw, block, tstamp_ready); - break; + case ICE_MAC_GENERIC_3K_E825: + return ice_get_phy_tx_tstamp_ready_eth56g(hw, block, + tstamp_ready); default: return -EOPNOTSUPP; } diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h index 6779ce120515a..e5925ccc26132 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h +++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h @@ -65,14 +65,14 @@ enum ice_eth56g_link_spd { /** * struct ice_phy_reg_info_eth56g - ETH56G PHY register parameters - * @base: base address for each PHY block + * @base_addr: base address for each PHY block * @step: step between PHY lanes * * Characteristic information for the various PHY register parameters in the * ETH56G devices */ struct ice_phy_reg_info_eth56g { - u32 base[NUM_ETH56G_PHY_RES]; + u32 base_addr; u32 step; }; @@ -324,6 +324,7 @@ extern const struct ice_vernier_info_e82x e822_vernier[NUM_ICE_PTP_LNK_SPD]; */ #define ICE_E810_PLL_FREQ 812500000 #define ICE_PTP_NOMINAL_INCVAL_E810 0x13b13b13bULL +#define ICE_E810_E830_SYNC_DELAY 0 /* Device agnostic functions */ u8 ice_get_ptp_src_clock_index(struct ice_hw *hw); @@ -395,7 +396,6 @@ int ice_phy_cfg_intr_e82x(struct ice_hw *hw, u8 quad, bool ena, u8 threshold); /* E810 family functions */ int ice_read_sma_ctrl(struct ice_hw *hw, u8 *data); int ice_write_sma_ctrl(struct ice_hw *hw, u8 data); -int ice_read_pca9575_reg(struct ice_hw *hw, u8 offset, u8 *data); int ice_ptp_read_sdp_ac(struct ice_hw *hw, __le16 *entries, uint *num_entries); int ice_cgu_get_num_pins(struct ice_hw *hw, bool input); enum dpll_pin_type ice_cgu_get_pin_type(struct ice_hw *hw, u8 pin, bool input); @@ -431,13 +431,14 @@ int ice_phy_cfg_ptp_1step_eth56g(struct ice_hw *hw, u8 port); */ static inline u64 ice_get_base_incval(struct ice_hw *hw) { - switch (hw->ptp.phy_model) { - case ICE_PHY_ETH56G: - return ICE_ETH56G_NOMINAL_INCVAL; - case ICE_PHY_E810: + switch (hw->mac_type) { + case ICE_MAC_E810: + case ICE_MAC_E830: return ICE_PTP_NOMINAL_INCVAL_E810; - case ICE_PHY_E82X: + case ICE_MAC_GENERIC: return ice_e82x_nominal_incval(ice_e82x_time_ref(hw)); + case ICE_MAC_GENERIC_3K_E825: + return ICE_ETH56G_NOMINAL_INCVAL; default: return 0; } @@ -650,18 +651,25 @@ static inline bool ice_is_dual(struct ice_hw *hw) /* E810 timer command register */ #define E810_ETH_GLTSYN_CMD 0x03000344 +/* E830 timer command register */ +#define 
E830_ETH_GLTSYN_CMD 0x00088814 + +/* E830 PHC time register */ +#define E830_GLTSYN_TIME_L(_tmr_idx) (0x0008A000 + 0x1000 * (_tmr_idx)) + /* Source timer incval macros */ #define INCVAL_HIGH_M 0xFF -/* Timestamp block macros */ +/* PHY 40b register macros */ +#define PHY_EXT_40B_LOW_M GENMASK(31, 0) +#define PHY_EXT_40B_HIGH_M GENMASK_ULL(39, 32) +#define PHY_40B_LOW_M GENMASK(7, 0) +#define PHY_40B_HIGH_M GENMASK_ULL(39, 8) #define TS_VALID BIT(0) #define TS_LOW_M 0xFFFFFFFF #define TS_HIGH_M 0xFF #define TS_HIGH_S 32 -#define TS_PHY_LOW_M GENMASK(7, 0) -#define TS_PHY_HIGH_M GENMASK_ULL(39, 8) - #define BYTES_PER_IDX_ADDR_L_U 8 #define BYTES_PER_IDX_ADDR_L 4 @@ -772,36 +780,19 @@ static inline bool ice_is_dual(struct ice_hw *hw) #define PHY_MAC_XIF_TS_SFD_ENA_M ICE_M(0x1, 20) #define PHY_MAC_XIF_GMII_TS_SEL_M ICE_M(0x1, 21) -/* GPCS config register */ -#define PHY_GPCS_CONFIG_REG0 0x268 -#define PHY_GPCS_CONFIG_REG0_TX_THR_M ICE_M(0xF, 24) -#define PHY_GPCS_BITSLIP 0x5C - #define PHY_TS_INT_CONFIG_THRESHOLD_M ICE_M(0x3F, 0) #define PHY_TS_INT_CONFIG_ENA_M BIT(6) -/* 1-step PTP config */ -#define PHY_PTP_1STEP_CONFIG 0x270 -#define PHY_PTP_1STEP_T1S_UP64_M ICE_M(0xF, 4) -#define PHY_PTP_1STEP_T1S_DELTA_M ICE_M(0xF, 8) -#define PHY_PTP_1STEP_PEER_DELAY(_port) (0x274 + 4 * (_port)) -#define PHY_PTP_1STEP_PD_ADD_PD_M ICE_M(0x1, 0) -#define PHY_PTP_1STEP_PD_DELAY_M ICE_M(0x3fffffff, 1) -#define PHY_PTP_1STEP_PD_DLY_V_M ICE_M(0x1, 31) - /* Macros to derive offsets for TimeStampLow and TimeStampHigh */ #define PHY_TSTAMP_L(x) (((x) * 8) + 0) #define PHY_TSTAMP_U(x) (((x) * 8) + 4) -#define PHY_REG_REVISION 0x85000 - #define PHY_REG_DESKEW_0 0x94 #define PHY_REG_DESKEW_0_RLEVEL GENMASK(6, 0) #define PHY_REG_DESKEW_0_RLEVEL_FRAC GENMASK(9, 7) #define PHY_REG_DESKEW_0_RLEVEL_FRAC_W 3 #define PHY_REG_DESKEW_0_VALID GENMASK(10, 10) -#define PHY_REG_GPCS_BITSLIP 0x5C #define PHY_REG_SD_BIT_SLIP(_port_offset) (0x29C + 4 * (_port_offset)) #define PHY_REVISION_ETH56G 0x10200 #define PHY_VENDOR_TXLANE_THRESH 0x2000C @@ -821,7 +812,21 @@ static inline bool ice_is_dual(struct ice_hw *hw) #define PHY_MAC_BLOCKTIME 0x50 #define PHY_MAC_MARKERTIME 0x54 #define PHY_MAC_TX_OFFSET 0x58 +#define PHY_GPCS_BITSLIP 0x5C #define PHY_PTP_INT_STATUS 0x7FD140 +/* ETH56G registers shared per quad */ +/* GPCS config register */ +#define PHY_GPCS_CONFIG_REG0 0x268 +#define PHY_GPCS_CONFIG_REG0_TX_THR_M GENMASK(27, 24) +/* 1-step PTP config */ +#define PHY_PTP_1STEP_CONFIG 0x270 +#define PHY_PTP_1STEP_T1S_UP64_M GENMASK(7, 4) +#define PHY_PTP_1STEP_T1S_DELTA_M GENMASK(11, 8) +#define PHY_PTP_1STEP_PEER_DELAY(_quad_lane) (0x274 + 4 * (_quad_lane)) +#define PHY_PTP_1STEP_PD_ADD_PD_M BIT(0) +#define PHY_PTP_1STEP_PD_DELAY_M GENMASK(30, 1) +#define PHY_PTP_1STEP_PD_DLY_V_M BIT(31) + #endif /* _ICE_PTP_HW_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c index b83f99c01d91b..f1648cf103b74 100644 --- a/drivers/net/ethernet/intel/ice/ice_sriov.c +++ b/drivers/net/ethernet/intel/ice/ice_sriov.c @@ -36,6 +36,7 @@ static void ice_free_vf_entries(struct ice_pf *pf) hash_for_each_safe(vfs->table, bkt, tmp, vf, entry) { hash_del_rcu(&vf->entry); + ice_deinitialize_vf_entry(vf); ice_put_vf(vf); } } @@ -122,27 +123,6 @@ static void ice_dis_vf_mappings(struct ice_vf *vf) dev_err(dev, "Scattered mode for VF Rx queues is not yet implemented\n"); } -/** - * ice_sriov_free_msix_res - Reset/free any used MSIX resources - * @pf: pointer to the PF structure - * - * Since no MSIX entries
are taken from the pf->irq_tracker then just clear - * the pf->sriov_base_vector. - * - * Returns 0 on success, and -EINVAL on error. - */ -static int ice_sriov_free_msix_res(struct ice_pf *pf) -{ - if (!pf) - return -EINVAL; - - bitmap_free(pf->sriov_irq_bm); - pf->sriov_irq_size = 0; - pf->sriov_base_vector = 0; - - return 0; -} - /** * ice_free_vfs - Free all VFs * @pf: pointer to the PF structure @@ -177,6 +157,7 @@ void ice_free_vfs(struct ice_pf *pf) ice_eswitch_detach_vf(pf, vf); ice_dis_vf_qs(vf); + ice_virt_free_irqs(pf, vf->first_vector_idx, vf->num_msix); if (test_bit(ICE_VF_STATE_INIT, vf->vf_states)) { /* disable VF qp mappings and set VF disable state */ @@ -193,16 +174,9 @@ void ice_free_vfs(struct ice_pf *pf) wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx)); } - /* clear malicious info since the VF is getting released */ - if (!ice_is_feature_supported(pf, ICE_F_MBX_LIMIT)) - list_del(&vf->mbx_info.list_entry); - mutex_unlock(&vf->cfg_lock); } - if (ice_sriov_free_msix_res(pf)) - dev_err(dev, "Failed to free MSIX resources used by SR-IOV\n"); - vfs->num_qps_per = 0; ice_free_vf_entries(pf); @@ -371,40 +345,6 @@ void ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector) q_vector->reg_idx = vf->first_vector_idx + q_vector->vf_reg_idx; } -/** - * ice_sriov_set_msix_res - Set any used MSIX resources - * @pf: pointer to PF structure - * @num_msix_needed: number of MSIX vectors needed for all SR-IOV VFs - * - * This function allows SR-IOV resources to be taken from the end of the PF's - * allowed HW MSIX vectors so that the irq_tracker will not be affected. We - * just set the pf->sriov_base_vector and return success. - * - * If there are not enough resources available, return an error. This should - * always be caught by ice_set_per_vf_res(). - * - * Return 0 on success, and -EINVAL when there are not enough MSIX vectors - * in the PF's space available for SR-IOV. 
- */ -static int ice_sriov_set_msix_res(struct ice_pf *pf, u16 num_msix_needed) -{ - u16 total_vectors = pf->hw.func_caps.common_cap.num_msix_vectors; - int vectors_used = ice_get_max_used_msix_vector(pf); - int sriov_base_vector; - - sriov_base_vector = total_vectors - num_msix_needed; - - /* make sure we only grab irq_tracker entries from the list end and - * that we have enough available MSIX vectors - */ - if (sriov_base_vector < vectors_used) - return -EINVAL; - - pf->sriov_base_vector = sriov_base_vector; - - return 0; -} - /** * ice_set_per_vf_res - check if vectors and queues are available * @pf: pointer to the PF structure @@ -429,11 +369,9 @@ static int ice_sriov_set_msix_res(struct ice_pf *pf, u16 num_msix_needed) */ static int ice_set_per_vf_res(struct ice_pf *pf, u16 num_vfs) { - int vectors_used = ice_get_max_used_msix_vector(pf); u16 num_msix_per_vf, num_txq, num_rxq, avail_qs; int msix_avail_per_vf, msix_avail_for_sriov; struct device *dev = ice_pf_to_dev(pf); - int err; lockdep_assert_held(&pf->vfs.table_lock); @@ -441,8 +379,7 @@ static int ice_set_per_vf_res(struct ice_pf *pf, u16 num_vfs) return -EINVAL; /* determine MSI-X resources per VF */ - msix_avail_for_sriov = pf->hw.func_caps.common_cap.num_msix_vectors - - vectors_used; + msix_avail_for_sriov = pf->virt_irq_tracker.num_entries; msix_avail_per_vf = msix_avail_for_sriov / num_vfs; if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_MED) { num_msix_per_vf = ICE_NUM_VF_MSIX_MED; @@ -481,13 +418,6 @@ static int ice_set_per_vf_res(struct ice_pf *pf, u16 num_vfs) return -ENOSPC; } - err = ice_sriov_set_msix_res(pf, num_msix_per_vf * num_vfs); - if (err) { - dev_err(dev, "Unable to set MSI-X resources for %d VFs, err %d\n", - num_vfs, err); - return err; - } - /* only allow equal Tx/Rx queue count (i.e. queue pairs) */ pf->vfs.num_qps_per = min_t(int, num_txq, num_rxq); pf->vfs.num_msix_per = num_msix_per_vf; @@ -497,52 +427,6 @@ static int ice_set_per_vf_res(struct ice_pf *pf, u16 num_vfs) return 0; } -/** - * ice_sriov_get_irqs - get irqs for SR-IOV usacase - * @pf: pointer to PF structure - * @needed: number of irqs to get - * - * This returns the first MSI-X vector index in PF space that is used by this - * VF. This index is used when accessing PF relative registers such as - * GLINT_VECT2FUNC and GLINT_DYN_CTL. - * This will always be the OICR index in the AVF driver so any functionality - * using vf->first_vector_idx for queue configuration_id: id of VF which will - * use this irqs - * - * Only SRIOV specific vectors are tracked in sriov_irq_bm. SRIOV vectors are - * allocated from the end of global irq index. First bit in sriov_irq_bm means - * last irq index etc. It simplifies extension of SRIOV vectors. - * They will be always located from sriov_base_vector to the last irq - * index. While increasing/decreasing sriov_base_vector can be moved. 
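To make the mirrored indexing concrete, here is the arithmetic worked through with assumed numbers (not values taken from this patch): with sriov_irq_size = 64 and needed = 4, a search that finds the first free area at bitmap offset res = 0 hands out global vectors 60..63, since

	index = sriov_irq_size - res - needed = 64 - 0 - 4 = 60

Bit 0 of sriov_irq_bm therefore tracks the last global MSI-X vector, and VF vectors are always carved from the top of the PF's vector space; the ice_virt_get_irqs()/ice_virt_free_irqs() helpers used by the new code take over this bookkeeping.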
- */ -static int ice_sriov_get_irqs(struct ice_pf *pf, u16 needed) -{ - int res = bitmap_find_next_zero_area(pf->sriov_irq_bm, - pf->sriov_irq_size, 0, needed, 0); - /* conversion from number in bitmap to global irq index */ - int index = pf->sriov_irq_size - res - needed; - - if (res >= pf->sriov_irq_size || index < pf->sriov_base_vector) - return -ENOENT; - - bitmap_set(pf->sriov_irq_bm, res, needed); - return index; -} - -/** - * ice_sriov_free_irqs - free irqs used by the VF - * @pf: pointer to PF structure - * @vf: pointer to VF structure - */ -static void ice_sriov_free_irqs(struct ice_pf *pf, struct ice_vf *vf) -{ - /* Move back from first vector index to first index in bitmap */ - int bm_i = pf->sriov_irq_size - vf->first_vector_idx - vf->num_msix; - - bitmap_clear(pf->sriov_irq_bm, bm_i, vf->num_msix); - vf->first_vector_idx = 0; -} - /** * ice_init_vf_vsi_res - initialize/setup VF VSI resources * @vf: VF to initialize/setup the VSI for @@ -556,7 +440,7 @@ static int ice_init_vf_vsi_res(struct ice_vf *vf) struct ice_vsi *vsi; int err; - vf->first_vector_idx = ice_sriov_get_irqs(pf, vf->num_msix); + vf->first_vector_idx = ice_virt_get_irqs(pf, vf->num_msix); if (vf->first_vector_idx < 0) return -ENOMEM; @@ -856,16 +740,10 @@ static int ice_create_vf_entries(struct ice_pf *pf, u16 num_vfs) */ static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs) { - int total_vectors = pf->hw.func_caps.common_cap.num_msix_vectors; struct device *dev = ice_pf_to_dev(pf); struct ice_hw *hw = &pf->hw; int ret; - pf->sriov_irq_bm = bitmap_zalloc(total_vectors, GFP_KERNEL); - if (!pf->sriov_irq_bm) - return -ENOMEM; - pf->sriov_irq_size = total_vectors; - /* Disable global interrupt 0 so we don't try to handle the VFLR. */ wr32(hw, GLINT_DYN_CTL(pf->oicr_irq.index), ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S); @@ -918,7 +796,6 @@ static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs) /* rearm interrupts here */ ice_irq_dynamic_ena(hw, NULL, NULL); clear_bit(ICE_OICR_INTR_DIS, pf->state); - bitmap_free(pf->sriov_irq_bm); return ret; } @@ -992,16 +869,7 @@ u32 ice_sriov_get_vf_total_msix(struct pci_dev *pdev) { struct ice_pf *pf = pci_get_drvdata(pdev); - return pf->sriov_irq_size - ice_get_max_used_msix_vector(pf); -} - -static int ice_sriov_move_base_vector(struct ice_pf *pf, int move) -{ - if (pf->sriov_base_vector - move < ice_get_max_used_msix_vector(pf)) - return -ENOMEM; - - pf->sriov_base_vector -= move; - return 0; + return pf->virt_irq_tracker.num_entries; } static void ice_sriov_remap_vectors(struct ice_pf *pf, u16 restricted_id) @@ -1020,7 +888,8 @@ static void ice_sriov_remap_vectors(struct ice_pf *pf, u16 restricted_id) continue; ice_dis_vf_mappings(tmp_vf); - ice_sriov_free_irqs(pf, tmp_vf); + ice_virt_free_irqs(pf, tmp_vf->first_vector_idx, + tmp_vf->num_msix); vf_ids[to_remap] = tmp_vf->vf_id; to_remap += 1; @@ -1032,7 +901,7 @@ static void ice_sriov_remap_vectors(struct ice_pf *pf, u16 restricted_id) continue; tmp_vf->first_vector_idx = - ice_sriov_get_irqs(pf, tmp_vf->num_msix); + ice_virt_get_irqs(pf, tmp_vf->num_msix); /* there is no need to rebuild VSI as we are only changing the * vector indexes not amount of MSI-X or queues */ @@ -1105,20 +974,15 @@ int ice_sriov_set_msix_vec_count(struct pci_dev *vf_dev, int msix_vec_count) prev_msix = vf->num_msix; prev_queues = vf->num_vf_qs; - if (ice_sriov_move_base_vector(pf, msix_vec_count - prev_msix)) { - ice_put_vf(vf); - return -ENOSPC; - } - ice_dis_vf_mappings(vf); - ice_sriov_free_irqs(pf, vf); + ice_virt_free_irqs(pf, 
vf->first_vector_idx, vf->num_msix); /* Remap all VFs beside the one is now configured */ ice_sriov_remap_vectors(pf, vf->vf_id); vf->num_msix = msix_vec_count; vf->num_vf_qs = queues; - vf->first_vector_idx = ice_sriov_get_irqs(pf, vf->num_msix); + vf->first_vector_idx = ice_virt_get_irqs(pf, vf->num_msix); if (vf->first_vector_idx < 0) goto unroll; @@ -1147,7 +1011,8 @@ int ice_sriov_set_msix_vec_count(struct pci_dev *vf_dev, int msix_vec_count) vf->num_msix = prev_msix; vf->num_vf_qs = prev_queues; - vf->first_vector_idx = ice_sriov_get_irqs(pf, vf->num_msix); + + vf->first_vector_idx = ice_virt_get_irqs(pf, vf->num_msix); if (vf->first_vector_idx < 0) { ice_put_vf(vf); return -EINVAL; diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c index 9c9ea4c1b93b7..1e4f6f6ee449c 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.c +++ b/drivers/net/ethernet/intel/ice/ice_txrx.c @@ -1809,6 +1809,7 @@ ice_tx_map(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first, static int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off) { + const struct ice_tx_ring *tx_ring = off->tx_ring; u32 l4_len = 0, l3_len = 0, l2_len = 0; struct sk_buff *skb = first->skb; union { @@ -1958,6 +1959,30 @@ int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off) l3_len = l4.hdr - ip.hdr; offset |= (l3_len / 4) << ICE_TX_DESC_LEN_IPLEN_S; + if ((tx_ring->netdev->features & NETIF_F_HW_CSUM) && + !(first->tx_flags & ICE_TX_FLAGS_TSO) && + !skb_csum_is_sctp(skb)) { + /* Set GCS */ + u16 csum_start = (skb->csum_start - skb->mac_header) / 2; + u16 csum_offset = skb->csum_offset / 2; + u16 gcs_params; + + gcs_params = FIELD_PREP(ICE_TX_GCS_DESC_START_M, csum_start) | + FIELD_PREP(ICE_TX_GCS_DESC_OFFSET_M, csum_offset) | + FIELD_PREP(ICE_TX_GCS_DESC_TYPE_M, + ICE_TX_GCS_DESC_CSUM_PSH); + + /* Unlike legacy HW checksums, GCS requires a context + * descriptor. 
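As a worked sketch of the packing above, assuming an untunneled IPv4/UDP frame (the ICE_TX_GCS_DESC_* field layouts are defined elsewhere in this patch):

	/* 14 (eth) + 20 (ipv4) = 34 bytes from MAC header to L4 header */
	u16 csum_start  = 34 / 2;				/* 17 half-words */
	/* the UDP checksum lives 6 bytes into the L4 header */
	u16 csum_offset = offsetof(struct udphdr, check) / 2;	/* 3 half-words */
	u16 gcs_params  = FIELD_PREP(ICE_TX_GCS_DESC_START_M, csum_start) |
			  FIELD_PREP(ICE_TX_GCS_DESC_OFFSET_M, csum_offset) |
			  FIELD_PREP(ICE_TX_GCS_DESC_TYPE_M,
				     ICE_TX_GCS_DESC_CSUM_PSH);

The half-word units follow from the divisions by two in the code above: hardware checksums from csum_start to the end of the frame and writes the result csum_offset half-words past csum_start.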
+ */ + off->cd_qw1 |= ICE_TX_DESC_DTYPE_CTX; + off->cd_gcs_params = gcs_params; + /* Fill out CSO info in data descriptors */ + off->td_offset |= offset; + off->td_cmd |= cmd; + return 1; + } + /* Enable L4 checksum offloads */ switch (l4_proto) { case IPPROTO_TCP: @@ -2424,7 +2449,9 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_tx_ring *tx_ring) ICE_TXD_CTX_QW1_CMD_S); ice_tstamp(tx_ring, skb, first, &offload); - if (ice_is_switchdev_running(vsi->back) && vsi->type != ICE_VSI_SF) + if ((ice_is_switchdev_running(vsi->back) || + ice_lag_is_switchdev_running(vsi->back)) && + vsi->type != ICE_VSI_SF) ice_eswitch_set_target_vsi(skb, &offload); if (offload.cd_qw1 & ICE_TX_DESC_DTYPE_CTX) { @@ -2439,7 +2466,7 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_tx_ring *tx_ring) /* setup context descriptor */ cdesc->tunneling_params = cpu_to_le32(offload.cd_tunnel_params); cdesc->l2tag2 = cpu_to_le16(offload.cd_l2tag2); - cdesc->rsvd = cpu_to_le16(0); + cdesc->gcs = cpu_to_le16(offload.cd_gcs_params); cdesc->qw1 = cpu_to_le64(offload.cd_qw1); } diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h index 806bce701df34..a4b1e95146327 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.h +++ b/drivers/net/ethernet/intel/ice/ice_txrx.h @@ -193,6 +193,7 @@ struct ice_tx_offload_params { u32 td_l2tag1; u32 cd_tunnel_params; u16 cd_l2tag2; + u16 cd_gcs_params; u8 header_len; }; @@ -366,6 +367,7 @@ struct ice_rx_ring { #define ICE_RX_FLAGS_RING_BUILD_SKB BIT(1) #define ICE_RX_FLAGS_CRC_STRIP_DIS BIT(2) #define ICE_RX_FLAGS_MULTIDEV BIT(3) +#define ICE_RX_FLAGS_RING_GCS BIT(4) u8 flags; /* CL5 - 5th cacheline starts here */ struct xdp_rxq_info xdp_rxq; diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c index 2719f0e20933f..45cfaabc41cbe 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c @@ -80,6 +80,23 @@ ice_rx_hash_to_skb(const struct ice_rx_ring *rx_ring, libeth_rx_pt_set_hash(skb, hash, decoded); } +/** + * ice_rx_gcs - Set generic checksum in skb + * @skb: skb currently being received and modified + * @rx_desc: receive descriptor + */ +static void ice_rx_gcs(struct sk_buff *skb, + const union ice_32b_rx_flex_desc *rx_desc) +{ + const struct ice_32b_rx_flex_desc_nic *desc; + u16 csum; + + desc = (struct ice_32b_rx_flex_desc_nic *)rx_desc; + skb->ip_summed = CHECKSUM_COMPLETE; + csum = (__force u16)desc->raw_csum; + skb->csum = csum_unfold((__force __sum16)swab16(csum)); +} + /** * ice_rx_csum - Indicate in skb if checksum is good * @ring: the ring we care about @@ -107,6 +124,15 @@ ice_rx_csum(struct ice_rx_ring *ring, struct sk_buff *skb, rx_status0 = le16_to_cpu(rx_desc->wb.status_error0); rx_status1 = le16_to_cpu(rx_desc->wb.status_error1); + if ((ring->flags & ICE_RX_FLAGS_RING_GCS) && + rx_desc->wb.rxdid == ICE_RXDID_FLEX_NIC && + (decoded.inner_prot == LIBETH_RX_PT_INNER_TCP || + decoded.inner_prot == LIBETH_RX_PT_INNER_UDP || + decoded.inner_prot == LIBETH_RX_PT_INNER_ICMP)) { + ice_rx_gcs(skb, rx_desc); + return; + } + /* check if HW has decoded the packet and checksum */ if (!(rx_status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_L3L4P_S))) return; diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h index 33a1a5934c0d5..0aab21113cc43 100644 --- a/drivers/net/ethernet/intel/ice/ice_type.h +++ b/drivers/net/ethernet/intel/ice/ice_type.h @@ -871,14 +871,6 @@ union ice_phy_params { struct 
ice_eth56g_params eth56g; }; -/* PHY model */ -enum ice_phy_model { - ICE_PHY_UNSUP = -1, - ICE_PHY_E810 = 1, - ICE_PHY_E82X, - ICE_PHY_ETH56G, -}; - /* Global Link Topology */ enum ice_global_link_topo { ICE_LINK_TOPO_UP_TO_2_LINKS, @@ -888,7 +880,6 @@ enum ice_global_link_topo { }; struct ice_ptp_hw { - enum ice_phy_model phy_model; union ice_phy_params phy; u8 num_lports; u8 ports_per_phy; diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.c b/drivers/net/ethernet/intel/ice/ice_vf_lib.c index c7c0c2f50c265..815ad0bfe8326 100644 --- a/drivers/net/ethernet/intel/ice/ice_vf_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.c @@ -1036,6 +1036,14 @@ void ice_initialize_vf_entry(struct ice_vf *vf) mutex_init(&vf->cfg_lock); } +void ice_deinitialize_vf_entry(struct ice_vf *vf) +{ + struct ice_pf *pf = vf->pf; + + if (!ice_is_feature_supported(pf, ICE_F_MBX_LIMIT)) + list_del(&vf->mbx_info.list_entry); +} + /** * ice_dis_vf_qs - Disable the VF queues * @vf: pointer to the VF structure diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.h b/drivers/net/ethernet/intel/ice/ice_vf_lib.h index 4261fe1c2bcd7..799b2c1f11844 100644 --- a/drivers/net/ethernet/intel/ice/ice_vf_lib.h +++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.h @@ -124,6 +124,9 @@ struct ice_vf { u8 spoofchk:1; u8 link_forced:1; u8 link_up:1; /* only valid if VF link is forced */ + + u32 ptp_caps; + unsigned int min_tx_rate; /* Minimum Tx bandwidth limit in Mbps */ unsigned int max_tx_rate; /* Maximum Tx bandwidth limit in Mbps */ DECLARE_BITMAP(vf_states, ICE_VF_STATES_NBITS); /* VF runtime states */ diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h b/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h index 0c7e77c0a09fa..5392b04049862 100644 --- a/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h +++ b/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h @@ -24,6 +24,7 @@ #endif void ice_initialize_vf_entry(struct ice_vf *vf); +void ice_deinitialize_vf_entry(struct ice_vf *vf); void ice_dis_vf_qs(struct ice_vf *vf); int ice_check_vf_init(struct ice_vf *vf); enum virtchnl_status_code ice_err_to_virt_err(int err); diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c index ff4ad788d96ac..7c3006eb68dd0 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c @@ -498,6 +498,9 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg) if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_QOS) vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_QOS; + if (vf->driver_caps & VIRTCHNL_VF_CAP_PTP) + vfres->vf_cap_flags |= VIRTCHNL_VF_CAP_PTP; + vfres->num_vsis = 1; /* Tx and Rx queue are equal for VF */ vfres->num_queue_pairs = vsi->num_txq; @@ -562,7 +565,7 @@ bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id) * * check for the valid queue ID */ -static bool ice_vc_isvalid_q_id(struct ice_vsi *vsi, u8 qid) +static bool ice_vc_isvalid_q_id(struct ice_vsi *vsi, u16 qid) { /* allocated Tx and Rx queues should be always equal for VF VSI */ return qid < vsi->alloc_txq; @@ -1862,15 +1865,33 @@ static int ice_vc_cfg_q_bw(struct ice_vf *vf, u8 *msg) for (i = 0; i < qbw->num_queues; i++) { if (qbw->cfg[i].shaper.peak != 0 && vf->max_tx_rate != 0 && - qbw->cfg[i].shaper.peak > vf->max_tx_rate) + qbw->cfg[i].shaper.peak > vf->max_tx_rate) { dev_warn(ice_pf_to_dev(vf->pf), "The maximum queue %d rate limit configuration may not take effect because the maximum TX rate for VF-%d is %d\n", qbw->cfg[i].queue_id, vf->vf_id, 
vf->max_tx_rate); + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto err; + } if (qbw->cfg[i].shaper.committed != 0 && vf->min_tx_rate != 0 && - qbw->cfg[i].shaper.committed < vf->min_tx_rate) + qbw->cfg[i].shaper.committed < vf->min_tx_rate) { dev_warn(ice_pf_to_dev(vf->pf), "The minimum queue %d rate limit configuration may not take effect because the minimum TX rate for VF-%d is %d\n", qbw->cfg[i].queue_id, vf->vf_id, - vf->max_tx_rate); + vf->min_tx_rate); + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto err; + } + if (qbw->cfg[i].queue_id > vf->num_vf_qs) { + dev_warn(ice_pf_to_dev(vf->pf), "VF-%d trying to configure invalid queue_id\n", + vf->vf_id); + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto err; + } + if (qbw->cfg[i].tc >= ICE_MAX_TRAFFIC_CLASS) { + dev_warn(ice_pf_to_dev(vf->pf), "VF-%d trying to configure a traffic class higher than allowed\n", + vf->vf_id); + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto err; + } } for (i = 0; i < qbw->num_queues; i++) { @@ -1900,13 +1921,21 @@ static int ice_vc_cfg_q_bw(struct ice_vf *vf, u8 *msg) */ static int ice_vc_cfg_q_quanta(struct ice_vf *vf, u8 *msg) { + u16 quanta_prof_id, quanta_size, start_qid, num_queues, end_qid, i; enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; - u16 quanta_prof_id, quanta_size, start_qid, end_qid, i; struct virtchnl_quanta_cfg *qquanta = (struct virtchnl_quanta_cfg *)msg; struct ice_vsi *vsi; int ret; + start_qid = qquanta->queue_select.start_queue_id; + num_queues = qquanta->queue_select.num_queues; + + if (check_add_overflow(start_qid, num_queues, &end_qid)) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto err; + } + if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto err; @@ -1918,8 +1947,6 @@ static int ice_vc_cfg_q_quanta(struct ice_vf *vf, u8 *msg) goto err; } - end_qid = qquanta->queue_select.start_queue_id + - qquanta->queue_select.num_queues; if (end_qid > ICE_MAX_RSS_QS_PER_VF || end_qid > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) { dev_err(ice_pf_to_dev(vf->pf), "VF-%d trying to configure more than allocated number of queues: %d\n", @@ -1948,7 +1975,6 @@ static int ice_vc_cfg_q_quanta(struct ice_vf *vf, u8 *msg) goto err; } - start_qid = qquanta->queue_select.start_queue_id; for (i = start_qid; i < end_qid; i++) vsi->tx_rings[i]->quanta_prof_id = quanta_prof_id; @@ -1975,6 +2001,7 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg) struct ice_vsi *vsi; u8 act_prt, pri_prt; int i = -1, q_idx; + bool ena_ts; lag = pf->lag; mutex_lock(&pf->lag_mutex); @@ -2104,9 +2131,14 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg) rxdid = ICE_RXDID_LEGACY_1; } + ena_ts = ((vf->driver_caps & + VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC) && + (vf->driver_caps & VIRTCHNL_VF_CAP_PTP) && + (qpi->rxq.flags & VIRTCHNL_PTP_RX_TSTAMP)); + ice_write_qrxflxp_cntxt(&vsi->back->hw, - vsi->rxq_map[q_idx], - rxdid, 0x03, false); + vsi->rxq_map[q_idx], rxdid, + ICE_RXDID_PRIO, ena_ts); } } @@ -3031,8 +3063,8 @@ static int ice_vc_set_rss_hena(struct ice_vf *vf, u8 *msg) static int ice_vc_query_rxdid(struct ice_vf *vf) { enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; - struct virtchnl_supported_rxdids rxdid = {}; struct ice_pf *pf = vf->pf; + u64 rxdid; if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; @@ -3044,7 +3076,7 @@ static int ice_vc_query_rxdid(struct ice_vf *vf) goto err; } - rxdid.supported_rxdids = pf->supported_rxdids; + rxdid = pf->supported_rxdids; err: return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_SUPPORTED_RXDIDS, @@ 
-4092,6 +4124,59 @@ static int ice_vc_dis_vlan_insertion_v2_msg(struct ice_vf *vf, u8 *msg) v_ret, NULL, 0); } +static int ice_vc_get_ptp_cap(struct ice_vf *vf, + const struct virtchnl_ptp_caps *msg) +{ + enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_ERR_PARAM; + u32 caps = VIRTCHNL_1588_PTP_CAP_RX_TSTAMP | + VIRTCHNL_1588_PTP_CAP_READ_PHC; + + if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) + goto err; + + v_ret = VIRTCHNL_STATUS_SUCCESS; + + if (msg->caps & caps) + vf->ptp_caps = caps; + +err: + /* send the response back to the VF */ + return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_1588_PTP_GET_CAPS, v_ret, + (u8 *)&vf->ptp_caps, + sizeof(struct virtchnl_ptp_caps)); +} + +static int ice_vc_get_phc_time(struct ice_vf *vf) +{ + enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_ERR_PARAM; + struct virtchnl_phc_time *phc_time = NULL; + struct ice_pf *pf = vf->pf; + u32 len = 0; + int ret; + + if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) + goto err; + + v_ret = VIRTCHNL_STATUS_SUCCESS; + + phc_time = kzalloc(sizeof(*phc_time), GFP_KERNEL); + if (!phc_time) { + v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY; + goto err; + } + + len = sizeof(*phc_time); + + phc_time->time = ice_ptp_read_src_clk_reg(pf, NULL); + +err: + /* send the response back to the VF */ + ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_1588_PTP_GET_TIME, v_ret, + (u8 *)phc_time, len); + kfree(phc_time); + return ret; +} + static const struct ice_virtchnl_ops ice_virtchnl_dflt_ops = { .get_ver_msg = ice_vc_get_ver_msg, .get_vf_res_msg = ice_vc_get_vf_res_msg, @@ -4128,6 +4213,8 @@ static const struct ice_virtchnl_ops ice_virtchnl_dflt_ops = { .get_qos_caps = ice_vc_get_qos_caps, .cfg_q_bw = ice_vc_cfg_q_bw, .cfg_q_quanta = ice_vc_cfg_q_quanta, + .get_ptp_cap = ice_vc_get_ptp_cap, + .get_phc_time = ice_vc_get_phc_time, /* If you add a new op here please make sure to add it to * ice_virtchnl_repr_ops as well. */ @@ -4264,6 +4351,8 @@ static const struct ice_virtchnl_ops ice_virtchnl_repr_ops = { .get_qos_caps = ice_vc_get_qos_caps, .cfg_q_bw = ice_vc_cfg_q_bw, .cfg_q_quanta = ice_vc_cfg_q_quanta, + .get_ptp_cap = ice_vc_get_ptp_cap, + .get_phc_time = ice_vc_get_phc_time, }; /** @@ -4501,6 +4590,12 @@ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event, case VIRTCHNL_OP_CONFIG_QUANTA: err = ops->cfg_q_quanta(vf, msg); break; + case VIRTCHNL_OP_1588_PTP_GET_CAPS: + err = ops->get_ptp_cap(vf, (const void *)msg); + break; + case VIRTCHNL_OP_1588_PTP_GET_TIME: + err = ops->get_phc_time(vf); + break; case VIRTCHNL_OP_UNKNOWN: default: dev_err(dev, "Unsupported opcode %d from VF %d\n", v_opcode, diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.h b/drivers/net/ethernet/intel/ice/ice_virtchnl.h index 0c629aef9baf1..222990f229d5e 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl.h +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.h @@ -26,6 +26,9 @@ #define ICE_MAX_MACADDR_PER_VF 18 #define ICE_FLEX_DESC_RXDID_MAX_NUM 64 +/* Priority to be compared against previous priority from the pipe */ +#define ICE_RXDID_PRIO 0x03 + /* VFs only get a single VSI. For ice hardware, the VF does not need to know * its VSI index. However, the virtchnl interface requires a VSI number, * mainly due to legacy hardware. 
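The quanta-config path earlier in this patch guards its u16 index arithmetic with check_add_overflow(); a minimal self-contained illustration of the pattern (hypothetical helper, not driver code):

	#include <linux/errno.h>
	#include <linux/overflow.h>
	#include <linux/types.h>

	static int validate_qid_range(u16 start_qid, u16 num_queues)
	{
		u16 end_qid;

		/* e.g. start_qid = 65000, num_queues = 1000: the true sum
		 * (66000) exceeds U16_MAX, so the u16 result would wrap to
		 * 464 and slip past any later bounds check; instead,
		 * check_add_overflow() returns true and the range is
		 * rejected before it is ever used.
		 */
		if (check_add_overflow(start_qid, num_queues, &end_qid))
			return -ERANGE;

		return 0;
	}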
@@ -72,6 +75,9 @@ struct ice_virtchnl_ops { int (*cfg_q_tc_map)(struct ice_vf *vf, u8 *msg); int (*cfg_q_bw)(struct ice_vf *vf, u8 *msg); int (*cfg_q_quanta)(struct ice_vf *vf, u8 *msg); + int (*get_ptp_cap)(struct ice_vf *vf, + const struct virtchnl_ptp_caps *msg); + int (*get_phc_time)(struct ice_vf *vf); }; #ifdef CONFIG_PCI_IOV diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c index c105a82ee1364..a3d1579a619a5 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c @@ -84,6 +84,12 @@ static const u32 fdir_pf_allowlist_opcodes[] = { VIRTCHNL_OP_ADD_FDIR_FILTER, VIRTCHNL_OP_DEL_FDIR_FILTER, }; +/* VIRTCHNL_VF_CAP_PTP */ +static const u32 ptp_allowlist_opcodes[] = { + VIRTCHNL_OP_1588_PTP_GET_CAPS, + VIRTCHNL_OP_1588_PTP_GET_TIME, +}; + static const u32 tc_allowlist_opcodes[] = { VIRTCHNL_OP_GET_QOS_CAPS, VIRTCHNL_OP_CONFIG_QUEUE_BW, VIRTCHNL_OP_CONFIG_QUANTA, @@ -110,6 +116,7 @@ static const struct allowlist_opcode_info allowlist_opcodes[] = { ALLOW_ITEM(VIRTCHNL_VF_OFFLOAD_FDIR_PF, fdir_pf_allowlist_opcodes), ALLOW_ITEM(VIRTCHNL_VF_OFFLOAD_VLAN_V2, vlan_v2_allowlist_opcodes), ALLOW_ITEM(VIRTCHNL_VF_OFFLOAD_QOS, tc_allowlist_opcodes), + ALLOW_ITEM(VIRTCHNL_VF_CAP_PTP, ptp_allowlist_opcodes), }; /** diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c index 14e3f0f89c78d..9be4bd717512d 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c @@ -832,21 +832,27 @@ ice_vc_fdir_parse_raw(struct ice_vf *vf, struct virtchnl_proto_hdrs *proto, struct virtchnl_fdir_fltr_conf *conf) { - u8 *pkt_buf, *msk_buf __free(kfree); + u8 *pkt_buf, *msk_buf __free(kfree) = NULL; struct ice_parser_result rslt; struct ice_pf *pf = vf->pf; + u16 pkt_len, udp_port = 0; struct ice_parser *psr; int status = -ENOMEM; struct ice_hw *hw; - u16 udp_port = 0; - pkt_buf = kzalloc(proto->raw.pkt_len, GFP_KERNEL); - msk_buf = kzalloc(proto->raw.pkt_len, GFP_KERNEL); + pkt_len = proto->raw.pkt_len; + + if (!pkt_len || pkt_len > VIRTCHNL_MAX_SIZE_RAW_PACKET) + return -EINVAL; + + pkt_buf = kzalloc(pkt_len, GFP_KERNEL); + msk_buf = kzalloc(pkt_len, GFP_KERNEL); + if (!pkt_buf || !msk_buf) goto err_mem_alloc; - memcpy(pkt_buf, proto->raw.spec, proto->raw.pkt_len); - memcpy(msk_buf, proto->raw.mask, proto->raw.pkt_len); + memcpy(pkt_buf, proto->raw.spec, pkt_len); + memcpy(msk_buf, proto->raw.mask, pkt_len); hw = &pf->hw; @@ -862,7 +868,7 @@ ice_vc_fdir_parse_raw(struct ice_vf *vf, if (ice_get_open_tunnel_port(hw, &udp_port, TNL_VXLAN)) ice_parser_vxlan_tunnel_set(psr, udp_port, true); - status = ice_parser_run(psr, pkt_buf, proto->raw.pkt_len, &rslt); + status = ice_parser_run(psr, pkt_buf, pkt_len, &rslt); if (status) goto err_parser_destroy; @@ -876,7 +882,7 @@ ice_vc_fdir_parse_raw(struct ice_vf *vf, } status = ice_parser_profile_init(&rslt, pkt_buf, msk_buf, - proto->raw.pkt_len, ICE_BLK_FD, + pkt_len, ICE_BLK_FD, conf->prof); if (status) goto err_parser_profile_init; @@ -885,7 +891,7 @@ ice_vc_fdir_parse_raw(struct ice_vf *vf, ice_parser_profile_dump(hw, conf->prof); /* Store raw flow info into @conf */ - conf->pkt_len = proto->raw.pkt_len; + conf->pkt_len = pkt_len; conf->pkt_buf = pkt_buf; conf->parser_ena = true; diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c index 
8975d2971bc37..a3a4eaa17739a 100644 --- a/drivers/net/ethernet/intel/ice/ice_xsk.c +++ b/drivers/net/ethernet/intel/ice/ice_xsk.c @@ -2,6 +2,7 @@ /* Copyright (c) 2019, Intel Corporation. */ #include <linux/bpf_trace.h> +#include <linux/unroll.h> #include <net/xdp_sock_drv.h> #include <net/xdp.h> #include "ice.h" @@ -989,7 +990,8 @@ static void ice_xmit_pkt_batch(struct ice_tx_ring *xdp_ring, struct ice_tx_desc *tx_desc; u32 i; - loop_unrolled_for(i = 0; i < PKTS_PER_BATCH; i++) { + unrolled_count(PKTS_PER_BATCH) + for (i = 0; i < PKTS_PER_BATCH; i++) { dma_addr_t dma; dma = xsk_buff_raw_get_dma(xsk_pool, descs[i].addr); diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.h b/drivers/net/ethernet/intel/ice/ice_xsk.h index 45adeb513253a..8dc5d55e26c52 100644 --- a/drivers/net/ethernet/intel/ice/ice_xsk.h +++ b/drivers/net/ethernet/intel/ice/ice_xsk.h @@ -7,14 +7,6 @@ #define PKTS_PER_BATCH 8 -#ifdef __clang__ -#define loop_unrolled_for _Pragma("clang loop unroll_count(8)") for -#elif __GNUC__ >= 8 -#define loop_unrolled_for _Pragma("GCC unroll 8") for -#else -#define loop_unrolled_for for -#endif - struct ice_vsi; #ifdef CONFIG_XDP_SOCKETS diff --git a/drivers/net/ethernet/intel/idpf/Kconfig b/drivers/net/ethernet/intel/idpf/Kconfig index 1addd663acad9..2c359a8551c7b 100644 --- a/drivers/net/ethernet/intel/idpf/Kconfig +++ b/drivers/net/ethernet/intel/idpf/Kconfig @@ -4,6 +4,7 @@ config IDPF tristate "Intel(R) Infrastructure Data Path Function Support" depends on PCI_MSI + depends on PTP_1588_CLOCK_OPTIONAL select DIMLIB select LIBETH help diff --git a/drivers/net/ethernet/intel/idpf/Makefile b/drivers/net/ethernet/intel/idpf/Makefile index 2ce01a0b58981..83ac5e2963822 100644 --- a/drivers/net/ethernet/intel/idpf/Makefile +++ b/drivers/net/ethernet/intel/idpf/Makefile @@ -17,3 +17,6 @@ idpf-y := \ idpf_vf_dev.o idpf-$(CONFIG_IDPF_SINGLEQ) += idpf_singleq_txrx.o + +idpf-$(CONFIG_PTP_1588_CLOCK) += idpf_ptp.o +idpf-$(CONFIG_PTP_1588_CLOCK) += idpf_virtchnl_ptp.o diff --git a/drivers/net/ethernet/intel/idpf/idpf.h b/drivers/net/ethernet/intel/idpf/idpf.h index 66544faab710a..d7dbf7d9c7d31 100644 --- a/drivers/net/ethernet/intel/idpf/idpf.h +++ b/drivers/net/ethernet/intel/idpf/idpf.h @@ -189,6 +189,7 @@ struct idpf_vport_max_q { * @mb_intr_reg_init: Mailbox interrupt register initialization * @reset_reg_init: Reset register initialization * @trigger_reset: Trigger a reset to occur + * @ptp_reg_init: PTP register initialization */ struct idpf_reg_ops { void (*ctlq_reg_init)(struct idpf_ctlq_create_info *cq); @@ -197,6 +198,7 @@ struct idpf_reg_ops { void (*reset_reg_init)(struct idpf_adapter *adapter); void (*trigger_reset)(struct idpf_adapter *adapter, enum idpf_flags trig_cause); + void (*ptp_reg_init)(const struct idpf_adapter *adapter); }; /** @@ -244,9 +246,23 @@ struct idpf_port_stats { u64_stats_t tx_busy; u64_stats_t tx_drops; u64_stats_t tx_dma_map_errs; + u64_stats_t tx_hwtstamp_skipped; struct virtchnl2_vport_stats vport_stats; }; +/** + * struct idpf_tx_tstamp_stats - Tx timestamp statistics + * @tx_hwtstamp_lock: Lock to protect Tx tstamp stats + * @tx_hwtstamp_discarded: Number of Tx skbs discarded due to cached PHC time + * being too old to correctly extend timestamp + * @tx_hwtstamp_flushed: Number of Tx skbs flushed due to interface closed + */ +struct idpf_tx_tstamp_stats { + struct mutex tx_hwtstamp_lock; + u32 tx_hwtstamp_discarded; + u32 tx_hwtstamp_flushed; +}; + /** * struct idpf_vport - Handle for netdevices and queue resources * @num_txq: Number of allocated TX queues @@ -290,6 +306,10 @@ struct idpf_port_stats { * @port_stats: per port
csum, header split, and other offload stats * @link_up: True if link is up * @sw_marker_wq: workqueue for marker packets + * @tx_tstamp_caps: Capabilities negotiated for Tx timestamping + * @tstamp_config: The Tx tstamp config + * @tstamp_task: Tx timestamping task + * @tstamp_stats: Tx timestamping statistics */ struct idpf_vport { u16 num_txq; @@ -334,6 +354,11 @@ struct idpf_vport { bool link_up; wait_queue_head_t sw_marker_wq; + + struct idpf_ptp_vport_tx_tstamp_caps *tx_tstamp_caps; + struct kernel_hwtstamp_config tstamp_config; + struct work_struct tstamp_task; + struct idpf_tx_tstamp_stats tstamp_stats; }; /** @@ -478,6 +503,13 @@ struct idpf_vport_config { struct idpf_vc_xn_manager; +#define idpf_for_each_vport(adapter, iter) \ + for (struct idpf_vport **__##iter = &(adapter)->vports[0], \ + *iter = (adapter)->max_vports ? *__##iter : NULL; \ + iter; \ + iter = (++__##iter) < &(adapter)->vports[(adapter)->max_vports] ? \ + *__##iter : NULL) + /** * struct idpf_adapter - Device data struct generated on probe * @pdev: PCI device struct given on probe @@ -530,6 +562,7 @@ struct idpf_vc_xn_manager; * @vector_lock: Lock to protect vector distribution * @queue_lock: Lock to protect queue distribution * @vc_buf_lock: Lock to protect virtchnl buffer + * @ptp: Storage for PTP-related data */ struct idpf_adapter { struct pci_dev *pdev; @@ -587,6 +620,8 @@ struct idpf_adapter { struct mutex vector_lock; struct mutex queue_lock; struct mutex vc_buf_lock; + + struct idpf_ptp *ptp; }; /** diff --git a/drivers/net/ethernet/intel/idpf/idpf_controlq_api.h b/drivers/net/ethernet/intel/idpf/idpf_controlq_api.h index e8e046ef2f0d7..9642494a67d88 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_controlq_api.h +++ b/drivers/net/ethernet/intel/idpf/idpf_controlq_api.h @@ -123,9 +123,12 @@ struct idpf_ctlq_info { /** * enum idpf_mbx_opc - PF/VF mailbox commands * @idpf_mbq_opc_send_msg_to_cp: used by PF or VF to send a message to its CP + * @idpf_mbq_opc_send_msg_to_peer_drv: used by PF or VF to send a message to + * any peer driver */ enum idpf_mbx_opc { idpf_mbq_opc_send_msg_to_cp = 0x0801, + idpf_mbq_opc_send_msg_to_peer_drv = 0x0804, }; /* API supported for control queue management */ diff --git a/drivers/net/ethernet/intel/idpf/idpf_dev.c b/drivers/net/ethernet/intel/idpf/idpf_dev.c index 41e4bd49402a8..3fae81f1f9889 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_dev.c +++ b/drivers/net/ethernet/intel/idpf/idpf_dev.c @@ -4,6 +4,7 @@ #include "idpf.h" #include "idpf_lan_pf_regs.h" #include "idpf_virtchnl.h" +#include "idpf_ptp.h" #define IDPF_PF_ITR_IDX_SPACING 0x4 @@ -148,6 +149,18 @@ static void idpf_trigger_reset(struct idpf_adapter *adapter, idpf_get_reg_addr(adapter, PFGEN_CTRL)); } +/** + * idpf_ptp_reg_init - Initialize required registers + * @adapter: Driver specific private structure + * + * Set the bits required for enabling shtime and cmd execution + */ +static void idpf_ptp_reg_init(const struct idpf_adapter *adapter) +{ + adapter->ptp->cmd.shtime_enable_mask = PF_GLTSYN_CMD_SYNC_SHTIME_EN_M; + adapter->ptp->cmd.exec_cmd_mask = PF_GLTSYN_CMD_SYNC_EXEC_CMD_M; +} + /** * idpf_reg_ops_init - Initialize register API function pointers * @adapter: Driver specific private structure @@ -159,6 +172,7 @@ static void idpf_reg_ops_init(struct idpf_adapter *adapter) adapter->dev_ops.reg_ops.mb_intr_reg_init = idpf_mb_intr_reg_init; adapter->dev_ops.reg_ops.reset_reg_init = idpf_reset_reg_init; adapter->dev_ops.reg_ops.trigger_reset = idpf_trigger_reset; + adapter->dev_ops.reg_ops.ptp_reg_init = 
idpf_ptp_reg_init; } /** diff --git a/drivers/net/ethernet/intel/idpf/idpf_ethtool.c b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c index 59b1a1a099967..7a4793749bc51 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_ethtool.c +++ b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c @@ -2,6 +2,7 @@ /* Copyright (C) 2023 Intel Corporation */ #include "idpf.h" +#include "idpf_ptp.h" /** * idpf_get_rxnfc - command to get RX flow classification rules @@ -479,6 +480,9 @@ static const struct idpf_stats idpf_gstrings_port_stats[] = { IDPF_PORT_STAT("tx-unicast_pkts", port_stats.vport_stats.tx_unicast), IDPF_PORT_STAT("tx-multicast_pkts", port_stats.vport_stats.tx_multicast), IDPF_PORT_STAT("tx-broadcast_pkts", port_stats.vport_stats.tx_broadcast), + IDPF_PORT_STAT("tx-hwtstamp_skipped", port_stats.tx_hwtstamp_skipped), + IDPF_PORT_STAT("tx-hwtstamp_flushed", tstamp_stats.tx_hwtstamp_flushed), + IDPF_PORT_STAT("tx-hwtstamp_discarded", tstamp_stats.tx_hwtstamp_discarded), }; #define IDPF_PORT_STATS_LEN ARRAY_SIZE(idpf_gstrings_port_stats) @@ -780,6 +784,7 @@ static void idpf_collect_queue_stats(struct idpf_vport *vport) u64_stats_set(&pstats->tx_busy, 0); u64_stats_set(&pstats->tx_drops, 0); u64_stats_set(&pstats->tx_dma_map_errs, 0); + u64_stats_set(&pstats->tx_hwtstamp_skipped, 0); u64_stats_update_end(&pstats->stats_sync); for (i = 0; i < vport->num_rxq_grp; i++) { @@ -828,7 +833,7 @@ static void idpf_collect_queue_stats(struct idpf_vport *vport) struct idpf_txq_group *txq_grp = &vport->txq_grps[i]; for (j = 0; j < txq_grp->num_txq; j++) { - u64 linearize, qbusy, skb_drops, dma_map_errs; + u64 linearize, qbusy, skb_drops, dma_map_errs, tstamp; struct idpf_tx_queue *txq = txq_grp->txqs[j]; struct idpf_tx_queue_stats *stats; unsigned int start; @@ -844,6 +849,7 @@ static void idpf_collect_queue_stats(struct idpf_vport *vport) qbusy = u64_stats_read(&stats->q_busy); skb_drops = u64_stats_read(&stats->skb_drops); dma_map_errs = u64_stats_read(&stats->dma_map_errs); + tstamp = u64_stats_read(&stats->tstamp_skipped); } while (u64_stats_fetch_retry(&txq->stats_sync, start)); u64_stats_update_begin(&pstats->stats_sync); @@ -851,6 +857,7 @@ static void idpf_collect_queue_stats(struct idpf_vport *vport) u64_stats_add(&pstats->tx_busy, qbusy); u64_stats_add(&pstats->tx_drops, skb_drops); u64_stats_add(&pstats->tx_dma_map_errs, dma_map_errs); + u64_stats_add(&pstats->tx_hwtstamp_skipped, tstamp); u64_stats_update_end(&pstats->stats_sync); } } @@ -1312,6 +1319,71 @@ static int idpf_get_link_ksettings(struct net_device *netdev, return 0; } +/** + * idpf_get_timestamp_filters - Get the supported timestamping mode + * @vport: Virtual port structure + * @info: ethtool timestamping info structure + * + * Get the Tx/Rx timestamp filters. + */ +static void idpf_get_timestamp_filters(const struct idpf_vport *vport, + struct kernel_ethtool_ts_info *info) +{ + info->so_timestamping = SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; + + info->tx_types = BIT(HWTSTAMP_TX_OFF); + info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_ALL); + + if (!vport->tx_tstamp_caps || + vport->adapter->ptp->tx_tstamp_access == IDPF_PTP_NONE) + return; + + info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_TX_HARDWARE; + + info->tx_types |= BIT(HWTSTAMP_TX_ON); +} + +/** + * idpf_get_ts_info - Get device PHC association + * @netdev: network interface device structure + * @info: ethtool timestamping info structure + * + * Return: 0 on success, -errno otherwise. 
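The get_ts_info hook documented above and the hwtstamp ndo handlers added later in this patch are the two driver-side ends of the standard userspace interfaces (ethtool -T and the SIOCSHWTSTAMP ioctl). A minimal userspace sketch of the latter, with the socket and interface name assumed:

	#include <linux/net_tstamp.h>
	#include <linux/sockios.h>
	#include <net/if.h>
	#include <string.h>
	#include <sys/ioctl.h>

	/* Request Tx + Rx hardware timestamping; the kernel routes this
	 * request to the driver's .ndo_hwtstamp_set handler.
	 */
	static int enable_hwtstamp(int sock, const char *ifname)
	{
		struct hwtstamp_config cfg = {
			.tx_type = HWTSTAMP_TX_ON,
			.rx_filter = HWTSTAMP_FILTER_ALL,
		};
		struct ifreq ifr = {};

		strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
		ifr.ifr_data = (void *)&cfg;

		return ioctl(sock, SIOCSHWTSTAMP, &ifr);
	}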
+ */ +static int idpf_get_ts_info(struct net_device *netdev, + struct kernel_ethtool_ts_info *info) +{ + struct idpf_netdev_priv *np = netdev_priv(netdev); + struct idpf_vport *vport; + int err = 0; + + if (!mutex_trylock(&np->adapter->vport_ctrl_lock)) + return -EBUSY; + + vport = idpf_netdev_to_vport(netdev); + + if (!vport->adapter->ptp) { + err = -EOPNOTSUPP; + goto unlock; + } + + if (idpf_is_cap_ena(vport->adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_PTP) && + vport->adapter->ptp->clock) { + info->phc_index = ptp_clock_index(vport->adapter->ptp->clock); + idpf_get_timestamp_filters(vport, info); + } else { + pci_dbg(vport->adapter->pdev, "PTP clock not detected\n"); + err = ethtool_op_get_ts_info(netdev, info); + } + +unlock: + mutex_unlock(&np->adapter->vport_ctrl_lock); + + return err; +} + static const struct ethtool_ops idpf_ethtool_ops = { .supported_coalesce_params = ETHTOOL_COALESCE_USECS | ETHTOOL_COALESCE_USE_ADAPTIVE, @@ -1336,6 +1408,7 @@ static const struct ethtool_ops idpf_ethtool_ops = { .get_ringparam = idpf_get_ringparam, .set_ringparam = idpf_set_ringparam, .get_link_ksettings = idpf_get_link_ksettings, + .get_ts_info = idpf_get_ts_info, }; /** diff --git a/drivers/net/ethernet/intel/idpf/idpf_lan_pf_regs.h b/drivers/net/ethernet/intel/idpf/idpf_lan_pf_regs.h index 24edb8a6ec2e6..cc9aa2b6a14aa 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_lan_pf_regs.h +++ b/drivers/net/ethernet/intel/idpf/idpf_lan_pf_regs.h @@ -53,6 +53,10 @@ #define PF_FW_ATQH_ATQH_M GENMASK(9, 0) #define PF_FW_ATQT (PF_FW_BASE + 0x24) +/* Timesync registers */ +#define PF_GLTSYN_CMD_SYNC_EXEC_CMD_M GENMASK(1, 0) +#define PF_GLTSYN_CMD_SYNC_SHTIME_EN_M BIT(2) + /* Interrupts */ #define PF_GLINT_BASE 0x08900000 #define PF_GLINT_DYN_CTL(_INT) (PF_GLINT_BASE + ((_INT) * 0x1000)) diff --git a/drivers/net/ethernet/intel/idpf/idpf_lan_txrx.h b/drivers/net/ethernet/intel/idpf/idpf_lan_txrx.h index 8c7f8ef8f1a15..7492d17132431 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_lan_txrx.h +++ b/drivers/net/ethernet/intel/idpf/idpf_lan_txrx.h @@ -282,7 +282,18 @@ struct idpf_flex_tx_tso_ctx_qw { u8 flex; }; -struct idpf_flex_tx_ctx_desc { +union idpf_flex_tx_ctx_desc { + /* DTYPE = IDPF_TX_DESC_DTYPE_CTX (0x01) */ + struct { + __le64 qw0; +#define IDPF_TX_CTX_L2TAG2_M GENMASK_ULL(47, 32) + __le64 qw1; +#define IDPF_TX_CTX_DTYPE_M GENMASK_ULL(3, 0) +#define IDPF_TX_CTX_CMD_M GENMASK_ULL(15, 4) +#define IDPF_TX_CTX_TSYN_REG_M GENMASK_ULL(47, 30) +#define IDPF_TX_CTX_MSS_M GENMASK_ULL(63, 50) + } tsyn; + /* DTYPE = IDPF_TX_DESC_DTYPE_FLEX_TSO_CTX (0x05) */ struct { struct idpf_flex_tx_tso_ctx_qw qw0; diff --git a/drivers/net/ethernet/intel/idpf/idpf_lib.c b/drivers/net/ethernet/intel/idpf/idpf_lib.c index a3d6b8f198a86..7e9a414dffa55 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_lib.c +++ b/drivers/net/ethernet/intel/idpf/idpf_lib.c @@ -3,6 +3,7 @@ #include "idpf.h" #include "idpf_virtchnl.h" +#include "idpf_ptp.h" static const struct net_device_ops idpf_netdev_ops; @@ -814,6 +815,7 @@ static int idpf_cfg_netdev(struct idpf_vport *vport) netdev->hw_features |= dflt_features | offloads; netdev->hw_enc_features |= dflt_features | offloads; idpf_set_ethtool_ops(netdev); + netif_set_affinity_auto(netdev); SET_NETDEV_DEV(netdev, &adapter->pdev->dev); /* carrier off on init to avoid Tx hangs */ @@ -927,15 +929,19 @@ static int idpf_stop(struct net_device *netdev) static void idpf_decfg_netdev(struct idpf_vport *vport) { struct idpf_adapter *adapter = vport->adapter; + u16 idx = vport->idx;
kfree(vport->rx_ptype_lkup); vport->rx_ptype_lkup = NULL; - unregister_netdev(vport->netdev); - free_netdev(vport->netdev); + if (test_and_clear_bit(IDPF_VPORT_REG_NETDEV, + adapter->vport_config[idx]->flags)) { + unregister_netdev(vport->netdev); + free_netdev(vport->netdev); + } vport->netdev = NULL; - adapter->netdevs[vport->idx] = NULL; + adapter->netdevs[idx] = NULL; } /** @@ -1536,13 +1542,22 @@ void idpf_init_task(struct work_struct *work) } for (index = 0; index < adapter->max_vports; index++) { - if (adapter->netdevs[index] && - !test_bit(IDPF_VPORT_REG_NETDEV, - adapter->vport_config[index]->flags)) { - register_netdev(adapter->netdevs[index]); - set_bit(IDPF_VPORT_REG_NETDEV, - adapter->vport_config[index]->flags); + struct net_device *netdev = adapter->netdevs[index]; + struct idpf_vport_config *vport_config; + + vport_config = adapter->vport_config[index]; + + if (!netdev || + test_bit(IDPF_VPORT_REG_NETDEV, vport_config->flags)) + continue; + + err = register_netdev(netdev); + if (err) { + dev_err(&pdev->dev, "failed to register netdev for vport %d: %pe\n", + index, ERR_PTR(err)); + continue; } + set_bit(IDPF_VPORT_REG_NETDEV, vport_config->flags); } /* As all the required vports are created, clear the reset flag @@ -2344,6 +2359,60 @@ void idpf_free_dma_mem(struct idpf_hw *hw, struct idpf_dma_mem *mem) mem->pa = 0; } +static int idpf_hwtstamp_set(struct net_device *netdev, + struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack) +{ + struct idpf_vport *vport; + int err; + + idpf_vport_ctrl_lock(netdev); + vport = idpf_netdev_to_vport(netdev); + + if (!vport->link_up) { + idpf_vport_ctrl_unlock(netdev); + return -EPERM; + } + + if (!idpf_ptp_is_vport_tx_tstamp_ena(vport) && + !idpf_ptp_is_vport_rx_tstamp_ena(vport)) { + idpf_vport_ctrl_unlock(netdev); + return -EOPNOTSUPP; + } + + err = idpf_ptp_set_timestamp_mode(vport, config); + + idpf_vport_ctrl_unlock(netdev); + + return err; +} + +static int idpf_hwtstamp_get(struct net_device *netdev, + struct kernel_hwtstamp_config *config) +{ + struct idpf_vport *vport; + + idpf_vport_ctrl_lock(netdev); + vport = idpf_netdev_to_vport(netdev); + + if (!vport->link_up) { + idpf_vport_ctrl_unlock(netdev); + return -EPERM; + } + + if (!idpf_ptp_is_vport_tx_tstamp_ena(vport) && + !idpf_ptp_is_vport_rx_tstamp_ena(vport)) { + idpf_vport_ctrl_unlock(netdev); + return 0; + } + + *config = vport->tstamp_config; + + idpf_vport_ctrl_unlock(netdev); + + return 0; +} + static const struct net_device_ops idpf_netdev_ops = { .ndo_open = idpf_open, .ndo_stop = idpf_stop, @@ -2356,4 +2425,6 @@ static const struct net_device_ops idpf_netdev_ops = { .ndo_get_stats64 = idpf_get_stats64, .ndo_set_features = idpf_set_features, .ndo_tx_timeout = idpf_tx_timeout, + .ndo_hwtstamp_get = idpf_hwtstamp_get, + .ndo_hwtstamp_set = idpf_hwtstamp_set, }; diff --git a/drivers/net/ethernet/intel/idpf/idpf_main.c b/drivers/net/ethernet/intel/idpf/idpf_main.c index b6c515d14cbf0..022645f4fa9c1 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_main.c +++ b/drivers/net/ethernet/intel/idpf/idpf_main.c @@ -163,6 +163,10 @@ static int idpf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_free; } + err = pci_enable_ptm(pdev, NULL); + if (err) + pci_dbg(pdev, "PCIe PTM is not supported by PCIe bus/controller\n"); + /* set up for high or low dma */ err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); if (err) { @@ -194,9 +198,8 @@ static int idpf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_serv_wq_alloc; } - 
adapter->mbx_wq = alloc_workqueue("%s-%s-mbx", - WQ_UNBOUND | WQ_MEM_RECLAIM, 0, - dev_driver_string(dev), + adapter->mbx_wq = alloc_workqueue("%s-%s-mbx", WQ_UNBOUND | WQ_HIGHPRI, + 0, dev_driver_string(dev), dev_name(dev)); if (!adapter->mbx_wq) { dev_err(dev, "Failed to allocate mailbox workqueue\n"); diff --git a/drivers/net/ethernet/intel/idpf/idpf_ptp.c b/drivers/net/ethernet/intel/idpf/idpf_ptp.c new file mode 100644 index 0000000000000..85a9a25c44d6d --- /dev/null +++ b/drivers/net/ethernet/intel/idpf/idpf_ptp.c @@ -0,0 +1,1000 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (C) 2024 Intel Corporation */ + +#include "idpf.h" +#include "idpf_ptp.h" + +/** + * idpf_ptp_get_access - Determine the access type of the PTP features + * @adapter: Driver specific private structure + * @direct: Capability that indicates the direct access + * @mailbox: Capability that indicates the mailbox access + * + * Return: the type of supported access for the PTP feature. + */ +static enum idpf_ptp_access +idpf_ptp_get_access(const struct idpf_adapter *adapter, u32 direct, u32 mailbox) +{ + if (adapter->ptp->caps & direct) + return IDPF_PTP_DIRECT; + else if (adapter->ptp->caps & mailbox) + return IDPF_PTP_MAILBOX; + else + return IDPF_PTP_NONE; +} + +/** + * idpf_ptp_get_features_access - Determine the access type of PTP features + * @adapter: Driver specific private structure + * + * Fill the adapter structure with the type of access supported for each + * PTP feature. + */ +void idpf_ptp_get_features_access(const struct idpf_adapter *adapter) +{ + struct idpf_ptp *ptp = adapter->ptp; + u32 direct, mailbox; + + /* Get the device clock time */ + direct = VIRTCHNL2_CAP_PTP_GET_DEVICE_CLK_TIME; + mailbox = VIRTCHNL2_CAP_PTP_GET_DEVICE_CLK_TIME_MB; + ptp->get_dev_clk_time_access = idpf_ptp_get_access(adapter, + direct, + mailbox); + + /* Get the cross timestamp */ + direct = VIRTCHNL2_CAP_PTP_GET_CROSS_TIME; + mailbox = VIRTCHNL2_CAP_PTP_GET_CROSS_TIME_MB; + ptp->get_cross_tstamp_access = idpf_ptp_get_access(adapter, + direct, + mailbox); + + /* Set the device clock time */ + direct = VIRTCHNL2_CAP_PTP_SET_DEVICE_CLK_TIME; + mailbox = VIRTCHNL2_CAP_PTP_SET_DEVICE_CLK_TIME_MB; + ptp->set_dev_clk_time_access = idpf_ptp_get_access(adapter, + direct, + mailbox); + + /* Adjust the device clock time */ + direct = VIRTCHNL2_CAP_PTP_ADJ_DEVICE_CLK; + mailbox = VIRTCHNL2_CAP_PTP_ADJ_DEVICE_CLK_MB; + ptp->adj_dev_clk_time_access = idpf_ptp_get_access(adapter, + direct, + mailbox); + + /* Tx timestamping */ + direct = VIRTCHNL2_CAP_PTP_TX_TSTAMPS; + mailbox = VIRTCHNL2_CAP_PTP_TX_TSTAMPS_MB; + ptp->tx_tstamp_access = idpf_ptp_get_access(adapter, + direct, + mailbox); +} + +/** + * idpf_ptp_enable_shtime - Enable shadow time and execute a command + * @adapter: Driver specific private structure + */ +static void idpf_ptp_enable_shtime(struct idpf_adapter *adapter) +{ + u32 shtime_enable, exec_cmd; + + /* Get offsets */ + shtime_enable = adapter->ptp->cmd.shtime_enable_mask; + exec_cmd = adapter->ptp->cmd.exec_cmd_mask; + + /* Set the shtime en and the sync field */ + writel(shtime_enable, adapter->ptp->dev_clk_regs.cmd_sync); + writel(exec_cmd | shtime_enable, adapter->ptp->dev_clk_regs.cmd_sync); +} + +/** + * idpf_ptp_read_src_clk_reg_direct - Read the main timer value directly + * @adapter: Driver specific private structure + * @sts: Optional parameter for holding a pair of system timestamps from + * the system clock. Will be ignored when NULL is given.
+ * + * Return: the device clock time in nanoseconds. + */ +static u64 idpf_ptp_read_src_clk_reg_direct(struct idpf_adapter *adapter, + struct ptp_system_timestamp *sts) +{ + struct idpf_ptp *ptp = adapter->ptp; + u32 hi, lo; + + /* Read the system timestamp pre PHC read */ + ptp_read_system_prets(sts); + + idpf_ptp_enable_shtime(adapter); + lo = readl(ptp->dev_clk_regs.dev_clk_ns_l); + + /* Read the system timestamp post PHC read */ + ptp_read_system_postts(sts); + + hi = readl(ptp->dev_clk_regs.dev_clk_ns_h); + + return ((u64)hi << 32) | lo; +} + +/** + * idpf_ptp_read_src_clk_reg_mailbox - Read the main timer value through mailbox + * @adapter: Driver specific private structure + * @sts: Optional parameter for holding a pair of system timestamps from + * the system clock. Will be ignored when NULL is given. + * @src_clk: Returned main timer value in nanoseconds + * + * Return: 0 on success, -errno otherwise. + */ +static int idpf_ptp_read_src_clk_reg_mailbox(struct idpf_adapter *adapter, + struct ptp_system_timestamp *sts, + u64 *src_clk) +{ + struct idpf_ptp_dev_timers clk_time; + int err; + + /* Read the system timestamp pre PHC read */ + ptp_read_system_prets(sts); + + err = idpf_ptp_get_dev_clk_time(adapter, &clk_time); + if (err) + return err; + + /* Read the system timestamp post PHC read */ + ptp_read_system_postts(sts); + + *src_clk = clk_time.dev_clk_time_ns; + + return 0; +} + +/** + * idpf_ptp_read_src_clk_reg - Read the main timer value + * @adapter: Driver specific private structure + * @src_clk: Returned main timer value in nanoseconds + * @sts: Optional parameter for holding a pair of system timestamps from + * the system clock. Will be ignored if NULL is given. + * + * Return: 0 on success, -errno otherwise. + */ +static int idpf_ptp_read_src_clk_reg(struct idpf_adapter *adapter, u64 *src_clk, + struct ptp_system_timestamp *sts) +{ + switch (adapter->ptp->get_dev_clk_time_access) { + case IDPF_PTP_NONE: + return -EOPNOTSUPP; + case IDPF_PTP_MAILBOX: + return idpf_ptp_read_src_clk_reg_mailbox(adapter, sts, src_clk); + case IDPF_PTP_DIRECT: + *src_clk = idpf_ptp_read_src_clk_reg_direct(adapter, sts); + break; + default: + return -EOPNOTSUPP; + } + + return 0; +} + +#if IS_ENABLED(CONFIG_ARM_ARCH_TIMER) || IS_ENABLED(CONFIG_X86) +/** + * idpf_ptp_get_sync_device_time_direct - Get the cross time stamp values + * directly + * @adapter: Driver specific private structure + * @dev_time: 64bit main timer value + * @sys_time: 64bit system time value + */ +static void idpf_ptp_get_sync_device_time_direct(struct idpf_adapter *adapter, + u64 *dev_time, u64 *sys_time) +{ + u32 dev_time_lo, dev_time_hi, sys_time_lo, sys_time_hi; + struct idpf_ptp *ptp = adapter->ptp; + + idpf_ptp_enable_shtime(adapter); + + dev_time_lo = readl(ptp->dev_clk_regs.dev_clk_ns_l); + dev_time_hi = readl(ptp->dev_clk_regs.dev_clk_ns_h); + + sys_time_lo = readl(ptp->dev_clk_regs.sys_time_ns_l); + sys_time_hi = readl(ptp->dev_clk_regs.sys_time_ns_h); + + *dev_time = ((u64)dev_time_hi << 32) | dev_time_lo; + *sys_time = ((u64)sys_time_hi << 32) | sys_time_lo; +} + +/** + * idpf_ptp_get_sync_device_time_mailbox - Get the cross time stamp values + * through mailbox + * @adapter: Driver specific private structure + * @dev_time: 64bit main timer value expressed in nanoseconds + * @sys_time: 64bit system time value expressed in nanoseconds + * + * Return: 0 on success, -errno otherwise.
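The cross-timestamp helpers above, together with the get_crosststamp callback that follows, are what back the PTP_SYS_OFFSET_PRECISE ioctl on the PHC character device. A minimal userspace sketch (device path assumed):

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <linux/ptp_clock.h>

	int main(void)
	{
		struct ptp_sys_offset_precise req = { 0 };
		int fd = open("/dev/ptp0", O_RDONLY);	/* assumed PHC node */

		if (fd < 0 || ioctl(fd, PTP_SYS_OFFSET_PRECISE, &req))
			return 1;

		/* device and system time captured at the same instant */
		printf("phc %lld.%09u  sys %lld.%09u\n",
		       (long long)req.device.sec, req.device.nsec,
		       (long long)req.sys_realtime.sec, req.sys_realtime.nsec);

		return 0;
	}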
+ */ +static int idpf_ptp_get_sync_device_time_mailbox(struct idpf_adapter *adapter, + u64 *dev_time, u64 *sys_time) +{ + struct idpf_ptp_dev_timers cross_time; + int err; + + err = idpf_ptp_get_cross_time(adapter, &cross_time); + if (err) + return err; + + *dev_time = cross_time.dev_clk_time_ns; + *sys_time = cross_time.sys_time_ns; + + return err; +} + +/** + * idpf_ptp_get_sync_device_time - Get the cross time stamp info + * @device: Current device time + * @system: System counter value read synchronously with device time + * @ctx: Context provided by timekeeping code + * + * Return: 0 on success (the device and the system clock times are read simultaneously), -errno otherwise. + */ +static int idpf_ptp_get_sync_device_time(ktime_t *device, + struct system_counterval_t *system, + void *ctx) +{ + struct idpf_adapter *adapter = ctx; + u64 ns_time_dev, ns_time_sys; + int err; + + switch (adapter->ptp->get_cross_tstamp_access) { + case IDPF_PTP_NONE: + return -EOPNOTSUPP; + case IDPF_PTP_MAILBOX: + err = idpf_ptp_get_sync_device_time_mailbox(adapter, + &ns_time_dev, + &ns_time_sys); + if (err) + return err; + break; + case IDPF_PTP_DIRECT: + idpf_ptp_get_sync_device_time_direct(adapter, &ns_time_dev, + &ns_time_sys); + break; + default: + return -EOPNOTSUPP; + } + + *device = ns_to_ktime(ns_time_dev); + +#if IS_ENABLED(CONFIG_X86) + system->cycles = ns_time_sys; + system->cs_id = CSID_X86_ART; +#endif /* CONFIG_X86 */ + + return 0; +} + +/** + * idpf_ptp_get_crosststamp - Capture a device cross timestamp + * @info: the driver's PTP info structure + * @cts: The memory to fill the cross timestamp info + * + * Capture a cross timestamp between the system time and the device PTP hardware + * clock. + * + * Return: 0 on success, -errno on failure. + */ +static int idpf_ptp_get_crosststamp(struct ptp_clock_info *info, + struct system_device_crosststamp *cts) +{ + struct idpf_adapter *adapter = idpf_ptp_info_to_adapter(info); + + return get_device_system_crosststamp(idpf_ptp_get_sync_device_time, + adapter, NULL, cts); +} +#endif /* CONFIG_ARM_ARCH_TIMER || CONFIG_X86 */ + +/** + * idpf_ptp_gettimex64 - Get the time of the clock + * @info: the driver's PTP info structure + * @ts: timespec64 structure to hold the current time value + * @sts: Optional parameter for holding a pair of system timestamps from + * the system clock. Will be ignored if NULL is given. + * + * Return: 0 on success, with the current device clock value converted into + * @ts, -errno otherwise. + */ +static int idpf_ptp_gettimex64(struct ptp_clock_info *info, + struct timespec64 *ts, + struct ptp_system_timestamp *sts) +{ + struct idpf_adapter *adapter = idpf_ptp_info_to_adapter(info); + u64 time_ns; + int err; + + err = idpf_ptp_read_src_clk_reg(adapter, &time_ns, sts); + if (err) + return -EACCES; + + *ts = ns_to_timespec64(time_ns); + + return 0; +} + +/** + * idpf_ptp_update_phctime_rxq_grp - Update the cached PHC time for a given Rx + * queue group.
+ * @grp: receive queue group in which Rx timestamp is enabled + * @split: Indicates whether the queue model is split or single queue + * @systime: Cached system time + */ +static void +idpf_ptp_update_phctime_rxq_grp(const struct idpf_rxq_group *grp, bool split, + u64 systime) +{ + struct idpf_rx_queue *rxq; + u16 i; + + if (!split) { + for (i = 0; i < grp->singleq.num_rxq; i++) { + rxq = grp->singleq.rxqs[i]; + if (rxq) + WRITE_ONCE(rxq->cached_phc_time, systime); + } + } else { + for (i = 0; i < grp->splitq.num_rxq_sets; i++) { + rxq = &grp->splitq.rxq_sets[i]->rxq; + if (rxq) + WRITE_ONCE(rxq->cached_phc_time, systime); + } + } +} + +/** + * idpf_ptp_update_cached_phctime - Update the cached PHC time values + * @adapter: Driver specific private structure + * + * This function updates the system time values which are cached in the adapter + * structure and the Rx queues. + * + * This function must be called periodically to ensure that the cached value + * is never more than 2 seconds old. + * + * Return: 0 on success, negative otherwise. + */ +static int idpf_ptp_update_cached_phctime(struct idpf_adapter *adapter) +{ + u64 systime; + int err; + + err = idpf_ptp_read_src_clk_reg(adapter, &systime, NULL); + if (err) + return -EACCES; + + /* Update the cached PHC time stored in the adapter structure. + * These values are used to extend Tx timestamp values to 64 bit + * expected by the stack. + */ + WRITE_ONCE(adapter->ptp->cached_phc_time, systime); + WRITE_ONCE(adapter->ptp->cached_phc_jiffies, jiffies); + + idpf_for_each_vport(adapter, vport) { + bool split; + + if (!vport || !vport->rxq_grps) + continue; + + split = idpf_is_queue_model_split(vport->rxq_model); + + for (u16 i = 0; i < vport->num_rxq_grp; i++) { + struct idpf_rxq_group *grp = &vport->rxq_grps[i]; + + idpf_ptp_update_phctime_rxq_grp(grp, split, systime); + } + } + + return 0; +} + +/** + * idpf_ptp_settime64 - Set the time of the clock + * @info: the driver's PTP info structure + * @ts: timespec64 structure that holds the new time value + * + * Set the device clock to the user input value. The conversion from timespec + * to ns happens in the write function. + * + * Return: 0 on success, -errno otherwise. + */ +static int idpf_ptp_settime64(struct ptp_clock_info *info, + const struct timespec64 *ts) +{ + struct idpf_adapter *adapter = idpf_ptp_info_to_adapter(info); + enum idpf_ptp_access access; + int err; + u64 ns; + + access = adapter->ptp->set_dev_clk_time_access; + if (access != IDPF_PTP_MAILBOX) + return -EOPNOTSUPP; + + ns = timespec64_to_ns(ts); + + err = idpf_ptp_set_dev_clk_time(adapter, ns); + if (err) { + pci_err(adapter->pdev, "Failed to set the time, err: %pe\n", + ERR_PTR(err)); + return err; + } + + err = idpf_ptp_update_cached_phctime(adapter); + if (err) + pci_warn(adapter->pdev, + "Unable to immediately update cached PHC time\n"); + + return 0; +} + +/** + * idpf_ptp_adjtime_nonatomic - Do a non-atomic clock adjustment + * @info: the driver's PTP info structure + * @delta: Offset in nanoseconds to adjust the time by + * + * Return: 0 on success, -errno otherwise. 
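+ *
+ * Note: the get->add->set sequence used here is not atomic; the clock keeps
+ * running between the read and the write, so the mailbox round-trip can fold
+ * a small extra offset into very large adjustments.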
+ */
+static int idpf_ptp_adjtime_nonatomic(struct ptp_clock_info *info, s64 delta)
+{
+	struct timespec64 now, then;
+	int err;
+
+	err = idpf_ptp_gettimex64(info, &now, NULL);
+	if (err)
+		return err;
+
+	then = ns_to_timespec64(delta);
+	now = timespec64_add(now, then);
+
+	return idpf_ptp_settime64(info, &now);
+}
+
+/**
+ * idpf_ptp_adjtime - Adjust the time of the clock by the indicated delta
+ * @info: the driver's PTP info structure
+ * @delta: Offset in nanoseconds to adjust the time by
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int idpf_ptp_adjtime(struct ptp_clock_info *info, s64 delta)
+{
+	struct idpf_adapter *adapter = idpf_ptp_info_to_adapter(info);
+	enum idpf_ptp_access access;
+	int err;
+
+	access = adapter->ptp->adj_dev_clk_time_access;
+	if (access != IDPF_PTP_MAILBOX)
+		return -EOPNOTSUPP;
+
+	/* Hardware only supports atomic adjustments using signed 32-bit
+	 * integers. For any adjustment outside this range, perform
+	 * a non-atomic get->adjust->set flow.
+	 */
+	if (delta > S32_MAX || delta < S32_MIN)
+		return idpf_ptp_adjtime_nonatomic(info, delta);
+
+	err = idpf_ptp_adj_dev_clk_time(adapter, delta);
+	if (err) {
+		pci_err(adapter->pdev, "Failed to adjust the clock with delta %lld err: %pe\n",
+			delta, ERR_PTR(err));
+		return err;
+	}
+
+	err = idpf_ptp_update_cached_phctime(adapter);
+	if (err)
+		pci_warn(adapter->pdev,
+			 "Unable to immediately update cached PHC time\n");
+
+	return 0;
+}
+
+/**
+ * idpf_ptp_adjfine - Adjust clock increment rate
+ * @info: the driver's PTP info structure
+ * @scaled_ppm: Parts per million with 16-bit fractional field
+ *
+ * Adjust the frequency of the clock by the indicated scaled ppm from the
+ * base frequency.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int idpf_ptp_adjfine(struct ptp_clock_info *info, long scaled_ppm)
+{
+	struct idpf_adapter *adapter = idpf_ptp_info_to_adapter(info);
+	enum idpf_ptp_access access;
+	u64 incval, diff;
+	int err;
+
+	access = adapter->ptp->adj_dev_clk_time_access;
+	if (access != IDPF_PTP_MAILBOX)
+		return -EOPNOTSUPP;
+
+	incval = adapter->ptp->base_incval;
+
+	diff = adjust_by_scaled_ppm(incval, scaled_ppm);
+	err = idpf_ptp_adj_dev_clk_fine(adapter, diff);
+	if (err)
+		pci_err(adapter->pdev, "Failed to adjust clock increment rate for scaled ppm %ld %pe\n",
+			scaled_ppm, ERR_PTR(err));
+
+	return 0;
+}
+
+/**
+ * idpf_ptp_verify_pin - Verify if pin supports requested pin function
+ * @info: the driver's PTP info structure
+ * @pin: Pin index
+ * @func: Assigned function
+ * @chan: Assigned channel
+ *
+ * Return: -EOPNOTSUPP as not supported yet.
+ */
+static int idpf_ptp_verify_pin(struct ptp_clock_info *info, unsigned int pin,
+			       enum ptp_pin_function func, unsigned int chan)
+{
+	return -EOPNOTSUPP;
+}
+
+/**
+ * idpf_ptp_gpio_enable - Enable/disable ancillary features of PHC
+ * @info: the driver's PTP info structure
+ * @rq: The requested feature to change
+ * @on: Enable/disable flag
+ *
+ * Return: -EOPNOTSUPP as not supported yet.
+ */
+static int idpf_ptp_gpio_enable(struct ptp_clock_info *info,
+				struct ptp_clock_request *rq, int on)
+{
+	return -EOPNOTSUPP;
+}
+
+/**
+ * idpf_ptp_tstamp_extend_32b_to_64b - Convert a 32b nanoseconds Tx or Rx
+ *				       timestamp value to 64b.
+ * @cached_phc_time: recently cached copy of PHC time
+ * @in_timestamp: Ingress/egress 32b nanoseconds timestamp value
+ *
+ * Hardware captures timestamps which contain only 32 bits of nominal
+ * nanoseconds, as opposed to the 64-bit timestamps that the stack expects.
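+ *
+ * The extension computes the signed 32-bit difference between @in_timestamp
+ * and the low word of @cached_phc_time, then adds it to the full 64-bit
+ * cached value. This resolves the missing upper bits correctly as long as
+ * the cached PHC time is within ~2.1 seconds (half the 32-bit nanosecond
+ * range) of the moment the timestamp was captured.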
+ *
+ * Return: Tx timestamp value extended to 64 bits based on cached PHC time.
+ */
+u64 idpf_ptp_tstamp_extend_32b_to_64b(u64 cached_phc_time, u32 in_timestamp)
+{
+	s64 delta;
+
+	delta = in_timestamp - lower_32_bits(cached_phc_time);
+
+	return cached_phc_time + delta;
+}
+
+/**
+ * idpf_ptp_extend_ts - Convert a 40b timestamp to 64b nanoseconds
+ * @vport: Virtual port structure
+ * @in_tstamp: Ingress/egress timestamp value
+ *
+ * It is assumed that the caller verifies the timestamp is valid prior to
+ * calling this function.
+ *
+ * Extract the 32bit nominal nanoseconds and extend them. Use the cached PHC
+ * time stored in the device private PTP structure as the basis for timestamp
+ * extension.
+ *
+ * Return: Tx timestamp value extended to 64 bits.
+ */
+u64 idpf_ptp_extend_ts(struct idpf_vport *vport, u64 in_tstamp)
+{
+	struct idpf_ptp *ptp = vport->adapter->ptp;
+	unsigned long discard_time;
+
+	discard_time = ptp->cached_phc_jiffies + 2 * HZ;
+
+	if (time_is_before_jiffies(discard_time)) {
+		mutex_lock(&vport->tstamp_stats.tx_hwtstamp_lock);
+		vport->tstamp_stats.tx_hwtstamp_discarded++;
+		mutex_unlock(&vport->tstamp_stats.tx_hwtstamp_lock);
+
+		return 0;
+	}
+
+	return idpf_ptp_tstamp_extend_32b_to_64b(ptp->cached_phc_time,
+						 lower_32_bits(in_tstamp));
+}
+
+/**
+ * idpf_ptp_request_ts - Request an available Tx timestamp index
+ * @tx_q: Transmit queue on which the Tx timestamp is requested
+ * @skb: The SKB to associate with this timestamp request
+ * @idx: Return-by-reference index of the Tx timestamp latch
+ *
+ * Request a Tx timestamp index negotiated during PTP init that will be set
+ * into the Tx descriptor.
+ *
+ * Return: 0 on success, with the latch index stored in @idx, -errno
+ *	   otherwise.
+ */
+int idpf_ptp_request_ts(struct idpf_tx_queue *tx_q, struct sk_buff *skb,
+			u32 *idx)
+{
+	struct idpf_ptp_tx_tstamp *ptp_tx_tstamp;
+	struct list_head *head;
+
+	/* Get the index from the free latches list */
+	spin_lock_bh(&tx_q->cached_tstamp_caps->lock_free);
+
+	head = &tx_q->cached_tstamp_caps->latches_free;
+	if (list_empty(head)) {
+		spin_unlock_bh(&tx_q->cached_tstamp_caps->lock_free);
+		return -ENOBUFS;
+	}
+
+	ptp_tx_tstamp = list_first_entry(head, struct idpf_ptp_tx_tstamp,
+					 list_member);
+	list_del(&ptp_tx_tstamp->list_member);
+	spin_unlock_bh(&tx_q->cached_tstamp_caps->lock_free);
+
+	ptp_tx_tstamp->skb = skb_get(skb);
+	skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+
+	/* Move the element to the used latches list */
+	spin_lock_bh(&tx_q->cached_tstamp_caps->lock_in_use);
+	list_add(&ptp_tx_tstamp->list_member,
+		 &tx_q->cached_tstamp_caps->latches_in_use);
+	spin_unlock_bh(&tx_q->cached_tstamp_caps->lock_in_use);
+
+	*idx = ptp_tx_tstamp->idx;
+
+	return 0;
+}
+
+/**
+ * idpf_ptp_set_rx_tstamp - Enable or disable Rx timestamping
+ * @vport: Virtual port structure
+ * @rx_filter: Requested Rx filter value (HWTSTAMP_FILTER_*)
+ */
+static void idpf_ptp_set_rx_tstamp(struct idpf_vport *vport, int rx_filter)
+{
+	bool enable = true, splitq;
+
+	splitq = idpf_is_queue_model_split(vport->rxq_model);
+
+	if (rx_filter == HWTSTAMP_FILTER_NONE) {
+		enable = false;
+		vport->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
+	} else {
+		vport->tstamp_config.rx_filter = HWTSTAMP_FILTER_ALL;
+	}
+
+	for (u16 i = 0; i < vport->num_rxq_grp; i++) {
+		struct idpf_rxq_group *grp = &vport->rxq_grps[i];
+		struct idpf_rx_queue *rx_queue;
+		u16 j, num_rxq;
+
+		if (splitq)
+			num_rxq = grp->splitq.num_rxq_sets;
+		else
+			num_rxq = grp->singleq.num_rxq;
+
+		for (j = 0; j < num_rxq; j++) {
+			if (splitq)
+				rx_queue = &grp->splitq.rxq_sets[j]->rxq;
+			else
+				rx_queue = grp->singleq.rxqs[j];
+
+			if (enable)
+				idpf_queue_set(PTP, rx_queue);
+			else
+				idpf_queue_clear(PTP, rx_queue);
+		}
+	}
+}
+
+/**
+ * idpf_ptp_set_timestamp_mode - Setup driver for requested timestamp mode
+ * @vport: Virtual port structure
+ * @config: Hwtstamp settings requested or saved
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+int idpf_ptp_set_timestamp_mode(struct idpf_vport *vport,
+				struct kernel_hwtstamp_config *config)
+{
+	switch (config->tx_type) {
+	case HWTSTAMP_TX_OFF:
+		break;
+	case HWTSTAMP_TX_ON:
+		if (!idpf_ptp_is_vport_tx_tstamp_ena(vport))
+			return -EINVAL;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	vport->tstamp_config.tx_type = config->tx_type;
+	idpf_ptp_set_rx_tstamp(vport, config->rx_filter);
+	*config = vport->tstamp_config;
+
+	return 0;
+}
+
+/**
+ * idpf_tstamp_task - Delayed task to handle Tx tstamps
+ * @work: work_struct handle
+ */
+void idpf_tstamp_task(struct work_struct *work)
+{
+	struct idpf_vport *vport;
+
+	vport = container_of(work, struct idpf_vport, tstamp_task);
+
+	idpf_ptp_get_tx_tstamp(vport);
+}
+
+/**
+ * idpf_ptp_do_aux_work - Do PTP periodic work
+ * @info: Driver's PTP info structure
+ *
+ * Return: number of jiffies until the next run of the periodic work.
+ */
+static long idpf_ptp_do_aux_work(struct ptp_clock_info *info)
+{
+	struct idpf_adapter *adapter = idpf_ptp_info_to_adapter(info);
+
+	idpf_ptp_update_cached_phctime(adapter);
+
+	return msecs_to_jiffies(500);
+}
+
+/**
+ * idpf_ptp_set_caps - Set PTP capabilities
+ * @adapter: Driver specific private structure
+ *
+ * This function sets the PTP clock capabilities and callbacks.
+ */
+static void idpf_ptp_set_caps(const struct idpf_adapter *adapter)
+{
+	struct ptp_clock_info *info = &adapter->ptp->info;
+
+	snprintf(info->name, sizeof(info->name), "%s-%s-clk",
+		 KBUILD_MODNAME, pci_name(adapter->pdev));
+
+	info->owner = THIS_MODULE;
+	info->max_adj = adapter->ptp->max_adj;
+	info->gettimex64 = idpf_ptp_gettimex64;
+	info->settime64 = idpf_ptp_settime64;
+	info->adjfine = idpf_ptp_adjfine;
+	info->adjtime = idpf_ptp_adjtime;
+	info->verify = idpf_ptp_verify_pin;
+	info->enable = idpf_ptp_gpio_enable;
+	info->do_aux_work = idpf_ptp_do_aux_work;
+
+#if IS_ENABLED(CONFIG_ARM_ARCH_TIMER)
+	info->getcrosststamp = idpf_ptp_get_crosststamp;
+#elif IS_ENABLED(CONFIG_X86)
+	if (pcie_ptm_enabled(adapter->pdev) &&
+	    boot_cpu_has(X86_FEATURE_ART) &&
+	    boot_cpu_has(X86_FEATURE_TSC_KNOWN_FREQ))
+		info->getcrosststamp = idpf_ptp_get_crosststamp;
+#endif /* CONFIG_ARM_ARCH_TIMER */
+}
+
+/**
+ * idpf_ptp_create_clock - Create PTP clock device for userspace
+ * @adapter: Driver specific private structure
+ *
+ * This function creates a new PTP clock device.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int idpf_ptp_create_clock(const struct idpf_adapter *adapter)
+{
+	struct ptp_clock *clock;
+
+	idpf_ptp_set_caps(adapter);
+
+	/* Attempt to register the clock before enabling the hardware. */
+	clock = ptp_clock_register(&adapter->ptp->info,
+				   &adapter->pdev->dev);
+	if (IS_ERR(clock)) {
+		pci_err(adapter->pdev, "PTP clock creation failed: %pe\n",
+			clock);
+		return PTR_ERR(clock);
+	}
+
+	adapter->ptp->clock = clock;
+
+	return 0;
+}
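Once ptp_clock_register() succeeds, the clock is exposed as a /dev/ptpN character device and the callbacks wired up in idpf_ptp_set_caps() become reachable through the standard dynamic posix-clock interface. A minimal userspace sketch; the device number is an assumption, and the FD_TO_CLOCKID mapping is the standard one used by the kernel's testptp tool, not part of this patch:

	#include <fcntl.h>
	#include <stdio.h>
	#include <time.h>
	#include <unistd.h>

	#define CLOCKFD 3
	#define FD_TO_CLOCKID(fd)	((~(clockid_t)(fd) << 3) | CLOCKFD)

	int main(void)
	{
		struct timespec ts;
		int fd = open("/dev/ptp0", O_RDONLY);	/* assumed device node */

		if (fd < 0)
			return 1;

		/* Ends up in idpf_ptp_gettimex64() via the PHC core */
		if (clock_gettime(FD_TO_CLOCKID(fd), &ts) == 0)
			printf("PHC time: %lld.%09ld\n",
			       (long long)ts.tv_sec, ts.tv_nsec);

		close(fd);
		return 0;
	}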
+
+/**
+ * idpf_ptp_release_vport_tstamp - Release the Tx timestamp trackers for a
+ *				   given vport.
+ * @vport: Virtual port structure
+ *
+ * Remove the queues and delete lists that track Tx timestamp entries for a
+ * given vport.
+ */
+static void idpf_ptp_release_vport_tstamp(struct idpf_vport *vport)
+{
+	struct idpf_ptp_tx_tstamp *ptp_tx_tstamp, *tmp;
+	struct list_head *head;
+
+	cancel_work_sync(&vport->tstamp_task);
+
+	/* Remove list with free latches */
+	spin_lock(&vport->tx_tstamp_caps->lock_free);
+
+	head = &vport->tx_tstamp_caps->latches_free;
+	list_for_each_entry_safe(ptp_tx_tstamp, tmp, head, list_member) {
+		list_del(&ptp_tx_tstamp->list_member);
+		kfree(ptp_tx_tstamp);
+	}
+
+	spin_unlock(&vport->tx_tstamp_caps->lock_free);
+
+	/* Remove list with latches in use */
+	mutex_lock(&vport->tstamp_stats.tx_hwtstamp_lock);
+	spin_lock(&vport->tx_tstamp_caps->lock_in_use);
+
+	head = &vport->tx_tstamp_caps->latches_in_use;
+	list_for_each_entry_safe(ptp_tx_tstamp, tmp, head, list_member) {
+		vport->tstamp_stats.tx_hwtstamp_flushed++;
+		list_del(&ptp_tx_tstamp->list_member);
+		kfree(ptp_tx_tstamp);
+	}
+
+	spin_unlock(&vport->tx_tstamp_caps->lock_in_use);
+
+	mutex_unlock(&vport->tstamp_stats.tx_hwtstamp_lock);
+	mutex_destroy(&vport->tstamp_stats.tx_hwtstamp_lock);
+
+	kfree(vport->tx_tstamp_caps);
+	vport->tx_tstamp_caps = NULL;
+}
+
+/**
+ * idpf_ptp_release_tstamp - Release the Tx timestamp trackers
+ * @adapter: Driver specific private structure
+ *
+ * Remove the queues and delete lists that track Tx timestamp entries.
+ */
+static void idpf_ptp_release_tstamp(struct idpf_adapter *adapter)
+{
+	idpf_for_each_vport(adapter, vport) {
+		if (!idpf_ptp_is_vport_tx_tstamp_ena(vport))
+			continue;
+
+		idpf_ptp_release_vport_tstamp(vport);
+	}
+}
+
+/**
+ * idpf_ptp_get_txq_tstamp_capability - Verify the timestamping capability
+ *					for a given Tx queue.
+ * @txq: Transmit queue
+ *
+ * Since performing timestamp flows requires reading the device clock value and
+ * the support in the Control Plane, the function checks both factors and
+ * summarizes the support for the timestamping.
+ *
+ * Return: true if the timestamping is supported, false otherwise.
+ */
+bool idpf_ptp_get_txq_tstamp_capability(struct idpf_tx_queue *txq)
+{
+	if (!txq || !txq->cached_tstamp_caps)
+		return false;
+	else if (txq->cached_tstamp_caps->access)
+		return true;
+	else
+		return false;
+}
+
+/**
+ * idpf_ptp_init - Initialize PTP hardware clock support
+ * @adapter: Driver specific private structure
+ *
+ * Set up the device for interacting with the PTP hardware clock for all
+ * functions. Function will allocate and register a ptp_clock with the
+ * PTP_1588_CLOCK infrastructure.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+int idpf_ptp_init(struct idpf_adapter *adapter)
+{
+	struct timespec64 ts;
+	int err;
+
+	if (!idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_PTP)) {
+		pci_dbg(adapter->pdev, "PTP capability is not detected\n");
+		return -EOPNOTSUPP;
+	}
+
+	adapter->ptp = kzalloc(sizeof(*adapter->ptp), GFP_KERNEL);
+	if (!adapter->ptp)
+		return -ENOMEM;
+
+	/* add a back pointer to adapter */
+	adapter->ptp->adapter = adapter;
+
+	if (adapter->dev_ops.reg_ops.ptp_reg_init)
+		adapter->dev_ops.reg_ops.ptp_reg_init(adapter);
+
+	err = idpf_ptp_get_caps(adapter);
+	if (err) {
+		pci_err(adapter->pdev, "Failed to get PTP caps err %d\n", err);
+		goto free_ptp;
+	}
+
+	err = idpf_ptp_create_clock(adapter);
+	if (err)
+		goto free_ptp;
+
+	if (adapter->ptp->get_dev_clk_time_access != IDPF_PTP_NONE)
+		ptp_schedule_worker(adapter->ptp->clock, 0);
+
+	/* Write the default increment time value if the clock adjustments
+	 * are enabled.
+	 */
+	if (adapter->ptp->adj_dev_clk_time_access != IDPF_PTP_NONE) {
+		err = idpf_ptp_adj_dev_clk_fine(adapter,
+						adapter->ptp->base_incval);
+		if (err)
+			goto remove_clock;
+	}
+
+	/* Write the initial time value if the set time operation is enabled */
+	if (adapter->ptp->set_dev_clk_time_access != IDPF_PTP_NONE) {
+		ts = ktime_to_timespec64(ktime_get_real());
+		err = idpf_ptp_settime64(&adapter->ptp->info, &ts);
+		if (err)
+			goto remove_clock;
+	}
+
+	pci_dbg(adapter->pdev, "PTP init successful\n");
+
+	return 0;
+
+remove_clock:
+	if (adapter->ptp->get_dev_clk_time_access != IDPF_PTP_NONE)
+		ptp_cancel_worker_sync(adapter->ptp->clock);
+
+	ptp_clock_unregister(adapter->ptp->clock);
+	adapter->ptp->clock = NULL;
+
+free_ptp:
+	kfree(adapter->ptp);
+	adapter->ptp = NULL;
+
+	return err;
+}
+
+/**
+ * idpf_ptp_release - Clear PTP hardware clock support
+ * @adapter: Driver specific private structure
+ */
+void idpf_ptp_release(struct idpf_adapter *adapter)
+{
+	struct idpf_ptp *ptp = adapter->ptp;
+
+	if (!ptp)
+		return;
+
+	if (ptp->tx_tstamp_access != IDPF_PTP_NONE &&
+	    ptp->get_dev_clk_time_access != IDPF_PTP_NONE)
+		idpf_ptp_release_tstamp(adapter);
+
+	if (ptp->clock) {
+		if (adapter->ptp->get_dev_clk_time_access != IDPF_PTP_NONE)
+			ptp_cancel_worker_sync(adapter->ptp->clock);
+
+		ptp_clock_unregister(ptp->clock);
+	}
+
+	kfree(ptp);
+	adapter->ptp = NULL;
+}
diff --git a/drivers/net/ethernet/intel/idpf/idpf_ptp.h b/drivers/net/ethernet/intel/idpf/idpf_ptp.h
new file mode 100644
index 0000000000000..c3ad2c5788682
--- /dev/null
+++ b/drivers/net/ethernet/intel/idpf/idpf_ptp.h
@@ -0,0 +1,370 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2024 Intel Corporation */
+
+#ifndef _IDPF_PTP_H
+#define _IDPF_PTP_H
+
+#include <linux/ptp_clock_kernel.h>
+
+/**
+ * struct idpf_ptp_cmd - PTP command masks
+ * @exec_cmd_mask: mask to trigger command execution
+ * @shtime_enable_mask: mask to enable shadow time
+ */
+struct idpf_ptp_cmd {
+	u32 exec_cmd_mask;
+	u32 shtime_enable_mask;
+};
+
+/**
+ * struct idpf_ptp_dev_clk_regs - PTP device registers
+ * @dev_clk_ns_l: low part of the device clock register
+ * @dev_clk_ns_h: high part of the device clock register
+ * @phy_clk_ns_l: low part of the PHY clock register
+ * @phy_clk_ns_h: high part of the PHY clock register
+ * @sys_time_ns_l: low part of the system time register
+ * @sys_time_ns_h: high part of the system time register
+ * @incval_l: low part of the main timer increment value register
+ * @incval_h: high part of the main timer increment value register
+ * @shadj_l: low part of the main timer shadow adjust register
+ * @shadj_h: high part of the main timer shadow adjust register
+ * @phy_incval_l: low part of the PHY timer increment value register
+ * @phy_incval_h: high part of the PHY timer increment value register
+ * @phy_shadj_l: low part of the PHY timer shadow adjust register
+ * @phy_shadj_h: high part of the PHY timer shadow adjust register
+ * @cmd: PTP command register
+ * @phy_cmd: PHY command register
+ * @cmd_sync: PTP command synchronization register
+ */
+struct idpf_ptp_dev_clk_regs {
+	/* Main clock */
+	void __iomem *dev_clk_ns_l;
+	void __iomem *dev_clk_ns_h;
+
+	/* PHY timer */
+	void __iomem *phy_clk_ns_l;
+	void __iomem *phy_clk_ns_h;
+
+	/* System time */
+	void __iomem *sys_time_ns_l;
+	void __iomem *sys_time_ns_h;
+
+	/* Main timer adjustments */
+	void __iomem *incval_l;
+	void __iomem *incval_h;
+	void __iomem *shadj_l;
+	void __iomem *shadj_h;
+
+	/* PHY timer adjustments */
+	void __iomem *phy_incval_l;
+	void __iomem *phy_incval_h;
+	void __iomem *phy_shadj_l;
+	void __iomem *phy_shadj_h;
+
+	/* Command */
+	void __iomem *cmd;
+	void __iomem *phy_cmd;
+	void __iomem *cmd_sync;
+};
+
+/**
+ * enum idpf_ptp_access - the type of access to PTP operations
+ * @IDPF_PTP_NONE: no access
+ * @IDPF_PTP_DIRECT: direct access through BAR registers
+ * @IDPF_PTP_MAILBOX: access through mailbox messages
+ */
+enum idpf_ptp_access {
+	IDPF_PTP_NONE = 0,
+	IDPF_PTP_DIRECT,
+	IDPF_PTP_MAILBOX,
+};
+
+/**
+ * struct idpf_ptp_secondary_mbx - PTP secondary mailbox
+ * @peer_mbx_q_id: PTP mailbox queue ID
+ * @peer_id: Peer ID for PTP Device Control daemon
+ * @valid: indicates whether the secondary mailbox is supported by the
+ *	   Control Plane
+ */
+struct idpf_ptp_secondary_mbx {
+	u16 peer_mbx_q_id;
+	u16 peer_id;
+	bool valid:1;
+};
+
+/**
+ * enum idpf_ptp_tx_tstamp_state - Tx timestamp states
+ * @IDPF_PTP_FREE: Tx timestamp index free to use
+ * @IDPF_PTP_REQUEST: Tx timestamp index set to the Tx descriptor
+ * @IDPF_PTP_READ_VALUE: Tx timestamp value ready to be read
+ */
+enum idpf_ptp_tx_tstamp_state {
+	IDPF_PTP_FREE,
+	IDPF_PTP_REQUEST,
+	IDPF_PTP_READ_VALUE,
+};
+
+/**
+ * struct idpf_ptp_tx_tstamp_status - Parameters to track Tx timestamp
+ * @skb: the pointer to the SKB that received the completion tag
+ * @state: the state of the Tx timestamp
+ */
+struct idpf_ptp_tx_tstamp_status {
+	struct sk_buff *skb;
+	enum idpf_ptp_tx_tstamp_state state;
+};
+
+/**
+ * struct idpf_ptp_tx_tstamp - Parameters for Tx timestamping
+ * @list_member: the list member structure
+ * @tx_latch_reg_offset_l: Tx tstamp latch low register offset
+ * @tx_latch_reg_offset_h: Tx tstamp latch high register offset
+ * @skb: the pointer to the SKB for this timestamp request
+ * @tstamp: the Tx tstamp value
+ * @idx: the index of the Tx tstamp
+ */
+struct idpf_ptp_tx_tstamp {
+	struct list_head list_member;
+	u32 tx_latch_reg_offset_l;
+	u32 tx_latch_reg_offset_h;
+	struct sk_buff *skb;
+	u64 tstamp;
+	u32 idx;
+};
+
+/**
+ * struct idpf_ptp_vport_tx_tstamp_caps - Tx timestamp capabilities
+ * @vport_id: the vport id
+ * @num_entries: the number of negotiated Tx timestamp entries
+ * @tstamp_ns_lo_bit: first bit for nanosecond part of the timestamp
+ * @lock_in_use: the lock to the used latches list
+ * @lock_free: the lock to free the latches list
+ * @lock_status: the lock to the status tracker
+ * @access: indicates an access to Tx timestamp
+ * @latches_free: the list of the free Tx timestamps latches
+ * @latches_in_use: the list of the used Tx timestamps latches
+ * @tx_tstamp_status: Tx tstamp status tracker
+ */
+struct idpf_ptp_vport_tx_tstamp_caps {
+	u32 vport_id;
+	u16 num_entries;
+	u16 tstamp_ns_lo_bit;
+	spinlock_t lock_in_use;
+	spinlock_t lock_free;
+	spinlock_t lock_status;
+	bool access:1;
+	struct list_head latches_free;
+	struct list_head latches_in_use;
+	struct idpf_ptp_tx_tstamp_status tx_tstamp_status[];
+};
+
+/**
+ * struct idpf_ptp - PTP parameters
+ * @info: structure defining PTP hardware capabilities
+ * @clock: pointer to registered PTP clock device
+ * @adapter: back pointer to the adapter
+ * @base_incval: base increment value of the PTP clock
+ * @max_adj: maximum adjustment of the PTP clock
+ * @cmd: HW specific command masks
+ * @cached_phc_time: a cached copy of the PHC time for timestamp extension
+ * @cached_phc_jiffies: jiffies when cached_phc_time was last updated
+ * @dev_clk_regs: the set of registers to access the device clock
+ * @caps: PTP capabilities negotiated with the Control Plane
+ * @get_dev_clk_time_access: access type for getting the device clock time
+ * @get_cross_tstamp_access: access type for the cross timestamping
+ * @set_dev_clk_time_access: access type for setting the device clock time
+ * @adj_dev_clk_time_access: access type for adjusting the device clock
+ * @tx_tstamp_access: access type for the Tx timestamp value read
+ * @rsv: reserved bits
+ * @secondary_mbx: parameters for using dedicated PTP mailbox
+ */
+struct idpf_ptp {
+	struct ptp_clock_info info;
+	struct ptp_clock *clock;
+	struct idpf_adapter *adapter;
+	u64 base_incval;
+	u64 max_adj;
+	struct idpf_ptp_cmd cmd;
+	u64 cached_phc_time;
+	unsigned long cached_phc_jiffies;
+	struct idpf_ptp_dev_clk_regs dev_clk_regs;
+	u32 caps;
+	enum idpf_ptp_access get_dev_clk_time_access:2;
+	enum idpf_ptp_access get_cross_tstamp_access:2;
+	enum idpf_ptp_access set_dev_clk_time_access:2;
+	enum idpf_ptp_access adj_dev_clk_time_access:2;
+	enum idpf_ptp_access tx_tstamp_access:2;
+	u8 rsv:6;
+	struct idpf_ptp_secondary_mbx secondary_mbx;
+};
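The Tx latch bookkeeping above feeds the standard socket timestamping API: a process opts in with the hwtstamp ioctl (which reaches idpf_ptp_set_timestamp_mode()) plus SO_TIMESTAMPING, and completed Tx stamps then come back on the socket error queue. A userspace sketch under those assumptions; the interface name and error handling are placeholders, not part of this patch:

	#include <linux/net_tstamp.h>
	#include <linux/sockios.h>
	#include <net/if.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>

	static int enable_hw_tx_tstamps(int sock, const char *ifname)
	{
		struct hwtstamp_config cfg = {
			.tx_type = HWTSTAMP_TX_ON,
			.rx_filter = HWTSTAMP_FILTER_ALL,
		};
		struct ifreq ifr = { 0 };
		int flags = SOF_TIMESTAMPING_TX_HARDWARE |
			    SOF_TIMESTAMPING_RAW_HARDWARE;

		strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
		ifr.ifr_data = (void *)&cfg;

		/* Reaches idpf_ptp_set_timestamp_mode() via the hwtstamp path */
		if (ioctl(sock, SIOCSHWTSTAMP, &ifr) < 0)
			return -1;

		/* Completed Tx stamps arrive on the socket error queue */
		return setsockopt(sock, SOL_SOCKET, SO_TIMESTAMPING, &flags,
				  sizeof(flags));
	}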
+
+/**
+ * idpf_ptp_info_to_adapter - get driver adapter struct from ptp_clock_info
+ * @info: pointer to ptp_clock_info struct
+ *
+ * Return: pointer to the corresponding adapter struct
+ */
+static inline struct idpf_adapter *
+idpf_ptp_info_to_adapter(const struct ptp_clock_info *info)
+{
+	const struct idpf_ptp *ptp = container_of_const(info, struct idpf_ptp,
+							info);
+	return ptp->adapter;
+}
+
+/**
+ * struct idpf_ptp_dev_timers - System time and device time values
+ * @sys_time_ns: system time value expressed in nanoseconds
+ * @dev_clk_time_ns: device clock time value expressed in nanoseconds
+ */
+struct idpf_ptp_dev_timers {
+	u64 sys_time_ns;
+	u64 dev_clk_time_ns;
+};
+
+/**
+ * idpf_ptp_is_vport_tx_tstamp_ena - Verify the Tx timestamping enablement for
+ *				     a given vport.
+ * @vport: Virtual port structure
+ *
+ * Tx timestamp capabilities are negotiated with the Control Plane only if the
+ * device clock value can be read, the Tx timestamp access type is other than
+ * NONE, and the PTP clock for the adapter is created. When all those
+ * conditions are satisfied, the Tx timestamp feature is enabled and
+ * tx_tstamp_caps is allocated and filled.
+ *
+ * Return: true if the Tx timestamping is enabled, false otherwise.
+ */
+static inline bool idpf_ptp_is_vport_tx_tstamp_ena(struct idpf_vport *vport)
+{
+	if (!vport->tx_tstamp_caps)
+		return false;
+	else
+		return true;
+}
+
+/**
+ * idpf_ptp_is_vport_rx_tstamp_ena - Verify the Rx timestamping enablement for
+ *				     a given vport.
+ * @vport: Virtual port structure
+ *
+ * The Rx timestamp feature is enabled if the PTP clock for the adapter is
+ * created and it is possible to read the value of the device clock. The second
+ * condition comes from the need to extend the Rx timestamp value to 64 bits
+ * based on the current device clock time.
+ *
+ * Return: true if the Rx timestamping is enabled, false otherwise.
+ */
+static inline bool idpf_ptp_is_vport_rx_tstamp_ena(struct idpf_vport *vport)
+{
+	if (!vport->adapter->ptp ||
+	    vport->adapter->ptp->get_dev_clk_time_access == IDPF_PTP_NONE)
+		return false;
+	else
+		return true;
+}
+
+#if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
+int idpf_ptp_init(struct idpf_adapter *adapter);
+void idpf_ptp_release(struct idpf_adapter *adapter);
+int idpf_ptp_get_caps(struct idpf_adapter *adapter);
+void idpf_ptp_get_features_access(const struct idpf_adapter *adapter);
+bool idpf_ptp_get_txq_tstamp_capability(struct idpf_tx_queue *txq);
+int idpf_ptp_get_dev_clk_time(struct idpf_adapter *adapter,
+			      struct idpf_ptp_dev_timers *dev_clk_time);
+int idpf_ptp_get_cross_time(struct idpf_adapter *adapter,
+			    struct idpf_ptp_dev_timers *cross_time);
+int idpf_ptp_set_dev_clk_time(struct idpf_adapter *adapter, u64 time);
+int idpf_ptp_adj_dev_clk_fine(struct idpf_adapter *adapter, u64 incval);
+int idpf_ptp_adj_dev_clk_time(struct idpf_adapter *adapter, s64 delta);
+int idpf_ptp_get_vport_tstamps_caps(struct idpf_vport *vport);
+int idpf_ptp_get_tx_tstamp(struct idpf_vport *vport);
+int idpf_ptp_set_timestamp_mode(struct idpf_vport *vport,
+				struct kernel_hwtstamp_config *config);
+u64 idpf_ptp_extend_ts(struct idpf_vport *vport, u64 in_tstamp);
+u64 idpf_ptp_tstamp_extend_32b_to_64b(u64 cached_phc_time, u32 in_timestamp);
+int idpf_ptp_request_ts(struct idpf_tx_queue *tx_q, struct sk_buff *skb,
+			u32 *idx);
+void idpf_tstamp_task(struct work_struct *work);
+#else /* CONFIG_PTP_1588_CLOCK */
+static inline int idpf_ptp_init(struct idpf_adapter *adapter)
+{
+	return 0;
+}
+
+static inline void idpf_ptp_release(struct idpf_adapter *adapter) { }
+
+static inline int idpf_ptp_get_caps(struct idpf_adapter *adapter)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline void
+idpf_ptp_get_features_access(const struct idpf_adapter *adapter) { }
+
+static inline bool
+idpf_ptp_get_txq_tstamp_capability(struct idpf_tx_queue *txq)
+{
+	return false;
+}
+
+static inline int
+idpf_ptp_get_dev_clk_time(struct idpf_adapter *adapter,
+			  struct idpf_ptp_dev_timers *dev_clk_time)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline int
+idpf_ptp_get_cross_time(struct idpf_adapter *adapter,
+			struct idpf_ptp_dev_timers *cross_time)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline int idpf_ptp_set_dev_clk_time(struct idpf_adapter *adapter,
+					    u64 time)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline int idpf_ptp_adj_dev_clk_fine(struct idpf_adapter *adapter,
+					    u64 incval)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline int idpf_ptp_adj_dev_clk_time(struct idpf_adapter *adapter,
+					    s64 delta)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline int idpf_ptp_get_vport_tstamps_caps(struct idpf_vport *vport)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline int idpf_ptp_get_tx_tstamp(struct idpf_vport *vport)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline int
+idpf_ptp_set_timestamp_mode(struct idpf_vport *vport,
+			    struct kernel_hwtstamp_config *config)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline u64 idpf_ptp_extend_ts(struct idpf_vport *vport, u64 in_tstamp)
+{
+	return 0;
+}
+
+static inline u64 idpf_ptp_tstamp_extend_32b_to_64b(u64 cached_phc_time,
+						    u32 in_timestamp)
+{
+	return 0;
+}
+
+static inline int idpf_ptp_request_ts(struct idpf_tx_queue *tx_q,
+				      struct sk_buff *skb, u32 *idx)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline void idpf_tstamp_task(struct work_struct *work) { }
+#endif /* CONFIG_PTP_1588_CLOCK */
+#endif /* _IDPF_PTP_H */
diff --git a/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
b/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c index dfd7cf1d9aa0a..eae1b6f474e62 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c +++ b/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c @@ -595,7 +595,7 @@ static bool idpf_rx_singleq_is_non_eop(const union virtchnl2_rx_desc *rx_desc) */ static void idpf_rx_singleq_csum(struct idpf_rx_queue *rxq, struct sk_buff *skb, - struct idpf_rx_csum_decoded csum_bits, + struct libeth_rx_csum csum_bits, struct libeth_rx_pt decoded) { bool ipv4, ipv6; @@ -661,10 +661,10 @@ static void idpf_rx_singleq_csum(struct idpf_rx_queue *rxq, * * Return: parsed checksum status. **/ -static struct idpf_rx_csum_decoded +static struct libeth_rx_csum idpf_rx_singleq_base_csum(const union virtchnl2_rx_desc *rx_desc) { - struct idpf_rx_csum_decoded csum_bits = { }; + struct libeth_rx_csum csum_bits = { }; u32 rx_error, rx_status; u64 qword; @@ -696,10 +696,10 @@ idpf_rx_singleq_base_csum(const union virtchnl2_rx_desc *rx_desc) * * Return: parsed checksum status. **/ -static struct idpf_rx_csum_decoded +static struct libeth_rx_csum idpf_rx_singleq_flex_csum(const union virtchnl2_rx_desc *rx_desc) { - struct idpf_rx_csum_decoded csum_bits = { }; + struct libeth_rx_csum csum_bits = { }; u16 rx_status0, rx_status1; rx_status0 = le16_to_cpu(rx_desc->flex_nic_wb.status_error0); @@ -798,7 +798,7 @@ idpf_rx_singleq_process_skb_fields(struct idpf_rx_queue *rx_q, u16 ptype) { struct libeth_rx_pt decoded = rx_q->rx_ptype_lkup[ptype]; - struct idpf_rx_csum_decoded csum_bits; + struct libeth_rx_csum csum_bits; /* modifies the skb - consumes the enet header */ skb->protocol = eth_type_trans(skb, rx_q->netdev); @@ -891,6 +891,7 @@ bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_rx_queue *rx_q, * idpf_rx_singleq_extract_base_fields - Extract fields from the Rx descriptor * @rx_desc: the descriptor to process * @fields: storage for extracted values + * @ptype: pointer that will store packet type * * Decode the Rx descriptor and extract relevant information including the * size and Rx packet type. @@ -900,20 +901,21 @@ bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_rx_queue *rx_q, */ static void idpf_rx_singleq_extract_base_fields(const union virtchnl2_rx_desc *rx_desc, - struct idpf_rx_extracted *fields) + struct libeth_rqe_info *fields, u32 *ptype) { u64 qword; qword = le64_to_cpu(rx_desc->base_wb.qword1.status_error_ptype_len); - fields->size = FIELD_GET(VIRTCHNL2_RX_BASE_DESC_QW1_LEN_PBUF_M, qword); - fields->rx_ptype = FIELD_GET(VIRTCHNL2_RX_BASE_DESC_QW1_PTYPE_M, qword); + fields->len = FIELD_GET(VIRTCHNL2_RX_BASE_DESC_QW1_LEN_PBUF_M, qword); + *ptype = FIELD_GET(VIRTCHNL2_RX_BASE_DESC_QW1_PTYPE_M, qword); } /** * idpf_rx_singleq_extract_flex_fields - Extract fields from the Rx descriptor * @rx_desc: the descriptor to process * @fields: storage for extracted values + * @ptype: pointer that will store packet type * * Decode the Rx descriptor and extract relevant information including the * size and Rx packet type. 
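These hunks swap the driver-private idpf_rx_extracted for the generic libeth_rqe_info, which carries the buffer length but not the hardware packet type, so the ptype now travels through a dedicated out-parameter. The resulting caller-side convention, condensed from the hunks around this point (a sketch with the surrounding Rx-loop setup omitted):

	struct libeth_rqe_info fields = { };
	u32 ptype;

	idpf_rx_singleq_extract_fields(rx_q, rx_desc, &fields, &ptype);

	/* fields.len sizes the buffer handling, ptype drives the ptype lookup */
	if (libeth_rx_sync_for_cpu(rx_buf, fields.len))
		skb = idpf_rx_build_skb(rx_buf, fields.len);

	idpf_rx_singleq_process_skb_fields(rx_q, skb, rx_desc, ptype);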
@@ -923,12 +925,12 @@ idpf_rx_singleq_extract_base_fields(const union virtchnl2_rx_desc *rx_desc, */ static void idpf_rx_singleq_extract_flex_fields(const union virtchnl2_rx_desc *rx_desc, - struct idpf_rx_extracted *fields) + struct libeth_rqe_info *fields, u32 *ptype) { - fields->size = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_PKT_LEN_M, - le16_to_cpu(rx_desc->flex_nic_wb.pkt_len)); - fields->rx_ptype = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_PTYPE_M, - le16_to_cpu(rx_desc->flex_nic_wb.ptype_flex_flags0)); + fields->len = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_PKT_LEN_M, + le16_to_cpu(rx_desc->flex_nic_wb.pkt_len)); + *ptype = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_PTYPE_M, + le16_to_cpu(rx_desc->flex_nic_wb.ptype_flex_flags0)); } /** @@ -936,17 +938,18 @@ idpf_rx_singleq_extract_flex_fields(const union virtchnl2_rx_desc *rx_desc, * @rx_q: Rx descriptor queue * @rx_desc: the descriptor to process * @fields: storage for extracted values + * @ptype: pointer that will store packet type * */ static void idpf_rx_singleq_extract_fields(const struct idpf_rx_queue *rx_q, const union virtchnl2_rx_desc *rx_desc, - struct idpf_rx_extracted *fields) + struct libeth_rqe_info *fields, u32 *ptype) { if (rx_q->rxdids == VIRTCHNL2_RXDID_1_32B_BASE_M) - idpf_rx_singleq_extract_base_fields(rx_desc, fields); + idpf_rx_singleq_extract_base_fields(rx_desc, fields, ptype); else - idpf_rx_singleq_extract_flex_fields(rx_desc, fields); + idpf_rx_singleq_extract_flex_fields(rx_desc, fields, ptype); } /** @@ -966,9 +969,10 @@ static int idpf_rx_singleq_clean(struct idpf_rx_queue *rx_q, int budget) /* Process Rx packets bounded by budget */ while (likely(total_rx_pkts < (unsigned int)budget)) { - struct idpf_rx_extracted fields = { }; + struct libeth_rqe_info fields = { }; union virtchnl2_rx_desc *rx_desc; struct idpf_rx_buf *rx_buf; + u32 ptype; /* get the Rx desc from Rx queue based on 'next_to_clean' */ rx_desc = &rx_q->rx[ntc]; @@ -989,16 +993,16 @@ static int idpf_rx_singleq_clean(struct idpf_rx_queue *rx_q, int budget) */ dma_rmb(); - idpf_rx_singleq_extract_fields(rx_q, rx_desc, &fields); + idpf_rx_singleq_extract_fields(rx_q, rx_desc, &fields, &ptype); rx_buf = &rx_q->rx_buf[ntc]; - if (!libeth_rx_sync_for_cpu(rx_buf, fields.size)) + if (!libeth_rx_sync_for_cpu(rx_buf, fields.len)) goto skip_data; if (skb) - idpf_rx_add_frag(rx_buf, skb, fields.size); + idpf_rx_add_frag(rx_buf, skb, fields.len); else - skb = idpf_rx_build_skb(rx_buf, fields.size); + skb = idpf_rx_build_skb(rx_buf, fields.len); /* exit if we failed to retrieve a buffer */ if (!skb) @@ -1033,8 +1037,7 @@ static int idpf_rx_singleq_clean(struct idpf_rx_queue *rx_q, int budget) total_rx_bytes += skb->len; /* protocol */ - idpf_rx_singleq_process_skb_fields(rx_q, skb, - rx_desc, fields.rx_ptype); + idpf_rx_singleq_process_skb_fields(rx_q, skb, rx_desc, ptype); /* send completed skb up the stack */ napi_gro_receive(rx_q->pp->p.napi, skb); diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c index 9be6a6b59c4e1..5137e9d15adb4 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c +++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c @@ -5,6 +5,7 @@ #include #include "idpf.h" +#include "idpf_ptp.h" #include "idpf_virtchnl.h" struct idpf_tx_stash { @@ -1107,6 +1108,8 @@ void idpf_vport_queues_rel(struct idpf_vport *vport) */ static int idpf_vport_init_fast_path_txqs(struct idpf_vport *vport) { + struct idpf_ptp_vport_tx_tstamp_caps *caps = vport->tx_tstamp_caps; + struct work_struct *tstamp_task = &vport->tstamp_task; int i, j, k 
= 0; vport->txqs = kcalloc(vport->num_txq, sizeof(*vport->txqs), @@ -1121,6 +1124,12 @@ static int idpf_vport_init_fast_path_txqs(struct idpf_vport *vport) for (j = 0; j < tx_grp->num_txq; j++, k++) { vport->txqs[k] = tx_grp->txqs[j]; vport->txqs[k]->idx = k; + + if (!caps) + continue; + + vport->txqs[k]->cached_tstamp_caps = caps; + vport->txqs[k]->tstamp_task = tstamp_task; } } @@ -1654,6 +1663,40 @@ static void idpf_tx_handle_sw_marker(struct idpf_tx_queue *tx_q) wake_up(&vport->sw_marker_wq); } +/** + * idpf_tx_read_tstamp - schedule a work to read Tx timestamp value + * @txq: queue to read the timestamp from + * @skb: socket buffer to provide Tx timestamp value + * + * Schedule a work to read Tx timestamp value generated once the packet is + * transmitted. + */ +static void idpf_tx_read_tstamp(struct idpf_tx_queue *txq, struct sk_buff *skb) +{ + struct idpf_ptp_vport_tx_tstamp_caps *tx_tstamp_caps; + struct idpf_ptp_tx_tstamp_status *tx_tstamp_status; + + tx_tstamp_caps = txq->cached_tstamp_caps; + spin_lock_bh(&tx_tstamp_caps->lock_status); + + for (u32 i = 0; i < tx_tstamp_caps->num_entries; i++) { + tx_tstamp_status = &tx_tstamp_caps->tx_tstamp_status[i]; + if (tx_tstamp_status->state != IDPF_PTP_FREE) + continue; + + tx_tstamp_status->skb = skb; + tx_tstamp_status->state = IDPF_PTP_REQUEST; + + /* Fetch timestamp from completion descriptor through + * virtchnl msg to report to stack. + */ + queue_work(system_unbound_wq, txq->tstamp_task); + break; + } + + spin_unlock_bh(&tx_tstamp_caps->lock_status); +} + /** * idpf_tx_clean_stashed_bufs - clean bufs that were stored for * out of order completions @@ -1682,6 +1725,11 @@ static void idpf_tx_clean_stashed_bufs(struct idpf_tx_queue *txq, continue; hash_del(&stash->hlist); + + if (stash->buf.type == LIBETH_SQE_SKB && + (skb_shinfo(stash->buf.skb)->tx_flags & SKBTX_IN_PROGRESS)) + idpf_tx_read_tstamp(txq, stash->buf.skb); + libeth_tx_complete(&stash->buf, &cp); /* Push shadow buf back onto stack */ @@ -1876,8 +1924,12 @@ static bool idpf_tx_clean_buf_ring(struct idpf_tx_queue *txq, u16 compl_tag, idpf_tx_buf_compl_tag(tx_buf) != compl_tag)) return false; - if (tx_buf->type == LIBETH_SQE_SKB) + if (tx_buf->type == LIBETH_SQE_SKB) { + if (skb_shinfo(tx_buf->skb)->tx_flags & SKBTX_IN_PROGRESS) + idpf_tx_read_tstamp(txq, tx_buf->skb); + libeth_tx_complete(tx_buf, &cp); + } idpf_tx_clean_buf_ring_bump_ntc(txq, idx, tx_buf); @@ -2127,7 +2179,7 @@ void idpf_tx_splitq_build_flow_desc(union idpf_tx_flex_desc *desc, struct idpf_tx_splitq_params *params, u16 td_cmd, u16 size) { - desc->flow.qw1.cmd_dtype = (u16)params->dtype | td_cmd; + *(u32 *)&desc->flow.qw1.cmd_dtype = (u8)(params->dtype | td_cmd); desc->flow.qw1.rxr_bufsize = cpu_to_le16((u16)size); desc->flow.qw1.compl_tag = cpu_to_le16(params->compl_tag); } @@ -2296,7 +2348,7 @@ void idpf_tx_dma_map_error(struct idpf_tx_queue *txq, struct sk_buff *skb, * descriptor. Reset that here. 
*/ tx_desc = &txq->flex_tx[idx]; - memset(tx_desc, 0, sizeof(struct idpf_flex_tx_ctx_desc)); + memset(tx_desc, 0, sizeof(*tx_desc)); if (idx == 0) idx = txq->desc_count; idx--; @@ -2699,10 +2751,10 @@ static bool idpf_chk_linearize(struct sk_buff *skb, unsigned int max_bufs, * Since the TX buffer rings mimics the descriptor ring, update the tx buffer * ring entry to reflect that this index is a context descriptor */ -static struct idpf_flex_tx_ctx_desc * +static union idpf_flex_tx_ctx_desc * idpf_tx_splitq_get_ctx_desc(struct idpf_tx_queue *txq) { - struct idpf_flex_tx_ctx_desc *desc; + union idpf_flex_tx_ctx_desc *desc; int i = txq->next_to_use; txq->tx_buf[i].type = LIBETH_SQE_CTX; @@ -2732,6 +2784,73 @@ netdev_tx_t idpf_tx_drop_skb(struct idpf_tx_queue *tx_q, struct sk_buff *skb) return NETDEV_TX_OK; } +#if (IS_ENABLED(CONFIG_PTP_1588_CLOCK)) +/** + * idpf_tx_tstamp - set up context descriptor for hardware timestamp + * @tx_q: queue to send buffer on + * @skb: pointer to the SKB we're sending + * @off: pointer to the offload struct + * + * Return: Positive index number on success, negative otherwise. + */ +static int idpf_tx_tstamp(struct idpf_tx_queue *tx_q, struct sk_buff *skb, + struct idpf_tx_offload_params *off) +{ + int err, idx; + + /* only timestamp the outbound packet if the user has requested it */ + if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))) + return -1; + + if (!idpf_ptp_get_txq_tstamp_capability(tx_q)) + return -1; + + /* Tx timestamps cannot be sampled when doing TSO */ + if (off->tx_flags & IDPF_TX_FLAGS_TSO) + return -1; + + /* Grab an open timestamp slot */ + err = idpf_ptp_request_ts(tx_q, skb, &idx); + if (err) { + u64_stats_update_begin(&tx_q->stats_sync); + u64_stats_inc(&tx_q->q_stats.tstamp_skipped); + u64_stats_update_end(&tx_q->stats_sync); + + return -1; + } + + off->tx_flags |= IDPF_TX_FLAGS_TSYN; + + return idx; +} + +/** + * idpf_tx_set_tstamp_desc - Set the Tx descriptor fields needed to generate + * PHY Tx timestamp + * @ctx_desc: Context descriptor + * @idx: Index of the Tx timestamp latch + */ +static void idpf_tx_set_tstamp_desc(union idpf_flex_tx_ctx_desc *ctx_desc, + u32 idx) +{ + ctx_desc->tsyn.qw1 = le64_encode_bits(IDPF_TX_DESC_DTYPE_CTX, + IDPF_TX_CTX_DTYPE_M) | + le64_encode_bits(IDPF_TX_CTX_DESC_TSYN, + IDPF_TX_CTX_CMD_M) | + le64_encode_bits(idx, IDPF_TX_CTX_TSYN_REG_M); +} +#else /* CONFIG_PTP_1588_CLOCK */ +static int idpf_tx_tstamp(struct idpf_tx_queue *tx_q, struct sk_buff *skb, + struct idpf_tx_offload_params *off) +{ + return -1; +} + +static void idpf_tx_set_tstamp_desc(union idpf_flex_tx_ctx_desc *ctx_desc, + u32 idx) +{ } +#endif /* CONFIG_PTP_1588_CLOCK */ + /** * idpf_tx_splitq_frame - Sends buffer on Tx ring using flex descriptors * @skb: send buffer @@ -2743,9 +2862,10 @@ static netdev_tx_t idpf_tx_splitq_frame(struct sk_buff *skb, struct idpf_tx_queue *tx_q) { struct idpf_tx_splitq_params tx_params = { }; + union idpf_flex_tx_ctx_desc *ctx_desc; struct idpf_tx_buf *first; unsigned int count; - int tso; + int tso, idx; count = idpf_tx_desc_count_required(tx_q, skb); if (unlikely(!count)) @@ -2765,8 +2885,7 @@ static netdev_tx_t idpf_tx_splitq_frame(struct sk_buff *skb, if (tso) { /* If tso is needed, set up context desc */ - struct idpf_flex_tx_ctx_desc *ctx_desc = - idpf_tx_splitq_get_ctx_desc(tx_q); + ctx_desc = idpf_tx_splitq_get_ctx_desc(tx_q); ctx_desc->tso.qw1.cmd_dtype = cpu_to_le16(IDPF_TX_DESC_DTYPE_FLEX_TSO_CTX | @@ -2784,6 +2903,12 @@ static netdev_tx_t idpf_tx_splitq_frame(struct sk_buff *skb, 
u64_stats_update_end(&tx_q->stats_sync); } + idx = idpf_tx_tstamp(tx_q, skb, &tx_params.offload); + if (idx != -1) { + ctx_desc = idpf_tx_splitq_get_ctx_desc(tx_q); + idpf_tx_set_tstamp_desc(ctx_desc, idx); + } + /* record the location of the first descriptor for this packet */ first = &tx_q->tx_buf[tx_q->next_to_use]; first->skb = skb; @@ -2895,7 +3020,7 @@ idpf_rx_hash(const struct idpf_rx_queue *rxq, struct sk_buff *skb, * skb->protocol must be set before this function is called */ static void idpf_rx_csum(struct idpf_rx_queue *rxq, struct sk_buff *skb, - struct idpf_rx_csum_decoded csum_bits, + struct libeth_rx_csum csum_bits, struct libeth_rx_pt decoded) { bool ipv4, ipv6; @@ -2923,7 +3048,7 @@ static void idpf_rx_csum(struct idpf_rx_queue *rxq, struct sk_buff *skb, if (unlikely(csum_bits.l4e)) goto checksum_fail; - if (csum_bits.raw_csum_inv || + if (!csum_bits.raw_csum_valid || decoded.inner_prot == LIBETH_RX_PT_INNER_SCTP) { skb->ip_summed = CHECKSUM_UNNECESSARY; return; @@ -2946,10 +3071,10 @@ static void idpf_rx_csum(struct idpf_rx_queue *rxq, struct sk_buff *skb, * * Return: parsed checksum status. **/ -static struct idpf_rx_csum_decoded +static struct libeth_rx_csum idpf_rx_splitq_extract_csum_bits(const struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc) { - struct idpf_rx_csum_decoded csum = { }; + struct libeth_rx_csum csum = { }; u8 qword0, qword1; qword0 = rx_desc->status_err0_qw0; @@ -2965,9 +3090,9 @@ idpf_rx_splitq_extract_csum_bits(const struct virtchnl2_rx_flex_desc_adv_nic_3 * qword1); csum.ipv6exadd = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_IPV6EXADD_M, qword0); - csum.raw_csum_inv = - le16_get_bits(rx_desc->ptype_err_fflags0, - VIRTCHNL2_RX_FLEX_DESC_ADV_RAW_CSUM_INV_M); + csum.raw_csum_valid = + !le16_get_bits(rx_desc->ptype_err_fflags0, + VIRTCHNL2_RX_FLEX_DESC_ADV_RAW_CSUM_INV_M); csum.raw_csum = le16_to_cpu(rx_desc->misc.raw_cs); return csum; @@ -3013,7 +3138,6 @@ static int idpf_rx_rsc(struct idpf_rx_queue *rxq, struct sk_buff *skb, skb_shinfo(skb)->gso_size = rsc_seg_len; skb_reset_network_header(skb); - len = skb->len - skb_transport_offset(skb); if (ipv4) { struct iphdr *ipv4h = ip_hdr(skb); @@ -3022,6 +3146,7 @@ static int idpf_rx_rsc(struct idpf_rx_queue *rxq, struct sk_buff *skb, /* Reset and set transport header offset in skb */ skb_set_transport_header(skb, sizeof(struct iphdr)); + len = skb->len - skb_transport_offset(skb); /* Compute the TCP pseudo header checksum*/ tcp_hdr(skb)->check = @@ -3031,6 +3156,7 @@ static int idpf_rx_rsc(struct idpf_rx_queue *rxq, struct sk_buff *skb, skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; skb_set_transport_header(skb, sizeof(struct ipv6hdr)); + len = skb->len - skb_transport_offset(skb); tcp_hdr(skb)->check = ~tcp_v6_check(len, &ipv6h->saddr, &ipv6h->daddr, 0); } @@ -3044,6 +3170,33 @@ static int idpf_rx_rsc(struct idpf_rx_queue *rxq, struct sk_buff *skb, return 0; } +/** + * idpf_rx_hwtstamp - check for an RX timestamp and pass up the stack + * @rxq: pointer to the rx queue that receives the timestamp + * @rx_desc: pointer to rx descriptor containing timestamp + * @skb: skb to put timestamp in + */ +static void +idpf_rx_hwtstamp(const struct idpf_rx_queue *rxq, + const struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc, + struct sk_buff *skb) +{ + u64 cached_time, ts_ns; + u32 ts_high; + + if (!(rx_desc->ts_low & VIRTCHNL2_RX_FLEX_TSTAMP_VALID)) + return; + + cached_time = READ_ONCE(rxq->cached_phc_time); + + ts_high = le32_to_cpu(rx_desc->ts_high); + ts_ns = idpf_ptp_tstamp_extend_32b_to_64b(cached_time, ts_high); 
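+
+	/* Only 32 bits of nominal nanoseconds come from the descriptor;
+	 * the cached PHC time, refreshed by the periodic aux work, supplies
+	 * the missing upper bits.
+	 */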
+ + *skb_hwtstamps(skb) = (struct skb_shared_hwtstamps) { + .hwtstamp = ns_to_ktime(ts_ns), + }; +} + /** * idpf_rx_process_skb_fields - Populate skb header fields from Rx descriptor * @rxq: Rx descriptor ring packet is being transacted on @@ -3058,7 +3211,7 @@ static int idpf_rx_process_skb_fields(struct idpf_rx_queue *rxq, struct sk_buff *skb, const struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc) { - struct idpf_rx_csum_decoded csum_bits; + struct libeth_rx_csum csum_bits; struct libeth_rx_pt decoded; u16 rx_ptype; @@ -3069,6 +3222,9 @@ idpf_rx_process_skb_fields(struct idpf_rx_queue *rxq, struct sk_buff *skb, /* process RSS/hash */ idpf_rx_hash(rxq, skb, rx_desc, decoded); + if (idpf_queue_has(PTP, rxq)) + idpf_rx_hwtstamp(rxq, rx_desc, skb); + skb->protocol = eth_type_trans(skb, rxq->netdev); skb_record_rx_queue(skb, rxq->idx); @@ -3551,8 +3707,6 @@ void idpf_vport_intr_rel(struct idpf_vport *vport) q_vector->tx = NULL; kfree(q_vector->rx); q_vector->rx = NULL; - - free_cpumask_var(q_vector->affinity_mask); } kfree(vport->q_vectors); @@ -3579,8 +3733,6 @@ static void idpf_vport_intr_rel_irq(struct idpf_vport *vport) vidx = vport->q_vector_idxs[vector]; irq_num = adapter->msix_entries[vidx].vector; - /* clear the affinity_mask in the IRQ descriptor */ - irq_set_affinity_hint(irq_num, NULL); kfree(free_irq(irq_num, q_vector)); } } @@ -3768,8 +3920,6 @@ static int idpf_vport_intr_req_irq(struct idpf_vport *vport) "Request_irq failed, error: %d\n", err); goto free_q_irqs; } - /* assign the mask for this irq */ - irq_set_affinity_hint(irq_num, q_vector->affinity_mask); } return 0; @@ -4181,7 +4331,8 @@ static int idpf_vport_intr_init_vec_idx(struct idpf_vport *vport) static void idpf_vport_intr_napi_add_all(struct idpf_vport *vport) { int (*napi_poll)(struct napi_struct *napi, int budget); - u16 v_idx; + u16 v_idx, qv_idx; + int irq_num; if (idpf_is_queue_model_split(vport->txq_model)) napi_poll = idpf_vport_splitq_napi_poll; @@ -4190,12 +4341,12 @@ static void idpf_vport_intr_napi_add_all(struct idpf_vport *vport) for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) { struct idpf_q_vector *q_vector = &vport->q_vectors[v_idx]; + qv_idx = vport->q_vector_idxs[v_idx]; + irq_num = vport->adapter->msix_entries[qv_idx].vector; - netif_napi_add(vport->netdev, &q_vector->napi, napi_poll); - - /* only set affinity_mask if the CPU is online */ - if (cpu_online(v_idx)) - cpumask_set_cpu(v_idx, q_vector->affinity_mask); + netif_napi_add_config(vport->netdev, &q_vector->napi, + napi_poll, v_idx); + netif_napi_set_irq(&q_vector->napi, irq_num); } } @@ -4239,9 +4390,6 @@ int idpf_vport_intr_alloc(struct idpf_vport *vport) q_vector->rx_intr_mode = IDPF_ITR_DYNAMIC; q_vector->rx_itr_idx = VIRTCHNL2_ITR_IDX_0; - if (!zalloc_cpumask_var(&q_vector->affinity_mask, GFP_KERNEL)) - goto error; - q_vector->tx = kcalloc(txqs_per_vector, sizeof(*q_vector->tx), GFP_KERNEL); if (!q_vector->tx) diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.h b/drivers/net/ethernet/intel/idpf/idpf_txrx.h index 0f71a6f5557b9..c779fe71df999 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_txrx.h +++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.h @@ -142,6 +142,7 @@ do { \ #define IDPF_TX_FLAGS_IPV4 BIT(1) #define IDPF_TX_FLAGS_IPV6 BIT(2) #define IDPF_TX_FLAGS_TUNNEL BIT(3) +#define IDPF_TX_FLAGS_TSYN BIT(4) union idpf_tx_flex_desc { struct idpf_flex_tx_desc q; /* queue based scheduling */ @@ -213,25 +214,6 @@ enum idpf_tx_ctx_desc_eipt_offload { IDPF_TX_CTX_EXT_IP_IPV4 = 0x3 }; -/* Checksum offload bits decoded from the 
receive descriptor. */ -struct idpf_rx_csum_decoded { - u32 l3l4p : 1; - u32 ipe : 1; - u32 eipe : 1; - u32 eudpe : 1; - u32 ipv6exadd : 1; - u32 l4e : 1; - u32 pprs : 1; - u32 nat : 1; - u32 raw_csum_inv : 1; - u32 raw_csum : 16; -}; - -struct idpf_rx_extracted { - unsigned int size; - u16 rx_ptype; -}; - #define IDPF_TX_COMPLQ_CLEAN_BUDGET 256 #define IDPF_TX_MIN_PKT_LEN 17 #define IDPF_TX_DESCS_FOR_SKB_DATA_PTR 1 @@ -308,6 +290,8 @@ struct idpf_ptype_state { * @__IDPF_Q_POLL_MODE: Enable poll mode * @__IDPF_Q_CRC_EN: enable CRC offload in singleq mode * @__IDPF_Q_HSPLIT_EN: enable header split on Rx (splitq) + * @__IDPF_Q_PTP: indicates whether the Rx timestamping is enabled for the + * queue * @__IDPF_Q_FLAGS_NBITS: Must be last */ enum idpf_queue_flags_t { @@ -318,6 +302,7 @@ enum idpf_queue_flags_t { __IDPF_Q_POLL_MODE, __IDPF_Q_CRC_EN, __IDPF_Q_HSPLIT_EN, + __IDPF_Q_PTP, __IDPF_Q_FLAGS_NBITS, }; @@ -401,7 +386,6 @@ struct idpf_intr_reg { * @rx_intr_mode: Dynamic ITR or not * @rx_itr_idx: RX ITR index * @v_idx: Vector index - * @affinity_mask: CPU affinity mask */ struct idpf_q_vector { __cacheline_group_begin_aligned(read_mostly); @@ -438,13 +422,12 @@ struct idpf_q_vector { __cacheline_group_begin_aligned(cold); u16 v_idx; - cpumask_var_t affinity_mask; __cacheline_group_end_aligned(cold); }; libeth_cacheline_set_assert(struct idpf_q_vector, 120, 24 + sizeof(struct napi_struct) + 2 * sizeof(struct dim), - 8 + sizeof(cpumask_var_t)); + 8); struct idpf_rx_queue_stats { u64_stats_t packets; @@ -464,6 +447,7 @@ struct idpf_tx_queue_stats { u64_stats_t q_busy; u64_stats_t skb_drops; u64_stats_t dma_map_errs; + u64_stats_t tstamp_skipped; }; #define IDPF_ITR_DYNAMIC 1 @@ -515,6 +499,7 @@ struct idpf_txq_stash { * @next_to_alloc: RX buffer to allocate at * @skb: Pointer to the skb * @truesize: data buffer truesize in singleq + * @cached_phc_time: Cached PHC time for the Rx queue * @stats_sync: See struct u64_stats_sync * @q_stats: See union idpf_rx_queue_stats * @q_id: Queue id @@ -562,6 +547,7 @@ struct idpf_rx_queue { struct sk_buff *skb; u32 truesize; + u64 cached_phc_time; struct u64_stats_sync stats_sync; struct idpf_rx_queue_stats q_stats; @@ -581,7 +567,7 @@ struct idpf_rx_queue { __cacheline_group_end_aligned(cold); }; libeth_cacheline_set_assert(struct idpf_rx_queue, 64, - 80 + sizeof(struct u64_stats_sync), + 88 + sizeof(struct u64_stats_sync), 32); /** @@ -638,6 +624,8 @@ libeth_cacheline_set_assert(struct idpf_rx_queue, 64, * @compl_tag_bufid_m: Completion tag buffer id mask * @compl_tag_cur_gen: Used to keep track of current completion tag generation * @compl_tag_gen_max: To determine when compl_tag_cur_gen should be reset + * @cached_tstamp_caps: Tx timestamp capabilities negotiated with the CP + * @tstamp_task: Work that handles Tx timestamp read * @stats_sync: See struct u64_stats_sync * @q_stats: See union idpf_tx_queue_stats * @q_id: Queue id @@ -651,7 +639,7 @@ struct idpf_tx_queue { struct idpf_base_tx_desc *base_tx; struct idpf_base_tx_ctx_desc *base_ctx; union idpf_tx_flex_desc *flex_tx; - struct idpf_flex_tx_ctx_desc *flex_ctx; + union idpf_flex_tx_ctx_desc *flex_ctx; void *desc_ring; }; @@ -687,6 +675,9 @@ struct idpf_tx_queue { u16 compl_tag_cur_gen; u16 compl_tag_gen_max; + struct idpf_ptp_vport_tx_tstamp_caps *cached_tstamp_caps; + struct work_struct *tstamp_task; + struct u64_stats_sync stats_sync; struct idpf_tx_queue_stats q_stats; __cacheline_group_end_aligned(read_write); @@ -700,7 +691,7 @@ struct idpf_tx_queue { __cacheline_group_end_aligned(cold); }; 
libeth_cacheline_set_assert(struct idpf_tx_queue, 64, - 88 + sizeof(struct u64_stats_sync), + 112 + sizeof(struct u64_stats_sync), 24); /** @@ -940,7 +931,7 @@ static inline int idpf_q_vector_to_mem(const struct idpf_q_vector *q_vector) if (!q_vector) return NUMA_NO_NODE; - cpu = cpumask_first(q_vector->affinity_mask); + cpu = cpumask_first(&q_vector->napi.config->affinity_mask); return cpu < nr_cpu_ids ? cpu_to_mem(cpu) : NUMA_NO_NODE; } diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c index 3d2413b8684fc..07a9f5ae34fd9 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c +++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c @@ -5,88 +5,7 @@ #include "idpf.h" #include "idpf_virtchnl.h" - -#define IDPF_VC_XN_MIN_TIMEOUT_MSEC 2000 -#define IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC (60 * 1000) -#define IDPF_VC_XN_IDX_M GENMASK(7, 0) -#define IDPF_VC_XN_SALT_M GENMASK(15, 8) -#define IDPF_VC_XN_RING_LEN U8_MAX - -/** - * enum idpf_vc_xn_state - Virtchnl transaction status - * @IDPF_VC_XN_IDLE: not expecting a reply, ready to be used - * @IDPF_VC_XN_WAITING: expecting a reply, not yet received - * @IDPF_VC_XN_COMPLETED_SUCCESS: a reply was expected and received, - * buffer updated - * @IDPF_VC_XN_COMPLETED_FAILED: a reply was expected and received, but there - * was an error, buffer not updated - * @IDPF_VC_XN_SHUTDOWN: transaction object cannot be used, VC torn down - * @IDPF_VC_XN_ASYNC: transaction sent asynchronously and doesn't have the - * return context; a callback may be provided to handle - * return - */ -enum idpf_vc_xn_state { - IDPF_VC_XN_IDLE = 1, - IDPF_VC_XN_WAITING, - IDPF_VC_XN_COMPLETED_SUCCESS, - IDPF_VC_XN_COMPLETED_FAILED, - IDPF_VC_XN_SHUTDOWN, - IDPF_VC_XN_ASYNC, -}; - -struct idpf_vc_xn; -/* Callback for asynchronous messages */ -typedef int (*async_vc_cb) (struct idpf_adapter *, struct idpf_vc_xn *, - const struct idpf_ctlq_msg *); - -/** - * struct idpf_vc_xn - Data structure representing virtchnl transactions - * @completed: virtchnl event loop uses that to signal when a reply is - * available, uses kernel completion API - * @state: virtchnl event loop stores the data below, protected by the - * completion's lock. - * @reply_sz: Original size of reply, may be > reply_buf.iov_len; it will be - * truncated on its way to the receiver thread according to - * reply_buf.iov_len. - * @reply: Reference to the buffer(s) where the reply data should be written - * to. May be 0-length (then NULL address permitted) if the reply data - * should be ignored. - * @async_handler: if sent asynchronously, a callback can be provided to handle - * the reply when it's received - * @vc_op: corresponding opcode sent with this transaction - * @idx: index used as retrieval on reply receive, used for cookie - * @salt: changed every message to make unique, used for cookie - */ -struct idpf_vc_xn { - struct completion completed; - enum idpf_vc_xn_state state; - size_t reply_sz; - struct kvec reply; - async_vc_cb async_handler; - u32 vc_op; - u8 idx; - u8 salt; -}; - -/** - * struct idpf_vc_xn_params - Parameters for executing transaction - * @send_buf: kvec for send buffer - * @recv_buf: kvec for recv buffer, may be NULL, must then have zero length - * @timeout_ms: timeout to wait for reply - * @async: send message asynchronously, will not wait on completion - * @async_handler: If sent asynchronously, optional callback handler. 
The user - * must be careful when using async handlers as the memory for - * the recv_buf _cannot_ be on stack if this is async. - * @vc_op: virtchnl op to send - */ -struct idpf_vc_xn_params { - struct kvec send_buf; - struct kvec recv_buf; - int timeout_ms; - bool async; - async_vc_cb async_handler; - u32 vc_op; -}; +#include "idpf_ptp.h" /** * struct idpf_vc_xn_manager - Manager for tracking transactions @@ -235,6 +154,55 @@ static int idpf_mb_clean(struct idpf_adapter *adapter) return err; } +#if IS_ENABLED(CONFIG_PTP_1588_CLOCK) +/** + * idpf_ptp_is_mb_msg - Check if the message is PTP-related + * @op: virtchnl opcode + * + * Return: true if msg is PTP-related, false otherwise. + */ +static bool idpf_ptp_is_mb_msg(u32 op) +{ + switch (op) { + case VIRTCHNL2_OP_PTP_GET_DEV_CLK_TIME: + case VIRTCHNL2_OP_PTP_GET_CROSS_TIME: + case VIRTCHNL2_OP_PTP_SET_DEV_CLK_TIME: + case VIRTCHNL2_OP_PTP_ADJ_DEV_CLK_FINE: + case VIRTCHNL2_OP_PTP_ADJ_DEV_CLK_TIME: + case VIRTCHNL2_OP_PTP_GET_VPORT_TX_TSTAMP_CAPS: + case VIRTCHNL2_OP_PTP_GET_VPORT_TX_TSTAMP: + return true; + default: + return false; + } +} + +/** + * idpf_prepare_ptp_mb_msg - Prepare PTP related message + * + * @adapter: Driver specific private structure + * @op: virtchnl opcode + * @ctlq_msg: Corresponding control queue message + */ +static void idpf_prepare_ptp_mb_msg(struct idpf_adapter *adapter, u32 op, + struct idpf_ctlq_msg *ctlq_msg) +{ + /* If the message is PTP-related and the secondary mailbox is available, + * send the message through the secondary mailbox. + */ + if (!idpf_ptp_is_mb_msg(op) || !adapter->ptp->secondary_mbx.valid) + return; + + ctlq_msg->opcode = idpf_mbq_opc_send_msg_to_peer_drv; + ctlq_msg->func_id = adapter->ptp->secondary_mbx.peer_mbx_q_id; + ctlq_msg->host_id = adapter->ptp->secondary_mbx.peer_id; +} +#else /* !CONFIG_PTP_1588_CLOCK */ +static void idpf_prepare_ptp_mb_msg(struct idpf_adapter *adapter, u32 op, + struct idpf_ctlq_msg *ctlq_msg) +{ } +#endif /* CONFIG_PTP_1588_CLOCK */ + /** * idpf_send_mb_msg - Send message over mailbox * @adapter: Driver specific private structure @@ -278,6 +246,9 @@ int idpf_send_mb_msg(struct idpf_adapter *adapter, u32 op, ctlq_msg->opcode = idpf_mbq_opc_send_msg_to_cp; ctlq_msg->func_id = 0; + + idpf_prepare_ptp_mb_msg(adapter, op, ctlq_msg); + ctlq_msg->data_len = msg_size; ctlq_msg->cookie.mbx.chnl_opcode = op; ctlq_msg->cookie.mbx.chnl_retval = 0; @@ -449,8 +420,8 @@ static void idpf_vc_xn_push_free(struct idpf_vc_xn_manager *vcxn_mngr, * >= @recv_buf.iov_len, but we never overflow @@recv_buf_iov_base). < 0 for * error. 
*/ -static ssize_t idpf_vc_xn_exec(struct idpf_adapter *adapter, - const struct idpf_vc_xn_params *params) +ssize_t idpf_vc_xn_exec(struct idpf_adapter *adapter, + const struct idpf_vc_xn_params *params) { const struct kvec *send_buf = ¶ms->send_buf; struct idpf_vc_xn *xn; @@ -900,7 +871,8 @@ static int idpf_send_get_caps_msg(struct idpf_adapter *adapter) VIRTCHNL2_CAP_MACFILTER | VIRTCHNL2_CAP_SPLITQ_QSCHED | VIRTCHNL2_CAP_PROMISC | - VIRTCHNL2_CAP_LOOPBACK); + VIRTCHNL2_CAP_LOOPBACK | + VIRTCHNL2_CAP_PTP); xn_params.vc_op = VIRTCHNL2_OP_GET_CAPS; xn_params.send_buf.iov_base = ∩︀ @@ -3029,6 +3001,11 @@ int idpf_vc_core_init(struct idpf_adapter *adapter) goto err_intr_req; } + err = idpf_ptp_init(adapter); + if (err) + pci_err(adapter->pdev, "PTP init failed, err=%pe\n", + ERR_PTR(err)); + idpf_init_avail_queues(adapter); /* Skew the delay for init tasks for each function based on fn number @@ -3091,6 +3068,7 @@ void idpf_vc_core_deinit(struct idpf_adapter *adapter) if (!remove_in_prog) idpf_vc_xn_shutdown(adapter->vcxn_mngr); + idpf_ptp_release(adapter); idpf_deinit_task(adapter); idpf_intr_rel(adapter); @@ -3158,6 +3136,7 @@ void idpf_vport_init(struct idpf_vport *vport, struct idpf_vport_max_q *max_q) u16 rx_itr[] = {2, 8, 32, 96, 128}; struct idpf_rss_data *rss_data; u16 idx = vport->idx; + int err; vport_config = adapter->vport_config[idx]; rss_data = &vport_config->user_config.rss_data; @@ -3192,6 +3171,18 @@ void idpf_vport_init(struct idpf_vport *vport, struct idpf_vport_max_q *max_q) idpf_vport_alloc_vec_indexes(vport); vport->crc_enable = adapter->crc_enable; + + if (!(vport_msg->vport_flags & + cpu_to_le16(VIRTCHNL2_VPORT_UPLINK_PORT))) + return; + + err = idpf_ptp_get_vport_tstamps_caps(vport); + if (err) { + pci_dbg(vport->adapter->pdev, "Tx timestamping not supported\n"); + return; + } + + INIT_WORK(&vport->tstamp_task, idpf_tstamp_task); } /** diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.h b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.h index 83da5d8da56bf..3522c1238ea24 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.h +++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.h @@ -4,6 +4,88 @@ #ifndef _IDPF_VIRTCHNL_H_ #define _IDPF_VIRTCHNL_H_ +#define IDPF_VC_XN_MIN_TIMEOUT_MSEC 2000 +#define IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC (60 * 1000) +#define IDPF_VC_XN_IDX_M GENMASK(7, 0) +#define IDPF_VC_XN_SALT_M GENMASK(15, 8) +#define IDPF_VC_XN_RING_LEN U8_MAX + +/** + * enum idpf_vc_xn_state - Virtchnl transaction status + * @IDPF_VC_XN_IDLE: not expecting a reply, ready to be used + * @IDPF_VC_XN_WAITING: expecting a reply, not yet received + * @IDPF_VC_XN_COMPLETED_SUCCESS: a reply was expected and received, buffer + * updated + * @IDPF_VC_XN_COMPLETED_FAILED: a reply was expected and received, but there + * was an error, buffer not updated + * @IDPF_VC_XN_SHUTDOWN: transaction object cannot be used, VC torn down + * @IDPF_VC_XN_ASYNC: transaction sent asynchronously and doesn't have the + * return context; a callback may be provided to handle + * return + */ +enum idpf_vc_xn_state { + IDPF_VC_XN_IDLE = 1, + IDPF_VC_XN_WAITING, + IDPF_VC_XN_COMPLETED_SUCCESS, + IDPF_VC_XN_COMPLETED_FAILED, + IDPF_VC_XN_SHUTDOWN, + IDPF_VC_XN_ASYNC, +}; + +struct idpf_vc_xn; +/* Callback for asynchronous messages */ +typedef int (*async_vc_cb) (struct idpf_adapter *, struct idpf_vc_xn *, + const struct idpf_ctlq_msg *); + +/** + * struct idpf_vc_xn - Data structure representing virtchnl transactions + * @completed: virtchnl event loop uses that to signal when a reply is + * 
available, uses kernel completion API + * @state: virtchnl event loop stores the data below, protected by the + * completion's lock. + * @reply_sz: Original size of reply, may be > reply_buf.iov_len; it will be + * truncated on its way to the receiver thread according to + * reply_buf.iov_len. + * @reply: Reference to the buffer(s) where the reply data should be written + * to. May be 0-length (then NULL address permitted) if the reply data + * should be ignored. + * @async_handler: if sent asynchronously, a callback can be provided to handle + * the reply when it's received + * @vc_op: corresponding opcode sent with this transaction + * @idx: index used as retrieval on reply receive, used for cookie + * @salt: changed every message to make unique, used for cookie + */ +struct idpf_vc_xn { + struct completion completed; + enum idpf_vc_xn_state state; + size_t reply_sz; + struct kvec reply; + async_vc_cb async_handler; + u32 vc_op; + u8 idx; + u8 salt; +}; + +/** + * struct idpf_vc_xn_params - Parameters for executing transaction + * @send_buf: kvec for send buffer + * @recv_buf: kvec for recv buffer, may be NULL, must then have zero length + * @timeout_ms: timeout to wait for reply + * @async: send message asynchronously, will not wait on completion + * @async_handler: If sent asynchronously, optional callback handler. The user + * must be careful when using async handlers as the memory for + * the recv_buf _cannot_ be on stack if this is async. + * @vc_op: virtchnl op to send + */ +struct idpf_vc_xn_params { + struct kvec send_buf; + struct kvec recv_buf; + int timeout_ms; + bool async; + async_vc_cb async_handler; + u32 vc_op; +}; + struct idpf_adapter; struct idpf_netdev_priv; struct idpf_vec_regs; @@ -11,6 +93,8 @@ struct idpf_vport; struct idpf_vport_max_q; struct idpf_vport_user_config_data; +ssize_t idpf_vc_xn_exec(struct idpf_adapter *adapter, + const struct idpf_vc_xn_params *params); int idpf_init_dflt_mbx(struct idpf_adapter *adapter); void idpf_deinit_dflt_mbx(struct idpf_adapter *adapter); int idpf_vc_core_init(struct idpf_adapter *adapter); diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl_ptp.c b/drivers/net/ethernet/intel/idpf/idpf_virtchnl_ptp.c new file mode 100644 index 0000000000000..c582a9d34dd0b --- /dev/null +++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl_ptp.c @@ -0,0 +1,677 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (C) 2024 Intel Corporation */ + +#include "idpf.h" +#include "idpf_ptp.h" +#include "idpf_virtchnl.h" + +/** + * idpf_ptp_get_caps - Send virtchnl get ptp capabilities message + * @adapter: Driver specific private structure + * + * Send virtchnl get PTP capabilities message. + * + * Return: 0 on success, -errno on failure. 
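With the transaction machinery now visible to other translation units, the cookie format implied by the masks above is: ring index in bits 7:0, per-message salt in bits 15:8. A minimal packing/unpacking sketch (the helper names are hypothetical, not part of the driver):

    #include <linux/bitfield.h>
    #include <linux/types.h>

    static u16 example_xn_cookie(u8 idx, u8 salt)
    {
    	/* idx -> bits 7:0, salt -> bits 15:8 */
    	return FIELD_PREP(IDPF_VC_XN_SALT_M, salt) |
    	       FIELD_PREP(IDPF_VC_XN_IDX_M, idx);
    }

    static u8 example_xn_idx_from_cookie(u16 cookie)
    {
    	return FIELD_GET(IDPF_VC_XN_IDX_M, cookie);
    }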
+ */
+int idpf_ptp_get_caps(struct idpf_adapter *adapter)
+{
+	struct virtchnl2_ptp_get_caps *recv_ptp_caps_msg __free(kfree) = NULL;
+	struct virtchnl2_ptp_get_caps send_ptp_caps_msg = {
+		.caps = cpu_to_le32(VIRTCHNL2_CAP_PTP_GET_DEVICE_CLK_TIME |
+				    VIRTCHNL2_CAP_PTP_GET_DEVICE_CLK_TIME_MB |
+				    VIRTCHNL2_CAP_PTP_GET_CROSS_TIME |
+				    VIRTCHNL2_CAP_PTP_GET_CROSS_TIME_MB |
+				    VIRTCHNL2_CAP_PTP_SET_DEVICE_CLK_TIME_MB |
+				    VIRTCHNL2_CAP_PTP_ADJ_DEVICE_CLK_MB |
+				    VIRTCHNL2_CAP_PTP_TX_TSTAMPS_MB)
+	};
+	struct idpf_vc_xn_params xn_params = {
+		.vc_op = VIRTCHNL2_OP_PTP_GET_CAPS,
+		.send_buf.iov_base = &send_ptp_caps_msg,
+		.send_buf.iov_len = sizeof(send_ptp_caps_msg),
+		.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,
+	};
+	struct virtchnl2_ptp_cross_time_reg_offsets cross_tstamp_offsets;
+	struct virtchnl2_ptp_clk_adj_reg_offsets clk_adj_offsets;
+	struct virtchnl2_ptp_clk_reg_offsets clock_offsets;
+	struct idpf_ptp_secondary_mbx *scnd_mbx;
+	struct idpf_ptp *ptp = adapter->ptp;
+	enum idpf_ptp_access access_type;
+	u32 temp_offset;
+	int reply_sz;
+
+	recv_ptp_caps_msg = kzalloc(sizeof(struct virtchnl2_ptp_get_caps),
+				    GFP_KERNEL);
+	if (!recv_ptp_caps_msg)
+		return -ENOMEM;
+
+	xn_params.recv_buf.iov_base = recv_ptp_caps_msg;
+	xn_params.recv_buf.iov_len = sizeof(*recv_ptp_caps_msg);
+
+	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
+	if (reply_sz < 0)
+		return reply_sz;
+	else if (reply_sz != sizeof(*recv_ptp_caps_msg))
+		return -EIO;
+
+	ptp->caps = le32_to_cpu(recv_ptp_caps_msg->caps);
+	ptp->base_incval = le64_to_cpu(recv_ptp_caps_msg->base_incval);
+	ptp->max_adj = le32_to_cpu(recv_ptp_caps_msg->max_adj);
+
+	scnd_mbx = &ptp->secondary_mbx;
+	scnd_mbx->peer_mbx_q_id = le16_to_cpu(recv_ptp_caps_msg->peer_mbx_q_id);
+
+	/* if peer_mbx_q_id holds the invalid value (0xffff), the secondary
+	 * mailbox is not supported.
+ */ + scnd_mbx->valid = scnd_mbx->peer_mbx_q_id != 0xffff; + if (scnd_mbx->valid) + scnd_mbx->peer_id = recv_ptp_caps_msg->peer_id; + + /* Determine the access type for the PTP features */ + idpf_ptp_get_features_access(adapter); + + access_type = ptp->get_dev_clk_time_access; + if (access_type != IDPF_PTP_DIRECT) + goto cross_tstamp; + + clock_offsets = recv_ptp_caps_msg->clk_offsets; + + temp_offset = le32_to_cpu(clock_offsets.dev_clk_ns_l); + ptp->dev_clk_regs.dev_clk_ns_l = idpf_get_reg_addr(adapter, + temp_offset); + temp_offset = le32_to_cpu(clock_offsets.dev_clk_ns_h); + ptp->dev_clk_regs.dev_clk_ns_h = idpf_get_reg_addr(adapter, + temp_offset); + temp_offset = le32_to_cpu(clock_offsets.phy_clk_ns_l); + ptp->dev_clk_regs.phy_clk_ns_l = idpf_get_reg_addr(adapter, + temp_offset); + temp_offset = le32_to_cpu(clock_offsets.phy_clk_ns_h); + ptp->dev_clk_regs.phy_clk_ns_h = idpf_get_reg_addr(adapter, + temp_offset); + temp_offset = le32_to_cpu(clock_offsets.cmd_sync_trigger); + ptp->dev_clk_regs.cmd_sync = idpf_get_reg_addr(adapter, temp_offset); + +cross_tstamp: + access_type = ptp->get_cross_tstamp_access; + if (access_type != IDPF_PTP_DIRECT) + goto discipline_clock; + + cross_tstamp_offsets = recv_ptp_caps_msg->cross_time_offsets; + + temp_offset = le32_to_cpu(cross_tstamp_offsets.sys_time_ns_l); + ptp->dev_clk_regs.sys_time_ns_l = idpf_get_reg_addr(adapter, + temp_offset); + temp_offset = le32_to_cpu(cross_tstamp_offsets.sys_time_ns_h); + ptp->dev_clk_regs.sys_time_ns_h = idpf_get_reg_addr(adapter, + temp_offset); + temp_offset = le32_to_cpu(cross_tstamp_offsets.cmd_sync_trigger); + ptp->dev_clk_regs.cmd_sync = idpf_get_reg_addr(adapter, temp_offset); + +discipline_clock: + access_type = ptp->adj_dev_clk_time_access; + if (access_type != IDPF_PTP_DIRECT) + return 0; + + clk_adj_offsets = recv_ptp_caps_msg->clk_adj_offsets; + + /* Device clock offsets */ + temp_offset = le32_to_cpu(clk_adj_offsets.dev_clk_cmd_type); + ptp->dev_clk_regs.cmd = idpf_get_reg_addr(adapter, temp_offset); + temp_offset = le32_to_cpu(clk_adj_offsets.dev_clk_incval_l); + ptp->dev_clk_regs.incval_l = idpf_get_reg_addr(adapter, temp_offset); + temp_offset = le32_to_cpu(clk_adj_offsets.dev_clk_incval_h); + ptp->dev_clk_regs.incval_h = idpf_get_reg_addr(adapter, temp_offset); + temp_offset = le32_to_cpu(clk_adj_offsets.dev_clk_shadj_l); + ptp->dev_clk_regs.shadj_l = idpf_get_reg_addr(adapter, temp_offset); + temp_offset = le32_to_cpu(clk_adj_offsets.dev_clk_shadj_h); + ptp->dev_clk_regs.shadj_h = idpf_get_reg_addr(adapter, temp_offset); + + /* PHY clock offsets */ + temp_offset = le32_to_cpu(clk_adj_offsets.phy_clk_cmd_type); + ptp->dev_clk_regs.phy_cmd = idpf_get_reg_addr(adapter, temp_offset); + temp_offset = le32_to_cpu(clk_adj_offsets.phy_clk_incval_l); + ptp->dev_clk_regs.phy_incval_l = idpf_get_reg_addr(adapter, + temp_offset); + temp_offset = le32_to_cpu(clk_adj_offsets.phy_clk_incval_h); + ptp->dev_clk_regs.phy_incval_h = idpf_get_reg_addr(adapter, + temp_offset); + temp_offset = le32_to_cpu(clk_adj_offsets.phy_clk_shadj_l); + ptp->dev_clk_regs.phy_shadj_l = idpf_get_reg_addr(adapter, temp_offset); + temp_offset = le32_to_cpu(clk_adj_offsets.phy_clk_shadj_h); + ptp->dev_clk_regs.phy_shadj_h = idpf_get_reg_addr(adapter, temp_offset); + + return 0; +} + +/** + * idpf_ptp_get_dev_clk_time - Send virtchnl get device clk time message + * @adapter: Driver specific private structure + * @dev_clk_time: Pointer to the device clock structure where the value is set + * + * Send virtchnl get time message to get the 
time of the clock. + * + * Return: 0 on success, -errno otherwise. + */ +int idpf_ptp_get_dev_clk_time(struct idpf_adapter *adapter, + struct idpf_ptp_dev_timers *dev_clk_time) +{ + struct virtchnl2_ptp_get_dev_clk_time get_dev_clk_time_msg; + struct idpf_vc_xn_params xn_params = { + .vc_op = VIRTCHNL2_OP_PTP_GET_DEV_CLK_TIME, + .send_buf.iov_base = &get_dev_clk_time_msg, + .send_buf.iov_len = sizeof(get_dev_clk_time_msg), + .recv_buf.iov_base = &get_dev_clk_time_msg, + .recv_buf.iov_len = sizeof(get_dev_clk_time_msg), + .timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC, + }; + int reply_sz; + u64 dev_time; + + reply_sz = idpf_vc_xn_exec(adapter, &xn_params); + if (reply_sz < 0) + return reply_sz; + if (reply_sz != sizeof(get_dev_clk_time_msg)) + return -EIO; + + dev_time = le64_to_cpu(get_dev_clk_time_msg.dev_time_ns); + dev_clk_time->dev_clk_time_ns = dev_time; + + return 0; +} + +/** + * idpf_ptp_get_cross_time - Send virtchnl get cross time message + * @adapter: Driver specific private structure + * @cross_time: Pointer to the device clock structure where the value is set + * + * Send virtchnl get cross time message to get the time of the clock and the + * system time. + * + * Return: 0 on success, -errno otherwise. + */ +int idpf_ptp_get_cross_time(struct idpf_adapter *adapter, + struct idpf_ptp_dev_timers *cross_time) +{ + struct virtchnl2_ptp_get_cross_time cross_time_msg; + struct idpf_vc_xn_params xn_params = { + .vc_op = VIRTCHNL2_OP_PTP_GET_CROSS_TIME, + .send_buf.iov_base = &cross_time_msg, + .send_buf.iov_len = sizeof(cross_time_msg), + .recv_buf.iov_base = &cross_time_msg, + .recv_buf.iov_len = sizeof(cross_time_msg), + .timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC, + }; + int reply_sz; + + reply_sz = idpf_vc_xn_exec(adapter, &xn_params); + if (reply_sz < 0) + return reply_sz; + if (reply_sz != sizeof(cross_time_msg)) + return -EIO; + + cross_time->dev_clk_time_ns = le64_to_cpu(cross_time_msg.dev_time_ns); + cross_time->sys_time_ns = le64_to_cpu(cross_time_msg.sys_time_ns); + + return 0; +} + +/** + * idpf_ptp_set_dev_clk_time - Send virtchnl set device time message + * @adapter: Driver specific private structure + * @time: New time value + * + * Send virtchnl set time message to set the time of the clock. + * + * Return: 0 on success, -errno otherwise. + */ +int idpf_ptp_set_dev_clk_time(struct idpf_adapter *adapter, u64 time) +{ + struct virtchnl2_ptp_set_dev_clk_time set_dev_clk_time_msg = { + .dev_time_ns = cpu_to_le64(time), + }; + struct idpf_vc_xn_params xn_params = { + .vc_op = VIRTCHNL2_OP_PTP_SET_DEV_CLK_TIME, + .send_buf.iov_base = &set_dev_clk_time_msg, + .send_buf.iov_len = sizeof(set_dev_clk_time_msg), + .recv_buf.iov_base = &set_dev_clk_time_msg, + .recv_buf.iov_len = sizeof(set_dev_clk_time_msg), + .timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC, + }; + int reply_sz; + + reply_sz = idpf_vc_xn_exec(adapter, &xn_params); + if (reply_sz < 0) + return reply_sz; + if (reply_sz != sizeof(set_dev_clk_time_msg)) + return -EIO; + + return 0; +} + +/** + * idpf_ptp_adj_dev_clk_time - Send virtchnl adj device clock time message + * @adapter: Driver specific private structure + * @delta: Offset in nanoseconds to adjust the time by + * + * Send virtchnl adj time message to adjust the clock by the indicated delta. + * + * Return: 0 on success, -errno otherwise. 
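idpf_ptp_get_dev_clk_time() maps naturally onto the PTP core's .gettime64 callback. A sketch of that glue, assuming struct idpf_ptp embeds its ptp_clock_info as 'info' and keeps an 'adapter' back-pointer (both field names are assumptions, not taken from this patch):

    #include <linux/ptp_clock_kernel.h>

    static int example_gettime64(struct ptp_clock_info *info,
    			     struct timespec64 *ts)
    {
    	struct idpf_ptp *ptp = container_of(info, struct idpf_ptp, info);
    	struct idpf_ptp_dev_timers t;
    	int err;

    	err = idpf_ptp_get_dev_clk_time(ptp->adapter, &t);
    	if (err)
    		return err;

    	*ts = ns_to_timespec64(t.dev_clk_time_ns);

    	return 0;
    }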
+ */ +int idpf_ptp_adj_dev_clk_time(struct idpf_adapter *adapter, s64 delta) +{ + struct virtchnl2_ptp_adj_dev_clk_time adj_dev_clk_time_msg = { + .delta = cpu_to_le64(delta), + }; + struct idpf_vc_xn_params xn_params = { + .vc_op = VIRTCHNL2_OP_PTP_ADJ_DEV_CLK_TIME, + .send_buf.iov_base = &adj_dev_clk_time_msg, + .send_buf.iov_len = sizeof(adj_dev_clk_time_msg), + .recv_buf.iov_base = &adj_dev_clk_time_msg, + .recv_buf.iov_len = sizeof(adj_dev_clk_time_msg), + .timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC, + }; + int reply_sz; + + reply_sz = idpf_vc_xn_exec(adapter, &xn_params); + if (reply_sz < 0) + return reply_sz; + if (reply_sz != sizeof(adj_dev_clk_time_msg)) + return -EIO; + + return 0; +} + +/** + * idpf_ptp_adj_dev_clk_fine - Send virtchnl adj time message + * @adapter: Driver specific private structure + * @incval: Source timer increment value per clock cycle + * + * Send virtchnl adj fine message to adjust the frequency of the clock by + * incval. + * + * Return: 0 on success, -errno otherwise. + */ +int idpf_ptp_adj_dev_clk_fine(struct idpf_adapter *adapter, u64 incval) +{ + struct virtchnl2_ptp_adj_dev_clk_fine adj_dev_clk_fine_msg = { + .incval = cpu_to_le64(incval), + }; + struct idpf_vc_xn_params xn_params = { + .vc_op = VIRTCHNL2_OP_PTP_ADJ_DEV_CLK_FINE, + .send_buf.iov_base = &adj_dev_clk_fine_msg, + .send_buf.iov_len = sizeof(adj_dev_clk_fine_msg), + .recv_buf.iov_base = &adj_dev_clk_fine_msg, + .recv_buf.iov_len = sizeof(adj_dev_clk_fine_msg), + .timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC, + }; + int reply_sz; + + reply_sz = idpf_vc_xn_exec(adapter, &xn_params); + if (reply_sz < 0) + return reply_sz; + if (reply_sz != sizeof(adj_dev_clk_fine_msg)) + return -EIO; + + return 0; +} + +/** + * idpf_ptp_get_vport_tstamps_caps - Send virtchnl to get tstamps caps for vport + * @vport: Virtual port structure + * + * Send virtchnl get vport tstamps caps message to receive the set of tstamp + * capabilities per vport. + * + * Return: 0 on success, -errno otherwise. 
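The incval passed to idpf_ptp_adj_dev_clk_fine() would typically be the negotiated base_incval scaled by the requested offset. A sketch of an .adjfine handler built on the kernel's adjust_by_scaled_ppm() helper, with the same assumed struct layout as above:

    #include <linux/math64.h>
    #include <linux/ptp_clock_kernel.h>

    static int example_adjfine(struct ptp_clock_info *info, long scaled_ppm)
    {
    	struct idpf_ptp *ptp = container_of(info, struct idpf_ptp, info);
    	u64 incval;

    	/* base_incval was reported by VIRTCHNL2_OP_PTP_GET_CAPS */
    	incval = adjust_by_scaled_ppm(ptp->base_incval, scaled_ppm);

    	return idpf_ptp_adj_dev_clk_fine(ptp->adapter, incval);
    }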
+ */ +int idpf_ptp_get_vport_tstamps_caps(struct idpf_vport *vport) +{ + struct virtchnl2_ptp_get_vport_tx_tstamp_caps send_tx_tstamp_caps; + struct virtchnl2_ptp_get_vport_tx_tstamp_caps *rcv_tx_tstamp_caps; + struct idpf_ptp_tx_tstamp *ptp_tx_tstamp, *ptp_tx_tstamps, *tmp; + struct virtchnl2_ptp_tx_tstamp_latch_caps tx_tstamp_latch_caps; + struct idpf_ptp_vport_tx_tstamp_caps *tstamp_caps; + struct idpf_vc_xn_params xn_params = { + .vc_op = VIRTCHNL2_OP_PTP_GET_VPORT_TX_TSTAMP_CAPS, + .send_buf.iov_base = &send_tx_tstamp_caps, + .send_buf.iov_len = sizeof(send_tx_tstamp_caps), + .recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN, + .timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC, + }; + enum idpf_ptp_access tstamp_access, get_dev_clk_access; + struct idpf_ptp *ptp = vport->adapter->ptp; + struct list_head *head; + int err = 0, reply_sz; + u16 num_latches; + u32 size; + + if (!ptp) + return -EOPNOTSUPP; + + tstamp_access = ptp->tx_tstamp_access; + get_dev_clk_access = ptp->get_dev_clk_time_access; + if (tstamp_access == IDPF_PTP_NONE || + get_dev_clk_access == IDPF_PTP_NONE) + return -EOPNOTSUPP; + + rcv_tx_tstamp_caps = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL); + if (!rcv_tx_tstamp_caps) + return -ENOMEM; + + send_tx_tstamp_caps.vport_id = cpu_to_le32(vport->vport_id); + xn_params.recv_buf.iov_base = rcv_tx_tstamp_caps; + + reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params); + if (reply_sz < 0) { + err = reply_sz; + goto get_tstamp_caps_out; + } + + num_latches = le16_to_cpu(rcv_tx_tstamp_caps->num_latches); + size = struct_size(rcv_tx_tstamp_caps, tstamp_latches, num_latches); + if (reply_sz != size) { + err = -EIO; + goto get_tstamp_caps_out; + } + + size = struct_size(tstamp_caps, tx_tstamp_status, num_latches); + tstamp_caps = kzalloc(size, GFP_KERNEL); + if (!tstamp_caps) { + err = -ENOMEM; + goto get_tstamp_caps_out; + } + + tstamp_caps->access = true; + tstamp_caps->num_entries = num_latches; + + INIT_LIST_HEAD(&tstamp_caps->latches_in_use); + INIT_LIST_HEAD(&tstamp_caps->latches_free); + + spin_lock_init(&tstamp_caps->lock_free); + spin_lock_init(&tstamp_caps->lock_in_use); + spin_lock_init(&tstamp_caps->lock_status); + + tstamp_caps->tstamp_ns_lo_bit = rcv_tx_tstamp_caps->tstamp_ns_lo_bit; + + ptp_tx_tstamps = kcalloc(tstamp_caps->num_entries, + sizeof(*ptp_tx_tstamp), GFP_KERNEL); + if (!ptp_tx_tstamps) { + err = -ENOMEM; + goto err_free_ptp_tx_stamp_list; + } + + for (u16 i = 0; i < tstamp_caps->num_entries; i++) { + __le32 offset_l, offset_h; + + ptp_tx_tstamp = ptp_tx_tstamps + i; + tx_tstamp_latch_caps = rcv_tx_tstamp_caps->tstamp_latches[i]; + + if (tstamp_access != IDPF_PTP_DIRECT) + goto skip_offsets; + + offset_l = tx_tstamp_latch_caps.tx_latch_reg_offset_l; + offset_h = tx_tstamp_latch_caps.tx_latch_reg_offset_h; + ptp_tx_tstamp->tx_latch_reg_offset_l = le32_to_cpu(offset_l); + ptp_tx_tstamp->tx_latch_reg_offset_h = le32_to_cpu(offset_h); + +skip_offsets: + ptp_tx_tstamp->idx = tx_tstamp_latch_caps.index; + + list_add(&ptp_tx_tstamp->list_member, + &tstamp_caps->latches_free); + + tstamp_caps->tx_tstamp_status[i].state = IDPF_PTP_FREE; + } + + vport->tx_tstamp_caps = tstamp_caps; + kfree(rcv_tx_tstamp_caps); + + /* Init mutex to protect tstamp statistics */ + mutex_init(&vport->tstamp_stats.tx_hwtstamp_lock); + + return 0; + +err_free_ptp_tx_stamp_list: + head = &tstamp_caps->latches_free; + list_for_each_entry_safe(ptp_tx_tstamp, tmp, head, list_member) { + list_del(&ptp_tx_tstamp->list_member); + kfree(ptp_tx_tstamp); + } + + kfree(tstamp_caps); +get_tstamp_caps_out: + 
kfree(rcv_tx_tstamp_caps); + + return err; +} + +/** + * idpf_ptp_update_tstamp_tracker - Update the Tx timestamp tracker based on + * the skb compatibility. + * @caps: Tx timestamp capabilities that monitor the latch status + * @skb: skb for which the tstamp value is returned through virtchnl message + * @current_state: Current state of the Tx timestamp latch + * @expected_state: Expected state of the Tx timestamp latch + * + * Find a proper skb tracker for which the Tx timestamp is received and change + * the state to expected value. + * + * Return: true if the tracker has been found and updated, false otherwise. + */ +static bool +idpf_ptp_update_tstamp_tracker(struct idpf_ptp_vport_tx_tstamp_caps *caps, + struct sk_buff *skb, + enum idpf_ptp_tx_tstamp_state current_state, + enum idpf_ptp_tx_tstamp_state expected_state) +{ + bool updated = false; + + spin_lock(&caps->lock_status); + for (u16 i = 0; i < caps->num_entries; i++) { + struct idpf_ptp_tx_tstamp_status *status; + + status = &caps->tx_tstamp_status[i]; + + if (skb == status->skb && status->state == current_state) { + status->state = expected_state; + updated = true; + break; + } + } + spin_unlock(&caps->lock_status); + + return updated; +} + +/** + * idpf_ptp_get_tstamp_value - Get the Tx timestamp value and provide it + * back to the skb. + * @vport: Virtual port structure + * @tstamp_latch: Tx timestamp latch structure fulfilled by the Control Plane + * @ptp_tx_tstamp: Tx timestamp latch to add to the free list + * + * Read the value of the Tx timestamp for a given latch received from the + * Control Plane, extend it to 64 bit and provide back to the skb. + * + * Return: 0 on success, -errno otherwise. + */ +static int +idpf_ptp_get_tstamp_value(struct idpf_vport *vport, + struct virtchnl2_ptp_tx_tstamp_latch *tstamp_latch, + struct idpf_ptp_tx_tstamp *ptp_tx_tstamp) +{ + struct idpf_ptp_vport_tx_tstamp_caps *tx_tstamp_caps; + struct skb_shared_hwtstamps shhwtstamps; + bool state_upd = false; + u8 tstamp_ns_lo_bit; + u64 tstamp; + + tx_tstamp_caps = vport->tx_tstamp_caps; + tstamp_ns_lo_bit = tx_tstamp_caps->tstamp_ns_lo_bit; + + ptp_tx_tstamp->tstamp = le64_to_cpu(tstamp_latch->tstamp); + ptp_tx_tstamp->tstamp >>= tstamp_ns_lo_bit; + + state_upd = idpf_ptp_update_tstamp_tracker(tx_tstamp_caps, + ptp_tx_tstamp->skb, + IDPF_PTP_READ_VALUE, + IDPF_PTP_FREE); + if (!state_upd) + return -EINVAL; + + tstamp = idpf_ptp_extend_ts(vport, ptp_tx_tstamp->tstamp); + shhwtstamps.hwtstamp = ns_to_ktime(tstamp); + skb_tstamp_tx(ptp_tx_tstamp->skb, &shhwtstamps); + consume_skb(ptp_tx_tstamp->skb); + + spin_lock(&tx_tstamp_caps->lock_free); + list_add(&ptp_tx_tstamp->list_member, + &tx_tstamp_caps->latches_free); + spin_unlock(&tx_tstamp_caps->lock_free); + + return 0; +} + +/** + * idpf_ptp_get_tx_tstamp_async_handler - Async callback for getting Tx tstamps + * @adapter: Driver specific private structure + * @xn: transaction for message + * @ctlq_msg: received message + * + * Read the tstamps Tx tstamp values from a received message and put them + * directly to the skb. The number of timestamps to read is specified by + * the virtchnl message. + * + * Return: 0 on success, -errno otherwise. 
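idpf_ptp_extend_ts() itself is outside this hunk; the usual technique in comparable Intel drivers widens a truncated hardware timestamp with the upper bits of a recently cached PHC time, correcting for low-word wraparound. A self-contained sketch for a 32-bit truncated value:

    #include <linux/kernel.h>
    #include <linux/limits.h>
    #include <linux/types.h>

    static u64 example_extend_32b_ts(u64 cached_phc_time, u32 in_tstamp)
    {
    	u32 phc_lo = lower_32_bits(cached_phc_time);
    	u32 delta = in_tstamp - phc_lo;

    	/* If the apparent delta is huge, the low word wrapped between
    	 * the cache refresh and the capture; walk backwards instead.
    	 */
    	if (delta > (U32_MAX / 2))
    		return cached_phc_time - (phc_lo - in_tstamp);

    	return cached_phc_time + delta;
    }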
+ */ +static int +idpf_ptp_get_tx_tstamp_async_handler(struct idpf_adapter *adapter, + struct idpf_vc_xn *xn, + const struct idpf_ctlq_msg *ctlq_msg) +{ + struct virtchnl2_ptp_get_vport_tx_tstamp_latches *recv_tx_tstamp_msg; + struct idpf_ptp_vport_tx_tstamp_caps *tx_tstamp_caps; + struct virtchnl2_ptp_tx_tstamp_latch tstamp_latch; + struct idpf_ptp_tx_tstamp *tx_tstamp, *tmp; + struct idpf_vport *tstamp_vport = NULL; + struct list_head *head; + u16 num_latches; + u32 vport_id; + int err = 0; + + recv_tx_tstamp_msg = ctlq_msg->ctx.indirect.payload->va; + vport_id = le32_to_cpu(recv_tx_tstamp_msg->vport_id); + + idpf_for_each_vport(adapter, vport) { + if (!vport) + continue; + + if (vport->vport_id == vport_id) { + tstamp_vport = vport; + break; + } + } + + if (!tstamp_vport || !tstamp_vport->tx_tstamp_caps) + return -EINVAL; + + tx_tstamp_caps = tstamp_vport->tx_tstamp_caps; + num_latches = le16_to_cpu(recv_tx_tstamp_msg->num_latches); + + head = &tx_tstamp_caps->latches_in_use; + spin_lock(&tx_tstamp_caps->lock_in_use); + + for (u16 i = 0; i < num_latches; i++) { + tstamp_latch = recv_tx_tstamp_msg->tstamp_latches[i]; + + if (!tstamp_latch.valid) + continue; + + if (list_empty(head)) { + err = -ENOBUFS; + goto unlock; + } + + list_for_each_entry_safe(tx_tstamp, tmp, head, list_member) { + if (tstamp_latch.index == tx_tstamp->idx) { + list_del(&tx_tstamp->list_member); + err = idpf_ptp_get_tstamp_value(tstamp_vport, + &tstamp_latch, + tx_tstamp); + if (err) + goto unlock; + + break; + } + } + } + +unlock: + spin_unlock(&tx_tstamp_caps->lock_in_use); + + return err; +} + +/** + * idpf_ptp_get_tx_tstamp - Send virtchnl get Tx timestamp latches message + * @vport: Virtual port structure + * + * Send virtchnl get Tx tstamp message to read the value of the HW timestamp. + * The message contains a list of indexes set in the Tx descriptors. + * + * Return: 0 on success, -errno otherwise. 
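The free/in-use lists consumed here imply a transmit-side request step that this file does not show. A minimal sketch of claiming a free latch for an skb under the locks initialized in idpf_ptp_get_vport_tstamps_caps() (the helper itself is hypothetical):

    #include <linux/skbuff.h>

    static int example_request_latch(struct idpf_ptp_vport_tx_tstamp_caps *caps,
    				 struct sk_buff *skb, u8 *idx)
    {
    	struct idpf_ptp_tx_tstamp *t;

    	spin_lock(&caps->lock_free);
    	t = list_first_entry_or_null(&caps->latches_free,
    				     struct idpf_ptp_tx_tstamp, list_member);
    	if (t)
    		list_del(&t->list_member);
    	spin_unlock(&caps->lock_free);

    	if (!t)
    		return -EBUSY;	/* all latches busy; timestamp is skipped */

    	t->skb = skb_get(skb);	/* released once the tstamp is delivered */
    	*idx = t->idx;

    	spin_lock(&caps->lock_in_use);
    	list_add_tail(&t->list_member, &caps->latches_in_use);
    	spin_unlock(&caps->lock_in_use);

    	return 0;
    }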
+ */ +int idpf_ptp_get_tx_tstamp(struct idpf_vport *vport) +{ + struct virtchnl2_ptp_get_vport_tx_tstamp_latches *send_tx_tstamp_msg; + struct idpf_ptp_vport_tx_tstamp_caps *tx_tstamp_caps; + struct idpf_vc_xn_params xn_params = { + .vc_op = VIRTCHNL2_OP_PTP_GET_VPORT_TX_TSTAMP, + .timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC, + .async = true, + .async_handler = idpf_ptp_get_tx_tstamp_async_handler, + }; + struct idpf_ptp_tx_tstamp *ptp_tx_tstamp; + int reply_sz, size, msg_size; + struct list_head *head; + bool state_upd; + u16 id = 0; + + tx_tstamp_caps = vport->tx_tstamp_caps; + head = &tx_tstamp_caps->latches_in_use; + + size = struct_size(send_tx_tstamp_msg, tstamp_latches, + tx_tstamp_caps->num_entries); + send_tx_tstamp_msg = kzalloc(size, GFP_KERNEL); + if (!send_tx_tstamp_msg) + return -ENOMEM; + + spin_lock(&tx_tstamp_caps->lock_in_use); + list_for_each_entry(ptp_tx_tstamp, head, list_member) { + u8 idx; + + state_upd = idpf_ptp_update_tstamp_tracker(tx_tstamp_caps, + ptp_tx_tstamp->skb, + IDPF_PTP_REQUEST, + IDPF_PTP_READ_VALUE); + if (!state_upd) + continue; + + idx = ptp_tx_tstamp->idx; + send_tx_tstamp_msg->tstamp_latches[id].index = idx; + id++; + } + spin_unlock(&tx_tstamp_caps->lock_in_use); + + msg_size = struct_size(send_tx_tstamp_msg, tstamp_latches, id); + send_tx_tstamp_msg->vport_id = cpu_to_le32(vport->vport_id); + send_tx_tstamp_msg->num_latches = cpu_to_le16(id); + xn_params.send_buf.iov_base = send_tx_tstamp_msg; + xn_params.send_buf.iov_len = msg_size; + + reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params); + kfree(send_tx_tstamp_msg); + + return min(reply_sz, 0); +} diff --git a/drivers/net/ethernet/intel/idpf/virtchnl2.h b/drivers/net/ethernet/intel/idpf/virtchnl2.h index 63deb120359cf..07235f1b4c5cd 100644 --- a/drivers/net/ethernet/intel/idpf/virtchnl2.h +++ b/drivers/net/ethernet/intel/idpf/virtchnl2.h @@ -68,6 +68,16 @@ enum virtchnl2_op { VIRTCHNL2_OP_ADD_MAC_ADDR = 535, VIRTCHNL2_OP_DEL_MAC_ADDR = 536, VIRTCHNL2_OP_CONFIG_PROMISCUOUS_MODE = 537, + + /* TimeSync opcodes */ + VIRTCHNL2_OP_PTP_GET_CAPS = 541, + VIRTCHNL2_OP_PTP_GET_VPORT_TX_TSTAMP = 542, + VIRTCHNL2_OP_PTP_GET_DEV_CLK_TIME = 543, + VIRTCHNL2_OP_PTP_GET_CROSS_TIME = 544, + VIRTCHNL2_OP_PTP_SET_DEV_CLK_TIME = 545, + VIRTCHNL2_OP_PTP_ADJ_DEV_CLK_FINE = 546, + VIRTCHNL2_OP_PTP_ADJ_DEV_CLK_TIME = 547, + VIRTCHNL2_OP_PTP_GET_VPORT_TX_TSTAMP_CAPS = 548, }; /** @@ -559,6 +569,14 @@ struct virtchnl2_queue_reg_chunks { }; VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_queue_reg_chunks); +/** + * enum virtchnl2_vport_flags - Vport flags that indicate vport capabilities. + * @VIRTCHNL2_VPORT_UPLINK_PORT: Representatives of underlying physical ports + */ +enum virtchnl2_vport_flags { + VIRTCHNL2_VPORT_UPLINK_PORT = BIT(0), +}; + /** * struct virtchnl2_create_vport - Create vport config info. * @vport_type: See enum virtchnl2_vport_type. @@ -577,7 +595,7 @@ VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_queue_reg_chunks); * @max_mtu: Max MTU. CP populates this field on response. * @vport_id: Vport id. CP populates this field on response. * @default_mac_addr: Default MAC address. - * @pad: Padding. + * @vport_flags: See enum virtchnl2_vport_flags. * @rx_desc_ids: See VIRTCHNL2_RX_DESC_IDS definitions. * @tx_desc_ids: See VIRTCHNL2_TX_DESC_IDS definitions. * @pad1: Padding. 
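Every wire structure added below is pinned by VIRTCHNL2_CHECK_STRUCT_LEN, so layout drift becomes a build error rather than an on-the-wire incompatibility. A reduced equivalent of what the macro is assumed to expand to (upstream may pass no message string):

    #include <linux/build_bug.h>

    #define EXAMPLE_CHECK_STRUCT_LEN(n, X)			\
    	static_assert((n) == sizeof(struct X),		\
    		      #X " must be " #n " bytes on the wire")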
@@ -610,7 +628,7 @@ struct virtchnl2_create_vport {
 	__le16 max_mtu;
 	__le32 vport_id;
 	u8 default_mac_addr[ETH_ALEN];
-	__le16 pad;
+	__le16 vport_flags;
 	__le64 rx_desc_ids;
 	__le64 tx_desc_ids;
 	u8 pad1[72];
@@ -1270,4 +1288,296 @@ struct virtchnl2_promisc_info {
 };
 VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_promisc_info);
 
+/**
+ * enum virtchnl2_ptp_caps - PTP capabilities
+ * @VIRTCHNL2_CAP_PTP_GET_DEVICE_CLK_TIME: direct access to get the time of
+ *					   device clock
+ * @VIRTCHNL2_CAP_PTP_GET_DEVICE_CLK_TIME_MB: mailbox access to get the time of
+ *					      device clock
+ * @VIRTCHNL2_CAP_PTP_GET_CROSS_TIME: direct access to cross timestamp
+ * @VIRTCHNL2_CAP_PTP_GET_CROSS_TIME_MB: mailbox access to cross timestamp
+ * @VIRTCHNL2_CAP_PTP_SET_DEVICE_CLK_TIME: direct access to set the time of
+ *					   device clock
+ * @VIRTCHNL2_CAP_PTP_SET_DEVICE_CLK_TIME_MB: mailbox access to set the time of
+ *					      device clock
+ * @VIRTCHNL2_CAP_PTP_ADJ_DEVICE_CLK: direct access to adjust the time of device
+ *				      clock
+ * @VIRTCHNL2_CAP_PTP_ADJ_DEVICE_CLK_MB: mailbox access to adjust the time of
+ *					 device clock
+ * @VIRTCHNL2_CAP_PTP_TX_TSTAMPS: direct access to the Tx timestamping
+ * @VIRTCHNL2_CAP_PTP_TX_TSTAMPS_MB: mailbox access to the Tx timestamping
+ *
+ * PF/VF negotiates a set of supported PTP capabilities with the Control Plane.
+ * There are two access methods - mailbox (_MB) and direct.
+ * PTP capabilities enable Main Timer operations: get/set/adjust the Main
+ * Timer, cross timestamping and Tx timestamping.
+ */
+enum virtchnl2_ptp_caps {
+	VIRTCHNL2_CAP_PTP_GET_DEVICE_CLK_TIME = BIT(0),
+	VIRTCHNL2_CAP_PTP_GET_DEVICE_CLK_TIME_MB = BIT(1),
+	VIRTCHNL2_CAP_PTP_GET_CROSS_TIME = BIT(2),
+	VIRTCHNL2_CAP_PTP_GET_CROSS_TIME_MB = BIT(3),
+	VIRTCHNL2_CAP_PTP_SET_DEVICE_CLK_TIME = BIT(4),
+	VIRTCHNL2_CAP_PTP_SET_DEVICE_CLK_TIME_MB = BIT(5),
+	VIRTCHNL2_CAP_PTP_ADJ_DEVICE_CLK = BIT(6),
+	VIRTCHNL2_CAP_PTP_ADJ_DEVICE_CLK_MB = BIT(7),
+	VIRTCHNL2_CAP_PTP_TX_TSTAMPS = BIT(8),
+	VIRTCHNL2_CAP_PTP_TX_TSTAMPS_MB = BIT(9),
+};
+
+/**
+ * struct virtchnl2_ptp_clk_reg_offsets - Offsets of device and PHY clocks
+ *					  registers.
+ * @dev_clk_ns_l: Device clock low register offset
+ * @dev_clk_ns_h: Device clock high register offset
+ * @phy_clk_ns_l: PHY clock low register offset
+ * @phy_clk_ns_h: PHY clock high register offset
+ * @cmd_sync_trigger: The command sync trigger register offset
+ * @pad: Padding for future extensions
+ */
+struct virtchnl2_ptp_clk_reg_offsets {
+	__le32 dev_clk_ns_l;
+	__le32 dev_clk_ns_h;
+	__le32 phy_clk_ns_l;
+	__le32 phy_clk_ns_h;
+	__le32 cmd_sync_trigger;
+	u8 pad[4];
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(24, virtchnl2_ptp_clk_reg_offsets);
+
+/**
+ * struct virtchnl2_ptp_cross_time_reg_offsets - Offsets of the device cross
+ *						 time registers.
+ * @sys_time_ns_l: System time low register offset
+ * @sys_time_ns_h: System time high register offset
+ * @cmd_sync_trigger: The command sync trigger register offset
+ * @pad: Padding for future extensions
+ */
+struct virtchnl2_ptp_cross_time_reg_offsets {
+	__le32 sys_time_ns_l;
+	__le32 sys_time_ns_h;
+	__le32 cmd_sync_trigger;
+	u8 pad[4];
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_ptp_cross_time_reg_offsets);
+
+/**
+ * struct virtchnl2_ptp_clk_adj_reg_offsets - Offsets of device and PHY clocks
+ *					      adjustment registers.
+ * @dev_clk_cmd_type: Device clock command type register offset
+ * @dev_clk_incval_l: Device clock increment value low register offset
+ * @dev_clk_incval_h: Device clock increment value high register offset
+ * @dev_clk_shadj_l: Device clock shadow adjust low register offset
+ * @dev_clk_shadj_h: Device clock shadow adjust high register offset
+ * @phy_clk_cmd_type: PHY timer command type register offset
+ * @phy_clk_incval_l: PHY timer increment value low register offset
+ * @phy_clk_incval_h: PHY timer increment value high register offset
+ * @phy_clk_shadj_l: PHY timer shadow adjust low register offset
+ * @phy_clk_shadj_h: PHY timer shadow adjust high register offset
+ */
+struct virtchnl2_ptp_clk_adj_reg_offsets {
+	__le32 dev_clk_cmd_type;
+	__le32 dev_clk_incval_l;
+	__le32 dev_clk_incval_h;
+	__le32 dev_clk_shadj_l;
+	__le32 dev_clk_shadj_h;
+	__le32 phy_clk_cmd_type;
+	__le32 phy_clk_incval_l;
+	__le32 phy_clk_incval_h;
+	__le32 phy_clk_shadj_l;
+	__le32 phy_clk_shadj_h;
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(40, virtchnl2_ptp_clk_adj_reg_offsets);
+
+/**
+ * struct virtchnl2_ptp_tx_tstamp_latch_caps - PTP Tx timestamp latch
+ *					       capabilities.
+ * @tx_latch_reg_offset_l: Tx timestamp latch low register offset
+ * @tx_latch_reg_offset_h: Tx timestamp latch high register offset
+ * @index: Latch index provided to the Tx descriptor
+ * @pad: Padding for future extensions
+ */
+struct virtchnl2_ptp_tx_tstamp_latch_caps {
+	__le32 tx_latch_reg_offset_l;
+	__le32 tx_latch_reg_offset_h;
+	u8 index;
+	u8 pad[7];
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_ptp_tx_tstamp_latch_caps);
+
+/**
+ * struct virtchnl2_ptp_get_vport_tx_tstamp_caps - Structure that defines Tx
+ *						   tstamp entries.
+ * @vport_id: Vport number
+ * @num_latches: Total number of latches
+ * @tstamp_ns_lo_bit: First bit for nanosecond part of the timestamp
+ * @tstamp_ns_hi_bit: Last bit for nanosecond part of the timestamp
+ * @pad: Padding for future tstamp granularity extensions
+ * @tstamp_latches: Capabilities of Tx timestamp entries
+ *
+ * PF/VF sends this message to negotiate the Tx timestamp latches for each
+ * Vport.
+ *
+ * Associated with VIRTCHNL2_OP_PTP_GET_VPORT_TX_TSTAMP_CAPS.
+ */
+struct virtchnl2_ptp_get_vport_tx_tstamp_caps {
+	__le32 vport_id;
+	__le16 num_latches;
+	u8 tstamp_ns_lo_bit;
+	u8 tstamp_ns_hi_bit;
+	u8 pad[8];
+
+	struct virtchnl2_ptp_tx_tstamp_latch_caps tstamp_latches[]
+		__counted_by_le(num_latches);
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_ptp_get_vport_tx_tstamp_caps);
+
+/**
+ * struct virtchnl2_ptp_get_caps - Get PTP capabilities
+ * @caps: PTP capability bitmap. See enum virtchnl2_ptp_caps
+ * @max_adj: The maximum possible frequency adjustment
+ * @base_incval: The default timer increment value
+ * @peer_mbx_q_id: ID of the PTP Device Control daemon queue
+ * @peer_id: Peer ID for PTP Device Control daemon
+ * @secondary_mbx: Indicates to the driver that it should create a secondary
+ *		   mailbox to interact with the control plane for PTP
+ * @pad: Padding for future extensions
+ * @clk_offsets: Main timer and PHY registers offsets
+ * @cross_time_offsets: Cross time registers offsets
+ * @clk_adj_offsets: Offsets needed to adjust the PHY and the main timer
+ *
+ * PF/VF sends this message to negotiate PTP capabilities. CP updates bitmap
+ * with supported features and fulfills appropriate structures.
+ * If HW uses primary MBX for PTP: secondary_mbx is set to false.
+ * If HW uses secondary MBX for PTP: secondary_mbx is set to true.
+ * The control plane has two mailboxes while the driver has one; the
+ * send-to-peer-driver path may be used to send a message using a valid
+ * peer_mbx_q_id and peer_id.
+ * If HW does not use send to peer driver: secondary_mbx is a don't-care
+ * field and peer_mbx_q_id holds the invalid value (0xFFFF).
+ *
+ * Associated with VIRTCHNL2_OP_PTP_GET_CAPS.
+ */
+struct virtchnl2_ptp_get_caps {
+	__le32 caps;
+	__le32 max_adj;
+	__le64 base_incval;
+	__le16 peer_mbx_q_id;
+	u8 peer_id;
+	u8 secondary_mbx;
+	u8 pad[4];
+
+	struct virtchnl2_ptp_clk_reg_offsets clk_offsets;
+	struct virtchnl2_ptp_cross_time_reg_offsets cross_time_offsets;
+	struct virtchnl2_ptp_clk_adj_reg_offsets clk_adj_offsets;
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(104, virtchnl2_ptp_get_caps);
+
+/**
+ * struct virtchnl2_ptp_tx_tstamp_latch - Structure that describes Tx tstamp
+ *					  values, index and validity.
+ * @tstamp: Timestamp value
+ * @index: Timestamp index from which the value is read
+ * @valid: Timestamp validity
+ * @pad: Padding for future extensions
+ */
+struct virtchnl2_ptp_tx_tstamp_latch {
+	__le64 tstamp;
+	u8 index;
+	u8 valid;
+	u8 pad[6];
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_ptp_tx_tstamp_latch);
+
+/**
+ * struct virtchnl2_ptp_get_vport_tx_tstamp_latches - Tx timestamp latches
+ *						      associated with the vport.
+ * @vport_id: Number of vport that requests the timestamp
+ * @num_latches: Number of latches
+ * @get_devtime_with_txtstmp: Flag to request device time along with Tx timestamp
+ * @pad: Padding for future extensions
+ * @device_time: device time if get_devtime_with_txtstmp was set in request
+ * @tstamp_latches: PTP TX timestamp latch
+ *
+ * PF/VF sends this message to receive a specified number of timestamp
+ * entries.
+ *
+ * Associated with VIRTCHNL2_OP_PTP_GET_VPORT_TX_TSTAMP.
+ */
+struct virtchnl2_ptp_get_vport_tx_tstamp_latches {
+	__le32 vport_id;
+	__le16 num_latches;
+	u8 get_devtime_with_txtstmp;
+	u8 pad[1];
+	__le64 device_time;
+
+	struct virtchnl2_ptp_tx_tstamp_latch tstamp_latches[]
+		__counted_by_le(num_latches);
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_ptp_get_vport_tx_tstamp_latches);
+
+/**
+ * struct virtchnl2_ptp_get_dev_clk_time - Associated with message
+ *					   VIRTCHNL2_OP_PTP_GET_DEV_CLK_TIME.
+ * @dev_time_ns: Device clock time value in nanoseconds
+ *
+ * PF/VF sends this message to receive the time from the main timer.
+ */
+struct virtchnl2_ptp_get_dev_clk_time {
+	__le64 dev_time_ns;
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_ptp_get_dev_clk_time);
+
+/**
+ * struct virtchnl2_ptp_get_cross_time: Associated with message
+ *					VIRTCHNL2_OP_PTP_GET_CROSS_TIME.
+ * @sys_time_ns: System counter value expressed in nanoseconds, read
+ *		 synchronously with device time
+ * @dev_time_ns: Device clock time value expressed in nanoseconds
+ *
+ * PF/VF sends this message to receive the cross time.
+ */
+struct virtchnl2_ptp_get_cross_time {
+	__le64 sys_time_ns;
+	__le64 dev_time_ns;
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_ptp_get_cross_time);
+
+/**
+ * struct virtchnl2_ptp_set_dev_clk_time: Associated with message
+ *					  VIRTCHNL2_OP_PTP_SET_DEV_CLK_TIME.
+ * @dev_time_ns: Device time value expressed in nanoseconds to set
+ *
+ * PF/VF sends this message to set the time of the main timer.
+ */
+struct virtchnl2_ptp_set_dev_clk_time {
+	__le64 dev_time_ns;
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_ptp_set_dev_clk_time);
+
+/**
+ * struct virtchnl2_ptp_adj_dev_clk_fine: Associated with message
+ *					  VIRTCHNL2_OP_PTP_ADJ_DEV_CLK_FINE.
+ * @incval: Source timer increment value per clock cycle + * + * PF/VF sends this message to adjust the frequency of the main timer by the + * indicated scaled ppm. + */ +struct virtchnl2_ptp_adj_dev_clk_fine { + __le64 incval; +}; +VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_ptp_adj_dev_clk_fine); + +/** + * struct virtchnl2_ptp_adj_dev_clk_time: Associated with message + * VIRTCHNL2_OP_PTP_ADJ_DEV_CLK_TIME. + * @delta: Offset in nanoseconds to adjust the time by + * + * PF/VF sends this message to adjust the time of the main timer by the delta. + */ +struct virtchnl2_ptp_adj_dev_clk_time { + __le64 delta; +}; +VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_ptp_adj_dev_clk_time); + #endif /* _VIRTCHNL_2_H_ */ diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c index f945705561200..f323e1c1989f1 100644 --- a/drivers/net/ethernet/intel/igb/igb_ptp.c +++ b/drivers/net/ethernet/intel/igb/igb_ptp.c @@ -509,6 +509,12 @@ static int igb_ptp_feature_enable_82580(struct ptp_clock_info *ptp, PTP_STRICT_FLAGS)) return -EOPNOTSUPP; + /* Both the rising and falling edge are timestamped */ + if (rq->extts.flags & PTP_STRICT_FLAGS && + (rq->extts.flags & PTP_ENABLE_FEATURE) && + (rq->extts.flags & PTP_EXTTS_EDGES) != PTP_EXTTS_EDGES) + return -EOPNOTSUPP; + if (on) { pin = ptp_find_pin(igb->ptp_clock, PTP_PF_EXTTS, rq->extts.index); diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h index b8111ad9a9a83..cd1d7b6c17823 100644 --- a/drivers/net/ethernet/intel/igc/igc.h +++ b/drivers/net/ethernet/intel/igc/igc.h @@ -579,6 +579,7 @@ struct igc_metadata_request { struct xsk_tx_metadata *meta; struct igc_ring *tx_ring; u32 cmd_type; + u16 used_desc; }; struct igc_q_vector { diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c index 84307bb7313e0..472f009630c98 100644 --- a/drivers/net/ethernet/intel/igc/igc_main.c +++ b/drivers/net/ethernet/intel/igc/igc_main.c @@ -1092,7 +1092,8 @@ static int igc_init_empty_frame(struct igc_ring *ring, dma = dma_map_single(ring->dev, skb->data, size, DMA_TO_DEVICE); if (dma_mapping_error(ring->dev, dma)) { - netdev_err_once(ring->netdev, "Failed to map DMA for TX\n"); + net_err_ratelimited("%s: DMA mapping error for empty frame\n", + netdev_name(ring->netdev)); return -ENOMEM; } @@ -1108,20 +1109,12 @@ static int igc_init_empty_frame(struct igc_ring *ring, return 0; } -static int igc_init_tx_empty_descriptor(struct igc_ring *ring, - struct sk_buff *skb, - struct igc_tx_buffer *first) +static void igc_init_tx_empty_descriptor(struct igc_ring *ring, + struct sk_buff *skb, + struct igc_tx_buffer *first) { union igc_adv_tx_desc *desc; u32 cmd_type, olinfo_status; - int err; - - if (!igc_desc_unused(ring)) - return -EBUSY; - - err = igc_init_empty_frame(ring, first, skb); - if (err) - return err; cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT | IGC_ADVTXD_DCMD_IFCS | IGC_TXD_DCMD | @@ -1140,8 +1133,6 @@ static int igc_init_tx_empty_descriptor(struct igc_ring *ring, ring->next_to_use++; if (ring->next_to_use == ring->count) ring->next_to_use = 0; - - return 0; } #define IGC_EMPTY_FRAME_SIZE 60 @@ -1567,6 +1558,40 @@ static bool igc_request_tx_tstamp(struct igc_adapter *adapter, struct sk_buff *s return false; } +static int igc_insert_empty_frame(struct igc_ring *tx_ring) +{ + struct igc_tx_buffer *empty_info; + struct sk_buff *empty_skb; + void *data; + int ret; + + empty_info = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; + empty_skb = 
alloc_skb(IGC_EMPTY_FRAME_SIZE, GFP_ATOMIC); + if (unlikely(!empty_skb)) { + net_err_ratelimited("%s: skb alloc error for empty frame\n", + netdev_name(tx_ring->netdev)); + return -ENOMEM; + } + + data = skb_put(empty_skb, IGC_EMPTY_FRAME_SIZE); + memset(data, 0, IGC_EMPTY_FRAME_SIZE); + + /* Prepare DMA mapping and Tx buffer information */ + ret = igc_init_empty_frame(tx_ring, empty_info, empty_skb); + if (unlikely(ret)) { + dev_kfree_skb_any(empty_skb); + return ret; + } + + /* Prepare advanced context descriptor for empty packet */ + igc_tx_ctxtdesc(tx_ring, 0, false, 0, 0, 0); + + /* Prepare advanced data descriptor for empty packet */ + igc_init_tx_empty_descriptor(tx_ring, empty_skb, empty_info); + + return 0; +} + static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb, struct igc_ring *tx_ring) { @@ -1586,6 +1611,7 @@ static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb, * + 1 desc for skb_headlen/IGC_MAX_DATA_PER_TXD, * + 2 desc gap to keep tail from touching head, * + 1 desc for context descriptor, + * + 2 desc for inserting an empty packet for launch time, * otherwise try next time */ for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) @@ -1605,24 +1631,16 @@ static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb, launch_time = igc_tx_launchtime(tx_ring, txtime, &first_flag, &insert_empty); if (insert_empty) { - struct igc_tx_buffer *empty_info; - struct sk_buff *empty; - void *data; - - empty_info = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; - empty = alloc_skb(IGC_EMPTY_FRAME_SIZE, GFP_ATOMIC); - if (!empty) - goto done; - - data = skb_put(empty, IGC_EMPTY_FRAME_SIZE); - memset(data, 0, IGC_EMPTY_FRAME_SIZE); - - igc_tx_ctxtdesc(tx_ring, 0, false, 0, 0, 0); - - if (igc_init_tx_empty_descriptor(tx_ring, - empty, - empty_info) < 0) - dev_kfree_skb_any(empty); + /* Reset the launch time if the required empty frame fails to + * be inserted. However, this packet is not dropped, so it + * "dirties" the current Qbv cycle. This ensures that the + * upcoming packet, which is scheduled in the next Qbv cycle, + * does not require an empty frame. This way, the launch time + * continues to function correctly despite the current failure + * to insert the empty frame. + */ + if (igc_insert_empty_frame(tx_ring)) + launch_time = 0; } done: @@ -1650,7 +1668,8 @@ static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb, if (igc_request_tx_tstamp(adapter, skb, &tstamp_flags)) { skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; tx_flags |= IGC_TX_FLAGS_TSTAMP | tstamp_flags; - if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP_USE_CYCLES) + if (skb->sk && + READ_ONCE(skb->sk->sk_tsflags) & SOF_TIMESTAMPING_BIND_PHC) tx_flags |= IGC_TX_FLAGS_TSTAMP_TIMER_1; } else { adapter->tx_hwtstamp_skipped++; @@ -2953,9 +2972,48 @@ static u64 igc_xsk_fill_timestamp(void *_priv) return *(u64 *)_priv; } +static void igc_xsk_request_launch_time(u64 launch_time, void *_priv) +{ + struct igc_metadata_request *meta_req = _priv; + struct igc_ring *tx_ring = meta_req->tx_ring; + __le32 launch_time_offset; + bool insert_empty = false; + bool first_flag = false; + u16 used_desc = 0; + + if (!tx_ring->launchtime_enable) + return; + + launch_time_offset = igc_tx_launchtime(tx_ring, + ns_to_ktime(launch_time), + &first_flag, &insert_empty); + if (insert_empty) { + /* Disregard the launch time request if the required empty frame + * fails to be inserted. 
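The launch time consumed by igc_xsk_request_launch_time() arrives through the AF_XDP Tx metadata area. A userspace sketch of filling that request, assuming the xsk_tx_metadata uapi from the same series (XDP_TXMD_FLAGS_LAUNCH_TIME and request.launch_time); socket and umem setup omitted:

    #include <linux/if_xdp.h>

    static void example_set_launch_time(struct xsk_tx_metadata *meta,
    				    __u64 txtime_ns)
    {
    	/* txtime_ns is an absolute time in the NIC's PTP timebase */
    	meta->flags |= XDP_TXMD_FLAGS_LAUNCH_TIME;
    	meta->request.launch_time = txtime_ns;
    }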
+ */ + if (igc_insert_empty_frame(tx_ring)) + return; + + meta_req->tx_buffer = + &tx_ring->tx_buffer_info[tx_ring->next_to_use]; + /* Inserting an empty packet requires two descriptors: + * one data descriptor and one context descriptor. + */ + used_desc += 2; + } + + /* Use one context descriptor to specify launch time and first flag. */ + igc_tx_ctxtdesc(tx_ring, launch_time_offset, first_flag, 0, 0, 0); + used_desc += 1; + + /* Update the number of used descriptors in this request */ + meta_req->used_desc += used_desc; +} + const struct xsk_tx_metadata_ops igc_xsk_tx_metadata_ops = { .tmo_request_timestamp = igc_xsk_request_timestamp, .tmo_fill_timestamp = igc_xsk_fill_timestamp, + .tmo_request_launch_time = igc_xsk_request_launch_time, }; static void igc_xdp_xmit_zc(struct igc_ring *ring) @@ -2978,7 +3036,13 @@ static void igc_xdp_xmit_zc(struct igc_ring *ring) ntu = ring->next_to_use; budget = igc_desc_unused(ring); - while (xsk_tx_peek_desc(pool, &xdp_desc) && budget--) { + /* Packets with launch time require one data descriptor and one context + * descriptor. When the launch time falls into the next Qbv cycle, we + * may need to insert an empty packet, which requires two more + * descriptors. Therefore, to be safe, we always ensure we have at least + * 4 descriptors available. + */ + while (xsk_tx_peek_desc(pool, &xdp_desc) && budget >= 4) { struct igc_metadata_request meta_req; struct xsk_tx_metadata *meta = NULL; struct igc_tx_buffer *bi; @@ -2999,9 +3063,19 @@ static void igc_xdp_xmit_zc(struct igc_ring *ring) meta_req.tx_ring = ring; meta_req.tx_buffer = bi; meta_req.meta = meta; + meta_req.used_desc = 0; xsk_tx_metadata_request(meta, &igc_xsk_tx_metadata_ops, &meta_req); + /* xsk_tx_metadata_request() may have updated next_to_use */ + ntu = ring->next_to_use; + + /* xsk_tx_metadata_request() may have updated Tx buffer info */ + bi = meta_req.tx_buffer; + + /* xsk_tx_metadata_request() may use a few descriptors */ + budget -= meta_req.used_desc; + tx_desc = IGC_TX_DESC(ring, ntu); tx_desc->read.cmd_type_len = cpu_to_le32(meta_req.cmd_type); tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); @@ -3019,9 +3093,11 @@ static void igc_xdp_xmit_zc(struct igc_ring *ring) ntu++; if (ntu == ring->count) ntu = 0; + + ring->next_to_use = ntu; + budget--; } - ring->next_to_use = ntu; if (tx_desc) { igc_flush_tx_descriptors(ring); xsk_tx_release(pool); diff --git a/drivers/net/ethernet/intel/igc/igc_xdp.c b/drivers/net/ethernet/intel/igc/igc_xdp.c index 13bbd3346e01f..c538e6b18aad5 100644 --- a/drivers/net/ethernet/intel/igc/igc_xdp.c +++ b/drivers/net/ethernet/intel/igc/igc_xdp.c @@ -14,6 +14,7 @@ int igc_xdp_set_prog(struct igc_adapter *adapter, struct bpf_prog *prog, bool if_running = netif_running(dev); struct bpf_prog *old_prog; bool need_update; + unsigned int i; if (dev->mtu > ETH_DATA_LEN) { /* For now, the driver doesn't support XDP functionality with @@ -24,8 +25,13 @@ int igc_xdp_set_prog(struct igc_adapter *adapter, struct bpf_prog *prog, } need_update = !!adapter->xdp_prog != !!prog; - if (if_running && need_update) - igc_close(dev); + if (if_running && need_update) { + for (i = 0; i < adapter->num_rx_queues; i++) { + igc_disable_rx_ring(adapter->rx_ring[i]); + igc_disable_tx_ring(adapter->tx_ring[i]); + napi_disable(&adapter->rx_ring[i]->q_vector->napi); + } + } old_prog = xchg(&adapter->xdp_prog, prog); if (old_prog) @@ -36,8 +42,13 @@ int igc_xdp_set_prog(struct igc_adapter *adapter, struct bpf_prog *prog, else xdp_features_clear_redirect_target(dev); - if (if_running 
&& need_update) - igc_open(dev); + if (if_running && need_update) { + for (i = 0; i < adapter->num_rx_queues; i++) { + napi_enable(&adapter->rx_ring[i]->q_vector->napi); + igc_enable_tx_ring(adapter->tx_ring[i]); + igc_enable_rx_ring(adapter->rx_ring[i]); + } + } return 0; } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.c index 683c668672d65..cb07ecd8937d3 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.c @@ -1122,7 +1122,7 @@ static bool ixgbe_is_media_cage_present(struct ixgbe_hw *hw) * returns error (ENOENT), then no cage present. If no cage present then * connection type is backplane or BASE-T. */ - return ixgbe_aci_get_netlist_node(hw, cmd, NULL, NULL); + return !ixgbe_aci_get_netlist_node(hw, cmd, NULL, NULL); } /** diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index da91c582d439b..f03925c1f521a 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -3185,6 +3185,7 @@ static int ixgbe_get_ts_info(struct net_device *dev, case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: case ixgbe_mac_x550em_a: + case ixgbe_mac_e610: info->rx_filters |= BIT(HWTSTAMP_FILTER_ALL); break; case ixgbe_mac_X540: diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c index 866024f2b9eeb..07ea1954a276e 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c @@ -817,30 +817,9 @@ static void ixgbe_ipsec_del_sa(struct xfrm_state *xs) } } -/** - * ixgbe_ipsec_offload_ok - can this packet use the xfrm hw offload - * @skb: current data packet - * @xs: pointer to transformer state struct - **/ -static bool ixgbe_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs) -{ - if (xs->props.family == AF_INET) { - /* Offload with IPv4 options is not supported yet */ - if (ip_hdr(skb)->ihl != 5) - return false; - } else { - /* Offload with IPv6 extension headers is not support yet */ - if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr)) - return false; - } - - return true; -} - static const struct xfrmdev_ops ixgbe_xfrmdev_ops = { .xdo_dev_state_add = ixgbe_ipsec_add_sa, .xdo_dev_state_delete = ixgbe_ipsec_del_sa, - .xdo_dev_offload_ok = ixgbe_ipsec_offload_ok, }; /** diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 467f81239e12f..481f917f7ed28 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -3185,6 +3185,10 @@ static void ixgbe_handle_fw_event(struct ixgbe_adapter *adapter) case ixgbe_aci_opc_get_link_status: ixgbe_handle_link_status_event(adapter, &event); break; + case ixgbe_aci_opc_temp_tca_event: + e_crit(drv, "%s\n", ixgbe_overheat_msg); + ixgbe_down(adapter); + break; default: e_warn(hw, "unknown FW async event captured\n"); break; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c index 9339edbd90821..eef25e11d9381 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c @@ -140,6 +140,7 @@ * proper mult and shift to convert the cycles into nanoseconds of time. 
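The ixgbe_is_media_cage_present() fix above hinges on the ACI call's return convention: ixgbe_aci_get_netlist_node() returns 0 when the node exists and an error such as -ENOENT when it does not, so presence is the negation of the return code, as the reduced model below shows:

    #include <linux/types.h>

    /* lookup_ret stands in for ixgbe_aci_get_netlist_node()'s return code */
    static bool example_media_cage_present(int lookup_ret)
    {
    	return lookup_ret == 0;	/* 0: node found; -ENOENT: no cage */
    }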
*/ #define IXGBE_X550_BASE_PERIOD 0xC80000000ULL +#define IXGBE_E610_BASE_PERIOD 0x333333333ULL #define INCVALUE_MASK 0x7FFFFFFF #define ISGN 0x80000000 @@ -415,6 +416,7 @@ static void ixgbe_ptp_convert_to_hwtstamp(struct ixgbe_adapter *adapter, case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: case ixgbe_mac_x550em_a: + case ixgbe_mac_e610: /* Upper 32 bits represent billions of cycles, lower 32 bits * represent cycles. However, we use timespec64_to_ns for the * correct math even though the units haven't been corrected @@ -492,11 +494,13 @@ static int ixgbe_ptp_adjfine_X550(struct ptp_clock_info *ptp, long scaled_ppm) struct ixgbe_adapter *adapter = container_of(ptp, struct ixgbe_adapter, ptp_caps); struct ixgbe_hw *hw = &adapter->hw; + u64 rate, base; bool neg_adj; - u64 rate; u32 inca; - neg_adj = diff_by_scaled_ppm(IXGBE_X550_BASE_PERIOD, scaled_ppm, &rate); + base = hw->mac.type == ixgbe_mac_e610 ? IXGBE_E610_BASE_PERIOD : + IXGBE_X550_BASE_PERIOD; + neg_adj = diff_by_scaled_ppm(base, scaled_ppm, &rate); /* warn if rate is too large */ if (rate >= INCVALUE_MASK) @@ -559,6 +563,7 @@ static int ixgbe_ptp_gettimex(struct ptp_clock_info *ptp, case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: case ixgbe_mac_x550em_a: + case ixgbe_mac_e610: /* Upper 32 bits represent billions of cycles, lower 32 bits * represent cycles. However, we use timespec64_to_ns for the * correct math even though the units haven't been corrected @@ -1067,6 +1072,7 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter, case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: case ixgbe_mac_x550em_a: + case ixgbe_mac_e610: /* enable timestamping all packets only if at least some * packets were requested. Otherwise, play nice and disable * timestamping @@ -1233,6 +1239,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter) fallthrough; case ixgbe_mac_x550em_a: case ixgbe_mac_X550: + case ixgbe_mac_e610: cc.read = ixgbe_ptp_read_X550; break; case ixgbe_mac_X540: @@ -1280,6 +1287,7 @@ static void ixgbe_ptp_init_systime(struct ixgbe_adapter *adapter) case ixgbe_mac_X550EM_x: case ixgbe_mac_x550em_a: case ixgbe_mac_X550: + case ixgbe_mac_e610: tsauxc = IXGBE_READ_REG(hw, IXGBE_TSAUXC); /* Reset SYSTIME registers to 0 */ @@ -1407,6 +1415,7 @@ static long ixgbe_ptp_create_clock(struct ixgbe_adapter *adapter) case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: case ixgbe_mac_x550em_a: + case ixgbe_mac_e610: snprintf(adapter->ptp_caps.name, 16, "%s", netdev->name); adapter->ptp_caps.owner = THIS_MODULE; adapter->ptp_caps.max_adj = 30000000; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type_e610.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type_e610.h index 8d06ade3c7cd9..617e07878e4f7 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type_e610.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type_e610.h @@ -171,6 +171,9 @@ enum ixgbe_aci_opc { ixgbe_aci_opc_done_alt_write = 0x0904, ixgbe_aci_opc_clear_port_alt_write = 0x0906, + /* TCA Events */ + ixgbe_aci_opc_temp_tca_event = 0x0C94, + /* debug commands */ ixgbe_aci_opc_debug_dump_internals = 0xFF08, diff --git a/drivers/net/ethernet/intel/ixgbevf/ipsec.c b/drivers/net/ethernet/intel/ixgbevf/ipsec.c index f804b35d79c72..8ba037e3d9c27 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ipsec.c +++ b/drivers/net/ethernet/intel/ixgbevf/ipsec.c @@ -428,30 +428,9 @@ static void ixgbevf_ipsec_del_sa(struct xfrm_state *xs) } } -/** - * ixgbevf_ipsec_offload_ok - can this packet use the xfrm hw offload - * @skb: current data packet - * @xs: pointer to transformer state 
struct - **/ -static bool ixgbevf_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs) -{ - if (xs->props.family == AF_INET) { - /* Offload with IPv4 options is not supported yet */ - if (ip_hdr(skb)->ihl != 5) - return false; - } else { - /* Offload with IPv6 extension headers is not support yet */ - if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr)) - return false; - } - - return true; -} - static const struct xfrmdev_ops ixgbevf_xfrmdev_ops = { .xdo_dev_state_add = ixgbevf_ipsec_add_sa, .xdo_dev_state_delete = ixgbevf_ipsec_del_sa, - .xdo_dev_offload_ok = ixgbevf_ipsec_offload_ok, }; /** diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 4fe121b9f94b6..147571fdada37 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -2342,7 +2342,7 @@ mvneta_swbm_rx_frame(struct mvneta_port *pp, prefetch(data); xdp_buff_clear_frags_flag(xdp); xdp_prepare_buff(xdp, data, pp->rx_offset_correction + MVNETA_MH_SIZE, - data_len, false); + data_len, true); } static void @@ -2396,6 +2396,7 @@ mvneta_swbm_build_skb(struct mvneta_port *pp, struct page_pool *pool, struct xdp_buff *xdp, u32 desc_status) { struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp); + u32 metasize = xdp->data - xdp->data_meta; struct sk_buff *skb; u8 num_frags; @@ -2410,6 +2411,8 @@ mvneta_swbm_build_skb(struct mvneta_port *pp, struct page_pool *pool, skb_reserve(skb, xdp->data - xdp->data_hard_start); skb_put(skb, xdp->data_end - xdp->data); + if (metasize) + skb_metadata_set(skb, metasize); skb->ip_summed = mvneta_rx_csum(pp, desc_status); if (unlikely(xdp_buff_has_frags(xdp))) @@ -5557,7 +5560,6 @@ static int mvneta_probe(struct platform_device *pdev) clk_prepare_enable(pp->clk_bus); pp->phylink_pcs.ops = &mvneta_phylink_pcs_ops; - pp->phylink_pcs.neg_mode = true; pp->phylink_config.dev = &dev->dev; pp->phylink_config.type = PHYLINK_NETDEV; diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c index 1641791a2d5b4..8ed83fb988624 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c @@ -324,7 +324,7 @@ static const struct mvpp2_cls_flow cls_flows[MVPP2_N_PRS_FLOWS] = { MVPP2_PRS_RI_VLAN_MASK), /* Non IP flow, with vlan tag */ MVPP2_DEF_FLOW(MVPP22_FLOW_ETHERNET, MVPP2_FL_NON_IP_TAG, - MVPP22_CLS_HEK_OPT_VLAN, + MVPP22_CLS_HEK_TAGGED, 0, 0), }; diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index dd76c1b7ed3a1..54a235366a01b 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c @@ -3915,13 +3915,13 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi, while (rx_done < rx_todo) { struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq); + u32 rx_status, timestamp, metasize = 0; struct mvpp2_bm_pool *bm_pool; struct page_pool *pp = NULL; struct sk_buff *skb; unsigned int frag_size; dma_addr_t dma_addr; phys_addr_t phys_addr; - u32 rx_status, timestamp; int pool, rx_bytes, err, ret; struct page *page; void *data; @@ -3983,7 +3983,7 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi, xdp_init_buff(&xdp, PAGE_SIZE, xdp_rxq); xdp_prepare_buff(&xdp, data, MVPP2_MH_SIZE + MVPP2_SKB_HEADROOM, - rx_bytes, false); + rx_bytes, true); ret = mvpp2_run_xdp(port, xdp_prog, &xdp, pp, &ps); @@ -3999,6 +3999,8 @@ static int mvpp2_rx(struct mvpp2_port *port, struct 
napi_struct *napi, ps.rx_bytes += rx_bytes; continue; } + + metasize = xdp.data - xdp.data_meta; } if (frag_size) @@ -4038,6 +4040,8 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi, skb_reserve(skb, MVPP2_MH_SIZE + MVPP2_SKB_HEADROOM); skb_put(skb, rx_bytes); + if (metasize) + skb_metadata_set(skb, metasize); skb->ip_summed = mvpp2_rx_csum(port, rx_status); skb->protocol = eth_type_trans(skb, dev); @@ -7024,9 +7028,7 @@ static int mvpp2_port_probe(struct platform_device *pdev, dev->dev_port = port->id; port->pcs_gmac.ops = &mvpp2_phylink_gmac_pcs_ops; - port->pcs_gmac.neg_mode = true; port->pcs_xlg.ops = &mvpp2_phylink_xlg_pcs_ops; - port->pcs_xlg.neg_mode = true; if (!mvpp2_use_acpi_compat_mode(port_fwnode)) { port->phylink_config.dev = &dev->dev; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c index 8216f843a7cd5..0b27a695008bd 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c @@ -66,8 +66,18 @@ static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool en); /* Supported devices */ static const struct pci_device_id cgx_id_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_CGX) }, - { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_RPM) }, - { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10KB_RPM) }, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_RPM, + PCI_ANY_ID, PCI_SUBSYS_DEVID_CN10K_A) }, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_RPM, + PCI_ANY_ID, PCI_SUBSYS_DEVID_CNF10K_A) }, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_RPM, + PCI_ANY_ID, PCI_SUBSYS_DEVID_CNF10K_B) }, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10KB_RPM, + PCI_ANY_ID, PCI_SUBSYS_DEVID_CN10K_B) }, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10KB_RPM, + PCI_ANY_ID, PCI_SUBSYS_DEVID_CN20KA) }, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10KB_RPM, + PCI_ANY_ID, PCI_SUBSYS_DEVID_CNF20KA) }, { 0, } /* end of table */ }; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c b/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c index d39d86e694ccf..655dd4726d36e 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c @@ -925,7 +925,6 @@ void rvu_mcs_exit(struct rvu *rvu) if (!rvu->mcs_intr_wq) return; - flush_workqueue(rvu->mcs_intr_wq); destroy_workqueue(rvu->mcs_intr_wq); rvu->mcs_intr_wq = NULL; } diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h index a383b5ef5b2d8..60f085b00a8cc 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h @@ -30,6 +30,8 @@ #define PCI_SUBSYS_DEVID_CNF10K_A 0xBA00 #define PCI_SUBSYS_DEVID_CNF10K_B 0xBC00 #define PCI_SUBSYS_DEVID_CN10K_B 0xBD00 +#define PCI_SUBSYS_DEVID_CN20KA 0xC220 +#define PCI_SUBSYS_DEVID_CNF20KA 0xC320 /* PCI BAR nos */ #define PCI_AF_REG_BAR_NUM 0 diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile index cb6513ab35e74..69e0778f9ac10 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile +++ b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile @@ -9,7 +9,7 @@ obj-$(CONFIG_RVU_ESWITCH) += rvu_rep.o rvu_nicpf-y := otx2_pf.o otx2_common.o otx2_txrx.o otx2_ethtool.o \ otx2_flows.o otx2_tc.o cn10k.o otx2_dmac_flt.o \ - otx2_devlink.o qos_sq.o qos.o + 
otx2_devlink.o qos_sq.o qos.o otx2_xsk.o rvu_nicvf-y := otx2_vf.o rvu_rep-y := rep.o diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c index a15cc86635d66..c3b6e0f60a799 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c @@ -112,9 +112,12 @@ int cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq) struct otx2_nic *pfvf = dev; int cnt = cq->pool_ptrs; u64 ptrs[NPA_MAX_BURST]; + struct otx2_pool *pool; dma_addr_t bufptr; int num_ptrs = 1; + pool = &pfvf->qset.pool[cq->cq_idx]; + /* Refill pool with new buffers */ while (cq->pool_ptrs) { if (otx2_alloc_buffer(pfvf, cq, &bufptr)) { @@ -124,7 +127,9 @@ int cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq) break; } cq->pool_ptrs--; - ptrs[num_ptrs] = (u64)bufptr + OTX2_HEAD_ROOM; + ptrs[num_ptrs] = pool->xsk_pool ? + (u64)bufptr : (u64)bufptr + OTX2_HEAD_ROOM; + num_ptrs++; if (num_ptrs == NPA_MAX_BURST || cq->pool_ptrs == 0) { __cn10k_aura_freeptr(pfvf, cq->cq_idx, ptrs, diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c index 09a5b52682055..fc59e50bafce6 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c @@ -744,24 +744,9 @@ static void cn10k_ipsec_del_state(struct xfrm_state *x) queue_work(pf->ipsec.sa_workq, &pf->ipsec.sa_work); } -static bool cn10k_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x) -{ - if (x->props.family == AF_INET) { - /* Offload with IPv4 options is not supported yet */ - if (ip_hdr(skb)->ihl > 5) - return false; - } else { - /* Offload with IPv6 extension headers is not support yet */ - if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr)) - return false; - } - return true; -} - static const struct xfrmdev_ops cn10k_ipsec_xfrmdev_ops = { .xdo_dev_state_add = cn10k_ipsec_add_state, .xdo_dev_state_delete = cn10k_ipsec_del_state, - .xdo_dev_offload_ok = cn10k_ipsec_offload_ok, }; static void cn10k_ipsec_sa_wq_handler(struct work_struct *work) diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c index 2b49bfec78692..84cd029a85aab 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c @@ -17,6 +17,7 @@ #include "otx2_common.h" #include "otx2_struct.h" #include "cn10k.h" +#include "otx2_xsk.h" static bool otx2_is_pfc_enabled(struct otx2_nic *pfvf) { @@ -330,6 +331,10 @@ int otx2_set_rss_table(struct otx2_nic *pfvf, int ctx_id) rss_ctx = rss->rss_ctx[ctx_id]; /* Get memory to put this msg */ for (idx = 0; idx < rss->rss_size; idx++) { + /* Ignore the queue if AF_XDP zero copy is enabled */ + if (test_bit(rss_ctx->ind_tbl[idx], pfvf->af_xdp_zc_qidx)) + continue; + aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox); if (!aq) { /* The shared memory buffer can be full. 
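cn10k_ipsec_offload_ok() above is the third identical xdo_dev_offload_ok callback deleted in this section, after the ixgbe and ixgbevf copies. All three rejected only two generic cases (IPv4 options, IPv6 extension headers), which strongly suggests that check now lives in common xfrm code rather than in each driver. Condensed, the duplicated logic being removed was (kernel-style sketch, not new driver code):

#include <linux/skbuff.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/xfrm.h>

/* The whole per-driver policy reduced to: no IPv4 options
 * (ihl == 5 means a bare 20-byte header) and no IPv6 extension
 * headers between the base header and the payload.
 */
static bool demo_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs)
{
	if (xs->props.family == AF_INET)
		return ip_hdr(skb)->ihl == 5;

	return !ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr);
}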
@@ -549,10 +554,13 @@ static int otx2_alloc_pool_buf(struct otx2_nic *pfvf, struct otx2_pool *pool, } static int __otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool, - dma_addr_t *dma) + dma_addr_t *dma, int qidx, int idx) { u8 *buf; + if (pool->xsk_pool) + return otx2_xsk_pool_alloc_buf(pfvf, pool, dma, idx); + if (pool->page_pool) return otx2_alloc_pool_buf(pfvf, pool, dma); @@ -571,12 +579,12 @@ static int __otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool, } int otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool, - dma_addr_t *dma) + dma_addr_t *dma, int qidx, int idx) { int ret; local_bh_disable(); - ret = __otx2_alloc_rbuf(pfvf, pool, dma); + ret = __otx2_alloc_rbuf(pfvf, pool, dma, qidx, idx); local_bh_enable(); return ret; } @@ -584,7 +592,8 @@ int otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool, int otx2_alloc_buffer(struct otx2_nic *pfvf, struct otx2_cq_queue *cq, dma_addr_t *dma) { - if (unlikely(__otx2_alloc_rbuf(pfvf, cq->rbpool, dma))) + if (unlikely(__otx2_alloc_rbuf(pfvf, cq->rbpool, dma, + cq->cq_idx, cq->pool_ptrs - 1))) return -ENOMEM; return 0; } @@ -884,7 +893,7 @@ void otx2_sqb_flush(struct otx2_nic *pfvf) #define RQ_PASS_LVL_AURA (255 - ((95 * 256) / 100)) /* RED when 95% is full */ #define RQ_DROP_LVL_AURA (255 - ((99 * 256) / 100)) /* Drop when 99% is full */ -static int otx2_rq_init(struct otx2_nic *pfvf, u16 qidx, u16 lpb_aura) +int otx2_rq_init(struct otx2_nic *pfvf, u16 qidx, u16 lpb_aura) { struct otx2_qset *qset = &pfvf->qset; struct nix_aq_enq_req *aq; @@ -1028,6 +1037,10 @@ int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura) sq->stats.bytes = 0; sq->stats.pkts = 0; + /* Attach XSK_BUFF_POOL to XDP queue */ + if (qidx > pfvf->hw.xdp_queues) + otx2_attach_xsk_buff(pfvf, sq, (qidx - pfvf->hw.xdp_queues)); + chan_offset = qidx % pfvf->hw.tx_chan_cnt; err = pfvf->hw_ops->sq_aq_init(pfvf, qidx, chan_offset, sqb_aura); @@ -1041,12 +1054,13 @@ int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura) } -static int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx) +int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx) { struct otx2_qset *qset = &pfvf->qset; int err, pool_id, non_xdp_queues; struct nix_aq_enq_req *aq; struct otx2_cq_queue *cq; + struct otx2_pool *pool; cq = &qset->cq[qidx]; cq->cq_idx = qidx; @@ -1055,8 +1069,20 @@ static int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx) cq->cq_type = CQ_RX; cq->cint_idx = qidx; cq->cqe_cnt = qset->rqe_cnt; - if (pfvf->xdp_prog) + if (pfvf->xdp_prog) { xdp_rxq_info_reg(&cq->xdp_rxq, pfvf->netdev, qidx, 0); + pool = &qset->pool[qidx]; + if (pool->xsk_pool) { + xdp_rxq_info_reg_mem_model(&cq->xdp_rxq, + MEM_TYPE_XSK_BUFF_POOL, + NULL); + xsk_pool_set_rxq_info(pool->xsk_pool, &cq->xdp_rxq); + } else if (pool->page_pool) { + xdp_rxq_info_reg_mem_model(&cq->xdp_rxq, + MEM_TYPE_PAGE_POOL, + pool->page_pool); + } + } } else if (qidx < non_xdp_queues) { cq->cq_type = CQ_TX; cq->cint_idx = qidx - pfvf->hw.rx_queues; @@ -1275,9 +1301,10 @@ void otx2_free_bufs(struct otx2_nic *pfvf, struct otx2_pool *pool, pa = otx2_iova_to_phys(pfvf->iommu_domain, iova); page = virt_to_head_page(phys_to_virt(pa)); - if (pool->page_pool) { page_pool_put_full_page(pool->page_pool, page, true); + } else if (pool->xsk_pool) { + /* Note: No way of identifying xdp_buff */ } else { dma_unmap_page_attrs(pfvf->dev, iova, size, DMA_FROM_DEVICE, @@ -1292,6 +1319,7 @@ void otx2_free_aura_ptr(struct otx2_nic *pfvf, int type) int pool_id, pool_start = 0, pool_end = 0, size = 0; struct otx2_pool 
*pool; u64 iova; + int idx; if (type == AURA_NIX_SQ) { pool_start = otx2_get_pool_idx(pfvf, type, 0); pool_end = pool_start + pfvf->hw.sqpool_cnt; size = pfvf->hw.sqb_size; } @@ -1306,16 +1334,21 @@ void otx2_free_aura_ptr(struct otx2_nic *pfvf, int type) /* Free SQB and RQB pointers from the aura pool */ for (pool_id = pool_start; pool_id < pool_end; pool_id++) { - iova = otx2_aura_allocptr(pfvf, pool_id); pool = &pfvf->qset.pool[pool_id]; + iova = otx2_aura_allocptr(pfvf, pool_id); while (iova) { if (type == AURA_NIX_RQ) iova -= OTX2_HEAD_ROOM; - otx2_free_bufs(pfvf, pool, iova, size); - iova = otx2_aura_allocptr(pfvf, pool_id); } + + for (idx = 0; idx < pool->xdp_cnt; idx++) { + if (!pool->xdp[idx]) + continue; + + xsk_buff_free(pool->xdp[idx]); + } } } @@ -1332,7 +1365,8 @@ void otx2_aura_pool_free(struct otx2_nic *pfvf) qmem_free(pfvf->dev, pool->stack); qmem_free(pfvf->dev, pool->fc_addr); page_pool_destroy(pool->page_pool); - pool->page_pool = NULL; + devm_kfree(pfvf->dev, pool->xdp); + pool->xsk_pool = NULL; } devm_kfree(pfvf->dev, pfvf->qset.pool); pfvf->qset.pool = NULL; @@ -1419,6 +1453,7 @@ int otx2_pool_init(struct otx2_nic *pfvf, u16 pool_id, int stack_pages, int numptrs, int buf_size, int type) { struct page_pool_params pp_params = { 0 }; + struct xsk_buff_pool *xsk_pool; struct npa_aq_enq_req *aq; struct otx2_pool *pool; int err; @@ -1462,21 +1497,35 @@ int otx2_pool_init(struct otx2_nic *pfvf, u16 pool_id, aq->ctype = NPA_AQ_CTYPE_POOL; aq->op = NPA_AQ_INSTOP_INIT; - if (type != AURA_NIX_RQ) { - pool->page_pool = NULL; + if (type != AURA_NIX_RQ) + return 0; + + if (!test_bit(pool_id, pfvf->af_xdp_zc_qidx)) { + pp_params.order = get_order(buf_size); + pp_params.flags = PP_FLAG_DMA_MAP; + pp_params.pool_size = min(OTX2_PAGE_POOL_SZ, numptrs); + pp_params.nid = NUMA_NO_NODE; + pp_params.dev = pfvf->dev; + pp_params.dma_dir = DMA_FROM_DEVICE; + pool->page_pool = page_pool_create(&pp_params); + if (IS_ERR(pool->page_pool)) { + netdev_err(pfvf->netdev, "Creation of page pool failed\n"); + return PTR_ERR(pool->page_pool); + } return 0; } - pp_params.order = get_order(buf_size); - pp_params.flags = PP_FLAG_DMA_MAP; - pp_params.pool_size = min(OTX2_PAGE_POOL_SZ, numptrs); - pp_params.nid = NUMA_NO_NODE; - pp_params.dev = pfvf->dev; - pp_params.dma_dir = DMA_FROM_DEVICE; - pool->page_pool = page_pool_create(&pp_params); - if (IS_ERR(pool->page_pool)) { - netdev_err(pfvf->netdev, "Creation of page pool failed\n"); - return PTR_ERR(pool->page_pool); + /* Set XSK pool to support AF_XDP zero-copy */ + xsk_pool = xsk_get_pool_from_qid(pfvf->netdev, pool_id); + if (xsk_pool) { + pool->xsk_pool = xsk_pool; + pool->xdp_cnt = numptrs; + pool->xdp = devm_kcalloc(pfvf->dev, + numptrs, sizeof(struct xdp_buff *), GFP_KERNEL); + if (!pool->xdp) { + netdev_err(pfvf->netdev, "Allocation of xdp buffer array failed\n"); + return -ENOMEM; + } } return 0; @@ -1537,9 +1586,18 @@ int otx2_sq_aura_pool_init(struct otx2_nic *pfvf) } for (ptr = 0; ptr < num_sqbs; ptr++) { - err = otx2_alloc_rbuf(pfvf, pool, &bufptr); - if (err) + err = otx2_alloc_rbuf(pfvf, pool, &bufptr, pool_id, ptr); + if (err) { + if (pool->xsk_pool) { + ptr--; + while (ptr >= 0) { + xsk_buff_free(pool->xdp[ptr]); + ptr--; + } + } goto err_mem; + } + pfvf->hw_ops->aura_freeptr(pfvf, pool_id, bufptr); sq->sqb_ptrs[sq->sqb_count++] = (u64)bufptr; } @@ -1589,11 +1647,19 @@ int otx2_rq_aura_pool_init(struct otx2_nic *pfvf) /* Allocate pointers and free them to aura/pool */ for (pool_id = 0; pool_id < hw->rqpool_cnt; pool_id++) { pool = &pfvf->qset.pool[pool_id]; + for (ptr = 0; ptr < num_ptrs; ptr++) {
- err = otx2_alloc_rbuf(pfvf, pool, &bufptr); - if (err) + err = otx2_alloc_rbuf(pfvf, pool, &bufptr, pool_id, ptr); + if (err) { + if (pool->xsk_pool) { + while (ptr) + xsk_buff_free(pool->xdp[--ptr]); + } return -ENOMEM; + } + pfvf->hw_ops->aura_freeptr(pfvf, pool_id, + pool->xsk_pool ? bufptr : bufptr + OTX2_HEAD_ROOM); } } diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h index 65814e3dc93f5..1e88422825be7 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h @@ -21,6 +21,7 @@ #include #include #include +#include #include #include @@ -128,6 +129,12 @@ enum otx2_errcodes_re { ERRCODE_IL4_CSUM = 0x22, }; +enum otx2_xdp_action { + OTX2_XDP_TX = BIT(0), + OTX2_XDP_REDIRECT = BIT(1), + OTX2_AF_XDP_FRAME = BIT(2), +}; + struct otx2_dev_stats { u64 rx_bytes; u64 rx_frames; @@ -531,6 +538,8 @@ struct otx2_nic { /* Inline ipsec */ struct cn10k_ipsec ipsec; + /* af_xdp zero-copy */ + unsigned long *af_xdp_zc_qidx; }; static inline bool is_otx2_lbkvf(struct pci_dev *pdev) @@ -1002,7 +1011,7 @@ void otx2_txschq_free_one(struct otx2_nic *pfvf, u16 lvl, u16 schq); void otx2_free_pending_sqe(struct otx2_nic *pfvf); void otx2_sqb_flush(struct otx2_nic *pfvf); int otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool, - dma_addr_t *dma); + dma_addr_t *dma, int qidx, int idx); int otx2_rxtx_enable(struct otx2_nic *pfvf, bool enable); void otx2_ctx_disable(struct mbox *mbox, int type, bool npa); int otx2_nix_config_bp(struct otx2_nic *pfvf, bool enable); @@ -1032,6 +1041,8 @@ void otx2_pfaf_mbox_destroy(struct otx2_nic *pf); void otx2_disable_mbox_intr(struct otx2_nic *pf); void otx2_disable_napi(struct otx2_nic *pf); irqreturn_t otx2_cq_intr_handler(int irq, void *cq_irq); +int otx2_rq_init(struct otx2_nic *pfvf, u16 qidx, u16 lpb_aura); +int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx); /* RSS configuration APIs*/ int otx2_rss_init(struct otx2_nic *pfvf); @@ -1094,7 +1105,8 @@ int otx2_del_macfilter(struct net_device *netdev, const u8 *mac); int otx2_add_macfilter(struct net_device *netdev, const u8 *mac); int otx2_enable_rxvlan(struct otx2_nic *pf, bool enable); int otx2_install_rxvlan_offload_flow(struct otx2_nic *pfvf); -bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, u64 iova, int len, u16 qidx); +bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, struct xdp_frame *xdpf, + u64 iova, int len, u16 qidx, u16 flags); u16 otx2_get_max_mtu(struct otx2_nic *pfvf); int otx2_handle_ntuple_tc_features(struct net_device *netdev, netdev_features_t features); @@ -1175,4 +1187,5 @@ static inline int mcam_entry_cmp(const void *a, const void *b) dma_addr_t otx2_dma_map_skb_frag(struct otx2_nic *pfvf, struct sk_buff *skb, int seg, int *len); void otx2_dma_unmap_skb_frags(struct otx2_nic *pfvf, struct sg_list *sg); +int otx2_read_free_sqe(struct otx2_nic *pfvf, u16 qidx); #endif /* OTX2_COMMON_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c index 2d53dc77ef1ef..010385b299886 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c @@ -910,8 +910,12 @@ static int otx2_get_rxfh(struct net_device *dev, return -ENOENT; if (indir) { - for (idx = 0; idx < rss->rss_size; idx++) + for (idx = 0; idx < rss->rss_size; idx++) { + /* Ignore if the rx queue is AF_XDP zero copy enabled */ + if 
(test_bit(rss_ctx->ind_tbl[idx], pfvf->af_xdp_zc_qidx)) + continue; indir[idx] = rss_ctx->ind_tbl[idx]; + } } if (rxfh->key) memcpy(rxfh->key, rss->key, sizeof(rss->key)); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c index e1dde93e8af82..cfed9ec5b1579 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c @@ -27,6 +27,7 @@ #include "qos.h" #include #include "cn10k_ipsec.h" +#include "otx2_xsk.h" #define DRV_NAME "rvu_nicpf" #define DRV_STRING "Marvell RVU NIC Physical Function Driver" @@ -1662,9 +1663,7 @@ void otx2_free_hw_resources(struct otx2_nic *pf) struct nix_lf_free_req *free_req; struct mbox *mbox = &pf->mbox; struct otx2_cq_queue *cq; - struct otx2_pool *pool; struct msg_req *req; - int pool_id; int qidx; /* Ensure all SQE are processed */ @@ -1705,13 +1704,6 @@ void otx2_free_hw_resources(struct otx2_nic *pf) /* Free RQ buffer pointers*/ otx2_free_aura_ptr(pf, AURA_NIX_RQ); - for (qidx = 0; qidx < pf->hw.rx_queues; qidx++) { - pool_id = otx2_get_pool_idx(pf, AURA_NIX_RQ, qidx); - pool = &pf->qset.pool[pool_id]; - page_pool_destroy(pool->page_pool); - pool->page_pool = NULL; - } - otx2_free_cq_res(pf); /* Free all ingress bandwidth profiles allocated */ @@ -2691,7 +2683,6 @@ static int otx2_get_vf_config(struct net_device *netdev, int vf, static int otx2_xdp_xmit_tx(struct otx2_nic *pf, struct xdp_frame *xdpf, int qidx) { - struct page *page; u64 dma_addr; int err = 0; @@ -2701,11 +2692,11 @@ static int otx2_xdp_xmit_tx(struct otx2_nic *pf, struct xdp_frame *xdpf, if (dma_mapping_error(pf->dev, dma_addr)) return -ENOMEM; - err = otx2_xdp_sq_append_pkt(pf, dma_addr, xdpf->len, qidx); + err = otx2_xdp_sq_append_pkt(pf, xdpf, dma_addr, xdpf->len, + qidx, OTX2_XDP_REDIRECT); if (!err) { otx2_dma_unmap_page(pf, dma_addr, xdpf->len, DMA_TO_DEVICE); - page = virt_to_page(xdpf->data); - put_page(page); + xdp_return_frame(xdpf); return -ENOMEM; } return 0; @@ -2789,6 +2780,8 @@ static int otx2_xdp(struct net_device *netdev, struct netdev_bpf *xdp) switch (xdp->command) { case XDP_SETUP_PROG: return otx2_xdp_setup(pf, xdp->prog); + case XDP_SETUP_XSK_POOL: + return otx2_xsk_pool_setup(pf, xdp->xsk.pool, xdp->xsk.queue_id); default: return -EINVAL; } @@ -2866,6 +2859,7 @@ static const struct net_device_ops otx2_netdev_ops = { .ndo_set_vf_vlan = otx2_set_vf_vlan, .ndo_get_vf_config = otx2_get_vf_config, .ndo_bpf = otx2_xdp, + .ndo_xsk_wakeup = otx2_xsk_wakeup, .ndo_xdp_xmit = otx2_xdp_xmit, .ndo_setup_tc = otx2_setup_tc, .ndo_set_vf_trust = otx2_ndo_set_vf_trust, @@ -3204,16 +3198,28 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id) /* Enable link notifications */ otx2_cgx_config_linkevents(pf, true); + pf->af_xdp_zc_qidx = bitmap_zalloc(qcount, GFP_KERNEL); + if (!pf->af_xdp_zc_qidx) { + err = -ENOMEM; + goto err_sriov_cleanup; + } + #ifdef CONFIG_DCB err = otx2_dcbnl_set_ops(netdev); if (err) - goto err_pf_sriov_init; + goto err_free_zc_bmap; #endif otx2_qos_init(pf, qos_txqs); return 0; +#ifdef CONFIG_DCB +err_free_zc_bmap: + bitmap_free(pf->af_xdp_zc_qidx); +#endif +err_sriov_cleanup: + otx2_sriov_vfcfg_cleanup(pf); err_pf_sriov_init: otx2_shutdown_tc(pf); err_mcam_flow_del: diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c index 224cef9389274..af8cabe828d05 100644 +++
b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c @@ -12,6 +12,7 @@ #include #include #include +#include #include "otx2_reg.h" #include "otx2_common.h" @@ -19,6 +20,7 @@ #include "otx2_txrx.h" #include "otx2_ptp.h" #include "cn10k.h" +#include "otx2_xsk.h" #define CQE_ADDR(CQ, idx) ((CQ)->cqe_base + ((CQ)->cqe_size * (idx))) #define PTP_PORT 0x13F @@ -29,11 +31,17 @@ DEFINE_STATIC_KEY_FALSE(cn10k_ipsec_sa_enabled); +static int otx2_get_free_sqe(struct otx2_snd_queue *sq) +{ + return (sq->cons_head - sq->head - 1 + sq->sqe_cnt) + & (sq->sqe_cnt - 1); +} + static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf, struct bpf_prog *prog, struct nix_cqe_rx_s *cqe, struct otx2_cq_queue *cq, - bool *need_xdp_flush); + u32 *metasize, bool *need_xdp_flush); static void otx2_sq_set_sqe_base(struct otx2_snd_queue *sq, struct sk_buff *skb) @@ -96,20 +104,22 @@ static unsigned int frag_num(unsigned int i) static void otx2_xdp_snd_pkt_handler(struct otx2_nic *pfvf, struct otx2_snd_queue *sq, - struct nix_cqe_tx_s *cqe) + struct nix_cqe_tx_s *cqe, + int *xsk_frames) { struct nix_send_comp_s *snd_comp = &cqe->comp; struct sg_list *sg; - struct page *page; - u64 pa; sg = &sq->sg[snd_comp->sqe_id]; + if (sg->flags & OTX2_AF_XDP_FRAME) { + (*xsk_frames)++; + return; + } - pa = otx2_iova_to_phys(pfvf->iommu_domain, sg->dma_addr[0]); - otx2_dma_unmap_page(pfvf, sg->dma_addr[0], - sg->size[0], DMA_TO_DEVICE); - page = virt_to_page(phys_to_virt(pa)); - put_page(page); + if (sg->flags & OTX2_XDP_REDIRECT) + otx2_dma_unmap_page(pfvf, sg->dma_addr[0], sg->size[0], DMA_TO_DEVICE); + xdp_return_frame((struct xdp_frame *)sg->skb); + sg->skb = (u64)NULL; } static void otx2_snd_pkt_handler(struct otx2_nic *pfvf, @@ -326,6 +336,7 @@ static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf, struct nix_rx_sg_s *sg = &cqe->sg; struct sk_buff *skb = NULL; void *end, *start; + u32 metasize = 0; u64 *seg_addr; u16 *seg_size; int seg; @@ -336,7 +347,8 @@ static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf, } if (pfvf->xdp_prog) - if (otx2_xdp_rcv_pkt_handler(pfvf, pfvf->xdp_prog, cqe, cq, need_xdp_flush)) + if (otx2_xdp_rcv_pkt_handler(pfvf, pfvf->xdp_prog, cqe, cq, + &metasize, need_xdp_flush)) return; skb = napi_get_frags(napi); @@ -368,6 +380,8 @@ static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf, skb->mark = parse->match_id; skb_mark_for_recycle(skb); + if (metasize) + skb_metadata_set(skb, metasize); napi_gro_frags(napi); } @@ -431,6 +445,18 @@ int otx2_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq) return cnt - cq->pool_ptrs; } +static void otx2_zc_submit_pkts(struct otx2_nic *pfvf, struct xsk_buff_pool *xsk_pool, + int *xsk_frames, int qidx, int budget) +{ + if (*xsk_frames) + xsk_tx_completed(xsk_pool, *xsk_frames); + + if (xsk_uses_need_wakeup(xsk_pool)) + xsk_set_tx_need_wakeup(xsk_pool); + + otx2_zc_napi_handler(pfvf, xsk_pool, qidx, budget); +} + static int otx2_tx_napi_handler(struct otx2_nic *pfvf, struct otx2_cq_queue *cq, int budget) { @@ -439,16 +465,22 @@ static int otx2_tx_napi_handler(struct otx2_nic *pfvf, struct nix_cqe_tx_s *cqe; struct net_device *ndev; int processed_cqe = 0; + int xsk_frames = 0; + + qidx = cq->cq_idx - pfvf->hw.rx_queues; + sq = &pfvf->qset.sq[qidx]; if (cq->pend_cqe >= budget) goto process_cqe; - if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe) + if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe) { + if (sq->xsk_pool) + otx2_zc_submit_pkts(pfvf, sq->xsk_pool, &xsk_frames, + qidx, budget); return 0; + } process_cqe: - qidx = cq->cq_idx - 
pfvf->hw.rx_queues; - sq = &pfvf->qset.sq[qidx]; while (likely(processed_cqe < budget) && cq->pend_cqe) { cqe = (struct nix_cqe_tx_s *)otx2_get_next_cqe(cq); @@ -458,10 +490,8 @@ static int otx2_tx_napi_handler(struct otx2_nic *pfvf, break; } - qidx = cq->cq_idx - pfvf->hw.rx_queues; - if (cq->cq_type == CQ_XDP) - otx2_xdp_snd_pkt_handler(pfvf, sq, cqe); + otx2_xdp_snd_pkt_handler(pfvf, sq, cqe, &xsk_frames); else otx2_snd_pkt_handler(pfvf, cq, &pfvf->qset.sq[qidx], cqe, budget, &tx_pkts, &tx_bytes); @@ -502,6 +532,10 @@ static int otx2_tx_napi_handler(struct otx2_nic *pfvf, netif_carrier_ok(ndev)) netif_tx_wake_queue(txq); } + + if (sq->xsk_pool) + otx2_zc_submit_pkts(pfvf, sq->xsk_pool, &xsk_frames, qidx, budget); + return 0; } @@ -527,9 +561,10 @@ static void otx2_adjust_adaptive_coalese(struct otx2_nic *pfvf, struct otx2_cq_p int otx2_napi_handler(struct napi_struct *napi, int budget) { struct otx2_cq_queue *rx_cq = NULL; + struct otx2_cq_queue *cq = NULL; + struct otx2_pool *pool = NULL; struct otx2_cq_poll *cq_poll; int workdone = 0, cq_idx, i; - struct otx2_cq_queue *cq; struct otx2_qset *qset; struct otx2_nic *pfvf; int filled_cnt = -1; @@ -554,6 +589,7 @@ int otx2_napi_handler(struct napi_struct *napi, int budget) if (rx_cq && rx_cq->pool_ptrs) filled_cnt = pfvf->hw_ops->refill_pool_ptrs(pfvf, rx_cq); + /* Clear the IRQ */ otx2_write64(pfvf, NIX_LF_CINTX_INT(cq_poll->cint_idx), BIT_ULL(0)); @@ -566,20 +602,31 @@ int otx2_napi_handler(struct napi_struct *napi, int budget) if (pfvf->flags & OTX2_FLAG_ADPTV_INT_COAL_ENABLED) otx2_adjust_adaptive_coalese(pfvf, cq_poll); + if (likely(cq)) + pool = &pfvf->qset.pool[cq->cq_idx]; + if (unlikely(!filled_cnt)) { struct refill_work *work; struct delayed_work *dwork; - work = &pfvf->refill_wrk[cq->cq_idx]; - dwork = &work->pool_refill_work; - /* Schedule a task if no other task is running */ - if (!cq->refill_task_sched) { - work->napi = napi; - cq->refill_task_sched = true; - schedule_delayed_work(dwork, - msecs_to_jiffies(100)); + if (likely(cq)) { + work = &pfvf->refill_wrk[cq->cq_idx]; + dwork = &work->pool_refill_work; + /* Schedule a task if no other task is running */ + if (!cq->refill_task_sched) { + work->napi = napi; + cq->refill_task_sched = true; + schedule_delayed_work(dwork, + msecs_to_jiffies(100)); + } + /* Call wake-up for not able to fill buffers */ + if (pool->xsk_pool) + xsk_set_rx_need_wakeup(pool->xsk_pool); } } else { + /* Clear wake-up, since buffers are filled successfully */ + if (pool && pool->xsk_pool) + xsk_clear_rx_need_wakeup(pool->xsk_pool); /* Re-enable interrupts */ otx2_write64(pfvf, NIX_LF_CINTX_ENA_W1S(cq_poll->cint_idx), @@ -1147,7 +1194,7 @@ bool otx2_sq_append_skb(void *dev, struct netdev_queue *txq, /* Check if there is enough room between producer * and consumer index. 
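The otx2_get_free_sqe() helper introduced earlier in this file, which also replaces the open-coded free_desc computation just below, is the standard power-of-two ring calculation: masking with (sqe_cnt - 1) folds wrapped indices back into range, and the extra "- 1" keeps one slot unused so a completely full ring stays distinguishable from an empty one. A standalone check of the arithmetic:

/* Standalone sketch of the ring-occupancy math used by
 * otx2_get_free_sqe(); cnt must be a power of two.
 */
#include <assert.h>

static unsigned int free_slots(unsigned int cons, unsigned int prod,
			       unsigned int cnt)
{
	return (cons - prod - 1 + cnt) & (cnt - 1);
}

int main(void)
{
	/* empty ring: producer == consumer, cnt - 1 slots usable */
	assert(free_slots(0, 0, 8) == 7);
	/* producer three ahead of consumer: 4 slots left */
	assert(free_slots(0, 3, 8) == 4);
	/* wrapped indices behave the same thanks to the mask */
	assert(free_slots(6, 1, 8) == 4);
	return 0;
}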
*/ - free_desc = (sq->cons_head - sq->head - 1 + sq->sqe_cnt) & (sq->sqe_cnt - 1); + free_desc = otx2_get_free_sqe(sq); if (free_desc < sq->sqe_thresh) return false; @@ -1230,15 +1277,19 @@ void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq, int q u16 pool_id; u64 iova; - if (pfvf->xdp_prog) + pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_RQ, qidx); + pool = &pfvf->qset.pool[pool_id]; + + if (pfvf->xdp_prog) { + if (pool->page_pool) + xdp_rxq_info_unreg_mem_model(&cq->xdp_rxq); + xdp_rxq_info_unreg(&cq->xdp_rxq); + } if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe) return; - pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_RQ, qidx); - pool = &pfvf->qset.pool[pool_id]; - while (cq->pend_cqe) { cqe = (struct nix_cqe_rx_s *)otx2_get_next_cqe(cq); processed_cqe++; @@ -1359,8 +1410,9 @@ void otx2_free_pending_sqe(struct otx2_nic *pfvf) } } -static void otx2_xdp_sqe_add_sg(struct otx2_snd_queue *sq, u64 dma_addr, - int len, int *offset) +static void otx2_xdp_sqe_add_sg(struct otx2_snd_queue *sq, + struct xdp_frame *xdpf, + u64 dma_addr, int len, int *offset, u16 flags) { struct nix_sqe_sg_s *sg = NULL; u64 *iova = NULL; @@ -1377,16 +1429,34 @@ static void otx2_xdp_sqe_add_sg(struct otx2_snd_queue *sq, u64 dma_addr, sq->sg[sq->head].dma_addr[0] = dma_addr; sq->sg[sq->head].size[0] = len; sq->sg[sq->head].num_segs = 1; + sq->sg[sq->head].flags = flags; + sq->sg[sq->head].skb = (u64)xdpf; } -bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, u64 iova, int len, u16 qidx) +int otx2_read_free_sqe(struct otx2_nic *pfvf, u16 qidx) +{ + struct otx2_snd_queue *sq; + int free_sqe; + + sq = &pfvf->qset.sq[qidx]; + free_sqe = otx2_get_free_sqe(sq); + if (free_sqe < sq->sqe_thresh) { + netdev_warn(pfvf->netdev, "No free sqe for Send queue%d\n", qidx); + return 0; + } + + return free_sqe - sq->sqe_thresh; +} + +bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, struct xdp_frame *xdpf, + u64 iova, int len, u16 qidx, u16 flags) { struct nix_sqe_hdr_s *sqe_hdr; struct otx2_snd_queue *sq; int offset, free_sqe; sq = &pfvf->qset.sq[qidx]; - free_sqe = (sq->num_sqbs - *sq->aura_fc_addr) * sq->sqe_per_sqb; + free_sqe = otx2_get_free_sqe(sq); if (free_sqe < sq->sqe_thresh) return false; @@ -1405,7 +1475,7 @@ bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, u64 iova, int len, u16 qidx) offset = sizeof(*sqe_hdr); - otx2_xdp_sqe_add_sg(sq, iova, len, &offset); + otx2_xdp_sqe_add_sg(sq, xdpf, iova, len, &offset, flags); sqe_hdr->sizem1 = (offset / 16) - 1; pfvf->hw_ops->sqe_flush(pfvf, sq, offset, qidx); @@ -1416,16 +1486,30 @@ static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf, struct bpf_prog *prog, struct nix_cqe_rx_s *cqe, struct otx2_cq_queue *cq, - bool *need_xdp_flush) + u32 *metasize, bool *need_xdp_flush) { + struct xdp_buff xdp, *xsk_buff = NULL; unsigned char *hard_start; + struct otx2_pool *pool; + struct xdp_frame *xdpf; int qidx = cq->cq_idx; - struct xdp_buff xdp; struct page *page; u64 iova, pa; u32 act; int err; + pool = &pfvf->qset.pool[qidx]; + + if (pool->xsk_pool) { + xsk_buff = pool->xdp[--cq->rbpool->xdp_top]; + if (!xsk_buff) + return false; + + xsk_buff->data_end = xsk_buff->data + cqe->sg.seg_size; + act = bpf_prog_run_xdp(prog, xsk_buff); + goto handle_xdp_verdict; + } + iova = cqe->sg.seg_addr - OTX2_HEAD_ROOM; pa = otx2_iova_to_phys(pfvf->iommu_domain, iova); page = virt_to_page(phys_to_virt(pa)); @@ -1434,41 +1518,64 @@ static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf, hard_start = (unsigned char *)phys_to_virt(pa); xdp_prepare_buff(&xdp, hard_start, 
OTX2_HEAD_ROOM, - cqe->sg.seg_size, false); + cqe->sg.seg_size, true); act = bpf_prog_run_xdp(prog, &xdp); +handle_xdp_verdict: switch (act) { case XDP_PASS: + *metasize = xdp.data - xdp.data_meta; break; case XDP_TX: qidx += pfvf->hw.tx_queues; cq->pool_ptrs++; - return otx2_xdp_sq_append_pkt(pfvf, iova, - cqe->sg.seg_size, qidx); + xdpf = xdp_convert_buff_to_frame(&xdp); + return otx2_xdp_sq_append_pkt(pfvf, xdpf, + cqe->sg.seg_addr, + cqe->sg.seg_size, + qidx, OTX2_XDP_TX); case XDP_REDIRECT: cq->pool_ptrs++; - err = xdp_do_redirect(pfvf->netdev, &xdp, prog); + if (xsk_buff) { + err = xdp_do_redirect(pfvf->netdev, xsk_buff, prog); + if (!err) { + *need_xdp_flush = true; + return true; + } + return false; + } - otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize, - DMA_FROM_DEVICE); + err = xdp_do_redirect(pfvf->netdev, &xdp, prog); if (!err) { *need_xdp_flush = true; return true; } - put_page(page); + + otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize, + DMA_FROM_DEVICE); + xdpf = xdp_convert_buff_to_frame(&xdp); + xdp_return_frame(xdpf); break; default: bpf_warn_invalid_xdp_action(pfvf->netdev, prog, act); break; case XDP_ABORTED: + if (xsk_buff) + xsk_buff_free(xsk_buff); trace_xdp_exception(pfvf->netdev, prog, act); break; case XDP_DROP: - otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize, - DMA_FROM_DEVICE); - put_page(page); cq->pool_ptrs++; + if (xsk_buff) { + xsk_buff_free(xsk_buff); + } else if (page->pp) { + page_pool_recycle_direct(pool->page_pool, page); + } else { + otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize, + DMA_FROM_DEVICE); + put_page(page); + } return true; } return false; diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h index d23810963fdbd..acf259d720088 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h @@ -12,6 +12,7 @@ #include #include #include +#include #define LBK_CHAN_BASE 0x000 #define SDP_CHAN_BASE 0x700 @@ -76,6 +77,7 @@ struct otx2_rcv_queue { struct sg_list { u16 num_segs; + u16 flags; u64 skb; u64 size[OTX2_MAX_FRAGS_IN_SQE]; u64 dma_addr[OTX2_MAX_FRAGS_IN_SQE]; @@ -104,6 +106,8 @@ struct otx2_snd_queue { /* SQE ring and CPT response queue for Inline IPSEC */ struct qmem *sqe_ring; struct qmem *cpt_resp; + /* Buffer pool for af_xdp zero-copy */ + struct xsk_buff_pool *xsk_pool; } ____cacheline_aligned_in_smp; enum cq_type { @@ -127,7 +131,11 @@ struct otx2_pool { struct qmem *stack; struct qmem *fc_addr; struct page_pool *page_pool; + struct xsk_buff_pool *xsk_pool; + struct xdp_buff **xdp; + u16 xdp_cnt; u16 rbsize; + u16 xdp_top; }; struct otx2_cq_queue { @@ -144,6 +152,7 @@ struct otx2_cq_queue { void *cqe_base; struct qmem *cqe; struct otx2_pool *rbpool; + bool xsk_zc_en; struct xdp_rxq_info xdp_rxq; } ____cacheline_aligned_in_smp; diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c index e926c6ce96cff..7ef3ba477d496 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c @@ -722,15 +722,27 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (err) goto err_shutdown_tc; + vf->af_xdp_zc_qidx = bitmap_zalloc(qcount, GFP_KERNEL); + if (!vf->af_xdp_zc_qidx) { + err = -ENOMEM; + goto err_unreg_devlink; + } + #ifdef CONFIG_DCB err = otx2_dcbnl_set_ops(netdev); if (err) - goto err_shutdown_tc; + goto err_free_zc_bmap; #endif otx2_qos_init(vf, qos_txqs); 
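With the new sg_list::flags field added in the otx2_txrx.h hunk above, a single XDP send queue now carries entries from three producers: local XDP_TX frames, frames redirected from other devices, and raw AF_XDP descriptors. The completion handler releases each differently: AF_XDP entries are only counted for one batched xsk_tx_completed() call, redirected frames are DMA-unmapped first, and both xdp_frame flavours go back via xdp_return_frame(). A standalone sketch of that dispatch-on-flags pattern (stand-in types, printf in place of the real unmap and free calls):

#include <stdio.h>

enum tx_src {
	TX_XDP_TX	= 1 << 0,	/* driver-local XDP_TX frame */
	TX_XDP_REDIRECT	= 1 << 1,	/* frame redirected to this device */
	TX_AF_XDP	= 1 << 2,	/* raw AF_XDP descriptor */
};

struct tx_meta {
	unsigned int flags;
	const char *frame;	/* stand-in for the xdp_frame pointer */
};

/* AF_XDP entries are only counted: the buffers belong to the user-space
 * umem and are returned in one batched xsk_tx_completed() call.
 * Redirected frames were DMA-mapped by this driver, so unmap before
 * returning them; XDP_TX frames simply go back to their allocator.
 */
static void complete_tx_entry(const struct tx_meta *m, int *xsk_done)
{
	if (m->flags & TX_AF_XDP) {
		(*xsk_done)++;
		return;
	}
	if (m->flags & TX_XDP_REDIRECT)
		printf("dma_unmap(%s)\n", m->frame);
	printf("xdp_return_frame(%s)\n", m->frame);
}

int main(void)
{
	const struct tx_meta cq[] = {
		{ TX_XDP_TX,		"frame0" },
		{ TX_AF_XDP,		"frame1" },
		{ TX_XDP_REDIRECT,	"frame2" },
	};
	int xsk_done = 0;

	for (unsigned int i = 0; i < sizeof(cq) / sizeof(cq[0]); i++)
		complete_tx_entry(&cq[i], &xsk_done);
	printf("xsk_tx_completed(%d)\n", xsk_done);
	return 0;
}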
return 0; +#ifdef CONFIG_DCB +err_free_zc_bmap: + bitmap_free(vf->af_xdp_zc_qidx); +#endif +err_unreg_devlink: + otx2_unregister_dl(vf); err_shutdown_tc: otx2_shutdown_tc(vf); err_unreg_netdev: diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_xsk.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_xsk.c new file mode 100644 index 0000000000000..ce10caea8511d --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_xsk.c @@ -0,0 +1,225 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Marvell RVU Ethernet driver + * + * Copyright (C) 2024 Marvell. + * + */ + +#include +#include +#include +#include + +#include "otx2_common.h" +#include "otx2_xsk.h" + +int otx2_xsk_pool_alloc_buf(struct otx2_nic *pfvf, struct otx2_pool *pool, + dma_addr_t *dma, int idx) +{ + struct xdp_buff *xdp; + int delta; + + xdp = xsk_buff_alloc(pool->xsk_pool); + if (!xdp) + return -ENOMEM; + + pool->xdp[pool->xdp_top++] = xdp; + *dma = OTX2_DATA_ALIGN(xsk_buff_xdp_get_dma(xdp)); + /* Adjust xdp->data for unaligned addresses */ + delta = *dma - xsk_buff_xdp_get_dma(xdp); + xdp->data += delta; + + return 0; +} + +static int otx2_xsk_ctx_disable(struct otx2_nic *pfvf, u16 qidx, int aura_id) +{ + struct nix_cn10k_aq_enq_req *cn10k_rq_aq; + struct npa_aq_enq_req *aura_aq; + struct npa_aq_enq_req *pool_aq; + struct nix_aq_enq_req *rq_aq; + + if (test_bit(CN10K_LMTST, &pfvf->hw.cap_flag)) { + cn10k_rq_aq = otx2_mbox_alloc_msg_nix_cn10k_aq_enq(&pfvf->mbox); + if (!cn10k_rq_aq) + return -ENOMEM; + cn10k_rq_aq->qidx = qidx; + cn10k_rq_aq->rq.ena = 0; + cn10k_rq_aq->rq_mask.ena = 1; + cn10k_rq_aq->ctype = NIX_AQ_CTYPE_RQ; + cn10k_rq_aq->op = NIX_AQ_INSTOP_WRITE; + } else { + rq_aq = otx2_mbox_alloc_msg_nix_aq_enq(&pfvf->mbox); + if (!rq_aq) + return -ENOMEM; + rq_aq->qidx = qidx; + rq_aq->sq.ena = 0; + rq_aq->sq_mask.ena = 1; + rq_aq->ctype = NIX_AQ_CTYPE_RQ; + rq_aq->op = NIX_AQ_INSTOP_WRITE; + } + + aura_aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox); + if (!aura_aq) + goto fail; + + aura_aq->aura_id = aura_id; + aura_aq->aura.ena = 0; + aura_aq->aura_mask.ena = 1; + aura_aq->ctype = NPA_AQ_CTYPE_AURA; + aura_aq->op = NPA_AQ_INSTOP_WRITE; + + pool_aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox); + if (!pool_aq) + goto fail; + + pool_aq->aura_id = aura_id; + pool_aq->pool.ena = 0; + pool_aq->pool_mask.ena = 1; + + pool_aq->ctype = NPA_AQ_CTYPE_POOL; + pool_aq->op = NPA_AQ_INSTOP_WRITE; + + return otx2_sync_mbox_msg(&pfvf->mbox); + +fail: + otx2_mbox_reset(&pfvf->mbox.mbox, 0); + return -ENOMEM; +} + +static void otx2_clean_up_rq(struct otx2_nic *pfvf, int qidx) +{ + struct otx2_qset *qset = &pfvf->qset; + struct otx2_cq_queue *cq; + struct otx2_pool *pool; + u64 iova; + + /* If the DOWN flag is set SQs are already freed */ + if (pfvf->flags & OTX2_FLAG_INTF_DOWN) + return; + + cq = &qset->cq[qidx]; + if (cq) + otx2_cleanup_rx_cqes(pfvf, cq, qidx); + + pool = &pfvf->qset.pool[qidx]; + iova = otx2_aura_allocptr(pfvf, qidx); + while (iova) { + iova -= OTX2_HEAD_ROOM; + otx2_free_bufs(pfvf, pool, iova, pfvf->rbsize); + iova = otx2_aura_allocptr(pfvf, qidx); + } + + mutex_lock(&pfvf->mbox.lock); + otx2_xsk_ctx_disable(pfvf, qidx, qidx); + mutex_unlock(&pfvf->mbox.lock); +} + +int otx2_xsk_pool_enable(struct otx2_nic *pf, struct xsk_buff_pool *pool, u16 qidx) +{ + u16 rx_queues = pf->hw.rx_queues; + u16 tx_queues = pf->hw.tx_queues; + int err; + + if (qidx >= rx_queues || qidx >= tx_queues) + return -EINVAL; + + err = xsk_pool_dma_map(pool, pf->dev, DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING); + if (err) + 
return err; + + set_bit(qidx, pf->af_xdp_zc_qidx); + otx2_clean_up_rq(pf, qidx); + /* Reconfigure RSS table as 'qidx' cannot be part of RSS now */ + otx2_set_rss_table(pf, DEFAULT_RSS_CONTEXT_GROUP); + /* Kick start the NAPI context so that receiving will start */ + return otx2_xsk_wakeup(pf->netdev, qidx, XDP_WAKEUP_RX); +} + +int otx2_xsk_pool_disable(struct otx2_nic *pf, u16 qidx) +{ + struct net_device *netdev = pf->netdev; + struct xsk_buff_pool *pool; + struct otx2_snd_queue *sq; + + pool = xsk_get_pool_from_qid(netdev, qidx); + if (!pool) + return -EINVAL; + + sq = &pf->qset.sq[qidx + pf->hw.tx_queues]; + sq->xsk_pool = NULL; + otx2_clean_up_rq(pf, qidx); + clear_bit(qidx, pf->af_xdp_zc_qidx); + xsk_pool_dma_unmap(pool, DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING); + /* Reconfigure RSS table as 'qidx' needs to be part of RSS again */ + otx2_set_rss_table(pf, DEFAULT_RSS_CONTEXT_GROUP); + + return 0; +} + +int otx2_xsk_pool_setup(struct otx2_nic *pf, struct xsk_buff_pool *pool, u16 qidx) +{ + if (pool) + return otx2_xsk_pool_enable(pf, pool, qidx); + + return otx2_xsk_pool_disable(pf, qidx); +} + +int otx2_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags) +{ + struct otx2_nic *pf = netdev_priv(dev); + struct otx2_cq_poll *cq_poll = NULL; + struct otx2_qset *qset = &pf->qset; + + if (pf->flags & OTX2_FLAG_INTF_DOWN) + return -ENETDOWN; + + if (queue_id >= pf->hw.rx_queues || queue_id >= pf->hw.tx_queues) + return -EINVAL; + + cq_poll = &qset->napi[queue_id]; + if (!cq_poll) + return -EINVAL; + + /* Trigger interrupt */ + if (!napi_if_scheduled_mark_missed(&cq_poll->napi)) { + otx2_write64(pf, NIX_LF_CINTX_ENA_W1S(cq_poll->cint_idx), BIT_ULL(0)); + otx2_write64(pf, NIX_LF_CINTX_INT_W1S(cq_poll->cint_idx), BIT_ULL(0)); + } + + return 0; +} + +void otx2_attach_xsk_buff(struct otx2_nic *pfvf, struct otx2_snd_queue *sq, int qidx) +{ + if (test_bit(qidx, pfvf->af_xdp_zc_qidx)) + sq->xsk_pool = xsk_get_pool_from_qid(pfvf->netdev, qidx); +} + +void otx2_zc_napi_handler(struct otx2_nic *pfvf, struct xsk_buff_pool *pool, + int queue, int budget) +{ + struct xdp_desc *xdp_desc = pool->tx_descs; + int err, i, work_done = 0, batch; + + budget = min(budget, otx2_read_free_sqe(pfvf, queue)); + batch = xsk_tx_peek_release_desc_batch(pool, budget); + if (!batch) + return; + + for (i = 0; i < batch; i++) { + dma_addr_t dma_addr; + + dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc[i].addr); + err = otx2_xdp_sq_append_pkt(pfvf, NULL, dma_addr, xdp_desc[i].len, + queue, OTX2_AF_XDP_FRAME); + if (!err) { + netdev_err(pfvf->netdev, "AF_XDP: Unable to transfer packet, err %d\n", err); + break; + } + work_done++; + } + + if (work_done) + xsk_tx_release(pool); +} diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_xsk.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_xsk.h new file mode 100644 index 0000000000000..8047fafee8feb --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_xsk.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Marvell RVU Ethernet driver + * + * Copyright (C) 2024 Marvell.
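otx2_xsk_wakeup() above is the driver half of AF_XDP's need_wakeup protocol: the driver raises the flag when it wants user space to kick it (for instance when RX buffers could not be refilled, or to flush queued TX descriptors), and user space tests the flag before paying for a syscall. A sketch of the matching user-space side, assuming a socket set up with libxdp's xsk helpers (setup via xsk_socket__create() omitted; older installs shipped the same helpers in <bpf/xsk.h> from libbpf):

#include <sys/socket.h>
#include <xdp/xsk.h>	/* libxdp helper library */

/* Kick the kernel only when the driver asked for it; an unconditional
 * sendto() per TX batch would defeat the point of need_wakeup.
 */
static void kick_tx_if_needed(struct xsk_socket *xsk, struct xsk_ring_prod *tx)
{
	if (xsk_ring_prod__needs_wakeup(tx))
		sendto(xsk_socket__fd(xsk), NULL, 0, MSG_DONTWAIT, NULL, 0);
}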
+ * + */ + +#ifndef OTX2_XSK_H +#define OTX2_XSK_H + +struct otx2_nic; +struct xsk_buff_pool; + +int otx2_xsk_pool_setup(struct otx2_nic *pf, struct xsk_buff_pool *pool, u16 qid); +int otx2_xsk_pool_enable(struct otx2_nic *pf, struct xsk_buff_pool *pool, u16 qid); +int otx2_xsk_pool_disable(struct otx2_nic *pf, u16 qid); +int otx2_xsk_pool_alloc_buf(struct otx2_nic *pfvf, struct otx2_pool *pool, + dma_addr_t *dma, int idx); +int otx2_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags); +void otx2_zc_napi_handler(struct otx2_nic *pfvf, struct xsk_buff_pool *pool, + int queue, int budget); +void otx2_attach_xsk_buff(struct otx2_nic *pfvf, struct otx2_snd_queue *sq, int qidx); + +#endif /* OTX2_XSK_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c b/drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c index 9d887bfc31089..c5dbae0e513b6 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c @@ -82,7 +82,7 @@ static int otx2_qos_sq_aura_pool_init(struct otx2_nic *pfvf, int qidx) } for (ptr = 0; ptr < num_sqbs; ptr++) { - err = otx2_alloc_rbuf(pfvf, pool, &bufptr); + err = otx2_alloc_rbuf(pfvf, pool, &bufptr, pool_id, ptr); if (err) goto sqb_free; pfvf->hw_ops->aura_freeptr(pfvf, pool_id, bufptr); diff --git a/drivers/net/ethernet/marvell/prestera/prestera_main.c b/drivers/net/ethernet/marvell/prestera/prestera_main.c index 440a4c42b405f..71ffb55d1fc48 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_main.c +++ b/drivers/net/ethernet/marvell/prestera/prestera_main.c @@ -396,7 +396,6 @@ static int prestera_port_sfp_bind(struct prestera_port *port) continue; port->phylink_pcs.ops = &prestera_pcs_ops; - port->phylink_pcs.neg_mode = true; port->phy_config.dev = &port->dev->dev; port->phy_config.type = PHYLINK_NETDEV; @@ -635,7 +634,7 @@ static int prestera_port_create(struct prestera_switch *sw, u32 id) goto err_dl_port_register; dev->features |= NETIF_F_HW_TC; - dev->netns_local = true; + dev->netns_immutable = true; dev->netdev_ops = &prestera_netdev_ops; dev->ethtool_ops = &prestera_ethtool_ops; SET_NETDEV_DEV(dev, sw->dev->dev); diff --git a/drivers/net/ethernet/mediatek/Kconfig b/drivers/net/ethernet/mediatek/Kconfig index 95c4405b7d7be..7bfd3f230ff50 100644 --- a/drivers/net/ethernet/mediatek/Kconfig +++ b/drivers/net/ethernet/mediatek/Kconfig @@ -7,14 +7,6 @@ config NET_VENDOR_MEDIATEK if NET_VENDOR_MEDIATEK -config NET_AIROHA - tristate "Airoha SoC Gigabit Ethernet support" - depends on NET_DSA || !NET_DSA - select PAGE_POOL - help - This driver supports the gigabit ethernet MACs in the - Airoha SoC family. 
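The prestera hunk above drops the pcs->neg_mode = true opt-in, just as the mvneta and mvpp2 hunks did earlier in this section. That flag existed only to phase in phylink's neg_mode conversion; with every PCS driver converted, phylink evidently treats the behaviour as unconditional and the field can go. The negotiation mode still reaches the PCS through pcs_config(), whose shape is sketched below (the body is a placeholder, not a real PCS driver):

#include <linux/phylink.h>

static int demo_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode,
			   phy_interface_t interface,
			   const unsigned long *advertising,
			   bool permit_pause_to_mac)
{
	/* neg_mode replaces the old MLO_AN_* mode argument */
	if (neg_mode == PHYLINK_PCS_NEG_INBAND_ENABLED) {
		/* program in-band autoneg here */
	}

	return 0;
}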
- config NET_MEDIATEK_SOC_WED depends on ARCH_MEDIATEK || COMPILE_TEST def_bool NET_MEDIATEK_SOC != n diff --git a/drivers/net/ethernet/mediatek/Makefile b/drivers/net/ethernet/mediatek/Makefile index ddbb7f4a516ca..03e008fbc859b 100644 --- a/drivers/net/ethernet/mediatek/Makefile +++ b/drivers/net/ethernet/mediatek/Makefile @@ -11,4 +11,3 @@ mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_debugfs.o endif obj-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_ops.o obj-$(CONFIG_NET_MEDIATEK_STAR_EMAC) += mtk_star_emac.o -obj-$(CONFIG_NET_AIROHA) += airoha_eth.o diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index 53485142938c4..43197b28b3e74 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -815,12 +815,60 @@ static void mtk_mac_link_up(struct phylink_config *config, mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id)); } +static void mtk_mac_disable_tx_lpi(struct phylink_config *config) +{ + struct mtk_mac *mac = container_of(config, struct mtk_mac, + phylink_config); + struct mtk_eth *eth = mac->hw; + + mtk_m32(eth, MAC_MCR_EEE100M | MAC_MCR_EEE1G, 0, MTK_MAC_MCR(mac->id)); +} + +static int mtk_mac_enable_tx_lpi(struct phylink_config *config, u32 timer, + bool tx_clk_stop) +{ + struct mtk_mac *mac = container_of(config, struct mtk_mac, + phylink_config); + struct mtk_eth *eth = mac->hw; + u32 val; + + /* Tx idle timer in ms */ + timer = DIV_ROUND_UP(timer, 1000); + + /* If the timer is zero, then set LPI_MODE, which allows the + * system to enter LPI mode immediately rather than waiting for + * the LPI threshold. + */ + if (!timer) + val = MAC_EEE_LPI_MODE; + else if (FIELD_FIT(MAC_EEE_LPI_TXIDLE_THD, timer)) + val = FIELD_PREP(MAC_EEE_LPI_TXIDLE_THD, timer); + else + val = MAC_EEE_LPI_TXIDLE_THD; + + if (tx_clk_stop) + val |= MAC_EEE_CKG_TXIDLE; + + /* PHY Wake-up time, this field does not have a reset value, so use the + * reset value from MT7531 (36us for 100M and 17us for 1000M). 
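The timer handling in mtk_mac_enable_tx_lpi() above follows a common encode-with-saturation pattern: convert the phylink LPI timer from microseconds to the register's millisecond unit, then select immediate-LPI mode for zero, the encoded value when it fits, or the field maximum otherwise. For example, 300000 us becomes 300 ms, which exceeds the 8-bit field and is clamped to its all-ones value. A minimal kernel-style sketch of the pattern (the DEMO_* names are invented; the mask mirrors the width of MAC_EEE_LPI_TXIDLE_THD):

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define DEMO_TXIDLE_THD	GENMASK(15, 8)	/* 8-bit threshold field */
#define DEMO_LPI_MODE	BIT(0)

/* Encode a millisecond threshold into the 8-bit register field,
 * saturating at the field maximum; 0 selects immediate-LPI mode.
 */
static u32 demo_encode_lpi(u32 ms)
{
	if (!ms)
		return DEMO_LPI_MODE;
	if (FIELD_FIT(DEMO_TXIDLE_THD, ms))
		return FIELD_PREP(DEMO_TXIDLE_THD, ms);
	return DEMO_TXIDLE_THD;	/* all-ones field == maximum threshold */
}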
+ */ + val |= FIELD_PREP(MAC_EEE_WAKEUP_TIME_1000, 17) | + FIELD_PREP(MAC_EEE_WAKEUP_TIME_100, 36); + + mtk_w32(eth, val, MTK_MAC_EEECR(mac->id)); + mtk_m32(eth, 0, MAC_MCR_EEE100M | MAC_MCR_EEE1G, MTK_MAC_MCR(mac->id)); + + return 0; +} + static const struct phylink_mac_ops mtk_phylink_ops = { .mac_select_pcs = mtk_mac_select_pcs, .mac_config = mtk_mac_config, .mac_finish = mtk_mac_finish, .mac_link_down = mtk_mac_link_down, .mac_link_up = mtk_mac_link_up, + .mac_disable_tx_lpi = mtk_mac_disable_tx_lpi, + .mac_enable_tx_lpi = mtk_mac_enable_tx_lpi, }; static int mtk_mdio_init(struct mtk_eth *eth) @@ -830,17 +878,12 @@ static int mtk_mdio_init(struct mtk_eth *eth) int ret; u32 val; - mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus"); + mii_np = of_get_available_child_by_name(eth->dev->of_node, "mdio-bus"); if (!mii_np) { dev_err(eth->dev, "no %s child node found", "mdio-bus"); return -ENODEV; } - if (!of_device_is_available(mii_np)) { - ret = -ENODEV; - goto err_put_node; - } - eth->mii_bus = devm_mdiobus_alloc(eth->dev); if (!eth->mii_bus) { ret = -ENOMEM; @@ -2079,7 +2122,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget, if (ring->page_pool) { struct page *page = virt_to_head_page(data); struct xdp_buff xdp; - u32 ret; + u32 ret, metasize; new_data = mtk_page_pool_get_buff(ring->page_pool, &dma_addr, @@ -2095,7 +2138,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget, xdp_init_buff(&xdp, PAGE_SIZE, &ring->xdp_q); xdp_prepare_buff(&xdp, data, MTK_PP_HEADROOM, pktlen, - false); + true); xdp_buff_clear_frags_flag(&xdp); ret = mtk_xdp_run(eth, ring, &xdp, netdev); @@ -2115,6 +2158,9 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget, skb_reserve(skb, xdp.data - xdp.data_hard_start); skb_put(skb, xdp.data_end - xdp.data); + metasize = xdp.data - xdp.data_meta; + if (metasize) + skb_metadata_set(skb, metasize); skb_mark_for_recycle(skb); } else { if (ring->frag_size <= PAGE_SIZE) @@ -4474,6 +4520,20 @@ static int mtk_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam return phylink_ethtool_set_pauseparam(mac->phylink, pause); } +static int mtk_get_eee(struct net_device *dev, struct ethtool_keee *eee) +{ + struct mtk_mac *mac = netdev_priv(dev); + + return phylink_ethtool_get_eee(mac->phylink, eee); +} + +static int mtk_set_eee(struct net_device *dev, struct ethtool_keee *eee) +{ + struct mtk_mac *mac = netdev_priv(dev); + + return phylink_ethtool_set_eee(mac->phylink, eee); +} + static u16 mtk_select_queue(struct net_device *dev, struct sk_buff *skb, struct net_device *sb_dev) { @@ -4506,6 +4566,8 @@ static const struct ethtool_ops mtk_ethtool_ops = { .set_pauseparam = mtk_set_pauseparam, .get_rxnfc = mtk_get_rxnfc, .set_rxnfc = mtk_set_rxnfc, + .get_eee = mtk_get_eee, + .set_eee = mtk_set_eee, }; static const struct net_device_ops mtk_netdev_ops = { @@ -4615,6 +4677,9 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np) mac->phylink_config.type = PHYLINK_NETDEV; mac->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE | MAC_10 | MAC_100 | MAC_1000 | MAC_2500FD; + mac->phylink_config.lpi_capabilities = MAC_100FD | MAC_1000FD | + MAC_2500FD; + mac->phylink_config.lpi_timer_default = 1000; /* MT7623 gmac0 is now missing its speed-specific PLL configuration * in its .mac_config method (since state->speed is not valid there. 
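The metasize handling added to mtk_poll_rx() above, like the mvneta and mvpp2 changes earlier in this section, only has an effect once an XDP program actually reserves metadata with bpf_xdp_adjust_meta(); passing meta_valid=true to xdp_prepare_buff() is what allows that call to succeed. A minimal XDP program exercising the path (illustrative, not part of this series):

// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* Prepend a 4-byte mark in the metadata area; the driver's
 * skb_metadata_set() then carries it to later skb consumers
 * (e.g. a TC program reading the metadata).
 */
SEC("xdp")
int xdp_put_meta(struct xdp_md *ctx)
{
	__u32 *meta;
	void *data;

	if (bpf_xdp_adjust_meta(ctx, -(int)sizeof(*meta)))
		return XDP_PASS;	/* no headroom for metadata */

	meta = (void *)(long)ctx->data_meta;
	data = (void *)(long)ctx->data;
	if ((void *)(meta + 1) > data)	/* verifier bounds check */
		return XDP_PASS;

	*meta = 0xcafe;
	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";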
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h index 0d5225f1d3eef..90a377ab4359e 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h @@ -453,6 +453,8 @@ #define MAC_MCR_RX_FIFO_CLR_DIS BIT(12) #define MAC_MCR_BACKOFF_EN BIT(9) #define MAC_MCR_BACKPR_EN BIT(8) +#define MAC_MCR_EEE1G BIT(7) +#define MAC_MCR_EEE100M BIT(6) #define MAC_MCR_FORCE_RX_FC BIT(5) #define MAC_MCR_FORCE_TX_FC BIT(4) #define MAC_MCR_SPEED_1000 BIT(3) @@ -461,6 +463,15 @@ #define MAC_MCR_FORCE_LINK BIT(0) #define MAC_MCR_FORCE_LINK_DOWN (MAC_MCR_FORCE_MODE) +/* Mac EEE control registers */ +#define MTK_MAC_EEECR(x) (0x10104 + (x * 0x100)) +#define MAC_EEE_WAKEUP_TIME_1000 GENMASK(31, 24) +#define MAC_EEE_WAKEUP_TIME_100 GENMASK(23, 16) +#define MAC_EEE_LPI_TXIDLE_THD GENMASK(15, 8) +#define MAC_EEE_CKG_TXIDLE BIT(3) +#define MAC_EEE_CKG_RXLPI BIT(2) +#define MAC_EEE_LPI_MODE BIT(0) + /* Mac status registers */ #define MTK_MAC_MSR(x) (0x10108 + (x * 0x100)) #define MAC_MSR_EEE1G BIT(7) diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c index f20bb390df3ad..c855fb799ce14 100644 --- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c +++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c @@ -34,8 +34,10 @@ struct mtk_flow_data { u16 vlan_in; struct { - u16 id; - __be16 proto; + struct { + u16 id; + __be16 proto; + } vlans[2]; u8 num; } vlan; struct { @@ -349,18 +351,19 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f, case FLOW_ACTION_CSUM: break; case FLOW_ACTION_VLAN_PUSH: - if (data.vlan.num == 1 || + if (data.vlan.num + data.pppoe.num == 2 || act->vlan.proto != htons(ETH_P_8021Q)) return -EOPNOTSUPP; - data.vlan.id = act->vlan.vid; - data.vlan.proto = act->vlan.proto; + data.vlan.vlans[data.vlan.num].id = act->vlan.vid; + data.vlan.vlans[data.vlan.num].proto = act->vlan.proto; data.vlan.num++; break; case FLOW_ACTION_VLAN_POP: break; case FLOW_ACTION_PPPOE_PUSH: - if (data.pppoe.num == 1) + if (data.pppoe.num == 1 || + data.vlan.num == 2) return -EOPNOTSUPP; data.pppoe.sid = act->pppoe.sid; @@ -450,12 +453,9 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f, if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE) foe.bridge.vlan = data.vlan_in; - if (data.vlan.num == 1) { - if (data.vlan.proto != htons(ETH_P_8021Q)) - return -EOPNOTSUPP; + for (i = 0; i < data.vlan.num; i++) + mtk_foe_entry_set_vlan(eth, &foe, data.vlan.vlans[i].id); - mtk_foe_entry_set_vlan(eth, &foe, data.vlan.id); - } if (data.pppoe.num == 1) mtk_foe_entry_set_pppoe(eth, &foe, data.pppoe.sid); diff --git a/drivers/net/ethernet/mediatek/mtk_star_emac.c b/drivers/net/ethernet/mediatek/mtk_star_emac.c index 25989c79c92e6..76f202d7f0553 100644 --- a/drivers/net/ethernet/mediatek/mtk_star_emac.c +++ b/drivers/net/ethernet/mediatek/mtk_star_emac.c @@ -1427,15 +1427,10 @@ static int mtk_star_mdio_init(struct net_device *ndev) of_node = dev->of_node; - mdio_node = of_get_child_by_name(of_node, "mdio"); + mdio_node = of_get_available_child_by_name(of_node, "mdio"); if (!mdio_node) return -ENODEV; - if (!of_device_is_available(mdio_node)) { - ret = -ENODEV; - goto out_put_node; - } - priv->mii = devm_mdiobus_alloc(dev); if (!priv->mii) { ret = -ENOMEM; diff --git a/drivers/net/ethernet/mellanox/mlx4/alloc.c b/drivers/net/ethernet/mellanox/mlx4/alloc.c index b330020dc0d67..07b061a97a6ed 100644 --- a/drivers/net/ethernet/mellanox/mlx4/alloc.c +++ 
b/drivers/net/ethernet/mellanox/mlx4/alloc.c @@ -526,28 +526,6 @@ u32 mlx4_zone_alloc_entries(struct mlx4_zone_allocator *zones, u32 uid, int coun return res; } -u32 mlx4_zone_free_entries(struct mlx4_zone_allocator *zones, u32 uid, u32 obj, u32 count) -{ - struct mlx4_zone_entry *zone; - int res = 0; - - spin_lock(&zones->lock); - - zone = __mlx4_find_zone_by_uid(zones, uid); - - if (NULL == zone) { - res = -1; - goto out; - } - - __mlx4_free_from_zone(zone, obj, count); - -out: - spin_unlock(&zones->lock); - - return res; -} - u32 mlx4_zone_free_entries_unique(struct mlx4_zone_allocator *zones, u32 obj, u32 count) { struct mlx4_zone_entry *zone; @@ -682,9 +660,9 @@ static struct mlx4_db_pgdir *mlx4_alloc_db_pgdir(struct device *dma_device) } static int mlx4_alloc_db_from_pgdir(struct mlx4_db_pgdir *pgdir, - struct mlx4_db *db, int order) + struct mlx4_db *db, unsigned int order) { - int o; + unsigned int o; int i; for (o = order; o <= 1; ++o) { @@ -712,7 +690,7 @@ static int mlx4_alloc_db_from_pgdir(struct mlx4_db_pgdir *pgdir, return 0; } -int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order) +int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, unsigned int order) { struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_db_pgdir *pgdir; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index 15c57e9517e9a..b33285d755b90 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c @@ -48,60 +48,43 @@ #if IS_ENABLED(CONFIG_IPV6) #include #endif +#include #include "mlx4_en.h" -static int mlx4_alloc_page(struct mlx4_en_priv *priv, - struct mlx4_en_rx_alloc *frag, - gfp_t gfp) -{ - struct page *page; - dma_addr_t dma; - - page = alloc_page(gfp); - if (unlikely(!page)) - return -ENOMEM; - dma = dma_map_page(priv->ddev, page, 0, PAGE_SIZE, priv->dma_dir); - if (unlikely(dma_mapping_error(priv->ddev, dma))) { - __free_page(page); - return -ENOMEM; - } - frag->page = page; - frag->dma = dma; - frag->page_offset = priv->rx_headroom; - return 0; -} - static int mlx4_en_alloc_frags(struct mlx4_en_priv *priv, struct mlx4_en_rx_ring *ring, struct mlx4_en_rx_desc *rx_desc, struct mlx4_en_rx_alloc *frags, gfp_t gfp) { + dma_addr_t dma; int i; for (i = 0; i < priv->num_frags; i++, frags++) { if (!frags->page) { - if (mlx4_alloc_page(priv, frags, gfp)) { + frags->page = page_pool_alloc_pages(ring->pp, gfp); + if (!frags->page) { ring->alloc_fail++; return -ENOMEM; } + page_pool_fragment_page(frags->page, 1); + frags->page_offset = priv->rx_headroom; + ring->rx_alloc_pages++; } - rx_desc->data[i].addr = cpu_to_be64(frags->dma + - frags->page_offset); + dma = page_pool_get_dma_addr(frags->page); + rx_desc->data[i].addr = cpu_to_be64(dma + frags->page_offset); } return 0; } static void mlx4_en_free_frag(const struct mlx4_en_priv *priv, + struct mlx4_en_rx_ring *ring, struct mlx4_en_rx_alloc *frag) { - if (frag->page) { - dma_unmap_page(priv->ddev, frag->dma, - PAGE_SIZE, priv->dma_dir); - __free_page(frag->page); - } + if (frag->page) + page_pool_put_full_page(ring->pp, frag->page, false); /* We need to clear all fields, otherwise a change of priv->log_rx_info * could lead to see garbage later in frag->page. 
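The en_rx.c conversion above moves mlx4 from open-coded alloc_page() plus dma_map_page() to page_pool: with PP_FLAG_DMA_MAP the pool owns the DMA mapping, so the driver only requests pages and reads back their precomputed DMA address. One subtlety worth remembering is that page_pool_create() reports failure as an ERR_PTR(), never as NULL. A condensed kernel-context sketch of the lifecycle this hunk adopts (demo_* names are invented):

#include <linux/dma-mapping.h>
#include <linux/types.h>
#include <net/page_pool/helpers.h>

static struct page_pool *demo_pool_create(struct device *dev, int ring_size)
{
	struct page_pool_params pp = {
		.flags		= PP_FLAG_DMA_MAP,	/* pool maps pages for us */
		.pool_size	= ring_size,
		.nid		= NUMA_NO_NODE,
		.dev		= dev,
		.dma_dir	= DMA_FROM_DEVICE,
	};

	return page_pool_create(&pp);	/* ERR_PTR() on failure, check IS_ERR() */
}

static int demo_fill_desc(struct page_pool *pool, __be64 *desc_addr)
{
	struct page *page = page_pool_alloc_pages(pool, GFP_ATOMIC);

	if (!page)
		return -ENOMEM;

	/* the mapping was recorded by the pool at allocation time */
	*desc_addr = cpu_to_be64(page_pool_get_dma_addr(page));
	return 0;
}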
*/ @@ -141,18 +124,6 @@ static int mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv, (index << ring->log_stride); struct mlx4_en_rx_alloc *frags = ring->rx_info + (index << priv->log_rx_info); - if (likely(ring->page_cache.index > 0)) { - /* XDP uses a single page per frame */ - if (!frags->page) { - ring->page_cache.index--; - frags->page = ring->page_cache.buf[ring->page_cache.index].page; - frags->dma = ring->page_cache.buf[ring->page_cache.index].dma; - } - frags->page_offset = XDP_PACKET_HEADROOM; - rx_desc->data[0].addr = cpu_to_be64(frags->dma + - XDP_PACKET_HEADROOM); - return 0; - } return mlx4_en_alloc_frags(priv, ring, rx_desc, frags, gfp); } @@ -178,7 +149,7 @@ static void mlx4_en_free_rx_desc(const struct mlx4_en_priv *priv, frags = ring->rx_info + (index << priv->log_rx_info); for (nr = 0; nr < priv->num_frags; nr++) { en_dbg(DRV, priv, "Freeing fragment:%d\n", nr); - mlx4_en_free_frag(priv, frags + nr); + mlx4_en_free_frag(priv, ring, frags + nr); } } @@ -268,6 +239,7 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv, u32 size, u16 stride, int node, int queue_index) { struct mlx4_en_dev *mdev = priv->mdev; + struct page_pool_params pp = {}; struct mlx4_en_rx_ring *ring; int err = -ENOMEM; int tmp; @@ -286,9 +258,26 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv, ring->log_stride = ffs(ring->stride) - 1; ring->buf_size = ring->size * ring->stride + TXBB_SIZE; - if (xdp_rxq_info_reg(&ring->xdp_rxq, priv->dev, queue_index, 0) < 0) + pp.flags = PP_FLAG_DMA_MAP; + pp.pool_size = size * DIV_ROUND_UP(priv->rx_skb_size, PAGE_SIZE); + pp.nid = node; + pp.napi = &priv->rx_cq[queue_index]->napi; + pp.netdev = priv->dev; + pp.dev = &mdev->dev->persist->pdev->dev; + pp.dma_dir = priv->dma_dir; + + ring->pp = page_pool_create(&pp); + if (!ring->pp) goto err_ring; + if (xdp_rxq_info_reg(&ring->xdp_rxq, priv->dev, queue_index, 0) < 0) + goto err_pp; + + err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, MEM_TYPE_PAGE_POOL, + ring->pp); + if (err) + goto err_xdp_info; + tmp = size * roundup_pow_of_two(MLX4_EN_MAX_RX_FRAGS * sizeof(struct mlx4_en_rx_alloc)); ring->rx_info = kvzalloc_node(tmp, GFP_KERNEL, node); @@ -319,6 +308,8 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv, ring->rx_info = NULL; err_xdp_info: xdp_rxq_info_unreg(&ring->xdp_rxq); +err_pp: + page_pool_destroy(ring->pp); err_ring: kfree(ring); *pring = NULL; @@ -409,26 +400,6 @@ void mlx4_en_recover_from_oom(struct mlx4_en_priv *priv) } } -/* When the rx ring is running in page-per-packet mode, a released frame can go - * directly into a small cache, to avoid unmapping or touching the page - * allocator. In bpf prog performance scenarios, buffers are either forwarded - * or dropped, never converted to skbs, so every page can come directly from - * this cache when it is sized to be a multiple of the napi budget. 
- */ -bool mlx4_en_rx_recycle(struct mlx4_en_rx_ring *ring, - struct mlx4_en_rx_alloc *frame) -{ - struct mlx4_en_page_cache *cache = &ring->page_cache; - - if (cache->index >= MLX4_EN_CACHE_SIZE) - return false; - - cache->buf[cache->index].page = frame->page; - cache->buf[cache->index].dma = frame->dma; - cache->index++; - return true; -} - void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv, struct mlx4_en_rx_ring **pring, u32 size, u16 stride) @@ -445,6 +416,7 @@ void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv, xdp_rxq_info_unreg(&ring->xdp_rxq); mlx4_free_hwq_res(mdev->dev, &ring->wqres, size * stride + TXBB_SIZE); kvfree(ring->rx_info); + page_pool_destroy(ring->pp); ring->rx_info = NULL; kfree(ring); *pring = NULL; @@ -453,14 +425,6 @@ void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv, void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv, struct mlx4_en_rx_ring *ring) { - int i; - - for (i = 0; i < ring->page_cache.index; i++) { - dma_unmap_page(priv->ddev, ring->page_cache.buf[i].dma, - PAGE_SIZE, priv->dma_dir); - put_page(ring->page_cache.buf[i].page); - } - ring->page_cache.index = 0; mlx4_en_free_rx_buf(priv, ring); if (ring->stride <= TXBB_SIZE) ring->buf -= TXBB_SIZE; @@ -487,7 +451,7 @@ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv, if (unlikely(!page)) goto fail; - dma = frags->dma; + dma = page_pool_get_dma_addr(page); dma_sync_single_range_for_cpu(priv->ddev, dma, frags->page_offset, frag_size, priv->dma_dir); @@ -498,6 +462,7 @@ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv, if (frag_info->frag_stride == PAGE_SIZE / 2) { frags->page_offset ^= PAGE_SIZE / 2; release = page_count(page) != 1 || + atomic_long_read(&page->pp_ref_count) != 1 || page_is_pfmemalloc(page) || page_to_nid(page) != numa_mem_id(); } else if (!priv->rx_headroom) { @@ -511,10 +476,9 @@ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv, release = frags->page_offset + frag_info->frag_size > PAGE_SIZE; } if (release) { - dma_unmap_page(priv->ddev, dma, PAGE_SIZE, priv->dma_dir); frags->page = NULL; } else { - page_ref_inc(page); + page_pool_ref_page(page); } nr++; @@ -784,7 +748,8 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud /* Get pointer to first fragment since we haven't * skb yet and cast it to ethhdr struct */ - dma = frags[0].dma + frags[0].page_offset; + dma = page_pool_get_dma_addr(frags[0].page); + dma += frags[0].page_offset; dma_sync_single_for_cpu(priv->ddev, dma, sizeof(*ethh), DMA_FROM_DEVICE); @@ -823,7 +788,8 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud void *orig_data; u32 act; - dma = frags[0].dma + frags[0].page_offset; + dma = page_pool_get_dma_addr(frags[0].page); + dma += frags[0].page_offset; dma_sync_single_for_cpu(priv->ddev, dma, priv->frag_info[0].frag_size, DMA_FROM_DEVICE); @@ -886,6 +852,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud skb = napi_get_frags(&cq->napi); if (unlikely(!skb)) goto next; + skb_mark_for_recycle(skb); if (unlikely(ring->hwtstamp_rx_filter == HWTSTAMP_FILTER_ALL)) { u64 timestamp = mlx4_en_get_cqe_ts(cqe); diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c index 1ddb11cb25f91..87f35bcbeff8f 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c @@ -44,6 +44,7 @@ #include #include #include +#include #include "mlx4_en.h" @@ -350,16 +351,10 @@ u32 mlx4_en_recycle_tx_desc(struct 
mlx4_en_priv *priv, int napi_mode) { struct mlx4_en_tx_info *tx_info = &ring->tx_info[index]; - struct mlx4_en_rx_alloc frame = { - .page = tx_info->page, - .dma = tx_info->map0_dma, - }; - - if (!napi_mode || !mlx4_en_rx_recycle(ring->recycle_ring, &frame)) { - dma_unmap_page(priv->ddev, tx_info->map0_dma, - PAGE_SIZE, priv->dma_dir); - put_page(tx_info->page); - } + struct page_pool *pool = ring->recycle_ring->pp; + + /* Note that napi_mode = 0 means ndo_close() path, not budget = 0 */ + page_pool_put_full_page(pool, tx_info->page, !!napi_mode); return tx_info->nr_txbb; } @@ -450,6 +445,8 @@ int mlx4_en_process_tx_cq(struct net_device *dev, if (unlikely(!priv->port_up)) return 0; + if (unlikely(!napi_budget) && cq->type == TX_XDP) + return 0; netdev_txq_bql_complete_prefetchw(ring->tx_queue); @@ -1194,7 +1191,7 @@ netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_ring *rx_ring, tx_desc = ring->buf + (index << LOG_TXBB_SIZE); data = &tx_desc->data; - dma = frame->dma; + dma = page_pool_get_dma_addr(frame->page); tx_info->page = frame->page; frame->page = NULL; diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h index d7d856d1758a3..b213094ea30f3 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h @@ -1478,12 +1478,6 @@ void mlx4_zone_allocator_destroy(struct mlx4_zone_allocator *zone_alloc); u32 mlx4_zone_alloc_entries(struct mlx4_zone_allocator *zones, u32 uid, int count, int align, u32 skip_mask, u32 *puid); -/* Free objects, start from of the uid from zone_allocator - * . - */ -u32 mlx4_zone_free_entries(struct mlx4_zone_allocator *zones, - u32 uid, u32 obj, u32 count); - /* If was allocated with MLX4_ZONE_ALLOC_FLAGS_NO_OVERLAP, instead of * specifying the uid when freeing an object, zone allocator could figure it by * itself. Other parameters are similar to mlx4_zone_free. 
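The mlx4 hunks above convert the RX path from a private page cache with manual dma_map_page()/dma_unmap_page() calls to the generic page_pool API: the pool is created with PP_FLAG_DMA_MAP so each page is mapped once by the pool, descriptors take their DMA address from page_pool_get_dma_addr(), and pages returned from XDP_TX completions go back through page_pool_put_full_page(). Below is a minimal sketch of that pattern, not the mlx4 code itself — the function names, pool size, and the 'dev'/'napi' parameters are placeholder assumptions:

#include <linux/dma-mapping.h>
#include <linux/numa.h>
#include <net/page_pool/helpers.h>
#include <net/page_pool/types.h>

/* Hypothetical ring setup; 'dev' and 'napi' come from the caller. */
static struct page_pool *rx_page_pool_create(struct device *dev,
					     struct napi_struct *napi)
{
	struct page_pool_params pp = {
		.flags		= PP_FLAG_DMA_MAP,	/* pool maps pages once */
		.pool_size	= 1024,			/* placeholder ring size */
		.nid		= NUMA_NO_NODE,
		.dev		= dev,
		.napi		= napi,
		.dma_dir	= DMA_FROM_DEVICE,
	};

	return page_pool_create(&pp);	/* ERR_PTR() on failure */
}

/* Fill one RX descriptor address from a pool page. */
static struct page *rx_page_get(struct page_pool *pool, __be64 *desc_addr)
{
	struct page *page = page_pool_alloc_pages(pool, GFP_ATOMIC);

	if (page)	/* DMA mapping was already done by the pool */
		*desc_addr = cpu_to_be64(page_pool_get_dma_addr(page));
	return page;
}

/* Release a page; in NAPI context it can be recycled directly. */
static void rx_page_put(struct page_pool *pool, struct page *page,
			bool in_napi)
{
	page_pool_put_full_page(pool, page, in_napi);
}

SKBs built from these pages must be flagged with skb_mark_for_recycle(), as the en_rx.c hunk does, so the stack hands the pages back to the pool instead of freeing them.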
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index 28b70dcc652e6..ad0d91a751848 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h @@ -247,20 +247,11 @@ struct mlx4_en_tx_desc { struct mlx4_en_rx_alloc { struct page *page; - dma_addr_t dma; u32 page_offset; }; #define MLX4_EN_CACHE_SIZE (2 * NAPI_POLL_WEIGHT) -struct mlx4_en_page_cache { - u32 index; - struct { - struct page *page; - dma_addr_t dma; - } buf[MLX4_EN_CACHE_SIZE]; -}; - enum { MLX4_EN_TX_RING_STATE_RECOVERING, }; @@ -335,14 +326,14 @@ struct mlx4_en_rx_ring { u16 stride; u16 log_stride; u16 cqn; /* index of port CQ associated with this ring */ + u8 fcs_del; u32 prod; u32 cons; u32 buf_size; - u8 fcs_del; + struct page_pool *pp; void *buf; void *rx_info; struct bpf_prog __rcu *xdp_prog; - struct mlx4_en_page_cache page_cache; unsigned long bytes; unsigned long packets; unsigned long csum_ok; @@ -707,8 +698,6 @@ netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_ring *rx_ring, struct mlx4_en_priv *priv, unsigned int length, int tx_ind, bool *doorbell_pending); void mlx4_en_xmit_doorbell(struct mlx4_en_tx_ring *ring); -bool mlx4_en_rx_recycle(struct mlx4_en_rx_ring *ring, - struct mlx4_en_rx_alloc *frame); int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring **pring, diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c index 4e43f4a7d2466..e3d0b13c1610e 100644 --- a/drivers/net/ethernet/mellanox/mlx4/port.c +++ b/drivers/net/ethernet/mellanox/mlx4/port.c @@ -147,26 +147,6 @@ static int mlx4_set_port_mac_table(struct mlx4_dev *dev, u8 port, return err; } -int mlx4_find_cached_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *idx) -{ - struct mlx4_port_info *info = &mlx4_priv(dev)->port[port]; - struct mlx4_mac_table *table = &info->mac_table; - int i; - - for (i = 0; i < MLX4_MAX_MAC_NUM; i++) { - if (!table->refs[i]) - continue; - - if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) { - *idx = i; - return 0; - } - } - - return -ENOENT; -} -EXPORT_SYMBOL_GPL(mlx4_find_cached_mac); - static bool mlx4_need_mf_bond(struct mlx4_dev *dev) { int i, num_eth_ports = 0; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig index ea6070180c96b..6ec7d6e0181de 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig +++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig @@ -31,6 +31,7 @@ config MLX5_CORE_EN bool "Mellanox 5th generation network adapters (ConnectX series) Ethernet support" depends on NETDEVICES && ETHERNET && INET && PCI && MLX5_CORE select PAGE_POOL + select PAGE_POOL_STATS select DIMLIB help Ethernet support in Mellanox Technologies ConnectX-4 NIC. @@ -80,8 +81,8 @@ config MLX5_BRIDGE default y help mlx5 ConnectX offloads support for Ethernet Bridging (BRIDGE). - Enable adding representors of mlx5 uplink and VF ports to Bridge and - offloading rules for traffic between such ports. Supports VLANs (trunk and + Enable offloading FDB rules from a bridge device containing + representors of mlx5 uplink and VF ports. Supports VLANs (trunk and access modes). 
config MLX5_CLS_ACT diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index e733b81e18a21..e53dbdc0a7a17 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -94,6 +94,11 @@ static u16 in_to_opcode(void *in) return MLX5_GET(mbox_in, in, opcode); } +static u16 in_to_uid(void *in) +{ + return MLX5_GET(mbox_in, in, uid); +} + /* Returns true for opcodes that might be triggered very frequently and throttle * the command interface. Limit their command slots usage. */ @@ -823,7 +828,7 @@ static void cmd_status_print(struct mlx5_core_dev *dev, void *in, void *out) opcode = in_to_opcode(in); op_mod = MLX5_GET(mbox_in, in, op_mod); - uid = MLX5_GET(mbox_in, in, uid); + uid = in_to_uid(in); status = MLX5_GET(mbox_out, out, status); if (!uid && opcode != MLX5_CMD_OP_DESTROY_MKEY && @@ -1871,6 +1876,17 @@ static int is_manage_pages(void *in) return in_to_opcode(in) == MLX5_CMD_OP_MANAGE_PAGES; } +static bool mlx5_has_privileged_uid(struct mlx5_core_dev *dev) +{ + return !xa_empty(&dev->cmd.vars.privileged_uids); +} + +static bool mlx5_cmd_is_privileged_uid(struct mlx5_core_dev *dev, + u16 uid) +{ + return !!xa_load(&dev->cmd.vars.privileged_uids, uid); +} + /* Notes: * 1. Callback functions may not sleep * 2. Page queue commands do not support asynchrous completion @@ -1881,7 +1897,9 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out, { struct mlx5_cmd_msg *inb, *outb; u16 opcode = in_to_opcode(in); - bool throttle_op; + bool throttle_locked = false; + bool unpriv_locked = false; + u16 uid = in_to_uid(in); int pages_queue; gfp_t gfp; u8 token; @@ -1890,12 +1908,17 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out, if (mlx5_cmd_is_down(dev) || !opcode_allowed(&dev->cmd, opcode)) return -ENXIO; - throttle_op = mlx5_cmd_is_throttle_opcode(opcode); - if (throttle_op) { - if (callback) { - if (down_trylock(&dev->cmd.vars.throttle_sem)) - return -EBUSY; - } else { + if (!callback) { + /* The semaphore is already held for callback commands. It was + * acquired in mlx5_cmd_exec_cb() + */ + if (uid && mlx5_has_privileged_uid(dev)) { + if (!mlx5_cmd_is_privileged_uid(dev, uid)) { + unpriv_locked = true; + down(&dev->cmd.vars.unprivileged_sem); + } + } else if (mlx5_cmd_is_throttle_opcode(opcode)) { + throttle_locked = true; down(&dev->cmd.vars.throttle_sem); } } @@ -1941,8 +1964,11 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out, out_in: free_msg(dev, inb); out_up: - if (throttle_op) + if (throttle_locked) up(&dev->cmd.vars.throttle_sem); + if (unpriv_locked) + up(&dev->cmd.vars.unprivileged_sem); + return err; } @@ -2104,18 +2130,22 @@ static void mlx5_cmd_exec_cb_handler(int status, void *_work) struct mlx5_async_work *work = _work; struct mlx5_async_ctx *ctx; struct mlx5_core_dev *dev; - u16 opcode; + bool throttle_locked; + bool unpriv_locked; ctx = work->ctx; dev = ctx->dev; - opcode = work->opcode; + throttle_locked = work->throttle_locked; + unpriv_locked = work->unpriv_locked; status = cmd_status_err(dev, status, work->opcode, work->op_mod, work->out); work->user_callback(status, work); /* Can't access "work" from this point on. It could have been freed in * the callback. 
*/ - if (mlx5_cmd_is_throttle_opcode(opcode)) + if (throttle_locked) up(&dev->cmd.vars.throttle_sem); + if (unpriv_locked) + up(&dev->cmd.vars.unprivileged_sem); if (atomic_dec_and_test(&ctx->num_inflight)) complete(&ctx->inflight_done); } @@ -2124,6 +2154,8 @@ int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size, void *out, int out_size, mlx5_async_cbk_t callback, struct mlx5_async_work *work) { + struct mlx5_core_dev *dev = ctx->dev; + u16 uid; int ret; work->ctx = ctx; @@ -2131,11 +2163,43 @@ int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size, work->opcode = in_to_opcode(in); work->op_mod = MLX5_GET(mbox_in, in, op_mod); work->out = out; + work->throttle_locked = false; + work->unpriv_locked = false; + uid = in_to_uid(in); + if (WARN_ON(!atomic_inc_not_zero(&ctx->num_inflight))) return -EIO; - ret = cmd_exec(ctx->dev, in, in_size, out, out_size, + + if (uid && mlx5_has_privileged_uid(dev)) { + if (!mlx5_cmd_is_privileged_uid(dev, uid)) { + if (down_trylock(&dev->cmd.vars.unprivileged_sem)) { + ret = -EBUSY; + goto dec_num_inflight; + } + work->unpriv_locked = true; + } + } else if (mlx5_cmd_is_throttle_opcode(in_to_opcode(in))) { + if (down_trylock(&dev->cmd.vars.throttle_sem)) { + ret = -EBUSY; + goto dec_num_inflight; + } + work->throttle_locked = true; + } + + ret = cmd_exec(dev, in, in_size, out, out_size, mlx5_cmd_exec_cb_handler, work, false); - if (ret && atomic_dec_and_test(&ctx->num_inflight)) + if (ret) + goto sem_up; + + return 0; + +sem_up: + if (work->throttle_locked) + up(&dev->cmd.vars.throttle_sem); + if (work->unpriv_locked) + up(&dev->cmd.vars.unprivileged_sem); +dec_num_inflight: + if (atomic_dec_and_test(&ctx->num_inflight)) complete(&ctx->inflight_done); return ret; @@ -2371,10 +2435,16 @@ int mlx5_cmd_enable(struct mlx5_core_dev *dev) sema_init(&cmd->vars.sem, cmd->vars.max_reg_cmds); sema_init(&cmd->vars.pages_sem, 1); sema_init(&cmd->vars.throttle_sem, DIV_ROUND_UP(cmd->vars.max_reg_cmds, 2)); + sema_init(&cmd->vars.unprivileged_sem, + DIV_ROUND_UP(cmd->vars.max_reg_cmds, 2)); + + xa_init(&cmd->vars.privileged_uids); cmd->pool = dma_pool_create("mlx5_cmd", mlx5_core_dma_dev(dev), size, align, 0); - if (!cmd->pool) - return -ENOMEM; + if (!cmd->pool) { + err = -ENOMEM; + goto err_destroy_xa; + } err = alloc_cmd_page(dev, cmd); if (err) @@ -2408,6 +2478,8 @@ int mlx5_cmd_enable(struct mlx5_core_dev *dev) free_cmd_page(dev, cmd); err_free_pool: dma_pool_destroy(cmd->pool); +err_destroy_xa: + xa_destroy(&dev->cmd.vars.privileged_uids); return err; } @@ -2420,6 +2492,7 @@ void mlx5_cmd_disable(struct mlx5_core_dev *dev) destroy_msg_cache(dev); free_cmd_page(dev, cmd); dma_pool_destroy(cmd->pool); + xa_destroy(&dev->cmd.vars.privileged_uids); } void mlx5_cmd_set_state(struct mlx5_core_dev *dev, @@ -2427,3 +2500,18 @@ void mlx5_cmd_set_state(struct mlx5_core_dev *dev, { dev->cmd.state = cmdif_state; } + +int mlx5_cmd_add_privileged_uid(struct mlx5_core_dev *dev, u16 uid) +{ + return xa_insert(&dev->cmd.vars.privileged_uids, uid, + xa_mk_value(uid), GFP_KERNEL); +} +EXPORT_SYMBOL(mlx5_cmd_add_privileged_uid); + +void mlx5_cmd_remove_privileged_uid(struct mlx5_core_dev *dev, u16 uid) +{ + void *data = xa_erase(&dev->cmd.vars.privileged_uids, uid); + + WARN(!data, "Privileged UID %u does not exist\n", uid); +} +EXPORT_SYMBOL(mlx5_cmd_remove_privileged_uid); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c index 98d4306929f3e..97bde76af3991 100644 --- 
a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c @@ -35,6 +35,64 @@ static u16 mlx5_fw_ver_subminor(u32 version) return version & 0xffff; } +static int mlx5_devlink_serial_numbers_put(struct mlx5_core_dev *dev, + struct devlink_info_req *req, + struct netlink_ext_ack *extack) +{ + struct pci_dev *pdev = dev->pdev; + unsigned int vpd_size, kw_len; + char *str, *end; + u8 *vpd_data; + int err = 0; + int start; + + vpd_data = pci_vpd_alloc(pdev, &vpd_size); + if (IS_ERR(vpd_data)) + return 0; + + start = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size, + PCI_VPD_RO_KEYWORD_SERIALNO, &kw_len); + if (start >= 0) { + str = kstrndup(vpd_data + start, kw_len, GFP_KERNEL); + if (!str) { + err = -ENOMEM; + goto end; + } + end = strchrnul(str, ' '); + *end = '\0'; + err = devlink_info_board_serial_number_put(req, str); + kfree(str); + } + + start = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size, "V3", &kw_len); + if (start >= 0) { + str = kstrndup(vpd_data + start, kw_len, GFP_KERNEL); + if (!str) { + err = -ENOMEM; + goto end; + } + err = devlink_info_serial_number_put(req, str); + kfree(str); + } + + start = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size, "VU", &kw_len); + if (start >= 0) { + str = kstrndup(vpd_data + start, kw_len, GFP_KERNEL); + if (!str) { + err = -ENOMEM; + goto end; + } + end = strchrnul(str, ' '); + *end = '\0'; + err = devlink_info_function_uid_put(req, str); + kfree(str); + } + +end: + kfree(vpd_data); + return err; +} + #define DEVLINK_FW_STRING_LEN 32 static int @@ -46,6 +104,13 @@ mlx5_devlink_info_get(struct devlink *devlink, struct devlink_info_req *req, u32 running_fw, stored_fw; int err; + if (!mlx5_core_is_pf(dev)) + return 0; + + err = mlx5_devlink_serial_numbers_put(dev, req, extack); + if (err) + return err; + err = devlink_info_version_fixed_put(req, "fw.psid", dev->board_id); if (err) return err; @@ -324,7 +389,8 @@ static const struct devlink_ops mlx5_devlink_ops = { .rate_node_tx_max_set = mlx5_esw_devlink_rate_node_tx_max_set, .rate_node_new = mlx5_esw_devlink_rate_node_new, .rate_node_del = mlx5_esw_devlink_rate_node_del, - .rate_leaf_parent_set = mlx5_esw_devlink_rate_parent_set, + .rate_leaf_parent_set = mlx5_esw_devlink_rate_leaf_parent_set, + .rate_node_parent_set = mlx5_esw_devlink_rate_node_parent_set, #endif #ifdef CONFIG_MLX5_SF_MANAGER .port_new = mlx5_devlink_sf_port_new, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/reporter_vnic.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/reporter_vnic.c index c7216e84ef8c0..86253a89c24c9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/diag/reporter_vnic.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/reporter_vnic.c @@ -13,6 +13,50 @@ struct mlx5_vnic_diag_stats { __be64 query_vnic_env_out[MLX5_ST_SZ_QW(query_vnic_env_out)]; }; +static void mlx5_reporter_vnic_diagnose_counter_icm(struct mlx5_core_dev *dev, + struct devlink_fmsg *fmsg, + u16 vport_num, bool other_vport) +{ + u32 out_icm_reg[MLX5_ST_SZ_DW(vhca_icm_ctrl_reg)] = {}; + u32 in_icm_reg[MLX5_ST_SZ_DW(vhca_icm_ctrl_reg)] = {}; + u32 out_reg[MLX5_ST_SZ_DW(nic_cap_reg)] = {}; + u32 in_reg[MLX5_ST_SZ_DW(nic_cap_reg)] = {}; + u32 cur_alloc_icm; + int vhca_icm_ctrl; + u16 vhca_id; + int err; + + err = mlx5_core_access_reg(dev, in_reg, sizeof(in_reg), out_reg, + sizeof(out_reg), MLX5_REG_NIC_CAP, 0, 0); + if (err) { + mlx5_core_warn(dev, "Reading nic_cap_reg failed. 
err = %d\n", err); + return; + } + vhca_icm_ctrl = MLX5_GET(nic_cap_reg, out_reg, vhca_icm_ctrl); + if (!vhca_icm_ctrl) + return; + + MLX5_SET(vhca_icm_ctrl_reg, in_icm_reg, vhca_id_valid, other_vport); + if (other_vport) { + err = mlx5_vport_get_vhca_id(dev, vport_num, &vhca_id); + if (err) { + mlx5_core_warn(dev, "vport to vhca_id failed. vport_num = %d, err = %d\n", + vport_num, err); + return; + } + MLX5_SET(vhca_icm_ctrl_reg, in_icm_reg, vhca_id, vhca_id); + } + err = mlx5_core_access_reg(dev, in_icm_reg, sizeof(in_icm_reg), + out_icm_reg, sizeof(out_icm_reg), + MLX5_REG_VHCA_ICM_CTRL, 0, 0); + if (err) { + mlx5_core_warn(dev, "Reading vhca_icm_ctrl failed. err = %d\n", err); + return; + } + cur_alloc_icm = MLX5_GET(vhca_icm_ctrl_reg, out_icm_reg, cur_alloc_icm); + devlink_fmsg_u32_pair_put(fmsg, "icm_consumption", cur_alloc_icm); +} + void mlx5_reporter_vnic_diagnose_counters(struct mlx5_core_dev *dev, struct devlink_fmsg *fmsg, u16 vport_num, bool other_vport) @@ -59,6 +103,8 @@ void mlx5_reporter_vnic_diagnose_counters(struct mlx5_core_dev *dev, devlink_fmsg_u64_pair_put(fmsg, "handled_pkt_steering_fail", VNIC_ENV_GET64(&vnic, handled_pkt_steering_fail)); } + if (MLX5_CAP_GEN(dev, nic_cap_reg)) + mlx5_reporter_vnic_diagnose_counter_icm(dev, fmsg, vport_num, other_vport); devlink_fmsg_obj_nest_end(fmsg); devlink_fmsg_pair_nest_end(fmsg); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dpll.c b/drivers/net/ethernet/mellanox/mlx5/core/dpll.c index 31142f6cc3729..1e5522a194839 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/dpll.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/dpll.c @@ -242,7 +242,7 @@ static int mlx5_dpll_clock_quality_level_get(const struct dpll_device *dpll, return 0; } errout: - NL_SET_ERR_MSG_MOD(extack, "Invalid clock quality level obtained from firmware\n"); + NL_SET_ERR_MSG_MOD(extack, "Invalid clock quality level obtained from firmware"); return -EINVAL; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 979fc56205e1f..32ed4963b8ada 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -95,8 +95,6 @@ struct page_pool; #define MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev) \ MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, order_base_2(MLX5E_RX_MAX_HEAD)) -#define MLX5_MPWRQ_MAX_LOG_WQE_SZ 18 - /* Keep in sync with mlx5e_mpwrq_log_wqe_sz. * These are theoretical maximums, which can be further restricted by * capabilities. 
These values are used for static resource allocations and @@ -232,16 +230,22 @@ struct mlx5e_rx_wqe_cyc { DECLARE_FLEX_ARRAY(struct mlx5_wqe_data_seg, data); }; -struct mlx5e_umr_wqe { +struct mlx5e_umr_wqe_hdr { struct mlx5_wqe_ctrl_seg ctrl; struct mlx5_wqe_umr_ctrl_seg uctrl; struct mlx5_mkey_seg mkc; +}; + +struct mlx5e_umr_wqe { + struct mlx5e_umr_wqe_hdr hdr; union { DECLARE_FLEX_ARRAY(struct mlx5_mtt, inline_mtts); DECLARE_FLEX_ARRAY(struct mlx5_klm, inline_klms); DECLARE_FLEX_ARRAY(struct mlx5_ksm, inline_ksms); }; }; +static_assert(offsetof(struct mlx5e_umr_wqe, inline_mtts) == sizeof(struct mlx5e_umr_wqe_hdr), + "struct members should be included in struct mlx5e_umr_wqe_hdr, not in struct mlx5e_umr_wqe"); enum mlx5e_priv_flag { MLX5E_PFLAG_RX_CQE_BASED_MODER, @@ -386,7 +390,6 @@ enum { MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, MLX5E_SQ_STATE_PENDING_XSK_TX, MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC, - MLX5E_SQ_STATE_XDP_MULTIBUF, MLX5E_NUM_SQ_STATES, /* Must be kept last */ }; @@ -395,6 +398,7 @@ struct mlx5e_tx_mpwqe { struct mlx5e_tx_wqe *wqe; u32 bytes_count; u8 ds_count; + u8 ds_count_max; u8 pkt_count; u8 inline_on; }; @@ -660,7 +664,7 @@ struct mlx5e_rq { } wqe; struct { struct mlx5_wq_ll wq; - struct mlx5e_umr_wqe umr_wqe; + struct mlx5e_umr_wqe_hdr umr_wqe; struct mlx5e_mpw_info *info; mlx5e_fp_skb_from_cqe_mpwrq skb_from_cqe_mpwrq; __be32 umr_mkey_be; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h index 1e8b7d3307014..b5c3a2a9d2a59 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h @@ -84,9 +84,9 @@ enum { MLX5E_ARFS_FT_LEVEL = MLX5E_INNER_TTC_FT_LEVEL + 1, #endif #ifdef CONFIG_MLX5_EN_IPSEC - MLX5E_ACCEL_FS_POL_FT_LEVEL = MLX5E_INNER_TTC_FT_LEVEL + 1, - MLX5E_ACCEL_FS_ESP_FT_LEVEL, + MLX5E_ACCEL_FS_ESP_FT_LEVEL = MLX5E_INNER_TTC_FT_LEVEL + 1, MLX5E_ACCEL_FS_ESP_FT_ERR_LEVEL, + MLX5E_ACCEL_FS_POL_FT_LEVEL, MLX5E_ACCEL_FS_ESP_FT_ROCE_LEVEL, #endif }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c index 64b62ed17b07a..58ec5e44aa7ad 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c @@ -10,6 +10,9 @@ #include #include +#define MLX5_MPWRQ_MAX_LOG_WQE_SZ 18 +#define MLX5_REP_MPWRQ_MAX_LOG_WQE_SZ 17 + static u8 mlx5e_mpwrq_min_page_shift(struct mlx5_core_dev *mdev) { u8 min_page_shift = MLX5_CAP_GEN_2(mdev, log_min_mkey_entity_size); @@ -103,18 +106,22 @@ u8 mlx5e_mpwrq_log_wqe_sz(struct mlx5_core_dev *mdev, u8 page_shift, enum mlx5e_mpwrq_umr_mode umr_mode) { u8 umr_entry_size = mlx5e_mpwrq_umr_entry_size(umr_mode); - u8 max_pages_per_wqe, max_log_mpwqe_size; + u8 max_pages_per_wqe, max_log_wqe_size_calc; + u8 max_log_wqe_size_cap; u16 max_wqe_size; /* Keep in sync with MLX5_MPWRQ_MAX_PAGES_PER_WQE. */ max_wqe_size = mlx5e_get_max_sq_aligned_wqebbs(mdev) * MLX5_SEND_WQE_BB; max_pages_per_wqe = ALIGN_DOWN(max_wqe_size - sizeof(struct mlx5e_umr_wqe), MLX5_UMR_FLEX_ALIGNMENT) / umr_entry_size; - max_log_mpwqe_size = ilog2(max_pages_per_wqe) + page_shift; + max_log_wqe_size_calc = ilog2(max_pages_per_wqe) + page_shift; + + WARN_ON_ONCE(max_log_wqe_size_calc < MLX5E_ORDER2_MAX_PACKET_MTU); - WARN_ON_ONCE(max_log_mpwqe_size < MLX5E_ORDER2_MAX_PACKET_MTU); + max_log_wqe_size_cap = mlx5_core_is_ecpf(mdev) ? 
+ MLX5_REP_MPWRQ_MAX_LOG_WQE_SZ : MLX5_MPWRQ_MAX_LOG_WQE_SZ; - return min_t(u8, max_log_mpwqe_size, MLX5_MPWRQ_MAX_LOG_WQE_SZ); + return min_t(u8, max_log_wqe_size_calc, max_log_wqe_size_cap); } u8 mlx5e_mpwrq_pages_per_wqe(struct mlx5_core_dev *mdev, u8 page_shift, @@ -423,7 +430,7 @@ u8 mlx5e_shampo_get_log_pkt_per_rsrv(struct mlx5_core_dev *mdev, struct mlx5e_params *params) { u32 resrv_size = BIT(mlx5e_shampo_get_log_rsrv_size(mdev, params)) * - PAGE_SIZE; + MLX5E_SHAMPO_WQ_BASE_RESRV_SIZE; return order_base_2(DIV_ROUND_UP(resrv_size, params->sw_mtu)); } @@ -827,7 +834,8 @@ static u32 mlx5e_shampo_get_log_cq_size(struct mlx5_core_dev *mdev, struct mlx5e_params *params, struct mlx5e_xsk_param *xsk) { - int rsrv_size = BIT(mlx5e_shampo_get_log_rsrv_size(mdev, params)) * PAGE_SIZE; + int rsrv_size = BIT(mlx5e_shampo_get_log_rsrv_size(mdev, params)) * + MLX5E_SHAMPO_WQ_BASE_RESRV_SIZE; u16 num_strides = BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk)); int pkt_per_rsrv = BIT(mlx5e_shampo_get_log_pkt_per_rsrv(mdev, params)); u8 log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk); @@ -1036,7 +1044,8 @@ u32 mlx5e_shampo_hd_per_wqe(struct mlx5_core_dev *mdev, struct mlx5e_params *params, struct mlx5e_rq_param *rq_param) { - int resv_size = BIT(mlx5e_shampo_get_log_rsrv_size(mdev, params)) * PAGE_SIZE; + int resv_size = BIT(mlx5e_shampo_get_log_rsrv_size(mdev, params)) * + MLX5E_SHAMPO_WQ_BASE_RESRV_SIZE; u16 num_strides = BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params, NULL)); int pkt_per_resv = BIT(mlx5e_shampo_get_log_pkt_per_rsrv(mdev, params)); u8 log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params, NULL); @@ -1240,7 +1249,6 @@ void mlx5e_build_xdpsq_param(struct mlx5_core_dev *mdev, mlx5e_build_sq_param_common(mdev, param); MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size); param->is_mpw = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_XDP_TX_MPWQE); - param->is_xdp_mb = !mlx5e_rx_is_linear_skb(mdev, params, xsk); mlx5e_build_tx_cq_param(mdev, params, ¶m->cqp); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h index 3f8986f9d8629..bd5877acc5b1e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h @@ -33,7 +33,6 @@ struct mlx5e_sq_param { struct mlx5_wq_param wq; bool is_mpw; bool is_tls; - bool is_xdp_mb; u16 stop_room; }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c index 5f6a0605e4ae8..6049ccf475bc4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c @@ -80,6 +80,7 @@ int mlx5_port_set_eth_ptys(struct mlx5_core_dev *dev, bool an_disable, int mlx5e_port_linkspeed(struct mlx5_core_dev *mdev, u32 *speed) { struct mlx5_port_eth_proto eproto; + const struct mlx5_link_info *info; bool force_legacy = false; bool ext; int err; @@ -94,9 +95,13 @@ int mlx5e_port_linkspeed(struct mlx5_core_dev *mdev, u32 *speed) if (err) goto out; } - *speed = mlx5_port_ptys2speed(mdev, eproto.oper, force_legacy); - if (!(*speed)) + info = mlx5_port_ptys2info(mdev, eproto.oper, force_legacy); + if (!info) { + *speed = SPEED_UNKNOWN; err = -EINVAL; + goto out; + } + *speed = info->speed; out: return err; @@ -296,11 +301,16 @@ enum mlx5e_fec_supported_link_mode { MLX5E_FEC_SUPPORTED_LINK_MODE_200G_2X, MLX5E_FEC_SUPPORTED_LINK_MODE_400G_4X, MLX5E_FEC_SUPPORTED_LINK_MODE_800G_8X, + MLX5E_FEC_SUPPORTED_LINK_MODE_200G_1X, + 
MLX5E_FEC_SUPPORTED_LINK_MODE_400G_2X, + MLX5E_FEC_SUPPORTED_LINK_MODE_800G_4X, + MLX5E_FEC_SUPPORTED_LINK_MODE_1600G_8X, MLX5E_MAX_FEC_SUPPORTED_LINK_MODE, }; #define MLX5E_FEC_FIRST_50G_PER_LANE_MODE MLX5E_FEC_SUPPORTED_LINK_MODE_50G_1X #define MLX5E_FEC_FIRST_100G_PER_LANE_MODE MLX5E_FEC_SUPPORTED_LINK_MODE_100G_1X +#define MLX5E_FEC_FIRST_200G_PER_LANE_MODE MLX5E_FEC_SUPPORTED_LINK_MODE_200G_1X #define MLX5E_FEC_OVERRIDE_ADMIN_POLICY(buf, policy, write, link) \ do { \ @@ -320,8 +330,10 @@ static bool mlx5e_is_fec_supported_link_mode(struct mlx5_core_dev *dev, return link_mode < MLX5E_FEC_FIRST_50G_PER_LANE_MODE || (link_mode < MLX5E_FEC_FIRST_100G_PER_LANE_MODE && MLX5_CAP_PCAM_FEATURE(dev, fec_50G_per_lane_in_pplm)) || - (link_mode >= MLX5E_FEC_FIRST_100G_PER_LANE_MODE && - MLX5_CAP_PCAM_FEATURE(dev, fec_100G_per_lane_in_pplm)); + (link_mode < MLX5E_FEC_FIRST_200G_PER_LANE_MODE && + MLX5_CAP_PCAM_FEATURE(dev, fec_100G_per_lane_in_pplm)) || + (link_mode >= MLX5E_FEC_FIRST_200G_PER_LANE_MODE && + MLX5_CAP_PCAM_FEATURE(dev, fec_200G_per_lane_in_pplm)); } /* get/set FEC admin field for a given speed */ @@ -368,6 +380,18 @@ static int mlx5e_fec_admin_field(u32 *pplm, u16 *fec_policy, bool write, case MLX5E_FEC_SUPPORTED_LINK_MODE_800G_8X: MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 800g_8x); break; + case MLX5E_FEC_SUPPORTED_LINK_MODE_200G_1X: + MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 200g_1x); + break; + case MLX5E_FEC_SUPPORTED_LINK_MODE_400G_2X: + MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 400g_2x); + break; + case MLX5E_FEC_SUPPORTED_LINK_MODE_800G_4X: + MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 800g_4x); + break; + case MLX5E_FEC_SUPPORTED_LINK_MODE_1600G_8X: + MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 1600g_8x); + break; default: return -EINVAL; } @@ -421,6 +445,18 @@ static int mlx5e_get_fec_cap_field(u32 *pplm, u16 *fec_cap, case MLX5E_FEC_SUPPORTED_LINK_MODE_800G_8X: *fec_cap = MLX5E_GET_FEC_OVERRIDE_CAP(pplm, 800g_8x); break; + case MLX5E_FEC_SUPPORTED_LINK_MODE_200G_1X: + *fec_cap = MLX5E_GET_FEC_OVERRIDE_CAP(pplm, 200g_1x); + break; + case MLX5E_FEC_SUPPORTED_LINK_MODE_400G_2X: + *fec_cap = MLX5E_GET_FEC_OVERRIDE_CAP(pplm, 400g_2x); + break; + case MLX5E_FEC_SUPPORTED_LINK_MODE_800G_4X: + *fec_cap = MLX5E_GET_FEC_OVERRIDE_CAP(pplm, 800g_4x); + break; + case MLX5E_FEC_SUPPORTED_LINK_MODE_1600G_8X: + *fec_cap = MLX5E_GET_FEC_OVERRIDE_CAP(pplm, 1600g_8x); + break; default: return -EINVAL; } @@ -494,6 +530,26 @@ int mlx5e_get_fec_mode(struct mlx5_core_dev *dev, u32 *fec_mode_active, return 0; } +static u16 mlx5e_remap_fec_conf_mode(enum mlx5e_fec_supported_link_mode link_mode, + u16 conf_fec) +{ + /* RS fec in ethtool is originally mapped to MLX5E_FEC_RS_528_514. + * For link modes up to 25G per lane, the value is kept. + * For 50G or 100G per lane, it's remapped to MLX5E_FEC_RS_544_514. + * For 200G per lane, remapped to MLX5E_FEC_RS_544_514_INTERLEAVED_QUAD. 
+ */ + if (conf_fec != BIT(MLX5E_FEC_RS_528_514)) + return conf_fec; + + if (link_mode >= MLX5E_FEC_FIRST_200G_PER_LANE_MODE) + return BIT(MLX5E_FEC_RS_544_514_INTERLEAVED_QUAD); + + if (link_mode >= MLX5E_FEC_FIRST_50G_PER_LANE_MODE) + return BIT(MLX5E_FEC_RS_544_514); + + return conf_fec; +} + int mlx5e_set_fec_mode(struct mlx5_core_dev *dev, u16 fec_policy) { bool fec_50g_per_lane = MLX5_CAP_PCAM_FEATURE(dev, fec_50G_per_lane_in_pplm); @@ -530,14 +586,7 @@ int mlx5e_set_fec_mode(struct mlx5_core_dev *dev, u16 fec_policy) if (!mlx5e_is_fec_supported_link_mode(dev, i)) break; - /* RS fec in ethtool is mapped to MLX5E_FEC_RS_528_514 - * to link modes up to 25G per lane and to - * MLX5E_FEC_RS_544_514 in the new link modes based on - * 50G or 100G per lane - */ - if (conf_fec == (1 << MLX5E_FEC_RS_528_514) && - i >= MLX5E_FEC_FIRST_50G_PER_LANE_MODE) - conf_fec = (1 << MLX5E_FEC_RS_544_514); + conf_fec = mlx5e_remap_fec_conf_mode(i, conf_fec); mlx5e_get_fec_cap_field(out, &fec_caps, i); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port.h b/drivers/net/ethernet/mellanox/mlx5/core/en/port.h index d1da225f35dad..fa2283dd383b0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/port.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port.h @@ -61,6 +61,7 @@ enum { MLX5E_FEC_NOFEC, MLX5E_FEC_FIRECODE, MLX5E_FEC_RS_528_514, + MLX5E_FEC_RS_544_514_INTERLEAVED_QUAD = 4, MLX5E_FEC_RS_544_514 = 7, MLX5E_FEC_LLRS_272_257_1 = 9, }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c index afd654583b6b1..131ed97ca997d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c @@ -326,7 +326,7 @@ static int mlx5e_ptp_alloc_txqsq(struct mlx5e_ptp *c, int txq_ix, int node; sq->pdev = c->pdev; - sq->clock = &mdev->clock; + sq->clock = mdev->clock; sq->mkey_be = c->mkey_be; sq->netdev = c->netdev; sq->priv = c->priv; @@ -696,7 +696,7 @@ static int mlx5e_init_ptp_rq(struct mlx5e_ptp *c, struct mlx5e_params *params, rq->pdev = c->pdev; rq->netdev = priv->netdev; rq->priv = priv; - rq->clock = &mdev->clock; + rq->clock = mdev->clock; rq->tstamp = &priv->tstamp; rq->mdev = mdev; rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c index 5d128c5b4529a..0f5d7ea8956f7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c @@ -48,15 +48,10 @@ mlx5_esw_bridge_lag_rep_get(struct net_device *dev, struct mlx5_eswitch *esw) struct list_head *iter; netdev_for_each_lower_dev(dev, lower, iter) { - struct mlx5_core_dev *mdev; - struct mlx5e_priv *priv; - if (!mlx5e_eswitch_rep(lower)) continue; - priv = netdev_priv(lower); - mdev = priv->mdev; - if (mlx5_lag_is_shared_fdb(mdev) && mlx5_esw_bridge_dev_same_esw(lower, esw)) + if (mlx5_esw_bridge_dev_same_esw(lower, esw)) return lower; } @@ -125,7 +120,7 @@ static bool mlx5_esw_bridge_is_local(struct net_device *dev, struct net_device * priv = netdev_priv(rep); mdev = priv->mdev; if (netif_is_lag_master(dev)) - return mlx5_lag_is_shared_fdb(mdev) && mlx5_lag_is_master(mdev); + return mlx5_lag_is_master(mdev); return true; } @@ -455,6 +450,9 @@ static int mlx5_esw_bridge_switchdev_event(struct notifier_block *nb, if (!rep) return NOTIFY_DONE; + if (netif_is_lag_master(dev) && !mlx5_lag_is_shared_fdb(esw->dev)) + return NOTIFY_DONE; + switch 
(event) { case SWITCHDEV_FDB_ADD_TO_BRIDGE: fdb_info = container_of(info, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c index 25d751eba99ba..e75759533ae0c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c @@ -317,10 +317,8 @@ mlx5e_rx_reporter_diagnose_common_ptp_config(struct mlx5e_priv *priv, struct mlx } static void -mlx5e_rx_reporter_diagnose_common_config(struct devlink_health_reporter *reporter, - struct devlink_fmsg *fmsg) +mlx5e_rx_reporter_diagnose_common_config(struct mlx5e_priv *priv, struct devlink_fmsg *fmsg) { - struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter); struct mlx5e_rq *generic_rq = &priv->channels.c[0]->rq; struct mlx5e_ptp *ptp_ch = priv->channels.ptp; @@ -340,20 +338,100 @@ static void mlx5e_rx_reporter_build_diagnose_output_ptp_rq(struct mlx5e_rq *rq, devlink_fmsg_obj_nest_end(fmsg); } -static int mlx5e_rx_reporter_diagnose(struct devlink_health_reporter *reporter, - struct devlink_fmsg *fmsg, - struct netlink_ext_ack *extack) +static void mlx5e_rx_reporter_diagnose_rx_res_dir_tirns(struct mlx5e_rx_res *rx_res, + struct devlink_fmsg *fmsg) { - struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter); - struct mlx5e_ptp *ptp_ch = priv->channels.ptp; + unsigned int max_nch = mlx5e_rx_res_get_max_nch(rx_res); int i; - mutex_lock(&priv->state_lock); + devlink_fmsg_arr_pair_nest_start(fmsg, "Direct TIRs"); - if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) - goto unlock; + for (i = 0; i < max_nch; i++) { + devlink_fmsg_obj_nest_start(fmsg); + + devlink_fmsg_u32_pair_put(fmsg, "ix", i); + devlink_fmsg_u32_pair_put(fmsg, "tirn", mlx5e_rx_res_get_tirn_direct(rx_res, i)); + devlink_fmsg_u32_pair_put(fmsg, "rqtn", mlx5e_rx_res_get_rqtn_direct(rx_res, i)); + + devlink_fmsg_obj_nest_end(fmsg); + } + + devlink_fmsg_arr_pair_nest_end(fmsg); +} + +static void mlx5e_rx_reporter_diagnose_rx_res_rss_tirn(struct mlx5e_rss *rss, bool inner, + struct devlink_fmsg *fmsg) +{ + bool found_valid_tir = false; + int tt; + + for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) { + if (!mlx5e_rss_valid_tir(rss, tt, inner)) + continue; + + if (!found_valid_tir) { + char *tir_msg = inner ? 
"Inner TIRs Numbers" : "TIRs Numbers"; + + found_valid_tir = true; + devlink_fmsg_arr_pair_nest_start(fmsg, tir_msg); + } + + devlink_fmsg_obj_nest_start(fmsg); + devlink_fmsg_string_pair_put(fmsg, "tt", mlx5_ttc_get_name(tt)); + devlink_fmsg_u32_pair_put(fmsg, "tirn", mlx5e_rss_get_tirn(rss, tt, inner)); + devlink_fmsg_obj_nest_end(fmsg); + } + + if (found_valid_tir) + devlink_fmsg_arr_pair_nest_end(fmsg); +} + +static void mlx5e_rx_reporter_diagnose_rx_res_rss_ix(struct mlx5e_rx_res *rx_res, u32 rss_idx, + struct devlink_fmsg *fmsg) +{ + struct mlx5e_rss *rss = mlx5e_rx_res_rss_get(rx_res, rss_idx); + + if (!rss) + return; + + devlink_fmsg_obj_nest_start(fmsg); + + devlink_fmsg_u32_pair_put(fmsg, "Index", rss_idx); + devlink_fmsg_u32_pair_put(fmsg, "rqtn", mlx5e_rss_get_rqtn(rss)); + mlx5e_rx_reporter_diagnose_rx_res_rss_tirn(rss, false, fmsg); + if (mlx5e_rss_get_inner_ft_support(rss)) + mlx5e_rx_reporter_diagnose_rx_res_rss_tirn(rss, true, fmsg); + + devlink_fmsg_obj_nest_end(fmsg); +} + +static void mlx5e_rx_reporter_diagnose_rx_res_rss(struct mlx5e_rx_res *rx_res, + struct devlink_fmsg *fmsg) +{ + int rss_ix; + + devlink_fmsg_arr_pair_nest_start(fmsg, "RSS"); + for (rss_ix = 0; rss_ix < MLX5E_MAX_NUM_RSS; rss_ix++) + mlx5e_rx_reporter_diagnose_rx_res_rss_ix(rx_res, rss_ix, fmsg); + devlink_fmsg_arr_pair_nest_end(fmsg); +} + +static void mlx5e_rx_reporter_diagnose_rx_res(struct mlx5e_priv *priv, + struct devlink_fmsg *fmsg) +{ + struct mlx5e_rx_res *rx_res = priv->rx_res; + + mlx5e_health_fmsg_named_obj_nest_start(fmsg, "RX resources"); + mlx5e_rx_reporter_diagnose_rx_res_dir_tirns(rx_res, fmsg); + mlx5e_rx_reporter_diagnose_rx_res_rss(rx_res, fmsg); + mlx5e_health_fmsg_named_obj_nest_end(fmsg); +} + +static void mlx5e_rx_reporter_diagnose_rqs(struct mlx5e_priv *priv, struct devlink_fmsg *fmsg) +{ + struct mlx5e_ptp *ptp_ch = priv->channels.ptp; + int i; - mlx5e_rx_reporter_diagnose_common_config(reporter, fmsg); devlink_fmsg_arr_pair_nest_start(fmsg, "RQs"); for (i = 0; i < priv->channels.num; i++) { @@ -367,7 +445,24 @@ static int mlx5e_rx_reporter_diagnose(struct devlink_health_reporter *reporter, } if (ptp_ch && test_bit(MLX5E_PTP_STATE_RX, ptp_ch->state)) mlx5e_rx_reporter_build_diagnose_output_ptp_rq(&ptp_ch->rq, fmsg); + devlink_fmsg_arr_pair_nest_end(fmsg); +} + +static int mlx5e_rx_reporter_diagnose(struct devlink_health_reporter *reporter, + struct devlink_fmsg *fmsg, + struct netlink_ext_ack *extack) +{ + struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter); + + mutex_lock(&priv->state_lock); + + if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) + goto unlock; + + mlx5e_rx_reporter_diagnose_common_config(priv, fmsg); + mlx5e_rx_reporter_diagnose_rqs(priv, fmsg); + mlx5e_rx_reporter_diagnose_rx_res(priv, fmsg); unlock: mutex_unlock(&priv->state_lock); return 0; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c index 09433b91be176..532c7fa94d172 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c @@ -16,7 +16,6 @@ static const char * const sq_sw_state_type_name[] = { [MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE] = "vlan_need_l2_inline", [MLX5E_SQ_STATE_PENDING_XSK_TX] = "pending_xsk_tx", [MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC] = "pending_tls_rx_resync", - [MLX5E_SQ_STATE_XDP_MULTIBUF] = "xdp_multibuf", }; static int mlx5e_wait_for_sq_flush(struct mlx5e_txqsq *sq) diff --git 
a/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c index 5f742f8966004..74cd111ee320c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c @@ -81,6 +81,11 @@ struct mlx5e_rss { refcount_t refcnt; }; +bool mlx5e_rss_get_inner_ft_support(struct mlx5e_rss *rss) +{ + return rss->inner_ft_support; +} + void mlx5e_rss_params_indir_modify_actual_size(struct mlx5e_rss *rss, u32 num_channels) { rss->indir.actual_table_size = mlx5e_rqt_size(rss->mdev, num_channels); @@ -156,6 +161,7 @@ static void mlx5e_rss_params_init(struct mlx5e_rss *rss) { enum mlx5_traffic_types tt; + rss->hash.symmetric = true; rss->hash.hfunc = ETH_RSS_HASH_TOP; netdev_rss_key_fill(rss->hash.toeplitz_hash_key, sizeof(rss->hash.toeplitz_hash_key)); @@ -449,6 +455,16 @@ u32 mlx5e_rss_get_tirn(struct mlx5e_rss *rss, enum mlx5_traffic_types tt, return mlx5e_tir_get_tirn(tir); } +u32 mlx5e_rss_get_rqtn(struct mlx5e_rss *rss) +{ + return mlx5e_rqt_get_rqtn(&rss->rqt); +} + +bool mlx5e_rss_valid_tir(struct mlx5e_rss *rss, enum mlx5_traffic_types tt, bool inner) +{ + return !!rss_get_tir(rss, tt, inner); +} + /* Fill the "tirn" output parameter. * Create the requested TIR if it's its first usage. */ @@ -551,7 +567,7 @@ int mlx5e_rss_packet_merge_set_param(struct mlx5e_rss *rss, return final_err; } -int mlx5e_rss_get_rxfh(struct mlx5e_rss *rss, u32 *indir, u8 *key, u8 *hfunc) +int mlx5e_rss_get_rxfh(struct mlx5e_rss *rss, u32 *indir, u8 *key, u8 *hfunc, bool *symmetric) { if (indir) memcpy(indir, rss->indir.table, @@ -564,11 +580,14 @@ int mlx5e_rss_get_rxfh(struct mlx5e_rss *rss, u32 *indir, u8 *key, u8 *hfunc) if (hfunc) *hfunc = rss->hash.hfunc; + if (symmetric) + *symmetric = rss->hash.symmetric; + return 0; } int mlx5e_rss_set_rxfh(struct mlx5e_rss *rss, const u32 *indir, - const u8 *key, const u8 *hfunc, + const u8 *key, const u8 *hfunc, const bool *symmetric, u32 *rqns, u32 *vhca_ids, unsigned int num_rqns) { bool changed_indir = false; @@ -608,6 +627,11 @@ int mlx5e_rss_set_rxfh(struct mlx5e_rss *rss, const u32 *indir, rss->indir.actual_table_size * sizeof(*rss->indir.table)); } + if (symmetric) { + rss->hash.symmetric = *symmetric; + changed_hash = true; + } + if (changed_indir && rss->enabled) { err = mlx5e_rss_apply(rss, rqns, vhca_ids, num_rqns); if (err) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rss.h b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.h index d0df98963c8dd..8ac902190010b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/rss.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.h @@ -32,8 +32,11 @@ void mlx5e_rss_refcnt_inc(struct mlx5e_rss *rss); void mlx5e_rss_refcnt_dec(struct mlx5e_rss *rss); unsigned int mlx5e_rss_refcnt_read(struct mlx5e_rss *rss); +bool mlx5e_rss_get_inner_ft_support(struct mlx5e_rss *rss); u32 mlx5e_rss_get_tirn(struct mlx5e_rss *rss, enum mlx5_traffic_types tt, bool inner); +bool mlx5e_rss_valid_tir(struct mlx5e_rss *rss, enum mlx5_traffic_types tt, bool inner); +u32 mlx5e_rss_get_rqtn(struct mlx5e_rss *rss); int mlx5e_rss_obtain_tirn(struct mlx5e_rss *rss, enum mlx5_traffic_types tt, const struct mlx5e_packet_merge_param *init_pkt_merge_param, @@ -44,9 +47,9 @@ void mlx5e_rss_disable(struct mlx5e_rss *rss); int mlx5e_rss_packet_merge_set_param(struct mlx5e_rss *rss, struct mlx5e_packet_merge_param *pkt_merge_param); -int mlx5e_rss_get_rxfh(struct mlx5e_rss *rss, u32 *indir, u8 *key, u8 *hfunc); +int mlx5e_rss_get_rxfh(struct mlx5e_rss *rss, u32 
*indir, u8 *key, u8 *hfunc, bool *symmetric); int mlx5e_rss_set_rxfh(struct mlx5e_rss *rss, const u32 *indir, - const u8 *key, const u8 *hfunc, + const u8 *key, const u8 *hfunc, const bool *symmetric, u32 *rqns, u32 *vhca_ids, unsigned int num_rqns); struct mlx5e_rss_params_hash mlx5e_rss_get_hash(struct mlx5e_rss *rss); u8 mlx5e_rss_get_hash_fields(struct mlx5e_rss *rss, enum mlx5_traffic_types tt); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c index a86eade9a9e07..5fcbe47337b08 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c @@ -5,8 +5,6 @@ #include "channels.h" #include "params.h" -#define MLX5E_MAX_NUM_RSS 16 - struct mlx5e_rx_res { struct mlx5_core_dev *mdev; /* primary */ enum mlx5e_rx_res_features features; @@ -196,7 +194,7 @@ void mlx5e_rx_res_rss_set_indir_uniform(struct mlx5e_rx_res *res, unsigned int n } int mlx5e_rx_res_rss_get_rxfh(struct mlx5e_rx_res *res, u32 rss_idx, - u32 *indir, u8 *key, u8 *hfunc) + u32 *indir, u8 *key, u8 *hfunc, bool *symmetric) { struct mlx5e_rss *rss; @@ -207,11 +205,12 @@ int mlx5e_rx_res_rss_get_rxfh(struct mlx5e_rx_res *res, u32 rss_idx, if (!rss) return -ENOENT; - return mlx5e_rss_get_rxfh(rss, indir, key, hfunc); + return mlx5e_rss_get_rxfh(rss, indir, key, hfunc, symmetric); } int mlx5e_rx_res_rss_set_rxfh(struct mlx5e_rx_res *res, u32 rss_idx, - const u32 *indir, const u8 *key, const u8 *hfunc) + const u32 *indir, const u8 *key, const u8 *hfunc, + const bool *symmetric) { u32 *vhca_ids = get_vhca_ids(res, 0); struct mlx5e_rss *rss; @@ -223,8 +222,8 @@ int mlx5e_rx_res_rss_set_rxfh(struct mlx5e_rx_res *res, u32 rss_idx, if (!rss) return -ENOENT; - return mlx5e_rss_set_rxfh(rss, indir, key, hfunc, res->rss_rqns, vhca_ids, - res->rss_nch); + return mlx5e_rss_set_rxfh(rss, indir, key, hfunc, symmetric, + res->rss_rqns, vhca_ids, res->rss_nch); } int mlx5e_rx_res_rss_get_hash_fields(struct mlx5e_rx_res *res, u32 rss_idx, @@ -497,6 +496,11 @@ void mlx5e_rx_res_destroy(struct mlx5e_rx_res *res) mlx5e_rx_res_free(res); } +unsigned int mlx5e_rx_res_get_max_nch(struct mlx5e_rx_res *res) +{ + return res->max_nch; +} + u32 mlx5e_rx_res_get_tirn_direct(struct mlx5e_rx_res *res, unsigned int ix) { return mlx5e_tir_get_tirn(&res->channels[ix].direct_tir); @@ -522,7 +526,7 @@ u32 mlx5e_rx_res_get_tirn_ptp(struct mlx5e_rx_res *res) return mlx5e_tir_get_tirn(&res->ptp.tir); } -static u32 mlx5e_rx_res_get_rqtn_direct(struct mlx5e_rx_res *res, unsigned int ix) +u32 mlx5e_rx_res_get_rqtn_direct(struct mlx5e_rx_res *res, unsigned int ix) { return mlx5e_rqt_get_rqtn(&res->channels[ix].direct_rqt); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h index 7b1a9f0f18741..3e09d91281af9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h @@ -10,6 +10,8 @@ #include "fs.h" #include "rss.h" +#define MLX5E_MAX_NUM_RSS 16 + struct mlx5e_rx_res; struct mlx5e_channels; @@ -34,6 +36,9 @@ u32 mlx5e_rx_res_get_tirn_direct(struct mlx5e_rx_res *res, unsigned int ix); u32 mlx5e_rx_res_get_tirn_rss(struct mlx5e_rx_res *res, enum mlx5_traffic_types tt); u32 mlx5e_rx_res_get_tirn_rss_inner(struct mlx5e_rx_res *res, enum mlx5_traffic_types tt); u32 mlx5e_rx_res_get_tirn_ptp(struct mlx5e_rx_res *res); +u32 mlx5e_rx_res_get_rqtn_direct(struct mlx5e_rx_res *res, unsigned int ix); +unsigned int 
mlx5e_rx_res_get_max_nch(struct mlx5e_rx_res *res); +bool mlx5_rx_res_rss_inner_ft_support(struct mlx5e_rx_res *res); /* Activate/deactivate API */ void mlx5e_rx_res_channels_activate(struct mlx5e_rx_res *res, struct mlx5e_channels *chs); @@ -44,9 +49,10 @@ void mlx5e_rx_res_xsk_update(struct mlx5e_rx_res *res, struct mlx5e_channels *ch /* Configuration API */ void mlx5e_rx_res_rss_set_indir_uniform(struct mlx5e_rx_res *res, unsigned int nch); int mlx5e_rx_res_rss_get_rxfh(struct mlx5e_rx_res *res, u32 rss_idx, - u32 *indir, u8 *key, u8 *hfunc); + u32 *indir, u8 *key, u8 *hfunc, bool *symmetric); int mlx5e_rx_res_rss_set_rxfh(struct mlx5e_rx_res *res, u32 rss_idx, - const u32 *indir, const u8 *key, const u8 *hfunc); + const u32 *indir, const u8 *key, const u8 *hfunc, + const bool *symmetric); int mlx5e_rx_res_rss_get_hash_fields(struct mlx5e_rx_res *res, u32 rss_idx, enum mlx5_traffic_types tt); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h index d6c12d0ea55ba..2e528b2c34d64 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h @@ -73,11 +73,6 @@ struct mlx5e_tc_act { bool is_terminating_action; }; -struct mlx5e_tc_flow_action { - unsigned int num_entries; - struct flow_action_entry **entries; -}; - extern struct mlx5e_tc_act mlx5e_tc_act_drop; extern struct mlx5e_tc_act mlx5e_tc_act_trap; extern struct mlx5e_tc_act mlx5e_tc_act_accept; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c index feeb41693c176..b6cabe829f198 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c @@ -5,6 +5,16 @@ #include "en/tc_priv.h" #include "en/tc_ct.h" +static bool +tc_act_can_offload_ct(struct mlx5e_tc_act_parse_state *parse_state, + const struct flow_action_entry *act, + int act_index, + struct mlx5_flow_attr *attr) +{ + return !((act->ct.action & TCA_CT_ACT_COMMIT) && + flow_action_is_last_entry(parse_state->flow_action, act)); +} + static int tc_act_parse_ct(struct mlx5e_tc_act_parse_state *parse_state, const struct flow_action_entry *act, @@ -56,6 +66,7 @@ tc_act_is_missable_ct(const struct flow_action_entry *act) } struct mlx5e_tc_act mlx5e_tc_act_ct = { + .can_offload = tc_act_can_offload_ct, .parse_action = tc_act_parse_ct, .post_parse = tc_act_post_parse_ct, .is_multi_table_act = tc_act_is_multi_table_act_ct, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c index 8218c892b1617..7819fb2972802 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c @@ -593,3 +593,8 @@ mlx5e_tc_meter_get_stats(struct mlx5e_flow_meter_handle *meter, *drops = packets2; *lastuse = max_t(u64, lastuse1, lastuse2); } + +int mlx5e_flow_meter_get_base_id(struct mlx5e_flow_meter_handle *meter) +{ + return meter->meters_obj->base_id; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.h index 9b795cd106bbe..d6afb6556875c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.h @@ -72,4 +72,17 @@ void mlx5e_tc_meter_get_stats(struct mlx5e_flow_meter_handle *meter, u64 *bytes, u64 *packets, u64 *drops, u64 *lastuse); +#if 
IS_ENABLED(CONFIG_MLX5_CLS_ACT) + +int mlx5e_flow_meter_get_base_id(struct mlx5e_flow_meter_handle *meter); + +#else /* CONFIG_MLX5_CLS_ACT */ + +static inline int +mlx5e_flow_meter_get_base_id(struct mlx5e_flow_meter_handle *meter) +{ + return 0; +} +#endif /* CONFIG_MLX5_CLS_ACT */ + #endif /* __MLX5_EN_FLOW_METER_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c index a065e8fafb1d1..81332cd4a5824 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c @@ -1349,6 +1349,32 @@ mlx5_tc_ct_block_flow_offload_stats(struct mlx5_ct_ft *ft, return 0; } +static bool +mlx5_tc_ct_filter_legacy_non_nic_flows(struct mlx5_ct_ft *ft, + struct flow_cls_offload *flow) +{ + struct flow_rule *rule = flow_cls_offload_flow_rule(flow); + struct mlx5_tc_ct_priv *ct_priv = ft->ct_priv; + struct flow_match_meta match; + struct net_device *netdev; + bool same_dev = false; + + if (!is_mdev_legacy_mode(ct_priv->dev) || + !flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META)) + return true; + + flow_rule_match_meta(rule, &match); + + if (!(match.key->ingress_ifindex & match.mask->ingress_ifindex)) + return true; + + netdev = dev_get_by_index(&init_net, match.key->ingress_ifindex); + same_dev = ct_priv->netdev == netdev; + dev_put(netdev); + + return same_dev; +} + static int mlx5_tc_ct_block_flow_offload(enum tc_setup_type type, void *type_data, void *cb_priv) @@ -1361,6 +1387,9 @@ mlx5_tc_ct_block_flow_offload(enum tc_setup_type type, void *type_data, switch (f->command) { case FLOW_CLS_REPLACE: + if (!mlx5_tc_ct_filter_legacy_non_nic_flows(ft, f)) + return -EOPNOTSUPP; + return mlx5_tc_ct_block_flow_offload_add(ft, f); case FLOW_CLS_DESTROY: return mlx5_tc_ct_block_flow_offload_del(ft, f); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c index 721f35e597579..2162d776fe35d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c @@ -31,8 +31,7 @@ static void mlx5e_tc_tun_route_attr_cleanup(struct mlx5e_tc_tun_route_attr *attr { if (attr->n) neigh_release(attr->n); - if (attr->route_dev) - dev_put(attr->route_dev); + dev_put(attr->route_dev); } struct mlx5e_tc_tunnel *mlx5e_get_tc_tun(struct net_device *tunnel_dev) @@ -68,16 +67,14 @@ static int get_route_and_out_devs(struct mlx5e_priv *priv, * while holding rcu read lock. Take the net_device for correctness * sake. 
*/ - if (uplink_upper) - dev_hold(uplink_upper); + dev_hold(uplink_upper); rcu_read_unlock(); dst_is_lag_dev = (uplink_upper && netif_is_lag_master(uplink_upper) && real_dev == uplink_upper && mlx5_lag_is_sriov(priv->mdev)); - if (uplink_upper) - dev_put(uplink_upper); + dev_put(uplink_upper); /* if the egress device isn't on the same HW e-switch or * it's a LAG device, use the uplink diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c index e7e01f3298efb..a0fc76a1bc082 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c @@ -42,8 +42,7 @@ static int mlx5e_set_int_port_tunnel(struct mlx5e_priv *priv, &attr->action, out_index); out: - if (route_dev) - dev_put(route_dev); + dev_put(route_dev); return err; } @@ -753,8 +752,7 @@ static int mlx5e_set_vf_tunnel(struct mlx5_eswitch *esw, } out: - if (route_dev) - dev_put(route_dev); + dev_put(route_dev); return err; } @@ -788,8 +786,7 @@ static int mlx5e_update_vf_tunnel(struct mlx5_eswitch *esw, mlx5e_tc_match_to_reg_mod_hdr_change(esw->dev, mod_hdr_acts, VPORT_TO_REG, act_id, data); out: - if (route_dev) - dev_put(route_dev); + dev_put(route_dev); return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c index e4e487c8431b8..5c762a71818db 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c @@ -140,7 +140,7 @@ static int mlx5e_tc_tun_parse_vxlan_gbp_option(struct mlx5e_priv *priv, gbp_mask = (u32 *)&enc_opts.mask->data[0]; if (*gbp_mask & ~VXLAN_GBP_MASK) { - NL_SET_ERR_MSG_FMT_MOD(extack, "Wrong VxLAN GBP mask(0x%08X)\n", *gbp_mask); + NL_SET_ERR_MSG_FMT_MOD(extack, "Wrong VxLAN GBP mask(0x%08X)", *gbp_mask); return -EINVAL; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tir.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tir.c index 11f724ad90dbf..19499072f67f6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tir.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tir.c @@ -124,7 +124,7 @@ void mlx5e_tir_builder_build_rss(struct mlx5e_tir_builder *builder, const size_t len = MLX5_FLD_SZ_BYTES(tirc, rx_hash_toeplitz_key); void *rss_key = MLX5_ADDR_OF(tirc, tirc, rx_hash_toeplitz_key); - MLX5_SET(tirc, tirc, rx_hash_symmetric, 1); + MLX5_SET(tirc, tirc, rx_hash_symmetric, rss_hash->symmetric); memcpy(rss_key, rss_hash->toeplitz_hash_key, len); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tir.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tir.h index 857a84bcd53af..e8df3aaf6562f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tir.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tir.h @@ -9,6 +9,7 @@ struct mlx5e_rss_params_hash { u8 hfunc; u8 toeplitz_hash_key[40]; + bool symmetric; }; struct mlx5e_rss_params_traffic_type { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c index 53ca16cb9c41a..140606fcd23b1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c @@ -46,7 +46,7 @@ static void mlx5e_init_trap_rq(struct mlx5e_trap *t, struct mlx5e_params *params rq->pdev = t->pdev; rq->netdev = priv->netdev; rq->priv = priv; - rq->clock = &mdev->clock; + rq->clock = mdev->clock; rq->tstamp = &priv->tstamp; rq->mdev = mdev; rq->hw_mtu = MLX5E_SW2HW_MTU(params, 
params->sw_mtu); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h index 5ec468268d1aa..e837c21d3d213 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h @@ -214,6 +214,19 @@ static inline u16 mlx5e_txqsq_get_next_pi(struct mlx5e_txqsq *sq, u16 size) return pi; } +static inline u16 mlx5e_txqsq_get_next_pi_anysize(struct mlx5e_txqsq *sq, + u16 *size) +{ + struct mlx5_wq_cyc *wq = &sq->wq; + u16 pi, contig_wqebbs; + + pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); + contig_wqebbs = mlx5_wq_cyc_get_contig_wqebbs(wq, pi); + *size = min_t(u16, contig_wqebbs, sq->max_sq_mpw_wqebbs); + + return pi; +} + void mlx5e_txqsq_wake(struct mlx5e_txqsq *sq); static inline u16 mlx5e_shampo_get_cqe_header_index(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) @@ -358,9 +371,9 @@ mlx5e_tx_dma_unmap(struct device *pdev, struct mlx5e_sq_dma *dma) void mlx5e_tx_mpwqe_ensure_complete(struct mlx5e_txqsq *sq); -static inline bool mlx5e_tx_mpwqe_is_full(struct mlx5e_tx_mpwqe *session, u8 max_sq_mpw_wqebbs) +static inline bool mlx5e_tx_mpwqe_is_full(struct mlx5e_tx_mpwqe *session) { - return session->ds_count == max_sq_mpw_wqebbs * MLX5_SEND_WQEBB_NUM_DS; + return session->ds_count == session->ds_count_max; } static inline void mlx5e_rqwq_reset(struct mlx5e_rq *rq) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c index 94b2916620873..f803e1c935900 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c @@ -289,9 +289,9 @@ static u64 mlx5e_xsk_fill_timestamp(void *_priv) ts = get_cqe_ts(priv->cqe); if (mlx5_is_real_time_rq(priv->cq->mdev) || mlx5_is_real_time_sq(priv->cq->mdev)) - return mlx5_real_time_cyc2time(&priv->cq->mdev->clock, ts); + return mlx5_real_time_cyc2time(priv->cq->mdev->clock, ts); - return mlx5_timecounter_cyc2time(&priv->cq->mdev->clock, ts); + return mlx5_timecounter_cyc2time(priv->cq->mdev->clock, ts); } static void mlx5e_xsk_request_checksum(u16 csum_start, u16 csum_offset, void *priv) @@ -390,6 +390,7 @@ static void mlx5e_xdp_mpwqe_session_start(struct mlx5e_xdpsq *sq) .wqe = wqe, .bytes_count = 0, .ds_count = MLX5E_TX_WQE_EMPTY_DS_COUNT, + .ds_count_max = sq->max_sq_mpw_wqebbs * MLX5_SEND_WQEBB_NUM_DS, .pkt_count = 0, .inline_on = mlx5e_xdp_get_inline_state(sq, session->inline_on), }; @@ -501,7 +502,7 @@ mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptx mlx5e_xdp_mpwqe_add_dseg(sq, p, stats); - if (unlikely(mlx5e_xdp_mpwqe_is_full(session, sq->max_sq_mpw_wqebbs))) + if (unlikely(mlx5e_xdp_mpwqe_is_full(session))) mlx5e_xdp_mpwqe_complete(sq); stats->xmit++; @@ -546,6 +547,7 @@ mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd, bool inline_ok; bool linear; u16 pi; + int i; struct mlx5e_xdpsq_stats *stats = sq->stats; @@ -612,41 +614,33 @@ mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd, cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_SEND); - if (test_bit(MLX5E_SQ_STATE_XDP_MULTIBUF, &sq->state)) { - int i; - - memset(&cseg->trailer, 0, sizeof(cseg->trailer)); - memset(eseg, 0, sizeof(*eseg) - sizeof(eseg->trailer)); - - eseg->inline_hdr.sz = cpu_to_be16(inline_hdr_sz); + memset(&cseg->trailer, 0, sizeof(cseg->trailer)); + memset(eseg, 0, sizeof(*eseg) - sizeof(eseg->trailer)); - for (i = 0; i < num_frags; i++) { - skb_frag_t *frag = 
&xdptxdf->sinfo->frags[i]; - dma_addr_t addr; + eseg->inline_hdr.sz = cpu_to_be16(inline_hdr_sz); - addr = xdptxdf->dma_arr ? xdptxdf->dma_arr[i] : - page_pool_get_dma_addr(skb_frag_page(frag)) + - skb_frag_off(frag); + for (i = 0; i < num_frags; i++) { + skb_frag_t *frag = &xdptxdf->sinfo->frags[i]; + dma_addr_t addr; - dseg->addr = cpu_to_be64(addr); - dseg->byte_count = cpu_to_be32(skb_frag_size(frag)); - dseg->lkey = sq->mkey_be; - dseg++; - } + addr = xdptxdf->dma_arr ? xdptxdf->dma_arr[i] : + page_pool_get_dma_addr(skb_frag_page(frag)) + + skb_frag_off(frag); - cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt); + dseg->addr = cpu_to_be64(addr); + dseg->byte_count = cpu_to_be32(skb_frag_size(frag)); + dseg->lkey = sq->mkey_be; + dseg++; + } - sq->db.wqe_info[pi] = (struct mlx5e_xdp_wqe_info) { - .num_wqebbs = num_wqebbs, - .num_pkts = 1, - }; + cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt); - sq->pc += num_wqebbs; - } else { - cseg->fm_ce_se = 0; + sq->db.wqe_info[pi] = (struct mlx5e_xdp_wqe_info) { + .num_wqebbs = num_wqebbs, + .num_pkts = 1, + }; - sq->pc++; - } + sq->pc += num_wqebbs; xsk_tx_metadata_request(meta, &mlx5e_xsk_tx_metadata_ops, eseg); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h index e054db1e10f8a..446e492c6bb8e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h @@ -182,13 +182,13 @@ static inline bool mlx5e_xdp_get_inline_state(struct mlx5e_xdpsq *sq, bool cur) return cur; } -static inline bool mlx5e_xdp_mpwqe_is_full(struct mlx5e_tx_mpwqe *session, u8 max_sq_mpw_wqebbs) +static inline bool mlx5e_xdp_mpwqe_is_full(struct mlx5e_tx_mpwqe *session) { if (session->inline_on) return session->ds_count + MLX5E_XDP_INLINE_WQE_MAX_DS_CNT > - max_sq_mpw_wqebbs * MLX5_SEND_WQEBB_NUM_DS; + session->ds_count_max; - return mlx5e_tx_mpwqe_is_full(session, max_sq_mpw_wqebbs); + return mlx5e_tx_mpwqe_is_full(session); } struct mlx5e_xdp_wqe_info { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c index 1b7132fa70de2..2b05536d564ac 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c @@ -123,7 +123,7 @@ int mlx5e_xsk_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix) bitmap_zero(wi->skip_release_bitmap, rq->mpwqe.pages_per_wqe); wi->consumed_strides = 0; - umr_wqe->ctrl.opmod_idx_opcode = + umr_wqe->hdr.ctrl.opmod_idx_opcode = cpu_to_be32((icosq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) | MLX5_OPCODE_UMR); /* Optimized for speed: keep in sync with mlx5e_mpwrq_umr_entry_size. 
*/ @@ -134,7 +134,7 @@ int mlx5e_xsk_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix) offset = offset * sizeof(struct mlx5_klm) * 2 / MLX5_OCTWORD; else if (unlikely(rq->mpwqe.umr_mode == MLX5E_MPWRQ_UMR_MODE_TRIPLE)) offset = offset * sizeof(struct mlx5_ksm) * 4 / MLX5_OCTWORD; - umr_wqe->uctrl.xlt_offset = cpu_to_be16(offset); + umr_wqe->hdr.uctrl.xlt_offset = cpu_to_be16(offset); icosq->db.wqe_info[pi] = (struct mlx5e_icosq_wqe_info) { .wqe_type = MLX5E_ICOSQ_WQE_UMR_RX, @@ -144,7 +144,7 @@ int mlx5e_xsk_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix) icosq->pc += rq->mpwqe.umr_wqebbs; - icosq->doorbell_cseg = &umr_wqe->ctrl; + icosq->doorbell_cseg = &umr_wqe->hdr.ctrl; return 0; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c index 9240cfe25d102..d743e823362ae 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c @@ -72,7 +72,7 @@ static int mlx5e_init_xsk_rq(struct mlx5e_channel *c, rq->netdev = c->netdev; rq->priv = c->priv; rq->tstamp = c->tstamp; - rq->clock = &mdev->clock; + rq->clock = mdev->clock; rq->icosq = &c->icosq; rq->ix = c->ix; rq->channel = c; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c index 501709ac310f2..2dd842aac6fc4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c @@ -277,12 +277,12 @@ static void mlx5e_ipsec_init_macs(struct mlx5e_ipsec_sa_entry *sa_entry, case XFRM_DEV_OFFLOAD_IN: src = attrs->dmac; dst = attrs->smac; - pkey = &attrs->saddr.a4; + pkey = &attrs->addrs.saddr.a4; break; case XFRM_DEV_OFFLOAD_OUT: src = attrs->smac; dst = attrs->dmac; - pkey = &attrs->daddr.a4; + pkey = &attrs->addrs.daddr.a4; break; default: return; @@ -303,6 +303,16 @@ static void mlx5e_ipsec_init_macs(struct mlx5e_ipsec_sa_entry *sa_entry, neigh_release(n); } +static void mlx5e_ipsec_state_mask(struct mlx5e_ipsec_addr *addrs) +{ + /* + * State doesn't have subnet prefixes in outer headers. + * The match is performed for exact source/destination addresses. 
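+ * That is, both masks below are set to all-ones, a full /32 (IPv4) or /128 (IPv6) match. Policies instead derive their masks from the selector prefix length via word_to_mask(); an IPv4 /24 selector, for example, yields the mask 0xFFFFFF00.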
+ */ + memset(addrs->smask.m6, 0xFF, sizeof(__be32) * 4); + memset(addrs->dmask.m6, 0xFF, sizeof(__be32) * 4); +} + void mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry, struct mlx5_accel_esp_xfrm_attrs *attrs) { @@ -374,9 +384,11 @@ void mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry, attrs->spi = be32_to_cpu(x->id.spi); /* source , destination ips */ - memcpy(&attrs->saddr, x->props.saddr.a6, sizeof(attrs->saddr)); - memcpy(&attrs->daddr, x->id.daddr.a6, sizeof(attrs->daddr)); - attrs->family = x->props.family; + memcpy(&attrs->addrs.saddr, x->props.saddr.a6, + sizeof(attrs->addrs.saddr)); + memcpy(&attrs->addrs.daddr, x->id.daddr.a6, sizeof(attrs->addrs.daddr)); + attrs->addrs.family = x->props.family; + mlx5e_ipsec_state_mask(&attrs->addrs); attrs->type = x->xso.type; attrs->reqid = x->props.reqid; attrs->upspec.dport = ntohs(x->sel.dport); @@ -428,7 +440,8 @@ static int mlx5e_xfrm_validate_state(struct mlx5_core_dev *mdev, } if (x->encap) { if (!(mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_ESPINUDP)) { - NL_SET_ERR_MSG_MOD(extack, "Encapsulation is not supported"); + NL_SET_ERR_MSG_MOD(extack, + "Encapsulation is not supported"); return -EINVAL; } @@ -853,13 +866,13 @@ static int mlx5e_ipsec_netevent_event(struct notifier_block *nb, xa_for_each_marked(&ipsec->sadb, idx, sa_entry, MLX5E_IPSEC_TUNNEL_SA) { attrs = &sa_entry->attrs; - if (attrs->family == AF_INET) { - if (!neigh_key_eq32(n, &attrs->saddr.a4) && - !neigh_key_eq32(n, &attrs->daddr.a4)) + if (attrs->addrs.family == AF_INET) { + if (!neigh_key_eq32(n, &attrs->addrs.saddr.a4) && + !neigh_key_eq32(n, &attrs->addrs.daddr.a4)) continue; } else { - if (!neigh_key_eq128(n, &attrs->saddr.a4) && - !neigh_key_eq128(n, &attrs->daddr.a4)) + if (!neigh_key_eq128(n, &attrs->addrs.saddr.a4) && + !neigh_key_eq128(n, &attrs->addrs.daddr.a4)) continue; } @@ -953,21 +966,6 @@ void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv) priv->ipsec = NULL; } -static bool mlx5e_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x) -{ - if (x->props.family == AF_INET) { - /* Offload with IPv4 options is not supported yet */ - if (ip_hdr(skb)->ihl > 5) - return false; - } else { - /* Offload with IPv6 extension headers is not support yet */ - if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr)) - return false; - } - - return true; -} - static void mlx5e_xfrm_advance_esn_state(struct xfrm_state *x) { struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x); @@ -1035,7 +1033,7 @@ static void mlx5e_xfrm_update_stats(struct xfrm_state *x) * by removing always available headers. 
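 * (an Ethernet header plus one IPv4 or IPv6 header per successfully decrypted packet, as computed below)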
*/ headers = sizeof(struct ethhdr); - if (sa_entry->attrs.family == AF_INET) + if (sa_entry->attrs.addrs.family == AF_INET) headers += sizeof(struct iphdr); else headers += sizeof(struct ipv6hdr); @@ -1044,6 +1042,43 @@ static void mlx5e_xfrm_update_stats(struct xfrm_state *x) x->curlft.bytes += success_bytes - headers * success_packets; } +static __be32 word_to_mask(int prefix) +{ + if (prefix < 0) + return 0; + + if (!prefix || prefix > 31) + return cpu_to_be32(0xFFFFFFFF); + + return cpu_to_be32(((1U << prefix) - 1) << (32 - prefix)); +} + +static void mlx5e_ipsec_policy_mask(struct mlx5e_ipsec_addr *addrs, + struct xfrm_selector *sel) +{ + int i; + + if (addrs->family == AF_INET) { + addrs->smask.m4 = word_to_mask(sel->prefixlen_s); + addrs->saddr.a4 &= addrs->smask.m4; + addrs->dmask.m4 = word_to_mask(sel->prefixlen_d); + addrs->daddr.a4 &= addrs->dmask.m4; + return; + } + + for (i = 0; i < 4; i++) { + if (sel->prefixlen_s != 32 * i) + addrs->smask.m6[i] = + word_to_mask(sel->prefixlen_s - 32 * i); + addrs->saddr.a6[i] &= addrs->smask.m6[i]; + + if (sel->prefixlen_d != 32 * i) + addrs->dmask.m6[i] = + word_to_mask(sel->prefixlen_d - 32 * i); + addrs->daddr.a6[i] &= addrs->dmask.m6[i]; + } +} + static int mlx5e_xfrm_validate_policy(struct mlx5_core_dev *mdev, struct xfrm_policy *x, struct netlink_ext_ack *extack) @@ -1116,9 +1151,10 @@ mlx5e_ipsec_build_accel_pol_attrs(struct mlx5e_ipsec_pol_entry *pol_entry, sel = &x->selector; memset(attrs, 0, sizeof(*attrs)); - memcpy(&attrs->saddr, sel->saddr.a6, sizeof(attrs->saddr)); - memcpy(&attrs->daddr, sel->daddr.a6, sizeof(attrs->daddr)); - attrs->family = sel->family; + memcpy(&attrs->addrs.saddr, sel->saddr.a6, sizeof(attrs->addrs.saddr)); + memcpy(&attrs->addrs.daddr, sel->daddr.a6, sizeof(attrs->addrs.daddr)); + attrs->addrs.family = sel->family; + mlx5e_ipsec_policy_mask(&attrs->addrs, sel); attrs->dir = x->xdo.dir; attrs->action = x->action; attrs->type = XFRM_DEV_OFFLOAD_PACKET; @@ -1196,7 +1232,6 @@ static const struct xfrmdev_ops mlx5e_ipsec_xfrmdev_ops = { .xdo_dev_state_add = mlx5e_xfrm_add_state, .xdo_dev_state_delete = mlx5e_xfrm_del_state, .xdo_dev_state_free = mlx5e_xfrm_free_state, - .xdo_dev_offload_ok = mlx5e_ipsec_offload_ok, .xdo_dev_state_advance_esn = mlx5e_xfrm_advance_esn_state, .xdo_dev_state_update_stats = mlx5e_xfrm_update_stats, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h index 7d943e93cf6dc..a63c2289f8af9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h @@ -76,27 +76,36 @@ struct mlx5_replay_esn { u8 trigger : 1; }; -struct mlx5_accel_esp_xfrm_attrs { - u32 spi; - u32 mode; - struct aes_gcm_keymat aes_gcm; - +struct mlx5e_ipsec_addr { union { __be32 a4; __be32 a6[4]; } saddr; - + union { + __be32 m4; + __be32 m6[4]; + } smask; union { __be32 a4; __be32 a6[4]; } daddr; + union { + __be32 m4; + __be32 m6[4]; + } dmask; + u8 family; +}; +struct mlx5_accel_esp_xfrm_attrs { + u32 spi; + u32 mode; + struct aes_gcm_keymat aes_gcm; + struct mlx5e_ipsec_addr addrs; struct upspec upspec; u8 dir : 2; u8 type : 2; u8 drop : 1; u8 encap : 1; - u8 family; struct mlx5_replay_esn replay_esn; u32 authsize; u32 reqid; @@ -128,6 +137,7 @@ struct mlx5e_ipsec_hw_stats { u64 ipsec_rx_bytes; u64 ipsec_rx_drop_pkts; u64 ipsec_rx_drop_bytes; + u64 ipsec_rx_drop_mismatch_sa_sel; u64 ipsec_tx_pkts; u64 ipsec_tx_bytes; u64 ipsec_tx_drop_pkts; @@ -184,6 +194,7 @@ struct 
mlx5e_ipsec_ft { struct mutex mutex; /* Protect changes to this struct */ struct mlx5_flow_table *pol; struct mlx5_flow_table *sa; + struct mlx5_flow_table *sa_sel; struct mlx5_flow_table *status; u32 refcnt; }; @@ -195,6 +206,8 @@ struct mlx5e_ipsec_drop { struct mlx5e_ipsec_rule { struct mlx5_flow_handle *rule; + struct mlx5_flow_handle *status_pass; + struct mlx5_flow_handle *sa_sel; struct mlx5_modify_hdr *modify_hdr; struct mlx5_pkt_reformat *pkt_reformat; struct mlx5_fc *fc; @@ -206,6 +219,7 @@ struct mlx5e_ipsec_rule { struct mlx5e_ipsec_miss { struct mlx5_flow_group *group; struct mlx5_flow_handle *rule; + struct mlx5_fc *fc; }; struct mlx5e_ipsec_tx_create_attr { @@ -274,18 +288,8 @@ struct mlx5e_ipsec_sa_entry { }; struct mlx5_accel_pol_xfrm_attrs { - union { - __be32 a4; - __be32 a6[4]; - } saddr; - - union { - __be32 a4; - __be32 a6[4]; - } daddr; - + struct mlx5e_ipsec_addr addrs; struct upspec upspec; - u8 family; u8 action; u8 type : 2; u8 dir : 2; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c index e7b64679f1219..98b6a3a623f99 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c @@ -16,6 +16,16 @@ #define MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_SIZE 16 #define IPSEC_TUNNEL_DEFAULT_TTL 0x40 +#define MLX5_IPSEC_FS_SA_SELECTOR_MAX_NUM_GROUPS 16 + +enum { + MLX5_IPSEC_ASO_OK, + MLX5_IPSEC_ASO_BAD_REPLY, + + /* For crypto offload, set by driver */ + MLX5_IPSEC_ASO_SW_CRYPTO_OFFLOAD = 0xAA, +}; + struct mlx5e_ipsec_fc { struct mlx5_fc *cnt; struct mlx5_fc *drop; @@ -33,6 +43,9 @@ struct mlx5e_ipsec_tx { }; struct mlx5e_ipsec_status_checks { + struct mlx5_flow_group *pass_group; + struct mlx5_flow_handle *packet_offload_pass_rule; + struct mlx5_flow_handle *crypto_offload_pass_rule; struct mlx5_flow_group *drop_all_group; struct mlx5e_ipsec_drop all; }; @@ -41,10 +54,12 @@ struct mlx5e_ipsec_rx { struct mlx5e_ipsec_ft ft; struct mlx5e_ipsec_miss pol; struct mlx5e_ipsec_miss sa; - struct mlx5e_ipsec_rule status; - struct mlx5e_ipsec_status_checks status_drops; + struct mlx5e_ipsec_miss sa_sel; + struct mlx5e_ipsec_status_checks status_checks; struct mlx5e_ipsec_fc *fc; struct mlx5_fs_chains *chains; + struct mlx5_flow_table *pol_miss_ft; + struct mlx5_flow_handle *pol_miss_rule; u8 allow_tunnel_mode : 1; }; @@ -130,11 +145,12 @@ static void ipsec_chains_put_table(struct mlx5_fs_chains *chains, u32 prio) static struct mlx5_flow_table *ipsec_ft_create(struct mlx5_flow_namespace *ns, int level, int prio, + int num_reserved_entries, int max_num_groups, u32 flags) { struct mlx5_flow_table_attr ft_attr = {}; - ft_attr.autogroup.num_reserved_entries = 1; + ft_attr.autogroup.num_reserved_entries = num_reserved_entries; ft_attr.autogroup.max_num_groups = max_num_groups; ft_attr.max_fte = NUM_IPSEC_FTE; ft_attr.level = level; @@ -147,22 +163,35 @@ static struct mlx5_flow_table *ipsec_ft_create(struct mlx5_flow_namespace *ns, static void ipsec_rx_status_drop_destroy(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_rx *rx) { - mlx5_del_flow_rules(rx->status_drops.all.rule); - mlx5_fc_destroy(ipsec->mdev, rx->status_drops.all.fc); - mlx5_destroy_flow_group(rx->status_drops.drop_all_group); + mlx5_del_flow_rules(rx->status_checks.all.rule); + mlx5_fc_destroy(ipsec->mdev, rx->status_checks.all.fc); + mlx5_destroy_flow_group(rx->status_checks.drop_all_group); } static void ipsec_rx_status_pass_destroy(struct mlx5e_ipsec *ipsec, 
struct mlx5e_ipsec_rx *rx) { - mlx5_del_flow_rules(rx->status.rule); + mlx5_del_flow_rules(rx->status_checks.packet_offload_pass_rule); + mlx5_del_flow_rules(rx->status_checks.crypto_offload_pass_rule); +} - if (rx != ipsec->rx_esw) - return; +static void ipsec_rx_rule_add_match_obj(struct mlx5e_ipsec_sa_entry *sa_entry, + struct mlx5e_ipsec_rx *rx, + struct mlx5_flow_spec *spec) +{ + struct mlx5e_ipsec *ipsec = sa_entry->ipsec; -#ifdef CONFIG_MLX5_ESWITCH - mlx5_chains_put_table(esw_chains(ipsec->mdev->priv.eswitch), 0, 1, 0); -#endif + if (rx == ipsec->rx_esw) { + mlx5_esw_ipsec_rx_rule_add_match_obj(sa_entry, spec); + } else { + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, + misc_parameters_2.metadata_reg_c_2); + MLX5_SET(fte_match_param, spec->match_value, + misc_parameters_2.metadata_reg_c_2, + sa_entry->ipsec_obj_id | BIT(31)); + + spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2; + } } static int rx_add_rule_drop_auth_trailer(struct mlx5e_ipsec_sa_entry *sa_entry, @@ -200,11 +229,8 @@ static int rx_add_rule_drop_auth_trailer(struct mlx5e_ipsec_sa_entry *sa_entry, MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.ipsec_syndrome); MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.ipsec_syndrome, 1); - MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_c_2); - MLX5_SET(fte_match_param, spec->match_value, - misc_parameters_2.metadata_reg_c_2, - sa_entry->ipsec_obj_id | BIT(31)); spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2; + ipsec_rx_rule_add_match_obj(sa_entry, rx, spec); rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1); if (IS_ERR(rule)) { err = PTR_ERR(rule); @@ -281,10 +307,8 @@ static int rx_add_rule_drop_replay(struct mlx5e_ipsec_sa_entry *sa_entry, struct MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_c_4); MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_4, 1); - MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_c_2); - MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_2, - sa_entry->ipsec_obj_id | BIT(31)); spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2; + ipsec_rx_rule_add_match_obj(sa_entry, rx, spec); rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1); if (IS_ERR(rule)) { err = PTR_ERR(rule); @@ -359,9 +383,9 @@ static int ipsec_rx_status_drop_all_create(struct mlx5e_ipsec *ipsec, goto err_rule; } - rx->status_drops.drop_all_group = g; - rx->status_drops.all.rule = rule; - rx->status_drops.all.fc = flow_counter; + rx->status_checks.drop_all_group = g; + rx->status_checks.all.rule = rule; + rx->status_checks.all.fc = flow_counter; kvfree(flow_group_in); kvfree(spec); @@ -377,9 +401,52 @@ static int ipsec_rx_status_drop_all_create(struct mlx5e_ipsec *ipsec, return err; } -static int ipsec_rx_status_pass_create(struct mlx5e_ipsec *ipsec, - struct mlx5e_ipsec_rx *rx, - struct mlx5_flow_destination *dest) +static int ipsec_rx_status_pass_group_create(struct mlx5e_ipsec *ipsec, + struct mlx5e_ipsec_rx *rx) +{ + int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); + struct mlx5_flow_table *ft = rx->ft.status; + struct mlx5_flow_group *fg; + void *match_criteria; + u32 *flow_group_in; + int err = 0; + + flow_group_in = kvzalloc(inlen, GFP_KERNEL); + if (!flow_group_in) + return -ENOMEM; + + MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, + MLX5_MATCH_MISC_PARAMETERS_2); + match_criteria = 
MLX5_ADDR_OF(create_flow_group_in, flow_group_in, + match_criteria); + MLX5_SET_TO_ONES(fte_match_param, match_criteria, + misc_parameters_2.ipsec_syndrome); + MLX5_SET_TO_ONES(fte_match_param, match_criteria, + misc_parameters_2.metadata_reg_c_4); + + MLX5_SET(create_flow_group_in, flow_group_in, + start_flow_index, ft->max_fte - 3); + MLX5_SET(create_flow_group_in, flow_group_in, + end_flow_index, ft->max_fte - 2); + + fg = mlx5_create_flow_group(ft, flow_group_in); + if (IS_ERR(fg)) { + err = PTR_ERR(fg); + mlx5_core_warn(ipsec->mdev, + "Failed to create rx status pass flow group, err=%d\n", + err); + } + rx->status_checks.pass_group = fg; + + kvfree(flow_group_in); + return err; +} + +static struct mlx5_flow_handle * +ipsec_rx_status_pass_create(struct mlx5e_ipsec *ipsec, + struct mlx5e_ipsec_rx *rx, + struct mlx5_flow_destination *dest, + u8 aso_ok) { struct mlx5_flow_act flow_act = {}; struct mlx5_flow_handle *rule; @@ -388,7 +455,7 @@ static int ipsec_rx_status_pass_create(struct mlx5e_ipsec *ipsec, spec = kvzalloc(sizeof(*spec), GFP_KERNEL); if (!spec) - return -ENOMEM; + return ERR_PTR(-ENOMEM); MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.ipsec_syndrome); @@ -397,11 +464,11 @@ static int ipsec_rx_status_pass_create(struct mlx5e_ipsec *ipsec, MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.ipsec_syndrome, 0); MLX5_SET(fte_match_param, spec->match_value, - misc_parameters_2.metadata_reg_c_4, 0); + misc_parameters_2.metadata_reg_c_4, aso_ok); if (rx == ipsec->rx_esw) spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK; spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2; - flow_act.flags = FLOW_ACT_NO_APPEND; + flow_act.flags = FLOW_ACT_NO_APPEND | FLOW_ACT_IGNORE_FLOW_LEVEL; flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_COUNT; rule = mlx5_add_flow_rules(rx->ft.status, spec, &flow_act, dest, 2); @@ -412,19 +479,19 @@ static int ipsec_rx_status_pass_create(struct mlx5e_ipsec *ipsec, goto err_rule; } - rx->status.rule = rule; kvfree(spec); - return 0; + return rule; err_rule: kvfree(spec); - return err; + return ERR_PTR(err); } static void mlx5_ipsec_rx_status_destroy(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_rx *rx) { ipsec_rx_status_pass_destroy(ipsec, rx); + mlx5_destroy_flow_group(rx->status_checks.pass_group); ipsec_rx_status_drop_destroy(ipsec, rx); } @@ -432,19 +499,44 @@ static int mlx5_ipsec_rx_status_create(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_rx *rx, struct mlx5_flow_destination *dest) { + struct mlx5_flow_destination pol_dest[2]; + struct mlx5_flow_handle *rule; int err; err = ipsec_rx_status_drop_all_create(ipsec, rx); if (err) return err; - err = ipsec_rx_status_pass_create(ipsec, rx, dest); + err = ipsec_rx_status_pass_group_create(ipsec, rx); if (err) - goto err_pass_create; + goto err_pass_group_create; + + rule = ipsec_rx_status_pass_create(ipsec, rx, dest, + MLX5_IPSEC_ASO_SW_CRYPTO_OFFLOAD); + if (IS_ERR(rule)) { + err = PTR_ERR(rule); + goto err_crypto_offload_pass_create; + } + rx->status_checks.crypto_offload_pass_rule = rule; + + pol_dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; + pol_dest[0].ft = rx->ft.pol; + pol_dest[1] = dest[1]; + rule = ipsec_rx_status_pass_create(ipsec, rx, pol_dest, + MLX5_IPSEC_ASO_OK); + if (IS_ERR(rule)) { + err = PTR_ERR(rule); + goto err_packet_offload_pass_create; + } + rx->status_checks.packet_offload_pass_rule = rule; return 0; -err_pass_create: +err_packet_offload_pass_create: + 
mlx5_del_flow_rules(rx->status_checks.crypto_offload_pass_rule); +err_crypto_offload_pass_create: + mlx5_destroy_flow_group(rx->status_checks.pass_group); +err_pass_group_create: ipsec_rx_status_drop_destroy(ipsec, rx); return err; } @@ -493,6 +585,15 @@ static int ipsec_miss_create(struct mlx5_core_dev *mdev, return err; } +static void ipsec_rx_update_default_dest(struct mlx5e_ipsec_rx *rx, + struct mlx5_flow_destination *old_dest, + struct mlx5_flow_destination *new_dest) +{ + mlx5_modify_rule_destination(rx->pol_miss_rule, new_dest, old_dest); + mlx5_modify_rule_destination(rx->status_checks.crypto_offload_pass_rule, + new_dest, old_dest); +} + static void handle_ipsec_rx_bringup(struct mlx5e_ipsec *ipsec, u32 family) { struct mlx5e_ipsec_rx *rx = ipsec_rx(ipsec, family, XFRM_DEV_OFFLOAD_PACKET); @@ -507,8 +608,7 @@ static void handle_ipsec_rx_bringup(struct mlx5e_ipsec *ipsec, u32 family) new_dest.ft = mlx5_ipsec_fs_roce_ft_get(ipsec->roce, family); new_dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; - mlx5_modify_rule_destination(rx->status.rule, &new_dest, &old_dest); - mlx5_modify_rule_destination(rx->sa.rule, &new_dest, &old_dest); + ipsec_rx_update_default_dest(rx, &old_dest, &new_dest); } static void handle_ipsec_rx_cleanup(struct mlx5e_ipsec *ipsec, u32 family) @@ -520,8 +620,7 @@ static void handle_ipsec_rx_cleanup(struct mlx5e_ipsec *ipsec, u32 family) old_dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; new_dest = mlx5_ttc_get_default_dest(mlx5e_fs_get_ttc(ipsec->fs, false), family2tt(family)); - mlx5_modify_rule_destination(rx->sa.rule, &new_dest, &old_dest); - mlx5_modify_rule_destination(rx->status.rule, &new_dest, &old_dest); + ipsec_rx_update_default_dest(rx, &old_dest, &new_dest); mlx5_ipsec_fs_roce_rx_destroy(ipsec->roce, family, ipsec->mdev); } @@ -577,13 +676,8 @@ static void ipsec_rx_ft_disconnect(struct mlx5e_ipsec *ipsec, u32 family) mlx5_ttc_fwd_default_dest(ttc, family2tt(family)); } -static void rx_destroy(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec, - struct mlx5e_ipsec_rx *rx, u32 family) +static void ipsec_rx_policy_destroy(struct mlx5e_ipsec_rx *rx) { - /* disconnect */ - if (rx != ipsec->rx_esw) - ipsec_rx_ft_disconnect(ipsec, family); - if (rx->chains) { ipsec_chains_destroy(rx->chains); } else { @@ -592,6 +686,29 @@ static void rx_destroy(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec, mlx5_destroy_flow_table(rx->ft.pol); } + if (rx->pol_miss_rule) { + mlx5_del_flow_rules(rx->pol_miss_rule); + mlx5_destroy_flow_table(rx->pol_miss_ft); + } +} + +static void ipsec_rx_sa_selector_destroy(struct mlx5_core_dev *mdev, + struct mlx5e_ipsec_rx *rx) +{ + mlx5_del_flow_rules(rx->sa_sel.rule); + mlx5_fc_destroy(mdev, rx->sa_sel.fc); + rx->sa_sel.fc = NULL; + mlx5_destroy_flow_group(rx->sa_sel.group); + mlx5_destroy_flow_table(rx->ft.sa_sel); +} + +static void rx_destroy(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec, + struct mlx5e_ipsec_rx *rx, u32 family) +{ + /* disconnect */ + if (rx != ipsec->rx_esw) + ipsec_rx_ft_disconnect(ipsec, family); + mlx5_del_flow_rules(rx->sa.rule); mlx5_destroy_flow_group(rx->sa.group); mlx5_destroy_flow_table(rx->ft.sa); @@ -600,7 +717,17 @@ static void rx_destroy(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec, mlx5_ipsec_rx_status_destroy(ipsec, rx); mlx5_destroy_flow_table(rx->ft.status); + ipsec_rx_sa_selector_destroy(mdev, rx); + + ipsec_rx_policy_destroy(rx); + mlx5_ipsec_fs_roce_rx_destroy(ipsec->roce, family, mdev); + +#ifdef CONFIG_MLX5_ESWITCH + if (rx == ipsec->rx_esw) + 
mlx5_chains_put_table(esw_chains(ipsec->mdev->priv.eswitch), + 0, 1, 0); +#endif } static void ipsec_rx_create_attr_set(struct mlx5e_ipsec *ipsec, @@ -652,6 +779,28 @@ static int ipsec_rx_status_pass_dest_get(struct mlx5e_ipsec *ipsec, return 0; } +static void ipsec_rx_sa_miss_dest_get(struct mlx5e_ipsec *ipsec, + struct mlx5e_ipsec_rx *rx, + struct mlx5e_ipsec_rx_create_attr *attr, + struct mlx5_flow_destination *dest, + struct mlx5_flow_destination *miss_dest) +{ + if (rx == ipsec->rx_esw) + *miss_dest = *dest; + else + *miss_dest = + mlx5_ttc_get_default_dest(attr->ttc, + family2tt(attr->family)); +} + +static void ipsec_rx_default_dest_get(struct mlx5e_ipsec *ipsec, + struct mlx5e_ipsec_rx *rx, + struct mlx5_flow_destination *dest) +{ + dest->type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; + dest->ft = rx->pol_miss_ft; +} + static void ipsec_rx_ft_connect(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_rx *rx, struct mlx5e_ipsec_rx_create_attr *attr) @@ -659,15 +808,219 @@ static void ipsec_rx_ft_connect(struct mlx5e_ipsec *ipsec, struct mlx5_flow_destination dest = {}; dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; - dest.ft = rx->ft.pol; + dest.ft = rx->ft.sa; mlx5_ttc_fwd_dest(attr->ttc, family2tt(attr->family), &dest); } +static int ipsec_rx_chains_create_miss(struct mlx5e_ipsec *ipsec, + struct mlx5e_ipsec_rx *rx, + struct mlx5e_ipsec_rx_create_attr *attr, + struct mlx5_flow_destination *dest) +{ + struct mlx5_flow_table_attr ft_attr = {}; + MLX5_DECLARE_FLOW_ACT(flow_act); + struct mlx5_flow_handle *rule; + struct mlx5_flow_table *ft; + int err; + + if (rx == ipsec->rx_esw) { + /* No need to create miss table for switchdev mode, + * just set it to the root chain table. + */ + rx->pol_miss_ft = dest->ft; + return 0; + } + + ft_attr.max_fte = 1; + ft_attr.autogroup.max_num_groups = 1; + ft_attr.level = attr->pol_level; + ft_attr.prio = attr->prio; + + ft = mlx5_create_auto_grouped_flow_table(attr->ns, &ft_attr); + if (IS_ERR(ft)) + return PTR_ERR(ft); + + rule = mlx5_add_flow_rules(ft, NULL, &flow_act, dest, 1); + if (IS_ERR(rule)) { + err = PTR_ERR(rule); + goto err_rule; + } + + rx->pol_miss_ft = ft; + rx->pol_miss_rule = rule; + + return 0; + +err_rule: + mlx5_destroy_flow_table(ft); + return err; +} + +static int ipsec_rx_policy_create(struct mlx5e_ipsec *ipsec, + struct mlx5e_ipsec_rx *rx, + struct mlx5e_ipsec_rx_create_attr *attr, + struct mlx5_flow_destination *dest) +{ + struct mlx5_flow_destination default_dest; + struct mlx5_core_dev *mdev = ipsec->mdev; + struct mlx5_flow_table *ft; + int err; + + err = ipsec_rx_chains_create_miss(ipsec, rx, attr, dest); + if (err) + return err; + + ipsec_rx_default_dest_get(ipsec, rx, &default_dest); + + if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_PRIO) { + rx->chains = ipsec_chains_create(mdev, + default_dest.ft, + attr->chains_ns, + attr->prio, + attr->sa_level, + &rx->ft.pol); + if (IS_ERR(rx->chains)) + err = PTR_ERR(rx->chains); + } else { + ft = ipsec_ft_create(attr->ns, attr->pol_level, + attr->prio, 1, 2, 0); + if (IS_ERR(ft)) { + err = PTR_ERR(ft); + goto err_out; + } + rx->ft.pol = ft; + + err = ipsec_miss_create(mdev, rx->ft.pol, &rx->pol, + &default_dest); + if (err) + mlx5_destroy_flow_table(rx->ft.pol); + } + + if (!err) + return 0; + +err_out: + if (rx->pol_miss_rule) { + mlx5_del_flow_rules(rx->pol_miss_rule); + mlx5_destroy_flow_table(rx->pol_miss_ft); + } + return err; +} + +static int ipsec_rx_sa_selector_create(struct mlx5e_ipsec *ipsec, + struct mlx5e_ipsec_rx *rx, + struct mlx5e_ipsec_rx_create_attr *attr) +{ 
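+	/* Per-SA upper-protocol rules are added to this table later by rx_add_rule_sa_selector(). The group reserved at the last flow index below holds a catch-all count-and-drop rule for packets that miss every SA selector; its counter is reported as ipsec_rx_drop_mismatch_sa_sel. */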
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); + struct mlx5_core_dev *mdev = ipsec->mdev; + struct mlx5_flow_act flow_act = {}; + struct mlx5_flow_destination dest; + struct mlx5_flow_handle *rule; + struct mlx5_flow_table *ft; + struct mlx5_flow_group *fg; + u32 *flow_group_in; + struct mlx5_fc *fc; + int err; + + flow_group_in = kvzalloc(inlen, GFP_KERNEL); + if (!flow_group_in) + return -ENOMEM; + + ft = ipsec_ft_create(attr->ns, attr->status_level, attr->prio, 1, + MLX5_IPSEC_FS_SA_SELECTOR_MAX_NUM_GROUPS, 0); + if (IS_ERR(ft)) { + err = PTR_ERR(ft); + mlx5_core_err(mdev, "Failed to create RX SA selector flow table, err=%d\n", + err); + goto err_ft; + } + + MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, + ft->max_fte - 1); + MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, + ft->max_fte - 1); + fg = mlx5_create_flow_group(ft, flow_group_in); + if (IS_ERR(fg)) { + err = PTR_ERR(fg); + mlx5_core_err(mdev, "Failed to create RX SA selector miss group, err=%d\n", + err); + goto err_fg; + } + + fc = mlx5_fc_create(mdev, false); + if (IS_ERR(fc)) { + err = PTR_ERR(fc); + mlx5_core_err(mdev, + "Failed to create ipsec RX SA selector miss rule counter, err=%d\n", + err); + goto err_cnt; + } + + dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; + dest.counter = fc; + flow_act.action = + MLX5_FLOW_CONTEXT_ACTION_COUNT | MLX5_FLOW_CONTEXT_ACTION_DROP; + + rule = mlx5_add_flow_rules(ft, NULL, &flow_act, &dest, 1); + if (IS_ERR(rule)) { + err = PTR_ERR(rule); + mlx5_core_err(mdev, "Failed to create RX SA selector miss drop rule, err=%d\n", + err); + goto err_rule; + } + + rx->ft.sa_sel = ft; + rx->sa_sel.group = fg; + rx->sa_sel.fc = fc; + rx->sa_sel.rule = rule; + + kvfree(flow_group_in); + + return 0; + +err_rule: + mlx5_fc_destroy(mdev, fc); +err_cnt: + mlx5_destroy_flow_group(fg); +err_fg: + mlx5_destroy_flow_table(ft); +err_ft: + kvfree(flow_group_in); + return err; +} + +/* The decryption processing is as follows: + * + * +----------+ +-------------+ + * | | | | + * | Kernel <--------------+----------+ policy miss <------------+ + * | | ^ | | ^ + * +----^-----+ | +-------------+ | + * | crypto | + * miss offload ok allow/default + * ^ ^ ^ + * | | packet | + * +----+---------+ +----+-------------+ offload ok +------+---+ + * | | | | (no UPSPEC) | | + * | SA (decrypt) +-----> status +--->------->----+ policy | + * | | | | | | + * +--------------+ ++---------+-------+ +-^----+---+ + * | | | | + * v packet +-->->---+ v + * | offload ok match | + * fails (with UPSPEC) | block + * | | +-------------+-+ | + * v v | | miss v + * drop +---> SA sel +--------->drop + * | | + * +---------------+ + */ + static int rx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_rx *rx, u32 family) { + struct mlx5_flow_destination dest[2], miss_dest; struct mlx5e_ipsec_rx_create_attr attr; - struct mlx5_flow_destination dest[2]; struct mlx5_flow_table *ft; u32 flags = 0; int err; @@ -678,80 +1031,61 @@ static int rx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec, if (err) return err; - ft = ipsec_ft_create(attr.ns, attr.status_level, attr.prio, 3, 0); + ft = ipsec_ft_create(attr.ns, attr.status_level, attr.prio, 3, 4, 0); if (IS_ERR(ft)) { err = PTR_ERR(ft); goto err_fs_ft_status; } rx->ft.status = ft; - dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; - dest[1].counter = rx->fc->cnt; - err = mlx5_ipsec_rx_status_create(ipsec, rx, dest); + err = ipsec_rx_sa_selector_create(ipsec, rx, &attr); if (err) - goto err_add; + goto 
err_fs_ft_sa_sel; /* Create FT */ if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_TUNNEL) rx->allow_tunnel_mode = mlx5_eswitch_block_encap(mdev); if (rx->allow_tunnel_mode) flags = MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT; - ft = ipsec_ft_create(attr.ns, attr.sa_level, attr.prio, 2, flags); + ft = ipsec_ft_create(attr.ns, attr.sa_level, attr.prio, 1, 2, flags); if (IS_ERR(ft)) { err = PTR_ERR(ft); goto err_fs_ft; } rx->ft.sa = ft; - err = ipsec_miss_create(mdev, rx->ft.sa, &rx->sa, dest); + ipsec_rx_sa_miss_dest_get(ipsec, rx, &attr, &dest[0], &miss_dest); + err = ipsec_miss_create(mdev, rx->ft.sa, &rx->sa, &miss_dest); if (err) goto err_fs; - if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_PRIO) { - rx->chains = ipsec_chains_create(mdev, rx->ft.sa, - attr.chains_ns, - attr.prio, - attr.pol_level, - &rx->ft.pol); - if (IS_ERR(rx->chains)) { - err = PTR_ERR(rx->chains); - goto err_pol_ft; - } - - goto connect; - } + err = ipsec_rx_policy_create(ipsec, rx, &attr, &dest[0]); + if (err) + goto err_policy; - ft = ipsec_ft_create(attr.ns, attr.pol_level, attr.prio, 2, 0); - if (IS_ERR(ft)) { - err = PTR_ERR(ft); - goto err_pol_ft; - } - rx->ft.pol = ft; - memset(dest, 0x00, 2 * sizeof(*dest)); - dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; - dest[0].ft = rx->ft.sa; - err = ipsec_miss_create(mdev, rx->ft.pol, &rx->pol, dest); + dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; + dest[1].counter = rx->fc->cnt; + err = mlx5_ipsec_rx_status_create(ipsec, rx, dest); if (err) - goto err_pol_miss; + goto err_add; -connect: /* connect */ if (rx != ipsec->rx_esw) ipsec_rx_ft_connect(ipsec, rx, &attr); return 0; -err_pol_miss: - mlx5_destroy_flow_table(rx->ft.pol); -err_pol_ft: +err_add: + ipsec_rx_policy_destroy(rx); +err_policy: mlx5_del_flow_rules(rx->sa.rule); mlx5_destroy_flow_group(rx->sa.group); err_fs: mlx5_destroy_flow_table(rx->ft.sa); -err_fs_ft: if (rx->allow_tunnel_mode) mlx5_eswitch_unblock_encap(mdev); - mlx5_ipsec_rx_status_destroy(ipsec, rx); -err_add: +err_fs_ft: + ipsec_rx_sa_selector_destroy(mdev, rx); +err_fs_ft_sa_sel: mlx5_destroy_flow_table(rx->ft.status); err_fs_ft_status: mlx5_ipsec_fs_roce_rx_destroy(ipsec->roce, family, mdev); @@ -941,7 +1275,7 @@ static int tx_create(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_tx *tx, int err; ipsec_tx_create_attr_set(ipsec, tx, &attr); - ft = ipsec_ft_create(tx->ns, attr.cnt_level, attr.prio, 1, 0); + ft = ipsec_ft_create(tx->ns, attr.cnt_level, attr.prio, 1, 1, 0); if (IS_ERR(ft)) return PTR_ERR(ft); tx->ft.status = ft; @@ -954,7 +1288,7 @@ static int tx_create(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_tx *tx, tx->allow_tunnel_mode = mlx5_eswitch_block_encap(mdev); if (tx->allow_tunnel_mode) flags = MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT; - ft = ipsec_ft_create(tx->ns, attr.sa_level, attr.prio, 4, flags); + ft = ipsec_ft_create(tx->ns, attr.sa_level, attr.prio, 1, 4, flags); if (IS_ERR(ft)) { err = PTR_ERR(ft); goto err_sa_ft; @@ -982,7 +1316,7 @@ static int tx_create(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_tx *tx, goto connect_roce; } - ft = ipsec_ft_create(tx->ns, attr.pol_level, attr.prio, 2, 0); + ft = ipsec_ft_create(tx->ns, attr.pol_level, attr.prio, 1, 2, 0); if (IS_ERR(ft)) { err = PTR_ERR(ft); goto err_pol_ft; @@ -1150,9 +1484,14 @@ static void tx_ft_put_policy(struct mlx5e_ipsec *ipsec, u32 prio, int type) mutex_unlock(&tx->ft.mutex); } -static void setup_fte_addr4(struct mlx5_flow_spec *spec, __be32 *saddr, - __be32 *daddr) +static void setup_fte_addr4(struct mlx5_flow_spec *spec, + struct mlx5e_ipsec_addr *addrs) 
{ + __be32 *saddr = &addrs->saddr.a4; + __be32 *smask = &addrs->smask.m4; + __be32 *daddr = &addrs->daddr.a4; + __be32 *dmask = &addrs->dmask.m4; + if (!*saddr && !*daddr) return; @@ -1164,21 +1503,26 @@ static void setup_fte_addr4(struct mlx5_flow_spec *spec, __be32 *saddr, if (*saddr) { memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4), saddr, 4); - MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, - outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4); + memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_criteria, + outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4), smask, 4); } if (*daddr) { memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4), daddr, 4); - MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, - outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4); + memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_criteria, + outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4), dmask, 4); } } -static void setup_fte_addr6(struct mlx5_flow_spec *spec, __be32 *saddr, - __be32 *daddr) +static void setup_fte_addr6(struct mlx5_flow_spec *spec, + struct mlx5e_ipsec_addr *addrs) { + __be32 *saddr = addrs->saddr.a6; + __be32 *smask = addrs->smask.m6; + __be32 *daddr = addrs->daddr.a6; + __be32 *dmask = addrs->dmask.m6; + if (addr6_all_zero(saddr) && addr6_all_zero(daddr)) return; @@ -1190,15 +1534,15 @@ static void setup_fte_addr6(struct mlx5_flow_spec *spec, __be32 *saddr, if (!addr6_all_zero(saddr)) { memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6), saddr, 16); - memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria, - outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6), 0xff, 16); + memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_criteria, + outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6), smask, 16); } if (!addr6_all_zero(daddr)) { memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6), daddr, 16); - memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria, - outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6), 0xff, 16); + memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_criteria, + outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6), dmask, 16); } } @@ -1340,7 +1684,8 @@ static int setup_modify_header(struct mlx5e_ipsec *ipsec, int type, u32 val, u8 MLX5_ACTION_TYPE_SET); MLX5_SET(set_action_in, action[2], field, MLX5_ACTION_IN_FIELD_METADATA_REG_C_4); - MLX5_SET(set_action_in, action[2], data, 0); + MLX5_SET(set_action_in, action[2], data, + MLX5_IPSEC_ASO_SW_CRYPTO_OFFLOAD); MLX5_SET(set_action_in, action[2], offset, 0); MLX5_SET(set_action_in, action[2], length, 32); } @@ -1387,7 +1732,7 @@ setup_pkt_tunnel_reformat(struct mlx5_core_dev *mdev, if (attrs->dir == XFRM_DEV_OFFLOAD_OUT) { bfflen += sizeof(*esp_hdr) + 8; - switch (attrs->family) { + switch (attrs->addrs.family) { case AF_INET: bfflen += sizeof(*iphdr); break; @@ -1404,7 +1749,7 @@ setup_pkt_tunnel_reformat(struct mlx5_core_dev *mdev, return -ENOMEM; eth_hdr = (struct ethhdr *)reformatbf; - switch (attrs->family) { + switch (attrs->addrs.family) { case AF_INET: eth_hdr->h_proto = htons(ETH_P_IP); break; @@ -1427,11 +1772,11 @@ setup_pkt_tunnel_reformat(struct mlx5_core_dev *mdev, reformat_params->param_0 = attrs->authsize; hdr = reformatbf + sizeof(*eth_hdr); - switch (attrs->family) { + switch (attrs->addrs.family) { case AF_INET: iphdr = (struct iphdr *)hdr; - memcpy(&iphdr->saddr, &attrs->saddr.a4, 4); - 
memcpy(&iphdr->daddr, &attrs->daddr.a4, 4); + memcpy(&iphdr->saddr, &attrs->addrs.saddr.a4, 4); + memcpy(&iphdr->daddr, &attrs->addrs.daddr.a4, 4); iphdr->version = 4; iphdr->ihl = 5; iphdr->ttl = IPSEC_TUNNEL_DEFAULT_TTL; @@ -1440,8 +1785,8 @@ setup_pkt_tunnel_reformat(struct mlx5_core_dev *mdev, break; case AF_INET6: ipv6hdr = (struct ipv6hdr *)hdr; - memcpy(&ipv6hdr->saddr, &attrs->saddr.a6, 16); - memcpy(&ipv6hdr->daddr, &attrs->daddr.a6, 16); + memcpy(&ipv6hdr->saddr, &attrs->addrs.saddr.a6, 16); + memcpy(&ipv6hdr->daddr, &attrs->addrs.daddr.a6, 16); ipv6hdr->nexthdr = IPPROTO_ESP; ipv6hdr->version = 6; ipv6hdr->hop_limit = IPSEC_TUNNEL_DEFAULT_TTL; @@ -1475,7 +1820,7 @@ static int get_reformat_type(struct mlx5_accel_esp_xfrm_attrs *attrs) return MLX5_REFORMAT_TYPE_DEL_ESP_TRANSPORT_OVER_UDP; return MLX5_REFORMAT_TYPE_DEL_ESP_TRANSPORT; case XFRM_DEV_OFFLOAD_OUT: - if (attrs->family == AF_INET) { + if (attrs->addrs.family == AF_INET) { if (attrs->encap) return MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_UDPV4; return MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_IPV4; @@ -1576,6 +1921,85 @@ static int setup_pkt_reformat(struct mlx5e_ipsec *ipsec, return 0; } +static int rx_add_rule_sa_selector(struct mlx5e_ipsec_sa_entry *sa_entry, + struct mlx5e_ipsec_rx *rx, + struct upspec *upspec) +{ + struct mlx5e_ipsec *ipsec = sa_entry->ipsec; + struct mlx5_core_dev *mdev = ipsec->mdev; + struct mlx5_flow_destination dest[2]; + struct mlx5_flow_act flow_act = {}; + struct mlx5_flow_handle *rule; + struct mlx5_flow_spec *spec; + int err = 0; + + spec = kvzalloc(sizeof(*spec), GFP_KERNEL); + if (!spec) + return -ENOMEM; + + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, + misc_parameters_2.ipsec_syndrome); + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, + misc_parameters_2.metadata_reg_c_4); + MLX5_SET(fte_match_param, spec->match_value, + misc_parameters_2.ipsec_syndrome, 0); + MLX5_SET(fte_match_param, spec->match_value, + misc_parameters_2.metadata_reg_c_4, 0); + spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2; + + ipsec_rx_rule_add_match_obj(sa_entry, rx, spec); + + flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | + MLX5_FLOW_CONTEXT_ACTION_COUNT; + flow_act.flags = FLOW_ACT_IGNORE_FLOW_LEVEL; + dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; + dest[0].ft = rx->ft.sa_sel; + dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; + dest[1].counter = rx->fc->cnt; + + rule = mlx5_add_flow_rules(rx->ft.status, spec, &flow_act, dest, 2); + if (IS_ERR(rule)) { + err = PTR_ERR(rule); + mlx5_core_err(mdev, + "Failed to add ipsec rx pass rule, err=%d\n", + err); + goto err_add_status_pass_rule; + } + + sa_entry->ipsec_rule.status_pass = rule; + + MLX5_SET(fte_match_param, spec->match_criteria, + misc_parameters_2.ipsec_syndrome, 0); + MLX5_SET(fte_match_param, spec->match_criteria, + misc_parameters_2.metadata_reg_c_4, 0); + + setup_fte_upper_proto_match(spec, upspec); + + flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; + dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; + dest[0].ft = rx->ft.pol; + + rule = mlx5_add_flow_rules(rx->ft.sa_sel, spec, &flow_act, &dest[0], 1); + if (IS_ERR(rule)) { + err = PTR_ERR(rule); + mlx5_core_err(mdev, + "Failed to add ipsec rx sa selector rule, err=%d\n", + err); + goto err_add_sa_sel_rule; + } + + sa_entry->ipsec_rule.sa_sel = rule; + + kvfree(spec); + return 0; + +err_add_sa_sel_rule: + mlx5_del_flow_rules(sa_entry->ipsec_rule.status_pass); +err_add_status_pass_rule: + kvfree(spec); + return err; +} + static int 
rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry) { struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs; @@ -1589,7 +2013,7 @@ static int rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry) struct mlx5_fc *counter; int err = 0; - rx = rx_ft_get(mdev, ipsec, attrs->family, attrs->type); + rx = rx_ft_get(mdev, ipsec, attrs->addrs.family, attrs->type); if (IS_ERR(rx)) return PTR_ERR(rx); @@ -1599,16 +2023,15 @@ static int rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry) goto err_alloc; } - if (attrs->family == AF_INET) - setup_fte_addr4(spec, &attrs->saddr.a4, &attrs->daddr.a4); + if (attrs->addrs.family == AF_INET) + setup_fte_addr4(spec, &attrs->addrs); else - setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6); + setup_fte_addr6(spec, &attrs->addrs); setup_fte_spi(spec, attrs->spi, attrs->encap); if (!attrs->encap) setup_fte_esp(spec); setup_fte_no_frags(spec); - setup_fte_upper_proto_match(spec, &attrs->upspec); if (!attrs->drop) { if (rx != ipsec->rx_esw) @@ -1656,6 +2079,13 @@ static int rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry) mlx5_core_err(mdev, "fail to add RX ipsec rule err=%d\n", err); goto err_add_flow; } + + if (attrs->upspec.proto && attrs->type == XFRM_DEV_OFFLOAD_PACKET) { + err = rx_add_rule_sa_selector(sa_entry, rx, &attrs->upspec); + if (err) + goto err_add_sa_sel; + } + if (attrs->type == XFRM_DEV_OFFLOAD_PACKET) err = rx_add_rule_drop_replay(sa_entry, rx); if (err) @@ -1679,6 +2109,11 @@ static int rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry) mlx5_fc_destroy(mdev, sa_entry->ipsec_rule.replay.fc); } err_add_replay: + if (sa_entry->ipsec_rule.sa_sel) { + mlx5_del_flow_rules(sa_entry->ipsec_rule.sa_sel); + mlx5_del_flow_rules(sa_entry->ipsec_rule.status_pass); + } +err_add_sa_sel: mlx5_del_flow_rules(rule); err_add_flow: mlx5_fc_destroy(mdev, counter); @@ -1691,7 +2126,7 @@ static int rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry) err_mod_header: kvfree(spec); err_alloc: - rx_ft_put(ipsec, attrs->family, attrs->type); + rx_ft_put(ipsec, attrs->addrs.family, attrs->type); return err; } @@ -1723,10 +2158,10 @@ static int tx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry) switch (attrs->type) { case XFRM_DEV_OFFLOAD_CRYPTO: - if (attrs->family == AF_INET) - setup_fte_addr4(spec, &attrs->saddr.a4, &attrs->daddr.a4); + if (attrs->addrs.family == AF_INET) + setup_fte_addr4(spec, &attrs->addrs); else - setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6); + setup_fte_addr6(spec, &attrs->addrs); setup_fte_spi(spec, attrs->spi, false); setup_fte_esp(spec); setup_fte_reg_a(spec); @@ -1810,10 +2245,10 @@ static int tx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry) } tx = ipsec_tx(ipsec, attrs->type); - if (attrs->family == AF_INET) - setup_fte_addr4(spec, &attrs->saddr.a4, &attrs->daddr.a4); + if (attrs->addrs.family == AF_INET) + setup_fte_addr4(spec, &attrs->addrs); else - setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6); + setup_fte_addr6(spec, &attrs->addrs); setup_fte_no_frags(spec); setup_fte_upper_proto_match(spec, &attrs->upspec); @@ -1883,12 +2318,12 @@ static int rx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry) struct mlx5e_ipsec_rx *rx; int err, dstn = 0; - ft = rx_ft_get_policy(mdev, pol_entry->ipsec, attrs->family, attrs->prio, - attrs->type); + ft = rx_ft_get_policy(mdev, pol_entry->ipsec, attrs->addrs.family, + attrs->prio, attrs->type); if (IS_ERR(ft)) return PTR_ERR(ft); - rx = ipsec_rx(pol_entry->ipsec, attrs->family, attrs->type); + rx = ipsec_rx(pol_entry->ipsec, attrs->addrs.family, attrs->type); spec = 
kvzalloc(sizeof(*spec), GFP_KERNEL); if (!spec) { @@ -1896,10 +2331,10 @@ static int rx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry) goto err_alloc; } - if (attrs->family == AF_INET) - setup_fte_addr4(spec, &attrs->saddr.a4, &attrs->daddr.a4); + if (attrs->addrs.family == AF_INET) + setup_fte_addr4(spec, &attrs->addrs); else - setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6); + setup_fte_addr6(spec, &attrs->addrs); setup_fte_no_frags(spec); setup_fte_upper_proto_match(spec, &attrs->upspec); @@ -1923,8 +2358,7 @@ static int rx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry) flow_act.flags |= FLOW_ACT_NO_APPEND; if (rx == ipsec->rx_esw && rx->chains) flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL; - dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; - dest[dstn].ft = rx->ft.sa; + ipsec_rx_default_dest_get(ipsec, rx, &dest[dstn]); dstn++; rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, dstn); if (IS_ERR(rule)) { @@ -1940,7 +2374,8 @@ static int rx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry) err_action: kvfree(spec); err_alloc: - rx_ft_put_policy(pol_entry->ipsec, attrs->family, attrs->prio, attrs->type); + rx_ft_put_policy(pol_entry->ipsec, attrs->addrs.family, attrs->prio, + attrs->type); return err; } @@ -2061,6 +2496,7 @@ void mlx5e_accel_ipsec_fs_read_stats(struct mlx5e_priv *priv, void *ipsec_stats) stats->ipsec_rx_bytes = 0; stats->ipsec_rx_drop_pkts = 0; stats->ipsec_rx_drop_bytes = 0; + stats->ipsec_rx_drop_mismatch_sa_sel = 0; stats->ipsec_tx_pkts = 0; stats->ipsec_tx_bytes = 0; stats->ipsec_tx_drop_pkts = 0; @@ -2070,6 +2506,9 @@ void mlx5e_accel_ipsec_fs_read_stats(struct mlx5e_priv *priv, void *ipsec_stats) mlx5_fc_query(mdev, fc->cnt, &stats->ipsec_rx_pkts, &stats->ipsec_rx_bytes); mlx5_fc_query(mdev, fc->drop, &stats->ipsec_rx_drop_pkts, &stats->ipsec_rx_drop_bytes); + if (ipsec->rx_ipv4->sa_sel.fc) + mlx5_fc_query(mdev, ipsec->rx_ipv4->sa_sel.fc, + &stats->ipsec_rx_drop_mismatch_sa_sel, &bytes); fc = ipsec->tx->fc; mlx5_fc_query(mdev, fc->cnt, &stats->ipsec_tx_pkts, &stats->ipsec_tx_bytes); @@ -2098,6 +2537,11 @@ void mlx5e_accel_ipsec_fs_read_stats(struct mlx5e_priv *priv, void *ipsec_stats) stats->ipsec_tx_drop_pkts += packets; stats->ipsec_tx_drop_bytes += bytes; } + + if (ipsec->rx_esw->sa_sel.fc && + !mlx5_fc_query(mdev, ipsec->rx_esw->sa_sel.fc, + &packets, &bytes)) + stats->ipsec_rx_drop_mismatch_sa_sel += packets; } } @@ -2195,12 +2639,18 @@ void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_ipsec_sa_entry *sa_entry) mlx5_del_flow_rules(ipsec_rule->auth.rule); mlx5_fc_destroy(mdev, ipsec_rule->auth.fc); + if (ipsec_rule->sa_sel) { + mlx5_del_flow_rules(ipsec_rule->sa_sel); + mlx5_del_flow_rules(ipsec_rule->status_pass); + } + if (ipsec_rule->replay.rule) { mlx5_del_flow_rules(ipsec_rule->replay.rule); mlx5_fc_destroy(mdev, ipsec_rule->replay.fc); } mlx5_esw_ipsec_rx_id_mapping_remove(sa_entry); - rx_ft_put(sa_entry->ipsec, sa_entry->attrs.family, sa_entry->attrs.type); + rx_ft_put(sa_entry->ipsec, sa_entry->attrs.addrs.family, + sa_entry->attrs.type); } int mlx5e_accel_ipsec_fs_add_pol(struct mlx5e_ipsec_pol_entry *pol_entry) @@ -2236,7 +2686,8 @@ void mlx5e_accel_ipsec_fs_del_pol(struct mlx5e_ipsec_pol_entry *pol_entry) mlx5e_ipsec_unblock_tc_offload(pol_entry->ipsec->mdev); if (pol_entry->attrs.dir == XFRM_DEV_OFFLOAD_IN) { - rx_ft_put_policy(pol_entry->ipsec, pol_entry->attrs.family, + rx_ft_put_policy(pol_entry->ipsec, + pol_entry->attrs.addrs.family, pol_entry->attrs.prio, pol_entry->attrs.type); return; } @@ -2376,7 +2827,7 @@ bool 
mlx5e_ipsec_fs_tunnel_enabled(struct mlx5e_ipsec_sa_entry *sa_entry) struct mlx5e_ipsec_rx *rx; struct mlx5e_ipsec_tx *tx; - rx = ipsec_rx(sa_entry->ipsec, attrs->family, attrs->type); + rx = ipsec_rx(sa_entry->ipsec, attrs->addrs.family, attrs->type); tx = ipsec_tx(sa_entry->ipsec, attrs->type); if (sa_entry->attrs.dir == XFRM_DEV_OFFLOAD_OUT) return tx->allow_tunnel_mode; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c index 92bf3fa44a3b1..93be388068f89 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c @@ -42,6 +42,7 @@ static const struct counter_desc mlx5e_ipsec_hw_stats_desc[] = { { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_hw_stats, ipsec_rx_bytes) }, { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_hw_stats, ipsec_rx_drop_pkts) }, { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_hw_stats, ipsec_rx_drop_bytes) }, + { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_hw_stats, ipsec_rx_drop_mismatch_sa_sel) }, { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_hw_stats, ipsec_tx_pkts) }, { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_hw_stats, ipsec_tx_bytes) }, { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_hw_stats, ipsec_tx_drop_pkts) }, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index cae39198b4dbc..fdf9e9bb99ace 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -42,6 +42,9 @@ #include "lib/clock.h" #include "en/fs_ethtool.h" +#define LANES_UNKNOWN 0 +#define MAX_LANES 8 + void mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv, struct ethtool_drvinfo *drvinfo) { @@ -237,14 +240,33 @@ void mlx5e_build_ptys2ethtool_map(void) ETHTOOL_LINK_MODE_800000baseDR8_2_Full_BIT, ETHTOOL_LINK_MODE_800000baseSR8_Full_BIT, ETHTOOL_LINK_MODE_800000baseVR8_Full_BIT); -} - -static void mlx5e_ethtool_get_speed_arr(struct mlx5_core_dev *mdev, + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_200GAUI_1_200GBASE_CR1_KR1, ext, + ETHTOOL_LINK_MODE_200000baseCR_Full_BIT, + ETHTOOL_LINK_MODE_200000baseKR_Full_BIT, + ETHTOOL_LINK_MODE_200000baseDR_Full_BIT, + ETHTOOL_LINK_MODE_200000baseDR_2_Full_BIT, + ETHTOOL_LINK_MODE_200000baseSR_Full_BIT, + ETHTOOL_LINK_MODE_200000baseVR_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_400GAUI_2_400GBASE_CR2_KR2, ext, + ETHTOOL_LINK_MODE_400000baseCR2_Full_BIT, + ETHTOOL_LINK_MODE_400000baseKR2_Full_BIT, + ETHTOOL_LINK_MODE_400000baseDR2_Full_BIT, + ETHTOOL_LINK_MODE_400000baseDR2_2_Full_BIT, + ETHTOOL_LINK_MODE_400000baseSR2_Full_BIT, + ETHTOOL_LINK_MODE_400000baseVR2_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_800GAUI_4_800GBASE_CR4_KR4, ext, + ETHTOOL_LINK_MODE_800000baseCR4_Full_BIT, + ETHTOOL_LINK_MODE_800000baseKR4_Full_BIT, + ETHTOOL_LINK_MODE_800000baseDR4_Full_BIT, + ETHTOOL_LINK_MODE_800000baseDR4_2_Full_BIT, + ETHTOOL_LINK_MODE_800000baseSR4_Full_BIT, + ETHTOOL_LINK_MODE_800000baseVR4_Full_BIT); +} + +static void mlx5e_ethtool_get_speed_arr(bool ext, struct ptys2ethtool_config **arr, u32 *size) { - bool ext = mlx5_ptys_ext_supported(mdev); - *arr = ext ? ptys2ext_ethtool_table : ptys2legacy_ethtool_table; *size = ext ? 
ARRAY_SIZE(ptys2ext_ethtool_table) : ARRAY_SIZE(ptys2legacy_ethtool_table); @@ -891,37 +913,19 @@ int mlx5e_set_per_queue_coalesce(struct net_device *dev, u32 queue, return mlx5e_ethtool_set_per_queue_coalesce(priv, queue, coal); } -static void ptys2ethtool_supported_link(struct mlx5_core_dev *mdev, - unsigned long *supported_modes, - u32 eth_proto_cap) -{ - unsigned long proto_cap = eth_proto_cap; - struct ptys2ethtool_config *table; - u32 max_size; - int proto; - - mlx5e_ethtool_get_speed_arr(mdev, &table, &max_size); - for_each_set_bit(proto, &proto_cap, max_size) - bitmap_or(supported_modes, supported_modes, - table[proto].supported, - __ETHTOOL_LINK_MODE_MASK_NBITS); -} - -static void ptys2ethtool_adver_link(unsigned long *advertising_modes, - u32 eth_proto_cap, bool ext) +static void ptys2ethtool_process_link(u32 eth_eproto, bool ext, bool advertised, + unsigned long *modes) { - unsigned long proto_cap = eth_proto_cap; + unsigned long eproto = eth_eproto; struct ptys2ethtool_config *table; u32 max_size; int proto; - table = ext ? ptys2ext_ethtool_table : ptys2legacy_ethtool_table; - max_size = ext ? ARRAY_SIZE(ptys2ext_ethtool_table) : - ARRAY_SIZE(ptys2legacy_ethtool_table); - - for_each_set_bit(proto, &proto_cap, max_size) - bitmap_or(advertising_modes, advertising_modes, - table[proto].advertised, + mlx5e_ethtool_get_speed_arr(ext, &table, &max_size); + for_each_set_bit(proto, &eproto, max_size) + bitmap_or(modes, modes, + advertised ? + table[proto].advertised : table[proto].supported, __ETHTOOL_LINK_MODE_MASK_NBITS); } @@ -931,6 +935,7 @@ static const u32 pplm_fec_2_ethtool[] = { [MLX5E_FEC_RS_528_514] = ETHTOOL_FEC_RS, [MLX5E_FEC_RS_544_514] = ETHTOOL_FEC_RS, [MLX5E_FEC_LLRS_272_257_1] = ETHTOOL_FEC_LLRS, + [MLX5E_FEC_RS_544_514_INTERLEAVED_QUAD] = ETHTOOL_FEC_RS, }; static u32 pplm2ethtool_fec(u_long fec_mode, unsigned long size) @@ -1074,50 +1079,53 @@ static void ptys2ethtool_supported_advertised_port(struct mlx5_core_dev *mdev, } } -static void get_speed_duplex(struct net_device *netdev, - u32 eth_proto_oper, bool force_legacy, - u16 data_rate_oper, - struct ethtool_link_ksettings *link_ksettings) +static void get_link_properties(struct net_device *netdev, + u32 eth_proto_oper, bool force_legacy, + u16 data_rate_oper, + struct ethtool_link_ksettings *link_ksettings) { struct mlx5e_priv *priv = netdev_priv(netdev); - u32 speed = SPEED_UNKNOWN; + const struct mlx5_link_info *info; u8 duplex = DUPLEX_UNKNOWN; + u32 speed = SPEED_UNKNOWN; + u32 lanes = LANES_UNKNOWN; if (!netif_carrier_ok(netdev)) goto out; - speed = mlx5_port_ptys2speed(priv->mdev, eth_proto_oper, force_legacy); - if (!speed) { - if (data_rate_oper) - speed = 100 * data_rate_oper; - else - speed = SPEED_UNKNOWN; - goto out; + info = mlx5_port_ptys2info(priv->mdev, eth_proto_oper, force_legacy); + if (info) { + speed = info->speed; + lanes = info->lanes; + duplex = DUPLEX_FULL; + } else if (data_rate_oper) { + speed = 100 * data_rate_oper; + lanes = MAX_LANES; } - duplex = DUPLEX_FULL; - out: - link_ksettings->base.speed = speed; link_ksettings->base.duplex = duplex; + link_ksettings->base.speed = speed; + link_ksettings->lanes = lanes; } static void get_supported(struct mlx5_core_dev *mdev, u32 eth_proto_cap, struct ethtool_link_ksettings *link_ksettings) { unsigned long *supported = link_ksettings->link_modes.supported; - ptys2ethtool_supported_link(mdev, supported, eth_proto_cap); + bool ext = mlx5_ptys_ext_supported(mdev); + + ptys2ethtool_process_link(eth_proto_cap, ext, false, supported); 
ethtool_link_ksettings_add_link_mode(link_ksettings, supported, Pause); } -static void get_advertising(u32 eth_proto_cap, u8 tx_pause, u8 rx_pause, +static void get_advertising(u32 eth_proto_admin, u8 tx_pause, u8 rx_pause, struct ethtool_link_ksettings *link_ksettings, bool ext) { unsigned long *advertising = link_ksettings->link_modes.advertising; - ptys2ethtool_adver_link(advertising, eth_proto_cap, ext); - + ptys2ethtool_process_link(eth_proto_admin, ext, true, advertising); if (rx_pause) ethtool_link_ksettings_add_link_mode(link_ksettings, advertising, Pause); if (tx_pause ^ rx_pause) @@ -1173,7 +1181,7 @@ static void get_lp_advertising(struct mlx5_core_dev *mdev, u32 eth_proto_lp, unsigned long *lp_advertising = link_ksettings->link_modes.lp_advertising; bool ext = mlx5_ptys_ext_supported(mdev); - ptys2ethtool_adver_link(lp_advertising, eth_proto_lp, ext); + ptys2ethtool_process_link(eth_proto_lp, ext, true, lp_advertising); } static int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv, @@ -1235,8 +1243,8 @@ static int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv, get_supported(mdev, eth_proto_cap, link_ksettings); get_advertising(eth_proto_admin, tx_pause, rx_pause, link_ksettings, admin_ext); - get_speed_duplex(priv->netdev, eth_proto_oper, !admin_ext, - data_rate_oper, link_ksettings); + get_link_properties(priv->netdev, eth_proto_oper, !admin_ext, + data_rate_oper, link_ksettings); eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap; connector_type = connector_type < MLX5E_CONNECTOR_TYPE_NUMBER ? @@ -1341,28 +1349,22 @@ static bool ext_link_mode_requested(const unsigned long *adver) return bitmap_intersects(modes, adver, __ETHTOOL_LINK_MODE_MASK_NBITS); } -static bool ext_requested(u8 autoneg, const unsigned long *adver, bool ext_supported) -{ - bool ext_link_mode = ext_link_mode_requested(adver); - - return autoneg == AUTONEG_ENABLE ? ext_link_mode : ext_supported; -} - static int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv, const struct ethtool_link_ksettings *link_ksettings) { struct mlx5_core_dev *mdev = priv->mdev; struct mlx5_port_eth_proto eproto; + struct mlx5_link_info info = {}; const unsigned long *adver; bool an_changes = false; u8 an_disable_admin; bool ext_supported; + bool ext_requested; u8 an_disable_cap; bool an_disable; u32 link_modes; u8 an_status; u8 autoneg; - u32 speed; bool ext; int err; @@ -1370,13 +1372,15 @@ static int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv, adver = link_ksettings->link_modes.advertising; autoneg = link_ksettings->base.autoneg; - speed = link_ksettings->base.speed; + info.speed = link_ksettings->base.speed; + info.lanes = link_ksettings->lanes; ext_supported = mlx5_ptys_ext_supported(mdev); - ext = ext_requested(autoneg, adver, ext_supported); - if (!ext_supported && ext) + ext_requested = ext_link_mode_requested(adver); + if (!ext_supported && ext_requested) return -EOPNOTSUPP; + ext = autoneg == AUTONEG_ENABLE ? ext_requested : ext_supported; ethtool2ptys_adver_func = ext ? mlx5e_ethtool2ptys_ext_adver_link : mlx5e_ethtool2ptys_adver_link; err = mlx5_port_query_eth_proto(mdev, 1, ext, &eproto); @@ -1386,7 +1390,7 @@ static int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv, goto out; } link_modes = autoneg == AUTONEG_ENABLE ? 
ethtool2ptys_adver_func(adver) : - mlx5_port_speed2linkmodes(mdev, speed, !ext); + mlx5_port_info2linkmodes(mdev, &info, !ext); err = mlx5e_speed_validate(priv->netdev, ext, link_modes, autoneg); if (err) @@ -1458,18 +1462,27 @@ static int mlx5e_get_rxfh(struct net_device *netdev, struct ethtool_rxfh_param * { struct mlx5e_priv *priv = netdev_priv(netdev); u32 rss_context = rxfh->rss_context; + bool symmetric; int err; mutex_lock(&priv->state_lock); err = mlx5e_rx_res_rss_get_rxfh(priv->rx_res, rss_context, - rxfh->indir, rxfh->key, &rxfh->hfunc); + rxfh->indir, rxfh->key, &rxfh->hfunc, &symmetric); mutex_unlock(&priv->state_lock); - return err; + + if (err) + return err; + + if (symmetric) + rxfh->input_xfrm = RXH_XFRM_SYM_OR_XOR; + + return 0; } static int mlx5e_set_rxfh(struct net_device *dev, struct ethtool_rxfh_param *rxfh, struct netlink_ext_ack *extack) { + bool symmetric = rxfh->input_xfrm == RXH_XFRM_SYM_OR_XOR; struct mlx5e_priv *priv = netdev_priv(dev); u32 *rss_context = &rxfh->rss_context; u8 hfunc = rxfh->hfunc; @@ -1504,7 +1517,8 @@ static int mlx5e_set_rxfh(struct net_device *dev, struct ethtool_rxfh_param *rxf err = mlx5e_rx_res_rss_set_rxfh(priv->rx_res, *rss_context, rxfh->indir, rxfh->key, - hfunc == ETH_RSS_HASH_NO_CHANGE ? NULL : &hfunc); + hfunc == ETH_RSS_HASH_NO_CHANGE ? NULL : &hfunc, + rxfh->input_xfrm == RXH_XFRM_NO_CHANGE ? NULL : &symmetric); unlock: mutex_unlock(&priv->state_lock); @@ -2018,7 +2032,7 @@ static int mlx5e_get_module_eeprom_by_page(struct net_device *netdev, if (size_read < 0) { NL_SET_ERR_MSG_FMT_MOD( extack, - "Query module eeprom by page failed, read %u bytes, err %d\n", + "Query module eeprom by page failed, read %u bytes, err %d", i, size_read); return i; } @@ -2607,12 +2621,14 @@ static void mlx5e_get_ts_stats(struct net_device *netdev, } const struct ethtool_ops mlx5e_ethtool_ops = { + .cap_link_lanes_supported = true, .cap_rss_ctx_supported = true, .rxfh_per_ctx_key = true, .supported_coalesce_params = ETHTOOL_COALESCE_USECS | ETHTOOL_COALESCE_MAX_FRAMES | ETHTOOL_COALESCE_USE_ADAPTIVE | ETHTOOL_COALESCE_USE_CQE, + .supported_input_xfrm = RXH_XFRM_SYM_OR_XOR, .get_drvinfo = mlx5e_get_drvinfo, .get_link = ethtool_op_get_link, .get_link_ext_state = mlx5e_get_link_ext_state, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c index 773624bb2c5d5..d68230a7b9f46 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c @@ -884,8 +884,10 @@ static int flow_type_to_traffic_type(u32 flow_type) case ESP_V6_FLOW: return MLX5_TT_IPV6_IPSEC_ESP; case IPV4_FLOW: + case IP_USER_FLOW: return MLX5_TT_IPV4; case IPV6_FLOW: + case IPV6_USER_FLOW: return MLX5_TT_IPV6; default: return -EINVAL; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index a814b63ed97e5..3506024c24539 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -311,8 +311,8 @@ static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq, struct mlx5e_icosq *sq, struct mlx5e_umr_wqe *wqe) { - struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl; - struct mlx5_wqe_umr_ctrl_seg *ucseg = &wqe->uctrl; + struct mlx5_wqe_ctrl_seg *cseg = &wqe->hdr.ctrl; + struct mlx5_wqe_umr_ctrl_seg *ucseg = &wqe->hdr.uctrl; u16 octowords; u8 ds_cnt; @@ -359,7 +359,7 @@ static int mlx5e_rq_shampo_hd_info_alloc(struct mlx5e_rq *rq, int node) 
return 0; err_nomem: - kvfree(shampo->bitmap); + bitmap_free(shampo->bitmap); kvfree(shampo->pages); return -ENOMEM; @@ -367,7 +367,7 @@ static int mlx5e_rq_shampo_hd_info_alloc(struct mlx5e_rq *rq, int node) static void mlx5e_rq_shampo_hd_info_free(struct mlx5e_rq *rq) { - kvfree(rq->mpwqe.shampo->bitmap); + bitmap_free(rq->mpwqe.shampo->bitmap); kvfree(rq->mpwqe.shampo->pages); } @@ -393,7 +393,9 @@ static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq, int node) bitmap_fill(wi->skip_release_bitmap, rq->mpwqe.pages_per_wqe); } - mlx5e_build_umr_wqe(rq, rq->icosq, &rq->mpwqe.umr_wqe); + mlx5e_build_umr_wqe(rq, rq->icosq, + container_of(&rq->mpwqe.umr_wqe, + struct mlx5e_umr_wqe, hdr)); return 0; } @@ -737,7 +739,7 @@ static int mlx5e_init_rxq_rq(struct mlx5e_channel *c, struct mlx5e_params *param rq->netdev = c->netdev; rq->priv = c->priv; rq->tstamp = c->tstamp; - rq->clock = &mdev->clock; + rq->clock = mdev->clock; rq->icosq = &c->icosq; rq->ix = c->ix; rq->channel = c; @@ -1614,7 +1616,7 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c, int err; sq->pdev = c->pdev; - sq->clock = &mdev->clock; + sq->clock = mdev->clock; sq->mkey_be = c->mkey_be; sq->netdev = c->netdev; sq->mdev = c->mdev; @@ -2023,41 +2025,12 @@ int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params, csp.min_inline_mode = sq->min_inline_mode; set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state); - if (param->is_xdp_mb) - set_bit(MLX5E_SQ_STATE_XDP_MULTIBUF, &sq->state); - err = mlx5e_create_sq_rdy(c->mdev, param, &csp, 0, &sq->sqn); if (err) goto err_free_xdpsq; mlx5e_set_xmit_fp(sq, param->is_mpw); - if (!param->is_mpw && !test_bit(MLX5E_SQ_STATE_XDP_MULTIBUF, &sq->state)) { - unsigned int ds_cnt = MLX5E_TX_WQE_EMPTY_DS_COUNT + 1; - unsigned int inline_hdr_sz = 0; - int i; - - if (sq->min_inline_mode != MLX5_INLINE_MODE_NONE) { - inline_hdr_sz = MLX5E_XDP_MIN_INLINE; - ds_cnt++; - } - - /* Pre initialize fixed WQE fields */ - for (i = 0; i < mlx5_wq_cyc_get_size(&sq->wq); i++) { - struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(&sq->wq, i); - struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl; - struct mlx5_wqe_eth_seg *eseg = &wqe->eth; - - sq->db.wqe_info[i] = (struct mlx5e_xdp_wqe_info) { - .num_wqebbs = 1, - .num_pkts = 1, - }; - - cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt); - eseg->inline_hdr.sz = cpu_to_be16(inline_hdr_sz); - } - } - return 0; err_free_xdpsq: @@ -3816,8 +3789,11 @@ static int mlx5e_setup_tc_mqprio(struct mlx5e_priv *priv, /* MQPRIO is another toplevel qdisc that can't be attached * simultaneously with the offloaded HTB. 
*/ - if (WARN_ON(mlx5e_selq_is_htb_enabled(&priv->selq))) - return -EINVAL; + if (mlx5e_selq_is_htb_enabled(&priv->selq)) { + NL_SET_ERR_MSG_MOD(mqprio->extack, + "MQPRIO cannot be configured when HTB offload is enabled."); + return -EOPNOTSUPP; + } switch (mqprio->mode) { case TC_MQPRIO_MODE_DCB: @@ -4447,9 +4423,9 @@ static netdev_features_t mlx5e_fix_features(struct net_device *netdev, if (mlx5e_is_uplink_rep(priv)) { features = mlx5e_fix_uplink_rep_features(netdev, features); - netdev->netns_local = true; + netdev->netns_immutable = true; } else { - netdev->netns_local = false; + netdev->netns_immutable = false; } mutex_unlock(&priv->state_lock); @@ -5132,11 +5108,9 @@ static int mlx5e_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, struct mlx5e_priv *priv = netdev_priv(dev); struct mlx5_core_dev *mdev = priv->mdev; u8 mode, setting; - int err; - err = mlx5_eswitch_get_vepa(mdev->priv.eswitch, &setting); - if (err) - return err; + if (mlx5_eswitch_get_vepa(mdev->priv.eswitch, &setting)) + return -EOPNOTSUPP; mode = setting ? BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB; return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index fdff9fd8a89ec..2abab241f03bd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -65,6 +65,7 @@ #define MLX5E_REP_PARAMS_DEF_LOG_SQ_SIZE \ max(0x7, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE) #define MLX5E_REP_PARAMS_DEF_NUM_CHANNELS 1 +#define MLX5E_REP_PARAMS_DEF_LOG_RQ_SIZE 0x8 static const char mlx5e_rep_driver_name[] = "mlx5e_rep"; @@ -855,6 +856,8 @@ static void mlx5e_build_rep_params(struct net_device *netdev) /* RQ */ mlx5e_build_rq_params(mdev, params); + if (!mlx5e_is_uplink_rep(priv) && mlx5_core_is_ecpf(mdev)) + params->log_rq_mtu_frames = MLX5E_REP_PARAMS_DEF_LOG_RQ_SIZE; /* If netdev is already registered (e.g. move from nic profile to uplink, * RTNL lock must be held before triggering netdev notifiers. 
@@ -886,6 +889,8 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev, netdev->ethtool_ops = &mlx5e_rep_ethtool_ops; netdev->watchdog_timeo = 15 * HZ; + if (mlx5_core_is_ecpf(mdev)) + netdev->tx_queue_len = 1 << MLX5E_REP_PARAMS_DEF_LOG_SQ_SIZE; #if IS_ENABLED(CONFIG_MLX5_CLS_ACT) netdev->hw_features |= NETIF_F_HW_TC; @@ -900,7 +905,7 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev, netdev->features |= netdev->hw_features; - netdev->netns_local = true; + netdev->netns_immutable = true; } static int mlx5e_init_rep(struct mlx5_core_dev *mdev, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 1963bc5adb188..5fd70b4d55beb 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -631,16 +631,16 @@ static void build_ksm_umr(struct mlx5e_icosq *sq, struct mlx5e_umr_wqe *umr_wqe, __be32 key, u16 offset, u16 ksm_len) { memset(umr_wqe, 0, offsetof(struct mlx5e_umr_wqe, inline_ksms)); - umr_wqe->ctrl.opmod_idx_opcode = + umr_wqe->hdr.ctrl.opmod_idx_opcode = cpu_to_be32((sq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) | MLX5_OPCODE_UMR); - umr_wqe->ctrl.umr_mkey = key; - umr_wqe->ctrl.qpn_ds = cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT) + umr_wqe->hdr.ctrl.umr_mkey = key; + umr_wqe->hdr.ctrl.qpn_ds = cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT) | MLX5E_KSM_UMR_DS_CNT(ksm_len)); - umr_wqe->uctrl.flags = MLX5_UMR_TRANSLATION_OFFSET_EN | MLX5_UMR_INLINE; - umr_wqe->uctrl.xlt_offset = cpu_to_be16(offset); - umr_wqe->uctrl.xlt_octowords = cpu_to_be16(ksm_len); - umr_wqe->uctrl.mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE); + umr_wqe->hdr.uctrl.flags = MLX5_UMR_TRANSLATION_OFFSET_EN | MLX5_UMR_INLINE; + umr_wqe->hdr.uctrl.xlt_offset = cpu_to_be16(offset); + umr_wqe->hdr.uctrl.xlt_octowords = cpu_to_be16(ksm_len); + umr_wqe->hdr.uctrl.mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE); } static struct mlx5e_frag_page *mlx5e_shampo_hd_to_frag_page(struct mlx5e_rq *rq, int header_index) @@ -704,7 +704,7 @@ static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq, shampo->pi = (shampo->pi + ksm_entries) & (shampo->hd_per_wq - 1); sq->pc += wqe_bbs; - sq->doorbell_cseg = &umr_wqe->ctrl; + sq->doorbell_cseg = &umr_wqe->hdr.ctrl; return 0; @@ -814,12 +814,12 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix) bitmap_zero(wi->skip_release_bitmap, rq->mpwqe.pages_per_wqe); wi->consumed_strides = 0; - umr_wqe->ctrl.opmod_idx_opcode = + umr_wqe->hdr.ctrl.opmod_idx_opcode = cpu_to_be32((sq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) | MLX5_OPCODE_UMR); offset = (ix * rq->mpwqe.mtts_per_wqe) * sizeof(struct mlx5_mtt) / MLX5_OCTWORD; - umr_wqe->uctrl.xlt_offset = cpu_to_be16(offset); + umr_wqe->hdr.uctrl.xlt_offset = cpu_to_be16(offset); sq->db.wqe_info[pi] = (struct mlx5e_icosq_wqe_info) { .wqe_type = MLX5E_ICOSQ_WQE_UMR_RX, @@ -829,7 +829,7 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix) sq->pc += rq->mpwqe.umr_wqebbs; - sq->doorbell_cseg = &umr_wqe->ctrl; + sq->doorbell_cseg = &umr_wqe->hdr.ctrl; return 0; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c index 1d60465cc2ca4..2f7a543feca62 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c @@ -166,6 +166,9 @@ mlx5e_test_loopback_validate(struct sk_buff *skb, struct udphdr *udph; struct iphdr *iph; + if (skb_linearize(skb)) + goto out; + /* We are only 
going to peek, no need to clone the SKB */ if (MLX5E_TEST_PKT_SIZE - ETH_HLEN > skb_headlen(skb)) goto out; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c index 611ec4b6f3709..1c121b435016d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c @@ -37,9 +37,7 @@ #include "en/ptp.h" #include "en/port.h" -#ifdef CONFIG_PAGE_POOL_STATS #include <net/page_pool/helpers.h> -#endif void mlx5e_ethtool_put_stat(u64 **data, u64 val) { @@ -196,7 +194,6 @@ static const struct counter_desc sw_stats_desc[] = { { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_arfs_err) }, #endif { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_recover) }, -#ifdef CONFIG_PAGE_POOL_STATS { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_fast) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_slow) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_alloc_slow_high_order) }, @@ -208,7 +205,6 @@ static const struct counter_desc sw_stats_desc[] = { { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_ring) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_ring_full) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_pp_recycle_released_ref) }, -#endif #ifdef CONFIG_MLX5_EN_TLS { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_decrypted_packets) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_decrypted_bytes) }, @@ -377,7 +373,6 @@ static void mlx5e_stats_grp_sw_update_stats_rq_stats(struct mlx5e_sw_stats *s, s->rx_arfs_err += rq_stats->arfs_err; #endif s->rx_recover += rq_stats->recover; -#ifdef CONFIG_PAGE_POOL_STATS s->rx_pp_alloc_fast += rq_stats->pp_alloc_fast; s->rx_pp_alloc_slow += rq_stats->pp_alloc_slow; s->rx_pp_alloc_empty += rq_stats->pp_alloc_empty; @@ -389,7 +384,6 @@ static void mlx5e_stats_grp_sw_update_stats_rq_stats(struct mlx5e_sw_stats *s, s->rx_pp_recycle_ring += rq_stats->pp_recycle_ring; s->rx_pp_recycle_ring_full += rq_stats->pp_recycle_ring_full; s->rx_pp_recycle_released_ref += rq_stats->pp_recycle_released_ref; -#endif #ifdef CONFIG_MLX5_EN_TLS s->rx_tls_decrypted_packets += rq_stats->tls_decrypted_packets; s->rx_tls_decrypted_bytes += rq_stats->tls_decrypted_bytes; @@ -496,7 +490,6 @@ static void mlx5e_stats_grp_sw_update_stats_qos(struct mlx5e_priv *priv, } } -#ifdef CONFIG_PAGE_POOL_STATS static void mlx5e_stats_update_stats_rq_page_pool(struct mlx5e_channel *c) { struct mlx5e_rq_stats *rq_stats = c->rq.stats; @@ -519,11 +512,6 @@ static void mlx5e_stats_update_stats_rq_page_pool(struct mlx5e_channel *c) rq_stats->pp_recycle_ring_full = stats.recycle_stats.ring_full; rq_stats->pp_recycle_released_ref = stats.recycle_stats.released_refcnt; } -#else -static void mlx5e_stats_update_stats_rq_page_pool(struct mlx5e_channel *c) -{ -} -#endif static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw) { @@ -1227,6 +1215,13 @@ void mlx5e_stats_ts_get(struct mlx5e_priv *priv, mutex_unlock(&priv->state_lock); } +#define PPORT_PHY_LAYER_OFF(c) \ + MLX5_BYTE_OFF(ppcnt_reg, \ + counter_set.phys_layer_cntrs.c) +static const struct counter_desc pport_phy_layer_cntrs_stats_desc[] = { + { "link_down_events_phy", PPORT_PHY_LAYER_OFF(link_down_events) } +}; + #define PPORT_PHY_STATISTICAL_OFF(c) \ MLX5_BYTE_OFF(ppcnt_reg, \ counter_set.phys_layer_statistical_cntrs.c##_high) @@ -1243,25 +1238,45 @@ pport_phy_statistical_err_lanes_stats_desc[] = { { "rx_err_lane_3_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits_lane3) }, }; +#define PPORT_PHY_RECOVERY_OFF(c) \ +
MLX5_BYTE_OFF(ppcnt_reg, counter_set.phys_layer_recovery_cntrs.c) +static const struct counter_desc +pport_phy_recovery_cntrs_stats_desc[] = { + { "total_success_recovery_phy", + PPORT_PHY_RECOVERY_OFF(total_successful_recovery_events) } +}; + +#define NUM_PPORT_PHY_LAYER_COUNTERS \ + ARRAY_SIZE(pport_phy_layer_cntrs_stats_desc) #define NUM_PPORT_PHY_STATISTICAL_COUNTERS \ ARRAY_SIZE(pport_phy_statistical_stats_desc) #define NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS \ ARRAY_SIZE(pport_phy_statistical_err_lanes_stats_desc) +#define NUM_PPORT_PHY_RECOVERY_COUNTERS \ + ARRAY_SIZE(pport_phy_recovery_cntrs_stats_desc) + +#define NUM_PPORT_PHY_STATISTICAL_LOOPBACK_COUNTERS(dev) \ + (MLX5_CAP_PCAM_FEATURE(dev, ppcnt_statistical_group) ? \ + NUM_PPORT_PHY_STATISTICAL_COUNTERS : 0) +#define NUM_PPORT_PHY_STATISTICAL_PER_LANE_LOOPBACK_COUNTERS(dev) \ + (MLX5_CAP_PCAM_FEATURE(dev, per_lane_error_counters) ? \ + NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS : 0) +#define NUM_PPORT_PHY_RECOVERY_LOOPBACK_COUNTERS(dev) \ + (MLX5_CAP_PCAM_FEATURE(dev, ppcnt_recovery_counters) ? \ + NUM_PPORT_PHY_RECOVERY_COUNTERS : 0) static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(phy) { struct mlx5_core_dev *mdev = priv->mdev; int num_stats; - /* "1" for link_down_events special counter */ - num_stats = 1; + num_stats = NUM_PPORT_PHY_LAYER_COUNTERS; - num_stats += MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group) ? - NUM_PPORT_PHY_STATISTICAL_COUNTERS : 0; + num_stats += NUM_PPORT_PHY_STATISTICAL_LOOPBACK_COUNTERS(mdev); - num_stats += MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters) ? - NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS : 0; + num_stats += NUM_PPORT_PHY_STATISTICAL_PER_LANE_LOOPBACK_COUNTERS(mdev); + num_stats += NUM_PPORT_PHY_RECOVERY_LOOPBACK_COUNTERS(mdev); return num_stats; } @@ -1270,18 +1285,22 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(phy) struct mlx5_core_dev *mdev = priv->mdev; int i; - ethtool_puts(data, "link_down_events_phy"); - - if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group)) - return; + for (i = 0; i < NUM_PPORT_PHY_LAYER_COUNTERS; i++) + ethtool_puts(data, pport_phy_layer_cntrs_stats_desc[i].format); - for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS; i++) + for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_LOOPBACK_COUNTERS(mdev); i++) ethtool_puts(data, pport_phy_statistical_stats_desc[i].format); - if (MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters)) - for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS; i++) - ethtool_puts(data, - pport_phy_statistical_err_lanes_stats_desc[i].format); + for (i = 0; + i < NUM_PPORT_PHY_STATISTICAL_PER_LANE_LOOPBACK_COUNTERS(mdev); + i++) + ethtool_puts(data, + pport_phy_statistical_err_lanes_stats_desc[i] + .format); + + for (i = 0; i < NUM_PPORT_PHY_RECOVERY_LOOPBACK_COUNTERS(mdev); i++) + ethtool_puts(data, + pport_phy_recovery_cntrs_stats_desc[i].format); } static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(phy) @@ -1289,30 +1308,35 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(phy) struct mlx5_core_dev *mdev = priv->mdev; int i; - /* link_down_events_phy has special handling since it is not stored in __be64 format */ - mlx5e_ethtool_put_stat( - data, MLX5_GET(ppcnt_reg, priv->stats.pport.phy_counters, - counter_set.phys_layer_cntrs.link_down_events)); - - if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group)) - return; + for (i = 0; i < NUM_PPORT_PHY_LAYER_COUNTERS; i++) + mlx5e_ethtool_put_stat( + data, + MLX5E_READ_CTR32_BE(&priv->stats.pport + .phy_counters, + pport_phy_layer_cntrs_stats_desc, i)); - for (i = 0; i < 
NUM_PPORT_PHY_STATISTICAL_COUNTERS; i++) + for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_LOOPBACK_COUNTERS(mdev); i++) mlx5e_ethtool_put_stat( data, MLX5E_READ_CTR64_BE( &priv->stats.pport.phy_statistical_counters, pport_phy_statistical_stats_desc, i)); - if (MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters)) - for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS; i++) - mlx5e_ethtool_put_stat( - data, - MLX5E_READ_CTR64_BE( - &priv->stats.pport - .phy_statistical_counters, - pport_phy_statistical_err_lanes_stats_desc, - i)); + for (i = 0; + i < NUM_PPORT_PHY_STATISTICAL_PER_LANE_LOOPBACK_COUNTERS(mdev); + i++) + mlx5e_ethtool_put_stat( + data, + MLX5E_READ_CTR64_BE( + &priv->stats.pport.phy_statistical_counters, + pport_phy_statistical_err_lanes_stats_desc, i)); + + for (i = 0; i < NUM_PPORT_PHY_RECOVERY_LOOPBACK_COUNTERS(mdev); i++) + mlx5e_ethtool_put_stat( + data, + MLX5E_READ_CTR32_BE( + &priv->stats.pport.phy_recovery_counters, + pport_phy_recovery_cntrs_stats_desc, i)); } static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(phy) @@ -1328,12 +1352,21 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(phy) MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP); mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0); - if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group)) - return; + if (MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group)) { + out = pstats->phy_statistical_counters; + MLX5_SET(ppcnt_reg, in, grp, + MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP); + mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, + 0); + } - out = pstats->phy_statistical_counters; - MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP); - mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0); + if (MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_recovery_counters)) { + out = pstats->phy_recovery_counters; + MLX5_SET(ppcnt_reg, in, grp, + MLX5_PHYSICAL_LAYER_RECOVERY_GROUP); + mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, + 0); + } } void mlx5e_get_link_ext_stats(struct net_device *dev, @@ -2086,7 +2119,6 @@ static const struct counter_desc rq_stats_desc[] = { { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, arfs_err) }, #endif { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, recover) }, -#ifdef CONFIG_PAGE_POOL_STATS { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_fast) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_slow) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_alloc_slow_high_order) }, @@ -2098,7 +2130,6 @@ static const struct counter_desc rq_stats_desc[] = { { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_recycle_ring) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_recycle_ring_full) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, pp_recycle_released_ref) }, -#endif #ifdef CONFIG_MLX5_EN_TLS { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_decrypted_packets) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_decrypted_bytes) }, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h index 5961c569cfe01..8de6fcbd3a033 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h @@ -215,7 +215,6 @@ struct mlx5e_sw_stats { u64 ch_aff_change; u64 ch_force_irq; u64 ch_eq_rearm; -#ifdef CONFIG_PAGE_POOL_STATS u64 rx_pp_alloc_fast; u64 rx_pp_alloc_slow; u64 rx_pp_alloc_slow_high_order; @@ -227,7 +226,6 @@ struct mlx5e_sw_stats { u64 rx_pp_recycle_ring; u64 rx_pp_recycle_ring_full; u64 
rx_pp_recycle_released_ref; -#endif #ifdef CONFIG_MLX5_EN_TLS u64 tx_tls_encrypted_packets; u64 tx_tls_encrypted_bytes; @@ -309,6 +307,9 @@ struct mlx5e_vport_stats { #define PPORT_PHY_STATISTICAL_GET(pstats, c) \ MLX5_GET64(ppcnt_reg, (pstats)->phy_statistical_counters, \ counter_set.phys_layer_statistical_cntrs.c##_high) +#define PPORT_PHY_RECOVERY_GET(pstats, c) \ + MLX5_GET64(ppcnt_reg, (pstats)->phy_recovery_counters, \ + counter_set.phys_layer_recovery_cntrs.c) #define PPORT_PER_PRIO_GET(pstats, prio, c) \ MLX5_GET64(ppcnt_reg, pstats->per_prio_counters[prio], \ counter_set.eth_per_prio_grp_data_layout.c##_high) @@ -324,6 +325,7 @@ struct mlx5e_pport_stats { __be64 per_prio_counters[NUM_PPORT_PRIO][MLX5_ST_SZ_QW(ppcnt_reg)]; __be64 phy_counters[MLX5_ST_SZ_QW(ppcnt_reg)]; __be64 phy_statistical_counters[MLX5_ST_SZ_QW(ppcnt_reg)]; + __be64 phy_recovery_counters[MLX5_ST_SZ_QW(ppcnt_reg)]; __be64 eth_ext_counters[MLX5_ST_SZ_QW(ppcnt_reg)]; __be64 per_tc_prio_counters[NUM_PPORT_PRIO][MLX5_ST_SZ_QW(ppcnt_reg)]; __be64 per_tc_congest_prio_counters[NUM_PPORT_PRIO][MLX5_ST_SZ_QW(ppcnt_reg)]; @@ -381,7 +383,6 @@ struct mlx5e_rq_stats { u64 arfs_err; #endif u64 recover; -#ifdef CONFIG_PAGE_POOL_STATS u64 pp_alloc_fast; u64 pp_alloc_slow; u64 pp_alloc_slow_high_order; @@ -393,7 +394,6 @@ struct mlx5e_rq_stats { u64 pp_recycle_ring; u64 pp_recycle_ring_full; u64 pp_recycle_released_ref; -#endif #ifdef CONFIG_MLX5_EN_TLS u64 tls_decrypted_packets; u64 tls_decrypted_bytes; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index f8c7912abe0e3..4fd853d19e313 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c @@ -525,9 +525,9 @@ static void mlx5e_tx_mpwqe_session_start(struct mlx5e_txqsq *sq, { struct mlx5e_tx_mpwqe *session = &sq->mpwqe; struct mlx5e_tx_wqe *wqe; - u16 pi; + u16 pi, num_wqebbs; - pi = mlx5e_txqsq_get_next_pi(sq, sq->max_sq_mpw_wqebbs); + pi = mlx5e_txqsq_get_next_pi_anysize(sq, &num_wqebbs); wqe = MLX5E_TX_FETCH_WQE(sq, pi); net_prefetchw(wqe->data); @@ -535,6 +535,7 @@ static void mlx5e_tx_mpwqe_session_start(struct mlx5e_txqsq *sq, .wqe = wqe, .bytes_count = 0, .ds_count = MLX5E_TX_WQE_EMPTY_DS_COUNT, + .ds_count_max = num_wqebbs * MLX5_SEND_WQEBB_NUM_DS, .pkt_count = 0, .inline_on = 0, }; @@ -626,7 +627,7 @@ mlx5e_sq_xmit_mpwqe(struct mlx5e_txqsq *sq, struct sk_buff *skb, mlx5e_tx_mpwqe_add_dseg(sq, &txd); mlx5e_tx_skb_update_hwts_flags(skb); - if (unlikely(mlx5e_tx_mpwqe_is_full(&sq->mpwqe, sq->max_sq_mpw_wqebbs))) { + if (unlikely(mlx5e_tx_mpwqe_is_full(&sq->mpwqe))) { /* Might stop the queue and affect the retval of __netdev_tx_sent_queue. 
*/ cseg = mlx5e_tx_mpwqe_session_complete(sq); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index 2b229b6226c6a..dfb079e59d858 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c @@ -871,8 +871,8 @@ static void comp_irq_release_sf(struct mlx5_core_dev *dev, u16 vecidx) static int comp_irq_request_sf(struct mlx5_core_dev *dev, u16 vecidx) { + struct mlx5_irq_pool *pool = mlx5_irq_table_get_comp_irq_pool(dev); struct mlx5_eq_table *table = dev->priv.eq_table; - struct mlx5_irq_pool *pool = mlx5_irq_pool_get(dev); struct irq_affinity_desc af_desc = {}; struct mlx5_irq *irq; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/helper.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/helper.c index d599e50af346b..3ce455c2535c4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/helper.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/helper.c @@ -27,7 +27,7 @@ esw_acl_table_create(struct mlx5_eswitch *esw, struct mlx5_vport *vport, int ns, esw_debug(dev, "Create vport[%d] %s ACL table\n", vport_num, ns == MLX5_FLOW_NAMESPACE_ESW_INGRESS ? "ingress" : "egress"); - root_ns = mlx5_get_flow_vport_acl_namespace(dev, ns, vport->index); + root_ns = mlx5_get_flow_vport_namespace(dev, ns, vport->index); if (!root_ns) { esw_warn(dev, "Failed to get E-Switch root namespace for vport (%d)\n", vport_num); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c index 5f647358a05ca..76e35c827da00 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c @@ -1863,7 +1863,7 @@ int mlx5_esw_bridge_port_mdb_add(struct net_device *dev, u16 vport_num, u16 esw_ "Failed to lookup bridge port to add MDB (MAC=%pM,vport=%u)\n", addr, vport_num); NL_SET_ERR_MSG_FMT_MOD(extack, - "Failed to lookup bridge port to add MDB (MAC=%pM,vport=%u)\n", + "Failed to lookup bridge port to add MDB (MAC=%pM,vport=%u)", addr, vport_num); return -EINVAL; } @@ -1876,7 +1876,7 @@ int mlx5_esw_bridge_port_mdb_add(struct net_device *dev, u16 vport_num, u16 esw_ "Failed to lookup bridge port vlan metadata to create MDB (MAC=%pM,vid=%u,vport=%u)\n", addr, vid, vport_num); NL_SET_ERR_MSG_FMT_MOD(extack, - "Failed to lookup vlan metadata for MDB (MAC=%pM,vid=%u,vport=%u)\n", + "Failed to lookup vlan metadata for MDB (MAC=%pM,vid=%u,vport=%u)", addr, vid, vport_num); return -EINVAL; } @@ -1884,7 +1884,7 @@ int mlx5_esw_bridge_port_mdb_add(struct net_device *dev, u16 vport_num, u16 esw_ err = mlx5_esw_bridge_port_mdb_attach(dev, port, addr, vid); if (err) { - NL_SET_ERR_MSG_FMT_MOD(extack, "Failed to add MDB (MAC=%pM,vid=%u,vport=%u)\n", + NL_SET_ERR_MSG_FMT_MOD(extack, "Failed to add MDB (MAC=%pM,vid=%u,vport=%u)", addr, vid, vport_num); return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c index 982fe37146834..b7102e14d23d3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c @@ -32,7 +32,7 @@ static void mlx5_esw_offloads_pf_vf_devlink_port_attrs_set(struct mlx5_eswitch * u16 pfnum; mlx5_esw_get_port_parent_id(dev, &ppid); - pfnum = mlx5_get_dev_index(dev); + pfnum = PCI_FUNC(dev->pdev->devfn); external = mlx5_core_is_ecpf_esw_manager(dev); if (external) controller_num = 
dev->priv.eswitch->offloads.host_number + 1; @@ -110,7 +110,7 @@ static void mlx5_esw_offloads_sf_devlink_port_attrs_set(struct mlx5_eswitch *esw struct netdev_phys_item_id ppid = {}; u16 pfnum; - pfnum = mlx5_get_dev_index(dev); + pfnum = PCI_FUNC(dev->pdev->devfn); mlx5_esw_get_port_parent_id(dev, &ppid); memcpy(dl_port->attrs.switch_id.id, &ppid.id[0], ppid.id_len); dl_port->attrs.switch_id.id_len = ppid.id_len; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c index ed977ae75fab8..3cfe743610d3f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c @@ -10,9 +10,9 @@ #endif enum { - MLX5_ESW_IPSEC_RX_POL_FT_LEVEL, MLX5_ESW_IPSEC_RX_ESP_FT_LEVEL, MLX5_ESW_IPSEC_RX_ESP_FT_CHK_LEVEL, + MLX5_ESW_IPSEC_RX_POL_FT_LEVEL, }; enum { @@ -85,6 +85,19 @@ int mlx5_esw_ipsec_rx_setup_modify_header(struct mlx5e_ipsec_sa_entry *sa_entry, return err; } +void mlx5_esw_ipsec_rx_rule_add_match_obj(struct mlx5e_ipsec_sa_entry *sa_entry, + struct mlx5_flow_spec *spec) +{ + MLX5_SET(fte_match_param, spec->match_criteria, + misc_parameters_2.metadata_reg_c_1, + ESW_IPSEC_RX_MAPPED_ID_MATCH_MASK); + MLX5_SET(fte_match_param, spec->match_value, + misc_parameters_2.metadata_reg_c_1, + sa_entry->rx_mapped_id << ESW_ZONE_ID_BITS); + + spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2; +} + void mlx5_esw_ipsec_rx_id_mapping_remove(struct mlx5e_ipsec_sa_entry *sa_entry) { struct mlx5e_ipsec *ipsec = sa_entry->ipsec; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.h index ac9c65b89166e..514c15258b1d1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.h @@ -20,6 +20,8 @@ int mlx5_esw_ipsec_rx_ipsec_obj_id_search(struct mlx5e_priv *priv, u32 id, void mlx5_esw_ipsec_tx_create_attr_set(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_tx_create_attr *attr); void mlx5_esw_ipsec_restore_dest_uplink(struct mlx5_core_dev *mdev); +void mlx5_esw_ipsec_rx_rule_add_match_obj(struct mlx5e_ipsec_sa_entry *sa_entry, + struct mlx5_flow_spec *spec); #else static inline void mlx5_esw_ipsec_rx_create_attr_set(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_rx_create_attr *attr) {} @@ -48,5 +50,8 @@ static inline void mlx5_esw_ipsec_tx_create_attr_set(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_tx_create_attr *attr) {} static inline void mlx5_esw_ipsec_restore_dest_uplink(struct mlx5_core_dev *mdev) {} +static inline void +mlx5_esw_ipsec_rx_rule_add_match_obj(struct mlx5e_ipsec_sa_entry *sa_entry, + struct mlx5_flow_spec *spec) {} #endif /* CONFIG_MLX5_ESWITCH */ #endif /* __MLX5_ESW_IPSEC_FS_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c index 45183de424f3d..76382626ad41d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c @@ -96,7 +96,7 @@ static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw) if (!flow_group_in) return -ENOMEM; - ft_attr.max_fte = POOL_NEXT_SIZE; + ft_attr.max_fte = MLX5_FS_MAX_POOL_SIZE; ft_attr.prio = LEGACY_FDB_PRIO; fdb = mlx5_create_flow_table(root_ns, &ft_attr); if (IS_ERR(fdb)) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c index 8b7c843446e11..b6ae384396b33 100644 --- 
a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c @@ -90,15 +90,30 @@ struct mlx5_esw_sched_node { struct list_head children; /* Valid only if this node is associated with a vport. */ struct mlx5_vport *vport; + /* Level in the hierarchy. The root node level is 1. */ + u8 level; }; +static void esw_qos_node_attach_to_parent(struct mlx5_esw_sched_node *node) +{ + if (!node->parent) { + /* Root children are assigned a depth level of 2. */ + node->level = 2; + list_add_tail(&node->entry, &node->esw->qos.domain->nodes); + } else { + node->level = node->parent->level + 1; + list_add_tail(&node->entry, &node->parent->children); + } +} + static void esw_qos_node_set_parent(struct mlx5_esw_sched_node *node, struct mlx5_esw_sched_node *parent) { list_del_init(&node->entry); node->parent = parent; - list_add_tail(&node->entry, &parent->children); - node->esw = parent->esw; + if (parent) + node->esw = parent->esw; + esw_qos_node_attach_to_parent(node); } void mlx5_esw_qos_vport_qos_free(struct mlx5_vport *vport) @@ -305,8 +320,9 @@ static int esw_qos_set_node_min_rate(struct mlx5_esw_sched_node *node, return 0; } -static int esw_qos_create_node_sched_elem(struct mlx5_core_dev *dev, u32 parent_element_id, - u32 *tsar_ix) +static int +esw_qos_create_node_sched_elem(struct mlx5_core_dev *dev, u32 parent_element_id, + u32 max_rate, u32 bw_share, u32 *tsar_ix) { u32 tsar_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {}; void *attr; @@ -323,6 +339,8 @@ static int esw_qos_create_node_sched_elem(struct mlx5_core_dev *dev, u32 parent_ SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR); MLX5_SET(scheduling_context, tsar_ctx, parent_element_id, parent_element_id); + MLX5_SET(scheduling_context, tsar_ctx, max_average_bw, max_rate); + MLX5_SET(scheduling_context, tsar_ctx, bw_share, bw_share); attr = MLX5_ADDR_OF(scheduling_context, tsar_ctx, element_attributes); MLX5_SET(tsar_element, attr, tsar_type, TSAR_ELEMENT_TSAR_TYPE_DWRR); @@ -358,7 +376,6 @@ static struct mlx5_esw_sched_node * __esw_qos_alloc_node(struct mlx5_eswitch *esw, u32 tsar_ix, enum sched_node_type type, struct mlx5_esw_sched_node *parent) { - struct list_head *parent_children; struct mlx5_esw_sched_node *node; node = kzalloc(sizeof(*node), GFP_KERNEL); @@ -370,8 +387,7 @@ __esw_qos_alloc_node(struct mlx5_eswitch *esw, u32 tsar_ix, enum sched_node_type node->type = type; node->parent = parent; INIT_LIST_HEAD(&node->children); - parent_children = parent ? 
&parent->children : &esw->qos.domain->nodes; - list_add_tail(&node->entry, parent_children); + esw_qos_node_attach_to_parent(node); return node; } @@ -396,7 +412,8 @@ __esw_qos_create_vports_sched_node(struct mlx5_eswitch *esw, struct mlx5_esw_sch u32 tsar_ix; int err; - err = esw_qos_create_node_sched_elem(esw->dev, esw->qos.root_tsar_ix, &tsar_ix); + err = esw_qos_create_node_sched_elem(esw->dev, esw->qos.root_tsar_ix, 0, + 0, &tsar_ix); if (err) { NL_SET_ERR_MSG_MOD(extack, "E-Switch create TSAR for node failed"); return ERR_PTR(err); @@ -463,7 +480,8 @@ static int esw_qos_create(struct mlx5_eswitch *esw, struct netlink_ext_ack *exta if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, esw_scheduling)) return -EOPNOTSUPP; - err = esw_qos_create_node_sched_elem(esw->dev, 0, &esw->qos.root_tsar_ix); + err = esw_qos_create_node_sched_elem(esw->dev, 0, 0, 0, + &esw->qos.root_tsar_ix); if (err) { esw_warn(dev, "E-Switch create root TSAR failed (%d)\n", err); return err; @@ -564,6 +582,9 @@ static int esw_qos_vport_enable(struct mlx5_vport *vport, struct mlx5_esw_sched_ return err; esw_qos_normalize_min_rate(parent->esw, parent, extack); + trace_mlx5_esw_vport_qos_create(vport->dev, vport, + vport->qos.sched_node->max_rate, + vport->qos.sched_node->bw_share); return 0; } @@ -591,8 +612,11 @@ static int mlx5_esw_qos_vport_enable(struct mlx5_vport *vport, enum sched_node_t sched_node->vport = vport; vport->qos.sched_node = sched_node; err = esw_qos_vport_enable(vport, parent, extack); - if (err) + if (err) { + __esw_qos_free_node(sched_node); esw_qos_put(esw); + vport->qos.sched_node = NULL; + } return err; } @@ -980,10 +1004,10 @@ int mlx5_esw_qos_vport_update_parent(struct mlx5_vport *vport, struct mlx5_esw_s return err; } -int mlx5_esw_devlink_rate_parent_set(struct devlink_rate *devlink_rate, - struct devlink_rate *parent, - void *priv, void *parent_priv, - struct netlink_ext_ack *extack) +int mlx5_esw_devlink_rate_leaf_parent_set(struct devlink_rate *devlink_rate, + struct devlink_rate *parent, + void *priv, void *parent_priv, + struct netlink_ext_ack *extack) { struct mlx5_esw_sched_node *node; struct mlx5_vport *vport = priv; @@ -994,3 +1018,105 @@ int mlx5_esw_devlink_rate_parent_set(struct devlink_rate *devlink_rate, node = parent_priv; return mlx5_esw_qos_vport_update_parent(vport, node, extack); } + +static int +mlx5_esw_qos_node_validate_set_parent(struct mlx5_esw_sched_node *node, + struct mlx5_esw_sched_node *parent, + struct netlink_ext_ack *extack) +{ + u8 new_level, max_level; + + if (parent && parent->esw != node->esw) { + NL_SET_ERR_MSG_MOD(extack, + "Cannot assign node to another E-Switch"); + return -EOPNOTSUPP; + } + + if (!list_empty(&node->children)) { + NL_SET_ERR_MSG_MOD(extack, + "Cannot reassign a node that contains rate objects"); + return -EOPNOTSUPP; + } + + new_level = parent ? parent->level + 1 : 2; + max_level = 1 << MLX5_CAP_QOS(node->esw->dev, log_esw_max_sched_depth); + if (new_level > max_level) { + NL_SET_ERR_MSG_MOD(extack, + "Node hierarchy depth exceeds the maximum supported level"); + return -EOPNOTSUPP; + } + + return 0; +} + +static int esw_qos_vports_node_update_parent(struct mlx5_esw_sched_node *node, + struct mlx5_esw_sched_node *parent, + struct netlink_ext_ack *extack) +{ + struct mlx5_esw_sched_node *curr_parent = node->parent; + struct mlx5_eswitch *esw = node->esw; + u32 parent_ix; + int err; + + parent_ix = parent ? 
parent->ix : node->esw->qos.root_tsar_ix; + mlx5_destroy_scheduling_element_cmd(esw->dev, + SCHEDULING_HIERARCHY_E_SWITCH, + node->ix); + err = esw_qos_create_node_sched_elem(esw->dev, parent_ix, + node->max_rate, 0, &node->ix); + if (err) { + NL_SET_ERR_MSG_MOD(extack, + "Failed to create a node under the new hierarchy."); + if (esw_qos_create_node_sched_elem(esw->dev, curr_parent->ix, + node->max_rate, + node->bw_share, + &node->ix)) + esw_warn(esw->dev, "Failed to restore the QoS node under its original parent\n"); + + return err; + } + esw_qos_node_set_parent(node, parent); + + return 0; +} + +static int mlx5_esw_qos_node_update_parent(struct mlx5_esw_sched_node *node, + struct mlx5_esw_sched_node *parent, + struct netlink_ext_ack *extack) +{ + struct mlx5_esw_sched_node *curr_parent; + struct mlx5_eswitch *esw = node->esw; + int err; + + err = mlx5_esw_qos_node_validate_set_parent(node, parent, extack); + if (err) + return err; + + esw_qos_lock(esw); + curr_parent = node->parent; + err = esw_qos_vports_node_update_parent(node, parent, extack); + if (err) + goto out; + + esw_qos_normalize_min_rate(esw, curr_parent, extack); + esw_qos_normalize_min_rate(esw, parent, extack); + +out: + esw_qos_unlock(esw); + + return err; +} + +int mlx5_esw_devlink_rate_node_parent_set(struct devlink_rate *devlink_rate, + struct devlink_rate *parent, + void *priv, void *parent_priv, + struct netlink_ext_ack *extack) +{ + struct mlx5_esw_sched_node *node = priv, *parent_node; + + if (!parent) + return mlx5_esw_qos_node_update_parent(node, NULL, extack); + + parent_node = parent_priv; + return mlx5_esw_qos_node_update_parent(node, parent_node, extack); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.h index 6eb8f6a648c83..ed40ec8f027e9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.h @@ -29,10 +29,14 @@ int mlx5_esw_devlink_rate_node_new(struct devlink_rate *rate_node, void **priv, struct netlink_ext_ack *extack); int mlx5_esw_devlink_rate_node_del(struct devlink_rate *rate_node, void *priv, struct netlink_ext_ack *extack); -int mlx5_esw_devlink_rate_parent_set(struct devlink_rate *devlink_rate, - struct devlink_rate *parent, - void *priv, void *parent_priv, - struct netlink_ext_ack *extack); +int mlx5_esw_devlink_rate_leaf_parent_set(struct devlink_rate *devlink_rate, + struct devlink_rate *parent, + void *priv, void *parent_priv, + struct netlink_ext_ack *extack); +int mlx5_esw_devlink_rate_node_parent_set(struct devlink_rate *devlink_rate, + struct devlink_rate *parent, + void *priv, void *parent_priv, + struct netlink_ext_ack *extack); #endif #endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index 20cc01ceee8a9..a6a8eea5980ca 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -648,6 +648,7 @@ esw_setup_meter(struct mlx5_flow_attr *attr, struct mlx5_flow_act *flow_act) meter = attr->meter_attr.meter; flow_act->exe_aso.type = attr->exe_aso_type; flow_act->exe_aso.object_id = meter->obj_id; + flow_act->exe_aso.base_id = mlx5e_flow_meter_get_base_id(meter); flow_act->exe_aso.flow_meter.meter_idx = meter->idx; flow_act->exe_aso.flow_meter.init_color = MLX5_FLOW_METER_COLOR_GREEN; /* use metadata reg 5 for packet color */ @@ -2828,9 +2829,9 @@ static int esw_set_master_egress_rule(struct mlx5_core_dev *master, if (IS_ERR(vport))
return PTR_ERR(vport); - egress_ns = mlx5_get_flow_vport_acl_namespace(master, - MLX5_FLOW_NAMESPACE_ESW_EGRESS, - vport->index); + egress_ns = mlx5_get_flow_vport_namespace(master, + MLX5_FLOW_NAMESPACE_ESW_EGRESS, + vport->index); if (!egress_ns) return -EINVAL; @@ -4157,37 +4158,12 @@ u32 mlx5_eswitch_get_vport_metadata_for_match(struct mlx5_eswitch *esw, } EXPORT_SYMBOL(mlx5_eswitch_get_vport_metadata_for_match); -static int mlx5_esw_query_vport_vhca_id(struct mlx5_eswitch *esw, u16 vport_num, u16 *vhca_id) -{ - int query_out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out); - void *query_ctx; - void *hca_caps; - int err; - - *vhca_id = 0; - - query_ctx = kzalloc(query_out_sz, GFP_KERNEL); - if (!query_ctx) - return -ENOMEM; - - err = mlx5_vport_get_other_func_general_cap(esw->dev, vport_num, query_ctx); - if (err) - goto out_free; - - hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability); - *vhca_id = MLX5_GET(cmd_hca_cap, hca_caps, vhca_id); - -out_free: - kfree(query_ctx); - return err; -} - int mlx5_esw_vport_vhca_id_set(struct mlx5_eswitch *esw, u16 vport_num) { u16 *old_entry, *vhca_map_entry, vhca_id; int err; - err = mlx5_esw_query_vport_vhca_id(esw, vport_num, &vhca_id); + err = mlx5_vport_get_vhca_id(esw->dev, vport_num, &vhca_id); if (err) { esw_warn(esw->dev, "Getting vhca_id for vport failed (vport=%u,err=%d)\n", vport_num, err); @@ -4213,7 +4189,7 @@ void mlx5_esw_vport_vhca_id_clear(struct mlx5_eswitch *esw, u16 vport_num) u16 *vhca_map_entry, vhca_id; int err; - err = mlx5_esw_query_vport_vhca_id(esw, vport_num, &vhca_id); + err = mlx5_vport_get_vhca_id(esw->dev, vport_num, &vhca_id); if (err) esw_warn(esw->dev, "Getting vhca_id for vport failed (vport=%hu,err=%d)\n", vport_num, err); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/events.c b/drivers/net/ethernet/mellanox/mlx5/core/events.c index d91ea53eb394d..01c5f5990f9ae 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/events.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/events.c @@ -6,6 +6,7 @@ #include "mlx5_core.h" #include "lib/eq.h" #include "lib/events.h" +#include "hwmon.h" struct mlx5_event_nb { struct mlx5_nb nb; @@ -153,21 +154,50 @@ static int any_notifier(struct notifier_block *nb, return NOTIFY_OK; } +#if IS_ENABLED(CONFIG_HWMON) +static void print_sensor_names_in_bit_set(struct mlx5_core_dev *dev, struct mlx5_hwmon *hwmon, + u64 bit_set, int bit_set_offset) +{ + unsigned long *bit_set_ptr = (unsigned long *)&bit_set; + int num_bits = sizeof(bit_set) * BITS_PER_BYTE; + int i; + + for_each_set_bit(i, bit_set_ptr, num_bits) { + const char *sensor_name = hwmon_get_sensor_name(hwmon, i + bit_set_offset); + + mlx5_core_warn(dev, "Sensor name[%d]: %s\n", i + bit_set_offset, sensor_name); + } +} +#endif /* CONFIG_HWMON */ + /* type == MLX5_EVENT_TYPE_TEMP_WARN_EVENT */ static int temp_warn(struct notifier_block *nb, unsigned long type, void *data) { struct mlx5_event_nb *event_nb = mlx5_nb_cof(nb, struct mlx5_event_nb, nb); struct mlx5_events *events = event_nb->ctx; + struct mlx5_core_dev *dev = events->dev; struct mlx5_eqe *eqe = data; u64 value_lsb; u64 value_msb; value_lsb = be64_to_cpu(eqe->data.temp_warning.sensor_warning_lsb); + /* bits 1-63 are not supported for NICs, + * hence read only bit 0 (asic) from lsb.
+ */ + value_lsb &= 0x1; value_msb = be64_to_cpu(eqe->data.temp_warning.sensor_warning_msb); - mlx5_core_warn(events->dev, - "High temperature on sensors with bit set %llx %llx", - value_msb, value_lsb); + if (net_ratelimit()) { + mlx5_core_warn(dev, "High temperature on sensors with bit set %#llx %#llx.\n", + value_msb, value_lsb); +#if IS_ENABLED(CONFIG_HWMON) + if (dev->hwmon) { + print_sensor_names_in_bit_set(dev, dev->hwmon, value_lsb, 0); + print_sensor_names_in_bit_set(dev, dev->hwmon, value_msb, + sizeof(value_lsb) * BITS_PER_BYTE); + } +#endif + } return NOTIFY_OK; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c index ae20c061e0fb2..a47c29571f647 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c @@ -1142,6 +1142,8 @@ const struct mlx5_flow_cmds *mlx5_fs_cmd_get_default(enum fs_flow_table_type typ case FS_FT_RDMA_RX: case FS_FT_RDMA_TX: case FS_FT_PORT_SEL: + case FS_FT_RDMA_TRANSPORT_RX: + case FS_FT_RDMA_TRANSPORT_TX: return mlx5_fs_cmd_get_fw_cmds(); default: return mlx5_fs_cmd_get_stub_cmds(); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 22dc23d991d2e..6163bc98d94a9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -1456,7 +1456,7 @@ mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns, struct mlx5_flow_table *ft; int autogroups_max_fte; - ft = mlx5_create_flow_table(ns, ft_attr); + ft = mlx5_create_vport_flow_table(ns, ft_attr, ft_attr->vport); if (IS_ERR(ft)) return ft; @@ -2764,9 +2764,9 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev, } EXPORT_SYMBOL(mlx5_get_flow_namespace); -struct mlx5_flow_namespace *mlx5_get_flow_vport_acl_namespace(struct mlx5_core_dev *dev, - enum mlx5_flow_namespace_type type, - int vport) +struct mlx5_flow_namespace * +mlx5_get_flow_vport_namespace(struct mlx5_core_dev *dev, + enum mlx5_flow_namespace_type type, int vport_idx) { struct mlx5_flow_steering *steering = dev->priv.steering; @@ -2775,25 +2775,43 @@ struct mlx5_flow_namespace *mlx5_get_flow_vport_acl_namespace(struct mlx5_core_d switch (type) { case MLX5_FLOW_NAMESPACE_ESW_EGRESS: - if (vport >= steering->esw_egress_acl_vports) + if (vport_idx >= steering->esw_egress_acl_vports) return NULL; if (steering->esw_egress_root_ns && - steering->esw_egress_root_ns[vport]) - return &steering->esw_egress_root_ns[vport]->ns; + steering->esw_egress_root_ns[vport_idx]) + return &steering->esw_egress_root_ns[vport_idx]->ns; else return NULL; case MLX5_FLOW_NAMESPACE_ESW_INGRESS: - if (vport >= steering->esw_ingress_acl_vports) + if (vport_idx >= steering->esw_ingress_acl_vports) return NULL; if (steering->esw_ingress_root_ns && - steering->esw_ingress_root_ns[vport]) - return &steering->esw_ingress_root_ns[vport]->ns; + steering->esw_ingress_root_ns[vport_idx]) + return &steering->esw_ingress_root_ns[vport_idx]->ns; + else + return NULL; + case MLX5_FLOW_NAMESPACE_RDMA_TRANSPORT_RX: + if (vport_idx >= steering->rdma_transport_rx_vports) + return NULL; + if (steering->rdma_transport_rx_root_ns && + steering->rdma_transport_rx_root_ns[vport_idx]) + return &steering->rdma_transport_rx_root_ns[vport_idx]->ns; + else + return NULL; + case MLX5_FLOW_NAMESPACE_RDMA_TRANSPORT_TX: + if (vport_idx >= steering->rdma_transport_tx_vports) + return NULL; + + if 
(steering->rdma_transport_tx_root_ns && + steering->rdma_transport_tx_root_ns[vport_idx]) + return &steering->rdma_transport_tx_root_ns[vport_idx]->ns; else return NULL; default: return NULL; } } +EXPORT_SYMBOL(mlx5_get_flow_vport_namespace); static struct fs_prio *_fs_create_prio(struct mlx5_flow_namespace *ns, unsigned int prio, @@ -3199,6 +3217,127 @@ static int init_rdma_tx_root_ns(struct mlx5_flow_steering *steering) return err; } +static int +init_rdma_transport_rx_root_ns_one(struct mlx5_flow_steering *steering, + int vport_idx) +{ + struct fs_prio *prio; + + steering->rdma_transport_rx_root_ns[vport_idx] = + create_root_ns(steering, FS_FT_RDMA_TRANSPORT_RX); + if (!steering->rdma_transport_rx_root_ns[vport_idx]) + return -ENOMEM; + + /* create 1 prio */ + prio = fs_create_prio(&steering->rdma_transport_rx_root_ns[vport_idx]->ns, + MLX5_RDMA_TRANSPORT_BYPASS_PRIO, 1); + return PTR_ERR_OR_ZERO(prio); +} + +static int +init_rdma_transport_tx_root_ns_one(struct mlx5_flow_steering *steering, + int vport_idx) +{ + struct fs_prio *prio; + + steering->rdma_transport_tx_root_ns[vport_idx] = + create_root_ns(steering, FS_FT_RDMA_TRANSPORT_TX); + if (!steering->rdma_transport_tx_root_ns[vport_idx]) + return -ENOMEM; + + /* create 1 prio */ + prio = fs_create_prio(&steering->rdma_transport_tx_root_ns[vport_idx]->ns, + MLX5_RDMA_TRANSPORT_BYPASS_PRIO, 1); + return PTR_ERR_OR_ZERO(prio); +} + +static int init_rdma_transport_rx_root_ns(struct mlx5_flow_steering *steering) +{ + struct mlx5_core_dev *dev = steering->dev; + int total_vports; + int err; + int i; + + /* In case eswitch is not supported and working in legacy mode */ + total_vports = mlx5_eswitch_get_total_vports(dev) ?: 1; + + steering->rdma_transport_rx_root_ns = + kcalloc(total_vports, + sizeof(*steering->rdma_transport_rx_root_ns), + GFP_KERNEL); + if (!steering->rdma_transport_rx_root_ns) + return -ENOMEM; + + for (i = 0; i < total_vports; i++) { + err = init_rdma_transport_rx_root_ns_one(steering, i); + if (err) + goto cleanup_root_ns; + } + steering->rdma_transport_rx_vports = total_vports; + return 0; + +cleanup_root_ns: + while (i--) + cleanup_root_ns(steering->rdma_transport_rx_root_ns[i]); + kfree(steering->rdma_transport_rx_root_ns); + steering->rdma_transport_rx_root_ns = NULL; + return err; +} + +static int init_rdma_transport_tx_root_ns(struct mlx5_flow_steering *steering) +{ + struct mlx5_core_dev *dev = steering->dev; + int total_vports; + int err; + int i; + + /* In case eswitch is not supported and working in legacy mode */ + total_vports = mlx5_eswitch_get_total_vports(dev) ?: 1; + + steering->rdma_transport_tx_root_ns = + kcalloc(total_vports, + sizeof(*steering->rdma_transport_tx_root_ns), + GFP_KERNEL); + if (!steering->rdma_transport_tx_root_ns) + return -ENOMEM; + + for (i = 0; i < total_vports; i++) { + err = init_rdma_transport_tx_root_ns_one(steering, i); + if (err) + goto cleanup_root_ns; + } + steering->rdma_transport_tx_vports = total_vports; + return 0; + +cleanup_root_ns: + while (i--) + cleanup_root_ns(steering->rdma_transport_tx_root_ns[i]); + kfree(steering->rdma_transport_tx_root_ns); + steering->rdma_transport_tx_root_ns = NULL; + return err; +} + +static void cleanup_rdma_transport_roots_ns(struct mlx5_flow_steering *steering) +{ + int i; + + if (steering->rdma_transport_rx_root_ns) { + for (i = 0; i < steering->rdma_transport_rx_vports; i++) + cleanup_root_ns(steering->rdma_transport_rx_root_ns[i]); + + kfree(steering->rdma_transport_rx_root_ns); + steering->rdma_transport_rx_root_ns = NULL; + } + + if
(steering->rdma_transport_tx_root_ns) { + for (i = 0; i < steering->rdma_transport_tx_vports; i++) + cleanup_root_ns(steering->rdma_transport_tx_root_ns[i]); + + kfree(steering->rdma_transport_tx_root_ns); + steering->rdma_transport_tx_root_ns = NULL; + } +} + /* FT and tc chains are stored in the same array so we can re-use the * mlx5_get_fdb_sub_ns() and tc api for FT chains. * When creating a new ns for each chain store it in the first available slot. @@ -3631,6 +3770,7 @@ void mlx5_fs_core_cleanup(struct mlx5_core_dev *dev) cleanup_root_ns(steering->rdma_rx_root_ns); cleanup_root_ns(steering->rdma_tx_root_ns); cleanup_root_ns(steering->egress_root_ns); + cleanup_rdma_transport_roots_ns(steering); devl_params_unregister(priv_to_devlink(dev), mlx5_fs_params, ARRAY_SIZE(mlx5_fs_params)); @@ -3700,6 +3840,18 @@ int mlx5_fs_core_init(struct mlx5_core_dev *dev) goto err; } + if (MLX5_CAP_FLOWTABLE_RDMA_TRANSPORT_RX(dev, ft_support)) { + err = init_rdma_transport_rx_root_ns(steering); + if (err) + goto err; + } + + if (MLX5_CAP_FLOWTABLE_RDMA_TRANSPORT_TX(dev, ft_support)) { + err = init_rdma_transport_tx_root_ns(steering); + if (err) + goto err; + } + return 0; err: @@ -3850,8 +4002,10 @@ mlx5_get_root_namespace(struct mlx5_core_dev *dev, enum mlx5_flow_namespace_type struct mlx5_flow_namespace *ns; if (ns_type == MLX5_FLOW_NAMESPACE_ESW_EGRESS || - ns_type == MLX5_FLOW_NAMESPACE_ESW_INGRESS) - ns = mlx5_get_flow_vport_acl_namespace(dev, ns_type, 0); + ns_type == MLX5_FLOW_NAMESPACE_ESW_INGRESS || + ns_type == MLX5_FLOW_NAMESPACE_RDMA_TRANSPORT_TX || + ns_type == MLX5_FLOW_NAMESPACE_RDMA_TRANSPORT_RX) + ns = mlx5_get_flow_vport_namespace(dev, ns_type, 0); else ns = mlx5_get_flow_namespace(dev, ns_type); if (!ns) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h index 20837e526679d..0767239f651ca 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h @@ -115,7 +115,9 @@ enum fs_flow_table_type { FS_FT_PORT_SEL = 0X9, FS_FT_FDB_RX = 0xa, FS_FT_FDB_TX = 0xb, - FS_FT_MAX_TYPE = FS_FT_FDB_TX, + FS_FT_RDMA_TRANSPORT_RX = 0xd, + FS_FT_RDMA_TRANSPORT_TX = 0xe, + FS_FT_MAX_TYPE = FS_FT_RDMA_TRANSPORT_TX, }; enum fs_flow_table_op_mod { @@ -158,6 +160,10 @@ struct mlx5_flow_steering { struct mlx5_flow_root_namespace *port_sel_root_ns; int esw_egress_acl_vports; int esw_ingress_acl_vports; + struct mlx5_flow_root_namespace **rdma_transport_rx_root_ns; + struct mlx5_flow_root_namespace **rdma_transport_tx_root_ns; + int rdma_transport_rx_vports; + int rdma_transport_tx_vports; }; struct fs_node { @@ -341,16 +347,10 @@ struct mlx5_fc { u64 lastbytes; }; -struct mlx5_fc_bulk_hws_data { - struct mlx5hws_action *hws_action; - struct mutex lock; /* protects hws_action */ - refcount_t hws_action_refcount; -}; - struct mlx5_fc_bulk { struct mlx5_fs_bulk fs_bulk; u32 base_id; - struct mlx5_fc_bulk_hws_data hws_data; + struct mlx5_fs_hws_data hws_data; struct mlx5_fc fcs[]; }; @@ -434,7 +434,9 @@ struct mlx5_flow_root_namespace *find_root(struct fs_node *node); (type == FS_FT_PORT_SEL) ? MLX5_CAP_FLOWTABLE_PORT_SELECTION(mdev, cap) : \ (type == FS_FT_FDB_RX) ? MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, cap) : \ (type == FS_FT_FDB_TX) ? MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, cap) : \ - (BUILD_BUG_ON_ZERO(FS_FT_FDB_TX != FS_FT_MAX_TYPE))\ + (type == FS_FT_RDMA_TRANSPORT_RX) ? MLX5_CAP_FLOWTABLE_RDMA_TRANSPORT_RX(mdev, cap) : \ + (type == FS_FT_RDMA_TRANSPORT_TX) ? 
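/* Editor's note: init_rdma_transport_{rx,tx}_root_ns() and
 * cleanup_rdma_transport_roots_ns() above follow the standard kernel shape
 * for per-vport pointer arrays: kcalloc the array, fill each slot in a
 * loop, unwind only the already-initialized slots with `while (i--)` on
 * failure, and NULL the array so cleanup stays idempotent. A standalone
 * sketch with hypothetical names (malloc/free standing in for kcalloc):
 */
#include <stdlib.h>

struct root_ns { int vport; };

static struct root_ns *root_ns_create(int vport)
{
    struct root_ns *ns = malloc(sizeof(*ns));

    if (ns)
        ns->vport = vport;
    return ns;
}

static int init_all(struct root_ns ***tbl, int n)
{
    int i;

    *tbl = calloc(n, sizeof(**tbl));  /* kcalloc analogue */
    if (!*tbl)
        return -1;
    for (i = 0; i < n; i++) {
        (*tbl)[i] = root_ns_create(i);
        if (!(*tbl)[i])
            goto unwind;
    }
    return 0;
unwind:
    while (i--)                       /* free only what was initialized */
        free((*tbl)[i]);
    free(*tbl);
    *tbl = NULL;                      /* keep later cleanup idempotent */
    return -1;
}

int main(void)
{
    struct root_ns **tbl;

    return init_all(&tbl, 4);
}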
MLX5_CAP_FLOWTABLE_RDMA_TRANSPORT_TX(mdev, cap) : \ + (BUILD_BUG_ON_ZERO(FS_FT_RDMA_TRANSPORT_TX != FS_FT_MAX_TYPE))\ ) #endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.c index c14590acc7726..f6abfd00d7e68 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.c @@ -50,10 +50,12 @@ mlx5_ft_pool_get_avail_sz(struct mlx5_core_dev *dev, enum fs_flow_table_type tab int i, found_i = -1; for (i = ARRAY_SIZE(FT_POOLS) - 1; i >= 0; i--) { - if (dev->priv.ft_pool->ft_left[i] && FT_POOLS[i] >= desired_size && + if (dev->priv.ft_pool->ft_left[i] && + (FT_POOLS[i] >= desired_size || + desired_size == MLX5_FS_MAX_POOL_SIZE) && FT_POOLS[i] <= max_ft_size) { found_i = i; - if (desired_size != POOL_NEXT_SIZE) + if (desired_size != MLX5_FS_MAX_POOL_SIZE) break; } } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.h index 25f4274b372b5..173e312db7204 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.h @@ -7,8 +7,6 @@ #include #include "fs_core.h" -#define POOL_NEXT_SIZE 0 - int mlx5_ft_pool_init(struct mlx5_core_dev *dev); void mlx5_ft_pool_destroy(struct mlx5_core_dev *dev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c index b253d1673398d..57476487e31fe 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c @@ -287,6 +287,13 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev) return err; } + if (MLX5_CAP_GEN(dev, adv_rdma)) { + err = mlx5_core_get_caps_mode(dev, MLX5_CAP_ADV_RDMA, + HCA_CAP_OPMOD_GET_CUR); + if (err) + return err; + } + return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c index 566710d34a7b2..6830a49fe682e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c @@ -345,15 +345,12 @@ static void mlx5_fw_live_patch_event(struct work_struct *work) } #if IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE) -static int mlx5_check_hotplug_interrupt(struct mlx5_core_dev *dev) +static int mlx5_check_hotplug_interrupt(struct mlx5_core_dev *dev, + struct pci_dev *bridge) { - struct pci_dev *bridge = dev->pdev->bus->self; u16 reg16; int err; - if (!bridge) - return -EOPNOTSUPP; - err = pcie_capability_read_word(bridge, PCI_EXP_SLTCTL, ®16); if (err) return err; @@ -416,9 +413,15 @@ static int mlx5_check_dev_ids(struct mlx5_core_dev *dev, u16 dev_id) static bool mlx5_is_reset_now_capable(struct mlx5_core_dev *dev, u8 reset_method) { + struct pci_dev *bridge = dev->pdev->bus->self; u16 dev_id; int err; + if (!bridge) { + mlx5_core_warn(dev, "PCI bus bridge is not accessible\n"); + return false; + } + if (!MLX5_CAP_GEN(dev, fast_teardown)) { mlx5_core_warn(dev, "fast teardown is not supported by firmware\n"); return false; @@ -426,7 +429,7 @@ static bool mlx5_is_reset_now_capable(struct mlx5_core_dev *dev, #if IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE) if (reset_method != MLX5_MFRL_REG_PCI_RESET_METHOD_HOT_RESET) { - err = mlx5_check_hotplug_interrupt(dev); + err = mlx5_check_hotplug_interrupt(dev, bridge); if (err) return false; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c index a6329ca2d9bff..91613d5a36cd4 100644 --- 
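/* Editor's note: the fs_core.h capability macro above ends its ternary
 * chain in BUILD_BUG_ON_ZERO(FS_FT_RDMA_TRANSPORT_TX != FS_FT_MAX_TYPE),
 * so extending the enum without extending the macro becomes a build
 * failure instead of a silent zero. The trick, reproduced standalone (it
 * relies on a negative-width bitfield and a GNU C zero-member struct,
 * exactly as the kernel's definition does):
 */
#define BUILD_BUG_ON_ZERO(e) ((int)sizeof(struct { int : (-!!(e)); }))

enum ft_type { FT_A, FT_B, FT_MAX = FT_B };

#define ft_cap(t)                           \
    ((t) == FT_A ? 1 :                      \
     (t) == FT_B ? 2 :                      \
     BUILD_BUG_ON_ZERO(FT_B != FT_MAX))     /* breaks the build if FT_MAX moves */

int main(void) { return ft_cap(FT_B) == 2 ? 0 : 1; }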
a/drivers/net/ethernet/mellanox/mlx5/core/health.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c @@ -96,6 +96,11 @@ static int mlx5_health_get_rfr(u8 rfr_severity) return rfr_severity >> MLX5_RFR_BIT_OFFSET; } +static int mlx5_health_get_crr(u8 rfr_severity) +{ + return (rfr_severity >> MLX5_CRR_BIT_OFFSET) & 0x01; +} + static bool sensor_fw_synd_rfr(struct mlx5_core_dev *dev) { struct mlx5_core_health *health = &dev->priv.health; @@ -375,6 +380,8 @@ static const char *hsynd_str(u8 synd) return "High temperature"; case MLX5_INITIAL_SEG_HEALTH_SYNDROME_ICM_PCI_POISONED_ERR: return "ICM fetch PCI data poisoned error"; + case MLX5_INITIAL_SEG_HEALTH_SYNDROME_TRUST_LOCKDOWN_ERR: + return "Trust lockdown error"; default: return "unrecognized error"; } @@ -442,12 +449,15 @@ static void print_health_info(struct mlx5_core_dev *dev) mlx5_log(dev, severity, "time %u\n", ioread32be(&h->time)); mlx5_log(dev, severity, "hw_id 0x%08x\n", ioread32be(&h->hw_id)); mlx5_log(dev, severity, "rfr %d\n", mlx5_health_get_rfr(rfr_severity)); + mlx5_log(dev, severity, "crr %d\n", mlx5_health_get_crr(rfr_severity)); mlx5_log(dev, severity, "severity %d (%s)\n", severity, mlx5_loglevel_str(severity)); mlx5_log(dev, severity, "irisc_index %d\n", ioread8(&h->irisc_index)); mlx5_log(dev, severity, "synd 0x%x: %s\n", ioread8(&h->synd), hsynd_str(ioread8(&h->synd))); mlx5_log(dev, severity, "ext_synd 0x%04x\n", ioread16be(&h->ext_synd)); mlx5_log(dev, severity, "raw fw_ver 0x%08x\n", ioread32be(&h->fw_ver)); + if (mlx5_health_get_crr(rfr_severity)) + mlx5_core_warn(dev, "Cold reset is required\n"); } static int @@ -799,14 +809,17 @@ static void poll_health(struct timer_list *t) health->prev = count; if (health->miss_counter == MAX_MISSES) { mlx5_core_err(dev, "device's health compromised - reached miss count\n"); + health->synd = ioread8(&h->synd); print_health_info(dev); queue_work(health->wq, &health->report_work); } prev_synd = health->synd; health->synd = ioread8(&h->synd); - if (health->synd && health->synd != prev_synd) + if (health->synd && health->synd != prev_synd) { + print_health_info(dev); queue_work(health->wq, &health->report_work); + } out: mod_timer(&health->timer, get_next_poll_jiffies(dev)); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/hwmon.c b/drivers/net/ethernet/mellanox/mlx5/core/hwmon.c index 353f81dccd1ce..4ba2636d7fb6c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/hwmon.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/hwmon.c @@ -416,3 +416,8 @@ void mlx5_hwmon_dev_unregister(struct mlx5_core_dev *mdev) mlx5_hwmon_free(hwmon); mdev->hwmon = NULL; } + +const char *hwmon_get_sensor_name(struct mlx5_hwmon *hwmon, int channel) +{ + return hwmon->temp_channel_desc[channel].sensor_name; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/hwmon.h b/drivers/net/ethernet/mellanox/mlx5/core/hwmon.h index 999654a9b9da5..f38271c22c105 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/hwmon.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/hwmon.h @@ -10,6 +10,7 @@ int mlx5_hwmon_dev_register(struct mlx5_core_dev *mdev); void mlx5_hwmon_dev_unregister(struct mlx5_core_dev *mdev); +const char *hwmon_get_sensor_name(struct mlx5_hwmon *hwmon, int channel); #else static inline int mlx5_hwmon_dev_register(struct mlx5_core_dev *mdev) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c b/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c index 1477db7f5307e..2691d88cdee1f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c +++ 
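/* Editor's note: mlx5_health_get_crr() in the health.c hunk above pulls a
 * single flag out of the packed rfr/severity byte with a shift and mask,
 * alongside the existing rfr accessor. A standalone decode sketch; the
 * bit positions and field widths below are illustrative assumptions, not
 * the device's actual register layout.
 */
#include <stdio.h>

#define RFR_BIT_OFFSET 7              /* assumption: top bit = recovery flag */
#define CRR_BIT_OFFSET 6              /* assumption: next bit = cold reset   */
#define SEVERITY_MASK  0x07           /* assumption: low bits = severity     */

static int get_rfr(unsigned char b) { return b >> RFR_BIT_OFFSET; }
static int get_crr(unsigned char b) { return (b >> CRR_BIT_OFFSET) & 0x01; }
static int get_sev(unsigned char b) { return b & SEVERITY_MASK; }

int main(void)
{
    unsigned char rfr_severity = 0xc2;    /* example register value */

    printf("rfr %d crr %d severity %d\n",
           get_rfr(rfr_severity), get_crr(rfr_severity),
           get_sev(rfr_severity));
    if (get_crr(rfr_severity))
        printf("Cold reset is required\n");
    return 0;
}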
b/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c @@ -175,7 +175,7 @@ mlx5_irq_affinity_request(struct mlx5_core_dev *dev, struct mlx5_irq_pool *pool, void mlx5_irq_affinity_irq_release(struct mlx5_core_dev *dev, struct mlx5_irq *irq) { - struct mlx5_irq_pool *pool = mlx5_irq_pool_get(dev); + struct mlx5_irq_pool *pool = mlx5_irq_get_pool(irq); int cpu; cpu = cpumask_first(mlx5_irq_get_affinity_mask(irq)); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c index cea5aa314f6c5..d058cbb4a00c7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c @@ -523,8 +523,7 @@ static struct net_device *mlx5_lag_active_backup_get_netdev(struct mlx5_core_dev ndev = ldev->pf[last_idx].netdev; } - if (ndev) - dev_hold(ndev); + dev_hold(ndev); unlock: spin_unlock_irqrestore(&lag_lock, flags); @@ -584,8 +583,9 @@ void mlx5_modify_lag(struct mlx5_lag *ldev, } } -static int mlx5_lag_set_port_sel_mode_roce(struct mlx5_lag *ldev, - unsigned long *flags) +static int mlx5_lag_set_port_sel_mode(struct mlx5_lag *ldev, + enum mlx5_lag_mode mode, + unsigned long *flags) { int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1); struct mlx5_core_dev *dev0; @@ -593,7 +593,12 @@ static int mlx5_lag_set_port_sel_mode_roce(struct mlx5_lag *ldev, if (first_idx < 0) return -EINVAL; + if (mode == MLX5_LAG_MODE_MPESW || + mode == MLX5_LAG_MODE_MULTIPATH) + return 0; + dev0 = ldev->pf[first_idx].dev; + if (!MLX5_CAP_PORT_SELECTION(dev0, port_select_flow_table)) { if (ldev->ports > 2) return -EINVAL; @@ -608,32 +613,10 @@ static int mlx5_lag_set_port_sel_mode_roce(struct mlx5_lag *ldev, return 0; } -static void mlx5_lag_set_port_sel_mode_offloads(struct mlx5_lag *ldev, - struct lag_tracker *tracker, - enum mlx5_lag_mode mode, - unsigned long *flags) -{ - int first_idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1); - struct lag_func *dev0; - - if (first_idx < 0 || mode == MLX5_LAG_MODE_MPESW) - return; - - dev0 = &ldev->pf[first_idx]; - if (MLX5_CAP_PORT_SELECTION(dev0->dev, port_select_flow_table) && - tracker->tx_type == NETDEV_LAG_TX_TYPE_HASH) { - if (ldev->ports > 2) - ldev->buckets = MLX5_LAG_MAX_HASH_BUCKETS; - set_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, flags); - } -} - static int mlx5_lag_set_flags(struct mlx5_lag *ldev, enum mlx5_lag_mode mode, struct lag_tracker *tracker, bool shared_fdb, unsigned long *flags) { - bool roce_lag = mode == MLX5_LAG_MODE_ROCE; - *flags = 0; if (shared_fdb) { set_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, flags); @@ -643,11 +626,7 @@ static int mlx5_lag_set_flags(struct mlx5_lag *ldev, enum mlx5_lag_mode mode, if (mode == MLX5_LAG_MODE_MPESW) set_bit(MLX5_LAG_MODE_FLAG_FDB_SEL_MODE_NATIVE, flags); - if (roce_lag) - return mlx5_lag_set_port_sel_mode_roce(ldev, flags); - - mlx5_lag_set_port_sel_mode_offloads(ldev, tracker, mode, flags); - return 0; + return mlx5_lag_set_port_sel_mode(ldev, mode, flags); } char *mlx5_get_str_port_sel_mode(enum mlx5_lag_mode mode, unsigned long flags) @@ -951,7 +930,7 @@ void mlx5_disable_lag(struct mlx5_lag *ldev) mlx5_eswitch_reload_ib_reps(ldev->pf[i].dev->priv.eswitch); } -static bool mlx5_shared_fdb_supported(struct mlx5_lag *ldev) +bool mlx5_lag_shared_fdb_supported(struct mlx5_lag *ldev) { int idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1); struct mlx5_core_dev *dev; @@ -1038,7 +1017,7 @@ static void mlx5_do_bond(struct mlx5_lag *ldev) } if (do_bond && !__mlx5_lag_is_active(ldev)) { - bool shared_fdb = 
mlx5_shared_fdb_supported(ldev); + bool shared_fdb = mlx5_lag_shared_fdb_supported(ldev); roce_lag = mlx5_lag_is_roce_lag(ldev); @@ -1052,6 +1031,10 @@ static void mlx5_do_bond(struct mlx5_lag *ldev) if (err) { if (shared_fdb || roce_lag) mlx5_lag_add_devices(ldev); + if (shared_fdb) { + mlx5_ldev_for_each(i, 0, ldev) + mlx5_eswitch_reload_ib_reps(ldev->pf[i].dev->priv.eswitch); + } return; } else if (roce_lag) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h index 01cf723669471..c2f256bb2bc20 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h @@ -92,6 +92,7 @@ mlx5_lag_is_ready(struct mlx5_lag *ldev) return test_bit(MLX5_LAG_FLAG_NDEVS_READY, &ldev->state_flags); } +bool mlx5_lag_shared_fdb_supported(struct mlx5_lag *ldev); bool mlx5_lag_check_prereq(struct mlx5_lag *ldev); void mlx5_modify_lag(struct mlx5_lag *ldev, struct lag_tracker *tracker); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c index ffac0bd6c8952..aad52d3a90e68 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c @@ -65,7 +65,6 @@ static int mlx5_mpesw_metadata_set(struct mlx5_lag *ldev) return err; } -#define MLX5_LAG_MPESW_OFFLOADS_SUPPORTED_PORTS 4 static int enable_mpesw(struct mlx5_lag *ldev) { int idx = mlx5_lag_get_dev_index_by_seq(ldev, MLX5_LAG_P1); @@ -77,13 +76,11 @@ static int enable_mpesw(struct mlx5_lag *ldev) return -EINVAL; dev0 = ldev->pf[idx].dev; - if (ldev->ports > MLX5_LAG_MPESW_OFFLOADS_SUPPORTED_PORTS) - return -EOPNOTSUPP; - if (mlx5_eswitch_mode(dev0) != MLX5_ESWITCH_OFFLOADS || !MLX5_CAP_PORT_SELECTION(dev0, port_select_flow_table) || !MLX5_CAP_GEN(dev0, create_lag_when_not_master_up) || - !mlx5_lag_check_prereq(ldev)) + !mlx5_lag_check_prereq(ldev) || + !mlx5_lag_shared_fdb_supported(ldev)) return -EOPNOTSUPP; err = mlx5_mpesw_metadata_set(ldev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c index bde79cac33a97..d832a12ffec0e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c @@ -97,7 +97,7 @@ static int mlx5_lag_create_port_sel_table(struct mlx5_lag *ldev, mlx5_del_flow_rules(lag_definer->rules[idx]); } j = ldev->buckets; - }; + } goto destroy_fg; } } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c index d61a1a9297c90..65a94e46edcf1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c @@ -43,6 +43,8 @@ #include #endif /* CONFIG_X86 */ +#define MLX5_RT_CLOCK_IDENTITY_SIZE MLX5_FLD_SZ_BYTES(mrtcq_reg, rt_clock_identity) + enum { MLX5_PIN_MODE_IN = 0x0, MLX5_PIN_MODE_OUT = 0x1, @@ -77,6 +79,56 @@ enum { MLX5_MTUTC_OPERATION_ADJUST_TIME_EXTENDED_MAX = 200000, }; +struct mlx5_clock_dev_state { + struct mlx5_core_dev *mdev; + struct mlx5_devcom_comp_dev *compdev; + struct mlx5_nb pps_nb; + struct work_struct out_work; +}; + +struct mlx5_clock_priv { + struct mlx5_clock clock; + struct mlx5_core_dev *mdev; + struct mutex lock; /* protect mdev and used in PTP callbacks */ + struct mlx5_core_dev *event_mdev; +}; + +static struct mlx5_clock_priv *clock_priv(struct mlx5_clock *clock) +{ + return container_of(clock, struct mlx5_clock_priv, 
clock); +} + +static void mlx5_clock_lockdep_assert(struct mlx5_clock *clock) +{ + if (!clock->shared) + return; + + lockdep_assert(lockdep_is_held(&clock_priv(clock)->lock)); +} + +static struct mlx5_core_dev *mlx5_clock_mdev_get(struct mlx5_clock *clock) +{ + mlx5_clock_lockdep_assert(clock); + + return clock_priv(clock)->mdev; +} + +static void mlx5_clock_lock(struct mlx5_clock *clock) +{ + if (!clock->shared) + return; + + mutex_lock(&clock_priv(clock)->lock); +} + +static void mlx5_clock_unlock(struct mlx5_clock *clock) +{ + if (!clock->shared) + return; + + mutex_unlock(&clock_priv(clock)->lock); +} + static bool mlx5_real_time_mode(struct mlx5_core_dev *mdev) { return (mlx5_is_real_time_rq(mdev) || mlx5_is_real_time_sq(mdev)); @@ -94,6 +146,22 @@ static bool mlx5_modify_mtutc_allowed(struct mlx5_core_dev *mdev) return MLX5_CAP_MCAM_FEATURE(mdev, ptpcyc2realtime_modify); } +static int mlx5_clock_identity_get(struct mlx5_core_dev *mdev, + u8 identify[MLX5_RT_CLOCK_IDENTITY_SIZE]) +{ + u32 out[MLX5_ST_SZ_DW(mrtcq_reg)] = {}; + u32 in[MLX5_ST_SZ_DW(mrtcq_reg)] = {}; + int err; + + err = mlx5_core_access_reg(mdev, in, sizeof(in), + out, sizeof(out), MLX5_REG_MRTCQ, 0, 0); + if (!err) + memcpy(identify, MLX5_ADDR_OF(mrtcq_reg, out, rt_clock_identity), + MLX5_RT_CLOCK_IDENTITY_SIZE); + + return err; +} + static u32 mlx5_ptp_shift_constant(u32 dev_freq_khz) { /* Optimal shift constant leads to corrections above just 1 scaled ppm. @@ -119,21 +187,30 @@ static u32 mlx5_ptp_shift_constant(u32 dev_freq_khz) ilog2((U32_MAX / NSEC_PER_MSEC) * dev_freq_khz)); } +static s32 mlx5_clock_getmaxphase(struct mlx5_core_dev *mdev) +{ + return MLX5_CAP_MCAM_FEATURE(mdev, mtutc_time_adjustment_extended_range) ? + MLX5_MTUTC_OPERATION_ADJUST_TIME_EXTENDED_MAX : + MLX5_MTUTC_OPERATION_ADJUST_TIME_MAX; +} + static s32 mlx5_ptp_getmaxphase(struct ptp_clock_info *ptp) { struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info); struct mlx5_core_dev *mdev; + s32 ret; - mdev = container_of(clock, struct mlx5_core_dev, clock); + mlx5_clock_lock(clock); + mdev = mlx5_clock_mdev_get(clock); + ret = mlx5_clock_getmaxphase(mdev); + mlx5_clock_unlock(clock); - return MLX5_CAP_MCAM_FEATURE(mdev, mtutc_time_adjustment_extended_range) ? 
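/* Editor's note: clock_priv() above recovers the private wrapper from the
 * public struct mlx5_clock via container_of(), so most of the driver keeps
 * passing struct mlx5_clock * while the shared-clock bookkeeping (owner
 * mdev, mutex) stays hidden in the container. The idiom, standalone:
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct clock { int shared; };             /* public part             */
struct clock_priv {                       /* private container       */
    struct clock clock;
    int owner_id;
};

static struct clock_priv *clock_priv(struct clock *c)
{
    return container_of(c, struct clock_priv, clock);
}

int main(void)
{
    struct clock_priv p = { .clock = { .shared = 1 }, .owner_id = 7 };
    struct clock *c = &p.clock;           /* callers only ever see this */

    printf("owner %d\n", clock_priv(c)->owner_id);
    return 0;
}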
- MLX5_MTUTC_OPERATION_ADJUST_TIME_EXTENDED_MAX : - MLX5_MTUTC_OPERATION_ADJUST_TIME_MAX; + return ret; } static bool mlx5_is_mtutc_time_adj_cap(struct mlx5_core_dev *mdev, s64 delta) { - s64 max = mlx5_ptp_getmaxphase(&mdev->clock.ptp_info); + s64 max = mlx5_clock_getmaxphase(mdev); if (delta < -max || delta > max) return false; @@ -209,7 +286,7 @@ static int mlx5_mtctr_syncdevicetime(ktime_t *device_time, if (real_time_mode) *device_time = ns_to_ktime(REAL_TIME_TO_NS(device >> 32, device & U32_MAX)); else - *device_time = mlx5_timecounter_cyc2time(&mdev->clock, device); + *device_time = mlx5_timecounter_cyc2time(mdev->clock, device); return 0; } @@ -220,16 +297,23 @@ static int mlx5_ptp_getcrosststamp(struct ptp_clock_info *ptp, struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info); struct system_time_snapshot history_begin = {0}; struct mlx5_core_dev *mdev; + int err; - mdev = container_of(clock, struct mlx5_core_dev, clock); + mlx5_clock_lock(clock); + mdev = mlx5_clock_mdev_get(clock); - if (!mlx5_is_ptm_source_time_available(mdev)) - return -EBUSY; + if (!mlx5_is_ptm_source_time_available(mdev)) { + err = -EBUSY; + goto unlock; + } ktime_get_snapshot(&history_begin); - return get_device_system_crosststamp(mlx5_mtctr_syncdevicetime, mdev, - &history_begin, cts); + err = get_device_system_crosststamp(mlx5_mtctr_syncdevicetime, mdev, + &history_begin, cts); +unlock: + mlx5_clock_unlock(clock); + return err; } #endif /* CONFIG_X86 */ @@ -263,8 +347,7 @@ static u64 read_internal_timer(const struct cyclecounter *cc) { struct mlx5_timer *timer = container_of(cc, struct mlx5_timer, cycles); struct mlx5_clock *clock = container_of(timer, struct mlx5_clock, timer); - struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev, - clock); + struct mlx5_core_dev *mdev = mlx5_clock_mdev_get(clock); return mlx5_read_time(mdev, NULL, false) & cc->mask; } @@ -272,7 +355,7 @@ static u64 read_internal_timer(const struct cyclecounter *cc) static void mlx5_update_clock_info_page(struct mlx5_core_dev *mdev) { struct mlx5_ib_clock_info *clock_info = mdev->clock_info; - struct mlx5_clock *clock = &mdev->clock; + struct mlx5_clock *clock = mdev->clock; struct mlx5_timer *timer; u32 sign; @@ -295,12 +378,10 @@ static void mlx5_update_clock_info_page(struct mlx5_core_dev *mdev) static void mlx5_pps_out(struct work_struct *work) { - struct mlx5_pps *pps_info = container_of(work, struct mlx5_pps, - out_work); - struct mlx5_clock *clock = container_of(pps_info, struct mlx5_clock, - pps_info); - struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev, - clock); + struct mlx5_clock_dev_state *clock_state = container_of(work, struct mlx5_clock_dev_state, + out_work); + struct mlx5_core_dev *mdev = clock_state->mdev; + struct mlx5_clock *clock = mdev->clock; u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0}; unsigned long flags; int i; @@ -330,7 +411,8 @@ static long mlx5_timestamp_overflow(struct ptp_clock_info *ptp_info) unsigned long flags; clock = container_of(ptp_info, struct mlx5_clock, ptp_info); - mdev = container_of(clock, struct mlx5_core_dev, clock); + mlx5_clock_lock(clock); + mdev = mlx5_clock_mdev_get(clock); timer = &clock->timer; if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) @@ -342,6 +424,7 @@ static long mlx5_timestamp_overflow(struct ptp_clock_info *ptp_info) write_sequnlock_irqrestore(&clock->lock, flags); out: + mlx5_clock_unlock(clock); return timer->overflow_period; } @@ -361,15 +444,12 @@ static int mlx5_ptp_settime_real_time(struct mlx5_core_dev *mdev, 
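/* Editor's note: mlx5_clock_lock()/mlx5_clock_unlock() above take the
 * mutex only when clock->shared is set; a clock owned by a single
 * function needs no serialization, so the common case stays lock-free.
 * A sketch of the conditional-lock wrapper (pthreads standing in for the
 * kernel mutex, names hypothetical):
 */
#include <pthread.h>

struct clock {
    int shared;
    pthread_mutex_t lock;
};

static void clock_lock(struct clock *c)
{
    if (c->shared)                    /* exclusive owner skips the lock */
        pthread_mutex_lock(&c->lock);
}

static void clock_unlock(struct clock *c)
{
    if (c->shared)
        pthread_mutex_unlock(&c->lock);
}

int main(void)
{
    struct clock c = { .shared = 1, .lock = PTHREAD_MUTEX_INITIALIZER };

    clock_lock(&c);
    /* ... read or retarget the owning device here ... */
    clock_unlock(&c);
    return 0;
}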
return mlx5_set_mtutc(mdev, in, sizeof(in)); } -static int mlx5_ptp_settime(struct ptp_clock_info *ptp, const struct timespec64 *ts) +static int mlx5_clock_settime(struct mlx5_core_dev *mdev, struct mlx5_clock *clock, + const struct timespec64 *ts) { - struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info); struct mlx5_timer *timer = &clock->timer; - struct mlx5_core_dev *mdev; unsigned long flags; - mdev = container_of(clock, struct mlx5_core_dev, clock); - if (mlx5_modify_mtutc_allowed(mdev)) { int err = mlx5_ptp_settime_real_time(mdev, ts); @@ -385,6 +465,20 @@ static int mlx5_ptp_settime(struct ptp_clock_info *ptp, const struct timespec64 return 0; } +static int mlx5_ptp_settime(struct ptp_clock_info *ptp, const struct timespec64 *ts) +{ + struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info); + struct mlx5_core_dev *mdev; + int err; + + mlx5_clock_lock(clock); + mdev = mlx5_clock_mdev_get(clock); + err = mlx5_clock_settime(mdev, clock, ts); + mlx5_clock_unlock(clock); + + return err; +} + static struct timespec64 mlx5_ptp_gettimex_real_time(struct mlx5_core_dev *mdev, struct ptp_system_timestamp *sts) @@ -404,7 +498,8 @@ static int mlx5_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts, struct mlx5_core_dev *mdev; u64 cycles, ns; - mdev = container_of(clock, struct mlx5_core_dev, clock); + mlx5_clock_lock(clock); + mdev = mlx5_clock_mdev_get(clock); if (mlx5_real_time_mode(mdev)) { *ts = mlx5_ptp_gettimex_real_time(mdev, sts); goto out; @@ -414,6 +509,7 @@ static int mlx5_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts, ns = mlx5_timecounter_cyc2time(clock, cycles); *ts = ns_to_timespec64(ns); out: + mlx5_clock_unlock(clock); return 0; } @@ -444,14 +540,16 @@ static int mlx5_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) struct mlx5_timer *timer = &clock->timer; struct mlx5_core_dev *mdev; unsigned long flags; + int err = 0; - mdev = container_of(clock, struct mlx5_core_dev, clock); + mlx5_clock_lock(clock); + mdev = mlx5_clock_mdev_get(clock); if (mlx5_modify_mtutc_allowed(mdev)) { - int err = mlx5_ptp_adjtime_real_time(mdev, delta); + err = mlx5_ptp_adjtime_real_time(mdev, delta); if (err) - return err; + goto unlock; } write_seqlock_irqsave(&clock->lock, flags); @@ -459,17 +557,23 @@ static int mlx5_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) mlx5_update_clock_info_page(mdev); write_sequnlock_irqrestore(&clock->lock, flags); - return 0; +unlock: + mlx5_clock_unlock(clock); + return err; } static int mlx5_ptp_adjphase(struct ptp_clock_info *ptp, s32 delta) { struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info); struct mlx5_core_dev *mdev; + int err; - mdev = container_of(clock, struct mlx5_core_dev, clock); + mlx5_clock_lock(clock); + mdev = mlx5_clock_mdev_get(clock); + err = mlx5_ptp_adjtime_real_time(mdev, delta); + mlx5_clock_unlock(clock); - return mlx5_ptp_adjtime_real_time(mdev, delta); + return err; } static int mlx5_ptp_freq_adj_real_time(struct mlx5_core_dev *mdev, long scaled_ppm) @@ -498,15 +602,17 @@ static int mlx5_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm) struct mlx5_timer *timer = &clock->timer; struct mlx5_core_dev *mdev; unsigned long flags; + int err = 0; u32 mult; - mdev = container_of(clock, struct mlx5_core_dev, clock); + mlx5_clock_lock(clock); + mdev = mlx5_clock_mdev_get(clock); if (mlx5_modify_mtutc_allowed(mdev)) { - int err = mlx5_ptp_freq_adj_real_time(mdev, scaled_ppm); + err = mlx5_ptp_freq_adj_real_time(mdev, scaled_ppm); if (err) - return 
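/* Editor's note: once callbacks such as mlx5_ptp_adjtime() above hold
 * mlx5_clock_lock(), every early `return err` has to become `goto unlock`
 * so a single exit path drops the lock. The conversion, reduced to its
 * bones:
 */
#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static int do_adjust(int delta, int allowed)
{
    int err = 0;

    pthread_mutex_lock(&lock);
    if (!allowed) {
        err = -1;                 /* was: return -1 (would leak the lock) */
        goto unlock;
    }
    /* ... apply delta while holding the lock ... */
    (void)delta;
unlock:
    pthread_mutex_unlock(&lock);
    return err;
}

int main(void) { return do_adjust(5, 1); }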
err; + goto unlock; } mult = (u32)adjust_by_scaled_ppm(timer->nominal_c_mult, scaled_ppm); @@ -518,7 +624,9 @@ static int mlx5_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm) write_sequnlock_irqrestore(&clock->lock, flags); ptp_schedule_worker(clock->ptp, timer->overflow_period); - return 0; +unlock: + mlx5_clock_unlock(clock); + return err; } static int mlx5_extts_configure(struct ptp_clock_info *ptp, @@ -527,18 +635,14 @@ static int mlx5_extts_configure(struct ptp_clock_info *ptp, { struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info); - struct mlx5_core_dev *mdev = - container_of(clock, struct mlx5_core_dev, clock); u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0}; + struct mlx5_core_dev *mdev; u32 field_select = 0; u8 pin_mode = 0; u8 pattern = 0; int pin = -1; int err = 0; - if (!MLX5_PPS_CAP(mdev)) - return -EOPNOTSUPP; - /* Reject requests with unsupported flags */ if (rq->extts.flags & ~(PTP_ENABLE_FEATURE | PTP_RISING_EDGE | @@ -569,6 +673,14 @@ static int mlx5_extts_configure(struct ptp_clock_info *ptp, field_select = MLX5_MTPPS_FS_ENABLE; } + mlx5_clock_lock(clock); + mdev = mlx5_clock_mdev_get(clock); + + if (!MLX5_PPS_CAP(mdev)) { + err = -EOPNOTSUPP; + goto unlock; + } + MLX5_SET(mtpps_reg, in, pin, pin); MLX5_SET(mtpps_reg, in, pin_mode, pin_mode); MLX5_SET(mtpps_reg, in, pattern, pattern); @@ -577,15 +689,23 @@ static int mlx5_extts_configure(struct ptp_clock_info *ptp, err = mlx5_set_mtpps(mdev, in, sizeof(in)); if (err) - return err; + goto unlock; + + err = mlx5_set_mtppse(mdev, pin, 0, MLX5_EVENT_MODE_REPETETIVE & on); + if (err) + goto unlock; + + clock->pps_info.pin_armed[pin] = on; + clock_priv(clock)->event_mdev = mdev; - return mlx5_set_mtppse(mdev, pin, 0, - MLX5_EVENT_MODE_REPETETIVE & on); +unlock: + mlx5_clock_unlock(clock); + return err; } static u64 find_target_cycles(struct mlx5_core_dev *mdev, s64 target_ns) { - struct mlx5_clock *clock = &mdev->clock; + struct mlx5_clock *clock = mdev->clock; u64 cycles_now, cycles_delta; u64 nsec_now, nsec_delta; struct mlx5_timer *timer; @@ -644,7 +764,7 @@ static int mlx5_perout_conf_out_pulse_duration(struct mlx5_core_dev *mdev, struct ptp_clock_request *rq, u32 *out_pulse_duration_ns) { - struct mlx5_pps *pps_info = &mdev->clock.pps_info; + struct mlx5_pps *pps_info = &mdev->clock->pps_info; u32 out_pulse_duration; struct timespec64 ts; @@ -677,7 +797,7 @@ static int perout_conf_npps_real_time(struct mlx5_core_dev *mdev, struct ptp_clo u32 *field_select, u32 *out_pulse_duration_ns, u64 *period, u64 *time_stamp) { - struct mlx5_pps *pps_info = &mdev->clock.pps_info; + struct mlx5_pps *pps_info = &mdev->clock->pps_info; struct ptp_clock_time *time = &rq->perout.start; struct timespec64 ts; @@ -712,26 +832,18 @@ static int mlx5_perout_configure(struct ptp_clock_info *ptp, { struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info); - struct mlx5_core_dev *mdev = - container_of(clock, struct mlx5_core_dev, clock); - bool rt_mode = mlx5_real_time_mode(mdev); u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0}; u32 out_pulse_duration_ns = 0; + struct mlx5_core_dev *mdev; u32 field_select = 0; u64 npps_period = 0; u64 time_stamp = 0; u8 pin_mode = 0; u8 pattern = 0; + bool rt_mode; int pin = -1; int err = 0; - if (!MLX5_PPS_CAP(mdev)) - return -EOPNOTSUPP; - - /* Reject requests with unsupported flags */ - if (mlx5_perout_verify_flags(mdev, rq->perout.flags)) - return -EOPNOTSUPP; - if (rq->perout.index >= clock->ptp_info.n_pins) return -EINVAL; @@ -740,14 +852,29 @@ static int 
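/* Editor's note: mlx5_ptp_adjfine() above publishes the new multiplier
 * under write_seqlock_irqsave() so lock-free readers retry rather than
 * see a torn update. A toy single-writer sequence counter showing the
 * protocol; unlike the kernel's seqlock_t, this sketch has no memory
 * barriers, writer serialization, or IRQ masking, so it is illustrative
 * only:
 */
#include <stdatomic.h>

struct timer_state {
    atomic_uint seq;
    unsigned long mult, frac;
};

static void writer_update(struct timer_state *t, unsigned long mult)
{
    atomic_fetch_add(&t->seq, 1);     /* odd: update in progress */
    t->mult = mult;
    t->frac = 0;
    atomic_fetch_add(&t->seq, 1);     /* even: consistent again  */
}

static unsigned long reader_mult(struct timer_state *t)
{
    unsigned int s1, s2;
    unsigned long v;

    do {
        s1 = atomic_load(&t->seq);
        v  = t->mult;
        s2 = atomic_load(&t->seq);
    } while (s1 != s2 || (s1 & 1));   /* retry on a torn read */
    return v;
}

int main(void)
{
    struct timer_state t = { .seq = 0, .mult = 1, .frac = 0 };

    writer_update(&t, 42);
    return reader_mult(&t) == 42 ? 0 : 1;
}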
mlx5_perout_configure(struct ptp_clock_info *ptp, if (pin < 0) return -EBUSY; - if (on) { - bool rt_mode = mlx5_real_time_mode(mdev); + mlx5_clock_lock(clock); + mdev = mlx5_clock_mdev_get(clock); + rt_mode = mlx5_real_time_mode(mdev); + + if (!MLX5_PPS_CAP(mdev)) { + err = -EOPNOTSUPP; + goto unlock; + } + + /* Reject requests with unsupported flags */ + if (mlx5_perout_verify_flags(mdev, rq->perout.flags)) { + err = -EOPNOTSUPP; + goto unlock; + } + if (on) { pin_mode = MLX5_PIN_MODE_OUT; pattern = MLX5_OUT_PATTERN_PERIODIC; - if (rt_mode && rq->perout.start.sec > U32_MAX) - return -EINVAL; + if (rt_mode && rq->perout.start.sec > U32_MAX) { + err = -EINVAL; + goto unlock; + } field_select |= MLX5_MTPPS_FS_PIN_MODE | MLX5_MTPPS_FS_PATTERN | @@ -760,7 +887,7 @@ static int mlx5_perout_configure(struct ptp_clock_info *ptp, else err = perout_conf_1pps(mdev, rq, &time_stamp, rt_mode); if (err) - return err; + goto unlock; } MLX5_SET(mtpps_reg, in, pin, pin); @@ -773,13 +900,16 @@ static int mlx5_perout_configure(struct ptp_clock_info *ptp, MLX5_SET(mtpps_reg, in, out_pulse_duration_ns, out_pulse_duration_ns); err = mlx5_set_mtpps(mdev, in, sizeof(in)); if (err) - return err; + goto unlock; if (rt_mode) - return 0; + goto unlock; + + err = mlx5_set_mtppse(mdev, pin, 0, MLX5_EVENT_MODE_REPETETIVE & on); - return mlx5_set_mtppse(mdev, pin, 0, - MLX5_EVENT_MODE_REPETETIVE & on); +unlock: + mlx5_clock_unlock(clock); + return err; } static int mlx5_pps_configure(struct ptp_clock_info *ptp, @@ -866,10 +996,8 @@ static int mlx5_query_mtpps_pin_mode(struct mlx5_core_dev *mdev, u8 pin, mtpps_size, MLX5_REG_MTPPS, 0, 0); } -static int mlx5_get_pps_pin_mode(struct mlx5_clock *clock, u8 pin) +static int mlx5_get_pps_pin_mode(struct mlx5_core_dev *mdev, u8 pin) { - struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev, clock); - u32 out[MLX5_ST_SZ_DW(mtpps_reg)] = {}; u8 mode; int err; @@ -888,8 +1016,9 @@ static int mlx5_get_pps_pin_mode(struct mlx5_clock *clock, u8 pin) return PTP_PF_NONE; } -static void mlx5_init_pin_config(struct mlx5_clock *clock) +static void mlx5_init_pin_config(struct mlx5_core_dev *mdev) { + struct mlx5_clock *clock = mdev->clock; int i; if (!clock->ptp_info.n_pins) @@ -910,15 +1039,15 @@ static void mlx5_init_pin_config(struct mlx5_clock *clock) sizeof(clock->ptp_info.pin_config[i].name), "mlx5_pps%d", i); clock->ptp_info.pin_config[i].index = i; - clock->ptp_info.pin_config[i].func = mlx5_get_pps_pin_mode(clock, i); + clock->ptp_info.pin_config[i].func = mlx5_get_pps_pin_mode(mdev, i); clock->ptp_info.pin_config[i].chan = 0; } } static void mlx5_get_pps_caps(struct mlx5_core_dev *mdev) { - struct mlx5_clock *clock = &mdev->clock; u32 out[MLX5_ST_SZ_DW(mtpps_reg)] = {0}; + struct mlx5_clock *clock = mdev->clock; mlx5_query_mtpps(mdev, out, sizeof(out)); @@ -968,16 +1097,16 @@ static u64 perout_conf_next_event_timer(struct mlx5_core_dev *mdev, static int mlx5_pps_event(struct notifier_block *nb, unsigned long type, void *data) { - struct mlx5_clock *clock = mlx5_nb_cof(nb, struct mlx5_clock, pps_nb); + struct mlx5_clock_dev_state *clock_state = mlx5_nb_cof(nb, struct mlx5_clock_dev_state, + pps_nb); + struct mlx5_core_dev *mdev = clock_state->mdev; + struct mlx5_clock *clock = mdev->clock; struct ptp_clock_event ptp_event; struct mlx5_eqe *eqe = data; int pin = eqe->data.pps.pin; - struct mlx5_core_dev *mdev; unsigned long flags; u64 ns; - mdev = container_of(clock, struct mlx5_core_dev, clock); - switch (clock->ptp_info.pin_config[pin].func) { case PTP_PF_EXTTS: 
ptp_event.index = pin; @@ -997,11 +1126,15 @@ static int mlx5_pps_event(struct notifier_block *nb, ptp_clock_event(clock->ptp, &ptp_event); break; case PTP_PF_PEROUT: + if (clock->shared) { + mlx5_core_warn(mdev, " Received unexpected PPS out event\n"); + break; + } ns = perout_conf_next_event_timer(mdev, clock); write_seqlock_irqsave(&clock->lock, flags); clock->pps_info.start[pin] = ns; write_sequnlock_irqrestore(&clock->lock, flags); - schedule_work(&clock->pps_info.out_work); + schedule_work(&clock_state->out_work); break; default: mlx5_core_err(mdev, " Unhandled clock PPS event, func %d\n", @@ -1013,7 +1146,7 @@ static int mlx5_pps_event(struct notifier_block *nb, static void mlx5_timecounter_init(struct mlx5_core_dev *mdev) { - struct mlx5_clock *clock = &mdev->clock; + struct mlx5_clock *clock = mdev->clock; struct mlx5_timer *timer = &clock->timer; u32 dev_freq; @@ -1029,10 +1162,10 @@ static void mlx5_timecounter_init(struct mlx5_core_dev *mdev) ktime_to_ns(ktime_get_real())); } -static void mlx5_init_overflow_period(struct mlx5_clock *clock) +static void mlx5_init_overflow_period(struct mlx5_core_dev *mdev) { - struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev, clock); struct mlx5_ib_clock_info *clock_info = mdev->clock_info; + struct mlx5_clock *clock = mdev->clock; struct mlx5_timer *timer = &clock->timer; u64 overflow_cycles; u64 frac = 0; @@ -1065,7 +1198,7 @@ static void mlx5_init_overflow_period(struct mlx5_clock *clock) static void mlx5_init_clock_info(struct mlx5_core_dev *mdev) { - struct mlx5_clock *clock = &mdev->clock; + struct mlx5_clock *clock = mdev->clock; struct mlx5_ib_clock_info *info; struct mlx5_timer *timer; @@ -1088,7 +1221,7 @@ static void mlx5_init_clock_info(struct mlx5_core_dev *mdev) static void mlx5_init_timer_max_freq_adjustment(struct mlx5_core_dev *mdev) { - struct mlx5_clock *clock = &mdev->clock; + struct mlx5_clock *clock = mdev->clock; u32 out[MLX5_ST_SZ_DW(mtutc_reg)] = {}; u32 in[MLX5_ST_SZ_DW(mtutc_reg)] = {}; u8 log_max_freq_adjustment = 0; @@ -1107,7 +1240,7 @@ static void mlx5_init_timer_max_freq_adjustment(struct mlx5_core_dev *mdev) static void mlx5_init_timer_clock(struct mlx5_core_dev *mdev) { - struct mlx5_clock *clock = &mdev->clock; + struct mlx5_clock *clock = mdev->clock; /* Configure the PHC */ clock->ptp_info = mlx5_ptp_clock_info; @@ -1123,38 +1256,30 @@ static void mlx5_init_timer_clock(struct mlx5_core_dev *mdev) mlx5_timecounter_init(mdev); mlx5_init_clock_info(mdev); - mlx5_init_overflow_period(clock); + mlx5_init_overflow_period(mdev); if (mlx5_real_time_mode(mdev)) { struct timespec64 ts; ktime_get_real_ts64(&ts); - mlx5_ptp_settime(&clock->ptp_info, &ts); + mlx5_clock_settime(mdev, clock, &ts); } } static void mlx5_init_pps(struct mlx5_core_dev *mdev) { - struct mlx5_clock *clock = &mdev->clock; - if (!MLX5_PPS_CAP(mdev)) return; mlx5_get_pps_caps(mdev); - mlx5_init_pin_config(clock); + mlx5_init_pin_config(mdev); } -void mlx5_init_clock(struct mlx5_core_dev *mdev) +static void mlx5_init_clock_dev(struct mlx5_core_dev *mdev) { - struct mlx5_clock *clock = &mdev->clock; - - if (!MLX5_CAP_GEN(mdev, device_frequency_khz)) { - mlx5_core_warn(mdev, "invalid device_frequency_khz, aborting HW clock init\n"); - return; - } + struct mlx5_clock *clock = mdev->clock; seqlock_init(&clock->lock); - INIT_WORK(&clock->pps_info.out_work, mlx5_pps_out); /* Initialize the device clock */ mlx5_init_timer_clock(mdev); @@ -1163,35 +1288,27 @@ void mlx5_init_clock(struct mlx5_core_dev *mdev) mlx5_init_pps(mdev); 
clock->ptp = ptp_clock_register(&clock->ptp_info, - &mdev->pdev->dev); + clock->shared ? NULL : &mdev->pdev->dev); if (IS_ERR(clock->ptp)) { - mlx5_core_warn(mdev, "ptp_clock_register failed %ld\n", + mlx5_core_warn(mdev, "%sptp_clock_register failed %ld\n", + clock->shared ? "shared clock " : "", PTR_ERR(clock->ptp)); clock->ptp = NULL; } - MLX5_NB_INIT(&clock->pps_nb, mlx5_pps_event, PPS_EVENT); - mlx5_eq_notifier_register(mdev, &clock->pps_nb); - if (clock->ptp) ptp_schedule_worker(clock->ptp, 0); } -void mlx5_cleanup_clock(struct mlx5_core_dev *mdev) +static void mlx5_destroy_clock_dev(struct mlx5_core_dev *mdev) { - struct mlx5_clock *clock = &mdev->clock; + struct mlx5_clock *clock = mdev->clock; - if (!MLX5_CAP_GEN(mdev, device_frequency_khz)) - return; - - mlx5_eq_notifier_unregister(mdev, &clock->pps_nb); if (clock->ptp) { ptp_clock_unregister(clock->ptp); clock->ptp = NULL; } - cancel_work_sync(&clock->pps_info.out_work); - if (mdev->clock_info) { free_page((unsigned long)mdev->clock_info); mdev->clock_info = NULL; @@ -1199,3 +1316,248 @@ void mlx5_cleanup_clock(struct mlx5_core_dev *mdev) kfree(clock->ptp_info.pin_config); } + +static void mlx5_clock_free(struct mlx5_core_dev *mdev) +{ + struct mlx5_clock_priv *cpriv = clock_priv(mdev->clock); + + mlx5_destroy_clock_dev(mdev); + mutex_destroy(&cpriv->lock); + kfree(cpriv); + mdev->clock = NULL; +} + +static int mlx5_clock_alloc(struct mlx5_core_dev *mdev, bool shared) +{ + struct mlx5_clock_priv *cpriv; + struct mlx5_clock *clock; + + cpriv = kzalloc(sizeof(*cpriv), GFP_KERNEL); + if (!cpriv) + return -ENOMEM; + + mutex_init(&cpriv->lock); + cpriv->mdev = mdev; + clock = &cpriv->clock; + clock->shared = shared; + mdev->clock = clock; + mlx5_clock_lock(clock); + mlx5_init_clock_dev(mdev); + mlx5_clock_unlock(clock); + + if (!clock->shared) + return 0; + + if (!clock->ptp) { + mlx5_core_warn(mdev, "failed to create ptp dev shared by multiple functions"); + mlx5_clock_free(mdev); + return -EINVAL; + } + + return 0; +} + +static void mlx5_shared_clock_register(struct mlx5_core_dev *mdev, u64 key) +{ + struct mlx5_core_dev *peer_dev, *next = NULL; + struct mlx5_devcom_comp_dev *pos; + + mdev->clock_state->compdev = mlx5_devcom_register_component(mdev->priv.devc, + MLX5_DEVCOM_SHARED_CLOCK, + key, NULL, mdev); + if (IS_ERR(mdev->clock_state->compdev)) + return; + + mlx5_devcom_comp_lock(mdev->clock_state->compdev); + mlx5_devcom_for_each_peer_entry(mdev->clock_state->compdev, peer_dev, pos) { + if (peer_dev->clock) { + next = peer_dev; + break; + } + } + + if (next) { + mdev->clock = next->clock; + /* clock info is shared among all the functions using the same clock */ + mdev->clock_info = next->clock_info; + } else { + mlx5_clock_alloc(mdev, true); + } + mlx5_devcom_comp_unlock(mdev->clock_state->compdev); + + if (!mdev->clock) { + mlx5_devcom_unregister_component(mdev->clock_state->compdev); + mdev->clock_state->compdev = NULL; + } +} + +static void mlx5_shared_clock_unregister(struct mlx5_core_dev *mdev) +{ + struct mlx5_core_dev *peer_dev, *next = NULL; + struct mlx5_clock *clock = mdev->clock; + struct mlx5_devcom_comp_dev *pos; + + mlx5_devcom_comp_lock(mdev->clock_state->compdev); + mlx5_devcom_for_each_peer_entry(mdev->clock_state->compdev, peer_dev, pos) { + if (peer_dev->clock && peer_dev != mdev) { + next = peer_dev; + break; + } + } + + if (next) { + struct mlx5_clock_priv *cpriv = clock_priv(clock); + + mlx5_clock_lock(clock); + if (mdev == cpriv->mdev) + cpriv->mdev = next; + mlx5_clock_unlock(clock); + } else { + 
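/* Editor's note: mlx5_shared_clock_register() above walks the devcom peer
 * list under the component lock, adopts the first peer that already owns
 * a clock, and only otherwise allocates one of its own. A generic
 * join-or-create sketch (hypothetical types; the locking is omitted):
 */
#include <stdlib.h>

struct clock { int users; };
struct dev {
    struct clock *clock;
    struct dev *next_peer;
};

static struct clock *join_or_create(struct dev *self, struct dev *peers)
{
    for (struct dev *p = peers; p; p = p->next_peer)
        if (p->clock && p != self)
            return self->clock = p->clock;   /* adopt the shared clock */
    self->clock = calloc(1, sizeof(*self->clock));
    return self->clock;                      /* first function creates it */
}

int main(void)
{
    struct dev a = { 0 }, b = { 0 };

    a.next_peer = &b;
    join_or_create(&a, &b);       /* no peer has a clock: a creates one */
    b.next_peer = &a;
    join_or_create(&b, &a);       /* b adopts a's clock                 */
    return a.clock == b.clock ? 0 : 1;
}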
mlx5_clock_free(mdev); + } + + mdev->clock = NULL; + mdev->clock_info = NULL; + mlx5_devcom_comp_unlock(mdev->clock_state->compdev); + + mlx5_devcom_unregister_component(mdev->clock_state->compdev); +} + +static void mlx5_clock_arm_pps_in_event(struct mlx5_clock *clock, + struct mlx5_core_dev *new_mdev, + struct mlx5_core_dev *old_mdev) +{ + struct ptp_clock_info *ptp_info = &clock->ptp_info; + struct mlx5_clock_priv *cpriv = clock_priv(clock); + int i; + + for (i = 0; i < ptp_info->n_pins; i++) { + if (ptp_info->pin_config[i].func != PTP_PF_EXTTS || + !clock->pps_info.pin_armed[i]) + continue; + + if (new_mdev) { + mlx5_set_mtppse(new_mdev, i, 0, MLX5_EVENT_MODE_REPETETIVE); + cpriv->event_mdev = new_mdev; + } else { + cpriv->event_mdev = NULL; + } + + if (old_mdev) + mlx5_set_mtppse(old_mdev, i, 0, MLX5_EVENT_MODE_DISABLE); + } +} + +void mlx5_clock_load(struct mlx5_core_dev *mdev) +{ + struct mlx5_clock *clock = mdev->clock; + struct mlx5_clock_priv *cpriv; + + if (!MLX5_CAP_GEN(mdev, device_frequency_khz)) + return; + + INIT_WORK(&mdev->clock_state->out_work, mlx5_pps_out); + MLX5_NB_INIT(&mdev->clock_state->pps_nb, mlx5_pps_event, PPS_EVENT); + mlx5_eq_notifier_register(mdev, &mdev->clock_state->pps_nb); + + if (!clock->shared) { + mlx5_clock_arm_pps_in_event(clock, mdev, NULL); + return; + } + + cpriv = clock_priv(clock); + mlx5_devcom_comp_lock(mdev->clock_state->compdev); + mlx5_clock_lock(clock); + if (mdev == cpriv->mdev && mdev != cpriv->event_mdev) + mlx5_clock_arm_pps_in_event(clock, mdev, cpriv->event_mdev); + mlx5_clock_unlock(clock); + mlx5_devcom_comp_unlock(mdev->clock_state->compdev); +} + +void mlx5_clock_unload(struct mlx5_core_dev *mdev) +{ + struct mlx5_core_dev *peer_dev, *next = NULL; + struct mlx5_clock *clock = mdev->clock; + struct mlx5_devcom_comp_dev *pos; + + if (!MLX5_CAP_GEN(mdev, device_frequency_khz)) + return; + + if (!clock->shared) { + mlx5_clock_arm_pps_in_event(clock, NULL, mdev); + goto out; + } + + mlx5_devcom_comp_lock(mdev->clock_state->compdev); + mlx5_devcom_for_each_peer_entry(mdev->clock_state->compdev, peer_dev, pos) { + if (peer_dev->clock && peer_dev != mdev) { + next = peer_dev; + break; + } + } + + mlx5_clock_lock(clock); + if (mdev == clock_priv(clock)->event_mdev) + mlx5_clock_arm_pps_in_event(clock, next, mdev); + mlx5_clock_unlock(clock); + mlx5_devcom_comp_unlock(mdev->clock_state->compdev); + +out: + mlx5_eq_notifier_unregister(mdev, &mdev->clock_state->pps_nb); + cancel_work_sync(&mdev->clock_state->out_work); +} + +static struct mlx5_clock null_clock; + +int mlx5_init_clock(struct mlx5_core_dev *mdev) +{ + u8 identity[MLX5_RT_CLOCK_IDENTITY_SIZE]; + struct mlx5_clock_dev_state *clock_state; + u64 key; + int err; + + if (!MLX5_CAP_GEN(mdev, device_frequency_khz)) { + mdev->clock = &null_clock; + mlx5_core_warn(mdev, "invalid device_frequency_khz, aborting HW clock init\n"); + return 0; + } + + clock_state = kzalloc(sizeof(*clock_state), GFP_KERNEL); + if (!clock_state) + return -ENOMEM; + clock_state->mdev = mdev; + mdev->clock_state = clock_state; + + if (MLX5_CAP_MCAM_REG3(mdev, mrtcq) && mlx5_real_time_mode(mdev)) { + if (mlx5_clock_identity_get(mdev, identity)) { + mlx5_core_warn(mdev, "failed to get rt clock identity, create ptp dev per function\n"); + } else { + memcpy(&key, &identity, sizeof(key)); + mlx5_shared_clock_register(mdev, key); + } + } + + if (!mdev->clock) { + err = mlx5_clock_alloc(mdev, false); + if (err) { + kfree(clock_state); + mdev->clock_state = NULL; + return err; + } + } + + return 0; +} + +void 
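/* Editor's note: when device_frequency_khz is invalid, mlx5_init_clock()
 * above points mdev->clock at a static null_clock rather than leaving it
 * NULL, so hot paths like mlx5_clock_get_ptp_index() can dereference the
 * clock unconditionally. The null-object idiom, standalone:
 */
#include <stdio.h>

struct clock { int shared; void *ptp; };

static struct clock null_clock;           /* zeroed: no ptp, not shared */

struct dev { struct clock *clock; };

static void dev_init(struct dev *d, int has_clock_cap)
{
    static struct clock real = { .shared = 0, .ptp = &real };

    d->clock = has_clock_cap ? &real : &null_clock;
}

static int ptp_index(struct dev *d)
{
    return d->clock->ptp ? 1 : -1;        /* safe: clock is never NULL */
}

int main(void)
{
    struct dev d;

    dev_init(&d, 0);
    printf("ptp index %d\n", ptp_index(&d));
    return 0;
}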
mlx5_cleanup_clock(struct mlx5_core_dev *mdev) +{ + if (!MLX5_CAP_GEN(mdev, device_frequency_khz)) + return; + + if (mdev->clock->shared) + mlx5_shared_clock_unregister(mdev); + else + mlx5_clock_free(mdev); + kfree(mdev->clock_state); + mdev->clock_state = NULL; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h index bd95b9f8d1439..c18a652c0faa1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h @@ -33,6 +33,35 @@ #ifndef __LIB_CLOCK_H__ #define __LIB_CLOCK_H__ +#include + +#define MAX_PIN_NUM 8 +struct mlx5_pps { + u8 pin_caps[MAX_PIN_NUM]; + u64 start[MAX_PIN_NUM]; + u8 enabled; + u64 min_npps_period; + u64 min_out_pulse_duration_ns; + bool pin_armed[MAX_PIN_NUM]; +}; + +struct mlx5_timer { + struct cyclecounter cycles; + struct timecounter tc; + u32 nominal_c_mult; + unsigned long overflow_period; +}; + +struct mlx5_clock { + seqlock_t lock; + struct hwtstamp_config hwtstamp_config; + struct ptp_clock *ptp; + struct ptp_clock_info ptp_info; + struct mlx5_pps pps_info; + struct mlx5_timer timer; + bool shared; +}; + static inline bool mlx5_is_real_time_rq(struct mlx5_core_dev *mdev) { u8 rq_ts_format_cap = MLX5_CAP_GEN(mdev, rq_ts_format); @@ -54,12 +83,14 @@ static inline bool mlx5_is_real_time_sq(struct mlx5_core_dev *mdev) typedef ktime_t (*cqe_ts_to_ns)(struct mlx5_clock *, u64); #if IS_ENABLED(CONFIG_PTP_1588_CLOCK) -void mlx5_init_clock(struct mlx5_core_dev *mdev); +int mlx5_init_clock(struct mlx5_core_dev *mdev); void mlx5_cleanup_clock(struct mlx5_core_dev *mdev); +void mlx5_clock_load(struct mlx5_core_dev *mdev); +void mlx5_clock_unload(struct mlx5_core_dev *mdev); static inline int mlx5_clock_get_ptp_index(struct mlx5_core_dev *mdev) { - return mdev->clock.ptp ? ptp_clock_index(mdev->clock.ptp) : -1; + return mdev->clock->ptp ? ptp_clock_index(mdev->clock->ptp) : -1; } static inline ktime_t mlx5_timecounter_cyc2time(struct mlx5_clock *clock, @@ -87,8 +118,10 @@ static inline ktime_t mlx5_real_time_cyc2time(struct mlx5_clock *clock, return ns_to_ktime(time); } #else -static inline void mlx5_init_clock(struct mlx5_core_dev *mdev) {} +static inline int mlx5_init_clock(struct mlx5_core_dev *mdev) { return 0; } static inline void mlx5_cleanup_clock(struct mlx5_core_dev *mdev) {} +static inline void mlx5_clock_load(struct mlx5_core_dev *mdev) {} +static inline void mlx5_clock_unload(struct mlx5_core_dev *mdev) {} static inline int mlx5_clock_get_ptp_index(struct mlx5_core_dev *mdev) { return -1; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h index d58032dd0df74..c79699b94a025 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h @@ -11,6 +11,7 @@ enum mlx5_devcom_component { MLX5_DEVCOM_MPV, MLX5_DEVCOM_HCA_PORTS, MLX5_DEVCOM_SD_GROUP, + MLX5_DEVCOM_SHARED_CLOCK, MLX5_DEVCOM_NUM_COMPONENTS, }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c index a80ecb672f33d..0a3c260af3772 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c @@ -161,7 +161,8 @@ mlx5_chains_create_table(struct mlx5_fs_chains *chains, ft_attr.flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT | MLX5_FLOW_TABLE_TUNNEL_EN_DECAP); - sz = (chain == mlx5_chains_get_nf_ft_chain(chains)) ? 
FT_TBL_SZ : POOL_NEXT_SIZE; + sz = (chain == mlx5_chains_get_nf_ft_chain(chains)) ? + FT_TBL_SZ : MLX5_FS_MAX_POOL_SIZE; ft_attr.max_fte = sz; /* We use chains_default_ft(chains) as the table's next_ft till @@ -196,6 +197,11 @@ mlx5_chains_create_table(struct mlx5_fs_chains *chains, ns = mlx5_get_flow_namespace(chains->dev, chains->ns); } + if (!ns) { + mlx5_core_warn(chains->dev, "Failed to get flow namespace\n"); + return ERR_PTR(-EOPNOTSUPP); + } + ft_attr.autogroup.num_reserved_entries = 2; ft_attr.autogroup.max_num_groups = chains->group_num; ft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr); @@ -699,7 +705,7 @@ mlx5_chains_create_global_table(struct mlx5_fs_chains *chains) goto err_ignore; } - chain = mlx5_chains_get_chain_range(chains), + chain = mlx5_chains_get_chain_range(chains); prio = mlx5_chains_get_prio_range(chains); level = mlx5_chains_get_level_range(chains); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c index 9f13cea164465..eb3bd9c7f66eb 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c @@ -61,6 +61,25 @@ static void mlx5_cleanup_ttc_rules(struct mlx5_ttc_table *ttc) } } +static const char *mlx5_traffic_types_names[MLX5_NUM_TT] = { + [MLX5_TT_IPV4_TCP] = "TT_IPV4_TCP", + [MLX5_TT_IPV6_TCP] = "TT_IPV6_TCP", + [MLX5_TT_IPV4_UDP] = "TT_IPV4_UDP", + [MLX5_TT_IPV6_UDP] = "TT_IPV6_UDP", + [MLX5_TT_IPV4_IPSEC_AH] = "TT_IPV4_IPSEC_AH", + [MLX5_TT_IPV6_IPSEC_AH] = "TT_IPV6_IPSEC_AH", + [MLX5_TT_IPV4_IPSEC_ESP] = "TT_IPV4_IPSEC_ESP", + [MLX5_TT_IPV6_IPSEC_ESP] = "TT_IPV6_IPSEC_ESP", + [MLX5_TT_IPV4] = "TT_IPV4", + [MLX5_TT_IPV6] = "TT_IPV6", + [MLX5_TT_ANY] = "TT_ANY" +}; + +const char *mlx5_ttc_get_name(enum mlx5_traffic_types tt) +{ + return mlx5_traffic_types_names[tt]; +} + struct mlx5_etype_proto { u16 etype; u8 proto; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.h index 92eea6bea310b..ab9434fe3ae66 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.h @@ -49,6 +49,7 @@ struct ttc_params { struct mlx5_flow_destination tunnel_dests[MLX5_NUM_TUNNEL_TT]; }; +const char *mlx5_ttc_get_name(enum mlx5_traffic_types tt); struct mlx5_flow_table *mlx5_get_ttc_flow_table(struct mlx5_ttc_table *ttc); struct mlx5_ttc_table *mlx5_create_ttc_table(struct mlx5_core_dev *dev, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index ec956c4bcebdb..41e8660c819c0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -1038,7 +1038,11 @@ static int mlx5_init_once(struct mlx5_core_dev *dev) mlx5_init_reserved_gids(dev); - mlx5_init_clock(dev); + err = mlx5_init_clock(dev); + if (err) { + mlx5_core_err(dev, "failed to initialize hardware clock\n"); + goto err_tables_cleanup; + } dev->vxlan = mlx5_vxlan_create(dev); dev->geneve = mlx5_geneve_create(dev); @@ -1046,7 +1050,7 @@ static int mlx5_init_once(struct mlx5_core_dev *dev) err = mlx5_init_rl_table(dev); if (err) { mlx5_core_err(dev, "Failed to init rate limiting\n"); - goto err_tables_cleanup; + goto err_clock_cleanup; } err = mlx5_mpfs_init(dev); @@ -1123,10 +1127,11 @@ static int mlx5_init_once(struct mlx5_core_dev *dev) mlx5_mpfs_cleanup(dev); err_rl_cleanup: mlx5_cleanup_rl_table(dev); -err_tables_cleanup: +err_clock_cleanup: 
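/* Editor's note: mlx5_ttc_get_name() in the fs_ttc.c hunk above is a
 * designated-initializer string table indexed by the traffic-type enum.
 * The standalone sketch below adds a bounds-and-NULL guard, a worthwhile
 * hardening when the enum has gaps (the original relies on callers
 * passing valid types):
 */
#include <stdio.h>

enum tt { TT_IPV4_TCP, TT_IPV6_TCP, TT_ANY, TT_NUM };

static const char *tt_names[TT_NUM] = {
    [TT_IPV4_TCP] = "TT_IPV4_TCP",
    [TT_IPV6_TCP] = "TT_IPV6_TCP",
    [TT_ANY]      = "TT_ANY",
};

static const char *tt_name(enum tt t)
{
    if ((int)t < 0 || t >= TT_NUM || !tt_names[t])
        return "TT_UNKNOWN";          /* guard against enum gaps */
    return tt_names[t];
}

int main(void)
{
    printf("%s\n", tt_name(TT_ANY));
    return 0;
}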
mlx5_geneve_destroy(dev->geneve); mlx5_vxlan_destroy(dev->vxlan); mlx5_cleanup_clock(dev); +err_tables_cleanup: mlx5_cleanup_reserved_gids(dev); mlx5_cq_debugfs_cleanup(dev); mlx5_fw_reset_cleanup(dev); @@ -1205,24 +1210,24 @@ static int mlx5_function_enable(struct mlx5_core_dev *dev, bool boot, u64 timeou dev->caps.embedded_cpu = mlx5_read_embedded_cpu(dev); mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_UP); - mlx5_start_health_poll(dev); - err = mlx5_core_enable_hca(dev, 0); if (err) { mlx5_core_err(dev, "enable hca failed\n"); - goto stop_health_poll; + goto err_cmd_cleanup; } + mlx5_start_health_poll(dev); + err = mlx5_core_set_issi(dev); if (err) { mlx5_core_err(dev, "failed to set issi\n"); - goto err_disable_hca; + goto stop_health_poll; } err = mlx5_satisfy_startup_pages(dev, 1); if (err) { mlx5_core_err(dev, "failed to allocate boot pages\n"); - goto err_disable_hca; + goto stop_health_poll; } err = mlx5_tout_query_dtor(dev); @@ -1235,10 +1240,9 @@ static int mlx5_function_enable(struct mlx5_core_dev *dev, bool boot, u64 timeou reclaim_boot_pages: mlx5_reclaim_startup_pages(dev); -err_disable_hca: - mlx5_core_disable_hca(dev, 0); stop_health_poll: mlx5_stop_health_poll(dev, boot); + mlx5_core_disable_hca(dev, 0); err_cmd_cleanup: mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_DOWN); mlx5_cmd_disable(dev); @@ -1249,8 +1253,8 @@ static int mlx5_function_enable(struct mlx5_core_dev *dev, bool boot, u64 timeou static void mlx5_function_disable(struct mlx5_core_dev *dev, bool boot) { mlx5_reclaim_startup_pages(dev); - mlx5_core_disable_hca(dev, 0); mlx5_stop_health_poll(dev, boot); + mlx5_core_disable_hca(dev, 0); mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_DOWN); mlx5_cmd_disable(dev); } @@ -1359,6 +1363,8 @@ static int mlx5_load(struct mlx5_core_dev *dev) goto err_eq_table; } + mlx5_clock_load(dev); + err = mlx5_fw_tracer_init(dev->tracer); if (err) { mlx5_core_err(dev, "Failed to init FW tracer %d\n", err); @@ -1442,6 +1448,7 @@ static int mlx5_load(struct mlx5_core_dev *dev) mlx5_hv_vhca_cleanup(dev->hv_vhca); mlx5_fw_reset_events_stop(dev); mlx5_fw_tracer_cleanup(dev->tracer); + mlx5_clock_unload(dev); mlx5_eq_table_destroy(dev); err_eq_table: mlx5_irq_table_destroy(dev); @@ -1468,6 +1475,7 @@ static void mlx5_unload(struct mlx5_core_dev *dev) mlx5_hv_vhca_cleanup(dev->hv_vhca); mlx5_fw_reset_events_stop(dev); mlx5_fw_tracer_cleanup(dev->tracer); + mlx5_clock_unload(dev); mlx5_eq_table_destroy(dev); mlx5_irq_table_destroy(dev); mlx5_pagealloc_stop(dev); @@ -1795,6 +1803,7 @@ static const int types[] = { MLX5_CAP_ADV_VIRTUALIZATION, MLX5_CAP_CRYPTO, MLX5_CAP_SHAMPO, + MLX5_CAP_ADV_RDMA, }; static void mlx5_hca_caps_free(struct mlx5_core_dev *dev) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h index 99de67c3aa743..2e02bdea8361d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h @@ -114,6 +114,26 @@ struct mlx5_cmd_alias_obj_create_attr { u8 access_key[ACCESS_KEY_LEN]; }; +struct mlx5_port_eth_proto { + u32 cap; + u32 admin; + u32 oper; +}; + +struct mlx5_module_eeprom_query_params { + u16 size; + u16 offset; + u16 i2c_address; + u32 page; + u32 bank; + u32 module_number; +}; + +struct mlx5_link_info { + u32 speed; + u32 lanes; +}; + static inline void mlx5_printk(struct mlx5_core_dev *dev, int level, const char *format, ...) 
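/* Editor's note: the main.c hunks above reorder bring-up so the health
 * poller runs only while the HCA is enabled, and rename the error labels
 * so teardown mirrors setup exactly in reverse. The kernel's goto-ladder
 * shape, standalone with hypothetical steps:
 */
#include <stdio.h>

static int  enable_hca(void)   { return 0; }
static void disable_hca(void)  { }
static void start_health(void) { }
static void stop_health(void)  { }
static int  set_issi(void)     { return 0; }

static int function_enable(void)
{
    int err = enable_hca();

    if (err)
        return err;

    start_health();               /* only poll a device that is up */

    err = set_issi();
    if (err)
        goto stop_health_poll;
    return 0;

stop_health_poll:                 /* unwind in reverse setup order */
    stop_health();
    disable_hca();
    return err;
}

int main(void) { return function_enable(); }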
{ struct device *device = dev->device; @@ -280,6 +300,78 @@ int mlx5_set_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 arm, u8 mode); struct mlx5_dm *mlx5_dm_create(struct mlx5_core_dev *dev); void mlx5_dm_cleanup(struct mlx5_core_dev *dev); +void mlx5_toggle_port_link(struct mlx5_core_dev *dev); +int mlx5_set_port_admin_status(struct mlx5_core_dev *dev, + enum mlx5_port_status status); +int mlx5_query_port_admin_status(struct mlx5_core_dev *dev, + enum mlx5_port_status *status); +int mlx5_set_port_beacon(struct mlx5_core_dev *dev, u16 beacon_duration); + +int mlx5_set_port_mtu(struct mlx5_core_dev *dev, u16 mtu, u8 port); +int mlx5_set_port_pause(struct mlx5_core_dev *dev, u32 rx_pause, u32 tx_pause); +int mlx5_query_port_pause(struct mlx5_core_dev *dev, + u32 *rx_pause, u32 *tx_pause); + +int mlx5_set_port_pfc(struct mlx5_core_dev *dev, u8 pfc_en_tx, u8 pfc_en_rx); +int mlx5_query_port_pfc(struct mlx5_core_dev *dev, u8 *pfc_en_tx, + u8 *pfc_en_rx); + +int mlx5_set_port_stall_watermark(struct mlx5_core_dev *dev, + u16 stall_critical_watermark, + u16 stall_minor_watermark); +int mlx5_query_port_stall_watermark(struct mlx5_core_dev *dev, + u16 *stall_critical_watermark, + u16 *stall_minor_watermark); + +int mlx5_max_tc(struct mlx5_core_dev *mdev); +int mlx5_set_port_prio_tc(struct mlx5_core_dev *mdev, u8 *prio_tc); +int mlx5_query_port_prio_tc(struct mlx5_core_dev *mdev, + u8 prio, u8 *tc); +int mlx5_set_port_tc_group(struct mlx5_core_dev *mdev, u8 *tc_group); +int mlx5_query_port_tc_group(struct mlx5_core_dev *mdev, + u8 tc, u8 *tc_group); +int mlx5_set_port_tc_bw_alloc(struct mlx5_core_dev *mdev, u8 *tc_bw); +int mlx5_query_port_tc_bw_alloc(struct mlx5_core_dev *mdev, + u8 tc, u8 *bw_pct); +int mlx5_modify_port_ets_rate_limit(struct mlx5_core_dev *mdev, + u8 *max_bw_value, + u8 *max_bw_unit); +int mlx5_query_port_ets_rate_limit(struct mlx5_core_dev *mdev, + u8 *max_bw_value, + u8 *max_bw_unit); +int mlx5_set_port_wol(struct mlx5_core_dev *mdev, u8 wol_mode); +int mlx5_query_port_wol(struct mlx5_core_dev *mdev, u8 *wol_mode); + +int mlx5_query_ports_check(struct mlx5_core_dev *mdev, u32 *out, int outlen); +int mlx5_set_ports_check(struct mlx5_core_dev *mdev, u32 *in, int inlen); +int mlx5_set_port_fcs(struct mlx5_core_dev *mdev, u8 enable); +void mlx5_query_port_fcs(struct mlx5_core_dev *mdev, bool *supported, + bool *enabled); +int mlx5_query_module_eeprom(struct mlx5_core_dev *dev, + u16 offset, u16 size, u8 *data); +int +mlx5_query_module_eeprom_by_page(struct mlx5_core_dev *dev, + struct mlx5_module_eeprom_query_params *params, + u8 *data); + +int mlx5_query_port_dcbx_param(struct mlx5_core_dev *mdev, u32 *out); +int mlx5_set_port_dcbx_param(struct mlx5_core_dev *mdev, u32 *in); +int mlx5_set_trust_state(struct mlx5_core_dev *mdev, u8 trust_state); +int mlx5_query_trust_state(struct mlx5_core_dev *mdev, u8 *trust_state); +int mlx5_set_dscp2prio(struct mlx5_core_dev *mdev, u8 dscp, u8 prio); +int mlx5_query_dscp2prio(struct mlx5_core_dev *mdev, u8 *dscp2prio); + +int mlx5_port_query_eth_proto(struct mlx5_core_dev *dev, u8 port, bool ext, + struct mlx5_port_eth_proto *eproto); +bool mlx5_ptys_ext_supported(struct mlx5_core_dev *mdev); +const struct mlx5_link_info *mlx5_port_ptys2info(struct mlx5_core_dev *mdev, + u32 eth_proto_oper, + bool force_legacy); +u32 mlx5_port_info2linkmodes(struct mlx5_core_dev *mdev, + struct mlx5_link_info *info, + bool force_legacy); +int mlx5_port_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed); + #define MLX5_PPS_CAP(mdev) (MLX5_CAP_GEN((mdev), pps) 
&& \ MLX5_CAP_GEN((mdev), pps_modify) && \ MLX5_CAP_MCAM_FEATURE((mdev), mtpps_fs) && \ @@ -346,6 +438,8 @@ int mlx5_vport_set_other_func_cap(struct mlx5_core_dev *dev, const void *hca_cap #define mlx5_vport_get_other_func_general_cap(dev, vport, out) \ mlx5_vport_get_other_func_cap(dev, vport, out, MLX5_CAP_GENERAL) +int mlx5_vport_get_vhca_id(struct mlx5_core_dev *dev, u16 vport, u16 *vhca_id); + static inline u32 mlx5_sriov_get_vf_total_msix(struct pci_dev *pdev) { struct mlx5_core_dev *dev = pci_get_drvdata(pdev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h index 0881e961d8b17..586688da9940e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h @@ -10,12 +10,15 @@ struct mlx5_irq; struct cpu_rmap; +struct mlx5_irq_pool; int mlx5_irq_table_init(struct mlx5_core_dev *dev); void mlx5_irq_table_cleanup(struct mlx5_core_dev *dev); int mlx5_irq_table_create(struct mlx5_core_dev *dev); void mlx5_irq_table_destroy(struct mlx5_core_dev *dev); void mlx5_irq_table_free_irqs(struct mlx5_core_dev *dev); +struct mlx5_irq_pool * +mlx5_irq_table_get_comp_irq_pool(struct mlx5_core_dev *dev); int mlx5_irq_table_get_num_comp(struct mlx5_irq_table *table); int mlx5_irq_table_get_sfs_vec(struct mlx5_irq_table *table); struct mlx5_irq_table *mlx5_irq_table_get(struct mlx5_core_dev *dev); @@ -38,7 +41,6 @@ struct cpumask *mlx5_irq_get_affinity_mask(struct mlx5_irq *irq); int mlx5_irq_get_index(struct mlx5_irq *irq); int mlx5_irq_get_irq(const struct mlx5_irq *irq); -struct mlx5_irq_pool; #ifdef CONFIG_MLX5_SF struct mlx5_irq *mlx5_irq_affinity_irq_request_auto(struct mlx5_core_dev *dev, struct cpumask *used_cpus, u16 vecidx); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c index 7db9cab9bedf6..2c5f850c31f68 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c @@ -378,6 +378,11 @@ int mlx5_irq_get_index(struct mlx5_irq *irq) return irq->map.index; } +struct mlx5_irq_pool *mlx5_irq_get_pool(struct mlx5_irq *irq) +{ + return irq->pool; +} + /* irq_pool API */ /* requesting an irq from a given pool according to given index */ @@ -405,18 +410,20 @@ static struct mlx5_irq_pool *sf_ctrl_irq_pool_get(struct mlx5_irq_table *irq_tab return irq_table->sf_ctrl_pool; } -static struct mlx5_irq_pool *sf_irq_pool_get(struct mlx5_irq_table *irq_table) +static struct mlx5_irq_pool * +sf_comp_irq_pool_get(struct mlx5_irq_table *irq_table) { return irq_table->sf_comp_pool; } -struct mlx5_irq_pool *mlx5_irq_pool_get(struct mlx5_core_dev *dev) +struct mlx5_irq_pool * +mlx5_irq_table_get_comp_irq_pool(struct mlx5_core_dev *dev) { struct mlx5_irq_table *irq_table = mlx5_irq_table_get(dev); struct mlx5_irq_pool *pool = NULL; if (mlx5_core_is_sf(dev)) - pool = sf_irq_pool_get(irq_table); + pool = sf_comp_irq_pool_get(irq_table); /* In some configs, there won't be a pool of SFs IRQs. Hence, returning * the PF IRQs pool in case the SF pool doesn't exist. @@ -572,7 +579,7 @@ irq_pool_alloc(struct mlx5_core_dev *dev, int start, int size, char *name, pool->min_threshold = min_threshold * MLX5_EQ_REFS_PER_IRQ; pool->max_threshold = max_threshold * MLX5_EQ_REFS_PER_IRQ; mlx5_core_dbg(dev, "pool->name = %s, pool->size = %d, pool->start = %d", - name, size, start); + name ? 
name : "mlx5_pcif_pool", size, start); return pool; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.h b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.h index c4d377f8df308..cc064425fe160 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.h @@ -28,7 +28,6 @@ struct mlx5_irq_pool { struct mlx5_core_dev *dev; }; -struct mlx5_irq_pool *mlx5_irq_pool_get(struct mlx5_core_dev *dev); static inline bool mlx5_irq_pool_is_sf_pool(struct mlx5_irq_pool *pool) { return !strncmp("mlx5_sf", pool->name, strlen("mlx5_sf")); @@ -40,5 +39,6 @@ struct mlx5_irq *mlx5_irq_alloc(struct mlx5_irq_pool *pool, int i, int mlx5_irq_get_locked(struct mlx5_irq *irq); int mlx5_irq_read_locked(struct mlx5_irq *irq); int mlx5_irq_put(struct mlx5_irq *irq); +struct mlx5_irq_pool *mlx5_irq_get_pool(struct mlx5_irq *irq); #endif /* __PCI_IRQ_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c index 50931584132b9..549f1066d2a50 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c @@ -196,7 +196,6 @@ void mlx5_toggle_port_link(struct mlx5_core_dev *dev) if (ps == MLX5_PORT_UP) mlx5_set_port_admin_status(dev, MLX5_PORT_UP); } -EXPORT_SYMBOL_GPL(mlx5_toggle_port_link); int mlx5_set_port_admin_status(struct mlx5_core_dev *dev, enum mlx5_port_status status) @@ -210,7 +209,6 @@ int mlx5_set_port_admin_status(struct mlx5_core_dev *dev, return mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out), MLX5_REG_PAOS, 0, 1); } -EXPORT_SYMBOL_GPL(mlx5_set_port_admin_status); int mlx5_query_port_admin_status(struct mlx5_core_dev *dev, enum mlx5_port_status *status) @@ -227,7 +225,6 @@ int mlx5_query_port_admin_status(struct mlx5_core_dev *dev, *status = MLX5_GET(paos_reg, out, admin_status); return 0; } -EXPORT_SYMBOL_GPL(mlx5_query_port_admin_status); static void mlx5_query_port_mtu(struct mlx5_core_dev *dev, u16 *admin_mtu, u16 *max_mtu, u16 *oper_mtu, u8 port) @@ -257,7 +254,6 @@ int mlx5_set_port_mtu(struct mlx5_core_dev *dev, u16 mtu, u8 port) return mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out), MLX5_REG_PMTU, 0, 1); } -EXPORT_SYMBOL_GPL(mlx5_set_port_mtu); void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, u16 *max_mtu, u8 port) @@ -447,7 +443,6 @@ int mlx5_query_module_eeprom(struct mlx5_core_dev *dev, return mlx5_query_mcia(dev, &query, data); } -EXPORT_SYMBOL_GPL(mlx5_query_module_eeprom); int mlx5_query_module_eeprom_by_page(struct mlx5_core_dev *dev, struct mlx5_module_eeprom_query_params *params, @@ -467,7 +462,6 @@ int mlx5_query_module_eeprom_by_page(struct mlx5_core_dev *dev, return mlx5_query_mcia(dev, params, data); } -EXPORT_SYMBOL_GPL(mlx5_query_module_eeprom_by_page); static int mlx5_query_port_pvlc(struct mlx5_core_dev *dev, u32 *pvlc, int pvlc_size, u8 local_port) @@ -518,7 +512,6 @@ int mlx5_set_port_pause(struct mlx5_core_dev *dev, u32 rx_pause, u32 tx_pause) return mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out), MLX5_REG_PFCC, 0, 1); } -EXPORT_SYMBOL_GPL(mlx5_set_port_pause); int mlx5_query_port_pause(struct mlx5_core_dev *dev, u32 *rx_pause, u32 *tx_pause) @@ -538,7 +531,6 @@ int mlx5_query_port_pause(struct mlx5_core_dev *dev, return 0; } -EXPORT_SYMBOL_GPL(mlx5_query_port_pause); int mlx5_set_port_stall_watermark(struct mlx5_core_dev *dev, u16 stall_critical_watermark, @@ -597,7 +589,6 @@ int mlx5_set_port_pfc(struct mlx5_core_dev *dev, u8 pfc_en_tx, u8 pfc_en_rx) return 
mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out), MLX5_REG_PFCC, 0, 1); } -EXPORT_SYMBOL_GPL(mlx5_set_port_pfc); int mlx5_query_port_pfc(struct mlx5_core_dev *dev, u8 *pfc_en_tx, u8 *pfc_en_rx) { @@ -616,7 +607,6 @@ int mlx5_query_port_pfc(struct mlx5_core_dev *dev, u8 *pfc_en_tx, u8 *pfc_en_rx) return 0; } -EXPORT_SYMBOL_GPL(mlx5_query_port_pfc); int mlx5_max_tc(struct mlx5_core_dev *mdev) { @@ -667,7 +657,6 @@ int mlx5_set_port_prio_tc(struct mlx5_core_dev *mdev, u8 *prio_tc) return 0; } -EXPORT_SYMBOL_GPL(mlx5_set_port_prio_tc); int mlx5_query_port_prio_tc(struct mlx5_core_dev *mdev, u8 prio, u8 *tc) @@ -689,7 +678,6 @@ int mlx5_query_port_prio_tc(struct mlx5_core_dev *mdev, return err; } -EXPORT_SYMBOL_GPL(mlx5_query_port_prio_tc); static int mlx5_set_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *in, int inlen) @@ -728,7 +716,6 @@ int mlx5_set_port_tc_group(struct mlx5_core_dev *mdev, u8 *tc_group) return mlx5_set_port_qetcr_reg(mdev, in, sizeof(in)); } -EXPORT_SYMBOL_GPL(mlx5_set_port_tc_group); int mlx5_query_port_tc_group(struct mlx5_core_dev *mdev, u8 tc, u8 *tc_group) @@ -749,7 +736,6 @@ int mlx5_query_port_tc_group(struct mlx5_core_dev *mdev, return 0; } -EXPORT_SYMBOL_GPL(mlx5_query_port_tc_group); int mlx5_set_port_tc_bw_alloc(struct mlx5_core_dev *mdev, u8 *tc_bw) { @@ -763,7 +749,6 @@ int mlx5_set_port_tc_bw_alloc(struct mlx5_core_dev *mdev, u8 *tc_bw) return mlx5_set_port_qetcr_reg(mdev, in, sizeof(in)); } -EXPORT_SYMBOL_GPL(mlx5_set_port_tc_bw_alloc); int mlx5_query_port_tc_bw_alloc(struct mlx5_core_dev *mdev, u8 tc, u8 *bw_pct) @@ -784,7 +769,6 @@ int mlx5_query_port_tc_bw_alloc(struct mlx5_core_dev *mdev, return 0; } -EXPORT_SYMBOL_GPL(mlx5_query_port_tc_bw_alloc); int mlx5_modify_port_ets_rate_limit(struct mlx5_core_dev *mdev, u8 *max_bw_value, @@ -808,7 +792,6 @@ int mlx5_modify_port_ets_rate_limit(struct mlx5_core_dev *mdev, return mlx5_set_port_qetcr_reg(mdev, in, sizeof(in)); } -EXPORT_SYMBOL_GPL(mlx5_modify_port_ets_rate_limit); int mlx5_query_port_ets_rate_limit(struct mlx5_core_dev *mdev, u8 *max_bw_value, @@ -834,7 +817,6 @@ int mlx5_query_port_ets_rate_limit(struct mlx5_core_dev *mdev, return 0; } -EXPORT_SYMBOL_GPL(mlx5_query_port_ets_rate_limit); int mlx5_set_port_wol(struct mlx5_core_dev *mdev, u8 wol_mode) { @@ -845,7 +827,6 @@ int mlx5_set_port_wol(struct mlx5_core_dev *mdev, u8 wol_mode) MLX5_SET(set_wol_rol_in, in, wol_mode, wol_mode); return mlx5_cmd_exec_in(mdev, set_wol_rol, in); } -EXPORT_SYMBOL_GPL(mlx5_set_port_wol); int mlx5_query_port_wol(struct mlx5_core_dev *mdev, u8 *wol_mode) { @@ -860,7 +841,6 @@ int mlx5_query_port_wol(struct mlx5_core_dev *mdev, u8 *wol_mode) return err; } -EXPORT_SYMBOL_GPL(mlx5_query_port_wol); int mlx5_query_ports_check(struct mlx5_core_dev *mdev, u32 *out, int outlen) { @@ -1058,53 +1038,57 @@ int mlx5_query_dscp2prio(struct mlx5_core_dev *mdev, u8 *dscp2prio) } /* speed in units of 1Mb */ -static const u32 mlx5e_link_speed[MLX5E_LINK_MODES_NUMBER] = { - [MLX5E_1000BASE_CX_SGMII] = 1000, - [MLX5E_1000BASE_KX] = 1000, - [MLX5E_10GBASE_CX4] = 10000, - [MLX5E_10GBASE_KX4] = 10000, - [MLX5E_10GBASE_KR] = 10000, - [MLX5E_20GBASE_KR2] = 20000, - [MLX5E_40GBASE_CR4] = 40000, - [MLX5E_40GBASE_KR4] = 40000, - [MLX5E_56GBASE_R4] = 56000, - [MLX5E_10GBASE_CR] = 10000, - [MLX5E_10GBASE_SR] = 10000, - [MLX5E_10GBASE_ER] = 10000, - [MLX5E_40GBASE_SR4] = 40000, - [MLX5E_40GBASE_LR4] = 40000, - [MLX5E_50GBASE_SR2] = 50000, - [MLX5E_100GBASE_CR4] = 100000, - [MLX5E_100GBASE_SR4] = 100000, - [MLX5E_100GBASE_KR4] = 100000, - 
[MLX5E_100GBASE_LR4] = 100000, - [MLX5E_100BASE_TX] = 100, - [MLX5E_1000BASE_T] = 1000, - [MLX5E_10GBASE_T] = 10000, - [MLX5E_25GBASE_CR] = 25000, - [MLX5E_25GBASE_KR] = 25000, - [MLX5E_25GBASE_SR] = 25000, - [MLX5E_50GBASE_CR2] = 50000, - [MLX5E_50GBASE_KR2] = 50000, +static const struct mlx5_link_info mlx5e_link_info[MLX5E_LINK_MODES_NUMBER] = { + [MLX5E_1000BASE_CX_SGMII] = {.speed = 1000, .lanes = 1}, + [MLX5E_1000BASE_KX] = {.speed = 1000, .lanes = 1}, + [MLX5E_10GBASE_CX4] = {.speed = 10000, .lanes = 4}, + [MLX5E_10GBASE_KX4] = {.speed = 10000, .lanes = 4}, + [MLX5E_10GBASE_KR] = {.speed = 10000, .lanes = 1}, + [MLX5E_20GBASE_KR2] = {.speed = 20000, .lanes = 2}, + [MLX5E_40GBASE_CR4] = {.speed = 40000, .lanes = 4}, + [MLX5E_40GBASE_KR4] = {.speed = 40000, .lanes = 4}, + [MLX5E_56GBASE_R4] = {.speed = 56000, .lanes = 4}, + [MLX5E_10GBASE_CR] = {.speed = 10000, .lanes = 1}, + [MLX5E_10GBASE_SR] = {.speed = 10000, .lanes = 1}, + [MLX5E_10GBASE_ER] = {.speed = 10000, .lanes = 1}, + [MLX5E_40GBASE_SR4] = {.speed = 40000, .lanes = 4}, + [MLX5E_40GBASE_LR4] = {.speed = 40000, .lanes = 4}, + [MLX5E_50GBASE_SR2] = {.speed = 50000, .lanes = 2}, + [MLX5E_100GBASE_CR4] = {.speed = 100000, .lanes = 4}, + [MLX5E_100GBASE_SR4] = {.speed = 100000, .lanes = 4}, + [MLX5E_100GBASE_KR4] = {.speed = 100000, .lanes = 4}, + [MLX5E_100GBASE_LR4] = {.speed = 100000, .lanes = 4}, + [MLX5E_100BASE_TX] = {.speed = 100, .lanes = 1}, + [MLX5E_1000BASE_T] = {.speed = 1000, .lanes = 1}, + [MLX5E_10GBASE_T] = {.speed = 10000, .lanes = 1}, + [MLX5E_25GBASE_CR] = {.speed = 25000, .lanes = 1}, + [MLX5E_25GBASE_KR] = {.speed = 25000, .lanes = 1}, + [MLX5E_25GBASE_SR] = {.speed = 25000, .lanes = 1}, + [MLX5E_50GBASE_CR2] = {.speed = 50000, .lanes = 2}, + [MLX5E_50GBASE_KR2] = {.speed = 50000, .lanes = 2}, }; -static const u32 mlx5e_ext_link_speed[MLX5E_EXT_LINK_MODES_NUMBER] = { - [MLX5E_SGMII_100M] = 100, - [MLX5E_1000BASE_X_SGMII] = 1000, - [MLX5E_5GBASE_R] = 5000, - [MLX5E_10GBASE_XFI_XAUI_1] = 10000, - [MLX5E_40GBASE_XLAUI_4_XLPPI_4] = 40000, - [MLX5E_25GAUI_1_25GBASE_CR_KR] = 25000, - [MLX5E_50GAUI_2_LAUI_2_50GBASE_CR2_KR2] = 50000, - [MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR] = 50000, - [MLX5E_CAUI_4_100GBASE_CR4_KR4] = 100000, - [MLX5E_100GAUI_2_100GBASE_CR2_KR2] = 100000, - [MLX5E_200GAUI_4_200GBASE_CR4_KR4] = 200000, - [MLX5E_400GAUI_8_400GBASE_CR8] = 400000, - [MLX5E_100GAUI_1_100GBASE_CR_KR] = 100000, - [MLX5E_200GAUI_2_200GBASE_CR2_KR2] = 200000, - [MLX5E_400GAUI_4_400GBASE_CR4_KR4] = 400000, - [MLX5E_800GAUI_8_800GBASE_CR8_KR8] = 800000, +static const struct mlx5_link_info +mlx5e_ext_link_info[MLX5E_EXT_LINK_MODES_NUMBER] = { + [MLX5E_SGMII_100M] = {.speed = 100, .lanes = 1}, + [MLX5E_1000BASE_X_SGMII] = {.speed = 1000, .lanes = 1}, + [MLX5E_5GBASE_R] = {.speed = 5000, .lanes = 1}, + [MLX5E_10GBASE_XFI_XAUI_1] = {.speed = 10000, .lanes = 1}, + [MLX5E_40GBASE_XLAUI_4_XLPPI_4] = {.speed = 40000, .lanes = 4}, + [MLX5E_25GAUI_1_25GBASE_CR_KR] = {.speed = 25000, .lanes = 1}, + [MLX5E_50GAUI_2_LAUI_2_50GBASE_CR2_KR2] = {.speed = 50000, .lanes = 2}, + [MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR] = {.speed = 50000, .lanes = 1}, + [MLX5E_CAUI_4_100GBASE_CR4_KR4] = {.speed = 100000, .lanes = 4}, + [MLX5E_100GAUI_2_100GBASE_CR2_KR2] = {.speed = 100000, .lanes = 2}, + [MLX5E_200GAUI_4_200GBASE_CR4_KR4] = {.speed = 200000, .lanes = 4}, + [MLX5E_400GAUI_8_400GBASE_CR8] = {.speed = 400000, .lanes = 8}, + [MLX5E_100GAUI_1_100GBASE_CR_KR] = {.speed = 100000, .lanes = 1}, + [MLX5E_200GAUI_2_200GBASE_CR2_KR2] = {.speed = 200000, .lanes = 
2}, + [MLX5E_400GAUI_4_400GBASE_CR4_KR4] = {.speed = 400000, .lanes = 4}, + [MLX5E_800GAUI_8_800GBASE_CR8_KR8] = {.speed = 800000, .lanes = 8}, + [MLX5E_200GAUI_1_200GBASE_CR1_KR1] = {.speed = 200000, .lanes = 1}, + [MLX5E_400GAUI_2_400GBASE_CR2_KR2] = {.speed = 400000, .lanes = 2}, + [MLX5E_800GAUI_4_800GBASE_CR4_KR4] = {.speed = 800000, .lanes = 4}, }; int mlx5_port_query_eth_proto(struct mlx5_core_dev *dev, u8 port, bool ext, @@ -1142,54 +1126,61 @@ bool mlx5_ptys_ext_supported(struct mlx5_core_dev *mdev) return !!eproto.cap; } -static void mlx5e_port_get_speed_arr(struct mlx5_core_dev *mdev, - const u32 **arr, u32 *size, - bool force_legacy) +static void mlx5e_port_get_link_mode_info_arr(struct mlx5_core_dev *mdev, + const struct mlx5_link_info **arr, + u32 *size, + bool force_legacy) { bool ext = force_legacy ? false : mlx5_ptys_ext_supported(mdev); - *size = ext ? ARRAY_SIZE(mlx5e_ext_link_speed) : - ARRAY_SIZE(mlx5e_link_speed); - *arr = ext ? mlx5e_ext_link_speed : mlx5e_link_speed; + *size = ext ? ARRAY_SIZE(mlx5e_ext_link_info) : + ARRAY_SIZE(mlx5e_link_info); + *arr = ext ? mlx5e_ext_link_info : mlx5e_link_info; } -u32 mlx5_port_ptys2speed(struct mlx5_core_dev *mdev, u32 eth_proto_oper, - bool force_legacy) +const struct mlx5_link_info *mlx5_port_ptys2info(struct mlx5_core_dev *mdev, + u32 eth_proto_oper, + bool force_legacy) { unsigned long temp = eth_proto_oper; - const u32 *table; - u32 speed = 0; + const struct mlx5_link_info *table; u32 max_size; int i; - mlx5e_port_get_speed_arr(mdev, &table, &max_size, force_legacy); + mlx5e_port_get_link_mode_info_arr(mdev, &table, &max_size, + force_legacy); i = find_first_bit(&temp, max_size); if (i < max_size) - speed = table[i]; - return speed; + return &table[i]; + + return NULL; } -u32 mlx5_port_speed2linkmodes(struct mlx5_core_dev *mdev, u32 speed, - bool force_legacy) +u32 mlx5_port_info2linkmodes(struct mlx5_core_dev *mdev, + struct mlx5_link_info *info, + bool force_legacy) { + const struct mlx5_link_info *table; u32 link_modes = 0; - const u32 *table; u32 max_size; int i; - mlx5e_port_get_speed_arr(mdev, &table, &max_size, force_legacy); + mlx5e_port_get_link_mode_info_arr(mdev, &table, &max_size, + force_legacy); for (i = 0; i < max_size; ++i) { - if (table[i] == speed) - link_modes |= MLX5E_PROT_MASK(i); + if (table[i].speed == info->speed) { + if (!info->lanes || table[i].lanes == info->lanes) + link_modes |= MLX5E_PROT_MASK(i); + } } return link_modes; } int mlx5_port_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed) { + const struct mlx5_link_info *table; struct mlx5_port_eth_proto eproto; u32 max_speed = 0; - const u32 *table; u32 max_size; bool ext; int err; @@ -1200,10 +1191,10 @@ int mlx5_port_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed) if (err) return err; - mlx5e_port_get_speed_arr(mdev, &table, &max_size, false); + mlx5e_port_get_link_mode_info_arr(mdev, &table, &max_size, false); for (i = 0; i < max_size; ++i) if (eproto.cap & MLX5E_PROT_MASK(i)) - max_speed = max(max_speed, table[i]); + max_speed = max(max_speed, table[i].speed); *speed = max_speed; return 0; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c index b96909fbeb12d..0864ba625c07d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c @@ -285,7 +285,7 @@ mlx5_sf_new_check_attr(struct mlx5_core_dev *dev, const struct devlink_port_new_ NL_SET_ERR_MSG_MOD(extack, "External controller is unsupported"); 
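/*
 * The pfnum check that follows is the substantive change in this hunk:
 * the user-supplied pfnum is now validated against the PCI function
 * number reported by the PCI core, PCI_FUNC(dev->pdev->devfn), instead
 * of the driver-internal index from mlx5_get_dev_index(dev), so the
 * "Invalid pfnum supplied" rejection is tied to the device's actual
 * PCI function rather than the driver's internal device numbering.
 */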
return -EOPNOTSUPP; } - if (new_attr->pfnum != mlx5_get_dev_index(dev)) { + if (new_attr->pfnum != PCI_FUNC(dev->pdev->devfn)) { NL_SET_ERR_MSG_MOD(extack, "Invalid pfnum supplied"); return -EOPNOTSUPP; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c index 3dbd4efa21a2a..19dce1ba512d4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c @@ -220,7 +220,7 @@ static int hws_bwc_queue_poll(struct mlx5hws_context *ctx, bool drain) { unsigned long timeout = jiffies + - msecs_to_jiffies(MLX5HWS_BWC_POLLING_TIMEOUT * MSEC_PER_SEC); + secs_to_jiffies(MLX5HWS_BWC_POLLING_TIMEOUT); struct mlx5hws_flow_op_result comp[MLX5HWS_BWC_MATCHER_REHASH_BURST_TH]; u16 burst_th = hws_bwc_get_burst_th(ctx, queue_id); bool got_comp = *pending_rules >= burst_th; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.h index f9f569131ddeb..47f7ed1415535 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.h @@ -24,8 +24,8 @@ struct mlx5hws_bwc_matcher { struct mlx5hws_matcher *matcher; struct mlx5hws_match_template *mt; struct mlx5hws_action_template *at[MLX5HWS_BWC_MATCHER_ATTACH_AT_NUM]; + u32 priority; u8 num_of_at; - u16 priority; u8 size_log; atomic_t num_of_rules; struct list_head *rules; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.c index 487e75476b0ab..e8f98c109b999 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.c @@ -130,12 +130,6 @@ int mlx5hws_cmd_flow_table_destroy(struct mlx5_core_dev *mdev, return mlx5_cmd_exec_in(mdev, destroy_flow_table, in); } -void mlx5hws_cmd_alias_flow_table_destroy(struct mlx5_core_dev *mdev, - u32 table_id) -{ - hws_cmd_general_obj_destroy(mdev, MLX5_OBJ_TYPE_FT_ALIAS, table_id); -} - static int hws_cmd_flow_group_create(struct mlx5_core_dev *mdev, struct mlx5hws_cmd_fg_attr *fg_attr, u32 *group_id) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.h index 610c63d81ad95..51d9e0291ac16 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.h @@ -258,9 +258,6 @@ int mlx5hws_cmd_flow_table_query(struct mlx5_core_dev *mdev, int mlx5hws_cmd_flow_table_destroy(struct mlx5_core_dev *mdev, u8 fw_ft_type, u32 table_id); -void mlx5hws_cmd_alias_flow_table_destroy(struct mlx5_core_dev *mdev, - u32 table_id); - int mlx5hws_cmd_rtc_create(struct mlx5_core_dev *mdev, struct mlx5hws_cmd_rtc_create_attr *rtc_attr, u32 *rtc_id); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.c index 10ece7df1cfaf..c8cc0c8115f53 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.c @@ -500,7 +500,8 @@ hws_definer_check_match_flags(struct mlx5hws_definer_conv_data *cd) return 0; err_conflict: - mlx5hws_err(cd->ctx, "Invalid definer fields combination\n"); + mlx5hws_err(cd->ctx, "Invalid definer fields combination: match_flags = 0x%08x\n", + cd->match_flags); return -EINVAL; } @@ 
-1985,8 +1986,7 @@ int mlx5hws_definer_get_obj(struct mlx5hws_context *ctx, continue; /* Reuse definer and set LRU (move to be first in the list) */ - list_del_init(&cached_definer->list_node); - list_add(&cached_definer->list_node, &cache->list_head); + list_move(&cached_definer->list_node, &cache->list_head); cached_definer->refcount++; return cached_definer->definer.obj_id; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c index f34bbbbba1c2a..1b787cd66e6fd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c @@ -66,6 +66,8 @@ static int mlx5_fs_init_hws_actions_pool(struct mlx5_core_dev *dev, xa_init(&hws_pool->table_dests); xa_init(&hws_pool->vport_dests); xa_init(&hws_pool->vport_vhca_dests); + xa_init(&hws_pool->aso_meters); + xa_init(&hws_pool->sample_dests); return 0; cleanup_insert_hdr: @@ -88,10 +90,17 @@ static int mlx5_fs_init_hws_actions_pool(struct mlx5_core_dev *dev, static void mlx5_fs_cleanup_hws_actions_pool(struct mlx5_fs_hws_context *fs_ctx) { struct mlx5_fs_hws_actions_pool *hws_pool = &fs_ctx->hws_pool; + struct mlx5_fs_hws_data *fs_hws_data; struct mlx5hws_action *action; struct mlx5_fs_pool *pool; unsigned long i; + xa_for_each(&hws_pool->sample_dests, i, fs_hws_data) + kfree(fs_hws_data); + xa_destroy(&hws_pool->sample_dests); + xa_for_each(&hws_pool->aso_meters, i, fs_hws_data) + kfree(fs_hws_data); + xa_destroy(&hws_pool->aso_meters); xa_for_each(&hws_pool->vport_vhca_dests, i, action) mlx5hws_action_destroy(action); xa_destroy(&hws_pool->vport_vhca_dests); @@ -459,6 +468,106 @@ mlx5_fs_create_dest_action_range(struct mlx5hws_context *ctx, flags); } +static struct mlx5_fs_hws_data * +mlx5_fs_get_cached_hws_data(struct xarray *cache_xa, unsigned long index) +{ + struct mlx5_fs_hws_data *fs_hws_data; + int err; + + xa_lock(cache_xa); + fs_hws_data = xa_load(cache_xa, index); + if (!fs_hws_data) { + fs_hws_data = kzalloc(sizeof(*fs_hws_data), GFP_ATOMIC); + if (!fs_hws_data) { + xa_unlock(cache_xa); + return NULL; + } + refcount_set(&fs_hws_data->hws_action_refcount, 0); + mutex_init(&fs_hws_data->lock); + err = __xa_insert(cache_xa, index, fs_hws_data, GFP_ATOMIC); + if (err) { + kfree(fs_hws_data); + xa_unlock(cache_xa); + return NULL; + } + } + xa_unlock(cache_xa); + + return fs_hws_data; +} + +static struct mlx5hws_action * +mlx5_fs_get_action_aso_meter(struct mlx5_fs_hws_context *fs_ctx, + struct mlx5_exe_aso *exe_aso) +{ + struct mlx5_fs_hws_create_action_ctx create_ctx; + struct mlx5hws_context *ctx = fs_ctx->hws_ctx; + struct mlx5_fs_hws_data *meter_hws_data; + u32 id = exe_aso->base_id; + struct xarray *meters_xa; + + meters_xa = &fs_ctx->hws_pool.aso_meters; + meter_hws_data = mlx5_fs_get_cached_hws_data(meters_xa, id); + if (!meter_hws_data) + return NULL; + + create_ctx.hws_ctx = ctx; + create_ctx.actions_type = MLX5HWS_ACTION_TYP_ASO_METER; + create_ctx.id = id; + create_ctx.return_reg_id = exe_aso->return_reg_id; + + return mlx5_fs_get_hws_action(meter_hws_data, &create_ctx); +} + +static void mlx5_fs_put_action_aso_meter(struct mlx5_fs_hws_context *fs_ctx, + struct mlx5_exe_aso *exe_aso) +{ + struct mlx5_fs_hws_data *meter_hws_data; + struct xarray *meters_xa; + + meters_xa = &fs_ctx->hws_pool.aso_meters; + meter_hws_data = xa_load(meters_xa, exe_aso->base_id); + if (!meter_hws_data) + return; + return mlx5_fs_put_hws_action(meter_hws_data); +} + +static struct 
mlx5hws_action * +mlx5_fs_get_dest_action_sampler(struct mlx5_fs_hws_context *fs_ctx, + struct mlx5_flow_rule *dst) +{ + struct mlx5_fs_hws_create_action_ctx create_ctx; + struct mlx5hws_context *ctx = fs_ctx->hws_ctx; + struct mlx5_fs_hws_data *sampler_hws_data; + u32 id = dst->dest_attr.sampler_id; + struct xarray *sampler_xa; + + sampler_xa = &fs_ctx->hws_pool.sample_dests; + sampler_hws_data = mlx5_fs_get_cached_hws_data(sampler_xa, id); + if (!sampler_hws_data) + return NULL; + + create_ctx.hws_ctx = ctx; + create_ctx.actions_type = MLX5HWS_ACTION_TYP_SAMPLER; + create_ctx.id = id; + + return mlx5_fs_get_hws_action(sampler_hws_data, &create_ctx); +} + +static void mlx5_fs_put_dest_action_sampler(struct mlx5_fs_hws_context *fs_ctx, + u32 sampler_id) +{ + struct mlx5_fs_hws_data *sampler_hws_data; + struct xarray *sampler_xa; + + sampler_xa = &fs_ctx->hws_pool.sample_dests; + sampler_hws_data = xa_load(sampler_xa, sampler_id); + if (!sampler_hws_data) + return; + + mlx5_fs_put_hws_action(sampler_hws_data); +} + static struct mlx5hws_action * mlx5_fs_create_action_dest_array(struct mlx5hws_context *ctx, struct mlx5hws_action_dest_attr *dests, @@ -519,26 +628,101 @@ mlx5_fs_create_action_last(struct mlx5hws_context *ctx) return mlx5hws_action_create_last(ctx, flags); } -static void mlx5_fs_destroy_fs_action(struct mlx5_fs_hws_rule_action *fs_action) +static struct mlx5hws_action * +mlx5_fs_create_hws_action(struct mlx5_fs_hws_create_action_ctx *create_ctx) +{ + u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED; + + switch (create_ctx->actions_type) { + case MLX5HWS_ACTION_TYP_CTR: + return mlx5hws_action_create_counter(create_ctx->hws_ctx, + create_ctx->id, flags); + case MLX5HWS_ACTION_TYP_ASO_METER: + return mlx5hws_action_create_aso_meter(create_ctx->hws_ctx, + create_ctx->id, + create_ctx->return_reg_id, + flags); + case MLX5HWS_ACTION_TYP_SAMPLER: + return mlx5hws_action_create_flow_sampler(create_ctx->hws_ctx, + create_ctx->id, flags); + default: + return NULL; + } +} + +struct mlx5hws_action * +mlx5_fs_get_hws_action(struct mlx5_fs_hws_data *fs_hws_data, + struct mlx5_fs_hws_create_action_ctx *create_ctx) +{ + /* try avoid locking if not necessary */ + if (refcount_inc_not_zero(&fs_hws_data->hws_action_refcount)) + return fs_hws_data->hws_action; + + mutex_lock(&fs_hws_data->lock); + if (refcount_inc_not_zero(&fs_hws_data->hws_action_refcount)) { + mutex_unlock(&fs_hws_data->lock); + return fs_hws_data->hws_action; + } + fs_hws_data->hws_action = mlx5_fs_create_hws_action(create_ctx); + if (!fs_hws_data->hws_action) { + mutex_unlock(&fs_hws_data->lock); + return NULL; + } + refcount_set(&fs_hws_data->hws_action_refcount, 1); + mutex_unlock(&fs_hws_data->lock); + + return fs_hws_data->hws_action; +} + +void mlx5_fs_put_hws_action(struct mlx5_fs_hws_data *fs_hws_data) +{ + if (!fs_hws_data) + return; + + /* try avoid locking if not necessary */ + if (refcount_dec_not_one(&fs_hws_data->hws_action_refcount)) + return; + + mutex_lock(&fs_hws_data->lock); + if (!refcount_dec_and_test(&fs_hws_data->hws_action_refcount)) { + mutex_unlock(&fs_hws_data->lock); + return; + } + mlx5hws_action_destroy(fs_hws_data->hws_action); + fs_hws_data->hws_action = NULL; + mutex_unlock(&fs_hws_data->lock); +} + +static void mlx5_fs_destroy_fs_action(struct mlx5_flow_root_namespace *ns, + struct mlx5_fs_hws_rule_action *fs_action) { + struct mlx5_fs_hws_context *fs_ctx = &ns->fs_hws_context; + switch (mlx5hws_action_get_type(fs_action->action)) { case MLX5HWS_ACTION_TYP_CTR: 
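/*
 * Shared, cached action types are released here by reference count
 * rather than destroyed outright: counters drop their reference via
 * mlx5_fc_put_hws_action(), ASO meters via mlx5_fs_put_action_aso_meter(),
 * and samplers via mlx5_fs_put_dest_action_sampler(), all of which end
 * up in mlx5_fs_put_hws_action(); the underlying mlx5hws_action is only
 * destroyed when the last reference goes away. Every other action type
 * falls through to the default case and is destroyed immediately.
 */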
mlx5_fc_put_hws_action(fs_action->counter); break; + case MLX5HWS_ACTION_TYP_ASO_METER: + mlx5_fs_put_action_aso_meter(fs_ctx, fs_action->exe_aso); + break; + case MLX5HWS_ACTION_TYP_SAMPLER: + mlx5_fs_put_dest_action_sampler(fs_ctx, fs_action->sampler_id); + break; default: mlx5hws_action_destroy(fs_action->action); } } static void -mlx5_fs_destroy_fs_actions(struct mlx5_fs_hws_rule_action **fs_actions, +mlx5_fs_destroy_fs_actions(struct mlx5_flow_root_namespace *ns, + struct mlx5_fs_hws_rule_action **fs_actions, int *num_fs_actions) { int i; /* Free in reverse order to handle action dependencies */ for (i = *num_fs_actions - 1; i >= 0; i--) - mlx5_fs_destroy_fs_action(*fs_actions + i); + mlx5_fs_destroy_fs_action(ns, *fs_actions + i); *num_fs_actions = 0; kfree(*fs_actions); *fs_actions = NULL; @@ -735,8 +919,25 @@ static int mlx5_fs_fte_get_hws_actions(struct mlx5_flow_root_namespace *ns, } if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO) { - err = -EOPNOTSUPP; - goto free_actions; + if (fte_action->exe_aso.type != MLX5_EXE_ASO_FLOW_METER || + num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) { + err = -EOPNOTSUPP; + goto free_actions; + } + + tmp_action = mlx5_fs_get_action_aso_meter(fs_ctx, + &fte_action->exe_aso); + if (!tmp_action) { + err = -ENOMEM; + goto free_actions; + } + (*ractions)[num_actions].aso_meter.offset = + fte_action->exe_aso.flow_meter.meter_idx; + (*ractions)[num_actions].aso_meter.init_color = + fte_action->exe_aso.flow_meter.init_color; + (*ractions)[num_actions++].action = tmp_action; + fs_actions[num_fs_actions].action = tmp_action; + fs_actions[num_fs_actions++].exe_aso = &fte_action->exe_aso; } if (fte_action->action & MLX5_FLOW_CONTEXT_ACTION_DROP) { @@ -784,6 +985,14 @@ static int mlx5_fs_fte_get_hws_actions(struct mlx5_flow_root_namespace *ns, dest_action = mlx5_fs_get_dest_action_vport(fs_ctx, dst, type_uplink); break; + case MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER: + dest_action = + mlx5_fs_get_dest_action_sampler(fs_ctx, + dst); + fs_actions[num_fs_actions].action = dest_action; + fs_actions[num_fs_actions++].sampler_id = + dst->dest_attr.sampler_id; + break; default: err = -EOPNOTSUPP; goto free_actions; @@ -850,7 +1059,7 @@ static int mlx5_fs_fte_get_hws_actions(struct mlx5_flow_root_namespace *ns, return 0; free_actions: - mlx5_fs_destroy_fs_actions(&fs_actions, &num_fs_actions); + mlx5_fs_destroy_fs_actions(ns, &fs_actions, &num_fs_actions); free_dest_actions_alloc: kfree(dest_actions); free_fs_actions_alloc: @@ -900,7 +1109,7 @@ static int mlx5_cmd_hws_create_fte(struct mlx5_flow_root_namespace *ns, return 0; free_actions: - mlx5_fs_destroy_fs_actions(&fte->fs_hws_rule.hws_fs_actions, + mlx5_fs_destroy_fs_actions(ns, &fte->fs_hws_rule.hws_fs_actions, &fte->fs_hws_rule.num_fs_actions); out_err: mlx5_core_err(ns->dev, "Failed to create hws rule err(%d)\n", err); @@ -920,7 +1129,8 @@ static int mlx5_cmd_hws_delete_fte(struct mlx5_flow_root_namespace *ns, err = mlx5hws_bwc_rule_destroy(rule->bwc_rule); rule->bwc_rule = NULL; - mlx5_fs_destroy_fs_actions(&rule->hws_fs_actions, &rule->num_fs_actions); + mlx5_fs_destroy_fs_actions(ns, &rule->hws_fs_actions, + &rule->num_fs_actions); return err; } @@ -958,11 +1168,12 @@ static int mlx5_cmd_hws_update_fte(struct mlx5_flow_root_namespace *ns, if (ret) goto restore_actions; - mlx5_fs_destroy_fs_actions(&saved_hws_fs_actions, &saved_num_fs_actions); + mlx5_fs_destroy_fs_actions(ns, &saved_hws_fs_actions, + &saved_num_fs_actions); return ret; restore_actions: - 
mlx5_fs_destroy_fs_actions(&fte->fs_hws_rule.hws_fs_actions, + mlx5_fs_destroy_fs_actions(ns, &fte->fs_hws_rule.hws_fs_actions, &fte->fs_hws_rule.num_fs_actions); fte->fs_hws_rule.hws_fs_actions = saved_hws_fs_actions; fte->fs_hws_rule.num_fs_actions = saved_num_fs_actions; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.h index cbddb72d43624..8b56298288da2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.h @@ -22,6 +22,8 @@ struct mlx5_fs_hws_actions_pool { struct xarray table_dests; struct xarray vport_vhca_dests; struct xarray vport_dests; + struct xarray aso_meters; + struct xarray sample_dests; }; struct mlx5_fs_hws_context { @@ -49,6 +51,8 @@ struct mlx5_fs_hws_rule_action { struct mlx5hws_action *action; union { struct mlx5_fc *counter; + struct mlx5_exe_aso *exe_aso; + u32 sampler_id; }; }; @@ -58,6 +62,26 @@ struct mlx5_fs_hws_rule { int num_fs_actions; }; +struct mlx5_fs_hws_data { + struct mlx5hws_action *hws_action; + struct mutex lock; /* protects hws_action */ + refcount_t hws_action_refcount; +}; + +struct mlx5_fs_hws_create_action_ctx { + enum mlx5hws_action_type actions_type; + struct mlx5hws_context *hws_ctx; + u32 id; + union { + u8 return_reg_id; + }; +}; + +struct mlx5hws_action * +mlx5_fs_get_hws_action(struct mlx5_fs_hws_data *fs_hws_data, + struct mlx5_fs_hws_create_action_ctx *create_ctx); +void mlx5_fs_put_hws_action(struct mlx5_fs_hws_data *fs_hws_data); + #ifdef CONFIG_MLX5_HW_STEERING bool mlx5_fs_hws_is_supported(struct mlx5_core_dev *dev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws_pools.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws_pools.c index 2ae4ac62b0e26..f1ecdba74e1f4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws_pools.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws_pools.c @@ -405,46 +405,17 @@ bool mlx5_fs_hws_mh_pool_match(struct mlx5_fs_pool *mh_pool, struct mlx5hws_action *mlx5_fc_get_hws_action(struct mlx5hws_context *ctx, struct mlx5_fc *counter) { - u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED; + struct mlx5_fs_hws_create_action_ctx create_ctx; struct mlx5_fc_bulk *fc_bulk = counter->bulk; - struct mlx5_fc_bulk_hws_data *fc_bulk_hws; - fc_bulk_hws = &fc_bulk->hws_data; - /* try avoid locking if not necessary */ - if (refcount_inc_not_zero(&fc_bulk_hws->hws_action_refcount)) - return fc_bulk_hws->hws_action; + create_ctx.hws_ctx = ctx; + create_ctx.id = fc_bulk->base_id; + create_ctx.actions_type = MLX5HWS_ACTION_TYP_CTR; - mutex_lock(&fc_bulk_hws->lock); - if (refcount_inc_not_zero(&fc_bulk_hws->hws_action_refcount)) { - mutex_unlock(&fc_bulk_hws->lock); - return fc_bulk_hws->hws_action; - } - fc_bulk_hws->hws_action = - mlx5hws_action_create_counter(ctx, fc_bulk->base_id, flags); - if (!fc_bulk_hws->hws_action) { - mutex_unlock(&fc_bulk_hws->lock); - return NULL; - } - refcount_set(&fc_bulk_hws->hws_action_refcount, 1); - mutex_unlock(&fc_bulk_hws->lock); - - return fc_bulk_hws->hws_action; + return mlx5_fs_get_hws_action(&fc_bulk->hws_data, &create_ctx); } void mlx5_fc_put_hws_action(struct mlx5_fc *counter) { - struct mlx5_fc_bulk_hws_data *fc_bulk_hws = &counter->bulk->hws_data; - - /* try avoid locking if not necessary */ - if (refcount_dec_not_one(&fc_bulk_hws->hws_action_refcount)) - return; - - mutex_lock(&fc_bulk_hws->lock); - if 
(!refcount_dec_and_test(&fc_bulk_hws->hws_action_refcount)) { - mutex_unlock(&fc_bulk_hws->lock); - return; - } - mlx5hws_action_destroy(fc_bulk_hws->hws_action); - fc_bulk_hws->hws_action = NULL; - mutex_unlock(&fc_bulk_hws->lock); + mlx5_fs_put_hws_action(&counter->bulk->hws_data); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.c index d9dc4f2d0dc68..f51ed24526b91 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/pat_arg.c @@ -153,8 +153,7 @@ mlx5hws_pat_get_existing_cached_pattern(struct mlx5hws_pattern_cache *cache, cached_pattern = mlx5hws_pat_find_cached_pattern(cache, num_of_actions, actions); if (cached_pattern) { /* LRU: move it to be first in the list */ - list_del_init(&cached_pattern->ptrn_list_node); - list_add(&cached_pattern->ptrn_list_node, &cache->ptrn_list); + list_move(&cached_pattern->ptrn_list_node, &cache->ptrn_list); cached_pattern->refcount++; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_arg.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_arg.c index 01ed6442095d7..c2218dc556c7b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_arg.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_arg.c @@ -39,9 +39,6 @@ static int dr_arg_pool_alloc_objs(struct dr_arg_pool *pool) INIT_LIST_HEAD(&cur_list); - object_range = - pool->dmn->info.caps.log_header_modify_argument_granularity; - object_range = max_t(u32, pool->dmn->info.caps.log_header_modify_argument_granularity, DR_ICM_MODIFY_HDR_GRANULARITY_4K); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_domain.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_domain.c index 60cb4527588a5..65740bb68b09b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_domain.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_domain.c @@ -516,30 +516,6 @@ mlx5dr_domain_create(struct mlx5_core_dev *mdev, enum mlx5dr_domain_type type) return NULL; } -/* Assure synchronization of the device steering tables with updates made by SW - * insertion. 
- */ -int mlx5dr_domain_sync(struct mlx5dr_domain *dmn, u32 flags) -{ - int ret = 0; - - if (flags & MLX5DR_DOMAIN_SYNC_FLAGS_SW) { - mlx5dr_domain_lock(dmn); - ret = mlx5dr_send_ring_force_drain(dmn); - mlx5dr_domain_unlock(dmn); - if (ret) { - mlx5dr_err(dmn, "Force drain failed flags: %d, ret: %d\n", - flags, ret); - return ret; - } - } - - if (flags & MLX5DR_DOMAIN_SYNC_FLAGS_HW) - ret = mlx5dr_cmd_sync_steering(dmn->mdev); - - return ret; -} - int mlx5dr_domain_destroy(struct mlx5dr_domain *dmn) { if (WARN_ON_ONCE(refcount_read(&dmn->refcount) > 1)) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_send.c index f57c84e5128bc..4fd4e8483382c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_send.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_send.c @@ -1331,36 +1331,3 @@ void mlx5dr_send_ring_free(struct mlx5dr_domain *dmn, kfree(send_ring->sync_buff); kfree(send_ring); } - -int mlx5dr_send_ring_force_drain(struct mlx5dr_domain *dmn) -{ - struct mlx5dr_send_ring *send_ring = dmn->send_ring; - struct postsend_info send_info = {}; - u8 data[DR_STE_SIZE]; - int num_of_sends_req; - int ret; - int i; - - /* Sending this amount of requests makes sure we will get drain */ - num_of_sends_req = send_ring->signal_th * TH_NUMS_TO_DRAIN / 2; - - /* Send fake requests forcing the last to be signaled */ - send_info.write.addr = (uintptr_t)data; - send_info.write.length = DR_STE_SIZE; - send_info.write.lkey = 0; - /* Using the sync_mr in order to write/read */ - send_info.remote_addr = (uintptr_t)send_ring->sync_mr->addr; - send_info.rkey = send_ring->sync_mr->mkey; - - for (i = 0; i < num_of_sends_req; i++) { - ret = dr_postsend_icm_data(dmn, &send_info); - if (ret) - return ret; - } - - spin_lock(&send_ring->lock); - ret = dr_handle_pending_wc(dmn, send_ring); - spin_unlock(&send_ring->lock); - - return ret; -} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste.h index 5f409dc30aca8..3d5afc832fa56 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste.h @@ -210,6 +210,10 @@ struct mlx5dr_ste_ctx { void (*set_encap_l3)(u8 *hw_ste_p, u8 *frst_s_action, u8 *scnd_d_action, u32 reformat_id, int size); + void (*set_insert_hdr)(u8 *hw_ste_p, u8 *d_action, u32 reformat_id, + u8 anchor, u8 offset, int size); + void (*set_remove_hdr)(u8 *hw_ste_p, u8 *s_action, u8 anchor, + u8 offset, int size); /* Send */ void (*prepare_for_postsend)(u8 *hw_ste_p, u32 ste_size); }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.c index 7f83d77c43ef0..6447efbae00dc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.c @@ -266,10 +266,10 @@ void dr_ste_v1_set_encap(u8 *hw_ste_p, u8 *d_action, u32 reformat_id, int size) dr_ste_v1_set_reparse(hw_ste_p); } -static void dr_ste_v1_set_insert_hdr(u8 *hw_ste_p, u8 *d_action, - u32 reformat_id, - u8 anchor, u8 offset, - int size) +void dr_ste_v1_set_insert_hdr(u8 *hw_ste_p, u8 *d_action, + u32 reformat_id, + u8 anchor, u8 offset, + int size) { MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, action_id, DR_STE_V1_ACTION_ID_INSERT_POINTER); @@ -286,9 +286,9 @@ static void dr_ste_v1_set_insert_hdr(u8 
*hw_ste_p, u8 *d_action, dr_ste_v1_set_reparse(hw_ste_p); } -static void dr_ste_v1_set_remove_hdr(u8 *hw_ste_p, u8 *s_action, - u8 anchor, u8 offset, - int size) +void dr_ste_v1_set_remove_hdr(u8 *hw_ste_p, u8 *s_action, + u8 anchor, u8 offset, + int size) { MLX5_SET(ste_single_action_remove_header_size_v1, s_action, action_id, DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE); @@ -584,11 +584,11 @@ void dr_ste_v1_set_actions_tx(struct mlx5dr_ste_ctx *ste_ctx, action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action); action_sz = DR_STE_ACTION_TRIPLE_SZ; } - dr_ste_v1_set_insert_hdr(last_ste, action, - attr->reformat.id, - attr->reformat.param_0, - attr->reformat.param_1, - attr->reformat.size); + ste_ctx->set_insert_hdr(last_ste, action, + attr->reformat.id, + attr->reformat.param_0, + attr->reformat.param_1, + attr->reformat.size); action_sz -= DR_STE_ACTION_DOUBLE_SZ; action += DR_STE_ACTION_DOUBLE_SZ; } else if (action_type_set[DR_ACTION_TYP_REMOVE_HDR]) { @@ -597,10 +597,10 @@ void dr_ste_v1_set_actions_tx(struct mlx5dr_ste_ctx *ste_ctx, action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action); action_sz = DR_STE_ACTION_TRIPLE_SZ; } - dr_ste_v1_set_remove_hdr(last_ste, action, - attr->reformat.param_0, - attr->reformat.param_1, - attr->reformat.size); + ste_ctx->set_remove_hdr(last_ste, action, + attr->reformat.param_0, + attr->reformat.param_1, + attr->reformat.size); action_sz -= DR_STE_ACTION_SINGLE_SZ; action += DR_STE_ACTION_SINGLE_SZ; } @@ -792,11 +792,11 @@ void dr_ste_v1_set_actions_rx(struct mlx5dr_ste_ctx *ste_ctx, action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action); action_sz = DR_STE_ACTION_TRIPLE_SZ; } - dr_ste_v1_set_insert_hdr(last_ste, action, - attr->reformat.id, - attr->reformat.param_0, - attr->reformat.param_1, - attr->reformat.size); + ste_ctx->set_insert_hdr(last_ste, action, + attr->reformat.id, + attr->reformat.param_0, + attr->reformat.param_1, + attr->reformat.size); action_sz -= DR_STE_ACTION_DOUBLE_SZ; action += DR_STE_ACTION_DOUBLE_SZ; allow_modify_hdr = false; @@ -808,10 +808,10 @@ void dr_ste_v1_set_actions_rx(struct mlx5dr_ste_ctx *ste_ctx, allow_modify_hdr = true; allow_ctr = true; } - dr_ste_v1_set_remove_hdr(last_ste, action, - attr->reformat.param_0, - attr->reformat.param_1, - attr->reformat.size); + ste_ctx->set_remove_hdr(last_ste, action, + attr->reformat.param_0, + attr->reformat.param_1, + attr->reformat.size); action_sz -= DR_STE_ACTION_SINGLE_SZ; action += DR_STE_ACTION_SINGLE_SZ; } @@ -2200,6 +2200,8 @@ static struct mlx5dr_ste_ctx ste_ctx_v1 = { .set_pop_vlan = &dr_ste_v1_set_pop_vlan, .set_rx_decap = &dr_ste_v1_set_rx_decap, .set_encap_l3 = &dr_ste_v1_set_encap_l3, + .set_insert_hdr = &dr_ste_v1_set_insert_hdr, + .set_remove_hdr = &dr_ste_v1_set_remove_hdr, /* Send */ .prepare_for_postsend = &dr_ste_v1_prepare_for_postsend, }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.h index a8d9e308d3392..591c20c95a6ac 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.h @@ -156,6 +156,10 @@ void dr_ste_v1_set_pop_vlan(u8 *hw_ste_p, u8 *s_action, u8 vlans_num); void dr_ste_v1_set_encap_l3(u8 *hw_ste_p, u8 *frst_s_action, u8 *scnd_d_action, u32 reformat_id, int size); void dr_ste_v1_set_rx_decap(u8 *hw_ste_p, u8 *s_action); +void dr_ste_v1_set_insert_hdr(u8 *hw_ste_p, u8 *d_action, u32 reformat_id, + u8 anchor, u8 offset, int size); +void 
dr_ste_v1_set_remove_hdr(u8 *hw_ste_p, u8 *s_action, u8 anchor, + u8 offset, int size); void dr_ste_v1_set_actions_tx(struct mlx5dr_ste_ctx *ste_ctx, struct mlx5dr_domain *dmn, u8 *action_type_set, u32 actions_caps, u8 *last_ste, struct mlx5dr_ste_actions_attr *attr, u32 *added_stes); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v2.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v2.c index 0882dba0f64b9..d0ebaf820d42b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v2.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v2.c @@ -69,6 +69,8 @@ static struct mlx5dr_ste_ctx ste_ctx_v2 = { .set_pop_vlan = &dr_ste_v1_set_pop_vlan, .set_rx_decap = &dr_ste_v1_set_rx_decap, .set_encap_l3 = &dr_ste_v1_set_encap_l3, + .set_insert_hdr = &dr_ste_v1_set_insert_hdr, + .set_remove_hdr = &dr_ste_v1_set_remove_hdr, /* Send */ .prepare_for_postsend = &dr_ste_v1_prepare_for_postsend, }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v3.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v3.c index cc60ce1d274ef..e468a9ae44e8d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v3.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v3.c @@ -79,6 +79,46 @@ static void dr_ste_v3_set_rx_decap(u8 *hw_ste_p, u8 *s_action) dr_ste_v1_set_reparse(hw_ste_p); } +static void dr_ste_v3_set_insert_hdr(u8 *hw_ste_p, u8 *d_action, + u32 reformat_id, u8 anchor, + u8 offset, int size) +{ + MLX5_SET(ste_double_action_insert_with_ptr_v3, d_action, + action_id, DR_STE_V1_ACTION_ID_INSERT_POINTER); + MLX5_SET(ste_double_action_insert_with_ptr_v3, d_action, + start_anchor, anchor); + + /* The hardware expects here size and offset in words (2 byte) */ + MLX5_SET(ste_double_action_insert_with_ptr_v3, d_action, + size, size / 2); + MLX5_SET(ste_double_action_insert_with_ptr_v3, d_action, + start_offset, offset / 2); + + MLX5_SET(ste_double_action_insert_with_ptr_v3, d_action, + pointer, reformat_id); + MLX5_SET(ste_double_action_insert_with_ptr_v3, d_action, + attributes, DR_STE_V1_ACTION_INSERT_PTR_ATTR_NONE); + + dr_ste_v1_set_reparse(hw_ste_p); +} + +static void dr_ste_v3_set_remove_hdr(u8 *hw_ste_p, u8 *s_action, + u8 anchor, u8 offset, int size) +{ + MLX5_SET(ste_single_action_remove_header_size_v3, s_action, + action_id, DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE); + MLX5_SET(ste_single_action_remove_header_size_v3, s_action, + start_anchor, anchor); + + /* The hardware expects here size and offset in words (2 byte) */ + MLX5_SET(ste_single_action_remove_header_size_v3, s_action, + remove_size, size / 2); + MLX5_SET(ste_single_action_remove_header_size_v3, s_action, + start_offset, offset / 2); + + dr_ste_v1_set_reparse(hw_ste_p); +} + static int dr_ste_v3_set_action_decap_l3_list(void *data, u32 data_sz, u8 *hw_action, u32 hw_action_sz, @@ -211,6 +251,8 @@ static struct mlx5dr_ste_ctx ste_ctx_v3 = { .set_pop_vlan = &dr_ste_v3_set_pop_vlan, .set_rx_decap = &dr_ste_v3_set_rx_decap, .set_encap_l3 = &dr_ste_v3_set_encap_l3, + .set_insert_hdr = &dr_ste_v3_set_insert_hdr, + .set_remove_hdr = &dr_ste_v3_set_remove_hdr, /* Send */ .prepare_for_postsend = &dr_ste_v1_prepare_for_postsend, }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_types.h index 7618c6147f866..cc328292bf84a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_types.h +++ 
b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_types.h @@ -1473,7 +1473,6 @@ struct mlx5dr_send_ring { int mlx5dr_send_ring_alloc(struct mlx5dr_domain *dmn); void mlx5dr_send_ring_free(struct mlx5dr_domain *dmn, struct mlx5dr_send_ring *send_ring); -int mlx5dr_send_ring_force_drain(struct mlx5dr_domain *dmn); int mlx5dr_send_postsend_ste(struct mlx5dr_domain *dmn, struct mlx5dr_ste *ste, u8 *data, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5dr.h index 0bb3724c10c26..fc8a2169d1a13 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5dr.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5dr.h @@ -45,8 +45,6 @@ mlx5dr_domain_create(struct mlx5_core_dev *mdev, enum mlx5dr_domain_type type); int mlx5dr_domain_destroy(struct mlx5dr_domain *domain); -int mlx5dr_domain_sync(struct mlx5dr_domain *domain, u32 flags); - void mlx5dr_domain_set_peer(struct mlx5dr_domain *dmn, struct mlx5dr_domain *peer_dmn, u16 peer_vhca_id); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c index 0d5f750faa455..d10d4c3960408 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c @@ -1199,6 +1199,31 @@ int mlx5_vport_get_other_func_cap(struct mlx5_core_dev *dev, u16 vport, void *ou } EXPORT_SYMBOL_GPL(mlx5_vport_get_other_func_cap); +int mlx5_vport_get_vhca_id(struct mlx5_core_dev *dev, u16 vport, u16 *vhca_id) +{ + int query_out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out); + void *query_ctx; + void *hca_caps; + int err; + + *vhca_id = 0; + + query_ctx = kzalloc(query_out_sz, GFP_KERNEL); + if (!query_ctx) + return -ENOMEM; + + err = mlx5_vport_get_other_func_general_cap(dev, vport, query_ctx); + if (err) + goto out_free; + + hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability); + *vhca_id = MLX5_GET(cmd_hca_cap, hca_caps, vhca_id); + +out_free: + kfree(query_ctx); + return err; +} + int mlx5_vport_set_other_func_cap(struct mlx5_core_dev *dev, const void *hca_cap, u16 vport, u16 opmod) { diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c index 5b44c931b6604..2ed3b9263be2b 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/pci.c +++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c @@ -156,7 +156,7 @@ static int mlxsw_pci_napi_devs_init(struct mlxsw_pci *mlxsw_pci) } strscpy(mlxsw_pci->napi_dev_rx->name, "mlxsw_rx", sizeof(mlxsw_pci->napi_dev_rx->name)); - dev_set_threaded(mlxsw_pci->napi_dev_rx, true); + dev_set_threaded(mlxsw_pci->napi_dev_rx, NETDEV_NAPI_THREADED_ENABLE); return 0; @@ -2214,6 +2214,8 @@ static int mlxsw_pci_skb_transmit(void *bus_priv, struct sk_buff *skb, for (i++; i < MLXSW_PCI_WQE_SG_ENTRIES; i++) mlxsw_pci_wqe_byte_count_set(wqe, i, 0); + mlxsw_pci_wqe_ipcs_set(wqe, skb->ip_summed == CHECKSUM_PARTIAL); + /* Everything is set up, ring producer doorbell to get HW going */ q->producer_counter++; mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q); diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h index 6bed495dcf0ff..7fa94e5828deb 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h +++ b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h @@ -90,6 +90,11 @@ MLXSW_ITEM32(pci, wqe, lp, 0x00, 30, 1); */ MLXSW_ITEM32(pci, wqe, type, 0x00, 23, 4); +/* pci_wqe_ipcs + * Calculate IPv4 and TCP / UDP checksums. 
+ */ +MLXSW_ITEM32(pci, wqe, ipcs, 0x00, 14, 1); + /* pci_wqe_byte_count * Size of i-th scatter/gather entry, 0 if entry is unused. */ diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index d714311fd884e..3080ea032e7f3 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -1574,10 +1574,12 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u16 local_port, netif_carrier_off(dev); dev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_CTAG_FILTER | - NETIF_F_HW_TC; - dev->hw_features |= NETIF_F_HW_TC | NETIF_F_LOOPBACK; + NETIF_F_HW_TC | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; + dev->hw_features |= NETIF_F_HW_TC | NETIF_F_LOOPBACK | + NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; + dev->vlan_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; dev->lltx = true; - dev->netns_local = true; + dev->netns_immutable = true; dev->min_mtu = ETH_MIN_MTU; dev->max_mtu = MLXSW_PORT_MAX_MTU - MLXSW_PORT_ETH_FRAME_HDR; @@ -2407,8 +2409,6 @@ static const struct mlxsw_listener mlxsw_sp_listener[] = { /* Multicast Router Traps */ MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false), MLXSW_SP_RXL_L3_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false), - /* NVE traps */ - MLXSW_SP_RXL_MARK(NVE_ENCAP_ARP, TRAP_TO_CPU, NEIGH_DISCOVERY, false), }; static const struct mlxsw_listener mlxsw_sp1_listener[] = { @@ -5230,25 +5230,13 @@ static int mlxsw_sp_netdevice_vxlan_event(struct mlxsw_sp *mlxsw_sp, return 0; if (!mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack)) return -EOPNOTSUPP; - if (cu_info->linking) { - if (!netif_running(dev)) - return 0; - /* When the bridge is VLAN-aware, the VNI of the VxLAN - * device needs to be mapped to a VLAN, but at this - * point no VLANs are configured on the VxLAN device - */ - if (br_vlan_enabled(upper_dev)) - return 0; + if (!netif_running(dev)) + return 0; + if (cu_info->linking) return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev, dev, 0, extack); - } else { - /* VLANs were already flushed, which triggered the - * necessary cleanup - */ - if (br_vlan_enabled(upper_dev)) - return 0; + else mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev); - } break; case NETDEV_PRE_UP: upper_dev = netdev_master_upper_dev_get(dev); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index b10f80fc651b0..37cd1d002b3be 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -661,10 +661,10 @@ bool mlxsw_sp_bridge_device_is_offloaded(const struct mlxsw_sp *mlxsw_sp, const struct net_device *br_dev); int mlxsw_sp_bridge_vxlan_join(struct mlxsw_sp *mlxsw_sp, const struct net_device *br_dev, - const struct net_device *vxlan_dev, u16 vid, + struct net_device *vxlan_dev, u16 vid, struct netlink_ext_ack *extack); void mlxsw_sp_bridge_vxlan_leave(struct mlxsw_sp *mlxsw_sp, - const struct net_device *vxlan_dev); + struct net_device *vxlan_dev); extern struct notifier_block mlxsw_sp_switchdev_notifier; /* spectrum.c */ @@ -754,9 +754,6 @@ void mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan); void mlxsw_sp_rif_destroy_by_dev(struct mlxsw_sp *mlxsw_sp, struct net_device *dev); -bool mlxsw_sp_rif_exists(struct mlxsw_sp *mlxsw_sp, - const struct net_device *dev); -u16 mlxsw_sp_rif_vid(struct mlxsw_sp *mlxsw_sp, const struct net_device *dev); u16 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp); int mlxsw_sp_router_nve_promote_decap(struct mlxsw_sp *mlxsw_sp, u32 
ul_tb_id, enum mlxsw_sp_l3proto ul_proto, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c index a54eedb69a3f5..067f0055a55af 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c @@ -212,7 +212,22 @@ static const u8 mlxsw_sp4_acl_bf_crc6_tab[256] = { * This array defines key offsets for easy access when copying key blocks from * entry key to Bloom filter chunk. */ -static const u8 chunk_key_offsets[MLXSW_BLOOM_KEY_CHUNKS] = {2, 20, 38}; +static char * +mlxsw_sp_acl_bf_enc_key_get(struct mlxsw_sp_acl_atcam_entry *aentry, + u8 chunk_index) +{ + switch (chunk_index) { + case 0: + return &aentry->ht_key.enc_key[2]; + case 1: + return &aentry->ht_key.enc_key[20]; + case 2: + return &aentry->ht_key.enc_key[38]; + default: + WARN_ON_ONCE(1); + return &aentry->ht_key.enc_key[0]; + } +} static u16 mlxsw_sp2_acl_bf_crc16_byte(u16 crc, u8 c) { @@ -235,9 +250,10 @@ __mlxsw_sp_acl_bf_key_encode(struct mlxsw_sp_acl_atcam_region *aregion, u8 key_offset, u8 chunk_key_len, u8 chunk_len) { struct mlxsw_afk_key_info *key_info = aregion->region->key_info; - u8 chunk_index, chunk_count, block_count; + u8 chunk_index, chunk_count; char *chunk = output; __be16 erp_region_id; + u32 block_count; block_count = mlxsw_afk_key_info_blocks_count_get(key_info); chunk_count = 1 + ((block_count - 1) >> 2); @@ -245,12 +261,13 @@ __mlxsw_sp_acl_bf_key_encode(struct mlxsw_sp_acl_atcam_region *aregion, (aregion->region->id << 4)); for (chunk_index = max_chunks - chunk_count; chunk_index < max_chunks; chunk_index++) { + char *enc_key; + memset(chunk, 0, pad_bytes); memcpy(chunk + pad_bytes, &erp_region_id, sizeof(erp_region_id)); - memcpy(chunk + key_offset, - &aentry->ht_key.enc_key[chunk_key_offsets[chunk_index]], - chunk_key_len); + enc_key = mlxsw_sp_acl_bf_enc_key_get(aentry, chunk_index); + memcpy(chunk + key_offset, enc_key, chunk_key_len); chunk += chunk_len; } *len = chunk_count * chunk_len; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 7d6d859cef3f9..464821dd492da 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -8184,41 +8184,6 @@ mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp, return NULL; } -bool mlxsw_sp_rif_exists(struct mlxsw_sp *mlxsw_sp, - const struct net_device *dev) -{ - struct mlxsw_sp_rif *rif; - - mutex_lock(&mlxsw_sp->router->lock); - rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev); - mutex_unlock(&mlxsw_sp->router->lock); - - return rif; -} - -u16 mlxsw_sp_rif_vid(struct mlxsw_sp *mlxsw_sp, const struct net_device *dev) -{ - struct mlxsw_sp_rif *rif; - u16 vid = 0; - - mutex_lock(&mlxsw_sp->router->lock); - rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev); - if (!rif) - goto out; - - /* We only return the VID for VLAN RIFs. Otherwise we return an - * invalid value (0). 
- */ - if (rif->ops->type != MLXSW_SP_RIF_TYPE_VLAN) - goto out; - - vid = mlxsw_sp_fid_8021q_vid(rif->fid); - -out: - mutex_unlock(&mlxsw_sp->router->lock); - return vid; -} - static int mlxsw_sp_router_rif_disable(struct mlxsw_sp *mlxsw_sp, u16 rif) { char ritr_pl[MLXSW_REG_RITR_LEN]; @@ -8417,19 +8382,6 @@ u16 mlxsw_sp_ipip_lb_rif_index(const struct mlxsw_sp_rif_ipip_lb *lb_rif) return lb_rif->common.rif_index; } -u16 mlxsw_sp_ipip_lb_ul_vr_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif) -{ - struct net_device *dev = mlxsw_sp_rif_dev(&lb_rif->common); - u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(dev); - struct mlxsw_sp_vr *ul_vr; - - ul_vr = mlxsw_sp_vr_get(lb_rif->common.mlxsw_sp, ul_tb_id, NULL); - if (WARN_ON(IS_ERR(ul_vr))) - return 0; - - return ul_vr->id; -} - u16 mlxsw_sp_ipip_lb_ul_rif_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif) { return lb_rif->ul_rif_id; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h index 0432c7cc6b07a..313efab5c324c 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h @@ -90,7 +90,6 @@ struct mlxsw_sp_ipip_entry; struct mlxsw_sp_rif *mlxsw_sp_rif_by_index(const struct mlxsw_sp *mlxsw_sp, u16 rif_index); u16 mlxsw_sp_ipip_lb_rif_index(const struct mlxsw_sp_rif_ipip_lb *rif); -u16 mlxsw_sp_ipip_lb_ul_vr_id(const struct mlxsw_sp_rif_ipip_lb *rif); u16 mlxsw_sp_ipip_lb_ul_rif_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif); u32 mlxsw_sp_ipip_dev_ul_tb_id(const struct net_device *ol_dev); int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index 6397ff0dc951c..a48bf342084da 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c @@ -2929,23 +2929,8 @@ void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port, mlxsw_sp_bridge_port_put(mlxsw_sp->bridge, bridge_port); } -int mlxsw_sp_bridge_vxlan_join(struct mlxsw_sp *mlxsw_sp, - const struct net_device *br_dev, - const struct net_device *vxlan_dev, u16 vid, - struct netlink_ext_ack *extack) -{ - struct mlxsw_sp_bridge_device *bridge_device; - - bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev); - if (WARN_ON(!bridge_device)) - return -EINVAL; - - return bridge_device->ops->vxlan_join(bridge_device, vxlan_dev, vid, - extack); -} - -void mlxsw_sp_bridge_vxlan_leave(struct mlxsw_sp *mlxsw_sp, - const struct net_device *vxlan_dev) +static void __mlxsw_sp_bridge_vxlan_leave(struct mlxsw_sp *mlxsw_sp, + const struct net_device *vxlan_dev) { struct vxlan_dev *vxlan = netdev_priv(vxlan_dev); struct mlxsw_sp_fid *fid; @@ -2963,6 +2948,47 @@ void mlxsw_sp_bridge_vxlan_leave(struct mlxsw_sp *mlxsw_sp, mlxsw_sp_fid_put(fid); } +int mlxsw_sp_bridge_vxlan_join(struct mlxsw_sp *mlxsw_sp, + const struct net_device *br_dev, + struct net_device *vxlan_dev, u16 vid, + struct netlink_ext_ack *extack) +{ + struct mlxsw_sp_bridge_device *bridge_device; + struct mlxsw_sp_port *mlxsw_sp_port; + int err; + + bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev); + if (WARN_ON(!bridge_device)) + return -EINVAL; + + mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(bridge_device->dev); + if (!mlxsw_sp_port) + return -EINVAL; + + err = bridge_device->ops->vxlan_join(bridge_device, vxlan_dev, vid, + extack); + if (err) + return err; + + err = 
switchdev_bridge_port_offload(vxlan_dev, mlxsw_sp_port->dev, + NULL, NULL, NULL, false, extack); + if (err) + goto err_bridge_port_offload; + + return 0; + +err_bridge_port_offload: + __mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev); + return err; +} + +void mlxsw_sp_bridge_vxlan_leave(struct mlxsw_sp *mlxsw_sp, + struct net_device *vxlan_dev) +{ + switchdev_bridge_port_unoffload(vxlan_dev, NULL, NULL, NULL); + __mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev); +} + static void mlxsw_sp_switchdev_vxlan_addr_convert(const union vxlan_addr *vxlan_addr, enum mlxsw_sp_l3proto *proto, @@ -3867,7 +3893,7 @@ mlxsw_sp_switchdev_vxlan_vlan_add(struct mlxsw_sp *mlxsw_sp, mlxsw_sp_fid_put(fid); return -EINVAL; } - mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev); + __mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev); mlxsw_sp_fid_put(fid); return 0; } @@ -3883,7 +3909,7 @@ mlxsw_sp_switchdev_vxlan_vlan_add(struct mlxsw_sp *mlxsw_sp, /* Fourth case: The new VLAN is PVID, which means the VLAN currently * mapped to the VNI should be unmapped */ - mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev); + __mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev); mlxsw_sp_fid_put(fid); /* Fifth case: The new VLAN is also egress untagged, which means the @@ -3923,7 +3949,7 @@ mlxsw_sp_switchdev_vxlan_vlan_del(struct mlxsw_sp *mlxsw_sp, if (mlxsw_sp_fid_8021q_vid(fid) != vid) goto out; - mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev); + __mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev); out: mlxsw_sp_fid_put(fid); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c index 1f9c1c86839f1..b5c3f789c685c 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c @@ -959,18 +959,18 @@ static const struct mlxsw_sp_trap_item mlxsw_sp_trap_items_arr[] = { }, { .trap = MLXSW_SP_TRAP_CONTROL(ARP_REQUEST, NEIGH_DISCOVERY, - MIRROR), + TRAP), .listeners_arr = { - MLXSW_SP_RXL_MARK(ROUTER_ARPBC, NEIGH_DISCOVERY, - TRAP_TO_CPU, false), + MLXSW_SP_RXL_NO_MARK(ARPBC, NEIGH_DISCOVERY, + TRAP_TO_CPU, false), }, }, { .trap = MLXSW_SP_TRAP_CONTROL(ARP_RESPONSE, NEIGH_DISCOVERY, - MIRROR), + TRAP), .listeners_arr = { - MLXSW_SP_RXL_MARK(ROUTER_ARPUC, NEIGH_DISCOVERY, - TRAP_TO_CPU, false), + MLXSW_SP_RXL_NO_MARK(ARPUC, NEIGH_DISCOVERY, + TRAP_TO_CPU, false), }, }, { diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h index 83477c8e6971b..80ee5c4825dc9 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/trap.h +++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h @@ -29,6 +29,8 @@ enum { MLXSW_TRAP_ID_FDB_MISMATCH = 0x3B, MLXSW_TRAP_ID_FID_MISS = 0x3D, MLXSW_TRAP_ID_DECAP_ECN0 = 0x40, + MLXSW_TRAP_ID_ARPBC = 0x50, + MLXSW_TRAP_ID_ARPUC = 0x51, MLXSW_TRAP_ID_MTUERROR = 0x52, MLXSW_TRAP_ID_TTLERROR = 0x53, MLXSW_TRAP_ID_LBERROR = 0x54, @@ -66,13 +68,10 @@ enum { MLXSW_TRAP_ID_HOST_MISS_IPV6 = 0x92, MLXSW_TRAP_ID_IPIP_DECAP_ERROR = 0xB1, MLXSW_TRAP_ID_NVE_DECAP_ARP = 0xB8, - MLXSW_TRAP_ID_NVE_ENCAP_ARP = 0xBD, MLXSW_TRAP_ID_IPV4_BFD = 0xD0, MLXSW_TRAP_ID_IPV6_BFD = 0xD1, MLXSW_TRAP_ID_ROUTER_ALERT_IPV4 = 0xD6, MLXSW_TRAP_ID_ROUTER_ALERT_IPV6 = 0xD7, - MLXSW_TRAP_ID_ROUTER_ARPBC = 0xE0, - MLXSW_TRAP_ID_ROUTER_ARPUC = 0xE1, MLXSW_TRAP_ID_DISCARD_NON_ROUTABLE = 0x11A, MLXSW_TRAP_ID_DISCARD_ROUTER2 = 0x130, MLXSW_TRAP_ID_DISCARD_ROUTER3 = 0x131, diff --git a/drivers/net/ethernet/meta/fbnic/Makefile b/drivers/net/ethernet/meta/fbnic/Makefile index 239b2258ec658..0dbc634adb4b8
100644 --- a/drivers/net/ethernet/meta/fbnic/Makefile +++ b/drivers/net/ethernet/meta/fbnic/Makefile @@ -20,6 +20,6 @@ fbnic-y := fbnic_csr.o \ fbnic_pci.o \ fbnic_phylink.o \ fbnic_rpc.o \ + fbnic_time.o \ fbnic_tlv.o \ - fbnic_txrx.o \ - fbnic_time.o + fbnic_txrx.o diff --git a/drivers/net/ethernet/meta/fbnic/fbnic.h b/drivers/net/ethernet/meta/fbnic/fbnic.h index 14751f16e1252..4ca7b99ef1315 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic.h +++ b/drivers/net/ethernet/meta/fbnic/fbnic.h @@ -60,6 +60,12 @@ struct fbnic_dev { u8 mac_addr_boundary; u8 tce_tcam_last; + /* IP TCAM */ + struct fbnic_ip_addr ip_src[FBNIC_RPC_TCAM_IP_ADDR_NUM_ENTRIES]; + struct fbnic_ip_addr ip_dst[FBNIC_RPC_TCAM_IP_ADDR_NUM_ENTRIES]; + struct fbnic_ip_addr ipo_src[FBNIC_RPC_TCAM_IP_ADDR_NUM_ENTRIES]; + struct fbnic_ip_addr ipo_dst[FBNIC_RPC_TCAM_IP_ADDR_NUM_ENTRIES]; + /* Number of TCQs/RCQs available on hardware */ u16 max_num_queues; @@ -180,6 +186,9 @@ void fbnic_dbg_exit(void); void fbnic_csr_get_regs(struct fbnic_dev *fbd, u32 *data, u32 *regs_version); int fbnic_csr_regs_len(struct fbnic_dev *fbd); +void fbnic_config_txrx_usecs(struct fbnic_napi_vector *nv, u32 arm); +void fbnic_config_rx_frames(struct fbnic_napi_vector *nv); + enum fbnic_boards { fbnic_board_asic }; diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_csr.c b/drivers/net/ethernet/meta/fbnic/fbnic_csr.c index aeb9f333f4c7a..d9c0dc1c2af97 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_csr.c +++ b/drivers/net/ethernet/meta/fbnic/fbnic_csr.c @@ -30,6 +30,7 @@ static const struct fbnic_csr_bounds fbnic_csr_sects[] = { FBNIC_BOUNDS(RSFEC), FBNIC_BOUNDS(MAC_MAC), FBNIC_BOUNDS(SIG), + FBNIC_BOUNDS(PCIE_SS_COMPHY), FBNIC_BOUNDS(PUL_USER), FBNIC_BOUNDS(QUEUE), FBNIC_BOUNDS(RPC_RAM), diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_csr.h b/drivers/net/ethernet/meta/fbnic/fbnic_csr.h index 02bb81b3c5063..3b12a0ab59065 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_csr.h +++ b/drivers/net/ethernet/meta/fbnic/fbnic_csr.h @@ -605,8 +605,11 @@ enum { FBNIC_RPC_ACT_TBL0_DEST_EI = 4, }; +#define FBNIC_RPC_ACT_TBL0_Q_SEL CSR_BIT(4) +#define FBNIC_RPC_ACT_TBL0_Q_ID CSR_GENMASK(15, 8) #define FBNIC_RPC_ACT_TBL0_DMA_HINT CSR_GENMASK(24, 16) #define FBNIC_RPC_ACT_TBL0_TS_ENA CSR_BIT(28) +#define FBNIC_RPC_ACT_TBL0_ACT_TBL_IDX CSR_BIT(29) #define FBNIC_RPC_ACT_TBL0_RSS_CTXT_ID CSR_BIT(30) #define FBNIC_RPC_ACT_TBL1_DEFAULT 0x0840b /* 0x2102c */ @@ -677,6 +680,9 @@ enum { #define FBNIC_RPC_TCAM_OUTER_IPSRC(m, n)\ (0x08c00 + 0x08 * (n) + (m)) /* 0x023000 + 32*n + 4*m */ +#define FBNIC_RPC_TCAM_IP_ADDR_VALUE CSR_GENMASK(15, 0) +#define FBNIC_RPC_TCAM_IP_ADDR_MASK CSR_GENMASK(31, 16) + #define FBNIC_RPC_TCAM_OUTER_IPDST(m, n)\ (0x08c48 + 0x08 * (n) + (m)) /* 0x023120 + 32*n + 4*m */ #define FBNIC_RPC_TCAM_IPSRC(m, n)\ @@ -782,13 +788,52 @@ enum { #define FBNIC_MAC_STAT_TX_MULTICAST_H 0x11a4b /* 0x4692c */ #define FBNIC_MAC_STAT_TX_BROADCAST_L 0x11a4c /* 0x46930 */ #define FBNIC_MAC_STAT_TX_BROADCAST_H 0x11a4d /* 0x46934 */ + +/* PCIE Comphy Registers */ +#define FBNIC_CSR_START_PCIE_SS_COMPHY 0x2442e /* CSR section delimiter */ +#define FBNIC_CSR_END_PCIE_SS_COMPHY 0x279d7 /* CSR section delimiter */ + /* PUL User Registers */ #define FBNIC_CSR_START_PUL_USER 0x31000 /* CSR section delimiter */ #define FBNIC_PUL_OB_TLP_HDR_AW_CFG 0x3103d /* 0xc40f4 */ #define FBNIC_PUL_OB_TLP_HDR_AW_CFG_BME CSR_BIT(18) #define FBNIC_PUL_OB_TLP_HDR_AR_CFG 0x3103e /* 0xc40f8 */ #define FBNIC_PUL_OB_TLP_HDR_AR_CFG_BME CSR_BIT(18) -#define FBNIC_CSR_END_PUL_USER 0x31080 /*
CSR section delimiter */ +#define FBNIC_PUL_USER_OB_RD_TLP_CNT_31_0 \ + 0x3106e /* 0xc41b8 */ +#define FBNIC_PUL_USER_OB_RD_DWORD_CNT_31_0 \ + 0x31070 /* 0xc41c0 */ +#define FBNIC_PUL_USER_OB_RD_DWORD_CNT_63_32 \ + 0x31071 /* 0xc41c4 */ +#define FBNIC_PUL_USER_OB_WR_TLP_CNT_31_0 \ + 0x31072 /* 0xc41c8 */ +#define FBNIC_PUL_USER_OB_WR_TLP_CNT_63_32 \ + 0x31073 /* 0xc41cc */ +#define FBNIC_PUL_USER_OB_WR_DWORD_CNT_31_0 \ + 0x31074 /* 0xc41d0 */ +#define FBNIC_PUL_USER_OB_WR_DWORD_CNT_63_32 \ + 0x31075 /* 0xc41d4 */ +#define FBNIC_PUL_USER_OB_CPL_TLP_CNT_31_0 \ + 0x31076 /* 0xc41d8 */ +#define FBNIC_PUL_USER_OB_CPL_TLP_CNT_63_32 \ + 0x31077 /* 0xc41dc */ +#define FBNIC_PUL_USER_OB_CPL_DWORD_CNT_31_0 \ + 0x31078 /* 0xc41e0 */ +#define FBNIC_PUL_USER_OB_CPL_DWORD_CNT_63_32 \ + 0x31079 /* 0xc41e4 */ +#define FBNIC_PUL_USER_OB_RD_DBG_CNT_CPL_CRED_31_0 \ + 0x3107a /* 0xc41e8 */ +#define FBNIC_PUL_USER_OB_RD_DBG_CNT_CPL_CRED_63_32 \ + 0x3107b /* 0xc41ec */ +#define FBNIC_PUL_USER_OB_RD_DBG_CNT_TAG_31_0 \ + 0x3107c /* 0xc41f0 */ +#define FBNIC_PUL_USER_OB_RD_DBG_CNT_TAG_63_32 \ + 0x3107d /* 0xc41f4 */ +#define FBNIC_PUL_USER_OB_RD_DBG_CNT_NP_CRED_31_0 \ + 0x3107e /* 0xc41f8 */ +#define FBNIC_PUL_USER_OB_RD_DBG_CNT_NP_CRED_63_32 \ + 0x3107f /* 0xc41fc */ +#define FBNIC_CSR_END_PUL_USER 0x310ea /* CSR section delimiter */ /* Queue Registers * @@ -928,43 +973,6 @@ enum { #define FBNIC_MAX_QUEUES 128 #define FBNIC_CSR_END_QUEUE (0x40000 + 0x400 * FBNIC_MAX_QUEUES - 1) -/* PUL User Registers*/ -#define FBNIC_PUL_USER_OB_RD_TLP_CNT_31_0 \ - 0x3106e /* 0xc41b8 */ -#define FBNIC_PUL_USER_OB_RD_DWORD_CNT_31_0 \ - 0x31070 /* 0xc41c0 */ -#define FBNIC_PUL_USER_OB_RD_DWORD_CNT_63_32 \ - 0x31071 /* 0xc41c4 */ -#define FBNIC_PUL_USER_OB_WR_TLP_CNT_31_0 \ - 0x31072 /* 0xc41c8 */ -#define FBNIC_PUL_USER_OB_WR_TLP_CNT_63_32 \ - 0x31073 /* 0xc41cc */ -#define FBNIC_PUL_USER_OB_WR_DWORD_CNT_31_0 \ - 0x31074 /* 0xc41d0 */ -#define FBNIC_PUL_USER_OB_WR_DWORD_CNT_63_32 \ - 0x31075 /* 0xc41d4 */ -#define FBNIC_PUL_USER_OB_CPL_TLP_CNT_31_0 \ - 0x31076 /* 0xc41d8 */ -#define FBNIC_PUL_USER_OB_CPL_TLP_CNT_63_32 \ - 0x31077 /* 0xc41dc */ -#define FBNIC_PUL_USER_OB_CPL_DWORD_CNT_31_0 \ - 0x31078 /* 0xc41e0 */ -#define FBNIC_PUL_USER_OB_CPL_DWORD_CNT_63_32 \ - 0x31079 /* 0xc41e4 */ -#define FBNIC_PUL_USER_OB_RD_DBG_CNT_CPL_CRED_31_0 \ - 0x3107a /* 0xc41e8 */ -#define FBNIC_PUL_USER_OB_RD_DBG_CNT_CPL_CRED_63_32 \ - 0x3107b /* 0xc41ec */ -#define FBNIC_PUL_USER_OB_RD_DBG_CNT_TAG_31_0 \ - 0x3107c /* 0xc41f0 */ -#define FBNIC_PUL_USER_OB_RD_DBG_CNT_TAG_63_32 \ - 0x3107d /* 0xc41f4 */ -#define FBNIC_PUL_USER_OB_RD_DBG_CNT_NP_CRED_31_0 \ - 0x3107e /* 0xc41f8 */ -#define FBNIC_PUL_USER_OB_RD_DBG_CNT_NP_CRED_63_32 \ - 0x3107f /* 0xc41fc */ -#define FBNIC_CSR_END_PUL_USER 0x31080 /* CSR section delimiter */ - /* BAR 4 CSRs */ /* The IPC mailbox consists of 32 mailboxes, with each mailbox consisting diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_debugfs.c b/drivers/net/ethernet/meta/fbnic/fbnic_debugfs.c index 59951b5abdb78..e8f2d7f2d9629 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_debugfs.c +++ b/drivers/net/ethernet/meta/fbnic/fbnic_debugfs.c @@ -10,6 +10,166 @@ static struct dentry *fbnic_dbg_root; +static void fbnic_dbg_desc_break(struct seq_file *s, int i) +{ + while (i--) + seq_putc(s, '-'); + + seq_putc(s, '\n'); +} + +static int fbnic_dbg_mac_addr_show(struct seq_file *s, void *v) +{ + struct fbnic_dev *fbd = s->private; + char hdr[80]; + int i; + + /* Generate Header */ + snprintf(hdr, sizeof(hdr), "%3s %s %-17s 
%s\n", + "Idx", "S", "TCAM Bitmap", "Addr/Mask"); + seq_puts(s, hdr); + fbnic_dbg_desc_break(s, strnlen(hdr, sizeof(hdr))); + + for (i = 0; i < FBNIC_RPC_TCAM_MACDA_NUM_ENTRIES; i++) { + struct fbnic_mac_addr *mac_addr = &fbd->mac_addr[i]; + + seq_printf(s, "%02d %d %64pb %pm\n", + i, mac_addr->state, mac_addr->act_tcam, + mac_addr->value.addr8); + seq_printf(s, " %pm\n", + mac_addr->mask.addr8); + } + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(fbnic_dbg_mac_addr); + +static int fbnic_dbg_tce_tcam_show(struct seq_file *s, void *v) +{ + struct fbnic_dev *fbd = s->private; + int i, tcam_idx = 0; + char hdr[80]; + + /* Generate Header */ + snprintf(hdr, sizeof(hdr), "%3s %s %-17s %s\n", + "Idx", "S", "TCAM Bitmap", "Addr/Mask"); + seq_puts(s, hdr); + fbnic_dbg_desc_break(s, strnlen(hdr, sizeof(hdr))); + + for (i = 0; i < ARRAY_SIZE(fbd->mac_addr); i++) { + struct fbnic_mac_addr *mac_addr = &fbd->mac_addr[i]; + + /* Verify BMC bit is set */ + if (!test_bit(FBNIC_MAC_ADDR_T_BMC, mac_addr->act_tcam)) + continue; + + if (tcam_idx == FBNIC_TCE_TCAM_NUM_ENTRIES) + break; + + seq_printf(s, "%02d %d %64pb %pm\n", + tcam_idx, mac_addr->state, mac_addr->act_tcam, + mac_addr->value.addr8); + seq_printf(s, " %pm\n", + mac_addr->mask.addr8); + tcam_idx++; + } + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(fbnic_dbg_tce_tcam); + +static int fbnic_dbg_act_tcam_show(struct seq_file *s, void *v) +{ + struct fbnic_dev *fbd = s->private; + char hdr[80]; + int i; + + /* Generate Header */ + snprintf(hdr, sizeof(hdr), "%3s %s %-55s %-4s %s\n", + "Idx", "S", "Value/Mask", "RSS", "Dest"); + seq_puts(s, hdr); + fbnic_dbg_desc_break(s, strnlen(hdr, sizeof(hdr))); + + for (i = 0; i < FBNIC_RPC_TCAM_ACT_NUM_ENTRIES; i++) { + struct fbnic_act_tcam *act_tcam = &fbd->act_tcam[i]; + + seq_printf(s, "%02d %d %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %08x\n", + i, act_tcam->state, + act_tcam->value.tcam[10], act_tcam->value.tcam[9], + act_tcam->value.tcam[8], act_tcam->value.tcam[7], + act_tcam->value.tcam[6], act_tcam->value.tcam[5], + act_tcam->value.tcam[4], act_tcam->value.tcam[3], + act_tcam->value.tcam[2], act_tcam->value.tcam[1], + act_tcam->value.tcam[0], act_tcam->rss_en_mask, + act_tcam->dest); + seq_printf(s, " %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x\n", + act_tcam->mask.tcam[10], act_tcam->mask.tcam[9], + act_tcam->mask.tcam[8], act_tcam->mask.tcam[7], + act_tcam->mask.tcam[6], act_tcam->mask.tcam[5], + act_tcam->mask.tcam[4], act_tcam->mask.tcam[3], + act_tcam->mask.tcam[2], act_tcam->mask.tcam[1], + act_tcam->mask.tcam[0]); + } + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(fbnic_dbg_act_tcam); + +static int fbnic_dbg_ip_addr_show(struct seq_file *s, + struct fbnic_ip_addr *ip_addr) +{ + char hdr[80]; + int i; + + /* Generate Header */ + snprintf(hdr, sizeof(hdr), "%3s %s %-17s %s %s\n", + "Idx", "S", "TCAM Bitmap", "V", "Addr/Mask"); + seq_puts(s, hdr); + fbnic_dbg_desc_break(s, strnlen(hdr, sizeof(hdr))); + + for (i = 0; i < FBNIC_RPC_TCAM_IP_ADDR_NUM_ENTRIES; i++, ip_addr++) { + seq_printf(s, "%02d %d %64pb %d %pi6\n", + i, ip_addr->state, ip_addr->act_tcam, + ip_addr->version, &ip_addr->value); + seq_printf(s, " %pi6\n", + &ip_addr->mask); + } + + return 0; +} + +static int fbnic_dbg_ip_src_show(struct seq_file *s, void *v) +{ + struct fbnic_dev *fbd = s->private; + + return fbnic_dbg_ip_addr_show(s, fbd->ip_src); +} +DEFINE_SHOW_ATTRIBUTE(fbnic_dbg_ip_src); + +static int fbnic_dbg_ip_dst_show(struct seq_file *s, void *v) +{ + struct fbnic_dev *fbd = s->private; + + return 
fbnic_dbg_ip_addr_show(s, fbd->ip_dst); +} +DEFINE_SHOW_ATTRIBUTE(fbnic_dbg_ip_dst); + +static int fbnic_dbg_ipo_src_show(struct seq_file *s, void *v) +{ + struct fbnic_dev *fbd = s->private; + + return fbnic_dbg_ip_addr_show(s, fbd->ipo_src); +} +DEFINE_SHOW_ATTRIBUTE(fbnic_dbg_ipo_src); + +static int fbnic_dbg_ipo_dst_show(struct seq_file *s, void *v) +{ + struct fbnic_dev *fbd = s->private; + + return fbnic_dbg_ip_addr_show(s, fbd->ipo_dst); +} +DEFINE_SHOW_ATTRIBUTE(fbnic_dbg_ipo_dst); + static int fbnic_dbg_pcie_stats_show(struct seq_file *s, void *v) { struct fbnic_dev *fbd = s->private; @@ -48,6 +208,20 @@ void fbnic_dbg_fbd_init(struct fbnic_dev *fbd) fbd->dbg_fbd = debugfs_create_dir(name, fbnic_dbg_root); debugfs_create_file("pcie_stats", 0400, fbd->dbg_fbd, fbd, &fbnic_dbg_pcie_stats_fops); + debugfs_create_file("mac_addr", 0400, fbd->dbg_fbd, fbd, + &fbnic_dbg_mac_addr_fops); + debugfs_create_file("tce_tcam", 0400, fbd->dbg_fbd, fbd, + &fbnic_dbg_tce_tcam_fops); + debugfs_create_file("act_tcam", 0400, fbd->dbg_fbd, fbd, + &fbnic_dbg_act_tcam_fops); + debugfs_create_file("ip_src", 0400, fbd->dbg_fbd, fbd, + &fbnic_dbg_ip_src_fops); + debugfs_create_file("ip_dst", 0400, fbd->dbg_fbd, fbd, + &fbnic_dbg_ip_dst_fops); + debugfs_create_file("ipo_src", 0400, fbd->dbg_fbd, fbd, + &fbnic_dbg_ipo_src_fops); + debugfs_create_file("ipo_dst", 0400, fbd->dbg_fbd, fbd, + &fbnic_dbg_ipo_dst_fops); } void fbnic_dbg_fbd_exit(struct fbnic_dev *fbd) diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c b/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c index 20cd9f5f89e27..0a751a2aaf733 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c +++ b/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c @@ -4,6 +4,7 @@ #include #include #include +#include #include "fbnic.h" #include "fbnic_netdev.h" @@ -135,6 +136,168 @@ static void fbnic_clone_free(struct fbnic_net *clone) kfree(clone); } +static int fbnic_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +{ + struct fbnic_net *fbn = netdev_priv(netdev); + + ec->tx_coalesce_usecs = fbn->tx_usecs; + ec->rx_coalesce_usecs = fbn->rx_usecs; + ec->rx_max_coalesced_frames = fbn->rx_max_frames; + + return 0; +} + +static int fbnic_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +{ + struct fbnic_net *fbn = netdev_priv(netdev); + + /* Verify against hardware limits */ + if (ec->rx_coalesce_usecs > FIELD_MAX(FBNIC_INTR_CQ_REARM_RCQ_TIMEOUT)) { + NL_SET_ERR_MSG_MOD(extack, "rx_usecs is above device max"); + return -EINVAL; + } + if (ec->tx_coalesce_usecs > FIELD_MAX(FBNIC_INTR_CQ_REARM_TCQ_TIMEOUT)) { + NL_SET_ERR_MSG_MOD(extack, "tx_usecs is above device max"); + return -EINVAL; + } + if (ec->rx_max_coalesced_frames > + FIELD_MAX(FBNIC_QUEUE_RIM_THRESHOLD_RCD_MASK) / + FBNIC_MIN_RXD_PER_FRAME) { + NL_SET_ERR_MSG_MOD(extack, "rx_frames is above device max"); + return -EINVAL; + } + + fbn->tx_usecs = ec->tx_coalesce_usecs; + fbn->rx_usecs = ec->rx_coalesce_usecs; + fbn->rx_max_frames = ec->rx_max_coalesced_frames; + + if (netif_running(netdev)) { + int i; + + for (i = 0; i < fbn->num_napi; i++) { + struct fbnic_napi_vector *nv = fbn->napi[i]; + + fbnic_config_txrx_usecs(nv, 0); + fbnic_config_rx_frames(nv); + } + } + + return 0; +} + +static void +fbnic_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring, + struct 
kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) +{ + struct fbnic_net *fbn = netdev_priv(netdev); + + ring->rx_max_pending = FBNIC_QUEUE_SIZE_MAX; + ring->rx_mini_max_pending = FBNIC_QUEUE_SIZE_MAX; + ring->rx_jumbo_max_pending = FBNIC_QUEUE_SIZE_MAX; + ring->tx_max_pending = FBNIC_QUEUE_SIZE_MAX; + + ring->rx_pending = fbn->rcq_size; + ring->rx_mini_pending = fbn->hpq_size; + ring->rx_jumbo_pending = fbn->ppq_size; + ring->tx_pending = fbn->txq_size; +} + +static void fbnic_set_rings(struct fbnic_net *fbn, + struct ethtool_ringparam *ring) +{ + fbn->rcq_size = ring->rx_pending; + fbn->hpq_size = ring->rx_mini_pending; + fbn->ppq_size = ring->rx_jumbo_pending; + fbn->txq_size = ring->tx_pending; +} + +static int +fbnic_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) +{ + struct fbnic_net *fbn = netdev_priv(netdev); + struct fbnic_net *clone; + int err; + + ring->rx_pending = roundup_pow_of_two(ring->rx_pending); + ring->rx_mini_pending = roundup_pow_of_two(ring->rx_mini_pending); + ring->rx_jumbo_pending = roundup_pow_of_two(ring->rx_jumbo_pending); + ring->tx_pending = roundup_pow_of_two(ring->tx_pending); + + /* These are absolute minimums allowing the device and driver to operate, + * but they do not necessarily guarantee reasonable performance. Settings + * below an Rx queue size of 128 and BDQs smaller than 64 are likely + * suboptimal at best. + */ + if (ring->rx_pending < max(FBNIC_QUEUE_SIZE_MIN, FBNIC_RX_DESC_MIN) || + ring->rx_mini_pending < FBNIC_QUEUE_SIZE_MIN || + ring->rx_jumbo_pending < FBNIC_QUEUE_SIZE_MIN || + ring->tx_pending < max(FBNIC_QUEUE_SIZE_MIN, FBNIC_TX_DESC_MIN)) { + NL_SET_ERR_MSG_MOD(extack, "requested ring size too small"); + return -EINVAL; + } + + if (!netif_running(netdev)) { + fbnic_set_rings(fbn, ring); + return 0; + } + + clone = fbnic_clone_create(fbn); + if (!clone) + return -ENOMEM; + + fbnic_set_rings(clone, ring); + + err = fbnic_alloc_napi_vectors(clone); + if (err) + goto err_free_clone; + + err = fbnic_alloc_resources(clone); + if (err) + goto err_free_napis; + + fbnic_down_noidle(fbn); + err = fbnic_wait_all_queues_idle(fbn->fbd, true); + if (err) + goto err_start_stack; + + err = fbnic_set_netif_queues(clone); + if (err) + goto err_start_stack; + + /* Nothing can fail past this point */ + fbnic_flush(fbn); + + fbnic_clone_swap(fbn, clone); + + fbnic_up(fbn); + + fbnic_free_resources(clone); + fbnic_free_napi_vectors(clone); + fbnic_clone_free(clone); + + return 0; + +err_start_stack: + fbnic_flush(fbn); + fbnic_up(fbn); + fbnic_free_resources(clone); +err_free_napis: + fbnic_free_napi_vectors(clone); +err_free_clone: + fbnic_clone_free(clone); + return err; +} + static void fbnic_get_strings(struct net_device *dev, u32 sset, u8 *data) { int i; @@ -218,11 +381,234 @@ fbnic_get_rss_hash_opts(struct fbnic_net *fbn, struct ethtool_rxnfc *cmd) return 0; } +static int fbnic_get_cls_rule_all(struct fbnic_net *fbn, + struct ethtool_rxnfc *cmd, + u32 *rule_locs) +{ + struct fbnic_dev *fbd = fbn->fbd; + int i, cnt = 0; + + /* Report maximum rule count */ + cmd->data = FBNIC_RPC_ACT_TBL_NFC_ENTRIES; + + for (i = 0; i < FBNIC_RPC_ACT_TBL_NFC_ENTRIES; i++) { + int idx = i + FBNIC_RPC_ACT_TBL_NFC_OFFSET; + struct fbnic_act_tcam *act_tcam; + + act_tcam = &fbd->act_tcam[idx]; + if (act_tcam->state != FBNIC_TCAM_S_VALID) + continue; + + if (rule_locs) { + if (cnt == cmd->rule_cnt) + return -EMSGSIZE; + + rule_locs[cnt] = i; + } + + cnt++; +
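The loop above follows the usual ethtool two-pass contract: for ETHTOOL_GRXCLSRLCNT the caller passes rule_locs == NULL and only the count is produced, while for ETHTOOL_GRXCLSRLALL the indices are recorded and -EMSGSIZE is returned if the caller's array is too small. A reduced sketch of the same contract (hypothetical types, not the driver code):

/* Count valid rules; when locs is non-NULL also record their indices,
 * failing once the caller-supplied capacity is exhausted.
 */
static int collect_rules(const int *valid, int n, unsigned int *locs, int cap)
{
	int i, cnt = 0;

	for (i = 0; i < n; i++) {
		if (!valid[i])
			continue;
		if (locs) {
			if (cnt == cap)
				return -1;	/* -EMSGSIZE in the kernel */
			locs[cnt] = i;
		}
		cnt++;
	}
	return cnt;
}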
} + + return cnt; +} + +static int fbnic_get_cls_rule(struct fbnic_net *fbn, struct ethtool_rxnfc *cmd) +{ + struct ethtool_rx_flow_spec *fsp; + struct fbnic_dev *fbd = fbn->fbd; + struct fbnic_act_tcam *act_tcam; + int idx; + + fsp = (struct ethtool_rx_flow_spec *)&cmd->fs; + + if (fsp->location >= FBNIC_RPC_ACT_TBL_NFC_ENTRIES) + return -EINVAL; + + idx = fsp->location + FBNIC_RPC_ACT_TBL_NFC_OFFSET; + act_tcam = &fbd->act_tcam[idx]; + + if (act_tcam->state != FBNIC_TCAM_S_VALID) + return -EINVAL; + + /* Report maximum rule count */ + cmd->data = FBNIC_RPC_ACT_TBL_NFC_ENTRIES; + + /* Set flow type field */ + if (!(act_tcam->value.tcam[1] & FBNIC_RPC_TCAM_ACT1_IP_VALID)) { + fsp->flow_type = ETHER_FLOW; + if (!FIELD_GET(FBNIC_RPC_TCAM_ACT1_L2_MACDA_IDX, + act_tcam->mask.tcam[1])) { + struct fbnic_mac_addr *mac_addr; + + idx = FIELD_GET(FBNIC_RPC_TCAM_ACT1_L2_MACDA_IDX, + act_tcam->value.tcam[1]); + mac_addr = &fbd->mac_addr[idx]; + + ether_addr_copy(fsp->h_u.ether_spec.h_dest, + mac_addr->value.addr8); + eth_broadcast_addr(fsp->m_u.ether_spec.h_dest); + } + } else if (act_tcam->value.tcam[1] & + FBNIC_RPC_TCAM_ACT1_OUTER_IP_VALID) { + fsp->flow_type = IPV6_USER_FLOW; + fsp->h_u.usr_ip6_spec.l4_proto = IPPROTO_IPV6; + fsp->m_u.usr_ip6_spec.l4_proto = 0xff; + + if (!FIELD_GET(FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_IDX, + act_tcam->mask.tcam[0])) { + struct fbnic_ip_addr *ip_addr; + int i; + + idx = FIELD_GET(FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_IDX, + act_tcam->value.tcam[0]); + ip_addr = &fbd->ipo_src[idx]; + + for (i = 0; i < 4; i++) { + fsp->h_u.usr_ip6_spec.ip6src[i] = + ip_addr->value.s6_addr32[i]; + fsp->m_u.usr_ip6_spec.ip6src[i] = + ~ip_addr->mask.s6_addr32[i]; + } + } + + if (!FIELD_GET(FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_IDX, + act_tcam->mask.tcam[0])) { + struct fbnic_ip_addr *ip_addr; + int i; + + idx = FIELD_GET(FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_IDX, + act_tcam->value.tcam[0]); + ip_addr = &fbd->ipo_dst[idx]; + + for (i = 0; i < 4; i++) { + fsp->h_u.usr_ip6_spec.ip6dst[i] = + ip_addr->value.s6_addr32[i]; + fsp->m_u.usr_ip6_spec.ip6dst[i] = + ~ip_addr->mask.s6_addr32[i]; + } + } + } else if ((act_tcam->value.tcam[1] & FBNIC_RPC_TCAM_ACT1_IP_IS_V6)) { + if (act_tcam->value.tcam[1] & FBNIC_RPC_TCAM_ACT1_L4_VALID) { + if (act_tcam->value.tcam[1] & + FBNIC_RPC_TCAM_ACT1_L4_IS_UDP) + fsp->flow_type = UDP_V6_FLOW; + else + fsp->flow_type = TCP_V6_FLOW; + fsp->h_u.tcp_ip6_spec.psrc = + cpu_to_be16(act_tcam->value.tcam[3]); + fsp->m_u.tcp_ip6_spec.psrc = + cpu_to_be16(~act_tcam->mask.tcam[3]); + fsp->h_u.tcp_ip6_spec.pdst = + cpu_to_be16(act_tcam->value.tcam[4]); + fsp->m_u.tcp_ip6_spec.pdst = + cpu_to_be16(~act_tcam->mask.tcam[4]); + } else { + fsp->flow_type = IPV6_USER_FLOW; + } + + if (!FIELD_GET(FBNIC_RPC_TCAM_ACT0_IPSRC_IDX, + act_tcam->mask.tcam[0])) { + struct fbnic_ip_addr *ip_addr; + int i; + + idx = FIELD_GET(FBNIC_RPC_TCAM_ACT0_IPSRC_IDX, + act_tcam->value.tcam[0]); + ip_addr = &fbd->ip_src[idx]; + + for (i = 0; i < 4; i++) { + fsp->h_u.usr_ip6_spec.ip6src[i] = + ip_addr->value.s6_addr32[i]; + fsp->m_u.usr_ip6_spec.ip6src[i] = + ~ip_addr->mask.s6_addr32[i]; + } + } + + if (!FIELD_GET(FBNIC_RPC_TCAM_ACT0_IPDST_IDX, + act_tcam->mask.tcam[0])) { + struct fbnic_ip_addr *ip_addr; + int i; + + idx = FIELD_GET(FBNIC_RPC_TCAM_ACT0_IPDST_IDX, + act_tcam->value.tcam[0]); + ip_addr = &fbd->ip_dst[idx]; + + for (i = 0; i < 4; i++) { + fsp->h_u.usr_ip6_spec.ip6dst[i] = + ip_addr->value.s6_addr32[i]; + fsp->m_u.usr_ip6_spec.ip6dst[i] = + ~ip_addr->mask.s6_addr32[i]; + } + } + } else { + if 
(act_tcam->value.tcam[1] & FBNIC_RPC_TCAM_ACT1_L4_VALID) { + if (act_tcam->value.tcam[1] & + FBNIC_RPC_TCAM_ACT1_L4_IS_UDP) + fsp->flow_type = UDP_V4_FLOW; + else + fsp->flow_type = TCP_V4_FLOW; + fsp->h_u.tcp_ip4_spec.psrc = + cpu_to_be16(act_tcam->value.tcam[3]); + fsp->m_u.tcp_ip4_spec.psrc = + cpu_to_be16(~act_tcam->mask.tcam[3]); + fsp->h_u.tcp_ip4_spec.pdst = + cpu_to_be16(act_tcam->value.tcam[4]); + fsp->m_u.tcp_ip4_spec.pdst = + cpu_to_be16(~act_tcam->mask.tcam[4]); + } else { + fsp->flow_type = IPV4_USER_FLOW; + fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4; + } + + if (!FIELD_GET(FBNIC_RPC_TCAM_ACT0_IPSRC_IDX, + act_tcam->mask.tcam[0])) { + struct fbnic_ip_addr *ip_addr; + + idx = FIELD_GET(FBNIC_RPC_TCAM_ACT0_IPSRC_IDX, + act_tcam->value.tcam[0]); + ip_addr = &fbd->ip_src[idx]; + + fsp->h_u.usr_ip4_spec.ip4src = + ip_addr->value.s6_addr32[3]; + fsp->m_u.usr_ip4_spec.ip4src = + ~ip_addr->mask.s6_addr32[3]; + } + + if (!FIELD_GET(FBNIC_RPC_TCAM_ACT0_IPDST_IDX, + act_tcam->mask.tcam[0])) { + struct fbnic_ip_addr *ip_addr; + + idx = FIELD_GET(FBNIC_RPC_TCAM_ACT0_IPDST_IDX, + act_tcam->value.tcam[0]); + ip_addr = &fbd->ip_dst[idx]; + + fsp->h_u.usr_ip4_spec.ip4dst = + ip_addr->value.s6_addr32[3]; + fsp->m_u.usr_ip4_spec.ip4dst = + ~ip_addr->mask.s6_addr32[3]; + } + } + + /* Record action */ + if (act_tcam->dest & FBNIC_RPC_ACT_TBL0_DROP) + fsp->ring_cookie = RX_CLS_FLOW_DISC; + else if (act_tcam->dest & FBNIC_RPC_ACT_TBL0_Q_SEL) + fsp->ring_cookie = FIELD_GET(FBNIC_RPC_ACT_TBL0_Q_ID, + act_tcam->dest); + else + fsp->flow_type |= FLOW_RSS; + + cmd->rss_context = FIELD_GET(FBNIC_RPC_ACT_TBL0_RSS_CTXT_ID, + act_tcam->dest); + + return 0; +} + static int fbnic_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, u32 *rule_locs) { struct fbnic_net *fbn = netdev_priv(netdev); int ret = -EOPNOTSUPP; + u32 special = 0; switch (cmd->cmd) { case ETHTOOL_GRXRINGS: @@ -232,6 +618,22 @@ static int fbnic_get_rxnfc(struct net_device *netdev, case ETHTOOL_GRXFH: ret = fbnic_get_rss_hash_opts(fbn, cmd); break; + case ETHTOOL_GRXCLSRULE: + ret = fbnic_get_cls_rule(fbn, cmd); + break; + case ETHTOOL_GRXCLSRLCNT: + rule_locs = NULL; + special = RX_CLS_LOC_SPECIAL; + fallthrough; + case ETHTOOL_GRXCLSRLALL: + ret = fbnic_get_cls_rule_all(fbn, cmd, rule_locs); + if (ret < 0) + break; + + cmd->data |= special; + cmd->rule_cnt = ret; + ret = 0; + break; } return ret; @@ -272,6 +674,406 @@ fbnic_set_rss_hash_opts(struct fbnic_net *fbn, const struct ethtool_rxnfc *cmd) return 0; } +static int fbnic_cls_rule_any_loc(struct fbnic_dev *fbd) +{ + int i; + + for (i = FBNIC_RPC_ACT_TBL_NFC_ENTRIES; i--;) { + int idx = i + FBNIC_RPC_ACT_TBL_NFC_OFFSET; + + if (fbd->act_tcam[idx].state != FBNIC_TCAM_S_VALID) + return i; + } + + return -ENOSPC; +} + +static int fbnic_set_cls_rule_ins(struct fbnic_net *fbn, + const struct ethtool_rxnfc *cmd) +{ + u16 flow_value = 0, flow_mask = 0xffff, ip_value = 0, ip_mask = 0xffff; + u16 sport = 0, sport_mask = ~0, dport = 0, dport_mask = ~0; + u16 misc = 0, misc_mask = ~0; + u32 dest = FIELD_PREP(FBNIC_RPC_ACT_TBL0_DEST_MASK, + FBNIC_RPC_ACT_TBL0_DEST_HOST); + struct fbnic_ip_addr *ip_src = NULL, *ip_dst = NULL; + struct fbnic_mac_addr *mac_addr = NULL; + struct ethtool_rx_flow_spec *fsp; + struct fbnic_dev *fbd = fbn->fbd; + struct fbnic_act_tcam *act_tcam; + struct in6_addr *addr6, *mask6; + struct in_addr *addr4, *mask4; + int hash_idx, location; + u32 flow_type; + int idx, j; + + fsp = (struct ethtool_rx_flow_spec *)&cmd->fs; + + if (fsp->location != RX_CLS_LOC_ANY) + 
return -EINVAL; + location = fbnic_cls_rule_any_loc(fbd); + if (location < 0) + return location; + + if (fsp->ring_cookie == RX_CLS_FLOW_DISC) { + dest = FBNIC_RPC_ACT_TBL0_DROP; + } else if (fsp->flow_type & FLOW_RSS) { + if (cmd->rss_context == 1) + dest |= FBNIC_RPC_ACT_TBL0_RSS_CTXT_ID; + } else { + u32 ring_idx = ethtool_get_flow_spec_ring(fsp->ring_cookie); + + if (ring_idx >= fbn->num_rx_queues) + return -EINVAL; + + dest |= FBNIC_RPC_ACT_TBL0_Q_SEL | + FIELD_PREP(FBNIC_RPC_ACT_TBL0_Q_ID, ring_idx); + } + + idx = location + FBNIC_RPC_ACT_TBL_NFC_OFFSET; + act_tcam = &fbd->act_tcam[idx]; + + /* Do not allow overwriting for now. + * To support overwriting rules we will need to add logic to free + * any IP or MACDA TCAMs that may be associated with the old rule. + */ + if (act_tcam->state != FBNIC_TCAM_S_DISABLED) + return -EBUSY; + + flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_RSS); + hash_idx = fbnic_get_rss_hash_idx(flow_type); + + switch (flow_type) { + case UDP_V4_FLOW: +udp4_flow: + flow_value |= FBNIC_RPC_TCAM_ACT1_L4_IS_UDP; + fallthrough; + case TCP_V4_FLOW: +tcp4_flow: + flow_value |= FBNIC_RPC_TCAM_ACT1_L4_VALID; + flow_mask &= ~(FBNIC_RPC_TCAM_ACT1_L4_IS_UDP | + FBNIC_RPC_TCAM_ACT1_L4_VALID); + + sport = be16_to_cpu(fsp->h_u.tcp_ip4_spec.psrc); + sport_mask = ~be16_to_cpu(fsp->m_u.tcp_ip4_spec.psrc); + dport = be16_to_cpu(fsp->h_u.tcp_ip4_spec.pdst); + dport_mask = ~be16_to_cpu(fsp->m_u.tcp_ip4_spec.pdst); + goto ip4_flow; + case IP_USER_FLOW: + if (!fsp->m_u.usr_ip4_spec.proto) + goto ip4_flow; + if (fsp->m_u.usr_ip4_spec.proto != 0xff) + return -EINVAL; + if (fsp->h_u.usr_ip4_spec.proto == IPPROTO_UDP) + goto udp4_flow; + if (fsp->h_u.usr_ip4_spec.proto == IPPROTO_TCP) + goto tcp4_flow; + return -EINVAL; +ip4_flow: + addr4 = (struct in_addr *)&fsp->h_u.usr_ip4_spec.ip4src; + mask4 = (struct in_addr *)&fsp->m_u.usr_ip4_spec.ip4src; + if (mask4->s_addr) { + ip_src = __fbnic_ip4_sync(fbd, fbd->ip_src, + addr4, mask4); + if (!ip_src) + return -ENOSPC; + + set_bit(idx, ip_src->act_tcam); + ip_value |= FBNIC_RPC_TCAM_ACT0_IPSRC_VALID | + FIELD_PREP(FBNIC_RPC_TCAM_ACT0_IPSRC_IDX, + ip_src - fbd->ip_src); + ip_mask &= ~(FBNIC_RPC_TCAM_ACT0_IPSRC_VALID | + FBNIC_RPC_TCAM_ACT0_IPSRC_IDX); + } + + addr4 = (struct in_addr *)&fsp->h_u.usr_ip4_spec.ip4dst; + mask4 = (struct in_addr *)&fsp->m_u.usr_ip4_spec.ip4dst; + if (mask4->s_addr) { + ip_dst = __fbnic_ip4_sync(fbd, fbd->ip_dst, + addr4, mask4); + if (!ip_dst) { + if (ip_src && ip_src->state == FBNIC_TCAM_S_ADD) + memset(ip_src, 0, sizeof(*ip_src)); + return -ENOSPC; + } + + set_bit(idx, ip_dst->act_tcam); + ip_value |= FBNIC_RPC_TCAM_ACT0_IPDST_VALID | + FIELD_PREP(FBNIC_RPC_TCAM_ACT0_IPDST_IDX, + ip_dst - fbd->ip_dst); + ip_mask &= ~(FBNIC_RPC_TCAM_ACT0_IPDST_VALID | + FBNIC_RPC_TCAM_ACT0_IPDST_IDX); + } + flow_value |= FBNIC_RPC_TCAM_ACT1_IP_VALID | + FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID; + flow_mask &= ~(FBNIC_RPC_TCAM_ACT1_IP_IS_V6 | + FBNIC_RPC_TCAM_ACT1_IP_VALID | + FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID); + break; + case UDP_V6_FLOW: +udp6_flow: + flow_value |= FBNIC_RPC_TCAM_ACT1_L4_IS_UDP; + fallthrough; + case TCP_V6_FLOW: +tcp6_flow: + flow_value |= FBNIC_RPC_TCAM_ACT1_L4_VALID; + flow_mask &= ~(FBNIC_RPC_TCAM_ACT1_L4_IS_UDP | + FBNIC_RPC_TCAM_ACT1_L4_VALID); + + sport = be16_to_cpu(fsp->h_u.tcp_ip6_spec.psrc); + sport_mask = ~be16_to_cpu(fsp->m_u.tcp_ip6_spec.psrc); + dport = be16_to_cpu(fsp->h_u.tcp_ip6_spec.pdst); + dport_mask = ~be16_to_cpu(fsp->m_u.tcp_ip6_spec.pdst); + goto ipv6_flow; + case IPV6_USER_FLOW: + if 
(!fsp->m_u.usr_ip6_spec.l4_proto) + goto ipv6_flow; + + if (fsp->m_u.usr_ip6_spec.l4_proto != 0xff) + return -EINVAL; + if (fsp->h_u.usr_ip6_spec.l4_proto == IPPROTO_UDP) + goto udp6_flow; + if (fsp->h_u.usr_ip6_spec.l4_proto == IPPROTO_TCP) + goto tcp6_flow; + if (fsp->h_u.usr_ip6_spec.l4_proto != IPPROTO_IPV6) + return -EINVAL; + + addr6 = (struct in6_addr *)fsp->h_u.usr_ip6_spec.ip6src; + mask6 = (struct in6_addr *)fsp->m_u.usr_ip6_spec.ip6src; + if (!ipv6_addr_any(mask6)) { + ip_src = __fbnic_ip6_sync(fbd, fbd->ipo_src, + addr6, mask6); + if (!ip_src) + return -ENOSPC; + + set_bit(idx, ip_src->act_tcam); + ip_value |= + FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_VALID | + FIELD_PREP(FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_IDX, + ip_src - fbd->ipo_src); + ip_mask &= + ~(FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_VALID | + FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_IDX); + } + + addr6 = (struct in6_addr *)fsp->h_u.usr_ip6_spec.ip6dst; + mask6 = (struct in6_addr *)fsp->m_u.usr_ip6_spec.ip6dst; + if (!ipv6_addr_any(mask6)) { + ip_dst = __fbnic_ip6_sync(fbd, fbd->ipo_dst, + addr6, mask6); + if (!ip_dst) { + if (ip_src && ip_src->state == FBNIC_TCAM_S_ADD) + memset(ip_src, 0, sizeof(*ip_src)); + return -ENOSPC; + } + + set_bit(idx, ip_dst->act_tcam); + ip_value |= + FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_VALID | + FIELD_PREP(FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_IDX, + ip_dst - fbd->ipo_dst); + ip_mask &= ~(FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_VALID | + FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_IDX); + } + + flow_value |= FBNIC_RPC_TCAM_ACT1_OUTER_IP_VALID; + flow_mask &= ~FBNIC_RPC_TCAM_ACT1_OUTER_IP_VALID; +ipv6_flow: + addr6 = (struct in6_addr *)fsp->h_u.usr_ip6_spec.ip6src; + mask6 = (struct in6_addr *)fsp->m_u.usr_ip6_spec.ip6src; + if (!ip_src && !ipv6_addr_any(mask6)) { + ip_src = __fbnic_ip6_sync(fbd, fbd->ip_src, + addr6, mask6); + if (!ip_src) + return -ENOSPC; + + set_bit(idx, ip_src->act_tcam); + ip_value |= FBNIC_RPC_TCAM_ACT0_IPSRC_VALID | + FIELD_PREP(FBNIC_RPC_TCAM_ACT0_IPSRC_IDX, + ip_src - fbd->ip_src); + ip_mask &= ~(FBNIC_RPC_TCAM_ACT0_IPSRC_VALID | + FBNIC_RPC_TCAM_ACT0_IPSRC_IDX); + } + + addr6 = (struct in6_addr *)fsp->h_u.usr_ip6_spec.ip6dst; + mask6 = (struct in6_addr *)fsp->m_u.usr_ip6_spec.ip6dst; + if (!ip_dst && !ipv6_addr_any(mask6)) { + ip_dst = __fbnic_ip6_sync(fbd, fbd->ip_dst, + addr6, mask6); + if (!ip_dst) { + if (ip_src && ip_src->state == FBNIC_TCAM_S_ADD) + memset(ip_src, 0, sizeof(*ip_src)); + return -ENOSPC; + } + + set_bit(idx, ip_dst->act_tcam); + ip_value |= FBNIC_RPC_TCAM_ACT0_IPDST_VALID | + FIELD_PREP(FBNIC_RPC_TCAM_ACT0_IPDST_IDX, + ip_dst - fbd->ip_dst); + ip_mask &= ~(FBNIC_RPC_TCAM_ACT0_IPDST_VALID | + FBNIC_RPC_TCAM_ACT0_IPDST_IDX); + } + + flow_value |= FBNIC_RPC_TCAM_ACT1_IP_IS_V6 | + FBNIC_RPC_TCAM_ACT1_IP_VALID | + FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID; + flow_mask &= ~(FBNIC_RPC_TCAM_ACT1_IP_IS_V6 | + FBNIC_RPC_TCAM_ACT1_IP_VALID | + FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID); + break; + case ETHER_FLOW: + if (!is_zero_ether_addr(fsp->m_u.ether_spec.h_dest)) { + u8 *addr = fsp->h_u.ether_spec.h_dest; + u8 *mask = fsp->m_u.ether_spec.h_dest; + + /* Do not allow MAC addr of 0 */ + if (is_zero_ether_addr(addr)) + return -EINVAL; + + /* Only support full MAC address to avoid + * conflicts with other MAC addresses.
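Throughout fbnic_set_cls_rule_ins() the value/mask pairs start with the mask all ones and bits are cleared as fields become significant, i.e. a set mask bit means "don't care". A toy model of that match convention, distilled from the code above (an illustration, not the hardware definition):

#include <assert.h>
#include <stdint.h>

/* A key matches a TCAM entry when it agrees with the stored value
 * on every bit the mask leaves significant (mask bit 0).
 */
static int tcam_match(uint16_t key, uint16_t value, uint16_t mask)
{
	return !((key ^ value) & ~mask);
}

int main(void)
{
	/* Only bit 0 significant: the mask keeps all other bits "don't care" */
	assert(tcam_match(0x0001, 0x0001, 0xfffe));
	assert(!tcam_match(0x0000, 0x0001, 0xfffe));
	/* All bits "don't care": any key matches */
	assert(tcam_match(0x1234, 0x0000, 0xffff));
	return 0;
}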
+ */ + if (!is_broadcast_ether_addr(mask)) + return -EINVAL; + + if (is_multicast_ether_addr(addr)) + mac_addr = __fbnic_mc_sync(fbd, addr); + else + mac_addr = __fbnic_uc_sync(fbd, addr); + + if (!mac_addr) + return -ENOSPC; + + set_bit(idx, mac_addr->act_tcam); + flow_value |= + FIELD_PREP(FBNIC_RPC_TCAM_ACT1_L2_MACDA_IDX, + mac_addr - fbd->mac_addr); + flow_mask &= ~FBNIC_RPC_TCAM_ACT1_L2_MACDA_IDX; + } + + flow_value |= FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID; + flow_mask &= ~FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID; + break; + default: + return -EINVAL; + } + + /* Write action table values */ + act_tcam->dest = dest; + act_tcam->rss_en_mask = fbnic_flow_hash_2_rss_en_mask(fbn, hash_idx); + + /* Write IP Match value/mask to action_tcam[0] */ + act_tcam->value.tcam[0] = ip_value; + act_tcam->mask.tcam[0] = ip_mask; + + /* Write flow type value/mask to action_tcam[1] */ + act_tcam->value.tcam[1] = flow_value; + act_tcam->mask.tcam[1] = flow_mask; + + /* Write error, DSCP, extra L4 matches to action_tcam[2] */ + act_tcam->value.tcam[2] = misc; + act_tcam->mask.tcam[2] = misc_mask; + + /* Write source/destination port values */ + act_tcam->value.tcam[3] = sport; + act_tcam->mask.tcam[3] = sport_mask; + act_tcam->value.tcam[4] = dport; + act_tcam->mask.tcam[4] = dport_mask; + + for (j = 5; j < FBNIC_RPC_TCAM_ACT_WORD_LEN; j++) + act_tcam->mask.tcam[j] = 0xffff; + + act_tcam->state = FBNIC_TCAM_S_UPDATE; + fsp->location = location; + + if (netif_running(fbn->netdev)) { + fbnic_write_rules(fbd); + if (ip_src || ip_dst) + fbnic_write_ip_addr(fbd); + if (mac_addr) + fbnic_write_macda(fbd); + } + + return 0; +} + +static void fbnic_clear_nfc_macda(struct fbnic_net *fbn, + unsigned int tcam_idx) +{ + struct fbnic_dev *fbd = fbn->fbd; + int idx; + + for (idx = ARRAY_SIZE(fbd->mac_addr); idx--;) + __fbnic_xc_unsync(&fbd->mac_addr[idx], tcam_idx); + + /* Write updates to hardware */ + if (netif_running(fbn->netdev)) + fbnic_write_macda(fbd); +} + +static void fbnic_clear_nfc_ip_addr(struct fbnic_net *fbn, + unsigned int tcam_idx) +{ + struct fbnic_dev *fbd = fbn->fbd; + int idx; + + for (idx = ARRAY_SIZE(fbd->ip_src); idx--;) + __fbnic_ip_unsync(&fbd->ip_src[idx], tcam_idx); + for (idx = ARRAY_SIZE(fbd->ip_dst); idx--;) + __fbnic_ip_unsync(&fbd->ip_dst[idx], tcam_idx); + for (idx = ARRAY_SIZE(fbd->ipo_src); idx--;) + __fbnic_ip_unsync(&fbd->ipo_src[idx], tcam_idx); + for (idx = ARRAY_SIZE(fbd->ipo_dst); idx--;) + __fbnic_ip_unsync(&fbd->ipo_dst[idx], tcam_idx); + + /* Write updates to hardware */ + if (netif_running(fbn->netdev)) + fbnic_write_ip_addr(fbd); +} + +static int fbnic_set_cls_rule_del(struct fbnic_net *fbn, + const struct ethtool_rxnfc *cmd) +{ + struct ethtool_rx_flow_spec *fsp; + struct fbnic_dev *fbd = fbn->fbd; + struct fbnic_act_tcam *act_tcam; + int idx; + + fsp = (struct ethtool_rx_flow_spec *)&cmd->fs; + + if (fsp->location >= FBNIC_RPC_ACT_TBL_NFC_ENTRIES) + return -EINVAL; + + idx = fsp->location + FBNIC_RPC_ACT_TBL_NFC_OFFSET; + act_tcam = &fbd->act_tcam[idx]; + + if (act_tcam->state != FBNIC_TCAM_S_VALID) + return -EINVAL; + + act_tcam->state = FBNIC_TCAM_S_DELETE; + + if ((act_tcam->value.tcam[1] & FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID) && + (~act_tcam->mask.tcam[1] & FBNIC_RPC_TCAM_ACT1_L2_MACDA_IDX)) + fbnic_clear_nfc_macda(fbn, idx); + + if ((act_tcam->value.tcam[0] & + (FBNIC_RPC_TCAM_ACT0_IPSRC_VALID | + FBNIC_RPC_TCAM_ACT0_IPDST_VALID | + FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_VALID | + FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_VALID)) && + (~act_tcam->mask.tcam[0] & + (FBNIC_RPC_TCAM_ACT0_IPSRC_IDX 
| + FBNIC_RPC_TCAM_ACT0_IPDST_IDX | + FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_IDX | + FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_IDX))) + fbnic_clear_nfc_ip_addr(fbn, idx); + + if (netif_running(fbn->netdev)) + fbnic_write_rules(fbd); + + return 0; +} + static int fbnic_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd) { struct fbnic_net *fbn = netdev_priv(netdev); @@ -281,6 +1083,12 @@ static int fbnic_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd) case ETHTOOL_SRXFH: ret = fbnic_set_rss_hash_opts(fbn, cmd); break; + case ETHTOOL_SRXCLSRLINS: + ret = fbnic_set_cls_rule_ins(fbn, cmd); + break; + case ETHTOOL_SRXCLSRLDEL: + ret = fbnic_set_cls_rule_del(fbn, cmd); + break; } return ret; @@ -374,6 +1182,61 @@ fbnic_set_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh, return 0; } +static int +fbnic_modify_rxfh_context(struct net_device *netdev, + struct ethtool_rxfh_context *ctx, + const struct ethtool_rxfh_param *rxfh, + struct netlink_ext_ack *extack) +{ + struct fbnic_net *fbn = netdev_priv(netdev); + const u32 *indir = rxfh->indir; + unsigned int changes; + + if (!indir) + indir = ethtool_rxfh_context_indir(ctx); + + changes = fbnic_set_indir(fbn, rxfh->rss_context, indir); + if (changes && netif_running(netdev)) + fbnic_rss_reinit_hw(fbn->fbd, fbn); + + return 0; +} + +static int +fbnic_create_rxfh_context(struct net_device *netdev, + struct ethtool_rxfh_context *ctx, + const struct ethtool_rxfh_param *rxfh, + struct netlink_ext_ack *extack) +{ + struct fbnic_net *fbn = netdev_priv(netdev); + + if (rxfh->hfunc && rxfh->hfunc != ETH_RSS_HASH_TOP) { + NL_SET_ERR_MSG_MOD(extack, "RSS hash function not supported"); + return -EOPNOTSUPP; + } + ctx->hfunc = ETH_RSS_HASH_TOP; + + if (!rxfh->indir) { + u32 *indir = ethtool_rxfh_context_indir(ctx); + unsigned int num_rx = fbn->num_rx_queues; + unsigned int i; + + for (i = 0; i < FBNIC_RPC_RSS_TBL_SIZE; i++) + indir[i] = ethtool_rxfh_indir_default(i, num_rx); + } + + return fbnic_modify_rxfh_context(netdev, ctx, rxfh, extack); +} + +static int +fbnic_remove_rxfh_context(struct net_device *netdev, + struct ethtool_rxfh_context *ctx, u32 rss_context, + struct netlink_ext_ack *extack) +{ + /* Nothing to do, contexts are allocated statically */ + return 0; +} + static void fbnic_get_channels(struct net_device *netdev, struct ethtool_channels *ch) { @@ -523,14 +1386,14 @@ static void fbnic_get_ts_stats(struct net_device *netdev, unsigned int start; int i; - ts_stats->pkts = fbn->tx_stats.ts_packets; - ts_stats->lost = fbn->tx_stats.ts_lost; + ts_stats->pkts = fbn->tx_stats.twq.ts_packets; + ts_stats->lost = fbn->tx_stats.twq.ts_lost; for (i = 0; i < fbn->num_tx_queues; i++) { ring = fbn->tx[i]; do { start = u64_stats_fetch_begin(&ring->stats.syncp); - ts_packets = ring->stats.ts_packets; - ts_lost = ring->stats.ts_lost; + ts_packets = ring->stats.twq.ts_packets; + ts_lost = ring->stats.twq.ts_lost; } while (u64_stats_fetch_retry(&ring->stats.syncp, start)); ts_stats->pkts += ts_packets; ts_stats->lost += ts_lost; @@ -586,9 +1449,17 @@ fbnic_get_eth_mac_stats(struct net_device *netdev, } static const struct ethtool_ops fbnic_ethtool_ops = { + .supported_coalesce_params = + ETHTOOL_COALESCE_USECS | + ETHTOOL_COALESCE_RX_MAX_FRAMES, + .rxfh_max_num_contexts = FBNIC_RPC_RSS_TBL_COUNT, .get_drvinfo = fbnic_get_drvinfo, .get_regs_len = fbnic_get_regs_len, .get_regs = fbnic_get_regs, + .get_coalesce = fbnic_get_coalesce, + .set_coalesce = fbnic_set_coalesce, + .get_ringparam = fbnic_get_ringparam, + .set_ringparam = 
fbnic_set_ringparam, .get_strings = fbnic_get_strings, .get_ethtool_stats = fbnic_get_ethtool_stats, .get_sset_count = fbnic_get_sset_count, @@ -598,6 +1469,9 @@ static const struct ethtool_ops fbnic_ethtool_ops = { .get_rxfh_indir_size = fbnic_get_rxfh_indir_size, .get_rxfh = fbnic_get_rxfh, .set_rxfh = fbnic_set_rxfh, + .create_rxfh_context = fbnic_create_rxfh_context, + .modify_rxfh_context = fbnic_modify_rxfh_context, + .remove_rxfh_context = fbnic_remove_rxfh_context, .get_channels = fbnic_get_channels, .set_channels = fbnic_set_channels, .get_ts_info = fbnic_get_ts_info, diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_fw.c b/drivers/net/ethernet/meta/fbnic/fbnic_fw.c index bbc7c1c0c37ef..88db3dacb9405 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_fw.c +++ b/drivers/net/ethernet/meta/fbnic/fbnic_fw.c @@ -494,16 +494,13 @@ static int fbnic_fw_parse_bmc_addrs(u8 bmc_mac_addr[][ETH_ALEN], static int fbnic_fw_parse_cap_resp(void *opaque, struct fbnic_tlv_msg **results) { - u32 active_slot = 0, all_multi = 0; + u32 all_multi = 0, version = 0; struct fbnic_dev *fbd = opaque; - u32 speed = 0, fec = 0; - size_t commit_size = 0; bool bmc_present; int err; - get_unsigned_result(FBNIC_FW_CAP_RESP_VERSION, - fbd->fw_cap.running.mgmt.version); - + version = fta_get_uint(results, FBNIC_FW_CAP_RESP_VERSION); + fbd->fw_cap.running.mgmt.version = version; if (!fbd->fw_cap.running.mgmt.version) return -EINVAL; @@ -524,43 +521,41 @@ static int fbnic_fw_parse_cap_resp(void *opaque, struct fbnic_tlv_msg **results) return -EINVAL; } - get_string_result(FBNIC_FW_CAP_RESP_VERSION_COMMIT_STR, commit_size, - fbd->fw_cap.running.mgmt.commit, - FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE); - if (!commit_size) + if (fta_get_str(results, FBNIC_FW_CAP_RESP_VERSION_COMMIT_STR, + fbd->fw_cap.running.mgmt.commit, + FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE) <= 0) dev_warn(fbd->dev, "Firmware did not send mgmt commit!\n"); - get_unsigned_result(FBNIC_FW_CAP_RESP_STORED_VERSION, - fbd->fw_cap.stored.mgmt.version); - get_string_result(FBNIC_FW_CAP_RESP_STORED_COMMIT_STR, commit_size, - fbd->fw_cap.stored.mgmt.commit, - FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE); - - get_unsigned_result(FBNIC_FW_CAP_RESP_CMRT_VERSION, - fbd->fw_cap.running.bootloader.version); - get_string_result(FBNIC_FW_CAP_RESP_CMRT_COMMIT_STR, commit_size, - fbd->fw_cap.running.bootloader.commit, - FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE); - - get_unsigned_result(FBNIC_FW_CAP_RESP_STORED_CMRT_VERSION, - fbd->fw_cap.stored.bootloader.version); - get_string_result(FBNIC_FW_CAP_RESP_STORED_CMRT_COMMIT_STR, commit_size, - fbd->fw_cap.stored.bootloader.commit, - FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE); - - get_unsigned_result(FBNIC_FW_CAP_RESP_UEFI_VERSION, - fbd->fw_cap.stored.undi.version); - get_string_result(FBNIC_FW_CAP_RESP_UEFI_COMMIT_STR, commit_size, - fbd->fw_cap.stored.undi.commit, - FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE); - - get_unsigned_result(FBNIC_FW_CAP_RESP_ACTIVE_FW_SLOT, active_slot); - fbd->fw_cap.active_slot = active_slot; - - get_unsigned_result(FBNIC_FW_CAP_RESP_FW_LINK_SPEED, speed); - get_unsigned_result(FBNIC_FW_CAP_RESP_FW_LINK_FEC, fec); - fbd->fw_cap.link_speed = speed; - fbd->fw_cap.link_fec = fec; + version = fta_get_uint(results, FBNIC_FW_CAP_RESP_STORED_VERSION); + fbd->fw_cap.stored.mgmt.version = version; + fta_get_str(results, FBNIC_FW_CAP_RESP_STORED_COMMIT_STR, + fbd->fw_cap.stored.mgmt.commit, + FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE); + + version = fta_get_uint(results, FBNIC_FW_CAP_RESP_CMRT_VERSION); + fbd->fw_cap.running.bootloader.version = version; 
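The fta_get_uint()/fta_get_str() helpers used here replace the older get_unsigned_result()/get_string_result() macros; judging from the surrounding code, an attribute the firmware did not send simply reads back as zero. A hypothetical sketch of that fetch-with-default pattern (the real helpers belong to the fbnic TLV code and differ in detail):

#include <stddef.h>
#include <stdint.h>

struct tlv_attr {
	uint32_t value;
};

/* results[] is indexed by attribute ID and holds NULL for
 * attributes that were absent from the message, so a missing
 * attribute decays to a default of 0.
 */
static uint64_t tlv_get_uint(struct tlv_attr *const *results, unsigned int id)
{
	return results[id] ? results[id]->value : 0;
}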
+ fta_get_str(results, FBNIC_FW_CAP_RESP_CMRT_COMMIT_STR, + fbd->fw_cap.running.bootloader.commit, + FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE); + + version = fta_get_uint(results, FBNIC_FW_CAP_RESP_STORED_CMRT_VERSION); + fbd->fw_cap.stored.bootloader.version = version; + fta_get_str(results, FBNIC_FW_CAP_RESP_STORED_CMRT_COMMIT_STR, + fbd->fw_cap.stored.bootloader.commit, + FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE); + + version = fta_get_uint(results, FBNIC_FW_CAP_RESP_UEFI_VERSION); + fbd->fw_cap.stored.undi.version = version; + fta_get_str(results, FBNIC_FW_CAP_RESP_UEFI_COMMIT_STR, + fbd->fw_cap.stored.undi.commit, + FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE); + + fbd->fw_cap.active_slot = + fta_get_uint(results, FBNIC_FW_CAP_RESP_ACTIVE_FW_SLOT); + fbd->fw_cap.link_speed = + fta_get_uint(results, FBNIC_FW_CAP_RESP_FW_LINK_SPEED); + fbd->fw_cap.link_fec = + fta_get_uint(results, FBNIC_FW_CAP_RESP_FW_LINK_FEC); bmc_present = !!results[FBNIC_FW_CAP_RESP_BMC_PRESENT]; if (bmc_present) { @@ -575,7 +570,8 @@ static int fbnic_fw_parse_cap_resp(void *opaque, struct fbnic_tlv_msg **results) if (err) return err; - get_unsigned_result(FBNIC_FW_CAP_RESP_BMC_ALL_MULTI, all_multi); + all_multi = + fta_get_uint(results, FBNIC_FW_CAP_RESP_BMC_ALL_MULTI); } else { memset(fbd->fw_cap.bmc_mac_addr, 0, sizeof(fbd->fw_cap.bmc_mac_addr)); @@ -743,9 +739,9 @@ int fbnic_fw_xmit_tsene_read_msg(struct fbnic_dev *fbd, } static const struct fbnic_tlv_index fbnic_tsene_read_resp_index[] = { - FBNIC_TLV_ATTR_S32(FBNIC_TSENE_THERM), - FBNIC_TLV_ATTR_S32(FBNIC_TSENE_VOLT), - FBNIC_TLV_ATTR_S32(FBNIC_TSENE_ERROR), + FBNIC_TLV_ATTR_S32(FBNIC_FW_TSENE_THERM), + FBNIC_TLV_ATTR_S32(FBNIC_FW_TSENE_VOLT), + FBNIC_TLV_ATTR_S32(FBNIC_FW_TSENE_ERROR), FBNIC_TLV_ATTR_LAST }; @@ -754,32 +750,31 @@ static int fbnic_fw_parse_tsene_read_resp(void *opaque, { struct fbnic_fw_completion *cmpl_data; struct fbnic_dev *fbd = opaque; + s32 err_resp; int err = 0; /* Verify we have a completion pointer to provide with data */ cmpl_data = fbnic_fw_get_cmpl_by_type(fbd, FBNIC_TLV_MSG_ID_TSENE_READ_RESP); if (!cmpl_data) - return -EINVAL; + return -ENOSPC; - if (results[FBNIC_TSENE_ERROR]) { - err = fbnic_tlv_attr_get_unsigned(results[FBNIC_TSENE_ERROR]); - if (err) - goto exit_complete; - } + err_resp = fta_get_sint(results, FBNIC_FW_TSENE_ERROR); + if (err_resp) + goto msg_err; - if (!results[FBNIC_TSENE_THERM] || !results[FBNIC_TSENE_VOLT]) { + if (!results[FBNIC_FW_TSENE_THERM] || !results[FBNIC_FW_TSENE_VOLT]) { err = -EINVAL; - goto exit_complete; + goto msg_err; } cmpl_data->u.tsene.millidegrees = - fbnic_tlv_attr_get_signed(results[FBNIC_TSENE_THERM]); + fta_get_sint(results, FBNIC_FW_TSENE_THERM); cmpl_data->u.tsene.millivolts = - fbnic_tlv_attr_get_signed(results[FBNIC_TSENE_VOLT]); + fta_get_sint(results, FBNIC_FW_TSENE_VOLT); -exit_complete: - cmpl_data->result = err; +msg_err: + cmpl_data->result = err_resp ? 
: err; complete(&cmpl_data->done); fbnic_fw_put_cmpl(cmpl_data); diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_fw.h b/drivers/net/ethernet/meta/fbnic/fbnic_fw.h index fe68333d51b18..a3618e7826c25 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_fw.h +++ b/drivers/net/ethernet/meta/fbnic/fbnic_fw.h @@ -139,10 +139,10 @@ enum { }; enum { - FBNIC_TSENE_THERM = 0x0, - FBNIC_TSENE_VOLT = 0x1, - FBNIC_TSENE_ERROR = 0x2, - FBNIC_TSENE_MSG_MAX + FBNIC_FW_TSENE_THERM = 0x0, + FBNIC_FW_TSENE_VOLT = 0x1, + FBNIC_FW_TSENE_ERROR = 0x2, + FBNIC_FW_TSENE_MSG_MAX }; enum { diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c index 7a96b6ee773f3..79a01fdd1dd16 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c +++ b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c @@ -487,8 +487,9 @@ static void fbnic_get_queue_stats_rx(struct net_device *dev, int idx, struct fbnic_net *fbn = netdev_priv(dev); struct fbnic_ring *rxr = fbn->rx[idx]; struct fbnic_queue_stats *stats; + u64 bytes, packets, alloc_fail; + u64 csum_complete, csum_none; unsigned int start; - u64 bytes, packets; if (!rxr) return; @@ -498,10 +499,16 @@ static void fbnic_get_queue_stats_rx(struct net_device *dev, int idx, start = u64_stats_fetch_begin(&stats->syncp); bytes = stats->bytes; packets = stats->packets; + alloc_fail = stats->rx.alloc_failed; + csum_complete = stats->rx.csum_complete; + csum_none = stats->rx.csum_none; } while (u64_stats_fetch_retry(&stats->syncp, start)); rx->bytes = bytes; rx->packets = packets; + rx->alloc_fail = alloc_fail; + rx->csum_complete = csum_complete; + rx->csum_none = csum_none; } static void fbnic_get_queue_stats_tx(struct net_device *dev, int idx, @@ -510,6 +517,7 @@ static void fbnic_get_queue_stats_tx(struct net_device *dev, int idx, struct fbnic_net *fbn = netdev_priv(dev); struct fbnic_ring *txr = fbn->tx[idx]; struct fbnic_queue_stats *stats; + u64 stop, wake, csum, lso; unsigned int start; u64 bytes, packets; @@ -521,10 +529,18 @@ static void fbnic_get_queue_stats_tx(struct net_device *dev, int idx, start = u64_stats_fetch_begin(&stats->syncp); bytes = stats->bytes; packets = stats->packets; + csum = stats->twq.csum_partial; + lso = stats->twq.lso; + stop = stats->twq.stop; + wake = stats->twq.wake; } while (u64_stats_fetch_retry(&stats->syncp, start)); tx->bytes = bytes; tx->packets = packets; + tx->needs_csum = csum + lso; + tx->hw_gso_wire_packets = lso; + tx->stop = stop; + tx->wake = wake; } static void fbnic_get_base_stats(struct net_device *dev, @@ -535,9 +551,16 @@ static void fbnic_get_base_stats(struct net_device *dev, tx->bytes = fbn->tx_stats.bytes; tx->packets = fbn->tx_stats.packets; + tx->needs_csum = fbn->tx_stats.twq.csum_partial + fbn->tx_stats.twq.lso; + tx->hw_gso_wire_packets = fbn->tx_stats.twq.lso; + tx->stop = fbn->tx_stats.twq.stop; + tx->wake = fbn->tx_stats.twq.wake; rx->bytes = fbn->rx_stats.bytes; rx->packets = fbn->rx_stats.packets; + rx->alloc_fail = fbn->rx_stats.rx.alloc_failed; + rx->csum_complete = fbn->rx_stats.rx.csum_complete; + rx->csum_none = fbn->rx_stats.rx.csum_none; } static const struct netdev_stat_ops fbnic_stat_ops = { @@ -588,7 +611,7 @@ void fbnic_netdev_free(struct fbnic_dev *fbd) * Allocate and initialize the netdev and netdev private structure. Bind * together the hardware, netdev, and pci data structures. 
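The u64_stats_fetch_begin()/u64_stats_fetch_retry() loops in the stats callbacks above implement a seqcount read: snapshot the counters, then retry if a writer was active during the read. A schematic userspace sketch of the retry idea (simplified; the kernel variant also handles memory ordering and 32-bit builds):

#include <stdatomic.h>
#include <stdint.h>

struct qstats {
	atomic_uint seq;	/* even = stable, odd = writer in progress */
	uint64_t bytes;
	uint64_t packets;
};

/* Retry the snapshot whenever the sequence was odd or changed,
 * meaning a writer raced with the read.
 */
static void qstats_read(struct qstats *s, uint64_t *bytes, uint64_t *packets)
{
	unsigned int start;

	do {
		start = atomic_load(&s->seq);
		*bytes = s->bytes;
		*packets = s->packets;
	} while ((start & 1) || atomic_load(&s->seq) != start);
}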
* - * Return: 0 on success, negative on failure + * Return: Pointer to net_device on success, NULL on failure **/ struct net_device *fbnic_netdev_alloc(struct fbnic_dev *fbd) { @@ -618,6 +641,10 @@ struct net_device *fbnic_netdev_alloc(struct fbnic_dev *fbd) fbn->ppq_size = FBNIC_PPQ_SIZE_DEFAULT; fbn->rcq_size = FBNIC_RCQ_SIZE_DEFAULT; + fbn->tx_usecs = FBNIC_TX_USECS_DEFAULT; + fbn->rx_usecs = FBNIC_RX_USECS_DEFAULT; + fbn->rx_max_frames = FBNIC_RX_FRAMES_DEFAULT; + default_queues = netif_get_num_default_rss_queues(); if (default_queues > fbd->max_num_queues) default_queues = fbd->max_num_queues; @@ -628,15 +655,32 @@ struct net_device *fbnic_netdev_alloc(struct fbnic_dev *fbd) fbnic_rss_key_fill(fbn->rss_key); fbnic_rss_init_en_mask(fbn); + netdev->priv_flags |= IFF_UNICAST_FLT; + + netdev->gso_partial_features = + NETIF_F_GSO_GRE | + NETIF_F_GSO_GRE_CSUM | + NETIF_F_GSO_IPXIP4 | + NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_GSO_UDP_TUNNEL_CSUM; + netdev->features |= + netdev->gso_partial_features | + FBNIC_TUN_GSO_FEATURES | NETIF_F_RXHASH | NETIF_F_SG | NETIF_F_HW_CSUM | - NETIF_F_RXCSUM; + NETIF_F_RXCSUM | + NETIF_F_TSO | + NETIF_F_TSO_ECN | + NETIF_F_TSO6 | + NETIF_F_GSO_PARTIAL | + NETIF_F_GSO_UDP_L4; netdev->hw_features |= netdev->features; netdev->vlan_features |= netdev->features; netdev->hw_enc_features |= netdev->features; + netdev->features |= NETIF_F_NTUPLE; netdev->min_mtu = IPV6_MIN_MTU; netdev->max_mtu = FBNIC_MAX_JUMBO_FRAME_SIZE - ETH_HLEN; diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h index a392ac1cc4f2f..561837e80ec80 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h +++ b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h @@ -12,6 +12,10 @@ #include "fbnic_txrx.h" #define FBNIC_MAX_NAPI_VECTORS 128u +#define FBNIC_MIN_RXD_PER_FRAME 2 + +/* Natively supported tunnel GSO features (not thru GSO_PARTIAL) */ +#define FBNIC_TUN_GSO_FEATURES NETIF_F_GSO_IPXIP6 struct fbnic_net { struct fbnic_ring *tx[FBNIC_MAX_TXQS]; @@ -27,6 +31,11 @@ struct fbnic_net { u32 ppq_size; u32 rcq_size; + u16 rx_usecs; + u16 tx_usecs; + + u32 rx_max_frames; + u16 num_napi; struct phylink *phylink; diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_phylink.c b/drivers/net/ethernet/meta/fbnic/fbnic_phylink.c index bb11fc83367d8..860b02b22c15e 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_phylink.c +++ b/drivers/net/ethernet/meta/fbnic/fbnic_phylink.c @@ -133,7 +133,6 @@ int fbnic_phylink_init(struct net_device *netdev) struct fbnic_net *fbn = netdev_priv(netdev); struct phylink *phylink; - fbn->phylink_pcs.neg_mode = true; fbn->phylink_pcs.ops = &fbnic_phylink_pcs_ops; fbn->phylink_config.dev = &netdev->dev; diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_rpc.c b/drivers/net/ethernet/meta/fbnic/fbnic_rpc.c index c25bd300b9024..8ff07b5562e3b 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_rpc.c +++ b/drivers/net/ethernet/meta/fbnic/fbnic_rpc.c @@ -3,6 +3,7 @@ #include #include +#include #include "fbnic.h" #include "fbnic_netdev.h" @@ -60,7 +61,7 @@ void fbnic_rss_disable_hw(struct fbnic_dev *fbd) #define FBNIC_FH_2_RSSEM_BIT(_fh, _rssem, _val) \ FIELD_PREP(FBNIC_RPC_ACT_TBL1_RSS_ENA_##_rssem, \ FIELD_GET(RXH_##_fh, _val)) -static u16 fbnic_flow_hash_2_rss_en_mask(struct fbnic_net *fbn, int flow_type) +u16 fbnic_flow_hash_2_rss_en_mask(struct fbnic_net *fbn, int flow_type) { u32 flow_hash = fbn->rss_flow_hash[flow_type]; u32 rss_en_mask = 0; @@ -698,6 +699,359 @@ void fbnic_write_tce_tcam(struct fbnic_dev *fbd) 
__fbnic_write_tce_tcam(fbd); } +struct fbnic_ip_addr *__fbnic_ip4_sync(struct fbnic_dev *fbd, + struct fbnic_ip_addr *ip_addr, + const struct in_addr *addr, + const struct in_addr *mask) +{ + struct fbnic_ip_addr *avail_addr = NULL; + unsigned int i; + + /* Scan from top of list to bottom, filling bottom up. */ + for (i = 0; i < FBNIC_RPC_TCAM_IP_ADDR_NUM_ENTRIES; i++, ip_addr++) { + struct in6_addr *m = &ip_addr->mask; + + if (ip_addr->state == FBNIC_TCAM_S_DISABLED) { + avail_addr = ip_addr; + continue; + } + + if (ip_addr->version != 4) + continue; + + /* Drop avail_addr if mask is a subset of our current mask. + * This prevents us from inserting a longer prefix behind a + * shorter one. + * + * The mask is stored as an inverted value, so as an example: + * m ffff ffff ffff ffff ffff ffff 0000 0000 + * mask 0000 0000 0000 0000 0000 0000 ffff ffff + * + * "m" and "mask" represent a typical IPv4 mask as stored in + * the TCAM and as provided by the stack. The code below + * should return a non-zero result if there is a 0 stored + * anywhere in "m" where "mask" has a 0. + */ + if (~m->s6_addr32[3] & ~mask->s_addr) { + avail_addr = NULL; + continue; + } + + /* Check to see if the mask actually contains fewer bits than + * our new mask "m". The XOR below should only result in 0 if + * "m" is masking a bit that we are looking for in our new + * "mask"; we eliminated the 0^0 case with the check above. + * + * If it contains fewer bits we need to stop here, otherwise + * we might be adding an unreachable rule. + */ + if (~(m->s6_addr32[3] ^ mask->s_addr)) + break; + + if (ip_addr->value.s6_addr32[3] == addr->s_addr) { + avail_addr = ip_addr; + break; + } + } + + if (avail_addr && avail_addr->state == FBNIC_TCAM_S_DISABLED) { + ipv6_addr_set(&avail_addr->value, 0, 0, 0, addr->s_addr); + ipv6_addr_set(&avail_addr->mask, htonl(~0), htonl(~0), + htonl(~0), ~mask->s_addr); + avail_addr->version = 4; + + avail_addr->state = FBNIC_TCAM_S_ADD; + } + + return avail_addr; +} + +struct fbnic_ip_addr *__fbnic_ip6_sync(struct fbnic_dev *fbd, + struct fbnic_ip_addr *ip_addr, + const struct in6_addr *addr, + const struct in6_addr *mask) +{ + struct fbnic_ip_addr *avail_addr = NULL; + unsigned int i; + + ip_addr = &ip_addr[FBNIC_RPC_TCAM_IP_ADDR_NUM_ENTRIES - 1]; + + /* Scan from bottom of list to top, filling top down. */ + for (i = FBNIC_RPC_TCAM_IP_ADDR_NUM_ENTRIES; i--; ip_addr--) { + struct in6_addr *m = &ip_addr->mask; + + if (ip_addr->state == FBNIC_TCAM_S_DISABLED) { + avail_addr = ip_addr; + continue; + } + + if (ip_addr->version != 6) + continue; + + /* Drop avail_addr if mask is a superset of our current mask. + * This prevents us from inserting a longer prefix behind a + * shorter one. + * + * The mask is stored as an inverted value, so as an example: + * m 0000 0000 0000 0000 0000 0000 0000 0000 + * mask ffff ffff ffff ffff ffff ffff ffff ffff + * + * "m" and "mask" represent a typical IPv6 mask as stored in + * the TCAM and as provided by the stack. The code below + * should return a non-zero result, which will cause us + * to drop any cached avail_addr value and so prevents + * us from inserting a v6 address behind it. + */ + if ((m->s6_addr32[0] & mask->s6_addr32[0]) | + (m->s6_addr32[1] & mask->s6_addr32[1]) | + (m->s6_addr32[2] & mask->s6_addr32[2]) | + (m->s6_addr32[3] & mask->s6_addr32[3])) { + avail_addr = NULL; + continue; + } + + /* The previous test eliminated any overlap between the + * two values so now we need to check for gaps.
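+ * + * The same inverted-storage convention applies here; as a worked + * example on a single 32-bit word, a stack mask of ffff0000 is + * stored as m = 0000ffff.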
+ * + * If the mask is equal to our current mask then it should + * result in m ^ mask = ffff ffff; if, however, the value + * stored in "m" is bigger, we should see a 0 appear + * somewhere in the XOR result. + */ + if (~(m->s6_addr32[0] ^ mask->s6_addr32[0]) | + ~(m->s6_addr32[1] ^ mask->s6_addr32[1]) | + ~(m->s6_addr32[2] ^ mask->s6_addr32[2]) | + ~(m->s6_addr32[3] ^ mask->s6_addr32[3])) + break; + + if (ipv6_addr_cmp(&ip_addr->value, addr)) + continue; + + avail_addr = ip_addr; + break; + } + + if (avail_addr && avail_addr->state == FBNIC_TCAM_S_DISABLED) { + memcpy(&avail_addr->value, addr, sizeof(*addr)); + ipv6_addr_set(&avail_addr->mask, + ~mask->s6_addr32[0], ~mask->s6_addr32[1], + ~mask->s6_addr32[2], ~mask->s6_addr32[3]); + avail_addr->version = 6; + + avail_addr->state = FBNIC_TCAM_S_ADD; + } + + return avail_addr; +} + +int __fbnic_ip_unsync(struct fbnic_ip_addr *ip_addr, unsigned int tcam_idx) +{ + if (!test_and_clear_bit(tcam_idx, ip_addr->act_tcam)) + return -ENOENT; + + if (bitmap_empty(ip_addr->act_tcam, FBNIC_RPC_TCAM_ACT_NUM_ENTRIES)) + ip_addr->state = FBNIC_TCAM_S_DELETE; + + return 0; +} + +static void fbnic_clear_ip_src_entry(struct fbnic_dev *fbd, unsigned int idx) +{ + int i; + + /* Invalidate entry and clear addr state info */ + for (i = 0; i <= FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN; i++) + wr32(fbd, FBNIC_RPC_TCAM_IPSRC(idx, i), 0); +} + +static void fbnic_clear_ip_dst_entry(struct fbnic_dev *fbd, unsigned int idx) +{ + int i; + + /* Invalidate entry and clear addr state info */ + for (i = 0; i <= FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN; i++) + wr32(fbd, FBNIC_RPC_TCAM_IPDST(idx, i), 0); +} + +static void fbnic_clear_ip_outer_src_entry(struct fbnic_dev *fbd, + unsigned int idx) +{ + int i; + + /* Invalidate entry and clear addr state info */ + for (i = 0; i <= FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN; i++) + wr32(fbd, FBNIC_RPC_TCAM_OUTER_IPSRC(idx, i), 0); +} + +static void fbnic_clear_ip_outer_dst_entry(struct fbnic_dev *fbd, + unsigned int idx) +{ + int i; + + /* Invalidate entry and clear addr state info */ + for (i = 0; i <= FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN; i++) + wr32(fbd, FBNIC_RPC_TCAM_OUTER_IPDST(idx, i), 0); +} + +static void fbnic_write_ip_src_entry(struct fbnic_dev *fbd, unsigned int idx, + struct fbnic_ip_addr *ip_addr) +{ + __be16 *mask, *value; + int i; + + mask = &ip_addr->mask.s6_addr16[FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN - 1]; + value = &ip_addr->value.s6_addr16[FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN - 1]; + + for (i = 0; i < FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN; i++) + wr32(fbd, FBNIC_RPC_TCAM_IPSRC(idx, i), + FIELD_PREP(FBNIC_RPC_TCAM_IP_ADDR_MASK, ntohs(*mask--)) | + FIELD_PREP(FBNIC_RPC_TCAM_IP_ADDR_VALUE, ntohs(*value--))); + wrfl(fbd); + + /* Bit 129 is used to flag for v4/v6 */ + wr32(fbd, FBNIC_RPC_TCAM_IPSRC(idx, i), + (ip_addr->version == 6) | FBNIC_RPC_TCAM_VALIDATE); +} + +static void fbnic_write_ip_dst_entry(struct fbnic_dev *fbd, unsigned int idx, + struct fbnic_ip_addr *ip_addr) +{ + __be16 *mask, *value; + int i; + + mask = &ip_addr->mask.s6_addr16[FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN - 1]; + value = &ip_addr->value.s6_addr16[FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN - 1]; + + for (i = 0; i < FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN; i++) + wr32(fbd, FBNIC_RPC_TCAM_IPDST(idx, i), + FIELD_PREP(FBNIC_RPC_TCAM_IP_ADDR_MASK, ntohs(*mask--)) | + FIELD_PREP(FBNIC_RPC_TCAM_IP_ADDR_VALUE, ntohs(*value--))); + wrfl(fbd); + + /* Bit 129 is used to flag for v4/v6 */ + wr32(fbd, FBNIC_RPC_TCAM_IPDST(idx, i), + (ip_addr->version == 6) | FBNIC_RPC_TCAM_VALIDATE); +} + +static void
fbnic_write_ip_outer_src_entry(struct fbnic_dev *fbd, + unsigned int idx, + struct fbnic_ip_addr *ip_addr) +{ + __be16 *mask, *value; + int i; + + mask = &ip_addr->mask.s6_addr16[FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN - 1]; + value = &ip_addr->value.s6_addr16[FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN - 1]; + + for (i = 0; i < FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN; i++) + wr32(fbd, FBNIC_RPC_TCAM_OUTER_IPSRC(idx, i), + FIELD_PREP(FBNIC_RPC_TCAM_IP_ADDR_MASK, ntohs(*mask--)) | + FIELD_PREP(FBNIC_RPC_TCAM_IP_ADDR_VALUE, ntohs(*value--))); + wrfl(fbd); + + wr32(fbd, FBNIC_RPC_TCAM_OUTER_IPSRC(idx, i), FBNIC_RPC_TCAM_VALIDATE); +} + +static void fbnic_write_ip_outer_dst_entry(struct fbnic_dev *fbd, + unsigned int idx, + struct fbnic_ip_addr *ip_addr) +{ + __be16 *mask, *value; + int i; + + mask = &ip_addr->mask.s6_addr16[FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN - 1]; + value = &ip_addr->value.s6_addr16[FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN - 1]; + + for (i = 0; i < FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN; i++) + wr32(fbd, FBNIC_RPC_TCAM_OUTER_IPDST(idx, i), + FIELD_PREP(FBNIC_RPC_TCAM_IP_ADDR_MASK, ntohs(*mask--)) | + FIELD_PREP(FBNIC_RPC_TCAM_IP_ADDR_VALUE, ntohs(*value--))); + wrfl(fbd); + + wr32(fbd, FBNIC_RPC_TCAM_OUTER_IPDST(idx, i), FBNIC_RPC_TCAM_VALIDATE); +} + +void fbnic_write_ip_addr(struct fbnic_dev *fbd) +{ + int idx; + + for (idx = ARRAY_SIZE(fbd->ip_src); idx--;) { + struct fbnic_ip_addr *ip_addr = &fbd->ip_src[idx]; + + /* Check if update flag is set else skip. */ + if (!(ip_addr->state & FBNIC_TCAM_S_UPDATE)) + continue; + + /* Clear by writing 0s. */ + if (ip_addr->state == FBNIC_TCAM_S_DELETE) { + /* Invalidate entry and clear addr state info */ + fbnic_clear_ip_src_entry(fbd, idx); + memset(ip_addr, 0, sizeof(*ip_addr)); + + continue; + } + + fbnic_write_ip_src_entry(fbd, idx, ip_addr); + + ip_addr->state = FBNIC_TCAM_S_VALID; + } + + /* Repeat process for other IP TCAMs */ + for (idx = ARRAY_SIZE(fbd->ip_dst); idx--;) { + struct fbnic_ip_addr *ip_addr = &fbd->ip_dst[idx]; + + if (!(ip_addr->state & FBNIC_TCAM_S_UPDATE)) + continue; + + if (ip_addr->state == FBNIC_TCAM_S_DELETE) { + fbnic_clear_ip_dst_entry(fbd, idx); + memset(ip_addr, 0, sizeof(*ip_addr)); + + continue; + } + + fbnic_write_ip_dst_entry(fbd, idx, ip_addr); + + ip_addr->state = FBNIC_TCAM_S_VALID; + } + + for (idx = ARRAY_SIZE(fbd->ipo_src); idx--;) { + struct fbnic_ip_addr *ip_addr = &fbd->ipo_src[idx]; + + if (!(ip_addr->state & FBNIC_TCAM_S_UPDATE)) + continue; + + if (ip_addr->state == FBNIC_TCAM_S_DELETE) { + fbnic_clear_ip_outer_src_entry(fbd, idx); + memset(ip_addr, 0, sizeof(*ip_addr)); + + continue; + } + + fbnic_write_ip_outer_src_entry(fbd, idx, ip_addr); + + ip_addr->state = FBNIC_TCAM_S_VALID; + } + + for (idx = ARRAY_SIZE(fbd->ipo_dst); idx--;) { + struct fbnic_ip_addr *ip_addr = &fbd->ipo_dst[idx]; + + if (!(ip_addr->state & FBNIC_TCAM_S_UPDATE)) + continue; + + if (ip_addr->state == FBNIC_TCAM_S_DELETE) { + fbnic_clear_ip_outer_dst_entry(fbd, idx); + memset(ip_addr, 0, sizeof(*ip_addr)); + + continue; + } + + fbnic_write_ip_outer_dst_entry(fbd, idx, ip_addr); + + ip_addr->state = FBNIC_TCAM_S_VALID; + } +} + void fbnic_clear_rules(struct fbnic_dev *fbd) { u32 dest = FIELD_PREP(FBNIC_RPC_ACT_TBL0_DEST_MASK, diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_rpc.h b/drivers/net/ethernet/meta/fbnic/fbnic_rpc.h index 0d8285fa5b454..6892414195c3e 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_rpc.h +++ b/drivers/net/ethernet/meta/fbnic/fbnic_rpc.h @@ -7,6 +7,8 @@ #include #include +struct in_addr; + /* The TCAM state definitions follow an 
expected ordering. * They start out disabled, then move through the following states: * Disabled 0 -> Add 2 @@ -32,6 +34,12 @@ enum { #define FBNIC_RPC_TCAM_MACDA_WORD_LEN 3 #define FBNIC_RPC_TCAM_MACDA_NUM_ENTRIES 32 +/* 8 IPSRC and IPDST TCAM Entries each + * 8 registers, Validate each + */ +#define FBNIC_RPC_TCAM_IP_ADDR_WORD_LEN 8 +#define FBNIC_RPC_TCAM_IP_ADDR_NUM_ENTRIES 8 + #define FBNIC_RPC_TCAM_ACT_WORD_LEN 11 #define FBNIC_RPC_TCAM_ACT_NUM_ENTRIES 64 @@ -47,6 +55,13 @@ struct fbnic_mac_addr { DECLARE_BITMAP(act_tcam, FBNIC_RPC_TCAM_ACT_NUM_ENTRIES); }; +struct fbnic_ip_addr { + struct in6_addr mask, value; + unsigned char version; + unsigned char state; + DECLARE_BITMAP(act_tcam, FBNIC_RPC_TCAM_ACT_NUM_ENTRIES); +}; + struct fbnic_act_tcam { struct { u16 tcam[FBNIC_RPC_TCAM_ACT_WORD_LEN]; @@ -81,6 +96,11 @@ enum { #define FBNIC_RPC_ACT_TBL_BMC_OFFSET 0 #define FBNIC_RPC_ACT_TBL_BMC_ALL_MULTI_OFFSET 1 +/* This should leave us with 48 total entries in the TCAM that can be used + * for NFC after also deducting the 14 needed for RSS table programming. + */ +#define FBNIC_RPC_ACT_TBL_NFC_OFFSET 2 + /* We reserve the last 14 entries for RSS rules on the host. The BMC * unicast rule will need to be populated above these and is expected to * use MACDA TCAM entry 23 to store the BMC MAC address. @@ -88,6 +108,9 @@ enum { #define FBNIC_RPC_ACT_TBL_RSS_OFFSET \ (FBNIC_RPC_ACT_TBL_NUM_ENTRIES - FBNIC_RSS_EN_NUM_ENTRIES) +#define FBNIC_RPC_ACT_TBL_NFC_ENTRIES \ + (FBNIC_RPC_ACT_TBL_RSS_OFFSET - FBNIC_RPC_ACT_TBL_NFC_OFFSET) + /* Flags used to identify the owner for this MAC filter. Note that any * flags set for Broadcast thru Promisc indicate that the rule belongs * to the RSS filters for the host. @@ -168,6 +191,7 @@ void fbnic_rss_init_en_mask(struct fbnic_net *fbn); void fbnic_rss_disable_hw(struct fbnic_dev *fbd); void fbnic_rss_reinit_hw(struct fbnic_dev *fbd, struct fbnic_net *fbn); void fbnic_rss_reinit(struct fbnic_dev *fbd, struct fbnic_net *fbn); +u16 fbnic_flow_hash_2_rss_en_mask(struct fbnic_net *fbn, int flow_type); int __fbnic_xc_unsync(struct fbnic_mac_addr *mac_addr, unsigned int tcam_idx); struct fbnic_mac_addr *__fbnic_uc_sync(struct fbnic_dev *fbd, @@ -177,6 +201,17 @@ struct fbnic_mac_addr *__fbnic_mc_sync(struct fbnic_dev *fbd, void fbnic_sift_macda(struct fbnic_dev *fbd); void fbnic_write_macda(struct fbnic_dev *fbd); +struct fbnic_ip_addr *__fbnic_ip4_sync(struct fbnic_dev *fbd, + struct fbnic_ip_addr *ip_addr, + const struct in_addr *addr, + const struct in_addr *mask); +struct fbnic_ip_addr *__fbnic_ip6_sync(struct fbnic_dev *fbd, + struct fbnic_ip_addr *ip_addr, + const struct in6_addr *addr, + const struct in6_addr *mask); +int __fbnic_ip_unsync(struct fbnic_ip_addr *ip_addr, unsigned int tcam_idx); +void fbnic_write_ip_addr(struct fbnic_dev *fbd); + static inline int __fbnic_uc_unsync(struct fbnic_mac_addr *mac_addr) { return __fbnic_xc_unsync(mac_addr, FBNIC_MAC_ADDR_T_UNICAST); diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_tlv.c b/drivers/net/ethernet/meta/fbnic/fbnic_tlv.c index 2a174ab062a3d..517ed8b2f1cb7 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_tlv.c +++ b/drivers/net/ethernet/meta/fbnic/fbnic_tlv.c @@ -196,13 +196,17 @@ int fbnic_tlv_attr_put_string(struct fbnic_tlv_msg *msg, u16 attr_id, /** * fbnic_tlv_attr_get_unsigned - Retrieve unsigned value from result * @attr: Attribute to retrieve data from + * @def: The default value if attr is NULL * * Return: unsigned 64b value containing integer value **/ -u64 fbnic_tlv_attr_get_unsigned(struct 
fbnic_tlv_msg *attr) +u64 fbnic_tlv_attr_get_unsigned(struct fbnic_tlv_msg *attr, u64 def) { __le64 le64_value = 0; + if (!attr) + return def; + memcpy(&le64_value, &attr->value[0], le16_to_cpu(attr->hdr.len) - sizeof(*attr)); @@ -212,15 +216,21 @@ u64 fbnic_tlv_attr_get_unsigned(struct fbnic_tlv_msg *attr) /** * fbnic_tlv_attr_get_signed - Retrieve signed value from result * @attr: Attribute to retrieve data from + * @def: The default value if attr is NULL * * Return: signed 64b value containing integer value **/ -s64 fbnic_tlv_attr_get_signed(struct fbnic_tlv_msg *attr) +s64 fbnic_tlv_attr_get_signed(struct fbnic_tlv_msg *attr, s64 def) { - int shift = (8 + sizeof(*attr) - le16_to_cpu(attr->hdr.len)) * 8; __le64 le64_value = 0; + int shift; s64 value; + if (!attr) + return def; + + shift = (8 + sizeof(*attr) - le16_to_cpu(attr->hdr.len)) * 8; + /* Copy the value and adjust for byte ordering */ memcpy(&le64_value, &attr->value[0], le16_to_cpu(attr->hdr.len) - sizeof(*attr)); @@ -233,19 +243,40 @@ s64 fbnic_tlv_attr_get_signed(struct fbnic_tlv_msg *attr) /** * fbnic_tlv_attr_get_string - Retrieve string value from result * @attr: Attribute to retrieve data from - * @str: Pointer to an allocated string to store the data - * @max_size: The maximum size which can be in str + * @dst: Pointer to an allocated string to store the data + * @dstsize: The maximum size which can be in dst * - * Return: the size of the string read from firmware + * Return: the size of the string read from firmware or negative error. **/ -size_t fbnic_tlv_attr_get_string(struct fbnic_tlv_msg *attr, char *str, - size_t max_size) +ssize_t fbnic_tlv_attr_get_string(struct fbnic_tlv_msg *attr, char *dst, + size_t dstsize) { - max_size = min_t(size_t, max_size, - (le16_to_cpu(attr->hdr.len) * 4) - sizeof(*attr)); - memcpy(str, &attr->value, max_size); + size_t srclen, len; + ssize_t ret; + + if (!attr) + return -EINVAL; + + if (dstsize == 0) + return -E2BIG; + + srclen = le16_to_cpu(attr->hdr.len) - sizeof(*attr); + if (srclen > 0 && ((char *)attr->value)[srclen - 1] == '\0') + srclen--; + + if (srclen >= dstsize) { + len = dstsize - 1; + ret = -E2BIG; + } else { + len = srclen; + ret = len; + } + + memcpy(dst, &attr->value, len); + /* Zero pad end of dst. 
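+ * This gives the caller strscpy_pad()-like semantics: the result is + * always NUL-terminated and the tail of the buffer is zeroed.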
*/ + memset(dst + len, 0, dstsize - len); - return max_size; + return ret; } /** diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_tlv.h b/drivers/net/ethernet/meta/fbnic/fbnic_tlv.h index 67300ab443532..c34bf87eeec95 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_tlv.h +++ b/drivers/net/ethernet/meta/fbnic/fbnic_tlv.h @@ -114,34 +114,10 @@ static inline bool fbnic_tlv_attr_get_bool(struct fbnic_tlv_msg *attr) return !!attr; } -u64 fbnic_tlv_attr_get_unsigned(struct fbnic_tlv_msg *attr); -s64 fbnic_tlv_attr_get_signed(struct fbnic_tlv_msg *attr); -size_t fbnic_tlv_attr_get_string(struct fbnic_tlv_msg *attr, char *str, - size_t max_size); - -#define get_unsigned_result(id, location) \ -do { \ - struct fbnic_tlv_msg *result = results[id]; \ - if (result) \ - location = fbnic_tlv_attr_get_unsigned(result); \ -} while (0) - -#define get_signed_result(id, location) \ -do { \ - struct fbnic_tlv_msg *result = results[id]; \ - if (result) \ - location = fbnic_tlv_attr_get_signed(result); \ -} while (0) - -#define get_string_result(id, size, str, max_size) \ -do { \ - struct fbnic_tlv_msg *result = results[id]; \ - if (result) \ - size = fbnic_tlv_attr_get_string(result, str, max_size); \ -} while (0) - -#define get_bool(id) (!!(results[id])) - +u64 fbnic_tlv_attr_get_unsigned(struct fbnic_tlv_msg *attr, u64 def); +s64 fbnic_tlv_attr_get_signed(struct fbnic_tlv_msg *attr, s64 def); +ssize_t fbnic_tlv_attr_get_string(struct fbnic_tlv_msg *attr, char *dst, + size_t dstsize); struct fbnic_tlv_msg *fbnic_tlv_msg_alloc(u16 msg_id); int fbnic_tlv_attr_put_flag(struct fbnic_tlv_msg *msg, const u16 attr_id); int fbnic_tlv_attr_put_value(struct fbnic_tlv_msg *msg, const u16 attr_id, @@ -170,6 +146,13 @@ int fbnic_tlv_msg_parse(void *opaque, struct fbnic_tlv_msg *msg, const struct fbnic_tlv_parser *parser); int fbnic_tlv_parser_error(void *opaque, struct fbnic_tlv_msg **results); +#define fta_get_uint(_results, _id) \ + fbnic_tlv_attr_get_unsigned(_results[_id], 0) +#define fta_get_sint(_results, _id) \ + fbnic_tlv_attr_get_signed(_results[_id], 0) +#define fta_get_str(_results, _id, _dst, _dstsize) \ + fbnic_tlv_attr_get_string(_results[_id], _dst, _dstsize) + #define FBNIC_TLV_MSG_ERROR \ FBNIC_TLV_PARSER(UNKNOWN, NULL, fbnic_tlv_parser_error) #endif /* _FBNIC_TLV_H_ */ diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c index d4d7027df9a03..ac11389a764ce 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c +++ b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c @@ -6,6 +6,7 @@ #include #include #include +#include #include "fbnic.h" #include "fbnic_csr.h" @@ -18,6 +19,7 @@ enum { struct fbnic_xmit_cb { u32 bytecount; + u16 gso_segs; u8 desc_count; u8 flags; int hw_head; @@ -113,6 +115,11 @@ static int fbnic_maybe_stop_tx(const struct net_device *dev, res = netif_txq_maybe_stop(txq, fbnic_desc_unused(ring), size, FBNIC_TX_DESC_WAKEUP); + if (!res) { + u64_stats_update_begin(&ring->stats.syncp); + ring->stats.twq.stop++; + u64_stats_update_end(&ring->stats.syncp); + } return !res; } @@ -173,9 +180,73 @@ static bool fbnic_tx_tstamp(struct sk_buff *skb) return true; } +static bool +fbnic_tx_lso(struct fbnic_ring *ring, struct sk_buff *skb, + struct skb_shared_info *shinfo, __le64 *meta, + unsigned int *l2len, unsigned int *i3len) +{ + unsigned int l3_type, l4_type, l4len, hdrlen; + unsigned char *l4hdr; + __be16 payload_len; + + if (unlikely(skb_cow_head(skb, 0))) + return true; + + if (shinfo->gso_type & SKB_GSO_PARTIAL) { + l3_type = 
FBNIC_TWD_L3_TYPE_OTHER; + } else if (!skb->encapsulation) { + if (ip_hdr(skb)->version == 4) + l3_type = FBNIC_TWD_L3_TYPE_IPV4; + else + l3_type = FBNIC_TWD_L3_TYPE_IPV6; + } else { + unsigned int o3len; + + o3len = skb_inner_network_header(skb) - skb_network_header(skb); + *i3len -= o3len; + *meta |= cpu_to_le64(FIELD_PREP(FBNIC_TWD_L3_OHLEN_MASK, + o3len / 2)); + l3_type = FBNIC_TWD_L3_TYPE_V6V6; + } + + l4hdr = skb_checksum_start(skb); + payload_len = cpu_to_be16(skb->len - (l4hdr - skb->data)); + + if (shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) { + struct tcphdr *tcph = (struct tcphdr *)l4hdr; + + l4_type = FBNIC_TWD_L4_TYPE_TCP; + l4len = __tcp_hdrlen((struct tcphdr *)l4hdr); + csum_replace_by_diff(&tcph->check, (__force __wsum)payload_len); + } else { + struct udphdr *udph = (struct udphdr *)l4hdr; + + l4_type = FBNIC_TWD_L4_TYPE_UDP; + l4len = sizeof(struct udphdr); + csum_replace_by_diff(&udph->check, (__force __wsum)payload_len); + } + + hdrlen = (l4hdr - skb->data) + l4len; + *meta |= cpu_to_le64(FIELD_PREP(FBNIC_TWD_L3_TYPE_MASK, l3_type) | + FIELD_PREP(FBNIC_TWD_L4_TYPE_MASK, l4_type) | + FIELD_PREP(FBNIC_TWD_L4_HLEN_MASK, l4len / 4) | + FIELD_PREP(FBNIC_TWD_MSS_MASK, shinfo->gso_size) | + FBNIC_TWD_FLAG_REQ_LSO); + + FBNIC_XMIT_CB(skb)->bytecount += (shinfo->gso_segs - 1) * hdrlen; + FBNIC_XMIT_CB(skb)->gso_segs = shinfo->gso_segs; + + u64_stats_update_begin(&ring->stats.syncp); + ring->stats.twq.lso += shinfo->gso_segs; + u64_stats_update_end(&ring->stats.syncp); + + return false; +} + static bool fbnic_tx_offloads(struct fbnic_ring *ring, struct sk_buff *skb, __le64 *meta) { + struct skb_shared_info *shinfo = skb_shinfo(skb); unsigned int l2len, i3len; if (fbnic_tx_tstamp(skb)) @@ -190,7 +261,15 @@ fbnic_tx_offloads(struct fbnic_ring *ring, struct sk_buff *skb, __le64 *meta) *meta |= cpu_to_le64(FIELD_PREP(FBNIC_TWD_CSUM_OFFSET_MASK, skb->csum_offset / 2)); - *meta |= cpu_to_le64(FBNIC_TWD_FLAG_REQ_CSO); + if (shinfo->gso_size) { + if (fbnic_tx_lso(ring, skb, shinfo, meta, &l2len, &i3len)) + return true; + } else { + *meta |= cpu_to_le64(FBNIC_TWD_FLAG_REQ_CSO); + u64_stats_update_begin(&ring->stats.syncp); + ring->stats.twq.csum_partial++; + u64_stats_update_end(&ring->stats.syncp); + } *meta |= cpu_to_le64(FIELD_PREP(FBNIC_TWD_L2_HLEN_MASK, l2len / 2) | FIELD_PREP(FBNIC_TWD_L3_IHLEN_MASK, i3len / 2)); @@ -198,12 +277,15 @@ fbnic_tx_offloads(struct fbnic_ring *ring, struct sk_buff *skb, __le64 *meta) } static void -fbnic_rx_csum(u64 rcd, struct sk_buff *skb, struct fbnic_ring *rcq) +fbnic_rx_csum(u64 rcd, struct sk_buff *skb, struct fbnic_ring *rcq, + u64 *csum_cmpl, u64 *csum_none) { skb_checksum_none_assert(skb); - if (unlikely(!(skb->dev->features & NETIF_F_RXCSUM))) + if (unlikely(!(skb->dev->features & NETIF_F_RXCSUM))) { + (*csum_none)++; return; + } if (FIELD_GET(FBNIC_RCD_META_L4_CSUM_UNNECESSARY, rcd)) { skb->ip_summed = CHECKSUM_UNNECESSARY; @@ -212,6 +294,7 @@ fbnic_rx_csum(u64 rcd, struct sk_buff *skb, struct fbnic_ring *rcq) skb->ip_summed = CHECKSUM_COMPLETE; skb->csum = (__force __wsum)csum; + (*csum_cmpl)++; } } @@ -329,7 +412,9 @@ fbnic_xmit_frame_ring(struct sk_buff *skb, struct fbnic_ring *ring) /* Write all members within DWORD to condense this into 2 4B writes */ FBNIC_XMIT_CB(skb)->bytecount = skb->len; + FBNIC_XMIT_CB(skb)->gso_segs = 1; FBNIC_XMIT_CB(skb)->desc_count = 0; + FBNIC_XMIT_CB(skb)->flags = 0; if (fbnic_tx_offloads(ring, skb, meta)) goto err_free; @@ -356,6 +441,59 @@ netdev_tx_t fbnic_xmit_frame(struct sk_buff *skb, struct 
net_device *dev) return fbnic_xmit_frame_ring(skb, fbn->tx[q_map]); } +static netdev_features_t +fbnic_features_check_encap_gso(struct sk_buff *skb, struct net_device *dev, + netdev_features_t features, unsigned int l3len) +{ + netdev_features_t skb_gso_features; + struct ipv6hdr *ip6_hdr; + unsigned char l4_hdr; + unsigned int start; + __be16 frag_off; + + /* Require MANGLEID for GSO_PARTIAL of IPv4. + * In theory we could support TSO with single, innermost v4 header + * by pretending everything before it is L2, but that needs to be + * parsed case by case... so leaving it for when the need arises. + */ + if (!(features & NETIF_F_TSO_MANGLEID)) + features &= ~NETIF_F_TSO; + + skb_gso_features = skb_shinfo(skb)->gso_type; + skb_gso_features <<= NETIF_F_GSO_SHIFT; + + /* We'd only clear the native GSO features, so don't bother validating + * if the match can only be on those supported thru GSO_PARTIAL. + */ + if (!(skb_gso_features & FBNIC_TUN_GSO_FEATURES)) + return features; + + /* We can only do IPv6-in-IPv6, not v4-in-v6. It'd be nice + * to fall back to partial for this, or any failure below. + * This is just an optimization; UDPv4 will be caught later on. + */ + if (skb_gso_features & NETIF_F_TSO) + return features & ~FBNIC_TUN_GSO_FEATURES; + + /* Inner headers multiple of 2 */ + if ((skb_inner_network_header(skb) - skb_network_header(skb)) % 2) + return features & ~FBNIC_TUN_GSO_FEATURES; + + /* Encapsulated GSO packet, make 100% sure it's IPv6-in-IPv6. */ + ip6_hdr = ipv6_hdr(skb); + if (ip6_hdr->version != 6) + return features & ~FBNIC_TUN_GSO_FEATURES; + + l4_hdr = ip6_hdr->nexthdr; + start = (unsigned char *)ip6_hdr - skb->data + sizeof(struct ipv6hdr); + start = ipv6_skip_exthdr(skb, start, &l4_hdr, &frag_off); + if (frag_off || l4_hdr != IPPROTO_IPV6 || + skb->data + start != skb_inner_network_header(skb)) + return features & ~FBNIC_TUN_GSO_FEATURES; + + return features; +} + netdev_features_t fbnic_features_check(struct sk_buff *skb, struct net_device *dev, netdev_features_t features) @@ -376,9 +514,12 @@ fbnic_features_check(struct sk_buff *skb, struct net_device *dev, !FIELD_FIT(FBNIC_TWD_L2_HLEN_MASK, l2len / 2) || !FIELD_FIT(FBNIC_TWD_L3_IHLEN_MASK, l3len / 2) || !FIELD_FIT(FBNIC_TWD_CSUM_OFFSET_MASK, skb->csum_offset / 2)) - return features & ~NETIF_F_CSUM_MASK; + return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); - return features; + if (likely(!skb->encapsulation) || !skb_is_gso(skb)) + return features; + + return fbnic_features_check_encap_gso(skb, dev, features, l3len); } static void fbnic_clean_twq0(struct fbnic_napi_vector *nv, int napi_budget, @@ -429,7 +570,7 @@ static void fbnic_clean_twq0(struct fbnic_napi_vector *nv, int napi_budget, } total_bytes += FBNIC_XMIT_CB(skb)->bytecount; - total_packets += 1; + total_packets += FBNIC_XMIT_CB(skb)->gso_segs; napi_consume_skb(skb, napi_budget); } @@ -444,7 +585,7 @@ static void fbnic_clean_twq0(struct fbnic_napi_vector *nv, int napi_budget, if (unlikely(discard)) { u64_stats_update_begin(&ring->stats.syncp); ring->stats.dropped += total_packets; - ring->stats.ts_lost += ts_lost; + ring->stats.twq.ts_lost += ts_lost; u64_stats_update_end(&ring->stats.syncp); netdev_tx_completed_queue(txq, total_packets, total_bytes); @@ -456,9 +597,13 @@ static void fbnic_clean_twq0(struct fbnic_napi_vector *nv, int napi_budget, ring->stats.packets += total_packets; u64_stats_update_end(&ring->stats.syncp); - netif_txq_completed_wake(txq, total_packets, total_bytes, - fbnic_desc_unused(ring), - FBNIC_TX_DESC_WAKEUP); + if
(!netif_txq_completed_wake(txq, total_packets, total_bytes, + fbnic_desc_unused(ring), + FBNIC_TX_DESC_WAKEUP)) { + u64_stats_update_begin(&ring->stats.syncp); + ring->stats.twq.wake++; + u64_stats_update_end(&ring->stats.syncp); + } } static void fbnic_clean_tsq(struct fbnic_napi_vector *nv, @@ -507,7 +652,7 @@ static void fbnic_clean_tsq(struct fbnic_napi_vector *nv, skb_tstamp_tx(skb, &hwtstamp); u64_stats_update_begin(&ring->stats.syncp); - ring->stats.ts_packets++; + ring->stats.twq.ts_packets++; u64_stats_update_end(&ring->stats.syncp); } @@ -661,8 +806,13 @@ static void fbnic_fill_bdq(struct fbnic_napi_vector *nv, struct fbnic_ring *bdq) struct page *page; page = page_pool_dev_alloc_pages(nv->page_pool); - if (!page) + if (!page) { + u64_stats_update_begin(&bdq->stats.syncp); + bdq->stats.rx.alloc_failed++; + u64_stats_update_end(&bdq->stats.syncp); + break; + } fbnic_page_pool_init(bdq, i, page); fbnic_bd_prep(bdq, i, page); @@ -875,12 +1025,13 @@ static void fbnic_rx_tstamp(struct fbnic_napi_vector *nv, u64 rcd, static void fbnic_populate_skb_fields(struct fbnic_napi_vector *nv, u64 rcd, struct sk_buff *skb, - struct fbnic_q_triad *qt) + struct fbnic_q_triad *qt, + u64 *csum_cmpl, u64 *csum_none) { struct net_device *netdev = nv->napi.dev; struct fbnic_ring *rcq = &qt->cmpl; - fbnic_rx_csum(rcd, skb, rcq); + fbnic_rx_csum(rcd, skb, rcq, csum_cmpl, csum_none); if (netdev->features & NETIF_F_RXHASH) skb_set_hash(skb, @@ -898,7 +1049,8 @@ static bool fbnic_rcd_metadata_err(u64 rcd) static int fbnic_clean_rcq(struct fbnic_napi_vector *nv, struct fbnic_q_triad *qt, int budget) { - unsigned int packets = 0, bytes = 0, dropped = 0; + unsigned int packets = 0, bytes = 0, dropped = 0, alloc_failed = 0; + u64 csum_complete = 0, csum_none = 0; struct fbnic_ring *rcq = &qt->cmpl; struct fbnic_pkt_buff *pkt; s32 head0 = -1, head1 = -1; @@ -947,14 +1099,20 @@ static int fbnic_clean_rcq(struct fbnic_napi_vector *nv, /* Populate skb and invalidate XDP */ if (!IS_ERR_OR_NULL(skb)) { - fbnic_populate_skb_fields(nv, rcd, skb, qt); + fbnic_populate_skb_fields(nv, rcd, skb, qt, + &csum_complete, + &csum_none); packets++; bytes += skb->len; napi_gro_receive(&nv->napi, skb); } else { - dropped++; + if (!skb) + alloc_failed++; + + dropped++; + fbnic_put_pkt_buff(nv, pkt, 1); } @@ -977,6 +1137,9 @@ static int fbnic_clean_rcq(struct fbnic_napi_vector *nv, /* Re-add ethernet header length (removed in fbnic_build_skb) */ rcq->stats.bytes += ETH_HLEN * packets; rcq->stats.dropped += dropped; + rcq->stats.rx.alloc_failed += alloc_failed; + rcq->stats.rx.csum_complete += csum_complete; + rcq->stats.rx.csum_none += csum_none; u64_stats_update_end(&rcq->stats.syncp); /* Unmap and free processed buffers */ @@ -1054,6 +1217,11 @@ void fbnic_aggregate_ring_rx_counters(struct fbnic_net *fbn, fbn->rx_stats.bytes += stats->bytes; fbn->rx_stats.packets += stats->packets; fbn->rx_stats.dropped += stats->dropped; + fbn->rx_stats.rx.alloc_failed += stats->rx.alloc_failed; + fbn->rx_stats.rx.csum_complete += stats->rx.csum_complete; + fbn->rx_stats.rx.csum_none += stats->rx.csum_none; + /* Remember to add new stats here */ + BUILD_BUG_ON(sizeof(fbn->rx_stats.rx) / 8 != 3); } void fbnic_aggregate_ring_tx_counters(struct fbnic_net *fbn, @@ -1065,8 +1233,14 @@ void fbnic_aggregate_ring_tx_counters(struct fbnic_net *fbn, fbn->tx_stats.bytes += stats->bytes; fbn->tx_stats.packets += stats->packets; fbn->tx_stats.dropped += stats->dropped; - fbn->tx_stats.ts_lost += stats->ts_lost; -
fbn->tx_stats.ts_packets += stats->ts_packets; + fbn->tx_stats.twq.csum_partial += stats->twq.csum_partial; + fbn->tx_stats.twq.lso += stats->twq.lso; + fbn->tx_stats.twq.ts_lost += stats->twq.ts_lost; + fbn->tx_stats.twq.ts_packets += stats->twq.ts_packets; + fbn->tx_stats.twq.stop += stats->twq.stop; + fbn->tx_stats.twq.wake += stats->twq.wake; + /* Remember to add new stats here */ + BUILD_BUG_ON(sizeof(fbn->tx_stats.twq) / 8 != 6); } static void fbnic_remove_tx_ring(struct fbnic_net *fbn, @@ -1142,7 +1316,9 @@ static int fbnic_alloc_nv_page_pool(struct fbnic_net *fbn, .dev = nv->dev, .dma_dir = DMA_BIDIRECTIONAL, .offset = 0, - .max_len = PAGE_SIZE + .max_len = PAGE_SIZE, + .napi = &nv->napi, + .netdev = fbn->netdev, }; struct page_pool *pp; @@ -2010,9 +2186,51 @@ static void fbnic_config_drop_mode_rcq(struct fbnic_napi_vector *nv, fbnic_ring_wr32(rcq, FBNIC_QUEUE_RDE_CTL0, rcq_ctl); } +static void fbnic_config_rim_threshold(struct fbnic_ring *rcq, u16 nv_idx, u32 rx_desc) +{ + u32 threshold; + + /* Set the threshold to half the ring size if rx_frames + * is not configured + */ + threshold = rx_desc ?: rcq->size_mask / 2; + + fbnic_ring_wr32(rcq, FBNIC_QUEUE_RIM_CTL, nv_idx); + fbnic_ring_wr32(rcq, FBNIC_QUEUE_RIM_THRESHOLD, threshold); +} + +void fbnic_config_txrx_usecs(struct fbnic_napi_vector *nv, u32 arm) +{ + struct fbnic_net *fbn = netdev_priv(nv->napi.dev); + struct fbnic_dev *fbd = nv->fbd; + u32 val = arm; + + val |= FIELD_PREP(FBNIC_INTR_CQ_REARM_RCQ_TIMEOUT, fbn->rx_usecs) | + FBNIC_INTR_CQ_REARM_RCQ_TIMEOUT_UPD_EN; + val |= FIELD_PREP(FBNIC_INTR_CQ_REARM_TCQ_TIMEOUT, fbn->tx_usecs) | + FBNIC_INTR_CQ_REARM_TCQ_TIMEOUT_UPD_EN; + + fbnic_wr32(fbd, FBNIC_INTR_CQ_REARM(nv->v_idx), val); +} + +void fbnic_config_rx_frames(struct fbnic_napi_vector *nv) +{ + struct fbnic_net *fbn = netdev_priv(nv->napi.dev); + int i; + + for (i = nv->txt_count; i < nv->rxt_count + nv->txt_count; i++) { + struct fbnic_q_triad *qt = &nv->qt[i]; + + fbnic_config_rim_threshold(&qt->cmpl, nv->v_idx, + fbn->rx_max_frames * + FBNIC_MIN_RXD_PER_FRAME); + } +} + static void fbnic_enable_rcq(struct fbnic_napi_vector *nv, struct fbnic_ring *rcq) { + struct fbnic_net *fbn = netdev_priv(nv->napi.dev); u32 log_size = fls(rcq->size_mask); u32 rcq_ctl; @@ -2040,8 +2258,8 @@ static void fbnic_enable_rcq(struct fbnic_napi_vector *nv, fbnic_ring_wr32(rcq, FBNIC_QUEUE_RCQ_SIZE, log_size & 0xf); /* Store interrupt information for the completion queue */ - fbnic_ring_wr32(rcq, FBNIC_QUEUE_RIM_CTL, nv->v_idx); - fbnic_ring_wr32(rcq, FBNIC_QUEUE_RIM_THRESHOLD, rcq->size_mask / 2); + fbnic_config_rim_threshold(rcq, nv->v_idx, fbn->rx_max_frames * + FBNIC_MIN_RXD_PER_FRAME); fbnic_ring_wr32(rcq, FBNIC_QUEUE_RIM_MASK, 0); /* Enable queue */ @@ -2080,12 +2298,7 @@ void fbnic_enable(struct fbnic_net *fbn) static void fbnic_nv_irq_enable(struct fbnic_napi_vector *nv) { - struct fbnic_dev *fbd = nv->fbd; - u32 val; - - val = FBNIC_INTR_CQ_REARM_INTR_UNMASK; - - fbnic_wr32(fbd, FBNIC_INTR_CQ_REARM(nv->v_idx), val); + fbnic_config_txrx_usecs(nv, FBNIC_INTR_CQ_REARM_INTR_UNMASK); } void fbnic_napi_enable(struct fbnic_net *fbn) diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h index c2a94f31f71bd..f46616af41eac 100644 --- a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h +++ b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h @@ -24,13 +24,29 @@ struct fbnic_net; #define FBNIC_TX_DESC_WAKEUP (FBNIC_MAX_SKB_DESC * 2) #define FBNIC_TX_DESC_MIN roundup_pow_of_two(FBNIC_TX_DESC_WAKEUP) +/*
To receive the worst case packet we need: + * 1 descriptor for primary metadata + * + 1 descriptor for optional metadata + * + 1 descriptor for headers + * + 4 descriptors for payload + */ +#define FBNIC_MAX_RX_PKT_DESC 7 +#define FBNIC_RX_DESC_MIN roundup_pow_of_two(FBNIC_MAX_RX_PKT_DESC * 2) + #define FBNIC_MAX_TXQS 128u #define FBNIC_MAX_RXQS 128u +/* These apply to TWQs, TCQ, RCQ */ +#define FBNIC_QUEUE_SIZE_MIN 16u +#define FBNIC_QUEUE_SIZE_MAX SZ_64K + #define FBNIC_TXQ_SIZE_DEFAULT 1024 #define FBNIC_HPQ_SIZE_DEFAULT 256 #define FBNIC_PPQ_SIZE_DEFAULT 256 #define FBNIC_RCQ_SIZE_DEFAULT 1024 +#define FBNIC_TX_USECS_DEFAULT 35 +#define FBNIC_RX_USECS_DEFAULT 30 +#define FBNIC_RX_FRAMES_DEFAULT 0 #define FBNIC_RX_TROOM \ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) @@ -56,9 +72,22 @@ struct fbnic_pkt_buff { struct fbnic_queue_stats { u64 packets; u64 bytes; + union { + struct { + u64 csum_partial; + u64 lso; + u64 ts_packets; + u64 ts_lost; + u64 stop; + u64 wake; + } twq; + struct { + u64 alloc_failed; + u64 csum_complete; + u64 csum_none; + } rx; + }; u64 dropped; - u64 ts_packets; - u64 ts_lost; struct u64_stats_sync syncp; }; diff --git a/drivers/net/ethernet/micrel/ks8851_spi.c b/drivers/net/ethernet/micrel/ks8851_spi.c index 3062cc0f91992..c862b13b447a5 100644 --- a/drivers/net/ethernet/micrel/ks8851_spi.c +++ b/drivers/net/ethernet/micrel/ks8851_spi.c @@ -20,8 +20,6 @@ #include #include -#include -#include #include #include "ks8851.h" diff --git a/drivers/net/ethernet/microchip/lan743x_ptp.c b/drivers/net/ethernet/microchip/lan743x_ptp.c index 4a777b449ecd0..0be44dcb33938 100644 --- a/drivers/net/ethernet/microchip/lan743x_ptp.c +++ b/drivers/net/ethernet/microchip/lan743x_ptp.c @@ -942,6 +942,12 @@ static int lan743x_ptp_io_extts(struct lan743x_adapter *adapter, int on, extts = &ptp->extts[index]; + if (extts_request->flags & ~(PTP_ENABLE_FEATURE | + PTP_RISING_EDGE | + PTP_FALLING_EDGE | + PTP_STRICT_FLAGS)) + return -EOPNOTSUPP; + if (on) { extts_pin = ptp_find_pin(ptp->ptp_clock, PTP_PF_EXTTS, index); if (extts_pin < 0) diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c index 3234a960fcc30..0af143ec0f869 100644 --- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c @@ -828,7 +828,6 @@ static int lan966x_probe_port(struct lan966x *lan966x, u32 p, port->phylink_config.type = PHYLINK_NETDEV; port->phylink_pcs.poll = true; port->phylink_pcs.ops = &lan966x_phylink_pcs_ops; - port->phylink_pcs.neg_mode = true; port->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE | MAC_10 | MAC_100 | MAC_1000FD | MAC_2500FD; diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c index 6a0e5b83ecd07..74ad1d73b4652 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c @@ -338,7 +338,6 @@ static int sparx5_create_port(struct sparx5 *sparx5, spx5_port->custom_etype = 0x8880; /* Vitesse */ spx5_port->phylink_pcs.poll = true; spx5_port->phylink_pcs.ops = &sparx5_phylink_pcs_ops; - spx5_port->phylink_pcs.neg_mode = true; spx5_port->is_mrouter = false; INIT_LIST_HEAD(&spx5_port->tc_templates); sparx5->ports[config->portno] = spx5_port; diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c index be95336ce089a..62dfb6d1638c7 100644 --- 
a/drivers/net/ethernet/microsoft/mana/gdma_main.c +++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c @@ -134,9 +134,10 @@ static int mana_gd_detect_devices(struct pci_dev *pdev) struct gdma_list_devices_resp resp = {}; struct gdma_general_req req = {}; struct gdma_dev_id dev; - u32 i, max_num_devs; + int found_dev = 0; u16 dev_type; int err; + u32 i; mana_gd_init_req_hdr(&req.hdr, GDMA_LIST_DEVICES, sizeof(req), sizeof(resp)); @@ -148,12 +149,17 @@ static int mana_gd_detect_devices(struct pci_dev *pdev) return err ? err : -EPROTO; } - max_num_devs = min_t(u32, MAX_NUM_GDMA_DEVICES, resp.num_of_devs); - - for (i = 0; i < max_num_devs; i++) { + for (i = 0; i < GDMA_DEV_LIST_SIZE && + found_dev < resp.num_of_devs; i++) { dev = resp.devs[i]; dev_type = dev.type; + /* Skip empty devices */ + if (dev.as_uint32 == 0) + continue; + + found_dev++; + /* HWC is already detected in mana_hwc_create_channel(). */ if (dev_type == GDMA_DEVICE_HWC) continue; @@ -666,8 +672,11 @@ int mana_gd_create_hwc_queue(struct gdma_dev *gd, gmi = &queue->mem_info; err = mana_gd_alloc_memory(gc, spec->queue_size, gmi); - if (err) + if (err) { + dev_err(gc->dev, "GDMA queue type: %d, size: %u, gdma memory allocation err: %d\n", + spec->type, spec->queue_size, err); goto free_q; + } queue->head = 0; queue->tail = 0; @@ -688,6 +697,8 @@ int mana_gd_create_hwc_queue(struct gdma_dev *gd, *queue_ptr = queue; return 0; out: + dev_err(gc->dev, "Failed to create queue type %d of size %u, err: %d\n", + spec->type, spec->queue_size, err); mana_gd_free_memory(gmi); free_q: kfree(queue); @@ -770,7 +781,13 @@ static int mana_gd_create_dma_region(struct gdma_dev *gd, } gmi->dma_region_handle = resp.dma_region_handle; + dev_dbg(gc->dev, "Created DMA region handle 0x%llx\n", + gmi->dma_region_handle); out: + if (err) + dev_dbg(gc->dev, + "Failed to create DMA region of length: %u, page_type: %d, status: 0x%x, err: %d\n", + length, req->gdma_page_type, resp.hdr.status, err); kfree(req); return err; } @@ -793,8 +810,11 @@ int mana_gd_create_mana_eq(struct gdma_dev *gd, gmi = &queue->mem_info; err = mana_gd_alloc_memory(gc, spec->queue_size, gmi); - if (err) + if (err) { + dev_err(gc->dev, "GDMA queue type: %d, size: %u, gdma memory allocation err: %d\n", + spec->type, spec->queue_size, err); goto free_q; + } err = mana_gd_create_dma_region(gd, gmi); if (err) @@ -815,6 +835,8 @@ int mana_gd_create_mana_eq(struct gdma_dev *gd, *queue_ptr = queue; return 0; out: + dev_err(gc->dev, "Failed to create queue type %d of size: %u, err: %d\n", + spec->type, spec->queue_size, err); mana_gd_free_memory(gmi); free_q: kfree(queue); @@ -841,8 +863,11 @@ int mana_gd_create_mana_wq_cq(struct gdma_dev *gd, gmi = &queue->mem_info; err = mana_gd_alloc_memory(gc, spec->queue_size, gmi); - if (err) + if (err) { + dev_err(gc->dev, "GDMA queue type: %d, size: %u, memory allocation err: %d\n", + spec->type, spec->queue_size, err); goto free_q; + } err = mana_gd_create_dma_region(gd, gmi); if (err) @@ -862,6 +887,8 @@ int mana_gd_create_mana_wq_cq(struct gdma_dev *gd, *queue_ptr = queue; return 0; out: + dev_err(gc->dev, "Failed to create queue type %d of size: %u, err: %d\n", + spec->type, spec->queue_size, err); mana_gd_free_memory(gmi); free_q: kfree(queue); @@ -1157,8 +1184,11 @@ int mana_gd_post_and_ring(struct gdma_queue *queue, int err; err = mana_gd_post_work_request(queue, wqe_req, wqe_info); - if (err) + if (err) { + dev_err(gc->dev, "Failed to post work req from queue type %d of size %u (err=%d)\n", + queue->type, queue->queue_size, err); return err; 
+ } mana_gd_wq_ring_doorbell(gc, queue); @@ -1435,8 +1465,10 @@ static int mana_gd_setup(struct pci_dev *pdev) mana_smc_init(&gc->shm_channel, gc->dev, gc->shm_base); err = mana_gd_setup_irqs(pdev); - if (err) + if (err) { + dev_err(gc->dev, "Failed to setup IRQs: %d\n", err); return err; + } err = mana_hwc_create_channel(gc); if (err) @@ -1454,12 +1486,14 @@ static int mana_gd_setup(struct pci_dev *pdev) if (err) goto destroy_hwc; + dev_dbg(&pdev->dev, "mana gdma setup successful\n"); return 0; destroy_hwc: mana_hwc_destroy_channel(gc); remove_irq: mana_gd_remove_irqs(pdev); + dev_err(&pdev->dev, "%s failed (error %d)\n", __func__, err); return err; } @@ -1470,6 +1504,7 @@ static void mana_gd_cleanup(struct pci_dev *pdev) mana_hwc_destroy_channel(gc); mana_gd_remove_irqs(pdev); + dev_dbg(&pdev->dev, "mana gdma cleanup successful\n"); } static bool mana_is_pf(unsigned short dev_id) @@ -1488,8 +1523,10 @@ static int mana_gd_probe(struct pci_dev *pdev, const struct pci_device_id *ent) BUILD_BUG_ON(2 * MAX_PORTS_IN_MANA_DEV * GDMA_EQE_SIZE > EQ_SIZE); err = pci_enable_device(pdev); - if (err) + if (err) { + dev_err(&pdev->dev, "Failed to enable pci device (err=%d)\n", err); return -ENXIO; + } pci_set_master(pdev); @@ -1498,9 +1535,10 @@ static int mana_gd_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto disable_dev; err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); - if (err) + if (err) { + dev_err(&pdev->dev, "DMA set mask failed: %d\n", err); goto release_region; - + } dma_set_max_seg_size(&pdev->dev, UINT_MAX); err = -ENOMEM; @@ -1547,6 +1585,7 @@ static int mana_gd_probe(struct pci_dev *pdev, const struct pci_device_id *ent) * adapter-MTU file and apc->mana_pci_debugfs folder. */ debugfs_remove_recursive(gc->mana_pci_debugfs); + gc->mana_pci_debugfs = NULL; pci_iounmap(pdev, bar0_va); free_gc: pci_set_drvdata(pdev, NULL); @@ -1569,12 +1608,16 @@ static void mana_gd_remove(struct pci_dev *pdev) debugfs_remove_recursive(gc->mana_pci_debugfs); + gc->mana_pci_debugfs = NULL; + pci_iounmap(pdev, gc->bar0_va); vfree(gc); pci_release_regions(pdev); pci_disable_device(pdev); + + dev_dbg(&pdev->dev, "mana gdma remove successful\n"); } /* The 'state' parameter is not used. 
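 * It exists only to match the legacy pci_driver suspend callback prototype.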
*/ @@ -1622,6 +1665,8 @@ static void mana_gd_shutdown(struct pci_dev *pdev) debugfs_remove_recursive(gc->mana_pci_debugfs); + gc->mana_pci_debugfs = NULL; + pci_disable_device(pdev); } @@ -1648,8 +1693,10 @@ static int __init mana_driver_init(void) mana_debugfs_root = debugfs_create_dir("mana", NULL); err = pci_register_driver(&mana_driver); - if (err) + if (err) { debugfs_remove(mana_debugfs_root); + mana_debugfs_root = NULL; + } return err; } @@ -1659,6 +1706,8 @@ static void __exit mana_driver_exit(void) pci_unregister_driver(&mana_driver); debugfs_remove(mana_debugfs_root); + + mana_debugfs_root = NULL; } module_init(mana_driver_init); diff --git a/drivers/net/ethernet/microsoft/mana/hw_channel.c b/drivers/net/ethernet/microsoft/mana/hw_channel.c index a00f915c51881..58cfd247d8c5a 100644 --- a/drivers/net/ethernet/microsoft/mana/hw_channel.c +++ b/drivers/net/ethernet/microsoft/mana/hw_channel.c @@ -440,7 +440,8 @@ static int mana_hwc_alloc_dma_buf(struct hw_channel_context *hwc, u16 q_depth, gmi = &dma_buf->mem_info; err = mana_gd_alloc_memory(gc, buf_size, gmi); if (err) { - dev_err(hwc->dev, "Failed to allocate DMA buffer: %d\n", err); + dev_err(hwc->dev, "Failed to allocate DMA buffer size: %u, err %d\n", + buf_size, err); goto out; } @@ -529,6 +530,9 @@ static int mana_hwc_create_wq(struct hw_channel_context *hwc, out: if (err) mana_hwc_destroy_wq(hwc, hwc_wq); + if (err) + dev_err(hwc->dev, "Failed to create HWC queue size= %u type= %d err= %d\n", + queue_size, q_type, err); return err; } @@ -867,6 +871,10 @@ int mana_hwc_send_request(struct hw_channel_context *hwc, u32 req_len, } if (ctx->status_code && ctx->status_code != GDMA_STATUS_MORE_ENTRIES) { + if (ctx->status_code == -1) { + err = -EOPNOTSUPP; + goto out; + } dev_err(hwc->dev, "HWC: Failed hw_channel req: 0x%x\n", ctx->status_code); err = -EPROTO; diff --git a/drivers/net/ethernet/microsoft/mana/mana_bpf.c b/drivers/net/ethernet/microsoft/mana/mana_bpf.c index 23b1521c0df96..d30721d4516fc 100644 --- a/drivers/net/ethernet/microsoft/mana/mana_bpf.c +++ b/drivers/net/ethernet/microsoft/mana/mana_bpf.c @@ -91,7 +91,7 @@ u32 mana_run_xdp(struct net_device *ndev, struct mana_rxq *rxq, goto out; xdp_init_buff(xdp, PAGE_SIZE, &rxq->xdp_rxq); - xdp_prepare_buff(xdp, buf_va, XDP_PACKET_HEADROOM, pkt_len, false); + xdp_prepare_buff(xdp, buf_va, XDP_PACKET_HEADROOM, pkt_len, true); act = bpf_prog_run_xdp(prog, xdp); diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c index aa1e47233fe50..83dc7ff0d8864 100644 --- a/drivers/net/ethernet/microsoft/mana/mana_en.c +++ b/drivers/net/ethernet/microsoft/mana/mana_en.c @@ -13,6 +13,7 @@ #include #include +#include #include #include @@ -52,10 +53,12 @@ static int mana_open(struct net_device *ndev) { struct mana_port_context *apc = netdev_priv(ndev); int err; err = mana_alloc_queues(ndev); - if (err) + if (err) { + netdev_err(ndev, "%s failed to allocate queues: %d\n", __func__, err); return err; + } apc->port_is_up = true; @@ -64,7 +67,7 @@ static int mana_open(struct net_device *ndev) netif_carrier_on(ndev); netif_tx_wake_all_queues(ndev); - + netdev_dbg(ndev, "%s successful\n", __func__); return 0; } @@ -176,6 +179,9 @@ static int mana_map_skb(struct sk_buff *skb, struct mana_port_context *apc, return 0; frag_err: + if (net_ratelimit()) + netdev_err(apc->ndev, "Failed to map skb of size %u to DMA\n", + skb->len); for (i = sg_i - 1; i >= hsg; i--) dma_unmap_page(dev, ash->dma_handle[i], ash->size[i], DMA_TO_DEVICE); @@ -256,6 +262,9 @@
netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev) if (skb_cow_head(skb, MANA_HEADROOM)) goto tx_drop_count; + if (unlikely(ipv6_hopopt_jumbo_remove(skb))) + goto tx_drop_count; + txq = &apc->tx_qp[txq_idx].txq; gdma_sq = txq->gdma_sq; cq = &apc->tx_qp[txq_idx].tx_cq; @@ -652,30 +661,16 @@ int mana_pre_alloc_rxbufs(struct mana_port_context *mpc, int new_mtu, int num_qu mpc->rxbpre_total = 0; for (i = 0; i < num_rxb; i++) { - if (mpc->rxbpre_alloc_size > PAGE_SIZE) { - va = netdev_alloc_frag(mpc->rxbpre_alloc_size); - if (!va) - goto error; - - page = virt_to_head_page(va); - /* Check if the frag falls back to single page */ - if (compound_order(page) < - get_order(mpc->rxbpre_alloc_size)) { - put_page(page); - goto error; - } - } else { - page = dev_alloc_page(); - if (!page) - goto error; + page = dev_alloc_pages(get_order(mpc->rxbpre_alloc_size)); + if (!page) + goto error; - va = page_to_virt(page); - } + va = page_to_virt(page); da = dma_map_single(dev, va + mpc->rxbpre_headroom, mpc->rxbpre_datasize, DMA_FROM_DEVICE); if (dma_mapping_error(dev, da)) { - put_page(virt_to_head_page(va)); + put_page(page); goto error; } @@ -687,6 +682,7 @@ int mana_pre_alloc_rxbufs(struct mana_port_context *mpc, int new_mtu, int num_qu return 0; error: + netdev_err(mpc->ndev, "Failed to pre-allocate RX buffers for %d queues\n", num_queues); mana_pre_dealloc_rxbufs(mpc); return -ENOMEM; } @@ -738,12 +734,11 @@ static const struct net_device_ops mana_devops = { static void mana_cleanup_port_context(struct mana_port_context *apc) { /* - * at this point all dir/files under the vport directory - * are already cleaned up. - * We are sure the apc->mana_port_debugfs remove will not - * cause any freed memory access issues + * Make sure subsequent cleanup attempts don't end up removing an + * already freed dentry pointer. */ debugfs_remove(apc->mana_port_debugfs); + apc->mana_port_debugfs = NULL; kfree(apc->rxqs); apc->rxqs = NULL; } @@ -779,6 +774,9 @@ static int mana_send_request(struct mana_context *ac, void *in_buf, err = mana_gd_send_request(gc, in_len, in_buf, out_len, out_buf); if (err || resp->status) { + if (err == -EOPNOTSUPP) + return err; + dev_err(dev, "Failed to send mana message: %d, 0x%x\n", err, resp->status); return err ?
err : -EPROTO; @@ -1161,6 +1159,95 @@ static int mana_cfg_vport_steering(struct mana_port_context *apc, return err; } +int mana_query_link_cfg(struct mana_port_context *apc) +{ + struct net_device *ndev = apc->ndev; + struct mana_query_link_config_req req = {}; + struct mana_query_link_config_resp resp = {}; + int err; + + mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_LINK_CONFIG, + sizeof(req), sizeof(resp)); + + req.vport = apc->port_handle; + + err = mana_send_request(apc->ac, &req, sizeof(req), &resp, + sizeof(resp)); + + if (err) { + if (err == -EOPNOTSUPP) { + netdev_info_once(ndev, "MANA_QUERY_LINK_CONFIG not supported\n"); + goto out; + } + netdev_err(ndev, "Failed to query link config: %d\n", err); + goto out; + } + + err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_LINK_CONFIG, + sizeof(resp)); + + if (err || resp.hdr.status) { + netdev_err(ndev, "Failed to query link config: %d, 0x%x\n", err, + resp.hdr.status); + if (!err) + err = -EPROTO; + goto out; + } + + if (resp.qos_unconfigured) { + err = -EINVAL; + goto out; + } + apc->speed = resp.link_speed; + return 0; + +out: + return err; +} + +int mana_set_bw_clamp(struct mana_port_context *apc, u32 speed) +{ + struct mana_set_bw_clamp_req req = {}; + struct mana_set_bw_clamp_resp resp = {}; + struct net_device *ndev = apc->ndev; + int err; + + mana_gd_init_req_hdr(&req.hdr, MANA_SET_BW_CLAMP, + sizeof(req), sizeof(resp)); + req.vport = apc->port_handle; + req.link_speed = speed; + req.enable_clamping = TRI_STATE_TRUE; + + err = mana_send_request(apc->ac, &req, sizeof(req), &resp, + sizeof(resp)); + + if (err) { + if (err == -EOPNOTSUPP) { + netdev_info_once(ndev, "MANA_SET_BW_CLAMP not supported\n"); + return err; + } + netdev_err(ndev, "Failed to set bandwidth clamp for speed %u, err = %d\n", + speed, err); + return err; + } + + err = mana_verify_resp_hdr(&resp.hdr, MANA_SET_BW_CLAMP, + sizeof(resp)); + + if (err || resp.hdr.status) { + netdev_err(ndev, "Failed to set bandwidth clamp: %d, 0x%x\n", err, + resp.hdr.status); + if (!err) + err = -EPROTO; + return err; + } + + if (resp.qos_unconfigured) + netdev_info(ndev, "QoS is unconfigured\n"); + + return 0; +} + int mana_create_wq_obj(struct mana_port_context *apc, mana_handle_t vport, u32 wq_type, struct mana_obj_spec *wq_spec, @@ -1254,6 +1341,7 @@ static void mana_destroy_eq(struct mana_context *ac) return; debugfs_remove_recursive(ac->mana_eqs_debugfs); + ac->mana_eqs_debugfs = NULL; for (i = 0; i < gc->max_num_queues; i++) { eq = ac->eqs[i].eq; @@ -1304,8 +1392,10 @@ static int mana_create_eq(struct mana_context *ac) for (i = 0; i < gc->max_num_queues; i++) { spec.eq.msix_index = (i + 1) % gc->num_msix_usable; err = mana_gd_create_mana_eq(gd, &spec, &ac->eqs[i].eq); - if (err) + if (err) { + dev_err(gc->dev, "Failed to create EQ %d : %d\n", i, err); goto out; + } mana_create_eq_debugfs(ac, i); } @@ -1547,8 +1637,12 @@ static struct sk_buff *mana_build_skb(struct mana_rxq *rxq, void *buf_va, return NULL; if (xdp->data_hard_start) { + u32 metasize = xdp->data - xdp->data_meta; + skb_reserve(skb, xdp->data - xdp->data_hard_start); skb_put(skb, xdp->data_end - xdp->data); + if (metasize) + skb_metadata_set(skb, metasize); return skb; } @@ -1660,7 +1754,7 @@ static void mana_rx_skb(void *buf_va, bool from_pool, } static void *mana_get_rxfrag(struct mana_rxq *rxq, struct device *dev, - dma_addr_t *da, bool *from_pool, bool is_napi) + dma_addr_t *da, bool *from_pool) { struct page *page; void *va; @@ -1671,21 +1765,6 @@ static void *mana_get_rxfrag(struct mana_rxq *rxq, struct device
*dev, if (rxq->xdp_save_va) { va = rxq->xdp_save_va; rxq->xdp_save_va = NULL; - } else if (rxq->alloc_size > PAGE_SIZE) { - if (is_napi) - va = napi_alloc_frag(rxq->alloc_size); - else - va = netdev_alloc_frag(rxq->alloc_size); - - if (!va) - return NULL; - - page = virt_to_head_page(va); - /* Check if the frag falls back to single page */ - if (compound_order(page) < get_order(rxq->alloc_size)) { - put_page(page); - return NULL; - } } else { page = page_pool_dev_alloc_pages(rxq->page_pool); if (!page) @@ -1718,7 +1797,7 @@ static void mana_refill_rx_oob(struct device *dev, struct mana_rxq *rxq, dma_addr_t da; void *va; - va = mana_get_rxfrag(rxq, dev, &da, &from_pool, true); + va = mana_get_rxfrag(rxq, dev, &da, &from_pool); if (!va) return; @@ -1914,6 +1993,7 @@ static void mana_destroy_txq(struct mana_port_context *apc) for (i = 0; i < apc->num_queues; i++) { debugfs_remove_recursive(apc->tx_qp[i].mana_tx_debugfs); + apc->tx_qp[i].mana_tx_debugfs = NULL; napi = &apc->tx_qp[i].tx_cq.napi; if (apc->tx_qp[i].txq.napi_initialized) { @@ -2080,6 +2160,8 @@ static int mana_create_txq(struct mana_port_context *apc, return 0; out: + netdev_err(net, "Failed to create %d TX queues, %d\n", + apc->num_queues, err); mana_destroy_txq(apc); return err; } @@ -2099,6 +2181,7 @@ static void mana_destroy_rxq(struct mana_port_context *apc, return; debugfs_remove_recursive(rxq->mana_rx_debugfs); + rxq->mana_rx_debugfs = NULL; napi = &rxq->rx_cq.napi; @@ -2156,7 +2239,7 @@ static int mana_fill_rx_oob(struct mana_recv_buf_oob *rx_oob, u32 mem_key, if (mpc->rxbufs_pre) va = mana_get_rxbuf_pre(rxq, &da); else - va = mana_get_rxfrag(rxq, dev, &da, &from_pool, false); + va = mana_get_rxfrag(rxq, dev, &da, &from_pool); if (!va) return -ENOMEM; @@ -2242,6 +2325,7 @@ static int mana_create_page_pool(struct mana_rxq *rxq, struct gdma_context *gc) pprm.nid = gc->numa_node; pprm.napi = &rxq->rx_cq.napi; pprm.netdev = rxq->ndev; + pprm.order = get_order(rxq->alloc_size); rxq->page_pool = page_pool_create(&pprm); @@ -2415,6 +2499,7 @@ static int mana_add_rx_queues(struct mana_port_context *apc, rxq = mana_create_rxq(apc, i, &ac->eqs[i], ndev); if (!rxq) { err = -ENOMEM; + netdev_err(ndev, "Failed to create rxq %d : %d\n", i, err); goto out; } @@ -2661,12 +2746,18 @@ int mana_alloc_queues(struct net_device *ndev) int err; err = mana_create_vport(apc, ndev); - if (err) + if (err) { + netdev_err(ndev, "Failed to create vPort %u : %d\n", apc->port_idx, err); return err; + } err = netif_set_real_num_tx_queues(ndev, apc->num_queues); - if (err) + if (err) { + netdev_err(ndev, + "netif_set_real_num_tx_queues () failed for ndev with num_queues %u : %d\n", + apc->num_queues, err); goto destroy_vport; + } err = mana_add_rx_queues(apc, ndev); if (err) @@ -2675,14 +2766,20 @@ int mana_alloc_queues(struct net_device *ndev) apc->rss_state = apc->num_queues > 1 ? 
TRI_STATE_TRUE : TRI_STATE_FALSE; err = netif_set_real_num_rx_queues(ndev, apc->num_queues); - if (err) + if (err) { + netdev_err(ndev, + "netif_set_real_num_rx_queues () failed for ndev with num_queues %u : %d\n", + apc->num_queues, err); goto destroy_vport; + } mana_rss_table_init(apc); err = mana_config_rss(apc, TRI_STATE_TRUE, true, true); - if (err) + if (err) { + netdev_err(ndev, "Failed to configure RSS table: %d\n", err); goto destroy_vport; + } if (gd->gdma_context->is_pf) { err = mana_pf_register_filter(apc); @@ -2823,8 +2920,10 @@ int mana_detach(struct net_device *ndev, bool from_close) if (apc->port_st_save) { err = mana_dealloc_queues(ndev); - if (err) + if (err) { + netdev_err(ndev, "%s failed to deallocate queues: %d\n", __func__, err); return err; + } } if (!from_close) { @@ -2873,6 +2972,8 @@ static int mana_probe_port(struct mana_context *ac, int port_idx, ndev->dev_port = port_idx; SET_NETDEV_DEV(ndev, gc->dev); + netif_set_tso_max_size(ndev, GSO_MAX_SIZE); + netif_carrier_off(ndev); netdev_rss_key_fill(apc->hashkey, MANA_HASH_KEY_SIZE); @@ -2968,6 +3069,8 @@ static int add_adev(struct gdma_dev *gd) goto add_fail; gd->adev = adev; + dev_dbg(gd->gdma_context->dev, + "Auxiliary device added successfully\n"); return 0; add_fail: @@ -3009,8 +3112,10 @@ int mana_probe(struct gdma_dev *gd, bool resuming) } err = mana_create_eq(ac); - if (err) + if (err) { + dev_err(dev, "Failed to create EQs: %d\n", err); goto out; + } err = mana_query_device_cfg(ac, MANA_MAJOR_VERSION, MANA_MINOR_VERSION, MANA_MICRO_VERSION, &num_ports); @@ -3066,8 +3171,14 @@ int mana_probe(struct gdma_dev *gd, bool resuming) err = add_adev(gd); out: - if (err) + if (err) { mana_remove(gd, false); + } else { + dev_dbg(dev, "gd=%p, id=%u, num_ports=%d, type=%u, instance=%u\n", + gd, gd->dev_id.as_uint32, ac->num_ports, + gd->dev_id.type, gd->dev_id.instance); + dev_dbg(dev, "%s succeeded\n", __func__); + } return err; } @@ -3129,6 +3240,7 @@ void mana_remove(struct gdma_dev *gd, bool suspending) gd->driver_data = NULL; gd->gdma_context = NULL; kfree(ac); + dev_dbg(dev, "%s succeeded\n", __func__); } struct net_device *mana_get_primary_netdev_rcu(struct mana_context *ac, u32 port_index) diff --git a/drivers/net/ethernet/microsoft/mana/mana_ethtool.c b/drivers/net/ethernet/microsoft/mana/mana_ethtool.c index c419626073f5f..b29d0fe0a2014 100644 --- a/drivers/net/ethernet/microsoft/mana/mana_ethtool.c +++ b/drivers/net/ethernet/microsoft/mana/mana_ethtool.c @@ -427,12 +427,30 @@ static int mana_set_ringparam(struct net_device *ndev, static int mana_get_link_ksettings(struct net_device *ndev, struct ethtool_link_ksettings *cmd) { + struct mana_port_context *apc = netdev_priv(ndev); + int err; + + err = mana_query_link_cfg(apc); + + cmd->base.speed = (err) ? SPEED_UNKNOWN : apc->speed; cmd->base.duplex = DUPLEX_FULL; cmd->base.port = PORT_OTHER; return 0; } +static int mana_set_link_ksettings(struct net_device *ndev, + const struct ethtool_link_ksettings *cmd) +{ + struct mana_port_context *apc = netdev_priv(ndev); + int err; + + err = mana_set_bw_clamp(apc, cmd->base.speed); + + apc->speed = (err) ? 
apc->speed : cmd->base.speed; + return 0; +} + const struct ethtool_ops mana_ethtool_ops = { .get_ethtool_stats = mana_get_ethtool_stats, .get_sset_count = mana_get_sset_count, @@ -447,5 +465,6 @@ const struct ethtool_ops mana_ethtool_ops = { .get_ringparam = mana_get_ringparam, .set_ringparam = mana_set_ringparam, .get_link_ksettings = mana_get_link_ksettings, + .set_link_ksettings = mana_set_link_ksettings, .get_link = ethtool_op_get_link, }; diff --git a/drivers/net/ethernet/netronome/nfp/crypto/ipsec.c b/drivers/net/ethernet/netronome/nfp/crypto/ipsec.c index 515069d5637b0..671af5d4c5d25 100644 --- a/drivers/net/ethernet/netronome/nfp/crypto/ipsec.c +++ b/drivers/net/ethernet/netronome/nfp/crypto/ipsec.c @@ -565,20 +565,9 @@ static void nfp_net_xfrm_del_state(struct xfrm_state *x) xa_erase(&nn->xa_ipsec, x->xso.offload_handle - 1); } -static bool nfp_net_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x) -{ - if (x->props.family == AF_INET) - /* Offload with IPv4 options is not supported yet */ - return ip_hdr(skb)->ihl == 5; - - /* Offload with IPv6 extension headers is not support yet */ - return !(ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr)); -} - static const struct xfrmdev_ops nfp_net_ipsec_xfrmdev_ops = { .xdo_dev_state_add = nfp_net_xfrm_add_state, .xdo_dev_state_delete = nfp_net_xfrm_del_state, - .xdo_dev_offload_ok = nfp_net_ipsec_offload_ok, }; void nfp_net_ipsec_init(struct nfp_net *nn) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_hwmon.c b/drivers/net/ethernet/netronome/nfp/nfp_hwmon.c index 0d6c59d6d4aed..ea6a288c0d5ed 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_hwmon.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_hwmon.c @@ -83,42 +83,12 @@ nfp_hwmon_is_visible(const void *data, enum hwmon_sensor_types type, u32 attr, return 0; } -static u32 nfp_chip_config[] = { - HWMON_C_REGISTER_TZ, - 0 -}; - -static const struct hwmon_channel_info nfp_chip = { - .type = hwmon_chip, - .config = nfp_chip_config, -}; - -static u32 nfp_temp_config[] = { - HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_CRIT, - 0 -}; - -static const struct hwmon_channel_info nfp_temp = { - .type = hwmon_temp, - .config = nfp_temp_config, -}; - -static u32 nfp_power_config[] = { - HWMON_P_INPUT | HWMON_P_MAX, - HWMON_P_INPUT, - HWMON_P_INPUT, - 0 -}; - -static const struct hwmon_channel_info nfp_power = { - .type = hwmon_power, - .config = nfp_power_config, -}; - static const struct hwmon_channel_info * const nfp_hwmon_info[] = { - &nfp_chip, - &nfp_temp, - &nfp_power, + HWMON_CHANNEL_INFO(chip, HWMON_C_REGISTER_TZ), + HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_CRIT), + HWMON_CHANNEL_INFO(power, HWMON_P_INPUT | HWMON_P_MAX, + HWMON_P_INPUT, + HWMON_P_INPUT), NULL }; diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index f915c423fe70b..886061d7351a1 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -454,7 +454,7 @@ int qed_fill_dev_info(struct qed_dev *cdev, static void qed_free_cdev(struct qed_dev *cdev) { - kfree((void *)cdev); + kfree(cdev); } static struct qed_dev *qed_alloc_cdev(struct pci_dev *pdev) diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c index fa167b1aa0190..5222a035fd198 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c @@ -3033,7 +3033,7 @@ static void qed_iov_vf_mbx_vport_update(struct qed_hwfn *p_hwfn, u16 length; int rc; - /* Valiate PF can 
send such a request */ + /* Validate PF can send such a request */ if (!vf->vport_instance) { DP_VERBOSE(p_hwfn, QED_MSG_IOV, @@ -3312,7 +3312,7 @@ static void qed_iov_vf_mbx_ucast_filter(struct qed_hwfn *p_hwfn, goto out; } - /* Determine if the unicast filtering is acceptible by PF */ + /* Determine if the unicast filtering is acceptable by PF */ if ((p_bulletin->valid_bitmap & BIT(VLAN_ADDR_FORCED)) && (params.type == QED_FILTER_VLAN || params.type == QED_FILTER_MAC_VLAN)) { @@ -3729,7 +3729,7 @@ qed_iov_execute_vf_flr_cleanup(struct qed_hwfn *p_hwfn, rc = qed_iov_enable_vf_access(p_hwfn, p_ptt, p_vf); if (rc) { - DP_ERR(p_hwfn, "Failed to re-enable VF[%d] acces\n", + DP_ERR(p_hwfn, "Failed to re-enable VF[%d] access\n", vfid); return rc; } @@ -4480,7 +4480,7 @@ int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled) struct qed_ptt *ptt = qed_ptt_acquire(hwfn); /* Failure to acquire the ptt in 100g creates an odd error - * where the first engine has already relased IOV. + * where the first engine has already released IOV. */ if (!ptt) { DP_ERR(hwfn, "Failed to acquire ptt\n"); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c index f9dd50152b1e3..28d24d59efb84 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c @@ -454,8 +454,10 @@ static int qlcnic_sriov_set_guest_vlan_mode(struct qlcnic_adapter *adapter, num_vlans = sriov->num_allowed_vlans; sriov->allowed_vlans = kcalloc(num_vlans, sizeof(u16), GFP_KERNEL); - if (!sriov->allowed_vlans) + if (!sriov->allowed_vlans) { + qlcnic_sriov_free_vlans(adapter); return -ENOMEM; + } vlans = (u16 *)&cmd->rsp.arg[3]; for (i = 0; i < num_vlans; i++) @@ -2167,8 +2169,10 @@ int qlcnic_sriov_alloc_vlans(struct qlcnic_adapter *adapter) vf = &sriov->vf_info[i]; vf->sriov_vlans = kcalloc(sriov->num_allowed_vlans, sizeof(*vf->sriov_vlans), GFP_KERNEL); - if (!vf->sriov_vlans) + if (!vf->sriov_vlans) { + qlcnic_sriov_free_vlans(adapter); return -ENOMEM; + } } return 0; diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c index f3bea196a8f9d..ba8763cac9d9e 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c @@ -117,11 +117,14 @@ static void rmnet_unregister_bridge(struct rmnet_port *port) rmnet_unregister_real_device(bridge_dev); } -static int rmnet_newlink(struct net *src_net, struct net_device *dev, - struct nlattr *tb[], struct nlattr *data[], +static int rmnet_newlink(struct net_device *dev, + struct rtnl_newlink_params *params, struct netlink_ext_ack *extack) { + struct net *link_net = rtnl_newlink_link_net(params); u32 data_format = RMNET_FLAGS_INGRESS_DEAGGREGATION; + struct nlattr **data = params->data; + struct nlattr **tb = params->tb; struct net_device *real_dev; int mode = RMNET_EPMODE_VND; struct rmnet_endpoint *ep; @@ -134,7 +137,7 @@ static int rmnet_newlink(struct net *src_net, struct net_device *dev, return -EINVAL; } - real_dev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK])); + real_dev = __dev_get_by_index(link_net, nla_get_u32(tb[IFLA_LINK])); if (!real_dev) { NL_SET_ERR_MSG_MOD(extack, "link does not exist"); return -ENODEV; diff --git a/drivers/net/ethernet/realtek/Kconfig b/drivers/net/ethernet/realtek/Kconfig index 8a8ea51c639e9..fe136f61586fe 100644 --- a/drivers/net/ethernet/realtek/Kconfig +++ 
b/drivers/net/ethernet/realtek/Kconfig @@ -114,7 +114,8 @@ config R8169 will be called r8169. This is recommended. config R8169_LEDS - def_bool R8169 && LEDS_TRIGGER_NETDEV + bool "Support for controlling the NIC LEDs" + depends on R8169 && LEDS_TRIGGER_NETDEV depends on !(R8169=y && LEDS_CLASS=m) help Optional support for controlling the NIC LED's with the netdev diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c index 5a5eba49c6515..4eebd9cb40a37 100644 --- a/drivers/net/ethernet/realtek/r8169_main.c +++ b/drivers/net/ethernet/realtek/r8169_main.c @@ -89,6 +89,7 @@ #define JUMBO_6K (6 * SZ_1K - VLAN_ETH_HLEN - ETH_FCS_LEN) #define JUMBO_7K (7 * SZ_1K - VLAN_ETH_HLEN - ETH_FCS_LEN) #define JUMBO_9K (9 * SZ_1K - VLAN_ETH_HLEN - ETH_FCS_LEN) +#define JUMBO_16K (SZ_16K - VLAN_ETH_HLEN - ETH_FCS_LEN) static const struct { const char *name; @@ -169,6 +170,7 @@ static const struct pci_device_id rtl8169_pci_tbl[] = { { PCI_VDEVICE(REALTEK, 0x8125) }, { PCI_VDEVICE(REALTEK, 0x8126) }, { PCI_VDEVICE(REALTEK, 0x3000) }, + { PCI_VDEVICE(REALTEK, 0x5000) }, {} }; @@ -2850,6 +2852,32 @@ static u32 rtl_csi_read(struct rtl8169_private *tp, int addr) RTL_R32(tp, CSIDR) : ~0; } +static void rtl_disable_zrxdc_timeout(struct rtl8169_private *tp) +{ + struct pci_dev *pdev = tp->pci_dev; + u32 csi; + int rc; + u8 val; + +#define RTL_GEN3_RELATED_OFF 0x0890 +#define RTL_GEN3_ZRXDC_NONCOMPL 0x1 + if (pdev->cfg_size > RTL_GEN3_RELATED_OFF) { + rc = pci_read_config_byte(pdev, RTL_GEN3_RELATED_OFF, &val); + if (rc == PCIBIOS_SUCCESSFUL) { + val &= ~RTL_GEN3_ZRXDC_NONCOMPL; + rc = pci_write_config_byte(pdev, RTL_GEN3_RELATED_OFF, + val); + if (rc == PCIBIOS_SUCCESSFUL) + return; + } + } + + netdev_notice_once(tp->dev, + "No native access to PCI extended config space, falling back to CSI\n"); + csi = rtl_csi_read(tp, RTL_GEN3_RELATED_OFF); + rtl_csi_write(tp, RTL_GEN3_RELATED_OFF, csi & ~RTL_GEN3_ZRXDC_NONCOMPL); +} + static void rtl_set_aspm_entry_latency(struct rtl8169_private *tp, u8 val) { struct pci_dev *pdev = tp->pci_dev; @@ -3822,6 +3850,7 @@ static void rtl_hw_start_8125d(struct rtl8169_private *tp) static void rtl_hw_start_8126a(struct rtl8169_private *tp) { + rtl_disable_zrxdc_timeout(tp); rtl_set_def_aspm_entry_latency(tp); rtl_hw_start_8125_common(tp); } @@ -5199,6 +5228,33 @@ static int r8169_mdio_write_reg(struct mii_bus *mii_bus, int phyaddr, return 0; } +static int r8169_mdio_read_reg_c45(struct mii_bus *mii_bus, int addr, + int devnum, int regnum) +{ + struct rtl8169_private *tp = mii_bus->priv; + + if (addr > 0) + return -ENODEV; + + if (devnum == MDIO_MMD_VEND2 && regnum > MDIO_STAT2) + return r8168_phy_ocp_read(tp, regnum); + + return 0; +} + +static int r8169_mdio_write_reg_c45(struct mii_bus *mii_bus, int addr, + int devnum, int regnum, u16 val) +{ + struct rtl8169_private *tp = mii_bus->priv; + + if (addr > 0 || devnum != MDIO_MMD_VEND2 || regnum <= MDIO_STAT2) + return -ENODEV; + + r8168_phy_ocp_write(tp, regnum, val); + + return 0; +} + static int r8169_mdio_register(struct rtl8169_private *tp) { struct pci_dev *pdev = tp->pci_dev; @@ -5222,12 +5278,18 @@ static int r8169_mdio_register(struct rtl8169_private *tp) new_bus->priv = tp; new_bus->parent = &pdev->dev; new_bus->irq[0] = PHY_MAC_INTERRUPT; + new_bus->phy_mask = GENMASK(31, 1); snprintf(new_bus->id, MII_BUS_ID_SIZE, "r8169-%x-%x", pci_domain_nr(pdev->bus), pci_dev_id(pdev)); new_bus->read = r8169_mdio_read_reg; new_bus->write = r8169_mdio_write_reg; + if (tp->mac_version >= 
RTL_GIGA_MAC_VER_40) { + new_bus->read_c45 = r8169_mdio_read_reg_c45; + new_bus->write_c45 = r8169_mdio_write_reg_c45; + } + ret = devm_mdiobus_register(&pdev->dev, new_bus); if (ret) return ret; @@ -5251,9 +5313,9 @@ static int r8169_mdio_register(struct rtl8169_private *tp) /* mimic behavior of r8125/r8126 vendor drivers */ if (tp->mac_version == RTL_GIGA_MAC_VER_61) - phy_set_eee_broken(tp->phydev, - ETHTOOL_LINK_MODE_2500baseT_Full_BIT); - phy_set_eee_broken(tp->phydev, ETHTOOL_LINK_MODE_5000baseT_Full_BIT); + phy_disable_eee_mode(tp->phydev, + ETHTOOL_LINK_MODE_2500baseT_Full_BIT); + phy_disable_eee_mode(tp->phydev, ETHTOOL_LINK_MODE_5000baseT_Full_BIT); /* PHY will be woken up in rtl_open() */ phy_suspend(tp->phydev); @@ -5326,6 +5388,9 @@ static int rtl_jumbo_max(struct rtl8169_private *tp) /* RTL8168c */ case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_24: return JUMBO_6K; + /* RTL8125/8126 */ + case RTL_GIGA_MAC_VER_61 ... RTL_GIGA_MAC_VER_71: + return JUMBO_16K; default: return JUMBO_9K; } @@ -5360,7 +5425,7 @@ static void rtl_init_mac_address(struct rtl8169_private *tp) /* register is set if system vendor successfully tested ASPM 1.2 */ static bool rtl_aspm_is_safe(struct rtl8169_private *tp) { - if (tp->mac_version >= RTL_GIGA_MAC_VER_61 && + if (tp->mac_version >= RTL_GIGA_MAC_VER_46 && r8168_mac_ocp_read(tp, 0xc0b2) & 0xf) return true; @@ -5409,11 +5474,10 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (region < 0) return dev_err_probe(&pdev->dev, -ENODEV, "no MMIO resource found\n"); - rc = pcim_iomap_regions(pdev, BIT(region), KBUILD_MODNAME); - if (rc < 0) - return dev_err_probe(&pdev->dev, rc, "cannot remap MMIO, aborting\n"); - - tp->mmio_addr = pcim_iomap_table(pdev)[region]; + tp->mmio_addr = pcim_iomap_region(pdev, region, KBUILD_MODNAME); + if (IS_ERR(tp->mmio_addr)) + return dev_err_probe(&pdev->dev, PTR_ERR(tp->mmio_addr), + "cannot remap MMIO, aborting\n"); txconfig = RTL_R32(tp, TxConfig); if (txconfig == ~0U) diff --git a/drivers/net/ethernet/realtek/rtase/rtase_main.c b/drivers/net/ethernet/realtek/rtase/rtase_main.c index 3bd11cb56294c..2aacc1996796d 100644 --- a/drivers/net/ethernet/realtek/rtase/rtase_main.c +++ b/drivers/net/ethernet/realtek/rtase/rtase_main.c @@ -1501,7 +1501,10 @@ static void rtase_wait_for_quiescence(const struct net_device *dev) static void rtase_sw_reset(struct net_device *dev) { struct rtase_private *tp = netdev_priv(dev); + struct rtase_ring *ring, *tmp; + struct rtase_int_vector *ivec; int ret; + u32 i; netif_stop_queue(dev); netif_carrier_off(dev); @@ -1512,6 +1515,13 @@ static void rtase_sw_reset(struct net_device *dev) rtase_tx_clear(tp); rtase_rx_clear(tp); + for (i = 0; i < tp->int_nums; i++) { + ivec = &tp->int_vector[i]; + list_for_each_entry_safe(ring, tmp, &ivec->ring_list, + ring_entry) + list_del(&ring->ring_entry); + } + ret = rtase_init_ring(dev); if (ret) { netdev_err(dev, "unable to init ring\n"); diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index c9f4976a35275..12e4f68c0c8f9 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -3075,7 +3075,7 @@ static int ravb_probe(struct platform_device *pdev) if (info->coalesce_irqs) { netdev_sw_irq_coalesce_default_on(ndev); if (num_present_cpus() == 1) - dev_set_threaded(ndev, true); + dev_set_threaded(ndev, NETDEV_NAPI_THREADED_ENABLE); } /* Network device register */ diff --git a/drivers/net/ethernet/renesas/ravb_ptp.c 
b/drivers/net/ethernet/renesas/ravb_ptp.c index 6e4ef7af27bf3..b4365906669f3 100644 --- a/drivers/net/ethernet/renesas/ravb_ptp.c +++ b/drivers/net/ethernet/renesas/ravb_ptp.c @@ -179,8 +179,7 @@ static int ravb_ptp_extts(struct ptp_clock_info *ptp, /* Reject requests with unsupported flags */ if (req->flags & ~(PTP_ENABLE_FEATURE | PTP_RISING_EDGE | - PTP_FALLING_EDGE | - PTP_STRICT_FLAGS)) + PTP_FALLING_EDGE)) return -EOPNOTSUPP; if (req->index) diff --git a/drivers/net/ethernet/renesas/rcar_gen4_ptp.c b/drivers/net/ethernet/renesas/rcar_gen4_ptp.c index 72e7fcc56693a..4c3e8cc5046f5 100644 --- a/drivers/net/ethernet/renesas/rcar_gen4_ptp.c +++ b/drivers/net/ethernet/renesas/rcar_gen4_ptp.c @@ -29,8 +29,8 @@ static const struct rcar_gen4_ptp_reg_offset gen4_offs = { static int rcar_gen4_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm) { struct rcar_gen4_ptp_private *ptp_priv = ptp_to_priv(ptp); - bool neg_adj = scaled_ppm < 0 ? true : false; s64 addend = ptp_priv->default_addend; + bool neg_adj = scaled_ppm < 0; s64 diff; if (neg_adj) diff --git a/drivers/net/ethernet/renesas/rswitch.c b/drivers/net/ethernet/renesas/rswitch.c index 84d09a8973b78..aba772e14555d 100644 --- a/drivers/net/ethernet/renesas/rswitch.c +++ b/drivers/net/ethernet/renesas/rswitch.c @@ -1287,17 +1287,14 @@ static struct device_node *rswitch_get_port_node(struct rswitch_device *rdev) if (!ports) return NULL; - for_each_child_of_node(ports, port) { + for_each_available_child_of_node(ports, port) { err = of_property_read_u32(port, "reg", &index); if (err < 0) { port = NULL; goto out; } - if (index == rdev->etha->index) { - if (!of_device_is_available(port)) - port = NULL; + if (index == rdev->etha->index) break; - } } out: diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c index fe0bf1d3217af..36af94a2e062a 100644 --- a/drivers/net/ethernet/rocker/rocker_main.c +++ b/drivers/net/ethernet/rocker/rocker_main.c @@ -2576,7 +2576,7 @@ static int rocker_probe_port(struct rocker *rocker, unsigned int port_number) rocker_carrier_init(rocker_port); dev->features |= NETIF_F_SG; - dev->netns_local = true; + dev->netns_immutable = true; /* MTU range: 68 - 9000 */ dev->min_mtu = ROCKER_PORT_MIN_MTU; diff --git a/drivers/net/ethernet/sfc/Kconfig b/drivers/net/ethernet/sfc/Kconfig index 3eb55dcfa8a61..c4c43434f3143 100644 --- a/drivers/net/ethernet/sfc/Kconfig +++ b/drivers/net/ethernet/sfc/Kconfig @@ -38,8 +38,9 @@ config SFC_MTD default y help This exposes the on-board flash and/or EEPROM as MTD devices - (e.g. /dev/mtd1). This is required to update the firmware or - the boot configuration under Linux. + (e.g. /dev/mtd1). This is required to update the boot + configuration under Linux, or use some older userland tools to + update the firmware. 
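The ravb_ptp hunk above stops treating PTP_STRICT_FLAGS as an unsupported bit. The PTP core ORs PTP_STRICT_FLAGS into every request issued through the PTP_EXTTS_REQUEST2 ioctl, so a driver that rejects that flag outright fails the v2 ioctl even for edge configurations it can handle. A minimal sketch of the resulting validation pattern (the callback name and supported-flag set here are illustrative, not taken from the ravb driver):

    #include <linux/ptp_clock_kernel.h>

    /* Hypothetical external-timestamp handler: accept PTP_STRICT_FLAGS,
     * reject only options the hardware genuinely cannot honour.
     */
    static int example_extts_enable(struct ptp_clock_info *ptp,
                                    struct ptp_extts_request *req, int on)
    {
            const unsigned int supported = PTP_ENABLE_FEATURE |
                                           PTP_RISING_EDGE |
                                           PTP_FALLING_EDGE |
                                           PTP_STRICT_FLAGS;

            if (req->flags & ~supported)
                    return -EOPNOTSUPP;

            /* program the external timestamp unit here */
            return 0;
    }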
config SFC_MCDI_MON bool "Solarflare SFC9100-family hwmon support" depends on SFC && HWMON && !(SFC=y && HWMON=m) diff --git a/drivers/net/ethernet/sfc/Makefile b/drivers/net/ethernet/sfc/Makefile index 8f446b9bd5ee3..d99039ec468d6 100644 --- a/drivers/net/ethernet/sfc/Makefile +++ b/drivers/net/ethernet/sfc/Makefile @@ -7,7 +7,7 @@ sfc-y += efx.o efx_common.o efx_channels.o nic.o \ mcdi_functions.o mcdi_filters.o mcdi_mon.o \ ef100.o ef100_nic.o ef100_netdev.o \ ef100_ethtool.o ef100_rx.o ef100_tx.o \ - efx_devlink.o + efx_devlink.o efx_reflash.o sfc-$(CONFIG_SFC_MTD) += mtd.o sfc-$(CONFIG_SFC_SRIOV) += sriov.o ef10_sriov.o ef100_sriov.o ef100_rep.o \ mae.o tc.o tc_bindings.o tc_counters.o \ diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index 452009ed7a435..47349c148c0c0 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -3501,7 +3501,7 @@ static int efx_ef10_mtd_probe_partition(struct efx_nic *efx, MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_METADATA_IN_LEN); MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_METADATA_OUT_LENMAX); const struct efx_ef10_nvram_type_info *info; - size_t size, erase_size, outlen; + size_t size, erase_size, write_size, outlen; int type_idx = 0; bool protected; int rc; @@ -3516,7 +3516,8 @@ static int efx_ef10_mtd_probe_partition(struct efx_nic *efx, if (info->port != efx_port_num(efx)) return -ENODEV; - rc = efx_mcdi_nvram_info(efx, type, &size, &erase_size, &protected); + rc = efx_mcdi_nvram_info(efx, type, &size, &erase_size, &write_size, + &protected); if (rc) return rc; if (protected && @@ -3561,6 +3562,8 @@ static int efx_ef10_mtd_probe_partition(struct efx_nic *efx, if (!erase_size) part->common.mtd.flags |= MTD_NO_ERASE; + part->common.mtd.writesize = write_size; + return 0; } @@ -4416,6 +4419,7 @@ const struct efx_nic_type efx_x4_nic_type = { .can_rx_scatter = true, .always_rx_scatter = true, .option_descriptors = true, + .flash_auto_partition = true, .min_interrupt_mode = EFX_INT_MODE_MSIX, .timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH, .offload_features = EF10_OFFLOAD_FEATURES, diff --git a/drivers/net/ethernet/sfc/ef100_netdev.c b/drivers/net/ethernet/sfc/ef100_netdev.c index 7f7d560cb2b4c..d941f073f1ebf 100644 --- a/drivers/net/ethernet/sfc/ef100_netdev.c +++ b/drivers/net/ethernet/sfc/ef100_netdev.c @@ -452,7 +452,6 @@ int ef100_probe_netdev(struct efx_probe_data *probe_data) NETIF_F_HIGHDMA | NETIF_F_ALL_TSO; netif_set_tso_max_segs(net_dev, ESE_EF100_DP_GZ_TSO_MAX_HDR_NUM_SEGS_DEFAULT); - efx->mdio.dev = net_dev; rc = efx_ef100_init_datapath_caps(efx); if (rc < 0) diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 650136dfc642f..112e55b98ed3b 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -474,28 +474,6 @@ void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs, } } -/************************************************************************** - * - * ioctls - * - *************************************************************************/ - -/* Net device ioctl - * Context: process, rtnl_lock() held. 
- */ -static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd) -{ - struct efx_nic *efx = efx_netdev_priv(net_dev); - struct mii_ioctl_data *data = if_mii(ifr); - - /* Convert phy_id from older PRTAD/DEVAD format */ - if ((cmd == SIOCGMIIREG || cmd == SIOCSMIIREG) && - (data->phy_id & 0xfc00) == 0x0400) - data->phy_id ^= MDIO_PHY_ID_C45 | 0x0400; - - return mdio_mii_ioctl(&efx->mdio, data, cmd); -} - /************************************************************************** * * Kernel net device interface @@ -593,7 +571,6 @@ static const struct net_device_ops efx_netdev_ops = { .ndo_tx_timeout = efx_watchdog, .ndo_start_xmit = efx_hard_start_xmit, .ndo_validate_addr = eth_validate_addr, - .ndo_eth_ioctl = efx_ioctl, .ndo_change_mtu = efx_change_mtu, .ndo_set_mac_address = efx_set_mac_address, .ndo_set_rx_mode = efx_set_rx_mode, @@ -1201,7 +1178,6 @@ static int efx_pci_probe(struct pci_dev *pci_dev, rc = efx_init_struct(efx, pci_dev); if (rc) goto fail1; - efx->mdio.dev = net_dev; pci_info(pci_dev, "Solarflare NIC detected\n"); diff --git a/drivers/net/ethernet/sfc/efx_common.c b/drivers/net/ethernet/sfc/efx_common.c index c88ec3e248362..5a14d94163b19 100644 --- a/drivers/net/ethernet/sfc/efx_common.c +++ b/drivers/net/ethernet/sfc/efx_common.c @@ -1003,6 +1003,7 @@ int efx_init_struct(struct efx_nic *efx, struct pci_dev *pci_dev) INIT_LIST_HEAD(&efx->vf_reps); INIT_WORK(&efx->mac_work, efx_mac_work); init_waitqueue_head(&efx->flush_wq); + mutex_init(&efx->reflash_mutex); efx->tx_queues_per_channel = 1; efx->rxq_entries = EFX_DEFAULT_DMAQ_SIZE; diff --git a/drivers/net/ethernet/sfc/efx_devlink.c b/drivers/net/ethernet/sfc/efx_devlink.c index 3cd750820fdde..d842c60dfc100 100644 --- a/drivers/net/ethernet/sfc/efx_devlink.c +++ b/drivers/net/ethernet/sfc/efx_devlink.c @@ -19,6 +19,7 @@ #include "mae.h" #include "ef100_rep.h" #endif +#include "efx_reflash.h" struct efx_devlink { struct efx_nic *efx; @@ -615,7 +616,19 @@ static int efx_devlink_info_get(struct devlink *devlink, return 0; } +static int efx_devlink_flash_update(struct devlink *devlink, + struct devlink_flash_update_params *params, + struct netlink_ext_ack *extack) +{ + struct efx_devlink *devlink_private = devlink_priv(devlink); + struct efx_nic *efx = devlink_private->efx; + + return efx_reflash_flash_firmware(efx, params->fw, extack); +} + static const struct devlink_ops sfc_devlink_ops = { + .supported_flash_update_params = 0, + .flash_update = efx_devlink_flash_update, .info_get = efx_devlink_info_get, }; diff --git a/drivers/net/ethernet/sfc/efx_reflash.c b/drivers/net/ethernet/sfc/efx_reflash.c new file mode 100644 index 0000000000000..b12e95f1c80a5 --- /dev/null +++ b/drivers/net/ethernet/sfc/efx_reflash.c @@ -0,0 +1,522 @@ +// SPDX-License-Identifier: GPL-2.0-only +/**************************************************************************** + * Driver for AMD network controllers and boards + * Copyright (C) 2025, Advanced Micro Devices, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. 
+ */ + +#include <linux/crc32.h> +#include <net/devlink.h> +#include "efx_reflash.h" +#include "net_driver.h" +#include "fw_formats.h" +#include "mcdi_pcol.h" +#include "mcdi.h" + +/* Try to parse a Reflash header at the specified offset */ +static bool efx_reflash_parse_reflash_header(const struct firmware *fw, + size_t header_offset, u32 *type, + u32 *subtype, const u8 **data, + size_t *data_size) +{ + size_t header_end, trailer_offset, trailer_end; + u32 magic, version, payload_size, header_len; + const u8 *header, *trailer; + u32 expected_crc, crc; + + if (check_add_overflow(header_offset, EFX_REFLASH_HEADER_LENGTH_OFST + + EFX_REFLASH_HEADER_LENGTH_LEN, + &header_end)) + return false; + if (fw->size < header_end) + return false; + + header = fw->data + header_offset; + magic = get_unaligned_le32(header + EFX_REFLASH_HEADER_MAGIC_OFST); + if (magic != EFX_REFLASH_HEADER_MAGIC_VALUE) + return false; + + version = get_unaligned_le32(header + EFX_REFLASH_HEADER_VERSION_OFST); + if (version != EFX_REFLASH_HEADER_VERSION_VALUE) + return false; + + payload_size = get_unaligned_le32(header + EFX_REFLASH_HEADER_PAYLOAD_SIZE_OFST); + header_len = get_unaligned_le32(header + EFX_REFLASH_HEADER_LENGTH_OFST); + if (check_add_overflow(header_offset, header_len, &trailer_offset) || + check_add_overflow(trailer_offset, payload_size, &trailer_offset) || + check_add_overflow(trailer_offset, EFX_REFLASH_TRAILER_LEN, + &trailer_end)) + return false; + if (fw->size < trailer_end) + return false; + + trailer = fw->data + trailer_offset; + expected_crc = get_unaligned_le32(trailer + EFX_REFLASH_TRAILER_CRC_OFST); + /* Addition could overflow u32, but not size_t since we already + * checked trailer_offset didn't overflow. So cast to size_t first. + */ + crc = crc32_le(0, header, (size_t)header_len + payload_size); + if (crc != expected_crc) + return false; + + *type = get_unaligned_le32(header + EFX_REFLASH_HEADER_FIRMWARE_TYPE_OFST); + *subtype = get_unaligned_le32(header + EFX_REFLASH_HEADER_FIRMWARE_SUBTYPE_OFST); + if (*type == EFX_REFLASH_FIRMWARE_TYPE_BUNDLE) { + /* All the bundle data is written verbatim to NVRAM */ + *data = fw->data; + *data_size = fw->size; + } else { + /* Other payload types strip the reflash header and trailer + * from the data written to NVRAM + */ + *data = header + header_len; + *data_size = payload_size; + } + + return true; +} + +/* Map from FIRMWARE_TYPE to NVRAM_PARTITION_TYPE */ +static int efx_reflash_partition_type(u32 type, u32 subtype, + u32 *partition_type, + u32 *partition_subtype) +{ + int rc = 0; + + switch (type) { + case EFX_REFLASH_FIRMWARE_TYPE_BOOTROM: + *partition_type = NVRAM_PARTITION_TYPE_EXPANSION_ROM; + *partition_subtype = subtype; + break; + case EFX_REFLASH_FIRMWARE_TYPE_BUNDLE: + *partition_type = NVRAM_PARTITION_TYPE_BUNDLE; + *partition_subtype = subtype; + break; + default: + /* Not supported */ + rc = -EINVAL; + } + + return rc; +} + +/* Try to parse a SmartNIC image header at the specified offset */ +static bool efx_reflash_parse_snic_header(const struct firmware *fw, + size_t header_offset, + u32 *partition_type, + u32 *partition_subtype, + const u8 **data, size_t *data_size) +{ + u32 magic, version, payload_size, header_len, expected_crc, crc; + size_t header_end, payload_end; + const u8 *header; + + if (check_add_overflow(header_offset, EFX_SNICIMAGE_HEADER_MINLEN, + &header_end) || + fw->size < header_end) + return false; + + header = fw->data + header_offset; + magic = get_unaligned_le32(header + EFX_SNICIMAGE_HEADER_MAGIC_OFST); + if (magic !=
EFX_SNICIMAGE_HEADER_MAGIC_VALUE) + return false; + + version = get_unaligned_le32(header + EFX_SNICIMAGE_HEADER_VERSION_OFST); + if (version != EFX_SNICIMAGE_HEADER_VERSION_VALUE) + return false; + + header_len = get_unaligned_le32(header + EFX_SNICIMAGE_HEADER_LENGTH_OFST); + if (check_add_overflow(header_offset, header_len, &header_end)) + return false; + payload_size = get_unaligned_le32(header + EFX_SNICIMAGE_HEADER_PAYLOAD_SIZE_OFST); + if (check_add_overflow(header_end, payload_size, &payload_end) || + fw->size < payload_end) + return false; + + expected_crc = get_unaligned_le32(header + EFX_SNICIMAGE_HEADER_CRC_OFST); + + /* Calculate CRC omitting the expected CRC field itself */ + crc = crc32_le(~0, header, EFX_SNICIMAGE_HEADER_CRC_OFST); + crc = ~crc32_le(crc, + header + EFX_SNICIMAGE_HEADER_CRC_OFST + + EFX_SNICIMAGE_HEADER_CRC_LEN, + header_len + payload_size - EFX_SNICIMAGE_HEADER_CRC_OFST - + EFX_SNICIMAGE_HEADER_CRC_LEN); + if (crc != expected_crc) + return false; + + *partition_type = + get_unaligned_le32(header + EFX_SNICIMAGE_HEADER_PARTITION_TYPE_OFST); + *partition_subtype = + get_unaligned_le32(header + EFX_SNICIMAGE_HEADER_PARTITION_SUBTYPE_OFST); + *data = fw->data; + *data_size = fw->size; + return true; +} + +/* Try to parse a SmartNIC bundle header at the specified offset */ +static bool efx_reflash_parse_snic_bundle_header(const struct firmware *fw, + size_t header_offset, + u32 *partition_type, + u32 *partition_subtype, + const u8 **data, + size_t *data_size) +{ + u32 magic, version, bundle_type, header_len, expected_crc, crc; + size_t header_end; + const u8 *header; + + if (check_add_overflow(header_offset, EFX_SNICBUNDLE_HEADER_LEN, + &header_end)) + return false; + if (fw->size < header_end) + return false; + + header = fw->data + header_offset; + magic = get_unaligned_le32(header + EFX_SNICBUNDLE_HEADER_MAGIC_OFST); + if (magic != EFX_SNICBUNDLE_HEADER_MAGIC_VALUE) + return false; + + version = get_unaligned_le32(header + EFX_SNICBUNDLE_HEADER_VERSION_OFST); + if (version != EFX_SNICBUNDLE_HEADER_VERSION_VALUE) + return false; + + bundle_type = get_unaligned_le32(header + EFX_SNICBUNDLE_HEADER_BUNDLE_TYPE_OFST); + if (bundle_type != NVRAM_PARTITION_TYPE_BUNDLE) + return false; + + header_len = get_unaligned_le32(header + EFX_SNICBUNDLE_HEADER_LENGTH_OFST); + if (header_len != EFX_SNICBUNDLE_HEADER_LEN) + return false; + + expected_crc = get_unaligned_le32(header + EFX_SNICBUNDLE_HEADER_CRC_OFST); + crc = ~crc32_le(~0, header, EFX_SNICBUNDLE_HEADER_CRC_OFST); + if (crc != expected_crc) + return false; + + *partition_type = NVRAM_PARTITION_TYPE_BUNDLE; + *partition_subtype = get_unaligned_le32(header + EFX_SNICBUNDLE_HEADER_BUNDLE_SUBTYPE_OFST); + *data = fw->data; + *data_size = fw->size; + return true; +} + +/* Try to find a valid firmware payload in the firmware data. + * When we recognise a valid header, we parse it for the partition type + * (so we know where to ask the MC to write it to) and the location of + * the data blob to write. + */ +static int efx_reflash_parse_firmware_data(const struct firmware *fw, + u32 *partition_type, + u32 *partition_subtype, + const u8 **data, size_t *data_size) +{ + size_t header_offset; + u32 type, subtype; + + /* Some packaging formats (such as CMS/PKCS#7 signed images) + * prepend a header for which finding the size is a non-trivial + * task, so step through the firmware data until we find a valid + * header. + * + * The checks are intended to reject firmware data that is clearly not + * in the expected format. 
They do not need to be exhaustive as the + * running firmware will perform its own comprehensive validity and + * compatibility checks during the update procedure. + * + * Firmware packages may contain multiple reflash images, e.g. a + * bundle containing one or more other images. Only check the + * outermost container by stopping after the first candidate image + * found even if it is for an unsupported partition type. + */ + for (header_offset = 0; header_offset < fw->size; header_offset++) { + if (efx_reflash_parse_snic_bundle_header(fw, header_offset, + partition_type, + partition_subtype, + data, data_size)) + return 0; + + if (efx_reflash_parse_snic_header(fw, header_offset, + partition_type, + partition_subtype, data, + data_size)) + return 0; + + if (efx_reflash_parse_reflash_header(fw, header_offset, &type, + &subtype, data, data_size)) + return efx_reflash_partition_type(type, subtype, + partition_type, + partition_subtype); + } + + return -EINVAL; +} + +/* Limit the number of status updates during the erase or write phases */ +#define EFX_DEVLINK_STATUS_UPDATE_COUNT 50 + +/* Expected timeout for efx_mcdi_nvram_update_finish_polled() */ +#define EFX_DEVLINK_UPDATE_FINISH_TIMEOUT 900 + +/* Ideal erase chunk size. This is a balance between minimising the number of + * MCDI requests to erase an entire partition whilst avoiding tripping the MCDI + * RPC timeout. + */ +#define EFX_NVRAM_ERASE_IDEAL_CHUNK_SIZE (64 * 1024) + +static int efx_reflash_erase_partition(struct efx_nic *efx, + struct netlink_ext_ack *extack, + struct devlink *devlink, u32 type, + size_t partition_size, + size_t align) +{ + size_t chunk, offset, next_update; + int rc; + + /* Partitions that cannot be erased or do not require erase before + * write are advertised with an erase alignment/sector size of zero. + */ + if (align == 0) + /* Nothing to do */ + return 0; + + if (partition_size % align) + return -EINVAL; + + /* Erase the entire NVRAM partition a chunk at a time to avoid + * potentially tripping the MCDI RPC timeout. + */ + if (align >= EFX_NVRAM_ERASE_IDEAL_CHUNK_SIZE) + chunk = align; + else + chunk = rounddown(EFX_NVRAM_ERASE_IDEAL_CHUNK_SIZE, align); + + for (offset = 0, next_update = 0; offset < partition_size; offset += chunk) { + if (offset >= next_update) { + devlink_flash_update_status_notify(devlink, "Erasing", + NULL, offset, + partition_size); + next_update += partition_size / EFX_DEVLINK_STATUS_UPDATE_COUNT; + } + + chunk = min_t(size_t, partition_size - offset, chunk); + rc = efx_mcdi_nvram_erase(efx, type, offset, chunk); + if (rc) { + NL_SET_ERR_MSG_FMT_MOD(extack, + "Erase failed for NVRAM partition %#x at %#zx-%#zx", + type, offset, offset + chunk - 1); + return rc; + } + } + + devlink_flash_update_status_notify(devlink, "Erasing", NULL, + partition_size, partition_size); + + return 0; +} + +static int efx_reflash_write_partition(struct efx_nic *efx, + struct netlink_ext_ack *extack, + struct devlink *devlink, u32 type, + const u8 *data, size_t data_size, + size_t align) +{ + size_t write_max, chunk, offset, next_update; + int rc; + + if (align == 0) + return -EINVAL; + + /* Write the NVRAM partition in chunks that are the largest multiple + * of the partition's required write alignment that will fit into the + * MCDI NVRAM_WRITE RPC payload.
+ */ + if (efx->type->mcdi_max_ver < 2) + write_max = MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_LEN * + MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_MAXNUM; + else + write_max = MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_LEN * + MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_MAXNUM_MCDI2; + chunk = rounddown(write_max, align); + + for (offset = 0, next_update = 0; offset + chunk <= data_size; offset += chunk) { + if (offset >= next_update) { + devlink_flash_update_status_notify(devlink, "Writing", + NULL, offset, + data_size); + next_update += data_size / EFX_DEVLINK_STATUS_UPDATE_COUNT; + } + + rc = efx_mcdi_nvram_write(efx, type, offset, data + offset, chunk); + if (rc) { + NL_SET_ERR_MSG_FMT_MOD(extack, + "Write failed for NVRAM partition %#x at %#zx-%#zx", + type, offset, offset + chunk - 1); + return rc; + } + } + + /* Round up left over data to satisfy write alignment */ + if (offset < data_size) { + size_t remaining = data_size - offset; + u8 *buf; + + if (offset >= next_update) + devlink_flash_update_status_notify(devlink, "Writing", + NULL, offset, + data_size); + + chunk = roundup(remaining, align); + buf = kmalloc(chunk, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + memcpy(buf, data + offset, remaining); + memset(buf + remaining, 0xFF, chunk - remaining); + rc = efx_mcdi_nvram_write(efx, type, offset, buf, chunk); + kfree(buf); + if (rc) { + NL_SET_ERR_MSG_FMT_MOD(extack, + "Write failed for NVRAM partition %#x at %#zx-%#zx", + type, offset, offset + chunk - 1); + return rc; + } + } + + devlink_flash_update_status_notify(devlink, "Writing", NULL, data_size, + data_size); + + return 0; +} + +int efx_reflash_flash_firmware(struct efx_nic *efx, const struct firmware *fw, + struct netlink_ext_ack *extack) +{ + size_t data_size, size, erase_align, write_align; + struct devlink *devlink = efx->devlink; + u32 type, data_subtype, subtype; + const u8 *data; + bool protected; + int rc; + + if (!efx_has_cap(efx, BUNDLE_UPDATE)) { + NL_SET_ERR_MSG_MOD(extack, "NVRAM bundle updates are not supported by the firmware"); + return -EOPNOTSUPP; + } + + mutex_lock(&efx->reflash_mutex); + + devlink_flash_update_status_notify(devlink, "Checking update", NULL, 0, 0); + + if (efx->type->flash_auto_partition) { + /* NIC wants entire FW file including headers; + * FW will validate 'subtype' if there is one + */ + type = NVRAM_PARTITION_TYPE_AUTO; + data = fw->data; + data_size = fw->size; + } else { + rc = efx_reflash_parse_firmware_data(fw, &type, &data_subtype, &data, + &data_size); + if (rc) { + NL_SET_ERR_MSG_MOD(extack, + "Firmware image validation check failed"); + goto out_unlock; + } + + rc = efx_mcdi_nvram_metadata(efx, type, &subtype, NULL, NULL, 0); + if (rc) { + NL_SET_ERR_MSG_FMT_MOD(extack, + "Metadata query for NVRAM partition %#x failed", + type); + goto out_unlock; + } + + if (subtype != data_subtype) { + NL_SET_ERR_MSG_MOD(extack, + "Firmware image is not appropriate for this adapter"); + rc = -EINVAL; + goto out_unlock; + } + } + + rc = efx_mcdi_nvram_info(efx, type, &size, &erase_align, &write_align, + &protected); + if (rc) { + NL_SET_ERR_MSG_FMT_MOD(extack, + "Info query for NVRAM partition %#x failed", + type); + goto out_unlock; + } + + if (protected) { + NL_SET_ERR_MSG_FMT_MOD(extack, + "NVRAM partition %#x is protected", + type); + rc = -EPERM; + goto out_unlock; + } + + if (write_align == 0) { + NL_SET_ERR_MSG_FMT_MOD(extack, + "NVRAM partition %#x is not writable", + type); + rc = -EACCES; + goto out_unlock; + } + + if (erase_align != 0 && size % erase_align) { + NL_SET_ERR_MSG_FMT_MOD(extack, + "NVRAM 
partition %#x has a bad partition table entry, can't erase it", + type); + rc = -EACCES; + goto out_unlock; + } + + if (data_size > size) { + NL_SET_ERR_MSG_FMT_MOD(extack, + "Firmware image is too big for NVRAM partition %#x", + type); + rc = -EFBIG; + goto out_unlock; + } + + devlink_flash_update_status_notify(devlink, "Starting update", NULL, 0, 0); + + rc = efx_mcdi_nvram_update_start(efx, type); + if (rc) { + NL_SET_ERR_MSG_FMT_MOD(extack, + "Update start request for NVRAM partition %#x failed", + type); + goto out_unlock; + } + + rc = efx_reflash_erase_partition(efx, extack, devlink, type, size, + erase_align); + if (rc) + goto out_update_finish; + + rc = efx_reflash_write_partition(efx, extack, devlink, type, data, + data_size, write_align); + if (rc) + goto out_update_finish; + + devlink_flash_update_timeout_notify(devlink, "Finishing update", NULL, + EFX_DEVLINK_UPDATE_FINISH_TIMEOUT); + +out_update_finish: + if (rc) + /* Don't obscure the return code from an earlier failure */ + efx_mcdi_nvram_update_finish(efx, type, EFX_UPDATE_FINISH_ABORT); + else + rc = efx_mcdi_nvram_update_finish_polled(efx, type); +out_unlock: + mutex_unlock(&efx->reflash_mutex); + devlink_flash_update_status_notify(devlink, rc ? "Update failed" : + "Update complete", + NULL, 0, 0); + return rc; +} diff --git a/drivers/net/ethernet/sfc/efx_reflash.h b/drivers/net/ethernet/sfc/efx_reflash.h new file mode 100644 index 0000000000000..3dffac5651615 --- /dev/null +++ b/drivers/net/ethernet/sfc/efx_reflash.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/**************************************************************************** + * Driver for AMD network controllers and boards + * Copyright (C) 2025, Advanced Micro Devices, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ + +#ifndef _EFX_REFLASH_H +#define _EFX_REFLASH_H + +#include "net_driver.h" +#include <linux/firmware.h> + +int efx_reflash_flash_firmware(struct efx_nic *efx, const struct firmware *fw, + struct netlink_ext_ack *extack); + +#endif /* _EFX_REFLASH_H */ diff --git a/drivers/net/ethernet/sfc/fw_formats.h b/drivers/net/ethernet/sfc/fw_formats.h new file mode 100644 index 0000000000000..cbc350c960133 --- /dev/null +++ b/drivers/net/ethernet/sfc/fw_formats.h @@ -0,0 +1,114 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/**************************************************************************** + * Driver for AMD network controllers and boards + * Copyright (C) 2025, Advanced Micro Devices, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ + +#ifndef _EFX_FW_FORMATS_H +#define _EFX_FW_FORMATS_H + +/* Header layouts of firmware update images recognised by Efx NICs. + * The sources-of-truth for these layouts are AMD internal documents + * and sfregistry headers, neither of which are available externally + * nor usable directly by the driver. + * + * While each format includes a 'magic number', these are at different + * offsets in the various formats, and a legal header for one format + * could have the right value in whichever field occupies that offset + * to match another format's magic.
+ * Besides, some packaging formats (such as CMS/PKCS#7 signed images) + * prepend a header for which finding the size is a non-trivial task; + * rather than trying to parse those headers, we search byte-by-byte + * through the provided firmware image looking for a valid header. + * Thus, format recognition has to include validation of the checksum + * field, even though the firmware will validate that itself before + * applying the image. + */ + +/* EF10 (Medford2, X2) "reflash" header format. Defined in SF-121352-AN */ +#define EFX_REFLASH_HEADER_MAGIC_OFST 0 +#define EFX_REFLASH_HEADER_MAGIC_LEN 4 +#define EFX_REFLASH_HEADER_MAGIC_VALUE 0x106F1A5 + +#define EFX_REFLASH_HEADER_VERSION_OFST 4 +#define EFX_REFLASH_HEADER_VERSION_LEN 4 +#define EFX_REFLASH_HEADER_VERSION_VALUE 4 + +#define EFX_REFLASH_HEADER_FIRMWARE_TYPE_OFST 8 +#define EFX_REFLASH_HEADER_FIRMWARE_TYPE_LEN 4 +#define EFX_REFLASH_FIRMWARE_TYPE_BOOTROM 0x2 +#define EFX_REFLASH_FIRMWARE_TYPE_BUNDLE 0xd + +#define EFX_REFLASH_HEADER_FIRMWARE_SUBTYPE_OFST 12 +#define EFX_REFLASH_HEADER_FIRMWARE_SUBTYPE_LEN 4 + +#define EFX_REFLASH_HEADER_PAYLOAD_SIZE_OFST 16 +#define EFX_REFLASH_HEADER_PAYLOAD_SIZE_LEN 4 + +#define EFX_REFLASH_HEADER_LENGTH_OFST 20 +#define EFX_REFLASH_HEADER_LENGTH_LEN 4 + +/* Reflash trailer */ +#define EFX_REFLASH_TRAILER_CRC_OFST 0 +#define EFX_REFLASH_TRAILER_CRC_LEN 4 + +#define EFX_REFLASH_TRAILER_LEN \ + (EFX_REFLASH_TRAILER_CRC_OFST + EFX_REFLASH_TRAILER_CRC_LEN) + +/* EF100 "SmartNIC image" header format. + * Defined in sfregistry "src/layout/snic_image_hdr.h". + */ +#define EFX_SNICIMAGE_HEADER_MAGIC_OFST 16 +#define EFX_SNICIMAGE_HEADER_MAGIC_LEN 4 +#define EFX_SNICIMAGE_HEADER_MAGIC_VALUE 0x541C057A + +#define EFX_SNICIMAGE_HEADER_VERSION_OFST 20 +#define EFX_SNICIMAGE_HEADER_VERSION_LEN 4 +#define EFX_SNICIMAGE_HEADER_VERSION_VALUE 1 + +#define EFX_SNICIMAGE_HEADER_LENGTH_OFST 24 +#define EFX_SNICIMAGE_HEADER_LENGTH_LEN 4 + +#define EFX_SNICIMAGE_HEADER_PARTITION_TYPE_OFST 36 +#define EFX_SNICIMAGE_HEADER_PARTITION_TYPE_LEN 4 + +#define EFX_SNICIMAGE_HEADER_PARTITION_SUBTYPE_OFST 40 +#define EFX_SNICIMAGE_HEADER_PARTITION_SUBTYPE_LEN 4 + +#define EFX_SNICIMAGE_HEADER_PAYLOAD_SIZE_OFST 60 +#define EFX_SNICIMAGE_HEADER_PAYLOAD_SIZE_LEN 4 + +#define EFX_SNICIMAGE_HEADER_CRC_OFST 64 +#define EFX_SNICIMAGE_HEADER_CRC_LEN 4 + +#define EFX_SNICIMAGE_HEADER_MINLEN 256 + +/* EF100 "SmartNIC bundle" header format. 
Defined in SF-122606-TC */ +#define EFX_SNICBUNDLE_HEADER_MAGIC_OFST 0 +#define EFX_SNICBUNDLE_HEADER_MAGIC_LEN 4 +#define EFX_SNICBUNDLE_HEADER_MAGIC_VALUE 0xB1001001 + +#define EFX_SNICBUNDLE_HEADER_VERSION_OFST 4 +#define EFX_SNICBUNDLE_HEADER_VERSION_LEN 4 +#define EFX_SNICBUNDLE_HEADER_VERSION_VALUE 1 + +#define EFX_SNICBUNDLE_HEADER_BUNDLE_TYPE_OFST 8 +#define EFX_SNICBUNDLE_HEADER_BUNDLE_TYPE_LEN 4 + +#define EFX_SNICBUNDLE_HEADER_BUNDLE_SUBTYPE_OFST 12 +#define EFX_SNICBUNDLE_HEADER_BUNDLE_SUBTYPE_LEN 4 + +#define EFX_SNICBUNDLE_HEADER_LENGTH_OFST 20 +#define EFX_SNICBUNDLE_HEADER_LENGTH_LEN 4 + +#define EFX_SNICBUNDLE_HEADER_CRC_OFST 224 +#define EFX_SNICBUNDLE_HEADER_CRC_LEN 4 + +#define EFX_SNICBUNDLE_HEADER_LEN \ + (EFX_SNICBUNDLE_HEADER_CRC_OFST + EFX_SNICBUNDLE_HEADER_CRC_LEN) + +#endif /* _EFX_FW_FORMATS_H */ diff --git a/drivers/net/ethernet/sfc/mae.c b/drivers/net/ethernet/sfc/mae.c index 50f097487b140..6fd0c1e9a7d54 100644 --- a/drivers/net/ethernet/sfc/mae.c +++ b/drivers/net/ethernet/sfc/mae.c @@ -755,7 +755,7 @@ int efx_mae_match_check_caps_lhs(struct efx_nic *efx, rc = efx_mae_match_check_cap_typ(supported_fields[MAE_FIELD_INGRESS_PORT], ingress_port_mask_type); if (rc) { - NL_SET_ERR_MSG_FMT_MOD(extack, "No support for %s mask in field %s\n", + NL_SET_ERR_MSG_FMT_MOD(extack, "No support for %s mask in field %s", mask_type_name(ingress_port_mask_type), "ingress_port"); return rc; diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c index d461b1a6ce810..dbd2ee915838c 100644 --- a/drivers/net/ethernet/sfc/mcdi.c +++ b/drivers/net/ethernet/sfc/mcdi.c @@ -1625,12 +1625,15 @@ static int efx_new_mcdi_nvram_types(struct efx_nic *efx, u32 *number, return rc; } +#define EFX_MCDI_NVRAM_DEFAULT_WRITE_LEN 128 + int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type, size_t *size_out, size_t *erase_size_out, - bool *protected_out) + size_t *write_size_out, bool *protected_out) { MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_INFO_IN_LEN); - MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_INFO_OUT_LEN); + MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_INFO_V2_OUT_LEN); + size_t write_size = 0; size_t outlen; int rc; @@ -1645,6 +1648,12 @@ int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type, goto fail; } + if (outlen >= MC_CMD_NVRAM_INFO_V2_OUT_LEN) + write_size = MCDI_DWORD(outbuf, NVRAM_INFO_V2_OUT_WRITESIZE); + else + write_size = EFX_MCDI_NVRAM_DEFAULT_WRITE_LEN; + + *write_size_out = write_size; *size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_SIZE); *erase_size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_ERASESIZE); *protected_out = !!(MCDI_DWORD(outbuf, NVRAM_INFO_OUT_FLAGS) & @@ -2163,11 +2172,9 @@ int efx_mcdi_nvram_metadata(struct efx_nic *efx, unsigned int type, return rc; } -#ifdef CONFIG_SFC_MTD - #define EFX_MCDI_NVRAM_LEN_MAX 128 -static int efx_mcdi_nvram_update_start(struct efx_nic *efx, unsigned int type) +int efx_mcdi_nvram_update_start(struct efx_nic *efx, unsigned int type) { MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_UPDATE_START_V2_IN_LEN); int rc; @@ -2185,6 +2192,8 @@ static int efx_mcdi_nvram_update_start(struct efx_nic *efx, unsigned int type) return rc; } +#ifdef CONFIG_SFC_MTD + static int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type, loff_t offset, u8 *buffer, size_t length) { @@ -2209,13 +2218,20 @@ static int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type, return 0; } -static int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type, - loff_t offset, const u8 *buffer, size_t length) +#endif /* CONFIG_SFC_MTD */ + 
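efx_reflash_write_partition() above sizes its chunks as the largest multiple of the partition's write alignment that fits in the MCDI payload, then pads the tail with 0xFF up to the next alignment boundary before handing each piece to efx_mcdi_nvram_write() below. A standalone sketch of that arithmetic with made-up numbers (the 508-byte payload budget, 64-byte alignment and 1000-byte image are illustrative only, not values from the driver):

    #include <stdio.h>
    #include <stddef.h>

    #define ROUND_DOWN(x, a) ((x) / (a) * (a))
    #define ROUND_UP(x, a)   (((x) + (a) - 1) / (a) * (a))

    int main(void)
    {
            size_t write_max = 508;  /* assumed MCDI payload budget */
            size_t align = 64;       /* assumed write alignment */
            size_t data_size = 1000; /* assumed image size */
            size_t chunk = ROUND_DOWN(write_max, align); /* 448 */
            size_t offset;

            /* full chunks: 448 bytes at 0, then 448 bytes at 448 */
            for (offset = 0; offset + chunk <= data_size; offset += chunk)
                    printf("write %zu bytes at %zu\n", chunk, offset);

            /* tail: 104 real bytes rounded up to 128, padded with 0xFF */
            if (offset < data_size)
                    printf("write %zu padded bytes at %zu\n",
                           ROUND_UP(data_size - offset, align), offset);
            return 0;
    }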
+int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type, + loff_t offset, const u8 *buffer, size_t length) { - MCDI_DECLARE_BUF(inbuf, - MC_CMD_NVRAM_WRITE_IN_LEN(EFX_MCDI_NVRAM_LEN_MAX)); + efx_dword_t *inbuf; + size_t inlen; int rc; + inlen = ALIGN(MC_CMD_NVRAM_WRITE_IN_LEN(length), 4); + inbuf = kzalloc(inlen, GFP_KERNEL); + if (!inbuf) + return -ENOMEM; + MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_TYPE, type); MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_OFFSET, offset); MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_LENGTH, length); @@ -2223,14 +2239,14 @@ static int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type, BUILD_BUG_ON(MC_CMD_NVRAM_WRITE_OUT_LEN != 0); - rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_WRITE, inbuf, - ALIGN(MC_CMD_NVRAM_WRITE_IN_LEN(length), 4), - NULL, 0, NULL); + rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_WRITE, inbuf, inlen, NULL, 0, NULL); + kfree(inbuf); + return rc; } -static int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type, - loff_t offset, size_t length) +int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type, loff_t offset, + size_t length) { MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_ERASE_IN_LEN); int rc; @@ -2246,7 +2262,8 @@ static int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type, return rc; } -static int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type) +int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type, + enum efx_update_finish_mode mode) { MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_LEN); MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_LEN); @@ -2254,22 +2271,41 @@ static int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type) int rc, rc2; MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_FINISH_IN_TYPE, type); - /* Always set this flag. Old firmware ignores it */ - MCDI_POPULATE_DWORD_1(inbuf, NVRAM_UPDATE_FINISH_V2_IN_FLAGS, + + /* Old firmware doesn't support background update finish and abort + * operations. Fall back to waiting if the requested mode is not + * supported.
+ */ + if (!efx_has_cap(efx, NVRAM_UPDATE_POLL_VERIFY_RESULT) || + (!efx_has_cap(efx, NVRAM_UPDATE_ABORT_SUPPORTED) && + mode == EFX_UPDATE_FINISH_ABORT)) + mode = EFX_UPDATE_FINISH_WAIT; + + MCDI_POPULATE_DWORD_4(inbuf, NVRAM_UPDATE_FINISH_V2_IN_FLAGS, NVRAM_UPDATE_FINISH_V2_IN_FLAG_REPORT_VERIFY_RESULT, - 1); + (mode != EFX_UPDATE_FINISH_ABORT), + NVRAM_UPDATE_FINISH_V2_IN_FLAG_RUN_IN_BACKGROUND, + (mode == EFX_UPDATE_FINISH_BACKGROUND), + NVRAM_UPDATE_FINISH_V2_IN_FLAG_POLL_VERIFY_RESULT, + (mode == EFX_UPDATE_FINISH_POLL), + NVRAM_UPDATE_FINISH_V2_IN_FLAG_ABORT, + (mode == EFX_UPDATE_FINISH_ABORT)); rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_FINISH, inbuf, sizeof(inbuf), outbuf, sizeof(outbuf), &outlen); if (!rc && outlen >= MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_LEN) { rc2 = MCDI_DWORD(outbuf, NVRAM_UPDATE_FINISH_V2_OUT_RESULT_CODE); - if (rc2 != MC_CMD_NVRAM_VERIFY_RC_SUCCESS) + if (rc2 != MC_CMD_NVRAM_VERIFY_RC_SUCCESS && + rc2 != MC_CMD_NVRAM_VERIFY_RC_PENDING) netif_err(efx, drv, efx->net_dev, "NVRAM update failed verification with code 0x%x\n", rc2); switch (rc2) { case MC_CMD_NVRAM_VERIFY_RC_SUCCESS: break; + case MC_CMD_NVRAM_VERIFY_RC_PENDING: + rc = -EAGAIN; + break; case MC_CMD_NVRAM_VERIFY_RC_CMS_CHECK_FAILED: case MC_CMD_NVRAM_VERIFY_RC_MESSAGE_DIGEST_CHECK_FAILED: case MC_CMD_NVRAM_VERIFY_RC_SIGNATURE_CHECK_FAILED: @@ -2284,6 +2320,8 @@ static int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type) case MC_CMD_NVRAM_VERIFY_RC_NO_VALID_SIGNATURES: case MC_CMD_NVRAM_VERIFY_RC_NO_TRUSTED_APPROVERS: case MC_CMD_NVRAM_VERIFY_RC_NO_SIGNATURE_MATCH: + case MC_CMD_NVRAM_VERIFY_RC_REJECT_TEST_SIGNED: + case MC_CMD_NVRAM_VERIFY_RC_SECURITY_LEVEL_DOWNGRADE: rc = -EPERM; break; default: @@ -2296,6 +2334,42 @@ static int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type) return rc; } +#define EFX_MCDI_NVRAM_UPDATE_FINISH_INITIAL_POLL_DELAY_MS 5 +#define EFX_MCDI_NVRAM_UPDATE_FINISH_MAX_POLL_DELAY_MS 5000 +#define EFX_MCDI_NVRAM_UPDATE_FINISH_RETRIES 185 + +int efx_mcdi_nvram_update_finish_polled(struct efx_nic *efx, unsigned int type) +{ + unsigned int delay = EFX_MCDI_NVRAM_UPDATE_FINISH_INITIAL_POLL_DELAY_MS; + unsigned int retry = 0; + int rc; + + /* NVRAM updates can take a long time (e.g. up to 1 minute for bundle + * images). Polling for NVRAM update completion ensures that other MCDI + * commands can be issued before the background NVRAM update completes. + * + * The initial call either completes the update synchronously, or + * returns -EAGAIN to indicate processing is continuing. In the latter + * case, we poll for at least 900 seconds, at increasing intervals + * (5ms, 50ms, 500ms, 5s). 
+ */ + rc = efx_mcdi_nvram_update_finish(efx, type, EFX_UPDATE_FINISH_BACKGROUND); + while (rc == -EAGAIN) { + if (retry > EFX_MCDI_NVRAM_UPDATE_FINISH_RETRIES) + return -ETIMEDOUT; + retry++; + + msleep(delay); + if (delay < EFX_MCDI_NVRAM_UPDATE_FINISH_MAX_POLL_DELAY_MS) + delay *= 10; + + rc = efx_mcdi_nvram_update_finish(efx, type, EFX_UPDATE_FINISH_POLL); + } + return rc; +} + +#ifdef CONFIG_SFC_MTD + int efx_mcdi_mtd_read(struct mtd_info *mtd, loff_t start, size_t len, size_t *retlen, u8 *buffer) { @@ -2389,7 +2463,8 @@ int efx_mcdi_mtd_sync(struct mtd_info *mtd) if (part->updating) { part->updating = false; - rc = efx_mcdi_nvram_update_finish(efx, part->nvram_type); + rc = efx_mcdi_nvram_update_finish(efx, part->nvram_type, + EFX_UPDATE_FINISH_WAIT); } return rc; diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h index cdb17d7c147fc..3755cd3fe1e60 100644 --- a/drivers/net/ethernet/sfc/mcdi.h +++ b/drivers/net/ethernet/sfc/mcdi.h @@ -392,7 +392,7 @@ int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart, u32 dest_evq); int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out); int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type, size_t *size_out, size_t *erase_size_out, - bool *protected_out); + size_t *write_size_out, bool *protected_out); int efx_new_mcdi_nvram_test_all(struct efx_nic *efx); int efx_mcdi_nvram_metadata(struct efx_nic *efx, unsigned int type, u32 *subtype, u16 version[4], char *desc, @@ -424,6 +424,26 @@ static inline int efx_mcdi_mon_probe(struct efx_nic *efx) { return 0; } static inline void efx_mcdi_mon_remove(struct efx_nic *efx) {} #endif +int efx_mcdi_nvram_update_start(struct efx_nic *efx, unsigned int type); +int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type, + loff_t offset, const u8 *buffer, size_t length); +int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type, + loff_t offset, size_t length); +int efx_mcdi_nvram_metadata(struct efx_nic *efx, unsigned int type, + u32 *subtype, u16 version[4], char *desc, + size_t descsize); + +enum efx_update_finish_mode { + EFX_UPDATE_FINISH_WAIT, + EFX_UPDATE_FINISH_BACKGROUND, + EFX_UPDATE_FINISH_POLL, + EFX_UPDATE_FINISH_ABORT, +}; + +int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type, + enum efx_update_finish_mode mode); +int efx_mcdi_nvram_update_finish_polled(struct efx_nic *efx, unsigned int type); + #ifdef CONFIG_SFC_MTD int efx_mcdi_mtd_read(struct mtd_info *mtd, loff_t start, size_t len, size_t *retlen, u8 *buffer); diff --git a/drivers/net/ethernet/sfc/mcdi_pcol.h b/drivers/net/ethernet/sfc/mcdi_pcol.h index cd297e19cddcc..9cb339c461fba 100644 --- a/drivers/net/ethernet/sfc/mcdi_pcol.h +++ b/drivers/net/ethernet/sfc/mcdi_pcol.h @@ -72,19 +72,19 @@ * | \------- Error * \------------------------------ Resync (always set) * - * The client writes it's request into MC shared memory, and rings the - * doorbell. Each request is completed by either by the MC writing + * The client writes its request into MC shared memory, and rings the + * doorbell. Each request is completed either by the MC writing * back into shared memory, or by writing out an event. * * All MCDI commands support completion by shared memory response. Each * request may also contain additional data (accounted for by HEADER.LEN), - * and some response's may also contain additional data (again, accounted + * and some responses may also contain additional data (again, accounted * for by HEADER.LEN). 
* * Some MCDI commands support completion by event, in which any associated * response data is included in the event. * - * The protocol requires one response to be delivered for every request, a + * The protocol requires one response to be delivered for every request; a * request should not be sent unless the response for the previous request * has been received (either by polling shared memory, or by receiving * an event). @@ -165,6 +165,7 @@ #define FSE_AZ_EV_CODE_MCDI_EVRESPONSE 0xc + #define MC_CMD_ERR_CODE_OFST 0 #define MC_CMD_ERR_PROXY_PENDING_HANDLE_OFST 4 @@ -321,7 +322,7 @@ /* enum: The requesting client is not a function */ #define MC_CMD_ERR_CLIENT_NOT_FN 0x100c /* enum: The requested operation might require the command to be passed between - * MCs, and thetransport doesn't support that. Should only ever been seen over + * MCs, and the transport doesn't support that. Should only ever been seen over * the UART. */ #define MC_CMD_ERR_TRANSPORT_NOPROXY 0x100d @@ -358,7 +359,7 @@ * sub-variant switching. */ #define MC_CMD_ERR_FILTERS_PRESENT 0x1014 -/* enum: The clock whose frequency you've attempted to set set doesn't exist on +/* enum: The clock whose frequency you've attempted to set doesn't exist on * this NIC */ #define MC_CMD_ERR_NO_CLOCK 0x1015 @@ -387,25 +388,6 @@ */ #define MC_CMD_ERR_PIOBUFS_PRESENT 0x101b -/* MC_CMD_RESOURCE_SPECIFIER enum */ -/* enum: Any */ -#define MC_CMD_RESOURCE_INSTANCE_ANY 0xffffffff -#define MC_CMD_RESOURCE_INSTANCE_NONE 0xfffffffe /* enum */ - -/* MC_CMD_FPGA_FLASH_INDEX enum */ -#define MC_CMD_FPGA_FLASH_PRIMARY 0x0 /* enum */ -#define MC_CMD_FPGA_FLASH_SECONDARY 0x1 /* enum */ - -/* MC_CMD_EXTERNAL_MAE_LINK_MODE enum */ -/* enum: Legacy mode as described in XN-200039-TC. */ -#define MC_CMD_EXTERNAL_MAE_LINK_MODE_LEGACY 0x0 -/* enum: Switchdev mode as described in XN-200039-TC. */ -#define MC_CMD_EXTERNAL_MAE_LINK_MODE_SWITCHDEV 0x1 -/* enum: Bootstrap mode as described in XN-200039-TC. */ -#define MC_CMD_EXTERNAL_MAE_LINK_MODE_BOOTSTRAP 0x2 -/* enum: Link-mode change is in-progress as described in XN-200039-TC. */ -#define MC_CMD_EXTERNAL_MAE_LINK_MODE_PENDING 0xf - /* PCIE_INTERFACE enum: From EF100 onwards, SFC products can have multiple PCIe * interfaces. There is a need to refer to interfaces explicitly from drivers * (for example, a management driver on one interface administering a function @@ -424,6 +406,14 @@ * an on-NIC ARM module is expected to be connected. */ #define PCIE_INTERFACE_NIC_EMBEDDED 0x1 +/* enum: The PCIe logical interface 0. It is an alias for HOST_PRIMARY. */ +#define PCIE_INTERFACE_PCIE_HOST_INTF_0 0x0 +/* enum: The PCIe logical interface 1. */ +#define PCIE_INTERFACE_PCIE_HOST_INTF_1 0x2 +/* enum: The PCIe logical interface 2. */ +#define PCIE_INTERFACE_PCIE_HOST_INTF_2 0x3 +/* enum: The PCIe logical interface 3. */ +#define PCIE_INTERFACE_PCIE_HOST_INTF_3 0x4 /* enum: For MCDI commands issued over a PCIe interface, this value is * translated into the interface over which the command was issued. Not * meaningful for other MCDI transports. @@ -640,7 +630,11 @@ * be allocated by different counter blocks, so e.g. AR counter 42 is different * from CT counter 42. Generation counts are also type-specific. This value is * also present in the header of streaming counter packets, in the IDENTIFIER - * field (see packetiser packet format definitions). + * field (see packetiser packet format definitions). 
Also note that LACP + * counter IDs are not allocated individually, instead the counter IDs are + * directly tied to the LACP balance table indices. These in turn are allocated + * in large contiguous blocks as a LAG config. Calling MAE_COUNTER_ALLOC/FREE + * with an LACP counter type will return EPERM. */ /* enum: Action Rule counters - can be referenced in AR response. */ #define MAE_COUNTER_TYPE_AR 0x0 @@ -648,6 +642,14 @@ #define MAE_COUNTER_TYPE_CT 0x1 /* enum: Outer Rule counters - can be referenced in OR response. */ #define MAE_COUNTER_TYPE_OR 0x2 +/* enum: LACP counters - linked to LACP balance table entries. */ +#define MAE_COUNTER_TYPE_LACP 0x3 + +/* MAE_COUNTER_ID enum: ID of allocated counter or counter list. */ +/* enum: A counter ID that is guaranteed never to represent a real counter or + * counter list. + */ +#define MAE_COUNTER_ID_NULL 0xffffffff /* TABLE_ID enum: Unique IDs for tables. The 32-bit ID values have been * structured with bits [31:24] reserved (0), [23:16] indicating which major @@ -656,7 +658,9 @@ * variations of the same table. (All of the tables currently defined within * the streaming engines are listed here, but this does not imply that they are * all supported - MC_CMD_TABLE_LIST returns the list of actually supported - * tables.) + * tables.) The DPU offload engines' enumerators follow a deliberate pattern: + * 0x01010000 + is_dpu_net * 0x10000 + is_wr_or_tx * 0x8000 + is_lite_pipe * + * 0x1000 + oe_engine_type * 0x100 + oe_instance_within_pipe * 0x10 */ /* enum: Outer_Rule_Table in the MAE - refer to SF-123102-TC. */ #define TABLE_ID_OUTER_RULE_TABLE 0x10000 @@ -694,45 +698,70 @@ #define TABLE_ID_RSS_CONTEXT_TABLE 0x20200 /* enum: Indirection_Table in VNIC Rx - refer to SF-123102-TC. */ #define TABLE_ID_INDIRECTION_TABLE 0x20300 - -/* TABLE_COMPRESSED_VLAN enum: Compressed VLAN TPID as used by some field - * types; can be calculated by (((ether_type_msb >> 2) & 0x4) ^ 0x4) | - * (ether_type_msb & 0x3); - */ -#define TABLE_COMPRESSED_VLAN_TPID_8100 0x5 /* enum */ -#define TABLE_COMPRESSED_VLAN_TPID_88A8 0x4 /* enum */ -#define TABLE_COMPRESSED_VLAN_TPID_9100 0x1 /* enum */ -#define TABLE_COMPRESSED_VLAN_TPID_9200 0x2 /* enum */ -#define TABLE_COMPRESSED_VLAN_TPID_9300 0x3 /* enum */ - -/* TABLE_NAT_DIR enum: NAT direction. */ -#define TABLE_NAT_DIR_SOURCE 0x0 /* enum */ -#define TABLE_NAT_DIR_DEST 0x1 /* enum */ - -/* TABLE_RSS_KEY_MODE enum: Defines how the value for Toeplitz hashing for RSS - * is constructed as a concatenation (indicated here by "++") of packet header - * fields. - */ -/* enum: IP src addr ++ IP dst addr */ -#define TABLE_RSS_KEY_MODE_SA_DA 0x0 -/* enum: IP src addr ++ IP dst addr ++ TCP/UDP src port ++ TCP/UDP dst port */ -#define TABLE_RSS_KEY_MODE_SA_DA_SP_DP 0x1 -/* enum: IP src addr */ -#define TABLE_RSS_KEY_MODE_SA 0x2 -/* enum: IP dst addr */ -#define TABLE_RSS_KEY_MODE_DA 0x3 -/* enum: IP src addr ++ TCP/UDP src port */ -#define TABLE_RSS_KEY_MODE_SA_SP 0x4 -/* enum: IP dest addr ++ TCP dest port */ -#define TABLE_RSS_KEY_MODE_DA_DP 0x5 -/* enum: Nothing (produces input of 0, resulting in output hash of 0) */ -#define TABLE_RSS_KEY_MODE_NONE 0x7 - -/* TABLE_RSS_SPREAD_MODE enum: RSS spreading mode. */ -/* enum: RSS uses Indirection_Table lookup. */ -#define TABLE_RSS_SPREAD_MODE_INDIRECTION 0x0 -/* enum: RSS uses even spreading calculation. */ -#define TABLE_RSS_SPREAD_MODE_EVEN 0x1 +/* enum: DPU.host read pipe first CRC offload engine profiles - refer to + * XN-200147-AN. 
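+ * (Editorial sketch: the DPU offload engine table IDs below all follow the
+ * pattern documented above, so a hypothetical helper composing one would be
+ *
+ *	static inline unsigned int table_id_dpu_oe(unsigned int is_dpu_net,
+ *			unsigned int is_wr_or_tx, unsigned int is_lite_pipe,
+ *			unsigned int oe_engine_type, unsigned int oe_instance)
+ *	{
+ *		return 0x01010000 + is_dpu_net * 0x10000 +
+ *		       is_wr_or_tx * 0x8000 + is_lite_pipe * 0x1000 +
+ *		       oe_engine_type * 0x100 + oe_instance * 0x10;
+ *	}
+ *
+ * e.g. table_id_dpu_oe(1, 1, 0, 1, 1) == 0x1028110, which matches
+ * TABLE_ID_DPU_NET_TX_CSUM1_OE_PROFILE below.)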
+ */ +#define TABLE_ID_DPU_HOST_RD_CRC0_OE_PROFILE 0x1010000 +/* enum: DPU.host read pipe second CRC offload engine profiles - refer to + * XN-200147-AN. + */ +#define TABLE_ID_DPU_HOST_RD_CRC1_OE_PROFILE 0x1010010 +/* enum: DPU.host write pipe first CRC offload engine profiles - refer to + * XN-200147-AN. + */ +#define TABLE_ID_DPU_HOST_WR_CRC0_OE_PROFILE 0x1018000 +/* enum: DPU.host write pipe second CRC offload engine profiles - refer to + * XN-200147-AN. + */ +#define TABLE_ID_DPU_HOST_WR_CRC1_OE_PROFILE 0x1018010 +/* enum: DPU.net 'full' receive pipe CRC offload engine profiles - refer to + * XN-200147-AN. + */ +#define TABLE_ID_DPU_NET_RX_CRC0_OE_PROFILE 0x1020000 +/* enum: DPU.net 'full' receive pipe first checksum offload engine profiles - + * refer to XN-200147-AN. + */ +#define TABLE_ID_DPU_NET_RX_CSUM0_OE_PROFILE 0x1020100 +/* enum: DPU.net 'full' receive pipe second checksum offload engine profiles - + * refer to XN-200147-AN. + */ +#define TABLE_ID_DPU_NET_RX_CSUM1_OE_PROFILE 0x1020110 +/* enum: DPU.net 'full' receive pipe AES-GCM offload engine profiles - refer to + * XN-200147-AN. + */ +#define TABLE_ID_DPU_NET_RX_AES_GCM0_OE_PROFILE 0x1020200 +/* enum: DPU.net 'lite' receive pipe CRC offload engine profiles - refer to + * XN-200147-AN. + */ +#define TABLE_ID_DPU_NET_RXLITE_CRC0_OE_PROFILE 0x1021000 +/* enum: DPU.net 'lite' receive pipe checksum offload engine profiles - refer + * to XN-200147-AN. + */ +#define TABLE_ID_DPU_NET_RXLITE_CSUM0_OE_PROFILE 0x1021100 +/* enum: DPU.net 'full' transmit pipe CRC offload engine profiles - refer to + * XN-200147-AN. + */ +#define TABLE_ID_DPU_NET_TX_CRC0_OE_PROFILE 0x1028000 +/* enum: DPU.net 'full' transmit pipe first checksum offload engine profiles - + * refer to XN-200147-AN. + */ +#define TABLE_ID_DPU_NET_TX_CSUM0_OE_PROFILE 0x1028100 +/* enum: DPU.net 'full' transmit pipe second checksum offload engine profiles - + * refer to XN-200147-AN. + */ +#define TABLE_ID_DPU_NET_TX_CSUM1_OE_PROFILE 0x1028110 +/* enum: DPU.net 'full' transmit pipe AES-GCM offload engine profiles - refer + * to XN-200147-AN. + */ +#define TABLE_ID_DPU_NET_TX_AES_GCM0_OE_PROFILE 0x1028200 +/* enum: DPU.net 'lite' transmit pipe CRC offload engine profiles - refer to + * XN-200147-AN. + */ +#define TABLE_ID_DPU_NET_TXLITE_CRC0_OE_PROFILE 0x1029000 +/* enum: DPU.net 'lite' transmit pipe checksum offload engine profiles - refer + * to XN-200147-AN. + */ +#define TABLE_ID_DPU_NET_TXLITE_CSUM0_OE_PROFILE 0x1029100 /* TABLE_FIELD_ID enum: Unique IDs for fields. Related concepts have been * loosely grouped together into blocks with gaps for expansion, but the values @@ -1026,6 +1055,16 @@ #define TABLE_FIELD_ID_BAL_TBL_BASE_DIV64 0xde /* enum: Length of balance table region: 0=>64, 1=>128, 2=>256. */ #define TABLE_FIELD_ID_BAL_TBL_LEN_ID 0xdf +/* enum: LACP LAG ID (i.e. the low 3 bits of LACP LAG mport ID), indexing + * LACP_LAG_Config_Table. Refer to SF-123102-TC. + */ +#define TABLE_FIELD_ID_LACP_LAG_ID 0xe0 +/* enum: Address in LACP_Balance_Table. The balance table is partitioned + * between LAGs according to the settings in LACP_LAG_Config_Table and then + * indexed by the LACP hash, providing the mapping to destination mports. Refer + * to SF-123102-TC. + */ +#define TABLE_FIELD_ID_BAL_TBL_ADDR 0xe1 /* enum: UDP port to match for UDP-based encapsulations; required to be 0 for * other encapsulation types. */ @@ -1082,6 +1121,55 @@ #define TABLE_FIELD_ID_INDIR_TBL_LEN_ID 0x105 /* enum: An offset to be applied to the base destination queue ID. 
*/
#define TABLE_FIELD_ID_INDIR_OFFSET 0x106
+/* enum: DPU offload engine profile ID to address. */
+#define TABLE_FIELD_ID_OE_PROFILE 0x3e8
+/* enum: Width of the CRC to calculate - see CRC_VARIANT enum. */
+#define TABLE_FIELD_ID_CRC_VARIANT 0x3f2
+/* enum: If set, reflect the bits of each input byte, bit 7 is LSB, bit 0 is
+ * MSB. If clear, bit 7 is MSB, bit 0 is LSB.
+ */
+#define TABLE_FIELD_ID_CRC_REFIN 0x3f3
+/* enum: If set, reflect the bits of each output byte, bit 7 is LSB, bit 0 is
+ * MSB. If clear, bit 7 is MSB, bit 0 is LSB.
+ */
+#define TABLE_FIELD_ID_CRC_REFOUT 0x3f4
+/* enum: If set, invert every bit of the output value. */
+#define TABLE_FIELD_ID_CRC_INVOUT 0x3f5
+/* enum: The CRC polynomial to use for checksumming, in normal form. */
+#define TABLE_FIELD_ID_CRC_POLY 0x3f6
+/* enum: Operation for the checksum engine to perform - see DPU_CSUM_OP enum.
+ */
+#define TABLE_FIELD_ID_CSUM_OP 0x410
+/* enum: Byte offset of checksum relative to region_start (for VALIDATE_*
+ * operations only).
+ */
+#define TABLE_FIELD_ID_CSUM_OFFSET 0x411
+/* enum: Indicates there is additional data on OPR bus that needs to be
+ * incorporated into the payload checksum.
+ */
+#define TABLE_FIELD_ID_CSUM_OPR_ADDITIONAL_DATA 0x412
+/* enum: Log2 data size of additional data on OPR bus. */
+#define TABLE_FIELD_ID_CSUM_OPR_DATA_SIZE_LOG2 0x413
+/* enum: 4 byte offset of where to find the additional data on the OPR bus. */
+#define TABLE_FIELD_ID_CSUM_OPR_4B_OFF 0x414
+/* enum: Operation type for the AES-GCM core - see GCM_OP_CODE enum. */
+#define TABLE_FIELD_ID_GCM_OP_CODE 0x41a
+/* enum: Key length - AES_KEY_LEN enum. */
+#define TABLE_FIELD_ID_GCM_KEY_LEN 0x41b
+/* enum: OPR 4 byte offset for ICV or GHASH output (only in BULK_* mode) or
+ * IPSEC decrypt output.
+ */
+#define TABLE_FIELD_ID_GCM_OPR_4B_OFFSET 0x41c
+/* enum: If OP_CODE is BULK_*, indicates Emit GHASH (Fragment mode). Else,
+ * indicates IPSEC-ESN mode.
+ */
+#define TABLE_FIELD_ID_GCM_EMIT_GHASH_ISESN 0x41d
+/* enum: Replay Protection Enable. */
+#define TABLE_FIELD_ID_GCM_REPLAY_PROTECT_EN 0x41e
+/* enum: IPSEC Encrypt ESP trailer NEXT_HEADER byte. */
+#define TABLE_FIELD_ID_GCM_NEXT_HDR 0x41f
+/* enum: Replay Window Size.
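+ * (Editorial aside on anti-replay windows in general, not a statement about
+ * this engine's exact semantics: with a window of W, a received sequence
+ * number is dropped once it falls W or more behind the highest number seen,
+ * and dropped as a replay if its bit is already set in a W-bit history map.
+ * A minimal sketch for W == 64:
+ *
+ *	bool replay_ok(u64 seq, u64 top, u64 map)
+ *	{
+ *		if (seq + 64 <= top)
+ *			return false;	// outside the window, too old
+ *		if (seq > top)
+ *			return true;	// newest so far, the window slides
+ *		return !(map & (1ULL << (top - seq)));
+ *	}
+ *
+ * The field below presumably sizes that window.)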
*/ +#define TABLE_FIELD_ID_GCM_REPLAY_WIN_SIZE 0x420 /* MCDI_EVENT structuredef: The structure of an MCDI_EVENT on Siena/EF10/EF100 * platforms @@ -1138,6 +1226,24 @@ #define MCDI_EVENT_LINKCHANGE_LINK_FLAGS_OFST 0 #define MCDI_EVENT_LINKCHANGE_LINK_FLAGS_LBN 24 #define MCDI_EVENT_LINKCHANGE_LINK_FLAGS_WIDTH 8 +#define MCDI_EVENT_PORT_LINKCHANGE_PORT_HANDLE_OFST 0 +#define MCDI_EVENT_PORT_LINKCHANGE_PORT_HANDLE_LBN 0 +#define MCDI_EVENT_PORT_LINKCHANGE_PORT_HANDLE_WIDTH 24 +#define MCDI_EVENT_PORT_LINKCHANGE_SEQ_NUM_OFST 0 +#define MCDI_EVENT_PORT_LINKCHANGE_SEQ_NUM_LBN 24 +#define MCDI_EVENT_PORT_LINKCHANGE_SEQ_NUM_WIDTH 7 +#define MCDI_EVENT_PORT_LINKCHANGE_LINK_UP_OFST 0 +#define MCDI_EVENT_PORT_LINKCHANGE_LINK_UP_LBN 31 +#define MCDI_EVENT_PORT_LINKCHANGE_LINK_UP_WIDTH 1 +#define MCDI_EVENT_PORT_MODULECHANGE_PORT_HANDLE_OFST 0 +#define MCDI_EVENT_PORT_MODULECHANGE_PORT_HANDLE_LBN 0 +#define MCDI_EVENT_PORT_MODULECHANGE_PORT_HANDLE_WIDTH 24 +#define MCDI_EVENT_PORT_MODULECHANGE_SEQ_NUM_OFST 0 +#define MCDI_EVENT_PORT_MODULECHANGE_SEQ_NUM_LBN 24 +#define MCDI_EVENT_PORT_MODULECHANGE_SEQ_NUM_WIDTH 7 +#define MCDI_EVENT_PORT_MODULECHANGE_MDI_CONNECTED_OFST 0 +#define MCDI_EVENT_PORT_MODULECHANGE_MDI_CONNECTED_LBN 31 +#define MCDI_EVENT_PORT_MODULECHANGE_MDI_CONNECTED_WIDTH 1 #define MCDI_EVENT_SENSOREVT_MONITOR_OFST 0 #define MCDI_EVENT_SENSOREVT_MONITOR_LBN 0 #define MCDI_EVENT_SENSOREVT_MONITOR_WIDTH 8 @@ -1237,7 +1343,7 @@ #define MCDI_EVENT_AOE_FPGA_LOAD_FAILED 0xe /* enum: Notify that invalid flash type detected */ #define MCDI_EVENT_AOE_INVALID_FPGA_FLASH_TYPE 0xf -/* enum: Notify that the attempt to run FPGA Controller firmware timedout */ +/* enum: Notify that the attempt to run FPGA Controller firmware timed out */ #define MCDI_EVENT_AOE_FC_RUN_TIMEDOUT 0x10 /* enum: Failure to probe one or more FPGA boot flash chips */ #define MCDI_EVENT_AOE_FPGA_BOOT_FLASH_INVALID 0x11 @@ -1255,7 +1361,7 @@ #define MCDI_EVENT_AOE_ERR_FC_ASSERT_INFO_WIDTH 8 /* enum: FC Assert happened, but the register information is not available */ #define MCDI_EVENT_AOE_ERR_FC_ASSERT_SEEN 0x0 -/* enum: The register information for FC Assert is ready for readinng by driver +/* enum: The register information for FC Assert is ready for reading by driver */ #define MCDI_EVENT_AOE_ERR_FC_ASSERT_DATA_READY 0x1 #define MCDI_EVENT_AOE_ERR_CODE_FPGA_HEADER_VERIFY_FAILED_OFST 0 @@ -1364,6 +1470,12 @@ #define MCDI_EVENT_MODULECHANGE_SEQ_OFST 0 #define MCDI_EVENT_MODULECHANGE_SEQ_LBN 30 #define MCDI_EVENT_MODULECHANGE_SEQ_WIDTH 2 +#define MCDI_EVENT_DESC_PROXY_VIRTQ_VI_ID_OFST 0 +#define MCDI_EVENT_DESC_PROXY_VIRTQ_VI_ID_LBN 0 +#define MCDI_EVENT_DESC_PROXY_VIRTQ_VI_ID_WIDTH 16 +#define MCDI_EVENT_DESC_PROXY_VIRTQ_ID_OFST 0 +#define MCDI_EVENT_DESC_PROXY_VIRTQ_ID_LBN 16 +#define MCDI_EVENT_DESC_PROXY_VIRTQ_ID_WIDTH 16 #define MCDI_EVENT_DATA_LBN 0 #define MCDI_EVENT_DATA_WIDTH 32 /* Alias for PTP_DATA. */ @@ -1500,6 +1612,31 @@ * change to the journal. */ #define MCDI_EVENT_CODE_MPORT_JOURNAL_CHANGE 0x27 +/* enum: Notification that a source queue is enabled and attached to its proxy + * sink queue. SRC field contains the handle of the affected descriptor proxy + * function. DATA field contains the relative source queue number and absolute + * VI ID. + */ +#define MCDI_EVENT_CODE_DESC_PROXY_FUNC_QUEUE_START 0x28 +/* enum: Notification of a change in link state and/or link speed of a network + * port link. 
This event applies to a network port identified by a handle, + * PORT_HANDLE, which is discovered by the driver using the MC_CMD_ENUM_PORTS + * command. + */ +#define MCDI_EVENT_CODE_PORT_LINKCHANGE 0x29 +/* enum: Notification of a change in the state of an MDI (external connector) + * of a network port. This typically corresponds to module plug/unplug for + * modular interfaces (e.g., SFP/QSFP and similar) or cable connect/disconnect. + * This event applies to a network port identified by a handle, PORT_HANDLE, + * which is discovered by the driver using the MC_CMD_ENUM_PORTS command. + */ +#define MCDI_EVENT_CODE_PORT_MODULECHANGE 0x2a +/* enum: Notification that the port enumeration journal has changed since it + * was last read and updates can be read using the MC_CMD_ENUM_PORTS command. + * The firmware may moderate the events so that an event is not sent for every + * change to the journal. + */ +#define MCDI_EVENT_CODE_ENUM_PORTS_CHANGE 0x2b /* enum: Artificial event generated by host and posted via MC for test * purposes. */ @@ -1512,6 +1649,14 @@ #define MCDI_EVENT_LINKCHANGE_DATA_LEN 4 #define MCDI_EVENT_LINKCHANGE_DATA_LBN 0 #define MCDI_EVENT_LINKCHANGE_DATA_WIDTH 32 +#define MCDI_EVENT_PORT_LINKCHANGE_DATA_OFST 0 +#define MCDI_EVENT_PORT_LINKCHANGE_DATA_LEN 4 +#define MCDI_EVENT_PORT_LINKCHANGE_DATA_LBN 0 +#define MCDI_EVENT_PORT_LINKCHANGE_DATA_WIDTH 32 +#define MCDI_EVENT_PORT_MODULECHANGE_DATA_OFST 0 +#define MCDI_EVENT_PORT_MODULECHANGE_DATA_LEN 4 +#define MCDI_EVENT_PORT_MODULECHANGE_DATA_LBN 0 +#define MCDI_EVENT_PORT_MODULECHANGE_DATA_WIDTH 32 #define MCDI_EVENT_SENSOREVT_DATA_OFST 0 #define MCDI_EVENT_SENSOREVT_DATA_LEN 4 #define MCDI_EVENT_SENSOREVT_DATA_LBN 0 @@ -1668,247 +1813,6 @@ #define MCDI_EVENT_DESC_PROXY_VIRTIO_FEATURES_LBN 0 #define MCDI_EVENT_DESC_PROXY_VIRTIO_FEATURES_WIDTH 32 -/* FCDI_EVENT structuredef */ -#define FCDI_EVENT_LEN 8 -#define FCDI_EVENT_CONT_LBN 32 -#define FCDI_EVENT_CONT_WIDTH 1 -#define FCDI_EVENT_LEVEL_LBN 33 -#define FCDI_EVENT_LEVEL_WIDTH 3 -/* enum: Info. */ -#define FCDI_EVENT_LEVEL_INFO 0x0 -/* enum: Warning. */ -#define FCDI_EVENT_LEVEL_WARN 0x1 -/* enum: Error. */ -#define FCDI_EVENT_LEVEL_ERR 0x2 -/* enum: Fatal. */ -#define FCDI_EVENT_LEVEL_FATAL 0x3 -#define FCDI_EVENT_DATA_OFST 0 -#define FCDI_EVENT_DATA_LEN 4 -#define FCDI_EVENT_LINK_STATE_STATUS_OFST 0 -#define FCDI_EVENT_LINK_STATE_STATUS_LBN 0 -#define FCDI_EVENT_LINK_STATE_STATUS_WIDTH 1 -#define FCDI_EVENT_LINK_DOWN 0x0 /* enum */ -#define FCDI_EVENT_LINK_UP 0x1 /* enum */ -#define FCDI_EVENT_DATA_LBN 0 -#define FCDI_EVENT_DATA_WIDTH 32 -#define FCDI_EVENT_SRC_LBN 36 -#define FCDI_EVENT_SRC_WIDTH 8 -#define FCDI_EVENT_EV_CODE_LBN 60 -#define FCDI_EVENT_EV_CODE_WIDTH 4 -#define FCDI_EVENT_CODE_LBN 44 -#define FCDI_EVENT_CODE_WIDTH 8 -/* enum: The FC was rebooted. */ -#define FCDI_EVENT_CODE_REBOOT 0x1 -/* enum: Bad assert. */ -#define FCDI_EVENT_CODE_ASSERT 0x2 -/* enum: DDR3 test result. */ -#define FCDI_EVENT_CODE_DDR_TEST_RESULT 0x3 -/* enum: Link status. */ -#define FCDI_EVENT_CODE_LINK_STATE 0x4 -/* enum: A timed read is ready to be serviced. 
*/ -#define FCDI_EVENT_CODE_TIMED_READ 0x5 -/* enum: One or more PPS IN events */ -#define FCDI_EVENT_CODE_PPS_IN 0x6 -/* enum: Tick event from PTP clock */ -#define FCDI_EVENT_CODE_PTP_TICK 0x7 -/* enum: ECC error counters */ -#define FCDI_EVENT_CODE_DDR_ECC_STATUS 0x8 -/* enum: Current status of PTP */ -#define FCDI_EVENT_CODE_PTP_STATUS 0x9 -/* enum: Port id config to map MC-FC port idx */ -#define FCDI_EVENT_CODE_PORT_CONFIG 0xa -/* enum: Boot result or error code */ -#define FCDI_EVENT_CODE_BOOT_RESULT 0xb -#define FCDI_EVENT_REBOOT_SRC_LBN 36 -#define FCDI_EVENT_REBOOT_SRC_WIDTH 8 -#define FCDI_EVENT_REBOOT_FC_FW 0x0 /* enum */ -#define FCDI_EVENT_REBOOT_FC_BOOTLOADER 0x1 /* enum */ -#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_OFST 0 -#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_LEN 4 -#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_LBN 0 -#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_WIDTH 32 -#define FCDI_EVENT_ASSERT_TYPE_LBN 36 -#define FCDI_EVENT_ASSERT_TYPE_WIDTH 8 -#define FCDI_EVENT_DDR_TEST_RESULT_STATUS_CODE_LBN 36 -#define FCDI_EVENT_DDR_TEST_RESULT_STATUS_CODE_WIDTH 8 -#define FCDI_EVENT_DDR_TEST_RESULT_RESULT_OFST 0 -#define FCDI_EVENT_DDR_TEST_RESULT_RESULT_LEN 4 -#define FCDI_EVENT_DDR_TEST_RESULT_RESULT_LBN 0 -#define FCDI_EVENT_DDR_TEST_RESULT_RESULT_WIDTH 32 -#define FCDI_EVENT_LINK_STATE_DATA_OFST 0 -#define FCDI_EVENT_LINK_STATE_DATA_LEN 4 -#define FCDI_EVENT_LINK_STATE_DATA_LBN 0 -#define FCDI_EVENT_LINK_STATE_DATA_WIDTH 32 -#define FCDI_EVENT_PTP_STATE_OFST 0 -#define FCDI_EVENT_PTP_STATE_LEN 4 -#define FCDI_EVENT_PTP_UNDEFINED 0x0 /* enum */ -#define FCDI_EVENT_PTP_SETUP_FAILED 0x1 /* enum */ -#define FCDI_EVENT_PTP_OPERATIONAL 0x2 /* enum */ -#define FCDI_EVENT_PTP_STATE_LBN 0 -#define FCDI_EVENT_PTP_STATE_WIDTH 32 -#define FCDI_EVENT_DDR_ECC_STATUS_BANK_ID_LBN 36 -#define FCDI_EVENT_DDR_ECC_STATUS_BANK_ID_WIDTH 8 -#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_OFST 0 -#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_LEN 4 -#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_LBN 0 -#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_WIDTH 32 -/* Index of MC port being referred to */ -#define FCDI_EVENT_PORT_CONFIG_SRC_LBN 36 -#define FCDI_EVENT_PORT_CONFIG_SRC_WIDTH 8 -/* FC Port index that matches the MC port index in SRC */ -#define FCDI_EVENT_PORT_CONFIG_DATA_OFST 0 -#define FCDI_EVENT_PORT_CONFIG_DATA_LEN 4 -#define FCDI_EVENT_PORT_CONFIG_DATA_LBN 0 -#define FCDI_EVENT_PORT_CONFIG_DATA_WIDTH 32 -#define FCDI_EVENT_BOOT_RESULT_OFST 0 -#define FCDI_EVENT_BOOT_RESULT_LEN 4 -/* Enum values, see field(s): */ -/* MC_CMD_AOE/MC_CMD_AOE_OUT_INFO/FC_BOOT_RESULT */ -#define FCDI_EVENT_BOOT_RESULT_LBN 0 -#define FCDI_EVENT_BOOT_RESULT_WIDTH 32 - -/* FCDI_EXTENDED_EVENT_PPS structuredef: Extended FCDI event to send PPS events - * to the MC. Note that this structure | is overlayed over a normal FCDI event - * such that bits 32-63 containing | event code, level, source etc remain the - * same. 
In this case the data | field of the header is defined to be the - * number of timestamps - */ -#define FCDI_EXTENDED_EVENT_PPS_LENMIN 16 -#define FCDI_EXTENDED_EVENT_PPS_LENMAX 248 -#define FCDI_EXTENDED_EVENT_PPS_LENMAX_MCDI2 1016 -#define FCDI_EXTENDED_EVENT_PPS_LEN(num) (8+8*(num)) -#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_NUM(len) (((len)-8)/8) -/* Number of timestamps following */ -#define FCDI_EXTENDED_EVENT_PPS_COUNT_OFST 0 -#define FCDI_EXTENDED_EVENT_PPS_COUNT_LEN 4 -#define FCDI_EXTENDED_EVENT_PPS_COUNT_LBN 0 -#define FCDI_EXTENDED_EVENT_PPS_COUNT_WIDTH 32 -/* Seconds field of a timestamp record */ -#define FCDI_EXTENDED_EVENT_PPS_SECONDS_OFST 8 -#define FCDI_EXTENDED_EVENT_PPS_SECONDS_LEN 4 -#define FCDI_EXTENDED_EVENT_PPS_SECONDS_LBN 64 -#define FCDI_EXTENDED_EVENT_PPS_SECONDS_WIDTH 32 -/* Nanoseconds field of a timestamp record */ -#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_OFST 12 -#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_LEN 4 -#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_LBN 96 -#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_WIDTH 32 -/* Timestamp records comprising the event */ -#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_OFST 8 -#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LEN 8 -#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LO_OFST 8 -#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LO_LEN 4 -#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LO_LBN 64 -#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LO_WIDTH 32 -#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_HI_OFST 12 -#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_HI_LEN 4 -#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_HI_LBN 96 -#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_HI_WIDTH 32 -#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_MINNUM 1 -#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_MAXNUM 30 -#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_MAXNUM_MCDI2 126 -#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LBN 64 -#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_WIDTH 64 - -/* MUM_EVENT structuredef */ -#define MUM_EVENT_LEN 8 -#define MUM_EVENT_CONT_LBN 32 -#define MUM_EVENT_CONT_WIDTH 1 -#define MUM_EVENT_LEVEL_LBN 33 -#define MUM_EVENT_LEVEL_WIDTH 3 -/* enum: Info. */ -#define MUM_EVENT_LEVEL_INFO 0x0 -/* enum: Warning. */ -#define MUM_EVENT_LEVEL_WARN 0x1 -/* enum: Error. */ -#define MUM_EVENT_LEVEL_ERR 0x2 -/* enum: Fatal. 
*/ -#define MUM_EVENT_LEVEL_FATAL 0x3 -#define MUM_EVENT_DATA_OFST 0 -#define MUM_EVENT_DATA_LEN 4 -#define MUM_EVENT_SENSOR_ID_OFST 0 -#define MUM_EVENT_SENSOR_ID_LBN 0 -#define MUM_EVENT_SENSOR_ID_WIDTH 8 -/* Enum values, see field(s): */ -/* MC_CMD_SENSOR_INFO/MC_CMD_SENSOR_INFO_OUT/MASK */ -#define MUM_EVENT_SENSOR_STATE_OFST 0 -#define MUM_EVENT_SENSOR_STATE_LBN 8 -#define MUM_EVENT_SENSOR_STATE_WIDTH 8 -#define MUM_EVENT_PORT_PHY_READY_OFST 0 -#define MUM_EVENT_PORT_PHY_READY_LBN 0 -#define MUM_EVENT_PORT_PHY_READY_WIDTH 1 -#define MUM_EVENT_PORT_PHY_LINK_UP_OFST 0 -#define MUM_EVENT_PORT_PHY_LINK_UP_LBN 1 -#define MUM_EVENT_PORT_PHY_LINK_UP_WIDTH 1 -#define MUM_EVENT_PORT_PHY_TX_LOL_OFST 0 -#define MUM_EVENT_PORT_PHY_TX_LOL_LBN 2 -#define MUM_EVENT_PORT_PHY_TX_LOL_WIDTH 1 -#define MUM_EVENT_PORT_PHY_RX_LOL_OFST 0 -#define MUM_EVENT_PORT_PHY_RX_LOL_LBN 3 -#define MUM_EVENT_PORT_PHY_RX_LOL_WIDTH 1 -#define MUM_EVENT_PORT_PHY_TX_LOS_OFST 0 -#define MUM_EVENT_PORT_PHY_TX_LOS_LBN 4 -#define MUM_EVENT_PORT_PHY_TX_LOS_WIDTH 1 -#define MUM_EVENT_PORT_PHY_RX_LOS_OFST 0 -#define MUM_EVENT_PORT_PHY_RX_LOS_LBN 5 -#define MUM_EVENT_PORT_PHY_RX_LOS_WIDTH 1 -#define MUM_EVENT_PORT_PHY_TX_FAULT_OFST 0 -#define MUM_EVENT_PORT_PHY_TX_FAULT_LBN 6 -#define MUM_EVENT_PORT_PHY_TX_FAULT_WIDTH 1 -#define MUM_EVENT_DATA_LBN 0 -#define MUM_EVENT_DATA_WIDTH 32 -#define MUM_EVENT_SRC_LBN 36 -#define MUM_EVENT_SRC_WIDTH 8 -#define MUM_EVENT_EV_CODE_LBN 60 -#define MUM_EVENT_EV_CODE_WIDTH 4 -#define MUM_EVENT_CODE_LBN 44 -#define MUM_EVENT_CODE_WIDTH 8 -/* enum: The MUM was rebooted. */ -#define MUM_EVENT_CODE_REBOOT 0x1 -/* enum: Bad assert. */ -#define MUM_EVENT_CODE_ASSERT 0x2 -/* enum: Sensor failure. */ -#define MUM_EVENT_CODE_SENSOR 0x3 -/* enum: Link fault has been asserted, or has cleared. 
*/ -#define MUM_EVENT_CODE_QSFP_LASI_INTERRUPT 0x4 -#define MUM_EVENT_SENSOR_DATA_OFST 0 -#define MUM_EVENT_SENSOR_DATA_LEN 4 -#define MUM_EVENT_SENSOR_DATA_LBN 0 -#define MUM_EVENT_SENSOR_DATA_WIDTH 32 -#define MUM_EVENT_PORT_PHY_FLAGS_OFST 0 -#define MUM_EVENT_PORT_PHY_FLAGS_LEN 4 -#define MUM_EVENT_PORT_PHY_FLAGS_LBN 0 -#define MUM_EVENT_PORT_PHY_FLAGS_WIDTH 32 -#define MUM_EVENT_PORT_PHY_COPPER_LEN_OFST 0 -#define MUM_EVENT_PORT_PHY_COPPER_LEN_LEN 4 -#define MUM_EVENT_PORT_PHY_COPPER_LEN_LBN 0 -#define MUM_EVENT_PORT_PHY_COPPER_LEN_WIDTH 32 -#define MUM_EVENT_PORT_PHY_CAPS_OFST 0 -#define MUM_EVENT_PORT_PHY_CAPS_LEN 4 -#define MUM_EVENT_PORT_PHY_CAPS_LBN 0 -#define MUM_EVENT_PORT_PHY_CAPS_WIDTH 32 -#define MUM_EVENT_PORT_PHY_TECH_OFST 0 -#define MUM_EVENT_PORT_PHY_TECH_LEN 4 -#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_UNKNOWN 0x0 /* enum */ -#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_OPTICAL 0x1 /* enum */ -#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_PASSIVE 0x2 /* enum */ -#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_PASSIVE_EQUALIZED 0x3 /* enum */ -#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_ACTIVE_LIMITING 0x4 /* enum */ -#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_ACTIVE_LINEAR 0x5 /* enum */ -#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_BASE_T 0x6 /* enum */ -#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_LOOPBACK_PASSIVE 0x7 /* enum */ -#define MUM_EVENT_PORT_PHY_TECH_LBN 0 -#define MUM_EVENT_PORT_PHY_TECH_WIDTH 32 -#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_LBN 36 -#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_WIDTH 4 -#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_FLAGS 0x0 /* enum */ -#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_COPPER_LEN 0x1 /* enum */ -#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_CAPS 0x2 /* enum */ -#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_TECH 0x3 /* enum */ -#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_MAX 0x4 /* enum */ -#define MUM_EVENT_PORT_PHY_SRC_PORT_NO_LBN 40 -#define MUM_EVENT_PORT_PHY_SRC_PORT_NO_WIDTH 4 - /***********************************/ /* MC_CMD_READ32 @@ -1968,90 +1872,6 @@ #define MC_CMD_WRITE32_OUT_LEN 0 -/***********************************/ -/* MC_CMD_COPYCODE - * Copy MC code between two locations and jump. Note - this command really - * belongs to INSECURE category but is required by shmboot. The command handler - * has additional checks to reject insecure calls. - */ -#define MC_CMD_COPYCODE 0x3 -#undef MC_CMD_0x3_PRIVILEGE_CTG - -#define MC_CMD_0x3_PRIVILEGE_CTG SRIOV_CTG_ADMIN - -/* MC_CMD_COPYCODE_IN msgrequest */ -#define MC_CMD_COPYCODE_IN_LEN 16 -/* Source address - * - * The main image should be entered via a copy of a single word from and to a - * magic address, which controls various aspects of the boot. The magic address - * is a bitfield, with each bit as documented below. 
- */ -#define MC_CMD_COPYCODE_IN_SRC_ADDR_OFST 0 -#define MC_CMD_COPYCODE_IN_SRC_ADDR_LEN 4 -/* enum: Deprecated; equivalent to setting BOOT_MAGIC_PRESENT (see below) */ -#define MC_CMD_COPYCODE_HUNT_NO_MAGIC_ADDR 0x10000 -/* enum: Deprecated; equivalent to setting BOOT_MAGIC_PRESENT and - * BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED (see below) - */ -#define MC_CMD_COPYCODE_HUNT_NO_DATAPATH_MAGIC_ADDR 0x1d0d0 -/* enum: Deprecated; equivalent to setting BOOT_MAGIC_PRESENT, - * BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED and BOOT_MAGIC_IGNORE_CONFIG (see - * below) - */ -#define MC_CMD_COPYCODE_HUNT_IGNORE_CONFIG_MAGIC_ADDR 0x1badc -#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_PRESENT_OFST 0 -#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_PRESENT_LBN 17 -#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_PRESENT_WIDTH 1 -#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED_OFST 0 -#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED_LBN 2 -#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED_WIDTH 1 -#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_IGNORE_CONFIG_OFST 0 -#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_IGNORE_CONFIG_LBN 3 -#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_IGNORE_CONFIG_WIDTH 1 -#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SKIP_BOOT_ICORE_SYNC_OFST 0 -#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SKIP_BOOT_ICORE_SYNC_LBN 4 -#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SKIP_BOOT_ICORE_SYNC_WIDTH 1 -#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_FORCE_STANDALONE_OFST 0 -#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_FORCE_STANDALONE_LBN 5 -#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_FORCE_STANDALONE_WIDTH 1 -#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_DISABLE_XIP_OFST 0 -#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_DISABLE_XIP_LBN 6 -#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_DISABLE_XIP_WIDTH 1 -/* Destination address */ -#define MC_CMD_COPYCODE_IN_DEST_ADDR_OFST 4 -#define MC_CMD_COPYCODE_IN_DEST_ADDR_LEN 4 -#define MC_CMD_COPYCODE_IN_NUMWORDS_OFST 8 -#define MC_CMD_COPYCODE_IN_NUMWORDS_LEN 4 -/* Address of where to jump after copy. */ -#define MC_CMD_COPYCODE_IN_JUMP_OFST 12 -#define MC_CMD_COPYCODE_IN_JUMP_LEN 4 -/* enum: Control should return to the caller rather than jumping */ -#define MC_CMD_COPYCODE_JUMP_NONE 0x1 - -/* MC_CMD_COPYCODE_OUT msgresponse */ -#define MC_CMD_COPYCODE_OUT_LEN 0 - - -/***********************************/ -/* MC_CMD_SET_FUNC - * Select function for function-specific commands. - */ -#define MC_CMD_SET_FUNC 0x4 -#undef MC_CMD_0x4_PRIVILEGE_CTG - -#define MC_CMD_0x4_PRIVILEGE_CTG SRIOV_CTG_INSECURE - -/* MC_CMD_SET_FUNC_IN msgrequest */ -#define MC_CMD_SET_FUNC_IN_LEN 4 -/* Set function */ -#define MC_CMD_SET_FUNC_IN_FUNC_OFST 0 -#define MC_CMD_SET_FUNC_IN_FUNC_LEN 4 - -/* MC_CMD_SET_FUNC_OUT msgresponse */ -#define MC_CMD_SET_FUNC_OUT_LEN 0 - - /***********************************/ /* MC_CMD_GET_BOOT_STATUS * Get the instruction address from which the MC booted. @@ -2259,6 +2079,7 @@ /* Log destination */ #define MC_CMD_LOG_CTRL_IN_LOG_DEST_OFST 0 #define MC_CMD_LOG_CTRL_IN_LOG_DEST_LEN 4 +/* enum property: bitmask */ /* enum: UART. */ #define MC_CMD_LOG_CTRL_IN_LOG_DEST_UART 0x1 /* enum: Event queue. 
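+ * (Editorial note: per the "enum property: bitmask" annotation above, the
+ * destinations can be OR'd together, so a single LOG_DEST value can select
+ * both the UART and the event queue at once.)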
*/
@@ -2304,6 +2125,9 @@

 /* MC_CMD_GET_VERSION_OUT msgresponse */
 #define MC_CMD_GET_VERSION_OUT_LEN 32
+/* This is normally the UTC build time in seconds since epoch or one of the
+ * special values listed
+ */
 /* MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */
 /* MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN 4 */
 /* Enum values, see field(s): */
@@ -2326,6 +2150,9 @@

 /* MC_CMD_GET_VERSION_EXT_OUT msgresponse */
 #define MC_CMD_GET_VERSION_EXT_OUT_LEN 48
+/* This is normally the UTC build time in seconds since epoch or one of the
+ * special values listed
+ */
 /* MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */
 /* MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN 4 */
 /* Enum values, see field(s): */
@@ -2356,6 +2183,9 @@
 * (depending on which components exist on a particular adapter)
 */
 #define MC_CMD_GET_VERSION_V2_OUT_LEN 304
+/* This is normally the UTC build time in seconds since epoch or one of the
+ * special values listed
+ */
 /* MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */
 /* MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN 4 */
 /* Enum values, see field(s): */
@@ -2495,6 +2325,9 @@
 * (depending on which components exist on a particular adapter)
 */
 #define MC_CMD_GET_VERSION_V3_OUT_LEN 328
+/* This is normally the UTC build time in seconds since epoch or one of the
+ * special values listed
+ */
 /* MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */
 /* MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN 4 */
 /* Enum values, see field(s): */
@@ -2641,6 +2474,9 @@
 * version information
 */
 #define MC_CMD_GET_VERSION_V4_OUT_LEN 392
+/* This is normally the UTC build time in seconds since epoch or one of the
+ * special values listed
+ */
 /* MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */
 /* MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN 4 */
 /* Enum values, see field(s): */
@@ -2803,6 +2639,9 @@
 * and board version information
 */
 #define MC_CMD_GET_VERSION_V5_OUT_LEN 424
+/* This is normally the UTC build time in seconds since epoch or one of the
+ * special values listed
+ */
 /* MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */
 /* MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN 4 */
 /* Enum values, see field(s): */
@@ -3065,8 +2904,18 @@
 * subscribers.
 */
 #define MC_CMD_PTP_OP_SET_SYNC_STATUS 0x1b
-/* enum: Above this for future use. */
-#define MC_CMD_PTP_OP_MAX 0x1c
+/* enum: X4 and later adapters should use this instead of
+ * PTP_OP_TIME_EVENT_SUBSCRIBE. Subscribe to receive periodic time events
+ * indicating the current NIC time.
+ */
+#define MC_CMD_PTP_OP_TIME_EVENT_SUBSCRIBE_V2 0x1c
+/* enum: For X4 and later NICs. Packet timestamps and time sync events have
+ * IS_SET and IN_SYNC flags that indicate whether time is updated and if it
+ * is in sync with the host. Once set, the IN_SYNC flag is cleared by hardware
+ * after a software-configurable timeout. The host driver needs to query what
+ * is set and what the maximum supported interval is; this MCDI can be used to
+ * query these.
+ */
+#define MC_CMD_PTP_OP_GET_SYNC_TIMEOUT 0x1d

 /* MC_CMD_PTP_IN_ENABLE msgrequest */
 #define MC_CMD_PTP_IN_ENABLE_LEN 16
@@ -3507,6 +3356,22 @@
 #define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_QUEUE_OFST 12
 #define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_QUEUE_LEN 4

+/* MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_V2 msgrequest */
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_V2_LEN 16
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_CMD_LEN 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+/* Event queue ID */
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_V2_QUEUE_ID_OFST 8
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_V2_QUEUE_ID_LEN 4
+/* Space for flags.
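+ * A hedged sketch (editorial, not from the patch) of building this request
+ * with the MCDI helpers used elsewhere in sfc, where "evq" stands in for a
+ * real event queue ID:
+ *
+ *	MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_V2_LEN);
+ *
+ *	MCDI_SET_DWORD(inbuf, PTP_IN_CMD,
+ *		       MC_CMD_PTP_OP_TIME_EVENT_SUBSCRIBE_V2);
+ *	MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
+ *	MCDI_SET_DWORD(inbuf, PTP_IN_TIME_EVENT_SUBSCRIBE_V2_QUEUE_ID, evq);
+ *	MCDI_POPULATE_DWORD_1(inbuf, PTP_IN_TIME_EVENT_SUBSCRIBE_V2_FLAGS,
+ *			      PTP_IN_TIME_EVENT_SUBSCRIBE_V2_REPORT_SYNC_STATUS,
+ *			      1);
+ *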
*/ +#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_V2_FLAGS_OFST 12 +#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_V2_FLAGS_LEN 4 +#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_V2_REPORT_SYNC_STATUS_OFST 12 +#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_V2_REPORT_SYNC_STATUS_LBN 31 +#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_V2_REPORT_SYNC_STATUS_WIDTH 1 + /* MC_CMD_PTP_IN_MANFTEST_PPS msgrequest */ #define MC_CMD_PTP_IN_MANFTEST_PPS_LEN 12 /* MC_CMD_PTP_IN_CMD_OFST 0 */ @@ -3540,6 +3405,13 @@ #define MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED1_OFST 20 #define MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED1_LEN 4 +/* MC_CMD_PTP_IN_GET_SYNC_TIMEOUT msgrequest */ +#define MC_CMD_PTP_IN_GET_SYNC_TIMEOUT_LEN 8 +/* MC_CMD_PTP_IN_CMD_OFST 0 */ +/* MC_CMD_PTP_IN_CMD_LEN 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ +/* MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */ + /* MC_CMD_PTP_OUT msgresponse */ #define MC_CMD_PTP_OUT_LEN 0 @@ -3939,416 +3811,14 @@ /* MC_CMD_PTP_OUT_SET_SYNC_STATUS msgresponse */ #define MC_CMD_PTP_OUT_SET_SYNC_STATUS_LEN 0 - -/***********************************/ -/* MC_CMD_CSR_READ32 - * Read 32bit words from the indirect memory map. - */ -#define MC_CMD_CSR_READ32 0xc -#undef MC_CMD_0xc_PRIVILEGE_CTG - -#define MC_CMD_0xc_PRIVILEGE_CTG SRIOV_CTG_INSECURE - -/* MC_CMD_CSR_READ32_IN msgrequest */ -#define MC_CMD_CSR_READ32_IN_LEN 12 -/* Address */ -#define MC_CMD_CSR_READ32_IN_ADDR_OFST 0 -#define MC_CMD_CSR_READ32_IN_ADDR_LEN 4 -#define MC_CMD_CSR_READ32_IN_STEP_OFST 4 -#define MC_CMD_CSR_READ32_IN_STEP_LEN 4 -#define MC_CMD_CSR_READ32_IN_NUMWORDS_OFST 8 -#define MC_CMD_CSR_READ32_IN_NUMWORDS_LEN 4 - -/* MC_CMD_CSR_READ32_OUT msgresponse */ -#define MC_CMD_CSR_READ32_OUT_LENMIN 4 -#define MC_CMD_CSR_READ32_OUT_LENMAX 252 -#define MC_CMD_CSR_READ32_OUT_LENMAX_MCDI2 1020 -#define MC_CMD_CSR_READ32_OUT_LEN(num) (0+4*(num)) -#define MC_CMD_CSR_READ32_OUT_BUFFER_NUM(len) (((len)-0)/4) -/* The last dword is the status, not a value read */ -#define MC_CMD_CSR_READ32_OUT_BUFFER_OFST 0 -#define MC_CMD_CSR_READ32_OUT_BUFFER_LEN 4 -#define MC_CMD_CSR_READ32_OUT_BUFFER_MINNUM 1 -#define MC_CMD_CSR_READ32_OUT_BUFFER_MAXNUM 63 -#define MC_CMD_CSR_READ32_OUT_BUFFER_MAXNUM_MCDI2 255 - - -/***********************************/ -/* MC_CMD_CSR_WRITE32 - * Write 32bit dwords to the indirect memory map. - */ -#define MC_CMD_CSR_WRITE32 0xd -#undef MC_CMD_0xd_PRIVILEGE_CTG - -#define MC_CMD_0xd_PRIVILEGE_CTG SRIOV_CTG_INSECURE - -/* MC_CMD_CSR_WRITE32_IN msgrequest */ -#define MC_CMD_CSR_WRITE32_IN_LENMIN 12 -#define MC_CMD_CSR_WRITE32_IN_LENMAX 252 -#define MC_CMD_CSR_WRITE32_IN_LENMAX_MCDI2 1020 -#define MC_CMD_CSR_WRITE32_IN_LEN(num) (8+4*(num)) -#define MC_CMD_CSR_WRITE32_IN_BUFFER_NUM(len) (((len)-8)/4) -/* Address */ -#define MC_CMD_CSR_WRITE32_IN_ADDR_OFST 0 -#define MC_CMD_CSR_WRITE32_IN_ADDR_LEN 4 -#define MC_CMD_CSR_WRITE32_IN_STEP_OFST 4 -#define MC_CMD_CSR_WRITE32_IN_STEP_LEN 4 -#define MC_CMD_CSR_WRITE32_IN_BUFFER_OFST 8 -#define MC_CMD_CSR_WRITE32_IN_BUFFER_LEN 4 -#define MC_CMD_CSR_WRITE32_IN_BUFFER_MINNUM 1 -#define MC_CMD_CSR_WRITE32_IN_BUFFER_MAXNUM 61 -#define MC_CMD_CSR_WRITE32_IN_BUFFER_MAXNUM_MCDI2 253 - -/* MC_CMD_CSR_WRITE32_OUT msgresponse */ -#define MC_CMD_CSR_WRITE32_OUT_LEN 4 -#define MC_CMD_CSR_WRITE32_OUT_STATUS_OFST 0 -#define MC_CMD_CSR_WRITE32_OUT_STATUS_LEN 4 - - -/***********************************/ -/* MC_CMD_HP - * These commands are used for HP related features. They are grouped under one - * MCDI command to avoid creating too many MCDI commands. 
- */ -#define MC_CMD_HP 0x54 -#undef MC_CMD_0x54_PRIVILEGE_CTG - -#define MC_CMD_0x54_PRIVILEGE_CTG SRIOV_CTG_ADMIN - -/* MC_CMD_HP_IN msgrequest */ -#define MC_CMD_HP_IN_LEN 16 -/* HP OCSD sub-command. When address is not NULL, request activation of OCSD at - * the specified address with the specified interval.When address is NULL, - * INTERVAL is interpreted as a command: 0: stop OCSD / 1: Report OCSD current - * state / 2: (debug) Show temperature reported by one of the supported - * sensors. - */ -#define MC_CMD_HP_IN_SUBCMD_OFST 0 -#define MC_CMD_HP_IN_SUBCMD_LEN 4 -/* enum: OCSD (Option Card Sensor Data) sub-command. */ -#define MC_CMD_HP_IN_OCSD_SUBCMD 0x0 -/* enum: Last known valid HP sub-command. */ -#define MC_CMD_HP_IN_LAST_SUBCMD 0x0 -/* The address to the array of sensor fields. (Or NULL to use a sub-command.) - */ -#define MC_CMD_HP_IN_OCSD_ADDR_OFST 4 -#define MC_CMD_HP_IN_OCSD_ADDR_LEN 8 -#define MC_CMD_HP_IN_OCSD_ADDR_LO_OFST 4 -#define MC_CMD_HP_IN_OCSD_ADDR_LO_LEN 4 -#define MC_CMD_HP_IN_OCSD_ADDR_LO_LBN 32 -#define MC_CMD_HP_IN_OCSD_ADDR_LO_WIDTH 32 -#define MC_CMD_HP_IN_OCSD_ADDR_HI_OFST 8 -#define MC_CMD_HP_IN_OCSD_ADDR_HI_LEN 4 -#define MC_CMD_HP_IN_OCSD_ADDR_HI_LBN 64 -#define MC_CMD_HP_IN_OCSD_ADDR_HI_WIDTH 32 -/* The requested update interval, in seconds. (Or the sub-command if ADDR is - * NULL.) - */ -#define MC_CMD_HP_IN_OCSD_INTERVAL_OFST 12 -#define MC_CMD_HP_IN_OCSD_INTERVAL_LEN 4 - -/* MC_CMD_HP_OUT msgresponse */ -#define MC_CMD_HP_OUT_LEN 4 -#define MC_CMD_HP_OUT_OCSD_STATUS_OFST 0 -#define MC_CMD_HP_OUT_OCSD_STATUS_LEN 4 -/* enum: OCSD stopped for this card. */ -#define MC_CMD_HP_OUT_OCSD_STOPPED 0x1 -/* enum: OCSD was successfully started with the address provided. */ -#define MC_CMD_HP_OUT_OCSD_STARTED 0x2 -/* enum: OCSD was already started for this card. */ -#define MC_CMD_HP_OUT_OCSD_ALREADY_STARTED 0x3 - - -/***********************************/ -/* MC_CMD_STACKINFO - * Get stack information. - */ -#define MC_CMD_STACKINFO 0xf -#undef MC_CMD_0xf_PRIVILEGE_CTG - -#define MC_CMD_0xf_PRIVILEGE_CTG SRIOV_CTG_ADMIN - -/* MC_CMD_STACKINFO_IN msgrequest */ -#define MC_CMD_STACKINFO_IN_LEN 0 - -/* MC_CMD_STACKINFO_OUT msgresponse */ -#define MC_CMD_STACKINFO_OUT_LENMIN 12 -#define MC_CMD_STACKINFO_OUT_LENMAX 252 -#define MC_CMD_STACKINFO_OUT_LENMAX_MCDI2 1020 -#define MC_CMD_STACKINFO_OUT_LEN(num) (0+12*(num)) -#define MC_CMD_STACKINFO_OUT_THREAD_INFO_NUM(len) (((len)-0)/12) -/* (thread ptr, stack size, free space) for each thread in system */ -#define MC_CMD_STACKINFO_OUT_THREAD_INFO_OFST 0 -#define MC_CMD_STACKINFO_OUT_THREAD_INFO_LEN 12 -#define MC_CMD_STACKINFO_OUT_THREAD_INFO_MINNUM 1 -#define MC_CMD_STACKINFO_OUT_THREAD_INFO_MAXNUM 21 -#define MC_CMD_STACKINFO_OUT_THREAD_INFO_MAXNUM_MCDI2 85 - - -/***********************************/ -/* MC_CMD_MDIO_READ - * MDIO register read. - */ -#define MC_CMD_MDIO_READ 0x10 -#undef MC_CMD_0x10_PRIVILEGE_CTG - -#define MC_CMD_0x10_PRIVILEGE_CTG SRIOV_CTG_GENERAL - -/* MC_CMD_MDIO_READ_IN msgrequest */ -#define MC_CMD_MDIO_READ_IN_LEN 16 -/* Bus number; there are two MDIO buses: one for the internal PHY, and one for - * external devices. - */ -#define MC_CMD_MDIO_READ_IN_BUS_OFST 0 -#define MC_CMD_MDIO_READ_IN_BUS_LEN 4 -/* enum: Internal. */ -#define MC_CMD_MDIO_BUS_INTERNAL 0x0 -/* enum: External. */ -#define MC_CMD_MDIO_BUS_EXTERNAL 0x1 -/* Port address */ -#define MC_CMD_MDIO_READ_IN_PRTAD_OFST 4 -#define MC_CMD_MDIO_READ_IN_PRTAD_LEN 4 -/* Device Address or clause 22. 
*/ -#define MC_CMD_MDIO_READ_IN_DEVAD_OFST 8 -#define MC_CMD_MDIO_READ_IN_DEVAD_LEN 4 -/* enum: By default all the MCDI MDIO operations perform clause45 mode. If you - * want to use clause22 then set DEVAD = MC_CMD_MDIO_CLAUSE22. - */ -#define MC_CMD_MDIO_CLAUSE22 0x20 -/* Address */ -#define MC_CMD_MDIO_READ_IN_ADDR_OFST 12 -#define MC_CMD_MDIO_READ_IN_ADDR_LEN 4 - -/* MC_CMD_MDIO_READ_OUT msgresponse */ -#define MC_CMD_MDIO_READ_OUT_LEN 8 -/* Value */ -#define MC_CMD_MDIO_READ_OUT_VALUE_OFST 0 -#define MC_CMD_MDIO_READ_OUT_VALUE_LEN 4 -/* Status the MDIO commands return the raw status bits from the MDIO block. A - * "good" transaction should have the DONE bit set and all other bits clear. - */ -#define MC_CMD_MDIO_READ_OUT_STATUS_OFST 4 -#define MC_CMD_MDIO_READ_OUT_STATUS_LEN 4 -/* enum: Good. */ -#define MC_CMD_MDIO_STATUS_GOOD 0x8 - - -/***********************************/ -/* MC_CMD_MDIO_WRITE - * MDIO register write. - */ -#define MC_CMD_MDIO_WRITE 0x11 -#undef MC_CMD_0x11_PRIVILEGE_CTG - -#define MC_CMD_0x11_PRIVILEGE_CTG SRIOV_CTG_ADMIN - -/* MC_CMD_MDIO_WRITE_IN msgrequest */ -#define MC_CMD_MDIO_WRITE_IN_LEN 20 -/* Bus number; there are two MDIO buses: one for the internal PHY, and one for - * external devices. - */ -#define MC_CMD_MDIO_WRITE_IN_BUS_OFST 0 -#define MC_CMD_MDIO_WRITE_IN_BUS_LEN 4 -/* enum: Internal. */ -/* MC_CMD_MDIO_BUS_INTERNAL 0x0 */ -/* enum: External. */ -/* MC_CMD_MDIO_BUS_EXTERNAL 0x1 */ -/* Port address */ -#define MC_CMD_MDIO_WRITE_IN_PRTAD_OFST 4 -#define MC_CMD_MDIO_WRITE_IN_PRTAD_LEN 4 -/* Device Address or clause 22. */ -#define MC_CMD_MDIO_WRITE_IN_DEVAD_OFST 8 -#define MC_CMD_MDIO_WRITE_IN_DEVAD_LEN 4 -/* enum: By default all the MCDI MDIO operations perform clause45 mode. If you - * want to use clause22 then set DEVAD = MC_CMD_MDIO_CLAUSE22. - */ -/* MC_CMD_MDIO_CLAUSE22 0x20 */ -/* Address */ -#define MC_CMD_MDIO_WRITE_IN_ADDR_OFST 12 -#define MC_CMD_MDIO_WRITE_IN_ADDR_LEN 4 -/* Value */ -#define MC_CMD_MDIO_WRITE_IN_VALUE_OFST 16 -#define MC_CMD_MDIO_WRITE_IN_VALUE_LEN 4 - -/* MC_CMD_MDIO_WRITE_OUT msgresponse */ -#define MC_CMD_MDIO_WRITE_OUT_LEN 4 -/* Status; the MDIO commands return the raw status bits from the MDIO block. A - * "good" transaction should have the DONE bit set and all other bits clear. - */ -#define MC_CMD_MDIO_WRITE_OUT_STATUS_OFST 0 -#define MC_CMD_MDIO_WRITE_OUT_STATUS_LEN 4 -/* enum: Good. */ -/* MC_CMD_MDIO_STATUS_GOOD 0x8 */ - - -/***********************************/ -/* MC_CMD_DBI_WRITE - * Write DBI register(s). - */ -#define MC_CMD_DBI_WRITE 0x12 -#undef MC_CMD_0x12_PRIVILEGE_CTG - -#define MC_CMD_0x12_PRIVILEGE_CTG SRIOV_CTG_INSECURE - -/* MC_CMD_DBI_WRITE_IN msgrequest */ -#define MC_CMD_DBI_WRITE_IN_LENMIN 12 -#define MC_CMD_DBI_WRITE_IN_LENMAX 252 -#define MC_CMD_DBI_WRITE_IN_LENMAX_MCDI2 1020 -#define MC_CMD_DBI_WRITE_IN_LEN(num) (0+12*(num)) -#define MC_CMD_DBI_WRITE_IN_DBIWROP_NUM(len) (((len)-0)/12) -/* Each write op consists of an address (offset 0), byte enable/VF/CS2 (offset - * 32) and value (offset 64). See MC_CMD_DBIWROP_TYPEDEF. 
- */ -#define MC_CMD_DBI_WRITE_IN_DBIWROP_OFST 0 -#define MC_CMD_DBI_WRITE_IN_DBIWROP_LEN 12 -#define MC_CMD_DBI_WRITE_IN_DBIWROP_MINNUM 1 -#define MC_CMD_DBI_WRITE_IN_DBIWROP_MAXNUM 21 -#define MC_CMD_DBI_WRITE_IN_DBIWROP_MAXNUM_MCDI2 85 - -/* MC_CMD_DBI_WRITE_OUT msgresponse */ -#define MC_CMD_DBI_WRITE_OUT_LEN 0 - -/* MC_CMD_DBIWROP_TYPEDEF structuredef */ -#define MC_CMD_DBIWROP_TYPEDEF_LEN 12 -#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_OFST 0 -#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_LEN 4 -#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_LBN 0 -#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_WIDTH 32 -#define MC_CMD_DBIWROP_TYPEDEF_PARMS_OFST 4 -#define MC_CMD_DBIWROP_TYPEDEF_PARMS_LEN 4 -#define MC_CMD_DBIWROP_TYPEDEF_VF_NUM_OFST 4 -#define MC_CMD_DBIWROP_TYPEDEF_VF_NUM_LBN 16 -#define MC_CMD_DBIWROP_TYPEDEF_VF_NUM_WIDTH 16 -#define MC_CMD_DBIWROP_TYPEDEF_VF_ACTIVE_OFST 4 -#define MC_CMD_DBIWROP_TYPEDEF_VF_ACTIVE_LBN 15 -#define MC_CMD_DBIWROP_TYPEDEF_VF_ACTIVE_WIDTH 1 -#define MC_CMD_DBIWROP_TYPEDEF_CS2_OFST 4 -#define MC_CMD_DBIWROP_TYPEDEF_CS2_LBN 14 -#define MC_CMD_DBIWROP_TYPEDEF_CS2_WIDTH 1 -#define MC_CMD_DBIWROP_TYPEDEF_PARMS_LBN 32 -#define MC_CMD_DBIWROP_TYPEDEF_PARMS_WIDTH 32 -#define MC_CMD_DBIWROP_TYPEDEF_VALUE_OFST 8 -#define MC_CMD_DBIWROP_TYPEDEF_VALUE_LEN 4 -#define MC_CMD_DBIWROP_TYPEDEF_VALUE_LBN 64 -#define MC_CMD_DBIWROP_TYPEDEF_VALUE_WIDTH 32 - - -/***********************************/ -/* MC_CMD_PORT_READ32 - * Read a 32-bit register from the indirect port register map. The port to - * access is implied by the Shared memory channel used. - */ -#define MC_CMD_PORT_READ32 0x14 - -/* MC_CMD_PORT_READ32_IN msgrequest */ -#define MC_CMD_PORT_READ32_IN_LEN 4 -/* Address */ -#define MC_CMD_PORT_READ32_IN_ADDR_OFST 0 -#define MC_CMD_PORT_READ32_IN_ADDR_LEN 4 - -/* MC_CMD_PORT_READ32_OUT msgresponse */ -#define MC_CMD_PORT_READ32_OUT_LEN 8 -/* Value */ -#define MC_CMD_PORT_READ32_OUT_VALUE_OFST 0 -#define MC_CMD_PORT_READ32_OUT_VALUE_LEN 4 -/* Status */ -#define MC_CMD_PORT_READ32_OUT_STATUS_OFST 4 -#define MC_CMD_PORT_READ32_OUT_STATUS_LEN 4 - - -/***********************************/ -/* MC_CMD_PORT_WRITE32 - * Write a 32-bit register to the indirect port register map. The port to - * access is implied by the Shared memory channel used. - */ -#define MC_CMD_PORT_WRITE32 0x15 - -/* MC_CMD_PORT_WRITE32_IN msgrequest */ -#define MC_CMD_PORT_WRITE32_IN_LEN 8 -/* Address */ -#define MC_CMD_PORT_WRITE32_IN_ADDR_OFST 0 -#define MC_CMD_PORT_WRITE32_IN_ADDR_LEN 4 -/* Value */ -#define MC_CMD_PORT_WRITE32_IN_VALUE_OFST 4 -#define MC_CMD_PORT_WRITE32_IN_VALUE_LEN 4 - -/* MC_CMD_PORT_WRITE32_OUT msgresponse */ -#define MC_CMD_PORT_WRITE32_OUT_LEN 4 -/* Status */ -#define MC_CMD_PORT_WRITE32_OUT_STATUS_OFST 0 -#define MC_CMD_PORT_WRITE32_OUT_STATUS_LEN 4 - - -/***********************************/ -/* MC_CMD_PORT_READ128 - * Read a 128-bit register from the indirect port register map. The port to - * access is implied by the Shared memory channel used. 
- */ -#define MC_CMD_PORT_READ128 0x16 - -/* MC_CMD_PORT_READ128_IN msgrequest */ -#define MC_CMD_PORT_READ128_IN_LEN 4 -/* Address */ -#define MC_CMD_PORT_READ128_IN_ADDR_OFST 0 -#define MC_CMD_PORT_READ128_IN_ADDR_LEN 4 - -/* MC_CMD_PORT_READ128_OUT msgresponse */ -#define MC_CMD_PORT_READ128_OUT_LEN 20 -/* Value */ -#define MC_CMD_PORT_READ128_OUT_VALUE_OFST 0 -#define MC_CMD_PORT_READ128_OUT_VALUE_LEN 16 -/* Status */ -#define MC_CMD_PORT_READ128_OUT_STATUS_OFST 16 -#define MC_CMD_PORT_READ128_OUT_STATUS_LEN 4 - - -/***********************************/ -/* MC_CMD_PORT_WRITE128 - * Write a 128-bit register to the indirect port register map. The port to - * access is implied by the Shared memory channel used. - */ -#define MC_CMD_PORT_WRITE128 0x17 - -/* MC_CMD_PORT_WRITE128_IN msgrequest */ -#define MC_CMD_PORT_WRITE128_IN_LEN 20 -/* Address */ -#define MC_CMD_PORT_WRITE128_IN_ADDR_OFST 0 -#define MC_CMD_PORT_WRITE128_IN_ADDR_LEN 4 -/* Value */ -#define MC_CMD_PORT_WRITE128_IN_VALUE_OFST 4 -#define MC_CMD_PORT_WRITE128_IN_VALUE_LEN 16 - -/* MC_CMD_PORT_WRITE128_OUT msgresponse */ -#define MC_CMD_PORT_WRITE128_OUT_LEN 4 -/* Status */ -#define MC_CMD_PORT_WRITE128_OUT_STATUS_OFST 0 -#define MC_CMD_PORT_WRITE128_OUT_STATUS_LEN 4 - -/* MC_CMD_CAPABILITIES structuredef */ -#define MC_CMD_CAPABILITIES_LEN 4 -/* Small buf table. */ -#define MC_CMD_CAPABILITIES_SMALL_BUF_TBL_LBN 0 -#define MC_CMD_CAPABILITIES_SMALL_BUF_TBL_WIDTH 1 -/* Turbo mode (for Maranello). */ -#define MC_CMD_CAPABILITIES_TURBO_LBN 1 -#define MC_CMD_CAPABILITIES_TURBO_WIDTH 1 -/* Turbo mode active (for Maranello). */ -#define MC_CMD_CAPABILITIES_TURBO_ACTIVE_LBN 2 -#define MC_CMD_CAPABILITIES_TURBO_ACTIVE_WIDTH 1 -/* PTP offload. */ -#define MC_CMD_CAPABILITIES_PTP_LBN 3 -#define MC_CMD_CAPABILITIES_PTP_WIDTH 1 -/* AOE mode. */ -#define MC_CMD_CAPABILITIES_AOE_LBN 4 -#define MC_CMD_CAPABILITIES_AOE_WIDTH 1 -/* AOE mode active. */ -#define MC_CMD_CAPABILITIES_AOE_ACTIVE_LBN 5 -#define MC_CMD_CAPABILITIES_AOE_ACTIVE_WIDTH 1 -/* AOE mode active. 
*/ -#define MC_CMD_CAPABILITIES_FC_ACTIVE_LBN 6 -#define MC_CMD_CAPABILITIES_FC_ACTIVE_WIDTH 1 -#define MC_CMD_CAPABILITIES_RESERVED_LBN 7 -#define MC_CMD_CAPABILITIES_RESERVED_WIDTH 25 +/* MC_CMD_PTP_OUT_GET_SYNC_TIMEOUT msgresponse */ +#define MC_CMD_PTP_OUT_GET_SYNC_TIMEOUT_LEN 8 +/* Current value set in NIC, in seconds */ +#define MC_CMD_PTP_OUT_GET_SYNC_TIMEOUT_CURRENT_OFST 0 +#define MC_CMD_PTP_OUT_GET_SYNC_TIMEOUT_CURRENT_LEN 4 +/* Maximum supported by NIC, in seconds */ +#define MC_CMD_PTP_OUT_GET_SYNC_TIMEOUT_MAXIMUM_OFST 4 +#define MC_CMD_PTP_OUT_GET_SYNC_TIMEOUT_MAXIMUM_LEN 4 /***********************************/ @@ -4426,112 +3896,6 @@ #define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MAXNUM_MCDI2 32 -/***********************************/ -/* MC_CMD_DBI_READX - * Read DBI register(s) -- extended functionality - */ -#define MC_CMD_DBI_READX 0x19 -#undef MC_CMD_0x19_PRIVILEGE_CTG - -#define MC_CMD_0x19_PRIVILEGE_CTG SRIOV_CTG_INSECURE - -/* MC_CMD_DBI_READX_IN msgrequest */ -#define MC_CMD_DBI_READX_IN_LENMIN 8 -#define MC_CMD_DBI_READX_IN_LENMAX 248 -#define MC_CMD_DBI_READX_IN_LENMAX_MCDI2 1016 -#define MC_CMD_DBI_READX_IN_LEN(num) (0+8*(num)) -#define MC_CMD_DBI_READX_IN_DBIRDOP_NUM(len) (((len)-0)/8) -/* Each Read op consists of an address (offset 0), VF/CS2) */ -#define MC_CMD_DBI_READX_IN_DBIRDOP_OFST 0 -#define MC_CMD_DBI_READX_IN_DBIRDOP_LEN 8 -#define MC_CMD_DBI_READX_IN_DBIRDOP_LO_OFST 0 -#define MC_CMD_DBI_READX_IN_DBIRDOP_LO_LEN 4 -#define MC_CMD_DBI_READX_IN_DBIRDOP_LO_LBN 0 -#define MC_CMD_DBI_READX_IN_DBIRDOP_LO_WIDTH 32 -#define MC_CMD_DBI_READX_IN_DBIRDOP_HI_OFST 4 -#define MC_CMD_DBI_READX_IN_DBIRDOP_HI_LEN 4 -#define MC_CMD_DBI_READX_IN_DBIRDOP_HI_LBN 32 -#define MC_CMD_DBI_READX_IN_DBIRDOP_HI_WIDTH 32 -#define MC_CMD_DBI_READX_IN_DBIRDOP_MINNUM 1 -#define MC_CMD_DBI_READX_IN_DBIRDOP_MAXNUM 31 -#define MC_CMD_DBI_READX_IN_DBIRDOP_MAXNUM_MCDI2 127 - -/* MC_CMD_DBI_READX_OUT msgresponse */ -#define MC_CMD_DBI_READX_OUT_LENMIN 4 -#define MC_CMD_DBI_READX_OUT_LENMAX 252 -#define MC_CMD_DBI_READX_OUT_LENMAX_MCDI2 1020 -#define MC_CMD_DBI_READX_OUT_LEN(num) (0+4*(num)) -#define MC_CMD_DBI_READX_OUT_VALUE_NUM(len) (((len)-0)/4) -/* Value */ -#define MC_CMD_DBI_READX_OUT_VALUE_OFST 0 -#define MC_CMD_DBI_READX_OUT_VALUE_LEN 4 -#define MC_CMD_DBI_READX_OUT_VALUE_MINNUM 1 -#define MC_CMD_DBI_READX_OUT_VALUE_MAXNUM 63 -#define MC_CMD_DBI_READX_OUT_VALUE_MAXNUM_MCDI2 255 - -/* MC_CMD_DBIRDOP_TYPEDEF structuredef */ -#define MC_CMD_DBIRDOP_TYPEDEF_LEN 8 -#define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_OFST 0 -#define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_LEN 4 -#define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_LBN 0 -#define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_WIDTH 32 -#define MC_CMD_DBIRDOP_TYPEDEF_PARMS_OFST 4 -#define MC_CMD_DBIRDOP_TYPEDEF_PARMS_LEN 4 -#define MC_CMD_DBIRDOP_TYPEDEF_VF_NUM_OFST 4 -#define MC_CMD_DBIRDOP_TYPEDEF_VF_NUM_LBN 16 -#define MC_CMD_DBIRDOP_TYPEDEF_VF_NUM_WIDTH 16 -#define MC_CMD_DBIRDOP_TYPEDEF_VF_ACTIVE_OFST 4 -#define MC_CMD_DBIRDOP_TYPEDEF_VF_ACTIVE_LBN 15 -#define MC_CMD_DBIRDOP_TYPEDEF_VF_ACTIVE_WIDTH 1 -#define MC_CMD_DBIRDOP_TYPEDEF_CS2_OFST 4 -#define MC_CMD_DBIRDOP_TYPEDEF_CS2_LBN 14 -#define MC_CMD_DBIRDOP_TYPEDEF_CS2_WIDTH 1 -#define MC_CMD_DBIRDOP_TYPEDEF_PARMS_LBN 32 -#define MC_CMD_DBIRDOP_TYPEDEF_PARMS_WIDTH 32 - - -/***********************************/ -/* MC_CMD_SET_RAND_SEED - * Set the 16byte seed for the MC pseudo-random generator. 
- */ -#define MC_CMD_SET_RAND_SEED 0x1a -#undef MC_CMD_0x1a_PRIVILEGE_CTG - -#define MC_CMD_0x1a_PRIVILEGE_CTG SRIOV_CTG_INSECURE - -/* MC_CMD_SET_RAND_SEED_IN msgrequest */ -#define MC_CMD_SET_RAND_SEED_IN_LEN 16 -/* Seed value. */ -#define MC_CMD_SET_RAND_SEED_IN_SEED_OFST 0 -#define MC_CMD_SET_RAND_SEED_IN_SEED_LEN 16 - -/* MC_CMD_SET_RAND_SEED_OUT msgresponse */ -#define MC_CMD_SET_RAND_SEED_OUT_LEN 0 - - -/***********************************/ -/* MC_CMD_LTSSM_HIST - * Retrieve the history of the LTSSM, if the build supports it. - */ -#define MC_CMD_LTSSM_HIST 0x1b - -/* MC_CMD_LTSSM_HIST_IN msgrequest */ -#define MC_CMD_LTSSM_HIST_IN_LEN 0 - -/* MC_CMD_LTSSM_HIST_OUT msgresponse */ -#define MC_CMD_LTSSM_HIST_OUT_LENMIN 0 -#define MC_CMD_LTSSM_HIST_OUT_LENMAX 252 -#define MC_CMD_LTSSM_HIST_OUT_LENMAX_MCDI2 1020 -#define MC_CMD_LTSSM_HIST_OUT_LEN(num) (0+4*(num)) -#define MC_CMD_LTSSM_HIST_OUT_DATA_NUM(len) (((len)-0)/4) -/* variable number of LTSSM values, as bytes. The history is read-to-clear. */ -#define MC_CMD_LTSSM_HIST_OUT_DATA_OFST 0 -#define MC_CMD_LTSSM_HIST_OUT_DATA_LEN 4 -#define MC_CMD_LTSSM_HIST_OUT_DATA_MINNUM 0 -#define MC_CMD_LTSSM_HIST_OUT_DATA_MAXNUM 63 -#define MC_CMD_LTSSM_HIST_OUT_DATA_MAXNUM_MCDI2 255 - - /***********************************/ /* MC_CMD_DRV_ATTACH * Inform MCPU that this port is managed on the host (i.e. driver active). For @@ -4705,6 +4069,7 @@ /* Flags associated with this function */ #define MC_CMD_DRV_ATTACH_EXT_OUT_FUNC_FLAGS_OFST 4 #define MC_CMD_DRV_ATTACH_EXT_OUT_FUNC_FLAGS_LEN 4 +/* enum property: bitshift */ /* enum: Labels the lowest-numbered function visible to the OS */ #define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY 0x0 /* enum: The function can control the link state of the physical port it is @@ -4731,22 +4096,6 @@ #define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TX_ONLY_VI_SPREADING_ENABLED 0x5 -/***********************************/ -/* MC_CMD_SHMUART - * Route UART output to circular buffer in shared memory instead. - */ -#define MC_CMD_SHMUART 0x1f - -/* MC_CMD_SHMUART_IN msgrequest */ -#define MC_CMD_SHMUART_IN_LEN 4 -/* ??? */ -#define MC_CMD_SHMUART_IN_FLAG_OFST 0 -#define MC_CMD_SHMUART_IN_FLAG_LEN 4 - -/* MC_CMD_SHMUART_OUT msgresponse */ -#define MC_CMD_SHMUART_OUT_LEN 0 - - /***********************************/ /* MC_CMD_PORT_RESET * Generic per-port reset. There is no equivalent for per-board reset. Locks @@ -4789,100 +4138,6 @@ #define MC_CMD_ENTITY_RESET_OUT_LEN 0 -/***********************************/ -/* MC_CMD_PCIE_CREDITS - * Read instantaneous and minimum flow control thresholds. - */ -#define MC_CMD_PCIE_CREDITS 0x21 - -/* MC_CMD_PCIE_CREDITS_IN msgrequest */ -#define MC_CMD_PCIE_CREDITS_IN_LEN 8 -/* poll period. 
0 is disabled */ -#define MC_CMD_PCIE_CREDITS_IN_POLL_PERIOD_OFST 0 -#define MC_CMD_PCIE_CREDITS_IN_POLL_PERIOD_LEN 4 -/* wipe statistics */ -#define MC_CMD_PCIE_CREDITS_IN_WIPE_OFST 4 -#define MC_CMD_PCIE_CREDITS_IN_WIPE_LEN 4 - -/* MC_CMD_PCIE_CREDITS_OUT msgresponse */ -#define MC_CMD_PCIE_CREDITS_OUT_LEN 16 -#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_P_HDR_OFST 0 -#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_P_HDR_LEN 2 -#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_P_DATA_OFST 2 -#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_P_DATA_LEN 2 -#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_NP_HDR_OFST 4 -#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_NP_HDR_LEN 2 -#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_NP_DATA_OFST 6 -#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_NP_DATA_LEN 2 -#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_P_HDR_OFST 8 -#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_P_HDR_LEN 2 -#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_P_DATA_OFST 10 -#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_P_DATA_LEN 2 -#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_NP_HDR_OFST 12 -#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_NP_HDR_LEN 2 -#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_NP_DATA_OFST 14 -#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_NP_DATA_LEN 2 - - -/***********************************/ -/* MC_CMD_RXD_MONITOR - * Get histogram of RX queue fill level. - */ -#define MC_CMD_RXD_MONITOR 0x22 - -/* MC_CMD_RXD_MONITOR_IN msgrequest */ -#define MC_CMD_RXD_MONITOR_IN_LEN 12 -#define MC_CMD_RXD_MONITOR_IN_QID_OFST 0 -#define MC_CMD_RXD_MONITOR_IN_QID_LEN 4 -#define MC_CMD_RXD_MONITOR_IN_POLL_PERIOD_OFST 4 -#define MC_CMD_RXD_MONITOR_IN_POLL_PERIOD_LEN 4 -#define MC_CMD_RXD_MONITOR_IN_WIPE_OFST 8 -#define MC_CMD_RXD_MONITOR_IN_WIPE_LEN 4 - -/* MC_CMD_RXD_MONITOR_OUT msgresponse */ -#define MC_CMD_RXD_MONITOR_OUT_LEN 80 -#define MC_CMD_RXD_MONITOR_OUT_QID_OFST 0 -#define MC_CMD_RXD_MONITOR_OUT_QID_LEN 4 -#define MC_CMD_RXD_MONITOR_OUT_RING_FILL_OFST 4 -#define MC_CMD_RXD_MONITOR_OUT_RING_FILL_LEN 4 -#define MC_CMD_RXD_MONITOR_OUT_CACHE_FILL_OFST 8 -#define MC_CMD_RXD_MONITOR_OUT_CACHE_FILL_LEN 4 -#define MC_CMD_RXD_MONITOR_OUT_RING_LT_1_OFST 12 -#define MC_CMD_RXD_MONITOR_OUT_RING_LT_1_LEN 4 -#define MC_CMD_RXD_MONITOR_OUT_RING_LT_2_OFST 16 -#define MC_CMD_RXD_MONITOR_OUT_RING_LT_2_LEN 4 -#define MC_CMD_RXD_MONITOR_OUT_RING_LT_4_OFST 20 -#define MC_CMD_RXD_MONITOR_OUT_RING_LT_4_LEN 4 -#define MC_CMD_RXD_MONITOR_OUT_RING_LT_8_OFST 24 -#define MC_CMD_RXD_MONITOR_OUT_RING_LT_8_LEN 4 -#define MC_CMD_RXD_MONITOR_OUT_RING_LT_16_OFST 28 -#define MC_CMD_RXD_MONITOR_OUT_RING_LT_16_LEN 4 -#define MC_CMD_RXD_MONITOR_OUT_RING_LT_32_OFST 32 -#define MC_CMD_RXD_MONITOR_OUT_RING_LT_32_LEN 4 -#define MC_CMD_RXD_MONITOR_OUT_RING_LT_64_OFST 36 -#define MC_CMD_RXD_MONITOR_OUT_RING_LT_64_LEN 4 -#define MC_CMD_RXD_MONITOR_OUT_RING_LT_128_OFST 40 -#define MC_CMD_RXD_MONITOR_OUT_RING_LT_128_LEN 4 -#define MC_CMD_RXD_MONITOR_OUT_RING_LT_256_OFST 44 -#define MC_CMD_RXD_MONITOR_OUT_RING_LT_256_LEN 4 -#define MC_CMD_RXD_MONITOR_OUT_RING_GE_256_OFST 48 -#define MC_CMD_RXD_MONITOR_OUT_RING_GE_256_LEN 4 -#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_1_OFST 52 -#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_1_LEN 4 -#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_2_OFST 56 -#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_2_LEN 4 -#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_4_OFST 60 -#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_4_LEN 4 -#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_8_OFST 64 -#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_8_LEN 4 -#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_16_OFST 68 -#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_16_LEN 4 -#define 
MC_CMD_RXD_MONITOR_OUT_CACHE_LT_32_OFST 72 -#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_32_LEN 4 -#define MC_CMD_RXD_MONITOR_OUT_CACHE_GE_32_OFST 76 -#define MC_CMD_RXD_MONITOR_OUT_CACHE_GE_32_LEN 4 - - /***********************************/ /* MC_CMD_PUTS * Copy the given ASCII string out onto UART and/or out of the network port. @@ -4931,6 +4186,54 @@ /* MC_CMD_GET_PHY_CFG_IN msgrequest */ #define MC_CMD_GET_PHY_CFG_IN_LEN 0 +/* MC_CMD_GET_PHY_CFG_IN_V2 msgrequest */ +#define MC_CMD_GET_PHY_CFG_IN_V2_LEN 8 +/* Target port to request PHY state for. Uses MAE_LINK_ENDPOINT_SELECTOR which + * identifies a real or virtual network port by MAE port and link end. See the + * structure definition for more details + */ +#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_OFST 0 +#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_LEN 8 +#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_LO_OFST 0 +#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_LO_LEN 4 +#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_LO_LBN 0 +#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_LO_WIDTH 32 +#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_HI_OFST 4 +#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_HI_LEN 4 +#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_HI_LBN 32 +#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_HI_WIDTH 32 +/* See structuredef: MAE_LINK_ENDPOINT_SELECTOR */ +#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_OFST 0 +#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_LEN 4 +#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_FLAT_OFST 0 +#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_FLAT_LEN 4 +#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_TYPE_OFST 3 +#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_TYPE_LEN 1 +#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_MPORT_ID_OFST 0 +#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_MPORT_ID_LEN 3 +#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_PPORT_ID_LBN 0 +#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_PPORT_ID_WIDTH 4 +#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_FUNC_INTF_ID_LBN 20 +#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_FUNC_INTF_ID_WIDTH 4 +#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_FUNC_MH_PF_ID_LBN 16 +#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_FUNC_MH_PF_ID_WIDTH 4 +#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_FUNC_PF_ID_OFST 2 +#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_FUNC_PF_ID_LEN 1 +#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_FUNC_VF_ID_OFST 0 +#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_MPORT_SELECTOR_FUNC_VF_ID_LEN 2 +#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_LINK_END_OFST 4 +#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_LINK_END_LEN 4 +#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_FLAT_OFST 0 +#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_FLAT_LEN 8 +#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_FLAT_LO_OFST 0 +#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_FLAT_LO_LEN 4 +#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_FLAT_LO_LBN 0 +#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_FLAT_LO_WIDTH 32 +#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_FLAT_HI_OFST 4 +#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_FLAT_HI_LEN 4 +#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_FLAT_HI_LBN 32 +#define MC_CMD_GET_PHY_CFG_IN_V2_TARGET_FLAT_HI_WIDTH 32 + /* MC_CMD_GET_PHY_CFG_OUT msgresponse */ #define MC_CMD_GET_PHY_CFG_OUT_LEN 72 /* flags */ @@ -5026,6 +4329,9 @@ #define MC_CMD_PHY_CAP_25G_BASER_FEC_REQUESTED_OFST 8 #define MC_CMD_PHY_CAP_25G_BASER_FEC_REQUESTED_LBN 21 #define MC_CMD_PHY_CAP_25G_BASER_FEC_REQUESTED_WIDTH 1 +#define MC_CMD_PHY_CAP_200000FDX_OFST 8 +#define 
MC_CMD_PHY_CAP_200000FDX_LBN 22 +#define MC_CMD_PHY_CAP_200000FDX_WIDTH 1 /* ?? */ #define MC_CMD_GET_PHY_CFG_OUT_CHANNEL_OFST 12 #define MC_CMD_GET_PHY_CFG_OUT_CHANNEL_LEN 4 @@ -5059,6 +4365,7 @@ #define MC_CMD_MEDIA_DSFP 0x8 #define MC_CMD_GET_PHY_CFG_OUT_MMD_MASK_OFST 48 #define MC_CMD_GET_PHY_CFG_OUT_MMD_MASK_LEN 4 +/* enum property: bitshift */ /* enum: Native clause 22 */ #define MC_CMD_MMD_CLAUSE22 0x0 #define MC_CMD_MMD_CLAUSE45_PMAPMD 0x1 /* enum */ @@ -5084,7 +4391,7 @@ #define MC_CMD_START_BIST 0x25 #undef MC_CMD_0x25_PRIVILEGE_CTG -#define MC_CMD_0x25_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0x25_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND /* MC_CMD_START_BIST_IN msgrequest */ #define MC_CMD_START_BIST_IN_LEN 4 @@ -5124,7 +4431,7 @@ #define MC_CMD_POLL_BIST 0x26 #undef MC_CMD_0x26_PRIVILEGE_CTG -#define MC_CMD_0x26_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0x26_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND /* MC_CMD_POLL_BIST_IN msgrequest */ #define MC_CMD_POLL_BIST_IN_LEN 0 @@ -5281,33 +4588,6 @@ #define MC_CMD_POLL_BIST_OUT_MEM_ECC_FATAL_LEN 4 -/***********************************/ -/* MC_CMD_FLUSH_RX_QUEUES - * Flush receive queue(s). If SRIOV is enabled (via MC_CMD_SRIOV), then RXQ - * flushes should be initiated via this MCDI operation, rather than via - * directly writing FLUSH_CMD. - * - * The flush is completed (either done/fail) asynchronously (after this command - * returns). The driver must still wait for flush done/failure events as usual. - */ -#define MC_CMD_FLUSH_RX_QUEUES 0x27 - -/* MC_CMD_FLUSH_RX_QUEUES_IN msgrequest */ -#define MC_CMD_FLUSH_RX_QUEUES_IN_LENMIN 4 -#define MC_CMD_FLUSH_RX_QUEUES_IN_LENMAX 252 -#define MC_CMD_FLUSH_RX_QUEUES_IN_LENMAX_MCDI2 1020 -#define MC_CMD_FLUSH_RX_QUEUES_IN_LEN(num) (0+4*(num)) -#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_NUM(len) (((len)-0)/4) -#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_OFST 0 -#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_LEN 4 -#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MINNUM 1 -#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM 63 -#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM_MCDI2 255 - -/* MC_CMD_FLUSH_RX_QUEUES_OUT msgresponse */ -#define MC_CMD_FLUSH_RX_QUEUES_OUT_LEN 0 - - /***********************************/ /* MC_CMD_GET_LOOPBACK_MODES * Returns a bitmask of loopback modes available at each speed. @@ -5320,6 +4600,54 @@ /* MC_CMD_GET_LOOPBACK_MODES_IN msgrequest */ #define MC_CMD_GET_LOOPBACK_MODES_IN_LEN 0 +/* MC_CMD_GET_LOOPBACK_MODES_IN_V2 msgrequest */ +#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_LEN 8 +/* Target port to request loopback modes for. Uses MAE_LINK_ENDPOINT_SELECTOR + * which identifies a real or virtual network port by MAE port and link end. 
+ * See the structure definition for more details + */ +#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_OFST 0 +#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_LEN 8 +#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_LO_OFST 0 +#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_LO_LEN 4 +#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_LO_LBN 0 +#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_LO_WIDTH 32 +#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_HI_OFST 4 +#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_HI_LEN 4 +#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_HI_LBN 32 +#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_HI_WIDTH 32 +/* See structuredef: MAE_LINK_ENDPOINT_SELECTOR */ +#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_OFST 0 +#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_LEN 4 +#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_FLAT_OFST 0 +#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_FLAT_LEN 4 +#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_TYPE_OFST 3 +#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_TYPE_LEN 1 +#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_MPORT_ID_OFST 0 +#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_MPORT_ID_LEN 3 +#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_PPORT_ID_LBN 0 +#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_PPORT_ID_WIDTH 4 +#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_FUNC_INTF_ID_LBN 20 +#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_FUNC_INTF_ID_WIDTH 4 +#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_FUNC_MH_PF_ID_LBN 16 +#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_FUNC_MH_PF_ID_WIDTH 4 +#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_FUNC_PF_ID_OFST 2 +#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_FUNC_PF_ID_LEN 1 +#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_FUNC_VF_ID_OFST 0 +#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_MPORT_SELECTOR_FUNC_VF_ID_LEN 2 +#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_LINK_END_OFST 4 +#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_LINK_END_LEN 4 +#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_FLAT_OFST 0 +#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_FLAT_LEN 8 +#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_FLAT_LO_OFST 0 +#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_FLAT_LO_LEN 4 +#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_FLAT_LO_LBN 0 +#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_FLAT_LO_WIDTH 32 +#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_FLAT_HI_OFST 4 +#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_FLAT_HI_LEN 4 +#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_FLAT_HI_LBN 32 +#define MC_CMD_GET_LOOPBACK_MODES_IN_V2_TARGET_FLAT_HI_WIDTH 32 + /* MC_CMD_GET_LOOPBACK_MODES_OUT msgresponse */ #define MC_CMD_GET_LOOPBACK_MODES_OUT_LEN 40 /* Supported loopbacks. */ @@ -5333,6 +4661,7 @@ #define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_HI_LEN 4 #define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_HI_LBN 32 #define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_HI_WIDTH 32 +/* enum property: bitshift */ /* enum: None. */ #define MC_CMD_LOOPBACK_NONE 0x0 /* enum: Data. */ @@ -5422,6 +4751,7 @@ #define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_HI_LEN 4 #define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_HI_LBN 96 #define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_HI_WIDTH 32 +/* enum property: bitshift */ /* Enum values, see field(s): */ /* 100M */ /* Supported loopbacks. 
*/ @@ -5435,6 +4765,7 @@ #define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_HI_LEN 4 #define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_HI_LBN 160 #define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_HI_WIDTH 32 +/* enum property: bitshift */ /* Enum values, see field(s): */ /* 100M */ /* Supported loopbacks. */ @@ -5448,6 +4779,7 @@ #define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_HI_LEN 4 #define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_HI_LBN 224 #define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_HI_WIDTH 32 +/* enum property: bitshift */ /* Enum values, see field(s): */ /* 100M */ /* Supported loopbacks. */ @@ -5461,6 +4793,7 @@ #define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_HI_LEN 4 #define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_HI_LBN 288 #define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_HI_WIDTH 32 +/* enum property: bitshift */ /* Enum values, see field(s): */ /* 100M */ @@ -5479,6 +4812,7 @@ #define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_HI_LEN 4 #define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_HI_LBN 32 #define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_HI_WIDTH 32 +/* enum property: bitshift */ /* enum: None. */ /* MC_CMD_LOOPBACK_NONE 0x0 */ /* enum: Data. */ @@ -5568,6 +4902,7 @@ #define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_HI_LEN 4 #define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_HI_LBN 96 #define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_HI_WIDTH 32 +/* enum property: bitshift */ /* Enum values, see field(s): */ /* 100M */ /* Supported loopbacks. */ @@ -5581,6 +4916,7 @@ #define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_HI_LEN 4 #define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_HI_LBN 160 #define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_HI_WIDTH 32 +/* enum property: bitshift */ /* Enum values, see field(s): */ /* 100M */ /* Supported loopbacks. */ @@ -5594,6 +4930,7 @@ #define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_HI_LEN 4 #define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_HI_LBN 224 #define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_HI_WIDTH 32 +/* enum property: bitshift */ /* Enum values, see field(s): */ /* 100M */ /* Supported loopbacks. */ @@ -5607,6 +4944,7 @@ #define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_HI_LEN 4 #define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_HI_LBN 288 #define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_HI_WIDTH 32 +/* enum property: bitshift */ /* Enum values, see field(s): */ /* 100M */ /* Supported 25G loopbacks. */ @@ -5620,6 +4958,7 @@ #define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_HI_LEN 4 #define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_HI_LBN 352 #define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_HI_WIDTH 32 +/* enum property: bitshift */ /* Enum values, see field(s): */ /* 100M */ /* Supported 50 loopbacks. */ @@ -5633,6 +4972,7 @@ #define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_HI_LEN 4 #define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_HI_LBN 416 #define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_HI_WIDTH 32 +/* enum property: bitshift */ /* Enum values, see field(s): */ /* 100M */ /* Supported 100G loopbacks. 
*/ @@ -5646,142 +4986,1221 @@ #define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_HI_LEN 4 #define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_HI_LBN 480 #define MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_HI_WIDTH 32 +/* enum property: bitshift */ /* Enum values, see field(s): */ /* 100M */ -/* AN_TYPE structuredef: Auto-negotiation types defined in IEEE802.3 */ -#define AN_TYPE_LEN 4 -#define AN_TYPE_TYPE_OFST 0 -#define AN_TYPE_TYPE_LEN 4 -/* enum: None, AN disabled or not supported */ -#define MC_CMD_AN_NONE 0x0 -/* enum: Clause 28 - BASE-T */ -#define MC_CMD_AN_CLAUSE28 0x1 -/* enum: Clause 37 - BASE-X */ -#define MC_CMD_AN_CLAUSE37 0x2 -/* enum: Clause 73 - BASE-R startup protocol for backplane and copper cable - * assemblies. Includes Clause 72/Clause 92 link-training. - */ -#define MC_CMD_AN_CLAUSE73 0x3 -#define AN_TYPE_TYPE_LBN 0 -#define AN_TYPE_TYPE_WIDTH 32 - -/* FEC_TYPE structuredef: Forward error correction types defined in IEEE802.3 - */ -#define FEC_TYPE_LEN 4 -#define FEC_TYPE_TYPE_OFST 0 -#define FEC_TYPE_TYPE_LEN 4 -/* enum: No FEC */ -#define MC_CMD_FEC_NONE 0x0 -/* enum: Clause 74 BASE-R FEC (a.k.a Firecode) */ -#define MC_CMD_FEC_BASER 0x1 -/* enum: Clause 91/Clause 108 Reed-Solomon FEC */ -#define MC_CMD_FEC_RS 0x2 -#define FEC_TYPE_TYPE_LBN 0 -#define FEC_TYPE_TYPE_WIDTH 32 - - -/***********************************/ -/* MC_CMD_GET_LINK - * Read the unified MAC/PHY link state. Locks required: None Return code: 0, - * ETIME. - */ -#define MC_CMD_GET_LINK 0x29 -#undef MC_CMD_0x29_PRIVILEGE_CTG - -#define MC_CMD_0x29_PRIVILEGE_CTG SRIOV_CTG_GENERAL - -/* MC_CMD_GET_LINK_IN msgrequest */ -#define MC_CMD_GET_LINK_IN_LEN 0 - -/* MC_CMD_GET_LINK_OUT msgresponse */ -#define MC_CMD_GET_LINK_OUT_LEN 28 -/* Near-side advertised capabilities. Refer to - * MC_CMD_GET_PHY_CFG_OUT/SUPPORTED_CAP for bit definitions. - */ -#define MC_CMD_GET_LINK_OUT_CAP_OFST 0 -#define MC_CMD_GET_LINK_OUT_CAP_LEN 4 -/* Link-partner advertised capabilities. Refer to - * MC_CMD_GET_PHY_CFG_OUT/SUPPORTED_CAP for bit definitions. - */ -#define MC_CMD_GET_LINK_OUT_LP_CAP_OFST 4 -#define MC_CMD_GET_LINK_OUT_LP_CAP_LEN 4 -/* Autonegotiated speed in mbit/s. The link may still be down even if this - * reads non-zero. +/* MC_CMD_GET_LOOPBACK_MODES_OUT_V3 msgresponse: Supported loopback modes for + * newer NICs with 200G support */ -#define MC_CMD_GET_LINK_OUT_LINK_SPEED_OFST 8 -#define MC_CMD_GET_LINK_OUT_LINK_SPEED_LEN 4 -/* Current loopback setting. 
*/ -#define MC_CMD_GET_LINK_OUT_LOOPBACK_MODE_OFST 12 -#define MC_CMD_GET_LINK_OUT_LOOPBACK_MODE_LEN 4 -/* Enum values, see field(s): */ -/* MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */ -#define MC_CMD_GET_LINK_OUT_FLAGS_OFST 16 -#define MC_CMD_GET_LINK_OUT_FLAGS_LEN 4 -#define MC_CMD_GET_LINK_OUT_LINK_UP_OFST 16 -#define MC_CMD_GET_LINK_OUT_LINK_UP_LBN 0 -#define MC_CMD_GET_LINK_OUT_LINK_UP_WIDTH 1 -#define MC_CMD_GET_LINK_OUT_FULL_DUPLEX_OFST 16 -#define MC_CMD_GET_LINK_OUT_FULL_DUPLEX_LBN 1 -#define MC_CMD_GET_LINK_OUT_FULL_DUPLEX_WIDTH 1 -#define MC_CMD_GET_LINK_OUT_BPX_LINK_OFST 16 -#define MC_CMD_GET_LINK_OUT_BPX_LINK_LBN 2 -#define MC_CMD_GET_LINK_OUT_BPX_LINK_WIDTH 1 -#define MC_CMD_GET_LINK_OUT_PHY_LINK_OFST 16 -#define MC_CMD_GET_LINK_OUT_PHY_LINK_LBN 3 -#define MC_CMD_GET_LINK_OUT_PHY_LINK_WIDTH 1 -#define MC_CMD_GET_LINK_OUT_LINK_FAULT_RX_OFST 16 -#define MC_CMD_GET_LINK_OUT_LINK_FAULT_RX_LBN 6 -#define MC_CMD_GET_LINK_OUT_LINK_FAULT_RX_WIDTH 1 -#define MC_CMD_GET_LINK_OUT_LINK_FAULT_TX_OFST 16 -#define MC_CMD_GET_LINK_OUT_LINK_FAULT_TX_LBN 7 -#define MC_CMD_GET_LINK_OUT_LINK_FAULT_TX_WIDTH 1 -#define MC_CMD_GET_LINK_OUT_MODULE_UP_VALID_OFST 16 -#define MC_CMD_GET_LINK_OUT_MODULE_UP_VALID_LBN 8 -#define MC_CMD_GET_LINK_OUT_MODULE_UP_VALID_WIDTH 1 -#define MC_CMD_GET_LINK_OUT_MODULE_UP_OFST 16 -#define MC_CMD_GET_LINK_OUT_MODULE_UP_LBN 9 -#define MC_CMD_GET_LINK_OUT_MODULE_UP_WIDTH 1 -/* This returns the negotiated flow control value. */ -#define MC_CMD_GET_LINK_OUT_FCNTL_OFST 20 -#define MC_CMD_GET_LINK_OUT_FCNTL_LEN 4 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_LEN 72 +/* Supported loopbacks. */ +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100M_OFST 0 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100M_LEN 8 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100M_LO_OFST 0 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100M_LO_LEN 4 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100M_LO_LBN 0 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100M_LO_WIDTH 32 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100M_HI_OFST 4 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100M_HI_LEN 4 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100M_HI_LBN 32 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100M_HI_WIDTH 32 +/* enum property: bitshift */ +/* enum: None. */ +/* MC_CMD_LOOPBACK_NONE 0x0 */ +/* enum: Data. */ +/* MC_CMD_LOOPBACK_DATA 0x1 */ +/* enum: GMAC. */ +/* MC_CMD_LOOPBACK_GMAC 0x2 */ +/* enum: XGMII. */ +/* MC_CMD_LOOPBACK_XGMII 0x3 */ +/* enum: XGXS. */ +/* MC_CMD_LOOPBACK_XGXS 0x4 */ +/* enum: XAUI. */ +/* MC_CMD_LOOPBACK_XAUI 0x5 */ +/* enum: GMII. */ +/* MC_CMD_LOOPBACK_GMII 0x6 */ +/* enum: SGMII. */ +/* MC_CMD_LOOPBACK_SGMII 0x7 */ +/* enum: XGBR. */ +/* MC_CMD_LOOPBACK_XGBR 0x8 */ +/* enum: XFI. */ +/* MC_CMD_LOOPBACK_XFI 0x9 */ +/* enum: XAUI Far. */ +/* MC_CMD_LOOPBACK_XAUI_FAR 0xa */ +/* enum: GMII Far. */ +/* MC_CMD_LOOPBACK_GMII_FAR 0xb */ +/* enum: SGMII Far. */ +/* MC_CMD_LOOPBACK_SGMII_FAR 0xc */ +/* enum: XFI Far. */ +/* MC_CMD_LOOPBACK_XFI_FAR 0xd */ +/* enum: GPhy. */ +/* MC_CMD_LOOPBACK_GPHY 0xe */ +/* enum: PhyXS. */ +/* MC_CMD_LOOPBACK_PHYXS 0xf */ +/* enum: PCS. */ +/* MC_CMD_LOOPBACK_PCS 0x10 */ +/* enum: PMA-PMD. */ +/* MC_CMD_LOOPBACK_PMAPMD 0x11 */ +/* enum: Cross-Port. */ +/* MC_CMD_LOOPBACK_XPORT 0x12 */ +/* enum: XGMII-Wireside. */ +/* MC_CMD_LOOPBACK_XGMII_WS 0x13 */ +/* enum: XAUI Wireside. */ +/* MC_CMD_LOOPBACK_XAUI_WS 0x14 */ +/* enum: XAUI Wireside Far. */ +/* MC_CMD_LOOPBACK_XAUI_WS_FAR 0x15 */ +/* enum: XAUI Wireside near. 
*/ +/* MC_CMD_LOOPBACK_XAUI_WS_NEAR 0x16 */ +/* enum: GMII Wireside. */ +/* MC_CMD_LOOPBACK_GMII_WS 0x17 */ +/* enum: XFI Wireside. */ +/* MC_CMD_LOOPBACK_XFI_WS 0x18 */ +/* enum: XFI Wireside Far. */ +/* MC_CMD_LOOPBACK_XFI_WS_FAR 0x19 */ +/* enum: PhyXS Wireside. */ +/* MC_CMD_LOOPBACK_PHYXS_WS 0x1a */ +/* enum: PMA lanes MAC-Serdes. */ +/* MC_CMD_LOOPBACK_PMA_INT 0x1b */ +/* enum: KR Serdes Parallel (Encoder). */ +/* MC_CMD_LOOPBACK_SD_NEAR 0x1c */ +/* enum: KR Serdes Serial. */ +/* MC_CMD_LOOPBACK_SD_FAR 0x1d */ +/* enum: PMA lanes MAC-Serdes Wireside. */ +/* MC_CMD_LOOPBACK_PMA_INT_WS 0x1e */ +/* enum: KR Serdes Parallel Wireside (Full PCS). */ +/* MC_CMD_LOOPBACK_SD_FEP2_WS 0x1f */ +/* enum: KR Serdes Parallel Wireside (Sym Aligner to TX). */ +/* MC_CMD_LOOPBACK_SD_FEP1_5_WS 0x20 */ +/* enum: KR Serdes Parallel Wireside (Deserializer to Serializer). */ +/* MC_CMD_LOOPBACK_SD_FEP_WS 0x21 */ +/* enum: KR Serdes Serial Wireside. */ +/* MC_CMD_LOOPBACK_SD_FES_WS 0x22 */ +/* enum: Near side of AOE Siena side port */ +/* MC_CMD_LOOPBACK_AOE_INT_NEAR 0x23 */ +/* enum: Medford Wireside datapath loopback */ +/* MC_CMD_LOOPBACK_DATA_WS 0x24 */ +/* enum: Force link up without setting up any physical loopback (snapper use + * only) + */ +/* MC_CMD_LOOPBACK_FORCE_EXT_LINK 0x25 */ +/* Supported loopbacks. */ +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_1G_OFST 8 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_1G_LEN 8 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_1G_LO_OFST 8 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_1G_LO_LEN 4 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_1G_LO_LBN 64 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_1G_LO_WIDTH 32 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_1G_HI_OFST 12 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_1G_HI_LEN 4 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_1G_HI_LBN 96 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_1G_HI_WIDTH 32 +/* enum property: bitshift */ /* Enum values, see field(s): */ -/* MC_CMD_SET_MAC/MC_CMD_SET_MAC_IN/FCNTL */ -#define MC_CMD_GET_LINK_OUT_MAC_FAULT_OFST 24 -#define MC_CMD_GET_LINK_OUT_MAC_FAULT_LEN 4 -#define MC_CMD_MAC_FAULT_XGMII_LOCAL_OFST 24 -#define MC_CMD_MAC_FAULT_XGMII_LOCAL_LBN 0 -#define MC_CMD_MAC_FAULT_XGMII_LOCAL_WIDTH 1 -#define MC_CMD_MAC_FAULT_XGMII_REMOTE_OFST 24 -#define MC_CMD_MAC_FAULT_XGMII_REMOTE_LBN 1 -#define MC_CMD_MAC_FAULT_XGMII_REMOTE_WIDTH 1 -#define MC_CMD_MAC_FAULT_SGMII_REMOTE_OFST 24 -#define MC_CMD_MAC_FAULT_SGMII_REMOTE_LBN 2 -#define MC_CMD_MAC_FAULT_SGMII_REMOTE_WIDTH 1 -#define MC_CMD_MAC_FAULT_PENDING_RECONFIG_OFST 24 -#define MC_CMD_MAC_FAULT_PENDING_RECONFIG_LBN 3 -#define MC_CMD_MAC_FAULT_PENDING_RECONFIG_WIDTH 1 +/* 100M */ +/* Supported loopbacks. */ +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_10G_OFST 16 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_10G_LEN 8 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_10G_LO_OFST 16 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_10G_LO_LEN 4 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_10G_LO_LBN 128 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_10G_LO_WIDTH 32 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_10G_HI_OFST 20 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_10G_HI_LEN 4 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_10G_HI_LBN 160 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_10G_HI_WIDTH 32 +/* enum property: bitshift */ +/* Enum values, see field(s): */ +/* 100M */ +/* Supported loopbacks. 
*/ +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_SUGGESTED_OFST 24 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_SUGGESTED_LEN 8 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_SUGGESTED_LO_OFST 24 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_SUGGESTED_LO_LEN 4 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_SUGGESTED_LO_LBN 192 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_SUGGESTED_LO_WIDTH 32 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_SUGGESTED_HI_OFST 28 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_SUGGESTED_HI_LEN 4 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_SUGGESTED_HI_LBN 224 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_SUGGESTED_HI_WIDTH 32 +/* enum property: bitshift */ +/* Enum values, see field(s): */ +/* 100M */ +/* Supported loopbacks. */ +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_40G_OFST 32 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_40G_LEN 8 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_40G_LO_OFST 32 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_40G_LO_LEN 4 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_40G_LO_LBN 256 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_40G_LO_WIDTH 32 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_40G_HI_OFST 36 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_40G_HI_LEN 4 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_40G_HI_LBN 288 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_40G_HI_WIDTH 32 +/* enum property: bitshift */ +/* Enum values, see field(s): */ +/* 100M */ +/* Supported 25G loopbacks. */ +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_25G_OFST 40 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_25G_LEN 8 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_25G_LO_OFST 40 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_25G_LO_LEN 4 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_25G_LO_LBN 320 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_25G_LO_WIDTH 32 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_25G_HI_OFST 44 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_25G_HI_LEN 4 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_25G_HI_LBN 352 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_25G_HI_WIDTH 32 +/* enum property: bitshift */ +/* Enum values, see field(s): */ +/* 100M */ +/* Supported 50 loopbacks. */ +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_50G_OFST 48 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_50G_LEN 8 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_50G_LO_OFST 48 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_50G_LO_LEN 4 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_50G_LO_LBN 384 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_50G_LO_WIDTH 32 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_50G_HI_OFST 52 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_50G_HI_LEN 4 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_50G_HI_LBN 416 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_50G_HI_WIDTH 32 +/* enum property: bitshift */ +/* Enum values, see field(s): */ +/* 100M */ +/* Supported 100G loopbacks. */ +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100G_OFST 56 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100G_LEN 8 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100G_LO_OFST 56 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100G_LO_LEN 4 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100G_LO_LBN 448 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100G_LO_WIDTH 32 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100G_HI_OFST 60 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100G_HI_LEN 4 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100G_HI_LBN 480 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_100G_HI_WIDTH 32 +/* enum property: bitshift */ +/* Enum values, see field(s): */ +/* 100M */ +/* Supported 200G loopbacks. 
*/ +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_200G_OFST 64 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_200G_LEN 8 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_200G_LO_OFST 64 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_200G_LO_LEN 4 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_200G_LO_LBN 512 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_200G_LO_WIDTH 32 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_200G_HI_OFST 68 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_200G_HI_LEN 4 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_200G_HI_LBN 544 +#define MC_CMD_GET_LOOPBACK_MODES_OUT_V3_200G_HI_WIDTH 32 +/* enum property: bitshift */ +/* Enum values, see field(s): */ +/* 100M */ -/* MC_CMD_GET_LINK_OUT_V2 msgresponse: Extended link state information */ -#define MC_CMD_GET_LINK_OUT_V2_LEN 44 +/* AN_TYPE structuredef: Auto-negotiation types defined in IEEE802.3 */ +#define AN_TYPE_LEN 4 +#define AN_TYPE_TYPE_OFST 0 +#define AN_TYPE_TYPE_LEN 4 +/* enum: None, AN disabled or not supported */ +#define MC_CMD_AN_NONE 0x0 +/* enum: Clause 28 - BASE-T */ +#define MC_CMD_AN_CLAUSE28 0x1 +/* enum: Clause 37 - BASE-X */ +#define MC_CMD_AN_CLAUSE37 0x2 +/* enum: Clause 73 - BASE-R startup protocol for backplane and copper cable + * assemblies. Includes Clause 72/Clause 92 link-training. + */ +#define MC_CMD_AN_CLAUSE73 0x3 +#define AN_TYPE_TYPE_LBN 0 +#define AN_TYPE_TYPE_WIDTH 32 + +/* FEC_TYPE structuredef: Forward error correction types defined in IEEE802.3 + */ +#define FEC_TYPE_LEN 4 +#define FEC_TYPE_TYPE_OFST 0 +#define FEC_TYPE_TYPE_LEN 4 +/* enum: No FEC */ +#define MC_CMD_FEC_NONE 0x0 +/* enum: IEEE 802.3, Clause 74 BASE-R FEC (a.k.a Firecode) */ +#define MC_CMD_FEC_BASER 0x1 +/* enum: IEEE 802.3, Clause 91/Clause 108 Reed-Solomon FEC */ +#define MC_CMD_FEC_RS 0x2 +/* enum: IEEE 802.3, Clause 161, interleaved RS-FEC sublayer for 100GBASE-R + * PHYs + */ +#define MC_CMD_FEC_IEEE_RS_INT 0x3 +/* enum: Ethernet Consortium, Low Latency RS-FEC. RS(272, 258). Replaces FEC + * specified in Clause 119 for 100/200G PHY. Replaces FEC specified in Clause + * 134 for 50G PHY. + */ +#define MC_CMD_FEC_ETCS_RS_LL 0x4 +/* enum: FEC mode selected automatically */ +#define MC_CMD_FEC_AUTO 0x5 +#define FEC_TYPE_TYPE_LBN 0 +#define FEC_TYPE_TYPE_WIDTH 32 + +/* MC_CMD_ETH_TECH structuredef: Ethernet technology as defined by IEEE802.3, + * Ethernet Technology Consortium, proprietary technologies. The driver must + * not use technologies labelled NONE and AUTO. + */ +#define MC_CMD_ETH_TECH_LEN 16 +/* The enums in this field can be used either as bitwise indices into a tech + * mask (e.g. see MC_CMD_ETH_AN_FIELDS/TECH_MASK for example) or as regular + * enums (e.g. see MC_CMD_LINK_CTRL_IN/ADVERTISED_TECH_ABILITIES_MASK). This + * structure must be updated to add new technologies when projects that need + * them arise. An incomplete list of possible expansion in the future include: + * 100GBASE_KP4, 800GBASE_CR8, 800GBASE_KR8, 800GBASE_DR8, 800GBASE_SR8 + * 800GBASE_VR8 + */ +#define MC_CMD_ETH_TECH_TECH_OFST 0 +#define MC_CMD_ETH_TECH_TECH_LEN 16 +/* enum: 1000BASE-KX - 1000BASE-X PCS/PMA over an electrical backplane PMD. See + * IEEE 802.3 Clause 70 + */ +#define MC_CMD_ETH_TECH_1000BASEKX 0x0 +/* enum: 10GBASE-R - PCS/PMA over an electrical backplane PMD. Refer to IEEE + * 802.3 Clause 72 + */ +#define MC_CMD_ETH_TECH_10GBASE_KR 0x1 +/* enum: 40GBASE-R PCS/PMA over an electrical backplane PMD. See IEEE 802.3 + * Clause 84. + */ +#define MC_CMD_ETH_TECH_40GBASE_KR4 0x2 +/* enum: 40GBASE-R PCS/PMA over 4 lane shielded copper balanced cable PMD. 
See + * IEEE 802.3 Clause 85 + */ +#define MC_CMD_ETH_TECH_40GBASE_CR4 0x3 +/* enum: 40GBASE-R PCS/PMA over 4 lane multimode fiber PMD as specified in + * Clause 86 + */ +#define MC_CMD_ETH_TECH_40GBASE_SR4 0x4 +/* enum: 40GBASE-R PCS/PMA over 4 WDM lane single mode fiber PMD with long + * reach. See IEEE 802.3 Clause 87 + */ +#define MC_CMD_ETH_TECH_40GBASE_LR4 0x5 +/* enum: 25GBASE-R PCS/PMA over shielded balanced copper cable PMD. See IEEE + * 802.3 Clause 110 + */ +#define MC_CMD_ETH_TECH_25GBASE_CR 0x6 +/* enum: 25GBASE-R PCS/PMA over an electrical backplane PMD. See IEEE 802.3 + * Clause 111 + */ +#define MC_CMD_ETH_TECH_25GBASE_KR 0x7 +/* enum: 25GBASE-R PCS/PMA over multimode fiber PMD. Refer to IEEE 802.3 Clause + * 112 + */ +#define MC_CMD_ETH_TECH_25GBASE_SR 0x8 +/* enum: An Ethernet Physical layer operating at 50 Gb/s on twin-axial copper + * cable. Refer to Ethernet Technology Consortium 25/50G Ethernet Spec. + */ +#define MC_CMD_ETH_TECH_50GBASE_CR2 0x9 +/* enum: An Ethernet Physical layer operating at 50 Gb/s on copper backplane. + * Refer to Ethernet Technology Consortium 25/50G Ethernet Spec. + */ +#define MC_CMD_ETH_TECH_50GBASE_KR2 0xa +/* enum: 100GBASE-R PCS/PMA over an electrical backplane PMD. See IEEE 802.3 + * Clause 93 + */ +#define MC_CMD_ETH_TECH_100GBASE_KR4 0xb +/* enum: 100GBASE-R PCS/PMA over 4 lane multimode fiber PMD. See IEEE 802.3 + * Clause 95 + */ +#define MC_CMD_ETH_TECH_100GBASE_SR4 0xc +/* enum: 100GBASE-R PCS/PMA over 4 lane shielded copper balanced cable PMD. See + * IEEE 802.3 Clause 92 + */ +#define MC_CMD_ETH_TECH_100GBASE_CR4 0xd +/* enum: 100GBASE-R PCS/PMA over 4 WDM lane single mode fiber PMD, with + * long/extended reach. See IEEE 802.3 Clause 88 + */ +#define MC_CMD_ETH_TECH_100GBASE_LR4_ER4 0xe +/* enum: An Ethernet Physical layer operating at 50 Gb/s on short reach fiber. + * Refer to Ethernet Technology Consortium 25/50G Ethernet Spec. + */ +#define MC_CMD_ETH_TECH_50GBASE_SR2 0xf +/* enum: 1000BASEX PCS/PMA. See IEEE 802.3 Clause 36 over undefined PMD, duplex + * mode unknown + */ +#define MC_CMD_ETH_TECH_1000BASEX 0x10 +/* enum: Non-standardised. 10G direct attach */ +#define MC_CMD_ETH_TECH_10GBASE_CR 0x11 +/* enum: 10GBASE-SR fiber over 850nm optics. See IEEE 802.3 Clause 52 */ +#define MC_CMD_ETH_TECH_10GBASE_SR 0x12 +/* enum: 10GBASE-LR fiber over 1310nm optics. See IEEE 802.3 Clause 52 */ +#define MC_CMD_ETH_TECH_10GBASE_LR 0x13 +/* enum: 10GBASE-LRM fiber over 1310 nm optics. See IEEE 802.3 Clause 68 */ +#define MC_CMD_ETH_TECH_10GBASE_LRM 0x14 +/* enum: 10GBASE-ER fiber over 1550nm optics. See IEEE 802.3 Clause 52 */ +#define MC_CMD_ETH_TECH_10GBASE_ER 0x15 +/* enum: 50GBASE-R PCS/PMA over an electrical backplane PMD. See IEEE 802.3 + * Clause 137 + */ +#define MC_CMD_ETH_TECH_50GBASE_KR 0x16 +/* enum: 50GBASE-SR PCS/PMA over multimode fiber PMD as specified in Clause 138 + */ +#define MC_CMD_ETH_TECH_50GBASE_SR 0x17 +/* enum: 50GBASE-CR PCS/PMA over shielded copper balanced cable PMD. See IEEE + * 802.3 Clause 136 + */ +#define MC_CMD_ETH_TECH_50GBASE_CR 0x18 +/* enum: 50GBASE-R PCS/PMA over single mode fiber PMD as specified in Clause + * 139. + */ +#define MC_CMD_ETH_TECH_50GBASE_LR_ER_FR 0x19 +/* enum: 100 Gb/s PHY using 100GBASE-R encoding over single-mode fiber with + * reach up to at least 500 m (see IEEE 802.3 Clause 140) + */ +#define MC_CMD_ETH_TECH_50GBASE_DR 0x1a +/* enum: 100GBASE-R PCS/PMA over an electrical backplane PMD.
See IEEE 802.3 + * Clause 137 + */ +#define MC_CMD_ETH_TECH_100GBASE_KR2 0x1b +/* enum: 100GBASE-R PCS/PMA over 2 lane multimode fiber PMD. See IEEE 802.3 + * Clause 138 + */ +#define MC_CMD_ETH_TECH_100GBASE_SR2 0x1c +/* enum: 100GBASE-R PCS/PMA over 2 lane shielded copper balanced cable PMD. See + * IEEE 802.3 Clause 136 + */ +#define MC_CMD_ETH_TECH_100GBASE_CR2 0x1d +/* enum: Unknown source */ +#define MC_CMD_ETH_TECH_100GBASE_LR2_ER2_FR2 0x1e +/* enum: Unknown source */ +#define MC_CMD_ETH_TECH_100GBASE_DR2 0x1f +/* enum: 200GBASE-R PCS/PMA over an electrical backplane PMD. See IEEE 802.3 + * Clause 137 + */ +#define MC_CMD_ETH_TECH_200GBASE_KR4 0x20 +/* enum: 200GBASE-R PCS/PMA over 4 lane multimode fiber PMD. See IEEE 802.3 + * Clause 138 + */ +#define MC_CMD_ETH_TECH_200GBASE_SR4 0x21 +/* enum: 200GBASE-R PCS/PMA over 4 WDM lane single-mode fiber PMD as specified + * in Clause 122 + */ +#define MC_CMD_ETH_TECH_200GBASE_LR4_ER4_FR4 0x22 +/* enum: 200GBASE-R PCS/PMA over 4-lane single-mode fiber PMD. See IEEE 802.3 + * Clause 121 + */ +#define MC_CMD_ETH_TECH_200GBASE_DR4 0x23 +/* enum: 200GBASE-R PCS/PMA over 4 lane shielded copper balanced cable PMD as + * specified in Clause 136 + */ +#define MC_CMD_ETH_TECH_200GBASE_CR4 0x24 +/* enum: Ethernet Technology Consortium 400G AN Spec. 400GBASE-KR8 PMD uses + * 802.3 Clause 137, but the number of PMD lanes is 8. + */ +#define MC_CMD_ETH_TECH_400GBASE_KR8 0x25 +/* enum: 400GBASE-R PCS/PMA over 8-lane multimode fiber PMD. See IEEE 802.3 + * Clause 138 + */ +#define MC_CMD_ETH_TECH_400GBASE_SR8 0x26 +/* enum: 400GBASE-R PCS/PMA over 8 WDM lane single-mode fiber PMD. See IEEE + * 802.3 Clause 122 + */ +#define MC_CMD_ETH_TECH_400GBASE_LR8_ER8_FR8 0x27 +/* enum: Unknown source */ +#define MC_CMD_ETH_TECH_400GBASE_DR8 0x28 +/* enum: Ethernet Technology Consortium 400G AN Spec. 400GBASE-CR8 PMD uses + * IEEE 802.3 Clause 136, but the number of PMD lanes is 8. + */ +#define MC_CMD_ETH_TECH_400GBASE_CR8 0x29 +/* enum: 100GBASE-R PCS/PMA over an electrical backplane PMD. See IEEE 802.3ck + * Clause 163. + */ +#define MC_CMD_ETH_TECH_100GBASE_KR 0x2a +/* enum: IEEE 802.3ck. 100G PHY with PMD as specified in Clause 167 over short + * reach fiber + */ +#define MC_CMD_ETH_TECH_100GBASE_SR 0x2b +/* enum: 100G PMD together with single-mode fiber medium. See IEEE 802.3 Clause + * 140 + */ +#define MC_CMD_ETH_TECH_100GBASE_LR_ER_FR 0x2c +/* enum: 100GBASE-R PCS/PMA over shielded balanced copper cable PMD. See Clause + * 162 of IEEE 802.3ck. + */ +#define MC_CMD_ETH_TECH_100GBASE_CR 0x2d +/* enum: 100G PMD together with single-mode fiber medium. See IEEE 802.3 Clause + * 140 + */ +#define MC_CMD_ETH_TECH_100GBASE_DR 0x2e +/* enum: 200GBASE-R PCS/PMA over an electrical backplane PMD as specified in + * Clause 163 IEEE 802.3ck + */ +#define MC_CMD_ETH_TECH_200GBASE_KR2 0x2f +/* enum: 200G PHY with PMD as specified in Clause 167 over short reach fiber + * IEEE 802.3ck + */ +#define MC_CMD_ETH_TECH_200GBASE_SR2 0x30 +/* enum: Unknown source */ +#define MC_CMD_ETH_TECH_200GBASE_LR2_ER2_FR2 0x31 +/* enum: Unknown source */ +#define MC_CMD_ETH_TECH_200GBASE_DR2 0x32 +/* enum: 200GBASE-R PCS/PMA over 2 lane shielded balanced copper cable PMD as + * specified in Clause 162 IEEE 802.3ck. + */ +#define MC_CMD_ETH_TECH_200GBASE_CR2 0x33 +/* enum: 400GBASE-R PCS/PMA over an electrical backplane PMD. See IEEE 802.3 + * Clause 163 IEEE 802.3ck. + */ +#define MC_CMD_ETH_TECH_400GBASE_KR4 0x34 +/* enum: 400G PHY with PMD over short reach fiber.
See Clause 167 of IEEE + * 802.3ck. + */ +#define MC_CMD_ETH_TECH_400GBASE_SR4 0x35 +/* enum: 400GBASE-R PCS/PMA over 4 WDM lane single-mode fiber PMD. See IEEE + * 802.3 Clause 151 + */ +#define MC_CMD_ETH_TECH_400GBASE_LR4_ER4_FR4 0x36 +/* enum: 400GBASE-R PCS/PMA over 4-lane single-mode fiber PMD as specified in + * Clause 124 + */ +#define MC_CMD_ETH_TECH_400GBASE_DR4 0x37 +/* enum: 400GBASE-R PCS/PMA over 4 lane shielded balanced copper cable PMD as + * specified in Clause 162 of IEEE 802.3ck. + */ +#define MC_CMD_ETH_TECH_400GBASE_CR4 0x38 +/* enum: Automatic tech mode. The driver must not use this. */ +#define MC_CMD_ETH_TECH_AUTO 0x39 +/* enum: See IEEE 802.3cc-2017 Clause 114 */ +#define MC_CMD_ETH_TECH_25GBASE_LR_ER 0x3a +/* enum: Up to 7 m over twinaxial copper cable assembly (10 lanes, 10 Gbit/s + * each) See IEEE 802.3ba-2010 Clause 85 + */ +#define MC_CMD_ETH_TECH_100GBASE_CR10 0x3b +/* enum: Invalid tech mode. The driver must not use this. */ +#define MC_CMD_ETH_TECH_NONE 0x7f +#define MC_CMD_ETH_TECH_TECH_LBN 0 +#define MC_CMD_ETH_TECH_TECH_WIDTH 128 + +/* MC_CMD_LINK_STATUS_FLAGS structuredef */ +#define MC_CMD_LINK_STATUS_FLAGS_LEN 8 +/* Flags used to report the current configuration/state of the link. */ +#define MC_CMD_LINK_STATUS_FLAGS_STATUS_FLAGS_OFST 0 +#define MC_CMD_LINK_STATUS_FLAGS_STATUS_FLAGS_LEN 8 +#define MC_CMD_LINK_STATUS_FLAGS_STATUS_FLAGS_LO_OFST 0 +#define MC_CMD_LINK_STATUS_FLAGS_STATUS_FLAGS_LO_LEN 4 +#define MC_CMD_LINK_STATUS_FLAGS_STATUS_FLAGS_LO_LBN 0 +#define MC_CMD_LINK_STATUS_FLAGS_STATUS_FLAGS_LO_WIDTH 32 +#define MC_CMD_LINK_STATUS_FLAGS_STATUS_FLAGS_HI_OFST 4 +#define MC_CMD_LINK_STATUS_FLAGS_STATUS_FLAGS_HI_LEN 4 +#define MC_CMD_LINK_STATUS_FLAGS_STATUS_FLAGS_HI_LBN 32 +#define MC_CMD_LINK_STATUS_FLAGS_STATUS_FLAGS_HI_WIDTH 32 +/* enum property: bitshift */ +/* enum: Whether we have overall link up */ +#define MC_CMD_LINK_STATUS_FLAGS_LINK_UP 0x0 +/* enum: If set, the PHY has no external RX link synchronisation */ +#define MC_CMD_LINK_STATUS_FLAGS_NO_PHY_LINK 0x1 +/* enum: If set, PMD/MDI is not connected (e.g. cable disconnected, module cage + * empty) + */ +#define MC_CMD_LINK_STATUS_FLAGS_PMD_MDI_DISCONNECTED 0x2 +/* enum: Set on error while decoding module data (e.g. module EEPROM does not + * contain valid values, has checksum errors, etc.) + */ +#define MC_CMD_LINK_STATUS_FLAGS_PMD_BAD 0x3 +/* enum: Set when module unsupported (e.g. unsupported link rate or link + * technology) + */ +#define MC_CMD_LINK_STATUS_FLAGS_PMD_UNSUPPORTED 0x4 +/* enum: Set on error while communicating with the module (e.g. 
I2C errors + * while reading EEPROM) + */ +#define MC_CMD_LINK_STATUS_FLAGS_PMD_COMMS_FAULT 0x5 +/* enum: Set on module overcurrent/overvoltage condition */ +#define MC_CMD_LINK_STATUS_FLAGS_PMD_POWER_FAULT 0x6 +/* enum: Set on module overtemperature condition */ +#define MC_CMD_LINK_STATUS_FLAGS_PMD_THERMAL_FAULT 0x7 +/* enum: If set, the module is indicating Loss of Signal */ +#define MC_CMD_LINK_STATUS_FLAGS_PMD_LOS 0x8 +/* enum: If set, PMA is indicating loss of CDR lock (clock sync) */ +#define MC_CMD_LINK_STATUS_FLAGS_PMA_NO_CDR_LOCK 0x9 +/* enum: If set, PMA is indicating loss of analog signal */ +#define MC_CMD_LINK_STATUS_FLAGS_PMA_LOS 0xa +/* enum: If set, PCS is indicating loss of block lock */ +#define MC_CMD_LINK_STATUS_FLAGS_PCS_NO_BLOCK_LOCK 0xb +/* enum: If set, PCS is indicating loss of alignment marker lock on one or more + * lanes + */ +#define MC_CMD_LINK_STATUS_FLAGS_PCS_NO_AM_LOCK 0xc +/* enum: If set, PCS is indicating loss of overall alignment lock */ +#define MC_CMD_LINK_STATUS_FLAGS_PCS_NO_ALIGN_LOCK 0xd +/* enum: If set, PCS is indicating high bit error rate condition. */ +#define MC_CMD_LINK_STATUS_FLAGS_PCS_HI_BER 0xe +/* enum: If set, FEC is indicating loss of FEC lock */ +#define MC_CMD_LINK_STATUS_FLAGS_FEC_NO_LOCK 0xf +/* enum: If set, indicates that the number of symbol errors in a 8192-codeword + * window has exceeded the threshold K (417). + */ +#define MC_CMD_LINK_STATUS_FLAGS_FEC_HI_SER 0x10 +/* enum: If set, the receiver has detected the local FEC has degraded. */ +#define MC_CMD_LINK_STATUS_FLAGS_FEC_LOCAL_DEGRADED 0x11 +/* enum: If set, the receiver has detected the remote FEC has degraded. */ +#define MC_CMD_LINK_STATUS_FLAGS_FEC_RM_DEGRADED 0x12 +/* enum: If set, the number of symbol errors is over an internal threshold. */ +#define MC_CMD_LINK_STATUS_FLAGS_FEC_DEGRADED_SER 0x13 +/* enum: If set, autonegotiation has detected an auto-negotiation capable link + * partner + */ +#define MC_CMD_LINK_STATUS_FLAGS_AN_ABLE 0x14 +/* enum: If set, autonegotiation base page exchange has failed */ +#define MC_CMD_LINK_STATUS_FLAGS_AN_BP_FAILED 0x15 +/* enum: If set, autonegotiation next page exchange has failed */ +#define MC_CMD_LINK_STATUS_FLAGS_AN_NP_FAILED 0x16 +/* enum: If set, autonegotiation has failed to negotiate a common set of + * capabilities + */ +#define MC_CMD_LINK_STATUS_FLAGS_AN_NO_HCD 0x17 +/* enum: If set, local end link training has failed to establish link training + * frame lock on one or more lanes + */ +#define MC_CMD_LINK_STATUS_FLAGS_LT_NO_LOCAL_FRAME_LOCK 0x18 +/* enum: If set, remote end link training has failed to establish link training + * frame lock on one or more lanes + */ +#define MC_CMD_LINK_STATUS_FLAGS_LT_NO_RM_FRAME_LOCK 0x19 +/* enum: If set, remote end has failed to assert Receiver Ready (link training + * success) within the designated timeout + */ +#define MC_CMD_LINK_STATUS_FLAGS_LT_NO_RX_READY 0x1a +#define MC_CMD_LINK_STATUS_FLAGS_STATUS_FLAGS_LBN 0 +#define MC_CMD_LINK_STATUS_FLAGS_STATUS_FLAGS_WIDTH 64 + +/* MC_CMD_PAUSE_MODE structuredef */ +#define MC_CMD_PAUSE_MODE_LEN 1 +#define MC_CMD_PAUSE_MODE_TYPE_OFST 0 +#define MC_CMD_PAUSE_MODE_TYPE_LEN 1 +/* enum: See IEEE 802.3 Clause 73.6.6 */ +#define MC_CMD_PAUSE_MODE_AN_PAUSE 0x0 +/* enum: See IEEE 802.3 Clause 73.6.6 */ +#define MC_CMD_PAUSE_MODE_AN_ASYM_DIR 0x1 +#define MC_CMD_PAUSE_MODE_TYPE_LBN 0 +#define MC_CMD_PAUSE_MODE_TYPE_WIDTH 8 + +/* MC_CMD_ETH_AN_FIELDS structuredef: Fields used for IEEE 802.3 Clause 73 + * Auto-Negotiation. 
Warning - This is fixed size and cannot be extended. This + * structure is used to define autonegotiable abilities (advertised, link + * partner and supported abilities). + */ +#define MC_CMD_ETH_AN_FIELDS_LEN 25 +/* Mask of Ethernet technologies. The bit indices in this mask are taken from + * the TECH field in the MC_CMD_ETH_TECH structure. + */ +#define MC_CMD_ETH_AN_FIELDS_TECH_MASK_OFST 0 +#define MC_CMD_ETH_AN_FIELDS_TECH_MASK_LEN 16 +/* enum property: bitshift */ +/* Enum values, see field(s): */ +/* MC_CMD_ETH_TECH/TECH */ +#define MC_CMD_ETH_AN_FIELDS_TECH_MASK_LBN 0 +#define MC_CMD_ETH_AN_FIELDS_TECH_MASK_WIDTH 128 +/* Mask of supported FEC modes */ +#define MC_CMD_ETH_AN_FIELDS_FEC_MASK_OFST 16 +#define MC_CMD_ETH_AN_FIELDS_FEC_MASK_LEN 4 +/* enum property: bitshift */ +/* Enum values, see field(s): */ +/* FEC_TYPE/TYPE */ +#define MC_CMD_ETH_AN_FIELDS_FEC_MASK_LBN 128 +#define MC_CMD_ETH_AN_FIELDS_FEC_MASK_WIDTH 32 +/* Mask of requested FEC modes */ +#define MC_CMD_ETH_AN_FIELDS_FEC_REQ_OFST 20 +#define MC_CMD_ETH_AN_FIELDS_FEC_REQ_LEN 4 +/* enum property: bitshift */ +/* Enum values, see field(s): */ +/* FEC_TYPE/TYPE */ +#define MC_CMD_ETH_AN_FIELDS_FEC_REQ_LBN 160 +#define MC_CMD_ETH_AN_FIELDS_FEC_REQ_WIDTH 32 +/* Bitmask of negotiated pause modes */ +#define MC_CMD_ETH_AN_FIELDS_PAUSE_MASK_OFST 24 +#define MC_CMD_ETH_AN_FIELDS_PAUSE_MASK_LEN 1 +/* enum property: bitshift */ +/* Enum values, see field(s): */ +/* MC_CMD_PAUSE_MODE/TYPE */ +#define MC_CMD_ETH_AN_FIELDS_PAUSE_MASK_LBN 192 +#define MC_CMD_ETH_AN_FIELDS_PAUSE_MASK_WIDTH 8 + +/* MC_CMD_LOOPBACK_V2 structuredef: Loopback modes for use with the new + * MC_CMD_LINK_CTRL and MC_CMD_LINK_STATE. These loopback modes are not + * supported in other getlink/setlink commands. + */ +#define MC_CMD_LOOPBACK_V2_LEN 4 +#define MC_CMD_LOOPBACK_V2_MODE_OFST 0 +#define MC_CMD_LOOPBACK_V2_MODE_LEN 4 +/* enum: No loopback */ +#define MC_CMD_LOOPBACK_V2_NONE 0x0 +/* enum: Let firmware choose a supported loopback mode */ +#define MC_CMD_LOOPBACK_V2_AUTO 0x1 +/* enum: Loopback after the MAC */ +#define MC_CMD_LOOPBACK_V2_POST_MAC 0x2 +/* enum: Loopback after the PCS */ +#define MC_CMD_LOOPBACK_V2_POST_PCS 0x3 +/* enum: Loopback after the PMA */ +#define MC_CMD_LOOPBACK_V2_POST_PMA 0x4 +/* enum: Loopback after the MDI Wireside */ +#define MC_CMD_LOOPBACK_V2_POST_MDI_WS 0x5 +/* enum: Loopback after the PMA Wireside */ +#define MC_CMD_LOOPBACK_V2_POST_PMA_WS 0x6 +/* enum: Loopback after the PCS Wireside */ +#define MC_CMD_LOOPBACK_V2_POST_PCS_WS 0x7 +/* enum: Loopback after the MAC Wireside */ +#define MC_CMD_LOOPBACK_V2_POST_MAC_WS 0x8 +/* enum: Loopback after the MAC FIFOs (before the MAC) */ +#define MC_CMD_LOOPBACK_V2_PRE_MAC 0x9 +#define MC_CMD_LOOPBACK_V2_MODE_LBN 0 +#define MC_CMD_LOOPBACK_V2_MODE_WIDTH 32 + +/* MC_CMD_FCNTL structuredef */ +#define MC_CMD_FCNTL_LEN 4 +#define MC_CMD_FCNTL_MASK_OFST 0 +#define MC_CMD_FCNTL_MASK_LEN 4 +/* enum: Flow control is off. */ +#define MC_CMD_FCNTL_OFF 0x0 +/* enum: Respond to flow control. */ +#define MC_CMD_FCNTL_RESPOND 0x1 +/* enum: Respond to and Issue flow control. */ +#define MC_CMD_FCNTL_BIDIR 0x2 +/* enum: Auto negotiate flow control. */ +#define MC_CMD_FCNTL_AUTO 0x3 +/* enum: Priority flow control. This is only supported on KSB. */ +#define MC_CMD_FCNTL_QBB 0x4 +/* enum: Issue flow control. 
*/ +#define MC_CMD_FCNTL_GENERATE 0x5 +#define MC_CMD_FCNTL_MASK_LBN 0 +#define MC_CMD_FCNTL_MASK_WIDTH 32 + +/* MC_CMD_LINK_FLAGS structuredef */ +#define MC_CMD_LINK_FLAGS_LEN 4 +/* The enums defined in this field are used as indices into the + * MC_CMD_LINK_FLAGS bitmask. + */ +#define MC_CMD_LINK_FLAGS_MASK_OFST 0 +#define MC_CMD_LINK_FLAGS_MASK_LEN 4 +/* enum property: bitshift */ +/* enum: Enable auto-negotiation. If AN is enabled, link technology and FEC + * mode are determined by advertised capabilities and requested FEC modes, + * combined with link partner capabilities. If AN is disabled, link technology + * is forced to LINK_TECHNOLOGY and FEC mode is forced to FEC_MODE. Not valid + * if loopback is enabled + */ +#define MC_CMD_LINK_FLAGS_AUTONEG_EN 0x0 +/* enum: Enable parallel detect. In addition to AN, try to sense partner forced + * speed/FEC mode (when partner AN disabled). Only valid if AN is enabled. + */ +#define MC_CMD_LINK_FLAGS_PARALLEL_DETECT_EN 0x1 +/* enum: Force link down, in electrical idle. */ +#define MC_CMD_LINK_FLAGS_LINK_DISABLE 0x2 +/* enum: Ignore the sequence number and always apply. */ +#define MC_CMD_LINK_FLAGS_IGNORE_MODULE_SEQ 0x3 +#define MC_CMD_LINK_FLAGS_MASK_LBN 0 +#define MC_CMD_LINK_FLAGS_MASK_WIDTH 32 + + +/***********************************/ +/* MC_CMD_LINK_CTRL + * Write the unified MAC/PHY link configuration. Locks required: None. Return + * code: 0, EINVAL, ETIME, EAGAIN + */ +#define MC_CMD_LINK_CTRL 0x6b +#undef MC_CMD_0x6b_PRIVILEGE_CTG + +#define MC_CMD_0x6b_PRIVILEGE_CTG SRIOV_CTG_LINK + +/* MC_CMD_LINK_CTRL_IN msgrequest */ +#define MC_CMD_LINK_CTRL_IN_LEN 40 +/* Handle to the port to set link state for. */ +#define MC_CMD_LINK_CTRL_IN_PORT_HANDLE_OFST 0 +#define MC_CMD_LINK_CTRL_IN_PORT_HANDLE_LEN 4 +/* Control flags */ +#define MC_CMD_LINK_CTRL_IN_CONTROL_FLAGS_OFST 4 +#define MC_CMD_LINK_CTRL_IN_CONTROL_FLAGS_LEN 4 +/* enum property: bitshift */ +/* Enum values, see field(s): */ +/* MC_CMD_LINK_FLAGS/MASK */ +/* Reserved for future expansion, and included to provide padding for alignment + * purposes. + */ +#define MC_CMD_LINK_CTRL_IN_RESERVED_OFST 8 +#define MC_CMD_LINK_CTRL_IN_RESERVED_LEN 8 +#define MC_CMD_LINK_CTRL_IN_RESERVED_LO_OFST 8 +#define MC_CMD_LINK_CTRL_IN_RESERVED_LO_LEN 4 +#define MC_CMD_LINK_CTRL_IN_RESERVED_LO_LBN 64 +#define MC_CMD_LINK_CTRL_IN_RESERVED_LO_WIDTH 32 +#define MC_CMD_LINK_CTRL_IN_RESERVED_HI_OFST 12 +#define MC_CMD_LINK_CTRL_IN_RESERVED_HI_LEN 4 +#define MC_CMD_LINK_CTRL_IN_RESERVED_HI_LBN 96 +#define MC_CMD_LINK_CTRL_IN_RESERVED_HI_WIDTH 32 +/* Technology abilities to advertise during auto-negotiation */ +#define MC_CMD_LINK_CTRL_IN_ADVERTISED_TECH_ABILITIES_MASK_OFST 16 +#define MC_CMD_LINK_CTRL_IN_ADVERTISED_TECH_ABILITIES_MASK_LEN 16 +/* enum property: bitshift */ +/* Enum values, see field(s): */ +/* MC_CMD_ETH_TECH/TECH */ +/* Pause abilities to advertise during auto-negotiation. Valid when auto- + * negotiation is enabled and MC_CMD_SET_MAC_IN/FCNTL is set to + * MC_CMD_FCNTL_AUTO. If auto-negotiation is disabled the driver must + * explicitly configure pause mode with MC_CMD_SET_MAC. + */ +#define MC_CMD_LINK_CTRL_IN_ADVERTISED_PAUSE_ABILITIES_MASK_OFST 32 +#define MC_CMD_LINK_CTRL_IN_ADVERTISED_PAUSE_ABILITIES_MASK_LEN 1 +/* enum property: bitshift */ +/* Enum values, see field(s): */ +/* MC_CMD_PAUSE_MODE/TYPE */ +/* When auto-negotiation is enabled, this is the FEC mode to request. Note that + * a weaker FEC mode may get negotiated, depending on what the link partner + * supports.
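For example, a driver that requests MC_CMD_FEC_RS may end up with + * MC_CMD_FEC_BASER if that is the strongest mode the link partner advertises.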
The driver should subsequently use MC_CMD_GET_LINK to check the + * actual negotiated FEC mode. When auto-negotiation is disabled, this is the + * forced FEC mode. + */ +#define MC_CMD_LINK_CTRL_IN_FEC_MODE_OFST 33 +#define MC_CMD_LINK_CTRL_IN_FEC_MODE_LEN 1 +/* enum property: value */ +/* Enum values, see field(s): */ +/* FEC_TYPE/TYPE */ +/* This is only to be used when auto-negotiation is disabled (forced speed or + * loopback mode). If the specified value does not align with the values + * defined in the enum MC_CMD_ETH_TECH/TECH, it is considered invalid. + */ +#define MC_CMD_LINK_CTRL_IN_LINK_TECHNOLOGY_OFST 36 +#define MC_CMD_LINK_CTRL_IN_LINK_TECHNOLOGY_LEN 2 +/* enum property: value */ +/* Enum values, see field(s): */ +/* MC_CMD_ETH_TECH/TECH */ +/* The sequence number of the last MODULECHANGE event. If this doesn't match, + * fail with EAGAIN. + */ +#define MC_CMD_LINK_CTRL_IN_MODULE_SEQ_OFST 38 +#define MC_CMD_LINK_CTRL_IN_MODULE_SEQ_LEN 1 +/* Loopback Mode. Only valid when auto-negotiation is disabled. */ +#define MC_CMD_LINK_CTRL_IN_LOOPBACK_OFST 39 +#define MC_CMD_LINK_CTRL_IN_LOOPBACK_LEN 1 +/* enum property: value */ +/* Enum values, see field(s): */ +/* MC_CMD_LOOPBACK_V2/MODE */ + +/* MC_CMD_LINK_CTRL_OUT msgresponse */ +#define MC_CMD_LINK_CTRL_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_LINK_STATE + */ +#define MC_CMD_LINK_STATE 0x6c +#undef MC_CMD_0x6c_PRIVILEGE_CTG + +#define MC_CMD_0x6c_PRIVILEGE_CTG SRIOV_CTG_LINK + +/* MC_CMD_LINK_STATE_IN msgrequest */ +#define MC_CMD_LINK_STATE_IN_LEN 4 +/* Handle to the port to get link state for. */ +#define MC_CMD_LINK_STATE_IN_PORT_HANDLE_OFST 0 +#define MC_CMD_LINK_STATE_IN_PORT_HANDLE_LEN 4 + +/* MC_CMD_LINK_STATE_OUT msgresponse */ +#define MC_CMD_LINK_STATE_OUT_LEN 114 +/* Flags used to report the current configuration/state of the link. */ +#define MC_CMD_LINK_STATE_OUT_STATUS_FLAGS_OFST 0 +#define MC_CMD_LINK_STATE_OUT_STATUS_FLAGS_LEN 8 +#define MC_CMD_LINK_STATE_OUT_STATUS_FLAGS_LO_OFST 0 +#define MC_CMD_LINK_STATE_OUT_STATUS_FLAGS_LO_LEN 4 +#define MC_CMD_LINK_STATE_OUT_STATUS_FLAGS_LO_LBN 0 +#define MC_CMD_LINK_STATE_OUT_STATUS_FLAGS_LO_WIDTH 32 +#define MC_CMD_LINK_STATE_OUT_STATUS_FLAGS_HI_OFST 4 +#define MC_CMD_LINK_STATE_OUT_STATUS_FLAGS_HI_LEN 4 +#define MC_CMD_LINK_STATE_OUT_STATUS_FLAGS_HI_LBN 32 +#define MC_CMD_LINK_STATE_OUT_STATUS_FLAGS_HI_WIDTH 32 +/* enum property: value */ +/* Enum values, see field(s): */ +/* MC_CMD_LINK_STATUS_FLAGS/STATUS_FLAGS */ +/* Configured technology. If the specified value does not align with the values + * defined in the enum MC_CMD_ETH_TECH/TECH, it is considered invalid. 
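+ * For example, a value of 0x2d here corresponds to MC_CMD_ETH_TECH_100GBASE_CR + * as defined above.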
+ */ +#define MC_CMD_LINK_STATE_OUT_LINK_TECHNOLOGY_OFST 8 +#define MC_CMD_LINK_STATE_OUT_LINK_TECHNOLOGY_LEN 2 +/* enum property: value */ +/* Enum values, see field(s): */ +/* MC_CMD_ETH_TECH/TECH */ +/* Configured FEC mode */ +#define MC_CMD_LINK_STATE_OUT_FEC_MODE_OFST 10 +#define MC_CMD_LINK_STATE_OUT_FEC_MODE_LEN 1 +/* enum property: value */ +/* Enum values, see field(s): */ +/* FEC_TYPE/TYPE */ +/* Bitmask of auto-negotiated pause modes */ +#define MC_CMD_LINK_STATE_OUT_PAUSE_MASK_OFST 11 +#define MC_CMD_LINK_STATE_OUT_PAUSE_MASK_LEN 1 +/* enum property: bitshift */ +/* Enum values, see field(s): */ +/* MC_CMD_PAUSE_MODE/TYPE */ +/* Configured loopback mode */ +#define MC_CMD_LINK_STATE_OUT_LOOPBACK_OFST 12 +#define MC_CMD_LINK_STATE_OUT_LOOPBACK_LEN 1 +/* enum property: value */ +/* Enum values, see field(s): */ +/* MC_CMD_LOOPBACK_V2/MODE */ +/* Abilities requested by the driver to advertise during auto-negotiation */ +#define MC_CMD_LINK_STATE_OUT_ADVERTISED_ABILITIES_OFST 16 +#define MC_CMD_LINK_STATE_OUT_ADVERTISED_ABILITIES_LEN 32 +/* See structuredef: MC_CMD_ETH_AN_FIELDS */ +#define MC_CMD_LINK_STATE_OUT_ADVERTISED_ABILITIES_TECH_MASK_OFST 16 +#define MC_CMD_LINK_STATE_OUT_ADVERTISED_ABILITIES_TECH_MASK_LEN 16 +#define MC_CMD_LINK_STATE_OUT_ADVERTISED_ABILITIES_FEC_MASK_OFST 32 +#define MC_CMD_LINK_STATE_OUT_ADVERTISED_ABILITIES_FEC_MASK_LEN 4 +#define MC_CMD_LINK_STATE_OUT_ADVERTISED_ABILITIES_FEC_REQ_OFST 36 +#define MC_CMD_LINK_STATE_OUT_ADVERTISED_ABILITIES_FEC_REQ_LEN 4 +#define MC_CMD_LINK_STATE_OUT_ADVERTISED_ABILITIES_PAUSE_MASK_OFST 40 +#define MC_CMD_LINK_STATE_OUT_ADVERTISED_ABILITIES_PAUSE_MASK_LEN 1 +/* Abilities advertised by the link partner during auto-negotiation */ +#define MC_CMD_LINK_STATE_OUT_LINK_PARTNER_ABILITIES_OFST 48 +#define MC_CMD_LINK_STATE_OUT_LINK_PARTNER_ABILITIES_LEN 32 +/* See structuredef: MC_CMD_ETH_AN_FIELDS */ +#define MC_CMD_LINK_STATE_OUT_LINK_PARTNER_ABILITIES_TECH_MASK_OFST 48 +#define MC_CMD_LINK_STATE_OUT_LINK_PARTNER_ABILITIES_TECH_MASK_LEN 16 +#define MC_CMD_LINK_STATE_OUT_LINK_PARTNER_ABILITIES_FEC_MASK_OFST 64 +#define MC_CMD_LINK_STATE_OUT_LINK_PARTNER_ABILITIES_FEC_MASK_LEN 4 +#define MC_CMD_LINK_STATE_OUT_LINK_PARTNER_ABILITIES_FEC_REQ_OFST 68 +#define MC_CMD_LINK_STATE_OUT_LINK_PARTNER_ABILITIES_FEC_REQ_LEN 4 +#define MC_CMD_LINK_STATE_OUT_LINK_PARTNER_ABILITIES_PAUSE_MASK_OFST 72 +#define MC_CMD_LINK_STATE_OUT_LINK_PARTNER_ABILITIES_PAUSE_MASK_LEN 1 +/* Abilities supported by the local device (including cable abilities). For + * fixed local device capabilities see MC_CMD_GET_LOCAL_DEVICE_INFO + */ +#define MC_CMD_LINK_STATE_OUT_SUPPORTED_ABILITIES_OFST 80 +#define MC_CMD_LINK_STATE_OUT_SUPPORTED_ABILITIES_LEN 28 +/* See structuredef: MC_CMD_ETH_AN_FIELDS */ +#define MC_CMD_LINK_STATE_OUT_SUPPORTED_ABILITIES_TECH_MASK_OFST 80 +#define MC_CMD_LINK_STATE_OUT_SUPPORTED_ABILITIES_TECH_MASK_LEN 16 +#define MC_CMD_LINK_STATE_OUT_SUPPORTED_ABILITIES_FEC_MASK_OFST 96 +#define MC_CMD_LINK_STATE_OUT_SUPPORTED_ABILITIES_FEC_MASK_LEN 4 +#define MC_CMD_LINK_STATE_OUT_SUPPORTED_ABILITIES_FEC_REQ_OFST 100 +#define MC_CMD_LINK_STATE_OUT_SUPPORTED_ABILITIES_FEC_REQ_LEN 4 +#define MC_CMD_LINK_STATE_OUT_SUPPORTED_ABILITIES_PAUSE_MASK_OFST 104 +#define MC_CMD_LINK_STATE_OUT_SUPPORTED_ABILITIES_PAUSE_MASK_LEN 1 +/* Control flags */ +#define MC_CMD_LINK_STATE_OUT_CONTROL_FLAGS_OFST 108 +#define MC_CMD_LINK_STATE_OUT_CONTROL_FLAGS_LEN 4 +/* enum property: bitshift */ +/* Enum values, see field(s): */ +/* MC_CMD_LINK_FLAGS/MASK */ +/* Sequence number to synchronize link change events */ +#define MC_CMD_LINK_STATE_OUT_PORT_LINKCHANGE_SEQ_NUM_OFST 112 +#define MC_CMD_LINK_STATE_OUT_PORT_LINKCHANGE_SEQ_NUM_LEN 1 +/* Sequence number to synchronize module change events */ +#define MC_CMD_LINK_STATE_OUT_PORT_MODULECHANGE_SEQ_NUM_OFST 113 +#define MC_CMD_LINK_STATE_OUT_PORT_MODULECHANGE_SEQ_NUM_LEN 1
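+
+/* Usage sketch (illustrative only, not part of the generated MCDI
+ * definitions): assuming a little-endian MC_CMD_LINK_STATE_OUT response held
+ * in a byte buffer outbuf, the 64-bit STATUS_FLAGS field is rebuilt from its
+ * LO/HI dwords and tested against the MC_CMD_LINK_STATUS_FLAGS enums, which
+ * are bit indices ("enum property: bitshift") rather than values:
+ *
+ *	u64 flags = (u64)le32_to_cpu(*(const __le32 *)(outbuf +
+ *			MC_CMD_LINK_STATE_OUT_STATUS_FLAGS_LO_OFST)) |
+ *		(u64)le32_to_cpu(*(const __le32 *)(outbuf +
+ *			MC_CMD_LINK_STATE_OUT_STATUS_FLAGS_HI_OFST)) << 32;
+ *	bool link_up = flags & (1ULL << MC_CMD_LINK_STATUS_FLAGS_LINK_UP);
+ */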
+ +/* MC_CMD_LINK_STATE_OUT_V2 msgresponse: Updated LINK_STATE_OUT with + * LOCAL_AN_SUPPORT + */ +#define MC_CMD_LINK_STATE_OUT_V2_LEN 120 +/* Flags used to report the current configuration/state of the link. */ +#define MC_CMD_LINK_STATE_OUT_V2_STATUS_FLAGS_OFST 0 +#define MC_CMD_LINK_STATE_OUT_V2_STATUS_FLAGS_LEN 8 +#define MC_CMD_LINK_STATE_OUT_V2_STATUS_FLAGS_LO_OFST 0 +#define MC_CMD_LINK_STATE_OUT_V2_STATUS_FLAGS_LO_LEN 4 +#define MC_CMD_LINK_STATE_OUT_V2_STATUS_FLAGS_LO_LBN 0 +#define MC_CMD_LINK_STATE_OUT_V2_STATUS_FLAGS_LO_WIDTH 32 +#define MC_CMD_LINK_STATE_OUT_V2_STATUS_FLAGS_HI_OFST 4 +#define MC_CMD_LINK_STATE_OUT_V2_STATUS_FLAGS_HI_LEN 4 +#define MC_CMD_LINK_STATE_OUT_V2_STATUS_FLAGS_HI_LBN 32 +#define MC_CMD_LINK_STATE_OUT_V2_STATUS_FLAGS_HI_WIDTH 32 +/* enum property: value */ +/* Enum values, see field(s): */ +/* MC_CMD_LINK_STATUS_FLAGS/STATUS_FLAGS */ +/* Configured technology. If the specified value does not align with the values + * defined in the enum MC_CMD_ETH_TECH/TECH, it is considered invalid. + */ +#define MC_CMD_LINK_STATE_OUT_V2_LINK_TECHNOLOGY_OFST 8 +#define MC_CMD_LINK_STATE_OUT_V2_LINK_TECHNOLOGY_LEN 2 +/* enum property: value */ +/* Enum values, see field(s): */ +/* MC_CMD_ETH_TECH/TECH */ +/* Configured FEC mode */ +#define MC_CMD_LINK_STATE_OUT_V2_FEC_MODE_OFST 10 +#define MC_CMD_LINK_STATE_OUT_V2_FEC_MODE_LEN 1 +/* enum property: value */ +/* Enum values, see field(s): */ +/* FEC_TYPE/TYPE */ +/* Bitmask of auto-negotiated pause modes */ +#define MC_CMD_LINK_STATE_OUT_V2_PAUSE_MASK_OFST 11 +#define MC_CMD_LINK_STATE_OUT_V2_PAUSE_MASK_LEN 1 +/* enum property: bitshift */ +/* Enum values, see field(s): */ +/* MC_CMD_PAUSE_MODE/TYPE */ +/* Configured loopback mode */ +#define MC_CMD_LINK_STATE_OUT_V2_LOOPBACK_OFST 12 +#define MC_CMD_LINK_STATE_OUT_V2_LOOPBACK_LEN 1 +/* enum property: value */ +/* Enum values, see field(s): */ +/* MC_CMD_LOOPBACK_V2/MODE */ +/* Abilities requested by the driver to advertise during auto-negotiation */ +#define MC_CMD_LINK_STATE_OUT_V2_ADVERTISED_ABILITIES_OFST 16 +#define MC_CMD_LINK_STATE_OUT_V2_ADVERTISED_ABILITIES_LEN 32 +/* Abilities advertised by the link partner during auto-negotiation */ +#define MC_CMD_LINK_STATE_OUT_V2_LINK_PARTNER_ABILITIES_OFST 48 +#define MC_CMD_LINK_STATE_OUT_V2_LINK_PARTNER_ABILITIES_LEN 32 +/* Abilities supported by the local device (including cable abilities). For + * fixed local device capabilities see MC_CMD_GET_LOCAL_DEVICE_INFO + */ +#define MC_CMD_LINK_STATE_OUT_V2_SUPPORTED_ABILITIES_OFST 80 +#define MC_CMD_LINK_STATE_OUT_V2_SUPPORTED_ABILITIES_LEN 28 +/* Control flags */ +#define MC_CMD_LINK_STATE_OUT_V2_CONTROL_FLAGS_OFST 108 +#define MC_CMD_LINK_STATE_OUT_V2_CONTROL_FLAGS_LEN 4 +/* enum property: bitshift */ +/* Enum values, see field(s): */ +/* MC_CMD_LINK_FLAGS/MASK */ +/* Sequence number to synchronize link change events */ +#define MC_CMD_LINK_STATE_OUT_V2_PORT_LINKCHANGE_SEQ_NUM_OFST 112 +#define MC_CMD_LINK_STATE_OUT_V2_PORT_LINKCHANGE_SEQ_NUM_LEN 1 +/* Sequence number to synchronize module change events */ +#define MC_CMD_LINK_STATE_OUT_V2_PORT_MODULECHANGE_SEQ_NUM_OFST 113 +#define MC_CMD_LINK_STATE_OUT_V2_PORT_MODULECHANGE_SEQ_NUM_LEN 1 +/* Reports the auto-negotiation supported by the local device. This depends on + * the port and module properties. + */ +#define MC_CMD_LINK_STATE_OUT_V2_LOCAL_AN_SUPPORT_OFST 116 +#define MC_CMD_LINK_STATE_OUT_V2_LOCAL_AN_SUPPORT_LEN 4 +/* Enum values, see field(s): */ +/* AN_TYPE/TYPE */
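+
+/* Usage sketch (illustrative only): the 16-byte TECH_MASK fields above treat
+ * the MC_CMD_ETH_TECH enums as bit indices ("enum property: bitshift"). A
+ * hypothetical helper that sets one advertised technology in an
+ * MC_CMD_LINK_CTRL_IN request buffer inbuf, assuming little-endian bit
+ * numbering within the mask bytes, might look like:
+ *
+ *	static void mcdi_set_tech_bit(u8 *mask, unsigned int tech)
+ *	{
+ *		mask[tech / 8] |= 1U << (tech % 8);
+ *	}
+ *
+ *	mcdi_set_tech_bit(inbuf +
+ *		MC_CMD_LINK_CTRL_IN_ADVERTISED_TECH_ABILITIES_MASK_OFST,
+ *		MC_CMD_ETH_TECH_100GBASE_CR4);
+ */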
+/* MC_CMD_LINK_STATE_OUT_V3 msgresponse: Updated LINK_STATE_OUT_V2 for explicit
+ * reporting of the link speed and duplex mode.
+ */
+#define MC_CMD_LINK_STATE_OUT_V3_LEN 128
+/* Flags used to report the current configuration/state of the link. */
+#define MC_CMD_LINK_STATE_OUT_V3_STATUS_FLAGS_OFST 0
+#define MC_CMD_LINK_STATE_OUT_V3_STATUS_FLAGS_LEN 8
+#define MC_CMD_LINK_STATE_OUT_V3_STATUS_FLAGS_LO_OFST 0
+#define MC_CMD_LINK_STATE_OUT_V3_STATUS_FLAGS_LO_LEN 4
+#define MC_CMD_LINK_STATE_OUT_V3_STATUS_FLAGS_LO_LBN 0
+#define MC_CMD_LINK_STATE_OUT_V3_STATUS_FLAGS_LO_WIDTH 32
+#define MC_CMD_LINK_STATE_OUT_V3_STATUS_FLAGS_HI_OFST 4
+#define MC_CMD_LINK_STATE_OUT_V3_STATUS_FLAGS_HI_LEN 4
+#define MC_CMD_LINK_STATE_OUT_V3_STATUS_FLAGS_HI_LBN 32
+#define MC_CMD_LINK_STATE_OUT_V3_STATUS_FLAGS_HI_WIDTH 32
+/* enum property: value */
+/* Enum values, see field(s): */
+/* MC_CMD_LINK_STATUS_FLAGS/STATUS_FLAGS */
+/* Configured technology. If the specified value does not align with the values
+ * defined in the enum MC_CMD_ETH_TECH/TECH, it is considered invalid.
+ */
+#define MC_CMD_LINK_STATE_OUT_V3_LINK_TECHNOLOGY_OFST 8
+#define MC_CMD_LINK_STATE_OUT_V3_LINK_TECHNOLOGY_LEN 2
+/* enum property: value */
+/* Enum values, see field(s): */
+/* MC_CMD_ETH_TECH/TECH */
+/* Configured FEC mode */
+#define MC_CMD_LINK_STATE_OUT_V3_FEC_MODE_OFST 10
+#define MC_CMD_LINK_STATE_OUT_V3_FEC_MODE_LEN 1
+/* enum property: value */
+/* Enum values, see field(s): */
+/* FEC_TYPE/TYPE */
+/* Bitmask of auto-negotiated pause modes */
+#define MC_CMD_LINK_STATE_OUT_V3_PAUSE_MASK_OFST 11
+#define MC_CMD_LINK_STATE_OUT_V3_PAUSE_MASK_LEN 1
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_PAUSE_MODE/TYPE */
+/* Configured loopback mode */
+#define MC_CMD_LINK_STATE_OUT_V3_LOOPBACK_OFST 12
+#define MC_CMD_LINK_STATE_OUT_V3_LOOPBACK_LEN 1
+/* enum property: value */
+/* Enum values, see field(s): */
+/* MC_CMD_LOOPBACK_V2/MODE */
+/* Abilities requested by the driver to advertise during auto-negotiation */
+#define MC_CMD_LINK_STATE_OUT_V3_ADVERTISED_ABILITIES_OFST 16
+#define MC_CMD_LINK_STATE_OUT_V3_ADVERTISED_ABILITIES_LEN 32
+/* Abilities advertised by the link partner during auto-negotiation */
+#define MC_CMD_LINK_STATE_OUT_V3_LINK_PARTNER_ABILITIES_OFST 48
+#define MC_CMD_LINK_STATE_OUT_V3_LINK_PARTNER_ABILITIES_LEN 32
+/* Abilities supported by the local device (including cable abilities). For
+ * fixed local device capabilities see MC_CMD_GET_LOCAL_DEVICE_INFO
+ */
+#define MC_CMD_LINK_STATE_OUT_V3_SUPPORTED_ABILITIES_OFST 80
+#define MC_CMD_LINK_STATE_OUT_V3_SUPPORTED_ABILITIES_LEN 28
+/* Control flags */
+#define MC_CMD_LINK_STATE_OUT_V3_CONTROL_FLAGS_OFST 108
+#define MC_CMD_LINK_STATE_OUT_V3_CONTROL_FLAGS_LEN 4
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_LINK_FLAGS/MASK */
+/* Sequence number to synchronize link change events */
+#define MC_CMD_LINK_STATE_OUT_V3_PORT_LINKCHANGE_SEQ_NUM_OFST 112
+#define MC_CMD_LINK_STATE_OUT_V3_PORT_LINKCHANGE_SEQ_NUM_LEN 1
+/* Sequence number to synchronize module change events */
+#define MC_CMD_LINK_STATE_OUT_V3_PORT_MODULECHANGE_SEQ_NUM_OFST 113
+#define MC_CMD_LINK_STATE_OUT_V3_PORT_MODULECHANGE_SEQ_NUM_LEN 1
+/* Reports the auto-negotiation supported by the local device. This depends on
+ * the port and module properties.
+ */
+#define MC_CMD_LINK_STATE_OUT_V3_LOCAL_AN_SUPPORT_OFST 116
+#define MC_CMD_LINK_STATE_OUT_V3_LOCAL_AN_SUPPORT_LEN 4
+/* Enum values, see field(s): */
+/* AN_TYPE/TYPE */
+/* Autonegotiated speed in mbit/s. The link may still be down even if this
+ * reads non-zero. The LINK_SPEED field is intended to be used by drivers
+ * without the most up-to-date MCDI definitions, which are unable to deduce
+ * the link speed from the reported LINK_TECHNOLOGY field.
+ */
+#define MC_CMD_LINK_STATE_OUT_V3_LINK_SPEED_OFST 120
+#define MC_CMD_LINK_STATE_OUT_V3_LINK_SPEED_LEN 4
+#define MC_CMD_LINK_STATE_OUT_V3_FLAGS_OFST 124
+#define MC_CMD_LINK_STATE_OUT_V3_FLAGS_LEN 4
+#define MC_CMD_LINK_STATE_OUT_V3_FULL_DUPLEX_OFST 124
+#define MC_CMD_LINK_STATE_OUT_V3_FULL_DUPLEX_LBN 0
+#define MC_CMD_LINK_STATE_OUT_V3_FULL_DUPLEX_WIDTH 1
+
+
+/***********************************/
+/* MC_CMD_GET_LINK
+ * Read the unified MAC/PHY link state. Locks required: None. Return code: 0,
+ * ETIME.
+ */
+#define MC_CMD_GET_LINK 0x29
+#undef MC_CMD_0x29_PRIVILEGE_CTG
+
+#define MC_CMD_0x29_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_LINK_IN msgrequest */
+#define MC_CMD_GET_LINK_IN_LEN 0
+
+/* MC_CMD_GET_LINK_IN_V2 msgrequest */
+#define MC_CMD_GET_LINK_IN_V2_LEN 8
+/* Target port to request link state for. Uses MAE_LINK_ENDPOINT_SELECTOR which
+ * identifies a real or virtual network port by MAE port and link end. See the
+ * structure definition for more details.
+ */
+#define MC_CMD_GET_LINK_IN_V2_TARGET_OFST 0
+#define MC_CMD_GET_LINK_IN_V2_TARGET_LEN 8
+#define MC_CMD_GET_LINK_IN_V2_TARGET_LO_OFST 0
+#define MC_CMD_GET_LINK_IN_V2_TARGET_LO_LEN 4
+#define MC_CMD_GET_LINK_IN_V2_TARGET_LO_LBN 0
+#define MC_CMD_GET_LINK_IN_V2_TARGET_LO_WIDTH 32
+#define MC_CMD_GET_LINK_IN_V2_TARGET_HI_OFST 4
+#define MC_CMD_GET_LINK_IN_V2_TARGET_HI_LEN 4
+#define MC_CMD_GET_LINK_IN_V2_TARGET_HI_LBN 32
+#define MC_CMD_GET_LINK_IN_V2_TARGET_HI_WIDTH 32
+/* See structuredef: MAE_LINK_ENDPOINT_SELECTOR */
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_OFST 0
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_LEN 4
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_FLAT_OFST 0
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_FLAT_LEN 4
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_TYPE_OFST 3
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_TYPE_LEN 1
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_MPORT_ID_OFST 0
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_MPORT_ID_LEN 3
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_PPORT_ID_LBN 0
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_PPORT_ID_WIDTH 4
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_FUNC_INTF_ID_LBN 20
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_FUNC_INTF_ID_WIDTH 4
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_FUNC_MH_PF_ID_LBN 16
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_FUNC_MH_PF_ID_WIDTH 4
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_FUNC_PF_ID_OFST 2
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_FUNC_PF_ID_LEN 1
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_FUNC_VF_ID_OFST 0
+#define MC_CMD_GET_LINK_IN_V2_TARGET_MPORT_SELECTOR_FUNC_VF_ID_LEN 2
+#define MC_CMD_GET_LINK_IN_V2_TARGET_LINK_END_OFST 4
+#define MC_CMD_GET_LINK_IN_V2_TARGET_LINK_END_LEN 4
+#define MC_CMD_GET_LINK_IN_V2_TARGET_FLAT_OFST 0
+#define 
MC_CMD_GET_LINK_IN_V2_TARGET_FLAT_LEN 8 +#define MC_CMD_GET_LINK_IN_V2_TARGET_FLAT_LO_OFST 0 +#define MC_CMD_GET_LINK_IN_V2_TARGET_FLAT_LO_LEN 4 +#define MC_CMD_GET_LINK_IN_V2_TARGET_FLAT_LO_LBN 0 +#define MC_CMD_GET_LINK_IN_V2_TARGET_FLAT_LO_WIDTH 32 +#define MC_CMD_GET_LINK_IN_V2_TARGET_FLAT_HI_OFST 4 +#define MC_CMD_GET_LINK_IN_V2_TARGET_FLAT_HI_LEN 4 +#define MC_CMD_GET_LINK_IN_V2_TARGET_FLAT_HI_LBN 32 +#define MC_CMD_GET_LINK_IN_V2_TARGET_FLAT_HI_WIDTH 32 + +/* MC_CMD_GET_LINK_OUT msgresponse */ +#define MC_CMD_GET_LINK_OUT_LEN 28 /* Near-side advertised capabilities. Refer to * MC_CMD_GET_PHY_CFG_OUT/SUPPORTED_CAP for bit definitions. */ -#define MC_CMD_GET_LINK_OUT_V2_CAP_OFST 0 -#define MC_CMD_GET_LINK_OUT_V2_CAP_LEN 4 +#define MC_CMD_GET_LINK_OUT_CAP_OFST 0 +#define MC_CMD_GET_LINK_OUT_CAP_LEN 4 /* Link-partner advertised capabilities. Refer to * MC_CMD_GET_PHY_CFG_OUT/SUPPORTED_CAP for bit definitions. */ -#define MC_CMD_GET_LINK_OUT_V2_LP_CAP_OFST 4 -#define MC_CMD_GET_LINK_OUT_V2_LP_CAP_LEN 4 +#define MC_CMD_GET_LINK_OUT_LP_CAP_OFST 4 +#define MC_CMD_GET_LINK_OUT_LP_CAP_LEN 4 /* Autonegotiated speed in mbit/s. The link may still be down even if this * reads non-zero. */ -#define MC_CMD_GET_LINK_OUT_V2_LINK_SPEED_OFST 8 -#define MC_CMD_GET_LINK_OUT_V2_LINK_SPEED_LEN 4 +#define MC_CMD_GET_LINK_OUT_LINK_SPEED_OFST 8 +#define MC_CMD_GET_LINK_OUT_LINK_SPEED_LEN 4 /* Current loopback setting. */ -#define MC_CMD_GET_LINK_OUT_V2_LOOPBACK_MODE_OFST 12 -#define MC_CMD_GET_LINK_OUT_V2_LOOPBACK_MODE_LEN 4 +#define MC_CMD_GET_LINK_OUT_LOOPBACK_MODE_OFST 12 +#define MC_CMD_GET_LINK_OUT_LOOPBACK_MODE_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */ +#define MC_CMD_GET_LINK_OUT_FLAGS_OFST 16 +#define MC_CMD_GET_LINK_OUT_FLAGS_LEN 4 +#define MC_CMD_GET_LINK_OUT_LINK_UP_OFST 16 +#define MC_CMD_GET_LINK_OUT_LINK_UP_LBN 0 +#define MC_CMD_GET_LINK_OUT_LINK_UP_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_FULL_DUPLEX_OFST 16 +#define MC_CMD_GET_LINK_OUT_FULL_DUPLEX_LBN 1 +#define MC_CMD_GET_LINK_OUT_FULL_DUPLEX_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_BPX_LINK_OFST 16 +#define MC_CMD_GET_LINK_OUT_BPX_LINK_LBN 2 +#define MC_CMD_GET_LINK_OUT_BPX_LINK_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_PHY_LINK_OFST 16 +#define MC_CMD_GET_LINK_OUT_PHY_LINK_LBN 3 +#define MC_CMD_GET_LINK_OUT_PHY_LINK_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_LINK_FAULT_RX_OFST 16 +#define MC_CMD_GET_LINK_OUT_LINK_FAULT_RX_LBN 6 +#define MC_CMD_GET_LINK_OUT_LINK_FAULT_RX_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_LINK_FAULT_TX_OFST 16 +#define MC_CMD_GET_LINK_OUT_LINK_FAULT_TX_LBN 7 +#define MC_CMD_GET_LINK_OUT_LINK_FAULT_TX_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_MODULE_UP_VALID_OFST 16 +#define MC_CMD_GET_LINK_OUT_MODULE_UP_VALID_LBN 8 +#define MC_CMD_GET_LINK_OUT_MODULE_UP_VALID_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_MODULE_UP_OFST 16 +#define MC_CMD_GET_LINK_OUT_MODULE_UP_LBN 9 +#define MC_CMD_GET_LINK_OUT_MODULE_UP_WIDTH 1 +/* This returns the negotiated flow control value. 
*/ +#define MC_CMD_GET_LINK_OUT_FCNTL_OFST 20 +#define MC_CMD_GET_LINK_OUT_FCNTL_LEN 4 +/* enum property: value */ +/* Enum values, see field(s): */ +/* MC_CMD_SET_MAC/MC_CMD_SET_MAC_IN/FCNTL */ +#define MC_CMD_GET_LINK_OUT_MAC_FAULT_OFST 24 +#define MC_CMD_GET_LINK_OUT_MAC_FAULT_LEN 4 +#define MC_CMD_MAC_FAULT_XGMII_LOCAL_OFST 24 +#define MC_CMD_MAC_FAULT_XGMII_LOCAL_LBN 0 +#define MC_CMD_MAC_FAULT_XGMII_LOCAL_WIDTH 1 +#define MC_CMD_MAC_FAULT_XGMII_REMOTE_OFST 24 +#define MC_CMD_MAC_FAULT_XGMII_REMOTE_LBN 1 +#define MC_CMD_MAC_FAULT_XGMII_REMOTE_WIDTH 1 +#define MC_CMD_MAC_FAULT_SGMII_REMOTE_OFST 24 +#define MC_CMD_MAC_FAULT_SGMII_REMOTE_LBN 2 +#define MC_CMD_MAC_FAULT_SGMII_REMOTE_WIDTH 1 +#define MC_CMD_MAC_FAULT_PENDING_RECONFIG_OFST 24 +#define MC_CMD_MAC_FAULT_PENDING_RECONFIG_LBN 3 +#define MC_CMD_MAC_FAULT_PENDING_RECONFIG_WIDTH 1 + +/* MC_CMD_GET_LINK_OUT_V2 msgresponse: Extended link state information */ +#define MC_CMD_GET_LINK_OUT_V2_LEN 44 +/* Near-side advertised capabilities. Refer to + * MC_CMD_GET_PHY_CFG_OUT/SUPPORTED_CAP for bit definitions. + */ +#define MC_CMD_GET_LINK_OUT_V2_CAP_OFST 0 +#define MC_CMD_GET_LINK_OUT_V2_CAP_LEN 4 +/* Link-partner advertised capabilities. Refer to + * MC_CMD_GET_PHY_CFG_OUT/SUPPORTED_CAP for bit definitions. + */ +#define MC_CMD_GET_LINK_OUT_V2_LP_CAP_OFST 4 +#define MC_CMD_GET_LINK_OUT_V2_LP_CAP_LEN 4 +/* Autonegotiated speed in mbit/s. The link may still be down even if this + * reads non-zero. + */ +#define MC_CMD_GET_LINK_OUT_V2_LINK_SPEED_OFST 8 +#define MC_CMD_GET_LINK_OUT_V2_LINK_SPEED_LEN 4 +/* Current loopback setting. */ +#define MC_CMD_GET_LINK_OUT_V2_LOOPBACK_MODE_OFST 12 +#define MC_CMD_GET_LINK_OUT_V2_LOOPBACK_MODE_LEN 4 /* Enum values, see field(s): */ /* MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */ #define MC_CMD_GET_LINK_OUT_V2_FLAGS_OFST 16 @@ -5813,6 +6232,7 @@ /* This returns the negotiated flow control value. */ #define MC_CMD_GET_LINK_OUT_V2_FCNTL_OFST 20 #define MC_CMD_GET_LINK_OUT_V2_FCNTL_LEN 4 +/* enum property: value */ /* Enum values, see field(s): */ /* MC_CMD_SET_MAC/MC_CMD_SET_MAC_IN/FCNTL */ #define MC_CMD_GET_LINK_OUT_V2_MAC_FAULT_OFST 24 @@ -5969,6 +6389,95 @@ #define MC_CMD_SET_LINK_IN_V2_MODULE_SEQ_IGNORE_LBN 7 #define MC_CMD_SET_LINK_IN_V2_MODULE_SEQ_IGNORE_WIDTH 1 +/* MC_CMD_SET_LINK_IN_V3 msgrequest */ +#define MC_CMD_SET_LINK_IN_V3_LEN 28 +/* Near-side advertised capabilities. Refer to + * MC_CMD_GET_PHY_CFG_OUT/SUPPORTED_CAP for bit definitions. + */ +#define MC_CMD_SET_LINK_IN_V3_CAP_OFST 0 +#define MC_CMD_SET_LINK_IN_V3_CAP_LEN 4 +/* Flags */ +#define MC_CMD_SET_LINK_IN_V3_FLAGS_OFST 4 +#define MC_CMD_SET_LINK_IN_V3_FLAGS_LEN 4 +#define MC_CMD_SET_LINK_IN_V3_LOWPOWER_OFST 4 +#define MC_CMD_SET_LINK_IN_V3_LOWPOWER_LBN 0 +#define MC_CMD_SET_LINK_IN_V3_LOWPOWER_WIDTH 1 +#define MC_CMD_SET_LINK_IN_V3_POWEROFF_OFST 4 +#define MC_CMD_SET_LINK_IN_V3_POWEROFF_LBN 1 +#define MC_CMD_SET_LINK_IN_V3_POWEROFF_WIDTH 1 +#define MC_CMD_SET_LINK_IN_V3_TXDIS_OFST 4 +#define MC_CMD_SET_LINK_IN_V3_TXDIS_LBN 2 +#define MC_CMD_SET_LINK_IN_V3_TXDIS_WIDTH 1 +#define MC_CMD_SET_LINK_IN_V3_LINKDOWN_OFST 4 +#define MC_CMD_SET_LINK_IN_V3_LINKDOWN_LBN 3 +#define MC_CMD_SET_LINK_IN_V3_LINKDOWN_WIDTH 1 +/* Loopback mode. 
*/ +#define MC_CMD_SET_LINK_IN_V3_LOOPBACK_MODE_OFST 8 +#define MC_CMD_SET_LINK_IN_V3_LOOPBACK_MODE_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */ +/* A loopback speed of "0" is supported, and means (choose any available + * speed). + */ +#define MC_CMD_SET_LINK_IN_V3_LOOPBACK_SPEED_OFST 12 +#define MC_CMD_SET_LINK_IN_V3_LOOPBACK_SPEED_LEN 4 +#define MC_CMD_SET_LINK_IN_V3_MODULE_SEQ_OFST 16 +#define MC_CMD_SET_LINK_IN_V3_MODULE_SEQ_LEN 1 +#define MC_CMD_SET_LINK_IN_V3_MODULE_SEQ_NUMBER_OFST 16 +#define MC_CMD_SET_LINK_IN_V3_MODULE_SEQ_NUMBER_LBN 0 +#define MC_CMD_SET_LINK_IN_V3_MODULE_SEQ_NUMBER_WIDTH 7 +#define MC_CMD_SET_LINK_IN_V3_MODULE_SEQ_IGNORE_OFST 16 +#define MC_CMD_SET_LINK_IN_V3_MODULE_SEQ_IGNORE_LBN 7 +#define MC_CMD_SET_LINK_IN_V3_MODULE_SEQ_IGNORE_WIDTH 1 +/* Padding */ +#define MC_CMD_SET_LINK_IN_V3_RESERVED_OFST 17 +#define MC_CMD_SET_LINK_IN_V3_RESERVED_LEN 3 +/* Target port to set link state for. Uses MAE_LINK_ENDPOINT_SELECTOR which + * identifies a real or virtual network port by MAE port and link end. See the + * structure definition for more details + */ +#define MC_CMD_SET_LINK_IN_V3_TARGET_OFST 20 +#define MC_CMD_SET_LINK_IN_V3_TARGET_LEN 8 +#define MC_CMD_SET_LINK_IN_V3_TARGET_LO_OFST 20 +#define MC_CMD_SET_LINK_IN_V3_TARGET_LO_LEN 4 +#define MC_CMD_SET_LINK_IN_V3_TARGET_LO_LBN 160 +#define MC_CMD_SET_LINK_IN_V3_TARGET_LO_WIDTH 32 +#define MC_CMD_SET_LINK_IN_V3_TARGET_HI_OFST 24 +#define MC_CMD_SET_LINK_IN_V3_TARGET_HI_LEN 4 +#define MC_CMD_SET_LINK_IN_V3_TARGET_HI_LBN 192 +#define MC_CMD_SET_LINK_IN_V3_TARGET_HI_WIDTH 32 +/* See structuredef: MAE_LINK_ENDPOINT_SELECTOR */ +#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_OFST 20 +#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_LEN 4 +#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_FLAT_OFST 20 +#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_FLAT_LEN 4 +#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_TYPE_OFST 23 +#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_TYPE_LEN 1 +#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_MPORT_ID_OFST 20 +#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_MPORT_ID_LEN 3 +#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_PPORT_ID_LBN 160 +#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_PPORT_ID_WIDTH 4 +#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_FUNC_INTF_ID_LBN 180 +#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_FUNC_INTF_ID_WIDTH 4 +#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_FUNC_MH_PF_ID_LBN 176 +#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_FUNC_MH_PF_ID_WIDTH 4 +#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_FUNC_PF_ID_OFST 22 +#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_FUNC_PF_ID_LEN 1 +#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_FUNC_VF_ID_OFST 20 +#define MC_CMD_SET_LINK_IN_V3_TARGET_MPORT_SELECTOR_FUNC_VF_ID_LEN 2 +#define MC_CMD_SET_LINK_IN_V3_TARGET_LINK_END_OFST 24 +#define MC_CMD_SET_LINK_IN_V3_TARGET_LINK_END_LEN 4 +#define MC_CMD_SET_LINK_IN_V3_TARGET_FLAT_OFST 20 +#define MC_CMD_SET_LINK_IN_V3_TARGET_FLAT_LEN 8 +#define MC_CMD_SET_LINK_IN_V3_TARGET_FLAT_LO_OFST 20 +#define MC_CMD_SET_LINK_IN_V3_TARGET_FLAT_LO_LEN 4 +#define MC_CMD_SET_LINK_IN_V3_TARGET_FLAT_LO_LBN 160 +#define MC_CMD_SET_LINK_IN_V3_TARGET_FLAT_LO_WIDTH 32 +#define MC_CMD_SET_LINK_IN_V3_TARGET_FLAT_HI_OFST 24 +#define MC_CMD_SET_LINK_IN_V3_TARGET_FLAT_HI_LEN 4 +#define MC_CMD_SET_LINK_IN_V3_TARGET_FLAT_HI_LBN 192 +#define 
MC_CMD_SET_LINK_IN_V3_TARGET_FLAT_HI_WIDTH 32
+
 /* MC_CMD_SET_LINK_OUT msgresponse */
 #define MC_CMD_SET_LINK_OUT_LEN 0

@@ -6034,17 +6543,17 @@
 #define MC_CMD_SET_MAC_IN_FCNTL_OFST 20
 #define MC_CMD_SET_MAC_IN_FCNTL_LEN 4
 /* enum: Flow control is off. */
-#define MC_CMD_FCNTL_OFF 0x0
+/* MC_CMD_FCNTL_OFF 0x0 */
 /* enum: Respond to flow control. */
-#define MC_CMD_FCNTL_RESPOND 0x1
+/* MC_CMD_FCNTL_RESPOND 0x1 */
 /* enum: Respond to and Issue flow control. */
-#define MC_CMD_FCNTL_BIDIR 0x2
-/* enum: Auto neg flow control. */
-#define MC_CMD_FCNTL_AUTO 0x3
-/* enum: Priority flow control (eftest builds only). */
-#define MC_CMD_FCNTL_QBB 0x4
+/* MC_CMD_FCNTL_BIDIR 0x2 */
+/* enum: Auto negotiate flow control. */
+/* MC_CMD_FCNTL_AUTO 0x3 */
+/* enum: Priority flow control. This is only supported on KSB. */
+/* MC_CMD_FCNTL_QBB 0x4 */
 /* enum: Issue flow control. */
-#define MC_CMD_FCNTL_GENERATE 0x5
+/* MC_CMD_FCNTL_GENERATE 0x5 */
 #define MC_CMD_SET_MAC_IN_FLAGS_OFST 24
 #define MC_CMD_SET_MAC_IN_FLAGS_LEN 4
 #define MC_CMD_SET_MAC_IN_FLAG_INCLUDE_FCS_OFST 24
@@ -6086,9 +6595,9 @@
 /* MC_CMD_FCNTL_RESPOND 0x1 */
 /* enum: Respond to and Issue flow control. */
 /* MC_CMD_FCNTL_BIDIR 0x2 */
-/* enum: Auto neg flow control. */
+/* enum: Auto negotiate flow control. */
 /* MC_CMD_FCNTL_AUTO 0x3 */
-/* enum: Priority flow control (eftest builds only). */
+/* enum: Priority flow control. This is only supported on KSB. */
 /* MC_CMD_FCNTL_QBB 0x4 */
 /* enum: Issue flow control. */
 /* MC_CMD_FCNTL_GENERATE 0x5 */
@@ -6155,9 +6664,9 @@
 /* MC_CMD_FCNTL_RESPOND 0x1 */
 /* enum: Respond to and Issue flow control. */
 /* MC_CMD_FCNTL_BIDIR 0x2 */
-/* enum: Auto neg flow control. */
+/* enum: Auto negotiate flow control. */
 /* MC_CMD_FCNTL_AUTO 0x3 */
-/* enum: Priority flow control (eftest builds only). */
+/* enum: Priority flow control. This is only supported on KSB. */
 /* MC_CMD_FCNTL_QBB 0x4 */
 /* enum: Issue flow control. */
 /* MC_CMD_FCNTL_GENERATE 0x5 */
@@ -6188,19 +6697,9 @@
 #define MC_CMD_SET_MAC_V3_IN_CFG_FCS_OFST 28
 #define MC_CMD_SET_MAC_V3_IN_CFG_FCS_LBN 4
 #define MC_CMD_SET_MAC_V3_IN_CFG_FCS_WIDTH 1
-/* Identifies the MAC to update by the specifying the end of a logical MAE
- * link. Setting TARGET to MAE_LINK_ENDPOINT_COMPAT is equivalent to using the
- * previous version of the command (MC_CMD_SET_MAC_EXT). Not all possible
- * combinations of MPORT_END and MPORT_SELECTOR in TARGET will work in all
- * circumstances. 1. Some will always work (e.g. a VF can always address its
- * logical MAC using MPORT_SELECTOR=ASSIGNED,LINK_END=VNIC), 2. Some are not
- * meaningful and will always fail with EINVAL (e.g. attempting to address the
- * VNIC end of a link to a physical port), 3. Some are meaningful but require
- * the MCDI client to have the required permission and fail with EPERM
- * otherwise (e.g. trying to set the MAC on a VF the caller cannot administer),
- * and 4. Some could be implementation-specific and fail with ENOTSUP if not
- * available (no examples exist right now). See SF-123581-TC section 4.3 for
- * more details.
+/* Target port to set MAC state for. Uses MAE_LINK_ENDPOINT_SELECTOR which
+ * identifies a real or virtual network port by MAE port and link end. See the
+ * structure definition for more details.
+ */
 #define MC_CMD_SET_MAC_V3_IN_TARGET_OFST 32
 #define MC_CMD_SET_MAC_V3_IN_TARGET_LEN 8
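As with the other 8-byte fields in this header, TARGET is carried as two little-endian 32-bit halves, with the low half at TARGET_OFST and the high half four bytes later. A sketch of packing a 64-bit MAE_LINK_ENDPOINT_SELECTOR value into a request buffer (put_le32() and req are illustrative, not part of this header):

#include <stdint.h>

/* Store a 32-bit value little-endian at byte offset ofst. */
static void put_le32(uint8_t *req, unsigned int ofst, uint32_t v)
{
        req[ofst + 0] = v & 0xff;
        req[ofst + 1] = (v >> 8) & 0xff;
        req[ofst + 2] = (v >> 16) & 0xff;
        req[ofst + 3] = (v >> 24) & 0xff;
}

/* For example:
 *   put_le32(req, MC_CMD_SET_MAC_V3_IN_TARGET_OFST, (uint32_t)selector);
 *   put_le32(req, MC_CMD_SET_MAC_V3_IN_TARGET_OFST + 4,
 *            (uint32_t)(selector >> 32));
 */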
@@ -6212,6 +6711,7 @@
 #define MC_CMD_SET_MAC_V3_IN_TARGET_HI_LEN 4
 #define MC_CMD_SET_MAC_V3_IN_TARGET_HI_LBN 288
 #define MC_CMD_SET_MAC_V3_IN_TARGET_HI_WIDTH 32
+/* See structuredef: MAE_LINK_ENDPOINT_SELECTOR */
 #define MC_CMD_SET_MAC_V3_IN_TARGET_MPORT_SELECTOR_OFST 32
 #define MC_CMD_SET_MAC_V3_IN_TARGET_MPORT_SELECTOR_LEN 4
 #define MC_CMD_SET_MAC_V3_IN_TARGET_MPORT_SELECTOR_FLAT_OFST 32
@@ -6405,6 +6905,98 @@
 #define MC_CMD_MAC_STATS_IN_PORT_ID_OFST 16
 #define MC_CMD_MAC_STATS_IN_PORT_ID_LEN 4

+/* MC_CMD_MAC_STATS_V2_IN msgrequest */
+#define MC_CMD_MAC_STATS_V2_IN_LEN 28
+/* Address of the statistics DMA buffer in host memory. */
+#define MC_CMD_MAC_STATS_V2_IN_DMA_ADDR_OFST 0
+#define MC_CMD_MAC_STATS_V2_IN_DMA_ADDR_LEN 8
+#define MC_CMD_MAC_STATS_V2_IN_DMA_ADDR_LO_OFST 0
+#define MC_CMD_MAC_STATS_V2_IN_DMA_ADDR_LO_LEN 4
+#define MC_CMD_MAC_STATS_V2_IN_DMA_ADDR_LO_LBN 0
+#define MC_CMD_MAC_STATS_V2_IN_DMA_ADDR_LO_WIDTH 32
+#define MC_CMD_MAC_STATS_V2_IN_DMA_ADDR_HI_OFST 4
+#define MC_CMD_MAC_STATS_V2_IN_DMA_ADDR_HI_LEN 4
+#define MC_CMD_MAC_STATS_V2_IN_DMA_ADDR_HI_LBN 32
+#define MC_CMD_MAC_STATS_V2_IN_DMA_ADDR_HI_WIDTH 32
+#define MC_CMD_MAC_STATS_V2_IN_CMD_OFST 8
+#define MC_CMD_MAC_STATS_V2_IN_CMD_LEN 4
+#define MC_CMD_MAC_STATS_V2_IN_DMA_OFST 8
+#define MC_CMD_MAC_STATS_V2_IN_DMA_LBN 0
+#define MC_CMD_MAC_STATS_V2_IN_DMA_WIDTH 1
+#define MC_CMD_MAC_STATS_V2_IN_CLEAR_OFST 8
+#define MC_CMD_MAC_STATS_V2_IN_CLEAR_LBN 1
+#define MC_CMD_MAC_STATS_V2_IN_CLEAR_WIDTH 1
+#define MC_CMD_MAC_STATS_V2_IN_PERIODIC_CHANGE_OFST 8
+#define MC_CMD_MAC_STATS_V2_IN_PERIODIC_CHANGE_LBN 2
+#define MC_CMD_MAC_STATS_V2_IN_PERIODIC_CHANGE_WIDTH 1
+#define MC_CMD_MAC_STATS_V2_IN_PERIODIC_ENABLE_OFST 8
+#define MC_CMD_MAC_STATS_V2_IN_PERIODIC_ENABLE_LBN 3
+#define MC_CMD_MAC_STATS_V2_IN_PERIODIC_ENABLE_WIDTH 1
+#define MC_CMD_MAC_STATS_V2_IN_PERIODIC_CLEAR_OFST 8
+#define MC_CMD_MAC_STATS_V2_IN_PERIODIC_CLEAR_LBN 4
+#define MC_CMD_MAC_STATS_V2_IN_PERIODIC_CLEAR_WIDTH 1
+#define MC_CMD_MAC_STATS_V2_IN_PERIODIC_NOEVENT_OFST 8
+#define MC_CMD_MAC_STATS_V2_IN_PERIODIC_NOEVENT_LBN 5
+#define MC_CMD_MAC_STATS_V2_IN_PERIODIC_NOEVENT_WIDTH 1
+#define MC_CMD_MAC_STATS_V2_IN_PERIOD_MS_OFST 8
+#define MC_CMD_MAC_STATS_V2_IN_PERIOD_MS_LBN 16
+#define MC_CMD_MAC_STATS_V2_IN_PERIOD_MS_WIDTH 16
+/* DMA length. Should be set to MAC_STATS_NUM_STATS * sizeof(uint64_t), as
+ * returned by MC_CMD_GET_CAPABILITIES_V4_OUT. For legacy firmware not
+ * supporting MC_CMD_GET_CAPABILITIES_V4_OUT, DMA_LEN should be set to
+ * MC_CMD_MAC_NSTATS * sizeof(uint64_t)
+ */
+#define MC_CMD_MAC_STATS_V2_IN_DMA_LEN_OFST 12
+#define MC_CMD_MAC_STATS_V2_IN_DMA_LEN_LEN 4
+/* port id so vadapter stats can be provided */
+#define MC_CMD_MAC_STATS_V2_IN_PORT_ID_OFST 16
+#define MC_CMD_MAC_STATS_V2_IN_PORT_ID_LEN 4
+/* Target port to request statistics for. Uses MAE_LINK_ENDPOINT_SELECTOR which
+ * identifies a real or virtual network port by MAE port and link end. 
See the + * structure definition for more details + */ +#define MC_CMD_MAC_STATS_V2_IN_TARGET_OFST 20 +#define MC_CMD_MAC_STATS_V2_IN_TARGET_LEN 8 +#define MC_CMD_MAC_STATS_V2_IN_TARGET_LO_OFST 20 +#define MC_CMD_MAC_STATS_V2_IN_TARGET_LO_LEN 4 +#define MC_CMD_MAC_STATS_V2_IN_TARGET_LO_LBN 160 +#define MC_CMD_MAC_STATS_V2_IN_TARGET_LO_WIDTH 32 +#define MC_CMD_MAC_STATS_V2_IN_TARGET_HI_OFST 24 +#define MC_CMD_MAC_STATS_V2_IN_TARGET_HI_LEN 4 +#define MC_CMD_MAC_STATS_V2_IN_TARGET_HI_LBN 192 +#define MC_CMD_MAC_STATS_V2_IN_TARGET_HI_WIDTH 32 +/* See structuredef: MAE_LINK_ENDPOINT_SELECTOR */ +#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_OFST 20 +#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_LEN 4 +#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_FLAT_OFST 20 +#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_FLAT_LEN 4 +#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_TYPE_OFST 23 +#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_TYPE_LEN 1 +#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_MPORT_ID_OFST 20 +#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_MPORT_ID_LEN 3 +#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_PPORT_ID_LBN 160 +#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_PPORT_ID_WIDTH 4 +#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_FUNC_INTF_ID_LBN 180 +#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_FUNC_INTF_ID_WIDTH 4 +#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_FUNC_MH_PF_ID_LBN 176 +#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_FUNC_MH_PF_ID_WIDTH 4 +#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_FUNC_PF_ID_OFST 22 +#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_FUNC_PF_ID_LEN 1 +#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_FUNC_VF_ID_OFST 20 +#define MC_CMD_MAC_STATS_V2_IN_TARGET_MPORT_SELECTOR_FUNC_VF_ID_LEN 2 +#define MC_CMD_MAC_STATS_V2_IN_TARGET_LINK_END_OFST 24 +#define MC_CMD_MAC_STATS_V2_IN_TARGET_LINK_END_LEN 4 +#define MC_CMD_MAC_STATS_V2_IN_TARGET_FLAT_OFST 20 +#define MC_CMD_MAC_STATS_V2_IN_TARGET_FLAT_LEN 8 +#define MC_CMD_MAC_STATS_V2_IN_TARGET_FLAT_LO_OFST 20 +#define MC_CMD_MAC_STATS_V2_IN_TARGET_FLAT_LO_LEN 4 +#define MC_CMD_MAC_STATS_V2_IN_TARGET_FLAT_LO_LBN 160 +#define MC_CMD_MAC_STATS_V2_IN_TARGET_FLAT_LO_WIDTH 32 +#define MC_CMD_MAC_STATS_V2_IN_TARGET_FLAT_HI_OFST 24 +#define MC_CMD_MAC_STATS_V2_IN_TARGET_FLAT_HI_LEN 4 +#define MC_CMD_MAC_STATS_V2_IN_TARGET_FLAT_HI_LBN 192 +#define MC_CMD_MAC_STATS_V2_IN_TARGET_FLAT_HI_WIDTH 32 + /* MC_CMD_MAC_STATS_OUT_DMA msgresponse */ #define MC_CMD_MAC_STATS_OUT_DMA_LEN 0 @@ -6421,6 +7013,7 @@ #define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_HI_LBN 32 #define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_HI_WIDTH 32 #define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS +/* enum property: index */ #define MC_CMD_MAC_GENERATION_START 0x0 /* enum */ #define MC_CMD_MAC_DMABUF_START 0x1 /* enum */ #define MC_CMD_MAC_TX_PKTS 0x1 /* enum */ @@ -6583,6 +7176,7 @@ #define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_HI_LBN 32 #define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_HI_WIDTH 32 #define MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS_V2 +/* enum property: index */ /* enum: Start of FEC stats buffer space, Medford2 and up */ #define MC_CMD_MAC_FEC_DMABUF_START 0x61 /* enum: Number of uncorrected FEC codewords on link (RS-FEC only for Medford2) @@ -6622,6 +7216,7 @@ #define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_HI_LBN 32 #define MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_HI_WIDTH 32 #define 
MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS_V3 +/* enum property: index */ /* enum: Start of CTPIO stats buffer space, Medford2 and up */ #define MC_CMD_MAC_CTPIO_DMABUF_START 0x68 /* enum: Number of CTPIO fallbacks because a DMA packet was in progress on the @@ -6702,6 +7297,7 @@ #define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_HI_LBN 32 #define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_HI_WIDTH 32 #define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS_V4 +/* enum property: index */ /* enum: Start of V4 stats buffer space */ #define MC_CMD_MAC_V4_DMABUF_START 0x79 /* enum: RXDP counter: Number of packets truncated because scattering was @@ -6723,112 +7319,35 @@ /* Other enum values, see field(s): */ /* MC_CMD_MAC_STATS_V3_OUT_NO_DMA/STATISTICS */ - -/***********************************/ -/* MC_CMD_SRIOV - * to be documented - */ -#define MC_CMD_SRIOV 0x30 - -/* MC_CMD_SRIOV_IN msgrequest */ -#define MC_CMD_SRIOV_IN_LEN 12 -#define MC_CMD_SRIOV_IN_ENABLE_OFST 0 -#define MC_CMD_SRIOV_IN_ENABLE_LEN 4 -#define MC_CMD_SRIOV_IN_VI_BASE_OFST 4 -#define MC_CMD_SRIOV_IN_VI_BASE_LEN 4 -#define MC_CMD_SRIOV_IN_VF_COUNT_OFST 8 -#define MC_CMD_SRIOV_IN_VF_COUNT_LEN 4 - -/* MC_CMD_SRIOV_OUT msgresponse */ -#define MC_CMD_SRIOV_OUT_LEN 8 -#define MC_CMD_SRIOV_OUT_VI_SCALE_OFST 0 -#define MC_CMD_SRIOV_OUT_VI_SCALE_LEN 4 -#define MC_CMD_SRIOV_OUT_VF_TOTAL_OFST 4 -#define MC_CMD_SRIOV_OUT_VF_TOTAL_LEN 4 - -/* MC_CMD_MEMCPY_RECORD_TYPEDEF structuredef */ -#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LEN 32 -/* this is only used for the first record */ -#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_OFST 0 -#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_LEN 4 -#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_LBN 0 -#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_WIDTH 32 -#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_OFST 4 -#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_LEN 4 -#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_LBN 32 -#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_WIDTH 32 -#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_OFST 8 -#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LEN 8 -#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LO_OFST 8 -#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LO_LEN 4 -#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LO_LBN 64 -#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LO_WIDTH 32 -#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_HI_OFST 12 -#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_HI_LEN 4 -#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_HI_LBN 96 -#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_HI_WIDTH 32 -#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LBN 64 -#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_WIDTH 64 -#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_OFST 16 -#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_LEN 4 -#define MC_CMD_MEMCPY_RECORD_TYPEDEF_RID_INLINE 0x100 /* enum */ -#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_LBN 128 -#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_WIDTH 32 -#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_OFST 20 -#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LEN 8 -#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LO_OFST 20 -#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LO_LEN 4 -#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LO_LBN 160 -#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LO_WIDTH 32 -#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_HI_OFST 24 -#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_HI_LEN 4 -#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_HI_LBN 192 -#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_HI_WIDTH 
32 -#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LBN 160 -#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_WIDTH 64 -#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_OFST 28 -#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_LEN 4 -#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_LBN 224 -#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_WIDTH 32 - - -/***********************************/ -/* MC_CMD_MEMCPY - * DMA write data into (Rid,Addr), either by dma reading (Rid,Addr), or by data - * embedded directly in the command. - * - * A common pattern is for a client to use generation counts to signal a dma - * update of a datastructure. To facilitate this, this MCDI operation can - * contain multiple requests which are executed in strict order. Requests take - * the form of duplicating the entire MCDI request continuously (including the - * requests record, which is ignored in all but the first structure) - * - * The source data can either come from a DMA from the host, or it can be - * embedded within the request directly, thereby eliminating a DMA read. To - * indicate this, the client sets FROM_RID=%RID_INLINE, ADDR_HI=0, and - * ADDR_LO=offset, and inserts the data at %offset from the start of the - * payload. It's the callers responsibility to ensure that the embedded data - * doesn't overlap the records. - * - * Returns: 0, EINVAL (invalid RID) - */ -#define MC_CMD_MEMCPY 0x31 - -/* MC_CMD_MEMCPY_IN msgrequest */ -#define MC_CMD_MEMCPY_IN_LENMIN 32 -#define MC_CMD_MEMCPY_IN_LENMAX 224 -#define MC_CMD_MEMCPY_IN_LENMAX_MCDI2 992 -#define MC_CMD_MEMCPY_IN_LEN(num) (0+32*(num)) -#define MC_CMD_MEMCPY_IN_RECORD_NUM(len) (((len)-0)/32) -/* see MC_CMD_MEMCPY_RECORD_TYPEDEF */ -#define MC_CMD_MEMCPY_IN_RECORD_OFST 0 -#define MC_CMD_MEMCPY_IN_RECORD_LEN 32 -#define MC_CMD_MEMCPY_IN_RECORD_MINNUM 1 -#define MC_CMD_MEMCPY_IN_RECORD_MAXNUM 7 -#define MC_CMD_MEMCPY_IN_RECORD_MAXNUM_MCDI2 31 - -/* MC_CMD_MEMCPY_OUT msgresponse */ -#define MC_CMD_MEMCPY_OUT_LEN 0 +/* MC_CMD_MAC_STATS_V5_OUT_DMA msgresponse */ +#define MC_CMD_MAC_STATS_V5_OUT_DMA_LEN 0 + +/* MC_CMD_MAC_STATS_V5_OUT_NO_DMA msgresponse */ +#define MC_CMD_MAC_STATS_V5_OUT_NO_DMA_LEN (((MC_CMD_MAC_NSTATS_V5*64))>>3) +#define MC_CMD_MAC_STATS_V5_OUT_NO_DMA_STATISTICS_OFST 0 +#define MC_CMD_MAC_STATS_V5_OUT_NO_DMA_STATISTICS_LEN 8 +#define MC_CMD_MAC_STATS_V5_OUT_NO_DMA_STATISTICS_LO_OFST 0 +#define MC_CMD_MAC_STATS_V5_OUT_NO_DMA_STATISTICS_LO_LEN 4 +#define MC_CMD_MAC_STATS_V5_OUT_NO_DMA_STATISTICS_LO_LBN 0 +#define MC_CMD_MAC_STATS_V5_OUT_NO_DMA_STATISTICS_LO_WIDTH 32 +#define MC_CMD_MAC_STATS_V5_OUT_NO_DMA_STATISTICS_HI_OFST 4 +#define MC_CMD_MAC_STATS_V5_OUT_NO_DMA_STATISTICS_HI_LEN 4 +#define MC_CMD_MAC_STATS_V5_OUT_NO_DMA_STATISTICS_HI_LBN 32 +#define MC_CMD_MAC_STATS_V5_OUT_NO_DMA_STATISTICS_HI_WIDTH 32 +#define MC_CMD_MAC_STATS_V5_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS_V5 +/* enum property: index */ +/* enum: Start of V5 stats buffer space */ +#define MC_CMD_MAC_V5_DMABUF_START 0x7c +/* enum: Link toggle counter: Number of times the link has toggled between + * up/down and down/up + */ +#define MC_CMD_MAC_LINK_TOGGLES 0x7c +/* enum: This includes the space at offset 125 which is the final + * GENERATION_END in a MAC_STATS_V5 response and otherwise unused. 
+ */ +#define MC_CMD_MAC_NSTATS_V5 0x7e +/* Other enum values, see field(s): */ +/* MC_CMD_MAC_STATS_V4_OUT_NO_DMA/STATISTICS */ /***********************************/ @@ -6984,6 +7503,7 @@ #define MC_CMD_WOL_FILTER_RESET_IN_LEN 4 #define MC_CMD_WOL_FILTER_RESET_IN_MASK_OFST 0 #define MC_CMD_WOL_FILTER_RESET_IN_MASK_LEN 4 +/* enum property: bitmask */ #define MC_CMD_WOL_FILTER_RESET_IN_WAKE_FILTERS 0x1 /* enum */ #define MC_CMD_WOL_FILTER_RESET_IN_LIGHTSOUT_OFFLOADS 0x2 /* enum */ @@ -6991,23 +7511,6 @@ #define MC_CMD_WOL_FILTER_RESET_OUT_LEN 0 -/***********************************/ -/* MC_CMD_SET_MCAST_HASH - * Set the MCAST hash value without otherwise reconfiguring the MAC - */ -#define MC_CMD_SET_MCAST_HASH 0x35 - -/* MC_CMD_SET_MCAST_HASH_IN msgrequest */ -#define MC_CMD_SET_MCAST_HASH_IN_LEN 32 -#define MC_CMD_SET_MCAST_HASH_IN_HASH0_OFST 0 -#define MC_CMD_SET_MCAST_HASH_IN_HASH0_LEN 16 -#define MC_CMD_SET_MCAST_HASH_IN_HASH1_OFST 16 -#define MC_CMD_SET_MCAST_HASH_IN_HASH1_LEN 16 - -/* MC_CMD_SET_MCAST_HASH_OUT msgresponse */ -#define MC_CMD_SET_MCAST_HASH_OUT_LEN 0 - - /***********************************/ /* MC_CMD_NVRAM_TYPES * Return bitfield indicating available types of virtual NVRAM partitions. @@ -7026,6 +7529,7 @@ /* Bit mask of supported types. */ #define MC_CMD_NVRAM_TYPES_OUT_TYPES_OFST 0 #define MC_CMD_NVRAM_TYPES_OUT_TYPES_LEN 4 +/* enum property: bitshift */ /* enum: Disabled callisto. */ #define MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO 0x0 /* enum: MC firmware. */ @@ -7152,6 +7656,12 @@ #define MC_CMD_NVRAM_INFO_V2_OUT_A_B_OFST 12 #define MC_CMD_NVRAM_INFO_V2_OUT_A_B_LBN 7 #define MC_CMD_NVRAM_INFO_V2_OUT_A_B_WIDTH 1 +#define MC_CMD_NVRAM_INFO_V2_OUT_WRITE_ONLY_OFST 12 +#define MC_CMD_NVRAM_INFO_V2_OUT_WRITE_ONLY_LBN 8 +#define MC_CMD_NVRAM_INFO_V2_OUT_WRITE_ONLY_WIDTH 1 +#define MC_CMD_NVRAM_INFO_V2_OUT_SEQUENTIAL_WRITE_OFST 12 +#define MC_CMD_NVRAM_INFO_V2_OUT_SEQUENTIAL_WRITE_LBN 9 +#define MC_CMD_NVRAM_INFO_V2_OUT_SEQUENTIAL_WRITE_WIDTH 1 #define MC_CMD_NVRAM_INFO_V2_OUT_PHYSDEV_OFST 16 #define MC_CMD_NVRAM_INFO_V2_OUT_PHYSDEV_LEN 4 #define MC_CMD_NVRAM_INFO_V2_OUT_PHYSADDR_OFST 20 @@ -7499,6 +8009,128 @@ #define MC_CMD_NVRAM_VERIFY_RC_BUNDLE_COMPONENT_COPY_FAILED 0x19 /* enum: The update operation is in-progress. */ #define MC_CMD_NVRAM_VERIFY_RC_PENDING 0x1a +/* enum: The update was an invalid user configuration file. */ +#define MC_CMD_NVRAM_VERIFY_RC_BAD_CONFIG 0x1b +/* enum: The write was to the AUTO partition but the data was not recognised as + * a valid partition. + */ +#define MC_CMD_NVRAM_VERIFY_RC_UNKNOWN_TYPE 0x1c + +/* MC_CMD_NVRAM_UPDATE_FINISH_V3_OUT msgresponse */ +#define MC_CMD_NVRAM_UPDATE_FINISH_V3_OUT_LEN 88 +/* Result of nvram update completion processing. Result codes that indicate an + * internal build failure and therefore not expected to be seen by customers in + * the field are marked with a prefix 'Internal-error'. + */ +#define MC_CMD_NVRAM_UPDATE_FINISH_V3_OUT_RESULT_CODE_OFST 0 +#define MC_CMD_NVRAM_UPDATE_FINISH_V3_OUT_RESULT_CODE_LEN 4 +/* enum: Invalid return code; only non-zero values are defined. Defined as + * unknown for backwards compatibility with NVRAM_UPDATE_FINISH_OUT. + */ +/* MC_CMD_NVRAM_VERIFY_RC_UNKNOWN 0x0 */ +/* enum: Verify succeeded without any errors. */ +/* MC_CMD_NVRAM_VERIFY_RC_SUCCESS 0x1 */ +/* enum: CMS format verification failed due to an internal error. */ +/* MC_CMD_NVRAM_VERIFY_RC_CMS_CHECK_FAILED 0x2 */ +/* enum: Invalid CMS format in image metadata. 
*/ +/* MC_CMD_NVRAM_VERIFY_RC_INVALID_CMS_FORMAT 0x3 */ +/* enum: Message digest verification failed due to an internal error. */ +/* MC_CMD_NVRAM_VERIFY_RC_MESSAGE_DIGEST_CHECK_FAILED 0x4 */ +/* enum: Error in message digest calculated over the reflash-header, payload + * and reflash-trailer. + */ +/* MC_CMD_NVRAM_VERIFY_RC_BAD_MESSAGE_DIGEST 0x5 */ +/* enum: Signature verification failed due to an internal error. */ +/* MC_CMD_NVRAM_VERIFY_RC_SIGNATURE_CHECK_FAILED 0x6 */ +/* enum: There are no valid signatures in the image. */ +/* MC_CMD_NVRAM_VERIFY_RC_NO_VALID_SIGNATURES 0x7 */ +/* enum: Trusted approvers verification failed due to an internal error. */ +/* MC_CMD_NVRAM_VERIFY_RC_TRUSTED_APPROVERS_CHECK_FAILED 0x8 */ +/* enum: The Trusted approver's list is empty. */ +/* MC_CMD_NVRAM_VERIFY_RC_NO_TRUSTED_APPROVERS 0x9 */ +/* enum: Signature chain verification failed due to an internal error. */ +/* MC_CMD_NVRAM_VERIFY_RC_SIGNATURE_CHAIN_CHECK_FAILED 0xa */ +/* enum: The signers of the signatures in the image are not listed in the + * Trusted approver's list. + */ +/* MC_CMD_NVRAM_VERIFY_RC_NO_SIGNATURE_MATCH 0xb */ +/* enum: The image contains a test-signed certificate, but the adapter accepts + * only production signed images. + */ +/* MC_CMD_NVRAM_VERIFY_RC_REJECT_TEST_SIGNED 0xc */ +/* enum: The image has a lower security level than the current firmware. */ +/* MC_CMD_NVRAM_VERIFY_RC_SECURITY_LEVEL_DOWNGRADE 0xd */ +/* enum: Internal-error. The signed image is missing the 'contents' section, + * where the 'contents' section holds the actual image payload to be applied. + */ +/* MC_CMD_NVRAM_VERIFY_RC_CONTENT_NOT_FOUND 0xe */ +/* enum: Internal-error. The bundle header is invalid. */ +/* MC_CMD_NVRAM_VERIFY_RC_BUNDLE_CONTENT_HEADER_INVALID 0xf */ +/* enum: Internal-error. The bundle does not have a valid reflash image layout. + */ +/* MC_CMD_NVRAM_VERIFY_RC_BUNDLE_REFLASH_IMAGE_INVALID 0x10 */ +/* enum: Internal-error. The bundle has an inconsistent layout of components or + * incorrect checksum. + */ +/* MC_CMD_NVRAM_VERIFY_RC_BUNDLE_IMAGE_LAYOUT_INVALID 0x11 */ +/* enum: Internal-error. The bundle manifest is inconsistent with components in + * the bundle. + */ +/* MC_CMD_NVRAM_VERIFY_RC_BUNDLE_MANIFEST_INVALID 0x12 */ +/* enum: Internal-error. The number of components in a bundle do not match the + * number of components advertised by the bundle manifest. + */ +/* MC_CMD_NVRAM_VERIFY_RC_BUNDLE_MANIFEST_NUM_COMPONENTS_MISMATCH 0x13 */ +/* enum: Internal-error. The bundle contains too many components for the MC + * firmware to process + */ +/* MC_CMD_NVRAM_VERIFY_RC_BUNDLE_MANIFEST_TOO_MANY_COMPONENTS 0x14 */ +/* enum: Internal-error. The bundle manifest has an invalid/inconsistent + * component. + */ +/* MC_CMD_NVRAM_VERIFY_RC_BUNDLE_MANIFEST_COMPONENT_INVALID 0x15 */ +/* enum: Internal-error. The hash of a component does not match the hash stored + * in the bundle manifest. + */ +/* MC_CMD_NVRAM_VERIFY_RC_BUNDLE_MANIFEST_COMPONENT_HASH_MISMATCH 0x16 */ +/* enum: Internal-error. Component hash calculation failed. */ +/* MC_CMD_NVRAM_VERIFY_RC_BUNDLE_MANIFEST_COMPONENT_HASH_FAILED 0x17 */ +/* enum: Internal-error. The component does not have a valid reflash image + * layout. + */ +/* MC_CMD_NVRAM_VERIFY_RC_BUNDLE_COMPONENT_REFLASH_IMAGE_INVALID 0x18 */ +/* enum: The bundle processing code failed to copy a component to its target + * partition. + */ +/* MC_CMD_NVRAM_VERIFY_RC_BUNDLE_COMPONENT_COPY_FAILED 0x19 */ +/* enum: The update operation is in-progress. 
*/ +/* MC_CMD_NVRAM_VERIFY_RC_PENDING 0x1a */ +/* enum: The update was an invalid user configuration file. */ +/* MC_CMD_NVRAM_VERIFY_RC_BAD_CONFIG 0x1b */ +/* enum: The write was to the AUTO partition but the data was not recognised as + * a valid partition. + */ +/* MC_CMD_NVRAM_VERIFY_RC_UNKNOWN_TYPE 0x1c */ +/* If the update was a user configuration, what action(s) the user must take to + * apply the new configuration. + */ +#define MC_CMD_NVRAM_UPDATE_FINISH_V3_OUT_ACTIONS_REQUIRED_OFST 4 +#define MC_CMD_NVRAM_UPDATE_FINISH_V3_OUT_ACTIONS_REQUIRED_LEN 4 +/* enum: No action required. */ +#define MC_CMD_NVRAM_UPDATE_FINISH_V3_OUT_NONE 0x0 +/* enum: The MC firmware must be rebooted (eg with MC_CMD_REBOOT). */ +#define MC_CMD_NVRAM_UPDATE_FINISH_V3_OUT_FIRMWARE_REBOOT 0x1 +/* enum: The host must be rebooted. */ +#define MC_CMD_NVRAM_UPDATE_FINISH_V3_OUT_HOST_REBOOT 0x2 +/* enum: The firmware and host must be rebooted (in either order). */ +#define MC_CMD_NVRAM_UPDATE_FINISH_V3_OUT_FIRMWARE_AND_HOST_REBOOT 0x3 +/* enum: The host must be fully powered off. */ +#define MC_CMD_NVRAM_UPDATE_FINISH_V3_OUT_HOST_POWERCYCLE 0x4 +/* If the update failed with MC_CMD_NVRAM_VERIFY_RC_BAD_CONFIG, a null- + * terminated US-ASCII string suitable for showing to the user. + */ +#define MC_CMD_NVRAM_UPDATE_FINISH_V3_OUT_ERROR_STRING_OFST 8 +#define MC_CMD_NVRAM_UPDATE_FINISH_V3_OUT_ERROR_STRING_LEN 80 /***********************************/ @@ -7522,7 +8154,7 @@ #define MC_CMD_REBOOT 0x3d #undef MC_CMD_0x3d_PRIVILEGE_CTG -#define MC_CMD_0x3d_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0x3d_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND /* MC_CMD_REBOOT_IN msgrequest */ #define MC_CMD_REBOOT_IN_LEN 4 @@ -7534,65 +8166,6 @@ #define MC_CMD_REBOOT_OUT_LEN 0 -/***********************************/ -/* MC_CMD_SCHEDINFO - * Request scheduler info. Locks required: NONE. Returns: An array of - * (timeslice,maximum overrun), one for each thread, in ascending order of - * thread address. - */ -#define MC_CMD_SCHEDINFO 0x3e -#undef MC_CMD_0x3e_PRIVILEGE_CTG - -#define MC_CMD_0x3e_PRIVILEGE_CTG SRIOV_CTG_ADMIN - -/* MC_CMD_SCHEDINFO_IN msgrequest */ -#define MC_CMD_SCHEDINFO_IN_LEN 0 - -/* MC_CMD_SCHEDINFO_OUT msgresponse */ -#define MC_CMD_SCHEDINFO_OUT_LENMIN 4 -#define MC_CMD_SCHEDINFO_OUT_LENMAX 252 -#define MC_CMD_SCHEDINFO_OUT_LENMAX_MCDI2 1020 -#define MC_CMD_SCHEDINFO_OUT_LEN(num) (0+4*(num)) -#define MC_CMD_SCHEDINFO_OUT_DATA_NUM(len) (((len)-0)/4) -#define MC_CMD_SCHEDINFO_OUT_DATA_OFST 0 -#define MC_CMD_SCHEDINFO_OUT_DATA_LEN 4 -#define MC_CMD_SCHEDINFO_OUT_DATA_MINNUM 1 -#define MC_CMD_SCHEDINFO_OUT_DATA_MAXNUM 63 -#define MC_CMD_SCHEDINFO_OUT_DATA_MAXNUM_MCDI2 255 - - -/***********************************/ -/* MC_CMD_REBOOT_MODE - * Set the mode for the next MC reboot. Locks required: NONE. Sets the reboot - * mode to the specified value. Returns the old mode. - */ -#define MC_CMD_REBOOT_MODE 0x3f -#undef MC_CMD_0x3f_PRIVILEGE_CTG - -#define MC_CMD_0x3f_PRIVILEGE_CTG SRIOV_CTG_INSECURE - -/* MC_CMD_REBOOT_MODE_IN msgrequest */ -#define MC_CMD_REBOOT_MODE_IN_LEN 4 -#define MC_CMD_REBOOT_MODE_IN_VALUE_OFST 0 -#define MC_CMD_REBOOT_MODE_IN_VALUE_LEN 4 -/* enum: Normal. */ -#define MC_CMD_REBOOT_MODE_NORMAL 0x0 -/* enum: Power-on Reset. */ -#define MC_CMD_REBOOT_MODE_POR 0x2 -/* enum: Snapper. 
*/ -#define MC_CMD_REBOOT_MODE_SNAPPER 0x3 -/* enum: snapper fake POR */ -#define MC_CMD_REBOOT_MODE_SNAPPER_POR 0x4 -#define MC_CMD_REBOOT_MODE_IN_FAKE_OFST 0 -#define MC_CMD_REBOOT_MODE_IN_FAKE_LBN 7 -#define MC_CMD_REBOOT_MODE_IN_FAKE_WIDTH 1 - -/* MC_CMD_REBOOT_MODE_OUT msgresponse */ -#define MC_CMD_REBOOT_MODE_OUT_LEN 4 -#define MC_CMD_REBOOT_MODE_OUT_VALUE_OFST 0 -#define MC_CMD_REBOOT_MODE_OUT_VALUE_LEN 4 - - /***********************************/ /* MC_CMD_SENSOR_INFO * Returns information about every available sensor. @@ -8061,6 +8634,54 @@ /* MC_CMD_GET_PHY_STATE_IN msgrequest */ #define MC_CMD_GET_PHY_STATE_IN_LEN 0 +/* MC_CMD_GET_PHY_STATE_IN_V2 msgrequest */ +#define MC_CMD_GET_PHY_STATE_IN_V2_LEN 8 +/* Target port to request PHY state for. Uses MAE_LINK_ENDPOINT_SELECTOR which + * identifies a real or virtual network port by MAE port and link end. See the + * structure definition for more details. + */ +#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_OFST 0 +#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_LEN 8 +#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_LO_OFST 0 +#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_LO_LEN 4 +#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_LO_LBN 0 +#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_LO_WIDTH 32 +#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_HI_OFST 4 +#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_HI_LEN 4 +#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_HI_LBN 32 +#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_HI_WIDTH 32 +/* See structuredef: MAE_LINK_ENDPOINT_SELECTOR */ +#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_OFST 0 +#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_LEN 4 +#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_FLAT_OFST 0 +#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_FLAT_LEN 4 +#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_TYPE_OFST 3 +#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_TYPE_LEN 1 +#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_MPORT_ID_OFST 0 +#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_MPORT_ID_LEN 3 +#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_PPORT_ID_LBN 0 +#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_PPORT_ID_WIDTH 4 +#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_FUNC_INTF_ID_LBN 20 +#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_FUNC_INTF_ID_WIDTH 4 +#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_FUNC_MH_PF_ID_LBN 16 +#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_FUNC_MH_PF_ID_WIDTH 4 +#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_FUNC_PF_ID_OFST 2 +#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_FUNC_PF_ID_LEN 1 +#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_FUNC_VF_ID_OFST 0 +#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_MPORT_SELECTOR_FUNC_VF_ID_LEN 2 +#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_LINK_END_OFST 4 +#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_LINK_END_LEN 4 +#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_FLAT_OFST 0 +#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_FLAT_LEN 8 +#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_FLAT_LO_OFST 0 +#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_FLAT_LO_LEN 4 +#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_FLAT_LO_LBN 0 +#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_FLAT_LO_WIDTH 32 +#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_FLAT_HI_OFST 4 +#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_FLAT_HI_LEN 4 +#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_FLAT_HI_LBN 32 +#define MC_CMD_GET_PHY_STATE_IN_V2_TARGET_FLAT_HI_WIDTH 32 + /* MC_CMD_GET_PHY_STATE_OUT 
msgresponse */ #define MC_CMD_GET_PHY_STATE_OUT_LEN 4 #define MC_CMD_GET_PHY_STATE_OUT_STATE_OFST 0 @@ -8071,22 +8692,6 @@ #define MC_CMD_PHY_STATE_ZOMBIE 0x2 -/***********************************/ -/* MC_CMD_SETUP_8021QBB - * 802.1Qbb control. 8 Tx queues that map to priorities 0 - 7. Use all 1s to - * disable 802.Qbb for a given priority. - */ -#define MC_CMD_SETUP_8021QBB 0x44 - -/* MC_CMD_SETUP_8021QBB_IN msgrequest */ -#define MC_CMD_SETUP_8021QBB_IN_LEN 32 -#define MC_CMD_SETUP_8021QBB_IN_TXQS_OFST 0 -#define MC_CMD_SETUP_8021QBB_IN_TXQS_LEN 32 - -/* MC_CMD_SETUP_8021QBB_OUT msgresponse */ -#define MC_CMD_SETUP_8021QBB_OUT_LEN 0 - - /***********************************/ /* MC_CMD_WOL_FILTER_GET * Retrieve ID of any WoL filters. Locks required: None. Returns: 0, ENOSYS @@ -8105,133 +8710,6 @@ #define MC_CMD_WOL_FILTER_GET_OUT_FILTER_ID_LEN 4 -/***********************************/ -/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD - * Add a protocol offload to NIC for lights-out state. Locks required: None. - * Returns: 0, ENOSYS - */ -#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD 0x46 -#undef MC_CMD_0x46_PRIVILEGE_CTG - -#define MC_CMD_0x46_PRIVILEGE_CTG SRIOV_CTG_LINK - -/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN msgrequest */ -#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LENMIN 8 -#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LENMAX 252 -#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LENMAX_MCDI2 1020 -#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LEN(num) (4+4*(num)) -#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_NUM(len) (((len)-4)/4) -#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0 -#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_LEN 4 -#define MC_CMD_LIGHTSOUT_OFFLOAD_PROTOCOL_ARP 0x1 /* enum */ -#define MC_CMD_LIGHTSOUT_OFFLOAD_PROTOCOL_NS 0x2 /* enum */ -#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_OFST 4 -#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_LEN 4 -#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_MINNUM 1 -#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_MAXNUM 62 -#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_MAXNUM_MCDI2 254 - -/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP msgrequest */ -#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_LEN 14 -/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0 */ -/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_LEN 4 */ -#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_MAC_OFST 4 -#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_MAC_LEN 6 -#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_IP_OFST 10 -#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_IP_LEN 4 - -/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS msgrequest */ -#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_LEN 42 -/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0 */ -/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_LEN 4 */ -#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_MAC_OFST 4 -#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_MAC_LEN 6 -#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_SNIPV6_OFST 10 -#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_SNIPV6_LEN 16 -#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_IPV6_OFST 26 -#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_IPV6_LEN 16 - -/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT msgresponse */ -#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_LEN 4 -#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_FILTER_ID_OFST 0 -#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_FILTER_ID_LEN 4 - - -/***********************************/ -/* MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD - * Remove a protocol offload from NIC for lights-out state. Locks required: - * None. 
Returns: 0, ENOSYS - */ -#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD 0x47 -#undef MC_CMD_0x47_PRIVILEGE_CTG - -#define MC_CMD_0x47_PRIVILEGE_CTG SRIOV_CTG_LINK - -/* MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN msgrequest */ -#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_LEN 8 -#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0 -#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_LEN 4 -#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_FILTER_ID_OFST 4 -#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_FILTER_ID_LEN 4 - -/* MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_OUT msgresponse */ -#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_OUT_LEN 0 - - -/***********************************/ -/* MC_CMD_MAC_RESET_RESTORE - * Restore MAC after block reset. Locks required: None. Returns: 0. - */ -#define MC_CMD_MAC_RESET_RESTORE 0x48 - -/* MC_CMD_MAC_RESET_RESTORE_IN msgrequest */ -#define MC_CMD_MAC_RESET_RESTORE_IN_LEN 0 - -/* MC_CMD_MAC_RESET_RESTORE_OUT msgresponse */ -#define MC_CMD_MAC_RESET_RESTORE_OUT_LEN 0 - - -/***********************************/ -/* MC_CMD_TESTASSERT - * Deliberately trigger an assert-detonation in the firmware for testing - * purposes (i.e. to allow tests that the driver copes gracefully). Locks - * required: None Returns: 0 - */ -#define MC_CMD_TESTASSERT 0x49 -#undef MC_CMD_0x49_PRIVILEGE_CTG - -#define MC_CMD_0x49_PRIVILEGE_CTG SRIOV_CTG_ADMIN - -/* MC_CMD_TESTASSERT_IN msgrequest */ -#define MC_CMD_TESTASSERT_IN_LEN 0 - -/* MC_CMD_TESTASSERT_OUT msgresponse */ -#define MC_CMD_TESTASSERT_OUT_LEN 0 - -/* MC_CMD_TESTASSERT_V2_IN msgrequest */ -#define MC_CMD_TESTASSERT_V2_IN_LEN 4 -/* How to provoke the assertion */ -#define MC_CMD_TESTASSERT_V2_IN_TYPE_OFST 0 -#define MC_CMD_TESTASSERT_V2_IN_TYPE_LEN 4 -/* enum: Assert using the FAIL_ASSERTION_WITH_USEFUL_VALUES macro. Unless - * you're testing firmware, this is what you want. - */ -#define MC_CMD_TESTASSERT_V2_IN_FAIL_ASSERTION_WITH_USEFUL_VALUES 0x0 -/* enum: Assert using assert(0); */ -#define MC_CMD_TESTASSERT_V2_IN_ASSERT_FALSE 0x1 -/* enum: Deliberately trigger a watchdog */ -#define MC_CMD_TESTASSERT_V2_IN_WATCHDOG 0x2 -/* enum: Deliberately trigger a trap by loading from an invalid address */ -#define MC_CMD_TESTASSERT_V2_IN_LOAD_TRAP 0x3 -/* enum: Deliberately trigger a trap by storing to an invalid address */ -#define MC_CMD_TESTASSERT_V2_IN_STORE_TRAP 0x4 -/* enum: Jump to an invalid address */ -#define MC_CMD_TESTASSERT_V2_IN_JUMP_TRAP 0x5 - -/* MC_CMD_TESTASSERT_V2_OUT msgresponse */ -#define MC_CMD_TESTASSERT_V2_OUT_LEN 0 - - /***********************************/ /* MC_CMD_WORKAROUND * Enable/Disable a given workaround. The mcfw will return EINVAL if it doesn't @@ -8324,6 +8802,62 @@ #define MC_CMD_GET_PHY_MEDIA_INFO_IN_DSFP_BANK_LBN 16 #define MC_CMD_GET_PHY_MEDIA_INFO_IN_DSFP_BANK_WIDTH 16 +/* MC_CMD_GET_PHY_MEDIA_INFO_IN_V2 msgrequest */ +#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_LEN 12 +#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_PAGE_OFST 0 +#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_PAGE_LEN 4 +#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_DSFP_PAGE_OFST 0 +#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_DSFP_PAGE_LBN 0 +#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_DSFP_PAGE_WIDTH 16 +#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_DSFP_BANK_OFST 0 +#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_DSFP_BANK_LBN 16 +#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_DSFP_BANK_WIDTH 16 +/* Target port to request PHY state for. Uses MAE_LINK_ENDPOINT_SELECTOR which + * identifies a real or virtual network port by MAE port and link end. 
See the + * structure definition for more details + */ +#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_OFST 4 +#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_LEN 8 +#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_LO_OFST 4 +#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_LO_LEN 4 +#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_LO_LBN 32 +#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_LO_WIDTH 32 +#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_HI_OFST 8 +#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_HI_LEN 4 +#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_HI_LBN 64 +#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_HI_WIDTH 32 +/* See structuredef: MAE_LINK_ENDPOINT_SELECTOR */ +#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_OFST 4 +#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_LEN 4 +#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_FLAT_OFST 4 +#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_FLAT_LEN 4 +#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_TYPE_OFST 7 +#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_TYPE_LEN 1 +#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_MPORT_ID_OFST 4 +#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_MPORT_ID_LEN 3 +#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_PPORT_ID_LBN 32 +#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_PPORT_ID_WIDTH 4 +#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_FUNC_INTF_ID_LBN 52 +#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_FUNC_INTF_ID_WIDTH 4 +#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_FUNC_MH_PF_ID_LBN 48 +#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_FUNC_MH_PF_ID_WIDTH 4 +#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_FUNC_PF_ID_OFST 6 +#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_FUNC_PF_ID_LEN 1 +#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_FUNC_VF_ID_OFST 4 +#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_MPORT_SELECTOR_FUNC_VF_ID_LEN 2 +#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_LINK_END_OFST 8 +#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_LINK_END_LEN 4 +#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_FLAT_OFST 4 +#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_FLAT_LEN 8 +#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_FLAT_LO_OFST 4 +#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_FLAT_LO_LEN 4 +#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_FLAT_LO_LBN 32 +#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_FLAT_LO_WIDTH 32 +#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_FLAT_HI_OFST 8 +#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_FLAT_HI_LEN 4 +#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_FLAT_HI_LBN 64 +#define MC_CMD_GET_PHY_MEDIA_INFO_IN_V2_TARGET_FLAT_HI_WIDTH 32 + /* MC_CMD_GET_PHY_MEDIA_INFO_OUT msgresponse */ #define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMIN 5 #define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMAX 252 @@ -8348,7 +8882,7 @@ #define MC_CMD_NVRAM_TEST 0x4c #undef MC_CMD_0x4c_PRIVILEGE_CTG -#define MC_CMD_0x4c_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0x4c_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND /* MC_CMD_NVRAM_TEST_IN msgrequest */ #define MC_CMD_NVRAM_TEST_IN_LEN 4 @@ -8369,103 +8903,6 @@ #define MC_CMD_NVRAM_TEST_NOTSUPP 0x2 -/***********************************/ -/* MC_CMD_MRSFP_TWEAK - * Read status and/or set parameters for the 'mrsfp' driver in mr_rusty builds. 
- * I2C I/O expander bits are always read; if equaliser parameters are supplied, - * they are configured first. Locks required: None. Return code: 0, EINVAL. - */ -#define MC_CMD_MRSFP_TWEAK 0x4d - -/* MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG msgrequest */ -#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_LEN 16 -/* 0-6 low->high de-emph. */ -#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_LEVEL_OFST 0 -#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_LEVEL_LEN 4 -/* 0-8 low->high ref.V */ -#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_DT_CFG_OFST 4 -#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_DT_CFG_LEN 4 -/* 0-8 0-8 low->high boost */ -#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_BOOST_OFST 8 -#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_BOOST_LEN 4 -/* 0-8 low->high ref.V */ -#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_DT_CFG_OFST 12 -#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_DT_CFG_LEN 4 - -/* MC_CMD_MRSFP_TWEAK_IN_READ_ONLY msgrequest */ -#define MC_CMD_MRSFP_TWEAK_IN_READ_ONLY_LEN 0 - -/* MC_CMD_MRSFP_TWEAK_OUT msgresponse */ -#define MC_CMD_MRSFP_TWEAK_OUT_LEN 12 -/* input bits */ -#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_INPUTS_OFST 0 -#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_INPUTS_LEN 4 -/* output bits */ -#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_OUTPUTS_OFST 4 -#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_OUTPUTS_LEN 4 -/* direction */ -#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_OFST 8 -#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_LEN 4 -/* enum: Out. */ -#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_OUT 0x0 -/* enum: In. */ -#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_IN 0x1 - - -/***********************************/ -/* MC_CMD_SENSOR_SET_LIMS - * Adjusts the sensor limits. This is a warranty-voiding operation. Returns: - * ENOENT if the sensor specified does not exist, EINVAL if the limits are out - * of range. - */ -#define MC_CMD_SENSOR_SET_LIMS 0x4e -#undef MC_CMD_0x4e_PRIVILEGE_CTG - -#define MC_CMD_0x4e_PRIVILEGE_CTG SRIOV_CTG_INSECURE - -/* MC_CMD_SENSOR_SET_LIMS_IN msgrequest */ -#define MC_CMD_SENSOR_SET_LIMS_IN_LEN 20 -#define MC_CMD_SENSOR_SET_LIMS_IN_SENSOR_OFST 0 -#define MC_CMD_SENSOR_SET_LIMS_IN_SENSOR_LEN 4 -/* Enum values, see field(s): */ -/* MC_CMD_SENSOR_INFO/MC_CMD_SENSOR_INFO_OUT/MASK */ -/* interpretation is is sensor-specific. */ -#define MC_CMD_SENSOR_SET_LIMS_IN_LOW0_OFST 4 -#define MC_CMD_SENSOR_SET_LIMS_IN_LOW0_LEN 4 -/* interpretation is is sensor-specific. */ -#define MC_CMD_SENSOR_SET_LIMS_IN_HI0_OFST 8 -#define MC_CMD_SENSOR_SET_LIMS_IN_HI0_LEN 4 -/* interpretation is is sensor-specific. */ -#define MC_CMD_SENSOR_SET_LIMS_IN_LOW1_OFST 12 -#define MC_CMD_SENSOR_SET_LIMS_IN_LOW1_LEN 4 -/* interpretation is is sensor-specific. 
*/ -#define MC_CMD_SENSOR_SET_LIMS_IN_HI1_OFST 16 -#define MC_CMD_SENSOR_SET_LIMS_IN_HI1_LEN 4 - -/* MC_CMD_SENSOR_SET_LIMS_OUT msgresponse */ -#define MC_CMD_SENSOR_SET_LIMS_OUT_LEN 0 - - -/***********************************/ -/* MC_CMD_GET_RESOURCE_LIMITS - */ -#define MC_CMD_GET_RESOURCE_LIMITS 0x4f - -/* MC_CMD_GET_RESOURCE_LIMITS_IN msgrequest */ -#define MC_CMD_GET_RESOURCE_LIMITS_IN_LEN 0 - -/* MC_CMD_GET_RESOURCE_LIMITS_OUT msgresponse */ -#define MC_CMD_GET_RESOURCE_LIMITS_OUT_LEN 16 -#define MC_CMD_GET_RESOURCE_LIMITS_OUT_BUFTBL_OFST 0 -#define MC_CMD_GET_RESOURCE_LIMITS_OUT_BUFTBL_LEN 4 -#define MC_CMD_GET_RESOURCE_LIMITS_OUT_EVQ_OFST 4 -#define MC_CMD_GET_RESOURCE_LIMITS_OUT_EVQ_LEN 4 -#define MC_CMD_GET_RESOURCE_LIMITS_OUT_RXQ_OFST 8 -#define MC_CMD_GET_RESOURCE_LIMITS_OUT_RXQ_LEN 4 -#define MC_CMD_GET_RESOURCE_LIMITS_OUT_TXQ_OFST 12 -#define MC_CMD_GET_RESOURCE_LIMITS_OUT_TXQ_LEN 4 - - /***********************************/ /* MC_CMD_NVRAM_PARTITIONS * Reads the list of available virtual NVRAM partition types. Locks required: @@ -8582,806 +9019,6 @@ #define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_STRIDE_OFST 12 #define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_STRIDE_LEN 4 - -/***********************************/ -/* MC_CMD_CLP - * Perform a CLP related operation, see SF-110495-PS for details of CLP - * processing. This command has been extended to accomodate the requirements of - * different manufacturers which are to be found in SF-119187-TC, SF-119186-TC, - * SF-120509-TC and SF-117282-PS. - */ -#define MC_CMD_CLP 0x56 -#undef MC_CMD_0x56_PRIVILEGE_CTG - -#define MC_CMD_0x56_PRIVILEGE_CTG SRIOV_CTG_ADMIN - -/* MC_CMD_CLP_IN msgrequest */ -#define MC_CMD_CLP_IN_LEN 4 -/* Sub operation */ -#define MC_CMD_CLP_IN_OP_OFST 0 -#define MC_CMD_CLP_IN_OP_LEN 4 -/* enum: Return to factory default settings */ -#define MC_CMD_CLP_OP_DEFAULT 0x1 -/* enum: Set MAC address */ -#define MC_CMD_CLP_OP_SET_MAC 0x2 -/* enum: Get MAC address */ -#define MC_CMD_CLP_OP_GET_MAC 0x3 -/* enum: Set UEFI/GPXE boot mode */ -#define MC_CMD_CLP_OP_SET_BOOT 0x4 -/* enum: Get UEFI/GPXE boot mode */ -#define MC_CMD_CLP_OP_GET_BOOT 0x5 - -/* MC_CMD_CLP_OUT msgresponse */ -#define MC_CMD_CLP_OUT_LEN 0 - -/* MC_CMD_CLP_IN_DEFAULT msgrequest */ -#define MC_CMD_CLP_IN_DEFAULT_LEN 4 -/* MC_CMD_CLP_IN_OP_OFST 0 */ -/* MC_CMD_CLP_IN_OP_LEN 4 */ - -/* MC_CMD_CLP_OUT_DEFAULT msgresponse */ -#define MC_CMD_CLP_OUT_DEFAULT_LEN 0 - -/* MC_CMD_CLP_IN_SET_MAC msgrequest */ -#define MC_CMD_CLP_IN_SET_MAC_LEN 12 -/* MC_CMD_CLP_IN_OP_OFST 0 */ -/* MC_CMD_CLP_IN_OP_LEN 4 */ -/* The MAC address assigned to port. A zero MAC address of 00:00:00:00:00:00 - * restores the permanent (factory-programmed) MAC address associated with the - * port. A non-zero MAC address persists until a PCIe reset or a power cycle. - */ -#define MC_CMD_CLP_IN_SET_MAC_ADDR_OFST 4 -#define MC_CMD_CLP_IN_SET_MAC_ADDR_LEN 6 -/* Padding */ -#define MC_CMD_CLP_IN_SET_MAC_RESERVED_OFST 10 -#define MC_CMD_CLP_IN_SET_MAC_RESERVED_LEN 2 - -/* MC_CMD_CLP_OUT_SET_MAC msgresponse */ -#define MC_CMD_CLP_OUT_SET_MAC_LEN 0 - -/* MC_CMD_CLP_IN_SET_MAC_V2 msgrequest */ -#define MC_CMD_CLP_IN_SET_MAC_V2_LEN 16 -/* MC_CMD_CLP_IN_OP_OFST 0 */ -/* MC_CMD_CLP_IN_OP_LEN 4 */ -/* The MAC address assigned to port. A zero MAC address of 00:00:00:00:00:00 - * restores the permanent (factory-programmed) MAC address associated with the - * port. A non-zero MAC address persists until a PCIe reset or a power cycle. 
- */ -#define MC_CMD_CLP_IN_SET_MAC_V2_ADDR_OFST 4 -#define MC_CMD_CLP_IN_SET_MAC_V2_ADDR_LEN 6 -/* Padding */ -#define MC_CMD_CLP_IN_SET_MAC_V2_RESERVED_OFST 10 -#define MC_CMD_CLP_IN_SET_MAC_V2_RESERVED_LEN 2 -#define MC_CMD_CLP_IN_SET_MAC_V2_FLAGS_OFST 12 -#define MC_CMD_CLP_IN_SET_MAC_V2_FLAGS_LEN 4 -#define MC_CMD_CLP_IN_SET_MAC_V2_VIRTUAL_OFST 12 -#define MC_CMD_CLP_IN_SET_MAC_V2_VIRTUAL_LBN 0 -#define MC_CMD_CLP_IN_SET_MAC_V2_VIRTUAL_WIDTH 1 - -/* MC_CMD_CLP_IN_GET_MAC msgrequest */ -#define MC_CMD_CLP_IN_GET_MAC_LEN 4 -/* MC_CMD_CLP_IN_OP_OFST 0 */ -/* MC_CMD_CLP_IN_OP_LEN 4 */ - -/* MC_CMD_CLP_IN_GET_MAC_V2 msgrequest */ -#define MC_CMD_CLP_IN_GET_MAC_V2_LEN 8 -/* MC_CMD_CLP_IN_OP_OFST 0 */ -/* MC_CMD_CLP_IN_OP_LEN 4 */ -#define MC_CMD_CLP_IN_GET_MAC_V2_FLAGS_OFST 4 -#define MC_CMD_CLP_IN_GET_MAC_V2_FLAGS_LEN 4 -#define MC_CMD_CLP_IN_GET_MAC_V2_PERMANENT_OFST 4 -#define MC_CMD_CLP_IN_GET_MAC_V2_PERMANENT_LBN 0 -#define MC_CMD_CLP_IN_GET_MAC_V2_PERMANENT_WIDTH 1 - -/* MC_CMD_CLP_OUT_GET_MAC msgresponse */ -#define MC_CMD_CLP_OUT_GET_MAC_LEN 8 -/* MAC address assigned to port */ -#define MC_CMD_CLP_OUT_GET_MAC_ADDR_OFST 0 -#define MC_CMD_CLP_OUT_GET_MAC_ADDR_LEN 6 -/* Padding */ -#define MC_CMD_CLP_OUT_GET_MAC_RESERVED_OFST 6 -#define MC_CMD_CLP_OUT_GET_MAC_RESERVED_LEN 2 - -/* MC_CMD_CLP_IN_SET_BOOT msgrequest */ -#define MC_CMD_CLP_IN_SET_BOOT_LEN 5 -/* MC_CMD_CLP_IN_OP_OFST 0 */ -/* MC_CMD_CLP_IN_OP_LEN 4 */ -/* Boot flag */ -#define MC_CMD_CLP_IN_SET_BOOT_FLAG_OFST 4 -#define MC_CMD_CLP_IN_SET_BOOT_FLAG_LEN 1 - -/* MC_CMD_CLP_OUT_SET_BOOT msgresponse */ -#define MC_CMD_CLP_OUT_SET_BOOT_LEN 0 - -/* MC_CMD_CLP_IN_GET_BOOT msgrequest */ -#define MC_CMD_CLP_IN_GET_BOOT_LEN 4 -/* MC_CMD_CLP_IN_OP_OFST 0 */ -/* MC_CMD_CLP_IN_OP_LEN 4 */ - -/* MC_CMD_CLP_OUT_GET_BOOT msgresponse */ -#define MC_CMD_CLP_OUT_GET_BOOT_LEN 4 -/* Boot flag */ -#define MC_CMD_CLP_OUT_GET_BOOT_FLAG_OFST 0 -#define MC_CMD_CLP_OUT_GET_BOOT_FLAG_LEN 1 -/* Padding */ -#define MC_CMD_CLP_OUT_GET_BOOT_RESERVED_OFST 1 -#define MC_CMD_CLP_OUT_GET_BOOT_RESERVED_LEN 3 - - -/***********************************/ -/* MC_CMD_MUM - * Perform a MUM operation - */ -#define MC_CMD_MUM 0x57 -#undef MC_CMD_0x57_PRIVILEGE_CTG - -#define MC_CMD_0x57_PRIVILEGE_CTG SRIOV_CTG_INSECURE - -/* MC_CMD_MUM_IN msgrequest */ -#define MC_CMD_MUM_IN_LEN 4 -#define MC_CMD_MUM_IN_OP_HDR_OFST 0 -#define MC_CMD_MUM_IN_OP_HDR_LEN 4 -#define MC_CMD_MUM_IN_OP_OFST 0 -#define MC_CMD_MUM_IN_OP_LBN 0 -#define MC_CMD_MUM_IN_OP_WIDTH 8 -/* enum: NULL MCDI command to MUM */ -#define MC_CMD_MUM_OP_NULL 0x1 -/* enum: Get MUM version */ -#define MC_CMD_MUM_OP_GET_VERSION 0x2 -/* enum: Issue raw I2C command to MUM */ -#define MC_CMD_MUM_OP_RAW_CMD 0x3 -/* enum: Read from registers on devices connected to MUM. */ -#define MC_CMD_MUM_OP_READ 0x4 -/* enum: Write to registers on devices connected to MUM. */ -#define MC_CMD_MUM_OP_WRITE 0x5 -/* enum: Control UART logging. 
*/ -#define MC_CMD_MUM_OP_LOG 0x6 -/* enum: Operations on MUM GPIO lines */ -#define MC_CMD_MUM_OP_GPIO 0x7 -/* enum: Get sensor readings from MUM */ -#define MC_CMD_MUM_OP_READ_SENSORS 0x8 -/* enum: Initiate clock programming on the MUM */ -#define MC_CMD_MUM_OP_PROGRAM_CLOCKS 0x9 -/* enum: Initiate FPGA load from flash on the MUM */ -#define MC_CMD_MUM_OP_FPGA_LOAD 0xa -/* enum: Request sensor reading from MUM ADC resulting from earlier request via - * MUM ATB - */ -#define MC_CMD_MUM_OP_READ_ATB_SENSOR 0xb -/* enum: Send commands relating to the QSFP ports via the MUM for PHY - * operations - */ -#define MC_CMD_MUM_OP_QSFP 0xc -/* enum: Request discrete and SODIMM DDR info (type, size, speed grade, voltage - * level) from MUM - */ -#define MC_CMD_MUM_OP_READ_DDR_INFO 0xd - -/* MC_CMD_MUM_IN_NULL msgrequest */ -#define MC_CMD_MUM_IN_NULL_LEN 4 -/* MUM cmd header */ -#define MC_CMD_MUM_IN_CMD_OFST 0 -#define MC_CMD_MUM_IN_CMD_LEN 4 - -/* MC_CMD_MUM_IN_GET_VERSION msgrequest */ -#define MC_CMD_MUM_IN_GET_VERSION_LEN 4 -/* MUM cmd header */ -/* MC_CMD_MUM_IN_CMD_OFST 0 */ -/* MC_CMD_MUM_IN_CMD_LEN 4 */ - -/* MC_CMD_MUM_IN_READ msgrequest */ -#define MC_CMD_MUM_IN_READ_LEN 16 -/* MUM cmd header */ -/* MC_CMD_MUM_IN_CMD_OFST 0 */ -/* MC_CMD_MUM_IN_CMD_LEN 4 */ -/* ID of (device connected to MUM) to read from registers of */ -#define MC_CMD_MUM_IN_READ_DEVICE_OFST 4 -#define MC_CMD_MUM_IN_READ_DEVICE_LEN 4 -/* enum: Hittite HMC1035 clock generator on Sorrento board */ -#define MC_CMD_MUM_DEV_HITTITE 0x1 -/* enum: Hittite HMC1035 clock generator for NIC-side on Sorrento board */ -#define MC_CMD_MUM_DEV_HITTITE_NIC 0x2 -/* 32-bit address to read from */ -#define MC_CMD_MUM_IN_READ_ADDR_OFST 8 -#define MC_CMD_MUM_IN_READ_ADDR_LEN 4 -/* Number of words to read. 
*/ -#define MC_CMD_MUM_IN_READ_NUMWORDS_OFST 12 -#define MC_CMD_MUM_IN_READ_NUMWORDS_LEN 4 - -/* MC_CMD_MUM_IN_WRITE msgrequest */ -#define MC_CMD_MUM_IN_WRITE_LENMIN 16 -#define MC_CMD_MUM_IN_WRITE_LENMAX 252 -#define MC_CMD_MUM_IN_WRITE_LENMAX_MCDI2 1020 -#define MC_CMD_MUM_IN_WRITE_LEN(num) (12+4*(num)) -#define MC_CMD_MUM_IN_WRITE_BUFFER_NUM(len) (((len)-12)/4) -/* MUM cmd header */ -/* MC_CMD_MUM_IN_CMD_OFST 0 */ -/* MC_CMD_MUM_IN_CMD_LEN 4 */ -/* ID of (device connected to MUM) to write to registers of */ -#define MC_CMD_MUM_IN_WRITE_DEVICE_OFST 4 -#define MC_CMD_MUM_IN_WRITE_DEVICE_LEN 4 -/* enum: Hittite HMC1035 clock generator on Sorrento board */ -/* MC_CMD_MUM_DEV_HITTITE 0x1 */ -/* 32-bit address to write to */ -#define MC_CMD_MUM_IN_WRITE_ADDR_OFST 8 -#define MC_CMD_MUM_IN_WRITE_ADDR_LEN 4 -/* Words to write */ -#define MC_CMD_MUM_IN_WRITE_BUFFER_OFST 12 -#define MC_CMD_MUM_IN_WRITE_BUFFER_LEN 4 -#define MC_CMD_MUM_IN_WRITE_BUFFER_MINNUM 1 -#define MC_CMD_MUM_IN_WRITE_BUFFER_MAXNUM 60 -#define MC_CMD_MUM_IN_WRITE_BUFFER_MAXNUM_MCDI2 252 - -/* MC_CMD_MUM_IN_RAW_CMD msgrequest */ -#define MC_CMD_MUM_IN_RAW_CMD_LENMIN 17 -#define MC_CMD_MUM_IN_RAW_CMD_LENMAX 252 -#define MC_CMD_MUM_IN_RAW_CMD_LENMAX_MCDI2 1020 -#define MC_CMD_MUM_IN_RAW_CMD_LEN(num) (16+1*(num)) -#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_NUM(len) (((len)-16)/1) -/* MUM cmd header */ -/* MC_CMD_MUM_IN_CMD_OFST 0 */ -/* MC_CMD_MUM_IN_CMD_LEN 4 */ -/* MUM I2C cmd code */ -#define MC_CMD_MUM_IN_RAW_CMD_CMD_CODE_OFST 4 -#define MC_CMD_MUM_IN_RAW_CMD_CMD_CODE_LEN 4 -/* Number of bytes to write */ -#define MC_CMD_MUM_IN_RAW_CMD_NUM_WRITE_OFST 8 -#define MC_CMD_MUM_IN_RAW_CMD_NUM_WRITE_LEN 4 -/* Number of bytes to read */ -#define MC_CMD_MUM_IN_RAW_CMD_NUM_READ_OFST 12 -#define MC_CMD_MUM_IN_RAW_CMD_NUM_READ_LEN 4 -/* Bytes to write */ -#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_OFST 16 -#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_LEN 1 -#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_MINNUM 1 -#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_MAXNUM 236 -#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_MAXNUM_MCDI2 1004 - -/* MC_CMD_MUM_IN_LOG msgrequest */ -#define MC_CMD_MUM_IN_LOG_LEN 8 -/* MUM cmd header */ -/* MC_CMD_MUM_IN_CMD_OFST 0 */ -/* MC_CMD_MUM_IN_CMD_LEN 4 */ -#define MC_CMD_MUM_IN_LOG_OP_OFST 4 -#define MC_CMD_MUM_IN_LOG_OP_LEN 4 -#define MC_CMD_MUM_IN_LOG_OP_UART 0x1 /* enum */ - -/* MC_CMD_MUM_IN_LOG_OP_UART msgrequest */ -#define MC_CMD_MUM_IN_LOG_OP_UART_LEN 12 -/* MC_CMD_MUM_IN_CMD_OFST 0 */ -/* MC_CMD_MUM_IN_CMD_LEN 4 */ -/* MC_CMD_MUM_IN_LOG_OP_OFST 4 */ -/* MC_CMD_MUM_IN_LOG_OP_LEN 4 */ -/* Enable/disable debug output to UART */ -#define MC_CMD_MUM_IN_LOG_OP_UART_ENABLE_OFST 8 -#define MC_CMD_MUM_IN_LOG_OP_UART_ENABLE_LEN 4 - -/* MC_CMD_MUM_IN_GPIO msgrequest */ -#define MC_CMD_MUM_IN_GPIO_LEN 8 -/* MUM cmd header */ -/* MC_CMD_MUM_IN_CMD_OFST 0 */ -/* MC_CMD_MUM_IN_CMD_LEN 4 */ -#define MC_CMD_MUM_IN_GPIO_HDR_OFST 4 -#define MC_CMD_MUM_IN_GPIO_HDR_LEN 4 -#define MC_CMD_MUM_IN_GPIO_OPCODE_OFST 4 -#define MC_CMD_MUM_IN_GPIO_OPCODE_LBN 0 -#define MC_CMD_MUM_IN_GPIO_OPCODE_WIDTH 8 -#define MC_CMD_MUM_IN_GPIO_IN_READ 0x0 /* enum */ -#define MC_CMD_MUM_IN_GPIO_OUT_WRITE 0x1 /* enum */ -#define MC_CMD_MUM_IN_GPIO_OUT_READ 0x2 /* enum */ -#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE 0x3 /* enum */ -#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ 0x4 /* enum */ -#define MC_CMD_MUM_IN_GPIO_OP 0x5 /* enum */ - -/* MC_CMD_MUM_IN_GPIO_IN_READ msgrequest */ -#define MC_CMD_MUM_IN_GPIO_IN_READ_LEN 8 -/* MC_CMD_MUM_IN_CMD_OFST 0 */ -/* 
MC_CMD_MUM_IN_CMD_LEN 4 */ -#define MC_CMD_MUM_IN_GPIO_IN_READ_HDR_OFST 4 -#define MC_CMD_MUM_IN_GPIO_IN_READ_HDR_LEN 4 - -/* MC_CMD_MUM_IN_GPIO_OUT_WRITE msgrequest */ -#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_LEN 16 -/* MC_CMD_MUM_IN_CMD_OFST 0 */ -/* MC_CMD_MUM_IN_CMD_LEN 4 */ -#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_HDR_OFST 4 -#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_HDR_LEN 4 -/* The first 32-bit word to be written to the GPIO OUT register. */ -#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK1_OFST 8 -#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK1_LEN 4 -/* The second 32-bit word to be written to the GPIO OUT register. */ -#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK2_OFST 12 -#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK2_LEN 4 - -/* MC_CMD_MUM_IN_GPIO_OUT_READ msgrequest */ -#define MC_CMD_MUM_IN_GPIO_OUT_READ_LEN 8 -/* MC_CMD_MUM_IN_CMD_OFST 0 */ -/* MC_CMD_MUM_IN_CMD_LEN 4 */ -#define MC_CMD_MUM_IN_GPIO_OUT_READ_HDR_OFST 4 -#define MC_CMD_MUM_IN_GPIO_OUT_READ_HDR_LEN 4 - -/* MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE msgrequest */ -#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_LEN 16 -/* MC_CMD_MUM_IN_CMD_OFST 0 */ -/* MC_CMD_MUM_IN_CMD_LEN 4 */ -#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_HDR_OFST 4 -#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_HDR_LEN 4 -/* The first 32-bit word to be written to the GPIO OUT ENABLE register. */ -#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK1_OFST 8 -#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK1_LEN 4 -/* The second 32-bit word to be written to the GPIO OUT ENABLE register. */ -#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK2_OFST 12 -#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK2_LEN 4 - -/* MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ msgrequest */ -#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ_LEN 8 -/* MC_CMD_MUM_IN_CMD_OFST 0 */ -/* MC_CMD_MUM_IN_CMD_LEN 4 */ -#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ_HDR_OFST 4 -#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ_HDR_LEN 4 - -/* MC_CMD_MUM_IN_GPIO_OP msgrequest */ -#define MC_CMD_MUM_IN_GPIO_OP_LEN 8 -/* MC_CMD_MUM_IN_CMD_OFST 0 */ -/* MC_CMD_MUM_IN_CMD_LEN 4 */ -#define MC_CMD_MUM_IN_GPIO_OP_HDR_OFST 4 -#define MC_CMD_MUM_IN_GPIO_OP_HDR_LEN 4 -#define MC_CMD_MUM_IN_GPIO_OP_BITWISE_OP_OFST 4 -#define MC_CMD_MUM_IN_GPIO_OP_BITWISE_OP_LBN 8 -#define MC_CMD_MUM_IN_GPIO_OP_BITWISE_OP_WIDTH 8 -#define MC_CMD_MUM_IN_GPIO_OP_OUT_READ 0x0 /* enum */ -#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE 0x1 /* enum */ -#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG 0x2 /* enum */ -#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE 0x3 /* enum */ -#define MC_CMD_MUM_IN_GPIO_OP_GPIO_NUMBER_OFST 4 -#define MC_CMD_MUM_IN_GPIO_OP_GPIO_NUMBER_LBN 16 -#define MC_CMD_MUM_IN_GPIO_OP_GPIO_NUMBER_WIDTH 8 - -/* MC_CMD_MUM_IN_GPIO_OP_OUT_READ msgrequest */ -#define MC_CMD_MUM_IN_GPIO_OP_OUT_READ_LEN 8 -/* MC_CMD_MUM_IN_CMD_OFST 0 */ -/* MC_CMD_MUM_IN_CMD_LEN 4 */ -#define MC_CMD_MUM_IN_GPIO_OP_OUT_READ_HDR_OFST 4 -#define MC_CMD_MUM_IN_GPIO_OP_OUT_READ_HDR_LEN 4 - -/* MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE msgrequest */ -#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_LEN 8 -/* MC_CMD_MUM_IN_CMD_OFST 0 */ -/* MC_CMD_MUM_IN_CMD_LEN 4 */ -#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_HDR_OFST 4 -#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_HDR_LEN 4 -#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_WRITEBIT_OFST 4 -#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_WRITEBIT_LBN 24 -#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_WRITEBIT_WIDTH 8 - -/* MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG msgrequest */ -#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_LEN 8 -/* MC_CMD_MUM_IN_CMD_OFST 0 */ -/* MC_CMD_MUM_IN_CMD_LEN 4 */ 
-#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_HDR_OFST 4 -#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_HDR_LEN 4 -#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_CFG_OFST 4 -#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_CFG_LBN 24 -#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_CFG_WIDTH 8 - -/* MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE msgrequest */ -#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_LEN 8 -/* MC_CMD_MUM_IN_CMD_OFST 0 */ -/* MC_CMD_MUM_IN_CMD_LEN 4 */ -#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_HDR_OFST 4 -#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_HDR_LEN 4 -#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_ENABLEBIT_OFST 4 -#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_ENABLEBIT_LBN 24 -#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_ENABLEBIT_WIDTH 8 - -/* MC_CMD_MUM_IN_READ_SENSORS msgrequest */ -#define MC_CMD_MUM_IN_READ_SENSORS_LEN 8 -/* MUM cmd header */ -/* MC_CMD_MUM_IN_CMD_OFST 0 */ -/* MC_CMD_MUM_IN_CMD_LEN 4 */ -#define MC_CMD_MUM_IN_READ_SENSORS_PARAMS_OFST 4 -#define MC_CMD_MUM_IN_READ_SENSORS_PARAMS_LEN 4 -#define MC_CMD_MUM_IN_READ_SENSORS_SENSOR_ID_OFST 4 -#define MC_CMD_MUM_IN_READ_SENSORS_SENSOR_ID_LBN 0 -#define MC_CMD_MUM_IN_READ_SENSORS_SENSOR_ID_WIDTH 8 -#define MC_CMD_MUM_IN_READ_SENSORS_NUM_SENSORS_OFST 4 -#define MC_CMD_MUM_IN_READ_SENSORS_NUM_SENSORS_LBN 8 -#define MC_CMD_MUM_IN_READ_SENSORS_NUM_SENSORS_WIDTH 8 - -/* MC_CMD_MUM_IN_PROGRAM_CLOCKS msgrequest */ -#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_LEN 12 -/* MUM cmd header */ -/* MC_CMD_MUM_IN_CMD_OFST 0 */ -/* MC_CMD_MUM_IN_CMD_LEN 4 */ -/* Bit-mask of clocks to be programmed */ -#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_MASK_OFST 4 -#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_MASK_LEN 4 -#define MC_CMD_MUM_CLOCK_ID_FPGA 0x0 /* enum */ -#define MC_CMD_MUM_CLOCK_ID_DDR 0x1 /* enum */ -#define MC_CMD_MUM_CLOCK_ID_NIC 0x2 /* enum */ -/* Control flags for clock programming */ -#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_FLAGS_OFST 8 -#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_FLAGS_LEN 4 -#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_OVERCLOCK_110_OFST 8 -#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_OVERCLOCK_110_LBN 0 -#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_OVERCLOCK_110_WIDTH 1 -#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_NIC_FROM_FPGA_OFST 8 -#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_NIC_FROM_FPGA_LBN 1 -#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_NIC_FROM_FPGA_WIDTH 1 -#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_REF_FROM_XO_OFST 8 -#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_REF_FROM_XO_LBN 2 -#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_REF_FROM_XO_WIDTH 1 - -/* MC_CMD_MUM_IN_FPGA_LOAD msgrequest */ -#define MC_CMD_MUM_IN_FPGA_LOAD_LEN 8 -/* MUM cmd header */ -/* MC_CMD_MUM_IN_CMD_OFST 0 */ -/* MC_CMD_MUM_IN_CMD_LEN 4 */ -/* Enable/Disable FPGA config from flash */ -#define MC_CMD_MUM_IN_FPGA_LOAD_ENABLE_OFST 4 -#define MC_CMD_MUM_IN_FPGA_LOAD_ENABLE_LEN 4 - -/* MC_CMD_MUM_IN_READ_ATB_SENSOR msgrequest */ -#define MC_CMD_MUM_IN_READ_ATB_SENSOR_LEN 4 -/* MUM cmd header */ -/* MC_CMD_MUM_IN_CMD_OFST 0 */ -/* MC_CMD_MUM_IN_CMD_LEN 4 */ - -/* MC_CMD_MUM_IN_QSFP msgrequest */ -#define MC_CMD_MUM_IN_QSFP_LEN 12 -/* MUM cmd header */ -/* MC_CMD_MUM_IN_CMD_OFST 0 */ -/* MC_CMD_MUM_IN_CMD_LEN 4 */ -#define MC_CMD_MUM_IN_QSFP_HDR_OFST 4 -#define MC_CMD_MUM_IN_QSFP_HDR_LEN 4 -#define MC_CMD_MUM_IN_QSFP_OPCODE_OFST 4 -#define MC_CMD_MUM_IN_QSFP_OPCODE_LBN 0 -#define MC_CMD_MUM_IN_QSFP_OPCODE_WIDTH 4 -#define MC_CMD_MUM_IN_QSFP_INIT 0x0 /* enum */ -#define MC_CMD_MUM_IN_QSFP_RECONFIGURE 0x1 /* enum */ -#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP 0x2 /* enum */ -#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO 0x3 /* enum */ 
-#define MC_CMD_MUM_IN_QSFP_FILL_STATS 0x4 /* enum */ -#define MC_CMD_MUM_IN_QSFP_POLL_BIST 0x5 /* enum */ -#define MC_CMD_MUM_IN_QSFP_IDX_OFST 8 -#define MC_CMD_MUM_IN_QSFP_IDX_LEN 4 - -/* MC_CMD_MUM_IN_QSFP_INIT msgrequest */ -#define MC_CMD_MUM_IN_QSFP_INIT_LEN 16 -/* MC_CMD_MUM_IN_CMD_OFST 0 */ -/* MC_CMD_MUM_IN_CMD_LEN 4 */ -#define MC_CMD_MUM_IN_QSFP_INIT_HDR_OFST 4 -#define MC_CMD_MUM_IN_QSFP_INIT_HDR_LEN 4 -#define MC_CMD_MUM_IN_QSFP_INIT_IDX_OFST 8 -#define MC_CMD_MUM_IN_QSFP_INIT_IDX_LEN 4 -#define MC_CMD_MUM_IN_QSFP_INIT_CAGE_OFST 12 -#define MC_CMD_MUM_IN_QSFP_INIT_CAGE_LEN 4 - -/* MC_CMD_MUM_IN_QSFP_RECONFIGURE msgrequest */ -#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_LEN 24 -/* MC_CMD_MUM_IN_CMD_OFST 0 */ -/* MC_CMD_MUM_IN_CMD_LEN 4 */ -#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_HDR_OFST 4 -#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_HDR_LEN 4 -#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_IDX_OFST 8 -#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_IDX_LEN 4 -#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_TX_DISABLE_OFST 12 -#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_TX_DISABLE_LEN 4 -#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LANES_OFST 16 -#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LANES_LEN 4 -#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LINK_SPEED_OFST 20 -#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LINK_SPEED_LEN 4 - -/* MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP msgrequest */ -#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_LEN 12 -/* MC_CMD_MUM_IN_CMD_OFST 0 */ -/* MC_CMD_MUM_IN_CMD_LEN 4 */ -#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_HDR_OFST 4 -#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_HDR_LEN 4 -#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_IDX_OFST 8 -#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_IDX_LEN 4 - -/* MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO msgrequest */ -#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_LEN 16 -/* MC_CMD_MUM_IN_CMD_OFST 0 */ -/* MC_CMD_MUM_IN_CMD_LEN 4 */ -#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_HDR_OFST 4 -#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_HDR_LEN 4 -#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_IDX_OFST 8 -#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_IDX_LEN 4 -#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_PAGE_OFST 12 -#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_PAGE_LEN 4 - -/* MC_CMD_MUM_IN_QSFP_FILL_STATS msgrequest */ -#define MC_CMD_MUM_IN_QSFP_FILL_STATS_LEN 12 -/* MC_CMD_MUM_IN_CMD_OFST 0 */ -/* MC_CMD_MUM_IN_CMD_LEN 4 */ -#define MC_CMD_MUM_IN_QSFP_FILL_STATS_HDR_OFST 4 -#define MC_CMD_MUM_IN_QSFP_FILL_STATS_HDR_LEN 4 -#define MC_CMD_MUM_IN_QSFP_FILL_STATS_IDX_OFST 8 -#define MC_CMD_MUM_IN_QSFP_FILL_STATS_IDX_LEN 4 - -/* MC_CMD_MUM_IN_QSFP_POLL_BIST msgrequest */ -#define MC_CMD_MUM_IN_QSFP_POLL_BIST_LEN 12 -/* MC_CMD_MUM_IN_CMD_OFST 0 */ -/* MC_CMD_MUM_IN_CMD_LEN 4 */ -#define MC_CMD_MUM_IN_QSFP_POLL_BIST_HDR_OFST 4 -#define MC_CMD_MUM_IN_QSFP_POLL_BIST_HDR_LEN 4 -#define MC_CMD_MUM_IN_QSFP_POLL_BIST_IDX_OFST 8 -#define MC_CMD_MUM_IN_QSFP_POLL_BIST_IDX_LEN 4 - -/* MC_CMD_MUM_IN_READ_DDR_INFO msgrequest */ -#define MC_CMD_MUM_IN_READ_DDR_INFO_LEN 4 -/* MUM cmd header */ -/* MC_CMD_MUM_IN_CMD_OFST 0 */ -/* MC_CMD_MUM_IN_CMD_LEN 4 */ - -/* MC_CMD_MUM_OUT msgresponse */ -#define MC_CMD_MUM_OUT_LEN 0 - -/* MC_CMD_MUM_OUT_NULL msgresponse */ -#define MC_CMD_MUM_OUT_NULL_LEN 0 - -/* MC_CMD_MUM_OUT_GET_VERSION msgresponse */ -#define MC_CMD_MUM_OUT_GET_VERSION_LEN 12 -#define MC_CMD_MUM_OUT_GET_VERSION_FIRMWARE_OFST 0 -#define MC_CMD_MUM_OUT_GET_VERSION_FIRMWARE_LEN 4 -#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_OFST 4 -#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_LEN 8 -#define 
MC_CMD_MUM_OUT_GET_VERSION_VERSION_LO_OFST 4 -#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_LO_LEN 4 -#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_LO_LBN 32 -#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_LO_WIDTH 32 -#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_HI_OFST 8 -#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_HI_LEN 4 -#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_HI_LBN 64 -#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_HI_WIDTH 32 - -/* MC_CMD_MUM_OUT_RAW_CMD msgresponse */ -#define MC_CMD_MUM_OUT_RAW_CMD_LENMIN 1 -#define MC_CMD_MUM_OUT_RAW_CMD_LENMAX 252 -#define MC_CMD_MUM_OUT_RAW_CMD_LENMAX_MCDI2 1020 -#define MC_CMD_MUM_OUT_RAW_CMD_LEN(num) (0+1*(num)) -#define MC_CMD_MUM_OUT_RAW_CMD_DATA_NUM(len) (((len)-0)/1) -/* returned data */ -#define MC_CMD_MUM_OUT_RAW_CMD_DATA_OFST 0 -#define MC_CMD_MUM_OUT_RAW_CMD_DATA_LEN 1 -#define MC_CMD_MUM_OUT_RAW_CMD_DATA_MINNUM 1 -#define MC_CMD_MUM_OUT_RAW_CMD_DATA_MAXNUM 252 -#define MC_CMD_MUM_OUT_RAW_CMD_DATA_MAXNUM_MCDI2 1020 - -/* MC_CMD_MUM_OUT_READ msgresponse */ -#define MC_CMD_MUM_OUT_READ_LENMIN 4 -#define MC_CMD_MUM_OUT_READ_LENMAX 252 -#define MC_CMD_MUM_OUT_READ_LENMAX_MCDI2 1020 -#define MC_CMD_MUM_OUT_READ_LEN(num) (0+4*(num)) -#define MC_CMD_MUM_OUT_READ_BUFFER_NUM(len) (((len)-0)/4) -#define MC_CMD_MUM_OUT_READ_BUFFER_OFST 0 -#define MC_CMD_MUM_OUT_READ_BUFFER_LEN 4 -#define MC_CMD_MUM_OUT_READ_BUFFER_MINNUM 1 -#define MC_CMD_MUM_OUT_READ_BUFFER_MAXNUM 63 -#define MC_CMD_MUM_OUT_READ_BUFFER_MAXNUM_MCDI2 255 - -/* MC_CMD_MUM_OUT_WRITE msgresponse */ -#define MC_CMD_MUM_OUT_WRITE_LEN 0 - -/* MC_CMD_MUM_OUT_LOG msgresponse */ -#define MC_CMD_MUM_OUT_LOG_LEN 0 - -/* MC_CMD_MUM_OUT_LOG_OP_UART msgresponse */ -#define MC_CMD_MUM_OUT_LOG_OP_UART_LEN 0 - -/* MC_CMD_MUM_OUT_GPIO_IN_READ msgresponse */ -#define MC_CMD_MUM_OUT_GPIO_IN_READ_LEN 8 -/* The first 32-bit word read from the GPIO IN register. */ -#define MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK1_OFST 0 -#define MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK1_LEN 4 -/* The second 32-bit word read from the GPIO IN register. */ -#define MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK2_OFST 4 -#define MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK2_LEN 4 - -/* MC_CMD_MUM_OUT_GPIO_OUT_WRITE msgresponse */ -#define MC_CMD_MUM_OUT_GPIO_OUT_WRITE_LEN 0 - -/* MC_CMD_MUM_OUT_GPIO_OUT_READ msgresponse */ -#define MC_CMD_MUM_OUT_GPIO_OUT_READ_LEN 8 -/* The first 32-bit word read from the GPIO OUT register. */ -#define MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK1_OFST 0 -#define MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK1_LEN 4 -/* The second 32-bit word read from the GPIO OUT register. 
*/ -#define MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK2_OFST 4 -#define MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK2_LEN 4 - -/* MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_WRITE msgresponse */ -#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_WRITE_LEN 0 - -/* MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ msgresponse */ -#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_LEN 8 -#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK1_OFST 0 -#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK1_LEN 4 -#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK2_OFST 4 -#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK2_LEN 4 - -/* MC_CMD_MUM_OUT_GPIO_OP_OUT_READ msgresponse */ -#define MC_CMD_MUM_OUT_GPIO_OP_OUT_READ_LEN 4 -#define MC_CMD_MUM_OUT_GPIO_OP_OUT_READ_BIT_READ_OFST 0 -#define MC_CMD_MUM_OUT_GPIO_OP_OUT_READ_BIT_READ_LEN 4 - -/* MC_CMD_MUM_OUT_GPIO_OP_OUT_WRITE msgresponse */ -#define MC_CMD_MUM_OUT_GPIO_OP_OUT_WRITE_LEN 0 - -/* MC_CMD_MUM_OUT_GPIO_OP_OUT_CONFIG msgresponse */ -#define MC_CMD_MUM_OUT_GPIO_OP_OUT_CONFIG_LEN 0 - -/* MC_CMD_MUM_OUT_GPIO_OP_OUT_ENABLE msgresponse */ -#define MC_CMD_MUM_OUT_GPIO_OP_OUT_ENABLE_LEN 0 - -/* MC_CMD_MUM_OUT_READ_SENSORS msgresponse */ -#define MC_CMD_MUM_OUT_READ_SENSORS_LENMIN 4 -#define MC_CMD_MUM_OUT_READ_SENSORS_LENMAX 252 -#define MC_CMD_MUM_OUT_READ_SENSORS_LENMAX_MCDI2 1020 -#define MC_CMD_MUM_OUT_READ_SENSORS_LEN(num) (0+4*(num)) -#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_NUM(len) (((len)-0)/4) -#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_OFST 0 -#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_LEN 4 -#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_MINNUM 1 -#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_MAXNUM 63 -#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_MAXNUM_MCDI2 255 -#define MC_CMD_MUM_OUT_READ_SENSORS_READING_OFST 0 -#define MC_CMD_MUM_OUT_READ_SENSORS_READING_LBN 0 -#define MC_CMD_MUM_OUT_READ_SENSORS_READING_WIDTH 16 -#define MC_CMD_MUM_OUT_READ_SENSORS_STATE_OFST 0 -#define MC_CMD_MUM_OUT_READ_SENSORS_STATE_LBN 16 -#define MC_CMD_MUM_OUT_READ_SENSORS_STATE_WIDTH 8 -#define MC_CMD_MUM_OUT_READ_SENSORS_TYPE_OFST 0 -#define MC_CMD_MUM_OUT_READ_SENSORS_TYPE_LBN 24 -#define MC_CMD_MUM_OUT_READ_SENSORS_TYPE_WIDTH 8 - -/* MC_CMD_MUM_OUT_PROGRAM_CLOCKS msgresponse */ -#define MC_CMD_MUM_OUT_PROGRAM_CLOCKS_LEN 4 -#define MC_CMD_MUM_OUT_PROGRAM_CLOCKS_OK_MASK_OFST 0 -#define MC_CMD_MUM_OUT_PROGRAM_CLOCKS_OK_MASK_LEN 4 - -/* MC_CMD_MUM_OUT_FPGA_LOAD msgresponse */ -#define MC_CMD_MUM_OUT_FPGA_LOAD_LEN 0 - -/* MC_CMD_MUM_OUT_READ_ATB_SENSOR msgresponse */ -#define MC_CMD_MUM_OUT_READ_ATB_SENSOR_LEN 4 -#define MC_CMD_MUM_OUT_READ_ATB_SENSOR_RESULT_OFST 0 -#define MC_CMD_MUM_OUT_READ_ATB_SENSOR_RESULT_LEN 4 - -/* MC_CMD_MUM_OUT_QSFP_INIT msgresponse */ -#define MC_CMD_MUM_OUT_QSFP_INIT_LEN 0 - -/* MC_CMD_MUM_OUT_QSFP_RECONFIGURE msgresponse */ -#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_LEN 8 -#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LP_CAP_OFST 0 -#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LP_CAP_LEN 4 -#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_FLAGS_OFST 4 -#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_FLAGS_LEN 4 -#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_READY_OFST 4 -#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_READY_LBN 0 -#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_READY_WIDTH 1 -#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LINK_UP_OFST 4 -#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LINK_UP_LBN 1 -#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LINK_UP_WIDTH 1 - -/* MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP msgresponse */ -#define 
MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP_LEN 4 -#define MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP_PORT_PHY_LP_CAP_OFST 0 -#define MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP_PORT_PHY_LP_CAP_LEN 4 - -/* MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO msgresponse */ -#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LENMIN 5 -#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LENMAX 252 -#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LENMAX_MCDI2 1020 -#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LEN(num) (4+1*(num)) -#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_NUM(len) (((len)-4)/1) -/* in bytes */ -#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATALEN_OFST 0 -#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATALEN_LEN 4 -#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_OFST 4 -#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_LEN 1 -#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_MINNUM 1 -#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_MAXNUM 248 -#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_MAXNUM_MCDI2 1016 - -/* MC_CMD_MUM_OUT_QSFP_FILL_STATS msgresponse */ -#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_LEN 8 -#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PMA_PMD_LINK_UP_OFST 0 -#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PMA_PMD_LINK_UP_LEN 4 -#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PCS_LINK_UP_OFST 4 -#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PCS_LINK_UP_LEN 4 - -/* MC_CMD_MUM_OUT_QSFP_POLL_BIST msgresponse */ -#define MC_CMD_MUM_OUT_QSFP_POLL_BIST_LEN 4 -#define MC_CMD_MUM_OUT_QSFP_POLL_BIST_TEST_OFST 0 -#define MC_CMD_MUM_OUT_QSFP_POLL_BIST_TEST_LEN 4 - -/* MC_CMD_MUM_OUT_READ_DDR_INFO msgresponse */ -#define MC_CMD_MUM_OUT_READ_DDR_INFO_LENMIN 24 -#define MC_CMD_MUM_OUT_READ_DDR_INFO_LENMAX 248 -#define MC_CMD_MUM_OUT_READ_DDR_INFO_LENMAX_MCDI2 1016 -#define MC_CMD_MUM_OUT_READ_DDR_INFO_LEN(num) (8+8*(num)) -#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_NUM(len) (((len)-8)/8) -/* Discrete (soldered) DDR resistor strap info */ -#define MC_CMD_MUM_OUT_READ_DDR_INFO_DISCRETE_DDR_INFO_OFST 0 -#define MC_CMD_MUM_OUT_READ_DDR_INFO_DISCRETE_DDR_INFO_LEN 4 -#define MC_CMD_MUM_OUT_READ_DDR_INFO_VRATIO_OFST 0 -#define MC_CMD_MUM_OUT_READ_DDR_INFO_VRATIO_LBN 0 -#define MC_CMD_MUM_OUT_READ_DDR_INFO_VRATIO_WIDTH 16 -#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED1_OFST 0 -#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED1_LBN 16 -#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED1_WIDTH 16 -/* Number of SODIMM info records */ -#define MC_CMD_MUM_OUT_READ_DDR_INFO_NUM_RECORDS_OFST 4 -#define MC_CMD_MUM_OUT_READ_DDR_INFO_NUM_RECORDS_LEN 4 -/* Array of SODIMM info records */ -#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_OFST 8 -#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_LEN 8 -#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_LO_OFST 8 -#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_LO_LEN 4 -#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_LO_LBN 64 -#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_LO_WIDTH 32 -#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_HI_OFST 12 -#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_HI_LEN 4 -#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_HI_LBN 96 -#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_HI_WIDTH 32 -#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_MINNUM 2 -#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_MAXNUM 30 -#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_MAXNUM_MCDI2 126 -#define MC_CMD_MUM_OUT_READ_DDR_INFO_BANK_ID_OFST 8 -#define 
MC_CMD_MUM_OUT_READ_DDR_INFO_BANK_ID_LBN 0 -#define MC_CMD_MUM_OUT_READ_DDR_INFO_BANK_ID_WIDTH 8 -/* enum: SODIMM bank 1 (Top SODIMM for Sorrento) */ -#define MC_CMD_MUM_OUT_READ_DDR_INFO_BANK1 0x0 -/* enum: SODIMM bank 2 (Bottom SODDIMM for Sorrento) */ -#define MC_CMD_MUM_OUT_READ_DDR_INFO_BANK2 0x1 -/* enum: Total number of SODIMM banks */ -#define MC_CMD_MUM_OUT_READ_DDR_INFO_NUM_BANKS 0x2 -#define MC_CMD_MUM_OUT_READ_DDR_INFO_TYPE_OFST 8 -#define MC_CMD_MUM_OUT_READ_DDR_INFO_TYPE_LBN 8 -#define MC_CMD_MUM_OUT_READ_DDR_INFO_TYPE_WIDTH 8 -#define MC_CMD_MUM_OUT_READ_DDR_INFO_RANK_OFST 8 -#define MC_CMD_MUM_OUT_READ_DDR_INFO_RANK_LBN 16 -#define MC_CMD_MUM_OUT_READ_DDR_INFO_RANK_WIDTH 4 -#define MC_CMD_MUM_OUT_READ_DDR_INFO_VOLTAGE_OFST 8 -#define MC_CMD_MUM_OUT_READ_DDR_INFO_VOLTAGE_LBN 20 -#define MC_CMD_MUM_OUT_READ_DDR_INFO_VOLTAGE_WIDTH 4 -#define MC_CMD_MUM_OUT_READ_DDR_INFO_NOT_POWERED 0x0 /* enum */ -#define MC_CMD_MUM_OUT_READ_DDR_INFO_1V25 0x1 /* enum */ -#define MC_CMD_MUM_OUT_READ_DDR_INFO_1V35 0x2 /* enum */ -#define MC_CMD_MUM_OUT_READ_DDR_INFO_1V5 0x3 /* enum */ -/* enum: Values 5-15 are reserved for future usage */ -#define MC_CMD_MUM_OUT_READ_DDR_INFO_1V8 0x4 -#define MC_CMD_MUM_OUT_READ_DDR_INFO_SIZE_OFST 8 -#define MC_CMD_MUM_OUT_READ_DDR_INFO_SIZE_LBN 24 -#define MC_CMD_MUM_OUT_READ_DDR_INFO_SIZE_WIDTH 8 -#define MC_CMD_MUM_OUT_READ_DDR_INFO_SPEED_OFST 8 -#define MC_CMD_MUM_OUT_READ_DDR_INFO_SPEED_LBN 32 -#define MC_CMD_MUM_OUT_READ_DDR_INFO_SPEED_WIDTH 16 -#define MC_CMD_MUM_OUT_READ_DDR_INFO_STATE_OFST 8 -#define MC_CMD_MUM_OUT_READ_DDR_INFO_STATE_LBN 48 -#define MC_CMD_MUM_OUT_READ_DDR_INFO_STATE_WIDTH 4 -/* enum: No module present */ -#define MC_CMD_MUM_OUT_READ_DDR_INFO_ABSENT 0x0 -/* enum: Module present supported and powered on */ -#define MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_POWERED 0x1 -/* enum: Module present but bad type */ -#define MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_BAD_TYPE 0x2 -/* enum: Module present but incompatible voltage */ -#define MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_BAD_VOLTAGE 0x3 -/* enum: Module present but unknown SPD */ -#define MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_BAD_SPD 0x4 -/* enum: Module present but slot cannot support it */ -#define MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_BAD_SLOT 0x5 -/* enum: Modules may or may not be present, but cannot establish contact by I2C - */ -#define MC_CMD_MUM_OUT_READ_DDR_INFO_NOT_REACHABLE 0x6 -#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED2_OFST 8 -#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED2_LBN 52 -#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED2_WIDTH 12 - /* MC_CMD_DYNAMIC_SENSORS_LIMITS structuredef: Set of sensor limits. This * should match the equivalent structure in the sensor_query SPHINX service. */ @@ -9500,27 +9137,22 @@ * and a generation count for this version of the sensor table. On systems * advertising the DYNAMIC_SENSORS capability bit, this replaces the * MC_CMD_READ_SENSORS command. On multi-MC systems this may include sensors - * added by the NMC. - * - * Sensor handles are persistent for the lifetime of the sensor and are used to - * identify sensors in MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS and - * MC_CMD_DYNAMIC_SENSORS_GET_VALUES. - * - * The generation count is maintained by the MC, is persistent across reboots - * and will be incremented each time the sensor table is modified. When the - * table is modified, a CODE_DYNAMIC_SENSORS_CHANGE event will be generated - * containing the new generation count. 
The driver should compare this against
- * the current generation count, and if it is different, call
- * MC_CMD_DYNAMIC_SENSORS_LIST again to update it's copy of the sensor table.
- *
- * The sensor count is provided to allow a future path to supporting more than
+ * added by the NMC. Sensor handles are persistent for the lifetime of the
+ * sensor and are used to identify sensors in
+ * MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS and
+ * MC_CMD_DYNAMIC_SENSORS_GET_VALUES. The generation count is maintained by the
+ * MC, is persistent across reboots and will be incremented each time the
+ * sensor table is modified. When the table is modified, a
+ * CODE_DYNAMIC_SENSORS_CHANGE event will be generated containing the new
+ * generation count. The driver should compare this against the current
+ * generation count, and if it is different, call MC_CMD_DYNAMIC_SENSORS_LIST
+ * again to update its copy of the sensor table. The sensor count is provided
+ * to allow a future path to supporting more than
  * MC_CMD_DYNAMIC_SENSORS_GET_READINGS_IN_HANDLES_MAXNUM_MCDI2 sensors, i.e.
  * the maximum number that will fit in a single response. As this is a fairly
  * large number (253) it is not anticipated that this will be needed in the
- * near future, so can currently be ignored.
- *
- * On Riverhead this command is implemented as a wrapper for `list` in the
- * sensor_query SPHINX service.
+ * near future, so can currently be ignored. On Riverhead this command is
+ * implemented as a wrapper for `list` in the sensor_query SPHINX service.
  */
 #define MC_CMD_DYNAMIC_SENSORS_LIST 0x66
 #undef MC_CMD_0x66_PRIVILEGE_CTG
@@ -9557,15 +9189,13 @@
 /***********************************/
 /* MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS
  * Get descriptions for a set of sensors, specified as an array of sensor
- * handles as returned by MC_CMD_DYNAMIC_SENSORS_LIST
- *
- * Any handles which do not correspond to a sensor currently managed by the MC
- * will be dropped from from the response. This may happen when a sensor table
- * update is in progress, and effectively means the set of usable sensors is
- * the intersection between the sets of sensors known to the driver and the MC.
- *
- * On Riverhead this command is implemented as a wrapper for
- * `get_descriptions` in the sensor_query SPHINX service.
+ * handles as returned by MC_CMD_DYNAMIC_SENSORS_LIST. Any handles which do not
+ * correspond to a sensor currently managed by the MC will be dropped from
+ * the response. This may happen when a sensor table update is in progress, and
+ * effectively means the set of usable sensors is the intersection between the
+ * sets of sensors known to the driver and the MC. On Riverhead this command is
+ * implemented as a wrapper for `get_descriptions` in the sensor_query SPHINX
+ * service.
+ */
 #define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS 0x67
 #undef MC_CMD_0x67_PRIVILEGE_CTG
@@ -9602,19 +9232,15 @@
 /***********************************/
 /* MC_CMD_DYNAMIC_SENSORS_GET_READINGS
  * Read the state and value for a set of sensors, specified as an array of
- * sensor handles as returned by MC_CMD_DYNAMIC_SENSORS_LIST.
- *
- * In the case of a broken sensor, then the state of the response's
- * MC_CMD_DYNAMIC_SENSORS_VALUE entry will be set to BROKEN, and any value
- * provided should be treated as erroneous.
- *
- * Any handles which do not correspond to a sensor currently managed by the MC
- * will be dropped from from the response. This may happen when a sensor table
- * update is in progress, and effectively means the set of usable sensors is
- * the intersection between the sets of sensors known to the driver and the MC.
- *
- * On Riverhead this command is implemented as a wrapper for `get_readings`
- * in the sensor_query SPHINX service.
+ * sensor handles as returned by MC_CMD_DYNAMIC_SENSORS_LIST. In the case of a
+ * broken sensor, the state of the response's MC_CMD_DYNAMIC_SENSORS_VALUE
+ * entry will be set to BROKEN, and any value provided should be treated as
+ * erroneous. Any handles which do not correspond to a sensor currently managed
+ * by the MC will be dropped from the response. This may happen when a
+ * sensor table update is in progress, and effectively means the set of usable
+ * sensors is the intersection between the sets of sensors known to the driver
+ * and the MC. On Riverhead this command is implemented as a wrapper for
+ * `get_readings` in the sensor_query SPHINX service.
  */
 #define MC_CMD_DYNAMIC_SENSORS_GET_READINGS 0x68
 #undef MC_CMD_0x68_PRIVILEGE_CTG
@@ -9647,45 +9273,1286 @@
 #define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT_VALUES_MAXNUM 21
 #define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT_VALUES_MAXNUM_MCDI2 85
-
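The comments above specify a small synchronisation protocol between the MC and the driver: a generation count plus a change event that tells the driver to refresh its cached table. As an editorial aside (not part of the patch), a minimal driver-side sketch of that protocol follows; only the MC_CMD_* names come from this header, everything else is a hypothetical stand-in:

#include <linux/types.h>

/* Hedged sketch: keep a cached copy of the dynamic sensor table in sync
 * with the MC's generation count. All names other than the MC_CMD_*
 * constants are illustrative.
 */
struct dyn_sensor_cache {
	u32 generation;		/* generation of our cached table */
	/* ... cached sensor handles and descriptions ... */
};

/* Hypothetical wrapper that issues MC_CMD_DYNAMIC_SENSORS_LIST, replaces
 * the cached handles and stores the generation count from the response.
 */
static void dyn_sensors_refetch_list(struct dyn_sensor_cache *cache);

/* Called on a CODE_DYNAMIC_SENSORS_CHANGE event, which carries the MC's
 * new generation count.
 */
static void dyn_sensors_change_event(struct dyn_sensor_cache *cache,
				     u32 event_generation)
{
	if (event_generation == cache->generation)
		return;		/* our copy of the table is already current */

	/* Table was modified: re-list and record the new generation for
	 * the next comparison.
	 */
	dyn_sensors_refetch_list(cache);
}

Handles that disappear between the LIST call and a later GET_DESCRIPTIONS or GET_READINGS call are simply dropped from those responses, so the usable set is the intersection described in the comments and no extra locking against table updates is needed on the driver side.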
+/* MC_CMD_MAC_FLAGS structuredef */
+#define MC_CMD_MAC_FLAGS_LEN 4
+/* The enums defined in this field are used as indices into the
+ * MC_CMD_MAC_FLAGS bitmask.
+ */
+#define MC_CMD_MAC_FLAGS_MASK_OFST 0
+#define MC_CMD_MAC_FLAGS_MASK_LEN 4
+/* enum property: bitshift */
+/* enum: Include the FCS in the packet data delivered to the host. Ignored if
+ * RX_INCLUDE_FCS not set in capabilities.
+ */
+#define MC_CMD_MAC_FLAGS_FLAG_INCLUDE_FCS 0x0
+#define MC_CMD_MAC_FLAGS_MASK_LBN 0
+#define MC_CMD_MAC_FLAGS_MASK_WIDTH 32
+
+/* MC_CMD_TRANSMISSION_MODE structuredef */
+#define MC_CMD_TRANSMISSION_MODE_LEN 4
+#define MC_CMD_TRANSMISSION_MODE_MASK_OFST 0
+#define MC_CMD_TRANSMISSION_MODE_MASK_LEN 4
+/* enum property: value */
+#define MC_CMD_TRANSMISSION_MODE_PROMSC_MODE 0x0 /* enum */
+#define MC_CMD_TRANSMISSION_MODE_UNCST_MODE 0x1 /* enum */
+#define MC_CMD_TRANSMISSION_MODE_BRDCST_MODE 0x2 /* enum */
+#define MC_CMD_TRANSMISSION_MODE_MASK_LBN 0
+#define MC_CMD_TRANSMISSION_MODE_MASK_WIDTH 32
+
+/* MC_CMD_MAC_CONFIG_OPTIONS structuredef */
+#define MC_CMD_MAC_CONFIG_OPTIONS_LEN 4
+#define MC_CMD_MAC_CONFIG_OPTIONS_MASK_OFST 0
+#define MC_CMD_MAC_CONFIG_OPTIONS_MASK_LEN 4
+/* enum property: bitmask */
+/* enum: Configure the MAC address. */
+#define MC_CMD_MAC_CONFIG_OPTIONS_CFG_ADDR 0x0
+/* enum: Configure the maximum frame length. */
+#define MC_CMD_MAC_CONFIG_OPTIONS_CFG_MAX_FRAME_LEN 0x1
+/* enum: Configure flow control. */
+#define MC_CMD_MAC_CONFIG_OPTIONS_CFG_FCNTL 0x2
+/* enum: Configure the transmission mode. */
+#define MC_CMD_MAC_CONFIG_OPTIONS_CFG_TRANSMISSION_MODE 0x3
+/* enum: Configure FCS. */
+#define MC_CMD_MAC_CONFIG_OPTIONS_CFG_INCLUDE_FCS 0x4
+#define MC_CMD_MAC_CONFIG_OPTIONS_MASK_LBN 0
+#define MC_CMD_MAC_CONFIG_OPTIONS_MASK_WIDTH 32
+
+
+/***********************************/
+/* MC_CMD_MAC_CTRL
+ * Set MAC configuration. Return code: 0, EINVAL, ENOTSUP
+ */
+#define MC_CMD_MAC_CTRL 0x1df
+#undef MC_CMD_0x1df_PRIVILEGE_CTG
+
+#define MC_CMD_0x1df_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_MAC_CTRL_IN msgrequest */
+#define MC_CMD_MAC_CTRL_IN_LEN 32
+/* Handle for selected network port. */
+#define MC_CMD_MAC_CTRL_IN_PORT_HANDLE_OFST 0
+#define MC_CMD_MAC_CTRL_IN_PORT_HANDLE_LEN 4
+/* Select which parameters to configure.
A parameter will only be modified if + * the corresponding control flag is set. + */ +#define MC_CMD_MAC_CTRL_IN_CONTROL_FLAGS_OFST 4 +#define MC_CMD_MAC_CTRL_IN_CONTROL_FLAGS_LEN 4 +/* enum property: bitshift */ +/* Enum values, see field(s): */ +/* MC_CMD_MAC_CONFIG_OPTIONS/MASK */ +/* MAC address of the device. */ +#define MC_CMD_MAC_CTRL_IN_ADDR_OFST 8 +#define MC_CMD_MAC_CTRL_IN_ADDR_LEN 8 +#define MC_CMD_MAC_CTRL_IN_ADDR_LO_OFST 8 +#define MC_CMD_MAC_CTRL_IN_ADDR_LO_LEN 4 +#define MC_CMD_MAC_CTRL_IN_ADDR_LO_LBN 64 +#define MC_CMD_MAC_CTRL_IN_ADDR_LO_WIDTH 32 +#define MC_CMD_MAC_CTRL_IN_ADDR_HI_OFST 12 +#define MC_CMD_MAC_CTRL_IN_ADDR_HI_LEN 4 +#define MC_CMD_MAC_CTRL_IN_ADDR_HI_LBN 96 +#define MC_CMD_MAC_CTRL_IN_ADDR_HI_WIDTH 32 +/* Includes the ethernet header, optional VLAN tags, payload and FCS. */ +#define MC_CMD_MAC_CTRL_IN_MAX_FRAME_LEN_OFST 16 +#define MC_CMD_MAC_CTRL_IN_MAX_FRAME_LEN_LEN 4 +/* Settings for flow control. */ +#define MC_CMD_MAC_CTRL_IN_FCNTL_OFST 20 +#define MC_CMD_MAC_CTRL_IN_FCNTL_LEN 4 +/* enum property: value */ +/* Enum values, see field(s): */ +/* MC_CMD_FCNTL/MASK */ +/* Configure the MAC to transmit in one of promiscuous, unicast or broadcast + * mode. + */ +#define MC_CMD_MAC_CTRL_IN_TRANSMISSION_MODE_OFST 24 +#define MC_CMD_MAC_CTRL_IN_TRANSMISSION_MODE_LEN 4 +/* enum property: value */ +/* Enum values, see field(s): */ +/* MC_CMD_TRANSMISSION_MODE/MASK */ +/* Flags to control and expand the configuration of the MAC. */ +#define MC_CMD_MAC_CTRL_IN_FLAGS_OFST 28 +#define MC_CMD_MAC_CTRL_IN_FLAGS_LEN 4 +/* enum property: bitshift */ +/* Enum values, see field(s): */ +/* MC_CMD_MAC_FLAGS/MASK */ + +/* MC_CMD_MAC_CTRL_IN_V2 msgrequest: Updated MAC_CTRL with QBB mask */ +#define MC_CMD_MAC_CTRL_IN_V2_LEN 33 +/* Handle for selected network port. */ +#define MC_CMD_MAC_CTRL_IN_V2_PORT_HANDLE_OFST 0 +#define MC_CMD_MAC_CTRL_IN_V2_PORT_HANDLE_LEN 4 +/* Select which parameters to configure. A parameter will only be modified if + * the corresponding control flag is set. + */ +#define MC_CMD_MAC_CTRL_IN_V2_CONTROL_FLAGS_OFST 4 +#define MC_CMD_MAC_CTRL_IN_V2_CONTROL_FLAGS_LEN 4 +/* enum property: bitshift */ +/* Enum values, see field(s): */ +/* MC_CMD_MAC_CONFIG_OPTIONS/MASK */ +/* MAC address of the device. */ +#define MC_CMD_MAC_CTRL_IN_V2_ADDR_OFST 8 +#define MC_CMD_MAC_CTRL_IN_V2_ADDR_LEN 8 +#define MC_CMD_MAC_CTRL_IN_V2_ADDR_LO_OFST 8 +#define MC_CMD_MAC_CTRL_IN_V2_ADDR_LO_LEN 4 +#define MC_CMD_MAC_CTRL_IN_V2_ADDR_LO_LBN 64 +#define MC_CMD_MAC_CTRL_IN_V2_ADDR_LO_WIDTH 32 +#define MC_CMD_MAC_CTRL_IN_V2_ADDR_HI_OFST 12 +#define MC_CMD_MAC_CTRL_IN_V2_ADDR_HI_LEN 4 +#define MC_CMD_MAC_CTRL_IN_V2_ADDR_HI_LBN 96 +#define MC_CMD_MAC_CTRL_IN_V2_ADDR_HI_WIDTH 32 +/* Includes the ethernet header, optional VLAN tags, payload and FCS. */ +#define MC_CMD_MAC_CTRL_IN_V2_MAX_FRAME_LEN_OFST 16 +#define MC_CMD_MAC_CTRL_IN_V2_MAX_FRAME_LEN_LEN 4 +/* Settings for flow control. */ +#define MC_CMD_MAC_CTRL_IN_V2_FCNTL_OFST 20 +#define MC_CMD_MAC_CTRL_IN_V2_FCNTL_LEN 4 +/* enum property: value */ +/* Enum values, see field(s): */ +/* MC_CMD_FCNTL/MASK */ +/* Configure the MAC to transmit in one of promiscuous, unicast or broadcast + * mode. + */ +#define MC_CMD_MAC_CTRL_IN_V2_TRANSMISSION_MODE_OFST 24 +#define MC_CMD_MAC_CTRL_IN_V2_TRANSMISSION_MODE_LEN 4 +/* enum property: value */ +/* Enum values, see field(s): */ +/* MC_CMD_TRANSMISSION_MODE/MASK */ +/* Flags to control and expand the configuration of the MAC. 
*/ +#define MC_CMD_MAC_CTRL_IN_V2_FLAGS_OFST 28 +#define MC_CMD_MAC_CTRL_IN_V2_FLAGS_LEN 4 +/* enum property: bitshift */ +/* Enum values, see field(s): */ +/* MC_CMD_MAC_FLAGS/MASK */ +/* Priority-based flow control mask (QBB). PRIO7 corresponds to the highest + * priority, and PRIO0 to the lowest. This field is only used when CFG_FCNTL is + * set and FCNTL is QBB + */ +#define MC_CMD_MAC_CTRL_IN_V2_PRIO_FCNTL_MASK_OFST 32 +#define MC_CMD_MAC_CTRL_IN_V2_PRIO_FCNTL_MASK_LEN 1 +/* enum property: bitmask */ +#define MC_CMD_MAC_CTRL_IN_V2_QBB_PRIO0 0x0 /* enum */ +#define MC_CMD_MAC_CTRL_IN_V2_QBB_PRIO1 0x1 /* enum */ +#define MC_CMD_MAC_CTRL_IN_V2_QBB_PRIO2 0x2 /* enum */ +#define MC_CMD_MAC_CTRL_IN_V2_QBB_PRIO3 0x3 /* enum */ +#define MC_CMD_MAC_CTRL_IN_V2_QBB_PRIO4 0x4 /* enum */ +#define MC_CMD_MAC_CTRL_IN_V2_QBB_PRIO5 0x5 /* enum */ +#define MC_CMD_MAC_CTRL_IN_V2_QBB_PRIO6 0x6 /* enum */ +#define MC_CMD_MAC_CTRL_IN_V2_QBB_PRIO7 0x7 /* enum */ + +/* MC_CMD_MAC_CTRL_OUT msgresponse */ +#define MC_CMD_MAC_CTRL_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_MAC_STATE + * Read the MAC state. Return code: 0, ETIME. + */ +#define MC_CMD_MAC_STATE 0x1e0 +#undef MC_CMD_0x1e0_PRIVILEGE_CTG + +#define MC_CMD_0x1e0_PRIVILEGE_CTG SRIOV_CTG_LINK + +/* MC_CMD_MAC_STATE_IN msgrequest */ +#define MC_CMD_MAC_STATE_IN_LEN 4 +/* Handle for selected network port. */ +#define MC_CMD_MAC_STATE_IN_PORT_HANDLE_OFST 0 +#define MC_CMD_MAC_STATE_IN_PORT_HANDLE_LEN 4 + +/* MC_CMD_MAC_STATE_OUT msgresponse */ +#define MC_CMD_MAC_STATE_OUT_LEN 32 +/* The configured maximum frame length of the MAC. */ +#define MC_CMD_MAC_STATE_OUT_MAX_FRAME_LEN_OFST 0 +#define MC_CMD_MAC_STATE_OUT_MAX_FRAME_LEN_LEN 4 +/* This returns the negotiated flow control value. */ +#define MC_CMD_MAC_STATE_OUT_FCNTL_OFST 4 +#define MC_CMD_MAC_STATE_OUT_FCNTL_LEN 4 +/* enum property: value */ +/* Enum values, see field(s): */ +/* MC_CMD_FCNTL/MASK */ +/* MAC address of the device. */ +#define MC_CMD_MAC_STATE_OUT_ADDR_OFST 8 +#define MC_CMD_MAC_STATE_OUT_ADDR_LEN 8 +#define MC_CMD_MAC_STATE_OUT_ADDR_LO_OFST 8 +#define MC_CMD_MAC_STATE_OUT_ADDR_LO_LEN 4 +#define MC_CMD_MAC_STATE_OUT_ADDR_LO_LBN 64 +#define MC_CMD_MAC_STATE_OUT_ADDR_LO_WIDTH 32 +#define MC_CMD_MAC_STATE_OUT_ADDR_HI_OFST 12 +#define MC_CMD_MAC_STATE_OUT_ADDR_HI_LEN 4 +#define MC_CMD_MAC_STATE_OUT_ADDR_HI_LBN 96 +#define MC_CMD_MAC_STATE_OUT_ADDR_HI_WIDTH 32 +/* Flags indicating MAC faults. */ +#define MC_CMD_MAC_STATE_OUT_MAC_FAULT_FLAGS_OFST 16 +#define MC_CMD_MAC_STATE_OUT_MAC_FAULT_FLAGS_LEN 4 +/* enum property: bitshift */ +/* enum: Indicates a local MAC fault. */ +#define MC_CMD_MAC_STATE_OUT_LOCAL 0x0 +/* enum: Indicates a remote MAC fault. */ +#define MC_CMD_MAC_STATE_OUT_REMOTE 0x1 +/* enum: Indicates a pending reconfiguration of the MAC. */ +#define MC_CMD_MAC_STATE_OUT_PENDING_RECONFIG 0x2 +/* The flags that were used to configure the MAC. This is a copy of the FLAGS + * field in the MC_CMD_MAC_CTRL_IN command. + */ +#define MC_CMD_MAC_STATE_OUT_FLAGS_OFST 20 +#define MC_CMD_MAC_STATE_OUT_FLAGS_LEN 4 +/* enum property: bitshift */ +/* Enum values, see field(s): */ +/* MC_CMD_MAC_FLAGS/MASK */ +/* The transmission mode that was used to configure the MAC. This is a copy of + * the TRANSMISSION_MODE field in the MC_CMD_MAC_CTRL_IN command. 
+ */
+#define MC_CMD_MAC_STATE_OUT_TRANSMISSION_MODE_OFST 24
+#define MC_CMD_MAC_STATE_OUT_TRANSMISSION_MODE_LEN 4
+/* enum property: value */
+/* Enum values, see field(s): */
+/* MC_CMD_TRANSMISSION_MODE/MASK */
+/* The control flags that were used to configure the MAC. This is a copy of the
+ * CONTROL field in the MC_CMD_MAC_CTRL_IN command.
+ */
+#define MC_CMD_MAC_STATE_OUT_CONTROL_FLAGS_OFST 28
+#define MC_CMD_MAC_STATE_OUT_CONTROL_FLAGS_LEN 4
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_MAC_CONFIG_OPTIONS/MASK */
+
+
+/***********************************/
+/* MC_CMD_GET_ASSIGNED_PORT_HANDLE
+ * Obtain a handle that can be operated on to configure and query the status of
+ * the link. ENOENT is returned when no port is assigned to the client. Return
+ * code: 0, ENOENT
+ */
+#define MC_CMD_GET_ASSIGNED_PORT_HANDLE 0x1e2
+#undef MC_CMD_0x1e2_PRIVILEGE_CTG
+
+#define MC_CMD_0x1e2_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_ASSIGNED_PORT_HANDLE_IN msgrequest */
+#define MC_CMD_GET_ASSIGNED_PORT_HANDLE_IN_LEN 0
+
+/* MC_CMD_GET_ASSIGNED_PORT_HANDLE_OUT msgresponse */
+#define MC_CMD_GET_ASSIGNED_PORT_HANDLE_OUT_LEN 4
+/* Handle for assigned port. */
+#define MC_CMD_GET_ASSIGNED_PORT_HANDLE_OUT_PORT_HANDLE_OFST 0
+#define MC_CMD_GET_ASSIGNED_PORT_HANDLE_OUT_PORT_HANDLE_LEN 4
+
+/* MC_CMD_STAT_ID structuredef */
+#define MC_CMD_STAT_ID_LEN 4
+#define MC_CMD_STAT_ID_SOURCE_ID_OFST 0
+#define MC_CMD_STAT_ID_SOURCE_ID_LEN 2
+/* enum property: index */
+/* enum: Internal markers (generation start and end markers) */
+#define MC_CMD_STAT_ID_MARKER 0x1
+/* enum: Network port MAC statistics. */
+#define MC_CMD_STAT_ID_MAC 0x2
+/* enum: Network port PHY statistics. */
+#define MC_CMD_STAT_ID_PHY 0x3
+#define MC_CMD_STAT_ID_SOURCE_ID_LBN 0
+#define MC_CMD_STAT_ID_SOURCE_ID_WIDTH 16
+#define MC_CMD_STAT_ID_MARKER_STAT_ID_OFST 2
+#define MC_CMD_STAT_ID_MARKER_STAT_ID_LEN 2
+/* enum property: index */
+/* enum: This value is used to mark the start of a generation of statistics for
+ * DMA synchronization. It is incremented whenever a new set of statistics is
+ * transferred. Always the first entry in the DMA buffer.
+ */
+#define MC_CMD_STAT_ID_GENERATION_START 0x1
+/* enum: This value is used to mark the end of a generation of statistics for
+ * DMA synchronization. Always the last entry in the DMA buffer and set to the
+ * same value as GENERATION_START. The host driver must compare the
+ * GENERATION_START and GENERATION_END values to verify that the DMA buffer is
+ * consistent upon copying the DMA buffer. If they do not match, it means that
+ * a new DMA transfer has started while the host driver was copying the DMA
+ * buffer. In this case, the host driver must repeat the copy operation.
+ */
+#define MC_CMD_STAT_ID_GENERATION_END 0x2
+#define MC_CMD_STAT_ID_MARKER_STAT_ID_LBN 16
+#define MC_CMD_STAT_ID_MARKER_STAT_ID_WIDTH 16
+#define MC_CMD_STAT_ID_MAC_STAT_ID_OFST 2
+#define MC_CMD_STAT_ID_MAC_STAT_ID_LEN 2
+/* enum property: index */
+/* enum: Total number of packets transmitted (includes pause frames). */
+#define MC_CMD_STAT_ID_TX_PKTS 0x1
+/* enum: Pause frames transmitted. */
+#define MC_CMD_STAT_ID_TX_PAUSE_PKTS 0x2
+/* enum: Control frames transmitted. */
+#define MC_CMD_STAT_ID_TX_CONTROL_PKTS 0x3
+/* enum: Unicast packets transmitted (includes pause frames). */
+#define MC_CMD_STAT_ID_TX_UNICAST_PKTS 0x4
+/* enum: Multicast packets transmitted (includes pause frames).
*/ +#define MC_CMD_STAT_ID_TX_MULTICAST_PKTS 0x5 +/* enum: Broadcast packets transmitted (includes pause frames). */ +#define MC_CMD_STAT_ID_TX_BROADCAST_PKTS 0x6 +/* enum: Bytes transmitted (includes pause frames). */ +#define MC_CMD_STAT_ID_TX_BYTES 0x7 +/* enum: Bytes transmitted with bad CRC. */ +#define MC_CMD_STAT_ID_TX_BAD_BYTES 0x8 +/* enum: Bytes transmitted with good CRC. */ +#define MC_CMD_STAT_ID_TX_GOOD_BYTES 0x9 +/* enum: Packets transmitted with length less than 64 bytes. */ +#define MC_CMD_STAT_ID_TX_LT64_PKTS 0xa +/* enum: Packets transmitted with length equal to 64 bytes. */ +#define MC_CMD_STAT_ID_TX_64_PKTS 0xb +/* enum: Packets transmitted with length between 65 and 127 bytes. */ +#define MC_CMD_STAT_ID_TX_65_TO_127_PKTS 0xc +/* enum: Packets transmitted with length between 128 and 255 bytes. */ +#define MC_CMD_STAT_ID_TX_128_TO_255_PKTS 0xd +/* enum: Packets transmitted with length between 256 and 511 bytes. */ +#define MC_CMD_STAT_ID_TX_256_TO_511_PKTS 0xe +/* enum: Packets transmitted with length between 512 and 1023 bytes. */ +#define MC_CMD_STAT_ID_TX_512_TO_1023_PKTS 0xf +/* enum: Packets transmitted with length between 1024 and 1518 bytes. */ +#define MC_CMD_STAT_ID_TX_1024_TO_15XX_PKTS 0x10 +/* enum: Packets transmitted with length between 1519 and 9216 bytes. */ +#define MC_CMD_STAT_ID_TX_15XX_TO_JUMBO_PKTS 0x11 +/* enum: Packets transmitted with length greater than 9216 bytes. */ +#define MC_CMD_STAT_ID_TX_GTJUMBO_PKTS 0x12 +/* enum: Packets transmitted with bad FCS. */ +#define MC_CMD_STAT_ID_TX_BAD_FCS_PKTS 0x13 +/* enum: Packets transmitted with good FCS. */ +#define MC_CMD_STAT_ID_TX_GOOD_FCS_PKTS 0x14 +/* enum: Packets received. */ +#define MC_CMD_STAT_ID_RX_PKTS 0x15 +/* enum: Pause frames received. */ +#define MC_CMD_STAT_ID_RX_PAUSE_PKTS 0x16 +/* enum: Total number of good packets received. */ +#define MC_CMD_STAT_ID_RX_GOOD_PKTS 0x17 +/* enum: Total number of BAD packets received. */ +#define MC_CMD_STAT_ID_RX_BAD_PKTS 0x18 +/* enum: Total number of control frames received. */ +#define MC_CMD_STAT_ID_RX_CONTROL_PKTS 0x19 +/* enum: Total number of unicast packets received. */ +#define MC_CMD_STAT_ID_RX_UNICAST_PKTS 0x1a +/* enum: Total number of multicast packets received. */ +#define MC_CMD_STAT_ID_RX_MULTICAST_PKTS 0x1b +/* enum: Total number of broadcast packets received. */ +#define MC_CMD_STAT_ID_RX_BROADCAST_PKTS 0x1c +/* enum: Total number of bytes received. */ +#define MC_CMD_STAT_ID_RX_BYTES 0x1d +/* enum: Total number of bytes received with bad CRC. */ +#define MC_CMD_STAT_ID_RX_BAD_BYTES 0x1e +/* enum: Total number of bytes received with GOOD CRC. */ +#define MC_CMD_STAT_ID_RX_GOOD_BYTES 0x1f +/* enum: Packets received with length equal to 64 bytes. */ +#define MC_CMD_STAT_ID_RX_64_PKTS 0x20 +/* enum: Packets received with length between 65 and 127 bytes. */ +#define MC_CMD_STAT_ID_RX_65_TO_127_PKTS 0x21 +/* enum: Packets received with length between 128 and 255 bytes. */ +#define MC_CMD_STAT_ID_RX_128_TO_255_PKTS 0x22 +/* enum: Packets received with length between 256 and 511 bytes. */ +#define MC_CMD_STAT_ID_RX_256_TO_511_PKTS 0x23 +/* enum: Packets received with length between 512 and 1023 bytes. */ +#define MC_CMD_STAT_ID_RX_512_TO_1023_PKTS 0x24 +/* enum: Packets received with length between 1024 and 1518 bytes. */ +#define MC_CMD_STAT_ID_RX_1024_TO_15XX_PKTS 0x25 +/* enum: Packets received with length between 1519 and 9216 bytes. 
*/ +#define MC_CMD_STAT_ID_RX_15XX_TO_JUMBO_PKTS 0x26 +/* enum: Packets received with length greater than 9216 bytes. */ +#define MC_CMD_STAT_ID_RX_GTJUMBO_PKTS 0x27 +/* enum: Packets received with length less than 64 bytes. */ +#define MC_CMD_STAT_ID_RX_UNDERSIZE_PKTS 0x28 +/* enum: Packets received with bad FCS. */ +#define MC_CMD_STAT_ID_RX_BAD_FCS_PKTS 0x29 +/* enum: Packets received with GOOD FCS. */ +#define MC_CMD_STAT_ID_RX_GOOD_FCS_PKTS 0x2a +/* enum: Packets received with overflow. */ +#define MC_CMD_STAT_ID_RX_OVERFLOW_PKTS 0x2b +/* enum: Packets received with symbol error. */ +#define MC_CMD_STAT_ID_RX_SYMBOL_ERROR_PKTS 0x2c +/* enum: Packets received with alignment error. */ +#define MC_CMD_STAT_ID_RX_ALIGN_ERROR_PKTS 0x2d +/* enum: Packets received with length error. */ +#define MC_CMD_STAT_ID_RX_LENGTH_ERROR_PKTS 0x2e +/* enum: Packets received with internal error. */ +#define MC_CMD_STAT_ID_RX_INTERNAL_ERROR_PKTS 0x2f +/* enum: Packets received with jabber. These packets are larger than the + * allowed maximum receive unit (MRU). This indicates that a packet either has + * a bad CRC or has an RX error. + */ +#define MC_CMD_STAT_ID_RX_JABBER_PKTS 0x30 +/* enum: Packets dropped due to having no descriptor. This is a datapath stat + */ +#define MC_CMD_STAT_ID_RX_NODESC_DROPS 0x31 +/* enum: Packets received with lanes 0 and 1 character error. */ +#define MC_CMD_STAT_ID_RX_LANES01_CHAR_ERR 0x32 +/* enum: Packets received with lanes 2 and 3 character error. */ +#define MC_CMD_STAT_ID_RX_LANES23_CHAR_ERR 0x33 +/* enum: Packets received with lanes 0 and 1 disparity error. */ +#define MC_CMD_STAT_ID_RX_LANES01_DISP_ERR 0x34 +/* enum: Packets received with lanes 2 and 3 disparity error. */ +#define MC_CMD_STAT_ID_RX_LANES23_DISP_ERR 0x35 +/* enum: Packets received with match fault. */ +#define MC_CMD_STAT_ID_RX_MATCH_FAULT 0x36 +#define MC_CMD_STAT_ID_MAC_STAT_ID_LBN 16 +#define MC_CMD_STAT_ID_MAC_STAT_ID_WIDTH 16 +/* Include FEC stats. */ +#define MC_CMD_STAT_ID_PHY_STAT_ID_OFST 2 +#define MC_CMD_STAT_ID_PHY_STAT_ID_LEN 2 +/* enum property: index */ +/* enum: Number of uncorrected FEC codewords on link (RS-FEC only from Medford2 + * onwards) + */ +#define MC_CMD_STAT_ID_FEC_UNCORRECTED_ERRORS 0x1 +/* enum: Number of corrected FEC codewords on link (RS-FEC only from Medford2 + * onwards) + */ +#define MC_CMD_STAT_ID_FEC_CORRECTED_ERRORS 0x2 +/* enum: Number of corrected 10-bit symbol errors, lane 0 (RS-FEC only) */ +#define MC_CMD_STAT_ID_FEC_CORRECTED_SYMBOLS_LANE0 0x3 +/* enum: Number of corrected 10-bit symbol errors, lane 1 (RS-FEC only) */ +#define MC_CMD_STAT_ID_FEC_CORRECTED_SYMBOLS_LANE1 0x4 +/* enum: Number of corrected 10-bit symbol errors, lane 2 (RS-FEC only) */ +#define MC_CMD_STAT_ID_FEC_CORRECTED_SYMBOLS_LANE2 0x5 +/* enum: Number of corrected 10-bit symbol errors, lane 3 (RS-FEC only) */ +#define MC_CMD_STAT_ID_FEC_CORRECTED_SYMBOLS_LANE3 0x6 +#define MC_CMD_STAT_ID_PHY_STAT_ID_LBN 16 +#define MC_CMD_STAT_ID_PHY_STAT_ID_WIDTH 16 + +/* MC_CMD_STAT_DESC structuredef: Structure describing the layout and size of + * the stats DMA buffer descriptor. + */ +#define MC_CMD_STAT_DESC_LEN 8 +/* Unique identifier of the statistic. 
Formatted as MC_CMD_STAT_ID */
+#define MC_CMD_STAT_DESC_STAT_ID_OFST 0
+#define MC_CMD_STAT_DESC_STAT_ID_LEN 4
+#define MC_CMD_STAT_DESC_STAT_ID_LBN 0
+#define MC_CMD_STAT_DESC_STAT_ID_WIDTH 32
+/* See structuredef: MC_CMD_STAT_ID */
+#define MC_CMD_STAT_DESC_STAT_ID_SOURCE_ID_OFST 0
+#define MC_CMD_STAT_DESC_STAT_ID_SOURCE_ID_LEN 2
+#define MC_CMD_STAT_DESC_STAT_ID_SOURCE_ID_LBN 0
+#define MC_CMD_STAT_DESC_STAT_ID_SOURCE_ID_WIDTH 16
+#define MC_CMD_STAT_DESC_STAT_ID_MARKER_STAT_ID_OFST 2
+#define MC_CMD_STAT_DESC_STAT_ID_MARKER_STAT_ID_LEN 2
+#define MC_CMD_STAT_DESC_STAT_ID_MARKER_STAT_ID_LBN 16
+#define MC_CMD_STAT_DESC_STAT_ID_MARKER_STAT_ID_WIDTH 16
+#define MC_CMD_STAT_DESC_STAT_ID_MAC_STAT_ID_OFST 2
+#define MC_CMD_STAT_DESC_STAT_ID_MAC_STAT_ID_LEN 2
+#define MC_CMD_STAT_DESC_STAT_ID_MAC_STAT_ID_LBN 16
+#define MC_CMD_STAT_DESC_STAT_ID_MAC_STAT_ID_WIDTH 16
+#define MC_CMD_STAT_DESC_STAT_ID_PHY_STAT_ID_OFST 2
+#define MC_CMD_STAT_DESC_STAT_ID_PHY_STAT_ID_LEN 2
+#define MC_CMD_STAT_DESC_STAT_ID_PHY_STAT_ID_LBN 16
+#define MC_CMD_STAT_DESC_STAT_ID_PHY_STAT_ID_WIDTH 16
+/* Index of the statistic in the DMA buffer. */
+#define MC_CMD_STAT_DESC_STAT_INDEX_OFST 4
+#define MC_CMD_STAT_DESC_STAT_INDEX_LEN 2
+#define MC_CMD_STAT_DESC_STAT_INDEX_LBN 32
+#define MC_CMD_STAT_DESC_STAT_INDEX_WIDTH 16
+/* Reserved for future extension (e.g. flags field) - currently always 0. */
+#define MC_CMD_STAT_DESC_RESERVED_OFST 6
+#define MC_CMD_STAT_DESC_RESERVED_LEN 2
+#define MC_CMD_STAT_DESC_RESERVED_LBN 48
+#define MC_CMD_STAT_DESC_RESERVED_WIDTH 16
+
+
+/***********************************/
+/* MC_CMD_MAC_STATISTICS_DESCRIPTOR
+ * Get a list of descriptors that describe the layout and size of the stats
+ * buffer required for retrieving statistics for a given port. Each entry in
+ * the list is formatted as MC_CMD_STAT_DESC and provides the ID of each stat
+ * and its location and size in the buffer. It also gives the overall minimum
+ * size of the DMA buffer required when DMA mode is used. Note that the first
+ * and last entries in the list are reserved for the generation start
+ * (MC_CMD_MARKER_STAT_GENERATION_START) and end
+ * (MC_CMD_MARKER_STAT_GENERATION_END) markers respectively, to be used for DMA
+ * synchronisation as described in the documentation for the relevant enum
+ * entries. The entries are present in the buffer even if DMA mode is not used.
+ * Provisions are made (but currently unused) for extending the size of the
+ * descriptors, extending the size of the list beyond the maximum MCDI response
+ * size, as well as dynamic runtime updates of the list. Returns: 0 on
+ * success, ENOENT on non-existent port handle
+ */
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR 0x1e3
+#undef MC_CMD_0x1e3_PRIVILEGE_CTG
+
+#define MC_CMD_0x1e3_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_MAC_STATISTICS_DESCRIPTOR_IN msgrequest */
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_IN_LEN 8
+/* Handle of port to get MAC statistics descriptors for. */
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_IN_PORT_HANDLE_OFST 0
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_IN_PORT_HANDLE_LEN 4
+/* Offset of the first entry to return, for cases where not all entries fit in
+ * the MCDI response. Should be set to 0 on initial request, and on subsequent
+ * requests updated by the number of entries already returned, as long as the
+ * MORE_ENTRIES flag is set.
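+ *
+ * For example, a caller might page through the descriptor list like this
+ * (a sketch only; mcdi_rpc() and the req/resp views are illustrative
+ * stand-ins for the driver's MCDI transport, not part of this interface):
+ *
+ *   uint32_t offset = 0, flags, count;
+ *   do {
+ *       req.port_handle = port_handle;
+ *       req.offset = offset;
+ *       mcdi_rpc(MC_CMD_MAC_STATISTICS_DESCRIPTOR, &req, &resp);
+ *       count = resp.entry_count;
+ *       flags = resp.flags;
+ *       offset += count;        // consume resp.entries[0..count-1] first
+ *   } while (flags & (1U << MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_MORE_ENTRIES_LBN));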
+ */
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_IN_OFFSET_OFST 4
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_IN_OFFSET_LEN 4
+
+/* MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT msgresponse */
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_LENMIN 28
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_LENMAX 252
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_LEN(num) (20+8*(num))
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRIES_NUM(len) (((len)-20)/8)
+/* Generation number of the stats buffer. This is incremented each time the
+ * buffer is updated, and is used to verify the consistency of the buffer
+ * contents. Reserved for future extension (dynamic list updates). Currently
+ * always set to 0.
+ */
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_GENERATION_OFST 0
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_GENERATION_LEN 4
+/* Minimum size of the DMA buffer required to retrieve all statistics for the
+ * port. This is the sum of the sizes of all the statistics, plus the size of
+ * the generation markers, i.e. the minimum buffer size in bytes required to
+ * fit all statistics. Drivers will typically round up this value to the
+ * granularity of the host DMA allocation units.
+ */
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_DMA_BUFFER_SIZE_OFST 4
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_DMA_BUFFER_SIZE_LEN 4
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_FLAGS_OFST 8
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_FLAGS_LEN 4
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_MORE_ENTRIES_OFST 8
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_MORE_ENTRIES_LBN 0
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_MORE_ENTRIES_WIDTH 1
+/* Size of the individual descriptor entry in the list. Determines the entry
+ * stride in the list. Currently always set to the size of MC_CMD_STAT_DESC;
+ * larger values may be used in the future to extend the descriptor by
+ * appending new data to the end of the existing structure.
+ */
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRY_SIZE_OFST 12
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRY_SIZE_LEN 4
+/* Number of entries returned in the descriptor list. */
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRY_COUNT_OFST 16
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRY_COUNT_LEN 4
+/* List of descriptors. Each entry is formatted as MC_CMD_STAT_DESC and
+ * provides the ID of each stat and its location and size in the buffer. The
+ * first and last entries are reserved for the generation start and end markers
+ * respectively.
+ */
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRIES_OFST 20
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRIES_LEN 8
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRIES_LO_OFST 20
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRIES_LO_LEN 4
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRIES_LO_LBN 160
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRIES_LO_WIDTH 32
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRIES_HI_OFST 24
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRIES_HI_LEN 4
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRIES_HI_LBN 192
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRIES_HI_WIDTH 32
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRIES_MINNUM 1
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRIES_MAXNUM 29
+#define MC_CMD_MAC_STATISTICS_DESCRIPTOR_OUT_ENTRIES_MAXNUM_MCDI2 125
+
+
+/***********************************/
+/* MC_CMD_MAC_STATISTICS
+ * Get generic MAC statistics.
This call retrieves unified statistics managed + * by the MC. The MC will populate and provide all supported statistics in the + * format as returned by MC_CMD_MAC_STATISTICS_DESCRIPTOR. Refer to the + * aforementioned command for the format and contents of the stats DMA buffer. + * To ensure consistent and accurate results, it is essential for the driver to + * initialize the DMA buffer with zeros when DMA mode is used. Returns: 0 on + * success, ETIME if the DMA buffer is not ready, ENOENT on non-existent port + * handle, and EINVAL on invalid parameters (DMA buffer too small) + */ +#define MC_CMD_MAC_STATISTICS 0x1e4 +#undef MC_CMD_0x1e4_PRIVILEGE_CTG + +#define MC_CMD_0x1e4_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_MAC_STATISTICS_IN msgrequest */ +#define MC_CMD_MAC_STATISTICS_IN_LEN 20 +/* Handle of port to get MAC statistics for. */ +#define MC_CMD_MAC_STATISTICS_IN_PORT_HANDLE_OFST 0 +#define MC_CMD_MAC_STATISTICS_IN_PORT_HANDLE_LEN 4 +/* Contains options for querying the MAC statistics. */ +#define MC_CMD_MAC_STATISTICS_IN_CMD_OFST 4 +#define MC_CMD_MAC_STATISTICS_IN_CMD_LEN 4 +#define MC_CMD_MAC_STATISTICS_IN_DMA_OFST 4 +#define MC_CMD_MAC_STATISTICS_IN_DMA_LBN 0 +#define MC_CMD_MAC_STATISTICS_IN_DMA_WIDTH 1 +#define MC_CMD_MAC_STATISTICS_IN_CLEAR_OFST 4 +#define MC_CMD_MAC_STATISTICS_IN_CLEAR_LBN 1 +#define MC_CMD_MAC_STATISTICS_IN_CLEAR_WIDTH 1 +#define MC_CMD_MAC_STATISTICS_IN_PERIODIC_CHANGE_OFST 4 +#define MC_CMD_MAC_STATISTICS_IN_PERIODIC_CHANGE_LBN 2 +#define MC_CMD_MAC_STATISTICS_IN_PERIODIC_CHANGE_WIDTH 1 +#define MC_CMD_MAC_STATISTICS_IN_PERIODIC_ENABLE_OFST 4 +#define MC_CMD_MAC_STATISTICS_IN_PERIODIC_ENABLE_LBN 3 +#define MC_CMD_MAC_STATISTICS_IN_PERIODIC_ENABLE_WIDTH 1 +#define MC_CMD_MAC_STATISTICS_IN_PERIODIC_NOEVENT_OFST 4 +#define MC_CMD_MAC_STATISTICS_IN_PERIODIC_NOEVENT_LBN 4 +#define MC_CMD_MAC_STATISTICS_IN_PERIODIC_NOEVENT_WIDTH 1 +#define MC_CMD_MAC_STATISTICS_IN_PERIOD_MS_OFST 4 +#define MC_CMD_MAC_STATISTICS_IN_PERIOD_MS_LBN 16 +#define MC_CMD_MAC_STATISTICS_IN_PERIOD_MS_WIDTH 16 +/* This is the address of the DMA buffer to use for transfer of the statistics. + * Only valid if the DMA flag is set to 1. + */ +#define MC_CMD_MAC_STATISTICS_IN_DMA_ADDR_OFST 8 +#define MC_CMD_MAC_STATISTICS_IN_DMA_ADDR_LEN 8 +#define MC_CMD_MAC_STATISTICS_IN_DMA_ADDR_LO_OFST 8 +#define MC_CMD_MAC_STATISTICS_IN_DMA_ADDR_LO_LEN 4 +#define MC_CMD_MAC_STATISTICS_IN_DMA_ADDR_LO_LBN 64 +#define MC_CMD_MAC_STATISTICS_IN_DMA_ADDR_LO_WIDTH 32 +#define MC_CMD_MAC_STATISTICS_IN_DMA_ADDR_HI_OFST 12 +#define MC_CMD_MAC_STATISTICS_IN_DMA_ADDR_HI_LEN 4 +#define MC_CMD_MAC_STATISTICS_IN_DMA_ADDR_HI_LBN 96 +#define MC_CMD_MAC_STATISTICS_IN_DMA_ADDR_HI_WIDTH 32 +/* This is the length of the DMA buffer to use for the transfer of the + * statistics. The buffer should be at least DMA_BUFFER_SIZE long, as returned + * by MC_CMD_MAC_STATISTICS_DESCRIPTOR. If the supplied buffer is too small, + * the command will fail with EINVAL. Only valid if the DMA flag is set to 1. 
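+ *
+ * A sizing sketch (illustrative only; dma_buffer_size is the
+ * DMA_BUFFER_SIZE value previously returned by
+ * MC_CMD_MAC_STATISTICS_DESCRIPTOR):
+ *
+ *   size_t dma_len = ALIGN(dma_buffer_size, PAGE_SIZE);
+ *   // allocate and zero dma_len bytes (zero-initialisation is required
+ *   // for consistent results), then set DMA=1 and fill in DMA_ADDR and
+ *   // DMA_LEN=dma_len in the request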
+ */ +#define MC_CMD_MAC_STATISTICS_IN_DMA_LEN_OFST 16 +#define MC_CMD_MAC_STATISTICS_IN_DMA_LEN_LEN 4 + +/* MC_CMD_MAC_STATISTICS_OUT msgresponse */ +#define MC_CMD_MAC_STATISTICS_OUT_LENMIN 5 +#define MC_CMD_MAC_STATISTICS_OUT_LENMAX 252 +#define MC_CMD_MAC_STATISTICS_OUT_LENMAX_MCDI2 1020 +#define MC_CMD_MAC_STATISTICS_OUT_LEN(num) (4+1*(num)) +#define MC_CMD_MAC_STATISTICS_OUT_DATA_NUM(len) (((len)-4)/1) +/* length of the data in bytes */ +#define MC_CMD_MAC_STATISTICS_OUT_DATALEN_OFST 0 +#define MC_CMD_MAC_STATISTICS_OUT_DATALEN_LEN 4 +#define MC_CMD_MAC_STATISTICS_OUT_DATA_OFST 4 +#define MC_CMD_MAC_STATISTICS_OUT_DATA_LEN 1 +#define MC_CMD_MAC_STATISTICS_OUT_DATA_MINNUM 1 +#define MC_CMD_MAC_STATISTICS_OUT_DATA_MAXNUM 248 +#define MC_CMD_MAC_STATISTICS_OUT_DATA_MAXNUM_MCDI2 1016 + +/* NET_PORT_HANDLE_DESC structuredef: Network port descriptor containing a port + * handle and attributes used, for example, in MC_CMD_ENUM_PORTS. + */ +#define NET_PORT_HANDLE_DESC_LEN 53 +/* The handle to identify the port */ +#define NET_PORT_HANDLE_DESC_PORT_HANDLE_OFST 0 +#define NET_PORT_HANDLE_DESC_PORT_HANDLE_LEN 4 +#define NET_PORT_HANDLE_DESC_PORT_HANDLE_LBN 0 +#define NET_PORT_HANDLE_DESC_PORT_HANDLE_WIDTH 32 +/* Includes the type of port e.g. physical, virtual or MAE MPORT and other + * properties relevant to the port. + */ +#define NET_PORT_HANDLE_DESC_PORT_PROPERTIES_OFST 4 +#define NET_PORT_HANDLE_DESC_PORT_PROPERTIES_LEN 8 +#define NET_PORT_HANDLE_DESC_PORT_PROPERTIES_LO_OFST 4 +#define NET_PORT_HANDLE_DESC_PORT_PROPERTIES_LO_LEN 4 +#define NET_PORT_HANDLE_DESC_PORT_PROPERTIES_LO_LBN 32 +#define NET_PORT_HANDLE_DESC_PORT_PROPERTIES_LO_WIDTH 32 +#define NET_PORT_HANDLE_DESC_PORT_PROPERTIES_HI_OFST 8 +#define NET_PORT_HANDLE_DESC_PORT_PROPERTIES_HI_LEN 4 +#define NET_PORT_HANDLE_DESC_PORT_PROPERTIES_HI_LBN 64 +#define NET_PORT_HANDLE_DESC_PORT_PROPERTIES_HI_WIDTH 32 +#define NET_PORT_HANDLE_DESC_PORT_TYPE_OFST 4 +#define NET_PORT_HANDLE_DESC_PORT_TYPE_LBN 0 +#define NET_PORT_HANDLE_DESC_PORT_TYPE_WIDTH 3 +#define NET_PORT_HANDLE_DESC_PHYSICAL 0x0 /* enum */ +#define NET_PORT_HANDLE_DESC_VIRTUAL 0x1 /* enum */ +#define NET_PORT_HANDLE_DESC_MPORT 0x2 /* enum */ +#define NET_PORT_HANDLE_DESC_IS_ZOMBIE_OFST 4 +#define NET_PORT_HANDLE_DESC_IS_ZOMBIE_LBN 8 +#define NET_PORT_HANDLE_DESC_IS_ZOMBIE_WIDTH 1 +#define NET_PORT_HANDLE_DESC_PORT_PROPERTIES_LBN 32 +#define NET_PORT_HANDLE_DESC_PORT_PROPERTIES_WIDTH 64 +/* The dynamic change that led to the port enumeration */ +#define NET_PORT_HANDLE_DESC_ENTRY_SRC_OFST 12 +#define NET_PORT_HANDLE_DESC_ENTRY_SRC_LEN 1 +/* enum: Indicates that the ENTRY_SRC field has not been initialized. */ +#define NET_PORT_HANDLE_DESC_UNKNOWN 0x0 +/* enum: The port was enumerated at start of day. */ +#define NET_PORT_HANDLE_DESC_PRESENT 0x1 +/* enum: The port was dynamically added. */ +#define NET_PORT_HANDLE_DESC_ADDED 0x2 +/* enum: The port was dynamically deleted. */ +#define NET_PORT_HANDLE_DESC_DELETED 0x3 +#define NET_PORT_HANDLE_DESC_ENTRY_SRC_LBN 96 +#define NET_PORT_HANDLE_DESC_ENTRY_SRC_WIDTH 8 +/* This is an opaque 40 byte label exposed to users as a unique identifier of + * the port. It is represented as a zero-terminated ASCII string and assigned + * by the port administrator which is typically either the firmware for a + * physical port or the host software responsible for creating the virtual + * port. The label is conveyed to the driver after assignment, which, unlike + * the port administrator, does not need to know how to interpret the label. 
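+ *
+ * Because the label originates outside the driver, it is prudent to bound
+ * and terminate it before use, e.g. (sketch; desc is an illustrative
+ * pointer to the start of the NET_PORT_HANDLE_DESC structure):
+ *
+ *   char label[NET_PORT_HANDLE_DESC_PORT_LABEL_LEN + 1];
+ *   memcpy(label, desc + NET_PORT_HANDLE_DESC_PORT_LABEL_OFST,
+ *          NET_PORT_HANDLE_DESC_PORT_LABEL_LEN);
+ *   label[NET_PORT_HANDLE_DESC_PORT_LABEL_LEN] = '\0';   // force termination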
+ */
+#define NET_PORT_HANDLE_DESC_PORT_LABEL_OFST 13
+#define NET_PORT_HANDLE_DESC_PORT_LABEL_LEN 40
+#define NET_PORT_HANDLE_DESC_PORT_LABEL_LBN 104
+#define NET_PORT_HANDLE_DESC_PORT_LABEL_WIDTH 320
+
+
+/***********************************/
+/* MC_CMD_ENUM_PORTS
+ * This command returns handles for all ports present in the system. The PCIe
+ * function type of each port (either physical or virtual) is also reported.
+ * After a start-of-day port enumeration, firmware keeps track of all available
+ * ports as they are created or deleted and updates the list if there is a
+ * change. The port list is cleared after a control interface reset (e.g. FLR,
+ * ENTITY_RESET), in which case the command must be called again to reenumerate
+ * the ports. The command is also clear-on-read and repeated calls will drain
+ * the buffer.
+ */
+#define MC_CMD_ENUM_PORTS 0x1e5
+#undef MC_CMD_0x1e5_PRIVILEGE_CTG
+
+#define MC_CMD_0x1e5_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_ENUM_PORTS_IN msgrequest */
+#define MC_CMD_ENUM_PORTS_IN_LEN 0
+
+/* MC_CMD_ENUM_PORTS_OUT msgresponse */
+#define MC_CMD_ENUM_PORTS_OUT_LENMIN 12
+#define MC_CMD_ENUM_PORTS_OUT_LENMAX 252
+#define MC_CMD_ENUM_PORTS_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_ENUM_PORTS_OUT_LEN(num) (12+1*(num))
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_NUM(len) (((len)-12)/1)
+/* Any unused flags are reserved and must be ignored. */
+#define MC_CMD_ENUM_PORTS_OUT_FLAGS_OFST 0
+#define MC_CMD_ENUM_PORTS_OUT_FLAGS_LEN 4
+#define MC_CMD_ENUM_PORTS_OUT_MORE_OFST 0
+#define MC_CMD_ENUM_PORTS_OUT_MORE_LBN 0
+#define MC_CMD_ENUM_PORTS_OUT_MORE_WIDTH 1
+/* The number of NET_PORT_HANDLE_DESC structures in PORT_HANDLES. */
+#define MC_CMD_ENUM_PORTS_OUT_PORT_COUNT_OFST 4
+#define MC_CMD_ENUM_PORTS_OUT_PORT_COUNT_LEN 4
+#define MC_CMD_ENUM_PORTS_OUT_SIZEOF_NET_PORT_HANDLE_DESC_OFST 8
+#define MC_CMD_ENUM_PORTS_OUT_SIZEOF_NET_PORT_HANDLE_DESC_LEN 4
+/* Array of NET_PORT_HANDLE_DESC structures. Callers must use the
+ * SIZEOF_NET_PORT_HANDLE_DESC field as the array stride between entries. This
+ * may also allow for tail padding for alignment. Fields beyond
+ * SIZEOF_NET_PORT_HANDLE_DESC are not present.
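+ *
+ * An iteration sketch (illustrative; buf points at the start of the MCDI
+ * response payload and read_dword() is an assumed little-endian accessor):
+ *
+ *   uint32_t count  = read_dword(buf + MC_CMD_ENUM_PORTS_OUT_PORT_COUNT_OFST);
+ *   uint32_t stride = read_dword(buf + MC_CMD_ENUM_PORTS_OUT_SIZEOF_NET_PORT_HANDLE_DESC_OFST);
+ *   const uint8_t *entry = buf + MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_OFST;
+ *   for (uint32_t i = 0; i < count; i++, entry += stride) {
+ *       uint32_t handle;
+ *       memcpy(&handle, entry + NET_PORT_HANDLE_DESC_PORT_HANDLE_OFST,
+ *              sizeof(handle));
+ *       // only read fields that lie wholly below the reported stride
+ *   }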
+ */
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_OFST 12
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_LEN 1
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_MINNUM 0
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_MAXNUM 240
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_MAXNUM_MCDI2 1008
+/* See structuredef: NET_PORT_HANDLE_DESC */
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_PORT_HANDLE_OFST 12
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_PORT_HANDLE_LEN 4
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_PORT_PROPERTIES_OFST 16
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_PORT_PROPERTIES_LEN 8
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_PORT_PROPERTIES_LO_OFST 16
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_PORT_PROPERTIES_LO_LEN 4
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_PORT_PROPERTIES_LO_LBN 128
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_PORT_PROPERTIES_LO_WIDTH 32
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_PORT_PROPERTIES_HI_OFST 20
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_PORT_PROPERTIES_HI_LEN 4
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_PORT_PROPERTIES_HI_LBN 160
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_PORT_PROPERTIES_HI_WIDTH 32
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_PORT_TYPE_LBN 128
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_PORT_TYPE_WIDTH 3
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_IS_ZOMBIE_LBN 136
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_IS_ZOMBIE_WIDTH 1
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_ENTRY_SRC_OFST 24
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_ENTRY_SRC_LEN 1
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_PORT_LABEL_OFST 25
+#define MC_CMD_ENUM_PORTS_OUT_PORT_HANDLES_PORT_LABEL_LEN 40
+
+
+/***********************************/
+/* MC_CMD_GET_TRANSCEIVER_PROPERTIES
+ * Read properties of the transceiver associated with the port. It can be used
+ * either for a fixed onboard transceiver or for an inserted module. The
+ * returned data is interpreted from the transceiver hardware and may be fixed
+ * up by the firmware. Use MC_CMD_GET_MODULE_DATA to get raw undecoded data.
+ */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES 0x1e6
+#undef MC_CMD_0x1e6_PRIVILEGE_CTG
+
+#define MC_CMD_0x1e6_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_GET_TRANSCEIVER_PROPERTIES_IN msgrequest */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_IN_LEN 4
+/* Handle to port to get transceiver properties from. */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_IN_PORT_HANDLE_OFST 0
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_IN_PORT_HANDLE_LEN 4
+
+/* MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT msgresponse */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_LEN 89
+/* Supported technology abilities. */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_TECH_ABILITIES_MASK_OFST 0
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_TECH_ABILITIES_MASK_LEN 16
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_ETH_TECH/TECH */
+/* Reserved for future expansion to accommodate new Ethernet technologies.
+ */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_RESERVED_OFST 16
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_RESERVED_LEN 16
+/* Preferred FEC modes. This is a function of the cable type and length. */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_PREFERRED_FEC_MASK_OFST 32
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_PREFERRED_FEC_MASK_LEN 4
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* FEC_TYPE/TYPE */
+/* SFF-8042 code reported by the module. */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_CODE_OFST 36
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_CODE_LEN 2
+/* Medium. */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_MEDIUM_OFST 38
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_MEDIUM_LEN 1
+/* enum property: value */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_UNKNOWN 0x0 /* enum */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_COPPER 0x1 /* enum */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_OPTICAL 0x2 /* enum */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_BACKPLANE 0x3 /* enum */
+/* Identifies the tech */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_MEDIA_SUBTYPE_OFST 39
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_MEDIA_SUBTYPE_LEN 1
+/* enum property: value */
+/* MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_UNKNOWN 0x0 */
+/* enum: Ethernet over twisted-pair copper cables for distances up to 100
+ * meters.
+ */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_BASET 0x1
+/* enum: Ethernet over twin-axial, balanced copper cable. */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_CR 0x2
+/* enum: Ethernet over backplane for connections on the same board. */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_KX 0x3
+/* enum: Ethernet over a single backplane lane for connections between
+ * different boards.
+ */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_KR 0x4
+/* enum: Ethernet over copper backplane. */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_KP 0x5
+/* enum: Ethernet over fiber optic. */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_BASEX 0x6
+/* enum: Short range ethernet over multimode fiber optic (see IEEE 802.3 Clause
+ * 49 and 52).
+ */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_SR 0x7
+/* enum: Long range, extended range or far reach ethernet used with single mode
+ * fiber optics.
+ */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_LR_ER_FR 0x8
+/* enum: Long reach multimode ethernet over multimode optical fiber. */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_LRM 0x9
+/* enum: Very short reach PAM4 ethernet over multimode optical fiber (see IEEE
+ * 802.3db).
+ */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_VR 0xa
+/* enum: BASE-R encoding and PAM4 over single-mode fiber with reach up to at
+ * least 500 meters (802.3 Clause 121 and 124)
+ */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_DR 0xb
+/* String of the vendor name as interpreted by NMC firmware. NMC firmware
+ * applies workarounds for known buggy transceivers. The vendor name is
+ * presented as 16 bytes of ASCII characters padded with spaces. It can also be
+ * represented as 16 bytes of zeros if the field is unspecified for the
+ * connected module. See SFF-8472/CMIS specifications for details.
+ */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_VENDOR_NAME_OFST 40
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_VENDOR_NAME_LEN 1
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_VENDOR_NAME_NUM 16
+/* The vendor part number as interpreted by NMC firmware. The field is presented
+ * as 16 bytes of ASCII chars padded with spaces. It can also be 16 bytes of
+ * zeros if the field is unspecified for the connected module.
+ */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_VENDOR_PN_OFST 56
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_VENDOR_PN_LEN 1
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_VENDOR_PN_NUM 16
+/* Serial number of the module presented as 16 bytes of ASCII characters padded
+ * with spaces. It can also be 16 bytes of zeros if the field is unspecified
+ * for the connected module. See SFF-8472/CMIS specifications for details.
+ */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_SERIAL_NUMBER_OFST 72
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_SERIAL_NUMBER_LEN 1
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_SERIAL_NUMBER_NUM 16
+/* This reports the number of module changes detected by the NMC firmware. */
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_PORT_MODULECHANGE_SEQ_NUM_OFST 88
+#define MC_CMD_GET_TRANSCEIVER_PROPERTIES_OUT_PORT_MODULECHANGE_SEQ_NUM_LEN 1
+
+
+/***********************************/
+/* MC_CMD_GET_FIXED_PORT_PROPERTIES
+ */
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES 0x1e7
+#undef MC_CMD_0x1e7_PRIVILEGE_CTG
+
+#define MC_CMD_0x1e7_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_GET_FIXED_PORT_PROPERTIES_IN msgrequest: In this context, the port
+ * consists of the MAC and the PHY, and excludes any modules inserted into the
+ * cage. This information is fixed for a given board but not for a given ASIC.
+ * This command reports properties for the port as it is currently configured,
+ * and not its hardware capabilities, which can be better than the current
+ * configuration.
+ */
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_IN_LEN 4
+/* Handle to the port from which to retrieve properties */
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_IN_PORT_HANDLE_OFST 0
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_IN_PORT_HANDLE_LEN 4
+
+/* MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT msgresponse */
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_LEN 36
+/* Supported capabilities of the port in its current configuration. */
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_ABILITIES_OFST 0
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_ABILITIES_LEN 25
+/* See structuredef: MC_CMD_ETH_AN_FIELDS */
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_ABILITIES_TECH_MASK_OFST 0
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_ABILITIES_TECH_MASK_LEN 16
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_ABILITIES_FEC_MASK_OFST 16
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_ABILITIES_FEC_MASK_LEN 4
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_ABILITIES_FEC_REQ_OFST 20
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_ABILITIES_FEC_REQ_LEN 4
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_ABILITIES_PAUSE_MASK_OFST 24
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_ABILITIES_PAUSE_MASK_LEN 1
+/* Number of lanes supported by the port in its current configuration. */
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_NUM_LANES_OFST 25
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_NUM_LANES_LEN 1
+/* Bitmask of supported loopback modes. Where the response to this command
+ * includes the LOOPBACK_MODES_MASK_V2 field, that field should be used in
+ * preference to ensure that all available loopback modes are seen.
+ */
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_LOOPBACK_MODES_MASK_OFST 26
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_LOOPBACK_MODES_MASK_LEN 1
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* MC_CMD_LOOPBACK_V2/MODE */
+/* This field serves as a cage index that uniquely identifies the cage to which
+ * the module is connected. This is useful when splitter cables that have
+ * multiple ports on a single cage are used.
+ */
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_MDI_INDEX_OFST 27
+#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_MDI_INDEX_LEN 1
+/* This bitmask is used to specify the lanes within the cage identified by
+ * MDI_INDEX that are allocated to the port.
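+ *
+ * For example, a 4-lane cage split by a breakout cable into two 2-lane
+ * ports could report the same MDI_INDEX with lane masks 0x3 and 0xc. A
+ * decode sketch (mask holds the MDI_LANE_MASK byte; use_lane() is
+ * illustrative):
+ *
+ *   for (unsigned int lane = 0; lane < 8; lane++)
+ *       if (mask & (1U << lane))
+ *           use_lane(lane);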
+ */ +#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_MDI_LANE_MASK_OFST 28 +#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_MDI_LANE_MASK_LEN 1 +/* Maximum frame length supported by the port in its current configuration. */ +#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_MAX_FRAME_LEN_OFST 32 +#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_MAX_FRAME_LEN_LEN 4 + +/* MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2 msgresponse */ +#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_LEN 48 +/* Supported capabilities of the port in its current configuration. */ +#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_ABILITIES_OFST 0 +#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_ABILITIES_LEN 25 +/* Number of lanes supported by the port in its current configuration. */ +#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_NUM_LANES_OFST 25 +#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_NUM_LANES_LEN 1 +/* Bitmask of supported loopback modes. Where the response to this command + * includes the LOOPBACK_MODES_MASK_V2 field, that field should be used in + * preference to ensure that all available loopback modes are seen. + */ +#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_LOOPBACK_MODES_MASK_OFST 26 +#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_LOOPBACK_MODES_MASK_LEN 1 +/* enum property: bitshift */ +/* Enum values, see field(s): */ +/* MC_CMD_LOOPBACK_V2/MODE */ +/* This field serves as a cage index that uniquely identifies the cage to which + * the module is connected. This is useful when splitter cables that have + * multiple ports on a single cage are used. + */ +#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_MDI_INDEX_OFST 27 +#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_MDI_INDEX_LEN 1 +/* This bitmask is used to specify the lanes within the cage identified by + * MDI_INDEX that are allocated to the port. + */ +#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_MDI_LANE_MASK_OFST 28 +#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_MDI_LANE_MASK_LEN 1 +/* Maximum frame length supported by the port in its current configuration. */ +#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_MAX_FRAME_LEN_OFST 32 +#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_MAX_FRAME_LEN_LEN 4 +/* Bitmask of supported loopback modes. This field replaces the + * LOOPBACK_MODES_MASK field which is defined under version 1 of this command. + */ +#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_LOOPBACK_MODES_MASK_V2_OFST 40 +#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_LOOPBACK_MODES_MASK_V2_LEN 8 +#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_LOOPBACK_MODES_MASK_V2_LO_OFST 40 +#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_LOOPBACK_MODES_MASK_V2_LO_LEN 4 +#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_LOOPBACK_MODES_MASK_V2_LO_LBN 320 +#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_LOOPBACK_MODES_MASK_V2_LO_WIDTH 32 +#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_LOOPBACK_MODES_MASK_V2_HI_OFST 44 +#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_LOOPBACK_MODES_MASK_V2_HI_LEN 4 +#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_LOOPBACK_MODES_MASK_V2_HI_LBN 352 +#define MC_CMD_GET_FIXED_PORT_PROPERTIES_OUT_V2_LOOPBACK_MODES_MASK_V2_HI_WIDTH 32 +/* enum property: bitshift */ +/* Enum values, see field(s): */ +/* MC_CMD_LOOPBACK_V2/MODE */ + + +/***********************************/ +/* MC_CMD_GET_MODULE_DATA + * Read media-specific data from the PHY (e.g. SFP/SFP+ module ID information + * for SFP+ PHYs). This command returns raw data from the module's EEPROM and + * it is not interpreted by the MC. 
Use MC_CMD_GET_TRANSCEIVER_PROPERTIES to + * get interpreted data. Return code: 0, ENOENT + */ +#define MC_CMD_GET_MODULE_DATA 0x1e8 +#undef MC_CMD_0x1e8_PRIVILEGE_CTG + +#define MC_CMD_0x1e8_PRIVILEGE_CTG SRIOV_CTG_LINK + +/* MC_CMD_GET_MODULE_DATA_IN msgrequest */ +#define MC_CMD_GET_MODULE_DATA_IN_LEN 16 +/* Handle to identify the port from which to request module properties. */ +#define MC_CMD_GET_MODULE_DATA_IN_PORT_HANDLE_OFST 0 +#define MC_CMD_GET_MODULE_DATA_IN_PORT_HANDLE_LEN 4 +/* 7 bit I2C address of the device. DEPRECATED: This field is replaced by + * MODULE_ADDR in V2. Use V2 of this command for proper alignment and easier + * access. + */ +#define MC_CMD_GET_MODULE_DATA_IN_DEVADDR_LBN 32 +#define MC_CMD_GET_MODULE_DATA_IN_DEVADDR_WIDTH 7 +/* 0 if the page does not support banked access, non-zero otherwise. Non-zero + * BANK is valid if OFFSET is in the range 80h - ffh, i.e. in the Upper Memory + * region. + */ +#define MC_CMD_GET_MODULE_DATA_IN_BANK_OFST 6 +#define MC_CMD_GET_MODULE_DATA_IN_BANK_LEN 2 +/* 0 if paged access is not supported, non-zero otherwise. Non-zero PAGE is + * valid if OFFSET is in the range 80h - ffh. + */ +#define MC_CMD_GET_MODULE_DATA_IN_PAGE_OFST 8 +#define MC_CMD_GET_MODULE_DATA_IN_PAGE_LEN 2 +/* Offset in the range 00h - 7fh to access lower memory. Offset in the range + * 80h - ffh to access upper memory + */ +#define MC_CMD_GET_MODULE_DATA_IN_OFFSET_OFST 10 +#define MC_CMD_GET_MODULE_DATA_IN_OFFSET_LEN 1 +#define MC_CMD_GET_MODULE_DATA_IN_LENGTH_OFST 12 +#define MC_CMD_GET_MODULE_DATA_IN_LENGTH_LEN 4 + +/* MC_CMD_GET_MODULE_DATA_IN_V2 msgrequest: Updated MC_CMD_GET_MODULE_DATA with + * 8-bit wide ADDRESSING field. This new field provides a correctly aligned + * container for the 7-bit DEVADDR field from V1, now renamed MODULE_ADDR, to + * ensure proper alignment. + */ +#define MC_CMD_GET_MODULE_DATA_IN_V2_LEN 16 +/* Handle to identify the port from which to request module properties. */ +#define MC_CMD_GET_MODULE_DATA_IN_V2_PORT_HANDLE_OFST 0 +#define MC_CMD_GET_MODULE_DATA_IN_V2_PORT_HANDLE_LEN 4 +/* 7 bit I2C address of the device. DEPRECATED: This field is replaced by + * MODULE_ADDR in V2. Use V2 of this command for proper alignment and easier + * access. + */ +#define MC_CMD_GET_MODULE_DATA_IN_V2_DEVADDR_LBN 32 +#define MC_CMD_GET_MODULE_DATA_IN_V2_DEVADDR_WIDTH 7 +/* 0 if the page does not support banked access, non-zero otherwise. Non-zero + * BANK is valid if OFFSET is in the range 80h - ffh, i.e. in the Upper Memory + * region. + */ +#define MC_CMD_GET_MODULE_DATA_IN_V2_BANK_OFST 6 +#define MC_CMD_GET_MODULE_DATA_IN_V2_BANK_LEN 2 +/* 0 if paged access is not supported, non-zero otherwise. Non-zero PAGE is + * valid if OFFSET is in the range 80h - ffh. + */ +#define MC_CMD_GET_MODULE_DATA_IN_V2_PAGE_OFST 8 +#define MC_CMD_GET_MODULE_DATA_IN_V2_PAGE_LEN 2 +/* Offset in the range 00h - 7fh to access lower memory. Offset in the range + * 80h - ffh to access upper memory + */ +#define MC_CMD_GET_MODULE_DATA_IN_V2_OFFSET_OFST 10 +#define MC_CMD_GET_MODULE_DATA_IN_V2_OFFSET_LEN 1 +#define MC_CMD_GET_MODULE_DATA_IN_V2_LENGTH_OFST 12 +#define MC_CMD_GET_MODULE_DATA_IN_V2_LENGTH_LEN 4 +/* Container for 7 bit I2C addresses. 
*/
+#define MC_CMD_GET_MODULE_DATA_IN_V2_ADDRESSING_OFST 4
+#define MC_CMD_GET_MODULE_DATA_IN_V2_ADDRESSING_LEN 1
+#define MC_CMD_GET_MODULE_DATA_IN_V2_MODULE_ADDR_OFST 4
+#define MC_CMD_GET_MODULE_DATA_IN_V2_MODULE_ADDR_LBN 0
+#define MC_CMD_GET_MODULE_DATA_IN_V2_MODULE_ADDR_WIDTH 7
+
+/* MC_CMD_GET_MODULE_DATA_OUT msgresponse */
+#define MC_CMD_GET_MODULE_DATA_OUT_LENMIN 5
+#define MC_CMD_GET_MODULE_DATA_OUT_LENMAX 252
+#define MC_CMD_GET_MODULE_DATA_OUT_LENMAX_MCDI2 1020
+#define MC_CMD_GET_MODULE_DATA_OUT_LEN(num) (4+1*(num))
+#define MC_CMD_GET_MODULE_DATA_OUT_DATA_NUM(len) (((len)-4)/1)
+/* length of the data in bytes */
+#define MC_CMD_GET_MODULE_DATA_OUT_DATALEN_OFST 0
+#define MC_CMD_GET_MODULE_DATA_OUT_DATALEN_LEN 4
+#define MC_CMD_GET_MODULE_DATA_OUT_DATA_OFST 4
+#define MC_CMD_GET_MODULE_DATA_OUT_DATA_LEN 1
+#define MC_CMD_GET_MODULE_DATA_OUT_DATA_MINNUM 1
+#define MC_CMD_GET_MODULE_DATA_OUT_DATA_MAXNUM 248
+#define MC_CMD_GET_MODULE_DATA_OUT_DATA_MAXNUM_MCDI2 1016
+
+/* EVENT_MASK structuredef */
+#define EVENT_MASK_LEN 4
+#define EVENT_MASK_TYPE_OFST 0
+#define EVENT_MASK_TYPE_LEN 4
+/* enum: PORT_LINKCHANGE event is enabled */
+#define EVENT_MASK_PORT_LINKCHANGE 0x0
+/* enum: PORT_MODULECHANGE event is enabled */
+#define EVENT_MASK_PORT_MODULECHANGE 0x1
+#define EVENT_MASK_TYPE_LBN 0
+#define EVENT_MASK_TYPE_WIDTH 32
+
+
+/***********************************/
+/* MC_CMD_SET_NETPORT_EVENTS_MASK
+ */
+#define MC_CMD_SET_NETPORT_EVENTS_MASK 0x1e9
+#undef MC_CMD_0x1e9_PRIVILEGE_CTG
+
+#define MC_CMD_0x1e9_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_SET_NETPORT_EVENTS_MASK_IN msgrequest: Enable or disable delivery of
+ * specified network port events for a given port identified by PORT_HANDLE. At
+ * start of day, or after any control interface reset (FLR, ENTITY_RESET,
+ * etc.), all event delivery is disabled for all ports associated with the
+ * control interface.
+ */
+#define MC_CMD_SET_NETPORT_EVENTS_MASK_IN_LEN 8
+/* Handle to port to set event delivery mask. */
+#define MC_CMD_SET_NETPORT_EVENTS_MASK_IN_PORT_HANDLE_OFST 0
+#define MC_CMD_SET_NETPORT_EVENTS_MASK_IN_PORT_HANDLE_LEN 4
+/* Bitmask of events to enable. Event delivery is enabled when corresponding
+ * bit is 1, disabled when 0.
+ */
+#define MC_CMD_SET_NETPORT_EVENTS_MASK_IN_EVENT_MASK_OFST 4
+#define MC_CMD_SET_NETPORT_EVENTS_MASK_IN_EVENT_MASK_LEN 4
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* EVENT_MASK/TYPE */
+
+/* MC_CMD_SET_NETPORT_EVENTS_MASK_OUT msgresponse */
+#define MC_CMD_SET_NETPORT_EVENTS_MASK_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_NETPORT_EVENTS_MASK
+ */
+#define MC_CMD_GET_NETPORT_EVENTS_MASK 0x1ea
+#undef MC_CMD_0x1ea_PRIVILEGE_CTG
+
+#define MC_CMD_0x1ea_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_GET_NETPORT_EVENTS_MASK_IN msgrequest: Get event delivery mask for a
+ * given port identified by PORT_HANDLE.
+ */
+#define MC_CMD_GET_NETPORT_EVENTS_MASK_IN_LEN 4
+/* Handle to port to get event delivery mask for. */
+#define MC_CMD_GET_NETPORT_EVENTS_MASK_IN_PORT_HANDLE_OFST 0
+#define MC_CMD_GET_NETPORT_EVENTS_MASK_IN_PORT_HANDLE_LEN 4
+
+/* MC_CMD_GET_NETPORT_EVENTS_MASK_OUT msgresponse */
+#define MC_CMD_GET_NETPORT_EVENTS_MASK_OUT_LEN 4
+/* Bitmask of events enabled. Event delivery is enabled when corresponding bit
+ * is 1, disabled when 0.
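+ *
+ * The bit positions are the EVENT_MASK/TYPE enum values, so a returned
+ * mask can be tested like this (sketch):
+ *
+ *   bool link_events   = mask & (1U << EVENT_MASK_PORT_LINKCHANGE);
+ *   bool module_events = mask & (1U << EVENT_MASK_PORT_MODULECHANGE);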
+ */
+#define MC_CMD_GET_NETPORT_EVENTS_MASK_OUT_EVENT_MASK_OFST 0
+#define MC_CMD_GET_NETPORT_EVENTS_MASK_OUT_EVENT_MASK_LEN 4
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* EVENT_MASK/TYPE */
+
+
 /***********************************/
-/* MC_CMD_EVENT_CTRL
- * Configure which categories of unsolicited events the driver expects to
- * receive (Riverhead).
- */
-#define MC_CMD_EVENT_CTRL 0x69
-#undef MC_CMD_0x69_PRIVILEGE_CTG
-
-#define MC_CMD_0x69_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_EVENT_CTRL_IN msgrequest */
-#define MC_CMD_EVENT_CTRL_IN_LENMIN 0
-#define MC_CMD_EVENT_CTRL_IN_LENMAX 252
-#define MC_CMD_EVENT_CTRL_IN_LENMAX_MCDI2 1020
-#define MC_CMD_EVENT_CTRL_IN_LEN(num) (0+4*(num))
-#define MC_CMD_EVENT_CTRL_IN_EVENT_TYPE_NUM(len) (((len)-0)/4)
-/* Array of event categories for which the driver wishes to receive events. */
-#define MC_CMD_EVENT_CTRL_IN_EVENT_TYPE_OFST 0
-#define MC_CMD_EVENT_CTRL_IN_EVENT_TYPE_LEN 4
-#define MC_CMD_EVENT_CTRL_IN_EVENT_TYPE_MINNUM 0
-#define MC_CMD_EVENT_CTRL_IN_EVENT_TYPE_MAXNUM 63
-#define MC_CMD_EVENT_CTRL_IN_EVENT_TYPE_MAXNUM_MCDI2 255
-/* enum: Driver wishes to receive LINKCHANGE events. */
-#define MC_CMD_EVENT_CTRL_IN_MCDI_EVENT_CODE_LINKCHANGE 0x0
-/* enum: Driver wishes to receive SENSOR_CHANGE and SENSOR_STATE_CHANGE events.
- */
-#define MC_CMD_EVENT_CTRL_IN_MCDI_EVENT_CODE_SENSOREVT 0x1
-/* enum: Driver wishes to receive receive errors. */
-#define MC_CMD_EVENT_CTRL_IN_MCDI_EVENT_CODE_RX_ERR 0x2
-/* enum: Driver wishes to receive transmit errors. */
-#define MC_CMD_EVENT_CTRL_IN_MCDI_EVENT_CODE_TX_ERR 0x3
-/* enum: Driver wishes to receive firmware alerts. */
-#define MC_CMD_EVENT_CTRL_IN_MCDI_EVENT_CODE_FWALERT 0x4
-/* enum: Driver wishes to receive reboot events. */
-#define MC_CMD_EVENT_CTRL_IN_MCDI_EVENT_CODE_MC_REBOOT 0x5
-
-/* MC_CMD_EVENT_CTRL_OUT msgrequest */
-#define MC_CMD_EVENT_CTRL_OUT_LEN 0
+/* MC_CMD_GET_SUPPORTED_NETPORT_EVENTS
+ */
+#define MC_CMD_GET_SUPPORTED_NETPORT_EVENTS 0x1eb
+#undef MC_CMD_0x1eb_PRIVILEGE_CTG
+
+#define MC_CMD_0x1eb_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_GET_SUPPORTED_NETPORT_EVENTS_IN msgrequest: Get network port events
+ * supported by the platform. Information returned is fixed for a given NIC
+ * platform.
+ */
+#define MC_CMD_GET_SUPPORTED_NETPORT_EVENTS_IN_LEN 0
+
+/* MC_CMD_GET_SUPPORTED_NETPORT_EVENTS_OUT msgresponse */
+#define MC_CMD_GET_SUPPORTED_NETPORT_EVENTS_OUT_LEN 4
+/* Bitmask of supported events. An event type is supported when the
+ * corresponding bit is 1, unsupported when 0.
+ */
+#define MC_CMD_GET_SUPPORTED_NETPORT_EVENTS_OUT_EVENT_MASK_OFST 0
+#define MC_CMD_GET_SUPPORTED_NETPORT_EVENTS_OUT_EVENT_MASK_LEN 4
+/* enum property: bitshift */
+/* Enum values, see field(s): */
+/* EVENT_MASK/TYPE */
+
+
+/***********************************/
+/* MC_CMD_GET_NETPORT_STATISTICS
+ * Get generic MAC statistics. This call retrieves unified statistics managed
+ * by the MC. The MC will populate and provide all supported statistics in the
+ * format as returned by MC_CMD_MAC_STATISTICS_DESCRIPTOR. Refer to the
+ * aforementioned command for the format and contents of the stats DMA buffer.
+ * To ensure consistent and accurate results, it is essential for the driver to
+ * initialize the DMA buffer with zeros when DMA mode is used.
Returns: 0 on + * success, ETIME if the DMA buffer is not ready, ENOENT on non-existent port + * handle, and EINVAL on invalid parameters (DMA buffer too small) + */ +#define MC_CMD_GET_NETPORT_STATISTICS 0x1fa +#undef MC_CMD_0x1fa_PRIVILEGE_CTG + +#define MC_CMD_0x1fa_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_GET_NETPORT_STATISTICS_IN msgrequest */ +#define MC_CMD_GET_NETPORT_STATISTICS_IN_LEN 20 +/* Handle of port to get MAC statistics for. */ +#define MC_CMD_GET_NETPORT_STATISTICS_IN_PORT_HANDLE_OFST 0 +#define MC_CMD_GET_NETPORT_STATISTICS_IN_PORT_HANDLE_LEN 4 +/* Contains options for querying the MAC statistics. */ +#define MC_CMD_GET_NETPORT_STATISTICS_IN_CMD_OFST 4 +#define MC_CMD_GET_NETPORT_STATISTICS_IN_CMD_LEN 4 +#define MC_CMD_GET_NETPORT_STATISTICS_IN_DMA_OFST 4 +#define MC_CMD_GET_NETPORT_STATISTICS_IN_DMA_LBN 0 +#define MC_CMD_GET_NETPORT_STATISTICS_IN_DMA_WIDTH 1 +#define MC_CMD_GET_NETPORT_STATISTICS_IN_CLEAR_OFST 4 +#define MC_CMD_GET_NETPORT_STATISTICS_IN_CLEAR_LBN 1 +#define MC_CMD_GET_NETPORT_STATISTICS_IN_CLEAR_WIDTH 1 +#define MC_CMD_GET_NETPORT_STATISTICS_IN_PERIODIC_CHANGE_OFST 4 +#define MC_CMD_GET_NETPORT_STATISTICS_IN_PERIODIC_CHANGE_LBN 2 +#define MC_CMD_GET_NETPORT_STATISTICS_IN_PERIODIC_CHANGE_WIDTH 1 +#define MC_CMD_GET_NETPORT_STATISTICS_IN_PERIODIC_ENABLE_OFST 4 +#define MC_CMD_GET_NETPORT_STATISTICS_IN_PERIODIC_ENABLE_LBN 3 +#define MC_CMD_GET_NETPORT_STATISTICS_IN_PERIODIC_ENABLE_WIDTH 1 +#define MC_CMD_GET_NETPORT_STATISTICS_IN_PERIODIC_NOEVENT_OFST 4 +#define MC_CMD_GET_NETPORT_STATISTICS_IN_PERIODIC_NOEVENT_LBN 4 +#define MC_CMD_GET_NETPORT_STATISTICS_IN_PERIODIC_NOEVENT_WIDTH 1 +#define MC_CMD_GET_NETPORT_STATISTICS_IN_PERIOD_MS_OFST 4 +#define MC_CMD_GET_NETPORT_STATISTICS_IN_PERIOD_MS_LBN 15 +#define MC_CMD_GET_NETPORT_STATISTICS_IN_PERIOD_MS_WIDTH 17 +/* Specifies the physical address of the DMA buffer to use for statistics + * transfer. This field must contain a valid address under either of these + * conditions: 1. DMA flag is set (immediate DMA requested) 2. Both + * PERIODIC_CHANGE and PERIODIC_ENABLE are set (periodic DMA configured) + */ +#define MC_CMD_GET_NETPORT_STATISTICS_IN_DMA_ADDR_OFST 8 +#define MC_CMD_GET_NETPORT_STATISTICS_IN_DMA_ADDR_LEN 8 +#define MC_CMD_GET_NETPORT_STATISTICS_IN_DMA_ADDR_LO_OFST 8 +#define MC_CMD_GET_NETPORT_STATISTICS_IN_DMA_ADDR_LO_LEN 4 +#define MC_CMD_GET_NETPORT_STATISTICS_IN_DMA_ADDR_LO_LBN 64 +#define MC_CMD_GET_NETPORT_STATISTICS_IN_DMA_ADDR_LO_WIDTH 32 +#define MC_CMD_GET_NETPORT_STATISTICS_IN_DMA_ADDR_HI_OFST 12 +#define MC_CMD_GET_NETPORT_STATISTICS_IN_DMA_ADDR_HI_LEN 4 +#define MC_CMD_GET_NETPORT_STATISTICS_IN_DMA_ADDR_HI_LBN 96 +#define MC_CMD_GET_NETPORT_STATISTICS_IN_DMA_ADDR_HI_WIDTH 32 +/* Specifies the length of the DMA buffer in bytes for statistics transfer. The + * buffer size must be at least DMA_BUFFER_SIZE bytes (as returned by + * MC_CMD_MAC_STATISTICS_DESCRIPTOR). Providing an insufficient buffer size + * will result in an EINVAL error. This field must contain a valid length under + * either of these conditions: 1. DMA flag is set (immediate DMA requested) 2. 
+ * Both PERIODIC_CHANGE and PERIODIC_ENABLE are set (periodic DMA configured) + */ +#define MC_CMD_GET_NETPORT_STATISTICS_IN_DMA_LEN_OFST 16 +#define MC_CMD_GET_NETPORT_STATISTICS_IN_DMA_LEN_LEN 4 + +/* MC_CMD_GET_NETPORT_STATISTICS_OUT msgresponse */ +#define MC_CMD_GET_NETPORT_STATISTICS_OUT_LENMIN 0 +#define MC_CMD_GET_NETPORT_STATISTICS_OUT_LENMAX 248 +#define MC_CMD_GET_NETPORT_STATISTICS_OUT_LENMAX_MCDI2 1016 +#define MC_CMD_GET_NETPORT_STATISTICS_OUT_LEN(num) (0+8*(num)) +#define MC_CMD_GET_NETPORT_STATISTICS_OUT_STATS_NUM(len) (((len)-0)/8) +/* Statistics buffer. Zero-length if DMA mode is used. The statistics buffer is + * an array of 8-byte counter values, containing the generation start marker, + * stats counters, and generation end marker. The index of each counter in the + * array is reported by the MAC_STATISTICS_DESCRIPTOR command. The same layout + * is used for the DMA buffer for DMA mode stats. + */ +#define MC_CMD_GET_NETPORT_STATISTICS_OUT_STATS_OFST 0 +#define MC_CMD_GET_NETPORT_STATISTICS_OUT_STATS_LEN 8 +#define MC_CMD_GET_NETPORT_STATISTICS_OUT_STATS_LO_OFST 0 +#define MC_CMD_GET_NETPORT_STATISTICS_OUT_STATS_LO_LEN 4 +#define MC_CMD_GET_NETPORT_STATISTICS_OUT_STATS_LO_LBN 0 +#define MC_CMD_GET_NETPORT_STATISTICS_OUT_STATS_LO_WIDTH 32 +#define MC_CMD_GET_NETPORT_STATISTICS_OUT_STATS_HI_OFST 4 +#define MC_CMD_GET_NETPORT_STATISTICS_OUT_STATS_HI_LEN 4 +#define MC_CMD_GET_NETPORT_STATISTICS_OUT_STATS_HI_LBN 32 +#define MC_CMD_GET_NETPORT_STATISTICS_OUT_STATS_HI_WIDTH 32 +#define MC_CMD_GET_NETPORT_STATISTICS_OUT_STATS_MINNUM 0 +#define MC_CMD_GET_NETPORT_STATISTICS_OUT_STATS_MAXNUM 31 +#define MC_CMD_GET_NETPORT_STATISTICS_OUT_STATS_MAXNUM_MCDI2 127 /* EVB_PORT_ID structuredef */ #define EVB_PORT_ID_LEN 4 @@ -9706,44 +10573,6 @@ #define EVB_PORT_ID_PORT_ID_LBN 0 #define EVB_PORT_ID_PORT_ID_WIDTH 32 -/* EVB_VLAN_TAG structuredef */ -#define EVB_VLAN_TAG_LEN 2 -/* The VLAN tag value */ -#define EVB_VLAN_TAG_VLAN_ID_LBN 0 -#define EVB_VLAN_TAG_VLAN_ID_WIDTH 12 -#define EVB_VLAN_TAG_MODE_LBN 12 -#define EVB_VLAN_TAG_MODE_WIDTH 4 -/* enum: Insert the VLAN. */ -#define EVB_VLAN_TAG_INSERT 0x0 -/* enum: Replace the VLAN if already present. 
*/ -#define EVB_VLAN_TAG_REPLACE 0x1 - -/* BUFTBL_ENTRY structuredef */ -#define BUFTBL_ENTRY_LEN 12 -/* the owner ID */ -#define BUFTBL_ENTRY_OID_OFST 0 -#define BUFTBL_ENTRY_OID_LEN 2 -#define BUFTBL_ENTRY_OID_LBN 0 -#define BUFTBL_ENTRY_OID_WIDTH 16 -/* the page parameter as one of ESE_DZ_SMC_PAGE_SIZE_ */ -#define BUFTBL_ENTRY_PGSZ_OFST 2 -#define BUFTBL_ENTRY_PGSZ_LEN 2 -#define BUFTBL_ENTRY_PGSZ_LBN 16 -#define BUFTBL_ENTRY_PGSZ_WIDTH 16 -/* the raw 64-bit address field from the SMC, not adjusted for page size */ -#define BUFTBL_ENTRY_RAWADDR_OFST 4 -#define BUFTBL_ENTRY_RAWADDR_LEN 8 -#define BUFTBL_ENTRY_RAWADDR_LO_OFST 4 -#define BUFTBL_ENTRY_RAWADDR_LO_LEN 4 -#define BUFTBL_ENTRY_RAWADDR_LO_LBN 32 -#define BUFTBL_ENTRY_RAWADDR_LO_WIDTH 32 -#define BUFTBL_ENTRY_RAWADDR_HI_OFST 8 -#define BUFTBL_ENTRY_RAWADDR_HI_LEN 4 -#define BUFTBL_ENTRY_RAWADDR_HI_LBN 64 -#define BUFTBL_ENTRY_RAWADDR_HI_WIDTH 32 -#define BUFTBL_ENTRY_RAWADDR_LBN 32 -#define BUFTBL_ENTRY_RAWADDR_WIDTH 64 - /* NVRAM_PARTITION_TYPE structuredef */ #define NVRAM_PARTITION_TYPE_LEN 2 #define NVRAM_PARTITION_TYPE_ID_OFST 0 @@ -9787,6 +10616,8 @@ #define NVRAM_PARTITION_TYPE_NMC_LOG 0x700 /* enum: Non-volatile log output of second core on dual-core device */ #define NVRAM_PARTITION_TYPE_LOG_SLAVE 0x701 +/* enum: RAM (volatile) log output partition */ +#define NVRAM_PARTITION_TYPE_RAM_LOG 0x702 /* enum: Device state dump output partition */ #define NVRAM_PARTITION_TYPE_DUMP 0x800 /* enum: Crash log partition for NMC firmware */ @@ -9923,6 +10754,16 @@ #define NVRAM_PARTITION_TYPE_SUC_SOC_CONFIG 0x1f07 /* enum: System-on-Chip update information. */ #define NVRAM_PARTITION_TYPE_SOC_UPDATE 0x2003 +/* enum: Virtual partition. Write-only. Writes will actually be sent to an + * appropriate partition (for instance BUNDLE if the data starts with the magic + * number for a bundle update), or discarded with an error if not recognised as + * a supported type. + */ +#define NVRAM_PARTITION_TYPE_AUTO 0x2100 +/* enum: MC/NMC (first stage) bootloader firmware. (For X4, see XN-202072-PS + * and XN-202084-SW section 3.1). 
+ */ +#define NVRAM_PARTITION_TYPE_BOOTLOADER 0x2200 /* enum: Start of reserved value range (firmware may use for any purpose) */ #define NVRAM_PARTITION_TYPE_RESERVED_VALUES_MIN 0xff00 /* enum: End of reserved value range (firmware may use for any purpose) */ @@ -9981,116 +10822,6 @@ #define LICENSED_APP_ID_ID_LBN 0 #define LICENSED_APP_ID_ID_WIDTH 32 -/* LICENSED_FEATURES structuredef */ -#define LICENSED_FEATURES_LEN 8 -/* Bitmask of licensed firmware features */ -#define LICENSED_FEATURES_MASK_OFST 0 -#define LICENSED_FEATURES_MASK_LEN 8 -#define LICENSED_FEATURES_MASK_LO_OFST 0 -#define LICENSED_FEATURES_MASK_LO_LEN 4 -#define LICENSED_FEATURES_MASK_LO_LBN 0 -#define LICENSED_FEATURES_MASK_LO_WIDTH 32 -#define LICENSED_FEATURES_MASK_HI_OFST 4 -#define LICENSED_FEATURES_MASK_HI_LEN 4 -#define LICENSED_FEATURES_MASK_HI_LBN 32 -#define LICENSED_FEATURES_MASK_HI_WIDTH 32 -#define LICENSED_FEATURES_RX_CUT_THROUGH_OFST 0 -#define LICENSED_FEATURES_RX_CUT_THROUGH_LBN 0 -#define LICENSED_FEATURES_RX_CUT_THROUGH_WIDTH 1 -#define LICENSED_FEATURES_PIO_OFST 0 -#define LICENSED_FEATURES_PIO_LBN 1 -#define LICENSED_FEATURES_PIO_WIDTH 1 -#define LICENSED_FEATURES_EVQ_TIMER_OFST 0 -#define LICENSED_FEATURES_EVQ_TIMER_LBN 2 -#define LICENSED_FEATURES_EVQ_TIMER_WIDTH 1 -#define LICENSED_FEATURES_CLOCK_OFST 0 -#define LICENSED_FEATURES_CLOCK_LBN 3 -#define LICENSED_FEATURES_CLOCK_WIDTH 1 -#define LICENSED_FEATURES_RX_TIMESTAMPS_OFST 0 -#define LICENSED_FEATURES_RX_TIMESTAMPS_LBN 4 -#define LICENSED_FEATURES_RX_TIMESTAMPS_WIDTH 1 -#define LICENSED_FEATURES_TX_TIMESTAMPS_OFST 0 -#define LICENSED_FEATURES_TX_TIMESTAMPS_LBN 5 -#define LICENSED_FEATURES_TX_TIMESTAMPS_WIDTH 1 -#define LICENSED_FEATURES_RX_SNIFF_OFST 0 -#define LICENSED_FEATURES_RX_SNIFF_LBN 6 -#define LICENSED_FEATURES_RX_SNIFF_WIDTH 1 -#define LICENSED_FEATURES_TX_SNIFF_OFST 0 -#define LICENSED_FEATURES_TX_SNIFF_LBN 7 -#define LICENSED_FEATURES_TX_SNIFF_WIDTH 1 -#define LICENSED_FEATURES_PROXY_FILTER_OPS_OFST 0 -#define LICENSED_FEATURES_PROXY_FILTER_OPS_LBN 8 -#define LICENSED_FEATURES_PROXY_FILTER_OPS_WIDTH 1 -#define LICENSED_FEATURES_EVENT_CUT_THROUGH_OFST 0 -#define LICENSED_FEATURES_EVENT_CUT_THROUGH_LBN 9 -#define LICENSED_FEATURES_EVENT_CUT_THROUGH_WIDTH 1 -#define LICENSED_FEATURES_MASK_LBN 0 -#define LICENSED_FEATURES_MASK_WIDTH 64 - -/* LICENSED_V3_APPS structuredef */ -#define LICENSED_V3_APPS_LEN 8 -/* Bitmask of licensed applications */ -#define LICENSED_V3_APPS_MASK_OFST 0 -#define LICENSED_V3_APPS_MASK_LEN 8 -#define LICENSED_V3_APPS_MASK_LO_OFST 0 -#define LICENSED_V3_APPS_MASK_LO_LEN 4 -#define LICENSED_V3_APPS_MASK_LO_LBN 0 -#define LICENSED_V3_APPS_MASK_LO_WIDTH 32 -#define LICENSED_V3_APPS_MASK_HI_OFST 4 -#define LICENSED_V3_APPS_MASK_HI_LEN 4 -#define LICENSED_V3_APPS_MASK_HI_LBN 32 -#define LICENSED_V3_APPS_MASK_HI_WIDTH 32 -#define LICENSED_V3_APPS_ONLOAD_OFST 0 -#define LICENSED_V3_APPS_ONLOAD_LBN 0 -#define LICENSED_V3_APPS_ONLOAD_WIDTH 1 -#define LICENSED_V3_APPS_PTP_OFST 0 -#define LICENSED_V3_APPS_PTP_LBN 1 -#define LICENSED_V3_APPS_PTP_WIDTH 1 -#define LICENSED_V3_APPS_SOLARCAPTURE_PRO_OFST 0 -#define LICENSED_V3_APPS_SOLARCAPTURE_PRO_LBN 2 -#define LICENSED_V3_APPS_SOLARCAPTURE_PRO_WIDTH 1 -#define LICENSED_V3_APPS_SOLARSECURE_OFST 0 -#define LICENSED_V3_APPS_SOLARSECURE_LBN 3 -#define LICENSED_V3_APPS_SOLARSECURE_WIDTH 1 -#define LICENSED_V3_APPS_PERF_MONITOR_OFST 0 -#define LICENSED_V3_APPS_PERF_MONITOR_LBN 4 -#define LICENSED_V3_APPS_PERF_MONITOR_WIDTH 1 -#define LICENSED_V3_APPS_SOLARCAPTURE_LIVE_OFST 0 
-#define LICENSED_V3_APPS_SOLARCAPTURE_LIVE_LBN 5 -#define LICENSED_V3_APPS_SOLARCAPTURE_LIVE_WIDTH 1 -#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_OFST 0 -#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_LBN 6 -#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_WIDTH 1 -#define LICENSED_V3_APPS_NETWORK_ACCESS_CONTROL_OFST 0 -#define LICENSED_V3_APPS_NETWORK_ACCESS_CONTROL_LBN 7 -#define LICENSED_V3_APPS_NETWORK_ACCESS_CONTROL_WIDTH 1 -#define LICENSED_V3_APPS_TCP_DIRECT_OFST 0 -#define LICENSED_V3_APPS_TCP_DIRECT_LBN 8 -#define LICENSED_V3_APPS_TCP_DIRECT_WIDTH 1 -#define LICENSED_V3_APPS_LOW_LATENCY_OFST 0 -#define LICENSED_V3_APPS_LOW_LATENCY_LBN 9 -#define LICENSED_V3_APPS_LOW_LATENCY_WIDTH 1 -#define LICENSED_V3_APPS_SOLARCAPTURE_TAP_OFST 0 -#define LICENSED_V3_APPS_SOLARCAPTURE_TAP_LBN 10 -#define LICENSED_V3_APPS_SOLARCAPTURE_TAP_WIDTH 1 -#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_40G_OFST 0 -#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_40G_LBN 11 -#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_40G_WIDTH 1 -#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_1G_OFST 0 -#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_1G_LBN 12 -#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_1G_WIDTH 1 -#define LICENSED_V3_APPS_SCALEOUT_ONLOAD_OFST 0 -#define LICENSED_V3_APPS_SCALEOUT_ONLOAD_LBN 13 -#define LICENSED_V3_APPS_SCALEOUT_ONLOAD_WIDTH 1 -#define LICENSED_V3_APPS_DSHBRD_OFST 0 -#define LICENSED_V3_APPS_DSHBRD_LBN 14 -#define LICENSED_V3_APPS_DSHBRD_WIDTH 1 -#define LICENSED_V3_APPS_SCATRD_OFST 0 -#define LICENSED_V3_APPS_SCATRD_LBN 15 -#define LICENSED_V3_APPS_SCATRD_WIDTH 1 -#define LICENSED_V3_APPS_MASK_LBN 0 -#define LICENSED_V3_APPS_MASK_WIDTH 64 - /* LICENSED_V3_FEATURES structuredef */ #define LICENSED_V3_FEATURES_LEN 8 /* Bitmask of licensed firmware features */ @@ -10199,44 +10930,6 @@ #define RSS_MODE_HASH_SELECTOR_LBN 0 #define RSS_MODE_HASH_SELECTOR_WIDTH 8 -/* CTPIO_STATS_MAP structuredef */ -#define CTPIO_STATS_MAP_LEN 4 -/* The (function relative) VI number */ -#define CTPIO_STATS_MAP_VI_OFST 0 -#define CTPIO_STATS_MAP_VI_LEN 2 -#define CTPIO_STATS_MAP_VI_LBN 0 -#define CTPIO_STATS_MAP_VI_WIDTH 16 -/* The target bucket for the VI */ -#define CTPIO_STATS_MAP_BUCKET_OFST 2 -#define CTPIO_STATS_MAP_BUCKET_LEN 2 -#define CTPIO_STATS_MAP_BUCKET_LBN 16 -#define CTPIO_STATS_MAP_BUCKET_WIDTH 16 - - -/***********************************/ -/* MC_CMD_READ_REGS - * Get a dump of the MCPU registers - */ -#define MC_CMD_READ_REGS 0x50 -#undef MC_CMD_0x50_PRIVILEGE_CTG - -#define MC_CMD_0x50_PRIVILEGE_CTG SRIOV_CTG_INSECURE - -/* MC_CMD_READ_REGS_IN msgrequest */ -#define MC_CMD_READ_REGS_IN_LEN 0 - -/* MC_CMD_READ_REGS_OUT msgresponse */ -#define MC_CMD_READ_REGS_OUT_LEN 308 -/* Whether the corresponding register entry contains a valid value */ -#define MC_CMD_READ_REGS_OUT_MASK_OFST 0 -#define MC_CMD_READ_REGS_OUT_MASK_LEN 16 -/* Same order as MIPS GDB (r0-r31, sr, lo, hi, bad, cause, 32 x float, fsr, - * fir, fp) - */ -#define MC_CMD_READ_REGS_OUT_REGS_OFST 16 -#define MC_CMD_READ_REGS_OUT_REGS_LEN 4 -#define MC_CMD_READ_REGS_OUT_REGS_NUM 73 - /***********************************/ /* MC_CMD_INIT_EVQ @@ -10640,25 +11333,6 @@ #define MC_CMD_INIT_EVQ_V3_OUT_FLAG_RXQ_FORCE_EV_MERGING_LBN 3 #define MC_CMD_INIT_EVQ_V3_OUT_FLAG_RXQ_FORCE_EV_MERGING_WIDTH 1 -/* QUEUE_CRC_MODE structuredef */ -#define QUEUE_CRC_MODE_LEN 1 -#define QUEUE_CRC_MODE_MODE_LBN 0 -#define QUEUE_CRC_MODE_MODE_WIDTH 4 -/* enum: No CRC. */ -#define QUEUE_CRC_MODE_NONE 0x0 -/* enum: CRC Fiber channel over ethernet. 
*/ -#define QUEUE_CRC_MODE_FCOE 0x1 -/* enum: CRC (digest) iSCSI header only. */ -#define QUEUE_CRC_MODE_ISCSI_HDR 0x2 -/* enum: CRC (digest) iSCSI header and payload. */ -#define QUEUE_CRC_MODE_ISCSI 0x3 -/* enum: CRC Fiber channel over IP over ethernet. */ -#define QUEUE_CRC_MODE_FCOIPOE 0x4 -/* enum: CRC MPA. */ -#define QUEUE_CRC_MODE_MPA 0x5 -#define QUEUE_CRC_MODE_SPARE_LBN 4 -#define QUEUE_CRC_MODE_SPARE_WIDTH 4 - /***********************************/ /* MC_CMD_INIT_RXQ @@ -10827,6 +11501,9 @@ #define MC_CMD_INIT_RXQ_EXT_IN_FLAG_NO_CONT_EV_OFST 16 #define MC_CMD_INIT_RXQ_EXT_IN_FLAG_NO_CONT_EV_LBN 20 #define MC_CMD_INIT_RXQ_EXT_IN_FLAG_NO_CONT_EV_WIDTH 1 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_SUPPRESS_RX_EVENTS_OFST 16 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_SUPPRESS_RX_EVENTS_LBN 21 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_SUPPRESS_RX_EVENTS_WIDTH 1 /* Owner ID to use if in buffer mode (zero if physical) */ #define MC_CMD_INIT_RXQ_EXT_IN_OWNER_ID_OFST 20 #define MC_CMD_INIT_RXQ_EXT_IN_OWNER_ID_LEN 4 @@ -10933,6 +11610,9 @@ #define MC_CMD_INIT_RXQ_V3_IN_FLAG_NO_CONT_EV_OFST 16 #define MC_CMD_INIT_RXQ_V3_IN_FLAG_NO_CONT_EV_LBN 20 #define MC_CMD_INIT_RXQ_V3_IN_FLAG_NO_CONT_EV_WIDTH 1 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_SUPPRESS_RX_EVENTS_OFST 16 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_SUPPRESS_RX_EVENTS_LBN 21 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_SUPPRESS_RX_EVENTS_WIDTH 1 /* Owner ID to use if in buffer mode (zero if physical) */ #define MC_CMD_INIT_RXQ_V3_IN_OWNER_ID_OFST 20 #define MC_CMD_INIT_RXQ_V3_IN_OWNER_ID_LEN 4 @@ -11068,6 +11748,9 @@ #define MC_CMD_INIT_RXQ_V4_IN_FLAG_NO_CONT_EV_OFST 16 #define MC_CMD_INIT_RXQ_V4_IN_FLAG_NO_CONT_EV_LBN 20 #define MC_CMD_INIT_RXQ_V4_IN_FLAG_NO_CONT_EV_WIDTH 1 +#define MC_CMD_INIT_RXQ_V4_IN_FLAG_SUPPRESS_RX_EVENTS_OFST 16 +#define MC_CMD_INIT_RXQ_V4_IN_FLAG_SUPPRESS_RX_EVENTS_LBN 21 +#define MC_CMD_INIT_RXQ_V4_IN_FLAG_SUPPRESS_RX_EVENTS_WIDTH 1 /* Owner ID to use if in buffer mode (zero if physical) */ #define MC_CMD_INIT_RXQ_V4_IN_OWNER_ID_OFST 20 #define MC_CMD_INIT_RXQ_V4_IN_OWNER_ID_LEN 4 @@ -11216,6 +11899,9 @@ #define MC_CMD_INIT_RXQ_V5_IN_FLAG_NO_CONT_EV_OFST 16 #define MC_CMD_INIT_RXQ_V5_IN_FLAG_NO_CONT_EV_LBN 20 #define MC_CMD_INIT_RXQ_V5_IN_FLAG_NO_CONT_EV_WIDTH 1 +#define MC_CMD_INIT_RXQ_V5_IN_FLAG_SUPPRESS_RX_EVENTS_OFST 16 +#define MC_CMD_INIT_RXQ_V5_IN_FLAG_SUPPRESS_RX_EVENTS_LBN 21 +#define MC_CMD_INIT_RXQ_V5_IN_FLAG_SUPPRESS_RX_EVENTS_WIDTH 1 /* Owner ID to use if in buffer mode (zero if physical) */ #define MC_CMD_INIT_RXQ_V5_IN_OWNER_ID_OFST 20 #define MC_CMD_INIT_RXQ_V5_IN_OWNER_ID_LEN 4 @@ -11610,320 +12296,6 @@ /* MC_CMD_PROXY_CMD_OUT msgresponse */ #define MC_CMD_PROXY_CMD_OUT_LEN 0 -/* MC_PROXY_STATUS_BUFFER structuredef: Host memory status buffer used to - * manage proxied requests - */ -#define MC_PROXY_STATUS_BUFFER_LEN 16 -/* Handle allocated by the firmware for this proxy transaction */ -#define MC_PROXY_STATUS_BUFFER_HANDLE_OFST 0 -#define MC_PROXY_STATUS_BUFFER_HANDLE_LEN 4 -/* enum: An invalid handle. */ -#define MC_PROXY_STATUS_BUFFER_HANDLE_INVALID 0x0 -#define MC_PROXY_STATUS_BUFFER_HANDLE_LBN 0 -#define MC_PROXY_STATUS_BUFFER_HANDLE_WIDTH 32 -/* The requesting physical function number */ -#define MC_PROXY_STATUS_BUFFER_PF_OFST 4 -#define MC_PROXY_STATUS_BUFFER_PF_LEN 2 -#define MC_PROXY_STATUS_BUFFER_PF_LBN 32 -#define MC_PROXY_STATUS_BUFFER_PF_WIDTH 16 -/* The requesting virtual function number. Set to VF_NULL if the target is a - * PF. 
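- * For illustration (values hypothetical): a request proxied from VF 3 of
- * PF 2 would carry PF=2 and VF=3, while one originating from PF 2 itself
- * would carry PF=2 and VF=VF_NULL.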
- */ -#define MC_PROXY_STATUS_BUFFER_VF_OFST 6 -#define MC_PROXY_STATUS_BUFFER_VF_LEN 2 -#define MC_PROXY_STATUS_BUFFER_VF_LBN 48 -#define MC_PROXY_STATUS_BUFFER_VF_WIDTH 16 -/* The target function RID. */ -#define MC_PROXY_STATUS_BUFFER_RID_OFST 8 -#define MC_PROXY_STATUS_BUFFER_RID_LEN 2 -#define MC_PROXY_STATUS_BUFFER_RID_LBN 64 -#define MC_PROXY_STATUS_BUFFER_RID_WIDTH 16 -/* The status of the proxy as described in MC_CMD_PROXY_COMPLETE. */ -#define MC_PROXY_STATUS_BUFFER_STATUS_OFST 10 -#define MC_PROXY_STATUS_BUFFER_STATUS_LEN 2 -#define MC_PROXY_STATUS_BUFFER_STATUS_LBN 80 -#define MC_PROXY_STATUS_BUFFER_STATUS_WIDTH 16 -/* If a request is authorized rather than carried out by the host, this is the - * elevated privilege mask granted to the requesting function. - */ -#define MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_OFST 12 -#define MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_LEN 4 -#define MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_LBN 96 -#define MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_WIDTH 32 - - -/***********************************/ -/* MC_CMD_PROXY_CONFIGURE - * Enable/disable authorization of MCDI requests from unprivileged functions by - * a designated admin function - */ -#define MC_CMD_PROXY_CONFIGURE 0x58 -#undef MC_CMD_0x58_PRIVILEGE_CTG - -#define MC_CMD_0x58_PRIVILEGE_CTG SRIOV_CTG_ADMIN - -/* MC_CMD_PROXY_CONFIGURE_IN msgrequest */ -#define MC_CMD_PROXY_CONFIGURE_IN_LEN 108 -#define MC_CMD_PROXY_CONFIGURE_IN_FLAGS_OFST 0 -#define MC_CMD_PROXY_CONFIGURE_IN_FLAGS_LEN 4 -#define MC_CMD_PROXY_CONFIGURE_IN_ENABLE_OFST 0 -#define MC_CMD_PROXY_CONFIGURE_IN_ENABLE_LBN 0 -#define MC_CMD_PROXY_CONFIGURE_IN_ENABLE_WIDTH 1 -/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS - * of blocks, each of the size REQUEST_BLOCK_SIZE. - */ -#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_OFST 4 -#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_LEN 8 -#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_LO_OFST 4 -#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_LO_LEN 4 -#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_LO_LBN 32 -#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_LO_WIDTH 32 -#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_HI_OFST 8 -#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_HI_LEN 4 -#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_HI_LBN 64 -#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_HI_WIDTH 32 -/* Must be a power of 2 */ -#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BLOCK_SIZE_OFST 12 -#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BLOCK_SIZE_LEN 4 -/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS - * of blocks, each of the size REPLY_BLOCK_SIZE. 
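- * (Sizing sketch, figures hypothetical: with NUM_BLOCKS of 16 and a block
- * size of 256 bytes, this buffer would span 16 * 256 = 4096 contiguous
- * bytes.)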
- */ -#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_OFST 16 -#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_LEN 8 -#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_LO_OFST 16 -#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_LO_LEN 4 -#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_LO_LBN 128 -#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_LO_WIDTH 32 -#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_HI_OFST 20 -#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_HI_LEN 4 -#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_HI_LBN 160 -#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_HI_WIDTH 32 -/* Must be a power of 2 */ -#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BLOCK_SIZE_OFST 24 -#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BLOCK_SIZE_LEN 4 -/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS - * of blocks, each of the size STATUS_BLOCK_SIZE. This buffer is only needed if - * host intends to complete proxied operations by using MC_CMD_PROXY_CMD. - */ -#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_OFST 28 -#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_LEN 8 -#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_LO_OFST 28 -#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_LO_LEN 4 -#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_LO_LBN 224 -#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_LO_WIDTH 32 -#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_HI_OFST 32 -#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_HI_LEN 4 -#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_HI_LBN 256 -#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_HI_WIDTH 32 -/* Must be a power of 2, or zero if this buffer is not provided */ -#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BLOCK_SIZE_OFST 36 -#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BLOCK_SIZE_LEN 4 -/* Applies to all three buffers */ -#define MC_CMD_PROXY_CONFIGURE_IN_NUM_BLOCKS_OFST 40 -#define MC_CMD_PROXY_CONFIGURE_IN_NUM_BLOCKS_LEN 4 -/* A bit mask defining which MCDI operations may be proxied */ -#define MC_CMD_PROXY_CONFIGURE_IN_ALLOWED_MCDI_MASK_OFST 44 -#define MC_CMD_PROXY_CONFIGURE_IN_ALLOWED_MCDI_MASK_LEN 64 - -/* MC_CMD_PROXY_CONFIGURE_EXT_IN msgrequest */ -#define MC_CMD_PROXY_CONFIGURE_EXT_IN_LEN 112 -#define MC_CMD_PROXY_CONFIGURE_EXT_IN_FLAGS_OFST 0 -#define MC_CMD_PROXY_CONFIGURE_EXT_IN_FLAGS_LEN 4 -#define MC_CMD_PROXY_CONFIGURE_EXT_IN_ENABLE_OFST 0 -#define MC_CMD_PROXY_CONFIGURE_EXT_IN_ENABLE_LBN 0 -#define MC_CMD_PROXY_CONFIGURE_EXT_IN_ENABLE_WIDTH 1 -/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS - * of blocks, each of the size REQUEST_BLOCK_SIZE. 
- */ -#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_OFST 4 -#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_LEN 8 -#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_LO_OFST 4 -#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_LO_LEN 4 -#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_LO_LBN 32 -#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_LO_WIDTH 32 -#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_HI_OFST 8 -#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_HI_LEN 4 -#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_HI_LBN 64 -#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_HI_WIDTH 32 -/* Must be a power of 2 */ -#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BLOCK_SIZE_OFST 12 -#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BLOCK_SIZE_LEN 4 -/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS - * of blocks, each of the size REPLY_BLOCK_SIZE. - */ -#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_OFST 16 -#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_LEN 8 -#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_LO_OFST 16 -#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_LO_LEN 4 -#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_LO_LBN 128 -#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_LO_WIDTH 32 -#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_HI_OFST 20 -#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_HI_LEN 4 -#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_HI_LBN 160 -#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_HI_WIDTH 32 -/* Must be a power of 2 */ -#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BLOCK_SIZE_OFST 24 -#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BLOCK_SIZE_LEN 4 -/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS - * of blocks, each of the size STATUS_BLOCK_SIZE. This buffer is only needed if - * host intends to complete proxied operations by using MC_CMD_PROXY_CMD. 
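- * If the host does not intend to use MC_CMD_PROXY_CMD, REPLY_BLOCK_SIZE
- * below may be zero and no reply buffer need be mapped.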
- */ -#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_OFST 28 -#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_LEN 8 -#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_LO_OFST 28 -#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_LO_LEN 4 -#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_LO_LBN 224 -#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_LO_WIDTH 32 -#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_HI_OFST 32 -#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_HI_LEN 4 -#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_HI_LBN 256 -#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_HI_WIDTH 32 -/* Must be a power of 2, or zero if this buffer is not provided */ -#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BLOCK_SIZE_OFST 36 -#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BLOCK_SIZE_LEN 4 -/* Applies to all three buffers */ -#define MC_CMD_PROXY_CONFIGURE_EXT_IN_NUM_BLOCKS_OFST 40 -#define MC_CMD_PROXY_CONFIGURE_EXT_IN_NUM_BLOCKS_LEN 4 -/* A bit mask defining which MCDI operations may be proxied */ -#define MC_CMD_PROXY_CONFIGURE_EXT_IN_ALLOWED_MCDI_MASK_OFST 44 -#define MC_CMD_PROXY_CONFIGURE_EXT_IN_ALLOWED_MCDI_MASK_LEN 64 -#define MC_CMD_PROXY_CONFIGURE_EXT_IN_RESERVED_OFST 108 -#define MC_CMD_PROXY_CONFIGURE_EXT_IN_RESERVED_LEN 4 - -/* MC_CMD_PROXY_CONFIGURE_OUT msgresponse */ -#define MC_CMD_PROXY_CONFIGURE_OUT_LEN 0 - - -/***********************************/ -/* MC_CMD_PROXY_COMPLETE - * Tells FW that a requested proxy operation has either been completed (by - * using MC_CMD_PROXY_CMD) or authorized/declined. May only be sent by the - * function that enabled proxying/authorization (by using - * MC_CMD_PROXY_CONFIGURE). - */ -#define MC_CMD_PROXY_COMPLETE 0x5f -#undef MC_CMD_0x5f_PRIVILEGE_CTG - -#define MC_CMD_0x5f_PRIVILEGE_CTG SRIOV_CTG_ADMIN - -/* MC_CMD_PROXY_COMPLETE_IN msgrequest */ -#define MC_CMD_PROXY_COMPLETE_IN_LEN 12 -#define MC_CMD_PROXY_COMPLETE_IN_BLOCK_INDEX_OFST 0 -#define MC_CMD_PROXY_COMPLETE_IN_BLOCK_INDEX_LEN 4 -#define MC_CMD_PROXY_COMPLETE_IN_STATUS_OFST 4 -#define MC_CMD_PROXY_COMPLETE_IN_STATUS_LEN 4 -/* enum: The operation has been completed by using MC_CMD_PROXY_CMD, the reply - * is stored in the REPLY_BUFF. - */ -#define MC_CMD_PROXY_COMPLETE_IN_COMPLETE 0x0 -/* enum: The operation has been authorized. The originating function may now - * try again. - */ -#define MC_CMD_PROXY_COMPLETE_IN_AUTHORIZED 0x1 -/* enum: The operation has been declined. */ -#define MC_CMD_PROXY_COMPLETE_IN_DECLINED 0x2 -/* enum: The authorization failed because the relevant application did not - * respond in time. - */ -#define MC_CMD_PROXY_COMPLETE_IN_TIMEDOUT 0x3 -#define MC_CMD_PROXY_COMPLETE_IN_HANDLE_OFST 8 -#define MC_CMD_PROXY_COMPLETE_IN_HANDLE_LEN 4 - -/* MC_CMD_PROXY_COMPLETE_OUT msgresponse */ -#define MC_CMD_PROXY_COMPLETE_OUT_LEN 0 - - -/***********************************/ -/* MC_CMD_ALLOC_BUFTBL_CHUNK - * Allocate a set of buffer table entries using the specified owner ID. This - * operation allocates the required buffer table entries (and fails if it - * cannot do so). The buffer table entries will initially be zeroed. 
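- * A typical lifecycle, as a sketch: allocate a chunk here, program its
- * entries with MC_CMD_PROGRAM_BUFTBL_ENTRIES (below) using the returned
- * HANDLE, and release it with MC_CMD_FREE_BUFTBL_CHUNK when the mappings
- * are no longer needed. Note that MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LEN(num)
- * is 12+8*(num), so its 32-entry maximum gives the quoted LENMAX of 268
- * bytes.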
- */ -#define MC_CMD_ALLOC_BUFTBL_CHUNK 0x87 -#undef MC_CMD_0x87_PRIVILEGE_CTG - -#define MC_CMD_0x87_PRIVILEGE_CTG SRIOV_CTG_ONLOAD - -/* MC_CMD_ALLOC_BUFTBL_CHUNK_IN msgrequest */ -#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_LEN 8 -/* Owner ID to use */ -#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_OWNER_OFST 0 -#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_OWNER_LEN 4 -/* Size of buffer table pages to use, in bytes (note that only a few values are - * legal on any specific hardware). - */ -#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_PAGE_SIZE_OFST 4 -#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_PAGE_SIZE_LEN 4 - -/* MC_CMD_ALLOC_BUFTBL_CHUNK_OUT msgresponse */ -#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_LEN 12 -#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_HANDLE_OFST 0 -#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_HANDLE_LEN 4 -#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_NUMENTRIES_OFST 4 -#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_NUMENTRIES_LEN 4 -/* Buffer table IDs for use in DMA descriptors. */ -#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_ID_OFST 8 -#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_ID_LEN 4 - - -/***********************************/ -/* MC_CMD_PROGRAM_BUFTBL_ENTRIES - * Reprogram a set of buffer table entries in the specified chunk. - */ -#define MC_CMD_PROGRAM_BUFTBL_ENTRIES 0x88 -#undef MC_CMD_0x88_PRIVILEGE_CTG - -#define MC_CMD_0x88_PRIVILEGE_CTG SRIOV_CTG_ONLOAD - -/* MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN msgrequest */ -#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMIN 20 -#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMAX 268 -#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMAX_MCDI2 268 -#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LEN(num) (12+8*(num)) -#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_NUM(len) (((len)-12)/8) -#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_HANDLE_OFST 0 -#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_HANDLE_LEN 4 -/* ID */ -#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_FIRSTID_OFST 4 -#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_FIRSTID_LEN 4 -/* Num entries */ -#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_NUMENTRIES_OFST 8 -#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_NUMENTRIES_LEN 4 -/* Buffer table entry address */ -#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_OFST 12 -#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_LEN 8 -#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_LO_OFST 12 -#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_LO_LEN 4 -#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_LO_LBN 96 -#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_LO_WIDTH 32 -#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_HI_OFST 16 -#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_HI_LEN 4 -#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_HI_LBN 128 -#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_HI_WIDTH 32 -#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_MINNUM 1 -#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_MAXNUM 32 -#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_MAXNUM_MCDI2 32 - -/* MC_CMD_PROGRAM_BUFTBL_ENTRIES_OUT msgresponse */ -#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_OUT_LEN 0 - - -/***********************************/ -/* MC_CMD_FREE_BUFTBL_CHUNK - */ -#define MC_CMD_FREE_BUFTBL_CHUNK 0x89 -#undef MC_CMD_0x89_PRIVILEGE_CTG - -#define MC_CMD_0x89_PRIVILEGE_CTG SRIOV_CTG_ONLOAD - -/* MC_CMD_FREE_BUFTBL_CHUNK_IN msgrequest */ -#define MC_CMD_FREE_BUFTBL_CHUNK_IN_LEN 4 -#define MC_CMD_FREE_BUFTBL_CHUNK_IN_HANDLE_OFST 0 -#define MC_CMD_FREE_BUFTBL_CHUNK_IN_HANDLE_LEN 4 - -/* MC_CMD_FREE_BUFTBL_CHUNK_OUT msgresponse */ -#define MC_CMD_FREE_BUFTBL_CHUNK_OUT_LEN 0 - /***********************************/ /* MC_CMD_FILTER_OP @@ -12822,6 +13194,10 @@ #define 
MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_VNIC_ENCAP_MATCHES 0x5 /* enum: read the supported encapsulation types for the VNIC */ #define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_VNIC_ENCAP_TYPES 0x6 +/* enum: read the supported RX filter matches for low-latency queues (as + * allocated by MC_CMD_ALLOC_LL_QUEUES) + */ +#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_LL_RX_MATCHES 0x7 /* MC_CMD_GET_PARSER_DISP_INFO_OUT msgresponse */ #define MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMIN 8 @@ -12860,6 +13236,48 @@ #define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_DST_IP_MCAST_ONLY_LBN 0 #define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_DST_IP_MCAST_ONLY_WIDTH 1 +/* MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT msgresponse: + * GET_PARSER_DISP_INFO response format for OP_GET_SECURITY_RULE_INFO. + * (Medford-only; for use by SolarSecure apps, not directly by drivers. See + * SF-114946-SW.) NOTE - this message definition is provisional. It has not yet + * been used in any released code and may change during development. This note + * will be removed once it is regarded as stable. + */ +#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_LEN 36 +/* identifies the type of operation requested */ +#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_OP_OFST 0 +#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_OP_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_GET_PARSER_DISP_INFO_IN/OP */ +/* a version number representing the set of rule lookups that are implemented + * by the currently running firmware + */ +#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_RULES_VERSION_OFST 4 +#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_RULES_VERSION_LEN 4 +/* enum: implements lookup sequences described in SF-114946-SW draft C */ +#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_RULES_VERSION_SF_114946_SW_C 0x0 +/* the number of nodes in the subnet map */ +#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_MAP_NUM_NODES_OFST 8 +#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_MAP_NUM_NODES_LEN 4 +/* the number of entries in one subnet map node */ +#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_MAP_NUM_ENTRIES_PER_NODE_OFST 12 +#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_MAP_NUM_ENTRIES_PER_NODE_LEN 4 +/* minimum valid value for a subnet ID in a subnet map leaf */ +#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_ID_MIN_OFST 16 +#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_ID_MIN_LEN 4 +/* maximum valid value for a subnet ID in a subnet map leaf */ +#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_ID_MAX_OFST 20 +#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_ID_MAX_LEN 4 +/* the number of entries in the local and remote port range maps */ +#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_PORTRANGE_TREE_NUM_ENTRIES_OFST 24 +#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_PORTRANGE_TREE_NUM_ENTRIES_LEN 4 +/* minimum valid value for a portrange ID in a port range map leaf */ +#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_PORTRANGE_ID_MIN_OFST 28 +#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_PORTRANGE_ID_MIN_LEN 4 +/* maximum valid value for a portrange ID in a port range map leaf */ +#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_PORTRANGE_ID_MAX_OFST 32 +#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_PORTRANGE_ID_MAX_LEN 4 + /* MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT msgresponse: This response is * returned if a 
MC_CMD_GET_PARSER_DISP_INFO_IN request is sent with OP value * OP_GET_SUPPORTED_VNIC_ENCAP_MATCHES. It contains information about the @@ -12913,136 +13331,6 @@ #define MC_CMD_GET_PARSER_DISP_SUPPORTED_VNIC_ENCAP_TYPES_OUT_ENCAP_TYPE_L2GRE_WIDTH 1 -/***********************************/ -/* MC_CMD_PARSER_DISP_RW - * Direct read/write of parser-dispatcher state (DICPUs and LUE) for debugging. - * Please note that this interface is only of use to debug tools which have - * knowledge of firmware and hardware data structures; nothing here is intended - * for use by normal driver code. Note that although this command is in the - * Admin privilege group, in tamperproof adapters, only read operations are - * permitted. - */ -#define MC_CMD_PARSER_DISP_RW 0xe5 -#undef MC_CMD_0xe5_PRIVILEGE_CTG - -#define MC_CMD_0xe5_PRIVILEGE_CTG SRIOV_CTG_ADMIN - -/* MC_CMD_PARSER_DISP_RW_IN msgrequest */ -#define MC_CMD_PARSER_DISP_RW_IN_LEN 32 -/* identifies the target of the operation */ -#define MC_CMD_PARSER_DISP_RW_IN_TARGET_OFST 0 -#define MC_CMD_PARSER_DISP_RW_IN_TARGET_LEN 4 -/* enum: RX dispatcher CPU */ -#define MC_CMD_PARSER_DISP_RW_IN_RX_DICPU 0x0 -/* enum: TX dispatcher CPU */ -#define MC_CMD_PARSER_DISP_RW_IN_TX_DICPU 0x1 -/* enum: Lookup engine (with original metadata format). Deprecated; used only - * by cmdclient as a fallback for very old Huntington firmware, and not - * supported in firmware beyond v6.4.0.1005. Use LUE_VERSIONED_METADATA - * instead. - */ -#define MC_CMD_PARSER_DISP_RW_IN_LUE 0x2 -/* enum: Lookup engine (with requested metadata format) */ -#define MC_CMD_PARSER_DISP_RW_IN_LUE_VERSIONED_METADATA 0x3 -/* enum: RX0 dispatcher CPU (alias for RX_DICPU; Medford has 2 RX DICPUs) */ -#define MC_CMD_PARSER_DISP_RW_IN_RX0_DICPU 0x0 -/* enum: RX1 dispatcher CPU (only valid for Medford) */ -#define MC_CMD_PARSER_DISP_RW_IN_RX1_DICPU 0x4 -/* enum: Miscellaneous other state (only valid for Medford) */ -#define MC_CMD_PARSER_DISP_RW_IN_MISC_STATE 0x5 -/* identifies the type of operation requested */ -#define MC_CMD_PARSER_DISP_RW_IN_OP_OFST 4 -#define MC_CMD_PARSER_DISP_RW_IN_OP_LEN 4 -/* enum: Read a word of DICPU DMEM or a LUE entry */ -#define MC_CMD_PARSER_DISP_RW_IN_READ 0x0 -/* enum: Write a word of DICPU DMEM or a LUE entry. Not permitted on - * tamperproof adapters. - */ -#define MC_CMD_PARSER_DISP_RW_IN_WRITE 0x1 -/* enum: Read-modify-write a word of DICPU DMEM (not valid for LUE). Not - * permitted on tamperproof adapters. 
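- * The update applied is new = (old & mask) ^ value, using the RMW_AND_MASK
- * and RMW_XOR_VALUE fields below; for example, old 0x0000ffff with mask
- * 0xff00ff00 and value 0x000000f0 yields 0x0000ff00 ^ 0x000000f0 =
- * 0x0000fff0.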
- */ -#define MC_CMD_PARSER_DISP_RW_IN_RMW 0x2 -/* data memory address (DICPU targets) or LUE index (LUE targets) */ -#define MC_CMD_PARSER_DISP_RW_IN_ADDRESS_OFST 8 -#define MC_CMD_PARSER_DISP_RW_IN_ADDRESS_LEN 4 -/* selector (for MISC_STATE target) */ -#define MC_CMD_PARSER_DISP_RW_IN_SELECTOR_OFST 8 -#define MC_CMD_PARSER_DISP_RW_IN_SELECTOR_LEN 4 -/* enum: Port to datapath mapping */ -#define MC_CMD_PARSER_DISP_RW_IN_PORT_DP_MAPPING 0x1 -/* value to write (for DMEM writes) */ -#define MC_CMD_PARSER_DISP_RW_IN_DMEM_WRITE_VALUE_OFST 12 -#define MC_CMD_PARSER_DISP_RW_IN_DMEM_WRITE_VALUE_LEN 4 -/* XOR value (for DMEM read-modify-writes: new = (old & mask) ^ value) */ -#define MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_XOR_VALUE_OFST 12 -#define MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_XOR_VALUE_LEN 4 -/* AND mask (for DMEM read-modify-writes: new = (old & mask) ^ value) */ -#define MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_AND_MASK_OFST 16 -#define MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_AND_MASK_LEN 4 -/* metadata format (for LUE reads using LUE_VERSIONED_METADATA) */ -#define MC_CMD_PARSER_DISP_RW_IN_LUE_READ_METADATA_VERSION_OFST 12 -#define MC_CMD_PARSER_DISP_RW_IN_LUE_READ_METADATA_VERSION_LEN 4 -/* value to write (for LUE writes) */ -#define MC_CMD_PARSER_DISP_RW_IN_LUE_WRITE_VALUE_OFST 12 -#define MC_CMD_PARSER_DISP_RW_IN_LUE_WRITE_VALUE_LEN 20 - -/* MC_CMD_PARSER_DISP_RW_OUT msgresponse */ -#define MC_CMD_PARSER_DISP_RW_OUT_LEN 52 -/* value read (for DMEM reads) */ -#define MC_CMD_PARSER_DISP_RW_OUT_DMEM_READ_VALUE_OFST 0 -#define MC_CMD_PARSER_DISP_RW_OUT_DMEM_READ_VALUE_LEN 4 -/* value read (for LUE reads) */ -#define MC_CMD_PARSER_DISP_RW_OUT_LUE_READ_VALUE_OFST 0 -#define MC_CMD_PARSER_DISP_RW_OUT_LUE_READ_VALUE_LEN 20 -/* up to 8 32-bit words of additional soft state from the LUE manager (the - * exact content is firmware-dependent and intended only for debug use) - */ -#define MC_CMD_PARSER_DISP_RW_OUT_LUE_MGR_STATE_OFST 20 -#define MC_CMD_PARSER_DISP_RW_OUT_LUE_MGR_STATE_LEN 32 -/* datapath(s) used for each port (for MISC_STATE PORT_DP_MAPPING selector) */ -#define MC_CMD_PARSER_DISP_RW_OUT_PORT_DP_MAPPING_OFST 0 -#define MC_CMD_PARSER_DISP_RW_OUT_PORT_DP_MAPPING_LEN 4 -#define MC_CMD_PARSER_DISP_RW_OUT_PORT_DP_MAPPING_NUM 4 -#define MC_CMD_PARSER_DISP_RW_OUT_DP0 0x1 /* enum */ -#define MC_CMD_PARSER_DISP_RW_OUT_DP1 0x2 /* enum */ - - -/***********************************/ -/* MC_CMD_GET_PF_COUNT - * Get number of PFs on the device. - */ -#define MC_CMD_GET_PF_COUNT 0xb6 -#undef MC_CMD_0xb6_PRIVILEGE_CTG - -#define MC_CMD_0xb6_PRIVILEGE_CTG SRIOV_CTG_GENERAL - -/* MC_CMD_GET_PF_COUNT_IN msgrequest */ -#define MC_CMD_GET_PF_COUNT_IN_LEN 0 - -/* MC_CMD_GET_PF_COUNT_OUT msgresponse */ -#define MC_CMD_GET_PF_COUNT_OUT_LEN 1 -/* Identifies the number of PFs on the device. */ -#define MC_CMD_GET_PF_COUNT_OUT_PF_COUNT_OFST 0 -#define MC_CMD_GET_PF_COUNT_OUT_PF_COUNT_LEN 1 - - -/***********************************/ -/* MC_CMD_SET_PF_COUNT - * Set number of PFs on the device. - */ -#define MC_CMD_SET_PF_COUNT 0xb7 - -/* MC_CMD_SET_PF_COUNT_IN msgrequest */ -#define MC_CMD_SET_PF_COUNT_IN_LEN 4 -/* New number of PFs on the device. */ -#define MC_CMD_SET_PF_COUNT_IN_PF_COUNT_OFST 0 -#define MC_CMD_SET_PF_COUNT_IN_PF_COUNT_LEN 4 - -/* MC_CMD_SET_PF_COUNT_OUT msgresponse */ -#define MC_CMD_SET_PF_COUNT_OUT_LEN 0 - - /***********************************/ /* MC_CMD_GET_PORT_ASSIGNMENT * Get port assignment for current PCI function. 
@@ -13068,25 +13356,6 @@ #define MC_CMD_GET_PORT_ASSIGNMENT_OUT_NULL_PORT 0xffffffff -/***********************************/ -/* MC_CMD_SET_PORT_ASSIGNMENT - * Set port assignment for current PCI function. - */ -#define MC_CMD_SET_PORT_ASSIGNMENT 0xb9 -#undef MC_CMD_0xb9_PRIVILEGE_CTG - -#define MC_CMD_0xb9_PRIVILEGE_CTG SRIOV_CTG_ADMIN - -/* MC_CMD_SET_PORT_ASSIGNMENT_IN msgrequest */ -#define MC_CMD_SET_PORT_ASSIGNMENT_IN_LEN 4 -/* Identifies the port assignment for this function. */ -#define MC_CMD_SET_PORT_ASSIGNMENT_IN_PORT_OFST 0 -#define MC_CMD_SET_PORT_ASSIGNMENT_IN_PORT_LEN 4 - -/* MC_CMD_SET_PORT_ASSIGNMENT_OUT msgresponse */ -#define MC_CMD_SET_PORT_ASSIGNMENT_OUT_LEN 0 - - /***********************************/ /* MC_CMD_ALLOC_VIS * Allocate VIs for current PCI function. @@ -13183,263 +13452,6 @@ #define MC_CMD_GET_SRIOV_CFG_OUT_VF_STRIDE_LEN 4 -/***********************************/ -/* MC_CMD_SET_SRIOV_CFG - * Set SRIOV config for this PF. - */ -#define MC_CMD_SET_SRIOV_CFG 0xbb -#undef MC_CMD_0xbb_PRIVILEGE_CTG - -#define MC_CMD_0xbb_PRIVILEGE_CTG SRIOV_CTG_ADMIN - -/* MC_CMD_SET_SRIOV_CFG_IN msgrequest */ -#define MC_CMD_SET_SRIOV_CFG_IN_LEN 20 -/* Number of VFs currently enabled. */ -#define MC_CMD_SET_SRIOV_CFG_IN_VF_CURRENT_OFST 0 -#define MC_CMD_SET_SRIOV_CFG_IN_VF_CURRENT_LEN 4 -/* Max number of VFs before sriov stride and offset may need to be changed. */ -#define MC_CMD_SET_SRIOV_CFG_IN_VF_MAX_OFST 4 -#define MC_CMD_SET_SRIOV_CFG_IN_VF_MAX_LEN 4 -#define MC_CMD_SET_SRIOV_CFG_IN_FLAGS_OFST 8 -#define MC_CMD_SET_SRIOV_CFG_IN_FLAGS_LEN 4 -#define MC_CMD_SET_SRIOV_CFG_IN_VF_ENABLED_OFST 8 -#define MC_CMD_SET_SRIOV_CFG_IN_VF_ENABLED_LBN 0 -#define MC_CMD_SET_SRIOV_CFG_IN_VF_ENABLED_WIDTH 1 -/* RID offset of first VF from PF, or 0 for no change, or - * MC_CMD_RESOURCE_INSTANCE_ANY to allow the system to allocate an offset. - */ -#define MC_CMD_SET_SRIOV_CFG_IN_VF_OFFSET_OFST 12 -#define MC_CMD_SET_SRIOV_CFG_IN_VF_OFFSET_LEN 4 -/* RID offset of each subsequent VF from the previous, 0 for no change, or - * MC_CMD_RESOURCE_INSTANCE_ANY to allow the system to allocate a stride. - */ -#define MC_CMD_SET_SRIOV_CFG_IN_VF_STRIDE_OFST 16 -#define MC_CMD_SET_SRIOV_CFG_IN_VF_STRIDE_LEN 4 - -/* MC_CMD_SET_SRIOV_CFG_OUT msgresponse */ -#define MC_CMD_SET_SRIOV_CFG_OUT_LEN 0 - - -/***********************************/ -/* MC_CMD_GET_VI_ALLOC_INFO - * Get information about number of VI's and base VI number allocated to this - * function. This message is not available to dynamic clients created by - * MC_CMD_CLIENT_ALLOC. - */ -#define MC_CMD_GET_VI_ALLOC_INFO 0x8d -#undef MC_CMD_0x8d_PRIVILEGE_CTG - -#define MC_CMD_0x8d_PRIVILEGE_CTG SRIOV_CTG_GENERAL - -/* MC_CMD_GET_VI_ALLOC_INFO_IN msgrequest */ -#define MC_CMD_GET_VI_ALLOC_INFO_IN_LEN 0 - -/* MC_CMD_GET_VI_ALLOC_INFO_OUT msgresponse */ -#define MC_CMD_GET_VI_ALLOC_INFO_OUT_LEN 12 -/* The number of VIs allocated on this function */ -#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_COUNT_OFST 0 -#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_COUNT_LEN 4 -/* The base absolute VI number allocated to this function. Required to - * correctly interpret wakeup events. - */ -#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_BASE_OFST 4 -#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_BASE_LEN 4 -/* Function's port vi_shift value (always 0 on Huntington) */ -#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_SHIFT_OFST 8 -#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_SHIFT_LEN 4 - - -/***********************************/ -/* MC_CMD_DUMP_VI_STATE - * For CmdClient use. 
Dump pertinent information on a specific absolute VI. The - * VI must be owned by the calling client or one of its ancestors; usership of - * the VI (as set by MC_CMD_SET_VI_USER) is not sufficient. - */ -#define MC_CMD_DUMP_VI_STATE 0x8e -#undef MC_CMD_0x8e_PRIVILEGE_CTG - -#define MC_CMD_0x8e_PRIVILEGE_CTG SRIOV_CTG_GENERAL - -/* MC_CMD_DUMP_VI_STATE_IN msgrequest */ -#define MC_CMD_DUMP_VI_STATE_IN_LEN 4 -/* The VI number to query. */ -#define MC_CMD_DUMP_VI_STATE_IN_VI_NUMBER_OFST 0 -#define MC_CMD_DUMP_VI_STATE_IN_VI_NUMBER_LEN 4 - -/* MC_CMD_DUMP_VI_STATE_OUT msgresponse */ -#define MC_CMD_DUMP_VI_STATE_OUT_LEN 100 -/* The PF part of the function owning this VI. */ -#define MC_CMD_DUMP_VI_STATE_OUT_OWNER_PF_OFST 0 -#define MC_CMD_DUMP_VI_STATE_OUT_OWNER_PF_LEN 2 -/* The VF part of the function owning this VI. */ -#define MC_CMD_DUMP_VI_STATE_OUT_OWNER_VF_OFST 2 -#define MC_CMD_DUMP_VI_STATE_OUT_OWNER_VF_LEN 2 -/* Base of VIs allocated to this function. */ -#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VI_BASE_OFST 4 -#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VI_BASE_LEN 2 -/* Count of VIs allocated to the owner function. */ -#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VI_COUNT_OFST 6 -#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VI_COUNT_LEN 2 -/* Base interrupt vector allocated to this function. */ -#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VECTOR_BASE_OFST 8 -#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VECTOR_BASE_LEN 2 -/* Number of interrupt vectors allocated to this function. */ -#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VECTOR_COUNT_OFST 10 -#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VECTOR_COUNT_LEN 2 -/* Raw evq ptr table data. */ -#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_OFST 12 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_LEN 8 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_LO_OFST 12 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_LO_LEN 4 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_LO_LBN 96 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_LO_WIDTH 32 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_HI_OFST 16 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_HI_LEN 4 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_HI_LBN 128 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_HI_WIDTH 32 -/* Raw evq timer table data. */ -#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_OFST 20 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_LEN 8 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_LO_OFST 20 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_LO_LEN 4 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_LO_LBN 160 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_LO_WIDTH 32 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_HI_OFST 24 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_HI_LEN 4 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_HI_LBN 192 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_HI_WIDTH 32 -/* Combined metadata field. 
*/ -#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_OFST 28 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_LEN 4 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_BASE_OFST 28 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_BASE_LBN 0 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_BASE_WIDTH 16 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_NPAGES_OFST 28 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_NPAGES_LBN 16 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_NPAGES_WIDTH 8 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_WKUP_REF_OFST 28 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_WKUP_REF_LBN 24 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_WKUP_REF_WIDTH 8 -/* TXDPCPU raw table data for queue. */ -#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_OFST 32 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_LEN 8 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_LO_OFST 32 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_LO_LEN 4 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_LO_LBN 256 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_LO_WIDTH 32 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_HI_OFST 36 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_HI_LEN 4 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_HI_LBN 288 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_HI_WIDTH 32 -/* TXDPCPU raw table data for queue. */ -#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_OFST 40 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_LEN 8 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_LO_OFST 40 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_LO_LEN 4 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_LO_LBN 320 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_LO_WIDTH 32 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_HI_OFST 44 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_HI_LEN 4 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_HI_LBN 352 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_HI_WIDTH 32 -/* TXDPCPU raw table data for queue. */ -#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_OFST 48 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_LEN 8 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_LO_OFST 48 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_LO_LEN 4 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_LO_LBN 384 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_LO_WIDTH 32 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_HI_OFST 52 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_HI_LEN 4 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_HI_LBN 416 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_HI_WIDTH 32 -/* Combined metadata field. 
*/ -#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_OFST 56 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_LEN 8 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_LO_OFST 56 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_LO_LEN 4 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_LO_LBN 448 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_LO_WIDTH 32 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_HI_OFST 60 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_HI_LEN 4 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_HI_LBN 480 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_HI_WIDTH 32 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_BASE_OFST 56 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_BASE_LBN 0 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_BASE_WIDTH 16 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_NPAGES_OFST 56 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_NPAGES_LBN 16 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_NPAGES_WIDTH 8 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_QSTATE_OFST 56 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_QSTATE_LBN 24 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_QSTATE_WIDTH 8 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_WAITCOUNT_OFST 56 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_WAITCOUNT_LBN 32 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_WAITCOUNT_WIDTH 8 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_PADDING_OFST 56 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_PADDING_LBN 40 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_PADDING_WIDTH 24 -/* RXDPCPU raw table data for queue. */ -#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_OFST 64 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_LEN 8 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_LO_OFST 64 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_LO_LEN 4 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_LO_LBN 512 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_LO_WIDTH 32 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_HI_OFST 68 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_HI_LEN 4 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_HI_LBN 544 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_HI_WIDTH 32 -/* RXDPCPU raw table data for queue. */ -#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_OFST 72 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_LEN 8 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_LO_OFST 72 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_LO_LEN 4 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_LO_LBN 576 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_LO_WIDTH 32 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_HI_OFST 76 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_HI_LEN 4 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_HI_LBN 608 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_HI_WIDTH 32 -/* Reserved, currently 0. */ -#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_OFST 80 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_LEN 8 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_LO_OFST 80 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_LO_LEN 4 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_LO_LBN 640 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_LO_WIDTH 32 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_HI_OFST 84 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_HI_LEN 4 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_HI_LBN 672 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_HI_WIDTH 32 -/* Combined metadata field. 
*/ -#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_OFST 88 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_LEN 8 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_LO_OFST 88 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_LO_LEN 4 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_LO_LBN 704 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_LO_WIDTH 32 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_HI_OFST 92 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_HI_LEN 4 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_HI_LBN 736 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_HI_WIDTH 32 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_BASE_OFST 88 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_BASE_LBN 0 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_BASE_WIDTH 16 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_NPAGES_OFST 88 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_NPAGES_LBN 16 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_NPAGES_WIDTH 8 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_QSTATE_OFST 88 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_QSTATE_LBN 24 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_QSTATE_WIDTH 8 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_WAITCOUNT_OFST 88 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_WAITCOUNT_LBN 32 -#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_WAITCOUNT_WIDTH 8 -/* Current user, as assigned by MC_CMD_SET_VI_USER. */ -#define MC_CMD_DUMP_VI_STATE_OUT_USER_CLIENT_ID_OFST 96 -#define MC_CMD_DUMP_VI_STATE_OUT_USER_CLIENT_ID_LEN 4 - - /***********************************/ /* MC_CMD_ALLOC_PIOBUF * Allocate a push I/O buffer for later use with a tx queue. @@ -13491,354 +13503,102 @@ /* MC_CMD_GET_VI_TLP_PROCESSING_IN msgrequest */ #define MC_CMD_GET_VI_TLP_PROCESSING_IN_LEN 4 -/* VI number to get information for. */ +/* Queue handle, encodes queue type and VI number to get information for. */ #define MC_CMD_GET_VI_TLP_PROCESSING_IN_INSTANCE_OFST 0 #define MC_CMD_GET_VI_TLP_PROCESSING_IN_INSTANCE_LEN 4 -/* MC_CMD_GET_VI_TLP_PROCESSING_OUT msgresponse */ -#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_LEN 4 -/* Transaction processing steering hint 1 for use with the Rx Queue. */ -#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_TAG1_RX_OFST 0 -#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_TAG1_RX_LEN 1 -/* Transaction processing steering hint 2 for use with the Ev Queue. */ -#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_TAG2_EV_OFST 1 -#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_TAG2_EV_LEN 1 -/* Use Relaxed ordering model for TLPs on this VI. */ -#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_RELAXED_ORDERING_LBN 16 -#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_RELAXED_ORDERING_WIDTH 1 -/* Use ID based ordering for TLPs on this VI. */ -#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_ID_BASED_ORDERING_LBN 17 -#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_ID_BASED_ORDERING_WIDTH 1 -/* Set no snoop bit for TLPs on this VI. */ -#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_NO_SNOOP_LBN 18 -#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_NO_SNOOP_WIDTH 1 -/* Enable TPH for TLPs on this VI. 
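- * (TPH here is the PCIe TLP Processing Hints mechanism.)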
*/ -#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_ON_LBN 19 -#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_ON_WIDTH 1 -#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_DATA_OFST 0 -#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_DATA_LEN 4 +/* MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT msgresponse: This message has the same + * layout as GET_VI_TLP_PROCESSING_OUT, but with corrected field ordering to + * simplify use in drivers + */ +#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_LEN 4 +#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_DATA_OFST 0 +#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_DATA_LEN 4 +#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_TPH_TAG1_RX_OFST 0 +#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_TPH_TAG1_RX_LBN 0 +#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_TPH_TAG1_RX_WIDTH 8 +#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_TPH_TAG2_EV_OFST 0 +#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_TPH_TAG2_EV_LBN 8 +#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_TPH_TAG2_EV_WIDTH 8 +#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_RELAXED_ORDERING_OFST 0 +#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_RELAXED_ORDERING_LBN 16 +#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_RELAXED_ORDERING_WIDTH 1 +#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_RELAXED_ORDERING_PACKET_DATA_OFST 0 +#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_RELAXED_ORDERING_PACKET_DATA_LBN 16 +#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_RELAXED_ORDERING_PACKET_DATA_WIDTH 1 +#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_ID_BASED_ORDERING_OFST 0 +#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_ID_BASED_ORDERING_LBN 17 +#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_ID_BASED_ORDERING_WIDTH 1 +#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_NO_SNOOP_OFST 0 +#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_NO_SNOOP_LBN 18 +#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_NO_SNOOP_WIDTH 1 +#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_TPH_ON_OFST 0 +#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_TPH_ON_LBN 19 +#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_TPH_ON_WIDTH 1 +#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_RELAXED_ORDERING_SYNC_DATA_OFST 0 +#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_RELAXED_ORDERING_SYNC_DATA_LBN 20 +#define MC_CMD_GET_VI_TLP_PROCESSING_V2_OUT_RELAXED_ORDERING_SYNC_DATA_WIDTH 1 /***********************************/ /* MC_CMD_SET_VI_TLP_PROCESSING * Set TLP steering and ordering information for a VI. The caller must have the * GRP_FUNC_DMA privilege and must be the currently-assigned user of this VI or - * an ancestor of the current user (see MC_CMD_SET_VI_USER). + * an ancestor of the current user (see MC_CMD_SET_VI_USER). Note that LL + * queues require this to be called after allocation but before initialisation + * of the queue. TLP options of a queue are fixed once the queue is + * initialised; they default to the current global values but can be + * overridden using this command. At LL queue allocation, all overrides are + * cleared. */ #define MC_CMD_SET_VI_TLP_PROCESSING 0xb1 #undef MC_CMD_0xb1_PRIVILEGE_CTG #define MC_CMD_0xb1_PRIVILEGE_CTG SRIOV_CTG_GENERAL -/* MC_CMD_SET_VI_TLP_PROCESSING_IN msgrequest */ -#define MC_CMD_SET_VI_TLP_PROCESSING_IN_LEN 8 -/* VI number to set information for. */ -#define MC_CMD_SET_VI_TLP_PROCESSING_IN_INSTANCE_OFST 0 -#define MC_CMD_SET_VI_TLP_PROCESSING_IN_INSTANCE_LEN 4 -/* Transaction processing steering hint 1 for use with the Rx Queue. */ -#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_TAG1_RX_OFST 4 -#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_TAG1_RX_LEN 1 -/* Transaction processing steering hint 2 for use with the Ev Queue.
*/ -#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_TAG2_EV_OFST 5 -#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_TAG2_EV_LEN 1 -/* Use Relaxed ordering model for TLPs on this VI. */ -#define MC_CMD_SET_VI_TLP_PROCESSING_IN_RELAXED_ORDERING_LBN 48 -#define MC_CMD_SET_VI_TLP_PROCESSING_IN_RELAXED_ORDERING_WIDTH 1 -/* Use ID based ordering for TLPs on this VI. */ -#define MC_CMD_SET_VI_TLP_PROCESSING_IN_ID_BASED_ORDERING_LBN 49 -#define MC_CMD_SET_VI_TLP_PROCESSING_IN_ID_BASED_ORDERING_WIDTH 1 -/* Set the no snoop bit for TLPs on this VI. */ -#define MC_CMD_SET_VI_TLP_PROCESSING_IN_NO_SNOOP_LBN 50 -#define MC_CMD_SET_VI_TLP_PROCESSING_IN_NO_SNOOP_WIDTH 1 -/* Enable TPH for TLPs on this VI. */ -#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_ON_LBN 51 -#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_ON_WIDTH 1 -#define MC_CMD_SET_VI_TLP_PROCESSING_IN_DATA_OFST 4 -#define MC_CMD_SET_VI_TLP_PROCESSING_IN_DATA_LEN 4 +/* MC_CMD_SET_VI_TLP_PROCESSING_V2_IN msgrequest: This message has the same + * layout as SET_VI_TLP_PROCESSING_OUT, but with corrected field ordering to + * simplify use in drivers. + */ +#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_LEN 8 +/* Queue handle, encodes queue type and VI number to set information for. */ +#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_INSTANCE_OFST 0 +#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_INSTANCE_LEN 4 +#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_DATA_OFST 4 +#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_DATA_LEN 4 +#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_TPH_TAG1_RX_OFST 4 +#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_TPH_TAG1_RX_LBN 0 +#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_TPH_TAG1_RX_WIDTH 8 +#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_TPH_TAG2_EV_OFST 4 +#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_TPH_TAG2_EV_LBN 8 +#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_TPH_TAG2_EV_WIDTH 8 +#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_RELAXED_ORDERING_OFST 4 +#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_RELAXED_ORDERING_LBN 16 +#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_RELAXED_ORDERING_WIDTH 1 +#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_RELAXED_ORDERING_PACKET_DATA_OFST 4 +#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_RELAXED_ORDERING_PACKET_DATA_LBN 16 +#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_RELAXED_ORDERING_PACKET_DATA_WIDTH 1 +#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_ID_BASED_ORDERING_OFST 4 +#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_ID_BASED_ORDERING_LBN 17 +#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_ID_BASED_ORDERING_WIDTH 1 +#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_NO_SNOOP_OFST 4 +#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_NO_SNOOP_LBN 18 +#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_NO_SNOOP_WIDTH 1 +#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_TPH_ON_OFST 4 +#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_TPH_ON_LBN 19 +#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_TPH_ON_WIDTH 1 +#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_RELAXED_ORDERING_SYNC_DATA_OFST 4 +#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_RELAXED_ORDERING_SYNC_DATA_LBN 20 +#define MC_CMD_SET_VI_TLP_PROCESSING_V2_IN_RELAXED_ORDERING_SYNC_DATA_WIDTH 1 /* MC_CMD_SET_VI_TLP_PROCESSING_OUT msgresponse */ #define MC_CMD_SET_VI_TLP_PROCESSING_OUT_LEN 0 -/***********************************/ -/* MC_CMD_GET_TLP_PROCESSING_GLOBALS - * Get global PCIe steering and transaction processing configuration. 
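- * These act as the defaults inherited by individual VIs; per-VI values set
- * via MC_CMD_SET_VI_TLP_PROCESSING override them for the VI concerned.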
- */ -#define MC_CMD_GET_TLP_PROCESSING_GLOBALS 0xbc -#undef MC_CMD_0xbc_PRIVILEGE_CTG - -#define MC_CMD_0xbc_PRIVILEGE_CTG SRIOV_CTG_ADMIN - -/* MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN msgrequest */ -#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_LEN 4 -#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_OFST 0 -#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_LEN 4 -/* enum: MISC. */ -#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_MISC 0x0 -/* enum: IDO. */ -#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_IDO 0x1 -/* enum: RO. */ -#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_RO 0x2 -/* enum: TPH Type. */ -#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_TPH_TYPE 0x3 - -/* MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT msgresponse */ -#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_LEN 8 -#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_GLOBAL_CATEGORY_OFST 0 -#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_GLOBAL_CATEGORY_LEN 4 -/* Enum values, see field(s): */ -/* MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN/TLP_GLOBAL_CATEGORY */ -/* Amalgamated TLP info word. */ -#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_WORD_OFST 4 -#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_WORD_LEN 4 -#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_WTAG_EN_OFST 4 -#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_WTAG_EN_LBN 0 -#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_WTAG_EN_WIDTH 1 -#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_SPARE_OFST 4 -#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_SPARE_LBN 1 -#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_SPARE_WIDTH 31 -#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_DL_EN_OFST 4 -#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_DL_EN_LBN 0 -#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_DL_EN_WIDTH 1 -#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_TX_EN_OFST 4 -#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_TX_EN_LBN 1 -#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_TX_EN_WIDTH 1 -#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_EV_EN_OFST 4 -#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_EV_EN_LBN 2 -#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_EV_EN_WIDTH 1 -#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_RX_EN_OFST 4 -#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_RX_EN_LBN 3 -#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_RX_EN_WIDTH 1 -#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_SPARE_OFST 4 -#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_SPARE_LBN 4 -#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_SPARE_WIDTH 28 -#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_RXDMA_EN_OFST 4 -#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_RXDMA_EN_LBN 0 -#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_RXDMA_EN_WIDTH 1 -#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_TXDMA_EN_OFST 4 -#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_TXDMA_EN_LBN 1 -#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_TXDMA_EN_WIDTH 1 -#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_DL_EN_OFST 4 -#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_DL_EN_LBN 2 -#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_DL_EN_WIDTH 1 -#define 
MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_SPARE_OFST 4 -#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_SPARE_LBN 3 -#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_SPARE_WIDTH 29 -#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_MSIX_OFST 4 -#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_MSIX_LBN 0 -#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_MSIX_WIDTH 2 -#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_DL_OFST 4 -#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_DL_LBN 2 -#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_DL_WIDTH 2 -#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_TX_OFST 4 -#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_TX_LBN 4 -#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_TX_WIDTH 2 -#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_EV_OFST 4 -#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_EV_LBN 6 -#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_EV_WIDTH 2 -#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_RX_OFST 4 -#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_RX_LBN 8 -#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_RX_WIDTH 2 -#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TLP_TYPE_SPARE_OFST 4 -#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TLP_TYPE_SPARE_LBN 9 -#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TLP_TYPE_SPARE_WIDTH 23 - - -/***********************************/ -/* MC_CMD_SET_TLP_PROCESSING_GLOBALS - * Set global PCIe steering and transaction processing configuration. - */ -#define MC_CMD_SET_TLP_PROCESSING_GLOBALS 0xbd -#undef MC_CMD_0xbd_PRIVILEGE_CTG - -#define MC_CMD_0xbd_PRIVILEGE_CTG SRIOV_CTG_ADMIN - -/* MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN msgrequest */ -#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_LEN 8 -#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_OFST 0 -#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_LEN 4 -/* Enum values, see field(s): */ -/* MC_CMD_GET_TLP_PROCESSING_GLOBALS/MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN/TLP_GLOBAL_CATEGORY */ -/* Amalgamated TLP info word. 
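- * Its interpretation depends on TLP_GLOBAL_CATEGORY; for the RO category,
- * for example, an info word of 0x3 would set RO_RXDMA_EN and RO_TXDMA_EN
- * while leaving RO_DL_EN clear.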
*/ -#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_WORD_OFST 4 -#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_WORD_LEN 4 -#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_MISC_WTAG_EN_OFST 4 -#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_MISC_WTAG_EN_LBN 0 -#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_MISC_WTAG_EN_WIDTH 1 -#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_DL_EN_OFST 4 -#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_DL_EN_LBN 0 -#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_DL_EN_WIDTH 1 -#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_TX_EN_OFST 4 -#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_TX_EN_LBN 1 -#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_TX_EN_WIDTH 1 -#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_EV_EN_OFST 4 -#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_EV_EN_LBN 2 -#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_EV_EN_WIDTH 1 -#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_RX_EN_OFST 4 -#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_RX_EN_LBN 3 -#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_RX_EN_WIDTH 1 -#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_RXDMA_EN_OFST 4 -#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_RXDMA_EN_LBN 0 -#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_RXDMA_EN_WIDTH 1 -#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_TXDMA_EN_OFST 4 -#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_TXDMA_EN_LBN 1 -#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_TXDMA_EN_WIDTH 1 -#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_DL_EN_OFST 4 -#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_DL_EN_LBN 2 -#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_DL_EN_WIDTH 1 -#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_MSIX_OFST 4 -#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_MSIX_LBN 0 -#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_MSIX_WIDTH 2 -#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_DL_OFST 4 -#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_DL_LBN 2 -#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_DL_WIDTH 2 -#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_TX_OFST 4 -#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_TX_LBN 4 -#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_TX_WIDTH 2 -#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_EV_OFST 4 -#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_EV_LBN 6 -#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_EV_WIDTH 2 -#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_RX_OFST 4 -#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_RX_LBN 8 -#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_RX_WIDTH 2 -#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_SPARE_OFST 4 -#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_SPARE_LBN 10 -#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_SPARE_WIDTH 22 - -/* MC_CMD_SET_TLP_PROCESSING_GLOBALS_OUT msgresponse */ -#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_OUT_LEN 0 - - -/***********************************/ -/* MC_CMD_SATELLITE_DOWNLOAD - * Download a new set of images to the satellite CPUs from the host. 
- */ -#define MC_CMD_SATELLITE_DOWNLOAD 0x91 -#undef MC_CMD_0x91_PRIVILEGE_CTG - -#define MC_CMD_0x91_PRIVILEGE_CTG SRIOV_CTG_ADMIN - -/* MC_CMD_SATELLITE_DOWNLOAD_IN msgrequest: The reset requirements for the CPUs - * are subtle, and so downloads must proceed in a number of phases. - * - * 1) PHASE_RESET with a target of TARGET_ALL and chunk ID/length of 0. - * - * 2) PHASE_IMEMS for each of the IMEM targets (target IDs 0-11). Each download - * may consist of multiple chunks. The final chunk (with CHUNK_ID_LAST) should - * be a checksum (a simple 32-bit sum) of the transferred data. An individual - * download may be aborted using CHUNK_ID_ABORT. - * - * 3) PHASE_VECTORS for each of the vector table targets (target IDs 12-15), - * similar to PHASE_IMEMS. - * - * 4) PHASE_READY with a target of TARGET_ALL and chunk ID/length of 0. - * - * After any error (a requested abort is not considered to be an error) the - * sequence must be restarted from PHASE_RESET. - */ -#define MC_CMD_SATELLITE_DOWNLOAD_IN_LENMIN 20 -#define MC_CMD_SATELLITE_DOWNLOAD_IN_LENMAX 252 -#define MC_CMD_SATELLITE_DOWNLOAD_IN_LENMAX_MCDI2 1020 -#define MC_CMD_SATELLITE_DOWNLOAD_IN_LEN(num) (16+4*(num)) -#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_NUM(len) (((len)-16)/4) -/* Download phase. (Note: the IDLE phase is used internally and is never valid - * in a command from the host.) - */ -#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_OFST 0 -#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_LEN 4 -#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_IDLE 0x0 /* enum */ -#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_RESET 0x1 /* enum */ -#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_IMEMS 0x2 /* enum */ -#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_VECTORS 0x3 /* enum */ -#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_READY 0x4 /* enum */ -/* Target for download. (These match the blob numbers defined in - * mc_flash_layout.h.) 
- */ -#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_OFST 4 -#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_LEN 4 -/* enum: Valid in phase 2 (PHASE_IMEMS) only */ -#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDI_TEXT 0x0 -/* enum: Valid in phase 2 (PHASE_IMEMS) only */ -#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDI_TEXT 0x1 -/* enum: Valid in phase 2 (PHASE_IMEMS) only */ -#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDP_TEXT 0x2 -/* enum: Valid in phase 2 (PHASE_IMEMS) only */ -#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDP_TEXT 0x3 -/* enum: Valid in phase 2 (PHASE_IMEMS) only */ -#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_HR_LUT 0x4 -/* enum: Valid in phase 2 (PHASE_IMEMS) only */ -#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_HR_LUT_CFG 0x5 -/* enum: Valid in phase 2 (PHASE_IMEMS) only */ -#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_HR_LUT 0x6 -/* enum: Valid in phase 2 (PHASE_IMEMS) only */ -#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_HR_LUT_CFG 0x7 -/* enum: Valid in phase 2 (PHASE_IMEMS) only */ -#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_HR_PGM 0x8 -/* enum: Valid in phase 2 (PHASE_IMEMS) only */ -#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_SL_PGM 0x9 -/* enum: Valid in phase 2 (PHASE_IMEMS) only */ -#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_HR_PGM 0xa -/* enum: Valid in phase 2 (PHASE_IMEMS) only */ -#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_SL_PGM 0xb -/* enum: Valid in phase 3 (PHASE_VECTORS) only */ -#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDI_VTBL0 0xc -/* enum: Valid in phase 3 (PHASE_VECTORS) only */ -#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDI_VTBL0 0xd -/* enum: Valid in phase 3 (PHASE_VECTORS) only */ -#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDI_VTBL1 0xe -/* enum: Valid in phase 3 (PHASE_VECTORS) only */ -#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDI_VTBL1 0xf -/* enum: Valid in phases 1 (PHASE_RESET) and 4 (PHASE_READY) only */ -#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_ALL 0xffffffff -/* Chunk ID, or CHUNK_ID_LAST or CHUNK_ID_ABORT */ -#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_OFST 8 -#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_LEN 4 -/* enum: Last chunk, containing checksum rather than data */ -#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_LAST 0xffffffff -/* enum: Abort download of this item */ -#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_ABORT 0xfffffffe -/* Length of this chunk in bytes */ -#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_LEN_OFST 12 -#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_LEN_LEN 4 -/* Data for this chunk */ -#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_OFST 16 -#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_LEN 4 -#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_MINNUM 1 -#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_MAXNUM 59 -#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_MAXNUM_MCDI2 251 - -/* MC_CMD_SATELLITE_DOWNLOAD_OUT msgresponse */ -#define MC_CMD_SATELLITE_DOWNLOAD_OUT_LEN 8 -/* Same as MC_CMD_ERR field, but included as 0 in success cases */ -#define MC_CMD_SATELLITE_DOWNLOAD_OUT_RESULT_OFST 0 -#define MC_CMD_SATELLITE_DOWNLOAD_OUT_RESULT_LEN 4 -/* Extra status information */ -#define MC_CMD_SATELLITE_DOWNLOAD_OUT_INFO_OFST 4 -#define MC_CMD_SATELLITE_DOWNLOAD_OUT_INFO_LEN 4 -/* enum: Code download OK, completed. */ -#define MC_CMD_SATELLITE_DOWNLOAD_OUT_OK_COMPLETE 0x0 -/* enum: Code download aborted as requested. */ -#define MC_CMD_SATELLITE_DOWNLOAD_OUT_OK_ABORTED 0x1 -/* enum: Code download OK so far, send next chunk. 
*/ -#define MC_CMD_SATELLITE_DOWNLOAD_OUT_OK_NEXT_CHUNK 0x2 -/* enum: Download phases out of sequence */ -#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_PHASE 0x100 -/* enum: Bad target for this phase */ -#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_TARGET 0x101 -/* enum: Chunk ID out of sequence */ -#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_CHUNK_ID 0x200 -/* enum: Chunk length zero or too large */ -#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_CHUNK_LEN 0x201 -/* enum: Checksum was incorrect */ -#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_CHECKSUM 0x300 - - /***********************************/ /* MC_CMD_GET_CAPABILITIES - * Get device capabilities. - * - * This is supplementary to the MC_CMD_GET_BOARD_CFG command, and intended to - * reference inherent device capabilities as opposed to current NVRAM config. + * Get device capabilities. This is supplementary to the MC_CMD_GET_BOARD_CFG + * command, and intended to reference inherent device capabilities as opposed + * to current NVRAM config. */ #define MC_CMD_GET_CAPABILITIES 0xbe #undef MC_CMD_0xbe_PRIVILEGE_CTG @@ -14194,2096 +13954,3092 @@ #define MC_CMD_GET_CAPABILITIES_V2_OUT_VXLAN_NVGRE_LBN 31 #define MC_CMD_GET_CAPABILITIES_V2_OUT_VXLAN_NVGRE_WIDTH 1 /* RxDPCPU firmware id. */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DPCPU_FW_ID_OFST 4 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DPCPU_FW_ID_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DPCPU_FW_ID_OFST 4 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DPCPU_FW_ID_LEN 2 +/* enum: Standard RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP 0x0 +/* enum: Low latency RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_LOW_LATENCY 0x1 +/* enum: Packed stream RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_PACKED_STREAM 0x2 +/* enum: Rules engine RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_RULES_ENGINE 0x5 +/* enum: DPDK RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_DPDK 0x6 +/* enum: BIST RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_BIST 0x10a +/* enum: RXDP Test firmware image 1 */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101 +/* enum: RXDP Test firmware image 2 */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102 +/* enum: RXDP Test firmware image 3 */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103 +/* enum: RXDP Test firmware image 4 */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104 +/* enum: RXDP Test firmware image 5 */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_BACKPRESSURE 0x105 +/* enum: RXDP Test firmware image 6 */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106 +/* enum: RXDP Test firmware image 7 */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107 +/* enum: RXDP Test firmware image 8 */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_DISABLE_DL 0x108 +/* enum: RXDP Test firmware image 9 */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b +/* enum: RXDP Test firmware image 10 */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_SLOW 0x10c +/* TxDPCPU firmware id. 
*/ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_DPCPU_FW_ID_OFST 6 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_DPCPU_FW_ID_LEN 2 +/* enum: Standard TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP 0x0 +/* enum: Low latency TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_LOW_LATENCY 0x1 +/* enum: High packet rate TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_HIGH_PACKET_RATE 0x3 +/* enum: Rules engine TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_RULES_ENGINE 0x5 +/* enum: DPDK TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_DPDK 0x6 +/* enum: BIST TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_BIST 0x12d +/* enum: TXDP Test firmware image 1 */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_TEST_FW_TSO_EDIT 0x101 +/* enum: TXDP Test firmware image 2 */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102 +/* enum: TXDP CSR bus test firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_TEST_FW_CSR 0x103 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_OFST 8 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_REV_OFST 8 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_REV_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_REV_WIDTH 12 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_TYPE_OFST 8 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_TYPE_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4 +/* enum: reserved value - do not use (may indicate alternative interpretation + * of REV field in future) + */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_RESERVED 0x0 +/* enum: Trivial RX PD firmware for early Huntington development (Huntington + * development only) + */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1 +/* enum: RX PD firmware for telemetry prototyping (Medford2 development only) + */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_TESTFW_TELEMETRY 0x1 +/* enum: RX PD firmware with approximately Siena-compatible behaviour + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2 +/* enum: Full featured RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3 +/* enum: (deprecated original name for the FULL_FEATURED variant) */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_VSWITCH 0x3 +/* enum: siena_compat variant RX PD firmware using PM rather than MAC + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +/* enum: Low latency RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5 +/* enum: Packed stream RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6 +/* enum: RX PD firmware handling layer 2 only for high packet rate performance + * tests (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7 +/* enum: Rules engine RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8 +/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_L3XUDP 0x9 +/* enum: DPDK RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_DPDK 0xa +/* enum: RX PD firmware for GUE parsing prototype (Medford development 
only) */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe +/* enum: RX PD firmware parsing but not filtering network overlay tunnel + * encapsulations (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_OFST 10 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_REV_OFST 10 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_REV_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_REV_WIDTH 12 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_TYPE_OFST 10 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_TYPE_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4 +/* enum: reserved value - do not use (may indicate alternative interpretation + * of REV field in future) + */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_RESERVED 0x0 +/* enum: Trivial TX PD firmware for early Huntington development (Huntington + * development only) + */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1 +/* enum: TX PD firmware for telemetry prototyping (Medford2 development only) + */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_TESTFW_TELEMETRY 0x1 +/* enum: TX PD firmware with approximately Siena-compatible behaviour + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2 +/* enum: Full featured TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3 +/* enum: (deprecated original name for the FULL_FEATURED variant) */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_VSWITCH 0x3 +/* enum: siena_compat variant TX PD firmware using PM rather than MAC + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */ +/* enum: TX PD firmware handling layer 2 only for high packet rate performance + * tests (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7 +/* enum: Rules engine TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8 +/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_L3XUDP 0x9 +/* enum: DPDK TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_DPDK 0xa +/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe +/* Hardware capabilities of NIC */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_HW_CAPABILITIES_OFST 12 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_HW_CAPABILITIES_LEN 4 +/* Licensed capabilities */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_LICENSE_CAPABILITIES_OFST 16 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_LICENSE_CAPABILITIES_LEN 4 +/* Second word of flags. Not present on older firmware (check the length). 
*/ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS2_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_ENCAP_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_ENCAP_LBN 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_ENCAP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVQ_TIMER_CTRL_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVQ_TIMER_CTRL_LBN 2 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVQ_TIMER_CTRL_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVENT_CUT_THROUGH_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVENT_CUT_THROUGH_LBN 3 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVENT_CUT_THROUGH_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_CUT_THROUGH_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_CUT_THROUGH_LBN 4 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_CUT_THROUGH_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_VFIFO_ULL_MODE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_VFIFO_ULL_MODE_LBN 5 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_VFIFO_ULL_MODE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_MAC_STATS_40G_TX_SIZE_BINS_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_EVQ_TYPE_SUPPORTED_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_EVQ_TYPE_SUPPORTED_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_EVQ_TYPE_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_EVQ_V2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_EVQ_V2_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_EVQ_V2_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MAC_TIMESTAMPING_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MAC_TIMESTAMPING_LBN 8 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MAC_TIMESTAMPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TIMESTAMP_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TIMESTAMP_LBN 9 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TIMESTAMP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_SNIFF_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_SNIFF_LBN 10 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_SNIFF_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_SNIFF_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_SNIFF_LBN 11 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_SNIFF_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_BACKGROUND_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_BACKGROUND_LBN 13 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_BACKGROUND_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_DB_RETURN_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_DB_RETURN_LBN 14 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_DB_RETURN_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_CTPIO_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_CTPIO_LBN 15 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_CTPIO_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TSA_SUPPORT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TSA_SUPPORT_LBN 16 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TSA_SUPPORT_WIDTH 1 +#define 
MC_CMD_GET_CAPABILITIES_V2_OUT_TSA_BOUND_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TSA_BOUND_LBN 17 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TSA_BOUND_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_SF_ADAPTER_AUTHENTICATION_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_FILTER_ACTION_FLAG_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_FILTER_ACTION_FLAG_LBN 19 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_FILTER_ACTION_FLAG_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_FILTER_ACTION_MARK_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_FILTER_ACTION_MARK_LBN 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_FILTER_ACTION_MARK_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_EQUAL_STRIDE_SUPER_BUFFER_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_EQUAL_STRIDE_PACKED_STREAM_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_L3XUDP_SUPPORT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_L3XUDP_SUPPORT_LBN 22 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_L3XUDP_SUPPORT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_FW_SUBVARIANT_NO_TX_CSUM_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_VI_SPREADING_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_VI_SPREADING_LBN 24 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_VI_SPREADING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_HLB_IDLE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_HLB_IDLE_LBN 25 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_HLB_IDLE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_RXQ_NO_CONT_EV_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_RXQ_NO_CONT_EV_LBN 26 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_RXQ_NO_CONT_EV_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_RXQ_WITH_BUFFER_SIZE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_RXQ_WITH_BUFFER_SIZE_LBN 27 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_RXQ_WITH_BUFFER_SIZE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_BUNDLE_UPDATE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_BUNDLE_UPDATE_LBN 28 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_BUNDLE_UPDATE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V3_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V3_LBN 29 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V3_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_DYNAMIC_SENSORS_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_DYNAMIC_SENSORS_LBN 30 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_DYNAMIC_SENSORS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_LBN 31 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_WIDTH 1 +/* Number of FATSOv2 contexts per datapath supported by this NIC (when + * TX_TSO_V2 == 1). Not present on older firmware (check the length). 
+ */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2 +/* One byte per PF containing the number of the external port assigned to this + * PF, indexed by PF number. Special values indicate that a PF is either not + * present or not assigned. + */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16 +/* enum: The caller is not permitted to access information on this PF. */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_ACCESS_NOT_PERMITTED 0xff +/* enum: PF does not exist. */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_PF_NOT_PRESENT 0xfe +/* enum: PF does exist but is not assigned to any external port. */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_PF_NOT_ASSIGNED 0xfd +/* enum: This value indicates that PF is assigned, but it cannot be expressed + * in this field. It is intended for a possible future situation where a more + * complex scheme of PFs to ports mapping is being used. The future driver + * should look for a new field supporting the new scheme. The current/old + * driver should treat this value as PF_NOT_ASSIGNED. + */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc +/* One byte per PF containing the number of its VFs, indexed by PF number. A + * special value indicates that a PF is not present. + */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VFS_PER_PF_OFST 42 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VFS_PER_PF_LEN 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VFS_PER_PF_NUM 16 +/* enum: The caller is not permitted to access information on this PF. */ +/* MC_CMD_GET_CAPABILITIES_V2_OUT_ACCESS_NOT_PERMITTED 0xff */ +/* enum: PF does not exist. */ +/* MC_CMD_GET_CAPABILITIES_V2_OUT_PF_NOT_PRESENT 0xfe */ +/* Number of VIs available for external ports 0-3. For devices with more than + * four ports, the remainder are in NUM_VIS_PER_PORT2 in + * GET_CAPABILITIES_V12_OUT. + */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VIS_PER_PORT_OFST 58 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VIS_PER_PORT_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VIS_PER_PORT_NUM 4 +/* Size of RX descriptor cache expressed as binary logarithm The actual size + * equals (2 ^ RX_DESC_CACHE_SIZE) + */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DESC_CACHE_SIZE_OFST 66 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DESC_CACHE_SIZE_LEN 1 +/* Size of TX descriptor cache expressed as binary logarithm The actual size + * equals (2 ^ TX_DESC_CACHE_SIZE) + */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_DESC_CACHE_SIZE_OFST 67 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_DESC_CACHE_SIZE_LEN 1 +/* Total number of available PIO buffers */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_PIO_BUFFS_OFST 68 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_PIO_BUFFS_LEN 2 +/* Size of a single PIO buffer */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_SIZE_PIO_BUFF_OFST 70 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_SIZE_PIO_BUFF_LEN 2 + +/* MC_CMD_GET_CAPABILITIES_V3_OUT msgresponse */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_LEN 76 +/* First word of flags. 
*/ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_FLAGS1_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_FLAGS1_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VPORT_RECONFIGURE_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VPORT_RECONFIGURE_LBN 3 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VPORT_RECONFIGURE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_STRIPING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_STRIPING_LBN 4 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_STRIPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VADAPTOR_QUERY_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VADAPTOR_QUERY_LBN 5 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VADAPTOR_QUERY_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVB_PORT_VLAN_RESTRICT_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_DRV_ATTACH_PREBOOT_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_DRV_ATTACH_PREBOOT_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_DRV_ATTACH_PREBOOT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_FORCE_EVENT_MERGING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_FORCE_EVENT_MERGING_LBN 8 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_SET_MAC_ENHANCED_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_SET_MAC_ENHANCED_LBN 9 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_SET_MAC_ENHANCED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MAC_SECURITY_FILTERING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MAC_SECURITY_FILTERING_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_ADDITIONAL_RSS_MODES_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_ADDITIONAL_RSS_MODES_LBN 13 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_ADDITIONAL_RSS_MODES_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_QBB_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_QBB_LBN 14 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_QBB_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PACKED_STREAM_VAR_BUFFERS_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_RSS_LIMITED_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_RSS_LIMITED_LBN 16 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_RSS_LIMITED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PACKED_STREAM_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PACKED_STREAM_LBN 17 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PACKED_STREAM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_INCLUDE_FCS_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_INCLUDE_FCS_LBN 18 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_INCLUDE_FCS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_VLAN_INSERTION_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_VLAN_INSERTION_LBN 19 
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_VLAN_INSERTION_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_VLAN_STRIPPING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_VLAN_STRIPPING_LBN 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_VLAN_STRIPPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PREFIX_LEN_0_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PREFIX_LEN_0_LBN 22 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PREFIX_LEN_0_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PREFIX_LEN_14_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PREFIX_LEN_14_LBN 23 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PREFIX_LEN_14_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_TIMESTAMP_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_TIMESTAMP_LBN 24 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_TIMESTAMP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_BATCHING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_BATCHING_LBN 25 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_BATCHING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCAST_FILTER_CHAINING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCAST_FILTER_CHAINING_LBN 26 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCAST_FILTER_CHAINING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_PM_AND_RXDP_COUNTERS_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_PM_AND_RXDP_COUNTERS_LBN 27 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DISABLE_SCATTER_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DISABLE_SCATTER_LBN 28 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DISABLE_SCATTER_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MCAST_UDP_LOOPBACK_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVB_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVB_LBN 30 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVB_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VXLAN_NVGRE_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VXLAN_NVGRE_LBN 31 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VXLAN_NVGRE_WIDTH 1 +/* RxDPCPU firmware id. 
*/ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DPCPU_FW_ID_OFST 4 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DPCPU_FW_ID_LEN 2 +/* enum: Standard RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP 0x0 +/* enum: Low latency RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_LOW_LATENCY 0x1 +/* enum: Packed stream RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_PACKED_STREAM 0x2 +/* enum: Rules engine RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_RULES_ENGINE 0x5 +/* enum: DPDK RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_DPDK 0x6 +/* enum: BIST RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_BIST 0x10a +/* enum: RXDP Test firmware image 1 */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101 +/* enum: RXDP Test firmware image 2 */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102 +/* enum: RXDP Test firmware image 3 */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103 +/* enum: RXDP Test firmware image 4 */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104 +/* enum: RXDP Test firmware image 5 */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_BACKPRESSURE 0x105 +/* enum: RXDP Test firmware image 6 */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106 +/* enum: RXDP Test firmware image 7 */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107 +/* enum: RXDP Test firmware image 8 */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_DISABLE_DL 0x108 +/* enum: RXDP Test firmware image 9 */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b +/* enum: RXDP Test firmware image 10 */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_SLOW 0x10c +/* TxDPCPU firmware id. 
*/ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_DPCPU_FW_ID_OFST 6 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_DPCPU_FW_ID_LEN 2 +/* enum: Standard TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP 0x0 +/* enum: Low latency TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_LOW_LATENCY 0x1 +/* enum: High packet rate TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_HIGH_PACKET_RATE 0x3 +/* enum: Rules engine TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_RULES_ENGINE 0x5 +/* enum: DPDK TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_DPDK 0x6 +/* enum: BIST TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_BIST 0x12d +/* enum: TXDP Test firmware image 1 */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_TEST_FW_TSO_EDIT 0x101 +/* enum: TXDP Test firmware image 2 */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102 +/* enum: TXDP CSR bus test firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_TEST_FW_CSR 0x103 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_OFST 8 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_REV_OFST 8 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_REV_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_REV_WIDTH 12 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_TYPE_OFST 8 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_TYPE_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4 +/* enum: reserved value - do not use (may indicate alternative interpretation + * of REV field in future) + */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_RESERVED 0x0 +/* enum: Trivial RX PD firmware for early Huntington development (Huntington + * development only) + */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1 +/* enum: RX PD firmware for telemetry prototyping (Medford2 development only) + */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_TESTFW_TELEMETRY 0x1 +/* enum: RX PD firmware with approximately Siena-compatible behaviour + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2 +/* enum: Full featured RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3 +/* enum: (deprecated original name for the FULL_FEATURED variant) */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_VSWITCH 0x3 +/* enum: siena_compat variant RX PD firmware using PM rather than MAC + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +/* enum: Low latency RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5 +/* enum: Packed stream RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6 +/* enum: RX PD firmware handling layer 2 only for high packet rate performance + * tests (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7 +/* enum: Rules engine RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8 +/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_L3XUDP 0x9 +/* enum: DPDK RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_DPDK 0xa +/* enum: RX PD firmware for GUE parsing prototype (Medford development 
only) */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe +/* enum: RX PD firmware parsing but not filtering network overlay tunnel + * encapsulations (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_OFST 10 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_REV_OFST 10 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_REV_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_REV_WIDTH 12 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_TYPE_OFST 10 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_TYPE_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4 +/* enum: reserved value - do not use (may indicate alternative interpretation + * of REV field in future) + */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_RESERVED 0x0 +/* enum: Trivial TX PD firmware for early Huntington development (Huntington + * development only) + */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1 +/* enum: TX PD firmware for telemetry prototyping (Medford2 development only) + */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_TESTFW_TELEMETRY 0x1 +/* enum: TX PD firmware with approximately Siena-compatible behaviour + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2 +/* enum: Full featured TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3 +/* enum: (deprecated original name for the FULL_FEATURED variant) */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_VSWITCH 0x3 +/* enum: siena_compat variant TX PD firmware using PM rather than MAC + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */ +/* enum: TX PD firmware handling layer 2 only for high packet rate performance + * tests (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7 +/* enum: Rules engine TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8 +/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_L3XUDP 0x9 +/* enum: DPDK TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_DPDK 0xa +/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe +/* Hardware capabilities of NIC */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_HW_CAPABILITIES_OFST 12 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_HW_CAPABILITIES_LEN 4 +/* Licensed capabilities */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_LICENSE_CAPABILITIES_OFST 16 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_LICENSE_CAPABILITIES_LEN 4 +/* Second word of flags. Not present on older firmware (check the length). 
*/ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_FLAGS2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_FLAGS2_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_ENCAP_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_ENCAP_LBN 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_ENCAP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVQ_TIMER_CTRL_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVQ_TIMER_CTRL_LBN 2 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVQ_TIMER_CTRL_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVENT_CUT_THROUGH_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVENT_CUT_THROUGH_LBN 3 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVENT_CUT_THROUGH_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_CUT_THROUGH_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_CUT_THROUGH_LBN 4 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_CUT_THROUGH_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_VFIFO_ULL_MODE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_VFIFO_ULL_MODE_LBN 5 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_VFIFO_ULL_MODE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_MAC_STATS_40G_TX_SIZE_BINS_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_EVQ_TYPE_SUPPORTED_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_EVQ_TYPE_SUPPORTED_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_EVQ_TYPE_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_EVQ_V2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_EVQ_V2_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_EVQ_V2_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MAC_TIMESTAMPING_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MAC_TIMESTAMPING_LBN 8 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MAC_TIMESTAMPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TIMESTAMP_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TIMESTAMP_LBN 9 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TIMESTAMP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_SNIFF_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_SNIFF_LBN 10 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_SNIFF_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_SNIFF_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_SNIFF_LBN 11 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_SNIFF_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_BACKGROUND_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_BACKGROUND_LBN 13 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_BACKGROUND_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_DB_RETURN_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_DB_RETURN_LBN 14 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_DB_RETURN_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_CTPIO_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_CTPIO_LBN 15 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_CTPIO_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TSA_SUPPORT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TSA_SUPPORT_LBN 16 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TSA_SUPPORT_WIDTH 1 +#define 
MC_CMD_GET_CAPABILITIES_V3_OUT_TSA_BOUND_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TSA_BOUND_LBN 17 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TSA_BOUND_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_SF_ADAPTER_AUTHENTICATION_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_FILTER_ACTION_FLAG_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_FILTER_ACTION_FLAG_LBN 19 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_FILTER_ACTION_FLAG_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_FILTER_ACTION_MARK_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_FILTER_ACTION_MARK_LBN 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_FILTER_ACTION_MARK_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_EQUAL_STRIDE_SUPER_BUFFER_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_EQUAL_STRIDE_PACKED_STREAM_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_L3XUDP_SUPPORT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_L3XUDP_SUPPORT_LBN 22 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_L3XUDP_SUPPORT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_FW_SUBVARIANT_NO_TX_CSUM_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_SPREADING_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_SPREADING_LBN 24 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_SPREADING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_HLB_IDLE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_HLB_IDLE_LBN 25 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_HLB_IDLE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_RXQ_NO_CONT_EV_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_RXQ_NO_CONT_EV_LBN 26 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_RXQ_NO_CONT_EV_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_RXQ_WITH_BUFFER_SIZE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_RXQ_WITH_BUFFER_SIZE_LBN 27 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_RXQ_WITH_BUFFER_SIZE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_BUNDLE_UPDATE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_BUNDLE_UPDATE_LBN 28 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_BUNDLE_UPDATE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V3_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V3_LBN 29 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V3_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_DYNAMIC_SENSORS_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_DYNAMIC_SENSORS_LBN 30 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_DYNAMIC_SENSORS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_LBN 31 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_WIDTH 1 +/* Number of FATSOv2 contexts per datapath supported by this NIC (when + * TX_TSO_V2 == 1). Not present on older firmware (check the length). 
+ */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2 +/* One byte per PF containing the number of the external port assigned to this + * PF, indexed by PF number. Special values indicate that a PF is either not + * present or not assigned. + */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16 +/* enum: The caller is not permitted to access information on this PF. */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_ACCESS_NOT_PERMITTED 0xff +/* enum: PF does not exist. */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_PF_NOT_PRESENT 0xfe +/* enum: PF does exist but is not assigned to any external port. */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_PF_NOT_ASSIGNED 0xfd +/* enum: This value indicates that PF is assigned, but it cannot be expressed + * in this field. It is intended for a possible future situation where a more + * complex scheme of PFs to ports mapping is being used. The future driver + * should look for a new field supporting the new scheme. The current/old + * driver should treat this value as PF_NOT_ASSIGNED. + */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc +/* One byte per PF containing the number of its VFs, indexed by PF number. A + * special value indicates that a PF is not present. + */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VFS_PER_PF_OFST 42 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VFS_PER_PF_LEN 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VFS_PER_PF_NUM 16 +/* enum: The caller is not permitted to access information on this PF. */ +/* MC_CMD_GET_CAPABILITIES_V3_OUT_ACCESS_NOT_PERMITTED 0xff */ +/* enum: PF does not exist. */ +/* MC_CMD_GET_CAPABILITIES_V3_OUT_PF_NOT_PRESENT 0xfe */ +/* Number of VIs available for external ports 0-3. For devices with more than + * four ports, the remainder are in NUM_VIS_PER_PORT2 in + * GET_CAPABILITIES_V12_OUT. + */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VIS_PER_PORT_OFST 58 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VIS_PER_PORT_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VIS_PER_PORT_NUM 4 +/* Size of RX descriptor cache expressed as binary logarithm The actual size + * equals (2 ^ RX_DESC_CACHE_SIZE) + */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DESC_CACHE_SIZE_OFST 66 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DESC_CACHE_SIZE_LEN 1 +/* Size of TX descriptor cache expressed as binary logarithm The actual size + * equals (2 ^ TX_DESC_CACHE_SIZE) + */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_DESC_CACHE_SIZE_OFST 67 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_DESC_CACHE_SIZE_LEN 1 +/* Total number of available PIO buffers */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_PIO_BUFFS_OFST 68 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_PIO_BUFFS_LEN 2 +/* Size of a single PIO buffer */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_SIZE_PIO_BUFF_OFST 70 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_SIZE_PIO_BUFF_LEN 2 +/* On chips later than Medford the amount of address space assigned to each VI + * is configurable. This is a global setting that the driver must query to + * discover the VI to address mapping. Cut-through PIO (CTPIO) is not available + * with 8k VI windows. + */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_OFST 72 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_LEN 1 +/* enum: Each VI occupies 8k as on Huntington and Medford. 
PIO is at offset 4k. + * CTPIO is not mapped. + */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_8K 0x0 +/* enum: Each VI occupies 16k. PIO is at offset 4k. CTPIO is at offset 12k. */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_16K 0x1 +/* enum: Each VI occupies 64k. PIO is at offset 4k. CTPIO is at offset 12k. */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_64K 0x2 +/* Number of vFIFOs per adapter that can be used for VFIFO Stuffing + * (SF-115995-SW) in the present configuration of firmware and port mode. + */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VFIFO_STUFFING_NUM_VFIFOS_OFST 73 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VFIFO_STUFFING_NUM_VFIFOS_LEN 1 +/* Number of buffers per adapter that can be used for VFIFO Stuffing + * (SF-115995-SW) in the present configuration of firmware and port mode. + */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_OFST 74 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_LEN 2 + +/* MC_CMD_GET_CAPABILITIES_V4_OUT msgresponse */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_LEN 78 +/* First word of flags. */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS1_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS1_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VPORT_RECONFIGURE_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VPORT_RECONFIGURE_LBN 3 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VPORT_RECONFIGURE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_STRIPING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_STRIPING_LBN 4 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_STRIPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VADAPTOR_QUERY_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VADAPTOR_QUERY_LBN 5 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VADAPTOR_QUERY_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVB_PORT_VLAN_RESTRICT_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_DRV_ATTACH_PREBOOT_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_DRV_ATTACH_PREBOOT_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_DRV_ATTACH_PREBOOT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_FORCE_EVENT_MERGING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_FORCE_EVENT_MERGING_LBN 8 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_SET_MAC_ENHANCED_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_SET_MAC_ENHANCED_LBN 9 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_SET_MAC_ENHANCED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MAC_SECURITY_FILTERING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MAC_SECURITY_FILTERING_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_ADDITIONAL_RSS_MODES_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_ADDITIONAL_RSS_MODES_LBN 13 +#define 
MC_CMD_GET_CAPABILITIES_V4_OUT_ADDITIONAL_RSS_MODES_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_QBB_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_QBB_LBN 14 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_QBB_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PACKED_STREAM_VAR_BUFFERS_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_RSS_LIMITED_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_RSS_LIMITED_LBN 16 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_RSS_LIMITED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PACKED_STREAM_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PACKED_STREAM_LBN 17 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PACKED_STREAM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_INCLUDE_FCS_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_INCLUDE_FCS_LBN 18 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_INCLUDE_FCS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_VLAN_INSERTION_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_VLAN_INSERTION_LBN 19 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_VLAN_INSERTION_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_VLAN_STRIPPING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_VLAN_STRIPPING_LBN 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_VLAN_STRIPPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PREFIX_LEN_0_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PREFIX_LEN_0_LBN 22 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PREFIX_LEN_0_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PREFIX_LEN_14_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PREFIX_LEN_14_LBN 23 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PREFIX_LEN_14_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_TIMESTAMP_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_TIMESTAMP_LBN 24 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_TIMESTAMP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_BATCHING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_BATCHING_LBN 25 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_BATCHING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCAST_FILTER_CHAINING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCAST_FILTER_CHAINING_LBN 26 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCAST_FILTER_CHAINING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_PM_AND_RXDP_COUNTERS_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_PM_AND_RXDP_COUNTERS_LBN 27 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DISABLE_SCATTER_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DISABLE_SCATTER_LBN 28 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DISABLE_SCATTER_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MCAST_UDP_LOOPBACK_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVB_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVB_LBN 30 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVB_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VXLAN_NVGRE_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VXLAN_NVGRE_LBN 31 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VXLAN_NVGRE_WIDTH 1 +/* RxDPCPU firmware id. 
*/ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DPCPU_FW_ID_OFST 4 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DPCPU_FW_ID_LEN 2 /* enum: Standard RXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP 0x0 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP 0x0 /* enum: Low latency RXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_LOW_LATENCY 0x1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_LOW_LATENCY 0x1 /* enum: Packed stream RXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_PACKED_STREAM 0x2 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_PACKED_STREAM 0x2 /* enum: Rules engine RXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_RULES_ENGINE 0x5 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_RULES_ENGINE 0x5 /* enum: DPDK RXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_DPDK 0x6 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_DPDK 0x6 /* enum: BIST RXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_BIST 0x10a +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_BIST 0x10a /* enum: RXDP Test firmware image 1 */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101 /* enum: RXDP Test firmware image 2 */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102 /* enum: RXDP Test firmware image 3 */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103 /* enum: RXDP Test firmware image 4 */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104 /* enum: RXDP Test firmware image 5 */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_BACKPRESSURE 0x105 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_BACKPRESSURE 0x105 /* enum: RXDP Test firmware image 6 */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106 /* enum: RXDP Test firmware image 7 */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107 /* enum: RXDP Test firmware image 8 */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_DISABLE_DL 0x108 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_DISABLE_DL 0x108 /* enum: RXDP Test firmware image 9 */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b /* enum: RXDP Test firmware image 10 */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_SLOW 0x10c +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_SLOW 0x10c /* TxDPCPU firmware id. 
*/ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_DPCPU_FW_ID_OFST 6 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_DPCPU_FW_ID_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_DPCPU_FW_ID_OFST 6 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_DPCPU_FW_ID_LEN 2 /* enum: Standard TXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP 0x0 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP 0x0 /* enum: Low latency TXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_LOW_LATENCY 0x1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_LOW_LATENCY 0x1 /* enum: High packet rate TXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_HIGH_PACKET_RATE 0x3 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_HIGH_PACKET_RATE 0x3 /* enum: Rules engine TXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_RULES_ENGINE 0x5 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_RULES_ENGINE 0x5 /* enum: DPDK TXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_DPDK 0x6 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_DPDK 0x6 /* enum: BIST TXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_BIST 0x12d +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_BIST 0x12d /* enum: TXDP Test firmware image 1 */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_TEST_FW_TSO_EDIT 0x101 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_TEST_FW_TSO_EDIT 0x101 /* enum: TXDP Test firmware image 2 */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102 /* enum: TXDP CSR bus test firmware */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_TEST_FW_CSR 0x103 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_OFST 8 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_LEN 2 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_REV_OFST 8 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_REV_LBN 0 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_REV_WIDTH 12 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_TYPE_OFST 8 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_TYPE_LBN 12 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_TEST_FW_CSR 0x103 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_OFST 8 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_REV_OFST 8 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_REV_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_REV_WIDTH 12 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_TYPE_OFST 8 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_TYPE_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4 /* enum: reserved value - do not use (may indicate alternative interpretation * of REV field in future) */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_RESERVED 0x0 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_RESERVED 0x0 /* enum: Trivial RX PD firmware for early Huntington development (Huntington * development only) */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1 /* enum: RX PD firmware for telemetry prototyping (Medford2 development only) */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_TESTFW_TELEMETRY 0x1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_TESTFW_TELEMETRY 0x1 /* enum: RX PD firmware with approximately Siena-compatible behaviour * (Huntington development only) 
*/ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2 /* enum: Full featured RX PD production firmware */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3 /* enum: (deprecated original name for the FULL_FEATURED variant) */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_VSWITCH 0x3 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_VSWITCH 0x3 /* enum: siena_compat variant RX PD firmware using PM rather than MAC * (Huntington development only) */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 /* enum: Low latency RX PD production firmware */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum: Packed stream RX PD production firmware */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6 /* enum: RX PD firmware handling layer 2 only for high packet rate performance * tests (Medford development only) */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7 /* enum: Rules engine RX PD production firmware */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8 /* enum: Custom firmware variant (see SF-119495-PD and bug69716) */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_L3XUDP 0x9 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_L3XUDP 0x9 /* enum: DPDK RX PD production firmware */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_DPDK 0xa +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_DPDK 0xa /* enum: RX PD firmware for GUE parsing prototype (Medford development only) */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe /* enum: RX PD firmware parsing but not filtering network overlay tunnel * encapsulations (Medford development only) */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf -#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_OFST 10 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_LEN 2 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_REV_OFST 10 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_REV_LBN 0 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_REV_WIDTH 12 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_TYPE_OFST 10 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_TYPE_LBN 12 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_OFST 10 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_REV_OFST 10 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_REV_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_REV_WIDTH 12 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_TYPE_OFST 10 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_TYPE_LBN 12 +#define 
MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4 /* enum: reserved value - do not use (may indicate alternative interpretation * of REV field in future) */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_RESERVED 0x0 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_RESERVED 0x0 /* enum: Trivial TX PD firmware for early Huntington development (Huntington * development only) */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1 /* enum: TX PD firmware for telemetry prototyping (Medford2 development only) */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_TESTFW_TELEMETRY 0x1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_TESTFW_TELEMETRY 0x1 /* enum: TX PD firmware with approximately Siena-compatible behaviour * (Huntington development only) */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2 /* enum: Full featured TX PD production firmware */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3 /* enum: (deprecated original name for the FULL_FEATURED variant) */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_VSWITCH 0x3 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_VSWITCH 0x3 /* enum: siena_compat variant TX PD firmware using PM rather than MAC * (Huntington development only) */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */ /* enum: TX PD firmware handling layer 2 only for high packet rate performance * tests (Medford development only) */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7 /* enum: Rules engine TX PD production firmware */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8 /* enum: Custom firmware variant (see SF-119495-PD and bug69716) */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_L3XUDP 0x9 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_L3XUDP 0x9 /* enum: DPDK TX PD production firmware */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_DPDK 0xa +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_DPDK 0xa /* enum: RX PD firmware for GUE parsing prototype (Medford development only) */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe /* Hardware capabilities of NIC */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_HW_CAPABILITIES_OFST 12 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_HW_CAPABILITIES_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_HW_CAPABILITIES_OFST 12 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_HW_CAPABILITIES_LEN 4 /* Licensed capabilities */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_LICENSE_CAPABILITIES_OFST 16 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_LICENSE_CAPABILITIES_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_LICENSE_CAPABILITIES_OFST 16 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_LICENSE_CAPABILITIES_LEN 4 /* Second word of flags. Not present on older firmware (check the length). 
*/ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS2_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS2_LEN 4 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_LBN 0 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_ENCAP_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_ENCAP_LBN 1 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_ENCAP_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVQ_TIMER_CTRL_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVQ_TIMER_CTRL_LBN 2 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVQ_TIMER_CTRL_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVENT_CUT_THROUGH_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVENT_CUT_THROUGH_LBN 3 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVENT_CUT_THROUGH_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_CUT_THROUGH_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_CUT_THROUGH_LBN 4 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_CUT_THROUGH_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_VFIFO_ULL_MODE_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_VFIFO_ULL_MODE_LBN 5 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_VFIFO_ULL_MODE_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_MAC_STATS_40G_TX_SIZE_BINS_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_EVQ_TYPE_SUPPORTED_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_EVQ_TYPE_SUPPORTED_LBN 7 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_EVQ_TYPE_SUPPORTED_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_EVQ_V2_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_EVQ_V2_LBN 7 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_EVQ_V2_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MAC_TIMESTAMPING_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MAC_TIMESTAMPING_LBN 8 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MAC_TIMESTAMPING_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TIMESTAMP_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TIMESTAMP_LBN 9 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TIMESTAMP_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_SNIFF_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_SNIFF_LBN 10 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_SNIFF_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_SNIFF_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_SNIFF_LBN 11 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_SNIFF_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_BACKGROUND_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_BACKGROUND_LBN 13 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_BACKGROUND_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_DB_RETURN_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_DB_RETURN_LBN 14 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_DB_RETURN_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_CTPIO_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_CTPIO_LBN 15 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_CTPIO_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_TSA_SUPPORT_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_TSA_SUPPORT_LBN 16 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_TSA_SUPPORT_WIDTH 1 -#define 
MC_CMD_GET_CAPABILITIES_V2_OUT_TSA_BOUND_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_TSA_BOUND_LBN 17 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_TSA_BOUND_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_SF_ADAPTER_AUTHENTICATION_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_FILTER_ACTION_FLAG_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_FILTER_ACTION_FLAG_LBN 19 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_FILTER_ACTION_FLAG_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_FILTER_ACTION_MARK_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_FILTER_ACTION_MARK_LBN 20 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_FILTER_ACTION_MARK_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_EQUAL_STRIDE_SUPER_BUFFER_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_EQUAL_STRIDE_PACKED_STREAM_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_L3XUDP_SUPPORT_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_L3XUDP_SUPPORT_LBN 22 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_L3XUDP_SUPPORT_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_FW_SUBVARIANT_NO_TX_CSUM_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_VI_SPREADING_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_VI_SPREADING_LBN 24 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_VI_SPREADING_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_HLB_IDLE_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_HLB_IDLE_LBN 25 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_HLB_IDLE_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_RXQ_NO_CONT_EV_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_RXQ_NO_CONT_EV_LBN 26 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_RXQ_NO_CONT_EV_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_RXQ_WITH_BUFFER_SIZE_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_RXQ_WITH_BUFFER_SIZE_LBN 27 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_RXQ_WITH_BUFFER_SIZE_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_BUNDLE_UPDATE_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_BUNDLE_UPDATE_LBN 28 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_BUNDLE_UPDATE_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V3_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V3_LBN 29 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V3_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_DYNAMIC_SENSORS_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_DYNAMIC_SENSORS_LBN 30 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_DYNAMIC_SENSORS_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_LBN 31 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS2_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_WIDTH 1 +#define 
MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_ENCAP_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_ENCAP_LBN 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_ENCAP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVQ_TIMER_CTRL_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVQ_TIMER_CTRL_LBN 2 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVQ_TIMER_CTRL_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVENT_CUT_THROUGH_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVENT_CUT_THROUGH_LBN 3 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVENT_CUT_THROUGH_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_CUT_THROUGH_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_CUT_THROUGH_LBN 4 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_CUT_THROUGH_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_VFIFO_ULL_MODE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_VFIFO_ULL_MODE_LBN 5 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_VFIFO_ULL_MODE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_40G_TX_SIZE_BINS_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_EVQ_TYPE_SUPPORTED_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_EVQ_TYPE_SUPPORTED_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_EVQ_TYPE_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_EVQ_V2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_EVQ_V2_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_EVQ_V2_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MAC_TIMESTAMPING_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MAC_TIMESTAMPING_LBN 8 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MAC_TIMESTAMPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TIMESTAMP_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TIMESTAMP_LBN 9 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TIMESTAMP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_SNIFF_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_SNIFF_LBN 10 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_SNIFF_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_SNIFF_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_SNIFF_LBN 11 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_SNIFF_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCDI_BACKGROUND_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCDI_BACKGROUND_LBN 13 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCDI_BACKGROUND_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCDI_DB_RETURN_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCDI_DB_RETURN_LBN 14 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCDI_DB_RETURN_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_CTPIO_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_CTPIO_LBN 15 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_CTPIO_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TSA_SUPPORT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TSA_SUPPORT_LBN 16 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TSA_SUPPORT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TSA_BOUND_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TSA_BOUND_LBN 17 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TSA_BOUND_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_SF_ADAPTER_AUTHENTICATION_OFST 20 +#define 
MC_CMD_GET_CAPABILITIES_V4_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_FILTER_ACTION_FLAG_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_FILTER_ACTION_FLAG_LBN 19 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_FILTER_ACTION_FLAG_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_FILTER_ACTION_MARK_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_FILTER_ACTION_MARK_LBN 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_FILTER_ACTION_MARK_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_EQUAL_STRIDE_SUPER_BUFFER_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_EQUAL_STRIDE_PACKED_STREAM_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_L3XUDP_SUPPORT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_L3XUDP_SUPPORT_LBN 22 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_L3XUDP_SUPPORT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_FW_SUBVARIANT_NO_TX_CSUM_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_SPREADING_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_SPREADING_LBN 24 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_SPREADING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_HLB_IDLE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_HLB_IDLE_LBN 25 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_HLB_IDLE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_RXQ_NO_CONT_EV_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_RXQ_NO_CONT_EV_LBN 26 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_RXQ_NO_CONT_EV_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_RXQ_WITH_BUFFER_SIZE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_RXQ_WITH_BUFFER_SIZE_LBN 27 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_RXQ_WITH_BUFFER_SIZE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_BUNDLE_UPDATE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_BUNDLE_UPDATE_LBN 28 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_BUNDLE_UPDATE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V3_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V3_LBN 29 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V3_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_DYNAMIC_SENSORS_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_DYNAMIC_SENSORS_LBN 30 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_DYNAMIC_SENSORS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_LBN 31 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_WIDTH 1 /* Number of FATSOv2 contexts per datapath supported by this NIC (when * TX_TSO_V2 == 1). Not present on older firmware (check the length). */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2 /* One byte per PF containing the number of the external port assigned to this * PF, indexed by PF number. 
Special values indicate that a PF is either not * present or not assigned. */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16 /* enum: The caller is not permitted to access information on this PF. */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_ACCESS_NOT_PERMITTED 0xff +#define MC_CMD_GET_CAPABILITIES_V4_OUT_ACCESS_NOT_PERMITTED 0xff /* enum: PF does not exist. */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_PF_NOT_PRESENT 0xfe +#define MC_CMD_GET_CAPABILITIES_V4_OUT_PF_NOT_PRESENT 0xfe /* enum: PF does exist but is not assigned to any external port. */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_PF_NOT_ASSIGNED 0xfd +#define MC_CMD_GET_CAPABILITIES_V4_OUT_PF_NOT_ASSIGNED 0xfd /* enum: This value indicates that PF is assigned, but it cannot be expressed * in this field. It is intended for a possible future situation where a more * complex scheme of PFs to ports mapping is being used. The future driver * should look for a new field supporting the new scheme. The current/old * driver should treat this value as PF_NOT_ASSIGNED. */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc +#define MC_CMD_GET_CAPABILITIES_V4_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc /* One byte per PF containing the number of its VFs, indexed by PF number. A * special value indicates that a PF is not present. */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VFS_PER_PF_OFST 42 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VFS_PER_PF_LEN 1 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VFS_PER_PF_NUM 16 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VFS_PER_PF_OFST 42 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VFS_PER_PF_LEN 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VFS_PER_PF_NUM 16 /* enum: The caller is not permitted to access information on this PF. */ -/* MC_CMD_GET_CAPABILITIES_V2_OUT_ACCESS_NOT_PERMITTED 0xff */ +/* MC_CMD_GET_CAPABILITIES_V4_OUT_ACCESS_NOT_PERMITTED 0xff */ /* enum: PF does not exist. */ -/* MC_CMD_GET_CAPABILITIES_V2_OUT_PF_NOT_PRESENT 0xfe */ -/* Number of VIs available for each external port */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VIS_PER_PORT_OFST 58 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VIS_PER_PORT_LEN 2 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VIS_PER_PORT_NUM 4 +/* MC_CMD_GET_CAPABILITIES_V4_OUT_PF_NOT_PRESENT 0xfe */ +/* Number of VIs available for external ports 0-3. For devices with more than + * four ports, the remainder are in NUM_VIS_PER_PORT2 in + * GET_CAPABILITIES_V12_OUT. 
+ */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VIS_PER_PORT_OFST 58 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VIS_PER_PORT_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VIS_PER_PORT_NUM 4 /* Size of RX descriptor cache expressed as a binary logarithm. The actual size * equals (2 ^ RX_DESC_CACHE_SIZE) */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DESC_CACHE_SIZE_OFST 66 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DESC_CACHE_SIZE_LEN 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DESC_CACHE_SIZE_OFST 66 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DESC_CACHE_SIZE_LEN 1 /* Size of TX descriptor cache expressed as a binary logarithm. The actual size * equals (2 ^ TX_DESC_CACHE_SIZE) */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_DESC_CACHE_SIZE_OFST 67 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_DESC_CACHE_SIZE_LEN 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_DESC_CACHE_SIZE_OFST 67 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_DESC_CACHE_SIZE_LEN 1 /* Total number of available PIO buffers */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_PIO_BUFFS_OFST 68 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_PIO_BUFFS_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_PIO_BUFFS_OFST 68 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_PIO_BUFFS_LEN 2 /* Size of a single PIO buffer */ -#define MC_CMD_GET_CAPABILITIES_V2_OUT_SIZE_PIO_BUFF_OFST 70 -#define MC_CMD_GET_CAPABILITIES_V2_OUT_SIZE_PIO_BUFF_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_SIZE_PIO_BUFF_OFST 70 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_SIZE_PIO_BUFF_LEN 2 +/* On chips later than Medford the amount of address space assigned to each VI + * is configurable. This is a global setting that the driver must query to + * discover the VI to address mapping. Cut-through PIO (CTPIO) is not available + * with 8k VI windows. + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_OFST 72 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_LEN 1 +/* enum: Each VI occupies 8k as on Huntington and Medford. PIO is at offset 4k. + * CTPIO is not mapped. + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_8K 0x0 +/* enum: Each VI occupies 16k. PIO is at offset 4k. CTPIO is at offset 12k. */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_16K 0x1 +/* enum: Each VI occupies 64k. PIO is at offset 4k. CTPIO is at offset 12k. */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_64K 0x2 +/* Number of vFIFOs per adapter that can be used for VFIFO Stuffing + * (SF-115995-SW) in the present configuration of firmware and port mode. + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VFIFO_STUFFING_NUM_VFIFOS_OFST 73 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VFIFO_STUFFING_NUM_VFIFOS_LEN 1 +/* Number of buffers per adapter that can be used for VFIFO Stuffing + * (SF-115995-SW) in the present configuration of firmware and port mode. + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_OFST 74 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_LEN 2 +/* Entry count in the MAC stats array, including the final GENERATION_END + * entry. For MAC stats DMA, drivers should allocate a buffer large enough to + * hold at least this many 64-bit stats values, if they wish to receive all + * available stats. If the buffer is shorter than MAC_STATS_NUM_STATS * 8, the + * stats array returned will be truncated.
+ */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS_OFST 76 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS_LEN 2 -/* MC_CMD_GET_CAPABILITIES_V3_OUT msgresponse */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_LEN 76 +/* MC_CMD_GET_CAPABILITIES_V5_OUT msgresponse */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_LEN 84 /* First word of flags. */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_FLAGS1_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_FLAGS1_LEN 4 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_VPORT_RECONFIGURE_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_VPORT_RECONFIGURE_LBN 3 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_VPORT_RECONFIGURE_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_STRIPING_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_STRIPING_LBN 4 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_STRIPING_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_VADAPTOR_QUERY_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_VADAPTOR_QUERY_LBN 5 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_VADAPTOR_QUERY_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVB_PORT_VLAN_RESTRICT_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_DRV_ATTACH_PREBOOT_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_DRV_ATTACH_PREBOOT_LBN 7 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_DRV_ATTACH_PREBOOT_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_FORCE_EVENT_MERGING_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_FORCE_EVENT_MERGING_LBN 8 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_SET_MAC_ENHANCED_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_SET_MAC_ENHANCED_LBN 9 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_SET_MAC_ENHANCED_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MAC_SECURITY_FILTERING_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MAC_SECURITY_FILTERING_LBN 12 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_ADDITIONAL_RSS_MODES_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_ADDITIONAL_RSS_MODES_LBN 13 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_ADDITIONAL_RSS_MODES_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_QBB_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_QBB_LBN 14 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_QBB_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PACKED_STREAM_VAR_BUFFERS_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_RSS_LIMITED_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_RSS_LIMITED_LBN 16 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_RSS_LIMITED_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PACKED_STREAM_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PACKED_STREAM_LBN 17 -#define 
MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PACKED_STREAM_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_INCLUDE_FCS_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_INCLUDE_FCS_LBN 18 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_INCLUDE_FCS_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_VLAN_INSERTION_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_VLAN_INSERTION_LBN 19 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_VLAN_INSERTION_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_VLAN_STRIPPING_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_VLAN_STRIPPING_LBN 20 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_VLAN_STRIPPING_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_LBN 21 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PREFIX_LEN_0_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PREFIX_LEN_0_LBN 22 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PREFIX_LEN_0_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PREFIX_LEN_14_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PREFIX_LEN_14_LBN 23 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PREFIX_LEN_14_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_TIMESTAMP_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_TIMESTAMP_LBN 24 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_TIMESTAMP_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_BATCHING_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_BATCHING_LBN 25 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_BATCHING_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCAST_FILTER_CHAINING_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCAST_FILTER_CHAINING_LBN 26 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCAST_FILTER_CHAINING_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_PM_AND_RXDP_COUNTERS_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_PM_AND_RXDP_COUNTERS_LBN 27 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DISABLE_SCATTER_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DISABLE_SCATTER_LBN 28 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DISABLE_SCATTER_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MCAST_UDP_LOOPBACK_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVB_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVB_LBN 30 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVB_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_VXLAN_NVGRE_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_VXLAN_NVGRE_LBN 31 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_VXLAN_NVGRE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_FLAGS1_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_FLAGS1_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VPORT_RECONFIGURE_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VPORT_RECONFIGURE_LBN 3 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VPORT_RECONFIGURE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_STRIPING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_STRIPING_LBN 4 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_STRIPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VADAPTOR_QUERY_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VADAPTOR_QUERY_LBN 5 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VADAPTOR_QUERY_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVB_PORT_VLAN_RESTRICT_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6 +#define 
MC_CMD_GET_CAPABILITIES_V5_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_DRV_ATTACH_PREBOOT_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_DRV_ATTACH_PREBOOT_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_DRV_ATTACH_PREBOOT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_FORCE_EVENT_MERGING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_FORCE_EVENT_MERGING_LBN 8 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_SET_MAC_ENHANCED_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_SET_MAC_ENHANCED_LBN 9 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_SET_MAC_ENHANCED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MAC_SECURITY_FILTERING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MAC_SECURITY_FILTERING_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_ADDITIONAL_RSS_MODES_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_ADDITIONAL_RSS_MODES_LBN 13 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_ADDITIONAL_RSS_MODES_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_QBB_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_QBB_LBN 14 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_QBB_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PACKED_STREAM_VAR_BUFFERS_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_RSS_LIMITED_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_RSS_LIMITED_LBN 16 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_RSS_LIMITED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PACKED_STREAM_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PACKED_STREAM_LBN 17 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PACKED_STREAM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_INCLUDE_FCS_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_INCLUDE_FCS_LBN 18 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_INCLUDE_FCS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_VLAN_INSERTION_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_VLAN_INSERTION_LBN 19 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_VLAN_INSERTION_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_VLAN_STRIPPING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_VLAN_STRIPPING_LBN 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_VLAN_STRIPPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PREFIX_LEN_0_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PREFIX_LEN_0_LBN 22 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PREFIX_LEN_0_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PREFIX_LEN_14_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PREFIX_LEN_14_LBN 23 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PREFIX_LEN_14_WIDTH 1 +#define 
MC_CMD_GET_CAPABILITIES_V5_OUT_RX_TIMESTAMP_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_TIMESTAMP_LBN 24 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_TIMESTAMP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_BATCHING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_BATCHING_LBN 25 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_BATCHING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_MCAST_FILTER_CHAINING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_MCAST_FILTER_CHAINING_LBN 26 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_MCAST_FILTER_CHAINING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_PM_AND_RXDP_COUNTERS_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_PM_AND_RXDP_COUNTERS_LBN 27 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_DISABLE_SCATTER_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_DISABLE_SCATTER_LBN 28 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_DISABLE_SCATTER_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MCAST_UDP_LOOPBACK_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVB_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVB_LBN 30 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVB_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VXLAN_NVGRE_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VXLAN_NVGRE_LBN 31 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VXLAN_NVGRE_WIDTH 1 /* RxDPCPU firmware id. */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DPCPU_FW_ID_OFST 4 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DPCPU_FW_ID_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_DPCPU_FW_ID_OFST 4 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_DPCPU_FW_ID_LEN 2 /* enum: Standard RXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP 0x0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP 0x0 /* enum: Low latency RXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_LOW_LATENCY 0x1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_LOW_LATENCY 0x1 /* enum: Packed stream RXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_PACKED_STREAM 0x2 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_PACKED_STREAM 0x2 /* enum: Rules engine RXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_RULES_ENGINE 0x5 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_RULES_ENGINE 0x5 /* enum: DPDK RXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_DPDK 0x6 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_DPDK 0x6 /* enum: BIST RXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_BIST 0x10a +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_BIST 0x10a /* enum: RXDP Test firmware image 1 */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101 /* enum: RXDP Test firmware image 2 */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102 /* enum: RXDP Test firmware image 3 */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103 /* enum: RXDP Test firmware image 4 */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104 /* enum: RXDP Test firmware image 5 */ -#define 
MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_BACKPRESSURE 0x105 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_BACKPRESSURE 0x105 /* enum: RXDP Test firmware image 6 */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106 /* enum: RXDP Test firmware image 7 */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107 /* enum: RXDP Test firmware image 8 */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_DISABLE_DL 0x108 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_DISABLE_DL 0x108 /* enum: RXDP Test firmware image 9 */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b /* enum: RXDP Test firmware image 10 */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_SLOW 0x10c +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_SLOW 0x10c /* TxDPCPU firmware id. */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_DPCPU_FW_ID_OFST 6 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_DPCPU_FW_ID_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_DPCPU_FW_ID_OFST 6 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_DPCPU_FW_ID_LEN 2 /* enum: Standard TXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP 0x0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP 0x0 /* enum: Low latency TXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_LOW_LATENCY 0x1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_LOW_LATENCY 0x1 /* enum: High packet rate TXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_HIGH_PACKET_RATE 0x3 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_HIGH_PACKET_RATE 0x3 /* enum: Rules engine TXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_RULES_ENGINE 0x5 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_RULES_ENGINE 0x5 /* enum: DPDK TXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_DPDK 0x6 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_DPDK 0x6 /* enum: BIST TXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_BIST 0x12d +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_BIST 0x12d /* enum: TXDP Test firmware image 1 */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_TEST_FW_TSO_EDIT 0x101 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_TEST_FW_TSO_EDIT 0x101 /* enum: TXDP Test firmware image 2 */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102 /* enum: TXDP CSR bus test firmware */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_TEST_FW_CSR 0x103 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_OFST 8 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_LEN 2 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_REV_OFST 8 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_REV_LBN 0 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_REV_WIDTH 12 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_TYPE_OFST 8 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_TYPE_LBN 12 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_TEST_FW_CSR 0x103 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_VERSION_OFST 8 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_VERSION_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_VERSION_REV_OFST 8 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_VERSION_REV_LBN 0 +#define 
MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_VERSION_REV_WIDTH 12 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_VERSION_TYPE_OFST 8 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_VERSION_TYPE_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4 /* enum: reserved value - do not use (may indicate alternative interpretation * of REV field in future) */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_RESERVED 0x0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_RESERVED 0x0 /* enum: Trivial RX PD firmware for early Huntington development (Huntington * development only) */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1 /* enum: RX PD firmware for telemetry prototyping (Medford2 development only) */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_TESTFW_TELEMETRY 0x1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_TESTFW_TELEMETRY 0x1 /* enum: RX PD firmware with approximately Siena-compatible behaviour * (Huntington development only) */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2 /* enum: Full featured RX PD production firmware */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3 /* enum: (deprecated original name for the FULL_FEATURED variant) */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_VSWITCH 0x3 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_VSWITCH 0x3 /* enum: siena_compat variant RX PD firmware using PM rather than MAC * (Huntington development only) */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 /* enum: Low latency RX PD production firmware */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum: Packed stream RX PD production firmware */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6 /* enum: RX PD firmware handling layer 2 only for high packet rate performance * tests (Medford development only) */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7 /* enum: Rules engine RX PD production firmware */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8 /* enum: Custom firmware variant (see SF-119495-PD and bug69716) */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_L3XUDP 0x9 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_L3XUDP 0x9 /* enum: DPDK RX PD production firmware */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_DPDK 0xa +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_DPDK 0xa /* enum: RX PD firmware for GUE parsing prototype (Medford development only) */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe /* enum: RX PD firmware parsing but not filtering network overlay tunnel * encapsulations (Medford development only) */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_OFST 10 
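[Aside: every field in these msgresponses follows the same MCDI convention: _OFST is a byte offset into the little-endian response payload, _LEN a byte count, and _LBN/_WIDTH a least-significant-bit number and bit width within the word at _OFST. A minimal sketch of decoding the RXPD firmware version word with the V5 constants above; the mcdi_word16/mcdi_field helpers are illustrative assumptions for this example, not the sfc driver's real accessors.]

#include <stdint.h>
#include <stdio.h>

/* Values copied from the MC_CMD_GET_CAPABILITIES_V5_OUT definitions above. */
#define RXPD_FW_VERSION_OFST		8
#define RXPD_FW_VERSION_REV_LBN		0
#define RXPD_FW_VERSION_REV_WIDTH	12
#define RXPD_FW_VERSION_TYPE_LBN	12
#define RXPD_FW_VERSION_TYPE_WIDTH	4
#define RXPD_FW_TYPE_FULL_FEATURED	0x3

/* Hypothetical helper: read a little-endian 16-bit word at a byte offset. */
static unsigned int mcdi_word16(const uint8_t *resp, unsigned int ofst)
{
	return resp[ofst] | (resp[ofst + 1] << 8);
}

/* Hypothetical helper: extract an LBN/WIDTH bitfield from a value. */
static unsigned int mcdi_field(unsigned int val, unsigned int lbn,
			       unsigned int width)
{
	return (val >> lbn) & ((1u << width) - 1u);
}

int main(void)
{
	/* Stand-in for the first bytes of a real GET_CAPABILITIES response:
	 * word at offset 8 = 0x3007 -> REV 7, TYPE 0x3 (FULL_FEATURED). */
	uint8_t resp[10] = { 0 };
	unsigned int ver, rev, type;

	resp[8] = 0x07;
	resp[9] = 0x30;

	ver = mcdi_word16(resp, RXPD_FW_VERSION_OFST);
	rev = mcdi_field(ver, RXPD_FW_VERSION_REV_LBN,
			 RXPD_FW_VERSION_REV_WIDTH);
	type = mcdi_field(ver, RXPD_FW_VERSION_TYPE_LBN,
			  RXPD_FW_VERSION_TYPE_WIDTH);

	printf("RXPD fw rev %u type %#x%s\n", rev, type,
	       type == RXPD_FW_TYPE_FULL_FEATURED ? " (full featured)" : "");
	return 0;
}

[The same pattern applies to every capability flag in FLAGS1/FLAGS2: read the 32-bit dword at _OFST and test bit _LBN.]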
-#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_LEN 2 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_REV_OFST 10 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_REV_LBN 0 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_REV_WIDTH 12 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_TYPE_OFST 10 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_TYPE_LBN 12 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_VERSION_OFST 10 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_VERSION_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_VERSION_REV_OFST 10 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_VERSION_REV_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_VERSION_REV_WIDTH 12 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_VERSION_TYPE_OFST 10 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_VERSION_TYPE_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4 /* enum: reserved value - do not use (may indicate alternative interpretation * of REV field in future) */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_RESERVED 0x0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_RESERVED 0x0 /* enum: Trivial TX PD firmware for early Huntington development (Huntington * development only) */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1 /* enum: TX PD firmware for telemetry prototyping (Medford2 development only) */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_TESTFW_TELEMETRY 0x1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_TESTFW_TELEMETRY 0x1 /* enum: TX PD firmware with approximately Siena-compatible behaviour * (Huntington development only) */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2 /* enum: Full featured TX PD production firmware */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3 /* enum: (deprecated original name for the FULL_FEATURED variant) */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_VSWITCH 0x3 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_VSWITCH 0x3 /* enum: siena_compat variant TX PD firmware using PM rather than MAC * (Huntington development only) */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */ /* enum: TX PD firmware handling layer 2 only for high packet rate performance * tests (Medford development only) */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7 /* enum: Rules engine TX PD production firmware */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8 /* enum: Custom firmware variant (see SF-119495-PD and bug69716) */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_L3XUDP 0x9 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_L3XUDP 0x9 /* enum: DPDK TX PD production firmware */ -#define 
MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_DPDK 0xa +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_DPDK 0xa /* enum: RX PD firmware for GUE parsing prototype (Medford development only) */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe /* Hardware capabilities of NIC */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_HW_CAPABILITIES_OFST 12 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_HW_CAPABILITIES_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_HW_CAPABILITIES_OFST 12 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_HW_CAPABILITIES_LEN 4 /* Licensed capabilities */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_LICENSE_CAPABILITIES_OFST 16 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_LICENSE_CAPABILITIES_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_LICENSE_CAPABILITIES_OFST 16 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_LICENSE_CAPABILITIES_LEN 4 /* Second word of flags. Not present on older firmware (check the length). */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_FLAGS2_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_FLAGS2_LEN 4 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_LBN 0 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_ENCAP_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_ENCAP_LBN 1 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_ENCAP_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVQ_TIMER_CTRL_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVQ_TIMER_CTRL_LBN 2 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVQ_TIMER_CTRL_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVENT_CUT_THROUGH_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVENT_CUT_THROUGH_LBN 3 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVENT_CUT_THROUGH_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_CUT_THROUGH_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_CUT_THROUGH_LBN 4 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_CUT_THROUGH_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_VFIFO_ULL_MODE_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_VFIFO_ULL_MODE_LBN 5 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_VFIFO_ULL_MODE_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_MAC_STATS_40G_TX_SIZE_BINS_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_EVQ_TYPE_SUPPORTED_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_EVQ_TYPE_SUPPORTED_LBN 7 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_EVQ_TYPE_SUPPORTED_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_EVQ_V2_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_EVQ_V2_LBN 7 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_EVQ_V2_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MAC_TIMESTAMPING_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MAC_TIMESTAMPING_LBN 8 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MAC_TIMESTAMPING_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TIMESTAMP_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TIMESTAMP_LBN 9 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TIMESTAMP_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_SNIFF_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_SNIFF_LBN 10 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_SNIFF_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_SNIFF_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_SNIFF_LBN 11 -#define 
MC_CMD_GET_CAPABILITIES_V3_OUT_TX_SNIFF_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_BACKGROUND_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_BACKGROUND_LBN 13 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_BACKGROUND_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_DB_RETURN_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_DB_RETURN_LBN 14 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_DB_RETURN_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_CTPIO_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_CTPIO_LBN 15 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_CTPIO_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TSA_SUPPORT_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TSA_SUPPORT_LBN 16 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TSA_SUPPORT_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TSA_BOUND_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TSA_BOUND_LBN 17 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TSA_BOUND_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_SF_ADAPTER_AUTHENTICATION_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_FILTER_ACTION_FLAG_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_FILTER_ACTION_FLAG_LBN 19 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_FILTER_ACTION_FLAG_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_FILTER_ACTION_MARK_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_FILTER_ACTION_MARK_LBN 20 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_FILTER_ACTION_MARK_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_EQUAL_STRIDE_SUPER_BUFFER_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_EQUAL_STRIDE_PACKED_STREAM_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_L3XUDP_SUPPORT_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_L3XUDP_SUPPORT_LBN 22 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_L3XUDP_SUPPORT_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_FW_SUBVARIANT_NO_TX_CSUM_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_SPREADING_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_SPREADING_LBN 24 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_SPREADING_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_HLB_IDLE_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_HLB_IDLE_LBN 25 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_HLB_IDLE_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_RXQ_NO_CONT_EV_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_RXQ_NO_CONT_EV_LBN 26 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_RXQ_NO_CONT_EV_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_RXQ_WITH_BUFFER_SIZE_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_RXQ_WITH_BUFFER_SIZE_LBN 27 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_RXQ_WITH_BUFFER_SIZE_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_BUNDLE_UPDATE_OFST 20 -#define 
MC_CMD_GET_CAPABILITIES_V3_OUT_BUNDLE_UPDATE_LBN 28 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_BUNDLE_UPDATE_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V3_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V3_LBN 29 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V3_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_DYNAMIC_SENSORS_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_DYNAMIC_SENSORS_LBN 30 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_DYNAMIC_SENSORS_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_LBN 31 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_FLAGS2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_FLAGS2_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V2_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V2_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V2_ENCAP_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V2_ENCAP_LBN 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V2_ENCAP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVQ_TIMER_CTRL_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVQ_TIMER_CTRL_LBN 2 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVQ_TIMER_CTRL_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVENT_CUT_THROUGH_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVENT_CUT_THROUGH_LBN 3 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVENT_CUT_THROUGH_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_CUT_THROUGH_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_CUT_THROUGH_LBN 4 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_CUT_THROUGH_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_VFIFO_ULL_MODE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_VFIFO_ULL_MODE_LBN 5 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_VFIFO_ULL_MODE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_MAC_STATS_40G_TX_SIZE_BINS_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_EVQ_TYPE_SUPPORTED_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_EVQ_TYPE_SUPPORTED_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_EVQ_TYPE_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_EVQ_V2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_EVQ_V2_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_EVQ_V2_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MAC_TIMESTAMPING_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MAC_TIMESTAMPING_LBN 8 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MAC_TIMESTAMPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TIMESTAMP_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TIMESTAMP_LBN 9 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TIMESTAMP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_SNIFF_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_SNIFF_LBN 10 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_SNIFF_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_SNIFF_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_SNIFF_LBN 11 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_SNIFF_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1 +#define 
MC_CMD_GET_CAPABILITIES_V5_OUT_MCDI_BACKGROUND_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_MCDI_BACKGROUND_LBN 13 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_MCDI_BACKGROUND_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_MCDI_DB_RETURN_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_MCDI_DB_RETURN_LBN 14 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_MCDI_DB_RETURN_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_CTPIO_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_CTPIO_LBN 15 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_CTPIO_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TSA_SUPPORT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TSA_SUPPORT_LBN 16 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TSA_SUPPORT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TSA_BOUND_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TSA_BOUND_LBN 17 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TSA_BOUND_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_SF_ADAPTER_AUTHENTICATION_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_FILTER_ACTION_FLAG_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_FILTER_ACTION_FLAG_LBN 19 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_FILTER_ACTION_FLAG_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_FILTER_ACTION_MARK_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_FILTER_ACTION_MARK_LBN 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_FILTER_ACTION_MARK_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_EQUAL_STRIDE_SUPER_BUFFER_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_EQUAL_STRIDE_PACKED_STREAM_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_L3XUDP_SUPPORT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_L3XUDP_SUPPORT_LBN 22 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_L3XUDP_SUPPORT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_FW_SUBVARIANT_NO_TX_CSUM_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VI_SPREADING_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VI_SPREADING_LBN 24 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VI_SPREADING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_HLB_IDLE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_HLB_IDLE_LBN 25 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_HLB_IDLE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_RXQ_NO_CONT_EV_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_RXQ_NO_CONT_EV_LBN 26 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_RXQ_NO_CONT_EV_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_RXQ_WITH_BUFFER_SIZE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_RXQ_WITH_BUFFER_SIZE_LBN 27 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_RXQ_WITH_BUFFER_SIZE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_BUNDLE_UPDATE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_BUNDLE_UPDATE_LBN 28 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_BUNDLE_UPDATE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V3_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V3_LBN 29 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V3_WIDTH 1 +#define 
MC_CMD_GET_CAPABILITIES_V5_OUT_DYNAMIC_SENSORS_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_DYNAMIC_SENSORS_LBN 30 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_DYNAMIC_SENSORS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_LBN 31 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_WIDTH 1 /* Number of FATSOv2 contexts per datapath supported by this NIC (when * TX_TSO_V2 == 1). Not present on older firmware (check the length). */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2 /* One byte per PF containing the number of the external port assigned to this * PF, indexed by PF number. Special values indicate that a PF is either not * present or not assigned. */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16 /* enum: The caller is not permitted to access information on this PF. */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_ACCESS_NOT_PERMITTED 0xff +#define MC_CMD_GET_CAPABILITIES_V5_OUT_ACCESS_NOT_PERMITTED 0xff /* enum: PF does not exist. */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_PF_NOT_PRESENT 0xfe +#define MC_CMD_GET_CAPABILITIES_V5_OUT_PF_NOT_PRESENT 0xfe /* enum: PF does exist but is not assigned to any external port. */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_PF_NOT_ASSIGNED 0xfd +#define MC_CMD_GET_CAPABILITIES_V5_OUT_PF_NOT_ASSIGNED 0xfd /* enum: This value indicates that PF is assigned, but it cannot be expressed * in this field. It is intended for a possible future situation where a more * complex scheme of PFs to ports mapping is being used. The future driver * should look for a new field supporting the new scheme. The current/old * driver should treat this value as PF_NOT_ASSIGNED. */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc +#define MC_CMD_GET_CAPABILITIES_V5_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc /* One byte per PF containing the number of its VFs, indexed by PF number. A * special value indicates that a PF is not present. */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VFS_PER_PF_OFST 42 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VFS_PER_PF_LEN 1 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VFS_PER_PF_NUM 16 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_VFS_PER_PF_OFST 42 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_VFS_PER_PF_LEN 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_VFS_PER_PF_NUM 16 /* enum: The caller is not permitted to access information on this PF. */ -/* MC_CMD_GET_CAPABILITIES_V3_OUT_ACCESS_NOT_PERMITTED 0xff */ +/* MC_CMD_GET_CAPABILITIES_V5_OUT_ACCESS_NOT_PERMITTED 0xff */ /* enum: PF does not exist. 
*/ -/* MC_CMD_GET_CAPABILITIES_V3_OUT_PF_NOT_PRESENT 0xfe */ -/* Number of VIs available for each external port */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VIS_PER_PORT_OFST 58 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VIS_PER_PORT_LEN 2 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VIS_PER_PORT_NUM 4 +/* MC_CMD_GET_CAPABILITIES_V5_OUT_PF_NOT_PRESENT 0xfe */ +/* Number of VIs available for external ports 0-3. For devices with more than + * four ports, the remainder are in NUM_VIS_PER_PORT2 in + * GET_CAPABILITIES_V12_OUT. + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_VIS_PER_PORT_OFST 58 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_VIS_PER_PORT_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_VIS_PER_PORT_NUM 4 /* Size of RX descriptor cache expressed as binary logarithm The actual size * equals (2 ^ RX_DESC_CACHE_SIZE) */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DESC_CACHE_SIZE_OFST 66 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DESC_CACHE_SIZE_LEN 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_DESC_CACHE_SIZE_OFST 66 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_DESC_CACHE_SIZE_LEN 1 /* Size of TX descriptor cache expressed as binary logarithm The actual size * equals (2 ^ TX_DESC_CACHE_SIZE) */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_DESC_CACHE_SIZE_OFST 67 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_DESC_CACHE_SIZE_LEN 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_DESC_CACHE_SIZE_OFST 67 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_DESC_CACHE_SIZE_LEN 1 /* Total number of available PIO buffers */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_PIO_BUFFS_OFST 68 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_PIO_BUFFS_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_PIO_BUFFS_OFST 68 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_PIO_BUFFS_LEN 2 /* Size of a single PIO buffer */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_SIZE_PIO_BUFF_OFST 70 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_SIZE_PIO_BUFF_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_SIZE_PIO_BUFF_OFST 70 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_SIZE_PIO_BUFF_LEN 2 /* On chips later than Medford the amount of address space assigned to each VI * is configurable. This is a global setting that the driver must query to * discover the VI to address mapping. Cut-through PIO (CTPIO) is not available * with 8k VI windows. - */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_OFST 72 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_LEN 1 -/* enum: Each VI occupies 8k as on Huntington and Medford. PIO is at offset 4k. - * CTPIO is not mapped. - */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_8K 0x0 -/* enum: Each VI occupies 16k. PIO is at offset 4k. CTPIO is at offset 12k. */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_16K 0x1 -/* enum: Each VI occupies 64k. PIO is at offset 4k. CTPIO is at offset 12k. */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_64K 0x2 -/* Number of vFIFOs per adapter that can be used for VFIFO Stuffing - * (SF-115995-SW) in the present configuration of firmware and port mode. - */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_VFIFO_STUFFING_NUM_VFIFOS_OFST 73 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_VFIFO_STUFFING_NUM_VFIFOS_LEN 1 -/* Number of buffers per adapter that can be used for VFIFO Stuffing - * (SF-115995-SW) in the present configuration of firmware and port mode. 
- */ -#define MC_CMD_GET_CAPABILITIES_V3_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_OFST 74 -#define MC_CMD_GET_CAPABILITIES_V3_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_LEN 2 - -/* MC_CMD_GET_CAPABILITIES_V4_OUT msgresponse */ -#define MC_CMD_GET_CAPABILITIES_V4_OUT_LEN 78 -/* First word of flags. */ -#define MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS1_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS1_LEN 4 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_VPORT_RECONFIGURE_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_VPORT_RECONFIGURE_LBN 3 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_VPORT_RECONFIGURE_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_STRIPING_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_STRIPING_LBN 4 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_STRIPING_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_VADAPTOR_QUERY_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_VADAPTOR_QUERY_LBN 5 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_VADAPTOR_QUERY_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVB_PORT_VLAN_RESTRICT_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_DRV_ATTACH_PREBOOT_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_DRV_ATTACH_PREBOOT_LBN 7 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_DRV_ATTACH_PREBOOT_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_FORCE_EVENT_MERGING_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_FORCE_EVENT_MERGING_LBN 8 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_SET_MAC_ENHANCED_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_SET_MAC_ENHANCED_LBN 9 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_SET_MAC_ENHANCED_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MAC_SECURITY_FILTERING_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MAC_SECURITY_FILTERING_LBN 12 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_ADDITIONAL_RSS_MODES_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_ADDITIONAL_RSS_MODES_LBN 13 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_ADDITIONAL_RSS_MODES_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_QBB_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_QBB_LBN 14 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_QBB_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PACKED_STREAM_VAR_BUFFERS_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_RSS_LIMITED_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_RSS_LIMITED_LBN 16 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_RSS_LIMITED_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PACKED_STREAM_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PACKED_STREAM_LBN 17 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PACKED_STREAM_WIDTH 1 -#define 
MC_CMD_GET_CAPABILITIES_V4_OUT_RX_INCLUDE_FCS_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_INCLUDE_FCS_LBN 18 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_INCLUDE_FCS_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_VLAN_INSERTION_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_VLAN_INSERTION_LBN 19 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_VLAN_INSERTION_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_VLAN_STRIPPING_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_VLAN_STRIPPING_LBN 20 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_VLAN_STRIPPING_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_LBN 21 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PREFIX_LEN_0_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PREFIX_LEN_0_LBN 22 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PREFIX_LEN_0_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PREFIX_LEN_14_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PREFIX_LEN_14_LBN 23 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PREFIX_LEN_14_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_TIMESTAMP_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_TIMESTAMP_LBN 24 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_TIMESTAMP_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_BATCHING_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_BATCHING_LBN 25 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_BATCHING_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCAST_FILTER_CHAINING_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCAST_FILTER_CHAINING_LBN 26 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCAST_FILTER_CHAINING_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_PM_AND_RXDP_COUNTERS_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_PM_AND_RXDP_COUNTERS_LBN 27 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DISABLE_SCATTER_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DISABLE_SCATTER_LBN 28 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DISABLE_SCATTER_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MCAST_UDP_LOOPBACK_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVB_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVB_LBN 30 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVB_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_VXLAN_NVGRE_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_VXLAN_NVGRE_LBN 31 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_VXLAN_NVGRE_WIDTH 1 + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VI_WINDOW_MODE_OFST 72 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VI_WINDOW_MODE_LEN 1 +/* enum: Each VI occupies 8k as on Huntington and Medford. PIO is at offset 4k. + * CTPIO is not mapped. + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VI_WINDOW_MODE_8K 0x0 +/* enum: Each VI occupies 16k. PIO is at offset 4k. CTPIO is at offset 12k. */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VI_WINDOW_MODE_16K 0x1 +/* enum: Each VI occupies 64k. PIO is at offset 4k. CTPIO is at offset 12k. */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VI_WINDOW_MODE_64K 0x2 +/* Number of vFIFOs per adapter that can be used for VFIFO Stuffing + * (SF-115995-SW) in the present configuration of firmware and port mode. 
+ */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VFIFO_STUFFING_NUM_VFIFOS_OFST 73 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VFIFO_STUFFING_NUM_VFIFOS_LEN 1 +/* Number of buffers per adapter that can be used for VFIFO Stuffing + * (SF-115995-SW) in the present configuration of firmware and port mode. + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_OFST 74 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_LEN 2 +/* Entry count in the MAC stats array, including the final GENERATION_END + * entry. For MAC stats DMA, drivers should allocate a buffer large enough to + * hold at least this many 64-bit stats values, if they wish to receive all + * available stats. If the buffer is shorter than MAC_STATS_NUM_STATS * 8, the + * stats array returned will be truncated. + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_MAC_STATS_NUM_STATS_OFST 76 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_MAC_STATS_NUM_STATS_LEN 2 +/* Maximum supported value for MC_CMD_FILTER_OP_V3/MATCH_MARK_VALUE. This field + * will only be non-zero if MC_CMD_GET_CAPABILITIES/FILTER_ACTION_MARK is set. + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_FILTER_ACTION_MARK_MAX_OFST 80 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_FILTER_ACTION_MARK_MAX_LEN 4 + +/* MC_CMD_GET_CAPABILITIES_V6_OUT msgresponse */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_LEN 148 +/* First word of flags. */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_FLAGS1_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_FLAGS1_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VPORT_RECONFIGURE_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VPORT_RECONFIGURE_LBN 3 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VPORT_RECONFIGURE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_STRIPING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_STRIPING_LBN 4 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_STRIPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VADAPTOR_QUERY_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VADAPTOR_QUERY_LBN 5 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VADAPTOR_QUERY_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_EVB_PORT_VLAN_RESTRICT_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_DRV_ATTACH_PREBOOT_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_DRV_ATTACH_PREBOOT_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_DRV_ATTACH_PREBOOT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_FORCE_EVENT_MERGING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_FORCE_EVENT_MERGING_LBN 8 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_SET_MAC_ENHANCED_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_SET_MAC_ENHANCED_LBN 9 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_SET_MAC_ENHANCED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_MAC_SECURITY_FILTERING_OFST 0 +#define 
MC_CMD_GET_CAPABILITIES_V6_OUT_TX_MAC_SECURITY_FILTERING_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_ADDITIONAL_RSS_MODES_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_ADDITIONAL_RSS_MODES_LBN 13 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_ADDITIONAL_RSS_MODES_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_QBB_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_QBB_LBN 14 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_QBB_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PACKED_STREAM_VAR_BUFFERS_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_RSS_LIMITED_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_RSS_LIMITED_LBN 16 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_RSS_LIMITED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PACKED_STREAM_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PACKED_STREAM_LBN 17 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PACKED_STREAM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_INCLUDE_FCS_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_INCLUDE_FCS_LBN 18 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_INCLUDE_FCS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_VLAN_INSERTION_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_VLAN_INSERTION_LBN 19 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_VLAN_INSERTION_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_VLAN_STRIPPING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_VLAN_STRIPPING_LBN 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_VLAN_STRIPPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PREFIX_LEN_0_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PREFIX_LEN_0_LBN 22 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PREFIX_LEN_0_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PREFIX_LEN_14_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PREFIX_LEN_14_LBN 23 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PREFIX_LEN_14_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_TIMESTAMP_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_TIMESTAMP_LBN 24 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_TIMESTAMP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_BATCHING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_BATCHING_LBN 25 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_BATCHING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_MCAST_FILTER_CHAINING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_MCAST_FILTER_CHAINING_LBN 26 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_MCAST_FILTER_CHAINING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_PM_AND_RXDP_COUNTERS_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_PM_AND_RXDP_COUNTERS_LBN 27 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_DISABLE_SCATTER_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_DISABLE_SCATTER_LBN 28 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_DISABLE_SCATTER_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_MCAST_UDP_LOOPBACK_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_EVB_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_EVB_LBN 30 +#define 
MC_CMD_GET_CAPABILITIES_V6_OUT_EVB_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VXLAN_NVGRE_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VXLAN_NVGRE_LBN 31 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VXLAN_NVGRE_WIDTH 1 /* RxDPCPU firmware id. */ -#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DPCPU_FW_ID_OFST 4 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DPCPU_FW_ID_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_DPCPU_FW_ID_OFST 4 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_DPCPU_FW_ID_LEN 2 /* enum: Standard RXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP 0x0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP 0x0 /* enum: Low latency RXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_LOW_LATENCY 0x1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_LOW_LATENCY 0x1 /* enum: Packed stream RXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_PACKED_STREAM 0x2 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_PACKED_STREAM 0x2 /* enum: Rules engine RXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_RULES_ENGINE 0x5 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_RULES_ENGINE 0x5 /* enum: DPDK RXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_DPDK 0x6 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_DPDK 0x6 /* enum: BIST RXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_BIST 0x10a +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_BIST 0x10a /* enum: RXDP Test firmware image 1 */ -#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101 /* enum: RXDP Test firmware image 2 */ -#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102 /* enum: RXDP Test firmware image 3 */ -#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103 /* enum: RXDP Test firmware image 4 */ -#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104 /* enum: RXDP Test firmware image 5 */ -#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_BACKPRESSURE 0x105 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_TEST_BACKPRESSURE 0x105 /* enum: RXDP Test firmware image 6 */ -#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106 /* enum: RXDP Test firmware image 7 */ -#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107 /* enum: RXDP Test firmware image 8 */ -#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_DISABLE_DL 0x108 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_TEST_FW_DISABLE_DL 0x108 /* enum: RXDP Test firmware image 9 */ -#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b /* enum: RXDP Test firmware image 10 */ -#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_SLOW 0x10c +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_TEST_FW_SLOW 0x10c /* TxDPCPU firmware id. 
*/ -#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_DPCPU_FW_ID_OFST 6 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_DPCPU_FW_ID_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_DPCPU_FW_ID_OFST 6 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_DPCPU_FW_ID_LEN 2 /* enum: Standard TXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP 0x0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXDP 0x0 /* enum: Low latency TXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_LOW_LATENCY 0x1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXDP_LOW_LATENCY 0x1 /* enum: High packet rate TXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_HIGH_PACKET_RATE 0x3 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXDP_HIGH_PACKET_RATE 0x3 /* enum: Rules engine TXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_RULES_ENGINE 0x5 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXDP_RULES_ENGINE 0x5 /* enum: DPDK TXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_DPDK 0x6 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXDP_DPDK 0x6 /* enum: BIST TXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_BIST 0x12d +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXDP_BIST 0x12d /* enum: TXDP Test firmware image 1 */ -#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_TEST_FW_TSO_EDIT 0x101 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXDP_TEST_FW_TSO_EDIT 0x101 /* enum: TXDP Test firmware image 2 */ -#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102 /* enum: TXDP CSR bus test firmware */ -#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_TEST_FW_CSR 0x103 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_OFST 8 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_LEN 2 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_REV_OFST 8 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_REV_LBN 0 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_REV_WIDTH 12 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_TYPE_OFST 8 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_TYPE_LBN 12 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXDP_TEST_FW_CSR 0x103 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_VERSION_OFST 8 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_VERSION_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_VERSION_REV_OFST 8 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_VERSION_REV_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_VERSION_REV_WIDTH 12 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_VERSION_TYPE_OFST 8 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_VERSION_TYPE_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4 /* enum: reserved value - do not use (may indicate alternative interpretation * of REV field in future) */ -#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_RESERVED 0x0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_RESERVED 0x0 /* enum: Trivial RX PD firmware for early Huntington development (Huntington * development only) */ -#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1 /* enum: RX PD firmware for telemetry prototyping (Medford2 development only) */ -#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_TESTFW_TELEMETRY 0x1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_TESTFW_TELEMETRY 0x1 /* enum: RX PD firmware with approximately Siena-compatible behaviour * (Huntington development only) 
*/ -#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2 /* enum: Full featured RX PD production firmware */ -#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3 /* enum: (deprecated original name for the FULL_FEATURED variant) */ -#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_VSWITCH 0x3 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_VSWITCH 0x3 /* enum: siena_compat variant RX PD firmware using PM rather than MAC * (Huntington development only) */ -#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 /* enum: Low latency RX PD production firmware */ -#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum: Packed stream RX PD production firmware */ -#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6 /* enum: RX PD firmware handling layer 2 only for high packet rate performance * tests (Medford development only) */ -#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7 /* enum: Rules engine RX PD production firmware */ -#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8 /* enum: Custom firmware variant (see SF-119495-PD and bug69716) */ -#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_L3XUDP 0x9 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_L3XUDP 0x9 /* enum: DPDK RX PD production firmware */ -#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_DPDK 0xa +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_DPDK 0xa /* enum: RX PD firmware for GUE parsing prototype (Medford development only) */ -#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe /* enum: RX PD firmware parsing but not filtering network overlay tunnel * encapsulations (Medford development only) */ -#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf -#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_OFST 10 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_LEN 2 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_REV_OFST 10 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_REV_LBN 0 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_REV_WIDTH 12 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_TYPE_OFST 10 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_TYPE_LBN 12 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_VERSION_OFST 10 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_VERSION_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_VERSION_REV_OFST 10 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_VERSION_REV_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_VERSION_REV_WIDTH 12 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_VERSION_TYPE_OFST 10 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_VERSION_TYPE_LBN 12 +#define 
MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4 /* enum: reserved value - do not use (may indicate alternative interpretation * of REV field in future) */ -#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_RESERVED 0x0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_RESERVED 0x0 /* enum: Trivial TX PD firmware for early Huntington development (Huntington * development only) */ -#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1 /* enum: TX PD firmware for telemetry prototyping (Medford2 development only) */ -#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_TESTFW_TELEMETRY 0x1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_TESTFW_TELEMETRY 0x1 /* enum: TX PD firmware with approximately Siena-compatible behaviour * (Huntington development only) */ -#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2 /* enum: Full featured TX PD production firmware */ -#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3 /* enum: (deprecated original name for the FULL_FEATURED variant) */ -#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_VSWITCH 0x3 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_VSWITCH 0x3 /* enum: siena_compat variant TX PD firmware using PM rather than MAC * (Huntington development only) */ -#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */ /* enum: TX PD firmware handling layer 2 only for high packet rate performance * tests (Medford development only) */ -#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7 /* enum: Rules engine TX PD production firmware */ -#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8 /* enum: Custom firmware variant (see SF-119495-PD and bug69716) */ -#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_L3XUDP 0x9 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_L3XUDP 0x9 /* enum: DPDK TX PD production firmware */ -#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_DPDK 0xa +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_DPDK 0xa /* enum: RX PD firmware for GUE parsing prototype (Medford development only) */ -#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe /* Hardware capabilities of NIC */ -#define MC_CMD_GET_CAPABILITIES_V4_OUT_HW_CAPABILITIES_OFST 12 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_HW_CAPABILITIES_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_HW_CAPABILITIES_OFST 12 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_HW_CAPABILITIES_LEN 4 /* Licensed capabilities */ -#define MC_CMD_GET_CAPABILITIES_V4_OUT_LICENSE_CAPABILITIES_OFST 16 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_LICENSE_CAPABILITIES_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_LICENSE_CAPABILITIES_OFST 16 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_LICENSE_CAPABILITIES_LEN 4 /* Second word of flags. Not present on older firmware (check the length). 
*/ -#define MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS2_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS2_LEN 4 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_LBN 0 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_ENCAP_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_ENCAP_LBN 1 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_ENCAP_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVQ_TIMER_CTRL_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVQ_TIMER_CTRL_LBN 2 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVQ_TIMER_CTRL_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVENT_CUT_THROUGH_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVENT_CUT_THROUGH_LBN 3 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVENT_CUT_THROUGH_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_CUT_THROUGH_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_CUT_THROUGH_LBN 4 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_CUT_THROUGH_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_VFIFO_ULL_MODE_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_VFIFO_ULL_MODE_LBN 5 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_VFIFO_ULL_MODE_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_40G_TX_SIZE_BINS_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_EVQ_TYPE_SUPPORTED_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_EVQ_TYPE_SUPPORTED_LBN 7 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_EVQ_TYPE_SUPPORTED_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_EVQ_V2_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_EVQ_V2_LBN 7 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_EVQ_V2_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MAC_TIMESTAMPING_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MAC_TIMESTAMPING_LBN 8 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MAC_TIMESTAMPING_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TIMESTAMP_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TIMESTAMP_LBN 9 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TIMESTAMP_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_SNIFF_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_SNIFF_LBN 10 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_SNIFF_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_SNIFF_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_SNIFF_LBN 11 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_SNIFF_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCDI_BACKGROUND_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCDI_BACKGROUND_LBN 13 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCDI_BACKGROUND_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCDI_DB_RETURN_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCDI_DB_RETURN_LBN 14 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCDI_DB_RETURN_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_CTPIO_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_CTPIO_LBN 15 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_CTPIO_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_TSA_SUPPORT_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_TSA_SUPPORT_LBN 16 -#define MC_CMD_GET_CAPABILITIES_V4_OUT_TSA_SUPPORT_WIDTH 1 -#define 
MC_CMD_GET_CAPABILITIES_V4_OUT_TSA_BOUND_OFST 20
-#define MC_CMD_GET_CAPABILITIES_V4_OUT_TSA_BOUND_LBN 17
-#define MC_CMD_GET_CAPABILITIES_V4_OUT_TSA_BOUND_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V4_OUT_SF_ADAPTER_AUTHENTICATION_OFST 20
-#define MC_CMD_GET_CAPABILITIES_V4_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18
-#define MC_CMD_GET_CAPABILITIES_V4_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V4_OUT_FILTER_ACTION_FLAG_OFST 20
-#define MC_CMD_GET_CAPABILITIES_V4_OUT_FILTER_ACTION_FLAG_LBN 19
-#define MC_CMD_GET_CAPABILITIES_V4_OUT_FILTER_ACTION_FLAG_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V4_OUT_FILTER_ACTION_MARK_OFST 20
-#define MC_CMD_GET_CAPABILITIES_V4_OUT_FILTER_ACTION_MARK_LBN 20
-#define MC_CMD_GET_CAPABILITIES_V4_OUT_FILTER_ACTION_MARK_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V4_OUT_EQUAL_STRIDE_SUPER_BUFFER_OFST 20
-#define MC_CMD_GET_CAPABILITIES_V4_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21
-#define MC_CMD_GET_CAPABILITIES_V4_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V4_OUT_EQUAL_STRIDE_PACKED_STREAM_OFST 20
-#define MC_CMD_GET_CAPABILITIES_V4_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21
-#define MC_CMD_GET_CAPABILITIES_V4_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V4_OUT_L3XUDP_SUPPORT_OFST 20
-#define MC_CMD_GET_CAPABILITIES_V4_OUT_L3XUDP_SUPPORT_LBN 22
-#define MC_CMD_GET_CAPABILITIES_V4_OUT_L3XUDP_SUPPORT_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V4_OUT_FW_SUBVARIANT_NO_TX_CSUM_OFST 20
-#define MC_CMD_GET_CAPABILITIES_V4_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23
-#define MC_CMD_GET_CAPABILITIES_V4_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_SPREADING_OFST 20
-#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_SPREADING_LBN 24
-#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_SPREADING_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_HLB_IDLE_OFST 20
-#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_HLB_IDLE_LBN 25
-#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_HLB_IDLE_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_RXQ_NO_CONT_EV_OFST 20
-#define MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_RXQ_NO_CONT_EV_LBN 26
-#define MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_RXQ_NO_CONT_EV_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_RXQ_WITH_BUFFER_SIZE_OFST 20
-#define MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_RXQ_WITH_BUFFER_SIZE_LBN 27
-#define MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_RXQ_WITH_BUFFER_SIZE_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V4_OUT_BUNDLE_UPDATE_OFST 20
-#define MC_CMD_GET_CAPABILITIES_V4_OUT_BUNDLE_UPDATE_LBN 28
-#define MC_CMD_GET_CAPABILITIES_V4_OUT_BUNDLE_UPDATE_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V3_OFST 20
-#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V3_LBN 29
-#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V3_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V4_OUT_DYNAMIC_SENSORS_OFST 20
-#define MC_CMD_GET_CAPABILITIES_V4_OUT_DYNAMIC_SENSORS_LBN 30
-#define MC_CMD_GET_CAPABILITIES_V4_OUT_DYNAMIC_SENSORS_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V4_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_OFST 20
-#define MC_CMD_GET_CAPABILITIES_V4_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_LBN 31
-#define MC_CMD_GET_CAPABILITIES_V4_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_FLAGS2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_FLAGS2_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_V2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_V2_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_V2_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_V2_ENCAP_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_V2_ENCAP_LBN 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_V2_ENCAP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_EVQ_TIMER_CTRL_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_EVQ_TIMER_CTRL_LBN 2
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_EVQ_TIMER_CTRL_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_EVENT_CUT_THROUGH_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_EVENT_CUT_THROUGH_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_EVENT_CUT_THROUGH_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_CUT_THROUGH_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_CUT_THROUGH_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_CUT_THROUGH_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_VFIFO_ULL_MODE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_VFIFO_ULL_MODE_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_VFIFO_ULL_MODE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_MAC_STATS_40G_TX_SIZE_BINS_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_EVQ_TYPE_SUPPORTED_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_EVQ_TYPE_SUPPORTED_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_EVQ_TYPE_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_EVQ_V2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_EVQ_V2_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_EVQ_V2_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_MAC_TIMESTAMPING_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_MAC_TIMESTAMPING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_MAC_TIMESTAMPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TIMESTAMP_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TIMESTAMP_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_SNIFF_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_SNIFF_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_SNIFF_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_SNIFF_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_SNIFF_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_SNIFF_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_MCDI_BACKGROUND_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_MCDI_BACKGROUND_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_MCDI_BACKGROUND_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_MCDI_DB_RETURN_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_MCDI_DB_RETURN_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_MCDI_DB_RETURN_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_CTPIO_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_CTPIO_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_CTPIO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TSA_SUPPORT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TSA_SUPPORT_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TSA_SUPPORT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TSA_BOUND_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TSA_BOUND_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TSA_BOUND_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_SF_ADAPTER_AUTHENTICATION_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_FILTER_ACTION_FLAG_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_FILTER_ACTION_FLAG_LBN 19
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_FILTER_ACTION_FLAG_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_FILTER_ACTION_MARK_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_FILTER_ACTION_MARK_LBN 20
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_FILTER_ACTION_MARK_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_EQUAL_STRIDE_SUPER_BUFFER_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_EQUAL_STRIDE_PACKED_STREAM_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_L3XUDP_SUPPORT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_L3XUDP_SUPPORT_LBN 22
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_L3XUDP_SUPPORT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_FW_SUBVARIANT_NO_TX_CSUM_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_VI_SPREADING_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_VI_SPREADING_LBN 24
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_VI_SPREADING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_HLB_IDLE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_HLB_IDLE_LBN 25
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_HLB_IDLE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_RXQ_NO_CONT_EV_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_RXQ_NO_CONT_EV_LBN 26
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_RXQ_NO_CONT_EV_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_RXQ_WITH_BUFFER_SIZE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_RXQ_WITH_BUFFER_SIZE_LBN 27
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_RXQ_WITH_BUFFER_SIZE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_BUNDLE_UPDATE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_BUNDLE_UPDATE_LBN 28
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_BUNDLE_UPDATE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_V3_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_V3_LBN 29
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_V3_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_DYNAMIC_SENSORS_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_DYNAMIC_SENSORS_LBN 30
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_DYNAMIC_SENSORS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_LBN 31
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_WIDTH 1
 /* Number of FATSOv2 contexts per datapath supported by this NIC (when
  * TX_TSO_V2 == 1). Not present on older firmware (check the length).
  */
-#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24
-#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2
 /* One byte per PF containing the number of the external port assigned to this
  * PF, indexed by PF number. Special values indicate that a PF is either not
  * present or not assigned.
  */
-#define MC_CMD_GET_CAPABILITIES_V4_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26
-#define MC_CMD_GET_CAPABILITIES_V4_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1
-#define MC_CMD_GET_CAPABILITIES_V4_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16
 /* enum: The caller is not permitted to access information on this PF. */
-#define MC_CMD_GET_CAPABILITIES_V4_OUT_ACCESS_NOT_PERMITTED 0xff
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_ACCESS_NOT_PERMITTED 0xff
 /* enum: PF does not exist. */
-#define MC_CMD_GET_CAPABILITIES_V4_OUT_PF_NOT_PRESENT 0xfe
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_PF_NOT_PRESENT 0xfe
 /* enum: PF does exist but is not assigned to any external port. */
-#define MC_CMD_GET_CAPABILITIES_V4_OUT_PF_NOT_ASSIGNED 0xfd
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_PF_NOT_ASSIGNED 0xfd
 /* enum: This value indicates that PF is assigned, but it cannot be expressed
  * in this field. It is intended for a possible future situation where a more
  * complex scheme of PFs to ports mapping is being used. The future driver
  * should look for a new field supporting the new scheme. The current/old
  * driver should treat this value as PF_NOT_ASSIGNED.
  */
-#define MC_CMD_GET_CAPABILITIES_V4_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc
 /* One byte per PF containing the number of its VFs, indexed by PF number. A
  * special value indicates that a PF is not present.
  */
-#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VFS_PER_PF_OFST 42
-#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VFS_PER_PF_LEN 1
-#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VFS_PER_PF_NUM 16
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_NUM_VFS_PER_PF_OFST 42
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_NUM_VFS_PER_PF_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_NUM_VFS_PER_PF_NUM 16
 /* enum: The caller is not permitted to access information on this PF. */
-/* MC_CMD_GET_CAPABILITIES_V4_OUT_ACCESS_NOT_PERMITTED 0xff */
+/* MC_CMD_GET_CAPABILITIES_V6_OUT_ACCESS_NOT_PERMITTED 0xff */
 /* enum: PF does not exist. */
-/* MC_CMD_GET_CAPABILITIES_V4_OUT_PF_NOT_PRESENT 0xfe */
-/* Number of VIs available for each external port */
-#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VIS_PER_PORT_OFST 58
-#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VIS_PER_PORT_LEN 2
-#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VIS_PER_PORT_NUM 4
+/* MC_CMD_GET_CAPABILITIES_V6_OUT_PF_NOT_PRESENT 0xfe */
+/* Number of VIs available for external ports 0-3. For devices with more than
+ * four ports, the remainder are in NUM_VIS_PER_PORT2 in
+ * GET_CAPABILITIES_V12_OUT.
+ */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_NUM_VIS_PER_PORT_OFST 58
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_NUM_VIS_PER_PORT_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_NUM_VIS_PER_PORT_NUM 4
 /* Size of RX descriptor cache expressed as binary logarithm The actual size
  * equals (2 ^ RX_DESC_CACHE_SIZE)
  */
-#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DESC_CACHE_SIZE_OFST 66
-#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DESC_CACHE_SIZE_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_DESC_CACHE_SIZE_OFST 66
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_DESC_CACHE_SIZE_LEN 1
 /* Size of TX descriptor cache expressed as binary logarithm The actual size
  * equals (2 ^ TX_DESC_CACHE_SIZE)
  */
-#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_DESC_CACHE_SIZE_OFST 67
-#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_DESC_CACHE_SIZE_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_DESC_CACHE_SIZE_OFST 67
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_DESC_CACHE_SIZE_LEN 1
 /* Total number of available PIO buffers */
-#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_PIO_BUFFS_OFST 68
-#define MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_PIO_BUFFS_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_NUM_PIO_BUFFS_OFST 68
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_NUM_PIO_BUFFS_LEN 2
 /* Size of a single PIO buffer */
-#define MC_CMD_GET_CAPABILITIES_V4_OUT_SIZE_PIO_BUFF_OFST 70
-#define MC_CMD_GET_CAPABILITIES_V4_OUT_SIZE_PIO_BUFF_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_SIZE_PIO_BUFF_OFST 70
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_SIZE_PIO_BUFF_LEN 2
 /* On chips later than Medford the amount of address space assigned to each VI
  * is configurable. This is a global setting that the driver must query to
  * discover the VI to address mapping. Cut-through PIO (CTPIO) is not available
  * with 8k VI windows.
  */
-#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_OFST 72
-#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_VI_WINDOW_MODE_OFST 72
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_VI_WINDOW_MODE_LEN 1
 /* enum: Each VI occupies 8k as on Huntington and Medford. PIO is at offset 4k.
  * CTPIO is not mapped.
  */
-#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_8K 0x0
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_VI_WINDOW_MODE_8K 0x0
 /* enum: Each VI occupies 16k. PIO is at offset 4k. CTPIO is at offset 12k. */
-#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_16K 0x1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_VI_WINDOW_MODE_16K 0x1
 /* enum: Each VI occupies 64k. PIO is at offset 4k. CTPIO is at offset 12k. */
-#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_64K 0x2
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_VI_WINDOW_MODE_64K 0x2
 /* Number of vFIFOs per adapter that can be used for VFIFO Stuffing
  * (SF-115995-SW) in the present configuration of firmware and port mode.
  */
-#define MC_CMD_GET_CAPABILITIES_V4_OUT_VFIFO_STUFFING_NUM_VFIFOS_OFST 73
-#define MC_CMD_GET_CAPABILITIES_V4_OUT_VFIFO_STUFFING_NUM_VFIFOS_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_VFIFO_STUFFING_NUM_VFIFOS_OFST 73
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_VFIFO_STUFFING_NUM_VFIFOS_LEN 1
 /* Number of buffers per adapter that can be used for VFIFO Stuffing
  * (SF-115995-SW) in the present configuration of firmware and port mode.
  */
-#define MC_CMD_GET_CAPABILITIES_V4_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_OFST 74
-#define MC_CMD_GET_CAPABILITIES_V4_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_OFST 74
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_LEN 2
 /* Entry count in the MAC stats array, including the final GENERATION_END
  * entry. For MAC stats DMA, drivers should allocate a buffer large enough to
  * hold at least this many 64-bit stats values, if they wish to receive all
  * available stats. If the buffer is shorter than MAC_STATS_NUM_STATS * 8, the
  * stats array returned will be truncated.
  */
-#define MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS_OFST 76
-#define MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_MAC_STATS_NUM_STATS_OFST 76
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_MAC_STATS_NUM_STATS_LEN 2
+/* Maximum supported value for MC_CMD_FILTER_OP_V3/MATCH_MARK_VALUE. This field
+ * will only be non-zero if MC_CMD_GET_CAPABILITIES/FILTER_ACTION_MARK is set.
+ */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_FILTER_ACTION_MARK_MAX_OFST 80
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_FILTER_ACTION_MARK_MAX_LEN 4
+/* On devices where the INIT_RXQ_WITH_BUFFER_SIZE flag (in
+ * GET_CAPABILITIES_OUT_V2) is set, drivers have to specify a buffer size when
+ * they create an RX queue. Due to hardware limitations, only a small number of
+ * different buffer sizes may be available concurrently. Nonzero entries in
+ * this array are the sizes of buffers which the system guarantees will be
+ * available for use. If the list is empty, there are no limitations on
+ * concurrent buffer sizes.
+ */
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_GUARANTEED_RX_BUFFER_SIZES_OFST 84
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_GUARANTEED_RX_BUFFER_SIZES_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_GUARANTEED_RX_BUFFER_SIZES_NUM 16

-/* MC_CMD_GET_CAPABILITIES_V5_OUT msgresponse */
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_LEN 84
+/* MC_CMD_GET_CAPABILITIES_V7_OUT msgresponse */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_LEN 152
 /* First word of flags.
 */
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_FLAGS1_OFST 0
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_FLAGS1_LEN 4
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_VPORT_RECONFIGURE_OFST 0
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_VPORT_RECONFIGURE_LBN 3
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_VPORT_RECONFIGURE_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_STRIPING_OFST 0
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_STRIPING_LBN 4
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_STRIPING_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_VADAPTOR_QUERY_OFST 0
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_VADAPTOR_QUERY_LBN 5
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_VADAPTOR_QUERY_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVB_PORT_VLAN_RESTRICT_OFST 0
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_DRV_ATTACH_PREBOOT_OFST 0
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_DRV_ATTACH_PREBOOT_LBN 7
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_DRV_ATTACH_PREBOOT_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_FORCE_EVENT_MERGING_OFST 0
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_FORCE_EVENT_MERGING_LBN 8
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_SET_MAC_ENHANCED_OFST 0
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_SET_MAC_ENHANCED_LBN 9
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_SET_MAC_ENHANCED_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_OFST 0
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_OFST 0
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MAC_SECURITY_FILTERING_OFST 0
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MAC_SECURITY_FILTERING_LBN 12
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_ADDITIONAL_RSS_MODES_OFST 0
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_ADDITIONAL_RSS_MODES_LBN 13
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_ADDITIONAL_RSS_MODES_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_QBB_OFST 0
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_QBB_LBN 14
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_QBB_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PACKED_STREAM_VAR_BUFFERS_OFST 0
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_RSS_LIMITED_OFST 0
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_RSS_LIMITED_LBN 16
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_RSS_LIMITED_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PACKED_STREAM_OFST 0
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PACKED_STREAM_LBN 17
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PACKED_STREAM_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_INCLUDE_FCS_OFST 0
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_INCLUDE_FCS_LBN 18
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_INCLUDE_FCS_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_VLAN_INSERTION_OFST 0
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_VLAN_INSERTION_LBN 19
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_VLAN_INSERTION_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_VLAN_STRIPPING_OFST 0
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_VLAN_STRIPPING_LBN 20
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_VLAN_STRIPPING_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_OFST 0
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_LBN 21
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PREFIX_LEN_0_OFST 0
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PREFIX_LEN_0_LBN 22
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PREFIX_LEN_0_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PREFIX_LEN_14_OFST 0
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PREFIX_LEN_14_LBN 23
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PREFIX_LEN_14_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_TIMESTAMP_OFST 0
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_TIMESTAMP_LBN 24
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_TIMESTAMP_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_BATCHING_OFST 0
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_BATCHING_LBN 25
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_BATCHING_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_MCAST_FILTER_CHAINING_OFST 0
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_MCAST_FILTER_CHAINING_LBN 26
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_MCAST_FILTER_CHAINING_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_PM_AND_RXDP_COUNTERS_OFST 0
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_PM_AND_RXDP_COUNTERS_LBN 27
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_DISABLE_SCATTER_OFST 0
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_DISABLE_SCATTER_LBN 28
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_DISABLE_SCATTER_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MCAST_UDP_LOOPBACK_OFST 0
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVB_OFST 0
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVB_LBN 30
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVB_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_VXLAN_NVGRE_OFST 0
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_VXLAN_NVGRE_LBN 31
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_VXLAN_NVGRE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_FLAGS1_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_FLAGS1_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_VPORT_RECONFIGURE_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_VPORT_RECONFIGURE_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_VPORT_RECONFIGURE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_STRIPING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_STRIPING_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_STRIPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_VADAPTOR_QUERY_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_VADAPTOR_QUERY_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_VADAPTOR_QUERY_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_EVB_PORT_VLAN_RESTRICT_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_DRV_ATTACH_PREBOOT_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_DRV_ATTACH_PREBOOT_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_DRV_ATTACH_PREBOOT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_FORCE_EVENT_MERGING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_FORCE_EVENT_MERGING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_SET_MAC_ENHANCED_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_SET_MAC_ENHANCED_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_SET_MAC_ENHANCED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_MAC_SECURITY_FILTERING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_MAC_SECURITY_FILTERING_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_ADDITIONAL_RSS_MODES_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_ADDITIONAL_RSS_MODES_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_ADDITIONAL_RSS_MODES_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_QBB_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_QBB_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_QBB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_PACKED_STREAM_VAR_BUFFERS_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_RSS_LIMITED_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_RSS_LIMITED_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_RSS_LIMITED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_PACKED_STREAM_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_PACKED_STREAM_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_PACKED_STREAM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_INCLUDE_FCS_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_INCLUDE_FCS_LBN 18
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_INCLUDE_FCS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_VLAN_INSERTION_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_VLAN_INSERTION_LBN 19
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_VLAN_INSERTION_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_VLAN_STRIPPING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_VLAN_STRIPPING_LBN 20
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_VLAN_STRIPPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_PREFIX_LEN_0_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_PREFIX_LEN_0_LBN 22
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_PREFIX_LEN_0_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_PREFIX_LEN_14_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_PREFIX_LEN_14_LBN 23
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_PREFIX_LEN_14_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_TIMESTAMP_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_TIMESTAMP_LBN 24
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_BATCHING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_BATCHING_LBN 25
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_BATCHING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_MCAST_FILTER_CHAINING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_MCAST_FILTER_CHAINING_LBN 26
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_MCAST_FILTER_CHAINING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_PM_AND_RXDP_COUNTERS_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_PM_AND_RXDP_COUNTERS_LBN 27
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_DISABLE_SCATTER_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_DISABLE_SCATTER_LBN 28
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_DISABLE_SCATTER_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_MCAST_UDP_LOOPBACK_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_EVB_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_EVB_LBN 30
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_EVB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_VXLAN_NVGRE_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_VXLAN_NVGRE_LBN 31
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_VXLAN_NVGRE_WIDTH 1
 /* RxDPCPU firmware id. */
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_DPCPU_FW_ID_OFST 4
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_DPCPU_FW_ID_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_DPCPU_FW_ID_OFST 4
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_DPCPU_FW_ID_LEN 2
 /* enum: Standard RXDP firmware */
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP 0x0
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP 0x0
 /* enum: Low latency RXDP firmware */
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_LOW_LATENCY 0x1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_LOW_LATENCY 0x1
 /* enum: Packed stream RXDP firmware */
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_PACKED_STREAM 0x2
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_PACKED_STREAM 0x2
 /* enum: Rules engine RXDP firmware */
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_RULES_ENGINE 0x5
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_RULES_ENGINE 0x5
 /* enum: DPDK RXDP firmware */
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_DPDK 0x6
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_DPDK 0x6
 /* enum: BIST RXDP firmware */
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_BIST 0x10a
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_BIST 0x10a
 /* enum: RXDP Test firmware image 1 */
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101
 /* enum: RXDP Test firmware image 2 */
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102
 /* enum: RXDP Test firmware image 3 */
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103
 /* enum: RXDP Test firmware image 4 */
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104
 /* enum: RXDP Test firmware image 5 */
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_BACKPRESSURE 0x105
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_TEST_BACKPRESSURE 0x105
 /* enum: RXDP Test firmware image 6 */
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106
 /* enum: RXDP Test firmware image 7 */
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107
 /* enum: RXDP Test firmware image 8 */
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_DISABLE_DL 0x108
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_TEST_FW_DISABLE_DL 0x108
 /* enum: RXDP Test firmware image 9 */
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b
 /* enum: RXDP Test firmware image 10 */
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_SLOW 0x10c
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_TEST_FW_SLOW 0x10c
 /* TxDPCPU firmware id. */
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_DPCPU_FW_ID_OFST 6
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_DPCPU_FW_ID_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_DPCPU_FW_ID_OFST 6
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_DPCPU_FW_ID_LEN 2
 /* enum: Standard TXDP firmware */
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP 0x0
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXDP 0x0
 /* enum: Low latency TXDP firmware */
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_LOW_LATENCY 0x1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXDP_LOW_LATENCY 0x1
 /* enum: High packet rate TXDP firmware */
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_HIGH_PACKET_RATE 0x3
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXDP_HIGH_PACKET_RATE 0x3
 /* enum: Rules engine TXDP firmware */
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_RULES_ENGINE 0x5
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXDP_RULES_ENGINE 0x5
 /* enum: DPDK TXDP firmware */
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_DPDK 0x6
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXDP_DPDK 0x6
 /* enum: BIST TXDP firmware */
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_BIST 0x12d
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXDP_BIST 0x12d
 /* enum: TXDP Test firmware image 1 */
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_TEST_FW_TSO_EDIT 0x101
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXDP_TEST_FW_TSO_EDIT 0x101
 /* enum: TXDP Test firmware image 2 */
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102
 /* enum: TXDP CSR bus test firmware */
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_TEST_FW_CSR 0x103
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_VERSION_OFST 8
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_VERSION_LEN 2
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_VERSION_REV_OFST 8
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_VERSION_REV_LBN 0
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_VERSION_REV_WIDTH 12
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_VERSION_TYPE_OFST 8
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_VERSION_TYPE_LBN 12
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXDP_TEST_FW_CSR 0x103
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_VERSION_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_VERSION_REV_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_VERSION_TYPE_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4
 /* enum: reserved value - do not use (may indicate alternative interpretation
  * of REV field in future)
  */
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_RESERVED 0x0
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_RESERVED 0x0
 /* enum: Trivial RX PD firmware for early Huntington development (Huntington
  * development only)
  */
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1
 /* enum: RX PD firmware for telemetry prototyping (Medford2 development only)
  */
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_TESTFW_TELEMETRY 0x1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_TESTFW_TELEMETRY 0x1
 /* enum: RX PD firmware with approximately Siena-compatible behaviour
  * (Huntington development only)
  */
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2
 /* enum: Full featured RX PD production firmware */
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3
 /* enum: (deprecated original name for the FULL_FEATURED variant) */
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_VSWITCH 0x3
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_VSWITCH 0x3
 /* enum: siena_compat variant RX PD firmware using PM rather than MAC
  * (Huntington development only)
  */
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
 /* enum: Low latency RX PD production firmware */
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5
 /* enum: Packed stream RX PD production firmware */
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6
 /* enum: RX PD firmware handling layer 2 only for high packet rate performance
  * tests (Medford development only)
  */
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7
 /* enum: Rules engine RX PD production firmware */
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8
 /* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_L3XUDP 0x9
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_L3XUDP 0x9
 /* enum: DPDK RX PD production firmware */
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_DPDK 0xa
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_DPDK 0xa
 /* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
 /* enum: RX PD firmware parsing but not filtering network overlay tunnel
  * encapsulations (Medford development only)
  */
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_VERSION_OFST 10
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_VERSION_LEN 2
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_VERSION_REV_OFST 10
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_VERSION_REV_LBN 0
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_VERSION_REV_WIDTH 12
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_VERSION_TYPE_OFST 10
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_VERSION_TYPE_LBN 12
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_VERSION_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_VERSION_REV_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_VERSION_TYPE_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4
 /* enum: reserved value - do not use (may indicate alternative interpretation
  * of REV field in future)
  */
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_RESERVED 0x0
-/* enum: Trivial TX PD firmware for early Huntington development (Huntington
- * development only)
- */
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1
-/* enum: TX PD firmware for telemetry prototyping (Medford2 development only)
- */
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_TESTFW_TELEMETRY 0x1
-/* enum: TX PD firmware with approximately Siena-compatible behaviour
- * (Huntington development only)
- */
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2
-/* enum: Full featured TX PD production firmware */
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3
-/* enum: (deprecated original name for the FULL_FEATURED variant) */
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_VSWITCH 0x3
-/* enum: siena_compat variant TX PD firmware using PM rather than MAC
- * (Huntington development only)
- */
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */
-/* enum: TX PD firmware handling layer 2 only for high packet rate performance
- * tests (Medford development only)
- */
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7
-/* enum: Rules engine TX PD production firmware */
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8
-/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_L3XUDP 0x9
-/* enum: DPDK TX PD production firmware */
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_DPDK 0xa
-/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
-/* Hardware capabilities of NIC */
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_HW_CAPABILITIES_OFST 12
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_HW_CAPABILITIES_LEN 4
-/* Licensed capabilities */
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_LICENSE_CAPABILITIES_OFST 16
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_LICENSE_CAPABILITIES_LEN 4
-/* Second word of flags. Not present on older firmware (check the length).
- */
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_FLAGS2_OFST 20
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_FLAGS2_LEN 4
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V2_OFST 20
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V2_LBN 0
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V2_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V2_ENCAP_OFST 20
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V2_ENCAP_LBN 1
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V2_ENCAP_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVQ_TIMER_CTRL_OFST 20
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVQ_TIMER_CTRL_LBN 2
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVQ_TIMER_CTRL_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVENT_CUT_THROUGH_OFST 20
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVENT_CUT_THROUGH_LBN 3
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVENT_CUT_THROUGH_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_CUT_THROUGH_OFST 20
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_CUT_THROUGH_LBN 4
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_CUT_THROUGH_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_VFIFO_ULL_MODE_OFST 20
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_VFIFO_ULL_MODE_LBN 5
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_VFIFO_ULL_MODE_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_MAC_STATS_40G_TX_SIZE_BINS_OFST 20
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_EVQ_TYPE_SUPPORTED_OFST 20
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_EVQ_TYPE_SUPPORTED_LBN 7
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_EVQ_TYPE_SUPPORTED_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_EVQ_V2_OFST 20
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_EVQ_V2_LBN 7
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_EVQ_V2_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MAC_TIMESTAMPING_OFST 20
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MAC_TIMESTAMPING_LBN 8
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MAC_TIMESTAMPING_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TIMESTAMP_OFST 20
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TIMESTAMP_LBN 9
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TIMESTAMP_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_SNIFF_OFST 20
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_SNIFF_LBN 10
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_SNIFF_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_SNIFF_OFST 20
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_SNIFF_LBN 11
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_SNIFF_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_OFST 20
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_MCDI_BACKGROUND_OFST 20
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_MCDI_BACKGROUND_LBN 13
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_MCDI_BACKGROUND_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_MCDI_DB_RETURN_OFST 20
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_MCDI_DB_RETURN_LBN 14
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_MCDI_DB_RETURN_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_CTPIO_OFST 20
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_CTPIO_LBN 15
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_CTPIO_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_TSA_SUPPORT_OFST 20
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_TSA_SUPPORT_LBN 16
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_TSA_SUPPORT_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_TSA_BOUND_OFST 20
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_TSA_BOUND_LBN 17
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_TSA_BOUND_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_SF_ADAPTER_AUTHENTICATION_OFST 20
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_FILTER_ACTION_FLAG_OFST 20
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_FILTER_ACTION_FLAG_LBN 19
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_FILTER_ACTION_FLAG_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_FILTER_ACTION_MARK_OFST 20
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_FILTER_ACTION_MARK_LBN 20
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_FILTER_ACTION_MARK_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_EQUAL_STRIDE_SUPER_BUFFER_OFST 20
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_EQUAL_STRIDE_PACKED_STREAM_OFST 20
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_L3XUDP_SUPPORT_OFST 20
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_L3XUDP_SUPPORT_LBN 22
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_L3XUDP_SUPPORT_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_FW_SUBVARIANT_NO_TX_CSUM_OFST 20
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_VI_SPREADING_OFST 20
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_VI_SPREADING_LBN 24
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_VI_SPREADING_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_HLB_IDLE_OFST 20
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_HLB_IDLE_LBN 25
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_HLB_IDLE_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_RXQ_NO_CONT_EV_OFST 20
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_RXQ_NO_CONT_EV_LBN 26
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_RXQ_NO_CONT_EV_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_RXQ_WITH_BUFFER_SIZE_OFST 20
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_RXQ_WITH_BUFFER_SIZE_LBN 27
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_RXQ_WITH_BUFFER_SIZE_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_BUNDLE_UPDATE_OFST 20
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_BUNDLE_UPDATE_LBN 28
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_BUNDLE_UPDATE_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V3_OFST 20
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V3_LBN 29
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V3_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_DYNAMIC_SENSORS_OFST 20
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_DYNAMIC_SENSORS_LBN 30
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_DYNAMIC_SENSORS_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_OFST 20
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_LBN 31
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial TX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: TX PD firmware for telemetry prototyping (Medford2 development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_TESTFW_TELEMETRY 0x1
+/* enum: TX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant TX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */
+/* enum: TX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_DPDK 0xa
+/* enum: TX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* Hardware capabilities of NIC */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_HW_CAPABILITIES_OFST 12
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_HW_CAPABILITIES_LEN 4
+/* Licensed capabilities */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_LICENSE_CAPABILITIES_OFST 16
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_LICENSE_CAPABILITIES_LEN 4
+/* Second word of flags. Not present on older firmware (check the length).
+ */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_FLAGS2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_FLAGS2_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_V2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_V2_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_V2_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_V2_ENCAP_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_V2_ENCAP_LBN 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_V2_ENCAP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_EVQ_TIMER_CTRL_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_EVQ_TIMER_CTRL_LBN 2
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_EVQ_TIMER_CTRL_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_EVENT_CUT_THROUGH_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_EVENT_CUT_THROUGH_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_EVENT_CUT_THROUGH_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_CUT_THROUGH_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_CUT_THROUGH_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_CUT_THROUGH_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_VFIFO_ULL_MODE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_VFIFO_ULL_MODE_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_VFIFO_ULL_MODE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_MAC_STATS_40G_TX_SIZE_BINS_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_INIT_EVQ_TYPE_SUPPORTED_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_INIT_EVQ_TYPE_SUPPORTED_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_INIT_EVQ_TYPE_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_INIT_EVQ_V2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_INIT_EVQ_V2_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_INIT_EVQ_V2_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_MAC_TIMESTAMPING_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_MAC_TIMESTAMPING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_MAC_TIMESTAMPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TIMESTAMP_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TIMESTAMP_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_SNIFF_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_SNIFF_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_SNIFF_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_SNIFF_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_SNIFF_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_SNIFF_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_MCDI_BACKGROUND_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_MCDI_BACKGROUND_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_MCDI_BACKGROUND_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_MCDI_DB_RETURN_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_MCDI_DB_RETURN_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_MCDI_DB_RETURN_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_CTPIO_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_CTPIO_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_CTPIO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TSA_SUPPORT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TSA_SUPPORT_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TSA_SUPPORT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TSA_BOUND_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TSA_BOUND_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TSA_BOUND_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_SF_ADAPTER_AUTHENTICATION_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_FILTER_ACTION_FLAG_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_FILTER_ACTION_FLAG_LBN 19
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_FILTER_ACTION_FLAG_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_FILTER_ACTION_MARK_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_FILTER_ACTION_MARK_LBN 20
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_FILTER_ACTION_MARK_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_EQUAL_STRIDE_SUPER_BUFFER_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_EQUAL_STRIDE_PACKED_STREAM_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_L3XUDP_SUPPORT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_L3XUDP_SUPPORT_LBN 22
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_L3XUDP_SUPPORT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_FW_SUBVARIANT_NO_TX_CSUM_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_VI_SPREADING_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_VI_SPREADING_LBN 24
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_VI_SPREADING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_HLB_IDLE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_HLB_IDLE_LBN 25
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_HLB_IDLE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_INIT_RXQ_NO_CONT_EV_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_INIT_RXQ_NO_CONT_EV_LBN 26
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_INIT_RXQ_NO_CONT_EV_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_INIT_RXQ_WITH_BUFFER_SIZE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_INIT_RXQ_WITH_BUFFER_SIZE_LBN 27
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_INIT_RXQ_WITH_BUFFER_SIZE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_BUNDLE_UPDATE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_BUNDLE_UPDATE_LBN 28
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_BUNDLE_UPDATE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_V3_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_V3_LBN 29
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_V3_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_DYNAMIC_SENSORS_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_DYNAMIC_SENSORS_LBN 30
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_DYNAMIC_SENSORS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_LBN 31
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_WIDTH 1
 /* Number of FATSOv2 contexts per datapath supported by this NIC (when
  * TX_TSO_V2 == 1). Not present on older firmware (check the length).
  */
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2
 /* One byte per PF containing the number of the external port assigned to this
  * PF, indexed by PF number. Special values indicate that a PF is either not
  * present or not assigned.
  */
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16
 /* enum: The caller is not permitted to access information on this PF. */
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_ACCESS_NOT_PERMITTED 0xff
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_ACCESS_NOT_PERMITTED 0xff
 /* enum: PF does not exist. */
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_PF_NOT_PRESENT 0xfe
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_PF_NOT_PRESENT 0xfe
 /* enum: PF does exist but is not assigned to any external port. */
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_PF_NOT_ASSIGNED 0xfd
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_PF_NOT_ASSIGNED 0xfd
 /* enum: This value indicates that PF is assigned, but it cannot be expressed
  * in this field. It is intended for a possible future situation where a more
  * complex scheme of PFs to ports mapping is being used. The future driver
  * should look for a new field supporting the new scheme. The current/old
  * driver should treat this value as PF_NOT_ASSIGNED.
  */
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc
 /* One byte per PF containing the number of its VFs, indexed by PF number. A
  * special value indicates that a PF is not present.
  */
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_VFS_PER_PF_OFST 42
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_VFS_PER_PF_LEN 1
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_VFS_PER_PF_NUM 16
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_NUM_VFS_PER_PF_OFST 42
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_NUM_VFS_PER_PF_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_NUM_VFS_PER_PF_NUM 16
 /* enum: The caller is not permitted to access information on this PF. */
-/* MC_CMD_GET_CAPABILITIES_V5_OUT_ACCESS_NOT_PERMITTED 0xff */
+/* MC_CMD_GET_CAPABILITIES_V7_OUT_ACCESS_NOT_PERMITTED 0xff */
 /* enum: PF does not exist. */
-/* MC_CMD_GET_CAPABILITIES_V5_OUT_PF_NOT_PRESENT 0xfe */
-/* Number of VIs available for each external port */
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_VIS_PER_PORT_OFST 58
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_VIS_PER_PORT_LEN 2
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_VIS_PER_PORT_NUM 4
+/* MC_CMD_GET_CAPABILITIES_V7_OUT_PF_NOT_PRESENT 0xfe */
+/* Number of VIs available for external ports 0-3. For devices with more than
+ * four ports, the remainder are in NUM_VIS_PER_PORT2 in
+ * GET_CAPABILITIES_V12_OUT.
+ */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_NUM_VIS_PER_PORT_OFST 58
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_NUM_VIS_PER_PORT_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_NUM_VIS_PER_PORT_NUM 4
 /* Size of RX descriptor cache expressed as binary logarithm The actual size
  * equals (2 ^ RX_DESC_CACHE_SIZE)
  */
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_DESC_CACHE_SIZE_OFST 66
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_DESC_CACHE_SIZE_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_DESC_CACHE_SIZE_OFST 66
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_DESC_CACHE_SIZE_LEN 1
 /* Size of TX descriptor cache expressed as binary logarithm The actual size
  * equals (2 ^ TX_DESC_CACHE_SIZE)
  */
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_DESC_CACHE_SIZE_OFST 67
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_DESC_CACHE_SIZE_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_DESC_CACHE_SIZE_OFST 67
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_DESC_CACHE_SIZE_LEN 1
 /* Total number of available PIO buffers */
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_PIO_BUFFS_OFST 68
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_PIO_BUFFS_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_NUM_PIO_BUFFS_OFST 68
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_NUM_PIO_BUFFS_LEN 2
 /* Size of a single PIO buffer */
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_SIZE_PIO_BUFF_OFST 70
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_SIZE_PIO_BUFF_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_SIZE_PIO_BUFF_OFST 70
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_SIZE_PIO_BUFF_LEN 2
 /* On chips later than Medford the amount of address space assigned to each VI
  * is configurable. This is a global setting that the driver must query to
  * discover the VI to address mapping. Cut-through PIO (CTPIO) is not available
  * with 8k VI windows.
  */
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_VI_WINDOW_MODE_OFST 72
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_VI_WINDOW_MODE_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_VI_WINDOW_MODE_OFST 72
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_VI_WINDOW_MODE_LEN 1
 /* enum: Each VI occupies 8k as on Huntington and Medford. PIO is at offset 4k.
  * CTPIO is not mapped.
  */
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_VI_WINDOW_MODE_8K 0x0
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_VI_WINDOW_MODE_8K 0x0
 /* enum: Each VI occupies 16k. PIO is at offset 4k. CTPIO is at offset 12k. */
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_VI_WINDOW_MODE_16K 0x1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_VI_WINDOW_MODE_16K 0x1
 /* enum: Each VI occupies 64k. PIO is at offset 4k. CTPIO is at offset 12k. */
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_VI_WINDOW_MODE_64K 0x2
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_VI_WINDOW_MODE_64K 0x2
 /* Number of vFIFOs per adapter that can be used for VFIFO Stuffing
  * (SF-115995-SW) in the present configuration of firmware and port mode.
  */
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_VFIFO_STUFFING_NUM_VFIFOS_OFST 73
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_VFIFO_STUFFING_NUM_VFIFOS_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_VFIFO_STUFFING_NUM_VFIFOS_OFST 73
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_VFIFO_STUFFING_NUM_VFIFOS_LEN 1
 /* Number of buffers per adapter that can be used for VFIFO Stuffing
  * (SF-115995-SW) in the present configuration of firmware and port mode.
  */
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_OFST 74
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_OFST 74
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_LEN 2
 /* Entry count in the MAC stats array, including the final GENERATION_END
  * entry. For MAC stats DMA, drivers should allocate a buffer large enough to
  * hold at least this many 64-bit stats values, if they wish to receive all
  * available stats. If the buffer is shorter than MAC_STATS_NUM_STATS * 8, the
  * stats array returned will be truncated.
  */
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_MAC_STATS_NUM_STATS_OFST 76
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_MAC_STATS_NUM_STATS_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_MAC_STATS_NUM_STATS_OFST 76
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_MAC_STATS_NUM_STATS_LEN 2
 /* Maximum supported value for MC_CMD_FILTER_OP_V3/MATCH_MARK_VALUE. This field
  * will only be non-zero if MC_CMD_GET_CAPABILITIES/FILTER_ACTION_MARK is set.
  */
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_FILTER_ACTION_MARK_MAX_OFST 80
-#define MC_CMD_GET_CAPABILITIES_V5_OUT_FILTER_ACTION_MARK_MAX_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_FILTER_ACTION_MARK_MAX_OFST 80
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_FILTER_ACTION_MARK_MAX_LEN 4
+/* On devices where the INIT_RXQ_WITH_BUFFER_SIZE flag (in
+ * GET_CAPABILITIES_OUT_V2) is set, drivers have to specify a buffer size when
+ * they create an RX queue. Due to hardware limitations, only a small number of
+ * different buffer sizes may be available concurrently. Nonzero entries in
+ * this array are the sizes of buffers which the system guarantees will be
+ * available for use. If the list is empty, there are no limitations on
+ * concurrent buffer sizes.
+ */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_GUARANTEED_RX_BUFFER_SIZES_OFST 84
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_GUARANTEED_RX_BUFFER_SIZES_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_GUARANTEED_RX_BUFFER_SIZES_NUM 16
+/* Third word of flags. Not present on older firmware (check the length).
+ */
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_FLAGS3_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_FLAGS3_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_WOL_ETHERWAKE_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_WOL_ETHERWAKE_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_WOL_ETHERWAKE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RSS_EVEN_SPREADING_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RSS_EVEN_SPREADING_LBN 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RSS_EVEN_SPREADING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RSS_SELECTABLE_TABLE_SIZE_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RSS_SELECTABLE_TABLE_SIZE_LBN 2
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RSS_SELECTABLE_TABLE_SIZE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_MAE_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_MAE_SUPPORTED_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_MAE_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_VDPA_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_VDPA_SUPPORTED_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_VDPA_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_UNSOL_EV_CREDIT_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_UNSOL_EV_CREDIT_SUPPORTED_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_UNSOL_EV_CREDIT_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_ENCAPSULATED_MCDI_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_ENCAPSULATED_MCDI_SUPPORTED_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_ENCAPSULATED_MCDI_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_EXTERNAL_MAE_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_EXTERNAL_MAE_SUPPORTED_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_EXTERNAL_MAE_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_NVRAM_UPDATE_ABORT_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_NVRAM_UPDATE_ABORT_SUPPORTED_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_NVRAM_UPDATE_ABORT_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_MAE_ACTION_SET_ALLOC_V2_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_MAE_ACTION_SET_ALLOC_V2_SUPPORTED_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_MAE_ACTION_SET_ALLOC_V2_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RSS_STEER_ON_OUTER_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RSS_STEER_ON_OUTER_SUPPORTED_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_RSS_STEER_ON_OUTER_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_DYNAMIC_MPORT_JOURNAL_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_DYNAMIC_MPORT_JOURNAL_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_CLIENT_CMD_VF_PROXY_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_CLIENT_CMD_VF_PROXY_LBN 15
+#define
MC_CMD_GET_CAPABILITIES_V7_OUT_CLIENT_CMD_VF_PROXY_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_LBN 16 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_CXL_CONFIG_ENABLE_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_CXL_CONFIG_ENABLE_LBN 17 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_CXL_CONFIG_ENABLE_WIDTH 1 -/* MC_CMD_GET_CAPABILITIES_V6_OUT msgresponse */ -#define MC_CMD_GET_CAPABILITIES_V6_OUT_LEN 148 +/* MC_CMD_GET_CAPABILITIES_V8_OUT msgresponse */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_LEN 160 /* First word of flags. */ -#define MC_CMD_GET_CAPABILITIES_V6_OUT_FLAGS1_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_FLAGS1_LEN 4 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_VPORT_RECONFIGURE_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_VPORT_RECONFIGURE_LBN 3 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_VPORT_RECONFIGURE_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_STRIPING_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_STRIPING_LBN 4 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_STRIPING_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_VADAPTOR_QUERY_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_VADAPTOR_QUERY_LBN 5 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_VADAPTOR_QUERY_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_EVB_PORT_VLAN_RESTRICT_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_DRV_ATTACH_PREBOOT_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_DRV_ATTACH_PREBOOT_LBN 7 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_DRV_ATTACH_PREBOOT_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_FORCE_EVENT_MERGING_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_FORCE_EVENT_MERGING_LBN 8 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_SET_MAC_ENHANCED_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_SET_MAC_ENHANCED_LBN 9 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_SET_MAC_ENHANCED_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_MAC_SECURITY_FILTERING_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_MAC_SECURITY_FILTERING_LBN 12 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_ADDITIONAL_RSS_MODES_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_ADDITIONAL_RSS_MODES_LBN 13 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_ADDITIONAL_RSS_MODES_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_QBB_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_QBB_LBN 14 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_QBB_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PACKED_STREAM_VAR_BUFFERS_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15 -#define 
MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_RSS_LIMITED_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_RSS_LIMITED_LBN 16 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_RSS_LIMITED_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PACKED_STREAM_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PACKED_STREAM_LBN 17 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PACKED_STREAM_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_INCLUDE_FCS_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_INCLUDE_FCS_LBN 18 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_INCLUDE_FCS_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_VLAN_INSERTION_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_VLAN_INSERTION_LBN 19 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_VLAN_INSERTION_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_VLAN_STRIPPING_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_VLAN_STRIPPING_LBN 20 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_VLAN_STRIPPING_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_LBN 21 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PREFIX_LEN_0_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PREFIX_LEN_0_LBN 22 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PREFIX_LEN_0_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PREFIX_LEN_14_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PREFIX_LEN_14_LBN 23 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PREFIX_LEN_14_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_TIMESTAMP_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_TIMESTAMP_LBN 24 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_TIMESTAMP_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_BATCHING_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_BATCHING_LBN 25 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_BATCHING_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_MCAST_FILTER_CHAINING_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_MCAST_FILTER_CHAINING_LBN 26 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_MCAST_FILTER_CHAINING_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_PM_AND_RXDP_COUNTERS_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_PM_AND_RXDP_COUNTERS_LBN 27 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_DISABLE_SCATTER_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_DISABLE_SCATTER_LBN 28 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_DISABLE_SCATTER_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_MCAST_UDP_LOOPBACK_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_EVB_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_EVB_LBN 30 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_EVB_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_VXLAN_NVGRE_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_VXLAN_NVGRE_LBN 31 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_VXLAN_NVGRE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_FLAGS1_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_FLAGS1_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VPORT_RECONFIGURE_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VPORT_RECONFIGURE_LBN 3 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VPORT_RECONFIGURE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_STRIPING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_STRIPING_LBN 4 +#define 
MC_CMD_GET_CAPABILITIES_V8_OUT_TX_STRIPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VADAPTOR_QUERY_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VADAPTOR_QUERY_LBN 5 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VADAPTOR_QUERY_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_EVB_PORT_VLAN_RESTRICT_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_DRV_ATTACH_PREBOOT_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_DRV_ATTACH_PREBOOT_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_DRV_ATTACH_PREBOOT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_FORCE_EVENT_MERGING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_FORCE_EVENT_MERGING_LBN 8 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_SET_MAC_ENHANCED_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_SET_MAC_ENHANCED_LBN 9 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_SET_MAC_ENHANCED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_MAC_SECURITY_FILTERING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_MAC_SECURITY_FILTERING_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_ADDITIONAL_RSS_MODES_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_ADDITIONAL_RSS_MODES_LBN 13 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_ADDITIONAL_RSS_MODES_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_QBB_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_QBB_LBN 14 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_QBB_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_PACKED_STREAM_VAR_BUFFERS_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_RSS_LIMITED_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_RSS_LIMITED_LBN 16 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_RSS_LIMITED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_PACKED_STREAM_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_PACKED_STREAM_LBN 17 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_PACKED_STREAM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_INCLUDE_FCS_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_INCLUDE_FCS_LBN 18 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_INCLUDE_FCS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_VLAN_INSERTION_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_VLAN_INSERTION_LBN 19 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_VLAN_INSERTION_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_VLAN_STRIPPING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_VLAN_STRIPPING_LBN 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_VLAN_STRIPPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_WIDTH 1 +#define 
MC_CMD_GET_CAPABILITIES_V8_OUT_RX_PREFIX_LEN_0_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_PREFIX_LEN_0_LBN 22 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_PREFIX_LEN_0_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_PREFIX_LEN_14_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_PREFIX_LEN_14_LBN 23 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_PREFIX_LEN_14_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_TIMESTAMP_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_TIMESTAMP_LBN 24 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_TIMESTAMP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_BATCHING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_BATCHING_LBN 25 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_BATCHING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_MCAST_FILTER_CHAINING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_MCAST_FILTER_CHAINING_LBN 26 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_MCAST_FILTER_CHAINING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_PM_AND_RXDP_COUNTERS_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_PM_AND_RXDP_COUNTERS_LBN 27 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_DISABLE_SCATTER_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_DISABLE_SCATTER_LBN 28 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_DISABLE_SCATTER_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_MCAST_UDP_LOOPBACK_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_EVB_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_EVB_LBN 30 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_EVB_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VXLAN_NVGRE_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VXLAN_NVGRE_LBN 31 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VXLAN_NVGRE_WIDTH 1 /* RxDPCPU firmware id. 
*/ -#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_DPCPU_FW_ID_OFST 4 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_DPCPU_FW_ID_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_DPCPU_FW_ID_OFST 4 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_DPCPU_FW_ID_LEN 2 /* enum: Standard RXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP 0x0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP 0x0 /* enum: Low latency RXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_LOW_LATENCY 0x1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_LOW_LATENCY 0x1 /* enum: Packed stream RXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_PACKED_STREAM 0x2 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_PACKED_STREAM 0x2 /* enum: Rules engine RXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_RULES_ENGINE 0x5 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_RULES_ENGINE 0x5 /* enum: DPDK RXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_DPDK 0x6 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_DPDK 0x6 /* enum: BIST RXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_BIST 0x10a +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_BIST 0x10a /* enum: RXDP Test firmware image 1 */ -#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101 /* enum: RXDP Test firmware image 2 */ -#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102 /* enum: RXDP Test firmware image 3 */ -#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103 /* enum: RXDP Test firmware image 4 */ -#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104 /* enum: RXDP Test firmware image 5 */ -#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_TEST_BACKPRESSURE 0x105 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_TEST_BACKPRESSURE 0x105 /* enum: RXDP Test firmware image 6 */ -#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106 /* enum: RXDP Test firmware image 7 */ -#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107 /* enum: RXDP Test firmware image 8 */ -#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_TEST_FW_DISABLE_DL 0x108 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_TEST_FW_DISABLE_DL 0x108 /* enum: RXDP Test firmware image 9 */ -#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b /* enum: RXDP Test firmware image 10 */ -#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_TEST_FW_SLOW 0x10c +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_TEST_FW_SLOW 0x10c /* TxDPCPU firmware id. 
*/ -#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_DPCPU_FW_ID_OFST 6 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_DPCPU_FW_ID_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_DPCPU_FW_ID_OFST 6 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_DPCPU_FW_ID_LEN 2 /* enum: Standard TXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXDP 0x0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXDP 0x0 /* enum: Low latency TXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXDP_LOW_LATENCY 0x1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXDP_LOW_LATENCY 0x1 /* enum: High packet rate TXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXDP_HIGH_PACKET_RATE 0x3 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXDP_HIGH_PACKET_RATE 0x3 /* enum: Rules engine TXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXDP_RULES_ENGINE 0x5 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXDP_RULES_ENGINE 0x5 /* enum: DPDK TXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXDP_DPDK 0x6 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXDP_DPDK 0x6 /* enum: BIST TXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXDP_BIST 0x12d +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXDP_BIST 0x12d /* enum: TXDP Test firmware image 1 */ -#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXDP_TEST_FW_TSO_EDIT 0x101 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXDP_TEST_FW_TSO_EDIT 0x101 /* enum: TXDP Test firmware image 2 */ -#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102 /* enum: TXDP CSR bus test firmware */ -#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXDP_TEST_FW_CSR 0x103 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_VERSION_OFST 8 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_VERSION_LEN 2 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_VERSION_REV_OFST 8 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_VERSION_REV_LBN 0 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_VERSION_REV_WIDTH 12 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_VERSION_TYPE_OFST 8 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_VERSION_TYPE_LBN 12 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXDP_TEST_FW_CSR 0x103 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_VERSION_OFST 8 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_VERSION_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_VERSION_REV_OFST 8 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_VERSION_REV_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_VERSION_REV_WIDTH 12 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_VERSION_TYPE_OFST 8 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_VERSION_TYPE_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4 /* enum: reserved value - do not use (may indicate alternative interpretation * of REV field in future) */ -#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_RESERVED 0x0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_RESERVED 0x0 /* enum: Trivial RX PD firmware for early Huntington development (Huntington * development only) */ -#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1 /* enum: RX PD firmware for telemetry prototyping (Medford2 development only) */ -#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_TESTFW_TELEMETRY 0x1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_TESTFW_TELEMETRY 0x1 /* enum: RX PD firmware with approximately Siena-compatible behaviour * (Huntington development only) 
*/ -#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2 /* enum: Full featured RX PD production firmware */ -#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3 /* enum: (deprecated original name for the FULL_FEATURED variant) */ -#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_VSWITCH 0x3 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_VSWITCH 0x3 /* enum: siena_compat variant RX PD firmware using PM rather than MAC * (Huntington development only) */ -#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 /* enum: Low latency RX PD production firmware */ -#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum: Packed stream RX PD production firmware */ -#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6 /* enum: RX PD firmware handling layer 2 only for high packet rate performance * tests (Medford development only) */ -#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7 /* enum: Rules engine RX PD production firmware */ -#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8 /* enum: Custom firmware variant (see SF-119495-PD and bug69716) */ -#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_L3XUDP 0x9 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_L3XUDP 0x9 /* enum: DPDK RX PD production firmware */ -#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_DPDK 0xa +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_DPDK 0xa /* enum: RX PD firmware for GUE parsing prototype (Medford development only) */ -#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe /* enum: RX PD firmware parsing but not filtering network overlay tunnel * encapsulations (Medford development only) */ -#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf -#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_VERSION_OFST 10 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_VERSION_LEN 2 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_VERSION_REV_OFST 10 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_VERSION_REV_LBN 0 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_VERSION_REV_WIDTH 12 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_VERSION_TYPE_OFST 10 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_VERSION_TYPE_LBN 12 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_VERSION_OFST 10 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_VERSION_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_VERSION_REV_OFST 10 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_VERSION_REV_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_VERSION_REV_WIDTH 12 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_VERSION_TYPE_OFST 10 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_VERSION_TYPE_LBN 12 +#define 
MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4 /* enum: reserved value - do not use (may indicate alternative interpretation * of REV field in future) */ -#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_RESERVED 0x0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_RESERVED 0x0 /* enum: Trivial TX PD firmware for early Huntington development (Huntington * development only) */ -#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1 /* enum: TX PD firmware for telemetry prototyping (Medford2 development only) */ -#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_TESTFW_TELEMETRY 0x1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_TESTFW_TELEMETRY 0x1 /* enum: TX PD firmware with approximately Siena-compatible behaviour * (Huntington development only) */ -#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2 /* enum: Full featured TX PD production firmware */ -#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3 /* enum: (deprecated original name for the FULL_FEATURED variant) */ -#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_VSWITCH 0x3 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_VSWITCH 0x3 /* enum: siena_compat variant TX PD firmware using PM rather than MAC * (Huntington development only) */ -#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */ /* enum: TX PD firmware handling layer 2 only for high packet rate performance * tests (Medford development only) */ -#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7 /* enum: Rules engine TX PD production firmware */ -#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8 /* enum: Custom firmware variant (see SF-119495-PD and bug69716) */ -#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_L3XUDP 0x9 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_L3XUDP 0x9 /* enum: DPDK TX PD production firmware */ -#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_DPDK 0xa +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_DPDK 0xa /* enum: RX PD firmware for GUE parsing prototype (Medford development only) */ -#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe /* Hardware capabilities of NIC */ -#define MC_CMD_GET_CAPABILITIES_V6_OUT_HW_CAPABILITIES_OFST 12 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_HW_CAPABILITIES_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_HW_CAPABILITIES_OFST 12 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_HW_CAPABILITIES_LEN 4 /* Licensed capabilities */ -#define MC_CMD_GET_CAPABILITIES_V6_OUT_LICENSE_CAPABILITIES_OFST 16 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_LICENSE_CAPABILITIES_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_LICENSE_CAPABILITIES_OFST 16 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_LICENSE_CAPABILITIES_LEN 4 /* Second word of flags. Not present on older firmware (check the length). 
*/ -#define MC_CMD_GET_CAPABILITIES_V6_OUT_FLAGS2_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_FLAGS2_LEN 4 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_V2_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_V2_LBN 0 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_V2_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_V2_ENCAP_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_V2_ENCAP_LBN 1 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_V2_ENCAP_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_EVQ_TIMER_CTRL_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_EVQ_TIMER_CTRL_LBN 2 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_EVQ_TIMER_CTRL_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_EVENT_CUT_THROUGH_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_EVENT_CUT_THROUGH_LBN 3 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_EVENT_CUT_THROUGH_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_CUT_THROUGH_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_CUT_THROUGH_LBN 4 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_CUT_THROUGH_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_VFIFO_ULL_MODE_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_VFIFO_ULL_MODE_LBN 5 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_VFIFO_ULL_MODE_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_MAC_STATS_40G_TX_SIZE_BINS_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_EVQ_TYPE_SUPPORTED_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_EVQ_TYPE_SUPPORTED_LBN 7 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_EVQ_TYPE_SUPPORTED_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_EVQ_V2_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_EVQ_V2_LBN 7 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_EVQ_V2_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_MAC_TIMESTAMPING_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_MAC_TIMESTAMPING_LBN 8 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_MAC_TIMESTAMPING_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TIMESTAMP_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TIMESTAMP_LBN 9 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TIMESTAMP_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_SNIFF_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_SNIFF_LBN 10 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_SNIFF_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_SNIFF_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_SNIFF_LBN 11 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_SNIFF_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_MCDI_BACKGROUND_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_MCDI_BACKGROUND_LBN 13 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_MCDI_BACKGROUND_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_MCDI_DB_RETURN_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_MCDI_DB_RETURN_LBN 14 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_MCDI_DB_RETURN_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_CTPIO_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_CTPIO_LBN 15 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_CTPIO_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_TSA_SUPPORT_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_TSA_SUPPORT_LBN 16 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_TSA_SUPPORT_WIDTH 1 -#define 
MC_CMD_GET_CAPABILITIES_V6_OUT_TSA_BOUND_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_TSA_BOUND_LBN 17 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_TSA_BOUND_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_SF_ADAPTER_AUTHENTICATION_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_FILTER_ACTION_FLAG_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_FILTER_ACTION_FLAG_LBN 19 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_FILTER_ACTION_FLAG_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_FILTER_ACTION_MARK_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_FILTER_ACTION_MARK_LBN 20 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_FILTER_ACTION_MARK_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_EQUAL_STRIDE_SUPER_BUFFER_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_EQUAL_STRIDE_PACKED_STREAM_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_L3XUDP_SUPPORT_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_L3XUDP_SUPPORT_LBN 22 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_L3XUDP_SUPPORT_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_FW_SUBVARIANT_NO_TX_CSUM_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_VI_SPREADING_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_VI_SPREADING_LBN 24 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_VI_SPREADING_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_HLB_IDLE_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_HLB_IDLE_LBN 25 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_HLB_IDLE_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_RXQ_NO_CONT_EV_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_RXQ_NO_CONT_EV_LBN 26 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_RXQ_NO_CONT_EV_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_RXQ_WITH_BUFFER_SIZE_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_RXQ_WITH_BUFFER_SIZE_LBN 27 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_RXQ_WITH_BUFFER_SIZE_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_BUNDLE_UPDATE_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_BUNDLE_UPDATE_LBN 28 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_BUNDLE_UPDATE_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_V3_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_V3_LBN 29 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_V3_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_DYNAMIC_SENSORS_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_DYNAMIC_SENSORS_LBN 30 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_DYNAMIC_SENSORS_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_LBN 31 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_FLAGS2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_FLAGS2_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_V2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_V2_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_V2_WIDTH 1 +#define 
MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_V2_ENCAP_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_V2_ENCAP_LBN 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_V2_ENCAP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_EVQ_TIMER_CTRL_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_EVQ_TIMER_CTRL_LBN 2 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_EVQ_TIMER_CTRL_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_EVENT_CUT_THROUGH_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_EVENT_CUT_THROUGH_LBN 3 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_EVENT_CUT_THROUGH_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_CUT_THROUGH_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_CUT_THROUGH_LBN 4 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_CUT_THROUGH_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_VFIFO_ULL_MODE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_VFIFO_ULL_MODE_LBN 5 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_VFIFO_ULL_MODE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_MAC_STATS_40G_TX_SIZE_BINS_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_INIT_EVQ_TYPE_SUPPORTED_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_INIT_EVQ_TYPE_SUPPORTED_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_INIT_EVQ_TYPE_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_INIT_EVQ_V2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_INIT_EVQ_V2_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_INIT_EVQ_V2_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_MAC_TIMESTAMPING_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_MAC_TIMESTAMPING_LBN 8 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_MAC_TIMESTAMPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TIMESTAMP_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TIMESTAMP_LBN 9 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TIMESTAMP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_SNIFF_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_SNIFF_LBN 10 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_SNIFF_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_SNIFF_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_SNIFF_LBN 11 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_SNIFF_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_MCDI_BACKGROUND_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_MCDI_BACKGROUND_LBN 13 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_MCDI_BACKGROUND_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_MCDI_DB_RETURN_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_MCDI_DB_RETURN_LBN 14 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_MCDI_DB_RETURN_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_CTPIO_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_CTPIO_LBN 15 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_CTPIO_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TSA_SUPPORT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TSA_SUPPORT_LBN 16 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TSA_SUPPORT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TSA_BOUND_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TSA_BOUND_LBN 17 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TSA_BOUND_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_SF_ADAPTER_AUTHENTICATION_OFST 20 +#define 
MC_CMD_GET_CAPABILITIES_V8_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_FILTER_ACTION_FLAG_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_FILTER_ACTION_FLAG_LBN 19 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_FILTER_ACTION_FLAG_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_FILTER_ACTION_MARK_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_FILTER_ACTION_MARK_LBN 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_FILTER_ACTION_MARK_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_EQUAL_STRIDE_SUPER_BUFFER_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_EQUAL_STRIDE_PACKED_STREAM_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_L3XUDP_SUPPORT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_L3XUDP_SUPPORT_LBN 22 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_L3XUDP_SUPPORT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_FW_SUBVARIANT_NO_TX_CSUM_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VI_SPREADING_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VI_SPREADING_LBN 24 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VI_SPREADING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_HLB_IDLE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_HLB_IDLE_LBN 25 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_HLB_IDLE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_INIT_RXQ_NO_CONT_EV_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_INIT_RXQ_NO_CONT_EV_LBN 26 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_INIT_RXQ_NO_CONT_EV_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_INIT_RXQ_WITH_BUFFER_SIZE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_INIT_RXQ_WITH_BUFFER_SIZE_LBN 27 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_INIT_RXQ_WITH_BUFFER_SIZE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_BUNDLE_UPDATE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_BUNDLE_UPDATE_LBN 28 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_BUNDLE_UPDATE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_V3_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_V3_LBN 29 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_V3_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_DYNAMIC_SENSORS_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_DYNAMIC_SENSORS_LBN 30 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_DYNAMIC_SENSORS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_LBN 31 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_WIDTH 1 /* Number of FATSOv2 contexts per datapath supported by this NIC (when * TX_TSO_V2 == 1). Not present on older firmware (check the length). */ -#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2 /* One byte per PF containing the number of the external port assigned to this * PF, indexed by PF number. 
Special values indicate that a PF is either not * present or not assigned. */ -#define MC_CMD_GET_CAPABILITIES_V6_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16 /* enum: The caller is not permitted to access information on this PF. */ -#define MC_CMD_GET_CAPABILITIES_V6_OUT_ACCESS_NOT_PERMITTED 0xff +#define MC_CMD_GET_CAPABILITIES_V8_OUT_ACCESS_NOT_PERMITTED 0xff /* enum: PF does not exist. */ -#define MC_CMD_GET_CAPABILITIES_V6_OUT_PF_NOT_PRESENT 0xfe +#define MC_CMD_GET_CAPABILITIES_V8_OUT_PF_NOT_PRESENT 0xfe /* enum: PF does exist but is not assigned to any external port. */ -#define MC_CMD_GET_CAPABILITIES_V6_OUT_PF_NOT_ASSIGNED 0xfd +#define MC_CMD_GET_CAPABILITIES_V8_OUT_PF_NOT_ASSIGNED 0xfd /* enum: This value indicates that PF is assigned, but it cannot be expressed * in this field. It is intended for a possible future situation where a more * complex scheme of PFs to ports mapping is being used. The future driver * should look for a new field supporting the new scheme. The current/old * driver should treat this value as PF_NOT_ASSIGNED. */ -#define MC_CMD_GET_CAPABILITIES_V6_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc +#define MC_CMD_GET_CAPABILITIES_V8_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc /* One byte per PF containing the number of its VFs, indexed by PF number. A * special value indicates that a PF is not present. */ -#define MC_CMD_GET_CAPABILITIES_V6_OUT_NUM_VFS_PER_PF_OFST 42 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_NUM_VFS_PER_PF_LEN 1 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_NUM_VFS_PER_PF_NUM 16 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_NUM_VFS_PER_PF_OFST 42 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_NUM_VFS_PER_PF_LEN 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_NUM_VFS_PER_PF_NUM 16 /* enum: The caller is not permitted to access information on this PF. */ -/* MC_CMD_GET_CAPABILITIES_V6_OUT_ACCESS_NOT_PERMITTED 0xff */ +/* MC_CMD_GET_CAPABILITIES_V8_OUT_ACCESS_NOT_PERMITTED 0xff */ /* enum: PF does not exist. */ -/* MC_CMD_GET_CAPABILITIES_V6_OUT_PF_NOT_PRESENT 0xfe */ -/* Number of VIs available for each external port */ -#define MC_CMD_GET_CAPABILITIES_V6_OUT_NUM_VIS_PER_PORT_OFST 58 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_NUM_VIS_PER_PORT_LEN 2 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_NUM_VIS_PER_PORT_NUM 4 +/* MC_CMD_GET_CAPABILITIES_V8_OUT_PF_NOT_PRESENT 0xfe */ +/* Number of VIs available for external ports 0-3. For devices with more than + * four ports, the remainder are in NUM_VIS_PER_PORT2 in + * GET_CAPABILITIES_V12_OUT. 
+ */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_NUM_VIS_PER_PORT_OFST 58 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_NUM_VIS_PER_PORT_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_NUM_VIS_PER_PORT_NUM 4 /* Size of RX descriptor cache expressed as binary logarithm The actual size * equals (2 ^ RX_DESC_CACHE_SIZE) */ -#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_DESC_CACHE_SIZE_OFST 66 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_DESC_CACHE_SIZE_LEN 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_DESC_CACHE_SIZE_OFST 66 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_DESC_CACHE_SIZE_LEN 1 /* Size of TX descriptor cache expressed as binary logarithm The actual size * equals (2 ^ TX_DESC_CACHE_SIZE) */ -#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_DESC_CACHE_SIZE_OFST 67 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_DESC_CACHE_SIZE_LEN 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_DESC_CACHE_SIZE_OFST 67 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_DESC_CACHE_SIZE_LEN 1 /* Total number of available PIO buffers */ -#define MC_CMD_GET_CAPABILITIES_V6_OUT_NUM_PIO_BUFFS_OFST 68 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_NUM_PIO_BUFFS_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_NUM_PIO_BUFFS_OFST 68 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_NUM_PIO_BUFFS_LEN 2 /* Size of a single PIO buffer */ -#define MC_CMD_GET_CAPABILITIES_V6_OUT_SIZE_PIO_BUFF_OFST 70 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_SIZE_PIO_BUFF_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_SIZE_PIO_BUFF_OFST 70 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_SIZE_PIO_BUFF_LEN 2 /* On chips later than Medford the amount of address space assigned to each VI * is configurable. This is a global setting that the driver must query to * discover the VI to address mapping. Cut-through PIO (CTPIO) is not available * with 8k VI windows. */ -#define MC_CMD_GET_CAPABILITIES_V6_OUT_VI_WINDOW_MODE_OFST 72 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_VI_WINDOW_MODE_LEN 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VI_WINDOW_MODE_OFST 72 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VI_WINDOW_MODE_LEN 1 /* enum: Each VI occupies 8k as on Huntington and Medford. PIO is at offset 4k. * CTPIO is not mapped. */ -#define MC_CMD_GET_CAPABILITIES_V6_OUT_VI_WINDOW_MODE_8K 0x0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VI_WINDOW_MODE_8K 0x0 /* enum: Each VI occupies 16k. PIO is at offset 4k. CTPIO is at offset 12k. */ -#define MC_CMD_GET_CAPABILITIES_V6_OUT_VI_WINDOW_MODE_16K 0x1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VI_WINDOW_MODE_16K 0x1 /* enum: Each VI occupies 64k. PIO is at offset 4k. CTPIO is at offset 12k. */ -#define MC_CMD_GET_CAPABILITIES_V6_OUT_VI_WINDOW_MODE_64K 0x2 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VI_WINDOW_MODE_64K 0x2 /* Number of vFIFOs per adapter that can be used for VFIFO Stuffing * (SF-115995-SW) in the present configuration of firmware and port mode. */ -#define MC_CMD_GET_CAPABILITIES_V6_OUT_VFIFO_STUFFING_NUM_VFIFOS_OFST 73 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_VFIFO_STUFFING_NUM_VFIFOS_LEN 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VFIFO_STUFFING_NUM_VFIFOS_OFST 73 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VFIFO_STUFFING_NUM_VFIFOS_LEN 1 /* Number of buffers per adapter that can be used for VFIFO Stuffing * (SF-115995-SW) in the present configuration of firmware and port mode. 
*/ -#define MC_CMD_GET_CAPABILITIES_V6_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_OFST 74 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_OFST 74 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_LEN 2 /* Entry count in the MAC stats array, including the final GENERATION_END * entry. For MAC stats DMA, drivers should allocate a buffer large enough to * hold at least this many 64-bit stats values, if they wish to receive all * available stats. If the buffer is shorter than MAC_STATS_NUM_STATS * 8, the * stats array returned will be truncated. */ -#define MC_CMD_GET_CAPABILITIES_V6_OUT_MAC_STATS_NUM_STATS_OFST 76 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_MAC_STATS_NUM_STATS_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_MAC_STATS_NUM_STATS_OFST 76 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_MAC_STATS_NUM_STATS_LEN 2 /* Maximum supported value for MC_CMD_FILTER_OP_V3/MATCH_MARK_VALUE. This field * will only be non-zero if MC_CMD_GET_CAPABILITIES/FILTER_ACTION_MARK is set. */ -#define MC_CMD_GET_CAPABILITIES_V6_OUT_FILTER_ACTION_MARK_MAX_OFST 80 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_FILTER_ACTION_MARK_MAX_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_FILTER_ACTION_MARK_MAX_OFST 80 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_FILTER_ACTION_MARK_MAX_LEN 4 /* On devices where the INIT_RXQ_WITH_BUFFER_SIZE flag (in * GET_CAPABILITIES_OUT_V2) is set, drivers have to specify a buffer size when * they create an RX queue. Due to hardware limitations, only a small number of @@ -16292,457 +17048,531 @@ * available for use. If the list is empty, there are no limitations on * concurrent buffer sizes. */ -#define MC_CMD_GET_CAPABILITIES_V6_OUT_GUARANTEED_RX_BUFFER_SIZES_OFST 84 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_GUARANTEED_RX_BUFFER_SIZES_LEN 4 -#define MC_CMD_GET_CAPABILITIES_V6_OUT_GUARANTEED_RX_BUFFER_SIZES_NUM 16 - -/* MC_CMD_GET_CAPABILITIES_V7_OUT msgresponse */ -#define MC_CMD_GET_CAPABILITIES_V7_OUT_LEN 152 -/* First word of flags. 
*/ -#define MC_CMD_GET_CAPABILITIES_V7_OUT_FLAGS1_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_FLAGS1_LEN 4 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_VPORT_RECONFIGURE_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_VPORT_RECONFIGURE_LBN 3 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_VPORT_RECONFIGURE_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_STRIPING_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_STRIPING_LBN 4 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_STRIPING_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_VADAPTOR_QUERY_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_VADAPTOR_QUERY_LBN 5 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_VADAPTOR_QUERY_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_EVB_PORT_VLAN_RESTRICT_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_DRV_ATTACH_PREBOOT_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_DRV_ATTACH_PREBOOT_LBN 7 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_DRV_ATTACH_PREBOOT_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_FORCE_EVENT_MERGING_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_FORCE_EVENT_MERGING_LBN 8 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_SET_MAC_ENHANCED_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_SET_MAC_ENHANCED_LBN 9 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_SET_MAC_ENHANCED_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_MAC_SECURITY_FILTERING_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_MAC_SECURITY_FILTERING_LBN 12 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_ADDITIONAL_RSS_MODES_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_ADDITIONAL_RSS_MODES_LBN 13 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_ADDITIONAL_RSS_MODES_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_QBB_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_QBB_LBN 14 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_QBB_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_PACKED_STREAM_VAR_BUFFERS_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_RSS_LIMITED_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_RSS_LIMITED_LBN 16 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_RSS_LIMITED_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_PACKED_STREAM_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_PACKED_STREAM_LBN 17 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_PACKED_STREAM_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_INCLUDE_FCS_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_INCLUDE_FCS_LBN 18 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_INCLUDE_FCS_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_VLAN_INSERTION_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_VLAN_INSERTION_LBN 19 
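
Taken together, the OFST/LBN/WIDTH triplets in these definitions encode each capability flag's position: OFST is the byte offset of a 32-bit little-endian word in the response, LBN the bit number within that word, and WIDTH the field width in bits (1 for a flag). A driver is expected to guard every access with the actual response length, which is how the "Not present on older firmware (check the length)" comments work and why growing the response from 148 bytes (V6) through 152 bytes (V7) to 160 bytes (V8) stays backward compatible. Below is a minimal sketch of that pattern, assuming a response buffer outbuf of outlen bytes already obtained from MC_CMD_GET_CAPABILITIES; mcdi_dword and mcdi_flag are illustrative helpers, not the sfc driver's real accessors.

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

/* Illustrative helper: fetch the little-endian 32-bit word at byte
 * offset `ofst` of an MCDI response (assumes a little-endian host
 * for brevity).
 */
static uint32_t mcdi_dword(const uint8_t *outbuf, size_t ofst)
{
	uint32_t word;

	memcpy(&word, outbuf + ofst, sizeof(word));
	return word;
}

/* Test a single-bit capability flag given its OFST/LBN pair. Older
 * firmware reports the absence of a flag word simply by returning a
 * shorter response, so anything beyond `outlen` is treated as clear.
 */
static bool mcdi_flag(const uint8_t *outbuf, size_t outlen,
		      size_t ofst, unsigned int lbn)
{
	if (outlen < ofst + sizeof(uint32_t))
		return false;
	return (mcdi_dword(outbuf, ofst) >> lbn) & 1;
}

For example, mcdi_flag(outbuf, outlen, MC_CMD_GET_CAPABILITIES_V8_OUT_MAE_SUPPORTED_OFST, MC_CMD_GET_CAPABILITIES_V8_OUT_MAE_SUPPORTED_LBN) would test bit 3 of FLAGS3 at offset 148, and would correctly return false against firmware that only fills the 148-byte V6 layout.
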
-#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_VLAN_INSERTION_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_VLAN_STRIPPING_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_VLAN_STRIPPING_LBN 20 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_VLAN_STRIPPING_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_LBN 21 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_PREFIX_LEN_0_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_PREFIX_LEN_0_LBN 22 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_PREFIX_LEN_0_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_PREFIX_LEN_14_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_PREFIX_LEN_14_LBN 23 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_PREFIX_LEN_14_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_TIMESTAMP_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_TIMESTAMP_LBN 24 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_TIMESTAMP_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_BATCHING_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_BATCHING_LBN 25 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_BATCHING_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_MCAST_FILTER_CHAINING_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_MCAST_FILTER_CHAINING_LBN 26 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_MCAST_FILTER_CHAINING_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_PM_AND_RXDP_COUNTERS_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_PM_AND_RXDP_COUNTERS_LBN 27 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_DISABLE_SCATTER_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_DISABLE_SCATTER_LBN 28 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_DISABLE_SCATTER_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_MCAST_UDP_LOOPBACK_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_EVB_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_EVB_LBN 30 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_EVB_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_VXLAN_NVGRE_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_VXLAN_NVGRE_LBN 31 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_VXLAN_NVGRE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_GUARANTEED_RX_BUFFER_SIZES_OFST 84 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_GUARANTEED_RX_BUFFER_SIZES_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_GUARANTEED_RX_BUFFER_SIZES_NUM 16 +/* Third word of flags. Not present on older firmware (check the length). 
*/ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_FLAGS3_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_FLAGS3_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_WOL_ETHERWAKE_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_WOL_ETHERWAKE_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_WOL_ETHERWAKE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RSS_EVEN_SPREADING_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RSS_EVEN_SPREADING_LBN 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RSS_EVEN_SPREADING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RSS_SELECTABLE_TABLE_SIZE_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RSS_SELECTABLE_TABLE_SIZE_LBN 2 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RSS_SELECTABLE_TABLE_SIZE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_MAE_SUPPORTED_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_MAE_SUPPORTED_LBN 3 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_MAE_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VDPA_SUPPORTED_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VDPA_SUPPORTED_LBN 4 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VDPA_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_LBN 5 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_LBN 6 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_UNSOL_EV_CREDIT_SUPPORTED_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_UNSOL_EV_CREDIT_SUPPORTED_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_UNSOL_EV_CREDIT_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_ENCAPSULATED_MCDI_SUPPORTED_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_ENCAPSULATED_MCDI_SUPPORTED_LBN 8 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_ENCAPSULATED_MCDI_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_EXTERNAL_MAE_SUPPORTED_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_EXTERNAL_MAE_SUPPORTED_LBN 9 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_EXTERNAL_MAE_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_NVRAM_UPDATE_ABORT_SUPPORTED_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_NVRAM_UPDATE_ABORT_SUPPORTED_LBN 10 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_NVRAM_UPDATE_ABORT_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_MAE_ACTION_SET_ALLOC_V2_SUPPORTED_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_MAE_ACTION_SET_ALLOC_V2_SUPPORTED_LBN 11 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_MAE_ACTION_SET_ALLOC_V2_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RSS_STEER_ON_OUTER_SUPPORTED_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RSS_STEER_ON_OUTER_SUPPORTED_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RSS_STEER_ON_OUTER_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_LBN 13 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_DYNAMIC_MPORT_JOURNAL_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_DYNAMIC_MPORT_JOURNAL_LBN 14 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_DYNAMIC_MPORT_JOURNAL_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_CLIENT_CMD_VF_PROXY_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_CLIENT_CMD_VF_PROXY_LBN 15 +#define 
MC_CMD_GET_CAPABILITIES_V8_OUT_CLIENT_CMD_VF_PROXY_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_LBN 16 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_CXL_CONFIG_ENABLE_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_CXL_CONFIG_ENABLE_LBN 17 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_CXL_CONFIG_ENABLE_WIDTH 1 +/* These bits are reserved for communicating test-specific capabilities to + * host-side test software. All production drivers should treat this field as + * opaque. + */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TEST_RESERVED_OFST 152 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TEST_RESERVED_LEN 8 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TEST_RESERVED_LO_OFST 152 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TEST_RESERVED_LO_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TEST_RESERVED_LO_LBN 1216 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TEST_RESERVED_LO_WIDTH 32 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TEST_RESERVED_HI_OFST 156 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TEST_RESERVED_HI_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TEST_RESERVED_HI_LBN 1248 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TEST_RESERVED_HI_WIDTH 32 + +/* MC_CMD_GET_CAPABILITIES_V9_OUT msgresponse */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_LEN 184 +/* First word of flags. */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_FLAGS1_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_FLAGS1_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VPORT_RECONFIGURE_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VPORT_RECONFIGURE_LBN 3 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VPORT_RECONFIGURE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_STRIPING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_STRIPING_LBN 4 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_STRIPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VADAPTOR_QUERY_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VADAPTOR_QUERY_LBN 5 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VADAPTOR_QUERY_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_EVB_PORT_VLAN_RESTRICT_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_DRV_ATTACH_PREBOOT_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_DRV_ATTACH_PREBOOT_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_DRV_ATTACH_PREBOOT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_FORCE_EVENT_MERGING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_FORCE_EVENT_MERGING_LBN 8 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_SET_MAC_ENHANCED_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_SET_MAC_ENHANCED_LBN 9 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_SET_MAC_ENHANCED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1 +#define 
MC_CMD_GET_CAPABILITIES_V9_OUT_TX_MAC_SECURITY_FILTERING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_MAC_SECURITY_FILTERING_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_ADDITIONAL_RSS_MODES_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_ADDITIONAL_RSS_MODES_LBN 13 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_ADDITIONAL_RSS_MODES_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_QBB_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_QBB_LBN 14 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_QBB_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_PACKED_STREAM_VAR_BUFFERS_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_RSS_LIMITED_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_RSS_LIMITED_LBN 16 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_RSS_LIMITED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_PACKED_STREAM_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_PACKED_STREAM_LBN 17 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_PACKED_STREAM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_INCLUDE_FCS_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_INCLUDE_FCS_LBN 18 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_INCLUDE_FCS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_VLAN_INSERTION_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_VLAN_INSERTION_LBN 19 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_VLAN_INSERTION_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_VLAN_STRIPPING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_VLAN_STRIPPING_LBN 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_VLAN_STRIPPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_PREFIX_LEN_0_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_PREFIX_LEN_0_LBN 22 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_PREFIX_LEN_0_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_PREFIX_LEN_14_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_PREFIX_LEN_14_LBN 23 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_PREFIX_LEN_14_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_TIMESTAMP_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_TIMESTAMP_LBN 24 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_TIMESTAMP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_BATCHING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_BATCHING_LBN 25 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_BATCHING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_MCAST_FILTER_CHAINING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_MCAST_FILTER_CHAINING_LBN 26 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_MCAST_FILTER_CHAINING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_PM_AND_RXDP_COUNTERS_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_PM_AND_RXDP_COUNTERS_LBN 27 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_DISABLE_SCATTER_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_DISABLE_SCATTER_LBN 28 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_DISABLE_SCATTER_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_MCAST_UDP_LOOPBACK_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_EVB_OFST 0 +#define 
MC_CMD_GET_CAPABILITIES_V9_OUT_EVB_LBN 30 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_EVB_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VXLAN_NVGRE_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VXLAN_NVGRE_LBN 31 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VXLAN_NVGRE_WIDTH 1 /* RxDPCPU firmware id. */ -#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_DPCPU_FW_ID_OFST 4 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_DPCPU_FW_ID_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_DPCPU_FW_ID_OFST 4 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_DPCPU_FW_ID_LEN 2 /* enum: Standard RXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP 0x0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP 0x0 /* enum: Low latency RXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_LOW_LATENCY 0x1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_LOW_LATENCY 0x1 /* enum: Packed stream RXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_PACKED_STREAM 0x2 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_PACKED_STREAM 0x2 /* enum: Rules engine RXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_RULES_ENGINE 0x5 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_RULES_ENGINE 0x5 /* enum: DPDK RXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_DPDK 0x6 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_DPDK 0x6 /* enum: BIST RXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_BIST 0x10a +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_BIST 0x10a /* enum: RXDP Test firmware image 1 */ -#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101 /* enum: RXDP Test firmware image 2 */ -#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102 /* enum: RXDP Test firmware image 3 */ -#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103 /* enum: RXDP Test firmware image 4 */ -#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104 /* enum: RXDP Test firmware image 5 */ -#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_TEST_BACKPRESSURE 0x105 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_TEST_BACKPRESSURE 0x105 /* enum: RXDP Test firmware image 6 */ -#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106 /* enum: RXDP Test firmware image 7 */ -#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107 /* enum: RXDP Test firmware image 8 */ -#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_TEST_FW_DISABLE_DL 0x108 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_TEST_FW_DISABLE_DL 0x108 /* enum: RXDP Test firmware image 9 */ -#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b /* enum: RXDP Test firmware image 10 */ -#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_TEST_FW_SLOW 0x10c +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_TEST_FW_SLOW 0x10c /* TxDPCPU firmware id. 
*/ -#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_DPCPU_FW_ID_OFST 6 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_DPCPU_FW_ID_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_DPCPU_FW_ID_OFST 6 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_DPCPU_FW_ID_LEN 2 /* enum: Standard TXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXDP 0x0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXDP 0x0 /* enum: Low latency TXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXDP_LOW_LATENCY 0x1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXDP_LOW_LATENCY 0x1 /* enum: High packet rate TXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXDP_HIGH_PACKET_RATE 0x3 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXDP_HIGH_PACKET_RATE 0x3 /* enum: Rules engine TXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXDP_RULES_ENGINE 0x5 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXDP_RULES_ENGINE 0x5 /* enum: DPDK TXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXDP_DPDK 0x6 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXDP_DPDK 0x6 /* enum: BIST TXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXDP_BIST 0x12d +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXDP_BIST 0x12d /* enum: TXDP Test firmware image 1 */ -#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXDP_TEST_FW_TSO_EDIT 0x101 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXDP_TEST_FW_TSO_EDIT 0x101 /* enum: TXDP Test firmware image 2 */ -#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102 /* enum: TXDP CSR bus test firmware */ -#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXDP_TEST_FW_CSR 0x103 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_VERSION_OFST 8 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_VERSION_LEN 2 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_VERSION_REV_OFST 8 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_VERSION_REV_LBN 0 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_VERSION_REV_WIDTH 12 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_VERSION_TYPE_OFST 8 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_VERSION_TYPE_LBN 12 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXDP_TEST_FW_CSR 0x103 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_VERSION_OFST 8 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_VERSION_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_VERSION_REV_OFST 8 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_VERSION_REV_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_VERSION_REV_WIDTH 12 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_VERSION_TYPE_OFST 8 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_VERSION_TYPE_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4 /* enum: reserved value - do not use (may indicate alternative interpretation * of REV field in future) */ -#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_RESERVED 0x0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_RESERVED 0x0 /* enum: Trivial RX PD firmware for early Huntington development (Huntington * development only) */ -#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1 /* enum: RX PD firmware for telemetry prototyping (Medford2 development only) */ -#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_TESTFW_TELEMETRY 0x1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_TESTFW_TELEMETRY 0x1 /* enum: RX PD firmware with approximately Siena-compatible behaviour * (Huntington development only) 
*/ -#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2 /* enum: Full featured RX PD production firmware */ -#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3 /* enum: (deprecated original name for the FULL_FEATURED variant) */ -#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_VSWITCH 0x3 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_VSWITCH 0x3 /* enum: siena_compat variant RX PD firmware using PM rather than MAC * (Huntington development only) */ -#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 /* enum: Low latency RX PD production firmware */ -#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum: Packed stream RX PD production firmware */ -#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6 /* enum: RX PD firmware handling layer 2 only for high packet rate performance * tests (Medford development only) */ -#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7 /* enum: Rules engine RX PD production firmware */ -#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8 /* enum: Custom firmware variant (see SF-119495-PD and bug69716) */ -#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_L3XUDP 0x9 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_L3XUDP 0x9 /* enum: DPDK RX PD production firmware */ -#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_DPDK 0xa +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_DPDK 0xa /* enum: RX PD firmware for GUE parsing prototype (Medford development only) */ -#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe /* enum: RX PD firmware parsing but not filtering network overlay tunnel * encapsulations (Medford development only) */ -#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf -#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_VERSION_OFST 10 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_VERSION_LEN 2 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_VERSION_REV_OFST 10 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_VERSION_REV_LBN 0 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_VERSION_REV_WIDTH 12 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_VERSION_TYPE_OFST 10 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_VERSION_TYPE_LBN 12 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_VERSION_OFST 10 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_VERSION_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_VERSION_REV_OFST 10 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_VERSION_REV_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_VERSION_REV_WIDTH 12 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_VERSION_TYPE_OFST 10 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_VERSION_TYPE_LBN 12 +#define 
MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4 /* enum: reserved value - do not use (may indicate alternative interpretation * of REV field in future) */ -#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_RESERVED 0x0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_RESERVED 0x0 /* enum: Trivial TX PD firmware for early Huntington development (Huntington * development only) */ -#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1 /* enum: TX PD firmware for telemetry prototyping (Medford2 development only) */ -#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_TESTFW_TELEMETRY 0x1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_TESTFW_TELEMETRY 0x1 /* enum: TX PD firmware with approximately Siena-compatible behaviour * (Huntington development only) */ -#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2 /* enum: Full featured TX PD production firmware */ -#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3 /* enum: (deprecated original name for the FULL_FEATURED variant) */ -#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_VSWITCH 0x3 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_VSWITCH 0x3 /* enum: siena_compat variant TX PD firmware using PM rather than MAC * (Huntington development only) */ -#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */ /* enum: TX PD firmware handling layer 2 only for high packet rate performance * tests (Medford development only) */ -#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7 /* enum: Rules engine TX PD production firmware */ -#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8 /* enum: Custom firmware variant (see SF-119495-PD and bug69716) */ -#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_L3XUDP 0x9 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_L3XUDP 0x9 /* enum: DPDK TX PD production firmware */ -#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_DPDK 0xa +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_DPDK 0xa /* enum: RX PD firmware for GUE parsing prototype (Medford development only) */ -#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe /* Hardware capabilities of NIC */ -#define MC_CMD_GET_CAPABILITIES_V7_OUT_HW_CAPABILITIES_OFST 12 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_HW_CAPABILITIES_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_HW_CAPABILITIES_OFST 12 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_HW_CAPABILITIES_LEN 4 /* Licensed capabilities */ -#define MC_CMD_GET_CAPABILITIES_V7_OUT_LICENSE_CAPABILITIES_OFST 16 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_LICENSE_CAPABILITIES_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_LICENSE_CAPABILITIES_OFST 16 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_LICENSE_CAPABILITIES_LEN 4 /* Second word of flags. Not present on older firmware (check the length). 
*/ -#define MC_CMD_GET_CAPABILITIES_V7_OUT_FLAGS2_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_FLAGS2_LEN 4 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_V2_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_V2_LBN 0 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_V2_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_V2_ENCAP_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_V2_ENCAP_LBN 1 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_V2_ENCAP_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_EVQ_TIMER_CTRL_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_EVQ_TIMER_CTRL_LBN 2 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_EVQ_TIMER_CTRL_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_EVENT_CUT_THROUGH_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_EVENT_CUT_THROUGH_LBN 3 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_EVENT_CUT_THROUGH_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_CUT_THROUGH_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_CUT_THROUGH_LBN 4 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_CUT_THROUGH_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_VFIFO_ULL_MODE_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_VFIFO_ULL_MODE_LBN 5 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_VFIFO_ULL_MODE_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_MAC_STATS_40G_TX_SIZE_BINS_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_INIT_EVQ_TYPE_SUPPORTED_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_INIT_EVQ_TYPE_SUPPORTED_LBN 7 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_INIT_EVQ_TYPE_SUPPORTED_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_INIT_EVQ_V2_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_INIT_EVQ_V2_LBN 7 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_INIT_EVQ_V2_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_MAC_TIMESTAMPING_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_MAC_TIMESTAMPING_LBN 8 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_MAC_TIMESTAMPING_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TIMESTAMP_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TIMESTAMP_LBN 9 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TIMESTAMP_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_SNIFF_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_SNIFF_LBN 10 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_SNIFF_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_SNIFF_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_SNIFF_LBN 11 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_SNIFF_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_MCDI_BACKGROUND_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_MCDI_BACKGROUND_LBN 13 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_MCDI_BACKGROUND_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_MCDI_DB_RETURN_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_MCDI_DB_RETURN_LBN 14 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_MCDI_DB_RETURN_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_CTPIO_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_CTPIO_LBN 15 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_CTPIO_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_TSA_SUPPORT_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_TSA_SUPPORT_LBN 16 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_TSA_SUPPORT_WIDTH 1 -#define 
MC_CMD_GET_CAPABILITIES_V7_OUT_TSA_BOUND_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_TSA_BOUND_LBN 17 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_TSA_BOUND_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_SF_ADAPTER_AUTHENTICATION_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_FILTER_ACTION_FLAG_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_FILTER_ACTION_FLAG_LBN 19 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_FILTER_ACTION_FLAG_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_FILTER_ACTION_MARK_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_FILTER_ACTION_MARK_LBN 20 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_FILTER_ACTION_MARK_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_EQUAL_STRIDE_SUPER_BUFFER_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_EQUAL_STRIDE_PACKED_STREAM_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_L3XUDP_SUPPORT_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_L3XUDP_SUPPORT_LBN 22 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_L3XUDP_SUPPORT_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_FW_SUBVARIANT_NO_TX_CSUM_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_VI_SPREADING_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_VI_SPREADING_LBN 24 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_VI_SPREADING_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_HLB_IDLE_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_HLB_IDLE_LBN 25 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_HLB_IDLE_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_INIT_RXQ_NO_CONT_EV_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_INIT_RXQ_NO_CONT_EV_LBN 26 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_INIT_RXQ_NO_CONT_EV_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_INIT_RXQ_WITH_BUFFER_SIZE_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_INIT_RXQ_WITH_BUFFER_SIZE_LBN 27 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_INIT_RXQ_WITH_BUFFER_SIZE_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_BUNDLE_UPDATE_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_BUNDLE_UPDATE_LBN 28 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_BUNDLE_UPDATE_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_V3_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_V3_LBN 29 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_V3_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_DYNAMIC_SENSORS_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_DYNAMIC_SENSORS_LBN 30 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_DYNAMIC_SENSORS_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_LBN 31 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_FLAGS2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_FLAGS2_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_V2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_V2_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_V2_WIDTH 1 +#define 
MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_V2_ENCAP_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_V2_ENCAP_LBN 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_V2_ENCAP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_EVQ_TIMER_CTRL_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_EVQ_TIMER_CTRL_LBN 2 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_EVQ_TIMER_CTRL_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_EVENT_CUT_THROUGH_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_EVENT_CUT_THROUGH_LBN 3 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_EVENT_CUT_THROUGH_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_CUT_THROUGH_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_CUT_THROUGH_LBN 4 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_CUT_THROUGH_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_VFIFO_ULL_MODE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_VFIFO_ULL_MODE_LBN 5 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_VFIFO_ULL_MODE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_MAC_STATS_40G_TX_SIZE_BINS_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_INIT_EVQ_TYPE_SUPPORTED_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_INIT_EVQ_TYPE_SUPPORTED_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_INIT_EVQ_TYPE_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_INIT_EVQ_V2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_INIT_EVQ_V2_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_INIT_EVQ_V2_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_MAC_TIMESTAMPING_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_MAC_TIMESTAMPING_LBN 8 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_MAC_TIMESTAMPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TIMESTAMP_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TIMESTAMP_LBN 9 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TIMESTAMP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_SNIFF_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_SNIFF_LBN 10 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_SNIFF_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_SNIFF_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_SNIFF_LBN 11 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_SNIFF_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_MCDI_BACKGROUND_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_MCDI_BACKGROUND_LBN 13 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_MCDI_BACKGROUND_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_MCDI_DB_RETURN_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_MCDI_DB_RETURN_LBN 14 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_MCDI_DB_RETURN_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_CTPIO_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_CTPIO_LBN 15 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_CTPIO_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TSA_SUPPORT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TSA_SUPPORT_LBN 16 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TSA_SUPPORT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TSA_BOUND_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TSA_BOUND_LBN 17 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TSA_BOUND_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_SF_ADAPTER_AUTHENTICATION_OFST 20 +#define 
MC_CMD_GET_CAPABILITIES_V9_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_FILTER_ACTION_FLAG_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_FILTER_ACTION_FLAG_LBN 19 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_FILTER_ACTION_FLAG_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_FILTER_ACTION_MARK_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_FILTER_ACTION_MARK_LBN 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_FILTER_ACTION_MARK_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_EQUAL_STRIDE_SUPER_BUFFER_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_EQUAL_STRIDE_PACKED_STREAM_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_L3XUDP_SUPPORT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_L3XUDP_SUPPORT_LBN 22 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_L3XUDP_SUPPORT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_FW_SUBVARIANT_NO_TX_CSUM_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VI_SPREADING_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VI_SPREADING_LBN 24 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VI_SPREADING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_HLB_IDLE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_HLB_IDLE_LBN 25 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_HLB_IDLE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_INIT_RXQ_NO_CONT_EV_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_INIT_RXQ_NO_CONT_EV_LBN 26 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_INIT_RXQ_NO_CONT_EV_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_INIT_RXQ_WITH_BUFFER_SIZE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_INIT_RXQ_WITH_BUFFER_SIZE_LBN 27 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_INIT_RXQ_WITH_BUFFER_SIZE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_BUNDLE_UPDATE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_BUNDLE_UPDATE_LBN 28 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_BUNDLE_UPDATE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_V3_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_V3_LBN 29 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_V3_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_DYNAMIC_SENSORS_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_DYNAMIC_SENSORS_LBN 30 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_DYNAMIC_SENSORS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_LBN 31 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_WIDTH 1 /* Number of FATSOv2 contexts per datapath supported by this NIC (when * TX_TSO_V2 == 1). Not present on older firmware (check the length). */ -#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2 /* One byte per PF containing the number of the external port assigned to this * PF, indexed by PF number. 
Special values indicate that a PF is either not * present or not assigned. */ -#define MC_CMD_GET_CAPABILITIES_V7_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16 /* enum: The caller is not permitted to access information on this PF. */ -#define MC_CMD_GET_CAPABILITIES_V7_OUT_ACCESS_NOT_PERMITTED 0xff +#define MC_CMD_GET_CAPABILITIES_V9_OUT_ACCESS_NOT_PERMITTED 0xff /* enum: PF does not exist. */ -#define MC_CMD_GET_CAPABILITIES_V7_OUT_PF_NOT_PRESENT 0xfe +#define MC_CMD_GET_CAPABILITIES_V9_OUT_PF_NOT_PRESENT 0xfe /* enum: PF does exist but is not assigned to any external port. */ -#define MC_CMD_GET_CAPABILITIES_V7_OUT_PF_NOT_ASSIGNED 0xfd +#define MC_CMD_GET_CAPABILITIES_V9_OUT_PF_NOT_ASSIGNED 0xfd /* enum: This value indicates that PF is assigned, but it cannot be expressed * in this field. It is intended for a possible future situation where a more * complex scheme of PFs to ports mapping is being used. The future driver * should look for a new field supporting the new scheme. The current/old * driver should treat this value as PF_NOT_ASSIGNED. */ -#define MC_CMD_GET_CAPABILITIES_V7_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc +#define MC_CMD_GET_CAPABILITIES_V9_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc /* One byte per PF containing the number of its VFs, indexed by PF number. A * special value indicates that a PF is not present. */ -#define MC_CMD_GET_CAPABILITIES_V7_OUT_NUM_VFS_PER_PF_OFST 42 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_NUM_VFS_PER_PF_LEN 1 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_NUM_VFS_PER_PF_NUM 16 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_NUM_VFS_PER_PF_OFST 42 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_NUM_VFS_PER_PF_LEN 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_NUM_VFS_PER_PF_NUM 16 /* enum: The caller is not permitted to access information on this PF. */ -/* MC_CMD_GET_CAPABILITIES_V7_OUT_ACCESS_NOT_PERMITTED 0xff */ +/* MC_CMD_GET_CAPABILITIES_V9_OUT_ACCESS_NOT_PERMITTED 0xff */ /* enum: PF does not exist. */ -/* MC_CMD_GET_CAPABILITIES_V7_OUT_PF_NOT_PRESENT 0xfe */ -/* Number of VIs available for each external port */ -#define MC_CMD_GET_CAPABILITIES_V7_OUT_NUM_VIS_PER_PORT_OFST 58 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_NUM_VIS_PER_PORT_LEN 2 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_NUM_VIS_PER_PORT_NUM 4 +/* MC_CMD_GET_CAPABILITIES_V9_OUT_PF_NOT_PRESENT 0xfe */ +/* Number of VIs available for external ports 0-3. For devices with more than + * four ports, the remainder are in NUM_VIS_PER_PORT2 in + * GET_CAPABILITIES_V12_OUT. 
+ */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_NUM_VIS_PER_PORT_OFST 58 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_NUM_VIS_PER_PORT_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_NUM_VIS_PER_PORT_NUM 4 /* Size of RX descriptor cache expressed as binary logarithm The actual size * equals (2 ^ RX_DESC_CACHE_SIZE) */ -#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_DESC_CACHE_SIZE_OFST 66 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_DESC_CACHE_SIZE_LEN 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_DESC_CACHE_SIZE_OFST 66 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_DESC_CACHE_SIZE_LEN 1 /* Size of TX descriptor cache expressed as binary logarithm The actual size * equals (2 ^ TX_DESC_CACHE_SIZE) */ -#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_DESC_CACHE_SIZE_OFST 67 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_DESC_CACHE_SIZE_LEN 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_DESC_CACHE_SIZE_OFST 67 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_DESC_CACHE_SIZE_LEN 1 /* Total number of available PIO buffers */ -#define MC_CMD_GET_CAPABILITIES_V7_OUT_NUM_PIO_BUFFS_OFST 68 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_NUM_PIO_BUFFS_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_NUM_PIO_BUFFS_OFST 68 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_NUM_PIO_BUFFS_LEN 2 /* Size of a single PIO buffer */ -#define MC_CMD_GET_CAPABILITIES_V7_OUT_SIZE_PIO_BUFF_OFST 70 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_SIZE_PIO_BUFF_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_SIZE_PIO_BUFF_OFST 70 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_SIZE_PIO_BUFF_LEN 2 /* On chips later than Medford the amount of address space assigned to each VI * is configurable. This is a global setting that the driver must query to * discover the VI to address mapping. Cut-through PIO (CTPIO) is not available * with 8k VI windows. */ -#define MC_CMD_GET_CAPABILITIES_V7_OUT_VI_WINDOW_MODE_OFST 72 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_VI_WINDOW_MODE_LEN 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VI_WINDOW_MODE_OFST 72 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VI_WINDOW_MODE_LEN 1 /* enum: Each VI occupies 8k as on Huntington and Medford. PIO is at offset 4k. * CTPIO is not mapped. */ -#define MC_CMD_GET_CAPABILITIES_V7_OUT_VI_WINDOW_MODE_8K 0x0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VI_WINDOW_MODE_8K 0x0 /* enum: Each VI occupies 16k. PIO is at offset 4k. CTPIO is at offset 12k. */ -#define MC_CMD_GET_CAPABILITIES_V7_OUT_VI_WINDOW_MODE_16K 0x1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VI_WINDOW_MODE_16K 0x1 /* enum: Each VI occupies 64k. PIO is at offset 4k. CTPIO is at offset 12k. */ -#define MC_CMD_GET_CAPABILITIES_V7_OUT_VI_WINDOW_MODE_64K 0x2 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VI_WINDOW_MODE_64K 0x2 /* Number of vFIFOs per adapter that can be used for VFIFO Stuffing * (SF-115995-SW) in the present configuration of firmware and port mode. */ -#define MC_CMD_GET_CAPABILITIES_V7_OUT_VFIFO_STUFFING_NUM_VFIFOS_OFST 73 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_VFIFO_STUFFING_NUM_VFIFOS_LEN 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VFIFO_STUFFING_NUM_VFIFOS_OFST 73 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VFIFO_STUFFING_NUM_VFIFOS_LEN 1 /* Number of buffers per adapter that can be used for VFIFO Stuffing * (SF-115995-SW) in the present configuration of firmware and port mode. 
*/ -#define MC_CMD_GET_CAPABILITIES_V7_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_OFST 74 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_OFST 74 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_LEN 2 /* Entry count in the MAC stats array, including the final GENERATION_END * entry. For MAC stats DMA, drivers should allocate a buffer large enough to * hold at least this many 64-bit stats values, if they wish to receive all * available stats. If the buffer is shorter than MAC_STATS_NUM_STATS * 8, the * stats array returned will be truncated. */ -#define MC_CMD_GET_CAPABILITIES_V7_OUT_MAC_STATS_NUM_STATS_OFST 76 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_MAC_STATS_NUM_STATS_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_MAC_STATS_NUM_STATS_OFST 76 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_MAC_STATS_NUM_STATS_LEN 2 /* Maximum supported value for MC_CMD_FILTER_OP_V3/MATCH_MARK_VALUE. This field * will only be non-zero if MC_CMD_GET_CAPABILITIES/FILTER_ACTION_MARK is set. */ -#define MC_CMD_GET_CAPABILITIES_V7_OUT_FILTER_ACTION_MARK_MAX_OFST 80 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_FILTER_ACTION_MARK_MAX_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_FILTER_ACTION_MARK_MAX_OFST 80 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_FILTER_ACTION_MARK_MAX_LEN 4 /* On devices where the INIT_RXQ_WITH_BUFFER_SIZE flag (in * GET_CAPABILITIES_OUT_V2) is set, drivers have to specify a buffer size when * they create an RX queue. Due to hardware limitations, only a small number of @@ -16751,502 +17581,566 @@ * available for use. If the list is empty, there are no limitations on * concurrent buffer sizes. */ -#define MC_CMD_GET_CAPABILITIES_V7_OUT_GUARANTEED_RX_BUFFER_SIZES_OFST 84 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_GUARANTEED_RX_BUFFER_SIZES_LEN 4 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_GUARANTEED_RX_BUFFER_SIZES_NUM 16 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_GUARANTEED_RX_BUFFER_SIZES_OFST 84 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_GUARANTEED_RX_BUFFER_SIZES_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_GUARANTEED_RX_BUFFER_SIZES_NUM 16 /* Third word of flags. Not present on older firmware (check the length). 
*/ -#define MC_CMD_GET_CAPABILITIES_V7_OUT_FLAGS3_OFST 148 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_FLAGS3_LEN 4 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_WOL_ETHERWAKE_OFST 148 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_WOL_ETHERWAKE_LBN 0 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_WOL_ETHERWAKE_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_RSS_EVEN_SPREADING_OFST 148 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_RSS_EVEN_SPREADING_LBN 1 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_RSS_EVEN_SPREADING_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_RSS_SELECTABLE_TABLE_SIZE_OFST 148 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_RSS_SELECTABLE_TABLE_SIZE_LBN 2 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_RSS_SELECTABLE_TABLE_SIZE_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_MAE_SUPPORTED_OFST 148 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_MAE_SUPPORTED_LBN 3 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_MAE_SUPPORTED_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_VDPA_SUPPORTED_OFST 148 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_VDPA_SUPPORTED_LBN 4 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_VDPA_SUPPORTED_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_OFST 148 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_LBN 5 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_OFST 148 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_LBN 6 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_UNSOL_EV_CREDIT_SUPPORTED_OFST 148 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_UNSOL_EV_CREDIT_SUPPORTED_LBN 7 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_UNSOL_EV_CREDIT_SUPPORTED_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_ENCAPSULATED_MCDI_SUPPORTED_OFST 148 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_ENCAPSULATED_MCDI_SUPPORTED_LBN 8 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_ENCAPSULATED_MCDI_SUPPORTED_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_EXTERNAL_MAE_SUPPORTED_OFST 148 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_EXTERNAL_MAE_SUPPORTED_LBN 9 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_EXTERNAL_MAE_SUPPORTED_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_NVRAM_UPDATE_ABORT_SUPPORTED_OFST 148 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_NVRAM_UPDATE_ABORT_SUPPORTED_LBN 10 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_NVRAM_UPDATE_ABORT_SUPPORTED_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_MAE_ACTION_SET_ALLOC_V2_SUPPORTED_OFST 148 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_MAE_ACTION_SET_ALLOC_V2_SUPPORTED_LBN 11 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_MAE_ACTION_SET_ALLOC_V2_SUPPORTED_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_RSS_STEER_ON_OUTER_SUPPORTED_OFST 148 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_RSS_STEER_ON_OUTER_SUPPORTED_LBN 12 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_RSS_STEER_ON_OUTER_SUPPORTED_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_DYNAMIC_MPORT_JOURNAL_OFST 148 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_DYNAMIC_MPORT_JOURNAL_LBN 14 -#define MC_CMD_GET_CAPABILITIES_V7_OUT_DYNAMIC_MPORT_JOURNAL_WIDTH 1 - -/* MC_CMD_GET_CAPABILITIES_V8_OUT msgresponse */ -#define MC_CMD_GET_CAPABILITIES_V8_OUT_LEN 160 -/* First word of flags. 
*/ -#define MC_CMD_GET_CAPABILITIES_V8_OUT_FLAGS1_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_FLAGS1_LEN 4 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_VPORT_RECONFIGURE_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_VPORT_RECONFIGURE_LBN 3 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_VPORT_RECONFIGURE_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_STRIPING_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_STRIPING_LBN 4 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_STRIPING_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_VADAPTOR_QUERY_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_VADAPTOR_QUERY_LBN 5 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_VADAPTOR_QUERY_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_EVB_PORT_VLAN_RESTRICT_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_DRV_ATTACH_PREBOOT_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_DRV_ATTACH_PREBOOT_LBN 7 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_DRV_ATTACH_PREBOOT_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_FORCE_EVENT_MERGING_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_FORCE_EVENT_MERGING_LBN 8 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_SET_MAC_ENHANCED_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_SET_MAC_ENHANCED_LBN 9 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_SET_MAC_ENHANCED_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_MAC_SECURITY_FILTERING_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_MAC_SECURITY_FILTERING_LBN 12 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_ADDITIONAL_RSS_MODES_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_ADDITIONAL_RSS_MODES_LBN 13 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_ADDITIONAL_RSS_MODES_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_QBB_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_QBB_LBN 14 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_QBB_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_PACKED_STREAM_VAR_BUFFERS_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_RSS_LIMITED_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_RSS_LIMITED_LBN 16 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_RSS_LIMITED_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_PACKED_STREAM_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_PACKED_STREAM_LBN 17 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_PACKED_STREAM_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_INCLUDE_FCS_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_INCLUDE_FCS_LBN 18 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_INCLUDE_FCS_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_VLAN_INSERTION_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_VLAN_INSERTION_LBN 19 
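/* Fields beyond the original response layout, such as FLAGS3 at byte 148 or
 * the V9 RSS limits at bytes 160-183, exist only on newer firmware; the
 * recurring "check the length" comments mean the caller must compare the
 * actual MCDI response length against the field's offset before reading. A
 * hedged sketch of that pattern follows (hypothetical function name, kernel
 * types and get_unaligned_le32() assumed), also showing how a split _LO/_HI
 * pair such as TEST_RESERVED is reassembled into one 64-bit value.
 */
static inline bool caps_v9_read_sketch(const u8 *outbuf, size_t outlen,
				       u32 *flags3, u64 *test_reserved)
{
	/* Older firmware returns a shorter response without these fields */
	if (outlen < MC_CMD_GET_CAPABILITIES_V9_OUT_LEN)
		return false;
	*flags3 = get_unaligned_le32(outbuf +
			MC_CMD_GET_CAPABILITIES_V9_OUT_FLAGS3_OFST);
	/* 64-bit quantities are carried as two little-endian dwords */
	*test_reserved = get_unaligned_le32(outbuf +
			MC_CMD_GET_CAPABILITIES_V9_OUT_TEST_RESERVED_LO_OFST) |
		(u64)get_unaligned_le32(outbuf +
			MC_CMD_GET_CAPABILITIES_V9_OUT_TEST_RESERVED_HI_OFST) << 32;
	return true;
}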
-#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_VLAN_INSERTION_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_VLAN_STRIPPING_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_VLAN_STRIPPING_LBN 20 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_VLAN_STRIPPING_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_LBN 21 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_PREFIX_LEN_0_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_PREFIX_LEN_0_LBN 22 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_PREFIX_LEN_0_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_PREFIX_LEN_14_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_PREFIX_LEN_14_LBN 23 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_PREFIX_LEN_14_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_TIMESTAMP_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_TIMESTAMP_LBN 24 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_TIMESTAMP_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_BATCHING_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_BATCHING_LBN 25 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_BATCHING_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_MCAST_FILTER_CHAINING_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_MCAST_FILTER_CHAINING_LBN 26 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_MCAST_FILTER_CHAINING_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_PM_AND_RXDP_COUNTERS_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_PM_AND_RXDP_COUNTERS_LBN 27 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_DISABLE_SCATTER_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_DISABLE_SCATTER_LBN 28 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_DISABLE_SCATTER_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_MCAST_UDP_LOOPBACK_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_EVB_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_EVB_LBN 30 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_EVB_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_VXLAN_NVGRE_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_VXLAN_NVGRE_LBN 31 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_VXLAN_NVGRE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_FLAGS3_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_FLAGS3_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_WOL_ETHERWAKE_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_WOL_ETHERWAKE_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_WOL_ETHERWAKE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_EVEN_SPREADING_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_EVEN_SPREADING_LBN 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_EVEN_SPREADING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_SELECTABLE_TABLE_SIZE_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_SELECTABLE_TABLE_SIZE_LBN 2 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_SELECTABLE_TABLE_SIZE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_MAE_SUPPORTED_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_MAE_SUPPORTED_LBN 3 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_MAE_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VDPA_SUPPORTED_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VDPA_SUPPORTED_LBN 4 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VDPA_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_OFST 148 +#define 
MC_CMD_GET_CAPABILITIES_V9_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_LBN 5 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_LBN 6 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_UNSOL_EV_CREDIT_SUPPORTED_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_UNSOL_EV_CREDIT_SUPPORTED_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_UNSOL_EV_CREDIT_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_ENCAPSULATED_MCDI_SUPPORTED_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_ENCAPSULATED_MCDI_SUPPORTED_LBN 8 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_ENCAPSULATED_MCDI_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_EXTERNAL_MAE_SUPPORTED_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_EXTERNAL_MAE_SUPPORTED_LBN 9 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_EXTERNAL_MAE_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_NVRAM_UPDATE_ABORT_SUPPORTED_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_NVRAM_UPDATE_ABORT_SUPPORTED_LBN 10 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_NVRAM_UPDATE_ABORT_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_MAE_ACTION_SET_ALLOC_V2_SUPPORTED_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_MAE_ACTION_SET_ALLOC_V2_SUPPORTED_LBN 11 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_MAE_ACTION_SET_ALLOC_V2_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_STEER_ON_OUTER_SUPPORTED_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_STEER_ON_OUTER_SUPPORTED_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_STEER_ON_OUTER_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_LBN 13 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_DYNAMIC_MPORT_JOURNAL_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_DYNAMIC_MPORT_JOURNAL_LBN 14 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_DYNAMIC_MPORT_JOURNAL_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_CLIENT_CMD_VF_PROXY_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_CLIENT_CMD_VF_PROXY_LBN 15 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_CLIENT_CMD_VF_PROXY_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_LBN 16 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_CXL_CONFIG_ENABLE_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_CXL_CONFIG_ENABLE_LBN 17 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_CXL_CONFIG_ENABLE_WIDTH 1 +/* These bits are reserved for communicating test-specific capabilities to + * host-side test software. All production drivers should treat this field as + * opaque. 
+ */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TEST_RESERVED_OFST 152 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TEST_RESERVED_LEN 8 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TEST_RESERVED_LO_OFST 152 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TEST_RESERVED_LO_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TEST_RESERVED_LO_LBN 1216 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TEST_RESERVED_LO_WIDTH 32 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TEST_RESERVED_HI_OFST 156 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TEST_RESERVED_HI_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TEST_RESERVED_HI_LBN 1248 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TEST_RESERVED_HI_WIDTH 32 +/* The minimum size (in table entries) of indirection table to be allocated + * from the pool for an RSS context. Note that the table size used must be a + * power of 2. + */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_MIN_INDIRECTION_TABLE_SIZE_OFST 160 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_MIN_INDIRECTION_TABLE_SIZE_LEN 4 +/* The maximum size (in table entries) of indirection table to be allocated + * from the pool for an RSS context. Note that the table size used must be a + * power of 2. + */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_MAX_INDIRECTION_TABLE_SIZE_OFST 164 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_MAX_INDIRECTION_TABLE_SIZE_LEN 4 +/* The maximum number of queues that can be used by an RSS context in exclusive + * mode. In exclusive mode the context has a configurable indirection table and + * a configurable RSS key. + */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_MAX_INDIRECTION_QUEUES_OFST 168 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_MAX_INDIRECTION_QUEUES_LEN 4 +/* The maximum number of queues that can be used by an RSS context in even- + * spreading mode. In even-spreading mode the context has no indirection table + * but it does have a configurable RSS key. + */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_MAX_EVEN_SPREADING_QUEUES_OFST 172 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_MAX_EVEN_SPREADING_QUEUES_LEN 4 +/* The total number of RSS contexts supported. Note that the number of + * available contexts using indirection tables is also limited by the + * availability of indirection table space allocated from a common pool. + */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_NUM_CONTEXTS_OFST 176 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_NUM_CONTEXTS_LEN 4 +/* The total amount of indirection table space that can be shared between RSS + * contexts. + */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_TABLE_POOL_SIZE_OFST 180 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_TABLE_POOL_SIZE_LEN 4 + +/* MC_CMD_GET_CAPABILITIES_V10_OUT msgresponse */ +#define MC_CMD_GET_CAPABILITIES_V10_OUT_LEN 192 +/* First word of flags. 
*/ +#define MC_CMD_GET_CAPABILITIES_V10_OUT_FLAGS1_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_FLAGS1_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_VPORT_RECONFIGURE_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_VPORT_RECONFIGURE_LBN 3 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_VPORT_RECONFIGURE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_STRIPING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_STRIPING_LBN 4 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_STRIPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_VADAPTOR_QUERY_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_VADAPTOR_QUERY_LBN 5 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_VADAPTOR_QUERY_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_EVB_PORT_VLAN_RESTRICT_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_DRV_ATTACH_PREBOOT_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_DRV_ATTACH_PREBOOT_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_DRV_ATTACH_PREBOOT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_FORCE_EVENT_MERGING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_FORCE_EVENT_MERGING_LBN 8 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_SET_MAC_ENHANCED_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_SET_MAC_ENHANCED_LBN 9 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_SET_MAC_ENHANCED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_MAC_SECURITY_FILTERING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_MAC_SECURITY_FILTERING_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_ADDITIONAL_RSS_MODES_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_ADDITIONAL_RSS_MODES_LBN 13 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_ADDITIONAL_RSS_MODES_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_QBB_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_QBB_LBN 14 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_QBB_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_PACKED_STREAM_VAR_BUFFERS_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_RSS_LIMITED_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_RSS_LIMITED_LBN 16 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_RSS_LIMITED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_PACKED_STREAM_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_PACKED_STREAM_LBN 17 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_PACKED_STREAM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_INCLUDE_FCS_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_INCLUDE_FCS_LBN 18 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_INCLUDE_FCS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_VLAN_INSERTION_OFST 0 +#define 
MC_CMD_GET_CAPABILITIES_V10_OUT_TX_VLAN_INSERTION_LBN 19 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_VLAN_INSERTION_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_VLAN_STRIPPING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_VLAN_STRIPPING_LBN 20 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_VLAN_STRIPPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_TSO_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_TSO_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_TSO_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_PREFIX_LEN_0_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_PREFIX_LEN_0_LBN 22 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_PREFIX_LEN_0_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_PREFIX_LEN_14_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_PREFIX_LEN_14_LBN 23 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_PREFIX_LEN_14_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_TIMESTAMP_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_TIMESTAMP_LBN 24 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_TIMESTAMP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_BATCHING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_BATCHING_LBN 25 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_BATCHING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_MCAST_FILTER_CHAINING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_MCAST_FILTER_CHAINING_LBN 26 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_MCAST_FILTER_CHAINING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_PM_AND_RXDP_COUNTERS_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_PM_AND_RXDP_COUNTERS_LBN 27 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_DISABLE_SCATTER_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_DISABLE_SCATTER_LBN 28 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_DISABLE_SCATTER_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_MCAST_UDP_LOOPBACK_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_EVB_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_EVB_LBN 30 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_EVB_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_VXLAN_NVGRE_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_VXLAN_NVGRE_LBN 31 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_VXLAN_NVGRE_WIDTH 1 /* RxDPCPU firmware id. 
*/ -#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_DPCPU_FW_ID_OFST 4 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_DPCPU_FW_ID_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_DPCPU_FW_ID_OFST 4 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_DPCPU_FW_ID_LEN 2 /* enum: Standard RXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP 0x0 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXDP 0x0 /* enum: Low latency RXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_LOW_LATENCY 0x1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXDP_LOW_LATENCY 0x1 /* enum: Packed stream RXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_PACKED_STREAM 0x2 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXDP_PACKED_STREAM 0x2 /* enum: Rules engine RXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_RULES_ENGINE 0x5 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXDP_RULES_ENGINE 0x5 /* enum: DPDK RXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_DPDK 0x6 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXDP_DPDK 0x6 /* enum: BIST RXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_BIST 0x10a +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXDP_BIST 0x10a /* enum: RXDP Test firmware image 1 */ -#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101 /* enum: RXDP Test firmware image 2 */ -#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102 /* enum: RXDP Test firmware image 3 */ -#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103 /* enum: RXDP Test firmware image 4 */ -#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104 /* enum: RXDP Test firmware image 5 */ -#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_TEST_BACKPRESSURE 0x105 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXDP_TEST_BACKPRESSURE 0x105 /* enum: RXDP Test firmware image 6 */ -#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106 /* enum: RXDP Test firmware image 7 */ -#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107 /* enum: RXDP Test firmware image 8 */ -#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_TEST_FW_DISABLE_DL 0x108 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXDP_TEST_FW_DISABLE_DL 0x108 /* enum: RXDP Test firmware image 9 */ -#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b /* enum: RXDP Test firmware image 10 */ -#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_TEST_FW_SLOW 0x10c +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXDP_TEST_FW_SLOW 0x10c /* TxDPCPU firmware id. 
*/ -#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_DPCPU_FW_ID_OFST 6 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_DPCPU_FW_ID_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_DPCPU_FW_ID_OFST 6 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_DPCPU_FW_ID_LEN 2 /* enum: Standard TXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXDP 0x0 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXDP 0x0 /* enum: Low latency TXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXDP_LOW_LATENCY 0x1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXDP_LOW_LATENCY 0x1 /* enum: High packet rate TXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXDP_HIGH_PACKET_RATE 0x3 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXDP_HIGH_PACKET_RATE 0x3 /* enum: Rules engine TXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXDP_RULES_ENGINE 0x5 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXDP_RULES_ENGINE 0x5 /* enum: DPDK TXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXDP_DPDK 0x6 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXDP_DPDK 0x6 /* enum: BIST TXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXDP_BIST 0x12d +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXDP_BIST 0x12d /* enum: TXDP Test firmware image 1 */ -#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXDP_TEST_FW_TSO_EDIT 0x101 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXDP_TEST_FW_TSO_EDIT 0x101 /* enum: TXDP Test firmware image 2 */ -#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102 /* enum: TXDP CSR bus test firmware */ -#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXDP_TEST_FW_CSR 0x103 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_VERSION_OFST 8 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_VERSION_LEN 2 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_VERSION_REV_OFST 8 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_VERSION_REV_LBN 0 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_VERSION_REV_WIDTH 12 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_VERSION_TYPE_OFST 8 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_VERSION_TYPE_LBN 12 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXDP_TEST_FW_CSR 0x103 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_VERSION_OFST 8 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_VERSION_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_VERSION_REV_OFST 8 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_VERSION_REV_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_VERSION_REV_WIDTH 12 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_VERSION_TYPE_OFST 8 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_VERSION_TYPE_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4 /* enum: reserved value - do not use (may indicate alternative interpretation * of REV field in future) */ -#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_RESERVED 0x0 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_TYPE_RESERVED 0x0 /* enum: Trivial RX PD firmware for early Huntington development (Huntington * development only) */ -#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1 /* enum: RX PD firmware for telemetry prototyping (Medford2 development only) */ -#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_TESTFW_TELEMETRY 0x1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_TYPE_TESTFW_TELEMETRY 0x1 /* enum: RX PD firmware with approximately Siena-compatible behaviour * 
(Huntington development only) */ -#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2 /* enum: Full featured RX PD production firmware */ -#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3 /* enum: (deprecated original name for the FULL_FEATURED variant) */ -#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_VSWITCH 0x3 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_TYPE_VSWITCH 0x3 /* enum: siena_compat variant RX PD firmware using PM rather than MAC * (Huntington development only) */ -#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 /* enum: Low latency RX PD production firmware */ -#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum: Packed stream RX PD production firmware */ -#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6 /* enum: RX PD firmware handling layer 2 only for high packet rate performance * tests (Medford development only) */ -#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7 /* enum: Rules engine RX PD production firmware */ -#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8 /* enum: Custom firmware variant (see SF-119495-PD and bug69716) */ -#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_L3XUDP 0x9 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_TYPE_L3XUDP 0x9 /* enum: DPDK RX PD production firmware */ -#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_DPDK 0xa +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_TYPE_DPDK 0xa /* enum: RX PD firmware for GUE parsing prototype (Medford development only) */ -#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe /* enum: RX PD firmware parsing but not filtering network overlay tunnel * encapsulations (Medford development only) */ -#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf -#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_VERSION_OFST 10 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_VERSION_LEN 2 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_VERSION_REV_OFST 10 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_VERSION_REV_LBN 0 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_VERSION_REV_WIDTH 12 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_VERSION_TYPE_OFST 10 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_VERSION_TYPE_LBN 12 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_VERSION_OFST 10 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_VERSION_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_VERSION_REV_OFST 10 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_VERSION_REV_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_VERSION_REV_WIDTH 12 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_VERSION_TYPE_OFST 10 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_VERSION_TYPE_LBN 12 
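/* [Editor's note -- illustrative sketch, not part of the patch.]
 * The 16-bit TXPD_FW_VERSION word at offset 10 packs a 12-bit revision
 * (REV, bits 0-11) and a 4-bit variant (TYPE, bits 12-15); the RXPD word
 * at offset 8 defined above has the same layout. A hedged decoding
 * sketch follows -- the struct and function names are hypothetical, and
 * the byte-wise load handles endianness explicitly.
 */
#include <stdint.h>

struct pd_fw_version {
	uint16_t rev;	/* REV field: bits 0-11 */
	uint8_t type;	/* TYPE field: bits 12-15, e.g. 0x3 = FULL_FEATURED */
};

static struct pd_fw_version decode_pd_fw_version(const uint8_t *buf,
						 unsigned int ofst)
{
	/* MCDI responses are little-endian byte streams. */
	uint16_t word = (uint16_t)buf[ofst] | ((uint16_t)buf[ofst + 1] << 8);
	struct pd_fw_version v = {
		.rev = word & 0x0fff,		/* REV_LBN 0, WIDTH 12 */
		.type = (word >> 12) & 0xf,	/* TYPE_LBN 12, WIDTH 4 */
	};

	return v;
}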
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4 /* enum: reserved value - do not use (may indicate alternative interpretation * of REV field in future) */ -#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_RESERVED 0x0 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_TYPE_RESERVED 0x0 /* enum: Trivial TX PD firmware for early Huntington development (Huntington * development only) */ -#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1 /* enum: TX PD firmware for telemetry prototyping (Medford2 development only) */ -#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_TESTFW_TELEMETRY 0x1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_TYPE_TESTFW_TELEMETRY 0x1 /* enum: TX PD firmware with approximately Siena-compatible behaviour * (Huntington development only) */ -#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2 /* enum: Full featured TX PD production firmware */ -#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3 /* enum: (deprecated original name for the FULL_FEATURED variant) */ -#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_VSWITCH 0x3 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_TYPE_VSWITCH 0x3 /* enum: siena_compat variant TX PD firmware using PM rather than MAC * (Huntington development only) */ -#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */ +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */ /* enum: TX PD firmware handling layer 2 only for high packet rate performance * tests (Medford development only) */ -#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7 /* enum: Rules engine TX PD production firmware */ -#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8 /* enum: Custom firmware variant (see SF-119495-PD and bug69716) */ -#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_L3XUDP 0x9 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_TYPE_L3XUDP 0x9 /* enum: DPDK TX PD production firmware */ -#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_DPDK 0xa +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_TYPE_DPDK 0xa /* enum: RX PD firmware for GUE parsing prototype (Medford development only) */ -#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe /* Hardware capabilities of NIC */ -#define MC_CMD_GET_CAPABILITIES_V8_OUT_HW_CAPABILITIES_OFST 12 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_HW_CAPABILITIES_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_HW_CAPABILITIES_OFST 12 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_HW_CAPABILITIES_LEN 4 /* Licensed capabilities */ -#define MC_CMD_GET_CAPABILITIES_V8_OUT_LICENSE_CAPABILITIES_OFST 16 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_LICENSE_CAPABILITIES_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_LICENSE_CAPABILITIES_OFST 16 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_LICENSE_CAPABILITIES_LEN 4 /* Second word of flags. Not present on older firmware (check the length). 
*/ -#define MC_CMD_GET_CAPABILITIES_V8_OUT_FLAGS2_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_FLAGS2_LEN 4 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_V2_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_V2_LBN 0 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_V2_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_V2_ENCAP_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_V2_ENCAP_LBN 1 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_V2_ENCAP_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_EVQ_TIMER_CTRL_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_EVQ_TIMER_CTRL_LBN 2 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_EVQ_TIMER_CTRL_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_EVENT_CUT_THROUGH_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_EVENT_CUT_THROUGH_LBN 3 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_EVENT_CUT_THROUGH_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_CUT_THROUGH_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_CUT_THROUGH_LBN 4 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_CUT_THROUGH_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_VFIFO_ULL_MODE_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_VFIFO_ULL_MODE_LBN 5 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_VFIFO_ULL_MODE_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_MAC_STATS_40G_TX_SIZE_BINS_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_INIT_EVQ_TYPE_SUPPORTED_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_INIT_EVQ_TYPE_SUPPORTED_LBN 7 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_INIT_EVQ_TYPE_SUPPORTED_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_INIT_EVQ_V2_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_INIT_EVQ_V2_LBN 7 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_INIT_EVQ_V2_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_MAC_TIMESTAMPING_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_MAC_TIMESTAMPING_LBN 8 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_MAC_TIMESTAMPING_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TIMESTAMP_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TIMESTAMP_LBN 9 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TIMESTAMP_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_SNIFF_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_SNIFF_LBN 10 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_SNIFF_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_SNIFF_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_SNIFF_LBN 11 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_SNIFF_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_MCDI_BACKGROUND_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_MCDI_BACKGROUND_LBN 13 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_MCDI_BACKGROUND_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_MCDI_DB_RETURN_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_MCDI_DB_RETURN_LBN 14 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_MCDI_DB_RETURN_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_CTPIO_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_CTPIO_LBN 15 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_CTPIO_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_TSA_SUPPORT_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_TSA_SUPPORT_LBN 16 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_TSA_SUPPORT_WIDTH 1 -#define 
MC_CMD_GET_CAPABILITIES_V8_OUT_TSA_BOUND_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_TSA_BOUND_LBN 17 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_TSA_BOUND_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_SF_ADAPTER_AUTHENTICATION_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_FILTER_ACTION_FLAG_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_FILTER_ACTION_FLAG_LBN 19 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_FILTER_ACTION_FLAG_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_FILTER_ACTION_MARK_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_FILTER_ACTION_MARK_LBN 20 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_FILTER_ACTION_MARK_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_EQUAL_STRIDE_SUPER_BUFFER_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_EQUAL_STRIDE_PACKED_STREAM_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_L3XUDP_SUPPORT_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_L3XUDP_SUPPORT_LBN 22 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_L3XUDP_SUPPORT_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_FW_SUBVARIANT_NO_TX_CSUM_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_VI_SPREADING_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_VI_SPREADING_LBN 24 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_VI_SPREADING_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_HLB_IDLE_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_HLB_IDLE_LBN 25 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_HLB_IDLE_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_INIT_RXQ_NO_CONT_EV_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_INIT_RXQ_NO_CONT_EV_LBN 26 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_INIT_RXQ_NO_CONT_EV_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_INIT_RXQ_WITH_BUFFER_SIZE_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_INIT_RXQ_WITH_BUFFER_SIZE_LBN 27 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_INIT_RXQ_WITH_BUFFER_SIZE_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_BUNDLE_UPDATE_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_BUNDLE_UPDATE_LBN 28 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_BUNDLE_UPDATE_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_V3_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_V3_LBN 29 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_V3_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_DYNAMIC_SENSORS_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_DYNAMIC_SENSORS_LBN 30 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_DYNAMIC_SENSORS_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_LBN 31 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_FLAGS2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_FLAGS2_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_TSO_V2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_TSO_V2_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_TSO_V2_WIDTH 1 +#define 
MC_CMD_GET_CAPABILITIES_V10_OUT_TX_TSO_V2_ENCAP_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_TSO_V2_ENCAP_LBN 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_TSO_V2_ENCAP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_EVQ_TIMER_CTRL_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_EVQ_TIMER_CTRL_LBN 2 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_EVQ_TIMER_CTRL_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_EVENT_CUT_THROUGH_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_EVENT_CUT_THROUGH_LBN 3 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_EVENT_CUT_THROUGH_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_CUT_THROUGH_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_CUT_THROUGH_LBN 4 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_CUT_THROUGH_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_VFIFO_ULL_MODE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_VFIFO_ULL_MODE_LBN 5 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_VFIFO_ULL_MODE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_MAC_STATS_40G_TX_SIZE_BINS_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_INIT_EVQ_TYPE_SUPPORTED_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_INIT_EVQ_TYPE_SUPPORTED_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_INIT_EVQ_TYPE_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_INIT_EVQ_V2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_INIT_EVQ_V2_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_INIT_EVQ_V2_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_MAC_TIMESTAMPING_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_MAC_TIMESTAMPING_LBN 8 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_MAC_TIMESTAMPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_TIMESTAMP_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_TIMESTAMP_LBN 9 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_TIMESTAMP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_SNIFF_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_SNIFF_LBN 10 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_SNIFF_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_SNIFF_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_SNIFF_LBN 11 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_SNIFF_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_MCDI_BACKGROUND_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_MCDI_BACKGROUND_LBN 13 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_MCDI_BACKGROUND_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_MCDI_DB_RETURN_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_MCDI_DB_RETURN_LBN 14 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_MCDI_DB_RETURN_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_CTPIO_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_CTPIO_LBN 15 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_CTPIO_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TSA_SUPPORT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TSA_SUPPORT_LBN 16 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TSA_SUPPORT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TSA_BOUND_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TSA_BOUND_LBN 17 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TSA_BOUND_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_SF_ADAPTER_AUTHENTICATION_OFST 20 
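/* [Editor's note -- illustrative sketch, not part of the patch.]
 * FLAGS2 (offset 20) is absent on older firmware, so a driver must
 * check the response length before reading it -- the pattern the
 * "Not present on older firmware (check the length)" comments in this
 * file call out. A hedged sketch under that assumption; the helper
 * name and constant are hypothetical, and a little-endian host is
 * assumed as in the earlier sketch.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define FLAGS2_OFST 20u	/* MC_CMD_GET_CAPABILITIES_V10_OUT_FLAGS2_OFST */

static bool cap_tx_tso_v2(const uint8_t *outbuf, size_t outlen)
{
	uint32_t flags2;

	/* Older firmware returns a shorter response without FLAGS2. */
	if (outlen < FLAGS2_OFST + sizeof(flags2))
		return false;
	memcpy(&flags2, outbuf + FLAGS2_OFST, sizeof(flags2));
	return flags2 & (1u << 0);	/* TX_TSO_V2: LBN 0, WIDTH 1 */
}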
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_FILTER_ACTION_FLAG_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_FILTER_ACTION_FLAG_LBN 19 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_FILTER_ACTION_FLAG_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_FILTER_ACTION_MARK_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_FILTER_ACTION_MARK_LBN 20 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_FILTER_ACTION_MARK_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_EQUAL_STRIDE_SUPER_BUFFER_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_EQUAL_STRIDE_PACKED_STREAM_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_L3XUDP_SUPPORT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_L3XUDP_SUPPORT_LBN 22 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_L3XUDP_SUPPORT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_FW_SUBVARIANT_NO_TX_CSUM_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_VI_SPREADING_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_VI_SPREADING_LBN 24 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_VI_SPREADING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXDP_HLB_IDLE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXDP_HLB_IDLE_LBN 25 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXDP_HLB_IDLE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_INIT_RXQ_NO_CONT_EV_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_INIT_RXQ_NO_CONT_EV_LBN 26 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_INIT_RXQ_NO_CONT_EV_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_INIT_RXQ_WITH_BUFFER_SIZE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_INIT_RXQ_WITH_BUFFER_SIZE_LBN 27 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_INIT_RXQ_WITH_BUFFER_SIZE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_BUNDLE_UPDATE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_BUNDLE_UPDATE_LBN 28 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_BUNDLE_UPDATE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_TSO_V3_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_TSO_V3_LBN 29 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_TSO_V3_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_DYNAMIC_SENSORS_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_DYNAMIC_SENSORS_LBN 30 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_DYNAMIC_SENSORS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_LBN 31 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_WIDTH 1 /* Number of FATSOv2 contexts per datapath supported by this NIC (when * TX_TSO_V2 == 1). Not present on older firmware (check the length). */ -#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2 /* One byte per PF containing the number of the external port assigned to this * PF, indexed by PF number. 
Special values indicate that a PF is either not * present or not assigned. */ -#define MC_CMD_GET_CAPABILITIES_V8_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16 /* enum: The caller is not permitted to access information on this PF. */ -#define MC_CMD_GET_CAPABILITIES_V8_OUT_ACCESS_NOT_PERMITTED 0xff +#define MC_CMD_GET_CAPABILITIES_V10_OUT_ACCESS_NOT_PERMITTED 0xff /* enum: PF does not exist. */ -#define MC_CMD_GET_CAPABILITIES_V8_OUT_PF_NOT_PRESENT 0xfe +#define MC_CMD_GET_CAPABILITIES_V10_OUT_PF_NOT_PRESENT 0xfe /* enum: PF does exist but is not assigned to any external port. */ -#define MC_CMD_GET_CAPABILITIES_V8_OUT_PF_NOT_ASSIGNED 0xfd +#define MC_CMD_GET_CAPABILITIES_V10_OUT_PF_NOT_ASSIGNED 0xfd /* enum: This value indicates that PF is assigned, but it cannot be expressed * in this field. It is intended for a possible future situation where a more * complex scheme of PFs to ports mapping is being used. The future driver * should look for a new field supporting the new scheme. The current/old * driver should treat this value as PF_NOT_ASSIGNED. */ -#define MC_CMD_GET_CAPABILITIES_V8_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc +#define MC_CMD_GET_CAPABILITIES_V10_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc /* One byte per PF containing the number of its VFs, indexed by PF number. A * special value indicates that a PF is not present. */ -#define MC_CMD_GET_CAPABILITIES_V8_OUT_NUM_VFS_PER_PF_OFST 42 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_NUM_VFS_PER_PF_LEN 1 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_NUM_VFS_PER_PF_NUM 16 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_NUM_VFS_PER_PF_OFST 42 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_NUM_VFS_PER_PF_LEN 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_NUM_VFS_PER_PF_NUM 16 /* enum: The caller is not permitted to access information on this PF. */ -/* MC_CMD_GET_CAPABILITIES_V8_OUT_ACCESS_NOT_PERMITTED 0xff */ +/* MC_CMD_GET_CAPABILITIES_V10_OUT_ACCESS_NOT_PERMITTED 0xff */ /* enum: PF does not exist. */ -/* MC_CMD_GET_CAPABILITIES_V8_OUT_PF_NOT_PRESENT 0xfe */ -/* Number of VIs available for each external port */ -#define MC_CMD_GET_CAPABILITIES_V8_OUT_NUM_VIS_PER_PORT_OFST 58 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_NUM_VIS_PER_PORT_LEN 2 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_NUM_VIS_PER_PORT_NUM 4 +/* MC_CMD_GET_CAPABILITIES_V10_OUT_PF_NOT_PRESENT 0xfe */ +/* Number of VIs available for external ports 0-3. For devices with more than + * four ports, the remainder are in NUM_VIS_PER_PORT2 in + * GET_CAPABILITIES_V12_OUT. 
+ */ +#define MC_CMD_GET_CAPABILITIES_V10_OUT_NUM_VIS_PER_PORT_OFST 58 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_NUM_VIS_PER_PORT_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_NUM_VIS_PER_PORT_NUM 4 /* Size of RX descriptor cache expressed as binary logarithm The actual size * equals (2 ^ RX_DESC_CACHE_SIZE) */ -#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_DESC_CACHE_SIZE_OFST 66 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_DESC_CACHE_SIZE_LEN 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_DESC_CACHE_SIZE_OFST 66 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_DESC_CACHE_SIZE_LEN 1 /* Size of TX descriptor cache expressed as binary logarithm The actual size * equals (2 ^ TX_DESC_CACHE_SIZE) */ -#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_DESC_CACHE_SIZE_OFST 67 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_DESC_CACHE_SIZE_LEN 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_DESC_CACHE_SIZE_OFST 67 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_DESC_CACHE_SIZE_LEN 1 /* Total number of available PIO buffers */ -#define MC_CMD_GET_CAPABILITIES_V8_OUT_NUM_PIO_BUFFS_OFST 68 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_NUM_PIO_BUFFS_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_NUM_PIO_BUFFS_OFST 68 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_NUM_PIO_BUFFS_LEN 2 /* Size of a single PIO buffer */ -#define MC_CMD_GET_CAPABILITIES_V8_OUT_SIZE_PIO_BUFF_OFST 70 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_SIZE_PIO_BUFF_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_SIZE_PIO_BUFF_OFST 70 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_SIZE_PIO_BUFF_LEN 2 /* On chips later than Medford the amount of address space assigned to each VI * is configurable. This is a global setting that the driver must query to * discover the VI to address mapping. Cut-through PIO (CTPIO) is not available * with 8k VI windows. */ -#define MC_CMD_GET_CAPABILITIES_V8_OUT_VI_WINDOW_MODE_OFST 72 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_VI_WINDOW_MODE_LEN 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_VI_WINDOW_MODE_OFST 72 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_VI_WINDOW_MODE_LEN 1 /* enum: Each VI occupies 8k as on Huntington and Medford. PIO is at offset 4k. * CTPIO is not mapped. */ -#define MC_CMD_GET_CAPABILITIES_V8_OUT_VI_WINDOW_MODE_8K 0x0 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_VI_WINDOW_MODE_8K 0x0 /* enum: Each VI occupies 16k. PIO is at offset 4k. CTPIO is at offset 12k. */ -#define MC_CMD_GET_CAPABILITIES_V8_OUT_VI_WINDOW_MODE_16K 0x1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_VI_WINDOW_MODE_16K 0x1 /* enum: Each VI occupies 64k. PIO is at offset 4k. CTPIO is at offset 12k. */ -#define MC_CMD_GET_CAPABILITIES_V8_OUT_VI_WINDOW_MODE_64K 0x2 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_VI_WINDOW_MODE_64K 0x2 /* Number of vFIFOs per adapter that can be used for VFIFO Stuffing * (SF-115995-SW) in the present configuration of firmware and port mode. */ -#define MC_CMD_GET_CAPABILITIES_V8_OUT_VFIFO_STUFFING_NUM_VFIFOS_OFST 73 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_VFIFO_STUFFING_NUM_VFIFOS_LEN 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_VFIFO_STUFFING_NUM_VFIFOS_OFST 73 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_VFIFO_STUFFING_NUM_VFIFOS_LEN 1 /* Number of buffers per adapter that can be used for VFIFO Stuffing * (SF-115995-SW) in the present configuration of firmware and port mode. 
*/ -#define MC_CMD_GET_CAPABILITIES_V8_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_OFST 74 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_OFST 74 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_LEN 2 /* Entry count in the MAC stats array, including the final GENERATION_END * entry. For MAC stats DMA, drivers should allocate a buffer large enough to * hold at least this many 64-bit stats values, if they wish to receive all * available stats. If the buffer is shorter than MAC_STATS_NUM_STATS * 8, the * stats array returned will be truncated. */ -#define MC_CMD_GET_CAPABILITIES_V8_OUT_MAC_STATS_NUM_STATS_OFST 76 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_MAC_STATS_NUM_STATS_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_MAC_STATS_NUM_STATS_OFST 76 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_MAC_STATS_NUM_STATS_LEN 2 /* Maximum supported value for MC_CMD_FILTER_OP_V3/MATCH_MARK_VALUE. This field * will only be non-zero if MC_CMD_GET_CAPABILITIES/FILTER_ACTION_MARK is set. */ -#define MC_CMD_GET_CAPABILITIES_V8_OUT_FILTER_ACTION_MARK_MAX_OFST 80 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_FILTER_ACTION_MARK_MAX_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_FILTER_ACTION_MARK_MAX_OFST 80 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_FILTER_ACTION_MARK_MAX_LEN 4 /* On devices where the INIT_RXQ_WITH_BUFFER_SIZE flag (in * GET_CAPABILITIES_OUT_V2) is set, drivers have to specify a buffer size when * they create an RX queue. Due to hardware limitations, only a small number of @@ -17255,516 +18149,580 @@ * available for use. If the list is empty, there are no limitations on * concurrent buffer sizes. */ -#define MC_CMD_GET_CAPABILITIES_V8_OUT_GUARANTEED_RX_BUFFER_SIZES_OFST 84 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_GUARANTEED_RX_BUFFER_SIZES_LEN 4 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_GUARANTEED_RX_BUFFER_SIZES_NUM 16 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_GUARANTEED_RX_BUFFER_SIZES_OFST 84 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_GUARANTEED_RX_BUFFER_SIZES_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_GUARANTEED_RX_BUFFER_SIZES_NUM 16 /* Third word of flags. Not present on older firmware (check the length). 
*/ -#define MC_CMD_GET_CAPABILITIES_V8_OUT_FLAGS3_OFST 148 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_FLAGS3_LEN 4 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_WOL_ETHERWAKE_OFST 148 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_WOL_ETHERWAKE_LBN 0 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_WOL_ETHERWAKE_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_RSS_EVEN_SPREADING_OFST 148 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_RSS_EVEN_SPREADING_LBN 1 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_RSS_EVEN_SPREADING_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_RSS_SELECTABLE_TABLE_SIZE_OFST 148 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_RSS_SELECTABLE_TABLE_SIZE_LBN 2 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_RSS_SELECTABLE_TABLE_SIZE_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_MAE_SUPPORTED_OFST 148 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_MAE_SUPPORTED_LBN 3 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_MAE_SUPPORTED_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_VDPA_SUPPORTED_OFST 148 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_VDPA_SUPPORTED_LBN 4 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_VDPA_SUPPORTED_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_OFST 148 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_LBN 5 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_OFST 148 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_LBN 6 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_UNSOL_EV_CREDIT_SUPPORTED_OFST 148 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_UNSOL_EV_CREDIT_SUPPORTED_LBN 7 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_UNSOL_EV_CREDIT_SUPPORTED_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_ENCAPSULATED_MCDI_SUPPORTED_OFST 148 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_ENCAPSULATED_MCDI_SUPPORTED_LBN 8 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_ENCAPSULATED_MCDI_SUPPORTED_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_EXTERNAL_MAE_SUPPORTED_OFST 148 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_EXTERNAL_MAE_SUPPORTED_LBN 9 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_EXTERNAL_MAE_SUPPORTED_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_NVRAM_UPDATE_ABORT_SUPPORTED_OFST 148 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_NVRAM_UPDATE_ABORT_SUPPORTED_LBN 10 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_NVRAM_UPDATE_ABORT_SUPPORTED_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_MAE_ACTION_SET_ALLOC_V2_SUPPORTED_OFST 148 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_MAE_ACTION_SET_ALLOC_V2_SUPPORTED_LBN 11 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_MAE_ACTION_SET_ALLOC_V2_SUPPORTED_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_RSS_STEER_ON_OUTER_SUPPORTED_OFST 148 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_RSS_STEER_ON_OUTER_SUPPORTED_LBN 12 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_RSS_STEER_ON_OUTER_SUPPORTED_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_DYNAMIC_MPORT_JOURNAL_OFST 148 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_DYNAMIC_MPORT_JOURNAL_LBN 14 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_DYNAMIC_MPORT_JOURNAL_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_FLAGS3_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_FLAGS3_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_WOL_ETHERWAKE_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_WOL_ETHERWAKE_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_WOL_ETHERWAKE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_EVEN_SPREADING_OFST 148 +#define 
MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_EVEN_SPREADING_LBN 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_EVEN_SPREADING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_SELECTABLE_TABLE_SIZE_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_SELECTABLE_TABLE_SIZE_LBN 2 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_SELECTABLE_TABLE_SIZE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_MAE_SUPPORTED_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_MAE_SUPPORTED_LBN 3 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_MAE_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_VDPA_SUPPORTED_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_VDPA_SUPPORTED_LBN 4 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_VDPA_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_LBN 5 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_LBN 6 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_UNSOL_EV_CREDIT_SUPPORTED_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_UNSOL_EV_CREDIT_SUPPORTED_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_UNSOL_EV_CREDIT_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_ENCAPSULATED_MCDI_SUPPORTED_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_ENCAPSULATED_MCDI_SUPPORTED_LBN 8 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_ENCAPSULATED_MCDI_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_EXTERNAL_MAE_SUPPORTED_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_EXTERNAL_MAE_SUPPORTED_LBN 9 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_EXTERNAL_MAE_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_NVRAM_UPDATE_ABORT_SUPPORTED_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_NVRAM_UPDATE_ABORT_SUPPORTED_LBN 10 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_NVRAM_UPDATE_ABORT_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_MAE_ACTION_SET_ALLOC_V2_SUPPORTED_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_MAE_ACTION_SET_ALLOC_V2_SUPPORTED_LBN 11 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_MAE_ACTION_SET_ALLOC_V2_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_STEER_ON_OUTER_SUPPORTED_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_STEER_ON_OUTER_SUPPORTED_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_STEER_ON_OUTER_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_LBN 13 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_DYNAMIC_MPORT_JOURNAL_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_DYNAMIC_MPORT_JOURNAL_LBN 14 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_DYNAMIC_MPORT_JOURNAL_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_CLIENT_CMD_VF_PROXY_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_CLIENT_CMD_VF_PROXY_LBN 15 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_CLIENT_CMD_VF_PROXY_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_LBN 16 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_WIDTH 1 +#define 
MC_CMD_GET_CAPABILITIES_V10_OUT_CXL_CONFIG_ENABLE_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_CXL_CONFIG_ENABLE_LBN 17 +#define MC_CMD_GET_CAPABILITIES_V10_OUT_CXL_CONFIG_ENABLE_WIDTH 1 /* These bits are reserved for communicating test-specific capabilities to * host-side test software. All production drivers should treat this field as * opaque. */ -#define MC_CMD_GET_CAPABILITIES_V8_OUT_TEST_RESERVED_OFST 152 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_TEST_RESERVED_LEN 8 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_TEST_RESERVED_LO_OFST 152 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_TEST_RESERVED_LO_LEN 4 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_TEST_RESERVED_LO_LBN 1216 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_TEST_RESERVED_LO_WIDTH 32 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_TEST_RESERVED_HI_OFST 156 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_TEST_RESERVED_HI_LEN 4 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_TEST_RESERVED_HI_LBN 1248 -#define MC_CMD_GET_CAPABILITIES_V8_OUT_TEST_RESERVED_HI_WIDTH 32 - -/* MC_CMD_GET_CAPABILITIES_V9_OUT msgresponse */ -#define MC_CMD_GET_CAPABILITIES_V9_OUT_LEN 184 -/* First word of flags. */ -#define MC_CMD_GET_CAPABILITIES_V9_OUT_FLAGS1_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V9_OUT_FLAGS1_LEN 4 -#define MC_CMD_GET_CAPABILITIES_V9_OUT_VPORT_RECONFIGURE_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V9_OUT_VPORT_RECONFIGURE_LBN 3 -#define MC_CMD_GET_CAPABILITIES_V9_OUT_VPORT_RECONFIGURE_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_STRIPING_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_STRIPING_LBN 4 -#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_STRIPING_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V9_OUT_VADAPTOR_QUERY_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V9_OUT_VADAPTOR_QUERY_LBN 5 -#define MC_CMD_GET_CAPABILITIES_V9_OUT_VADAPTOR_QUERY_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V9_OUT_EVB_PORT_VLAN_RESTRICT_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V9_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6 -#define MC_CMD_GET_CAPABILITIES_V9_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V9_OUT_DRV_ATTACH_PREBOOT_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V9_OUT_DRV_ATTACH_PREBOOT_LBN 7 -#define MC_CMD_GET_CAPABILITIES_V9_OUT_DRV_ATTACH_PREBOOT_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_FORCE_EVENT_MERGING_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_FORCE_EVENT_MERGING_LBN 8 -#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V9_OUT_SET_MAC_ENHANCED_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V9_OUT_SET_MAC_ENHANCED_LBN 9 -#define MC_CMD_GET_CAPABILITIES_V9_OUT_SET_MAC_ENHANCED_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V9_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V9_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10 -#define MC_CMD_GET_CAPABILITIES_V9_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V9_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V9_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11 -#define MC_CMD_GET_CAPABILITIES_V9_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_MAC_SECURITY_FILTERING_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_MAC_SECURITY_FILTERING_LBN 12 -#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V9_OUT_ADDITIONAL_RSS_MODES_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V9_OUT_ADDITIONAL_RSS_MODES_LBN 13 -#define 
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_QBB_OFST 0
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_QBB_LBN 14
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_QBB_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_PACKED_STREAM_VAR_BUFFERS_OFST 0
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_RSS_LIMITED_OFST 0
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_RSS_LIMITED_LBN 16
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_RSS_LIMITED_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_PACKED_STREAM_OFST 0
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_PACKED_STREAM_LBN 17
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_PACKED_STREAM_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_INCLUDE_FCS_OFST 0
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_INCLUDE_FCS_LBN 18
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_INCLUDE_FCS_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_VLAN_INSERTION_OFST 0
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_VLAN_INSERTION_LBN 19
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_VLAN_INSERTION_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_VLAN_STRIPPING_OFST 0
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_VLAN_STRIPPING_LBN 20
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_VLAN_STRIPPING_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_OFST 0
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_LBN 21
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_PREFIX_LEN_0_OFST 0
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_PREFIX_LEN_0_LBN 22
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_PREFIX_LEN_0_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_PREFIX_LEN_14_OFST 0
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_PREFIX_LEN_14_LBN 23
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_PREFIX_LEN_14_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_TIMESTAMP_OFST 0
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_TIMESTAMP_LBN 24
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_TIMESTAMP_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_BATCHING_OFST 0
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_BATCHING_LBN 25
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_BATCHING_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_MCAST_FILTER_CHAINING_OFST 0
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_MCAST_FILTER_CHAINING_LBN 26
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_MCAST_FILTER_CHAINING_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_PM_AND_RXDP_COUNTERS_OFST 0
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_PM_AND_RXDP_COUNTERS_LBN 27
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_DISABLE_SCATTER_OFST 0
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_DISABLE_SCATTER_LBN 28
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_DISABLE_SCATTER_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_MCAST_UDP_LOOPBACK_OFST 0
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_EVB_OFST 0
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_EVB_LBN 30
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_EVB_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_VXLAN_NVGRE_OFST 0
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_VXLAN_NVGRE_LBN 31
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_VXLAN_NVGRE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TEST_RESERVED_OFST 152
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TEST_RESERVED_LEN 8
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TEST_RESERVED_LO_OFST 152
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TEST_RESERVED_LO_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TEST_RESERVED_LO_LBN 1216
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TEST_RESERVED_LO_WIDTH 32
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TEST_RESERVED_HI_OFST 156
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TEST_RESERVED_HI_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TEST_RESERVED_HI_LBN 1248
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_TEST_RESERVED_HI_WIDTH 32
+/* The minimum size (in table entries) of indirection table to be allocated
+ * from the pool for an RSS context. Note that the table size used must be a
+ * power of 2.
+ */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_MIN_INDIRECTION_TABLE_SIZE_OFST 160
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_MIN_INDIRECTION_TABLE_SIZE_LEN 4
+/* The maximum size (in table entries) of indirection table to be allocated
+ * from the pool for an RSS context. Note that the table size used must be a
+ * power of 2.
+ */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_MAX_INDIRECTION_TABLE_SIZE_OFST 164
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_MAX_INDIRECTION_TABLE_SIZE_LEN 4
+/* The maximum number of queues that can be used by an RSS context in exclusive
+ * mode. In exclusive mode the context has a configurable indirection table and
+ * a configurable RSS key.
+ */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_MAX_INDIRECTION_QUEUES_OFST 168
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_MAX_INDIRECTION_QUEUES_LEN 4
+/* The maximum number of queues that can be used by an RSS context in even-
+ * spreading mode. In even-spreading mode the context has no indirection table
+ * but it does have a configurable RSS key.
+ */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_MAX_EVEN_SPREADING_QUEUES_OFST 172
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_MAX_EVEN_SPREADING_QUEUES_LEN 4
+/* The total number of RSS contexts supported. Note that the number of
+ * available contexts using indirection tables is also limited by the
+ * availability of indirection table space allocated from a common pool.
+ */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_NUM_CONTEXTS_OFST 176
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_NUM_CONTEXTS_LEN 4
+/* The total amount of indirection table space that can be shared between RSS
+ * contexts.
+ */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_TABLE_POOL_SIZE_OFST 180
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_TABLE_POOL_SIZE_LEN 4
+/* A bitmap of the queue sizes the device can provide, where bit N being set
+ * indicates that 2**N is a valid size. The device may be limited in the number
+ * of different queue sizes that can exist simultaneously, so a bit being set
+ * here does not guarantee that an attempt to create a queue of that size will
+ * succeed.
+ */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_SUPPORTED_QUEUE_SIZES_OFST 184
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_SUPPORTED_QUEUE_SIZES_LEN 4
+/* A bitmap of queue sizes that are always available, in the same format as
+ * SUPPORTED_QUEUE_SIZES. Attempting to create a queue with one of these sizes
+ * will never fail due to unavailability of the requested size.
+ */
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_GUARANTEED_QUEUE_SIZES_OFST 188
+#define MC_CMD_GET_CAPABILITIES_V10_OUT_GUARANTEED_QUEUE_SIZES_LEN 4
+
+/* MC_CMD_GET_CAPABILITIES_V11_OUT msgresponse */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_LEN 196
+/* First word of flags. */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_FLAGS1_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_FLAGS1_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VPORT_RECONFIGURE_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VPORT_RECONFIGURE_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VPORT_RECONFIGURE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_STRIPING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_STRIPING_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_STRIPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VADAPTOR_QUERY_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VADAPTOR_QUERY_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VADAPTOR_QUERY_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EVB_PORT_VLAN_RESTRICT_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_DRV_ATTACH_PREBOOT_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_DRV_ATTACH_PREBOOT_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_DRV_ATTACH_PREBOOT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_FORCE_EVENT_MERGING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_FORCE_EVENT_MERGING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_SET_MAC_ENHANCED_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_SET_MAC_ENHANCED_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_SET_MAC_ENHANCED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_MAC_SECURITY_FILTERING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_MAC_SECURITY_FILTERING_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_ADDITIONAL_RSS_MODES_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_ADDITIONAL_RSS_MODES_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_ADDITIONAL_RSS_MODES_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_QBB_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_QBB_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_QBB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_PACKED_STREAM_VAR_BUFFERS_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_RSS_LIMITED_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_RSS_LIMITED_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_RSS_LIMITED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_PACKED_STREAM_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_PACKED_STREAM_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_PACKED_STREAM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_INCLUDE_FCS_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_INCLUDE_FCS_LBN 18
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_INCLUDE_FCS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_VLAN_INSERTION_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_VLAN_INSERTION_LBN 19
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_VLAN_INSERTION_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_VLAN_STRIPPING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_VLAN_STRIPPING_LBN 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_VLAN_STRIPPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_TSO_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_TSO_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_TSO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_PREFIX_LEN_0_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_PREFIX_LEN_0_LBN 22
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_PREFIX_LEN_0_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_PREFIX_LEN_14_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_PREFIX_LEN_14_LBN 23
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_PREFIX_LEN_14_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_TIMESTAMP_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_TIMESTAMP_LBN 24
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_BATCHING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_BATCHING_LBN 25
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_BATCHING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MCAST_FILTER_CHAINING_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MCAST_FILTER_CHAINING_LBN 26
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MCAST_FILTER_CHAINING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_PM_AND_RXDP_COUNTERS_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_PM_AND_RXDP_COUNTERS_LBN 27
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_DISABLE_SCATTER_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_DISABLE_SCATTER_LBN 28
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_DISABLE_SCATTER_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_MCAST_UDP_LOOPBACK_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EVB_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EVB_LBN 30
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EVB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VXLAN_NVGRE_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VXLAN_NVGRE_LBN 31
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VXLAN_NVGRE_WIDTH 1
 /* RxDPCPU firmware id. */
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_DPCPU_FW_ID_OFST 4
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_DPCPU_FW_ID_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_DPCPU_FW_ID_OFST 4
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_DPCPU_FW_ID_LEN 2
 /* enum: Standard RXDP firmware */
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP 0x0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP 0x0
 /* enum: Low latency RXDP firmware */
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_LOW_LATENCY 0x1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_LOW_LATENCY 0x1
 /* enum: Packed stream RXDP firmware */
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_PACKED_STREAM 0x2
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_PACKED_STREAM 0x2
 /* enum: Rules engine RXDP firmware */
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_RULES_ENGINE 0x5
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_RULES_ENGINE 0x5
 /* enum: DPDK RXDP firmware */
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_DPDK 0x6
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_DPDK 0x6
 /* enum: BIST RXDP firmware */
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_BIST 0x10a
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_BIST 0x10a
 /* enum: RXDP Test firmware image 1 */
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101
 /* enum: RXDP Test firmware image 2 */
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102
 /* enum: RXDP Test firmware image 3 */
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103
 /* enum: RXDP Test firmware image 4 */
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104
 /* enum: RXDP Test firmware image 5 */
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_TEST_BACKPRESSURE 0x105
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_TEST_BACKPRESSURE 0x105
 /* enum: RXDP Test firmware image 6 */
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106
 /* enum: RXDP Test firmware image 7 */
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107
 /* enum: RXDP Test firmware image 8 */
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_TEST_FW_DISABLE_DL 0x108
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_TEST_FW_DISABLE_DL 0x108
 /* enum: RXDP Test firmware image 9 */
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b
 /* enum: RXDP Test firmware image 10 */
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_TEST_FW_SLOW 0x10c
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_TEST_FW_SLOW 0x10c
 /* TxDPCPU firmware id. */
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_DPCPU_FW_ID_OFST 6
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_DPCPU_FW_ID_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_DPCPU_FW_ID_OFST 6
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_DPCPU_FW_ID_LEN 2
 /* enum: Standard TXDP firmware */
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXDP 0x0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXDP 0x0
 /* enum: Low latency TXDP firmware */
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXDP_LOW_LATENCY 0x1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXDP_LOW_LATENCY 0x1
 /* enum: High packet rate TXDP firmware */
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXDP_HIGH_PACKET_RATE 0x3
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXDP_HIGH_PACKET_RATE 0x3
 /* enum: Rules engine TXDP firmware */
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXDP_RULES_ENGINE 0x5
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXDP_RULES_ENGINE 0x5
 /* enum: DPDK TXDP firmware */
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXDP_DPDK 0x6
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXDP_DPDK 0x6
 /* enum: BIST TXDP firmware */
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXDP_BIST 0x12d
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXDP_BIST 0x12d
 /* enum: TXDP Test firmware image 1 */
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXDP_TEST_FW_TSO_EDIT 0x101
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXDP_TEST_FW_TSO_EDIT 0x101
 /* enum: TXDP Test firmware image 2 */
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102
 /* enum: TXDP CSR bus test firmware */
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXDP_TEST_FW_CSR 0x103
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_VERSION_OFST 8
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_VERSION_LEN 2
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_VERSION_REV_OFST 8
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_VERSION_REV_LBN 0
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_VERSION_REV_WIDTH 12
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_VERSION_TYPE_OFST 8
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_VERSION_TYPE_LBN 12
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXDP_TEST_FW_CSR 0x103
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_VERSION_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_VERSION_REV_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_VERSION_TYPE_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4
 /* enum: reserved value - do not use (may indicate alternative interpretation
  * of REV field in future)
  */
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_RESERVED 0x0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_TYPE_RESERVED 0x0
 /* enum: Trivial RX PD firmware for early Huntington development (Huntington
  * development only)
  */
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1
 /* enum: RX PD firmware for telemetry prototyping (Medford2 development only)
  */
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_TESTFW_TELEMETRY 0x1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_TYPE_TESTFW_TELEMETRY 0x1
 /* enum: RX PD firmware with approximately Siena-compatible behaviour
  * (Huntington development only)
  */
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2
 /* enum: Full featured RX PD production firmware */
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3
 /* enum: (deprecated original name for the FULL_FEATURED variant) */
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_VSWITCH 0x3
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_TYPE_VSWITCH 0x3
 /* enum: siena_compat variant RX PD firmware using PM rather than MAC
  * (Huntington development only)
  */
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
 /* enum: Low latency RX PD production firmware */
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5
 /* enum: Packed stream RX PD production firmware */
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6
 /* enum: RX PD firmware handling layer 2 only for high packet rate performance
  * tests (Medford development only)
 */
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7
 /* enum: Rules engine RX PD production firmware */
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8
 /* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_L3XUDP 0x9
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_TYPE_L3XUDP 0x9
 /* enum: DPDK RX PD production firmware */
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_DPDK 0xa
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_TYPE_DPDK 0xa
 /* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
 /* enum: RX PD firmware parsing but not filtering network overlay tunnel
  * encapsulations (Medford development only)
 */
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_VERSION_OFST 10
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_VERSION_LEN 2
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_VERSION_REV_OFST 10
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_VERSION_REV_LBN 0
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_VERSION_REV_WIDTH 12
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_VERSION_TYPE_OFST 10
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_VERSION_TYPE_LBN 12
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_VERSION_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_VERSION_REV_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_VERSION_TYPE_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4
 /* enum: reserved value - do not use (may indicate alternative interpretation
  * of REV field in future)
 */
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_RESERVED 0x0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_TYPE_RESERVED 0x0
 /* enum: Trivial TX PD firmware for early Huntington development (Huntington
  * development only)
- */
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1
-/* enum: TX PD firmware for telemetry prototyping (Medford2 development only)
- */
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_TESTFW_TELEMETRY 0x1
-/* enum: TX PD firmware with approximately Siena-compatible behaviour
- * (Huntington development only)
- */
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2
-/* enum: Full featured TX PD production firmware */
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3
-/* enum: (deprecated original name for the FULL_FEATURED variant) */
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_VSWITCH 0x3
-/* enum: siena_compat variant TX PD firmware using PM rather than MAC
- * (Huntington development only)
- */
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */
-/* enum: TX PD firmware handling layer 2 only for high packet rate performance
- * tests (Medford development only)
- */
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7
-/* enum: Rules engine TX PD production firmware */
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8
-/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_L3XUDP 0x9
-/* enum: DPDK TX PD production firmware */
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_DPDK 0xa
-/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
-/* Hardware capabilities of NIC */
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_HW_CAPABILITIES_OFST 12
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_HW_CAPABILITIES_LEN 4
-/* Licensed capabilities */
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_LICENSE_CAPABILITIES_OFST 16
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_LICENSE_CAPABILITIES_LEN 4
-/* Second word of flags. Not present on older firmware (check the length). */
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_FLAGS2_OFST 20
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_FLAGS2_LEN 4
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_V2_OFST 20
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_V2_LBN 0
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_V2_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_V2_ENCAP_OFST 20
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_V2_ENCAP_LBN 1
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_V2_ENCAP_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_EVQ_TIMER_CTRL_OFST 20
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_EVQ_TIMER_CTRL_LBN 2
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_EVQ_TIMER_CTRL_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_EVENT_CUT_THROUGH_OFST 20
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_EVENT_CUT_THROUGH_LBN 3
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_EVENT_CUT_THROUGH_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_CUT_THROUGH_OFST 20
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_CUT_THROUGH_LBN 4
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_CUT_THROUGH_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_VFIFO_ULL_MODE_OFST 20
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_VFIFO_ULL_MODE_LBN 5
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_VFIFO_ULL_MODE_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_MAC_STATS_40G_TX_SIZE_BINS_OFST 20
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_INIT_EVQ_TYPE_SUPPORTED_OFST 20
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_INIT_EVQ_TYPE_SUPPORTED_LBN 7
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_INIT_EVQ_TYPE_SUPPORTED_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_INIT_EVQ_V2_OFST 20
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_INIT_EVQ_V2_LBN 7
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_INIT_EVQ_V2_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_MAC_TIMESTAMPING_OFST 20
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_MAC_TIMESTAMPING_LBN 8
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_MAC_TIMESTAMPING_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TIMESTAMP_OFST 20
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TIMESTAMP_LBN 9
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TIMESTAMP_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_SNIFF_OFST 20
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_SNIFF_LBN 10
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_SNIFF_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_SNIFF_OFST 20
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_SNIFF_LBN 11
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_SNIFF_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_OFST 20
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_MCDI_BACKGROUND_OFST 20
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_MCDI_BACKGROUND_LBN 13
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_MCDI_BACKGROUND_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_MCDI_DB_RETURN_OFST 20
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_MCDI_DB_RETURN_LBN 14
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_MCDI_DB_RETURN_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_CTPIO_OFST 20
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_CTPIO_LBN 15
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_CTPIO_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_TSA_SUPPORT_OFST 20
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_TSA_SUPPORT_LBN 16
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_TSA_SUPPORT_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_TSA_BOUND_OFST 20
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_TSA_BOUND_LBN 17
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_TSA_BOUND_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_SF_ADAPTER_AUTHENTICATION_OFST 20
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_FILTER_ACTION_FLAG_OFST 20
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_FILTER_ACTION_FLAG_LBN 19
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_FILTER_ACTION_FLAG_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_FILTER_ACTION_MARK_OFST 20
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_FILTER_ACTION_MARK_LBN 20
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_FILTER_ACTION_MARK_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_EQUAL_STRIDE_SUPER_BUFFER_OFST 20
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_EQUAL_STRIDE_PACKED_STREAM_OFST 20
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_L3XUDP_SUPPORT_OFST 20
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_L3XUDP_SUPPORT_LBN 22
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_L3XUDP_SUPPORT_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_FW_SUBVARIANT_NO_TX_CSUM_OFST 20
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_VI_SPREADING_OFST 20
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_VI_SPREADING_LBN 24
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_VI_SPREADING_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_HLB_IDLE_OFST 20
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_HLB_IDLE_LBN 25
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_HLB_IDLE_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_INIT_RXQ_NO_CONT_EV_OFST 20
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_INIT_RXQ_NO_CONT_EV_LBN 26
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_INIT_RXQ_NO_CONT_EV_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_INIT_RXQ_WITH_BUFFER_SIZE_OFST 20
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_INIT_RXQ_WITH_BUFFER_SIZE_LBN 27
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_INIT_RXQ_WITH_BUFFER_SIZE_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_BUNDLE_UPDATE_OFST 20
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_BUNDLE_UPDATE_LBN 28
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_BUNDLE_UPDATE_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_V3_OFST 20
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_V3_LBN 29
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_V3_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_DYNAMIC_SENSORS_OFST 20
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_DYNAMIC_SENSORS_LBN 30
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_DYNAMIC_SENSORS_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_OFST 20
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_LBN 31
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_WIDTH 1
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: TX PD firmware for telemetry prototyping (Medford2 development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_TYPE_TESTFW_TELEMETRY 0x1
+/* enum: TX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant TX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */
+/* enum: TX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_TYPE_DPDK 0xa
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* Hardware capabilities of NIC */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_HW_CAPABILITIES_OFST 12
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_HW_CAPABILITIES_LEN 4
+/* Licensed capabilities */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_LICENSE_CAPABILITIES_OFST 16
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_LICENSE_CAPABILITIES_LEN 4
+/* Second word of flags. Not present on older firmware (check the length). */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_FLAGS2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_FLAGS2_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_TSO_V2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_TSO_V2_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_TSO_V2_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_TSO_V2_ENCAP_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_TSO_V2_ENCAP_LBN 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_TSO_V2_ENCAP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EVQ_TIMER_CTRL_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EVQ_TIMER_CTRL_LBN 2
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EVQ_TIMER_CTRL_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EVENT_CUT_THROUGH_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EVENT_CUT_THROUGH_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EVENT_CUT_THROUGH_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_CUT_THROUGH_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_CUT_THROUGH_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_CUT_THROUGH_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_VFIFO_ULL_MODE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_VFIFO_ULL_MODE_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_VFIFO_ULL_MODE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MAC_STATS_40G_TX_SIZE_BINS_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_INIT_EVQ_TYPE_SUPPORTED_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_INIT_EVQ_TYPE_SUPPORTED_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_INIT_EVQ_TYPE_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_INIT_EVQ_V2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_INIT_EVQ_V2_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_INIT_EVQ_V2_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_MAC_TIMESTAMPING_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_MAC_TIMESTAMPING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_MAC_TIMESTAMPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_TIMESTAMP_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_TIMESTAMP_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_SNIFF_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_SNIFF_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_SNIFF_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_SNIFF_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_SNIFF_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_SNIFF_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MCDI_BACKGROUND_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MCDI_BACKGROUND_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MCDI_BACKGROUND_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MCDI_DB_RETURN_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MCDI_DB_RETURN_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MCDI_DB_RETURN_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_CTPIO_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_CTPIO_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_CTPIO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TSA_SUPPORT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TSA_SUPPORT_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TSA_SUPPORT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TSA_BOUND_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TSA_BOUND_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TSA_BOUND_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_SF_ADAPTER_AUTHENTICATION_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_FILTER_ACTION_FLAG_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_FILTER_ACTION_FLAG_LBN 19
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_FILTER_ACTION_FLAG_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_FILTER_ACTION_MARK_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_FILTER_ACTION_MARK_LBN 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_FILTER_ACTION_MARK_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EQUAL_STRIDE_SUPER_BUFFER_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EQUAL_STRIDE_PACKED_STREAM_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_L3XUDP_SUPPORT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_L3XUDP_SUPPORT_LBN 22
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_L3XUDP_SUPPORT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_FW_SUBVARIANT_NO_TX_CSUM_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VI_SPREADING_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VI_SPREADING_LBN 24
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VI_SPREADING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_HLB_IDLE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_HLB_IDLE_LBN 25
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RXDP_HLB_IDLE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_INIT_RXQ_NO_CONT_EV_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_INIT_RXQ_NO_CONT_EV_LBN 26
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_INIT_RXQ_NO_CONT_EV_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_INIT_RXQ_WITH_BUFFER_SIZE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_INIT_RXQ_WITH_BUFFER_SIZE_LBN 27
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_INIT_RXQ_WITH_BUFFER_SIZE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_BUNDLE_UPDATE_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_BUNDLE_UPDATE_LBN 28
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_BUNDLE_UPDATE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_TSO_V3_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_TSO_V3_LBN 29
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_TSO_V3_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_DYNAMIC_SENSORS_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_DYNAMIC_SENSORS_LBN 30
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_DYNAMIC_SENSORS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_LBN 31
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_WIDTH 1
 /* Number of FATSOv2 contexts per datapath supported by this NIC (when
  * TX_TSO_V2 == 1). Not present on older firmware (check the length).
 */
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2
 /* One byte per PF containing the number of the external port assigned to this
 * PF, indexed by PF number. Special values indicate that a PF is either not
 * present or not assigned.
 */
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16
 /* enum: The caller is not permitted to access information on this PF. */
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_ACCESS_NOT_PERMITTED 0xff
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_ACCESS_NOT_PERMITTED 0xff
 /* enum: PF does not exist. */
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_PF_NOT_PRESENT 0xfe
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_PF_NOT_PRESENT 0xfe
 /* enum: PF does exist but is not assigned to any external port. */
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_PF_NOT_ASSIGNED 0xfd
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_PF_NOT_ASSIGNED 0xfd
 /* enum: This value indicates that PF is assigned, but it cannot be expressed
  * in this field. It is intended for a possible future situation where a more
  * complex scheme of PFs to ports mapping is being used. The future driver
  * should look for a new field supporting the new scheme. The current/old
  * driver should treat this value as PF_NOT_ASSIGNED.
 */
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc
 /* One byte per PF containing the number of its VFs, indexed by PF number. A
  * special value indicates that a PF is not present.
 */
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_NUM_VFS_PER_PF_OFST 42
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_NUM_VFS_PER_PF_LEN 1
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_NUM_VFS_PER_PF_NUM 16
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_NUM_VFS_PER_PF_OFST 42
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_NUM_VFS_PER_PF_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_NUM_VFS_PER_PF_NUM 16
 /* enum: The caller is not permitted to access information on this PF. */
-/* MC_CMD_GET_CAPABILITIES_V9_OUT_ACCESS_NOT_PERMITTED 0xff */
+/* MC_CMD_GET_CAPABILITIES_V11_OUT_ACCESS_NOT_PERMITTED 0xff */
 /* enum: PF does not exist. */
-/* MC_CMD_GET_CAPABILITIES_V9_OUT_PF_NOT_PRESENT 0xfe */
-/* Number of VIs available for each external port */
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_NUM_VIS_PER_PORT_OFST 58
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_NUM_VIS_PER_PORT_LEN 2
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_NUM_VIS_PER_PORT_NUM 4
+/* MC_CMD_GET_CAPABILITIES_V11_OUT_PF_NOT_PRESENT 0xfe */
+/* Number of VIs available for external ports 0-3. For devices with more than
+ * four ports, the remainder are in NUM_VIS_PER_PORT2 in
+ * GET_CAPABILITIES_V12_OUT.
+ */
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_NUM_VIS_PER_PORT_OFST 58
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_NUM_VIS_PER_PORT_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_NUM_VIS_PER_PORT_NUM 4
 /* Size of RX descriptor cache expressed as binary logarithm The actual size
  * equals (2 ^ RX_DESC_CACHE_SIZE)
 */
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_DESC_CACHE_SIZE_OFST 66
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_DESC_CACHE_SIZE_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_DESC_CACHE_SIZE_OFST 66
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_DESC_CACHE_SIZE_LEN 1
 /* Size of TX descriptor cache expressed as binary logarithm The actual size
  * equals (2 ^ TX_DESC_CACHE_SIZE)
 */
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_DESC_CACHE_SIZE_OFST 67
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_DESC_CACHE_SIZE_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_DESC_CACHE_SIZE_OFST 67
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_TX_DESC_CACHE_SIZE_LEN 1
 /* Total number of available PIO buffers */
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_NUM_PIO_BUFFS_OFST 68
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_NUM_PIO_BUFFS_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_NUM_PIO_BUFFS_OFST 68
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_NUM_PIO_BUFFS_LEN 2
 /* Size of a single PIO buffer */
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_SIZE_PIO_BUFF_OFST 70
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_SIZE_PIO_BUFF_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_SIZE_PIO_BUFF_OFST 70
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_SIZE_PIO_BUFF_LEN 2
 /* On chips later than Medford the amount of address space assigned to each VI
 * is configurable. This is a global setting that the driver must query to
 * discover the VI to address mapping. Cut-through PIO (CTPIO) is not available
 * with 8k VI windows.
 */
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_VI_WINDOW_MODE_OFST 72
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_VI_WINDOW_MODE_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VI_WINDOW_MODE_OFST 72
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VI_WINDOW_MODE_LEN 1
 /* enum: Each VI occupies 8k as on Huntington and Medford. PIO is at offset 4k.
 * CTPIO is not mapped.
 */
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_VI_WINDOW_MODE_8K 0x0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VI_WINDOW_MODE_8K 0x0
 /* enum: Each VI occupies 16k. PIO is at offset 4k. CTPIO is at offset 12k. */
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_VI_WINDOW_MODE_16K 0x1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VI_WINDOW_MODE_16K 0x1
 /* enum: Each VI occupies 64k. PIO is at offset 4k. CTPIO is at offset 12k. */
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_VI_WINDOW_MODE_64K 0x2
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VI_WINDOW_MODE_64K 0x2
 /* Number of vFIFOs per adapter that can be used for VFIFO Stuffing
  * (SF-115995-SW) in the present configuration of firmware and port mode.
 */
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_VFIFO_STUFFING_NUM_VFIFOS_OFST 73
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_VFIFO_STUFFING_NUM_VFIFOS_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VFIFO_STUFFING_NUM_VFIFOS_OFST 73
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VFIFO_STUFFING_NUM_VFIFOS_LEN 1
 /* Number of buffers per adapter that can be used for VFIFO Stuffing
  * (SF-115995-SW) in the present configuration of firmware and port mode.
 */
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_OFST 74
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_OFST 74
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_LEN 2
 /* Entry count in the MAC stats array, including the final GENERATION_END
  * entry. For MAC stats DMA, drivers should allocate a buffer large enough to
  * hold at least this many 64-bit stats values, if they wish to receive all
  * available stats. If the buffer is shorter than MAC_STATS_NUM_STATS * 8, the
  * stats array returned will be truncated.
 */
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_MAC_STATS_NUM_STATS_OFST 76
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_MAC_STATS_NUM_STATS_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MAC_STATS_NUM_STATS_OFST 76
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MAC_STATS_NUM_STATS_LEN 2
 /* Maximum supported value for MC_CMD_FILTER_OP_V3/MATCH_MARK_VALUE. This field
 * will only be non-zero if MC_CMD_GET_CAPABILITIES/FILTER_ACTION_MARK is set.
 */
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_FILTER_ACTION_MARK_MAX_OFST 80
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_FILTER_ACTION_MARK_MAX_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_FILTER_ACTION_MARK_MAX_OFST 80
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_FILTER_ACTION_MARK_MAX_LEN 4
 /* On devices where the INIT_RXQ_WITH_BUFFER_SIZE flag (in
 * GET_CAPABILITIES_OUT_V2) is set, drivers have to specify a buffer size when
 * they create an RX queue. Due to hardware limitations, only a small number of
@@ -17773,670 +18731,723 @@
  * available for use. If the list is empty, there are no limitations on
  * concurrent buffer sizes.
 */
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_GUARANTEED_RX_BUFFER_SIZES_OFST 84
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_GUARANTEED_RX_BUFFER_SIZES_LEN 4
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_GUARANTEED_RX_BUFFER_SIZES_NUM 16
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_GUARANTEED_RX_BUFFER_SIZES_OFST 84
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_GUARANTEED_RX_BUFFER_SIZES_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_GUARANTEED_RX_BUFFER_SIZES_NUM 16
 /* Third word of flags. Not present on older firmware (check the length). */
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_FLAGS3_OFST 148
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_FLAGS3_LEN 4
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_WOL_ETHERWAKE_OFST 148
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_WOL_ETHERWAKE_LBN 0
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_WOL_ETHERWAKE_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_EVEN_SPREADING_OFST 148
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_EVEN_SPREADING_LBN 1
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_EVEN_SPREADING_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_SELECTABLE_TABLE_SIZE_OFST 148
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_SELECTABLE_TABLE_SIZE_LBN 2
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_SELECTABLE_TABLE_SIZE_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_MAE_SUPPORTED_OFST 148
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_MAE_SUPPORTED_LBN 3
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_MAE_SUPPORTED_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_VDPA_SUPPORTED_OFST 148
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_VDPA_SUPPORTED_LBN 4
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_VDPA_SUPPORTED_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_OFST 148
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_LBN 5
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_OFST 148
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_LBN 6
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_UNSOL_EV_CREDIT_SUPPORTED_OFST 148
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_UNSOL_EV_CREDIT_SUPPORTED_LBN 7
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_UNSOL_EV_CREDIT_SUPPORTED_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_ENCAPSULATED_MCDI_SUPPORTED_OFST 148
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_ENCAPSULATED_MCDI_SUPPORTED_LBN 8
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_ENCAPSULATED_MCDI_SUPPORTED_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_EXTERNAL_MAE_SUPPORTED_OFST 148
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_EXTERNAL_MAE_SUPPORTED_LBN 9
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_EXTERNAL_MAE_SUPPORTED_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_NVRAM_UPDATE_ABORT_SUPPORTED_OFST 148
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_NVRAM_UPDATE_ABORT_SUPPORTED_LBN 10
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_NVRAM_UPDATE_ABORT_SUPPORTED_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_MAE_ACTION_SET_ALLOC_V2_SUPPORTED_OFST 148
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_MAE_ACTION_SET_ALLOC_V2_SUPPORTED_LBN 11
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_MAE_ACTION_SET_ALLOC_V2_SUPPORTED_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_STEER_ON_OUTER_SUPPORTED_OFST 148
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_STEER_ON_OUTER_SUPPORTED_LBN 12
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_STEER_ON_OUTER_SUPPORTED_WIDTH 1
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_DYNAMIC_MPORT_JOURNAL_OFST 148
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_DYNAMIC_MPORT_JOURNAL_LBN 14
-#define MC_CMD_GET_CAPABILITIES_V9_OUT_DYNAMIC_MPORT_JOURNAL_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_FLAGS3_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_FLAGS3_LEN 4
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_WOL_ETHERWAKE_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_WOL_ETHERWAKE_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_WOL_ETHERWAKE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_EVEN_SPREADING_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_EVEN_SPREADING_LBN 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_EVEN_SPREADING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_SELECTABLE_TABLE_SIZE_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_SELECTABLE_TABLE_SIZE_LBN 2
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_SELECTABLE_TABLE_SIZE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MAE_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MAE_SUPPORTED_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MAE_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VDPA_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VDPA_SUPPORTED_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_VDPA_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_UNSOL_EV_CREDIT_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_UNSOL_EV_CREDIT_SUPPORTED_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_UNSOL_EV_CREDIT_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_ENCAPSULATED_MCDI_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_ENCAPSULATED_MCDI_SUPPORTED_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_ENCAPSULATED_MCDI_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EXTERNAL_MAE_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EXTERNAL_MAE_SUPPORTED_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_EXTERNAL_MAE_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_NVRAM_UPDATE_ABORT_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_NVRAM_UPDATE_ABORT_SUPPORTED_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_NVRAM_UPDATE_ABORT_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MAE_ACTION_SET_ALLOC_V2_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MAE_ACTION_SET_ALLOC_V2_SUPPORTED_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MAE_ACTION_SET_ALLOC_V2_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_STEER_ON_OUTER_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_STEER_ON_OUTER_SUPPORTED_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_STEER_ON_OUTER_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_DYNAMIC_MPORT_JOURNAL_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_DYNAMIC_MPORT_JOURNAL_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_DYNAMIC_MPORT_JOURNAL_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_CLIENT_CMD_VF_PROXY_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_CLIENT_CMD_VF_PROXY_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_CLIENT_CMD_VF_PROXY_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_OFST 148
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V11_OUT_CXL_CONFIG_ENABLE_OFST 148
MC_CMD_GET_CAPABILITIES_V11_OUT_CXL_CONFIG_ENABLE_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V11_OUT_CXL_CONFIG_ENABLE_LBN 17 +#define MC_CMD_GET_CAPABILITIES_V11_OUT_CXL_CONFIG_ENABLE_WIDTH 1 /* These bits are reserved for communicating test-specific capabilities to * host-side test software. All production drivers should treat this field as * opaque. */ -#define MC_CMD_GET_CAPABILITIES_V9_OUT_TEST_RESERVED_OFST 152 -#define MC_CMD_GET_CAPABILITIES_V9_OUT_TEST_RESERVED_LEN 8 -#define MC_CMD_GET_CAPABILITIES_V9_OUT_TEST_RESERVED_LO_OFST 152 -#define MC_CMD_GET_CAPABILITIES_V9_OUT_TEST_RESERVED_LO_LEN 4 -#define MC_CMD_GET_CAPABILITIES_V9_OUT_TEST_RESERVED_LO_LBN 1216 -#define MC_CMD_GET_CAPABILITIES_V9_OUT_TEST_RESERVED_LO_WIDTH 32 -#define MC_CMD_GET_CAPABILITIES_V9_OUT_TEST_RESERVED_HI_OFST 156 -#define MC_CMD_GET_CAPABILITIES_V9_OUT_TEST_RESERVED_HI_LEN 4 -#define MC_CMD_GET_CAPABILITIES_V9_OUT_TEST_RESERVED_HI_LBN 1248 -#define MC_CMD_GET_CAPABILITIES_V9_OUT_TEST_RESERVED_HI_WIDTH 32 +#define MC_CMD_GET_CAPABILITIES_V11_OUT_TEST_RESERVED_OFST 152 +#define MC_CMD_GET_CAPABILITIES_V11_OUT_TEST_RESERVED_LEN 8 +#define MC_CMD_GET_CAPABILITIES_V11_OUT_TEST_RESERVED_LO_OFST 152 +#define MC_CMD_GET_CAPABILITIES_V11_OUT_TEST_RESERVED_LO_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V11_OUT_TEST_RESERVED_LO_LBN 1216 +#define MC_CMD_GET_CAPABILITIES_V11_OUT_TEST_RESERVED_LO_WIDTH 32 +#define MC_CMD_GET_CAPABILITIES_V11_OUT_TEST_RESERVED_HI_OFST 156 +#define MC_CMD_GET_CAPABILITIES_V11_OUT_TEST_RESERVED_HI_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V11_OUT_TEST_RESERVED_HI_LBN 1248 +#define MC_CMD_GET_CAPABILITIES_V11_OUT_TEST_RESERVED_HI_WIDTH 32 /* The minimum size (in table entries) of indirection table to be allocated * from the pool for an RSS context. Note that the table size used must be a * power of 2. */ -#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_MIN_INDIRECTION_TABLE_SIZE_OFST 160 -#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_MIN_INDIRECTION_TABLE_SIZE_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_MIN_INDIRECTION_TABLE_SIZE_OFST 160 +#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_MIN_INDIRECTION_TABLE_SIZE_LEN 4 /* The maximum size (in table entries) of indirection table to be allocated * from the pool for an RSS context. Note that the table size used must be a * power of 2. */ -#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_MAX_INDIRECTION_TABLE_SIZE_OFST 164 -#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_MAX_INDIRECTION_TABLE_SIZE_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_MAX_INDIRECTION_TABLE_SIZE_OFST 164 +#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_MAX_INDIRECTION_TABLE_SIZE_LEN 4 /* The maximum number of queues that can be used by an RSS context in exclusive * mode. In exclusive mode the context has a configurable indirection table and * a configurable RSS key. */ -#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_MAX_INDIRECTION_QUEUES_OFST 168 -#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_MAX_INDIRECTION_QUEUES_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_MAX_INDIRECTION_QUEUES_OFST 168 +#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_MAX_INDIRECTION_QUEUES_LEN 4 /* The maximum number of queues that can be used by an RSS context in even- * spreading mode. In even-spreading mode the context has no indirection table * but it does have a configurable RSS key. 
*/ -#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_MAX_EVEN_SPREADING_QUEUES_OFST 172 -#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_MAX_EVEN_SPREADING_QUEUES_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_MAX_EVEN_SPREADING_QUEUES_OFST 172 +#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_MAX_EVEN_SPREADING_QUEUES_LEN 4 /* The total number of RSS contexts supported. Note that the number of * available contexts using indirection tables is also limited by the * availability of indirection table space allocated from a common pool. */ -#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_NUM_CONTEXTS_OFST 176 -#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_NUM_CONTEXTS_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_NUM_CONTEXTS_OFST 176 +#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_NUM_CONTEXTS_LEN 4 /* The total amount of indirection table space that can be shared between RSS * contexts. */ -#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_TABLE_POOL_SIZE_OFST 180 -#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_TABLE_POOL_SIZE_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_TABLE_POOL_SIZE_OFST 180 +#define MC_CMD_GET_CAPABILITIES_V11_OUT_RSS_TABLE_POOL_SIZE_LEN 4 +/* A bitmap of the queue sizes the device can provide, where bit N being set + * indicates that 2**N is a valid size. The device may be limited in the number + * of different queue sizes that can exist simultaneously, so a bit being set + * here does not guarantee that an attempt to create a queue of that size will + * succeed. + */ +#define MC_CMD_GET_CAPABILITIES_V11_OUT_SUPPORTED_QUEUE_SIZES_OFST 184 +#define MC_CMD_GET_CAPABILITIES_V11_OUT_SUPPORTED_QUEUE_SIZES_LEN 4 +/* A bitmap of queue sizes that are always available, in the same format as + * SUPPORTED_QUEUE_SIZES. Attempting to create a queue with one of these sizes + * will never fail due to unavailability of the requested size. + */ +#define MC_CMD_GET_CAPABILITIES_V11_OUT_GUARANTEED_QUEUE_SIZES_OFST 188 +#define MC_CMD_GET_CAPABILITIES_V11_OUT_GUARANTEED_QUEUE_SIZES_LEN 4 +/* Number of available indirect memory maps. */ +#define MC_CMD_GET_CAPABILITIES_V11_OUT_INDIRECT_MAP_INDEX_COUNT_OFST 192 +#define MC_CMD_GET_CAPABILITIES_V11_OUT_INDIRECT_MAP_INDEX_COUNT_LEN 4 -/* MC_CMD_GET_CAPABILITIES_V10_OUT msgresponse */ -#define MC_CMD_GET_CAPABILITIES_V10_OUT_LEN 192 +/* MC_CMD_GET_CAPABILITIES_V12_OUT msgresponse */ +#define MC_CMD_GET_CAPABILITIES_V12_OUT_LEN 204 /* First word of flags. 
*/ -#define MC_CMD_GET_CAPABILITIES_V10_OUT_FLAGS1_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_FLAGS1_LEN 4 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_VPORT_RECONFIGURE_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_VPORT_RECONFIGURE_LBN 3 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_VPORT_RECONFIGURE_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_STRIPING_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_STRIPING_LBN 4 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_STRIPING_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_VADAPTOR_QUERY_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_VADAPTOR_QUERY_LBN 5 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_VADAPTOR_QUERY_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_EVB_PORT_VLAN_RESTRICT_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_DRV_ATTACH_PREBOOT_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_DRV_ATTACH_PREBOOT_LBN 7 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_DRV_ATTACH_PREBOOT_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_FORCE_EVENT_MERGING_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_FORCE_EVENT_MERGING_LBN 8 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_SET_MAC_ENHANCED_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_SET_MAC_ENHANCED_LBN 9 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_SET_MAC_ENHANCED_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_MAC_SECURITY_FILTERING_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_MAC_SECURITY_FILTERING_LBN 12 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_ADDITIONAL_RSS_MODES_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_ADDITIONAL_RSS_MODES_LBN 13 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_ADDITIONAL_RSS_MODES_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_QBB_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_QBB_LBN 14 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_QBB_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_PACKED_STREAM_VAR_BUFFERS_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_RSS_LIMITED_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_RSS_LIMITED_LBN 16 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_RSS_LIMITED_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_PACKED_STREAM_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_PACKED_STREAM_LBN 17 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_PACKED_STREAM_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_INCLUDE_FCS_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_INCLUDE_FCS_LBN 18 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_INCLUDE_FCS_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_VLAN_INSERTION_OFST 0 -#define 
MC_CMD_GET_CAPABILITIES_V10_OUT_TX_VLAN_INSERTION_LBN 19 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_VLAN_INSERTION_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_VLAN_STRIPPING_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_VLAN_STRIPPING_LBN 20 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_VLAN_STRIPPING_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_TSO_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_TSO_LBN 21 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_TSO_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_PREFIX_LEN_0_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_PREFIX_LEN_0_LBN 22 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_PREFIX_LEN_0_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_PREFIX_LEN_14_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_PREFIX_LEN_14_LBN 23 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_PREFIX_LEN_14_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_TIMESTAMP_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_TIMESTAMP_LBN 24 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_TIMESTAMP_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_BATCHING_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_BATCHING_LBN 25 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_BATCHING_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_MCAST_FILTER_CHAINING_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_MCAST_FILTER_CHAINING_LBN 26 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_MCAST_FILTER_CHAINING_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_PM_AND_RXDP_COUNTERS_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_PM_AND_RXDP_COUNTERS_LBN 27 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_DISABLE_SCATTER_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_DISABLE_SCATTER_LBN 28 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_DISABLE_SCATTER_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_MCAST_UDP_LOOPBACK_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_EVB_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_EVB_LBN 30 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_EVB_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_VXLAN_NVGRE_OFST 0 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_VXLAN_NVGRE_LBN 31 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_VXLAN_NVGRE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_FLAGS1_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_FLAGS1_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_VPORT_RECONFIGURE_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_VPORT_RECONFIGURE_LBN 3 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_VPORT_RECONFIGURE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_STRIPING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_STRIPING_LBN 4 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_STRIPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_VADAPTOR_QUERY_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_VADAPTOR_QUERY_LBN 5 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_VADAPTOR_QUERY_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_EVB_PORT_VLAN_RESTRICT_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_DRV_ATTACH_PREBOOT_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_DRV_ATTACH_PREBOOT_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_DRV_ATTACH_PREBOOT_WIDTH 1 +#define 
MC_CMD_GET_CAPABILITIES_V12_OUT_RX_FORCE_EVENT_MERGING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_FORCE_EVENT_MERGING_LBN 8 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_SET_MAC_ENHANCED_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_SET_MAC_ENHANCED_LBN 9 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_SET_MAC_ENHANCED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_MAC_SECURITY_FILTERING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_MAC_SECURITY_FILTERING_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_ADDITIONAL_RSS_MODES_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_ADDITIONAL_RSS_MODES_LBN 13 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_ADDITIONAL_RSS_MODES_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_QBB_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_QBB_LBN 14 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_QBB_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_PACKED_STREAM_VAR_BUFFERS_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_RSS_LIMITED_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_RSS_LIMITED_LBN 16 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_RSS_LIMITED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_PACKED_STREAM_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_PACKED_STREAM_LBN 17 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_PACKED_STREAM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_INCLUDE_FCS_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_INCLUDE_FCS_LBN 18 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_INCLUDE_FCS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_VLAN_INSERTION_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_VLAN_INSERTION_LBN 19 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_VLAN_INSERTION_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_VLAN_STRIPPING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_VLAN_STRIPPING_LBN 20 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_VLAN_STRIPPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_TSO_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_TSO_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_TSO_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_PREFIX_LEN_0_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_PREFIX_LEN_0_LBN 22 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_PREFIX_LEN_0_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_PREFIX_LEN_14_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_PREFIX_LEN_14_LBN 23 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_PREFIX_LEN_14_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_TIMESTAMP_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_TIMESTAMP_LBN 24 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_TIMESTAMP_WIDTH 1 +#define 
MC_CMD_GET_CAPABILITIES_V12_OUT_RX_BATCHING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_BATCHING_LBN 25 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_BATCHING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_MCAST_FILTER_CHAINING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_MCAST_FILTER_CHAINING_LBN 26 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_MCAST_FILTER_CHAINING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_PM_AND_RXDP_COUNTERS_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_PM_AND_RXDP_COUNTERS_LBN 27 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_DISABLE_SCATTER_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_DISABLE_SCATTER_LBN 28 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_DISABLE_SCATTER_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_MCAST_UDP_LOOPBACK_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_EVB_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_EVB_LBN 30 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_EVB_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_VXLAN_NVGRE_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_VXLAN_NVGRE_LBN 31 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_VXLAN_NVGRE_WIDTH 1 /* RxDPCPU firmware id. */ -#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_DPCPU_FW_ID_OFST 4 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_DPCPU_FW_ID_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_DPCPU_FW_ID_OFST 4 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_DPCPU_FW_ID_LEN 2 /* enum: Standard RXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXDP 0x0 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP 0x0 /* enum: Low latency RXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXDP_LOW_LATENCY 0x1 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_LOW_LATENCY 0x1 /* enum: Packed stream RXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXDP_PACKED_STREAM 0x2 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_PACKED_STREAM 0x2 /* enum: Rules engine RXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXDP_RULES_ENGINE 0x5 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_RULES_ENGINE 0x5 /* enum: DPDK RXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXDP_DPDK 0x6 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_DPDK 0x6 /* enum: BIST RXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXDP_BIST 0x10a +#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_BIST 0x10a /* enum: RXDP Test firmware image 1 */ -#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101 /* enum: RXDP Test firmware image 2 */ -#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102 /* enum: RXDP Test firmware image 3 */ -#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103 /* enum: RXDP Test firmware image 4 */ -#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104 /* enum: RXDP Test firmware image 5 */ -#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXDP_TEST_BACKPRESSURE 0x105 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_TEST_BACKPRESSURE 0x105 /* enum: RXDP Test firmware image 6 */ 
-#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106 /* enum: RXDP Test firmware image 7 */ -#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107 /* enum: RXDP Test firmware image 8 */ -#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXDP_TEST_FW_DISABLE_DL 0x108 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_TEST_FW_DISABLE_DL 0x108 /* enum: RXDP Test firmware image 9 */ -#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b +#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b /* enum: RXDP Test firmware image 10 */ -#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXDP_TEST_FW_SLOW 0x10c +#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_TEST_FW_SLOW 0x10c /* TxDPCPU firmware id. */ -#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_DPCPU_FW_ID_OFST 6 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_DPCPU_FW_ID_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_DPCPU_FW_ID_OFST 6 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_DPCPU_FW_ID_LEN 2 /* enum: Standard TXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXDP 0x0 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXDP 0x0 /* enum: Low latency TXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXDP_LOW_LATENCY 0x1 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXDP_LOW_LATENCY 0x1 /* enum: High packet rate TXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXDP_HIGH_PACKET_RATE 0x3 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXDP_HIGH_PACKET_RATE 0x3 /* enum: Rules engine TXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXDP_RULES_ENGINE 0x5 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXDP_RULES_ENGINE 0x5 /* enum: DPDK TXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXDP_DPDK 0x6 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXDP_DPDK 0x6 /* enum: BIST TXDP firmware */ -#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXDP_BIST 0x12d +#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXDP_BIST 0x12d /* enum: TXDP Test firmware image 1 */ -#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXDP_TEST_FW_TSO_EDIT 0x101 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXDP_TEST_FW_TSO_EDIT 0x101 /* enum: TXDP Test firmware image 2 */ -#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102 /* enum: TXDP CSR bus test firmware */ -#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXDP_TEST_FW_CSR 0x103 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_VERSION_OFST 8 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_VERSION_LEN 2 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_VERSION_REV_OFST 8 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_VERSION_REV_LBN 0 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_VERSION_REV_WIDTH 12 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_VERSION_TYPE_OFST 8 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_VERSION_TYPE_LBN 12 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXDP_TEST_FW_CSR 0x103 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_VERSION_OFST 8 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_VERSION_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_VERSION_REV_OFST 8 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_VERSION_REV_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_VERSION_REV_WIDTH 12 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_VERSION_TYPE_OFST 8 +#define 
MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_VERSION_TYPE_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4 /* enum: reserved value - do not use (may indicate alternative interpretation * of REV field in future) */ -#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_TYPE_RESERVED 0x0 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_TYPE_RESERVED 0x0 /* enum: Trivial RX PD firmware for early Huntington development (Huntington * development only) */ -#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1 /* enum: RX PD firmware for telemetry prototyping (Medford2 development only) */ -#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_TYPE_TESTFW_TELEMETRY 0x1 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_TYPE_TESTFW_TELEMETRY 0x1 /* enum: RX PD firmware with approximately Siena-compatible behaviour * (Huntington development only) */ -#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2 /* enum: Full featured RX PD production firmware */ -#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3 /* enum: (deprecated original name for the FULL_FEATURED variant) */ -#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_TYPE_VSWITCH 0x3 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_TYPE_VSWITCH 0x3 /* enum: siena_compat variant RX PD firmware using PM rather than MAC * (Huntington development only) */ -#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 /* enum: Low latency RX PD production firmware */ -#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum: Packed stream RX PD production firmware */ -#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6 /* enum: RX PD firmware handling layer 2 only for high packet rate performance * tests (Medford development only) */ -#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7 /* enum: Rules engine RX PD production firmware */ -#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8 /* enum: Custom firmware variant (see SF-119495-PD and bug69716) */ -#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_TYPE_L3XUDP 0x9 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_TYPE_L3XUDP 0x9 /* enum: DPDK RX PD production firmware */ -#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_TYPE_DPDK 0xa +#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_TYPE_DPDK 0xa /* enum: RX PD firmware for GUE parsing prototype (Medford development only) */ -#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe +#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe /* enum: RX PD firmware parsing but not filtering network overlay tunnel * encapsulations (Medford development only) */ -#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf -#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_VERSION_OFST 10 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_VERSION_LEN 2 -#define 
MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_VERSION_REV_OFST 10 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_VERSION_REV_LBN 0 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_VERSION_REV_WIDTH 12 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_VERSION_TYPE_OFST 10 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_VERSION_TYPE_LBN 12 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf +#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_VERSION_OFST 10 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_VERSION_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_VERSION_REV_OFST 10 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_VERSION_REV_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_VERSION_REV_WIDTH 12 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_VERSION_TYPE_OFST 10 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_VERSION_TYPE_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4 /* enum: reserved value - do not use (may indicate alternative interpretation * of REV field in future) */ -#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_TYPE_RESERVED 0x0 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_TYPE_RESERVED 0x0 /* enum: Trivial TX PD firmware for early Huntington development (Huntington * development only) */ -#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1 /* enum: TX PD firmware for telemetry prototyping (Medford2 development only) */ -#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_TYPE_TESTFW_TELEMETRY 0x1 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_TYPE_TESTFW_TELEMETRY 0x1 /* enum: TX PD firmware with approximately Siena-compatible behaviour * (Huntington development only) */ -#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2 /* enum: Full featured TX PD production firmware */ -#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3 /* enum: (deprecated original name for the FULL_FEATURED variant) */ -#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_TYPE_VSWITCH 0x3 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_TYPE_VSWITCH 0x3 /* enum: siena_compat variant TX PD firmware using PM rather than MAC * (Huntington development only) */ -#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */ +#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */ /* enum: TX PD firmware handling layer 2 only for high packet rate performance * tests (Medford development only) */ -#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7 /* enum: Rules engine TX PD production firmware */ -#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8 /* enum: Custom firmware variant (see SF-119495-PD and bug69716) */ -#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_TYPE_L3XUDP 0x9 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_TYPE_L3XUDP 0x9 /* enum: DPDK TX PD production firmware */ -#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_TYPE_DPDK 0xa 
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_TYPE_DPDK 0xa /* enum: RX PD firmware for GUE parsing prototype (Medford development only) */ -#define MC_CMD_GET_CAPABILITIES_V10_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe +#define MC_CMD_GET_CAPABILITIES_V12_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe /* Hardware capabilities of NIC */ -#define MC_CMD_GET_CAPABILITIES_V10_OUT_HW_CAPABILITIES_OFST 12 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_HW_CAPABILITIES_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_HW_CAPABILITIES_OFST 12 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_HW_CAPABILITIES_LEN 4 /* Licensed capabilities */ -#define MC_CMD_GET_CAPABILITIES_V10_OUT_LICENSE_CAPABILITIES_OFST 16 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_LICENSE_CAPABILITIES_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_LICENSE_CAPABILITIES_OFST 16 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_LICENSE_CAPABILITIES_LEN 4 /* Second word of flags. Not present on older firmware (check the length). */ -#define MC_CMD_GET_CAPABILITIES_V10_OUT_FLAGS2_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_FLAGS2_LEN 4 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_TSO_V2_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_TSO_V2_LBN 0 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_TSO_V2_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_TSO_V2_ENCAP_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_TSO_V2_ENCAP_LBN 1 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_TSO_V2_ENCAP_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_EVQ_TIMER_CTRL_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_EVQ_TIMER_CTRL_LBN 2 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_EVQ_TIMER_CTRL_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_EVENT_CUT_THROUGH_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_EVENT_CUT_THROUGH_LBN 3 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_EVENT_CUT_THROUGH_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_CUT_THROUGH_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_CUT_THROUGH_LBN 4 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_CUT_THROUGH_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_VFIFO_ULL_MODE_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_VFIFO_ULL_MODE_LBN 5 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_VFIFO_ULL_MODE_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_MAC_STATS_40G_TX_SIZE_BINS_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_INIT_EVQ_TYPE_SUPPORTED_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_INIT_EVQ_TYPE_SUPPORTED_LBN 7 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_INIT_EVQ_TYPE_SUPPORTED_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_INIT_EVQ_V2_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_INIT_EVQ_V2_LBN 7 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_INIT_EVQ_V2_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_MAC_TIMESTAMPING_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_MAC_TIMESTAMPING_LBN 8 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_MAC_TIMESTAMPING_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_TIMESTAMP_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_TIMESTAMP_LBN 9 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_TIMESTAMP_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_SNIFF_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_SNIFF_LBN 10 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_SNIFF_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_SNIFF_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_SNIFF_LBN 11 -#define 
MC_CMD_GET_CAPABILITIES_V10_OUT_TX_SNIFF_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_MCDI_BACKGROUND_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_MCDI_BACKGROUND_LBN 13 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_MCDI_BACKGROUND_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_MCDI_DB_RETURN_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_MCDI_DB_RETURN_LBN 14 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_MCDI_DB_RETURN_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_CTPIO_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_CTPIO_LBN 15 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_CTPIO_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_TSA_SUPPORT_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_TSA_SUPPORT_LBN 16 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_TSA_SUPPORT_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_TSA_BOUND_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_TSA_BOUND_LBN 17 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_TSA_BOUND_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_SF_ADAPTER_AUTHENTICATION_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_FILTER_ACTION_FLAG_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_FILTER_ACTION_FLAG_LBN 19 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_FILTER_ACTION_FLAG_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_FILTER_ACTION_MARK_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_FILTER_ACTION_MARK_LBN 20 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_FILTER_ACTION_MARK_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_EQUAL_STRIDE_SUPER_BUFFER_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_EQUAL_STRIDE_PACKED_STREAM_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_L3XUDP_SUPPORT_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_L3XUDP_SUPPORT_LBN 22 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_L3XUDP_SUPPORT_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_FW_SUBVARIANT_NO_TX_CSUM_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_VI_SPREADING_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_VI_SPREADING_LBN 24 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_VI_SPREADING_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXDP_HLB_IDLE_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXDP_HLB_IDLE_LBN 25 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_RXDP_HLB_IDLE_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_INIT_RXQ_NO_CONT_EV_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_INIT_RXQ_NO_CONT_EV_LBN 26 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_INIT_RXQ_NO_CONT_EV_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_INIT_RXQ_WITH_BUFFER_SIZE_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_INIT_RXQ_WITH_BUFFER_SIZE_LBN 27 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_INIT_RXQ_WITH_BUFFER_SIZE_WIDTH 1 -#define 
MC_CMD_GET_CAPABILITIES_V10_OUT_BUNDLE_UPDATE_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_BUNDLE_UPDATE_LBN 28 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_BUNDLE_UPDATE_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_TSO_V3_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_TSO_V3_LBN 29 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_TSO_V3_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_DYNAMIC_SENSORS_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_DYNAMIC_SENSORS_LBN 30 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_DYNAMIC_SENSORS_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_OFST 20 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_LBN 31 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_FLAGS2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_FLAGS2_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_TSO_V2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_TSO_V2_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_TSO_V2_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_TSO_V2_ENCAP_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_TSO_V2_ENCAP_LBN 1 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_TSO_V2_ENCAP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_EVQ_TIMER_CTRL_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_EVQ_TIMER_CTRL_LBN 2 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_EVQ_TIMER_CTRL_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_EVENT_CUT_THROUGH_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_EVENT_CUT_THROUGH_LBN 3 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_EVENT_CUT_THROUGH_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_CUT_THROUGH_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_CUT_THROUGH_LBN 4 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_CUT_THROUGH_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_VFIFO_ULL_MODE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_VFIFO_ULL_MODE_LBN 5 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_VFIFO_ULL_MODE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_MAC_STATS_40G_TX_SIZE_BINS_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_INIT_EVQ_TYPE_SUPPORTED_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_INIT_EVQ_TYPE_SUPPORTED_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_INIT_EVQ_TYPE_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_INIT_EVQ_V2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_INIT_EVQ_V2_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_INIT_EVQ_V2_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_MAC_TIMESTAMPING_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_MAC_TIMESTAMPING_LBN 8 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_MAC_TIMESTAMPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_TIMESTAMP_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_TIMESTAMP_LBN 9 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_TIMESTAMP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_SNIFF_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_SNIFF_LBN 10 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_SNIFF_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_SNIFF_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_SNIFF_LBN 11 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_SNIFF_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_OFST 20 +#define 
MC_CMD_GET_CAPABILITIES_V12_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_MCDI_BACKGROUND_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_MCDI_BACKGROUND_LBN 13 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_MCDI_BACKGROUND_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_MCDI_DB_RETURN_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_MCDI_DB_RETURN_LBN 14 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_MCDI_DB_RETURN_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_CTPIO_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_CTPIO_LBN 15 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_CTPIO_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_TSA_SUPPORT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_TSA_SUPPORT_LBN 16 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_TSA_SUPPORT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_TSA_BOUND_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_TSA_BOUND_LBN 17 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_TSA_BOUND_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_SF_ADAPTER_AUTHENTICATION_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_FILTER_ACTION_FLAG_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_FILTER_ACTION_FLAG_LBN 19 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_FILTER_ACTION_FLAG_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_FILTER_ACTION_MARK_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_FILTER_ACTION_MARK_LBN 20 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_FILTER_ACTION_MARK_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_EQUAL_STRIDE_SUPER_BUFFER_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_EQUAL_STRIDE_PACKED_STREAM_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_L3XUDP_SUPPORT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_L3XUDP_SUPPORT_LBN 22 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_L3XUDP_SUPPORT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_FW_SUBVARIANT_NO_TX_CSUM_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_VI_SPREADING_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_VI_SPREADING_LBN 24 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_VI_SPREADING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_HLB_IDLE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_HLB_IDLE_LBN 25 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_RXDP_HLB_IDLE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_INIT_RXQ_NO_CONT_EV_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_INIT_RXQ_NO_CONT_EV_LBN 26 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_INIT_RXQ_NO_CONT_EV_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_INIT_RXQ_WITH_BUFFER_SIZE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_INIT_RXQ_WITH_BUFFER_SIZE_LBN 27 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_INIT_RXQ_WITH_BUFFER_SIZE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_BUNDLE_UPDATE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_BUNDLE_UPDATE_LBN 28 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_BUNDLE_UPDATE_WIDTH 1 
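/* Illustrative sketch: every capability bit above is described by an
 * OFST/LBN/WIDTH triplet, i.e. a little-endian 32-bit flags word at byte
 * offset OFST whose bit LBN (of the given WIDTH) carries the flag. A driver
 * could therefore test a flag such as BUNDLE_UPDATE roughly as below. The
 * helper name, response buffer and length are assumptions for illustration
 * only, not the sfc driver's actual MCDI accessors.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

static bool mcdi_cap_flag(const uint8_t *resp, size_t resp_len,
			  size_t ofst, unsigned int lbn)
{
	uint32_t flags;

	/* A flags word past the response length is absent on older
	 * firmware; this is the "check the length" rule from the
	 * comments above.
	 */
	if (resp_len < ofst + 4)
		return false;
	/* MCDI payloads are little-endian, so assemble the word portably. */
	flags = (uint32_t)resp[ofst] |
		((uint32_t)resp[ofst + 1] << 8) |
		((uint32_t)resp[ofst + 2] << 16) |
		((uint32_t)resp[ofst + 3] << 24);
	return (flags >> lbn) & 1;
}

/* Usage, e.g.:
 *   mcdi_cap_flag(resp, resp_len,
 *		   MC_CMD_GET_CAPABILITIES_V12_OUT_BUNDLE_UPDATE_OFST,
 *		   MC_CMD_GET_CAPABILITIES_V12_OUT_BUNDLE_UPDATE_LBN);
 * The same triplet convention applies to FLAGS1 (offset 0) and FLAGS3
 * (offset 148), and SUPPORTED_QUEUE_SIZES reuses the bitmap idea: bit N set
 * means a 2**N-entry queue size is valid, so bit 10 advertises 1024 entries.
 */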
+#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_TSO_V3_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_TSO_V3_LBN 29 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_TSO_V3_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_DYNAMIC_SENSORS_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_DYNAMIC_SENSORS_LBN 30 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_DYNAMIC_SENSORS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_LBN 31 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_WIDTH 1 /* Number of FATSOv2 contexts per datapath supported by this NIC (when * TX_TSO_V2 == 1). Not present on older firmware (check the length). */ -#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2 /* One byte per PF containing the number of the external port assigned to this * PF, indexed by PF number. Special values indicate that a PF is either not * present or not assigned. */ -#define MC_CMD_GET_CAPABILITIES_V10_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16 /* enum: The caller is not permitted to access information on this PF. */ -#define MC_CMD_GET_CAPABILITIES_V10_OUT_ACCESS_NOT_PERMITTED 0xff +#define MC_CMD_GET_CAPABILITIES_V12_OUT_ACCESS_NOT_PERMITTED 0xff /* enum: PF does not exist. */ -#define MC_CMD_GET_CAPABILITIES_V10_OUT_PF_NOT_PRESENT 0xfe +#define MC_CMD_GET_CAPABILITIES_V12_OUT_PF_NOT_PRESENT 0xfe /* enum: PF does exist but is not assigned to any external port. */ -#define MC_CMD_GET_CAPABILITIES_V10_OUT_PF_NOT_ASSIGNED 0xfd +#define MC_CMD_GET_CAPABILITIES_V12_OUT_PF_NOT_ASSIGNED 0xfd /* enum: This value indicates that PF is assigned, but it cannot be expressed * in this field. It is intended for a possible future situation where a more * complex scheme of PFs to ports mapping is being used. The future driver * should look for a new field supporting the new scheme. The current/old * driver should treat this value as PF_NOT_ASSIGNED. */ -#define MC_CMD_GET_CAPABILITIES_V10_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc +#define MC_CMD_GET_CAPABILITIES_V12_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc /* One byte per PF containing the number of its VFs, indexed by PF number. A * special value indicates that a PF is not present. */ -#define MC_CMD_GET_CAPABILITIES_V10_OUT_NUM_VFS_PER_PF_OFST 42 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_NUM_VFS_PER_PF_LEN 1 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_NUM_VFS_PER_PF_NUM 16 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_NUM_VFS_PER_PF_OFST 42 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_NUM_VFS_PER_PF_LEN 1 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_NUM_VFS_PER_PF_NUM 16 /* enum: The caller is not permitted to access information on this PF. */ -/* MC_CMD_GET_CAPABILITIES_V10_OUT_ACCESS_NOT_PERMITTED 0xff */ +/* MC_CMD_GET_CAPABILITIES_V12_OUT_ACCESS_NOT_PERMITTED 0xff */ /* enum: PF does not exist. 
*/ -/* MC_CMD_GET_CAPABILITIES_V10_OUT_PF_NOT_PRESENT 0xfe */ -/* Number of VIs available for each external port */ -#define MC_CMD_GET_CAPABILITIES_V10_OUT_NUM_VIS_PER_PORT_OFST 58 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_NUM_VIS_PER_PORT_LEN 2 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_NUM_VIS_PER_PORT_NUM 4 +/* MC_CMD_GET_CAPABILITIES_V12_OUT_PF_NOT_PRESENT 0xfe */ +/* Number of VIs available for external ports 0-3. For devices with more than + * four ports, the remainder are in NUM_VIS_PER_PORT2 in + * GET_CAPABILITIES_V12_OUT. + */ +#define MC_CMD_GET_CAPABILITIES_V12_OUT_NUM_VIS_PER_PORT_OFST 58 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_NUM_VIS_PER_PORT_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_NUM_VIS_PER_PORT_NUM 4 /* Size of RX descriptor cache expressed as binary logarithm. The actual size * equals (2 ^ RX_DESC_CACHE_SIZE) */ -#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_DESC_CACHE_SIZE_OFST 66 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_DESC_CACHE_SIZE_LEN 1 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_DESC_CACHE_SIZE_OFST 66 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_DESC_CACHE_SIZE_LEN 1 /* Size of TX descriptor cache expressed as binary logarithm. The actual size * equals (2 ^ TX_DESC_CACHE_SIZE) */ -#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_DESC_CACHE_SIZE_OFST 67 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_TX_DESC_CACHE_SIZE_LEN 1 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_DESC_CACHE_SIZE_OFST 67 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_TX_DESC_CACHE_SIZE_LEN 1 /* Total number of available PIO buffers */ -#define MC_CMD_GET_CAPABILITIES_V10_OUT_NUM_PIO_BUFFS_OFST 68 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_NUM_PIO_BUFFS_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_NUM_PIO_BUFFS_OFST 68 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_NUM_PIO_BUFFS_LEN 2 /* Size of a single PIO buffer */ -#define MC_CMD_GET_CAPABILITIES_V10_OUT_SIZE_PIO_BUFF_OFST 70 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_SIZE_PIO_BUFF_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_SIZE_PIO_BUFF_OFST 70 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_SIZE_PIO_BUFF_LEN 2 /* On chips later than Medford the amount of address space assigned to each VI * is configurable. This is a global setting that the driver must query to * discover the VI to address mapping. Cut-through PIO (CTPIO) is not available * with 8k VI windows. */ -#define MC_CMD_GET_CAPABILITIES_V10_OUT_VI_WINDOW_MODE_OFST 72 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_VI_WINDOW_MODE_LEN 1 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_VI_WINDOW_MODE_OFST 72 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_VI_WINDOW_MODE_LEN 1 /* enum: Each VI occupies 8k as on Huntington and Medford. PIO is at offset 4k. * CTPIO is not mapped. */ -#define MC_CMD_GET_CAPABILITIES_V10_OUT_VI_WINDOW_MODE_8K 0x0 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_VI_WINDOW_MODE_8K 0x0 /* enum: Each VI occupies 16k. PIO is at offset 4k. CTPIO is at offset 12k. */ -#define MC_CMD_GET_CAPABILITIES_V10_OUT_VI_WINDOW_MODE_16K 0x1 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_VI_WINDOW_MODE_16K 0x1 /* enum: Each VI occupies 64k. PIO is at offset 4k. CTPIO is at offset 12k. */ -#define MC_CMD_GET_CAPABILITIES_V10_OUT_VI_WINDOW_MODE_64K 0x2 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_VI_WINDOW_MODE_64K 0x2 /* Number of vFIFOs per adapter that can be used for VFIFO Stuffing * (SF-115995-SW) in the present configuration of firmware and port mode.
*/ -#define MC_CMD_GET_CAPABILITIES_V10_OUT_VFIFO_STUFFING_NUM_VFIFOS_OFST 73 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_VFIFO_STUFFING_NUM_VFIFOS_LEN 1 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_VFIFO_STUFFING_NUM_VFIFOS_OFST 73 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_VFIFO_STUFFING_NUM_VFIFOS_LEN 1 /* Number of buffers per adapter that can be used for VFIFO Stuffing * (SF-115995-SW) in the present configuration of firmware and port mode. */ -#define MC_CMD_GET_CAPABILITIES_V10_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_OFST 74 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_OFST 74 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_LEN 2 /* Entry count in the MAC stats array, including the final GENERATION_END - * entry. For MAC stats DMA, drivers should allocate a buffer large enough to - * hold at least this many 64-bit stats values, if they wish to receive all - * available stats. If the buffer is shorter than MAC_STATS_NUM_STATS * 8, the - * stats array returned will be truncated. - */ -#define MC_CMD_GET_CAPABILITIES_V10_OUT_MAC_STATS_NUM_STATS_OFST 76 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_MAC_STATS_NUM_STATS_LEN 2 -/* Maximum supported value for MC_CMD_FILTER_OP_V3/MATCH_MARK_VALUE. This field - * will only be non-zero if MC_CMD_GET_CAPABILITIES/FILTER_ACTION_MARK is set. - */ -#define MC_CMD_GET_CAPABILITIES_V10_OUT_FILTER_ACTION_MARK_MAX_OFST 80 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_FILTER_ACTION_MARK_MAX_LEN 4 -/* On devices where the INIT_RXQ_WITH_BUFFER_SIZE flag (in - * GET_CAPABILITIES_OUT_V2) is set, drivers have to specify a buffer size when - * they create an RX queue. Due to hardware limitations, only a small number of - * different buffer sizes may be available concurrently. Nonzero entries in - * this array are the sizes of buffers which the system guarantees will be - * available for use. If the list is empty, there are no limitations on - * concurrent buffer sizes. - */ -#define MC_CMD_GET_CAPABILITIES_V10_OUT_GUARANTEED_RX_BUFFER_SIZES_OFST 84 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_GUARANTEED_RX_BUFFER_SIZES_LEN 4 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_GUARANTEED_RX_BUFFER_SIZES_NUM 16 -/* Third word of flags. Not present on older firmware (check the length). 
*/ -#define MC_CMD_GET_CAPABILITIES_V10_OUT_FLAGS3_OFST 148 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_FLAGS3_LEN 4 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_WOL_ETHERWAKE_OFST 148 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_WOL_ETHERWAKE_LBN 0 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_WOL_ETHERWAKE_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_EVEN_SPREADING_OFST 148 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_EVEN_SPREADING_LBN 1 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_EVEN_SPREADING_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_SELECTABLE_TABLE_SIZE_OFST 148 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_SELECTABLE_TABLE_SIZE_LBN 2 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_SELECTABLE_TABLE_SIZE_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_MAE_SUPPORTED_OFST 148 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_MAE_SUPPORTED_LBN 3 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_MAE_SUPPORTED_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_VDPA_SUPPORTED_OFST 148 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_VDPA_SUPPORTED_LBN 4 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_VDPA_SUPPORTED_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_OFST 148 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_LBN 5 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_OFST 148 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_LBN 6 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_UNSOL_EV_CREDIT_SUPPORTED_OFST 148 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_UNSOL_EV_CREDIT_SUPPORTED_LBN 7 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_UNSOL_EV_CREDIT_SUPPORTED_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_ENCAPSULATED_MCDI_SUPPORTED_OFST 148 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_ENCAPSULATED_MCDI_SUPPORTED_LBN 8 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_ENCAPSULATED_MCDI_SUPPORTED_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_EXTERNAL_MAE_SUPPORTED_OFST 148 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_EXTERNAL_MAE_SUPPORTED_LBN 9 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_EXTERNAL_MAE_SUPPORTED_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_NVRAM_UPDATE_ABORT_SUPPORTED_OFST 148 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_NVRAM_UPDATE_ABORT_SUPPORTED_LBN 10 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_NVRAM_UPDATE_ABORT_SUPPORTED_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_MAE_ACTION_SET_ALLOC_V2_SUPPORTED_OFST 148 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_MAE_ACTION_SET_ALLOC_V2_SUPPORTED_LBN 11 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_MAE_ACTION_SET_ALLOC_V2_SUPPORTED_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_STEER_ON_OUTER_SUPPORTED_OFST 148 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_STEER_ON_OUTER_SUPPORTED_LBN 12 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_STEER_ON_OUTER_SUPPORTED_WIDTH 1 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_DYNAMIC_MPORT_JOURNAL_OFST 148 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_DYNAMIC_MPORT_JOURNAL_LBN 14 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_DYNAMIC_MPORT_JOURNAL_WIDTH 1 + * entry. For MAC stats DMA, drivers should allocate a buffer large enough to + * hold at least this many 64-bit stats values, if they wish to receive all + * available stats. If the buffer is shorter than MAC_STATS_NUM_STATS * 8, the + * stats array returned will be truncated. 
+ */ +#define MC_CMD_GET_CAPABILITIES_V12_OUT_MAC_STATS_NUM_STATS_OFST 76 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_MAC_STATS_NUM_STATS_LEN 2 +/* Maximum supported value for MC_CMD_FILTER_OP_V3/MATCH_MARK_VALUE. This field + * will only be non-zero if MC_CMD_GET_CAPABILITIES/FILTER_ACTION_MARK is set. + */ +#define MC_CMD_GET_CAPABILITIES_V12_OUT_FILTER_ACTION_MARK_MAX_OFST 80 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_FILTER_ACTION_MARK_MAX_LEN 4 +/* On devices where the INIT_RXQ_WITH_BUFFER_SIZE flag (in + * GET_CAPABILITIES_OUT_V2) is set, drivers have to specify a buffer size when + * they create an RX queue. Due to hardware limitations, only a small number of + * different buffer sizes may be available concurrently. Nonzero entries in + * this array are the sizes of buffers which the system guarantees will be + * available for use. If the list is empty, there are no limitations on + * concurrent buffer sizes. + */ +#define MC_CMD_GET_CAPABILITIES_V12_OUT_GUARANTEED_RX_BUFFER_SIZES_OFST 84 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_GUARANTEED_RX_BUFFER_SIZES_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_GUARANTEED_RX_BUFFER_SIZES_NUM 16 +/* Third word of flags. Not present on older firmware (check the length). */ +#define MC_CMD_GET_CAPABILITIES_V12_OUT_FLAGS3_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_FLAGS3_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_WOL_ETHERWAKE_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_WOL_ETHERWAKE_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_WOL_ETHERWAKE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_EVEN_SPREADING_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_EVEN_SPREADING_LBN 1 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_EVEN_SPREADING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_SELECTABLE_TABLE_SIZE_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_SELECTABLE_TABLE_SIZE_LBN 2 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_SELECTABLE_TABLE_SIZE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_MAE_SUPPORTED_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_MAE_SUPPORTED_LBN 3 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_MAE_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_VDPA_SUPPORTED_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_VDPA_SUPPORTED_LBN 4 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_VDPA_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_LBN 5 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_LBN 6 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_UNSOL_EV_CREDIT_SUPPORTED_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_UNSOL_EV_CREDIT_SUPPORTED_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_UNSOL_EV_CREDIT_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_ENCAPSULATED_MCDI_SUPPORTED_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_ENCAPSULATED_MCDI_SUPPORTED_LBN 8 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_ENCAPSULATED_MCDI_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_EXTERNAL_MAE_SUPPORTED_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_EXTERNAL_MAE_SUPPORTED_LBN 9 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_EXTERNAL_MAE_SUPPORTED_WIDTH 1 +#define 
MC_CMD_GET_CAPABILITIES_V12_OUT_NVRAM_UPDATE_ABORT_SUPPORTED_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_NVRAM_UPDATE_ABORT_SUPPORTED_LBN 10 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_NVRAM_UPDATE_ABORT_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_MAE_ACTION_SET_ALLOC_V2_SUPPORTED_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_MAE_ACTION_SET_ALLOC_V2_SUPPORTED_LBN 11 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_MAE_ACTION_SET_ALLOC_V2_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_STEER_ON_OUTER_SUPPORTED_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_STEER_ON_OUTER_SUPPORTED_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_STEER_ON_OUTER_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_LBN 13 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_MAE_ACTION_SET_ALLOC_V3_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_DYNAMIC_MPORT_JOURNAL_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_DYNAMIC_MPORT_JOURNAL_LBN 14 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_DYNAMIC_MPORT_JOURNAL_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_CLIENT_CMD_VF_PROXY_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_CLIENT_CMD_VF_PROXY_LBN 15 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_CLIENT_CMD_VF_PROXY_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_LBN 16 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_LL_RX_EVENT_SUPPRESSION_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_CXL_CONFIG_ENABLE_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_CXL_CONFIG_ENABLE_LBN 17 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_CXL_CONFIG_ENABLE_WIDTH 1 /* These bits are reserved for communicating test-specific capabilities to * host-side test software. All production drivers should treat this field as * opaque. */ -#define MC_CMD_GET_CAPABILITIES_V10_OUT_TEST_RESERVED_OFST 152 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_TEST_RESERVED_LEN 8 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_TEST_RESERVED_LO_OFST 152 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_TEST_RESERVED_LO_LEN 4 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_TEST_RESERVED_LO_LBN 1216 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_TEST_RESERVED_LO_WIDTH 32 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_TEST_RESERVED_HI_OFST 156 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_TEST_RESERVED_HI_LEN 4 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_TEST_RESERVED_HI_LBN 1248 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_TEST_RESERVED_HI_WIDTH 32 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_TEST_RESERVED_OFST 152 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_TEST_RESERVED_LEN 8 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_TEST_RESERVED_LO_OFST 152 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_TEST_RESERVED_LO_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_TEST_RESERVED_LO_LBN 1216 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_TEST_RESERVED_LO_WIDTH 32 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_TEST_RESERVED_HI_OFST 156 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_TEST_RESERVED_HI_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_TEST_RESERVED_HI_LBN 1248 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_TEST_RESERVED_HI_WIDTH 32 /* The minimum size (in table entries) of indirection table to be allocated * from the pool for an RSS context. Note that the table size used must be a * power of 2. 
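
Because the table size must be a power of 2 and must lie between the MIN/MAX values reported in the fields defined just below, choosing a size on the driver side is a round-up-and-clamp. A sketch, illustrative only, where min and max are the values read from the RSS_MIN/MAX_INDIRECTION_TABLE_SIZE fields:

/* Illustrative only: pick a valid RSS indirection table size for
 * "nqueues" queues: round up to a power of 2, then clamp to the
 * [min, max] range the firmware reported (both powers of 2).
 */
static u32 pick_rss_table_size(u32 nqueues, u32 min, u32 max)
{
	u32 size = 1;

	while (size < nqueues && size < max)
		size <<= 1;	/* round up to a power of 2 */
	if (size < min)
		size = min;
	return size;
}
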
*/ -#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_MIN_INDIRECTION_TABLE_SIZE_OFST 160 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_MIN_INDIRECTION_TABLE_SIZE_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_MIN_INDIRECTION_TABLE_SIZE_OFST 160 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_MIN_INDIRECTION_TABLE_SIZE_LEN 4 /* The maximum size (in table entries) of indirection table to be allocated * from the pool for an RSS context. Note that the table size used must be a * power of 2. */ -#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_MAX_INDIRECTION_TABLE_SIZE_OFST 164 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_MAX_INDIRECTION_TABLE_SIZE_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_MAX_INDIRECTION_TABLE_SIZE_OFST 164 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_MAX_INDIRECTION_TABLE_SIZE_LEN 4 /* The maximum number of queues that can be used by an RSS context in exclusive * mode. In exclusive mode the context has a configurable indirection table and * a configurable RSS key. */ -#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_MAX_INDIRECTION_QUEUES_OFST 168 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_MAX_INDIRECTION_QUEUES_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_MAX_INDIRECTION_QUEUES_OFST 168 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_MAX_INDIRECTION_QUEUES_LEN 4 /* The maximum number of queues that can be used by an RSS context in even- * spreading mode. In even-spreading mode the context has no indirection table * but it does have a configurable RSS key. */ -#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_MAX_EVEN_SPREADING_QUEUES_OFST 172 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_MAX_EVEN_SPREADING_QUEUES_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_MAX_EVEN_SPREADING_QUEUES_OFST 172 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_MAX_EVEN_SPREADING_QUEUES_LEN 4 /* The total number of RSS contexts supported. Note that the number of * available contexts using indirection tables is also limited by the * availability of indirection table space allocated from a common pool. */ -#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_NUM_CONTEXTS_OFST 176 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_NUM_CONTEXTS_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_NUM_CONTEXTS_OFST 176 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_NUM_CONTEXTS_LEN 4 /* The total amount of indirection table space that can be shared between RSS * contexts. */ -#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_TABLE_POOL_SIZE_OFST 180 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_RSS_TABLE_POOL_SIZE_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_TABLE_POOL_SIZE_OFST 180 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_RSS_TABLE_POOL_SIZE_LEN 4 /* A bitmap of the queue sizes the device can provide, where bit N being set * indicates that 2**N is a valid size. The device may be limited in the number * of different queue sizes that can exist simultaneously, so a bit being set * here does not guarantee that an attempt to create a queue of that size will * succeed. */ -#define MC_CMD_GET_CAPABILITIES_V10_OUT_SUPPORTED_QUEUE_SIZES_OFST 184 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_SUPPORTED_QUEUE_SIZES_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_SUPPORTED_QUEUE_SIZES_OFST 184 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_SUPPORTED_QUEUE_SIZES_LEN 4 /* A bitmap of queue sizes that are always available, in the same format as * SUPPORTED_QUEUE_SIZES. Attempting to create a queue with one of these sizes * will never fail due to unavailability of the requested size. 
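
The bitmap encoding described above (bit N set means 2**N entries is a valid size) makes the support check a single bit test. A sketch, not part of the patch, for a candidate size in table entries:

/* Illustrative only: true if "entries" appears in a SUPPORTED_QUEUE_SIZES
 * style bitmap. Note a set bit still does not guarantee that creating a
 * queue of that size will succeed; only GUARANTEED_QUEUE_SIZES does.
 */
static bool queue_size_advertised(u32 bitmap, u32 entries)
{
	if (!entries || (entries & (entries - 1)))
		return false;	/* only powers of 2 are representable */
	return (bitmap >> __builtin_ctz(entries)) & 1;
}
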
*/ -#define MC_CMD_GET_CAPABILITIES_V10_OUT_GUARANTEED_QUEUE_SIZES_OFST 188 -#define MC_CMD_GET_CAPABILITIES_V10_OUT_GUARANTEED_QUEUE_SIZES_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_GUARANTEED_QUEUE_SIZES_OFST 188 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_GUARANTEED_QUEUE_SIZES_LEN 4 +/* Number of available indirect memory maps. */ +#define MC_CMD_GET_CAPABILITIES_V12_OUT_INDIRECT_MAP_INDEX_COUNT_OFST 192 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_INDIRECT_MAP_INDEX_COUNT_LEN 4 +/* Number of VIs available for external ports 4-7. Information for ports 0-3 is + * in NUM_VIS_PER_PORT in GET_CAPABILITIES_V2_OUT. + */ +#define MC_CMD_GET_CAPABILITIES_V12_OUT_NUM_VIS_PER_PORT2_OFST 196 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_NUM_VIS_PER_PORT2_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V12_OUT_NUM_VIS_PER_PORT2_NUM 4 /***********************************/ @@ -18468,168 +19479,13 @@ * are not defined. */ #define MC_CMD_V2_EXTN_IN_MCDI_MESSAGE_TYPE_TSA 0x1 - - -/***********************************/ -/* MC_CMD_TCM_BUCKET_ALLOC - * Allocate a pacer bucket (for qau rp or a snapper test) - */ -#define MC_CMD_TCM_BUCKET_ALLOC 0xb2 -#undef MC_CMD_0xb2_PRIVILEGE_CTG - -#define MC_CMD_0xb2_PRIVILEGE_CTG SRIOV_CTG_GENERAL - -/* MC_CMD_TCM_BUCKET_ALLOC_IN msgrequest */ -#define MC_CMD_TCM_BUCKET_ALLOC_IN_LEN 0 - -/* MC_CMD_TCM_BUCKET_ALLOC_OUT msgresponse */ -#define MC_CMD_TCM_BUCKET_ALLOC_OUT_LEN 4 -/* the bucket id */ -#define MC_CMD_TCM_BUCKET_ALLOC_OUT_BUCKET_OFST 0 -#define MC_CMD_TCM_BUCKET_ALLOC_OUT_BUCKET_LEN 4 - - -/***********************************/ -/* MC_CMD_TCM_BUCKET_FREE - * Free a pacer bucket +/* enum: MCDI command used for platform management. Typically, these commands + * are used for low-level operations directed at the platform as a whole (e.g. + * MMIO device enumeration) rather than individual functions and use a + * dedicated comms channel (e.g. RPmsg/IPI). May be handled by the same or + * different CPU as MCDI_MESSAGE_TYPE_MC. 
*/ -#define MC_CMD_TCM_BUCKET_FREE 0xb3 -#undef MC_CMD_0xb3_PRIVILEGE_CTG - -#define MC_CMD_0xb3_PRIVILEGE_CTG SRIOV_CTG_GENERAL - -/* MC_CMD_TCM_BUCKET_FREE_IN msgrequest */ -#define MC_CMD_TCM_BUCKET_FREE_IN_LEN 4 -/* the bucket id */ -#define MC_CMD_TCM_BUCKET_FREE_IN_BUCKET_OFST 0 -#define MC_CMD_TCM_BUCKET_FREE_IN_BUCKET_LEN 4 - -/* MC_CMD_TCM_BUCKET_FREE_OUT msgresponse */ -#define MC_CMD_TCM_BUCKET_FREE_OUT_LEN 0 - - -/***********************************/ -/* MC_CMD_TCM_BUCKET_INIT - * Initialise pacer bucket with a given rate - */ -#define MC_CMD_TCM_BUCKET_INIT 0xb4 -#undef MC_CMD_0xb4_PRIVILEGE_CTG - -#define MC_CMD_0xb4_PRIVILEGE_CTG SRIOV_CTG_GENERAL - -/* MC_CMD_TCM_BUCKET_INIT_IN msgrequest */ -#define MC_CMD_TCM_BUCKET_INIT_IN_LEN 8 -/* the bucket id */ -#define MC_CMD_TCM_BUCKET_INIT_IN_BUCKET_OFST 0 -#define MC_CMD_TCM_BUCKET_INIT_IN_BUCKET_LEN 4 -/* the rate in mbps */ -#define MC_CMD_TCM_BUCKET_INIT_IN_RATE_OFST 4 -#define MC_CMD_TCM_BUCKET_INIT_IN_RATE_LEN 4 - -/* MC_CMD_TCM_BUCKET_INIT_EXT_IN msgrequest */ -#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_LEN 12 -/* the bucket id */ -#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_BUCKET_OFST 0 -#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_BUCKET_LEN 4 -/* the rate in mbps */ -#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_RATE_OFST 4 -#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_RATE_LEN 4 -/* the desired maximum fill level */ -#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_MAX_FILL_OFST 8 -#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_MAX_FILL_LEN 4 - -/* MC_CMD_TCM_BUCKET_INIT_OUT msgresponse */ -#define MC_CMD_TCM_BUCKET_INIT_OUT_LEN 0 - - -/***********************************/ -/* MC_CMD_TCM_TXQ_INIT - * Initialise txq in pacer with given options or set options - */ -#define MC_CMD_TCM_TXQ_INIT 0xb5 -#undef MC_CMD_0xb5_PRIVILEGE_CTG - -#define MC_CMD_0xb5_PRIVILEGE_CTG SRIOV_CTG_GENERAL - -/* MC_CMD_TCM_TXQ_INIT_IN msgrequest */ -#define MC_CMD_TCM_TXQ_INIT_IN_LEN 28 -/* the txq id */ -#define MC_CMD_TCM_TXQ_INIT_IN_QID_OFST 0 -#define MC_CMD_TCM_TXQ_INIT_IN_QID_LEN 4 -/* the static priority associated with the txq */ -#define MC_CMD_TCM_TXQ_INIT_IN_LABEL_OFST 4 -#define MC_CMD_TCM_TXQ_INIT_IN_LABEL_LEN 4 -/* bitmask of the priority queues this txq is inserted into when inserted. 
*/ -#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAGS_OFST 8 -#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAGS_LEN 4 -#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_GUARANTEED_OFST 8 -#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_GUARANTEED_LBN 0 -#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_GUARANTEED_WIDTH 1 -#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_NORMAL_OFST 8 -#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_NORMAL_LBN 1 -#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_NORMAL_WIDTH 1 -#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_LOW_OFST 8 -#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_LOW_LBN 2 -#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_LOW_WIDTH 1 -/* the reaction point (RP) bucket */ -#define MC_CMD_TCM_TXQ_INIT_IN_RP_BKT_OFST 12 -#define MC_CMD_TCM_TXQ_INIT_IN_RP_BKT_LEN 4 -/* an already reserved bucket (typically set to bucket associated with outer - * vswitch) - */ -#define MC_CMD_TCM_TXQ_INIT_IN_MAX_BKT1_OFST 16 -#define MC_CMD_TCM_TXQ_INIT_IN_MAX_BKT1_LEN 4 -/* an already reserved bucket (typically set to bucket associated with inner - * vswitch) - */ -#define MC_CMD_TCM_TXQ_INIT_IN_MAX_BKT2_OFST 20 -#define MC_CMD_TCM_TXQ_INIT_IN_MAX_BKT2_LEN 4 -/* the min bucket (typically for ETS/minimum bandwidth) */ -#define MC_CMD_TCM_TXQ_INIT_IN_MIN_BKT_OFST 24 -#define MC_CMD_TCM_TXQ_INIT_IN_MIN_BKT_LEN 4 - -/* MC_CMD_TCM_TXQ_INIT_EXT_IN msgrequest */ -#define MC_CMD_TCM_TXQ_INIT_EXT_IN_LEN 32 -/* the txq id */ -#define MC_CMD_TCM_TXQ_INIT_EXT_IN_QID_OFST 0 -#define MC_CMD_TCM_TXQ_INIT_EXT_IN_QID_LEN 4 -/* the static priority associated with the txq */ -#define MC_CMD_TCM_TXQ_INIT_EXT_IN_LABEL_NORMAL_OFST 4 -#define MC_CMD_TCM_TXQ_INIT_EXT_IN_LABEL_NORMAL_LEN 4 -/* bitmask of the priority queues this txq is inserted into when inserted. */ -#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAGS_OFST 8 -#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAGS_LEN 4 -#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_GUARANTEED_OFST 8 -#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_GUARANTEED_LBN 0 -#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_GUARANTEED_WIDTH 1 -#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_NORMAL_OFST 8 -#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_NORMAL_LBN 1 -#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_NORMAL_WIDTH 1 -#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_LOW_OFST 8 -#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_LOW_LBN 2 -#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_LOW_WIDTH 1 -/* the reaction point (RP) bucket */ -#define MC_CMD_TCM_TXQ_INIT_EXT_IN_RP_BKT_OFST 12 -#define MC_CMD_TCM_TXQ_INIT_EXT_IN_RP_BKT_LEN 4 -/* an already reserved bucket (typically set to bucket associated with outer - * vswitch) - */ -#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MAX_BKT1_OFST 16 -#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MAX_BKT1_LEN 4 -/* an already reserved bucket (typically set to bucket associated with inner - * vswitch) - */ -#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MAX_BKT2_OFST 20 -#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MAX_BKT2_LEN 4 -/* the min bucket (typically for ETS/minimum bandwidth) */ -#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MIN_BKT_OFST 24 -#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MIN_BKT_LEN 4 -/* the static priority associated with the txq */ -#define MC_CMD_TCM_TXQ_INIT_EXT_IN_LABEL_GUARANTEED_OFST 28 -#define MC_CMD_TCM_TXQ_INIT_EXT_IN_LABEL_GUARANTEED_LEN 4 - -/* MC_CMD_TCM_TXQ_INIT_OUT msgresponse */ -#define MC_CMD_TCM_TXQ_INIT_OUT_LEN 0 +#define MC_CMD_V2_EXTN_IN_MCDI_MESSAGE_TYPE_PLATFORM 0x2 /***********************************/ @@ -18739,27 +19595,6 @@ #define MC_CMD_VSWITCH_FREE_OUT_LEN 0 -/***********************************/ -/* MC_CMD_VSWITCH_QUERY - * read some config of v-switch. 
For now this command is an empty placeholder. - * It may be used to check if a v-switch is connected to a given EVB port (if - * not, then the command returns ENOENT). - */ -#define MC_CMD_VSWITCH_QUERY 0x63 -#undef MC_CMD_0x63_PRIVILEGE_CTG - -#define MC_CMD_0x63_PRIVILEGE_CTG SRIOV_CTG_GENERAL - -/* MC_CMD_VSWITCH_QUERY_IN msgrequest */ -#define MC_CMD_VSWITCH_QUERY_IN_LEN 4 -/* The port to which the v-switch is connected. */ -#define MC_CMD_VSWITCH_QUERY_IN_UPSTREAM_PORT_ID_OFST 0 -#define MC_CMD_VSWITCH_QUERY_IN_UPSTREAM_PORT_ID_LEN 4 - -/* MC_CMD_VSWITCH_QUERY_OUT msgresponse */ -#define MC_CMD_VSWITCH_QUERY_OUT_LEN 0 - - /***********************************/ /* MC_CMD_VPORT_ALLOC * allocate a v-port. @@ -18935,28 +19770,6 @@ #define MC_CMD_VADAPTOR_SET_MAC_OUT_LEN 0 -/***********************************/ -/* MC_CMD_VADAPTOR_GET_MAC - * read the MAC address assigned to a v-adaptor. - */ -#define MC_CMD_VADAPTOR_GET_MAC 0x5e -#undef MC_CMD_0x5e_PRIVILEGE_CTG - -#define MC_CMD_0x5e_PRIVILEGE_CTG SRIOV_CTG_GENERAL - -/* MC_CMD_VADAPTOR_GET_MAC_IN msgrequest */ -#define MC_CMD_VADAPTOR_GET_MAC_IN_LEN 4 -/* The port to which the v-adaptor is connected. */ -#define MC_CMD_VADAPTOR_GET_MAC_IN_UPSTREAM_PORT_ID_OFST 0 -#define MC_CMD_VADAPTOR_GET_MAC_IN_UPSTREAM_PORT_ID_LEN 4 - -/* MC_CMD_VADAPTOR_GET_MAC_OUT msgresponse */ -#define MC_CMD_VADAPTOR_GET_MAC_OUT_LEN 6 -/* The MAC address assigned to this v-adaptor */ -#define MC_CMD_VADAPTOR_GET_MAC_OUT_MACADDR_OFST 0 -#define MC_CMD_VADAPTOR_GET_MAC_OUT_MACADDR_LEN 6 - - /***********************************/ /* MC_CMD_VADAPTOR_QUERY * read some config of v-adaptor. @@ -19013,86 +19826,6 @@ #define MC_CMD_EVB_PORT_ASSIGN_OUT_LEN 0 -/***********************************/ -/* MC_CMD_RDWR_A64_REGIONS - * Assign the 64 bit region addresses. - */ -#define MC_CMD_RDWR_A64_REGIONS 0x9b -#undef MC_CMD_0x9b_PRIVILEGE_CTG - -#define MC_CMD_0x9b_PRIVILEGE_CTG SRIOV_CTG_ADMIN - -/* MC_CMD_RDWR_A64_REGIONS_IN msgrequest */ -#define MC_CMD_RDWR_A64_REGIONS_IN_LEN 17 -#define MC_CMD_RDWR_A64_REGIONS_IN_REGION0_OFST 0 -#define MC_CMD_RDWR_A64_REGIONS_IN_REGION0_LEN 4 -#define MC_CMD_RDWR_A64_REGIONS_IN_REGION1_OFST 4 -#define MC_CMD_RDWR_A64_REGIONS_IN_REGION1_LEN 4 -#define MC_CMD_RDWR_A64_REGIONS_IN_REGION2_OFST 8 -#define MC_CMD_RDWR_A64_REGIONS_IN_REGION2_LEN 4 -#define MC_CMD_RDWR_A64_REGIONS_IN_REGION3_OFST 12 -#define MC_CMD_RDWR_A64_REGIONS_IN_REGION3_LEN 4 -/* Write enable bits 0-3, set to write, clear to read. */ -#define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_LBN 128 -#define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_WIDTH 4 -#define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_BYTE_OFST 16 -#define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_BYTE_LEN 1 - -/* MC_CMD_RDWR_A64_REGIONS_OUT msgresponse: This data always included - * regardless of state of write bits in the request. - */ -#define MC_CMD_RDWR_A64_REGIONS_OUT_LEN 16 -#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION0_OFST 0 -#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION0_LEN 4 -#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION1_OFST 4 -#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION1_LEN 4 -#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION2_OFST 8 -#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION2_LEN 4 -#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION3_OFST 12 -#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION3_LEN 4 - - -/***********************************/ -/* MC_CMD_ONLOAD_STACK_ALLOC - * Allocate an Onload stack ID. 
- */ -#define MC_CMD_ONLOAD_STACK_ALLOC 0x9c -#undef MC_CMD_0x9c_PRIVILEGE_CTG - -#define MC_CMD_0x9c_PRIVILEGE_CTG SRIOV_CTG_ONLOAD - -/* MC_CMD_ONLOAD_STACK_ALLOC_IN msgrequest */ -#define MC_CMD_ONLOAD_STACK_ALLOC_IN_LEN 4 -/* The handle of the owning upstream port */ -#define MC_CMD_ONLOAD_STACK_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0 -#define MC_CMD_ONLOAD_STACK_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4 - -/* MC_CMD_ONLOAD_STACK_ALLOC_OUT msgresponse */ -#define MC_CMD_ONLOAD_STACK_ALLOC_OUT_LEN 4 -/* The handle of the new Onload stack */ -#define MC_CMD_ONLOAD_STACK_ALLOC_OUT_ONLOAD_STACK_ID_OFST 0 -#define MC_CMD_ONLOAD_STACK_ALLOC_OUT_ONLOAD_STACK_ID_LEN 4 - - -/***********************************/ -/* MC_CMD_ONLOAD_STACK_FREE - * Free an Onload stack ID. - */ -#define MC_CMD_ONLOAD_STACK_FREE 0x9d -#undef MC_CMD_0x9d_PRIVILEGE_CTG - -#define MC_CMD_0x9d_PRIVILEGE_CTG SRIOV_CTG_ONLOAD - -/* MC_CMD_ONLOAD_STACK_FREE_IN msgrequest */ -#define MC_CMD_ONLOAD_STACK_FREE_IN_LEN 4 -/* The handle of the Onload stack */ -#define MC_CMD_ONLOAD_STACK_FREE_IN_ONLOAD_STACK_ID_OFST 0 -#define MC_CMD_ONLOAD_STACK_FREE_IN_ONLOAD_STACK_ID_LEN 4 - -/* MC_CMD_ONLOAD_STACK_FREE_OUT msgresponse */ -#define MC_CMD_ONLOAD_STACK_FREE_OUT_LEN 0 - - /***********************************/ /* MC_CMD_RSS_CONTEXT_ALLOC * Allocate an RSS context. @@ -19304,93 +20037,6 @@ #define MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_INDIRECTION_TABLE_LEN 128 -/***********************************/ -/* MC_CMD_RSS_CONTEXT_WRITE_TABLE - * Write a portion of a selectable-size indirection table for an RSS context. - * This command must be used instead of MC_CMD_RSS_CONTEXT_SET_TABLE if the - * RSS_SELECTABLE_TABLE_SIZE bit is set in MC_CMD_GET_CAPABILITIES. - */ -#define MC_CMD_RSS_CONTEXT_WRITE_TABLE 0x13e -#undef MC_CMD_0x13e_PRIVILEGE_CTG - -#define MC_CMD_0x13e_PRIVILEGE_CTG SRIOV_CTG_GENERAL - -/* MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN msgrequest */ -#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_LENMIN 8 -#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_LENMAX 252 -#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_LENMAX_MCDI2 1020 -#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_LEN(num) (4+4*(num)) -#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_ENTRIES_NUM(len) (((len)-4)/4) -/* The handle of the RSS context */ -#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_RSS_CONTEXT_ID_OFST 0 -#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_RSS_CONTEXT_ID_LEN 4 -/* An array of index-value pairs to be written to the table. Structure is - * MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY. - */ -#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_ENTRIES_OFST 4 -#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_ENTRIES_LEN 4 -#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_ENTRIES_MINNUM 1 -#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_ENTRIES_MAXNUM 62 -#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_ENTRIES_MAXNUM_MCDI2 254 - -/* MC_CMD_RSS_CONTEXT_WRITE_TABLE_OUT msgresponse */ -#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_OUT_LEN 0 - -/* MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY structuredef */ -#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_LEN 4 -/* The index of the table entry to be written. */ -#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_INDEX_OFST 0 -#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_INDEX_LEN 2 -#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_INDEX_LBN 0 -#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_INDEX_WIDTH 16 -/* The value to write into the table entry. 
*/ -#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_VALUE_OFST 2 -#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_VALUE_LEN 2 -#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_VALUE_LBN 16 -#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_VALUE_WIDTH 16 - - -/***********************************/ -/* MC_CMD_RSS_CONTEXT_READ_TABLE - * Read a portion of a selectable-size indirection table for an RSS context. - * This command must be used instead of MC_CMD_RSS_CONTEXT_GET_TABLE if the - * RSS_SELECTABLE_TABLE_SIZE bit is set in MC_CMD_GET_CAPABILITIES. - */ -#define MC_CMD_RSS_CONTEXT_READ_TABLE 0x13f -#undef MC_CMD_0x13f_PRIVILEGE_CTG - -#define MC_CMD_0x13f_PRIVILEGE_CTG SRIOV_CTG_GENERAL - -/* MC_CMD_RSS_CONTEXT_READ_TABLE_IN msgrequest */ -#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_LENMIN 6 -#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_LENMAX 252 -#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_LENMAX_MCDI2 1020 -#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_LEN(num) (4+2*(num)) -#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_INDICES_NUM(len) (((len)-4)/2) -/* The handle of the RSS context */ -#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_RSS_CONTEXT_ID_OFST 0 -#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_RSS_CONTEXT_ID_LEN 4 -/* An array containing the indices of the entries to be read. */ -#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_INDICES_OFST 4 -#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_INDICES_LEN 2 -#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_INDICES_MINNUM 1 -#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_INDICES_MAXNUM 124 -#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_INDICES_MAXNUM_MCDI2 508 - -/* MC_CMD_RSS_CONTEXT_READ_TABLE_OUT msgresponse */ -#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_LENMIN 2 -#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_LENMAX 252 -#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_LENMAX_MCDI2 1020 -#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_LEN(num) (0+2*(num)) -#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_DATA_NUM(len) (((len)-0)/2) -/* A buffer containing the requested entries read from the table. */ -#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_DATA_OFST 0 -#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_DATA_LEN 2 -#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_DATA_MINNUM 1 -#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_DATA_MAXNUM 126 -#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_DATA_MAXNUM_MCDI2 510 - - /***********************************/ /* MC_CMD_RSS_CONTEXT_SET_FLAGS * Set various control flags for an RSS context. @@ -19524,158 +20170,6 @@ #define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV6_RSS_MODE_WIDTH 4 -/***********************************/ -/* MC_CMD_DOT1P_MAPPING_ALLOC - * Allocate a .1p mapping. - */ -#define MC_CMD_DOT1P_MAPPING_ALLOC 0xa4 -#undef MC_CMD_0xa4_PRIVILEGE_CTG - -#define MC_CMD_0xa4_PRIVILEGE_CTG SRIOV_CTG_ADMIN - -/* MC_CMD_DOT1P_MAPPING_ALLOC_IN msgrequest */ -#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_LEN 8 -/* The handle of the owning upstream port */ -#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0 -#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4 -/* Number of queues spanned by this mapping, in the range 1-64; valid fixed - * offsets in the mapping table will be in the range 0 to NUM_QUEUES-1, and - * referenced RSS contexts must span no more than this number. - */ -#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_NUM_QUEUES_OFST 4 -#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_NUM_QUEUES_LEN 4 - -/* MC_CMD_DOT1P_MAPPING_ALLOC_OUT msgresponse */ -#define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_LEN 4 -/* The handle of the new .1p mapping. 
This should be considered opaque to the - * host, although a value of 0xFFFFFFFF is guaranteed never to be a valid - * handle. - */ -#define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_DOT1P_MAPPING_ID_OFST 0 -#define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_DOT1P_MAPPING_ID_LEN 4 -/* enum: guaranteed invalid .1p mapping handle value */ -#define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_DOT1P_MAPPING_ID_INVALID 0xffffffff - - -/***********************************/ -/* MC_CMD_DOT1P_MAPPING_FREE - * Free a .1p mapping. - */ -#define MC_CMD_DOT1P_MAPPING_FREE 0xa5 -#undef MC_CMD_0xa5_PRIVILEGE_CTG - -#define MC_CMD_0xa5_PRIVILEGE_CTG SRIOV_CTG_ADMIN - -/* MC_CMD_DOT1P_MAPPING_FREE_IN msgrequest */ -#define MC_CMD_DOT1P_MAPPING_FREE_IN_LEN 4 -/* The handle of the .1p mapping */ -#define MC_CMD_DOT1P_MAPPING_FREE_IN_DOT1P_MAPPING_ID_OFST 0 -#define MC_CMD_DOT1P_MAPPING_FREE_IN_DOT1P_MAPPING_ID_LEN 4 - -/* MC_CMD_DOT1P_MAPPING_FREE_OUT msgresponse */ -#define MC_CMD_DOT1P_MAPPING_FREE_OUT_LEN 0 - - -/***********************************/ -/* MC_CMD_DOT1P_MAPPING_SET_TABLE - * Set the mapping table for a .1p mapping. - */ -#define MC_CMD_DOT1P_MAPPING_SET_TABLE 0xa6 -#undef MC_CMD_0xa6_PRIVILEGE_CTG - -#define MC_CMD_0xa6_PRIVILEGE_CTG SRIOV_CTG_ADMIN - -/* MC_CMD_DOT1P_MAPPING_SET_TABLE_IN msgrequest */ -#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_LEN 36 -/* The handle of the .1p mapping */ -#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_DOT1P_MAPPING_ID_OFST 0 -#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_DOT1P_MAPPING_ID_LEN 4 -/* Per-priority mappings (1 32-bit word per entry - an offset or RSS context - * handle) - */ -#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_MAPPING_TABLE_OFST 4 -#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_MAPPING_TABLE_LEN 32 - -/* MC_CMD_DOT1P_MAPPING_SET_TABLE_OUT msgresponse */ -#define MC_CMD_DOT1P_MAPPING_SET_TABLE_OUT_LEN 0 - - -/***********************************/ -/* MC_CMD_DOT1P_MAPPING_GET_TABLE - * Get the mapping table for a .1p mapping. - */ -#define MC_CMD_DOT1P_MAPPING_GET_TABLE 0xa7 -#undef MC_CMD_0xa7_PRIVILEGE_CTG - -#define MC_CMD_0xa7_PRIVILEGE_CTG SRIOV_CTG_ADMIN - -/* MC_CMD_DOT1P_MAPPING_GET_TABLE_IN msgrequest */ -#define MC_CMD_DOT1P_MAPPING_GET_TABLE_IN_LEN 4 -/* The handle of the .1p mapping */ -#define MC_CMD_DOT1P_MAPPING_GET_TABLE_IN_DOT1P_MAPPING_ID_OFST 0 -#define MC_CMD_DOT1P_MAPPING_GET_TABLE_IN_DOT1P_MAPPING_ID_LEN 4 - -/* MC_CMD_DOT1P_MAPPING_GET_TABLE_OUT msgresponse */ -#define MC_CMD_DOT1P_MAPPING_GET_TABLE_OUT_LEN 36 -/* Per-priority mappings (1 32-bit word per entry - an offset or RSS context - * handle) - */ -#define MC_CMD_DOT1P_MAPPING_GET_TABLE_OUT_MAPPING_TABLE_OFST 4 -#define MC_CMD_DOT1P_MAPPING_GET_TABLE_OUT_MAPPING_TABLE_LEN 32 - - -/***********************************/ -/* MC_CMD_GET_VECTOR_CFG - * Get Interrupt Vector config for this PF. - */ -#define MC_CMD_GET_VECTOR_CFG 0xbf -#undef MC_CMD_0xbf_PRIVILEGE_CTG - -#define MC_CMD_0xbf_PRIVILEGE_CTG SRIOV_CTG_GENERAL - -/* MC_CMD_GET_VECTOR_CFG_IN msgrequest */ -#define MC_CMD_GET_VECTOR_CFG_IN_LEN 0 - -/* MC_CMD_GET_VECTOR_CFG_OUT msgresponse */ -#define MC_CMD_GET_VECTOR_CFG_OUT_LEN 12 -/* Base absolute interrupt vector number. */ -#define MC_CMD_GET_VECTOR_CFG_OUT_VEC_BASE_OFST 0 -#define MC_CMD_GET_VECTOR_CFG_OUT_VEC_BASE_LEN 4 -/* Number of interrupt vectors allocate to this PF. */ -#define MC_CMD_GET_VECTOR_CFG_OUT_VECS_PER_PF_OFST 4 -#define MC_CMD_GET_VECTOR_CFG_OUT_VECS_PER_PF_LEN 4 -/* Number of interrupt vectors to allocate per VF. 
*/ -#define MC_CMD_GET_VECTOR_CFG_OUT_VECS_PER_VF_OFST 8 -#define MC_CMD_GET_VECTOR_CFG_OUT_VECS_PER_VF_LEN 4 - - -/***********************************/ -/* MC_CMD_SET_VECTOR_CFG - * Set Interrupt Vector config for this PF. - */ -#define MC_CMD_SET_VECTOR_CFG 0xc0 -#undef MC_CMD_0xc0_PRIVILEGE_CTG - -#define MC_CMD_0xc0_PRIVILEGE_CTG SRIOV_CTG_GENERAL - -/* MC_CMD_SET_VECTOR_CFG_IN msgrequest */ -#define MC_CMD_SET_VECTOR_CFG_IN_LEN 12 -/* Base absolute interrupt vector number, or MC_CMD_RESOURCE_INSTANCE_ANY to - * let the system find a suitable base. - */ -#define MC_CMD_SET_VECTOR_CFG_IN_VEC_BASE_OFST 0 -#define MC_CMD_SET_VECTOR_CFG_IN_VEC_BASE_LEN 4 -/* Number of interrupt vectors allocate to this PF. */ -#define MC_CMD_SET_VECTOR_CFG_IN_VECS_PER_PF_OFST 4 -#define MC_CMD_SET_VECTOR_CFG_IN_VECS_PER_PF_LEN 4 -/* Number of interrupt vectors to allocate per VF. */ -#define MC_CMD_SET_VECTOR_CFG_IN_VECS_PER_VF_OFST 8 -#define MC_CMD_SET_VECTOR_CFG_IN_VECS_PER_VF_LEN 4 - -/* MC_CMD_SET_VECTOR_CFG_OUT msgresponse */ -#define MC_CMD_SET_VECTOR_CFG_OUT_LEN 0 - - /***********************************/ /* MC_CMD_VPORT_ADD_MAC_ADDRESS * Add a MAC address to a v-port @@ -19809,124 +20303,6 @@ #define MC_CMD_VPORT_RECONFIGURE_OUT_RESET_DONE_WIDTH 1 -/***********************************/ -/* MC_CMD_EVB_PORT_QUERY - * read some config of v-port. - */ -#define MC_CMD_EVB_PORT_QUERY 0x62 -#undef MC_CMD_0x62_PRIVILEGE_CTG - -#define MC_CMD_0x62_PRIVILEGE_CTG SRIOV_CTG_GENERAL - -/* MC_CMD_EVB_PORT_QUERY_IN msgrequest */ -#define MC_CMD_EVB_PORT_QUERY_IN_LEN 4 -/* The handle of the v-port */ -#define MC_CMD_EVB_PORT_QUERY_IN_PORT_ID_OFST 0 -#define MC_CMD_EVB_PORT_QUERY_IN_PORT_ID_LEN 4 - -/* MC_CMD_EVB_PORT_QUERY_OUT msgresponse */ -#define MC_CMD_EVB_PORT_QUERY_OUT_LEN 8 -/* The EVB port flags as defined at MC_CMD_VPORT_ALLOC. */ -#define MC_CMD_EVB_PORT_QUERY_OUT_PORT_FLAGS_OFST 0 -#define MC_CMD_EVB_PORT_QUERY_OUT_PORT_FLAGS_LEN 4 -/* The number of VLAN tags that may be used on a v-adaptor connected to this - * EVB port. - */ -#define MC_CMD_EVB_PORT_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS_OFST 4 -#define MC_CMD_EVB_PORT_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS_LEN 4 - - -/***********************************/ -/* MC_CMD_DUMP_BUFTBL_ENTRIES - * Dump buffer table entries, mainly for command client debug use. Dumps - * absolute entries, and does not use chunk handles. All entries must be in - * range, and used for q page mapping, Although the latter restriction may be - * lifted in future. - */ -#define MC_CMD_DUMP_BUFTBL_ENTRIES 0xab -#undef MC_CMD_0xab_PRIVILEGE_CTG - -#define MC_CMD_0xab_PRIVILEGE_CTG SRIOV_CTG_INSECURE - -/* MC_CMD_DUMP_BUFTBL_ENTRIES_IN msgrequest */ -#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_LEN 8 -/* Index of the first buffer table entry. */ -#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_FIRSTID_OFST 0 -#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_FIRSTID_LEN 4 -/* Number of buffer table entries to dump. */ -#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_NUMENTRIES_OFST 4 -#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_NUMENTRIES_LEN 4 - -/* MC_CMD_DUMP_BUFTBL_ENTRIES_OUT msgresponse */ -#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LENMIN 12 -#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LENMAX 252 -#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LENMAX_MCDI2 1020 -#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LEN(num) (0+12*(num)) -#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_NUM(len) (((len)-0)/12) -/* Raw buffer table entries, layed out as BUFTBL_ENTRY. 
*/ -#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_OFST 0 -#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_LEN 12 -#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_MINNUM 1 -#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_MAXNUM 21 -#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_MAXNUM_MCDI2 85 - - -/***********************************/ -/* MC_CMD_SET_RXDP_CONFIG - * Set global RXDP configuration settings - */ -#define MC_CMD_SET_RXDP_CONFIG 0xc1 -#undef MC_CMD_0xc1_PRIVILEGE_CTG - -#define MC_CMD_0xc1_PRIVILEGE_CTG SRIOV_CTG_ADMIN - -/* MC_CMD_SET_RXDP_CONFIG_IN msgrequest */ -#define MC_CMD_SET_RXDP_CONFIG_IN_LEN 4 -#define MC_CMD_SET_RXDP_CONFIG_IN_DATA_OFST 0 -#define MC_CMD_SET_RXDP_CONFIG_IN_DATA_LEN 4 -#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_DMA_OFST 0 -#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_DMA_LBN 0 -#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_DMA_WIDTH 1 -#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_LEN_OFST 0 -#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_LEN_LBN 1 -#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_LEN_WIDTH 2 -/* enum: pad to 64 bytes */ -#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_64 0x0 -/* enum: pad to 128 bytes (Medford only) */ -#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_128 0x1 -/* enum: pad to 256 bytes (Medford only) */ -#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_256 0x2 - -/* MC_CMD_SET_RXDP_CONFIG_OUT msgresponse */ -#define MC_CMD_SET_RXDP_CONFIG_OUT_LEN 0 - - -/***********************************/ -/* MC_CMD_GET_RXDP_CONFIG - * Get global RXDP configuration settings - */ -#define MC_CMD_GET_RXDP_CONFIG 0xc2 -#undef MC_CMD_0xc2_PRIVILEGE_CTG - -#define MC_CMD_0xc2_PRIVILEGE_CTG SRIOV_CTG_GENERAL - -/* MC_CMD_GET_RXDP_CONFIG_IN msgrequest */ -#define MC_CMD_GET_RXDP_CONFIG_IN_LEN 0 - -/* MC_CMD_GET_RXDP_CONFIG_OUT msgresponse */ -#define MC_CMD_GET_RXDP_CONFIG_OUT_LEN 4 -#define MC_CMD_GET_RXDP_CONFIG_OUT_DATA_OFST 0 -#define MC_CMD_GET_RXDP_CONFIG_OUT_DATA_LEN 4 -#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_DMA_OFST 0 -#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_DMA_LBN 0 -#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_DMA_WIDTH 1 -#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_LEN_OFST 0 -#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_LEN_LBN 1 -#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_LEN_WIDTH 2 -/* Enum values, see field(s): */ -/* MC_CMD_SET_RXDP_CONFIG/MC_CMD_SET_RXDP_CONFIG_IN/PAD_HOST_LEN */ - - /***********************************/ /* MC_CMD_GET_CLOCK * Return the system and PDCPU clock frequencies. @@ -19949,210 +20325,6 @@ #define MC_CMD_GET_CLOCK_OUT_DPCPU_FREQ_LEN 4 -/***********************************/ -/* MC_CMD_SET_CLOCK - * Control the system and DPCPU clock frequencies. Changes are lost reboot. 
- */ -#define MC_CMD_SET_CLOCK 0xad -#undef MC_CMD_0xad_PRIVILEGE_CTG - -#define MC_CMD_0xad_PRIVILEGE_CTG SRIOV_CTG_INSECURE - -/* MC_CMD_SET_CLOCK_IN msgrequest */ -#define MC_CMD_SET_CLOCK_IN_LEN 28 -/* Requested frequency in MHz for system clock domain */ -#define MC_CMD_SET_CLOCK_IN_SYS_FREQ_OFST 0 -#define MC_CMD_SET_CLOCK_IN_SYS_FREQ_LEN 4 -/* enum: Leave the system clock domain frequency unchanged */ -#define MC_CMD_SET_CLOCK_IN_SYS_DOMAIN_DONT_CHANGE 0x0 -/* Requested frequency in MHz for inter-core clock domain */ -#define MC_CMD_SET_CLOCK_IN_ICORE_FREQ_OFST 4 -#define MC_CMD_SET_CLOCK_IN_ICORE_FREQ_LEN 4 -/* enum: Leave the inter-core clock domain frequency unchanged */ -#define MC_CMD_SET_CLOCK_IN_ICORE_DOMAIN_DONT_CHANGE 0x0 -/* Requested frequency in MHz for DPCPU clock domain */ -#define MC_CMD_SET_CLOCK_IN_DPCPU_FREQ_OFST 8 -#define MC_CMD_SET_CLOCK_IN_DPCPU_FREQ_LEN 4 -/* enum: Leave the DPCPU clock domain frequency unchanged */ -#define MC_CMD_SET_CLOCK_IN_DPCPU_DOMAIN_DONT_CHANGE 0x0 -/* Requested frequency in MHz for PCS clock domain */ -#define MC_CMD_SET_CLOCK_IN_PCS_FREQ_OFST 12 -#define MC_CMD_SET_CLOCK_IN_PCS_FREQ_LEN 4 -/* enum: Leave the PCS clock domain frequency unchanged */ -#define MC_CMD_SET_CLOCK_IN_PCS_DOMAIN_DONT_CHANGE 0x0 -/* Requested frequency in MHz for MC clock domain */ -#define MC_CMD_SET_CLOCK_IN_MC_FREQ_OFST 16 -#define MC_CMD_SET_CLOCK_IN_MC_FREQ_LEN 4 -/* enum: Leave the MC clock domain frequency unchanged */ -#define MC_CMD_SET_CLOCK_IN_MC_DOMAIN_DONT_CHANGE 0x0 -/* Requested frequency in MHz for rmon clock domain */ -#define MC_CMD_SET_CLOCK_IN_RMON_FREQ_OFST 20 -#define MC_CMD_SET_CLOCK_IN_RMON_FREQ_LEN 4 -/* enum: Leave the rmon clock domain frequency unchanged */ -#define MC_CMD_SET_CLOCK_IN_RMON_DOMAIN_DONT_CHANGE 0x0 -/* Requested frequency in MHz for vswitch clock domain */ -#define MC_CMD_SET_CLOCK_IN_VSWITCH_FREQ_OFST 24 -#define MC_CMD_SET_CLOCK_IN_VSWITCH_FREQ_LEN 4 -/* enum: Leave the vswitch clock domain frequency unchanged */ -#define MC_CMD_SET_CLOCK_IN_VSWITCH_DOMAIN_DONT_CHANGE 0x0 - -/* MC_CMD_SET_CLOCK_OUT msgresponse */ -#define MC_CMD_SET_CLOCK_OUT_LEN 28 -/* Resulting system frequency in MHz */ -#define MC_CMD_SET_CLOCK_OUT_SYS_FREQ_OFST 0 -#define MC_CMD_SET_CLOCK_OUT_SYS_FREQ_LEN 4 -/* enum: The system clock domain doesn't exist */ -#define MC_CMD_SET_CLOCK_OUT_SYS_DOMAIN_UNSUPPORTED 0x0 -/* Resulting inter-core frequency in MHz */ -#define MC_CMD_SET_CLOCK_OUT_ICORE_FREQ_OFST 4 -#define MC_CMD_SET_CLOCK_OUT_ICORE_FREQ_LEN 4 -/* enum: The inter-core clock domain doesn't exist / isn't used */ -#define MC_CMD_SET_CLOCK_OUT_ICORE_DOMAIN_UNSUPPORTED 0x0 -/* Resulting DPCPU frequency in MHz */ -#define MC_CMD_SET_CLOCK_OUT_DPCPU_FREQ_OFST 8 -#define MC_CMD_SET_CLOCK_OUT_DPCPU_FREQ_LEN 4 -/* enum: The dpcpu clock domain doesn't exist */ -#define MC_CMD_SET_CLOCK_OUT_DPCPU_DOMAIN_UNSUPPORTED 0x0 -/* Resulting PCS frequency in MHz */ -#define MC_CMD_SET_CLOCK_OUT_PCS_FREQ_OFST 12 -#define MC_CMD_SET_CLOCK_OUT_PCS_FREQ_LEN 4 -/* enum: The PCS clock domain doesn't exist / isn't controlled */ -#define MC_CMD_SET_CLOCK_OUT_PCS_DOMAIN_UNSUPPORTED 0x0 -/* Resulting MC frequency in MHz */ -#define MC_CMD_SET_CLOCK_OUT_MC_FREQ_OFST 16 -#define MC_CMD_SET_CLOCK_OUT_MC_FREQ_LEN 4 -/* enum: The MC clock domain doesn't exist / isn't controlled */ -#define MC_CMD_SET_CLOCK_OUT_MC_DOMAIN_UNSUPPORTED 0x0 -/* Resulting rmon frequency in MHz */ -#define MC_CMD_SET_CLOCK_OUT_RMON_FREQ_OFST 20 -#define MC_CMD_SET_CLOCK_OUT_RMON_FREQ_LEN 4 -/* 
enum: The rmon clock domain doesn't exist / isn't controlled */ -#define MC_CMD_SET_CLOCK_OUT_RMON_DOMAIN_UNSUPPORTED 0x0 -/* Resulting vswitch frequency in MHz */ -#define MC_CMD_SET_CLOCK_OUT_VSWITCH_FREQ_OFST 24 -#define MC_CMD_SET_CLOCK_OUT_VSWITCH_FREQ_LEN 4 -/* enum: The vswitch clock domain doesn't exist / isn't controlled */ -#define MC_CMD_SET_CLOCK_OUT_VSWITCH_DOMAIN_UNSUPPORTED 0x0 - - -/***********************************/ -/* MC_CMD_DPCPU_RPC - * Send an arbitrary DPCPU message. - */ -#define MC_CMD_DPCPU_RPC 0xae -#undef MC_CMD_0xae_PRIVILEGE_CTG - -#define MC_CMD_0xae_PRIVILEGE_CTG SRIOV_CTG_INSECURE - -/* MC_CMD_DPCPU_RPC_IN msgrequest */ -#define MC_CMD_DPCPU_RPC_IN_LEN 36 -#define MC_CMD_DPCPU_RPC_IN_CPU_OFST 0 -#define MC_CMD_DPCPU_RPC_IN_CPU_LEN 4 -/* enum: RxDPCPU0 */ -#define MC_CMD_DPCPU_RPC_IN_DPCPU_RX0 0x0 -/* enum: TxDPCPU0 */ -#define MC_CMD_DPCPU_RPC_IN_DPCPU_TX0 0x1 -/* enum: TxDPCPU1 */ -#define MC_CMD_DPCPU_RPC_IN_DPCPU_TX1 0x2 -/* enum: RxDPCPU1 (Medford only) */ -#define MC_CMD_DPCPU_RPC_IN_DPCPU_RX1 0x3 -/* enum: RxDPCPU (will be for the calling function; for now, just an alias of - * DPCPU_RX0) - */ -#define MC_CMD_DPCPU_RPC_IN_DPCPU_RX 0x80 -/* enum: TxDPCPU (will be for the calling function; for now, just an alias of - * DPCPU_TX0) - */ -#define MC_CMD_DPCPU_RPC_IN_DPCPU_TX 0x81 -/* First 8 bits [39:32] of DATA are consumed by MC-DPCPU protocol and must be - * initialised to zero - */ -#define MC_CMD_DPCPU_RPC_IN_DATA_OFST 4 -#define MC_CMD_DPCPU_RPC_IN_DATA_LEN 32 -#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_CMDNUM_OFST 4 -#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_CMDNUM_LBN 8 -#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_CMDNUM_WIDTH 8 -#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_READ 0x6 /* enum */ -#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_WRITE 0x7 /* enum */ -#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_SELF_TEST 0xc /* enum */ -#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_CSR_ACCESS 0xe /* enum */ -#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_READ 0x46 /* enum */ -#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_WRITE 0x47 /* enum */ -#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_SELF_TEST 0x4a /* enum */ -#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_CSR_ACCESS 0x4c /* enum */ -#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_SET_MC_REPLAY_CNTXT 0x4d /* enum */ -#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_OBJID_OFST 4 -#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_OBJID_LBN 16 -#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_OBJID_WIDTH 16 -#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_ADDR_OFST 4 -#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_ADDR_LBN 16 -#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_ADDR_WIDTH 16 -#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_COUNT_OFST 4 -#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_COUNT_LBN 48 -#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_COUNT_WIDTH 16 -#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_INFO_OFST 4 -#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_INFO_LBN 16 -#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_INFO_WIDTH 240 -#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_OFST 4 -#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_LBN 16 -#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_WIDTH 16 -#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_STOP_RETURN_RESULT 0x0 /* enum */ -#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_READ 0x1 /* enum */ -#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_WRITE 0x2 /* enum */ -#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_WRITE_READ 0x3 /* enum */ -#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_PIPELINED_READ 0x4 /* enum */ -#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_START_DELAY_OFST 4 -#define 
MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_START_DELAY_LBN 48 -#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_START_DELAY_WIDTH 16 -#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_RPT_COUNT_OFST 4 -#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_RPT_COUNT_LBN 64 -#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_RPT_COUNT_WIDTH 16 -#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_GAP_DELAY_OFST 4 -#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_GAP_DELAY_LBN 80 -#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_GAP_DELAY_WIDTH 16 -#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_OFST 4 -#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_LBN 16 -#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_WIDTH 16 -#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_CUT_THROUGH 0x1 /* enum */ -#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_STORE_FORWARD 0x2 /* enum */ -#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_STORE_FORWARD_FIRST 0x3 /* enum */ -#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_CNTXT_OFST 4 -#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_CNTXT_LBN 64 -#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_CNTXT_WIDTH 16 -#define MC_CMD_DPCPU_RPC_IN_WDATA_OFST 12 -#define MC_CMD_DPCPU_RPC_IN_WDATA_LEN 24 -/* Register data to write. Only valid in write/write-read. */ -#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_DATA_OFST 16 -#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_DATA_LEN 4 -/* Register address. */ -#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_ADDRESS_OFST 20 -#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_ADDRESS_LEN 4 - -/* MC_CMD_DPCPU_RPC_OUT msgresponse */ -#define MC_CMD_DPCPU_RPC_OUT_LEN 36 -#define MC_CMD_DPCPU_RPC_OUT_RC_OFST 0 -#define MC_CMD_DPCPU_RPC_OUT_RC_LEN 4 -/* DATA */ -#define MC_CMD_DPCPU_RPC_OUT_DATA_OFST 4 -#define MC_CMD_DPCPU_RPC_OUT_DATA_LEN 32 -#define MC_CMD_DPCPU_RPC_OUT_HDR_CMD_RESP_ERRCODE_OFST 4 -#define MC_CMD_DPCPU_RPC_OUT_HDR_CMD_RESP_ERRCODE_LBN 32 -#define MC_CMD_DPCPU_RPC_OUT_HDR_CMD_RESP_ERRCODE_WIDTH 16 -#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_COUNT_OFST 4 -#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_COUNT_LBN 48 -#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_COUNT_WIDTH 16 -#define MC_CMD_DPCPU_RPC_OUT_RDATA_OFST 12 -#define MC_CMD_DPCPU_RPC_OUT_RDATA_LEN 24 -#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_1_OFST 12 -#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_1_LEN 4 -#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_2_OFST 16 -#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_2_LEN 4 -#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_3_OFST 20 -#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_3_LEN 4 -#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_4_OFST 24 -#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_4_LEN 4 - - /***********************************/ /* MC_CMD_TRIGGER_INTERRUPT * Trigger an interrupt by prodding the BIU. @@ -20172,66 +20344,6 @@ #define MC_CMD_TRIGGER_INTERRUPT_OUT_LEN 0 -/***********************************/ -/* MC_CMD_SHMBOOT_OP - * Special operations to support (for now) shmboot. - */ -#define MC_CMD_SHMBOOT_OP 0xe6 -#undef MC_CMD_0xe6_PRIVILEGE_CTG - -#define MC_CMD_0xe6_PRIVILEGE_CTG SRIOV_CTG_ADMIN - -/* MC_CMD_SHMBOOT_OP_IN msgrequest */ -#define MC_CMD_SHMBOOT_OP_IN_LEN 4 -/* Identifies the operation to perform */ -#define MC_CMD_SHMBOOT_OP_IN_SHMBOOT_OP_OFST 0 -#define MC_CMD_SHMBOOT_OP_IN_SHMBOOT_OP_LEN 4 -/* enum: Copy slave_data section to the slave core. 
(Greenport only) */ -#define MC_CMD_SHMBOOT_OP_IN_PUSH_SLAVE_DATA 0x0 - -/* MC_CMD_SHMBOOT_OP_OUT msgresponse */ -#define MC_CMD_SHMBOOT_OP_OUT_LEN 0 - - -/***********************************/ -/* MC_CMD_CAP_BLK_READ - * Read multiple 64bit words from capture block memory - */ -#define MC_CMD_CAP_BLK_READ 0xe7 -#undef MC_CMD_0xe7_PRIVILEGE_CTG - -#define MC_CMD_0xe7_PRIVILEGE_CTG SRIOV_CTG_INSECURE - -/* MC_CMD_CAP_BLK_READ_IN msgrequest */ -#define MC_CMD_CAP_BLK_READ_IN_LEN 12 -#define MC_CMD_CAP_BLK_READ_IN_CAP_REG_OFST 0 -#define MC_CMD_CAP_BLK_READ_IN_CAP_REG_LEN 4 -#define MC_CMD_CAP_BLK_READ_IN_ADDR_OFST 4 -#define MC_CMD_CAP_BLK_READ_IN_ADDR_LEN 4 -#define MC_CMD_CAP_BLK_READ_IN_COUNT_OFST 8 -#define MC_CMD_CAP_BLK_READ_IN_COUNT_LEN 4 - -/* MC_CMD_CAP_BLK_READ_OUT msgresponse */ -#define MC_CMD_CAP_BLK_READ_OUT_LENMIN 8 -#define MC_CMD_CAP_BLK_READ_OUT_LENMAX 248 -#define MC_CMD_CAP_BLK_READ_OUT_LENMAX_MCDI2 1016 -#define MC_CMD_CAP_BLK_READ_OUT_LEN(num) (0+8*(num)) -#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_NUM(len) (((len)-0)/8) -#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_OFST 0 -#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_LEN 8 -#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_LO_OFST 0 -#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_LO_LEN 4 -#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_LO_LBN 0 -#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_LO_WIDTH 32 -#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_HI_OFST 4 -#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_HI_LEN 4 -#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_HI_LBN 32 -#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_HI_WIDTH 32 -#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_MINNUM 1 -#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_MAXNUM 31 -#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_MAXNUM_MCDI2 127 - - /***********************************/ /* MC_CMD_DUMP_DO * Take a dump of the DUT state @@ -20379,34 +20491,6 @@ #define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_SIZE_LEN 4 -/***********************************/ -/* MC_CMD_SET_PSU - * Adjusts power supply parameters. This is a warranty-voiding operation. - * Returns: ENOENT if the parameter or rail specified does not exist, EINVAL if - * the parameter is out of range. - */ -#define MC_CMD_SET_PSU 0xea -#undef MC_CMD_0xea_PRIVILEGE_CTG - -#define MC_CMD_0xea_PRIVILEGE_CTG SRIOV_CTG_INSECURE - -/* MC_CMD_SET_PSU_IN msgrequest */ -#define MC_CMD_SET_PSU_IN_LEN 12 -#define MC_CMD_SET_PSU_IN_PARAM_OFST 0 -#define MC_CMD_SET_PSU_IN_PARAM_LEN 4 -#define MC_CMD_SET_PSU_IN_PARAM_SUPPLY_VOLTAGE 0x0 /* enum */ -#define MC_CMD_SET_PSU_IN_RAIL_OFST 4 -#define MC_CMD_SET_PSU_IN_RAIL_LEN 4 -#define MC_CMD_SET_PSU_IN_RAIL_0V9 0x0 /* enum */ -#define MC_CMD_SET_PSU_IN_RAIL_1V2 0x1 /* enum */ -/* desired value, eg voltage in mV */ -#define MC_CMD_SET_PSU_IN_VALUE_OFST 8 -#define MC_CMD_SET_PSU_IN_VALUE_LEN 4 - -/* MC_CMD_SET_PSU_OUT msgresponse */ -#define MC_CMD_SET_PSU_OUT_LEN 0 - - /***********************************/ /* MC_CMD_GET_FUNCTION_INFO * Get function information. PF and VF number. @@ -20448,7 +20532,7 @@ #define MC_CMD_ENABLE_OFFLINE_BIST 0xed #undef MC_CMD_0xed_PRIVILEGE_CTG -#define MC_CMD_0xed_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0xed_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND /* MC_CMD_ENABLE_OFFLINE_BIST_IN msgrequest */ #define MC_CMD_ENABLE_OFFLINE_BIST_IN_LEN 0 @@ -20457,130 +20541,6 @@ #define MC_CMD_ENABLE_OFFLINE_BIST_OUT_LEN 0 -/***********************************/ -/* MC_CMD_UART_SEND_DATA - * Send checksummed[sic] block of data over the uart. 
Response is a placeholder - * should we wish to make this reliable; currently requests are fire-and- - * forget. - */ -#define MC_CMD_UART_SEND_DATA 0xee -#undef MC_CMD_0xee_PRIVILEGE_CTG - -#define MC_CMD_0xee_PRIVILEGE_CTG SRIOV_CTG_GENERAL - -/* MC_CMD_UART_SEND_DATA_OUT msgrequest */ -#define MC_CMD_UART_SEND_DATA_OUT_LENMIN 16 -#define MC_CMD_UART_SEND_DATA_OUT_LENMAX 252 -#define MC_CMD_UART_SEND_DATA_OUT_LENMAX_MCDI2 1020 -#define MC_CMD_UART_SEND_DATA_OUT_LEN(num) (16+1*(num)) -#define MC_CMD_UART_SEND_DATA_OUT_DATA_NUM(len) (((len)-16)/1) -/* CRC32 over OFFSET, LENGTH, RESERVED, DATA */ -#define MC_CMD_UART_SEND_DATA_OUT_CHECKSUM_OFST 0 -#define MC_CMD_UART_SEND_DATA_OUT_CHECKSUM_LEN 4 -/* Offset at which to write the data */ -#define MC_CMD_UART_SEND_DATA_OUT_OFFSET_OFST 4 -#define MC_CMD_UART_SEND_DATA_OUT_OFFSET_LEN 4 -/* Length of data */ -#define MC_CMD_UART_SEND_DATA_OUT_LENGTH_OFST 8 -#define MC_CMD_UART_SEND_DATA_OUT_LENGTH_LEN 4 -/* Reserved for future use */ -#define MC_CMD_UART_SEND_DATA_OUT_RESERVED_OFST 12 -#define MC_CMD_UART_SEND_DATA_OUT_RESERVED_LEN 4 -#define MC_CMD_UART_SEND_DATA_OUT_DATA_OFST 16 -#define MC_CMD_UART_SEND_DATA_OUT_DATA_LEN 1 -#define MC_CMD_UART_SEND_DATA_OUT_DATA_MINNUM 0 -#define MC_CMD_UART_SEND_DATA_OUT_DATA_MAXNUM 236 -#define MC_CMD_UART_SEND_DATA_OUT_DATA_MAXNUM_MCDI2 1004 - -/* MC_CMD_UART_SEND_DATA_IN msgresponse */ -#define MC_CMD_UART_SEND_DATA_IN_LEN 0 - - -/***********************************/ -/* MC_CMD_UART_RECV_DATA - * Request checksummed[sic] block of data over the uart. Only a placeholder, - * subject to change and not currently implemented. - */ -#define MC_CMD_UART_RECV_DATA 0xef -#undef MC_CMD_0xef_PRIVILEGE_CTG - -#define MC_CMD_0xef_PRIVILEGE_CTG SRIOV_CTG_GENERAL - -/* MC_CMD_UART_RECV_DATA_OUT msgrequest */ -#define MC_CMD_UART_RECV_DATA_OUT_LEN 16 -/* CRC32 over OFFSET, LENGTH, RESERVED */ -#define MC_CMD_UART_RECV_DATA_OUT_CHECKSUM_OFST 0 -#define MC_CMD_UART_RECV_DATA_OUT_CHECKSUM_LEN 4 -/* Offset from which to read the data */ -#define MC_CMD_UART_RECV_DATA_OUT_OFFSET_OFST 4 -#define MC_CMD_UART_RECV_DATA_OUT_OFFSET_LEN 4 -/* Length of data */ -#define MC_CMD_UART_RECV_DATA_OUT_LENGTH_OFST 8 -#define MC_CMD_UART_RECV_DATA_OUT_LENGTH_LEN 4 -/* Reserved for future use */ -#define MC_CMD_UART_RECV_DATA_OUT_RESERVED_OFST 12 -#define MC_CMD_UART_RECV_DATA_OUT_RESERVED_LEN 4 - -/* MC_CMD_UART_RECV_DATA_IN msgresponse */ -#define MC_CMD_UART_RECV_DATA_IN_LENMIN 16 -#define MC_CMD_UART_RECV_DATA_IN_LENMAX 252 -#define MC_CMD_UART_RECV_DATA_IN_LENMAX_MCDI2 1020 -#define MC_CMD_UART_RECV_DATA_IN_LEN(num) (16+1*(num)) -#define MC_CMD_UART_RECV_DATA_IN_DATA_NUM(len) (((len)-16)/1) -/* CRC32 over RESERVED1, RESERVED2, RESERVED3, DATA */ -#define MC_CMD_UART_RECV_DATA_IN_CHECKSUM_OFST 0 -#define MC_CMD_UART_RECV_DATA_IN_CHECKSUM_LEN 4 -/* Offset at which to write the data */ -#define MC_CMD_UART_RECV_DATA_IN_RESERVED1_OFST 4 -#define MC_CMD_UART_RECV_DATA_IN_RESERVED1_LEN 4 -/* Length of data */ -#define MC_CMD_UART_RECV_DATA_IN_RESERVED2_OFST 8 -#define MC_CMD_UART_RECV_DATA_IN_RESERVED2_LEN 4 -/* Reserved for future use */ -#define MC_CMD_UART_RECV_DATA_IN_RESERVED3_OFST 12 -#define MC_CMD_UART_RECV_DATA_IN_RESERVED3_LEN 4 -#define MC_CMD_UART_RECV_DATA_IN_DATA_OFST 16 -#define MC_CMD_UART_RECV_DATA_IN_DATA_LEN 1 -#define MC_CMD_UART_RECV_DATA_IN_DATA_MINNUM 0 -#define MC_CMD_UART_RECV_DATA_IN_DATA_MAXNUM 236 -#define MC_CMD_UART_RECV_DATA_IN_DATA_MAXNUM_MCDI2 1004 - - -/***********************************/ -/* 
MC_CMD_READ_FUSES - * Read data programmed into the device One-Time-Programmable (OTP) Fuses - */ -#define MC_CMD_READ_FUSES 0xf0 -#undef MC_CMD_0xf0_PRIVILEGE_CTG - -#define MC_CMD_0xf0_PRIVILEGE_CTG SRIOV_CTG_INSECURE - -/* MC_CMD_READ_FUSES_IN msgrequest */ -#define MC_CMD_READ_FUSES_IN_LEN 8 -/* Offset in OTP to read */ -#define MC_CMD_READ_FUSES_IN_OFFSET_OFST 0 -#define MC_CMD_READ_FUSES_IN_OFFSET_LEN 4 -/* Length of data to read in bytes */ -#define MC_CMD_READ_FUSES_IN_LENGTH_OFST 4 -#define MC_CMD_READ_FUSES_IN_LENGTH_LEN 4 - -/* MC_CMD_READ_FUSES_OUT msgresponse */ -#define MC_CMD_READ_FUSES_OUT_LENMIN 4 -#define MC_CMD_READ_FUSES_OUT_LENMAX 252 -#define MC_CMD_READ_FUSES_OUT_LENMAX_MCDI2 1020 -#define MC_CMD_READ_FUSES_OUT_LEN(num) (4+1*(num)) -#define MC_CMD_READ_FUSES_OUT_DATA_NUM(len) (((len)-4)/1) -/* Length of returned OTP data in bytes */ -#define MC_CMD_READ_FUSES_OUT_LENGTH_OFST 0 -#define MC_CMD_READ_FUSES_OUT_LENGTH_LEN 4 -/* Returned data */ -#define MC_CMD_READ_FUSES_OUT_DATA_OFST 4 -#define MC_CMD_READ_FUSES_OUT_DATA_LEN 1 -#define MC_CMD_READ_FUSES_OUT_DATA_MINNUM 0 -#define MC_CMD_READ_FUSES_OUT_DATA_MAXNUM 248 -#define MC_CMD_READ_FUSES_OUT_DATA_MAXNUM_MCDI2 1016 - - /***********************************/ /* MC_CMD_KR_TUNE * Get or set KR Serdes RXEQ and TX Driver settings @@ -20588,7 +20548,7 @@ #define MC_CMD_KR_TUNE 0xf1 #undef MC_CMD_0xf1_PRIVILEGE_CTG -#define MC_CMD_0xf1_PRIVILEGE_CTG SRIOV_CTG_ADMIN +#define MC_CMD_0xf1_PRIVILEGE_CTG SRIOV_CTG_ADMIN_TSA_UNBOUND /* MC_CMD_KR_TUNE_IN msgrequest */ #define MC_CMD_KR_TUNE_IN_LENMIN 4 @@ -21123,274 +21083,18 @@ /* MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN/CM1 */ /* C(+1) status */ #define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_CP1_STATUS_OFST 8 -#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_CP1_STATUS_LEN 4 -/* Enum values, see field(s): */ -/* MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN/CM1 */ -/* C(-1) value */ -#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_CM1_VALUE_OFST 12 -#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_CM1_VALUE_LEN 4 -/* C(0) value */ -#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_C0_VALUE_OFST 16 -#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_C0_VALUE_LEN 4 -/* C(+1) status */ -#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_CP1_VALUE_OFST 20 -#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_CP1_VALUE_LEN 4 - - -/***********************************/ -/* MC_CMD_PCIE_TUNE - * Get or set PCIE Serdes RXEQ and TX Driver settings - */ -#define MC_CMD_PCIE_TUNE 0xf2 -#undef MC_CMD_0xf2_PRIVILEGE_CTG - -#define MC_CMD_0xf2_PRIVILEGE_CTG SRIOV_CTG_ADMIN - -/* MC_CMD_PCIE_TUNE_IN msgrequest */ -#define MC_CMD_PCIE_TUNE_IN_LENMIN 4 -#define MC_CMD_PCIE_TUNE_IN_LENMAX 252 -#define MC_CMD_PCIE_TUNE_IN_LENMAX_MCDI2 1020 -#define MC_CMD_PCIE_TUNE_IN_LEN(num) (4+4*(num)) -#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_NUM(len) (((len)-4)/4) -/* Requested operation */ -#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_OP_OFST 0 -#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_OP_LEN 1 -/* enum: Get current RXEQ settings */ -#define MC_CMD_PCIE_TUNE_IN_RXEQ_GET 0x0 -/* enum: Override RXEQ settings */ -#define MC_CMD_PCIE_TUNE_IN_RXEQ_SET 0x1 -/* enum: Get current TX Driver settings */ -#define MC_CMD_PCIE_TUNE_IN_TXEQ_GET 0x2 -/* enum: Override TX Driver settings */ -#define MC_CMD_PCIE_TUNE_IN_TXEQ_SET 0x3 -/* enum: Start PCIe Serdes Eye diagram plot on a given lane. */ -#define MC_CMD_PCIE_TUNE_IN_START_EYE_PLOT 0x5 -/* enum: Poll PCIe Serdes Eye diagram plot. Returns one row of BER data. 
The - * caller should call this command repeatedly after starting eye plot, until no - * more data is returned. - */ -#define MC_CMD_PCIE_TUNE_IN_POLL_EYE_PLOT 0x6 -/* enum: Enable the SERDES BIST and set it to generate a 200MHz square wave */ -#define MC_CMD_PCIE_TUNE_IN_BIST_SQUARE_WAVE 0x7 -/* Align the arguments to 32 bits */ -#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_RSVD_OFST 1 -#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_RSVD_LEN 3 -/* Arguments specific to the operation */ -#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_OFST 4 -#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_LEN 4 -#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_MINNUM 0 -#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_MAXNUM 62 -#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_MAXNUM_MCDI2 254 - -/* MC_CMD_PCIE_TUNE_OUT msgresponse */ -#define MC_CMD_PCIE_TUNE_OUT_LEN 0 - -/* MC_CMD_PCIE_TUNE_RXEQ_GET_IN msgrequest */ -#define MC_CMD_PCIE_TUNE_RXEQ_GET_IN_LEN 4 -/* Requested operation */ -#define MC_CMD_PCIE_TUNE_RXEQ_GET_IN_PCIE_TUNE_OP_OFST 0 -#define MC_CMD_PCIE_TUNE_RXEQ_GET_IN_PCIE_TUNE_OP_LEN 1 -/* Align the arguments to 32 bits */ -#define MC_CMD_PCIE_TUNE_RXEQ_GET_IN_PCIE_TUNE_RSVD_OFST 1 -#define MC_CMD_PCIE_TUNE_RXEQ_GET_IN_PCIE_TUNE_RSVD_LEN 3 - -/* MC_CMD_PCIE_TUNE_RXEQ_GET_OUT msgresponse */ -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LENMIN 4 -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LENMAX 252 -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LENMAX_MCDI2 1020 -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LEN(num) (0+4*(num)) -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_NUM(len) (((len)-0)/4) -/* RXEQ Parameter */ -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_OFST 0 -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LEN 4 -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_MINNUM 1 -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_MAXNUM 63 -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_MAXNUM_MCDI2 255 -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_ID_OFST 0 -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_ID_LBN 0 -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_ID_WIDTH 8 -/* enum: Attenuation (0-15) */ -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_ATT 0x0 -/* enum: CTLE Boost (0-15) */ -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_BOOST 0x1 -/* enum: DFE Tap1 (0 - max negative, 64 - zero, 127 - max positive) */ -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP1 0x2 -/* enum: DFE Tap2 (0 - max negative, 32 - zero, 63 - max positive) */ -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP2 0x3 -/* enum: DFE Tap3 (0 - max negative, 32 - zero, 63 - max positive) */ -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP3 0x4 -/* enum: DFE Tap4 (0 - max negative, 32 - zero, 63 - max positive) */ -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP4 0x5 -/* enum: DFE Tap5 (0 - max negative, 32 - zero, 63 - max positive) */ -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP5 0x6 -/* enum: DFE DLev */ -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_DLEV 0x7 -/* enum: Figure of Merit */ -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_FOM 0x8 -/* enum: CTLE EQ Capacitor (HF Gain) */ -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_CTLE_EQC 0x9 -/* enum: CTLE EQ Resistor (DC Gain) */ -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_CTLE_EQRES 0xa -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LANE_OFST 0 -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LANE_LBN 8 -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LANE_WIDTH 5 -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_0 0x0 /* enum */ -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_1 0x1 /* enum */ -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_2 0x2 /* enum */ -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_3 0x3 /* enum */ -#define 
MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_4 0x4 /* enum */ -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_5 0x5 /* enum */ -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_6 0x6 /* enum */ -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_7 0x7 /* enum */ -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_8 0x8 /* enum */ -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_9 0x9 /* enum */ -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_10 0xa /* enum */ -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_11 0xb /* enum */ -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_12 0xc /* enum */ -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_13 0xd /* enum */ -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_14 0xe /* enum */ -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_15 0xf /* enum */ -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_ALL 0x10 /* enum */ -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_AUTOCAL_OFST 0 -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_AUTOCAL_LBN 13 -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_AUTOCAL_WIDTH 1 -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_RESERVED_OFST 0 -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_RESERVED_LBN 14 -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_RESERVED_WIDTH 10 -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_OFST 0 -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_LBN 24 -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_WIDTH 8 - -/* MC_CMD_PCIE_TUNE_RXEQ_SET_IN msgrequest */ -#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_LENMIN 8 -#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_LENMAX 252 -#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_LENMAX_MCDI2 1020 -#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_LEN(num) (4+4*(num)) -#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_NUM(len) (((len)-4)/4) -/* Requested operation */ -#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PCIE_TUNE_OP_OFST 0 -#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PCIE_TUNE_OP_LEN 1 -/* Align the arguments to 32 bits */ -#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PCIE_TUNE_RSVD_OFST 1 -#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PCIE_TUNE_RSVD_LEN 3 -/* RXEQ Parameter */ -#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_OFST 4 -#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_LEN 4 -#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_MINNUM 1 -#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_MAXNUM 62 -#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_MAXNUM_MCDI2 254 -#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_ID_OFST 4 -#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_ID_LBN 0 -#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_ID_WIDTH 8 -/* Enum values, see field(s): */ -/* MC_CMD_PCIE_TUNE_RXEQ_GET_OUT/PARAM_ID */ -#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_LANE_OFST 4 -#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_LANE_LBN 8 -#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_LANE_WIDTH 5 -/* Enum values, see field(s): */ -/* MC_CMD_PCIE_TUNE_RXEQ_GET_OUT/PARAM_LANE */ -#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_AUTOCAL_OFST 4 -#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_AUTOCAL_LBN 13 -#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_AUTOCAL_WIDTH 1 -#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_RESERVED_OFST 4 -#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_RESERVED_LBN 14 -#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_RESERVED_WIDTH 2 -#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_INITIAL_OFST 4 -#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_INITIAL_LBN 16 -#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_INITIAL_WIDTH 8 -#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_RESERVED2_OFST 4 -#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_RESERVED2_LBN 24 -#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_RESERVED2_WIDTH 8 - -/* MC_CMD_PCIE_TUNE_RXEQ_SET_OUT msgresponse */ -#define MC_CMD_PCIE_TUNE_RXEQ_SET_OUT_LEN 0 - -/* MC_CMD_PCIE_TUNE_TXEQ_GET_IN 
msgrequest */ -#define MC_CMD_PCIE_TUNE_TXEQ_GET_IN_LEN 4 -/* Requested operation */ -#define MC_CMD_PCIE_TUNE_TXEQ_GET_IN_PCIE_TUNE_OP_OFST 0 -#define MC_CMD_PCIE_TUNE_TXEQ_GET_IN_PCIE_TUNE_OP_LEN 1 -/* Align the arguments to 32 bits */ -#define MC_CMD_PCIE_TUNE_TXEQ_GET_IN_PCIE_TUNE_RSVD_OFST 1 -#define MC_CMD_PCIE_TUNE_TXEQ_GET_IN_PCIE_TUNE_RSVD_LEN 3 - -/* MC_CMD_PCIE_TUNE_TXEQ_GET_OUT msgresponse */ -#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_LENMIN 4 -#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_LENMAX 252 -#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_LENMAX_MCDI2 1020 -#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_LEN(num) (0+4*(num)) -#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_NUM(len) (((len)-0)/4) -/* RXEQ Parameter */ -#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_OFST 0 -#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_LEN 4 -#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_MINNUM 1 -#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_MAXNUM 63 -#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_MAXNUM_MCDI2 255 -#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_ID_OFST 0 -#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_ID_LBN 0 -#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_ID_WIDTH 8 -/* enum: TxMargin (PIPE) */ -#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_TXMARGIN 0x0 -/* enum: TxSwing (PIPE) */ -#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_TXSWING 0x1 -/* enum: De-emphasis coefficient C(-1) (PIPE) */ -#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_CM1 0x2 -/* enum: De-emphasis coefficient C(0) (PIPE) */ -#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_C0 0x3 -/* enum: De-emphasis coefficient C(+1) (PIPE) */ -#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_CP1 0x4 -#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_LANE_OFST 0 -#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_LANE_LBN 8 -#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_LANE_WIDTH 4 -/* Enum values, see field(s): */ -/* MC_CMD_PCIE_TUNE_RXEQ_GET_OUT/PARAM_LANE */ -#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_RESERVED_OFST 0 -#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_RESERVED_LBN 12 -#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_RESERVED_WIDTH 12 -#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_CURRENT_OFST 0 -#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_CURRENT_LBN 24 -#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_CURRENT_WIDTH 8 - -/* MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN msgrequest */ -#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_LEN 8 -/* Requested operation */ -#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_PCIE_TUNE_OP_OFST 0 -#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_PCIE_TUNE_OP_LEN 1 -/* Align the arguments to 32 bits */ -#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_PCIE_TUNE_RSVD_OFST 1 -#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_PCIE_TUNE_RSVD_LEN 3 -#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_LANE_OFST 4 -#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_LANE_LEN 4 - -/* MC_CMD_PCIE_TUNE_START_EYE_PLOT_OUT msgresponse */ -#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_OUT_LEN 0 - -/* MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN msgrequest */ -#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_LEN 4 -/* Requested operation */ -#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_PCIE_TUNE_OP_OFST 0 -#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_PCIE_TUNE_OP_LEN 1 -/* Align the arguments to 32 bits */ -#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_PCIE_TUNE_RSVD_OFST 1 -#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_PCIE_TUNE_RSVD_LEN 3 - -/* MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT msgresponse */ -#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_LENMIN 0 -#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_LENMAX 252 -#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_LENMAX_MCDI2 1020 -#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_LEN(num) 
(0+2*(num)) -#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_NUM(len) (((len)-0)/2) -#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_OFST 0 -#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_LEN 2 -#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MINNUM 0 -#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MAXNUM 126 -#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MAXNUM_MCDI2 510 - -/* MC_CMD_PCIE_TUNE_BIST_SQUARE_WAVE_IN msgrequest */ -#define MC_CMD_PCIE_TUNE_BIST_SQUARE_WAVE_IN_LEN 0 - -/* MC_CMD_PCIE_TUNE_BIST_SQUARE_WAVE_OUT msgrequest */ -#define MC_CMD_PCIE_TUNE_BIST_SQUARE_WAVE_OUT_LEN 0 +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_CP1_STATUS_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN/CM1 */ +/* C(-1) value */ +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_CM1_VALUE_OFST 12 +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_CM1_VALUE_LEN 4 +/* C(0) value */ +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_C0_VALUE_OFST 16 +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_C0_VALUE_LEN 4 +/* C(+1) status */ +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_CP1_VALUE_OFST 20 +#define MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_CP1_VALUE_LEN 4 /***********************************/ @@ -21531,56 +21235,6 @@ #define MC_CMD_LICENSING_V3_OUT_RESERVED_1_LEN 24 -/***********************************/ -/* MC_CMD_LICENSING_GET_ID_V3 - * Get ID and type from the NVRAM_PARTITION_TYPE_LICENSE application license - * partition - V3 licensing (Medford) - */ -#define MC_CMD_LICENSING_GET_ID_V3 0xd1 -#undef MC_CMD_0xd1_PRIVILEGE_CTG - -#define MC_CMD_0xd1_PRIVILEGE_CTG SRIOV_CTG_GENERAL - -/* MC_CMD_LICENSING_GET_ID_V3_IN msgrequest */ -#define MC_CMD_LICENSING_GET_ID_V3_IN_LEN 0 - -/* MC_CMD_LICENSING_GET_ID_V3_OUT msgresponse */ -#define MC_CMD_LICENSING_GET_ID_V3_OUT_LENMIN 8 -#define MC_CMD_LICENSING_GET_ID_V3_OUT_LENMAX 252 -#define MC_CMD_LICENSING_GET_ID_V3_OUT_LENMAX_MCDI2 1020 -#define MC_CMD_LICENSING_GET_ID_V3_OUT_LEN(num) (8+1*(num)) -#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_NUM(len) (((len)-8)/1) -/* type of license (eg 3) */ -#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_TYPE_OFST 0 -#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_TYPE_LEN 4 -/* length of the license ID (in bytes) */ -#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_LENGTH_OFST 4 -#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_LENGTH_LEN 4 -/* the unique license ID of the adapter */ -#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_OFST 8 -#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_LEN 1 -#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_MINNUM 0 -#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_MAXNUM 244 -#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_MAXNUM_MCDI2 1012 - - -/***********************************/ -/* MC_CMD_MC2MC_PROXY - * Execute an arbitrary MCDI command on the slave MC of a dual-core device. - * This will fail on a single-core system. - */ -#define MC_CMD_MC2MC_PROXY 0xf4 -#undef MC_CMD_0xf4_PRIVILEGE_CTG - -#define MC_CMD_0xf4_PRIVILEGE_CTG SRIOV_CTG_GENERAL - -/* MC_CMD_MC2MC_PROXY_IN msgrequest */ -#define MC_CMD_MC2MC_PROXY_IN_LEN 0 - -/* MC_CMD_MC2MC_PROXY_OUT msgresponse */ -#define MC_CMD_MC2MC_PROXY_OUT_LEN 0 - - /***********************************/ /* MC_CMD_GET_LICENSED_APP_STATE * Query the state of an individual licensed application. 
(Note that the actual @@ -21609,424 +21263,6 @@ #define MC_CMD_GET_LICENSED_APP_STATE_OUT_LICENSED 0x1 - -/***********************************/ -/* MC_CMD_GET_LICENSED_V3_APP_STATE - * Query the state of an individual licensed application. (Note that the actual - * state may be invalidated by the MC_CMD_LICENSING_V3 OP_UPDATE_LICENSE - * operation or a reboot of the MC.) Used for V3 licensing (Medford) - */ -#define MC_CMD_GET_LICENSED_V3_APP_STATE 0xd2 -#undef MC_CMD_0xd2_PRIVILEGE_CTG - -#define MC_CMD_0xd2_PRIVILEGE_CTG SRIOV_CTG_GENERAL - -/* MC_CMD_GET_LICENSED_V3_APP_STATE_IN msgrequest */ -#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_LEN 8 -/* application ID to query (LICENSED_V3_APPS_xxx) expressed as a single bit - * mask - */ -#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_OFST 0 -#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_LEN 8 -#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_LO_OFST 0 -#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_LO_LEN 4 -#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_LO_LBN 0 -#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_LO_WIDTH 32 -#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_HI_OFST 4 -#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_HI_LEN 4 -#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_HI_LBN 32 -#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_HI_WIDTH 32 - -/* MC_CMD_GET_LICENSED_V3_APP_STATE_OUT msgresponse */ -#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_LEN 4 -/* state of this application */ -#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_STATE_OFST 0 -#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_STATE_LEN 4 -/* enum: no (or invalid) license is present for the application */ -#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_NOT_LICENSED 0x0 -/* enum: a valid license is present for the application */ -#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_LICENSED 0x1 - - -/***********************************/ -/* MC_CMD_GET_LICENSED_V3_FEATURE_STATES - * Query the state of one or more licensed features. (Note that the actual - * state may be invalidated by the MC_CMD_LICENSING_V3 OP_UPDATE_LICENSE - * operation or a reboot of the MC.)
Used for V3 licensing (Medford) - */ -#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES 0xd3 -#undef MC_CMD_0xd3_PRIVILEGE_CTG - -#define MC_CMD_0xd3_PRIVILEGE_CTG SRIOV_CTG_GENERAL - -/* MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN msgrequest */ -#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_LEN 8 -/* features to query (LICENSED_V3_FEATURES_xxx) expressed as a mask with one or - * more bits set - */ -#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_OFST 0 -#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_LEN 8 -#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_LO_OFST 0 -#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_LO_LEN 4 -#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_LO_LBN 0 -#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_LO_WIDTH 32 -#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_HI_OFST 4 -#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_HI_LEN 4 -#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_HI_LBN 32 -#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_HI_WIDTH 32 - -/* MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT msgresponse */ -#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_LEN 8 -/* states of these features - bit set for licensed, clear for not licensed */ -#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_OFST 0 -#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_LEN 8 -#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_LO_OFST 0 -#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_LO_LEN 4 -#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_LO_LBN 0 -#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_LO_WIDTH 32 -#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_HI_OFST 4 -#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_HI_LEN 4 -#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_HI_LBN 32 -#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_HI_WIDTH 32 - - -/***********************************/ -/* MC_CMD_LICENSED_APP_OP - * Perform an action for an individual licensed application - not used for V3 - * licensing. 
- */ -#define MC_CMD_LICENSED_APP_OP 0xf6 -#undef MC_CMD_0xf6_PRIVILEGE_CTG - -#define MC_CMD_0xf6_PRIVILEGE_CTG SRIOV_CTG_GENERAL - -/* MC_CMD_LICENSED_APP_OP_IN msgrequest */ -#define MC_CMD_LICENSED_APP_OP_IN_LENMIN 8 -#define MC_CMD_LICENSED_APP_OP_IN_LENMAX 252 -#define MC_CMD_LICENSED_APP_OP_IN_LENMAX_MCDI2 1020 -#define MC_CMD_LICENSED_APP_OP_IN_LEN(num) (8+4*(num)) -#define MC_CMD_LICENSED_APP_OP_IN_ARGS_NUM(len) (((len)-8)/4) -/* application ID */ -#define MC_CMD_LICENSED_APP_OP_IN_APP_ID_OFST 0 -#define MC_CMD_LICENSED_APP_OP_IN_APP_ID_LEN 4 -/* the type of operation requested */ -#define MC_CMD_LICENSED_APP_OP_IN_OP_OFST 4 -#define MC_CMD_LICENSED_APP_OP_IN_OP_LEN 4 -/* enum: validate application */ -#define MC_CMD_LICENSED_APP_OP_IN_OP_VALIDATE 0x0 -/* enum: mask application */ -#define MC_CMD_LICENSED_APP_OP_IN_OP_MASK 0x1 -/* arguments specific to this particular operation */ -#define MC_CMD_LICENSED_APP_OP_IN_ARGS_OFST 8 -#define MC_CMD_LICENSED_APP_OP_IN_ARGS_LEN 4 -#define MC_CMD_LICENSED_APP_OP_IN_ARGS_MINNUM 0 -#define MC_CMD_LICENSED_APP_OP_IN_ARGS_MAXNUM 61 -#define MC_CMD_LICENSED_APP_OP_IN_ARGS_MAXNUM_MCDI2 253 - -/* MC_CMD_LICENSED_APP_OP_OUT msgresponse */ -#define MC_CMD_LICENSED_APP_OP_OUT_LENMIN 0 -#define MC_CMD_LICENSED_APP_OP_OUT_LENMAX 252 -#define MC_CMD_LICENSED_APP_OP_OUT_LENMAX_MCDI2 1020 -#define MC_CMD_LICENSED_APP_OP_OUT_LEN(num) (0+4*(num)) -#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_NUM(len) (((len)-0)/4) -/* result specific to this particular operation */ -#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_OFST 0 -#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_LEN 4 -#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_MINNUM 0 -#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_MAXNUM 63 -#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_MAXNUM_MCDI2 255 - -/* MC_CMD_LICENSED_APP_OP_VALIDATE_IN msgrequest */ -#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_LEN 72 -/* application ID */ -#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_APP_ID_OFST 0 -#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_APP_ID_LEN 4 -/* the type of operation requested */ -#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_OP_OFST 4 -#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_OP_LEN 4 -/* validation challenge */ -#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_CHALLENGE_OFST 8 -#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_CHALLENGE_LEN 64 - -/* MC_CMD_LICENSED_APP_OP_VALIDATE_OUT msgresponse */ -#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_LEN 68 -/* feature expiry (time_t) */ -#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_EXPIRY_OFST 0 -#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_EXPIRY_LEN 4 -/* validation response */ -#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_RESPONSE_OFST 4 -#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_RESPONSE_LEN 64 - -/* MC_CMD_LICENSED_APP_OP_MASK_IN msgrequest */ -#define MC_CMD_LICENSED_APP_OP_MASK_IN_LEN 12 -/* application ID */ -#define MC_CMD_LICENSED_APP_OP_MASK_IN_APP_ID_OFST 0 -#define MC_CMD_LICENSED_APP_OP_MASK_IN_APP_ID_LEN 4 -/* the type of operation requested */ -#define MC_CMD_LICENSED_APP_OP_MASK_IN_OP_OFST 4 -#define MC_CMD_LICENSED_APP_OP_MASK_IN_OP_LEN 4 -/* flag */ -#define MC_CMD_LICENSED_APP_OP_MASK_IN_FLAG_OFST 8 -#define MC_CMD_LICENSED_APP_OP_MASK_IN_FLAG_LEN 4 - -/* MC_CMD_LICENSED_APP_OP_MASK_OUT msgresponse */ -#define MC_CMD_LICENSED_APP_OP_MASK_OUT_LEN 0 - - -/***********************************/ -/* MC_CMD_LICENSED_V3_VALIDATE_APP - * Perform validation for an individual licensed application - V3 licensing - * (Medford) - */ -#define MC_CMD_LICENSED_V3_VALIDATE_APP 0xd4 -#undef 
MC_CMD_0xd4_PRIVILEGE_CTG - -#define MC_CMD_0xd4_PRIVILEGE_CTG SRIOV_CTG_GENERAL - -/* MC_CMD_LICENSED_V3_VALIDATE_APP_IN msgrequest */ -#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_LEN 56 -/* challenge for validation (384 bits) */ -#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_CHALLENGE_OFST 0 -#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_CHALLENGE_LEN 48 -/* application ID expressed as a single bit mask */ -#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_OFST 48 -#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_LEN 8 -#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_LO_OFST 48 -#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_LO_LEN 4 -#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_LO_LBN 384 -#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_LO_WIDTH 32 -#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_HI_OFST 52 -#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_HI_LEN 4 -#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_HI_LBN 416 -#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_HI_WIDTH 32 - -/* MC_CMD_LICENSED_V3_VALIDATE_APP_OUT msgresponse */ -#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_LEN 116 -/* validation response to challenge in the form of ECDSA signature consisting - * of two 384-bit integers, r and s, in big-endian order. The signature signs a - * SHA-384 digest of a message constructed from the concatenation of the input - * message and the remaining fields of this output message, e.g. challenge[48 - * bytes] ... expiry_time[4 bytes] ... - */ -#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_RESPONSE_OFST 0 -#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_RESPONSE_LEN 96 -/* application expiry time */ -#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_TIME_OFST 96 -#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_TIME_LEN 4 -/* application expiry units */ -#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNITS_OFST 100 -#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNITS_LEN 4 -/* enum: expiry units are accounting units */ -#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNIT_ACC 0x0 -/* enum: expiry units are calendar days */ -#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNIT_DAYS 0x1 -/* base MAC address of the NIC stored in NVRAM (note that this is a constant - * value for a given NIC regardless of which function is calling, effectively - * this is PF0 base MAC address) - */ -#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_BASE_MACADDR_OFST 104 -#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_BASE_MACADDR_LEN 6 -/* MAC address of v-adaptor associated with the client. If no such v-adaptor - * exists, then the field is filled with 0xFF.
- */ -#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_VADAPTOR_MACADDR_OFST 110 -#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_VADAPTOR_MACADDR_LEN 6 - - -/***********************************/ -/* MC_CMD_LICENSED_V3_MASK_FEATURES - * Mask features - V3 licensing (Medford) - */ -#define MC_CMD_LICENSED_V3_MASK_FEATURES 0xd5 -#undef MC_CMD_0xd5_PRIVILEGE_CTG - -#define MC_CMD_0xd5_PRIVILEGE_CTG SRIOV_CTG_ADMIN - -/* MC_CMD_LICENSED_V3_MASK_FEATURES_IN msgrequest */ -#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_LEN 12 -/* mask to be applied to features to be changed */ -#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_OFST 0 -#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_LEN 8 -#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_LO_OFST 0 -#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_LO_LEN 4 -#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_LO_LBN 0 -#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_LO_WIDTH 32 -#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_HI_OFST 4 -#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_HI_LEN 4 -#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_HI_LBN 32 -#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_HI_WIDTH 32 -/* whether to turn on or turn off the masked features */ -#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_FLAG_OFST 8 -#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_FLAG_LEN 4 -/* enum: turn the features off */ -#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_OFF 0x0 -/* enum: turn the features back on */ -#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_ON 0x1 - -/* MC_CMD_LICENSED_V3_MASK_FEATURES_OUT msgresponse */ -#define MC_CMD_LICENSED_V3_MASK_FEATURES_OUT_LEN 0 - - -/***********************************/ -/* MC_CMD_LICENSING_V3_TEMPORARY - * Perform operations to support installation of a single temporary license in - * the adapter, in addition to those found in the licensing partition. See - * SF-116124-SW for an overview of how this could be used. The license is - * stored in MC persistent data and so will survive a MC reboot, but will be - * erased when the adapter is power cycled - */ -#define MC_CMD_LICENSING_V3_TEMPORARY 0xd6 -#undef MC_CMD_0xd6_PRIVILEGE_CTG - -#define MC_CMD_0xd6_PRIVILEGE_CTG SRIOV_CTG_ADMIN - -/* MC_CMD_LICENSING_V3_TEMPORARY_IN msgrequest */ -#define MC_CMD_LICENSING_V3_TEMPORARY_IN_LEN 4 -/* operation code */ -#define MC_CMD_LICENSING_V3_TEMPORARY_IN_OP_OFST 0 -#define MC_CMD_LICENSING_V3_TEMPORARY_IN_OP_LEN 4 -/* enum: install a new license, overwriting any existing temporary license. 
- * This is an asynchronous operation owing to the time taken to validate an - * ECDSA license - */ -#define MC_CMD_LICENSING_V3_TEMPORARY_SET 0x0 -/* enum: clear the license immediately rather than waiting for the next power - * cycle - */ -#define MC_CMD_LICENSING_V3_TEMPORARY_CLEAR 0x1 -/* enum: get the status of the asynchronous MC_CMD_LICENSING_V3_TEMPORARY_SET - * operation - */ -#define MC_CMD_LICENSING_V3_TEMPORARY_STATUS 0x2 - -/* MC_CMD_LICENSING_V3_TEMPORARY_IN_SET msgrequest */ -#define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_LEN 164 -#define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_OP_OFST 0 -#define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_OP_LEN 4 -/* ECDSA license and signature */ -#define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_LICENSE_OFST 4 -#define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_LICENSE_LEN 160 - -/* MC_CMD_LICENSING_V3_TEMPORARY_IN_CLEAR msgrequest */ -#define MC_CMD_LICENSING_V3_TEMPORARY_IN_CLEAR_LEN 4 -#define MC_CMD_LICENSING_V3_TEMPORARY_IN_CLEAR_OP_OFST 0 -#define MC_CMD_LICENSING_V3_TEMPORARY_IN_CLEAR_OP_LEN 4 - -/* MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS msgrequest */ -#define MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS_LEN 4 -#define MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS_OP_OFST 0 -#define MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS_OP_LEN 4 - -/* MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS msgresponse */ -#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LEN 12 -/* status code */ -#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_STATUS_OFST 0 -#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_STATUS_LEN 4 -/* enum: finished validating and installing license */ -#define MC_CMD_LICENSING_V3_TEMPORARY_STATUS_OK 0x0 -/* enum: license validation and installation in progress */ -#define MC_CMD_LICENSING_V3_TEMPORARY_STATUS_IN_PROGRESS 0x1 -/* enum: licensing error. More specific error messages are not provided to - * avoid exposing details of the licensing system to the client - */ -#define MC_CMD_LICENSING_V3_TEMPORARY_STATUS_ERROR 0x2 -/* bitmask of licensed features */ -#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_OFST 4 -#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_LEN 8 -#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_LO_OFST 4 -#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_LO_LEN 4 -#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_LO_LBN 32 -#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_LO_WIDTH 32 -#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_HI_OFST 8 -#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_HI_LEN 4 -#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_HI_LBN 64 -#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_HI_WIDTH 32 - - -/***********************************/ -/* MC_CMD_SET_PORT_SNIFF_CONFIG - * Configure RX port sniffing for the physical port associated with the calling - * function. Only a privileged function may change the port sniffing - * configuration. A copy of all traffic delivered to the host (non-promiscuous - * mode) or all traffic arriving at the port (promiscuous mode) may be - * delivered to a specific queue, or a set of queues with RSS. 
- */ -#define MC_CMD_SET_PORT_SNIFF_CONFIG 0xf7 -#undef MC_CMD_0xf7_PRIVILEGE_CTG - -#define MC_CMD_0xf7_PRIVILEGE_CTG SRIOV_CTG_ADMIN - -/* MC_CMD_SET_PORT_SNIFF_CONFIG_IN msgrequest */ -#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_LEN 16 -/* configuration flags */ -#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_FLAGS_OFST 0 -#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_FLAGS_LEN 4 -#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_ENABLE_OFST 0 -#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_ENABLE_LBN 0 -#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_ENABLE_WIDTH 1 -#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_PROMISCUOUS_OFST 0 -#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_PROMISCUOUS_LBN 1 -#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_PROMISCUOUS_WIDTH 1 -/* receive queue handle (for RSS mode, this is the base queue) */ -#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_QUEUE_OFST 4 -#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_QUEUE_LEN 4 -/* receive mode */ -#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_OFST 8 -#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_LEN 4 -/* enum: receive to just the specified queue */ -#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_SIMPLE 0x0 -/* enum: receive to multiple queues using RSS context */ -#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_RSS 0x1 -/* RSS context (for RX_MODE_RSS) as returned by MC_CMD_RSS_CONTEXT_ALLOC. Note - * that these handles should be considered opaque to the host, although a value - * of 0xFFFFFFFF is guaranteed never to be a valid handle. - */ -#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_OFST 12 -#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_LEN 4 - -/* MC_CMD_SET_PORT_SNIFF_CONFIG_OUT msgresponse */ -#define MC_CMD_SET_PORT_SNIFF_CONFIG_OUT_LEN 0 - - -/***********************************/ -/* MC_CMD_GET_PORT_SNIFF_CONFIG - * Obtain the current RX port sniffing configuration for the physical port - * associated with the calling function. Only a privileged function may read - * the configuration. 
- */ -#define MC_CMD_GET_PORT_SNIFF_CONFIG 0xf8 -#undef MC_CMD_0xf8_PRIVILEGE_CTG - -#define MC_CMD_0xf8_PRIVILEGE_CTG SRIOV_CTG_GENERAL - -/* MC_CMD_GET_PORT_SNIFF_CONFIG_IN msgrequest */ -#define MC_CMD_GET_PORT_SNIFF_CONFIG_IN_LEN 0 - -/* MC_CMD_GET_PORT_SNIFF_CONFIG_OUT msgresponse */ -#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_LEN 16 -/* configuration flags */ -#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_FLAGS_OFST 0 -#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_FLAGS_LEN 4 -#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_ENABLE_OFST 0 -#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_ENABLE_LBN 0 -#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_ENABLE_WIDTH 1 -#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_PROMISCUOUS_OFST 0 -#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_PROMISCUOUS_LBN 1 -#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_PROMISCUOUS_WIDTH 1 -/* receiving queue handle (for RSS mode, this is the base queue) */ -#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_QUEUE_OFST 4 -#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_QUEUE_LEN 4 -/* receive mode */ -#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_OFST 8 -#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_LEN 4 -/* enum: receiving to just the specified queue */ -#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_SIMPLE 0x0 -/* enum: receiving to multiple queues using RSS context */ -#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_RSS 0x1 -/* RSS context (for RX_MODE_RSS) */ -#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_OFST 12 -#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_LEN 4 - - /***********************************/ /* MC_CMD_SET_PARSER_DISP_CONFIG * Change configuration related to the parser-dispatcher subsystem. @@ -22072,305 +21308,6 @@ #define MC_CMD_SET_PARSER_DISP_CONFIG_OUT_LEN 0 -/***********************************/ -/* MC_CMD_GET_PARSER_DISP_CONFIG - * Read configuration related to the parser-dispatcher subsystem. - */ -#define MC_CMD_GET_PARSER_DISP_CONFIG 0xfa -#undef MC_CMD_0xfa_PRIVILEGE_CTG - -#define MC_CMD_0xfa_PRIVILEGE_CTG SRIOV_CTG_GENERAL - -/* MC_CMD_GET_PARSER_DISP_CONFIG_IN msgrequest */ -#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_LEN 8 -/* the type of configuration setting to read */ -#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_TYPE_OFST 0 -#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_TYPE_LEN 4 -/* Enum values, see field(s): */ -/* MC_CMD_SET_PARSER_DISP_CONFIG/MC_CMD_SET_PARSER_DISP_CONFIG_IN/TYPE */ -/* handle for the entity to query: queue handle, EVB port ID, etc. 
depending on - * the type of configuration setting being read - */ -#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_ENTITY_OFST 4 -#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_ENTITY_LEN 4 - -/* MC_CMD_GET_PARSER_DISP_CONFIG_OUT msgresponse */ -#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LENMIN 4 -#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LENMAX 252 -#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LENMAX_MCDI2 1020 -#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LEN(num) (0+4*(num)) -#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_NUM(len) (((len)-0)/4) -/* current value: the details depend on the type of configuration setting being - * read - */ -#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_OFST 0 -#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_LEN 4 -#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_MINNUM 1 -#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_MAXNUM 63 -#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_MAXNUM_MCDI2 255 - - -/***********************************/ -/* MC_CMD_SET_TX_PORT_SNIFF_CONFIG - * Configure TX port sniffing for the physical port associated with the calling - * function. Only a privileged function may change the port sniffing - * configuration. A copy of all traffic transmitted through the port may be - * delivered to a specific queue, or a set of queues with RSS. Note that these - * packets are delivered with transmit timestamps in the packet prefix, not - * receive timestamps, so it is likely that the queue(s) will need to be - * dedicated as TX sniff receivers. - */ -#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG 0xfb -#undef MC_CMD_0xfb_PRIVILEGE_CTG - -#define MC_CMD_0xfb_PRIVILEGE_CTG SRIOV_CTG_ADMIN - -/* MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN msgrequest */ -#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_LEN 16 -/* configuration flags */ -#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_FLAGS_OFST 0 -#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_FLAGS_LEN 4 -#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_ENABLE_OFST 0 -#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_ENABLE_LBN 0 -#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_ENABLE_WIDTH 1 -/* receive queue handle (for RSS mode, this is the base queue) */ -#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_QUEUE_OFST 4 -#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_QUEUE_LEN 4 -/* receive mode */ -#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_OFST 8 -#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_LEN 4 -/* enum: receive to just the specified queue */ -#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_SIMPLE 0x0 -/* enum: receive to multiple queues using RSS context */ -#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_RSS 0x1 -/* RSS context (for RX_MODE_RSS) as returned by MC_CMD_RSS_CONTEXT_ALLOC. Note - * that these handles should be considered opaque to the host, although a value - * of 0xFFFFFFFF is guaranteed never to be a valid handle. - */ -#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_OFST 12 -#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_LEN 4 - -/* MC_CMD_SET_TX_PORT_SNIFF_CONFIG_OUT msgresponse */ -#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_OUT_LEN 0 - - -/***********************************/ -/* MC_CMD_GET_TX_PORT_SNIFF_CONFIG - * Obtain the current TX port sniffing configuration for the physical port - * associated with the calling function. Only a privileged function may read - * the configuration. 
- */ -#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG 0xfc -#undef MC_CMD_0xfc_PRIVILEGE_CTG - -#define MC_CMD_0xfc_PRIVILEGE_CTG SRIOV_CTG_GENERAL - -/* MC_CMD_GET_TX_PORT_SNIFF_CONFIG_IN msgrequest */ -#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_IN_LEN 0 - -/* MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT msgresponse */ -#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_LEN 16 -/* configuration flags */ -#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_FLAGS_OFST 0 -#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_FLAGS_LEN 4 -#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_ENABLE_OFST 0 -#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_ENABLE_LBN 0 -#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_ENABLE_WIDTH 1 -/* receiving queue handle (for RSS mode, this is the base queue) */ -#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_QUEUE_OFST 4 -#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_QUEUE_LEN 4 -/* receive mode */ -#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_OFST 8 -#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_LEN 4 -/* enum: receiving to just the specified queue */ -#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_SIMPLE 0x0 -/* enum: receiving to multiple queues using RSS context */ -#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_RSS 0x1 -/* RSS context (for RX_MODE_RSS) */ -#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_OFST 12 -#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_LEN 4 - - -/***********************************/ -/* MC_CMD_RMON_STATS_RX_ERRORS - * Per queue rx error stats. - */ -#define MC_CMD_RMON_STATS_RX_ERRORS 0xfe -#undef MC_CMD_0xfe_PRIVILEGE_CTG - -#define MC_CMD_0xfe_PRIVILEGE_CTG SRIOV_CTG_GENERAL - -/* MC_CMD_RMON_STATS_RX_ERRORS_IN msgrequest */ -#define MC_CMD_RMON_STATS_RX_ERRORS_IN_LEN 8 -/* The rx queue to get stats for. */ -#define MC_CMD_RMON_STATS_RX_ERRORS_IN_RX_QUEUE_OFST 0 -#define MC_CMD_RMON_STATS_RX_ERRORS_IN_RX_QUEUE_LEN 4 -#define MC_CMD_RMON_STATS_RX_ERRORS_IN_FLAGS_OFST 4 -#define MC_CMD_RMON_STATS_RX_ERRORS_IN_FLAGS_LEN 4 -#define MC_CMD_RMON_STATS_RX_ERRORS_IN_RST_OFST 4 -#define MC_CMD_RMON_STATS_RX_ERRORS_IN_RST_LBN 0 -#define MC_CMD_RMON_STATS_RX_ERRORS_IN_RST_WIDTH 1 - -/* MC_CMD_RMON_STATS_RX_ERRORS_OUT msgresponse */ -#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_LEN 16 -#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_CRC_ERRORS_OFST 0 -#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_CRC_ERRORS_LEN 4 -#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_TRUNC_ERRORS_OFST 4 -#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_TRUNC_ERRORS_LEN 4 -#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_RX_NO_DESC_DROPS_OFST 8 -#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_RX_NO_DESC_DROPS_LEN 4 -#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_RX_ABORT_OFST 12 -#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_RX_ABORT_LEN 4 - - -/***********************************/ -/* MC_CMD_GET_PCIE_RESOURCE_INFO - * Find out about available PCIE resources - */ -#define MC_CMD_GET_PCIE_RESOURCE_INFO 0xfd -#undef MC_CMD_0xfd_PRIVILEGE_CTG - -#define MC_CMD_0xfd_PRIVILEGE_CTG SRIOV_CTG_GENERAL - -/* MC_CMD_GET_PCIE_RESOURCE_INFO_IN msgrequest */ -#define MC_CMD_GET_PCIE_RESOURCE_INFO_IN_LEN 0 - -/* MC_CMD_GET_PCIE_RESOURCE_INFO_OUT msgresponse */ -#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_LEN 28 -/* The maximum number of PFs the device can expose */ -#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_PFS_OFST 0 -#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_PFS_LEN 4 -/* The maximum number of VFs the device can expose in total */ -#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VFS_OFST 4 -#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VFS_LEN 4 -/* The 
maximum number of MSI-X vectors the device can provide in total */ -#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VECTORS_OFST 8 -#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VECTORS_LEN 4 -/* the number of MSI-X vectors the device will allocate by default to each PF - */ -#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_DEFAULT_PF_VECTORS_OFST 12 -#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_DEFAULT_PF_VECTORS_LEN 4 -/* the number of MSI-X vectors the device will allocate by default to each VF - */ -#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_DEFAULT_VF_VECTORS_OFST 16 -#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_DEFAULT_VF_VECTORS_LEN 4 -/* the maximum number of MSI-X vectors the device can allocate to any one PF */ -#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_PF_VECTORS_OFST 20 -#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_PF_VECTORS_LEN 4 -/* the maximum number of MSI-X vectors the device can allocate to any one VF */ -#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VF_VECTORS_OFST 24 -#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VF_VECTORS_LEN 4 - - -/***********************************/ -/* MC_CMD_GET_PORT_MODES - * Find out about available port modes - */ -#define MC_CMD_GET_PORT_MODES 0xff -#undef MC_CMD_0xff_PRIVILEGE_CTG - -#define MC_CMD_0xff_PRIVILEGE_CTG SRIOV_CTG_GENERAL - -/* MC_CMD_GET_PORT_MODES_IN msgrequest */ -#define MC_CMD_GET_PORT_MODES_IN_LEN 0 - -/* MC_CMD_GET_PORT_MODES_OUT msgresponse */ -#define MC_CMD_GET_PORT_MODES_OUT_LEN 12 -/* Bitmask of port modes available on the board (indexed by TLV_PORT_MODE_*) - * that are supported for customer use in production firmware. - */ -#define MC_CMD_GET_PORT_MODES_OUT_MODES_OFST 0 -#define MC_CMD_GET_PORT_MODES_OUT_MODES_LEN 4 -/* Default (canonical) board mode */ -#define MC_CMD_GET_PORT_MODES_OUT_DEFAULT_MODE_OFST 4 -#define MC_CMD_GET_PORT_MODES_OUT_DEFAULT_MODE_LEN 4 -/* Current board mode */ -#define MC_CMD_GET_PORT_MODES_OUT_CURRENT_MODE_OFST 8 -#define MC_CMD_GET_PORT_MODES_OUT_CURRENT_MODE_LEN 4 - -/* MC_CMD_GET_PORT_MODES_OUT_V2 msgresponse */ -#define MC_CMD_GET_PORT_MODES_OUT_V2_LEN 16 -/* Bitmask of port modes available on the board (indexed by TLV_PORT_MODE_*) - * that are supported for customer use in production firmware. - */ -#define MC_CMD_GET_PORT_MODES_OUT_V2_MODES_OFST 0 -#define MC_CMD_GET_PORT_MODES_OUT_V2_MODES_LEN 4 -/* Default (canonical) board mode */ -#define MC_CMD_GET_PORT_MODES_OUT_V2_DEFAULT_MODE_OFST 4 -#define MC_CMD_GET_PORT_MODES_OUT_V2_DEFAULT_MODE_LEN 4 -/* Current board mode */ -#define MC_CMD_GET_PORT_MODES_OUT_V2_CURRENT_MODE_OFST 8 -#define MC_CMD_GET_PORT_MODES_OUT_V2_CURRENT_MODE_LEN 4 -/* Bitmask of engineering port modes available on the board (indexed by - * TLV_PORT_MODE_*). A superset of MC_CMD_GET_PORT_MODES_OUT/MODES that - * contains all modes implemented in firmware for a particular board. Modes - * listed in MODES are considered production modes and should be exposed in - * userland tools. Modes listed in ENGINEERING_MODES, but not in MODES - * should be considered hidden (not to be exposed in userland tools) and for - * engineering use only. There are no other semantic differences and any mode - * listed in either MODES or ENGINEERING_MODES can be set on the board. - */ -#define MC_CMD_GET_PORT_MODES_OUT_V2_ENGINEERING_MODES_OFST 12 -#define MC_CMD_GET_PORT_MODES_OUT_V2_ENGINEERING_MODES_LEN 4 - - -/***********************************/ -/* MC_CMD_OVERRIDE_PORT_MODE - * Override flash config port mode for subsequent MC reboot(s). 
Override data - * is stored in the persistent data section of DMEM and activated on next MC - * warm reboot. A cold reboot resets the override. It is assumed that a - * sufficient number of PFs are available and that port mapping is valid for - * the new port mode, as the override does not affect PF configuration. - */ -#define MC_CMD_OVERRIDE_PORT_MODE 0x137 -#undef MC_CMD_0x137_PRIVILEGE_CTG - -#define MC_CMD_0x137_PRIVILEGE_CTG SRIOV_CTG_ADMIN - -/* MC_CMD_OVERRIDE_PORT_MODE_IN msgrequest */ -#define MC_CMD_OVERRIDE_PORT_MODE_IN_LEN 8 -#define MC_CMD_OVERRIDE_PORT_MODE_IN_FLAGS_OFST 0 -#define MC_CMD_OVERRIDE_PORT_MODE_IN_FLAGS_LEN 4 -#define MC_CMD_OVERRIDE_PORT_MODE_IN_ENABLE_OFST 0 -#define MC_CMD_OVERRIDE_PORT_MODE_IN_ENABLE_LBN 0 -#define MC_CMD_OVERRIDE_PORT_MODE_IN_ENABLE_WIDTH 1 -/* New mode (TLV_PORT_MODE_*) to set, if override enabled */ -#define MC_CMD_OVERRIDE_PORT_MODE_IN_MODE_OFST 4 -#define MC_CMD_OVERRIDE_PORT_MODE_IN_MODE_LEN 4 - -/* MC_CMD_OVERRIDE_PORT_MODE_OUT msgresponse */ -#define MC_CMD_OVERRIDE_PORT_MODE_OUT_LEN 0 - - -/***********************************/ -/* MC_CMD_READ_ATB - * Sample voltages on the ATB - */ -#define MC_CMD_READ_ATB 0x100 -#undef MC_CMD_0x100_PRIVILEGE_CTG - -#define MC_CMD_0x100_PRIVILEGE_CTG SRIOV_CTG_INSECURE - -/* MC_CMD_READ_ATB_IN msgrequest */ -#define MC_CMD_READ_ATB_IN_LEN 16 -#define MC_CMD_READ_ATB_IN_SIGNAL_BUS_OFST 0 -#define MC_CMD_READ_ATB_IN_SIGNAL_BUS_LEN 4 -#define MC_CMD_READ_ATB_IN_BUS_CCOM 0x0 /* enum */ -#define MC_CMD_READ_ATB_IN_BUS_CKR 0x1 /* enum */ -#define MC_CMD_READ_ATB_IN_BUS_CPCIE 0x8 /* enum */ -#define MC_CMD_READ_ATB_IN_SIGNAL_EN_BITNO_OFST 4 -#define MC_CMD_READ_ATB_IN_SIGNAL_EN_BITNO_LEN 4 -#define MC_CMD_READ_ATB_IN_SIGNAL_SEL_OFST 8 -#define MC_CMD_READ_ATB_IN_SIGNAL_SEL_LEN 4 -#define MC_CMD_READ_ATB_IN_SETTLING_TIME_US_OFST 12 -#define MC_CMD_READ_ATB_IN_SETTLING_TIME_US_LEN 4 - -/* MC_CMD_READ_ATB_OUT msgresponse */ -#define MC_CMD_READ_ATB_OUT_LEN 4 -#define MC_CMD_READ_ATB_OUT_SAMPLE_MV_OFST 0 -#define MC_CMD_READ_ATB_OUT_SAMPLE_MV_LEN 4 - - /***********************************/ /* MC_CMD_GET_WORKAROUNDS * Read the list of all implemented and all currently enabled workarounds. The @@ -22538,447 +21475,6 @@ #define MC_CMD_LINK_STATE_MODE_OUT_OLD_MODE_OFST 0 #define MC_CMD_LINK_STATE_MODE_OUT_OLD_MODE_LEN 4 - -/***********************************/ -/* MC_CMD_GET_SNAPSHOT_LENGTH - * Obtain the current range of allowable values for the SNAPSHOT_LENGTH - * parameter to MC_CMD_INIT_RXQ. - */ -#define MC_CMD_GET_SNAPSHOT_LENGTH 0x101 -#undef MC_CMD_0x101_PRIVILEGE_CTG - -#define MC_CMD_0x101_PRIVILEGE_CTG SRIOV_CTG_GENERAL - -/* MC_CMD_GET_SNAPSHOT_LENGTH_IN msgrequest */ -#define MC_CMD_GET_SNAPSHOT_LENGTH_IN_LEN 0 - -/* MC_CMD_GET_SNAPSHOT_LENGTH_OUT msgresponse */ -#define MC_CMD_GET_SNAPSHOT_LENGTH_OUT_LEN 8 -/* Minimum acceptable snapshot length. */ -#define MC_CMD_GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MIN_OFST 0 -#define MC_CMD_GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MIN_LEN 4 -/* Maximum acceptable snapshot length.
*/ -#define MC_CMD_GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MAX_OFST 4 -#define MC_CMD_GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MAX_LEN 4 - - -/***********************************/ -/* MC_CMD_FUSE_DIAGS - * Additional fuse diagnostics - */ -#define MC_CMD_FUSE_DIAGS 0x102 -#undef MC_CMD_0x102_PRIVILEGE_CTG - -#define MC_CMD_0x102_PRIVILEGE_CTG SRIOV_CTG_INSECURE - -/* MC_CMD_FUSE_DIAGS_IN msgrequest */ -#define MC_CMD_FUSE_DIAGS_IN_LEN 0 - -/* MC_CMD_FUSE_DIAGS_OUT msgresponse */ -#define MC_CMD_FUSE_DIAGS_OUT_LEN 48 -/* Total number of mismatched bits between pairs in area 0 */ -#define MC_CMD_FUSE_DIAGS_OUT_AREA0_MISMATCH_BITS_OFST 0 -#define MC_CMD_FUSE_DIAGS_OUT_AREA0_MISMATCH_BITS_LEN 4 -/* Total number of unexpectedly clear (set in B but not A) bits in area 0 */ -#define MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_A_BAD_BITS_OFST 4 -#define MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_A_BAD_BITS_LEN 4 -/* Total number of unexpectedly clear (set in A but not B) bits in area 0 */ -#define MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_B_BAD_BITS_OFST 8 -#define MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_B_BAD_BITS_LEN 4 -/* Checksum of data after logical OR of pairs in area 0 */ -#define MC_CMD_FUSE_DIAGS_OUT_AREA0_CHECKSUM_OFST 12 -#define MC_CMD_FUSE_DIAGS_OUT_AREA0_CHECKSUM_LEN 4 -/* Total number of mismatched bits between pairs in area 1 */ -#define MC_CMD_FUSE_DIAGS_OUT_AREA1_MISMATCH_BITS_OFST 16 -#define MC_CMD_FUSE_DIAGS_OUT_AREA1_MISMATCH_BITS_LEN 4 -/* Total number of unexpectedly clear (set in B but not A) bits in area 1 */ -#define MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_A_BAD_BITS_OFST 20 -#define MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_A_BAD_BITS_LEN 4 -/* Total number of unexpectedly clear (set in A but not B) bits in area 1 */ -#define MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_B_BAD_BITS_OFST 24 -#define MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_B_BAD_BITS_LEN 4 -/* Checksum of data after logical OR of pairs in area 1 */ -#define MC_CMD_FUSE_DIAGS_OUT_AREA1_CHECKSUM_OFST 28 -#define MC_CMD_FUSE_DIAGS_OUT_AREA1_CHECKSUM_LEN 4 -/* Total number of mismatched bits between pairs in area 2 */ -#define MC_CMD_FUSE_DIAGS_OUT_AREA2_MISMATCH_BITS_OFST 32 -#define MC_CMD_FUSE_DIAGS_OUT_AREA2_MISMATCH_BITS_LEN 4 -/* Total number of unexpectedly clear (set in B but not A) bits in area 2 */ -#define MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_A_BAD_BITS_OFST 36 -#define MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_A_BAD_BITS_LEN 4 -/* Total number of unexpectedly clear (set in A but not B) bits in area 2 */ -#define MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_B_BAD_BITS_OFST 40 -#define MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_B_BAD_BITS_LEN 4 -/* Checksum of data after logical OR of pairs in area 2 */ -#define MC_CMD_FUSE_DIAGS_OUT_AREA2_CHECKSUM_OFST 44 -#define MC_CMD_FUSE_DIAGS_OUT_AREA2_CHECKSUM_LEN 4 - - -/***********************************/ -/* MC_CMD_PRIVILEGE_MODIFY - * Modify the privileges of a set of PCIe functions. Note that this operation - * only affects non-admin functions unless the admin privilege itself is - * included in one of the masks provided. - */ -#define MC_CMD_PRIVILEGE_MODIFY 0x60 -#undef MC_CMD_0x60_PRIVILEGE_CTG - -#define MC_CMD_0x60_PRIVILEGE_CTG SRIOV_CTG_ADMIN - -/* MC_CMD_PRIVILEGE_MODIFY_IN msgrequest */ -#define MC_CMD_PRIVILEGE_MODIFY_IN_LEN 16 -/* The groups of functions to have their privilege masks modified.
*/ -#define MC_CMD_PRIVILEGE_MODIFY_IN_FN_GROUP_OFST 0 -#define MC_CMD_PRIVILEGE_MODIFY_IN_FN_GROUP_LEN 4 -#define MC_CMD_PRIVILEGE_MODIFY_IN_NONE 0x0 /* enum */ -#define MC_CMD_PRIVILEGE_MODIFY_IN_ALL 0x1 /* enum */ -#define MC_CMD_PRIVILEGE_MODIFY_IN_PFS_ONLY 0x2 /* enum */ -#define MC_CMD_PRIVILEGE_MODIFY_IN_VFS_ONLY 0x3 /* enum */ -#define MC_CMD_PRIVILEGE_MODIFY_IN_VFS_OF_PF 0x4 /* enum */ -#define MC_CMD_PRIVILEGE_MODIFY_IN_ONE 0x5 /* enum */ -/* For VFS_OF_PF specify the PF, for ONE specify the target function */ -#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_OFST 4 -#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_LEN 4 -#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_PF_OFST 4 -#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_PF_LBN 0 -#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_PF_WIDTH 16 -#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_VF_OFST 4 -#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_VF_LBN 16 -#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_VF_WIDTH 16 -/* Privileges to be added to the target functions. For privilege definitions - * refer to the command MC_CMD_PRIVILEGE_MASK - */ -#define MC_CMD_PRIVILEGE_MODIFY_IN_ADD_MASK_OFST 8 -#define MC_CMD_PRIVILEGE_MODIFY_IN_ADD_MASK_LEN 4 -/* Privileges to be removed from the target functions. For privilege - * definitions refer to the command MC_CMD_PRIVILEGE_MASK - */ -#define MC_CMD_PRIVILEGE_MODIFY_IN_REMOVE_MASK_OFST 12 -#define MC_CMD_PRIVILEGE_MODIFY_IN_REMOVE_MASK_LEN 4 - -/* MC_CMD_PRIVILEGE_MODIFY_OUT msgresponse */ -#define MC_CMD_PRIVILEGE_MODIFY_OUT_LEN 0 - - -/***********************************/ -/* MC_CMD_XPM_READ_BYTES - * Read XPM memory - */ -#define MC_CMD_XPM_READ_BYTES 0x103 -#undef MC_CMD_0x103_PRIVILEGE_CTG - -#define MC_CMD_0x103_PRIVILEGE_CTG SRIOV_CTG_ADMIN - -/* MC_CMD_XPM_READ_BYTES_IN msgrequest */ -#define MC_CMD_XPM_READ_BYTES_IN_LEN 8 -/* Start address (byte) */ -#define MC_CMD_XPM_READ_BYTES_IN_ADDR_OFST 0 -#define MC_CMD_XPM_READ_BYTES_IN_ADDR_LEN 4 -/* Count (bytes) */ -#define MC_CMD_XPM_READ_BYTES_IN_COUNT_OFST 4 -#define MC_CMD_XPM_READ_BYTES_IN_COUNT_LEN 4 - -/* MC_CMD_XPM_READ_BYTES_OUT msgresponse */ -#define MC_CMD_XPM_READ_BYTES_OUT_LENMIN 0 -#define MC_CMD_XPM_READ_BYTES_OUT_LENMAX 252 -#define MC_CMD_XPM_READ_BYTES_OUT_LENMAX_MCDI2 1020 -#define MC_CMD_XPM_READ_BYTES_OUT_LEN(num) (0+1*(num)) -#define MC_CMD_XPM_READ_BYTES_OUT_DATA_NUM(len) (((len)-0)/1) -/* Data */ -#define MC_CMD_XPM_READ_BYTES_OUT_DATA_OFST 0 -#define MC_CMD_XPM_READ_BYTES_OUT_DATA_LEN 1 -#define MC_CMD_XPM_READ_BYTES_OUT_DATA_MINNUM 0 -#define MC_CMD_XPM_READ_BYTES_OUT_DATA_MAXNUM 252 -#define MC_CMD_XPM_READ_BYTES_OUT_DATA_MAXNUM_MCDI2 1020 - - -/***********************************/ -/* MC_CMD_XPM_WRITE_BYTES - * Write XPM memory - */ -#define MC_CMD_XPM_WRITE_BYTES 0x104 -#undef MC_CMD_0x104_PRIVILEGE_CTG - -#define MC_CMD_0x104_PRIVILEGE_CTG SRIOV_CTG_INSECURE - -/* MC_CMD_XPM_WRITE_BYTES_IN msgrequest */ -#define MC_CMD_XPM_WRITE_BYTES_IN_LENMIN 8 -#define MC_CMD_XPM_WRITE_BYTES_IN_LENMAX 252 -#define MC_CMD_XPM_WRITE_BYTES_IN_LENMAX_MCDI2 1020 -#define MC_CMD_XPM_WRITE_BYTES_IN_LEN(num) (8+1*(num)) -#define MC_CMD_XPM_WRITE_BYTES_IN_DATA_NUM(len) (((len)-8)/1) -/* Start address (byte) */ -#define MC_CMD_XPM_WRITE_BYTES_IN_ADDR_OFST 0 -#define MC_CMD_XPM_WRITE_BYTES_IN_ADDR_LEN 4 -/* Count (bytes) */ -#define MC_CMD_XPM_WRITE_BYTES_IN_COUNT_OFST 4 -#define MC_CMD_XPM_WRITE_BYTES_IN_COUNT_LEN 4 -/* Data */ -#define MC_CMD_XPM_WRITE_BYTES_IN_DATA_OFST 8 -#define MC_CMD_XPM_WRITE_BYTES_IN_DATA_LEN 1 -#define 
MC_CMD_XPM_WRITE_BYTES_IN_DATA_MINNUM 0 -#define MC_CMD_XPM_WRITE_BYTES_IN_DATA_MAXNUM 244 -#define MC_CMD_XPM_WRITE_BYTES_IN_DATA_MAXNUM_MCDI2 1012 - -/* MC_CMD_XPM_WRITE_BYTES_OUT msgresponse */ -#define MC_CMD_XPM_WRITE_BYTES_OUT_LEN 0 - - -/***********************************/ -/* MC_CMD_XPM_READ_SECTOR - * Read XPM sector - */ -#define MC_CMD_XPM_READ_SECTOR 0x105 -#undef MC_CMD_0x105_PRIVILEGE_CTG - -#define MC_CMD_0x105_PRIVILEGE_CTG SRIOV_CTG_INSECURE - -/* MC_CMD_XPM_READ_SECTOR_IN msgrequest */ -#define MC_CMD_XPM_READ_SECTOR_IN_LEN 8 -/* Sector index */ -#define MC_CMD_XPM_READ_SECTOR_IN_INDEX_OFST 0 -#define MC_CMD_XPM_READ_SECTOR_IN_INDEX_LEN 4 -/* Sector size */ -#define MC_CMD_XPM_READ_SECTOR_IN_SIZE_OFST 4 -#define MC_CMD_XPM_READ_SECTOR_IN_SIZE_LEN 4 - -/* MC_CMD_XPM_READ_SECTOR_OUT msgresponse */ -#define MC_CMD_XPM_READ_SECTOR_OUT_LENMIN 4 -#define MC_CMD_XPM_READ_SECTOR_OUT_LENMAX 36 -#define MC_CMD_XPM_READ_SECTOR_OUT_LENMAX_MCDI2 36 -#define MC_CMD_XPM_READ_SECTOR_OUT_LEN(num) (4+1*(num)) -#define MC_CMD_XPM_READ_SECTOR_OUT_DATA_NUM(len) (((len)-4)/1) -/* Sector type */ -#define MC_CMD_XPM_READ_SECTOR_OUT_TYPE_OFST 0 -#define MC_CMD_XPM_READ_SECTOR_OUT_TYPE_LEN 4 -#define MC_CMD_XPM_READ_SECTOR_OUT_BLANK 0x0 /* enum */ -#define MC_CMD_XPM_READ_SECTOR_OUT_CRYPTO_KEY_128 0x1 /* enum */ -#define MC_CMD_XPM_READ_SECTOR_OUT_CRYPTO_KEY_256 0x2 /* enum */ -#define MC_CMD_XPM_READ_SECTOR_OUT_CRYPTO_DATA 0x3 /* enum */ -#define MC_CMD_XPM_READ_SECTOR_OUT_INVALID 0xff /* enum */ -/* Sector data */ -#define MC_CMD_XPM_READ_SECTOR_OUT_DATA_OFST 4 -#define MC_CMD_XPM_READ_SECTOR_OUT_DATA_LEN 1 -#define MC_CMD_XPM_READ_SECTOR_OUT_DATA_MINNUM 0 -#define MC_CMD_XPM_READ_SECTOR_OUT_DATA_MAXNUM 32 -#define MC_CMD_XPM_READ_SECTOR_OUT_DATA_MAXNUM_MCDI2 32 - - -/***********************************/ -/* MC_CMD_XPM_WRITE_SECTOR - * Write XPM sector - */ -#define MC_CMD_XPM_WRITE_SECTOR 0x106 -#undef MC_CMD_0x106_PRIVILEGE_CTG - -#define MC_CMD_0x106_PRIVILEGE_CTG SRIOV_CTG_INSECURE - -/* MC_CMD_XPM_WRITE_SECTOR_IN msgrequest */ -#define MC_CMD_XPM_WRITE_SECTOR_IN_LENMIN 12 -#define MC_CMD_XPM_WRITE_SECTOR_IN_LENMAX 44 -#define MC_CMD_XPM_WRITE_SECTOR_IN_LENMAX_MCDI2 44 -#define MC_CMD_XPM_WRITE_SECTOR_IN_LEN(num) (12+1*(num)) -#define MC_CMD_XPM_WRITE_SECTOR_IN_DATA_NUM(len) (((len)-12)/1) -/* If writing fails due to an uncorrectable error, try up to RETRIES following - * sectors (or until no more space available). If 0, only one write attempt is - * made. Note that uncorrectable errors are unlikely, thanks to XPM self-repair - * mechanism. 
- */ -#define MC_CMD_XPM_WRITE_SECTOR_IN_RETRIES_OFST 0 -#define MC_CMD_XPM_WRITE_SECTOR_IN_RETRIES_LEN 1 -#define MC_CMD_XPM_WRITE_SECTOR_IN_RESERVED_OFST 1 -#define MC_CMD_XPM_WRITE_SECTOR_IN_RESERVED_LEN 3 -/* Sector type */ -#define MC_CMD_XPM_WRITE_SECTOR_IN_TYPE_OFST 4 -#define MC_CMD_XPM_WRITE_SECTOR_IN_TYPE_LEN 4 -/* Enum values, see field(s): */ -/* MC_CMD_XPM_READ_SECTOR/MC_CMD_XPM_READ_SECTOR_OUT/TYPE */ -/* Sector size */ -#define MC_CMD_XPM_WRITE_SECTOR_IN_SIZE_OFST 8 -#define MC_CMD_XPM_WRITE_SECTOR_IN_SIZE_LEN 4 -/* Sector data */ -#define MC_CMD_XPM_WRITE_SECTOR_IN_DATA_OFST 12 -#define MC_CMD_XPM_WRITE_SECTOR_IN_DATA_LEN 1 -#define MC_CMD_XPM_WRITE_SECTOR_IN_DATA_MINNUM 0 -#define MC_CMD_XPM_WRITE_SECTOR_IN_DATA_MAXNUM 32 -#define MC_CMD_XPM_WRITE_SECTOR_IN_DATA_MAXNUM_MCDI2 32 - -/* MC_CMD_XPM_WRITE_SECTOR_OUT msgresponse */ -#define MC_CMD_XPM_WRITE_SECTOR_OUT_LEN 4 -/* New sector index */ -#define MC_CMD_XPM_WRITE_SECTOR_OUT_INDEX_OFST 0 -#define MC_CMD_XPM_WRITE_SECTOR_OUT_INDEX_LEN 4 - - -/***********************************/ -/* MC_CMD_XPM_INVALIDATE_SECTOR - * Invalidate XPM sector - */ -#define MC_CMD_XPM_INVALIDATE_SECTOR 0x107 -#undef MC_CMD_0x107_PRIVILEGE_CTG - -#define MC_CMD_0x107_PRIVILEGE_CTG SRIOV_CTG_INSECURE - -/* MC_CMD_XPM_INVALIDATE_SECTOR_IN msgrequest */ -#define MC_CMD_XPM_INVALIDATE_SECTOR_IN_LEN 4 -/* Sector index */ -#define MC_CMD_XPM_INVALIDATE_SECTOR_IN_INDEX_OFST 0 -#define MC_CMD_XPM_INVALIDATE_SECTOR_IN_INDEX_LEN 4 - -/* MC_CMD_XPM_INVALIDATE_SECTOR_OUT msgresponse */ -#define MC_CMD_XPM_INVALIDATE_SECTOR_OUT_LEN 0 - - -/***********************************/ -/* MC_CMD_XPM_BLANK_CHECK - * Blank-check XPM memory and report bad locations - */ -#define MC_CMD_XPM_BLANK_CHECK 0x108 -#undef MC_CMD_0x108_PRIVILEGE_CTG - -#define MC_CMD_0x108_PRIVILEGE_CTG SRIOV_CTG_INSECURE - -/* MC_CMD_XPM_BLANK_CHECK_IN msgrequest */ -#define MC_CMD_XPM_BLANK_CHECK_IN_LEN 8 -/* Start address (byte) */ -#define MC_CMD_XPM_BLANK_CHECK_IN_ADDR_OFST 0 -#define MC_CMD_XPM_BLANK_CHECK_IN_ADDR_LEN 4 -/* Count (bytes) */ -#define MC_CMD_XPM_BLANK_CHECK_IN_COUNT_OFST 4 -#define MC_CMD_XPM_BLANK_CHECK_IN_COUNT_LEN 4 - -/* MC_CMD_XPM_BLANK_CHECK_OUT msgresponse */ -#define MC_CMD_XPM_BLANK_CHECK_OUT_LENMIN 4 -#define MC_CMD_XPM_BLANK_CHECK_OUT_LENMAX 252 -#define MC_CMD_XPM_BLANK_CHECK_OUT_LENMAX_MCDI2 1020 -#define MC_CMD_XPM_BLANK_CHECK_OUT_LEN(num) (4+2*(num)) -#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_NUM(len) (((len)-4)/2) -/* Total number of bad (non-blank) locations */ -#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_COUNT_OFST 0 -#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_COUNT_LEN 4 -/* Addresses of bad locations (may be less than BAD_COUNT, if all cannot fit - * into MCDI response) - */ -#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_OFST 4 -#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_LEN 2 -#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_MINNUM 0 -#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_MAXNUM 124 -#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_MAXNUM_MCDI2 508 - - -/***********************************/ -/* MC_CMD_XPM_REPAIR - * Blank-check and repair XPM memory - */ -#define MC_CMD_XPM_REPAIR 0x109 -#undef MC_CMD_0x109_PRIVILEGE_CTG - -#define MC_CMD_0x109_PRIVILEGE_CTG SRIOV_CTG_INSECURE - -/* MC_CMD_XPM_REPAIR_IN msgrequest */ -#define MC_CMD_XPM_REPAIR_IN_LEN 8 -/* Start address (byte) */ -#define MC_CMD_XPM_REPAIR_IN_ADDR_OFST 0 -#define MC_CMD_XPM_REPAIR_IN_ADDR_LEN 4 -/* Count (bytes) */ -#define MC_CMD_XPM_REPAIR_IN_COUNT_OFST 4 -#define MC_CMD_XPM_REPAIR_IN_COUNT_LEN 4 
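For context, layout definitions like the XPM read/write commands above are not used directly; a driver fills the request with the _OFST/_LEN macros and hands it to the firmware through the MCDI helpers. Below is a minimal sketch, assuming the MCDI_DECLARE_BUF/MCDI_SET_DWORD/MCDI_PTR macros and efx_mcdi_rpc() from the in-tree sfc driver; the xpm_read_bytes() wrapper itself is hypothetical and error handling is trimmed.

#include "net_driver.h"	/* sfc-local headers assumed */
#include "mcdi.h"
#include "mcdi_pcol.h"

/* Hypothetical wrapper: read COUNT bytes of XPM memory starting at ADDR. */
static int xpm_read_bytes(struct efx_nic *efx, u32 addr, u32 count, u8 *buf)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_XPM_READ_BYTES_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_XPM_READ_BYTES_OUT_LENMAX);
	size_t outlen;
	int rc;

	/* A single response carries at most DATA_MAXNUM bytes. */
	if (count > MC_CMD_XPM_READ_BYTES_OUT_DATA_MAXNUM)
		return -EINVAL;

	/* Populate the request using the _OFST/_LEN layout above. */
	MCDI_SET_DWORD(inbuf, XPM_READ_BYTES_IN_ADDR, addr);
	MCDI_SET_DWORD(inbuf, XPM_READ_BYTES_IN_COUNT, count);

	rc = efx_mcdi_rpc(efx, MC_CMD_XPM_READ_BYTES, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < MC_CMD_XPM_READ_BYTES_OUT_LEN(count))
		return -EIO;

	/* DATA is a plain byte array at offset 0 of the response. */
	memcpy(buf, MCDI_PTR(outbuf, XPM_READ_BYTES_OUT_DATA), count);
	return 0;
}

The sector-oriented commands (READ_SECTOR, WRITE_SECTOR, INVALIDATE_SECTOR) follow the same request/response pattern with their own field macros.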
- -/* MC_CMD_XPM_REPAIR_OUT msgresponse */ -#define MC_CMD_XPM_REPAIR_OUT_LEN 0 - - -/***********************************/ -/* MC_CMD_XPM_DECODER_TEST - * Test XPM memory address decoders for gross manufacturing defects. Can only - * be performed on an unprogrammed part. - */ -#define MC_CMD_XPM_DECODER_TEST 0x10a -#undef MC_CMD_0x10a_PRIVILEGE_CTG - -#define MC_CMD_0x10a_PRIVILEGE_CTG SRIOV_CTG_INSECURE - -/* MC_CMD_XPM_DECODER_TEST_IN msgrequest */ -#define MC_CMD_XPM_DECODER_TEST_IN_LEN 0 - -/* MC_CMD_XPM_DECODER_TEST_OUT msgresponse */ -#define MC_CMD_XPM_DECODER_TEST_OUT_LEN 0 - - -/***********************************/ -/* MC_CMD_XPM_WRITE_TEST - * XPM memory write test. Test XPM write logic for gross manufacturing defects - * by writing to a dedicated test row. There are 16 locations in the test row - * and the test can only be performed on locations that have not been - * previously used (i.e. can be run at most 16 times). The test will pick the - * first available location to use, or fail with ENOSPC if none left. - */ -#define MC_CMD_XPM_WRITE_TEST 0x10b -#undef MC_CMD_0x10b_PRIVILEGE_CTG - -#define MC_CMD_0x10b_PRIVILEGE_CTG SRIOV_CTG_INSECURE - -/* MC_CMD_XPM_WRITE_TEST_IN msgrequest */ -#define MC_CMD_XPM_WRITE_TEST_IN_LEN 0 - -/* MC_CMD_XPM_WRITE_TEST_OUT msgresponse */ -#define MC_CMD_XPM_WRITE_TEST_OUT_LEN 0 - - -/***********************************/ -/* MC_CMD_EXEC_SIGNED - * Check the CMAC of the contents of IMEM and DMEM against the value supplied - * and if correct begin execution from the start of IMEM. The caller supplies a - * key ID, the length of IMEM and DMEM to validate and the expected CMAC. CMAC - * computation runs from the start of IMEM, and from the start of DMEM + 16k, - * to match flash booting. The command will respond with EINVAL if the CMAC - * does not match, otherwise it will respond with success before it jumps to IMEM. - */ -#define MC_CMD_EXEC_SIGNED 0x10c -#undef MC_CMD_0x10c_PRIVILEGE_CTG - -#define MC_CMD_0x10c_PRIVILEGE_CTG SRIOV_CTG_ADMIN - -/* MC_CMD_EXEC_SIGNED_IN msgrequest */ -#define MC_CMD_EXEC_SIGNED_IN_LEN 28 -/* the length of code to include in the CMAC */ -#define MC_CMD_EXEC_SIGNED_IN_CODELEN_OFST 0 -#define MC_CMD_EXEC_SIGNED_IN_CODELEN_LEN 4 -/* the length of data to include in the CMAC */ -#define MC_CMD_EXEC_SIGNED_IN_DATALEN_OFST 4 -#define MC_CMD_EXEC_SIGNED_IN_DATALEN_LEN 4 -/* the XPM sector containing the key to use */ -#define MC_CMD_EXEC_SIGNED_IN_KEYSECTOR_OFST 8 -#define MC_CMD_EXEC_SIGNED_IN_KEYSECTOR_LEN 4 -/* the expected CMAC value */ -#define MC_CMD_EXEC_SIGNED_IN_CMAC_OFST 12 -#define MC_CMD_EXEC_SIGNED_IN_CMAC_LEN 16 - -/* MC_CMD_EXEC_SIGNED_OUT msgresponse */ -#define MC_CMD_EXEC_SIGNED_OUT_LEN 0 - - -/***********************************/ -/* MC_CMD_PREPARE_SIGNED - * Prepare to upload a signed image. This will scrub the specified length of - * the data region, which must be at least as large as the DATALEN supplied to - * MC_CMD_EXEC_SIGNED. 
- */ -#define MC_CMD_PREPARE_SIGNED 0x10d -#undef MC_CMD_0x10d_PRIVILEGE_CTG - -#define MC_CMD_0x10d_PRIVILEGE_CTG SRIOV_CTG_ADMIN - -/* MC_CMD_PREPARE_SIGNED_IN msgrequest */ -#define MC_CMD_PREPARE_SIGNED_IN_LEN 4 -/* the length of data area to clear */ -#define MC_CMD_PREPARE_SIGNED_IN_DATALEN_OFST 0 -#define MC_CMD_PREPARE_SIGNED_IN_DATALEN_LEN 4 - -/* MC_CMD_PREPARE_SIGNED_OUT msgresponse */ -#define MC_CMD_PREPARE_SIGNED_OUT_LEN 0 - - /* TUNNEL_ENCAP_UDP_PORT_ENTRY structuredef */ #define TUNNEL_ENCAP_UDP_PORT_ENTRY_LEN 4 /* UDP port (the standard ports are named below but any port may be used) */ @@ -23048,110 +21544,6 @@ #define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_RESETTING_WIDTH 1 -/***********************************/ -/* MC_CMD_RX_BALANCING - * Configure a port upconverter to distribute the packets on both RX engines. - * Packets are distributed based on a table with the destination vFIFO. The - * index of the table is a hash of source and destination of IPV4 and VLAN - * priority. - */ -#define MC_CMD_RX_BALANCING 0x118 -#undef MC_CMD_0x118_PRIVILEGE_CTG - -#define MC_CMD_0x118_PRIVILEGE_CTG SRIOV_CTG_ADMIN - -/* MC_CMD_RX_BALANCING_IN msgrequest */ -#define MC_CMD_RX_BALANCING_IN_LEN 16 -/* The RX port whose upconverter table will be modified */ -#define MC_CMD_RX_BALANCING_IN_PORT_OFST 0 -#define MC_CMD_RX_BALANCING_IN_PORT_LEN 4 -/* The VLAN priority associated to the table index and vFIFO */ -#define MC_CMD_RX_BALANCING_IN_PRIORITY_OFST 4 -#define MC_CMD_RX_BALANCING_IN_PRIORITY_LEN 4 -/* The resulting bit of SRC^DST for indexing the table */ -#define MC_CMD_RX_BALANCING_IN_SRC_DST_OFST 8 -#define MC_CMD_RX_BALANCING_IN_SRC_DST_LEN 4 -/* The RX engine to which the vFIFO in the table entry will point to */ -#define MC_CMD_RX_BALANCING_IN_ENG_OFST 12 -#define MC_CMD_RX_BALANCING_IN_ENG_LEN 4 - -/* MC_CMD_RX_BALANCING_OUT msgresponse */ -#define MC_CMD_RX_BALANCING_OUT_LEN 0 - - -/***********************************/ -/* MC_CMD_NVRAM_PRIVATE_APPEND - * Append a single TLV to the MC_USAGE_TLV partition. Returns MC_CMD_ERR_EEXIST - * if the tag is already present. - */ -#define MC_CMD_NVRAM_PRIVATE_APPEND 0x11c -#undef MC_CMD_0x11c_PRIVILEGE_CTG - -#define MC_CMD_0x11c_PRIVILEGE_CTG SRIOV_CTG_ADMIN - -/* MC_CMD_NVRAM_PRIVATE_APPEND_IN msgrequest */ -#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_LENMIN 9 -#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_LENMAX 252 -#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_LENMAX_MCDI2 1020 -#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_LEN(num) (8+1*(num)) -#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_DATA_BUFFER_NUM(len) (((len)-8)/1) -/* The tag to be appended */ -#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_TAG_OFST 0 -#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_TAG_LEN 4 -/* The length of the data */ -#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_LENGTH_OFST 4 -#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_LENGTH_LEN 4 -/* The data to be contained in the TLV structure */ -#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_DATA_BUFFER_OFST 8 -#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_DATA_BUFFER_LEN 1 -#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_DATA_BUFFER_MINNUM 1 -#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_DATA_BUFFER_MAXNUM 244 -#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_DATA_BUFFER_MAXNUM_MCDI2 1012 - -/* MC_CMD_NVRAM_PRIVATE_APPEND_OUT msgresponse */ -#define MC_CMD_NVRAM_PRIVATE_APPEND_OUT_LEN 0 - - -/***********************************/ -/* MC_CMD_XPM_VERIFY_CONTENTS - * Verify that the contents of the XPM memory is correct (Medford only). 
This - * is used during manufacture to check that the XPM memory has been programmed - * correctly at ATE. - */ -#define MC_CMD_XPM_VERIFY_CONTENTS 0x11b -#undef MC_CMD_0x11b_PRIVILEGE_CTG - -#define MC_CMD_0x11b_PRIVILEGE_CTG SRIOV_CTG_ADMIN - -/* MC_CMD_XPM_VERIFY_CONTENTS_IN msgrequest */ -#define MC_CMD_XPM_VERIFY_CONTENTS_IN_LEN 4 -/* Data type to be checked */ -#define MC_CMD_XPM_VERIFY_CONTENTS_IN_DATA_TYPE_OFST 0 -#define MC_CMD_XPM_VERIFY_CONTENTS_IN_DATA_TYPE_LEN 4 - -/* MC_CMD_XPM_VERIFY_CONTENTS_OUT msgresponse */ -#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_LENMIN 12 -#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_LENMAX 252 -#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_LENMAX_MCDI2 1020 -#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_LEN(num) (12+1*(num)) -#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIGNATURE_NUM(len) (((len)-12)/1) -/* Number of sectors found (test builds only) */ -#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_NUM_SECTORS_OFST 0 -#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_NUM_SECTORS_LEN 4 -/* Number of bytes found (test builds only) */ -#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_NUM_BYTES_OFST 4 -#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_NUM_BYTES_LEN 4 -/* Length of signature */ -#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIG_LENGTH_OFST 8 -#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIG_LENGTH_LEN 4 -/* Signature */ -#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIGNATURE_OFST 12 -#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIGNATURE_LEN 1 -#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIGNATURE_MINNUM 0 -#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIGNATURE_MAXNUM 240 -#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIGNATURE_MAXNUM_MCDI2 1008 - - /***********************************/ /* MC_CMD_SET_EVQ_TMR * Update the timer load, timer reload and timer mode values for a given EVQ. @@ -23242,595 +21634,25 @@ #define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_STEP_NS_LEN 4 /* For timers updated using the bug35388 workaround, this is the time interval * (in nanoseconds) for each increment of the timer load/reload count. The - * requested duration of a timer is this value multiplied by the timer - * load/reload count. This field is only meaningful if the bug35388 workaround - * is enabled. - */ -#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_NS_PER_COUNT_OFST 24 -#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_NS_PER_COUNT_LEN 4 -/* For timers updated using the bug35388 workaround, this is the maximum value - * allowed for timer load/reload counts. This field is only meaningful if the - * bug35388 workaround is enabled. - */ -#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_MAX_COUNT_OFST 28 -#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_MAX_COUNT_LEN 4 -/* For timers updated using the bug35388 workaround, timer load/reload counts - * not a multiple of this step size will be rounded in an implementation - * defined manner. This field is only meaningful if the bug35388 workaround is - * enabled. - */ -#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_STEP_OFST 32 -#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_STEP_LEN 4 - - -/***********************************/ -/* MC_CMD_ALLOCATE_TX_VFIFO_CP - * When we use the TX_vFIFO_ULL mode, we can allocate common pools using the - * non used switch buffers. - */ -#define MC_CMD_ALLOCATE_TX_VFIFO_CP 0x11d -#undef MC_CMD_0x11d_PRIVILEGE_CTG - -#define MC_CMD_0x11d_PRIVILEGE_CTG SRIOV_CTG_GENERAL - -/* MC_CMD_ALLOCATE_TX_VFIFO_CP_IN msgrequest */ -#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_LEN 20 -/* Desired instance. 
Must be set to a specific instance, which is a function - * local queue index. The calling client must be the currently-assigned user of - * this VI (see MC_CMD_SET_VI_USER). - */ -#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_INSTANCE_OFST 0 -#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_INSTANCE_LEN 4 -/* Will the common pool be used as TX_vFIFO_ULL (1) */ -#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_MODE_OFST 4 -#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_MODE_LEN 4 -#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_ENABLED 0x1 /* enum */ -/* enum: Using this interface without TX_vFIFO_ULL is not supported for now */ -#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_DISABLED 0x0 -/* Number of buffers to reserve for the common pool */ -#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_SIZE_OFST 8 -#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_SIZE_LEN 4 -/* TX datapath to which the Common Pool is connected to. */ -#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_INGRESS_OFST 12 -#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_INGRESS_LEN 4 -/* enum: Extracts information from function */ -#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_USE_FUNCTION_VALUE -0x1 -/* Network port or RX Engine to which the common pool connects. */ -#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_EGRESS_OFST 16 -#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_EGRESS_LEN 4 -/* enum: Extracts information from function */ -/* MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_USE_FUNCTION_VALUE -0x1 */ -#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_PORT0 0x0 /* enum */ -#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_PORT1 0x1 /* enum */ -#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_PORT2 0x2 /* enum */ -#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_PORT3 0x3 /* enum */ -/* enum: To enable Switch loopback with Rx engine 0 */ -#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_RX_ENGINE0 0x4 -/* enum: To enable Switch loopback with Rx engine 1 */ -#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_RX_ENGINE1 0x5 - -/* MC_CMD_ALLOCATE_TX_VFIFO_CP_OUT msgresponse */ -#define MC_CMD_ALLOCATE_TX_VFIFO_CP_OUT_LEN 4 -/* ID of the common pool allocated */ -#define MC_CMD_ALLOCATE_TX_VFIFO_CP_OUT_CP_ID_OFST 0 -#define MC_CMD_ALLOCATE_TX_VFIFO_CP_OUT_CP_ID_LEN 4 - - -/***********************************/ -/* MC_CMD_ALLOCATE_TX_VFIFO_VFIFO - * When we use the TX_vFIFO_ULL mode, we can allocate vFIFOs using the - * previously allocated common pools. 
- */ -#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO 0x11e -#undef MC_CMD_0x11e_PRIVILEGE_CTG - -#define MC_CMD_0x11e_PRIVILEGE_CTG SRIOV_CTG_GENERAL - -/* MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN msgrequest */ -#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_LEN 20 -/* Common pool previously allocated to which the new vFIFO will be associated - */ -#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_CP_OFST 0 -#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_CP_LEN 4 -/* Port or RX engine to associate the vFIFO egress */ -#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_EGRESS_OFST 4 -#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_EGRESS_LEN 4 -/* enum: Extracts information from common pool */ -#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_USE_CP_VALUE -0x1 -#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PORT0 0x0 /* enum */ -#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PORT1 0x1 /* enum */ -#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PORT2 0x2 /* enum */ -#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PORT3 0x3 /* enum */ -/* enum: To enable Switch loopback with Rx engine 0 */ -#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_RX_ENGINE0 0x4 -/* enum: To enable Switch loopback with Rx engine 1 */ -#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_RX_ENGINE1 0x5 -/* Minimum number of buffers that the pool must have */ -#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_SIZE_OFST 8 -#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_SIZE_LEN 4 -/* enum: Do not check the space available */ -#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_NO_MINIMUM 0x0 -/* Will the vFIFO be used as TX_vFIFO_ULL */ -#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_MODE_OFST 12 -#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_MODE_LEN 4 -/* Network priority of the vFIFO,if applicable */ -#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PRIORITY_OFST 16 -#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PRIORITY_LEN 4 -/* enum: Search for the lowest unused priority */ -#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_LOWEST_AVAILABLE -0x1 - -/* MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT msgresponse */ -#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_LEN 8 -/* Short vFIFO ID */ -#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_VID_OFST 0 -#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_VID_LEN 4 -/* Network priority of the vFIFO */ -#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_PRIORITY_OFST 4 -#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_PRIORITY_LEN 4 - - -/***********************************/ -/* MC_CMD_TEARDOWN_TX_VFIFO_VF - * This interface clears the configuration of the given vFIFO and leaves it - * ready to be re-used. - */ -#define MC_CMD_TEARDOWN_TX_VFIFO_VF 0x11f -#undef MC_CMD_0x11f_PRIVILEGE_CTG - -#define MC_CMD_0x11f_PRIVILEGE_CTG SRIOV_CTG_GENERAL - -/* MC_CMD_TEARDOWN_TX_VFIFO_VF_IN msgrequest */ -#define MC_CMD_TEARDOWN_TX_VFIFO_VF_IN_LEN 4 -/* Short vFIFO ID */ -#define MC_CMD_TEARDOWN_TX_VFIFO_VF_IN_VFIFO_OFST 0 -#define MC_CMD_TEARDOWN_TX_VFIFO_VF_IN_VFIFO_LEN 4 - -/* MC_CMD_TEARDOWN_TX_VFIFO_VF_OUT msgresponse */ -#define MC_CMD_TEARDOWN_TX_VFIFO_VF_OUT_LEN 0 - - -/***********************************/ -/* MC_CMD_DEALLOCATE_TX_VFIFO_CP - * This interface clears the configuration of the given common pool and leaves - * it ready to be re-used. 
- */ -#define MC_CMD_DEALLOCATE_TX_VFIFO_CP 0x121 -#undef MC_CMD_0x121_PRIVILEGE_CTG - -#define MC_CMD_0x121_PRIVILEGE_CTG SRIOV_CTG_GENERAL - -/* MC_CMD_DEALLOCATE_TX_VFIFO_CP_IN msgrequest */ -#define MC_CMD_DEALLOCATE_TX_VFIFO_CP_IN_LEN 4 -/* Common pool ID given when pool allocated */ -#define MC_CMD_DEALLOCATE_TX_VFIFO_CP_IN_POOL_ID_OFST 0 -#define MC_CMD_DEALLOCATE_TX_VFIFO_CP_IN_POOL_ID_LEN 4 - -/* MC_CMD_DEALLOCATE_TX_VFIFO_CP_OUT msgresponse */ -#define MC_CMD_DEALLOCATE_TX_VFIFO_CP_OUT_LEN 0 - - -/***********************************/ -/* MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS - * This interface allows the host to find out how many common pool buffers are - * not yet assigned. - */ -#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS 0x124 -#undef MC_CMD_0x124_PRIVILEGE_CTG - -#define MC_CMD_0x124_PRIVILEGE_CTG SRIOV_CTG_GENERAL - -/* MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_IN msgrequest */ -#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_IN_LEN 0 - -/* MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT msgresponse */ -#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_LEN 8 -/* Available buffers for the ENG to NET vFIFOs. */ -#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_NET_OFST 0 -#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_NET_LEN 4 -/* Available buffers for the ENG to ENG and NET to ENG vFIFOs. */ -#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_ENG_OFST 4 -#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_ENG_LEN 4 - - -/***********************************/ -/* MC_CMD_SUC_VERSION - * Get the version of the SUC - */ -#define MC_CMD_SUC_VERSION 0x134 -#undef MC_CMD_0x134_PRIVILEGE_CTG - -#define MC_CMD_0x134_PRIVILEGE_CTG SRIOV_CTG_GENERAL - -/* MC_CMD_SUC_VERSION_IN msgrequest */ -#define MC_CMD_SUC_VERSION_IN_LEN 0 - -/* MC_CMD_SUC_VERSION_OUT msgresponse */ -#define MC_CMD_SUC_VERSION_OUT_LEN 24 -/* The SUC firmware version as four numbers - a.b.c.d */ -#define MC_CMD_SUC_VERSION_OUT_VERSION_OFST 0 -#define MC_CMD_SUC_VERSION_OUT_VERSION_LEN 4 -#define MC_CMD_SUC_VERSION_OUT_VERSION_NUM 4 -/* The date, in seconds since the Unix epoch, when the firmware image was - * built. - */ -#define MC_CMD_SUC_VERSION_OUT_BUILD_DATE_OFST 16 -#define MC_CMD_SUC_VERSION_OUT_BUILD_DATE_LEN 4 -/* The ID of the SUC chip. This is specific to the platform but typically - * indicates family, memory sizes etc. See SF-116728-SW for further details. - */ -#define MC_CMD_SUC_VERSION_OUT_CHIP_ID_OFST 20 -#define MC_CMD_SUC_VERSION_OUT_CHIP_ID_LEN 4 - -/* MC_CMD_SUC_BOOT_VERSION_IN msgrequest: Get the version of the SUC boot - * loader. - */ -#define MC_CMD_SUC_BOOT_VERSION_IN_LEN 4 -#define MC_CMD_SUC_BOOT_VERSION_IN_MAGIC_OFST 0 -#define MC_CMD_SUC_BOOT_VERSION_IN_MAGIC_LEN 4 -/* enum: Requests the SUC boot version. */ -#define MC_CMD_SUC_VERSION_GET_BOOT_VERSION 0xb007700b - -/* MC_CMD_SUC_BOOT_VERSION_OUT msgresponse */ -#define MC_CMD_SUC_BOOT_VERSION_OUT_LEN 4 -/* The SUC boot version */ -#define MC_CMD_SUC_BOOT_VERSION_OUT_VERSION_OFST 0 -#define MC_CMD_SUC_BOOT_VERSION_OUT_VERSION_LEN 4 - - -/***********************************/ -/* MC_CMD_GET_RX_PREFIX_ID - * This command is part of the mechanism for configuring the format of the RX - * packet prefix. It takes as input a bitmask of the fields the host would like - * to be in the prefix. If the hardware supports RX prefixes with that - * combination of fields, then this command returns a list of prefix-ids, - * opaque identifiers suitable for use in the RX_PREFIX_ID field of a - * MC_CMD_INIT_RXQ_V5_IN message. 
If the combination of fields is not - * supported, returns ENOTSUP. If the firmware can't create any new prefix-ids - * due to resource constraints, returns ENOSPC. - */ -#define MC_CMD_GET_RX_PREFIX_ID 0x13b -#undef MC_CMD_0x13b_PRIVILEGE_CTG - -#define MC_CMD_0x13b_PRIVILEGE_CTG SRIOV_CTG_GENERAL - -/* MC_CMD_GET_RX_PREFIX_ID_IN msgrequest */ -#define MC_CMD_GET_RX_PREFIX_ID_IN_LEN 8 -/* Field bitmask. */ -#define MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_OFST 0 -#define MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_LEN 8 -#define MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_LO_OFST 0 -#define MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_LO_LEN 4 -#define MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_LO_LBN 0 -#define MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_LO_WIDTH 32 -#define MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_HI_OFST 4 -#define MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_HI_LEN 4 -#define MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_HI_LBN 32 -#define MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_HI_WIDTH 32 -#define MC_CMD_GET_RX_PREFIX_ID_IN_LENGTH_OFST 0 -#define MC_CMD_GET_RX_PREFIX_ID_IN_LENGTH_LBN 0 -#define MC_CMD_GET_RX_PREFIX_ID_IN_LENGTH_WIDTH 1 -#define MC_CMD_GET_RX_PREFIX_ID_IN_RSS_HASH_VALID_OFST 0 -#define MC_CMD_GET_RX_PREFIX_ID_IN_RSS_HASH_VALID_LBN 1 -#define MC_CMD_GET_RX_PREFIX_ID_IN_RSS_HASH_VALID_WIDTH 1 -#define MC_CMD_GET_RX_PREFIX_ID_IN_USER_FLAG_OFST 0 -#define MC_CMD_GET_RX_PREFIX_ID_IN_USER_FLAG_LBN 2 -#define MC_CMD_GET_RX_PREFIX_ID_IN_USER_FLAG_WIDTH 1 -#define MC_CMD_GET_RX_PREFIX_ID_IN_CLASS_OFST 0 -#define MC_CMD_GET_RX_PREFIX_ID_IN_CLASS_LBN 3 -#define MC_CMD_GET_RX_PREFIX_ID_IN_CLASS_WIDTH 1 -#define MC_CMD_GET_RX_PREFIX_ID_IN_PARTIAL_TSTAMP_OFST 0 -#define MC_CMD_GET_RX_PREFIX_ID_IN_PARTIAL_TSTAMP_LBN 4 -#define MC_CMD_GET_RX_PREFIX_ID_IN_PARTIAL_TSTAMP_WIDTH 1 -#define MC_CMD_GET_RX_PREFIX_ID_IN_RSS_HASH_OFST 0 -#define MC_CMD_GET_RX_PREFIX_ID_IN_RSS_HASH_LBN 5 -#define MC_CMD_GET_RX_PREFIX_ID_IN_RSS_HASH_WIDTH 1 -#define MC_CMD_GET_RX_PREFIX_ID_IN_USER_MARK_OFST 0 -#define MC_CMD_GET_RX_PREFIX_ID_IN_USER_MARK_LBN 6 -#define MC_CMD_GET_RX_PREFIX_ID_IN_USER_MARK_WIDTH 1 -#define MC_CMD_GET_RX_PREFIX_ID_IN_INGRESS_MPORT_OFST 0 -#define MC_CMD_GET_RX_PREFIX_ID_IN_INGRESS_MPORT_LBN 7 -#define MC_CMD_GET_RX_PREFIX_ID_IN_INGRESS_MPORT_WIDTH 1 -#define MC_CMD_GET_RX_PREFIX_ID_IN_INGRESS_VPORT_OFST 0 -#define MC_CMD_GET_RX_PREFIX_ID_IN_INGRESS_VPORT_LBN 7 -#define MC_CMD_GET_RX_PREFIX_ID_IN_INGRESS_VPORT_WIDTH 1 -#define MC_CMD_GET_RX_PREFIX_ID_IN_CSUM_FRAME_OFST 0 -#define MC_CMD_GET_RX_PREFIX_ID_IN_CSUM_FRAME_LBN 8 -#define MC_CMD_GET_RX_PREFIX_ID_IN_CSUM_FRAME_WIDTH 1 -#define MC_CMD_GET_RX_PREFIX_ID_IN_VLAN_STRIP_TCI_OFST 0 -#define MC_CMD_GET_RX_PREFIX_ID_IN_VLAN_STRIP_TCI_LBN 9 -#define MC_CMD_GET_RX_PREFIX_ID_IN_VLAN_STRIP_TCI_WIDTH 1 -#define MC_CMD_GET_RX_PREFIX_ID_IN_VLAN_STRIPPED_OFST 0 -#define MC_CMD_GET_RX_PREFIX_ID_IN_VLAN_STRIPPED_LBN 10 -#define MC_CMD_GET_RX_PREFIX_ID_IN_VLAN_STRIPPED_WIDTH 1 -#define MC_CMD_GET_RX_PREFIX_ID_IN_VSWITCH_STATUS_OFST 0 -#define MC_CMD_GET_RX_PREFIX_ID_IN_VSWITCH_STATUS_LBN 11 -#define MC_CMD_GET_RX_PREFIX_ID_IN_VSWITCH_STATUS_WIDTH 1 - -/* MC_CMD_GET_RX_PREFIX_ID_OUT msgresponse */ -#define MC_CMD_GET_RX_PREFIX_ID_OUT_LENMIN 8 -#define MC_CMD_GET_RX_PREFIX_ID_OUT_LENMAX 252 -#define MC_CMD_GET_RX_PREFIX_ID_OUT_LENMAX_MCDI2 1020 -#define MC_CMD_GET_RX_PREFIX_ID_OUT_LEN(num) (4+4*(num)) -#define MC_CMD_GET_RX_PREFIX_ID_OUT_RX_PREFIX_ID_NUM(len) (((len)-4)/4) -/* Number of prefix-ids returned */ -#define MC_CMD_GET_RX_PREFIX_ID_OUT_NUM_RX_PREFIX_IDS_OFST 0 -#define 
MC_CMD_GET_RX_PREFIX_ID_OUT_NUM_RX_PREFIX_IDS_LEN 4 -/* Opaque prefix identifiers which can be passed into MC_CMD_INIT_RXQ_V5 or - * MC_CMD_QUERY_PREFIX_ID - */ -#define MC_CMD_GET_RX_PREFIX_ID_OUT_RX_PREFIX_ID_OFST 4 -#define MC_CMD_GET_RX_PREFIX_ID_OUT_RX_PREFIX_ID_LEN 4 -#define MC_CMD_GET_RX_PREFIX_ID_OUT_RX_PREFIX_ID_MINNUM 1 -#define MC_CMD_GET_RX_PREFIX_ID_OUT_RX_PREFIX_ID_MAXNUM 62 -#define MC_CMD_GET_RX_PREFIX_ID_OUT_RX_PREFIX_ID_MAXNUM_MCDI2 254 - -/* RX_PREFIX_FIELD_INFO structuredef: Information about a single RX prefix - * field - */ -#define RX_PREFIX_FIELD_INFO_LEN 4 -/* The offset of the field from the start of the prefix, in bits */ -#define RX_PREFIX_FIELD_INFO_OFFSET_BITS_OFST 0 -#define RX_PREFIX_FIELD_INFO_OFFSET_BITS_LEN 2 -#define RX_PREFIX_FIELD_INFO_OFFSET_BITS_LBN 0 -#define RX_PREFIX_FIELD_INFO_OFFSET_BITS_WIDTH 16 -/* The width of the field, in bits */ -#define RX_PREFIX_FIELD_INFO_WIDTH_BITS_OFST 2 -#define RX_PREFIX_FIELD_INFO_WIDTH_BITS_LEN 1 -#define RX_PREFIX_FIELD_INFO_WIDTH_BITS_LBN 16 -#define RX_PREFIX_FIELD_INFO_WIDTH_BITS_WIDTH 8 -/* The type of the field. These enum values are in the same order as the fields - * in the MC_CMD_GET_RX_PREFIX_ID_IN bitmask - */ -#define RX_PREFIX_FIELD_INFO_TYPE_OFST 3 -#define RX_PREFIX_FIELD_INFO_TYPE_LEN 1 -#define RX_PREFIX_FIELD_INFO_LENGTH 0x0 /* enum */ -#define RX_PREFIX_FIELD_INFO_RSS_HASH_VALID 0x1 /* enum */ -#define RX_PREFIX_FIELD_INFO_USER_FLAG 0x2 /* enum */ -#define RX_PREFIX_FIELD_INFO_CLASS 0x3 /* enum */ -#define RX_PREFIX_FIELD_INFO_PARTIAL_TSTAMP 0x4 /* enum */ -#define RX_PREFIX_FIELD_INFO_RSS_HASH 0x5 /* enum */ -#define RX_PREFIX_FIELD_INFO_USER_MARK 0x6 /* enum */ -#define RX_PREFIX_FIELD_INFO_INGRESS_MPORT 0x7 /* enum */ -#define RX_PREFIX_FIELD_INFO_INGRESS_VPORT 0x7 /* enum */ -#define RX_PREFIX_FIELD_INFO_CSUM_FRAME 0x8 /* enum */ -#define RX_PREFIX_FIELD_INFO_VLAN_STRIP_TCI 0x9 /* enum */ -#define RX_PREFIX_FIELD_INFO_VLAN_STRIPPED 0xa /* enum */ -#define RX_PREFIX_FIELD_INFO_VSWITCH_STATUS 0xb /* enum */ -#define RX_PREFIX_FIELD_INFO_TYPE_LBN 24 -#define RX_PREFIX_FIELD_INFO_TYPE_WIDTH 8 - -/* RX_PREFIX_FIXED_RESPONSE structuredef: Information about an RX prefix in - * which every field has a fixed offset and width - */ -#define RX_PREFIX_FIXED_RESPONSE_LENMIN 4 -#define RX_PREFIX_FIXED_RESPONSE_LENMAX 252 -#define RX_PREFIX_FIXED_RESPONSE_LENMAX_MCDI2 1020 -#define RX_PREFIX_FIXED_RESPONSE_LEN(num) (4+4*(num)) -#define RX_PREFIX_FIXED_RESPONSE_FIELDS_NUM(len) (((len)-4)/4) -/* Length of the RX prefix in bytes */ -#define RX_PREFIX_FIXED_RESPONSE_PREFIX_LENGTH_BYTES_OFST 0 -#define RX_PREFIX_FIXED_RESPONSE_PREFIX_LENGTH_BYTES_LEN 1 -#define RX_PREFIX_FIXED_RESPONSE_PREFIX_LENGTH_BYTES_LBN 0 -#define RX_PREFIX_FIXED_RESPONSE_PREFIX_LENGTH_BYTES_WIDTH 8 -/* Number of fields present in the prefix */ -#define RX_PREFIX_FIXED_RESPONSE_FIELD_COUNT_OFST 1 -#define RX_PREFIX_FIXED_RESPONSE_FIELD_COUNT_LEN 1 -#define RX_PREFIX_FIXED_RESPONSE_FIELD_COUNT_LBN 8 -#define RX_PREFIX_FIXED_RESPONSE_FIELD_COUNT_WIDTH 8 -#define RX_PREFIX_FIXED_RESPONSE_RESERVED_OFST 2 -#define RX_PREFIX_FIXED_RESPONSE_RESERVED_LEN 2 -#define RX_PREFIX_FIXED_RESPONSE_RESERVED_LBN 16 -#define RX_PREFIX_FIXED_RESPONSE_RESERVED_WIDTH 16 -/* Array of RX_PREFIX_FIELD_INFO structures, of length FIELD_COUNT */ -#define RX_PREFIX_FIXED_RESPONSE_FIELDS_OFST 4 -#define RX_PREFIX_FIXED_RESPONSE_FIELDS_LEN 4 -#define RX_PREFIX_FIXED_RESPONSE_FIELDS_MINNUM 0 -#define RX_PREFIX_FIXED_RESPONSE_FIELDS_MAXNUM 62 -#define 
RX_PREFIX_FIXED_RESPONSE_FIELDS_MAXNUM_MCDI2 254 -#define RX_PREFIX_FIXED_RESPONSE_FIELDS_LBN 32 -#define RX_PREFIX_FIXED_RESPONSE_FIELDS_WIDTH 32 - - -/***********************************/ -/* MC_CMD_QUERY_RX_PREFIX_ID - * This command takes an RX prefix id (obtained from MC_CMD_GET_RX_PREFIX_ID) - * and returns a description of the RX prefix of packets delivered to an RXQ - * created with that prefix id - */ -#define MC_CMD_QUERY_RX_PREFIX_ID 0x13c -#undef MC_CMD_0x13c_PRIVILEGE_CTG - -#define MC_CMD_0x13c_PRIVILEGE_CTG SRIOV_CTG_GENERAL - -/* MC_CMD_QUERY_RX_PREFIX_ID_IN msgrequest */ -#define MC_CMD_QUERY_RX_PREFIX_ID_IN_LEN 4 -/* Prefix id to query */ -#define MC_CMD_QUERY_RX_PREFIX_ID_IN_RX_PREFIX_ID_OFST 0 -#define MC_CMD_QUERY_RX_PREFIX_ID_IN_RX_PREFIX_ID_LEN 4 - -/* MC_CMD_QUERY_RX_PREFIX_ID_OUT msgresponse */ -#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_LENMIN 4 -#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_LENMAX 252 -#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_LENMAX_MCDI2 1020 -#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_LEN(num) (4+1*(num)) -#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESPONSE_NUM(len) (((len)-4)/1) -/* An enum describing the structure of this response. */ -#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESPONSE_TYPE_OFST 0 -#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESPONSE_TYPE_LEN 1 -/* enum: The response is of format RX_PREFIX_FIXED_RESPONSE */ -#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESPONSE_TYPE_FIXED 0x0 -#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESERVED_OFST 1 -#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESERVED_LEN 3 -/* The response. Its format is as defined by the RESPONSE_TYPE value */ -#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESPONSE_OFST 4 -#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESPONSE_LEN 1 -#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESPONSE_MINNUM 0 -#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESPONSE_MAXNUM 248 -#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESPONSE_MAXNUM_MCDI2 1016 - - -/***********************************/ -/* MC_CMD_BUNDLE - * A command to perform various bundle-related operations on insecure cards. - */ -#define MC_CMD_BUNDLE 0x13d -#undef MC_CMD_0x13d_PRIVILEGE_CTG - -#define MC_CMD_0x13d_PRIVILEGE_CTG SRIOV_CTG_INSECURE - -/* MC_CMD_BUNDLE_IN msgrequest */ -#define MC_CMD_BUNDLE_IN_LEN 4 -/* Sub-command code */ -#define MC_CMD_BUNDLE_IN_OP_OFST 0 -#define MC_CMD_BUNDLE_IN_OP_LEN 4 -/* enum: Get the current host access mode set on component partitions. */ -#define MC_CMD_BUNDLE_IN_OP_COMPONENT_ACCESS_GET 0x0 -/* enum: Set the host access mode set on component partitions. */ -#define MC_CMD_BUNDLE_IN_OP_COMPONENT_ACCESS_SET 0x1 - -/* MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_GET_IN msgrequest: Retrieve the current - * access mode on component partitions such as MC_FIRMWARE, SUC_FIRMWARE and - * EXPANSION_UEFI. This command only works on engineering (insecure) cards. On - * secure adapters, this command returns MC_CMD_ERR_EPERM. - */ -#define MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_GET_IN_LEN 4 -/* Sub-command code. Must be OP_COMPONENT_ACCESS_GET. */ -#define MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_GET_IN_OP_OFST 0 -#define MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_GET_IN_OP_LEN 4 - -/* MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_GET_OUT msgresponse: Returns the access - * control mode. - */ -#define MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_GET_OUT_LEN 4 -/* Access mode of component partitions. */ -#define MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_GET_OUT_ACCESS_MODE_OFST 0 -#define MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_GET_OUT_ACCESS_MODE_LEN 4 -/* enum: Component partitions are read-only from the host. 
*/ -#define MC_CMD_BUNDLE_COMPONENTS_READ_ONLY 0x0 -/* enum: Component partitions can be read from and written to by the host. */ -#define MC_CMD_BUNDLE_COMPONENTS_READ_WRITE 0x1 - -/* MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_SET_IN msgrequest: The component - * partitions such as MC_FIRMWARE, SUC_FIRMWARE, EXPANSION_UEFI are set as - * read-only on firmware built with bundle support. This command marks these - * partitions as read/writeable. The access status set by this command does not - * persist across MC reboots. This command only works on engineering (insecure) - * cards. On secure adapters, this command returns MC_CMD_ERR_EPERM. - */ -#define MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_SET_IN_LEN 8 -/* Sub-command code. Must be OP_COMPONENT_ACCESS_SET. */ -#define MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_SET_IN_OP_OFST 0 -#define MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_SET_IN_OP_LEN 4 -/* Access mode of component partitions. */ -#define MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_SET_IN_ACCESS_MODE_OFST 4 -#define MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_SET_IN_ACCESS_MODE_LEN 4 -/* Enum values, see field(s): */ -/* MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_GET_OUT/ACCESS_MODE */ - -/* MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_SET_OUT msgresponse */ -#define MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_SET_OUT_LEN 0 - - -/***********************************/ -/* MC_CMD_GET_VPD - * Read all VPD starting from a given address + * requested duration of a timer is this value multiplied by the timer + * load/reload count. This field is only meaningful if the bug35388 workaround + * is enabled. */ -#define MC_CMD_GET_VPD 0x165 -#undef MC_CMD_0x165_PRIVILEGE_CTG - -#define MC_CMD_0x165_PRIVILEGE_CTG SRIOV_CTG_GENERAL - -/* MC_CMD_GET_VPD_IN msgrequest */ -#define MC_CMD_GET_VPD_IN_LEN 4 -/* VPD address to start from. In case the VPD is longer than the MCDI buffer - * (unlikely), the user can make multiple calls with different starting addresses. +#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_NS_PER_COUNT_OFST 24 +#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_NS_PER_COUNT_LEN 4 +/* For timers updated using the bug35388 workaround, this is the maximum value + * allowed for timer load/reload counts. This field is only meaningful if the + * bug35388 workaround is enabled. */ -#define MC_CMD_GET_VPD_IN_ADDR_OFST 0 -#define MC_CMD_GET_VPD_IN_ADDR_LEN 4 - -/* MC_CMD_GET_VPD_OUT msgresponse */ -#define MC_CMD_GET_VPD_OUT_LENMIN 0 -#define MC_CMD_GET_VPD_OUT_LENMAX 252 -#define MC_CMD_GET_VPD_OUT_LENMAX_MCDI2 1020 -#define MC_CMD_GET_VPD_OUT_LEN(num) (0+1*(num)) -#define MC_CMD_GET_VPD_OUT_DATA_NUM(len) (((len)-0)/1) -/* VPD data returned. */ -#define MC_CMD_GET_VPD_OUT_DATA_OFST 0 -#define MC_CMD_GET_VPD_OUT_DATA_LEN 1 -#define MC_CMD_GET_VPD_OUT_DATA_MINNUM 0 -#define MC_CMD_GET_VPD_OUT_DATA_MAXNUM 252 -#define MC_CMD_GET_VPD_OUT_DATA_MAXNUM_MCDI2 1020 - - -/***********************************/ -/* MC_CMD_GET_NCSI_INFO - * Provide information about the NC-SI stack - */ -#define MC_CMD_GET_NCSI_INFO 0x167 -#undef MC_CMD_0x167_PRIVILEGE_CTG - -#define MC_CMD_0x167_PRIVILEGE_CTG SRIOV_CTG_GENERAL - -/* MC_CMD_GET_NCSI_INFO_IN msgrequest */ -#define MC_CMD_GET_NCSI_INFO_IN_LEN 8 -/* Operation to be performed */ -#define MC_CMD_GET_NCSI_INFO_IN_OP_OFST 0 -#define MC_CMD_GET_NCSI_INFO_IN_OP_LEN 4 -/* enum: Information on the link settings. 
*/ -#define MC_CMD_GET_NCSI_INFO_IN_OP_LINK 0x0 -/* enum: Statistics associated with the channel */ -#define MC_CMD_GET_NCSI_INFO_IN_OP_STATISTICS 0x1 -/* The NC-SI channel on which the operation is to be performed */ -#define MC_CMD_GET_NCSI_INFO_IN_CHANNEL_OFST 4 -#define MC_CMD_GET_NCSI_INFO_IN_CHANNEL_LEN 4 - -/* MC_CMD_GET_NCSI_INFO_LINK_OUT msgresponse */ -#define MC_CMD_GET_NCSI_INFO_LINK_OUT_LEN 12 -/* Settings as received from BMC. */ -#define MC_CMD_GET_NCSI_INFO_LINK_OUT_SETTINGS_OFST 0 -#define MC_CMD_GET_NCSI_INFO_LINK_OUT_SETTINGS_LEN 4 -/* Advertised capabilities applied to channel. */ -#define MC_CMD_GET_NCSI_INFO_LINK_OUT_ADV_CAP_OFST 4 -#define MC_CMD_GET_NCSI_INFO_LINK_OUT_ADV_CAP_LEN 4 -/* General status */ -#define MC_CMD_GET_NCSI_INFO_LINK_OUT_STATUS_OFST 8 -#define MC_CMD_GET_NCSI_INFO_LINK_OUT_STATUS_LEN 4 -#define MC_CMD_GET_NCSI_INFO_LINK_OUT_STATE_OFST 8 -#define MC_CMD_GET_NCSI_INFO_LINK_OUT_STATE_LBN 0 -#define MC_CMD_GET_NCSI_INFO_LINK_OUT_STATE_WIDTH 2 -#define MC_CMD_GET_NCSI_INFO_LINK_OUT_ENABLE_OFST 8 -#define MC_CMD_GET_NCSI_INFO_LINK_OUT_ENABLE_LBN 2 -#define MC_CMD_GET_NCSI_INFO_LINK_OUT_ENABLE_WIDTH 1 -#define MC_CMD_GET_NCSI_INFO_LINK_OUT_NETWORK_TX_OFST 8 -#define MC_CMD_GET_NCSI_INFO_LINK_OUT_NETWORK_TX_LBN 3 -#define MC_CMD_GET_NCSI_INFO_LINK_OUT_NETWORK_TX_WIDTH 1 -#define MC_CMD_GET_NCSI_INFO_LINK_OUT_ATTACHED_OFST 8 -#define MC_CMD_GET_NCSI_INFO_LINK_OUT_ATTACHED_LBN 4 -#define MC_CMD_GET_NCSI_INFO_LINK_OUT_ATTACHED_WIDTH 1 - -/* MC_CMD_GET_NCSI_INFO_STATISTICS_OUT msgresponse */ -#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_LEN 28 -/* The number of NC-SI commands received. */ -#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_CMDS_RX_OFST 0 -#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_CMDS_RX_LEN 4 -/* The number of NC-SI commands dropped. */ -#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_PKTS_DROPPED_OFST 4 -#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_PKTS_DROPPED_LEN 4 -/* The number of invalid NC-SI commands received. */ -#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_CMD_TYPE_ERRS_OFST 8 -#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_CMD_TYPE_ERRS_LEN 4 -/* The number of checksum errors seen. */ -#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_CMD_CSUM_ERRS_OFST 12 -#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_CMD_CSUM_ERRS_LEN 4 -/* The number of NC-SI requests received. */ -#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_RX_PKTS_OFST 16 -#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_RX_PKTS_LEN 4 -/* The number of NC-SI responses sent (includes AENs) */ -#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_TX_PKTS_OFST 20 -#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_TX_PKTS_LEN 4 -/* The number of NC-SI AENs sent */ -#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_AENS_SENT_OFST 24 -#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_AENS_SENT_LEN 4 +#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_MAX_COUNT_OFST 28 +#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_MAX_COUNT_LEN 4 +/* For timers updated using the bug35388 workaround, timer load/reload counts + * not a multiple of this step size will be rounded in an implementation + * defined manner. This field is only meaningful if the bug35388 workaround is + * enabled. + */ +#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_STEP_OFST 32 +#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_STEP_LEN 4 /* CLIENT_HANDLE structuredef: A client is an abstract entity that can make * requests of the device and that can own resources managed by the device. 
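A removed query such as MC_CMD_GET_NCSI_INFO above would have been issued through the same MCDI helper pattern. A minimal sketch, again assuming the sfc driver's MCDI_DECLARE_BUF/MCDI_SET_DWORD/MCDI_DWORD helpers and efx_mcdi_rpc(); the get_ncsi_link_state() wrapper and its channel parameter are illustrative, not part of the original definitions.

/* Hypothetical wrapper: fetch the 2-bit STATE field from the NC-SI
 * link STATUS dword for one channel.
 */
static int get_ncsi_link_state(struct efx_nic *efx, u32 channel, u32 *state)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_NCSI_INFO_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_NCSI_INFO_LINK_OUT_LEN);
	size_t outlen;
	int rc;

	/* Select the LINK sub-operation and the target channel. */
	MCDI_SET_DWORD(inbuf, GET_NCSI_INFO_IN_OP,
		       MC_CMD_GET_NCSI_INFO_IN_OP_LINK);
	MCDI_SET_DWORD(inbuf, GET_NCSI_INFO_IN_CHANNEL, channel);

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_NCSI_INFO, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < MC_CMD_GET_NCSI_INFO_LINK_OUT_LEN)
		return -EIO;

	/* STATE occupies bits [1:0] of the STATUS dword (LBN 0, WIDTH 2). */
	*state = MCDI_DWORD(outbuf, GET_NCSI_INFO_LINK_OUT_STATUS) & 0x3;
	return 0;
}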
@@ -23848,59 +21670,10 @@ #define CLIENT_HANDLE_OPAQUE_LBN 0 #define CLIENT_HANDLE_OPAQUE_WIDTH 32 -/* CLOCK_INFO structuredef: Information about a single hardware clock */ -#define CLOCK_INFO_LEN 28 -/* Enumeration that uniquely identifies the clock */ -#define CLOCK_INFO_CLOCK_ID_OFST 0 -#define CLOCK_INFO_CLOCK_ID_LEN 2 -/* enum: The Riverhead CMC (card MC) */ -#define CLOCK_INFO_CLOCK_CMC 0x0 -/* enum: The Riverhead NMC (network MC) */ -#define CLOCK_INFO_CLOCK_NMC 0x1 -/* enum: The Riverhead SDNET slice main logic */ -#define CLOCK_INFO_CLOCK_SDNET 0x2 -/* enum: The Riverhead SDNET LUT */ -#define CLOCK_INFO_CLOCK_SDNET_LUT 0x3 -/* enum: The Riverhead SDNET control logic */ -#define CLOCK_INFO_CLOCK_SDNET_CTRL 0x4 -/* enum: The Riverhead Streaming SubSystem */ -#define CLOCK_INFO_CLOCK_SSS 0x5 -/* enum: The Riverhead network MAC and associated CSR registers */ -#define CLOCK_INFO_CLOCK_MAC 0x6 -#define CLOCK_INFO_CLOCK_ID_LBN 0 -#define CLOCK_INFO_CLOCK_ID_WIDTH 16 -/* Assorted flags */ -#define CLOCK_INFO_FLAGS_OFST 2 -#define CLOCK_INFO_FLAGS_LEN 2 -#define CLOCK_INFO_SETTABLE_OFST 2 -#define CLOCK_INFO_SETTABLE_LBN 0 -#define CLOCK_INFO_SETTABLE_WIDTH 1 -#define CLOCK_INFO_FLAGS_LBN 16 -#define CLOCK_INFO_FLAGS_WIDTH 16 -/* The frequency in HZ */ -#define CLOCK_INFO_FREQUENCY_OFST 4 -#define CLOCK_INFO_FREQUENCY_LEN 8 -#define CLOCK_INFO_FREQUENCY_LO_OFST 4 -#define CLOCK_INFO_FREQUENCY_LO_LEN 4 -#define CLOCK_INFO_FREQUENCY_LO_LBN 32 -#define CLOCK_INFO_FREQUENCY_LO_WIDTH 32 -#define CLOCK_INFO_FREQUENCY_HI_OFST 8 -#define CLOCK_INFO_FREQUENCY_HI_LEN 4 -#define CLOCK_INFO_FREQUENCY_HI_LBN 64 -#define CLOCK_INFO_FREQUENCY_HI_WIDTH 32 -#define CLOCK_INFO_FREQUENCY_LBN 32 -#define CLOCK_INFO_FREQUENCY_WIDTH 64 -/* Human-readable ASCII name for clock, with NUL termination */ -#define CLOCK_INFO_NAME_OFST 12 -#define CLOCK_INFO_NAME_LEN 1 -#define CLOCK_INFO_NAME_NUM 16 -#define CLOCK_INFO_NAME_LBN 96 -#define CLOCK_INFO_NAME_WIDTH 8 - /* SCHED_CREDIT_CHECK_RESULT structuredef */ #define SCHED_CREDIT_CHECK_RESULT_LEN 16 -/* The instance of the scheduler. Refer to XN-200389-AW for the location of - * these schedulers in the hardware. +/* The instance of the scheduler. Refer to XN-200389-AW (snic/hnic) and + * XN-200425-TC (cdx) for the location of these schedulers in the hardware. */ #define SCHED_CREDIT_CHECK_RESULT_SCHED_INSTANCE_OFST 0 #define SCHED_CREDIT_CHECK_RESULT_SCHED_INSTANCE_LEN 1 @@ -23914,6 +21687,16 @@ #define SCHED_CREDIT_CHECK_RESULT_DMAC_H2C 0x7 /* enum */ #define SCHED_CREDIT_CHECK_RESULT_HUB_NET_B 0x8 /* enum */ #define SCHED_CREDIT_CHECK_RESULT_HUB_NET_REPLAY 0x9 /* enum */ +#define SCHED_CREDIT_CHECK_RESULT_ADAPTER_C2H_C 0xa /* enum */ +#define SCHED_CREDIT_CHECK_RESULT_A2_H2C_C 0xb /* enum */ +#define SCHED_CREDIT_CHECK_RESULT_A3_SOFT_ADAPTOR_C 0xc /* enum */ +#define SCHED_CREDIT_CHECK_RESULT_A4_DPU_WRITE_C 0xd /* enum */ +#define SCHED_CREDIT_CHECK_RESULT_JRC_RRU 0xe /* enum */ +#define SCHED_CREDIT_CHECK_RESULT_CDM_SINK 0xf /* enum */ +#define SCHED_CREDIT_CHECK_RESULT_PCIE_SINK 0x10 /* enum */ +#define SCHED_CREDIT_CHECK_RESULT_UPORT_SINK 0x11 /* enum */ +#define SCHED_CREDIT_CHECK_RESULT_PSX_SINK 0x12 /* enum */ +#define SCHED_CREDIT_CHECK_RESULT_A5_DPU_READ_C 0x13 /* enum */ #define SCHED_CREDIT_CHECK_RESULT_SCHED_INSTANCE_LBN 0 #define SCHED_CREDIT_CHECK_RESULT_SCHED_INSTANCE_WIDTH 8 /* The type of node that this result refers to. 
*/ @@ -23923,6 +21706,10 @@ #define SCHED_CREDIT_CHECK_RESULT_DEST 0x0 /* enum: Source node */ #define SCHED_CREDIT_CHECK_RESULT_SOURCE 0x1 +/* enum: Destination node credit type 1 (new to the Keystone schedulers, see + * SF-120268-TC) + */ +#define SCHED_CREDIT_CHECK_RESULT_DEST_CREDIT1 0x2 #define SCHED_CREDIT_CHECK_RESULT_NODE_TYPE_LBN 8 #define SCHED_CREDIT_CHECK_RESULT_NODE_TYPE_WIDTH 8 /* Level of node in scheduler hierarchy (level 0 is the bottom of the @@ -23949,592 +21736,6 @@ #define SCHED_CREDIT_CHECK_RESULT_ACTUAL_CREDITS_WIDTH 32 - -/***********************************/ -/* MC_CMD_GET_CLOCKS_INFO - * Get information about the device clocks - */ -#define MC_CMD_GET_CLOCKS_INFO 0x166 -#undef MC_CMD_0x166_PRIVILEGE_CTG - -#define MC_CMD_0x166_PRIVILEGE_CTG SRIOV_CTG_GENERAL - -/* MC_CMD_GET_CLOCKS_INFO_IN msgrequest */ -#define MC_CMD_GET_CLOCKS_INFO_IN_LEN 0 - -/* MC_CMD_GET_CLOCKS_INFO_OUT msgresponse */ -#define MC_CMD_GET_CLOCKS_INFO_OUT_LENMIN 0 -#define MC_CMD_GET_CLOCKS_INFO_OUT_LENMAX 252 -#define MC_CMD_GET_CLOCKS_INFO_OUT_LENMAX_MCDI2 1008 -#define MC_CMD_GET_CLOCKS_INFO_OUT_LEN(num) (0+28*(num)) -#define MC_CMD_GET_CLOCKS_INFO_OUT_INFOS_NUM(len) (((len)-0)/28) -/* An array of CLOCK_INFO structures. */ -#define MC_CMD_GET_CLOCKS_INFO_OUT_INFOS_OFST 0 -#define MC_CMD_GET_CLOCKS_INFO_OUT_INFOS_LEN 28 -#define MC_CMD_GET_CLOCKS_INFO_OUT_INFOS_MINNUM 0 -#define MC_CMD_GET_CLOCKS_INFO_OUT_INFOS_MAXNUM 9 -#define MC_CMD_GET_CLOCKS_INFO_OUT_INFOS_MAXNUM_MCDI2 36 - - -/***********************************/ -/* MC_CMD_VNIC_ENCAP_RULE_ADD - * Add a rule for detecting encapsulations in the VNIC stage. Currently this - * only affects checksum validation in VNIC RX - on TX the send descriptor - * explicitly specifies encapsulation. These rules are per-VNIC, i.e. only - * apply to the current driver. If a rule matches, then the packet is - * considered to have the corresponding encapsulation type, and the inner - * packet is parsed. It is up to the driver to ensure that overlapping rules - * are not inserted. (If a packet would match multiple rules, a random one of - * them will be used.) A rule with the exact same match criteria may not be - * inserted twice (EALREADY). Only a limited number of MATCH_FLAGS values are - * supported, use MC_CMD_GET_PARSER_DISP_INFO with OP - * OP_GET_SUPPORTED_VNIC_ENCAP_RULE_MATCHES to get a list of supported - * combinations. Each driver may only have a limited set of active rules - - * returns ENOSPC if the caller's table is full. - */ -#define MC_CMD_VNIC_ENCAP_RULE_ADD 0x16d -#undef MC_CMD_0x16d_PRIVILEGE_CTG - -#define MC_CMD_0x16d_PRIVILEGE_CTG SRIOV_CTG_GENERAL - -/* MC_CMD_VNIC_ENCAP_RULE_ADD_IN msgrequest */ -#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_LEN 36 -/* Set to MAE_MPORT_SELECTOR_ASSIGNED. In the future this may be relaxed. */ -#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MPORT_SELECTOR_OFST 0 -#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MPORT_SELECTOR_LEN 4 -/* Any non-zero bits other than the ones named below or an unsupported - * combination will cause the NIC to return EOPNOTSUPP. In the future more - * flags may be added. 
- */ -#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_FLAGS_OFST 4 -#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_FLAGS_LEN 4 -#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_ETHER_TYPE_OFST 4 -#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_ETHER_TYPE_LBN 0 -#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_ETHER_TYPE_WIDTH 1 -#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_OUTER_VLAN_OFST 4 -#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_OUTER_VLAN_LBN 1 -#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_OUTER_VLAN_WIDTH 1 -#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_DST_IP_OFST 4 -#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_DST_IP_LBN 2 -#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_DST_IP_WIDTH 1 -#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_IP_PROTO_OFST 4 -#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_IP_PROTO_LBN 3 -#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_IP_PROTO_WIDTH 1 -#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_DST_PORT_OFST 4 -#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_DST_PORT_LBN 4 -#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_DST_PORT_WIDTH 1 -/* Only if MATCH_ETHER_TYPE is set. Ethertype value as bytes in network order. - * Currently only IPv4 (0x0800) and IPv6 (0x86DD) ethertypes may be used. - */ -#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_ETHER_TYPE_OFST 8 -#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_ETHER_TYPE_LEN 2 -/* Only if MATCH_OUTER_VLAN is set. VID value as bytes in network order. - * (Deprecated) - */ -#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_OUTER_VLAN_LBN 80 -#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_OUTER_VLAN_WIDTH 12 -/* Only if MATCH_OUTER_VLAN is set. Aligned wrapper for OUTER_VLAN_VID. */ -#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_OUTER_VLAN_WORD_OFST 10 -#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_OUTER_VLAN_WORD_LEN 2 -#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_OUTER_VLAN_VID_OFST 10 -#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_OUTER_VLAN_VID_LBN 0 -#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_OUTER_VLAN_VID_WIDTH 12 -/* Only if MATCH_DST_IP is set. IP address as bytes in network order. In the - * case of IPv4, the IP should be in the first 4 bytes and all other bytes - * should be zero. - */ -#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_DST_IP_OFST 12 -#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_DST_IP_LEN 16 -/* Only if MATCH_IP_PROTO is set. Currently only UDP proto (17) may be used. */ -#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_IP_PROTO_OFST 28 -#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_IP_PROTO_LEN 1 -/* Actions that should be applied to packets matching the rule. */ -#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_ACTION_FLAGS_OFST 29 -#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_ACTION_FLAGS_LEN 1 -#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_STRIP_OUTER_VLAN_OFST 29 -#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_STRIP_OUTER_VLAN_LBN 0 -#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_STRIP_OUTER_VLAN_WIDTH 1 -#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_RSS_ON_OUTER_OFST 29 -#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_RSS_ON_OUTER_LBN 1 -#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_RSS_ON_OUTER_WIDTH 1 -#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_STEER_ON_OUTER_OFST 29 -#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_STEER_ON_OUTER_LBN 2 -#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_STEER_ON_OUTER_WIDTH 1 -/* Only if MATCH_DST_PORT is set. Port number as bytes in network order. */ -#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_DST_PORT_OFST 30 -#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_DST_PORT_LEN 2 -/* Resulting encapsulation type, as per MAE_MCDI_ENCAP_TYPE enumeration. 
*/ -#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_ENCAP_TYPE_OFST 32 -#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_ENCAP_TYPE_LEN 4 - -/* MC_CMD_VNIC_ENCAP_RULE_ADD_OUT msgresponse */ -#define MC_CMD_VNIC_ENCAP_RULE_ADD_OUT_LEN 4 -/* Handle to inserted rule. Used for removing the rule. */ -#define MC_CMD_VNIC_ENCAP_RULE_ADD_OUT_HANDLE_OFST 0 -#define MC_CMD_VNIC_ENCAP_RULE_ADD_OUT_HANDLE_LEN 4 - - -/***********************************/ -/* MC_CMD_VNIC_ENCAP_RULE_REMOVE - * Remove a VNIC encapsulation rule. Packets which would have previously - * matched the rule will then be considered as unencapsulated. Returns EALREADY - * if the input HANDLE doesn't correspond to an existing rule. - */ -#define MC_CMD_VNIC_ENCAP_RULE_REMOVE 0x16e -#undef MC_CMD_0x16e_PRIVILEGE_CTG - -#define MC_CMD_0x16e_PRIVILEGE_CTG SRIOV_CTG_GENERAL - -/* MC_CMD_VNIC_ENCAP_RULE_REMOVE_IN msgrequest */ -#define MC_CMD_VNIC_ENCAP_RULE_REMOVE_IN_LEN 4 -/* Handle which was returned by MC_CMD_VNIC_ENCAP_RULE_ADD. */ -#define MC_CMD_VNIC_ENCAP_RULE_REMOVE_IN_HANDLE_OFST 0 -#define MC_CMD_VNIC_ENCAP_RULE_REMOVE_IN_HANDLE_LEN 4 - -/* MC_CMD_VNIC_ENCAP_RULE_REMOVE_OUT msgresponse */ -#define MC_CMD_VNIC_ENCAP_RULE_REMOVE_OUT_LEN 0 - -/* UUID structuredef: An RFC4122 standard UUID. The values here are stored in - * the endianness specified by the RFC; users should ignore the broken-out - * fields and instead do straight memory copies to ensure correct ordering. - */ -#define UUID_LEN 16 -#define UUID_TIME_LOW_OFST 0 -#define UUID_TIME_LOW_LEN 4 -#define UUID_TIME_LOW_LBN 0 -#define UUID_TIME_LOW_WIDTH 32 -#define UUID_TIME_MID_OFST 4 -#define UUID_TIME_MID_LEN 2 -#define UUID_TIME_MID_LBN 32 -#define UUID_TIME_MID_WIDTH 16 -#define UUID_TIME_HI_LBN 52 -#define UUID_TIME_HI_WIDTH 12 -#define UUID_VERSION_LBN 48 -#define UUID_VERSION_WIDTH 4 -#define UUID_RESERVED_LBN 64 -#define UUID_RESERVED_WIDTH 2 -#define UUID_CLK_SEQ_LBN 66 -#define UUID_CLK_SEQ_WIDTH 14 -#define UUID_NODE_OFST 10 -#define UUID_NODE_LEN 6 -#define UUID_NODE_LBN 80 -#define UUID_NODE_WIDTH 48 - - -/***********************************/ -/* MC_CMD_PLUGIN_ALLOC - * Create a handle to a datapath plugin's extension. This involves finding a - * currently-loaded plugin offering the given functionality (as identified by - * the UUID) and allocating a handle to track the usage of it. Plugin - * functionality is identified by 'extension' rather than any other identifier - * so that a single plugin bitfile may offer more than one piece of independent - * functionality. If two bitfiles are loaded which both offer the same - * extension, then the metadata is interrogated further to determine which is - * the newest and that is the one opened. See SF-123625-SW for architectural - * detail on datapath plugins. 
- */ -#define MC_CMD_PLUGIN_ALLOC 0x1ad -#undef MC_CMD_0x1ad_PRIVILEGE_CTG - -#define MC_CMD_0x1ad_PRIVILEGE_CTG SRIOV_CTG_GENERAL - -/* MC_CMD_PLUGIN_ALLOC_IN msgrequest */ -#define MC_CMD_PLUGIN_ALLOC_IN_LEN 24 -/* The functionality requested of the plugin, as a UUID structure */ -#define MC_CMD_PLUGIN_ALLOC_IN_UUID_OFST 0 -#define MC_CMD_PLUGIN_ALLOC_IN_UUID_LEN 16 -/* Additional options for opening the handle */ -#define MC_CMD_PLUGIN_ALLOC_IN_FLAGS_OFST 16 -#define MC_CMD_PLUGIN_ALLOC_IN_FLAGS_LEN 4 -#define MC_CMD_PLUGIN_ALLOC_IN_FLAG_INFO_ONLY_OFST 16 -#define MC_CMD_PLUGIN_ALLOC_IN_FLAG_INFO_ONLY_LBN 0 -#define MC_CMD_PLUGIN_ALLOC_IN_FLAG_INFO_ONLY_WIDTH 1 -#define MC_CMD_PLUGIN_ALLOC_IN_FLAG_ALLOW_DISABLED_OFST 16 -#define MC_CMD_PLUGIN_ALLOC_IN_FLAG_ALLOW_DISABLED_LBN 1 -#define MC_CMD_PLUGIN_ALLOC_IN_FLAG_ALLOW_DISABLED_WIDTH 1 -/* Load the extension only if it is in the specified administrative group. - * Specify ANY to load the extension wherever it is found (if there are - * multiple choices then the extension with the highest MINOR_VER/PATCH_VER - * will be loaded). See MC_CMD_PLUGIN_GET_META_GLOBAL for a description of - * administrative groups. - */ -#define MC_CMD_PLUGIN_ALLOC_IN_ADMIN_GROUP_OFST 20 -#define MC_CMD_PLUGIN_ALLOC_IN_ADMIN_GROUP_LEN 2 -/* enum: Load the extension from any ADMIN_GROUP. */ -#define MC_CMD_PLUGIN_ALLOC_IN_ANY 0xffff -/* Reserved */ -#define MC_CMD_PLUGIN_ALLOC_IN_RESERVED_OFST 22 -#define MC_CMD_PLUGIN_ALLOC_IN_RESERVED_LEN 2 - -/* MC_CMD_PLUGIN_ALLOC_OUT msgresponse */ -#define MC_CMD_PLUGIN_ALLOC_OUT_LEN 4 -/* Unique identifier of this usage */ -#define MC_CMD_PLUGIN_ALLOC_OUT_HANDLE_OFST 0 -#define MC_CMD_PLUGIN_ALLOC_OUT_HANDLE_LEN 4 - - -/***********************************/ -/* MC_CMD_PLUGIN_FREE - * Delete a handle to a plugin's extension. - */ -#define MC_CMD_PLUGIN_FREE 0x1ae -#undef MC_CMD_0x1ae_PRIVILEGE_CTG - -#define MC_CMD_0x1ae_PRIVILEGE_CTG SRIOV_CTG_GENERAL - -/* MC_CMD_PLUGIN_FREE_IN msgrequest */ -#define MC_CMD_PLUGIN_FREE_IN_LEN 4 -/* Handle returned by MC_CMD_PLUGIN_ALLOC_OUT */ -#define MC_CMD_PLUGIN_FREE_IN_HANDLE_OFST 0 -#define MC_CMD_PLUGIN_FREE_IN_HANDLE_LEN 4 - -/* MC_CMD_PLUGIN_FREE_OUT msgresponse */ -#define MC_CMD_PLUGIN_FREE_OUT_LEN 0 - - -/***********************************/ -/* MC_CMD_PLUGIN_GET_META_GLOBAL - * Returns the global metadata applying to the whole plugin extension. See the - * other metadata calls for subtypes of data. - */ -#define MC_CMD_PLUGIN_GET_META_GLOBAL 0x1af -#undef MC_CMD_0x1af_PRIVILEGE_CTG - -#define MC_CMD_0x1af_PRIVILEGE_CTG SRIOV_CTG_GENERAL - -/* MC_CMD_PLUGIN_GET_META_GLOBAL_IN msgrequest */ -#define MC_CMD_PLUGIN_GET_META_GLOBAL_IN_LEN 4 -/* Handle returned by MC_CMD_PLUGIN_ALLOC_OUT */ -#define MC_CMD_PLUGIN_GET_META_GLOBAL_IN_HANDLE_OFST 0 -#define MC_CMD_PLUGIN_GET_META_GLOBAL_IN_HANDLE_LEN 4 - -/* MC_CMD_PLUGIN_GET_META_GLOBAL_OUT msgresponse */ -#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_LEN 36 -/* Unique identifier of this plugin extension. This is identical to the value - * which was requested when the handle was allocated. 
- */ -#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_UUID_OFST 0 -#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_UUID_LEN 16 -/* semver sub-version of this plugin extension */ -#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MINOR_VER_OFST 16 -#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MINOR_VER_LEN 2 -/* semver micro-version of this plugin extension */ -#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_PATCH_VER_OFST 18 -#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_PATCH_VER_LEN 2 -/* Number of different messages which can be sent to this extension */ -#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_NUM_MSGS_OFST 20 -#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_NUM_MSGS_LEN 4 -/* Byte offset within the VI window of the plugin's mapped CSR window. */ -#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_OFFSET_OFST 24 -#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_OFFSET_LEN 2 -/* Number of bytes mapped through to the plugin's CSRs. 0 if that feature was - * not requested by the plugin (in which case MAPPED_CSR_OFFSET and - * MAPPED_CSR_FLAGS are ignored). - */ -#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_SIZE_OFST 26 -#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_SIZE_LEN 2 -/* Flags indicating how to perform the CSR window mapping. */ -#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_FLAGS_OFST 28 -#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_FLAGS_LEN 4 -#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_FLAG_READ_OFST 28 -#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_FLAG_READ_LBN 0 -#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_FLAG_READ_WIDTH 1 -#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_FLAG_WRITE_OFST 28 -#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_FLAG_WRITE_LBN 1 -#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_MAPPED_CSR_FLAG_WRITE_WIDTH 1 -/* Identifier of the set of extensions which all change state together. - * Extensions having the same ADMIN_GROUP will always load and unload at the - * same time. ADMIN_GROUP values themselves are arbitrary (but they contain a - * generation number as an implementation detail to ensure that they're not - * reused rapidly). - */ -#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_ADMIN_GROUP_OFST 32 -#define MC_CMD_PLUGIN_GET_META_GLOBAL_OUT_ADMIN_GROUP_LEN 1 -/* Bitshift in MC_CMD_DEVEL_CLIENT_PRIVILEGE_MODIFY's MASK parameters - * corresponding to this extension, i.e. set the bit 1<= 1, TCI value to be inserted as outermost VLAN. */ #define MC_CMD_MAE_ACTION_SET_ALLOC_IN_VLAN0_TCI_BE_OFST 4 #define MC_CMD_MAE_ACTION_SET_ALLOC_IN_VLAN0_TCI_BE_LEN 2 @@ -28291,19 +24449,23 @@ #define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DELIVER_OFST 20 #define MC_CMD_MAE_ACTION_SET_ALLOC_IN_DELIVER_LEN 4 /* Allows an action set to trigger several counter updates. Set to - * COUNTER_LIST_ID_NULL to request no counter action. + * MAE_COUNTER_ID_NULL to request no counter action. */ #define MC_CMD_MAE_ACTION_SET_ALLOC_IN_COUNTER_LIST_ID_OFST 24 #define MC_CMD_MAE_ACTION_SET_ALLOC_IN_COUNTER_LIST_ID_LEN 4 +/* Enum values, see field(s): */ +/* MAE_COUNTER_ID */ /* If a driver only wished to update one counter within this action set, then * it can supply a COUNTER_ID instead of allocating a single-element counter * list. The ID must have been allocated with COUNTER_TYPE=AR. This field - * should be set to COUNTER_ID_NULL if this behaviour is not required. It is - * not valid to supply a non-NULL value for both COUNTER_LIST_ID and + * should be set to MAE_COUNTER_ID_NULL if this behaviour is not required. 
It + * is not valid to supply a non-NULL value for both COUNTER_LIST_ID and * COUNTER_ID. */ #define MC_CMD_MAE_ACTION_SET_ALLOC_IN_COUNTER_ID_OFST 28 #define MC_CMD_MAE_ACTION_SET_ALLOC_IN_COUNTER_ID_LEN 4 +/* Enum values, see field(s): */ +/* MAE_COUNTER_ID */ #define MC_CMD_MAE_ACTION_SET_ALLOC_IN_MARK_VALUE_OFST 32 #define MC_CMD_MAE_ACTION_SET_ALLOC_IN_MARK_VALUE_LEN 4 /* Set to MAC_ID_NULL to request no source MAC replacement. */ @@ -28347,6 +24509,24 @@ #define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_SUPPRESS_SELF_DELIVERY_OFST 0 #define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_SUPPRESS_SELF_DELIVERY_LBN 14 #define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_SUPPRESS_SELF_DELIVERY_WIDTH 1 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_REPLACE_RDP_C_PL_OFST 0 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_REPLACE_RDP_C_PL_LBN 15 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_REPLACE_RDP_C_PL_WIDTH 1 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_REPLACE_RDP_D_PL_OFST 0 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_REPLACE_RDP_D_PL_LBN 16 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_REPLACE_RDP_D_PL_WIDTH 1 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_REPLACE_RDP_OUT_HOST_CHAN_OFST 0 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_REPLACE_RDP_OUT_HOST_CHAN_LBN 17 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_REPLACE_RDP_OUT_HOST_CHAN_WIDTH 1 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_SET_NET_CHAN_OFST 0 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_SET_NET_CHAN_LBN 18 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DO_SET_NET_CHAN_WIDTH 1 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_LACP_PLUGIN_OFST 0 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_LACP_PLUGIN_LBN 19 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_LACP_PLUGIN_WIDTH 1 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_LACP_INC_L4_OFST 0 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_LACP_INC_L4_LBN 20 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_LACP_INC_L4_WIDTH 1 /* If VLAN_PUSH >= 1, TCI value to be inserted as outermost VLAN. */ #define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_VLAN0_TCI_BE_OFST 4 #define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_VLAN0_TCI_BE_LEN 2 @@ -28372,19 +24552,23 @@ #define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DELIVER_OFST 20 #define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_DELIVER_LEN 4 /* Allows an action set to trigger several counter updates. Set to - * COUNTER_LIST_ID_NULL to request no counter action. + * MAE_COUNTER_ID_NULL to request no counter action. */ #define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_COUNTER_LIST_ID_OFST 24 #define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_COUNTER_LIST_ID_LEN 4 +/* Enum values, see field(s): */ +/* MAE_COUNTER_ID */ /* If a driver only wished to update one counter within this action set, then * it can supply a COUNTER_ID instead of allocating a single-element counter * list. The ID must have been allocated with COUNTER_TYPE=AR. This field - * should be set to COUNTER_ID_NULL if this behaviour is not required. It is - * not valid to supply a non-NULL value for both COUNTER_LIST_ID and + * should be set to MAE_COUNTER_ID_NULL if this behaviour is not required. It + * is not valid to supply a non-NULL value for both COUNTER_LIST_ID and * COUNTER_ID. */ #define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_COUNTER_ID_OFST 28 #define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_COUNTER_ID_LEN 4 +/* Enum values, see field(s): */ +/* MAE_COUNTER_ID */ #define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_MARK_VALUE_OFST 32 #define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_MARK_VALUE_LEN 4 /* Set to MAC_ID_NULL to request no source MAC replacement. 
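Before moving on: the COUNTER_LIST_ID/COUNTER_ID exclusivity rule above is an easy one to trip over. Below is a minimal driver-side guard, sketched under the assumption that the sfc MCDI helpers (MCDI_SET_DWORD from mcdi.h, efx_dword_t from bitfield.h) are available; example_set_counter_fields and the null_id parameter are illustrative, with the NULL sentinel passed in rather than hard-coded because its enum spelling lives elsewhere in this header.

/* Illustrative only: enforce "at most one of COUNTER_LIST_ID /
 * COUNTER_ID is non-NULL" when building an action-set alloc request.
 */
static int example_set_counter_fields(efx_dword_t *inbuf,
				      u32 counter_list_id,
				      u32 counter_id, u32 null_id)
{
	/* The spec forbids supplying both a counter list and a single
	 * counter in one action set; reject that combination up front.
	 */
	if (counter_list_id != null_id && counter_id != null_id)
		return -EINVAL;

	MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_IN_COUNTER_LIST_ID,
		       counter_list_id);
	MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_IN_COUNTER_ID,
		       counter_id);
	return 0;
}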
*/ @@ -28437,6 +24621,172 @@ #define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_ECN_ECT_1_TO_CE_LBN 6 #define MC_CMD_MAE_ACTION_SET_ALLOC_V2_IN_ECN_ECT_1_TO_CE_WIDTH 1 +/* MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN msgrequest: Only supported if + * MAE_ACTION_SET_ALLOC_V3_SUPPORTED is advertised in + * MC_CMD_GET_CAPABILITIES_V10_OUT. + */ +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_LEN 53 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_FLAGS_OFST 0 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_FLAGS_LEN 4 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_VLAN_PUSH_OFST 0 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_VLAN_PUSH_LBN 0 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_VLAN_PUSH_WIDTH 2 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_VLAN_POP_OFST 0 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_VLAN_POP_LBN 4 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_VLAN_POP_WIDTH 2 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DECAP_OFST 0 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DECAP_LBN 8 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DECAP_WIDTH 1 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_MARK_OFST 0 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_MARK_LBN 9 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_MARK_WIDTH 1 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_FLAG_OFST 0 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_FLAG_LBN 10 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_FLAG_WIDTH 1 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_NAT_OFST 0 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_NAT_LBN 11 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_NAT_WIDTH 1 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_DECR_IP_TTL_OFST 0 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_DECR_IP_TTL_LBN 12 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_DECR_IP_TTL_WIDTH 1 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_SET_SRC_MPORT_OFST 0 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_SET_SRC_MPORT_LBN 13 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_SET_SRC_MPORT_WIDTH 1 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_SUPPRESS_SELF_DELIVERY_OFST 0 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_SUPPRESS_SELF_DELIVERY_LBN 14 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_SUPPRESS_SELF_DELIVERY_WIDTH 1 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_REPLACE_RDP_C_PL_OFST 0 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_REPLACE_RDP_C_PL_LBN 15 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_REPLACE_RDP_C_PL_WIDTH 1 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_REPLACE_RDP_D_PL_OFST 0 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_REPLACE_RDP_D_PL_LBN 16 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_REPLACE_RDP_D_PL_WIDTH 1 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_REPLACE_RDP_OUT_HOST_CHAN_OFST 0 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_REPLACE_RDP_OUT_HOST_CHAN_LBN 17 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_REPLACE_RDP_OUT_HOST_CHAN_WIDTH 1 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_SET_NET_CHAN_OFST 0 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_SET_NET_CHAN_LBN 18 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_SET_NET_CHAN_WIDTH 1 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_LACP_PLUGIN_OFST 0 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_LACP_PLUGIN_LBN 19 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_LACP_PLUGIN_WIDTH 1 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_LACP_INC_L4_OFST 0 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_LACP_INC_L4_LBN 20 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_LACP_INC_L4_WIDTH 1 +/* If VLAN_PUSH >= 1, TCI value to be inserted as outermost VLAN. 
*/ +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_VLAN0_TCI_BE_OFST 4 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_VLAN0_TCI_BE_LEN 2 +/* If VLAN_PUSH >= 1, TPID value to be inserted as outermost VLAN. */ +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_VLAN0_PROTO_BE_OFST 6 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_VLAN0_PROTO_BE_LEN 2 +/* If VLAN_PUSH == 2, inner TCI value to be inserted. */ +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_VLAN1_TCI_BE_OFST 8 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_VLAN1_TCI_BE_LEN 2 +/* If VLAN_PUSH == 2, inner TPID value to be inserted. */ +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_VLAN1_PROTO_BE_OFST 10 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_VLAN1_PROTO_BE_LEN 2 +/* Reserved. Ignored by firmware. Should be set to zero or 0xffffffff. */ +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RSVD_OFST 12 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RSVD_LEN 4 +/* Set to ENCAP_HEADER_ID_NULL to request no encap action */ +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_ENCAP_HEADER_ID_OFST 16 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_ENCAP_HEADER_ID_LEN 4 +/* An m-port selector identifying the m-port that the modified packet should be + * delivered to. Set to MPORT_SELECTOR_NULL to request no delivery of the + * packet. + */ +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DELIVER_OFST 20 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DELIVER_LEN 4 +/* Allows an action set to trigger several counter updates. Set to + * MAE_COUNTER_ID_NULL to request no counter action. + */ +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_COUNTER_LIST_ID_OFST 24 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_COUNTER_LIST_ID_LEN 4 +/* Enum values, see field(s): */ +/* MAE_COUNTER_ID */ +/* If a driver only wished to update one counter within this action set, then + * it can supply a COUNTER_ID instead of allocating a single-element counter + * list. The ID must have been allocated with COUNTER_TYPE=AR. This field + * should be set to MAE_COUNTER_ID_NULL if this behaviour is not required. It + * is not valid to supply a non-NULL value for both COUNTER_LIST_ID and + * COUNTER_ID. + */ +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_COUNTER_ID_OFST 28 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_COUNTER_ID_LEN 4 +/* Enum values, see field(s): */ +/* MAE_COUNTER_ID */ +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_MARK_VALUE_OFST 32 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_MARK_VALUE_LEN 4 +/* Set to MAC_ID_NULL to request no source MAC replacement. */ +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_SRC_MAC_ID_OFST 36 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_SRC_MAC_ID_LEN 4 +/* Set to MAC_ID_NULL to request no destination MAC replacement. */ +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DST_MAC_ID_OFST 40 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DST_MAC_ID_LEN 4 +/* Source m-port ID to be reported for DO_SET_SRC_MPORT action. */ +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_REPORTED_SRC_MPORT_OFST 44 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_REPORTED_SRC_MPORT_LEN 4 +/* Actions for modifying the Differentiated Services Code-Point (DSCP) bits + * within IPv4 and IPv6 headers. 
+ */ +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DSCP_CONTROL_OFST 48 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DSCP_CONTROL_LEN 2 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_DSCP_ENCAP_COPY_OFST 48 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_DSCP_ENCAP_COPY_LBN 0 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_DSCP_ENCAP_COPY_WIDTH 1 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_DSCP_DECAP_COPY_OFST 48 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_DSCP_DECAP_COPY_LBN 1 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_DSCP_DECAP_COPY_WIDTH 1 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_REPLACE_DSCP_OFST 48 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_REPLACE_DSCP_LBN 2 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_REPLACE_DSCP_WIDTH 1 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DSCP_VALUE_OFST 48 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DSCP_VALUE_LBN 3 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DSCP_VALUE_WIDTH 6 +/* Actions for modifying the Explicit Congestion Notification (ECN) bits within + * IPv4 and IPv6 headers. + */ +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_ECN_CONTROL_OFST 50 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_ECN_CONTROL_LEN 1 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_ECN_ENCAP_COPY_OFST 50 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_ECN_ENCAP_COPY_LBN 0 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_ECN_ENCAP_COPY_WIDTH 1 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_ECN_DECAP_COPY_OFST 50 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_ECN_DECAP_COPY_LBN 1 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_ECN_DECAP_COPY_WIDTH 1 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_REPLACE_ECN_OFST 50 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_REPLACE_ECN_LBN 2 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_DO_REPLACE_ECN_WIDTH 1 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_ECN_VALUE_OFST 50 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_ECN_VALUE_LBN 3 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_ECN_VALUE_WIDTH 2 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_ECN_ECT_0_TO_CE_OFST 50 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_ECN_ECT_0_TO_CE_LBN 5 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_ECN_ECT_0_TO_CE_WIDTH 1 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_ECN_ECT_1_TO_CE_OFST 50 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_ECN_ECT_1_TO_CE_LBN 6 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_ECN_ECT_1_TO_CE_WIDTH 1 +/* Actions for overwriting CH_ROUTE subfields. */ +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RDP_OVERWRITE_OFST 51 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RDP_OVERWRITE_LEN 1 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RDP_C_PL_OFST 51 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RDP_C_PL_LBN 0 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RDP_C_PL_WIDTH 1 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RDP_D_PL_OFST 51 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RDP_D_PL_LBN 1 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RDP_D_PL_WIDTH 1 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RDP_PL_CHAN_OFST 51 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RDP_PL_CHAN_LBN 2 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RDP_PL_CHAN_WIDTH 1 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RDP_OUT_HOST_CHAN_OFST 51 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RDP_OUT_HOST_CHAN_LBN 3 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_RDP_OUT_HOST_CHAN_WIDTH 1 +/* Override outgoing CH_VC to network port for DO_SET_NET_CHAN action. Cannot + * be used in conjunction with DO_SET_SRC_MPORT action. 
+ */ +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_NET_CHAN_OFST 52 +#define MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_NET_CHAN_LEN 1 + /* MC_CMD_MAE_ACTION_SET_ALLOC_OUT msgresponse */ #define MC_CMD_MAE_ACTION_SET_ALLOC_OUT_LEN 4 /* The MSB of the AS_ID is guaranteed to be clear if the ID is not @@ -28680,58 +25030,6 @@ #define MC_CMD_MAE_OUTER_RULE_REMOVE_OUT_REMOVED_OR_ID_MAXNUM 32 #define MC_CMD_MAE_OUTER_RULE_REMOVE_OUT_REMOVED_OR_ID_MAXNUM_MCDI2 32 - -/***********************************/ -/* MC_CMD_MAE_OUTER_RULE_UPDATE - * Atomically change the response of an Outer Rule. - */ -#define MC_CMD_MAE_OUTER_RULE_UPDATE 0x17d -#undef MC_CMD_0x17d_PRIVILEGE_CTG - -#define MC_CMD_0x17d_PRIVILEGE_CTG SRIOV_CTG_MAE - -/* MC_CMD_MAE_OUTER_RULE_UPDATE_IN msgrequest */ -#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_LEN 16 -/* ID of outer rule to update */ -#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_OR_ID_OFST 0 -#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_OR_ID_LEN 4 -/* Packets matching the rule will be parsed with this encapsulation. */ -#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_ENCAP_TYPE_OFST 4 -#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_ENCAP_TYPE_LEN 4 -/* Enum values, see field(s): */ -/* MAE_MCDI_ENCAP_TYPE */ -/* This field controls the actions that are performed when a rule is hit. */ -#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_ACTION_CONTROL_OFST 8 -#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_ACTION_CONTROL_LEN 4 -#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_DO_CT_OFST 8 -#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_DO_CT_LBN 0 -#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_DO_CT_WIDTH 1 -#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_CT_VNI_MODE_OFST 8 -#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_CT_VNI_MODE_LBN 1 -#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_CT_VNI_MODE_WIDTH 2 -/* Enum values, see field(s): */ -/* MAE_CT_VNI_MODE */ -#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_DO_COUNT_OFST 8 -#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_DO_COUNT_LBN 3 -#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_DO_COUNT_WIDTH 1 -#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_CT_TCP_FLAGS_INHIBIT_OFST 8 -#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_CT_TCP_FLAGS_INHIBIT_LBN 4 -#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_CT_TCP_FLAGS_INHIBIT_WIDTH 1 -#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_RECIRC_ID_OFST 8 -#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_RECIRC_ID_LBN 8 -#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_RECIRC_ID_WIDTH 8 -#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_CT_DOMAIN_OFST 8 -#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_CT_DOMAIN_LBN 16 -#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_CT_DOMAIN_WIDTH 16 -/* ID of counter to increment when the rule is hit. Only used if the DO_COUNT - * flag is set. The ID must have been allocated with COUNTER_TYPE=OR. 
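Stepping back to the MC_CMD_MAE_ACTION_SET_ALLOC_V3 request defined above, here is a hedged sketch of allocating an action set with the new 53-byte layout, assuming the sfc MCDI buffer helpers (MCDI_DECLARE_BUF and friends) and efx_mcdi_rpc(). example_mae_action_set_alloc_v3 and the null_counter_id parameter are illustrative, not driver code; a real caller would first check MAE_ACTION_SET_ALLOC_V3_SUPPORTED as the struct comment notes, and would also fill the remaining NULL-sentinel fields (encap header ID, source/destination MAC IDs), which are omitted here for brevity.

/* Illustrative sketch: allocate a V3 action set that decrements the
 * IP TTL and delivers to a given m-port.
 */
static int example_mae_action_set_alloc_v3(struct efx_nic *efx,
					   u32 deliver_mport,
					   u32 null_counter_id, u32 *as_id)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_ACTION_SET_ALLOC_V3_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_ACTION_SET_ALLOC_OUT_LEN);
	size_t outlen;
	int rc;

	/* Decrement the IP TTL; all other flag bits stay clear. */
	MCDI_POPULATE_DWORD_1(inbuf, MAE_ACTION_SET_ALLOC_V3_IN_FLAGS,
			      MAE_ACTION_SET_ALLOC_V3_IN_DO_DECR_IP_TTL, 1);
	MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_V3_IN_DELIVER,
		       deliver_mport);
	/* No counter action: both fields carry the NULL sentinel
	 * (passed in here), per the COUNTER_LIST_ID/COUNTER_ID rules.
	 */
	MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_V3_IN_COUNTER_LIST_ID,
		       null_counter_id);
	MCDI_SET_DWORD(inbuf, MAE_ACTION_SET_ALLOC_V3_IN_COUNTER_ID,
		       null_counter_id);

	rc = efx_mcdi_rpc(efx, MC_CMD_MAE_ACTION_SET_ALLOC, inbuf,
			  sizeof(inbuf), outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < MC_CMD_MAE_ACTION_SET_ALLOC_OUT_LEN)
		return -EIO;
	*as_id = MCDI_DWORD(outbuf, MAE_ACTION_SET_ALLOC_OUT_AS_ID);
	return 0;
}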
- */ -#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_COUNTER_ID_OFST 12 -#define MC_CMD_MAE_OUTER_RULE_UPDATE_IN_COUNTER_ID_LEN 4 - -/* MC_CMD_MAE_OUTER_RULE_UPDATE_OUT msgresponse */ -#define MC_CMD_MAE_OUTER_RULE_UPDATE_OUT_LEN 0 - /* MAE_ACTION_RULE_RESPONSE structuredef */ #define MAE_ACTION_RULE_RESPONSE_LEN 16 #define MAE_ACTION_RULE_RESPONSE_ASL_ID_OFST 0 @@ -29122,142 +25420,6 @@ #define MAE_MPORT_DESC_VNIC_PLUGIN_TBD_LBN 352 #define MAE_MPORT_DESC_VNIC_PLUGIN_TBD_WIDTH 32 -/* MAE_MPORT_DESC_V2 structuredef */ -#define MAE_MPORT_DESC_V2_LEN 56 -#define MAE_MPORT_DESC_V2_MPORT_ID_OFST 0 -#define MAE_MPORT_DESC_V2_MPORT_ID_LEN 4 -#define MAE_MPORT_DESC_V2_MPORT_ID_LBN 0 -#define MAE_MPORT_DESC_V2_MPORT_ID_WIDTH 32 -/* Reserved for future purposes, contains information independent of caller */ -#define MAE_MPORT_DESC_V2_FLAGS_OFST 4 -#define MAE_MPORT_DESC_V2_FLAGS_LEN 4 -#define MAE_MPORT_DESC_V2_FLAGS_LBN 32 -#define MAE_MPORT_DESC_V2_FLAGS_WIDTH 32 -#define MAE_MPORT_DESC_V2_CALLER_FLAGS_OFST 8 -#define MAE_MPORT_DESC_V2_CALLER_FLAGS_LEN 4 -#define MAE_MPORT_DESC_V2_CAN_RECEIVE_ON_OFST 8 -#define MAE_MPORT_DESC_V2_CAN_RECEIVE_ON_LBN 0 -#define MAE_MPORT_DESC_V2_CAN_RECEIVE_ON_WIDTH 1 -#define MAE_MPORT_DESC_V2_CAN_DELIVER_TO_OFST 8 -#define MAE_MPORT_DESC_V2_CAN_DELIVER_TO_LBN 1 -#define MAE_MPORT_DESC_V2_CAN_DELIVER_TO_WIDTH 1 -#define MAE_MPORT_DESC_V2_CAN_DELETE_OFST 8 -#define MAE_MPORT_DESC_V2_CAN_DELETE_LBN 2 -#define MAE_MPORT_DESC_V2_CAN_DELETE_WIDTH 1 -#define MAE_MPORT_DESC_V2_IS_ZOMBIE_OFST 8 -#define MAE_MPORT_DESC_V2_IS_ZOMBIE_LBN 3 -#define MAE_MPORT_DESC_V2_IS_ZOMBIE_WIDTH 1 -#define MAE_MPORT_DESC_V2_CALLER_FLAGS_LBN 64 -#define MAE_MPORT_DESC_V2_CALLER_FLAGS_WIDTH 32 -/* Not the ideal name; it's really the type of thing connected to the m-port */ -#define MAE_MPORT_DESC_V2_MPORT_TYPE_OFST 12 -#define MAE_MPORT_DESC_V2_MPORT_TYPE_LEN 4 -/* enum: Connected to a MAC... */ -#define MAE_MPORT_DESC_V2_MPORT_TYPE_NET_PORT 0x0 -/* enum: Adds metadata and delivers to another m-port */ -#define MAE_MPORT_DESC_V2_MPORT_TYPE_ALIAS 0x1 -/* enum: Connected to a VNIC. */ -#define MAE_MPORT_DESC_V2_MPORT_TYPE_VNIC 0x2 -#define MAE_MPORT_DESC_V2_MPORT_TYPE_LBN 96 -#define MAE_MPORT_DESC_V2_MPORT_TYPE_WIDTH 32 -/* 128-bit value available to drivers for m-port identification. */ -#define MAE_MPORT_DESC_V2_UUID_OFST 16 -#define MAE_MPORT_DESC_V2_UUID_LEN 16 -#define MAE_MPORT_DESC_V2_UUID_LBN 128 -#define MAE_MPORT_DESC_V2_UUID_WIDTH 128 -/* Big wadge of space reserved for other common properties */ -#define MAE_MPORT_DESC_V2_RESERVED_OFST 32 -#define MAE_MPORT_DESC_V2_RESERVED_LEN 8 -#define MAE_MPORT_DESC_V2_RESERVED_LO_OFST 32 -#define MAE_MPORT_DESC_V2_RESERVED_LO_LEN 4 -#define MAE_MPORT_DESC_V2_RESERVED_LO_LBN 256 -#define MAE_MPORT_DESC_V2_RESERVED_LO_WIDTH 32 -#define MAE_MPORT_DESC_V2_RESERVED_HI_OFST 36 -#define MAE_MPORT_DESC_V2_RESERVED_HI_LEN 4 -#define MAE_MPORT_DESC_V2_RESERVED_HI_LBN 288 -#define MAE_MPORT_DESC_V2_RESERVED_HI_WIDTH 32 -#define MAE_MPORT_DESC_V2_RESERVED_LBN 256 -#define MAE_MPORT_DESC_V2_RESERVED_WIDTH 64 -/* Logical port index. Only valid when type NET Port. 
*/ -#define MAE_MPORT_DESC_V2_NET_PORT_IDX_OFST 40 -#define MAE_MPORT_DESC_V2_NET_PORT_IDX_LEN 4 -#define MAE_MPORT_DESC_V2_NET_PORT_IDX_LBN 320 -#define MAE_MPORT_DESC_V2_NET_PORT_IDX_WIDTH 32 -/* The m-port delivered to */ -#define MAE_MPORT_DESC_V2_ALIAS_DELIVER_MPORT_ID_OFST 40 -#define MAE_MPORT_DESC_V2_ALIAS_DELIVER_MPORT_ID_LEN 4 -#define MAE_MPORT_DESC_V2_ALIAS_DELIVER_MPORT_ID_LBN 320 -#define MAE_MPORT_DESC_V2_ALIAS_DELIVER_MPORT_ID_WIDTH 32 -/* The type of thing that owns the VNIC */ -#define MAE_MPORT_DESC_V2_VNIC_CLIENT_TYPE_OFST 40 -#define MAE_MPORT_DESC_V2_VNIC_CLIENT_TYPE_LEN 4 -#define MAE_MPORT_DESC_V2_VNIC_CLIENT_TYPE_FUNCTION 0x1 /* enum */ -#define MAE_MPORT_DESC_V2_VNIC_CLIENT_TYPE_PLUGIN 0x2 /* enum */ -#define MAE_MPORT_DESC_V2_VNIC_CLIENT_TYPE_LBN 320 -#define MAE_MPORT_DESC_V2_VNIC_CLIENT_TYPE_WIDTH 32 -/* The PCIe interface on which the function lives. CJK: We need an enumeration - * of interfaces that we extend as new interface (types) appear. This belongs - * elsewhere and should be referenced from here - */ -#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_INTERFACE_OFST 44 -#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_INTERFACE_LEN 4 -#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_INTERFACE_LBN 352 -#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_INTERFACE_WIDTH 32 -#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_PF_IDX_OFST 48 -#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_PF_IDX_LEN 2 -#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_PF_IDX_LBN 384 -#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_PF_IDX_WIDTH 16 -#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_VF_IDX_OFST 50 -#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_VF_IDX_LEN 2 -/* enum: Indicates that the function is a PF */ -#define MAE_MPORT_DESC_V2_VF_IDX_NULL 0xffff -#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_VF_IDX_LBN 400 -#define MAE_MPORT_DESC_V2_VNIC_FUNCTION_VF_IDX_WIDTH 16 -/* Reserved. Should be ignored for now. */ -#define MAE_MPORT_DESC_V2_VNIC_PLUGIN_TBD_OFST 44 -#define MAE_MPORT_DESC_V2_VNIC_PLUGIN_TBD_LEN 4 -#define MAE_MPORT_DESC_V2_VNIC_PLUGIN_TBD_LBN 352 -#define MAE_MPORT_DESC_V2_VNIC_PLUGIN_TBD_WIDTH 32 -/* A client handle for the VNIC's owner. Only valid for type VNIC. */ -#define MAE_MPORT_DESC_V2_VNIC_CLIENT_HANDLE_OFST 52 -#define MAE_MPORT_DESC_V2_VNIC_CLIENT_HANDLE_LEN 4 -#define MAE_MPORT_DESC_V2_VNIC_CLIENT_HANDLE_LBN 416 -#define MAE_MPORT_DESC_V2_VNIC_CLIENT_HANDLE_WIDTH 32 - - -/***********************************/ -/* MC_CMD_MAE_MPORT_ENUMERATE - * Deprecated in favour of MAE_MPORT_READ_JOURNAL. Support for this command - * will be removed at some future point. - */ -#define MC_CMD_MAE_MPORT_ENUMERATE 0x17c -#undef MC_CMD_0x17c_PRIVILEGE_CTG - -#define MC_CMD_0x17c_PRIVILEGE_CTG SRIOV_CTG_GENERAL - -/* MC_CMD_MAE_MPORT_ENUMERATE_IN msgrequest */ -#define MC_CMD_MAE_MPORT_ENUMERATE_IN_LEN 0 - -/* MC_CMD_MAE_MPORT_ENUMERATE_OUT msgresponse */ -#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_LENMIN 8 -#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_LENMAX 252 -#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_LENMAX_MCDI2 1020 -#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_LEN(num) (8+1*(num)) -#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_MPORT_DESC_DATA_NUM(len) (((len)-8)/1) -#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_MPORT_DESC_COUNT_OFST 0 -#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_MPORT_DESC_COUNT_LEN 4 -#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_SIZEOF_MPORT_DESC_OFST 4 -#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_SIZEOF_MPORT_DESC_LEN 4 -/* Any array of MAE_MPORT_DESC structures. The MAE_MPORT_DESC structure may - * grow in future version of this command. 
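Because the descriptor can grow, the stride rule stated in the next sentence matters in practice: entries are stepped using the SIZEOF_MPORT_DESC value carried in the response, never sizeof() of a compiled-in struct. A hedged sketch follows; example_walk_mports is illustrative, and get_unaligned_le32() comes from <linux/unaligned.h>.

/* Illustrative sketch: walk an MC_CMD_MAE_MPORT_ENUMERATE response
 * using the advertised stride.  A real caller would also check
 * count * stride against outlen before dereferencing each entry.
 */
static void example_walk_mports(const u8 *outbuf, size_t outlen)
{
	const u8 *desc;
	u32 count, stride, i;

	if (outlen < MC_CMD_MAE_MPORT_ENUMERATE_OUT_LENMIN)
		return;

	count = get_unaligned_le32(outbuf +
		MC_CMD_MAE_MPORT_ENUMERATE_OUT_MPORT_DESC_COUNT_OFST);
	stride = get_unaligned_le32(outbuf +
		MC_CMD_MAE_MPORT_ENUMERATE_OUT_SIZEOF_MPORT_DESC_OFST);
	desc = outbuf + MC_CMD_MAE_MPORT_ENUMERATE_OUT_MPORT_DESC_DATA_OFST;
	if (stride < 4)
		return;

	for (i = 0; i < count; i++, desc += stride) {
		/* MPORT_ID sits at offset 0 of every version of the
		 * descriptor; fields beyond the advertised stride are
		 * absent and must not be read.
		 */
		u32 mport_id = get_unaligned_le32(desc);

		pr_debug("mport %u: id %#x\n", i, mport_id);
	}
}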
Drivers should use a stride of - * SIZEOF_MPORT_DESC. Fields beyond SIZEOF_MPORT_DESC are not present. - */ -#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_MPORT_DESC_DATA_OFST 8 -#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_MPORT_DESC_DATA_LEN 1 -#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_MPORT_DESC_DATA_MINNUM 0 -#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_MPORT_DESC_DATA_MAXNUM 244 -#define MC_CMD_MAE_MPORT_ENUMERATE_OUT_MPORT_DESC_DATA_MAXNUM_MCDI2 1012 - /***********************************/ /* MC_CMD_MAE_MPORT_READ_JOURNAL @@ -29569,73 +25731,6 @@ #define MC_CMD_TABLE_INSERT_OUT_LEN 0 -/***********************************/ -/* MC_CMD_TABLE_UPDATE - * Update an existing entry in a table with a new response value. May return - * EINVAL for unknown table ID or other bad request parameters, ENOENT if the - * entry does not already exist, or EPERM if the operation is not permitted. In - * case of an error, the additional MCDI error argument field returns the raw - * error code from the underlying CAM driver. - */ -#define MC_CMD_TABLE_UPDATE 0x1ce -#undef MC_CMD_0x1ce_PRIVILEGE_CTG - -#define MC_CMD_0x1ce_PRIVILEGE_CTG SRIOV_CTG_GENERAL - -/* MC_CMD_TABLE_UPDATE_IN msgrequest */ -#define MC_CMD_TABLE_UPDATE_IN_LENMIN 16 -#define MC_CMD_TABLE_UPDATE_IN_LENMAX 252 -#define MC_CMD_TABLE_UPDATE_IN_LENMAX_MCDI2 1020 -#define MC_CMD_TABLE_UPDATE_IN_LEN(num) (12+4*(num)) -#define MC_CMD_TABLE_UPDATE_IN_DATA_NUM(len) (((len)-12)/4) -/* Table identifier. */ -#define MC_CMD_TABLE_UPDATE_IN_TABLE_ID_OFST 0 -#define MC_CMD_TABLE_UPDATE_IN_TABLE_ID_LEN 4 -/* Enum values, see field(s): */ -/* TABLE_ID */ -/* Width in bits of supplied key data (must match table properties). */ -#define MC_CMD_TABLE_UPDATE_IN_KEY_WIDTH_OFST 4 -#define MC_CMD_TABLE_UPDATE_IN_KEY_WIDTH_LEN 2 -/* Width in bits of supplied mask data (0 for direct/BCAM tables, or for STCAM - * when allocated MASK_ID is used instead). - */ -#define MC_CMD_TABLE_UPDATE_IN_MASK_WIDTH_OFST 6 -#define MC_CMD_TABLE_UPDATE_IN_MASK_WIDTH_LEN 2 -/* Width in bits of supplied response data (for INSERT and UPDATE operations - * this must match the table properties; for DELETE operations, no response - * data is required and this must be 0). - */ -#define MC_CMD_TABLE_UPDATE_IN_RESP_WIDTH_OFST 8 -#define MC_CMD_TABLE_UPDATE_IN_RESP_WIDTH_LEN 2 -/* Mask ID for STCAM table - used instead of mask data if the table descriptor - * reports ALLOC_MASKS==1. Otherwise set to 0. - */ -#define MC_CMD_TABLE_UPDATE_IN_MASK_ID_OFST 6 -#define MC_CMD_TABLE_UPDATE_IN_MASK_ID_LEN 2 -/* Priority for TCAM or STCAM, in range 0..N_PRIORITIES-1, otherwise 0. */ -#define MC_CMD_TABLE_UPDATE_IN_PRIORITY_OFST 8 -#define MC_CMD_TABLE_UPDATE_IN_PRIORITY_LEN 2 -/* (32-bit alignment padding - set to 0) */ -#define MC_CMD_TABLE_UPDATE_IN_RESERVED_OFST 10 -#define MC_CMD_TABLE_UPDATE_IN_RESERVED_LEN 2 -/* Sequence of key, mask (if MASK_WIDTH > 0), and response (if RESP_WIDTH > 0) - * data values. Each of these items is logically treated as a single wide N-bit - * value, in which the individual fields have been placed within that value per - * the LBN and WIDTH information from the table field descriptors. The wide - * N-bit value is padded with 0 bits at the MSB end if necessary to make a - * multiple of 32 bits. The value is then packed into this command as a - * sequence of 32-bit words, bits [31:0] first, then bits [63:32], etc. 
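A worked example of the packing rule just described, as a hedged generic sketch (example_pack_field is illustrative; real encoders live with the driver's table-access code, not in this header): each field is OR-ed into the wide value at its LBN, and the 32-bit words are emitted least-significant word first.

/* Caller provides a zeroed array of DIV_ROUND_UP(total_width, 32)
 * words; words[0] holds bits [31:0] of the wide value, words[1] bits
 * [63:32], and so on, matching the comment above.  BIT()/BIT_ULL()
 * come from <linux/bits.h>.
 */
static void example_pack_field(__le32 *words, unsigned int lbn,
			       unsigned int width, u64 value)
{
	unsigned int bit;

	for (bit = 0; bit < width; bit++) {
		unsigned int pos = lbn + bit;

		if (value & BIT_ULL(bit))
			words[pos / 32] |= cpu_to_le32(BIT(pos % 32));
	}
}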
- */ -#define MC_CMD_TABLE_UPDATE_IN_DATA_OFST 12 -#define MC_CMD_TABLE_UPDATE_IN_DATA_LEN 4 -#define MC_CMD_TABLE_UPDATE_IN_DATA_MINNUM 1 -#define MC_CMD_TABLE_UPDATE_IN_DATA_MAXNUM 60 -#define MC_CMD_TABLE_UPDATE_IN_DATA_MAXNUM_MCDI2 252 - -/* MC_CMD_TABLE_UPDATE_OUT msgresponse */ -#define MC_CMD_TABLE_UPDATE_OUT_LEN 0 - - /***********************************/ /* MC_CMD_TABLE_DELETE * Delete an existing entry in a table. May return EINVAL for unknown table ID @@ -29702,5 +25797,124 @@ /* MC_CMD_TABLE_DELETE_OUT msgresponse */ #define MC_CMD_TABLE_DELETE_OUT_LEN 0 +/* MC_CMD_QUEUE_HANDLE structuredef: On X4, to distinguish between full- + * featured (X2-style) VIs and low-latency (X3-style) queues, we use the top + * bits of the queue handle to specify the queue type in all MCDI calls which + * refer to VIs/queues. These bits should be masked off when indexing into a + * queue in the BAR. + */ +#define MC_CMD_QUEUE_HANDLE_LEN 4 +/* Combined queue number and type. This is the ID returned by and passed into + * MCDI calls that use queues. + */ +#define MC_CMD_QUEUE_HANDLE_QUEUE_HANDLE_OFST 0 +#define MC_CMD_QUEUE_HANDLE_QUEUE_HANDLE_LEN 4 +#define MC_CMD_QUEUE_HANDLE_QUEUE_NUM_OFST 0 +#define MC_CMD_QUEUE_HANDLE_QUEUE_NUM_LBN 0 +#define MC_CMD_QUEUE_HANDLE_QUEUE_NUM_WIDTH 24 +#define MC_CMD_QUEUE_HANDLE_QUEUE_TYPE_OFST 0 +#define MC_CMD_QUEUE_HANDLE_QUEUE_TYPE_LBN 24 +#define MC_CMD_QUEUE_HANDLE_QUEUE_TYPE_WIDTH 8 +/* enum: Indicates that the queue instance is a full-featured VI */ +#define MC_CMD_QUEUE_HANDLE_QUEUE_TYPE_FF_VI 0x0 +/* enum: Indicates that the queue instance is a LL TXQ */ +#define MC_CMD_QUEUE_HANDLE_QUEUE_TYPE_LL_TXQ 0x1 +/* enum: Indicates that the queue instance is a LL RXQ */ +#define MC_CMD_QUEUE_HANDLE_QUEUE_TYPE_LL_RXQ 0x2 +/* enum: Indicates that the queue instance is a LL EVQ */ +#define MC_CMD_QUEUE_HANDLE_QUEUE_TYPE_LL_EVQ 0x3 +#define MC_CMD_QUEUE_HANDLE_QUEUE_HANDLE_LBN 0 +#define MC_CMD_QUEUE_HANDLE_QUEUE_HANDLE_WIDTH 32 + + +/***********************************/ +/* MC_CMD_ALLOC_LL_QUEUES + * Allocate low latency (X3-style) queues for current PCI function. Can be + * called more than once if desired to allocate more queues. 
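The MC_CMD_QUEUE_HANDLE layout above lends itself to two one-line helpers. A minimal sketch, with illustrative names, of splitting a handle into the BAR index and the queue type (GENMASK() is from <linux/bits.h>):

static inline u32 example_queue_num(u32 handle)
{
	/* Low 24 bits: the queue number, i.e. the index into the BAR
	 * once the type bits have been masked off.
	 */
	return handle & GENMASK(MC_CMD_QUEUE_HANDLE_QUEUE_NUM_WIDTH - 1, 0);
}

static inline u32 example_queue_type(u32 handle)
{
	/* Top 8 bits: one of the QUEUE_TYPE enum values above. */
	return handle >> MC_CMD_QUEUE_HANDLE_QUEUE_TYPE_LBN;
}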
+ */ +#define MC_CMD_ALLOC_LL_QUEUES 0x1dd +#undef MC_CMD_0x1dd_PRIVILEGE_CTG + +#define MC_CMD_0x1dd_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_ALLOC_LL_QUEUES_IN msgrequest */ +#define MC_CMD_ALLOC_LL_QUEUES_IN_LEN 24 +/* The minimum number of TXQs that is acceptable */ +#define MC_CMD_ALLOC_LL_QUEUES_IN_MIN_TXQ_COUNT_OFST 0 +#define MC_CMD_ALLOC_LL_QUEUES_IN_MIN_TXQ_COUNT_LEN 4 +/* The maximum number of TXQs that would be useful */ +#define MC_CMD_ALLOC_LL_QUEUES_IN_MAX_TXQ_COUNT_OFST 4 +#define MC_CMD_ALLOC_LL_QUEUES_IN_MAX_TXQ_COUNT_LEN 4 +/* The minimum number of RXQs that is acceptable */ +#define MC_CMD_ALLOC_LL_QUEUES_IN_MIN_RXQ_COUNT_OFST 8 +#define MC_CMD_ALLOC_LL_QUEUES_IN_MIN_RXQ_COUNT_LEN 4 +/* The maximum number of RXQs that would be useful */ +#define MC_CMD_ALLOC_LL_QUEUES_IN_MAX_RXQ_COUNT_OFST 12 +#define MC_CMD_ALLOC_LL_QUEUES_IN_MAX_RXQ_COUNT_LEN 4 +/* The minimum number of EVQs that is acceptable */ +#define MC_CMD_ALLOC_LL_QUEUES_IN_MIN_EVQ_COUNT_OFST 16 +#define MC_CMD_ALLOC_LL_QUEUES_IN_MIN_EVQ_COUNT_LEN 4 +/* The maximum number of EVQs that would be useful */ +#define MC_CMD_ALLOC_LL_QUEUES_IN_MAX_EVQ_COUNT_OFST 20 +#define MC_CMD_ALLOC_LL_QUEUES_IN_MAX_EVQ_COUNT_LEN 4 + +/* MC_CMD_ALLOC_LL_QUEUES_OUT msgresponse */ +#define MC_CMD_ALLOC_LL_QUEUES_OUT_LENMIN 16 +#define MC_CMD_ALLOC_LL_QUEUES_OUT_LENMAX 252 +#define MC_CMD_ALLOC_LL_QUEUES_OUT_LENMAX_MCDI2 1020 +#define MC_CMD_ALLOC_LL_QUEUES_OUT_LEN(num) (12+4*(num)) +#define MC_CMD_ALLOC_LL_QUEUES_OUT_QUEUES_NUM(len) (((len)-12)/4) +/* The number of TXQs allocated in this request */ +#define MC_CMD_ALLOC_LL_QUEUES_OUT_TXQ_COUNT_OFST 0 +#define MC_CMD_ALLOC_LL_QUEUES_OUT_TXQ_COUNT_LEN 4 +/* The number of RXQs allocated in this request */ +#define MC_CMD_ALLOC_LL_QUEUES_OUT_RXQ_COUNT_OFST 4 +#define MC_CMD_ALLOC_LL_QUEUES_OUT_RXQ_COUNT_LEN 4 +/* The number of EVQs allocated in this request */ +#define MC_CMD_ALLOC_LL_QUEUES_OUT_EVQ_COUNT_OFST 8 +#define MC_CMD_ALLOC_LL_QUEUES_OUT_EVQ_COUNT_LEN 4 +/* A list of allocated queues, returned as MC_CMD_QUEUE_HANDLEs, not + * necessarily contiguous. TXQs are first in the list, followed by RXQs then + * EVQs. The type of each queue is indicated by the top bits (see the + * QUEUE_TYPE enum) + */ +#define MC_CMD_ALLOC_LL_QUEUES_OUT_QUEUES_OFST 12 +#define MC_CMD_ALLOC_LL_QUEUES_OUT_QUEUES_LEN 4 +#define MC_CMD_ALLOC_LL_QUEUES_OUT_QUEUES_MINNUM 1 +#define MC_CMD_ALLOC_LL_QUEUES_OUT_QUEUES_MAXNUM 60 +#define MC_CMD_ALLOC_LL_QUEUES_OUT_QUEUES_MAXNUM_MCDI2 252 + + +/***********************************/ +/* MC_CMD_FREE_LL_QUEUES + * Free low latency (X3-style) queues for current PCI function. + */ +#define MC_CMD_FREE_LL_QUEUES 0x1de +#undef MC_CMD_0x1de_PRIVILEGE_CTG + +#define MC_CMD_0x1de_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_FREE_LL_QUEUES_IN msgrequest */ +#define MC_CMD_FREE_LL_QUEUES_IN_LENMIN 8 +#define MC_CMD_FREE_LL_QUEUES_IN_LENMAX 252 +#define MC_CMD_FREE_LL_QUEUES_IN_LENMAX_MCDI2 1020 +#define MC_CMD_FREE_LL_QUEUES_IN_LEN(num) (4+4*(num)) +#define MC_CMD_FREE_LL_QUEUES_IN_QUEUES_NUM(len) (((len)-4)/4) +/* The number of queues to free. */ +#define MC_CMD_FREE_LL_QUEUES_IN_QUEUE_COUNT_OFST 0 +#define MC_CMD_FREE_LL_QUEUES_IN_QUEUE_COUNT_LEN 4 +/* A list of queues to free, as a list of MC_CMD_QUEUE_HANDLEs. They must have + * all been previously allocated by MC_CMD_ALLOC_LL_QUEUES. The type of each + * queue should be indicated by the top bits. 
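Reading the MC_CMD_ALLOC_LL_QUEUES response back: a hedged sketch of splitting the returned handle list into its TXQ/RXQ/EVQ runs, reusing the illustrative example_queue_num()/example_queue_type() helpers from the MC_CMD_QUEUE_HANDLE sketch above (get_unaligned_le32() from <linux/unaligned.h>):

/* Illustrative sketch: the queue list is TXQs first, then RXQs, then
 * EVQs, with the type of each handle carried in its top bits.
 */
static void example_split_ll_queues(const u8 *outbuf, size_t outlen)
{
	u32 ntxq, nrxq, nevq, i;
	const u8 *q;

	if (outlen < MC_CMD_ALLOC_LL_QUEUES_OUT_LENMIN)
		return;

	ntxq = get_unaligned_le32(outbuf +
		MC_CMD_ALLOC_LL_QUEUES_OUT_TXQ_COUNT_OFST);
	nrxq = get_unaligned_le32(outbuf +
		MC_CMD_ALLOC_LL_QUEUES_OUT_RXQ_COUNT_OFST);
	nevq = get_unaligned_le32(outbuf +
		MC_CMD_ALLOC_LL_QUEUES_OUT_EVQ_COUNT_OFST);
	if (outlen < MC_CMD_ALLOC_LL_QUEUES_OUT_LEN(ntxq + nrxq + nevq))
		return;

	q = outbuf + MC_CMD_ALLOC_LL_QUEUES_OUT_QUEUES_OFST;
	for (i = 0; i < ntxq + nrxq + nevq; i++) {
		u32 handle = get_unaligned_le32(q + 4 * i);

		pr_debug("queue %u: num %u type %u\n", i,
			 example_queue_num(handle),
			 example_queue_type(handle));
	}
}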
+ */ +#define MC_CMD_FREE_LL_QUEUES_IN_QUEUES_OFST 4 +#define MC_CMD_FREE_LL_QUEUES_IN_QUEUES_LEN 4 +#define MC_CMD_FREE_LL_QUEUES_IN_QUEUES_MINNUM 1 +#define MC_CMD_FREE_LL_QUEUES_IN_QUEUES_MAXNUM 62 +#define MC_CMD_FREE_LL_QUEUES_IN_QUEUES_MAXNUM_MCDI2 254 + +/* MC_CMD_FREE_LL_QUEUES_OUT msgresponse */ +#define MC_CMD_FREE_LL_QUEUES_OUT_LEN 0 + #endif /* MCDI_PCOL_H */ diff --git a/drivers/net/ethernet/sfc/mcdi_port.c b/drivers/net/ethernet/sfc/mcdi_port.c index ad4694fa3ddae..7b236d291d8c2 100644 --- a/drivers/net/ethernet/sfc/mcdi_port.c +++ b/drivers/net/ethernet/sfc/mcdi_port.c @@ -17,58 +17,6 @@ #include "selftest.h" #include "mcdi_port_common.h" -static int efx_mcdi_mdio_read(struct net_device *net_dev, - int prtad, int devad, u16 addr) -{ - struct efx_nic *efx = efx_netdev_priv(net_dev); - MCDI_DECLARE_BUF(inbuf, MC_CMD_MDIO_READ_IN_LEN); - MCDI_DECLARE_BUF(outbuf, MC_CMD_MDIO_READ_OUT_LEN); - size_t outlen; - int rc; - - MCDI_SET_DWORD(inbuf, MDIO_READ_IN_BUS, efx->mdio_bus); - MCDI_SET_DWORD(inbuf, MDIO_READ_IN_PRTAD, prtad); - MCDI_SET_DWORD(inbuf, MDIO_READ_IN_DEVAD, devad); - MCDI_SET_DWORD(inbuf, MDIO_READ_IN_ADDR, addr); - - rc = efx_mcdi_rpc(efx, MC_CMD_MDIO_READ, inbuf, sizeof(inbuf), - outbuf, sizeof(outbuf), &outlen); - if (rc) - return rc; - - if (MCDI_DWORD(outbuf, MDIO_READ_OUT_STATUS) != - MC_CMD_MDIO_STATUS_GOOD) - return -EIO; - - return (u16)MCDI_DWORD(outbuf, MDIO_READ_OUT_VALUE); -} - -static int efx_mcdi_mdio_write(struct net_device *net_dev, - int prtad, int devad, u16 addr, u16 value) -{ - struct efx_nic *efx = efx_netdev_priv(net_dev); - MCDI_DECLARE_BUF(inbuf, MC_CMD_MDIO_WRITE_IN_LEN); - MCDI_DECLARE_BUF(outbuf, MC_CMD_MDIO_WRITE_OUT_LEN); - size_t outlen; - int rc; - - MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_BUS, efx->mdio_bus); - MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_PRTAD, prtad); - MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_DEVAD, devad); - MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_ADDR, addr); - MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_VALUE, value); - - rc = efx_mcdi_rpc(efx, MC_CMD_MDIO_WRITE, inbuf, sizeof(inbuf), - outbuf, sizeof(outbuf), &outlen); - if (rc) - return rc; - - if (MCDI_DWORD(outbuf, MDIO_WRITE_OUT_STATUS) != - MC_CMD_MDIO_STATUS_GOOD) - return -EIO; - - return 0; -} u32 efx_mcdi_phy_get_caps(struct efx_nic *efx) { @@ -97,12 +45,7 @@ int efx_mcdi_port_probe(struct efx_nic *efx) { int rc; - /* Set up MDIO structure for PHY */ - efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22; - efx->mdio.mdio_read = efx_mcdi_mdio_read; - efx->mdio.mdio_write = efx_mcdi_mdio_write; - - /* Fill out MDIO structure, loopback modes, and initial link state */ + /* Fill out loopback modes and initial link state */ rc = efx_mcdi_phy_probe(efx); if (rc != 0) return rc; diff --git a/drivers/net/ethernet/sfc/mcdi_port_common.c b/drivers/net/ethernet/sfc/mcdi_port_common.c index 76ea26722ca46..dae684194ac8c 100644 --- a/drivers/net/ethernet/sfc/mcdi_port_common.c +++ b/drivers/net/ethernet/sfc/mcdi_port_common.c @@ -448,15 +448,6 @@ int efx_mcdi_phy_probe(struct efx_nic *efx) efx->phy_data = phy_data; efx->phy_type = phy_data->type; - efx->mdio_bus = phy_data->channel; - efx->mdio.prtad = phy_data->port; - efx->mdio.mmds = phy_data->mmd_mask & ~(1 << MC_CMD_MMD_CLAUSE22); - efx->mdio.mode_support = 0; - if (phy_data->mmd_mask & (1 << MC_CMD_MMD_CLAUSE22)) - efx->mdio.mode_support |= MDIO_SUPPORTS_C22; - if (phy_data->mmd_mask & ~(1 << MC_CMD_MMD_CLAUSE22)) - efx->mdio.mode_support |= MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22; - caps = MCDI_DWORD(outbuf, GET_LINK_OUT_CAP); if 
(caps & (1 << MC_CMD_PHY_CAP_AN_LBN)) mcdi_to_ethtool_linkset(phy_data->media, caps, @@ -546,8 +537,6 @@ void efx_mcdi_phy_get_link_ksettings(struct efx_nic *efx, struct ethtool_link_ks cmd->base.port = mcdi_to_ethtool_media(phy_cfg->media); cmd->base.phy_address = phy_cfg->port; cmd->base.autoneg = !!(efx->link_advertising[0] & ADVERTISED_Autoneg); - cmd->base.mdio_support = (efx->mdio.mode_support & - (MDIO_SUPPORTS_C45 | MDIO_SUPPORTS_C22)); mcdi_to_ethtool_linkset(phy_cfg->media, phy_cfg->supported_cap, cmd->link_modes.supported); diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index f70a7b7d6345c..5c0f306fb019a 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -15,7 +15,7 @@ #include #include #include -#include +#include #include #include #include @@ -956,8 +956,6 @@ struct efx_mae; * @stats_buffer: DMA buffer for statistics * @phy_type: PHY type * @phy_data: PHY private data (including PHY-specific stats) - * @mdio: PHY MDIO interface - * @mdio_bus: PHY MDIO bus ID (only used by Siena) * @phy_mode: PHY operating mode. Serialised by @mac_lock. * @link_advertising: Autonegotiation advertising flags * @fec_config: Forward Error Correction configuration flags. For bit positions @@ -1006,6 +1004,7 @@ struct efx_mae; * @dl_port: devlink port associated with the PF * @mem_bar: The BAR that is mapped into membase. * @reg_base: Offset from the start of the bar to the function control window. + * @reflash_mutex: Mutex for serialising firmware reflash operations. * @monitor_work: Hardware monitor workitem * @biu_lock: BIU (bus interface unit) lock * @last_irq_cpu: Last CPU to handle a possible test interrupt. This @@ -1131,8 +1130,6 @@ struct efx_nic { unsigned int phy_type; void *phy_data; - struct mdio_if_info mdio; - unsigned int mdio_bus; enum efx_phy_mode phy_mode; __ETHTOOL_DECLARE_LINK_MODE_MASK(link_advertising); @@ -1191,6 +1188,7 @@ struct efx_nic { struct devlink_port *dl_port; unsigned int mem_bar; u32 reg_base; + struct mutex reflash_mutex; /* The following fields may be written more often */ @@ -1383,6 +1381,8 @@ struct efx_udp_tunnel { * @can_rx_scatter: NIC is able to scatter packets to multiple buffers * @always_rx_scatter: NIC will always scatter packets to multiple buffers * @option_descriptors: NIC supports TX option descriptors + * @flash_auto_partition: firmware flash uses AUTO partition, driver does + * not need to perform image parsing * @min_interrupt_mode: Lowest capability interrupt mode supported * from &enum efx_int_mode. 
* @timer_period_max: Maximum period of interrupt timer (in ticks) @@ -1559,6 +1559,7 @@ struct efx_nic_type { bool can_rx_scatter; bool always_rx_scatter; bool option_descriptors; + bool flash_auto_partition; unsigned int min_interrupt_mode; unsigned int timer_period_max; netdev_features_t offload_features; diff --git a/drivers/net/ethernet/sfc/tc.c b/drivers/net/ethernet/sfc/tc.c index 0d93164988fc6..fa94aa3cd5fee 100644 --- a/drivers/net/ethernet/sfc/tc.c +++ b/drivers/net/ethernet/sfc/tc.c @@ -1043,7 +1043,7 @@ static int efx_tc_flower_handle_lhs_actions(struct efx_nic *efx, return -EOPNOTSUPP; } if (fa->ct.action) { - NL_SET_ERR_MSG_FMT_MOD(extack, "Unhandled ct.action %u for LHS rule\n", + NL_SET_ERR_MSG_FMT_MOD(extack, "Unhandled ct.action %u for LHS rule", fa->ct.action); return -EOPNOTSUPP; } @@ -1056,7 +1056,7 @@ static int efx_tc_flower_handle_lhs_actions(struct efx_nic *efx, act->zone = ct_zone; break; default: - NL_SET_ERR_MSG_FMT_MOD(extack, "Unhandled action %u for LHS rule\n", + NL_SET_ERR_MSG_FMT_MOD(extack, "Unhandled action %u for LHS rule", fa->id); return -EOPNOTSUPP; } @@ -1581,7 +1581,7 @@ static int efx_tc_flower_replace_foreign_lhs(struct efx_nic *efx, type = efx_tc_indr_netdev_type(net_dev); if (type == EFX_ENCAP_TYPE_NONE) { - NL_SET_ERR_MSG_MOD(extack, "Egress encap match on unsupported tunnel device\n"); + NL_SET_ERR_MSG_MOD(extack, "Egress encap match on unsupported tunnel device"); return -EOPNOTSUPP; } diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c index f539813878f53..2e11060979655 100644 --- a/drivers/net/ethernet/smsc/smsc911x.c +++ b/drivers/net/ethernet/smsc/smsc911x.c @@ -43,7 +43,6 @@ #include #include #include -#include #include #include #include diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c index dc99821c6226f..ee890de69ffe7 100644 --- a/drivers/net/ethernet/socionext/netsec.c +++ b/drivers/net/ethernet/socionext/netsec.c @@ -970,7 +970,7 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget) struct netsec_de *de = dring->vaddr + (DESC_SZ * idx); struct netsec_desc *desc = &dring->desc[idx]; struct page *page = virt_to_page(desc->addr); - u32 xdp_result = NETSEC_XDP_PASS; + u32 metasize, xdp_result = NETSEC_XDP_PASS; struct sk_buff *skb = NULL; u16 pkt_len, desc_len; dma_addr_t dma_handle; @@ -1019,7 +1019,7 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget) prefetch(desc->addr); xdp_prepare_buff(&xdp, desc->addr, NETSEC_RXBUF_HEADROOM, - pkt_len, false); + pkt_len, true); if (xdp_prog) { xdp_result = netsec_run_xdp(priv, xdp_prog, &xdp); @@ -1048,6 +1048,9 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget) skb_reserve(skb, xdp.data - xdp.data_hard_start); skb_put(skb, xdp.data_end - xdp.data); + metasize = xdp.data - xdp.data_meta; + if (metasize) + skb_metadata_set(skb, metasize); skb->protocol = eth_type_trans(skb, priv->ndev); if (priv->rx_cksum_offload_flag && diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig index 4cc85a36a1ab6..3c820ef567759 100644 --- a/drivers/net/ethernet/stmicro/stmmac/Kconfig +++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig @@ -181,6 +181,17 @@ config DWMAC_SOCFPGA for the stmmac device driver. This driver is used for arria5 and cyclone5 FPGA SoCs. 
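The netsec hunk above switches xdp_prepare_buff() to meta_valid=true and then propagates whatever metadata the XDP program left in front of the payload into the skb. As a hedged generic sketch of that rx-side pattern (example_finish_rx is illustrative, not netsec code; skb_metadata_set() is the stock helper from <linux/skbuff.h>):

static void example_finish_rx(struct sk_buff *skb, struct xdp_buff *xdp)
{
	/* An XDP program may move xdp->data_meta backwards from
	 * xdp->data; any gap remaining after the program runs is
	 * metadata and must be recorded on the skb.
	 */
	u32 metasize = xdp->data - xdp->data_meta;

	skb_reserve(skb, xdp->data - xdp->data_hard_start);
	skb_put(skb, xdp->data_end - xdp->data);
	if (metasize)
		skb_metadata_set(skb, metasize);
}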
+config DWMAC_SOPHGO + tristate "Sophgo dwmac support" + depends on OF && (ARCH_SOPHGO || COMPILE_TEST) + default m if ARCH_SOPHGO + help + Support for ethernet controllers on Sophgo RISC-V SoCs + + This selects the Sophgo SoC specific glue layer support + for the stmmac device driver. This driver is used for the + ethernet controllers on various Sophgo SoCs. + config DWMAC_STARFIVE tristate "StarFive dwmac support" depends on OF && (ARCH_STARFIVE || COMPILE_TEST) @@ -307,6 +318,7 @@ config DWMAC_INTEL default X86 depends on X86 && STMMAC_ETH && PCI depends on COMMON_CLK + depends on ACPI help This selects the Intel platform specific bus support for the stmmac driver. This driver is used for Intel Quark/EHL/TGL. diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile index b26f0e79c2b32..594883fb41644 100644 --- a/drivers/net/ethernet/stmicro/stmmac/Makefile +++ b/drivers/net/ethernet/stmicro/stmmac/Makefile @@ -24,6 +24,7 @@ obj-$(CONFIG_DWMAC_ROCKCHIP) += dwmac-rk.o obj-$(CONFIG_DWMAC_RZN1) += dwmac-rzn1.o obj-$(CONFIG_DWMAC_S32) += dwmac-s32.o obj-$(CONFIG_DWMAC_SOCFPGA) += dwmac-altr-socfpga.o +obj-$(CONFIG_DWMAC_SOPHGO) += dwmac-sophgo.o obj-$(CONFIG_DWMAC_STARFIVE) += dwmac-starfive.o obj-$(CONFIG_DWMAC_STI) += dwmac-sti.o obj-$(CONFIG_DWMAC_STM32) += dwmac-stm32.o diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h index e25db747a81a5..412b07e779456 100644 --- a/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/drivers/net/ethernet/stmicro/stmmac/common.h @@ -101,8 +101,8 @@ struct stmmac_rxq_stats { /* Updates on each CPU protected by not allowing nested irqs. */ struct stmmac_pcpu_stats { struct u64_stats_sync syncp; - u64_stats_t rx_normal_irq_n[MTL_MAX_TX_QUEUES]; - u64_stats_t tx_normal_irq_n[MTL_MAX_RX_QUEUES]; + u64_stats_t rx_normal_irq_n[MTL_MAX_RX_QUEUES]; + u64_stats_t tx_normal_irq_n[MTL_MAX_TX_QUEUES]; }; /* Extra statistic and debug information exposed by ethtool */ @@ -530,6 +530,20 @@ struct dma_features { #define STMMAC_DEFAULT_TWT_LS 0x1E #define STMMAC_ET_MAX 0xFFFFF +/* Common LPI register bits */ +#define LPI_CTRL_STATUS_LPITCSE BIT(21) /* LPI Tx Clock Stop Enable, gmac4, xgmac2 only */ +#define LPI_CTRL_STATUS_LPIATE BIT(20) /* LPI Timer Enable, gmac4 only */ +#define LPI_CTRL_STATUS_LPITXA BIT(19) /* Enable LPI TX Automate */ +#define LPI_CTRL_STATUS_PLSEN BIT(18) /* Enable PHY Link Status */ +#define LPI_CTRL_STATUS_PLS BIT(17) /* PHY Link Status */ +#define LPI_CTRL_STATUS_LPIEN BIT(16) /* LPI Enable */ +#define LPI_CTRL_STATUS_RLPIST BIT(9) /* Receive LPI state, gmac1000 only? */ +#define LPI_CTRL_STATUS_TLPIST BIT(8) /* Transmit LPI state, gmac1000 only? 
*/ +#define LPI_CTRL_STATUS_RLPIEX BIT(3) /* Receive LPI Exit */ +#define LPI_CTRL_STATUS_RLPIEN BIT(2) /* Receive LPI Entry */ +#define LPI_CTRL_STATUS_TLPIEX BIT(1) /* Transmit LPI Exit */ +#define LPI_CTRL_STATUS_TLPIEN BIT(0) /* Transmit LPI Entry */ + #define STMMAC_CHAIN_MODE 0x1 #define STMMAC_RING_MODE 0x2 diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c index ef99ef3f1ab47..37fe7c2888784 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c @@ -59,10 +59,11 @@ static void anarion_gmac_exit(struct platform_device *pdev, void *priv) gmac_write_reg(gmac, GMAC_RESET_CONTROL_REG, 1); } -static struct anarion_gmac *anarion_config_dt(struct platform_device *pdev) +static struct anarion_gmac * +anarion_config_dt(struct platform_device *pdev, + struct plat_stmmacenet_data *plat_dat) { struct anarion_gmac *gmac; - phy_interface_t phy_mode; void __iomem *ctl_block; int err; @@ -79,11 +80,7 @@ static struct anarion_gmac *anarion_config_dt(struct platform_device *pdev) gmac->ctl_block = ctl_block; - err = of_get_phy_mode(pdev->dev.of_node, &phy_mode); - if (err) - return ERR_PTR(err); - - switch (phy_mode) { + switch (plat_dat->phy_interface) { case PHY_INTERFACE_MODE_RGMII: fallthrough; case PHY_INTERFACE_MODE_RGMII_ID: @@ -93,7 +90,7 @@ static struct anarion_gmac *anarion_config_dt(struct platform_device *pdev) break; default: dev_err(&pdev->dev, "Unsupported phy-mode (%d)\n", - phy_mode); + plat_dat->phy_interface); return ERR_PTR(-ENOTSUPP); } @@ -111,14 +108,14 @@ static int anarion_dwmac_probe(struct platform_device *pdev) if (ret) return ret; - gmac = anarion_config_dt(pdev); - if (IS_ERR(gmac)) - return PTR_ERR(gmac); - plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac); if (IS_ERR(plat_dat)) return PTR_ERR(plat_dat); + gmac = anarion_config_dt(pdev, plat_dat); + if (IS_ERR(gmac)) + return PTR_ERR(gmac); + plat_dat->init = anarion_gmac_init; plat_dat->exit = anarion_gmac_exit; anarion_gmac_init(pdev, gmac); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c index bd4eb187f8c64..cd431f84f34f6 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c @@ -29,14 +29,21 @@ struct tegra_eqos { void __iomem *regs; struct reset_control *rst; - struct clk *clk_master; struct clk *clk_slave; - struct clk *clk_tx; - struct clk *clk_rx; struct gpio_desc *reset; }; +static struct clk *dwc_eth_find_clk(struct plat_stmmacenet_data *plat_dat, + const char *name) +{ + for (int i = 0; i < plat_dat->num_clks; i++) + if (strcmp(plat_dat->clks[i].id, name) == 0) + return plat_dat->clks[i].clk; + + return NULL; +} + static int dwc_eth_dwmac_config_dt(struct platform_device *pdev, struct plat_stmmacenet_data *plat_dat) { @@ -46,7 +53,9 @@ static int dwc_eth_dwmac_config_dt(struct platform_device *pdev, u32 a_index = 0; if (!plat_dat->axi) { - plat_dat->axi = kzalloc(sizeof(struct stmmac_axi), GFP_KERNEL); + plat_dat->axi = devm_kzalloc(&pdev->dev, + sizeof(struct stmmac_axi), + GFP_KERNEL); if (!plat_dat->axi) return -ENOMEM; @@ -123,49 +132,9 @@ static int dwc_qos_probe(struct platform_device *pdev, struct plat_stmmacenet_data *plat_dat, struct stmmac_resources *stmmac_res) { - int err; - - plat_dat->stmmac_clk = devm_clk_get(&pdev->dev, "apb_pclk"); - if (IS_ERR(plat_dat->stmmac_clk)) { - dev_err(&pdev->dev, 
"apb_pclk clock not found.\n"); - return PTR_ERR(plat_dat->stmmac_clk); - } - - err = clk_prepare_enable(plat_dat->stmmac_clk); - if (err < 0) { - dev_err(&pdev->dev, "failed to enable apb_pclk clock: %d\n", - err); - return err; - } - - plat_dat->pclk = devm_clk_get(&pdev->dev, "phy_ref_clk"); - if (IS_ERR(plat_dat->pclk)) { - dev_err(&pdev->dev, "phy_ref_clk clock not found.\n"); - err = PTR_ERR(plat_dat->pclk); - goto disable; - } - - err = clk_prepare_enable(plat_dat->pclk); - if (err < 0) { - dev_err(&pdev->dev, "failed to enable phy_ref clock: %d\n", - err); - goto disable; - } + plat_dat->pclk = dwc_eth_find_clk(plat_dat, "phy_ref_clk"); return 0; - -disable: - clk_disable_unprepare(plat_dat->stmmac_clk); - return err; -} - -static void dwc_qos_remove(struct platform_device *pdev) -{ - struct net_device *ndev = platform_get_drvdata(pdev); - struct stmmac_priv *priv = netdev_priv(ndev); - - clk_disable_unprepare(priv->plat->pclk); - clk_disable_unprepare(priv->plat->stmmac_clk); } #define SDMEMCOMPPADCTRL 0x8800 @@ -178,11 +147,10 @@ static void dwc_qos_remove(struct platform_device *pdev) #define AUTO_CAL_STATUS 0x880c #define AUTO_CAL_STATUS_ACTIVE BIT(31) -static void tegra_eqos_fix_speed(void *priv, unsigned int speed, unsigned int mode) +static void tegra_eqos_fix_speed(void *priv, int speed, unsigned int mode) { struct tegra_eqos *eqos = priv; bool needs_calibration = false; - long rate = 125000000; u32 value; int err; @@ -193,11 +161,10 @@ static void tegra_eqos_fix_speed(void *priv, unsigned int speed, unsigned int mo fallthrough; case SPEED_10: - rate = rgmii_clock(speed); break; default: - dev_err(eqos->dev, "invalid speed %u\n", speed); + dev_err(eqos->dev, "invalid speed %d\n", speed); break; } @@ -240,10 +207,6 @@ static void tegra_eqos_fix_speed(void *priv, unsigned int speed, unsigned int mo value &= ~AUTO_CAL_CONFIG_ENABLE; writel(value, eqos->regs + AUTO_CAL_CONFIG); } - - err = clk_set_rate(eqos->clk_tx, rate); - if (err < 0) - dev_err(eqos->dev, "failed to set TX rate: %d\n", err); } static int tegra_eqos_init(struct platform_device *pdev, void *priv) @@ -261,7 +224,7 @@ static int tegra_eqos_init(struct platform_device *pdev, void *priv) } static int tegra_eqos_probe(struct platform_device *pdev, - struct plat_stmmacenet_data *data, + struct plat_stmmacenet_data *plat_dat, struct stmmac_resources *res) { struct device *dev = &pdev->dev; @@ -274,63 +237,24 @@ static int tegra_eqos_probe(struct platform_device *pdev, eqos->dev = &pdev->dev; eqos->regs = res->addr; + eqos->clk_slave = plat_dat->stmmac_clk; if (!is_of_node(dev->fwnode)) goto bypass_clk_reset_gpio; - eqos->clk_master = devm_clk_get(&pdev->dev, "master_bus"); - if (IS_ERR(eqos->clk_master)) { - err = PTR_ERR(eqos->clk_master); - goto error; - } - - err = clk_prepare_enable(eqos->clk_master); - if (err < 0) - goto error; - - eqos->clk_slave = devm_clk_get(&pdev->dev, "slave_bus"); - if (IS_ERR(eqos->clk_slave)) { - err = PTR_ERR(eqos->clk_slave); - goto disable_master; - } - - data->stmmac_clk = eqos->clk_slave; - - err = clk_prepare_enable(eqos->clk_slave); - if (err < 0) - goto disable_master; - - eqos->clk_rx = devm_clk_get(&pdev->dev, "rx"); - if (IS_ERR(eqos->clk_rx)) { - err = PTR_ERR(eqos->clk_rx); - goto disable_slave; - } - - err = clk_prepare_enable(eqos->clk_rx); - if (err < 0) - goto disable_slave; - - eqos->clk_tx = devm_clk_get(&pdev->dev, "tx"); - if (IS_ERR(eqos->clk_tx)) { - err = PTR_ERR(eqos->clk_tx); - goto disable_rx; - } - - err = clk_prepare_enable(eqos->clk_tx); - if (err < 0) - 
goto disable_rx; + plat_dat->clk_tx_i = dwc_eth_find_clk(plat_dat, "tx"); eqos->reset = devm_gpiod_get(&pdev->dev, "phy-reset", GPIOD_OUT_HIGH); if (IS_ERR(eqos->reset)) { err = PTR_ERR(eqos->reset); - goto disable_tx; + return err; } usleep_range(2000, 4000); gpiod_set_value(eqos->reset, 0); /* MDIO bus was already reset just above */ - data->mdio_bus_data->needs_reset = false; + plat_dat->mdio_bus_data->needs_reset = false; eqos->rst = devm_reset_control_get(&pdev->dev, "eqos"); if (IS_ERR(eqos->rst)) { @@ -351,10 +275,11 @@ static int tegra_eqos_probe(struct platform_device *pdev, usleep_range(2000, 4000); bypass_clk_reset_gpio: - data->fix_mac_speed = tegra_eqos_fix_speed; - data->init = tegra_eqos_init; - data->bsp_priv = eqos; - data->flags |= STMMAC_FLAG_SPH_DISABLE; + plat_dat->fix_mac_speed = tegra_eqos_fix_speed; + plat_dat->set_clk_tx_rate = stmmac_set_clk_tx_rate; + plat_dat->init = tegra_eqos_init; + plat_dat->bsp_priv = eqos; + plat_dat->flags |= STMMAC_FLAG_SPH_DISABLE; err = tegra_eqos_init(pdev, eqos); if (err < 0) @@ -365,15 +290,7 @@ static int tegra_eqos_probe(struct platform_device *pdev, reset_control_assert(eqos->rst); reset_phy: gpiod_set_value(eqos->reset, 1); -disable_tx: - clk_disable_unprepare(eqos->clk_tx); -disable_rx: - clk_disable_unprepare(eqos->clk_rx); -disable_slave: - clk_disable_unprepare(eqos->clk_slave); -disable_master: - clk_disable_unprepare(eqos->clk_master); -error: + return err; } @@ -383,27 +300,29 @@ static void tegra_eqos_remove(struct platform_device *pdev) reset_control_assert(eqos->rst); gpiod_set_value(eqos->reset, 1); - clk_disable_unprepare(eqos->clk_tx); - clk_disable_unprepare(eqos->clk_rx); - clk_disable_unprepare(eqos->clk_slave); - clk_disable_unprepare(eqos->clk_master); } struct dwc_eth_dwmac_data { int (*probe)(struct platform_device *pdev, - struct plat_stmmacenet_data *data, + struct plat_stmmacenet_data *plat_dat, struct stmmac_resources *res); void (*remove)(struct platform_device *pdev); + const char *stmmac_clk_name; }; static const struct dwc_eth_dwmac_data dwc_qos_data = { .probe = dwc_qos_probe, - .remove = dwc_qos_remove, + .stmmac_clk_name = "apb_pclk", }; static const struct dwc_eth_dwmac_data tegra_eqos_data = { .probe = tegra_eqos_probe, .remove = tegra_eqos_remove, + .stmmac_clk_name = "slave_bus", +}; + +static const struct dwc_eth_dwmac_data fsd_eqos_data = { + .stmmac_clk_name = "slave_bus", }; static int dwc_eth_dwmac_probe(struct platform_device *pdev) @@ -434,9 +353,23 @@ static int dwc_eth_dwmac_probe(struct platform_device *pdev) if (IS_ERR(plat_dat)) return PTR_ERR(plat_dat); - ret = data->probe(pdev, plat_dat, &stmmac_res); + ret = devm_clk_bulk_get_all(&pdev->dev, &plat_dat->clks); + if (ret < 0) + return dev_err_probe(&pdev->dev, ret, "Failed to retrieve all required clocks\n"); + plat_dat->num_clks = ret; + + ret = clk_bulk_prepare_enable(plat_dat->num_clks, plat_dat->clks); + if (ret) + return dev_err_probe(&pdev->dev, ret, "Failed to enable clocks\n"); + + plat_dat->stmmac_clk = dwc_eth_find_clk(plat_dat, + data->stmmac_clk_name); + + if (data->probe) + ret = data->probe(pdev, plat_dat, &stmmac_res); if (ret < 0) { dev_err_probe(&pdev->dev, ret, "failed to probe subdriver\n"); + clk_bulk_disable_unprepare(plat_dat->num_clks, plat_dat->clks); return ret; } @@ -451,7 +384,8 @@ static int dwc_eth_dwmac_probe(struct platform_device *pdev) return ret; remove: - data->remove(pdev); + if (data->remove) + data->remove(pdev); return ret; } @@ -459,15 +393,21 @@ static int dwc_eth_dwmac_probe(struct 
platform_device *pdev) static void dwc_eth_dwmac_remove(struct platform_device *pdev) { const struct dwc_eth_dwmac_data *data = device_get_match_data(&pdev->dev); + struct plat_stmmacenet_data *plat_dat = dev_get_platdata(&pdev->dev); stmmac_dvr_remove(&pdev->dev); - data->remove(pdev); + if (data->remove) + data->remove(pdev); + + if (plat_dat) + clk_bulk_disable_unprepare(plat_dat->num_clks, plat_dat->clks); } static const struct of_device_id dwc_eth_dwmac_match[] = { { .compatible = "snps,dwc-qos-ethernet-4.10", .data = &dwc_qos_data }, { .compatible = "nvidia,tegra186-eqos", .data = &tegra_eqos_data }, + { .compatible = "tesla,fsd-ethqos", .data = &fsd_eqos_data }, { } }; MODULE_DEVICE_TABLE(of, dwc_eth_dwmac_match); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c index 20d3a202bb8d1..5d279fa54b3e8 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c @@ -51,7 +51,7 @@ struct imx_dwmac_ops { int (*fix_soc_reset)(void *priv, void __iomem *ioaddr); int (*set_intf_mode)(struct plat_stmmacenet_data *plat_dat); - void (*fix_mac_speed)(void *priv, unsigned int speed, unsigned int mode); + void (*fix_mac_speed)(void *priv, int speed, unsigned int mode); }; struct imx_priv_data { @@ -192,7 +192,20 @@ static void imx_dwmac_exit(struct platform_device *pdev, void *priv) /* nothing to do now */ } -static void imx_dwmac_fix_speed(void *priv, unsigned int speed, unsigned int mode) +static int imx_dwmac_set_clk_tx_rate(void *bsp_priv, struct clk *clk_tx_i, + phy_interface_t interface, int speed) +{ + struct imx_priv_data *dwmac = bsp_priv; + + interface = dwmac->plat_dat->mac_interface; + if (interface == PHY_INTERFACE_MODE_RMII || + interface == PHY_INTERFACE_MODE_MII) + return 0; + + return stmmac_set_clk_tx_rate(bsp_priv, clk_tx_i, interface, speed); +} + +static void imx_dwmac_fix_speed(void *priv, int speed, unsigned int mode) { struct plat_stmmacenet_data *plat_dat; struct imx_priv_data *dwmac = priv; @@ -208,7 +221,7 @@ static void imx_dwmac_fix_speed(void *priv, unsigned int speed, unsigned int mod rate = rgmii_clock(speed); if (rate < 0) { - dev_err(dwmac->dev, "invalid speed %u\n", speed); + dev_err(dwmac->dev, "invalid speed %d\n", speed); return; } @@ -217,7 +230,7 @@ static void imx_dwmac_fix_speed(void *priv, unsigned int speed, unsigned int mod dev_err(dwmac->dev, "failed to set tx rate %lu\n", rate); } -static void imx93_dwmac_fix_speed(void *priv, unsigned int speed, unsigned int mode) +static void imx93_dwmac_fix_speed(void *priv, int speed, unsigned int mode) { struct imx_priv_data *dwmac = priv; unsigned int iface; @@ -358,7 +371,6 @@ static int imx_dwmac_probe(struct platform_device *pdev) plat_dat->init = imx_dwmac_init; plat_dat->exit = imx_dwmac_exit; plat_dat->clks_config = imx_dwmac_clks_config; - plat_dat->fix_mac_speed = imx_dwmac_fix_speed; plat_dat->bsp_priv = dwmac; dwmac->plat_dat = plat_dat; dwmac->base_addr = stmmac_res.addr; @@ -371,8 +383,13 @@ static int imx_dwmac_probe(struct platform_device *pdev) if (ret) goto err_dwmac_init; - if (dwmac->ops->fix_mac_speed) + if (dwmac->ops->fix_mac_speed) { plat_dat->fix_mac_speed = dwmac->ops->fix_mac_speed; + } else if (!dwmac->ops->mac_rgmii_txclk_auto_adj) { + plat_dat->clk_tx_i = dwmac->clk_tx; + plat_dat->set_clk_tx_rate = imx_dwmac_set_clk_tx_rate; + } + dwmac->plat_dat->fix_soc_reset = dwmac->ops->fix_soc_reset; ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); diff --git 
a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c index ddee6154d40bc..599def7b3a649 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c @@ -22,31 +22,12 @@ struct intel_dwmac { }; struct intel_dwmac_data { - void (*fix_mac_speed)(void *priv, unsigned int speed, unsigned int mode); unsigned long ptp_ref_clk_rate; unsigned long tx_clk_rate; bool tx_clk_en; }; -static void kmb_eth_fix_mac_speed(void *priv, unsigned int speed, unsigned int mode) -{ - struct intel_dwmac *dwmac = priv; - long rate; - int ret; - - rate = rgmii_clock(speed); - if (rate < 0) { - dev_err(dwmac->dev, "Invalid speed\n"); - return; - } - - ret = clk_set_rate(dwmac->tx_clk, rate); - if (ret) - dev_err(dwmac->dev, "Failed to configure tx clock rate\n"); -} - static const struct intel_dwmac_data kmb_data = { - .fix_mac_speed = kmb_eth_fix_mac_speed, .ptp_ref_clk_rate = 200000000, .tx_clk_rate = 125000000, .tx_clk_en = true, @@ -89,8 +70,6 @@ static int intel_eth_plat_probe(struct platform_device *pdev) * platform_match(). */ dwmac->data = device_get_match_data(&pdev->dev); - if (dwmac->data->fix_mac_speed) - plat_dat->fix_mac_speed = dwmac->data->fix_mac_speed; /* Enable TX clock */ if (dwmac->data->tx_clk_en) { @@ -132,6 +111,9 @@ static int intel_eth_plat_probe(struct platform_device *pdev) } } + plat_dat->clk_tx_i = dwmac->tx_clk; + plat_dat->set_clk_tx_rate = stmmac_set_clk_tx_rate; + plat_dat->bsp_priv = dwmac; plat_dat->eee_usecs_rate = plat_dat->clk_ptp_rate; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c index 48acba5eb178e..c8bb9265bbb48 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c @@ -5,15 +5,30 @@ #include #include #include +#include #include "dwmac-intel.h" #include "dwmac4.h" #include "stmmac.h" #include "stmmac_ptp.h" +struct pmc_serdes_regs { + u8 index; + u32 val; +}; + +struct pmc_serdes_reg_info { + const struct pmc_serdes_regs *regs; + u8 num_regs; +}; + struct intel_priv_data { int mdio_adhoc_addr; /* mdio address for serdes & etc */ unsigned long crossts_adj; bool is_pse; + const int *tsn_lane_regs; + int max_tsn_lane_regs; + struct pmc_serdes_reg_info pid_1g; + struct pmc_serdes_reg_info pid_2p5g; }; /* This struct is used to associate PCI Function of MAC controller on a board, @@ -35,6 +50,45 @@ struct stmmac_pci_info { int (*setup)(struct pci_dev *pdev, struct plat_stmmacenet_data *plat); }; +static const struct pmc_serdes_regs pid_modphy3_1g_regs[] = { + { PID_MODPHY3_B_MODPHY_PCR_LCPLL_DWORD0, B_MODPHY_PCR_LCPLL_DWORD0_1G }, + { PID_MODPHY3_N_MODPHY_PCR_LCPLL_DWORD2, N_MODPHY_PCR_LCPLL_DWORD2_1G }, + { PID_MODPHY3_N_MODPHY_PCR_LCPLL_DWORD7, N_MODPHY_PCR_LCPLL_DWORD7_1G }, + { PID_MODPHY3_N_MODPHY_PCR_LPPLL_DWORD10, N_MODPHY_PCR_LPPLL_DWORD10_1G }, + { PID_MODPHY3_N_MODPHY_PCR_CMN_ANA_DWORD30, N_MODPHY_PCR_CMN_ANA_DWORD30_1G }, + {} +}; + +static const struct pmc_serdes_regs pid_modphy3_2p5g_regs[] = { + { PID_MODPHY3_B_MODPHY_PCR_LCPLL_DWORD0, B_MODPHY_PCR_LCPLL_DWORD0_2P5G }, + { PID_MODPHY3_N_MODPHY_PCR_LCPLL_DWORD2, N_MODPHY_PCR_LCPLL_DWORD2_2P5G }, + { PID_MODPHY3_N_MODPHY_PCR_LCPLL_DWORD7, N_MODPHY_PCR_LCPLL_DWORD7_2P5G }, + { PID_MODPHY3_N_MODPHY_PCR_LPPLL_DWORD10, N_MODPHY_PCR_LPPLL_DWORD10_2P5G }, + { PID_MODPHY3_N_MODPHY_PCR_CMN_ANA_DWORD30, N_MODPHY_PCR_CMN_ANA_DWORD30_2P5G }, + {} +}; + +static const struct 
pmc_serdes_regs pid_modphy1_1g_regs[] = { + { PID_MODPHY1_B_MODPHY_PCR_LCPLL_DWORD0, B_MODPHY_PCR_LCPLL_DWORD0_1G }, + { PID_MODPHY1_N_MODPHY_PCR_LCPLL_DWORD2, N_MODPHY_PCR_LCPLL_DWORD2_1G }, + { PID_MODPHY1_N_MODPHY_PCR_LCPLL_DWORD7, N_MODPHY_PCR_LCPLL_DWORD7_1G }, + { PID_MODPHY1_N_MODPHY_PCR_LPPLL_DWORD10, N_MODPHY_PCR_LPPLL_DWORD10_1G }, + { PID_MODPHY1_N_MODPHY_PCR_CMN_ANA_DWORD30, N_MODPHY_PCR_CMN_ANA_DWORD30_1G }, + {} +}; + +static const struct pmc_serdes_regs pid_modphy1_2p5g_regs[] = { + { PID_MODPHY1_B_MODPHY_PCR_LCPLL_DWORD0, B_MODPHY_PCR_LCPLL_DWORD0_2P5G }, + { PID_MODPHY1_N_MODPHY_PCR_LCPLL_DWORD2, N_MODPHY_PCR_LCPLL_DWORD2_2P5G }, + { PID_MODPHY1_N_MODPHY_PCR_LCPLL_DWORD7, N_MODPHY_PCR_LCPLL_DWORD7_2P5G }, + { PID_MODPHY1_N_MODPHY_PCR_LPPLL_DWORD10, N_MODPHY_PCR_LPPLL_DWORD10_2P5G }, + { PID_MODPHY1_N_MODPHY_PCR_CMN_ANA_DWORD30, N_MODPHY_PCR_CMN_ANA_DWORD30_2P5G }, + {} +}; + +static const int ehl_tsn_lane_regs[] = {7, 8, 9, 10, 11}; +static const int adln_tsn_lane_regs[] = {6}; + static int stmmac_pci_find_phy_addr(struct pci_dev *pdev, const struct dmi_system_id *dmi_list) { @@ -93,7 +147,7 @@ static int intel_serdes_powerup(struct net_device *ndev, void *priv_data) data &= ~SERDES_RATE_MASK; data &= ~SERDES_PCLK_MASK; - if (priv->plat->max_speed == 2500) + if (priv->plat->phy_interface == PHY_INTERFACE_MODE_2500BASEX) data |= SERDES_RATE_PCIE_GEN2 << SERDES_RATE_PCIE_SHIFT | SERDES_PCLK_37p5MHZ << SERDES_PCLK_SHIFT; else @@ -415,6 +469,95 @@ static void intel_mgbe_pse_crossts_adj(struct intel_priv_data *intel_priv, } } +static int intel_tsn_lane_is_available(struct net_device *ndev, + struct intel_priv_data *intel_priv) +{ + struct stmmac_priv *priv = netdev_priv(ndev); + struct pmc_ipc_cmd tmp = {}; + struct pmc_ipc_rbuf rbuf = {}; + int ret = 0, i, j; + const int max_fia_regs = 5; + + tmp.cmd = IPC_SOC_REGISTER_ACCESS; + tmp.sub_cmd = IPC_SOC_SUB_CMD_READ; + + for (i = 0; i < max_fia_regs; i++) { + tmp.wbuf[0] = R_PCH_FIA_15_PCR_LOS1_REG_BASE + i; + + ret = intel_pmc_ipc(&tmp, &rbuf); + if (ret < 0) { + netdev_info(priv->dev, "Failed to read from PMC.\n"); + return ret; + } + + for (j = 0; j <= intel_priv->max_tsn_lane_regs; j++) + if ((rbuf.buf[0] >> + (4 * (intel_priv->tsn_lane_regs[j] % 8)) & + B_PCH_FIA_PCR_L0O) == 0xB) + return 0; + } + + return -EINVAL; +} + +static int intel_set_reg_access(const struct pmc_serdes_regs *regs, int max_regs) +{ + int ret = 0, i; + + for (i = 0; i < max_regs; i++) { + struct pmc_ipc_cmd tmp = {}; + struct pmc_ipc_rbuf rbuf = {}; + + tmp.cmd = IPC_SOC_REGISTER_ACCESS; + tmp.sub_cmd = IPC_SOC_SUB_CMD_WRITE; + tmp.wbuf[0] = (u32)regs[i].index; + tmp.wbuf[1] = regs[i].val; + + ret = intel_pmc_ipc(&tmp, &rbuf); + if (ret < 0) + return ret; + } + + return ret; +} + +static int intel_mac_finish(struct net_device *ndev, + void *intel_data, + unsigned int mode, + phy_interface_t interface) +{ + struct intel_priv_data *intel_priv = intel_data; + struct stmmac_priv *priv = netdev_priv(ndev); + const struct pmc_serdes_regs *regs; + int max_regs = 0; + int ret = 0; + + ret = intel_tsn_lane_is_available(ndev, intel_priv); + if (ret < 0) { + netdev_info(priv->dev, "No TSN lane available to set the registers.\n"); + return ret; + } + + if (interface == PHY_INTERFACE_MODE_2500BASEX) { + regs = intel_priv->pid_2p5g.regs; + max_regs = intel_priv->pid_2p5g.num_regs; + } else { + regs = intel_priv->pid_1g.regs; + max_regs = intel_priv->pid_1g.num_regs; + } + + ret = intel_set_reg_access(regs, max_regs); + if (ret < 0) + return ret; + + 
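+	/*
+	 * Commit the new interface mode before power-cycling the SerDes:
+	 * intel_serdes_powerup() keys its rate/PCLK selection off
+	 * priv->plat->phy_interface (37.5 MHz PCLK for 2500BASE-X), so
+	 * the power-down/power-up sequence below is what makes the ModPhy
+	 * PLL values written above take effect at the requested speed.
+	 */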
priv->plat->phy_interface = interface; + + intel_serdes_powerdown(ndev, intel_priv); + intel_serdes_powerup(ndev, intel_priv); + + return ret; +} + static void common_default_data(struct plat_stmmacenet_data *plat) { plat->clk_csr = 2; /* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */ @@ -624,6 +767,8 @@ static int intel_mgbe_common_data(struct pci_dev *pdev, static int ehl_common_data(struct pci_dev *pdev, struct plat_stmmacenet_data *plat) { + struct intel_priv_data *intel_priv = plat->bsp_priv; + plat->rx_queues_to_use = 8; plat->tx_queues_to_use = 8; plat->flags |= STMMAC_FLAG_USE_PHY_WOL; @@ -639,20 +784,29 @@ static int ehl_common_data(struct pci_dev *pdev, plat->safety_feat_cfg->prtyen = 0; plat->safety_feat_cfg->tmouten = 0; + intel_priv->tsn_lane_regs = ehl_tsn_lane_regs; + intel_priv->max_tsn_lane_regs = ARRAY_SIZE(ehl_tsn_lane_regs); + return intel_mgbe_common_data(pdev, plat); } static int ehl_sgmii_data(struct pci_dev *pdev, struct plat_stmmacenet_data *plat) { + struct intel_priv_data *intel_priv = plat->bsp_priv; + plat->bus_id = 1; plat->phy_interface = PHY_INTERFACE_MODE_SGMII; - plat->speed_mode_2500 = intel_speed_mode_2500; plat->serdes_powerup = intel_serdes_powerup; plat->serdes_powerdown = intel_serdes_powerdown; - + plat->mac_finish = intel_mac_finish; plat->clk_ptp_rate = 204800000; + intel_priv->pid_1g.regs = pid_modphy3_1g_regs; + intel_priv->pid_1g.num_regs = ARRAY_SIZE(pid_modphy3_1g_regs); + intel_priv->pid_2p5g.regs = pid_modphy3_2p5g_regs; + intel_priv->pid_2p5g.num_regs = ARRAY_SIZE(pid_modphy3_2p5g_regs); + return ehl_common_data(pdev, plat); } @@ -705,10 +859,18 @@ static struct stmmac_pci_info ehl_pse0_rgmii1g_info = { static int ehl_pse0_sgmii1g_data(struct pci_dev *pdev, struct plat_stmmacenet_data *plat) { + struct intel_priv_data *intel_priv = plat->bsp_priv; + plat->phy_interface = PHY_INTERFACE_MODE_SGMII; - plat->speed_mode_2500 = intel_speed_mode_2500; plat->serdes_powerup = intel_serdes_powerup; plat->serdes_powerdown = intel_serdes_powerdown; + plat->mac_finish = intel_mac_finish; + + intel_priv->pid_1g.regs = pid_modphy1_1g_regs; + intel_priv->pid_1g.num_regs = ARRAY_SIZE(pid_modphy1_1g_regs); + intel_priv->pid_2p5g.regs = pid_modphy1_2p5g_regs; + intel_priv->pid_2p5g.num_regs = ARRAY_SIZE(pid_modphy1_2p5g_regs); + return ehl_pse0_common_data(pdev, plat); } @@ -746,10 +908,18 @@ static struct stmmac_pci_info ehl_pse1_rgmii1g_info = { static int ehl_pse1_sgmii1g_data(struct pci_dev *pdev, struct plat_stmmacenet_data *plat) { + struct intel_priv_data *intel_priv = plat->bsp_priv; + plat->phy_interface = PHY_INTERFACE_MODE_SGMII; - plat->speed_mode_2500 = intel_speed_mode_2500; plat->serdes_powerup = intel_serdes_powerup; plat->serdes_powerdown = intel_serdes_powerdown; + plat->mac_finish = intel_mac_finish; + + intel_priv->pid_1g.regs = pid_modphy1_1g_regs; + intel_priv->pid_1g.num_regs = ARRAY_SIZE(pid_modphy1_1g_regs); + intel_priv->pid_2p5g.regs = pid_modphy1_2p5g_regs; + intel_priv->pid_2p5g.num_regs = ARRAY_SIZE(pid_modphy1_2p5g_regs); + return ehl_pse1_common_data(pdev, plat); } @@ -835,6 +1005,55 @@ static int adls_sgmii_phy1_data(struct pci_dev *pdev, static struct stmmac_pci_info adls_sgmii1g_phy1_info = { .setup = adls_sgmii_phy1_data, }; + +static int adln_common_data(struct pci_dev *pdev, + struct plat_stmmacenet_data *plat) +{ + struct intel_priv_data *intel_priv = plat->bsp_priv; + + plat->rx_queues_to_use = 6; + plat->tx_queues_to_use = 4; + plat->clk_ptp_rate = 204800000; + + plat->safety_feat_cfg->tsoee = 1; + 
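+	/* Same safety-feature selection as ehl_common_data(); only the
+	 * queue counts and the TSN lane set differ for ADL-N.
+	 */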
plat->safety_feat_cfg->mrxpee = 0; + plat->safety_feat_cfg->mestee = 1; + plat->safety_feat_cfg->mrxee = 1; + plat->safety_feat_cfg->mtxee = 1; + plat->safety_feat_cfg->epsi = 0; + plat->safety_feat_cfg->edpp = 0; + plat->safety_feat_cfg->prtyen = 0; + plat->safety_feat_cfg->tmouten = 0; + + intel_priv->tsn_lane_regs = adln_tsn_lane_regs; + intel_priv->max_tsn_lane_regs = ARRAY_SIZE(adln_tsn_lane_regs); + + return intel_mgbe_common_data(pdev, plat); +} + +static int adln_sgmii_phy0_data(struct pci_dev *pdev, + struct plat_stmmacenet_data *plat) +{ + struct intel_priv_data *intel_priv = plat->bsp_priv; + + plat->bus_id = 1; + plat->phy_interface = PHY_INTERFACE_MODE_SGMII; + plat->serdes_powerup = intel_serdes_powerup; + plat->serdes_powerdown = intel_serdes_powerdown; + plat->mac_finish = intel_mac_finish; + + intel_priv->pid_1g.regs = pid_modphy1_1g_regs; + intel_priv->pid_1g.num_regs = ARRAY_SIZE(pid_modphy1_1g_regs); + intel_priv->pid_2p5g.regs = pid_modphy1_2p5g_regs; + intel_priv->pid_2p5g.num_regs = ARRAY_SIZE(pid_modphy1_2p5g_regs); + + return adln_common_data(pdev, plat); +} + +static struct stmmac_pci_info adln_sgmii1g_phy0_info = { + .setup = adln_sgmii_phy0_data, +}; + static const struct stmmac_pci_func_data galileo_stmmac_func_data[] = { { .func = 6, @@ -1217,8 +1436,8 @@ static const struct pci_device_id intel_eth_pci_id_table[] = { { PCI_DEVICE_DATA(INTEL, TGLH_SGMII1G_1, &tgl_sgmii1g_phy1_info) }, { PCI_DEVICE_DATA(INTEL, ADLS_SGMII1G_0, &adls_sgmii1g_phy0_info) }, { PCI_DEVICE_DATA(INTEL, ADLS_SGMII1G_1, &adls_sgmii1g_phy1_info) }, - { PCI_DEVICE_DATA(INTEL, ADLN_SGMII1G, &tgl_sgmii1g_phy0_info) }, - { PCI_DEVICE_DATA(INTEL, RPLP_SGMII1G, &tgl_sgmii1g_phy0_info) }, + { PCI_DEVICE_DATA(INTEL, ADLN_SGMII1G, &adln_sgmii1g_phy0_info) }, + { PCI_DEVICE_DATA(INTEL, RPLP_SGMII1G, &adln_sgmii1g_phy0_info) }, {} }; MODULE_DEVICE_TABLE(pci, intel_eth_pci_id_table); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.h b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.h index 0a37987478c15..a12f8e65f89f1 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.h @@ -50,4 +50,33 @@ #define PCH_PTP_CLK_FREQ_19_2MHZ (GMAC_GPO0) #define PCH_PTP_CLK_FREQ_200MHZ (0) +/* Modphy Register index */ +#define R_PCH_FIA_15_PCR_LOS1_REG_BASE 8 +#define R_PCH_FIA_15_PCR_LOS2_REG_BASE 9 +#define R_PCH_FIA_15_PCR_LOS3_REG_BASE 10 +#define R_PCH_FIA_15_PCR_LOS4_REG_BASE 11 +#define R_PCH_FIA_15_PCR_LOS5_REG_BASE 12 +#define B_PCH_FIA_PCR_L0O GENMASK(3, 0) +#define PID_MODPHY1_B_MODPHY_PCR_LCPLL_DWORD0 13 +#define PID_MODPHY1_N_MODPHY_PCR_LCPLL_DWORD2 14 +#define PID_MODPHY1_N_MODPHY_PCR_LCPLL_DWORD7 15 +#define PID_MODPHY1_N_MODPHY_PCR_LPPLL_DWORD10 16 +#define PID_MODPHY1_N_MODPHY_PCR_CMN_ANA_DWORD30 17 +#define PID_MODPHY3_B_MODPHY_PCR_LCPLL_DWORD0 18 +#define PID_MODPHY3_N_MODPHY_PCR_LCPLL_DWORD2 19 +#define PID_MODPHY3_N_MODPHY_PCR_LCPLL_DWORD7 20 +#define PID_MODPHY3_N_MODPHY_PCR_LPPLL_DWORD10 21 +#define PID_MODPHY3_N_MODPHY_PCR_CMN_ANA_DWORD30 22 + +#define B_MODPHY_PCR_LCPLL_DWORD0_1G 0x46AAAA41 +#define N_MODPHY_PCR_LCPLL_DWORD2_1G 0x00000139 +#define N_MODPHY_PCR_LCPLL_DWORD7_1G 0x002A0003 +#define N_MODPHY_PCR_LPPLL_DWORD10_1G 0x00170008 +#define N_MODPHY_PCR_CMN_ANA_DWORD30_1G 0x0000D4AC +#define B_MODPHY_PCR_LCPLL_DWORD0_2P5G 0x58555551 +#define N_MODPHY_PCR_LCPLL_DWORD2_2P5G 0x0000012D +#define N_MODPHY_PCR_LCPLL_DWORD7_2P5G 0x001F0003 +#define N_MODPHY_PCR_LPPLL_DWORD10_2P5G 0x00170008 +#define 
N_MODPHY_PCR_CMN_ANA_DWORD30_2P5G 0x8200ACAC + #endif /* __DWMAC_INTEL_H__ */ diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c index 61227dcf56dc6..ca4035cbb55b6 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c @@ -112,7 +112,7 @@ struct ipq806x_gmac { phy_interface_t phy_mode; }; -static int get_clk_div_sgmii(struct ipq806x_gmac *gmac, unsigned int speed) +static int get_clk_div_sgmii(struct ipq806x_gmac *gmac, int speed) { struct device *dev = &gmac->pdev->dev; int div; @@ -138,7 +138,7 @@ static int get_clk_div_sgmii(struct ipq806x_gmac *gmac, unsigned int speed) return div; } -static int get_clk_div_rgmii(struct ipq806x_gmac *gmac, unsigned int speed) +static int get_clk_div_rgmii(struct ipq806x_gmac *gmac, int speed) { struct device *dev = &gmac->pdev->dev; int div; @@ -164,7 +164,7 @@ static int get_clk_div_rgmii(struct ipq806x_gmac *gmac, unsigned int speed) return div; } -static int ipq806x_gmac_set_speed(struct ipq806x_gmac *gmac, unsigned int speed) +static int ipq806x_gmac_set_speed(struct ipq806x_gmac *gmac, int speed) { uint32_t clk_bits, val; int div; @@ -211,16 +211,12 @@ static int ipq806x_gmac_set_speed(struct ipq806x_gmac *gmac, unsigned int speed) return 0; } -static int ipq806x_gmac_of_parse(struct ipq806x_gmac *gmac) +static int ipq806x_gmac_of_parse(struct ipq806x_gmac *gmac, + struct plat_stmmacenet_data *plat_dat) { struct device *dev = &gmac->pdev->dev; - int ret; - ret = of_get_phy_mode(dev->of_node, &gmac->phy_mode); - if (ret) { - dev_err(dev, "missing phy mode property\n"); - return -EINVAL; - } + gmac->phy_mode = plat_dat->phy_interface; if (of_property_read_u32(dev->of_node, "qcom,id", &gmac->id) < 0) { dev_err(dev, "missing qcom id property\n"); @@ -260,11 +256,12 @@ static int ipq806x_gmac_of_parse(struct ipq806x_gmac *gmac) return PTR_ERR_OR_ZERO(gmac->qsgmii_csr); } -static void ipq806x_gmac_fix_mac_speed(void *priv, unsigned int speed, unsigned int mode) +static int ipq806x_gmac_set_clk_tx_rate(void *bsp_priv, struct clk *clk_tx_i, + phy_interface_t interface, int speed) { - struct ipq806x_gmac *gmac = priv; + struct ipq806x_gmac *gmac = bsp_priv; - ipq806x_gmac_set_speed(gmac, speed); + return ipq806x_gmac_set_speed(gmac, speed); } static int @@ -397,7 +394,7 @@ static int ipq806x_gmac_probe(struct platform_device *pdev) gmac->pdev = pdev; - err = ipq806x_gmac_of_parse(gmac); + err = ipq806x_gmac_of_parse(gmac, plat_dat); if (err) { dev_err(dev, "device tree parsing error\n"); return err; @@ -478,7 +475,7 @@ static int ipq806x_gmac_probe(struct platform_device *pdev) plat_dat->has_gmac = true; plat_dat->bsp_priv = gmac; - plat_dat->fix_mac_speed = ipq806x_gmac_fix_mac_speed; + plat_dat->set_clk_tx_rate = ipq806x_gmac_set_clk_tx_rate; plat_dat->multicast_filter_bins = 0; plat_dat->tx_fifo_size = 8192; plat_dat->rx_fifo_size = 8192; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c index bfe6e2d631bdf..1a93787056a77 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c @@ -11,6 +11,8 @@ #include "dwmac_dma.h" #include "dwmac1000.h" +#define DRIVER_NAME "dwmac-loongson-pci" + /* Normal Loongson Tx Summary */ #define DMA_INTR_ENA_NIE_TX_LOONGSON 0x00040000 /* Normal Loongson Rx Summary */ @@ -149,8 +151,7 @@ static struct stmmac_pci_info loongson_gmac_pci_info = { .setup = 
loongson_gmac_data, }; -static void loongson_gnet_fix_speed(void *priv, unsigned int speed, - unsigned int mode) +static void loongson_gnet_fix_speed(void *priv, int speed, unsigned int mode) { struct loongson_data *ld = (struct loongson_data *)priv; struct net_device *ndev = dev_get_drvdata(ld->dev); @@ -516,13 +517,26 @@ static int loongson_dwmac_acpi_config(struct pci_dev *pdev, return 0; } +/* Loongson's DWMAC device may take nearly two seconds to complete DMA reset */ +static int loongson_dwmac_fix_reset(void *priv, void __iomem *ioaddr) +{ + u32 value = readl(ioaddr + DMA_BUS_MODE); + + value |= DMA_BUS_MODE_SFT_RESET; + writel(value, ioaddr + DMA_BUS_MODE); + + return readl_poll_timeout(ioaddr + DMA_BUS_MODE, value, + !(value & DMA_BUS_MODE_SFT_RESET), + 10000, 2000000); +} + static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct plat_stmmacenet_data *plat; + struct stmmac_resources res = {}; struct stmmac_pci_info *info; - struct stmmac_resources res; struct loongson_data *ld; - int ret, i; + int ret; plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL); if (!plat) @@ -552,20 +566,14 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id pci_set_master(pdev); /* Get the base address of device */ - for (i = 0; i < PCI_STD_NUM_BARS; i++) { - if (pci_resource_len(pdev, i) == 0) - continue; - ret = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev)); - if (ret) - goto err_disable_device; - break; - } - - memset(&res, 0, sizeof(res)); - res.addr = pcim_iomap_table(pdev)[0]; + res.addr = pcim_iomap_region(pdev, 0, DRIVER_NAME); + ret = PTR_ERR_OR_ZERO(res.addr); + if (ret) + goto err_disable_device; plat->bsp_priv = ld; plat->setup = loongson_dwmac_setup; + plat->fix_soc_reset = loongson_dwmac_fix_reset; ld->dev = &pdev->dev; ld->loongson_id = readl(res.addr + GMAC_VERSION) & 0xff; @@ -574,6 +582,9 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id if (ret) goto err_disable_device; + plat->tx_fifo_size = SZ_16K * plat->tx_queues_to_use; + plat->rx_fifo_size = SZ_16K * plat->rx_queues_to_use; + if (dev_of_node(&pdev->dev)) ret = loongson_dwmac_dt_config(pdev, plat, &res); else @@ -606,7 +617,6 @@ static void loongson_dwmac_remove(struct pci_dev *pdev) struct net_device *ndev = dev_get_drvdata(&pdev->dev); struct stmmac_priv *priv = netdev_priv(ndev); struct loongson_data *ld; - int i; ld = priv->plat->bsp_priv; stmmac_dvr_remove(&pdev->dev); @@ -617,13 +627,6 @@ static void loongson_dwmac_remove(struct pci_dev *pdev) if (ld->loongson_id == DWMAC_CORE_LS_MULTICHAN) loongson_dwmac_msi_clear(pdev); - for (i = 0; i < PCI_STD_NUM_BARS; i++) { - if (pci_resource_len(pdev, i) == 0) - continue; - pcim_iounmap_regions(pdev, BIT(i)); - break; - } - pci_disable_device(pdev); } @@ -673,7 +676,7 @@ static const struct pci_device_id loongson_dwmac_id_table[] = { MODULE_DEVICE_TABLE(pci, loongson_dwmac_id_table); static struct pci_driver loongson_dwmac_driver = { - .name = "dwmac-loongson-pci", + .name = DRIVER_NAME, .id_table = loongson_dwmac_id_table, .probe = loongson_dwmac_probe, .remove = loongson_dwmac_remove, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c index c9636832a570a..d178d5ddc7c7a 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c @@ -456,7 +456,6 @@ static int mediatek_dwmac_config_dt(struct mediatek_dwmac_plat_data *plat) { struct 
mac_delay_struct *mac_delay = &plat->mac_delay; u32 tx_delay_ps, rx_delay_ps; - int err; plat->peri_regmap = syscon_regmap_lookup_by_phandle(plat->np, "mediatek,pericfg"); if (IS_ERR(plat->peri_regmap)) { @@ -464,12 +463,6 @@ static int mediatek_dwmac_config_dt(struct mediatek_dwmac_plat_data *plat) return PTR_ERR(plat->peri_regmap); } - err = of_get_phy_mode(plat->np, &plat->phy_mode); - if (err) { - dev_err(plat->dev, "not find phy-mode\n"); - return err; - } - if (!of_property_read_u32(plat->np, "mediatek,tx-delay-ps", &tx_delay_ps)) { if (tx_delay_ps < plat->variant->tx_delay_max) { mac_delay->tx_delay = tx_delay_ps; @@ -587,6 +580,7 @@ static int mediatek_dwmac_common_data(struct platform_device *pdev, { int i; + priv_plat->phy_mode = plat->phy_interface; plat->mac_interface = priv_plat->phy_mode; if (priv_plat->mac_wol) plat->flags &= ~STMMAC_FLAG_USE_PHY_WOL; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c index 5469fa1b429e3..07c504d07604c 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c @@ -22,9 +22,10 @@ struct meson_dwmac { void __iomem *reg; }; -static void meson6_dwmac_fix_mac_speed(void *priv, unsigned int speed, unsigned int mode) +static int meson6_dwmac_set_clk_tx_rate(void *bsp_priv, struct clk *clk_tx_i, + phy_interface_t interface, int speed) { - struct meson_dwmac *dwmac = priv; + struct meson_dwmac *dwmac = bsp_priv; unsigned int val; val = readl(dwmac->reg); @@ -39,6 +40,8 @@ static void meson6_dwmac_fix_mac_speed(void *priv, unsigned int speed, unsigned } writel(val, dwmac->reg); + + return 0; } static int meson6_dwmac_probe(struct platform_device *pdev) @@ -65,7 +68,7 @@ static int meson6_dwmac_probe(struct platform_device *pdev) return PTR_ERR(dwmac->reg); plat_dat->bsp_priv = dwmac; - plat_dat->fix_mac_speed = meson6_dwmac_fix_mac_speed; + plat_dat->set_clk_tx_rate = meson6_dwmac_set_clk_tx_rate; return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); } diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c index 9c2d62d133ade..a50782994b979 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c @@ -417,11 +417,7 @@ static int meson8b_dwmac_probe(struct platform_device *pdev) return PTR_ERR(dwmac->regs); dwmac->dev = &pdev->dev; - ret = of_get_phy_mode(pdev->dev.of_node, &dwmac->phy_mode); - if (ret) { - dev_err(&pdev->dev, "missing phy-mode property\n"); - return ret; - } + dwmac->phy_mode = plat_dat->phy_interface; /* use 2ns as fallback since this value was previously hardcoded */ if (of_property_read_u32(pdev->dev.of_node, "amlogic,tx-delay-ns", diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c index 2a5b38723635b..0e4da216f942c 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c @@ -111,7 +111,7 @@ struct qcom_ethqos { unsigned int link_clk_rate; struct clk *link_clk; struct phy *serdes_phy; - unsigned int speed; + int speed; int serdes_speed; phy_interface_t phy_mode; @@ -169,30 +169,17 @@ static void rgmii_dump(void *priv) rgmii_readl(ethqos, EMAC_SYSTEM_LOW_POWER_DEBUG)); } -/* Clock rates */ -#define RGMII_1000_NOM_CLK_FREQ (250 * 1000 * 1000UL) -#define RGMII_ID_MODE_100_LOW_SVS_CLK_FREQ (50 * 1000 * 1000UL) -#define 
RGMII_ID_MODE_10_LOW_SVS_CLK_FREQ (5 * 1000 * 1000UL) - static void -ethqos_update_link_clk(struct qcom_ethqos *ethqos, unsigned int speed) +ethqos_update_link_clk(struct qcom_ethqos *ethqos, int speed) { + long rate; + if (!phy_interface_mode_is_rgmii(ethqos->phy_mode)) return; - switch (speed) { - case SPEED_1000: - ethqos->link_clk_rate = RGMII_1000_NOM_CLK_FREQ; - break; - - case SPEED_100: - ethqos->link_clk_rate = RGMII_ID_MODE_100_LOW_SVS_CLK_FREQ; - break; - - case SPEED_10: - ethqos->link_clk_rate = RGMII_ID_MODE_10_LOW_SVS_CLK_FREQ; - break; - } + rate = rgmii_clock(speed); + if (rate > 0) + ethqos->link_clk_rate = rate * 2; clk_set_rate(ethqos->link_clk, ethqos->link_clk_rate); } @@ -699,7 +686,7 @@ static int ethqos_configure(struct qcom_ethqos *ethqos) return ethqos->configure_func(ethqos); } -static void ethqos_fix_mac_speed(void *priv, unsigned int speed, unsigned int mode) +static void ethqos_fix_mac_speed(void *priv, int speed, unsigned int mode) { struct qcom_ethqos *ethqos = priv; @@ -807,9 +794,7 @@ static int qcom_ethqos_probe(struct platform_device *pdev) if (!ethqos) return -ENOMEM; - ret = of_get_phy_mode(np, ðqos->phy_mode); - if (ret) - return dev_err_probe(dev, ret, "Failed to get phy mode\n"); + ethqos->phy_mode = plat_dat->phy_interface; switch (ethqos->phy_mode) { case PHY_INTERFACE_MODE_RGMII: case PHY_INTERFACE_MODE_RGMII_ID: diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c index a4dc89e23a68e..700858ff6f7c3 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c @@ -33,6 +33,8 @@ struct rk_gmac_ops { void (*set_clock_selection)(struct rk_priv_data *bsp_priv, bool input, bool enable); void (*integrated_phy_powerup)(struct rk_priv_data *bsp_priv); + void (*integrated_phy_powerdown)(struct rk_priv_data *bsp_priv); + bool php_grf_required; bool regs_valid; u32 regs[]; }; @@ -91,6 +93,76 @@ struct rk_priv_data { (((tx) ? soc##_GMAC_TXCLK_DLY_ENABLE : soc##_GMAC_TXCLK_DLY_DISABLE) | \ ((rx) ? 
soc##_GMAC_RXCLK_DLY_ENABLE : soc##_GMAC_RXCLK_DLY_DISABLE)) +#define RK_GRF_MACPHY_CON0 0xb00 +#define RK_GRF_MACPHY_CON1 0xb04 +#define RK_GRF_MACPHY_CON2 0xb08 +#define RK_GRF_MACPHY_CON3 0xb0c + +#define RK_MACPHY_ENABLE GRF_BIT(0) +#define RK_MACPHY_DISABLE GRF_CLR_BIT(0) +#define RK_MACPHY_CFG_CLK_50M GRF_BIT(14) +#define RK_GMAC2PHY_RMII_MODE (GRF_BIT(6) | GRF_CLR_BIT(7)) +#define RK_GRF_CON2_MACPHY_ID HIWORD_UPDATE(0x1234, 0xffff, 0) +#define RK_GRF_CON3_MACPHY_ID HIWORD_UPDATE(0x35, 0x3f, 0) + +static void rk_gmac_integrated_ephy_powerup(struct rk_priv_data *priv) +{ + regmap_write(priv->grf, RK_GRF_MACPHY_CON0, RK_MACPHY_CFG_CLK_50M); + regmap_write(priv->grf, RK_GRF_MACPHY_CON0, RK_GMAC2PHY_RMII_MODE); + + regmap_write(priv->grf, RK_GRF_MACPHY_CON2, RK_GRF_CON2_MACPHY_ID); + regmap_write(priv->grf, RK_GRF_MACPHY_CON3, RK_GRF_CON3_MACPHY_ID); + + if (priv->phy_reset) { + /* PHY needs to be disabled before trying to reset it */ + regmap_write(priv->grf, RK_GRF_MACPHY_CON0, RK_MACPHY_DISABLE); + if (priv->phy_reset) + reset_control_assert(priv->phy_reset); + usleep_range(10, 20); + if (priv->phy_reset) + reset_control_deassert(priv->phy_reset); + usleep_range(10, 20); + regmap_write(priv->grf, RK_GRF_MACPHY_CON0, RK_MACPHY_ENABLE); + msleep(30); + } +} + +static void rk_gmac_integrated_ephy_powerdown(struct rk_priv_data *priv) +{ + regmap_write(priv->grf, RK_GRF_MACPHY_CON0, RK_MACPHY_DISABLE); + if (priv->phy_reset) + reset_control_assert(priv->phy_reset); +} + +#define RK_FEPHY_SHUTDOWN GRF_BIT(1) +#define RK_FEPHY_POWERUP GRF_CLR_BIT(1) +#define RK_FEPHY_INTERNAL_RMII_SEL GRF_BIT(6) +#define RK_FEPHY_24M_CLK_SEL (GRF_BIT(8) | GRF_BIT(9)) +#define RK_FEPHY_PHY_ID GRF_BIT(11) + +static void rk_gmac_integrated_fephy_powerup(struct rk_priv_data *priv, + unsigned int reg) +{ + reset_control_assert(priv->phy_reset); + usleep_range(20, 30); + + regmap_write(priv->grf, reg, + RK_FEPHY_POWERUP | + RK_FEPHY_INTERNAL_RMII_SEL | + RK_FEPHY_24M_CLK_SEL | + RK_FEPHY_PHY_ID); + usleep_range(10000, 12000); + + reset_control_deassert(priv->phy_reset); + usleep_range(50000, 60000); +} + +static void rk_gmac_integrated_fephy_powerdown(struct rk_priv_data *priv, + unsigned int reg) +{ + regmap_write(priv->grf, reg, RK_FEPHY_SHUTDOWN); +} + #define PX30_GRF_GMAC_CON1 0x0904 /* PX30_GRF_GMAC_CON1 */ @@ -101,13 +173,6 @@ struct rk_priv_data { static void px30_set_to_rmii(struct rk_priv_data *bsp_priv) { - struct device *dev = &bsp_priv->pdev->dev; - - if (IS_ERR(bsp_priv->grf)) { - dev_err(dev, "%s: Missing rockchip,grf property\n", __func__); - return; - } - regmap_write(bsp_priv->grf, PX30_GRF_GMAC_CON1, PX30_GMAC_PHY_INTF_SEL_RMII); } @@ -181,13 +246,6 @@ static const struct rk_gmac_ops px30_ops = { static void rk3128_set_to_rgmii(struct rk_priv_data *bsp_priv, int tx_delay, int rx_delay) { - struct device *dev = &bsp_priv->pdev->dev; - - if (IS_ERR(bsp_priv->grf)) { - dev_err(dev, "Missing rockchip,grf property\n"); - return; - } - regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1, RK3128_GMAC_PHY_INTF_SEL_RGMII | RK3128_GMAC_RMII_MODE_CLR); @@ -199,13 +257,6 @@ static void rk3128_set_to_rgmii(struct rk_priv_data *bsp_priv, static void rk3128_set_to_rmii(struct rk_priv_data *bsp_priv) { - struct device *dev = &bsp_priv->pdev->dev; - - if (IS_ERR(bsp_priv->grf)) { - dev_err(dev, "Missing rockchip,grf property\n"); - return; - } - regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1, RK3128_GMAC_PHY_INTF_SEL_RMII | RK3128_GMAC_RMII_MODE); } @@ -214,11 +265,6 @@ static void rk3128_set_rgmii_speed(struct 
rk_priv_data *bsp_priv, int speed) { struct device *dev = &bsp_priv->pdev->dev; - if (IS_ERR(bsp_priv->grf)) { - dev_err(dev, "Missing rockchip,grf property\n"); - return; - } - if (speed == 10) regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1, RK3128_GMAC_CLK_2_5M); @@ -236,11 +282,6 @@ static void rk3128_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed) { struct device *dev = &bsp_priv->pdev->dev; - if (IS_ERR(bsp_priv->grf)) { - dev_err(dev, "Missing rockchip,grf property\n"); - return; - } - if (speed == 10) { regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1, RK3128_GMAC_RMII_CLK_2_5M | @@ -297,13 +338,6 @@ static const struct rk_gmac_ops rk3128_ops = { static void rk3228_set_to_rgmii(struct rk_priv_data *bsp_priv, int tx_delay, int rx_delay) { - struct device *dev = &bsp_priv->pdev->dev; - - if (IS_ERR(bsp_priv->grf)) { - dev_err(dev, "Missing rockchip,grf property\n"); - return; - } - regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1, RK3228_GMAC_PHY_INTF_SEL_RGMII | RK3228_GMAC_RMII_MODE_CLR | @@ -316,13 +350,6 @@ static void rk3228_set_to_rgmii(struct rk_priv_data *bsp_priv, static void rk3228_set_to_rmii(struct rk_priv_data *bsp_priv) { - struct device *dev = &bsp_priv->pdev->dev; - - if (IS_ERR(bsp_priv->grf)) { - dev_err(dev, "Missing rockchip,grf property\n"); - return; - } - regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1, RK3228_GMAC_PHY_INTF_SEL_RMII | RK3228_GMAC_RMII_MODE); @@ -335,11 +362,6 @@ static void rk3228_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed) { struct device *dev = &bsp_priv->pdev->dev; - if (IS_ERR(bsp_priv->grf)) { - dev_err(dev, "Missing rockchip,grf property\n"); - return; - } - if (speed == 10) regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1, RK3228_GMAC_CLK_2_5M); @@ -357,11 +379,6 @@ static void rk3228_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed) { struct device *dev = &bsp_priv->pdev->dev; - if (IS_ERR(bsp_priv->grf)) { - dev_err(dev, "Missing rockchip,grf property\n"); - return; - } - if (speed == 10) regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1, RK3228_GMAC_RMII_CLK_2_5M | @@ -378,6 +395,8 @@ static void rk3228_integrated_phy_powerup(struct rk_priv_data *priv) { regmap_write(priv->grf, RK3228_GRF_CON_MUX, RK3228_GRF_CON_MUX_GMAC_INTEGRATED_PHY); + + rk_gmac_integrated_ephy_powerup(priv); } static const struct rk_gmac_ops rk3228_ops = { @@ -385,7 +404,8 @@ static const struct rk_gmac_ops rk3228_ops = { .set_to_rmii = rk3228_set_to_rmii, .set_rgmii_speed = rk3228_set_rgmii_speed, .set_rmii_speed = rk3228_set_rmii_speed, - .integrated_phy_powerup = rk3228_integrated_phy_powerup, + .integrated_phy_powerup = rk3228_integrated_phy_powerup, + .integrated_phy_powerdown = rk_gmac_integrated_ephy_powerdown, }; #define RK3288_GRF_SOC_CON1 0x0248 @@ -419,13 +439,6 @@ static const struct rk_gmac_ops rk3228_ops = { static void rk3288_set_to_rgmii(struct rk_priv_data *bsp_priv, int tx_delay, int rx_delay) { - struct device *dev = &bsp_priv->pdev->dev; - - if (IS_ERR(bsp_priv->grf)) { - dev_err(dev, "Missing rockchip,grf property\n"); - return; - } - regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1, RK3288_GMAC_PHY_INTF_SEL_RGMII | RK3288_GMAC_RMII_MODE_CLR); @@ -437,13 +450,6 @@ static void rk3288_set_to_rgmii(struct rk_priv_data *bsp_priv, static void rk3288_set_to_rmii(struct rk_priv_data *bsp_priv) { - struct device *dev = &bsp_priv->pdev->dev; - - if (IS_ERR(bsp_priv->grf)) { - dev_err(dev, "Missing rockchip,grf property\n"); - return; - } - regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1, RK3288_GMAC_PHY_INTF_SEL_RMII | 
RK3288_GMAC_RMII_MODE); } @@ -452,11 +458,6 @@ static void rk3288_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed) { struct device *dev = &bsp_priv->pdev->dev; - if (IS_ERR(bsp_priv->grf)) { - dev_err(dev, "Missing rockchip,grf property\n"); - return; - } - if (speed == 10) regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1, RK3288_GMAC_CLK_2_5M); @@ -474,11 +475,6 @@ static void rk3288_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed) { struct device *dev = &bsp_priv->pdev->dev; - if (IS_ERR(bsp_priv->grf)) { - dev_err(dev, "Missing rockchip,grf property\n"); - return; - } - if (speed == 10) { regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1, RK3288_GMAC_RMII_CLK_2_5M | @@ -511,13 +507,6 @@ static const struct rk_gmac_ops rk3288_ops = { static void rk3308_set_to_rmii(struct rk_priv_data *bsp_priv) { - struct device *dev = &bsp_priv->pdev->dev; - - if (IS_ERR(bsp_priv->grf)) { - dev_err(dev, "Missing rockchip,grf property\n"); - return; - } - regmap_write(bsp_priv->grf, RK3308_GRF_MAC_CON0, RK3308_GMAC_PHY_INTF_SEL_RMII); } @@ -526,11 +515,6 @@ static void rk3308_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed) { struct device *dev = &bsp_priv->pdev->dev; - if (IS_ERR(bsp_priv->grf)) { - dev_err(dev, "Missing rockchip,grf property\n"); - return; - } - if (speed == 10) { regmap_write(bsp_priv->grf, RK3308_GRF_MAC_CON0, RK3308_GMAC_SPEED_10M); @@ -583,13 +567,6 @@ static const struct rk_gmac_ops rk3308_ops = { static void rk3328_set_to_rgmii(struct rk_priv_data *bsp_priv, int tx_delay, int rx_delay) { - struct device *dev = &bsp_priv->pdev->dev; - - if (IS_ERR(bsp_priv->grf)) { - dev_err(dev, "Missing rockchip,grf property\n"); - return; - } - regmap_write(bsp_priv->grf, RK3328_GRF_MAC_CON1, RK3328_GMAC_PHY_INTF_SEL_RGMII | RK3328_GMAC_RMII_MODE_CLR | @@ -603,14 +580,8 @@ static void rk3328_set_to_rgmii(struct rk_priv_data *bsp_priv, static void rk3328_set_to_rmii(struct rk_priv_data *bsp_priv) { - struct device *dev = &bsp_priv->pdev->dev; unsigned int reg; - if (IS_ERR(bsp_priv->grf)) { - dev_err(dev, "Missing rockchip,grf property\n"); - return; - } - reg = bsp_priv->integrated_phy ? RK3328_GRF_MAC_CON2 : RK3328_GRF_MAC_CON1; @@ -623,11 +594,6 @@ static void rk3328_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed) { struct device *dev = &bsp_priv->pdev->dev; - if (IS_ERR(bsp_priv->grf)) { - dev_err(dev, "Missing rockchip,grf property\n"); - return; - } - if (speed == 10) regmap_write(bsp_priv->grf, RK3328_GRF_MAC_CON1, RK3328_GMAC_CLK_2_5M); @@ -646,11 +612,6 @@ static void rk3328_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed) struct device *dev = &bsp_priv->pdev->dev; unsigned int reg; - if (IS_ERR(bsp_priv->grf)) { - dev_err(dev, "Missing rockchip,grf property\n"); - return; - } - reg = bsp_priv->integrated_phy ? 
RK3328_GRF_MAC_CON2 : RK3328_GRF_MAC_CON1; @@ -670,6 +631,8 @@ static void rk3328_integrated_phy_powerup(struct rk_priv_data *priv) { regmap_write(priv->grf, RK3328_GRF_MACPHY_CON1, RK3328_MACPHY_RMII_MODE); + + rk_gmac_integrated_ephy_powerup(priv); } static const struct rk_gmac_ops rk3328_ops = { @@ -677,7 +640,8 @@ static const struct rk_gmac_ops rk3328_ops = { .set_to_rmii = rk3328_set_to_rmii, .set_rgmii_speed = rk3328_set_rgmii_speed, .set_rmii_speed = rk3328_set_rmii_speed, - .integrated_phy_powerup = rk3328_integrated_phy_powerup, + .integrated_phy_powerup = rk3328_integrated_phy_powerup, + .integrated_phy_powerdown = rk_gmac_integrated_ephy_powerdown, }; #define RK3366_GRF_SOC_CON6 0x0418 @@ -711,13 +675,6 @@ static const struct rk_gmac_ops rk3328_ops = { static void rk3366_set_to_rgmii(struct rk_priv_data *bsp_priv, int tx_delay, int rx_delay) { - struct device *dev = &bsp_priv->pdev->dev; - - if (IS_ERR(bsp_priv->grf)) { - dev_err(dev, "%s: Missing rockchip,grf property\n", __func__); - return; - } - regmap_write(bsp_priv->grf, RK3366_GRF_SOC_CON6, RK3366_GMAC_PHY_INTF_SEL_RGMII | RK3366_GMAC_RMII_MODE_CLR); @@ -729,13 +686,6 @@ static void rk3366_set_to_rgmii(struct rk_priv_data *bsp_priv, static void rk3366_set_to_rmii(struct rk_priv_data *bsp_priv) { - struct device *dev = &bsp_priv->pdev->dev; - - if (IS_ERR(bsp_priv->grf)) { - dev_err(dev, "%s: Missing rockchip,grf property\n", __func__); - return; - } - regmap_write(bsp_priv->grf, RK3366_GRF_SOC_CON6, RK3366_GMAC_PHY_INTF_SEL_RMII | RK3366_GMAC_RMII_MODE); } @@ -744,11 +694,6 @@ static void rk3366_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed) { struct device *dev = &bsp_priv->pdev->dev; - if (IS_ERR(bsp_priv->grf)) { - dev_err(dev, "%s: Missing rockchip,grf property\n", __func__); - return; - } - if (speed == 10) regmap_write(bsp_priv->grf, RK3366_GRF_SOC_CON6, RK3366_GMAC_CLK_2_5M); @@ -766,11 +711,6 @@ static void rk3366_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed) { struct device *dev = &bsp_priv->pdev->dev; - if (IS_ERR(bsp_priv->grf)) { - dev_err(dev, "%s: Missing rockchip,grf property\n", __func__); - return; - } - if (speed == 10) { regmap_write(bsp_priv->grf, RK3366_GRF_SOC_CON6, RK3366_GMAC_RMII_CLK_2_5M | @@ -822,13 +762,6 @@ static const struct rk_gmac_ops rk3366_ops = { static void rk3368_set_to_rgmii(struct rk_priv_data *bsp_priv, int tx_delay, int rx_delay) { - struct device *dev = &bsp_priv->pdev->dev; - - if (IS_ERR(bsp_priv->grf)) { - dev_err(dev, "%s: Missing rockchip,grf property\n", __func__); - return; - } - regmap_write(bsp_priv->grf, RK3368_GRF_SOC_CON15, RK3368_GMAC_PHY_INTF_SEL_RGMII | RK3368_GMAC_RMII_MODE_CLR); @@ -840,13 +773,6 @@ static void rk3368_set_to_rgmii(struct rk_priv_data *bsp_priv, static void rk3368_set_to_rmii(struct rk_priv_data *bsp_priv) { - struct device *dev = &bsp_priv->pdev->dev; - - if (IS_ERR(bsp_priv->grf)) { - dev_err(dev, "%s: Missing rockchip,grf property\n", __func__); - return; - } - regmap_write(bsp_priv->grf, RK3368_GRF_SOC_CON15, RK3368_GMAC_PHY_INTF_SEL_RMII | RK3368_GMAC_RMII_MODE); } @@ -855,11 +781,6 @@ static void rk3368_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed) { struct device *dev = &bsp_priv->pdev->dev; - if (IS_ERR(bsp_priv->grf)) { - dev_err(dev, "%s: Missing rockchip,grf property\n", __func__); - return; - } - if (speed == 10) regmap_write(bsp_priv->grf, RK3368_GRF_SOC_CON15, RK3368_GMAC_CLK_2_5M); @@ -877,11 +798,6 @@ static void rk3368_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed) { struct device *dev 
= &bsp_priv->pdev->dev; - if (IS_ERR(bsp_priv->grf)) { - dev_err(dev, "%s: Missing rockchip,grf property\n", __func__); - return; - } - if (speed == 10) { regmap_write(bsp_priv->grf, RK3368_GRF_SOC_CON15, RK3368_GMAC_RMII_CLK_2_5M | @@ -933,13 +849,6 @@ static const struct rk_gmac_ops rk3368_ops = { static void rk3399_set_to_rgmii(struct rk_priv_data *bsp_priv, int tx_delay, int rx_delay) { - struct device *dev = &bsp_priv->pdev->dev; - - if (IS_ERR(bsp_priv->grf)) { - dev_err(dev, "%s: Missing rockchip,grf property\n", __func__); - return; - } - regmap_write(bsp_priv->grf, RK3399_GRF_SOC_CON5, RK3399_GMAC_PHY_INTF_SEL_RGMII | RK3399_GMAC_RMII_MODE_CLR); @@ -951,13 +860,6 @@ static void rk3399_set_to_rgmii(struct rk_priv_data *bsp_priv, static void rk3399_set_to_rmii(struct rk_priv_data *bsp_priv) { - struct device *dev = &bsp_priv->pdev->dev; - - if (IS_ERR(bsp_priv->grf)) { - dev_err(dev, "%s: Missing rockchip,grf property\n", __func__); - return; - } - regmap_write(bsp_priv->grf, RK3399_GRF_SOC_CON5, RK3399_GMAC_PHY_INTF_SEL_RMII | RK3399_GMAC_RMII_MODE); } @@ -966,11 +868,6 @@ static void rk3399_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed) { struct device *dev = &bsp_priv->pdev->dev; - if (IS_ERR(bsp_priv->grf)) { - dev_err(dev, "%s: Missing rockchip,grf property\n", __func__); - return; - } - if (speed == 10) regmap_write(bsp_priv->grf, RK3399_GRF_SOC_CON5, RK3399_GMAC_CLK_2_5M); @@ -988,11 +885,6 @@ static void rk3399_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed) { struct device *dev = &bsp_priv->pdev->dev; - if (IS_ERR(bsp_priv->grf)) { - dev_err(dev, "%s: Missing rockchip,grf property\n", __func__); - return; - } - if (speed == 10) { regmap_write(bsp_priv->grf, RK3399_GRF_SOC_CON5, RK3399_GMAC_RMII_CLK_2_5M | @@ -1013,6 +905,149 @@ static const struct rk_gmac_ops rk3399_ops = { .set_rmii_speed = rk3399_set_rmii_speed, }; +#define RK3528_VO_GRF_GMAC_CON 0x0018 +#define RK3528_VO_GRF_MACPHY_CON0 0x001c +#define RK3528_VO_GRF_MACPHY_CON1 0x0020 +#define RK3528_VPU_GRF_GMAC_CON5 0x0018 +#define RK3528_VPU_GRF_GMAC_CON6 0x001c + +#define RK3528_GMAC_RXCLK_DLY_ENABLE GRF_BIT(15) +#define RK3528_GMAC_RXCLK_DLY_DISABLE GRF_CLR_BIT(15) +#define RK3528_GMAC_TXCLK_DLY_ENABLE GRF_BIT(14) +#define RK3528_GMAC_TXCLK_DLY_DISABLE GRF_CLR_BIT(14) + +#define RK3528_GMAC_CLK_RX_DL_CFG(val) HIWORD_UPDATE(val, 0xFF, 8) +#define RK3528_GMAC_CLK_TX_DL_CFG(val) HIWORD_UPDATE(val, 0xFF, 0) + +#define RK3528_GMAC0_PHY_INTF_SEL_RMII GRF_BIT(1) +#define RK3528_GMAC1_PHY_INTF_SEL_RGMII GRF_CLR_BIT(8) +#define RK3528_GMAC1_PHY_INTF_SEL_RMII GRF_BIT(8) + +#define RK3528_GMAC1_CLK_SELECT_CRU GRF_CLR_BIT(12) +#define RK3528_GMAC1_CLK_SELECT_IO GRF_BIT(12) + +#define RK3528_GMAC0_CLK_RMII_DIV2 GRF_BIT(3) +#define RK3528_GMAC0_CLK_RMII_DIV20 GRF_CLR_BIT(3) +#define RK3528_GMAC1_CLK_RMII_DIV2 GRF_BIT(10) +#define RK3528_GMAC1_CLK_RMII_DIV20 GRF_CLR_BIT(10) + +#define RK3528_GMAC1_CLK_RGMII_DIV1 (GRF_CLR_BIT(11) | GRF_CLR_BIT(10)) +#define RK3528_GMAC1_CLK_RGMII_DIV5 (GRF_BIT(11) | GRF_BIT(10)) +#define RK3528_GMAC1_CLK_RGMII_DIV50 (GRF_BIT(11) | GRF_CLR_BIT(10)) + +#define RK3528_GMAC0_CLK_RMII_GATE GRF_BIT(2) +#define RK3528_GMAC0_CLK_RMII_NOGATE GRF_CLR_BIT(2) +#define RK3528_GMAC1_CLK_RMII_GATE GRF_BIT(9) +#define RK3528_GMAC1_CLK_RMII_NOGATE GRF_CLR_BIT(9) + +static void rk3528_set_to_rgmii(struct rk_priv_data *bsp_priv, + int tx_delay, int rx_delay) +{ + regmap_write(bsp_priv->grf, RK3528_VPU_GRF_GMAC_CON5, + RK3528_GMAC1_PHY_INTF_SEL_RGMII); + + regmap_write(bsp_priv->grf, 
RK3528_VPU_GRF_GMAC_CON5, + DELAY_ENABLE(RK3528, tx_delay, rx_delay)); + + regmap_write(bsp_priv->grf, RK3528_VPU_GRF_GMAC_CON6, + RK3528_GMAC_CLK_RX_DL_CFG(rx_delay) | + RK3528_GMAC_CLK_TX_DL_CFG(tx_delay)); +} + +static void rk3528_set_to_rmii(struct rk_priv_data *bsp_priv) +{ + if (bsp_priv->id == 1) + regmap_write(bsp_priv->grf, RK3528_VPU_GRF_GMAC_CON5, + RK3528_GMAC1_PHY_INTF_SEL_RMII); + else + regmap_write(bsp_priv->grf, RK3528_VO_GRF_GMAC_CON, + RK3528_GMAC0_PHY_INTF_SEL_RMII | + RK3528_GMAC0_CLK_RMII_DIV2); +} + +static void rk3528_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed) +{ + struct device *dev = &bsp_priv->pdev->dev; + + if (speed == 10) + regmap_write(bsp_priv->grf, RK3528_VPU_GRF_GMAC_CON5, + RK3528_GMAC1_CLK_RGMII_DIV50); + else if (speed == 100) + regmap_write(bsp_priv->grf, RK3528_VPU_GRF_GMAC_CON5, + RK3528_GMAC1_CLK_RGMII_DIV5); + else if (speed == 1000) + regmap_write(bsp_priv->grf, RK3528_VPU_GRF_GMAC_CON5, + RK3528_GMAC1_CLK_RGMII_DIV1); + else + dev_err(dev, "unknown speed value for RGMII! speed=%d", speed); +} + +static void rk3528_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed) +{ + struct device *dev = &bsp_priv->pdev->dev; + unsigned int reg, val; + + if (speed == 10) + val = bsp_priv->id == 1 ? RK3528_GMAC1_CLK_RMII_DIV20 : + RK3528_GMAC0_CLK_RMII_DIV20; + else if (speed == 100) + val = bsp_priv->id == 1 ? RK3528_GMAC1_CLK_RMII_DIV2 : + RK3528_GMAC0_CLK_RMII_DIV2; + else { + dev_err(dev, "unknown speed value for RMII! speed=%d", speed); + return; + } + + reg = bsp_priv->id == 1 ? RK3528_VPU_GRF_GMAC_CON5 : + RK3528_VO_GRF_GMAC_CON; + + regmap_write(bsp_priv->grf, reg, val); +} + +static void rk3528_set_clock_selection(struct rk_priv_data *bsp_priv, + bool input, bool enable) +{ + unsigned int val; + + if (bsp_priv->id == 1) { + val = input ? RK3528_GMAC1_CLK_SELECT_IO : + RK3528_GMAC1_CLK_SELECT_CRU; + val |= enable ? RK3528_GMAC1_CLK_RMII_NOGATE : + RK3528_GMAC1_CLK_RMII_GATE; + regmap_write(bsp_priv->grf, RK3528_VPU_GRF_GMAC_CON5, val); + } else { + val = enable ? RK3528_GMAC0_CLK_RMII_NOGATE : + RK3528_GMAC0_CLK_RMII_GATE; + regmap_write(bsp_priv->grf, RK3528_VO_GRF_GMAC_CON, val); + } +} + +static void rk3528_integrated_phy_powerup(struct rk_priv_data *bsp_priv) +{ + rk_gmac_integrated_fephy_powerup(bsp_priv, RK3528_VO_GRF_MACPHY_CON0); +} + +static void rk3528_integrated_phy_powerdown(struct rk_priv_data *bsp_priv) +{ + rk_gmac_integrated_fephy_powerdown(bsp_priv, RK3528_VO_GRF_MACPHY_CON0); +} + +static const struct rk_gmac_ops rk3528_ops = { + .set_to_rgmii = rk3528_set_to_rgmii, + .set_to_rmii = rk3528_set_to_rmii, + .set_rgmii_speed = rk3528_set_rgmii_speed, + .set_rmii_speed = rk3528_set_rmii_speed, + .set_clock_selection = rk3528_set_clock_selection, + .integrated_phy_powerup = rk3528_integrated_phy_powerup, + .integrated_phy_powerdown = rk3528_integrated_phy_powerdown, + .regs_valid = true, + .regs = { + 0xffbd0000, /* gmac0 */ + 0xffbe0000, /* gmac1 */ + 0x0, /* sentinel */ + }, +}; + #define RK3568_GRF_GMAC0_CON0 0x0380 #define RK3568_GRF_GMAC0_CON1 0x0384 #define RK3568_GRF_GMAC1_CON0 0x0388 @@ -1037,14 +1072,8 @@ static const struct rk_gmac_ops rk3399_ops = { static void rk3568_set_to_rgmii(struct rk_priv_data *bsp_priv, int tx_delay, int rx_delay) { - struct device *dev = &bsp_priv->pdev->dev; u32 con0, con1; - if (IS_ERR(bsp_priv->grf)) { - dev_err(dev, "Missing rockchip,grf property\n"); - return; - } - con0 = (bsp_priv->id == 1) ? RK3568_GRF_GMAC1_CON0 : RK3568_GRF_GMAC0_CON0; con1 = (bsp_priv->id == 1) ? 
RK3568_GRF_GMAC1_CON1 : @@ -1062,14 +1091,8 @@ static void rk3568_set_to_rgmii(struct rk_priv_data *bsp_priv, static void rk3568_set_to_rmii(struct rk_priv_data *bsp_priv) { - struct device *dev = &bsp_priv->pdev->dev; u32 con1; - if (IS_ERR(bsp_priv->grf)) { - dev_err(dev, "%s: Missing rockchip,grf property\n", __func__); - return; - } - con1 = (bsp_priv->id == 1) ? RK3568_GRF_GMAC1_CON1 : RK3568_GRF_GMAC0_CON1; regmap_write(bsp_priv->grf, con1, RK3568_GMAC_PHY_INTF_SEL_RMII); @@ -1147,14 +1170,8 @@ static const struct rk_gmac_ops rk3568_ops = { static void rk3576_set_to_rgmii(struct rk_priv_data *bsp_priv, int tx_delay, int rx_delay) { - struct device *dev = &bsp_priv->pdev->dev; unsigned int offset_con; - if (IS_ERR(bsp_priv->grf) || IS_ERR(bsp_priv->php_grf)) { - dev_err(dev, "Missing rockchip,grf or rockchip,php-grf property\n"); - return; - } - offset_con = bsp_priv->id == 1 ? RK3576_GRF_GMAC_CON1 : RK3576_GRF_GMAC_CON0; @@ -1180,14 +1197,8 @@ static void rk3576_set_to_rgmii(struct rk_priv_data *bsp_priv, static void rk3576_set_to_rmii(struct rk_priv_data *bsp_priv) { - struct device *dev = &bsp_priv->pdev->dev; unsigned int offset_con; - if (IS_ERR(bsp_priv->grf)) { - dev_err(dev, "%s: Missing rockchip,grf property\n", __func__); - return; - } - offset_con = bsp_priv->id == 1 ? RK3576_GRF_GMAC_CON1 : RK3576_GRF_GMAC_CON0; @@ -1254,6 +1265,7 @@ static const struct rk_gmac_ops rk3576_ops = { .set_rgmii_speed = rk3576_set_gmac_speed, .set_rmii_speed = rk3576_set_gmac_speed, .set_clock_selection = rk3576_set_clock_selection, + .php_grf_required = true, .regs_valid = true, .regs = { 0x2a220000, /* gmac0 */ @@ -1306,14 +1318,8 @@ static const struct rk_gmac_ops rk3576_ops = { static void rk3588_set_to_rgmii(struct rk_priv_data *bsp_priv, int tx_delay, int rx_delay) { - struct device *dev = &bsp_priv->pdev->dev; u32 offset_con, id = bsp_priv->id; - if (IS_ERR(bsp_priv->grf) || IS_ERR(bsp_priv->php_grf)) { - dev_err(dev, "Missing rockchip,grf or rockchip,php_grf property\n"); - return; - } - offset_con = bsp_priv->id == 1 ? 
RK3588_GRF_GMAC_CON9 : RK3588_GRF_GMAC_CON8; @@ -1334,13 +1340,6 @@ static void rk3588_set_to_rgmii(struct rk_priv_data *bsp_priv, static void rk3588_set_to_rmii(struct rk_priv_data *bsp_priv) { - struct device *dev = &bsp_priv->pdev->dev; - - if (IS_ERR(bsp_priv->php_grf)) { - dev_err(dev, "%s: Missing rockchip,php_grf property\n", __func__); - return; - } - regmap_write(bsp_priv->php_grf, RK3588_GRF_GMAC_CON0, RK3588_GMAC_PHY_INTF_SEL_RMII(bsp_priv->id)); @@ -1401,6 +1400,7 @@ static const struct rk_gmac_ops rk3588_ops = { .set_rgmii_speed = rk3588_set_gmac_speed, .set_rmii_speed = rk3588_set_gmac_speed, .set_clock_selection = rk3588_set_clock_selection, + .php_grf_required = true, .regs_valid = true, .regs = { 0xfe1b0000, /* gmac0 */ @@ -1423,13 +1423,6 @@ static const struct rk_gmac_ops rk3588_ops = { static void rv1108_set_to_rmii(struct rk_priv_data *bsp_priv) { - struct device *dev = &bsp_priv->pdev->dev; - - if (IS_ERR(bsp_priv->grf)) { - dev_err(dev, "%s: Missing rockchip,grf property\n", __func__); - return; - } - regmap_write(bsp_priv->grf, RV1108_GRF_GMAC_CON0, RV1108_GMAC_PHY_INTF_SEL_RMII); } @@ -1438,11 +1431,6 @@ static void rv1108_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed) { struct device *dev = &bsp_priv->pdev->dev; - if (IS_ERR(bsp_priv->grf)) { - dev_err(dev, "%s: Missing rockchip,grf property\n", __func__); - return; - } - if (speed == 10) { regmap_write(bsp_priv->grf, RV1108_GRF_GMAC_CON0, RV1108_GMAC_RMII_CLK_2_5M | @@ -1491,13 +1479,6 @@ static const struct rk_gmac_ops rv1108_ops = { static void rv1126_set_to_rgmii(struct rk_priv_data *bsp_priv, int tx_delay, int rx_delay) { - struct device *dev = &bsp_priv->pdev->dev; - - if (IS_ERR(bsp_priv->grf)) { - dev_err(dev, "Missing rockchip,grf property\n"); - return; - } - regmap_write(bsp_priv->grf, RV1126_GRF_GMAC_CON0, RV1126_GMAC_PHY_INTF_SEL_RGMII | RV1126_GMAC_M0_RXCLK_DLY_ENABLE | @@ -1516,13 +1497,6 @@ static void rv1126_set_to_rgmii(struct rk_priv_data *bsp_priv, static void rv1126_set_to_rmii(struct rk_priv_data *bsp_priv) { - struct device *dev = &bsp_priv->pdev->dev; - - if (IS_ERR(bsp_priv->grf)) { - dev_err(dev, "%s: Missing rockchip,grf property\n", __func__); - return; - } - regmap_write(bsp_priv->grf, RV1126_GRF_GMAC_CON0, RV1126_GMAC_PHY_INTF_SEL_RMII); } @@ -1578,50 +1552,6 @@ static const struct rk_gmac_ops rv1126_ops = { .set_rmii_speed = rv1126_set_rmii_speed, }; -#define RK_GRF_MACPHY_CON0 0xb00 -#define RK_GRF_MACPHY_CON1 0xb04 -#define RK_GRF_MACPHY_CON2 0xb08 -#define RK_GRF_MACPHY_CON3 0xb0c - -#define RK_MACPHY_ENABLE GRF_BIT(0) -#define RK_MACPHY_DISABLE GRF_CLR_BIT(0) -#define RK_MACPHY_CFG_CLK_50M GRF_BIT(14) -#define RK_GMAC2PHY_RMII_MODE (GRF_BIT(6) | GRF_CLR_BIT(7)) -#define RK_GRF_CON2_MACPHY_ID HIWORD_UPDATE(0x1234, 0xffff, 0) -#define RK_GRF_CON3_MACPHY_ID HIWORD_UPDATE(0x35, 0x3f, 0) - -static void rk_gmac_integrated_phy_powerup(struct rk_priv_data *priv) -{ - if (priv->ops->integrated_phy_powerup) - priv->ops->integrated_phy_powerup(priv); - - regmap_write(priv->grf, RK_GRF_MACPHY_CON0, RK_MACPHY_CFG_CLK_50M); - regmap_write(priv->grf, RK_GRF_MACPHY_CON0, RK_GMAC2PHY_RMII_MODE); - - regmap_write(priv->grf, RK_GRF_MACPHY_CON2, RK_GRF_CON2_MACPHY_ID); - regmap_write(priv->grf, RK_GRF_MACPHY_CON3, RK_GRF_CON3_MACPHY_ID); - - if (priv->phy_reset) { - /* PHY needs to be disabled before trying to reset it */ - regmap_write(priv->grf, RK_GRF_MACPHY_CON0, RK_MACPHY_DISABLE); - if (priv->phy_reset) - reset_control_assert(priv->phy_reset); - usleep_range(10, 20); - if 
(priv->phy_reset) - reset_control_deassert(priv->phy_reset); - usleep_range(10, 20); - regmap_write(priv->grf, RK_GRF_MACPHY_CON0, RK_MACPHY_ENABLE); - msleep(30); - } -} - -static void rk_gmac_integrated_phy_powerdown(struct rk_priv_data *priv) -{ - regmap_write(priv->grf, RK_GRF_MACPHY_CON0, RK_MACPHY_DISABLE); - if (priv->phy_reset) - reset_control_assert(priv->phy_reset); -} - static int rk_gmac_clk_init(struct plat_stmmacenet_data *plat) { struct rk_priv_data *bsp_priv = plat->bsp_priv; @@ -1749,7 +1679,7 @@ static struct rk_priv_data *rk_gmac_setup(struct platform_device *pdev, if (!bsp_priv) return ERR_PTR(-ENOMEM); - of_get_phy_mode(dev->of_node, &bsp_priv->phy_iface); + bsp_priv->phy_iface = plat->phy_interface; bsp_priv->ops = ops; /* Some SoCs have multiple MAC controllers, which need @@ -1812,8 +1742,22 @@ static struct rk_priv_data *rk_gmac_setup(struct platform_device *pdev, bsp_priv->grf = syscon_regmap_lookup_by_phandle(dev->of_node, "rockchip,grf"); - bsp_priv->php_grf = syscon_regmap_lookup_by_phandle(dev->of_node, - "rockchip,php-grf"); + if (IS_ERR(bsp_priv->grf)) { + dev_err_probe(dev, PTR_ERR(bsp_priv->grf), + "failed to lookup rockchip,grf\n"); + return ERR_CAST(bsp_priv->grf); + } + + if (ops->php_grf_required) { + bsp_priv->php_grf = + syscon_regmap_lookup_by_phandle(dev->of_node, + "rockchip,php-grf"); + if (IS_ERR(bsp_priv->php_grf)) { + dev_err_probe(dev, PTR_ERR(bsp_priv->php_grf), + "failed to lookup rockchip,php-grf\n"); + return ERR_CAST(bsp_priv->php_grf); + } + } if (plat->phy_node) { bsp_priv->integrated_phy = of_property_read_bool(plat->phy_node, @@ -1903,16 +1847,16 @@ static int rk_gmac_powerup(struct rk_priv_data *bsp_priv) pm_runtime_get_sync(dev); - if (bsp_priv->integrated_phy) - rk_gmac_integrated_phy_powerup(bsp_priv); + if (bsp_priv->integrated_phy && bsp_priv->ops->integrated_phy_powerup) + bsp_priv->ops->integrated_phy_powerup(bsp_priv); return 0; } static void rk_gmac_powerdown(struct rk_priv_data *gmac) { - if (gmac->integrated_phy) - rk_gmac_integrated_phy_powerdown(gmac); + if (gmac->integrated_phy && gmac->ops->integrated_phy_powerdown) + gmac->ops->integrated_phy_powerdown(gmac); pm_runtime_put_sync(&gmac->pdev->dev); @@ -1920,9 +1864,10 @@ static void rk_gmac_powerdown(struct rk_priv_data *gmac) gmac_clk_enable(gmac, false); } -static void rk_fix_speed(void *priv, unsigned int speed, unsigned int mode) +static int rk_set_clk_tx_rate(void *bsp_priv_, struct clk *clk_tx_i, + phy_interface_t interface, int speed) { - struct rk_priv_data *bsp_priv = priv; + struct rk_priv_data *bsp_priv = bsp_priv_; struct device *dev = &bsp_priv->pdev->dev; switch (bsp_priv->phy_iface) { @@ -1940,6 +1885,8 @@ static void rk_fix_speed(void *priv, unsigned int speed, unsigned int mode) default: dev_err(dev, "unsupported interface %d", bsp_priv->phy_iface); } + + return 0; } static int rk_gmac_probe(struct platform_device *pdev) @@ -1966,9 +1913,13 @@ static int rk_gmac_probe(struct platform_device *pdev) /* If the stmmac is not already selected as gmac4, * then make sure we fallback to gmac. 
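 * The older GMAC core also does not report its FIFO sizes, hence
 * the default Rx/Tx FIFO sizes supplied below.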
*/ - if (!plat_dat->has_gmac4) + if (!plat_dat->has_gmac4) { plat_dat->has_gmac = true; - plat_dat->fix_mac_speed = rk_fix_speed; + plat_dat->rx_fifo_size = 4096; + plat_dat->tx_fifo_size = 2048; + } + + plat_dat->set_clk_tx_rate = rk_set_clk_tx_rate; plat_dat->bsp_priv = rk_gmac_setup(pdev, plat_dat, data); if (IS_ERR(plat_dat->bsp_priv)) @@ -2044,6 +1995,7 @@ static const struct of_device_id rk_gmac_dwmac_match[] = { { .compatible = "rockchip,rk3366-gmac", .data = &rk3366_ops }, { .compatible = "rockchip,rk3368-gmac", .data = &rk3368_ops }, { .compatible = "rockchip,rk3399-gmac", .data = &rk3399_ops }, + { .compatible = "rockchip,rk3528-gmac", .data = &rk3528_ops }, { .compatible = "rockchip,rk3568-gmac", .data = &rk3568_ops }, { .compatible = "rockchip,rk3576-gmac", .data = &rk3576_ops }, { .compatible = "rockchip,rk3588-gmac", .data = &rk3588_ops }, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-s32.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-s32.c index 9cc0e5817416e..221539d760bc8 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-s32.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-s32.c @@ -100,24 +100,6 @@ static void s32_gmac_exit(struct platform_device *pdev, void *priv) clk_disable_unprepare(gmac->rx_clk); } -static void s32_fix_mac_speed(void *priv, unsigned int speed, unsigned int mode) -{ - struct s32_priv_data *gmac = priv; - long tx_clk_rate; - int ret; - - tx_clk_rate = rgmii_clock(speed); - if (tx_clk_rate < 0) { - dev_err(gmac->dev, "Unsupported/Invalid speed: %d\n", speed); - return; - } - - dev_dbg(gmac->dev, "Set tx clock to %ld Hz\n", tx_clk_rate); - ret = clk_set_rate(gmac->tx_clk, tx_clk_rate); - if (ret) - dev_err(gmac->dev, "Can't set tx clock\n"); -} - static int s32_dwmac_probe(struct platform_device *pdev) { struct plat_stmmacenet_data *plat; @@ -172,7 +154,9 @@ static int s32_dwmac_probe(struct platform_device *pdev) plat->init = s32_gmac_init; plat->exit = s32_gmac_exit; - plat->fix_mac_speed = s32_fix_mac_speed; + + plat->clk_tx_i = gmac->tx_clk; + plat->set_clk_tx_rate = stmmac_set_clk_tx_rate; plat->bsp_priv = gmac; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c index 16020b72dec83..1168556585597 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c @@ -61,7 +61,7 @@ struct socfpga_dwmac { struct mdio_device *pcs_mdiodev; }; -static void socfpga_dwmac_fix_mac_speed(void *priv, unsigned int speed, unsigned int mode) +static void socfpga_dwmac_fix_mac_speed(void *priv, int speed, unsigned int mode) { struct socfpga_dwmac *dwmac = (struct socfpga_dwmac *)priv; void __iomem *splitter_base = dwmac->splitter_base; @@ -523,24 +523,6 @@ static int socfpga_dwmac_resume(struct device *dev) dwmac_priv->ops->set_phy_mode(priv->plat->bsp_priv); - /* Before the enet controller is suspended, the phy is suspended. - * This causes the phy clock to be gated. The enet controller is - * resumed before the phy, so the clock is still gated "off" when - * the enet controller is resumed. This code makes sure the phy - * is "resumed" before reinitializing the enet controller since - * the enet controller depends on an active phy clock to complete - * a DMA reset. A DMA reset will "time out" if executed - * with no phy clock input on the Synopsys enet controller. - * Verified through Synopsys Case #8000711656. - * - * Note that the phy clock is also gated when the phy is isolated. 
- * Phy "suspend" and "isolate" controls are located in phy basic - * control register 0, and can be modified by the phy driver - * framework. - */ - if (ndev->phydev) - phy_resume(ndev->phydev); - return stmmac_resume(dev); } #endif /* CONFIG_PM_SLEEP */ diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sophgo.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sophgo.c new file mode 100644 index 0000000000000..3303784cbbf8e --- /dev/null +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sophgo.c @@ -0,0 +1,75 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Sophgo DWMAC platform driver + * + * Copyright (C) 2024 Inochi Amaoto + */ + +#include +#include +#include +#include + +#include "stmmac_platform.h" + +static int sophgo_sg2044_dwmac_init(struct platform_device *pdev, + struct plat_stmmacenet_data *plat_dat, + struct stmmac_resources *stmmac_res) +{ + plat_dat->clk_tx_i = devm_clk_get_enabled(&pdev->dev, "tx"); + if (IS_ERR(plat_dat->clk_tx_i)) + return dev_err_probe(&pdev->dev, PTR_ERR(plat_dat->clk_tx_i), + "failed to get tx clock\n"); + + plat_dat->flags |= STMMAC_FLAG_SPH_DISABLE; + plat_dat->set_clk_tx_rate = stmmac_set_clk_tx_rate; + plat_dat->multicast_filter_bins = 0; + plat_dat->unicast_filter_entries = 1; + + return 0; +} + +static int sophgo_dwmac_probe(struct platform_device *pdev) +{ + struct plat_stmmacenet_data *plat_dat; + struct stmmac_resources stmmac_res; + struct device *dev = &pdev->dev; + int ret; + + ret = stmmac_get_platform_resources(pdev, &stmmac_res); + if (ret) + return dev_err_probe(dev, ret, + "failed to get platform resources\n"); + + plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac); + if (IS_ERR(plat_dat)) + return dev_err_probe(dev, PTR_ERR(plat_dat), + "failed to parse DT parameters\n"); + + ret = sophgo_sg2044_dwmac_init(pdev, plat_dat, &stmmac_res); + if (ret) + return ret; + + return stmmac_dvr_probe(dev, plat_dat, &stmmac_res); +} + +static const struct of_device_id sophgo_dwmac_match[] = { + { .compatible = "sophgo,sg2044-dwmac" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, sophgo_dwmac_match); + +static struct platform_driver sophgo_dwmac_driver = { + .probe = sophgo_dwmac_probe, + .remove = stmmac_pltfr_remove, + .driver = { + .name = "sophgo-dwmac", + .pm = &stmmac_pltfr_pm_ops, + .of_match_table = sophgo_dwmac_match, + }, +}; +module_platform_driver(sophgo_dwmac_driver); + +MODULE_AUTHOR("Inochi Amaoto "); +MODULE_DESCRIPTION("Sophgo DWMAC platform driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c index 0a0a363d37309..2013d7477eb76 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c @@ -27,27 +27,9 @@ struct starfive_dwmac_data { struct starfive_dwmac { struct device *dev; - struct clk *clk_tx; const struct starfive_dwmac_data *data; }; -static void starfive_dwmac_fix_mac_speed(void *priv, unsigned int speed, unsigned int mode) -{ - struct starfive_dwmac *dwmac = priv; - long rate; - int err; - - rate = rgmii_clock(speed); - if (rate < 0) { - dev_err(dwmac->dev, "invalid speed %u\n", speed); - return; - } - - err = clk_set_rate(dwmac->clk_tx, rate); - if (err) - dev_err(dwmac->dev, "failed to set tx rate %lu\n", rate); -} - static int starfive_dwmac_set_mode(struct plat_stmmacenet_data *plat_dat) { struct starfive_dwmac *dwmac = plat_dat->bsp_priv; @@ -122,9 +104,9 @@ static int starfive_dwmac_probe(struct platform_device *pdev) dwmac->data = 
device_get_match_data(&pdev->dev); - dwmac->clk_tx = devm_clk_get_enabled(&pdev->dev, "tx"); - if (IS_ERR(dwmac->clk_tx)) - return dev_err_probe(&pdev->dev, PTR_ERR(dwmac->clk_tx), + plat_dat->clk_tx_i = devm_clk_get_enabled(&pdev->dev, "tx"); + if (IS_ERR(plat_dat->clk_tx_i)) + return dev_err_probe(&pdev->dev, PTR_ERR(plat_dat->clk_tx_i), "error getting tx clock\n"); clk_gtx = devm_clk_get_enabled(&pdev->dev, "gtx"); @@ -139,9 +121,10 @@ static int starfive_dwmac_probe(struct platform_device *pdev) * internally, because rgmii_rxin will be adaptively adjusted. */ if (!device_property_read_bool(&pdev->dev, "starfive,tx-use-rgmii-clk")) - plat_dat->fix_mac_speed = starfive_dwmac_fix_mac_speed; + plat_dat->set_clk_tx_rate = stmmac_set_clk_tx_rate; dwmac->dev = &pdev->dev; + plat_dat->flags |= STMMAC_FLAG_EN_TX_LPI_CLK_PHY_CAP; plat_dat->bsp_priv = dwmac; plat_dat->dma_cfg->dche = true; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c index f25461c292fec..be57c6c12c1c2 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c @@ -99,12 +99,12 @@ struct sti_dwmac { int clk_sel_reg; /* GMAC ext clk selection register */ struct regmap *regmap; bool gmac_en; - u32 speed; - void (*fix_retime_src)(void *priv, unsigned int speed, unsigned int mode); + int speed; + void (*fix_retime_src)(void *priv, int speed, unsigned int mode); }; struct sti_dwmac_of_data { - void (*fix_retime_src)(void *priv, unsigned int speed, unsigned int mode); + void (*fix_retime_src)(void *priv, int speed, unsigned int mode); }; static u32 phy_intf_sels[] = { @@ -132,7 +132,7 @@ static u32 stih4xx_tx_retime_val[] = { | STIH4XX_ETH_SEL_INTERNAL_NOTEXT_PHYCLK, }; -static void stih4xx_fix_retime_src(void *priv, u32 spd, unsigned int mode) +static void stih4xx_fix_retime_src(void *priv, int spd, unsigned int mode) { struct sti_dwmac *dwmac = priv; u32 src = dwmac->tx_retime_src; @@ -185,7 +185,8 @@ static int sti_dwmac_set_mode(struct sti_dwmac *dwmac) } static int sti_dwmac_parse_data(struct sti_dwmac *dwmac, - struct platform_device *pdev) + struct platform_device *pdev, + struct plat_stmmacenet_data *plat_dat) { struct resource *res; struct device *dev = &pdev->dev; @@ -204,12 +205,7 @@ static int sti_dwmac_parse_data(struct sti_dwmac *dwmac, if (IS_ERR(regmap)) return PTR_ERR(regmap); - err = of_get_phy_mode(np, &dwmac->interface); - if (err && err != -ENODEV) { - dev_err(dev, "Can't get phy-mode\n"); - return err; - } - + dwmac->interface = plat_dat->phy_interface; dwmac->regmap = regmap; dwmac->gmac_en = of_property_read_bool(np, "st,gmac_en"); dwmac->ext_phyclk = of_property_read_bool(np, "st,ext-phyclk"); @@ -268,7 +264,7 @@ static int sti_dwmac_probe(struct platform_device *pdev) if (!dwmac) return -ENOMEM; - ret = sti_dwmac_parse_data(dwmac, pdev); + ret = sti_dwmac_parse_data(dwmac, pdev, plat_dat); if (ret) { dev_err(&pdev->dev, "Unable to parse OF data\n"); return ret; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c index 1fcb74e9e3ffa..c3d321192581f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c @@ -538,6 +538,7 @@ static int stm32_dwmac_probe(struct platform_device *pdev) return ret; } + plat_dat->flags |= STMMAC_FLAG_EN_TX_LPI_CLK_PHY_CAP; plat_dat->bsp_priv = dwmac; ret = stm32_dwmac_init(plat_dat, false); diff --git 
a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c index 4b7b2582a1201..85723a78793ab 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c @@ -1155,11 +1155,10 @@ static int sun8i_dwmac_probe(struct platform_device *pdev) struct stmmac_resources stmmac_res; struct sunxi_priv_data *gmac; struct device *dev = &pdev->dev; - phy_interface_t interface; - int ret; struct stmmac_priv *priv; struct net_device *ndev; struct regmap *regmap; + int ret; ret = stmmac_get_platform_resources(pdev, &stmmac_res); if (ret) @@ -1219,10 +1218,6 @@ static int sun8i_dwmac_probe(struct platform_device *pdev) return ret; } - ret = of_get_phy_mode(dev->of_node, &interface); - if (ret) - return -EINVAL; - plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac); if (IS_ERR(plat_dat)) return PTR_ERR(plat_dat); @@ -1230,7 +1225,6 @@ static int sun8i_dwmac_probe(struct platform_device *pdev) /* platform data specifying hardware features and callbacks. * hardware features were copied from Allwinner drivers. */ - plat_dat->mac_interface = interface; plat_dat->rx_coe = STMMAC_RX_COE_TYPE2; plat_dat->tx_coe = 1; plat_dat->flags |= STMMAC_FLAG_HAS_SUN8I; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c index 9ae318436c4a7..9f098ff0ff058 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c @@ -72,7 +72,7 @@ static void sun7i_gmac_exit(struct platform_device *pdev, void *priv) regulator_disable(gmac->regulator); } -static void sun7i_fix_speed(void *priv, unsigned int speed, unsigned int mode) +static void sun7i_fix_speed(void *priv, int speed, unsigned int mode) { struct sunxi_priv_data *gmac = priv; @@ -116,11 +116,7 @@ static int sun7i_gmac_probe(struct platform_device *pdev) if (!gmac) return -ENOMEM; - ret = of_get_phy_mode(dev->of_node, &gmac->interface); - if (ret && ret != -ENODEV) { - dev_err(dev, "Can't get phy-mode\n"); - return ret; - } + gmac->interface = plat_dat->phy_interface; gmac->tx_clk = devm_clk_get(dev, "allwinner_gmac_tx"); if (IS_ERR(gmac->tx_clk)) { diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-thead.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-thead.c index dce84ed184e9a..c72ee759aae58 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-thead.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-thead.c @@ -45,9 +45,6 @@ #define TXCLK_DIR_OUTPUT FIELD_PREP(TXCLK_DIR_MASK, 0) #define TXCLK_DIR_INPUT FIELD_PREP(TXCLK_DIR_MASK, 1) -#define GMAC_GMII_RGMII_RATE 125000000 -#define GMAC_MII_RATE 25000000 - struct thead_dwmac { struct plat_stmmacenet_data *plat; void __iomem *apb_base; @@ -104,11 +101,13 @@ static int thead_dwmac_set_txclk_dir(struct plat_stmmacenet_data *plat) return 0; } -static void thead_dwmac_fix_speed(void *priv, unsigned int speed, unsigned int mode) +static int thead_set_clk_tx_rate(void *bsp_priv, struct clk *clk_tx_i, + phy_interface_t interface, int speed) { + struct thead_dwmac *dwmac = bsp_priv; struct plat_stmmacenet_data *plat; - struct thead_dwmac *dwmac = priv; unsigned long rate; + long tx_rate; u32 div, reg; plat = dwmac->plat; @@ -116,44 +115,37 @@ static void thead_dwmac_fix_speed(void *priv, unsigned int speed, unsigned int m switch (plat->mac_interface) { /* For MII, rxc/txc is provided by phy */ case PHY_INTERFACE_MODE_MII: - return; + return 0; case PHY_INTERFACE_MODE_RGMII: case 
PHY_INTERFACE_MODE_RGMII_ID: case PHY_INTERFACE_MODE_RGMII_RXID: case PHY_INTERFACE_MODE_RGMII_TXID: rate = clk_get_rate(plat->stmmac_clk); - if (!rate || rate % GMAC_GMII_RGMII_RATE != 0 || - rate % GMAC_MII_RATE != 0) { - dev_err(dwmac->dev, "invalid gmac rate %ld\n", rate); - return; - } writel(0, dwmac->apb_base + GMAC_PLLCLK_DIV); - switch (speed) { - case SPEED_1000: - div = rate / GMAC_GMII_RGMII_RATE; - break; - case SPEED_100: - div = rate / GMAC_MII_RATE; - break; - case SPEED_10: - div = rate * 10 / GMAC_MII_RATE; - break; - default: - dev_err(dwmac->dev, "invalid speed %u\n", speed); - return; + tx_rate = rgmii_clock(speed); + if (tx_rate < 0) { + dev_err(dwmac->dev, "invalid speed %d\n", speed); + return tx_rate; + } + + div = rate / tx_rate; + if (rate != tx_rate * div) { + dev_err(dwmac->dev, "invalid gmac rate %lu\n", rate); + return -EINVAL; } reg = FIELD_PREP(GMAC_PLLCLK_DIV_EN, 1) | FIELD_PREP(GMAC_PLLCLK_DIV_NUM, div); writel(reg, dwmac->apb_base + GMAC_PLLCLK_DIV); - break; + return 0; + default: dev_err(dwmac->dev, "unsupported phy interface %d\n", plat->mac_interface); - return; + return -EINVAL; } } @@ -245,7 +237,7 @@ static int thead_dwmac_probe(struct platform_device *pdev) dwmac->plat = plat; dwmac->apb_base = apb; plat->bsp_priv = dwmac; - plat->fix_mac_speed = thead_dwmac_fix_speed; + plat->set_clk_tx_rate = thead_set_clk_tx_rate; plat->init = thead_dwmac_init; return devm_stmmac_pltfr_probe(pdev, plat, &stmmac_res); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c index eccf7f5374678..33cf99797df53 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c @@ -54,7 +54,7 @@ struct visconti_eth { spinlock_t lock; /* lock to protect register update */ }; -static void visconti_eth_fix_mac_speed(void *priv, unsigned int speed, unsigned int mode) +static void visconti_eth_fix_mac_speed(void *priv, int speed, unsigned int mode) { struct visconti_eth *dwmac = priv; struct net_device *netdev = dev_get_drvdata(dwmac->dev); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h index 600fea8f712fd..967a16212faf0 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h @@ -59,22 +59,11 @@ enum power_event { /* Energy Efficient Ethernet (EEE) * * LPI status, timer and control register offset + * For LPI control and status bit definitions, see common.h. */ #define LPI_CTRL_STATUS 0x0030 #define LPI_TIMER_CTRL 0x0034 -/* LPI control and status defines */ -#define LPI_CTRL_STATUS_LPITXA 0x00080000 /* Enable LPI TX Automate */ -#define LPI_CTRL_STATUS_PLSEN 0x00040000 /* Enable PHY Link Status */ -#define LPI_CTRL_STATUS_PLS 0x00020000 /* PHY Link Status */ -#define LPI_CTRL_STATUS_LPIEN 0x00010000 /* LPI Enable */ -#define LPI_CTRL_STATUS_RLPIST 0x00000200 /* Receive LPI state */ -#define LPI_CTRL_STATUS_TLPIST 0x00000100 /* Transmit LPI state */ -#define LPI_CTRL_STATUS_RLPIEX 0x00000008 /* Receive LPI Exit */ -#define LPI_CTRL_STATUS_RLPIEN 0x00000004 /* Receive LPI Entry */ -#define LPI_CTRL_STATUS_TLPIEX 0x00000002 /* Transmit LPI Exit */ -#define LPI_CTRL_STATUS_TLPIEN 0x00000001 /* Transmit LPI Entry */ - /* GMAC HW ADDR regs */ #define GMAC_ADDR_HIGH(reg) ((reg > 15) ? 
0x00000800 + (reg - 16) * 8 : \ 0x00000040 + (reg * 8)) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c index 96bcda0856ec6..a8b901cdf5cbb 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c @@ -16,6 +16,7 @@ #include #include #include +#include #include "stmmac.h" #include "stmmac_pcs.h" #include "stmmac_ptp.h" @@ -342,31 +343,24 @@ static int dwmac1000_irq_status(struct mac_device_info *hw, return ret; } -static void dwmac1000_set_eee_mode(struct mac_device_info *hw, - bool en_tx_lpi_clockgating) +static int dwmac1000_set_lpi_mode(struct mac_device_info *hw, + enum stmmac_lpi_mode mode, + bool en_tx_lpi_clockgating, u32 et) { void __iomem *ioaddr = hw->pcsr; u32 value; - /*TODO - en_tx_lpi_clockgating treatment */ + if (mode == STMMAC_LPI_TIMER) + return -EOPNOTSUPP; - /* Enable the link status receive on RGMII, SGMII ore SMII - * receive path and instruct the transmit to enter in LPI - * state. - */ value = readl(ioaddr + LPI_CTRL_STATUS); - value |= LPI_CTRL_STATUS_LPIEN | LPI_CTRL_STATUS_LPITXA; + if (mode == STMMAC_LPI_FORCED) + value |= LPI_CTRL_STATUS_LPIEN | LPI_CTRL_STATUS_LPITXA; + else + value &= ~(LPI_CTRL_STATUS_LPIEN | LPI_CTRL_STATUS_LPITXA); writel(value, ioaddr + LPI_CTRL_STATUS); -} - -static void dwmac1000_reset_eee_mode(struct mac_device_info *hw) -{ - void __iomem *ioaddr = hw->pcsr; - u32 value; - value = readl(ioaddr + LPI_CTRL_STATUS); - value &= ~(LPI_CTRL_STATUS_LPIEN | LPI_CTRL_STATUS_LPITXA); - writel(value, ioaddr + LPI_CTRL_STATUS); + return 0; } static void dwmac1000_set_eee_pls(struct mac_device_info *hw, int link) @@ -509,8 +503,7 @@ const struct stmmac_ops dwmac1000_ops = { .pmt = dwmac1000_pmt, .set_umac_addr = dwmac1000_set_umac_addr, .get_umac_addr = dwmac1000_get_umac_addr, - .set_eee_mode = dwmac1000_set_eee_mode, - .reset_eee_mode = dwmac1000_reset_eee_mode, + .set_lpi_mode = dwmac1000_set_lpi_mode, .set_eee_timer = dwmac1000_set_eee_timer, .set_eee_pls = dwmac1000_set_eee_pls, .debug = dwmac1000_debug, @@ -633,7 +626,7 @@ int dwmac1000_ptp_enable(struct ptp_clock_info *ptp, } netdev_dbg(priv->dev, "Auxiliary Snapshot %s.\n", - on ? "enabled" : "disabled"); + str_enabled_disabled(on)); writel(tcr_val, ptpaddr + PTP_TCR); /* wait for auxts fifo clear to finish */ diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h index 184d41a306af0..42fe29a4e300b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h @@ -177,23 +177,13 @@ enum power_event { /* Energy Efficient Ethernet (EEE) for GMAC4 * * LPI status, timer and control register offset + * For LPI control and status bit definitions, see common.h. 
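+ * (The per-core GMAC4_LPI_CTRL_STATUS_* copies of these bits are removed below in favour of the shared LPI_CTRL_STATUS_* names from common.h.)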
*/ #define GMAC4_LPI_CTRL_STATUS 0xd0 #define GMAC4_LPI_TIMER_CTRL 0xd4 #define GMAC4_LPI_ENTRY_TIMER 0xd8 #define GMAC4_MAC_ONEUS_TIC_COUNTER 0xdc -/* LPI control and status defines */ -#define GMAC4_LPI_CTRL_STATUS_LPITCSE BIT(21) /* LPI Tx Clock Stop Enable */ -#define GMAC4_LPI_CTRL_STATUS_LPIATE BIT(20) /* LPI Timer Enable */ -#define GMAC4_LPI_CTRL_STATUS_LPITXA BIT(19) /* Enable LPI TX Automate */ -#define GMAC4_LPI_CTRL_STATUS_PLS BIT(17) /* PHY Link Status */ -#define GMAC4_LPI_CTRL_STATUS_LPIEN BIT(16) /* LPI Enable */ -#define GMAC4_LPI_CTRL_STATUS_RLPIEX BIT(3) /* Receive LPI Exit */ -#define GMAC4_LPI_CTRL_STATUS_RLPIEN BIT(2) /* Receive LPI Entry */ -#define GMAC4_LPI_CTRL_STATUS_TLPIEX BIT(1) /* Transmit LPI Exit */ -#define GMAC4_LPI_CTRL_STATUS_TLPIEN BIT(0) /* Transmit LPI Entry */ - /* MAC Debug bitmap */ #define GMAC_DEBUG_TFCSTS_MASK GENMASK(18, 17) #define GMAC_DEBUG_TFCSTS_SHIFT 17 diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c index 9ed8620580a86..cc4ddf608652c 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c @@ -376,33 +376,46 @@ static void dwmac4_get_umac_addr(struct mac_device_info *hw, GMAC_ADDR_LOW(reg_n)); } -static void dwmac4_set_eee_mode(struct mac_device_info *hw, - bool en_tx_lpi_clockgating) +static int dwmac4_set_lpi_mode(struct mac_device_info *hw, + enum stmmac_lpi_mode mode, + bool en_tx_lpi_clockgating, u32 et) { void __iomem *ioaddr = hw->pcsr; - u32 value; + u32 value, mask; - /* Enable the link status receive on RGMII, SGMII ore SMII - * receive path and instruct the transmit to enter in LPI - * state. - */ - value = readl(ioaddr + GMAC4_LPI_CTRL_STATUS); - value |= GMAC4_LPI_CTRL_STATUS_LPIEN | GMAC4_LPI_CTRL_STATUS_LPITXA; + if (mode == STMMAC_LPI_DISABLE) { + value = 0; + } else { + value = LPI_CTRL_STATUS_LPIEN | LPI_CTRL_STATUS_LPITXA; - if (en_tx_lpi_clockgating) - value |= GMAC4_LPI_CTRL_STATUS_LPITCSE; + if (mode == STMMAC_LPI_TIMER) { + /* Return ERANGE if the timer is larger than the + * register field. + */ + if (et > STMMAC_ET_MAX) + return -ERANGE; - writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS); -} + /* Set the hardware LPI entry timer */ + writel(et, ioaddr + GMAC4_LPI_ENTRY_TIMER); -static void dwmac4_reset_eee_mode(struct mac_device_info *hw) -{ - void __iomem *ioaddr = hw->pcsr; - u32 value; + /* Interpret a zero LPI entry timer to mean + * immediate entry into LPI mode. 
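+ * (LPI_CTRL_STATUS_LPIATE is then left clear below, so entry is not gated on the entry timer.)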
+ */ + if (et) + value |= LPI_CTRL_STATUS_LPIATE; + } - value = readl(ioaddr + GMAC4_LPI_CTRL_STATUS); - value &= ~(GMAC4_LPI_CTRL_STATUS_LPIEN | GMAC4_LPI_CTRL_STATUS_LPITXA); + if (en_tx_lpi_clockgating) + value |= LPI_CTRL_STATUS_LPITCSE; + } + + mask = LPI_CTRL_STATUS_LPIATE | LPI_CTRL_STATUS_LPIEN | + LPI_CTRL_STATUS_LPITXA | LPI_CTRL_STATUS_LPITCSE; + + value |= readl(ioaddr + GMAC4_LPI_CTRL_STATUS) & ~mask; writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS); + + return 0; } static void dwmac4_set_eee_pls(struct mac_device_info *hw, int link) @@ -413,34 +426,13 @@ static void dwmac4_set_eee_pls(struct mac_device_info *hw, int link) value = readl(ioaddr + GMAC4_LPI_CTRL_STATUS); if (link) - value |= GMAC4_LPI_CTRL_STATUS_PLS; + value |= LPI_CTRL_STATUS_PLS; else - value &= ~GMAC4_LPI_CTRL_STATUS_PLS; + value &= ~LPI_CTRL_STATUS_PLS; writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS); } -static void dwmac4_set_eee_lpi_entry_timer(struct mac_device_info *hw, u32 et) -{ - void __iomem *ioaddr = hw->pcsr; - u32 value = et & STMMAC_ET_MAX; - int regval; - - /* Program LPI entry timer value into register */ - writel(value, ioaddr + GMAC4_LPI_ENTRY_TIMER); - - /* Enable/disable LPI entry timer */ - regval = readl(ioaddr + GMAC4_LPI_CTRL_STATUS); - regval |= GMAC4_LPI_CTRL_STATUS_LPIEN | GMAC4_LPI_CTRL_STATUS_LPITXA; - - if (et) - regval |= GMAC4_LPI_CTRL_STATUS_LPIATE; - else - regval &= ~GMAC4_LPI_CTRL_STATUS_LPIATE; - - writel(regval, ioaddr + GMAC4_LPI_CTRL_STATUS); -} - static void dwmac4_set_eee_timer(struct mac_device_info *hw, int ls, int tw) { void __iomem *ioaddr = hw->pcsr; @@ -849,17 +841,17 @@ static int dwmac4_irq_status(struct mac_device_info *hw, /* Clear LPI interrupt by reading MAC_LPI_Control_Status */ u32 status = readl(ioaddr + GMAC4_LPI_CTRL_STATUS); - if (status & GMAC4_LPI_CTRL_STATUS_TLPIEN) { + if (status & LPI_CTRL_STATUS_TLPIEN) { ret |= CORE_IRQ_TX_PATH_IN_LPI_MODE; x->irq_tx_path_in_lpi_mode_n++; } - if (status & GMAC4_LPI_CTRL_STATUS_TLPIEX) { + if (status & LPI_CTRL_STATUS_TLPIEX) { ret |= CORE_IRQ_TX_PATH_EXIT_LPI_MODE; x->irq_tx_path_exit_lpi_mode_n++; } - if (status & GMAC4_LPI_CTRL_STATUS_RLPIEN) + if (status & LPI_CTRL_STATUS_RLPIEN) x->irq_rx_path_in_lpi_mode_n++; - if (status & GMAC4_LPI_CTRL_STATUS_RLPIEX) + if (status & LPI_CTRL_STATUS_RLPIEX) x->irq_rx_path_exit_lpi_mode_n++; } @@ -1201,9 +1193,7 @@ const struct stmmac_ops dwmac4_ops = { .pmt = dwmac4_pmt, .set_umac_addr = dwmac4_set_umac_addr, .get_umac_addr = dwmac4_get_umac_addr, - .set_eee_mode = dwmac4_set_eee_mode, - .reset_eee_mode = dwmac4_reset_eee_mode, - .set_eee_lpi_entry_timer = dwmac4_set_eee_lpi_entry_timer, + .set_lpi_mode = dwmac4_set_lpi_mode, .set_eee_timer = dwmac4_set_eee_timer, .set_eee_pls = dwmac4_set_eee_pls, .pcs_ctrl_ane = dwmac4_ctrl_ane, @@ -1245,9 +1235,7 @@ const struct stmmac_ops dwmac410_ops = { .pmt = dwmac4_pmt, .set_umac_addr = dwmac4_set_umac_addr, .get_umac_addr = dwmac4_get_umac_addr, - .set_eee_mode = dwmac4_set_eee_mode, - .reset_eee_mode = dwmac4_reset_eee_mode, - .set_eee_lpi_entry_timer = dwmac4_set_eee_lpi_entry_timer, + .set_lpi_mode = dwmac4_set_lpi_mode, .set_eee_timer = dwmac4_set_eee_timer, .set_eee_pls = dwmac4_set_eee_pls, .pcs_ctrl_ane = dwmac4_ctrl_ane, @@ -1291,9 +1279,7 @@ const struct stmmac_ops dwmac510_ops = { .pmt = dwmac4_pmt, .set_umac_addr = dwmac4_set_umac_addr, .get_umac_addr = dwmac4_get_umac_addr, - .set_eee_mode = dwmac4_set_eee_mode, - .reset_eee_mode = dwmac4_reset_eee_mode, - .set_eee_lpi_entry_timer = dwmac4_set_eee_lpi_entry_timer, + 
.set_lpi_mode = dwmac4_set_lpi_mode, .set_eee_timer = dwmac4_set_eee_timer, .set_eee_pls = dwmac4_set_eee_pls, .pcs_ctrl_ane = dwmac4_ctrl_ane, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h index 20027d3c25a79..a03f5d7715663 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h @@ -112,14 +112,7 @@ #define XGMAC_MGKPKTEN BIT(1) #define XGMAC_PWRDWN BIT(0) #define XGMAC_LPI_CTRL 0x000000d0 -#define XGMAC_TXCGE BIT(21) -#define XGMAC_LPITXA BIT(19) -#define XGMAC_PLS BIT(17) -#define XGMAC_LPITXEN BIT(16) -#define XGMAC_RLPIEX BIT(3) -#define XGMAC_RLPIEN BIT(2) -#define XGMAC_TLPIEX BIT(1) -#define XGMAC_TLPIEN BIT(0) +/* For definitions, see LPI_CTRL_STATUS_xxx in common.h */ #define XGMAC_LPI_TIMER_CTRL 0x000000d4 #define XGMAC_HW_FEATURE0 0x0000011c #define XGMAC_HWFEAT_EDMA BIT(31) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c index 9a60a6e8f6331..a6d395c6bacd1 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c @@ -316,17 +316,17 @@ static int dwxgmac2_host_irq_status(struct mac_device_info *hw, if (stat & XGMAC_LPIIS) { u32 lpi = readl(ioaddr + XGMAC_LPI_CTRL); - if (lpi & XGMAC_TLPIEN) { + if (lpi & LPI_CTRL_STATUS_TLPIEN) { ret |= CORE_IRQ_TX_PATH_IN_LPI_MODE; x->irq_tx_path_in_lpi_mode_n++; } - if (lpi & XGMAC_TLPIEX) { + if (lpi & LPI_CTRL_STATUS_TLPIEX) { ret |= CORE_IRQ_TX_PATH_EXIT_LPI_MODE; x->irq_tx_path_exit_lpi_mode_n++; } - if (lpi & XGMAC_RLPIEN) + if (lpi & LPI_CTRL_STATUS_RLPIEN) x->irq_rx_path_in_lpi_mode_n++; - if (lpi & XGMAC_RLPIEX) + if (lpi & LPI_CTRL_STATUS_RLPIEX) x->irq_rx_path_exit_lpi_mode_n++; } @@ -425,29 +425,28 @@ static void dwxgmac2_get_umac_addr(struct mac_device_info *hw, addr[5] = (hi_addr >> 8) & 0xff; } -static void dwxgmac2_set_eee_mode(struct mac_device_info *hw, - bool en_tx_lpi_clockgating) +static int dwxgmac2_set_lpi_mode(struct mac_device_info *hw, + enum stmmac_lpi_mode mode, + bool en_tx_lpi_clockgating, u32 et) { void __iomem *ioaddr = hw->pcsr; u32 value; - value = readl(ioaddr + XGMAC_LPI_CTRL); - - value |= XGMAC_LPITXEN | XGMAC_LPITXA; - if (en_tx_lpi_clockgating) - value |= XGMAC_TXCGE; - - writel(value, ioaddr + XGMAC_LPI_CTRL); -} - -static void dwxgmac2_reset_eee_mode(struct mac_device_info *hw) -{ - void __iomem *ioaddr = hw->pcsr; - u32 value; + if (mode == STMMAC_LPI_TIMER) + return -EOPNOTSUPP; value = readl(ioaddr + XGMAC_LPI_CTRL); - value &= ~(XGMAC_LPITXEN | XGMAC_LPITXA | XGMAC_TXCGE); + if (mode == STMMAC_LPI_FORCED) { + value |= LPI_CTRL_STATUS_LPIEN | LPI_CTRL_STATUS_LPITXA; + if (en_tx_lpi_clockgating) + value |= LPI_CTRL_STATUS_LPITCSE; + } else { + value &= ~(LPI_CTRL_STATUS_LPIEN | LPI_CTRL_STATUS_LPITXA | + LPI_CTRL_STATUS_LPITCSE); + } writel(value, ioaddr + XGMAC_LPI_CTRL); + + return 0; } static void dwxgmac2_set_eee_pls(struct mac_device_info *hw, int link) @@ -457,9 +456,9 @@ static void dwxgmac2_set_eee_pls(struct mac_device_info *hw, int link) value = readl(ioaddr + XGMAC_LPI_CTRL); if (link) - value |= XGMAC_PLS; + value |= LPI_CTRL_STATUS_PLS; else - value &= ~XGMAC_PLS; + value &= ~LPI_CTRL_STATUS_PLS; writel(value, ioaddr + XGMAC_LPI_CTRL); } @@ -1525,8 +1524,7 @@ const struct stmmac_ops dwxgmac210_ops = { .pmt = dwxgmac2_pmt, .set_umac_addr = dwxgmac2_set_umac_addr, .get_umac_addr = dwxgmac2_get_umac_addr, - .set_eee_mode = 
dwxgmac2_set_eee_mode, - .reset_eee_mode = dwxgmac2_reset_eee_mode, + .set_lpi_mode = dwxgmac2_set_lpi_mode, .set_eee_timer = dwxgmac2_set_eee_timer, .set_eee_pls = dwxgmac2_set_eee_pls, .debug = NULL, @@ -1582,8 +1580,7 @@ const struct stmmac_ops dwxlgmac2_ops = { .pmt = dwxgmac2_pmt, .set_umac_addr = dwxgmac2_set_umac_addr, .get_umac_addr = dwxgmac2_get_umac_addr, - .set_eee_mode = dwxgmac2_set_eee_mode, - .reset_eee_mode = dwxgmac2_reset_eee_mode, + .set_lpi_mode = dwxgmac2_set_lpi_mode, .set_eee_timer = dwxgmac2_set_eee_timer, .set_eee_pls = dwxgmac2_set_eee_pls, .debug = NULL, diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h index 0f200b72c2250..27c63a9fc163d 100644 --- a/drivers/net/ethernet/stmicro/stmmac/hwif.h +++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h @@ -306,6 +306,12 @@ struct stmmac_pps_cfg; struct stmmac_rss; struct stmmac_est; +enum stmmac_lpi_mode { + STMMAC_LPI_DISABLE, + STMMAC_LPI_FORCED, + STMMAC_LPI_TIMER, +}; + /* Helpers to program the MAC core */ struct stmmac_ops { /* MAC core initialization */ @@ -360,10 +366,9 @@ struct stmmac_ops { unsigned int reg_n); void (*get_umac_addr)(struct mac_device_info *hw, unsigned char *addr, unsigned int reg_n); - void (*set_eee_mode)(struct mac_device_info *hw, - bool en_tx_lpi_clockgating); - void (*reset_eee_mode)(struct mac_device_info *hw); - void (*set_eee_lpi_entry_timer)(struct mac_device_info *hw, u32 et); + int (*set_lpi_mode)(struct mac_device_info *hw, + enum stmmac_lpi_mode mode, + bool en_tx_lpi_clockgating, u32 et); void (*set_eee_timer)(struct mac_device_info *hw, int ls, int tw); void (*set_eee_pls)(struct mac_device_info *hw, int link); void (*debug)(struct stmmac_priv *priv, void __iomem *ioaddr, @@ -467,12 +472,8 @@ struct stmmac_ops { stmmac_do_void_callback(__priv, mac, set_umac_addr, __args) #define stmmac_get_umac_addr(__priv, __args...) \ stmmac_do_void_callback(__priv, mac, get_umac_addr, __args) -#define stmmac_set_eee_mode(__priv, __args...) \ - stmmac_do_void_callback(__priv, mac, set_eee_mode, __args) -#define stmmac_reset_eee_mode(__priv, __args...) \ - stmmac_do_void_callback(__priv, mac, reset_eee_mode, __args) -#define stmmac_set_eee_lpi_timer(__priv, __args...) \ - stmmac_do_void_callback(__priv, mac, set_eee_lpi_entry_timer, __args) +#define stmmac_set_lpi_mode(__priv, __args...) \ + stmmac_do_callback(__priv, mac, set_lpi_mode, __args) #define stmmac_set_eee_timer(__priv, __args...) \ stmmac_do_void_callback(__priv, mac, set_eee_timer, __args) #define stmmac_set_eee_pls(__priv, __args...) 
\ diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h index f05cae103d836..bddfa0f4aa217 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h @@ -106,6 +106,8 @@ struct stmmac_metadata_request { struct stmmac_priv *priv; struct dma_desc *tx_desc; bool *set_ic; + struct dma_edesc *edesc; + int tbs; }; struct stmmac_xsk_tx_complete { @@ -257,7 +259,7 @@ struct stmmac_priv { /* Frequently used values are kept adjacent for cache effect */ u32 tx_coal_frames[MTL_MAX_TX_QUEUES]; u32 tx_coal_timer[MTL_MAX_TX_QUEUES]; - u32 rx_coal_frames[MTL_MAX_TX_QUEUES]; + u32 rx_coal_frames[MTL_MAX_RX_QUEUES]; int hwts_tx_en; bool tx_path_in_lpi_mode; @@ -265,8 +267,7 @@ struct stmmac_priv { int sph; int sph_cap; u32 sarc_type; - - u32 rx_riwt[MTL_MAX_TX_QUEUES]; + u32 rx_riwt[MTL_MAX_RX_QUEUES]; int hwts_rx_en; void __iomem *ioaddr; @@ -281,9 +282,7 @@ struct stmmac_priv { /* Generic channel for NAPI */ struct stmmac_channel channel[STMMAC_CH_MAX]; - int speed; - unsigned int flow_ctrl; - unsigned int pause; + unsigned int pause_time; struct mii_bus *mii; struct phylink_config phylink_config; @@ -307,6 +306,7 @@ struct stmmac_priv { struct timer_list eee_ctrl_timer; int lpi_irq; u32 tx_lpi_timer; + bool tx_lpi_clk_stop; bool eee_enabled; bool eee_active; bool eee_sw_timer_en; @@ -343,7 +343,7 @@ struct stmmac_priv { char int_name_sfty[IFNAMSIZ + 10]; char int_name_sfty_ce[IFNAMSIZ + 10]; char int_name_sfty_ue[IFNAMSIZ + 10]; - char int_name_rx_irq[MTL_MAX_TX_QUEUES][IFNAMSIZ + 14]; + char int_name_rx_irq[MTL_MAX_RX_QUEUES][IFNAMSIZ + 14]; char int_name_tx_irq[MTL_MAX_TX_QUEUES][IFNAMSIZ + 18]; #ifdef CONFIG_DEBUG_FS @@ -407,6 +407,8 @@ int stmmac_dvr_probe(struct device *device, int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt); int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size); int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled); +int stmmac_set_clk_tx_rate(void *bsp_priv, struct clk *clk_tx_i, + phy_interface_t interface, int speed); static inline bool stmmac_xdp_is_enabled(struct stmmac_priv *priv) { diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index c0ae7db96f46f..6c83ee2ad0743 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -88,19 +88,20 @@ MODULE_PARM_DESC(phyaddr, "Physical device address"); #define STMMAC_XDP_TX BIT(1) #define STMMAC_XDP_REDIRECT BIT(2) -static int flow_ctrl = FLOW_AUTO; +static int flow_ctrl = 0xdead; module_param(flow_ctrl, int, 0644); -MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]"); +MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off] (obsolete)"); static int pause = PAUSE_TIME; module_param(pause, int, 0644); -MODULE_PARM_DESC(pause, "Flow Control Pause Time"); +MODULE_PARM_DESC(pause, "Flow Control Pause Time (units of 512 bit times)"); #define TC_DEFAULT 64 static int tc = TC_DEFAULT; module_param(tc, int, 0644); MODULE_PARM_DESC(tc, "DMA threshold control value"); +/* This is unused */ #define DEFAULT_BUFSIZE 1536 static int buf_sz = DEFAULT_BUFSIZE; module_param(buf_sz, int, 0644); @@ -177,6 +178,38 @@ int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled) } EXPORT_SYMBOL_GPL(stmmac_bus_clks_config); +/** + * stmmac_set_clk_tx_rate() - set the clock rate for the MAC transmit clock + * @bsp_priv: BSP private data structure (unused) 
+ * @clk_tx_i: the transmit clock + * @interface: the selected interface mode + * @speed: the speed that the MAC will be operating at + * + * Set the transmit clock rate for the MAC, normally 2.5MHz for 10Mbps, + * 25MHz for 100Mbps and 125MHz for 1Gbps. This is suitable for at least + * MII, GMII, RGMII and RMII interface modes. Platforms can hook this into + * the plat_data->set_clk_tx_rate method directly, call it via their own + * implementation, or implement their own method should they have more + * complex requirements. It is intended to only be used in this method. + * + * plat_data->clk_tx_i must be filled in. + */ +int stmmac_set_clk_tx_rate(void *bsp_priv, struct clk *clk_tx_i, + phy_interface_t interface, int speed) +{ + long rate = rgmii_clock(speed); + + /* Silently ignore unsupported speeds as rgmii_clock() only + * supports 10, 100 and 1000Mbps. We do not want to spit + * errors for 2500 and higher speeds here. + */ + if (rate < 0) + return 0; + + return clk_set_rate(clk_tx_i, rate); +} +EXPORT_SYMBOL_GPL(stmmac_set_clk_tx_rate); + /** * stmmac_verify_args - verify the driver parameters. * Description: it checks the driver parameters and set a default in case of @@ -186,14 +219,11 @@ static void stmmac_verify_args(void) { if (unlikely(watchdog < 0)) watchdog = TX_TIMEO; - if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB))) - buf_sz = DEFAULT_BUFSIZE; - if (unlikely(flow_ctrl > 1)) - flow_ctrl = FLOW_AUTO; - else if (likely(flow_ctrl < 0)) - flow_ctrl = FLOW_OFF; if (unlikely((pause < 0) || (pause > 0xffff))) pause = PAUSE_TIME; + + if (flow_ctrl != 0xdead) + pr_warn("stmmac: module parameter 'flow_ctrl' is obsolete - please remove from your module configuration\n"); } static void __stmmac_disable_all_queues(struct stmmac_priv *priv) @@ -390,16 +420,6 @@ static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue) return dirty; } -static void stmmac_disable_hw_lpi_timer(struct stmmac_priv *priv) -{ - stmmac_set_eee_lpi_timer(priv, priv->hw, 0); -} - -static void stmmac_enable_hw_lpi_timer(struct stmmac_priv *priv) -{ - stmmac_set_eee_lpi_timer(priv, priv->hw, priv->tx_lpi_timer); -} - static bool stmmac_eee_tx_busy(struct stmmac_priv *priv) { u32 tx_cnt = priv->plat->tx_queues_to_use; @@ -436,8 +456,8 @@ static void stmmac_try_to_start_sw_lpi(struct stmmac_priv *priv) /* Check and enter in LPI mode */ if (!priv->tx_path_in_lpi_mode) - stmmac_set_eee_mode(priv, priv->hw, - priv->plat->flags & STMMAC_FLAG_EN_TX_LPI_CLOCKGATING); + stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_FORCED, + priv->tx_lpi_clk_stop, 0); } /** @@ -447,8 +467,8 @@ static void stmmac_try_to_start_sw_lpi(struct stmmac_priv *priv) */ static void stmmac_stop_sw_lpi(struct stmmac_priv *priv) { - stmmac_reset_eee_mode(priv, priv->hw); del_timer_sync(&priv->eee_ctrl_timer); + stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_DISABLE, false, 0); priv->tx_path_in_lpi_mode = false; } @@ -466,74 +486,6 @@ static void stmmac_eee_ctrl_timer(struct timer_list *t) stmmac_try_to_start_sw_lpi(priv); } -/** - * stmmac_eee_init - init EEE - * @priv: driver private structure - * @active: indicates whether EEE should be enabled. - * Description: - * if the GMAC supports the EEE (from the HW cap reg) and the phy device - * can also manage EEE, this function enable the LPI state and start related - * timer. - */ -static void stmmac_eee_init(struct stmmac_priv *priv, bool active) -{ - priv->eee_active = active; - - /* Check if MAC core supports the EEE feature. 
*/ - if (!priv->dma_cap.eee) { - priv->eee_enabled = false; - return; - } - - mutex_lock(&priv->lock); - - /* Check if it needs to be deactivated */ - if (!priv->eee_active) { - if (priv->eee_enabled) { - netdev_dbg(priv->dev, "disable EEE\n"); - priv->eee_sw_timer_en = false; - stmmac_disable_hw_lpi_timer(priv); - del_timer_sync(&priv->eee_ctrl_timer); - stmmac_set_eee_timer(priv, priv->hw, 0, - STMMAC_DEFAULT_TWT_LS); - if (priv->hw->xpcs) - xpcs_config_eee(priv->hw->xpcs, - priv->plat->mult_fact_100ns, - false); - } - priv->eee_enabled = false; - mutex_unlock(&priv->lock); - return; - } - - if (priv->eee_active && !priv->eee_enabled) { - stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS, - STMMAC_DEFAULT_TWT_LS); - if (priv->hw->xpcs) - xpcs_config_eee(priv->hw->xpcs, - priv->plat->mult_fact_100ns, - true); - } - - if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) { - /* Use hardware LPI mode */ - del_timer_sync(&priv->eee_ctrl_timer); - priv->tx_path_in_lpi_mode = false; - priv->eee_sw_timer_en = false; - stmmac_enable_hw_lpi_timer(priv); - } else { - /* Use software LPI mode */ - priv->eee_sw_timer_en = true; - stmmac_disable_hw_lpi_timer(priv); - stmmac_restart_sw_lpi_timer(priv); - } - - priv->eee_enabled = true; - - mutex_unlock(&priv->lock); - netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n"); -} - /* stmmac_get_tx_hwtstamp - get HW TX timestamps * @priv: driver private structure * @p : descriptor pointer @@ -935,14 +887,16 @@ static void stmmac_release_ptp(struct stmmac_priv *priv) * stmmac_mac_flow_ctrl - Configure flow control in all queues * @priv: driver private structure * @duplex: duplex passed to the next function + * @flow_ctrl: desired flow control modes * Description: It is used for configuring the flow control in all queues */ -static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex) +static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex, + unsigned int flow_ctrl) { u32 tx_cnt = priv->plat->tx_queues_to_use; - stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl, - priv->pause, tx_cnt); + stmmac_flow_ctrl(priv, priv->hw, duplex, flow_ctrl, priv->pause_time, + tx_cnt); } static unsigned long stmmac_mac_get_caps(struct phylink_config *config, @@ -1002,7 +956,9 @@ static void stmmac_mac_link_up(struct phylink_config *config, bool tx_pause, bool rx_pause) { struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev)); + unsigned int flow_ctrl; u32 old_ctrl, ctrl; + int ret; if ((priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) && priv->plat->serdes_powerup) @@ -1070,8 +1026,6 @@ static void stmmac_mac_link_up(struct phylink_config *config, } } - priv->speed = speed; - if (priv->plat->fix_mac_speed) priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed, mode); @@ -1082,19 +1036,29 @@ static void stmmac_mac_link_up(struct phylink_config *config, /* Flow Control operation */ if (rx_pause && tx_pause) - priv->flow_ctrl = FLOW_AUTO; + flow_ctrl = FLOW_AUTO; else if (rx_pause && !tx_pause) - priv->flow_ctrl = FLOW_RX; + flow_ctrl = FLOW_RX; else if (!rx_pause && tx_pause) - priv->flow_ctrl = FLOW_TX; + flow_ctrl = FLOW_TX; else - priv->flow_ctrl = FLOW_OFF; + flow_ctrl = FLOW_OFF; - stmmac_mac_flow_ctrl(priv, duplex); + stmmac_mac_flow_ctrl(priv, duplex, flow_ctrl); if (ctrl != old_ctrl) writel(ctrl, priv->ioaddr + MAC_CTRL_REG); + if (priv->plat->set_clk_tx_rate) { + ret = priv->plat->set_clk_tx_rate(priv->plat->bsp_priv, + priv->plat->clk_tx_i, + interface, speed); + if (ret < 0) + 
netdev_err(priv->dev, + "failed to configure transmit clock for %dMbps: %pe\n", + speed, ERR_PTR(ret)); + } + stmmac_mac_set(priv, priv->ioaddr, true); if (priv->dma_cap.eee) stmmac_set_eee_pls(priv, priv->hw, true); @@ -1110,16 +1074,70 @@ static void stmmac_mac_disable_tx_lpi(struct phylink_config *config) { struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev)); - stmmac_eee_init(priv, false); + priv->eee_active = false; + + mutex_lock(&priv->lock); + + priv->eee_enabled = false; + + netdev_dbg(priv->dev, "disable EEE\n"); + priv->eee_sw_timer_en = false; + del_timer_sync(&priv->eee_ctrl_timer); + stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_DISABLE, false, 0); + priv->tx_path_in_lpi_mode = false; + + stmmac_set_eee_timer(priv, priv->hw, 0, STMMAC_DEFAULT_TWT_LS); + mutex_unlock(&priv->lock); } static int stmmac_mac_enable_tx_lpi(struct phylink_config *config, u32 timer, bool tx_clk_stop) { struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev)); + int ret; priv->tx_lpi_timer = timer; - stmmac_eee_init(priv, true); + priv->eee_active = true; + + mutex_lock(&priv->lock); + + priv->eee_enabled = true; + + /* Update the transmit clock stop according to PHY capability if + * the platform allows + */ + if (priv->plat->flags & STMMAC_FLAG_EN_TX_LPI_CLK_PHY_CAP) + priv->tx_lpi_clk_stop = tx_clk_stop; + + stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS, + STMMAC_DEFAULT_TWT_LS); + + /* Try to configure the hardware timer. */ + ret = stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_TIMER, + priv->tx_lpi_clk_stop, priv->tx_lpi_timer); + + if (ret) { + /* Hardware timer mode not supported, or value out of range. + * Fall back to using software LPI mode + */ + priv->eee_sw_timer_en = true; + stmmac_restart_sw_lpi_timer(priv); + } + + mutex_unlock(&priv->lock); + netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n"); + + return 0; +} + +static int stmmac_mac_finish(struct phylink_config *config, unsigned int mode, + phy_interface_t interface) +{ + struct net_device *ndev = to_net_dev(config->dev); + struct stmmac_priv *priv = netdev_priv(ndev); + + if (priv->plat->mac_finish) + priv->plat->mac_finish(ndev, priv->plat->bsp_priv, mode, interface); return 0; } @@ -1132,6 +1150,7 @@ static const struct phylink_mac_ops stmmac_phylink_mac_ops = { .mac_link_up = stmmac_mac_link_up, .mac_disable_tx_lpi = stmmac_mac_disable_tx_lpi, .mac_enable_tx_lpi = stmmac_mac_enable_tx_lpi, + .mac_finish = stmmac_mac_finish, }; /** @@ -1254,6 +1273,10 @@ static int stmmac_phy_setup(struct stmmac_priv *priv) if (!(priv->plat->flags & STMMAC_FLAG_RX_CLK_RUNS_IN_LPI)) priv->phylink_config.eee_rx_clk_stop_enable = true; + /* Set the default transmit clock stop bit based on the platform glue */ + priv->tx_lpi_clk_stop = priv->plat->flags & + STMMAC_FLAG_EN_TX_LPI_CLOCKGATING; + mdio_bus_data = priv->plat->mdio_bus_data; if (mdio_bus_data) priv->phylink_config.default_an_inband = @@ -2524,9 +2547,20 @@ static u64 stmmac_xsk_fill_timestamp(void *_priv) return 0; } +static void stmmac_xsk_request_launch_time(u64 launch_time, void *_priv) +{ + struct timespec64 ts = ns_to_timespec64(launch_time); + struct stmmac_metadata_request *meta_req = _priv; + + if (meta_req->tbs & STMMAC_TBS_EN) + stmmac_set_desc_tbs(meta_req->priv, meta_req->edesc, ts.tv_sec, + ts.tv_nsec); +} + static const struct xsk_tx_metadata_ops stmmac_xsk_tx_metadata_ops = { .tmo_request_timestamp = stmmac_xsk_request_timestamp, .tmo_fill_timestamp = stmmac_xsk_fill_timestamp, + .tmo_request_launch_time =
stmmac_xsk_request_launch_time, }; static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget) @@ -2610,6 +2644,8 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget) meta_req.priv = priv; meta_req.tx_desc = tx_desc; meta_req.set_ic = &set_ic; + meta_req.tbs = tx_q->tbs; + meta_req.edesc = &tx_q->dma_entx[entry]; xsk_tx_metadata_request(meta, &stmmac_xsk_tx_metadata_ops, &meta_req); if (set_ic) { @@ -3072,7 +3108,7 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv) int ret = 0; if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) { - dev_err(priv->device, "Invalid DMA configuration\n"); + netdev_err(priv->dev, "Invalid DMA configuration\n"); return -EINVAL; } @@ -3081,7 +3117,7 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv) ret = stmmac_reset(priv, priv->ioaddr); if (ret) { - dev_err(priv->device, "Failed to reset the dma\n"); + netdev_err(priv->dev, "Failed to reset the dma\n"); return ret; } @@ -3448,9 +3484,18 @@ static int stmmac_hw_setup(struct net_device *dev, bool ptp_register) if (priv->hw->phylink_pcs) phylink_pcs_pre_init(priv->phylink, priv->hw->phylink_pcs); + /* Note that clk_rx_i must be running for reset to complete. This + * clock may also be required when setting the MAC address. + * + * Block the receive clock stop for LPI mode at the PHY in case + * the link is established with EEE mode active. + */ + phylink_rx_clk_stop_block(priv->phylink); + /* DMA initialization and SW reset */ ret = stmmac_init_dma_engine(priv); if (ret < 0) { + phylink_rx_clk_stop_unblock(priv->phylink); netdev_err(priv->dev, "%s: DMA engine initialization failed\n", __func__); return ret; @@ -3458,6 +3503,7 @@ static int stmmac_hw_setup(struct net_device *dev, bool ptp_register) /* Copy the MAC addr into the HW */ stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0); + phylink_rx_clk_stop_unblock(priv->phylink); /* PS and related bits will be programmed according to the speed */ if (priv->hw->pcs) { @@ -3568,7 +3614,9 @@ static int stmmac_hw_setup(struct net_device *dev, bool ptp_register) /* Start the ball rolling... 
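 * (i.e. enable all Rx and Tx DMA channels)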
*/ stmmac_start_all_dma(priv); + phylink_rx_clk_stop_block(priv->phylink); stmmac_set_hw_vlan_mode(priv, priv->hw); + phylink_rx_clk_stop_unblock(priv->phylink); return 0; } @@ -3640,7 +3688,6 @@ static int stmmac_request_irq_multi_msi(struct net_device *dev) { struct stmmac_priv *priv = netdev_priv(dev); enum request_irq_err irq_err; - cpumask_t cpu_mask; int irq_idx = 0; char *int_name; int ret; @@ -3769,9 +3816,8 @@ static int stmmac_request_irq_multi_msi(struct net_device *dev) irq_idx = i; goto irq_error; } - cpumask_clear(&cpu_mask); - cpumask_set_cpu(i % num_online_cpus(), &cpu_mask); - irq_set_affinity_hint(priv->rx_irq[i], &cpu_mask); + irq_set_affinity_hint(priv->rx_irq[i], + cpumask_of(i % num_online_cpus())); } /* Request Tx MSI irq */ @@ -3794,9 +3840,8 @@ static int stmmac_request_irq_multi_msi(struct net_device *dev) irq_idx = i; goto irq_error; } - cpumask_clear(&cpu_mask); - cpumask_set_cpu(i % num_online_cpus(), &cpu_mask); - irq_set_affinity_hint(priv->tx_irq[i], &cpu_mask); + irq_set_affinity_hint(priv->tx_irq[i], + cpumask_of(i % num_online_cpus())); } return 0; @@ -3997,7 +4042,6 @@ static int __stmmac_open(struct net_device *dev, } } - buf_sz = dma_conf->dma_buf_sz; for (int i = 0; i < MTL_MAX_TX_QUEUES; i++) if (priv->dma_conf.tx_queue[i].tbs & STMMAC_TBS_EN) dma_conf->tx_queue[i].tbs = priv->dma_conf.tx_queue[i].tbs; @@ -4102,9 +4146,6 @@ static int stmmac_release(struct net_device *dev) /* Release and free the Rx/Tx resources */ free_dma_desc_resources(priv, &priv->dma_conf); - /* Disable the MAC Rx/Tx */ - stmmac_mac_set(priv, priv->ioaddr, false); - /* Powerdown Serdes if there is */ if (priv->plat->serdes_powerdown) priv->plat->serdes_powerdown(dev, priv->plat->bsp_priv); @@ -5454,10 +5495,10 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) struct sk_buff *skb = NULL; struct stmmac_xdp_buff ctx; int xdp_status = 0; - int buf_sz; + int bufsz; dma_dir = page_pool_get_dma_dir(rx_q->page_pool); - buf_sz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE; + bufsz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE; limit = min(priv->dma_conf.dma_rx_size - 1, (unsigned int)limit); if (netif_msg_rx_status(priv)) { @@ -5570,7 +5611,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) net_prefetch(page_address(buf->page) + buf->page_offset); - xdp_init_buff(&ctx.xdp, buf_sz, &rx_q->xdp_rxq); + xdp_init_buff(&ctx.xdp, bufsz, &rx_q->xdp_rxq); xdp_prepare_buff(&ctx.xdp, page_address(buf->page), buf->page_offset, buf1_len, true); @@ -5856,6 +5897,9 @@ static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue) * whenever multicast addresses must be enabled/disabled. * Return value: * void. + * + * FIXME: This may need RXC to be running, but it may be called with BH + * disabled, which means we can't call phylink_rx_clk_stop*(). 
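+ * + * For reference, sleepable callers elsewhere in this patch wrap such writes as: + * + *	phylink_rx_clk_stop_block(priv->phylink); + *	... MAC register writes that need RXC running ... + *	phylink_rx_clk_stop_unblock(priv->phylink); + * + * but that pattern is not usable from atomic context.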
*/ static void stmmac_set_rx_mode(struct net_device *dev) { @@ -5988,7 +6032,9 @@ static int stmmac_set_features(struct net_device *netdev, else priv->hw->hw_vlan_en = false; + phylink_rx_clk_stop_block(priv->phylink); stmmac_set_hw_vlan_mode(priv, priv->hw); + phylink_rx_clk_stop_unblock(priv->phylink); return 0; } @@ -6272,7 +6318,9 @@ static int stmmac_set_mac_address(struct net_device *ndev, void *addr) if (ret) goto set_mac_error; + phylink_rx_clk_stop_block(priv->phylink); stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0); + phylink_rx_clk_stop_unblock(priv->phylink); set_mac_error: pm_runtime_put(priv->device); @@ -6628,6 +6676,9 @@ static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double) return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double); } +/* FIXME: This may need RXC to be running, but it may be called with BH + * disabled, which means we can't call phylink_rx_clk_stop*(). + */ static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid) { struct stmmac_priv *priv = netdev_priv(ndev); @@ -6659,6 +6710,9 @@ static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid return ret; } +/* FIXME: This may need RXC to be running, but it may be called with BH + * disabled, which means we can't call phylink_rx_clk_stop*(). + */ static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid) { struct stmmac_priv *priv = netdev_priv(ndev); @@ -7444,7 +7498,7 @@ int stmmac_dvr_probe(struct device *device, return -ENOMEM; stmmac_set_ethtool_ops(ndev); - priv->pause = pause; + priv->pause_time = pause; priv->plat = plat_dat; priv->ioaddr = res->addr; priv->dev->base_addr = (unsigned long)res->addr; @@ -7640,9 +7694,6 @@ int stmmac_dvr_probe(struct device *device, "%s: warning: maxmtu having invalid value (%d)\n", __func__, priv->plat->maxmtu); - if (flow_ctrl) - priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */ - ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE; /* Setup channels NAPI */ @@ -7744,8 +7795,6 @@ void stmmac_dvr_remove(struct device *dev) pm_runtime_get_sync(dev); - stmmac_stop_all_dma(priv); - stmmac_mac_set(priv, priv->ioaddr, false); unregister_netdev(ndev); #ifdef CONFIG_DEBUG_FS @@ -7816,19 +7865,16 @@ int stmmac_suspend(struct device *dev) mutex_unlock(&priv->lock); rtnl_lock(); - if (device_may_wakeup(priv->device) && priv->plat->pmt) { - phylink_suspend(priv->phylink, true); - } else { - if (device_may_wakeup(priv->device)) - phylink_speed_down(priv->phylink, false); - phylink_suspend(priv->phylink, false); - } + if (device_may_wakeup(priv->device) && !priv->plat->pmt) + phylink_speed_down(priv->phylink, false); + + phylink_suspend(priv->phylink, + device_may_wakeup(priv->device) && priv->plat->pmt); rtnl_unlock(); if (stmmac_fpe_supported(priv)) timer_shutdown_sync(&priv->fpe_cfg.verify_timer); - priv->speed = SPEED_UNKNOWN; return 0; } EXPORT_SYMBOL_GPL(stmmac_suspend); @@ -7912,16 +7958,12 @@ int stmmac_resume(struct device *dev) } rtnl_lock(); - if (device_may_wakeup(priv->device) && priv->plat->pmt) { - phylink_resume(priv->phylink); - } else { - phylink_resume(priv->phylink); - if (device_may_wakeup(priv->device)) - phylink_speed_up(priv->phylink); - } - rtnl_unlock(); - rtnl_lock(); + /* Prepare the PHY to resume, ensuring that its clocks which are + * necessary for the MAC DMA reset to complete are running + */ + phylink_prepare_resume(priv->phylink); + mutex_lock(&priv->lock); stmmac_reset_queues_param(priv); @@ -7931,14 +7973,25 @@ int stmmac_resume(struct device *dev) 
stmmac_hw_setup(ndev, false); stmmac_init_coalesce(priv); + phylink_rx_clk_stop_block(priv->phylink); stmmac_set_rx_mode(ndev); stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw); + phylink_rx_clk_stop_unblock(priv->phylink); stmmac_enable_all_queues(priv); stmmac_enable_all_dma_irq(priv); mutex_unlock(&priv->lock); + + /* phylink_resume() must be called after the hardware has been + * initialised because it may bring the link up immediately in a + * workqueue thread, which will race with initialisation. + */ + phylink_resume(priv->phylink); + if (device_may_wakeup(priv->device) && !priv->plat->pmt) + phylink_speed_up(priv->phylink); + rtnl_unlock(); netif_device_attach(ndev); @@ -7961,9 +8014,6 @@ static int __init stmmac_cmdline_opt(char *str) } else if (!strncmp(opt, "phyaddr:", 8)) { if (kstrtoint(opt + 8, 0, &phyaddr)) goto err; - } else if (!strncmp(opt, "buf_sz:", 7)) { - if (kstrtoint(opt + 7, 0, &buf_sz)) - goto err; } else if (!strncmp(opt, "tc:", 3)) { if (kstrtoint(opt + 3, 0, &tc)) goto err; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c index 0c7d81ddd4406..836f2848dfeb8 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c @@ -524,6 +524,9 @@ int stmmac_pcs_setup(struct net_device *ndev) if (ret) return dev_err_probe(priv->device, ret, "No xPCS found\n"); + if (xpcs) + xpcs_config_eee_mult_fact(xpcs, priv->plat->mult_fact_100ns); + priv->hw->xpcs = xpcs; return 0; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c index 352b01678c223..9c1b54b701f75 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c @@ -155,9 +155,9 @@ static int stmmac_pci_probe(struct pci_dev *pdev, { struct stmmac_pci_info *info = (struct stmmac_pci_info *)id->driver_data; struct plat_stmmacenet_data *plat; - struct stmmac_resources res; - int i; + struct stmmac_resources res = {}; int ret; + int i; plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL); if (!plat) @@ -192,9 +192,9 @@ static int stmmac_pci_probe(struct pci_dev *pdev, for (i = 0; i < PCI_STD_NUM_BARS; i++) { if (pci_resource_len(pdev, i) == 0) continue; - ret = pcim_iomap_regions(pdev, BIT(i), pci_name(pdev)); - if (ret) - return ret; + res.addr = pcim_iomap_region(pdev, i, STMMAC_RESOURCE_NAME); + if (IS_ERR(res.addr)) + return PTR_ERR(res.addr); break; } @@ -204,8 +204,6 @@ static int stmmac_pci_probe(struct pci_dev *pdev, if (ret) return ret; - memset(&res, 0, sizeof(res)); - res.addr = pcim_iomap_table(pdev)[i]; res.wol_irq = pdev->irq; res.irq = pdev->irq; @@ -226,21 +224,11 @@ static int stmmac_pci_probe(struct pci_dev *pdev, * stmmac_pci_remove * * @pdev: platform device pointer - * Description: this function calls the main to free the net resources - * and releases the PCI resources. + * Description: this function calls the main to free the net resources. 
*/ static void stmmac_pci_remove(struct pci_dev *pdev) { - int i; - stmmac_dvr_remove(&pdev->dev); - - for (i = 0; i < PCI_STD_NUM_BARS; i++) { - if (pci_resource_len(pdev, i) == 0) - continue; - pcim_iounmap_regions(pdev, BIT(i)); - break; - } } static int __maybe_unused stmmac_pci_suspend(struct device *dev) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index d0e61aa1a4952..c73eff6a56b87 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -405,6 +405,17 @@ static int stmmac_of_get_mac_mode(struct device_node *np) return -ENODEV; } +/* Compatible string array for all gmac4 devices */ +static const char * const stmmac_gmac4_compats[] = { + "snps,dwmac-4.00", + "snps,dwmac-4.10a", + "snps,dwmac-4.20a", + "snps,dwmac-5.10a", + "snps,dwmac-5.20", + "snps,dwmac-5.30a", + NULL +}; + /** * stmmac_probe_config_dt - parse device-tree driver parameters * @pdev: platform_device structure @@ -486,8 +497,11 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac) plat->force_sf_dma_mode = of_property_read_bool(np, "snps,force_sf_dma_mode"); - if (of_property_read_bool(np, "snps,en-tx-lpi-clockgating")) + if (of_property_read_bool(np, "snps,en-tx-lpi-clockgating")) { + dev_warn(&pdev->dev, + "OF property snps,en-tx-lpi-clockgating is deprecated, please convert driver to use STMMAC_FLAG_EN_TX_LPI_CLK_PHY_CAP\n"); plat->flags |= STMMAC_FLAG_EN_TX_LPI_CLOCKGATING; + } /* Set the maxmtu to a default of JUMBO_LEN in case the * parameter is not present in the device tree. @@ -538,11 +552,7 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac) plat->pmt = 1; } - if (of_device_is_compatible(np, "snps,dwmac-4.00") || - of_device_is_compatible(np, "snps,dwmac-4.10a") || - of_device_is_compatible(np, "snps,dwmac-4.20a") || - of_device_is_compatible(np, "snps,dwmac-5.10a") || - of_device_is_compatible(np, "snps,dwmac-5.20")) { + if (of_device_compatible_match(np, stmmac_gmac4_compats)) { plat->has_gmac4 = 1; plat->has_gmac = 0; plat->pmt = 1; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c index 3ca1c2a816ff1..a01bc394d1ac4 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c @@ -382,14 +382,14 @@ static int stmmac_test_phy_loopback(struct stmmac_priv *priv) if (!priv->dev->phydev) return -EOPNOTSUPP; - ret = phy_loopback(priv->dev->phydev, true); + ret = phy_loopback(priv->dev->phydev, true, 0); if (ret) return ret; attr.dst = priv->dev->dev_addr; ret = __stmmac_test_loopback(priv, &attr); - phy_loopback(priv->dev->phydev, false); + phy_loopback(priv->dev->phydev, false, 0); return ret; } @@ -1985,7 +1985,7 @@ void stmmac_selftest_run(struct net_device *dev, case STMMAC_LOOPBACK_PHY: ret = -EOPNOTSUPP; if (dev->phydev) - ret = phy_loopback(dev->phydev, true); + ret = phy_loopback(dev->phydev, true, 0); if (!ret) break; fallthrough; @@ -2018,7 +2018,7 @@ void stmmac_selftest_run(struct net_device *dev, case STMMAC_LOOPBACK_PHY: ret = -EOPNOTSUPP; if (dev->phydev) - ret = phy_loopback(dev->phydev, false); + ret = phy_loopback(dev->phydev, false, 0); if (!ret) break; fallthrough; diff --git a/drivers/net/ethernet/tehuti/tn40.c b/drivers/net/ethernet/tehuti/tn40.c index 259bdac24cf21..558b791a97edd 100644 --- a/drivers/net/ethernet/tehuti/tn40.c +++ b/drivers/net/ethernet/tehuti/tn40.c @@ -1778,7 
+1778,7 @@ static int tn40_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	ret = tn40_phy_register(priv);
 	if (ret) {
 		dev_err(&pdev->dev, "failed to set up PHY.\n");
-		goto err_free_irq;
+		goto err_cleanup_swnodes;
 	}
 
 	ret = tn40_priv_init(priv);
@@ -1795,6 +1795,8 @@ static int tn40_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	return 0;
 err_unregister_phydev:
 	tn40_phy_unregister(priv);
+err_cleanup_swnodes:
+	tn40_swnodes_cleanup(priv);
 err_free_irq:
 	pci_free_irq_vectors(pdev);
 err_unset_drvdata:
@@ -1816,6 +1818,7 @@ static void tn40_remove(struct pci_dev *pdev)
 
 	unregister_netdev(ndev);
 	tn40_phy_unregister(priv);
+	tn40_swnodes_cleanup(priv);
 	pci_free_irq_vectors(priv->pdev);
 	pci_set_drvdata(pdev, NULL);
 	iounmap(priv->regs);
@@ -1832,6 +1835,10 @@ static const struct pci_device_id tn40_id_table[] = {
 	  PCI_VENDOR_ID_ASUSTEK, 0x8709) },
 	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_TEHUTI, 0x4022,
 	  PCI_VENDOR_ID_EDIMAX, 0x8103) },
+	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_TEHUTI, PCI_DEVICE_ID_TEHUTI_TN9510,
+	  PCI_VENDOR_ID_TEHUTI, 0x3015) },
+	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_TEHUTI, PCI_DEVICE_ID_TEHUTI_TN9510,
+	  PCI_VENDOR_ID_EDIMAX, 0x8102) },
 	{ }
 };
 
diff --git a/drivers/net/ethernet/tehuti/tn40.h b/drivers/net/ethernet/tehuti/tn40.h
index 490781fe51205..25da8686d4691 100644
--- a/drivers/net/ethernet/tehuti/tn40.h
+++ b/drivers/net/ethernet/tehuti/tn40.h
@@ -4,10 +4,13 @@
 #ifndef _TN40_H_
 #define _TN40_H_
 
+#include <linux/property.h>
 #include "tn40_regs.h"
 
 #define TN40_DRV_NAME "tn40xx"
 
+#define PCI_DEVICE_ID_TEHUTI_TN9510 0x4025
+
 #define TN40_MDIO_SPEED_1MHZ (1)
 #define TN40_MDIO_SPEED_6MHZ (6)
 
@@ -102,10 +105,39 @@ struct tn40_txdb {
 	int size; /* Number of elements in the db */
 };
 
+#define NODE_PROP(_NAME, _PROP) (			\
+	(const struct software_node) {			\
+		.name = _NAME,				\
+		.properties = _PROP,			\
+	})
+
+#define NODE_PAR_PROP(_NAME, _PAR, _PROP) (		\
+	(const struct software_node) {			\
+		.name = _NAME,				\
+		.parent = _PAR,				\
+		.properties = _PROP,			\
+	})
+
+enum tn40_swnodes {
+	SWNODE_MDIO,
+	SWNODE_PHY,
+	SWNODE_MAX
+};
+
+struct tn40_nodes {
+	char phy_name[32];
+	char mdio_name[32];
+	struct property_entry phy_props[3];
+	struct software_node swnodes[SWNODE_MAX];
+	const struct software_node *group[SWNODE_MAX + 1];
+};
+
 struct tn40_priv {
 	struct net_device *ndev;
 	struct pci_dev *pdev;
+	struct tn40_nodes nodes;
+
 	struct napi_struct napi;
 	/* RX FIFOs: 1 for data (full) descs, and 2 for free descs */
 	struct tn40_rxd_fifo rxd_fifo0;
@@ -225,6 +257,7 @@ static inline void tn40_write_reg(struct tn40_priv *priv, u32 reg, u32 val)
 
 int tn40_set_link_speed(struct tn40_priv *priv, u32 speed);
 
+void tn40_swnodes_cleanup(struct tn40_priv *priv);
 int tn40_mdiobus_init(struct tn40_priv *priv);
 
 int tn40_phy_register(struct tn40_priv *priv);
diff --git a/drivers/net/ethernet/tehuti/tn40_mdio.c b/drivers/net/ethernet/tehuti/tn40_mdio.c
index af18615d64a8a..fb1a4a2e4dbc5 100644
--- a/drivers/net/ethernet/tehuti/tn40_mdio.c
+++ b/drivers/net/ethernet/tehuti/tn40_mdio.c
@@ -14,6 +14,8 @@
 	 (FIELD_PREP(TN40_MDIO_PRTAD_MASK, (port))))
 #define TN40_MDIO_CMD_READ BIT(15)
 
+#define AQR105_FIRMWARE "tehuti/aqr105-tn40xx.cld"
+
 static void tn40_mdio_set_speed(struct tn40_priv *priv, u32 speed)
 {
 	void __iomem *regs = priv->regs;
@@ -111,6 +113,56 @@ static int tn40_mdio_write_c45(struct mii_bus *mii_bus, int addr, int devnum,
 	return tn40_mdio_write(mii_bus->priv, addr, devnum, regnum, val);
 }
 
+/* registers an mdio node and an aqr105 PHY at address 1
+ * tn40_mdio-%id {
+ *	ethernet-phy@1 {
+ *		compatible = "ethernet-phy-id03a1.b4a3";
+ *		reg = <1>;
+ *		firmware-name = AQR105_FIRMWARE;
+ *	};
+ * };
+ */
+static int tn40_swnodes_register(struct tn40_priv *priv)
+{
+	struct tn40_nodes *nodes = &priv->nodes;
+	struct pci_dev *pdev = priv->pdev;
+	struct software_node *swnodes;
+	u32 id;
+
+	id = pci_dev_id(pdev);
+
+	snprintf(nodes->phy_name, sizeof(nodes->phy_name), "ethernet-phy@1");
+	snprintf(nodes->mdio_name, sizeof(nodes->mdio_name), "tn40_mdio-%x",
+		 id);
+
+	swnodes = nodes->swnodes;
+
+	swnodes[SWNODE_MDIO] = NODE_PROP(nodes->mdio_name, NULL);
+
+	nodes->phy_props[0] = PROPERTY_ENTRY_STRING("compatible",
+						    "ethernet-phy-id03a1.b4a3");
+	nodes->phy_props[1] = PROPERTY_ENTRY_U32("reg", 1);
+	nodes->phy_props[2] = PROPERTY_ENTRY_STRING("firmware-name",
+						    AQR105_FIRMWARE);
+	swnodes[SWNODE_PHY] = NODE_PAR_PROP(nodes->phy_name,
+					    &swnodes[SWNODE_MDIO],
+					    nodes->phy_props);
+
+	nodes->group[SWNODE_PHY] = &swnodes[SWNODE_PHY];
+	nodes->group[SWNODE_MDIO] = &swnodes[SWNODE_MDIO];
+	return software_node_register_node_group(nodes->group);
+}
+
+void tn40_swnodes_cleanup(struct tn40_priv *priv)
+{
+	/* cleanup of swnodes is only needed for AQR105-based cards */
+	if (priv->pdev->device == PCI_DEVICE_ID_TEHUTI_TN9510) {
+		fwnode_handle_put(dev_fwnode(&priv->mdio->dev));
+		device_remove_software_node(&priv->mdio->dev);
+		software_node_unregister_node_group(priv->nodes.group);
+	}
+}
+
 int tn40_mdiobus_init(struct tn40_priv *priv)
{
 	struct pci_dev *pdev = priv->pdev;
@@ -129,14 +181,40 @@ int tn40_mdiobus_init(struct tn40_priv *priv)
 	bus->read_c45 = tn40_mdio_read_c45;
 	bus->write_c45 = tn40_mdio_write_c45;
 
+	priv->mdio = bus;
+	/* provide swnodes for AQR105-based cards only */
+	if (pdev->device == PCI_DEVICE_ID_TEHUTI_TN9510) {
+		ret = tn40_swnodes_register(priv);
+		if (ret) {
+			pr_err("swnodes failed\n");
+			return ret;
+		}
+
+		ret = device_add_software_node(&bus->dev,
+					       priv->nodes.group[SWNODE_MDIO]);
+		if (ret) {
+			dev_err(&pdev->dev,
+				"device_add_software_node failed: %d\n", ret);
+			goto err_swnodes_unregister;
+		}
+	}
+
+	tn40_mdio_set_speed(priv, TN40_MDIO_SPEED_6MHZ);
 	ret = devm_mdiobus_register(&pdev->dev, bus);
 	if (ret) {
 		dev_err(&pdev->dev, "failed to register mdiobus %d %u %u\n",
 			ret, bus->state, MDIOBUS_UNREGISTERED);
-		return ret;
+		goto err_swnodes_cleanup;
 	}
-	tn40_mdio_set_speed(priv, TN40_MDIO_SPEED_6MHZ);
-	priv->mdio = bus;
 	return 0;
+
+err_swnodes_unregister:
+	software_node_unregister_node_group(priv->nodes.group);
+	return ret;
+err_swnodes_cleanup:
+	tn40_swnodes_cleanup(priv);
+	return ret;
 }
+
+MODULE_FIRMWARE(AQR105_FIRMWARE);
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
index 0d5a862cd78a6..a07c910c497a9 100644
--- a/drivers/net/ethernet/ti/Kconfig
+++ b/drivers/net/ethernet/ti/Kconfig
@@ -99,6 +99,7 @@ config TI_K3_AM65_CPSW_NUSS
 	select NET_DEVLINK
 	select TI_DAVINCI_MDIO
 	select PHYLINK
+	select PAGE_POOL
 	select TI_K3_CPPI_DESC_POOL
 	imply PHY_TI_GMII_SEL
 	depends on TI_K3_AM65_CPTS || !TI_K3_AM65_CPTS
@@ -204,6 +205,7 @@ config TI_ICSSG_PRUETH_SR1
 	select PHYLIB
 	select TI_ICSS_IEP
 	select TI_K3_CPPI_DESC_POOL
+	select PAGE_POOL
 	depends on PRU_REMOTEPROC
 	depends on NET_SWITCHDEV
 	depends on ARCH_K3 && OF && TI_K3_UDMA_GLUE_LAYER
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
index 2806238629f82..8b202e8e10e0a 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
@@ -164,6 +164,7 @@
 #define AM65_CPSW_CPPI_TX_PKT_TYPE 0x7
 
 /* XDP */
+#define
AM65_CPSW_XDP_TX BIT(2) #define AM65_CPSW_XDP_CONSUMED BIT(1) #define AM65_CPSW_XDP_REDIRECT BIT(0) #define AM65_CPSW_XDP_PASS 0 @@ -829,19 +830,19 @@ static void am65_cpsw_nuss_tx_cleanup(void *data, dma_addr_t desc_dma) { struct am65_cpsw_tx_chn *tx_chn = data; enum am65_cpsw_tx_buf_type buf_type; + struct am65_cpsw_tx_swdata *swdata; struct cppi5_host_desc_t *desc_tx; struct xdp_frame *xdpf; struct sk_buff *skb; - void **swdata; desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool, desc_dma); swdata = cppi5_hdesc_get_swdata(desc_tx); buf_type = am65_cpsw_nuss_buf_type(tx_chn, desc_dma); if (buf_type == AM65_CPSW_TX_BUF_TYPE_SKB) { - skb = *(swdata); + skb = swdata->skb; dev_kfree_skb_any(skb); } else { - xdpf = *(swdata); + xdpf = swdata->xdpf; xdp_return_frame(xdpf); } @@ -1098,10 +1099,10 @@ static int am65_cpsw_xdp_tx_frame(struct net_device *ndev, struct am65_cpsw_common *common = am65_ndev_to_common(ndev); struct am65_cpsw_port *port = am65_ndev_to_port(ndev); struct cppi5_host_desc_t *host_desc; + struct am65_cpsw_tx_swdata *swdata; struct netdev_queue *netif_txq; dma_addr_t dma_desc, dma_buf; u32 pkt_len = xdpf->len; - void **swdata; int ret; host_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool); @@ -1131,7 +1132,8 @@ static int am65_cpsw_xdp_tx_frame(struct net_device *ndev, cppi5_hdesc_attach_buf(host_desc, dma_buf, pkt_len, dma_buf, pkt_len); swdata = cppi5_hdesc_get_swdata(host_desc); - *(swdata) = xdpf; + swdata->ndev = ndev; + swdata->xdpf = xdpf; /* Report BQL before sending the packet */ netif_txq = netdev_get_tx_queue(ndev, tx_chn->id); @@ -1167,17 +1169,16 @@ static int am65_cpsw_xdp_tx_frame(struct net_device *ndev, static int am65_cpsw_run_xdp(struct am65_cpsw_rx_flow *flow, struct am65_cpsw_port *port, - struct xdp_buff *xdp, - int cpu, int *len) + struct xdp_buff *xdp, int *len) { struct am65_cpsw_common *common = flow->common; struct net_device *ndev = port->ndev; int ret = AM65_CPSW_XDP_CONSUMED; struct am65_cpsw_tx_chn *tx_chn; struct netdev_queue *netif_txq; + int cpu = smp_processor_id(); struct xdp_frame *xdpf; struct bpf_prog *prog; - struct page *page; int pkt_len; u32 act; int err; @@ -1193,8 +1194,7 @@ static int am65_cpsw_run_xdp(struct am65_cpsw_rx_flow *flow, switch (act) { case XDP_PASS: - ret = AM65_CPSW_XDP_PASS; - goto out; + return AM65_CPSW_XDP_PASS; case XDP_TX: tx_chn = &common->tx_chns[cpu % AM65_CPSW_MAX_QUEUES]; netif_txq = netdev_get_tx_queue(ndev, tx_chn->id); @@ -1213,15 +1213,13 @@ static int am65_cpsw_run_xdp(struct am65_cpsw_rx_flow *flow, goto drop; dev_sw_netstats_rx_add(ndev, pkt_len); - ret = AM65_CPSW_XDP_CONSUMED; - goto out; + return AM65_CPSW_XDP_TX; case XDP_REDIRECT: if (unlikely(xdp_do_redirect(ndev, xdp, prog))) goto drop; dev_sw_netstats_rx_add(ndev, pkt_len); - ret = AM65_CPSW_XDP_REDIRECT; - goto out; + return AM65_CPSW_XDP_REDIRECT; default: bpf_warn_invalid_xdp_action(ndev, prog, act); fallthrough; @@ -1233,10 +1231,6 @@ static int am65_cpsw_run_xdp(struct am65_cpsw_rx_flow *flow, ndev->stats.rx_dropped++; } - page = virt_to_head_page(xdp->data); - am65_cpsw_put_page(flow, page, true); - -out: return ret; } @@ -1274,7 +1268,7 @@ static void am65_cpsw_nuss_rx_csum(struct sk_buff *skb, u32 csum_info) } static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_rx_flow *flow, - int cpu, int *xdp_state) + int *xdp_state) { struct am65_cpsw_rx_chn *rx_chn = &flow->common->rx_chns; u32 buf_dma_len, pkt_len, port_id = 0, csum_info; @@ -1334,8 +1328,13 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_rx_flow *flow, 
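Across the surrounding hunks, am65_cpsw_run_xdp() now reports XDP_TX as the distinct AM65_CPSW_XDP_TX verdict instead of folding it into AM65_CPSW_XDP_CONSUMED, and it stops freeing the RX page inside the helper; the caller-side handling follows just below. A minimal sketch of the resulting page-ownership contract, inferred from these hunks (identifiers are the series' own):

	/* Who owns the RX page after am65_cpsw_run_xdp(), per verdict:
	 *   AM65_CPSW_XDP_PASS     - caller wraps the page in an skb
	 *   AM65_CPSW_XDP_TX       - frame is queued on a TX channel and
	 *                            freed later in TX completion
	 *   AM65_CPSW_XDP_REDIRECT - buffer was handed to the core via
	 *                            xdp_do_redirect()
	 *   AM65_CPSW_XDP_CONSUMED - dropped; only in this case must the
	 *                            caller recycle the page itself:
	 */
	if (*xdp_state == AM65_CPSW_XDP_CONSUMED)
		am65_cpsw_put_page(flow, virt_to_head_page(xdp.data), true);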
xdp_init_buff(&xdp, PAGE_SIZE, &port->xdp_rxq[flow->id]); xdp_prepare_buff(&xdp, page_addr, AM65_CPSW_HEADROOM, pkt_len, false); - *xdp_state = am65_cpsw_run_xdp(flow, port, &xdp, - cpu, &pkt_len); + *xdp_state = am65_cpsw_run_xdp(flow, port, &xdp, &pkt_len); + if (*xdp_state == AM65_CPSW_XDP_CONSUMED) { + page = virt_to_head_page(xdp.data); + am65_cpsw_put_page(flow, page, true); + goto allocate; + } + if (*xdp_state != AM65_CPSW_XDP_PASS) goto allocate; @@ -1401,7 +1400,6 @@ static int am65_cpsw_nuss_rx_poll(struct napi_struct *napi_rx, int budget) { struct am65_cpsw_rx_flow *flow = am65_cpsw_napi_to_rx_flow(napi_rx); struct am65_cpsw_common *common = flow->common; - int cpu = smp_processor_id(); int xdp_state_or = 0; int cur_budget, ret; int xdp_state; @@ -1410,7 +1408,7 @@ static int am65_cpsw_nuss_rx_poll(struct napi_struct *napi_rx, int budget) /* process only this flow */ cur_budget = budget; while (cur_budget--) { - ret = am65_cpsw_nuss_rx_packets(flow, cpu, &xdp_state); + ret = am65_cpsw_nuss_rx_packets(flow, &xdp_state); xdp_state_or |= xdp_state; if (ret) break; @@ -1438,52 +1436,6 @@ static int am65_cpsw_nuss_rx_poll(struct napi_struct *napi_rx, int budget) return num_rx; } -static struct sk_buff * -am65_cpsw_nuss_tx_compl_packet_skb(struct am65_cpsw_tx_chn *tx_chn, - dma_addr_t desc_dma) -{ - struct cppi5_host_desc_t *desc_tx; - struct sk_buff *skb; - void **swdata; - - desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool, - desc_dma); - swdata = cppi5_hdesc_get_swdata(desc_tx); - skb = *(swdata); - am65_cpsw_nuss_xmit_free(tx_chn, desc_tx); - - am65_cpts_tx_timestamp(tx_chn->common->cpts, skb); - - dev_sw_netstats_tx_add(skb->dev, 1, skb->len); - - return skb; -} - -static struct xdp_frame * -am65_cpsw_nuss_tx_compl_packet_xdp(struct am65_cpsw_common *common, - struct am65_cpsw_tx_chn *tx_chn, - dma_addr_t desc_dma, - struct net_device **ndev) -{ - struct cppi5_host_desc_t *desc_tx; - struct am65_cpsw_port *port; - struct xdp_frame *xdpf; - u32 port_id = 0; - void **swdata; - - desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool, desc_dma); - cppi5_desc_get_tags_ids(&desc_tx->hdr, NULL, &port_id); - swdata = cppi5_hdesc_get_swdata(desc_tx); - xdpf = *(swdata); - am65_cpsw_nuss_xmit_free(tx_chn, desc_tx); - - port = am65_common_get_port(common, port_id); - dev_sw_netstats_tx_add(port->ndev, 1, xdpf->len); - *ndev = port->ndev; - - return xdpf; -} - static void am65_cpsw_nuss_tx_wake(struct am65_cpsw_tx_chn *tx_chn, struct net_device *ndev, struct netdev_queue *netif_txq) { @@ -1504,13 +1456,17 @@ static void am65_cpsw_nuss_tx_wake(struct am65_cpsw_tx_chn *tx_chn, struct net_d static int am65_cpsw_nuss_tx_compl_packets(struct am65_cpsw_common *common, int chn, unsigned int budget, bool *tdown) { + bool single_port = AM65_CPSW_IS_CPSW2G(common); enum am65_cpsw_tx_buf_type buf_type; + struct am65_cpsw_tx_swdata *swdata; + struct cppi5_host_desc_t *desc_tx; struct device *dev = common->dev; struct am65_cpsw_tx_chn *tx_chn; struct netdev_queue *netif_txq; unsigned int total_bytes = 0; struct net_device *ndev; struct xdp_frame *xdpf; + unsigned int pkt_len; struct sk_buff *skb; dma_addr_t desc_dma; int res, num_tx = 0; @@ -1518,9 +1474,12 @@ static int am65_cpsw_nuss_tx_compl_packets(struct am65_cpsw_common *common, tx_chn = &common->tx_chns[chn]; while (true) { - spin_lock(&tx_chn->lock); + if (!single_port) + spin_lock(&tx_chn->lock); res = k3_udma_glue_pop_tx_chn(tx_chn->tx_chn, &desc_dma); - spin_unlock(&tx_chn->lock); + if (!single_port) + spin_unlock(&tx_chn->lock); + if 
(res == -ENODATA) break; @@ -1531,27 +1490,43 @@ static int am65_cpsw_nuss_tx_compl_packets(struct am65_cpsw_common *common, break; } + desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool, + desc_dma); + swdata = cppi5_hdesc_get_swdata(desc_tx); + ndev = swdata->ndev; buf_type = am65_cpsw_nuss_buf_type(tx_chn, desc_dma); if (buf_type == AM65_CPSW_TX_BUF_TYPE_SKB) { - skb = am65_cpsw_nuss_tx_compl_packet_skb(tx_chn, desc_dma); - ndev = skb->dev; - total_bytes = skb->len; + skb = swdata->skb; + am65_cpts_tx_timestamp(tx_chn->common->cpts, skb); + pkt_len = skb->len; napi_consume_skb(skb, budget); } else { - xdpf = am65_cpsw_nuss_tx_compl_packet_xdp(common, tx_chn, - desc_dma, &ndev); - total_bytes = xdpf->len; + xdpf = swdata->xdpf; + pkt_len = xdpf->len; if (buf_type == AM65_CPSW_TX_BUF_TYPE_XDP_TX) xdp_return_frame_rx_napi(xdpf); else xdp_return_frame(xdpf); } + + total_bytes += pkt_len; num_tx++; + am65_cpsw_nuss_xmit_free(tx_chn, desc_tx); + dev_sw_netstats_tx_add(ndev, 1, pkt_len); + if (!single_port) { + /* as packets from multi ports can be interleaved + * on the same channel, we have to figure out the + * port/queue at every packet and report it/wake queue. + */ + netif_txq = netdev_get_tx_queue(ndev, chn); + netdev_tx_completed_queue(netif_txq, 1, pkt_len); + am65_cpsw_nuss_tx_wake(tx_chn, ndev, netif_txq); + } + } + if (single_port) { netif_txq = netdev_get_tx_queue(ndev, chn); - netdev_tx_completed_queue(netif_txq, num_tx, total_bytes); - am65_cpsw_nuss_tx_wake(tx_chn, ndev, netif_txq); } @@ -1560,66 +1535,6 @@ static int am65_cpsw_nuss_tx_compl_packets(struct am65_cpsw_common *common, return num_tx; } -static int am65_cpsw_nuss_tx_compl_packets_2g(struct am65_cpsw_common *common, - int chn, unsigned int budget, bool *tdown) -{ - enum am65_cpsw_tx_buf_type buf_type; - struct device *dev = common->dev; - struct am65_cpsw_tx_chn *tx_chn; - struct netdev_queue *netif_txq; - unsigned int total_bytes = 0; - struct net_device *ndev; - struct xdp_frame *xdpf; - struct sk_buff *skb; - dma_addr_t desc_dma; - int res, num_tx = 0; - - tx_chn = &common->tx_chns[chn]; - - while (true) { - res = k3_udma_glue_pop_tx_chn(tx_chn->tx_chn, &desc_dma); - if (res == -ENODATA) - break; - - if (cppi5_desc_is_tdcm(desc_dma)) { - if (atomic_dec_and_test(&common->tdown_cnt)) - complete(&common->tdown_complete); - *tdown = true; - break; - } - - buf_type = am65_cpsw_nuss_buf_type(tx_chn, desc_dma); - if (buf_type == AM65_CPSW_TX_BUF_TYPE_SKB) { - skb = am65_cpsw_nuss_tx_compl_packet_skb(tx_chn, desc_dma); - ndev = skb->dev; - total_bytes += skb->len; - napi_consume_skb(skb, budget); - } else { - xdpf = am65_cpsw_nuss_tx_compl_packet_xdp(common, tx_chn, - desc_dma, &ndev); - total_bytes += xdpf->len; - if (buf_type == AM65_CPSW_TX_BUF_TYPE_XDP_TX) - xdp_return_frame_rx_napi(xdpf); - else - xdp_return_frame(xdpf); - } - num_tx++; - } - - if (!num_tx) - return 0; - - netif_txq = netdev_get_tx_queue(ndev, chn); - - netdev_tx_completed_queue(netif_txq, num_tx, total_bytes); - - am65_cpsw_nuss_tx_wake(tx_chn, ndev, netif_txq); - - dev_dbg(dev, "%s:%u pkt:%d\n", __func__, chn, num_tx); - - return num_tx; -} - static enum hrtimer_restart am65_cpsw_nuss_tx_timer_callback(struct hrtimer *timer) { struct am65_cpsw_tx_chn *tx_chns = @@ -1635,13 +1550,8 @@ static int am65_cpsw_nuss_tx_poll(struct napi_struct *napi_tx, int budget) bool tdown = false; int num_tx; - if (AM65_CPSW_IS_CPSW2G(tx_chn->common)) - num_tx = am65_cpsw_nuss_tx_compl_packets_2g(tx_chn->common, tx_chn->id, - budget, &tdown); - else - num_tx = 
am65_cpsw_nuss_tx_compl_packets(tx_chn->common, - tx_chn->id, budget, &tdown); - + num_tx = am65_cpsw_nuss_tx_compl_packets(tx_chn->common, + tx_chn->id, budget, &tdown); if (num_tx >= budget) return budget; @@ -1685,12 +1595,12 @@ static netdev_tx_t am65_cpsw_nuss_ndo_slave_xmit(struct sk_buff *skb, struct am65_cpsw_common *common = am65_ndev_to_common(ndev); struct cppi5_host_desc_t *first_desc, *next_desc, *cur_desc; struct am65_cpsw_port *port = am65_ndev_to_port(ndev); + struct am65_cpsw_tx_swdata *swdata; struct device *dev = common->dev; struct am65_cpsw_tx_chn *tx_chn; struct netdev_queue *netif_txq; dma_addr_t desc_dma, buf_dma; int ret, q_idx, i; - void **swdata; u32 *psdata; u32 pkt_len; @@ -1736,7 +1646,8 @@ static netdev_tx_t am65_cpsw_nuss_ndo_slave_xmit(struct sk_buff *skb, k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &buf_dma); cppi5_hdesc_attach_buf(first_desc, buf_dma, pkt_len, buf_dma, pkt_len); swdata = cppi5_hdesc_get_swdata(first_desc); - *(swdata) = skb; + swdata->ndev = ndev; + swdata->skb = skb; psdata = cppi5_hdesc_get_psdata(first_desc); /* HW csum offload if enabled */ @@ -2306,14 +2217,18 @@ static void am65_cpsw_nuss_remove_tx_chns(struct am65_cpsw_common *common) static int am65_cpsw_nuss_ndev_add_tx_napi(struct am65_cpsw_common *common) { struct device *dev = common->dev; + struct am65_cpsw_tx_chn *tx_chn; int i, ret = 0; for (i = 0; i < common->tx_ch_num; i++) { - struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i]; + tx_chn = &common->tx_chns[i]; hrtimer_init(&tx_chn->tx_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED); tx_chn->tx_hrtimer.function = &am65_cpsw_nuss_tx_timer_callback; + netif_napi_add_tx(common->dma_ndev, &tx_chn->napi_tx, + am65_cpsw_nuss_tx_poll); + ret = devm_request_irq(dev, tx_chn->irq, am65_cpsw_nuss_tx_irq, IRQF_TRIGGER_HIGH, @@ -2323,19 +2238,16 @@ static int am65_cpsw_nuss_ndev_add_tx_napi(struct am65_cpsw_common *common) tx_chn->id, tx_chn->irq, ret); goto err; } - - netif_napi_add_tx(common->dma_ndev, &tx_chn->napi_tx, - am65_cpsw_nuss_tx_poll); } return 0; err: - for (--i ; i >= 0 ; i--) { - struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i]; - - netif_napi_del(&tx_chn->napi_tx); + netif_napi_del(&tx_chn->napi_tx); + for (--i; i >= 0; i--) { + tx_chn = &common->tx_chns[i]; devm_free_irq(dev, tx_chn->irq, tx_chn); + netif_napi_del(&tx_chn->napi_tx); } return ret; @@ -2569,6 +2481,9 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common) HRTIMER_MODE_REL_PINNED); flow->rx_hrtimer.function = &am65_cpsw_nuss_rx_timer_callback; + netif_napi_add(common->dma_ndev, &flow->napi_rx, + am65_cpsw_nuss_rx_poll); + ret = devm_request_irq(dev, flow->irq, am65_cpsw_nuss_rx_irq, IRQF_TRIGGER_HIGH, @@ -2577,11 +2492,8 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common) dev_err(dev, "failure requesting rx %d irq %u, %d\n", i, flow->irq, ret); flow->irq = -EINVAL; - goto err_flow; + goto err_request_irq; } - - netif_napi_add(common->dma_ndev, &flow->napi_rx, - am65_cpsw_nuss_rx_poll); } /* setup classifier to route priorities to flows */ @@ -2589,11 +2501,14 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common) return 0; +err_request_irq: + netif_napi_del(&flow->napi_rx); + err_flow: - for (--i; i >= 0 ; i--) { + for (--i; i >= 0; i--) { flow = &rx_chn->flows[i]; - netif_napi_del(&flow->napi_rx); devm_free_irq(dev, flow->irq, flow); + netif_napi_del(&flow->napi_rx); } err: @@ -3578,6 +3493,10 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev) __be64 id_temp; 
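The BUILD_BUG_ON_MSG() guards added to am65_cpsw_nuss_probe() just below pin the new typed swdata to the descriptor's software-data area. A self-contained sketch of the size argument, assuming AM65_CPSW_NAV_SW_DATA_SIZE is 16 bytes and a 64-bit build (both assumptions taken from the driver headers, not from this diff):

	#include <linux/build_bug.h>

	/* Sketch: two pointers exactly fill an assumed 16-byte CPPI SW
	 * data area, so any future growth breaks the build rather than
	 * silently corrupting descriptors.
	 */
	struct tx_swdata_sketch {		/* stand-in for am65_cpsw_tx_swdata */
		struct net_device *ndev;	/* 8 bytes on 64-bit */
		union {
			struct sk_buff *skb;
			struct xdp_frame *xdpf;
		};				/* 8 bytes on 64-bit */
	};
	static_assert(sizeof(struct tx_swdata_sketch) <= 16,
		      "swdata would overflow the assumed SW data area");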
int ret, i; + BUILD_BUG_ON_MSG(sizeof(struct am65_cpsw_tx_swdata) > AM65_CPSW_NAV_SW_DATA_SIZE, + "TX SW_DATA size exceeds AM65_CPSW_NAV_SW_DATA_SIZE"); + BUILD_BUG_ON_MSG(sizeof(struct am65_cpsw_swdata) > AM65_CPSW_NAV_SW_DATA_SIZE, + "SW_DATA size exceeds AM65_CPSW_NAV_SW_DATA_SIZE"); common = devm_kzalloc(dev, sizeof(struct am65_cpsw_common), GFP_KERNEL); if (!common) return -ENOMEM; diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.h b/drivers/net/ethernet/ti/am65-cpsw-nuss.h index e7832a5cf3ccd..917c37e4e89bd 100644 --- a/drivers/net/ethernet/ti/am65-cpsw-nuss.h +++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.h @@ -104,6 +104,14 @@ struct am65_cpsw_rx_flow { char name[32]; }; +struct am65_cpsw_tx_swdata { + struct net_device *ndev; + union { + struct sk_buff *skb; + struct xdp_frame *xdpf; + }; +}; + struct am65_cpsw_swdata { u32 flow_id; struct page *page; diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 0cb6fa6e5b7d4..a984b7d84e5e5 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -351,6 +351,7 @@ static void cpsw_rx_handler(void *token, int len, int status) int ret = 0, port, ch = xmeta->ch; int headroom = CPSW_HEADROOM_NA; struct net_device *ndev = xmeta->ndev; + u32 metasize = 0; struct cpsw_priv *priv; struct page_pool *pool; struct sk_buff *skb; @@ -400,7 +401,7 @@ static void cpsw_rx_handler(void *token, int len, int status) size -= CPSW_RX_VLAN_ENCAP_HDR_SIZE; } - xdp_prepare_buff(&xdp, pa, headroom, size, false); + xdp_prepare_buff(&xdp, pa, headroom, size, true); port = priv->emac_port + cpsw->data.dual_emac; ret = cpsw_run_xdp(priv, ch, &xdp, page, port, &len); @@ -408,6 +409,7 @@ static void cpsw_rx_handler(void *token, int len, int status) goto requeue; headroom = xdp.data - xdp.data_hard_start; + metasize = xdp.data - xdp.data_meta; /* XDP prog can modify vlan tag, so can't use encap header */ status &= ~CPDMA_RX_VLAN_ENCAP; @@ -423,6 +425,8 @@ static void cpsw_rx_handler(void *token, int len, int status) skb_reserve(skb, headroom); skb_put(skb, len); + if (metasize) + skb_metadata_set(skb, metasize); skb->dev = ndev; if (status & CPDMA_RX_VLAN_ENCAP) cpsw_rx_vlan_encap(skb); diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c index cec0a90659d94..5b5b52e4e7a75 100644 --- a/drivers/net/ethernet/ti/cpsw_new.c +++ b/drivers/net/ethernet/ti/cpsw_new.c @@ -293,6 +293,7 @@ static void cpsw_rx_handler(void *token, int len, int status) struct page_pool *pool; struct sk_buff *skb; struct xdp_buff xdp; + u32 metasize = 0; int ret = 0; dma_addr_t dma; @@ -345,13 +346,14 @@ static void cpsw_rx_handler(void *token, int len, int status) size -= CPSW_RX_VLAN_ENCAP_HDR_SIZE; } - xdp_prepare_buff(&xdp, pa, headroom, size, false); + xdp_prepare_buff(&xdp, pa, headroom, size, true); ret = cpsw_run_xdp(priv, ch, &xdp, page, priv->emac_port, &len); if (ret != CPSW_XDP_PASS) goto requeue; headroom = xdp.data - xdp.data_hard_start; + metasize = xdp.data - xdp.data_meta; /* XDP prog can modify vlan tag, so can't use encap header */ status &= ~CPDMA_RX_VLAN_ENCAP; @@ -368,6 +370,8 @@ static void cpsw_rx_handler(void *token, int len, int status) skb->offload_fwd_mark = priv->offload_fwd_mark; skb_reserve(skb, headroom); skb_put(skb, len); + if (metasize) + skb_metadata_set(skb, metasize); skb->dev = ndev; if (status & CPDMA_RX_VLAN_ENCAP) cpsw_rx_vlan_encap(skb); @@ -1409,7 +1413,7 @@ static int cpsw_create_ports(struct cpsw_common *cpsw) ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | 
NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_TC; - ndev->netns_local = true; + ndev->netns_immutable = true; ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT | @@ -1418,6 +1422,7 @@ static int cpsw_create_ports(struct cpsw_common *cpsw) ndev->netdev_ops = &cpsw_netdev_ops; ndev->ethtool_ops = &cpsw_ethtool_ops; SET_NETDEV_DEV(ndev, dev); + ndev->dev.of_node = slave_data->slave_node; if (!napi_ndev) { /* CPSW Host port CPDMA interface is shared between diff --git a/drivers/net/ethernet/ti/icssg/icss_iep.c b/drivers/net/ethernet/ti/icssg/icss_iep.c index 768578c0d9587..aeebdc4c121e6 100644 --- a/drivers/net/ethernet/ti/icssg/icss_iep.c +++ b/drivers/net/ethernet/ti/icssg/icss_iep.c @@ -406,9 +406,25 @@ static void icss_iep_update_to_next_boundary(struct icss_iep *iep, u64 start_ns) static int icss_iep_perout_enable_hw(struct icss_iep *iep, struct ptp_perout_request *req, int on) { + struct timespec64 ts; + u64 ns_start; + u64 ns_width; int ret; u64 cmp; + /* Calculate width of the signal for PPS/PEROUT handling */ + ts.tv_sec = req->on.sec; + ts.tv_nsec = req->on.nsec; + ns_width = timespec64_to_ns(&ts); + + if (req->flags & PTP_PEROUT_PHASE) { + ts.tv_sec = req->phase.sec; + ts.tv_nsec = req->phase.nsec; + ns_start = timespec64_to_ns(&ts); + } else { + ns_start = 0; + } + if (iep->ops && iep->ops->perout_enable) { ret = iep->ops->perout_enable(iep->clockops_data, req, on, &cmp); if (ret) @@ -419,10 +435,12 @@ static int icss_iep_perout_enable_hw(struct icss_iep *iep, regmap_write(iep->map, ICSS_IEP_CMP1_REG0, lower_32_bits(cmp)); if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT) regmap_write(iep->map, ICSS_IEP_CMP1_REG1, upper_32_bits(cmp)); - /* Configure SYNC, 1ms pulse width */ - regmap_write(iep->map, ICSS_IEP_SYNC_PWIDTH_REG, 1000000); + /* Configure SYNC, based on req on width */ + regmap_write(iep->map, ICSS_IEP_SYNC_PWIDTH_REG, + div_u64(ns_width, iep->def_inc)); regmap_write(iep->map, ICSS_IEP_SYNC0_PERIOD_REG, 0); - regmap_write(iep->map, ICSS_IEP_SYNC_START_REG, 0); + regmap_write(iep->map, ICSS_IEP_SYNC_START_REG, + div_u64(ns_start, iep->def_inc)); regmap_write(iep->map, ICSS_IEP_SYNC_CTRL_REG, 0); /* one-shot mode */ /* Enable CMP 1 */ regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG, @@ -447,6 +465,10 @@ static int icss_iep_perout_enable_hw(struct icss_iep *iep, + req->period.nsec; icss_iep_update_to_next_boundary(iep, start_ns); + regmap_write(iep->map, ICSS_IEP_SYNC_PWIDTH_REG, + div_u64(ns_width, iep->def_inc)); + regmap_write(iep->map, ICSS_IEP_SYNC_START_REG, + div_u64(ns_start, iep->def_inc)); /* Enable Sync in single shot mode */ regmap_write(iep->map, ICSS_IEP_SYNC_CTRL_REG, IEP_SYNC_CTRL_SYNC_N_EN(0) | IEP_SYNC_CTRL_SYNC_EN); @@ -476,6 +498,15 @@ static int icss_iep_perout_enable(struct icss_iep *iep, { int ret = 0; + /* Return error if the req is NULL */ + if (!req) + return -EINVAL; + + /* Reject requests with unsupported flags */ + if (req->flags & ~(PTP_PEROUT_DUTY_CYCLE | + PTP_PEROUT_PHASE)) + return -EOPNOTSUPP; + mutex_lock(&iep->ptp_clk_mutex); if (iep->pps_enabled) { @@ -486,6 +517,12 @@ static int icss_iep_perout_enable(struct icss_iep *iep, if (iep->perout_enabled == !!on) goto exit; + /* Set default "on" time (1ms) for the signal if not passed by the app */ + if (!(req->flags & PTP_PEROUT_DUTY_CYCLE)) { + req->on.sec = 0; + req->on.nsec = NSEC_PER_MSEC; + } + ret = icss_iep_perout_enable_hw(iep, req, on); if (!ret) iep->perout_enabled = !!on; @@ -568,10 +605,13 @@ static int icss_iep_pps_enable(struct icss_iep *iep, int on) if (on) 
{ ns = icss_iep_gettime(iep, NULL); ts = ns_to_timespec64(ns); + rq.perout.flags = 0; rq.perout.period.sec = 1; rq.perout.period.nsec = 0; rq.perout.start.sec = ts.tv_sec + 2; rq.perout.start.nsec = 0; + rq.perout.on.sec = 0; + rq.perout.on.nsec = NSEC_PER_MSEC; ret = icss_iep_perout_enable_hw(iep, &rq.perout, on); } else { ret = icss_iep_perout_enable_hw(iep, &rq.perout, on); diff --git a/drivers/net/ethernet/ti/icssg/icssg_common.c b/drivers/net/ethernet/ti/icssg/icssg_common.c index 74f0f200a89d4..f0224576b95fd 100644 --- a/drivers/net/ethernet/ti/icssg/icssg_common.c +++ b/drivers/net/ethernet/ti/icssg/icssg_common.c @@ -45,6 +45,11 @@ void prueth_cleanup_rx_chns(struct prueth_emac *emac, struct prueth_rx_chn *rx_chn, int max_rflows) { + if (rx_chn->pg_pool) { + page_pool_destroy(rx_chn->pg_pool); + rx_chn->pg_pool = NULL; + } + if (rx_chn->desc_pool) k3_cppi_desc_pool_destroy(rx_chn->desc_pool); @@ -93,11 +98,20 @@ void prueth_xmit_free(struct prueth_tx_chn *tx_chn, { struct cppi5_host_desc_t *first_desc, *next_desc; dma_addr_t buf_dma, next_desc_dma; + struct prueth_swdata *swdata; + struct page *page; u32 buf_dma_len; first_desc = desc; next_desc = first_desc; + swdata = cppi5_hdesc_get_swdata(desc); + if (swdata->type == PRUETH_SWDATA_PAGE) { + page = swdata->data.page; + page_pool_recycle_direct(page->pp, swdata->data.page); + goto free_desc; + } + cppi5_hdesc_get_obuf(first_desc, &buf_dma, &buf_dma_len); k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &buf_dma); @@ -121,6 +135,7 @@ void prueth_xmit_free(struct prueth_tx_chn *tx_chn, k3_cppi_desc_pool_free(tx_chn->desc_pool, next_desc); } +free_desc: k3_cppi_desc_pool_free(tx_chn->desc_pool, first_desc); } EXPORT_SYMBOL_GPL(prueth_xmit_free); @@ -131,12 +146,13 @@ int emac_tx_complete_packets(struct prueth_emac *emac, int chn, struct net_device *ndev = emac->ndev; struct cppi5_host_desc_t *desc_tx; struct netdev_queue *netif_txq; + struct prueth_swdata *swdata; struct prueth_tx_chn *tx_chn; unsigned int total_bytes = 0; + struct xdp_frame *xdpf; struct sk_buff *skb; dma_addr_t desc_dma; int res, num_tx = 0; - void **swdata; tx_chn = &emac->tx_chns[chn]; @@ -157,20 +173,27 @@ int emac_tx_complete_packets(struct prueth_emac *emac, int chn, desc_dma); swdata = cppi5_hdesc_get_swdata(desc_tx); - /* was this command's TX complete? 
*/ - if (emac->is_sr1 && *(swdata) == emac->cmd_data) { + switch (swdata->type) { + case PRUETH_SWDATA_SKB: + skb = swdata->data.skb; + dev_sw_netstats_tx_add(skb->dev, 1, skb->len); + total_bytes += skb->len; + napi_consume_skb(skb, budget); + break; + case PRUETH_SWDATA_XDPF: + xdpf = swdata->data.xdpf; + dev_sw_netstats_tx_add(ndev, 1, xdpf->len); + total_bytes += xdpf->len; + xdp_return_frame(xdpf); + break; + default: + netdev_err(ndev, "tx_complete: invalid swdata type %d\n", swdata->type); prueth_xmit_free(tx_chn, desc_tx); + ndev->stats.tx_dropped++; continue; } - skb = *(swdata); prueth_xmit_free(tx_chn, desc_tx); - - ndev = skb->dev; - ndev->stats.tx_packets++; - ndev->stats.tx_bytes += skb->len; - total_bytes += skb->len; - napi_consume_skb(skb, budget); num_tx++; } @@ -461,17 +484,17 @@ int prueth_init_rx_chns(struct prueth_emac *emac, } EXPORT_SYMBOL_GPL(prueth_init_rx_chns); -int prueth_dma_rx_push(struct prueth_emac *emac, - struct sk_buff *skb, - struct prueth_rx_chn *rx_chn) +int prueth_dma_rx_push_mapped(struct prueth_emac *emac, + struct prueth_rx_chn *rx_chn, + struct page *page, u32 buf_len) { struct net_device *ndev = emac->ndev; struct cppi5_host_desc_t *desc_rx; - u32 pkt_len = skb_tailroom(skb); + struct prueth_swdata *swdata; dma_addr_t desc_dma; dma_addr_t buf_dma; - void **swdata; + buf_dma = page_pool_get_dma_addr(page) + PRUETH_HEADROOM; desc_rx = k3_cppi_desc_pool_alloc(rx_chn->desc_pool); if (!desc_rx) { netdev_err(ndev, "rx push: failed to allocate descriptor\n"); @@ -479,25 +502,19 @@ int prueth_dma_rx_push(struct prueth_emac *emac, } desc_dma = k3_cppi_desc_pool_virt2dma(rx_chn->desc_pool, desc_rx); - buf_dma = dma_map_single(rx_chn->dma_dev, skb->data, pkt_len, DMA_FROM_DEVICE); - if (unlikely(dma_mapping_error(rx_chn->dma_dev, buf_dma))) { - k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx); - netdev_err(ndev, "rx push: failed to map rx pkt buffer\n"); - return -EINVAL; - } - cppi5_hdesc_init(desc_rx, CPPI5_INFO0_HDESC_EPIB_PRESENT, PRUETH_NAV_PS_DATA_SIZE); k3_udma_glue_rx_dma_to_cppi5_addr(rx_chn->rx_chn, &buf_dma); - cppi5_hdesc_attach_buf(desc_rx, buf_dma, skb_tailroom(skb), buf_dma, skb_tailroom(skb)); + cppi5_hdesc_attach_buf(desc_rx, buf_dma, buf_len, buf_dma, buf_len); swdata = cppi5_hdesc_get_swdata(desc_rx); - *swdata = skb; + swdata->type = PRUETH_SWDATA_PAGE; + swdata->data.page = page; - return k3_udma_glue_push_rx_chn(rx_chn->rx_chn, 0, + return k3_udma_glue_push_rx_chn(rx_chn->rx_chn, PRUETH_RX_FLOW_DATA, desc_rx, desc_dma); } -EXPORT_SYMBOL_GPL(prueth_dma_rx_push); +EXPORT_SYMBOL_GPL(prueth_dma_rx_push_mapped); u64 icssg_ts_to_ns(u32 hi_sw, u32 hi, u32 lo, u32 cycle_time_ns) { @@ -535,18 +552,172 @@ void emac_rx_timestamp(struct prueth_emac *emac, ssh->hwtstamp = ns_to_ktime(ns); } -static int emac_rx_packet(struct prueth_emac *emac, u32 flow_id) +/** + * emac_xmit_xdp_frame - transmits an XDP frame + * @emac: emac device + * @xdpf: data to transmit + * @page: page from page pool if already DMA mapped + * @q_idx: queue id + * + * Return: XDP state + */ +u32 emac_xmit_xdp_frame(struct prueth_emac *emac, + struct xdp_frame *xdpf, + struct page *page, + unsigned int q_idx) +{ + struct cppi5_host_desc_t *first_desc; + struct net_device *ndev = emac->ndev; + struct prueth_tx_chn *tx_chn; + dma_addr_t desc_dma, buf_dma; + struct prueth_swdata *swdata; + u32 *epib; + int ret; + + if (q_idx >= PRUETH_MAX_TX_QUEUES) { + netdev_err(ndev, "xdp tx: invalid q_id %d\n", q_idx); + return ICSSG_XDP_CONSUMED; /* drop */ + } + + tx_chn = 
&emac->tx_chns[q_idx]; + + first_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool); + if (!first_desc) { + netdev_dbg(ndev, "xdp tx: failed to allocate descriptor\n"); + return ICSSG_XDP_CONSUMED; /* drop */ + } + + if (page) { /* already DMA mapped by page_pool */ + buf_dma = page_pool_get_dma_addr(page); + buf_dma += xdpf->headroom + sizeof(struct xdp_frame); + } else { /* Map the linear buffer */ + buf_dma = dma_map_single(tx_chn->dma_dev, xdpf->data, xdpf->len, DMA_TO_DEVICE); + if (dma_mapping_error(tx_chn->dma_dev, buf_dma)) { + netdev_err(ndev, "xdp tx: failed to map data buffer\n"); + goto drop_free_descs; /* drop */ + } + } + + cppi5_hdesc_init(first_desc, CPPI5_INFO0_HDESC_EPIB_PRESENT, + PRUETH_NAV_PS_DATA_SIZE); + cppi5_hdesc_set_pkttype(first_desc, 0); + epib = first_desc->epib; + epib[0] = 0; + epib[1] = 0; + + /* set dst tag to indicate internal qid at the firmware which is at + * bit8..bit15. bit0..bit7 indicates port num for directed + * packets in case of switch mode operation + */ + cppi5_desc_set_tags_ids(&first_desc->hdr, 0, (emac->port_id | (q_idx << 8))); + k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &buf_dma); + cppi5_hdesc_attach_buf(first_desc, buf_dma, xdpf->len, buf_dma, xdpf->len); + swdata = cppi5_hdesc_get_swdata(first_desc); + if (page) { + swdata->type = PRUETH_SWDATA_PAGE; + swdata->data.page = page; + } else { + swdata->type = PRUETH_SWDATA_XDPF; + swdata->data.xdpf = xdpf; + } + + cppi5_hdesc_set_pktlen(first_desc, xdpf->len); + desc_dma = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool, first_desc); + + ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, first_desc, desc_dma); + if (ret) { + netdev_err(ndev, "xdp tx: push failed: %d\n", ret); + goto drop_free_descs; + } + + return ICSSG_XDP_TX; + +drop_free_descs: + prueth_xmit_free(tx_chn, first_desc); + return ICSSG_XDP_CONSUMED; +} +EXPORT_SYMBOL_GPL(emac_xmit_xdp_frame); + +/** + * emac_run_xdp - run an XDP program + * @emac: emac device + * @xdp: XDP buffer containing the frame + * @page: page with RX data if already DMA mapped + * @len: Rx descriptor packet length + * + * Return: XDP state + */ +static u32 emac_run_xdp(struct prueth_emac *emac, struct xdp_buff *xdp, + struct page *page, u32 *len) +{ + struct net_device *ndev = emac->ndev; + struct bpf_prog *xdp_prog; + struct xdp_frame *xdpf; + u32 pkt_len = *len; + u32 act, result; + int q_idx, err; + + xdp_prog = READ_ONCE(emac->xdp_prog); + act = bpf_prog_run_xdp(xdp_prog, xdp); + switch (act) { + case XDP_PASS: + return ICSSG_XDP_PASS; + case XDP_TX: + /* Send packet to TX ring for immediate transmission */ + xdpf = xdp_convert_buff_to_frame(xdp); + if (unlikely(!xdpf)) { + ndev->stats.tx_dropped++; + goto drop; + } + + q_idx = smp_processor_id() % emac->tx_ch_num; + result = emac_xmit_xdp_frame(emac, xdpf, page, q_idx); + if (result == ICSSG_XDP_CONSUMED) { + ndev->stats.tx_dropped++; + goto drop; + } + + dev_sw_netstats_rx_add(ndev, xdpf->len); + return result; + case XDP_REDIRECT: + err = xdp_do_redirect(emac->ndev, xdp, xdp_prog); + if (err) + goto drop; + + dev_sw_netstats_rx_add(ndev, pkt_len); + return ICSSG_XDP_REDIR; + default: + bpf_warn_invalid_xdp_action(emac->ndev, xdp_prog, act); + fallthrough; + case XDP_ABORTED: +drop: + trace_xdp_exception(emac->ndev, xdp_prog, act); + fallthrough; /* handle aborts by dropping packet */ + case XDP_DROP: + ndev->stats.rx_dropped++; + page_pool_recycle_direct(emac->rx_chns.pg_pool, page); + return ICSSG_XDP_CONSUMED; + } +} + +static int emac_rx_packet(struct prueth_emac *emac, u32 flow_id, u32 
*xdp_state) { struct prueth_rx_chn *rx_chn = &emac->rx_chns; u32 buf_dma_len, pkt_len, port_id = 0; struct net_device *ndev = emac->ndev; struct cppi5_host_desc_t *desc_rx; - struct sk_buff *skb, *new_skb; + struct prueth_swdata *swdata; dma_addr_t desc_dma, buf_dma; - void **swdata; + struct page *page, *new_page; + struct page_pool *pool; + struct sk_buff *skb; + struct xdp_buff xdp; u32 *psdata; + void *pa; int ret; + *xdp_state = 0; + pool = rx_chn->pg_pool; ret = k3_udma_glue_pop_rx_chn(rx_chn->rx_chn, flow_id, &desc_dma); if (ret) { if (ret != -ENODATA) @@ -558,15 +729,15 @@ static int emac_rx_packet(struct prueth_emac *emac, u32 flow_id) return 0; desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma); - swdata = cppi5_hdesc_get_swdata(desc_rx); - skb = *swdata; - - psdata = cppi5_hdesc_get_psdata(desc_rx); - /* RX HW timestamp */ - if (emac->rx_ts_enabled) - emac_rx_timestamp(emac, skb, psdata); + if (swdata->type != PRUETH_SWDATA_PAGE) { + netdev_err(ndev, "rx_pkt: invalid swdata->type %d\n", swdata->type); + k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx); + return 0; + } + page = swdata->data.page; + page_pool_dma_sync_for_cpu(pool, page, 0, PAGE_SIZE); cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len); k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma); pkt_len = cppi5_hdesc_get_pktlen(desc_rx); @@ -574,32 +745,63 @@ static int emac_rx_packet(struct prueth_emac *emac, u32 flow_id) pkt_len -= 4; cppi5_desc_get_tags_ids(&desc_rx->hdr, &port_id, NULL); - dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE); k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx); - skb->dev = ndev; - new_skb = netdev_alloc_skb_ip_align(ndev, PRUETH_MAX_PKT_SIZE); /* if allocation fails we drop the packet but push the - * descriptor back to the ring with old skb to prevent a stall + * descriptor back to the ring with old page to prevent a stall */ - if (!new_skb) { + new_page = page_pool_dev_alloc_pages(pool); + if (unlikely(!new_page)) { + new_page = page; ndev->stats.rx_dropped++; - new_skb = skb; + goto requeue; + } + + pa = page_address(page); + if (emac->xdp_prog) { + xdp_init_buff(&xdp, PAGE_SIZE, &rx_chn->xdp_rxq); + xdp_prepare_buff(&xdp, pa, PRUETH_HEADROOM, pkt_len, false); + + *xdp_state = emac_run_xdp(emac, &xdp, page, &pkt_len); + if (*xdp_state == ICSSG_XDP_PASS) + skb = xdp_build_skb_from_buff(&xdp); + else + goto requeue; } else { - /* send the filled skb up the n/w stack */ - skb_put(skb, pkt_len); - if (emac->prueth->is_switch_mode) - skb->offload_fwd_mark = emac->offload_fwd_mark; - skb->protocol = eth_type_trans(skb, ndev); - napi_gro_receive(&emac->napi_rx, skb); - ndev->stats.rx_bytes += pkt_len; - ndev->stats.rx_packets++; + /* prepare skb and send to n/w stack */ + skb = napi_build_skb(pa, PAGE_SIZE); + } + + if (!skb) { + ndev->stats.rx_dropped++; + page_pool_recycle_direct(pool, page); + goto requeue; } + skb_reserve(skb, PRUETH_HEADROOM); + skb_put(skb, pkt_len); + skb->dev = ndev; + + psdata = cppi5_hdesc_get_psdata(desc_rx); + /* RX HW timestamp */ + if (emac->rx_ts_enabled) + emac_rx_timestamp(emac, skb, psdata); + + if (emac->prueth->is_switch_mode) + skb->offload_fwd_mark = emac->offload_fwd_mark; + skb->protocol = eth_type_trans(skb, ndev); + + skb_mark_for_recycle(skb); + napi_gro_receive(&emac->napi_rx, skb); + ndev->stats.rx_bytes += pkt_len; + ndev->stats.rx_packets++; + +requeue: /* queue another RX DMA */ - ret = prueth_dma_rx_push(emac, new_skb, &emac->rx_chns); + ret = prueth_dma_rx_push_mapped(emac, &emac->rx_chns, 
new_page, + PRUETH_MAX_PKT_SIZE); if (WARN_ON(ret < 0)) { - dev_kfree_skb_any(new_skb); + page_pool_recycle_direct(pool, new_page); ndev->stats.rx_errors++; ndev->stats.rx_dropped++; } @@ -611,22 +813,19 @@ static void prueth_rx_cleanup(void *data, dma_addr_t desc_dma) { struct prueth_rx_chn *rx_chn = data; struct cppi5_host_desc_t *desc_rx; - struct sk_buff *skb; - dma_addr_t buf_dma; - u32 buf_dma_len; - void **swdata; + struct prueth_swdata *swdata; + struct page_pool *pool; + struct page *page; + pool = rx_chn->pg_pool; desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma); swdata = cppi5_hdesc_get_swdata(desc_rx); - skb = *swdata; - cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len); - k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma); + if (swdata->type == PRUETH_SWDATA_PAGE) { + page = swdata->data.page; + page_pool_recycle_direct(pool, page); + } - dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len, - DMA_FROM_DEVICE); k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx); - - dev_kfree_skb_any(skb); } static int prueth_tx_ts_cookie_get(struct prueth_emac *emac) @@ -662,13 +861,13 @@ enum netdev_tx icssg_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev struct prueth_emac *emac = netdev_priv(ndev); struct prueth *prueth = emac->prueth; struct netdev_queue *netif_txq; + struct prueth_swdata *swdata; struct prueth_tx_chn *tx_chn; dma_addr_t desc_dma, buf_dma; u32 pkt_len, dst_tag_id; int i, ret = 0, q_idx; bool in_tx_ts = 0; int tx_ts_cookie; - void **swdata; u32 *epib; pkt_len = skb_headlen(skb); @@ -730,7 +929,8 @@ enum netdev_tx icssg_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &buf_dma); cppi5_hdesc_attach_buf(first_desc, buf_dma, pkt_len, buf_dma, pkt_len); swdata = cppi5_hdesc_get_swdata(first_desc); - *swdata = skb; + swdata->type = PRUETH_SWDATA_SKB; + swdata->data.skb = skb; /* Handle the case where skb is fragmented in pages */ cur_desc = first_desc; @@ -833,15 +1033,27 @@ static void prueth_tx_cleanup(void *data, dma_addr_t desc_dma) { struct prueth_tx_chn *tx_chn = data; struct cppi5_host_desc_t *desc_tx; + struct prueth_swdata *swdata; + struct xdp_frame *xdpf; struct sk_buff *skb; - void **swdata; desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool, desc_dma); swdata = cppi5_hdesc_get_swdata(desc_tx); - skb = *(swdata); - prueth_xmit_free(tx_chn, desc_tx); - dev_kfree_skb_any(skb); + switch (swdata->type) { + case PRUETH_SWDATA_SKB: + skb = swdata->data.skb; + dev_kfree_skb_any(skb); + break; + case PRUETH_SWDATA_XDPF: + xdpf = swdata->data.xdpf; + xdp_return_frame(xdpf); + break; + default: + break; + } + + prueth_xmit_free(tx_chn, desc_tx); } irqreturn_t prueth_rx_irq(int irq, void *dev_id) @@ -875,15 +1087,18 @@ int icssg_napi_rx_poll(struct napi_struct *napi_rx, int budget) PRUETH_RX_FLOW_DATA_SR1 : PRUETH_RX_FLOW_DATA; int flow = emac->is_sr1 ? 
PRUETH_MAX_RX_FLOWS_SR1 : PRUETH_MAX_RX_FLOWS; + int xdp_state_or = 0; int num_rx = 0; int cur_budget; + u32 xdp_state; int ret; while (flow--) { cur_budget = budget - num_rx; while (cur_budget--) { - ret = emac_rx_packet(emac, flow); + ret = emac_rx_packet(emac, flow, &xdp_state); + xdp_state_or |= xdp_state; if (ret) break; num_rx++; @@ -893,6 +1108,9 @@ int icssg_napi_rx_poll(struct napi_struct *napi_rx, int budget) break; } + if (xdp_state_or & ICSSG_XDP_REDIR) + xdp_do_flush(); + if (num_rx < budget && napi_complete_done(napi_rx, num_rx)) { if (unlikely(emac->rx_pace_timeout_ns)) { hrtimer_start(&emac->rx_hrtimer, @@ -907,29 +1125,71 @@ int icssg_napi_rx_poll(struct napi_struct *napi_rx, int budget) } EXPORT_SYMBOL_GPL(icssg_napi_rx_poll); +static struct page_pool *prueth_create_page_pool(struct prueth_emac *emac, + struct device *dma_dev, + int size) +{ + struct page_pool_params pp_params = { 0 }; + struct page_pool *pool; + + pp_params.order = 0; + pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV; + pp_params.pool_size = size; + pp_params.nid = dev_to_node(emac->prueth->dev); + pp_params.dma_dir = DMA_BIDIRECTIONAL; + pp_params.dev = dma_dev; + pp_params.napi = &emac->napi_rx; + pp_params.max_len = PAGE_SIZE; + + pool = page_pool_create(&pp_params); + if (IS_ERR(pool)) + netdev_err(emac->ndev, "cannot create rx page pool\n"); + + return pool; +} + int prueth_prepare_rx_chan(struct prueth_emac *emac, struct prueth_rx_chn *chn, int buf_size) { - struct sk_buff *skb; + struct page_pool *pool; + struct page *page; int i, ret; + pool = prueth_create_page_pool(emac, chn->dma_dev, chn->descs_num); + if (IS_ERR(pool)) + return PTR_ERR(pool); + + chn->pg_pool = pool; + for (i = 0; i < chn->descs_num; i++) { - skb = __netdev_alloc_skb_ip_align(NULL, buf_size, GFP_KERNEL); - if (!skb) - return -ENOMEM; + /* NOTE: we're not using memory efficiently here. + * 1 full page (4KB?) used here instead of + * PRUETH_MAX_PKT_SIZE (~1.5KB?) 
+ */ + page = page_pool_dev_alloc_pages(pool); + if (!page) { + netdev_err(emac->ndev, "couldn't allocate rx page\n"); + ret = -ENOMEM; + goto recycle_alloc_pg; + } - ret = prueth_dma_rx_push(emac, skb, chn); + ret = prueth_dma_rx_push_mapped(emac, chn, page, buf_size); if (ret < 0) { netdev_err(emac->ndev, - "cannot submit skb for rx chan %s ret %d\n", + "cannot submit page for rx chan %s ret %d\n", chn->name, ret); - kfree_skb(skb); - return ret; + page_pool_recycle_direct(pool, page); + goto recycle_alloc_pg; } } return 0; + +recycle_alloc_pg: + prueth_reset_rx_chan(&emac->rx_chns, PRUETH_MAX_RX_FLOWS, false); + + return ret; } EXPORT_SYMBOL_GPL(prueth_prepare_rx_chan); diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.c b/drivers/net/ethernet/ti/icssg/icssg_prueth.c index 00ed978605478..78434f24a15fa 100644 --- a/drivers/net/ethernet/ti/icssg/icssg_prueth.c +++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.c @@ -559,6 +559,33 @@ const struct icss_iep_clockops prueth_iep_clockops = { .perout_enable = prueth_perout_enable, }; +static int prueth_create_xdp_rxqs(struct prueth_emac *emac) +{ + struct xdp_rxq_info *rxq = &emac->rx_chns.xdp_rxq; + struct page_pool *pool = emac->rx_chns.pg_pool; + int ret; + + ret = xdp_rxq_info_reg(rxq, emac->ndev, 0, emac->napi_rx.napi_id); + if (ret) + return ret; + + ret = xdp_rxq_info_reg_mem_model(rxq, MEM_TYPE_PAGE_POOL, pool); + if (ret) + xdp_rxq_info_unreg(rxq); + + return ret; +} + +static void prueth_destroy_xdp_rxqs(struct prueth_emac *emac) +{ + struct xdp_rxq_info *rxq = &emac->rx_chns.xdp_rxq; + + if (!xdp_rxq_info_is_reg(rxq)) + return; + + xdp_rxq_info_unreg(rxq); +} + static int icssg_prueth_add_mcast(struct net_device *ndev, const u8 *addr) { struct net_device *real_dev; @@ -780,10 +807,14 @@ static int emac_ndo_open(struct net_device *ndev) if (ret) goto free_tx_ts_irq; - ret = k3_udma_glue_enable_rx_chn(emac->rx_chns.rx_chn); + ret = prueth_create_xdp_rxqs(emac); if (ret) goto reset_rx_chn; + ret = k3_udma_glue_enable_rx_chn(emac->rx_chns.rx_chn); + if (ret) + goto destroy_xdp_rxqs; + for (i = 0; i < emac->tx_ch_num; i++) { ret = k3_udma_glue_enable_tx_chn(emac->tx_chns[i].tx_chn); if (ret) @@ -809,6 +840,8 @@ static int emac_ndo_open(struct net_device *ndev) * any SKB for completion. So set false to free_skb */ prueth_reset_tx_chan(emac, i, false); +destroy_xdp_rxqs: + prueth_destroy_xdp_rxqs(emac); reset_rx_chn: prueth_reset_rx_chan(&emac->rx_chns, max_rx_flows, false); free_tx_ts_irq: @@ -879,7 +912,7 @@ static int emac_ndo_stop(struct net_device *ndev) k3_udma_glue_tdown_rx_chn(emac->rx_chns.rx_chn, true); prueth_reset_rx_chan(&emac->rx_chns, max_rx_flows, true); - + prueth_destroy_xdp_rxqs(emac); napi_disable(&emac->napi_rx); hrtimer_cancel(&emac->rx_hrtimer); @@ -1024,6 +1057,93 @@ static int emac_ndo_vlan_rx_del_vid(struct net_device *ndev, return 0; } +/** + * emac_xdp_xmit - Implements ndo_xdp_xmit + * @dev: netdev + * @n: number of frames + * @frames: array of XDP buffer pointers + * @flags: XDP extra info + * + * Return: number of frames successfully sent. Failed frames + * will be free'ed by XDP core. + * + * For error cases, a negative errno code is returned and no-frames + * are transmitted (caller must handle freeing frames). 
+ **/ +static int emac_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, + u32 flags) +{ + struct prueth_emac *emac = netdev_priv(dev); + struct net_device *ndev = emac->ndev; + struct xdp_frame *xdpf; + unsigned int q_idx; + int nxmit = 0; + u32 err; + int i; + + q_idx = smp_processor_id() % emac->tx_ch_num; + + if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) + return -EINVAL; + + for (i = 0; i < n; i++) { + xdpf = frames[i]; + err = emac_xmit_xdp_frame(emac, xdpf, NULL, q_idx); + if (err != ICSSG_XDP_TX) { + ndev->stats.tx_dropped++; + break; + } + nxmit++; + } + + return nxmit; +} + +/** + * emac_xdp_setup - add/remove an XDP program + * @emac: emac device + * @bpf: XDP program + * + * Return: Always 0 (Success) + **/ +static int emac_xdp_setup(struct prueth_emac *emac, struct netdev_bpf *bpf) +{ + struct bpf_prog *prog = bpf->prog; + xdp_features_t val; + + val = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT | + NETDEV_XDP_ACT_NDO_XMIT; + xdp_set_features_flag(emac->ndev, val); + + if (!emac->xdpi.prog && !prog) + return 0; + + WRITE_ONCE(emac->xdp_prog, prog); + + xdp_attachment_setup(&emac->xdpi, bpf); + + return 0; +} + +/** + * emac_ndo_bpf - implements ndo_bpf for icssg_prueth + * @ndev: network adapter device + * @bpf: XDP program + * + * Return: 0 on success, error code on failure. + **/ +static int emac_ndo_bpf(struct net_device *ndev, struct netdev_bpf *bpf) +{ + struct prueth_emac *emac = netdev_priv(ndev); + + switch (bpf->command) { + case XDP_SETUP_PROG: + return emac_xdp_setup(emac, bpf); + default: + return -EINVAL; + } +} + static const struct net_device_ops emac_netdev_ops = { .ndo_open = emac_ndo_open, .ndo_stop = emac_ndo_stop, @@ -1038,6 +1158,8 @@ static const struct net_device_ops emac_netdev_ops = { .ndo_fix_features = emac_ndo_fix_features, .ndo_vlan_rx_add_vid = emac_ndo_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = emac_ndo_vlan_rx_del_vid, + .ndo_bpf = emac_ndo_bpf, + .ndo_xdp_xmit = emac_xdp_xmit, }; static int prueth_netdev_init(struct prueth *prueth, @@ -1066,6 +1188,8 @@ static int prueth_netdev_init(struct prueth *prueth, emac->prueth = prueth; emac->ndev = ndev; emac->port_id = port; + emac->xdp_prog = NULL; + emac->ndev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS; emac->cmd_wq = create_singlethread_workqueue("icssg_cmd_wq"); if (!emac->cmd_wq) { ret = -ENOMEM; @@ -1152,7 +1276,7 @@ static int prueth_netdev_init(struct prueth *prueth, /* get mac address from DT and set private and netdev addr */ ret = of_get_ethdev_address(eth_node, ndev); - if (!is_valid_ether_addr(ndev->dev_addr)) { + if (ret || !is_valid_ether_addr(ndev->dev_addr)) { eth_hw_addr_random(ndev); dev_warn(prueth->dev, "port %d: using random MAC addr: %pM\n", port, ndev->dev_addr); @@ -1522,6 +1646,9 @@ static int prueth_probe(struct platform_device *pdev) np = dev->of_node; + BUILD_BUG_ON_MSG((sizeof(struct prueth_swdata) > PRUETH_NAV_SW_DATA_SIZE), + "insufficient SW_DATA size"); + prueth = devm_kzalloc(dev, sizeof(*prueth), GFP_KERNEL); if (!prueth) return -ENOMEM; @@ -1679,6 +1806,7 @@ static int prueth_probe(struct platform_device *pdev) } spin_lock_init(&prueth->vtbl_lock); + spin_lock_init(&prueth->stats_lock); /* setup netdev interfaces */ if (eth0_node) { ret = prueth_netdev_init(prueth, eth0_node); diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.h b/drivers/net/ethernet/ti/icssg/icssg_prueth.h index 329b46e9ee530..b6be4aa57a615 100644 --- a/drivers/net/ethernet/ti/icssg/icssg_prueth.h +++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.h @@ -8,6 +8,8 @@ #ifndef 
__NET_TI_ICSSG_PRUETH_H #define __NET_TI_ICSSG_PRUETH_H +#include +#include #include #include #include @@ -33,6 +35,8 @@ #include #include +#include +#include #include "icssg_config.h" #include "icss_iep.h" @@ -131,6 +135,26 @@ struct prueth_rx_chn { u32 descs_num; unsigned int irq[ICSSG_MAX_RFLOWS]; /* separate irq per flow */ char name[32]; + struct page_pool *pg_pool; + struct xdp_rxq_info xdp_rxq; +}; + +enum prueth_swdata_type { + PRUETH_SWDATA_INVALID = 0, + PRUETH_SWDATA_SKB, + PRUETH_SWDATA_PAGE, + PRUETH_SWDATA_CMD, + PRUETH_SWDATA_XDPF, +}; + +struct prueth_swdata { + enum prueth_swdata_type type; + union prueth_data { + struct sk_buff *skb; + struct page *page; + u32 cmd; + struct xdp_frame *xdpf; + } data; }; /* There are 4 Tx DMA channels, but the highest priority is CH3 (thread 3) @@ -140,6 +164,12 @@ struct prueth_rx_chn { #define PRUETH_MAX_TX_TS_REQUESTS 50 /* Max simultaneous TX_TS requests */ +/* XDP BPF state */ +#define ICSSG_XDP_PASS 0 +#define ICSSG_XDP_CONSUMED BIT(0) +#define ICSSG_XDP_TX BIT(1) +#define ICSSG_XDP_REDIR BIT(2) + /* Minimum coalesce time in usecs for both Tx and Rx */ #define ICSSG_MIN_COALESCE_USECS 20 @@ -208,8 +238,14 @@ struct prueth_emac { unsigned long rx_pace_timeout_ns; struct netdev_hw_addr_list vlan_mcast_list[MAX_VLAN_ID]; + struct bpf_prog *xdp_prog; + struct xdp_attachment_info xdpi; }; +/* The buf includes headroom compatible with both skb and xdpf */ +#define PRUETH_HEADROOM_NA (max(XDP_PACKET_HEADROOM, NET_SKB_PAD) + NET_IP_ALIGN) +#define PRUETH_HEADROOM ALIGN(PRUETH_HEADROOM_NA, sizeof(long)) + /** * struct prueth_pdata - PRUeth platform data * @fdqring_mode: Free desc queue mode @@ -305,6 +341,8 @@ struct prueth { int default_vlan; /** @vtbl_lock: Lock for vtbl in shared memory */ spinlock_t vtbl_lock; + /** @stats_lock: Lock for reading icssg stats */ + spinlock_t stats_lock; }; struct emac_tx_ts_response { @@ -410,9 +448,10 @@ int prueth_init_rx_chns(struct prueth_emac *emac, struct prueth_rx_chn *rx_chn, char *name, u32 max_rflows, u32 max_desc_num); -int prueth_dma_rx_push(struct prueth_emac *emac, - struct sk_buff *skb, - struct prueth_rx_chn *rx_chn); +int prueth_dma_rx_push_mapped(struct prueth_emac *emac, + struct prueth_rx_chn *rx_chn, + struct page *page, u32 buf_len); +unsigned int prueth_rxbuf_total_len(unsigned int len); void emac_rx_timestamp(struct prueth_emac *emac, struct sk_buff *skb, u32 *psdata); enum netdev_tx icssg_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev); @@ -441,5 +480,9 @@ void prueth_put_cores(struct prueth *prueth, int slice); /* Revision specific helper */ u64 icssg_ts_to_ns(u32 hi_sw, u32 hi, u32 lo, u32 cycle_time_ns); +u32 emac_xmit_xdp_frame(struct prueth_emac *emac, + struct xdp_frame *xdpf, + struct page *page, + unsigned int q_idx); #endif /* __NET_TI_ICSSG_PRUETH_H */ diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c b/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c index 64a19ff39562f..e4ba86c493146 100644 --- a/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c +++ b/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c @@ -84,7 +84,7 @@ static int emac_send_command_sr1(struct prueth_emac *emac, u32 cmd) __le32 *data = emac->cmd_data; dma_addr_t desc_dma, buf_dma; struct prueth_tx_chn *tx_chn; - void **swdata; + struct prueth_swdata *swdata; int ret = 0; u32 *epib; @@ -122,7 +122,8 @@ static int emac_send_command_sr1(struct prueth_emac *emac, u32 cmd) cppi5_hdesc_attach_buf(first_desc, buf_dma, pkt_len, buf_dma, pkt_len); swdata = cppi5_hdesc_get_swdata(first_desc); - 
*swdata = data; + swdata->type = PRUETH_SWDATA_CMD; + swdata->data.cmd = le32_to_cpu(data[0]); cppi5_hdesc_set_pktlen(first_desc, pkt_len); desc_dma = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool, first_desc); @@ -268,16 +269,16 @@ static int emac_phy_connect(struct prueth_emac *emac) * Returns skb pointer if packet found else NULL * Caller must free the returned skb. */ -static struct sk_buff *prueth_process_rx_mgm(struct prueth_emac *emac, - u32 flow_id) +static struct page *prueth_process_rx_mgm(struct prueth_emac *emac, + u32 flow_id) { struct prueth_rx_chn *rx_chn = &emac->rx_mgm_chn; struct net_device *ndev = emac->ndev; struct cppi5_host_desc_t *desc_rx; - struct sk_buff *skb, *new_skb; + struct page *page, *new_page; + struct prueth_swdata *swdata; dma_addr_t desc_dma, buf_dma; - u32 buf_dma_len, pkt_len; - void **swdata; + u32 buf_dma_len; int ret; ret = k3_udma_glue_pop_rx_chn(rx_chn->rx_chn, flow_id, &desc_dma); @@ -299,34 +300,31 @@ static struct sk_buff *prueth_process_rx_mgm(struct prueth_emac *emac, } swdata = cppi5_hdesc_get_swdata(desc_rx); - skb = *swdata; + page = swdata->data.page; cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len); - pkt_len = cppi5_hdesc_get_pktlen(desc_rx); dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE); k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx); - new_skb = netdev_alloc_skb_ip_align(ndev, PRUETH_MAX_PKT_SIZE); + new_page = page_pool_dev_alloc_pages(rx_chn->pg_pool); /* if allocation fails we drop the packet but push the * descriptor back to the ring with old skb to prevent a stall */ - if (!new_skb) { + if (!new_page) { netdev_err(ndev, - "skb alloc failed, dropped mgm pkt from flow %d\n", + "page alloc failed, dropped mgm pkt from flow %d\n", flow_id); - new_skb = skb; - skb = NULL; /* return NULL */ - } else { - /* return the filled skb */ - skb_put(skb, pkt_len); + new_page = page; + page = NULL; /* return NULL */ } /* queue another DMA */ - ret = prueth_dma_rx_push(emac, new_skb, &emac->rx_mgm_chn); + ret = prueth_dma_rx_push_mapped(emac, &emac->rx_chns, new_page, + PRUETH_MAX_PKT_SIZE); if (WARN_ON(ret < 0)) - dev_kfree_skb_any(new_skb); + page_pool_recycle_direct(rx_chn->pg_pool, new_page); - return skb; + return page; } static void prueth_tx_ts_sr1(struct prueth_emac *emac, @@ -362,14 +360,14 @@ static void prueth_tx_ts_sr1(struct prueth_emac *emac, static irqreturn_t prueth_rx_mgm_ts_thread_sr1(int irq, void *dev_id) { struct prueth_emac *emac = dev_id; - struct sk_buff *skb; + struct page *page; - skb = prueth_process_rx_mgm(emac, PRUETH_RX_MGM_FLOW_TIMESTAMP_SR1); - if (!skb) + page = prueth_process_rx_mgm(emac, PRUETH_RX_MGM_FLOW_TIMESTAMP_SR1); + if (!page) return IRQ_NONE; - prueth_tx_ts_sr1(emac, (void *)skb->data); - dev_kfree_skb_any(skb); + prueth_tx_ts_sr1(emac, (void *)page_address(page)); + page_pool_recycle_direct(page->pp, page); return IRQ_HANDLED; } @@ -377,15 +375,15 @@ static irqreturn_t prueth_rx_mgm_ts_thread_sr1(int irq, void *dev_id) static irqreturn_t prueth_rx_mgm_rsp_thread(int irq, void *dev_id) { struct prueth_emac *emac = dev_id; - struct sk_buff *skb; + struct page *page; u32 rsp; - skb = prueth_process_rx_mgm(emac, PRUETH_RX_MGM_FLOW_RESPONSE_SR1); - if (!skb) + page = prueth_process_rx_mgm(emac, PRUETH_RX_MGM_FLOW_RESPONSE_SR1); + if (!page) return IRQ_NONE; /* Process command response */ - rsp = le32_to_cpu(*(__le32 *)skb->data) & 0xffff0000; + rsp = le32_to_cpu(*(__le32 *)page_address(page)) & 0xffff0000; if (rsp == ICSSG_SHUTDOWN_CMD_SR1) { netdev_dbg(emac->ndev, "f/w 
Shutdown cmd resp %x\n", rsp); complete(&emac->cmd_complete); @@ -394,7 +392,7 @@ static irqreturn_t prueth_rx_mgm_rsp_thread(int irq, void *dev_id) complete(&emac->cmd_complete); } - dev_kfree_skb_any(skb); + page_pool_recycle_direct(page->pp, page); return IRQ_HANDLED; } @@ -862,7 +860,7 @@ static int prueth_netdev_init(struct prueth *prueth, /* get mac address from DT and set private and netdev addr */ ret = of_get_ethdev_address(eth_node, ndev); - if (!is_valid_ether_addr(ndev->dev_addr)) { + if (ret || !is_valid_ether_addr(ndev->dev_addr)) { eth_hw_addr_random(ndev); dev_warn(prueth->dev, "port %d: using random MAC addr: %pM\n", port, ndev->dev_addr); diff --git a/drivers/net/ethernet/ti/icssg/icssg_stats.c b/drivers/net/ethernet/ti/icssg/icssg_stats.c index 8800bd3a8d074..6f0edae38ea24 100644 --- a/drivers/net/ethernet/ti/icssg/icssg_stats.c +++ b/drivers/net/ethernet/ti/icssg/icssg_stats.c @@ -26,6 +26,8 @@ void emac_update_hardware_stats(struct prueth_emac *emac) u32 val, reg; int i; + spin_lock(&prueth->stats_lock); + for (i = 0; i < ARRAY_SIZE(icssg_all_miig_stats); i++) { regmap_read(prueth->miig_rt, base + icssg_all_miig_stats[i].offset, @@ -51,6 +53,8 @@ void emac_update_hardware_stats(struct prueth_emac *emac) emac->pa_stats[i] += val; } } + + spin_unlock(&prueth->stats_lock); } void icssg_stats_work_handler(struct work_struct *work) diff --git a/drivers/net/ethernet/wangxun/Kconfig b/drivers/net/ethernet/wangxun/Kconfig index e46ccebcfd22f..47e3e8434b9ef 100644 --- a/drivers/net/ethernet/wangxun/Kconfig +++ b/drivers/net/ethernet/wangxun/Kconfig @@ -18,6 +18,7 @@ if NET_VENDOR_WANGXUN config LIBWX tristate + depends on PTP_1588_CLOCK_OPTIONAL select PAGE_POOL help Common library for Wangxun(R) Ethernet drivers. @@ -25,6 +26,7 @@ config LIBWX config NGBE tristate "Wangxun(R) GbE PCI Express adapters support" depends on PCI + depends on PTP_1588_CLOCK_OPTIONAL select LIBWX select PHYLINK help @@ -42,6 +44,7 @@ config TXGBE depends on PCI depends on COMMON_CLK depends on I2C_DESIGNWARE_PLATFORM + depends on PTP_1588_CLOCK_OPTIONAL select MARVELL_10G_PHY select REGMAP select PHYLINK diff --git a/drivers/net/ethernet/wangxun/libwx/Makefile b/drivers/net/ethernet/wangxun/libwx/Makefile index 42ccd6e4052e5..9b78b604a94e4 100644 --- a/drivers/net/ethernet/wangxun/libwx/Makefile +++ b/drivers/net/ethernet/wangxun/libwx/Makefile @@ -4,4 +4,4 @@ obj-$(CONFIG_LIBWX) += libwx.o -libwx-objs := wx_hw.o wx_lib.o wx_ethtool.o +libwx-objs := wx_hw.o wx_lib.o wx_ethtool.o wx_ptp.o wx_mbx.o wx_sriov.o diff --git a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c index abe5921dde020..43019ec9329c8 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c +++ b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c @@ -41,6 +41,9 @@ static const struct wx_stats wx_gstrings_stats[] = { WX_STAT("rx_csum_offload_good_count", hw_csum_rx_good), WX_STAT("rx_csum_offload_errors", hw_csum_rx_error), WX_STAT("alloc_rx_buff_failed", alloc_rx_buff_failed), + WX_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts), + WX_STAT("tx_hwtstamp_skipped", tx_hwtstamp_skipped), + WX_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared), }; static const struct wx_stats wx_gstrings_fdir_stats[] = { @@ -69,7 +72,7 @@ int wx_get_sset_count(struct net_device *netdev, int sset) switch (sset) { case ETH_SS_STATS: - return (wx->mac.type == wx_mac_sp) ? + return (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags)) ? 
WX_STATS_LEN + WX_FDIR_STATS_LEN : WX_STATS_LEN; default: return -EOPNOTSUPP; @@ -87,7 +90,7 @@ void wx_get_strings(struct net_device *netdev, u32 stringset, u8 *data) case ETH_SS_STATS: for (i = 0; i < WX_GLOBAL_STATS_LEN; i++) ethtool_puts(&p, wx_gstrings_stats[i].stat_string); - if (wx->mac.type == wx_mac_sp) { + if (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags)) { for (i = 0; i < WX_FDIR_STATS_LEN; i++) ethtool_puts(&p, wx_gstrings_fdir_stats[i].stat_string); } @@ -121,7 +124,7 @@ void wx_get_ethtool_stats(struct net_device *netdev, sizeof(u64)) ? *(u64 *)p : *(u32 *)p; } - if (wx->mac.type == wx_mac_sp) { + if (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags)) { for (k = 0; k < WX_FDIR_STATS_LEN; k++) { p = (char *)wx + wx_gstrings_fdir_stats[k].stat_offset; data[i++] = *(u64 *)p; @@ -196,7 +199,7 @@ void wx_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info) unsigned int stats_len = WX_STATS_LEN; struct wx *wx = netdev_priv(netdev); - if (wx->mac.type == wx_mac_sp) + if (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags)) stats_len += WX_FDIR_STATS_LEN; strscpy(info->driver, wx->driver_name, sizeof(info->driver)); @@ -216,6 +219,9 @@ int wx_nway_reset(struct net_device *netdev) { struct wx *wx = netdev_priv(netdev); + if (wx->mac.type == wx_mac_aml) + return -EOPNOTSUPP; + return phylink_ethtool_nway_reset(wx->phylink); } EXPORT_SYMBOL(wx_nway_reset); @@ -225,6 +231,9 @@ int wx_get_link_ksettings(struct net_device *netdev, { struct wx *wx = netdev_priv(netdev); + if (wx->mac.type == wx_mac_aml) + return -EOPNOTSUPP; + return phylink_ethtool_ksettings_get(wx->phylink, cmd); } EXPORT_SYMBOL(wx_get_link_ksettings); @@ -234,6 +243,9 @@ int wx_set_link_ksettings(struct net_device *netdev, { struct wx *wx = netdev_priv(netdev); + if (wx->mac.type == wx_mac_aml) + return -EOPNOTSUPP; + return phylink_ethtool_ksettings_set(wx->phylink, cmd); } EXPORT_SYMBOL(wx_set_link_ksettings); @@ -243,6 +255,9 @@ void wx_get_pauseparam(struct net_device *netdev, { struct wx *wx = netdev_priv(netdev); + if (wx->mac.type == wx_mac_aml) + return; + phylink_ethtool_get_pauseparam(wx->phylink, pause); } EXPORT_SYMBOL(wx_get_pauseparam); @@ -252,6 +267,9 @@ int wx_set_pauseparam(struct net_device *netdev, { struct wx *wx = netdev_priv(netdev); + if (wx->mac.type == wx_mac_aml) + return -EOPNOTSUPP; + return phylink_ethtool_set_pauseparam(wx->phylink, pause); } EXPORT_SYMBOL(wx_set_pauseparam); @@ -322,10 +340,17 @@ int wx_set_coalesce(struct net_device *netdev, if (ec->tx_max_coalesced_frames_irq) wx->tx_work_limit = ec->tx_max_coalesced_frames_irq; - if (wx->mac.type == wx_mac_sp) + switch (wx->mac.type) { + case wx_mac_sp: max_eitr = WX_SP_MAX_EITR; - else + break; + case wx_mac_aml: + max_eitr = WX_AML_MAX_EITR; + break; + default: max_eitr = WX_EM_MAX_EITR; + break; + } if ((ec->rx_coalesce_usecs > (max_eitr >> 2)) || (ec->tx_coalesce_usecs > (max_eitr >> 2))) @@ -347,10 +372,15 @@ int wx_set_coalesce(struct net_device *netdev, wx->tx_itr_setting = ec->tx_coalesce_usecs; if (wx->tx_itr_setting == 1) { - if (wx->mac.type == wx_mac_sp) + switch (wx->mac.type) { + case wx_mac_sp: + case wx_mac_aml: tx_itr_param = WX_12K_ITR; - else + break; + default: tx_itr_param = WX_20K_ITR; + break; + } } else { tx_itr_param = wx->tx_itr_setting; } @@ -383,10 +413,15 @@ static unsigned int wx_max_channels(struct wx *wx) max_combined = 1; } else { /* support up to max allowed queues with RSS */ - if (wx->mac.type == wx_mac_sp) + switch (wx->mac.type) { + case wx_mac_sp: + case wx_mac_aml: max_combined = 63; - else + break; + 
default: max_combined = 8; + break; + } } return max_combined; @@ -452,3 +487,53 @@ void wx_set_msglevel(struct net_device *netdev, u32 data) wx->msg_enable = data; } EXPORT_SYMBOL(wx_set_msglevel); + +int wx_get_ts_info(struct net_device *dev, + struct kernel_ethtool_ts_info *info) +{ + struct wx *wx = netdev_priv(dev); + + info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | + BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) | + BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) | + BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | + BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT) | + BIT(HWTSTAMP_FILTER_PTP_V2_SYNC) | + BIT(HWTSTAMP_FILTER_PTP_V2_L2_SYNC) | + BIT(HWTSTAMP_FILTER_PTP_V2_L4_SYNC) | + BIT(HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) | + BIT(HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) | + BIT(HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) | + BIT(HWTSTAMP_FILTER_PTP_V2_EVENT); + + info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; + + if (wx->ptp_clock) + info->phc_index = ptp_clock_index(wx->ptp_clock); + else + info->phc_index = -1; + + info->tx_types = BIT(HWTSTAMP_TX_OFF) | + BIT(HWTSTAMP_TX_ON); + + return 0; +} +EXPORT_SYMBOL(wx_get_ts_info); + +void wx_get_ptp_stats(struct net_device *dev, + struct ethtool_ts_stats *ts_stats) +{ + struct wx *wx = netdev_priv(dev); + + if (wx->ptp_clock) { + ts_stats->pkts = wx->tx_hwtstamp_pkts; + ts_stats->lost = wx->tx_hwtstamp_timeouts + + wx->tx_hwtstamp_skipped + + wx->rx_hwtstamp_cleared; + ts_stats->err = wx->tx_hwtstamp_errors; + } +} +EXPORT_SYMBOL(wx_get_ptp_stats); diff --git a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h index 600c3b597d1a9..9e002e699ecab 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h +++ b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h @@ -40,4 +40,8 @@ int wx_set_channels(struct net_device *dev, struct ethtool_channels *ch); u32 wx_get_msglevel(struct net_device *netdev); void wx_set_msglevel(struct net_device *netdev, u32 data); +int wx_get_ts_info(struct net_device *dev, + struct kernel_ethtool_ts_info *info); +void wx_get_ptp_stats(struct net_device *dev, + struct ethtool_ts_stats *ts_stats); #endif /* _WX_ETHTOOL_H_ */ diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.c b/drivers/net/ethernet/wangxun/libwx/wx_hw.c index deaf670c160eb..3c3aa5f4ebbff 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_hw.c +++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.c @@ -10,6 +10,7 @@ #include "wx_type.h" #include "wx_lib.h" +#include "wx_sriov.h" #include "wx_hw.h" static int wx_phy_read_reg_mdi(struct mii_bus *bus, int phy_addr, int devnum, int regnum) @@ -112,10 +113,15 @@ static void wx_intr_disable(struct wx *wx, u64 qmask) if (mask) wr32(wx, WX_PX_IMS(0), mask); - if (wx->mac.type == wx_mac_sp) { + switch (wx->mac.type) { + case wx_mac_sp: + case wx_mac_aml: mask = (qmask >> 32); if (mask) wr32(wx, WX_PX_IMS(1), mask); + break; + default: + break; } } @@ -126,10 +132,16 @@ void wx_intr_enable(struct wx *wx, u64 qmask) mask = (qmask & U32_MAX); if (mask) wr32(wx, WX_PX_IMC(0), mask); - if (wx->mac.type == wx_mac_sp) { + + switch (wx->mac.type) { + case wx_mac_sp: + case wx_mac_aml: mask = (qmask >> 32); if (mask) wr32(wx, WX_PX_IMC(1), mask); + break; + default: + break; } } EXPORT_SYMBOL(wx_intr_enable); @@ -278,22 +290,8 @@ static int wx_acquire_sw_sync(struct wx *wx, u32 mask) return ret; } -/** - * wx_host_interface_command - Issue command to manageability block - * @wx: pointer to the HW structure - * @buffer: 
contains the command to write and where the return status will - * be placed - * @length: length of buffer, must be multiple of 4 bytes - * @timeout: time in ms to wait for command completion - * @return_data: read and return data from the buffer (true) or not (false) - * Needed because FW structures are big endian and decoding of - * these fields can be 8 bit or 16 bit based on command. Decoding - * is not easily understood without making a table of commands. - * So we will leave this up to the caller to read back the data - * in these cases. - **/ -int wx_host_interface_command(struct wx *wx, u32 *buffer, - u32 length, u32 timeout, bool return_data) +static int wx_host_interface_command_s(struct wx *wx, u32 *buffer, + u32 length, u32 timeout, bool return_data) { u32 hdr_size = sizeof(struct wx_hic_hdr); u32 hicr, i, bi, buf[64] = {}; @@ -301,22 +299,10 @@ int wx_host_interface_command(struct wx *wx, u32 *buffer, u32 dword_len; u16 buf_len; - if (length == 0 || length > WX_HI_MAX_BLOCK_BYTE_LENGTH) { - wx_err(wx, "Buffer length failure buffersize=%d.\n", length); - return -EINVAL; - } - status = wx_acquire_sw_sync(wx, WX_MNG_SWFW_SYNC_SW_MB); if (status != 0) return status; - /* Calculate length in DWORDs. We must be DWORD aligned */ - if ((length % (sizeof(u32))) != 0) { - wx_err(wx, "Buffer length failure, not aligned to dword"); - status = -EINVAL; - goto rel_out; - } - dword_len = length >> 2; /* The device driver writes the relevant command block @@ -391,8 +377,160 @@ int wx_host_interface_command(struct wx *wx, u32 *buffer, wx_release_sw_sync(wx, WX_MNG_SWFW_SYNC_SW_MB); return status; } + +static bool wx_poll_fw_reply(struct wx *wx, u32 *buffer, u8 send_cmd) +{ + u32 dword_len = sizeof(struct wx_hic_hdr) >> 2; + struct wx_hic_hdr *recv_hdr; + u32 i; + + /* read hdr */ + for (i = 0; i < dword_len; i++) { + buffer[i] = rd32a(wx, WX_FW2SW_MBOX, i); + le32_to_cpus(&buffer[i]); + } + + /* check hdr */ + recv_hdr = (struct wx_hic_hdr *)buffer; + if (recv_hdr->cmd == send_cmd && + recv_hdr->index == wx->swfw_index) + return true; + + return false; +} + +static int wx_host_interface_command_r(struct wx *wx, u32 *buffer, + u32 length, u32 timeout, bool return_data) +{ + struct wx_hic_hdr *hdr = (struct wx_hic_hdr *)buffer; + u32 hdr_size = sizeof(struct wx_hic_hdr); + bool busy, reply; + u32 dword_len; + u16 buf_len; + int err = 0; + u8 send_cmd; + u32 i; + + /* wait to get lock */ + might_sleep(); + err = read_poll_timeout(test_and_set_bit, busy, !busy, 1000, timeout * 1000, + false, WX_STATE_SWFW_BUSY, wx->state); + if (err) + return err; + + /* index to unique seq id for each mbox message */ + hdr->index = wx->swfw_index; + send_cmd = hdr->cmd; + + dword_len = length >> 2; + /* write data to SW-FW mbox array */ + for (i = 0; i < dword_len; i++) { + wr32a(wx, WX_SW2FW_MBOX, i, (__force u32)cpu_to_le32(buffer[i])); + /* write flush */ + rd32a(wx, WX_SW2FW_MBOX, i); + } + + /* generate interrupt to notify FW */ + wr32m(wx, WX_SW2FW_MBOX_CMD, WX_SW2FW_MBOX_CMD_VLD, 0); + wr32m(wx, WX_SW2FW_MBOX_CMD, WX_SW2FW_MBOX_CMD_VLD, WX_SW2FW_MBOX_CMD_VLD); + + /* polling reply from FW */ + err = read_poll_timeout(wx_poll_fw_reply, reply, reply, 1000, 50000, + true, wx, buffer, send_cmd); + if (err) { + wx_err(wx, "Polling from FW messages timeout, cmd: 0x%x, index: %d\n", + send_cmd, wx->swfw_index); + goto rel_out; + } + + /* expect no reply from FW then return */ + if (!return_data) + goto rel_out; + + /* If there is any thing in data position pull it in */ + buf_len = hdr->buf_len; + if (buf_len == 
0) + goto rel_out; + + if (length < buf_len + hdr_size) { + wx_err(wx, "Buffer not large enough for reply message.\n"); + err = -EFAULT; + goto rel_out; + } + + /* Calculate length in DWORDs, add 3 for odd lengths */ + dword_len = (buf_len + 3) >> 2; + for (i = hdr_size >> 2; i <= dword_len; i++) { + buffer[i] = rd32a(wx, WX_FW2SW_MBOX, i); + le32_to_cpus(&buffer[i]); + } + +rel_out: + /* index++, index replace wx_hic_hdr.checksum */ + if (wx->swfw_index == WX_HIC_HDR_INDEX_MAX) + wx->swfw_index = 0; + else + wx->swfw_index++; + + clear_bit(WX_STATE_SWFW_BUSY, wx->state); + return err; +} + +/** + * wx_host_interface_command - Issue command to manageability block + * @wx: pointer to the HW structure + * @buffer: contains the command to write and where the return status will + * be placed + * @length: length of buffer, must be multiple of 4 bytes + * @timeout: time in ms to wait for command completion + * @return_data: read and return data from the buffer (true) or not (false) + * Needed because FW structures are big endian and decoding of + * these fields can be 8 bit or 16 bit based on command. Decoding + * is not easily understood without making a table of commands. + * So we will leave this up to the caller to read back the data + * in these cases. + **/ +int wx_host_interface_command(struct wx *wx, u32 *buffer, + u32 length, u32 timeout, bool return_data) +{ + if (length == 0 || length > WX_HI_MAX_BLOCK_BYTE_LENGTH) { + wx_err(wx, "Buffer length failure buffersize=%d.\n", length); + return -EINVAL; + } + + /* Calculate length in DWORDs. We must be DWORD aligned */ + if ((length % (sizeof(u32))) != 0) { + wx_err(wx, "Buffer length failure, not aligned to dword"); + return -EINVAL; + } + + if (test_bit(WX_FLAG_SWFW_RING, wx->flags)) + return wx_host_interface_command_r(wx, buffer, length, + timeout, return_data); + + return wx_host_interface_command_s(wx, buffer, length, timeout, return_data); +} EXPORT_SYMBOL(wx_host_interface_command); +int wx_set_pps(struct wx *wx, bool enable, u64 nsec, u64 cycles) +{ + struct wx_hic_set_pps pps_cmd; + + pps_cmd.hdr.cmd = FW_PPS_SET_CMD; + pps_cmd.hdr.buf_len = FW_PPS_SET_LEN; + pps_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED; + pps_cmd.lan_id = wx->bus.func; + pps_cmd.enable = (u8)enable; + pps_cmd.nsec = nsec; + pps_cmd.cycles = cycles; + pps_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM; + + return wx_host_interface_command(wx, (u32 *)&pps_cmd, + sizeof(pps_cmd), + WX_HI_COMMAND_TIMEOUT, + false); +} + /** * wx_read_ee_hostif_data - Read EEPROM word using a host interface cmd * assuming that the semaphore is already obtained. @@ -423,7 +561,10 @@ static int wx_read_ee_hostif_data(struct wx *wx, u16 offset, u16 *data) if (status != 0) return status; - *data = (u16)rd32a(wx, WX_MNG_MBOX, FW_NVM_DATA_OFFSET); + if (!test_bit(WX_FLAG_SWFW_RING, wx->flags)) + *data = (u16)rd32a(wx, WX_MNG_MBOX, FW_NVM_DATA_OFFSET); + else + *data = (u16)rd32a(wx, WX_FW2SW_MBOX, FW_NVM_DATA_OFFSET); return status; } @@ -467,6 +608,7 @@ int wx_read_ee_hostif_buffer(struct wx *wx, u16 words_to_read; u32 value = 0; int status; + u32 mbox; u32 i; /* Take semaphore for the entire operation. 
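/* Editor's illustration, not part of the patch: the caller-side shape of a
 * mailbox command that wants data back, modeled on wx_set_pps() above. The
 * command struct and the 0xf0 opcode are hypothetical; only the wx_hic_hdr
 * fields and wx_host_interface_command() itself come from the driver.
 */
struct wx_hic_echo_sketch {
	struct wx_hic_hdr hdr;
	u32 payload;
};

static int wx_fw_echo_sketch(struct wx *wx, u32 val)
{
	struct wx_hic_echo_sketch cmd = {};

	cmd.hdr.cmd = 0xf0;			/* hypothetical opcode */
	cmd.hdr.buf_len = sizeof(cmd) - sizeof(cmd.hdr);
	cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
	cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
	cmd.payload = val;

	/* length must be dword aligned; return_data=true makes the helper
	 * poll for the FW reply and copy it back into 'cmd' before the
	 * mailbox (semaphore or ring slot) is released
	 */
	return wx_host_interface_command(wx, (u32 *)&cmd, sizeof(cmd),
					 WX_HI_COMMAND_TIMEOUT, true);
}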
*/ @@ -499,8 +641,12 @@ int wx_read_ee_hostif_buffer(struct wx *wx, goto out; } + if (!test_bit(WX_FLAG_SWFW_RING, wx->flags)) + mbox = WX_MNG_MBOX; + else + mbox = WX_FW2SW_MBOX; for (i = 0; i < words_to_read; i++) { - u32 reg = WX_MNG_MBOX + (FW_NVM_DATA_OFFSET << 2) + 2 * i; + u32 reg = mbox + (FW_NVM_DATA_OFFSET << 2) + 2 * i; value = rd32(wx, reg); data[current_word] = (u16)(value & 0xffff); @@ -550,12 +696,17 @@ void wx_init_eeprom_params(struct wx *wx) } } - if (wx->mac.type == wx_mac_sp) { + switch (wx->mac.type) { + case wx_mac_sp: + case wx_mac_aml: if (wx_read_ee_hostif(wx, WX_SW_REGION_PTR, &data)) { wx_err(wx, "NVM Read Error\n"); return; } data = data >> 1; + break; + default: + break; } eeprom->sw_region_offset = data; @@ -616,8 +767,15 @@ static int wx_set_rar(struct wx *wx, u32 index, u8 *addr, u64 pools, /* setup VMDq pool mapping */ wr32(wx, WX_PSR_MAC_SWC_VM_L, pools & 0xFFFFFFFF); - if (wx->mac.type == wx_mac_sp) + + switch (wx->mac.type) { + case wx_mac_sp: + case wx_mac_aml: wr32(wx, WX_PSR_MAC_SWC_VM_H, pools >> 32); + break; + default: + break; + } /* HW expects these in little endian so we reverse the byte * order from network order (big endian) to little endian @@ -755,9 +913,14 @@ void wx_init_rx_addrs(struct wx *wx) wx_set_rar(wx, 0, wx->mac.addr, 0, WX_PSR_MAC_SWC_AD_H_AV); - if (wx->mac.type == wx_mac_sp) { + switch (wx->mac.type) { + case wx_mac_sp: + case wx_mac_aml: /* clear VMDq pool/queue selection for RAR 0 */ wx_clear_vmdq(wx, 0, WX_CLEAR_VMDQ_ALL); + break; + default: + break; } } @@ -802,11 +965,28 @@ static void wx_sync_mac_table(struct wx *wx) } } +static void wx_full_sync_mac_table(struct wx *wx) +{ + int i; + + for (i = 0; i < wx->mac.num_rar_entries; i++) { + if (wx->mac_table[i].state & WX_MAC_STATE_IN_USE) { + wx_set_rar(wx, i, + wx->mac_table[i].addr, + wx->mac_table[i].pools, + WX_PSR_MAC_SWC_AD_H_AV); + } else { + wx_clear_rar(wx, i); + } + wx->mac_table[i].state &= ~(WX_MAC_STATE_MODIFIED); + } +} + /* this function destroys the first RAR entry */ void wx_mac_set_default_filter(struct wx *wx, u8 *addr) { memcpy(&wx->mac_table[0].addr, addr, ETH_ALEN); - wx->mac_table[0].pools = 1ULL; + wx->mac_table[0].pools = BIT(VMDQ_P(0)); wx->mac_table[0].state = (WX_MAC_STATE_DEFAULT | WX_MAC_STATE_IN_USE); wx_set_rar(wx, 0, wx->mac_table[0].addr, wx->mac_table[0].pools, @@ -831,7 +1011,7 @@ void wx_flush_sw_mac_table(struct wx *wx) } EXPORT_SYMBOL(wx_flush_sw_mac_table); -static int wx_add_mac_filter(struct wx *wx, u8 *addr, u16 pool) +int wx_add_mac_filter(struct wx *wx, u8 *addr, u16 pool) { u32 i; @@ -862,7 +1042,7 @@ static int wx_add_mac_filter(struct wx *wx, u8 *addr, u16 pool) return -ENOMEM; } -static int wx_del_mac_filter(struct wx *wx, u8 *addr, u16 pool) +int wx_del_mac_filter(struct wx *wx, u8 *addr, u16 pool) { u32 i; @@ -1044,6 +1224,35 @@ static void wx_update_mc_addr_list(struct wx *wx, struct net_device *netdev) wx_dbg(wx, "Update mc addr list Complete\n"); } +static void wx_restore_vf_multicasts(struct wx *wx) +{ + u32 i, j, vector_bit, vector_reg; + struct vf_data_storage *vfinfo; + + for (i = 0; i < wx->num_vfs; i++) { + u32 vmolr = rd32(wx, WX_PSR_VM_L2CTL(i)); + + vfinfo = &wx->vfinfo[i]; + for (j = 0; j < vfinfo->num_vf_mc_hashes; j++) { + wx->addr_ctrl.mta_in_use++; + vector_reg = WX_PSR_MC_TBL_REG(vfinfo->vf_mc_hashes[j]); + vector_bit = WX_PSR_MC_TBL_BIT(vfinfo->vf_mc_hashes[j]); + wr32m(wx, WX_PSR_MC_TBL(vector_reg), + BIT(vector_bit), BIT(vector_bit)); + /* errata 5: maintain a copy of the reg table conf */ + 
wx->mac.mta_shadow[vector_reg] |= BIT(vector_bit); + } + if (vfinfo->num_vf_mc_hashes) + vmolr |= WX_PSR_VM_L2CTL_ROMPE; + else + vmolr &= ~WX_PSR_VM_L2CTL_ROMPE; + wr32(wx, WX_PSR_VM_L2CTL(i), vmolr); + } + + /* Restore any VF macvlans */ + wx_full_sync_mac_table(wx); +} + /** * wx_write_mc_addr_list - write multicast addresses to MTA * @netdev: network interface device structure @@ -1061,6 +1270,9 @@ static int wx_write_mc_addr_list(struct net_device *netdev) wx_update_mc_addr_list(wx, netdev); + if (test_bit(WX_FLAG_SRIOV_ENABLED, wx->flags)) + wx_restore_vf_multicasts(wx); + return netdev_mc_count(netdev); } @@ -1081,7 +1293,7 @@ int wx_set_mac(struct net_device *netdev, void *p) if (retval) return retval; - wx_del_mac_filter(wx, wx->mac.addr, 0); + wx_del_mac_filter(wx, wx->mac.addr, VMDQ_P(0)); eth_hw_addr_set(netdev, addr->sa_data); memcpy(wx->mac.addr, addr->sa_data, netdev->addr_len); @@ -1183,6 +1395,10 @@ static int wx_hpbthresh(struct wx *wx) /* Calculate delay value for device */ dv_id = WX_DV(link, tc); + /* Loopback switch introduces additional latency */ + if (test_bit(WX_FLAG_SRIOV_ENABLED, wx->flags)) + dv_id += WX_B2BT(tc); + /* Delay value is calculated in bit times convert to KB */ kb = WX_BT2KB(dv_id); rx_pba = rd32(wx, WX_RDB_PB_SZ(0)) >> WX_RDB_PB_SZ_SHIFT; @@ -1238,12 +1454,107 @@ static void wx_pbthresh_setup(struct wx *wx) wx->fc.low_water = 0; } +static void wx_set_ethertype_anti_spoofing(struct wx *wx, bool enable, int vf) +{ + u32 pfvfspoof, reg_offset, vf_shift; + + vf_shift = WX_VF_IND_SHIFT(vf); + reg_offset = WX_VF_REG_OFFSET(vf); + + pfvfspoof = rd32(wx, WX_TDM_ETYPE_AS(reg_offset)); + if (enable) + pfvfspoof |= BIT(vf_shift); + else + pfvfspoof &= ~BIT(vf_shift); + wr32(wx, WX_TDM_ETYPE_AS(reg_offset), pfvfspoof); +} + +int wx_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting) +{ + u32 index = WX_VF_REG_OFFSET(vf), vf_bit = WX_VF_IND_SHIFT(vf); + struct wx *wx = netdev_priv(netdev); + u32 regval; + + if (vf >= wx->num_vfs) + return -EINVAL; + + wx->vfinfo[vf].spoofchk_enabled = setting; + + regval = (setting << vf_bit); + wr32m(wx, WX_TDM_MAC_AS(index), regval | BIT(vf_bit), regval); + + if (wx->vfinfo[vf].vlan_count) + wr32m(wx, WX_TDM_VLAN_AS(index), regval | BIT(vf_bit), regval); + + return 0; +} + +static void wx_configure_virtualization(struct wx *wx) +{ + u16 pool = wx->num_rx_pools; + u32 reg_offset, vf_shift; + u32 i; + + if (!test_bit(WX_FLAG_SRIOV_ENABLED, wx->flags)) + return; + + wr32m(wx, WX_PSR_VM_CTL, + WX_PSR_VM_CTL_POOL_MASK | WX_PSR_VM_CTL_REPLEN, + FIELD_PREP(WX_PSR_VM_CTL_POOL_MASK, VMDQ_P(0)) | + WX_PSR_VM_CTL_REPLEN); + while (pool--) + wr32m(wx, WX_PSR_VM_L2CTL(pool), + WX_PSR_VM_L2CTL_AUPE, WX_PSR_VM_L2CTL_AUPE); + + if (wx->mac.type == wx_mac_em) { + vf_shift = BIT(VMDQ_P(0)); + /* Enable only the PF pools for Tx/Rx */ + wr32(wx, WX_RDM_VF_RE(0), vf_shift); + wr32(wx, WX_TDM_VF_TE(0), vf_shift); + } else { + vf_shift = WX_VF_IND_SHIFT(VMDQ_P(0)); + reg_offset = WX_VF_REG_OFFSET(VMDQ_P(0)); + + /* Enable only the PF pools for Tx/Rx */ + wr32(wx, WX_RDM_VF_RE(reg_offset), GENMASK(31, vf_shift)); + wr32(wx, WX_RDM_VF_RE(reg_offset ^ 1), reg_offset - 1); + wr32(wx, WX_TDM_VF_TE(reg_offset), GENMASK(31, vf_shift)); + wr32(wx, WX_TDM_VF_TE(reg_offset ^ 1), reg_offset - 1); + } + + /* clear VLAN promisc flag so VFTA will be updated if necessary */ + clear_bit(WX_FLAG_VLAN_PROMISC, wx->flags); + + for (i = 0; i < wx->num_vfs; i++) { + if (!wx->vfinfo[i].spoofchk_enabled) + wx_set_vf_spoofchk(wx->netdev, i, false); + /* 
enable ethertype anti spoofing if hw supports it */ + wx_set_ethertype_anti_spoofing(wx, true, i); + } +} + static void wx_configure_port(struct wx *wx) { u32 value, i; - value = WX_CFG_PORT_CTL_D_VLAN | WX_CFG_PORT_CTL_QINQ; + if (wx->mac.type == wx_mac_em) { + value = (wx->num_vfs == 0) ? + WX_CFG_PORT_CTL_NUM_VT_NONE : + WX_CFG_PORT_CTL_NUM_VT_8; + } else { + if (test_bit(WX_FLAG_VMDQ_ENABLED, wx->flags)) { + if (wx->ring_feature[RING_F_RSS].indices == 4) + value = WX_CFG_PORT_CTL_NUM_VT_32; + else + value = WX_CFG_PORT_CTL_NUM_VT_64; + } else { + value = 0; + } + } + + value |= WX_CFG_PORT_CTL_D_VLAN | WX_CFG_PORT_CTL_QINQ; wr32m(wx, WX_CFG_PORT_CTL, + WX_CFG_PORT_CTL_NUM_VT_MASK | WX_CFG_PORT_CTL_D_VLAN | WX_CFG_PORT_CTL_QINQ, value); @@ -1304,6 +1615,83 @@ static void wx_vlan_strip_control(struct wx *wx, bool enable) } } +static void wx_vlan_promisc_enable(struct wx *wx) +{ + u32 vlnctrl, i, vind, bits, reg_idx; + + vlnctrl = rd32(wx, WX_PSR_VLAN_CTL); + if (test_bit(WX_FLAG_VMDQ_ENABLED, wx->flags)) { + /* we need to keep the VLAN filter on in SRIOV */ + vlnctrl |= WX_PSR_VLAN_CTL_VFE; + wr32(wx, WX_PSR_VLAN_CTL, vlnctrl); + } else { + vlnctrl &= ~WX_PSR_VLAN_CTL_VFE; + wr32(wx, WX_PSR_VLAN_CTL, vlnctrl); + return; + } + /* We are already in VLAN promisc, nothing to do */ + if (test_bit(WX_FLAG_VLAN_PROMISC, wx->flags)) + return; + /* Set flag so we don't redo unnecessary work */ + set_bit(WX_FLAG_VLAN_PROMISC, wx->flags); + /* Add PF to all active pools */ + for (i = WX_PSR_VLAN_SWC_ENTRIES; --i;) { + wr32(wx, WX_PSR_VLAN_SWC_IDX, i); + vind = WX_VF_IND_SHIFT(VMDQ_P(0)); + reg_idx = WX_VF_REG_OFFSET(VMDQ_P(0)); + bits = rd32(wx, WX_PSR_VLAN_SWC_VM(reg_idx)); + bits |= BIT(vind); + wr32(wx, WX_PSR_VLAN_SWC_VM(reg_idx), bits); + } + /* Set all bits in the VLAN filter table array */ + for (i = 0; i < wx->mac.vft_size; i++) + wr32(wx, WX_PSR_VLAN_TBL(i), U32_MAX); +} + +static void wx_scrub_vfta(struct wx *wx) +{ + u32 i, vid, bits, vfta, vind, vlvf, reg_idx; + + for (i = WX_PSR_VLAN_SWC_ENTRIES; --i;) { + wr32(wx, WX_PSR_VLAN_SWC_IDX, i); + vlvf = rd32(wx, WX_PSR_VLAN_SWC_IDX); + /* pull VLAN ID from VLVF */ + vid = vlvf & ~WX_PSR_VLAN_SWC_VIEN; + if (vlvf & WX_PSR_VLAN_SWC_VIEN) { + /* if PF is part of this then continue */ + if (test_bit(vid, wx->active_vlans)) + continue; + } + /* remove PF from the pool */ + vind = WX_VF_IND_SHIFT(VMDQ_P(0)); + reg_idx = WX_VF_REG_OFFSET(VMDQ_P(0)); + bits = rd32(wx, WX_PSR_VLAN_SWC_VM(reg_idx)); + bits &= ~BIT(vind); + wr32(wx, WX_PSR_VLAN_SWC_VM(reg_idx), bits); + } + /* extract values from vft_shadow and write back to VFTA */ + for (i = 0; i < wx->mac.vft_size; i++) { + vfta = wx->mac.vft_shadow[i]; + wr32(wx, WX_PSR_VLAN_TBL(i), vfta); + } +} + +static void wx_vlan_promisc_disable(struct wx *wx) +{ + u32 vlnctrl; + + /* configure vlan filtering */ + vlnctrl = rd32(wx, WX_PSR_VLAN_CTL); + vlnctrl |= WX_PSR_VLAN_CTL_VFE; + wr32(wx, WX_PSR_VLAN_CTL, vlnctrl); + /* We are not in VLAN promisc, nothing to do */ + if (!test_bit(WX_FLAG_VLAN_PROMISC, wx->flags)) + return; + /* Set flag so we don't redo unnecessary work */ + clear_bit(WX_FLAG_VLAN_PROMISC, wx->flags); + wx_scrub_vfta(wx); +} + void wx_set_rx_mode(struct net_device *netdev) { struct wx *wx = netdev_priv(netdev); @@ -1316,7 +1704,7 @@ void wx_set_rx_mode(struct net_device *netdev) /* Check for Promiscuous and All Multicast modes */ fctrl = rd32(wx, WX_PSR_CTL); fctrl &= ~(WX_PSR_CTL_UPE | WX_PSR_CTL_MPE); - vmolr = rd32(wx, WX_PSR_VM_L2CTL(0)); + vmolr = rd32(wx, 
WX_PSR_VM_L2CTL(VMDQ_P(0))); vmolr &= ~(WX_PSR_VM_L2CTL_UPE | WX_PSR_VM_L2CTL_MPE | WX_PSR_VM_L2CTL_ROPE | @@ -1337,7 +1725,10 @@ void wx_set_rx_mode(struct net_device *netdev) fctrl |= WX_PSR_CTL_UPE | WX_PSR_CTL_MPE; /* pf don't want packets routing to vf, so clear UPE */ vmolr |= WX_PSR_VM_L2CTL_MPE; - vlnctrl &= ~WX_PSR_VLAN_CTL_VFE; + if (test_bit(WX_FLAG_VMDQ_ENABLED, wx->flags) && + test_bit(WX_FLAG_SRIOV_ENABLED, wx->flags)) + vlnctrl |= WX_PSR_VLAN_CTL_VFE; + features &= ~NETIF_F_HW_VLAN_CTAG_FILTER; } if (netdev->flags & IFF_ALLMULTI) { @@ -1360,7 +1751,7 @@ void wx_set_rx_mode(struct net_device *netdev) * sufficient space to store all the addresses then enable * unicast promiscuous mode */ - count = wx_write_uc_addr_list(netdev, 0); + count = wx_write_uc_addr_list(netdev, VMDQ_P(0)); if (count < 0) { vmolr &= ~WX_PSR_VM_L2CTL_ROPE; vmolr |= WX_PSR_VM_L2CTL_UPE; @@ -1378,7 +1769,7 @@ void wx_set_rx_mode(struct net_device *netdev) wr32(wx, WX_PSR_VLAN_CTL, vlnctrl); wr32(wx, WX_PSR_CTL, fctrl); - wr32(wx, WX_PSR_VM_L2CTL(0), vmolr); + wr32(wx, WX_PSR_VM_L2CTL(VMDQ_P(0)), vmolr); if ((features & NETIF_F_HW_VLAN_CTAG_RX) && (features & NETIF_F_HW_VLAN_STAG_RX)) @@ -1386,6 +1777,10 @@ void wx_set_rx_mode(struct net_device *netdev) else wx_vlan_strip_control(wx, false); + if (features & NETIF_F_HW_VLAN_CTAG_FILTER) + wx_vlan_promisc_disable(wx); + else + wx_vlan_promisc_enable(wx); } EXPORT_SYMBOL(wx_set_rx_mode); @@ -1635,6 +2030,13 @@ static void wx_setup_reta(struct wx *wx) u32 random_key_size = WX_RSS_KEY_SIZE / 4; u32 i, j; + if (test_bit(WX_FLAG_SRIOV_ENABLED, wx->flags)) { + if (wx->mac.type == wx_mac_em) + rss_i = 1; + else + rss_i = rss_i < 4 ? 4 : rss_i; + } + /* Fill out hash function seeds */ for (i = 0; i < random_key_size; i++) wr32(wx, WX_RDB_RSSRK(i), wx->rss_key[i]); @@ -1652,10 +2054,42 @@ static void wx_setup_reta(struct wx *wx) wx_store_reta(wx); } +#define WX_RDB_RSS_PL_2 FIELD_PREP(GENMASK(31, 29), 1) +#define WX_RDB_RSS_PL_4 FIELD_PREP(GENMASK(31, 29), 2) +static void wx_setup_psrtype(struct wx *wx) +{ + int rss_i = wx->ring_feature[RING_F_RSS].indices; + u32 psrtype; + int pool; + + psrtype = WX_RDB_PL_CFG_L4HDR | + WX_RDB_PL_CFG_L3HDR | + WX_RDB_PL_CFG_L2HDR | + WX_RDB_PL_CFG_TUN_OUTL2HDR | + WX_RDB_PL_CFG_TUN_TUNHDR; + + if (wx->mac.type == wx_mac_em) { + for_each_set_bit(pool, &wx->fwd_bitmask, 8) + wr32(wx, WX_RDB_PL_CFG(VMDQ_P(pool)), psrtype); + } else { + if (rss_i > 3) + psrtype |= WX_RDB_RSS_PL_4; + else if (rss_i > 1) + psrtype |= WX_RDB_RSS_PL_2; + + for_each_set_bit(pool, &wx->fwd_bitmask, 32) + wr32(wx, WX_RDB_PL_CFG(VMDQ_P(pool)), psrtype); + } +} + static void wx_setup_mrqc(struct wx *wx) { u32 rss_field = 0; + /* VT, and RSS do not coexist at the same time */ + if (test_bit(WX_FLAG_VMDQ_ENABLED, wx->flags)) + return; + /* Disable indicating checksum in descriptor, enables RSS hash */ wr32m(wx, WX_PSR_CTL, WX_PSR_CTL_PCSD, WX_PSR_CTL_PCSD); @@ -1685,21 +2119,16 @@ static void wx_setup_mrqc(struct wx *wx) **/ void wx_configure_rx(struct wx *wx) { - u32 psrtype, i; int ret; + u32 i; wx_disable_rx(wx); - - psrtype = WX_RDB_PL_CFG_L4HDR | - WX_RDB_PL_CFG_L3HDR | - WX_RDB_PL_CFG_L2HDR | - WX_RDB_PL_CFG_TUN_TUNHDR; - wr32(wx, WX_RDB_PL_CFG(0), psrtype); + wx_setup_psrtype(wx); /* enable hw crc stripping */ wr32m(wx, WX_RSC_CTL, WX_RSC_CTL_CRC_STRIP, WX_RSC_CTL_CRC_STRIP); - if (wx->mac.type == wx_mac_sp) { + if (test_bit(WX_FLAG_RSC_CAPABLE, wx->flags)) { u32 psrctl; /* RSC Setup */ @@ -1742,6 +2171,7 @@ void wx_configure(struct wx *wx) { 
wx_set_rxpba(wx); wx_pbthresh_setup(wx); + wx_configure_virtualization(wx); wx_configure_port(wx); wx_set_rx_mode(wx->netdev); @@ -2100,7 +2530,7 @@ static int wx_set_vlvf(struct wx *wx, u32 vlan, u32 vind, bool vlan_on, * * Turn on/off specified VLAN in the VLAN filter table. **/ -static int wx_set_vfta(struct wx *wx, u32 vlan, u32 vind, bool vlan_on) +int wx_set_vfta(struct wx *wx, u32 vlan, u32 vind, bool vlan_on) { u32 bitindex, vfta, targetbit; bool vfta_changed = false; @@ -2351,7 +2781,7 @@ void wx_update_stats(struct wx *wx) hwstats->b2ogprc += rd32(wx, WX_RDM_BMC2OS_CNT); hwstats->rdmdrop += rd32(wx, WX_RDM_DRP_PKT); - if (wx->mac.type == wx_mac_sp) { + if (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags)) { hwstats->fdirmatch += rd32(wx, WX_RDB_FDIR_MATCH); hwstats->fdirmiss += rd32(wx, WX_RDB_FDIR_MISS); } diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.h b/drivers/net/ethernet/wangxun/libwx/wx_hw.h index 11fb333494824..91c1d6135045b 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_hw.h +++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.h @@ -18,6 +18,7 @@ void wx_control_hw(struct wx *wx, bool drv); int wx_mng_present(struct wx *wx); int wx_host_interface_command(struct wx *wx, u32 *buffer, u32 length, u32 timeout, bool return_data); +int wx_set_pps(struct wx *wx, bool enable, u64 nsec, u64 cycles); int wx_read_ee_hostif(struct wx *wx, u16 offset, u16 *data); int wx_read_ee_hostif_buffer(struct wx *wx, u16 offset, u16 words, u16 *data); @@ -25,9 +26,12 @@ void wx_init_eeprom_params(struct wx *wx); void wx_get_mac_addr(struct wx *wx, u8 *mac_addr); void wx_init_rx_addrs(struct wx *wx); void wx_mac_set_default_filter(struct wx *wx, u8 *addr); +int wx_add_mac_filter(struct wx *wx, u8 *addr, u16 pool); +int wx_del_mac_filter(struct wx *wx, u8 *addr, u16 pool); void wx_flush_sw_mac_table(struct wx *wx); int wx_set_mac(struct net_device *netdev, void *p); void wx_disable_rx(struct wx *wx); +int wx_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting); int wx_disable_sec_rx_path(struct wx *wx); void wx_enable_sec_rx_path(struct wx *wx); void wx_set_rx_mode(struct net_device *netdev); @@ -41,6 +45,7 @@ int wx_stop_adapter(struct wx *wx); void wx_reset_misc(struct wx *wx); int wx_get_pcie_msix_counts(struct wx *wx, u16 *msix_count, u16 max_msix_count); int wx_sw_init(struct wx *wx); +int wx_set_vfta(struct wx *wx, u32 vlan, u32 vind, bool vlan_on); int wx_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid); int wx_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid); int wx_fc_enable(struct wx *wx, bool tx_pause, bool rx_pause); diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.c b/drivers/net/ethernet/wangxun/libwx/wx_lib.c index 2b3d6586f44a5..59d904f237640 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_lib.c +++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.c @@ -13,6 +13,7 @@ #include "wx_type.h" #include "wx_lib.h" +#include "wx_ptp.h" #include "wx_hw.h" /* Lookup table mapping the HW PTYPE to the bit field for decoding */ @@ -597,8 +598,17 @@ static void wx_process_skb_fields(struct wx_ring *rx_ring, union wx_rx_desc *rx_desc, struct sk_buff *skb) { + struct wx *wx = netdev_priv(rx_ring->netdev); + wx_rx_hash(rx_ring, rx_desc, skb); wx_rx_checksum(rx_ring, rx_desc, skb); + + if (unlikely(test_bit(WX_FLAG_RX_HWTSTAMP_ENABLED, wx->flags)) && + unlikely(wx_test_staterr(rx_desc, WX_RXD_STAT_TS))) { + wx_ptp_rx_hwtstamp(rx_ring->q_vector->wx, skb); + rx_ring->last_rx_timestamp = jiffies; + } + wx_rx_vlan(rx_ring, rx_desc, skb); 
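/* Editor's aside (not part of the patch): delivering the Rx stamp is just a
 * matter of filling skb_hwtstamps(); wx_ptp_rx_hwtstamp() in wx_ptp.c reads
 * the latched RXSTMP registers and converts them with the timecounter. 'ns'
 * below stands in for that converted value (sketch only):
 */
static void wx_rx_hwtstamp_sketch(struct sk_buff *skb, u64 ns)
{
	struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb);

	memset(hwtstamps, 0, sizeof(*hwtstamps));
	hwtstamps->hwtstamp = ns_to_ktime(ns);	/* seen by SO_TIMESTAMPING users */
}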
skb_record_rx_queue(skb, rx_ring->queue_index); skb->protocol = eth_type_trans(skb, rx_ring->netdev); @@ -705,6 +715,7 @@ static bool wx_clean_tx_irq(struct wx_q_vector *q_vector, { unsigned int budget = q_vector->wx->tx_work_limit; unsigned int total_bytes = 0, total_packets = 0; + struct wx *wx = netdev_priv(tx_ring->netdev); unsigned int i = tx_ring->next_to_clean; struct wx_tx_buffer *tx_buffer; union wx_tx_desc *tx_desc; @@ -737,6 +748,11 @@ static bool wx_clean_tx_irq(struct wx_q_vector *q_vector, total_bytes += tx_buffer->bytecount; total_packets += tx_buffer->gso_segs; + /* schedule check for Tx timestamp */ + if (unlikely(test_bit(WX_STATE_PTP_TX_IN_PROGRESS, wx->state)) && + skb_shinfo(tx_buffer->skb)->tx_flags & SKBTX_IN_PROGRESS) + ptp_schedule_worker(wx->ptp_clock, 0); + /* free the skb */ napi_consume_skb(tx_buffer->skb, napi_budget); @@ -932,9 +948,9 @@ static void wx_tx_olinfo_status(union wx_tx_desc *tx_desc, tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); } -static void wx_tx_map(struct wx_ring *tx_ring, - struct wx_tx_buffer *first, - const u8 hdr_len) +static int wx_tx_map(struct wx_ring *tx_ring, + struct wx_tx_buffer *first, + const u8 hdr_len) { struct sk_buff *skb = first->skb; struct wx_tx_buffer *tx_buffer; @@ -1013,6 +1029,8 @@ static void wx_tx_map(struct wx_ring *tx_ring, netdev_tx_sent_queue(wx_txring_txq(tx_ring), first->bytecount); + /* set the timestamp */ + first->time_stamp = jiffies; skb_tx_timestamp(skb); /* Force memory writes to complete before letting h/w know there @@ -1038,7 +1056,7 @@ static void wx_tx_map(struct wx_ring *tx_ring, if (netif_xmit_stopped(wx_txring_txq(tx_ring)) || !netdev_xmit_more()) writel(i, tx_ring->tail); - return; + return 0; dma_error: dev_err(tx_ring->dev, "TX DMA map failed\n"); @@ -1062,6 +1080,8 @@ static void wx_tx_map(struct wx_ring *tx_ring, first->skb = NULL; tx_ring->next_to_use = i; + + return -ENOMEM; } static void wx_tx_ctxtdesc(struct wx_ring *tx_ring, u32 vlan_macip_lens, @@ -1082,26 +1102,6 @@ static void wx_tx_ctxtdesc(struct wx_ring *tx_ring, u32 vlan_macip_lens, context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); } -static void wx_get_ipv6_proto(struct sk_buff *skb, int offset, u8 *nexthdr) -{ - struct ipv6hdr *hdr = (struct ipv6hdr *)(skb->data + offset); - - *nexthdr = hdr->nexthdr; - offset += sizeof(struct ipv6hdr); - while (ipv6_ext_hdr(*nexthdr)) { - struct ipv6_opt_hdr _hdr, *hp; - - if (*nexthdr == NEXTHDR_NONE) - return; - hp = skb_header_pointer(skb, offset, sizeof(_hdr), &_hdr); - if (!hp) - return; - if (*nexthdr == NEXTHDR_FRAGMENT) - break; - *nexthdr = hp->nexthdr; - } -} - union network_header { struct iphdr *ipv4; struct ipv6hdr *ipv6; @@ -1112,6 +1112,8 @@ static u8 wx_encode_tx_desc_ptype(const struct wx_tx_buffer *first) { u8 tun_prot = 0, l4_prot = 0, ptype = 0; struct sk_buff *skb = first->skb; + unsigned char *exthdr, *l4_hdr; + __be16 frag_off; if (skb->encapsulation) { union network_header hdr; @@ -1122,14 +1124,18 @@ static u8 wx_encode_tx_desc_ptype(const struct wx_tx_buffer *first) ptype = WX_PTYPE_TUN_IPV4; break; case htons(ETH_P_IPV6): - wx_get_ipv6_proto(skb, skb_network_offset(skb), &tun_prot); + l4_hdr = skb_transport_header(skb); + exthdr = skb_network_header(skb) + sizeof(struct ipv6hdr); + tun_prot = ipv6_hdr(skb)->nexthdr; + if (l4_hdr != exthdr) + ipv6_skip_exthdr(skb, exthdr - skb->data, &tun_prot, &frag_off); ptype = WX_PTYPE_TUN_IPV6; break; default: return ptype; } - if (tun_prot == IPPROTO_IPIP) { + if (tun_prot == IPPROTO_IPIP || tun_prot == 
IPPROTO_IPV6) { hdr.raw = (void *)inner_ip_hdr(skb); ptype |= WX_PTYPE_PKT_IPIP; } else if (tun_prot == IPPROTO_UDP) { @@ -1166,7 +1172,11 @@ static u8 wx_encode_tx_desc_ptype(const struct wx_tx_buffer *first) l4_prot = hdr.ipv4->protocol; break; case 6: - wx_get_ipv6_proto(skb, skb_inner_network_offset(skb), &l4_prot); + l4_hdr = skb_inner_transport_header(skb); + exthdr = skb_inner_network_header(skb) + sizeof(struct ipv6hdr); + l4_prot = inner_ipv6_hdr(skb)->nexthdr; + if (l4_hdr != exthdr) + ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_prot, &frag_off); ptype |= WX_PTYPE_PKT_IPV6; break; default: @@ -1179,7 +1189,11 @@ static u8 wx_encode_tx_desc_ptype(const struct wx_tx_buffer *first) ptype = WX_PTYPE_PKT_IP; break; case htons(ETH_P_IPV6): - wx_get_ipv6_proto(skb, skb_network_offset(skb), &l4_prot); + l4_hdr = skb_transport_header(skb); + exthdr = skb_network_header(skb) + sizeof(struct ipv6hdr); + l4_prot = ipv6_hdr(skb)->nexthdr; + if (l4_hdr != exthdr) + ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_prot, &frag_off); ptype = WX_PTYPE_PKT_IP | WX_PTYPE_PKT_IPV6; break; default: @@ -1269,13 +1283,20 @@ static int wx_tso(struct wx_ring *tx_ring, struct wx_tx_buffer *first, /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */ if (enc) { + unsigned char *exthdr, *l4_hdr; + __be16 frag_off; + switch (first->protocol) { case htons(ETH_P_IP): tun_prot = ip_hdr(skb)->protocol; first->tx_flags |= WX_TX_FLAGS_OUTER_IPV4; break; case htons(ETH_P_IPV6): + l4_hdr = skb_transport_header(skb); + exthdr = skb_network_header(skb) + sizeof(struct ipv6hdr); tun_prot = ipv6_hdr(skb)->nexthdr; + if (l4_hdr != exthdr) + ipv6_skip_exthdr(skb, exthdr - skb->data, &tun_prot, &frag_off); break; default: break; @@ -1298,6 +1319,7 @@ static int wx_tso(struct wx_ring *tx_ring, struct wx_tx_buffer *first, WX_TXD_TUNNEL_LEN_SHIFT); break; case IPPROTO_IPIP: + case IPPROTO_IPV6: tunhdr_eiplen_tunlen = (((char *)inner_ip_hdr(skb) - (char *)ip_hdr(skb)) >> 2) << WX_TXD_OUTER_IPLEN_SHIFT; @@ -1335,12 +1357,15 @@ static void wx_tx_csum(struct wx_ring *tx_ring, struct wx_tx_buffer *first, u8 tun_prot = 0; if (skb->ip_summed != CHECKSUM_PARTIAL) { +csum_failed: if (!(first->tx_flags & WX_TX_FLAGS_HW_VLAN) && !(first->tx_flags & WX_TX_FLAGS_CC)) return; vlan_macip_lens = skb_network_offset(skb) << WX_TXD_MACLEN_SHIFT; } else { + unsigned char *exthdr, *l4_hdr; + __be16 frag_off; u8 l4_prot = 0; union { struct iphdr *ipv4; @@ -1362,7 +1387,12 @@ static void wx_tx_csum(struct wx_ring *tx_ring, struct wx_tx_buffer *first, tun_prot = ip_hdr(skb)->protocol; break; case htons(ETH_P_IPV6): + l4_hdr = skb_transport_header(skb); + exthdr = skb_network_header(skb) + sizeof(struct ipv6hdr); tun_prot = ipv6_hdr(skb)->nexthdr; + if (l4_hdr != exthdr) + ipv6_skip_exthdr(skb, exthdr - skb->data, + &tun_prot, &frag_off); break; default: return; @@ -1386,6 +1416,7 @@ static void wx_tx_csum(struct wx_ring *tx_ring, struct wx_tx_buffer *first, WX_TXD_TUNNEL_LEN_SHIFT); break; case IPPROTO_IPIP: + case IPPROTO_IPV6: tunhdr_eiplen_tunlen = (((char *)inner_ip_hdr(skb) - (char *)ip_hdr(skb)) >> 2) << WX_TXD_OUTER_IPLEN_SHIFT; @@ -1408,7 +1439,10 @@ static void wx_tx_csum(struct wx_ring *tx_ring, struct wx_tx_buffer *first, break; case 6: vlan_macip_lens |= (transport_hdr.raw - network_hdr.raw) >> 1; + exthdr = network_hdr.raw + sizeof(struct ipv6hdr); l4_prot = network_hdr.ipv6->nexthdr; + if (transport_hdr.raw != exthdr) + ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_prot, &frag_off); break; default: break; @@ -1428,7 +1462,8 @@ static void 
wx_tx_csum(struct wx_ring *tx_ring, struct wx_tx_buffer *first, WX_TXD_L4LEN_SHIFT; break; default: - break; + skb_checksum_help(skb); + goto csum_failed; } /* update TX checksum flag */ @@ -1486,6 +1521,20 @@ static netdev_tx_t wx_xmit_frame_ring(struct sk_buff *skb, tx_flags |= WX_TX_FLAGS_HW_VLAN; } + if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && + wx->ptp_clock) { + if (wx->tstamp_config.tx_type == HWTSTAMP_TX_ON && + !test_and_set_bit_lock(WX_STATE_PTP_TX_IN_PROGRESS, + wx->state)) { + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + tx_flags |= WX_TX_FLAGS_TSTAMP; + wx->ptp_tx_skb = skb_get(skb); + wx->ptp_tx_start = jiffies; + } else { + wx->tx_hwtstamp_skipped++; + } + } + /* record initial flags and protocol */ first->tx_flags = tx_flags; first->protocol = vlan_get_protocol(skb); @@ -1501,12 +1550,20 @@ static netdev_tx_t wx_xmit_frame_ring(struct sk_buff *skb, if (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags) && tx_ring->atr_sample_rate) wx->atr(tx_ring, first, ptype); - wx_tx_map(tx_ring, first, hdr_len); + if (wx_tx_map(tx_ring, first, hdr_len)) + goto cleanup_tx_tstamp; return NETDEV_TX_OK; out_drop: dev_kfree_skb_any(first->skb); first->skb = NULL; +cleanup_tx_tstamp: + if (unlikely(tx_flags & WX_TX_FLAGS_TSTAMP)) { + dev_kfree_skb_any(wx->ptp_tx_skb); + wx->ptp_tx_skb = NULL; + wx->tx_hwtstamp_errors++; + clear_bit_unlock(WX_STATE_PTP_TX_IN_PROGRESS, wx->state); + } return NETDEV_TX_OK; } @@ -1561,6 +1618,65 @@ void wx_napi_disable_all(struct wx *wx) } EXPORT_SYMBOL(wx_napi_disable_all); +static bool wx_set_vmdq_queues(struct wx *wx) +{ + u16 vmdq_i = wx->ring_feature[RING_F_VMDQ].limit; + u16 rss_i = wx->ring_feature[RING_F_RSS].limit; + u16 rss_m = WX_RSS_DISABLED_MASK; + u16 vmdq_m = 0; + + /* only proceed if VMDq is enabled */ + if (!test_bit(WX_FLAG_VMDQ_ENABLED, wx->flags)) + return false; + /* Add starting offset to total pool count */ + vmdq_i += wx->ring_feature[RING_F_VMDQ].offset; + + if (wx->mac.type == wx_mac_sp) { + /* double check we are limited to maximum pools */ + vmdq_i = min_t(u16, 64, vmdq_i); + + /* 64 pool mode with 2 queues per pool, or + * 16/32/64 pool mode with 1 queue per pool + */ + if (vmdq_i > 32 || rss_i < 4) { + vmdq_m = WX_VMDQ_2Q_MASK; + rss_m = WX_RSS_2Q_MASK; + rss_i = min_t(u16, rss_i, 2); + /* 32 pool mode with 4 queues per pool */ + } else { + vmdq_m = WX_VMDQ_4Q_MASK; + rss_m = WX_RSS_4Q_MASK; + rss_i = 4; + } + } else { + /* double check we are limited to maximum pools */ + vmdq_i = min_t(u16, 8, vmdq_i); + + /* when VMDQ on, disable RSS */ + rss_i = 1; + } + + /* remove the starting offset from the pool count */ + vmdq_i -= wx->ring_feature[RING_F_VMDQ].offset; + + /* save features for later use */ + wx->ring_feature[RING_F_VMDQ].indices = vmdq_i; + wx->ring_feature[RING_F_VMDQ].mask = vmdq_m; + + /* limit RSS based on user input and save for later use */ + wx->ring_feature[RING_F_RSS].indices = rss_i; + wx->ring_feature[RING_F_RSS].mask = rss_m; + + wx->queues_per_pool = rss_i;/*maybe same to num_rx_queues_per_pool*/ + wx->num_rx_pools = vmdq_i; + wx->num_rx_queues_per_pool = rss_i; + + wx->num_rx_queues = vmdq_i * rss_i; + wx->num_tx_queues = vmdq_i * rss_i; + + return true; +} + /** * wx_set_rss_queues: Allocate queues for RSS * @wx: board private structure to initialize @@ -1575,6 +1691,10 @@ static void wx_set_rss_queues(struct wx *wx) /* set mask for 16 queue limit of RSS */ f = &wx->ring_feature[RING_F_RSS]; + if (wx->mac.type == wx_mac_sp) + f->mask = WX_RSS_64Q_MASK; + else + f->mask = WX_RSS_8Q_MASK; f->indices 
= f->limit; if (!(test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags))) @@ -1607,6 +1727,9 @@ static void wx_set_num_queues(struct wx *wx) wx->num_tx_queues = 1; wx->queues_per_pool = 1; + if (wx_set_vmdq_queues(wx)) + return; + wx_set_rss_queues(wx); } @@ -1687,6 +1810,10 @@ static int wx_set_interrupt_capability(struct wx *wx) if (ret == 0 || (ret == -ENOMEM)) return ret; + /* Disable VMDq support */ + dev_warn(&wx->pdev->dev, "Disabling VMDq support\n"); + clear_bit(WX_FLAG_VMDQ_ENABLED, wx->flags); + /* Disable RSS */ dev_warn(&wx->pdev->dev, "Disabling RSS support\n"); wx->ring_feature[RING_F_RSS].limit = 1; @@ -1713,6 +1840,49 @@ static int wx_set_interrupt_capability(struct wx *wx) return 0; } +static bool wx_cache_ring_vmdq(struct wx *wx) +{ + struct wx_ring_feature *vmdq = &wx->ring_feature[RING_F_VMDQ]; + struct wx_ring_feature *rss = &wx->ring_feature[RING_F_RSS]; + u16 reg_idx; + int i; + + /* only proceed if VMDq is enabled */ + if (!test_bit(WX_FLAG_VMDQ_ENABLED, wx->flags)) + return false; + + if (wx->mac.type == wx_mac_sp) { + /* start at VMDq register offset for SR-IOV enabled setups */ + reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask); + for (i = 0; i < wx->num_rx_queues; i++, reg_idx++) { + /* If we are greater than indices move to next pool */ + if ((reg_idx & ~vmdq->mask) >= rss->indices) + reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask); + wx->rx_ring[i]->reg_idx = reg_idx; + } + reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask); + for (i = 0; i < wx->num_tx_queues; i++, reg_idx++) { + /* If we are greater than indices move to next pool */ + if ((reg_idx & rss->mask) >= rss->indices) + reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask); + wx->tx_ring[i]->reg_idx = reg_idx; + } + } else { + /* start at VMDq register offset for SR-IOV enabled setups */ + reg_idx = vmdq->offset; + for (i = 0; i < wx->num_rx_queues; i++) + /* If we are greater than indices move to next pool */ + wx->rx_ring[i]->reg_idx = reg_idx + i; + + reg_idx = vmdq->offset; + for (i = 0; i < wx->num_tx_queues; i++) + /* If we are greater than indices move to next pool */ + wx->tx_ring[i]->reg_idx = reg_idx + i; + } + + return true; +} + /** * wx_cache_ring_rss - Descriptor ring to register mapping for RSS * @wx: board private structure to initialize @@ -1724,6 +1894,9 @@ static void wx_cache_ring_rss(struct wx *wx) { u16 i; + if (wx_cache_ring_vmdq(wx)) + return; + for (i = 0; i < wx->num_rx_queues; i++) wx->rx_ring[i]->reg_idx = i; @@ -1781,10 +1954,16 @@ static int wx_alloc_q_vector(struct wx *wx, /* initialize pointer to rings */ ring = q_vector->ring; - if (wx->mac.type == wx_mac_sp) + switch (wx->mac.type) { + case wx_mac_sp: + case wx_mac_aml: default_itr = WX_12K_ITR; - else + break; + default: default_itr = WX_7K_ITR; + break; + } + /* initialize ITR */ if (txr_count && !rxr_count) /* tx only vector */ @@ -2116,7 +2295,8 @@ static void wx_set_ivar(struct wx *wx, s8 direction, wr32(wx, WX_PX_MISC_IVAR, ivar); } else { /* tx or rx causes */ - msix_vector += 1; /* offset for queue vectors */ + if (!(wx->mac.type == wx_mac_em && wx->num_vfs == 7)) + msix_vector += 1; /* offset for queue vectors */ msix_vector |= WX_PX_IVAR_ALLOC_VAL; index = ((16 * (queue & 1)) + (8 * direction)); ivar = rd32(wx, WX_PX_IVAR(queue >> 1)); @@ -2140,10 +2320,17 @@ void wx_write_eitr(struct wx_q_vector *q_vector) int v_idx = q_vector->v_idx; u32 itr_reg; - if (wx->mac.type == wx_mac_sp) + switch (wx->mac.type) { + case wx_mac_sp: itr_reg = q_vector->itr & WX_SP_MAX_EITR; - else + break; + case wx_mac_aml: + itr_reg = 
(q_vector->itr >> 3) & WX_AML_MAX_EITR; + break; + default: itr_reg = q_vector->itr & WX_EM_MAX_EITR; + break; + } itr_reg |= WX_PX_ITR_CNT_WDIS; @@ -2161,10 +2348,17 @@ void wx_configure_vectors(struct wx *wx) { struct pci_dev *pdev = wx->pdev; u32 eitrsel = 0; - u16 v_idx; + u16 v_idx, i; if (pdev->msix_enabled) { /* Populate MSIX to EITR Select */ + if (wx->mac.type == wx_mac_sp) { + if (wx->num_vfs >= 32) + eitrsel = BIT(wx->num_vfs % 32) - 1; + } else if (wx->mac.type == wx_mac_em) { + for (i = 0; i < wx->num_vfs; i++) + eitrsel |= BIT(i); + } wr32(wx, WX_PX_ITRSEL, eitrsel); /* use EIAM to auto-mask when MSI-X interrupt is asserted * this saves a register write for every interrupt @@ -2719,7 +2913,7 @@ int wx_set_features(struct net_device *netdev, netdev_features_t features) netdev->features = features; - if (wx->mac.type == wx_mac_sp && changed & NETIF_F_HW_VLAN_CTAG_RX) + if (changed & NETIF_F_HW_VLAN_CTAG_RX && wx->do_reset) wx->do_reset(netdev); else if (changed & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER)) wx_set_rx_mode(netdev); @@ -2751,7 +2945,7 @@ int wx_set_features(struct net_device *netdev, netdev_features_t features) break; } - if (need_reset) + if (need_reset && wx->do_reset) wx->do_reset(netdev); return 0; diff --git a/drivers/net/ethernet/wangxun/libwx/wx_mbx.c b/drivers/net/ethernet/wangxun/libwx/wx_mbx.c new file mode 100644 index 0000000000000..73af5f11c3bdc --- /dev/null +++ b/drivers/net/ethernet/wangxun/libwx/wx_mbx.c @@ -0,0 +1,176 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. */ + +#include +#include "wx_type.h" +#include "wx_mbx.h" + +/** + * wx_obtain_mbx_lock_pf - obtain mailbox lock + * @wx: pointer to the HW structure + * @vf: the VF index + * + * Return: return 0 on success and -EBUSY on failure + **/ +static int wx_obtain_mbx_lock_pf(struct wx *wx, u16 vf) +{ + int count = 5; + u32 mailbox; + + while (count--) { + /* Take ownership of the buffer */ + wr32(wx, WX_PXMAILBOX(vf), WX_PXMAILBOX_PFU); + + /* reserve mailbox for vf use */ + mailbox = rd32(wx, WX_PXMAILBOX(vf)); + if (mailbox & WX_PXMAILBOX_PFU) + return 0; + else if (count) + udelay(10); + } + wx_err(wx, "Failed to obtain mailbox lock for PF%d", vf); + + return -EBUSY; +} + +static int wx_check_for_bit_pf(struct wx *wx, u32 mask, int index) +{ + u32 mbvficr = rd32(wx, WX_MBVFICR(index)); + + if (!(mbvficr & mask)) + return -EBUSY; + wr32(wx, WX_MBVFICR(index), mask); + + return 0; +} + +/** + * wx_check_for_ack_pf - checks to see if the VF has acked + * @wx: pointer to the HW structure + * @vf: the VF index + * + * Return: return 0 if the VF has set the status bit or else -EBUSY + **/ +int wx_check_for_ack_pf(struct wx *wx, u16 vf) +{ + u32 index = vf / 16, vf_bit = vf % 16; + + return wx_check_for_bit_pf(wx, + FIELD_PREP(WX_MBVFICR_VFACK_MASK, + BIT(vf_bit)), + index); +} + +/** + * wx_check_for_msg_pf - checks to see if the VF has sent mail + * @wx: pointer to the HW structure + * @vf: the VF index + * + * Return: return 0 if the VF has got req bit or else -EBUSY + **/ +int wx_check_for_msg_pf(struct wx *wx, u16 vf) +{ + u32 index = vf / 16, vf_bit = vf % 16; + + return wx_check_for_bit_pf(wx, + FIELD_PREP(WX_MBVFICR_VFREQ_MASK, + BIT(vf_bit)), + index); +} + +/** + * wx_write_mbx_pf - Places a message in the mailbox + * @wx: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @vf: the VF index + * + * Return: return 0 on success and -EINVAL/-EBUSY on failure + **/ +int 
wx_write_mbx_pf(struct wx *wx, u32 *msg, u16 size, u16 vf) +{ + struct wx_mbx_info *mbx = &wx->mbx; + int ret, i; + + /* mbx->size is up to 15 */ + if (size > mbx->size) { + wx_err(wx, "Invalid mailbox message size %d", size); + return -EINVAL; + } + + /* lock the mailbox to prevent pf/vf race condition */ + ret = wx_obtain_mbx_lock_pf(wx, vf); + if (ret) + return ret; + + /* flush msg and acks as we are overwriting the message buffer */ + wx_check_for_msg_pf(wx, vf); + wx_check_for_ack_pf(wx, vf); + + /* copy the caller specified message to the mailbox memory buffer */ + for (i = 0; i < size; i++) + wr32a(wx, WX_PXMBMEM(vf), i, msg[i]); + + /* Interrupt VF to tell it a message has been sent and release buffer */ + /* set mirrored mailbox flags */ + wr32a(wx, WX_PXMBMEM(vf), WX_VXMAILBOX_SIZE, WX_PXMAILBOX_STS); + wr32(wx, WX_PXMAILBOX(vf), WX_PXMAILBOX_STS); + + return 0; +} + +/** + * wx_read_mbx_pf - Read a message from the mailbox + * @wx: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @vf: the VF index + * + * Return: return 0 on success and -EBUSY on failure + **/ +int wx_read_mbx_pf(struct wx *wx, u32 *msg, u16 size, u16 vf) +{ + struct wx_mbx_info *mbx = &wx->mbx; + int ret; + u16 i; + + /* limit read to size of mailbox and mbx->size is up to 15 */ + if (size > mbx->size) + size = mbx->size; + + /* lock the mailbox to prevent pf/vf race condition */ + ret = wx_obtain_mbx_lock_pf(wx, vf); + if (ret) + return ret; + + for (i = 0; i < size; i++) + msg[i] = rd32a(wx, WX_PXMBMEM(vf), i); + + /* Acknowledge the message and release buffer */ + /* set mirrored mailbox flags */ + wr32a(wx, WX_PXMBMEM(vf), WX_VXMAILBOX_SIZE, WX_PXMAILBOX_ACK); + wr32(wx, WX_PXMAILBOX(vf), WX_PXMAILBOX_ACK); + + return 0; +} + +/** + * wx_check_for_rst_pf - checks to see if the VF has reset + * @wx: pointer to the HW structure + * @vf: the VF index + * + * Return: return 0 on success and -EBUSY on failure + **/ +int wx_check_for_rst_pf(struct wx *wx, u16 vf) +{ + u32 reg_offset = WX_VF_REG_OFFSET(vf); + u32 vf_shift = WX_VF_IND_SHIFT(vf); + u32 vflre = 0; + + vflre = rd32(wx, WX_VFLRE(reg_offset)); + if (!(vflre & BIT(vf_shift))) + return -EBUSY; + wr32(wx, WX_VFLREC(reg_offset), BIT(vf_shift)); + + return 0; +} diff --git a/drivers/net/ethernet/wangxun/libwx/wx_mbx.h b/drivers/net/ethernet/wangxun/libwx/wx_mbx.h new file mode 100644 index 0000000000000..05aae138dbc31 --- /dev/null +++ b/drivers/net/ethernet/wangxun/libwx/wx_mbx.h @@ -0,0 +1,77 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. 
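/* Editor's illustration (not part of the patch): a PF-side notification
 * built on the helpers above, using the WX_PF_CONTROL_MSG and
 * WX_PF_NOFITY_VF_LINK_STATUS bits defined in wx_mbx.h below. The function
 * name is hypothetical; the real consumers live in the new wx_sriov.c.
 */
static void wx_notify_vf_link_sketch(struct wx *wx, u16 vf, bool link_up)
{
	u32 msg = WX_PF_CONTROL_MSG;

	if (link_up)
		msg |= WX_PF_NOFITY_VF_LINK_STATUS;

	/* one dword: wx_write_mbx_pf() takes the per-VF lock, copies the
	 * message into WX_PXMBMEM(vf) and raises WX_PXMAILBOX_STS so the VF
	 * gets a mailbox interrupt
	 */
	wx_write_mbx_pf(wx, &msg, 1, vf);
}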
*/ +#ifndef _WX_MBX_H_ +#define _WX_MBX_H_ + +#define WX_VXMAILBOX_SIZE 15 + +/* PF Registers */ +#define WX_PXMAILBOX(i) (0x600 + (4 * (i))) /* i=[0,63] */ +#define WX_PXMAILBOX_STS BIT(0) /* Initiate message send to VF */ +#define WX_PXMAILBOX_ACK BIT(1) /* Ack message recv'd from VF */ +#define WX_PXMAILBOX_PFU BIT(3) /* PF owns the mailbox buffer */ + +#define WX_PXMBMEM(i) (0x5000 + (64 * (i))) /* i=[0,63] */ + +#define WX_VFLRE(i) (0x4A0 + (4 * (i))) /* i=[0,1] */ +#define WX_VFLREC(i) (0x4A8 + (4 * (i))) /* i=[0,1] */ + +/* SR-IOV specific macros */ +#define WX_MBVFICR(i) (0x480 + (4 * (i))) /* i=[0,3] */ +#define WX_MBVFICR_VFREQ_MASK GENMASK(15, 0) +#define WX_MBVFICR_VFACK_MASK GENMASK(31, 16) + +#define WX_VT_MSGTYPE_ACK BIT(31) +#define WX_VT_MSGTYPE_NACK BIT(30) +#define WX_VT_MSGTYPE_CTS BIT(29) +#define WX_VT_MSGINFO_SHIFT 16 +#define WX_VT_MSGINFO_MASK GENMASK(23, 16) + +enum wx_pfvf_api_rev { + wx_mbox_api_null, + wx_mbox_api_13 = 4, /* API version 1.3 */ + wx_mbox_api_unknown, /* indicates that API version is not known */ +}; + +/* mailbox API */ +#define WX_VF_RESET 0x01 /* VF requests reset */ +#define WX_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */ +#define WX_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */ +#define WX_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */ +#define WX_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */ +#define WX_VF_SET_MACVLAN 0x06 /* VF requests PF unicast filter */ +#define WX_VF_API_NEGOTIATE 0x08 /* negotiate API version */ +#define WX_VF_GET_QUEUES 0x09 /* get queue configuration */ +#define WX_VF_GET_RETA 0x0a /* VF request for RETA */ +#define WX_VF_GET_RSS_KEY 0x0b /* get RSS key */ +#define WX_VF_UPDATE_XCAST_MODE 0x0c +#define WX_VF_GET_LINK_STATE 0x10 /* get vf link state */ +#define WX_VF_GET_FW_VERSION 0x11 /* get fw version */ + +#define WX_VF_BACKUP 0x8001 /* VF requests backup */ + +#define WX_PF_CONTROL_MSG BIT(8) /* PF control message */ +#define WX_PF_NOFITY_VF_LINK_STATUS 0x1 +#define WX_PF_NOFITY_VF_NET_NOT_RUNNING BIT(31) + +#define WX_VF_TX_QUEUES 1 /* number of Tx queues supported */ +#define WX_VF_RX_QUEUES 2 /* number of Rx queues supported */ +#define WX_VF_TRANS_VLAN 3 /* Indication of port vlan */ +#define WX_VF_DEF_QUEUE 4 /* Default queue offset */ + +#define WX_VF_PERMADDR_MSG_LEN 4 + +enum wxvf_xcast_modes { + WXVF_XCAST_MODE_NONE = 0, + WXVF_XCAST_MODE_MULTI, + WXVF_XCAST_MODE_ALLMULTI, + WXVF_XCAST_MODE_PROMISC, +}; + +int wx_write_mbx_pf(struct wx *wx, u32 *msg, u16 size, u16 vf); +int wx_read_mbx_pf(struct wx *wx, u32 *msg, u16 size, u16 vf); +int wx_check_for_rst_pf(struct wx *wx, u16 mbx_id); +int wx_check_for_msg_pf(struct wx *wx, u16 mbx_id); +int wx_check_for_ack_pf(struct wx *wx, u16 mbx_id); + +#endif /* _WX_MBX_H_ */ diff --git a/drivers/net/ethernet/wangxun/libwx/wx_ptp.c b/drivers/net/ethernet/wangxun/libwx/wx_ptp.c new file mode 100644 index 0000000000000..07c015ba338f4 --- /dev/null +++ b/drivers/net/ethernet/wangxun/libwx/wx_ptp.c @@ -0,0 +1,883 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. */ +/* Copyright (c) 1999 - 2025 Intel Corporation. 
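/* Editor's sketch (not part of the patch): how the PF could answer a
 * WX_VF_API_NEGOTIATE request with the constants above. Hypothetical
 * handler; the actual message loop belongs to wx_sriov.c, not this header.
 */
static void wx_negotiate_api_sketch(struct wx *wx, u16 vf)
{
	u32 msgbuf[2];

	if (wx_read_mbx_pf(wx, msgbuf, 2, vf))
		return;

	/* msgbuf[1] carries the requested wx_pfvf_api_rev */
	if (msgbuf[1] == wx_mbox_api_13)
		msgbuf[0] = WX_VF_API_NEGOTIATE | WX_VT_MSGTYPE_ACK;
	else
		msgbuf[0] = WX_VF_API_NEGOTIATE | WX_VT_MSGTYPE_NACK;

	wx_write_mbx_pf(wx, msgbuf, 2, vf);
}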
 */

+#include <linux/ptp_classify.h>
+#include <linux/clocksource.h>
+#include <linux/pci.h>
+
+#include "wx_type.h"
+#include "wx_ptp.h"
+#include "wx_hw.h"
+
+#define WX_INCVAL_10GB 0xCCCCCC
+#define WX_INCVAL_1GB 0x800000
+#define WX_INCVAL_100 0xA00000
+#define WX_INCVAL_10 0xC7F380
+#define WX_INCVAL_EM 0x2000000
+
+#define WX_INCVAL_SHIFT_10GB 20
+#define WX_INCVAL_SHIFT_1GB 18
+#define WX_INCVAL_SHIFT_100 15
+#define WX_INCVAL_SHIFT_10 12
+#define WX_INCVAL_SHIFT_EM 22
+
+#define WX_OVERFLOW_PERIOD (HZ * 30)
+#define WX_PTP_TX_TIMEOUT (HZ)
+
+#define WX_1588_PPS_WIDTH_EM 120
+
+#define WX_NS_PER_SEC 1000000000ULL
+
+static u64 wx_ptp_timecounter_cyc2time(struct wx *wx, u64 timestamp)
+{
+	unsigned int seq;
+	u64 ns;
+
+	do {
+		seq = read_seqbegin(&wx->hw_tc_lock);
+		ns = timecounter_cyc2time(&wx->hw_tc, timestamp);
+	} while (read_seqretry(&wx->hw_tc_lock, seq));
+
+	return ns;
+}
+
+static u64 wx_ptp_readtime(struct wx *wx, struct ptp_system_timestamp *sts)
+{
+	u32 timeh1, timeh2, timel;
+
+	timeh1 = rd32ptp(wx, WX_TSC_1588_SYSTIMH);
+	ptp_read_system_prets(sts);
+	timel = rd32ptp(wx, WX_TSC_1588_SYSTIML);
+	ptp_read_system_postts(sts);
+	timeh2 = rd32ptp(wx, WX_TSC_1588_SYSTIMH);
+
+	if (timeh1 != timeh2) {
+		ptp_read_system_prets(sts);
+		timel = rd32ptp(wx, WX_TSC_1588_SYSTIML);
+		ptp_read_system_postts(sts);
+	}
+	return (u64)timel | (u64)timeh2 << 32;
+}
+
+static int wx_ptp_adjfine(struct ptp_clock_info *ptp, long ppb)
+{
+	struct wx *wx = container_of(ptp, struct wx, ptp_caps);
+	u64 incval, mask;
+
+	smp_mb(); /* Force any pending update before accessing. */
+	incval = READ_ONCE(wx->base_incval);
+	incval = adjust_by_scaled_ppm(incval, ppb);
+
+	mask = (wx->mac.type == wx_mac_em) ? 0x7FFFFFF : 0xFFFFFF;
+	incval &= mask;
+	if (wx->mac.type != wx_mac_em)
+		incval |= 2 << 24;
+
+	wr32ptp(wx, WX_TSC_1588_INC, incval);
+
+	return 0;
+}
+
+static int wx_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+	struct wx *wx = container_of(ptp, struct wx, ptp_caps);
+	unsigned long flags;
+
+	write_seqlock_irqsave(&wx->hw_tc_lock, flags);
+	timecounter_adjtime(&wx->hw_tc, delta);
+	write_sequnlock_irqrestore(&wx->hw_tc_lock, flags);
+
+	if (wx->ptp_setup_sdp)
+		wx->ptp_setup_sdp(wx);
+
+	return 0;
+}
+
+static int wx_ptp_gettimex64(struct ptp_clock_info *ptp,
+			     struct timespec64 *ts,
+			     struct ptp_system_timestamp *sts)
+{
+	struct wx *wx = container_of(ptp, struct wx, ptp_caps);
+	u64 ns, stamp;
+
+	stamp = wx_ptp_readtime(wx, sts);
+	ns = wx_ptp_timecounter_cyc2time(wx, stamp);
+	*ts = ns_to_timespec64(ns);
+
+	return 0;
+}
+
+static int wx_ptp_settime64(struct ptp_clock_info *ptp,
+			    const struct timespec64 *ts)
+{
+	struct wx *wx = container_of(ptp, struct wx, ptp_caps);
+	unsigned long flags;
+	u64 ns;
+
+	ns = timespec64_to_ns(ts);
+	/* reset the timecounter */
+	write_seqlock_irqsave(&wx->hw_tc_lock, flags);
+	timecounter_init(&wx->hw_tc, &wx->hw_cc, ns);
+	write_sequnlock_irqrestore(&wx->hw_tc_lock, flags);
+
+	if (wx->ptp_setup_sdp)
+		wx->ptp_setup_sdp(wx);
+
+	return 0;
+}
+
+/**
+ * wx_ptp_clear_tx_timestamp - utility function to clear Tx timestamp state
+ * @wx: the private board structure
+ *
+ * This function should be called whenever the state related to a Tx timestamp
+ * needs to be cleared. This helps ensure that all related bits are reset for
+ * the next Tx timestamp event.
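+ *
+ * Reading WX_TSC_1588_STMPH discards any timestamp still latched in
+ * hardware, dev_kfree_skb_any() drops the skb reference taken at
+ * transmit time, and clearing WX_STATE_PTP_TX_IN_PROGRESS lets the
+ * next transmitted packet request a Tx timestamp again.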
+ */ +static void wx_ptp_clear_tx_timestamp(struct wx *wx) +{ + rd32ptp(wx, WX_TSC_1588_STMPH); + if (wx->ptp_tx_skb) { + dev_kfree_skb_any(wx->ptp_tx_skb); + wx->ptp_tx_skb = NULL; + } + clear_bit_unlock(WX_STATE_PTP_TX_IN_PROGRESS, wx->state); +} + +/** + * wx_ptp_convert_to_hwtstamp - convert register value to hw timestamp + * @wx: private board structure + * @hwtstamp: stack timestamp structure + * @timestamp: unsigned 64bit system time value + * + * We need to convert the adapter's RX/TXSTMP registers into a hwtstamp value + * which can be used by the stack's ptp functions. + * + * The lock is used to protect consistency of the cyclecounter and the SYSTIME + * registers. However, it does not need to protect against the Rx or Tx + * timestamp registers, as there can't be a new timestamp until the old one is + * unlatched by reading. + * + * In addition to the timestamp in hardware, some controllers need a software + * overflow cyclecounter, and this function takes this into account as well. + **/ +static void wx_ptp_convert_to_hwtstamp(struct wx *wx, + struct skb_shared_hwtstamps *hwtstamp, + u64 timestamp) +{ + u64 ns; + + ns = wx_ptp_timecounter_cyc2time(wx, timestamp); + hwtstamp->hwtstamp = ns_to_ktime(ns); +} + +/** + * wx_ptp_tx_hwtstamp - utility function which checks for TX time stamp + * @wx: the private board struct + * + * if the timestamp is valid, we convert it into the timecounter ns + * value, then store that result into the shhwtstamps structure which + * is passed up the network stack + */ +static void wx_ptp_tx_hwtstamp(struct wx *wx) +{ + struct skb_shared_hwtstamps shhwtstamps; + struct sk_buff *skb = wx->ptp_tx_skb; + u64 regval = 0; + + regval |= (u64)rd32ptp(wx, WX_TSC_1588_STMPL); + regval |= (u64)rd32ptp(wx, WX_TSC_1588_STMPH) << 32; + + wx_ptp_convert_to_hwtstamp(wx, &shhwtstamps, regval); + + wx->ptp_tx_skb = NULL; + clear_bit_unlock(WX_STATE_PTP_TX_IN_PROGRESS, wx->state); + skb_tstamp_tx(skb, &shhwtstamps); + dev_kfree_skb_any(skb); + wx->tx_hwtstamp_pkts++; +} + +static int wx_ptp_tx_hwtstamp_work(struct wx *wx) +{ + u32 tsynctxctl; + + /* we have to have a valid skb to poll for a timestamp */ + if (!wx->ptp_tx_skb) { + wx_ptp_clear_tx_timestamp(wx); + return 0; + } + + /* stop polling once we have a valid timestamp */ + tsynctxctl = rd32ptp(wx, WX_TSC_1588_CTL); + if (tsynctxctl & WX_TSC_1588_CTL_VALID) { + wx_ptp_tx_hwtstamp(wx); + return 0; + } + + return -1; +} + +/** + * wx_ptp_overflow_check - watchdog task to detect SYSTIME overflow + * @wx: pointer to wx struct + * + * this watchdog task periodically reads the timecounter + * in order to prevent missing when the system time registers wrap + * around. This needs to be run approximately twice a minute for the fastest + * overflowing hardware. We run it for all hardware since it shouldn't have a + * large impact. + */ +static void wx_ptp_overflow_check(struct wx *wx) +{ + bool timeout = time_is_before_jiffies(wx->last_overflow_check + + WX_OVERFLOW_PERIOD); + unsigned long flags; + + if (timeout) { + /* Update the timecounter */ + write_seqlock_irqsave(&wx->hw_tc_lock, flags); + timecounter_read(&wx->hw_tc); + write_sequnlock_irqrestore(&wx->hw_tc_lock, flags); + + wx->last_overflow_check = jiffies; + } +} + +/** + * wx_ptp_rx_hang - detect error case when Rx timestamp registers latched + * @wx: pointer to wx struct + * + * this watchdog task is scheduled to detect error case where hardware has + * dropped an Rx packet that was timestamped when the ring is full. 
The + * particular error is rare but leaves the device in a state unable to + * timestamp any future packets. + */ +static void wx_ptp_rx_hang(struct wx *wx) +{ + struct wx_ring *rx_ring; + unsigned long rx_event; + u32 tsyncrxctl; + int n; + + tsyncrxctl = rd32(wx, WX_PSR_1588_CTL); + + /* if we don't have a valid timestamp in the registers, just update the + * timeout counter and exit + */ + if (!(tsyncrxctl & WX_PSR_1588_CTL_VALID)) { + wx->last_rx_ptp_check = jiffies; + return; + } + + /* determine the most recent watchdog or rx_timestamp event */ + rx_event = wx->last_rx_ptp_check; + for (n = 0; n < wx->num_rx_queues; n++) { + rx_ring = wx->rx_ring[n]; + if (time_after(rx_ring->last_rx_timestamp, rx_event)) + rx_event = rx_ring->last_rx_timestamp; + } + + /* only need to read the high RXSTMP register to clear the lock */ + if (time_is_before_jiffies(rx_event + 5 * HZ)) { + rd32(wx, WX_PSR_1588_STMPH); + wx->last_rx_ptp_check = jiffies; + + wx->rx_hwtstamp_cleared++; + dev_warn(&wx->pdev->dev, "clearing RX Timestamp hang"); + } +} + +/** + * wx_ptp_tx_hang - detect error case where Tx timestamp never finishes + * @wx: private network wx structure + */ +static void wx_ptp_tx_hang(struct wx *wx) +{ + bool timeout = time_is_before_jiffies(wx->ptp_tx_start + + WX_PTP_TX_TIMEOUT); + + if (!wx->ptp_tx_skb) + return; + + if (!test_bit(WX_STATE_PTP_TX_IN_PROGRESS, wx->state)) + return; + + /* If we haven't received a timestamp within the timeout, it is + * reasonable to assume that it will never occur, so we can unlock the + * timestamp bit when this occurs. + */ + if (timeout) { + wx_ptp_clear_tx_timestamp(wx); + wx->tx_hwtstamp_timeouts++; + dev_warn(&wx->pdev->dev, "clearing Tx timestamp hang\n"); + } +} + +static long wx_ptp_do_aux_work(struct ptp_clock_info *ptp) +{ + struct wx *wx = container_of(ptp, struct wx, ptp_caps); + int ts_done; + + ts_done = wx_ptp_tx_hwtstamp_work(wx); + + wx_ptp_overflow_check(wx); + if (unlikely(test_bit(WX_FLAG_RX_HWTSTAMP_IN_REGISTER, + wx->flags))) + wx_ptp_rx_hang(wx); + wx_ptp_tx_hang(wx); + + return ts_done ? 1 : HZ; +} + +static u64 wx_ptp_trigger_calc(struct wx *wx) +{ + struct cyclecounter *cc = &wx->hw_cc; + unsigned long flags; + u64 ns = 0; + u32 rem; + + /* Read the current clock time, and save the cycle counter value */ + write_seqlock_irqsave(&wx->hw_tc_lock, flags); + ns = timecounter_read(&wx->hw_tc); + wx->pps_edge_start = wx->hw_tc.cycle_last; + write_sequnlock_irqrestore(&wx->hw_tc_lock, flags); + wx->pps_edge_end = wx->pps_edge_start; + + /* Figure out how far past the next second we are */ + div_u64_rem(ns, WX_NS_PER_SEC, &rem); + + /* Figure out how many nanoseconds to add to round the clock edge up + * to the next full second + */ + rem = (WX_NS_PER_SEC - rem); + + /* Adjust the clock edge to align with the next full second. 
*/ + wx->pps_edge_start += div_u64(((u64)rem << cc->shift), cc->mult); + wx->pps_edge_end += div_u64(((u64)(rem + wx->pps_width) << + cc->shift), cc->mult); + + return (ns + rem); +} + +static int wx_ptp_setup_sdp(struct wx *wx) +{ + struct cyclecounter *cc = &wx->hw_cc; + u32 tsauxc; + u64 nsec; + + if (wx->pps_width >= WX_NS_PER_SEC) { + wx_err(wx, "PTP pps width cannot be longer than 1s!\n"); + return -EINVAL; + } + + /* disable the pin first */ + wr32ptp(wx, WX_TSC_1588_AUX_CTL, 0); + WX_WRITE_FLUSH(wx); + + if (!test_bit(WX_FLAG_PTP_PPS_ENABLED, wx->flags)) { + if (wx->pps_enabled) { + wx->pps_enabled = false; + wx_set_pps(wx, false, 0, 0); + } + return 0; + } + + wx->pps_enabled = true; + nsec = wx_ptp_trigger_calc(wx); + wx_set_pps(wx, wx->pps_enabled, nsec, wx->pps_edge_start); + + tsauxc = WX_TSC_1588_AUX_CTL_PLSG | WX_TSC_1588_AUX_CTL_EN_TT0 | + WX_TSC_1588_AUX_CTL_EN_TT1 | WX_TSC_1588_AUX_CTL_EN_TS0; + wr32ptp(wx, WX_TSC_1588_TRGT_L(0), (u32)wx->pps_edge_start); + wr32ptp(wx, WX_TSC_1588_TRGT_H(0), (u32)(wx->pps_edge_start >> 32)); + wr32ptp(wx, WX_TSC_1588_TRGT_L(1), (u32)wx->pps_edge_end); + wr32ptp(wx, WX_TSC_1588_TRGT_H(1), (u32)(wx->pps_edge_end >> 32)); + wr32ptp(wx, WX_TSC_1588_SDP(0), + WX_TSC_1588_SDP_FUN_SEL_TT0 | WX_TSC_1588_SDP_OUT_LEVEL_H); + wr32ptp(wx, WX_TSC_1588_SDP(1), WX_TSC_1588_SDP_FUN_SEL_TS0); + wr32ptp(wx, WX_TSC_1588_AUX_CTL, tsauxc); + wr32ptp(wx, WX_TSC_1588_INT_EN, WX_TSC_1588_INT_EN_TT1); + WX_WRITE_FLUSH(wx); + + /* Adjust the clock edge to align with the next full second. */ + wx->sec_to_cc = div_u64(((u64)WX_NS_PER_SEC << cc->shift), cc->mult); + + return 0; +} + +static int wx_ptp_feature_enable(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, int on) +{ + struct wx *wx = container_of(ptp, struct wx, ptp_caps); + + /** + * When PPS is enabled, unmask the interrupt for the ClockOut + * feature, so that the interrupt handler can send the PPS + * event when the clock SDP triggers. Clear mask when PPS is + * disabled + */ + if (rq->type != PTP_CLK_REQ_PEROUT || !wx->ptp_setup_sdp) + return -EOPNOTSUPP; + + /* Reject requests with unsupported flags */ + if (rq->perout.flags & ~(PTP_PEROUT_DUTY_CYCLE | + PTP_PEROUT_PHASE)) + return -EOPNOTSUPP; + + if (rq->perout.phase.sec || rq->perout.phase.nsec) { + wx_err(wx, "Absolute start time not supported.\n"); + return -EINVAL; + } + + if (rq->perout.period.sec != 1 || rq->perout.period.nsec) { + wx_err(wx, "Only 1pps is supported.\n"); + return -EINVAL; + } + + if (rq->perout.flags & PTP_PEROUT_DUTY_CYCLE) { + struct timespec64 ts_on; + + ts_on.tv_sec = rq->perout.on.sec; + ts_on.tv_nsec = rq->perout.on.nsec; + wx->pps_width = timespec64_to_ns(&ts_on); + } else { + wx->pps_width = 120000000; + } + + if (on) + set_bit(WX_FLAG_PTP_PPS_ENABLED, wx->flags); + else + clear_bit(WX_FLAG_PTP_PPS_ENABLED, wx->flags); + + return wx->ptp_setup_sdp(wx); +} + +void wx_ptp_check_pps_event(struct wx *wx) +{ + u32 tsauxc, int_status; + + /* this check is necessary in case the interrupt was enabled via some + * alternative means (ex. debug_fs). Better to check here than + * everywhere that calls this function. 
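+	 *
+	 * If no PTP clock was ever registered there is no PPS state to
+	 * refresh, so return before touching any 1588 registers.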
+ */ + if (!wx->ptp_clock) + return; + + int_status = rd32ptp(wx, WX_TSC_1588_INT_ST); + if (int_status & WX_TSC_1588_INT_ST_TT1) { + /* disable the pin first */ + wr32ptp(wx, WX_TSC_1588_AUX_CTL, 0); + WX_WRITE_FLUSH(wx); + + wx_ptp_trigger_calc(wx); + + tsauxc = WX_TSC_1588_AUX_CTL_PLSG | WX_TSC_1588_AUX_CTL_EN_TT0 | + WX_TSC_1588_AUX_CTL_EN_TT1 | WX_TSC_1588_AUX_CTL_EN_TS0; + wr32ptp(wx, WX_TSC_1588_TRGT_L(0), (u32)wx->pps_edge_start); + wr32ptp(wx, WX_TSC_1588_TRGT_H(0), (u32)(wx->pps_edge_start >> 32)); + wr32ptp(wx, WX_TSC_1588_TRGT_L(1), (u32)wx->pps_edge_end); + wr32ptp(wx, WX_TSC_1588_TRGT_H(1), (u32)(wx->pps_edge_end >> 32)); + wr32ptp(wx, WX_TSC_1588_AUX_CTL, tsauxc); + WX_WRITE_FLUSH(wx); + } +} +EXPORT_SYMBOL(wx_ptp_check_pps_event); + +static long wx_ptp_create_clock(struct wx *wx) +{ + struct net_device *netdev = wx->netdev; + long err; + + /* do nothing if we already have a clock device */ + if (!IS_ERR_OR_NULL(wx->ptp_clock)) + return 0; + + snprintf(wx->ptp_caps.name, sizeof(wx->ptp_caps.name), + "%s", netdev->name); + wx->ptp_caps.owner = THIS_MODULE; + wx->ptp_caps.n_alarm = 0; + wx->ptp_caps.n_ext_ts = 0; + wx->ptp_caps.pps = 0; + wx->ptp_caps.adjfine = wx_ptp_adjfine; + wx->ptp_caps.adjtime = wx_ptp_adjtime; + wx->ptp_caps.gettimex64 = wx_ptp_gettimex64; + wx->ptp_caps.settime64 = wx_ptp_settime64; + wx->ptp_caps.do_aux_work = wx_ptp_do_aux_work; + if (wx->mac.type == wx_mac_em) { + wx->ptp_caps.max_adj = 500000000; + wx->ptp_caps.n_per_out = 1; + wx->ptp_setup_sdp = wx_ptp_setup_sdp; + wx->ptp_caps.enable = wx_ptp_feature_enable; + } else { + wx->ptp_caps.max_adj = 250000000; + wx->ptp_caps.n_per_out = 0; + wx->ptp_setup_sdp = NULL; + } + + wx->ptp_clock = ptp_clock_register(&wx->ptp_caps, &wx->pdev->dev); + if (IS_ERR(wx->ptp_clock)) { + err = PTR_ERR(wx->ptp_clock); + wx->ptp_clock = NULL; + wx_err(wx, "ptp clock register failed\n"); + return err; + } else if (wx->ptp_clock) { + dev_info(&wx->pdev->dev, "registered PHC device on %s\n", + netdev->name); + } + + /* Set the default timestamp mode to disabled here. We do this in + * create_clock instead of initialization, because we don't want to + * override the previous settings during a suspend/resume cycle. 
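+	 *
+	 * If this function runs again after a suspend/resume cycle, the
+	 * early return above keeps the already-registered clock device,
+	 * so the defaults below only take effect the first time the
+	 * clock is created.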
+ */ + wx->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE; + wx->tstamp_config.tx_type = HWTSTAMP_TX_OFF; + + return 0; +} + +static int wx_ptp_set_timestamp_mode(struct wx *wx, + struct kernel_hwtstamp_config *config) +{ + u32 tsync_tx_ctl = WX_TSC_1588_CTL_ENABLED; + u32 tsync_rx_ctl = WX_PSR_1588_CTL_ENABLED; + DECLARE_BITMAP(flags, WX_PF_FLAGS_NBITS); + u32 tsync_rx_mtrl = PTP_EV_PORT << 16; + bool is_l2 = false; + u32 regval; + + memcpy(flags, wx->flags, sizeof(wx->flags)); + + switch (config->tx_type) { + case HWTSTAMP_TX_OFF: + tsync_tx_ctl = 0; + break; + case HWTSTAMP_TX_ON: + break; + default: + return -ERANGE; + } + + switch (config->rx_filter) { + case HWTSTAMP_FILTER_NONE: + tsync_rx_ctl = 0; + tsync_rx_mtrl = 0; + clear_bit(WX_FLAG_RX_HWTSTAMP_ENABLED, flags); + clear_bit(WX_FLAG_RX_HWTSTAMP_IN_REGISTER, flags); + break; + case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: + tsync_rx_ctl |= WX_PSR_1588_CTL_TYPE_L4_V1; + tsync_rx_mtrl |= WX_PSR_1588_MSG_V1_SYNC; + set_bit(WX_FLAG_RX_HWTSTAMP_ENABLED, flags); + set_bit(WX_FLAG_RX_HWTSTAMP_IN_REGISTER, flags); + break; + case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: + tsync_rx_ctl |= WX_PSR_1588_CTL_TYPE_L4_V1; + tsync_rx_mtrl |= WX_PSR_1588_MSG_V1_DELAY_REQ; + set_bit(WX_FLAG_RX_HWTSTAMP_ENABLED, flags); + set_bit(WX_FLAG_RX_HWTSTAMP_IN_REGISTER, flags); + break; + case HWTSTAMP_FILTER_PTP_V2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + tsync_rx_ctl |= WX_PSR_1588_CTL_TYPE_EVENT_V2; + is_l2 = true; + config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; + set_bit(WX_FLAG_RX_HWTSTAMP_ENABLED, flags); + set_bit(WX_FLAG_RX_HWTSTAMP_IN_REGISTER, flags); + break; + default: + /* register PSR_1588_MSG must be set in order to do V1 packets, + * therefore it is not possible to time stamp both V1 Sync and + * Delay_Req messages unless hardware supports timestamping all + * packets => return error + */ + config->rx_filter = HWTSTAMP_FILTER_NONE; + return -ERANGE; + } + + /* define ethertype filter for timestamping L2 packets */ + if (is_l2) + wr32(wx, WX_PSR_ETYPE_SWC(WX_PSR_ETYPE_SWC_FILTER_1588), + (WX_PSR_ETYPE_SWC_FILTER_EN | /* enable filter */ + WX_PSR_ETYPE_SWC_1588 | /* enable timestamping */ + ETH_P_1588)); /* 1588 eth protocol type */ + else + wr32(wx, WX_PSR_ETYPE_SWC(WX_PSR_ETYPE_SWC_FILTER_1588), 0); + + /* enable/disable TX */ + regval = rd32ptp(wx, WX_TSC_1588_CTL); + regval &= ~WX_TSC_1588_CTL_ENABLED; + regval |= tsync_tx_ctl; + wr32ptp(wx, WX_TSC_1588_CTL, regval); + + /* enable/disable RX */ + regval = rd32(wx, WX_PSR_1588_CTL); + regval &= ~(WX_PSR_1588_CTL_ENABLED | WX_PSR_1588_CTL_TYPE_MASK); + regval |= tsync_rx_ctl; + wr32(wx, WX_PSR_1588_CTL, regval); + + /* define which PTP packets are time stamped */ + wr32(wx, WX_PSR_1588_MSG, tsync_rx_mtrl); + + WX_WRITE_FLUSH(wx); + + /* configure adapter flags only when HW is actually configured */ + memcpy(wx->flags, flags, sizeof(wx->flags)); + + /* clear TX/RX timestamp state, just to be sure */ + wx_ptp_clear_tx_timestamp(wx); + rd32(wx, WX_PSR_1588_STMPH); + + return 0; +} + +static u64 wx_ptp_read(const struct cyclecounter *hw_cc) +{ + struct wx *wx = container_of(hw_cc, struct wx, hw_cc); + + return wx_ptp_readtime(wx, NULL); +} + +static void wx_ptp_link_speed_adjust(struct wx *wx, u32 *shift, u32 *incval) 
+{ + if (wx->mac.type == wx_mac_em) { + *shift = WX_INCVAL_SHIFT_EM; + *incval = WX_INCVAL_EM; + return; + } + + switch (wx->speed) { + case SPEED_10: + *shift = WX_INCVAL_SHIFT_10; + *incval = WX_INCVAL_10; + break; + case SPEED_100: + *shift = WX_INCVAL_SHIFT_100; + *incval = WX_INCVAL_100; + break; + case SPEED_1000: + *shift = WX_INCVAL_SHIFT_1GB; + *incval = WX_INCVAL_1GB; + break; + case SPEED_10000: + default: + *shift = WX_INCVAL_SHIFT_10GB; + *incval = WX_INCVAL_10GB; + break; + } +} + +/** + * wx_ptp_reset_cyclecounter - create the cycle counter from hw + * @wx: pointer to the wx structure + * + * This function should be called to set the proper values for the TSC_1588_INC + * register and tell the cyclecounter structure what the tick rate of SYSTIME + * is. It does not directly modify SYSTIME registers or the timecounter + * structure. It should be called whenever a new TSC_1588_INC value is + * necessary, such as during initialization or when the link speed changes. + */ +void wx_ptp_reset_cyclecounter(struct wx *wx) +{ + u32 incval = 0, mask = 0; + struct cyclecounter cc; + unsigned long flags; + + /* For some of the boards below this mask is technically incorrect. + * The timestamp mask overflows at approximately 61bits. However the + * particular hardware does not overflow on an even bitmask value. + * Instead, it overflows due to conversion of upper 32bits billions of + * cycles. Timecounters are not really intended for this purpose so + * they do not properly function if the overflow point isn't 2^N-1. + * However, the actual SYSTIME values in question take ~138 years to + * overflow. In practice this means they won't actually overflow. A + * proper fix to this problem would require modification of the + * timecounter delta calculations. + */ + cc.mask = CLOCKSOURCE_MASK(64); + cc.mult = 1; + cc.shift = 0; + + cc.read = wx_ptp_read; + wx_ptp_link_speed_adjust(wx, &cc.shift, &incval); + + /* update the base incval used to calculate frequency adjustment */ + WRITE_ONCE(wx->base_incval, incval); + + mask = (wx->mac.type == wx_mac_em) ? 0x7FFFFFF : 0xFFFFFF; + incval &= mask; + if (wx->mac.type != wx_mac_em) + incval |= 2 << 24; + wr32ptp(wx, WX_TSC_1588_INC, incval); + + smp_mb(); /* Force the above update. */ + + /* need lock to prevent incorrect read while modifying cyclecounter */ + write_seqlock_irqsave(&wx->hw_tc_lock, flags); + memcpy(&wx->hw_cc, &cc, sizeof(wx->hw_cc)); + write_sequnlock_irqrestore(&wx->hw_tc_lock, flags); +} +EXPORT_SYMBOL(wx_ptp_reset_cyclecounter); + +void wx_ptp_reset(struct wx *wx) +{ + unsigned long flags; + + /* reset the hardware timestamping mode */ + wx_ptp_set_timestamp_mode(wx, &wx->tstamp_config); + wx_ptp_reset_cyclecounter(wx); + + wr32ptp(wx, WX_TSC_1588_SYSTIML, 0); + wr32ptp(wx, WX_TSC_1588_SYSTIMH, 0); + WX_WRITE_FLUSH(wx); + + write_seqlock_irqsave(&wx->hw_tc_lock, flags); + timecounter_init(&wx->hw_tc, &wx->hw_cc, + ktime_to_ns(ktime_get_real())); + write_sequnlock_irqrestore(&wx->hw_tc_lock, flags); + + wx->last_overflow_check = jiffies; + ptp_schedule_worker(wx->ptp_clock, HZ); + + /* Now that the shift has been calculated and the systime + * registers reset, (re-)enable the Clock out feature + */ + if (wx->ptp_setup_sdp) + wx->ptp_setup_sdp(wx); +} +EXPORT_SYMBOL(wx_ptp_reset); + +void wx_ptp_init(struct wx *wx) +{ + /* Initialize the seqlock_t first, since the user might call the clock + * functions any time after we've initialized the ptp clock device. 
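+	 *
+	 * Readers follow the pattern of wx_ptp_timecounter_cyc2time()
+	 * above:
+	 *
+	 *	do {
+	 *		seq = read_seqbegin(&wx->hw_tc_lock);
+	 *		ns = timecounter_cyc2time(&wx->hw_tc, timestamp);
+	 *	} while (read_seqretry(&wx->hw_tc_lock, seq));
+	 *
+	 * so the seqlock must be usable before ptp_clock_register()
+	 * makes the clock visible to user space.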
+ */ + seqlock_init(&wx->hw_tc_lock); + + /* obtain a ptp clock device, or re-use an existing device */ + if (wx_ptp_create_clock(wx)) + return; + + wx->tx_hwtstamp_pkts = 0; + wx->tx_hwtstamp_timeouts = 0; + wx->tx_hwtstamp_skipped = 0; + wx->tx_hwtstamp_errors = 0; + wx->rx_hwtstamp_cleared = 0; + /* reset the ptp related hardware bits */ + wx_ptp_reset(wx); + + /* enter the WX_STATE_PTP_RUNNING state */ + set_bit(WX_STATE_PTP_RUNNING, wx->state); +} +EXPORT_SYMBOL(wx_ptp_init); + +/** + * wx_ptp_suspend - stop ptp work items + * @wx: pointer to wx struct + * + * This function suspends ptp activity, and prevents more work from being + * generated, but does not destroy the clock device. + */ +void wx_ptp_suspend(struct wx *wx) +{ + /* leave the WX_STATE_PTP_RUNNING STATE */ + if (!test_and_clear_bit(WX_STATE_PTP_RUNNING, wx->state)) + return; + + clear_bit(WX_FLAG_PTP_PPS_ENABLED, wx->flags); + if (wx->ptp_setup_sdp) + wx->ptp_setup_sdp(wx); + + wx_ptp_clear_tx_timestamp(wx); +} +EXPORT_SYMBOL(wx_ptp_suspend); + +/** + * wx_ptp_stop - destroy the ptp_clock device + * @wx: pointer to wx struct + * + * Completely destroy the ptp_clock device, and disable all PTP related + * features. Intended to be run when the device is being closed. + */ +void wx_ptp_stop(struct wx *wx) +{ + /* first, suspend ptp activity */ + wx_ptp_suspend(wx); + + /* now destroy the ptp clock device */ + if (wx->ptp_clock) { + ptp_clock_unregister(wx->ptp_clock); + wx->ptp_clock = NULL; + dev_info(&wx->pdev->dev, "removed PHC on %s\n", wx->netdev->name); + } +} +EXPORT_SYMBOL(wx_ptp_stop); + +/** + * wx_ptp_rx_hwtstamp - utility function which checks for RX time stamp + * @wx: pointer to wx struct + * @skb: particular skb to send timestamp with + * + * if the timestamp is valid, we convert it into the timecounter ns + * value, then store that result into the shhwtstamps structure which + * is passed up the network stack + */ +void wx_ptp_rx_hwtstamp(struct wx *wx, struct sk_buff *skb) +{ + u64 regval = 0; + u32 tsyncrxctl; + + /* Read the tsyncrxctl register afterwards in order to prevent taking an + * I/O hit on every packet. + */ + tsyncrxctl = rd32(wx, WX_PSR_1588_CTL); + if (!(tsyncrxctl & WX_PSR_1588_CTL_VALID)) + return; + + regval |= (u64)rd32(wx, WX_PSR_1588_STMPL); + regval |= (u64)rd32(wx, WX_PSR_1588_STMPH) << 32; + + wx_ptp_convert_to_hwtstamp(wx, skb_hwtstamps(skb), regval); +} + +int wx_hwtstamp_get(struct net_device *dev, + struct kernel_hwtstamp_config *cfg) +{ + struct wx *wx = netdev_priv(dev); + + if (!netif_running(dev)) + return -EINVAL; + + *cfg = wx->tstamp_config; + + return 0; +} +EXPORT_SYMBOL(wx_hwtstamp_get); + +int wx_hwtstamp_set(struct net_device *dev, + struct kernel_hwtstamp_config *cfg, + struct netlink_ext_ack *extack) +{ + struct wx *wx = netdev_priv(dev); + int err; + + if (!netif_running(dev)) + return -EINVAL; + + err = wx_ptp_set_timestamp_mode(wx, cfg); + if (err) + return err; + + /* save these settings for future reference */ + memcpy(&wx->tstamp_config, cfg, sizeof(wx->tstamp_config)); + + return 0; +} +EXPORT_SYMBOL(wx_hwtstamp_set); diff --git a/drivers/net/ethernet/wangxun/libwx/wx_ptp.h b/drivers/net/ethernet/wangxun/libwx/wx_ptp.h new file mode 100644 index 0000000000000..50db90a6e3ee6 --- /dev/null +++ b/drivers/net/ethernet/wangxun/libwx/wx_ptp.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2019 - 2025 Beijing WangXun Technology Co., Ltd. 
 */

+#ifndef _WX_PTP_H_
+#define _WX_PTP_H_
+
+void wx_ptp_check_pps_event(struct wx *wx);
+void wx_ptp_reset_cyclecounter(struct wx *wx);
+void wx_ptp_reset(struct wx *wx);
+void wx_ptp_init(struct wx *wx);
+void wx_ptp_suspend(struct wx *wx);
+void wx_ptp_stop(struct wx *wx);
+void wx_ptp_rx_hwtstamp(struct wx *wx, struct sk_buff *skb);
+int wx_hwtstamp_get(struct net_device *dev,
+		    struct kernel_hwtstamp_config *cfg);
+int wx_hwtstamp_set(struct net_device *dev,
+		    struct kernel_hwtstamp_config *cfg,
+		    struct netlink_ext_ack *extack);
+
+#endif /* _WX_PTP_H_ */
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_sriov.c b/drivers/net/ethernet/wangxun/libwx/wx_sriov.c
new file mode 100644
index 0000000000000..52e6a6faf7153
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/libwx/wx_sriov.c
@@ -0,0 +1,909 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. */
+
+#include <linux/etherdevice.h>
+#include <linux/pci.h>
+
+#include "wx_type.h"
+#include "wx_hw.h"
+#include "wx_mbx.h"
+#include "wx_sriov.h"
+
+static void wx_vf_configuration(struct pci_dev *pdev, int event_mask)
+{
+	bool enable = !!WX_VF_ENABLE_CHECK(event_mask);
+	struct wx *wx = pci_get_drvdata(pdev);
+	u32 vfn = WX_VF_NUM_GET(event_mask);
+
+	if (enable)
+		eth_zero_addr(wx->vfinfo[vfn].vf_mac_addr);
+}
+
+static int wx_alloc_vf_macvlans(struct wx *wx, u8 num_vfs)
+{
+	struct vf_macvlans *mv_list;
+	int num_vf_macvlans, i;
+
+	/* Initialize list of VF macvlans */
+	INIT_LIST_HEAD(&wx->vf_mvs.mvlist);
+
+	num_vf_macvlans = wx->mac.num_rar_entries -
+			  (WX_MAX_PF_MACVLANS + 1 + num_vfs);
+	if (!num_vf_macvlans)
+		return -EINVAL;
+
+	mv_list = kcalloc(num_vf_macvlans, sizeof(struct vf_macvlans),
+			  GFP_KERNEL);
+	if (!mv_list)
+		return -ENOMEM;
+
+	for (i = 0; i < num_vf_macvlans; i++) {
+		mv_list[i].vf = -1;
+		mv_list[i].free = true;
+		list_add(&mv_list[i].mvlist, &wx->vf_mvs.mvlist);
+	}
+	wx->mv_list = mv_list;
+
+	return 0;
+}
+
+static void wx_sriov_clear_data(struct wx *wx)
+{
+	/* set num VFs to 0 to prevent access to vfinfo */
+	wx->num_vfs = 0;
+
+	/* free VF control structures */
+	kfree(wx->vfinfo);
+	wx->vfinfo = NULL;
+
+	/* free macvlan list */
+	kfree(wx->mv_list);
+	wx->mv_list = NULL;
+
+	/* set default pool back to 0 */
+	wr32m(wx, WX_PSR_VM_CTL, WX_PSR_VM_CTL_POOL_MASK, 0);
+	wx->ring_feature[RING_F_VMDQ].offset = 0;
+
+	clear_bit(WX_FLAG_SRIOV_ENABLED, wx->flags);
+	/* Disable VMDq flag so device will be set in NM mode */
+	if (wx->ring_feature[RING_F_VMDQ].limit == 1)
+		clear_bit(WX_FLAG_VMDQ_ENABLED, wx->flags);
+}
+
+static int __wx_enable_sriov(struct wx *wx, u8 num_vfs)
+{
+	int i, ret = 0;
+	u32 value = 0;
+
+	set_bit(WX_FLAG_SRIOV_ENABLED, wx->flags);
+	wx_err(wx, "SR-IOV enabled with %d VFs\n", num_vfs);
+
+	/* Enable VMDq flag so device will be set in VM mode */
+	set_bit(WX_FLAG_VMDQ_ENABLED, wx->flags);
+	if (!wx->ring_feature[RING_F_VMDQ].limit)
+		wx->ring_feature[RING_F_VMDQ].limit = 1;
+	wx->ring_feature[RING_F_VMDQ].offset = num_vfs;
+
+	wx->vfinfo = kcalloc(num_vfs, sizeof(struct vf_data_storage),
+			     GFP_KERNEL);
+	if (!wx->vfinfo)
+		return -ENOMEM;
+
+	ret = wx_alloc_vf_macvlans(wx, num_vfs);
+	if (ret)
+		return ret;
+
+	/* Initialize default switching mode VEB */
+	wr32m(wx, WX_PSR_CTL, WX_PSR_CTL_SW_EN, WX_PSR_CTL_SW_EN);
+
+	for (i = 0; i < num_vfs; i++) {
+		/* enable spoof checking for all VFs */
+		wx->vfinfo[i].spoofchk_enabled = true;
+		wx->vfinfo[i].link_enable = true;
+		/* untrust all VFs */
+		wx->vfinfo[i].trusted = false;
+		/* set the default xcast mode */
+		wx->vfinfo[i].xcast_mode = WXVF_XCAST_MODE_NONE;
+	}
+
+	if (wx->mac.type == wx_mac_em) {
+		value = WX_CFG_PORT_CTL_NUM_VT_8;
+	} else {
+		if (num_vfs < 32)
+			value = WX_CFG_PORT_CTL_NUM_VT_32;
+		else
+			value = WX_CFG_PORT_CTL_NUM_VT_64;
+	}
+	wr32m(wx, WX_CFG_PORT_CTL,
+	      WX_CFG_PORT_CTL_NUM_VT_MASK,
+	      value);
+
+	return ret;
+}
+
+static void wx_sriov_reinit(struct wx *wx)
+{
+	rtnl_lock();
+	wx->setup_tc(wx->netdev, netdev_get_num_tc(wx->netdev));
+	rtnl_unlock();
+}
+
+void wx_disable_sriov(struct wx *wx)
+{
+	if (!pci_vfs_assigned(wx->pdev))
+		pci_disable_sriov(wx->pdev);
+	else
+		wx_err(wx, "Unloading driver while VFs are assigned.\n");
+
+	/* clear flags and free allocated data */
+	wx_sriov_clear_data(wx);
+}
+EXPORT_SYMBOL(wx_disable_sriov);
+
+static int wx_pci_sriov_enable(struct pci_dev *dev,
+			       int num_vfs)
+{
+	struct wx *wx = pci_get_drvdata(dev);
+	int err = 0, i;
+
+	err = __wx_enable_sriov(wx, num_vfs);
+	if (err)
+		return err;
+
+	wx->num_vfs = num_vfs;
+	for (i = 0; i < wx->num_vfs; i++)
+		wx_vf_configuration(dev, (i | WX_VF_ENABLE));
+
+	/* reset before enabling SRIOV to avoid mailbox issues */
+	wx_sriov_reinit(wx);
+
+	err = pci_enable_sriov(dev, num_vfs);
+	if (err) {
+		wx_err(wx, "Failed to enable PCI sriov: %d\n", err);
+		goto err_out;
+	}
+
+	return num_vfs;
+err_out:
+	wx_sriov_clear_data(wx);
+	return err;
+}
+
+static void wx_pci_sriov_disable(struct pci_dev *dev)
+{
+	struct wx *wx = pci_get_drvdata(dev);
+
+	wx_disable_sriov(wx);
+	wx_sriov_reinit(wx);
+}
+
+int wx_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
+{
+	struct wx *wx = pci_get_drvdata(pdev);
+	int err;
+
+	if (!num_vfs) {
+		if (!pci_vfs_assigned(pdev)) {
+			wx_pci_sriov_disable(pdev);
+			return 0;
+		}
+
+		wx_err(wx, "can't free VFs because some are assigned to VMs.\n");
+		return -EBUSY;
+	}
+
+	err = wx_pci_sriov_enable(pdev, num_vfs);
+	if (err)
+		return err;
+
+	return num_vfs;
+}
+EXPORT_SYMBOL(wx_pci_sriov_configure);
+
+static int wx_set_vf_mac(struct wx *wx, u16 vf, const u8 *mac_addr)
+{
+	u8 hw_addr[ETH_ALEN];
+	int ret = 0;
+
+	ether_addr_copy(hw_addr, mac_addr);
+	wx_del_mac_filter(wx, wx->vfinfo[vf].vf_mac_addr, vf);
+	ret = wx_add_mac_filter(wx, hw_addr, vf);
+	if (ret >= 0)
+		ether_addr_copy(wx->vfinfo[vf].vf_mac_addr, mac_addr);
+	else
+		eth_zero_addr(wx->vfinfo[vf].vf_mac_addr);
+
+	return ret;
+}
+
+static void wx_set_vmolr(struct wx *wx, u16 vf, bool aupe)
+{
+	u32 vmolr = rd32(wx, WX_PSR_VM_L2CTL(vf));
+
+	vmolr |= WX_PSR_VM_L2CTL_BAM;
+	if (aupe)
+		vmolr |= WX_PSR_VM_L2CTL_AUPE;
+	else
+		vmolr &= ~WX_PSR_VM_L2CTL_AUPE;
+	wr32(wx, WX_PSR_VM_L2CTL(vf), vmolr);
+}
+
+static void wx_set_vmvir(struct wx *wx, u16 vid, u16 qos, u16 vf)
+{
+	u32 vmvir = vid | (qos << VLAN_PRIO_SHIFT) |
+		    WX_TDM_VLAN_INS_VLANA_DEFAULT;
+
+	wr32(wx, WX_TDM_VLAN_INS(vf), vmvir);
+}
+
+static int wx_set_vf_vlan(struct wx *wx, int add, int vid, u16 vf)
+{
+	if (!vid && !add)
+		return 0;
+
+	return wx_set_vfta(wx, vid, vf, (bool)add);
+}
+
+static void wx_set_vlan_anti_spoofing(struct wx *wx, bool enable, int vf)
+{
+	u32 index = WX_VF_REG_OFFSET(vf), vf_bit = WX_VF_IND_SHIFT(vf);
+	u32 pfvfspoof;
+
+	pfvfspoof = rd32(wx, WX_TDM_VLAN_AS(index));
+	if (enable)
+		pfvfspoof |= BIT(vf_bit);
+	else
+		pfvfspoof &= ~BIT(vf_bit);
+	wr32(wx, WX_TDM_VLAN_AS(index), pfvfspoof);
+}
+
+static void wx_write_qde(struct wx *wx, u32 vf, u32 qde)
+{
+	struct wx_ring_feature *vmdq = &wx->ring_feature[RING_F_VMDQ];
+	u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
+	u32 reg = 0, n = vf * q_per_pool / 32;
+	u32 i = vf *
q_per_pool; + + reg = rd32(wx, WX_RDM_PF_QDE(n)); + for (i = (vf * q_per_pool - n * 32); + i < ((vf + 1) * q_per_pool - n * 32); + i++) { + if (qde == 1) + reg |= qde << i; + else + reg &= qde << i; + } + + wr32(wx, WX_RDM_PF_QDE(n), reg); +} + +static void wx_clear_vmvir(struct wx *wx, u32 vf) +{ + wr32(wx, WX_TDM_VLAN_INS(vf), 0); +} + +static void wx_ping_vf(struct wx *wx, int vf) +{ + u32 ping = WX_PF_CONTROL_MSG; + + if (wx->vfinfo[vf].clear_to_send) + ping |= WX_VT_MSGTYPE_CTS; + wx_write_mbx_pf(wx, &ping, 1, vf); +} + +static void wx_set_vf_rx_tx(struct wx *wx, int vf) +{ + u32 index = WX_VF_REG_OFFSET(vf), vf_bit = WX_VF_IND_SHIFT(vf); + u32 reg_cur_tx, reg_cur_rx, reg_req_tx, reg_req_rx; + + reg_cur_tx = rd32(wx, WX_TDM_VF_TE(index)); + reg_cur_rx = rd32(wx, WX_RDM_VF_RE(index)); + + if (wx->vfinfo[vf].link_enable) { + reg_req_tx = reg_cur_tx | BIT(vf_bit); + reg_req_rx = reg_cur_rx | BIT(vf_bit); + /* Enable particular VF */ + if (reg_cur_tx != reg_req_tx) + wr32(wx, WX_TDM_VF_TE(index), reg_req_tx); + if (reg_cur_rx != reg_req_rx) + wr32(wx, WX_RDM_VF_RE(index), reg_req_rx); + } else { + reg_req_tx = BIT(vf_bit); + reg_req_rx = BIT(vf_bit); + /* Disable particular VF */ + if (reg_cur_tx & reg_req_tx) + wr32(wx, WX_TDM_VFTE_CLR(index), reg_req_tx); + if (reg_cur_rx & reg_req_rx) + wr32(wx, WX_RDM_VFRE_CLR(index), reg_req_rx); + } +} + +static int wx_get_vf_queues(struct wx *wx, u32 *msgbuf, u32 vf) +{ + struct wx_ring_feature *vmdq = &wx->ring_feature[RING_F_VMDQ]; + unsigned int default_tc = 0; + + msgbuf[WX_VF_TX_QUEUES] = __ALIGN_MASK(1, ~vmdq->mask); + msgbuf[WX_VF_RX_QUEUES] = __ALIGN_MASK(1, ~vmdq->mask); + + if (wx->vfinfo[vf].pf_vlan || wx->vfinfo[vf].pf_qos) + msgbuf[WX_VF_TRANS_VLAN] = 1; + else + msgbuf[WX_VF_TRANS_VLAN] = 0; + + /* notify VF of default queue */ + msgbuf[WX_VF_DEF_QUEUE] = default_tc; + + return 0; +} + +static void wx_vf_reset_event(struct wx *wx, u16 vf) +{ + struct vf_data_storage *vfinfo = &wx->vfinfo[vf]; + u8 num_tcs = netdev_get_num_tc(wx->netdev); + + /* add PF assigned VLAN */ + wx_set_vf_vlan(wx, true, vfinfo->pf_vlan, vf); + + /* reset offloads to defaults */ + wx_set_vmolr(wx, vf, !vfinfo->pf_vlan); + + /* set outgoing tags for VFs */ + if (!vfinfo->pf_vlan && !vfinfo->pf_qos && !num_tcs) { + wx_clear_vmvir(wx, vf); + } else { + if (vfinfo->pf_qos || !num_tcs) + wx_set_vmvir(wx, vfinfo->pf_vlan, + vfinfo->pf_qos, vf); + else + wx_set_vmvir(wx, vfinfo->pf_vlan, + wx->default_up, vf); + } + + /* reset multicast table array for vf */ + wx->vfinfo[vf].num_vf_mc_hashes = 0; + + /* Flush and reset the mta with the new values */ + wx_set_rx_mode(wx->netdev); + + wx_del_mac_filter(wx, wx->vfinfo[vf].vf_mac_addr, vf); + /* reset VF api back to unknown */ + wx->vfinfo[vf].vf_api = wx_mbox_api_null; +} + +static void wx_vf_reset_msg(struct wx *wx, u16 vf) +{ + const u8 *vf_mac = wx->vfinfo[vf].vf_mac_addr; + struct net_device *dev = wx->netdev; + u32 msgbuf[5] = {0, 0, 0, 0, 0}; + u8 *addr = (u8 *)(&msgbuf[1]); + u32 reg = 0, index, vf_bit; + int pf_max_frame; + + /* reset the filters for the device */ + wx_vf_reset_event(wx, vf); + + /* set vf mac address */ + if (!is_zero_ether_addr(vf_mac)) + wx_set_vf_mac(wx, vf, vf_mac); + + index = WX_VF_REG_OFFSET(vf); + vf_bit = WX_VF_IND_SHIFT(vf); + + /* force drop enable for all VF Rx queues */ + wx_write_qde(wx, vf, 1); + + /* set transmit and receive for vf */ + wx_set_vf_rx_tx(wx, vf); + + pf_max_frame = dev->mtu + ETH_HLEN; + + if (pf_max_frame > ETH_FRAME_LEN) + reg = BIT(vf_bit); + wr32(wx, 
WX_RDM_VFRE_CLR(index), reg); + + /* enable VF mailbox for further messages */ + wx->vfinfo[vf].clear_to_send = true; + + /* reply to reset with ack and vf mac address */ + msgbuf[0] = WX_VF_RESET; + if (!is_zero_ether_addr(vf_mac)) { + msgbuf[0] |= WX_VT_MSGTYPE_ACK; + memcpy(addr, vf_mac, ETH_ALEN); + } else { + msgbuf[0] |= WX_VT_MSGTYPE_NACK; + wx_err(wx, "VF %d has no MAC address assigned", vf); + } + + msgbuf[3] = wx->mac.mc_filter_type; + wx_write_mbx_pf(wx, msgbuf, WX_VF_PERMADDR_MSG_LEN, vf); +} + +static int wx_set_vf_mac_addr(struct wx *wx, u32 *msgbuf, u16 vf) +{ + const u8 *new_mac = ((u8 *)(&msgbuf[1])); + int ret; + + if (!is_valid_ether_addr(new_mac)) { + wx_err(wx, "VF %d attempted to set invalid mac\n", vf); + return -EINVAL; + } + + if (wx->vfinfo[vf].pf_set_mac && + memcmp(wx->vfinfo[vf].vf_mac_addr, new_mac, ETH_ALEN)) { + wx_err(wx, + "VF %d attempt to set a MAC but it already had a MAC.", + vf); + return -EBUSY; + } + + ret = wx_set_vf_mac(wx, vf, new_mac); + if (ret < 0) + return ret; + + return 0; +} + +static void wx_set_vf_multicasts(struct wx *wx, u32 *msgbuf, u32 vf) +{ + struct vf_data_storage *vfinfo = &wx->vfinfo[vf]; + u16 entries = (msgbuf[0] & WX_VT_MSGINFO_MASK) + >> WX_VT_MSGINFO_SHIFT; + u32 vmolr = rd32(wx, WX_PSR_VM_L2CTL(vf)); + u32 vector_bit, vector_reg, mta_reg, i; + u16 *hash_list = (u16 *)&msgbuf[1]; + + /* only so many hash values supported */ + entries = min_t(u16, entries, WX_MAX_VF_MC_ENTRIES); + vfinfo->num_vf_mc_hashes = entries; + + for (i = 0; i < entries; i++) + vfinfo->vf_mc_hashes[i] = hash_list[i]; + + for (i = 0; i < vfinfo->num_vf_mc_hashes; i++) { + vector_reg = WX_PSR_MC_TBL_REG(vfinfo->vf_mc_hashes[i]); + vector_bit = WX_PSR_MC_TBL_BIT(vfinfo->vf_mc_hashes[i]); + mta_reg = wx->mac.mta_shadow[vector_reg]; + mta_reg |= BIT(vector_bit); + wx->mac.mta_shadow[vector_reg] = mta_reg; + wr32(wx, WX_PSR_MC_TBL(vector_reg), mta_reg); + } + vmolr |= WX_PSR_VM_L2CTL_ROMPE; + wr32(wx, WX_PSR_VM_L2CTL(vf), vmolr); +} + +static void wx_set_vf_lpe(struct wx *wx, u32 max_frame, u32 vf) +{ + u32 index, vf_bit, vfre; + u32 max_frs, reg_val; + + /* determine VF receive enable location */ + index = WX_VF_REG_OFFSET(vf); + vf_bit = WX_VF_IND_SHIFT(vf); + + vfre = rd32(wx, WX_RDM_VF_RE(index)); + vfre |= BIT(vf_bit); + wr32(wx, WX_RDM_VF_RE(index), vfre); + + /* pull current max frame size from hardware */ + max_frs = DIV_ROUND_UP(max_frame, 1024); + reg_val = rd32(wx, WX_MAC_WDG_TIMEOUT) & WX_MAC_WDG_TIMEOUT_WTO_MASK; + if (max_frs > (reg_val + WX_MAC_WDG_TIMEOUT_WTO_DELTA)) + wr32(wx, WX_MAC_WDG_TIMEOUT, + max_frs - WX_MAC_WDG_TIMEOUT_WTO_DELTA); +} + +static int wx_find_vlvf_entry(struct wx *wx, u32 vlan) +{ + int regindex; + u32 vlvf; + + /* short cut the special case */ + if (vlan == 0) + return 0; + + /* Search for the vlan id in the VLVF entries */ + for (regindex = 1; regindex < WX_PSR_VLAN_SWC_ENTRIES; regindex++) { + wr32(wx, WX_PSR_VLAN_SWC_IDX, regindex); + vlvf = rd32(wx, WX_PSR_VLAN_SWC); + if ((vlvf & VLAN_VID_MASK) == vlan) + break; + } + + /* Return a negative value if not found */ + if (regindex >= WX_PSR_VLAN_SWC_ENTRIES) + regindex = -EINVAL; + + return regindex; +} + +static int wx_set_vf_macvlan(struct wx *wx, + u16 vf, int index, unsigned char *mac_addr) +{ + struct vf_macvlans *entry; + struct list_head *pos; + int retval = 0; + + if (index <= 1) { + list_for_each(pos, &wx->vf_mvs.mvlist) { + entry = list_entry(pos, struct vf_macvlans, mvlist); + if (entry->vf == vf) { + entry->vf = -1; + entry->free = true; + 
entry->is_macvlan = false;
+				wx_del_mac_filter(wx, entry->vf_macvlan, vf);
+			}
+		}
+	}
+
+	if (!index)
+		return 0;
+
+	entry = NULL;
+	list_for_each(pos, &wx->vf_mvs.mvlist) {
+		entry = list_entry(pos, struct vf_macvlans, mvlist);
+		if (entry->free)
+			break;
+	}
+
+	if (!entry || !entry->free)
+		return -ENOSPC;
+
+	retval = wx_add_mac_filter(wx, mac_addr, vf);
+	if (retval >= 0) {
+		entry->free = false;
+		entry->is_macvlan = true;
+		entry->vf = vf;
+		memcpy(entry->vf_macvlan, mac_addr, ETH_ALEN);
+	}
+
+	return retval;
+}
+
+static int wx_set_vf_vlan_msg(struct wx *wx, u32 *msgbuf, u16 vf)
+{
+	int add = (msgbuf[0] & WX_VT_MSGINFO_MASK) >> WX_VT_MSGINFO_SHIFT;
+	int vid = (msgbuf[1] & WX_PSR_VLAN_SWC_VLANID_MASK);
+	int ret;
+
+	if (add)
+		wx->vfinfo[vf].vlan_count++;
+	else if (wx->vfinfo[vf].vlan_count)
+		wx->vfinfo[vf].vlan_count--;
+
+	/* in case of promiscuous mode any VLAN filter set for a VF must
+	 * also have the PF pool added to it.
+	 */
+	if (add && wx->netdev->flags & IFF_PROMISC)
+		wx_set_vf_vlan(wx, add, vid, VMDQ_P(0));
+
+	ret = wx_set_vf_vlan(wx, add, vid, vf);
+	if (!ret && wx->vfinfo[vf].spoofchk_enabled)
+		wx_set_vlan_anti_spoofing(wx, true, vf);
+
+	/* Go through all the checks to see if the VLAN filter should
+	 * be wiped completely.
+	 */
+	if (!add && wx->netdev->flags & IFF_PROMISC) {
+		u32 bits = 0, vlvf;
+		int reg_ndx;
+
+		reg_ndx = wx_find_vlvf_entry(wx, vid);
+		if (reg_ndx < 0)
+			return -ENOSPC;
+		wr32(wx, WX_PSR_VLAN_SWC_IDX, reg_ndx);
+		vlvf = rd32(wx, WX_PSR_VLAN_SWC);
+		/* See if any other pools are set for this VLAN filter
+		 * entry other than the PF.
+		 */
+		if (VMDQ_P(0) < 32) {
+			bits = rd32(wx, WX_PSR_VLAN_SWC_VM_L);
+			bits &= ~BIT(VMDQ_P(0));
+			if (wx->mac.type != wx_mac_em)
+				bits |= rd32(wx, WX_PSR_VLAN_SWC_VM_H);
+		} else {
+			if (wx->mac.type != wx_mac_em)
+				bits = rd32(wx, WX_PSR_VLAN_SWC_VM_H);
+			bits &= ~BIT(VMDQ_P(0) % 32);
+			bits |= rd32(wx, WX_PSR_VLAN_SWC_VM_L);
+		}
+		/* If the filter was removed then ensure PF pool bit
+		 * is cleared if the PF only added itself to the pool
+		 * because the PF is in promiscuous mode.
+		 */
+		if ((vlvf & VLAN_VID_MASK) == vid && !bits)
+			wx_set_vf_vlan(wx, add, vid, VMDQ_P(0));
+	}
+
+	return 0;
+}
+
+static int wx_set_vf_macvlan_msg(struct wx *wx, u32 *msgbuf, u16 vf)
+{
+	int index = (msgbuf[0] & WX_VT_MSGINFO_MASK) >>
+		    WX_VT_MSGINFO_SHIFT;
+	u8 *new_mac = ((u8 *)(&msgbuf[1]));
+	int err;
+
+	if (wx->vfinfo[vf].pf_set_mac && index > 0) {
+		wx_err(wx, "VF %d requested a MACVLAN filter but is denied\n", vf);
+		return -EINVAL;
+	}
+
+	/* A non-zero index indicates the VF is setting a filter */
+	if (index) {
+		if (!is_valid_ether_addr(new_mac)) {
+			wx_err(wx, "VF %d attempted to set invalid mac\n", vf);
+			return -EINVAL;
+		}
+		/* If the VF is allowed to set MAC filters then turn off
+		 * anti-spoofing to avoid false positives.
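+		 *
+		 * The MAC anti-spoof check compares the source address
+		 * of frames sent by the VF against its single assigned
+		 * MAC, so traffic from the extra MACVLAN addresses would
+		 * otherwise be dropped as spoofed.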
+ */ + if (wx->vfinfo[vf].spoofchk_enabled) + wx_set_vf_spoofchk(wx->netdev, vf, false); + } + + err = wx_set_vf_macvlan(wx, vf, index, new_mac); + if (err == -ENOSPC) + wx_err(wx, + "VF %d request MACVLAN filter but there is no space\n", + vf); + if (err < 0) + return err; + + return 0; +} + +static int wx_negotiate_vf_api(struct wx *wx, u32 *msgbuf, u32 vf) +{ + int api = msgbuf[1]; + + switch (api) { + case wx_mbox_api_13: + wx->vfinfo[vf].vf_api = api; + return 0; + default: + wx_err(wx, "VF %d requested invalid api version %u\n", vf, api); + return -EINVAL; + } +} + +static int wx_get_vf_link_state(struct wx *wx, u32 *msgbuf, u32 vf) +{ + msgbuf[1] = wx->vfinfo[vf].link_enable; + + return 0; +} + +static int wx_get_fw_version(struct wx *wx, u32 *msgbuf, u32 vf) +{ + unsigned long fw_version = 0ULL; + int ret = 0; + + ret = kstrtoul(wx->eeprom_id, 16, &fw_version); + if (ret) + return -EOPNOTSUPP; + msgbuf[1] = fw_version; + + return 0; +} + +static int wx_update_vf_xcast_mode(struct wx *wx, u32 *msgbuf, u32 vf) +{ + int xcast_mode = msgbuf[1]; + u32 vmolr, disable, enable; + + if (wx->vfinfo[vf].xcast_mode == xcast_mode) + return 0; + + switch (xcast_mode) { + case WXVF_XCAST_MODE_NONE: + disable = WX_PSR_VM_L2CTL_BAM | WX_PSR_VM_L2CTL_ROMPE | + WX_PSR_VM_L2CTL_MPE | WX_PSR_VM_L2CTL_UPE | + WX_PSR_VM_L2CTL_VPE; + enable = 0; + break; + case WXVF_XCAST_MODE_MULTI: + disable = WX_PSR_VM_L2CTL_MPE | WX_PSR_VM_L2CTL_UPE | + WX_PSR_VM_L2CTL_VPE; + enable = WX_PSR_VM_L2CTL_BAM | WX_PSR_VM_L2CTL_ROMPE; + break; + case WXVF_XCAST_MODE_ALLMULTI: + disable = WX_PSR_VM_L2CTL_UPE | WX_PSR_VM_L2CTL_VPE; + enable = WX_PSR_VM_L2CTL_BAM | WX_PSR_VM_L2CTL_ROMPE | + WX_PSR_VM_L2CTL_MPE; + break; + case WXVF_XCAST_MODE_PROMISC: + disable = 0; + enable = WX_PSR_VM_L2CTL_BAM | WX_PSR_VM_L2CTL_ROMPE | + WX_PSR_VM_L2CTL_MPE | WX_PSR_VM_L2CTL_UPE | + WX_PSR_VM_L2CTL_VPE; + break; + default: + return -EOPNOTSUPP; + } + + vmolr = rd32(wx, WX_PSR_VM_L2CTL(vf)); + vmolr &= ~disable; + vmolr |= enable; + wr32(wx, WX_PSR_VM_L2CTL(vf), vmolr); + + wx->vfinfo[vf].xcast_mode = xcast_mode; + msgbuf[1] = xcast_mode; + + return 0; +} + +static void wx_rcv_msg_from_vf(struct wx *wx, u16 vf) +{ + u16 mbx_size = WX_VXMAILBOX_SIZE; + u32 msgbuf[WX_VXMAILBOX_SIZE]; + int retval; + + retval = wx_read_mbx_pf(wx, msgbuf, mbx_size, vf); + if (retval) { + wx_err(wx, "Error receiving message from VF\n"); + return; + } + + /* this is a message we already processed, do nothing */ + if (msgbuf[0] & (WX_VT_MSGTYPE_ACK | WX_VT_MSGTYPE_NACK)) + return; + + if (msgbuf[0] == WX_VF_RESET) { + wx_vf_reset_msg(wx, vf); + return; + } + + /* until the vf completes a virtual function reset it should not be + * allowed to start any configuration. 
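+	 *
+	 * Replying with WX_VT_MSGTYPE_NACK and without WX_VT_MSGTYPE_CTS
+	 * tells the VF driver to issue (or retry) WX_VF_RESET before
+	 * attempting any other request.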
+ */ + if (!wx->vfinfo[vf].clear_to_send) { + msgbuf[0] |= WX_VT_MSGTYPE_NACK; + wx_write_mbx_pf(wx, msgbuf, 1, vf); + return; + } + + switch ((msgbuf[0] & U16_MAX)) { + case WX_VF_SET_MAC_ADDR: + retval = wx_set_vf_mac_addr(wx, msgbuf, vf); + break; + case WX_VF_SET_MULTICAST: + wx_set_vf_multicasts(wx, msgbuf, vf); + retval = 0; + break; + case WX_VF_SET_VLAN: + retval = wx_set_vf_vlan_msg(wx, msgbuf, vf); + break; + case WX_VF_SET_LPE: + wx_set_vf_lpe(wx, msgbuf[1], vf); + retval = 0; + break; + case WX_VF_SET_MACVLAN: + retval = wx_set_vf_macvlan_msg(wx, msgbuf, vf); + break; + case WX_VF_API_NEGOTIATE: + retval = wx_negotiate_vf_api(wx, msgbuf, vf); + break; + case WX_VF_GET_QUEUES: + retval = wx_get_vf_queues(wx, msgbuf, vf); + break; + case WX_VF_GET_LINK_STATE: + retval = wx_get_vf_link_state(wx, msgbuf, vf); + break; + case WX_VF_GET_FW_VERSION: + retval = wx_get_fw_version(wx, msgbuf, vf); + break; + case WX_VF_UPDATE_XCAST_MODE: + retval = wx_update_vf_xcast_mode(wx, msgbuf, vf); + break; + case WX_VF_BACKUP: + break; + default: + wx_err(wx, "Unhandled Msg %8.8x\n", msgbuf[0]); + break; + } + + /* notify the VF of the results of what it sent us */ + if (retval) + msgbuf[0] |= WX_VT_MSGTYPE_NACK; + else + msgbuf[0] |= WX_VT_MSGTYPE_ACK; + + msgbuf[0] |= WX_VT_MSGTYPE_CTS; + + wx_write_mbx_pf(wx, msgbuf, mbx_size, vf); +} + +static void wx_rcv_ack_from_vf(struct wx *wx, u16 vf) +{ + u32 msg = WX_VT_MSGTYPE_NACK; + + /* if device isn't clear to send it shouldn't be reading either */ + if (!wx->vfinfo[vf].clear_to_send) + wx_write_mbx_pf(wx, &msg, 1, vf); +} + +void wx_msg_task(struct wx *wx) +{ + u16 vf; + + for (vf = 0; vf < wx->num_vfs; vf++) { + /* process any reset requests */ + if (!wx_check_for_rst_pf(wx, vf)) + wx_vf_reset_event(wx, vf); + + /* process any messages pending */ + if (!wx_check_for_msg_pf(wx, vf)) + wx_rcv_msg_from_vf(wx, vf); + + /* process any acks */ + if (!wx_check_for_ack_pf(wx, vf)) + wx_rcv_ack_from_vf(wx, vf); + } +} +EXPORT_SYMBOL(wx_msg_task); + +void wx_disable_vf_rx_tx(struct wx *wx) +{ + wr32(wx, WX_TDM_VFTE_CLR(0), U32_MAX); + wr32(wx, WX_RDM_VFRE_CLR(0), U32_MAX); + if (wx->mac.type != wx_mac_em) { + wr32(wx, WX_TDM_VFTE_CLR(1), U32_MAX); + wr32(wx, WX_RDM_VFRE_CLR(1), U32_MAX); + } +} +EXPORT_SYMBOL(wx_disable_vf_rx_tx); + +void wx_ping_all_vfs_with_link_status(struct wx *wx, bool link_up) +{ + u32 msgbuf[2] = {0, 0}; + u16 i; + + if (!wx->num_vfs) + return; + msgbuf[0] = WX_PF_NOFITY_VF_LINK_STATUS | WX_PF_CONTROL_MSG; + if (link_up) + msgbuf[1] = FIELD_PREP(GENMASK(31, 1), wx->speed) | link_up; + if (wx->notify_down) + msgbuf[1] |= WX_PF_NOFITY_VF_NET_NOT_RUNNING; + for (i = 0; i < wx->num_vfs; i++) { + if (wx->vfinfo[i].clear_to_send) + msgbuf[0] |= WX_VT_MSGTYPE_CTS; + wx_write_mbx_pf(wx, msgbuf, 2, i); + } +} +EXPORT_SYMBOL(wx_ping_all_vfs_with_link_status); + +static void wx_set_vf_link_state(struct wx *wx, int vf, int state) +{ + wx->vfinfo[vf].link_state = state; + switch (state) { + case IFLA_VF_LINK_STATE_AUTO: + if (netif_running(wx->netdev)) + wx->vfinfo[vf].link_enable = true; + else + wx->vfinfo[vf].link_enable = false; + break; + case IFLA_VF_LINK_STATE_ENABLE: + wx->vfinfo[vf].link_enable = true; + break; + case IFLA_VF_LINK_STATE_DISABLE: + wx->vfinfo[vf].link_enable = false; + break; + } + /* restart the VF */ + wx->vfinfo[vf].clear_to_send = false; + wx_ping_vf(wx, vf); + + wx_set_vf_rx_tx(wx, vf); +} + +void wx_set_all_vfs(struct wx *wx) +{ + int i; + + for (i = 0; i < wx->num_vfs; i++) + wx_set_vf_link_state(wx, i, 
wx->vfinfo[i].link_state);
+}
+EXPORT_SYMBOL(wx_set_all_vfs);
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_sriov.h b/drivers/net/ethernet/wangxun/libwx/wx_sriov.h
new file mode 100644
index 0000000000000..8a3a47bb58155
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/libwx/wx_sriov.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2015 - 2025 Beijing WangXun Technology Co., Ltd. */
+
+#ifndef _WX_SRIOV_H_
+#define _WX_SRIOV_H_
+
+#define WX_VF_ENABLE_CHECK(_m) FIELD_GET(BIT(31), (_m))
+#define WX_VF_NUM_GET(_m) FIELD_GET(GENMASK(5, 0), (_m))
+#define WX_VF_ENABLE BIT(31)
+
+void wx_disable_sriov(struct wx *wx);
+int wx_pci_sriov_configure(struct pci_dev *pdev, int num_vfs);
+void wx_msg_task(struct wx *wx);
+void wx_disable_vf_rx_tx(struct wx *wx);
+void wx_ping_all_vfs_with_link_status(struct wx *wx, bool link_up);
+void wx_set_all_vfs(struct wx *wx);
+
+#endif /* _WX_SRIOV_H_ */
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_type.h b/drivers/net/ethernet/wangxun/libwx/wx_type.h
index b54bffda027b4..e13172c9eeedd 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_type.h
+++ b/drivers/net/ethernet/wangxun/libwx/wx_type.h
@@ -4,6 +4,8 @@
 #ifndef _WX_TYPE_H_
 #define _WX_TYPE_H_
 
+#include <linux/ptp_clock_kernel.h>
+#include <linux/timecounter.h>
 #include <linux/bitfield.h>
 #include <linux/netdevice.h>
 #include <linux/if_vlan.h>
@@ -18,8 +20,13 @@
 /* MSI-X capability fields masks */
 #define WX_PCIE_MSIX_TBL_SZ_MASK 0x7FF
 #define WX_PCI_LINK_STATUS 0xB2
+#define WX_MAX_PF_MACVLANS 15
+#define WX_MAX_VF_MC_ENTRIES 30
 
 /**************** Global Registers ****************************/
+#define WX_VF_REG_OFFSET(_v) FIELD_GET(GENMASK(15, 5), (_v))
+#define WX_VF_IND_SHIFT(_v) FIELD_GET(GENMASK(4, 0), (_v))
+
 /* chip control Registers */
 #define WX_MIS_PWR 0x10000
 #define WX_MIS_RST 0x1000C
@@ -74,6 +81,9 @@
 #define WX_MAC_LXONOFFRXC 0x11E0C
 
 /*********************** Receive DMA registers **************************/
+#define WX_RDM_VF_RE(_i) (0x12004 + ((_i) * 4))
+#define WX_RDM_PF_QDE(_i) (0x12080 + ((_i) * 4))
+#define WX_RDM_VFRE_CLR(_i) (0x120A0 + ((_i) * 4))
 #define WX_RDM_DRP_PKT 0x12500
 #define WX_RDM_PKT_CNT 0x12504
 #define WX_RDM_BYTE_CNT_LSB 0x12508
@@ -82,12 +92,17 @@
 
 /************************* Port Registers ************************************/
 /* port cfg Registers */
 #define WX_CFG_PORT_CTL 0x14400
+#define WX_CFG_PORT_CTL_PFRSTD BIT(14)
 #define WX_CFG_PORT_CTL_DRV_LOAD BIT(3)
 #define WX_CFG_PORT_CTL_QINQ BIT(2)
 #define WX_CFG_PORT_CTL_D_VLAN BIT(0) /* double vlan*/
 #define WX_CFG_TAG_TPID(_i) (0x14430 + ((_i) * 4))
 #define WX_CFG_PORT_CTL_NUM_VT_MASK GENMASK(13, 12) /* number of TVs */
+#define WX_CFG_PORT_CTL_NUM_VT_NONE 0
+#define WX_CFG_PORT_CTL_NUM_VT_8 FIELD_PREP(GENMASK(13, 12), 1)
+#define WX_CFG_PORT_CTL_NUM_VT_32 FIELD_PREP(GENMASK(13, 12), 2)
+#define WX_CFG_PORT_CTL_NUM_VT_64 FIELD_PREP(GENMASK(13, 12), 3)
 
 /* GPIO Registers */
 #define WX_GPIO_DR 0x14800
@@ -110,6 +125,11 @@
 /*********************** Transmit DMA registers **************************/
 /* transmit global control */
 #define WX_TDM_CTL 0x18000
+#define WX_TDM_VF_TE(_i) (0x18004 + ((_i) * 4))
+#define WX_TDM_MAC_AS(_i) (0x18060 + ((_i) * 4))
+#define WX_TDM_VLAN_AS(_i) (0x18070 + ((_i) * 4))
+#define WX_TDM_VFTE_CLR(_i) (0x180A0 + ((_i) * 4))
+
 /* TDM CTL BIT */
 #define WX_TDM_CTL_TE BIT(0) /* Transmit Enable */
 #define WX_TDM_PB_THRE(_i) (0x18020 + ((_i) * 4))
@@ -163,6 +183,7 @@
 /******************************* PSR Registers *******************************/
 /* psr control */
 #define WX_PSR_CTL 0x15000
+#define WX_PSR_VM_CTL 0x151B0
 /* Header split receive */
 #define
WX_PSR_CTL_SW_EN BIT(18) #define WX_PSR_CTL_RSC_ACK BIT(17) @@ -180,14 +201,36 @@ #define WX_PSR_VLAN_CTL 0x15088 #define WX_PSR_VLAN_CTL_CFIEN BIT(29) /* bit 29 */ #define WX_PSR_VLAN_CTL_VFE BIT(30) /* bit 30 */ +/* EType Queue Filter */ +#define WX_PSR_ETYPE_SWC(_i) (0x15128 + ((_i) * 4)) +#define WX_PSR_ETYPE_SWC_FILTER_1588 3 +#define WX_PSR_ETYPE_SWC_FILTER_EN BIT(31) +#define WX_PSR_ETYPE_SWC_1588 BIT(30) +/* 1588 */ +#define WX_PSR_1588_MSG 0x15120 +#define WX_PSR_1588_MSG_V1_SYNC FIELD_PREP(GENMASK(7, 0), 0) +#define WX_PSR_1588_MSG_V1_DELAY_REQ FIELD_PREP(GENMASK(7, 0), 1) +#define WX_PSR_1588_STMPL 0x151E8 +#define WX_PSR_1588_STMPH 0x151A4 +#define WX_PSR_1588_CTL 0x15188 +#define WX_PSR_1588_CTL_ENABLED BIT(4) +#define WX_PSR_1588_CTL_TYPE_MASK GENMASK(3, 1) +#define WX_PSR_1588_CTL_TYPE_L4_V1 FIELD_PREP(GENMASK(3, 1), 1) +#define WX_PSR_1588_CTL_TYPE_EVENT_V2 FIELD_PREP(GENMASK(3, 1), 5) +#define WX_PSR_1588_CTL_VALID BIT(0) /* mcasst/ucast overflow tbl */ #define WX_PSR_MC_TBL(_i) (0x15200 + ((_i) * 4)) +#define WX_PSR_MC_TBL_REG(_i) FIELD_GET(GENMASK(11, 5), (_i)) +#define WX_PSR_MC_TBL_BIT(_i) FIELD_GET(GENMASK(4, 0), (_i)) #define WX_PSR_UC_TBL(_i) (0x15400 + ((_i) * 4)) +#define WX_PSR_VM_CTL_REPLEN BIT(30) /* replication enabled */ +#define WX_PSR_VM_CTL_POOL_MASK GENMASK(12, 7) /* VM L2 contorl */ #define WX_PSR_VM_L2CTL(_i) (0x15600 + ((_i) * 4)) #define WX_PSR_VM_L2CTL_UPE BIT(4) /* unicast promiscuous */ #define WX_PSR_VM_L2CTL_VACC BIT(6) /* accept nomatched vlan */ +#define WX_PSR_VM_L2CTL_VPE BIT(7) /* vlan promiscuous mode */ #define WX_PSR_VM_L2CTL_AUPE BIT(8) /* accept untagged packets */ #define WX_PSR_VM_L2CTL_ROMPE BIT(9) /* accept packets in MTA tbl */ #define WX_PSR_VM_L2CTL_ROPE BIT(10) /* accept packets in UC tbl */ @@ -226,10 +269,12 @@ #define WX_PSR_VLAN_SWC 0x16220 #define WX_PSR_VLAN_SWC_VM_L 0x16224 #define WX_PSR_VLAN_SWC_VM_H 0x16228 +#define WX_PSR_VLAN_SWC_VM(_i) (0x16224 + ((_i) * 4)) #define WX_PSR_VLAN_SWC_IDX 0x16230 /* 64 vlan entries */ /* VLAN pool filtering masks */ #define WX_PSR_VLAN_SWC_VIEN BIT(31) /* filter is valid */ #define WX_PSR_VLAN_SWC_ENTRIES 64 +#define WX_PSR_VLAN_SWC_VLANID_MASK GENMASK(11, 0) /********************************* RSEC **************************************/ /* general rsec */ @@ -240,6 +285,13 @@ #define WX_RSC_ST 0x17004 #define WX_RSC_ST_RSEC_RDY BIT(0) +/*********************** Transmit DMA registers **************************/ +/* transmit global control */ +#define WX_TDM_ETYPE_AS(_i) (0x18058 + ((_i) * 4)) +#define WX_TDM_VLAN_INS(_i) (0x18100 + ((_i) * 4)) +/* Per VF Port VLAN insertion rules */ +#define WX_TDM_VLAN_INS_VLANA_DEFAULT BIT(30) /* Always use default VLAN*/ + /****************************** TDB ******************************************/ #define WX_TDB_PB_SZ(_i) (0x1CC00 + ((_i) * 4)) #define WX_TXPKT_SIZE_MAX 0xA /* Max Tx Packet size */ @@ -253,6 +305,32 @@ #define WX_TSC_ST_SECTX_RDY BIT(0) #define WX_TSC_BUF_AE 0x1D00C #define WX_TSC_BUF_AE_THR GENMASK(9, 0) +/* 1588 */ +#define WX_TSC_1588_CTL 0x11F00 +#define WX_TSC_1588_CTL_ENABLED BIT(4) +#define WX_TSC_1588_CTL_VALID BIT(0) +#define WX_TSC_1588_STMPL 0x11F04 +#define WX_TSC_1588_STMPH 0x11F08 +#define WX_TSC_1588_SYSTIML 0x11F0C +#define WX_TSC_1588_SYSTIMH 0x11F10 +#define WX_TSC_1588_INC 0x11F14 +#define WX_TSC_1588_INT_ST 0x11F20 +#define WX_TSC_1588_INT_ST_TT1 BIT(5) +#define WX_TSC_1588_INT_EN 0x11F24 +#define WX_TSC_1588_INT_EN_TT1 BIT(5) +#define WX_TSC_1588_AUX_CTL 0x11F28 +#define WX_TSC_1588_AUX_CTL_EN_TS0 BIT(8) 
+#define WX_TSC_1588_AUX_CTL_EN_TT1 BIT(2) +#define WX_TSC_1588_AUX_CTL_PLSG BIT(1) +#define WX_TSC_1588_AUX_CTL_EN_TT0 BIT(0) +#define WX_TSC_1588_TRGT_L(i) (0x11F2C + ((i) * 8)) /* [0,1] */ +#define WX_TSC_1588_TRGT_H(i) (0x11F30 + ((i) * 8)) /* [0,1] */ +#define WX_TSC_1588_SDP(i) (0x11F5C + ((i) * 4)) /* [0,3] */ +#define WX_TSC_1588_SDP_OUT_LEVEL_H FIELD_PREP(BIT(4), 0) +#define WX_TSC_1588_SDP_OUT_LEVEL_L FIELD_PREP(BIT(4), 1) +#define WX_TSC_1588_SDP_FUN_SEL_MASK GENMASK(2, 0) +#define WX_TSC_1588_SDP_FUN_SEL_TT0 FIELD_PREP(WX_TSC_1588_SDP_FUN_SEL_MASK, 1) +#define WX_TSC_1588_SDP_FUN_SEL_TS0 FIELD_PREP(WX_TSC_1588_SDP_FUN_SEL_MASK, 5) /************************************** MNG ********************************/ #define WX_MNG_SWFW_SYNC 0x1E008 @@ -264,6 +342,10 @@ #define WX_MNG_MBOX_CTL_FWRDY BIT(2) #define WX_MNG_BMC2OS_CNT 0x1E090 #define WX_MNG_OS2BMC_CNT 0x1E094 +#define WX_SW2FW_MBOX_CMD 0x1E0A0 +#define WX_SW2FW_MBOX_CMD_VLD BIT(31) +#define WX_SW2FW_MBOX 0x1E200 +#define WX_FW2SW_MBOX 0x1E300 /************************************* ETH MAC *****************************/ #define WX_MAC_TX_CFG 0x11000 @@ -279,6 +361,9 @@ #define WX_MAC_WDG_TIMEOUT 0x1100C #define WX_MAC_RX_FLOW_CTRL 0x11090 #define WX_MAC_RX_FLOW_CTRL_RFE BIT(0) /* receive fc enable */ + +#define WX_MAC_WDG_TIMEOUT_WTO_MASK GENMASK(3, 0) +#define WX_MAC_WDG_TIMEOUT_WTO_DELTA 2 /* MDIO Registers */ #define WX_MSCA 0x11200 #define WX_MSCA_RA(v) FIELD_PREP(U16_MAX, v) @@ -327,6 +412,7 @@ enum WX_MSCA_CMD_value { #define WX_12K_ITR 336 #define WX_20K_ITR 200 #define WX_SP_MAX_EITR 0x00000FF8U +#define WX_AML_MAX_EITR 0x00000FFFU #define WX_EM_MAX_EITR 0x00007FFCU /* transmit DMA Registers */ @@ -367,9 +453,19 @@ enum WX_MSCA_CMD_value { /* Number of 80 microseconds we wait for PCI Express master disable */ #define WX_PCI_MASTER_DISABLE_TIMEOUT 80000 +#define WX_RSS_64Q_MASK 0x3F +#define WX_RSS_8Q_MASK 0x7 +#define WX_RSS_4Q_MASK 0x3 +#define WX_RSS_2Q_MASK 0x1 +#define WX_RSS_DISABLED_MASK 0x0 + +#define WX_VMDQ_4Q_MASK 0x7C +#define WX_VMDQ_2Q_MASK 0x7E + /****************** Manageablility Host Interface defines ********************/ #define WX_HI_MAX_BLOCK_BYTE_LENGTH 256 /* Num of bytes in range */ #define WX_HI_COMMAND_TIMEOUT 1000 /* Process HI command limit */ +#define WX_HIC_HDR_INDEX_MAX 255 #define FW_READ_SHADOW_RAM_CMD 0x31 #define FW_READ_SHADOW_RAM_LEN 0x6 @@ -382,6 +478,8 @@ enum WX_MSCA_CMD_value { #define FW_CEM_CMD_RESERVED 0X0 #define FW_CEM_MAX_RETRIES 3 #define FW_CEM_RESP_STATUS_SUCCESS 0x1 +#define FW_PPS_SET_CMD 0xF6 +#define FW_PPS_SET_LEN 0x14 #define WX_SW_REGION_PTR 0x1C @@ -431,7 +529,7 @@ enum WX_MSCA_CMD_value { #define WX_REQ_TX_DESCRIPTOR_MULTIPLE 128 #define WX_MAX_JUMBO_FRAME_SIZE 9432 /* max payload 9414 */ -#define VMDQ_P(p) p +#define VMDQ_P(p) ((p) + wx->ring_feature[RING_F_VMDQ].offset) /* Supported Rx Buffer Sizes */ #define WX_RXBUFFER_256 256 /* Used for skb receive header */ @@ -460,6 +558,7 @@ enum WX_MSCA_CMD_value { #define WX_RXD_STAT_L4CS BIT(7) /* L4 xsum calculated */ #define WX_RXD_STAT_IPCS BIT(8) /* IP xsum calculated */ #define WX_RXD_STAT_OUTERIPCS BIT(10) /* Cloud IP xsum calculated*/ +#define WX_RXD_STAT_TS BIT(14) /* IEEE1588 Time Stamp */ #define WX_RXD_ERR_OUTERIPER BIT(26) /* CRC IP Header error */ #define WX_RXD_ERR_RXE BIT(29) /* Any MAC Error */ @@ -663,21 +762,30 @@ struct wx_hic_hdr { u8 cmd_resv; u8 ret_status; } cmd_or_resp; - u8 checksum; + union { + u8 checksum; + u8 index; + }; }; struct wx_hic_hdr2_req { u8 cmd; u8 buf_lenh; u8 buf_lenl; - u8 
checksum; + union { + u8 checksum; + u8 index; + }; }; struct wx_hic_hdr2_rsp { u8 cmd; u8 buf_lenl; u8 buf_lenh_status; /* 7-5: high bits of buf_len, 4-0: status */ - u8 checksum; + union { + u8 checksum; + u8 index; + }; }; union wx_hic_hdr2 { @@ -701,12 +809,25 @@ struct wx_hic_reset { u16 reset_type; }; +struct wx_hic_set_pps { + struct wx_hic_hdr hdr; + u8 lan_id; + u8 enable; + u16 pad2; + u64 nsec; + u64 cycles; +}; + /* Bus parameters */ struct wx_bus_info { u8 func; u16 device; }; +struct wx_mbx_info { + u16 size; +}; + struct wx_thermal_sensor_data { s16 temp; s16 alarm_thresh; @@ -716,7 +837,8 @@ struct wx_thermal_sensor_data { enum wx_mac_type { wx_mac_unknown = 0, wx_mac_sp, - wx_mac_em + wx_mac_em, + wx_mac_aml, }; enum sp_media_type { @@ -863,6 +985,7 @@ struct wx_tx_context_desc { */ struct wx_tx_buffer { union wx_tx_desc *next_to_watch; + unsigned long time_stamp; struct sk_buff *skb; unsigned int bytecount; unsigned short gso_segs; @@ -924,6 +1047,7 @@ struct wx_ring { unsigned int size; /* length in bytes */ u16 count; /* amount of descriptors */ + unsigned long last_rx_timestamp; u8 queue_index; /* needed for multiqueue queue management */ u8 reg_idx; /* holds the special value that gets @@ -977,6 +1101,7 @@ struct wx_ring_feature { enum wx_ring_f_enum { RING_F_NONE = 0, + RING_F_VMDQ, RING_F_RSS, RING_F_FDIR, RING_F_ARRAY_SIZE /* must be last in enum set */ @@ -1026,13 +1151,51 @@ struct wx_hw_stats { enum wx_state { WX_STATE_RESETTING, - WX_STATE_NBITS, /* must be last */ + WX_STATE_SWFW_BUSY, + WX_STATE_PTP_RUNNING, + WX_STATE_PTP_TX_IN_PROGRESS, + WX_STATE_NBITS /* must be last */ +}; + +struct vf_data_storage { + struct pci_dev *vfdev; + unsigned char vf_mac_addr[ETH_ALEN]; + bool spoofchk_enabled; + bool link_enable; + bool trusted; + int xcast_mode; + unsigned int vf_api; + bool clear_to_send; + u16 pf_vlan; /* When set, guest VLAN config not allowed. 
*/ + u16 pf_qos; + bool pf_set_mac; + + u16 vf_mc_hashes[WX_MAX_VF_MC_ENTRIES]; + u16 num_vf_mc_hashes; + u16 vlan_count; + int link_state; +}; + +struct vf_macvlans { + struct list_head mvlist; + int vf; + bool free; + bool is_macvlan; + u8 vf_macvlan[ETH_ALEN]; }; enum wx_pf_flags { + WX_FLAG_SWFW_RING, + WX_FLAG_VMDQ_ENABLED, + WX_FLAG_VLAN_PROMISC, + WX_FLAG_SRIOV_ENABLED, WX_FLAG_FDIR_CAPABLE, WX_FLAG_FDIR_HASH, WX_FLAG_FDIR_PERFECT, + WX_FLAG_RSC_CAPABLE, + WX_FLAG_RX_HWTSTAMP_ENABLED, + WX_FLAG_RX_HWTSTAMP_IN_REGISTER, + WX_FLAG_PTP_PPS_ENABLED, WX_PF_FLAGS_NBITS /* must be last */ }; @@ -1046,6 +1209,7 @@ struct wx { struct pci_dev *pdev; struct net_device *netdev; struct wx_bus_info bus; + struct wx_mbx_info mbx; struct wx_mac_info mac; enum em_mac_type mac_type; enum sp_media_type media_type; @@ -1066,8 +1230,10 @@ struct wx { char eeprom_id[32]; char *driver_name; enum wx_reset_type reset_type; + u8 swfw_index; /* PHY stuff */ + bool notify_down; unsigned int link; int speed; int duplex; @@ -1099,6 +1265,8 @@ struct wx { struct wx_ring *tx_ring[64] ____cacheline_aligned_in_smp; struct wx_ring *rx_ring[64]; struct wx_q_vector *q_vector[64]; + int num_rx_pools; + int num_rx_queues_per_pool; unsigned int queues_per_pool; struct msix_entry *msix_q_entries; @@ -1120,6 +1288,7 @@ struct wx { u32 wol; u16 bd_number; + bool default_up; struct wx_hw_stats stats; u64 tx_busy; @@ -1128,11 +1297,40 @@ struct wx { u64 hw_csum_rx_good; u64 hw_csum_rx_error; u64 alloc_rx_buff_failed; + unsigned int num_vfs; + struct vf_data_storage *vfinfo; + struct vf_macvlans vf_mvs; + struct vf_macvlans *mv_list; + unsigned long fwd_bitmask; u32 atr_sample_rate; void (*atr)(struct wx_ring *ring, struct wx_tx_buffer *first, u8 ptype); void (*configure_fdir)(struct wx *wx); + int (*setup_tc)(struct net_device *netdev, u8 tc); void (*do_reset)(struct net_device *netdev); + int (*ptp_setup_sdp)(struct wx *wx); + + bool pps_enabled; + u64 pps_width; + u64 pps_edge_start; + u64 pps_edge_end; + u64 sec_to_cc; + u32 base_incval; + u32 tx_hwtstamp_pkts; + u32 tx_hwtstamp_timeouts; + u32 tx_hwtstamp_skipped; + u32 tx_hwtstamp_errors; + u32 rx_hwtstamp_cleared; + unsigned long last_overflow_check; + unsigned long last_rx_ptp_check; + unsigned long ptp_tx_start; + seqlock_t hw_tc_lock; /* seqlock for ptp */ + struct cyclecounter hw_cc; + struct timecounter hw_tc; + struct ptp_clock *ptp_clock; + struct ptp_clock_info ptp_caps; + struct kernel_hwtstamp_config tstamp_config; + struct sk_buff *ptp_tx_skb; }; #define WX_INTR_ALL (~0ULL) @@ -1177,6 +1375,24 @@ rd64(struct wx *wx, u32 reg) return (lsb | msb << 32); } +static inline u32 +rd32ptp(struct wx *wx, u32 reg) +{ + if (wx->mac.type == wx_mac_em) + return rd32(wx, reg); + + return rd32(wx, reg + 0xB500); +} + +static inline void +wr32ptp(struct wx *wx, u32 reg, u32 value) +{ + if (wx->mac.type == wx_mac_em) + return wr32(wx, reg, value); + + return wr32(wx, reg + 0xB500, value); +} + /* On some domestic CPU platforms, sometimes IO is not synchronized with * flushing memory, here use readl() to flush PCI read and write. 
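The rd32ptp()/wr32ptp() accessors above hide a register-layout difference: on the EM MAC the 1588 timesync block is mapped flat, while on the other MACs it sits behind a 0xB500 window. A minimal usage sketch follows, reusing the WX_TSC_1588_TRGT_* macros defined earlier in this header; wx_ptp_load_target_time() is a hypothetical helper for illustration, not a function from this patch:

/* Hypothetical sketch: arm target-time 0 with a 64-bit nanosecond
 * value. The accessors apply the 0xB500 PTP window offset on non-EM
 * MACs and fall back to the flat map on EM.
 */
static void wx_ptp_load_target_time(struct wx *wx, u64 ns)
{
	wr32ptp(wx, WX_TSC_1588_TRGT_L(0), lower_32_bits(ns));
	wr32ptp(wx, WX_TSC_1588_TRGT_H(0), upper_32_bits(ns));
}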
*/ diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c index e868f7ef49203..7e2d9ec38a306 100644 --- a/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c @@ -138,6 +138,8 @@ static const struct ethtool_ops ngbe_ethtool_ops = { .set_channels = ngbe_set_channels, .get_msglevel = wx_get_msglevel, .set_msglevel = wx_set_msglevel, + .get_ts_info = wx_get_ts_info, + .get_ts_stats = wx_get_ptp_stats, }; void ngbe_set_ethtool_ops(struct net_device *netdev) diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c index 53aeae2f884b0..d321c7f23b0b1 100644 --- a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c @@ -14,6 +14,9 @@ #include "../libwx/wx_type.h" #include "../libwx/wx_hw.h" #include "../libwx/wx_lib.h" +#include "../libwx/wx_ptp.h" +#include "../libwx/wx_mbx.h" +#include "../libwx/wx_sriov.h" #include "ngbe_type.h" #include "ngbe_mdio.h" #include "ngbe_hw.h" @@ -128,6 +131,10 @@ static int ngbe_sw_init(struct wx *wx) wx->tx_work_limit = NGBE_DEFAULT_TX_WORK; wx->rx_work_limit = NGBE_DEFAULT_RX_WORK; + wx->mbx.size = WX_VXMAILBOX_SIZE; + wx->setup_tc = ngbe_setup_tc; + set_bit(0, &wx->fwd_bitmask); + return 0; } @@ -167,7 +174,7 @@ static irqreturn_t ngbe_intr(int __always_unused irq, void *data) struct wx_q_vector *q_vector; struct wx *wx = data; struct pci_dev *pdev; - u32 eicr; + u32 eicr, eicr_misc; q_vector = wx->q_vector[0]; pdev = wx->pdev; @@ -185,6 +192,10 @@ static irqreturn_t ngbe_intr(int __always_unused irq, void *data) if (!(pdev->msi_enabled)) wr32(wx, WX_PX_INTA, 1); + eicr_misc = wx_misc_isb(wx, WX_ISB_MISC); + if (unlikely(eicr_misc & NGBE_PX_MISC_IC_TIMESYNC)) + wx_ptp_check_pps_event(wx); + wx->isb_mem[WX_ISB_MISC] = 0; /* would disable interrupts here but it is auto disabled */ napi_schedule_irqoff(&q_vector->napi); @@ -195,9 +206,13 @@ static irqreturn_t ngbe_intr(int __always_unused irq, void *data) return IRQ_HANDLED; } -static irqreturn_t ngbe_msix_other(int __always_unused irq, void *data) +static irqreturn_t ngbe_msix_common(struct wx *wx, u32 eicr) { - struct wx *wx = data; + if (eicr & NGBE_PX_MISC_IC_VF_MBOX) + wx_msg_task(wx); + + if (unlikely(eicr & NGBE_PX_MISC_IC_TIMESYNC)) + wx_ptp_check_pps_event(wx); /* re-enable the original interrupt state, no lsc, no queues */ if (netif_running(wx->netdev)) @@ -206,6 +221,35 @@ static irqreturn_t ngbe_msix_other(int __always_unused irq, void *data) return IRQ_HANDLED; } +static irqreturn_t ngbe_msix_other(int __always_unused irq, void *data) +{ + struct wx *wx = data; + u32 eicr; + + eicr = wx_misc_isb(wx, WX_ISB_MISC); + + return ngbe_msix_common(wx, eicr); +} + +static irqreturn_t ngbe_msic_and_queue(int __always_unused irq, void *data) +{ + struct wx_q_vector *q_vector; + struct wx *wx = data; + u32 eicr; + + eicr = wx_misc_isb(wx, WX_ISB_MISC); + if (!eicr) { + /* queue */ + q_vector = wx->q_vector[0]; + napi_schedule_irqoff(&q_vector->napi); + if (netif_running(wx->netdev)) + ngbe_irq_enable(wx, true); + return IRQ_HANDLED; + } + + return ngbe_msix_common(wx, eicr); +} + /** * ngbe_request_msix_irqs - Initialize MSI-X interrupts * @wx: board private structure @@ -238,8 +282,16 @@ static int ngbe_request_msix_irqs(struct wx *wx) } } - err = request_irq(wx->msix_entry->vector, - ngbe_msix_other, 0, netdev->name, wx); + /* Due to hardware design, when num_vfs < 7, pf can use 0 for misc and 1 + * for queue. 
But when num_vfs == 7, vector[1] is assigned to vf6. + * Misc and queue should reuse interrupt vector[0]. + */ + if (wx->num_vfs == 7) + err = request_irq(wx->msix_entry->vector, + ngbe_msic_and_queue, 0, netdev->name, wx); + else + err = request_irq(wx->msix_entry->vector, + ngbe_msix_other, 0, netdev->name, wx); if (err) { wx_err(wx, "request_irq for msix_other failed: %d\n", err); @@ -291,6 +343,22 @@ static void ngbe_disable_device(struct wx *wx) struct net_device *netdev = wx->netdev; u32 i; + if (wx->num_vfs) { + /* Clear EITR Select mapping */ + wr32(wx, WX_PX_ITRSEL, 0); + + /* Mark all the VFs as inactive */ + for (i = 0; i < wx->num_vfs; i++) + wx->vfinfo[i].clear_to_send = 0; + wx->notify_down = true; + /* ping all the active vfs to let them know we are going down */ + wx_ping_all_vfs_with_link_status(wx, false); + wx->notify_down = false; + + /* Disable all VFTE/VFRE TX/RX */ + wx_disable_vf_rx_tx(wx); + } + /* disable all enabled rx queues */ for (i = 0; i < wx->num_rx_queues; i++) /* this call also flushes the previous write */ @@ -313,10 +381,19 @@ static void ngbe_disable_device(struct wx *wx) wx_update_stats(wx); } +static void ngbe_reset(struct wx *wx) +{ + wx_flush_sw_mac_table(wx); + wx_mac_set_default_filter(wx, wx->mac.addr); + if (test_bit(WX_STATE_PTP_RUNNING, wx->state)) + wx_ptp_reset(wx); +} + void ngbe_down(struct wx *wx) { phylink_stop(wx->phylink); ngbe_disable_device(wx); + ngbe_reset(wx); wx_clean_all_tx_rings(wx); wx_clean_all_rx_rings(wx); } @@ -339,6 +416,11 @@ void ngbe_up(struct wx *wx) ngbe_sfp_modules_txrx_powerctl(wx, true); phylink_start(wx->phylink); + /* Set PF Reset Done bit so PF/VF Mail Ops can work */ + wr32m(wx, WX_CFG_PORT_CTL, + WX_CFG_PORT_CTL_PFRSTD, WX_CFG_PORT_CTL_PFRSTD); + if (wx->num_vfs) + wx_ping_all_vfs_with_link_status(wx, false); } /** @@ -379,6 +461,8 @@ static int ngbe_open(struct net_device *netdev) if (err) goto err_dis_phy; + wx_ptp_init(wx); + ngbe_up(wx); return 0; @@ -407,6 +491,7 @@ static int ngbe_close(struct net_device *netdev) { struct wx *wx = netdev_priv(netdev); + wx_ptp_stop(wx); ngbe_down(wx); wx_free_irq(wx); wx_free_isb_resources(wx); @@ -507,6 +592,8 @@ static const struct net_device_ops ngbe_netdev_ops = { .ndo_get_stats64 = wx_get_stats64, .ndo_vlan_rx_add_vid = wx_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = wx_vlan_rx_kill_vid, + .ndo_hwtstamp_set = wx_hwtstamp_set, + .ndo_hwtstamp_get = wx_hwtstamp_get, }; /** @@ -578,6 +665,10 @@ static int ngbe_probe(struct pci_dev *pdev, goto err_pci_release_regions; } + /* Emerald supports up to 8 VFs per PF, but the physical + * function also needs one pool for basic networking.
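To make the pool arithmetic concrete: the hardware exposes eight pools and the PF keeps one, which is why the driver caps SR-IOV at seven VFs (NGBE_MAX_VFS_DRV_LIMIT in ngbe_type.h below). The handler choice made in ngbe_request_msix_irqs() falls out of the same budget; ngbe_pick_misc_handler() here is an illustrative helper, not code from this patch:

/* Illustrative only: below seven VFs the PF keeps vector 0 for misc
 * events and vector 1 for its queue; at the seven-VF maximum,
 * vector 1 belongs to VF6 and vector 0 must serve both roles.
 */
static irq_handler_t ngbe_pick_misc_handler(struct wx *wx)
{
	return wx->num_vfs == 7 ? ngbe_msic_and_queue : ngbe_msix_other;
}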
+ */ + pci_sriov_set_totalvfs(pdev, NGBE_MAX_VFS_DRV_LIMIT); wx->driver_name = ngbe_driver_name; ngbe_set_ethtool_ops(netdev); netdev->netdev_ops = &ngbe_netdev_ops; @@ -725,6 +816,7 @@ static void ngbe_remove(struct pci_dev *pdev) struct net_device *netdev; netdev = wx->netdev; + wx_disable_sriov(wx); unregister_netdev(netdev); phylink_destroy(wx->phylink); pci_release_selected_regions(pdev, @@ -784,6 +876,7 @@ static struct pci_driver ngbe_driver = { .suspend = ngbe_suspend, .resume = ngbe_resume, .shutdown = ngbe_shutdown, + .sriov_configure = wx_pci_sriov_configure, }; module_pci_driver(ngbe_driver); diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c index a5e9b779c44d0..c63bb6e6f4056 100644 --- a/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c @@ -7,7 +7,9 @@ #include #include "../libwx/wx_type.h" +#include "../libwx/wx_ptp.h" #include "../libwx/wx_hw.h" +#include "../libwx/wx_sriov.h" #include "ngbe_type.h" #include "ngbe_mdio.h" @@ -64,6 +66,13 @@ static void ngbe_mac_config(struct phylink_config *config, unsigned int mode, static void ngbe_mac_link_down(struct phylink_config *config, unsigned int mode, phy_interface_t interface) { + struct wx *wx = phylink_to_wx(config); + + wx->speed = SPEED_UNKNOWN; + if (test_bit(WX_STATE_PTP_RUNNING, wx->state)) + wx_ptp_reset_cyclecounter(wx); + /* ping all the active vfs to let them know we are going down */ + wx_ping_all_vfs_with_link_status(wx, false); } static void ngbe_mac_link_up(struct phylink_config *config, @@ -103,6 +112,13 @@ static void ngbe_mac_link_up(struct phylink_config *config, wr32(wx, WX_MAC_PKT_FLT, WX_MAC_PKT_FLT_PR); reg = rd32(wx, WX_MAC_WDG_TIMEOUT); wr32(wx, WX_MAC_WDG_TIMEOUT, reg); + + wx->speed = speed; + wx->last_rx_ptp_check = jiffies; + if (test_bit(WX_STATE_PTP_RUNNING, wx->state)) + wx_ptp_reset_cyclecounter(wx); + /* ping all the active vfs to let them know we are going up */ + wx_ping_all_vfs_with_link_status(wx, true); } static const struct phylink_mac_ops ngbe_mac_ops = { diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h b/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h index f48ed7fc1805a..bb74263f04985 100644 --- a/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h @@ -70,15 +70,22 @@ /* Extended Interrupt Enable Set */ #define NGBE_PX_MISC_IEN_DEV_RST BIT(10) +#define NGBE_PX_MISC_IEN_TIMESYNC BIT(11) #define NGBE_PX_MISC_IEN_ETH_LK BIT(18) #define NGBE_PX_MISC_IEN_INT_ERR BIT(20) +#define NGBE_PX_MISC_IC_VF_MBOX BIT(23) #define NGBE_PX_MISC_IEN_GPIO BIT(26) #define NGBE_PX_MISC_IEN_MASK ( \ NGBE_PX_MISC_IEN_DEV_RST | \ + NGBE_PX_MISC_IEN_TIMESYNC | \ NGBE_PX_MISC_IEN_ETH_LK | \ NGBE_PX_MISC_IEN_INT_ERR | \ + NGBE_PX_MISC_IC_VF_MBOX | \ NGBE_PX_MISC_IEN_GPIO) +/* Extended Interrupt Cause Read */ +#define NGBE_PX_MISC_IC_TIMESYNC BIT(11) /* time sync */ + #define NGBE_INTR_ALL 0x1FF #define NGBE_INTR_MISC BIT(0) @@ -129,6 +136,7 @@ #define NGBE_MAX_RXD 8192 #define NGBE_MIN_RXD 128 +#define NGBE_MAX_VFS_DRV_LIMIT 7 extern char ngbe_driver_name[]; void ngbe_down(struct wx *wx); diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c index d98314b26c194..78999d484f18e 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c @@ -529,6 +529,8 @@ static const struct ethtool_ops txgbe_ethtool_ops = { .set_rxnfc = txgbe_set_rxnfc, 
.get_msglevel = wx_get_msglevel, .set_msglevel = wx_set_msglevel, + .get_ts_info = wx_get_ts_info, + .get_ts_stats = wx_get_ptp_stats, }; void txgbe_set_ethtool_ops(struct net_device *netdev) diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c index cd1372da92a94..4b9921b7bb112 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c @@ -197,6 +197,12 @@ int txgbe_reset_hw(struct wx *wx) txgbe_reset_misc(wx); + if (wx->mac.type != wx_mac_sp) { + wr32(wx, TXGBE_PX_PF_BME, 0x1); + wr32m(wx, TXGBE_RDM_RSC_CTL, TXGBE_RDM_RSC_CTL_FREE_CTL, + TXGBE_RDM_RSC_CTL_FREE_CTL); + } + wx_clear_hw_cntrs(wx); /* Store the permanent mac address */ diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c index 0ee73a265545c..280c74a57f6e2 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c @@ -7,6 +7,7 @@ #include "../libwx/wx_type.h" #include "../libwx/wx_lib.h" #include "../libwx/wx_hw.h" +#include "../libwx/wx_sriov.h" #include "txgbe_type.h" #include "txgbe_phy.h" #include "txgbe_irq.h" @@ -109,8 +110,17 @@ static irqreturn_t txgbe_misc_irq_handle(int irq, void *data) struct wx *wx = txgbe->wx; u32 eicr; - if (wx->pdev->msix_enabled) + if (wx->pdev->msix_enabled) { + eicr = wx_misc_isb(wx, WX_ISB_MISC) & TXGBE_PX_MISC_IEN_MASK; + if (!eicr) + return IRQ_NONE; + txgbe->eicr = eicr; + if (eicr & TXGBE_PX_MISC_IC_VF_MBOX) { + wx_msg_task(txgbe->wx); + wx_intr_enable(wx, TXGBE_INTR_MISC); + } return IRQ_WAKE_THREAD; + } eicr = wx_misc_isb(wx, WX_ISB_VEC0); if (!eicr) { @@ -129,6 +139,11 @@ static irqreturn_t txgbe_misc_irq_handle(int irq, void *data) q_vector = wx->q_vector[0]; napi_schedule_irqoff(&q_vector->napi); + eicr = wx_misc_isb(wx, WX_ISB_MISC) & TXGBE_PX_MISC_IEN_MASK; + if (!eicr) + return IRQ_NONE; + txgbe->eicr = eicr; + return IRQ_WAKE_THREAD; } @@ -140,7 +155,7 @@ static irqreturn_t txgbe_misc_irq_thread_fn(int irq, void *data) unsigned int sub_irq; u32 eicr; - eicr = wx_misc_isb(wx, WX_ISB_MISC); + eicr = txgbe->eicr; if (eicr & (TXGBE_PX_MISC_ETH_LK | TXGBE_PX_MISC_ETH_LKDN | TXGBE_PX_MISC_ETH_AN)) { sub_irq = irq_find_mapping(txgbe->misc.domain, TXGBE_IRQ_LINK); @@ -166,6 +181,9 @@ static void txgbe_del_irq_domain(struct txgbe *txgbe) void txgbe_free_misc_irq(struct txgbe *txgbe) { + if (txgbe->wx->mac.type == wx_mac_aml) + return; + free_irq(txgbe->link_irq, txgbe); free_irq(txgbe->misc.irq, txgbe); txgbe_del_irq_domain(txgbe); @@ -177,7 +195,10 @@ int txgbe_setup_misc_irq(struct txgbe *txgbe) struct wx *wx = txgbe->wx; int hwirq, err; - txgbe->misc.nirqs = 1; + if (wx->mac.type == wx_mac_aml) + goto skip_sp_irq; + + txgbe->misc.nirqs = TXGBE_IRQ_MAX; txgbe->misc.domain = irq_domain_add_simple(NULL, txgbe->misc.nirqs, 0, &txgbe_misc_irq_domain_ops, txgbe); if (!txgbe->misc.domain) @@ -206,6 +227,7 @@ int txgbe_setup_misc_irq(struct txgbe *txgbe) if (err) goto free_msic_irq; +skip_sp_irq: wx->misc_irq_domain = true; return 0; diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c index f774502680364..239270c28a899 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c @@ -13,7 +13,10 @@ #include "../libwx/wx_type.h" #include "../libwx/wx_lib.h" +#include "../libwx/wx_ptp.h" #include "../libwx/wx_hw.h" +#include "../libwx/wx_mbx.h" +#include 
"../libwx/wx_sriov.h" #include "txgbe_type.h" #include "txgbe_hw.h" #include "txgbe_phy.h" @@ -34,6 +37,12 @@ char txgbe_driver_name[] = "txgbe"; static const struct pci_device_id txgbe_pci_tbl[] = { { PCI_VDEVICE(WANGXUN, TXGBE_DEV_ID_SP1000), 0}, { PCI_VDEVICE(WANGXUN, TXGBE_DEV_ID_WX1820), 0}, + { PCI_VDEVICE(WANGXUN, TXGBE_DEV_ID_AML5010), 0}, + { PCI_VDEVICE(WANGXUN, TXGBE_DEV_ID_AML5110), 0}, + { PCI_VDEVICE(WANGXUN, TXGBE_DEV_ID_AML5025), 0}, + { PCI_VDEVICE(WANGXUN, TXGBE_DEV_ID_AML5125), 0}, + { PCI_VDEVICE(WANGXUN, TXGBE_DEV_ID_AML5040), 0}, + { PCI_VDEVICE(WANGXUN, TXGBE_DEV_ID_AML5140), 0}, /* required last entry */ { .device = 0 } }; @@ -89,7 +98,18 @@ static void txgbe_up_complete(struct wx *wx) smp_mb__before_atomic(); wx_napi_enable_all(wx); - phylink_start(wx->phylink); + if (wx->mac.type == wx_mac_aml) { + u32 reg; + + reg = rd32(wx, TXGBE_AML_MAC_TX_CFG); + reg &= ~TXGBE_AML_MAC_TX_CFG_SPEED_MASK; + reg |= TXGBE_AML_MAC_TX_CFG_SPEED_25G; + wr32(wx, WX_MAC_TX_CFG, reg); + txgbe_enable_sec_tx_path(wx); + netif_carrier_on(wx->netdev); + } else { + phylink_start(wx->phylink); + } /* clear any pending interrupts, may auto mask */ rd32(wx, WX_PX_IC(0)); @@ -99,6 +119,12 @@ static void txgbe_up_complete(struct wx *wx) /* enable transmits */ netif_tx_start_all_queues(netdev); + + /* Set PF Reset Done bit so PF/VF Mail Ops can work */ + wr32m(wx, WX_CFG_PORT_CTL, WX_CFG_PORT_CTL_PFRSTD, + WX_CFG_PORT_CTL_PFRSTD); + /* update setting rx tx for all active vfs */ + wx_set_all_vfs(wx); } static void txgbe_reset(struct wx *wx) @@ -116,6 +142,9 @@ static void txgbe_reset(struct wx *wx) memcpy(old_addr, &wx->mac_table[0].addr, netdev->addr_len); wx_flush_sw_mac_table(wx); wx_mac_set_default_filter(wx, old_addr); + + if (test_bit(WX_STATE_PTP_RUNNING, wx->state)) + wx_ptp_reset(wx); } static void txgbe_disable_device(struct wx *wx) @@ -144,6 +173,16 @@ static void txgbe_disable_device(struct wx *wx) wx_err(wx, "%s: invalid bus lan id %d\n", __func__, wx->bus.func); + if (wx->num_vfs) { + /* Clear EITR Select mapping */ + wr32(wx, WX_PX_ITRSEL, 0); + /* Mark all the VFs as inactive */ + for (i = 0 ; i < wx->num_vfs; i++) + wx->vfinfo[i].clear_to_send = 0; + /* update setting rx tx for all active vfs */ + wx_set_all_vfs(wx); + } + if (!(((wx->subsystem_device_id & WX_NCSI_MASK) == WX_NCSI_SUP) || ((wx->subsystem_device_id & WX_WOL_MASK) == WX_WOL_SUP))) { /* disable mac transmiter */ @@ -167,7 +206,10 @@ void txgbe_down(struct wx *wx) { txgbe_disable_device(wx); txgbe_reset(wx); - phylink_stop(wx->phylink); + if (wx->mac.type == wx_mac_aml) + netif_carrier_off(wx->netdev); + else + phylink_stop(wx->phylink); wx_clean_all_tx_rings(wx); wx_clean_all_rx_rings(wx); @@ -176,6 +218,7 @@ void txgbe_down(struct wx *wx) void txgbe_up(struct wx *wx) { wx_configure(wx); + wx_ptp_init(wx); txgbe_up_complete(wx); } @@ -192,6 +235,14 @@ static void txgbe_init_type_code(struct wx *wx) case TXGBE_DEV_ID_WX1820: wx->mac.type = wx_mac_sp; break; + case TXGBE_DEV_ID_AML5010: + case TXGBE_DEV_ID_AML5110: + case TXGBE_DEV_ID_AML5025: + case TXGBE_DEV_ID_AML5125: + case TXGBE_DEV_ID_AML5040: + case TXGBE_DEV_ID_AML5140: + wx->mac.type = wx_mac_aml; + break; default: wx->mac.type = wx_mac_unknown; break; @@ -265,6 +316,8 @@ static int txgbe_sw_init(struct wx *wx) wx->atr = txgbe_atr; wx->configure_fdir = txgbe_configure_fdir; + set_bit(WX_FLAG_RSC_CAPABLE, wx->flags); + /* enable itr by default in dynamic mode */ wx->rx_itr_setting = 1; wx->tx_itr_setting = 1; @@ -272,12 +325,26 @@ static int 
txgbe_sw_init(struct wx *wx) /* set default ring sizes */ wx->tx_ring_count = TXGBE_DEFAULT_TXD; wx->rx_ring_count = TXGBE_DEFAULT_RXD; + wx->mbx.size = WX_VXMAILBOX_SIZE; /* set default work limits */ wx->tx_work_limit = TXGBE_DEFAULT_TX_WORK; wx->rx_work_limit = TXGBE_DEFAULT_RX_WORK; + wx->setup_tc = txgbe_setup_tc; wx->do_reset = txgbe_do_reset; + set_bit(0, &wx->fwd_bitmask); + + switch (wx->mac.type) { + case wx_mac_sp: + break; + case wx_mac_aml: + set_bit(WX_FLAG_SWFW_RING, wx->flags); + wx->swfw_index = 0; + break; + default: + break; + } return 0; } @@ -321,6 +388,8 @@ static int txgbe_open(struct net_device *netdev) if (err) goto err_free_irq; + wx_ptp_init(wx); + txgbe_up_complete(wx); return 0; @@ -344,6 +413,7 @@ */ static void txgbe_close_suspend(struct wx *wx) { + wx_ptp_suspend(wx); txgbe_disable_device(wx); wx_free_resources(wx); } @@ -363,6 +433,7 @@ static int txgbe_close(struct net_device *netdev) { struct wx *wx = netdev_priv(netdev); + wx_ptp_stop(wx); txgbe_down(wx); wx_free_irq(wx); wx_free_resources(wx); @@ -479,6 +550,8 @@ static const struct net_device_ops txgbe_netdev_ops = { .ndo_get_stats64 = wx_get_stats64, .ndo_vlan_rx_add_vid = wx_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = wx_vlan_rx_kill_vid, + .ndo_hwtstamp_set = wx_hwtstamp_set, + .ndo_hwtstamp_get = wx_hwtstamp_get, }; /** @@ -552,6 +625,10 @@ static int txgbe_probe(struct pci_dev *pdev, goto err_pci_release_regions; } + /* Sapphire supports up to 63 VFs per PF, but the physical + * function also needs one pool for basic networking. + */ + pci_sriov_set_totalvfs(pdev, TXGBE_MAX_VFS_DRV_LIMIT); wx->driver_name = txgbe_driver_name; txgbe_set_ethtool_ops(netdev); netdev->netdev_ops = &txgbe_netdev_ops; @@ -742,6 +819,7 @@ static void txgbe_remove(struct pci_dev *pdev) struct net_device *netdev; netdev = wx->netdev; + wx_disable_sriov(wx); unregister_netdev(netdev); txgbe_remove_phy(txgbe); @@ -764,6 +842,7 @@ static struct pci_driver txgbe_driver = { .probe = txgbe_probe, .remove = txgbe_remove, .shutdown = txgbe_shutdown, + .sriov_configure = wx_pci_sriov_configure, }; module_pci_driver(txgbe_driver); diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c index 1ae68f94dd490..1863cfd27ee79 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c @@ -15,6 +15,9 @@ #include "../libwx/wx_type.h" #include "../libwx/wx_lib.h" +#include "../libwx/wx_ptp.h" +#include "../libwx/wx_sriov.h" +#include "../libwx/wx_mbx.h" #include "../libwx/wx_hw.h" #include "txgbe_type.h" #include "txgbe_phy.h" @@ -179,6 +182,12 @@ static void txgbe_mac_link_down(struct phylink_config *config, struct wx *wx = phylink_to_wx(config); wr32m(wx, WX_MAC_TX_CFG, WX_MAC_TX_CFG_TE, 0); + + wx->speed = SPEED_UNKNOWN; + if (test_bit(WX_STATE_PTP_RUNNING, wx->state)) + wx_ptp_reset_cyclecounter(wx); + /* ping all the active vfs to let them know we are going down */ + wx_ping_all_vfs_with_link_status(wx, false); } static void txgbe_mac_link_up(struct phylink_config *config, @@ -215,6 +224,13 @@ static void txgbe_mac_link_up(struct phylink_config *config, wr32(wx, WX_MAC_PKT_FLT, WX_MAC_PKT_FLT_PR); wdg = rd32(wx, WX_MAC_WDG_TIMEOUT); wr32(wx, WX_MAC_WDG_TIMEOUT, wdg); + + wx->speed = speed; + wx->last_rx_ptp_check = jiffies; + if (test_bit(WX_STATE_PTP_RUNNING, wx->state)) + wx_ptp_reset_cyclecounter(wx); + /* ping all the active vfs to let them know we are going up */ +
wx_ping_all_vfs_with_link_status(wx, true); } static int txgbe_mac_prepare(struct phylink_config *config, unsigned int mode, @@ -557,6 +573,9 @@ int txgbe_init_phy(struct txgbe *txgbe) struct wx *wx = txgbe->wx; int ret; + if (wx->mac.type == wx_mac_aml) + return 0; + if (txgbe->wx->media_type == sp_media_copper) return txgbe_ext_phy_init(txgbe); @@ -621,6 +640,9 @@ int txgbe_init_phy(struct txgbe *txgbe) void txgbe_remove_phy(struct txgbe *txgbe) { + if (txgbe->wx->mac.type == wx_mac_aml) + return; + if (txgbe->wx->media_type == sp_media_copper) { phylink_disconnect_phy(txgbe->wx->phylink); phylink_destroy(txgbe->wx->phylink); diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h index 629a13e96b859..5937cbc6bd057 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h @@ -10,6 +10,12 @@ /* Device IDs */ #define TXGBE_DEV_ID_SP1000 0x1001 #define TXGBE_DEV_ID_WX1820 0x2001 +#define TXGBE_DEV_ID_AML5010 0x5010 +#define TXGBE_DEV_ID_AML5110 0x5110 +#define TXGBE_DEV_ID_AML5025 0x5025 +#define TXGBE_DEV_ID_AML5125 0x5125 +#define TXGBE_DEV_ID_AML5040 0x5040 +#define TXGBE_DEV_ID_AML5140 0x5140 /* Subsystem IDs */ /* SFP */ @@ -71,11 +77,13 @@ #define TXGBE_PX_MISC_ETH_LK BIT(18) #define TXGBE_PX_MISC_ETH_AN BIT(19) #define TXGBE_PX_MISC_INT_ERR BIT(20) +#define TXGBE_PX_MISC_IC_VF_MBOX BIT(23) #define TXGBE_PX_MISC_GPIO BIT(26) #define TXGBE_PX_MISC_IEN_MASK \ (TXGBE_PX_MISC_ETH_LKDN | TXGBE_PX_MISC_DEV_RST | \ TXGBE_PX_MISC_ETH_EVENT | TXGBE_PX_MISC_ETH_LK | \ - TXGBE_PX_MISC_ETH_AN | TXGBE_PX_MISC_INT_ERR) + TXGBE_PX_MISC_ETH_AN | TXGBE_PX_MISC_INT_ERR | \ + TXGBE_PX_MISC_IC_VF_MBOX) /* Port cfg registers */ #define TXGBE_CFG_PORT_ST 0x14404 @@ -137,6 +145,14 @@ #define TXGBE_RDB_FDIR_FLEX_CFG_MSK BIT(2) #define TXGBE_RDB_FDIR_FLEX_CFG_OFST(v) FIELD_PREP(GENMASK(7, 3), v) +/*************************** Amber Lite Registers ****************************/ +#define TXGBE_PX_PF_BME 0x4B8 +#define TXGBE_AML_MAC_TX_CFG 0x11000 +#define TXGBE_AML_MAC_TX_CFG_SPEED_MASK GENMASK(30, 27) +#define TXGBE_AML_MAC_TX_CFG_SPEED_25G BIT(28) +#define TXGBE_RDM_RSC_CTL 0x1200C +#define TXGBE_RDM_RSC_CTL_FREE_CTL BIT(7) + /* Checksum and EEPROM pointers */ #define TXGBE_EEPROM_LAST_WORD 0x800 #define TXGBE_EEPROM_CHECKSUM 0x2F @@ -160,6 +176,8 @@ #define TXGBE_SP_RX_PB_SIZE 512 #define TXGBE_SP_TDB_PB_SZ (160 * 1024) /* 160KB Packet Buffer */ +#define TXGBE_MAX_VFS_DRV_LIMIT 63 + #define TXGBE_DEFAULT_ATR_SAMPLE_RATE 20 /* Software ATR hash keys */ @@ -334,6 +352,7 @@ struct txgbe { struct clk *clk; struct gpio_chip *gpio; unsigned int link_irq; + u32 eicr; /* flow director */ struct hlist_head fdir_filter_list; diff --git a/drivers/net/ethernet/xilinx/Kconfig b/drivers/net/ethernet/xilinx/Kconfig index 35d96c633a33d..7502214cc7d50 100644 --- a/drivers/net/ethernet/xilinx/Kconfig +++ b/drivers/net/ethernet/xilinx/Kconfig @@ -28,6 +28,7 @@ config XILINX_AXI_EMAC depends on HAS_IOMEM depends on XILINX_DMA select PHYLINK + select DIMLIB help This driver supports the 10/100/1000 Ethernet from Xilinx for the AXI bus interface used in Xilinx Virtex FPGAs and Soc's. 
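DIMLIB is selected because the axienet changes below move RX interrupt moderation onto the kernel's dynamic interrupt moderation (DIM) library. As a rough sketch of that feedback loop, assuming the rx_dim fields added to struct axienet_local below; dim_update_sample() and net_dim() are the standard <linux/dim.h> entry points, and this is equivalent to the hand-built dim_sample in axienet_rx_poll() further down:

#include <linux/dim.h>

/* Each RX NAPI completion feeds an (irqs, packets, bytes) sample to
 * net_dim(), which may schedule rx_dim.work to pick a new moderation
 * profile and rewrite the RX DMA control register.
 */
static void axienet_rx_dim_sample(struct axienet_local *lp)
{
	struct dim_sample sample;

	dim_update_sample(READ_ONCE(lp->rx_irqs),
			  u64_stats_read(&lp->rx_packets),
			  u64_stats_read(&lp->rx_bytes),
			  &sample);
	net_dim(&lp->rx_dim, &sample);
}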
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet.h b/drivers/net/ethernet/xilinx/xilinx_axienet.h index a3f4f3e42587b..5ff742103beb9 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet.h +++ b/drivers/net/ethernet/xilinx/xilinx_axienet.h @@ -9,6 +9,7 @@ #ifndef XILINX_AXIENET_H #define XILINX_AXIENET_H +#include #include #include #include @@ -112,9 +113,6 @@ #define XAXIDMA_DELAY_MASK 0xFF000000 /* Delay timeout counter */ #define XAXIDMA_COALESCE_MASK 0x00FF0000 /* Coalesce counter */ -#define XAXIDMA_DELAY_SHIFT 24 -#define XAXIDMA_COALESCE_SHIFT 16 - #define XAXIDMA_IRQ_IOC_MASK 0x00001000 /* Completion intr */ #define XAXIDMA_IRQ_DELAY_MASK 0x00002000 /* Delay interrupt */ #define XAXIDMA_IRQ_ERROR_MASK 0x00004000 /* Error interrupt */ @@ -126,8 +124,7 @@ /* Default TX/RX Threshold and delay timer values for SGDMA mode */ #define XAXIDMA_DFT_TX_THRESHOLD 24 #define XAXIDMA_DFT_TX_USEC 50 -#define XAXIDMA_DFT_RX_THRESHOLD 1 -#define XAXIDMA_DFT_RX_USEC 50 +#define XAXIDMA_DFT_RX_USEC 16 #define XAXIDMA_BD_CTRL_TXSOF_MASK 0x08000000 /* First tx packet */ #define XAXIDMA_BD_CTRL_TXEOF_MASK 0x04000000 /* Last tx packet */ @@ -487,7 +484,12 @@ struct skbuf_dma_descriptor { * @regs: Base address for the axienet_local device address space * @dma_regs: Base address for the axidma device address space * @napi_rx: NAPI RX control structure + * @rx_dim: DIM state for the receive queue + * @rx_dim_enabled: Whether DIM is enabled or not + * @rx_irqs: Number of interrupts + * @rx_cr_lock: Lock protecting @rx_dma_cr, its register, and @rx_dma_started * @rx_dma_cr: Nominal content of RX DMA control register + * @rx_dma_started: Set when RX DMA is started * @rx_bd_v: Virtual address of the RX buffer descriptor ring * @rx_bd_p: Physical address(start address) of the RX buffer descr. ring * @rx_bd_num: Size of RX buffer descriptor ring @@ -497,7 +499,9 @@ struct skbuf_dma_descriptor { * @rx_bytes: RX byte count for statistics * @rx_stat_sync: Synchronization object for RX stats * @napi_tx: NAPI TX control structure + * @tx_cr_lock: Lock protecting @tx_dma_cr, its register, and @tx_dma_started * @tx_dma_cr: Nominal content of TX DMA control register + * @tx_dma_started: Set when TX DMA is started * @tx_bd_v: Virtual address of the TX buffer descriptor ring * @tx_bd_p: Physical address(start address) of the TX buffer descr. ring * @tx_bd_num: Size of TX buffer descriptor ring @@ -532,10 +536,6 @@ struct skbuf_dma_descriptor { * supported, the maximum frame size would be 9k. Else it is * 1522 bytes (assuming support for basic VLAN) * @rxmem: Stores rx memory size for jumbo frame handling. - * @coalesce_count_rx: Store the irq coalesce on RX side. - * @coalesce_usec_rx: IRQ coalesce delay for RX - * @coalesce_count_tx: Store the irq coalesce on TX side. - * @coalesce_usec_tx: IRQ coalesce delay for TX * @use_dmaengine: flag to check dmaengine framework usage. * @tx_chan: TX DMA channel. * @rx_chan: RX DMA channel. 
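The deleted XAXIDMA_DELAY_SHIFT/XAXIDMA_COALESCE_SHIFT constants are superseded by FIELD_PREP()/FIELD_GET() on the masks that remain in the header (delay timer in bits 31:24, frame count in bits 23:16). A short sketch with arbitrary values; note that, per the driver's own comment, one timeout interval is 125 SG-clock periods, so at the default 125 MHz clock a 50 us delay works out to exactly 50 timer units (6250 cycles / 125 cycles per interval):

#include <linux/bitfield.h>

static void axienet_cr_pack_demo(void)
{
	u32 cr, frames, timer;

	/* Pack a frame count of 24 and a delay-timer value of 50 into
	 * a DMA control register image with completion, delay, and
	 * error interrupts enabled.
	 */
	cr = FIELD_PREP(XAXIDMA_COALESCE_MASK, 24) |
	     FIELD_PREP(XAXIDMA_DELAY_MASK, 50) |
	     XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK |
	     XAXIDMA_IRQ_ERROR_MASK;

	/* Recover the ethtool-visible parameters again, as
	 * axienet_coalesce_params() does.
	 */
	frames = FIELD_GET(XAXIDMA_COALESCE_MASK, cr);	/* == 24 */
	timer = FIELD_GET(XAXIDMA_DELAY_MASK, cr);	/* == 50 */
}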
@@ -569,7 +569,12 @@ struct axienet_local { void __iomem *dma_regs; struct napi_struct napi_rx; + struct dim rx_dim; + bool rx_dim_enabled; + u16 rx_irqs; + spinlock_t rx_cr_lock; u32 rx_dma_cr; + bool rx_dma_started; struct axidma_bd *rx_bd_v; dma_addr_t rx_bd_p; u32 rx_bd_num; @@ -579,7 +584,9 @@ struct axienet_local { struct u64_stats_sync rx_stat_sync; struct napi_struct napi_tx; + spinlock_t tx_cr_lock; u32 tx_dma_cr; + bool tx_dma_started; struct axidma_bd *tx_bd_v; dma_addr_t tx_bd_p; u32 tx_bd_num; @@ -610,10 +617,6 @@ struct axienet_local { u32 max_frm_size; u32 rxmem; - u32 coalesce_count_rx; - u32 coalesce_usec_rx; - u32 coalesce_count_tx; - u32 coalesce_usec_tx; u8 use_dmaengine; struct dma_chan *tx_chan; struct dma_chan *rx_chan; diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c index f33178f90c42e..054abf283ab33 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c @@ -223,23 +223,62 @@ static void axienet_dma_bd_release(struct net_device *ndev) lp->rx_bd_p); } +static u64 axienet_dma_rate(struct axienet_local *lp) +{ + if (lp->axi_clk) + return clk_get_rate(lp->axi_clk); + return 125000000; /* arbitrary guess if no clock rate set */ +} + /** - * axienet_usec_to_timer - Calculate IRQ delay timer value - * @lp: Pointer to the axienet_local structure - * @coalesce_usec: Microseconds to convert into timer value + * axienet_calc_cr() - Calculate control register value + * @lp: Device private data + * @count: Number of completions before an interrupt + * @usec: Microseconds after the last completion before an interrupt + * + * Calculate a control register value based on the coalescing settings. The + * run/stop bit is not set. */ -static u32 axienet_usec_to_timer(struct axienet_local *lp, u32 coalesce_usec) +static u32 axienet_calc_cr(struct axienet_local *lp, u32 count, u32 usec) { - u32 result; - u64 clk_rate = 125000000; /* arbitrary guess if no clock rate set */ + u32 cr; - if (lp->axi_clk) - clk_rate = clk_get_rate(lp->axi_clk); + cr = FIELD_PREP(XAXIDMA_COALESCE_MASK, count) | XAXIDMA_IRQ_IOC_MASK | + XAXIDMA_IRQ_ERROR_MASK; + /* Only set interrupt delay timer if not generating an interrupt on + * the first packet. Otherwise leave at 0 to disable delay interrupt. 
+ */ + if (count > 1) { + u64 clk_rate = axienet_dma_rate(lp); + u32 timer; + + /* 1 Timeout Interval = 125 * (clock period of SG clock) */ + timer = DIV64_U64_ROUND_CLOSEST((u64)usec * clk_rate, + XAXIDMA_DELAY_SCALE); - /* 1 Timeout Interval = 125 * (clock period of SG clock) */ - result = DIV64_U64_ROUND_CLOSEST((u64)coalesce_usec * clk_rate, - XAXIDMA_DELAY_SCALE); - return min(result, FIELD_MAX(XAXIDMA_DELAY_MASK)); + timer = min(timer, FIELD_MAX(XAXIDMA_DELAY_MASK)); + cr |= FIELD_PREP(XAXIDMA_DELAY_MASK, timer) | + XAXIDMA_IRQ_DELAY_MASK; + } + + return cr; +} + +/** + * axienet_coalesce_params() - Extract coalesce parameters from the CR + * @lp: Device private data + * @cr: The control register to parse + * @count: Number of packets before an interrupt + * @usec: Idle time (in usec) before an interrupt + */ +static void axienet_coalesce_params(struct axienet_local *lp, u32 cr, + u32 *count, u32 *usec) +{ + u64 clk_rate = axienet_dma_rate(lp); + u64 timer = FIELD_GET(XAXIDMA_DELAY_MASK, cr); + + *count = FIELD_GET(XAXIDMA_COALESCE_MASK, cr); + *usec = DIV64_U64_ROUND_CLOSEST(timer * XAXIDMA_DELAY_SCALE, clk_rate); } /** @@ -248,30 +287,12 @@ static u32 axienet_usec_to_timer(struct axienet_local *lp, u32 coalesce_usec) */ static void axienet_dma_start(struct axienet_local *lp) { + spin_lock_irq(&lp->rx_cr_lock); + /* Start updating the Rx channel control register */ - lp->rx_dma_cr = (lp->coalesce_count_rx << XAXIDMA_COALESCE_SHIFT) | - XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_ERROR_MASK; - /* Only set interrupt delay timer if not generating an interrupt on - * the first RX packet. Otherwise leave at 0 to disable delay interrupt. - */ - if (lp->coalesce_count_rx > 1) - lp->rx_dma_cr |= (axienet_usec_to_timer(lp, lp->coalesce_usec_rx) - << XAXIDMA_DELAY_SHIFT) | - XAXIDMA_IRQ_DELAY_MASK; + lp->rx_dma_cr &= ~XAXIDMA_CR_RUNSTOP_MASK; axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr); - /* Start updating the Tx channel control register */ - lp->tx_dma_cr = (lp->coalesce_count_tx << XAXIDMA_COALESCE_SHIFT) | - XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_ERROR_MASK; - /* Only set interrupt delay timer if not generating an interrupt on - * the first TX packet. Otherwise leave at 0 to disable delay interrupt. - */ - if (lp->coalesce_count_tx > 1) - lp->tx_dma_cr |= (axienet_usec_to_timer(lp, lp->coalesce_usec_tx) - << XAXIDMA_DELAY_SHIFT) | - XAXIDMA_IRQ_DELAY_MASK; - axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr); - /* Populate the tail pointer and bring the Rx Axi DMA engine out of * halted state. This will make the Rx side ready for reception. */ @@ -280,6 +301,14 @@ static void axienet_dma_start(struct axienet_local *lp) axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr); axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p + (sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1))); + lp->rx_dma_started = true; + + spin_unlock_irq(&lp->rx_cr_lock); + spin_lock_irq(&lp->tx_cr_lock); + + /* Start updating the Tx channel control register */ + lp->tx_dma_cr &= ~XAXIDMA_CR_RUNSTOP_MASK; + axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr); /* Write to the RS (Run-stop) bit in the Tx channel control register. * Tx channel is now ready to run. 
But only after we write to the @@ -288,6 +317,9 @@ static void axienet_dma_start(struct axienet_local *lp) axienet_dma_out_addr(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p); lp->tx_dma_cr |= XAXIDMA_CR_RUNSTOP_MASK; axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr); + lp->tx_dma_started = true; + + spin_unlock_irq(&lp->tx_cr_lock); } /** @@ -623,14 +655,22 @@ static void axienet_dma_stop(struct axienet_local *lp) int count; u32 cr, sr; - cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET); - cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK); + spin_lock_irq(&lp->rx_cr_lock); + + cr = lp->rx_dma_cr & ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK); axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr); + lp->rx_dma_started = false; + + spin_unlock_irq(&lp->rx_cr_lock); synchronize_irq(lp->rx_irq); - cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET); - cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK); + spin_lock_irq(&lp->tx_cr_lock); + + cr = lp->tx_dma_cr & ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK); axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr); + lp->tx_dma_started = false; + + spin_unlock_irq(&lp->tx_cr_lock); synchronize_irq(lp->tx_irq); /* Give DMAs a chance to halt gracefully */ @@ -962,6 +1002,7 @@ static int axienet_tx_poll(struct napi_struct *napi, int budget) &size, budget); if (packets) { + netdev_completed_queue(ndev, packets, size); u64_stats_update_begin(&lp->tx_stat_sync); u64_stats_add(&lp->tx_packets, packets); u64_stats_add(&lp->tx_bytes, size); @@ -979,7 +1020,9 @@ static int axienet_tx_poll(struct napi_struct *napi, int budget) * cause an immediate interrupt if any TX packets are * already pending. */ + spin_lock_irq(&lp->tx_cr_lock); axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr); + spin_unlock_irq(&lp->tx_cr_lock); } return packets; } @@ -1083,6 +1126,7 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev) if (++new_tail_ptr >= lp->tx_bd_num) new_tail_ptr = 0; WRITE_ONCE(lp->tx_bd_tail, new_tail_ptr); + netdev_sent_queue(ndev, skb->len); /* Start the transfer */ axienet_dma_out_addr(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p); @@ -1241,11 +1285,25 @@ static int axienet_rx_poll(struct napi_struct *napi, int budget) axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, tail_p); if (packets < budget && napi_complete_done(napi, packets)) { + if (READ_ONCE(lp->rx_dim_enabled)) { + struct dim_sample sample = { + .time = ktime_get(), + /* Safe because we are the only writer */ + .pkt_ctr = u64_stats_read(&lp->rx_packets), + .byte_ctr = u64_stats_read(&lp->rx_bytes), + .event_ctr = READ_ONCE(lp->rx_irqs), + }; + + net_dim(&lp->rx_dim, &sample); + } + /* Re-enable RX completion interrupts. This should * cause an immediate interrupt if any RX packets are * already pending. */ + spin_lock_irq(&lp->rx_cr_lock); axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr); + spin_unlock_irq(&lp->rx_cr_lock); } return packets; } @@ -1283,11 +1341,14 @@ static irqreturn_t axienet_tx_irq(int irq, void *_ndev) /* Disable further TX completion interrupts and schedule * NAPI to handle the completions. 
*/ - u32 cr = lp->tx_dma_cr; - - cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK); if (napi_schedule_prep(&lp->napi_tx)) { + u32 cr; + + spin_lock(&lp->tx_cr_lock); + cr = lp->tx_dma_cr; + cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK); axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr); + spin_unlock(&lp->tx_cr_lock); __napi_schedule(&lp->napi_tx); } } @@ -1328,11 +1389,16 @@ static irqreturn_t axienet_rx_irq(int irq, void *_ndev) /* Disable further RX completion interrupts and schedule * NAPI receive. */ - u32 cr = lp->rx_dma_cr; - - cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK); + WRITE_ONCE(lp->rx_irqs, READ_ONCE(lp->rx_irqs) + 1); if (napi_schedule_prep(&lp->napi_rx)) { + u32 cr; + + spin_lock(&lp->rx_cr_lock); + cr = lp->rx_dma_cr; + cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK); axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr); + spin_unlock(&lp->rx_cr_lock); + __napi_schedule(&lp->napi_rx); } } @@ -1625,6 +1691,7 @@ static int axienet_open(struct net_device *ndev) if (lp->eth_irq > 0) free_irq(lp->eth_irq, ndev); err_phy: + cancel_work_sync(&lp->rx_dim.work); cancel_delayed_work_sync(&lp->stats_work); phylink_stop(lp->phylink); phylink_disconnect_phy(lp->phylink); @@ -1654,6 +1721,7 @@ static int axienet_stop(struct net_device *ndev) napi_disable(&lp->napi_rx); } + cancel_work_sync(&lp->rx_dim.work); cancel_delayed_work_sync(&lp->stats_work); phylink_stop(lp->phylink); @@ -1685,6 +1753,7 @@ static int axienet_stop(struct net_device *ndev) dma_release_channel(lp->tx_chan); } + netdev_reset_queue(ndev); axienet_iow(lp, XAE_IE_OFFSET, 0); if (lp->eth_irq > 0) @@ -1998,6 +2067,87 @@ axienet_ethtools_set_pauseparam(struct net_device *ndev, return phylink_ethtool_set_pauseparam(lp->phylink, epauseparm); } +/** + * axienet_update_coalesce_rx() - Set RX CR + * @lp: Device private data + * @cr: Value to write to the RX CR + * @mask: Bits to set from @cr + */ +static void axienet_update_coalesce_rx(struct axienet_local *lp, u32 cr, + u32 mask) +{ + spin_lock_irq(&lp->rx_cr_lock); + lp->rx_dma_cr &= ~mask; + lp->rx_dma_cr |= cr; + /* If DMA isn't started, then the settings will be applied the next + * time dma_start() is called. 
+ */ + if (lp->rx_dma_started) { + u32 reg = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET); + + /* Don't enable IRQs if they are disabled by NAPI */ + if (reg & XAXIDMA_IRQ_ALL_MASK) + cr = lp->rx_dma_cr; + else + cr = lp->rx_dma_cr & ~XAXIDMA_IRQ_ALL_MASK; + axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr); + } + spin_unlock_irq(&lp->rx_cr_lock); +} + +/** + * axienet_dim_coalesce_count_rx() - RX coalesce count for DIM + * @lp: Device private data + */ +static u32 axienet_dim_coalesce_count_rx(struct axienet_local *lp) +{ + return min(1 << (lp->rx_dim.profile_ix << 1), 255); +} + +/** + * axienet_rx_dim_work() - Adjust RX DIM settings + * @work: The work struct + */ +static void axienet_rx_dim_work(struct work_struct *work) +{ + struct axienet_local *lp = + container_of(work, struct axienet_local, rx_dim.work); + u32 cr = axienet_calc_cr(lp, axienet_dim_coalesce_count_rx(lp), 0); + u32 mask = XAXIDMA_COALESCE_MASK | XAXIDMA_IRQ_IOC_MASK | + XAXIDMA_IRQ_ERROR_MASK; + + axienet_update_coalesce_rx(lp, cr, mask); + lp->rx_dim.state = DIM_START_MEASURE; +} + +/** + * axienet_update_coalesce_tx() - Set TX CR + * @lp: Device private data + * @cr: Value to write to the TX CR + * @mask: Bits to set from @cr + */ +static void axienet_update_coalesce_tx(struct axienet_local *lp, u32 cr, + u32 mask) +{ + spin_lock_irq(&lp->tx_cr_lock); + lp->tx_dma_cr &= ~mask; + lp->tx_dma_cr |= cr; + /* If DMA isn't started, then the settings will be applied the next + * time dma_start() is called. + */ + if (lp->tx_dma_started) { + u32 reg = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET); + + /* Don't enable IRQs if they are disabled by NAPI */ + if (reg & XAXIDMA_IRQ_ALL_MASK) + cr = lp->tx_dma_cr; + else + cr = lp->tx_dma_cr & ~XAXIDMA_IRQ_ALL_MASK; + axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr); + } + spin_unlock_irq(&lp->tx_cr_lock); +} + /** * axienet_ethtools_get_coalesce - Get DMA interrupt coalescing count. 
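axienet_dim_coalesce_count_rx() above maps the DIM profile index to a frame count of 1 << (2 * profile_ix), clamped to the 8-bit coalesce field:

/* Worked mapping of DIM profile index to rx_max_coalesced_frames:
 *   ix 0 -> 1, ix 1 -> 4, ix 2 -> 16, ix 3 -> 64,
 *   ix 4 -> 256, clamped to 255
 */

With the ethtool hooks below, a command such as "ethtool -C ethX adaptive-rx on rx-usecs 16" lets DIM steer the frame count while the delay timeout stays fixed, and, unlike before this patch, the settings can be changed while the interface is running.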
* @ndev: Pointer to net_device structure @@ -2018,11 +2168,23 @@ axienet_ethtools_get_coalesce(struct net_device *ndev, struct netlink_ext_ack *extack) { struct axienet_local *lp = netdev_priv(ndev); - - ecoalesce->rx_max_coalesced_frames = lp->coalesce_count_rx; - ecoalesce->rx_coalesce_usecs = lp->coalesce_usec_rx; - ecoalesce->tx_max_coalesced_frames = lp->coalesce_count_tx; - ecoalesce->tx_coalesce_usecs = lp->coalesce_usec_tx; + u32 cr; + + ecoalesce->use_adaptive_rx_coalesce = lp->rx_dim_enabled; + + spin_lock_irq(&lp->rx_cr_lock); + cr = lp->rx_dma_cr; + spin_unlock_irq(&lp->rx_cr_lock); + axienet_coalesce_params(lp, cr, + &ecoalesce->rx_max_coalesced_frames, + &ecoalesce->rx_coalesce_usecs); + + spin_lock_irq(&lp->tx_cr_lock); + cr = lp->tx_dma_cr; + spin_unlock_irq(&lp->tx_cr_lock); + axienet_coalesce_params(lp, cr, + &ecoalesce->tx_max_coalesced_frames, + &ecoalesce->tx_coalesce_usecs); return 0; } @@ -2046,12 +2208,9 @@ axienet_ethtools_set_coalesce(struct net_device *ndev, struct netlink_ext_ack *extack) { struct axienet_local *lp = netdev_priv(ndev); - - if (netif_running(ndev)) { - NL_SET_ERR_MSG(extack, - "Please stop netif before applying configuration"); - return -EBUSY; - } + bool new_dim = ecoalesce->use_adaptive_rx_coalesce; + bool old_dim = lp->rx_dim_enabled; + u32 cr, mask = ~XAXIDMA_CR_RUNSTOP_MASK; if (ecoalesce->rx_max_coalesced_frames > 255 || ecoalesce->tx_max_coalesced_frames > 255) { @@ -2065,7 +2224,7 @@ axienet_ethtools_set_coalesce(struct net_device *ndev, return -EINVAL; } - if ((ecoalesce->rx_max_coalesced_frames > 1 && + if (((ecoalesce->rx_max_coalesced_frames > 1 || new_dim) && !ecoalesce->rx_coalesce_usecs) || (ecoalesce->tx_max_coalesced_frames > 1 && !ecoalesce->tx_coalesce_usecs)) { @@ -2074,11 +2233,31 @@ axienet_ethtools_set_coalesce(struct net_device *ndev, return -EINVAL; } - lp->coalesce_count_rx = ecoalesce->rx_max_coalesced_frames; - lp->coalesce_usec_rx = ecoalesce->rx_coalesce_usecs; - lp->coalesce_count_tx = ecoalesce->tx_max_coalesced_frames; - lp->coalesce_usec_tx = ecoalesce->tx_coalesce_usecs; + if (new_dim && !old_dim) { + cr = axienet_calc_cr(lp, axienet_dim_coalesce_count_rx(lp), + ecoalesce->rx_coalesce_usecs); + } else if (!new_dim) { + if (old_dim) { + WRITE_ONCE(lp->rx_dim_enabled, false); + napi_synchronize(&lp->napi_rx); + flush_work(&lp->rx_dim.work); + } + + cr = axienet_calc_cr(lp, ecoalesce->rx_max_coalesced_frames, + ecoalesce->rx_coalesce_usecs); + } else { + /* Dummy value for count just to calculate timer */ + cr = axienet_calc_cr(lp, 2, ecoalesce->rx_coalesce_usecs); + mask = XAXIDMA_DELAY_MASK | XAXIDMA_IRQ_DELAY_MASK; + } + + axienet_update_coalesce_rx(lp, cr, mask); + if (new_dim && !old_dim) + WRITE_ONCE(lp->rx_dim_enabled, true); + cr = axienet_calc_cr(lp, ecoalesce->tx_max_coalesced_frames, + ecoalesce->tx_coalesce_usecs); + axienet_update_coalesce_tx(lp, cr, ~XAXIDMA_CR_RUNSTOP_MASK); return 0; } @@ -2316,7 +2495,8 @@ axienet_ethtool_get_rmon_stats(struct net_device *dev, static const struct ethtool_ops axienet_ethtool_ops = { .supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES | - ETHTOOL_COALESCE_USECS, + ETHTOOL_COALESCE_USECS | + ETHTOOL_COALESCE_USE_ADAPTIVE_RX, .get_drvinfo = axienet_ethtools_get_drvinfo, .get_regs_len = axienet_ethtools_get_regs_len, .get_regs = axienet_ethtools_get_regs, @@ -2499,6 +2679,7 @@ static void axienet_dma_err_handler(struct work_struct *work) ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN)); axienet_dma_stop(lp); + netdev_reset_queue(ndev); for (i = 0; i < lp->tx_bd_num; i++) { 
cur_p = &lp->tx_bd_v[i]; @@ -2858,10 +3039,15 @@ static int axienet_probe(struct platform_device *pdev) axienet_set_mac_address(ndev, NULL); } - lp->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD; - lp->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD; - lp->coalesce_usec_rx = XAXIDMA_DFT_RX_USEC; - lp->coalesce_usec_tx = XAXIDMA_DFT_TX_USEC; + spin_lock_init(&lp->rx_cr_lock); + spin_lock_init(&lp->tx_cr_lock); + INIT_WORK(&lp->rx_dim.work, axienet_rx_dim_work); + lp->rx_dim_enabled = true; + lp->rx_dim.profile_ix = 1; + lp->rx_dma_cr = axienet_calc_cr(lp, axienet_dim_coalesce_count_rx(lp), + XAXIDMA_DFT_RX_USEC); + lp->tx_dma_cr = axienet_calc_cr(lp, XAXIDMA_DFT_TX_THRESHOLD, + XAXIDMA_DFT_TX_USEC); ret = axienet_mdio_setup(lp); if (ret) @@ -2891,7 +3077,6 @@ static int axienet_probe(struct platform_device *pdev) } of_node_put(np); lp->pcs.ops = &axienet_pcs_ops; - lp->pcs.neg_mode = true; lp->pcs.poll = true; } diff --git a/drivers/net/ethernet/yunsilicon/Kconfig b/drivers/net/ethernet/yunsilicon/Kconfig new file mode 100644 index 0000000000000..1bf5b7fcf1ba1 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/Kconfig @@ -0,0 +1,26 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (C) 2021-2025, Shanghai Yunsilicon Technology Co., Ltd. +# All rights reserved. +# Yunsilicon driver configuration +# + +config NET_VENDOR_YUNSILICON + bool "Yunsilicon devices" + default y + depends on PCI + depends on ARM64 || X86_64 || COMPILE_TEST + help + If you have a network (Ethernet) device belonging to this class, + say Y. + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about Yunsilicon cards. If you say Y, you will be asked + for your specific card in the following questions. + +if NET_VENDOR_YUNSILICON + +source "drivers/net/ethernet/yunsilicon/xsc/net/Kconfig" +source "drivers/net/ethernet/yunsilicon/xsc/pci/Kconfig" + +endif # NET_VENDOR_YUNSILICON diff --git a/drivers/net/ethernet/yunsilicon/Makefile b/drivers/net/ethernet/yunsilicon/Makefile new file mode 100644 index 0000000000000..86e241b015f7c --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/Makefile @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (C) 2021-2025, Shanghai Yunsilicon Technology Co., Ltd. +# All rights reserved. +# Makefile for the Yunsilicon device drivers. +# + +obj-$(CONFIG_YUNSILICON_XSC_ETH) += xsc/net/ +obj-$(CONFIG_YUNSILICON_XSC_PCI) += xsc/pci/ diff --git a/drivers/net/ethernet/yunsilicon/xsc/common/xsc_auto_hw.h b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_auto_hw.h new file mode 100644 index 0000000000000..d9d0419e315b8 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_auto_hw.h @@ -0,0 +1,94 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2021-2025, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */ + +#ifndef __XSC_HW_H +#define __XSC_HW_H + +/* hif_irq_csr_defines.h */ +#define HIF_IRQ_TBL2IRQ_TBL_RD_DONE_INT_MSIX_REG_ADDR 0xa1100070 + +/* hif_cpm_csr_defines.h */ +#define HIF_CPM_LOCK_GET_REG_ADDR 0xa0000104 +#define HIF_CPM_LOCK_PUT_REG_ADDR 0xa0000108 +#define HIF_CPM_LOCK_AVAIL_REG_ADDR 0xa000010c +#define HIF_CPM_IDA_DATA_MEM_ADDR 0xa0000800 +#define HIF_CPM_IDA_CMD_REG_ADDR 0xa0000020 +#define HIF_CPM_IDA_ADDR_REG_ADDR 0xa0000080 +#define HIF_CPM_IDA_BUSY_REG_ADDR 0xa0000100 +#define HIF_CPM_IDA_CMD_REG_IDA_IDX_WIDTH 5 +#define HIF_CPM_IDA_CMD_REG_IDA_LEN_WIDTH 4 +#define HIF_CPM_IDA_CMD_REG_IDA_R0W1_WIDTH 1 +#define HIF_CPM_LOCK_GET_REG_LOCK_VLD_SHIFT 5 +#define HIF_CPM_LOCK_GET_REG_LOCK_IDX_MASK 0x1f +#define HIF_CPM_IDA_ADDR_REG_STRIDE 0x4 +#define HIF_CPM_CHIP_VERSION_H_REG_ADDR 0xa0000010 + +/* mmc_csr_defines.h */ +#define MMC_MPT_TBL_MEM_DEPTH 32768 +#define MMC_MTT_TBL_MEM_DEPTH 262144 +#define MMC_MPT_TBL_MEM_WIDTH 256 +#define MMC_MTT_TBL_MEM_WIDTH 64 +#define MMC_MPT_TBL_MEM_ADDR 0xa4100000 +#define MMC_MTT_TBL_MEM_ADDR 0xa4200000 + +/* clsf_dma_csr_defines.h */ +#define CLSF_DMA_DMA_UL_BUSY_REG_ADDR 0xa6010048 +#define CLSF_DMA_DMA_DL_DONE_REG_ADDR 0xa60100d0 +#define CLSF_DMA_DMA_DL_SUCCESS_REG_ADDR 0xa60100c0 +#define CLSF_DMA_ERR_CODE_CLR_REG_ADDR 0xa60100d4 +#define CLSF_DMA_DMA_RD_TABLE_ID_REG_DMA_RD_TBL_ID_MASK 0x7f +#define CLSF_DMA_DMA_RD_TABLE_ID_REG_ADDR 0xa6010020 +#define CLSF_DMA_DMA_RD_ADDR_REG_DMA_RD_BURST_NUM_SHIFT 16 +#define CLSF_DMA_DMA_RD_ADDR_REG_ADDR 0xa6010024 +#define CLSF_DMA_INDRW_RD_START_REG_ADDR 0xa6010028 + +/* hif_tbl_csr_defines.h */ +#define HIF_TBL_TBL_DL_BUSY_REG_ADDR 0xa1060030 +#define HIF_TBL_TBL_DL_REQ_REG_TBL_DL_LEN_SHIFT 12 +#define HIF_TBL_TBL_DL_REQ_REG_TBL_DL_HOST_ID_SHIFT 11 +#define HIF_TBL_TBL_DL_REQ_REG_ADDR 0xa1060020 +#define HIF_TBL_TBL_DL_ADDR_L_REG_TBL_DL_ADDR_L_MASK 0xffffffff +#define HIF_TBL_TBL_DL_ADDR_L_REG_ADDR 0xa1060024 +#define HIF_TBL_TBL_DL_ADDR_H_REG_TBL_DL_ADDR_H_MASK 0xffffffff +#define HIF_TBL_TBL_DL_ADDR_H_REG_ADDR 0xa1060028 +#define HIF_TBL_TBL_DL_START_REG_ADDR 0xa106002c +#define HIF_TBL_TBL_UL_REQ_REG_TBL_UL_HOST_ID_SHIFT 11 +#define HIF_TBL_TBL_UL_REQ_REG_ADDR 0xa106007c +#define HIF_TBL_TBL_UL_ADDR_L_REG_TBL_UL_ADDR_L_MASK 0xffffffff +#define HIF_TBL_TBL_UL_ADDR_L_REG_ADDR 0xa1060080 +#define HIF_TBL_TBL_UL_ADDR_H_REG_TBL_UL_ADDR_H_MASK 0xffffffff +#define HIF_TBL_TBL_UL_ADDR_H_REG_ADDR 0xa1060084 +#define HIF_TBL_TBL_UL_START_REG_ADDR 0xa1060088 +#define HIF_TBL_MSG_RDY_REG_ADDR 0xa1060044 + +/* hif_cmdqm_csr_defines.h */ +#define HIF_CMDQM_HOST_REQ_PID_MEM_ADDR 0xa1026000 +#define HIF_CMDQM_HOST_REQ_CID_MEM_ADDR 0xa1028000 +#define HIF_CMDQM_HOST_RSP_PID_MEM_ADDR 0xa102e000 +#define HIF_CMDQM_HOST_RSP_CID_MEM_ADDR 0xa1030000 +#define HIF_CMDQM_HOST_REQ_BUF_BASE_H_ADDR_MEM_ADDR 0xa1022000 +#define HIF_CMDQM_HOST_REQ_BUF_BASE_L_ADDR_MEM_ADDR 0xa1024000 +#define HIF_CMDQM_HOST_RSP_BUF_BASE_H_ADDR_MEM_ADDR 0xa102a000 +#define HIF_CMDQM_HOST_RSP_BUF_BASE_L_ADDR_MEM_ADDR 0xa102c000 +#define HIF_CMDQM_VECTOR_ID_MEM_ADDR 0xa1034000 +#define HIF_CMDQM_Q_ELEMENT_SZ_REG_ADDR 0xa1020020 +#define HIF_CMDQM_HOST_Q_DEPTH_REG_ADDR 0xa1020028 +#define HIF_CMDQM_HOST_VF_ERR_STS_MEM_ADDR 0xa1032000 + +/* PSV use */ +/* hif_irq_csr_defines.h */ +#define HIF_IRQ_CONTROL_TBL_MEM_ADDR 0xa1102000 +#define HIF_IRQ_INT_DB_REG_ADDR 0xa11000b4 +#define HIF_IRQ_CFG_VECTOR_TABLE_BUSY_REG_ADDR 0xa1100114 +#define HIF_IRQ_CFG_VECTOR_TABLE_ADDR_REG_ADDR 0xa11000f0 +#define HIF_IRQ_CFG_VECTOR_TABLE_CMD_REG_ADDR 
0xa11000ec +#define HIF_IRQ_CFG_VECTOR_TABLE_MSG_LADDR_REG_ADDR 0xa11000f4 +#define HIF_IRQ_CFG_VECTOR_TABLE_MSG_UADDR_REG_ADDR 0xa11000f8 +#define HIF_IRQ_CFG_VECTOR_TABLE_MSG_DATA_REG_ADDR 0xa11000fc +#define HIF_IRQ_CFG_VECTOR_TABLE_CTRL_REG_ADDR 0xa1100100 +#define HIF_IRQ_CFG_VECTOR_TABLE_START_REG_ADDR 0xa11000e8 + +#endif /* __XSC_HW_H */ diff --git a/drivers/net/ethernet/yunsilicon/xsc/common/xsc_cmd.h b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_cmd.h new file mode 100644 index 0000000000000..b830e2a633e0c --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_cmd.h @@ -0,0 +1,630 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021-2025, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#ifndef __XSC_CMD_H +#define __XSC_CMD_H + +#define XSC_CMDQ_VERSION 0x32 + +#define XSC_BOARD_SN_LEN 32 + +enum { + XSC_CMD_STAT_OK = 0x0, + XSC_CMD_STAT_INT_ERR = 0x1, + XSC_CMD_STAT_BAD_OP_ERR = 0x2, + XSC_CMD_STAT_BAD_PARAM_ERR = 0x3, + XSC_CMD_STAT_BAD_SYS_STATE_ERR = 0x4, + XSC_CMD_STAT_BAD_RES_ERR = 0x5, + XSC_CMD_STAT_RES_BUSY = 0x6, + XSC_CMD_STAT_LIM_ERR = 0x8, + XSC_CMD_STAT_BAD_RES_STATE_ERR = 0x9, + XSC_CMD_STAT_IX_ERR = 0xa, + XSC_CMD_STAT_NO_RES_ERR = 0xf, + XSC_CMD_STAT_BAD_QP_STATE_ERR = 0x10, + XSC_CMD_STAT_BAD_PKT_ERR = 0x30, + XSC_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR = 0x40, + XSC_CMD_STAT_BAD_INP_LEN_ERR = 0x50, + XSC_CMD_STAT_BAD_OUTP_LEN_ERR = 0x51, +}; + +enum { + XSC_CMD_OP_QUERY_HCA_CAP = 0x100, + XSC_CMD_OP_QUERY_CMDQ_VERSION = 0x10a, + XSC_CMD_OP_FUNCTION_RESET = 0x10c, + XSC_CMD_OP_DUMMY = 0x10d, + XSC_CMD_OP_QUERY_GUID = 0x113, + XSC_CMD_OP_ACTIVATE_HW_CONFIG = 0x114, + + XSC_CMD_OP_CREATE_EQ = 0x301, + XSC_CMD_OP_DESTROY_EQ = 0x302, + + XSC_CMD_OP_CREATE_CQ = 0x400, + XSC_CMD_OP_DESTROY_CQ = 0x401, + + XSC_CMD_OP_CREATE_QP = 0x500, + XSC_CMD_OP_DESTROY_QP = 0x501, + XSC_CMD_OP_RST2INIT_QP = 0x502, + XSC_CMD_OP_INIT2RTR_QP = 0x503, + XSC_CMD_OP_RTR2RTS_QP = 0x504, + XSC_CMD_OP_RTS2RTS_QP = 0x505, + XSC_CMD_OP_SQERR2RTS_QP = 0x506, + XSC_CMD_OP_2ERR_QP = 0x507, + XSC_CMD_OP_RTS2SQD_QP = 0x508, + XSC_CMD_OP_SQD2RTS_QP = 0x509, + XSC_CMD_OP_2RST_QP = 0x50a, + XSC_CMD_OP_INIT2INIT_QP = 0x50e, + XSC_CMD_OP_CREATE_MULTI_QP = 0x515, + + XSC_CMD_OP_MODIFY_RAW_QP = 0x81f, + + XSC_CMD_OP_ENABLE_NIC_HCA = 0x810, + XSC_CMD_OP_DISABLE_NIC_HCA = 0x811, + + XSC_CMD_OP_QUERY_VPORT_STATE = 0x822, + XSC_CMD_OP_QUERY_EVENT_TYPE = 0x831, + + XSC_CMD_OP_ENABLE_MSIX = 0x850, + + XSC_CMD_OP_SET_MTU = 0x1100, + XSC_CMD_OP_QUERY_ETH_MAC = 0X1101, + + XSC_CMD_OP_SET_PORT_ADMIN_STATUS = 0x1801, + + XSC_CMD_OP_MAX +}; + +enum xsc_dma_direct { + XSC_DMA_DIR_TO_MAC, + XSC_DMA_DIR_READ, + XSC_DMA_DIR_WRITE, + XSC_DMA_DIR_LOOPBACK, + XSC_DMA_DIR_MAX +}; + +/* hw feature bitmap, 32bit */ +enum xsc_hw_feature_flag { + XSC_HW_RDMA_SUPPORT = BIT(0), + XSC_HW_PFC_PRIO_STATISTIC_SUPPORT = BIT(1), + XSC_HW_PFC_STALL_STATS_SUPPORT = BIT(3), + XSC_HW_RDMA_CM_SUPPORT = BIT(5), + + XSC_HW_LAST_FEATURE = BIT(31) +}; + +struct xsc_inbox_hdr { + __be16 opcode; + u8 rsvd[4]; + __be16 ver; /* cmd version */ +}; + +struct xsc_outbox_hdr { + u8 status; + u8 rsvd[5]; + __be16 ver; +}; + +/*CQ mbox*/ +struct xsc_cq_context { + __be16 eqn; /* event queue number */ + __be16 pa_num; /* physical address count in the ctx */ + __be16 glb_func_id; + u8 log_cq_sz; + u8 cq_type; +}; + +struct xsc_create_cq_mbox_in { + struct xsc_inbox_hdr hdr; + struct xsc_cq_context ctx; + __be64 pas[]; /* physical address list */ +}; + +struct xsc_create_cq_mbox_out { + struct xsc_outbox_hdr 
+ +/* CQ mbox */ +struct xsc_cq_context { + __be16 eqn; /* event queue number */ + __be16 pa_num; /* physical address count in the ctx */ + __be16 glb_func_id; + u8 log_cq_sz; + u8 cq_type; +}; + +struct xsc_create_cq_mbox_in { + struct xsc_inbox_hdr hdr; + struct xsc_cq_context ctx; + __be64 pas[]; /* physical address list */ +}; + +struct xsc_create_cq_mbox_out { + struct xsc_outbox_hdr hdr; + __be32 cqn; /* completion queue number */ + u8 rsvd[4]; +}; + +struct xsc_destroy_cq_mbox_in { + struct xsc_inbox_hdr hdr; + __be32 cqn; + u8 rsvd[4]; +}; + +struct xsc_destroy_cq_mbox_out { + struct xsc_outbox_hdr hdr; + u8 rsvd[8]; +}; + +/* QP mbox */ +struct xsc_create_qp_request { + __be16 input_qpn; + __be16 pa_num; + u8 qp_type; + u8 log_sq_sz; + u8 log_rq_sz; + u8 dma_direct; + __be32 pdn; /* protect domain number */ + __be16 cqn_send; + __be16 cqn_recv; + __be16 glb_funcid; + /* rsvd, the old logic_port */ + u8 rsvd[2]; + __be64 pas[]; +}; + +struct xsc_create_qp_mbox_in { + struct xsc_inbox_hdr hdr; + struct xsc_create_qp_request req; +}; + +struct xsc_create_qp_mbox_out { + struct xsc_outbox_hdr hdr; + __be32 qpn; /* queue pair number */ + u8 rsvd[4]; +}; + +struct xsc_destroy_qp_mbox_in { + struct xsc_inbox_hdr hdr; + __be32 qpn; + u8 rsvd[4]; +}; + +struct xsc_destroy_qp_mbox_out { + struct xsc_outbox_hdr hdr; + u8 rsvd[8]; +}; + +struct xsc_qp_context { + __be32 remote_qpn; + __be32 cqn_send; + __be32 cqn_recv; + __be32 next_send_psn; + __be32 next_recv_psn; + __be32 pdn; + __be16 src_udp_port; + __be16 path_id; + u8 mtu_mode; + u8 lag_sel; + u8 lag_sel_en; + u8 retry_cnt; + u8 rnr_retry; + u8 dscp; + u8 state; + u8 hop_limit; + u8 dmac[6]; + u8 smac[6]; + __be32 dip[4]; + __be32 sip[4]; + __be16 ip_type; + __be16 grp_id; + u8 vlan_valid; + u8 dci_cfi_prio_sl; + __be16 vlan_id; + u8 qp_out_port; + u8 pcie_no; + __be16 lag_id; + __be16 func_id; + __be16 rsvd; +}; + +struct xsc_modify_qp_mbox_in { + struct xsc_inbox_hdr hdr; + __be32 qpn; + struct xsc_qp_context ctx; + u8 no_need_wait; +}; + +struct xsc_modify_qp_mbox_out { + struct xsc_outbox_hdr hdr; + u8 rsvd[8]; +}; + +struct xsc_create_multiqp_mbox_in { + struct xsc_inbox_hdr hdr; + __be16 qp_num; + u8 qp_type; + u8 rsvd; + __be32 req_len; + u8 data[]; +}; + +struct xsc_create_multiqp_mbox_out { + struct xsc_outbox_hdr hdr; + __be32 qpn_base; +}; + +/* MSIX TABLE mbox */ +struct xsc_msix_table_info_mbox_in { + struct xsc_inbox_hdr hdr; + __be16 index; + u8 rsvd[6]; +}; + +struct xsc_msix_table_info_mbox_out { + struct xsc_outbox_hdr hdr; + __be32 addr_lo; + __be32 addr_hi; + __be32 data; +}; + +/* EQ mbox */ +struct xsc_eq_context { + __be16 vecidx; + __be16 pa_num; + u8 log_eq_sz; + __be16 glb_func_id; + u8 is_async_eq; + u8 rsvd; +}; + +struct xsc_create_eq_mbox_in { + struct xsc_inbox_hdr hdr; + struct xsc_eq_context ctx; + __be64 pas[]; +}; + +struct xsc_create_eq_mbox_out { + struct xsc_outbox_hdr hdr; + __be32 eqn; + u8 rsvd[4]; +}; + +struct xsc_destroy_eq_mbox_in { + struct xsc_inbox_hdr hdr; + __be32 eqn; + u8 rsvd[4]; +}; + +struct xsc_destroy_eq_mbox_out { + struct xsc_outbox_hdr hdr; + u8 rsvd[8]; +}; + +struct xsc_query_eq_mbox_in { + struct xsc_inbox_hdr hdr; + u8 rsvd0[3]; + u8 eqn; + u8 rsvd1[4]; +}; + +struct xsc_query_eq_mbox_out { + struct xsc_outbox_hdr hdr; + u8 rsvd[8]; + struct xsc_eq_context ctx; +}; + +struct xsc_query_cq_mbox_in { + struct xsc_inbox_hdr hdr; + __be32 cqn; + u8 rsvd0[4]; +}; + +struct xsc_query_cq_mbox_out { + struct xsc_outbox_hdr hdr; + u8 rsvd0[8]; + struct xsc_cq_context ctx; + u8 rsvd6[16]; + __be64 pas[]; +}; + +struct xsc_cmd_query_cmdq_ver_mbox_in { + struct xsc_inbox_hdr hdr; + u8 rsvd[8]; +}; + +struct xsc_cmd_query_cmdq_ver_mbox_out { + struct xsc_outbox_hdr hdr; + __be16 cmdq_ver; + u8 rsvd[6]; +}; + +struct xsc_cmd_dummy_mbox_in { + struct xsc_inbox_hdr hdr; + u8 rsvd[8]; +}; + +struct xsc_cmd_dummy_mbox_out { + struct xsc_outbox_hdr hdr; + u8 rsvd[8]; +}; + +struct
xsc_fw_version { + u8 fw_version_major; + u8 fw_version_minor; + __be16 fw_version_patch; + __be32 fw_version_tweak; + u8 fw_version_extra_flag; + u8 rsvd[7]; +}; + +struct xsc_hca_cap { + u8 rsvd1[12]; + u8 send_seg_num; + u8 send_wqe_shift; + u8 recv_seg_num; + u8 recv_wqe_shift; + u8 log_max_srq_sz; + u8 log_max_qp_sz; + u8 log_max_mtt; + u8 log_max_qp; + u8 log_max_strq_sz; + u8 log_max_srqs; + u8 rsvd4[2]; + u8 log_max_tso; + u8 log_max_cq_sz; + u8 rsvd6; + u8 log_max_cq; + u8 log_max_eq_sz; + u8 log_max_mkey; + u8 log_max_msix; + u8 log_max_eq; + u8 max_indirection; + u8 log_max_mrw_sz; + u8 log_max_bsf_list_sz; + u8 log_max_klm_list_sz; + u8 rsvd_8_0; + u8 log_max_ra_req_dc; + u8 rsvd_8_1; + u8 log_max_ra_res_dc; + u8 rsvd9; + u8 log_max_ra_req_qp; + u8 log_max_qp_depth; + u8 log_max_ra_res_qp; + __be16 max_vfs; + __be16 raweth_qp_id_end; + __be16 raw_tpe_qp_num; + __be16 max_qp_count; + __be16 raweth_qp_id_base; + u8 rsvd13; + u8 local_ca_ack_delay; + u8 max_num_eqs; + u8 num_ports; + u8 log_max_msg; + u8 mac_port; + __be16 raweth_rss_qp_id_base; + __be16 stat_rate_support; + u8 rsvd16[2]; + __be64 flags; + u8 rsvd17; + u8 uar_sz; + u8 rsvd18; + u8 log_pg_sz; + __be16 bf_log_bf_reg_size; + __be16 msix_base; + __be16 msix_num; + __be16 max_desc_sz_sq; + u8 rsvd20[2]; + __be16 max_desc_sz_rq; + u8 rsvd21[2]; + __be16 max_desc_sz_sq_dc; + u8 rsvd22[4]; + __be16 max_qp_mcg; + u8 rsvd23; + u8 log_max_mcg; + u8 rsvd24; + u8 log_max_pd; + u8 rsvd25; + u8 log_max_xrcd; + u8 rsvd26[40]; + __be32 uar_page_sz; + u8 rsvd27[8]; + __be32 hw_feature_flag; /* enum xsc_hw_feature_flag */ + __be16 pf0_vf_funcid_base; + __be16 pf0_vf_funcid_top; + __be16 pf1_vf_funcid_base; + __be16 pf1_vf_funcid_top; + __be16 pcie0_pf_funcid_base; + __be16 pcie0_pf_funcid_top; + __be16 pcie1_pf_funcid_base; + __be16 pcie1_pf_funcid_top; + u8 log_msx_atomic_size_qp; + u8 pcie_host; + u8 rsvd28; + u8 log_msx_atomic_size_dc; + u8 board_sn[XSC_BOARD_SN_LEN]; + u8 max_tc; + u8 mac_bit; + __be16 funcid_to_logic_port; + u8 rsvd29[6]; + u8 nif_port_num; + u8 reg_mr_via_cmdq; + __be32 hca_core_clock; + __be32 max_rwq_indirection_tables; /* rss_caps */ + __be32 max_rwq_indirection_table_size; /* rss_caps */ + __be32 chip_ver_h; + __be32 chip_ver_m; + __be32 chip_ver_l; + __be32 hotfix_num; + __be32 feature_flag; + __be32 rx_pkt_len_max; + __be32 glb_func_id; + __be64 tx_db; + __be64 rx_db; + __be64 complete_db; + __be64 complete_reg; + __be64 event_db; + __be32 qp_rate_limit_min; + __be32 qp_rate_limit_max; + struct xsc_fw_version fw_ver; + u8 lag_logic_port_ofst; +}; + +struct xsc_cmd_query_hca_cap_mbox_in { + struct xsc_inbox_hdr hdr; + __be16 cpu_num; + u8 rsvd[6]; +}; + +struct xsc_cmd_query_hca_cap_mbox_out { + struct xsc_outbox_hdr hdr; + u8 rsvd0[8]; + struct xsc_hca_cap hca_cap; +}; + +struct xsc_query_vport_state_out { + struct xsc_outbox_hdr hdr; + u8 admin_state:4; + u8 state:4; +}; + +struct xsc_query_vport_state_in { + struct xsc_inbox_hdr hdr; + __be32 data0; +#define XSC_QUERY_VPORT_OTHER_VPORT BIT(0) +#define XSC_QUERY_VPORT_VPORT_NUM_MASK GENMASK(16, 1) +}; + +enum { + XSC_CMD_EVENT_RESP_CHANGE_LINK = BIT(0), + XSC_CMD_EVENT_RESP_TEMP_WARN = BIT(1), + XSC_CMD_EVENT_RESP_OVER_TEMP_PROTECTION = BIT(2), +}; + +struct xsc_event_resp { + u8 resp_cmd_type; +}; + +struct xsc_event_query_type_mbox_in { + struct xsc_inbox_hdr hdr; + u8 rsvd[2]; +}; + +struct xsc_event_query_type_mbox_out { + struct xsc_outbox_hdr hdr; + struct xsc_event_resp ctx; +}; + +struct xsc_modify_raw_qp_request { + __be16 qpn; + __be16 lag_id; 
+ __be16 func_id; + u8 dma_direct; + u8 prio; + u8 qp_out_port; + u8 rsvd[7]; +}; + +struct xsc_modify_raw_qp_mbox_in { + struct xsc_inbox_hdr hdr; + u8 pcie_no; + u8 rsvd[7]; + struct xsc_modify_raw_qp_request req; +}; + +struct xsc_modify_raw_qp_mbox_out { + struct xsc_outbox_hdr hdr; + u8 rsvd[8]; +}; + +struct xsc_set_mtu_mbox_in { + struct xsc_inbox_hdr hdr; + __be16 mtu; + __be16 rx_buf_sz_min; + u8 mac_port; + u8 rsvd; +}; + +struct xsc_set_mtu_mbox_out { + struct xsc_outbox_hdr hdr; +}; + +struct xsc_query_eth_mac_mbox_in { + struct xsc_inbox_hdr hdr; + u8 index; +}; + +struct xsc_query_eth_mac_mbox_out { + struct xsc_outbox_hdr hdr; + u8 mac[ETH_ALEN]; +}; + +enum { + XSC_TBM_CAP_HASH_PPH = 0, + XSC_TBM_CAP_RSS, + XSC_TBM_CAP_PP_BYPASS, + XSC_TBM_CAP_PCT_DROP_CONFIG, +}; + +struct xsc_nic_attr { + __be16 caps; + __be16 caps_mask; + u8 mac_addr[ETH_ALEN]; +}; + +struct xsc_rss_attr { + u8 rss_en; + u8 hfunc; + __be16 rqn_base; + __be16 rqn_num; + __be32 hash_tmpl; +}; + +struct xsc_cmd_enable_nic_hca_mbox_in { + struct xsc_inbox_hdr hdr; + struct xsc_nic_attr nic; + struct xsc_rss_attr rss; +}; + +struct xsc_cmd_enable_nic_hca_mbox_out { + struct xsc_outbox_hdr hdr; + u8 rsvd0[2]; +}; + +struct xsc_nic_dis_attr { + __be16 caps; +}; + +struct xsc_cmd_disable_nic_hca_mbox_in { + struct xsc_inbox_hdr hdr; + struct xsc_nic_dis_attr nic; +}; + +struct xsc_cmd_disable_nic_hca_mbox_out { + struct xsc_outbox_hdr hdr; + u8 rsvd0[4]; +}; + +struct xsc_function_reset_mbox_in { + struct xsc_inbox_hdr hdr; + __be16 glb_func_id; + u8 rsvd[6]; +}; + +struct xsc_function_reset_mbox_out { + struct xsc_outbox_hdr hdr; + u8 rsvd[8]; +}; + +struct xsc_cmd_query_guid_mbox_in { + struct xsc_inbox_hdr hdr; + u8 rsvd[8]; +}; + +struct xsc_cmd_query_guid_mbox_out { + struct xsc_outbox_hdr hdr; + __be64 guid; +}; + +struct xsc_cmd_activate_hw_config_mbox_in { + struct xsc_inbox_hdr hdr; + u8 rsvd[8]; +}; + +struct xsc_cmd_activate_hw_config_mbox_out { + struct xsc_outbox_hdr hdr; + u8 rsvd[8]; +}; + +struct xsc_event_set_port_admin_status_mbox_in { + struct xsc_inbox_hdr hdr; + __be16 status; +}; + +struct xsc_event_set_port_admin_status_mbox_out { + struct xsc_outbox_hdr hdr; + __be32 status; +}; + +#endif /* __XSC_CMD_H */ diff --git a/drivers/net/ethernet/yunsilicon/xsc/common/xsc_cmdq.h b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_cmdq.h new file mode 100644 index 0000000000000..0b29fc2f7b667 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_cmdq.h @@ -0,0 +1,234 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021-2025, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */ + +#ifndef __XSC_CMDQ_H +#define __XSC_CMDQ_H + +#include + +#include "common/xsc_cmd.h" + +enum { + XSC_CMD_TIMEOUT_MSEC = 10 * 1000, + XSC_CMD_WQ_MAX_NAME = 32, +}; + +enum { + XSC_CMD_DATA, /* print command payload only */ + XSC_CMD_TIME, /* print command execution time */ +}; + +enum { + XSC_MAX_COMMANDS = 32, + XSC_CMD_DATA_BLOCK_SIZE = 512, + XSC_PCI_CMD_XPORT = 7, +}; + +struct xsc_cmd_prot_block { + u8 data[XSC_CMD_DATA_BLOCK_SIZE]; + u8 rsvd0[48]; + __be64 next; + __be32 block_num; + /* init to 0, dma user should change this val to 1 */ + u8 owner_status; + u8 token; + u8 ctrl_sig; + u8 sig; +}; + +struct cache_ent { + /* protect block chain allocations */ + spinlock_t lock; + struct list_head head; +}; + +struct cmd_msg_cache { + struct cache_ent large; + struct cache_ent med; +}; + +#define CMD_FIRST_SIZE 8 +struct xsc_cmd_first { + __be32 data[CMD_FIRST_SIZE]; +}; + +struct xsc_cmd_mailbox { + void *buf; + dma_addr_t dma; + struct xsc_cmd_mailbox *next; +}; + +struct xsc_cmd_msg { + struct list_head list; + struct cache_ent *cache; + u32 len; + struct xsc_cmd_first first; + struct xsc_cmd_mailbox *next; +}; + +#define RSP_FIRST_SIZE 14 +struct xsc_rsp_first { + /* can be larger, xsc_rsp_layout */ + __be32 data[RSP_FIRST_SIZE]; +}; + +struct xsc_rsp_msg { + struct list_head list; + struct cache_ent *cache; + u32 len; + struct xsc_rsp_first first; + struct xsc_cmd_mailbox *next; +}; + +typedef void (*xsc_cmd_cbk_t)(int status, void *context); + +/* hw will use this for some records (e.g. vf_id) */ +struct cmdq_rsv { + u16 vf_id; + u8 rsv[2]; +}; + +/* related to hw; won't change */ +#define CMDQ_ENTRY_SIZE 64 + +struct xsc_cmd_layout { + struct cmdq_rsv rsv0; + __be32 inlen; + __be64 in_ptr; + __be32 in[CMD_FIRST_SIZE]; + __be64 out_ptr; + __be32 outlen; + u8 token; + u8 sig; + u8 idx; + u8 type: 7; + /* reserved for hw; the ARM side checks this bit to make sure the memory was written */ + u8 owner_bit: 1; +}; + +struct xsc_rsp_layout { + struct cmdq_rsv rsv0; + __be32 out[RSP_FIRST_SIZE]; + u8 token; + u8 sig; + u8 idx; + u8 type: 7; + /* reserved for hw; the driver checks this bit to make sure the memory was written */ + u8 owner_bit: 1; +}; + +struct xsc_cmd_work_ent { + struct xsc_cmd_msg *in; + struct xsc_rsp_msg *out; + int idx; + struct completion done; + struct xsc_cmd *cmd; + struct work_struct work; + struct xsc_cmd_layout *lay; + struct xsc_rsp_layout *rsp_lay; + int ret; + u8 status; + u8 token; + struct timespec64 ts1; + struct timespec64 ts2; +}; + +struct xsc_cmd_debug { + struct dentry *dbg_root; + struct dentry *dbg_in; + struct dentry *dbg_out; + struct dentry *dbg_outlen; + struct dentry *dbg_status; + struct dentry *dbg_run; + void *in_msg; + void *out_msg; + u8 status; + u16 inlen; + u16 outlen; +}; + +struct xsc_cmd_stats { + u64 sum; + u64 n; + struct dentry *root; + struct dentry *avg; + struct dentry *count; + /* protect command average calculations */ + spinlock_t lock; +}; + +struct xsc_cmd_reg { + u32 req_pid_addr; + u32 req_cid_addr; + u32 rsp_pid_addr; + u32 rsp_cid_addr; + u32 req_buf_h_addr; + u32 req_buf_l_addr; + u32 rsp_buf_h_addr; + u32 rsp_buf_l_addr; + u32 msix_vec_addr; + u32 element_sz_addr; + u32 q_depth_addr; + u32 interrupt_stat_addr; +}; + +enum xsc_cmd_status { + XSC_CMD_STATUS_NORMAL, + XSC_CMD_STATUS_TIMEDOUT, +}; + +enum xsc_cmd_mode { + XSC_CMD_MODE_POLLING, + XSC_CMD_MODE_EVENTS, +}; + +struct xsc_cmd { + struct xsc_cmd_reg reg; + void *cmd_buf; + void *cq_buf; + dma_addr_t dma; + dma_addr_t cq_dma; + u16 cmd_pid; + u16 cq_cid; + u8 owner_bit; + u8 cmdif_rev;
+ u8 log_sz; + u8 log_stride; + int max_reg_cmds; + int events; + u32 __iomem *vector; + + /* protect command queue allocations */ + spinlock_t alloc_lock; + /* protect token allocations */ + spinlock_t token_lock; + /* protect cmdq req pid doorbell */ + spinlock_t doorbell_lock; + u8 token; + unsigned long cmd_entry_mask; + char wq_name[XSC_CMD_WQ_MAX_NAME]; + struct workqueue_struct *wq; + struct task_struct *cq_task; + /* The semaphore limits the number of working commands + * to max_reg_cmds. Each exec_cmd call does a down + * to acquire the semaphore before execution and an up + * after completion, ensuring no more than max_reg_cmds + * commands run at the same time. + */ + struct semaphore sem; + enum xsc_cmd_mode mode; + struct xsc_cmd_work_ent *ent_arr[XSC_MAX_COMMANDS]; + struct dma_pool *pool; + struct xsc_cmd_debug dbg; + struct cmd_msg_cache cache; + int checksum_disabled; + struct xsc_cmd_stats stats[XSC_CMD_OP_MAX]; + unsigned int irqn; + u8 ownerbit_learned; + u8 cmd_status; +}; + +#endif /* __XSC_CMDQ_H */
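The comment on the semaphore above is the contract for the whole command queue: a down() before execution and an up() after completion bound the number of in-flight commands to max_reg_cmds. A minimal sketch of that bracket, assuming the queued work handler writes the doorbell, fills ent->ret and completes ent->done; xsc_sketch_exec_one() is a hypothetical helper, not part of the patch:

static int xsc_sketch_exec_one(struct xsc_cmd *cmd, struct xsc_cmd_work_ent *ent)
{
	unsigned long tmo = msecs_to_jiffies(XSC_CMD_TIMEOUT_MSEC);
	int ret;

	down(&cmd->sem);	/* blocks once max_reg_cmds commands are in flight */

	init_completion(&ent->done);
	queue_work(cmd->wq, &ent->work);	/* handler runs the command */

	if (!wait_for_completion_timeout(&ent->done, tmo))
		ret = -ETIMEDOUT;
	else
		ret = ent->ret;	/* filled in by the work handler */

	up(&cmd->sem);		/* release the slot for a waiting caller */
	return ret;
}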
diff --git a/drivers/net/ethernet/yunsilicon/xsc/common/xsc_core.h b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_core.h new file mode 100644 index 0000000000000..bc938292a5251 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_core.h @@ -0,0 +1,498 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021-2025, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#ifndef __XSC_CORE_H +#define __XSC_CORE_H + +#include +#include +#include + +#include "common/xsc_cmdq.h" + +#define XSC_PCI_VENDOR_ID 0x1f67 + +#define XSC_MC_PF_DEV_ID 0x1011 +#define XSC_MC_VF_DEV_ID 0x1012 +#define XSC_MC_PF_DEV_ID_DIAMOND 0x1021 + +#define XSC_MF_HOST_PF_DEV_ID 0x1051 +#define XSC_MF_HOST_VF_DEV_ID 0x1052 +#define XSC_MF_SOC_PF_DEV_ID 0x1053 + +#define XSC_MS_PF_DEV_ID 0x1111 +#define XSC_MS_VF_DEV_ID 0x1112 + +#define XSC_MV_HOST_PF_DEV_ID 0x1151 +#define XSC_MV_HOST_VF_DEV_ID 0x1152 +#define XSC_MV_SOC_PF_DEV_ID 0x1153 + +#define PAGE_SHIFT_4K 12 +#define PAGE_SIZE_4K (_AC(1, UL) << PAGE_SHIFT_4K) +#define PAGE_MASK_4K (~(PAGE_SIZE_4K - 1)) + +#define XSC_REG_ADDR(dev, offset) (((dev)->bar) + ((offset) - 0xA0000000)) + +enum { + XSC_MAX_EQ_NAME = 20 +}; + +enum { + XSC_MAX_IRQ_NAME = 32 +}; + +enum { + XSC_MAX_PORTS = 2, +}; + +enum { + XSC_MAX_FW_PORTS = 1, +}; + +enum { + XSC_BF_REGS_PER_PAGE = 4, + XSC_MAX_UAR_PAGES = 1 << 8, + XSC_MAX_UUARS = XSC_MAX_UAR_PAGES * XSC_BF_REGS_PER_PAGE, +}; + +/* alloc */ +struct xsc_buf_list { + void *buf; + dma_addr_t map; +}; + +struct xsc_buf { + struct xsc_buf_list direct; + struct xsc_buf_list *page_list; + unsigned long nbufs; + unsigned long npages; + unsigned long page_shift; + unsigned long size; +}; + +struct xsc_frag_buf { + struct xsc_buf_list *frags; + unsigned long npages; + unsigned long size; + u8 page_shift; +}; + +struct xsc_frag_buf_ctrl { + struct xsc_buf_list *frags; + u32 sz_m1; + u16 frag_sz_m1; + u16 strides_offset; + u8 log_sz; + u8 log_stride; + u8 log_frag_strides; +}; + +/* qp */ +struct xsc_send_wqe_ctrl_seg { + __le32 data0; +#define XSC_WQE_CTRL_SEG_MSG_OPCODE_MASK GENMASK(7, 0) +#define XSC_WQE_CTRL_SEG_WITH_IMM BIT(8) +#define XSC_WQE_CTRL_SEG_CSUM_EN_MASK GENMASK(10, 9) +#define XSC_WQE_CTRL_SEG_DS_DATA_NUM_MASK GENMASK(15, 11) +#define XSC_WQE_CTRL_SEG_WQE_ID_MASK GENMASK(31, 16) + + __le32 msg_len; + __le32 data2; +#define XSC_WQE_CTRL_SEG_HAS_PPH BIT(0) +#define XSC_WQE_CTRL_SEG_SO_TYPE BIT(1) +#define XSC_WQE_CTRL_SEG_SO_DATA_SIZE_MASK GENMASK(15, 2) +#define XSC_WQE_CTRL_SEG_SO_HDR_LEN_MASK GENMASK(31, 24) + + __le32 data3; +#define XSC_WQE_CTRL_SEG_SE BIT(0) +#define XSC_WQE_CTRL_SEG_CE BIT(1) +}; + +struct xsc_wqe_data_seg { + __le32 data0; +#define XSC_WQE_DATA_SEG_SEG_LEN_MASK GENMASK(31, 1) + + __le32 mkey; + __le64 va; +}; + +struct xsc_core_qp { + void (*event)(struct xsc_core_qp *qp, int type); + u32 qpn; + atomic_t refcount; + struct completion free; + int pid; + u16 qp_type; + u16 eth_queue_type; + u16 qp_type_internal; + u16 grp_id; + u8 mac_id; +}; + +struct xsc_qp_table { + spinlock_t lock; /* protect radix tree */ + struct radix_tree_root tree; +}; + +/* cq */ +enum xsc_event { + XSC_EVENT_TYPE_COMP = 0x0, + /* mad */ + XSC_EVENT_TYPE_COMM_EST = 0x02, + XSC_EVENT_TYPE_CQ_ERROR = 0x04, + XSC_EVENT_TYPE_WQ_CATAS_ERROR = 0x05, + XSC_EVENT_TYPE_INTERNAL_ERROR = 0x08, + /* IBV_EVENT_QP_REQ_ERR */ + XSC_EVENT_TYPE_WQ_INVAL_REQ_ERROR = 0x10, + /* IBV_EVENT_QP_ACCESS_ERR */ + XSC_EVENT_TYPE_WQ_ACCESS_ERROR = 0x11, +}; + +struct xsc_cqe { + __le32 data0; +#define XSC_CQE_MSG_OPCD_MASK GENMASK(7, 0) +#define XSC_CQE_ERR_CODE_MASK GENMASK(6, 0) +#define XSC_CQE_IS_ERR BIT(7) +#define XSC_CQE_QP_ID_MASK GENMASK(22, 8) +#define XSC_CQE_SE BIT(24) +#define XSC_CQE_HAS_PPH BIT(25) +#define XSC_CQE_TYPE BIT(26) +#define XSC_CQE_WITH_IMM BIT(27) +#define XSC_CQE_CSUM_ERR_MASK GENMASK(31, 28) + + __le32 imm_data; + __le32 msg_len; + __le32 vni; + __le32 ts_l; + __le16 ts_h; + __le16 wqe_id; + __le32 rsv; + __le32 data1; +#define XSC_CQE_OWNER BIT(31) +}; + +/* cq doorbell bitfields */ +#define XSC_CQ_DB_NEXT_CID_MASK GENMASK(15, 0) +#define XSC_CQ_DB_CQ_ID_MASK GENMASK(30, 16) +#define XSC_CQ_DB_ARM BIT(31) + +struct xsc_core_cq { + u32 cqn; + u32 cqe_sz; + u64 arm_db; + u64 ci_db; + struct xsc_core_device *xdev; + atomic_t refcount; + struct completion free; + unsigned int vector; + unsigned int irqn; + void (*comp)(struct xsc_core_cq *cq); + void (*event)(struct xsc_core_cq *cq, + enum xsc_event); + u32 cons_index; + unsigned int arm_sn; + int pid; + u32 reg_next_cid; + u32 reg_done_pid; + struct xsc_eq *eq; +}; + +struct xsc_cq_table { + spinlock_t lock; /* protect radix tree */ + struct radix_tree_root tree; +}; + +/* eq */ +struct xsc_eq { + struct xsc_core_device *dev; + struct xsc_cq_table cq_table; + /* offset from bar0/2 space start */ + u32 doorbell; + u32 cons_index; + struct xsc_buf buf; + unsigned int irqn; + u32 eqn; + u32 nent; + cpumask_var_t mask; + char name[XSC_MAX_EQ_NAME]; + struct list_head list; + u16 index; +}; + +struct xsc_eq_table { + void __iomem *update_ci; + void __iomem *update_arm_ci; + struct list_head comp_eqs_list; + struct xsc_eq pages_eq; + struct xsc_eq async_eq; + struct xsc_eq cmd_eq; + int num_comp_vectors; + int eq_vec_comp_base; + /* protect EQs list */ + spinlock_t lock; +}; + +/* irq */ +struct xsc_irq_info { + cpumask_var_t mask; + char name[XSC_MAX_IRQ_NAME]; +}; + +/* adev */ +#define XSC_PCI_DRV_NAME "xsc_pci" +#define XSC_ETH_ADEV_NAME "eth" + +struct xsc_adev { + struct auxiliary_device adev; + struct xsc_core_device *xdev; + + int idx; +}; + +/* hw */ +struct xsc_reg_addr { + u64 tx_db; + u64 rx_db; + u64 complete_db; + u64 complete_reg; + u64 event_db; + u64 cpm_get_lock; + u64 cpm_put_lock; + u64 cpm_lock_avail; + u64 cpm_data_mem; + u64 cpm_cmd; + u64 cpm_addr; + u64 cpm_busy; +}; + +struct xsc_board_info { + u32 board_id; + char board_sn[XSC_BOARD_SN_LEN]; + __be64 guid; + u8 guid_valid; + u8 hw_config_activated; +}; + +struct xsc_port_caps { + int gid_table_len; + int pkey_table_len; +}; +
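The packed, little-endian CQE words defined above are decoded with the GENMASK()/FIELD_GET() helpers, as get_cqe_opcode() at the end of this header does for the opcode field. A sketch for the error path, assuming (from the overlapping masks) that bits 6:0 of data0 carry an error code whenever XSC_CQE_IS_ERR is set; xsc_sketch_cqe_err_code() is hypothetical, not part of the patch:

static inline u8 xsc_sketch_cqe_err_code(const struct xsc_cqe *cqe)
{
	u32 data0 = le32_to_cpu(cqe->data0);

	if (!(data0 & XSC_CQE_IS_ERR))
		return 0;	/* successful completion */

	/* bits 6:0 overlap the opcode field and hold the error code */
	return FIELD_GET(XSC_CQE_ERR_CODE_MASK, data0);
}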
+struct xsc_caps { + u8 log_max_eq; + u8 log_max_cq; + u8 log_max_qp; + u8 log_max_mkey; + u8 log_max_pd; + u8 log_max_srq; + u8 log_max_msix; + u32 max_cqes; + u32 max_wqes; + u32 max_sq_desc_sz; + u32 max_rq_desc_sz; + u64 flags; + u16 stat_rate_support; + u32 log_max_msg; + u32 num_ports; + u32 max_ra_res_qp; + u32 max_ra_req_qp; + u32 max_srq_wqes; + u32 bf_reg_size; + u32 bf_regs_per_page; + struct xsc_port_caps port[XSC_MAX_PORTS]; + u8 ext_port_cap[XSC_MAX_PORTS]; + u32 reserved_lkey; + u8 local_ca_ack_delay; + u8 log_max_mcg; + u16 max_qp_mcg; + u32 min_page_sz; + u32 send_ds_num; + u32 send_wqe_shift; + u32 recv_ds_num; + u32 recv_wqe_shift; + u32 rx_pkt_len_max; + + u32 msix_enable:1; + u32 port_type:1; + u32 embedded_cpu:1; + u32 eswitch_manager:1; + u32 ecpf_vport_exists:1; + u32 vport_group_manager:1; + u32 sf:1; + u32 wqe_inline_mode:3; + u32 raweth_qp_id_base:15; + u32 rsvd0:7; + + u16 max_vfs; + u8 log_max_qp_depth; + u8 log_max_current_uc_list; + u8 log_max_current_mc_list; + u16 log_max_vlan_list; + u8 fdb_multi_path_to_table; + u8 log_esw_max_sched_depth; + + u8 max_num_sf_partitions; + u8 log_max_esw_sf; + u16 sf_base_id; + + u32 max_tc:8; + u32 ets:1; + u32 dcbx:1; + u32 dscp:1; + u32 sbcam_reg:1; + u32 qos:1; + u32 port_buf:1; + u32 rsvd1:2; + u32 raw_tpe_qp_num:16; + u32 max_num_eqs:8; + u32 mac_port:8; + u32 raweth_rss_qp_id_base:16; + u16 msix_base; + u16 msix_num; + u8 log_max_mtt; + u8 log_max_tso; + u32 hca_core_clock; + u32 max_rwq_indirection_tables; /* rss_caps */ + u32 max_rwq_indirection_table_size; /* rss_caps */ + u16 raweth_qp_id_end; + u32 qp_rate_limit_min; + u32 qp_rate_limit_max; + u32 hw_feature_flag; + u16 pf0_vf_funcid_base; + u16 pf0_vf_funcid_top; + u16 pf1_vf_funcid_base; + u16 pf1_vf_funcid_top; + u16 pcie0_pf_funcid_base; + u16 pcie0_pf_funcid_top; + u16 pcie1_pf_funcid_base; + u16 pcie1_pf_funcid_top; + u8 nif_port_num; + u8 pcie_host; + u8 mac_bit; + u16 funcid_to_logic_port; + u8 lag_logic_port_ofst; +}; + +/* xsc_core */ +struct xsc_dev_resource { + struct xsc_qp_table qp_table; + struct xsc_cq_table cq_table; + struct xsc_eq_table eq_table; + struct xsc_irq_info *irq_info; + /* protect buffer allocation according to numa node */ + struct mutex alloc_mutex; +}; + +struct xsc_core_device { + struct pci_dev *pdev; + struct device *device; + int adev_id; + struct xsc_adev **xsc_adev_list; + void *eth_priv; + struct xsc_dev_resource *dev_res; + int numa_node; + + void (*event_handler)(void *adapter); + + void __iomem *bar; + int bar_num; + + u8 mac_port; + u8 pcie_no; + u8 pf_id; + u16 glb_func_id; + + u16 msix_vec_base; + + struct xsc_cmd cmd; + u16 cmdq_ver; + + struct xsc_caps caps; + struct xsc_board_info *board_info; + + struct xsc_reg_addr regs; + u32 chip_ver_h; + u32 chip_ver_m; + u32 chip_ver_l; + u32 hotfix_num; + u32 feature_flag; + + u8 fw_version_major; + u8 fw_version_minor; + u16 fw_version_patch; + u32 fw_version_tweak; + u8 fw_version_extra_flag; + cpumask_var_t xps_cpumask; + + u8 user_mode; +}; + +int xsc_core_create_resource_common(struct xsc_core_device *xdev, + struct xsc_core_qp *qp); +void xsc_core_destroy_resource_common(struct xsc_core_device *xdev, + struct xsc_core_qp *qp); +int xsc_core_eth_create_qp(struct xsc_core_device *xdev, + struct xsc_create_qp_mbox_in *in, + int insize, u32 *p_qpn); +int xsc_core_eth_modify_qp_status(struct xsc_core_device *xdev, + u32 qpn, u16 status); +int xsc_core_eth_destroy_qp(struct xsc_core_device *xdev, u32 qpn); +int xsc_core_eth_create_rss_qp_rqs(struct xsc_core_device *xdev, + 
struct xsc_create_multiqp_mbox_in *in, + int insize, + u32 *p_qpn_base); +int xsc_core_eth_modify_raw_qp(struct xsc_core_device *xdev, + struct xsc_modify_raw_qp_mbox_in *in); +int xsc_core_eth_create_cq(struct xsc_core_device *xdev, + struct xsc_core_cq *xcq, + struct xsc_create_cq_mbox_in *in, + int insize); +int xsc_core_eth_destroy_cq(struct xsc_core_device *xdev, + struct xsc_core_cq *xcq); + +struct xsc_eq *xsc_core_eq_get(struct xsc_core_device *xdev, int i); +int xsc_core_vector2eqn(struct xsc_core_device *xdev, int vector, u32 *eqn, + unsigned int *irqn); +void xsc_core_fill_page_frag_array(struct xsc_frag_buf *buf, + __be64 *pas, unsigned int npages); +int xsc_core_frag_buf_alloc_node(struct xsc_core_device *xdev, + unsigned long size, + struct xsc_frag_buf *buf, + int node); +void xsc_core_frag_buf_free(struct xsc_core_device *xdev, + struct xsc_frag_buf *buf); + +u8 xsc_core_query_vport_state(struct xsc_core_device *xdev, u16 vport); + +static inline void *xsc_buf_offset(struct xsc_buf *buf, unsigned long offset) +{ + if (likely(buf->nbufs == 1)) + return buf->direct.buf + offset; + else + return buf->page_list[offset >> PAGE_SHIFT].buf + + (offset & (PAGE_SIZE - 1)); +} + +static inline bool xsc_fw_is_available(struct xsc_core_device *xdev) +{ + return xdev->cmd.cmd_status == XSC_CMD_STATUS_NORMAL; +} + +static inline void xsc_set_user_mode(struct xsc_core_device *xdev, u8 mode) +{ + xdev->user_mode = mode; +} + +static inline u8 xsc_get_user_mode(struct xsc_core_device *xdev) +{ + return xdev->user_mode; +} + +static inline u8 get_cqe_opcode(struct xsc_cqe *cqe) +{ + return FIELD_GET(XSC_CQE_MSG_OPCD_MASK, le32_to_cpu(cqe->data0)); +} + +#endif diff --git a/drivers/net/ethernet/yunsilicon/xsc/common/xsc_device.h b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_device.h new file mode 100644 index 0000000000000..154a4e0272070 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_device.h @@ -0,0 +1,77 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021-2025, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */ + +#ifndef __XSC_DEVICE_H +#define __XSC_DEVICE_H + +#include +#include + +/* QP type */ +enum { + XSC_QUEUE_TYPE_RDMA_RC = 0, + XSC_QUEUE_TYPE_RDMA_MAD = 1, + XSC_QUEUE_TYPE_RAW = 2, + XSC_QUEUE_TYPE_VIRTIO_NET = 3, + XSC_QUEUE_TYPE_VIRTIO_BLK = 4, + XSC_QUEUE_TYPE_RAW_TPE = 5, + XSC_QUEUE_TYPE_RAW_TSO = 6, + XSC_QUEUE_TYPE_RAW_TX = 7, + XSC_QUEUE_TYPE_INVALID = 0xFF, +}; + +enum xsc_traffic_types { + XSC_TT_IPV4, + XSC_TT_IPV4_TCP, + XSC_TT_IPV4_UDP, + XSC_TT_IPV6, + XSC_TT_IPV6_TCP, + XSC_TT_IPV6_UDP, + XSC_TT_IPV4_IPSEC_AH, + XSC_TT_IPV6_IPSEC_AH, + XSC_TT_IPV4_IPSEC_ESP, + XSC_TT_IPV6_IPSEC_ESP, + XSC_TT_ANY, + XSC_NUM_TT, +}; + +#define XSC_NUM_INDIR_TIRS XSC_NUM_TT + +enum { + XSC_L3_PROT_TYPE_IPV4 = BIT(0), + XSC_L3_PROT_TYPE_IPV6 = BIT(1), +}; + +enum { + XSC_L4_PROT_TYPE_TCP = BIT(0), + XSC_L4_PROT_TYPE_UDP = BIT(1), +}; + +struct xsc_tirc_config { + u8 l3_prot_type; + u8 l4_prot_type; + u32 rx_hash_fields; +}; + +enum { + XSC_HASH_FUNC_XOR = 0, + XSC_HASH_FUNC_TOP = 1, + XSC_HASH_FUNC_TOP_SYM = 2, + XSC_HASH_FUNC_RSV = 3, +}; + +static inline u8 xsc_hash_func_type(u8 hash_func) +{ + switch (hash_func) { + case ETH_RSS_HASH_TOP: + return XSC_HASH_FUNC_TOP; + case ETH_RSS_HASH_XOR: + return XSC_HASH_FUNC_XOR; + default: + return XSC_HASH_FUNC_TOP; + } +} + +#endif diff --git a/drivers/net/ethernet/yunsilicon/xsc/common/xsc_driver.h b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_driver.h new file mode 100644 index 0000000000000..d8a945fcce3bc --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_driver.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021-2025, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#ifndef __XSC_DRIVER_H +#define __XSC_DRIVER_H + +#include "common/xsc_core.h" +#include "common/xsc_cmd.h" + +int xsc_cmd_init(struct xsc_core_device *xdev); +void xsc_cmd_cleanup(struct xsc_core_device *xdev); +void xsc_cmd_use_events(struct xsc_core_device *xdev); +void xsc_cmd_use_polling(struct xsc_core_device *xdev); +int xsc_cmd_err_handler(struct xsc_core_device *xdev); +void xsc_cmd_resp_handler(struct xsc_core_device *xdev); +int xsc_cmd_status_to_err(struct xsc_outbox_hdr *hdr); +int xsc_cmd_exec(struct xsc_core_device *xdev, + void *in, unsigned int in_size, + void *out, unsigned int out_size); +int xsc_cmd_version_check(struct xsc_core_device *xdev); +const char *xsc_command_str(int command); + +#endif + diff --git a/drivers/net/ethernet/yunsilicon/xsc/common/xsc_pp.h b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_pp.h new file mode 100644 index 0000000000000..582f99d8ca939 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/common/xsc_pp.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021-2025, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */ + +#ifndef __XSC_PP_H +#define __XSC_PP_H + +enum { + XSC_HASH_FIELD_SEL_SRC_IP = BIT(0), + XSC_HASH_FIELD_SEL_PROTO = BIT(1), + XSC_HASH_FIELD_SEL_DST_IP = BIT(2), + XSC_HASH_FIELD_SEL_SPORT = BIT(3), + XSC_HASH_FIELD_SEL_DPORT = BIT(4), + XSC_HASH_FIELD_SEL_SRC_IPV6 = BIT(5), + XSC_HASH_FIELD_SEL_DST_IPV6 = BIT(6), + XSC_HASH_FIELD_SEL_SPORT_V6 = BIT(7), + XSC_HASH_FIELD_SEL_DPORT_V6 = BIT(8), +}; + +#define XSC_HASH_IP (XSC_HASH_FIELD_SEL_SRC_IP |\ + XSC_HASH_FIELD_SEL_DST_IP |\ + XSC_HASH_FIELD_SEL_PROTO) +#define XSC_HASH_IP_PORTS (XSC_HASH_FIELD_SEL_SRC_IP |\ + XSC_HASH_FIELD_SEL_DST_IP |\ + XSC_HASH_FIELD_SEL_SPORT |\ + XSC_HASH_FIELD_SEL_DPORT |\ + XSC_HASH_FIELD_SEL_PROTO) +#define XSC_HASH_IP6 (XSC_HASH_FIELD_SEL_SRC_IPV6 |\ + XSC_HASH_FIELD_SEL_DST_IPV6 |\ + XSC_HASH_FIELD_SEL_PROTO) +#define XSC_HASH_IP6_PORTS (XSC_HASH_FIELD_SEL_SRC_IPV6 |\ + XSC_HASH_FIELD_SEL_DST_IPV6 |\ + XSC_HASH_FIELD_SEL_SPORT_V6 |\ + XSC_HASH_FIELD_SEL_DPORT_V6 |\ + XSC_HASH_FIELD_SEL_PROTO) + +#endif /* __XSC_PP_H */ diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/Kconfig b/drivers/net/ethernet/yunsilicon/xsc/net/Kconfig new file mode 100644 index 0000000000000..b2f95370fdaa3 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/net/Kconfig @@ -0,0 +1,17 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (C) 2021-2025, Shanghai Yunsilicon Technology Co., Ltd. +# All rights reserved. +# Yunsilicon driver configuration +# + +config YUNSILICON_XSC_ETH + tristate "Yunsilicon XSC ethernet driver" + depends on YUNSILICON_XSC_PCI + depends on NET + select PAGE_POOL + help + This driver provides ethernet support for + Yunsilicon XSC devices. + + To compile this driver as a module, choose M here. The module + will be called xsc_eth. diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/Makefile b/drivers/net/ethernet/yunsilicon/xsc/net/Makefile new file mode 100644 index 0000000000000..e1cfa3cdf0fd0 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/net/Makefile @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (C) 2021-2025, Shanghai Yunsilicon Technology Co., Ltd. +# All rights reserved. + +ccflags-y += -I$(srctree)/drivers/net/ethernet/yunsilicon/xsc + +obj-$(CONFIG_YUNSILICON_XSC_ETH) += xsc_eth.o + +xsc_eth-y := main.o xsc_eth_wq.o xsc_eth_txrx.o xsc_eth_tx.o xsc_eth_rx.o xsc_eth_stats.o diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/main.c b/drivers/net/ethernet/yunsilicon/xsc/net/main.c new file mode 100644 index 0000000000000..7cfad5ffebdad --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/net/main.c @@ -0,0 +1,1993 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021-2025, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */ + +#include +#include +#include +#include +#include + +#include "common/xsc_core.h" +#include "common/xsc_driver.h" +#include "common/xsc_device.h" +#include "common/xsc_pp.h" +#include "xsc_eth_common.h" +#include "xsc_eth.h" +#include "xsc_eth_txrx.h" + +static const struct xsc_tirc_config tirc_default_config[XSC_NUM_INDIR_TIRS] = { + [XSC_TT_IPV4] = { + .l3_prot_type = XSC_L3_PROT_TYPE_IPV4, + .l4_prot_type = 0, + .rx_hash_fields = XSC_HASH_IP, + }, + [XSC_TT_IPV4_TCP] = { + .l3_prot_type = XSC_L3_PROT_TYPE_IPV4, + .l4_prot_type = XSC_L4_PROT_TYPE_TCP, + .rx_hash_fields = XSC_HASH_IP_PORTS, + }, + [XSC_TT_IPV4_UDP] = { + .l3_prot_type = XSC_L3_PROT_TYPE_IPV4, + .l4_prot_type = XSC_L4_PROT_TYPE_UDP, + .rx_hash_fields = XSC_HASH_IP_PORTS, + }, + [XSC_TT_IPV6] = { + .l3_prot_type = XSC_L3_PROT_TYPE_IPV6, + .l4_prot_type = 0, + .rx_hash_fields = XSC_HASH_IP6, + }, + [XSC_TT_IPV6_TCP] = { + .l3_prot_type = XSC_L3_PROT_TYPE_IPV6, + .l4_prot_type = XSC_L4_PROT_TYPE_TCP, + .rx_hash_fields = XSC_HASH_IP6_PORTS, + }, + [XSC_TT_IPV6_UDP] = { + .l3_prot_type = XSC_L3_PROT_TYPE_IPV6, + .l4_prot_type = XSC_L4_PROT_TYPE_UDP, + .rx_hash_fields = XSC_HASH_IP6_PORTS, + }, +}; + +static int xsc_get_max_num_channels(struct xsc_core_device *xdev) +{ + return min_t(int, xdev->dev_res->eq_table.num_comp_vectors, + XSC_ETH_MAX_NUM_CHANNELS); +} + +static void xsc_build_default_indir_rqt(u32 *indirection_rqt, int len, + int num_channels) +{ + int i; + + for (i = 0; i < len; i++) + indirection_rqt[i] = i % num_channels; +} + +static void xsc_build_rss_param(struct xsc_rss_params *rss_param, + u16 num_channels) +{ + enum xsc_traffic_types tt; + + rss_param->hfunc = ETH_RSS_HASH_TOP; + netdev_rss_key_fill(rss_param->toeplitz_hash_key, + sizeof(rss_param->toeplitz_hash_key)); + + xsc_build_default_indir_rqt(rss_param->indirection_rqt, + XSC_INDIR_RQT_SIZE, num_channels); + + for (tt = 0; tt < XSC_NUM_INDIR_TIRS; tt++) { + rss_param->rx_hash_fields[tt] = + tirc_default_config[tt].rx_hash_fields; + } + rss_param->rss_hash_tmpl = XSC_HASH_IP_PORTS | XSC_HASH_IP6_PORTS; +} + +static void xsc_eth_build_nic_params(struct xsc_adapter *adapter, + u32 ch_num, u32 tc_num) +{ + struct xsc_eth_params *params = &adapter->nic_param; + struct xsc_core_device *xdev = adapter->xdev; + + params->mtu = SW_DEFAULT_MTU; + params->num_tc = tc_num; + + params->comp_vectors = xdev->dev_res->eq_table.num_comp_vectors; + params->max_num_ch = ch_num; + params->num_channels = ch_num; + + params->rq_max_size = BIT(xdev->caps.log_max_qp_depth); + params->sq_max_size = BIT(xdev->caps.log_max_qp_depth); + xsc_build_rss_param(&adapter->rss_param, + adapter->nic_param.num_channels); +} + +static int xsc_eth_netdev_init(struct xsc_adapter *adapter) +{ + unsigned int node, tc, nch; + + tc = adapter->nic_param.num_tc; + nch = adapter->nic_param.max_num_ch; + node = dev_to_node(adapter->dev); + adapter->txq2sq = kcalloc_node(nch * tc, + sizeof(*adapter->txq2sq), + GFP_KERNEL, node); + if (!adapter->txq2sq) + goto err_out; + + mutex_init(&adapter->status_lock); + + adapter->workq = create_singlethread_workqueue("xsc_eth"); + if (!adapter->workq) + goto err_free_priv; + + netif_carrier_off(adapter->netdev); + + return 0; + +err_free_priv: + kfree(adapter->txq2sq); +err_out: + return -ENOMEM; +} + +static void xsc_eth_build_queue_param(struct xsc_adapter *adapter, + struct xsc_queue_attr *attr, u8 type) +{ + struct xsc_core_device *xdev = adapter->xdev; + + if (adapter->nic_param.sq_size == 0) + adapter->nic_param.sq_size = 
BIT(xdev->caps.log_max_qp_depth); + if (adapter->nic_param.rq_size == 0) + adapter->nic_param.rq_size = BIT(xdev->caps.log_max_qp_depth); + + if (type == XSC_QUEUE_TYPE_EQ) { + attr->q_type = XSC_QUEUE_TYPE_EQ; + attr->ele_num = XSC_EQ_ELE_NUM; + attr->ele_size = XSC_EQ_ELE_SZ; + attr->ele_log_size = order_base_2(XSC_EQ_ELE_SZ); + attr->q_log_size = order_base_2(XSC_EQ_ELE_NUM); + } else if (type == XSC_QUEUE_TYPE_RQCQ) { + attr->q_type = XSC_QUEUE_TYPE_RQCQ; + attr->ele_num = min_t(int, XSC_RQCQ_ELE_NUM, + xdev->caps.max_cqes); + attr->ele_size = XSC_RQCQ_ELE_SZ; + attr->ele_log_size = order_base_2(XSC_RQCQ_ELE_SZ); + attr->q_log_size = order_base_2(attr->ele_num); + } else if (type == XSC_QUEUE_TYPE_SQCQ) { + attr->q_type = XSC_QUEUE_TYPE_SQCQ; + attr->ele_num = min_t(int, XSC_SQCQ_ELE_NUM, + xdev->caps.max_cqes); + attr->ele_size = XSC_SQCQ_ELE_SZ; + attr->ele_log_size = order_base_2(XSC_SQCQ_ELE_SZ); + attr->q_log_size = order_base_2(attr->ele_num); + } else if (type == XSC_QUEUE_TYPE_RQ) { + attr->q_type = XSC_QUEUE_TYPE_RQ; + attr->ele_num = adapter->nic_param.rq_size; + attr->ele_size = xdev->caps.recv_ds_num * XSC_RECV_WQE_DS; + attr->ele_log_size = order_base_2(attr->ele_size); + attr->q_log_size = order_base_2(attr->ele_num); + } else if (type == XSC_QUEUE_TYPE_SQ) { + attr->q_type = XSC_QUEUE_TYPE_SQ; + attr->ele_num = adapter->nic_param.sq_size; + attr->ele_size = xdev->caps.send_ds_num * XSC_SEND_WQE_DS; + attr->ele_log_size = order_base_2(attr->ele_size); + attr->q_log_size = order_base_2(attr->ele_num); + } +} + +static u32 xsc_rx_get_linear_frag_sz(u32 mtu) +{ + u32 byte_count = XSC_SW2HW_FRAG_SIZE(mtu); + + return XSC_SKB_FRAG_SZ(byte_count); +} + +static bool xsc_rx_is_linear_skb(u32 mtu) +{ + u32 linear_frag_sz = xsc_rx_get_linear_frag_sz(mtu); + + return linear_frag_sz <= PAGE_SIZE; +} + +static u32 xsc_get_rq_frag_info(struct xsc_rq_frags_info *frags_info, u32 mtu) +{ + u32 byte_count = XSC_SW2HW_FRAG_SIZE(mtu); + int frag_stride; + int i = 0; + + if (xsc_rx_is_linear_skb(mtu)) { + frag_stride = xsc_rx_get_linear_frag_sz(mtu); + frag_stride = roundup_pow_of_two(frag_stride); + + frags_info->arr[0].frag_size = byte_count; + frags_info->arr[0].frag_stride = frag_stride; + frags_info->num_frags = 1; + frags_info->wqe_bulk = PAGE_SIZE / frag_stride; + frags_info->wqe_bulk_min = frags_info->wqe_bulk; + goto out; + } + + if (byte_count <= DEFAULT_FRAG_SIZE) { + frags_info->arr[0].frag_size = DEFAULT_FRAG_SIZE; + frags_info->arr[0].frag_stride = DEFAULT_FRAG_SIZE; + frags_info->num_frags = 1; + } else if (byte_count <= PAGE_SIZE_4K) { + frags_info->arr[0].frag_size = PAGE_SIZE_4K; + frags_info->arr[0].frag_stride = PAGE_SIZE_4K; + frags_info->num_frags = 1; + } else if (byte_count <= (PAGE_SIZE_4K + DEFAULT_FRAG_SIZE)) { + if (PAGE_SIZE < 2 * PAGE_SIZE_4K) { + frags_info->arr[0].frag_size = PAGE_SIZE_4K; + frags_info->arr[0].frag_stride = PAGE_SIZE_4K; + frags_info->arr[1].frag_size = PAGE_SIZE_4K; + frags_info->arr[1].frag_stride = PAGE_SIZE_4K; + frags_info->num_frags = 2; + } else { + frags_info->arr[0].frag_size = 2 * PAGE_SIZE_4K; + frags_info->arr[0].frag_stride = 2 * PAGE_SIZE_4K; + frags_info->num_frags = 1; + } + } else if (byte_count <= 2 * PAGE_SIZE_4K) { + if (PAGE_SIZE < 2 * PAGE_SIZE_4K) { + frags_info->arr[0].frag_size = PAGE_SIZE_4K; + frags_info->arr[0].frag_stride = PAGE_SIZE_4K; + frags_info->arr[1].frag_size = PAGE_SIZE_4K; + frags_info->arr[1].frag_stride = PAGE_SIZE_4K; + frags_info->num_frags = 2; + } else { + frags_info->arr[0].frag_size = 2 * 
PAGE_SIZE_4K; + frags_info->arr[0].frag_stride = 2 * PAGE_SIZE_4K; + frags_info->num_frags = 1; + } + } else { + if (PAGE_SIZE < 4 * PAGE_SIZE_4K) { + frags_info->num_frags = + roundup(byte_count, + PAGE_SIZE_4K) / PAGE_SIZE_4K; + for (i = 0; i < frags_info->num_frags; i++) { + frags_info->arr[i].frag_size = PAGE_SIZE_4K; + frags_info->arr[i].frag_stride = PAGE_SIZE_4K; + } + } else { + frags_info->arr[0].frag_size = 4 * PAGE_SIZE_4K; + frags_info->arr[0].frag_stride = 4 * PAGE_SIZE_4K; + frags_info->num_frags = 1; + } + } + + if (PAGE_SIZE <= PAGE_SIZE_4K) { + frags_info->wqe_bulk_min = 4; + frags_info->wqe_bulk = max_t(u8, frags_info->wqe_bulk_min, 8); + } else if (PAGE_SIZE <= 2 * PAGE_SIZE_4K) { + frags_info->wqe_bulk = 2; + frags_info->wqe_bulk_min = frags_info->wqe_bulk; + } else { + frags_info->wqe_bulk = + PAGE_SIZE / + (frags_info->num_frags * frags_info->arr[0].frag_size); + frags_info->wqe_bulk_min = frags_info->wqe_bulk; + } + +out: + frags_info->log_num_frags = order_base_2(frags_info->num_frags); + + return frags_info->num_frags * frags_info->arr[0].frag_size; +} + +static void xsc_build_rq_frags_info(struct xsc_queue_attr *attr, + struct xsc_rq_frags_info *frags_info, + struct xsc_eth_params *params) +{ + params->rq_frags_size = xsc_get_rq_frag_info(frags_info, params->mtu); + frags_info->frags_max_num = attr->ele_size / XSC_RECV_WQE_DS; +} + +static void xsc_eth_build_channel_param(struct xsc_adapter *adapter, + struct xsc_channel_param *chl_param) +{ + xsc_eth_build_queue_param(adapter, &chl_param->rqcq_param.cq_attr, + XSC_QUEUE_TYPE_RQCQ); + chl_param->rqcq_param.wq.buf_numa_node = dev_to_node(adapter->dev); + + xsc_eth_build_queue_param(adapter, &chl_param->sqcq_param.cq_attr, + XSC_QUEUE_TYPE_SQCQ); + chl_param->sqcq_param.wq.buf_numa_node = dev_to_node(adapter->dev); + + xsc_eth_build_queue_param(adapter, &chl_param->sq_param.sq_attr, + XSC_QUEUE_TYPE_SQ); + chl_param->sq_param.wq.buf_numa_node = dev_to_node(adapter->dev); + + xsc_eth_build_queue_param(adapter, &chl_param->rq_param.rq_attr, + XSC_QUEUE_TYPE_RQ); + chl_param->rq_param.wq.buf_numa_node = dev_to_node(adapter->dev); + + xsc_build_rq_frags_info(&chl_param->rq_param.rq_attr, + &chl_param->rq_param.frags_info, + &adapter->nic_param); +} + +static void xsc_eth_cq_error_event(struct xsc_core_cq *xcq, + enum xsc_event event) +{ + struct xsc_cq *xsc_cq = container_of(xcq, struct xsc_cq, xcq); + struct xsc_core_device *xdev = xsc_cq->xdev; + + if (event != XSC_EVENT_TYPE_CQ_ERROR) { + netdev_err(((struct xsc_adapter *)xdev->eth_priv)->netdev, + "Unexpected event type %d on CQ %06x\n", + event, xcq->cqn); + return; + } + + netdev_err(((struct xsc_adapter *)xdev->eth_priv)->netdev, + "Eth catch CQ ERROR: %x, cqn: %d\n", event, xcq->cqn); +} + +static void xsc_eth_completion_event(struct xsc_core_cq *xcq) +{ + struct xsc_cq *cq = container_of(xcq, struct xsc_cq, xcq); + struct xsc_core_device *xdev = cq->xdev; + struct xsc_rq *rq = NULL; + + if (unlikely(!cq->channel)) { + netdev_err(((struct xsc_adapter *)xdev->eth_priv)->netdev, + "cq%d->channel is null\n", xcq->cqn); + return; + } + + rq = &cq->channel->qp.rq[0]; + + set_bit(XSC_CHANNEL_NAPI_SCHED, &cq->channel->flags); + + if (!test_bit(XSC_ETH_RQ_STATE_ENABLED, &rq->state)) + netdev_err(((struct xsc_adapter *)xdev->eth_priv)->netdev, + "ch%d_cq%d, napi_flag=0x%lx\n", + cq->channel->chl_idx, xcq->cqn, + cq->napi->state); + + napi_schedule(cq->napi); + cq->event_ctr++; +} + +static int xsc_eth_alloc_cq(struct xsc_channel *c, struct xsc_cq *pcq, + struct 
xsc_cq_param *pcq_param) +{ + u8 ele_log_size = pcq_param->cq_attr.ele_log_size; + struct xsc_core_device *xdev = c->adapter->xdev; + u8 q_log_size = pcq_param->cq_attr.q_log_size; + struct xsc_core_cq *core_cq = &pcq->xcq; + int ret; + u32 i; + + pcq_param->wq.db_numa_node = cpu_to_node(c->cpu); + pcq_param->wq.buf_numa_node = cpu_to_node(c->cpu); + + ret = xsc_eth_cqwq_create(xdev, &pcq_param->wq, + q_log_size, ele_log_size, &pcq->wq, + &pcq->wq_ctrl); + if (ret) + return ret; + + core_cq->cqe_sz = pcq_param->cq_attr.ele_num; + core_cq->comp = xsc_eth_completion_event; + core_cq->event = xsc_eth_cq_error_event; + core_cq->vector = c->chl_idx; + + for (i = 0; i < xsc_cqwq_get_size(&pcq->wq); i++) { + struct xsc_cqe *cqe = xsc_cqwq_get_wqe(&pcq->wq, i); + + cqe->data1 |= cpu_to_le32(XSC_CQE_OWNER); + } + pcq->xdev = xdev; + + return ret; +} + +static int xsc_eth_set_cq(struct xsc_channel *c, + struct xsc_cq *pcq, + struct xsc_cq_param *pcq_param) +{ + struct xsc_core_device *xdev = c->adapter->xdev; + struct xsc_create_cq_mbox_in *in; + unsigned int hw_npages; + unsigned int irqn; + int inlen; + u32 eqn; + int ret; + + hw_npages = DIV_ROUND_UP(pcq->wq_ctrl.buf.size, PAGE_SIZE_4K); + /* mbox size + pas size */ + inlen = sizeof(struct xsc_create_cq_mbox_in) + + sizeof(__be64) * hw_npages; + + in = kvzalloc(inlen, GFP_KERNEL); + if (!in) + return -ENOMEM; + + /* construct the mailbox input parameters */ + ret = xsc_core_vector2eqn(xdev, c->chl_idx, &eqn, &irqn); + if (ret) + goto err_free; + + in->ctx.eqn = cpu_to_be16(eqn); + in->ctx.log_cq_sz = pcq_param->cq_attr.q_log_size; + in->ctx.pa_num = cpu_to_be16(hw_npages); + in->ctx.glb_func_id = cpu_to_be16(xdev->glb_func_id); + + xsc_core_fill_page_frag_array(&pcq->wq_ctrl.buf, + &in->pas[0], + hw_npages); + + ret = xsc_core_eth_create_cq(c->adapter->xdev, &pcq->xcq, in, inlen); + if (ret) + goto err_free; + pcq->xcq.irqn = irqn; + pcq->xcq.eq = xsc_core_eq_get(xdev, pcq->xcq.vector); + kvfree(in); + return 0; + +err_free: + kvfree(in); + return ret; +}
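xsc_eth_set_cq() above follows the mailbox pattern used by every create path in this driver: the command struct ends in a flexible __be64 pas[] array, so the allocation is sized as the fixed header plus one entry per 4 KiB hardware page, the page list is filled with xsc_core_fill_page_frag_array(), and the buffer is freed whether or not the command succeeds. A condensed sketch under those assumptions; xsc_sketch_create_cq() is hypothetical and trims the error handling to the essentials:

static int xsc_sketch_create_cq(struct xsc_core_device *xdev,
				struct xsc_core_cq *xcq,
				struct xsc_frag_buf *buf,
				u8 log_cq_sz, u16 eqn)
{
	unsigned int hw_npages = DIV_ROUND_UP(buf->size, PAGE_SIZE_4K);
	struct xsc_create_cq_mbox_in *in;
	int inlen = sizeof(*in) + sizeof(__be64) * hw_npages;
	int ret;

	in = kvzalloc(inlen, GFP_KERNEL);	/* zeroed, so rsvd fields are 0 */
	if (!in)
		return -ENOMEM;

	in->ctx.eqn = cpu_to_be16(eqn);
	in->ctx.log_cq_sz = log_cq_sz;
	in->ctx.pa_num = cpu_to_be16(hw_npages);
	in->ctx.glb_func_id = cpu_to_be16(xdev->glb_func_id);
	xsc_core_fill_page_frag_array(buf, &in->pas[0], hw_npages);

	ret = xsc_core_eth_create_cq(xdev, xcq, in, inlen);

	kvfree(in);	/* freed whether or not the command succeeded */
	return ret;
}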
+ +static void xsc_eth_free_cq(struct xsc_cq *cq) +{ + xsc_eth_wq_destroy(&cq->wq_ctrl); +} + +static int xsc_eth_open_cq(struct xsc_channel *c, + struct xsc_cq *pcq, + struct xsc_cq_param *pcq_param) +{ + int ret; + + ret = xsc_eth_alloc_cq(c, pcq, pcq_param); + if (ret) + return ret; + + ret = xsc_eth_set_cq(c, pcq, pcq_param); + if (ret) + goto err_free_cq; + + xsc_cq_notify_hw_rearm(pcq); + + pcq->napi = &c->napi; + pcq->channel = c; + pcq->rx = (pcq_param->cq_attr.q_type == XSC_QUEUE_TYPE_RQCQ) ? 1 : 0; + + return 0; + +err_free_cq: + xsc_eth_free_cq(pcq); + return ret; +} + +static int xsc_eth_close_cq(struct xsc_channel *c, struct xsc_cq *pcq) +{ + struct xsc_core_device *xdev = c->adapter->xdev; + int ret; + + ret = xsc_core_eth_destroy_cq(xdev, &pcq->xcq); + if (ret) { + netdev_err(((struct xsc_adapter *)xdev->eth_priv)->netdev, "failed to close ch%d cq%d, ret=%d\n", + c->chl_idx, pcq->xcq.cqn, ret); + return ret; + } + + xsc_eth_free_cq(pcq); + + return 0; +} + +static void xsc_free_qp_sq_db(struct xsc_sq *sq) +{ + kvfree(sq->db.wqe_info); + kvfree(sq->db.dma_fifo); +} + +static void xsc_free_qp_sq(struct xsc_sq *sq) +{ + xsc_free_qp_sq_db(sq); + xsc_eth_wq_destroy(&sq->wq_ctrl); +} + +static int xsc_eth_alloc_qp_sq_db(struct xsc_sq *sq, int numa) +{ + struct xsc_core_device *xdev = sq->cq.xdev; + int wq_sz; + int df_sz; + + wq_sz = xsc_wq_cyc_get_size(&sq->wq); + df_sz = wq_sz * xdev->caps.send_ds_num; + sq->db.dma_fifo = kvzalloc_node(array_size(df_sz, + sizeof(*sq->db.dma_fifo)), + GFP_KERNEL, + numa); + sq->db.wqe_info = kvzalloc_node(array_size(wq_sz, + sizeof(*sq->db.wqe_info)), + GFP_KERNEL, + numa); + + if (!sq->db.dma_fifo || !sq->db.wqe_info) { + xsc_free_qp_sq_db(sq); + return -ENOMEM; + } + + sq->dma_fifo_mask = df_sz - 1; + + return 0; +} + +static void xsc_eth_qp_event(struct xsc_core_qp *qp, int type) +{ + struct xsc_core_device *xdev; + struct xsc_rq *rq; + struct xsc_sq *sq; + + if (qp->eth_queue_type == XSC_RES_RQ) { + rq = container_of(qp, struct xsc_rq, cqp); + xdev = rq->cq.xdev; + } else if (qp->eth_queue_type == XSC_RES_SQ) { + sq = container_of(qp, struct xsc_sq, cqp); + xdev = sq->cq.xdev; + } else { + pr_err("%s: unknown eth queue type %d\n", __func__, qp->eth_queue_type); + return; + } + + switch (type) { + case XSC_EVENT_TYPE_WQ_CATAS_ERROR: + case XSC_EVENT_TYPE_WQ_INVAL_REQ_ERROR: + case XSC_EVENT_TYPE_WQ_ACCESS_ERROR: + netdev_err(((struct xsc_adapter *)xdev->eth_priv)->netdev, + "%s: Async event %x on QP %d\n", + __func__, type, qp->qpn); + break; + default: + netdev_err(((struct xsc_adapter *)xdev->eth_priv)->netdev, + "%s: Unexpected event type %d on QP %d\n", + __func__, type, qp->qpn); + return; + } +} + +static int xsc_eth_open_qp_sq(struct xsc_channel *c, + struct xsc_sq *psq, + struct xsc_sq_param *psq_param, + u32 sq_idx) +{ + u8 ele_log_size = psq_param->sq_attr.ele_log_size; + u8 q_log_size = psq_param->sq_attr.q_log_size; + struct xsc_modify_raw_qp_mbox_in *modify_in; + struct xsc_channel_stats *channel_stats; + struct xsc_create_qp_mbox_in *in; + struct xsc_core_device *xdev; + struct xsc_adapter *adapter; + struct xsc_stats *stats; + unsigned int hw_npages; + int inlen; + int ret; + + adapter = c->adapter; + xdev = adapter->xdev; + stats = adapter->stats; + channel_stats = &stats->channel_stats[c->chl_idx]; + psq->stats = &channel_stats->sq[sq_idx]; + psq_param->wq.db_numa_node = cpu_to_node(c->cpu); + + ret = xsc_eth_wq_cyc_create(xdev, &psq_param->wq, + q_log_size, ele_log_size, &psq->wq, + &psq->wq_ctrl); + if (ret) + return ret; + + hw_npages = DIV_ROUND_UP(psq->wq_ctrl.buf.size, PAGE_SIZE_4K); + inlen = sizeof(struct xsc_create_qp_mbox_in) + + sizeof(__be64) * hw_npages; + + in = kvzalloc(inlen, GFP_KERNEL); + if (!in) { + ret = -ENOMEM; + goto err_sq_wq_destroy; + } + in->req.input_qpn = cpu_to_be16(XSC_QPN_SQN_STUB); /* no use for eth */ + in->req.qp_type = XSC_QUEUE_TYPE_RAW_TSO; /* the default SQ is a TSO QP */ + in->req.log_sq_sz = ilog2(xdev->caps.send_ds_num) + q_log_size; + in->req.pa_num = cpu_to_be16(hw_npages); + in->req.cqn_send =
cpu_to_be16(psq->cq.xcq.cqn); + in->req.cqn_recv = in->req.cqn_send; + in->req.glb_funcid = cpu_to_be16(xdev->glb_func_id); + + xsc_core_fill_page_frag_array(&psq->wq_ctrl.buf, + &in->req.pas[0], hw_npages); + + ret = xsc_core_eth_create_qp(xdev, in, inlen, &psq->sqn); + if (ret) + goto err_sq_in_destroy; + + psq->cqp.qpn = psq->sqn; + psq->cqp.event = xsc_eth_qp_event; + psq->cqp.eth_queue_type = XSC_RES_SQ; + + ret = xsc_core_create_resource_common(xdev, &psq->cqp); + if (ret) { + netdev_err(adapter->netdev, "%s:error qp:%d errno:%d\n", + __func__, psq->sqn, ret); + goto err_sq_destroy; + } + + psq->channel = c; + psq->ch_ix = c->chl_idx; + psq->txq_ix = psq->ch_ix + sq_idx * adapter->channels.num_chl; + + /* needs to be queried from hardware */ + psq->hw_mtu = XSC_ETH_HW_MTU_SEND; + psq->stop_room = 1; + + ret = xsc_eth_alloc_qp_sq_db(psq, psq_param->wq.db_numa_node); + if (ret) + goto err_sq_common_destroy; + + inlen = sizeof(struct xsc_modify_raw_qp_mbox_in); + modify_in = kvzalloc(inlen, GFP_KERNEL); + if (!modify_in) { + ret = -ENOMEM; + goto err_sq_common_destroy; + } + + modify_in->req.qp_out_port = xdev->pf_id; + modify_in->pcie_no = xdev->pcie_no; + modify_in->req.qpn = cpu_to_be16((u16)(psq->sqn)); + modify_in->req.func_id = cpu_to_be16(xdev->glb_func_id); + modify_in->req.dma_direct = XSC_DMA_DIR_TO_MAC; + modify_in->req.prio = sq_idx; + ret = xsc_core_eth_modify_raw_qp(xdev, modify_in); + if (ret) + goto err_sq_modify_in_destroy; + + kvfree(modify_in); + kvfree(in); + + return 0; + +err_sq_modify_in_destroy: + kvfree(modify_in); + +err_sq_common_destroy: + xsc_core_destroy_resource_common(xdev, &psq->cqp); + +err_sq_destroy: + xsc_core_eth_destroy_qp(xdev, psq->cqp.qpn); + +err_sq_in_destroy: + kvfree(in); + +err_sq_wq_destroy: + xsc_eth_wq_destroy(&psq->wq_ctrl); + return ret; +} + +static int xsc_eth_close_qp_sq(struct xsc_channel *c, struct xsc_sq *psq) +{ + struct xsc_core_device *xdev = c->adapter->xdev; + int ret; + + xsc_core_destroy_resource_common(xdev, &psq->cqp); + + ret = xsc_core_eth_destroy_qp(xdev, psq->cqp.qpn); + if (ret) + return ret; + + xsc_free_qp_sq(psq); + + return 0; +} + +static int xsc_eth_open_channel(struct xsc_adapter *adapter, + int idx, + struct xsc_channel *c, + struct xsc_channel_param *chl_param) +{ + struct xsc_core_device *xdev = adapter->xdev; + struct net_device *netdev = adapter->netdev; + const struct cpumask *aff; + unsigned int irqn; + int i, j; + u32 eqn; + int ret; + + c->adapter = adapter; + c->netdev = adapter->netdev; + c->chl_idx = idx; + c->num_tc = adapter->nic_param.num_tc; + + /* one RQ per channel; there may be multiple SQs per channel */ + c->qp.rq_num = 1; + c->qp.sq_num = c->num_tc; + + if (xdev->caps.msix_enable) { + ret = xsc_core_vector2eqn(xdev, c->chl_idx, &eqn, &irqn); + if (ret) + goto err_out; + aff = irq_get_affinity_mask(irqn); + c->aff_mask = aff; + c->cpu = cpumask_first(aff); + } + + if (c->qp.sq_num > XSC_MAX_NUM_TC || c->qp.rq_num > XSC_MAX_NUM_TC) { + ret = -EINVAL; + goto err_out; + } + + for (i = 0; i < c->qp.rq_num; i++) { + ret = xsc_eth_open_cq(c, &c->qp.rq[i].cq, + &chl_param->rqcq_param); + if (ret) { + j = i - 1; + goto err_close_rq_cq; + } + } + + for (i = 0; i < c->qp.sq_num; i++) { + ret = xsc_eth_open_cq(c, &c->qp.sq[i].cq, + &chl_param->sqcq_param); + if (ret) { + j = i - 1; + goto err_close_sq_cq; + } + } + + for (i = 0; i < c->qp.sq_num; i++) { + ret = xsc_eth_open_qp_sq(c, &c->qp.sq[i], + &chl_param->sq_param, i); + if (ret) { + j = i - 1; + goto err_close_sq; + } + } + netif_napi_add(netdev, &c->napi,
xsc_eth_napi_poll); + + netdev_dbg(adapter->netdev, "open channel%d ok\n", idx); + return 0; + +err_close_sq: + for (; j >= 0; j--) + xsc_eth_close_qp_sq(c, &c->qp.sq[j]); + /* fall through: all SQ CQs were opened, so close every one */ + j = (c->qp.sq_num - 1); +err_close_sq_cq: + for (; j >= 0; j--) + xsc_eth_close_cq(c, &c->qp.sq[j].cq); + /* fall through: all RQ CQs were opened, so close every one */ + j = (c->qp.rq_num - 1); +err_close_rq_cq: + for (; j >= 0; j--) + xsc_eth_close_cq(c, &c->qp.rq[j].cq); +err_out: + netdev_err(adapter->netdev, + "failed to open channel: ch%d, sq_num=%d, rq_num=%d, err=%d\n", + idx, c->qp.sq_num, c->qp.rq_num, ret); + return ret; +} + +static int xsc_eth_modify_qps_channel(struct xsc_adapter *adapter, + struct xsc_channel *c) +{ + int ret = 0; + int i; + + for (i = 0; i < c->qp.rq_num; i++) { + c->qp.rq[i].post_wqes(&c->qp.rq[i]); + ret = xsc_core_eth_modify_qp_status(adapter->xdev, + c->qp.rq[i].rqn, + XSC_CMD_OP_RTR2RTS_QP); + if (ret) + return ret; + } + + for (i = 0; i < c->qp.sq_num; i++) { + ret = xsc_core_eth_modify_qp_status(adapter->xdev, + c->qp.sq[i].sqn, + XSC_CMD_OP_RTR2RTS_QP); + if (ret) + return ret; + } + return 0; +} + +static int xsc_eth_modify_qps(struct xsc_adapter *adapter, + struct xsc_eth_channels *chls) +{ + int ret; + int i; + + for (i = 0; i < chls->num_chl; i++) { + struct xsc_channel *c = &chls->c[i]; + + ret = xsc_eth_modify_qps_channel(adapter, c); + if (ret) + return ret; + } + + return 0; +} + +/* Lay the RX fragments out back to back; whenever the next stride would + * cross an XSC_RX_FRAG_SZ boundary, advance to the next di entry and mark + * the previous fragment as last_in_page. + */ +static void xsc_eth_init_frags_partition(struct xsc_rq *rq) +{ + struct xsc_wqe_frag_info next_frag = {}; + struct xsc_wqe_frag_info *prev; + int i; + + next_frag.di = &rq->wqe.di[0]; + next_frag.offset = 0; + prev = NULL; + + for (i = 0; i < xsc_wq_cyc_get_size(&rq->wqe.wq); i++) { + struct xsc_rq_frag_info *frag_info = &rq->wqe.info.arr[0]; + struct xsc_wqe_frag_info *frag = + &rq->wqe.frags[i << rq->wqe.info.log_num_frags]; + int f; + + for (f = 0; f < rq->wqe.info.num_frags; f++, frag++) { + if (next_frag.offset + frag_info[f].frag_stride > + XSC_RX_FRAG_SZ) { + next_frag.di++; + next_frag.offset = 0; + if (prev) + prev->last_in_page = 1; + } + *frag = next_frag; + + /* prepare next */ + next_frag.offset += frag_info[f].frag_stride; + prev = frag; + } + } + + if (prev) + prev->last_in_page = 1; +} + +static int xsc_eth_init_di_list(struct xsc_rq *rq, u32 wq_sz, int cpu) +{ + int len = wq_sz << rq->wqe.info.log_num_frags; + + rq->wqe.di = kvzalloc_node(array_size(len, sizeof(*rq->wqe.di)), + GFP_KERNEL, cpu_to_node(cpu)); + if (!rq->wqe.di) + return -ENOMEM; + + xsc_eth_init_frags_partition(rq); + + return 0; +} + +static void xsc_eth_free_di_list(struct xsc_rq *rq) +{ + kvfree(rq->wqe.di); +} + +static int xsc_eth_alloc_rq(struct xsc_channel *c, + struct xsc_rq *prq, + struct xsc_rq_param *prq_param) +{ + u8 ele_log_size = prq_param->rq_attr.ele_log_size; + u8 q_log_size = prq_param->rq_attr.q_log_size; + struct page_pool_params pagepool_params = {0}; + struct xsc_adapter *adapter = c->adapter; + struct xsc_channel_stats *channel_stats; + u32 pool_size = 1 << q_log_size; + struct xsc_stats *stats; + int ret = 0; + u32 wq_sz; + int i, f; + + stats = c->adapter->stats; + channel_stats = &stats->channel_stats[c->chl_idx]; + prq->stats = &channel_stats->rq; + prq_param->wq.db_numa_node = cpu_to_node(c->cpu); + + ret = xsc_eth_wq_cyc_create(c->adapter->xdev, &prq_param->wq, + q_log_size, ele_log_size, &prq->wqe.wq, + &prq->wq_ctrl); + if (ret) + return ret; + + wq_sz = xsc_wq_cyc_get_size(&prq->wqe.wq); + + prq->wqe.info = prq_param->frags_info; + prq->wqe.frags = + kvzalloc_node(array_size((wq_sz << prq->wqe.info.log_num_frags), + sizeof(*prq->wqe.frags)), +
GFP_KERNEL, + cpu_to_node(c->cpu)); + if (!prq->wqe.frags) { + ret = -ENOMEM; + goto err_destroy_wq; + } + + ret = xsc_eth_init_di_list(prq, wq_sz, c->cpu); + if (ret) + goto err_free_frags; + + prq->buff.map_dir = DMA_FROM_DEVICE; + + /* Create a page_pool and register it with rxq */ + pool_size = wq_sz << prq->wqe.info.log_num_frags; + pagepool_params.order = XSC_RX_FRAG_SZ_ORDER; + /* No-internal DMA mapping in page_pool */ + pagepool_params.flags = 0; + pagepool_params.pool_size = pool_size; + pagepool_params.nid = cpu_to_node(c->cpu); + pagepool_params.dev = c->adapter->dev; + pagepool_params.dma_dir = prq->buff.map_dir; + + prq->page_pool = page_pool_create(&pagepool_params); + if (IS_ERR(prq->page_pool)) { + ret = PTR_ERR(prq->page_pool); + prq->page_pool = NULL; + goto err_free_di_list; + } + + if (c->chl_idx == 0) + netdev_dbg(adapter->netdev, + "page pool: size=%d, cpu=%d, pool_numa=%d, mtu=%d, wqe_numa=%d\n", + pool_size, c->cpu, pagepool_params.nid, + adapter->nic_param.mtu, + prq_param->wq.buf_numa_node); + + for (i = 0; i < wq_sz; i++) { + struct xsc_eth_rx_wqe_cyc *wqe = + xsc_wq_cyc_get_wqe(&prq->wqe.wq, i); + + for (f = 0; f < prq->wqe.info.num_frags; f++) { + u32 frag_size = prq->wqe.info.arr[f].frag_size; + u32 val = 0; + + val |= FIELD_PREP(XSC_WQE_DATA_SEG_SEG_LEN_MASK, + frag_size); + wqe->data[f].data0 = cpu_to_le32(val); + wqe->data[f].mkey = cpu_to_le32(XSC_INVALID_LKEY); + } + + for (; f < prq->wqe.info.frags_max_num; f++) { + wqe->data[f].data0 = 0; + wqe->data[f].mkey = cpu_to_le32(XSC_INVALID_LKEY); + wqe->data[f].va = 0; + } + } + + prq->post_wqes = xsc_eth_post_rx_wqes; + prq->handle_rx_cqe = xsc_eth_handle_rx_cqe; + prq->dealloc_wqe = xsc_eth_dealloc_rx_wqe; + prq->wqe.skb_from_cqe = xsc_rx_is_linear_skb(adapter->nic_param.mtu) ? 
+ xsc_skb_from_cqe_linear : + xsc_skb_from_cqe_nonlinear; + prq->ix = c->chl_idx; + prq->frags_sz = adapter->nic_param.rq_frags_size; + + return 0; + +err_free_di_list: + xsc_eth_free_di_list(prq); +err_free_frags: + kvfree(prq->wqe.frags); +err_destroy_wq: + xsc_eth_wq_destroy(&prq->wq_ctrl); + return ret; +} + +static void xsc_free_qp_rq(struct xsc_rq *rq) +{ + kvfree(rq->wqe.frags); + kvfree(rq->wqe.di); + + if (rq->page_pool) + page_pool_destroy(rq->page_pool); + + xsc_eth_wq_destroy(&rq->wq_ctrl); +} + +static int xsc_eth_open_rss_qp_rqs(struct xsc_adapter *adapter, + struct xsc_rq_param *prq_param, + struct xsc_eth_channels *chls, + unsigned int num_chl) +{ + u8 q_log_size = prq_param->rq_attr.q_log_size; + struct xsc_create_multiqp_mbox_in *in; + struct xsc_create_qp_request *req; + unsigned int hw_npages; + struct xsc_channel *c; + int ret = 0, err = 0; + struct xsc_rq *prq; + int paslen = 0; + int entry_len; + u32 rqn_base; + int i, j, n; + int inlen; + + for (i = 0; i < num_chl; i++) { + c = &chls->c[i]; + + for (j = 0; j < c->qp.rq_num; j++) { + prq = &c->qp.rq[j]; + ret = xsc_eth_alloc_rq(c, prq, prq_param); + if (ret) + goto err_alloc_rqs; + + hw_npages = DIV_ROUND_UP(prq->wq_ctrl.buf.size, + PAGE_SIZE_4K); + /*support different npages number smoothly*/ + entry_len = sizeof(struct xsc_create_qp_request) + + sizeof(__be64) * hw_npages; + + paslen += entry_len; + } + } + + inlen = sizeof(struct xsc_create_multiqp_mbox_in) + paslen; + in = kvzalloc(inlen, GFP_KERNEL); + if (!in) { + ret = -ENOMEM; + goto err_create_rss_rqs; + } + + in->qp_num = cpu_to_be16(num_chl); + in->qp_type = XSC_QUEUE_TYPE_RAW; + in->req_len = cpu_to_be32(inlen); + + req = (struct xsc_create_qp_request *)&in->data[0]; + n = 0; + for (i = 0; i < num_chl; i++) { + c = &chls->c[i]; + for (j = 0; j < c->qp.rq_num; j++) { + prq = &c->qp.rq[j]; + + hw_npages = DIV_ROUND_UP(prq->wq_ctrl.buf.size, + PAGE_SIZE_4K); + /* no use for eth */ + req->input_qpn = cpu_to_be16(0); + req->qp_type = XSC_QUEUE_TYPE_RAW; + req->log_rq_sz = ilog2(adapter->xdev->caps.recv_ds_num) + + q_log_size; + req->pa_num = cpu_to_be16(hw_npages); + req->cqn_recv = cpu_to_be16(prq->cq.xcq.cqn); + req->cqn_send = req->cqn_recv; + req->glb_funcid = + cpu_to_be16(adapter->xdev->glb_func_id); + + xsc_core_fill_page_frag_array(&prq->wq_ctrl.buf, + &req->pas[0], + hw_npages); + n++; + req = (struct xsc_create_qp_request *) + (&in->data[0] + entry_len * n); + } + } + + ret = xsc_core_eth_create_rss_qp_rqs(adapter->xdev, in, inlen, + &rqn_base); + kvfree(in); + if (ret) + goto err_create_rss_rqs; + + n = 0; + for (i = 0; i < num_chl; i++) { + c = &chls->c[i]; + for (j = 0; j < c->qp.rq_num; j++) { + prq = &c->qp.rq[j]; + prq->rqn = rqn_base + n; + prq->cqp.qpn = prq->rqn; + prq->cqp.event = xsc_eth_qp_event; + prq->cqp.eth_queue_type = XSC_RES_RQ; + ret = xsc_core_create_resource_common(adapter->xdev, + &prq->cqp); + if (ret) { + err = ret; + netdev_err(adapter->netdev, + "create resource common error qp:%d errno:%d\n", + prq->rqn, ret); + continue; + } + + n++; + } + } + if (err) + return err; + + adapter->channels.rqn_base = rqn_base; + return 0; + +err_create_rss_rqs: + i = num_chl; +err_alloc_rqs: + for (--i; i >= 0; i--) { + c = &chls->c[i]; + for (j = 0; j < c->qp.rq_num; j++) { + prq = &c->qp.rq[j]; + xsc_free_qp_rq(prq); + } + } + return ret; +} + +static void xsc_eth_free_rx_wqe(struct xsc_rq *rq) +{ + struct xsc_wq_cyc *wq = &rq->wqe.wq; + u16 wqe_ix; + + while (!xsc_wq_cyc_is_empty(wq)) { + wqe_ix = xsc_wq_cyc_get_tail(wq); + 
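+		/* return the page fragments this WQE still references */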
		rq->dealloc_wqe(rq, wqe_ix);
+		xsc_wq_cyc_pop(wq);
+	}
+}
+
+static int xsc_eth_close_qp_rq(struct xsc_channel *c, struct xsc_rq *prq)
+{
+	struct xsc_core_device *xdev = c->adapter->xdev;
+	int ret;
+
+	xsc_core_destroy_resource_common(xdev, &prq->cqp);
+
+	ret = xsc_core_eth_destroy_qp(xdev, prq->cqp.qpn);
+	if (ret)
+		return ret;
+
+	xsc_eth_free_rx_wqe(prq);
+	xsc_free_qp_rq(prq);
+
+	return 0;
+}
+
+static void xsc_eth_close_channel(struct xsc_channel *c, bool free_rq)
+{
+	int i;
+
+	for (i = 0; i < c->qp.rq_num; i++) {
+		if (free_rq)
+			xsc_eth_close_qp_rq(c, &c->qp.rq[i]);
+		xsc_eth_close_cq(c, &c->qp.rq[i].cq);
+		memset(&c->qp.rq[i], 0, sizeof(struct xsc_rq));
+	}
+
+	for (i = 0; i < c->qp.sq_num; i++) {
+		xsc_eth_close_qp_sq(c, &c->qp.sq[i]);
+		xsc_eth_close_cq(c, &c->qp.sq[i].cq);
+	}
+
+	netif_napi_del(&c->napi);
+}
+
+static int xsc_eth_open_channels(struct xsc_adapter *adapter)
+{
+	struct xsc_eth_channels *chls = &adapter->channels;
+	struct xsc_core_device *xdev = adapter->xdev;
+	struct xsc_channel_param *chl_param;
+	bool free_rq = false;
+	int ret = 0;
+	int i;
+
+	chls->num_chl = adapter->nic_param.num_channels;
+	chls->c = kcalloc_node(chls->num_chl, sizeof(struct xsc_channel),
+			       GFP_KERNEL, xdev->numa_node);
+	if (!chls->c) {
+		ret = -ENOMEM;
+		goto err_out;
+	}
+
+	chl_param = kvzalloc(sizeof(*chl_param), GFP_KERNEL);
+	if (!chl_param) {
+		ret = -ENOMEM;
+		goto err_free_ch;
+	}
+
+	xsc_eth_build_channel_param(adapter, chl_param);
+
+	for (i = 0; i < chls->num_chl; i++) {
+		ret = xsc_eth_open_channel(adapter, i, &chls->c[i], chl_param);
+		if (ret)
+			goto err_close_channel;
+	}
+
+	ret = xsc_eth_open_rss_qp_rqs(adapter,
+				      &chl_param->rq_param,
+				      chls,
+				      chls->num_chl);
+	if (ret)
+		goto err_close_channel;
+	free_rq = true;
+
+	for (i = 0; i < chls->num_chl; i++)
+		napi_enable(&chls->c[i].napi);
+
+	/* flush cache to memory before interrupt and napi_poll running */
+	smp_wmb();
+
+	ret = xsc_eth_modify_qps(adapter, chls);
+	if (ret)
+		goto err_close_channel;
+
+	kvfree(chl_param);
+	return 0;
+
+err_close_channel:
+	for (--i; i >= 0; i--)
+		xsc_eth_close_channel(&chls->c[i], free_rq);
+
+	kvfree(chl_param);
+err_free_ch:
+	kfree(chls->c);
+err_out:
+	netdev_err(adapter->netdev, "failed to open %d channels, err=%d\n",
+		   chls->num_chl, ret);
+	chls->num_chl = 0;
+	return ret;
+}
+
+static void xsc_eth_close_channels(struct xsc_adapter *adapter)
+{
+	struct xsc_channel *c;
+	int i;
+
+	for (i = 0; i < adapter->channels.num_chl; i++) {
+		c = &adapter->channels.c[i];
+
+		xsc_eth_close_channel(c, true);
+	}
+
+	kfree(adapter->channels.c);
+	adapter->channels.num_chl = 0;
+}
+
+static void xsc_netdev_set_tcs(struct xsc_adapter *priv, u16 nch, u8 ntc)
+{
+	int tc;
+
+	netdev_reset_tc(priv->netdev);
+
+	if (ntc == 1)
+		return;
+
+	netdev_set_num_tc(priv->netdev, ntc);
+
+	/* Map netdev TCs to offset 0
+	 * We have our own UP to TXQ mapping for QoS
+	 */
+	for (tc = 0; tc < ntc; tc++)
+		netdev_set_tc_queue(priv->netdev, tc, nch, 0);
+}
+
+static void xsc_eth_build_tx2sq_maps(struct xsc_adapter *adapter)
+{
+	struct xsc_channel *c;
+	struct xsc_sq *psq;
+	int i, tc;
+
+	for (i = 0; i < adapter->channels.num_chl; i++) {
+		c = &adapter->channels.c[i];
+		for (tc = 0; tc < c->num_tc; tc++) {
+			psq = &c->qp.sq[tc];
+			adapter->txq2sq[psq->txq_ix] = psq;
+		}
+	}
+}
+
+static void xsc_eth_activate_txqsq(struct xsc_channel *c)
+{
+	struct xsc_sq *psq;
+	int tc;
+
+	for (tc = 0; tc < c->num_tc; tc++) {
+		psq = &c->qp.sq[tc];
+		psq->txq = netdev_get_tx_queue(psq->channel->netdev,
+
					       psq->txq_ix);
+		set_bit(XSC_ETH_SQ_STATE_ENABLED, &psq->state);
+		netdev_tx_reset_queue(psq->txq);
+		netif_tx_start_queue(psq->txq);
+	}
+}
+
+static void xsc_eth_deactivate_txqsq(struct xsc_channel *c)
+{
+	struct xsc_sq *psq;
+	int tc;
+
+	for (tc = 0; tc < c->num_tc; tc++) {
+		psq = &c->qp.sq[tc];
+		clear_bit(XSC_ETH_SQ_STATE_ENABLED, &psq->state);
+	}
+}
+
+static void xsc_activate_rq(struct xsc_channel *c)
+{
+	int i;
+
+	for (i = 0; i < c->qp.rq_num; i++)
+		set_bit(XSC_ETH_RQ_STATE_ENABLED, &c->qp.rq[i].state);
+}
+
+static void xsc_deactivate_rq(struct xsc_channel *c)
+{
+	int i;
+
+	for (i = 0; i < c->qp.rq_num; i++)
+		clear_bit(XSC_ETH_RQ_STATE_ENABLED, &c->qp.rq[i].state);
+}
+
+static void xsc_eth_activate_channel(struct xsc_channel *c)
+{
+	xsc_eth_activate_txqsq(c);
+	xsc_activate_rq(c);
+}
+
+static void xsc_eth_deactivate_channel(struct xsc_channel *c)
+{
+	xsc_deactivate_rq(c);
+	xsc_eth_deactivate_txqsq(c);
+}
+
+static void xsc_eth_activate_channels(struct xsc_eth_channels *chs)
+{
+	int i;
+
+	for (i = 0; i < chs->num_chl; i++)
+		xsc_eth_activate_channel(&chs->c[i]);
+}
+
+static void xsc_eth_deactivate_channels(struct xsc_eth_channels *chs)
+{
+	int i;
+
+	for (i = 0; i < chs->num_chl; i++)
+		xsc_eth_deactivate_channel(&chs->c[i]);
+
+	/* Sync with all NAPIs to wait until they stop using queues. */
+	synchronize_net();
+
+	for (i = 0; i < chs->num_chl; i++)
+		/* last doorbell out */
+		napi_disable(&chs->c[i].napi);
+}
+
+static void xsc_eth_activate_priv_channels(struct xsc_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	int num_txqs;
+
+	num_txqs = adapter->channels.num_chl * adapter->nic_param.num_tc;
+	xsc_netdev_set_tcs(adapter,
+			   adapter->channels.num_chl,
+			   adapter->nic_param.num_tc);
+	netif_set_real_num_tx_queues(netdev, num_txqs);
+	netif_set_real_num_rx_queues(netdev, adapter->channels.num_chl);
+
+	xsc_eth_build_tx2sq_maps(adapter);
+	xsc_eth_activate_channels(&adapter->channels);
+	netif_tx_start_all_queues(adapter->netdev);
+}
+
+static void xsc_eth_deactivate_priv_channels(struct xsc_adapter *adapter)
+{
+	netif_tx_disable(adapter->netdev);
+	xsc_eth_deactivate_channels(&adapter->channels);
+}
+
+static int xsc_eth_sw_init(struct xsc_adapter *adapter)
+{
+	int ret;
+
+	ret = xsc_eth_open_channels(adapter);
+	if (ret)
+		return ret;
+
+	xsc_eth_activate_priv_channels(adapter);
+
+	return 0;
+}
+
+static void xsc_eth_sw_deinit(struct xsc_adapter *adapter)
+{
+	xsc_eth_deactivate_priv_channels(adapter);
+
+	xsc_eth_close_channels(adapter);
+}
+
+static bool xsc_eth_get_link_status(struct xsc_adapter *adapter)
+{
+	struct xsc_core_device *xdev = adapter->xdev;
+	u16 vport = 0;
+	bool link_up;
+
+	link_up = xsc_core_query_vport_state(xdev, vport);
+
+	return link_up ?
true : false; +} + +static int xsc_eth_change_link_status(struct xsc_adapter *adapter) +{ + bool link_up; + + link_up = xsc_eth_get_link_status(adapter); + + if (link_up && !netif_carrier_ok(adapter->netdev)) { + netdev_info(adapter->netdev, "Link up\n"); + netif_carrier_on(adapter->netdev); + } else if (!link_up && netif_carrier_ok(adapter->netdev)) { + netdev_info(adapter->netdev, "Link down\n"); + netif_carrier_off(adapter->netdev); + } + + return 0; +} + +static void xsc_eth_event_work(struct work_struct *work) +{ + struct xsc_adapter *adapter = container_of(work, + struct xsc_adapter, + event_work); + struct xsc_event_query_type_mbox_out out; + struct xsc_event_query_type_mbox_in in; + int err; + + if (adapter->status != XSCALE_ETH_DRIVER_OK) + return; + + /*query cmd_type cmd*/ + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_QUERY_EVENT_TYPE); + + err = xsc_cmd_exec(adapter->xdev, &in, sizeof(in), &out, sizeof(out)); + if (err || out.hdr.status) { + netdev_err(adapter->netdev, "failed to query event type, err=%d, status=%d\n", + err, out.hdr.status); + goto err_out; + } + + switch (out.ctx.resp_cmd_type) { + case XSC_CMD_EVENT_RESP_CHANGE_LINK: + err = xsc_eth_change_link_status(adapter); + if (err) { + netdev_err(adapter->netdev, "failed to change linkstatus, err=%d\n", + err); + goto err_out; + } + break; + case XSC_CMD_EVENT_RESP_TEMP_WARN: + netdev_err(adapter->netdev, "[Minor]nic chip temperature high warning\n"); + break; + case XSC_CMD_EVENT_RESP_OVER_TEMP_PROTECTION: + netdev_err(adapter->netdev, "[Critical]nic chip was over-temperature\n"); + break; + default: + break; + } + +err_out: + return; +} + +static void xsc_eth_event_handler(void *arg) +{ + struct xsc_adapter *adapter = (struct xsc_adapter *)arg; + + queue_work(adapter->workq, &adapter->event_work); +} + +static bool xsc_get_pct_drop_config(struct xsc_core_device *xdev) +{ + return (xdev->pdev->device == XSC_MC_PF_DEV_ID) || + (xdev->pdev->device == XSC_MF_SOC_PF_DEV_ID) || + (xdev->pdev->device == XSC_MS_PF_DEV_ID) || + (xdev->pdev->device == XSC_MV_SOC_PF_DEV_ID); +} + +static int xsc_eth_enable_nic_hca(struct xsc_adapter *adapter) +{ + struct xsc_cmd_enable_nic_hca_mbox_out out = {}; + struct xsc_cmd_enable_nic_hca_mbox_in in = {}; + struct xsc_core_device *xdev = adapter->xdev; + struct net_device *netdev = adapter->netdev; + u16 caps_mask = 0; + u16 caps = 0; + int err; + + if (xsc_get_user_mode(xdev)) + return 0; + + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_ENABLE_NIC_HCA); + + in.rss.rss_en = 1; + in.rss.rqn_base = cpu_to_be16(adapter->channels.rqn_base - + xdev->caps.raweth_rss_qp_id_base); + in.rss.rqn_num = cpu_to_be16(adapter->channels.num_chl); + in.rss.hash_tmpl = cpu_to_be32(adapter->rss_param.rss_hash_tmpl); + in.rss.hfunc = xsc_hash_func_type(adapter->rss_param.hfunc); + caps_mask |= BIT(XSC_TBM_CAP_RSS); + + if (netdev->features & NETIF_F_RXCSUM) + caps |= BIT(XSC_TBM_CAP_HASH_PPH); + caps_mask |= BIT(XSC_TBM_CAP_HASH_PPH); + + if (xsc_get_pct_drop_config(xdev) && !(netdev->flags & IFF_SLAVE)) + caps |= BIT(XSC_TBM_CAP_PCT_DROP_CONFIG); + caps_mask |= BIT(XSC_TBM_CAP_PCT_DROP_CONFIG); + + memcpy(in.nic.mac_addr, netdev->dev_addr, ETH_ALEN); + + in.nic.caps = cpu_to_be16(caps); + in.nic.caps_mask = cpu_to_be16(caps_mask); + + err = xsc_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out)); + if (err || out.hdr.status) { + netdev_err(netdev, "failed!! 
err=%d, status=%d\n",
+			   err, out.hdr.status);
+		return -ENOEXEC;
+	}
+
+	return 0;
+}
+
+static int xsc_eth_disable_nic_hca(struct xsc_adapter *adapter)
+{
+	struct xsc_cmd_disable_nic_hca_mbox_out out = {};
+	struct xsc_cmd_disable_nic_hca_mbox_in in = {};
+	struct xsc_core_device *xdev = adapter->xdev;
+	struct net_device *netdev = adapter->netdev;
+	u16 caps = 0;
+	int err;
+
+	if (xsc_get_user_mode(xdev))
+		return 0;
+
+	in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_DISABLE_NIC_HCA);
+
+	if (xsc_get_pct_drop_config(xdev) &&
+	    !(netdev->priv_flags & IFF_BONDING))
+		caps |= BIT(XSC_TBM_CAP_PCT_DROP_CONFIG);
+
+	in.nic.caps = cpu_to_be16(caps);
+	err = xsc_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out));
+	if (err || out.hdr.status) {
+		netdev_err(netdev, "failed to disable nic hca, err=%d, status=%d\n",
+			   err, out.hdr.status);
+		return -ENOEXEC;
+	}
+
+	return 0;
+}
+
+static void xsc_set_default_xps_cpumasks(struct xsc_adapter *priv,
+					 struct xsc_eth_params *params)
+{
+	struct xsc_core_device *xdev = priv->xdev;
+	int num_comp_vectors, irq;
+
+	num_comp_vectors = priv->nic_param.comp_vectors;
+	cpumask_clear(xdev->xps_cpumask);
+
+	for (irq = 0; irq < num_comp_vectors; irq++) {
+		cpumask_set_cpu(cpumask_local_spread(irq, xdev->numa_node),
+				xdev->xps_cpumask);
+		netif_set_xps_queue(priv->netdev, xdev->xps_cpumask, irq);
+	}
+}
+
+static int xsc_set_port_admin_status(struct xsc_adapter *adapter,
+				     enum xsc_port_status status)
+{
+	struct xsc_event_set_port_admin_status_mbox_out out;
+	struct xsc_event_set_port_admin_status_mbox_in in;
+	int ret = 0;
+
+	in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_SET_PORT_ADMIN_STATUS);
+	in.status = cpu_to_be16(status);
+
+	ret = xsc_cmd_exec(adapter->xdev, &in, sizeof(in), &out, sizeof(out));
+	if (ret || out.hdr.status) {
+		netdev_err(adapter->netdev, "failed to set port admin status, err=%d, status=%d\n",
+			   ret, out.hdr.status);
+		return -ENOEXEC;
+	}
+
+	return ret;
+}
+
+static int xsc_eth_open(struct net_device *netdev)
+{
+	struct xsc_adapter *adapter = netdev_priv(netdev);
+	struct xsc_core_device *xdev = adapter->xdev;
+	int ret = 0;
+
+	mutex_lock(&adapter->status_lock);
+	if (adapter->status == XSCALE_ETH_DRIVER_OK) {
+		netdev_err(adapter->netdev, "unexpected ndo_open when status=%d\n",
+			   adapter->status);
+		goto out;
+	}
+
+	ret = xsc_eth_sw_init(adapter);
+	if (ret)
+		goto out;
+
+	ret = xsc_eth_enable_nic_hca(adapter);
+	if (ret)
+		goto err_sw_deinit;
+
+	/* set up async event handling */
+	INIT_WORK(&adapter->event_work, xsc_eth_event_work);
+	xdev->event_handler = xsc_eth_event_handler;
+
+	if (xsc_eth_get_link_status(adapter)) {
+		netdev_info(netdev, "Link up\n");
+		netif_carrier_on(adapter->netdev);
+	} else {
+		netdev_info(netdev, "Link down\n");
+	}
+
+	adapter->status = XSCALE_ETH_DRIVER_OK;
+
+	xsc_set_default_xps_cpumasks(adapter, &adapter->nic_param);
+
+	xsc_set_port_admin_status(adapter, XSC_PORT_UP);
+	goto out;
+
+err_sw_deinit:
+	xsc_eth_sw_deinit(adapter);
+out:
+	mutex_unlock(&adapter->status_lock);
+	return ret;
+}
+
+static int xsc_eth_close(struct net_device *netdev)
+{
+	struct xsc_adapter *adapter = netdev_priv(netdev);
+	int ret = 0;
+
+	mutex_lock(&adapter->status_lock);
+
+	if (!netif_device_present(netdev)) {
+		ret = -ENODEV;
+		goto out;
+	}
+
+	if (adapter->status != XSCALE_ETH_DRIVER_OK)
+		goto out;
+
+	adapter->status = XSCALE_ETH_DRIVER_CLOSE;
+
+	netif_carrier_off(adapter->netdev);
+
+	xsc_eth_sw_deinit(adapter);
+
+	ret = xsc_eth_disable_nic_hca(adapter);
+	if (ret)
+		netdev_err(adapter->netdev, "failed to disable nic hca, err=%d\n",
+			   ret);
+
+
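+	/* finally tell the firmware to take the port administratively down */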
xsc_set_port_admin_status(adapter, XSC_PORT_DOWN); + +out: + mutex_unlock(&adapter->status_lock); + + return ret; +} + +static void xsc_eth_get_stats(struct net_device *netdev, + struct rtnl_link_stats64 *stats) +{ + struct xsc_adapter *adapter = netdev_priv(netdev); + + xsc_eth_fold_sw_stats64(adapter, stats); +} + +static int xsc_eth_set_hw_mtu(struct xsc_core_device *xdev, + u16 mtu, u16 rx_buf_sz) +{ + struct xsc_set_mtu_mbox_out out; + struct xsc_set_mtu_mbox_in in; + int ret; + + memset(&in, 0, sizeof(struct xsc_set_mtu_mbox_in)); + memset(&out, 0, sizeof(struct xsc_set_mtu_mbox_out)); + + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_SET_MTU); + in.mtu = cpu_to_be16(mtu); + in.rx_buf_sz_min = cpu_to_be16(rx_buf_sz); + in.mac_port = xdev->mac_port; + + ret = xsc_cmd_exec(xdev, &in, sizeof(struct xsc_set_mtu_mbox_in), &out, + sizeof(struct xsc_set_mtu_mbox_out)); + if (ret || out.hdr.status) { + netdev_err(((struct xsc_adapter *)xdev->eth_priv)->netdev, + "failed to set hw_mtu=%u rx_buf_sz=%u, err=%d, status=%d\n", + mtu, rx_buf_sz, ret, out.hdr.status); + ret = -ENOEXEC; + } + + return ret; +} + +static const struct net_device_ops xsc_netdev_ops = { + .ndo_open = xsc_eth_open, + .ndo_stop = xsc_eth_close, + .ndo_start_xmit = xsc_eth_xmit_start, + .ndo_get_stats64 = xsc_eth_get_stats, +}; + +static void xsc_eth_build_nic_netdev(struct xsc_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + /* Set up network device as normal. */ + netdev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE; + netdev->netdev_ops = &xsc_netdev_ops; + + netdev->min_mtu = SW_MIN_MTU; + netdev->max_mtu = SW_MAX_MTU; + /*mtu - macheaderlen - ipheaderlen should be aligned in 8B*/ + netdev->mtu = SW_DEFAULT_MTU; + + netdev->vlan_features |= NETIF_F_SG; + netdev->vlan_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; + netdev->vlan_features |= NETIF_F_GRO; + netdev->vlan_features |= NETIF_F_TSO; + netdev->vlan_features |= NETIF_F_TSO6; + + netdev->vlan_features |= NETIF_F_RXCSUM; + netdev->vlan_features |= NETIF_F_RXHASH; + + netdev->hw_features = netdev->vlan_features; + + netdev->hw_enc_features |= NETIF_F_IP_CSUM; + netdev->hw_enc_features |= NETIF_F_IPV6_CSUM; + netdev->hw_enc_features |= NETIF_F_TSO; + netdev->hw_enc_features |= NETIF_F_TSO6; + netdev->hw_enc_features |= NETIF_F_GSO_PARTIAL; + + netdev->hw_features |= NETIF_F_GSO_PARTIAL; + netdev->hw_features |= NETIF_F_GSO_UDP_L4; + netdev->features |= netdev->hw_features; + netdev->gso_partial_features |= NETIF_F_GSO_UDP_L4; + + netdev->features |= NETIF_F_HIGHDMA; +} + +static int xsc_eth_nic_init(struct xsc_adapter *adapter, + void *rep_priv, u32 ch_num, u32 tc_num) +{ + int err; + + xsc_eth_build_nic_params(adapter, ch_num, tc_num); + + err = xsc_eth_netdev_init(adapter); + if (err) + return err; + + xsc_eth_build_nic_netdev(adapter); + + return 0; +} + +static void xsc_eth_nic_cleanup(struct xsc_adapter *adapter) +{ + destroy_workqueue(adapter->workq); + kfree(adapter->txq2sq); +} + +static int xsc_eth_get_mac(struct xsc_core_device *xdev, char *mac) +{ + struct xsc_query_eth_mac_mbox_out *out; + struct xsc_query_eth_mac_mbox_in in; + int err = 0; + + out = kzalloc(sizeof(*out), GFP_KERNEL); + if (!out) + return -ENOMEM; + + memset(&in, 0, sizeof(in)); + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_QUERY_ETH_MAC); + + err = xsc_cmd_exec(xdev, &in, sizeof(in), out, sizeof(*out)); + if (err || out->hdr.status) { + netdev_err(((struct xsc_adapter *)xdev->eth_priv)->netdev, + "get mac failed! 
err=%d, out.status=%u\n",
+			   err, out->hdr.status);
+		err = -ENOEXEC;
+		goto err_free;
+	}
+
+	memcpy(mac, out->mac, ETH_ALEN);
+
+err_free:
+	kfree(out);
+	return err;
+}
+
+static void xsc_eth_l2_addr_init(struct xsc_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	char mac[ETH_ALEN] = {0};
+	int ret = 0;
+
+	ret = xsc_eth_get_mac(adapter->xdev, mac);
+	if (ret) {
+		netdev_err(netdev, "get mac failed %d, generate random mac...",
+			   ret);
+		eth_random_addr(mac);
+	}
+	dev_addr_mod(netdev, 0, mac, ETH_ALEN);
+
+	if (!is_valid_ether_addr(netdev->perm_addr))
+		memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
+}
+
+static int xsc_eth_nic_enable(struct xsc_adapter *adapter)
+{
+	struct xsc_core_device *xdev = adapter->xdev;
+
+	xsc_eth_l2_addr_init(adapter);
+
+	xsc_eth_set_hw_mtu(xdev, XSC_SW2HW_MTU(adapter->nic_param.mtu),
+			   XSC_SW2HW_RX_PKT_LEN(adapter->nic_param.mtu));
+
+	rtnl_lock();
+	netif_device_attach(adapter->netdev);
+	rtnl_unlock();
+
+	return 0;
+}
+
+static void xsc_eth_nic_disable(struct xsc_adapter *adapter)
+{
+	rtnl_lock();
+	if (netif_running(adapter->netdev))
+		xsc_eth_close(adapter->netdev);
+	netif_device_detach(adapter->netdev);
+	rtnl_unlock();
+}
+
+static int xsc_attach_netdev(struct xsc_adapter *adapter)
+{
+	int err;
+
+	err = xsc_eth_nic_enable(adapter);
+	if (err)
+		return err;
+
+	return 0;
+}
+
+static void xsc_detach_netdev(struct xsc_adapter *adapter)
+{
+	xsc_eth_nic_disable(adapter);
+
+	flush_workqueue(adapter->workq);
+	adapter->status = XSCALE_ETH_DRIVER_DETACH;
+}
+
+static int xsc_eth_attach(struct xsc_core_device *xdev,
+			  struct xsc_adapter *adapter)
+{
+	int err;
+
+	if (netif_device_present(adapter->netdev))
+		return 0;
+
+	err = xsc_attach_netdev(adapter);
+	if (err)
+		return err;
+
+	return 0;
+}
+
+static void xsc_eth_detach(struct xsc_core_device *xdev,
+			   struct xsc_adapter *adapter)
+{
+	if (!netif_device_present(adapter->netdev))
+		return;
+
+	xsc_detach_netdev(adapter);
+}
+
+static int xsc_eth_probe(struct auxiliary_device *adev,
+			 const struct auxiliary_device_id *adev_id)
+{
+	struct xsc_adev *xsc_adev = container_of(adev, struct xsc_adev, adev);
+	struct xsc_core_device *xdev = xsc_adev->xdev;
+	struct xsc_adapter *adapter;
+	struct net_device *netdev;
+	void *rep_priv = NULL;
+	int num_chl, num_tc;
+	int err;
+
+	num_chl = xsc_get_max_num_channels(xdev);
+	num_tc = xdev->caps.max_tc;
+
+	netdev = alloc_etherdev_mqs(sizeof(struct xsc_adapter),
+				    num_chl * num_tc, num_chl);
+	if (!netdev) {
+		pr_err("alloc_etherdev_mqs failed, txq=%d, rxq=%d\n",
+		       (num_chl * num_tc), num_chl);
+		return -ENOMEM;
+	}
+
+	netdev->dev.parent = &xdev->pdev->dev;
+	adapter = netdev_priv(netdev);
+	adapter->netdev = netdev;
+	adapter->pdev = xdev->pdev;
+	adapter->dev = &adapter->pdev->dev;
+	adapter->xdev = xdev;
+	xdev->eth_priv = adapter;
+
+	err = xsc_eth_nic_init(adapter, rep_priv, num_chl, num_tc);
+	if (err) {
+		netdev_err(netdev, "xsc_eth_nic_init failed, err=%d\n", err);
+		goto err_free_netdev;
+	}
+
+	err = xsc_eth_attach(xdev, adapter);
+	if (err) {
+		netdev_err(netdev, "xsc_eth_attach failed, err=%d\n", err);
+		goto err_nic_cleanup;
+	}
+
+	adapter->stats = kvzalloc(sizeof(*adapter->stats), GFP_KERNEL);
+	if (!adapter->stats) {
+		err = -ENOMEM;
+		goto err_detach;
+	}
+
+	err = register_netdev(netdev);
+	if (err) {
+		netdev_err(netdev, "register_netdev failed, err=%d\n", err);
+		goto err_free_stats;
+	}
+
+	return 0;
+
+err_free_stats:
+	kvfree(adapter->stats);
+err_detach:
+	xsc_eth_detach(xdev, adapter);
+err_nic_cleanup:
+
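+	/* unwind xsc_eth_nic_init(): frees the workqueue and txq2sq map */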
xsc_eth_nic_cleanup(adapter); +err_free_netdev: + free_netdev(netdev); + + return err; +} + +static void xsc_eth_remove(struct auxiliary_device *adev) +{ + struct xsc_adev *xsc_adev = container_of(adev, struct xsc_adev, adev); + struct xsc_core_device *xdev = xsc_adev->xdev; + struct xsc_adapter *adapter; + + if (!xdev) + return; + + adapter = xdev->eth_priv; + if (!adapter) + return; + + unregister_netdev(adapter->netdev); + kvfree(adapter->stats); + free_netdev(adapter->netdev); + + xdev->eth_priv = NULL; +} + +static const struct auxiliary_device_id xsc_eth_id_table[] = { + { .name = XSC_PCI_DRV_NAME "." XSC_ETH_ADEV_NAME }, + {}, +}; +MODULE_DEVICE_TABLE(auxiliary, xsc_eth_id_table); + +static struct auxiliary_driver xsc_eth_driver = { + .name = "eth", + .probe = xsc_eth_probe, + .remove = xsc_eth_remove, + .id_table = xsc_eth_id_table, +}; +module_auxiliary_driver(xsc_eth_driver); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Yunsilicon XSC ethernet driver"); diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth.h b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth.h new file mode 100644 index 0000000000000..eef2cca36d501 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth.h @@ -0,0 +1,55 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021-2025, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#ifndef __XSC_ETH_H +#define __XSC_ETH_H + +#include + +#include "common/xsc_device.h" +#include "xsc_eth_common.h" +#include "xsc_eth_stats.h" + +#define XSC_INVALID_LKEY 0x100 + +#define XSCALE_DRIVER_NAME "xsc_eth" + +enum { + XSCALE_ETH_DRIVER_INIT, + XSCALE_ETH_DRIVER_OK, + XSCALE_ETH_DRIVER_CLOSE, + XSCALE_ETH_DRIVER_DETACH, +}; + +struct xsc_rss_params { + u32 indirection_rqt[XSC_INDIR_RQT_SIZE]; + u32 rx_hash_fields[XSC_NUM_INDIR_TIRS]; + u8 toeplitz_hash_key[52]; + u8 hfunc; + u32 rss_hash_tmpl; +}; + +struct xsc_adapter { + struct net_device *netdev; + struct pci_dev *pdev; + struct device *dev; + struct xsc_core_device *xdev; + + struct xsc_eth_params nic_param; + struct xsc_rss_params rss_param; + + struct workqueue_struct *workq; + struct work_struct event_work; + + struct xsc_eth_channels channels; + struct xsc_sq **txq2sq; + + u32 status; + struct mutex status_lock; /*protect status */ + + struct xsc_stats *stats; +}; + +#endif /* __XSC_ETH_H */ diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_common.h b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_common.h new file mode 100644 index 0000000000000..97bbf7e4fed1b --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_common.h @@ -0,0 +1,224 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021-2025, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */
+
+#ifndef __XSC_ETH_COMMON_H
+#define __XSC_ETH_COMMON_H
+
+#include "xsc_queue.h"
+#include "xsc_pph.h"
+
+#define SW_MIN_MTU ETH_MIN_MTU
+#define SW_DEFAULT_MTU ETH_DATA_LEN
+#define SW_MAX_MTU 9600
+
+#define XSC_ETH_HW_MTU_SEND 9800
+#define XSC_ETH_HW_MTU_RECV 9800
+#define XSC_ETH_HARD_MTU (ETH_HLEN + VLAN_HLEN * 2 + ETH_FCS_LEN)
+#define XSC_SW2HW_MTU(mtu) ((mtu) + XSC_ETH_HARD_MTU)
+#define XSC_SW2HW_FRAG_SIZE(mtu) ((mtu) + XSC_ETH_HARD_MTU)
+#define XSC_ETH_RX_MAX_HEAD_ROOM 256
+#define XSC_SW2HW_RX_PKT_LEN(mtu) \
+	((mtu) + ETH_HLEN + XSC_ETH_RX_MAX_HEAD_ROOM)
+
+#define XSC_RX_MAX_HEAD (256)
+
+#define XSC_QPN_SQN_STUB 1025
+#define XSC_QPN_RQN_STUB 1024
+
+#define XSC_LOG_INDIR_RQT_SIZE 0x8
+
+#define XSC_INDIR_RQT_SIZE BIT(XSC_LOG_INDIR_RQT_SIZE)
+#define XSC_ETH_MIN_NUM_CHANNELS 2
+#define XSC_ETH_MAX_NUM_CHANNELS XSC_INDIR_RQT_SIZE
+
+#define XSC_TX_NUM_TC 1
+#define XSC_MAX_NUM_TC 8
+#define XSC_ETH_MAX_TC_TOTAL \
+	(XSC_ETH_MAX_NUM_CHANNELS * XSC_MAX_NUM_TC)
+#define XSC_ETH_MAX_QP_NUM_PER_CH (XSC_MAX_NUM_TC + 1)
+
+#define XSC_SKB_FRAG_SZ(len) (SKB_DATA_ALIGN(len) + \
+			      SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+
+#define XSC_RQCQ_ELE_SZ 32
+#define XSC_SQCQ_ELE_SZ 32
+#define XSC_RQ_ELE_SZ XSC_RECV_WQE_BB
+#define XSC_SQ_ELE_SZ XSC_SEND_WQE_BB
+#define XSC_EQ_ELE_SZ 8
+
+#define XSC_MIN_SKB_FRAG_SZ (XSC_SKB_FRAG_SZ(XSC_RX_HEADROOM))
+#define XSC_LOG_MAX_RX_WQE_BULK \
+	(ilog2(PAGE_SIZE / roundup_pow_of_two(XSC_MIN_SKB_FRAG_SZ)))
+
+#define XSC_MIN_LOG_RQ_SZ (1 + XSC_LOG_MAX_RX_WQE_BULK)
+#define XSC_DEF_LOG_RQ_SZ 0xa
+#define XSC_MAX_LOG_RQ_SZ 0xd
+
+#define XSC_MIN_LOG_SQ_SZ 0x6
+#define XSC_DEF_LOG_SQ_SZ 0xa
+#define XSC_MAX_LOG_SQ_SZ 0xd
+
+/* DS num */
+#define XSC_SQ_ELE_NUM_DEF BIT(XSC_DEF_LOG_SQ_SZ)
+#define XSC_RQ_ELE_NUM_DEF BIT(XSC_DEF_LOG_RQ_SZ)
+
+#define XSC_LOG_RQCQ_SZ 0xb
+#define XSC_LOG_SQCQ_SZ 0xa
+
+#define XSC_RQCQ_ELE_NUM BIT(XSC_LOG_RQCQ_SZ)
+#define XSC_SQCQ_ELE_NUM BIT(XSC_LOG_SQCQ_SZ)
+#define XSC_RQ_ELE_NUM XSC_RQ_ELE_NUM_DEF
+#define XSC_SQ_ELE_NUM XSC_SQ_ELE_NUM_DEF
+#define XSC_EQ_ELE_NUM XSC_SQ_ELE_NUM_DEF
+
+enum xsc_port_status {
+	XSC_PORT_DOWN = 0,
+	XSC_PORT_UP = 1,
+};
+
+enum xsc_queue_type {
+	XSC_QUEUE_TYPE_EQ = 0,
+	XSC_QUEUE_TYPE_RQCQ,
+	XSC_QUEUE_TYPE_SQCQ,
+	XSC_QUEUE_TYPE_RQ,
+	XSC_QUEUE_TYPE_SQ,
+	XSC_QUEUE_TYPE_MAX,
+};
+
+struct xsc_queue_attr {
+	u8 q_type;
+	u32 ele_num;
+	u32 ele_size;
+	u8 ele_log_size;
+	u8 q_log_size;
+};
+
+struct xsc_eth_rx_wqe_cyc {
+	DECLARE_FLEX_ARRAY(struct xsc_wqe_data_seg, data);
+};
+
+struct xsc_eq_param {
+	struct xsc_queue_attr eq_attr;
+};
+
+struct xsc_cq_param {
+	struct xsc_wq_param wq;
+	struct cq_cmd {
+		u8 abc[16];
+	} cqc;
+	struct xsc_queue_attr cq_attr;
+};
+
+struct xsc_rq_param {
+	struct xsc_wq_param wq;
+	struct xsc_queue_attr rq_attr;
+	struct xsc_rq_frags_info frags_info;
+};
+
+struct xsc_sq_param {
+	struct xsc_wq_param wq;
+	struct xsc_queue_attr sq_attr;
+};
+
+struct xsc_qp_param {
+	struct xsc_queue_attr qp_attr;
+};
+
+struct xsc_channel_param {
+	struct xsc_cq_param rqcq_param;
+	struct xsc_cq_param sqcq_param;
+	struct xsc_rq_param rq_param;
+	struct xsc_sq_param sq_param;
+	struct xsc_qp_param qp_param;
+};
+
+struct xsc_eth_qp {
+	u16 rq_num;
+	u16 sq_num;
+	struct xsc_rq rq[XSC_MAX_NUM_TC];	/* usually only rq[0] is used */
+	struct xsc_sq sq[XSC_MAX_NUM_TC];	/* one SQ per traffic class */
+};
+
+enum channel_flags {
+	XSC_CHANNEL_NAPI_SCHED = 1,
+};
+
+enum
xsc_eth_priv_flag { + XSC_PFLAG_RX_NO_CSUM_COMPLETE, + XSC_PFLAG_SNIFFER, + XSC_PFLAG_DROPLESS_RQ, + XSC_PFLAG_RX_COPY_BREAK, + XSC_NUM_PFLAGS, /* Keep last */ +}; + +#define XSC_SET_PFLAG(params, pflag, enable) \ + do { \ + if (enable) \ + (params)->pflags |= BIT(pflag); \ + else \ + (params)->pflags &= ~(BIT(pflag)); \ + } while (0) + +#define XSC_GET_PFLAG(params, pflag) (!!((params)->pflags & (BIT(pflag)))) + +struct xsc_eth_params { + u16 num_channels; + u16 max_num_ch; + u8 num_tc; + u32 mtu; + u32 hard_mtu; + u32 comp_vectors; + u32 sq_size; + u32 sq_max_size; + u8 rq_wq_type; + u32 rq_size; + u32 rq_max_size; + u32 rq_frags_size; + u32 pflags; +}; + +struct xsc_channel { + /* data path */ + struct xsc_eth_qp qp; + struct napi_struct napi; + u8 num_tc; + int chl_idx; + + /*relationship*/ + struct xsc_adapter *adapter; + struct net_device *netdev; + int cpu; + unsigned long flags; + + /* data path - accessed per napi poll */ + const struct cpumask *aff_mask; + struct irq_desc *irq_desc; +} ____cacheline_aligned_in_smp; + +struct xsc_eth_channels { + struct xsc_channel *c; + unsigned int num_chl; + u32 rqn_base; +}; + +union xsc_send_doorbell { + struct{ + s32 next_pid : 16; + u32 qp_num : 15; + }; + u32 send_data; +}; + +union xsc_recv_doorbell { + struct{ + s32 next_pid : 13; + u32 qp_num : 15; + }; + u32 recv_data; +}; + +#endif diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_rx.c b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_rx.c new file mode 100644 index 0000000000000..1d4c5574c9f1c --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_rx.c @@ -0,0 +1,601 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2021-2025, Shanghai Yunsilicon Technology Co., Ltd. All + * rights reserved. + * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved. 
+ */ + +#include +#include + +#include "common/xsc_pp.h" +#include "xsc_eth.h" +#include "xsc_eth_txrx.h" +#include "xsc_eth_common.h" +#include "xsc_pph.h" + +#define PAGE_REF_ELEV (U16_MAX) +/* Upper bound on number of packets that share a single page */ +#define PAGE_REF_THRSD (PAGE_SIZE / 64) + +static void xsc_rq_notify_hw(struct xsc_rq *rq) +{ + struct xsc_core_device *xdev = rq->cq.xdev; + union xsc_recv_doorbell doorbell_value; + struct xsc_wq_cyc *wq = &rq->wqe.wq; + u64 rqwqe_id; + + rqwqe_id = wq->wqe_ctr << (ilog2(xdev->caps.recv_ds_num)); + /*reverse wqe index to ds index*/ + doorbell_value.next_pid = rqwqe_id; + doorbell_value.qp_num = rq->rqn; + + /* Make sure that descriptors are written before + * updating doorbell record and ringing the doorbell + */ + wmb(); + writel(doorbell_value.recv_data, XSC_REG_ADDR(xdev, xdev->regs.rx_db)); +} + +static void xsc_skb_set_hash(struct xsc_adapter *adapter, + struct xsc_cqe *cqe, + struct sk_buff *skb) +{ + struct xsc_rss_params *rss = &adapter->rss_param; + bool l3_hash = false; + bool l4_hash = false; + u32 hash_field; + int ht = 0; + + if (adapter->netdev->features & NETIF_F_RXHASH) { + if (skb->protocol == htons(ETH_P_IP)) { + hash_field = rss->rx_hash_fields[XSC_TT_IPV4_TCP]; + if (hash_field & XSC_HASH_FIELD_SEL_SRC_IP || + hash_field & XSC_HASH_FIELD_SEL_DST_IP) + l3_hash = true; + + if (hash_field & XSC_HASH_FIELD_SEL_SPORT || + hash_field & XSC_HASH_FIELD_SEL_DPORT) + l4_hash = true; + } else if (skb->protocol == htons(ETH_P_IPV6)) { + hash_field = rss->rx_hash_fields[XSC_TT_IPV6_TCP]; + if (hash_field & XSC_HASH_FIELD_SEL_SRC_IPV6 || + hash_field & XSC_HASH_FIELD_SEL_DST_IPV6) + l3_hash = true; + + if (hash_field & XSC_HASH_FIELD_SEL_SPORT_V6 || + hash_field & XSC_HASH_FIELD_SEL_DPORT_V6) + l4_hash = true; + } + + if (l3_hash && l4_hash) + ht = PKT_HASH_TYPE_L4; + else if (l3_hash) + ht = PKT_HASH_TYPE_L3; + if (ht) + skb_set_hash(skb, le32_to_cpu(cqe->vni), ht); + } +} + +static void xsc_handle_csum(struct xsc_cqe *cqe, struct xsc_rq *rq, + struct sk_buff *skb, struct xsc_wqe_frag_info *wi) +{ + struct xsc_dma_info *dma_info; + struct net_device *netdev; + struct epp_pph *hw_pph; + struct xsc_channel *c; + int offset_from; + + c = rq->cq.channel; + netdev = c->adapter->netdev; + dma_info = wi->di; + offset_from = wi->offset; + hw_pph = page_address(dma_info->page) + offset_from; + + if (unlikely((netdev->features & NETIF_F_RXCSUM) == 0)) + goto csum_none; + + if (unlikely(XSC_GET_EPP2SOC_PPH_ERROR_BITMAP(hw_pph) & PACKET_UNKNOWN)) + goto csum_none; + + if (XSC_GET_EPP2SOC_PPH_EXT_TUNNEL_TYPE(hw_pph) && + (!(FIELD_GET(XSC_CQE_CSUM_ERR_MASK, le32_to_cpu(cqe->data0)) & + OUTER_AND_INNER))) { + skb->ip_summed = CHECKSUM_UNNECESSARY; + skb->csum_level = 1; + skb->encapsulation = 1; + } else if (XSC_GET_EPP2SOC_PPH_EXT_TUNNEL_TYPE(hw_pph) && + (!(FIELD_GET(XSC_CQE_CSUM_ERR_MASK, + le32_to_cpu(cqe->data0)) & + OUTER_BIT) && + (FIELD_GET(XSC_CQE_CSUM_ERR_MASK, + le32_to_cpu(cqe->data0)) & + INNER_BIT))) { + skb->ip_summed = CHECKSUM_UNNECESSARY; + skb->csum_level = 0; + skb->encapsulation = 1; + } else if (!XSC_GET_EPP2SOC_PPH_EXT_TUNNEL_TYPE(hw_pph) && + (!(FIELD_GET(XSC_CQE_CSUM_ERR_MASK, + le32_to_cpu(cqe->data0)) & + OUTER_BIT))) { + skb->ip_summed = CHECKSUM_UNNECESSARY; + } + + goto out; + +csum_none: + skb->csum = 0; + skb->ip_summed = CHECKSUM_NONE; +out: + return; +} + +static void xsc_build_rx_skb(struct xsc_cqe *cqe, + u32 cqe_bcnt, + struct xsc_rq *rq, + struct sk_buff *skb, + struct xsc_wqe_frag_info *wi) +{ + struct 
xsc_adapter *adapter; + struct net_device *netdev; + struct xsc_channel *c; + + c = rq->cq.channel; + adapter = c->adapter; + netdev = c->netdev; + + skb->mac_len = ETH_HLEN; + + skb_record_rx_queue(skb, rq->ix); + xsc_handle_csum(cqe, rq, skb, wi); + + skb->protocol = eth_type_trans(skb, netdev); + xsc_skb_set_hash(adapter, cqe, skb); +} + +static void xsc_complete_rx_cqe(struct xsc_rq *rq, + struct xsc_cqe *cqe, + u32 cqe_bcnt, + struct sk_buff *skb, + struct xsc_wqe_frag_info *wi) +{ + struct xsc_rq_stats *stats = rq->stats; + + stats->packets++; + stats->bytes += cqe_bcnt; + xsc_build_rx_skb(cqe, cqe_bcnt, rq, skb, wi); +} + +static void xsc_add_skb_frag(struct xsc_rq *rq, + struct sk_buff *skb, + struct xsc_dma_info *di, + u32 frag_offset, u32 len, + unsigned int truesize) +{ + struct xsc_channel *c = rq->cq.channel; + struct device *dev = c->adapter->dev; + + dma_sync_single_for_cpu(dev, di->addr + frag_offset, + len, DMA_FROM_DEVICE); + page_ref_inc(di->page); + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, + di->page, frag_offset, len, truesize); +} + +static void xsc_copy_skb_header(struct device *dev, + struct sk_buff *skb, + struct xsc_dma_info *dma_info, + int offset_from, u32 headlen) +{ + void *from = page_address(dma_info->page) + offset_from; + /* Aligning len to sizeof(long) optimizes memcpy performance */ + unsigned int len = ALIGN(headlen, sizeof(long)); + + dma_sync_single_for_cpu(dev, dma_info->addr + offset_from, len, + DMA_FROM_DEVICE); + skb_copy_to_linear_data(skb, from, len); +} + +static struct sk_buff *xsc_build_linear_skb(struct xsc_rq *rq, void *va, + u32 frag_size, u16 headroom, + u32 cqe_bcnt) +{ + struct sk_buff *skb = build_skb(va, frag_size); + + if (unlikely(!skb)) + return NULL; + + skb_reserve(skb, headroom); + skb_put(skb, cqe_bcnt); + + return skb; +} + +struct sk_buff *xsc_skb_from_cqe_linear(struct xsc_rq *rq, + struct xsc_wqe_frag_info *wi, + u32 cqe_bcnt, u8 has_pph) +{ + int pph_len = has_pph ? 
XSC_PPH_HEAD_LEN : 0; + u16 rx_headroom = rq->buff.headroom; + struct xsc_dma_info *di = wi->di; + struct sk_buff *skb; + void *va, *data; + u32 frag_size; + + va = page_address(di->page) + wi->offset; + data = va + rx_headroom + pph_len; + frag_size = XSC_SKB_FRAG_SZ(rx_headroom + cqe_bcnt); + + dma_sync_single_range_for_cpu(rq->cq.xdev->device, di->addr, wi->offset, + frag_size, DMA_FROM_DEVICE); + net_prefetchw(va); /* xdp_frame data area */ + net_prefetch(data); + + skb = xsc_build_linear_skb(rq, va, frag_size, (rx_headroom + pph_len), + (cqe_bcnt - pph_len)); + if (unlikely(!skb)) + return NULL; + + /* queue up for recycling/reuse */ + page_ref_inc(di->page); + + return skb; +} + +struct sk_buff *xsc_skb_from_cqe_nonlinear(struct xsc_rq *rq, + struct xsc_wqe_frag_info *wi, + u32 cqe_bcnt, u8 has_pph) +{ + struct xsc_rq_frag_info *frag_info = &rq->wqe.info.arr[0]; + u16 headlen = min_t(u32, XSC_RX_MAX_HEAD, cqe_bcnt); + struct xsc_wqe_frag_info *head_wi = wi; + struct xsc_wqe_frag_info *rx_wi = wi; + u16 head_offset = head_wi->offset; + u16 byte_cnt = cqe_bcnt - headlen; + u16 frag_consumed_bytes = 0; + u16 frag_headlen = headlen; + struct net_device *netdev; + struct xsc_channel *c; + struct sk_buff *skb; + struct device *dev; + u8 fragcnt = 0; + int i = 0; + + c = rq->cq.channel; + dev = c->adapter->dev; + netdev = c->adapter->netdev; + + skb = napi_alloc_skb(rq->cq.napi, ALIGN(XSC_RX_MAX_HEAD, sizeof(long))); + if (unlikely(!skb)) + return NULL; + + net_prefetchw(skb->data); + + if (likely(has_pph)) { + headlen = min_t(u32, XSC_RX_MAX_HEAD, + (cqe_bcnt - XSC_PPH_HEAD_LEN)); + frag_headlen = headlen + XSC_PPH_HEAD_LEN; + byte_cnt = cqe_bcnt - headlen - XSC_PPH_HEAD_LEN; + head_offset += XSC_PPH_HEAD_LEN; + } + + if (byte_cnt == 0 && + (XSC_GET_PFLAG(&c->adapter->nic_param, XSC_PFLAG_RX_COPY_BREAK))) { + for (i = 0; i < rq->wqe.info.num_frags; i++, wi++) + wi->is_available = 1; + goto out; + } + + for (i = 0; i < rq->wqe.info.num_frags; i++, rx_wi++) + rx_wi->is_available = 0; + + while (byte_cnt) { + /*figure out whether the first fragment can be a page ?*/ + frag_consumed_bytes = + min_t(u16, frag_info->frag_size - frag_headlen, + byte_cnt); + + xsc_add_skb_frag(rq, skb, wi->di, wi->offset + frag_headlen, + frag_consumed_bytes, frag_info->frag_stride); + byte_cnt -= frag_consumed_bytes; + + /*to protect extend wqe read, drop exceed bytes*/ + frag_headlen = 0; + fragcnt++; + if (fragcnt == rq->wqe.info.num_frags) { + if (byte_cnt) { + netdev_warn(netdev, + "large packet reach the maximum rev-wqe num.\n"); + netdev_warn(netdev, + "%u bytes dropped: frag_num=%d, headlen=%d, cqe_cnt=%d, frag0_bytes=%d, frag_size=%d\n", + byte_cnt, fragcnt, + headlen, cqe_bcnt, + frag_consumed_bytes, + frag_info->frag_size); + } + break; + } + + frag_info++; + wi++; + } + +out: + /* copy header */ + xsc_copy_skb_header(dev, skb, head_wi->di, head_offset, headlen); + + /* skb linear part was allocated with headlen and aligned to long */ + skb->tail += headlen; + skb->len += headlen; + + return skb; +} + +static void xsc_page_dma_unmap(struct xsc_rq *rq, struct xsc_dma_info *dma_info) +{ + struct xsc_channel *c = rq->cq.channel; + struct device *dev = c->adapter->dev; + + dma_unmap_page(dev, dma_info->addr, XSC_RX_FRAG_SZ, rq->buff.map_dir); +} + +static void xsc_page_release_dynamic(struct xsc_rq *rq, + struct xsc_dma_info *dma_info, + bool recycle) +{ + xsc_page_dma_unmap(rq, dma_info); + page_pool_recycle_direct(rq->page_pool, dma_info->page); +} + +static void xsc_put_rx_frag(struct xsc_rq *rq, + 
struct xsc_wqe_frag_info *frag, bool recycle) +{ + if (frag->last_in_page) + xsc_page_release_dynamic(rq, frag->di, recycle); +} + +static struct xsc_wqe_frag_info *get_frag(struct xsc_rq *rq, u16 ix) +{ + return &rq->wqe.frags[ix << rq->wqe.info.log_num_frags]; +} + +static void xsc_free_rx_wqe(struct xsc_rq *rq, + struct xsc_wqe_frag_info *wi, bool recycle) +{ + int i; + + for (i = 0; i < rq->wqe.info.num_frags; i++, wi++) { + if (wi->is_available && recycle) + continue; + xsc_put_rx_frag(rq, wi, recycle); + } +} + +static void xsc_dump_error_rqcqe(struct xsc_rq *rq, + struct xsc_cqe *cqe) +{ + struct net_device *netdev; + struct xsc_channel *c; + u32 ci; + + c = rq->cq.channel; + netdev = c->adapter->netdev; + ci = xsc_cqwq_get_ci(&rq->cq.wq); + + net_err_ratelimited("Error cqe on dev=%s, cqn=%d, ci=%d, rqn=%d, qpn=%ld, error_code=0x%x\n", + netdev->name, rq->cq.xcq.cqn, ci, + rq->rqn, + FIELD_GET(XSC_CQE_QP_ID_MASK, + le32_to_cpu(cqe->data0)), + get_cqe_opcode(cqe)); +} + +void xsc_eth_handle_rx_cqe(struct xsc_cqwq *cqwq, + struct xsc_rq *rq, struct xsc_cqe *cqe) +{ + struct xsc_channel *c = rq->cq.channel; + struct xsc_wq_cyc *wq = &rq->wqe.wq; + u8 cqe_opcode = get_cqe_opcode(cqe); + struct xsc_wqe_frag_info *wi; + struct sk_buff *skb; + u32 cqe_bcnt; + u16 ci; + + ci = xsc_wq_cyc_ctr2ix(wq, cqwq->cc); + wi = get_frag(rq, ci); + if (unlikely(cqe_opcode & BIT(7))) { + xsc_dump_error_rqcqe(rq, cqe); + goto free_wqe; + } + + cqe_bcnt = le32_to_cpu(cqe->msg_len); + if ((le32_to_cpu(cqe->data0) & XSC_CQE_HAS_PPH) && + cqe_bcnt <= XSC_PPH_HEAD_LEN) + goto free_wqe; + + if (unlikely(cqe_bcnt > rq->frags_sz)) { + if (!XSC_GET_PFLAG(&c->adapter->nic_param, + XSC_PFLAG_DROPLESS_RQ)) + goto free_wqe; + } + + cqe_bcnt = min_t(u32, cqe_bcnt, rq->frags_sz); + skb = rq->wqe.skb_from_cqe(rq, wi, cqe_bcnt, + !!(le32_to_cpu(cqe->data0) & + XSC_CQE_HAS_PPH)); + if (!skb) + goto free_wqe; + + xsc_complete_rx_cqe(rq, cqe, + (le32_to_cpu(cqe->data0) & XSC_CQE_HAS_PPH) ? + cqe_bcnt - XSC_PPH_HEAD_LEN : cqe_bcnt, + skb, wi); + + napi_gro_receive(rq->cq.napi, skb); + +free_wqe: + xsc_free_rx_wqe(rq, wi, true); + xsc_wq_cyc_pop(wq); +} + +int xsc_poll_rx_cq(struct xsc_cq *cq, int budget) +{ + struct xsc_rq *rq = container_of(cq, struct xsc_rq, cq); + struct xsc_cqwq *cqwq = &cq->wq; + struct xsc_cqe *cqe; + int work_done = 0; + + if (!test_bit(XSC_ETH_RQ_STATE_ENABLED, &rq->state)) + return 0; + + while ((work_done < budget) && (cqe = xsc_cqwq_get_cqe(cqwq))) { + rq->handle_rx_cqe(cqwq, rq, cqe); + ++work_done; + + xsc_cqwq_pop(cqwq); + } + + if (!work_done) + goto out; + + xsc_cq_notify_hw(cq); + /* ensure cq space is freed before enabling more cqes */ + wmb(); + +out: + return work_done; +} + +static int xsc_page_alloc_mapped(struct xsc_rq *rq, + struct xsc_dma_info *dma_info) +{ + struct xsc_channel *c = rq->cq.channel; + struct device *dev = c->adapter->dev; + + dma_info->page = page_pool_dev_alloc_pages(rq->page_pool); + if (unlikely(!dma_info->page)) + return -ENOMEM; + + dma_info->addr = dma_map_page(dev, dma_info->page, 0, + XSC_RX_FRAG_SZ, rq->buff.map_dir); + if (unlikely(dma_mapping_error(dev, dma_info->addr))) { + page_pool_recycle_direct(rq->page_pool, dma_info->page); + dma_info->page = NULL; + return -ENOMEM; + } + + return 0; +} + +static int xsc_get_rx_frag(struct xsc_rq *rq, + struct xsc_wqe_frag_info *frag) +{ + int err = 0; + + if (!frag->offset && !frag->is_available) + /* On first frag (offset == 0), replenish page (dma_info + * actually). 
Other frags that point to the same dma_info + * (with a different offset) should just use the new one + * without replenishing again by themselves. + */ + err = xsc_page_alloc_mapped(rq, frag->di); + + return err; +} + +static int xsc_alloc_rx_wqe(struct xsc_rq *rq, + struct xsc_eth_rx_wqe_cyc *wqe, + u16 ix) +{ + struct xsc_wqe_frag_info *frag = get_frag(rq, ix); + u64 addr; + int err; + int i; + + for (i = 0; i < rq->wqe.info.num_frags; i++, frag++) { + err = xsc_get_rx_frag(rq, frag); + if (unlikely(err)) + goto err_free_frags; + + addr = frag->di->addr + frag->offset + rq->buff.headroom; + wqe->data[i].va = cpu_to_le64(addr); + } + + return 0; + +err_free_frags: + while (--i >= 0) + xsc_put_rx_frag(rq, --frag, true); + + return err; +} + +void xsc_eth_dealloc_rx_wqe(struct xsc_rq *rq, u16 ix) +{ + struct xsc_wqe_frag_info *wi = get_frag(rq, ix); + + xsc_free_rx_wqe(rq, wi, false); +} + +static int xsc_alloc_rx_wqes(struct xsc_rq *rq, u16 ix, u8 wqe_bulk) +{ + struct xsc_wq_cyc *wq = &rq->wqe.wq; + struct xsc_eth_rx_wqe_cyc *wqe; + int err; + int idx; + int i; + + for (i = 0; i < wqe_bulk; i++) { + idx = xsc_wq_cyc_ctr2ix(wq, (ix + i)); + wqe = xsc_wq_cyc_get_wqe(wq, idx); + + err = xsc_alloc_rx_wqe(rq, wqe, idx); + if (unlikely(err)) + goto err_free_wqes; + } + + return 0; + +err_free_wqes: + while (--i >= 0) + xsc_eth_dealloc_rx_wqe(rq, ix + i); + + return err; +} + +bool xsc_eth_post_rx_wqes(struct xsc_rq *rq) +{ + struct xsc_wq_cyc *wq = &rq->wqe.wq; + u8 wqe_bulk, wqe_bulk_min; + int alloc; + u16 head; + int err; + + wqe_bulk = rq->wqe.info.wqe_bulk; + wqe_bulk_min = rq->wqe.info.wqe_bulk_min; + if (xsc_wq_cyc_missing(wq) < wqe_bulk) + return false; + + do { + head = xsc_wq_cyc_get_head(wq); + + alloc = min_t(int, wqe_bulk, xsc_wq_cyc_missing(wq)); + if (alloc < wqe_bulk && alloc >= wqe_bulk_min) + alloc = alloc & 0xfffffffe; + + if (alloc > 0) { + err = xsc_alloc_rx_wqes(rq, head, alloc); + if (unlikely(err)) + break; + + xsc_wq_cyc_push_n(wq, alloc); + } + } while (xsc_wq_cyc_missing(wq) >= wqe_bulk_min); + + dma_wmb(); + + /* ensure wqes are visible to device before updating doorbell record */ + xsc_rq_notify_hw(rq); + + return !!err; +} diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_stats.c b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_stats.c new file mode 100644 index 0000000000000..38bab39b999b1 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_stats.c @@ -0,0 +1,46 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021-2025, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */ + +#include "xsc_eth_stats.h" +#include "xsc_eth.h" + +static int xsc_get_netdev_max_channels(struct xsc_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + return min_t(unsigned int, netdev->num_rx_queues, + netdev->num_tx_queues); +} + +static int xsc_get_netdev_max_tc(struct xsc_adapter *adapter) +{ + return adapter->nic_param.num_tc; +} + +void xsc_eth_fold_sw_stats64(struct xsc_adapter *adapter, + struct rtnl_link_stats64 *s) +{ + int i, j; + + for (i = 0; i < xsc_get_netdev_max_channels(adapter); i++) { + struct xsc_channel_stats *channel_stats; + struct xsc_rq_stats *rq_stats; + + channel_stats = &adapter->stats->channel_stats[i]; + rq_stats = &channel_stats->rq; + + s->rx_packets += rq_stats->packets; + s->rx_bytes += rq_stats->bytes; + + for (j = 0; j < xsc_get_netdev_max_tc(adapter); j++) { + struct xsc_sq_stats *sq_stats = &channel_stats->sq[j]; + + s->tx_packets += sq_stats->packets; + s->tx_bytes += sq_stats->bytes; + s->tx_dropped += sq_stats->dropped; + } + } +} + diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_stats.h b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_stats.h new file mode 100644 index 0000000000000..1828f4608d0c3 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_stats.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021-2025, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#ifndef __XSC_EN_STATS_H +#define __XSC_EN_STATS_H + +#include "xsc_eth_common.h" + +struct xsc_rq_stats { + u64 packets; + u64 bytes; +}; + +struct xsc_sq_stats { + u64 packets; + u64 bytes; + u64 dropped; +}; + +struct xsc_channel_stats { + struct xsc_sq_stats sq[XSC_MAX_NUM_TC]; + struct xsc_rq_stats rq; +} ____cacheline_aligned_in_smp; + +struct xsc_stats { + struct xsc_channel_stats channel_stats[XSC_ETH_MAX_NUM_CHANNELS]; +}; + +void xsc_eth_fold_sw_stats64(struct xsc_adapter *adapter, + struct rtnl_link_stats64 *s); + +#endif /* XSC_EN_STATS_H */ diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_tx.c b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_tx.c new file mode 100644 index 0000000000000..044b5a0e46730 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_tx.c @@ -0,0 +1,321 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021-2025, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */ + +#include + +#include "xsc_eth.h" +#include "xsc_eth_txrx.h" + +#define XSC_OPCODE_RAW 7 + +static void xsc_dma_push(struct xsc_sq *sq, dma_addr_t addr, u32 size, + enum xsc_dma_map_type map_type) +{ + struct xsc_sq_dma *dma = xsc_dma_get(sq, sq->dma_fifo_pc++); + + dma->addr = addr; + dma->size = size; + dma->type = map_type; +} + +static void xsc_dma_unmap_wqe(struct xsc_sq *sq, u8 num_dma) +{ + struct xsc_adapter *adapter = sq->channel->adapter; + struct device *dev = adapter->dev; + int i; + + for (i = 0; i < num_dma; i++) { + struct xsc_sq_dma *last_pushed_dma; + + last_pushed_dma = xsc_dma_get(sq, --sq->dma_fifo_pc); + xsc_tx_dma_unmap(dev, last_pushed_dma); + } +} + +static void *xsc_sq_fetch_wqe(struct xsc_sq *sq, size_t size, u16 *pi) +{ + struct xsc_wq_cyc *wq = &sq->wq; + void *wqe; + + /*caution, sp->pc is default to be zero*/ + *pi = xsc_wq_cyc_ctr2ix(wq, sq->pc); + wqe = xsc_wq_cyc_get_wqe(wq, *pi); + memset(wqe, 0, size); + + return wqe; +} + +static u16 xsc_tx_get_gso_ihs(struct xsc_sq *sq, struct sk_buff *skb) +{ + u16 ihs; + + if (skb->encapsulation) { + ihs = skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb); + } else { + if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) + ihs = skb_transport_offset(skb) + sizeof(struct udphdr); + else + ihs = skb_transport_offset(skb) + tcp_hdrlen(skb); + } + + return ihs; +} + +static void xsc_txwqe_build_cseg_csum(struct xsc_sq *sq, + struct sk_buff *skb, + struct xsc_send_wqe_ctrl_seg *cseg) +{ + u32 val = le32_to_cpu(cseg->data0); + + if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) { + if (skb->encapsulation) + val |= FIELD_PREP(XSC_WQE_CTRL_SEG_CSUM_EN_MASK, + XSC_ETH_WQE_INNER_AND_OUTER_CSUM); + else + val |= FIELD_PREP(XSC_WQE_CTRL_SEG_CSUM_EN_MASK, + XSC_ETH_WQE_OUTER_CSUM); + } else { + val |= FIELD_PREP(XSC_WQE_CTRL_SEG_CSUM_EN_MASK, + XSC_ETH_WQE_NONE_CSUM); + } + cseg->data0 = cpu_to_le32(val); +} + +static void xsc_txwqe_build_csegs(struct xsc_sq *sq, struct sk_buff *skb, + u16 mss, u16 ihs, u16 headlen, + u8 opcode, u16 ds_cnt, u32 num_bytes, + struct xsc_send_wqe_ctrl_seg *cseg) +{ + struct xsc_core_device *xdev = sq->cq.xdev; + int send_wqe_ds_num_log; + u32 val = 0; + + send_wqe_ds_num_log = ilog2(xdev->caps.send_ds_num); + xsc_txwqe_build_cseg_csum(sq, skb, cseg); + + if (mss != 0) { + val |= XSC_WQE_CTRL_SEG_HAS_PPH | + XSC_WQE_CTRL_SEG_SO_TYPE | + FIELD_PREP(XSC_WQE_CTRL_SEG_SO_HDR_LEN_MASK, ihs) | + FIELD_PREP(XSC_WQE_CTRL_SEG_SO_DATA_SIZE_MASK, mss); + cseg->data2 = cpu_to_le32(val); + } + + val = le32_to_cpu(cseg->data0); + val |= FIELD_PREP(XSC_WQE_CTRL_SEG_MSG_OPCODE_MASK, opcode) | + FIELD_PREP(XSC_WQE_CTRL_SEG_WQE_ID_MASK, + sq->pc << send_wqe_ds_num_log) | + FIELD_PREP(XSC_WQE_CTRL_SEG_DS_DATA_NUM_MASK, + ds_cnt - XSC_SEND_WQEBB_CTRL_NUM_DS); + cseg->data0 = cpu_to_le32(val); + cseg->msg_len = cpu_to_le32(num_bytes); + cseg->data3 = cpu_to_le32(XSC_WQE_CTRL_SEG_CE); +} + +static int xsc_txwqe_build_dsegs(struct xsc_sq *sq, struct sk_buff *skb, + u16 ihs, u16 headlen, + struct xsc_wqe_data_seg *dseg) +{ + struct xsc_adapter *adapter = sq->channel->adapter; + struct device *dev = adapter->dev; + dma_addr_t dma_addr = 0; + u8 num_dma = 0; + int i; + + if (headlen) { + dma_addr = dma_map_single(dev, + skb->data, + headlen, + DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(dev, dma_addr))) + goto err_dma_unmap_wqe; + + dseg->va = cpu_to_le64(dma_addr); + dseg->mkey = cpu_to_le32(be32_to_cpu(sq->mkey_be)); + dseg->data0 |= + cpu_to_le32(FIELD_PREP(XSC_WQE_DATA_SEG_SEG_LEN_MASK, + headlen)); + + 
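+		/* record the mapping so error/completion paths can unmap it */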
		xsc_dma_push(sq, dma_addr, headlen, XSC_DMA_MAP_SINGLE);
+		num_dma++;
+		dseg++;
+	}
+
+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+		int fsz = skb_frag_size(frag);
+
+		dma_addr = skb_frag_dma_map(dev, frag, 0, fsz, DMA_TO_DEVICE);
+		if (unlikely(dma_mapping_error(dev, dma_addr)))
+			goto err_dma_unmap_wqe;
+
+		dseg->va = cpu_to_le64(dma_addr);
+		dseg->mkey = cpu_to_le32(be32_to_cpu(sq->mkey_be));
+		dseg->data0 |=
+			cpu_to_le32(FIELD_PREP(XSC_WQE_DATA_SEG_SEG_LEN_MASK,
+					       fsz));
+
+		xsc_dma_push(sq, dma_addr, fsz, XSC_DMA_MAP_PAGE);
+		num_dma++;
+		dseg++;
+	}
+
+	return num_dma;
+
+err_dma_unmap_wqe:
+	xsc_dma_unmap_wqe(sq, num_dma);
+	return -ENOMEM;
+}
+
+static void xsc_sq_notify_hw(struct xsc_wq_cyc *wq, u16 pc,
+			     struct xsc_sq *sq)
+{
+	struct xsc_adapter *adapter = sq->channel->adapter;
+	struct xsc_core_device *xdev = adapter->xdev;
+	union xsc_send_doorbell doorbell_value;
+	int send_ds_num_log;
+
+	send_ds_num_log = ilog2(xdev->caps.send_ds_num);
+	/* convert the wqe index into a ds index */
+	doorbell_value.next_pid = pc << send_ds_num_log;
+	doorbell_value.qp_num = sq->sqn;
+
+	/* Make sure that descriptors are written before
+	 * updating doorbell record and ringing the doorbell
+	 */
+	wmb();
+	writel(doorbell_value.send_data, XSC_REG_ADDR(xdev, xdev->regs.tx_db));
+}
+
+static void xsc_txwqe_complete(struct xsc_sq *sq, struct sk_buff *skb,
+			       u8 opcode, u16 ds_cnt,
+			       u8 num_wqebbs, u32 num_bytes, u8 num_dma,
+			       struct xsc_tx_wqe_info *wi)
+{
+	struct xsc_wq_cyc *wq = &sq->wq;
+
+	wi->num_bytes = num_bytes;
+	wi->num_dma = num_dma;
+	wi->num_wqebbs = num_wqebbs;
+	wi->skb = skb;
+
+	netdev_tx_sent_queue(sq->txq, num_bytes);
+
+	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
+		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+
+	sq->pc += wi->num_wqebbs;
+
+	if (unlikely(!xsc_wqc_has_room_for(wq, sq->cc, sq->pc, sq->stop_room)))
+		netif_tx_stop_queue(sq->txq);
+
+	if (!netdev_xmit_more() || netif_xmit_stopped(sq->txq))
+		xsc_sq_notify_hw(wq, sq->pc, sq);
+}
+
+static netdev_tx_t xsc_eth_xmit_frame(struct sk_buff *skb,
+				      struct xsc_sq *sq,
+				      struct xsc_tx_wqe *wqe,
+				      u16 pi)
+{
+	struct xsc_core_device *xdev = sq->cq.xdev;
+	struct xsc_sq_stats *stats = sq->stats;
+	struct xsc_send_wqe_ctrl_seg *cseg;
+	struct xsc_wqe_data_seg *dseg;
+	struct xsc_tx_wqe_info *wi;
+	u16 mss, ihs, headlen;
+	u32 num_bytes;
+	u8 num_wqebbs;
+	int num_dma;
+	u16 ds_cnt;
+	u8 opcode;
+
+retry_send:
+	/* Calc ihs and ds cnt, no writes to wqe yet */
+	/* the ctrl DS is counted here and subtracted out in ds_data_num */
+	ds_cnt = XSC_SEND_WQEBB_CTRL_NUM_DS;
+
+	/* on this hardware, inline headers are tied to GSO */
+	if (skb_is_gso(skb)) {
+		opcode = XSC_OPCODE_RAW;
+		mss = skb_shinfo(skb)->gso_size;
+		ihs = xsc_tx_get_gso_ihs(sq, skb);
+		num_bytes = skb->len;
+		stats->packets += skb_shinfo(skb)->gso_segs;
+	} else {
+		opcode = XSC_OPCODE_RAW;
+		mss = 0;
+		ihs = 0;
+		num_bytes = skb->len;
+		stats->packets++;
+	}
+
+	/* linear data in skb */
+	headlen = skb->len - skb->data_len;
+	ds_cnt += !!headlen;
+	ds_cnt += skb_shinfo(skb)->nr_frags;
+
+	/* Check packet size.
	 */
+	if (unlikely(mss == 0 && num_bytes > sq->hw_mtu))
+		goto err_drop;
+
+	num_wqebbs = DIV_ROUND_UP(ds_cnt, xdev->caps.send_ds_num);
+	/* if ds_cnt needs more than one wqe, linearize and retry */
+	if (num_wqebbs != 1) {
+		/* undo this pass's packet count before retrying */
+		stats->packets -= skb_is_gso(skb) ?
+				  skb_shinfo(skb)->gso_segs : 1;
+		if (skb_linearize(skb))
+			goto err_drop;
+		goto retry_send;
+	}
+
+	/* fill wqe */
+	wi = (struct xsc_tx_wqe_info *)&sq->db.wqe_info[pi];
+	cseg = &wqe->ctrl;
+	dseg = &wqe->data[0];
+
+	if (unlikely(num_bytes == 0))
+		goto err_drop;
+
+	xsc_txwqe_build_csegs(sq, skb, mss, ihs, headlen,
+			      opcode, ds_cnt, num_bytes, cseg);
+
+	/* the inline header is also transported via DMA */
+	num_dma = xsc_txwqe_build_dsegs(sq, skb, ihs, headlen, dseg);
+	if (unlikely(num_dma < 0))
+		goto err_drop;
+
+	xsc_txwqe_complete(sq, skb, opcode, ds_cnt, num_wqebbs, num_bytes,
+			   num_dma, wi);
+	stats->bytes += num_bytes;
+
+	return NETDEV_TX_OK;
+
+err_drop:
+	stats->dropped++;
+	dev_kfree_skb_any(skb);
+
+	return NETDEV_TX_OK;
+}
+
+netdev_tx_t xsc_eth_xmit_start(struct sk_buff *skb, struct net_device *netdev)
+{
+	struct xsc_adapter *adapter = netdev_priv(netdev);
+	int ds_num = adapter->xdev->caps.send_ds_num;
+	struct xsc_tx_wqe *wqe;
+	struct xsc_sq *sq;
+	u16 pi;
+
+	if (!adapter ||
+	    !adapter->xdev ||
+	    adapter->status != XSCALE_ETH_DRIVER_OK)
+		return NETDEV_TX_BUSY;
+
+	sq = adapter->txq2sq[skb_get_queue_mapping(skb)];
+	if (unlikely(!sq))
+		return NETDEV_TX_BUSY;
+
+	wqe = xsc_sq_fetch_wqe(sq, ds_num * XSC_SEND_WQE_DS, &pi);
+
+	return xsc_eth_xmit_frame(skb, sq, wqe, pi);
+}
diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_txrx.c b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_txrx.c
new file mode 100644
index 0000000000000..8140b66bb3020
--- /dev/null
+++ b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_txrx.c
@@ -0,0 +1,188 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2021-2025, Shanghai Yunsilicon Technology Co., Ltd.
+ * All rights reserved.
diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_txrx.c b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_txrx.c
new file mode 100644
index 0000000000000..8140b66bb3020
--- /dev/null
+++ b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_txrx.c
@@ -0,0 +1,188 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2021-2025, Shanghai Yunsilicon Technology Co., Ltd.
+ * All rights reserved.
+ */
+
+#include "xsc_eth_common.h"
+#include "xsc_eth_txrx.h"
+
+void xsc_cq_notify_hw_rearm(struct xsc_cq *cq)
+{
+	u32 db_val = 0;
+
+	db_val |= FIELD_PREP(XSC_CQ_DB_NEXT_CID_MASK, cq->wq.cc) |
+		  FIELD_PREP(XSC_CQ_DB_CQ_ID_MASK, cq->xcq.cqn);
+
+	/* ensure doorbell record is visible to device
+	 * before ringing the doorbell
+	 */
+	wmb();
+	writel(db_val, XSC_REG_ADDR(cq->xdev, cq->xdev->regs.complete_db));
+}
+
+void xsc_cq_notify_hw(struct xsc_cq *cq)
+{
+	struct xsc_core_device *xdev = cq->xdev;
+	u32 db_val = 0;
+
+	db_val |= FIELD_PREP(XSC_CQ_DB_NEXT_CID_MASK, cq->wq.cc) |
+		  FIELD_PREP(XSC_CQ_DB_CQ_ID_MASK, cq->xcq.cqn);
+
+	/* ensure doorbell record is visible to device
+	 * before ringing the doorbell
+	 */
+	wmb();
+	writel(db_val, XSC_REG_ADDR(xdev, xdev->regs.complete_reg));
+}
+
+static bool xsc_channel_no_affinity_change(struct xsc_channel *c)
+{
+	int current_cpu = smp_processor_id();
+
+	return cpumask_test_cpu(current_cpu, c->aff_mask);
+}
+
+static void xsc_dump_error_sqcqe(struct xsc_sq *sq,
+				 struct xsc_cqe *cqe)
+{
+	struct net_device *netdev = sq->channel->netdev;
+	u32 ci = xsc_cqwq_get_ci(&sq->cq.wq);
+
+	net_err_ratelimited("Err cqe on dev %s cqn=0x%x ci=0x%x sqn=0x%x err_code=0x%x qpid=0x%lx\n",
+			    netdev->name, sq->cq.xcq.cqn, ci,
+			    sq->sqn, get_cqe_opcode(cqe),
+			    FIELD_GET(XSC_CQE_QP_ID_MASK,
+				      le32_to_cpu(cqe->data0)));
+}
+
+static bool xsc_poll_tx_cq(struct xsc_cq *cq, int napi_budget)
+{
+	struct xsc_adapter *adapter;
+	struct xsc_cqe *cqe;
+	struct device *dev;
+	struct xsc_sq *sq;
+	u32 dma_fifo_cc;
+	u32 nbytes = 0;
+	u16 npkts = 0;
+	int i = 0;
+	u16 sqcc;
+
+	sq = container_of(cq, struct xsc_sq, cq);
+	if (!test_bit(XSC_ETH_SQ_STATE_ENABLED, &sq->state))
+		return false;
+
+	adapter = sq->channel->adapter;
+	dev = adapter->dev;
+
+	cqe = xsc_cqwq_get_cqe(&cq->wq);
+	if (!cqe)
+		goto out;
+
+	if (unlikely(get_cqe_opcode(cqe) & BIT(7))) {
+		xsc_dump_error_sqcqe(sq, cqe);
+		return false;
+	}
+
+	sqcc = sq->cc;
+
+	/* avoid dirtying sq cache line every cqe */
+	dma_fifo_cc = sq->dma_fifo_cc;
+	i = 0;
+	do {
+		struct xsc_tx_wqe_info *wi;
+		struct sk_buff *skb;
+		int j;
+		u16 ci;
+
+		xsc_cqwq_pop(&cq->wq);
+
+		ci = xsc_wq_cyc_ctr2ix(&sq->wq, sqcc);
+		wi = &sq->db.wqe_info[ci];
+		skb = wi->skb;
+
+		/* a cqe may be outstanding with no skb attached
+		 * (no nop wqe is posted for it); skip it
+		 */
+		if (unlikely(!skb))
+			continue;
+
+		for (j = 0; j < wi->num_dma; j++) {
+			struct xsc_sq_dma *dma = xsc_dma_get(sq, dma_fifo_cc++);
+
+			xsc_tx_dma_unmap(dev, dma);
+		}
+
+		npkts++;
+		nbytes += wi->num_bytes;
+		sqcc += wi->num_wqebbs;
+		napi_consume_skb(skb, 0);
+
+	} while ((++i <= napi_budget) && (cqe = xsc_cqwq_get_cqe(&cq->wq)));
+
+	xsc_cq_notify_hw(cq);
+
+	/* ensure cq space is freed before enabling more cqes */
+	wmb();
+
+	sq->dma_fifo_cc = dma_fifo_cc;
+	sq->cc = sqcc;
+
+	netdev_tx_completed_queue(sq->txq, npkts, nbytes);
+
+	if (netif_tx_queue_stopped(sq->txq) &&
+	    xsc_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, sq->stop_room)) {
+		netif_tx_wake_queue(sq->txq);
+	}
+
+out:
+	return (i == napi_budget);
+}
+
+int xsc_eth_napi_poll(struct napi_struct *napi, int budget)
+{
+	struct xsc_channel *c = container_of(napi, struct xsc_channel, napi);
+	struct xsc_eth_params *params = &c->adapter->nic_param;
+	struct xsc_rq *rq = &c->qp.rq[0];
+	struct xsc_sq *sq = NULL;
+	bool busy = false;
+	int work_done = 0;
+	int tx_budget = 0;
+	int i;
+
+	rcu_read_lock();
+
+	clear_bit(XSC_CHANNEL_NAPI_SCHED, &c->flags);
+
+	tx_budget = params->sq_size >> 2;
+	for (i = 0; i < c->num_tc; i++)
+		busy |=
xsc_poll_tx_cq(&c->qp.sq[i].cq, tx_budget); + + /* budget=0 means: don't poll rx rings */ + if (likely(budget)) { + work_done = xsc_poll_rx_cq(&rq->cq, budget); + busy |= work_done == budget; + } + + busy |= rq->post_wqes(rq); + + if (busy) { + if (likely(xsc_channel_no_affinity_change(c))) { + rcu_read_unlock(); + return budget; + } + if (budget && work_done == budget) + work_done--; + } + + if (unlikely(!napi_complete_done(napi, work_done))) + goto err_out; + + for (i = 0; i < c->num_tc; i++) { + sq = &c->qp.sq[i]; + xsc_cq_notify_hw_rearm(&sq->cq); + } + + xsc_cq_notify_hw_rearm(&rq->cq); +err_out: + rcu_read_unlock(); + return work_done; +} + diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_txrx.h b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_txrx.h new file mode 100644 index 0000000000000..d0d303efaba4d --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_txrx.h @@ -0,0 +1,91 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021-2025, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#ifndef __XSC_RXTX_H +#define __XSC_RXTX_H + +#include +#include + +#include "xsc_eth.h" + +enum { + XSC_ETH_WQE_NONE_CSUM, + XSC_ETH_WQE_INNER_CSUM, + XSC_ETH_WQE_OUTER_CSUM, + XSC_ETH_WQE_INNER_AND_OUTER_CSUM, +}; + +void xsc_cq_notify_hw_rearm(struct xsc_cq *cq); +void xsc_cq_notify_hw(struct xsc_cq *cq); +int xsc_eth_napi_poll(struct napi_struct *napi, int budget); +bool xsc_eth_post_rx_wqes(struct xsc_rq *rq); +void xsc_eth_handle_rx_cqe(struct xsc_cqwq *cqwq, + struct xsc_rq *rq, struct xsc_cqe *cqe); +void xsc_eth_dealloc_rx_wqe(struct xsc_rq *rq, u16 ix); +struct sk_buff *xsc_skb_from_cqe_linear(struct xsc_rq *rq, + struct xsc_wqe_frag_info *wi, + u32 cqe_bcnt, u8 has_pph); +struct sk_buff *xsc_skb_from_cqe_nonlinear(struct xsc_rq *rq, + struct xsc_wqe_frag_info *wi, + u32 cqe_bcnt, u8 has_pph); +int xsc_poll_rx_cq(struct xsc_cq *cq, int budget); + +netdev_tx_t xsc_eth_xmit_start(struct sk_buff *skb, struct net_device *netdev); + +static inline void xsc_tx_dma_unmap(struct device *dev, struct xsc_sq_dma *dma) +{ + switch (dma->type) { + case XSC_DMA_MAP_SINGLE: + dma_unmap_single(dev, dma->addr, dma->size, DMA_TO_DEVICE); + break; + case XSC_DMA_MAP_PAGE: + dma_unmap_page(dev, dma->addr, dma->size, DMA_TO_DEVICE); + break; + default: + break; + } +} + +static inline struct xsc_sq_dma *xsc_dma_get(struct xsc_sq *sq, u32 i) +{ + return &sq->db.dma_fifo[i & sq->dma_fifo_mask]; +} + +static inline bool xsc_wqc_has_room_for(struct xsc_wq_cyc *wq, + u16 cc, u16 pc, u16 n) +{ + return (xsc_wq_cyc_ctr2ix(wq, cc - pc) >= n) || (cc == pc); +} + +static inline struct xsc_cqe *xsc_cqwq_get_cqe_buff(struct xsc_cqwq *wq, u32 ix) +{ + struct xsc_cqe *cqe = xsc_frag_buf_get_wqe(&wq->fbc, ix); + + return cqe; +} + +static inline struct xsc_cqe *xsc_cqwq_get_cqe(struct xsc_cqwq *wq) +{ + u32 ci = xsc_cqwq_get_ci(wq); + u8 cqe_ownership_bit; + struct xsc_cqe *cqe; + u8 sw_ownership_val; + + cqe = xsc_cqwq_get_cqe_buff(wq, ci); + + cqe_ownership_bit = !!(le32_to_cpu(cqe->data1) & XSC_CQE_OWNER); + sw_ownership_val = xsc_cqwq_get_wrap_cnt(wq) & 1; + + if (cqe_ownership_bit != sw_ownership_val) + return NULL; + + /* ensure cqe content is read after cqe ownership bit */ + dma_rmb(); + + return cqe; +} + +#endif /* XSC_RXTX_H */ diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_wq.c b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_wq.c new file mode 100644 index 0000000000000..20895b3638ec8 --- /dev/null +++ 
b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_wq.c @@ -0,0 +1,80 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2021-2025, Shanghai Yunsilicon Technology Co., Ltd. All + * rights reserved. + * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved. + */ + +#include "xsc_eth_wq.h" +#include "xsc_eth.h" + +u32 xsc_wq_cyc_get_size(struct xsc_wq_cyc *wq) +{ + return (u32)wq->fbc.sz_m1 + 1; +} + +static u32 wq_get_byte_sz(u8 log_sz, u8 log_stride) +{ + return ((u32)1 << log_sz) << log_stride; +} + +int xsc_eth_cqwq_create(struct xsc_core_device *xdev, + struct xsc_wq_param *param, + u8 q_log_size, + u8 ele_log_size, + struct xsc_cqwq *wq, + struct xsc_wq_ctrl *wq_ctrl) +{ + u8 log_wq_stride = ele_log_size; + u8 log_wq_sz = q_log_size; + int err; + + err = xsc_core_frag_buf_alloc_node(xdev, + wq_get_byte_sz(log_wq_sz, + log_wq_stride), + &wq_ctrl->buf, + param->buf_numa_node); + if (err) { + netdev_err(((struct xsc_adapter *)xdev->eth_priv)->netdev, + "xsc_core_frag_buf_alloc_node failed, %d\n", err); + return err; + } + + xsc_init_fbc(wq_ctrl->buf.frags, log_wq_stride, log_wq_sz, &wq->fbc); + wq_ctrl->xdev = xdev; + return 0; +} + +int xsc_eth_wq_cyc_create(struct xsc_core_device *xdev, + struct xsc_wq_param *param, + u8 q_log_size, + u8 ele_log_size, + struct xsc_wq_cyc *wq, + struct xsc_wq_ctrl *wq_ctrl) +{ + struct xsc_frag_buf_ctrl *fbc = &wq->fbc; + u8 log_wq_stride = ele_log_size; + u8 log_wq_sz = q_log_size; + int err; + + err = xsc_core_frag_buf_alloc_node(xdev, + wq_get_byte_sz(log_wq_sz, + log_wq_stride), + &wq_ctrl->buf, + param->buf_numa_node); + if (err) { + netdev_err(((struct xsc_adapter *)xdev->eth_priv)->netdev, + "xsc_core_frag_buf_alloc_node failed, %d\n", err); + return err; + } + + xsc_init_fbc(wq_ctrl->buf.frags, log_wq_stride, log_wq_sz, fbc); + wq->sz = xsc_wq_cyc_get_size(wq); + wq_ctrl->xdev = xdev; + return 0; +} + +void xsc_eth_wq_destroy(struct xsc_wq_ctrl *wq_ctrl) +{ + xsc_core_frag_buf_free(wq_ctrl->xdev, &wq_ctrl->buf); +} diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_wq.h b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_wq.h new file mode 100644 index 0000000000000..5abe155c09ba0 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_eth_wq.h @@ -0,0 +1,187 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2021-2025, Shanghai Yunsilicon Technology Co., Ltd. All + * rights reserved. + * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved. 
+ */ + +#ifndef __XSC_WQ_H +#define __XSC_WQ_H + +#include "common/xsc_core.h" + +struct xsc_wq_param { + int buf_numa_node; + int db_numa_node; +}; + +struct xsc_wq_ctrl { + struct xsc_core_device *xdev; + struct xsc_frag_buf buf; +}; + +struct xsc_wq_cyc { + struct xsc_frag_buf_ctrl fbc; + u16 sz; + u16 wqe_ctr; + u16 cur_sz; +}; + +struct xsc_cqwq { + struct xsc_frag_buf_ctrl fbc; + __be32 *db; + u32 cc; /* consumer counter */ +}; + +enum xsc_res_type { + XSC_RES_UND = 0, + XSC_RES_RQ, + XSC_RES_SQ, + XSC_RES_MAX, +}; + +u32 xsc_wq_cyc_get_size(struct xsc_wq_cyc *wq); + +/*api for eth driver*/ +int xsc_eth_cqwq_create(struct xsc_core_device *xdev, + struct xsc_wq_param *param, + u8 q_log_size, + u8 ele_log_size, + struct xsc_cqwq *wq, + struct xsc_wq_ctrl *wq_ctrl); + +int xsc_eth_wq_cyc_create(struct xsc_core_device *xdev, + struct xsc_wq_param *param, + u8 q_log_size, + u8 ele_log_size, + struct xsc_wq_cyc *wq, + struct xsc_wq_ctrl *wq_ctrl); +void xsc_eth_wq_destroy(struct xsc_wq_ctrl *wq_ctrl); + +static inline void xsc_init_fbc_offset(struct xsc_buf_list *frags, + u8 log_stride, u8 log_sz, + u16 strides_offset, + struct xsc_frag_buf_ctrl *fbc) +{ + fbc->frags = frags; + fbc->log_stride = log_stride; + fbc->log_sz = log_sz; + fbc->sz_m1 = (1 << fbc->log_sz) - 1; + fbc->log_frag_strides = PAGE_SHIFT - fbc->log_stride; + fbc->frag_sz_m1 = (1 << fbc->log_frag_strides) - 1; + fbc->strides_offset = strides_offset; +} + +static inline void xsc_init_fbc(struct xsc_buf_list *frags, + u8 log_stride, u8 log_sz, + struct xsc_frag_buf_ctrl *fbc) +{ + xsc_init_fbc_offset(frags, log_stride, log_sz, 0, fbc); +} + +static inline void *xsc_frag_buf_get_wqe(struct xsc_frag_buf_ctrl *fbc, + u32 ix) +{ + unsigned int frag; + + ix += fbc->strides_offset; + frag = ix >> fbc->log_frag_strides; + + return fbc->frags[frag].buf + + ((fbc->frag_sz_m1 & ix) << fbc->log_stride); +} + +static inline u32 +xsc_frag_buf_get_idx_last_contig_stride(struct xsc_frag_buf_ctrl *fbc, u32 ix) +{ + u32 last_frag_stride_idx = (ix + fbc->strides_offset) | fbc->frag_sz_m1; + + return min_t(u32, last_frag_stride_idx - fbc->strides_offset, + fbc->sz_m1); +} + +static inline int xsc_wq_cyc_missing(struct xsc_wq_cyc *wq) +{ + return wq->sz - wq->cur_sz; +} + +static inline int xsc_wq_cyc_is_empty(struct xsc_wq_cyc *wq) +{ + return !wq->cur_sz; +} + +static inline void xsc_wq_cyc_push(struct xsc_wq_cyc *wq) +{ + wq->wqe_ctr++; + wq->cur_sz++; +} + +static inline void xsc_wq_cyc_push_n(struct xsc_wq_cyc *wq, u8 n) +{ + wq->wqe_ctr += n; + wq->cur_sz += n; +} + +static inline void xsc_wq_cyc_pop(struct xsc_wq_cyc *wq) +{ + wq->cur_sz--; +} + +static inline u16 xsc_wq_cyc_ctr2ix(struct xsc_wq_cyc *wq, u16 ctr) +{ + return ctr & wq->fbc.sz_m1; +} + +static inline u16 xsc_wq_cyc_get_head(struct xsc_wq_cyc *wq) +{ + return xsc_wq_cyc_ctr2ix(wq, wq->wqe_ctr); +} + +static inline u16 xsc_wq_cyc_get_tail(struct xsc_wq_cyc *wq) +{ + return xsc_wq_cyc_ctr2ix(wq, wq->wqe_ctr - wq->cur_sz); +} + +static inline void *xsc_wq_cyc_get_wqe(struct xsc_wq_cyc *wq, u16 ix) +{ + return xsc_frag_buf_get_wqe(&wq->fbc, ix); +} + +static inline u32 xsc_cqwq_ctr2ix(struct xsc_cqwq *wq, u32 ctr) +{ + return ctr & wq->fbc.sz_m1; +} + +static inline u32 xsc_cqwq_get_ci(struct xsc_cqwq *wq) +{ + return xsc_cqwq_ctr2ix(wq, wq->cc); +} + +static inline u32 xsc_cqwq_get_ctr_wrap_cnt(struct xsc_cqwq *wq, u32 ctr) +{ + return ctr >> wq->fbc.log_sz; +} + +static inline u32 xsc_cqwq_get_wrap_cnt(struct xsc_cqwq *wq) +{ + return xsc_cqwq_get_ctr_wrap_cnt(wq, wq->cc); +} 
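The wrap-count helpers above feed the ownership check in xsc_cqwq_get_cqe() (xsc_eth_txrx.h): a cqe belongs to software only while its owner bit equals the low bit of the consumer's wrap count, which flips on every pass over the ring. A standalone illustration in userspace C (an 8-entry cq chosen for the example):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t log_sz = 3;                     /* 8-entry cq */
	uint32_t cc = 9;                         /* consumer counter */
	uint32_t ci = cc & ((1u << log_sz) - 1); /* slot 1, second pass */
	uint32_t sw_owner = (cc >> log_sz) & 1;  /* wrap count 1: expect 1 */
	uint32_t hw_owner = 0;                   /* cqe from the first pass */

	/* mismatch: the cqe is stale, xsc_cqwq_get_cqe() returns NULL */
	printf("ci=%u valid=%d\n", ci, hw_owner == sw_owner);
	return 0;
}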
+ +static inline void xsc_cqwq_pop(struct xsc_cqwq *wq) +{ + wq->cc++; +} + +static inline u32 xsc_cqwq_get_size(struct xsc_cqwq *wq) +{ + return wq->fbc.sz_m1 + 1; +} + +static inline struct xsc_cqe *xsc_cqwq_get_wqe(struct xsc_cqwq *wq, u32 ix) +{ + struct xsc_cqe *cqe = xsc_frag_buf_get_wqe(&wq->fbc, ix); + + return cqe; +} + +#endif /* __XSC_WQ_H */ diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_pph.h b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_pph.h new file mode 100644 index 0000000000000..34f1200e8bb58 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_pph.h @@ -0,0 +1,180 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021-2025, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#ifndef __XSC_PPH_H +#define __XSC_PPH_H + +#define XSC_PPH_HEAD_LEN 64 + +enum { + L4_PROTO_NONE = 0, + L4_PROTO_TCP = 1, + L4_PROTO_UDP = 2, + L4_PROTO_ICMP = 3, + L4_PROTO_GRE = 4, +}; + +enum { + L3_PROTO_NONE = 0, + L3_PROTO_IP = 2, + L3_PROTO_IP6 = 3, +}; + +struct epp_pph { + u16 outer_eth_type; + u16 inner_eth_type; + + u16 rsv1:1; + u16 outer_vlan_flag:2; + u16 outer_ip_type:2; + u16 outer_ip_ofst:5; + u16 outer_ip_len:6; + + u16 rsv2:1; + u16 outer_tp_type:3; + u16 outer_tp_csum_flag:1; + u16 outer_tp_ofst:7; + u16 ext_tunnel_type:4; + + u8 tunnel_ofst; + u8 inner_mac_ofst; + + u32 rsv3:2; + u32 inner_mac_flag:1; + u32 inner_vlan_flag:2; + u32 inner_ip_type:2; + u32 inner_ip_ofst:8; + u32 inner_ip_len:6; + u32 inner_tp_type:2; + u32 inner_tp_csum_flag:1; + u32 inner_tp_ofst:8; + + u16 rsv4:1; + u16 payload_type:4; + u16 payload_ofst:8; + u16 pkt_type:3; + + u16 rsv5:2; + u16 pri:3; + u16 logical_in_port:11; + u16 vlan_info; + u8 error_bitmap:8; + + u8 rsv6:7; + u8 recirc_id_vld:1; + u16 recirc_id; + + u8 rsv7:7; + u8 recirc_data_vld:1; + u32 recirc_data; + + u8 rsv8:6; + u8 mark_tag_vld:2; + u16 mark_tag; + + u8 rsv9:4; + u8 upa_to_soc:1; + u8 upa_from_soc:1; + u8 upa_re_up_call:1; + u8 upa_pkt_drop:1; + + u8 ucdv; + u16 rsv10:2; + u16 pkt_len:14; + + u16 rsv11:2; + u16 pkt_hdr_ptr:14; + + u64 rsv12:5; + u64 csum_ofst:8; + u64 csum_val:29; + u64 csum_plen:14; + u64 rsv11_0:8; + + u64 rsv11_1; + u64 rsv11_2; + u16 rsv11_3; +}; + +#define OUTER_L3_BIT BIT(3) +#define OUTER_L4_BIT BIT(2) +#define INNER_L3_BIT BIT(1) +#define INNER_L4_BIT BIT(0) +#define OUTER_BIT (OUTER_L3_BIT | OUTER_L4_BIT) +#define INNER_BIT (INNER_L3_BIT | INNER_L4_BIT) +#define OUTER_AND_INNER (OUTER_BIT | INNER_BIT) + +#define PACKET_UNKNOWN BIT(4) + +#define EPP2SOC_PPH_EXT_TUNNEL_TYPE_OFFSET (6UL) +#define EPP2SOC_PPH_EXT_TUNNEL_TYPE_BIT_MASK (0XF00) +#define EPP2SOC_PPH_EXT_TUNNEL_TYPE_BIT_OFFSET (8) + +#define EPP2SOC_PPH_EXT_ERROR_BITMAP_OFFSET (20UL) +#define EPP2SOC_PPH_EXT_ERROR_BITMAP_BIT_MASK (0XFF) +#define EPP2SOC_PPH_EXT_ERROR_BITMAP_BIT_OFFSET (0) + +#define XSC_GET_EPP2SOC_PPH_EXT_TUNNEL_TYPE(PPH_BASE_ADDR) \ + ((*(u16 *)((u8 *)(PPH_BASE_ADDR) + \ + EPP2SOC_PPH_EXT_TUNNEL_TYPE_OFFSET) & \ + EPP2SOC_PPH_EXT_TUNNEL_TYPE_BIT_MASK) >> \ + EPP2SOC_PPH_EXT_TUNNEL_TYPE_BIT_OFFSET) + +#define XSC_GET_EPP2SOC_PPH_ERROR_BITMAP(PPH_BASE_ADDR) \ + ((*(u8 *)((u8 *)(PPH_BASE_ADDR) + \ + EPP2SOC_PPH_EXT_ERROR_BITMAP_OFFSET) & \ + EPP2SOC_PPH_EXT_ERROR_BITMAP_BIT_MASK) >> \ + EPP2SOC_PPH_EXT_ERROR_BITMAP_BIT_OFFSET) + +#define PPH_OUTER_IP_TYPE_OFF (4UL) +#define PPH_OUTER_IP_TYPE_MASK (0x3) +#define PPH_OUTER_IP_TYPE_SHIFT (11) +#define PPH_OUTER_IP_TYPE(base) \ + ((ntohs(*(u16 *)((u8 *)(base) + PPH_OUTER_IP_TYPE_OFF)) >> \ + PPH_OUTER_IP_TYPE_SHIFT) & 
PPH_OUTER_IP_TYPE_MASK)
+
+#define PPH_OUTER_IP_OFST_OFF		(4UL)
+#define PPH_OUTER_IP_OFST_MASK		(0x1f)
+#define PPH_OUTER_IP_OFST_SHIFT	(6)
+#define PPH_OUTER_IP_OFST(base) \
+	((ntohs(*(u16 *)((u8 *)(base) + PPH_OUTER_IP_OFST_OFF)) >> \
+	PPH_OUTER_IP_OFST_SHIFT) & PPH_OUTER_IP_OFST_MASK)
+
+#define PPH_OUTER_IP_LEN_OFF		(4UL)
+#define PPH_OUTER_IP_LEN_MASK		(0x3f)
+#define PPH_OUTER_IP_LEN_SHIFT		(0)
+#define PPH_OUTER_IP_LEN(base) \
+	((ntohs(*(u16 *)((u8 *)(base) + PPH_OUTER_IP_LEN_OFF)) >> \
+	PPH_OUTER_IP_LEN_SHIFT) & PPH_OUTER_IP_LEN_MASK)
+
+#define PPH_OUTER_TP_TYPE_OFF		(6UL)
+#define PPH_OUTER_TP_TYPE_MASK		(0x7)
+#define PPH_OUTER_TP_TYPE_SHIFT	(12)
+#define PPH_OUTER_TP_TYPE(base) \
+	((ntohs(*(u16 *)((u8 *)(base) + PPH_OUTER_TP_TYPE_OFF)) >> \
+	PPH_OUTER_TP_TYPE_SHIFT) & PPH_OUTER_TP_TYPE_MASK)
+
+#define PPH_PAYLOAD_OFST_OFF		(14UL)
+#define PPH_PAYLOAD_OFST_MASK		(0xff)
+#define PPH_PAYLOAD_OFST_SHIFT		(3)
+#define PPH_PAYLOAD_OFST(base) \
+	((ntohs(*(u16 *)((u8 *)(base) + PPH_PAYLOAD_OFST_OFF)) >> \
+	PPH_PAYLOAD_OFST_SHIFT) & PPH_PAYLOAD_OFST_MASK)
+
+#define PPH_CSUM_OFST_OFF		(38UL)
+#define PPH_CSUM_OFST_MASK		(0xff)
+#define PPH_CSUM_OFST_SHIFT		(51)
+#define PPH_CSUM_OFST(base) \
+	((be64_to_cpu(*(u64 *)((u8 *)(base) + PPH_CSUM_OFST_OFF)) >> \
+	PPH_CSUM_OFST_SHIFT) & PPH_CSUM_OFST_MASK)
+
+#define PPH_CSUM_VAL_OFF		(38UL)
+#define PPH_CSUM_VAL_MASK		(0xeffffff)
+#define PPH_CSUM_VAL_SHIFT		(22)
+#define PPH_CSUM_VAL(base) \
+	((be64_to_cpu(*(u64 *)((u8 *)(base) + PPH_CSUM_VAL_OFF)) >> \
+	PPH_CSUM_VAL_SHIFT) & PPH_CSUM_VAL_MASK)
+#endif /* __XSC_PPH_H */
+
diff --git a/drivers/net/ethernet/yunsilicon/xsc/net/xsc_queue.h b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_queue.h
new file mode 100644
index 0000000000000..83fb13d8e380d
--- /dev/null
+++ b/drivers/net/ethernet/yunsilicon/xsc/net/xsc_queue.h
@@ -0,0 +1,206 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2021-2025, Shanghai Yunsilicon Technology Co., Ltd.
+ * All rights reserved.
+ * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved.
+ */ + +#ifndef __XSC_QUEUE_H +#define __XSC_QUEUE_H + +#include +#include + +#include "common/xsc_core.h" +#include "xsc_eth_wq.h" + +enum { + XSC_SEND_WQE_DS = 16, + XSC_SEND_WQE_BB = 64, +}; + +enum { + XSC_RECV_WQE_DS = 16, + XSC_RECV_WQE_BB = 16, +}; + +enum { + XSC_ETH_RQ_STATE_ENABLED, + XSC_ETH_RQ_STATE_AM, + XSC_ETH_RQ_STATE_CACHE_REDUCE_PENDING, +}; + +#define XSC_SEND_WQEBB_NUM_DS (XSC_SEND_WQE_BB / XSC_SEND_WQE_DS) +#define XSC_LOG_SEND_WQEBB_NUM_DS ilog2(XSC_SEND_WQEBB_NUM_DS) + +#define XSC_RECV_WQEBB_NUM_DS (XSC_RECV_WQE_BB / XSC_RECV_WQE_DS) +#define XSC_LOG_RECV_WQEBB_NUM_DS ilog2(XSC_RECV_WQEBB_NUM_DS) + +#define XSC_SEND_WQEBB_CTRL_NUM_DS 1 + +/* each ds holds one fragment in skb */ +#define XSC_MAX_RX_FRAGS 4 +#define XSC_RX_FRAG_SZ_ORDER 0 +#define XSC_RX_FRAG_SZ (PAGE_SIZE << XSC_RX_FRAG_SZ_ORDER) +#define DEFAULT_FRAG_SIZE (2048) + +enum { + XSC_ETH_SQ_STATE_ENABLED, + XSC_ETH_SQ_STATE_AM, +}; + +struct xsc_dma_info { + struct page *page; + dma_addr_t addr; +}; + +struct xsc_page_cache { + struct xsc_dma_info *page_cache; + u32 head; + u32 tail; + u32 sz; + u32 resv; +}; + +struct xsc_cq { + /* data path - accessed per cqe */ + struct xsc_cqwq wq; + + /* data path - accessed per napi poll */ + u16 event_ctr; + struct napi_struct *napi; + struct xsc_core_cq xcq; + struct xsc_channel *channel; + + /* control */ + struct xsc_core_device *xdev; + struct xsc_wq_ctrl wq_ctrl; + u8 rx; +} ____cacheline_aligned_in_smp; + +struct xsc_wqe_frag_info { + struct xsc_dma_info *di; + u32 offset; + u8 last_in_page; + u8 is_available; +}; + +struct xsc_rq_frag_info { + int frag_size; + int frag_stride; +}; + +struct xsc_rq_frags_info { + struct xsc_rq_frag_info arr[XSC_MAX_RX_FRAGS]; + u8 num_frags; + u8 log_num_frags; + u8 wqe_bulk; + u8 wqe_bulk_min; + u8 frags_max_num; +}; + +struct xsc_rq; +typedef void (*xsc_fp_handle_rx_cqe)(struct xsc_cqwq *cqwq, struct xsc_rq *rq, + struct xsc_cqe *cqe); +typedef bool (*xsc_fp_post_rx_wqes)(struct xsc_rq *rq); +typedef void (*xsc_fp_dealloc_wqe)(struct xsc_rq *rq, u16 ix); +typedef struct sk_buff * (*xsc_fp_skb_from_cqe)(struct xsc_rq *rq, + struct xsc_wqe_frag_info *wi, + u32 cqe_bcnt, + u8 has_pph); + +struct xsc_rq { + struct xsc_core_qp cqp; + struct { + struct xsc_wq_cyc wq; + struct xsc_wqe_frag_info *frags; + struct xsc_dma_info *di; + struct xsc_rq_frags_info info; + xsc_fp_skb_from_cqe skb_from_cqe; + } wqe; + + struct { + u16 headroom; + u8 map_dir; /* dma map direction */ + } buff; + + struct page_pool *page_pool; + struct xsc_wq_ctrl wq_ctrl; + struct xsc_cq cq; + u32 rqn; + int ix; + + unsigned long state; + struct work_struct recover_work; + struct xsc_rq_stats *stats; + + u32 hw_mtu; + u32 frags_sz; + + xsc_fp_handle_rx_cqe handle_rx_cqe; + xsc_fp_post_rx_wqes post_wqes; + xsc_fp_dealloc_wqe dealloc_wqe; + struct xsc_page_cache page_cache; +} ____cacheline_aligned_in_smp; + +enum xsc_dma_map_type { + XSC_DMA_MAP_SINGLE, + XSC_DMA_MAP_PAGE +}; + +struct xsc_sq_dma { + dma_addr_t addr; + u32 size; + enum xsc_dma_map_type type; +}; + +struct xsc_tx_wqe_info { + struct sk_buff *skb; + u32 num_bytes; + u8 num_wqebbs; + u8 num_dma; +}; + +struct xsc_tx_wqe { + struct xsc_send_wqe_ctrl_seg ctrl; + struct xsc_wqe_data_seg data[]; +}; + +struct xsc_sq { + struct xsc_core_qp cqp; + /* dirtied @completion */ + u16 cc; + u32 dma_fifo_cc; + + /* dirtied @xmit */ + u16 pc ____cacheline_aligned_in_smp; + u32 dma_fifo_pc; + + struct xsc_cq cq; + + /* read only */ + struct xsc_wq_cyc wq; + u32 dma_fifo_mask; + struct xsc_sq_stats *stats; + 
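+	/* per-wqe bookkeeping: dma_fifo is filled by xsc_dma_push() at
+	 * xmit time, wqe_info is consumed by the tx completion path
+	 */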
struct { + struct xsc_sq_dma *dma_fifo; + struct xsc_tx_wqe_info *wqe_info; + } db; + void __iomem *uar_map; + struct netdev_queue *txq; + u32 sqn; + u16 stop_room; + + __be32 mkey_be; + unsigned long state; + unsigned int hw_mtu; + + /* control path */ + struct xsc_wq_ctrl wq_ctrl; + struct xsc_channel *channel; + int ch_ix; + int txq_ix; + struct work_struct recover_work; +} ____cacheline_aligned_in_smp; + +#endif /* __XSC_QUEUE_H */ diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/Kconfig b/drivers/net/ethernet/yunsilicon/xsc/pci/Kconfig new file mode 100644 index 0000000000000..b707da28b0a9c --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/Kconfig @@ -0,0 +1,14 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (C) 2021-2025, Shanghai Yunsilicon Technology Co., Ltd. +# All rights reserved. +# Yunsilicon PCI configuration +# + +config YUNSILICON_XSC_PCI + tristate "Yunsilicon XSC PCI driver" + help + This driver is common for Yunsilicon XSC + ethernet and RDMA drivers. + + To compile this driver as a module, choose M here. The module + will be called xsc_pci. diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/Makefile b/drivers/net/ethernet/yunsilicon/xsc/pci/Makefile new file mode 100644 index 0000000000000..e8c13c6fda463 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/Makefile @@ -0,0 +1,10 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (C) 2021-2025, Shanghai Yunsilicon Technology Co., Ltd. +# All rights reserved. + +ccflags-y += -I$(srctree)/drivers/net/ethernet/yunsilicon/xsc + +obj-$(CONFIG_YUNSILICON_XSC_PCI) += xsc_pci.o + +xsc_pci-y := main.o cmdq.o hw.o qp.o cq.o alloc.o eq.o pci_irq.o adev.o vport.o + diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/adev.c b/drivers/net/ethernet/yunsilicon/xsc/pci/adev.c new file mode 100644 index 0000000000000..c9c6328a4b0a6 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/adev.c @@ -0,0 +1,115 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021-2025, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */ + +#include +#include + +#include "adev.h" + +static DEFINE_IDA(xsc_adev_ida); + +enum xsc_adev_idx { + XSC_ADEV_IDX_ETH, +}; + +static const char * const xsc_adev_name[] = { + [XSC_ADEV_IDX_ETH] = XSC_ETH_ADEV_NAME, +}; + +static void xsc_release_adev(struct device *dev) +{ + struct xsc_adev *xsc_adev = + container_of(dev, struct xsc_adev, adev.dev); + struct xsc_core_device *xdev = xsc_adev->xdev; + int idx = xsc_adev->idx; + + kfree(xsc_adev); + xdev->xsc_adev_list[idx] = NULL; +} + +static int xsc_reg_adev(struct xsc_core_device *xdev, int idx) +{ + struct auxiliary_device *adev; + struct xsc_adev *xsc_adev; + int ret; + + xsc_adev = kzalloc(sizeof(*xsc_adev), GFP_KERNEL); + if (!xsc_adev) + return -ENOMEM; + + adev = &xsc_adev->adev; + adev->name = xsc_adev_name[idx]; + adev->id = xdev->adev_id; + adev->dev.parent = &xdev->pdev->dev; + adev->dev.release = xsc_release_adev; + xsc_adev->xdev = xdev; + xsc_adev->idx = idx; + + ret = auxiliary_device_init(adev); + if (ret) { + kfree(xsc_adev); + return ret; + } + + ret = auxiliary_device_add(adev); + if (ret) { + auxiliary_device_uninit(adev); + return ret; + } + + xdev->xsc_adev_list[idx] = xsc_adev; + + return 0; +} + +static void xsc_unreg_adev(struct xsc_core_device *xdev, int idx) +{ + struct xsc_adev *xsc_adev = xdev->xsc_adev_list[idx]; + struct auxiliary_device *adev = &xsc_adev->adev; + + auxiliary_device_delete(adev); + auxiliary_device_uninit(adev); +} + +int xsc_adev_init(struct xsc_core_device *xdev) +{ + struct xsc_adev **xsc_adev_list; + int adev_id; + int ret; + + xsc_adev_list = kcalloc(ARRAY_SIZE(xsc_adev_name), + sizeof(struct xsc_adev *), + GFP_KERNEL); + if (!xsc_adev_list) + return -ENOMEM; + xdev->xsc_adev_list = xsc_adev_list; + + adev_id = ida_alloc(&xsc_adev_ida, GFP_KERNEL); + if (adev_id < 0) { + ret = adev_id; + goto err_free_adev_list; + } + xdev->adev_id = adev_id; + + ret = xsc_reg_adev(xdev, XSC_ADEV_IDX_ETH); + if (ret) + goto err_dalloc_adev_id; + + return 0; +err_dalloc_adev_id: + ida_free(&xsc_adev_ida, xdev->adev_id); +err_free_adev_list: + kfree(xsc_adev_list); + + return ret; +} + +void xsc_adev_uninit(struct xsc_core_device *xdev) +{ + xsc_unreg_adev(xdev, XSC_ADEV_IDX_ETH); + ida_free(&xsc_adev_ida, xdev->adev_id); + kfree(xdev->xsc_adev_list); +} diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/adev.h b/drivers/net/ethernet/yunsilicon/xsc/pci/adev.h new file mode 100644 index 0000000000000..3de4dd26f27f5 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/adev.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021-2025, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#ifndef __ADEV_H +#define __ADEV_H + +#include "common/xsc_core.h" + +int xsc_adev_init(struct xsc_core_device *xdev); +void xsc_adev_uninit(struct xsc_core_device *xdev); + +#endif diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/alloc.c b/drivers/net/ethernet/yunsilicon/xsc/pci/alloc.c new file mode 100644 index 0000000000000..eeb6846541d16 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/alloc.c @@ -0,0 +1,234 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021-2025, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "alloc.h" + +/* Handling for queue buffers -- we allocate a bunch of memory and + * register it in a memory region at HCA virtual address 0. 
If the + * requested size is > max_direct, we split the allocation into + * multiple pages, so we don't require too much contiguous memory. + */ +int xsc_buf_alloc(struct xsc_core_device *xdev, + unsigned long size, + unsigned long max_direct, + struct xsc_buf *buf) +{ + dma_addr_t t; + + buf->size = size; + if (size <= max_direct) { + buf->nbufs = 1; + buf->npages = 1; + buf->page_shift = get_order(size) + PAGE_SHIFT; + buf->direct.buf = dma_alloc_coherent(&xdev->pdev->dev, + size, + &t, + GFP_KERNEL | __GFP_ZERO); + if (!buf->direct.buf) + return -ENOMEM; + + buf->direct.map = t; + + while (t & GENMASK(buf->page_shift - 1, 0)) { + --buf->page_shift; + buf->npages *= 2; + } + } else { + struct page **pages; + int i; + + buf->direct.buf = NULL; + buf->nbufs = DIV_ROUND_UP(size, PAGE_SIZE); + buf->npages = buf->nbufs; + buf->page_shift = PAGE_SHIFT; + buf->page_list = kcalloc(buf->nbufs, sizeof(*buf->page_list), + GFP_KERNEL); + if (!buf->page_list) + return -ENOMEM; + + for (i = 0; i < buf->nbufs; i++) { + buf->page_list[i].buf = + dma_alloc_coherent(&xdev->pdev->dev, PAGE_SIZE, + &t, GFP_KERNEL | __GFP_ZERO); + if (!buf->page_list[i].buf) + goto err_free; + + buf->page_list[i].map = t; + } + + pages = kmalloc_array(buf->nbufs, sizeof(*pages), GFP_KERNEL); + if (!pages) + goto err_free; + for (i = 0; i < buf->nbufs; i++) { + void *addr = buf->page_list[i].buf; + + if (is_vmalloc_addr(addr)) + pages[i] = vmalloc_to_page(addr); + else + pages[i] = virt_to_page(addr); + } + buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP, PAGE_KERNEL); + kfree(pages); + if (!buf->direct.buf) + goto err_free; + } + + return 0; + +err_free: + xsc_buf_free(xdev, buf); + + return -ENOMEM; +} + +void xsc_buf_free(struct xsc_core_device *xdev, struct xsc_buf *buf) +{ + int i; + + if (buf->nbufs == 1) { + dma_free_coherent(&xdev->pdev->dev, buf->size, buf->direct.buf, + buf->direct.map); + } else { + if (BITS_PER_LONG == 64 && buf->direct.buf) + vunmap(buf->direct.buf); + + for (i = 0; i < buf->nbufs; i++) + if (buf->page_list[i].buf) + dma_free_coherent(&xdev->pdev->dev, PAGE_SIZE, + buf->page_list[i].buf, + buf->page_list[i].map); + kfree(buf->page_list); + } +} + +void xsc_fill_page_array(struct xsc_buf *buf, __be64 *pas, unsigned int npages) +{ + unsigned int shift = PAGE_SHIFT - PAGE_SHIFT_4K; + unsigned int mask = (1 << shift) - 1; + u64 addr; + int i; + + for (i = 0; i < npages; i++) { + if (buf->nbufs == 1) + addr = buf->direct.map + (i << PAGE_SHIFT_4K); + else + addr = buf->page_list[i >> shift].map + + ((i & mask) << PAGE_SHIFT_4K); + + pas[i] = cpu_to_be64(addr); + } +} + +void xsc_core_fill_page_frag_array(struct xsc_frag_buf *buf, + __be64 *pas, + unsigned int npages) +{ + int i; + dma_addr_t addr; + unsigned int shift = PAGE_SHIFT - PAGE_SHIFT_4K; + unsigned int mask = (1 << shift) - 1; + + for (i = 0; i < npages; i++) { + addr = buf->frags[i >> shift].map + + ((i & mask) << PAGE_SHIFT_4K); + pas[i] = cpu_to_be64(addr); + } +} +EXPORT_SYMBOL(xsc_core_fill_page_frag_array); + +static void *xsc_dma_zalloc_coherent_node(struct xsc_core_device *xdev, + size_t size, dma_addr_t *dma_handle, + int node) +{ + struct xsc_dev_resource *dev_res = xdev->dev_res; + struct device *device = &xdev->pdev->dev; + int original_node; + void *cpu_handle; + + /* WA for kernels that don't use numa_mem_id in alloc_pages_node */ + if (node == NUMA_NO_NODE) + node = numa_mem_id(); + + mutex_lock(&dev_res->alloc_mutex); + original_node = dev_to_node(device); + set_dev_node(device, node); + cpu_handle = 
dma_alloc_coherent(device, size, dma_handle, + GFP_KERNEL); + set_dev_node(device, original_node); + mutex_unlock(&dev_res->alloc_mutex); + return cpu_handle; +} + +int xsc_core_frag_buf_alloc_node(struct xsc_core_device *xdev, + unsigned long size, + struct xsc_frag_buf *buf, + int node) +{ + int i; + + buf->size = size; + buf->npages = DIV_ROUND_UP(size, PAGE_SIZE); + buf->page_shift = PAGE_SHIFT; + buf->frags = kcalloc(buf->npages, sizeof(struct xsc_buf_list), + GFP_KERNEL); + if (!buf->frags) + goto err_out; + + for (i = 0; i < buf->npages; i++) { + unsigned long frag_sz = min_t(unsigned long, size, PAGE_SIZE); + struct xsc_buf_list *frag = &buf->frags[i]; + + frag->buf = xsc_dma_zalloc_coherent_node(xdev, frag_sz, + &frag->map, node); + if (!frag->buf) + goto err_free_buf; + if (frag->map & ((1 << buf->page_shift) - 1)) { + dma_free_coherent(&xdev->pdev->dev, frag_sz, + buf->frags[i].buf, buf->frags[i].map); + pci_err(xdev->pdev, "unexpected map alignment: %pad, page_shift=%d\n", + &frag->map, buf->page_shift); + goto err_free_buf; + } + size -= frag_sz; + } + + return 0; + +err_free_buf: + while (i--) + dma_free_coherent(&xdev->pdev->dev, + PAGE_SIZE, + buf->frags[i].buf, + buf->frags[i].map); + kfree(buf->frags); +err_out: + return -ENOMEM; +} +EXPORT_SYMBOL(xsc_core_frag_buf_alloc_node); + +void xsc_core_frag_buf_free(struct xsc_core_device *xdev, + struct xsc_frag_buf *buf) +{ + unsigned long size = buf->size; + int i; + + for (i = 0; i < buf->npages; i++) { + unsigned long frag_sz = min_t(unsigned long, size, PAGE_SIZE); + + dma_free_coherent(&xdev->pdev->dev, frag_sz, buf->frags[i].buf, + buf->frags[i].map); + size -= frag_sz; + } + kfree(buf->frags); +} +EXPORT_SYMBOL(xsc_core_frag_buf_free); diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/alloc.h b/drivers/net/ethernet/yunsilicon/xsc/pci/alloc.h new file mode 100644 index 0000000000000..a1c4b92a5aa2a --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/alloc.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021-2025, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#ifndef __ALLOC_H +#define __ALLOC_H + +#include "common/xsc_core.h" + +int xsc_buf_alloc(struct xsc_core_device *xdev, + unsigned long size, + unsigned long max_direct, + struct xsc_buf *buf); +void xsc_buf_free(struct xsc_core_device *xdev, struct xsc_buf *buf); +void xsc_fill_page_array(struct xsc_buf *buf, __be64 *pas, unsigned int npages); +#endif diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/cmdq.c b/drivers/net/ethernet/yunsilicon/xsc/pci/cmdq.c new file mode 100644 index 0000000000000..4d461f45aa291 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/cmdq.c @@ -0,0 +1,1571 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2021-2025, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + * Copyright (c) 2013-2016, Mellanox Technologies. All rights reserved. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "common/xsc_driver.h" +#include "common/xsc_cmd.h" +#include "common/xsc_auto_hw.h" +#include "common/xsc_core.h" + +enum { + CMD_IF_REV = 3, +}; + +enum { + NUM_LONG_LISTS = 2, + NUM_MED_LISTS = 64, + LONG_LIST_SIZE = (2ULL * 1024 * 1024 * 1024 / PAGE_SIZE) * 8 + 16 + + XSC_CMD_DATA_BLOCK_SIZE, + MED_LIST_SIZE = 16 + XSC_CMD_DATA_BLOCK_SIZE, +}; + +enum { + XSC_CMD_DELIVERY_STAT_OK = 0x0, + XSC_CMD_DELIVERY_STAT_SIGNAT_ERR = 0x1, + XSC_CMD_DELIVERY_STAT_TOK_ERR = 0x2, + XSC_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR = 0x3, + XSC_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR = 0x4, + XSC_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR = 0x5, + XSC_CMD_DELIVERY_STAT_FW_ERR = 0x6, + XSC_CMD_DELIVERY_STAT_IN_LENGTH_ERR = 0x7, + XSC_CMD_DELIVERY_STAT_OUT_LENGTH_ERR = 0x8, + XSC_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR = 0x9, + XSC_CMD_DELIVERY_STAT_CMD_DESCR_ERR = 0x10, +}; + +static struct xsc_cmd_work_ent *xsc_alloc_cmd(struct xsc_cmd *cmd, + struct xsc_cmd_msg *in, + struct xsc_rsp_msg *out) +{ + struct xsc_cmd_work_ent *ent; + + ent = kzalloc(sizeof(*ent), GFP_KERNEL); + if (!ent) + return ERR_PTR(-ENOMEM); + + ent->in = in; + ent->out = out; + ent->cmd = cmd; + + return ent; +} + +static void xsc_free_cmd(struct xsc_cmd_work_ent *ent) +{ + kfree(ent); +} + +static u8 xsc_alloc_token(struct xsc_cmd *cmd) +{ + u8 token; + + spin_lock(&cmd->token_lock); + token = cmd->token++ % 255 + 1; + spin_unlock(&cmd->token_lock); + + return token; +} + +static int xsc_alloc_ent(struct xsc_cmd *cmd) +{ + unsigned long flags; + int ret; + + spin_lock_irqsave(&cmd->alloc_lock, flags); + ret = find_first_bit(&cmd->cmd_entry_mask, cmd->max_reg_cmds); + if (ret < cmd->max_reg_cmds) + clear_bit(ret, &cmd->cmd_entry_mask); + spin_unlock_irqrestore(&cmd->alloc_lock, flags); + + return ret < cmd->max_reg_cmds ? 
ret : -ENOSPC; +} + +static void xsc_free_ent(struct xsc_cmd *cmd, int idx) +{ + unsigned long flags; + + spin_lock_irqsave(&cmd->alloc_lock, flags); + set_bit(idx, &cmd->cmd_entry_mask); + spin_unlock_irqrestore(&cmd->alloc_lock, flags); +} + +static struct xsc_cmd_layout *xsc_get_inst(struct xsc_cmd *cmd, int idx) +{ + return cmd->cmd_buf + (idx << cmd->log_stride); +} + +static struct xsc_rsp_layout *xsc_get_cq_inst(struct xsc_cmd *cmd, int idx) +{ + return cmd->cq_buf + (idx << cmd->log_stride); +} + +static u8 xsc_xor8_buf(void *buf, int len) +{ + u8 *ptr = buf; + u8 sum = 0; + int i; + + for (i = 0; i < len; i++) + sum ^= ptr[i]; + + return sum; +} + +static int xsc_verify_block_sig(struct xsc_cmd_prot_block *block) +{ + /* rsvd0 was set to 0xFF by fw */ + if (xsc_xor8_buf(block->rsvd0, + sizeof(*block) - sizeof(block->data) - 1) != 0xff) + return -EINVAL; + + if (xsc_xor8_buf(block, sizeof(*block)) != 0xff) + return -EINVAL; + + return 0; +} + +static void xsc_calc_block_sig(struct xsc_cmd_prot_block *block, u8 token) +{ + block->token = token; + block->ctrl_sig = ~xsc_xor8_buf(block->rsvd0, + sizeof(*block) - sizeof(block->data) - 2); + block->sig = ~xsc_xor8_buf(block, sizeof(*block) - 1); +} + +static void xsc_calc_chain_sig(struct xsc_cmd_mailbox *head, u8 token) +{ + struct xsc_cmd_mailbox *next = head; + + while (next) { + xsc_calc_block_sig(next->buf, token); + next = next->next; + } +} + +static void xsc_set_signature(struct xsc_cmd_work_ent *ent) +{ + ent->lay->sig = ~xsc_xor8_buf(ent->lay, sizeof(*ent->lay)); + xsc_calc_chain_sig(ent->in->next, ent->token); + xsc_calc_chain_sig(ent->out->next, ent->token); +} + +static int xsc_verify_signature(struct xsc_cmd_work_ent *ent) +{ + struct xsc_cmd_mailbox *next = ent->out->next; + int err; + u8 sig; + + sig = xsc_xor8_buf(ent->rsp_lay, sizeof(*ent->rsp_lay)); + if (sig != 0xff) + return -EINVAL; + + while (next) { + err = xsc_verify_block_sig(next->buf); + if (err) + return err; + + next = next->next; + } + + return 0; +} + +const char *xsc_command_str(int command) +{ + switch (command) { + case XSC_CMD_OP_QUERY_HCA_CAP: + return "QUERY_HCA_CAP"; + + case XSC_CMD_OP_QUERY_CMDQ_VERSION: + return "QUERY_CMDQ_VERSION"; + + case XSC_CMD_OP_FUNCTION_RESET: + return "FUNCTION_RESET"; + + case XSC_CMD_OP_DUMMY: + return "DUMMY_CMD"; + + case XSC_CMD_OP_CREATE_EQ: + return "CREATE_EQ"; + + case XSC_CMD_OP_DESTROY_EQ: + return "DESTROY_EQ"; + + case XSC_CMD_OP_CREATE_CQ: + return "CREATE_CQ"; + + case XSC_CMD_OP_DESTROY_CQ: + return "DESTROY_CQ"; + + case XSC_CMD_OP_CREATE_QP: + return "CREATE_QP"; + + case XSC_CMD_OP_DESTROY_QP: + return "DESTROY_QP"; + + case XSC_CMD_OP_RST2INIT_QP: + return "RST2INIT_QP"; + + case XSC_CMD_OP_INIT2RTR_QP: + return "INIT2RTR_QP"; + + case XSC_CMD_OP_RTR2RTS_QP: + return "RTR2RTS_QP"; + + case XSC_CMD_OP_RTS2RTS_QP: + return "RTS2RTS_QP"; + + case XSC_CMD_OP_SQERR2RTS_QP: + return "SQERR2RTS_QP"; + + case XSC_CMD_OP_2ERR_QP: + return "2ERR_QP"; + + case XSC_CMD_OP_RTS2SQD_QP: + return "RTS2SQD_QP"; + + case XSC_CMD_OP_SQD2RTS_QP: + return "SQD2RTS_QP"; + + case XSC_CMD_OP_2RST_QP: + return "2RST_QP"; + + case XSC_CMD_OP_INIT2INIT_QP: + return "INIT2INIT_QP"; + + case XSC_CMD_OP_MODIFY_RAW_QP: + return "MODIFY_RAW_QP"; + + case XSC_CMD_OP_ENABLE_NIC_HCA: + return "ENABLE_NIC_HCA"; + + case XSC_CMD_OP_DISABLE_NIC_HCA: + return "DISABLE_NIC_HCA"; + + case XSC_CMD_OP_QUERY_VPORT_STATE: + return "QUERY_VPORT_STATE"; + + case XSC_CMD_OP_QUERY_EVENT_TYPE: + return "QUERY_EVENT_TYPE"; + + case 
XSC_CMD_OP_ENABLE_MSIX: + return "ENABLE_MSIX"; + + case XSC_CMD_OP_SET_MTU: + return "SET_MTU"; + + case XSC_CMD_OP_QUERY_ETH_MAC: + return "QUERY_ETH_MAC"; + + default: return "unknown command opcode"; + } +} + +static void cmd_work_handler(struct work_struct *work) +{ + struct xsc_cmd_work_ent *ent; + struct xsc_core_device *xdev; + struct xsc_cmd_layout *lay; + struct semaphore *sem; + struct xsc_cmd *cmd; + unsigned long flags; + + ent = container_of(work, struct xsc_cmd_work_ent, work); + cmd = ent->cmd; + xdev = container_of(cmd, struct xsc_core_device, cmd); + sem = &cmd->sem; + down(sem); + ent->idx = xsc_alloc_ent(cmd); + if (ent->idx < 0) { + pci_err(xdev->pdev, "failed to allocate command entry\n"); + up(sem); + return; + } + + ent->token = xsc_alloc_token(cmd); + cmd->ent_arr[ent->idx] = ent; + + spin_lock_irqsave(&cmd->doorbell_lock, flags); + lay = xsc_get_inst(cmd, cmd->cmd_pid); + ent->lay = lay; + memset(lay, 0, sizeof(*lay)); + memcpy(lay->in, ent->in->first.data, sizeof(lay->in)); + if (ent->in->next) + lay->in_ptr = cpu_to_be64(ent->in->next->dma); + lay->inlen = cpu_to_be32(ent->in->len); + if (ent->out->next) + lay->out_ptr = cpu_to_be64(ent->out->next->dma); + lay->outlen = cpu_to_be32(ent->out->len); + lay->type = XSC_PCI_CMD_XPORT; + lay->token = ent->token; + lay->idx = ent->idx; + if (!cmd->checksum_disabled) + xsc_set_signature(ent); + else + lay->sig = 0xff; + + ktime_get_ts64(&ent->ts1); + + /* ring doorbell after the descriptor is valid */ + wmb(); + + cmd->cmd_pid = (cmd->cmd_pid + 1) % (1 << cmd->log_sz); + writel(cmd->cmd_pid, XSC_REG_ADDR(xdev, cmd->reg.req_pid_addr)); + spin_unlock_irqrestore(&cmd->doorbell_lock, flags); +} + +static const char *xsc_deliv_status_to_str(u8 status) +{ + switch (status) { + case XSC_CMD_DELIVERY_STAT_OK: + return "no errors"; + case XSC_CMD_DELIVERY_STAT_SIGNAT_ERR: + return "signature error"; + case XSC_CMD_DELIVERY_STAT_TOK_ERR: + return "token error"; + case XSC_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR: + return "bad block number"; + case XSC_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR: + return "output pointer not aligned to block size"; + case XSC_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR: + return "input pointer not aligned to block size"; + case XSC_CMD_DELIVERY_STAT_FW_ERR: + return "firmware internal error"; + case XSC_CMD_DELIVERY_STAT_IN_LENGTH_ERR: + return "command input length error"; + case XSC_CMD_DELIVERY_STAT_OUT_LENGTH_ERR: + return "command output length error"; + case XSC_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR: + return "reserved fields not cleared"; + case XSC_CMD_DELIVERY_STAT_CMD_DESCR_ERR: + return "bad command descriptor type"; + default: + return "unknown status code"; + } +} + +static u16 xsc_msg_to_opcode(struct xsc_cmd_msg *in) +{ + struct xsc_inbox_hdr *hdr = (struct xsc_inbox_hdr *)(in->first.data); + + return be16_to_cpu(hdr->opcode); +} + +static int xsc_wait_func(struct xsc_core_device *xdev, + struct xsc_cmd_work_ent *ent) +{ + unsigned long timeout = msecs_to_jiffies(XSC_CMD_TIMEOUT_MSEC); + struct xsc_cmd *cmd = &xdev->cmd; + int err; + + if (!wait_for_completion_timeout(&ent->done, timeout)) + err = -ETIMEDOUT; + else + err = ent->ret; + + if (err == -ETIMEDOUT) { + cmd->cmd_status = XSC_CMD_STATUS_TIMEDOUT; + pci_err(xdev->pdev, "wait for %s(0x%x) response timeout!\n", + xsc_command_str(xsc_msg_to_opcode(ent->in)), + xsc_msg_to_opcode(ent->in)); + } else if (err) { + pci_err(xdev->pdev, "err %d, delivery status %s(%d)\n", err, + xsc_deliv_status_to_str(ent->status), ent->status); + } + + return err; +} + 
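The signature scheme handled by xsc_set_signature()/xsc_verify_signature() above is a plain xor-8: the sender stores the complement of the xor of all other bytes, so an intact buffer, signature included, xors to 0xff. A minimal standalone demonstration in userspace C:

#include <stdint.h>
#include <stdio.h>

/* Same accumulation as xsc_xor8_buf() */
static uint8_t xor8(const uint8_t *buf, size_t len)
{
	uint8_t sum = 0;

	while (len--)
		sum ^= *buf++;
	return sum;
}

int main(void)
{
	uint8_t msg[8] = { 1, 2, 3, 4, 5, 6, 7, 0 };

	msg[7] = ~xor8(msg, 7);        /* sign the first seven bytes */
	printf("%#x\n", xor8(msg, 8)); /* prints 0xff: verifies intact */
	return 0;
}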
+/* Notes:
+ * 1. Callback functions may not sleep
+ * 2. page queue commands do not support asynchronous completion
+ */
+static int xsc_cmd_invoke(struct xsc_core_device *xdev, struct xsc_cmd_msg *in,
+			  struct xsc_rsp_msg *out, u8 *status)
+{
+	struct xsc_cmd *cmd = &xdev->cmd;
+	struct xsc_cmd_work_ent *ent;
+	struct xsc_cmd_stats *stats;
+	ktime_t t1, t2, delta;
+	struct semaphore *sem;
+	int err = 0;
+	s64 ds;
+	u16 op;
+
+	ent = xsc_alloc_cmd(cmd, in, out);
+	if (IS_ERR(ent))
+		return PTR_ERR(ent);
+
+	init_completion(&ent->done);
+	INIT_WORK(&ent->work, cmd_work_handler);
+	if (!queue_work(cmd->wq, &ent->work)) {
+		pci_err(xdev->pdev, "failed to queue work\n");
+		err = -ENOMEM;
+		goto err_free;
+	}
+
+	err = xsc_wait_func(xdev, ent);
+	if (err == -ETIMEDOUT)
+		goto err_sem_up;
+	t1 = timespec64_to_ktime(ent->ts1);
+	t2 = timespec64_to_ktime(ent->ts2);
+	delta = ktime_sub(t2, t1);
+	ds = ktime_to_ns(delta);
+	op = be16_to_cpu(((struct xsc_inbox_hdr *)in->first.data)->opcode);
+	if (op < ARRAY_SIZE(cmd->stats)) {
+		stats = &cmd->stats[op];
+		spin_lock(&stats->lock);
+		stats->sum += ds;
+		++stats->n;
+		spin_unlock(&stats->lock);
+	}
+	*status = ent->status;
+	xsc_free_cmd(ent);
+
+	return err;
+
+err_sem_up:
+	sem = &cmd->sem;
+	up(sem);
+err_free:
+	xsc_free_cmd(ent);
+	return err;
+}
+
+static int xsc_copy_to_cmd_msg(struct xsc_cmd_msg *to, void *from,
+			       unsigned int size)
+{
+	struct xsc_cmd_prot_block *block;
+	struct xsc_cmd_mailbox *next;
+	unsigned int copy;
+
+	if (!to || !from)
+		return -ENOMEM;
+
+	copy = min(size, sizeof(to->first.data));
+	memcpy(to->first.data, from, copy);
+	size -= copy;
+	from += copy;
+
+	next = to->next;
+	while (size) {
+		if (!next) {
+			WARN_ONCE(1, "command mailbox chain too short\n");
+			return -ENOMEM;
+		}
+
+		copy = min(size, XSC_CMD_DATA_BLOCK_SIZE);
+		block = next->buf;
+		memcpy(block->data, from, copy);
+		block->owner_status = 0;
+		from += copy;
+		size -= copy;
+		next = next->next;
+	}
+
+	return 0;
+}
+
+static int xsc_copy_from_rsp_msg(void *to, struct xsc_rsp_msg *from,
+				 unsigned int size)
+{
+	struct xsc_cmd_prot_block *block;
+	struct xsc_cmd_mailbox *next;
+	unsigned int copy;
+
+	if (!to || !from)
+		return -ENOMEM;
+
+	copy = min(size, sizeof(from->first.data));
+	memcpy(to, from->first.data, copy);
+	size -= copy;
+	to += copy;
+
+	next = from->next;
+	while (size) {
+		if (!next) {
+			WARN_ONCE(1, "response mailbox chain too short\n");
+			return -ENOMEM;
+		}
+
+		copy = min(size, XSC_CMD_DATA_BLOCK_SIZE);
+		block = next->buf;
+		if (!block->owner_status)
+			pr_err("block ownership check failed\n");
+
+		memcpy(to, block->data, copy);
+		to += copy;
+		size -= copy;
+		next = next->next;
+	}
+
+	return 0;
+}
+
+static struct xsc_cmd_mailbox *xsc_alloc_cmd_box(struct xsc_core_device *xdev,
+						 gfp_t flags)
+{
+	struct xsc_cmd_mailbox *mailbox;
+
+	mailbox = kmalloc(sizeof(*mailbox), flags);
+	if (!mailbox)
+		return ERR_PTR(-ENOMEM);
+
+	mailbox->buf = dma_pool_zalloc(xdev->cmd.pool, flags,
+				       &mailbox->dma);
+	if (!mailbox->buf) {
+		kfree(mailbox);
+		return ERR_PTR(-ENOMEM);
+	}
+	mailbox->next = NULL;
+
+	return mailbox;
+}
+
+static void xsc_free_cmd_box(struct xsc_core_device *xdev,
+			     struct xsc_cmd_mailbox *mailbox)
+{
+	dma_pool_free(xdev->cmd.pool, mailbox->buf, mailbox->dma);
+
+	kfree(mailbox);
+}
+
+static struct xsc_cmd_msg *xsc_alloc_cmd_msg(struct xsc_core_device *xdev,
+					     gfp_t flags, unsigned int size)
+{
+	struct xsc_cmd_mailbox *tmp, *head = NULL;
+	struct xsc_cmd_prot_block *block;
+	struct xsc_cmd_msg *msg;
+	int blen;
+	int err;
+	int n;
+	int i;
+
+	msg =
kzalloc(sizeof(*msg), GFP_KERNEL); + if (!msg) + return ERR_PTR(-ENOMEM); + + blen = size - min(sizeof(msg->first.data), size); + n = (blen + XSC_CMD_DATA_BLOCK_SIZE - 1) / XSC_CMD_DATA_BLOCK_SIZE; + + for (i = 0; i < n; i++) { + tmp = xsc_alloc_cmd_box(xdev, flags); + if (IS_ERR(tmp)) { + pci_err(xdev->pdev, "failed allocating block\n"); + err = PTR_ERR(tmp); + goto err_free; + } + + block = tmp->buf; + tmp->next = head; + block->next = cpu_to_be64(tmp->next ? tmp->next->dma : 0); + block->block_num = cpu_to_be32(n - i - 1); + head = tmp; + } + msg->next = head; + msg->len = size; + return msg; + +err_free: + while (head) { + tmp = head->next; + xsc_free_cmd_box(xdev, head); + head = tmp; + } + kfree(msg); + + return ERR_PTR(err); +} + +static void xsc_free_cmd_msg(struct xsc_core_device *xdev, + struct xsc_cmd_msg *msg) +{ + struct xsc_cmd_mailbox *head = msg->next; + struct xsc_cmd_mailbox *next; + + while (head) { + next = head->next; + xsc_free_cmd_box(xdev, head); + head = next; + } + kfree(msg); +} + +static struct xsc_rsp_msg *xsc_alloc_rsp_msg(struct xsc_core_device *xdev, + gfp_t flags, unsigned int size) +{ + struct xsc_cmd_mailbox *tmp, *head = NULL; + struct xsc_cmd_prot_block *block; + struct xsc_rsp_msg *msg; + int blen; + int err; + int n; + int i; + + msg = kzalloc(sizeof(*msg), GFP_KERNEL); + if (!msg) + return ERR_PTR(-ENOMEM); + + blen = size - min_t(int, sizeof(msg->first.data), size); + n = (blen + XSC_CMD_DATA_BLOCK_SIZE - 1) / XSC_CMD_DATA_BLOCK_SIZE; + + for (i = 0; i < n; i++) { + tmp = xsc_alloc_cmd_box(xdev, flags); + if (IS_ERR(tmp)) { + pci_err(xdev->pdev, "failed allocating block\n"); + err = PTR_ERR(tmp); + goto err_free; + } + + block = tmp->buf; + tmp->next = head; + block->next = cpu_to_be64(tmp->next ? tmp->next->dma : 0); + block->block_num = cpu_to_be32(n - i - 1); + head = tmp; + } + msg->next = head; + msg->len = size; + return msg; + +err_free: + while (head) { + tmp = head->next; + xsc_free_cmd_box(xdev, head); + head = tmp; + } + kfree(msg); + + return ERR_PTR(err); +} + +static void xsc_free_rsp_msg(struct xsc_core_device *xdev, + struct xsc_rsp_msg *msg) +{ + struct xsc_cmd_mailbox *head = msg->next; + struct xsc_cmd_mailbox *next; + + while (head) { + next = head->next; + xsc_free_cmd_box(xdev, head); + head = next; + } + kfree(msg); +} + +static void xsc_set_wqname(struct xsc_core_device *xdev) +{ + struct xsc_cmd *cmd = &xdev->cmd; + + snprintf(cmd->wq_name, sizeof(cmd->wq_name), "xsc_cmd_%s", + dev_name(&xdev->pdev->dev)); +} + +void xsc_cmd_use_events(struct xsc_core_device *xdev) +{ + struct xsc_cmd *cmd = &xdev->cmd; + int i; + + for (i = 0; i < cmd->max_reg_cmds; i++) + down(&cmd->sem); + + flush_workqueue(cmd->wq); + + cmd->mode = XSC_CMD_MODE_EVENTS; + + while (cmd->cmd_pid != cmd->cq_cid) + msleep(20); + kthread_stop(cmd->cq_task); + cmd->cq_task = NULL; + + for (i = 0; i < cmd->max_reg_cmds; i++) + up(&cmd->sem); +} + +static int xsc_cmd_cq_polling(void *data); +void xsc_cmd_use_polling(struct xsc_core_device *xdev) +{ + struct xsc_cmd *cmd = &xdev->cmd; + int i; + + for (i = 0; i < cmd->max_reg_cmds; i++) + down(&cmd->sem); + + flush_workqueue(cmd->wq); + cmd->mode = XSC_CMD_MODE_POLLING; + cmd->cq_task = kthread_create(xsc_cmd_cq_polling, + (void *)xdev, + "xsc_cmd_cq_polling"); + if (cmd->cq_task) + wake_up_process(cmd->cq_task); + + for (i = 0; i < cmd->max_reg_cmds; i++) + up(&cmd->sem); +} + +static int xsc_status_to_err(u8 status) +{ + return status ? 
-1 : 0; +} + +static struct xsc_cmd_msg *xsc_alloc_msg(struct xsc_core_device *xdev, + unsigned int in_size) +{ + struct xsc_cmd_msg *msg = ERR_PTR(-ENOMEM); + struct xsc_cmd *cmd = &xdev->cmd; + struct cache_ent *ent = NULL; + + if (in_size > MED_LIST_SIZE && in_size <= LONG_LIST_SIZE) + ent = &cmd->cache.large; + else if (in_size > 16 && in_size <= MED_LIST_SIZE) + ent = &cmd->cache.med; + + if (ent) { + spin_lock(&ent->lock); + if (!list_empty(&ent->head)) { + msg = list_entry(ent->head.next, typeof(*msg), list); + /* For cached lists, we must explicitly state what is + * the real size + */ + msg->len = in_size; + list_del(&msg->list); + } + spin_unlock(&ent->lock); + } + + if (IS_ERR(msg)) + msg = xsc_alloc_cmd_msg(xdev, GFP_KERNEL, in_size); + + return msg; +} + +static void xsc_free_msg(struct xsc_core_device *xdev, struct xsc_cmd_msg *msg) +{ + if (msg->cache) { + spin_lock(&msg->cache->lock); + list_add_tail(&msg->list, &msg->cache->head); + spin_unlock(&msg->cache->lock); + } else { + xsc_free_cmd_msg(xdev, msg); + } +} + +static int xsc_dummy_work(struct xsc_core_device *xdev, + struct xsc_cmd_msg *in, + struct xsc_rsp_msg *out, + u16 dummy_cnt, + u16 dummy_start_pid) +{ + struct xsc_cmd_work_ent **dummy_ent_arr; + struct xsc_cmd *cmd = &xdev->cmd; + u16 temp_pid = dummy_start_pid; + struct xsc_cmd_layout *lay; + struct semaphore *sem; + u16 free_cnt = 0; + int err = 0; + u16 i; + + sem = &cmd->sem; + + dummy_ent_arr = kcalloc(dummy_cnt, sizeof(struct xsc_cmd_work_ent *), + GFP_KERNEL); + if (!dummy_ent_arr) { + err = -ENOMEM; + goto err_out; + } + + for (i = 0; i < dummy_cnt; i++) { + dummy_ent_arr[i] = xsc_alloc_cmd(cmd, in, out); + if (IS_ERR(dummy_ent_arr[i])) { + pci_err(xdev->pdev, "failed to alloc cmd buffer\n"); + err = -ENOMEM; + free_cnt = i; + goto err_free; + } + + down(sem); + + dummy_ent_arr[i]->idx = xsc_alloc_ent(cmd); + if (dummy_ent_arr[i]->idx < 0) { + pci_err(xdev->pdev, "failed to allocate command entry\n"); + err = -1; + free_cnt = i; + up(sem); + xsc_free_cmd(dummy_ent_arr[i]); + goto err_free; + } + dummy_ent_arr[i]->token = xsc_alloc_token(cmd); + cmd->ent_arr[dummy_ent_arr[i]->idx] = dummy_ent_arr[i]; + init_completion(&dummy_ent_arr[i]->done); + + lay = xsc_get_inst(cmd, temp_pid); + dummy_ent_arr[i]->lay = lay; + memset(lay, 0, sizeof(*lay)); + memcpy(lay->in, + dummy_ent_arr[i]->in->first.data, + sizeof(dummy_ent_arr[i]->in)); + lay->inlen = cpu_to_be32(dummy_ent_arr[i]->in->len); + lay->outlen = cpu_to_be32(dummy_ent_arr[i]->out->len); + lay->type = XSC_PCI_CMD_XPORT; + lay->token = dummy_ent_arr[i]->token; + lay->idx = dummy_ent_arr[i]->idx; + if (!cmd->checksum_disabled) + xsc_set_signature(dummy_ent_arr[i]); + else + lay->sig = 0xff; + temp_pid = (temp_pid + 1) % (1 << cmd->log_sz); + } + + /* ring doorbell after the descriptor is valid */ + wmb(); + writel(cmd->cmd_pid, XSC_REG_ADDR(xdev, cmd->reg.req_pid_addr)); + if (readl(XSC_REG_ADDR(xdev, cmd->reg.interrupt_stat_addr)) != 0) + writel(0xF, XSC_REG_ADDR(xdev, cmd->reg.interrupt_stat_addr)); + + if (wait_for_completion_timeout(&dummy_ent_arr[dummy_cnt - 1]->done, + msecs_to_jiffies(3000)) == 0) { + pci_err(xdev->pdev, "dummy_cmd %d ent timeout, cmdq fail\n", + dummy_cnt - 1); + err = -ETIMEDOUT; + } + + for (i = 0; i < dummy_cnt; i++) + xsc_free_cmd(dummy_ent_arr[i]); + + kfree(dummy_ent_arr); + return err; + +err_free: + for (i = 0; i < free_cnt; i++) { + xsc_free_ent(cmd, dummy_ent_arr[i]->idx); + up(sem); + xsc_free_cmd(dummy_ent_arr[i]); + } + kfree(dummy_ent_arr); +err_out: + return err; 
+} + +static int xsc_dummy_cmd_exec(struct xsc_core_device *xdev, + void *in, unsigned int in_size, + void *out, unsigned int out_size, + u16 dmmy_cnt, u16 dummy_start) +{ + struct xsc_rsp_msg *outb; + struct xsc_cmd_msg *inb; + int err; + + inb = xsc_alloc_msg(xdev, in_size); + if (IS_ERR(inb)) { + err = PTR_ERR(inb); + return err; + } + + err = xsc_copy_to_cmd_msg(inb, in, in_size); + if (err) { + pci_err(xdev->pdev, "err %d\n", err); + goto err_free_msg; + } + + outb = xsc_alloc_rsp_msg(xdev, GFP_KERNEL, out_size); + if (IS_ERR(outb)) { + err = PTR_ERR(outb); + goto err_free_msg; + } + + err = xsc_dummy_work(xdev, inb, outb, dmmy_cnt, dummy_start); + + if (err) + goto err_free_rsp; + + err = xsc_copy_from_rsp_msg(out, outb, out_size); + +err_free_rsp: + xsc_free_rsp_msg(xdev, outb); + +err_free_msg: + xsc_free_msg(xdev, inb); + return err; +} + +static int xsc_send_dummy_cmd(struct xsc_core_device *xdev, + u16 gap, u16 dummy_start) +{ + struct xsc_cmd_dummy_mbox_out *out; + struct xsc_cmd_dummy_mbox_in in; + int err; + + out = kzalloc(sizeof(*out), GFP_KERNEL); + if (!out) + return -ENOMEM; + + memset(&in, 0, sizeof(in)); + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_DUMMY); + + err = xsc_dummy_cmd_exec(xdev, &in, sizeof(in), out, sizeof(*out), + gap, dummy_start); + if (err) + goto err_free; + + if (out->hdr.status) { + err = xsc_cmd_status_to_err(&out->hdr); + goto err_free; + } + +err_free: + kfree(out); + return err; +} + +static int xsc_request_pid_cid_mismatch_restore(struct xsc_core_device *xdev) +{ + struct xsc_cmd *cmd = &xdev->cmd; + u16 req_pid, req_cid; + u16 gap; + + int err; + + req_pid = readl(XSC_REG_ADDR(xdev, cmd->reg.req_pid_addr)); + req_cid = readl(XSC_REG_ADDR(xdev, cmd->reg.req_cid_addr)); + if (req_pid >= (1 << cmd->log_sz) || req_cid >= (1 << cmd->log_sz)) { + pci_err(xdev->pdev, + "req_pid %d, req_cid %d, out of normal range!!! max value is %d\n", + req_pid, req_cid, (1 << cmd->log_sz)); + return -EFAULT; + } + + if (req_pid == req_cid) + return 0; + + gap = (req_pid > req_cid) ? 
(req_pid - req_cid) + : ((1 << cmd->log_sz) + req_pid - req_cid); + + err = xsc_send_dummy_cmd(xdev, gap, req_cid); + if (err) { + pci_err(xdev->pdev, "Send dummy cmd failed\n"); + return err; + } + + return 0; +} + +static int _xsc_cmd_exec(struct xsc_core_device *xdev, + void *in, unsigned int in_size, + void *out, unsigned int out_size) +{ + struct xsc_cmd *cmd = &xdev->cmd; + struct xsc_rsp_msg *outb; + struct xsc_cmd_msg *inb; + u8 status = 0; + int err; + + if (cmd->cmd_status == XSC_CMD_STATUS_TIMEDOUT) + return -ETIMEDOUT; + + inb = xsc_alloc_msg(xdev, in_size); + if (IS_ERR(inb)) { + err = PTR_ERR(inb); + return err; + } + + err = xsc_copy_to_cmd_msg(inb, in, in_size); + if (err) { + pci_err(xdev->pdev, "copy to cmd_msg err %d\n", err); + goto err_free_msg; + } + + outb = xsc_alloc_rsp_msg(xdev, GFP_KERNEL, out_size); + if (IS_ERR(outb)) { + err = PTR_ERR(outb); + goto err_free_msg; + } + + err = xsc_cmd_invoke(xdev, inb, outb, &status); + if (err) + goto err_free_rsp; + + if (status) { + pci_err(xdev->pdev, "opcode:%#x, err %d, status %d\n", + xsc_msg_to_opcode(inb), err, status); + err = xsc_status_to_err(status); + goto err_free_rsp; + } + + err = xsc_copy_from_rsp_msg(out, outb, out_size); + +err_free_rsp: + xsc_free_rsp_msg(xdev, outb); + +err_free_msg: + xsc_free_msg(xdev, inb); + return err; +} + +int xsc_cmd_exec(struct xsc_core_device *xdev, + void *in, unsigned int in_size, + void *out, unsigned int out_size) +{ + struct xsc_inbox_hdr *hdr = (struct xsc_inbox_hdr *)in; + + hdr->ver = 0; + if (hdr->ver != 0) { + pci_err(xdev->pdev, "recv an unexpected cmd ver = %d, opcode = %d\n", + be16_to_cpu(hdr->ver), be16_to_cpu(hdr->opcode)); + WARN_ON(hdr->ver != 0); + } + + return _xsc_cmd_exec(xdev, in, in_size, out, out_size); +} +EXPORT_SYMBOL(xsc_cmd_exec); + +static void xsc_destroy_msg_cache(struct xsc_core_device *xdev) +{ + struct xsc_cmd *cmd = &xdev->cmd; + struct xsc_cmd_msg *msg; + struct xsc_cmd_msg *n; + + list_for_each_entry_safe(msg, n, &cmd->cache.large.head, list) { + list_del(&msg->list); + xsc_free_cmd_msg(xdev, msg); + } + + list_for_each_entry_safe(msg, n, &cmd->cache.med.head, list) { + list_del(&msg->list); + xsc_free_cmd_msg(xdev, msg); + } +} + +static int xsc_create_msg_cache(struct xsc_core_device *xdev) +{ + struct xsc_cmd *cmd = &xdev->cmd; + struct xsc_cmd_msg *msg; + int err; + int i; + + spin_lock_init(&cmd->cache.large.lock); + INIT_LIST_HEAD(&cmd->cache.large.head); + spin_lock_init(&cmd->cache.med.lock); + INIT_LIST_HEAD(&cmd->cache.med.head); + + for (i = 0; i < NUM_LONG_LISTS; i++) { + msg = xsc_alloc_cmd_msg(xdev, GFP_KERNEL, LONG_LIST_SIZE); + if (IS_ERR(msg)) { + err = PTR_ERR(msg); + goto err_destroy; + } + msg->cache = &cmd->cache.large; + list_add_tail(&msg->list, &cmd->cache.large.head); + } + + for (i = 0; i < NUM_MED_LISTS; i++) { + msg = xsc_alloc_cmd_msg(xdev, GFP_KERNEL, MED_LIST_SIZE); + if (IS_ERR(msg)) { + err = PTR_ERR(msg); + goto err_destroy; + } + msg->cache = &cmd->cache.med; + list_add_tail(&msg->list, &cmd->cache.med.head); + } + + return 0; + +err_destroy: + xsc_destroy_msg_cache(xdev); + return err; +} + +static void xsc_cmd_comp_handler(struct xsc_core_device *xdev, u8 idx, + struct xsc_rsp_layout *rsp) +{ + struct xsc_cmd *cmd = &xdev->cmd; + struct xsc_cmd_work_ent *ent; + + if (idx > cmd->max_reg_cmds || (cmd->cmd_entry_mask & (1 << idx))) { + pci_err(xdev->pdev, "idx[%d] exceed max cmds, or has no relative request.\n", + idx); + return; + } + ent = cmd->ent_arr[idx]; + ent->rsp_lay = rsp; + 
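+	/* ts2 closes the latency interval opened by ts1 in
+	 * cmd_work_handler(); the delta feeds the per-opcode stats
+	 */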
ktime_get_ts64(&ent->ts2); + + memcpy(ent->out->first.data, + ent->rsp_lay->out, + sizeof(ent->rsp_lay->out)); + if (!cmd->checksum_disabled) + ent->ret = xsc_verify_signature(ent); + else + ent->ret = 0; + ent->status = 0; + + xsc_free_ent(cmd, ent->idx); + complete(&ent->done); + up(&cmd->sem); +} + +static int xsc_cmd_cq_polling(void *data) +{ + struct xsc_core_device *xdev = data; + struct xsc_cmd *cmd = &xdev->cmd; + struct xsc_rsp_layout *rsp; + u32 cq_pid; + + while (!kthread_should_stop()) { + if (need_resched()) + schedule(); + cq_pid = readl(XSC_REG_ADDR(xdev, cmd->reg.rsp_pid_addr)); + if (cmd->cq_cid == cq_pid) { + mdelay(3); + continue; + } + + rsp = xsc_get_cq_inst(cmd, cmd->cq_cid); + if (!cmd->ownerbit_learned) { + cmd->ownerbit_learned = 1; + cmd->owner_bit = rsp->owner_bit; + } + if (cmd->owner_bit != rsp->owner_bit) { + pci_err(xdev->pdev, "hw update cq doorbell but buf not ready %u %u\n", + cmd->cq_cid, cq_pid); + continue; + } + + xsc_cmd_comp_handler(xdev, rsp->idx, rsp); + + cmd->cq_cid = (cmd->cq_cid + 1) % (1 << cmd->log_sz); + + writel(cmd->cq_cid, XSC_REG_ADDR(xdev, cmd->reg.rsp_cid_addr)); + if (cmd->cq_cid == 0) + cmd->owner_bit = !cmd->owner_bit; + } + + return 0; +} + +int xsc_cmd_err_handler(struct xsc_core_device *xdev) +{ + union interrupt_stat { + struct { + u32 hw_read_req_err:1; + u32 hw_write_req_err:1; + u32 req_pid_err:1; + u32 rsp_cid_err:1; + }; + u32 raw; + } stat; + int retry = 0; + int err = 0; + + stat.raw = readl(XSC_REG_ADDR(xdev, xdev->cmd.reg.interrupt_stat_addr)); + while (stat.raw != 0) { + err++; + if (stat.hw_read_req_err) { + retry = 1; + stat.hw_read_req_err = 0; + pci_err(xdev->pdev, "hw report read req from host failed!\n"); + } else if (stat.hw_write_req_err) { + retry = 1; + stat.hw_write_req_err = 0; + pci_err(xdev->pdev, "hw report write req to fw failed!\n"); + } else if (stat.req_pid_err) { + stat.req_pid_err = 0; + pci_err(xdev->pdev, "hw report unexpected req pid!\n"); + } else if (stat.rsp_cid_err) { + stat.rsp_cid_err = 0; + pci_err(xdev->pdev, "hw report unexpected rsp cid!\n"); + } else { + stat.raw = 0; + pci_err(xdev->pdev, "ignore unknown interrupt!\n"); + } + } + + if (retry) + writel(xdev->cmd.cmd_pid, + XSC_REG_ADDR(xdev, xdev->cmd.reg.req_pid_addr)); + + if (err) + writel(0xf, + XSC_REG_ADDR(xdev, xdev->cmd.reg.interrupt_stat_addr)); + + return err; +} + +void xsc_cmd_resp_handler(struct xsc_core_device *xdev) +{ + struct xsc_cmd *cmd = &xdev->cmd; + struct xsc_rsp_layout *rsp; + const int budget = 32; + int count = 0; + u32 cq_pid; + + while (count < budget) { + cq_pid = readl(XSC_REG_ADDR(xdev, cmd->reg.rsp_pid_addr)); + if (cq_pid == cmd->cq_cid) + return; + + rsp = xsc_get_cq_inst(cmd, cmd->cq_cid); + if (!cmd->ownerbit_learned) { + cmd->ownerbit_learned = 1; + cmd->owner_bit = rsp->owner_bit; + } + if (cmd->owner_bit != rsp->owner_bit) { + pci_err(xdev->pdev, "hw update cq doorbell but buf not ready %u %u\n", + cmd->cq_cid, cq_pid); + return; + } + + xsc_cmd_comp_handler(xdev, rsp->idx, rsp); + + cmd->cq_cid = (cmd->cq_cid + 1) % (1 << cmd->log_sz); + writel(cmd->cq_cid, XSC_REG_ADDR(xdev, cmd->reg.rsp_cid_addr)); + if (cmd->cq_cid == 0) + cmd->owner_bit = !cmd->owner_bit; + + count++; + } +} + +static void xsc_cmd_handle_rsp_before_reload +(struct xsc_cmd *cmd, struct xsc_core_device *xdev) +{ + u32 rsp_pid, rsp_cid; + + rsp_pid = readl(XSC_REG_ADDR(xdev, cmd->reg.rsp_pid_addr)); + rsp_cid = readl(XSC_REG_ADDR(xdev, cmd->reg.rsp_cid_addr)); + if (rsp_pid == rsp_cid) + return; + + cmd->cq_cid = rsp_pid; + + 
writel(cmd->cq_cid, XSC_REG_ADDR(xdev, cmd->reg.rsp_cid_addr));
+}
+
+int xsc_cmd_init(struct xsc_core_device *xdev)
+{
+	unsigned int size = sizeof(struct xsc_cmd_prot_block);
+	int align = roundup_pow_of_two(size);
+	struct xsc_cmd *cmd = &xdev->cmd;
+	u32 cmd_h, cmd_l;
+	u32 err_stat;
+	int err;
+	int i;
+
+	/* there are 544 cmdq resources in total; the SoC side uses IDs
+	 * starting from 514.
+	 */
+	cmd->reg.req_pid_addr = HIF_CMDQM_HOST_REQ_PID_MEM_ADDR;
+	cmd->reg.req_cid_addr = HIF_CMDQM_HOST_REQ_CID_MEM_ADDR;
+	cmd->reg.rsp_pid_addr = HIF_CMDQM_HOST_RSP_PID_MEM_ADDR;
+	cmd->reg.rsp_cid_addr = HIF_CMDQM_HOST_RSP_CID_MEM_ADDR;
+	cmd->reg.req_buf_h_addr = HIF_CMDQM_HOST_REQ_BUF_BASE_H_ADDR_MEM_ADDR;
+	cmd->reg.req_buf_l_addr = HIF_CMDQM_HOST_REQ_BUF_BASE_L_ADDR_MEM_ADDR;
+	cmd->reg.rsp_buf_h_addr = HIF_CMDQM_HOST_RSP_BUF_BASE_H_ADDR_MEM_ADDR;
+	cmd->reg.rsp_buf_l_addr = HIF_CMDQM_HOST_RSP_BUF_BASE_L_ADDR_MEM_ADDR;
+	cmd->reg.msix_vec_addr = HIF_CMDQM_VECTOR_ID_MEM_ADDR;
+	cmd->reg.element_sz_addr = HIF_CMDQM_Q_ELEMENT_SZ_REG_ADDR;
+	cmd->reg.q_depth_addr = HIF_CMDQM_HOST_Q_DEPTH_REG_ADDR;
+	cmd->reg.interrupt_stat_addr = HIF_CMDQM_HOST_VF_ERR_STS_MEM_ADDR;
+
+	cmd->pool = dma_pool_create("xsc_cmd",
+				    &xdev->pdev->dev,
+				    size, align, 0);
+	if (!cmd->pool)
+		return -ENOMEM;
+
+	cmd->cmd_buf = (void *)__get_free_pages(GFP_ATOMIC, 0);
+	if (!cmd->cmd_buf) {
+		err = -ENOMEM;
+		goto err_free_pool;
+	}
+	cmd->cq_buf = (void *)__get_free_pages(GFP_ATOMIC, 0);
+	if (!cmd->cq_buf) {
+		err = -ENOMEM;
+		goto err_free_cmd;
+	}
+
+	cmd->dma = dma_map_single(&xdev->pdev->dev, cmd->cmd_buf, PAGE_SIZE,
+				  DMA_BIDIRECTIONAL);
+	if (dma_mapping_error(&xdev->pdev->dev, cmd->dma)) {
+		err = -ENOMEM;
+		goto err_free_cq;
+	}
+
+	cmd->cq_dma = dma_map_single(&xdev->pdev->dev, cmd->cq_buf, PAGE_SIZE,
+				     DMA_BIDIRECTIONAL);
+	if (dma_mapping_error(&xdev->pdev->dev, cmd->cq_dma)) {
+		err = -ENOMEM;
+		goto err_unmap_cmd;
+	}
+
+	cmd->cmd_pid = readl(XSC_REG_ADDR(xdev, cmd->reg.req_pid_addr));
+	cmd->cq_cid = readl(XSC_REG_ADDR(xdev, cmd->reg.rsp_cid_addr));
+	cmd->ownerbit_learned = 0;
+
+	xsc_cmd_handle_rsp_before_reload(cmd, xdev);
+
+#define ELEMENT_SIZE_LOG 6 /* 64B */
+#define Q_DEPTH_LOG 5 /* 32 */
+
+	cmd->log_sz = Q_DEPTH_LOG;
+	cmd->log_stride = readl(XSC_REG_ADDR(xdev, cmd->reg.element_sz_addr));
+	writel(1 << cmd->log_sz, XSC_REG_ADDR(xdev, cmd->reg.q_depth_addr));
+	if (cmd->log_stride != ELEMENT_SIZE_LOG) {
+		dev_err(&xdev->pdev->dev, "unexpected cmdq element size: log_stride %d, expected %d\n",
+			cmd->log_stride, ELEMENT_SIZE_LOG);
+		err = -ENODEV;
+		goto err_unmap_cq;
+	}
+
+	if (1 << cmd->log_sz > XSC_MAX_COMMANDS) {
+		dev_err(&xdev->pdev->dev, "firmware reports too many outstanding commands %d\n",
+			1 << cmd->log_sz);
+		err = -EINVAL;
+		goto err_unmap_cq;
+	}
+
+	if (cmd->log_sz + cmd->log_stride > PAGE_SHIFT) {
+		dev_err(&xdev->pdev->dev, "command queue size overflow\n");
+		err = -EINVAL;
+		goto err_unmap_cq;
+	}
+
+	cmd->checksum_disabled = 1;
+	cmd->max_reg_cmds = (1 << cmd->log_sz) - 1;
+	cmd->cmd_entry_mask = (1 << cmd->max_reg_cmds) - 1;
+
+	spin_lock_init(&cmd->alloc_lock);
+	spin_lock_init(&cmd->token_lock);
+	spin_lock_init(&cmd->doorbell_lock);
+	for (i = 0; i < ARRAY_SIZE(cmd->stats); i++)
+		spin_lock_init(&cmd->stats[i].lock);
+
+	sema_init(&cmd->sem, cmd->max_reg_cmds);
+
+	cmd_h = upper_32_bits(cmd->dma);
+	cmd_l = lower_32_bits(cmd->dma);
+	if (cmd_l & 0xfff) {
+		dev_err(&xdev->pdev->dev, "invalid command queue address\n");
+		err = -ENOMEM;
+		goto err_unmap_cq;
+	}
+
+	writel(cmd_h, XSC_REG_ADDR(xdev,
cmd->reg.req_buf_h_addr)); + writel(cmd_l, XSC_REG_ADDR(xdev, cmd->reg.req_buf_l_addr)); + + cmd_h = upper_32_bits(cmd->cq_dma); + cmd_l = lower_32_bits(cmd->cq_dma); + if (cmd_l & 0xfff) { + dev_err(&xdev->pdev->dev, "invalid command queue address\n"); + err = -ENOMEM; + goto err_unmap_cq; + } + writel(cmd_h, XSC_REG_ADDR(xdev, cmd->reg.rsp_buf_h_addr)); + writel(cmd_l, XSC_REG_ADDR(xdev, cmd->reg.rsp_buf_l_addr)); + + /* Make sure firmware sees the complete address before we proceed */ + wmb(); + + cmd->mode = XSC_CMD_MODE_POLLING; + cmd->cmd_status = XSC_CMD_STATUS_NORMAL; + + err = xsc_create_msg_cache(xdev); + if (err) { + dev_err(&xdev->pdev->dev, "failed to create command cache\n"); + goto err_unmap_cq; + } + + xsc_set_wqname(xdev); + cmd->wq = create_singlethread_workqueue(cmd->wq_name); + if (!cmd->wq) { + dev_err(&xdev->pdev->dev, "failed to create command workqueue\n"); + err = -ENOMEM; + goto err_destroy_cache; + } + + cmd->cq_task = kthread_create(xsc_cmd_cq_polling, + (void *)xdev, + "xsc_cmd_cq_polling"); + if (!cmd->cq_task) { + dev_err(&xdev->pdev->dev, "failed to create cq task\n"); + err = -ENOMEM; + goto err_destroy_wq; + } + wake_up_process(cmd->cq_task); + + err = xsc_request_pid_cid_mismatch_restore(xdev); + if (err) { + dev_err(&xdev->pdev->dev, "request pid,cid wrong, restore failed\n"); + goto err_stop_cq_task; + } + + /* clear abnormal state to avoid the impact of previous error */ + err_stat = readl(XSC_REG_ADDR(xdev, xdev->cmd.reg.interrupt_stat_addr)); + if (err_stat) { + pci_err(xdev->pdev, "err_stat 0x%x when init, clear it\n", + err_stat); + writel(0xf, + XSC_REG_ADDR(xdev, xdev->cmd.reg.interrupt_stat_addr)); + } + + return 0; + +err_stop_cq_task: + kthread_stop(cmd->cq_task); + +err_destroy_wq: + destroy_workqueue(cmd->wq); + +err_destroy_cache: + xsc_destroy_msg_cache(xdev); + +err_unmap_cq: + dma_unmap_single(&xdev->pdev->dev, cmd->cq_dma, PAGE_SIZE, + DMA_BIDIRECTIONAL); + +err_unmap_cmd: + dma_unmap_single(&xdev->pdev->dev, cmd->dma, PAGE_SIZE, + DMA_BIDIRECTIONAL); +err_free_cq: + free_pages((unsigned long)cmd->cq_buf, 0); + +err_free_cmd: + free_pages((unsigned long)cmd->cmd_buf, 0); + +err_free_pool: + dma_pool_destroy(cmd->pool); + + return err; +} + +void xsc_cmd_cleanup(struct xsc_core_device *xdev) +{ + struct xsc_cmd *cmd = &xdev->cmd; + + destroy_workqueue(cmd->wq); + if (cmd->cq_task) + kthread_stop(cmd->cq_task); + xsc_destroy_msg_cache(xdev); + dma_unmap_single(&xdev->pdev->dev, cmd->dma, PAGE_SIZE, + DMA_BIDIRECTIONAL); + free_pages((unsigned long)cmd->cq_buf, 0); + dma_unmap_single(&xdev->pdev->dev, cmd->cq_dma, PAGE_SIZE, + DMA_BIDIRECTIONAL); + free_pages((unsigned long)cmd->cmd_buf, 0); + dma_pool_destroy(cmd->pool); +} + +int xsc_cmd_version_check(struct xsc_core_device *xdev) +{ + struct xsc_cmd_query_cmdq_ver_mbox_out *out; + struct xsc_cmd_query_cmdq_ver_mbox_in in; + + int err; + + out = kzalloc(sizeof(*out), GFP_KERNEL); + if (!out) + return -ENOMEM; + + memset(&in, 0, sizeof(in)); + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_QUERY_CMDQ_VERSION); + + err = xsc_cmd_exec(xdev, &in, sizeof(in), out, sizeof(*out)); + if (err) + goto err_free; + + if (out->hdr.status) { + err = xsc_cmd_status_to_err(&out->hdr); + goto err_free; + } + + if (be16_to_cpu(out->cmdq_ver) != XSC_CMDQ_VERSION) { + pci_err(xdev->pdev, "cmdq version check failed, expecting %d, actual %d\n", + XSC_CMDQ_VERSION, be16_to_cpu(out->cmdq_ver)); + err = -EINVAL; + goto err_free; + } + xdev->cmdq_ver = XSC_CMDQ_VERSION; + +err_free: + kfree(out); + return err; +} + 
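+/* Usage sketch (illustrative only, mirroring xsc_cmd_version_check() above):
+ * a caller builds an in/out mailbox pair, sets the opcode, and must check
+ * both the transport error and the firmware status in the out header:
+ *
+ *	struct xsc_cmd_query_cmdq_ver_mbox_out out = {};
+ *	struct xsc_cmd_query_cmdq_ver_mbox_in in = {};
+ *	int err;
+ *
+ *	in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_QUERY_CMDQ_VERSION);
+ *	err = xsc_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out));
+ *	if (!err && out.hdr.status)
+ *		err = xsc_cmd_status_to_err(&out.hdr);
+ */
+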
+static const char *cmd_status_str(u8 status) +{ + switch (status) { + case XSC_CMD_STAT_OK: + return "OK"; + case XSC_CMD_STAT_INT_ERR: + return "internal error"; + case XSC_CMD_STAT_BAD_OP_ERR: + return "bad operation"; + case XSC_CMD_STAT_BAD_PARAM_ERR: + return "bad parameter"; + case XSC_CMD_STAT_BAD_SYS_STATE_ERR: + return "bad system state"; + case XSC_CMD_STAT_BAD_RES_ERR: + return "bad resource"; + case XSC_CMD_STAT_RES_BUSY: + return "resource busy"; + case XSC_CMD_STAT_LIM_ERR: + return "limits exceeded"; + case XSC_CMD_STAT_BAD_RES_STATE_ERR: + return "bad resource state"; + case XSC_CMD_STAT_IX_ERR: + return "bad index"; + case XSC_CMD_STAT_NO_RES_ERR: + return "no resources"; + case XSC_CMD_STAT_BAD_INP_LEN_ERR: + return "bad input length"; + case XSC_CMD_STAT_BAD_OUTP_LEN_ERR: + return "bad output length"; + case XSC_CMD_STAT_BAD_QP_STATE_ERR: + return "bad QP state"; + case XSC_CMD_STAT_BAD_PKT_ERR: + return "bad packet (discarded)"; + case XSC_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR: + return "bad size too many outstanding CQEs"; + default: + return "unknown status"; + } +} + +int xsc_cmd_status_to_err(struct xsc_outbox_hdr *hdr) +{ + if (!hdr->status) + return 0; + + pr_warn("command failed, status %s(0x%x)\n", + cmd_status_str(hdr->status), hdr->status); + + switch (hdr->status) { + case XSC_CMD_STAT_OK: return 0; + case XSC_CMD_STAT_INT_ERR: return -EIO; + case XSC_CMD_STAT_BAD_OP_ERR: return -EOPNOTSUPP; + case XSC_CMD_STAT_BAD_PARAM_ERR: return -EINVAL; + case XSC_CMD_STAT_BAD_SYS_STATE_ERR: return -EIO; + case XSC_CMD_STAT_BAD_RES_ERR: return -EINVAL; + case XSC_CMD_STAT_RES_BUSY: return -EBUSY; + case XSC_CMD_STAT_LIM_ERR: return -EINVAL; + case XSC_CMD_STAT_BAD_RES_STATE_ERR: return -EINVAL; + case XSC_CMD_STAT_IX_ERR: return -EINVAL; + case XSC_CMD_STAT_NO_RES_ERR: return -EAGAIN; + case XSC_CMD_STAT_BAD_INP_LEN_ERR: return -EIO; + case XSC_CMD_STAT_BAD_OUTP_LEN_ERR: return -EIO; + case XSC_CMD_STAT_BAD_QP_STATE_ERR: return -EINVAL; + case XSC_CMD_STAT_BAD_PKT_ERR: return -EINVAL; + case XSC_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR: return -EINVAL; + default: return -EIO; + } +} + diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/cq.c b/drivers/net/ethernet/yunsilicon/xsc/pci/cq.c new file mode 100644 index 0000000000000..493d846cf8af5 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/cq.c @@ -0,0 +1,155 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021-2025, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */ + +#include "common/xsc_core.h" +#include "common/xsc_driver.h" +#include "cq.h" + +void xsc_cq_event(struct xsc_core_device *xdev, u32 cqn, int event_type) +{ + struct xsc_cq_table *table = &xdev->dev_res->cq_table; + struct xsc_core_cq *cq; + + spin_lock(&table->lock); + + cq = radix_tree_lookup(&table->tree, cqn); + if (cq) + atomic_inc(&cq->refcount); + + spin_unlock(&table->lock); + + if (!cq) { + pci_err(xdev->pdev, "Async event for bogus CQ 0x%x\n", cqn); + return; + } + + cq->event(cq, event_type); + + if (atomic_dec_and_test(&cq->refcount)) + complete(&cq->free); +} + +void xsc_init_cq_table(struct xsc_core_device *xdev) +{ + struct xsc_cq_table *table = &xdev->dev_res->cq_table; + + spin_lock_init(&table->lock); + INIT_RADIX_TREE(&table->tree, GFP_ATOMIC); +} + +static int xsc_create_cq(struct xsc_core_device *xdev, u32 *p_cqn, + struct xsc_create_cq_mbox_in *in, int insize) +{ + struct xsc_create_cq_mbox_out out; + int ret; + + memset(&out, 0, sizeof(out)); + in->hdr.opcode = cpu_to_be16(XSC_CMD_OP_CREATE_CQ); + ret = xsc_cmd_exec(xdev, in, insize, &out, sizeof(out)); + if (ret || out.hdr.status) { + pci_err(xdev->pdev, "failed to create cq, err=%d out.status=%u\n", + ret, out.hdr.status); + return -ENOEXEC; + } + + *p_cqn = be32_to_cpu(out.cqn) & 0xffffff; + return 0; +} + +static int xsc_destroy_cq(struct xsc_core_device *xdev, u32 cqn) +{ + struct xsc_destroy_cq_mbox_out out; + struct xsc_destroy_cq_mbox_in in; + int ret; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_DESTROY_CQ); + in.cqn = cpu_to_be32(cqn); + ret = xsc_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out)); + if (ret || out.hdr.status) { + pci_err(xdev->pdev, "failed to destroy cq, err=%d out.status=%u\n", + ret, out.hdr.status); + return -ENOEXEC; + } + + return 0; +} + +int xsc_core_eth_create_cq(struct xsc_core_device *xdev, + struct xsc_core_cq *xcq, + struct xsc_create_cq_mbox_in *in, + int insize) +{ + struct xsc_cq_table *table = &xdev->dev_res->cq_table; + u32 cqn; + int ret; + int err; + + ret = xsc_create_cq(xdev, &cqn, in, insize); + if (ret) { + pci_err(xdev->pdev, "xsc_create_cq failed\n"); + return -ENOEXEC; + } + xcq->cqn = cqn; + xcq->cons_index = 0; + xcq->arm_sn = 0; + atomic_set(&xcq->refcount, 1); + init_completion(&xcq->free); + + spin_lock_irq(&table->lock); + ret = radix_tree_insert(&table->tree, xcq->cqn, xcq); + spin_unlock_irq(&table->lock); + if (ret) + goto err_insert_cq; + return 0; +err_insert_cq: + err = xsc_destroy_cq(xdev, cqn); + if (err) + pci_err(xdev->pdev, "failed to destroy cqn=%d, err=%d\n", + xcq->cqn, err); + return ret; +} +EXPORT_SYMBOL(xsc_core_eth_create_cq); + +int xsc_core_eth_destroy_cq(struct xsc_core_device *xdev, + struct xsc_core_cq *xcq) +{ + struct xsc_cq_table *table = &xdev->dev_res->cq_table; + struct xsc_core_cq *tmp; + int err; + + spin_lock_irq(&table->lock); + tmp = radix_tree_delete(&table->tree, xcq->cqn); + spin_unlock_irq(&table->lock); + if (!tmp) { + err = -ENOENT; + goto err_delete_cq; + } + + if (tmp != xcq) { + err = -EINVAL; + goto err_delete_cq; + } + + err = xsc_destroy_cq(xdev, xcq->cqn); + if (err) + goto err_destroy_cq; + + if (atomic_dec_and_test(&xcq->refcount)) + complete(&xcq->free); + wait_for_completion(&xcq->free); + return 0; + +err_destroy_cq: + pci_err(xdev->pdev, "failed to destroy cqn=%d, err=%d\n", + xcq->cqn, err); + return err; +err_delete_cq: + pci_err(xdev->pdev, "cqn=%d not found in tree, err=%d\n", + xcq->cqn, err); + return err; +} 
+EXPORT_SYMBOL(xsc_core_eth_destroy_cq); diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/cq.h b/drivers/net/ethernet/yunsilicon/xsc/pci/cq.h new file mode 100644 index 0000000000000..902a7f1f23700 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/cq.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021-2025, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#ifndef __CQ_H +#define __CQ_H + +#include "common/xsc_core.h" + +void xsc_cq_event(struct xsc_core_device *xdev, u32 cqn, int event_type); +void xsc_init_cq_table(struct xsc_core_device *xdev); + +#endif /* __CQ_H */ diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/eq.c b/drivers/net/ethernet/yunsilicon/xsc/pci/eq.c new file mode 100644 index 0000000000000..aa2c0ba61cda8 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/eq.c @@ -0,0 +1,340 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021-2025, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ +#include +#include +#include + +#include "common/xsc_driver.h" +#include "common/xsc_core.h" +#include "qp.h" +#include "alloc.h" +#include "eq.h" + +enum { + XSC_EQE_SIZE = sizeof(struct xsc_eqe), +}; + +enum { + XSC_NUM_SPARE_EQE = 0x80, + XSC_NUM_ASYNC_EQE = 0x100, +}; + +static int xsc_cmd_destroy_eq(struct xsc_core_device *xdev, u32 eqn) +{ + struct xsc_destroy_eq_mbox_out out = {0}; + struct xsc_destroy_eq_mbox_in in = {0}; + int err; + + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_DESTROY_EQ); + in.eqn = cpu_to_be32(eqn); + err = xsc_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out)); + if (!err) + return 0; + + if (out.hdr.status) + err = xsc_cmd_status_to_err(&out.hdr); + + return err; +} + +static struct xsc_eqe *get_eqe(struct xsc_eq *eq, u32 entry) +{ + return xsc_buf_offset(&eq->buf, entry * XSC_EQE_SIZE); +} + +static struct xsc_eqe *next_eqe_sw(struct xsc_eq *eq) +{ + struct xsc_eqe *eqe = get_eqe(eq, eq->cons_index & (eq->nent - 1)); + + return ((eqe->owner_data & XSC_EQE_OWNER) ^ + !!(eq->cons_index & eq->nent)) ? 
NULL : eqe; +} + +static void eq_update_ci(struct xsc_eq *eq, int arm) +{ + u32 db_val = 0; + + db_val = FIELD_PREP(XSC_EQ_DB_NEXT_CID_MASK, eq->cons_index) | + FIELD_PREP(XSC_EQ_DB_EQ_ID_MASK, eq->eqn); + if (arm) + db_val |= XSC_EQ_DB_ARM; + /* make sure memory is written before device access */ + wmb(); + writel(db_val, XSC_REG_ADDR(eq->dev, eq->doorbell)); +} + +static void xsc_cq_completion(struct xsc_core_device *xdev, u32 cqn) +{ + struct xsc_cq_table *table = &xdev->dev_res->cq_table; + struct xsc_core_cq *cq; + + rcu_read_lock(); + cq = radix_tree_lookup(&table->tree, cqn); + if (likely(cq)) + atomic_inc(&cq->refcount); + rcu_read_unlock(); + + if (!cq) { + pci_err(xdev->pdev, "Completion event for bogus CQ, cqn=%d\n", + cqn); + return; + } + + ++cq->arm_sn; + + if (!cq->comp) + pci_err(xdev->pdev, "cq->comp is NULL\n"); + else + cq->comp(cq); + + if (atomic_dec_and_test(&cq->refcount)) + complete(&cq->free); +} + +static void xsc_eq_cq_event(struct xsc_core_device *xdev, + u32 cqn, int event_type) +{ + struct xsc_cq_table *table = &xdev->dev_res->cq_table; + struct xsc_core_cq *cq; + + spin_lock(&table->lock); + cq = radix_tree_lookup(&table->tree, cqn); + if (likely(cq)) + atomic_inc(&cq->refcount); + spin_unlock(&table->lock); + + if (unlikely(!cq)) { + pci_err(xdev->pdev, "Async event for bogus CQ, cqn=%d\n", + cqn); + return; + } + + cq->event(cq, event_type); + + if (atomic_dec_and_test(&cq->refcount)) + complete(&cq->free); +} + +static int xsc_eq_int(struct xsc_core_device *xdev, struct xsc_eq *eq) +{ + u32 cqn, qpn, queue_id; + struct xsc_eqe *eqe; + int eqes_found = 0; + int set_ci = 0; + + while ((eqe = next_eqe_sw(eq))) { + /* Make sure we read EQ entry contents after we've + * checked the ownership bit. + */ + rmb(); + switch (eqe->type) { + case XSC_EVENT_TYPE_COMP: + case XSC_EVENT_TYPE_INTERNAL_ERROR: + /* eqe is changing */ + queue_id = FIELD_GET(XSC_EQE_QUEUE_ID_MASK, + le16_to_cpu(eqe->queue_id_data)); + cqn = queue_id; + xsc_cq_completion(xdev, cqn); + break; + + case XSC_EVENT_TYPE_CQ_ERROR: + queue_id = FIELD_GET(XSC_EQE_QUEUE_ID_MASK, + le16_to_cpu(eqe->queue_id_data)); + cqn = queue_id; + xsc_eq_cq_event(xdev, cqn, eqe->type); + break; + case XSC_EVENT_TYPE_WQ_CATAS_ERROR: + case XSC_EVENT_TYPE_WQ_INVAL_REQ_ERROR: + case XSC_EVENT_TYPE_WQ_ACCESS_ERROR: + queue_id = FIELD_GET(XSC_EQE_QUEUE_ID_MASK, + le16_to_cpu(eqe->queue_id_data)); + qpn = queue_id; + xsc_qp_event(xdev, qpn, eqe->type); + break; + default: + break; + } + + ++eq->cons_index; + eqes_found = 1; + ++set_ci; + + /* The HCA will think the queue has overflowed if we + * don't tell it we've been processing events. We + * create our EQs with XSC_NUM_SPARE_EQE extra + * entries, so we must update our consumer index at + * least that often. 
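+		 * With XSC_NUM_SPARE_EQE = 0x80 this publishes the consumer
+		 * index to hardware at least once every 128 entries.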
+ */ + if (unlikely(set_ci >= XSC_NUM_SPARE_EQE)) { + eq_update_ci(eq, 0); + set_ci = 0; + } + } + + eq_update_ci(eq, 1); + + return eqes_found; +} + +static irqreturn_t xsc_msix_handler(int irq, void *eq_ptr) +{ + struct xsc_core_device *xdev; + struct xsc_eq *eq = eq_ptr; + + xdev = eq->dev; + xsc_eq_int(xdev, eq); + + /* MSI-X vectors always belong to us */ + return IRQ_HANDLED; +} + +static void init_eq_buf(struct xsc_eq *eq) +{ + struct xsc_eqe *eqe; + int i; + + for (i = 0; i < eq->nent; i++) { + eqe = get_eqe(eq, i); + eqe->owner_data |= XSC_EQE_OWNER; + } +} + +int xsc_create_map_eq(struct xsc_core_device *xdev, + struct xsc_eq *eq, u16 vecidx, + u32 nent, const char *name) +{ + u16 msix_vec_offset = xdev->msix_vec_base + vecidx; + struct xsc_dev_resource *dev_res = xdev->dev_res; + struct xsc_create_eq_mbox_out out; + struct xsc_create_eq_mbox_in *in; + unsigned int hw_npages; + u32 inlen; + int err; + + eq->nent = roundup_pow_of_two(roundup(nent, XSC_NUM_SPARE_EQE)); + err = xsc_buf_alloc(xdev, eq->nent * XSC_EQE_SIZE, PAGE_SIZE, &eq->buf); + if (err) + return err; + + init_eq_buf(eq); + + hw_npages = DIV_ROUND_UP(eq->nent * XSC_EQE_SIZE, PAGE_SIZE_4K); + inlen = sizeof(*in) + sizeof(in->pas[0]) * hw_npages; + in = kvzalloc(inlen, GFP_KERNEL); + if (!in) { + err = -ENOMEM; + goto err_free_eq_buf; + } + memset(&out, 0, sizeof(out)); + + xsc_fill_page_array(&eq->buf, in->pas, hw_npages); + + in->hdr.opcode = cpu_to_be16(XSC_CMD_OP_CREATE_EQ); + in->ctx.log_eq_sz = ilog2(eq->nent); + in->ctx.vecidx = cpu_to_be16(msix_vec_offset); + in->ctx.pa_num = cpu_to_be16(hw_npages); + in->ctx.glb_func_id = cpu_to_be16(xdev->glb_func_id); + in->ctx.is_async_eq = (vecidx == XSC_EQ_VEC_ASYNC ? 1 : 0); + + err = xsc_cmd_exec(xdev, in, inlen, &out, sizeof(out)); + if (err) + goto err_free_in; + + if (out.hdr.status) { + err = -ENOSPC; + goto err_free_in; + } + + snprintf(dev_res->irq_info[vecidx].name, XSC_MAX_IRQ_NAME, "%s@pci:%s", + name, pci_name(xdev->pdev)); + + eq->eqn = be32_to_cpu(out.eqn); + eq->irqn = pci_irq_vector(xdev->pdev, vecidx); + eq->dev = xdev; + eq->doorbell = xdev->regs.event_db; + eq->index = vecidx; + + err = request_irq(eq->irqn, xsc_msix_handler, 0, + dev_res->irq_info[vecidx].name, eq); + if (err) + goto err_destroy_eq; + + /* EQs are created in ARMED state + */ + eq_update_ci(eq, 1); + kvfree(in); + return 0; + +err_destroy_eq: + xsc_cmd_destroy_eq(xdev, eq->eqn); + +err_free_in: + kvfree(in); + +err_free_eq_buf: + xsc_buf_free(xdev, &eq->buf); + return err; +} + +int xsc_destroy_unmap_eq(struct xsc_core_device *xdev, struct xsc_eq *eq) +{ + int err; + + if (!xsc_fw_is_available(xdev)) + return 0; + + free_irq(eq->irqn, eq); + err = xsc_cmd_destroy_eq(xdev, eq->eqn); + if (err) + pci_err(xdev->pdev, "failed to destroy a previously created eq: eqn %d\n", + eq->eqn); + xsc_buf_free(xdev, &eq->buf); + + return err; +} + +void xsc_eq_init(struct xsc_core_device *xdev) +{ + spin_lock_init(&xdev->dev_res->eq_table.lock); +} + +int xsc_start_eqs(struct xsc_core_device *xdev) +{ + struct xsc_eq_table *table = &xdev->dev_res->eq_table; + int err; + + err = xsc_create_map_eq(xdev, &table->async_eq, XSC_EQ_VEC_ASYNC, + XSC_NUM_ASYNC_EQE, "xsc_async_eq"); + if (err) + pci_err(xdev->pdev, "failed to create async EQ %d\n", err); + + return err; +} + +void xsc_stop_eqs(struct xsc_core_device *xdev) +{ + struct xsc_eq_table *table = &xdev->dev_res->eq_table; + + xsc_destroy_unmap_eq(xdev, &table->async_eq); +} + +struct xsc_eq *xsc_core_eq_get(struct xsc_core_device *xdev, int i) +{ + 
struct xsc_eq_table *table = &xdev->dev_res->eq_table; + struct xsc_eq *eq_ret = NULL; + struct xsc_eq *eq, *n; + + spin_lock(&table->lock); + list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) { + if (eq->index == i) { + eq_ret = eq; + break; + } + } + spin_unlock(&table->lock); + + return eq_ret; +} +EXPORT_SYMBOL(xsc_core_eq_get); diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/eq.h b/drivers/net/ethernet/yunsilicon/xsc/pci/eq.h new file mode 100644 index 0000000000000..b863a459b01f7 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/eq.h @@ -0,0 +1,46 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021-2025, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#ifndef __EQ_H +#define __EQ_H + +#include "common/xsc_core.h" + +enum { + XSC_EQ_VEC_ASYNC = 0, + XSC_VEC_CMD = 1, + XSC_VEC_CMD_EVENT = 2, + XSC_DMA_READ_DONE_VEC = 3, + XSC_EQ_VEC_COMP_BASE, +}; + +struct xsc_eqe { + u8 type; + u8 sub_type; + __le16 queue_id_data; +#define XSC_EQE_QUEUE_ID_MASK GENMASK(14, 0) + + u8 err_code; + u8 rsvd[2]; + u8 owner_data; +#define XSC_EQE_OWNER BIT(7) +}; + +/* eq doorbell bitfields */ +#define XSC_EQ_DB_NEXT_CID_SHIFT 0 +#define XSC_EQ_DB_NEXT_CID_MASK GENMASK(10, 0) +#define XSC_EQ_DB_EQ_ID_SHIFT 11 +#define XSC_EQ_DB_EQ_ID_MASK GENMASK(21, 11) +#define XSC_EQ_DB_ARM BIT(22) + +int xsc_create_map_eq(struct xsc_core_device *xdev, + struct xsc_eq *eq, u16 vecidx, + u32 nent, const char *name); +int xsc_destroy_unmap_eq(struct xsc_core_device *xdev, struct xsc_eq *eq); +void xsc_eq_init(struct xsc_core_device *xdev); +int xsc_start_eqs(struct xsc_core_device *xdev); +void xsc_stop_eqs(struct xsc_core_device *xdev); + +#endif diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/hw.c b/drivers/net/ethernet/yunsilicon/xsc/pci/hw.c new file mode 100644 index 0000000000000..6c39aac56231f --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/hw.c @@ -0,0 +1,283 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021-2025, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */ + +#include +#include + +#include "common/xsc_driver.h" +#include "hw.h" + +#define MAX_BOARD_NUM 32 + +static struct xsc_board_info *board_info[MAX_BOARD_NUM]; + +static struct xsc_board_info *xsc_get_board_info(char *board_sn) +{ + int i; + + for (i = 0; i < MAX_BOARD_NUM; i++) { + if (!board_info[i]) + continue; + if (!strncmp(board_info[i]->board_sn, board_sn, + XSC_BOARD_SN_LEN)) + return board_info[i]; + } + + return NULL; +} + +static struct xsc_board_info *xsc_alloc_board_info(void) +{ + int i; + + for (i = 0; i < MAX_BOARD_NUM; i++) { + if (!board_info[i]) + break; + } + if (i == MAX_BOARD_NUM) + return NULL; + board_info[i] = vzalloc(sizeof(*board_info[i])); + if (!board_info[i]) + return NULL; + board_info[i]->board_id = i; + return board_info[i]; +} + +void xsc_free_board_info(void) +{ + int i; + + for (i = 0; i < MAX_BOARD_NUM; i++) + vfree(board_info[i]); +} + +int xsc_cmd_query_hca_cap(struct xsc_core_device *xdev, + struct xsc_caps *caps) +{ + struct xsc_cmd_query_hca_cap_mbox_out *out; + struct xsc_cmd_query_hca_cap_mbox_in in; + struct xsc_board_info *board_info; + int err; + u16 t16; + + out = kzalloc(sizeof(*out), GFP_KERNEL); + if (!out) + return -ENOMEM; + + memset(&in, 0, sizeof(in)); + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_QUERY_HCA_CAP); + in.cpu_num = cpu_to_be16(num_online_cpus()); + + err = xsc_cmd_exec(xdev, &in, sizeof(in), out, sizeof(*out)); + if (err) + goto err_free; + + if (out->hdr.status) { + err = xsc_cmd_status_to_err(&out->hdr); + goto err_free; + } + + xdev->glb_func_id = be32_to_cpu(out->hca_cap.glb_func_id); + caps->pcie0_pf_funcid_base = + be16_to_cpu(out->hca_cap.pcie0_pf_funcid_base); + caps->pcie0_pf_funcid_top = + be16_to_cpu(out->hca_cap.pcie0_pf_funcid_top); + caps->pcie1_pf_funcid_base = + be16_to_cpu(out->hca_cap.pcie1_pf_funcid_base); + caps->pcie1_pf_funcid_top = + be16_to_cpu(out->hca_cap.pcie1_pf_funcid_top); + caps->funcid_to_logic_port = + be16_to_cpu(out->hca_cap.funcid_to_logic_port); + + caps->pcie_host = out->hca_cap.pcie_host; + caps->nif_port_num = out->hca_cap.nif_port_num; + caps->hw_feature_flag = be32_to_cpu(out->hca_cap.hw_feature_flag); + + caps->raweth_qp_id_base = be16_to_cpu(out->hca_cap.raweth_qp_id_base); + caps->raweth_qp_id_end = be16_to_cpu(out->hca_cap.raweth_qp_id_end); + caps->raweth_rss_qp_id_base = + be16_to_cpu(out->hca_cap.raweth_rss_qp_id_base); + caps->raw_tpe_qp_num = be16_to_cpu(out->hca_cap.raw_tpe_qp_num); + caps->max_cqes = 1 << out->hca_cap.log_max_cq_sz; + caps->max_wqes = 1 << out->hca_cap.log_max_qp_sz; + caps->max_sq_desc_sz = be16_to_cpu(out->hca_cap.max_desc_sz_sq); + caps->max_rq_desc_sz = be16_to_cpu(out->hca_cap.max_desc_sz_rq); + caps->flags = be64_to_cpu(out->hca_cap.flags); + caps->stat_rate_support = be16_to_cpu(out->hca_cap.stat_rate_support); + caps->log_max_msg = out->hca_cap.log_max_msg & 0x1f; + caps->num_ports = out->hca_cap.num_ports & 0xf; + caps->log_max_cq = out->hca_cap.log_max_cq & 0x1f; + caps->log_max_eq = out->hca_cap.log_max_eq & 0xf; + caps->log_max_msix = out->hca_cap.log_max_msix & 0xf; + caps->mac_port = out->hca_cap.mac_port & 0xff; + xdev->mac_port = caps->mac_port; + if (caps->num_ports > XSC_MAX_FW_PORTS) { + pci_err(xdev->pdev, "device has %d ports while the driver supports max %d ports\n", + caps->num_ports, XSC_MAX_FW_PORTS); + err = -EINVAL; + goto err_free; + } + caps->send_ds_num = out->hca_cap.send_seg_num; + caps->send_wqe_shift = out->hca_cap.send_wqe_shift; + caps->recv_ds_num = out->hca_cap.recv_seg_num; + caps->recv_wqe_shift = 
out->hca_cap.recv_wqe_shift; + + caps->embedded_cpu = 0; + caps->ecpf_vport_exists = 0; + caps->log_max_current_uc_list = 0; + caps->log_max_current_mc_list = 0; + caps->log_max_vlan_list = 8; + caps->log_max_qp = out->hca_cap.log_max_qp & 0x1f; + caps->log_max_mkey = out->hca_cap.log_max_mkey & 0x3f; + caps->log_max_pd = out->hca_cap.log_max_pd & 0x1f; + caps->log_max_srq = out->hca_cap.log_max_srqs & 0x1f; + caps->local_ca_ack_delay = out->hca_cap.local_ca_ack_delay & 0x1f; + caps->log_max_mcg = out->hca_cap.log_max_mcg; + caps->log_max_mtt = out->hca_cap.log_max_mtt; + caps->log_max_tso = out->hca_cap.log_max_tso; + caps->hca_core_clock = be32_to_cpu(out->hca_cap.hca_core_clock); + caps->max_rwq_indirection_tables = + be32_to_cpu(out->hca_cap.max_rwq_indirection_tables); + caps->max_rwq_indirection_table_size = + be32_to_cpu(out->hca_cap.max_rwq_indirection_table_size); + caps->max_qp_mcg = be16_to_cpu(out->hca_cap.max_qp_mcg); + caps->max_ra_res_qp = 1 << (out->hca_cap.log_max_ra_res_qp & 0x3f); + caps->max_ra_req_qp = 1 << (out->hca_cap.log_max_ra_req_qp & 0x3f); + caps->max_srq_wqes = 1 << out->hca_cap.log_max_srq_sz; + caps->rx_pkt_len_max = be32_to_cpu(out->hca_cap.rx_pkt_len_max); + caps->max_vfs = be16_to_cpu(out->hca_cap.max_vfs); + caps->qp_rate_limit_min = be32_to_cpu(out->hca_cap.qp_rate_limit_min); + caps->qp_rate_limit_max = be32_to_cpu(out->hca_cap.qp_rate_limit_max); + + caps->msix_enable = 1; + caps->msix_base = be16_to_cpu(out->hca_cap.msix_base); + caps->msix_num = be16_to_cpu(out->hca_cap.msix_num); + + t16 = be16_to_cpu(out->hca_cap.bf_log_bf_reg_size); + if (t16 & 0x8000) { + caps->bf_reg_size = 1 << (t16 & 0x1f); + caps->bf_regs_per_page = XSC_BF_REGS_PER_PAGE; + } else { + caps->bf_reg_size = 0; + caps->bf_regs_per_page = 0; + } + caps->min_page_sz = ~(u32)((1 << PAGE_SHIFT) - 1); + + caps->dcbx = 1; + caps->qos = 1; + caps->ets = 1; + caps->dscp = 1; + caps->max_tc = out->hca_cap.max_tc; + caps->log_max_qp_depth = out->hca_cap.log_max_qp_depth & 0xff; + caps->mac_bit = out->hca_cap.mac_bit; + caps->lag_logic_port_ofst = out->hca_cap.lag_logic_port_ofst; + + xdev->chip_ver_h = be32_to_cpu(out->hca_cap.chip_ver_h); + xdev->chip_ver_m = be32_to_cpu(out->hca_cap.chip_ver_m); + xdev->chip_ver_l = be32_to_cpu(out->hca_cap.chip_ver_l); + xdev->hotfix_num = be32_to_cpu(out->hca_cap.hotfix_num); + xdev->feature_flag = be32_to_cpu(out->hca_cap.feature_flag); + + board_info = xsc_get_board_info(out->hca_cap.board_sn); + if (!board_info) { + board_info = xsc_alloc_board_info(); + if (!board_info) { + err = -ENOMEM; + goto err_free; + } + + memcpy(board_info->board_sn, + out->hca_cap.board_sn, + sizeof(out->hca_cap.board_sn)); + } + + xdev->board_info = board_info; + + xdev->regs.tx_db = be64_to_cpu(out->hca_cap.tx_db); + xdev->regs.rx_db = be64_to_cpu(out->hca_cap.rx_db); + xdev->regs.complete_db = be64_to_cpu(out->hca_cap.complete_db); + xdev->regs.complete_reg = be64_to_cpu(out->hca_cap.complete_reg); + xdev->regs.event_db = be64_to_cpu(out->hca_cap.event_db); + + xdev->fw_version_major = out->hca_cap.fw_ver.fw_version_major; + xdev->fw_version_minor = out->hca_cap.fw_ver.fw_version_minor; + xdev->fw_version_patch = + be16_to_cpu(out->hca_cap.fw_ver.fw_version_patch); + xdev->fw_version_tweak = + be32_to_cpu(out->hca_cap.fw_ver.fw_version_tweak); + xdev->fw_version_extra_flag = out->hca_cap.fw_ver.fw_version_extra_flag; + + kfree(out); + return 0; +err_free: + kfree(out); + return err; +} + +static int xsc_cmd_query_guid(struct xsc_core_device *xdev) +{ + struct 
xsc_cmd_query_guid_mbox_out out; + struct xsc_cmd_query_guid_mbox_in in; + int err; + + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_QUERY_GUID); + err = xsc_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out)); + if (err) + return err; + + if (out.hdr.status) + return xsc_cmd_status_to_err(&out.hdr); + xdev->board_info->guid = out.guid; + xdev->board_info->guid_valid = 1; + return 0; +} + +int xsc_query_guid(struct xsc_core_device *xdev) +{ + if (xdev->board_info->guid_valid) + return 0; + + return xsc_cmd_query_guid(xdev); +} + +static int xsc_cmd_activate_hw_config(struct xsc_core_device *xdev) +{ + struct xsc_cmd_activate_hw_config_mbox_out out; + struct xsc_cmd_activate_hw_config_mbox_in in; + int err = 0; + + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_ACTIVATE_HW_CONFIG); + err = xsc_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out)); + if (err) + return err; + if (out.hdr.status) + return xsc_cmd_status_to_err(&out.hdr); + xdev->board_info->hw_config_activated = 1; + return 0; +} + +int xsc_activate_hw_config(struct xsc_core_device *xdev) +{ + if (xdev->board_info->hw_config_activated) + return 0; + + return xsc_cmd_activate_hw_config(xdev); +} + +int xsc_reset_function_resource(struct xsc_core_device *xdev) +{ + struct xsc_function_reset_mbox_out out; + struct xsc_function_reset_mbox_in in; + int err; + + memset(&in, 0, sizeof(in)); + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_FUNCTION_RESET); + in.glb_func_id = cpu_to_be16(xdev->glb_func_id); + err = xsc_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out)); + if (err || out.hdr.status) + return -EINVAL; + + return 0; +} diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/hw.h b/drivers/net/ethernet/yunsilicon/xsc/pci/hw.h new file mode 100644 index 0000000000000..d1030bfdec0d5 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/hw.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021-2025, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#ifndef __HW_H +#define __HW_H + +#include "common/xsc_core.h" + +void xsc_free_board_info(void); +int xsc_cmd_query_hca_cap(struct xsc_core_device *xdev, + struct xsc_caps *caps); +int xsc_query_guid(struct xsc_core_device *xdev); +int xsc_activate_hw_config(struct xsc_core_device *xdev); +int xsc_reset_function_resource(struct xsc_core_device *xdev); + +#endif diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/main.c b/drivers/net/ethernet/yunsilicon/xsc/pci/main.c new file mode 100644 index 0000000000000..e1a7fd148f27e --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/main.c @@ -0,0 +1,326 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021-2025, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */
+
+#include "common/xsc_core.h"
+#include "common/xsc_driver.h"
+#include "hw.h"
+#include "qp.h"
+#include "cq.h"
+#include "eq.h"
+#include "pci_irq.h"
+#include "adev.h"
+
+static const struct pci_device_id xsc_pci_id_table[] = {
+	{ PCI_DEVICE(XSC_PCI_VENDOR_ID, XSC_MC_PF_DEV_ID) },
+	{ PCI_DEVICE(XSC_PCI_VENDOR_ID, XSC_MC_PF_DEV_ID_DIAMOND) },
+	{ PCI_DEVICE(XSC_PCI_VENDOR_ID, XSC_MF_HOST_PF_DEV_ID) },
+	{ PCI_DEVICE(XSC_PCI_VENDOR_ID, XSC_MF_SOC_PF_DEV_ID) },
+	{ PCI_DEVICE(XSC_PCI_VENDOR_ID, XSC_MS_PF_DEV_ID) },
+	{ PCI_DEVICE(XSC_PCI_VENDOR_ID, XSC_MV_HOST_PF_DEV_ID) },
+	{ PCI_DEVICE(XSC_PCI_VENDOR_ID, XSC_MV_SOC_PF_DEV_ID) },
+	{ 0 }
+};
+
+static int xsc_set_dma_caps(struct pci_dev *pdev)
+{
+	int err;
+
+	err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
+	if (!err)
+		dma_set_max_seg_size(&pdev->dev, SZ_2G);
+
+	return err;
+}
+
+static int xsc_pci_init(struct xsc_core_device *xdev,
+			const struct pci_device_id *id)
+{
+	struct pci_dev *pdev = xdev->pdev;
+	void __iomem *bar_base;
+	int bar_num = 0;
+	int err;
+
+	xdev->numa_node = dev_to_node(&pdev->dev);
+
+	err = pci_enable_device(pdev);
+	if (err) {
+		pci_err(pdev, "failed to enable PCI device: err=%d\n", err);
+		goto err_out;
+	}
+
+	err = pci_request_region(pdev, bar_num, KBUILD_MODNAME);
+	if (err) {
+		pci_err(pdev, "failed to request %s pci_region=%d: err=%d\n",
+			KBUILD_MODNAME, bar_num, err);
+		goto err_disable;
+	}
+
+	pci_set_master(pdev);
+
+	err = xsc_set_dma_caps(pdev);
+	if (err) {
+		pci_err(pdev, "failed to set DMA capabilities mask: err=%d\n",
+			err);
+		goto err_clr_master;
+	}
+
+	bar_base = pci_ioremap_bar(pdev, bar_num);
+	if (!bar_base) {
+		pci_err(pdev, "failed to ioremap %s bar%d\n", KBUILD_MODNAME,
+			bar_num);
+		err = -ENOMEM;
+		goto err_clr_master;
+	}
+
+	err = pci_save_state(pdev);
+	if (err) {
+		pci_err(pdev, "pci_save_state failed: err=%d\n", err);
+		goto err_io_unmap;
+	}
+
+	xdev->bar_num = bar_num;
+	xdev->bar = bar_base;
+
+	return 0;
+
+err_io_unmap:
+	pci_iounmap(pdev, bar_base);
+err_clr_master:
+	pci_clear_master(pdev);
+	pci_release_region(pdev, bar_num);
+err_disable:
+	pci_disable_device(pdev);
+err_out:
+	return err;
+}
+
+static void xsc_pci_fini(struct xsc_core_device *xdev)
+{
+	struct pci_dev *pdev = xdev->pdev;
+
+	pci_iounmap(pdev, xdev->bar);
+	pci_clear_master(pdev);
+	pci_release_region(pdev, xdev->bar_num);
+	pci_disable_device(pdev);
+}
+
+static int xsc_dev_res_init(struct xsc_core_device *xdev)
+{
+	struct xsc_dev_resource *dev_res;
+
+	dev_res = kvzalloc(sizeof(*dev_res), GFP_KERNEL);
+	if (!dev_res)
+		return -ENOMEM;
+
+	xdev->dev_res = dev_res;
+	mutex_init(&dev_res->alloc_mutex);
+
+	return 0;
+}
+
+static void xsc_dev_res_cleanup(struct xsc_core_device *xdev)
+{
+	/* allocated with kvzalloc(), so release with kvfree() */
+	kvfree(xdev->dev_res);
+}
+
+static int xsc_core_dev_init(struct xsc_core_device *xdev)
+{
+	int err;
+
+	err = xsc_dev_res_init(xdev);
+	if (err) {
+		pci_err(xdev->pdev, "xsc dev res init failed %d\n", err);
+		return err;
+	}
+
+	return 0;
+}
+
+static void xsc_core_dev_cleanup(struct xsc_core_device *xdev)
+{
+	xsc_dev_res_cleanup(xdev);
+}
+
+static int xsc_hw_setup(struct xsc_core_device *xdev)
+{
+	int err;
+
+	err = xsc_cmd_init(xdev);
+	if (err) {
+		pci_err(xdev->pdev, "Failed initializing command interface, aborting\n");
+		goto err_out;
+	}
+
+	err = xsc_cmd_version_check(xdev);
+	if (err) {
+		pci_err(xdev->pdev, "Failed to check cmdq version\n");
+		goto err_cmd_cleanup;
+	}
+
+	err = xsc_cmd_query_hca_cap(xdev, &xdev->caps);
+	if (err) {
+		pci_err(xdev->pdev, "Failed to query hca, 
err=%d\n", err); + goto err_cmd_cleanup; + } + + err = xsc_query_guid(xdev); + if (err) { + pci_err(xdev->pdev, "failed to query guid, err=%d\n", err); + goto err_cmd_cleanup; + } + + err = xsc_activate_hw_config(xdev); + if (err) { + pci_err(xdev->pdev, "failed to activate hw config, err=%d\n", + err); + goto err_cmd_cleanup; + } + + err = xsc_reset_function_resource(xdev); + if (err) { + pci_err(xdev->pdev, "Failed to reset function resource\n"); + goto err_cmd_cleanup; + } + + xsc_init_cq_table(xdev); + xsc_init_qp_table(xdev); + xsc_eq_init(xdev); + + return 0; +err_cmd_cleanup: + xsc_cmd_cleanup(xdev); +err_out: + return err; +} + +static void xsc_hw_cleanup(struct xsc_core_device *xdev) +{ + xsc_cmd_cleanup(xdev); +} + +static int xsc_load(struct xsc_core_device *xdev) +{ + int err = 0; + + err = xsc_hw_setup(xdev); + if (err) { + pci_err(xdev->pdev, "xsc_hw_setup failed %d\n", err); + goto err_out; + } + + err = xsc_irq_eq_create(xdev); + if (err) { + pci_err(xdev->pdev, "xsc_irq_eq_create failed %d\n", err); + goto err_hw_cleanup; + } + + err = xsc_adev_init(xdev); + if (err) { + pci_err(xdev->pdev, "xsc_adev_init failed %d\n", err); + goto err_irq_eq_destroy; + } + + return 0; +err_irq_eq_destroy: + xsc_irq_eq_destroy(xdev); +err_hw_cleanup: + xsc_hw_cleanup(xdev); +err_out: + return err; +} + +static void xsc_unload(struct xsc_core_device *xdev) +{ + xsc_adev_uninit(xdev); + xsc_irq_eq_destroy(xdev); + xsc_hw_cleanup(xdev); +} + +static int xsc_pci_probe(struct pci_dev *pci_dev, + const struct pci_device_id *id) +{ + struct xsc_core_device *xdev; + int err; + + xdev = kzalloc(sizeof(*xdev), GFP_KERNEL); + if (!xdev) + return -ENOMEM; + + xdev->pdev = pci_dev; + xdev->device = &pci_dev->dev; + + pci_set_drvdata(pci_dev, xdev); + err = xsc_pci_init(xdev, id); + if (err) { + pci_err(pci_dev, "xsc_pci_init failed %d\n", err); + goto err_unset_pci_drvdata; + } + + err = xsc_core_dev_init(xdev); + if (err) { + pci_err(pci_dev, "xsc_core_dev_init failed %d\n", err); + goto err_pci_fini; + } + + err = xsc_load(xdev); + if (err) { + pci_err(xdev->pdev, "xsc_load failed %d\n", err); + goto err_core_dev_cleanup; + } + + return 0; +err_core_dev_cleanup: + xsc_core_dev_cleanup(xdev); +err_pci_fini: + xsc_pci_fini(xdev); +err_unset_pci_drvdata: + pci_set_drvdata(pci_dev, NULL); + kfree(xdev); + + return err; +} + +static void xsc_pci_remove(struct pci_dev *pci_dev) +{ + struct xsc_core_device *xdev = pci_get_drvdata(pci_dev); + + xsc_unload(xdev); + xsc_core_dev_cleanup(xdev); + xsc_pci_fini(xdev); + pci_set_drvdata(pci_dev, NULL); + kfree(xdev); +} + +static struct pci_driver xsc_pci_driver = { + .name = "xsc-pci", + .id_table = xsc_pci_id_table, + .probe = xsc_pci_probe, + .remove = xsc_pci_remove, +}; + +static int __init xsc_init(void) +{ + int err; + + err = pci_register_driver(&xsc_pci_driver); + if (err) { + pr_err("failed to register pci driver\n"); + return err; + } + + return 0; +} + +static void __exit xsc_fini(void) +{ + pci_unregister_driver(&xsc_pci_driver); + xsc_free_board_info(); +} + +module_init(xsc_init); +module_exit(xsc_fini); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Yunsilicon XSC PCI driver"); diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/pci_irq.c b/drivers/net/ethernet/yunsilicon/xsc/pci/pci_irq.c new file mode 100644 index 0000000000000..e8b680cc26f06 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/pci_irq.c @@ -0,0 +1,426 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021-2025, Shanghai Yunsilicon Technology Co., Ltd. 
+ * All rights reserved.
+ */
+
+#include 
+#include 
+#include 
+#include 
+#include 
+#ifdef CONFIG_RFS_ACCEL
+#include 
+#endif
+
+#include "common/xsc_driver.h"
+#include "common/xsc_core.h"
+#include "eq.h"
+#include "pci_irq.h"
+
+enum {
+	XSC_COMP_EQ_SIZE = 1024,
+};
+
+enum xsc_eq_type {
+	XSC_EQ_TYPE_COMP,
+	XSC_EQ_TYPE_ASYNC,
+};
+
+struct xsc_irq {
+	struct atomic_notifier_head nh;
+	cpumask_var_t mask;
+	char name[XSC_MAX_IRQ_NAME];
+};
+
+struct xsc_irq_table {
+	struct xsc_irq *irq;
+	int nvec;
+#ifdef CONFIG_RFS_ACCEL
+	struct cpu_rmap *rmap;
+#endif
+};
+
+static void xsc_free_irq(struct xsc_core_device *xdev, unsigned int vector)
+{
+	unsigned int irqn = 0;
+
+	irqn = pci_irq_vector(xdev->pdev, vector);
+	disable_irq(irqn);
+
+	if (xsc_fw_is_available(xdev))
+		free_irq(irqn, xdev);
+}
+
+static int set_comp_irq_affinity_hint(struct xsc_core_device *xdev, int i)
+{
+	struct xsc_eq_table *table = &xdev->dev_res->eq_table;
+	struct xsc_eq *eq = xsc_core_eq_get(xdev, i);
+	int vecidx = table->eq_vec_comp_base + i;
+	unsigned int irqn;
+	int ret;
+
+	irqn = pci_irq_vector(xdev->pdev, vecidx);
+	if (!zalloc_cpumask_var(&eq->mask, GFP_KERNEL)) {
+		pci_err(xdev->pdev, "zalloc_cpumask_var rx cpumask failed\n");
+		return -ENOMEM;
+	}
+
+	if (!zalloc_cpumask_var(&xdev->xps_cpumask, GFP_KERNEL)) {
+		pci_err(xdev->pdev, "zalloc_cpumask_var tx cpumask failed\n");
+		/* don't leak the rx cpumask allocated above */
+		free_cpumask_var(eq->mask);
+		return -ENOMEM;
+	}
+
+	cpumask_set_cpu(cpumask_local_spread(i, xdev->numa_node),
+			xdev->xps_cpumask);
+	ret = irq_set_affinity_hint(irqn, eq->mask);
+
+	return ret;
+}
+
+static void clear_comp_irq_affinity_hint(struct xsc_core_device *xdev, int i)
+{
+	struct xsc_eq_table *table = &xdev->dev_res->eq_table;
+	struct xsc_eq *eq = xsc_core_eq_get(xdev, i);
+	int vecidx = table->eq_vec_comp_base + i;
+	int irqn;
+
+	irqn = pci_irq_vector(xdev->pdev, vecidx);
+	irq_set_affinity_hint(irqn, NULL);
+	free_cpumask_var(eq->mask);
+}
+
+static int set_comp_irq_affinity_hints(struct xsc_core_device *xdev)
+{
+	struct xsc_eq_table *table = &xdev->dev_res->eq_table;
+	int nvec = table->num_comp_vectors;
+	int err;
+	int i;
+
+	for (i = 0; i < nvec; i++) {
+		err = set_comp_irq_affinity_hint(xdev, i);
+		if (err)
+			goto err_out;
+	}
+
+	return 0;
+
+err_out:
+	for (i--; i >= 0; i--)
+		clear_comp_irq_affinity_hint(xdev, i);
+	free_cpumask_var(xdev->xps_cpumask);
+
+	return err;
+}
+
+static void clear_comp_irq_affinity_hints(struct xsc_core_device *xdev)
+{
+	struct xsc_eq_table *table = &xdev->dev_res->eq_table;
+	int nvec = table->num_comp_vectors;
+	int i;
+
+	for (i = 0; i < nvec; i++)
+		clear_comp_irq_affinity_hint(xdev, i);
+	free_cpumask_var(xdev->xps_cpumask);
+}
+
+static int xsc_alloc_irq_vectors(struct xsc_core_device *xdev)
+{
+	struct xsc_dev_resource *dev_res = xdev->dev_res;
+	struct xsc_eq_table *table = &dev_res->eq_table;
+	int nvec = xdev->caps.msix_num;
+	int nvec_base;
+	int err;
+
+	nvec_base = XSC_EQ_VEC_COMP_BASE;
+	if (nvec <= nvec_base) {
+		pci_err(xdev->pdev, "failed to alloc irq vector(%d)\n", nvec);
+		return -ENOMEM;
+	}
+
+	dev_res->irq_info = kcalloc(nvec, sizeof(*dev_res->irq_info),
+				    GFP_KERNEL);
+	if (!dev_res->irq_info)
+		return -ENOMEM;
+
+	nvec = pci_alloc_irq_vectors(xdev->pdev, nvec_base + 1, nvec,
+				     PCI_IRQ_MSIX);
+	if (nvec < 0) {
+		err = nvec;
+		goto err_free_irq_info;
+	}
+
+	table->eq_vec_comp_base = nvec_base;
+	table->num_comp_vectors = nvec - nvec_base;
+	xdev->msix_vec_base = xdev->caps.msix_base;
+
+	return 0;
+
+err_free_irq_info:
+	kfree(dev_res->irq_info);
+	return err;
+}
+
+static void 
xsc_free_irq_vectors(struct xsc_core_device *xdev) +{ + struct xsc_dev_resource *dev_res = xdev->dev_res; + + if (!xsc_fw_is_available(xdev)) + return; + + pci_free_irq_vectors(xdev->pdev); + kfree(dev_res->irq_info); +} + +int xsc_core_vector2eqn(struct xsc_core_device *xdev, int vector, u32 *eqn, + unsigned int *irqn) +{ + struct xsc_eq_table *table = &xdev->dev_res->eq_table; + struct xsc_eq *eq, *n; + int err = -ENOENT; + + if (!xdev->caps.msix_enable) + return 0; + + spin_lock(&table->lock); + list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) { + if (eq->index == vector) { + *eqn = eq->eqn; + *irqn = eq->irqn; + err = 0; + break; + } + } + spin_unlock(&table->lock); + + return err; +} +EXPORT_SYMBOL(xsc_core_vector2eqn); + +static void free_comp_eqs(struct xsc_core_device *xdev) +{ + struct xsc_eq_table *table = &xdev->dev_res->eq_table; + struct xsc_eq *eq, *n; + + spin_lock(&table->lock); + list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) { + list_del(&eq->list); + spin_unlock(&table->lock); + if (xsc_destroy_unmap_eq(xdev, eq)) + pci_err(xdev->pdev, "failed to destroy EQ 0x%x\n", + eq->eqn); + kfree(eq); + spin_lock(&table->lock); + } + spin_unlock(&table->lock); +} + +static int alloc_comp_eqs(struct xsc_core_device *xdev) +{ + struct xsc_eq_table *table = &xdev->dev_res->eq_table; + char name[XSC_MAX_IRQ_NAME]; + struct xsc_eq *eq; + int ncomp_vec; + u32 nent; + int err; + int i; + + INIT_LIST_HEAD(&table->comp_eqs_list); + ncomp_vec = table->num_comp_vectors; + nent = XSC_COMP_EQ_SIZE; + + for (i = 0; i < ncomp_vec; i++) { + eq = kzalloc(sizeof(*eq), GFP_KERNEL); + if (!eq) { + err = -ENOMEM; + goto err_free; + } + + snprintf(name, XSC_MAX_IRQ_NAME, "xsc_comp%d", i); + err = xsc_create_map_eq(xdev, eq, + i + table->eq_vec_comp_base, + nent, name); + if (err) { + kfree(eq); + goto err_free; + } + + eq->index = i; + spin_lock(&table->lock); + list_add_tail(&eq->list, &table->comp_eqs_list); + spin_unlock(&table->lock); + } + + return 0; + +err_free: + free_comp_eqs(xdev); + return err; +} + +static irqreturn_t xsc_cmd_handler(int irq, void *arg) +{ + struct xsc_core_device *xdev = (struct xsc_core_device *)arg; + int err; + + disable_irq_nosync(xdev->cmd.irqn); + err = xsc_cmd_err_handler(xdev); + if (!err) + xsc_cmd_resp_handler(xdev); + enable_irq(xdev->cmd.irqn); + + return IRQ_HANDLED; +} + +static int xsc_request_irq_for_cmdq(struct xsc_core_device *xdev, u8 vecidx) +{ + struct xsc_dev_resource *dev_res = xdev->dev_res; + + writel(xdev->msix_vec_base + vecidx, + XSC_REG_ADDR(xdev, xdev->cmd.reg.msix_vec_addr)); + + snprintf(dev_res->irq_info[vecidx].name, XSC_MAX_IRQ_NAME, "%s@pci:%s", + "xsc_cmd", pci_name(xdev->pdev)); + xdev->cmd.irqn = pci_irq_vector(xdev->pdev, vecidx); + return request_irq(xdev->cmd.irqn, xsc_cmd_handler, 0, + dev_res->irq_info[vecidx].name, xdev); +} + +static void xsc_free_irq_for_cmdq(struct xsc_core_device *xdev) +{ + xsc_free_irq(xdev, XSC_VEC_CMD); +} + +static irqreturn_t xsc_event_handler(int irq, void *arg) +{ + struct xsc_core_device *xdev = (struct xsc_core_device *)arg; + + if (!xdev->eth_priv) + return IRQ_NONE; + + if (!xdev->event_handler) + return IRQ_NONE; + + xdev->event_handler(xdev->eth_priv); + + return IRQ_HANDLED; +} + +static int xsc_request_irq_for_event(struct xsc_core_device *xdev) +{ + struct xsc_dev_resource *dev_res = xdev->dev_res; + + snprintf(dev_res->irq_info[XSC_VEC_CMD_EVENT].name, + XSC_MAX_IRQ_NAME, "%s@pci:%s", + "xsc_eth_event", pci_name(xdev->pdev)); + return 
request_irq(pci_irq_vector(xdev->pdev, XSC_VEC_CMD_EVENT), + xsc_event_handler, 0, + dev_res->irq_info[XSC_VEC_CMD_EVENT].name, xdev); +} + +static void xsc_free_irq_for_event(struct xsc_core_device *xdev) +{ + xsc_free_irq(xdev, XSC_VEC_CMD_EVENT); +} + +static int xsc_cmd_enable_msix(struct xsc_core_device *xdev) +{ + struct xsc_msix_table_info_mbox_out out; + struct xsc_msix_table_info_mbox_in in; + int err; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_ENABLE_MSIX); + + err = xsc_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out)); + if (err) { + pci_err(xdev->pdev, "xsc_cmd_exec enable msix failed %d\n", + err); + return err; + } + + return 0; +} + +int xsc_irq_eq_create(struct xsc_core_device *xdev) +{ + int err; + + if (xdev->caps.msix_enable == 0) + return 0; + + err = xsc_alloc_irq_vectors(xdev); + if (err) { + pci_err(xdev->pdev, "enable msix failed, err=%d\n", err); + goto err_out; + } + + err = xsc_start_eqs(xdev); + if (err) { + pci_err(xdev->pdev, "failed to start EQs, err=%d\n", err); + goto err_free_irq_vectors; + } + + err = alloc_comp_eqs(xdev); + if (err) { + pci_err(xdev->pdev, "failed to alloc comp EQs, err=%d\n", err); + goto err_stop_eqs; + } + + err = xsc_request_irq_for_cmdq(xdev, XSC_VEC_CMD); + if (err) { + pci_err(xdev->pdev, "failed to request irq for cmdq, err=%d\n", + err); + goto err_free_comp_eqs; + } + + err = xsc_request_irq_for_event(xdev); + if (err) { + pci_err(xdev->pdev, "failed to request irq for event, err=%d\n", + err); + goto err_free_irq_cmdq; + } + + err = set_comp_irq_affinity_hints(xdev); + if (err) { + pci_err(xdev->pdev, "failed to alloc affinity hint cpumask, err=%d\n", + err); + goto err_free_irq_evnt; + } + + xsc_cmd_use_events(xdev); + err = xsc_cmd_enable_msix(xdev); + if (err) { + pci_err(xdev->pdev, "xsc_cmd_enable_msix failed %d.\n", err); + xsc_cmd_use_polling(xdev); + goto err_free_irq_evnt; + } + return 0; + +err_free_irq_evnt: + xsc_free_irq_for_event(xdev); +err_free_irq_cmdq: + xsc_free_irq_for_cmdq(xdev); +err_free_comp_eqs: + free_comp_eqs(xdev); +err_stop_eqs: + xsc_stop_eqs(xdev); +err_free_irq_vectors: + xsc_free_irq_vectors(xdev); +err_out: + return err; +} + +void xsc_irq_eq_destroy(struct xsc_core_device *xdev) +{ + if (xdev->caps.msix_enable == 0) + return; + + xsc_stop_eqs(xdev); + clear_comp_irq_affinity_hints(xdev); + free_comp_eqs(xdev); + + xsc_free_irq_for_event(xdev); + xsc_free_irq_for_cmdq(xdev); + xsc_free_irq_vectors(xdev); +} diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/pci_irq.h b/drivers/net/ethernet/yunsilicon/xsc/pci/pci_irq.h new file mode 100644 index 0000000000000..e85c97cbc9f0f --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/pci_irq.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021-2025, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#ifndef __PCI_IRQ_H +#define __PCI_IRQ_H + +#include "common/xsc_core.h" + +int xsc_irq_eq_create(struct xsc_core_device *xdev); +void xsc_irq_eq_destroy(struct xsc_core_device *xdev); + +#endif diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/qp.c b/drivers/net/ethernet/yunsilicon/xsc/pci/qp.c new file mode 100644 index 0000000000000..61746e0579f79 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/qp.c @@ -0,0 +1,194 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021-2025, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */ + +#include +#include +#include +#include + +#include "common/xsc_core.h" +#include "common/xsc_driver.h" +#include "qp.h" + +int xsc_core_create_resource_common(struct xsc_core_device *xdev, + struct xsc_core_qp *qp) +{ + struct xsc_qp_table *table = &xdev->dev_res->qp_table; + int err; + + spin_lock_irq(&table->lock); + err = radix_tree_insert(&table->tree, qp->qpn, qp); + spin_unlock_irq(&table->lock); + if (err) + return err; + + atomic_set(&qp->refcount, 1); + init_completion(&qp->free); + qp->pid = current->pid; + + return 0; +} +EXPORT_SYMBOL(xsc_core_create_resource_common); + +void xsc_core_destroy_resource_common(struct xsc_core_device *xdev, + struct xsc_core_qp *qp) +{ + struct xsc_qp_table *table = &xdev->dev_res->qp_table; + unsigned long flags; + + spin_lock_irqsave(&table->lock, flags); + radix_tree_delete(&table->tree, qp->qpn); + spin_unlock_irqrestore(&table->lock, flags); + + if (atomic_dec_and_test(&qp->refcount)) + complete(&qp->free); + wait_for_completion(&qp->free); +} +EXPORT_SYMBOL(xsc_core_destroy_resource_common); + +void xsc_qp_event(struct xsc_core_device *xdev, u32 qpn, int event_type) +{ + struct xsc_qp_table *table = &xdev->dev_res->qp_table; + struct xsc_core_qp *qp; + + spin_lock(&table->lock); + + qp = radix_tree_lookup(&table->tree, qpn); + if (qp) + atomic_inc(&qp->refcount); + + spin_unlock(&table->lock); + + if (!qp) { + pci_err(xdev->pdev, "Async event for bogus QP 0x%x\n", qpn); + return; + } + + qp->event(qp, event_type); + + if (atomic_dec_and_test(&qp->refcount)) + complete(&qp->free); +} + +void xsc_init_qp_table(struct xsc_core_device *xdev) +{ + struct xsc_qp_table *table = &xdev->dev_res->qp_table; + + spin_lock_init(&table->lock); + INIT_RADIX_TREE(&table->tree, GFP_ATOMIC); +} + +int xsc_core_eth_create_qp(struct xsc_core_device *xdev, + struct xsc_create_qp_mbox_in *in, + int insize, u32 *p_qpn) +{ + struct xsc_create_qp_mbox_out out; + int ret; + + in->hdr.opcode = cpu_to_be16(XSC_CMD_OP_CREATE_QP); + ret = xsc_cmd_exec(xdev, in, insize, &out, sizeof(out)); + if (ret || out.hdr.status) { + pci_err(xdev->pdev, "failed to create sq, err=%d out.status=%u\n", + ret, out.hdr.status); + return -ENOEXEC; + } + + *p_qpn = be32_to_cpu(out.qpn) & 0xffffff; + + return 0; +} +EXPORT_SYMBOL(xsc_core_eth_create_qp); + +int xsc_core_eth_modify_qp_status(struct xsc_core_device *xdev, + u32 qpn, u16 status) +{ + struct xsc_modify_qp_mbox_out out; + struct xsc_modify_qp_mbox_in in; + int ret = 0; + + in.hdr.opcode = cpu_to_be16(status); + in.qpn = cpu_to_be32(qpn); + in.no_need_wait = 1; + + ret = xsc_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out)); + if (ret || out.hdr.status != 0) { + pci_err(xdev->pdev, "failed to modify qp %u status=%u, err=%d out.status %u\n", + qpn, status, ret, out.hdr.status); + ret = -ENOEXEC; + } + + return ret; +} +EXPORT_SYMBOL_GPL(xsc_core_eth_modify_qp_status); + +int xsc_core_eth_destroy_qp(struct xsc_core_device *xdev, u32 qpn) +{ + struct xsc_destroy_qp_mbox_out out; + struct xsc_destroy_qp_mbox_in in; + int err; + + err = xsc_core_eth_modify_qp_status(xdev, qpn, XSC_CMD_OP_2RST_QP); + if (err) { + pci_err(xdev->pdev, "failed to set sq%d status=rst, err=%d\n", + qpn, err); + return err; + } + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_DESTROY_QP); + in.qpn = cpu_to_be32(qpn); + err = xsc_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out)); + if (err || out.hdr.status) { + pci_err(xdev->pdev, "failed to destroy sq%d, err=%d out.status=%u\n", + qpn, err, 
out.hdr.status); + return -ENOEXEC; + } + + return 0; +} +EXPORT_SYMBOL(xsc_core_eth_destroy_qp); + +int xsc_core_eth_modify_raw_qp(struct xsc_core_device *xdev, + struct xsc_modify_raw_qp_mbox_in *in) +{ + struct xsc_modify_raw_qp_mbox_out out; + int ret; + + in->hdr.opcode = cpu_to_be16(XSC_CMD_OP_MODIFY_RAW_QP); + + ret = xsc_cmd_exec(xdev, in, sizeof(struct xsc_modify_raw_qp_mbox_in), + &out, sizeof(struct xsc_modify_raw_qp_mbox_out)); + if (ret || out.hdr.status) { + pci_err(xdev->pdev, "failed to modify sq, err=%d out.status=%u\n", + ret, out.hdr.status); + return -ENOEXEC; + } + + return 0; +} +EXPORT_SYMBOL(xsc_core_eth_modify_raw_qp); + +int xsc_core_eth_create_rss_qp_rqs(struct xsc_core_device *xdev, + struct xsc_create_multiqp_mbox_in *in, + int insize, + u32 *p_qpn_base) +{ + struct xsc_create_multiqp_mbox_out out; + int ret; + + in->hdr.opcode = cpu_to_be16(XSC_CMD_OP_CREATE_MULTI_QP); + ret = xsc_cmd_exec(xdev, in, insize, &out, sizeof(out)); + if (ret || out.hdr.status) { + pci_err(xdev->pdev, + "failed to create rss rq, qp_num=%d, type=%d, err=%d out.status=%u\n", + in->qp_num, in->qp_type, ret, out.hdr.status); + return -ENOEXEC; + } + + *p_qpn_base = be32_to_cpu(out.qpn_base) & 0xffffff; + return 0; +} +EXPORT_SYMBOL(xsc_core_eth_create_rss_qp_rqs); diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/qp.h b/drivers/net/ethernet/yunsilicon/xsc/pci/qp.h new file mode 100644 index 0000000000000..af081933e0923 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/qp.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021-2025, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. + */ + +#ifndef __QP_H +#define __QP_H + +#include "common/xsc_core.h" + +void xsc_init_qp_table(struct xsc_core_device *xdev); +void xsc_qp_event(struct xsc_core_device *xdev, u32 qpn, int event_type); + +#endif /* __QP_H */ diff --git a/drivers/net/ethernet/yunsilicon/xsc/pci/vport.c b/drivers/net/ethernet/yunsilicon/xsc/pci/vport.c new file mode 100644 index 0000000000000..edac8a6bd39e4 --- /dev/null +++ b/drivers/net/ethernet/yunsilicon/xsc/pci/vport.c @@ -0,0 +1,32 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2021 - 2025, Shanghai Yunsilicon Technology Co., Ltd. + * All rights reserved. 
+ */ + +#include "common/xsc_core.h" +#include "common/xsc_driver.h" + +u8 xsc_core_query_vport_state(struct xsc_core_device *xdev, u16 vport) +{ + struct xsc_query_vport_state_out out; + struct xsc_query_vport_state_in in; + u32 val = 0; + int err; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + + in.hdr.opcode = cpu_to_be16(XSC_CMD_OP_QUERY_VPORT_STATE); + val |= FIELD_PREP(XSC_QUERY_VPORT_VPORT_NUM_MASK, vport); + if (vport) + val |= XSC_QUERY_VPORT_OTHER_VPORT; + in.data0 = cpu_to_be32(val); + + err = xsc_cmd_exec(xdev, &in, sizeof(in), &out, sizeof(out)); + if (err || out.hdr.status) + pci_err(xdev->pdev, "failed to query vport state, err=%d, status=%d\n", + err, out.hdr.status); + + return out.state; +} +EXPORT_SYMBOL(xsc_core_query_vport_state); diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index dbb3960126ee7..66e38ce9cd1d3 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #define GENEVE_NETDEV_VER "0.6" @@ -57,6 +58,8 @@ struct geneve_config { bool ttl_inherit; enum ifla_geneve_df df; bool inner_proto_inherit; + u16 port_min; + u16 port_max; }; /* Pseudo network device */ @@ -835,7 +838,9 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev, use_cache = ip_tunnel_dst_cache_usable(skb, info); tos = geneve_get_dsfield(skb, dev, info, &use_cache); - sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true); + sport = udp_flow_src_port(geneve->net, skb, + geneve->cfg.port_min, + geneve->cfg.port_max, true); rt = udp_tunnel_dst_lookup(skb, dev, geneve->net, 0, &saddr, &info->key, @@ -945,7 +950,9 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev, use_cache = ip_tunnel_dst_cache_usable(skb, info); prio = geneve_get_dsfield(skb, dev, info, &use_cache); - sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true); + sport = udp_flow_src_port(geneve->net, skb, + geneve->cfg.port_min, + geneve->cfg.port_max, true); dst = udp_tunnel6_dst_lookup(skb, dev, geneve->net, gs6->sock, 0, &saddr, key, sport, @@ -1084,7 +1091,8 @@ static int geneve_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb) use_cache = ip_tunnel_dst_cache_usable(skb, info); tos = geneve_get_dsfield(skb, dev, info, &use_cache); sport = udp_flow_src_port(geneve->net, skb, - 1, USHRT_MAX, true); + geneve->cfg.port_min, + geneve->cfg.port_max, true); rt = udp_tunnel_dst_lookup(skb, dev, geneve->net, 0, &saddr, &info->key, @@ -1110,7 +1118,8 @@ static int geneve_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb) use_cache = ip_tunnel_dst_cache_usable(skb, info); prio = geneve_get_dsfield(skb, dev, info, &use_cache); sport = udp_flow_src_port(geneve->net, skb, - 1, USHRT_MAX, true); + geneve->cfg.port_min, + geneve->cfg.port_max, true); dst = udp_tunnel6_dst_lookup(skb, dev, geneve->net, gs6->sock, 0, &saddr, &info->key, sport, @@ -1234,6 +1243,7 @@ static const struct nla_policy geneve_policy[IFLA_GENEVE_MAX + 1] = { [IFLA_GENEVE_TTL_INHERIT] = { .type = NLA_U8 }, [IFLA_GENEVE_DF] = { .type = NLA_U8 }, [IFLA_GENEVE_INNER_PROTO_INHERIT] = { .type = NLA_FLAG }, + [IFLA_GENEVE_PORT_RANGE] = NLA_POLICY_EXACT_LEN(sizeof(struct ifla_geneve_port_range)), }; static int geneve_validate(struct nlattr *tb[], struct nlattr *data[], @@ -1279,6 +1289,17 @@ static int geneve_validate(struct nlattr *tb[], struct nlattr *data[], } } + if (data[IFLA_GENEVE_PORT_RANGE]) { + const struct ifla_geneve_port_range *p; + + p = nla_data(data[IFLA_GENEVE_PORT_RANGE]); + if 
(ntohs(p->high) < ntohs(p->low)) { + NL_SET_ERR_MSG_ATTR(extack, data[IFLA_GENEVE_PORT_RANGE], + "Invalid source port range"); + return -EINVAL; + } + } + return 0; } @@ -1506,6 +1527,18 @@ static int geneve_nl2info(struct nlattr *tb[], struct nlattr *data[], info->key.tp_dst = nla_get_be16(data[IFLA_GENEVE_PORT]); } + if (data[IFLA_GENEVE_PORT_RANGE]) { + const struct ifla_geneve_port_range *p; + + if (changelink) { + attrtype = IFLA_GENEVE_PORT_RANGE; + goto change_notsup; + } + p = nla_data(data[IFLA_GENEVE_PORT_RANGE]); + cfg->port_min = ntohs(p->low); + cfg->port_max = ntohs(p->high); + } + if (data[IFLA_GENEVE_COLLECT_METADATA]) { if (changelink) { attrtype = IFLA_GENEVE_COLLECT_METADATA; @@ -1614,15 +1647,20 @@ static void geneve_link_config(struct net_device *dev, geneve_change_mtu(dev, ldev_mtu - info->options_len); } -static int geneve_newlink(struct net *net, struct net_device *dev, - struct nlattr *tb[], struct nlattr *data[], +static int geneve_newlink(struct net_device *dev, + struct rtnl_newlink_params *params, struct netlink_ext_ack *extack) { + struct net *link_net = rtnl_newlink_link_net(params); + struct nlattr **data = params->data; + struct nlattr **tb = params->tb; struct geneve_config cfg = { .df = GENEVE_DF_UNSET, .use_udp6_rx_checksums = false, .ttl_inherit = false, .collect_md = false, + .port_min = 1, + .port_max = USHRT_MAX, }; int err; @@ -1631,7 +1669,7 @@ static int geneve_newlink(struct net *net, struct net_device *dev, if (err) return err; - err = geneve_configure(net, dev, extack, &cfg); + err = geneve_configure(link_net, dev, extack, &cfg); if (err) return err; @@ -1741,6 +1779,7 @@ static size_t geneve_get_size(const struct net_device *dev) nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_UDP_ZERO_CSUM6_RX */ nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_TTL_INHERIT */ nla_total_size(0) + /* IFLA_GENEVE_INNER_PROTO_INHERIT */ + nla_total_size(sizeof(struct ifla_geneve_port_range)) + /* IFLA_GENEVE_PORT_RANGE */ 0; } @@ -1750,6 +1789,10 @@ static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev) struct ip_tunnel_info *info = &geneve->cfg.info; bool ttl_inherit = geneve->cfg.ttl_inherit; bool metadata = geneve->cfg.collect_md; + struct ifla_geneve_port_range ports = { + .low = htons(geneve->cfg.port_min), + .high = htons(geneve->cfg.port_max), + }; __u8 tmp_vni[3]; __u32 vni; @@ -1806,6 +1849,9 @@ static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev) nla_put_flag(skb, IFLA_GENEVE_INNER_PROTO_INHERIT)) goto nla_put_failure; + if (nla_put(skb, IFLA_GENEVE_PORT_RANGE, sizeof(ports), &ports)) + goto nla_put_failure; + return 0; nla_put_failure: @@ -1838,6 +1884,8 @@ struct net_device *geneve_dev_create_fb(struct net *net, const char *name, .use_udp6_rx_checksums = true, .ttl_inherit = false, .collect_md = true, + .port_min = 1, + .port_max = USHRT_MAX, }; memset(tb, 0, sizeof(tb)); diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c index b7b46c5e6399a..ef793607890d9 100644 --- a/drivers/net/gtp.c +++ b/drivers/net/gtp.c @@ -1462,10 +1462,12 @@ static int gtp_create_sockets(struct gtp_dev *gtp, const struct nlattr *nla, #define GTP_TH_MAXLEN (sizeof(struct udphdr) + sizeof(struct gtp0_header)) #define GTP_IPV6_MAXLEN (sizeof(struct ipv6hdr) + GTP_TH_MAXLEN) -static int gtp_newlink(struct net *src_net, struct net_device *dev, - struct nlattr *tb[], struct nlattr *data[], +static int gtp_newlink(struct net_device *dev, + struct rtnl_newlink_params *params, struct netlink_ext_ack *extack) { + struct net *link_net = 
rtnl_newlink_link_net(params); + struct nlattr **data = params->data; unsigned int role = GTP_ROLE_GGSN; struct gtp_dev *gtp; struct gtp_net *gn; @@ -1496,7 +1498,7 @@ static int gtp_newlink(struct net *src_net, struct net_device *dev, gtp->restart_count = nla_get_u8_default(data[IFLA_GTP_RESTART_COUNT], 0); - gtp->net = src_net; + gtp->net = link_net; err = gtp_hashtable_new(gtp, hashsize); if (err < 0) @@ -1526,7 +1528,7 @@ static int gtp_newlink(struct net *src_net, struct net_device *dev, goto out_encap; } - gn = net_generic(src_net, gtp_net_id); + gn = net_generic(link_net, gtp_net_id); list_add(&gtp->list, &gn->gtp_dev_list); dev->priv_destructor = gtp_destructor; diff --git a/drivers/net/hamradio/baycom_par.c b/drivers/net/hamradio/baycom_par.c index 00ebc25d0b22a..f03797103c6a2 100644 --- a/drivers/net/hamradio/baycom_par.c +++ b/drivers/net/hamradio/baycom_par.c @@ -427,7 +427,7 @@ static int baycom_ioctl(struct net_device *dev, void __user *data, break; case HDLCDRVCTL_GETMODE: - strcpy(hi->data.modename, bc->options ? "par96" : "picpar"); + strscpy(hi->data.modename, bc->options ? "par96" : "picpar"); if (copy_to_user(data, hi, sizeof(struct hdlcdrv_ioctl))) return -EFAULT; return 0; @@ -439,7 +439,7 @@ static int baycom_ioctl(struct net_device *dev, void __user *data, return baycom_setmode(bc, hi->data.modename); case HDLCDRVCTL_MODELIST: - strcpy(hi->data.modename, "par96,picpar"); + strscpy(hi->data.modename, "par96,picpar"); if (copy_to_user(data, hi, sizeof(struct hdlcdrv_ioctl))) return -EFAULT; return 0; diff --git a/drivers/net/hamradio/baycom_ser_fdx.c b/drivers/net/hamradio/baycom_ser_fdx.c index 799f8ece7824f..ee5bd3c120400 100644 --- a/drivers/net/hamradio/baycom_ser_fdx.c +++ b/drivers/net/hamradio/baycom_ser_fdx.c @@ -531,7 +531,7 @@ static int baycom_ioctl(struct net_device *dev, void __user *data, return baycom_setmode(bc, hi->data.modename); case HDLCDRVCTL_MODELIST: - strcpy(hi->data.modename, "ser12,ser3,ser24"); + strscpy(hi->data.modename, "ser12,ser3,ser24"); if (copy_to_user(data, hi, sizeof(struct hdlcdrv_ioctl))) return -EFAULT; return 0; diff --git a/drivers/net/hamradio/baycom_ser_hdx.c b/drivers/net/hamradio/baycom_ser_hdx.c index 5d1ab4840753f..05bdad2147999 100644 --- a/drivers/net/hamradio/baycom_ser_hdx.c +++ b/drivers/net/hamradio/baycom_ser_hdx.c @@ -570,7 +570,7 @@ static int baycom_ioctl(struct net_device *dev, void __user *data, break; case HDLCDRVCTL_GETMODE: - strcpy(hi->data.modename, "ser12"); + strscpy(hi->data.modename, "ser12"); if (bc->opt_dcd <= 0) strcat(hi->data.modename, (!bc->opt_dcd) ? "*" : (bc->opt_dcd == -2) ? "@" : "+"); if (copy_to_user(data, hi, sizeof(struct hdlcdrv_ioctl))) @@ -584,7 +584,7 @@ static int baycom_ioctl(struct net_device *dev, void __user *data, return baycom_setmode(bc, hi->data.modename); case HDLCDRVCTL_MODELIST: - strcpy(hi->data.modename, "ser12"); + strscpy(hi->data.modename, "ser12"); if (copy_to_user(data, hi, sizeof(struct hdlcdrv_ioctl))) return -EFAULT; return 0; diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c index bac1bb69d63a1..0e0fe32d2da4e 100644 --- a/drivers/net/hamradio/bpqether.c +++ b/drivers/net/hamradio/bpqether.c @@ -77,6 +77,7 @@ #include #include +#include #include #include @@ -107,27 +108,6 @@ struct bpqdev { static LIST_HEAD(bpq_devices); -/* - * bpqether network devices are paired with ethernet devices below them, so - * form a special "super class" of normal ethernet devices; split their locks - * off into a separate class since they always nest.
- */ -static struct lock_class_key bpq_netdev_xmit_lock_key; -static struct lock_class_key bpq_netdev_addr_lock_key; - -static void bpq_set_lockdep_class_one(struct net_device *dev, - struct netdev_queue *txq, - void *_unused) -{ - lockdep_set_class(&txq->_xmit_lock, &bpq_netdev_xmit_lock_key); -} - -static void bpq_set_lockdep_class(struct net_device *dev) -{ - lockdep_set_class(&dev->addr_list_lock, &bpq_netdev_addr_lock_key); - netdev_for_each_tx_queue(dev, bpq_set_lockdep_class_one, NULL); -} - /* ------------------------------------------------------------------------ */ @@ -454,6 +434,8 @@ static const struct net_device_ops bpq_netdev_ops = { static void bpq_setup(struct net_device *dev) { + netdev_lockdep_set_classes(dev); + dev->netdev_ops = &bpq_netdev_ops; dev->needs_free_netdev = true; @@ -499,7 +481,6 @@ static int bpq_new_device(struct net_device *edev) err = register_netdevice(ndev); if (err) goto error; - bpq_set_lockdep_class(ndev); /* List protected by RTNL */ list_add_rcu(&bpq->bpq_list, &bpq_devices); diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index 234db693cefa4..70f7cb383228e 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h @@ -1166,6 +1166,8 @@ struct netvsc_device { u32 max_chn; u32 num_chn; + u32 netvsc_gso_max_size; + atomic_t open_chn; struct work_struct subchan_work; wait_queue_head_t subchan_open; diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index d6c4abfc3a28b..c51b318b8a72e 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -29,6 +29,7 @@ #include #include +#include #include #include #include @@ -2461,6 +2462,21 @@ static int netvsc_vf_changed(struct net_device *vf_netdev, unsigned long event) } else { netdev_info(ndev, "Data path switched %s VF: %s\n", vf_is_up ? "to" : "from", vf_netdev->name); + + /* In Azure, when accelerated networking is enabled, other NICs + * like MANA, MLX, are configured as a bonded NIC with + * Netvsc(failover) NIC. For bonded NICs, the min of the max + * pkt aggregate size of the members is propagated in the stack. + * In order to allow these NICs (MANA/MLX) to use up to + * GSO_MAX_SIZE gso packet size, we need to allow Netvsc NIC to + * also support this in the guest.
+ * This value is only increased for netvsc NIC when datapath is + * switched over to the VF + */ + if (vf_is_up) + netif_set_tso_max_size(ndev, vf_netdev->tso_max_size); + else + netif_set_tso_max_size(ndev, netvsc_dev->netvsc_gso_max_size); } return NOTIFY_OK; diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c index c0ceeef4fcd81..82747dfacd70f 100644 --- a/drivers/net/hyperv/rndis_filter.c +++ b/drivers/net/hyperv/rndis_filter.c @@ -1356,9 +1356,10 @@ static int rndis_netdev_set_hwcaps(struct rndis_device *rndis_device, struct net_device_context *net_device_ctx = netdev_priv(net); struct ndis_offload hwcaps; struct ndis_offload_params offloads; - unsigned int gso_max_size = GSO_LEGACY_MAX_SIZE; int ret; + nvdev->netvsc_gso_max_size = GSO_LEGACY_MAX_SIZE; + /* Find HW offload capabilities */ ret = rndis_query_hwcaps(rndis_device, nvdev, &hwcaps); if (ret != 0) @@ -1390,8 +1391,8 @@ static int rndis_netdev_set_hwcaps(struct rndis_device *rndis_device, offloads.lso_v2_ipv4 = NDIS_OFFLOAD_PARAMETERS_LSOV2_ENABLED; net->hw_features |= NETIF_F_TSO; - if (hwcaps.lsov2.ip4_maxsz < gso_max_size) - gso_max_size = hwcaps.lsov2.ip4_maxsz; + if (hwcaps.lsov2.ip4_maxsz < nvdev->netvsc_gso_max_size) + nvdev->netvsc_gso_max_size = hwcaps.lsov2.ip4_maxsz; } if (hwcaps.csum.ip4_txcsum & NDIS_TXCSUM_CAP_UDP4) { @@ -1411,8 +1412,8 @@ static int rndis_netdev_set_hwcaps(struct rndis_device *rndis_device, offloads.lso_v2_ipv6 = NDIS_OFFLOAD_PARAMETERS_LSOV2_ENABLED; net->hw_features |= NETIF_F_TSO6; - if (hwcaps.lsov2.ip6_maxsz < gso_max_size) - gso_max_size = hwcaps.lsov2.ip6_maxsz; + if (hwcaps.lsov2.ip6_maxsz < nvdev->netvsc_gso_max_size) + nvdev->netvsc_gso_max_size = hwcaps.lsov2.ip6_maxsz; } if (hwcaps.csum.ip6_txcsum & NDIS_TXCSUM_CAP_UDP6) { @@ -1438,7 +1439,7 @@ static int rndis_netdev_set_hwcaps(struct rndis_device *rndis_device, */ net->features &= ~NETVSC_SUPPORTED_HW_FEATURES | net->hw_features; - netif_set_tso_max_size(net, gso_max_size); + netif_set_tso_max_size(net, nvdev->netvsc_gso_max_size); ret = rndis_filter_set_offload_params(net, nvdev, &offloads); diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c index 753215ebc67c7..ebc4f1b18e7bc 100644 --- a/drivers/net/ieee802154/ca8210.c +++ b/drivers/net/ieee802154/ca8210.c @@ -52,12 +52,10 @@ #include #include #include -#include #include #include #include #include -#include #include #include #include @@ -350,8 +348,8 @@ struct work_priv_container { * @extclockenable: true if the external clock is to be enabled * @extclockfreq: frequency of the external clock * @extclockgpio: ca8210 output gpio of the external clock - * @gpio_reset: gpio number of ca8210 reset line - * @gpio_irq: gpio number of ca8210 interrupt line + * @reset_gpio: ca8210 reset GPIO descriptor + * @irq_gpio: ca8210 interrupt GPIO descriptor * @irq_id: identifier for the ca8210 irq * */ @@ -359,8 +357,8 @@ struct ca8210_platform_data { bool extclockenable; unsigned int extclockfreq; unsigned int extclockgpio; - int gpio_reset; - int gpio_irq; + struct gpio_desc *reset_gpio; + struct gpio_desc *irq_gpio; int irq_id; }; @@ -627,14 +625,15 @@ static int ca8210_spi_transfer( */ static void ca8210_reset_send(struct spi_device *spi, unsigned int ms) { - struct ca8210_platform_data *pdata = spi->dev.platform_data; + struct device *dev = &spi->dev; + struct ca8210_platform_data *pdata = dev_get_platdata(dev); struct ca8210_priv *priv = spi_get_drvdata(spi); long status; - gpio_set_value(pdata->gpio_reset, 0); + 
gpiod_set_value(pdata->reset_gpio, 1); reinit_completion(&priv->ca8210_is_awake); msleep(ms); - gpio_set_value(pdata->gpio_reset, 1); + gpiod_set_value(pdata->reset_gpio, 0); priv->promiscuous = false; /* Wait until wakeup indication seen */ @@ -1446,8 +1445,7 @@ static u8 mcps_data_request( command.pdata.data_req.src_addr_mode = src_addr_mode; command.pdata.data_req.dst.mode = dst_address_mode; if (dst_address_mode != MAC_MODE_NO_ADDR) { - command.pdata.data_req.dst.pan_id[0] = LS_BYTE(dst_pan_id); - command.pdata.data_req.dst.pan_id[1] = MS_BYTE(dst_pan_id); + put_unaligned_le16(dst_pan_id, command.pdata.data_req.dst.pan_id); if (dst_address_mode == MAC_MODE_SHORT_ADDR) { command.pdata.data_req.dst.address[0] = LS_BYTE( dst_addr->short_address @@ -1795,12 +1793,12 @@ static int ca8210_skb_rx( } hdr.source.mode = data_ind[0]; dev_dbg(&priv->spi->dev, "srcAddrMode: %#03x\n", hdr.source.mode); - hdr.source.pan_id = *(u16 *)&data_ind[1]; + hdr.source.pan_id = cpu_to_le16(get_unaligned_le16(&data_ind[1])); dev_dbg(&priv->spi->dev, "srcPanId: %#06x\n", hdr.source.pan_id); memcpy(&hdr.source.extended_addr, &data_ind[3], 8); hdr.dest.mode = data_ind[11]; dev_dbg(&priv->spi->dev, "dstAddrMode: %#03x\n", hdr.dest.mode); - hdr.dest.pan_id = *(u16 *)&data_ind[12]; + hdr.dest.pan_id = cpu_to_le16(get_unaligned_le16(&data_ind[12])); dev_dbg(&priv->spi->dev, "dstPanId: %#06x\n", hdr.dest.pan_id); memcpy(&hdr.dest.extended_addr, &data_ind[14], 8); @@ -1927,7 +1925,7 @@ static int ca8210_skb_tx( status = mcps_data_request( header.source.mode, header.dest.mode, - header.dest.pan_id, + le16_to_cpu(header.dest.pan_id), (union macaddr *)&header.dest.extended_addr, skb->len - mac_len, &skb->data[mac_len], @@ -2737,9 +2735,10 @@ static int ca8210_config_extern_clk( */ static int ca8210_register_ext_clock(struct spi_device *spi) { + struct device *dev = &spi->dev; + struct ca8210_platform_data *pdata = dev_get_platdata(dev); struct device_node *np = spi->dev.of_node; struct ca8210_priv *priv = spi_get_drvdata(spi); - struct ca8210_platform_data *pdata = spi->dev.platform_data; if (!np) return -EFAULT; @@ -2785,25 +2784,16 @@ static void ca8210_unregister_ext_clock(struct spi_device *spi) */ static int ca8210_reset_init(struct spi_device *spi) { - int ret; - struct ca8210_platform_data *pdata = spi->dev.platform_data; - - pdata->gpio_reset = of_get_named_gpio( - spi->dev.of_node, - "reset-gpio", - 0 - ); + struct device *dev = &spi->dev; + struct ca8210_platform_data *pdata = dev_get_platdata(dev); - ret = gpio_direction_output(pdata->gpio_reset, 1); - if (ret < 0) { - dev_crit( - &spi->dev, - "Reset GPIO %d did not set to output mode\n", - pdata->gpio_reset - ); + pdata->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW); + if (IS_ERR(pdata->reset_gpio)) { + dev_crit(dev, "Reset GPIO did not set to output mode\n"); + return PTR_ERR(pdata->reset_gpio); } - return ret; + return 0; } /** @@ -2814,23 +2804,19 @@ static int ca8210_reset_init(struct spi_device *spi) */ static int ca8210_interrupt_init(struct spi_device *spi) { + struct device *dev = &spi->dev; + struct ca8210_platform_data *pdata = dev_get_platdata(dev); int ret; - struct ca8210_platform_data *pdata = spi->dev.platform_data; - pdata->gpio_irq = of_get_named_gpio( - spi->dev.of_node, - "irq-gpio", - 0 - ); + pdata->irq_gpio = devm_gpiod_get(dev, "irq", GPIOD_IN); + if (IS_ERR(pdata->irq_gpio)) { + dev_crit(dev, "Could not retrieve IRQ GPIO\n"); + return PTR_ERR(pdata->irq_gpio); + } - pdata->irq_id = gpio_to_irq(pdata->gpio_irq); + pdata->irq_id 
= gpiod_to_irq(pdata->irq_gpio); if (pdata->irq_id < 0) { - dev_crit( - &spi->dev, - "Could not get irq for gpio pin %d\n", - pdata->gpio_irq - ); - gpio_free(pdata->gpio_irq); + dev_crit(dev, "Could not get irq for IRQ GPIO\n"); return pdata->irq_id; } @@ -2841,10 +2827,8 @@ static int ca8210_interrupt_init(struct spi_device *spi) "ca8210-irq", spi_get_drvdata(spi) ); - if (ret) { + if (ret) dev_crit(&spi->dev, "request_irq %d failed\n", pdata->irq_id); - gpio_free(pdata->gpio_irq); - } return ret; } diff --git a/drivers/net/ipa/data/ipa_data-v4.7.c b/drivers/net/ipa/data/ipa_data-v4.7.c index c8c23d9be961b..41f212209993f 100644 --- a/drivers/net/ipa/data/ipa_data-v4.7.c +++ b/drivers/net/ipa/data/ipa_data-v4.7.c @@ -28,20 +28,18 @@ enum ipa_resource_type { enum ipa_rsrc_group_id { /* Source resource group identifiers */ IPA_RSRC_GROUP_SRC_UL_DL = 0, - IPA_RSRC_GROUP_SRC_UC_RX_Q, IPA_RSRC_GROUP_SRC_COUNT, /* Last in set; not a source group */ /* Destination resource group identifiers */ - IPA_RSRC_GROUP_DST_UL_DL_DPL = 0, - IPA_RSRC_GROUP_DST_UNUSED_1, + IPA_RSRC_GROUP_DST_UL_DL = 0, IPA_RSRC_GROUP_DST_COUNT, /* Last; not a destination group */ }; /* QSB configuration data for an SoC having IPA v4.7 */ static const struct ipa_qsb_data ipa_qsb_data[] = { [IPA_QSB_MASTER_DDR] = { - .max_writes = 8, - .max_reads = 0, /* no limit (hardware max) */ + .max_writes = 12, + .max_reads = 13, .max_reads_beats = 120, }, }; @@ -81,7 +79,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = { }, .endpoint = { .config = { - .resource_group = IPA_RSRC_GROUP_DST_UL_DL_DPL, + .resource_group = IPA_RSRC_GROUP_DST_UL_DL, .aggregation = true, .status_enable = true, .rx = { @@ -106,6 +104,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = { .filter_support = true, .config = { .resource_group = IPA_RSRC_GROUP_SRC_UL_DL, + .checksum = true, .qmap = true, .status_enable = true, .tx = { @@ -128,7 +127,8 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = { }, .endpoint = { .config = { - .resource_group = IPA_RSRC_GROUP_DST_UL_DL_DPL, + .resource_group = IPA_RSRC_GROUP_DST_UL_DL, + .checksum = true, .qmap = true, .aggregation = true, .rx = { @@ -197,12 +197,12 @@ static const struct ipa_resource ipa_resource_src[] = { /* Destination resource configuration data for an SoC having IPA v4.7 */ static const struct ipa_resource ipa_resource_dst[] = { [IPA_RESOURCE_TYPE_DST_DATA_SECTORS] = { - .limits[IPA_RSRC_GROUP_DST_UL_DL_DPL] = { + .limits[IPA_RSRC_GROUP_DST_UL_DL] = { .min = 7, .max = 7, }, }, [IPA_RESOURCE_TYPE_DST_DPS_DMARS] = { - .limits[IPA_RSRC_GROUP_DST_UL_DL_DPL] = { + .limits[IPA_RSRC_GROUP_DST_UL_DL] = { .min = 2, .max = 2, }, }, diff --git a/drivers/net/ipvlan/ipvlan.h b/drivers/net/ipvlan/ipvlan.h index 025e0c19ec255..50de3ee204dbc 100644 --- a/drivers/net/ipvlan/ipvlan.h +++ b/drivers/net/ipvlan/ipvlan.h @@ -166,8 +166,7 @@ struct ipvl_addr *ipvlan_addr_lookup(struct ipvl_port *port, void *lyr3h, void *ipvlan_get_L3_hdr(struct ipvl_port *port, struct sk_buff *skb, int *type); void ipvlan_count_rx(const struct ipvl_dev *ipvlan, unsigned int len, bool success, bool mcast); -int ipvlan_link_new(struct net *src_net, struct net_device *dev, - struct nlattr *tb[], struct nlattr *data[], +int ipvlan_link_new(struct net_device *dev, struct rtnl_newlink_params *params, struct netlink_ext_ack *extack); void ipvlan_link_delete(struct net_device *dev, struct list_head *head); void ipvlan_link_setup(struct net_device *dev); diff --git 
a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c index fd591ddb3884d..ca62188a317ad 100644 --- a/drivers/net/ipvlan/ipvlan_core.c +++ b/drivers/net/ipvlan/ipvlan_core.c @@ -416,20 +416,25 @@ struct ipvl_addr *ipvlan_addr_lookup(struct ipvl_port *port, void *lyr3h, static noinline_for_stack int ipvlan_process_v4_outbound(struct sk_buff *skb) { - const struct iphdr *ip4h = ip_hdr(skb); struct net_device *dev = skb->dev; struct net *net = dev_net(dev); - struct rtable *rt; int err, ret = NET_XMIT_DROP; + const struct iphdr *ip4h; + struct rtable *rt; struct flowi4 fl4 = { .flowi4_oif = dev->ifindex, - .flowi4_tos = inet_dscp_to_dsfield(ip4h_dscp(ip4h)), .flowi4_flags = FLOWI_FLAG_ANYSRC, .flowi4_mark = skb->mark, - .daddr = ip4h->daddr, - .saddr = ip4h->saddr, }; + if (!pskb_network_may_pull(skb, sizeof(struct iphdr))) + goto err; + + ip4h = ip_hdr(skb); + fl4.daddr = ip4h->daddr; + fl4.saddr = ip4h->saddr; + fl4.flowi4_tos = inet_dscp_to_dsfield(ip4h_dscp(ip4h)); + rt = ip_route_output_flow(net, &fl4, NULL); if (IS_ERR(rt)) goto err; @@ -488,6 +493,12 @@ static int ipvlan_process_v6_outbound(struct sk_buff *skb) struct net_device *dev = skb->dev; int err, ret = NET_XMIT_DROP; + if (!pskb_network_may_pull(skb, sizeof(struct ipv6hdr))) { + DEV_STATS_INC(dev, tx_errors); + kfree_skb(skb); + return ret; + } + err = ipvlan_route_v6_outbound(dev, skb); if (unlikely(err)) { DEV_STATS_INC(dev, tx_errors); diff --git a/drivers/net/ipvlan/ipvlan_l3s.c b/drivers/net/ipvlan/ipvlan_l3s.c index b4ef386bdb1ba..7c017fe35522a 100644 --- a/drivers/net/ipvlan/ipvlan_l3s.c +++ b/drivers/net/ipvlan/ipvlan_l3s.c @@ -226,5 +226,4 @@ void ipvlan_l3s_unregister(struct ipvl_port *port) dev->priv_flags &= ~IFF_L3MDEV_RX_HANDLER; ipvlan_unregister_nf_hook(read_pnet(&port->pnet)); - dev->l3mdev_ops = NULL; } diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c index da3a97a655070..0ed2fd833a5db 100644 --- a/drivers/net/ipvlan/ipvlan_main.c +++ b/drivers/net/ipvlan/ipvlan_main.c @@ -3,6 +3,7 @@ */ #include +#include #include "ipvlan.h" @@ -532,11 +533,13 @@ static int ipvlan_nl_fillinfo(struct sk_buff *skb, return ret; } -int ipvlan_link_new(struct net *src_net, struct net_device *dev, - struct nlattr *tb[], struct nlattr *data[], +int ipvlan_link_new(struct net_device *dev, struct rtnl_newlink_params *params, struct netlink_ext_ack *extack) { + struct net *link_net = rtnl_newlink_link_net(params); struct ipvl_dev *ipvlan = netdev_priv(dev); + struct nlattr **data = params->data; + struct nlattr **tb = params->tb; struct ipvl_port *port; struct net_device *phy_dev; int err; @@ -545,7 +548,7 @@ int ipvlan_link_new(struct net *src_net, struct net_device *dev, if (!tb[IFLA_LINK]) return -EINVAL; - phy_dev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK])); + phy_dev = __dev_get_by_index(link_net, nla_get_u32(tb[IFLA_LINK])); if (!phy_dev) return -ENODEV; diff --git a/drivers/net/ipvlan/ipvtap.c b/drivers/net/ipvlan/ipvtap.c index 1afc4c47be73f..edd13916831a7 100644 --- a/drivers/net/ipvlan/ipvtap.c +++ b/drivers/net/ipvlan/ipvtap.c @@ -73,8 +73,8 @@ static void ipvtap_update_features(struct tap_dev *tap, netdev_update_features(vlan->dev); } -static int ipvtap_newlink(struct net *src_net, struct net_device *dev, - struct nlattr *tb[], struct nlattr *data[], +static int ipvtap_newlink(struct net_device *dev, + struct rtnl_newlink_params *params, struct netlink_ext_ack *extack) { struct ipvtap_dev *vlantap = netdev_priv(dev); @@ -97,7 +97,7 @@ static int 
ipvtap_newlink(struct net *src_net, struct net_device *dev, /* Don't put anything that may fail after macvlan_common_newlink * because we can't undo what it does. */ - err = ipvlan_link_new(src_net, dev, tb, data, extack); + err = ipvlan_link_new(dev, params, extack); if (err) { netdev_rx_handler_unregister(dev); return err; diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c index c8840c3b9a1bc..1fb6ce6843ade 100644 --- a/drivers/net/loopback.c +++ b/drivers/net/loopback.c @@ -54,6 +54,7 @@ #include #include #include +#include #include /* blackhole_netdev - a device used for dsts that are marked expired! @@ -172,7 +173,7 @@ static void gen_lo_setup(struct net_device *dev, dev->flags = IFF_LOOPBACK; dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE; dev->lltx = true; - dev->netns_local = true; + dev->netns_immutable = true; netif_keep_dst(dev); dev->hw_features = NETIF_F_GSO_SOFTWARE; dev->features = NETIF_F_SG | NETIF_F_FRAGLIST @@ -244,8 +245,22 @@ static netdev_tx_t blackhole_netdev_xmit(struct sk_buff *skb, return NETDEV_TX_OK; } +static int blackhole_neigh_output(struct neighbour *n, struct sk_buff *skb) +{ + kfree_skb(skb); + return 0; +} + +static int blackhole_neigh_construct(struct net_device *dev, + struct neighbour *n) +{ + n->output = blackhole_neigh_output; + return 0; +} + static const struct net_device_ops blackhole_netdev_ops = { .ndo_start_xmit = blackhole_netdev_xmit, + .ndo_neigh_construct = blackhole_neigh_construct, }; /* This is a dst-dummy device used specifically for invalidated diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c index 1bc1e5993f56e..3d315e30ee472 100644 --- a/drivers/net/macsec.c +++ b/drivers/net/macsec.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include #include @@ -4141,11 +4142,14 @@ static int macsec_add_dev(struct net_device *dev, sci_t sci, u8 icv_len) static struct lock_class_key macsec_netdev_addr_lock_key; -static int macsec_newlink(struct net *net, struct net_device *dev, - struct nlattr *tb[], struct nlattr *data[], +static int macsec_newlink(struct net_device *dev, + struct rtnl_newlink_params *params, struct netlink_ext_ack *extack) { + struct net *link_net = rtnl_newlink_link_net(params); struct macsec_dev *macsec = macsec_priv(dev); + struct nlattr **data = params->data; + struct nlattr **tb = params->tb; rx_handler_func_t *rx_handler; u8 icv_len = MACSEC_DEFAULT_ICV_LEN; struct net_device *real_dev; @@ -4154,7 +4158,7 @@ static int macsec_newlink(struct net *net, struct net_device *dev, if (!tb[IFLA_LINK]) return -EINVAL; - real_dev = __dev_get_by_index(net, nla_get_u32(tb[IFLA_LINK])); + real_dev = __dev_get_by_index(link_net, nla_get_u32(tb[IFLA_LINK])); if (!real_dev) return -ENODEV; if (real_dev->type != ARPHRD_ETHER) diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index fed4fe2a4748f..d0dfa6bca6cc2 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -28,6 +28,7 @@ #include #include #include +#include #include #include #include @@ -1440,21 +1441,24 @@ static int macvlan_changelink_sources(struct macvlan_dev *vlan, u32 mode, return 0; } -int macvlan_common_newlink(struct net *src_net, struct net_device *dev, - struct nlattr *tb[], struct nlattr *data[], +int macvlan_common_newlink(struct net_device *dev, + struct rtnl_newlink_params *params, struct netlink_ext_ack *extack) { + struct net *link_net = rtnl_newlink_link_net(params); struct macvlan_dev *vlan = netdev_priv(dev); - struct macvlan_port *port; + struct nlattr **data = params->data; + struct nlattr **tb = 
params->tb; struct net_device *lowerdev; - int err; - int macmode; + struct macvlan_port *port; bool create = false; + int macmode; + int err; if (!tb[IFLA_LINK]) return -EINVAL; - lowerdev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK])); + lowerdev = __dev_get_by_index(link_net, nla_get_u32(tb[IFLA_LINK])); if (lowerdev == NULL) return -ENODEV; @@ -1565,11 +1569,11 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev, } EXPORT_SYMBOL_GPL(macvlan_common_newlink); -static int macvlan_newlink(struct net *src_net, struct net_device *dev, - struct nlattr *tb[], struct nlattr *data[], +static int macvlan_newlink(struct net_device *dev, + struct rtnl_newlink_params *params, struct netlink_ext_ack *extack) { - return macvlan_common_newlink(src_net, dev, tb, data, extack); + return macvlan_common_newlink(dev, params, extack); } void macvlan_dellink(struct net_device *dev, struct list_head *head) diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c index 29a5929d48e55..b391a0f740a3f 100644 --- a/drivers/net/macvtap.c +++ b/drivers/net/macvtap.c @@ -77,8 +77,8 @@ static void macvtap_update_features(struct tap_dev *tap, netdev_update_features(vlan->dev); } -static int macvtap_newlink(struct net *src_net, struct net_device *dev, - struct nlattr *tb[], struct nlattr *data[], +static int macvtap_newlink(struct net_device *dev, + struct rtnl_newlink_params *params, struct netlink_ext_ack *extack) { struct macvtap_dev *vlantap = netdev_priv(dev); @@ -105,7 +105,7 @@ static int macvtap_newlink(struct net *src_net, struct net_device *dev, /* Don't put anything that may fail after macvlan_common_newlink * because we can't undo what it does. */ - err = macvlan_common_newlink(src_net, dev, tb, data, extack); + err = macvlan_common_newlink(dev, params, extack); if (err) { netdev_rx_handler_unregister(dev); return err; diff --git a/drivers/net/mctp/Kconfig b/drivers/net/mctp/Kconfig index 15860d6ac39fe..cf325ab0b1ef5 100644 --- a/drivers/net/mctp/Kconfig +++ b/drivers/net/mctp/Kconfig @@ -47,6 +47,16 @@ config MCTP_TRANSPORT_I3C A MCTP protocol network device is created for each I3C bus having a "mctp-controller" devicetree property. +config MCTP_TRANSPORT_USB + tristate "MCTP USB transport" + depends on USB + help + Provides a driver to access MCTP devices over USB transport, + defined by DMTF specification DSP0283. + + MCTP-over-USB interfaces are peer-to-peer, so each interface + represents a physical connection to one remote MCTP endpoint. 
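Before the driver code that follows, it helps to see the DSP0283 framing it implements: every USB bulk transfer is prefixed by a small header carrying the DMTF vendor ID and the framed length. A minimal sketch, with field names taken from how mctp-usb.c below fills them; the real definition lives in a shared mctp-usb header this excerpt does not show, so treat the exact layout as an assumption:

	struct mctp_usb_hdr {	/* sketch only; layout inferred from driver usage */
		__be16 id;	/* DMTF vendor ID (MCTP_USB_DMTF_ID); checked on receive */
		u8 rsvd;	/* reserved, zeroed on transmit */
		u8 len;		/* header plus MCTP packet length, in bytes */
	} __packed;

On transmit, mctp_usb_start_xmit() pushes this header ahead of the packet and bounds the result by MCTP_USB_XFER_SIZE; on receive, one transfer may carry several framed packets back to back, which is why mctp_usb_in_complete() walks the buffer packet by packet.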
+ endmenu endif diff --git a/drivers/net/mctp/Makefile b/drivers/net/mctp/Makefile index e1cb99ced54ac..c36006849a1e7 100644 --- a/drivers/net/mctp/Makefile +++ b/drivers/net/mctp/Makefile @@ -1,3 +1,4 @@ obj-$(CONFIG_MCTP_SERIAL) += mctp-serial.o obj-$(CONFIG_MCTP_TRANSPORT_I2C) += mctp-i2c.o obj-$(CONFIG_MCTP_TRANSPORT_I3C) += mctp-i3c.o +obj-$(CONFIG_MCTP_TRANSPORT_USB) += mctp-usb.o diff --git a/drivers/net/mctp/mctp-i2c.c b/drivers/net/mctp/mctp-i2c.c index e3dcdeacc12c5..f782d93f826ef 100644 --- a/drivers/net/mctp/mctp-i2c.c +++ b/drivers/net/mctp/mctp-i2c.c @@ -537,7 +537,7 @@ static void mctp_i2c_xmit(struct mctp_i2c_dev *midev, struct sk_buff *skb) rc = __i2c_transfer(midev->adapter, &msg, 1); /* on tx errors, the flow can no longer be considered valid */ - if (rc) + if (rc < 0) mctp_i2c_invalidate_tx_flow(midev, skb); break; @@ -583,6 +583,7 @@ static int mctp_i2c_header_create(struct sk_buff *skb, struct net_device *dev, struct mctp_i2c_hdr *hdr; struct mctp_hdr *mhdr; u8 lldst, llsrc; + int rc; if (len > MCTP_I2C_MAXMTU) return -EMSGSIZE; @@ -593,6 +594,10 @@ static int mctp_i2c_header_create(struct sk_buff *skb, struct net_device *dev, lldst = *((u8 *)daddr); llsrc = *((u8 *)saddr); + rc = skb_cow_head(skb, sizeof(struct mctp_i2c_hdr)); + if (rc) + return rc; + skb_push(skb, sizeof(struct mctp_i2c_hdr)); skb_reset_mac_header(skb); hdr = (void *)skb_mac_header(skb); diff --git a/drivers/net/mctp/mctp-i3c.c b/drivers/net/mctp/mctp-i3c.c index d247fe483c588..c678f79aa3561 100644 --- a/drivers/net/mctp/mctp-i3c.c +++ b/drivers/net/mctp/mctp-i3c.c @@ -506,6 +506,14 @@ static int mctp_i3c_header_create(struct sk_buff *skb, struct net_device *dev, const void *saddr, unsigned int len) { struct mctp_i3c_internal_hdr *ihdr; + int rc; + + if (!daddr || !saddr) + return -EINVAL; + + rc = skb_cow_head(skb, sizeof(struct mctp_i3c_internal_hdr)); + if (rc) + return rc; skb_push(skb, sizeof(struct mctp_i3c_internal_hdr)); skb_reset_mac_header(skb); diff --git a/drivers/net/mctp/mctp-usb.c b/drivers/net/mctp/mctp-usb.c new file mode 100644 index 0000000000000..e8d4b01c3f345 --- /dev/null +++ b/drivers/net/mctp/mctp-usb.c @@ -0,0 +1,385 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * mctp-usb.c - MCTP-over-USB (DMTF DSP0283) transport binding driver. 
+ * + * DSP0283 is available at: + * https://www.dmtf.org/sites/default/files/standards/documents/DSP0283_1.0.1.pdf + * + * Copyright (C) 2024-2025 Code Construct Pty Ltd + */ + +#include +#include +#include +#include + +#include +#include +#include + +#include + +struct mctp_usb { + struct usb_device *usbdev; + struct usb_interface *intf; + bool stopped; + + struct net_device *netdev; + + u8 ep_in; + u8 ep_out; + + struct urb *tx_urb; + struct urb *rx_urb; + + struct delayed_work rx_retry_work; +}; + +static void mctp_usb_out_complete(struct urb *urb) +{ + struct sk_buff *skb = urb->context; + struct net_device *netdev = skb->dev; + int status; + + status = urb->status; + + switch (status) { + case -ENOENT: + case -ECONNRESET: + case -ESHUTDOWN: + case -EPROTO: + dev_dstats_tx_dropped(netdev); + break; + case 0: + dev_dstats_tx_add(netdev, skb->len); + netif_wake_queue(netdev); + consume_skb(skb); + return; + default: + netdev_dbg(netdev, "unexpected tx urb status: %d\n", status); + dev_dstats_tx_dropped(netdev); + } + + kfree_skb(skb); +} + +static netdev_tx_t mctp_usb_start_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + struct mctp_usb *mctp_usb = netdev_priv(dev); + struct mctp_usb_hdr *hdr; + unsigned int plen; + struct urb *urb; + int rc; + + plen = skb->len; + + if (plen + sizeof(*hdr) > MCTP_USB_XFER_SIZE) + goto err_drop; + + rc = skb_cow_head(skb, sizeof(*hdr)); + if (rc) + goto err_drop; + + hdr = skb_push(skb, sizeof(*hdr)); + if (!hdr) + goto err_drop; + + hdr->id = cpu_to_be16(MCTP_USB_DMTF_ID); + hdr->rsvd = 0; + hdr->len = plen + sizeof(*hdr); + + urb = mctp_usb->tx_urb; + + usb_fill_bulk_urb(urb, mctp_usb->usbdev, + usb_sndbulkpipe(mctp_usb->usbdev, mctp_usb->ep_out), + skb->data, skb->len, + mctp_usb_out_complete, skb); + + rc = usb_submit_urb(urb, GFP_ATOMIC); + if (rc) + goto err_drop; + else + netif_stop_queue(dev); + + return NETDEV_TX_OK; + +err_drop: + dev_dstats_tx_dropped(dev); + kfree_skb(skb); + return NETDEV_TX_OK; +} + +static void mctp_usb_in_complete(struct urb *urb); + +/* If we fail to queue an in urb atomically (either due to skb allocation or + * urb submission), we will schedule a rx queue in nonatomic context + * after a delay, specified in jiffies + */ +static const unsigned long RX_RETRY_DELAY = HZ / 4; + +static int mctp_usb_rx_queue(struct mctp_usb *mctp_usb, gfp_t gfp) +{ + struct sk_buff *skb; + int rc; + + skb = __netdev_alloc_skb(mctp_usb->netdev, MCTP_USB_XFER_SIZE, gfp); + if (!skb) { + rc = -ENOMEM; + goto err_retry; + } + + usb_fill_bulk_urb(mctp_usb->rx_urb, mctp_usb->usbdev, + usb_rcvbulkpipe(mctp_usb->usbdev, mctp_usb->ep_in), + skb->data, MCTP_USB_XFER_SIZE, + mctp_usb_in_complete, skb); + + rc = usb_submit_urb(mctp_usb->rx_urb, gfp); + if (rc) { + netdev_dbg(mctp_usb->netdev, "rx urb submit failure: %d\n", rc); + kfree_skb(skb); + if (rc == -ENOMEM) + goto err_retry; + } + + return rc; + +err_retry: + schedule_delayed_work(&mctp_usb->rx_retry_work, RX_RETRY_DELAY); + return rc; +} + +static void mctp_usb_in_complete(struct urb *urb) +{ + struct sk_buff *skb = urb->context; + struct net_device *netdev = skb->dev; + struct mctp_usb *mctp_usb = netdev_priv(netdev); + struct mctp_skb_cb *cb; + unsigned int len; + int status; + + status = urb->status; + + switch (status) { + case -ENOENT: + case -ECONNRESET: + case -ESHUTDOWN: + case -EPROTO: + kfree_skb(skb); + return; + case 0: + break; + default: + netdev_dbg(netdev, "unexpected rx urb status: %d\n", status); + kfree_skb(skb); + return; + } + + len = urb->actual_length; + 
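/* One completed bulk transfer may hold several DSP0283-framed MCTP + * packets back-to-back; the loop below peels one header per iteration + * and clones the skb whenever data remains past the current packet. + */ +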
__skb_put(skb, len); + + while (skb) { + struct sk_buff *skb2 = NULL; + struct mctp_usb_hdr *hdr; + u8 pkt_len; /* length of MCTP packet, no USB header */ + + hdr = skb_pull_data(skb, sizeof(*hdr)); + if (!hdr) + break; + + if (be16_to_cpu(hdr->id) != MCTP_USB_DMTF_ID) { + netdev_dbg(netdev, "rx: invalid id %04x\n", + be16_to_cpu(hdr->id)); + break; + } + + if (hdr->len < + sizeof(struct mctp_hdr) + sizeof(struct mctp_usb_hdr)) { + netdev_dbg(netdev, "rx: short packet (hdr) %d\n", + hdr->len); + break; + } + + /* we know we have at least sizeof(struct mctp_usb_hdr) here */ + pkt_len = hdr->len - sizeof(struct mctp_usb_hdr); + if (pkt_len > skb->len) { + netdev_dbg(netdev, + "rx: short packet (xfer) %d, actual %d\n", + hdr->len, skb->len); + break; + } + + if (pkt_len < skb->len) { + /* more packets may follow - clone to a new + * skb to use on the next iteration + */ + skb2 = skb_clone(skb, GFP_ATOMIC); + if (skb2) { + if (!skb_pull(skb2, pkt_len)) { + kfree_skb(skb2); + skb2 = NULL; + } + } + skb_trim(skb, pkt_len); + } + + dev_dstats_rx_add(netdev, skb->len); + + skb->protocol = htons(ETH_P_MCTP); + skb_reset_network_header(skb); + cb = __mctp_cb(skb); + cb->halen = 0; + netif_rx(skb); + + skb = skb2; + } + + if (skb) + kfree_skb(skb); + + mctp_usb_rx_queue(mctp_usb, GFP_ATOMIC); +} + +static void mctp_usb_rx_retry_work(struct work_struct *work) +{ + struct mctp_usb *mctp_usb = container_of(work, struct mctp_usb, + rx_retry_work.work); + + if (READ_ONCE(mctp_usb->stopped)) + return; + + mctp_usb_rx_queue(mctp_usb, GFP_KERNEL); +} + +static int mctp_usb_open(struct net_device *dev) +{ + struct mctp_usb *mctp_usb = netdev_priv(dev); + + WRITE_ONCE(mctp_usb->stopped, false); + + return mctp_usb_rx_queue(mctp_usb, GFP_KERNEL); +} + +static int mctp_usb_stop(struct net_device *dev) +{ + struct mctp_usb *mctp_usb = netdev_priv(dev); + + netif_stop_queue(dev); + + /* prevent RX submission retry */ + WRITE_ONCE(mctp_usb->stopped, true); + + usb_kill_urb(mctp_usb->rx_urb); + usb_kill_urb(mctp_usb->tx_urb); + + cancel_delayed_work_sync(&mctp_usb->rx_retry_work); + + return 0; +} + +static const struct net_device_ops mctp_usb_netdev_ops = { + .ndo_start_xmit = mctp_usb_start_xmit, + .ndo_open = mctp_usb_open, + .ndo_stop = mctp_usb_stop, +}; + +static void mctp_usb_netdev_setup(struct net_device *dev) +{ + dev->type = ARPHRD_MCTP; + + dev->mtu = MCTP_USB_MTU_MIN; + dev->min_mtu = MCTP_USB_MTU_MIN; + dev->max_mtu = MCTP_USB_MTU_MAX; + + dev->hard_header_len = sizeof(struct mctp_usb_hdr); + dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN; + dev->flags = IFF_NOARP; + dev->netdev_ops = &mctp_usb_netdev_ops; + dev->pcpu_stat_type = NETDEV_PCPU_STAT_DSTATS; +} + +static int mctp_usb_probe(struct usb_interface *intf, + const struct usb_device_id *id) +{ + struct usb_endpoint_descriptor *ep_in, *ep_out; + struct usb_host_interface *iface_desc; + struct net_device *netdev; + struct mctp_usb *dev; + int rc; + + /* only one alternate */ + iface_desc = intf->cur_altsetting; + + rc = usb_find_common_endpoints(iface_desc, &ep_in, &ep_out, NULL, NULL); + if (rc) { + dev_err(&intf->dev, "invalid endpoints on device?\n"); + return rc; + } + + netdev = alloc_netdev(sizeof(*dev), "mctpusb%d", NET_NAME_ENUM, + mctp_usb_netdev_setup); + if (!netdev) + return -ENOMEM; + + SET_NETDEV_DEV(netdev, &intf->dev); + dev = netdev_priv(netdev); + dev->netdev = netdev; + dev->usbdev = usb_get_dev(interface_to_usbdev(intf)); + dev->intf = intf; + usb_set_intfdata(intf, dev); + + dev->ep_in = ep_in->bEndpointAddress; + dev->ep_out = 
ep_out->bEndpointAddress; + + dev->tx_urb = usb_alloc_urb(0, GFP_KERNEL); + dev->rx_urb = usb_alloc_urb(0, GFP_KERNEL); + if (!dev->tx_urb || !dev->rx_urb) { + rc = -ENOMEM; + goto err_free_urbs; + } + + INIT_DELAYED_WORK(&dev->rx_retry_work, mctp_usb_rx_retry_work); + + rc = mctp_register_netdev(netdev, NULL, MCTP_PHYS_BINDING_USB); + if (rc) + goto err_free_urbs; + + return 0; + +err_free_urbs: + usb_free_urb(dev->tx_urb); + usb_free_urb(dev->rx_urb); + free_netdev(netdev); + return rc; +} + +static void mctp_usb_disconnect(struct usb_interface *intf) +{ + struct mctp_usb *dev = usb_get_intfdata(intf); + + mctp_unregister_netdev(dev->netdev); + usb_free_urb(dev->tx_urb); + usb_free_urb(dev->rx_urb); + usb_put_dev(dev->usbdev); + free_netdev(dev->netdev); +} + +static const struct usb_device_id mctp_usb_devices[] = { + { USB_INTERFACE_INFO(USB_CLASS_MCTP, 0x0, 0x1) }, + { 0 }, +}; + +MODULE_DEVICE_TABLE(usb, mctp_usb_devices); + +static struct usb_driver mctp_usb_driver = { + .name = "mctp-usb", + .id_table = mctp_usb_devices, + .probe = mctp_usb_probe, + .disconnect = mctp_usb_disconnect, +}; + +module_usb_driver(mctp_usb_driver); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Jeremy Kerr "); +MODULE_DESCRIPTION("MCTP USB transport"); diff --git a/drivers/net/mdio/mdio-i2c.c b/drivers/net/mdio/mdio-i2c.c index da2001ea1f993..53e96bfab5422 100644 --- a/drivers/net/mdio/mdio-i2c.c +++ b/drivers/net/mdio/mdio-i2c.c @@ -106,6 +106,62 @@ static int i2c_mii_write_default_c22(struct mii_bus *bus, int phy_id, int reg, return i2c_mii_write_default_c45(bus, phy_id, -1, reg, val); } +static int smbus_byte_mii_read_default_c22(struct mii_bus *bus, int phy_id, + int reg) +{ + struct i2c_adapter *i2c = bus->priv; + union i2c_smbus_data smbus_data; + int val = 0, ret; + + if (!i2c_mii_valid_phy_id(phy_id)) + return 0; + + ret = i2c_smbus_xfer(i2c, i2c_mii_phy_addr(phy_id), 0, + I2C_SMBUS_READ, reg, + I2C_SMBUS_BYTE_DATA, &smbus_data); + if (ret < 0) + return ret; + + val = (smbus_data.byte & 0xff) << 8; + + ret = i2c_smbus_xfer(i2c, i2c_mii_phy_addr(phy_id), 0, + I2C_SMBUS_READ, reg, + I2C_SMBUS_BYTE_DATA, &smbus_data); + if (ret < 0) + return ret; + + val |= smbus_data.byte & 0xff; + + return val; +} + +static int smbus_byte_mii_write_default_c22(struct mii_bus *bus, int phy_id, + int reg, u16 val) +{ + struct i2c_adapter *i2c = bus->priv; + union i2c_smbus_data smbus_data; + int ret; + + if (!i2c_mii_valid_phy_id(phy_id)) + return 0; + + smbus_data.byte = (val & 0xff00) >> 8; + + ret = i2c_smbus_xfer(i2c, i2c_mii_phy_addr(phy_id), 0, + I2C_SMBUS_WRITE, reg, + I2C_SMBUS_BYTE_DATA, &smbus_data); + if (ret < 0) + return ret; + + smbus_data.byte = val & 0xff; + + ret = i2c_smbus_xfer(i2c, i2c_mii_phy_addr(phy_id), 0, + I2C_SMBUS_WRITE, reg, + I2C_SMBUS_BYTE_DATA, &smbus_data); + + return ret < 0 ? ret : 0; +} + /* RollBall SFPs do not access internal PHY via I2C address 0x56, but * instead via address 0x51, when SFP page is set to 0x03 and password to * 0xffffffff.
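The SMBus path added above matters for hosts whose I2C controllers implement only SMBus byte-data transfers; the wiring in the next hunk selects it as a fallback. A hedged usage sketch, not from this patch (dev and sfp_i2c are illustrative names for the caller's device and adapter):

	struct mii_bus *mii;

	/* Full I2C is preferred; SMBus byte data is accepted only for the
	 * MDIO_I2C_MARVELL_C22 protocol, anything else fails with -EINVAL.
	 */
	mii = mdio_i2c_alloc(dev, sfp_i2c, MDIO_I2C_MARVELL_C22);
	if (IS_ERR(mii))
		return PTR_ERR(mii);

On an SMBus-only adapter, each 16-bit MII register access then becomes two byte-data transfers, high byte first, as smbus_byte_mii_read_default_c22() above shows.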
@@ -378,13 +434,26 @@ static int i2c_mii_init_rollball(struct i2c_adapter *i2c) return 0; } +static bool mdio_i2c_check_functionality(struct i2c_adapter *i2c, + enum mdio_i2c_proto protocol) +{ + if (i2c_check_functionality(i2c, I2C_FUNC_I2C)) + return true; + + if (i2c_check_functionality(i2c, I2C_FUNC_SMBUS_BYTE_DATA) && + protocol == MDIO_I2C_MARVELL_C22) + return true; + + return false; +} + struct mii_bus *mdio_i2c_alloc(struct device *parent, struct i2c_adapter *i2c, enum mdio_i2c_proto protocol) { struct mii_bus *mii; int ret; - if (!i2c_check_functionality(i2c, I2C_FUNC_I2C)) + if (!mdio_i2c_check_functionality(i2c, protocol)) return ERR_PTR(-EINVAL); mii = mdiobus_alloc(); @@ -395,6 +464,14 @@ struct mii_bus *mdio_i2c_alloc(struct device *parent, struct i2c_adapter *i2c, mii->parent = parent; mii->priv = i2c; + /* Only use SMBus if we have no other choice */ + if (i2c_check_functionality(i2c, I2C_FUNC_SMBUS_BYTE_DATA) && + !i2c_check_functionality(i2c, I2C_FUNC_I2C)) { + mii->read = smbus_byte_mii_read_default_c22; + mii->write = smbus_byte_mii_write_default_c22; + return mii; + } + switch (protocol) { case MDIO_I2C_ROLLBALL: ret = i2c_mii_init_rollball(i2c); diff --git a/drivers/net/mii.c b/drivers/net/mii.c index 37bc3131d31a8..e305bf0f1d048 100644 --- a/drivers/net/mii.c +++ b/drivers/net/mii.c @@ -464,6 +464,8 @@ int mii_nway_restart (struct mii_if_info *mii) /* if autoneg is off, it's an error */ bmcr = mii->mdio_read(mii->dev, mii->phy_id, MII_BMCR); + if (bmcr < 0) + return bmcr; if (bmcr & BMCR_ANENABLE) { bmcr |= BMCR_ANRESTART; diff --git a/drivers/net/net_failover.c b/drivers/net/net_failover.c index 54c8b9d5b5fcf..5b50d9186f121 100644 --- a/drivers/net/net_failover.c +++ b/drivers/net/net_failover.c @@ -734,7 +734,7 @@ struct failover *net_failover_create(struct net_device *standby_dev) failover_dev->lltx = true; /* Don't allow failover devices to change network namespaces. */ - failover_dev->netns_local = true; + failover_dev->netns_immutable = true; failover_dev->hw_features = FAILOVER_VLAN_FEATURES | NETIF_F_HW_VLAN_CTAG_TX | diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c index 86ab4a42769a4..4289ccd3e41bf 100644 --- a/drivers/net/netconsole.c +++ b/drivers/net/netconsole.c @@ -45,12 +45,12 @@ MODULE_DESCRIPTION("Console driver for network interfaces"); MODULE_LICENSE("GPL"); #define MAX_PARAM_LENGTH 256 -#define MAX_USERDATA_ENTRY_LENGTH 256 -#define MAX_USERDATA_VALUE_LENGTH 200 +#define MAX_EXTRADATA_ENTRY_LEN 256 +#define MAX_EXTRADATA_VALUE_LEN 200 /* The number 3 comes from userdata entry format characters (' ', '=', '\n') */ -#define MAX_USERDATA_NAME_LENGTH (MAX_USERDATA_ENTRY_LENGTH - \ - MAX_USERDATA_VALUE_LENGTH - 3) -#define MAX_USERDATA_ITEMS 16 +#define MAX_EXTRADATA_NAME_LEN (MAX_EXTRADATA_ENTRY_LEN - \ + MAX_EXTRADATA_VALUE_LEN - 3) +#define MAX_EXTRADATA_ITEMS 16 #define MAX_PRINT_CHUNK 1000 static char config[MAX_PARAM_LENGTH]; @@ -97,13 +97,27 @@ struct netconsole_target_stats { struct u64_stats_sync syncp; }; +/* Features enabled in sysdata. Contrary to userdata, this data is populated by + * the kernel. The fields are designed as bitwise flags, allowing multiple + * features to be set in sysdata_fields. 
+ */ +enum sysdata_feature { + /* Populate the CPU that sends the message */ + SYSDATA_CPU_NR = BIT(0), + /* Populate the task name (as in current->comm) in sysdata */ + SYSDATA_TASKNAME = BIT(1), + /* Kernel release/version as part of sysdata */ + SYSDATA_RELEASE = BIT(2), +}; + /** * struct netconsole_target - Represents a configured netconsole target. * @list: Links this target into the target_list. * @group: Links us into the configfs subsystem hierarchy. * @userdata_group: Links to the userdata configfs hierarchy - * @userdata_complete: Cached, formatted string of append - * @userdata_length: String length of userdata_complete + * @extradata_complete: Cached, formatted string of append + * @userdata_length: String length of userdata in extradata_complete. + * @sysdata_fields: Sysdata features enabled. * @stats: Packet send stats for the target. Used for debugging. * @enabled: On / off knob to enable / disable target. * Visible from userspace (read-write). @@ -123,20 +137,25 @@ struct netconsole_target_stats { * remote_ip (read-write) * local_mac (read-only) * remote_mac (read-write) + * @buf: The buffer used to send the full msg to the network stack */ struct netconsole_target { struct list_head list; #ifdef CONFIG_NETCONSOLE_DYNAMIC struct config_group group; struct config_group userdata_group; - char userdata_complete[MAX_USERDATA_ENTRY_LENGTH * MAX_USERDATA_ITEMS]; + char extradata_complete[MAX_EXTRADATA_ENTRY_LEN * MAX_EXTRADATA_ITEMS]; size_t userdata_length; + /* bit-wise with sysdata_feature bits */ + u32 sysdata_fields; #endif struct netconsole_target_stats stats; bool enabled; bool extended; bool release; struct netpoll np; + /* protected by target_list_lock */ + char buf[MAX_PRINT_CHUNK]; }; #ifdef CONFIG_NETCONSOLE_DYNAMIC @@ -396,6 +415,46 @@ static ssize_t transmit_errors_show(struct config_item *item, char *buf) return sysfs_emit(buf, "%llu\n", xmit_drop_count + enomem_count); } +/* configfs helper to display if cpu_nr sysdata feature is enabled */ +static ssize_t sysdata_cpu_nr_enabled_show(struct config_item *item, char *buf) +{ + struct netconsole_target *nt = to_target(item->ci_parent); + bool cpu_nr_enabled; + + mutex_lock(&dynamic_netconsole_mutex); + cpu_nr_enabled = !!(nt->sysdata_fields & SYSDATA_CPU_NR); + mutex_unlock(&dynamic_netconsole_mutex); + + return sysfs_emit(buf, "%d\n", cpu_nr_enabled); +} + +/* configfs helper to display if taskname sysdata feature is enabled */ +static ssize_t sysdata_taskname_enabled_show(struct config_item *item, + char *buf) +{ + struct netconsole_target *nt = to_target(item->ci_parent); + bool taskname_enabled; + + mutex_lock(&dynamic_netconsole_mutex); + taskname_enabled = !!(nt->sysdata_fields & SYSDATA_TASKNAME); + mutex_unlock(&dynamic_netconsole_mutex); + + return sysfs_emit(buf, "%d\n", taskname_enabled); +} + +static ssize_t sysdata_release_enabled_show(struct config_item *item, + char *buf) +{ + struct netconsole_target *nt = to_target(item->ci_parent); + bool release_enabled; + + mutex_lock(&dynamic_netconsole_mutex); + release_enabled = !!(nt->sysdata_fields & SYSDATA_RELEASE); + mutex_unlock(&dynamic_netconsole_mutex); + + return sysfs_emit(buf, "%d\n", release_enabled); +} + /* * This one is special -- targets created through the configfs interface * are not enabled (and the corresponding netpoll activated) by default. @@ -659,6 +718,28 @@ static ssize_t remote_ip_store(struct config_item *item, const char *buf, return ret; } +/* Count number of entries we have in extradata.
+ * This is important because the extradata_complete only supports + * MAX_EXTRADATA_ITEMS entries. Before enabling any new {user,sys}data + * feature, number of entries needs to be checked for available space. + */ +static size_t count_extradata_entries(struct netconsole_target *nt) +{ + size_t entries; + + /* Userdata entries */ + entries = list_count_nodes(&nt->userdata_group.cg_children); + /* Plus sysdata entries */ + if (nt->sysdata_fields & SYSDATA_CPU_NR) + entries += 1; + if (nt->sysdata_fields & SYSDATA_TASKNAME) + entries += 1; + if (nt->sysdata_fields & SYSDATA_RELEASE) + entries += 1; + + return entries; +} + static ssize_t remote_mac_store(struct config_item *item, const char *buf, size_t count) { @@ -675,7 +756,7 @@ static ssize_t remote_mac_store(struct config_item *item, const char *buf, if (!mac_pton(buf, remote_mac)) goto out_unlock; - if (buf[3 * ETH_ALEN - 1] && buf[3 * ETH_ALEN - 1] != '\n') + if (buf[MAC_ADDR_STR_LEN] && buf[MAC_ADDR_STR_LEN] != '\n') goto out_unlock; memcpy(nt->np.remote_mac, remote_mac, ETH_ALEN); @@ -687,7 +768,7 @@ static ssize_t remote_mac_store(struct config_item *item, const char *buf, struct userdatum { struct config_item item; - char value[MAX_USERDATA_VALUE_LENGTH]; + char value[MAX_EXTRADATA_VALUE_LEN]; }; static struct userdatum *to_userdatum(struct config_item *item) @@ -724,13 +805,13 @@ static void update_userdata(struct netconsole_target *nt) /* Clear the current string in case the last userdatum was deleted */ nt->userdata_length = 0; - nt->userdata_complete[0] = 0; + nt->extradata_complete[0] = 0; list_for_each(entry, &nt->userdata_group.cg_children) { struct userdatum *udm_item; struct config_item *item; - if (WARN_ON_ONCE(child_count >= MAX_USERDATA_ITEMS)) + if (WARN_ON_ONCE(child_count >= MAX_EXTRADATA_ITEMS)) break; child_count++; @@ -738,19 +819,19 @@ static void update_userdata(struct netconsole_target *nt) udm_item = to_userdatum(item); /* Skip userdata with no value set */ - if (strnlen(udm_item->value, MAX_USERDATA_VALUE_LENGTH) == 0) + if (strnlen(udm_item->value, MAX_EXTRADATA_VALUE_LEN) == 0) continue; - /* This doesn't overflow userdata_complete since it will write - * one entry length (1/MAX_USERDATA_ITEMS long), entry count is + /* This doesn't overflow extradata_complete since it will write + * one entry length (1/MAX_EXTRADATA_ITEMS long), entry count is * checked to not exceed MAX items with child_count above */ - complete_idx += scnprintf(&nt->userdata_complete[complete_idx], - MAX_USERDATA_ENTRY_LENGTH, " %s=%s\n", + complete_idx += scnprintf(&nt->extradata_complete[complete_idx], + MAX_EXTRADATA_ENTRY_LEN, " %s=%s\n", item->ci_name, udm_item->value); } - nt->userdata_length = strnlen(nt->userdata_complete, - sizeof(nt->userdata_complete)); + nt->userdata_length = strnlen(nt->extradata_complete, + sizeof(nt->extradata_complete)); } static ssize_t userdatum_value_store(struct config_item *item, const char *buf, @@ -761,7 +842,7 @@ static ssize_t userdatum_value_store(struct config_item *item, const char *buf, struct userdata *ud; ssize_t ret; - if (count > MAX_USERDATA_VALUE_LENGTH) + if (count > MAX_EXTRADATA_VALUE_LEN) return -EMSGSIZE; mutex_lock(&dynamic_netconsole_mutex); @@ -780,7 +861,132 @@ static ssize_t userdatum_value_store(struct config_item *item, const char *buf, return ret; } +/* disable_sysdata_feature - Disable sysdata feature and clean sysdata + * @nt: target that is disabling the feature + * @feature: feature being disabled + */ +static void disable_sysdata_feature(struct netconsole_target *nt, +
enum sysdata_feature feature) +{ + nt->sysdata_fields &= ~feature; + nt->extradata_complete[nt->userdata_length] = 0; +} + +static ssize_t sysdata_release_enabled_store(struct config_item *item, + const char *buf, size_t count) +{ + struct netconsole_target *nt = to_target(item->ci_parent); + bool release_enabled, curr; + ssize_t ret; + + ret = kstrtobool(buf, &release_enabled); + if (ret) + return ret; + + mutex_lock(&dynamic_netconsole_mutex); + curr = !!(nt->sysdata_fields & SYSDATA_RELEASE); + if (release_enabled == curr) + goto unlock_ok; + + if (release_enabled && + count_extradata_entries(nt) >= MAX_EXTRADATA_ITEMS) { + ret = -ENOSPC; + goto unlock; + } + + if (release_enabled) + nt->sysdata_fields |= SYSDATA_RELEASE; + else + disable_sysdata_feature(nt, SYSDATA_RELEASE); + +unlock_ok: + ret = strnlen(buf, count); +unlock: + mutex_unlock(&dynamic_netconsole_mutex); + return ret; +} + +static ssize_t sysdata_taskname_enabled_store(struct config_item *item, + const char *buf, size_t count) +{ + struct netconsole_target *nt = to_target(item->ci_parent); + bool taskname_enabled, curr; + ssize_t ret; + + ret = kstrtobool(buf, &taskname_enabled); + if (ret) + return ret; + + mutex_lock(&dynamic_netconsole_mutex); + curr = !!(nt->sysdata_fields & SYSDATA_TASKNAME); + if (taskname_enabled == curr) + goto unlock_ok; + + if (taskname_enabled && + count_extradata_entries(nt) >= MAX_EXTRADATA_ITEMS) { + ret = -ENOSPC; + goto unlock; + } + + if (taskname_enabled) + nt->sysdata_fields |= SYSDATA_TASKNAME; + else + disable_sysdata_feature(nt, SYSDATA_TASKNAME); + +unlock_ok: + ret = strnlen(buf, count); +unlock: + mutex_unlock(&dynamic_netconsole_mutex); + return ret; +} + +/* configfs helper to sysdata cpu_nr feature */ +static ssize_t sysdata_cpu_nr_enabled_store(struct config_item *item, + const char *buf, size_t count) +{ + struct netconsole_target *nt = to_target(item->ci_parent); + bool cpu_nr_enabled, curr; + ssize_t ret; + + ret = kstrtobool(buf, &cpu_nr_enabled); + if (ret) + return ret; + + mutex_lock(&dynamic_netconsole_mutex); + curr = !!(nt->sysdata_fields & SYSDATA_CPU_NR); + if (cpu_nr_enabled == curr) + /* no change requested */ + goto unlock_ok; + + if (cpu_nr_enabled && + count_extradata_entries(nt) >= MAX_EXTRADATA_ITEMS) { + /* user wants the new feature, but there is no space in the + * buffer. + */ + ret = -ENOSPC; + goto unlock; + } + + if (cpu_nr_enabled) + nt->sysdata_fields |= SYSDATA_CPU_NR; + else + /* This is special because extradata_complete might have + * remaining data from previous sysdata, and it needs to be + * cleaned. 
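+		 * Illustrative example (values assumed): if the buffer
+		 * holds " env=prod\n cpu=2\n" from an earlier send,
+		 * clearing the byte at userdata_length truncates it back
+		 * to " env=prod\n", so the stale " cpu=2\n" suffix cannot
+		 * leak into later messages.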
+		 */
+		disable_sysdata_feature(nt, SYSDATA_CPU_NR);
+
+unlock_ok:
+	ret = strnlen(buf, count);
+unlock:
+	mutex_unlock(&dynamic_netconsole_mutex);
+	return ret;
+}
+
 CONFIGFS_ATTR(userdatum_, value);
+CONFIGFS_ATTR(sysdata_, cpu_nr_enabled);
+CONFIGFS_ATTR(sysdata_, taskname_enabled);
+CONFIGFS_ATTR(sysdata_, release_enabled);
 
 static struct configfs_attribute *userdatum_attrs[] = {
 	&userdatum_attr_value,
@@ -808,15 +1014,13 @@ static struct config_item *userdatum_make_item(struct config_group *group,
 	struct netconsole_target *nt;
 	struct userdatum *udm;
 	struct userdata *ud;
-	size_t child_count;
 
-	if (strlen(name) > MAX_USERDATA_NAME_LENGTH)
+	if (strlen(name) > MAX_EXTRADATA_NAME_LEN)
 		return ERR_PTR(-ENAMETOOLONG);
 
 	ud = to_userdata(&group->cg_item);
 	nt = userdata_to_target(ud);
-	child_count = list_count_nodes(&nt->userdata_group.cg_children);
-	if (child_count >= MAX_USERDATA_ITEMS)
+	if (count_extradata_entries(nt) >= MAX_EXTRADATA_ITEMS)
 		return ERR_PTR(-ENOSPC);
 
 	udm = kzalloc(sizeof(*udm), GFP_KERNEL);
@@ -842,6 +1046,9 @@ static void userdatum_drop(struct config_group *group, struct config_item *item)
 }
 
 static struct configfs_attribute *userdata_attrs[] = {
+	&sysdata_attr_cpu_nr_enabled,
+	&sysdata_attr_taskname_enabled,
+	&sysdata_attr_release_enabled,
 	NULL,
 };
 
@@ -1017,6 +1224,63 @@ static void populate_configfs_item(struct netconsole_target *nt,
 	init_target_config_group(nt, target_name);
 }
 
+static int sysdata_append_cpu_nr(struct netconsole_target *nt, int offset)
+{
+	/* Append cpu=%u at extradata_complete after userdata str */
+	return scnprintf(&nt->extradata_complete[offset],
+			 MAX_EXTRADATA_ENTRY_LEN, " cpu=%u\n",
+			 raw_smp_processor_id());
+}
+
+static int sysdata_append_taskname(struct netconsole_target *nt, int offset)
+{
+	return scnprintf(&nt->extradata_complete[offset],
+			 MAX_EXTRADATA_ENTRY_LEN, " taskname=%s\n",
+			 current->comm);
+}
+
+static int sysdata_append_release(struct netconsole_target *nt, int offset)
+{
+	return scnprintf(&nt->extradata_complete[offset],
+			 MAX_EXTRADATA_ENTRY_LEN, " release=%s\n",
+			 init_utsname()->release);
+}
+
+/*
+ * prepare_extradata - append sysdata to extradata_complete at runtime
+ * @nt: target to send message to
+ */
+static int prepare_extradata(struct netconsole_target *nt)
+{
+	u32 fields = SYSDATA_CPU_NR | SYSDATA_TASKNAME | SYSDATA_RELEASE;
+	int extradata_len;
+
+	/* userdata was appended when configfs write helper was called
+	 * by update_userdata().
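+	 *
+	 * Illustrative layout (values assumed): with a single userdata
+	 * entry "env=prod" and all three sysdata features enabled, the
+	 * final buffer reads
+	 * " env=prod\n cpu=2\n taskname=kworker/u8:1\n release=6.14.0\n",
+	 * i.e. userdata first (set at configfs-write time) and sysdata
+	 * appended below at send time.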
+ */ + extradata_len = nt->userdata_length; + + if (!(nt->sysdata_fields & fields)) + goto out; + + if (nt->sysdata_fields & SYSDATA_CPU_NR) + extradata_len += sysdata_append_cpu_nr(nt, extradata_len); + if (nt->sysdata_fields & SYSDATA_TASKNAME) + extradata_len += sysdata_append_taskname(nt, extradata_len); + if (nt->sysdata_fields & SYSDATA_RELEASE) + extradata_len += sysdata_append_release(nt, extradata_len); + + WARN_ON_ONCE(extradata_len > + MAX_EXTRADATA_ENTRY_LEN * MAX_EXTRADATA_ITEMS); + +out: + return extradata_len; +} +#else /* CONFIG_NETCONSOLE_DYNAMIC not set */ +static int prepare_extradata(struct netconsole_target *nt) +{ + return 0; +} #endif /* CONFIG_NETCONSOLE_DYNAMIC */ /* Handle network interface device notifications */ @@ -1117,29 +1381,28 @@ static void send_msg_no_fragmentation(struct netconsole_target *nt, int msg_len, int release_len) { - static char buf[MAX_PRINT_CHUNK]; /* protected by target_list_lock */ - const char *userdata = NULL; + const char *extradata = NULL; const char *release; #ifdef CONFIG_NETCONSOLE_DYNAMIC - userdata = nt->userdata_complete; + extradata = nt->extradata_complete; #endif if (release_len) { release = init_utsname()->release; - scnprintf(buf, MAX_PRINT_CHUNK, "%s,%s", release, msg); + scnprintf(nt->buf, MAX_PRINT_CHUNK, "%s,%s", release, msg); msg_len += release_len; } else { - memcpy(buf, msg, msg_len); + memcpy(nt->buf, msg, msg_len); } - if (userdata) - msg_len += scnprintf(&buf[msg_len], + if (extradata) + msg_len += scnprintf(&nt->buf[msg_len], MAX_PRINT_CHUNK - msg_len, - "%s", userdata); + "%s", extradata); - send_udp(nt, buf, msg_len); + send_udp(nt, nt->buf, msg_len); } static void append_release(char *buf) @@ -1150,28 +1413,27 @@ static void append_release(char *buf) scnprintf(buf, MAX_PRINT_CHUNK, "%s,", release); } -static void send_fragmented_body(struct netconsole_target *nt, char *buf, +static void send_fragmented_body(struct netconsole_target *nt, const char *msgbody, int header_len, - int msgbody_len) + int msgbody_len, int extradata_len) { - const char *userdata = NULL; + int sent_extradata, preceding_bytes; + const char *extradata = NULL; int body_len, offset = 0; - int userdata_len = 0; #ifdef CONFIG_NETCONSOLE_DYNAMIC - userdata = nt->userdata_complete; - userdata_len = nt->userdata_length; + extradata = nt->extradata_complete; #endif /* body_len represents the number of bytes that will be sent. This is * bigger than MAX_PRINT_CHUNK, thus, it will be split in multiple * packets */ - body_len = msgbody_len + userdata_len; + body_len = msgbody_len + extradata_len; /* In each iteration of the while loop below, we send a packet * containing the header and a portion of the body. The body is - * composed of two parts: msgbody and userdata. We keep track of how + * composed of two parts: msgbody and extradata. We keep track of how * many bytes have been sent so far using the offset variable, which * ranges from 0 to the total length of the body. 
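 	 *
 	 * Worked example (numbers assumed for illustration): with
 	 * MAX_PRINT_CHUNK at 1000 and roughly 64 bytes consumed by the
 	 * persisted header plus the ",ncfrag=offset/body_len;" annotation,
 	 * each datagram carries about 936 body bytes; msgbody_len=1500
 	 * plus extradata_len=300 gives body_len=1800, so two datagrams go
 	 * out: the first all msgbody, the second the msgbody tail followed
 	 * by the extradata.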
*/ @@ -1181,7 +1443,7 @@ static void send_fragmented_body(struct netconsole_target *nt, char *buf, int this_offset = 0; int this_chunk = 0; - this_header += scnprintf(buf + this_header, + this_header += scnprintf(nt->buf + this_header, MAX_PRINT_CHUNK - this_header, ",ncfrag=%d/%d;", offset, body_len); @@ -1192,47 +1454,48 @@ static void send_fragmented_body(struct netconsole_target *nt, char *buf, MAX_PRINT_CHUNK - this_header); if (WARN_ON_ONCE(this_chunk <= 0)) return; - memcpy(buf + this_header, msgbody + offset, this_chunk); + memcpy(nt->buf + this_header, msgbody + offset, + this_chunk); this_offset += this_chunk; } /* msgbody was finally written, either in the previous * messages and/or in the current buf. Time to write - * the userdata. + * the extradata. */ msgbody_written |= offset + this_offset >= msgbody_len; - /* Msg body is fully written and there is pending userdata to - * write, append userdata in this chunk + /* Msg body is fully written and there is pending extradata to + * write, append extradata in this chunk */ if (msgbody_written && offset + this_offset < body_len) { /* Track how much user data was already sent. First * time here, sent_userdata is zero */ - int sent_userdata = (offset + this_offset) - msgbody_len; + sent_extradata = (offset + this_offset) - msgbody_len; /* offset of bytes used in current buf */ - int preceding_bytes = this_chunk + this_header; + preceding_bytes = this_chunk + this_header; - if (WARN_ON_ONCE(sent_userdata < 0)) + if (WARN_ON_ONCE(sent_extradata < 0)) return; - this_chunk = min(userdata_len - sent_userdata, + this_chunk = min(extradata_len - sent_extradata, MAX_PRINT_CHUNK - preceding_bytes); if (WARN_ON_ONCE(this_chunk < 0)) /* this_chunk could be zero if all the previous * message used all the buffer. 
This is not a - * problem, userdata will be sent in the next + * problem, extradata will be sent in the next * iteration */ return; - memcpy(buf + this_header + this_offset, - userdata + sent_userdata, + memcpy(nt->buf + this_header + this_offset, + extradata + sent_extradata, this_chunk); this_offset += this_chunk; } - send_udp(nt, buf, this_header + this_offset); + send_udp(nt, nt->buf, this_header + this_offset); offset += this_offset; } } @@ -1240,9 +1503,9 @@ static void send_fragmented_body(struct netconsole_target *nt, char *buf, static void send_msg_fragmented(struct netconsole_target *nt, const char *msg, int msg_len, - int release_len) + int release_len, + int extradata_len) { - static char buf[MAX_PRINT_CHUNK]; /* protected by target_list_lock */ int header_len, msgbody_len; const char *msgbody; @@ -1260,16 +1523,17 @@ static void send_msg_fragmented(struct netconsole_target *nt, * "ncfrag=/" */ if (release_len) - append_release(buf); + append_release(nt->buf); /* Copy the header into the buffer */ - memcpy(buf + release_len, msg, header_len); + memcpy(nt->buf + release_len, msg, header_len); header_len += release_len; /* for now on, the header will be persisted, and the msgbody * will be replaced */ - send_fragmented_body(nt, buf, msgbody, header_len, msgbody_len); + send_fragmented_body(nt, msgbody, header_len, msgbody_len, + extradata_len); } /** @@ -1285,20 +1549,19 @@ static void send_msg_fragmented(struct netconsole_target *nt, static void send_ext_msg_udp(struct netconsole_target *nt, const char *msg, int msg_len) { - int userdata_len = 0; int release_len = 0; + int extradata_len; -#ifdef CONFIG_NETCONSOLE_DYNAMIC - userdata_len = nt->userdata_length; -#endif + extradata_len = prepare_extradata(nt); if (nt->release) release_len = strlen(init_utsname()->release) + 1; - if (msg_len + release_len + userdata_len <= MAX_PRINT_CHUNK) + if (msg_len + release_len + extradata_len <= MAX_PRINT_CHUNK) return send_msg_no_fragmentation(nt, msg, msg_len, release_len); - return send_msg_fragmented(nt, msg, msg_len, release_len); + return send_msg_fragmented(nt, msg, msg_len, release_len, + extradata_len); } static void write_ext_msg(struct console *con, const char *msg, diff --git a/drivers/net/netdevsim/bpf.c b/drivers/net/netdevsim/bpf.c index 608953d4f98da..49537d3c41205 100644 --- a/drivers/net/netdevsim/bpf.c +++ b/drivers/net/netdevsim/bpf.c @@ -296,7 +296,8 @@ static int nsim_setup_prog_checks(struct netdevsim *ns, struct netdev_bpf *bpf) NSIM_EA(bpf->extack, "attempt to load offloaded prog to drv"); return -EINVAL; } - if (ns->netdev->mtu > NSIM_XDP_MAX_MTU) { + if (bpf->prog && !bpf->prog->aux->xdp_has_frags && + ns->netdev->mtu > NSIM_XDP_MAX_MTU) { NSIM_EA(bpf->extack, "MTU too large w/ XDP enabled"); return -EINVAL; } diff --git a/drivers/net/netdevsim/ethtool.c b/drivers/net/netdevsim/ethtool.c index 5c80fbee79138..4d191a3293c74 100644 --- a/drivers/net/netdevsim/ethtool.c +++ b/drivers/net/netdevsim/ethtool.c @@ -107,10 +107,8 @@ nsim_set_channels(struct net_device *dev, struct ethtool_channels *ch) struct netdevsim *ns = netdev_priv(dev); int err; - netdev_lock(dev); err = netif_set_real_num_queues(dev, ch->combined_count, ch->combined_count); - netdev_unlock(dev); if (err) return err; @@ -184,9 +182,11 @@ static const struct ethtool_ops nsim_ethtool_ops = { static void nsim_ethtool_ring_init(struct netdevsim *ns) { + ns->ethtool.ring.rx_pending = 512; ns->ethtool.ring.rx_max_pending = 4096; ns->ethtool.ring.rx_jumbo_max_pending = 4096; 
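+	/* with rx/tx_pending defaulting to 512 below the 4096 maxima,
+	 * `ethtool -g` on a netdevsim port should now report non-zero
+	 * current ring sizes (illustrative expectation)
+	 */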
ns->ethtool.ring.rx_mini_max_pending = 4096; + ns->ethtool.ring.tx_pending = 512; ns->ethtool.ring.tx_max_pending = 4096; } diff --git a/drivers/net/netdevsim/ipsec.c b/drivers/net/netdevsim/ipsec.c index 88187dd4eb2d4..d88bdb9a17176 100644 --- a/drivers/net/netdevsim/ipsec.c +++ b/drivers/net/netdevsim/ipsec.c @@ -217,20 +217,9 @@ static void nsim_ipsec_del_sa(struct xfrm_state *xs) ipsec->count--; } -static bool nsim_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs) -{ - struct netdevsim *ns = netdev_priv(xs->xso.real_dev); - struct nsim_ipsec *ipsec = &ns->ipsec; - - ipsec->ok++; - - return true; -} - static const struct xfrmdev_ops nsim_xfrmdev_ops = { .xdo_dev_state_add = nsim_ipsec_add_sa, .xdo_dev_state_delete = nsim_ipsec_del_sa, - .xdo_dev_offload_ok = nsim_ipsec_offload_ok, }; bool nsim_ipsec_tx(struct netdevsim *ns, struct sk_buff *skb) diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c index 42f247cbdceec..b67af4651185b 100644 --- a/drivers/net/netdevsim/netdev.c +++ b/drivers/net/netdevsim/netdev.c @@ -25,6 +25,7 @@ #include #include #include +#include #include #include #include @@ -87,7 +88,8 @@ static netdev_tx_t nsim_start_xmit(struct sk_buff *skb, struct net_device *dev) if (unlikely(nsim_forward_skb(peer_dev, skb, rq) == NET_RX_DROP)) goto out_drop_cnt; - napi_schedule(&rq->napi); + if (!hrtimer_active(&rq->napi_timer)) + hrtimer_start(&rq->napi_timer, us_to_ktime(5), HRTIMER_MODE_REL); rcu_read_unlock(); u64_stats_update_begin(&ns->syncp); @@ -114,7 +116,8 @@ static int nsim_change_mtu(struct net_device *dev, int new_mtu) { struct netdevsim *ns = netdev_priv(dev); - if (ns->xdp.prog && new_mtu > NSIM_XDP_MAX_MTU) + if (ns->xdp.prog && !ns->xdp.prog->aux->xdp_has_frags && + new_mtu > NSIM_XDP_MAX_MTU) return -EBUSY; WRITE_ONCE(dev->mtu, new_mtu); @@ -401,7 +404,7 @@ static int nsim_init_napi(struct netdevsim *ns) for (i = 0; i < dev->num_rx_queues; i++) { rq = ns->rq[i]; - netif_napi_add_config(dev, &rq->napi, nsim_poll, i); + netif_napi_add_config_locked(dev, &rq->napi, nsim_poll, i); } for (i = 0; i < dev->num_rx_queues; i++) { @@ -421,11 +424,27 @@ static int nsim_init_napi(struct netdevsim *ns) } for (i = 0; i < dev->num_rx_queues; i++) - __netif_napi_del(&ns->rq[i]->napi); + __netif_napi_del_locked(&ns->rq[i]->napi); return err; } +static enum hrtimer_restart nsim_napi_schedule(struct hrtimer *timer) +{ + struct nsim_rq *rq; + + rq = container_of(timer, struct nsim_rq, napi_timer); + napi_schedule(&rq->napi); + + return HRTIMER_NORESTART; +} + +static void nsim_rq_timer_init(struct nsim_rq *rq) +{ + hrtimer_init(&rq->napi_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + rq->napi_timer.function = nsim_napi_schedule; +} + static void nsim_enable_napi(struct netdevsim *ns) { struct net_device *dev = ns->netdev; @@ -435,7 +454,7 @@ static void nsim_enable_napi(struct netdevsim *ns) struct nsim_rq *rq = ns->rq[i]; netif_queue_set_napi(dev, i, NETDEV_QUEUE_TYPE_RX, &rq->napi); - napi_enable(&rq->napi); + napi_enable_locked(&rq->napi); } } @@ -444,6 +463,8 @@ static int nsim_open(struct net_device *dev) struct netdevsim *ns = netdev_priv(dev); int err; + netdev_assert_locked(dev); + err = nsim_init_napi(ns); if (err) return err; @@ -461,8 +482,8 @@ static void nsim_del_napi(struct netdevsim *ns) for (i = 0; i < dev->num_rx_queues; i++) { struct nsim_rq *rq = ns->rq[i]; - napi_disable(&rq->napi); - __netif_napi_del(&rq->napi); + napi_disable_locked(&rq->napi); + __netif_napi_del_locked(&rq->napi); } synchronize_net(); @@ -477,6 +498,8 @@ 
static int nsim_stop(struct net_device *dev) struct netdevsim *ns = netdev_priv(dev); struct netdevsim *peer; + netdev_assert_locked(dev); + netif_carrier_off(dev); peer = rtnl_dereference(ns->peer); if (peer) @@ -615,11 +638,13 @@ static struct nsim_rq *nsim_queue_alloc(void) return NULL; skb_queue_head_init(&rq->skb_queue); + nsim_rq_timer_init(rq); return rq; } static void nsim_queue_free(struct nsim_rq *rq) { + hrtimer_cancel(&rq->napi_timer); skb_queue_purge_reason(&rq->skb_queue, SKB_DROP_REASON_QUEUE_PURGE); kfree(rq); } @@ -645,8 +670,11 @@ nsim_queue_mem_alloc(struct net_device *dev, void *per_queue_mem, int idx) if (ns->rq_reset_mode > 3) return -EINVAL; - if (ns->rq_reset_mode == 1) + if (ns->rq_reset_mode == 1) { + if (!netif_running(ns->netdev)) + return -ENETDOWN; return nsim_create_page_pool(&qmem->pp, &ns->rq[idx]->napi); + } qmem->rq = nsim_queue_alloc(); if (!qmem->rq) @@ -657,7 +685,8 @@ nsim_queue_mem_alloc(struct net_device *dev, void *per_queue_mem, int idx) goto err_free; if (!ns->rq_reset_mode) - netif_napi_add_config(dev, &qmem->rq->napi, nsim_poll, idx); + netif_napi_add_config_locked(dev, &qmem->rq->napi, nsim_poll, + idx); return 0; @@ -674,7 +703,7 @@ static void nsim_queue_mem_free(struct net_device *dev, void *per_queue_mem) page_pool_destroy(qmem->pp); if (qmem->rq) { if (!ns->rq_reset_mode) - netif_napi_del(&qmem->rq->napi); + netif_napi_del_locked(&qmem->rq->napi); page_pool_destroy(qmem->rq->page_pool); nsim_queue_free(qmem->rq); } @@ -686,9 +715,11 @@ nsim_queue_start(struct net_device *dev, void *per_queue_mem, int idx) struct nsim_queue_mem *qmem = per_queue_mem; struct netdevsim *ns = netdev_priv(dev); + netdev_assert_locked(dev); + if (ns->rq_reset_mode == 1) { ns->rq[idx]->page_pool = qmem->pp; - napi_enable(&ns->rq[idx]->napi); + napi_enable_locked(&ns->rq[idx]->napi); return 0; } @@ -696,15 +727,17 @@ nsim_queue_start(struct net_device *dev, void *per_queue_mem, int idx) * here we want to test various call orders. 
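 	 *
 	 * Summary of the branches below: mode 0 adds the replacement NAPI
 	 * at alloc time, mode 1 reuses the existing NAPI and only swaps
 	 * the page pool, mode 2 deletes the old NAPI before adding the
 	 * new one, and mode 3 adds the new one before deleting the old.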
*/ if (ns->rq_reset_mode == 2) { - netif_napi_del(&ns->rq[idx]->napi); - netif_napi_add_config(dev, &qmem->rq->napi, nsim_poll, idx); + netif_napi_del_locked(&ns->rq[idx]->napi); + netif_napi_add_config_locked(dev, &qmem->rq->napi, nsim_poll, + idx); } else if (ns->rq_reset_mode == 3) { - netif_napi_add_config(dev, &qmem->rq->napi, nsim_poll, idx); - netif_napi_del(&ns->rq[idx]->napi); + netif_napi_add_config_locked(dev, &qmem->rq->napi, nsim_poll, + idx); + netif_napi_del_locked(&ns->rq[idx]->napi); } ns->rq[idx] = qmem->rq; - napi_enable(&ns->rq[idx]->napi); + napi_enable_locked(&ns->rq[idx]->napi); return 0; } @@ -714,7 +747,9 @@ static int nsim_queue_stop(struct net_device *dev, void *per_queue_mem, int idx) struct nsim_queue_mem *qmem = per_queue_mem; struct netdevsim *ns = netdev_priv(dev); - napi_disable(&ns->rq[idx]->napi); + netdev_assert_locked(dev); + + napi_disable_locked(&ns->rq[idx]->napi); if (ns->rq_reset_mode == 1) { qmem->pp = ns->rq[idx]->page_pool; @@ -753,12 +788,7 @@ nsim_qreset_write(struct file *file, const char __user *data, if (ret != 2) return -EINVAL; - rtnl_lock(); - if (!netif_running(ns->netdev)) { - ret = -ENETDOWN; - goto exit_unlock; - } - + netdev_lock(ns->netdev); if (queue >= ns->netdev->real_num_rx_queues) { ret = -EINVAL; goto exit_unlock; @@ -772,7 +802,7 @@ nsim_qreset_write(struct file *file, const char __user *data, ret = count; exit_unlock: - rtnl_unlock(); + netdev_unlock(ns->netdev); return ret; } diff --git a/drivers/net/netdevsim/netdevsim.h b/drivers/net/netdevsim/netdevsim.h index 96d54c08043d3..665020d18f294 100644 --- a/drivers/net/netdevsim/netdevsim.h +++ b/drivers/net/netdevsim/netdevsim.h @@ -54,7 +54,6 @@ struct nsim_ipsec { struct dentry *pfile; u32 count; u32 tx; - u32 ok; }; #define NSIM_MACSEC_MAX_SECY_COUNT 3 @@ -97,6 +96,7 @@ struct nsim_rq { struct napi_struct napi; struct sk_buff_head skb_queue; struct page_pool *page_pool; + struct hrtimer napi_timer; }; struct netdevsim { diff --git a/drivers/net/netkit.c b/drivers/net/netkit.c index 1e1b00756be7d..d072a7968f567 100644 --- a/drivers/net/netkit.c +++ b/drivers/net/netkit.c @@ -65,7 +65,6 @@ static void netkit_prep_forward(struct sk_buff *skb, skb_reset_mac_header(skb); if (!xnet) return; - ipvs_reset(skb); skb_clear_tstamp(skb); if (xnet_scrub) netkit_xnet(skb); @@ -327,17 +326,20 @@ static int netkit_validate(struct nlattr *tb[], struct nlattr *data[], static struct rtnl_link_ops netkit_link_ops; -static int netkit_new_link(struct net *peer_net, struct net_device *dev, - struct nlattr *tb[], struct nlattr *data[], +static int netkit_new_link(struct net_device *dev, + struct rtnl_newlink_params *params, struct netlink_ext_ack *extack) { - struct nlattr *peer_tb[IFLA_MAX + 1], **tbp = tb, *attr; - enum netkit_action policy_prim = NETKIT_PASS; - enum netkit_action policy_peer = NETKIT_PASS; + struct net *peer_net = rtnl_newlink_peer_net(params); enum netkit_scrub scrub_prim = NETKIT_SCRUB_DEFAULT; enum netkit_scrub scrub_peer = NETKIT_SCRUB_DEFAULT; + struct nlattr *peer_tb[IFLA_MAX + 1], **tbp, *attr; + enum netkit_action policy_prim = NETKIT_PASS; + enum netkit_action policy_peer = NETKIT_PASS; + struct nlattr **data = params->data; enum netkit_mode mode = NETKIT_L3; unsigned char ifname_assign_type; + struct nlattr **tb = params->tb; u16 headroom = 0, tailroom = 0; struct ifinfomsg *ifmp = NULL; struct net_device *peer; @@ -345,6 +347,7 @@ static int netkit_new_link(struct net *peer_net, struct net_device *dev, struct netkit *nk; int err; + tbp = tb; if (data) { if 
(data[IFLA_NETKIT_MODE]) mode = nla_get_u32(data[IFLA_NETKIT_MODE]); diff --git a/drivers/net/ovpn/Makefile b/drivers/net/ovpn/Makefile new file mode 100644 index 0000000000000..229be66167e1f --- /dev/null +++ b/drivers/net/ovpn/Makefile @@ -0,0 +1,22 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# ovpn -- OpenVPN data channel offload in kernel space +# +# Copyright (C) 2020-2025 OpenVPN, Inc. +# +# Author: Antonio Quartulli + +obj-$(CONFIG_OVPN) := ovpn.o +ovpn-y += bind.o +ovpn-y += crypto.o +ovpn-y += crypto_aead.o +ovpn-y += main.o +ovpn-y += io.o +ovpn-y += netlink.o +ovpn-y += netlink-gen.o +ovpn-y += peer.o +ovpn-y += pktid.o +ovpn-y += socket.o +ovpn-y += stats.o +ovpn-y += tcp.o +ovpn-y += udp.o diff --git a/drivers/net/ovpn/bind.c b/drivers/net/ovpn/bind.c new file mode 100644 index 0000000000000..24d2788a277e6 --- /dev/null +++ b/drivers/net/ovpn/bind.c @@ -0,0 +1,55 @@ +// SPDX-License-Identifier: GPL-2.0 +/* OpenVPN data channel offload + * + * Copyright (C) 2012-2025 OpenVPN, Inc. + * + * Author: James Yonan + * Antonio Quartulli + */ + +#include +#include + +#include "ovpnpriv.h" +#include "bind.h" +#include "peer.h" + +/** + * ovpn_bind_from_sockaddr - retrieve binding matching sockaddr + * @ss: the sockaddr to match + * + * Return: the bind matching the passed sockaddr if found, NULL otherwise + */ +struct ovpn_bind *ovpn_bind_from_sockaddr(const struct sockaddr_storage *ss) +{ + struct ovpn_bind *bind; + size_t sa_len; + + if (ss->ss_family == AF_INET) + sa_len = sizeof(struct sockaddr_in); + else if (ss->ss_family == AF_INET6) + sa_len = sizeof(struct sockaddr_in6); + else + return ERR_PTR(-EAFNOSUPPORT); + + bind = kzalloc(sizeof(*bind), GFP_ATOMIC); + if (unlikely(!bind)) + return ERR_PTR(-ENOMEM); + + memcpy(&bind->remote, ss, sa_len); + + return bind; +} + +/** + * ovpn_bind_reset - assign new binding to peer + * @peer: the peer whose binding has to be replaced + * @new: the new bind to assign + */ +void ovpn_bind_reset(struct ovpn_peer *peer, struct ovpn_bind *new) +{ + lockdep_assert_held(&peer->lock); + + kfree_rcu(rcu_replace_pointer(peer->bind, new, + lockdep_is_held(&peer->lock)), rcu); +} diff --git a/drivers/net/ovpn/bind.h b/drivers/net/ovpn/bind.h new file mode 100644 index 0000000000000..4e0b8398bfd9e --- /dev/null +++ b/drivers/net/ovpn/bind.h @@ -0,0 +1,101 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* OpenVPN data channel offload + * + * Copyright (C) 2012-2025 OpenVPN, Inc. 
+ * + * Author: James Yonan + * Antonio Quartulli + */ + +#ifndef _NET_OVPN_OVPNBIND_H_ +#define _NET_OVPN_OVPNBIND_H_ + +#include +#include +#include +#include +#include +#include + +struct ovpn_peer; + +/** + * union ovpn_sockaddr - basic transport layer address + * @in4: IPv4 address + * @in6: IPv6 address + */ +union ovpn_sockaddr { + struct sockaddr_in in4; + struct sockaddr_in6 in6; +}; + +/** + * struct ovpn_bind - remote peer binding + * @remote: the remote peer sockaddress + * @local: local endpoint used to talk to the peer + * @local.ipv4: local IPv4 used to talk to the peer + * @local.ipv6: local IPv6 used to talk to the peer + * @rcu: used to schedule RCU cleanup job + */ +struct ovpn_bind { + union ovpn_sockaddr remote; /* remote sockaddr */ + + union { + struct in_addr ipv4; + struct in6_addr ipv6; + } local; + + struct rcu_head rcu; +}; + +/** + * ovpn_bind_skb_src_match - match packet source with binding + * @bind: the binding to match + * @skb: the packet to match + * + * Return: true if the packet source matches the remote peer sockaddr + * in the binding + */ +static inline bool ovpn_bind_skb_src_match(const struct ovpn_bind *bind, + const struct sk_buff *skb) +{ + const union ovpn_sockaddr *remote; + + if (unlikely(!bind)) + return false; + + remote = &bind->remote; + + switch (skb->protocol) { + case htons(ETH_P_IP): + if (unlikely(remote->in4.sin_family != AF_INET)) + return false; + + if (unlikely(remote->in4.sin_addr.s_addr != ip_hdr(skb)->saddr)) + return false; + + if (unlikely(remote->in4.sin_port != udp_hdr(skb)->source)) + return false; + break; + case htons(ETH_P_IPV6): + if (unlikely(remote->in6.sin6_family != AF_INET6)) + return false; + + if (unlikely(!ipv6_addr_equal(&remote->in6.sin6_addr, + &ipv6_hdr(skb)->saddr))) + return false; + + if (unlikely(remote->in6.sin6_port != udp_hdr(skb)->source)) + return false; + break; + default: + return false; + } + + return true; +} + +struct ovpn_bind *ovpn_bind_from_sockaddr(const struct sockaddr_storage *sa); +void ovpn_bind_reset(struct ovpn_peer *peer, struct ovpn_bind *bind); + +#endif /* _NET_OVPN_OVPNBIND_H_ */ diff --git a/drivers/net/ovpn/crypto.c b/drivers/net/ovpn/crypto.c new file mode 100644 index 0000000000000..f6cb14c9ec4a8 --- /dev/null +++ b/drivers/net/ovpn/crypto.c @@ -0,0 +1,211 @@ +// SPDX-License-Identifier: GPL-2.0 +/* OpenVPN data channel offload + * + * Copyright (C) 2020-2025 OpenVPN, Inc. + * + * Author: James Yonan + * Antonio Quartulli + */ + +#include +#include +#include +#include + +#include "ovpnpriv.h" +#include "main.h" +#include "pktid.h" +#include "crypto_aead.h" +#include "crypto.h" + +static void ovpn_ks_destroy_rcu(struct rcu_head *head) +{ + struct ovpn_crypto_key_slot *ks; + + ks = container_of(head, struct ovpn_crypto_key_slot, rcu); + ovpn_aead_crypto_key_slot_destroy(ks); +} + +void ovpn_crypto_key_slot_release(struct kref *kref) +{ + struct ovpn_crypto_key_slot *ks; + + ks = container_of(kref, struct ovpn_crypto_key_slot, refcount); + call_rcu(&ks->rcu, ovpn_ks_destroy_rcu); +} + +/* can only be invoked when all peer references have been dropped (i.e. 
RCU + * release routine) + */ +void ovpn_crypto_state_release(struct ovpn_crypto_state *cs) +{ + struct ovpn_crypto_key_slot *ks; + + ks = rcu_access_pointer(cs->slots[0]); + if (ks) { + RCU_INIT_POINTER(cs->slots[0], NULL); + ovpn_crypto_key_slot_put(ks); + } + + ks = rcu_access_pointer(cs->slots[1]); + if (ks) { + RCU_INIT_POINTER(cs->slots[1], NULL); + ovpn_crypto_key_slot_put(ks); + } +} + +/* removes the key matching the specified id from the crypto context */ +void ovpn_crypto_kill_key(struct ovpn_crypto_state *cs, u8 key_id) +{ + struct ovpn_crypto_key_slot *ks = NULL; + + spin_lock_bh(&cs->lock); + if (rcu_access_pointer(cs->slots[0])->key_id == key_id) { + ks = rcu_replace_pointer(cs->slots[0], NULL, + lockdep_is_held(&cs->lock)); + } else if (rcu_access_pointer(cs->slots[1])->key_id == key_id) { + ks = rcu_replace_pointer(cs->slots[1], NULL, + lockdep_is_held(&cs->lock)); + } + spin_unlock_bh(&cs->lock); + + if (ks) + ovpn_crypto_key_slot_put(ks); +} + +/* Reset the ovpn_crypto_state object in a way that is atomic + * to RCU readers. + */ +int ovpn_crypto_state_reset(struct ovpn_crypto_state *cs, + const struct ovpn_peer_key_reset *pkr) +{ + struct ovpn_crypto_key_slot *old = NULL, *new; + u8 idx; + + if (pkr->slot != OVPN_KEY_SLOT_PRIMARY && + pkr->slot != OVPN_KEY_SLOT_SECONDARY) + return -EINVAL; + + new = ovpn_aead_crypto_key_slot_new(&pkr->key); + if (IS_ERR(new)) + return PTR_ERR(new); + + spin_lock_bh(&cs->lock); + idx = cs->primary_idx; + switch (pkr->slot) { + case OVPN_KEY_SLOT_PRIMARY: + old = rcu_replace_pointer(cs->slots[idx], new, + lockdep_is_held(&cs->lock)); + break; + case OVPN_KEY_SLOT_SECONDARY: + old = rcu_replace_pointer(cs->slots[!idx], new, + lockdep_is_held(&cs->lock)); + break; + } + spin_unlock_bh(&cs->lock); + + if (old) + ovpn_crypto_key_slot_put(old); + + return 0; +} + +void ovpn_crypto_key_slot_delete(struct ovpn_crypto_state *cs, + enum ovpn_key_slot slot) +{ + struct ovpn_crypto_key_slot *ks = NULL; + u8 idx; + + if (slot != OVPN_KEY_SLOT_PRIMARY && + slot != OVPN_KEY_SLOT_SECONDARY) { + pr_warn("Invalid slot to release: %u\n", slot); + return; + } + + spin_lock_bh(&cs->lock); + idx = cs->primary_idx; + switch (slot) { + case OVPN_KEY_SLOT_PRIMARY: + ks = rcu_replace_pointer(cs->slots[idx], NULL, + lockdep_is_held(&cs->lock)); + break; + case OVPN_KEY_SLOT_SECONDARY: + ks = rcu_replace_pointer(cs->slots[!idx], NULL, + lockdep_is_held(&cs->lock)); + break; + } + spin_unlock_bh(&cs->lock); + + if (!ks) { + pr_debug("Key slot already released: %u\n", slot); + return; + } + + pr_debug("deleting key slot %u, key_id=%u\n", slot, ks->key_id); + ovpn_crypto_key_slot_put(ks); +} + +/* this swap is not atomic, but there will be a very short time frame where the + * old_secondary key won't be available. This should not be a big deal as most + * likely both peers are already using the new primary at this point. + */ +void ovpn_crypto_key_slots_swap(struct ovpn_crypto_state *cs) +{ + const struct ovpn_crypto_key_slot *old_primary, *old_secondary; + u8 idx; + + spin_lock_bh(&cs->lock); + idx = cs->primary_idx; + old_primary = rcu_dereference_protected(cs->slots[idx], + lockdep_is_held(&cs->lock)); + old_secondary = rcu_dereference_protected(cs->slots[!idx], + lockdep_is_held(&cs->lock)); + /* perform real swap by switching the index of the primary key */ + WRITE_ONCE(cs->primary_idx, !cs->primary_idx); + + pr_debug("key swapped: (old primary) %d <-> (new primary) %d\n", + old_primary ? old_primary->key_id : -1, + old_secondary ? 
old_secondary->key_id : -1);
+
+	spin_unlock_bh(&cs->lock);
+}
+
+/**
+ * ovpn_crypto_config_get - populate keyconf object with non-sensitive key data
+ * @cs: the crypto state to extract the key data from
+ * @slot: the specific slot to inspect
+ * @keyconf: the output object to populate
+ *
+ * Return: 0 on success or a negative error code otherwise
+ */
+int ovpn_crypto_config_get(struct ovpn_crypto_state *cs,
+			   enum ovpn_key_slot slot,
+			   struct ovpn_key_config *keyconf)
+{
+	struct ovpn_crypto_key_slot *ks;
+	int idx;
+
+	switch (slot) {
+	case OVPN_KEY_SLOT_PRIMARY:
+		idx = cs->primary_idx;
+		break;
+	case OVPN_KEY_SLOT_SECONDARY:
+		idx = !cs->primary_idx;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	rcu_read_lock();
+	ks = rcu_dereference(cs->slots[idx]);
+	if (!ks) {
+		rcu_read_unlock();
+		return -ENOENT;
+	}
+
+	keyconf->cipher_alg = ovpn_aead_crypto_alg(ks);
+	keyconf->key_id = ks->key_id;
+	rcu_read_unlock();
+
+	return 0;
+}
diff --git a/drivers/net/ovpn/crypto.h b/drivers/net/ovpn/crypto.h
new file mode 100644
index 0000000000000..4397253893ff2
--- /dev/null
+++ b/drivers/net/ovpn/crypto.h
@@ -0,0 +1,145 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* OpenVPN data channel offload
+ *
+ * Copyright (C) 2020-2025 OpenVPN, Inc.
+ *
+ * Author: James Yonan
+ *	   Antonio Quartulli
+ */
+
+#ifndef _NET_OVPN_OVPNCRYPTO_H_
+#define _NET_OVPN_OVPNCRYPTO_H_
+
+#include "pktid.h"
+#include "proto.h"
+
+/* info needed for both encrypt and decrypt directions */
+struct ovpn_key_direction {
+	const u8 *cipher_key;
+	size_t cipher_key_size;
+	const u8 *nonce_tail; /* only needed for GCM modes */
+	size_t nonce_tail_size; /* only needed for GCM modes */
+};
+
+/* all info for a particular symmetric key (primary or secondary) */
+struct ovpn_key_config {
+	enum ovpn_cipher_alg cipher_alg;
+	u8 key_id;
+	struct ovpn_key_direction encrypt;
+	struct ovpn_key_direction decrypt;
+};
+
+/* used to pass settings from netlink to the crypto engine */
+struct ovpn_peer_key_reset {
+	enum ovpn_key_slot slot;
+	struct ovpn_key_config key;
+};
+
+struct ovpn_crypto_key_slot {
+	u8 key_id;
+
+	struct crypto_aead *encrypt;
+	struct crypto_aead *decrypt;
+	u8 nonce_tail_xmit[OVPN_NONCE_TAIL_SIZE];
+	u8 nonce_tail_recv[OVPN_NONCE_TAIL_SIZE];
+
+	struct ovpn_pktid_recv pid_recv ____cacheline_aligned_in_smp;
+	struct ovpn_pktid_xmit pid_xmit ____cacheline_aligned_in_smp;
+	struct kref refcount;
+	struct rcu_head rcu;
+};
+
+struct ovpn_crypto_state {
+	struct ovpn_crypto_key_slot __rcu *slots[2];
+	u8 primary_idx;
+
+	/* protects primary and secondary slots */
+	spinlock_t lock;
+};
+
+static inline bool ovpn_crypto_key_slot_hold(struct ovpn_crypto_key_slot *ks)
+{
+	return kref_get_unless_zero(&ks->refcount);
+}
+
+static inline void ovpn_crypto_state_init(struct ovpn_crypto_state *cs)
+{
+	RCU_INIT_POINTER(cs->slots[0], NULL);
+	RCU_INIT_POINTER(cs->slots[1], NULL);
+	cs->primary_idx = 0;
+	spin_lock_init(&cs->lock);
+}
+
+static inline struct ovpn_crypto_key_slot *
+ovpn_crypto_key_id_to_slot(const struct ovpn_crypto_state *cs, u8 key_id)
+{
+	struct ovpn_crypto_key_slot *ks;
+	u8 idx;
+
+	if (unlikely(!cs))
+		return NULL;
+
+	rcu_read_lock();
+	idx = READ_ONCE(cs->primary_idx);
+	ks = rcu_dereference(cs->slots[idx]);
+	if (ks && ks->key_id == key_id) {
+		if (unlikely(!ovpn_crypto_key_slot_hold(ks)))
+			ks = NULL;
+		goto out;
+	}
+
+	ks = rcu_dereference(cs->slots[!idx]);
+	if (ks && ks->key_id == key_id) {
+		if (unlikely(!ovpn_crypto_key_slot_hold(ks)))
+			ks = NULL;
+		goto out;
+	}
+
+	/* when both key slots
are occupied but no matching key ID is found, ks + * has to be reset to NULL to avoid carrying a stale pointer + */ + ks = NULL; +out: + rcu_read_unlock(); + + return ks; +} + +static inline struct ovpn_crypto_key_slot * +ovpn_crypto_key_slot_primary(const struct ovpn_crypto_state *cs) +{ + struct ovpn_crypto_key_slot *ks; + + rcu_read_lock(); + ks = rcu_dereference(cs->slots[cs->primary_idx]); + if (unlikely(ks && !ovpn_crypto_key_slot_hold(ks))) + ks = NULL; + rcu_read_unlock(); + + return ks; +} + +void ovpn_crypto_key_slot_release(struct kref *kref); + +static inline void ovpn_crypto_key_slot_put(struct ovpn_crypto_key_slot *ks) +{ + kref_put(&ks->refcount, ovpn_crypto_key_slot_release); +} + +int ovpn_crypto_state_reset(struct ovpn_crypto_state *cs, + const struct ovpn_peer_key_reset *pkr); + +void ovpn_crypto_key_slot_delete(struct ovpn_crypto_state *cs, + enum ovpn_key_slot slot); + +void ovpn_crypto_state_release(struct ovpn_crypto_state *cs); + +void ovpn_crypto_key_slots_swap(struct ovpn_crypto_state *cs); + +int ovpn_crypto_config_get(struct ovpn_crypto_state *cs, + enum ovpn_key_slot slot, + struct ovpn_key_config *keyconf); + +void ovpn_crypto_kill_key(struct ovpn_crypto_state *cs, u8 key_id); + +#endif /* _NET_OVPN_OVPNCRYPTO_H_ */ diff --git a/drivers/net/ovpn/crypto_aead.c b/drivers/net/ovpn/crypto_aead.c new file mode 100644 index 0000000000000..26efe72987f3c --- /dev/null +++ b/drivers/net/ovpn/crypto_aead.c @@ -0,0 +1,409 @@ +// SPDX-License-Identifier: GPL-2.0 +/* OpenVPN data channel offload + * + * Copyright (C) 2020-2025 OpenVPN, Inc. + * + * Author: James Yonan + * Antonio Quartulli + */ + +#include +#include +#include +#include +#include + +#include "ovpnpriv.h" +#include "main.h" +#include "io.h" +#include "pktid.h" +#include "crypto_aead.h" +#include "crypto.h" +#include "peer.h" +#include "proto.h" +#include "skb.h" + +#define OVPN_AUTH_TAG_SIZE 16 +#define OVPN_AAD_SIZE (OVPN_OPCODE_SIZE + OVPN_NONCE_WIRE_SIZE) + +#define ALG_NAME_AES "gcm(aes)" +#define ALG_NAME_CHACHAPOLY "rfc7539(chacha20,poly1305)" + +static int ovpn_aead_encap_overhead(const struct ovpn_crypto_key_slot *ks) +{ + return OVPN_OPCODE_SIZE + /* OP header size */ + sizeof(u32) + /* Packet ID */ + crypto_aead_authsize(ks->encrypt); /* Auth Tag */ +} + +int ovpn_aead_encrypt(struct ovpn_peer *peer, struct ovpn_crypto_key_slot *ks, + struct sk_buff *skb) +{ + const unsigned int tag_size = crypto_aead_authsize(ks->encrypt); + struct aead_request *req; + struct sk_buff *trailer; + struct scatterlist *sg; + int nfrags, ret; + u32 pktid, op; + u8 *iv; + + ovpn_skb_cb(skb)->peer = peer; + ovpn_skb_cb(skb)->ks = ks; + + /* Sample AEAD header format: + * 48000001 00000005 7e7046bd 444a7e28 cc6387b1 64a4d6c1 380275a... + * [ OP32 ] [seq # ] [ auth tag ] [ payload ... 
] + * [4-byte + * IV head] + */ + + /* check that there's enough headroom in the skb for packet + * encapsulation + */ + if (unlikely(skb_cow_head(skb, OVPN_HEAD_ROOM))) + return -ENOBUFS; + + /* get number of skb frags and ensure that packet data is writable */ + nfrags = skb_cow_data(skb, 0, &trailer); + if (unlikely(nfrags < 0)) + return nfrags; + + if (unlikely(nfrags + 2 > (MAX_SKB_FRAGS + 2))) + return -ENOSPC; + + /* sg may be required by async crypto */ + ovpn_skb_cb(skb)->sg = kmalloc(sizeof(*ovpn_skb_cb(skb)->sg) * + (nfrags + 2), GFP_ATOMIC); + if (unlikely(!ovpn_skb_cb(skb)->sg)) + return -ENOMEM; + + sg = ovpn_skb_cb(skb)->sg; + + /* sg table: + * 0: op, wire nonce (AD, len=OVPN_OP_SIZE_V2+OVPN_NONCE_WIRE_SIZE), + * 1, 2, 3, ..., n: payload, + * n+1: auth_tag (len=tag_size) + */ + sg_init_table(sg, nfrags + 2); + + /* build scatterlist to encrypt packet payload */ + ret = skb_to_sgvec_nomark(skb, sg + 1, 0, skb->len); + if (unlikely(nfrags != ret)) { + ret = -EINVAL; + goto free_sg; + } + + /* append auth_tag onto scatterlist */ + __skb_push(skb, tag_size); + sg_set_buf(sg + nfrags + 1, skb->data, tag_size); + + /* obtain packet ID, which is used both as a first + * 4 bytes of nonce and last 4 bytes of associated data. + */ + ret = ovpn_pktid_xmit_next(&ks->pid_xmit, &pktid); + if (unlikely(ret < 0)) + goto free_sg; + + /* iv may be required by async crypto */ + ovpn_skb_cb(skb)->iv = kmalloc(OVPN_NONCE_SIZE, GFP_ATOMIC); + if (unlikely(!ovpn_skb_cb(skb)->iv)) { + ret = -ENOMEM; + goto free_sg; + } + + iv = ovpn_skb_cb(skb)->iv; + + /* concat 4 bytes packet id and 8 bytes nonce tail into 12 bytes + * nonce + */ + ovpn_pktid_aead_write(pktid, ks->nonce_tail_xmit, iv); + + /* make space for packet id and push it to the front */ + __skb_push(skb, OVPN_NONCE_WIRE_SIZE); + memcpy(skb->data, iv, OVPN_NONCE_WIRE_SIZE); + + /* add packet op as head of additional data */ + op = ovpn_opcode_compose(OVPN_DATA_V2, ks->key_id, peer->id); + __skb_push(skb, OVPN_OPCODE_SIZE); + BUILD_BUG_ON(sizeof(op) != OVPN_OPCODE_SIZE); + *((__force __be32 *)skb->data) = htonl(op); + + /* AEAD Additional data */ + sg_set_buf(sg, skb->data, OVPN_AAD_SIZE); + + req = aead_request_alloc(ks->encrypt, GFP_ATOMIC); + if (unlikely(!req)) { + ret = -ENOMEM; + goto free_iv; + } + + ovpn_skb_cb(skb)->req = req; + + /* setup async crypto operation */ + aead_request_set_tfm(req, ks->encrypt); + aead_request_set_callback(req, 0, ovpn_encrypt_post, skb); + aead_request_set_crypt(req, sg, sg, + skb->len - ovpn_aead_encap_overhead(ks), iv); + aead_request_set_ad(req, OVPN_AAD_SIZE); + + /* encrypt it */ + return crypto_aead_encrypt(req); +free_iv: + kfree(ovpn_skb_cb(skb)->iv); + ovpn_skb_cb(skb)->iv = NULL; +free_sg: + kfree(ovpn_skb_cb(skb)->sg); + ovpn_skb_cb(skb)->sg = NULL; + return ret; +} + +int ovpn_aead_decrypt(struct ovpn_peer *peer, struct ovpn_crypto_key_slot *ks, + struct sk_buff *skb) +{ + const unsigned int tag_size = crypto_aead_authsize(ks->decrypt); + int ret, payload_len, nfrags; + unsigned int payload_offset; + struct aead_request *req; + struct sk_buff *trailer; + struct scatterlist *sg; + u8 *iv; + + payload_offset = OVPN_AAD_SIZE + tag_size; + payload_len = skb->len - payload_offset; + + ovpn_skb_cb(skb)->payload_offset = payload_offset; + ovpn_skb_cb(skb)->peer = peer; + ovpn_skb_cb(skb)->ks = ks; + + /* sanity check on packet size, payload size must be >= 0 */ + if (unlikely(payload_len < 0)) + return -EINVAL; + + /* Prepare the skb data buffer to be accessed up until the auth tag. 
+ * This is required because this area is directly mapped into the sg + * list. + */ + if (unlikely(!pskb_may_pull(skb, payload_offset))) + return -ENODATA; + + /* get number of skb frags and ensure that packet data is writable */ + nfrags = skb_cow_data(skb, 0, &trailer); + if (unlikely(nfrags < 0)) + return nfrags; + + if (unlikely(nfrags + 2 > (MAX_SKB_FRAGS + 2))) + return -ENOSPC; + + /* sg may be required by async crypto */ + ovpn_skb_cb(skb)->sg = kmalloc(sizeof(*ovpn_skb_cb(skb)->sg) * + (nfrags + 2), GFP_ATOMIC); + if (unlikely(!ovpn_skb_cb(skb)->sg)) + return -ENOMEM; + + sg = ovpn_skb_cb(skb)->sg; + + /* sg table: + * 0: op, wire nonce (AD, len=OVPN_OPCODE_SIZE+OVPN_NONCE_WIRE_SIZE), + * 1, 2, 3, ..., n: payload, + * n+1: auth_tag (len=tag_size) + */ + sg_init_table(sg, nfrags + 2); + + /* packet op is head of additional data */ + sg_set_buf(sg, skb->data, OVPN_AAD_SIZE); + + /* build scatterlist to decrypt packet payload */ + ret = skb_to_sgvec_nomark(skb, sg + 1, payload_offset, payload_len); + if (unlikely(nfrags != ret)) { + ret = -EINVAL; + goto free_sg; + } + + /* append auth_tag onto scatterlist */ + sg_set_buf(sg + nfrags + 1, skb->data + OVPN_AAD_SIZE, tag_size); + + /* iv may be required by async crypto */ + ovpn_skb_cb(skb)->iv = kmalloc(OVPN_NONCE_SIZE, GFP_ATOMIC); + if (unlikely(!ovpn_skb_cb(skb)->iv)) { + ret = -ENOMEM; + goto free_sg; + } + + iv = ovpn_skb_cb(skb)->iv; + + /* copy nonce into IV buffer */ + memcpy(iv, skb->data + OVPN_OPCODE_SIZE, OVPN_NONCE_WIRE_SIZE); + memcpy(iv + OVPN_NONCE_WIRE_SIZE, ks->nonce_tail_recv, + OVPN_NONCE_TAIL_SIZE); + + req = aead_request_alloc(ks->decrypt, GFP_ATOMIC); + if (unlikely(!req)) { + ret = -ENOMEM; + goto free_iv; + } + + ovpn_skb_cb(skb)->req = req; + + /* setup async crypto operation */ + aead_request_set_tfm(req, ks->decrypt); + aead_request_set_callback(req, 0, ovpn_decrypt_post, skb); + aead_request_set_crypt(req, sg, sg, payload_len + tag_size, iv); + + aead_request_set_ad(req, OVPN_AAD_SIZE); + + /* decrypt it */ + return crypto_aead_decrypt(req); +free_iv: + kfree(ovpn_skb_cb(skb)->iv); + ovpn_skb_cb(skb)->iv = NULL; +free_sg: + kfree(ovpn_skb_cb(skb)->sg); + ovpn_skb_cb(skb)->sg = NULL; + return ret; +} + +/* Initialize a struct crypto_aead object */ +static struct crypto_aead *ovpn_aead_init(const char *title, + const char *alg_name, + const unsigned char *key, + unsigned int keylen) +{ + struct crypto_aead *aead; + int ret; + + aead = crypto_alloc_aead(alg_name, 0, 0); + if (IS_ERR(aead)) { + ret = PTR_ERR(aead); + pr_err("%s crypto_alloc_aead failed, err=%d\n", title, ret); + aead = NULL; + goto error; + } + + ret = crypto_aead_setkey(aead, key, keylen); + if (ret) { + pr_err("%s crypto_aead_setkey size=%u failed, err=%d\n", title, + keylen, ret); + goto error; + } + + ret = crypto_aead_setauthsize(aead, OVPN_AUTH_TAG_SIZE); + if (ret) { + pr_err("%s crypto_aead_setauthsize failed, err=%d\n", title, + ret); + goto error; + } + + /* basic AEAD assumption */ + if (crypto_aead_ivsize(aead) != OVPN_NONCE_SIZE) { + pr_err("%s IV size must be %d\n", title, OVPN_NONCE_SIZE); + ret = -EINVAL; + goto error; + } + + pr_debug("********* Cipher %s (%s)\n", alg_name, title); + pr_debug("*** IV size=%u\n", crypto_aead_ivsize(aead)); + pr_debug("*** req size=%u\n", crypto_aead_reqsize(aead)); + pr_debug("*** block size=%u\n", crypto_aead_blocksize(aead)); + pr_debug("*** auth size=%u\n", crypto_aead_authsize(aead)); + pr_debug("*** alignmask=0x%x\n", crypto_aead_alignmask(aead)); + + return aead; + +error: + 
crypto_free_aead(aead); + return ERR_PTR(ret); +} + +void ovpn_aead_crypto_key_slot_destroy(struct ovpn_crypto_key_slot *ks) +{ + if (!ks) + return; + + crypto_free_aead(ks->encrypt); + crypto_free_aead(ks->decrypt); + kfree(ks); +} + +struct ovpn_crypto_key_slot * +ovpn_aead_crypto_key_slot_new(const struct ovpn_key_config *kc) +{ + struct ovpn_crypto_key_slot *ks = NULL; + const char *alg_name; + int ret; + + /* validate crypto alg */ + switch (kc->cipher_alg) { + case OVPN_CIPHER_ALG_AES_GCM: + alg_name = ALG_NAME_AES; + break; + case OVPN_CIPHER_ALG_CHACHA20_POLY1305: + alg_name = ALG_NAME_CHACHAPOLY; + break; + default: + return ERR_PTR(-EOPNOTSUPP); + } + + if (kc->encrypt.nonce_tail_size != OVPN_NONCE_TAIL_SIZE || + kc->decrypt.nonce_tail_size != OVPN_NONCE_TAIL_SIZE) + return ERR_PTR(-EINVAL); + + /* build the key slot */ + ks = kmalloc(sizeof(*ks), GFP_KERNEL); + if (!ks) + return ERR_PTR(-ENOMEM); + + ks->encrypt = NULL; + ks->decrypt = NULL; + kref_init(&ks->refcount); + ks->key_id = kc->key_id; + + ks->encrypt = ovpn_aead_init("encrypt", alg_name, + kc->encrypt.cipher_key, + kc->encrypt.cipher_key_size); + if (IS_ERR(ks->encrypt)) { + ret = PTR_ERR(ks->encrypt); + ks->encrypt = NULL; + goto destroy_ks; + } + + ks->decrypt = ovpn_aead_init("decrypt", alg_name, + kc->decrypt.cipher_key, + kc->decrypt.cipher_key_size); + if (IS_ERR(ks->decrypt)) { + ret = PTR_ERR(ks->decrypt); + ks->decrypt = NULL; + goto destroy_ks; + } + + memcpy(ks->nonce_tail_xmit, kc->encrypt.nonce_tail, + OVPN_NONCE_TAIL_SIZE); + memcpy(ks->nonce_tail_recv, kc->decrypt.nonce_tail, + OVPN_NONCE_TAIL_SIZE); + + /* init packet ID generation/validation */ + ovpn_pktid_xmit_init(&ks->pid_xmit); + ovpn_pktid_recv_init(&ks->pid_recv); + + return ks; + +destroy_ks: + ovpn_aead_crypto_key_slot_destroy(ks); + return ERR_PTR(ret); +} + +enum ovpn_cipher_alg ovpn_aead_crypto_alg(struct ovpn_crypto_key_slot *ks) +{ + const char *alg_name; + + if (!ks->encrypt) + return OVPN_CIPHER_ALG_NONE; + + alg_name = crypto_tfm_alg_name(crypto_aead_tfm(ks->encrypt)); + + if (!strcmp(alg_name, ALG_NAME_AES)) + return OVPN_CIPHER_ALG_AES_GCM; + else if (!strcmp(alg_name, ALG_NAME_CHACHAPOLY)) + return OVPN_CIPHER_ALG_CHACHA20_POLY1305; + else + return OVPN_CIPHER_ALG_NONE; +} diff --git a/drivers/net/ovpn/crypto_aead.h b/drivers/net/ovpn/crypto_aead.h new file mode 100644 index 0000000000000..65a2ff3078986 --- /dev/null +++ b/drivers/net/ovpn/crypto_aead.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* OpenVPN data channel offload + * + * Copyright (C) 2020-2025 OpenVPN, Inc. + * + * Author: James Yonan + * Antonio Quartulli + */ + +#ifndef _NET_OVPN_OVPNAEAD_H_ +#define _NET_OVPN_OVPNAEAD_H_ + +#include "crypto.h" + +#include +#include + +int ovpn_aead_encrypt(struct ovpn_peer *peer, struct ovpn_crypto_key_slot *ks, + struct sk_buff *skb); +int ovpn_aead_decrypt(struct ovpn_peer *peer, struct ovpn_crypto_key_slot *ks, + struct sk_buff *skb); + +struct ovpn_crypto_key_slot * +ovpn_aead_crypto_key_slot_new(const struct ovpn_key_config *kc); +void ovpn_aead_crypto_key_slot_destroy(struct ovpn_crypto_key_slot *ks); + +enum ovpn_cipher_alg ovpn_aead_crypto_alg(struct ovpn_crypto_key_slot *ks); + +#endif /* _NET_OVPN_OVPNAEAD_H_ */ diff --git a/drivers/net/ovpn/io.c b/drivers/net/ovpn/io.c new file mode 100644 index 0000000000000..c11ed75c62a25 --- /dev/null +++ b/drivers/net/ovpn/io.c @@ -0,0 +1,455 @@ +// SPDX-License-Identifier: GPL-2.0 +/* OpenVPN data channel offload + * + * Copyright (C) 2019-2025 OpenVPN, Inc. 
+ * + * Author: James Yonan + * Antonio Quartulli + */ + +#include +#include +#include +#include +#include +#include + +#include "ovpnpriv.h" +#include "peer.h" +#include "io.h" +#include "bind.h" +#include "crypto.h" +#include "crypto_aead.h" +#include "netlink.h" +#include "proto.h" +#include "tcp.h" +#include "udp.h" +#include "skb.h" +#include "socket.h" + +const unsigned char ovpn_keepalive_message[OVPN_KEEPALIVE_SIZE] = { + 0x2a, 0x18, 0x7b, 0xf3, 0x64, 0x1e, 0xb4, 0xcb, + 0x07, 0xed, 0x2d, 0x0a, 0x98, 0x1f, 0xc7, 0x48 +}; + +/** + * ovpn_is_keepalive - check if skb contains a keepalive message + * @skb: packet to check + * + * Assumes that the first byte of skb->data is defined. + * + * Return: true if skb contains a keepalive or false otherwise + */ +static bool ovpn_is_keepalive(struct sk_buff *skb) +{ + if (*skb->data != ovpn_keepalive_message[0]) + return false; + + if (skb->len != OVPN_KEEPALIVE_SIZE) + return false; + + if (!pskb_may_pull(skb, OVPN_KEEPALIVE_SIZE)) + return false; + + return !memcmp(skb->data, ovpn_keepalive_message, OVPN_KEEPALIVE_SIZE); +} + +/* Called after decrypt to write the IP packet to the device. + * This method is expected to manage/free the skb. + */ +static void ovpn_netdev_write(struct ovpn_peer *peer, struct sk_buff *skb) +{ + unsigned int pkt_len; + int ret; + + /* we can't guarantee the packet wasn't corrupted before entering the + * VPN, therefore we give other layers a chance to check that + */ + skb->ip_summed = CHECKSUM_NONE; + + /* skb hash for transport packet no longer valid after decapsulation */ + skb_clear_hash(skb); + + /* post-decrypt scrub -- prepare to inject encapsulated packet onto the + * interface, based on __skb_tunnel_rx() in dst.h + */ + skb->dev = peer->ovpn->dev; + skb_set_queue_mapping(skb, 0); + skb_scrub_packet(skb, true); + + /* network header reset in ovpn_decrypt_post() */ + skb_reset_transport_header(skb); + skb_reset_inner_headers(skb); + + /* cause packet to be "received" by the interface */ + pkt_len = skb->len; + ret = gro_cells_receive(&peer->ovpn->gro_cells, skb); + if (likely(ret == NET_RX_SUCCESS)) { + /* update RX stats with the size of decrypted packet */ + ovpn_peer_stats_increment_rx(&peer->vpn_stats, pkt_len); + dev_sw_netstats_rx_add(peer->ovpn->dev, pkt_len); + } +} + +void ovpn_decrypt_post(void *data, int ret) +{ + struct ovpn_crypto_key_slot *ks; + unsigned int payload_offset = 0; + struct sk_buff *skb = data; + struct ovpn_socket *sock; + struct ovpn_peer *peer; + __be16 proto; + __be32 *pid; + + /* crypto is happening asynchronously. 
this function will be called + * again later by the crypto callback with a proper return code + */ + if (unlikely(ret == -EINPROGRESS)) + return; + + payload_offset = ovpn_skb_cb(skb)->payload_offset; + ks = ovpn_skb_cb(skb)->ks; + peer = ovpn_skb_cb(skb)->peer; + + /* crypto is done, cleanup skb CB and its members */ + + if (likely(ovpn_skb_cb(skb)->iv)) + kfree(ovpn_skb_cb(skb)->iv); + + if (likely(ovpn_skb_cb(skb)->sg)) + kfree(ovpn_skb_cb(skb)->sg); + + if (likely(ovpn_skb_cb(skb)->req)) + aead_request_free(ovpn_skb_cb(skb)->req); + + if (unlikely(ret < 0)) + goto drop; + + /* PID sits after the op */ + pid = (__force __be32 *)(skb->data + OVPN_OPCODE_SIZE); + ret = ovpn_pktid_recv(&ks->pid_recv, ntohl(*pid), 0); + if (unlikely(ret < 0)) { + net_err_ratelimited("%s: PKT ID RX error for peer %u: %d\n", + netdev_name(peer->ovpn->dev), peer->id, + ret); + goto drop; + } + + /* keep track of last received authenticated packet for keepalive */ + WRITE_ONCE(peer->last_recv, ktime_get_real_seconds()); + + rcu_read_lock(); + sock = rcu_dereference(peer->sock); + if (sock && sock->sock->sk->sk_protocol == IPPROTO_UDP) + /* check if this peer changed local or remote endpoint */ + ovpn_peer_endpoints_update(peer, skb); + rcu_read_unlock(); + + /* point to encapsulated IP packet */ + __skb_pull(skb, payload_offset); + + /* check if this is a valid datapacket that has to be delivered to the + * ovpn interface + */ + skb_reset_network_header(skb); + proto = ovpn_ip_check_protocol(skb); + if (unlikely(!proto)) { + /* check if null packet */ + if (unlikely(!pskb_may_pull(skb, 1))) { + net_info_ratelimited("%s: NULL packet received from peer %u\n", + netdev_name(peer->ovpn->dev), + peer->id); + goto drop; + } + + if (ovpn_is_keepalive(skb)) { + net_dbg_ratelimited("%s: ping received from peer %u\n", + netdev_name(peer->ovpn->dev), + peer->id); + goto drop_nocount; + } + + net_info_ratelimited("%s: unsupported protocol received from peer %u\n", + netdev_name(peer->ovpn->dev), peer->id); + goto drop; + } + skb->protocol = proto; + + /* perform Reverse Path Filtering (RPF) */ + if (unlikely(!ovpn_peer_check_by_src(peer->ovpn, skb, peer))) { + if (skb->protocol == htons(ETH_P_IPV6)) + net_dbg_ratelimited("%s: RPF dropped packet from peer %u, src: %pI6c\n", + netdev_name(peer->ovpn->dev), + peer->id, &ipv6_hdr(skb)->saddr); + else + net_dbg_ratelimited("%s: RPF dropped packet from peer %u, src: %pI4\n", + netdev_name(peer->ovpn->dev), + peer->id, &ip_hdr(skb)->saddr); + goto drop; + } + + ovpn_netdev_write(peer, skb); + /* skb is passed to upper layer - don't free it */ + skb = NULL; +drop: + if (unlikely(skb)) + dev_core_stats_rx_dropped_inc(peer->ovpn->dev); +drop_nocount: + if (likely(peer)) + ovpn_peer_put(peer); + if (likely(ks)) + ovpn_crypto_key_slot_put(ks); + kfree_skb(skb); +} + +/* RX path entry point: decrypt packet and forward it to the device */ +void ovpn_recv(struct ovpn_peer *peer, struct sk_buff *skb) +{ + struct ovpn_crypto_key_slot *ks; + u8 key_id; + + ovpn_peer_stats_increment_rx(&peer->link_stats, skb->len); + + /* get the key slot matching the key ID in the received packet */ + key_id = ovpn_key_id_from_skb(skb); + ks = ovpn_crypto_key_id_to_slot(&peer->crypto, key_id); + if (unlikely(!ks)) { + net_info_ratelimited("%s: no available key for peer %u, key-id: %u\n", + netdev_name(peer->ovpn->dev), peer->id, + key_id); + dev_core_stats_rx_dropped_inc(peer->ovpn->dev); + kfree_skb(skb); + ovpn_peer_put(peer); + return; + } + + memset(ovpn_skb_cb(skb), 0, sizeof(struct ovpn_cb)); + 
ovpn_decrypt_post(skb, ovpn_aead_decrypt(peer, ks, skb));
+}
+
+void ovpn_encrypt_post(void *data, int ret)
+{
+	struct ovpn_crypto_key_slot *ks;
+	struct sk_buff *skb = data;
+	struct ovpn_socket *sock;
+	struct ovpn_peer *peer;
+	unsigned int orig_len;
+
+	/* encryption is happening asynchronously. This function will be
+	 * called later by the crypto callback with a proper return value
+	 */
+	if (unlikely(ret == -EINPROGRESS))
+		return;
+
+	ks = ovpn_skb_cb(skb)->ks;
+	peer = ovpn_skb_cb(skb)->peer;
+
+	/* crypto is done, cleanup skb CB and its members */
+
+	if (likely(ovpn_skb_cb(skb)->iv))
+		kfree(ovpn_skb_cb(skb)->iv);
+
+	if (likely(ovpn_skb_cb(skb)->sg))
+		kfree(ovpn_skb_cb(skb)->sg);
+
+	if (likely(ovpn_skb_cb(skb)->req))
+		aead_request_free(ovpn_skb_cb(skb)->req);
+
+	if (unlikely(ret == -ERANGE)) {
+		/* we ran out of IVs and we must kill the key as it can't be
+		 * used anymore
+		 */
+		netdev_warn(peer->ovpn->dev,
+			    "killing key %u for peer %u\n", ks->key_id,
+			    peer->id);
+		ovpn_crypto_kill_key(&peer->crypto, ks->key_id);
+		/* let userspace know so that a new key must be negotiated */
+		ovpn_nl_key_swap_notify(peer, ks->key_id);
+		goto err;
+	}
+
+	if (unlikely(ret < 0))
+		goto err;
+
+	skb_mark_not_on_list(skb);
+	orig_len = skb->len;
+
+	rcu_read_lock();
+	sock = rcu_dereference(peer->sock);
+	if (unlikely(!sock))
+		goto err_unlock;
+
+	switch (sock->sock->sk->sk_protocol) {
+	case IPPROTO_UDP:
+		ovpn_udp_send_skb(peer, sock->sock, skb);
+		break;
+	case IPPROTO_TCP:
+		ovpn_tcp_send_skb(peer, sock->sock, skb);
+		break;
+	default:
+		/* no transport configured yet */
+		goto err_unlock;
+	}
+
+	ovpn_peer_stats_increment_tx(&peer->link_stats, orig_len);
+	/* keep track of last sent packet for keepalive */
+	WRITE_ONCE(peer->last_sent, ktime_get_real_seconds());
+	/* skb passed down the stack - don't free it */
+	skb = NULL;
+err_unlock:
+	rcu_read_unlock();
+err:
+	if (unlikely(skb))
+		dev_core_stats_tx_dropped_inc(peer->ovpn->dev);
+	if (likely(peer))
+		ovpn_peer_put(peer);
+	if (likely(ks))
+		ovpn_crypto_key_slot_put(ks);
+	kfree_skb(skb);
+}
+
+static bool ovpn_encrypt_one(struct ovpn_peer *peer, struct sk_buff *skb)
+{
+	struct ovpn_crypto_key_slot *ks;
+
+	/* get primary key to be used for encrypting data */
+	ks = ovpn_crypto_key_slot_primary(&peer->crypto);
+	if (unlikely(!ks))
+		return false;
+
+	/* take a reference to the peer because the crypto code may run async.
+ * ovpn_encrypt_post() will release it upon completion + */ + if (unlikely(!ovpn_peer_hold(peer))) { + DEBUG_NET_WARN_ON_ONCE(1); + ovpn_crypto_key_slot_put(ks); + return false; + } + + memset(ovpn_skb_cb(skb), 0, sizeof(struct ovpn_cb)); + ovpn_encrypt_post(skb, ovpn_aead_encrypt(peer, ks, skb)); + return true; +} + +/* send skb to connected peer, if any */ +static void ovpn_send(struct ovpn_priv *ovpn, struct sk_buff *skb, + struct ovpn_peer *peer) +{ + struct sk_buff *curr, *next; + + /* this might be a GSO-segmented skb list: process each skb + * independently + */ + skb_list_walk_safe(skb, curr, next) { + if (unlikely(!ovpn_encrypt_one(peer, curr))) { + dev_core_stats_tx_dropped_inc(ovpn->dev); + kfree_skb(curr); + } + } + + ovpn_peer_put(peer); +} + +/* Send user data to the network + */ +netdev_tx_t ovpn_net_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct ovpn_priv *ovpn = netdev_priv(dev); + struct sk_buff *segments, *curr, *next; + struct sk_buff_head skb_list; + struct ovpn_peer *peer; + __be16 proto; + int ret; + + /* reset netfilter state */ + nf_reset_ct(skb); + + /* verify IP header size in network packet */ + proto = ovpn_ip_check_protocol(skb); + if (unlikely(!proto || skb->protocol != proto)) + goto drop; + + if (skb_is_gso(skb)) { + segments = skb_gso_segment(skb, 0); + if (IS_ERR(segments)) { + ret = PTR_ERR(segments); + net_err_ratelimited("%s: cannot segment payload packet: %d\n", + netdev_name(dev), ret); + goto drop; + } + + consume_skb(skb); + skb = segments; + } + + /* from this moment on, "skb" might be a list */ + + __skb_queue_head_init(&skb_list); + skb_list_walk_safe(skb, curr, next) { + skb_mark_not_on_list(curr); + + curr = skb_share_check(curr, GFP_ATOMIC); + if (unlikely(!curr)) { + net_err_ratelimited("%s: skb_share_check failed for payload packet\n", + netdev_name(dev)); + dev_core_stats_tx_dropped_inc(ovpn->dev); + continue; + } + + __skb_queue_tail(&skb_list, curr); + } + skb_list.prev->next = NULL; + + /* retrieve peer serving the destination IP of this packet */ + peer = ovpn_peer_get_by_dst(ovpn, skb); + if (unlikely(!peer)) { + net_dbg_ratelimited("%s: no peer to send data to\n", + netdev_name(ovpn->dev)); + goto drop; + } + + ovpn_peer_stats_increment_tx(&peer->vpn_stats, skb->len); + ovpn_send(ovpn, skb_list.next, peer); + + return NETDEV_TX_OK; + +drop: + dev_core_stats_tx_dropped_inc(ovpn->dev); + skb_tx_error(skb); + kfree_skb_list(skb); + return NET_XMIT_DROP; +} + +/** + * ovpn_xmit_special - encrypt and transmit an out-of-band message to peer + * @peer: peer to send the message to + * @data: message content + * @len: message length + * + * Assumes that caller holds a reference to peer, which will be + * passed to ovpn_send() + */ +void ovpn_xmit_special(struct ovpn_peer *peer, const void *data, + const unsigned int len) +{ + struct ovpn_priv *ovpn; + struct sk_buff *skb; + + ovpn = peer->ovpn; + if (unlikely(!ovpn)) { + ovpn_peer_put(peer); + return; + } + + skb = alloc_skb(256 + len, GFP_ATOMIC); + if (unlikely(!skb)) { + ovpn_peer_put(peer); + return; + } + + skb_reserve(skb, 128); + skb->priority = TC_PRIO_BESTEFFORT; + __skb_put_data(skb, data, len); + + ovpn_send(ovpn, skb, peer); +} diff --git a/drivers/net/ovpn/io.h b/drivers/net/ovpn/io.h new file mode 100644 index 0000000000000..db9e10f9077c4 --- /dev/null +++ b/drivers/net/ovpn/io.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* OpenVPN data channel offload + * + * Copyright (C) 2019-2025 OpenVPN, Inc. 
+ * + * Author: James Yonan + * Antonio Quartulli + */ + +#ifndef _NET_OVPN_OVPN_H_ +#define _NET_OVPN_OVPN_H_ + +/* DATA_V2 header size with AEAD encryption */ +#define OVPN_HEAD_ROOM (OVPN_OPCODE_SIZE + OVPN_NONCE_WIRE_SIZE + \ + 16 /* AEAD TAG length */ + \ + max(sizeof(struct udphdr), sizeof(struct tcphdr)) +\ + max(sizeof(struct ipv6hdr), sizeof(struct iphdr))) + +/* max padding required by encryption */ +#define OVPN_MAX_PADDING 16 + +#define OVPN_KEEPALIVE_SIZE 16 +extern const unsigned char ovpn_keepalive_message[OVPN_KEEPALIVE_SIZE]; + +netdev_tx_t ovpn_net_xmit(struct sk_buff *skb, struct net_device *dev); + +void ovpn_recv(struct ovpn_peer *peer, struct sk_buff *skb); +void ovpn_xmit_special(struct ovpn_peer *peer, const void *data, + const unsigned int len); + +void ovpn_encrypt_post(void *data, int ret); +void ovpn_decrypt_post(void *data, int ret); + +#endif /* _NET_OVPN_OVPN_H_ */ diff --git a/drivers/net/ovpn/main.c b/drivers/net/ovpn/main.c new file mode 100644 index 0000000000000..46abbbed384a4 --- /dev/null +++ b/drivers/net/ovpn/main.c @@ -0,0 +1,330 @@ +// SPDX-License-Identifier: GPL-2.0 +/* OpenVPN data channel offload + * + * Copyright (C) 2020-2025 OpenVPN, Inc. + * + * Author: Antonio Quartulli + * James Yonan + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ovpnpriv.h" +#include "main.h" +#include "netlink.h" +#include "io.h" +#include "peer.h" +#include "proto.h" +#include "tcp.h" +#include "udp.h" + +static void ovpn_priv_free(struct net_device *net) +{ + struct ovpn_priv *ovpn = netdev_priv(net); + + kfree(ovpn->peers); +} + +static int ovpn_mp_alloc(struct ovpn_priv *ovpn) +{ + struct in_device *dev_v4; + int i; + + if (ovpn->mode != OVPN_MODE_MP) + return 0; + + dev_v4 = __in_dev_get_rtnl(ovpn->dev); + if (dev_v4) { + /* disable redirects as Linux gets confused by ovpn + * handling same-LAN routing. + * This happens because a multipeer interface is used as + * relay point between hosts in the same subnet, while + * in a classic LAN this would not be needed because the + * two hosts would be able to talk directly. + */ + IN_DEV_CONF_SET(dev_v4, SEND_REDIRECTS, false); + IPV4_DEVCONF_ALL(dev_net(ovpn->dev), SEND_REDIRECTS) = false; + } + + /* the peer container is fairly large, therefore we allocate it only in + * MP mode + */ + ovpn->peers = kzalloc(sizeof(*ovpn->peers), GFP_KERNEL); + if (!ovpn->peers) + return -ENOMEM; + + for (i = 0; i < ARRAY_SIZE(ovpn->peers->by_id); i++) { + INIT_HLIST_HEAD(&ovpn->peers->by_id[i]); + INIT_HLIST_NULLS_HEAD(&ovpn->peers->by_vpn_addr4[i], i); + INIT_HLIST_NULLS_HEAD(&ovpn->peers->by_vpn_addr6[i], i); + INIT_HLIST_NULLS_HEAD(&ovpn->peers->by_transp_addr[i], i); + } + + return 0; +} + +static int ovpn_net_init(struct net_device *dev) +{ + struct ovpn_priv *ovpn = netdev_priv(dev); + int err = gro_cells_init(&ovpn->gro_cells, dev); + + if (err < 0) + return err; + + err = ovpn_mp_alloc(ovpn); + if (err < 0) { + gro_cells_destroy(&ovpn->gro_cells); + return err; + } + + return 0; +} + +static void ovpn_net_uninit(struct net_device *dev) +{ + struct ovpn_priv *ovpn = netdev_priv(dev); + + gro_cells_destroy(&ovpn->gro_cells); +} + +static int ovpn_net_open(struct net_device *dev) +{ + struct ovpn_priv *ovpn = netdev_priv(dev); + + /* carrier for P2P interfaces is switched on and off when + * the peer is added or deleted. 
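+	 * (the link state thus mirrors whether the single remote peer is
+	 * currently configured)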
+ * + * in case of P2MP interfaces we just keep the carrier always on + */ + if (ovpn->mode == OVPN_MODE_MP) + netif_carrier_on(dev); + return 0; +} + +static const struct net_device_ops ovpn_netdev_ops = { + .ndo_init = ovpn_net_init, + .ndo_uninit = ovpn_net_uninit, + .ndo_open = ovpn_net_open, + .ndo_start_xmit = ovpn_net_xmit, +}; + +static const struct device_type ovpn_type = { + .name = OVPN_FAMILY_NAME, +}; + +static const struct nla_policy ovpn_policy[IFLA_OVPN_MAX + 1] = { + [IFLA_OVPN_MODE] = NLA_POLICY_RANGE(NLA_U8, OVPN_MODE_P2P, + OVPN_MODE_MP), +}; + +/** + * ovpn_dev_is_valid - check if the netdevice is of type 'ovpn' + * @dev: the interface to check + * + * Return: whether the netdevice is of type 'ovpn' + */ +bool ovpn_dev_is_valid(const struct net_device *dev) +{ + return dev->netdev_ops == &ovpn_netdev_ops; +} + +static void ovpn_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + strscpy(info->driver, "ovpn", sizeof(info->driver)); + strscpy(info->bus_info, "ovpn", sizeof(info->bus_info)); +} + +static const struct ethtool_ops ovpn_ethtool_ops = { + .get_drvinfo = ovpn_get_drvinfo, + .get_link = ethtool_op_get_link, + .get_ts_info = ethtool_op_get_ts_info, +}; + +static void ovpn_setup(struct net_device *dev) +{ + netdev_features_t feat = NETIF_F_SG | NETIF_F_GSO | + NETIF_F_GSO_SOFTWARE | NETIF_F_HIGHDMA; + + dev->needs_free_netdev = true; + + dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS; + + dev->ethtool_ops = &ovpn_ethtool_ops; + dev->netdev_ops = &ovpn_netdev_ops; + + dev->priv_destructor = ovpn_priv_free; + + dev->hard_header_len = 0; + dev->addr_len = 0; + dev->mtu = ETH_DATA_LEN - OVPN_HEAD_ROOM; + dev->min_mtu = IPV4_MIN_MTU; + dev->max_mtu = IP_MAX_MTU - OVPN_HEAD_ROOM; + + dev->type = ARPHRD_NONE; + dev->flags = IFF_POINTOPOINT | IFF_NOARP; + dev->priv_flags |= IFF_NO_QUEUE; + + dev->lltx = true; + dev->features |= feat; + dev->hw_features |= feat; + dev->hw_enc_features |= feat; + + dev->needed_headroom = ALIGN(OVPN_HEAD_ROOM, 4); + dev->needed_tailroom = OVPN_MAX_PADDING; + + SET_NETDEV_DEVTYPE(dev, &ovpn_type); +} + +static int ovpn_newlink(struct net_device *dev, + struct rtnl_newlink_params *params, + struct netlink_ext_ack *extack) +{ + struct ovpn_priv *ovpn = netdev_priv(dev); + struct nlattr **data = params->data; + enum ovpn_mode mode = OVPN_MODE_P2P; + + if (data && data[IFLA_OVPN_MODE]) { + mode = nla_get_u8(data[IFLA_OVPN_MODE]); + netdev_dbg(dev, "setting device mode: %u\n", mode); + } + + ovpn->dev = dev; + ovpn->mode = mode; + spin_lock_init(&ovpn->lock); + INIT_DELAYED_WORK(&ovpn->keepalive_work, ovpn_peer_keepalive_work); + + /* turn carrier explicitly off after registration, this way state is + * clearly defined + */ + netif_carrier_off(dev); + + return register_netdevice(dev); +} + +static int ovpn_fill_info(struct sk_buff *skb, const struct net_device *dev) +{ + struct ovpn_priv *ovpn = netdev_priv(dev); + + if (nla_put_u8(skb, IFLA_OVPN_MODE, ovpn->mode)) + return -EMSGSIZE; + + return 0; +} + +static struct rtnl_link_ops ovpn_link_ops = { + .kind = "ovpn", + .netns_refund = false, + .priv_size = sizeof(struct ovpn_priv), + .setup = ovpn_setup, + .policy = ovpn_policy, + .maxtype = IFLA_OVPN_MAX, + .newlink = ovpn_newlink, + .dellink = unregister_netdevice_queue, + .fill_info = ovpn_fill_info, +}; + +static int ovpn_netdev_notifier_call(struct notifier_block *nb, + unsigned long state, void *ptr) +{ + struct net_device *dev = netdev_notifier_info_to_dev(ptr); + struct ovpn_priv *ovpn; + + if (!ovpn_dev_is_valid(dev)) 
+ return NOTIFY_DONE; + + ovpn = netdev_priv(dev); + + switch (state) { + case NETDEV_REGISTER: + ovpn->registered = true; + break; + case NETDEV_UNREGISTER: + /* twiddle thumbs on netns device moves */ + if (dev->reg_state != NETREG_UNREGISTERING) + break; + + /* can be delivered multiple times, so check registered flag, + * then destroy the interface + */ + if (!ovpn->registered) + return NOTIFY_DONE; + + netif_carrier_off(dev); + ovpn->registered = false; + + cancel_delayed_work_sync(&ovpn->keepalive_work); + ovpn_peers_free(ovpn, NULL, OVPN_DEL_PEER_REASON_TEARDOWN); + break; + case NETDEV_POST_INIT: + case NETDEV_GOING_DOWN: + case NETDEV_DOWN: + case NETDEV_UP: + case NETDEV_PRE_UP: + break; + default: + return NOTIFY_DONE; + } + + return NOTIFY_OK; +} + +static struct notifier_block ovpn_netdev_notifier = { + .notifier_call = ovpn_netdev_notifier_call, +}; + +static int __init ovpn_init(void) +{ + int err = register_netdevice_notifier(&ovpn_netdev_notifier); + + if (err) { + pr_err("ovpn: can't register netdevice notifier: %d\n", err); + return err; + } + + err = rtnl_link_register(&ovpn_link_ops); + if (err) { + pr_err("ovpn: can't register rtnl link ops: %d\n", err); + goto unreg_netdev; + } + + err = ovpn_nl_register(); + if (err) { + pr_err("ovpn: can't register netlink family: %d\n", err); + goto unreg_rtnl; + } + + ovpn_tcp_init(); + + return 0; + +unreg_rtnl: + rtnl_link_unregister(&ovpn_link_ops); +unreg_netdev: + unregister_netdevice_notifier(&ovpn_netdev_notifier); + return err; +} + +static __exit void ovpn_cleanup(void) +{ + ovpn_nl_unregister(); + rtnl_link_unregister(&ovpn_link_ops); + unregister_netdevice_notifier(&ovpn_netdev_notifier); + + rcu_barrier(); +} + +module_init(ovpn_init); +module_exit(ovpn_cleanup); + +MODULE_DESCRIPTION("OpenVPN data channel offload (ovpn)"); +MODULE_AUTHOR("(C) 2020-2025 OpenVPN, Inc."); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ovpn/main.h b/drivers/net/ovpn/main.h new file mode 100644 index 0000000000000..017cd01007659 --- /dev/null +++ b/drivers/net/ovpn/main.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* OpenVPN data channel offload + * + * Copyright (C) 2020-2025 OpenVPN, Inc. 
+ * + * Author: Antonio Quartulli + */ + +#ifndef _NET_OVPN_MAIN_H_ +#define _NET_OVPN_MAIN_H_ + +bool ovpn_dev_is_valid(const struct net_device *dev); + +#endif /* _NET_OVPN_MAIN_H_ */ diff --git a/drivers/net/ovpn/netlink-gen.c b/drivers/net/ovpn/netlink-gen.c new file mode 100644 index 0000000000000..58e1a4342378e --- /dev/null +++ b/drivers/net/ovpn/netlink-gen.c @@ -0,0 +1,213 @@ +// SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) +/* Do not edit directly, auto-generated from: */ +/* Documentation/netlink/specs/ovpn.yaml */ +/* YNL-GEN kernel source */ + +#include +#include + +#include "netlink-gen.h" + +#include + +/* Integer value ranges */ +static const struct netlink_range_validation ovpn_a_peer_id_range = { + .max = 16777215ULL, +}; + +static const struct netlink_range_validation ovpn_a_keyconf_peer_id_range = { + .max = 16777215ULL, +}; + +/* Common nested types */ +const struct nla_policy ovpn_keyconf_nl_policy[OVPN_A_KEYCONF_DECRYPT_DIR + 1] = { + [OVPN_A_KEYCONF_PEER_ID] = NLA_POLICY_FULL_RANGE(NLA_U32, &ovpn_a_keyconf_peer_id_range), + [OVPN_A_KEYCONF_SLOT] = NLA_POLICY_MAX(NLA_U32, 1), + [OVPN_A_KEYCONF_KEY_ID] = NLA_POLICY_MAX(NLA_U32, 7), + [OVPN_A_KEYCONF_CIPHER_ALG] = NLA_POLICY_MAX(NLA_U32, 2), + [OVPN_A_KEYCONF_ENCRYPT_DIR] = NLA_POLICY_NESTED(ovpn_keydir_nl_policy), + [OVPN_A_KEYCONF_DECRYPT_DIR] = NLA_POLICY_NESTED(ovpn_keydir_nl_policy), +}; + +const struct nla_policy ovpn_keydir_nl_policy[OVPN_A_KEYDIR_NONCE_TAIL + 1] = { + [OVPN_A_KEYDIR_CIPHER_KEY] = NLA_POLICY_MAX_LEN(256), + [OVPN_A_KEYDIR_NONCE_TAIL] = NLA_POLICY_EXACT_LEN(OVPN_NONCE_TAIL_SIZE), +}; + +const struct nla_policy ovpn_peer_nl_policy[OVPN_A_PEER_LINK_TX_PACKETS + 1] = { + [OVPN_A_PEER_ID] = NLA_POLICY_FULL_RANGE(NLA_U32, &ovpn_a_peer_id_range), + [OVPN_A_PEER_REMOTE_IPV4] = { .type = NLA_BE32, }, + [OVPN_A_PEER_REMOTE_IPV6] = NLA_POLICY_EXACT_LEN(16), + [OVPN_A_PEER_REMOTE_IPV6_SCOPE_ID] = { .type = NLA_U32, }, + [OVPN_A_PEER_REMOTE_PORT] = NLA_POLICY_MIN(NLA_BE16, 1), + [OVPN_A_PEER_SOCKET] = { .type = NLA_U32, }, + [OVPN_A_PEER_SOCKET_NETNSID] = { .type = NLA_S32, }, + [OVPN_A_PEER_VPN_IPV4] = { .type = NLA_BE32, }, + [OVPN_A_PEER_VPN_IPV6] = NLA_POLICY_EXACT_LEN(16), + [OVPN_A_PEER_LOCAL_IPV4] = { .type = NLA_BE32, }, + [OVPN_A_PEER_LOCAL_IPV6] = NLA_POLICY_EXACT_LEN(16), + [OVPN_A_PEER_LOCAL_PORT] = NLA_POLICY_MIN(NLA_BE16, 1), + [OVPN_A_PEER_KEEPALIVE_INTERVAL] = { .type = NLA_U32, }, + [OVPN_A_PEER_KEEPALIVE_TIMEOUT] = { .type = NLA_U32, }, + [OVPN_A_PEER_DEL_REASON] = NLA_POLICY_MAX(NLA_U32, 4), + [OVPN_A_PEER_VPN_RX_BYTES] = { .type = NLA_UINT, }, + [OVPN_A_PEER_VPN_TX_BYTES] = { .type = NLA_UINT, }, + [OVPN_A_PEER_VPN_RX_PACKETS] = { .type = NLA_UINT, }, + [OVPN_A_PEER_VPN_TX_PACKETS] = { .type = NLA_UINT, }, + [OVPN_A_PEER_LINK_RX_BYTES] = { .type = NLA_UINT, }, + [OVPN_A_PEER_LINK_TX_BYTES] = { .type = NLA_UINT, }, + [OVPN_A_PEER_LINK_RX_PACKETS] = { .type = NLA_UINT, }, + [OVPN_A_PEER_LINK_TX_PACKETS] = { .type = NLA_UINT, }, +}; + +/* OVPN_CMD_PEER_NEW - do */ +static const struct nla_policy ovpn_peer_new_nl_policy[OVPN_A_PEER + 1] = { + [OVPN_A_IFINDEX] = { .type = NLA_U32, }, + [OVPN_A_PEER] = NLA_POLICY_NESTED(ovpn_peer_nl_policy), +}; + +/* OVPN_CMD_PEER_SET - do */ +static const struct nla_policy ovpn_peer_set_nl_policy[OVPN_A_PEER + 1] = { + [OVPN_A_IFINDEX] = { .type = NLA_U32, }, + [OVPN_A_PEER] = NLA_POLICY_NESTED(ovpn_peer_nl_policy), +}; + +/* OVPN_CMD_PEER_GET - do */ +static const struct nla_policy ovpn_peer_get_do_nl_policy[OVPN_A_PEER + 1] = 
{ + [OVPN_A_IFINDEX] = { .type = NLA_U32, }, + [OVPN_A_PEER] = NLA_POLICY_NESTED(ovpn_peer_nl_policy), +}; + +/* OVPN_CMD_PEER_GET - dump */ +static const struct nla_policy ovpn_peer_get_dump_nl_policy[OVPN_A_IFINDEX + 1] = { + [OVPN_A_IFINDEX] = { .type = NLA_U32, }, +}; + +/* OVPN_CMD_PEER_DEL - do */ +static const struct nla_policy ovpn_peer_del_nl_policy[OVPN_A_PEER + 1] = { + [OVPN_A_IFINDEX] = { .type = NLA_U32, }, + [OVPN_A_PEER] = NLA_POLICY_NESTED(ovpn_peer_nl_policy), +}; + +/* OVPN_CMD_KEY_NEW - do */ +static const struct nla_policy ovpn_key_new_nl_policy[OVPN_A_KEYCONF + 1] = { + [OVPN_A_IFINDEX] = { .type = NLA_U32, }, + [OVPN_A_KEYCONF] = NLA_POLICY_NESTED(ovpn_keyconf_nl_policy), +}; + +/* OVPN_CMD_KEY_GET - do */ +static const struct nla_policy ovpn_key_get_nl_policy[OVPN_A_KEYCONF + 1] = { + [OVPN_A_IFINDEX] = { .type = NLA_U32, }, + [OVPN_A_KEYCONF] = NLA_POLICY_NESTED(ovpn_keyconf_nl_policy), +}; + +/* OVPN_CMD_KEY_SWAP - do */ +static const struct nla_policy ovpn_key_swap_nl_policy[OVPN_A_KEYCONF + 1] = { + [OVPN_A_IFINDEX] = { .type = NLA_U32, }, + [OVPN_A_KEYCONF] = NLA_POLICY_NESTED(ovpn_keyconf_nl_policy), +}; + +/* OVPN_CMD_KEY_DEL - do */ +static const struct nla_policy ovpn_key_del_nl_policy[OVPN_A_KEYCONF + 1] = { + [OVPN_A_IFINDEX] = { .type = NLA_U32, }, + [OVPN_A_KEYCONF] = NLA_POLICY_NESTED(ovpn_keyconf_nl_policy), +}; + +/* Ops table for ovpn */ +static const struct genl_split_ops ovpn_nl_ops[] = { + { + .cmd = OVPN_CMD_PEER_NEW, + .pre_doit = ovpn_nl_pre_doit, + .doit = ovpn_nl_peer_new_doit, + .post_doit = ovpn_nl_post_doit, + .policy = ovpn_peer_new_nl_policy, + .maxattr = OVPN_A_PEER, + .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO, + }, + { + .cmd = OVPN_CMD_PEER_SET, + .pre_doit = ovpn_nl_pre_doit, + .doit = ovpn_nl_peer_set_doit, + .post_doit = ovpn_nl_post_doit, + .policy = ovpn_peer_set_nl_policy, + .maxattr = OVPN_A_PEER, + .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO, + }, + { + .cmd = OVPN_CMD_PEER_GET, + .pre_doit = ovpn_nl_pre_doit, + .doit = ovpn_nl_peer_get_doit, + .post_doit = ovpn_nl_post_doit, + .policy = ovpn_peer_get_do_nl_policy, + .maxattr = OVPN_A_PEER, + .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO, + }, + { + .cmd = OVPN_CMD_PEER_GET, + .dumpit = ovpn_nl_peer_get_dumpit, + .policy = ovpn_peer_get_dump_nl_policy, + .maxattr = OVPN_A_IFINDEX, + .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DUMP, + }, + { + .cmd = OVPN_CMD_PEER_DEL, + .pre_doit = ovpn_nl_pre_doit, + .doit = ovpn_nl_peer_del_doit, + .post_doit = ovpn_nl_post_doit, + .policy = ovpn_peer_del_nl_policy, + .maxattr = OVPN_A_PEER, + .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO, + }, + { + .cmd = OVPN_CMD_KEY_NEW, + .pre_doit = ovpn_nl_pre_doit, + .doit = ovpn_nl_key_new_doit, + .post_doit = ovpn_nl_post_doit, + .policy = ovpn_key_new_nl_policy, + .maxattr = OVPN_A_KEYCONF, + .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO, + }, + { + .cmd = OVPN_CMD_KEY_GET, + .pre_doit = ovpn_nl_pre_doit, + .doit = ovpn_nl_key_get_doit, + .post_doit = ovpn_nl_post_doit, + .policy = ovpn_key_get_nl_policy, + .maxattr = OVPN_A_KEYCONF, + .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO, + }, + { + .cmd = OVPN_CMD_KEY_SWAP, + .pre_doit = ovpn_nl_pre_doit, + .doit = ovpn_nl_key_swap_doit, + .post_doit = ovpn_nl_post_doit, + .policy = ovpn_key_swap_nl_policy, + .maxattr = OVPN_A_KEYCONF, + .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO, + }, + { + .cmd = OVPN_CMD_KEY_DEL, + .pre_doit = ovpn_nl_pre_doit, + .doit = ovpn_nl_key_del_doit, + .post_doit = ovpn_nl_post_doit, + .policy = ovpn_key_del_nl_policy, + .maxattr = 
OVPN_A_KEYCONF, + .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO, + }, +}; + +static const struct genl_multicast_group ovpn_nl_mcgrps[] = { + [OVPN_NLGRP_PEERS] = { "peers", }, +}; + +struct genl_family ovpn_nl_family __ro_after_init = { + .name = OVPN_FAMILY_NAME, + .version = OVPN_FAMILY_VERSION, + .netnsok = true, + .parallel_ops = true, + .module = THIS_MODULE, + .split_ops = ovpn_nl_ops, + .n_split_ops = ARRAY_SIZE(ovpn_nl_ops), + .mcgrps = ovpn_nl_mcgrps, + .n_mcgrps = ARRAY_SIZE(ovpn_nl_mcgrps), +}; diff --git a/drivers/net/ovpn/netlink-gen.h b/drivers/net/ovpn/netlink-gen.h new file mode 100644 index 0000000000000..66a4e4a0a055b --- /dev/null +++ b/drivers/net/ovpn/netlink-gen.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */ +/* Do not edit directly, auto-generated from: */ +/* Documentation/netlink/specs/ovpn.yaml */ +/* YNL-GEN kernel header */ + +#ifndef _LINUX_OVPN_GEN_H +#define _LINUX_OVPN_GEN_H + +#include +#include + +#include + +/* Common nested types */ +extern const struct nla_policy ovpn_keyconf_nl_policy[OVPN_A_KEYCONF_DECRYPT_DIR + 1]; +extern const struct nla_policy ovpn_keydir_nl_policy[OVPN_A_KEYDIR_NONCE_TAIL + 1]; +extern const struct nla_policy ovpn_peer_nl_policy[OVPN_A_PEER_LINK_TX_PACKETS + 1]; + +int ovpn_nl_pre_doit(const struct genl_split_ops *ops, struct sk_buff *skb, + struct genl_info *info); +void +ovpn_nl_post_doit(const struct genl_split_ops *ops, struct sk_buff *skb, + struct genl_info *info); + +int ovpn_nl_peer_new_doit(struct sk_buff *skb, struct genl_info *info); +int ovpn_nl_peer_set_doit(struct sk_buff *skb, struct genl_info *info); +int ovpn_nl_peer_get_doit(struct sk_buff *skb, struct genl_info *info); +int ovpn_nl_peer_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb); +int ovpn_nl_peer_del_doit(struct sk_buff *skb, struct genl_info *info); +int ovpn_nl_key_new_doit(struct sk_buff *skb, struct genl_info *info); +int ovpn_nl_key_get_doit(struct sk_buff *skb, struct genl_info *info); +int ovpn_nl_key_swap_doit(struct sk_buff *skb, struct genl_info *info); +int ovpn_nl_key_del_doit(struct sk_buff *skb, struct genl_info *info); + +enum { + OVPN_NLGRP_PEERS, +}; + +extern struct genl_family ovpn_nl_family; + +#endif /* _LINUX_OVPN_GEN_H */ diff --git a/drivers/net/ovpn/netlink.c b/drivers/net/ovpn/netlink.c new file mode 100644 index 0000000000000..30f2a7896edd3 --- /dev/null +++ b/drivers/net/ovpn/netlink.c @@ -0,0 +1,1250 @@ +// SPDX-License-Identifier: GPL-2.0 +/* OpenVPN data channel offload + * + * Copyright (C) 2020-2025 OpenVPN, Inc. 
+ * + * Author: Antonio Quartulli + */ + +#include +#include +#include + +#include + +#include "ovpnpriv.h" +#include "main.h" +#include "netlink.h" +#include "netlink-gen.h" +#include "bind.h" +#include "crypto.h" +#include "peer.h" +#include "socket.h" + +MODULE_ALIAS_GENL_FAMILY(OVPN_FAMILY_NAME); + +/** + * ovpn_get_dev_from_attrs - retrieve the ovpn private data from the netdevice + * a netlink message is targeting + * @net: network namespace where to look for the interface + * @info: generic netlink info from the user request + * @tracker: tracker object to be used for the netdev reference acquisition + * + * Return: the ovpn private data, if found, or an error otherwise + */ +static struct ovpn_priv * +ovpn_get_dev_from_attrs(struct net *net, const struct genl_info *info, + netdevice_tracker *tracker) +{ + struct ovpn_priv *ovpn; + struct net_device *dev; + int ifindex; + + if (GENL_REQ_ATTR_CHECK(info, OVPN_A_IFINDEX)) + return ERR_PTR(-EINVAL); + + ifindex = nla_get_u32(info->attrs[OVPN_A_IFINDEX]); + + rcu_read_lock(); + dev = dev_get_by_index_rcu(net, ifindex); + if (!dev) { + rcu_read_unlock(); + NL_SET_ERR_MSG_MOD(info->extack, + "ifindex does not match any interface"); + return ERR_PTR(-ENODEV); + } + + if (!ovpn_dev_is_valid(dev)) { + rcu_read_unlock(); + NL_SET_ERR_MSG_MOD(info->extack, + "specified interface is not ovpn"); + NL_SET_BAD_ATTR(info->extack, info->attrs[OVPN_A_IFINDEX]); + return ERR_PTR(-EINVAL); + } + + ovpn = netdev_priv(dev); + netdev_hold(dev, tracker, GFP_ATOMIC); + rcu_read_unlock(); + + return ovpn; +} + +int ovpn_nl_pre_doit(const struct genl_split_ops *ops, struct sk_buff *skb, + struct genl_info *info) +{ + netdevice_tracker *tracker = (netdevice_tracker *)&info->user_ptr[1]; + struct ovpn_priv *ovpn = ovpn_get_dev_from_attrs(genl_info_net(info), + info, tracker); + + if (IS_ERR(ovpn)) + return PTR_ERR(ovpn); + + info->user_ptr[0] = ovpn; + + return 0; +} + +void ovpn_nl_post_doit(const struct genl_split_ops *ops, struct sk_buff *skb, + struct genl_info *info) +{ + netdevice_tracker *tracker = (netdevice_tracker *)&info->user_ptr[1]; + struct ovpn_priv *ovpn = info->user_ptr[0]; + + if (ovpn) + netdev_put(ovpn->dev, tracker); +} + +static bool ovpn_nl_attr_sockaddr_remote(struct nlattr **attrs, + struct sockaddr_storage *ss) +{ + struct sockaddr_in6 *sin6; + struct sockaddr_in *sin; + struct in6_addr *in6; + __be16 port = 0; + __be32 *in; + + ss->ss_family = AF_UNSPEC; + + if (attrs[OVPN_A_PEER_REMOTE_PORT]) + port = nla_get_be16(attrs[OVPN_A_PEER_REMOTE_PORT]); + + if (attrs[OVPN_A_PEER_REMOTE_IPV4]) { + ss->ss_family = AF_INET; + in = nla_data(attrs[OVPN_A_PEER_REMOTE_IPV4]); + } else if (attrs[OVPN_A_PEER_REMOTE_IPV6]) { + ss->ss_family = AF_INET6; + in6 = nla_data(attrs[OVPN_A_PEER_REMOTE_IPV6]); + } else { + return false; + } + + switch (ss->ss_family) { + case AF_INET6: + /* If this is a regular IPv6 just break and move on, + * otherwise switch to AF_INET and extract the IPv4 accordingly + */ + if (!ipv6_addr_v4mapped(in6)) { + sin6 = (struct sockaddr_in6 *)ss; + sin6->sin6_port = port; + memcpy(&sin6->sin6_addr, in6, sizeof(*in6)); + break; + } + + /* v4-mapped-v6 address */ + ss->ss_family = AF_INET; + in = &in6->s6_addr32[3]; + fallthrough; + case AF_INET: + sin = (struct sockaddr_in *)ss; + sin->sin_port = port; + sin->sin_addr.s_addr = *in; + break; + } + + return true; +} + +static u8 *ovpn_nl_attr_local_ip(struct nlattr **attrs) +{ + u8 *addr6; + + if (!attrs[OVPN_A_PEER_LOCAL_IPV4] && !attrs[OVPN_A_PEER_LOCAL_IPV6]) + return NULL; + + 
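+	/* the returned pointer is later handed to ovpn_peer_reset_sockaddr(),
+	 * which interprets it as a raw in_addr or in6_addr depending on the
+	 * family of the remote endpoint
+	 */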
if (attrs[OVPN_A_PEER_LOCAL_IPV4])
+		return nla_data(attrs[OVPN_A_PEER_LOCAL_IPV4]);
+
+	addr6 = nla_data(attrs[OVPN_A_PEER_LOCAL_IPV6]);
+	/* this is an IPv4-mapped IPv6 address, therefore extract the actual
+	 * v4 address from the last 4 bytes
+	 */
+	if (ipv6_addr_v4mapped((struct in6_addr *)addr6))
+		return addr6 + 12;
+
+	return addr6;
+}
+
+static sa_family_t ovpn_nl_family_get(struct nlattr *addr4,
+				      struct nlattr *addr6)
+{
+	if (addr4)
+		return AF_INET;
+
+	if (addr6) {
+		if (ipv6_addr_v4mapped((struct in6_addr *)nla_data(addr6)))
+			return AF_INET;
+		return AF_INET6;
+	}
+
+	return AF_UNSPEC;
+}
+
+static int ovpn_nl_peer_precheck(struct ovpn_priv *ovpn,
+				 struct genl_info *info,
+				 struct nlattr **attrs)
+{
+	sa_family_t local_fam, remote_fam;
+
+	if (NL_REQ_ATTR_CHECK(info->extack, info->attrs[OVPN_A_PEER], attrs,
+			      OVPN_A_PEER_ID))
+		return -EINVAL;
+
+	if (attrs[OVPN_A_PEER_REMOTE_IPV4] && attrs[OVPN_A_PEER_REMOTE_IPV6]) {
+		NL_SET_ERR_MSG_MOD(info->extack,
+				   "cannot specify both remote IPv4 and IPv6 addresses");
+		return -EINVAL;
+	}
+
+	if (!attrs[OVPN_A_PEER_REMOTE_IPV4] &&
+	    !attrs[OVPN_A_PEER_REMOTE_IPV6] && attrs[OVPN_A_PEER_REMOTE_PORT]) {
+		NL_SET_ERR_MSG_MOD(info->extack,
+				   "cannot specify remote port without IP address");
+		return -EINVAL;
+	}
+
+	if (!attrs[OVPN_A_PEER_REMOTE_IPV4] &&
+	    attrs[OVPN_A_PEER_LOCAL_IPV4]) {
+		NL_SET_ERR_MSG_MOD(info->extack,
+				   "cannot specify local IPv4 address without remote");
+		return -EINVAL;
+	}
+
+	if (!attrs[OVPN_A_PEER_REMOTE_IPV6] &&
+	    attrs[OVPN_A_PEER_LOCAL_IPV6]) {
+		NL_SET_ERR_MSG_MOD(info->extack,
+				   "cannot specify local IPv6 address without remote");
+		return -EINVAL;
+	}
+
+	/* check that local and remote address families are the same even
+	 * after parsing v4mapped IPv6 addresses.
+	 * (if addresses are not provided, family will be AF_UNSPEC and
+	 * the check is skipped)
+	 */
+	local_fam = ovpn_nl_family_get(attrs[OVPN_A_PEER_LOCAL_IPV4],
+				       attrs[OVPN_A_PEER_LOCAL_IPV6]);
+	remote_fam = ovpn_nl_family_get(attrs[OVPN_A_PEER_REMOTE_IPV4],
+					attrs[OVPN_A_PEER_REMOTE_IPV6]);
+	if (local_fam != AF_UNSPEC && remote_fam != AF_UNSPEC &&
+	    local_fam != remote_fam) {
+		NL_SET_ERR_MSG_MOD(info->extack,
+				   "mismatching local and remote address families");
+		return -EINVAL;
+	}
+
+	if (remote_fam != AF_INET6 && attrs[OVPN_A_PEER_REMOTE_IPV6_SCOPE_ID]) {
+		NL_SET_ERR_MSG_MOD(info->extack,
+				   "cannot specify scope id without remote IPv6 address");
+		return -EINVAL;
+	}
+
+	/* VPN IPs are needed only in MP mode for selecting the right peer */
+	if (ovpn->mode == OVPN_MODE_P2P && (attrs[OVPN_A_PEER_VPN_IPV4] ||
+					    attrs[OVPN_A_PEER_VPN_IPV6])) {
+		NL_SET_ERR_MSG_FMT_MOD(info->extack,
+				       "unexpected VPN IP in P2P mode");
+		return -EINVAL;
+	}
+
+	if ((attrs[OVPN_A_PEER_KEEPALIVE_INTERVAL] &&
+	     !attrs[OVPN_A_PEER_KEEPALIVE_TIMEOUT]) ||
+	    (!attrs[OVPN_A_PEER_KEEPALIVE_INTERVAL] &&
+	     attrs[OVPN_A_PEER_KEEPALIVE_TIMEOUT])) {
+		NL_SET_ERR_MSG_FMT_MOD(info->extack,
+				       "keepalive interval and timeout are required together");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/**
+ * ovpn_nl_peer_modify - modify the peer attributes according to the incoming msg
+ * @peer: the peer to modify
+ * @info: generic netlink info from the user request
+ * @attrs: the attributes from the user request
+ *
+ * Return: a negative error code in case of failure, 0 on success or 1 on
+ *         success and the VPN IPs have been modified (requires rehashing in
+ *         MP mode)
+ */
+static int ovpn_nl_peer_modify(struct ovpn_peer *peer, struct genl_info *info,
+			       struct nlattr **attrs)
+{
+
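+	/* every update below is performed under peer->lock; returning 1
+	 * instead of 0 tells the caller that a VPN IP was modified and the
+	 * peer must be rehashed (relevant in MP mode only)
+	 */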
struct sockaddr_storage ss = {}; + void *local_ip = NULL; + u32 interv, timeout; + bool rehash = false; + int ret; + + spin_lock_bh(&peer->lock); + + if (ovpn_nl_attr_sockaddr_remote(attrs, &ss)) { + /* we carry the local IP in a generic container. + * ovpn_peer_reset_sockaddr() will properly interpret it + * based on ss.ss_family + */ + local_ip = ovpn_nl_attr_local_ip(attrs); + + /* set peer sockaddr */ + ret = ovpn_peer_reset_sockaddr(peer, &ss, local_ip); + if (ret < 0) { + NL_SET_ERR_MSG_FMT_MOD(info->extack, + "cannot set peer sockaddr: %d", + ret); + goto err_unlock; + } + dst_cache_reset(&peer->dst_cache); + } + + if (attrs[OVPN_A_PEER_VPN_IPV4]) { + rehash = true; + peer->vpn_addrs.ipv4.s_addr = + nla_get_in_addr(attrs[OVPN_A_PEER_VPN_IPV4]); + } + + if (attrs[OVPN_A_PEER_VPN_IPV6]) { + rehash = true; + peer->vpn_addrs.ipv6 = + nla_get_in6_addr(attrs[OVPN_A_PEER_VPN_IPV6]); + } + + /* when setting the keepalive, both parameters have to be configured */ + if (attrs[OVPN_A_PEER_KEEPALIVE_INTERVAL] && + attrs[OVPN_A_PEER_KEEPALIVE_TIMEOUT]) { + interv = nla_get_u32(attrs[OVPN_A_PEER_KEEPALIVE_INTERVAL]); + timeout = nla_get_u32(attrs[OVPN_A_PEER_KEEPALIVE_TIMEOUT]); + ovpn_peer_keepalive_set(peer, interv, timeout); + } + + netdev_dbg(peer->ovpn->dev, + "modify peer id=%u endpoint=%pIScp VPN-IPv4=%pI4 VPN-IPv6=%pI6c\n", + peer->id, &ss, + &peer->vpn_addrs.ipv4.s_addr, &peer->vpn_addrs.ipv6); + + spin_unlock_bh(&peer->lock); + + return rehash ? 1 : 0; +err_unlock: + spin_unlock_bh(&peer->lock); + return ret; +} + +int ovpn_nl_peer_new_doit(struct sk_buff *skb, struct genl_info *info) +{ + struct nlattr *attrs[OVPN_A_PEER_MAX + 1]; + struct ovpn_priv *ovpn = info->user_ptr[0]; + struct ovpn_socket *ovpn_sock; + struct socket *sock = NULL; + struct ovpn_peer *peer; + u32 sockfd, peer_id; + int ret; + + if (GENL_REQ_ATTR_CHECK(info, OVPN_A_PEER)) + return -EINVAL; + + ret = nla_parse_nested(attrs, OVPN_A_PEER_MAX, info->attrs[OVPN_A_PEER], + ovpn_peer_nl_policy, info->extack); + if (ret) + return ret; + + ret = ovpn_nl_peer_precheck(ovpn, info, attrs); + if (ret < 0) + return ret; + + if (NL_REQ_ATTR_CHECK(info->extack, info->attrs[OVPN_A_PEER], attrs, + OVPN_A_PEER_SOCKET)) + return -EINVAL; + + /* in MP mode VPN IPs are required for selecting the right peer */ + if (ovpn->mode == OVPN_MODE_MP && !attrs[OVPN_A_PEER_VPN_IPV4] && + !attrs[OVPN_A_PEER_VPN_IPV6]) { + NL_SET_ERR_MSG_FMT_MOD(info->extack, + "VPN IP must be provided in MP mode"); + return -EINVAL; + } + + peer_id = nla_get_u32(attrs[OVPN_A_PEER_ID]); + peer = ovpn_peer_new(ovpn, peer_id); + if (IS_ERR(peer)) { + NL_SET_ERR_MSG_FMT_MOD(info->extack, + "cannot create new peer object for peer %u: %ld", + peer_id, PTR_ERR(peer)); + return PTR_ERR(peer); + } + + /* lookup the fd in the kernel table and extract the socket object */ + sockfd = nla_get_u32(attrs[OVPN_A_PEER_SOCKET]); + /* sockfd_lookup() increases sock's refcounter */ + sock = sockfd_lookup(sockfd, &ret); + if (!sock) { + NL_SET_ERR_MSG_FMT_MOD(info->extack, + "cannot lookup peer socket (fd=%u): %d", + sockfd, ret); + ret = -ENOTSOCK; + goto peer_release; + } + + /* Only when using UDP as transport protocol the remote endpoint + * can be configured so that ovpn knows where to send packets to. 
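+	 * A typical userspace sketch would therefore pass OVPN_A_PEER_SOCKET
+	 * together with OVPN_A_PEER_REMOTE_IPV4 (or _IPV6) and
+	 * OVPN_A_PEER_REMOTE_PORT whenever the fd refers to a UDP socket.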
+	 */
+	if (sock->sk->sk_protocol == IPPROTO_UDP &&
+	    !attrs[OVPN_A_PEER_REMOTE_IPV4] &&
+	    !attrs[OVPN_A_PEER_REMOTE_IPV6]) {
+		NL_SET_ERR_MSG_FMT_MOD(info->extack,
+				       "missing remote IP address for UDP socket");
+		sockfd_put(sock);
+		ret = -EINVAL;
+		goto peer_release;
+	}
+
+	/* In case of TCP, the socket is connected to the peer and ovpn
+	 * will just send bytes over it, without the need to specify a
+	 * destination.
+	 */
+	if (sock->sk->sk_protocol == IPPROTO_TCP &&
+	    (attrs[OVPN_A_PEER_REMOTE_IPV4] ||
+	     attrs[OVPN_A_PEER_REMOTE_IPV6])) {
+		NL_SET_ERR_MSG_FMT_MOD(info->extack,
+				       "unexpected remote IP address with TCP socket");
+		sockfd_put(sock);
+		ret = -EINVAL;
+		goto peer_release;
+	}
+
+	ovpn_sock = ovpn_socket_new(sock, peer);
+	/* at this point we unconditionally drop the reference to the socket:
+	 * - in case of error, the socket has to be dropped
+	 * - in case of success, the socket is configured and userspace
+	 *   keeps the reference, so that it can trigger the final close()
+	 */
+	sockfd_put(sock);
+	if (IS_ERR(ovpn_sock)) {
+		NL_SET_ERR_MSG_FMT_MOD(info->extack,
+				       "cannot encapsulate socket: %ld",
+				       PTR_ERR(ovpn_sock));
+		ret = -ENOTSOCK;
+		goto peer_release;
+	}
+
+	rcu_assign_pointer(peer->sock, ovpn_sock);
+
+	ret = ovpn_nl_peer_modify(peer, info, attrs);
+	if (ret < 0)
+		goto sock_release;
+
+	ret = ovpn_peer_add(ovpn, peer);
+	if (ret < 0) {
+		NL_SET_ERR_MSG_FMT_MOD(info->extack,
+				       "cannot add new peer (id=%u) to hashtable: %d",
+				       peer->id, ret);
+		goto sock_release;
+	}
+
+	return 0;
+
+sock_release:
+	ovpn_socket_release(peer);
+peer_release:
+	/* release right away because peer was not yet hashed, thus it is not
+	 * used in any context
+	 */
+	ovpn_peer_release(peer);
+
+	return ret;
+}
+
+int ovpn_nl_peer_set_doit(struct sk_buff *skb, struct genl_info *info)
+{
+	struct nlattr *attrs[OVPN_A_PEER_MAX + 1];
+	struct ovpn_priv *ovpn = info->user_ptr[0];
+	struct ovpn_socket *sock;
+	struct ovpn_peer *peer;
+	u32 peer_id;
+	int ret;
+
+	if (GENL_REQ_ATTR_CHECK(info, OVPN_A_PEER))
+		return -EINVAL;
+
+	ret = nla_parse_nested(attrs, OVPN_A_PEER_MAX, info->attrs[OVPN_A_PEER],
+			       ovpn_peer_nl_policy, info->extack);
+	if (ret)
+		return ret;
+
+	ret = ovpn_nl_peer_precheck(ovpn, info, attrs);
+	if (ret < 0)
+		return ret;
+
+	if (attrs[OVPN_A_PEER_SOCKET]) {
+		NL_SET_ERR_MSG_FMT_MOD(info->extack,
+				       "socket cannot be modified");
+		return -EINVAL;
+	}
+
+	peer_id = nla_get_u32(attrs[OVPN_A_PEER_ID]);
+	peer = ovpn_peer_get_by_id(ovpn, peer_id);
+	if (!peer) {
+		NL_SET_ERR_MSG_FMT_MOD(info->extack,
+				       "cannot find peer with id %u", peer_id);
+		return -ENOENT;
+	}
+
+	/* when using a TCP socket the remote IP is not expected */
+	rcu_read_lock();
+	sock = rcu_dereference(peer->sock);
+	if (sock && sock->sock->sk->sk_protocol == IPPROTO_TCP &&
+	    (attrs[OVPN_A_PEER_REMOTE_IPV4] ||
+	     attrs[OVPN_A_PEER_REMOTE_IPV6])) {
+		rcu_read_unlock();
+		NL_SET_ERR_MSG_FMT_MOD(info->extack,
+				       "unexpected remote IP address with TCP socket");
+		ovpn_peer_put(peer);
+		return -EINVAL;
+	}
+	rcu_read_unlock();
+
+	spin_lock_bh(&ovpn->lock);
+	ret = ovpn_nl_peer_modify(peer, info, attrs);
+	if (ret < 0) {
+		spin_unlock_bh(&ovpn->lock);
+		ovpn_peer_put(peer);
+		return ret;
+	}
+
+	/* ret == 1 means that VPN IPv4/6 has been modified and rehashing
+	 * is required
+	 */
+	if (ret > 0)
+		ovpn_peer_hash_vpn_ip(peer);
+	spin_unlock_bh(&ovpn->lock);
+	ovpn_peer_put(peer);
+
+	return 0;
+}
+
+static int ovpn_nl_send_peer(struct sk_buff *skb, const struct genl_info *info,
+			     const struct ovpn_peer *peer, u32
portid, u32 seq, + int flags) +{ + const struct ovpn_bind *bind; + struct ovpn_socket *sock; + int ret = -EMSGSIZE; + struct nlattr *attr; + __be16 local_port; + void *hdr; + int id; + + hdr = genlmsg_put(skb, portid, seq, &ovpn_nl_family, flags, + OVPN_CMD_PEER_GET); + if (!hdr) + return -ENOBUFS; + + attr = nla_nest_start(skb, OVPN_A_PEER); + if (!attr) + goto err; + + rcu_read_lock(); + sock = rcu_dereference(peer->sock); + if (!sock) { + ret = -EINVAL; + goto err_unlock; + } + + if (!net_eq(genl_info_net(info), sock_net(sock->sock->sk))) { + id = peernet2id_alloc(genl_info_net(info), + sock_net(sock->sock->sk), + GFP_ATOMIC); + if (nla_put_s32(skb, OVPN_A_PEER_SOCKET_NETNSID, id)) + goto err_unlock; + } + local_port = inet_sk(sock->sock->sk)->inet_sport; + rcu_read_unlock(); + + if (nla_put_u32(skb, OVPN_A_PEER_ID, peer->id)) + goto err; + + if (peer->vpn_addrs.ipv4.s_addr != htonl(INADDR_ANY)) + if (nla_put_in_addr(skb, OVPN_A_PEER_VPN_IPV4, + peer->vpn_addrs.ipv4.s_addr)) + goto err; + + if (!ipv6_addr_equal(&peer->vpn_addrs.ipv6, &in6addr_any)) + if (nla_put_in6_addr(skb, OVPN_A_PEER_VPN_IPV6, + &peer->vpn_addrs.ipv6)) + goto err; + + if (nla_put_u32(skb, OVPN_A_PEER_KEEPALIVE_INTERVAL, + peer->keepalive_interval) || + nla_put_u32(skb, OVPN_A_PEER_KEEPALIVE_TIMEOUT, + peer->keepalive_timeout)) + goto err; + + rcu_read_lock(); + bind = rcu_dereference(peer->bind); + if (bind) { + if (bind->remote.in4.sin_family == AF_INET) { + if (nla_put_in_addr(skb, OVPN_A_PEER_REMOTE_IPV4, + bind->remote.in4.sin_addr.s_addr) || + nla_put_net16(skb, OVPN_A_PEER_REMOTE_PORT, + bind->remote.in4.sin_port) || + nla_put_in_addr(skb, OVPN_A_PEER_LOCAL_IPV4, + bind->local.ipv4.s_addr)) + goto err_unlock; + } else if (bind->remote.in4.sin_family == AF_INET6) { + if (nla_put_in6_addr(skb, OVPN_A_PEER_REMOTE_IPV6, + &bind->remote.in6.sin6_addr) || + nla_put_u32(skb, OVPN_A_PEER_REMOTE_IPV6_SCOPE_ID, + bind->remote.in6.sin6_scope_id) || + nla_put_net16(skb, OVPN_A_PEER_REMOTE_PORT, + bind->remote.in6.sin6_port) || + nla_put_in6_addr(skb, OVPN_A_PEER_LOCAL_IPV6, + &bind->local.ipv6)) + goto err_unlock; + } + } + rcu_read_unlock(); + + if (nla_put_net16(skb, OVPN_A_PEER_LOCAL_PORT, local_port) || + /* VPN RX stats */ + nla_put_uint(skb, OVPN_A_PEER_VPN_RX_BYTES, + atomic64_read(&peer->vpn_stats.rx.bytes)) || + nla_put_uint(skb, OVPN_A_PEER_VPN_RX_PACKETS, + atomic64_read(&peer->vpn_stats.rx.packets)) || + /* VPN TX stats */ + nla_put_uint(skb, OVPN_A_PEER_VPN_TX_BYTES, + atomic64_read(&peer->vpn_stats.tx.bytes)) || + nla_put_uint(skb, OVPN_A_PEER_VPN_TX_PACKETS, + atomic64_read(&peer->vpn_stats.tx.packets)) || + /* link RX stats */ + nla_put_uint(skb, OVPN_A_PEER_LINK_RX_BYTES, + atomic64_read(&peer->link_stats.rx.bytes)) || + nla_put_uint(skb, OVPN_A_PEER_LINK_RX_PACKETS, + atomic64_read(&peer->link_stats.rx.packets)) || + /* link TX stats */ + nla_put_uint(skb, OVPN_A_PEER_LINK_TX_BYTES, + atomic64_read(&peer->link_stats.tx.bytes)) || + nla_put_uint(skb, OVPN_A_PEER_LINK_TX_PACKETS, + atomic64_read(&peer->link_stats.tx.packets))) + goto err; + + nla_nest_end(skb, attr); + genlmsg_end(skb, hdr); + + return 0; +err_unlock: + rcu_read_unlock(); +err: + genlmsg_cancel(skb, hdr); + return ret; +} + +int ovpn_nl_peer_get_doit(struct sk_buff *skb, struct genl_info *info) +{ + struct nlattr *attrs[OVPN_A_PEER_MAX + 1]; + struct ovpn_priv *ovpn = info->user_ptr[0]; + struct ovpn_peer *peer; + struct sk_buff *msg; + u32 peer_id; + int ret; + + if (GENL_REQ_ATTR_CHECK(info, OVPN_A_PEER)) + return -EINVAL; + + ret = 
nla_parse_nested(attrs, OVPN_A_PEER_MAX, info->attrs[OVPN_A_PEER], + ovpn_peer_nl_policy, info->extack); + if (ret) + return ret; + + if (NL_REQ_ATTR_CHECK(info->extack, info->attrs[OVPN_A_PEER], attrs, + OVPN_A_PEER_ID)) + return -EINVAL; + + peer_id = nla_get_u32(attrs[OVPN_A_PEER_ID]); + peer = ovpn_peer_get_by_id(ovpn, peer_id); + if (!peer) { + NL_SET_ERR_MSG_FMT_MOD(info->extack, + "cannot find peer with id %u", peer_id); + return -ENOENT; + } + + msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); + if (!msg) { + ret = -ENOMEM; + goto err; + } + + ret = ovpn_nl_send_peer(msg, info, peer, info->snd_portid, + info->snd_seq, 0); + if (ret < 0) { + nlmsg_free(msg); + goto err; + } + + ret = genlmsg_reply(msg, info); +err: + ovpn_peer_put(peer); + return ret; +} + +int ovpn_nl_peer_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb) +{ + const struct genl_info *info = genl_info_dump(cb); + int bkt, last_idx = cb->args[1], dumped = 0; + netdevice_tracker tracker; + struct ovpn_priv *ovpn; + struct ovpn_peer *peer; + + ovpn = ovpn_get_dev_from_attrs(sock_net(cb->skb->sk), info, &tracker); + if (IS_ERR(ovpn)) + return PTR_ERR(ovpn); + + if (ovpn->mode == OVPN_MODE_P2P) { + /* if we already dumped a peer it means we are done */ + if (last_idx) + goto out; + + rcu_read_lock(); + peer = rcu_dereference(ovpn->peer); + if (peer) { + if (ovpn_nl_send_peer(skb, info, peer, + NETLINK_CB(cb->skb).portid, + cb->nlh->nlmsg_seq, + NLM_F_MULTI) == 0) + dumped++; + } + rcu_read_unlock(); + } else { + rcu_read_lock(); + hash_for_each_rcu(ovpn->peers->by_id, bkt, peer, + hash_entry_id) { + /* skip already dumped peers that were dumped by + * previous invocations + */ + if (last_idx > 0) { + last_idx--; + continue; + } + + if (ovpn_nl_send_peer(skb, info, peer, + NETLINK_CB(cb->skb).portid, + cb->nlh->nlmsg_seq, + NLM_F_MULTI) < 0) + break; + + /* count peers being dumped during this invocation */ + dumped++; + } + rcu_read_unlock(); + } + +out: + netdev_put(ovpn->dev, &tracker); + + /* sum up peers dumped in this message, so that at the next invocation + * we can continue from where we left + */ + cb->args[1] += dumped; + return skb->len; +} + +int ovpn_nl_peer_del_doit(struct sk_buff *skb, struct genl_info *info) +{ + struct nlattr *attrs[OVPN_A_PEER_MAX + 1]; + struct ovpn_priv *ovpn = info->user_ptr[0]; + struct ovpn_peer *peer; + u32 peer_id; + int ret; + + if (GENL_REQ_ATTR_CHECK(info, OVPN_A_PEER)) + return -EINVAL; + + ret = nla_parse_nested(attrs, OVPN_A_PEER_MAX, info->attrs[OVPN_A_PEER], + ovpn_peer_nl_policy, info->extack); + if (ret) + return ret; + + if (NL_REQ_ATTR_CHECK(info->extack, info->attrs[OVPN_A_PEER], attrs, + OVPN_A_PEER_ID)) + return -EINVAL; + + peer_id = nla_get_u32(attrs[OVPN_A_PEER_ID]); + peer = ovpn_peer_get_by_id(ovpn, peer_id); + if (!peer) { + NL_SET_ERR_MSG_FMT_MOD(info->extack, + "cannot find peer with id %u", peer_id); + return -ENOENT; + } + + netdev_dbg(ovpn->dev, "del peer %u\n", peer->id); + ret = ovpn_peer_del(peer, OVPN_DEL_PEER_REASON_USERSPACE); + ovpn_peer_put(peer); + + return ret; +} + +static int ovpn_nl_get_key_dir(struct genl_info *info, struct nlattr *key, + enum ovpn_cipher_alg cipher, + struct ovpn_key_direction *dir) +{ + struct nlattr *attrs[OVPN_A_KEYDIR_MAX + 1]; + int ret; + + ret = nla_parse_nested(attrs, OVPN_A_KEYDIR_MAX, key, + ovpn_keydir_nl_policy, info->extack); + if (ret) + return ret; + + switch (cipher) { + case OVPN_CIPHER_ALG_AES_GCM: + case OVPN_CIPHER_ALG_CHACHA20_POLY1305: + if (NL_REQ_ATTR_CHECK(info->extack, key, attrs, + 
OVPN_A_KEYDIR_CIPHER_KEY) ||
+		    NL_REQ_ATTR_CHECK(info->extack, key, attrs,
+				      OVPN_A_KEYDIR_NONCE_TAIL))
+			return -EINVAL;
+
+		dir->cipher_key = nla_data(attrs[OVPN_A_KEYDIR_CIPHER_KEY]);
+		dir->cipher_key_size = nla_len(attrs[OVPN_A_KEYDIR_CIPHER_KEY]);
+
+		/* These algorithms require a 96-bit nonce, which is built by
+		 * combining the 4-byte packet ID with the 8-byte nonce tail
+		 * provided by userspace
+		 */
+		dir->nonce_tail = nla_data(attrs[OVPN_A_KEYDIR_NONCE_TAIL]);
+		dir->nonce_tail_size = nla_len(attrs[OVPN_A_KEYDIR_NONCE_TAIL]);
+		break;
+	default:
+		NL_SET_ERR_MSG_MOD(info->extack, "unsupported cipher");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/**
+ * ovpn_nl_key_new_doit - configure a new key for the specified peer
+ * @skb: incoming netlink message
+ * @info: genetlink metadata
+ *
+ * This function allows the user to install a new key in the peer crypto
+ * state.
+ * Each peer has two 'slots', namely 'primary' and 'secondary', where
+ * keys can be installed. The key in the 'primary' slot is used for
+ * encryption, while both keys can be used for decryption by matching the
+ * key ID carried in the incoming packet.
+ *
+ * The user is responsible for rotating keys when necessary. The user
+ * may fetch peer traffic statistics via netlink in order to better
+ * identify the right time to rotate keys.
+ * The renegotiation follows these steps:
+ * 1. a new key is computed by the user and is installed in the 'secondary'
+ *    slot
+ * 2. at user discretion (usually after a predetermined time) 'primary' and
+ *    'secondary' contents are swapped and the new key starts being used for
+ *    encryption, while the old key is kept around for decryption of late
+ *    packets.
+ *
+ * Return: 0 on success or a negative error code otherwise.
+ */
+int ovpn_nl_key_new_doit(struct sk_buff *skb, struct genl_info *info)
+{
+	struct nlattr *attrs[OVPN_A_KEYCONF_MAX + 1];
+	struct ovpn_priv *ovpn = info->user_ptr[0];
+	struct ovpn_peer_key_reset pkr;
+	struct ovpn_peer *peer;
+	u32 peer_id;
+	int ret;
+
+	if (GENL_REQ_ATTR_CHECK(info, OVPN_A_KEYCONF))
+		return -EINVAL;
+
+	ret = nla_parse_nested(attrs, OVPN_A_KEYCONF_MAX,
+			       info->attrs[OVPN_A_KEYCONF],
+			       ovpn_keyconf_nl_policy, info->extack);
+	if (ret)
+		return ret;
+
+	if (NL_REQ_ATTR_CHECK(info->extack, info->attrs[OVPN_A_KEYCONF], attrs,
+			      OVPN_A_KEYCONF_PEER_ID))
+		return -EINVAL;
+
+	if (NL_REQ_ATTR_CHECK(info->extack, info->attrs[OVPN_A_KEYCONF], attrs,
+			      OVPN_A_KEYCONF_SLOT) ||
+	    NL_REQ_ATTR_CHECK(info->extack, info->attrs[OVPN_A_KEYCONF], attrs,
+			      OVPN_A_KEYCONF_KEY_ID) ||
+	    NL_REQ_ATTR_CHECK(info->extack, info->attrs[OVPN_A_KEYCONF], attrs,
+			      OVPN_A_KEYCONF_CIPHER_ALG) ||
+	    NL_REQ_ATTR_CHECK(info->extack, info->attrs[OVPN_A_KEYCONF], attrs,
+			      OVPN_A_KEYCONF_ENCRYPT_DIR) ||
+	    NL_REQ_ATTR_CHECK(info->extack, info->attrs[OVPN_A_KEYCONF], attrs,
+			      OVPN_A_KEYCONF_DECRYPT_DIR))
+		return -EINVAL;
+
+	pkr.slot = nla_get_u32(attrs[OVPN_A_KEYCONF_SLOT]);
+	pkr.key.key_id = nla_get_u32(attrs[OVPN_A_KEYCONF_KEY_ID]);
+	pkr.key.cipher_alg = nla_get_u32(attrs[OVPN_A_KEYCONF_CIPHER_ALG]);
+
+	ret = ovpn_nl_get_key_dir(info, attrs[OVPN_A_KEYCONF_ENCRYPT_DIR],
+				  pkr.key.cipher_alg, &pkr.key.encrypt);
+	if (ret < 0)
+		return ret;
+
+	ret = ovpn_nl_get_key_dir(info, attrs[OVPN_A_KEYCONF_DECRYPT_DIR],
+				  pkr.key.cipher_alg, &pkr.key.decrypt);
+	if (ret < 0)
+		return ret;
+
+	peer_id = nla_get_u32(attrs[OVPN_A_KEYCONF_PEER_ID]);
+	peer = ovpn_peer_get_by_id(ovpn, peer_id);
+	if (!peer) {
+		NL_SET_ERR_MSG_FMT_MOD(info->extack,
+				       "no peer with id %u to set key for",
+				       peer_id);
+
return -ENOENT;
+	}
+
+	ret = ovpn_crypto_state_reset(&peer->crypto, &pkr);
+	if (ret < 0) {
+		NL_SET_ERR_MSG_FMT_MOD(info->extack,
+				       "cannot install new key for peer %u",
+				       peer_id);
+		goto out;
+	}
+
+	netdev_dbg(ovpn->dev, "new key installed (id=%u) for peer %u\n",
+		   pkr.key.key_id, peer_id);
+out:
+	ovpn_peer_put(peer);
+	return ret;
+}
+
+static int ovpn_nl_send_key(struct sk_buff *skb, const struct genl_info *info,
+			    u32 peer_id, enum ovpn_key_slot slot,
+			    const struct ovpn_key_config *keyconf)
+{
+	struct nlattr *attr;
+	void *hdr;
+
+	hdr = genlmsg_put(skb, info->snd_portid, info->snd_seq, &ovpn_nl_family,
+			  0, OVPN_CMD_KEY_GET);
+	if (!hdr)
+		return -ENOBUFS;
+
+	attr = nla_nest_start(skb, OVPN_A_KEYCONF);
+	if (!attr)
+		goto err;
+
+	if (nla_put_u32(skb, OVPN_A_KEYCONF_PEER_ID, peer_id))
+		goto err;
+
+	if (nla_put_u32(skb, OVPN_A_KEYCONF_SLOT, slot) ||
+	    nla_put_u32(skb, OVPN_A_KEYCONF_KEY_ID, keyconf->key_id) ||
+	    nla_put_u32(skb, OVPN_A_KEYCONF_CIPHER_ALG, keyconf->cipher_alg))
+		goto err;
+
+	nla_nest_end(skb, attr);
+	genlmsg_end(skb, hdr);
+
+	return 0;
+err:
+	genlmsg_cancel(skb, hdr);
+	return -EMSGSIZE;
+}
+
+int ovpn_nl_key_get_doit(struct sk_buff *skb, struct genl_info *info)
+{
+	struct nlattr *attrs[OVPN_A_KEYCONF_MAX + 1];
+	struct ovpn_priv *ovpn = info->user_ptr[0];
+	struct ovpn_key_config keyconf = { 0 };
+	enum ovpn_key_slot slot;
+	struct ovpn_peer *peer;
+	struct sk_buff *msg;
+	u32 peer_id;
+	int ret;
+
+	if (GENL_REQ_ATTR_CHECK(info, OVPN_A_KEYCONF))
+		return -EINVAL;
+
+	ret = nla_parse_nested(attrs, OVPN_A_KEYCONF_MAX,
+			       info->attrs[OVPN_A_KEYCONF],
+			       ovpn_keyconf_nl_policy, info->extack);
+	if (ret)
+		return ret;
+
+	if (NL_REQ_ATTR_CHECK(info->extack, info->attrs[OVPN_A_KEYCONF], attrs,
+			      OVPN_A_KEYCONF_PEER_ID))
+		return -EINVAL;
+
+	if (NL_REQ_ATTR_CHECK(info->extack, info->attrs[OVPN_A_KEYCONF], attrs,
+			      OVPN_A_KEYCONF_SLOT))
+		return -EINVAL;
+
+	peer_id = nla_get_u32(attrs[OVPN_A_KEYCONF_PEER_ID]);
+	peer = ovpn_peer_get_by_id(ovpn, peer_id);
+	if (!peer) {
+		NL_SET_ERR_MSG_FMT_MOD(info->extack,
+				       "cannot find peer with id %u", peer_id);
+		return -ENOENT;
+	}
+
+	slot = nla_get_u32(attrs[OVPN_A_KEYCONF_SLOT]);
+
+	ret = ovpn_crypto_config_get(&peer->crypto, slot, &keyconf);
+	if (ret < 0) {
+		NL_SET_ERR_MSG_FMT_MOD(info->extack,
+				       "cannot extract key from slot %u for peer %u",
+				       slot, peer_id);
+		goto err;
+	}
+
+	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+	if (!msg) {
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	ret = ovpn_nl_send_key(msg, info, peer->id, slot, &keyconf);
+	if (ret < 0) {
+		nlmsg_free(msg);
+		goto err;
+	}
+
+	ret = genlmsg_reply(msg, info);
+err:
+	ovpn_peer_put(peer);
+	return ret;
+}
+
+int ovpn_nl_key_swap_doit(struct sk_buff *skb, struct genl_info *info)
+{
+	struct ovpn_priv *ovpn = info->user_ptr[0];
+	struct nlattr *attrs[OVPN_A_KEYCONF_MAX + 1];
+	struct ovpn_peer *peer;
+	u32 peer_id;
+	int ret;
+
+	if (GENL_REQ_ATTR_CHECK(info, OVPN_A_KEYCONF))
+		return -EINVAL;
+
+	ret = nla_parse_nested(attrs, OVPN_A_KEYCONF_MAX,
+			       info->attrs[OVPN_A_KEYCONF],
+			       ovpn_keyconf_nl_policy, info->extack);
+	if (ret)
+		return ret;
+
+	if (NL_REQ_ATTR_CHECK(info->extack, info->attrs[OVPN_A_KEYCONF], attrs,
+			      OVPN_A_KEYCONF_PEER_ID))
+		return -EINVAL;
+
+	peer_id = nla_get_u32(attrs[OVPN_A_KEYCONF_PEER_ID]);
+	peer = ovpn_peer_get_by_id(ovpn, peer_id);
+	if (!peer) {
+		NL_SET_ERR_MSG_FMT_MOD(info->extack,
+				       "no peer with id %u to swap keys for",
+				       peer_id);
+		return -ENOENT;
+	}
+
+	ovpn_crypto_key_slots_swap(&peer->crypto);
+
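+	/* from now on the former secondary key (installed via
+	 * OVPN_CMD_KEY_NEW) is used for encryption, while the old primary
+	 * remains available for decrypting late packets, as described in the
+	 * rekeying steps above ovpn_nl_key_new_doit()
+	 */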
ovpn_peer_put(peer); + + return 0; +} + +int ovpn_nl_key_del_doit(struct sk_buff *skb, struct genl_info *info) +{ + struct nlattr *attrs[OVPN_A_KEYCONF_MAX + 1]; + struct ovpn_priv *ovpn = info->user_ptr[0]; + enum ovpn_key_slot slot; + struct ovpn_peer *peer; + u32 peer_id; + int ret; + + if (GENL_REQ_ATTR_CHECK(info, OVPN_A_KEYCONF)) + return -EINVAL; + + ret = nla_parse_nested(attrs, OVPN_A_KEYCONF_MAX, + info->attrs[OVPN_A_KEYCONF], + ovpn_keyconf_nl_policy, info->extack); + if (ret) + return ret; + + if (NL_REQ_ATTR_CHECK(info->extack, info->attrs[OVPN_A_KEYCONF], attrs, + OVPN_A_KEYCONF_PEER_ID)) + return -EINVAL; + + if (NL_REQ_ATTR_CHECK(info->extack, info->attrs[OVPN_A_KEYCONF], attrs, + OVPN_A_KEYCONF_SLOT)) + return -EINVAL; + + peer_id = nla_get_u32(attrs[OVPN_A_KEYCONF_PEER_ID]); + slot = nla_get_u32(attrs[OVPN_A_KEYCONF_SLOT]); + + peer = ovpn_peer_get_by_id(ovpn, peer_id); + if (!peer) { + NL_SET_ERR_MSG_FMT_MOD(info->extack, + "no peer with id %u to delete key for", + peer_id); + return -ENOENT; + } + + ovpn_crypto_key_slot_delete(&peer->crypto, slot); + ovpn_peer_put(peer); + + return 0; +} + +/** + * ovpn_nl_peer_del_notify - notify userspace about peer being deleted + * @peer: the peer being deleted + * + * Return: 0 on success or a negative error code otherwise + */ +int ovpn_nl_peer_del_notify(struct ovpn_peer *peer) +{ + struct ovpn_socket *sock; + struct sk_buff *msg; + struct nlattr *attr; + int ret = -EMSGSIZE; + void *hdr; + + netdev_info(peer->ovpn->dev, "deleting peer with id %u, reason %d\n", + peer->id, peer->delete_reason); + + msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC); + if (!msg) + return -ENOMEM; + + hdr = genlmsg_put(msg, 0, 0, &ovpn_nl_family, 0, OVPN_CMD_PEER_DEL_NTF); + if (!hdr) { + ret = -ENOBUFS; + goto err_free_msg; + } + + if (nla_put_u32(msg, OVPN_A_IFINDEX, peer->ovpn->dev->ifindex)) + goto err_cancel_msg; + + attr = nla_nest_start(msg, OVPN_A_PEER); + if (!attr) + goto err_cancel_msg; + + if (nla_put_u32(msg, OVPN_A_PEER_DEL_REASON, peer->delete_reason)) + goto err_cancel_msg; + + if (nla_put_u32(msg, OVPN_A_PEER_ID, peer->id)) + goto err_cancel_msg; + + nla_nest_end(msg, attr); + + genlmsg_end(msg, hdr); + + rcu_read_lock(); + sock = rcu_dereference(peer->sock); + if (!sock) { + ret = -EINVAL; + goto err_unlock; + } + genlmsg_multicast_netns(&ovpn_nl_family, sock_net(sock->sock->sk), + msg, 0, OVPN_NLGRP_PEERS, GFP_ATOMIC); + rcu_read_unlock(); + + return 0; + +err_unlock: + rcu_read_unlock(); +err_cancel_msg: + genlmsg_cancel(msg, hdr); +err_free_msg: + nlmsg_free(msg); + return ret; +} + +/** + * ovpn_nl_key_swap_notify - notify userspace peer's key must be renewed + * @peer: the peer whose key needs to be renewed + * @key_id: the ID of the key that needs to be renewed + * + * Return: 0 on success or a negative error code otherwise + */ +int ovpn_nl_key_swap_notify(struct ovpn_peer *peer, u8 key_id) +{ + struct ovpn_socket *sock; + struct nlattr *k_attr; + struct sk_buff *msg; + int ret = -EMSGSIZE; + void *hdr; + + netdev_info(peer->ovpn->dev, "peer with id %u must rekey - primary key unusable.\n", + peer->id); + + msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC); + if (!msg) + return -ENOMEM; + + hdr = genlmsg_put(msg, 0, 0, &ovpn_nl_family, 0, OVPN_CMD_KEY_SWAP_NTF); + if (!hdr) { + ret = -ENOBUFS; + goto err_free_msg; + } + + if (nla_put_u32(msg, OVPN_A_IFINDEX, peer->ovpn->dev->ifindex)) + goto err_cancel_msg; + + k_attr = nla_nest_start(msg, OVPN_A_KEYCONF); + if (!k_attr) + goto err_cancel_msg; + + if (nla_put_u32(msg, 
OVPN_A_KEYCONF_PEER_ID, peer->id))
+		goto err_cancel_msg;
+
+	if (nla_put_u32(msg, OVPN_A_KEYCONF_KEY_ID, key_id))
+		goto err_cancel_msg;
+
+	nla_nest_end(msg, k_attr);
+	genlmsg_end(msg, hdr);
+
+	rcu_read_lock();
+	sock = rcu_dereference(peer->sock);
+	if (!sock) {
+		ret = -EINVAL;
+		goto err_unlock;
+	}
+	genlmsg_multicast_netns(&ovpn_nl_family, sock_net(sock->sock->sk),
+				msg, 0, OVPN_NLGRP_PEERS, GFP_ATOMIC);
+	rcu_read_unlock();
+
+	return 0;
+err_unlock:
+	rcu_read_unlock();
+err_cancel_msg:
+	genlmsg_cancel(msg, hdr);
+err_free_msg:
+	nlmsg_free(msg);
+	return ret;
+}
+
+/**
+ * ovpn_nl_register - perform any needed registration in the NL subsystem
+ *
+ * Return: 0 on success, a negative error code otherwise
+ */
+int __init ovpn_nl_register(void)
+{
+	int ret = genl_register_family(&ovpn_nl_family);
+
+	if (ret) {
+		pr_err("ovpn: genl_register_family failed: %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+/**
+ * ovpn_nl_unregister - undo any module-wide netlink registration
+ */
+void ovpn_nl_unregister(void)
+{
+	genl_unregister_family(&ovpn_nl_family);
+}
diff --git a/drivers/net/ovpn/netlink.h b/drivers/net/ovpn/netlink.h
new file mode 100644
index 0000000000000..8615dfc3c4720
--- /dev/null
+++ b/drivers/net/ovpn/netlink.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* OpenVPN data channel offload
+ *
+ * Copyright (C) 2020-2025 OpenVPN, Inc.
+ *
+ * Author: Antonio Quartulli
+ */
+
+#ifndef _NET_OVPN_NETLINK_H_
+#define _NET_OVPN_NETLINK_H_
+
+int ovpn_nl_register(void);
+void ovpn_nl_unregister(void);
+
+int ovpn_nl_peer_del_notify(struct ovpn_peer *peer);
+int ovpn_nl_key_swap_notify(struct ovpn_peer *peer, u8 key_id);
+
+#endif /* _NET_OVPN_NETLINK_H_ */
diff --git a/drivers/net/ovpn/ovpnpriv.h b/drivers/net/ovpn/ovpnpriv.h
new file mode 100644
index 0000000000000..5403cdc99a67c
--- /dev/null
+++ b/drivers/net/ovpn/ovpnpriv.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* OpenVPN data channel offload
+ *
+ * Copyright (C) 2019-2025 OpenVPN, Inc.
+ *
+ * Author: James Yonan
+ *	   Antonio Quartulli
+ */
+
+#ifndef _NET_OVPN_OVPNSTRUCT_H_
+#define _NET_OVPN_OVPNSTRUCT_H_
+
+#include
+#include
+#include
+#include
+
+/**
+ * struct ovpn_peer_collection - container of peers for MultiPeer mode
+ * @by_id: table of peers indexed by ID
+ * @by_vpn_addr4: table of peers indexed by VPN IPv4 address (items can be
+ *		  rehashed on the fly due to peer IP change)
+ * @by_vpn_addr6: table of peers indexed by VPN IPv6 address (items can be
+ *		  rehashed on the fly due to peer IP change)
+ * @by_transp_addr: table of peers indexed by transport address (items can be
+ *		    rehashed on the fly due to peer IP change)
+ */
+struct ovpn_peer_collection {
+	DECLARE_HASHTABLE(by_id, 12);
+	struct hlist_nulls_head by_vpn_addr4[1 << 12];
+	struct hlist_nulls_head by_vpn_addr6[1 << 12];
+	struct hlist_nulls_head by_transp_addr[1 << 12];
+};
+
+/**
+ * struct ovpn_priv - per ovpn interface state
+ * @dev: the actual netdev representing the tunnel
+ * @registered: whether dev is still registered with netdev or not
+ * @mode: device operation mode (i.e. p2p, mp, ..)
+ * @lock: protect this object + * @peers: data structures holding multi-peer references + * @peer: in P2P mode, this is the only remote peer + * @gro_cells: pointer to the Generic Receive Offload cell + * @keepalive_work: struct used to schedule keepalive periodic job + */ +struct ovpn_priv { + struct net_device *dev; + bool registered; + enum ovpn_mode mode; + spinlock_t lock; /* protect writing to the ovpn_priv object */ + struct ovpn_peer_collection *peers; + struct ovpn_peer __rcu *peer; + struct gro_cells gro_cells; + struct delayed_work keepalive_work; +}; + +#endif /* _NET_OVPN_OVPNSTRUCT_H_ */ diff --git a/drivers/net/ovpn/peer.c b/drivers/net/ovpn/peer.c new file mode 100644 index 0000000000000..0b1d26388dba9 --- /dev/null +++ b/drivers/net/ovpn/peer.c @@ -0,0 +1,1364 @@ +// SPDX-License-Identifier: GPL-2.0 +/* OpenVPN data channel offload + * + * Copyright (C) 2020-2025 OpenVPN, Inc. + * + * Author: James Yonan + * Antonio Quartulli + */ + +#include +#include +#include +#include + +#include "ovpnpriv.h" +#include "bind.h" +#include "pktid.h" +#include "crypto.h" +#include "io.h" +#include "main.h" +#include "netlink.h" +#include "peer.h" +#include "socket.h" + +static void unlock_ovpn(struct ovpn_priv *ovpn, + struct llist_head *release_list) + __releases(&ovpn->lock) +{ + struct ovpn_peer *peer; + + spin_unlock_bh(&ovpn->lock); + + llist_for_each_entry(peer, release_list->first, release_entry) { + ovpn_socket_release(peer); + ovpn_peer_put(peer); + } +} + +/** + * ovpn_peer_keepalive_set - configure keepalive values for peer + * @peer: the peer to configure + * @interval: outgoing keepalive interval + * @timeout: incoming keepalive timeout + */ +void ovpn_peer_keepalive_set(struct ovpn_peer *peer, u32 interval, u32 timeout) +{ + time64_t now = ktime_get_real_seconds(); + + netdev_dbg(peer->ovpn->dev, + "scheduling keepalive for peer %u: interval=%u timeout=%u\n", + peer->id, interval, timeout); + + peer->keepalive_interval = interval; + WRITE_ONCE(peer->last_sent, now); + peer->keepalive_xmit_exp = now + interval; + + peer->keepalive_timeout = timeout; + WRITE_ONCE(peer->last_recv, now); + peer->keepalive_recv_exp = now + timeout; + + /* now that interval and timeout have been changed, kick + * off the worker so that the next delay can be recomputed + */ + mod_delayed_work(system_wq, &peer->ovpn->keepalive_work, 0); +} + +/** + * ovpn_peer_keepalive_send - periodic worker sending keepalive packets + * @work: pointer to the work member of the related peer object + * + * NOTE: the reference to peer is not dropped because it gets inherited + * by ovpn_xmit_special() + */ +static void ovpn_peer_keepalive_send(struct work_struct *work) +{ + struct ovpn_peer *peer = container_of(work, struct ovpn_peer, + keepalive_work); + + local_bh_disable(); + ovpn_xmit_special(peer, ovpn_keepalive_message, + sizeof(ovpn_keepalive_message)); + local_bh_enable(); +} + +/** + * ovpn_peer_new - allocate and initialize a new peer object + * @ovpn: the openvpn instance inside which the peer should be created + * @id: the ID assigned to this peer + * + * Return: a pointer to the new peer on success or an error code otherwise + */ +struct ovpn_peer *ovpn_peer_new(struct ovpn_priv *ovpn, u32 id) +{ + struct ovpn_peer *peer; + int ret; + + /* alloc and init peer object */ + peer = kzalloc(sizeof(*peer), GFP_KERNEL); + if (!peer) + return ERR_PTR(-ENOMEM); + + peer->id = id; + peer->ovpn = ovpn; + + peer->vpn_addrs.ipv4.s_addr = htonl(INADDR_ANY); + peer->vpn_addrs.ipv6 = in6addr_any; + + 
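/* [Editorial note, not part of the original patch] unlock_ovpn() above
 * implements a "collect under lock, release after unlock" idiom: peers are
 * queued on a lockless llist while ovpn->lock (a BH spinlock) is held, and
 * the potentially sleeping cleanup (ovpn_socket_release()) runs only once
 * the lock has been dropped. A sketch of a typical caller, with the actual
 * removal logic elided:
 *
 *	LLIST_HEAD(release_list);
 *
 *	spin_lock_bh(&ovpn->lock);
 *	...unhash the peer from the lookup tables...
 *	llist_add(&peer->release_entry, &release_list);
 *	unlock_ovpn(ovpn, &release_list);   (drops the lock, then puts peers)
 */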
RCU_INIT_POINTER(peer->bind, NULL); + ovpn_crypto_state_init(&peer->crypto); + spin_lock_init(&peer->lock); + kref_init(&peer->refcount); + ovpn_peer_stats_init(&peer->vpn_stats); + ovpn_peer_stats_init(&peer->link_stats); + INIT_WORK(&peer->keepalive_work, ovpn_peer_keepalive_send); + + ret = dst_cache_init(&peer->dst_cache, GFP_KERNEL); + if (ret < 0) { + netdev_err(ovpn->dev, + "cannot initialize dst cache for peer %u\n", + peer->id); + kfree(peer); + return ERR_PTR(ret); + } + + netdev_hold(ovpn->dev, &peer->dev_tracker, GFP_KERNEL); + + return peer; +} + +/** + * ovpn_peer_reset_sockaddr - recreate binding for peer + * @peer: peer to recreate the binding for + * @ss: sockaddr to use as remote endpoint for the binding + * @local_ip: local IP for the binding + * + * Return: 0 on success or a negative error code otherwise + */ +int ovpn_peer_reset_sockaddr(struct ovpn_peer *peer, + const struct sockaddr_storage *ss, + const void *local_ip) +{ + struct ovpn_bind *bind; + size_t ip_len; + + lockdep_assert_held(&peer->lock); + + /* create new ovpn_bind object */ + bind = ovpn_bind_from_sockaddr(ss); + if (IS_ERR(bind)) + return PTR_ERR(bind); + + if (local_ip) { + if (ss->ss_family == AF_INET) { + ip_len = sizeof(struct in_addr); + } else if (ss->ss_family == AF_INET6) { + ip_len = sizeof(struct in6_addr); + } else { + net_dbg_ratelimited("%s: invalid family %u for remote endpoint for peer %u\n", + netdev_name(peer->ovpn->dev), + ss->ss_family, peer->id); + kfree(bind); + return -EINVAL; + } + + memcpy(&bind->local, local_ip, ip_len); + } + + /* set binding */ + ovpn_bind_reset(peer, bind); + + return 0; +} + +/* variable name __tbl2 needs to be different from __tbl1 + * in the macro below to avoid confusing clang + */ +#define ovpn_get_hash_slot(_tbl, _key, _key_len) ({ \ + typeof(_tbl) *__tbl2 = &(_tbl); \ + jhash(_key, _key_len, 0) % HASH_SIZE(*__tbl2); \ +}) + +#define ovpn_get_hash_head(_tbl, _key, _key_len) ({ \ + typeof(_tbl) *__tbl1 = &(_tbl); \ + &(*__tbl1)[ovpn_get_hash_slot(*__tbl1, _key, _key_len)];\ +}) + +/** + * ovpn_peer_endpoints_update - update remote or local endpoint for peer + * @peer: peer to update the remote endpoint for + * @skb: incoming packet to retrieve the source/destination address from + */ +void ovpn_peer_endpoints_update(struct ovpn_peer *peer, struct sk_buff *skb) +{ + struct hlist_nulls_head *nhead; + struct sockaddr_storage ss; + struct sockaddr_in6 *sa6; + bool reset_cache = false; + struct sockaddr_in *sa; + struct ovpn_bind *bind; + const void *local_ip; + size_t salen = 0; + + spin_lock_bh(&peer->lock); + bind = rcu_dereference_protected(peer->bind, + lockdep_is_held(&peer->lock)); + if (unlikely(!bind)) + goto unlock; + + switch (skb->protocol) { + case htons(ETH_P_IP): + /* float check */ + if (unlikely(!ovpn_bind_skb_src_match(bind, skb))) { + /* unconditionally save local endpoint in case + * of float, as it may have changed as well + */ + local_ip = &ip_hdr(skb)->daddr; + sa = (struct sockaddr_in *)&ss; + sa->sin_family = AF_INET; + sa->sin_addr.s_addr = ip_hdr(skb)->saddr; + sa->sin_port = udp_hdr(skb)->source; + salen = sizeof(*sa); + reset_cache = true; + break; + } + + /* if no float happened, let's double check if the local endpoint + * has changed + */ + if (unlikely(bind->local.ipv4.s_addr != ip_hdr(skb)->daddr)) { + net_dbg_ratelimited("%s: learning local IPv4 for peer %d (%pI4 -> %pI4)\n", + netdev_name(peer->ovpn->dev), + peer->id, &bind->local.ipv4.s_addr, + &ip_hdr(skb)->daddr); + bind->local.ipv4.s_addr = ip_hdr(skb)->daddr; + 
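/* [Editorial note, not part of the original patch] The two hashing macros
 * above reduce an arbitrary key to a bucket: jhash() maps the key to 32
 * bits and the result is folded modulo HASH_SIZE() of the table. Usage
 * mirrors what this file does later for the ID table:
 *
 *	u32 peer_id = 5;
 *	struct hlist_head *head;
 *
 *	head = ovpn_get_hash_head(ovpn->peers->by_id, &peer_id,
 *				  sizeof(peer_id));
 */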
reset_cache = true; + } + break; + case htons(ETH_P_IPV6): + /* float check */ + if (unlikely(!ovpn_bind_skb_src_match(bind, skb))) { + /* unconditionally save local endpoint in case + * of float, as it may have changed as well + */ + local_ip = &ipv6_hdr(skb)->daddr; + sa6 = (struct sockaddr_in6 *)&ss; + sa6->sin6_family = AF_INET6; + sa6->sin6_addr = ipv6_hdr(skb)->saddr; + sa6->sin6_port = udp_hdr(skb)->source; + sa6->sin6_scope_id = ipv6_iface_scope_id(&ipv6_hdr(skb)->saddr, + skb->skb_iif); + salen = sizeof(*sa6); + reset_cache = true; + break; + } + + /* if no float happened, let's double check if the local endpoint + * has changed + */ + if (unlikely(!ipv6_addr_equal(&bind->local.ipv6, + &ipv6_hdr(skb)->daddr))) { + net_dbg_ratelimited("%s: learning local IPv6 for peer %d (%pI6c -> %pI6c)\n", + netdev_name(peer->ovpn->dev), + peer->id, &bind->local.ipv6, + &ipv6_hdr(skb)->daddr); + bind->local.ipv6 = ipv6_hdr(skb)->daddr; + reset_cache = true; + } + break; + default: + goto unlock; + } + + if (unlikely(reset_cache)) + dst_cache_reset(&peer->dst_cache); + + /* if the peer did not float, we can bail out now */ + if (likely(!salen)) + goto unlock; + + if (unlikely(ovpn_peer_reset_sockaddr(peer, + (struct sockaddr_storage *)&ss, + local_ip) < 0)) + goto unlock; + + net_dbg_ratelimited("%s: peer %d floated to %pIScp\n", + netdev_name(peer->ovpn->dev), peer->id, &ss); + + spin_unlock_bh(&peer->lock); + + /* rehashing is required only in MP mode as P2P has one peer + * only and thus there is no hashtable + */ + if (peer->ovpn->mode == OVPN_MODE_MP) { + spin_lock_bh(&peer->ovpn->lock); + spin_lock_bh(&peer->lock); + bind = rcu_dereference_protected(peer->bind, + lockdep_is_held(&peer->lock)); + if (unlikely(!bind)) { + spin_unlock_bh(&peer->lock); + spin_unlock_bh(&peer->ovpn->lock); + return; + } + + /* This function may be invoked concurrently, therefore another + * float may have happened in parallel: perform rehashing + * using the peer->bind->remote directly as key + */ + + switch (bind->remote.in4.sin_family) { + case AF_INET: + salen = sizeof(*sa); + break; + case AF_INET6: + salen = sizeof(*sa6); + break; + } + + /* remove old hashing */ + hlist_nulls_del_init_rcu(&peer->hash_entry_transp_addr); + /* re-add with new transport address */ + nhead = ovpn_get_hash_head(peer->ovpn->peers->by_transp_addr, + &bind->remote, salen); + hlist_nulls_add_head_rcu(&peer->hash_entry_transp_addr, nhead); + spin_unlock_bh(&peer->lock); + spin_unlock_bh(&peer->ovpn->lock); + } + return; +unlock: + spin_unlock_bh(&peer->lock); +} + +/** + * ovpn_peer_release_rcu - RCU callback performing last peer release steps + * @head: RCU member of the ovpn_peer + */ +static void ovpn_peer_release_rcu(struct rcu_head *head) +{ + struct ovpn_peer *peer = container_of(head, struct ovpn_peer, rcu); + + /* this call will immediately free the dst_cache, therefore we + * perform it in the RCU callback, when all contexts are done + */ + dst_cache_destroy(&peer->dst_cache); + kfree(peer); +} + +/** + * ovpn_peer_release - release peer private members + * @peer: the peer to release + */ +void ovpn_peer_release(struct ovpn_peer *peer) +{ + ovpn_crypto_state_release(&peer->crypto); + spin_lock_bh(&peer->lock); + ovpn_bind_reset(peer, NULL); + spin_unlock_bh(&peer->lock); + call_rcu(&peer->rcu, ovpn_peer_release_rcu); + netdev_put(peer->ovpn->dev, &peer->dev_tracker); +} + +/** + * ovpn_peer_release_kref - callback for kref_put + * @kref: the kref object belonging to the peer + */ +void ovpn_peer_release_kref(struct kref *kref)
+{ + struct ovpn_peer *peer = container_of(kref, struct ovpn_peer, refcount); + + ovpn_peer_release(peer); +} + +/** + * ovpn_peer_skb_to_sockaddr - fill sockaddr with skb source address + * @skb: the packet to extract data from + * @ss: the sockaddr to fill + * + * Return: sockaddr length on success or -1 otherwise + */ +static int ovpn_peer_skb_to_sockaddr(struct sk_buff *skb, + struct sockaddr_storage *ss) +{ + struct sockaddr_in6 *sa6; + struct sockaddr_in *sa4; + + switch (skb->protocol) { + case htons(ETH_P_IP): + sa4 = (struct sockaddr_in *)ss; + sa4->sin_family = AF_INET; + sa4->sin_addr.s_addr = ip_hdr(skb)->saddr; + sa4->sin_port = udp_hdr(skb)->source; + return sizeof(*sa4); + case htons(ETH_P_IPV6): + sa6 = (struct sockaddr_in6 *)ss; + sa6->sin6_family = AF_INET6; + sa6->sin6_addr = ipv6_hdr(skb)->saddr; + sa6->sin6_port = udp_hdr(skb)->source; + return sizeof(*sa6); + } + + return -1; +} + +/** + * ovpn_nexthop_from_skb4 - retrieve IPv4 nexthop for outgoing skb + * @skb: the outgoing packet + * + * Return: the IPv4 of the nexthop + */ +static __be32 ovpn_nexthop_from_skb4(struct sk_buff *skb) +{ + const struct rtable *rt = skb_rtable(skb); + + if (rt && rt->rt_uses_gateway) + return rt->rt_gw4; + + return ip_hdr(skb)->daddr; +} + +/** + * ovpn_nexthop_from_skb6 - retrieve IPv6 nexthop for outgoing skb + * @skb: the outgoing packet + * + * Return: the IPv6 of the nexthop + */ +static struct in6_addr ovpn_nexthop_from_skb6(struct sk_buff *skb) +{ + const struct rt6_info *rt = skb_rt6_info(skb); + + if (!rt || !(rt->rt6i_flags & RTF_GATEWAY)) + return ipv6_hdr(skb)->daddr; + + return rt->rt6i_gateway; +} + +/** + * ovpn_peer_get_by_vpn_addr4 - retrieve peer by its VPN IPv4 address + * @ovpn: the openvpn instance to search + * @addr: VPN IPv4 to use as search key + * + * Refcounter is not increased for the returned peer. + * + * Return: the peer if found or NULL otherwise + */ +static struct ovpn_peer *ovpn_peer_get_by_vpn_addr4(struct ovpn_priv *ovpn, + __be32 addr) +{ + struct hlist_nulls_head *nhead; + struct hlist_nulls_node *ntmp; + struct ovpn_peer *tmp; + unsigned int slot; + +begin: + slot = ovpn_get_hash_slot(ovpn->peers->by_vpn_addr4, &addr, + sizeof(addr)); + nhead = &ovpn->peers->by_vpn_addr4[slot]; + + hlist_nulls_for_each_entry_rcu(tmp, ntmp, nhead, hash_entry_addr4) + if (addr == tmp->vpn_addrs.ipv4.s_addr) + return tmp; + + /* item may have moved during lookup - check nulls and restart + * if that's the case + */ + if (get_nulls_value(ntmp) != slot) + goto begin; + + return NULL; +} + +/** + * ovpn_peer_get_by_vpn_addr6 - retrieve peer by its VPN IPv6 address + * @ovpn: the openvpn instance to search + * @addr: VPN IPv6 to use as search key + * + * Refcounter is not increased for the returned peer. 
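 *
 * [Editorial note, not part of the original patch] Like the IPv4 variant
 * above, this walk relies on hlist_nulls: every chain terminates in a
 * "nulls" value encoding its bucket, so a lockless reader that raced with
 * a rehash can detect that it drifted onto a different chain and restart.
 * The generic reader pattern, with compute_slot()/match() as hypothetical
 * helpers, is:
 *
 * begin:
 *	slot = compute_slot(key);
 *	hlist_nulls_for_each_entry_rcu(tmp, ntmp, &table[slot], node)
 *		if (match(tmp, key))
 *			return tmp;
 *	if (get_nulls_value(ntmp) != slot)
 *		goto begin;
 *	return NULL;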
+ * + * Return: the peer if found or NULL otherwise + */ +static struct ovpn_peer *ovpn_peer_get_by_vpn_addr6(struct ovpn_priv *ovpn, + struct in6_addr *addr) +{ + struct hlist_nulls_head *nhead; + struct hlist_nulls_node *ntmp; + struct ovpn_peer *tmp; + unsigned int slot; + +begin: + slot = ovpn_get_hash_slot(ovpn->peers->by_vpn_addr6, addr, + sizeof(*addr)); + nhead = &ovpn->peers->by_vpn_addr6[slot]; + + hlist_nulls_for_each_entry_rcu(tmp, ntmp, nhead, hash_entry_addr6) + if (ipv6_addr_equal(addr, &tmp->vpn_addrs.ipv6)) + return tmp; + + /* item may have moved during lookup - check nulls and restart + * if that's the case + */ + if (get_nulls_value(ntmp) != slot) + goto begin; + + return NULL; +} + +/** + * ovpn_peer_transp_match - check if sockaddr and peer binding match + * @peer: the peer to get the binding from + * @ss: the sockaddr to match + * + * Return: true if sockaddr and binding match or false otherwise + */ +static bool ovpn_peer_transp_match(const struct ovpn_peer *peer, + const struct sockaddr_storage *ss) +{ + struct ovpn_bind *bind = rcu_dereference(peer->bind); + struct sockaddr_in6 *sa6; + struct sockaddr_in *sa4; + + if (unlikely(!bind)) + return false; + + if (ss->ss_family != bind->remote.in4.sin_family) + return false; + + switch (ss->ss_family) { + case AF_INET: + sa4 = (struct sockaddr_in *)ss; + if (sa4->sin_addr.s_addr != bind->remote.in4.sin_addr.s_addr) + return false; + if (sa4->sin_port != bind->remote.in4.sin_port) + return false; + break; + case AF_INET6: + sa6 = (struct sockaddr_in6 *)ss; + if (!ipv6_addr_equal(&sa6->sin6_addr, + &bind->remote.in6.sin6_addr)) + return false; + if (sa6->sin6_port != bind->remote.in6.sin6_port) + return false; + break; + default: + return false; + } + + return true; +} + +/** + * ovpn_peer_get_by_transp_addr_p2p - get peer by transport address in a P2P + * instance + * @ovpn: the openvpn instance to search + * @ss: the transport socket address + * + * Return: the peer if found or NULL otherwise + */ +static struct ovpn_peer * +ovpn_peer_get_by_transp_addr_p2p(struct ovpn_priv *ovpn, + struct sockaddr_storage *ss) +{ + struct ovpn_peer *tmp, *peer = NULL; + + rcu_read_lock(); + tmp = rcu_dereference(ovpn->peer); + if (likely(tmp && ovpn_peer_transp_match(tmp, ss) && + ovpn_peer_hold(tmp))) + peer = tmp; + rcu_read_unlock(); + + return peer; +} + +/** + * ovpn_peer_get_by_transp_addr - retrieve peer by transport address + * @ovpn: the openvpn instance to search + * @skb: the skb to retrieve the source transport address from + * + * Return: a pointer to the peer if found or NULL otherwise + */ +struct ovpn_peer *ovpn_peer_get_by_transp_addr(struct ovpn_priv *ovpn, + struct sk_buff *skb) +{ + struct ovpn_peer *tmp, *peer = NULL; + struct sockaddr_storage ss = { 0 }; + struct hlist_nulls_head *nhead; + struct hlist_nulls_node *ntmp; + unsigned int slot; + ssize_t sa_len; + + sa_len = ovpn_peer_skb_to_sockaddr(skb, &ss); + if (unlikely(sa_len < 0)) + return NULL; + + if (ovpn->mode == OVPN_MODE_P2P) + return ovpn_peer_get_by_transp_addr_p2p(ovpn, &ss); + + rcu_read_lock(); +begin: + slot = ovpn_get_hash_slot(ovpn->peers->by_transp_addr, &ss, sa_len); + nhead = &ovpn->peers->by_transp_addr[slot]; + + hlist_nulls_for_each_entry_rcu(tmp, ntmp, nhead, + hash_entry_transp_addr) { + if (!ovpn_peer_transp_match(tmp, &ss)) + continue; + + if (!ovpn_peer_hold(tmp)) + continue; + + peer = tmp; + break; + } + + /* item may have moved during lookup - check nulls and restart + * if that's the case + */ + if (!peer && get_nulls_value(ntmp) 
!= slot) + goto begin; + rcu_read_unlock(); + + return peer; +} + +/** + * ovpn_peer_get_by_id_p2p - get peer by ID in a P2P instance + * @ovpn: the openvpn instance to search + * @peer_id: the ID of the peer to find + * + * Return: the peer if found or NULL otherwise + */ +static struct ovpn_peer *ovpn_peer_get_by_id_p2p(struct ovpn_priv *ovpn, + u32 peer_id) +{ + struct ovpn_peer *tmp, *peer = NULL; + + rcu_read_lock(); + tmp = rcu_dereference(ovpn->peer); + if (likely(tmp && tmp->id == peer_id && ovpn_peer_hold(tmp))) + peer = tmp; + rcu_read_unlock(); + + return peer; +} + +/** + * ovpn_peer_get_by_id - retrieve peer by ID + * @ovpn: the openvpn instance to search + * @peer_id: the unique peer identifier to match + * + * Return: a pointer to the peer if found or NULL otherwise + */ +struct ovpn_peer *ovpn_peer_get_by_id(struct ovpn_priv *ovpn, u32 peer_id) +{ + struct ovpn_peer *tmp, *peer = NULL; + struct hlist_head *head; + + if (ovpn->mode == OVPN_MODE_P2P) + return ovpn_peer_get_by_id_p2p(ovpn, peer_id); + + head = ovpn_get_hash_head(ovpn->peers->by_id, &peer_id, + sizeof(peer_id)); + + rcu_read_lock(); + hlist_for_each_entry_rcu(tmp, head, hash_entry_id) { + if (tmp->id != peer_id) + continue; + + if (!ovpn_peer_hold(tmp)) + continue; + + peer = tmp; + break; + } + rcu_read_unlock(); + + return peer; +} + +static void ovpn_peer_remove(struct ovpn_peer *peer, + enum ovpn_del_peer_reason reason, + struct llist_head *release_list) +{ + lockdep_assert_held(&peer->ovpn->lock); + + switch (peer->ovpn->mode) { + case OVPN_MODE_MP: + /* prevent double remove */ + if (hlist_unhashed(&peer->hash_entry_id)) + return; + + hlist_del_init_rcu(&peer->hash_entry_id); + hlist_nulls_del_init_rcu(&peer->hash_entry_addr4); + hlist_nulls_del_init_rcu(&peer->hash_entry_addr6); + hlist_nulls_del_init_rcu(&peer->hash_entry_transp_addr); + break; + case OVPN_MODE_P2P: + /* prevent double remove */ + if (peer != rcu_access_pointer(peer->ovpn->peer)) + return; + + RCU_INIT_POINTER(peer->ovpn->peer, NULL); + /* in P2P mode the carrier is switched off when the peer is + * deleted so that third party protocols can react accordingly + */ + netif_carrier_off(peer->ovpn->dev); + break; + } + + peer->delete_reason = reason; + ovpn_nl_peer_del_notify(peer); + + /* append to provided list for later socket release and ref drop */ + llist_add(&peer->release_entry, release_list); +} + +/** + * ovpn_peer_get_by_dst - Lookup peer to send skb to + * @ovpn: the private data representing the current VPN session + * @skb: the skb to extract the destination address from + * + * This function takes a tunnel packet and looks up the peer to send it to + * after encapsulation. The skb is expected to be the in-tunnel packet, without + * any OpenVPN related header. + * + * Assume that the IP header is accessible in the skb data. + * + * Return: the peer if found or NULL otherwise. 
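 *
 * [Editorial note, not part of the original patch] As in the other lookup
 * helpers, RCU traversal is paired with ovpn_peer_hold(), i.e.
 * kref_get_unless_zero(): an entry whose refcount already dropped to zero
 * may still be visible to RCU readers and must be treated as a miss rather
 * than resurrected:
 *
 *	rcu_read_lock();
 *	peer = ...candidate found in the table...;
 *	if (peer && !ovpn_peer_hold(peer))
 *		peer = NULL;
 *	rcu_read_unlock();
 *
 * On success the caller owns a reference and must drop it with
 * ovpn_peer_put().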
+ */ +struct ovpn_peer *ovpn_peer_get_by_dst(struct ovpn_priv *ovpn, + struct sk_buff *skb) +{ + struct ovpn_peer *peer = NULL; + struct in6_addr addr6; + __be32 addr4; + + /* in P2P mode, no matter the destination, packets are always sent to + * the single peer listening on the other side + */ + if (ovpn->mode == OVPN_MODE_P2P) { + rcu_read_lock(); + peer = rcu_dereference(ovpn->peer); + if (unlikely(peer && !ovpn_peer_hold(peer))) + peer = NULL; + rcu_read_unlock(); + return peer; + } + + rcu_read_lock(); + switch (skb->protocol) { + case htons(ETH_P_IP): + addr4 = ovpn_nexthop_from_skb4(skb); + peer = ovpn_peer_get_by_vpn_addr4(ovpn, addr4); + break; + case htons(ETH_P_IPV6): + addr6 = ovpn_nexthop_from_skb6(skb); + peer = ovpn_peer_get_by_vpn_addr6(ovpn, &addr6); + break; + } + + if (unlikely(peer && !ovpn_peer_hold(peer))) + peer = NULL; + rcu_read_unlock(); + + return peer; +} + +/** + * ovpn_nexthop_from_rt4 - look up the IPv4 nexthop for the given destination + * @ovpn: the private data representing the current VPN session + * @dest: the destination to be looked up + * + * Looks up in the IPv4 system routing table the IP of the nexthop to be used + * to reach the destination passed as argument. If no nexthop can be found, the + * destination itself is returned as it probably has to be used as nexthop. + * + * Return: the IP of the next hop if found or dest itself otherwise + */ +static __be32 ovpn_nexthop_from_rt4(struct ovpn_priv *ovpn, __be32 dest) +{ + struct rtable *rt; + struct flowi4 fl = { + .daddr = dest + }; + + rt = ip_route_output_flow(dev_net(ovpn->dev), &fl, NULL); + if (IS_ERR(rt)) { + net_dbg_ratelimited("%s: no route to host %pI4\n", + netdev_name(ovpn->dev), &dest); + /* if we end up here this packet is probably going to be + * thrown away later + */ + return dest; + } + + if (!rt->rt_uses_gateway) + goto out; + + dest = rt->rt_gw4; +out: + ip_rt_put(rt); + return dest; +} + +/** + * ovpn_nexthop_from_rt6 - look up the IPv6 nexthop for the given destination + * @ovpn: the private data representing the current VPN session + * @dest: the destination to be looked up + * + * Looks up in the IPv6 system routing table the IP of the nexthop to be used + * to reach the destination passed as argument. If no nexthop can be found, the + * destination itself is returned as it probably has to be used as nexthop. 
+ * + * Return: the IP of the next hop if found or dest itself otherwise + */ +static struct in6_addr ovpn_nexthop_from_rt6(struct ovpn_priv *ovpn, + struct in6_addr dest) +{ +#if IS_ENABLED(CONFIG_IPV6) + struct dst_entry *entry; + struct rt6_info *rt; + struct flowi6 fl = { + .daddr = dest, + }; + + entry = ipv6_stub->ipv6_dst_lookup_flow(dev_net(ovpn->dev), NULL, &fl, + NULL); + if (IS_ERR(entry)) { + net_dbg_ratelimited("%s: no route to host %pI6c\n", + netdev_name(ovpn->dev), &dest); + /* if we end up here this packet is probably going to be + * thrown away later + */ + return dest; + } + + rt = dst_rt6_info(entry); + + if (!(rt->rt6i_flags & RTF_GATEWAY)) + goto out; + + dest = rt->rt6i_gateway; +out: + dst_release((struct dst_entry *)rt); +#endif + return dest; +} + +/** + * ovpn_peer_check_by_src - check that skb source is routed via peer + * @ovpn: the openvpn instance to search + * @skb: the packet to extract source address from + * @peer: the peer to check against the source address + * + * Return: true if the peer is matching or false otherwise + */ +bool ovpn_peer_check_by_src(struct ovpn_priv *ovpn, struct sk_buff *skb, + struct ovpn_peer *peer) +{ + bool match = false; + struct in6_addr addr6; + __be32 addr4; + + if (ovpn->mode == OVPN_MODE_P2P) { + /* in P2P mode, no matter the destination, packets are always + * sent to the single peer listening on the other side + */ + return peer == rcu_access_pointer(ovpn->peer); + } + + /* This function performs a reverse path check, therefore we now + * lookup the nexthop we would use if we wanted to route a packet + * to the source IP. If the nexthop matches the sender we know the + * latter is valid and we allow the packet to come in + */ + + switch (skb->protocol) { + case htons(ETH_P_IP): + addr4 = ovpn_nexthop_from_rt4(ovpn, ip_hdr(skb)->saddr); + rcu_read_lock(); + match = (peer == ovpn_peer_get_by_vpn_addr4(ovpn, addr4)); + rcu_read_unlock(); + break; + case htons(ETH_P_IPV6): + addr6 = ovpn_nexthop_from_rt6(ovpn, ipv6_hdr(skb)->saddr); + rcu_read_lock(); + match = (peer == ovpn_peer_get_by_vpn_addr6(ovpn, &addr6)); + rcu_read_unlock(); + break; + } + + return match; +} + +void ovpn_peer_hash_vpn_ip(struct ovpn_peer *peer) +{ + struct hlist_nulls_head *nhead; + + lockdep_assert_held(&peer->ovpn->lock); + + /* rehashing makes sense only in multipeer mode */ + if (peer->ovpn->mode != OVPN_MODE_MP) + return; + + if (peer->vpn_addrs.ipv4.s_addr != htonl(INADDR_ANY)) { + /* remove potential old hashing */ + hlist_nulls_del_init_rcu(&peer->hash_entry_addr4); + + nhead = ovpn_get_hash_head(peer->ovpn->peers->by_vpn_addr4, + &peer->vpn_addrs.ipv4, + sizeof(peer->vpn_addrs.ipv4)); + hlist_nulls_add_head_rcu(&peer->hash_entry_addr4, nhead); + } + + if (!ipv6_addr_any(&peer->vpn_addrs.ipv6)) { + /* remove potential old hashing */ + hlist_nulls_del_init_rcu(&peer->hash_entry_addr6); + + nhead = ovpn_get_hash_head(peer->ovpn->peers->by_vpn_addr6, + &peer->vpn_addrs.ipv6, + sizeof(peer->vpn_addrs.ipv6)); + hlist_nulls_add_head_rcu(&peer->hash_entry_addr6, nhead); + } +} + +/** + * ovpn_peer_add_mp - add peer to related tables in a MP instance + * @ovpn: the instance to add the peer to + * @peer: the peer to add + * + * Return: 0 on success or a negative error code otherwise + */ +static int ovpn_peer_add_mp(struct ovpn_priv *ovpn, struct ovpn_peer *peer) +{ + struct sockaddr_storage sa = { 0 }; + struct hlist_nulls_head *nhead; + struct sockaddr_in6 *sa6; + struct sockaddr_in *sa4; + struct ovpn_bind *bind; + struct ovpn_peer *tmp; + size_t 
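/* [Editorial note, not part of the original patch] ovpn_peer_check_by_src()
 * above acts as a reverse-path filter: instead of trusting the inner source
 * address, it asks the routing table which peer would be used to *reach*
 * that address and accepts the packet only if that is the peer it actually
 * arrived from. Assuming peer 3 owns 10.8.0.3 in the by_vpn_addr4 table:
 *
 *	saddr 10.8.0.3 from peer 3: nexthop lookup -> peer 3 -> accept
 *	saddr 10.8.0.7 from peer 3: nexthop lookup -> peer 7 (or none) -> drop
 */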
salen; + int ret = 0; + + spin_lock_bh(&ovpn->lock); + /* do not add duplicates */ + tmp = ovpn_peer_get_by_id(ovpn, peer->id); + if (tmp) { + ovpn_peer_put(tmp); + ret = -EEXIST; + goto out; + } + + bind = rcu_dereference_protected(peer->bind, true); + /* peers connected via TCP have bind == NULL */ + if (bind) { + switch (bind->remote.in4.sin_family) { + case AF_INET: + sa4 = (struct sockaddr_in *)&sa; + + sa4->sin_family = AF_INET; + sa4->sin_addr.s_addr = bind->remote.in4.sin_addr.s_addr; + sa4->sin_port = bind->remote.in4.sin_port; + salen = sizeof(*sa4); + break; + case AF_INET6: + sa6 = (struct sockaddr_in6 *)&sa; + + sa6->sin6_family = AF_INET6; + sa6->sin6_addr = bind->remote.in6.sin6_addr; + sa6->sin6_port = bind->remote.in6.sin6_port; + salen = sizeof(*sa6); + break; + default: + ret = -EPROTONOSUPPORT; + goto out; + } + + nhead = ovpn_get_hash_head(ovpn->peers->by_transp_addr, &sa, + salen); + hlist_nulls_add_head_rcu(&peer->hash_entry_transp_addr, nhead); + } + + hlist_add_head_rcu(&peer->hash_entry_id, + ovpn_get_hash_head(ovpn->peers->by_id, &peer->id, + sizeof(peer->id))); + + ovpn_peer_hash_vpn_ip(peer); +out: + spin_unlock_bh(&ovpn->lock); + return ret; +} + +/** + * ovpn_peer_add_p2p - add peer to related tables in a P2P instance + * @ovpn: the instance to add the peer to + * @peer: the peer to add + * + * Return: 0 on success or a negative error code otherwise + */ +static int ovpn_peer_add_p2p(struct ovpn_priv *ovpn, struct ovpn_peer *peer) +{ + LLIST_HEAD(release_list); + struct ovpn_peer *tmp; + + spin_lock_bh(&ovpn->lock); + /* in p2p mode it is possible to have a single peer only, therefore the + * old one is released and substituted by the new one + */ + tmp = rcu_dereference_protected(ovpn->peer, + lockdep_is_held(&ovpn->lock)); + if (tmp) + ovpn_peer_remove(tmp, OVPN_DEL_PEER_REASON_TEARDOWN, + &release_list); + + rcu_assign_pointer(ovpn->peer, peer); + /* in P2P mode the carrier is switched on when the peer is added */ + netif_carrier_on(ovpn->dev); + unlock_ovpn(ovpn, &release_list); + + return 0; +} + +/** + * ovpn_peer_add - add peer to the related tables + * @ovpn: the openvpn instance the peer belongs to + * @peer: the peer object to add + * + * Assume refcounter was increased by caller + * + * Return: 0 on success or a negative error code otherwise + */ +int ovpn_peer_add(struct ovpn_priv *ovpn, struct ovpn_peer *peer) +{ + switch (ovpn->mode) { + case OVPN_MODE_MP: + return ovpn_peer_add_mp(ovpn, peer); + case OVPN_MODE_P2P: + return ovpn_peer_add_p2p(ovpn, peer); + } + + return -EOPNOTSUPP; +} + +/** + * ovpn_peer_del_mp - delete peer from related tables in a MP instance + * @peer: the peer to delete + * @reason: reason why the peer was deleted (sent to userspace) + * @release_list: list where delete peer should be appended + * + * Return: 0 on success or a negative error code otherwise + */ +static int ovpn_peer_del_mp(struct ovpn_peer *peer, + enum ovpn_del_peer_reason reason, + struct llist_head *release_list) +{ + struct ovpn_peer *tmp; + int ret = -ENOENT; + + lockdep_assert_held(&peer->ovpn->lock); + + tmp = ovpn_peer_get_by_id(peer->ovpn, peer->id); + if (tmp == peer) { + ovpn_peer_remove(peer, reason, release_list); + ret = 0; + } + + if (tmp) + ovpn_peer_put(tmp); + + return ret; +} + +/** + * ovpn_peer_del_p2p - delete peer from related tables in a P2P instance + * @peer: the peer to delete + * @reason: reason why the peer was deleted (sent to userspace) + * @release_list: list where delete peer should be appended + * + * Return: 0 on success 
or a negative error code otherwise + */ +static int ovpn_peer_del_p2p(struct ovpn_peer *peer, + enum ovpn_del_peer_reason reason, + struct llist_head *release_list) +{ + struct ovpn_peer *tmp; + + lockdep_assert_held(&peer->ovpn->lock); + + tmp = rcu_dereference_protected(peer->ovpn->peer, + lockdep_is_held(&peer->ovpn->lock)); + if (tmp != peer) + return -ENOENT; + + ovpn_peer_remove(peer, reason, release_list); + + return 0; +} + +/** + * ovpn_peer_del - delete peer from related tables + * @peer: the peer object to delete + * @reason: reason for deleting peer (will be sent to userspace) + * + * Return: 0 on success or a negative error code otherwise + */ +int ovpn_peer_del(struct ovpn_peer *peer, enum ovpn_del_peer_reason reason) +{ + LLIST_HEAD(release_list); + int ret = -EOPNOTSUPP; + + spin_lock_bh(&peer->ovpn->lock); + switch (peer->ovpn->mode) { + case OVPN_MODE_MP: + ret = ovpn_peer_del_mp(peer, reason, &release_list); + break; + case OVPN_MODE_P2P: + ret = ovpn_peer_del_p2p(peer, reason, &release_list); + break; + default: + break; + } + unlock_ovpn(peer->ovpn, &release_list); + + return ret; +} + +/** + * ovpn_peer_release_p2p - release peer upon P2P device teardown + * @ovpn: the instance being torn down + * @sk: if not NULL, release peer only if it's using this specific socket + * @reason: the reason for releasing the peer + */ +static void ovpn_peer_release_p2p(struct ovpn_priv *ovpn, struct sock *sk, + enum ovpn_del_peer_reason reason) +{ + struct ovpn_socket *ovpn_sock; + LLIST_HEAD(release_list); + struct ovpn_peer *peer; + + spin_lock_bh(&ovpn->lock); + peer = rcu_dereference_protected(ovpn->peer, + lockdep_is_held(&ovpn->lock)); + if (!peer) { + spin_unlock_bh(&ovpn->lock); + return; + } + + if (sk) { + ovpn_sock = rcu_access_pointer(peer->sock); + if (!ovpn_sock || ovpn_sock->sock->sk != sk) { + spin_unlock_bh(&ovpn->lock); + ovpn_peer_put(peer); + return; + } + } + + ovpn_peer_remove(peer, reason, &release_list); + unlock_ovpn(ovpn, &release_list); +} + +static void ovpn_peers_release_mp(struct ovpn_priv *ovpn, struct sock *sk, + enum ovpn_del_peer_reason reason) +{ + struct ovpn_socket *ovpn_sock; + LLIST_HEAD(release_list); + struct ovpn_peer *peer; + struct hlist_node *tmp; + int bkt; + + spin_lock_bh(&ovpn->lock); + hash_for_each_safe(ovpn->peers->by_id, bkt, tmp, peer, hash_entry_id) { + bool remove = true; + + /* if a socket was passed as argument, skip all peers except + * those using it + */ + if (sk) { + rcu_read_lock(); + ovpn_sock = rcu_dereference(peer->sock); + remove = ovpn_sock && ovpn_sock->sock->sk == sk; + rcu_read_unlock(); + } + + if (remove) + ovpn_peer_remove(peer, reason, &release_list); + } + unlock_ovpn(ovpn, &release_list); +} + +/** + * ovpn_peers_free - free all peers in the instance + * @ovpn: the instance whose peers should be released + * @sk: if not NULL, only peers using this socket are removed and the socket + * is released immediately + * @reason: the reason for releasing all peers + */ +void ovpn_peers_free(struct ovpn_priv *ovpn, struct sock *sk, + enum ovpn_del_peer_reason reason) +{ + switch (ovpn->mode) { + case OVPN_MODE_P2P: + ovpn_peer_release_p2p(ovpn, sk, reason); + break; + case OVPN_MODE_MP: + ovpn_peers_release_mp(ovpn, sk, reason); + break; + } +} + +static time64_t ovpn_peer_keepalive_work_single(struct ovpn_peer *peer, + time64_t now, + struct llist_head *release_list) +{ + time64_t last_recv, last_sent, next_run1, next_run2; + unsigned long timeout, interval; + bool expired; + + spin_lock_bh(&peer->lock); + /* we 
expect both timers to be configured at the same time, + * therefore bail out if either is not set + */ + if (!peer->keepalive_timeout || !peer->keepalive_interval) { + spin_unlock_bh(&peer->lock); + return 0; + } + + /* check for peer timeout */ + expired = false; + timeout = peer->keepalive_timeout; + last_recv = READ_ONCE(peer->last_recv); + if (now < last_recv + timeout) { + peer->keepalive_recv_exp = last_recv + timeout; + next_run1 = peer->keepalive_recv_exp; + } else if (peer->keepalive_recv_exp > now) { + next_run1 = peer->keepalive_recv_exp; + } else { + expired = true; + } + + if (expired) { + /* peer is dead -> kill it and move on */ + spin_unlock_bh(&peer->lock); + netdev_dbg(peer->ovpn->dev, "peer %u expired\n", + peer->id); + ovpn_peer_remove(peer, OVPN_DEL_PEER_REASON_EXPIRED, + release_list); + return 0; + } + + /* check for peer keepalive */ + expired = false; + interval = peer->keepalive_interval; + last_sent = READ_ONCE(peer->last_sent); + if (now < last_sent + interval) { + peer->keepalive_xmit_exp = last_sent + interval; + next_run2 = peer->keepalive_xmit_exp; + } else if (peer->keepalive_xmit_exp > now) { + next_run2 = peer->keepalive_xmit_exp; + } else { + expired = true; + next_run2 = now + interval; + } + spin_unlock_bh(&peer->lock); + + if (expired) { + /* a keepalive packet is required */ + netdev_dbg(peer->ovpn->dev, + "sending keepalive to peer %u\n", + peer->id); + if (schedule_work(&peer->keepalive_work)) + ovpn_peer_hold(peer); + } + + if (next_run1 < next_run2) + return next_run1; + + return next_run2; +} + +static time64_t ovpn_peer_keepalive_work_mp(struct ovpn_priv *ovpn, + time64_t now, + struct llist_head *release_list) +{ + time64_t tmp_next_run, next_run = 0; + struct hlist_node *tmp; + struct ovpn_peer *peer; + int bkt; + + lockdep_assert_held(&ovpn->lock); + + hash_for_each_safe(ovpn->peers->by_id, bkt, tmp, peer, hash_entry_id) { + tmp_next_run = ovpn_peer_keepalive_work_single(peer, now, + release_list); + if (!tmp_next_run) + continue; + + /* the next worker run will be scheduled based on the shortest + * required interval across all peers + */ + if (!next_run || tmp_next_run < next_run) + next_run = tmp_next_run; + } + + return next_run; +} + +static time64_t ovpn_peer_keepalive_work_p2p(struct ovpn_priv *ovpn, + time64_t now, + struct llist_head *release_list) +{ + struct ovpn_peer *peer; + time64_t next_run = 0; + + lockdep_assert_held(&ovpn->lock); + + peer = rcu_dereference_protected(ovpn->peer, + lockdep_is_held(&ovpn->lock)); + if (peer) + next_run = ovpn_peer_keepalive_work_single(peer, now, + release_list); + + return next_run; +} + +/** + * ovpn_peer_keepalive_work - run keepalive logic on each known peer + * @work: pointer to the work member of the related ovpn object + * + * Each peer has two timers (if configured): + * 1. peer timeout: when no data is received for a certain interval, + * the peer is considered dead and it gets killed. + * 2. peer keepalive: when no data is sent to a certain peer for a + * certain interval, a special 'keepalive' packet is explicitly sent. + * + * This function iterates across the whole peer collection while + * checking the timers described above. 
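 *
 * [Editorial example, not part of the original patch] The single-peer
 * helper returns the earliest future deadline among the two timers and the
 * caller rearms the delayed work accordingly. With interval=10 and
 * timeout=60, a peer that last sent 4s ago and last received 25s ago
 * yields:
 *
 *	next_run2 = last_sent + interval   ->  6s from now
 *	next_run1 = last_recv + timeout    -> 35s from now
 *	delay = min(next_run1, next_run2) - now = 6 seconds
 *
 * so the worker re-runs in six seconds and, if nothing was transmitted in
 * the meantime, sends a keepalive.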
+ */ +void ovpn_peer_keepalive_work(struct work_struct *work) +{ + struct ovpn_priv *ovpn = container_of(work, struct ovpn_priv, + keepalive_work.work); + time64_t next_run = 0, now = ktime_get_real_seconds(); + LLIST_HEAD(release_list); + + spin_lock_bh(&ovpn->lock); + switch (ovpn->mode) { + case OVPN_MODE_MP: + next_run = ovpn_peer_keepalive_work_mp(ovpn, now, + &release_list); + break; + case OVPN_MODE_P2P: + next_run = ovpn_peer_keepalive_work_p2p(ovpn, now, + &release_list); + break; + } + + /* prevent rearming if the interface is being destroyed */ + if (next_run > 0 && ovpn->registered) { + netdev_dbg(ovpn->dev, + "scheduling keepalive work: now=%llu next_run=%llu delta=%llu\n", + now, next_run, next_run - now); + schedule_delayed_work(&ovpn->keepalive_work, + (next_run - now) * HZ); + } + unlock_ovpn(ovpn, &release_list); +} diff --git a/drivers/net/ovpn/peer.h b/drivers/net/ovpn/peer.h new file mode 100644 index 0000000000000..a1423f2b09e06 --- /dev/null +++ b/drivers/net/ovpn/peer.h @@ -0,0 +1,163 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* OpenVPN data channel offload + * + * Copyright (C) 2020-2025 OpenVPN, Inc. + * + * Author: James Yonan + * Antonio Quartulli + */ + +#ifndef _NET_OVPN_OVPNPEER_H_ +#define _NET_OVPN_OVPNPEER_H_ + +#include +#include + +#include "crypto.h" +#include "socket.h" +#include "stats.h" + +/** + * struct ovpn_peer - the main remote peer object + * @ovpn: main openvpn instance this peer belongs to + * @dev_tracker: reference tracker for associated dev + * @id: unique identifier + * @vpn_addrs: IP addresses assigned over the tunnel + * @vpn_addrs.ipv4: IPv4 assigned to peer on the tunnel + * @vpn_addrs.ipv6: IPv6 assigned to peer on the tunnel + * @hash_entry_id: entry in the peer ID hashtable + * @hash_entry_addr4: entry in the peer IPv4 hashtable + * @hash_entry_addr6: entry in the peer IPv6 hashtable + * @hash_entry_transp_addr: entry in the peer transport address hashtable + * @sock: the socket being used to talk to this peer + * @tcp: keeps track of TCP specific state + * @tcp.strp: stream parser context (TCP only) + * @tcp.user_queue: received packets that have to go to userspace (TCP only) + * @tcp.out_queue: packets on hold while socket is taken by user (TCP only) + * @tcp.tx_in_progress: true if TX is already ongoing (TCP only) + * @tcp.out_msg.skb: packet scheduled for sending (TCP only) + * @tcp.out_msg.offset: offset where next send should start (TCP only) + * @tcp.out_msg.len: remaining data to send within packet (TCP only) + * @tcp.sk_cb.sk_data_ready: pointer to original cb (TCP only) + * @tcp.sk_cb.sk_write_space: pointer to original cb (TCP only) + * @tcp.sk_cb.prot: pointer to original prot object (TCP only) + * @tcp.sk_cb.ops: pointer to the original prot_ops object (TCP only) + * @tcp.defer_del_work: work used to defer peer deletion (TCP only) + * @crypto: the crypto configuration (ciphers, keys, etc..) + * @dst_cache: cache for dst_entry used to send to peer + * @bind: remote peer binding + * @keepalive_interval: seconds after which a new keepalive should be sent + * @keepalive_xmit_exp: future timestamp when next keepalive should be sent + * @last_sent: timestamp of the last successfully sent packet + * @keepalive_timeout: seconds after which an inactive peer is considered dead + * @keepalive_recv_exp: future timestamp when the peer should expire + * @last_recv: timestamp of the last authenticated received packet + * @vpn_stats: per-peer in-VPN TX/RX stats + * @link_stats: per-peer link/transport TX/RX stats + * @delete_reason: why peer was deleted (i.e. timeout, transport error, ..)
+ * @lock: protects binding to peer (bind) and keepalive* fields + * @refcount: reference counter + * @rcu: used to free peer in an RCU safe way + * @release_entry: entry for the socket release list + * @keepalive_work: used to schedule keepalive sending + */ +struct ovpn_peer { + struct ovpn_priv *ovpn; + netdevice_tracker dev_tracker; + u32 id; + struct { + struct in_addr ipv4; + struct in6_addr ipv6; + } vpn_addrs; + struct hlist_node hash_entry_id; + struct hlist_nulls_node hash_entry_addr4; + struct hlist_nulls_node hash_entry_addr6; + struct hlist_nulls_node hash_entry_transp_addr; + struct ovpn_socket __rcu *sock; + + struct { + struct strparser strp; + struct sk_buff_head user_queue; + struct sk_buff_head out_queue; + bool tx_in_progress; + + struct { + struct sk_buff *skb; + int offset; + int len; + } out_msg; + + struct { + void (*sk_data_ready)(struct sock *sk); + void (*sk_write_space)(struct sock *sk); + struct proto *prot; + const struct proto_ops *ops; + } sk_cb; + + struct work_struct defer_del_work; + } tcp; + struct ovpn_crypto_state crypto; + struct dst_cache dst_cache; + struct ovpn_bind __rcu *bind; + unsigned long keepalive_interval; + unsigned long keepalive_xmit_exp; + time64_t last_sent; + unsigned long keepalive_timeout; + unsigned long keepalive_recv_exp; + time64_t last_recv; + struct ovpn_peer_stats vpn_stats; + struct ovpn_peer_stats link_stats; + enum ovpn_del_peer_reason delete_reason; + spinlock_t lock; /* protects bind and keepalive* */ + struct kref refcount; + struct rcu_head rcu; + struct llist_node release_entry; + struct work_struct keepalive_work; +}; + +/** + * ovpn_peer_hold - increase reference counter + * @peer: the peer whose counter should be increased + * + * Return: true if the counter was increased or false if it was zero already + */ +static inline bool ovpn_peer_hold(struct ovpn_peer *peer) +{ + return kref_get_unless_zero(&peer->refcount); +} + +void ovpn_peer_release(struct ovpn_peer *peer); +void ovpn_peer_release_kref(struct kref *kref); + +/** + * ovpn_peer_put - decrease reference counter + * @peer: the peer whose counter should be decreased + */ +static inline void ovpn_peer_put(struct ovpn_peer *peer) +{ + kref_put(&peer->refcount, ovpn_peer_release_kref); +} + +struct ovpn_peer *ovpn_peer_new(struct ovpn_priv *ovpn, u32 id); +int ovpn_peer_add(struct ovpn_priv *ovpn, struct ovpn_peer *peer); +int ovpn_peer_del(struct ovpn_peer *peer, enum ovpn_del_peer_reason reason); +void ovpn_peers_free(struct ovpn_priv *ovpn, struct sock *sock, + enum ovpn_del_peer_reason reason); + +struct ovpn_peer *ovpn_peer_get_by_transp_addr(struct ovpn_priv *ovpn, + struct sk_buff *skb); +struct ovpn_peer *ovpn_peer_get_by_id(struct ovpn_priv *ovpn, u32 peer_id); +struct ovpn_peer *ovpn_peer_get_by_dst(struct ovpn_priv *ovpn, + struct sk_buff *skb); +void ovpn_peer_hash_vpn_ip(struct ovpn_peer *peer); +bool ovpn_peer_check_by_src(struct ovpn_priv *ovpn, struct sk_buff *skb, + struct ovpn_peer *peer); + +void ovpn_peer_keepalive_set(struct ovpn_peer *peer, u32 interval, u32 timeout); +void ovpn_peer_keepalive_work(struct work_struct *work); + +void ovpn_peer_endpoints_update(struct ovpn_peer *peer, struct sk_buff *skb); +int ovpn_peer_reset_sockaddr(struct ovpn_peer *peer, + const struct sockaddr_storage *ss, + const void *local_ip); + +#endif /* _NET_OVPN_OVPNPEER_H_ */ diff --git a/drivers/net/ovpn/pktid.c b/drivers/net/ovpn/pktid.c new file mode 100644 index 0000000000000..2e73d1c1ec8b6 --- /dev/null +++ b/drivers/net/ovpn/pktid.c @@ -0,0 +1,129 @@ +// 
SPDX-License-Identifier: GPL-2.0 +/* OpenVPN data channel offload + * + * Copyright (C) 2020-2025 OpenVPN, Inc. + * + * Author: Antonio Quartulli + * James Yonan + */ + +#include +#include +#include +#include +#include + +#include "ovpnpriv.h" +#include "main.h" +#include "pktid.h" + +void ovpn_pktid_xmit_init(struct ovpn_pktid_xmit *pid) +{ + atomic64_set(&pid->seq_num, 1); +} + +void ovpn_pktid_recv_init(struct ovpn_pktid_recv *pr) +{ + memset(pr, 0, sizeof(*pr)); + spin_lock_init(&pr->lock); +} + +/* Packet replay detection. + * Allows ID backtrack of up to REPLAY_WINDOW_SIZE - 1. + */ +int ovpn_pktid_recv(struct ovpn_pktid_recv *pr, u32 pkt_id, u32 pkt_time) +{ + const unsigned long now = jiffies; + int ret; + + /* ID must not be zero */ + if (unlikely(pkt_id == 0)) + return -EINVAL; + + spin_lock_bh(&pr->lock); + + /* expire backtracks at or below pr->id after PKTID_RECV_EXPIRE time */ + if (unlikely(time_after_eq(now, pr->expire))) + pr->id_floor = pr->id; + + /* time changed? */ + if (unlikely(pkt_time != pr->time)) { + if (pkt_time > pr->time) { + /* time moved forward, accept */ + pr->base = 0; + pr->extent = 0; + pr->id = 0; + pr->time = pkt_time; + pr->id_floor = 0; + } else { + /* time moved backward, reject */ + ret = -ETIME; + goto out; + } + } + + if (likely(pkt_id == pr->id + 1)) { + /* well-formed ID sequence (incremented by 1) */ + pr->base = REPLAY_INDEX(pr->base, -1); + pr->history[pr->base / 8] |= (1 << (pr->base % 8)); + if (pr->extent < REPLAY_WINDOW_SIZE) + ++pr->extent; + pr->id = pkt_id; + } else if (pkt_id > pr->id) { + /* ID jumped forward by more than one */ + const unsigned int delta = pkt_id - pr->id; + + if (delta < REPLAY_WINDOW_SIZE) { + unsigned int i; + + pr->base = REPLAY_INDEX(pr->base, -delta); + pr->history[pr->base / 8] |= (1 << (pr->base % 8)); + pr->extent += delta; + if (pr->extent > REPLAY_WINDOW_SIZE) + pr->extent = REPLAY_WINDOW_SIZE; + for (i = 1; i < delta; ++i) { + unsigned int newb = REPLAY_INDEX(pr->base, i); + + pr->history[newb / 8] &= ~BIT(newb % 8); + } + } else { + pr->base = 0; + pr->extent = REPLAY_WINDOW_SIZE; + memset(pr->history, 0, sizeof(pr->history)); + pr->history[0] = 1; + } + pr->id = pkt_id; + } else { + /* ID backtrack */ + const unsigned int delta = pr->id - pkt_id; + + if (delta > pr->max_backtrack) + pr->max_backtrack = delta; + if (delta < pr->extent) { + if (pkt_id > pr->id_floor) { + const unsigned int ri = REPLAY_INDEX(pr->base, + delta); + u8 *p = &pr->history[ri / 8]; + const u8 mask = (1 << (ri % 8)); + + if (*p & mask) { + ret = -EINVAL; + goto out; + } + *p |= mask; + } else { + ret = -EINVAL; + goto out; + } + } else { + ret = -EINVAL; + goto out; + } + } + + pr->expire = now + PKTID_RECV_EXPIRE; + ret = 0; +out: + spin_unlock_bh(&pr->lock); + return ret; +} diff --git a/drivers/net/ovpn/pktid.h b/drivers/net/ovpn/pktid.h new file mode 100644 index 0000000000000..0e5fd91d48835 --- /dev/null +++ b/drivers/net/ovpn/pktid.h @@ -0,0 +1,87 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* OpenVPN data channel offload + * + * Copyright (C) 2020-2025 OpenVPN, Inc. + * + * Author: Antonio Quartulli + * James Yonan + */ + +#ifndef _NET_OVPN_OVPNPKTID_H_ +#define _NET_OVPN_OVPNPKTID_H_ + +#include "proto.h" + +/* If no packets received for this length of time, set a backtrack floor + * at highest received packet ID thus far. 
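 *
 * [Editorial example, not part of the original patch] The receive window
 * implemented in pktid.c above is a rotating bitmap of the most recent
 * packet IDs: pr->base moves instead of the history being shifted. Tracing
 * ovpn_pktid_recv() for IDs arriving as 1, 2, 5, 3, 3:
 *
 *	1: in order, bit set, id = 1
 *	2: in order, bit set, id = 2
 *	5: jump by 3, bits for 3 and 4 cleared, bit for 5 set, id = 5
 *	3: backtrack by 2, bit still clear -> accepted, bit set
 *	3: backtrack by 2, bit already set -> replay, -EINVAL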
+ */ +#define PKTID_RECV_EXPIRE (30 * HZ) + +/* Packet-ID state for transmitter */ +struct ovpn_pktid_xmit { + atomic64_t seq_num; +}; + +/* replay window sizing in bytes = 2^REPLAY_WINDOW_ORDER */ +#define REPLAY_WINDOW_ORDER 8 + +#define REPLAY_WINDOW_BYTES BIT(REPLAY_WINDOW_ORDER) +#define REPLAY_WINDOW_SIZE (REPLAY_WINDOW_BYTES * 8) +#define REPLAY_INDEX(base, i) (((base) + (i)) & (REPLAY_WINDOW_SIZE - 1)) + +/* Packet-ID state for receiver. + * Other than lock member, can be zeroed to initialize. + */ +struct ovpn_pktid_recv { + /* "sliding window" bitmask of recent packet IDs received */ + u8 history[REPLAY_WINDOW_BYTES]; + /* bit position of deque base in history */ + unsigned int base; + /* extent (in bits) of deque in history */ + unsigned int extent; + /* expiration of history in jiffies */ + unsigned long expire; + /* highest sequence number received */ + u32 id; + /* highest time stamp received */ + u32 time; + /* we will only accept backtrack IDs > id_floor */ + u32 id_floor; + unsigned int max_backtrack; + /* protects entire packet ID state */ + spinlock_t lock; +}; + +/* Get the next packet ID for xmit */ +static inline int ovpn_pktid_xmit_next(struct ovpn_pktid_xmit *pid, u32 *pktid) +{ + const s64 seq_num = atomic64_fetch_add_unless(&pid->seq_num, 1, + 0x100000000LL); + /* when the 32bit space is over, we return an error because the packet + * ID is used to create the cipher IV and we do not want to reuse the + * same value more than once + */ + if (unlikely(seq_num == 0x100000000LL)) + return -ERANGE; + + *pktid = (u32)seq_num; + + return 0; +} + +/* Write 12-byte AEAD IV to dest */ +static inline void ovpn_pktid_aead_write(const u32 pktid, + const u8 nt[], + unsigned char *dest) +{ + *(__force __be32 *)(dest) = htonl(pktid); + BUILD_BUG_ON(4 + OVPN_NONCE_TAIL_SIZE != OVPN_NONCE_SIZE); + memcpy(dest + 4, nt, OVPN_NONCE_TAIL_SIZE); +} + +void ovpn_pktid_xmit_init(struct ovpn_pktid_xmit *pid); +void ovpn_pktid_recv_init(struct ovpn_pktid_recv *pr); + +int ovpn_pktid_recv(struct ovpn_pktid_recv *pr, u32 pkt_id, u32 pkt_time); + +#endif /* _NET_OVPN_OVPNPKTID_H_ */ diff --git a/drivers/net/ovpn/proto.h b/drivers/net/ovpn/proto.h new file mode 100644 index 0000000000000..b7322c7c515e5 --- /dev/null +++ b/drivers/net/ovpn/proto.h @@ -0,0 +1,118 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* OpenVPN data channel offload + * + * Copyright (C) 2020-2025 OpenVPN, Inc.
+ * + * Author: Antonio Quartulli + * James Yonan + */ + +#ifndef _NET_OVPN_PROTO_H_ +#define _NET_OVPN_PROTO_H_ + +#include "main.h" + +#include +#include + +/* When the OpenVPN protocol is run in AEAD mode, use + * the OpenVPN packet ID as the AEAD nonce: + * + * 00000005 521c3b01 4308c041 + * [seq # ] [ nonce_tail ] + * [ 12-byte full IV ] -> OVPN_NONCE_SIZE + * [4-bytes -> OVPN_NONCE_WIRE_SIZE + * on wire] + */ + +/* nonce size (96bits) as required by AEAD ciphers */ +#define OVPN_NONCE_SIZE 12 +/* last 8 bytes of AEAD nonce: provided by userspace and usually derived + * from key material generated during TLS handshake + */ +#define OVPN_NONCE_TAIL_SIZE 8 + +/* OpenVPN nonce size reduced by 8-byte nonce tail -- this is the + * size of the AEAD Associated Data (AD) sent over the wire + * and is normally the head of the IV + */ +#define OVPN_NONCE_WIRE_SIZE (OVPN_NONCE_SIZE - OVPN_NONCE_TAIL_SIZE) + +#define OVPN_OPCODE_SIZE 4 /* DATA_V2 opcode size */ +#define OVPN_OPCODE_KEYID_MASK 0x07000000 +#define OVPN_OPCODE_PKTTYPE_MASK 0xF8000000 +#define OVPN_OPCODE_PEERID_MASK 0x00FFFFFF + +/* packet opcodes of interest to us */ +#define OVPN_DATA_V1 6 /* data channel v1 packet */ +#define OVPN_DATA_V2 9 /* data channel v2 packet */ + +#define OVPN_PEER_ID_UNDEF 0x00FFFFFF + +/** + * ovpn_opcode_from_skb - extract OP code from skb at specified offset + * @skb: the packet to extract the OP code from + * @offset: the offset in the data buffer where the OP code is located + * + * Note: this function assumes that the skb head was pulled enough + * to access the first 4 bytes. + * + * Return: the OP code + */ +static inline u8 ovpn_opcode_from_skb(const struct sk_buff *skb, u16 offset) +{ + u32 opcode = be32_to_cpu(*(__be32 *)(skb->data + offset)); + + return FIELD_GET(OVPN_OPCODE_PKTTYPE_MASK, opcode); +} + +/** + * ovpn_peer_id_from_skb - extract peer ID from skb at specified offset + * @skb: the packet to extract the peer ID from + * @offset: the offset in the data buffer where the OP code is located + * + * Note: this function assumes that the skb head was pulled enough + * to access the first 4 bytes. + * + * Return: the peer ID + */ +static inline u32 ovpn_peer_id_from_skb(const struct sk_buff *skb, u16 offset) +{ + u32 opcode = be32_to_cpu(*(__be32 *)(skb->data + offset)); + + return FIELD_GET(OVPN_OPCODE_PEERID_MASK, opcode); +} + +/** + * ovpn_key_id_from_skb - extract key ID from the skb head + * @skb: the packet to extract the key ID from + * + * Note: this function assumes that the skb head was pulled enough + * to access the first byte. + * + * Return: the key ID + */ +static inline u8 ovpn_key_id_from_skb(const struct sk_buff *skb) +{ + u32 opcode = be32_to_cpu(*(__be32 *)skb->data); + + return FIELD_GET(OVPN_OPCODE_KEYID_MASK, opcode); +} + +/** + * ovpn_opcode_compose - combine OP code, key ID and peer ID to wire format + * @opcode: the OP code + * @key_id: the key ID + * @peer_id: the peer ID + * + * Return: a 4-byte integer obtained combining all input values following the + * OpenVPN wire format. This integer can then be written to the packet header.
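 *
 * [Editorial example, not part of the original patch] For a DATA_V2
 * packet (opcode 9) with key ID 1 and peer ID 2:
 *
 *	FIELD_PREP(OVPN_OPCODE_PKTTYPE_MASK, 9) = 0x48000000
 *	FIELD_PREP(OVPN_OPCODE_KEYID_MASK, 1)   = 0x01000000
 *	FIELD_PREP(OVPN_OPCODE_PEERID_MASK, 2)  = 0x00000002
 *
 * so ovpn_opcode_compose(9, 1, 2) = 0x49000002, which is then written to
 * the wire in big endian.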
+ */ +static inline u32 ovpn_opcode_compose(u8 opcode, u8 key_id, u32 peer_id) +{ + return FIELD_PREP(OVPN_OPCODE_PKTTYPE_MASK, opcode) | + FIELD_PREP(OVPN_OPCODE_KEYID_MASK, key_id) | + FIELD_PREP(OVPN_OPCODE_PEERID_MASK, peer_id); +} + +#endif /* _NET_OVPN_PROTO_H_ */ diff --git a/drivers/net/ovpn/skb.h b/drivers/net/ovpn/skb.h new file mode 100644 index 0000000000000..64430880f1dae --- /dev/null +++ b/drivers/net/ovpn/skb.h @@ -0,0 +1,61 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* OpenVPN data channel offload + * + * Copyright (C) 2020-2025 OpenVPN, Inc. + * + * Author: Antonio Quartulli + * James Yonan + */ + +#ifndef _NET_OVPN_SKB_H_ +#define _NET_OVPN_SKB_H_ + +#include +#include +#include +#include +#include +#include +#include + +struct ovpn_cb { + struct ovpn_peer *peer; + struct ovpn_crypto_key_slot *ks; + struct aead_request *req; + struct scatterlist *sg; + u8 *iv; + unsigned int payload_offset; + bool nosignal; +}; + +static inline struct ovpn_cb *ovpn_skb_cb(struct sk_buff *skb) +{ + BUILD_BUG_ON(sizeof(struct ovpn_cb) > sizeof(skb->cb)); + return (struct ovpn_cb *)skb->cb; +} + +/* Return IP protocol version from skb header. + * Return 0 if protocol is not IPv4/IPv6 or cannot be read. + */ +static inline __be16 ovpn_ip_check_protocol(struct sk_buff *skb) +{ + __be16 proto = 0; + + /* skb could be non-linear, + * make sure IP header is in non-fragmented part + */ + if (!pskb_network_may_pull(skb, sizeof(struct iphdr))) + return 0; + + if (ip_hdr(skb)->version == 4) { + proto = htons(ETH_P_IP); + } else if (ip_hdr(skb)->version == 6) { + if (!pskb_network_may_pull(skb, sizeof(struct ipv6hdr))) + return 0; + proto = htons(ETH_P_IPV6); + } + + return proto; +} + +#endif /* _NET_OVPN_SKB_H_ */ diff --git a/drivers/net/ovpn/socket.c b/drivers/net/ovpn/socket.c new file mode 100644 index 0000000000000..6ecf40857651a --- /dev/null +++ b/drivers/net/ovpn/socket.c @@ -0,0 +1,244 @@ +// SPDX-License-Identifier: GPL-2.0 +/* OpenVPN data channel offload + * + * Copyright (C) 2020-2025 OpenVPN, Inc. + * + * Author: James Yonan + * Antonio Quartulli + */ + +#include +#include +#include + +#include "ovpnpriv.h" +#include "main.h" +#include "io.h" +#include "peer.h" +#include "socket.h" +#include "tcp.h" +#include "udp.h" + +static void ovpn_socket_release_kref(struct kref *kref) +{ + struct ovpn_socket *sock = container_of(kref, struct ovpn_socket, + refcount); + + if (sock->sock->sk->sk_protocol == IPPROTO_UDP) + ovpn_udp_socket_detach(sock); + else if (sock->sock->sk->sk_protocol == IPPROTO_TCP) + ovpn_tcp_socket_detach(sock); +} + +/** + * ovpn_socket_put - decrease reference counter + * @peer: peer whose socket reference counter should be decreased + * @sock: the RCU protected peer socket + * + * This function is only used internally. Users willing to release + * references to the ovpn_socket should use ovpn_socket_release() + * + * Return: true if the socket was released, false otherwise + */ +static bool ovpn_socket_put(struct ovpn_peer *peer, struct ovpn_socket *sock) +{ + return kref_put(&sock->refcount, ovpn_socket_release_kref); +} + +/** + * ovpn_socket_release - release resources owned by socket user + * @peer: peer whose socket should be released + * + * This function should be invoked when the peer is being removed + * and wants to drop its link to the socket. + * + * In case of UDP, the detach routine will drop a reference to the + * ovpn netdev, pointed to by the ovpn_socket.
+ * + * In case of TCP, releasing the socket drops the refcounter of the peer + * it is linked to, thus allowing the peer to disappear as well. + * + * This function is expected to be invoked exactly once per peer. + * + * NOTE: this function may sleep + */ +void ovpn_socket_release(struct ovpn_peer *peer) +{ + struct ovpn_socket *sock; + bool released; + + might_sleep(); + + /* release may be invoked after socket was detached */ + rcu_read_lock(); + sock = rcu_dereference_protected(peer->sock, true); + if (!sock) { + rcu_read_unlock(); + return; + } + rcu_assign_pointer(peer->sock, NULL); + rcu_read_unlock(); + + /* sanity check: we should not end up here if the socket + * was already closed + */ + if (!sock->sock->sk) { + DEBUG_NET_WARN_ON_ONCE(1); + return; + } + + /* Drop the reference while holding the sock lock to prevent a + * concurrent ovpn_socket_new call from racing with a partially + * detached socket. + * + * Holding the lock ensures that a socket with refcnt 0 is fully + * detached before it can be picked by a concurrent reader. + */ + lock_sock(sock->sock->sk); + released = ovpn_socket_put(peer, sock); + release_sock(sock->sock->sk); + + /* align all readers with sk_user_data being NULL */ + synchronize_rcu(); + + /* following cleanup should happen with lock released */ + if (released) { + if (sock->sock->sk->sk_protocol == IPPROTO_UDP) { + netdev_put(sock->ovpn->dev, &sock->dev_tracker); + } else if (sock->sock->sk->sk_protocol == IPPROTO_TCP) { + /* wait for TCP jobs to terminate */ + ovpn_tcp_socket_wait_finish(sock); + ovpn_peer_put(sock->peer); + } + /* we can call plain kfree() because we already waited one RCU + * period due to synchronize_rcu() + */ + kfree(sock); + } +} + +static bool ovpn_socket_hold(struct ovpn_socket *sock) +{ + return kref_get_unless_zero(&sock->refcount); +} + +static int ovpn_socket_attach(struct ovpn_socket *sock, struct ovpn_peer *peer) +{ + if (sock->sock->sk->sk_protocol == IPPROTO_UDP) + return ovpn_udp_socket_attach(sock, peer->ovpn); + else if (sock->sock->sk->sk_protocol == IPPROTO_TCP) + return ovpn_tcp_socket_attach(sock, peer); + + return -EOPNOTSUPP; +} + +/** + * ovpn_socket_new - create a new socket and initialize it + * @sock: the kernel socket to embed + * @peer: the peer reachable via this socket + * + * Return: an openvpn socket on success or a negative error code otherwise + */ +struct ovpn_socket *ovpn_socket_new(struct socket *sock, struct ovpn_peer *peer) +{ + struct ovpn_socket *ovpn_sock; + int ret; + + lock_sock(sock->sk); + + /* a TCP socket can only be owned by a single peer, therefore there + * can't be any other user + */ + if (sock->sk->sk_protocol == IPPROTO_TCP && sock->sk->sk_user_data) { + ovpn_sock = ERR_PTR(-EBUSY); + goto sock_release; + } + + /* a UDP socket can be shared across multiple peers, but we must make + * sure it is not owned by something else + */ + if (sock->sk->sk_protocol == IPPROTO_UDP) { + u8 type = READ_ONCE(udp_sk(sock->sk)->encap_type); + + /* socket owned by other encapsulation module */ + if (type && type != UDP_ENCAP_OVPNINUDP) { + ovpn_sock = ERR_PTR(-EBUSY); + goto sock_release; + } + + rcu_read_lock(); + ovpn_sock = rcu_dereference_sk_user_data(sock->sk); + if (ovpn_sock) { + /* socket owned by another ovpn instance; we can't use it */ + if (ovpn_sock->ovpn != peer->ovpn) { + ovpn_sock = ERR_PTR(-EBUSY); + rcu_read_unlock(); + goto sock_release; + } + + /* this socket is already owned by this instance, + * therefore we can increase the refcounter and + * use it as
expected + */ + if (WARN_ON(!ovpn_socket_hold(ovpn_sock))) { + /* this should never happen because setting + * the refcnt to 0 and detaching the socket + * is expected to be atomic + */ + ovpn_sock = ERR_PTR(-EAGAIN); + rcu_read_unlock(); + goto sock_release; + } + + /* caller is expected to increase the sock + * refcounter before passing it to this + * function. For this reason we drop it if + * not needed, like when this socket is already + * owned. + */ + rcu_read_unlock(); + goto sock_release; + } + rcu_read_unlock(); + } + + /* socket is not owned: attach to this ovpn instance */ + + ovpn_sock = kzalloc(sizeof(*ovpn_sock), GFP_KERNEL); + if (!ovpn_sock) { + ovpn_sock = ERR_PTR(-ENOMEM); + goto sock_release; + } + + kref_init(&ovpn_sock->refcount); + ovpn_sock->sock = sock; + + ret = ovpn_socket_attach(ovpn_sock, peer); + if (ret < 0) { + kfree(ovpn_sock); + ovpn_sock = ERR_PTR(ret); + goto sock_release; + } + + /* TCP sockets are per-peer, therefore they are linked to their unique + * peer + */ + if (sock->sk->sk_protocol == IPPROTO_TCP) { + INIT_WORK(&ovpn_sock->tcp_tx_work, ovpn_tcp_tx_work); + ovpn_sock->peer = peer; + ovpn_peer_hold(peer); + } else if (sock->sk->sk_protocol == IPPROTO_UDP) { + /* in UDP we only link the ovpn instance since the socket is + * shared among multiple peers + */ + ovpn_sock->ovpn = peer->ovpn; + netdev_hold(peer->ovpn->dev, &ovpn_sock->dev_tracker, + GFP_KERNEL); + } + + rcu_assign_sk_user_data(sock->sk, ovpn_sock); +sock_release: + release_sock(sock->sk); + return ovpn_sock; +} diff --git a/drivers/net/ovpn/socket.h b/drivers/net/ovpn/socket.h new file mode 100644 index 0000000000000..00d856b1a5d88 --- /dev/null +++ b/drivers/net/ovpn/socket.h @@ -0,0 +1,49 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* OpenVPN data channel offload + * + * Copyright (C) 2020-2025 OpenVPN, Inc. + * + * Author: James Yonan + * Antonio Quartulli + */ + +#ifndef _NET_OVPN_SOCK_H_ +#define _NET_OVPN_SOCK_H_ + +#include +#include +#include + +struct ovpn_priv; +struct ovpn_peer; + +/** + * struct ovpn_socket - a kernel socket referenced in the ovpn code + * @ovpn: ovpn instance owning this socket (UDP only) + * @dev_tracker: reference tracker for associated dev (UDP only) + * @peer: unique peer transmitting over this socket (TCP only) + * @sock: the low level sock object + * @refcount: amount of contexts currently referencing this object + * @work: member used to schedule release routine (it may block) + * @tcp_tx_work: work for deferring outgoing packet processing (TCP only) + */ +struct ovpn_socket { + union { + struct { + struct ovpn_priv *ovpn; + netdevice_tracker dev_tracker; + }; + struct ovpn_peer *peer; + }; + + struct socket *sock; + struct kref refcount; + struct work_struct work; + struct work_struct tcp_tx_work; +}; + +struct ovpn_socket *ovpn_socket_new(struct socket *sock, + struct ovpn_peer *peer); +void ovpn_socket_release(struct ovpn_peer *peer); + +#endif /* _NET_OVPN_SOCK_H_ */ diff --git a/drivers/net/ovpn/stats.c b/drivers/net/ovpn/stats.c new file mode 100644 index 0000000000000..d637143473bb9 --- /dev/null +++ b/drivers/net/ovpn/stats.c @@ -0,0 +1,21 @@ +// SPDX-License-Identifier: GPL-2.0 +/* OpenVPN data channel offload + * + * Copyright (C) 2020-2025 OpenVPN, Inc. 
+ * + * Author: James Yonan + * Antonio Quartulli + */ + +#include + +#include "stats.h" + +void ovpn_peer_stats_init(struct ovpn_peer_stats *ps) +{ + atomic64_set(&ps->rx.bytes, 0); + atomic64_set(&ps->rx.packets, 0); + + atomic64_set(&ps->tx.bytes, 0); + atomic64_set(&ps->tx.packets, 0); +} diff --git a/drivers/net/ovpn/stats.h b/drivers/net/ovpn/stats.h new file mode 100644 index 0000000000000..53433d8b6c331 --- /dev/null +++ b/drivers/net/ovpn/stats.h @@ -0,0 +1,47 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* OpenVPN data channel offload + * + * Copyright (C) 2020-2025 OpenVPN, Inc. + * + * Author: James Yonan + * Antonio Quartulli + * Lev Stipakov + */ + +#ifndef _NET_OVPN_OVPNSTATS_H_ +#define _NET_OVPN_OVPNSTATS_H_ + +/* one stat */ +struct ovpn_peer_stat { + atomic64_t bytes; + atomic64_t packets; +}; + +/* rx and tx stats combined */ +struct ovpn_peer_stats { + struct ovpn_peer_stat rx; + struct ovpn_peer_stat tx; +}; + +void ovpn_peer_stats_init(struct ovpn_peer_stats *ps); + +static inline void ovpn_peer_stats_increment(struct ovpn_peer_stat *stat, + const unsigned int n) +{ + atomic64_add(n, &stat->bytes); + atomic64_inc(&stat->packets); +} + +static inline void ovpn_peer_stats_increment_rx(struct ovpn_peer_stats *stats, + const unsigned int n) +{ + ovpn_peer_stats_increment(&stats->rx, n); +} + +static inline void ovpn_peer_stats_increment_tx(struct ovpn_peer_stats *stats, + const unsigned int n) +{ + ovpn_peer_stats_increment(&stats->tx, n); +} + +#endif /* _NET_OVPN_OVPNSTATS_H_ */ diff --git a/drivers/net/ovpn/tcp.c b/drivers/net/ovpn/tcp.c new file mode 100644 index 0000000000000..e05c9ba0e0f5f --- /dev/null +++ b/drivers/net/ovpn/tcp.c @@ -0,0 +1,592 @@ +// SPDX-License-Identifier: GPL-2.0 +/* OpenVPN data channel offload + * + * Copyright (C) 2019-2025 OpenVPN, Inc. + * + * Author: Antonio Quartulli + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ovpnpriv.h" +#include "main.h" +#include "io.h" +#include "peer.h" +#include "proto.h" +#include "skb.h" +#include "tcp.h" + +#define OVPN_TCP_DEPTH_NESTING 2 +#if OVPN_TCP_DEPTH_NESTING == SINGLE_DEPTH_NESTING +#error "OVPN TCP requires its own lockdep subclass" +#endif + +static struct proto ovpn_tcp_prot __ro_after_init; +static struct proto_ops ovpn_tcp_ops __ro_after_init; +static struct proto ovpn_tcp6_prot __ro_after_init; +static struct proto_ops ovpn_tcp6_ops __ro_after_init; + +static int ovpn_tcp_parse(struct strparser *strp, struct sk_buff *skb) +{ + struct strp_msg *rxm = strp_msg(skb); + __be16 blen; + u16 len; + int err; + + /* when packets are written to the TCP stream, they are prepended with + * two bytes indicating the actual packet size. 
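+ * For illustration, a hypothetical 1400 byte packet would appear on
+ * the stream as [0x05 0x78][1400 bytes of payload], since
+ * htons(1400) == 0x0578.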
+ * Parse accordingly and return the actual size (including the size + * header) + */ + + if (skb->len < rxm->offset + 2) + return 0; + + err = skb_copy_bits(skb, rxm->offset, &blen, sizeof(blen)); + if (err < 0) + return err; + + len = be16_to_cpu(blen); + if (len < 2) + return -EINVAL; + + return len + 2; +} + +/* queue skb for sending to userspace via recvmsg on the socket */ +static void ovpn_tcp_to_userspace(struct ovpn_peer *peer, struct sock *sk, + struct sk_buff *skb) +{ + skb_set_owner_r(skb, sk); + memset(skb->cb, 0, sizeof(skb->cb)); + skb_queue_tail(&peer->tcp.user_queue, skb); + peer->tcp.sk_cb.sk_data_ready(sk); +} + +static void ovpn_tcp_rcv(struct strparser *strp, struct sk_buff *skb) +{ + struct ovpn_peer *peer = container_of(strp, struct ovpn_peer, tcp.strp); + struct strp_msg *msg = strp_msg(skb); + size_t pkt_len = msg->full_len - 2; + size_t off = msg->offset + 2; + u8 opcode; + + /* ensure skb->data points to the beginning of the openvpn packet */ + if (!pskb_pull(skb, off)) { + net_warn_ratelimited("%s: packet too small for peer %u\n", + netdev_name(peer->ovpn->dev), peer->id); + goto err; + } + + /* strparser does not trim the skb for us, therefore we do it now */ + if (pskb_trim(skb, pkt_len) != 0) { + net_warn_ratelimited("%s: trimming skb failed for peer %u\n", + netdev_name(peer->ovpn->dev), peer->id); + goto err; + } + + /* we need the first byte of data to be accessible + * to extract the opcode and the key ID later on + */ + if (!pskb_may_pull(skb, OVPN_OPCODE_SIZE)) { + net_warn_ratelimited("%s: packet too small to fetch opcode for peer %u\n", + netdev_name(peer->ovpn->dev), peer->id); + goto err; + } + + /* DATA_V2 packets are handled in kernel, the rest goes to user space */ + opcode = ovpn_opcode_from_skb(skb, 0); + if (unlikely(opcode != OVPN_DATA_V2)) { + if (opcode == OVPN_DATA_V1) { + net_warn_ratelimited("%s: DATA_V1 detected on the TCP stream\n", + netdev_name(peer->ovpn->dev)); + goto err; + } + + /* The packet size header must be there when sending the packet + * to userspace, therefore we put it back + */ + skb_push(skb, 2); + ovpn_tcp_to_userspace(peer, strp->sk, skb); + return; + } + + /* hold reference to peer as required by ovpn_recv(). 
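+ * (the reference travels with the skb and is presumably released by
+ * ovpn_recv() once it is done with the packet)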
+ * + * NOTE: in this context we should already be holding a reference to + * this peer, therefore ovpn_peer_hold() is not expected to fail + */ + if (WARN_ON(!ovpn_peer_hold(peer))) + goto err; + + ovpn_recv(peer, skb); + return; +err: + dev_core_stats_rx_dropped_inc(peer->ovpn->dev); + kfree_skb(skb); + ovpn_peer_del(peer, OVPN_DEL_PEER_REASON_TRANSPORT_ERROR); +} + +static int ovpn_tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, + int flags, int *addr_len) +{ + int err = 0, off, copied = 0, ret; + struct ovpn_socket *sock; + struct ovpn_peer *peer; + struct sk_buff *skb; + + rcu_read_lock(); + sock = rcu_dereference_sk_user_data(sk); + if (unlikely(!sock || !sock->peer || !ovpn_peer_hold(sock->peer))) { + rcu_read_unlock(); + return -EBADF; + } + peer = sock->peer; + rcu_read_unlock(); + + skb = __skb_recv_datagram(sk, &peer->tcp.user_queue, flags, &off, &err); + if (!skb) { + if (err == -EAGAIN && sk->sk_shutdown & RCV_SHUTDOWN) { + ret = 0; + goto out; + } + ret = err; + goto out; + } + + copied = len; + if (copied > skb->len) + copied = skb->len; + else if (copied < skb->len) + msg->msg_flags |= MSG_TRUNC; + + err = skb_copy_datagram_msg(skb, 0, msg, copied); + if (unlikely(err)) { + kfree_skb(skb); + ret = err; + goto out; + } + + if (flags & MSG_TRUNC) + copied = skb->len; + kfree_skb(skb); + ret = copied; +out: + ovpn_peer_put(peer); + return ret; +} + +void ovpn_tcp_socket_detach(struct ovpn_socket *ovpn_sock) +{ + struct ovpn_peer *peer = ovpn_sock->peer; + struct socket *sock = ovpn_sock->sock; + + strp_stop(&peer->tcp.strp); + skb_queue_purge(&peer->tcp.user_queue); + + /* restore CBs that were saved in ovpn_sock_set_tcp_cb() */ + sock->sk->sk_data_ready = peer->tcp.sk_cb.sk_data_ready; + sock->sk->sk_write_space = peer->tcp.sk_cb.sk_write_space; + sock->sk->sk_prot = peer->tcp.sk_cb.prot; + sock->sk->sk_socket->ops = peer->tcp.sk_cb.ops; + + rcu_assign_sk_user_data(sock->sk, NULL); +} + +void ovpn_tcp_socket_wait_finish(struct ovpn_socket *sock) +{ + struct ovpn_peer *peer = sock->peer; + + /* NOTE: we don't wait for peer->tcp.defer_del_work to finish: + * either the worker is not running or this function + * was invoked by that worker. + */ + + cancel_work_sync(&sock->tcp_tx_work); + strp_done(&peer->tcp.strp); + + skb_queue_purge(&peer->tcp.out_queue); + kfree_skb(peer->tcp.out_msg.skb); + peer->tcp.out_msg.skb = NULL; +} + +static void ovpn_tcp_send_sock(struct ovpn_peer *peer, struct sock *sk) +{ + struct sk_buff *skb = peer->tcp.out_msg.skb; + int ret, flags; + + if (!skb) + return; + + if (peer->tcp.tx_in_progress) + return; + + peer->tcp.tx_in_progress = true; + + do { + flags = ovpn_skb_cb(skb)->nosignal ? MSG_NOSIGNAL : 0; + ret = skb_send_sock_locked_with_flags(sk, skb, + peer->tcp.out_msg.offset, + peer->tcp.out_msg.len, + flags); + if (unlikely(ret < 0)) { + if (ret == -EAGAIN) + goto out; + + net_warn_ratelimited("%s: TCP error to peer %u: %d\n", + netdev_name(peer->ovpn->dev), + peer->id, ret); + + /* in case of TCP error we can't recover the VPN + * stream therefore we abort the connection + */ + ovpn_peer_hold(peer); + schedule_work(&peer->tcp.defer_del_work); + + /* we bail out immediately and keep tx_in_progress set + * to true. 
This way we prevent more TX attempts + * which would lead to more invocations of + * schedule_work() + */ + return; + } + + peer->tcp.out_msg.len -= ret; + peer->tcp.out_msg.offset += ret; + } while (peer->tcp.out_msg.len > 0); + + if (!peer->tcp.out_msg.len) { + preempt_disable(); + dev_sw_netstats_tx_add(peer->ovpn->dev, 1, skb->len); + preempt_enable(); + } + + kfree_skb(peer->tcp.out_msg.skb); + peer->tcp.out_msg.skb = NULL; + peer->tcp.out_msg.len = 0; + peer->tcp.out_msg.offset = 0; + +out: + peer->tcp.tx_in_progress = false; +} + +void ovpn_tcp_tx_work(struct work_struct *work) +{ + struct ovpn_socket *sock; + + sock = container_of(work, struct ovpn_socket, tcp_tx_work); + + lock_sock(sock->sock->sk); + if (sock->peer) + ovpn_tcp_send_sock(sock->peer, sock->sock->sk); + release_sock(sock->sock->sk); +} + +static void ovpn_tcp_send_sock_skb(struct ovpn_peer *peer, struct sock *sk, + struct sk_buff *skb) +{ + if (peer->tcp.out_msg.skb) + ovpn_tcp_send_sock(peer, sk); + + if (peer->tcp.out_msg.skb) { + dev_core_stats_tx_dropped_inc(peer->ovpn->dev); + kfree_skb(skb); + return; + } + + peer->tcp.out_msg.skb = skb; + peer->tcp.out_msg.len = skb->len; + peer->tcp.out_msg.offset = 0; + ovpn_tcp_send_sock(peer, sk); +} + +void ovpn_tcp_send_skb(struct ovpn_peer *peer, struct socket *sock, + struct sk_buff *skb) +{ + u16 len = skb->len; + + *(__be16 *)__skb_push(skb, sizeof(u16)) = htons(len); + + spin_lock_nested(&sock->sk->sk_lock.slock, OVPN_TCP_DEPTH_NESTING); + if (sock_owned_by_user(sock->sk)) { + if (skb_queue_len(&peer->tcp.out_queue) >= + READ_ONCE(net_hotdata.max_backlog)) { + dev_core_stats_tx_dropped_inc(peer->ovpn->dev); + kfree_skb(skb); + goto unlock; + } + __skb_queue_tail(&peer->tcp.out_queue, skb); + } else { + ovpn_tcp_send_sock_skb(peer, sock->sk, skb); + } +unlock: + spin_unlock(&sock->sk->sk_lock.slock); +} + +static void ovpn_tcp_release(struct sock *sk) +{ + struct sk_buff_head queue; + struct ovpn_socket *sock; + struct ovpn_peer *peer; + struct sk_buff *skb; + + rcu_read_lock(); + sock = rcu_dereference_sk_user_data(sk); + if (!sock) { + rcu_read_unlock(); + return; + } + + peer = sock->peer; + + /* during initialization this function is called before + * assigning sock->peer + */ + if (unlikely(!peer || !ovpn_peer_hold(peer))) { + rcu_read_unlock(); + return; + } + rcu_read_unlock(); + + __skb_queue_head_init(&queue); + skb_queue_splice_init(&peer->tcp.out_queue, &queue); + + while ((skb = __skb_dequeue(&queue))) + ovpn_tcp_send_sock_skb(peer, sk, skb); + + peer->tcp.sk_cb.prot->release_cb(sk); + ovpn_peer_put(peer); +} + +static int ovpn_tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) +{ + struct ovpn_socket *sock; + int ret, linear = PAGE_SIZE; + struct ovpn_peer *peer; + struct sk_buff *skb; + + lock_sock(sk); + rcu_read_lock(); + sock = rcu_dereference_sk_user_data(sk); + if (unlikely(!sock || !sock->peer || !ovpn_peer_hold(sock->peer))) { + rcu_read_unlock(); + release_sock(sk); + return -EIO; + } + rcu_read_unlock(); + peer = sock->peer; + + if (msg->msg_flags & ~(MSG_DONTWAIT | MSG_NOSIGNAL)) { + ret = -EOPNOTSUPP; + goto peer_free; + } + + if (peer->tcp.out_msg.skb) { + ret = -EAGAIN; + goto peer_free; + } + + if (size < linear) + linear = size; + + skb = sock_alloc_send_pskb(sk, linear, size - linear, + msg->msg_flags & MSG_DONTWAIT, &ret, 0); + if (!skb) { + net_err_ratelimited("%s: skb alloc failed: %d\n", + netdev_name(peer->ovpn->dev), ret); + goto peer_free; + } + + skb_put(skb, linear); + skb->len = size; + skb->data_len = size - 
linear; + + ret = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, size); + if (ret) { + kfree_skb(skb); + net_err_ratelimited("%s: skb copy from iter failed: %d\n", + netdev_name(peer->ovpn->dev), ret); + goto peer_free; + } + + ovpn_skb_cb(skb)->nosignal = msg->msg_flags & MSG_NOSIGNAL; + ovpn_tcp_send_sock_skb(peer, sk, skb); + ret = size; +peer_free: + release_sock(sk); + ovpn_peer_put(peer); + return ret; +} + +static void ovpn_tcp_data_ready(struct sock *sk) +{ + struct ovpn_socket *sock; + + trace_sk_data_ready(sk); + + rcu_read_lock(); + sock = rcu_dereference_sk_user_data(sk); + if (likely(sock && sock->peer)) + strp_data_ready(&sock->peer->tcp.strp); + rcu_read_unlock(); +} + +static void ovpn_tcp_write_space(struct sock *sk) +{ + struct ovpn_socket *sock; + + rcu_read_lock(); + sock = rcu_dereference_sk_user_data(sk); + if (likely(sock && sock->peer)) { + schedule_work(&sock->tcp_tx_work); + sock->peer->tcp.sk_cb.sk_write_space(sk); + } + rcu_read_unlock(); +} + +static void ovpn_tcp_build_protos(struct proto *new_prot, + struct proto_ops *new_ops, + const struct proto *orig_prot, + const struct proto_ops *orig_ops); + +static void ovpn_tcp_peer_del_work(struct work_struct *work) +{ + struct ovpn_peer *peer = container_of(work, struct ovpn_peer, + tcp.defer_del_work); + + ovpn_peer_del(peer, OVPN_DEL_PEER_REASON_TRANSPORT_ERROR); + ovpn_peer_put(peer); +} + +/* Set TCP encapsulation callbacks */ +int ovpn_tcp_socket_attach(struct ovpn_socket *ovpn_sock, + struct ovpn_peer *peer) +{ + struct socket *sock = ovpn_sock->sock; + struct strp_callbacks cb = { + .rcv_msg = ovpn_tcp_rcv, + .parse_msg = ovpn_tcp_parse, + }; + int ret; + + /* make sure no pre-existing encapsulation handler exists */ + if (sock->sk->sk_user_data) + return -EBUSY; + + /* only a fully connected socket is expected. 
Connection should be + * handled in userspace + */ + if (sock->sk->sk_state != TCP_ESTABLISHED) { + net_err_ratelimited("%s: provided TCP socket is not in ESTABLISHED state: %d\n", + netdev_name(peer->ovpn->dev), + sock->sk->sk_state); + return -EINVAL; + } + + ret = strp_init(&peer->tcp.strp, sock->sk, &cb); + if (ret < 0) { + DEBUG_NET_WARN_ON_ONCE(1); + return ret; + } + + INIT_WORK(&peer->tcp.defer_del_work, ovpn_tcp_peer_del_work); + + __sk_dst_reset(sock->sk); + skb_queue_head_init(&peer->tcp.user_queue); + skb_queue_head_init(&peer->tcp.out_queue); + + /* save current CBs so that they can be restored upon socket release */ + peer->tcp.sk_cb.sk_data_ready = sock->sk->sk_data_ready; + peer->tcp.sk_cb.sk_write_space = sock->sk->sk_write_space; + peer->tcp.sk_cb.prot = sock->sk->sk_prot; + peer->tcp.sk_cb.ops = sock->sk->sk_socket->ops; + + /* assign our static CBs and prot/ops */ + sock->sk->sk_data_ready = ovpn_tcp_data_ready; + sock->sk->sk_write_space = ovpn_tcp_write_space; + + if (sock->sk->sk_family == AF_INET) { + sock->sk->sk_prot = &ovpn_tcp_prot; + sock->sk->sk_socket->ops = &ovpn_tcp_ops; + } else { + sock->sk->sk_prot = &ovpn_tcp6_prot; + sock->sk->sk_socket->ops = &ovpn_tcp6_ops; + } + + /* avoid using task_frag */ + sock->sk->sk_allocation = GFP_ATOMIC; + sock->sk->sk_use_task_frag = false; + + /* enqueue the RX worker */ + strp_check_rcv(&peer->tcp.strp); + + return 0; +} + +static void ovpn_tcp_close(struct sock *sk, long timeout) +{ + struct ovpn_socket *sock; + struct ovpn_peer *peer; + + rcu_read_lock(); + sock = rcu_dereference_sk_user_data(sk); + if (!sock || !sock->peer || !ovpn_peer_hold(sock->peer)) { + rcu_read_unlock(); + return; + } + peer = sock->peer; + rcu_read_unlock(); + + ovpn_peer_del(sock->peer, OVPN_DEL_PEER_REASON_TRANSPORT_DISCONNECT); + peer->tcp.sk_cb.prot->close(sk, timeout); + ovpn_peer_put(peer); +} + +static __poll_t ovpn_tcp_poll(struct file *file, struct socket *sock, + poll_table *wait) +{ + __poll_t mask = datagram_poll(file, sock, wait); + struct ovpn_socket *ovpn_sock; + + rcu_read_lock(); + ovpn_sock = rcu_dereference_sk_user_data(sock->sk); + if (ovpn_sock && ovpn_sock->peer && + !skb_queue_empty(&ovpn_sock->peer->tcp.user_queue)) + mask |= EPOLLIN | EPOLLRDNORM; + rcu_read_unlock(); + + return mask; +} + +static void ovpn_tcp_build_protos(struct proto *new_prot, + struct proto_ops *new_ops, + const struct proto *orig_prot, + const struct proto_ops *orig_ops) +{ + memcpy(new_prot, orig_prot, sizeof(*new_prot)); + memcpy(new_ops, orig_ops, sizeof(*new_ops)); + new_prot->recvmsg = ovpn_tcp_recvmsg; + new_prot->sendmsg = ovpn_tcp_sendmsg; + new_prot->close = ovpn_tcp_close; + new_prot->release_cb = ovpn_tcp_release; + new_ops->poll = ovpn_tcp_poll; +} + +/* Initialize TCP static objects */ +void __init ovpn_tcp_init(void) +{ + ovpn_tcp_build_protos(&ovpn_tcp_prot, &ovpn_tcp_ops, &tcp_prot, + &inet_stream_ops); + +#if IS_ENABLED(CONFIG_IPV6) + ovpn_tcp_build_protos(&ovpn_tcp6_prot, &ovpn_tcp6_ops, &tcpv6_prot, + &inet6_stream_ops); +#endif +} diff --git a/drivers/net/ovpn/tcp.h b/drivers/net/ovpn/tcp.h new file mode 100644 index 0000000000000..10aefa834cf35 --- /dev/null +++ b/drivers/net/ovpn/tcp.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* OpenVPN data channel offload + * + * Copyright (C) 2019-2025 OpenVPN, Inc. 
+ *
+ * Author: Antonio Quartulli
+ */
+
+#ifndef _NET_OVPN_TCP_H_
+#define _NET_OVPN_TCP_H_
+
+#include
+#include
+#include
+
+#include "peer.h"
+#include "skb.h"
+#include "socket.h"
+
+void __init ovpn_tcp_init(void);
+
+int ovpn_tcp_socket_attach(struct ovpn_socket *ovpn_sock,
+			   struct ovpn_peer *peer);
+void ovpn_tcp_socket_detach(struct ovpn_socket *ovpn_sock);
+void ovpn_tcp_socket_wait_finish(struct ovpn_socket *sock);
+
+/* Prepare skb and enqueue it for sending to peer.
+ *
+ * Preparation consists of prepending the skb payload with its size.
+ * Required by the OpenVPN protocol in order to extract packets from
+ * the TCP stream on the receiver side.
+ */
+void ovpn_tcp_send_skb(struct ovpn_peer *peer, struct socket *sock, struct sk_buff *skb);
+void ovpn_tcp_tx_work(struct work_struct *work);
+
+#endif /* _NET_OVPN_TCP_H_ */
diff --git a/drivers/net/ovpn/udp.c b/drivers/net/ovpn/udp.c
new file mode 100644
index 0000000000000..3367feb5cf725
--- /dev/null
+++ b/drivers/net/ovpn/udp.c
@@ -0,0 +1,442 @@
+// SPDX-License-Identifier: GPL-2.0
+/* OpenVPN data channel offload
+ *
+ * Copyright (C) 2019-2025 OpenVPN, Inc.
+ *
+ * Author: Antonio Quartulli
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include "ovpnpriv.h"
+#include "main.h"
+#include "bind.h"
+#include "io.h"
+#include "peer.h"
+#include "proto.h"
+#include "socket.h"
+#include "udp.h"
+
+/* Retrieve the corresponding ovpn object from a UDP socket.
+ * rcu_read_lock must be held on entry
+ */
+static struct ovpn_socket *ovpn_socket_from_udp_sock(struct sock *sk)
+{
+	struct ovpn_socket *ovpn_sock;
+
+	if (unlikely(READ_ONCE(udp_sk(sk)->encap_type) != UDP_ENCAP_OVPNINUDP))
+		return NULL;
+
+	ovpn_sock = rcu_dereference_sk_user_data(sk);
+	if (unlikely(!ovpn_sock))
+		return NULL;
+
+	/* make sure that sk matches our stored transport socket */
+	if (unlikely(!ovpn_sock->sock || sk != ovpn_sock->sock->sk))
+		return NULL;
+
+	return ovpn_sock;
+}
+
+/**
+ * ovpn_udp_encap_recv - Start processing a received UDP packet.
+ * @sk: socket over which the packet was received
+ * @skb: the received packet
+ *
+ * If the first byte of the payload is:
+ * - DATA_V2 the packet is accepted for further processing,
+ * - DATA_V1 the packet is dropped as not supported,
+ * - anything else the packet is forwarded to the UDP stack for
+ *   delivery to user space.
+ *
+ * Return:
+ *  0 if skb was consumed or dropped
+ * >0 if skb should be passed up to userspace as UDP (packet not consumed)
+ * <0 if skb should be resubmitted as proto -N (packet not consumed)
+ */
+static int ovpn_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
+{
+	struct ovpn_socket *ovpn_sock;
+	struct ovpn_priv *ovpn;
+	struct ovpn_peer *peer;
+	u32 peer_id;
+	u8 opcode;
+
+	ovpn_sock = ovpn_socket_from_udp_sock(sk);
+	if (unlikely(!ovpn_sock)) {
+		net_err_ratelimited("ovpn: %s invoked on non-ovpn socket\n",
+				    __func__);
+		goto drop_noovpn;
+	}
+
+	ovpn = ovpn_sock->ovpn;
+	if (unlikely(!ovpn)) {
+		net_err_ratelimited("ovpn: cannot obtain ovpn object from UDP socket\n");
+		goto drop_noovpn;
+	}
+
+	/* Make sure the first 4 bytes of the skb data buffer after the UDP
+	 * header are accessible.
+	 * They are required to fetch the OP code, the key ID and the peer ID.
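+	 * ovpn_opcode_compose() shows how these fields are packed into the
+	 * 32 bit header; ovpn_opcode_from_skb() and ovpn_peer_id_from_skb()
+	 * below perform the reverse extraction.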
+ */ + if (unlikely(!pskb_may_pull(skb, sizeof(struct udphdr) + + OVPN_OPCODE_SIZE))) { + net_dbg_ratelimited("%s: packet too small from UDP socket\n", + netdev_name(ovpn->dev)); + goto drop; + } + + opcode = ovpn_opcode_from_skb(skb, sizeof(struct udphdr)); + if (unlikely(opcode != OVPN_DATA_V2)) { + /* DATA_V1 is not supported */ + if (opcode == OVPN_DATA_V1) + goto drop; + + /* unknown or control packet: let it bubble up to userspace */ + return 1; + } + + peer_id = ovpn_peer_id_from_skb(skb, sizeof(struct udphdr)); + /* some OpenVPN server implementations send data packets with the + * peer-id set to UNDEF. In this case we skip the peer lookup by peer-id + * and we try with the transport address + */ + if (peer_id == OVPN_PEER_ID_UNDEF) + peer = ovpn_peer_get_by_transp_addr(ovpn, skb); + else + peer = ovpn_peer_get_by_id(ovpn, peer_id); + + if (unlikely(!peer)) + goto drop; + + /* pop off outer UDP header */ + __skb_pull(skb, sizeof(struct udphdr)); + ovpn_recv(peer, skb); + return 0; + +drop: + dev_core_stats_rx_dropped_inc(ovpn->dev); +drop_noovpn: + kfree_skb(skb); + return 0; +} + +/** + * ovpn_udp4_output - send IPv4 packet over udp socket + * @peer: the destination peer + * @bind: the binding related to the destination peer + * @cache: dst cache + * @sk: the socket to send the packet over + * @skb: the packet to send + * + * Return: 0 on success or a negative error code otherwise + */ +static int ovpn_udp4_output(struct ovpn_peer *peer, struct ovpn_bind *bind, + struct dst_cache *cache, struct sock *sk, + struct sk_buff *skb) +{ + struct rtable *rt; + struct flowi4 fl = { + .saddr = bind->local.ipv4.s_addr, + .daddr = bind->remote.in4.sin_addr.s_addr, + .fl4_sport = inet_sk(sk)->inet_sport, + .fl4_dport = bind->remote.in4.sin_port, + .flowi4_proto = sk->sk_protocol, + .flowi4_mark = sk->sk_mark, + }; + int ret; + + local_bh_disable(); + rt = dst_cache_get_ip4(cache, &fl.saddr); + if (rt) + goto transmit; + + if (unlikely(!inet_confirm_addr(sock_net(sk), NULL, 0, fl.saddr, + RT_SCOPE_HOST))) { + /* we may end up here when the cached address is not usable + * anymore. 
In this case we reset address/cache and perform a + * new look up + */ + fl.saddr = 0; + spin_lock_bh(&peer->lock); + bind->local.ipv4.s_addr = 0; + spin_unlock_bh(&peer->lock); + dst_cache_reset(cache); + } + + rt = ip_route_output_flow(sock_net(sk), &fl, sk); + if (IS_ERR(rt) && PTR_ERR(rt) == -EINVAL) { + fl.saddr = 0; + spin_lock_bh(&peer->lock); + bind->local.ipv4.s_addr = 0; + spin_unlock_bh(&peer->lock); + dst_cache_reset(cache); + + rt = ip_route_output_flow(sock_net(sk), &fl, sk); + } + + if (IS_ERR(rt)) { + ret = PTR_ERR(rt); + net_dbg_ratelimited("%s: no route to host %pISpc: %d\n", + netdev_name(peer->ovpn->dev), + &bind->remote.in4, + ret); + goto err; + } + dst_cache_set_ip4(cache, &rt->dst, fl.saddr); + +transmit: + udp_tunnel_xmit_skb(rt, sk, skb, fl.saddr, fl.daddr, 0, + ip4_dst_hoplimit(&rt->dst), 0, fl.fl4_sport, + fl.fl4_dport, false, sk->sk_no_check_tx); + ret = 0; +err: + local_bh_enable(); + return ret; +} + +#if IS_ENABLED(CONFIG_IPV6) +/** + * ovpn_udp6_output - send IPv6 packet over udp socket + * @peer: the destination peer + * @bind: the binding related to the destination peer + * @cache: dst cache + * @sk: the socket to send the packet over + * @skb: the packet to send + * + * Return: 0 on success or a negative error code otherwise + */ +static int ovpn_udp6_output(struct ovpn_peer *peer, struct ovpn_bind *bind, + struct dst_cache *cache, struct sock *sk, + struct sk_buff *skb) +{ + struct dst_entry *dst; + int ret; + + struct flowi6 fl = { + .saddr = bind->local.ipv6, + .daddr = bind->remote.in6.sin6_addr, + .fl6_sport = inet_sk(sk)->inet_sport, + .fl6_dport = bind->remote.in6.sin6_port, + .flowi6_proto = sk->sk_protocol, + .flowi6_mark = sk->sk_mark, + .flowi6_oif = bind->remote.in6.sin6_scope_id, + }; + + local_bh_disable(); + dst = dst_cache_get_ip6(cache, &fl.saddr); + if (dst) + goto transmit; + + if (unlikely(!ipv6_chk_addr(sock_net(sk), &fl.saddr, NULL, 0))) { + /* we may end up here when the cached address is not usable + * anymore. In this case we reset address/cache and perform a + * new look up + */ + fl.saddr = in6addr_any; + spin_lock_bh(&peer->lock); + bind->local.ipv6 = in6addr_any; + spin_unlock_bh(&peer->lock); + dst_cache_reset(cache); + } + + dst = ipv6_stub->ipv6_dst_lookup_flow(sock_net(sk), sk, &fl, NULL); + if (IS_ERR(dst)) { + ret = PTR_ERR(dst); + net_dbg_ratelimited("%s: no route to host %pISpc: %d\n", + netdev_name(peer->ovpn->dev), + &bind->remote.in6, ret); + goto err; + } + dst_cache_set_ip6(cache, dst, &fl.saddr); + +transmit: + udp_tunnel6_xmit_skb(dst, sk, skb, skb->dev, &fl.saddr, &fl.daddr, 0, + ip6_dst_hoplimit(dst), 0, fl.fl6_sport, + fl.fl6_dport, udp_get_no_check6_tx(sk)); + ret = 0; +err: + local_bh_enable(); + return ret; +} +#endif + +/** + * ovpn_udp_output - transmit skb using udp-tunnel + * @peer: the destination peer + * @cache: dst cache + * @sk: the socket to send the packet over + * @skb: the packet to send + * + * rcu_read_lock should be held on entry. + * On return, the skb is consumed. 
+ *
+ * Return: 0 on success or a negative error code otherwise
+ */
+static int ovpn_udp_output(struct ovpn_peer *peer, struct dst_cache *cache,
+			   struct sock *sk, struct sk_buff *skb)
+{
+	struct ovpn_bind *bind;
+	int ret;
+
+	/* set sk to null if skb is already orphaned */
+	if (!skb->destructor)
+		skb->sk = NULL;
+
+	/* always permit openvpn-created packets to be (outside) fragmented */
+	skb->ignore_df = 1;
+
+	rcu_read_lock();
+	bind = rcu_dereference(peer->bind);
+	if (unlikely(!bind)) {
+		net_warn_ratelimited("%s: no bind for remote peer %u\n",
+				     netdev_name(peer->ovpn->dev), peer->id);
+		ret = -ENODEV;
+		goto out;
+	}
+
+	switch (bind->remote.in4.sin_family) {
+	case AF_INET:
+		ret = ovpn_udp4_output(peer, bind, cache, sk, skb);
+		break;
+#if IS_ENABLED(CONFIG_IPV6)
+	case AF_INET6:
+		ret = ovpn_udp6_output(peer, bind, cache, sk, skb);
+		break;
+#endif
+	default:
+		ret = -EAFNOSUPPORT;
+		break;
+	}
+
+out:
+	rcu_read_unlock();
+	return ret;
+}
+
+/**
+ * ovpn_udp_send_skb - prepare skb and send it via UDP
+ * @peer: the destination peer
+ * @sock: the RCU protected peer socket
+ * @skb: the packet to send
+ */
+void ovpn_udp_send_skb(struct ovpn_peer *peer, struct socket *sock,
+		       struct sk_buff *skb)
+{
+	int ret = -1;
+
+	skb->dev = peer->ovpn->dev;
+	/* no checksum performed at this layer */
+	skb->ip_summed = CHECKSUM_NONE;
+
+	/* get socket info */
+	if (unlikely(!sock)) {
+		net_warn_ratelimited("%s: no sock for remote peer %u\n",
+				     netdev_name(peer->ovpn->dev), peer->id);
+		goto out;
+	}
+
+	/* crypto layer -> transport (UDP) */
+	ret = ovpn_udp_output(peer, &peer->dst_cache, sock->sk, skb);
+out:
+	if (unlikely(ret < 0)) {
+		kfree_skb(skb);
+		return;
+	}
+}
+
+static void ovpn_udp_encap_destroy(struct sock *sk)
+{
+	struct ovpn_socket *sock;
+	struct ovpn_priv *ovpn;
+
+	rcu_read_lock();
+	sock = rcu_dereference_sk_user_data(sk);
+	if (!sock || !sock->ovpn) {
+		rcu_read_unlock();
+		return;
+	}
+	ovpn = sock->ovpn;
+	rcu_read_unlock();
+
+	ovpn_peers_free(ovpn, sk, OVPN_DEL_PEER_REASON_TRANSPORT_DISCONNECT);
+}
+
+/**
+ * ovpn_udp_socket_attach - set udp-tunnel CBs on socket and link it to ovpn
+ * @ovpn_sock: socket to configure
+ * @ovpn: the openvpn instance to link
+ *
+ * After invoking this function, the sock will be controlled by ovpn so that
+ * any incoming packet may be processed by ovpn first.
+ *
+ * Return: 0 on success or a negative error code otherwise
+ */
+int ovpn_udp_socket_attach(struct ovpn_socket *ovpn_sock,
+			   struct ovpn_priv *ovpn)
+{
+	struct udp_tunnel_sock_cfg cfg = {
+		.encap_type = UDP_ENCAP_OVPNINUDP,
+		.encap_rcv = ovpn_udp_encap_recv,
+		.encap_destroy = ovpn_udp_encap_destroy,
+	};
+	struct socket *sock = ovpn_sock->sock;
+	struct ovpn_socket *old_data;
+	int ret;
+
+	/* make sure no pre-existing encapsulation handler exists */
+	rcu_read_lock();
+	old_data = rcu_dereference_sk_user_data(sock->sk);
+	if (!old_data) {
+		/* socket is currently unused - we can take it */
+		rcu_read_unlock();
+		setup_udp_tunnel_sock(sock_net(sock->sk), sock, &cfg);
+		return 0;
+	}
+
+	/* socket is in use. We need to understand if it's owned by this ovpn
+	 * instance or by something else.
+	 * In the former case, we can increase the refcounter and happily
+	 * use it, because the same UDP socket is expected to be shared among
+	 * different peers.
+	 *
+	 * Unlike TCP, a single UDP socket can be used to talk to many remote
+	 * hosts and therefore openvpn instantiates only one for all its peers
+	 */
+	if ((READ_ONCE(udp_sk(sock->sk)->encap_type) == UDP_ENCAP_OVPNINUDP) &&
+	    old_data->ovpn == ovpn) {
+		netdev_dbg(ovpn->dev,
+			   "provided socket already owned by this interface\n");
+		ret = -EALREADY;
+	} else {
+		netdev_dbg(ovpn->dev,
+			   "provided socket already taken by other user\n");
+		ret = -EBUSY;
+	}
+	rcu_read_unlock();
+
+	return ret;
+}
+
+/**
+ * ovpn_udp_socket_detach - clean udp-tunnel status for this socket
+ * @ovpn_sock: the socket to clean
+ */
+void ovpn_udp_socket_detach(struct ovpn_socket *ovpn_sock)
+{
+	struct udp_tunnel_sock_cfg cfg = { };
+
+	setup_udp_tunnel_sock(sock_net(ovpn_sock->sock->sk), ovpn_sock->sock,
+			      &cfg);
+}
diff --git a/drivers/net/ovpn/udp.h b/drivers/net/ovpn/udp.h
new file mode 100644
index 0000000000000..9994eb6e04283
--- /dev/null
+++ b/drivers/net/ovpn/udp.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* OpenVPN data channel offload
+ *
+ * Copyright (C) 2019-2025 OpenVPN, Inc.
+ *
+ * Author: Antonio Quartulli
+ */
+
+#ifndef _NET_OVPN_UDP_H_
+#define _NET_OVPN_UDP_H_
+
+#include
+
+struct ovpn_peer;
+struct ovpn_priv;
+struct socket;
+
+int ovpn_udp_socket_attach(struct ovpn_socket *ovpn_sock,
+			   struct ovpn_priv *ovpn);
+void ovpn_udp_socket_detach(struct ovpn_socket *ovpn_sock);
+
+void ovpn_udp_send_skb(struct ovpn_peer *peer, struct socket *sock,
+		       struct sk_buff *skb);
+
+#endif /* _NET_OVPN_UDP_H_ */
diff --git a/drivers/net/pcs/pcs-lynx.c b/drivers/net/pcs/pcs-lynx.c
index e46f588cae7d3..23b40e9eacbbc 100644
--- a/drivers/net/pcs/pcs-lynx.c
+++ b/drivers/net/pcs/pcs-lynx.c
@@ -355,7 +355,6 @@ static struct phylink_pcs *lynx_pcs_create(struct mdio_device *mdio)
 	mdio_device_get(mdio);
 	lynx->mdio = mdio;
 	lynx->pcs.ops = &lynx_pcs_phylink_ops;
-	lynx->pcs.neg_mode = true;
 	lynx->pcs.poll = true;
 
 	for (i = 0; i < ARRAY_SIZE(lynx_interfaces); i++)
diff --git a/drivers/net/pcs/pcs-mtk-lynxi.c b/drivers/net/pcs/pcs-mtk-lynxi.c
index 7d6261dee5340..149ddf51d7856 100644
--- a/drivers/net/pcs/pcs-mtk-lynxi.c
+++ b/drivers/net/pcs/pcs-mtk-lynxi.c
@@ -305,7 +305,6 @@ struct phylink_pcs *mtk_pcs_lynxi_create(struct device *dev,
 	mpcs->regmap = regmap;
 	mpcs->flags = flags;
 	mpcs->pcs.ops = &mtk_pcs_lynxi_ops;
-	mpcs->pcs.neg_mode = true;
 	mpcs->pcs.poll = true;
 	mpcs->interface = PHY_INTERFACE_MODE_NA;
 
diff --git a/drivers/net/pcs/pcs-rzn1-miic.c b/drivers/net/pcs/pcs-rzn1-miic.c
index 61944574d087d..d79bb9b06cd22 100644
--- a/drivers/net/pcs/pcs-rzn1-miic.c
+++ b/drivers/net/pcs/pcs-rzn1-miic.c
@@ -268,17 +268,6 @@ static void miic_link_up(struct phylink_pcs *pcs, unsigned int neg_mode,
 			  (MIIC_CONVCTRL_CONV_SPEED | MIIC_CONVCTRL_FULLD), val);
 }
 
-static int miic_validate(struct phylink_pcs *pcs, unsigned long *supported,
-			 const struct phylink_link_state *state)
-{
-	if (phy_interface_mode_is_rgmii(state->interface) ||
-	    state->interface == PHY_INTERFACE_MODE_RMII ||
-	    state->interface == PHY_INTERFACE_MODE_MII)
-		return 1;
-
-	return -EINVAL;
-}
-
 static int miic_pre_init(struct phylink_pcs *pcs)
 {
 	struct miic_port *miic_port = phylink_pcs_to_miic_port(pcs);
@@ -307,7 +296,6 @@ static int miic_pre_init(struct phylink_pcs *pcs)
 }
 
 static const struct phylink_pcs_ops miic_phylink_ops = {
-	.pcs_validate = miic_validate,
 	.pcs_config = miic_config,
 	.pcs_link_up = miic_link_up,
 	.pcs_pre_init = miic_pre_init,
@@ -361,7 +349,10 @@ struct phylink_pcs *miic_create(struct device *dev, struct
device_node *np) miic_port->miic = miic; miic_port->port = port - 1; miic_port->pcs.ops = &miic_phylink_ops; - miic_port->pcs.neg_mode = true; + + phy_interface_set_rgmii(miic_port->pcs.supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_RMII, miic_port->pcs.supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_MII, miic_port->pcs.supported_interfaces); return &miic_port->pcs; } @@ -472,13 +463,10 @@ static int miic_parse_dt(struct device *dev, u32 *mode_cfg) if (of_property_read_u32(np, "renesas,miic-switch-portin", &conf) == 0) dt_val[0] = conf; - for_each_child_of_node(np, conv) { + for_each_available_child_of_node(np, conv) { if (of_property_read_u32(conv, "reg", &port)) continue; - if (!of_device_is_available(conv)) - continue; - if (of_property_read_u32(conv, "renesas,miic-input", &conf) == 0) dt_val[port] = conf; } diff --git a/drivers/net/pcs/pcs-xpcs.c b/drivers/net/pcs/pcs-xpcs.c index 1faa37f0e7b94..3d1bd5aac0937 100644 --- a/drivers/net/pcs/pcs-xpcs.c +++ b/drivers/net/pcs/pcs-xpcs.c @@ -602,35 +602,21 @@ static void xpcs_get_interfaces(struct dw_xpcs *xpcs, unsigned long *interfaces) __set_bit(compat->interface, interfaces); } -int xpcs_config_eee(struct dw_xpcs *xpcs, int mult_fact_100ns, int enable) +static int xpcs_switch_interface_mode(struct dw_xpcs *xpcs, + phy_interface_t interface) { - u16 mask, val; - int ret; - - mask = DW_VR_MII_EEE_LTX_EN | DW_VR_MII_EEE_LRX_EN | - DW_VR_MII_EEE_TX_QUIET_EN | DW_VR_MII_EEE_RX_QUIET_EN | - DW_VR_MII_EEE_TX_EN_CTRL | DW_VR_MII_EEE_RX_EN_CTRL | - DW_VR_MII_EEE_MULT_FACT_100NS; + int ret = 0; - if (enable) - val = DW_VR_MII_EEE_LTX_EN | DW_VR_MII_EEE_LRX_EN | - DW_VR_MII_EEE_TX_QUIET_EN | DW_VR_MII_EEE_RX_QUIET_EN | - DW_VR_MII_EEE_TX_EN_CTRL | DW_VR_MII_EEE_RX_EN_CTRL | - FIELD_PREP(DW_VR_MII_EEE_MULT_FACT_100NS, - mult_fact_100ns); - else - val = 0; - - ret = xpcs_modify(xpcs, MDIO_MMD_VEND2, DW_VR_MII_EEE_MCTRL0, mask, - val); - if (ret < 0) - return ret; + if (xpcs->info.pma == WX_TXGBE_XPCS_PMA_10G_ID) { + ret = txgbe_xpcs_switch_mode(xpcs, interface); + } else if (xpcs->interface != interface) { + if (interface == PHY_INTERFACE_MODE_SGMII) + xpcs->need_reset = true; + xpcs->interface = interface; + } - return xpcs_modify(xpcs, MDIO_MMD_VEND2, DW_VR_MII_EEE_MCTRL1, - DW_VR_MII_EEE_TRN_LPI, - enable ? 
DW_VR_MII_EEE_TRN_LPI : 0); + return ret; } -EXPORT_SYMBOL_GPL(xpcs_config_eee); static void xpcs_pre_config(struct phylink_pcs *pcs, phy_interface_t interface) { @@ -638,6 +624,11 @@ static void xpcs_pre_config(struct phylink_pcs *pcs, phy_interface_t interface) const struct dw_xpcs_compat *compat; int ret; + ret = xpcs_switch_interface_mode(xpcs, interface); + if (ret) + dev_err(&xpcs->mdiodev->dev, "switch interface failed: %pe\n", + ERR_PTR(ret)); + if (!xpcs->need_reset) return; @@ -829,10 +820,6 @@ static int xpcs_do_config(struct dw_xpcs *xpcs, phy_interface_t interface, return -ENODEV; if (xpcs->info.pma == WX_TXGBE_XPCS_PMA_10G_ID) { - ret = txgbe_xpcs_switch_mode(xpcs, interface); - if (ret) - return ret; - /* Wangxun devices need backplane CL37 AN enabled for * SGMII and 1000base-X */ @@ -1193,6 +1180,63 @@ static void xpcs_an_restart(struct phylink_pcs *pcs) BMCR_ANRESTART); } +static int xpcs_config_eee(struct dw_xpcs *xpcs, bool enable) +{ + u16 mask, val; + int ret; + + mask = DW_VR_MII_EEE_LTX_EN | DW_VR_MII_EEE_LRX_EN | + DW_VR_MII_EEE_TX_QUIET_EN | DW_VR_MII_EEE_RX_QUIET_EN | + DW_VR_MII_EEE_TX_EN_CTRL | DW_VR_MII_EEE_RX_EN_CTRL | + DW_VR_MII_EEE_MULT_FACT_100NS; + + if (enable) + val = DW_VR_MII_EEE_LTX_EN | DW_VR_MII_EEE_LRX_EN | + DW_VR_MII_EEE_TX_QUIET_EN | DW_VR_MII_EEE_RX_QUIET_EN | + DW_VR_MII_EEE_TX_EN_CTRL | DW_VR_MII_EEE_RX_EN_CTRL | + FIELD_PREP(DW_VR_MII_EEE_MULT_FACT_100NS, + xpcs->eee_mult_fact); + else + val = 0; + + ret = xpcs_modify(xpcs, MDIO_MMD_VEND2, DW_VR_MII_EEE_MCTRL0, mask, + val); + if (ret < 0) + return ret; + + return xpcs_modify(xpcs, MDIO_MMD_VEND2, DW_VR_MII_EEE_MCTRL1, + DW_VR_MII_EEE_TRN_LPI, + enable ? DW_VR_MII_EEE_TRN_LPI : 0); +} + +static void xpcs_disable_eee(struct phylink_pcs *pcs) +{ + struct dw_xpcs *xpcs = phylink_pcs_to_xpcs(pcs); + + xpcs_config_eee(xpcs, false); +} + +static void xpcs_enable_eee(struct phylink_pcs *pcs) +{ + struct dw_xpcs *xpcs = phylink_pcs_to_xpcs(pcs); + + xpcs_config_eee(xpcs, true); +} + +/** + * xpcs_config_eee_mult_fact() - set the EEE clock multiplying factor + * @xpcs: pointer to a &struct dw_xpcs instance + * @mult_fact: the multiplying factor + * + * Configure the EEE clock multiplying factor. This value should be such that + * clk_eee_time_period * (mult_fact + 1) is within the range 80 to 120ns. 
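+ * For example, assuming a 20 ns clk_eee_time_period, mult_fact = 4
+ * gives 20 ns * (4 + 1) = 100 ns, which sits inside that window.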
+ */ +void xpcs_config_eee_mult_fact(struct dw_xpcs *xpcs, u8 mult_fact) +{ + xpcs->eee_mult_fact = mult_fact; +} +EXPORT_SYMBOL_GPL(xpcs_config_eee_mult_fact); + static int xpcs_read_ids(struct dw_xpcs *xpcs) { int ret; @@ -1341,6 +1385,8 @@ static const struct phylink_pcs_ops xpcs_phylink_ops = { .pcs_get_state = xpcs_get_state, .pcs_an_restart = xpcs_an_restart, .pcs_link_up = xpcs_link_up, + .pcs_disable_eee = xpcs_disable_eee, + .pcs_enable_eee = xpcs_enable_eee, }; static int xpcs_identify(struct dw_xpcs *xpcs) @@ -1374,7 +1420,6 @@ static struct dw_xpcs *xpcs_create_data(struct mdio_device *mdiodev) mdio_device_get(mdiodev); xpcs->mdiodev = mdiodev; xpcs->pcs.ops = &xpcs_phylink_ops; - xpcs->pcs.neg_mode = true; xpcs->pcs.poll = true; return xpcs; diff --git a/drivers/net/pcs/pcs-xpcs.h b/drivers/net/pcs/pcs-xpcs.h index adc5a0b3c8831..929fa238445ed 100644 --- a/drivers/net/pcs/pcs-xpcs.h +++ b/drivers/net/pcs/pcs-xpcs.h @@ -55,23 +55,11 @@ /* Clause 37 Defines */ /* VR MII MMD registers offsets */ #define DW_VR_MII_DIG_CTRL1 0x8000 -#define DW_VR_MII_AN_CTRL 0x8001 -#define DW_VR_MII_AN_INTR_STS 0x8002 -/* EEE Mode Control Register */ -#define DW_VR_MII_EEE_MCTRL0 0x8006 -#define DW_VR_MII_EEE_MCTRL1 0x800b -#define DW_VR_MII_DIG_CTRL2 0x80e1 - -/* VR_MII_DIG_CTRL1 */ #define DW_VR_MII_DIG_CTRL1_MAC_AUTO_SW BIT(9) #define DW_VR_MII_DIG_CTRL1_2G5_EN BIT(2) #define DW_VR_MII_DIG_CTRL1_PHY_MODE_CTRL BIT(0) -/* VR_MII_DIG_CTRL2 */ -#define DW_VR_MII_DIG_CTRL2_TX_POL_INV BIT(4) -#define DW_VR_MII_DIG_CTRL2_RX_POL_INV BIT(0) - -/* VR_MII_AN_CTRL */ +#define DW_VR_MII_AN_CTRL 0x8001 #define DW_VR_MII_AN_CTRL_8BIT BIT(8) #define DW_VR_MII_TX_CONFIG_MASK BIT(3) #define DW_VR_MII_TX_CONFIG_PHY_SIDE_SGMII 0x1 @@ -81,7 +69,7 @@ #define DW_VR_MII_PCS_MODE_C37_SGMII 0x2 #define DW_VR_MII_AN_INTR_EN BIT(0) -/* VR_MII_AN_INTR_STS */ +#define DW_VR_MII_AN_INTR_STS 0x8002 #define DW_VR_MII_AN_STS_C37_ANCMPLT_INTR BIT(0) #define DW_VR_MII_AN_STS_C37_ANSGM_FD BIT(1) #define DW_VR_MII_AN_STS_C37_ANSGM_SP GENMASK(3, 2) @@ -90,19 +78,22 @@ #define DW_VR_MII_C37_ANSGM_SP_1000 0x2 #define DW_VR_MII_C37_ANSGM_SP_LNKSTS BIT(4) -/* VR MII EEE Control 0 defines */ +#define DW_VR_MII_EEE_MCTRL0 0x8006 #define DW_VR_MII_EEE_LTX_EN BIT(0) /* LPI Tx Enable */ #define DW_VR_MII_EEE_LRX_EN BIT(1) /* LPI Rx Enable */ #define DW_VR_MII_EEE_TX_QUIET_EN BIT(2) /* Tx Quiet Enable */ #define DW_VR_MII_EEE_RX_QUIET_EN BIT(3) /* Rx Quiet Enable */ #define DW_VR_MII_EEE_TX_EN_CTRL BIT(4) /* Tx Control Enable */ #define DW_VR_MII_EEE_RX_EN_CTRL BIT(7) /* Rx Control Enable */ - #define DW_VR_MII_EEE_MULT_FACT_100NS GENMASK(11, 8) -/* VR MII EEE Control 1 defines */ +#define DW_VR_MII_EEE_MCTRL1 0x800b #define DW_VR_MII_EEE_TRN_LPI BIT(0) /* Transparent Mode Enable */ +#define DW_VR_MII_DIG_CTRL2 0x80e1 +#define DW_VR_MII_DIG_CTRL2_TX_POL_INV BIT(4) +#define DW_VR_MII_DIG_CTRL2_RX_POL_INV BIT(0) + #define DW_XPCS_INFO_DECLARE(_name, _pcs, _pma) \ static const struct dw_xpcs_info _name = { .pcs = _pcs, .pma = _pma } @@ -122,6 +113,7 @@ struct dw_xpcs { struct phylink_pcs pcs; phy_interface_t interface; bool need_reset; + u8 eee_mult_fact; }; int xpcs_read(struct dw_xpcs *xpcs, int dev, u32 reg); diff --git a/drivers/net/pfcp.c b/drivers/net/pfcp.c index 68d0d9e92a220..f873a92d24459 100644 --- a/drivers/net/pfcp.c +++ b/drivers/net/pfcp.c @@ -184,15 +184,16 @@ static int pfcp_add_sock(struct pfcp_dev *pfcp) return PTR_ERR_OR_ZERO(pfcp->sock); } -static int pfcp_newlink(struct net *net, struct net_device *dev, - struct nlattr 
*tb[], struct nlattr *data[], +static int pfcp_newlink(struct net_device *dev, + struct rtnl_newlink_params *params, struct netlink_ext_ack *extack) { + struct net *link_net = rtnl_newlink_link_net(params); struct pfcp_dev *pfcp = netdev_priv(dev); struct pfcp_net *pn; int err; - pfcp->net = net; + pfcp->net = link_net; err = pfcp_add_sock(pfcp); if (err) { @@ -206,7 +207,7 @@ static int pfcp_newlink(struct net *net, struct net_device *dev, goto exit_del_pfcp_sock; } - pn = net_generic(net, pfcp_net_id); + pn = net_generic(link_net, pfcp_net_id); list_add(&pfcp->list, &pn->pfcp_dev_list); netdev_dbg(dev, "registered new PFCP interface\n"); diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index 41c15a2c20379..d29f9f7fd2e11 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -328,7 +328,7 @@ config NXP_C45_TJA11XX_PHY depends on MACSEC || !MACSEC help Enable support for NXP C45 TJA11XX PHYs. - Currently supports the TJA1103, TJA1104 and TJA1120 PHYs. + Currently supports the TJA1103, TJA1104, TJA1120 and TJA1121 PHYs. config NXP_TJA11XX_PHY tristate "NXP TJA11xx PHYs support" diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile index c8dac6e922780..23ce205ae91d8 100644 --- a/drivers/net/phy/Makefile +++ b/drivers/net/phy/Makefile @@ -2,7 +2,8 @@ # Makefile for Linux PHY drivers libphy-y := phy.o phy-c45.o phy-core.o phy_device.o \ - linkmode.o phy_link_topology.o + linkmode.o phy_link_topology.o \ + phy_package.o phy_caps.o mdio-bus-y += mdio_bus.o mdio_device.o ifdef CONFIG_MDIO_DEVICE diff --git a/drivers/net/phy/adin1100.c b/drivers/net/phy/adin1100.c index 6bb469429b9d0..bd7a47a903aca 100644 --- a/drivers/net/phy/adin1100.c +++ b/drivers/net/phy/adin1100.c @@ -215,8 +215,11 @@ static int adin_resume(struct phy_device *phydev) return adin_set_powerdown_mode(phydev, false); } -static int adin_set_loopback(struct phy_device *phydev, bool enable) +static int adin_set_loopback(struct phy_device *phydev, bool enable, int speed) { + if (enable && speed) + return -EOPNOTSUPP; + if (enable) return phy_set_bits_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_10T1L_CTRL, BMCR_LOOPBACK); diff --git a/drivers/net/phy/air_en8811h.c b/drivers/net/phy/air_en8811h.c index e9fd24cb72709..de49bb200926e 100644 --- a/drivers/net/phy/air_en8811h.c +++ b/drivers/net/phy/air_en8811h.c @@ -16,6 +16,7 @@ #include #include #include +#include #define EN8811H_PHY_ID 0x03a2a411 @@ -112,6 +113,11 @@ #define EN8811H_POLARITY_TX_NORMAL BIT(0) #define EN8811H_POLARITY_RX_REVERSE BIT(1) +#define EN8811H_CLK_CGM 0xcf958 +#define EN8811H_CLK_CGM_CKO BIT(26) +#define EN8811H_HWTRAP1 0xcf914 +#define EN8811H_HWTRAP1_CKO BIT(12) + #define EN8811H_GPIO_OUTPUT 0xcf8b8 #define EN8811H_GPIO_OUTPUT_345 (BIT(3) | BIT(4) | BIT(5)) @@ -142,10 +148,15 @@ struct led { unsigned long state; }; +#define clk_hw_to_en8811h_priv(_hw) \ + container_of(_hw, struct en8811h_priv, hw) + struct en8811h_priv { u32 firmware_version; bool mcu_needs_restart; struct led led[EN8811H_LED_COUNT]; + struct clk_hw hw; + struct phy_device *phydev; }; enum { @@ -806,6 +817,86 @@ static int en8811h_led_hw_is_supported(struct phy_device *phydev, u8 index, return 0; }; +static unsigned long en8811h_clk_recalc_rate(struct clk_hw *hw, + unsigned long parent) +{ + struct en8811h_priv *priv = clk_hw_to_en8811h_priv(hw); + struct phy_device *phydev = priv->phydev; + u32 pbus_value; + int ret; + + ret = air_buckpbus_reg_read(phydev, EN8811H_HWTRAP1, &pbus_value); + if (ret < 0) + return ret; + + return (pbus_value & EN8811H_HWTRAP1_CKO) ? 
50000000 : 25000000; +} + +static int en8811h_clk_enable(struct clk_hw *hw) +{ + struct en8811h_priv *priv = clk_hw_to_en8811h_priv(hw); + struct phy_device *phydev = priv->phydev; + + return air_buckpbus_reg_modify(phydev, EN8811H_CLK_CGM, + EN8811H_CLK_CGM_CKO, EN8811H_CLK_CGM_CKO); +} + +static void en8811h_clk_disable(struct clk_hw *hw) +{ + struct en8811h_priv *priv = clk_hw_to_en8811h_priv(hw); + struct phy_device *phydev = priv->phydev; + + air_buckpbus_reg_modify(phydev, EN8811H_CLK_CGM, + EN8811H_CLK_CGM_CKO, 0); +} + +static int en8811h_clk_is_enabled(struct clk_hw *hw) +{ + struct en8811h_priv *priv = clk_hw_to_en8811h_priv(hw); + struct phy_device *phydev = priv->phydev; + int ret = 0; + u32 pbus_value; + + ret = air_buckpbus_reg_read(phydev, EN8811H_CLK_CGM, &pbus_value); + if (ret < 0) + return ret; + + return (pbus_value & EN8811H_CLK_CGM_CKO); +} + +static const struct clk_ops en8811h_clk_ops = { + .recalc_rate = en8811h_clk_recalc_rate, + .enable = en8811h_clk_enable, + .disable = en8811h_clk_disable, + .is_enabled = en8811h_clk_is_enabled, +}; + +static int en8811h_clk_provider_setup(struct device *dev, + struct clk_hw *hw) +{ + struct clk_init_data init; + int ret; + + if (!IS_ENABLED(CONFIG_COMMON_CLK)) + return 0; + + init.name = devm_kasprintf(dev, GFP_KERNEL, "%s-cko", + fwnode_get_name(dev_fwnode(dev))); + if (!init.name) + return -ENOMEM; + + init.ops = &en8811h_clk_ops; + init.flags = 0; + init.num_parents = 0; + hw->init = &init; + + ret = devm_clk_hw_register(dev, hw); + if (ret) + return ret; + + return devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, hw); +} + static int en8811h_probe(struct phy_device *phydev) { struct en8811h_priv *priv; @@ -838,6 +929,12 @@ static int en8811h_probe(struct phy_device *phydev) return ret; } + priv->phydev = phydev; + /* Co-Clock Output */ + ret = en8811h_clk_provider_setup(&phydev->mdio.dev, &priv->hw); + if (ret) + return ret; + /* Configure led gpio pins as output */ ret = air_buckpbus_reg_modify(phydev, EN8811H_GPIO_OUTPUT, EN8811H_GPIO_OUTPUT_345, diff --git a/drivers/net/phy/aquantia/aquantia_firmware.c b/drivers/net/phy/aquantia/aquantia_firmware.c index dab3af80593f5..bbbcc9736b00e 100644 --- a/drivers/net/phy/aquantia/aquantia_firmware.c +++ b/drivers/net/phy/aquantia/aquantia_firmware.c @@ -328,10 +328,11 @@ static int aqr_firmware_load_fs(struct phy_device *phydev) const char *fw_name; int ret; - ret = of_property_read_string(dev->of_node, "firmware-name", - &fw_name); - if (ret) + ret = device_property_read_string(dev, "firmware-name", &fw_name); + if (ret) { + phydev_err(phydev, "failed to read firmware-name: %d\n", ret); return ret; + } ret = request_firmware(&fw, fw_name, dev); if (ret) { diff --git a/drivers/net/phy/aquantia/aquantia_hwmon.c b/drivers/net/phy/aquantia/aquantia_hwmon.c index 7b3c49c3bf49b..1a714b56b765a 100644 --- a/drivers/net/phy/aquantia/aquantia_hwmon.c +++ b/drivers/net/phy/aquantia/aquantia_hwmon.c @@ -172,33 +172,13 @@ static const struct hwmon_ops aqr_hwmon_ops = { .write = aqr_hwmon_write, }; -static u32 aqr_hwmon_chip_config[] = { - HWMON_C_REGISTER_TZ, - 0, -}; - -static const struct hwmon_channel_info aqr_hwmon_chip = { - .type = hwmon_chip, - .config = aqr_hwmon_chip_config, -}; - -static u32 aqr_hwmon_temp_config[] = { - HWMON_T_INPUT | - HWMON_T_MAX | HWMON_T_MIN | - HWMON_T_MAX_ALARM | HWMON_T_MIN_ALARM | - HWMON_T_CRIT | HWMON_T_LCRIT | - HWMON_T_CRIT_ALARM | HWMON_T_LCRIT_ALARM, - 0, -}; - -static const struct hwmon_channel_info aqr_hwmon_temp = { - .type = hwmon_temp, - 
	.config = aqr_hwmon_temp_config,
-};
-
 static const struct hwmon_channel_info * const aqr_hwmon_info[] = {
-	&aqr_hwmon_chip,
-	&aqr_hwmon_temp,
+	HWMON_CHANNEL_INFO(chip, HWMON_C_REGISTER_TZ),
+	HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT |
+				 HWMON_T_MAX | HWMON_T_MIN |
+				 HWMON_T_MAX_ALARM | HWMON_T_MIN_ALARM |
+				 HWMON_T_CRIT | HWMON_T_LCRIT |
+				 HWMON_T_CRIT_ALARM | HWMON_T_LCRIT_ALARM),
 	NULL,
 };
diff --git a/drivers/net/phy/aquantia/aquantia_main.c b/drivers/net/phy/aquantia/aquantia_main.c
index e42ace4e682aa..08b1c9cc902b8 100644
--- a/drivers/net/phy/aquantia/aquantia_main.c
+++ b/drivers/net/phy/aquantia/aquantia_main.c
@@ -50,6 +50,7 @@
 #define MDIO_AN_VEND_PROV_1000BASET_HALF	BIT(14)
 #define MDIO_AN_VEND_PROV_5000BASET_FULL	BIT(11)
 #define MDIO_AN_VEND_PROV_2500BASET_FULL	BIT(10)
+#define MDIO_AN_VEND_PROV_EXC_PHYID_INFO	BIT(6)
 #define MDIO_AN_VEND_PROV_DOWNSHIFT_EN		BIT(4)
 #define MDIO_AN_VEND_PROV_DOWNSHIFT_MASK	GENMASK(3, 0)
 #define MDIO_AN_VEND_PROV_DOWNSHIFT_DFLT	4
@@ -333,6 +334,238 @@ static int aqr_read_status(struct phy_device *phydev)
 	return genphy_c45_read_status(phydev);
 }
 
+static int aqr105_get_features(struct phy_device *phydev)
+{
+	int ret;
+
+	/* Normal feature discovery */
+	ret = genphy_c45_pma_read_abilities(phydev);
+	if (ret)
+		return ret;
+
+	/* The AQR105 PHY fails to indicate the 2.5G and 5G modes, so add them
+	 * here
+	 */
+	linkmode_set_bit(ETHTOOL_LINK_MODE_5000baseT_Full_BIT,
+			 phydev->supported);
+	linkmode_set_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
+			 phydev->supported);
+
+	/* The AQR105 PHY supports both RJ45 and SFP+ interfaces */
+	linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, phydev->supported);
+	linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, phydev->supported);
+
+	return 0;
+}
+
+static int aqr105_setup_forced(struct phy_device *phydev)
+{
+	int vend = MDIO_AN_VEND_PROV_EXC_PHYID_INFO;
+	int ctrl10 = 0;
+	int adv = ADVERTISE_CSMA;
+	int ret;
+
+	switch (phydev->speed) {
+	case SPEED_100:
+		adv |= ADVERTISE_100FULL;
+		break;
+	case SPEED_1000:
+		adv |= ADVERTISE_NPAGE;
+		if (phydev->duplex == DUPLEX_FULL)
+			vend |= MDIO_AN_VEND_PROV_1000BASET_FULL;
+		else
+			vend |= MDIO_AN_VEND_PROV_1000BASET_HALF;
+		break;
+	case SPEED_2500:
+		adv |= (ADVERTISE_NPAGE | ADVERTISE_RESV);
+		vend |= MDIO_AN_VEND_PROV_2500BASET_FULL;
+		break;
+	case SPEED_5000:
+		adv |= (ADVERTISE_NPAGE | ADVERTISE_RESV);
+		vend |= MDIO_AN_VEND_PROV_5000BASET_FULL;
+		break;
+	case SPEED_10000:
+		adv |= (ADVERTISE_NPAGE | ADVERTISE_RESV);
+		ctrl10 |= MDIO_AN_10GBT_CTRL_ADV10G;
+		break;
+	default:
+		return -EINVAL;
+	}
+	ret = phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE, adv);
+	if (ret < 0)
+		return ret;
+	ret = phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_VEND_PROV, vend);
+	if (ret < 0)
+		return ret;
+	ret = phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_10GBT_CTRL, ctrl10);
+	if (ret < 0)
+		return ret;
+
+	/* set by vendor driver, but should be on by default */
+	ret = phy_set_bits_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1,
+			       MDIO_AN_CTRL1_XNP);
+	if (ret < 0)
+		return ret;
+
+	return genphy_c45_an_disable_aneg(phydev);
+}
+
+static int aqr105_config_aneg(struct phy_device *phydev)
+{
+	bool changed = false;
+	u16 reg;
+	int ret;
+
+	ret = aqr_set_mdix(phydev, phydev->mdix_ctrl);
+	if (ret < 0)
+		return ret;
+	if (ret > 0)
+		changed = true;
+
+	if (phydev->autoneg == AUTONEG_DISABLE)
+		return aqr105_setup_forced(phydev);
+
+	ret = genphy_c45_an_config_aneg(phydev);
+	if (ret < 0)
+		return ret;
+	if (ret > 0)
+		changed = true;
+
+	/* Clause 45 has no standardized support for 1000BaseT,
therefore + * use vendor registers for this mode. + */ + reg = 0; + if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, + phydev->advertising)) + reg |= MDIO_AN_VEND_PROV_1000BASET_FULL; + + if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, + phydev->advertising)) + reg |= MDIO_AN_VEND_PROV_1000BASET_HALF; + + /* Handle the case when the 2.5G and 5G speeds are not advertised */ + if (linkmode_test_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, + phydev->advertising)) + reg |= MDIO_AN_VEND_PROV_2500BASET_FULL; + + if (linkmode_test_bit(ETHTOOL_LINK_MODE_5000baseT_Full_BIT, + phydev->advertising)) + reg |= MDIO_AN_VEND_PROV_5000BASET_FULL; + + ret = phy_modify_mmd_changed(phydev, MDIO_MMD_AN, MDIO_AN_VEND_PROV, + MDIO_AN_VEND_PROV_1000BASET_HALF | + MDIO_AN_VEND_PROV_1000BASET_FULL | + MDIO_AN_VEND_PROV_2500BASET_FULL | + MDIO_AN_VEND_PROV_5000BASET_FULL, reg); + if (ret < 0) + return ret; + if (ret > 0) + changed = true; + + return genphy_c45_check_and_restart_aneg(phydev, changed); +} + +static int aqr105_read_rate(struct phy_device *phydev) +{ + int val; + + val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_TX_VEND_STATUS1); + if (val < 0) + return val; + + if (val & MDIO_AN_TX_VEND_STATUS1_FULL_DUPLEX) + phydev->duplex = DUPLEX_FULL; + else + phydev->duplex = DUPLEX_HALF; + + switch (FIELD_GET(MDIO_AN_TX_VEND_STATUS1_RATE_MASK, val)) { + case MDIO_AN_TX_VEND_STATUS1_10BASET: + phydev->speed = SPEED_10; + break; + case MDIO_AN_TX_VEND_STATUS1_100BASETX: + phydev->speed = SPEED_100; + break; + case MDIO_AN_TX_VEND_STATUS1_1000BASET: + phydev->speed = SPEED_1000; + break; + case MDIO_AN_TX_VEND_STATUS1_2500BASET: + phydev->speed = SPEED_2500; + break; + case MDIO_AN_TX_VEND_STATUS1_5000BASET: + phydev->speed = SPEED_5000; + break; + case MDIO_AN_TX_VEND_STATUS1_10GBASET: + phydev->speed = SPEED_10000; + break; + default: + phydev->speed = SPEED_UNKNOWN; + } + + return 0; +} + +static int aqr105_read_status(struct phy_device *phydev) +{ + int ret; + int val; + + ret = aqr_read_status(phydev); + if (ret) + return ret; + + if (!phydev->link || phydev->autoneg == AUTONEG_DISABLE) + return 0; + + /** + * The status register is not immediately correct on line side link up. + * Poll periodically until it reflects the correct ON state. + * Only return fail for read error, timeout defaults to OFF state. 
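+	 * On timeout val holds the last value read, so the switch below
+	 * falls into the OFF/default case and marks the link down.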
+ */ + ret = phy_read_mmd_poll_timeout(phydev, MDIO_MMD_PHYXS, + MDIO_PHYXS_VEND_IF_STATUS, val, + (FIELD_GET(MDIO_PHYXS_VEND_IF_STATUS_TYPE_MASK, val) != + MDIO_PHYXS_VEND_IF_STATUS_TYPE_OFF), + AQR107_OP_IN_PROG_SLEEP, + AQR107_OP_IN_PROG_TIMEOUT, false); + if (ret && ret != -ETIMEDOUT) + return ret; + + switch (FIELD_GET(MDIO_PHYXS_VEND_IF_STATUS_TYPE_MASK, val)) { + case MDIO_PHYXS_VEND_IF_STATUS_TYPE_KR: + phydev->interface = PHY_INTERFACE_MODE_10GKR; + break; + case MDIO_PHYXS_VEND_IF_STATUS_TYPE_KX: + phydev->interface = PHY_INTERFACE_MODE_1000BASEKX; + break; + case MDIO_PHYXS_VEND_IF_STATUS_TYPE_XFI: + phydev->interface = PHY_INTERFACE_MODE_10GBASER; + break; + case MDIO_PHYXS_VEND_IF_STATUS_TYPE_USXGMII: + phydev->interface = PHY_INTERFACE_MODE_USXGMII; + break; + case MDIO_PHYXS_VEND_IF_STATUS_TYPE_XAUI: + phydev->interface = PHY_INTERFACE_MODE_XAUI; + break; + case MDIO_PHYXS_VEND_IF_STATUS_TYPE_SGMII: + phydev->interface = PHY_INTERFACE_MODE_SGMII; + break; + case MDIO_PHYXS_VEND_IF_STATUS_TYPE_RXAUI: + phydev->interface = PHY_INTERFACE_MODE_RXAUI; + break; + case MDIO_PHYXS_VEND_IF_STATUS_TYPE_OCSGMII: + phydev->interface = PHY_INTERFACE_MODE_2500BASEX; + break; + case MDIO_PHYXS_VEND_IF_STATUS_TYPE_OFF: + default: + phydev->link = false; + phydev->interface = PHY_INTERFACE_MODE_NA; + break; + } + + /* Read rate from vendor register */ + return aqr105_read_rate(phydev); +} + static int aqr107_read_rate(struct phy_device *phydev) { u32 config_reg; @@ -911,10 +1144,13 @@ static struct phy_driver aqr_driver[] = { { PHY_ID_MATCH_MODEL(PHY_ID_AQR105), .name = "Aquantia AQR105", - .config_aneg = aqr_config_aneg, + .get_features = aqr105_get_features, + .probe = aqr107_probe, + .config_init = aqr107_config_init, + .config_aneg = aqr105_config_aneg, .config_intr = aqr_config_intr, .handle_interrupt = aqr_handle_interrupt, - .read_status = aqr_read_status, + .read_status = aqr105_read_status, .suspend = aqr107_suspend, .resume = aqr107_resume, }, diff --git a/drivers/net/phy/bcm-phy-ptp.c b/drivers/net/phy/bcm-phy-ptp.c index 208e8f561e069..eba8b5fb1365f 100644 --- a/drivers/net/phy/bcm-phy-ptp.c +++ b/drivers/net/phy/bcm-phy-ptp.c @@ -597,7 +597,8 @@ static int bcm_ptp_perout_locked(struct bcm_ptp_private *priv, period = BCM_MAX_PERIOD_8NS; /* write nonzero value */ - if (req->flags & PTP_PEROUT_PHASE) + /* Reject unsupported flags */ + if (req->flags & ~PTP_PEROUT_DUTY_CYCLE) return -EOPNOTSUPP; if (req->flags & PTP_PEROUT_DUTY_CYCLE) diff --git a/drivers/net/phy/bcm54140.c b/drivers/net/phy/bcm54140.c index 7969345f6b355..a8edf45fd7335 100644 --- a/drivers/net/phy/bcm54140.c +++ b/drivers/net/phy/bcm54140.c @@ -10,6 +10,7 @@ #include #include +#include "phylib.h" #include "bcm-phy-lib.h" /* RDB per-port registers diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c index 22edb7e4c1a1f..13e43fee1906e 100644 --- a/drivers/net/phy/broadcom.c +++ b/drivers/net/phy/broadcom.c @@ -16,7 +16,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c index 6599feca1967d..14f3615496384 100644 --- a/drivers/net/phy/dp83822.c +++ b/drivers/net/phy/dp83822.c @@ -31,6 +31,7 @@ #define MII_DP83822_RCSR 0x17 #define MII_DP83822_RESET_CTRL 0x1f #define MII_DP83822_MLEDCR 0x25 +#define MII_DP83822_LDCTRL 0x403 #define MII_DP83822_LEDCFG1 0x460 #define MII_DP83822_IOCTRL1 0x462 #define MII_DP83822_IOCTRL2 0x463 @@ -123,6 +124,9 @@ #define DP83822_IOCTRL1_GPIO1_CTRL GENMASK(2, 0) #define 
DP83822_IOCTRL1_GPIO1_CTRL_LED_1 BIT(0) +/* LDCTRL bits */ +#define DP83822_100BASE_TX_LINE_DRIVER_SWING GENMASK(7, 4) + /* IOCTRL2 bits */ #define DP83822_IOCTRL2_GPIO2_CLK_SRC GENMASK(6, 4) #define DP83822_IOCTRL2_GPIO2_CTRL GENMASK(2, 0) @@ -197,6 +201,7 @@ struct dp83822_private { bool set_gpio2_clk_out; u32 gpio2_clk_out; bool led_pin_enable[DP83822_MAX_LED_PINS]; + int tx_amplitude_100base_tx_index; }; static int dp83822_config_wol(struct phy_device *phydev, @@ -522,6 +527,12 @@ static int dp83822_config_init(struct phy_device *phydev) FIELD_PREP(DP83822_IOCTRL2_GPIO2_CLK_SRC, dp83822->gpio2_clk_out)); + if (dp83822->tx_amplitude_100base_tx_index >= 0) + phy_modify_mmd(phydev, MDIO_MMD_VEND2, MII_DP83822_LDCTRL, + DP83822_100BASE_TX_LINE_DRIVER_SWING, + FIELD_PREP(DP83822_100BASE_TX_LINE_DRIVER_SWING, + dp83822->tx_amplitude_100base_tx_index)); + err = dp83822_config_init_leds(phydev); if (err) return err; @@ -720,6 +731,11 @@ static int dp83822_phy_reset(struct phy_device *phydev) } #ifdef CONFIG_OF_MDIO +static const u32 tx_amplitude_100base_tx_gain[] = { + 80, 82, 83, 85, 87, 88, 90, 92, + 93, 95, 97, 98, 100, 102, 103, 105, +}; + static int dp83822_of_init_leds(struct phy_device *phydev) { struct device_node *node = phydev->mdio.dev.of_node; @@ -780,6 +796,8 @@ static int dp83822_of_init(struct phy_device *phydev) struct dp83822_private *dp83822 = phydev->priv; struct device *dev = &phydev->mdio.dev; const char *of_val; + int i, ret; + u32 val; /* Signal detection for the PHY is only enabled if the FX_EN and the * SD_EN pins are strapped. Signal detection can only enabled if FX_EN @@ -815,6 +833,25 @@ static int dp83822_of_init(struct phy_device *phydev) dp83822->set_gpio2_clk_out = true; } + ret = phy_get_tx_amplitude_gain(phydev, dev, + ETHTOOL_LINK_MODE_100baseT_Full_BIT, + &val); + if (!ret) { + for (i = 0; i < ARRAY_SIZE(tx_amplitude_100base_tx_gain); i++) { + if (tx_amplitude_100base_tx_gain[i] == val) { + dp83822->tx_amplitude_100base_tx_index = i; + break; + } + } + + if (dp83822->tx_amplitude_100base_tx_index < 0) { + phydev_err(phydev, + "Invalid value for tx-amplitude-100base-tx-percent property (%u)\n", + val); + return -EINVAL; + } + } + return dp83822_of_init_leds(phydev); } @@ -893,6 +930,7 @@ static int dp8382x_probe(struct phy_device *phydev) if (!dp83822) return -ENOMEM; + dp83822->tx_amplitude_100base_tx_index = -1; phydev->priv = dp83822; return 0; diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c index c1451df430ace..063266cafe9c7 100644 --- a/drivers/net/phy/dp83867.c +++ b/drivers/net/phy/dp83867.c @@ -1009,8 +1009,11 @@ static void dp83867_link_change_notify(struct phy_device *phydev) } } -static int dp83867_loopback(struct phy_device *phydev, bool enable) +static int dp83867_loopback(struct phy_device *phydev, bool enable, int speed) { + if (enable && speed) + return -EOPNOTSUPP; + return phy_modify(phydev, MII_BMCR, BMCR_LOOPBACK, enable ? 
BMCR_LOOPBACK : 0); } diff --git a/drivers/net/phy/dp83td510.c b/drivers/net/phy/dp83td510.c index a42af9c168ec5..23af1ac194fa9 100644 --- a/drivers/net/phy/dp83td510.c +++ b/drivers/net/phy/dp83td510.c @@ -204,10 +204,191 @@ struct dp83td510_priv { #define DP83TD510E_UNKN_030E 0x30e #define DP83TD510E_030E_VAL 0x2520 +#define DP83TD510E_LEDS_CFG_1 0x460 +#define DP83TD510E_LED_FN(idx, val) (((val) & 0xf) << ((idx) * 4)) +#define DP83TD510E_LED_FN_MASK(idx) (0xf << ((idx) * 4)) +/* link OK */ +#define DP83TD510E_LED_MODE_LINK_OK 0x0 +/* TX/RX activity */ +#define DP83TD510E_LED_MODE_TX_RX_ACTIVITY 0x1 +/* TX activity */ +#define DP83TD510E_LED_MODE_TX_ACTIVITY 0x2 +/* RX activity */ +#define DP83TD510E_LED_MODE_RX_ACTIVITY 0x3 +/* LR */ +#define DP83TD510E_LED_MODE_LR 0x4 +/* SR */ +#define DP83TD510E_LED_MODE_SR 0x5 +/* LED SPEED: High for 10Base-T */ +#define DP83TD510E_LED_MODE_LED_SPEED 0x6 +/* Duplex mode */ +#define DP83TD510E_LED_MODE_DUPLEX 0x7 +/* link + blink on activity with stretch option */ +#define DP83TD510E_LED_MODE_LINK_BLINK 0x8 +/* blink on activity with stretch option */ +#define DP83TD510E_LED_MODE_BLINK_ACTIVITY 0x9 +/* blink on tx activity with stretch option */ +#define DP83TD510E_LED_MODE_BLINK_TX 0xa +/* blink on rx activity with stretch option */ +#define DP83TD510E_LED_MODE_BLINK_RX 0xb +/* link_lost */ +#define DP83TD510E_LED_MODE_LINK_LOST 0xc +/* PRBS error: toggles on error */ +#define DP83TD510E_LED_MODE_PRBS_ERROR 0xd +/* XMII TX/RX Error with stretch option */ +#define DP83TD510E_LED_MODE_XMII_ERR 0xe + +#define DP83TD510E_LED_COUNT 4 + +#define DP83TD510E_LEDS_CFG_2 0x469 +#define DP83TD510E_LED_POLARITY(idx) BIT((idx) * 4 + 2) +#define DP83TD510E_LED_DRV_VAL(idx) BIT((idx) * 4 + 1) +#define DP83TD510E_LED_DRV_EN(idx) BIT((idx) * 4) + #define DP83TD510E_ALCD_STAT 0xa9f #define DP83TD510E_ALCD_COMPLETE BIT(15) #define DP83TD510E_ALCD_CABLE_LENGTH GENMASK(10, 0) +static int dp83td510_led_brightness_set(struct phy_device *phydev, u8 index, + enum led_brightness brightness) +{ + u32 val; + + if (index >= DP83TD510E_LED_COUNT) + return -EINVAL; + + val = DP83TD510E_LED_DRV_EN(index); + + if (brightness) + val |= DP83TD510E_LED_DRV_VAL(index); + + return phy_modify_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_LEDS_CFG_2, + DP83TD510E_LED_DRV_VAL(index) | + DP83TD510E_LED_DRV_EN(index), val); +} + +static int dp83td510_led_mode(u8 index, unsigned long rules) +{ + if (index >= DP83TD510E_LED_COUNT) + return -EINVAL; + + switch (rules) { + case BIT(TRIGGER_NETDEV_LINK): + return DP83TD510E_LED_MODE_LINK_OK; + case BIT(TRIGGER_NETDEV_LINK_10): + return DP83TD510E_LED_MODE_LED_SPEED; + case BIT(TRIGGER_NETDEV_FULL_DUPLEX): + return DP83TD510E_LED_MODE_DUPLEX; + case BIT(TRIGGER_NETDEV_TX): + return DP83TD510E_LED_MODE_TX_ACTIVITY; + case BIT(TRIGGER_NETDEV_RX): + return DP83TD510E_LED_MODE_RX_ACTIVITY; + case BIT(TRIGGER_NETDEV_TX) | BIT(TRIGGER_NETDEV_RX): + return DP83TD510E_LED_MODE_TX_RX_ACTIVITY; + case BIT(TRIGGER_NETDEV_LINK) | BIT(TRIGGER_NETDEV_TX) | + BIT(TRIGGER_NETDEV_RX): + return DP83TD510E_LED_MODE_LINK_BLINK; + default: + return -EOPNOTSUPP; + } +} + +static int dp83td510_led_hw_is_supported(struct phy_device *phydev, u8 index, + unsigned long rules) +{ + int ret; + + ret = dp83td510_led_mode(index, rules); + if (ret < 0) + return ret; + + return 0; +} + +static int dp83td510_led_hw_control_set(struct phy_device *phydev, u8 index, + unsigned long rules) +{ + int mode, ret; + + mode = dp83td510_led_mode(index, rules); + if (mode < 0) + return mode; + + ret 
= phy_modify_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_LEDS_CFG_1, + DP83TD510E_LED_FN_MASK(index), + DP83TD510E_LED_FN(index, mode)); + if (ret) + return ret; + + return phy_modify_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_LEDS_CFG_2, + DP83TD510E_LED_DRV_EN(index), 0); +} + +static int dp83td510_led_hw_control_get(struct phy_device *phydev, + u8 index, unsigned long *rules) +{ + int val; + + val = phy_read_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_LEDS_CFG_1); + if (val < 0) + return val; + + val &= DP83TD510E_LED_FN_MASK(index); + val >>= index * 4; + + switch (val) { + case DP83TD510E_LED_MODE_LINK_OK: + *rules = BIT(TRIGGER_NETDEV_LINK); + break; + /* LED mode: LED SPEED (10BaseT1L indicator) */ + case DP83TD510E_LED_MODE_LED_SPEED: + *rules = BIT(TRIGGER_NETDEV_LINK_10); + break; + case DP83TD510E_LED_MODE_DUPLEX: + *rules = BIT(TRIGGER_NETDEV_FULL_DUPLEX); + break; + case DP83TD510E_LED_MODE_TX_ACTIVITY: + *rules = BIT(TRIGGER_NETDEV_TX); + break; + case DP83TD510E_LED_MODE_RX_ACTIVITY: + *rules = BIT(TRIGGER_NETDEV_RX); + break; + case DP83TD510E_LED_MODE_TX_RX_ACTIVITY: + *rules = BIT(TRIGGER_NETDEV_TX) | BIT(TRIGGER_NETDEV_RX); + break; + case DP83TD510E_LED_MODE_LINK_BLINK: + *rules = BIT(TRIGGER_NETDEV_LINK) | + BIT(TRIGGER_NETDEV_TX) | + BIT(TRIGGER_NETDEV_RX); + break; + default: + *rules = 0; + break; + } + + return 0; +} + +static int dp83td510_led_polarity_set(struct phy_device *phydev, int index, + unsigned long modes) +{ + u16 polarity = DP83TD510E_LED_POLARITY(index); + u32 mode; + + for_each_set_bit(mode, &modes, __PHY_LED_MODES_NUM) { + switch (mode) { + case PHY_LED_ACTIVE_LOW: + polarity = 0; + break; + default: + return -EINVAL; + } + } + + return phy_modify_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_LEDS_CFG_2, + DP83TD510E_LED_POLARITY(index), polarity); +} + /** * dp83td510_update_stats - Update the PHY statistics for the DP83TD510 PHY. * @phydev: Pointer to the phy_device structure. @@ -712,6 +893,12 @@ static struct phy_driver dp83td510_driver[] = { .get_phy_stats = dp83td510_get_phy_stats, .update_stats = dp83td510_update_stats, + .led_brightness_set = dp83td510_led_brightness_set, + .led_hw_is_supported = dp83td510_led_hw_is_supported, + .led_hw_control_set = dp83td510_led_hw_control_set, + .led_hw_control_get = dp83td510_led_hw_control_get, + .led_polarity_set = dp83td510_led_polarity_set, + .suspend = genphy_suspend, .resume = genphy_resume, } }; diff --git a/drivers/net/phy/dp83tg720.c b/drivers/net/phy/dp83tg720.c index 050f4537d140f..7e76323409c4b 100644 --- a/drivers/net/phy/dp83tg720.c +++ b/drivers/net/phy/dp83tg720.c @@ -4,12 +4,31 @@ */ #include #include +#include #include #include #include +#include #include "open_alliance_helpers.h" +/* + * DP83TG720S_POLL_ACTIVE_LINK - Polling interval in milliseconds when the link + * is active. + * DP83TG720S_POLL_NO_LINK_MIN - Minimum polling interval in milliseconds when + * the link is down. + * DP83TG720S_POLL_NO_LINK_MAX - Maximum polling interval in milliseconds when + * the link is down. + * + * These values are not documented or officially recommended by the vendor but + * were determined through empirical testing. They achieve a good balance in + * minimizing the number of reset retries while ensuring reliable link recovery + * within a reasonable timeframe. 
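To make these bounds concrete, the arithmetic performed by the polling helper further down works out as follows, assuming HZ=1000 (other HZ settings scale the jiffy counts proportionally):

unsigned int min = msecs_to_jiffies(DP83TG720S_POLL_NO_LINK_MIN);  /* 100 jiffies at HZ=1000 */
unsigned int max = msecs_to_jiffies(DP83TG720S_POLL_NO_LINK_MAX);  /* 1000 jiffies at HZ=1000 */
unsigned int next = min + get_random_u32_below(max - min + 1);     /* uniform in [100, 1000] */

Two link partners that reset in lockstep therefore pick the same slot with probability 1/901 per round, so repeated collisions decay quickly.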
+ */ +#define DP83TG720S_POLL_ACTIVE_LINK 1000 +#define DP83TG720S_POLL_NO_LINK_MIN 100 +#define DP83TG720S_POLL_NO_LINK_MAX 1000 + #define DP83TG720S_PHY_ID 0x2000a284 /* MDIO_MMD_VEND2 registers */ @@ -371,6 +390,13 @@ static int dp83tg720_read_status(struct phy_device *phydev) if (ret) return ret; + /* Sleep 600ms for PHY stabilization post-reset. + * Empirically chosen value (not documented). + * Helps reduce reset bounces with link partners having similar + * issues. + */ + msleep(600); + /* After HW reset we need to restore master/slave configuration. * genphy_c45_pma_baset1_read_master_slave() call will be done * by the dp83tg720_config_aneg() function. @@ -498,6 +524,57 @@ static int dp83tg720_probe(struct phy_device *phydev) return 0; } +/** + * dp83tg720_get_next_update_time - Determine the next update time for PHY + * state + * @phydev: Pointer to the phy_device structure + * + * This function addresses a limitation of the DP83TG720 PHY, which cannot + * reliably detect or report a stable link state. To recover from such + * scenarios, the PHY must be periodically reset when the link is down. However, + * if the link partner also runs Linux with the same driver, synchronized reset + * intervals can lead to a deadlock where the link never establishes due to + * simultaneous resets on both sides. + * + * To avoid this, the function implements randomized polling intervals when the + * link is down. It ensures that reset intervals are desynchronized by + * introducing a random delay between a configured minimum and maximum range. + * When the link is up, a fixed polling interval is used to minimize overhead. + * + * This mechanism guarantees that the link will reestablish within 10 seconds + * in the worst-case scenario. + * + * Return: Time (in jiffies) until the next update event for the PHY state + * machine. 
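For orientation, here is a sketch of how the phylib state machine could consume this new callback. This is illustrative only; the real hookup lives in drivers/net/phy/phy.c, and phy_queue_next_update() is a hypothetical name:

static void phy_queue_next_update(struct phy_device *phydev)
{
	unsigned int next = HZ;	/* classic fixed 1 s PHY poll interval */

	if (phydev->drv->get_next_update_time)
		next = phydev->drv->get_next_update_time(phydev);

	/* Re-arm the state machine work after 'next' jiffies */
	mod_delayed_work(system_power_efficient_wq, &phydev->state_queue,
			 next);
}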
+ */ +static unsigned int dp83tg720_get_next_update_time(struct phy_device *phydev) +{ + unsigned int next_time_jiffies; + + if (phydev->link) { + /* When the link is up, use a fixed 1000ms interval + * (in jiffies) + */ + next_time_jiffies = + msecs_to_jiffies(DP83TG720S_POLL_ACTIVE_LINK); + } else { + unsigned int min_jiffies, max_jiffies, rand_jiffies; + + /* When the link is down, randomize interval between min/max + * (in jiffies) + */ + min_jiffies = msecs_to_jiffies(DP83TG720S_POLL_NO_LINK_MIN); + max_jiffies = msecs_to_jiffies(DP83TG720S_POLL_NO_LINK_MAX); + + rand_jiffies = min_jiffies + + get_random_u32_below(max_jiffies - min_jiffies + 1); + next_time_jiffies = rand_jiffies; + } + + /* Ensure the polling time is at least one jiffy */ + return max(next_time_jiffies, 1U); +} + static struct phy_driver dp83tg720_driver[] = { { PHY_ID_MATCH_MODEL(DP83TG720S_PHY_ID), @@ -516,6 +593,7 @@ static struct phy_driver dp83tg720_driver[] = { .get_link_stats = dp83tg720_get_link_stats, .get_phy_stats = dp83tg720_get_phy_stats, .update_stats = dp83tg720_update_stats, + .get_next_update_time = dp83tg720_get_next_update_time, .suspend = genphy_suspend, .resume = genphy_resume, diff --git a/drivers/net/phy/fixed_phy.c b/drivers/net/phy/fixed_phy.c index aef739c20ac4d..ee7831a9849b3 100644 --- a/drivers/net/phy/fixed_phy.c +++ b/drivers/net/phy/fixed_phy.c @@ -10,7 +10,7 @@ #include #include -#include +#include #include #include #include @@ -40,7 +40,7 @@ struct fixed_phy { struct gpio_desc *link_gpiod; }; -static struct platform_device *pdev; +static struct faux_device *fdev; static struct fixed_mdio_bus platform_fmb = { .phys = LIST_HEAD_INIT(platform_fmb.phys), }; @@ -337,9 +337,9 @@ static int __init fixed_mdio_bus_init(void) struct fixed_mdio_bus *fmb = &platform_fmb; int ret; - pdev = platform_device_register_simple("Fixed MDIO bus", 0, NULL, 0); - if (IS_ERR(pdev)) - return PTR_ERR(pdev); + fdev = faux_device_create("Fixed MDIO bus", NULL, NULL); + if (!fdev) + return -ENODEV; fmb->mii_bus = mdiobus_alloc(); if (fmb->mii_bus == NULL) { @@ -350,7 +350,7 @@ static int __init fixed_mdio_bus_init(void) snprintf(fmb->mii_bus->id, MII_BUS_ID_SIZE, "fixed-0"); fmb->mii_bus->name = "Fixed MDIO Bus"; fmb->mii_bus->priv = fmb; - fmb->mii_bus->parent = &pdev->dev; + fmb->mii_bus->parent = &fdev->dev; fmb->mii_bus->read = &fixed_mdio_read; fmb->mii_bus->write = &fixed_mdio_write; fmb->mii_bus->phy_mask = ~0; @@ -364,7 +364,7 @@ static int __init fixed_mdio_bus_init(void) err_mdiobus_alloc: mdiobus_free(fmb->mii_bus); err_mdiobus_reg: - platform_device_unregister(pdev); + faux_device_destroy(fdev); return ret; } module_init(fixed_mdio_bus_init); @@ -376,7 +376,7 @@ static void __exit fixed_mdio_bus_exit(void) mdiobus_unregister(fmb->mii_bus); mdiobus_free(fmb->mii_bus); - platform_device_unregister(pdev); + faux_device_destroy(fdev); list_for_each_entry_safe(fp, tmp, &fmb->phys, node) { list_del(&fp->node); diff --git a/drivers/net/phy/marvell-88q2xxx.c b/drivers/net/phy/marvell-88q2xxx.c index a3996471a1c9a..23e1f0521f549 100644 --- a/drivers/net/phy/marvell-88q2xxx.c +++ b/drivers/net/phy/marvell-88q2xxx.c @@ -7,30 +7,34 @@ * Copyright (C) 2024 Liebherr-Electronics and Drives GmbH */ #include +#include #include +#include #include -#include -#define PHY_ID_88Q2220_REVB0 (MARVELL_PHY_ID_88Q2220 | 0x1) -#define PHY_ID_88Q2220_REVB1 (MARVELL_PHY_ID_88Q2220 | 0x2) -#define PHY_ID_88Q2220_REVB2 (MARVELL_PHY_ID_88Q2220 | 0x3) +#define PHY_ID_88Q2220_REVB0 (MARVELL_PHY_ID_88Q2220 | 0x1) +#define 
PHY_ID_88Q2220_REVB1 (MARVELL_PHY_ID_88Q2220 | 0x2) +#define PHY_ID_88Q2220_REVB2 (MARVELL_PHY_ID_88Q2220 | 0x3) -#define MDIO_MMD_AN_MV_STAT 32769 -#define MDIO_MMD_AN_MV_STAT_ANEG 0x0100 -#define MDIO_MMD_AN_MV_STAT_LOCAL_RX 0x1000 -#define MDIO_MMD_AN_MV_STAT_REMOTE_RX 0x2000 -#define MDIO_MMD_AN_MV_STAT_LOCAL_MASTER 0x4000 -#define MDIO_MMD_AN_MV_STAT_MS_CONF_FAULT 0x8000 +#define MDIO_MMD_AN_MV_STAT 32769 +#define MDIO_MMD_AN_MV_STAT_ANEG 0x0100 +#define MDIO_MMD_AN_MV_STAT_LOCAL_RX 0x1000 +#define MDIO_MMD_AN_MV_STAT_REMOTE_RX 0x2000 +#define MDIO_MMD_AN_MV_STAT_LOCAL_MASTER 0x4000 +#define MDIO_MMD_AN_MV_STAT_MS_CONF_FAULT 0x8000 -#define MDIO_MMD_AN_MV_STAT2 32794 -#define MDIO_MMD_AN_MV_STAT2_AN_RESOLVED 0x0800 -#define MDIO_MMD_AN_MV_STAT2_100BT1 0x2000 -#define MDIO_MMD_AN_MV_STAT2_1000BT1 0x4000 +#define MDIO_MMD_AN_MV_STAT2 32794 +#define MDIO_MMD_AN_MV_STAT2_AN_RESOLVED 0x0800 +#define MDIO_MMD_AN_MV_STAT2_100BT1 0x2000 +#define MDIO_MMD_AN_MV_STAT2_1000BT1 0x4000 -#define MDIO_MMD_PCS_MV_INT_EN 32784 -#define MDIO_MMD_PCS_MV_INT_EN_LINK_UP 0x0040 -#define MDIO_MMD_PCS_MV_INT_EN_LINK_DOWN 0x0080 -#define MDIO_MMD_PCS_MV_INT_EN_100BT1 0x1000 +#define MDIO_MMD_PCS_MV_RESET_CTRL 32768 +#define MDIO_MMD_PCS_MV_RESET_CTRL_TX_DISABLE 0x8 + +#define MDIO_MMD_PCS_MV_INT_EN 32784 +#define MDIO_MMD_PCS_MV_INT_EN_LINK_UP 0x0040 +#define MDIO_MMD_PCS_MV_INT_EN_LINK_DOWN 0x0080 +#define MDIO_MMD_PCS_MV_INT_EN_100BT1 0x1000 #define MDIO_MMD_PCS_MV_GPIO_INT_STAT 32785 #define MDIO_MMD_PCS_MV_GPIO_INT_STAT_LINK_UP 0x0040 @@ -40,6 +44,22 @@ #define MDIO_MMD_PCS_MV_GPIO_INT_CTRL 32787 #define MDIO_MMD_PCS_MV_GPIO_INT_CTRL_TRI_DIS 0x0800 +#define MDIO_MMD_PCS_MV_LED_FUNC_CTRL 32790 +#define MDIO_MMD_PCS_MV_LED_FUNC_CTRL_LED_1_MASK GENMASK(7, 4) +#define MDIO_MMD_PCS_MV_LED_FUNC_CTRL_LED_0_MASK GENMASK(3, 0) +#define MDIO_MMD_PCS_MV_LED_FUNC_CTRL_LINK 0x0 /* Link established */ +#define MDIO_MMD_PCS_MV_LED_FUNC_CTRL_LINK_RX_TX 0x1 /* Link established, blink for rx or tx activity */ +#define MDIO_MMD_PCS_MV_LED_FUNC_CTRL_LINK_1000BT1 0x2 /* Blink 3x for 1000BT1 link established */ +#define MDIO_MMD_PCS_MV_LED_FUNC_CTRL_RX_TX_ON 0x3 /* Receive or transmit activity */ +#define MDIO_MMD_PCS_MV_LED_FUNC_CTRL_RX_TX 0x4 /* Blink on receive or transmit activity */ +#define MDIO_MMD_PCS_MV_LED_FUNC_CTRL_TX 0x5 /* Transmit activity */ +#define MDIO_MMD_PCS_MV_LED_FUNC_CTRL_LINK_COPPER 0x6 /* Copper Link established */ +#define MDIO_MMD_PCS_MV_LED_FUNC_CTRL_LINK_1000BT1_ON 0x7 /* 1000BT1 link established */ +#define MDIO_MMD_PCS_MV_LED_FUNC_CTRL_FORCE_OFF 0x8 /* Force off */ +#define MDIO_MMD_PCS_MV_LED_FUNC_CTRL_FORCE_ON 0x9 /* Force on */ +#define MDIO_MMD_PCS_MV_LED_FUNC_CTRL_FORCE_HIGHZ 0xa /* Force Hi-Z */ +#define MDIO_MMD_PCS_MV_LED_FUNC_CTRL_FORCE_BLINK 0xb /* Force blink */ + #define MDIO_MMD_PCS_MV_TEMP_SENSOR1 32833 #define MDIO_MMD_PCS_MV_TEMP_SENSOR1_RAW_INT 0x0001 #define MDIO_MMD_PCS_MV_TEMP_SENSOR1_INT 0x0040 @@ -60,11 +80,11 @@ #define MDIO_MMD_PCS_MV_100BT1_STAT1_REMOTE_RX 0x2000 #define MDIO_MMD_PCS_MV_100BT1_STAT1_LOCAL_MASTER 0x4000 -#define MDIO_MMD_PCS_MV_100BT1_STAT2 33033 -#define MDIO_MMD_PCS_MV_100BT1_STAT2_JABBER 0x0001 -#define MDIO_MMD_PCS_MV_100BT1_STAT2_POL 0x0002 -#define MDIO_MMD_PCS_MV_100BT1_STAT2_LINK 0x0004 -#define MDIO_MMD_PCS_MV_100BT1_STAT2_ANGE 0x0008 +#define MDIO_MMD_PCS_MV_100BT1_STAT2 33033 +#define MDIO_MMD_PCS_MV_100BT1_STAT2_JABBER 0x0001 +#define MDIO_MMD_PCS_MV_100BT1_STAT2_POL 0x0002 +#define MDIO_MMD_PCS_MV_100BT1_STAT2_LINK 0x0004 +#define 
MDIO_MMD_PCS_MV_100BT1_STAT2_ANGE 0x0008 #define MDIO_MMD_PCS_MV_100BT1_INT_EN 33042 #define MDIO_MMD_PCS_MV_100BT1_INT_EN_LINKEVENT 0x0400 @@ -72,7 +92,7 @@ #define MDIO_MMD_PCS_MV_COPPER_INT_STAT 33043 #define MDIO_MMD_PCS_MV_COPPER_INT_STAT_LINKEVENT 0x0400 -#define MDIO_MMD_PCS_MV_RX_STAT 33328 +#define MDIO_MMD_PCS_MV_RX_STAT 33328 #define MDIO_MMD_PCS_MV_TDR_RESET 65226 #define MDIO_MMD_PCS_MV_TDR_RESET_TDR_RST 0x1000 @@ -95,8 +115,12 @@ #define MDIO_MMD_PCS_MV_TDR_OFF_CUTOFF 65246 +#define MV88Q2XXX_LED_INDEX_TX_ENABLE 0 +#define MV88Q2XXX_LED_INDEX_GPIO 1 + struct mv88q2xxx_priv { bool enable_temp; + bool enable_led0; }; struct mmd_val { @@ -460,6 +484,9 @@ static int mv88q2xxx_config_aneg(struct phy_device *phydev) static int mv88q2xxx_config_init(struct phy_device *phydev) { + struct mv88q2xxx_priv *priv = phydev->priv; + int ret; + /* The 88Q2XXX PHYs do have the extended ability register available, but * register MDIO_PMA_EXTABLE where they should signalize it does not * work according to specification. Therefore, we force it here. @@ -469,10 +496,31 @@ static int mv88q2xxx_config_init(struct phy_device *phydev) /* Configure interrupt with default settings, output is driven low for * active interrupt and high for inactive. */ - if (phy_interrupt_is_valid(phydev)) - return phy_set_bits_mmd(phydev, MDIO_MMD_PCS, - MDIO_MMD_PCS_MV_GPIO_INT_CTRL, - MDIO_MMD_PCS_MV_GPIO_INT_CTRL_TRI_DIS); + if (phy_interrupt_is_valid(phydev)) { + ret = phy_set_bits_mmd(phydev, MDIO_MMD_PCS, + MDIO_MMD_PCS_MV_GPIO_INT_CTRL, + MDIO_MMD_PCS_MV_GPIO_INT_CTRL_TRI_DIS); + if (ret < 0) + return ret; + } + + /* Enable LED function and disable TX disable feature on LED/TX_ENABLE */ + if (priv->enable_led0) { + ret = phy_clear_bits_mmd(phydev, MDIO_MMD_PCS, + MDIO_MMD_PCS_MV_RESET_CTRL, + MDIO_MMD_PCS_MV_RESET_CTRL_TX_DISABLE); + if (ret < 0) + return ret; + } + + /* Enable temperature sense */ + if (priv->enable_temp) { + ret = phy_modify_mmd(phydev, MDIO_MMD_PCS, + MDIO_MMD_PCS_MV_TEMP_SENSOR2, + MDIO_MMD_PCS_MV_TEMP_SENSOR2_DIS_MASK, 0); + if (ret < 0) + return ret; + } return 0; } @@ -717,16 +765,10 @@ static int mv88q2xxx_hwmon_probe(struct phy_device *phydev) struct mv88q2xxx_priv *priv = phydev->priv; struct device *dev = &phydev->mdio.dev; struct device *hwmon; - char *hwmon_name; priv->enable_temp = true; - hwmon_name = devm_hwmon_sanitize_name(dev, dev_name(dev)); - if (IS_ERR(hwmon_name)) - return PTR_ERR(hwmon_name); - hwmon = devm_hwmon_device_register_with_info(dev, - hwmon_name, - phydev, + hwmon = devm_hwmon_device_register_with_info(dev, NULL, phydev, &mv88q2xxx_hwmon_chip_info, NULL); @@ -740,6 +782,49 @@ static int mv88q2xxx_hwmon_probe(struct phy_device *phydev) } #endif +#if IS_ENABLED(CONFIG_OF_MDIO) +static int mv88q2xxx_leds_probe(struct phy_device *phydev) +{ + struct device_node *node = phydev->mdio.dev.of_node; + struct mv88q2xxx_priv *priv = phydev->priv; + struct device_node *leds; + int ret = 0; + u32 index; + + if (!node) + return 0; + + leds = of_get_child_by_name(node, "leds"); + if (!leds) + return 0; + + for_each_available_child_of_node_scoped(leds, led) { + ret = of_property_read_u32(led, "reg", &index); + if (ret) + goto exit; + + if (index > MV88Q2XXX_LED_INDEX_GPIO) { + ret = -EINVAL; + goto exit; + } + + if (index == MV88Q2XXX_LED_INDEX_TX_ENABLE) + priv->enable_led0 = true; + } + +exit: + of_node_put(leds); + + return ret; +} + +#else +static int mv88q2xxx_leds_probe(struct phy_device *phydev) +{ + return 0; +} +#endif + static int mv88q2xxx_probe(struct phy_device 
*phydev) { struct mv88q2xxx_priv *priv; @@ -750,6 +835,21 @@ static int mv88q2xxx_probe(struct phy_device *phydev) phydev->priv = priv; + return 0; +} + +static int mv88q222x_probe(struct phy_device *phydev) +{ + int ret; + + ret = mv88q2xxx_probe(phydev); + if (ret) + return ret; + + ret = mv88q2xxx_leds_probe(phydev); + if (ret) + return ret; + return mv88q2xxx_hwmon_probe(phydev); } @@ -817,18 +917,6 @@ static int mv88q222x_revb1_revb2_config_init(struct phy_device *phydev) static int mv88q222x_config_init(struct phy_device *phydev) { - struct mv88q2xxx_priv *priv = phydev->priv; - int ret; - - /* Enable temperature sense */ - if (priv->enable_temp) { - ret = phy_modify_mmd(phydev, MDIO_MMD_PCS, - MDIO_MMD_PCS_MV_TEMP_SENSOR2, - MDIO_MMD_PCS_MV_TEMP_SENSOR2_DIS_MASK, 0); - if (ret < 0) - return ret; - } - if (phydev->c45_ids.device_ids[MDIO_MMD_PMAPMD] == PHY_ID_88Q2220_REVB0) return mv88q222x_revb0_config_init(phydev); else @@ -918,11 +1006,104 @@ static int mv88q222x_cable_test_get_status(struct phy_device *phydev, return 0; } +static int mv88q2xxx_led_mode(u8 index, unsigned long rules) +{ + switch (rules) { + case BIT(TRIGGER_NETDEV_LINK): + return MDIO_MMD_PCS_MV_LED_FUNC_CTRL_LINK; + case BIT(TRIGGER_NETDEV_LINK_1000): + return MDIO_MMD_PCS_MV_LED_FUNC_CTRL_LINK_1000BT1_ON; + case BIT(TRIGGER_NETDEV_TX): + return MDIO_MMD_PCS_MV_LED_FUNC_CTRL_TX; + case BIT(TRIGGER_NETDEV_TX) | BIT(TRIGGER_NETDEV_RX): + return MDIO_MMD_PCS_MV_LED_FUNC_CTRL_RX_TX; + case BIT(TRIGGER_NETDEV_LINK) | BIT(TRIGGER_NETDEV_TX) | BIT(TRIGGER_NETDEV_RX): + return MDIO_MMD_PCS_MV_LED_FUNC_CTRL_LINK_RX_TX; + default: + return -EOPNOTSUPP; + } +} + +static int mv88q2xxx_led_hw_is_supported(struct phy_device *phydev, u8 index, + unsigned long rules) +{ + int mode; + + mode = mv88q2xxx_led_mode(index, rules); + if (mode < 0) + return mode; + + return 0; +} + +static int mv88q2xxx_led_hw_control_set(struct phy_device *phydev, u8 index, + unsigned long rules) +{ + int mode; + + mode = mv88q2xxx_led_mode(index, rules); + if (mode < 0) + return mode; + + if (index == MV88Q2XXX_LED_INDEX_TX_ENABLE) + return phy_modify_mmd(phydev, MDIO_MMD_PCS, + MDIO_MMD_PCS_MV_LED_FUNC_CTRL, + MDIO_MMD_PCS_MV_LED_FUNC_CTRL_LED_0_MASK, + FIELD_PREP(MDIO_MMD_PCS_MV_LED_FUNC_CTRL_LED_0_MASK, + mode)); + else + return phy_modify_mmd(phydev, MDIO_MMD_PCS, + MDIO_MMD_PCS_MV_LED_FUNC_CTRL, + MDIO_MMD_PCS_MV_LED_FUNC_CTRL_LED_1_MASK, + FIELD_PREP(MDIO_MMD_PCS_MV_LED_FUNC_CTRL_LED_1_MASK, + mode)); +} + +static int mv88q2xxx_led_hw_control_get(struct phy_device *phydev, u8 index, + unsigned long *rules) +{ + int val; + + val = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_MMD_PCS_MV_LED_FUNC_CTRL); + if (val < 0) + return val; + + if (index == MV88Q2XXX_LED_INDEX_TX_ENABLE) + val = FIELD_GET(MDIO_MMD_PCS_MV_LED_FUNC_CTRL_LED_0_MASK, val); + else + val = FIELD_GET(MDIO_MMD_PCS_MV_LED_FUNC_CTRL_LED_1_MASK, val); + + switch (val) { + case MDIO_MMD_PCS_MV_LED_FUNC_CTRL_LINK: + *rules = BIT(TRIGGER_NETDEV_LINK); + break; + case MDIO_MMD_PCS_MV_LED_FUNC_CTRL_LINK_1000BT1_ON: + *rules = BIT(TRIGGER_NETDEV_LINK_1000); + break; + case MDIO_MMD_PCS_MV_LED_FUNC_CTRL_TX: + *rules = BIT(TRIGGER_NETDEV_TX); + break; + case MDIO_MMD_PCS_MV_LED_FUNC_CTRL_RX_TX: + *rules = BIT(TRIGGER_NETDEV_TX) | BIT(TRIGGER_NETDEV_RX); + break; + case MDIO_MMD_PCS_MV_LED_FUNC_CTRL_LINK_RX_TX: + *rules = BIT(TRIGGER_NETDEV_LINK) | BIT(TRIGGER_NETDEV_TX) | + BIT(TRIGGER_NETDEV_RX); + break; + default: + *rules = 0; + break; + } + + return 0; +} + static struct phy_driver 
mv88q2xxx_driver[] = { { .phy_id = MARVELL_PHY_ID_88Q2110, .phy_id_mask = MARVELL_PHY_ID_MASK, .name = "mv88q2110", + .probe = mv88q2xxx_probe, .get_features = mv88q2xxx_get_features, .config_aneg = mv88q2xxx_config_aneg, .config_init = mv88q2110_config_init, @@ -937,7 +1118,7 @@ static struct phy_driver mv88q2xxx_driver[] = { .phy_id_mask = MARVELL_PHY_ID_MASK, .name = "mv88q2220", .flags = PHY_POLL_CABLE_TEST, - .probe = mv88q2xxx_probe, + .probe = mv88q222x_probe, .get_features = mv88q2xxx_get_features, .config_aneg = mv88q2xxx_config_aneg, .aneg_done = genphy_c45_aneg_done, @@ -953,6 +1134,9 @@ static struct phy_driver mv88q2xxx_driver[] = { .get_sqi_max = mv88q2xxx_get_sqi_max, .suspend = mv88q2xxx_suspend, .resume = mv88q2xxx_resume, + .led_hw_is_supported = mv88q2xxx_led_hw_is_supported, + .led_hw_control_set = mv88q2xxx_led_hw_control_set, + .led_hw_control_get = mv88q2xxx_led_hw_control_get, }, }; diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index 44e1927de4997..623292948fa70 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c @@ -2131,52 +2131,52 @@ static void marvell_get_stats_simple(struct phy_device *phydev, data[i] = marvell_get_stat_simple(phydev, i); } -static int m88e1510_loopback(struct phy_device *phydev, bool enable) +static int m88e1510_loopback(struct phy_device *phydev, bool enable, int speed) { + u16 bmcr_ctl, mscr2_ctl = 0; int err; - if (enable) { - u16 bmcr_ctl, mscr2_ctl = 0; + if (!enable) + return genphy_loopback(phydev, enable, 0); - bmcr_ctl = mii_bmcr_encode_fixed(phydev->speed, phydev->duplex); - - err = phy_write(phydev, MII_BMCR, bmcr_ctl); - if (err < 0) - return err; + if (speed == SPEED_10 || speed == SPEED_100 || speed == SPEED_1000) + phydev->speed = speed; + else if (speed) + return -EINVAL; - if (phydev->speed == SPEED_1000) - mscr2_ctl = BMCR_SPEED1000; - else if (phydev->speed == SPEED_100) - mscr2_ctl = BMCR_SPEED100; + bmcr_ctl = mii_bmcr_encode_fixed(phydev->speed, phydev->duplex); - err = phy_modify_paged(phydev, MII_MARVELL_MSCR_PAGE, - MII_88E1510_MSCR_2, BMCR_SPEED1000 | - BMCR_SPEED100, mscr2_ctl); - if (err < 0) - return err; + err = phy_write(phydev, MII_BMCR, bmcr_ctl); + if (err < 0) + return err; - /* Need soft reset to have speed configuration takes effect */ - err = genphy_soft_reset(phydev); - if (err < 0) - return err; + if (phydev->speed == SPEED_1000) + mscr2_ctl = BMCR_SPEED1000; + else if (phydev->speed == SPEED_100) + mscr2_ctl = BMCR_SPEED100; - err = phy_modify(phydev, MII_BMCR, BMCR_LOOPBACK, - BMCR_LOOPBACK); + err = phy_modify_paged(phydev, MII_MARVELL_MSCR_PAGE, + MII_88E1510_MSCR_2, BMCR_SPEED1000 | + BMCR_SPEED100, mscr2_ctl); + if (err < 0) + return err; - if (!err) { - /* It takes some time for PHY device to switch - * into/out-of loopback mode. - */ - msleep(1000); - } + /* Need a soft reset for the speed configuration to take effect */ + err = genphy_soft_reset(phydev); + if (err < 0) return err; - } else { - err = phy_modify(phydev, MII_BMCR, BMCR_LOOPBACK, 0); - if (err < 0) - return err; - return phy_config_aneg(phydev); + err = phy_modify(phydev, MII_BMCR, BMCR_LOOPBACK, + BMCR_LOOPBACK); + + if (!err) { + /* + * It takes some time for the PHY device to switch into loopback + * mode.
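Callers reach this through the set_loopback() callback; a hedged usage sketch from a hypothetical self-test path, assuming the three-argument phy_loopback() wrapper this series converts drivers to (run_mac_selftest() and ndev are stand-ins):

int err;

/* Force a 1000 Mb/s PHY loopback for the test... */
err = phy_loopback(phydev, true, SPEED_1000);
if (err)
	return err;

run_mac_selftest(ndev);		/* hypothetical MAC-side test hook */

/* ...then drop loopback; the speed argument is ignored on disable */
err = phy_loopback(phydev, false, 0);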
+ */ + msleep(1000); } + return err; } static int marvell_vct5_wait_complete(struct phy_device *phydev) @@ -3124,33 +3124,13 @@ static umode_t marvell_hwmon_is_visible(const void *data, } } -static u32 marvell_hwmon_chip_config[] = { - HWMON_C_REGISTER_TZ, - 0 -}; - -static const struct hwmon_channel_info marvell_hwmon_chip = { - .type = hwmon_chip, - .config = marvell_hwmon_chip_config, -}; - /* we can define HWMON_T_CRIT and HWMON_T_MAX_ALARM even though these are not * defined for all PHYs, because the hwmon code checks whether the attributes * exists via the .is_visible method */ -static u32 marvell_hwmon_temp_config[] = { - HWMON_T_INPUT | HWMON_T_CRIT | HWMON_T_MAX_ALARM, - 0 -}; - -static const struct hwmon_channel_info marvell_hwmon_temp = { - .type = hwmon_temp, - .config = marvell_hwmon_temp_config, -}; - static const struct hwmon_channel_info * const marvell_hwmon_info[] = { - &marvell_hwmon_chip, - &marvell_hwmon_temp, + HWMON_CHANNEL_INFO(chip, HWMON_C_REGISTER_TZ), + HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT | HWMON_T_CRIT | HWMON_T_MAX_ALARM), NULL }; diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c index 623bdb8466b86..5354c8895163a 100644 --- a/drivers/net/phy/marvell10g.c +++ b/drivers/net/phy/marvell10g.c @@ -230,29 +230,9 @@ static const struct hwmon_ops mv3310_hwmon_ops = { .read = mv3310_hwmon_read, }; -static u32 mv3310_hwmon_chip_config[] = { - HWMON_C_REGISTER_TZ | HWMON_C_UPDATE_INTERVAL, - 0, -}; - -static const struct hwmon_channel_info mv3310_hwmon_chip = { - .type = hwmon_chip, - .config = mv3310_hwmon_chip_config, -}; - -static u32 mv3310_hwmon_temp_config[] = { - HWMON_T_INPUT, - 0, -}; - -static const struct hwmon_channel_info mv3310_hwmon_temp = { - .type = hwmon_temp, - .config = mv3310_hwmon_temp_config, -}; - static const struct hwmon_channel_info * const mv3310_hwmon_info[] = { - &mv3310_hwmon_chip, - &mv3310_hwmon_temp, + HWMON_CHANNEL_INFO(chip, HWMON_C_REGISTER_TZ | HWMON_C_UPDATE_INTERVAL), + HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT), NULL, }; diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c index 7e2f10182c0cf..ede596c1a69d1 100644 --- a/drivers/net/phy/mdio_bus.c +++ b/drivers/net/phy/mdio_bus.c @@ -551,6 +551,8 @@ static int mdiobus_create_device(struct mii_bus *bus, static struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr, bool c45) { struct phy_device *phydev = ERR_PTR(-ENODEV); + struct fwnode_handle *fwnode; + char node_name[16]; int err; phydev = get_phy_device(bus, addr, c45); @@ -562,6 +564,18 @@ static struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr, bool c45) */ of_mdiobus_link_mdiodev(bus, &phydev->mdio); + /* Search for a swnode for the phy in the swnode hierarchy of the bus. + * If there is no swnode for the phy provided, just ignore it. 
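As an illustration, a software-node hierarchy that this lookup would resolve might be set up as below. This is a sketch; the node names are assumptions that follow the "ethernet-phy@%d" pattern used here:

static const struct software_node mdio_bus_swnode = {
	.name = "mdio-bus",
};

static const struct software_node phy1_swnode = {
	.name = "ethernet-phy@1",	/* matches address 1 on this bus */
	.parent = &mdio_bus_swnode,
};

/* After software_node_register() of both nodes and attaching
 * mdio_bus_swnode to the bus device with device_add_software_node(),
 * mdiobus_scan(bus, 1, ...) finds the child node by name and attaches
 * it to the new phydev via device_set_node().
 */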
+ */ + if (dev_fwnode(&bus->dev) && !dev_fwnode(&phydev->mdio.dev)) { + snprintf(node_name, sizeof(node_name), "ethernet-phy@%d", + addr); + fwnode = fwnode_get_named_child_node(dev_fwnode(&bus->dev), + node_name); + if (fwnode) + device_set_node(&phydev->mdio.dev, fwnode); + } + err = phy_device_register(phydev); if (err) { phy_device_free(phydev); diff --git a/drivers/net/phy/mediatek/mtk-ge-soc.c b/drivers/net/phy/mediatek/mtk-ge-soc.c index bdf99b3270293..175cf5239bba8 100644 --- a/drivers/net/phy/mediatek/mtk-ge-soc.c +++ b/drivers/net/phy/mediatek/mtk-ge-soc.c @@ -8,6 +8,7 @@ #include #include +#include "../phylib.h" #include "mtk.h" #define MTK_GPHY_ID_MT7981 0x03a29461 @@ -24,7 +25,107 @@ #define MTK_PHY_SMI_DET_ON_THRESH_MASK GENMASK(13, 8) #define MTK_PHY_PAGE_EXTENDED_2A30 0x2a30 -#define MTK_PHY_PAGE_EXTENDED_52B5 0x52b5 + +/* Registers on Token Ring debug nodes */ +/* ch_addr = 0x0, node_addr = 0x7, data_addr = 0x15 */ +/* NormMseLoThresh */ +#define NORMAL_MSE_LO_THRESH_MASK GENMASK(15, 8) + +/* ch_addr = 0x0, node_addr = 0xf, data_addr = 0x3c */ +/* RemAckCntLimitCtrl */ +#define REMOTE_ACK_COUNT_LIMIT_CTRL_MASK GENMASK(2, 1) + +/* ch_addr = 0x1, node_addr = 0xd, data_addr = 0x20 */ +/* VcoSlicerThreshBitsHigh */ +#define VCO_SLICER_THRESH_HIGH_MASK GENMASK(23, 0) + +/* ch_addr = 0x1, node_addr = 0xf, data_addr = 0x0 */ +/* DfeTailEnableVgaThresh1000 */ +#define DFE_TAIL_EANBLE_VGA_TRHESH_1000 GENMASK(5, 1) + +/* ch_addr = 0x1, node_addr = 0xf, data_addr = 0x1 */ +/* MrvlTrFix100Kp */ +#define MRVL_TR_FIX_100KP_MASK GENMASK(22, 20) +/* MrvlTrFix100Kf */ +#define MRVL_TR_FIX_100KF_MASK GENMASK(19, 17) +/* MrvlTrFix1000Kp */ +#define MRVL_TR_FIX_1000KP_MASK GENMASK(16, 14) +/* MrvlTrFix1000Kf */ +#define MRVL_TR_FIX_1000KF_MASK GENMASK(13, 11) + +/* ch_addr = 0x1, node_addr = 0xf, data_addr = 0x12 */ +/* VgaDecRate */ +#define VGA_DECIMATION_RATE_MASK GENMASK(8, 5) + +/* ch_addr = 0x1, node_addr = 0xf, data_addr = 0x17 */ +/* SlvDSPreadyTime */ +#define SLAVE_DSP_READY_TIME_MASK GENMASK(22, 15) +/* MasDSPreadyTime */ +#define MASTER_DSP_READY_TIME_MASK GENMASK(14, 7) + +/* ch_addr = 0x1, node_addr = 0xf, data_addr = 0x18 */ +/* EnabRandUpdTrig */ +#define ENABLE_RANDOM_UPDOWN_COUNTER_TRIGGER BIT(8) + +/* ch_addr = 0x1, node_addr = 0xf, data_addr = 0x20 */ +/* ResetSyncOffset */ +#define RESET_SYNC_OFFSET_MASK GENMASK(11, 8) + +/* ch_addr = 0x2, node_addr = 0xd, data_addr = 0x0 */ +/* FfeUpdGainForceVal */ +#define FFE_UPDATE_GAIN_FORCE_VAL_MASK GENMASK(9, 7) +/* FfeUpdGainForce */ +#define FFE_UPDATE_GAIN_FORCE BIT(6) + +/* ch_addr = 0x2, node_addr = 0xd, data_addr = 0x3 */ +/* TrFreeze */ +#define TR_FREEZE_MASK GENMASK(11, 0) + +/* ch_addr = 0x2, node_addr = 0xd, data_addr = 0x6 */ +/* SS: Steady-state, KP: Proportional Gain */ +/* SSTrKp100 */ +#define SS_TR_KP100_MASK GENMASK(21, 19) +/* SSTrKf100 */ +#define SS_TR_KF100_MASK GENMASK(18, 16) +/* SSTrKp1000Mas */ +#define SS_TR_KP1000_MASTER_MASK GENMASK(15, 13) +/* SSTrKf1000Mas */ +#define SS_TR_KF1000_MASTER_MASK GENMASK(12, 10) +/* SSTrKp1000Slv */ +#define SS_TR_KP1000_SLAVE_MASK GENMASK(9, 7) +/* SSTrKf1000Slv */ +#define SS_TR_KF1000_SLAVE_MASK GENMASK(6, 4) + +/* ch_addr = 0x2, node_addr = 0xd, data_addr = 0x8 */ +/* clear this bit if wanna select from AFE */ +/* Regsigdet_sel_1000 */ +#define EEE1000_SELECT_SIGNAL_DETECTION_FROM_DFE BIT(4) + +/* ch_addr = 0x2, node_addr = 0xd, data_addr = 0xd */ +/* RegEEE_st2TrKf1000 */ +#define EEE1000_STAGE2_TR_KF_MASK GENMASK(13, 11) + +/* ch_addr = 0x2, node_addr = 0xd, 
data_addr = 0xf */ +/* RegEEE_slv_waketr_timer_tar */ +#define SLAVE_WAKETR_TIMER_MASK GENMASK(20, 11) +/* RegEEE_slv_remtx_timer_tar */ +#define SLAVE_REMTX_TIMER_MASK GENMASK(10, 1) + +/* ch_addr = 0x2, node_addr = 0xd, data_addr = 0x10 */ +/* RegEEE_slv_wake_int_timer_tar */ +#define SLAVE_WAKEINT_TIMER_MASK GENMASK(10, 1) + +/* ch_addr = 0x2, node_addr = 0xd, data_addr = 0x14 */ +/* RegEEE_trfreeze_timer2 */ +#define TR_FREEZE_TIMER2_MASK GENMASK(9, 0) + +/* ch_addr = 0x2, node_addr = 0xd, data_addr = 0x1c */ +/* RegEEE100Stg1_tar */ +#define EEE100_LPSYNC_STAGE1_UPDATE_TIMER_MASK GENMASK(8, 0) + +/* ch_addr = 0x2, node_addr = 0xd, data_addr = 0x25 */ +/* REGEEE_wake_slv_tr_wait_dfesigdet_en */ +#define WAKE_SLAVE_TR_WAIT_DFE_DETECTION_EN BIT(11) #define ANALOG_INTERNAL_OPERATION_MAX_US 20 #define TXRESERVE_MIN 0 @@ -701,40 +802,36 @@ static int tx_vcm_cal_sw(struct phy_device *phydev, u8 rg_txreserve_x) static void mt798x_phy_common_finetune(struct phy_device *phydev) { phy_select_page(phydev, MTK_PHY_PAGE_EXTENDED_52B5); - /* SlvDSPreadyTime = 24, MasDSPreadyTime = 24 */ - __phy_write(phydev, 0x11, 0xc71); - __phy_write(phydev, 0x12, 0xc); - __phy_write(phydev, 0x10, 0x8fae); - - /* EnabRandUpdTrig = 1 */ - __phy_write(phydev, 0x11, 0x2f00); - __phy_write(phydev, 0x12, 0xe); - __phy_write(phydev, 0x10, 0x8fb0); - - /* NormMseLoThresh = 85 */ - __phy_write(phydev, 0x11, 0x55a0); - __phy_write(phydev, 0x12, 0x0); - __phy_write(phydev, 0x10, 0x83aa); - - /* FfeUpdGainForce = 1(Enable), FfeUpdGainForceVal = 4 */ - __phy_write(phydev, 0x11, 0x240); - __phy_write(phydev, 0x12, 0x0); - __phy_write(phydev, 0x10, 0x9680); - - /* TrFreeze = 0 (mt7988 default) */ - __phy_write(phydev, 0x11, 0x0); - __phy_write(phydev, 0x12, 0x0); - __phy_write(phydev, 0x10, 0x9686); - - /* SSTrKp100 = 5 */ - /* SSTrKf100 = 6 */ - /* SSTrKp1000Mas = 5 */ - /* SSTrKf1000Mas = 6 */ - /* SSTrKp1000Slv = 5 */ - /* SSTrKf1000Slv = 6 */ - __phy_write(phydev, 0x11, 0xbaef); - __phy_write(phydev, 0x12, 0x2e); - __phy_write(phydev, 0x10, 0x968c); + __mtk_tr_modify(phydev, 0x1, 0xf, 0x17, + SLAVE_DSP_READY_TIME_MASK | MASTER_DSP_READY_TIME_MASK, + FIELD_PREP(SLAVE_DSP_READY_TIME_MASK, 0x18) | + FIELD_PREP(MASTER_DSP_READY_TIME_MASK, 0x18)); + + __mtk_tr_set_bits(phydev, 0x1, 0xf, 0x18, + ENABLE_RANDOM_UPDOWN_COUNTER_TRIGGER); + + __mtk_tr_modify(phydev, 0x0, 0x7, 0x15, + NORMAL_MSE_LO_THRESH_MASK, + FIELD_PREP(NORMAL_MSE_LO_THRESH_MASK, 0x55)); + + __mtk_tr_modify(phydev, 0x2, 0xd, 0x0, + FFE_UPDATE_GAIN_FORCE_VAL_MASK, + FIELD_PREP(FFE_UPDATE_GAIN_FORCE_VAL_MASK, 0x4) | + FFE_UPDATE_GAIN_FORCE); + + __mtk_tr_clr_bits(phydev, 0x2, 0xd, 0x3, TR_FREEZE_MASK); + + __mtk_tr_modify(phydev, 0x2, 0xd, 0x6, + SS_TR_KP100_MASK | SS_TR_KF100_MASK | + SS_TR_KP1000_MASTER_MASK | SS_TR_KF1000_MASTER_MASK | + SS_TR_KP1000_SLAVE_MASK | SS_TR_KF1000_SLAVE_MASK, + FIELD_PREP(SS_TR_KP100_MASK, 0x5) | + FIELD_PREP(SS_TR_KF100_MASK, 0x6) | + FIELD_PREP(SS_TR_KP1000_MASTER_MASK, 0x5) | + FIELD_PREP(SS_TR_KF1000_MASTER_MASK, 0x6) | + FIELD_PREP(SS_TR_KP1000_SLAVE_MASK, 0x5) | + FIELD_PREP(SS_TR_KF1000_SLAVE_MASK, 0x6)); + phy_restore_page(phydev, MTK_PHY_PAGE_STANDARD, 0); } @@ -757,27 +854,29 @@ static void mt7981_phy_finetune(struct phy_device *phydev) } phy_select_page(phydev, MTK_PHY_PAGE_EXTENDED_52B5); - /* ResetSyncOffset = 6 */ - __phy_write(phydev, 0x11, 0x600); - __phy_write(phydev, 0x12, 0x0); - __phy_write(phydev, 0x10, 0x8fc0); + __mtk_tr_modify(phydev, 0x1, 0xf, 0x20, + RESET_SYNC_OFFSET_MASK, + FIELD_PREP(RESET_SYNC_OFFSET_MASK, 
0x6)); - /* VgaDecRate = 1 */ - __phy_write(phydev, 0x11, 0x4c2a); - __phy_write(phydev, 0x12, 0x3e); - __phy_write(phydev, 0x10, 0x8fa4); + __mtk_tr_modify(phydev, 0x1, 0xf, 0x12, + VGA_DECIMATION_RATE_MASK, + FIELD_PREP(VGA_DECIMATION_RATE_MASK, 0x1)); /* MrvlTrFix100Kp = 3, MrvlTrFix100Kf = 2, * MrvlTrFix1000Kp = 3, MrvlTrFix1000Kf = 2 */ - __phy_write(phydev, 0x11, 0xd10a); - __phy_write(phydev, 0x12, 0x34); - __phy_write(phydev, 0x10, 0x8f82); + __mtk_tr_modify(phydev, 0x1, 0xf, 0x1, + MRVL_TR_FIX_100KP_MASK | MRVL_TR_FIX_100KF_MASK | + MRVL_TR_FIX_1000KP_MASK | MRVL_TR_FIX_1000KF_MASK, + FIELD_PREP(MRVL_TR_FIX_100KP_MASK, 0x3) | + FIELD_PREP(MRVL_TR_FIX_100KF_MASK, 0x2) | + FIELD_PREP(MRVL_TR_FIX_1000KP_MASK, 0x3) | + FIELD_PREP(MRVL_TR_FIX_1000KF_MASK, 0x2)); /* VcoSlicerThreshBitsHigh */ - __phy_write(phydev, 0x11, 0x5555); - __phy_write(phydev, 0x12, 0x55); - __phy_write(phydev, 0x10, 0x8ec0); + __mtk_tr_modify(phydev, 0x1, 0xd, 0x20, + VCO_SLICER_THRESH_HIGH_MASK, + FIELD_PREP(VCO_SLICER_THRESH_HIGH_MASK, 0x555555)); phy_restore_page(phydev, MTK_PHY_PAGE_STANDARD, 0); /* TR_OPEN_LOOP_EN = 1, lpf_x_average = 9 */ @@ -829,25 +928,23 @@ static void mt7988_phy_finetune(struct phy_device *phydev) phy_write_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_RG_TX_FILTER, 0x5); phy_select_page(phydev, MTK_PHY_PAGE_EXTENDED_52B5); - /* ResetSyncOffset = 5 */ - __phy_write(phydev, 0x11, 0x500); - __phy_write(phydev, 0x12, 0x0); - __phy_write(phydev, 0x10, 0x8fc0); + __mtk_tr_modify(phydev, 0x1, 0xf, 0x20, + RESET_SYNC_OFFSET_MASK, + FIELD_PREP(RESET_SYNC_OFFSET_MASK, 0x5)); /* VgaDecRate is 1 at default on mt7988 */ - /* MrvlTrFix100Kp = 6, MrvlTrFix100Kf = 7, - * MrvlTrFix1000Kp = 6, MrvlTrFix1000Kf = 7 - */ - __phy_write(phydev, 0x11, 0xb90a); - __phy_write(phydev, 0x12, 0x6f); - __phy_write(phydev, 0x10, 0x8f82); - - /* RemAckCntLimitCtrl = 1 */ - __phy_write(phydev, 0x11, 0xfbba); - __phy_write(phydev, 0x12, 0xc3); - __phy_write(phydev, 0x10, 0x87f8); - + __mtk_tr_modify(phydev, 0x1, 0xf, 0x1, + MRVL_TR_FIX_100KP_MASK | MRVL_TR_FIX_100KF_MASK | + MRVL_TR_FIX_1000KP_MASK | MRVL_TR_FIX_1000KF_MASK, + FIELD_PREP(MRVL_TR_FIX_100KP_MASK, 0x6) | + FIELD_PREP(MRVL_TR_FIX_100KF_MASK, 0x7) | + FIELD_PREP(MRVL_TR_FIX_1000KP_MASK, 0x6) | + FIELD_PREP(MRVL_TR_FIX_1000KF_MASK, 0x7)); + + __mtk_tr_modify(phydev, 0x0, 0xf, 0x3c, + REMOTE_ACK_COUNT_LIMIT_CTRL_MASK, + FIELD_PREP(REMOTE_ACK_COUNT_LIMIT_CTRL_MASK, 0x1)); phy_restore_page(phydev, MTK_PHY_PAGE_STANDARD, 0); /* TR_OPEN_LOOP_EN = 1, lpf_x_average = 10 */ @@ -923,45 +1020,37 @@ static void mt798x_phy_eee(struct phy_device *phydev) MTK_PHY_TR_READY_SKIP_AFE_WAKEUP); phy_select_page(phydev, MTK_PHY_PAGE_EXTENDED_52B5); - /* Regsigdet_sel_1000 = 0 */ - __phy_write(phydev, 0x11, 0xb); - __phy_write(phydev, 0x12, 0x0); - __phy_write(phydev, 0x10, 0x9690); - - /* REG_EEE_st2TrKf1000 = 2 */ - __phy_write(phydev, 0x11, 0x114f); - __phy_write(phydev, 0x12, 0x2); - __phy_write(phydev, 0x10, 0x969a); - - /* RegEEE_slv_wake_tr_timer_tar = 6, RegEEE_slv_remtx_timer_tar = 20 */ - __phy_write(phydev, 0x11, 0x3028); - __phy_write(phydev, 0x12, 0x0); - __phy_write(phydev, 0x10, 0x969e); - - /* RegEEE_slv_wake_int_timer_tar = 8 */ - __phy_write(phydev, 0x11, 0x5010); - __phy_write(phydev, 0x12, 0x0); - __phy_write(phydev, 0x10, 0x96a0); - - /* RegEEE_trfreeze_timer2 = 586 */ - __phy_write(phydev, 0x11, 0x24a); - __phy_write(phydev, 0x12, 0x0); - __phy_write(phydev, 0x10, 0x96a8); - - /* RegEEE100Stg1_tar = 16 */ - __phy_write(phydev, 0x11, 0x3210); - __phy_write(phydev, 
0x12, 0x0); - __phy_write(phydev, 0x10, 0x96b8); - - /* REGEEE_wake_slv_tr_wait_dfesigdet_en = 0 */ - __phy_write(phydev, 0x11, 0x1463); - __phy_write(phydev, 0x12, 0x0); - __phy_write(phydev, 0x10, 0x96ca); - - /* DfeTailEnableVgaThresh1000 = 27 */ - __phy_write(phydev, 0x11, 0x36); - __phy_write(phydev, 0x12, 0x0); - __phy_write(phydev, 0x10, 0x8f80); + __mtk_tr_clr_bits(phydev, 0x2, 0xd, 0x8, + EEE1000_SELECT_SIGNAL_DETECTION_FROM_DFE); + + __mtk_tr_modify(phydev, 0x2, 0xd, 0xd, + EEE1000_STAGE2_TR_KF_MASK, + FIELD_PREP(EEE1000_STAGE2_TR_KF_MASK, 0x2)); + + __mtk_tr_modify(phydev, 0x2, 0xd, 0xf, + SLAVE_WAKETR_TIMER_MASK | SLAVE_REMTX_TIMER_MASK, + FIELD_PREP(SLAVE_WAKETR_TIMER_MASK, 0x6) | + FIELD_PREP(SLAVE_REMTX_TIMER_MASK, 0x14)); + + __mtk_tr_modify(phydev, 0x2, 0xd, 0x10, + SLAVE_WAKEINT_TIMER_MASK, + FIELD_PREP(SLAVE_WAKEINT_TIMER_MASK, 0x8)); + + __mtk_tr_modify(phydev, 0x2, 0xd, 0x14, + TR_FREEZE_TIMER2_MASK, + FIELD_PREP(TR_FREEZE_TIMER2_MASK, 0x24a)); + + __mtk_tr_modify(phydev, 0x2, 0xd, 0x1c, + EEE100_LPSYNC_STAGE1_UPDATE_TIMER_MASK, + FIELD_PREP(EEE100_LPSYNC_STAGE1_UPDATE_TIMER_MASK, + 0x10)); + + __mtk_tr_clr_bits(phydev, 0x2, 0xd, 0x25, + WAKE_SLAVE_TR_WAIT_DFE_DETECTION_EN); + + __mtk_tr_modify(phydev, 0x1, 0xf, 0x0, + DFE_TAIL_EANBLE_VGA_TRHESH_1000, + FIELD_PREP(DFE_TAIL_EANBLE_VGA_TRHESH_1000, 0x1b)); phy_restore_page(phydev, MTK_PHY_PAGE_STANDARD, 0); phy_select_page(phydev, MTK_PHY_PAGE_EXTENDED_3); @@ -1190,7 +1279,7 @@ static int mt798x_phy_led_hw_control_set(struct phy_device *phydev, u8 index, static bool mt7988_phy_led_get_polarity(struct phy_device *phydev, int led_num) { - struct mtk_socphy_shared *priv = phydev->shared->priv; + struct mtk_socphy_shared *priv = phy_package_get_priv(phydev); u32 polarities; if (led_num == 0) @@ -1229,7 +1318,7 @@ static int mt7988_phy_fix_leds_polarities(struct phy_device *phydev) static int mt7988_phy_probe_shared(struct phy_device *phydev) { struct device_node *np = dev_of_node(&phydev->mdio.bus->dev); - struct mtk_socphy_shared *shared = phydev->shared->priv; + struct mtk_socphy_shared *shared = phy_package_get_priv(phydev); struct regmap *regmap; u32 reg; int ret; @@ -1280,7 +1369,7 @@ static int mt7988_phy_probe(struct phy_device *phydev) return err; } - shared = phydev->shared->priv; + shared = phy_package_get_priv(phydev); priv = &shared->priv[phydev->mdio.addr]; phydev->priv = priv; diff --git a/drivers/net/phy/mediatek/mtk-ge.c b/drivers/net/phy/mediatek/mtk-ge.c index b517ca8573e76..73d9b72f9d9e2 100644 --- a/drivers/net/phy/mediatek/mtk-ge.c +++ b/drivers/net/phy/mediatek/mtk-ge.c @@ -8,31 +8,58 @@ #define MTK_GPHY_ID_MT7530 0x03a29412 #define MTK_GPHY_ID_MT7531 0x03a29441 -#define MTK_EXT_PAGE_ACCESS 0x1f -#define MTK_PHY_PAGE_STANDARD 0x0000 -#define MTK_PHY_PAGE_EXTENDED 0x0001 -#define MTK_PHY_PAGE_EXTENDED_2 0x0002 -#define MTK_PHY_PAGE_EXTENDED_3 0x0003 -#define MTK_PHY_PAGE_EXTENDED_2A30 0x2a30 -#define MTK_PHY_PAGE_EXTENDED_52B5 0x52b5 +#define MTK_PHY_PAGE_EXTENDED_2 0x0002 +#define MTK_PHY_PAGE_EXTENDED_3 0x0003 +#define MTK_PHY_RG_LPI_PCS_DSP_CTRL_REG11 0x11 + +#define MTK_PHY_PAGE_EXTENDED_2A30 0x2a30 + +/* Registers on Token Ring debug nodes */ +/* ch_addr = 0x1, node_addr = 0xf, data_addr = 0x17 */ +#define SLAVE_DSP_READY_TIME_MASK GENMASK(22, 15) + +/* Registers on MDIO_MMD_VEND1 */ +#define MTK_PHY_GBE_MODE_TX_DELAY_SEL 0x13 +#define MTK_PHY_TEST_MODE_TX_DELAY_SEL 0x14 +#define MTK_TX_DELAY_PAIR_B_MASK GENMASK(10, 8) +#define MTK_TX_DELAY_PAIR_D_MASK GENMASK(2, 0) + +#define 
MTK_PHY_MCC_CTRL_AND_TX_POWER_CTRL 0xa6 +#define MTK_MCC_NEARECHO_OFFSET_MASK GENMASK(15, 8) + +#define MTK_PHY_RXADC_CTRL_RG7 0xc6 +#define MTK_PHY_DA_AD_BUF_BIAS_LP_MASK GENMASK(9, 8) + +#define MTK_PHY_RG_LPI_PCS_DSP_CTRL_REG123 0x123 +#define MTK_PHY_LPI_NORM_MSE_LO_THRESH100_MASK GENMASK(15, 8) +#define MTK_PHY_LPI_NORM_MSE_HI_THRESH100_MASK GENMASK(7, 0) static void mtk_gephy_config_init(struct phy_device *phydev) { /* Enable HW auto downshift */ - phy_modify_paged(phydev, MTK_PHY_PAGE_EXTENDED, 0x14, 0, BIT(4)); + phy_modify_paged(phydev, MTK_PHY_PAGE_EXTENDED_1, + MTK_PHY_AUX_CTRL_AND_STATUS, + 0, MTK_PHY_ENABLE_DOWNSHIFT); /* Increase SlvDPSready time */ - phy_select_page(phydev, MTK_PHY_PAGE_EXTENDED_52B5); - __phy_write(phydev, 0x10, 0xafae); - __phy_write(phydev, 0x12, 0x2f); - __phy_write(phydev, 0x10, 0x8fae); - phy_restore_page(phydev, MTK_PHY_PAGE_STANDARD, 0); + mtk_tr_modify(phydev, 0x1, 0xf, 0x17, SLAVE_DSP_READY_TIME_MASK, + FIELD_PREP(SLAVE_DSP_READY_TIME_MASK, 0x5e)); /* Adjust 100_mse_threshold */ - phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x123, 0xffff); - - /* Disable mcc */ - phy_write_mmd(phydev, MDIO_MMD_VEND1, 0xa6, 0x300); + phy_modify_mmd(phydev, MDIO_MMD_VEND1, + MTK_PHY_RG_LPI_PCS_DSP_CTRL_REG123, + MTK_PHY_LPI_NORM_MSE_LO_THRESH100_MASK | + MTK_PHY_LPI_NORM_MSE_HI_THRESH100_MASK, + FIELD_PREP(MTK_PHY_LPI_NORM_MSE_LO_THRESH100_MASK, + 0xff) | + FIELD_PREP(MTK_PHY_LPI_NORM_MSE_HI_THRESH100_MASK, + 0xff)); + + /* If echo time is narrower than 0x3, it will be regarded as noise */ + phy_modify_mmd(phydev, MDIO_MMD_VEND1, + MTK_PHY_MCC_CTRL_AND_TX_POWER_CTRL, + MTK_MCC_NEARECHO_OFFSET_MASK, + FIELD_PREP(MTK_MCC_NEARECHO_OFFSET_MASK, 0x3)); } static int mt7530_phy_config_init(struct phy_device *phydev) @@ -40,7 +67,8 @@ static int mt7530_phy_config_init(struct phy_device *phydev) mtk_gephy_config_init(phydev); /* Increase post_update_timer */ - phy_write_paged(phydev, MTK_PHY_PAGE_EXTENDED_3, 0x11, 0x4b); + phy_write_paged(phydev, MTK_PHY_PAGE_EXTENDED_3, + MTK_PHY_RG_LPI_PCS_DSP_CTRL_REG11, 0x4b); return 0; } @@ -51,11 +79,19 @@ static int mt7531_phy_config_init(struct phy_device *phydev) /* PHY link down power saving enable */ phy_set_bits(phydev, 0x17, BIT(4)); - phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, 0xc6, 0x300); + phy_modify_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_RXADC_CTRL_RG7, + MTK_PHY_DA_AD_BUF_BIAS_LP_MASK, + FIELD_PREP(MTK_PHY_DA_AD_BUF_BIAS_LP_MASK, 0x3)); /* Set TX Pair delay selection */ - phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x13, 0x404); - phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x14, 0x404); + phy_modify_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_GBE_MODE_TX_DELAY_SEL, + MTK_TX_DELAY_PAIR_B_MASK | MTK_TX_DELAY_PAIR_D_MASK, + FIELD_PREP(MTK_TX_DELAY_PAIR_B_MASK, 0x4) | + FIELD_PREP(MTK_TX_DELAY_PAIR_D_MASK, 0x4)); + phy_modify_mmd(phydev, MDIO_MMD_VEND1, MTK_PHY_TEST_MODE_TX_DELAY_SEL, + MTK_TX_DELAY_PAIR_B_MASK | MTK_TX_DELAY_PAIR_D_MASK, + FIELD_PREP(MTK_TX_DELAY_PAIR_B_MASK, 0x4) | + FIELD_PREP(MTK_TX_DELAY_PAIR_D_MASK, 0x4)); return 0; } diff --git a/drivers/net/phy/mediatek/mtk-phy-lib.c b/drivers/net/phy/mediatek/mtk-phy-lib.c index 98a09d670e9ca..dfd0f4e439a21 100644 --- a/drivers/net/phy/mediatek/mtk-phy-lib.c +++ b/drivers/net/phy/mediatek/mtk-phy-lib.c @@ -6,6 +6,83 @@ #include "mtk.h" +/* Difference between functions with mtk_tr* and __mtk_tr* prefixes is + * mtk_tr* functions: wrapped by page switching operations + * __mtk_tr* functions: no page switching operations + */ + +static void __mtk_tr_access(struct phy_device *phydev, bool read, u8 
ch_addr, + u8 node_addr, u8 data_addr) +{ + u16 tr_cmd = BIT(15); /* bit 14 & 0 are reserved */ + + if (read) + tr_cmd |= BIT(13); + + tr_cmd |= (((ch_addr & 0x3) << 11) | + ((node_addr & 0xf) << 7) | + ((data_addr & 0x3f) << 1)); + dev_dbg(&phydev->mdio.dev, "tr_cmd: 0x%x\n", tr_cmd); + __phy_write(phydev, 0x10, tr_cmd); +} + +static void __mtk_tr_read(struct phy_device *phydev, u8 ch_addr, u8 node_addr, + u8 data_addr, u16 *tr_high, u16 *tr_low) +{ + __mtk_tr_access(phydev, true, ch_addr, node_addr, data_addr); + *tr_low = __phy_read(phydev, 0x11); + *tr_high = __phy_read(phydev, 0x12); + dev_dbg(&phydev->mdio.dev, "tr_high read: 0x%x, tr_low read: 0x%x\n", + *tr_high, *tr_low); +} + +static void __mtk_tr_write(struct phy_device *phydev, u8 ch_addr, u8 node_addr, + u8 data_addr, u32 tr_data) +{ + __phy_write(phydev, 0x11, tr_data & 0xffff); + __phy_write(phydev, 0x12, tr_data >> 16); + dev_dbg(&phydev->mdio.dev, "tr_high write: 0x%x, tr_low write: 0x%x\n", + tr_data >> 16, tr_data & 0xffff); + __mtk_tr_access(phydev, false, ch_addr, node_addr, data_addr); +} + +void __mtk_tr_modify(struct phy_device *phydev, u8 ch_addr, u8 node_addr, + u8 data_addr, u32 mask, u32 set) +{ + u32 tr_data; + u16 tr_high; + u16 tr_low; + + __mtk_tr_read(phydev, ch_addr, node_addr, data_addr, &tr_high, &tr_low); + tr_data = (tr_high << 16) | tr_low; + tr_data = (tr_data & ~mask) | set; + __mtk_tr_write(phydev, ch_addr, node_addr, data_addr, tr_data); +} +EXPORT_SYMBOL_GPL(__mtk_tr_modify); + +void mtk_tr_modify(struct phy_device *phydev, u8 ch_addr, u8 node_addr, + u8 data_addr, u32 mask, u32 set) +{ + phy_select_page(phydev, MTK_PHY_PAGE_EXTENDED_52B5); + __mtk_tr_modify(phydev, ch_addr, node_addr, data_addr, mask, set); + phy_restore_page(phydev, MTK_PHY_PAGE_STANDARD, 0); +} +EXPORT_SYMBOL_GPL(mtk_tr_modify); + +void __mtk_tr_set_bits(struct phy_device *phydev, u8 ch_addr, u8 node_addr, + u8 data_addr, u32 set) +{ + __mtk_tr_modify(phydev, ch_addr, node_addr, data_addr, 0, set); +} +EXPORT_SYMBOL_GPL(__mtk_tr_set_bits); + +void __mtk_tr_clr_bits(struct phy_device *phydev, u8 ch_addr, u8 node_addr, + u8 data_addr, u32 clr) +{ + __mtk_tr_modify(phydev, ch_addr, node_addr, data_addr, clr, 0); +} +EXPORT_SYMBOL_GPL(__mtk_tr_clr_bits); + int mtk_phy_read_page(struct phy_device *phydev) { return __phy_read(phydev, MTK_EXT_PAGE_ACCESS); diff --git a/drivers/net/phy/mediatek/mtk.h b/drivers/net/phy/mediatek/mtk.h index 63d9fe179b8fd..320f76ffa81fa 100644 --- a/drivers/net/phy/mediatek/mtk.h +++ b/drivers/net/phy/mediatek/mtk.h @@ -8,7 +8,13 @@ #ifndef _MTK_EPHY_H_ #define _MTK_EPHY_H_ +#define MTK_PHY_AUX_CTRL_AND_STATUS 0x14 +#define MTK_PHY_ENABLE_DOWNSHIFT BIT(4) + #define MTK_EXT_PAGE_ACCESS 0x1f +#define MTK_PHY_PAGE_EXTENDED_1 0x0001 +#define MTK_PHY_PAGE_STANDARD 0x0000 +#define MTK_PHY_PAGE_EXTENDED_52B5 0x52b5 /* Registers on MDIO_MMD_VEND2 */ #define MTK_PHY_LED0_ON_CTRL 0x24 @@ -66,6 +72,15 @@ struct mtk_socphy_priv { unsigned long led_state; }; +void __mtk_tr_modify(struct phy_device *phydev, u8 ch_addr, u8 node_addr, + u8 data_addr, u32 mask, u32 set); +void mtk_tr_modify(struct phy_device *phydev, u8 ch_addr, u8 node_addr, + u8 data_addr, u32 mask, u32 set); +void __mtk_tr_set_bits(struct phy_device *phydev, u8 ch_addr, u8 node_addr, + u8 data_addr, u32 set); +void __mtk_tr_clr_bits(struct phy_device *phydev, u8 ch_addr, u8 node_addr, + u8 data_addr, u32 clr); + int mtk_phy_read_page(struct phy_device *phydev); int mtk_phy_write_page(struct phy_device *phydev, int page); diff --git 
a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index 9c0b1c229af64..24882d30f6858 100644 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c @@ -34,6 +34,8 @@ #include #include +#include "phylib.h" + /* Operation Mode Strap Override */ #define MII_KSZPHY_OMSO 0x16 #define KSZPHY_OMSO_FACTORY_TEST BIT(15) @@ -1030,6 +1032,29 @@ static int ksz9021_config_init(struct phy_device *phydev) #define MII_KSZ9031RN_EDPD 0x23 #define MII_KSZ9031RN_EDPD_ENABLE BIT(0) +static int ksz9031_set_loopback(struct phy_device *phydev, bool enable, + int speed) +{ + u16 ctl = BMCR_LOOPBACK; + int val; + + if (!enable) + return genphy_loopback(phydev, enable, 0); + + if (speed == SPEED_10 || speed == SPEED_100 || speed == SPEED_1000) + phydev->speed = speed; + else if (speed) + return -EINVAL; + phydev->duplex = DUPLEX_FULL; + + ctl |= mii_bmcr_encode_fixed(phydev->speed, phydev->duplex); + + phy_write(phydev, MII_BMCR, ctl); + + return phy_read_poll_timeout(phydev, MII_BMSR, val, val & BMSR_LSTATUS, + 5000, 500000, true); +} + static int ksz9031_of_load_skew_values(struct phy_device *phydev, const struct device_node *of_node, u16 reg, size_t field_sz, @@ -2631,8 +2656,7 @@ static void lan8814_ptp_tx_ts_get(struct phy_device *phydev, static int lan8814_ts_info(struct mii_timestamper *mii_ts, struct kernel_ethtool_ts_info *info) { struct kszphy_ptp_priv *ptp_priv = container_of(mii_ts, struct kszphy_ptp_priv, mii_ts); - struct phy_device *phydev = ptp_priv->phydev; - struct lan8814_shared_priv *shared = phydev->shared->priv; + struct lan8814_shared_priv *shared = phy_package_get_priv(ptp_priv->phydev); info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE | SOF_TIMESTAMPING_RX_HARDWARE | @@ -3653,7 +3677,7 @@ static int lan8814_gpio_process_cap(struct lan8814_shared_priv *shared) static int lan8814_handle_gpio_interrupt(struct phy_device *phydev, u16 status) { - struct lan8814_shared_priv *shared = phydev->shared->priv; + struct lan8814_shared_priv *shared = phy_package_get_priv(phydev); int ret; mutex_lock(&shared->shared_lock); @@ -3864,7 +3888,7 @@ static void lan8814_ptp_init(struct phy_device *phydev) static int lan8814_ptp_probe_once(struct phy_device *phydev) { - struct lan8814_shared_priv *shared = phydev->shared->priv; + struct lan8814_shared_priv *shared = phy_package_get_priv(phydev); /* Initialise shared lock for clock*/ mutex_init(&shared->shared_lock); @@ -5564,6 +5588,7 @@ static struct phy_driver ksphy_driver[] = { .resume = kszphy_resume, .cable_test_start = ksz9x31_cable_test_start, .cable_test_get_status = ksz9x31_cable_test_get_status, + .set_loopback = ksz9031_set_loopback, }, { .phy_id = PHY_ID_LAN8814, .phy_id_mask = MICREL_PHY_ID_MASK, diff --git a/drivers/net/phy/mscc/mscc_main.c b/drivers/net/phy/mscc/mscc_main.c index 19cf12ee8990f..7ff975efd8e7a 100644 --- a/drivers/net/phy/mscc/mscc_main.c +++ b/drivers/net/phy/mscc/mscc_main.c @@ -17,6 +17,8 @@ #include #include #include + +#include "../phylib.h" #include "mscc_serdes.h" #include "mscc.h" diff --git a/drivers/net/phy/mscc/mscc_ptp.c b/drivers/net/phy/mscc/mscc_ptp.c index 738a8822fcf01..ed8fb14a7f215 100644 --- a/drivers/net/phy/mscc/mscc_ptp.c +++ b/drivers/net/phy/mscc/mscc_ptp.c @@ -17,6 +17,7 @@ #include #include +#include "../phylib.h" #include "mscc.h" #include "mscc_ptp.h" @@ -645,11 +646,12 @@ static int __vsc85xx_gettime(struct ptp_clock_info *info, struct timespec64 *ts) { struct vsc85xx_ptp *ptp = container_of(info, struct vsc85xx_ptp, caps); struct phy_device *phydev = ptp->phydev; - struct 
vsc85xx_shared_private *shared = - (struct vsc85xx_shared_private *)phydev->shared->priv; struct vsc8531_private *priv = phydev->priv; + struct vsc85xx_shared_private *shared; u32 val; + shared = phy_package_get_priv(phydev); + val = vsc85xx_ts_read_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_CTRL); val |= PTP_LTC_CTRL_SAVE_ENA; vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_CTRL, val); @@ -696,11 +698,12 @@ static int __vsc85xx_settime(struct ptp_clock_info *info, { struct vsc85xx_ptp *ptp = container_of(info, struct vsc85xx_ptp, caps); struct phy_device *phydev = ptp->phydev; - struct vsc85xx_shared_private *shared = - (struct vsc85xx_shared_private *)phydev->shared->priv; struct vsc8531_private *priv = phydev->priv; + struct vsc85xx_shared_private *shared; u32 val; + shared = phy_package_get_priv(phydev); + vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_LOAD_SEC_MSB, PTP_LTC_LOAD_SEC_MSB(ts->tv_sec)); vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_LOAD_SEC_LSB, @@ -1580,8 +1583,7 @@ int vsc8584_ptp_probe(struct phy_device *phydev) int vsc8584_ptp_probe_once(struct phy_device *phydev) { - struct vsc85xx_shared_private *shared = - (struct vsc85xx_shared_private *)phydev->shared->priv; + struct vsc85xx_shared_private *shared = phy_package_get_priv(phydev); /* Initialize shared GPIO lock */ mutex_init(&shared->gpio_lock); diff --git a/drivers/net/phy/mxl-gpy.c b/drivers/net/phy/mxl-gpy.c index 94d9cb727121a..0c8dc16ee7bde 100644 --- a/drivers/net/phy/mxl-gpy.c +++ b/drivers/net/phy/mxl-gpy.c @@ -225,14 +225,8 @@ static int gpy_hwmon_register(struct phy_device *phydev) { struct device *dev = &phydev->mdio.dev; struct device *hwmon_dev; - char *hwmon_name; - hwmon_name = devm_hwmon_sanitize_name(dev, dev_name(dev)); - if (IS_ERR(hwmon_name)) - return PTR_ERR(hwmon_name); - - hwmon_dev = devm_hwmon_device_register_with_info(dev, hwmon_name, - phydev, + hwmon_dev = devm_hwmon_device_register_with_info(dev, NULL, phydev, &gpy_hwmon_chip_info, NULL); @@ -813,7 +807,7 @@ static void gpy_get_wol(struct phy_device *phydev, wol->wolopts = priv->wolopts; } -static int gpy_loopback(struct phy_device *phydev, bool enable) +static int gpy_loopback(struct phy_device *phydev, bool enable, int speed) { struct gpy_priv *priv = phydev->priv; u16 set = 0; @@ -822,6 +816,9 @@ static int gpy_loopback(struct phy_device *phydev, bool enable) if (enable) { u64 now = get_jiffies_64(); + if (speed) + return -EOPNOTSUPP; + /* wait until 3 seconds from last disable */ if (time_before64(now, priv->lb_dis_to)) msleep(jiffies64_to_msecs(priv->lb_dis_to - now)); @@ -845,15 +842,15 @@ static int gpy_loopback(struct phy_device *phydev, bool enable) return 0; } -static int gpy115_loopback(struct phy_device *phydev, bool enable) +static int gpy115_loopback(struct phy_device *phydev, bool enable, int speed) { struct gpy_priv *priv = phydev->priv; if (enable) - return gpy_loopback(phydev, enable); + return gpy_loopback(phydev, enable, speed); if (priv->fw_minor > 0x76) - return gpy_loopback(phydev, 0); + return gpy_loopback(phydev, 0, 0); return genphy_soft_reset(phydev); } diff --git a/drivers/net/phy/nxp-c45-tja11xx.c b/drivers/net/phy/nxp-c45-tja11xx.c index 34231b5b91754..250a018d55468 100644 --- a/drivers/net/phy/nxp-c45-tja11xx.c +++ b/drivers/net/phy/nxp-c45-tja11xx.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* NXP C45 PHY driver - * Copyright 2021-2023 NXP + * Copyright 2021-2025 NXP * Author: Radu Pirea */ @@ -19,9 +19,17 @@ #include "nxp-c45-tja11xx.h" +#define PHY_ID_MASK 
GENMASK(31, 4) +/* Same id: TJA1103, TJA1104 */ #define PHY_ID_TJA_1103 0x001BB010 +/* Same id: TJA1120, TJA1121 */ #define PHY_ID_TJA_1120 0x001BB031 +#define VEND1_DEVICE_ID3 0x0004 +#define TJA1120_DEV_ID3_SILICON_VERSION GENMASK(15, 12) +#define TJA1120_DEV_ID3_SAMPLE_TYPE GENMASK(11, 8) +#define DEVICE_ID3_SAMPLE_TYPE_R 0x9 + #define VEND1_DEVICE_CONTROL 0x0040 #define DEVICE_CONTROL_RESET BIT(15) #define DEVICE_CONTROL_CONFIG_GLOBAL_EN BIT(14) @@ -109,6 +117,9 @@ #define MII_BASIC_CONFIG_RMII 0x5 #define MII_BASIC_CONFIG_MII 0x4 +#define VEND1_SGMII_BASIC_CONTROL 0xB000 +#define SGMII_LPM BIT(11) + #define VEND1_SYMBOL_ERROR_CNT_XTD 0x8351 #define EXTENDED_CNT_EN BIT(15) #define VEND1_MONITOR_STATUS 0xAC80 @@ -1593,6 +1604,63 @@ static int nxp_c45_set_phy_mode(struct phy_device *phydev) return 0; } +/* Errata: ES_TJA1120 and ES_TJA1121 Rev. 1.0 — 28 November 2024 Section 3.1 & 3.2 */ +static void nxp_c45_tja1120_errata(struct phy_device *phydev) +{ + bool macsec_ability, sgmii_ability; + int silicon_version, sample_type; + int phy_abilities; + int ret = 0; + + ret = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_DEVICE_ID3); + if (ret < 0) + return; + + sample_type = FIELD_GET(TJA1120_DEV_ID3_SAMPLE_TYPE, ret); + if (sample_type != DEVICE_ID3_SAMPLE_TYPE_R) + return; + + silicon_version = FIELD_GET(TJA1120_DEV_ID3_SILICON_VERSION, ret); + + phy_abilities = phy_read_mmd(phydev, MDIO_MMD_VEND1, + VEND1_PORT_ABILITIES); + macsec_ability = !!(phy_abilities & MACSEC_ABILITY); + sgmii_ability = !!(phy_abilities & SGMII_ABILITY); + if ((!macsec_ability && silicon_version == 2) || + (macsec_ability && silicon_version == 1)) { + /* TJA1120/TJA1121 PHY configuration errata workaround. + * Apply PHY writes sequence before link up. + */ + if (!macsec_ability) { + phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F8, 0x4b95); + phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F9, 0xf3cd); + } else { + phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F8, 0x89c7); + phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F9, 0x0893); + } + + phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x0476, 0x58a0); + + phy_write_mmd(phydev, MDIO_MMD_PMAPMD, 0x8921, 0xa3a); + phy_write_mmd(phydev, MDIO_MMD_PMAPMD, 0x89F1, 0x16c1); + + phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F8, 0x0); + phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F9, 0x0); + + if (sgmii_ability) { + /* TJA1120B/TJA1121B SGMII PCS restart errata workaround. + * Put SGMII PCS into power down mode and back up. 
+ */ + phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, + VEND1_SGMII_BASIC_CONTROL, + SGMII_LPM); + phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, + VEND1_SGMII_BASIC_CONTROL, + SGMII_LPM); + } + } +} + static int nxp_c45_config_init(struct phy_device *phydev) { int ret; @@ -1609,6 +1677,9 @@ static int nxp_c45_config_init(struct phy_device *phydev) phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F8, 1); phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F9, 2); + if (phy_id_compare(phydev->phy_id, PHY_ID_TJA_1120, GENMASK(31, 4))) + nxp_c45_tja1120_errata(phydev); + phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_CONFIG, PHY_CONFIG_AUTO); @@ -1888,6 +1959,42 @@ static void tja1120_nmi_handler(struct phy_device *phydev, } } +static int nxp_c45_macsec_ability(struct phy_device *phydev) +{ + bool macsec_ability; + int phy_abilities; + + phy_abilities = phy_read_mmd(phydev, MDIO_MMD_VEND1, + VEND1_PORT_ABILITIES); + macsec_ability = !!(phy_abilities & MACSEC_ABILITY); + + return macsec_ability; +} + +static int tja1103_match_phy_device(struct phy_device *phydev) +{ + return phy_id_compare(phydev->phy_id, PHY_ID_TJA_1103, PHY_ID_MASK) && + !nxp_c45_macsec_ability(phydev); +} + +static int tja1104_match_phy_device(struct phy_device *phydev) +{ + return phy_id_compare(phydev->phy_id, PHY_ID_TJA_1103, PHY_ID_MASK) && + nxp_c45_macsec_ability(phydev); +} + +static int tja1120_match_phy_device(struct phy_device *phydev) +{ + return phy_id_compare(phydev->phy_id, PHY_ID_TJA_1120, PHY_ID_MASK) && + !nxp_c45_macsec_ability(phydev); +} + +static int tja1121_match_phy_device(struct phy_device *phydev) +{ + return phy_id_compare(phydev->phy_id, PHY_ID_TJA_1120, PHY_ID_MASK) && + nxp_c45_macsec_ability(phydev); +} + static const struct nxp_c45_regmap tja1120_regmap = { .vend1_ptp_clk_period = 0x1020, .vend1_event_msg_filt = 0x9010, @@ -1958,7 +2065,6 @@ static const struct nxp_c45_phy_data tja1120_phy_data = { static struct phy_driver nxp_c45_driver[] = { { - PHY_ID_MATCH_MODEL(PHY_ID_TJA_1103), .name = "NXP C45 TJA1103", .get_features = nxp_c45_get_features, .driver_data = &tja1103_phy_data, @@ -1980,9 +2086,33 @@ static struct phy_driver nxp_c45_driver[] = { .get_sqi = nxp_c45_get_sqi, .get_sqi_max = nxp_c45_get_sqi_max, .remove = nxp_c45_remove, + .match_phy_device = tja1103_match_phy_device, + }, + { + .name = "NXP C45 TJA1104", + .get_features = nxp_c45_get_features, + .driver_data = &tja1103_phy_data, + .probe = nxp_c45_probe, + .soft_reset = nxp_c45_soft_reset, + .config_aneg = genphy_c45_config_aneg, + .config_init = nxp_c45_config_init, + .config_intr = tja1103_config_intr, + .handle_interrupt = nxp_c45_handle_interrupt, + .read_status = genphy_c45_read_status, + .suspend = genphy_c45_pma_suspend, + .resume = genphy_c45_pma_resume, + .get_sset_count = nxp_c45_get_sset_count, + .get_strings = nxp_c45_get_strings, + .get_stats = nxp_c45_get_stats, + .cable_test_start = nxp_c45_cable_test_start, + .cable_test_get_status = nxp_c45_cable_test_get_status, + .set_loopback = genphy_c45_loopback, + .get_sqi = nxp_c45_get_sqi, + .get_sqi_max = nxp_c45_get_sqi_max, + .remove = nxp_c45_remove, + .match_phy_device = tja1104_match_phy_device, }, { - PHY_ID_MATCH_MODEL(PHY_ID_TJA_1120), .name = "NXP C45 TJA1120", .get_features = nxp_c45_get_features, .driver_data = &tja1120_phy_data, @@ -2005,6 +2135,32 @@ static struct phy_driver nxp_c45_driver[] = { .get_sqi = nxp_c45_get_sqi, .get_sqi_max = nxp_c45_get_sqi_max, .remove = nxp_c45_remove, + .match_phy_device = tja1120_match_phy_device, + }, + { + .name = "NXP C45 TJA1121", + 
.get_features = nxp_c45_get_features, + .driver_data = &tja1120_phy_data, + .probe = nxp_c45_probe, + .soft_reset = nxp_c45_soft_reset, + .config_aneg = genphy_c45_config_aneg, + .config_init = nxp_c45_config_init, + .config_intr = tja1120_config_intr, + .handle_interrupt = nxp_c45_handle_interrupt, + .read_status = genphy_c45_read_status, + .link_change_notify = tja1120_link_change_notify, + .suspend = genphy_c45_pma_suspend, + .resume = genphy_c45_pma_resume, + .get_sset_count = nxp_c45_get_sset_count, + .get_strings = nxp_c45_get_strings, + .get_stats = nxp_c45_get_stats, + .cable_test_start = nxp_c45_cable_test_start, + .cable_test_get_status = nxp_c45_cable_test_get_status, + .set_loopback = genphy_c45_loopback, + .get_sqi = nxp_c45_get_sqi, + .get_sqi_max = nxp_c45_get_sqi_max, + .remove = nxp_c45_remove, + .match_phy_device = tja1121_match_phy_device, }, }; diff --git a/drivers/net/phy/nxp-tja11xx.c b/drivers/net/phy/nxp-tja11xx.c index ed7fa26bac8e8..07e94a2478ac3 100644 --- a/drivers/net/phy/nxp-tja11xx.c +++ b/drivers/net/phy/nxp-tja11xx.c @@ -21,12 +21,14 @@ #define PHY_ID_TJA1100 0x0180dc40 #define PHY_ID_TJA1101 0x0180dd00 #define PHY_ID_TJA1102 0x0180dc80 +#define PHY_ID_TJA1102S 0x0180dc90 #define MII_ECTRL 17 #define MII_ECTRL_LINK_CONTROL BIT(15) #define MII_ECTRL_POWER_MODE_MASK GENMASK(14, 11) #define MII_ECTRL_POWER_MODE_NO_CHANGE (0x0 << 11) #define MII_ECTRL_POWER_MODE_NORMAL (0x3 << 11) +#define MII_ECTRL_POWER_MODE_SLEEP (0xa << 11) #define MII_ECTRL_POWER_MODE_STANDBY (0xc << 11) #define MII_ECTRL_CABLE_TEST BIT(5) #define MII_ECTRL_CONFIG_EN BIT(2) @@ -78,12 +80,13 @@ #define MII_COMMCFG 27 #define MII_COMMCFG_AUTO_OP BIT(15) +#define MII_CFG3 28 +#define MII_CFG3_PHY_EN BIT(0) + /* Configure REF_CLK as input in RMII mode */ #define TJA110X_RMII_MODE_REFCLK_IN BIT(0) struct tja11xx_priv { - char *hwmon_name; - struct device *hwmon_dev; struct phy_device *phydev; struct work_struct phy_register_work; u32 flags; @@ -179,6 +182,14 @@ static int tja11xx_wakeup(struct phy_device *phydev) return ret; return tja11xx_enable_link_control(phydev); + case MII_ECTRL_POWER_MODE_SLEEP: + switch (phydev->phy_id & PHY_ID_MASK) { + case PHY_ID_TJA1102S: + /* Enable PHY, maybe it is disabled due to pin strapping */ + return phy_set_bits(phydev, MII_CFG3, MII_CFG3_PHY_EN); + default: + return 0; + } default: break; } @@ -316,6 +327,7 @@ static int tja11xx_config_init(struct phy_device *phydev) if (ret) return ret; break; + case PHY_ID_TJA1102S: case PHY_ID_TJA1101: reg_mask = MII_CFG1_INTERFACE_MODE_MASK; ret = tja11xx_get_interface_mode(phydev); @@ -494,19 +506,12 @@ static const struct hwmon_chip_info tja11xx_hwmon_chip_info = { static int tja11xx_hwmon_register(struct phy_device *phydev, struct tja11xx_priv *priv) { - struct device *dev = &phydev->mdio.dev; - - priv->hwmon_name = devm_hwmon_sanitize_name(dev, dev_name(dev)); - if (IS_ERR(priv->hwmon_name)) - return PTR_ERR(priv->hwmon_name); + struct device *hdev, *dev = &phydev->mdio.dev; - priv->hwmon_dev = - devm_hwmon_device_register_with_info(dev, priv->hwmon_name, - phydev, - &tja11xx_hwmon_chip_info, - NULL); - - return PTR_ERR_OR_ZERO(priv->hwmon_dev); + hdev = devm_hwmon_device_register_with_info(dev, NULL, phydev, + &tja11xx_hwmon_chip_info, + NULL); + return PTR_ERR_OR_ZERO(hdev); } static int tja11xx_parse_dt(struct phy_device *phydev) @@ -883,6 +888,29 @@ static struct phy_driver tja11xx_driver[] = { .handle_interrupt = tja11xx_handle_interrupt, .cable_test_start = tja11xx_cable_test_start, .cable_test_get_status = 
tja11xx_cable_test_get_status, + }, { + PHY_ID_MATCH_MODEL(PHY_ID_TJA1102S), + .name = "NXP TJA1102S", + .features = PHY_BASIC_T1_FEATURES, + .flags = PHY_POLL_CABLE_TEST, + .probe = tja11xx_probe, + .soft_reset = tja11xx_soft_reset, + .config_aneg = tja11xx_config_aneg, + .config_init = tja11xx_config_init, + .read_status = tja11xx_read_status, + .get_sqi = tja11xx_get_sqi, + .get_sqi_max = tja11xx_get_sqi_max, + .suspend = genphy_suspend, + .resume = genphy_resume, + .set_loopback = genphy_loopback, + /* Statistics */ + .get_sset_count = tja11xx_get_sset_count, + .get_strings = tja11xx_get_strings, + .get_stats = tja11xx_get_stats, + .config_intr = tja11xx_config_intr, + .handle_interrupt = tja11xx_handle_interrupt, + .cable_test_start = tja11xx_cable_test_start, + .cable_test_get_status = tja11xx_cable_test_get_status, } }; @@ -892,6 +920,7 @@ static const struct mdio_device_id __maybe_unused tja11xx_tbl[] = { { PHY_ID_MATCH_MODEL(PHY_ID_TJA1100) }, { PHY_ID_MATCH_MODEL(PHY_ID_TJA1101) }, { PHY_ID_MATCH_MODEL(PHY_ID_TJA1102) }, + { PHY_ID_MATCH_MODEL(PHY_ID_TJA1102S) }, { } }; diff --git a/drivers/net/phy/phy-c45.c b/drivers/net/phy/phy-c45.c index 0dac08e85304b..bdd70d424491b 100644 --- a/drivers/net/phy/phy-c45.c +++ b/drivers/net/phy/phy-c45.c @@ -9,6 +9,7 @@ #include #include "mdio-open-alliance.h" +#include "phylib-internal.h" /** * genphy_c45_baset1_able - checks if the PMA has BASE-T1 extended abilities @@ -683,13 +684,10 @@ EXPORT_SYMBOL_GPL(genphy_c45_read_mdix); static int genphy_c45_write_eee_adv(struct phy_device *phydev, unsigned long *adv) { - __ETHTOOL_DECLARE_LINK_MODE_MASK(tmp); int val, changed = 0; - linkmode_andnot(tmp, adv, phydev->eee_broken_modes); - if (linkmode_intersects(phydev->supported_eee, PHY_EEE_CAP1_FEATURES)) { - val = linkmode_to_mii_eee_cap1_t(tmp); + val = linkmode_to_mii_eee_cap1_t(adv); /* IEEE 802.3-2018 45.2.7.13 EEE advertisement 1 * (Register 7.60) @@ -707,7 +705,7 @@ static int genphy_c45_write_eee_adv(struct phy_device *phydev, } if (linkmode_intersects(phydev->supported_eee, PHY_EEE_CAP2_FEATURES)) { - val = linkmode_to_mii_eee_cap2_t(tmp); + val = linkmode_to_mii_eee_cap2_t(adv); /* IEEE 802.3-2022 45.2.7.16 EEE advertisement 2 * (Register 7.62) @@ -1230,8 +1228,11 @@ int gen10g_config_aneg(struct phy_device *phydev) } EXPORT_SYMBOL_GPL(gen10g_config_aneg); -int genphy_c45_loopback(struct phy_device *phydev, bool enable) +int genphy_c45_loopback(struct phy_device *phydev, bool enable, int speed) { + if (enable && speed) + return -EOPNOTSUPP; + return phy_modify_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1, MDIO_PCS_CTRL1_LOOPBACK, enable ? MDIO_PCS_CTRL1_LOOPBACK : 0); @@ -1467,42 +1468,32 @@ EXPORT_SYMBOL_GPL(genphy_c45_plca_get_status); /** * genphy_c45_eee_is_active - get EEE status * @phydev: target phy_device struct - * @adv: variable to store advertised linkmodes * @lp: variable to store LP advertised linkmodes * - * Description: this function will read local and link partner PHY - * advertisements. Compare them return current EEE state. + * Description: this function will read link partner PHY advertisement + * and compare it to local advertisement to return current EEE state. 
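For illustration, a minimal caller sketch of the new single-argument form, mirroring how phy_check_link_status() uses it further down in this diff; example_update_eee_active() is a hypothetical name:

/* Hypothetical helper: refresh phydev->eee_active after a link-up event. */
static void example_update_eee_active(struct phy_device *phydev)
{
	int ret;

	/* Pass NULL when a copy of the LP advertisement is not needed */
	ret = genphy_c45_eee_is_active(phydev, NULL);

	/* ret < 0 is an MDIO error; 0 means inactive, > 0 means active */
	phydev->eee_active = ret > 0;
}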
*/ -int genphy_c45_eee_is_active(struct phy_device *phydev, unsigned long *adv, - unsigned long *lp) +int genphy_c45_eee_is_active(struct phy_device *phydev, unsigned long *lp) { - __ETHTOOL_DECLARE_LINK_MODE_MASK(tmp_adv) = {}; __ETHTOOL_DECLARE_LINK_MODE_MASK(tmp_lp) = {}; __ETHTOOL_DECLARE_LINK_MODE_MASK(common); - bool eee_active; int ret; - ret = genphy_c45_read_eee_adv(phydev, tmp_adv); - if (ret) - return ret; + if (!phydev->eee_cfg.eee_enabled) + return 0; ret = genphy_c45_read_eee_lpa(phydev, tmp_lp); if (ret) return ret; - linkmode_and(common, tmp_adv, tmp_lp); - if (!linkmode_empty(tmp_adv) && !linkmode_empty(common)) - eee_active = phy_check_valid(phydev->speed, phydev->duplex, - common); - else - eee_active = false; - - if (adv) - linkmode_copy(adv, tmp_adv); if (lp) linkmode_copy(lp, tmp_lp); - return eee_active; + linkmode_and(common, phydev->advertising_eee, tmp_lp); + if (linkmode_empty(common)) + return 0; + + return phy_check_valid(phydev->speed, phydev->duplex, common); } EXPORT_SYMBOL(genphy_c45_eee_is_active); @@ -1519,14 +1510,14 @@ int genphy_c45_ethtool_get_eee(struct phy_device *phydev, { int ret; - ret = genphy_c45_eee_is_active(phydev, data->advertised, - data->lp_advertised); + ret = genphy_c45_eee_is_active(phydev, data->lp_advertised); if (ret < 0) return ret; data->eee_active = phydev->eee_active; - linkmode_copy(data->supported, phydev->supported_eee); - + linkmode_andnot(data->supported, phydev->supported_eee, + phydev->eee_disabled_modes); + linkmode_copy(data->advertised, phydev->advertising_eee); return 0; } EXPORT_SYMBOL(genphy_c45_ethtool_get_eee); @@ -1559,7 +1550,9 @@ int genphy_c45_ethtool_set_eee(struct phy_device *phydev, phydev_warn(phydev, "At least some EEE link modes are not supported.\n"); return -EINVAL; } - linkmode_copy(phydev->advertising_eee, adv); + + linkmode_andnot(phydev->advertising_eee, adv, + phydev->eee_disabled_modes); } else if (linkmode_empty(phydev->advertising_eee)) { phy_advertise_eee_all(phydev); } diff --git a/drivers/net/phy/phy-caps.h b/drivers/net/phy/phy-caps.h new file mode 100644 index 0000000000000..157759966650e --- /dev/null +++ b/drivers/net/phy/phy-caps.h @@ -0,0 +1,63 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * link caps internal header, for link modes <-> capabilities <-> interfaces + * conversions. 
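A sketch of the three-way conversion this header provides (callable only from inside phylib, where the header is visible), assuming an SGMII-attached PHY:

/* Expand an interface's capability bits into an ethtool linkmode mask. */
__ETHTOOL_DECLARE_LINK_MODE_MASK(modes) = {};
unsigned long caps = phy_caps_from_interface(PHY_INTERFACE_MODE_SGMII);

phy_caps_linkmodes(caps, modes);
/* modes now holds the 10/100/1000, half- and full-duplex linkmode bits */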
+ */ + +#ifndef __PHY_CAPS_H +#define __PHY_CAPS_H + +#include +#include + +enum { + LINK_CAPA_10HD = 0, + LINK_CAPA_10FD, + LINK_CAPA_100HD, + LINK_CAPA_100FD, + LINK_CAPA_1000HD, + LINK_CAPA_1000FD, + LINK_CAPA_2500FD, + LINK_CAPA_5000FD, + LINK_CAPA_10000FD, + LINK_CAPA_20000FD, + LINK_CAPA_25000FD, + LINK_CAPA_40000FD, + LINK_CAPA_50000FD, + LINK_CAPA_56000FD, + LINK_CAPA_100000FD, + LINK_CAPA_200000FD, + LINK_CAPA_400000FD, + LINK_CAPA_800000FD, + + __LINK_CAPA_MAX, +}; + +#define LINK_CAPA_ALL GENMASK((__LINK_CAPA_MAX - 1), 0) + +struct link_capabilities { + int speed; + unsigned int duplex; + __ETHTOOL_DECLARE_LINK_MODE_MASK(linkmodes); +}; + +int phy_caps_init(void); + +size_t phy_caps_speeds(unsigned int *speeds, size_t size, + unsigned long *linkmodes); +void phy_caps_linkmode_max_speed(u32 max_speed, unsigned long *linkmodes); +bool phy_caps_valid(int speed, int duplex, const unsigned long *linkmodes); +void phy_caps_linkmodes(unsigned long caps, unsigned long *linkmodes); +unsigned long phy_caps_from_interface(phy_interface_t interface); + +const struct link_capabilities * +phy_caps_lookup_by_linkmode(const unsigned long *linkmodes); + +const struct link_capabilities * +phy_caps_lookup_by_linkmode_rev(const unsigned long *linkmodes, bool fdx_only); + +const struct link_capabilities * +phy_caps_lookup(int speed, unsigned int duplex, const unsigned long *supported, + bool exact); + +#endif /* __PHY_CAPS_H */ diff --git a/drivers/net/phy/phy-core.c b/drivers/net/phy/phy-core.c index 6bf3ec985f3d3..e177037f9110b 100644 --- a/drivers/net/phy/phy-core.c +++ b/drivers/net/phy/phy-core.c @@ -6,6 +6,10 @@ #include #include +#include "phylib.h" +#include "phylib-internal.h" +#include "phy-caps.h" + /** * phy_speed_to_str - Return a string representing the PHY link speed * @@ -13,7 +17,7 @@ */ const char *phy_speed_to_str(int speed) { - BUILD_BUG_ON_MSG(__ETHTOOL_LINK_MODE_MASK_NBITS != 103, + BUILD_BUG_ON_MSG(__ETHTOOL_LINK_MODE_MASK_NBITS != 121, "Enum ethtool_link_mode_bit_indices and phylib are out of sync. " "If a speed or mode has been added please update phy_speed_to_str " "and the PHY settings array.\n"); @@ -153,203 +157,9 @@ int phy_interface_num_ports(phy_interface_t interface) } EXPORT_SYMBOL_GPL(phy_interface_num_ports); -/* A mapping of all SUPPORTED settings to speed/duplex. This table - * must be grouped by speed and sorted in descending match priority - * - iow, descending speed. 
- */ - -#define PHY_SETTING(s, d, b) { .speed = SPEED_ ## s, .duplex = DUPLEX_ ## d, \ - .bit = ETHTOOL_LINK_MODE_ ## b ## _BIT} - -static const struct phy_setting settings[] = { - /* 800G */ - PHY_SETTING( 800000, FULL, 800000baseCR8_Full ), - PHY_SETTING( 800000, FULL, 800000baseKR8_Full ), - PHY_SETTING( 800000, FULL, 800000baseDR8_Full ), - PHY_SETTING( 800000, FULL, 800000baseDR8_2_Full ), - PHY_SETTING( 800000, FULL, 800000baseSR8_Full ), - PHY_SETTING( 800000, FULL, 800000baseVR8_Full ), - /* 400G */ - PHY_SETTING( 400000, FULL, 400000baseCR8_Full ), - PHY_SETTING( 400000, FULL, 400000baseKR8_Full ), - PHY_SETTING( 400000, FULL, 400000baseLR8_ER8_FR8_Full ), - PHY_SETTING( 400000, FULL, 400000baseDR8_Full ), - PHY_SETTING( 400000, FULL, 400000baseSR8_Full ), - PHY_SETTING( 400000, FULL, 400000baseCR4_Full ), - PHY_SETTING( 400000, FULL, 400000baseKR4_Full ), - PHY_SETTING( 400000, FULL, 400000baseLR4_ER4_FR4_Full ), - PHY_SETTING( 400000, FULL, 400000baseDR4_Full ), - PHY_SETTING( 400000, FULL, 400000baseSR4_Full ), - /* 200G */ - PHY_SETTING( 200000, FULL, 200000baseCR4_Full ), - PHY_SETTING( 200000, FULL, 200000baseKR4_Full ), - PHY_SETTING( 200000, FULL, 200000baseLR4_ER4_FR4_Full ), - PHY_SETTING( 200000, FULL, 200000baseDR4_Full ), - PHY_SETTING( 200000, FULL, 200000baseSR4_Full ), - PHY_SETTING( 200000, FULL, 200000baseCR2_Full ), - PHY_SETTING( 200000, FULL, 200000baseKR2_Full ), - PHY_SETTING( 200000, FULL, 200000baseLR2_ER2_FR2_Full ), - PHY_SETTING( 200000, FULL, 200000baseDR2_Full ), - PHY_SETTING( 200000, FULL, 200000baseSR2_Full ), - /* 100G */ - PHY_SETTING( 100000, FULL, 100000baseCR4_Full ), - PHY_SETTING( 100000, FULL, 100000baseKR4_Full ), - PHY_SETTING( 100000, FULL, 100000baseLR4_ER4_Full ), - PHY_SETTING( 100000, FULL, 100000baseSR4_Full ), - PHY_SETTING( 100000, FULL, 100000baseCR2_Full ), - PHY_SETTING( 100000, FULL, 100000baseKR2_Full ), - PHY_SETTING( 100000, FULL, 100000baseLR2_ER2_FR2_Full ), - PHY_SETTING( 100000, FULL, 100000baseDR2_Full ), - PHY_SETTING( 100000, FULL, 100000baseSR2_Full ), - PHY_SETTING( 100000, FULL, 100000baseCR_Full ), - PHY_SETTING( 100000, FULL, 100000baseKR_Full ), - PHY_SETTING( 100000, FULL, 100000baseLR_ER_FR_Full ), - PHY_SETTING( 100000, FULL, 100000baseDR_Full ), - PHY_SETTING( 100000, FULL, 100000baseSR_Full ), - /* 56G */ - PHY_SETTING( 56000, FULL, 56000baseCR4_Full ), - PHY_SETTING( 56000, FULL, 56000baseKR4_Full ), - PHY_SETTING( 56000, FULL, 56000baseLR4_Full ), - PHY_SETTING( 56000, FULL, 56000baseSR4_Full ), - /* 50G */ - PHY_SETTING( 50000, FULL, 50000baseCR2_Full ), - PHY_SETTING( 50000, FULL, 50000baseKR2_Full ), - PHY_SETTING( 50000, FULL, 50000baseSR2_Full ), - PHY_SETTING( 50000, FULL, 50000baseCR_Full ), - PHY_SETTING( 50000, FULL, 50000baseKR_Full ), - PHY_SETTING( 50000, FULL, 50000baseLR_ER_FR_Full ), - PHY_SETTING( 50000, FULL, 50000baseDR_Full ), - PHY_SETTING( 50000, FULL, 50000baseSR_Full ), - /* 40G */ - PHY_SETTING( 40000, FULL, 40000baseCR4_Full ), - PHY_SETTING( 40000, FULL, 40000baseKR4_Full ), - PHY_SETTING( 40000, FULL, 40000baseLR4_Full ), - PHY_SETTING( 40000, FULL, 40000baseSR4_Full ), - /* 25G */ - PHY_SETTING( 25000, FULL, 25000baseCR_Full ), - PHY_SETTING( 25000, FULL, 25000baseKR_Full ), - PHY_SETTING( 25000, FULL, 25000baseSR_Full ), - /* 20G */ - PHY_SETTING( 20000, FULL, 20000baseKR2_Full ), - PHY_SETTING( 20000, FULL, 20000baseMLD2_Full ), - /* 10G */ - PHY_SETTING( 10000, FULL, 10000baseCR_Full ), - PHY_SETTING( 10000, FULL, 10000baseER_Full ), - PHY_SETTING( 10000, FULL, 
10000baseKR_Full ), - PHY_SETTING( 10000, FULL, 10000baseKX4_Full ), - PHY_SETTING( 10000, FULL, 10000baseLR_Full ), - PHY_SETTING( 10000, FULL, 10000baseLRM_Full ), - PHY_SETTING( 10000, FULL, 10000baseR_FEC ), - PHY_SETTING( 10000, FULL, 10000baseSR_Full ), - PHY_SETTING( 10000, FULL, 10000baseT_Full ), - /* 5G */ - PHY_SETTING( 5000, FULL, 5000baseT_Full ), - /* 2.5G */ - PHY_SETTING( 2500, FULL, 2500baseT_Full ), - PHY_SETTING( 2500, FULL, 2500baseX_Full ), - /* 1G */ - PHY_SETTING( 1000, FULL, 1000baseT_Full ), - PHY_SETTING( 1000, HALF, 1000baseT_Half ), - PHY_SETTING( 1000, FULL, 1000baseT1_Full ), - PHY_SETTING( 1000, FULL, 1000baseX_Full ), - PHY_SETTING( 1000, FULL, 1000baseKX_Full ), - /* 100M */ - PHY_SETTING( 100, FULL, 100baseT_Full ), - PHY_SETTING( 100, FULL, 100baseT1_Full ), - PHY_SETTING( 100, HALF, 100baseT_Half ), - PHY_SETTING( 100, HALF, 100baseFX_Half ), - PHY_SETTING( 100, FULL, 100baseFX_Full ), - /* 10M */ - PHY_SETTING( 10, FULL, 10baseT_Full ), - PHY_SETTING( 10, HALF, 10baseT_Half ), - PHY_SETTING( 10, FULL, 10baseT1L_Full ), - PHY_SETTING( 10, FULL, 10baseT1S_Full ), - PHY_SETTING( 10, HALF, 10baseT1S_Half ), - PHY_SETTING( 10, HALF, 10baseT1S_P2MP_Half ), - PHY_SETTING( 10, FULL, 10baseT1BRR_Full ), -}; -#undef PHY_SETTING - -/** - * phy_lookup_setting - lookup a PHY setting - * @speed: speed to match - * @duplex: duplex to match - * @mask: allowed link modes - * @exact: an exact match is required - * - * Search the settings array for a setting that matches the speed and - * duplex, and which is supported. - * - * If @exact is set, either an exact match or %NULL for no match will - * be returned. - * - * If @exact is unset, an exact match, the fastest supported setting at - * or below the specified speed, the slowest supported setting, or if - * they all fail, %NULL will be returned. 
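The same fallback semantics carry over to the replacement helper, phy_caps_lookup(); a minimal sketch of a non-exact lookup, matching how phy_sanitize_settings() uses it further down in this diff:

const struct link_capabilities *c;

/* Prefer 1000/Full, else fall back to the best supported speed at or
 * below it, else the slowest supported speed.
 */
c = phy_caps_lookup(SPEED_1000, DUPLEX_FULL, phydev->supported, false);
if (c) {
	phydev->speed = c->speed;
	phydev->duplex = c->duplex;
}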
- */ -const struct phy_setting * -phy_lookup_setting(int speed, int duplex, const unsigned long *mask, bool exact) -{ - const struct phy_setting *p, *match = NULL, *last = NULL; - int i; - - for (i = 0, p = settings; i < ARRAY_SIZE(settings); i++, p++) { - if (p->bit < __ETHTOOL_LINK_MODE_MASK_NBITS && - test_bit(p->bit, mask)) { - last = p; - if (p->speed == speed && p->duplex == duplex) { - /* Exact match for speed and duplex */ - match = p; - break; - } else if (!exact) { - if (!match && p->speed <= speed) - /* Candidate */ - match = p; - - if (p->speed < speed) - break; - } - } - } - - if (!match && !exact) - match = last; - - return match; -} -EXPORT_SYMBOL_GPL(phy_lookup_setting); - -size_t phy_speeds(unsigned int *speeds, size_t size, - unsigned long *mask) -{ - size_t count; - int i; - - for (i = 0, count = 0; i < ARRAY_SIZE(settings) && count < size; i++) - if (settings[i].bit < __ETHTOOL_LINK_MODE_MASK_NBITS && - test_bit(settings[i].bit, mask) && - (count == 0 || speeds[count - 1] != settings[i].speed)) - speeds[count++] = settings[i].speed; - - return count; -} - -static void __set_linkmode_max_speed(u32 max_speed, unsigned long *addr) -{ - const struct phy_setting *p; - int i; - - for (i = 0, p = settings; i < ARRAY_SIZE(settings); i++, p++) { - if (p->speed > max_speed) - linkmode_clear_bit(p->bit, addr); - else - break; - } -} - static void __set_phy_supported(struct phy_device *phydev, u32 max_speed) { - __set_linkmode_max_speed(max_speed, phydev->supported); + phy_caps_linkmode_max_speed(max_speed, phydev->supported); } /** @@ -388,7 +198,7 @@ void of_set_phy_supported(struct phy_device *phydev) void of_set_phy_eee_broken(struct phy_device *phydev) { struct device_node *node = phydev->mdio.dev.of_node; - unsigned long *modes = phydev->eee_broken_modes; + unsigned long *modes = phydev->eee_disabled_modes; if (!IS_ENABLED(CONFIG_OF_MDIO) || !node) return; @@ -475,16 +285,15 @@ EXPORT_SYMBOL_GPL(phy_resolve_aneg_pause); void phy_resolve_aneg_linkmode(struct phy_device *phydev) { __ETHTOOL_DECLARE_LINK_MODE_MASK(common); - int i; + const struct link_capabilities *c; linkmode_and(common, phydev->lp_advertising, phydev->advertising); - for (i = 0; i < ARRAY_SIZE(settings); i++) - if (test_bit(settings[i].bit, common)) { - phydev->speed = settings[i].speed; - phydev->duplex = settings[i].duplex; - break; - } + c = phy_caps_lookup_by_linkmode(common); + if (c) { + phydev->speed = c->speed; + phydev->duplex = c->duplex; + } phy_resolve_aneg_pause(phydev); } @@ -502,7 +311,8 @@ EXPORT_SYMBOL_GPL(phy_resolve_aneg_linkmode); void phy_check_downshift(struct phy_device *phydev) { __ETHTOOL_DECLARE_LINK_MODE_MASK(common); - int i, speed = SPEED_UNKNOWN; + const struct link_capabilities *c; + int speed = SPEED_UNKNOWN; phydev->downshifted_rate = 0; @@ -512,11 +322,9 @@ void phy_check_downshift(struct phy_device *phydev) linkmode_and(common, phydev->lp_advertising, phydev->advertising); - for (i = 0; i < ARRAY_SIZE(settings); i++) - if (test_bit(settings[i].bit, common)) { - speed = settings[i].speed; - break; - } + c = phy_caps_lookup_by_linkmode(common); + if (c) + speed = c->speed; if (speed == SPEED_UNKNOWN || phydev->speed >= speed) return; @@ -526,22 +334,17 @@ void phy_check_downshift(struct phy_device *phydev) phydev->downshifted_rate = 1; } -EXPORT_SYMBOL_GPL(phy_check_downshift); static int phy_resolve_min_speed(struct phy_device *phydev, bool fdx_only) { __ETHTOOL_DECLARE_LINK_MODE_MASK(common); - int i = ARRAY_SIZE(settings); + const struct link_capabilities *c; 
linkmode_and(common, phydev->lp_advertising, phydev->advertising); - while (--i >= 0) { - if (test_bit(settings[i].bit, common)) { - if (fdx_only && settings[i].duplex != DUPLEX_FULL) - continue; - return settings[i].speed; - } - } + c = phy_caps_lookup_by_linkmode_rev(common, fdx_only); + if (c) + return c->speed; return SPEED_UNKNOWN; } @@ -553,7 +356,7 @@ int phy_speed_down_core(struct phy_device *phydev) if (min_common_speed == SPEED_UNKNOWN) return -EINVAL; - __set_linkmode_max_speed(min_common_speed, phydev->advertising); + phy_caps_linkmode_max_speed(min_common_speed, phydev->advertising); return 0; } @@ -714,43 +517,6 @@ int __phy_package_read_mmd(struct phy_device *phydev, } EXPORT_SYMBOL(__phy_package_read_mmd); -/** - * phy_package_read_mmd - read MMD reg relative to PHY package base addr - * @phydev: The phy_device struct - * @addr_offset: The offset to be added to PHY package base_addr - * @devad: The MMD to read from - * @regnum: The register on the MMD to read - * - * Convenience helper for reading a register of an MMD on a given PHY - * using the PHY package base address. The base address is added to - * the addr_offset value. - * - * Same calling rules as for phy_read(); - * - * NOTE: It's assumed that the entire PHY package is either C22 or C45. - */ -int phy_package_read_mmd(struct phy_device *phydev, - unsigned int addr_offset, int devad, - u32 regnum) -{ - int addr = phy_package_address(phydev, addr_offset); - int val; - - if (addr < 0) - return addr; - - if (regnum > (u16)~0 || devad > 32) - return -EINVAL; - - phy_lock_mdio_bus(phydev); - val = mmd_phy_read(phydev->mdio.bus, addr, phydev->is_c45, devad, - regnum); - phy_unlock_mdio_bus(phydev); - - return val; -} -EXPORT_SYMBOL(phy_package_read_mmd); - /** * __phy_package_write_mmd - write MMD reg relative to PHY package base addr * @phydev: The phy_device struct @@ -784,44 +550,6 @@ int __phy_package_write_mmd(struct phy_device *phydev, } EXPORT_SYMBOL(__phy_package_write_mmd); -/** - * phy_package_write_mmd - write MMD reg relative to PHY package base addr - * @phydev: The phy_device struct - * @addr_offset: The offset to be added to PHY package base_addr - * @devad: The MMD to write to - * @regnum: The register on the MMD to write - * @val: value to write to @regnum - * - * Convenience helper for writing a register of an MMD on a given PHY - * using the PHY package base address. The base address is added to - * the addr_offset value. - * - * Same calling rules as for phy_write(); - * - * NOTE: It's assumed that the entire PHY package is either C22 or C45. 
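Only the lock-held double-underscore accessors remain here; a short usage sketch, assuming the caller already holds the MDIO bus lock as those accessors require, and with an illustrative (made-up) vendor register offset:

/* Read-modify-write a package-global vendor register. */
int val = __phy_package_read_mmd(phydev, 0, MDIO_MMD_VEND1, 0x0010);

if (val >= 0)
	__phy_package_write_mmd(phydev, 0, MDIO_MMD_VEND1, 0x0010,
				val | BIT(0));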
- */ -int phy_package_write_mmd(struct phy_device *phydev, - unsigned int addr_offset, int devad, - u32 regnum, u16 val) -{ - int addr = phy_package_address(phydev, addr_offset); - int ret; - - if (addr < 0) - return addr; - - if (regnum > (u16)~0 || devad > 32) - return -EINVAL; - - phy_lock_mdio_bus(phydev); - ret = mmd_phy_write(phydev->mdio.bus, addr, phydev->is_c45, devad, - regnum, val); - phy_unlock_mdio_bus(phydev); - - return ret; -} -EXPORT_SYMBOL(phy_package_write_mmd); - /** * phy_modify_changed - Function for modifying a PHY register * @phydev: the phy_device struct diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index d0c1718e2b16a..13df28445f020 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -36,6 +36,9 @@ #include #include +#include "phylib-internal.h" +#include "phy-caps.h" + #define PHY_STATE_TIME HZ #define PHY_STATE_STR(_state) \ @@ -210,25 +213,6 @@ int phy_aneg_done(struct phy_device *phydev) } EXPORT_SYMBOL(phy_aneg_done); -/** - * phy_find_valid - find a PHY setting that matches the requested parameters - * @speed: desired speed - * @duplex: desired duplex - * @supported: mask of supported link modes - * - * Locate a supported phy setting that is, in priority order: - * - an exact match for the specified speed and duplex mode - * - a match for the specified speed, or slower speed - * - the slowest supported speed - * Returns the matched phy_setting entry, or %NULL if no supported phy - * settings were found. - */ -static const struct phy_setting * -phy_find_valid(int speed, int duplex, unsigned long *supported) -{ - return phy_lookup_setting(speed, duplex, supported, false); -} - /** * phy_supported_speeds - return all speeds currently supported by a phy device * @phy: The phy device to return supported speeds of. @@ -243,7 +227,7 @@ unsigned int phy_supported_speeds(struct phy_device *phy, unsigned int *speeds, unsigned int size) { - return phy_speeds(speeds, size, phy->supported); + return phy_caps_speeds(speeds, size, phy->supported); } /** @@ -257,7 +241,7 @@ unsigned int phy_supported_speeds(struct phy_device *phy, */ bool phy_check_valid(int speed, int duplex, unsigned long *features) { - return !!phy_lookup_setting(speed, duplex, features, true); + return phy_caps_valid(speed, duplex, features); } EXPORT_SYMBOL(phy_check_valid); @@ -271,13 +255,14 @@ EXPORT_SYMBOL(phy_check_valid); */ static void phy_sanitize_settings(struct phy_device *phydev) { - const struct phy_setting *setting; + const struct link_capabilities *c; + + c = phy_caps_lookup(phydev->speed, phydev->duplex, phydev->supported, + false); - setting = phy_find_valid(phydev->speed, phydev->duplex, - phydev->supported); - if (setting) { - phydev->speed = setting->speed; - phydev->duplex = setting->duplex; + if (c) { + phydev->speed = c->speed; + phydev->duplex = c->duplex; } else { /* We failed to find anything (no supported speeds?) */ phydev->speed = SPEED_UNKNOWN; @@ -302,7 +287,7 @@ void phy_ethtool_ksettings_get(struct phy_device *phydev, cmd->base.port = PORT_BNC; else cmd->base.port = phydev->port; - cmd->base.transceiver = phy_is_internal(phydev) ? + cmd->base.transceiver = phydev->is_internal ? 
XCVR_INTERNAL : XCVR_EXTERNAL; cmd->base.phy_address = phydev->mdio.addr; cmd->base.autoneg = phydev->autoneg; @@ -520,12 +505,12 @@ int __phy_hwtstamp_set(struct phy_device *phydev, * @phydev: the phy_device struct * @jiffies: Run the state machine after these jiffies */ -void phy_queue_state_machine(struct phy_device *phydev, unsigned long jiffies) +static void phy_queue_state_machine(struct phy_device *phydev, + unsigned long jiffies) { mod_delayed_work(system_power_efficient_wq, &phydev->state_queue, jiffies); } -EXPORT_SYMBOL(phy_queue_state_machine); /** * phy_trigger_machine - Trigger the state machine to run now @@ -1031,7 +1016,7 @@ static int phy_check_link_status(struct phy_device *phydev) if (phydev->link && phydev->state != PHY_RUNNING) { phy_check_downshift(phydev); phydev->state = PHY_RUNNING; - err = genphy_c45_eee_is_active(phydev, NULL, NULL); + err = genphy_c45_eee_is_active(phydev, NULL); phydev->eee_active = err > 0; phydev->enable_tx_lpi = phydev->eee_cfg.tx_lpi_enabled && phydev->eee_active; @@ -1501,6 +1486,24 @@ void phy_free_interrupt(struct phy_device *phydev) } EXPORT_SYMBOL(phy_free_interrupt); +/** + * phy_get_next_update_time - Determine the next PHY update time + * @phydev: Pointer to the phy_device structure + * + * This function queries the PHY driver to get the time for the next polling + * event. If the driver does not implement the callback, a default value is + * used. + * + * Return: The time for the next polling event in jiffies. + */ +static unsigned int phy_get_next_update_time(struct phy_device *phydev) +{ + if (phydev->drv && phydev->drv->get_next_update_time) + return phydev->drv->get_next_update_time(phydev); + + return PHY_STATE_TIME; +} + enum phy_state_work { PHY_STATE_WORK_NONE, PHY_STATE_WORK_ANEG, @@ -1580,7 +1583,8 @@ static enum phy_state_work _phy_state_machine(struct phy_device *phydev) * called from phy_disconnect() synchronously. */ if (phy_polling_mode(phydev) && phy_is_started(phydev)) - phy_queue_state_machine(phydev, PHY_STATE_TIME); + phy_queue_state_machine(phydev, + phy_get_next_update_time(phydev)); return state_work; } @@ -1703,6 +1707,93 @@ void phy_mac_interrupt(struct phy_device *phydev) } EXPORT_SYMBOL(phy_mac_interrupt); +/** + * phy_loopback - Configure loopback mode of PHY + * @phydev: target phy_device struct + * @enable: enable or disable loopback mode + * @speed: speed to use in loopback mode, or 0 to keep the current speed + * + * Configure the loopback mode of the PHY, signaling link down and link up if + * the speed changes. + * + * Return: 0 on success, negative error code on failure. + */ +int phy_loopback(struct phy_device *phydev, bool enable, int speed) +{ + bool link_up = false; + int ret = 0; + + if (!phydev->drv) + return -EIO; + + mutex_lock(&phydev->lock); + + if (enable && phydev->loopback_enabled) { + ret = -EBUSY; + goto out; + } + + if (!enable && !phydev->loopback_enabled) { + ret = -EINVAL; + goto out; + } + + if (enable) { + /* + * Link up is signaled with a defined speed. If the speed + * changes, link down must be signaled first, then link up + * at the new speed. 
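A hypothetical MAC selftest sketch of the extended API; forcing SPEED_100 is an arbitrary choice, and passing 0 on disable restores normal operation:

static int example_selftest(struct phy_device *phydev)
{
	int ret;

	/* Force 100 Mb/s PHY loopback for a frame self-test */
	ret = phy_loopback(phydev, true, SPEED_100);
	if (ret)
		return ret;

	/* ... transmit and verify looped-back frames here ... */

	return phy_loopback(phydev, false, 0);
}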
+ */ + if (phydev->link && phydev->state == PHY_RUNNING) { + /* link is up and signaled */ + if (speed && phydev->speed != speed) { + /* signal link down and up for new speed */ + phydev->link = false; + phydev->state = PHY_NOLINK; + phy_link_down(phydev); + + link_up = true; + } + } else { + /* link is not signaled */ + if (speed) { + /* signal link up for new speed */ + link_up = true; + } + } + } + + if (phydev->drv->set_loopback) + ret = phydev->drv->set_loopback(phydev, enable, speed); + else + ret = genphy_loopback(phydev, enable, speed); + + if (ret) { + if (enable) { + /* try to restore link if enabling loopback fails */ + if (phydev->drv->set_loopback) + phydev->drv->set_loopback(phydev, false, 0); + else + genphy_loopback(phydev, false, 0); + } + + goto out; + } + + if (link_up) { + phydev->link = true; + phydev->state = PHY_RUNNING; + phy_link_up(phydev); + } + + phydev->loopback_enabled = enable; + +out: + mutex_unlock(&phydev->lock); + return ret; +} +EXPORT_SYMBOL(phy_loopback); + /** * phy_eee_tx_clock_stop_capable() - indicate whether the MAC can stop tx clock * @phydev: target phy_device struct @@ -1761,7 +1852,7 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable) if (!phydev->drv) return -EIO; - ret = genphy_c45_eee_is_active(phydev, NULL, NULL); + ret = genphy_c45_eee_is_active(phydev, NULL); if (ret < 0) return ret; if (!ret) diff --git a/drivers/net/phy/phy_caps.c b/drivers/net/phy/phy_caps.c new file mode 100644 index 0000000000000..7033216897264 --- /dev/null +++ b/drivers/net/phy/phy_caps.c @@ -0,0 +1,359 @@ +// SPDX-License-Identifier: GPL-2.0-or-later + +#include +#include +#include + +#include "phy-caps.h" + +static struct link_capabilities link_caps[__LINK_CAPA_MAX] __ro_after_init = { + { SPEED_10, DUPLEX_HALF, {0} }, /* LINK_CAPA_10HD */ + { SPEED_10, DUPLEX_FULL, {0} }, /* LINK_CAPA_10FD */ + { SPEED_100, DUPLEX_HALF, {0} }, /* LINK_CAPA_100HD */ + { SPEED_100, DUPLEX_FULL, {0} }, /* LINK_CAPA_100FD */ + { SPEED_1000, DUPLEX_HALF, {0} }, /* LINK_CAPA_1000HD */ + { SPEED_1000, DUPLEX_FULL, {0} }, /* LINK_CAPA_1000FD */ + { SPEED_2500, DUPLEX_FULL, {0} }, /* LINK_CAPA_2500FD */ + { SPEED_5000, DUPLEX_FULL, {0} }, /* LINK_CAPA_5000FD */ + { SPEED_10000, DUPLEX_FULL, {0} }, /* LINK_CAPA_10000FD */ + { SPEED_20000, DUPLEX_FULL, {0} }, /* LINK_CAPA_20000FD */ + { SPEED_25000, DUPLEX_FULL, {0} }, /* LINK_CAPA_25000FD */ + { SPEED_40000, DUPLEX_FULL, {0} }, /* LINK_CAPA_40000FD */ + { SPEED_50000, DUPLEX_FULL, {0} }, /* LINK_CAPA_50000FD */ + { SPEED_56000, DUPLEX_FULL, {0} }, /* LINK_CAPA_56000FD */ + { SPEED_100000, DUPLEX_FULL, {0} }, /* LINK_CAPA_100000FD */ + { SPEED_200000, DUPLEX_FULL, {0} }, /* LINK_CAPA_200000FD */ + { SPEED_400000, DUPLEX_FULL, {0} }, /* LINK_CAPA_400000FD */ + { SPEED_800000, DUPLEX_FULL, {0} }, /* LINK_CAPA_800000FD */ +}; + +static int speed_duplex_to_capa(int speed, unsigned int duplex) +{ + if (duplex == DUPLEX_UNKNOWN || + (speed > SPEED_1000 && duplex != DUPLEX_FULL)) + return -EINVAL; + + switch (speed) { + case SPEED_10: return duplex == DUPLEX_FULL ? + LINK_CAPA_10FD : LINK_CAPA_10HD; + case SPEED_100: return duplex == DUPLEX_FULL ? + LINK_CAPA_100FD : LINK_CAPA_100HD; + case SPEED_1000: return duplex == DUPLEX_FULL ? 
+ LINK_CAPA_1000FD : LINK_CAPA_1000HD; + case SPEED_2500: return LINK_CAPA_2500FD; + case SPEED_5000: return LINK_CAPA_5000FD; + case SPEED_10000: return LINK_CAPA_10000FD; + case SPEED_20000: return LINK_CAPA_20000FD; + case SPEED_25000: return LINK_CAPA_25000FD; + case SPEED_40000: return LINK_CAPA_40000FD; + case SPEED_50000: return LINK_CAPA_50000FD; + case SPEED_56000: return LINK_CAPA_56000FD; + case SPEED_100000: return LINK_CAPA_100000FD; + case SPEED_200000: return LINK_CAPA_200000FD; + case SPEED_400000: return LINK_CAPA_400000FD; + case SPEED_800000: return LINK_CAPA_800000FD; + } + + return -EINVAL; +} + +#define for_each_link_caps_asc_speed(cap) \ + for (cap = link_caps; cap < &link_caps[__LINK_CAPA_MAX]; cap++) + +#define for_each_link_caps_desc_speed(cap) \ + for (cap = &link_caps[__LINK_CAPA_MAX - 1]; cap >= link_caps; cap--) + +/** + * phy_caps_init() - Initializes the link_caps array from the link_mode_params. + * + * Returns: 0 if phy caps init was successful, -EINVAL if we found an + * unexpected linkmode setting that requires LINK_CAPS update. + * + */ +int phy_caps_init(void) +{ + const struct link_mode_info *linkmode; + int i, capa; + + /* Fill the caps array from net/ethtool/common.c */ + for (i = 0; i < __ETHTOOL_LINK_MODE_MASK_NBITS; i++) { + linkmode = &link_mode_params[i]; + capa = speed_duplex_to_capa(linkmode->speed, linkmode->duplex); + + if (capa < 0) { + if (linkmode->speed != SPEED_UNKNOWN) { + pr_err("Unknown speed %d, please update LINK_CAPS\n", + linkmode->speed); + return -EINVAL; + } + continue; + } + + __set_bit(i, link_caps[capa].linkmodes); + } + + return 0; +} + +/** + * phy_caps_speeds() - Fill an array of supported SPEED_* values for given modes + * @speeds: Output array to store the speeds list into + * @size: Size of the output array + * @linkmodes: Linkmodes to get the speeds from + * + * Fills the speeds array with all possible speeds that can be achieved with + * the specified linkmodes. + * + * Returns: The number of speeds filled into the array. If the input array isn't + * big enough to store all speeds, fill it as much as possible. + */ +size_t phy_caps_speeds(unsigned int *speeds, size_t size, + unsigned long *linkmodes) +{ + struct link_capabilities *lcap; + size_t count = 0; + + for_each_link_caps_asc_speed(lcap) { + if (linkmode_intersects(lcap->linkmodes, linkmodes) && + (count == 0 || speeds[count - 1] != lcap->speed)) { + speeds[count++] = lcap->speed; + if (count >= size) + break; + } + } + + return count; +} + +/** + * phy_caps_lookup_by_linkmode() - Lookup the fastest matching link_capabilities + * @linkmodes: Linkmodes to match against + * + * Returns: The highest-speed link_capabilities that intersects the given + * linkmodes. In case several DUPLEX_ options exist at that speed, + * DUPLEX_FULL is matched first. NULL is returned if no match. + */ +const struct link_capabilities * +phy_caps_lookup_by_linkmode(const unsigned long *linkmodes) +{ + struct link_capabilities *lcap; + + for_each_link_caps_desc_speed(lcap) + if (linkmode_intersects(lcap->linkmodes, linkmodes)) + return lcap; + + return NULL; +} + +/** + * phy_caps_lookup_by_linkmode_rev() - Lookup the slowest matching link_capabilities + * @linkmodes: Linkmodes to match against + * @fdx_only: Full duplex match only when set + * + * Returns: The lowest-speed link_capabilities that intersects the given + * linkmodes. When set, fdx_only will ignore half-duplex matches. + * NULL is returned if no match. 
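For instance, phy_resolve_min_speed() in phy-core.c above resolves the slowest common full-duplex speed roughly like this sketch (example_min_fdx_speed() is a hypothetical name):

static int example_min_fdx_speed(struct phy_device *phydev)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(common);
	const struct link_capabilities *c;

	linkmode_and(common, phydev->lp_advertising, phydev->advertising);

	c = phy_caps_lookup_by_linkmode_rev(common, true); /* full duplex only */
	return c ? c->speed : SPEED_UNKNOWN;
}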
+ */ +const struct link_capabilities * +phy_caps_lookup_by_linkmode_rev(const unsigned long *linkmodes, bool fdx_only) +{ + struct link_capabilities *lcap; + + for_each_link_caps_asc_speed(lcap) { + if (fdx_only && lcap->duplex != DUPLEX_FULL) + continue; + + if (linkmode_intersects(lcap->linkmodes, linkmodes)) + return lcap; + } + + return NULL; +} + +/** + * phy_caps_lookup() - Lookup capabilities by speed/duplex that matches a mask + * @speed: Speed to match + * @duplex: Duplex to match + * @supported: Mask of linkmodes to match + * @exact: Perform an exact match or not. + * + * Lookup a link_capabilities entry that intersects the supported linkmodes mask, + * and that matches the passed speed and duplex. + * + * When @exact is set, an exact match is performed on speed and duplex, meaning + * that if the linkmodes for the given speed and duplex intersect the supported + * mask, this capability is returned, otherwise we don't have a match and return + * NULL. + * + * When @exact is not set, we return either an exact match, or matching capabilities + * at lower speed, or the lowest matching speed, or NULL. + * + * Returns: a matched link_capabilities according to the above process, NULL + * otherwise. + */ +const struct link_capabilities * +phy_caps_lookup(int speed, unsigned int duplex, const unsigned long *supported, + bool exact) +{ + const struct link_capabilities *lcap, *last = NULL; + + for_each_link_caps_desc_speed(lcap) { + if (linkmode_intersects(lcap->linkmodes, supported)) { + last = lcap; + /* exact match on speed and duplex */ + if (lcap->speed == speed && lcap->duplex == duplex) { + return lcap; + } else if (!exact) { + if (lcap->speed <= speed) + return lcap; + } + } + } + + if (!exact) + return last; + + return NULL; +} +EXPORT_SYMBOL_GPL(phy_caps_lookup); + +/** + * phy_caps_linkmode_max_speed() - Clamp a linkmodes set to a max speed + * @max_speed: Speed limit for the linkmode set + * @linkmodes: Linkmodes to limit + */ +void phy_caps_linkmode_max_speed(u32 max_speed, unsigned long *linkmodes) +{ + struct link_capabilities *lcap; + + for_each_link_caps_desc_speed(lcap) + if (lcap->speed > max_speed) + linkmode_andnot(linkmodes, linkmodes, lcap->linkmodes); + else + break; +} + +/** + * phy_caps_valid() - Validate a linkmodes set against given speed and duplex + * @speed: input speed to validate + * @duplex: input duplex to validate. Passing DUPLEX_UNKNOWN is never valid + * @linkmodes: The linkmodes to validate + * + * Returns: True if at least one of the linkmodes in @linkmodes can function at + * the given speed and duplex, false otherwise. + */ +bool phy_caps_valid(int speed, int duplex, const unsigned long *linkmodes) +{ + int capa = speed_duplex_to_capa(speed, duplex); + + if (capa < 0) + return false; + + return linkmode_intersects(link_caps[capa].linkmodes, linkmodes); +} + +/** + * phy_caps_linkmodes() - Convert a bitfield of capabilities into linkmodes + * @caps: The list of caps, each bit corresponding to a LINK_CAPA value + * @linkmodes: The set of linkmodes to fill. Must be previously initialized. 
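Taken together, the clamp and validation helpers above support patterns like the following sketch; the 100 Mb/s limit is arbitrary:

/* Clamp the supported modes to 100 Mb/s, then confirm that 100/Full
 * is still achievable from what remains.
 */
phy_caps_linkmode_max_speed(SPEED_100, phydev->supported);

if (!phy_caps_valid(SPEED_100, DUPLEX_FULL, phydev->supported))
	phydev_warn(phydev, "100/Full not achievable\n");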
+ */ +void phy_caps_linkmodes(unsigned long caps, unsigned long *linkmodes) +{ + unsigned long capa; + + for_each_set_bit(capa, &caps, __LINK_CAPA_MAX) + linkmode_or(linkmodes, linkmodes, link_caps[capa].linkmodes); +} +EXPORT_SYMBOL_GPL(phy_caps_linkmodes); + +/** + * phy_caps_from_interface() - Get the link capa from a given PHY interface + * @interface: The PHY interface we want to get the possible Speed/Duplex from + * + * Returns: A bitmask of LINK_CAPA_xxx values that can be achieved with the + * provided interface. + */ +unsigned long phy_caps_from_interface(phy_interface_t interface) +{ + unsigned long link_caps = 0; + + switch (interface) { + case PHY_INTERFACE_MODE_USXGMII: + link_caps |= BIT(LINK_CAPA_10000FD) | BIT(LINK_CAPA_5000FD); + fallthrough; + + case PHY_INTERFACE_MODE_10G_QXGMII: + link_caps |= BIT(LINK_CAPA_2500FD); + fallthrough; + + case PHY_INTERFACE_MODE_RGMII_TXID: + case PHY_INTERFACE_MODE_RGMII_RXID: + case PHY_INTERFACE_MODE_RGMII_ID: + case PHY_INTERFACE_MODE_RGMII: + case PHY_INTERFACE_MODE_PSGMII: + case PHY_INTERFACE_MODE_QSGMII: + case PHY_INTERFACE_MODE_QUSGMII: + case PHY_INTERFACE_MODE_SGMII: + case PHY_INTERFACE_MODE_GMII: + link_caps |= BIT(LINK_CAPA_1000HD) | BIT(LINK_CAPA_1000FD); + fallthrough; + + case PHY_INTERFACE_MODE_REVRMII: + case PHY_INTERFACE_MODE_RMII: + case PHY_INTERFACE_MODE_SMII: + case PHY_INTERFACE_MODE_REVMII: + case PHY_INTERFACE_MODE_MII: + link_caps |= BIT(LINK_CAPA_10HD) | BIT(LINK_CAPA_10FD); + fallthrough; + + case PHY_INTERFACE_MODE_100BASEX: + link_caps |= BIT(LINK_CAPA_100HD) | BIT(LINK_CAPA_100FD); + break; + + case PHY_INTERFACE_MODE_TBI: + case PHY_INTERFACE_MODE_MOCA: + case PHY_INTERFACE_MODE_RTBI: + case PHY_INTERFACE_MODE_1000BASEX: + link_caps |= BIT(LINK_CAPA_1000HD); + fallthrough; + case PHY_INTERFACE_MODE_1000BASEKX: + case PHY_INTERFACE_MODE_TRGMII: + link_caps |= BIT(LINK_CAPA_1000FD); + break; + + case PHY_INTERFACE_MODE_2500BASEX: + link_caps |= BIT(LINK_CAPA_2500FD); + break; + + case PHY_INTERFACE_MODE_5GBASER: + link_caps |= BIT(LINK_CAPA_5000FD); + break; + + case PHY_INTERFACE_MODE_XGMII: + case PHY_INTERFACE_MODE_RXAUI: + case PHY_INTERFACE_MODE_XAUI: + case PHY_INTERFACE_MODE_10GBASER: + case PHY_INTERFACE_MODE_10GKR: + link_caps |= BIT(LINK_CAPA_10000FD); + break; + + case PHY_INTERFACE_MODE_25GBASER: + link_caps |= BIT(LINK_CAPA_25000FD); + break; + + case PHY_INTERFACE_MODE_XLGMII: + link_caps |= BIT(LINK_CAPA_40000FD); + break; + + case PHY_INTERFACE_MODE_INTERNAL: + link_caps |= LINK_CAPA_ALL; + break; + + case PHY_INTERFACE_MODE_NA: + case PHY_INTERFACE_MODE_MAX: + break; + } + + return link_caps; +} +EXPORT_SYMBOL_GPL(phy_caps_from_interface); diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 46713d27412b7..675fbd2253787 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -41,10 +41,24 @@ #include #include +#include "phylib-internal.h" +#include "phy-caps.h" + MODULE_DESCRIPTION("PHY library"); MODULE_AUTHOR("Andy Fleming"); MODULE_LICENSE("GPL"); +#define PHY_ANY_ID "MATCH ANY PHY" +#define PHY_ANY_UID 0xffffffff + +struct phy_fixup { + struct list_head list; + char bus_id[MII_BUS_ID_SIZE + 3]; + u32 phy_uid; + u32 phy_uid_mask; + int (*run)(struct phy_device *phydev); +}; + __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_basic_features) __ro_after_init; EXPORT_SYMBOL_GPL(phy_basic_features); @@ -80,37 +94,28 @@ static const int phy_all_ports_features_array[7] = { ETHTOOL_LINK_MODE_Backplane_BIT, }; -const int phy_10_100_features_array[4] = { 
+static const int phy_10_100_features_array[4] = { ETHTOOL_LINK_MODE_10baseT_Half_BIT, ETHTOOL_LINK_MODE_10baseT_Full_BIT, ETHTOOL_LINK_MODE_100baseT_Half_BIT, ETHTOOL_LINK_MODE_100baseT_Full_BIT, }; -EXPORT_SYMBOL_GPL(phy_10_100_features_array); -const int phy_basic_t1_features_array[3] = { +static const int phy_basic_t1_features_array[3] = { ETHTOOL_LINK_MODE_TP_BIT, ETHTOOL_LINK_MODE_10baseT1L_Full_BIT, ETHTOOL_LINK_MODE_100baseT1_Full_BIT, }; -EXPORT_SYMBOL_GPL(phy_basic_t1_features_array); -const int phy_basic_t1s_p2mp_features_array[2] = { +static const int phy_basic_t1s_p2mp_features_array[2] = { ETHTOOL_LINK_MODE_TP_BIT, ETHTOOL_LINK_MODE_10baseT1S_P2MP_Half_BIT, }; -EXPORT_SYMBOL_GPL(phy_basic_t1s_p2mp_features_array); -const int phy_gbit_features_array[2] = { +static const int phy_gbit_features_array[2] = { ETHTOOL_LINK_MODE_1000baseT_Half_BIT, ETHTOOL_LINK_MODE_1000baseT_Full_BIT, }; -EXPORT_SYMBOL_GPL(phy_gbit_features_array); - -const int phy_10gbit_features_array[1] = { - ETHTOOL_LINK_MODE_10000baseT_Full_BIT, -}; -EXPORT_SYMBOL_GPL(phy_10gbit_features_array); static const int phy_eee_cap1_features_array[] = { ETHTOOL_LINK_MODE_100baseT_Full_BIT, @@ -185,9 +190,8 @@ static void features_init(void) linkmode_set_bit_array(phy_gbit_features_array, ARRAY_SIZE(phy_gbit_features_array), phy_10gbit_features); - linkmode_set_bit_array(phy_10gbit_features_array, - ARRAY_SIZE(phy_10gbit_features_array), - phy_10gbit_features); + linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, + phy_10gbit_features); linkmode_set_bit_array(phy_eee_cap1_features_array, ARRAY_SIZE(phy_eee_cap1_features_array), @@ -378,8 +382,8 @@ static SIMPLE_DEV_PM_OPS(mdio_bus_phy_pm_ops, mdio_bus_phy_suspend, * comparison * @run: The actual code to be run when a matching PHY is found */ -int phy_register_fixup(const char *bus_id, u32 phy_uid, u32 phy_uid_mask, - int (*run)(struct phy_device *)) +static int phy_register_fixup(const char *bus_id, u32 phy_uid, u32 phy_uid_mask, + int (*run)(struct phy_device *)) { struct phy_fixup *fixup = kzalloc(sizeof(*fixup), GFP_KERNEL); @@ -397,7 +401,6 @@ int phy_register_fixup(const char *bus_id, u32 phy_uid, u32 phy_uid_mask, return 0; } -EXPORT_SYMBOL(phy_register_fixup); /* Registers a fixup to be run on any PHY with the UID in phy_uid */ int phy_register_fixup_for_uid(u32 phy_uid, u32 phy_uid_mask, @@ -544,7 +547,7 @@ phy_interface_show(struct device *dev, struct device_attribute *attr, char *buf) struct phy_device *phydev = to_phy_device(dev); const char *mode = NULL; - if (phy_is_internal(phydev)) + if (phydev->is_internal) mode = "internal"; else mode = phy_modes(phydev->interface); @@ -1684,243 +1687,6 @@ bool phy_driver_is_genphy_10g(struct phy_device *phydev) } EXPORT_SYMBOL_GPL(phy_driver_is_genphy_10g); -/** - * phy_package_join - join a common PHY group - * @phydev: target phy_device struct - * @base_addr: cookie and base PHY address of PHY package for offset - * calculation of global register access - * @priv_size: if non-zero allocate this amount of bytes for private data - * - * This joins a PHY group and provides a shared storage for all phydevs in - * this group. This is intended to be used for packages which contain - * more than one PHY, for example a quad PHY transceiver. - * - * The base_addr parameter serves as cookie which has to have the same values - * for all members of one group and as the base PHY address of the PHY package - * for offset calculation to access generic registers of a PHY package. 
- * Usually, one of the PHY addresses of the different PHYs in the package - * provides access to these global registers. - * The address which is given here, will be used in the phy_package_read() - * and phy_package_write() convenience functions as base and added to the - * passed offset in those functions. - * - * This will set the shared pointer of the phydev to the shared storage. - * If this is the first call for a this cookie the shared storage will be - * allocated. If priv_size is non-zero, the given amount of bytes are - * allocated for the priv member. - * - * Returns < 1 on error, 0 on success. Esp. calling phy_package_join() - * with the same cookie but a different priv_size is an error. - */ -int phy_package_join(struct phy_device *phydev, int base_addr, size_t priv_size) -{ - struct mii_bus *bus = phydev->mdio.bus; - struct phy_package_shared *shared; - int ret; - - if (base_addr < 0 || base_addr >= PHY_MAX_ADDR) - return -EINVAL; - - mutex_lock(&bus->shared_lock); - shared = bus->shared[base_addr]; - if (!shared) { - ret = -ENOMEM; - shared = kzalloc(sizeof(*shared), GFP_KERNEL); - if (!shared) - goto err_unlock; - if (priv_size) { - shared->priv = kzalloc(priv_size, GFP_KERNEL); - if (!shared->priv) - goto err_free; - shared->priv_size = priv_size; - } - shared->base_addr = base_addr; - shared->np = NULL; - refcount_set(&shared->refcnt, 1); - bus->shared[base_addr] = shared; - } else { - ret = -EINVAL; - if (priv_size && priv_size != shared->priv_size) - goto err_unlock; - refcount_inc(&shared->refcnt); - } - mutex_unlock(&bus->shared_lock); - - phydev->shared = shared; - - return 0; - -err_free: - kfree(shared); -err_unlock: - mutex_unlock(&bus->shared_lock); - return ret; -} -EXPORT_SYMBOL_GPL(phy_package_join); - -/** - * of_phy_package_join - join a common PHY group in PHY package - * @phydev: target phy_device struct - * @priv_size: if non-zero allocate this amount of bytes for private data - * - * This is a variant of phy_package_join for PHY package defined in DT. - * - * The parent node of the @phydev is checked as a valid PHY package node - * structure (by matching the node name "ethernet-phy-package") and the - * base_addr for the PHY package is passed to phy_package_join. - * - * With this configuration the shared struct will also have the np value - * filled to use additional DT defined properties in PHY specific - * probe_once and config_init_once PHY package OPs. - * - * Returns < 0 on error, 0 on success. Esp. calling phy_package_join() - * with the same cookie but a different priv_size is an error. Or a parent - * node is not detected or is not valid or doesn't match the expected node - * name for PHY package. 
- */ -int of_phy_package_join(struct phy_device *phydev, size_t priv_size) -{ - struct device_node *node = phydev->mdio.dev.of_node; - struct device_node *package_node; - u32 base_addr; - int ret; - - if (!node) - return -EINVAL; - - package_node = of_get_parent(node); - if (!package_node) - return -EINVAL; - - if (!of_node_name_eq(package_node, "ethernet-phy-package")) { - ret = -EINVAL; - goto exit; - } - - if (of_property_read_u32(package_node, "reg", &base_addr)) { - ret = -EINVAL; - goto exit; - } - - ret = phy_package_join(phydev, base_addr, priv_size); - if (ret) - goto exit; - - phydev->shared->np = package_node; - - return 0; -exit: - of_node_put(package_node); - return ret; -} -EXPORT_SYMBOL_GPL(of_phy_package_join); - -/** - * phy_package_leave - leave a common PHY group - * @phydev: target phy_device struct - * - * This leaves a PHY group created by phy_package_join(). If this phydev - * was the last user of the shared data between the group, this data is - * freed. Resets the phydev->shared pointer to NULL. - */ -void phy_package_leave(struct phy_device *phydev) -{ - struct phy_package_shared *shared = phydev->shared; - struct mii_bus *bus = phydev->mdio.bus; - - if (!shared) - return; - - /* Decrease the node refcount on leave if present */ - if (shared->np) - of_node_put(shared->np); - - if (refcount_dec_and_mutex_lock(&shared->refcnt, &bus->shared_lock)) { - bus->shared[shared->base_addr] = NULL; - mutex_unlock(&bus->shared_lock); - kfree(shared->priv); - kfree(shared); - } - - phydev->shared = NULL; -} -EXPORT_SYMBOL_GPL(phy_package_leave); - -static void devm_phy_package_leave(struct device *dev, void *res) -{ - phy_package_leave(*(struct phy_device **)res); -} - -/** - * devm_phy_package_join - resource managed phy_package_join() - * @dev: device that is registering this PHY package - * @phydev: target phy_device struct - * @base_addr: cookie and base PHY address of PHY package for offset - * calculation of global register access - * @priv_size: if non-zero allocate this amount of bytes for private data - * - * Managed phy_package_join(). Shared storage fetched by this function, - * phy_package_leave() is automatically called on driver detach. See - * phy_package_join() for more information. - */ -int devm_phy_package_join(struct device *dev, struct phy_device *phydev, - int base_addr, size_t priv_size) -{ - struct phy_device **ptr; - int ret; - - ptr = devres_alloc(devm_phy_package_leave, sizeof(*ptr), - GFP_KERNEL); - if (!ptr) - return -ENOMEM; - - ret = phy_package_join(phydev, base_addr, priv_size); - - if (!ret) { - *ptr = phydev; - devres_add(dev, ptr); - } else { - devres_free(ptr); - } - - return ret; -} -EXPORT_SYMBOL_GPL(devm_phy_package_join); - -/** - * devm_of_phy_package_join - resource managed of_phy_package_join() - * @dev: device that is registering this PHY package - * @phydev: target phy_device struct - * @priv_size: if non-zero allocate this amount of bytes for private data - * - * Managed of_phy_package_join(). Shared storage fetched by this function, - * phy_package_leave() is automatically called on driver detach. See - * of_phy_package_join() for more information. 
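The join API stays available to drivers even though its implementation leaves this file; a probe-time usage sketch, where struct example_shared and example_probe() are hypothetical:

/* Hypothetical package-private data; the real layout is driver-specific. */
struct example_shared {
	int package_users;
};

static int example_probe(struct phy_device *phydev)
{
	struct device *dev = &phydev->mdio.dev;
	int ret;

	/* Join the package declared under an "ethernet-phy-package" DT node;
	 * shared storage is allocated on first join and freed automatically.
	 */
	ret = devm_of_phy_package_join(dev, phydev,
				       sizeof(struct example_shared));
	if (ret)
		return ret;

	/* The shared area is now reachable via phy_package_get_priv() */
	return 0;
}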
- */ -int devm_of_phy_package_join(struct device *dev, struct phy_device *phydev, - size_t priv_size) -{ - struct phy_device **ptr; - int ret; - - ptr = devres_alloc(devm_phy_package_leave, sizeof(*ptr), - GFP_KERNEL); - if (!ptr) - return -ENOMEM; - - ret = of_phy_package_join(phydev, priv_size); - - if (!ret) { - *ptr = phydev; - devres_add(dev, ptr); - } else { - devres_free(ptr); - } - - return ret; -} -EXPORT_SYMBOL_GPL(devm_of_phy_package_join); - /** * phy_detach - detach a PHY device from its network device * @phydev: target phy_device struct @@ -2052,41 +1818,6 @@ int phy_resume(struct phy_device *phydev) } EXPORT_SYMBOL(phy_resume); -int phy_loopback(struct phy_device *phydev, bool enable) -{ - int ret = 0; - - if (!phydev->drv) - return -EIO; - - mutex_lock(&phydev->lock); - - if (enable && phydev->loopback_enabled) { - ret = -EBUSY; - goto out; - } - - if (!enable && !phydev->loopback_enabled) { - ret = -EINVAL; - goto out; - } - - if (phydev->drv->set_loopback) - ret = phydev->drv->set_loopback(phydev, enable); - else - ret = genphy_loopback(phydev, enable); - - if (ret) - goto out; - - phydev->loopback_enabled = enable; - -out: - mutex_unlock(&phydev->lock); - return ret; -} -EXPORT_SYMBOL(phy_loopback); - /** * phy_reset_after_clk_enable - perform a PHY reset if needed * @phydev: target phy_device struct @@ -2354,7 +2085,7 @@ EXPORT_SYMBOL(genphy_check_and_restart_aneg); int __genphy_config_aneg(struct phy_device *phydev, bool changed) { __ETHTOOL_DECLARE_LINK_MODE_MASK(fixed_advert); - const struct phy_setting *set; + const struct link_capabilities *c; unsigned long *advert; int err; @@ -2380,10 +2111,11 @@ int __genphy_config_aneg(struct phy_device *phydev, bool changed) } else { linkmode_zero(fixed_advert); - set = phy_lookup_setting(phydev->speed, phydev->duplex, - phydev->supported, true); - if (set) - linkmode_set_bit(set->bit, fixed_advert); + c = phy_caps_lookup(phydev->speed, phydev->duplex, + phydev->supported, true); + if (c) + linkmode_and(fixed_advert, phydev->supported, + c->linkmodes); advert = fixed_advert; } @@ -2843,12 +2575,18 @@ int genphy_resume(struct phy_device *phydev) } EXPORT_SYMBOL(genphy_resume); -int genphy_loopback(struct phy_device *phydev, bool enable) +int genphy_loopback(struct phy_device *phydev, bool enable, int speed) { if (enable) { u16 ctl = BMCR_LOOPBACK; int ret, val; + if (speed == SPEED_10 || speed == SPEED_100 || + speed == SPEED_1000) + phydev->speed = speed; + else if (speed) + return -EINVAL; + ctl |= mii_bmcr_encode_fixed(phydev->speed, phydev->duplex); phy_modify(phydev, MII_BMCR, ~0, ctl); @@ -2966,7 +2704,7 @@ void phy_disable_eee(struct phy_device *phydev) phydev->eee_cfg.tx_lpi_enabled = false; phydev->eee_cfg.eee_enabled = false; /* don't let userspace re-enable EEE advertisement */ - linkmode_fill(phydev->eee_broken_modes); + linkmode_fill(phydev->eee_disabled_modes); } EXPORT_SYMBOL_GPL(phy_disable_eee); @@ -3096,19 +2834,12 @@ void phy_get_pause(struct phy_device *phydev, bool *tx_pause, bool *rx_pause) EXPORT_SYMBOL(phy_get_pause); #if IS_ENABLED(CONFIG_OF_MDIO) -static int phy_get_int_delay_property(struct device *dev, const char *name) +static int phy_get_u32_property(struct device *dev, const char *name, u32 *val) { - s32 int_delay; - int ret; - - ret = device_property_read_u32(dev, name, &int_delay); - if (ret) - return ret; - - return int_delay; + return device_property_read_u32(dev, name, val); } #else -static int phy_get_int_delay_property(struct device *dev, const char *name) +static int 
phy_get_u32_property(struct device *dev, const char *name, u32 *val) { return -EINVAL; } @@ -3133,12 +2864,12 @@ static int phy_get_int_delay_property(struct device *dev, const char *name) s32 phy_get_internal_delay(struct phy_device *phydev, struct device *dev, const int *delay_values, int size, bool is_rx) { - s32 delay; - int i; + int i, ret; + u32 delay; if (is_rx) { - delay = phy_get_int_delay_property(dev, "rx-internal-delay-ps"); - if (delay < 0 && size == 0) { + ret = phy_get_u32_property(dev, "rx-internal-delay-ps", &delay); + if (ret < 0 && size == 0) { if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID || phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) return 1; @@ -3147,8 +2878,8 @@ s32 phy_get_internal_delay(struct phy_device *phydev, struct device *dev, } } else { - delay = phy_get_int_delay_property(dev, "tx-internal-delay-ps"); - if (delay < 0 && size == 0) { + ret = phy_get_u32_property(dev, "tx-internal-delay-ps", &delay); + if (ret < 0 && size == 0) { if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID || phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) return 1; @@ -3157,8 +2888,8 @@ } - if (delay < 0) - return delay; + if (ret < 0) + return ret; if (size == 0) return delay; @@ -3193,6 +2924,30 @@ } EXPORT_SYMBOL(phy_get_internal_delay); +/** + * phy_get_tx_amplitude_gain - stores tx amplitude gain in @val + * @phydev: phy_device struct + * @dev: pointer to the device's struct device + * @linkmode: linkmode for which the tx amplitude gain should be retrieved + * @val: tx amplitude gain + * + * Returns: 0 on success, < 0 on failure + */ +int phy_get_tx_amplitude_gain(struct phy_device *phydev, struct device *dev, + enum ethtool_link_mode_bit_indices linkmode, + u32 *val) +{ + switch (linkmode) { + case ETHTOOL_LINK_MODE_100baseT_Full_BIT: + return phy_get_u32_property(dev, + "tx-amplitude-100base-tx-percent", + val); + default: + return -EINVAL; + } +} +EXPORT_SYMBOL_GPL(phy_get_tx_amplitude_gain); + static int phy_led_set_brightness(struct led_classdev *led_cdev, enum led_brightness value) { @@ -3563,22 +3318,21 @@ static int phy_probe(struct device *dev) if (err) goto out; - /* There is no "enabled" flag. If PHY is advertising, assume it is - * kind of enabled. - */ - phydev->eee_cfg.eee_enabled = !linkmode_empty(phydev->advertising_eee); + /* Get the EEE modes we want to prohibit. */ + of_set_phy_eee_broken(phydev); /* Some PHYs may advertise, by default, not support EEE modes. So, - * we need to clean them. + * we need to clean them. In addition, remove all disabled EEE modes. */ - if (phydev->eee_cfg.eee_enabled) - linkmode_and(phydev->advertising_eee, phydev->supported_eee, - phydev->advertising_eee); + linkmode_and(phydev->advertising_eee, phydev->supported_eee, + phydev->advertising_eee); + linkmode_andnot(phydev->advertising_eee, phydev->advertising_eee, + phydev->eee_disabled_modes); - /* Get the EEE modes we want to prohibit. We will ask - * the PHY stop advertising these mode later on + /* There is no "enabled" flag. If PHY is advertising, assume it is + * kind of enabled.
*/ - of_set_phy_eee_broken(phydev); + phydev->eee_cfg.eee_enabled = !linkmode_empty(phydev->advertising_eee); /* Get master/slave strap overrides */ of_set_phy_timing_role(phydev); @@ -3777,6 +3531,10 @@ static int __init phy_init(void) if (rc) goto err_ethtool_phy_ops; + rc = phy_caps_init(); + if (rc) + goto err_mdio_bus; + features_init(); rc = phy_driver_register(&genphy_c45_driver, THIS_MODULE); diff --git a/drivers/net/phy/phy_led_triggers.c b/drivers/net/phy/phy_led_triggers.c index f550576eb9dae..bd3c9554f6acb 100644 --- a/drivers/net/phy/phy_led_triggers.c +++ b/drivers/net/phy/phy_led_triggers.c @@ -5,6 +5,8 @@ #include #include +#include "phylib-internal.h" + static struct phy_led_trigger *phy_speed_to_led_trigger(struct phy_device *phy, unsigned int speed) { diff --git a/drivers/net/phy/phy_link_topology.c b/drivers/net/phy/phy_link_topology.c index 4a5d73002a1a8..0e9e987f37dd8 100644 --- a/drivers/net/phy/phy_link_topology.c +++ b/drivers/net/phy/phy_link_topology.c @@ -73,7 +73,7 @@ int phy_link_topo_add_phy(struct net_device *dev, xa_limit_32b, &topo->next_phy_index, GFP_KERNEL); - if (ret < 0) goto err; return 0; diff --git a/drivers/net/phy/phy_package.c b/drivers/net/phy/phy_package.c new file mode 100644 index 0000000000000..c738f76e8664b --- /dev/null +++ b/drivers/net/phy/phy_package.c @@ -0,0 +1,350 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * PHY package support + */ + +#include <linux/of.h> +#include <linux/phy.h> + +#include "phylib.h" +#include "phylib-internal.h" + +/** + * struct phy_package_shared - Shared information in PHY packages + * @base_addr: Base PHY address of PHY package used to combine PHYs + * in one package and for offset calculation of phy_package_read/write + * @np: Pointer to the Device Node if PHY package defined in DT + * @refcnt: Number of PHYs connected to this shared data + * @flags: Initialization state flags of the PHY package + * @priv_size: Size of the shared private data @priv + * @priv: Driver private data shared across a PHY package + * + * Represents a shared structure between different phydevs in the same + * package, for example a quad PHY. See phy_package_join() and + * phy_package_leave(). + */ +struct phy_package_shared { + u8 base_addr; + /* With PHY package defined in DT this points to the PHY package node */ + struct device_node *np; + refcount_t refcnt; + unsigned long flags; + size_t priv_size; + + /* private data pointer */ + /* note that this pointer is shared between different phydevs and + * the user has to take care of appropriate locking. It is allocated + * and freed automatically by phy_package_join() and + * phy_package_leave(). + */ + void *priv; +}; + +struct device_node *phy_package_get_node(struct phy_device *phydev) +{ + return phydev->shared->np; +} +EXPORT_SYMBOL_GPL(phy_package_get_node); + +void *phy_package_get_priv(struct phy_device *phydev) +{ + return phydev->shared->priv; +} +EXPORT_SYMBOL_GPL(phy_package_get_priv); + +int phy_package_address(struct phy_device *phydev, unsigned int addr_offset) +{ + struct phy_package_shared *shared = phydev->shared; + u8 base_addr = shared->base_addr; + + if (addr_offset >= PHY_MAX_ADDR - base_addr) + return -EIO; + + /* we know that addr will be in the range 0..31 and thus the + * implicit cast to a signed int is not a problem.
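+ * As a worked example (values are illustrative only): with base_addr 4, + * addr_offset 2 resolves to MDIO address 6, while addr_offset 28 fails + * the check above (28 >= PHY_MAX_ADDR - 4) and returns -EIO.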
+ */ + return base_addr + addr_offset; +} + +int __phy_package_read(struct phy_device *phydev, unsigned int addr_offset, + u32 regnum) +{ + int addr = phy_package_address(phydev, addr_offset); + + if (addr < 0) + return addr; + + return __mdiobus_read(phydev->mdio.bus, addr, regnum); +} +EXPORT_SYMBOL_GPL(__phy_package_read); + +int __phy_package_write(struct phy_device *phydev, unsigned int addr_offset, + u32 regnum, u16 val) +{ + int addr = phy_package_address(phydev, addr_offset); + + if (addr < 0) + return addr; + + return __mdiobus_write(phydev->mdio.bus, addr, regnum, val); +} +EXPORT_SYMBOL_GPL(__phy_package_write); + +static bool __phy_package_set_once(struct phy_device *phydev, unsigned int b) +{ + struct phy_package_shared *shared = phydev->shared; + + if (!shared) + return false; + + return !test_and_set_bit(b, &shared->flags); +} + +bool phy_package_init_once(struct phy_device *phydev) +{ + return __phy_package_set_once(phydev, 0); +} +EXPORT_SYMBOL_GPL(phy_package_init_once); + +bool phy_package_probe_once(struct phy_device *phydev) +{ + return __phy_package_set_once(phydev, 1); +} +EXPORT_SYMBOL_GPL(phy_package_probe_once); + +/** + * phy_package_join - join a common PHY group + * @phydev: target phy_device struct + * @base_addr: cookie and base PHY address of PHY package for offset + * calculation of global register access + * @priv_size: if non-zero allocate this amount of bytes for private data + * + * This joins a PHY group and provides a shared storage for all phydevs in + * this group. This is intended to be used for packages which contain + * more than one PHY, for example a quad PHY transceiver. + * + * The base_addr parameter serves as a cookie which has to have the same value + * for all members of one group and as the base PHY address of the PHY package + * for offset calculation to access generic registers of a PHY package. + * Usually, one of the PHY addresses of the different PHYs in the package + * provides access to these global registers. + * The address given here will be used in the phy_package_read() + * and phy_package_write() convenience functions as base and added to the + * passed offset in those functions. + * + * This will set the shared pointer of the phydev to the shared storage. + * If this is the first call for this cookie the shared storage will be + * allocated. If priv_size is non-zero, the given number of bytes is + * allocated for the priv member. + * + * Returns < 0 on error, 0 on success. In particular, calling phy_package_join() + * with the same cookie but a different priv_size is an error.
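+ * + * A minimal usage sketch (hypothetical driver code; the foo_* names are + * invented for illustration), joining a quad-PHY package from .probe() with + * per-package private data and one-time package setup: + * + *	struct foo_shared { int package_mode; }; + * + *	static int foo_probe(struct phy_device *phydev) + *	{ + *		int ret = phy_package_join(phydev, 0, sizeof(struct foo_shared)); + * + *		if (ret) + *			return ret; + * + *		if (phy_package_probe_once(phydev)) { + *			struct foo_shared *priv = phy_package_get_priv(phydev); + * + *			priv->package_mode = 0; + *		} + * + *		return 0; + *	}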
+ */ +int phy_package_join(struct phy_device *phydev, int base_addr, size_t priv_size) +{ + struct mii_bus *bus = phydev->mdio.bus; + struct phy_package_shared *shared; + int ret; + + if (base_addr < 0 || base_addr >= PHY_MAX_ADDR) + return -EINVAL; + + mutex_lock(&bus->shared_lock); + shared = bus->shared[base_addr]; + if (!shared) { + ret = -ENOMEM; + shared = kzalloc(sizeof(*shared), GFP_KERNEL); + if (!shared) + goto err_unlock; + if (priv_size) { + shared->priv = kzalloc(priv_size, GFP_KERNEL); + if (!shared->priv) + goto err_free; + shared->priv_size = priv_size; + } + shared->base_addr = base_addr; + shared->np = NULL; + refcount_set(&shared->refcnt, 1); + bus->shared[base_addr] = shared; + } else { + ret = -EINVAL; + if (priv_size && priv_size != shared->priv_size) + goto err_unlock; + refcount_inc(&shared->refcnt); + } + mutex_unlock(&bus->shared_lock); + + phydev->shared = shared; + + return 0; + +err_free: + kfree(shared); +err_unlock: + mutex_unlock(&bus->shared_lock); + return ret; +} +EXPORT_SYMBOL_GPL(phy_package_join); + +/** + * of_phy_package_join - join a common PHY group in PHY package + * @phydev: target phy_device struct + * @priv_size: if non-zero allocate this amount of bytes for private data + * + * This is a variant of phy_package_join() for a PHY package defined in DT. + * + * The parent node of the @phydev is checked as a valid PHY package node + * structure (by matching the node name "ethernet-phy-package") and the + * base_addr for the PHY package is passed to phy_package_join(). + * + * With this configuration, the shared struct will also have its np member + * set, so that additional DT-defined properties can be used in PHY-specific + * probe_once and config_init_once PHY package OPs. + * + * Returns < 0 on error, 0 on success. As with phy_package_join(), calling + * it with the same cookie but a different priv_size is an error, as is a + * missing, invalid, or wrongly named parent node for the PHY package. + */ +int of_phy_package_join(struct phy_device *phydev, size_t priv_size) +{ + struct device_node *node = phydev->mdio.dev.of_node; + struct device_node *package_node; + u32 base_addr; + int ret; + + if (!node) + return -EINVAL; + + package_node = of_get_parent(node); + if (!package_node) + return -EINVAL; + + if (!of_node_name_eq(package_node, "ethernet-phy-package")) { + ret = -EINVAL; + goto exit; + } + + if (of_property_read_u32(package_node, "reg", &base_addr)) { + ret = -EINVAL; + goto exit; + } + + ret = phy_package_join(phydev, base_addr, priv_size); + if (ret) + goto exit; + + phydev->shared->np = package_node; + + return 0; +exit: + of_node_put(package_node); + return ret; +} +EXPORT_SYMBOL_GPL(of_phy_package_join); + +/** + * phy_package_leave - leave a common PHY group + * @phydev: target phy_device struct + * + * This leaves a PHY group created by phy_package_join(). If this phydev + * was the last user of the shared data within the group, this data is + * freed. Resets the phydev->shared pointer to NULL.
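+ * + * Drivers that call phy_package_join() from their probe callback typically + * call phy_package_leave() from the corresponding remove path; the devm_* + * variants below do this automatically on driver detach.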
+ */ +void phy_package_leave(struct phy_device *phydev) +{ + struct phy_package_shared *shared = phydev->shared; + struct mii_bus *bus = phydev->mdio.bus; + + if (!shared) + return; + + /* Decrease the node refcount on leave if present */ + if (shared->np) + of_node_put(shared->np); + + if (refcount_dec_and_mutex_lock(&shared->refcnt, &bus->shared_lock)) { + bus->shared[shared->base_addr] = NULL; + mutex_unlock(&bus->shared_lock); + kfree(shared->priv); + kfree(shared); + } + + phydev->shared = NULL; +} +EXPORT_SYMBOL_GPL(phy_package_leave); + +static void devm_phy_package_leave(struct device *dev, void *res) +{ + phy_package_leave(*(struct phy_device **)res); +} + +/** + * devm_phy_package_join - resource managed phy_package_join() + * @dev: device that is registering this PHY package + * @phydev: target phy_device struct + * @base_addr: cookie and base PHY address of PHY package for offset + * calculation of global register access + * @priv_size: if non-zero allocate this amount of bytes for private data + * + * Managed phy_package_join(). Shared storage fetched by this function, + * phy_package_leave() is automatically called on driver detach. See + * phy_package_join() for more information. + */ +int devm_phy_package_join(struct device *dev, struct phy_device *phydev, + int base_addr, size_t priv_size) +{ + struct phy_device **ptr; + int ret; + + ptr = devres_alloc(devm_phy_package_leave, sizeof(*ptr), + GFP_KERNEL); + if (!ptr) + return -ENOMEM; + + ret = phy_package_join(phydev, base_addr, priv_size); + + if (!ret) { + *ptr = phydev; + devres_add(dev, ptr); + } else { + devres_free(ptr); + } + + return ret; +} +EXPORT_SYMBOL_GPL(devm_phy_package_join); + +/** + * devm_of_phy_package_join - resource managed of_phy_package_join() + * @dev: device that is registering this PHY package + * @phydev: target phy_device struct + * @priv_size: if non-zero allocate this amount of bytes for private data + * + * Managed of_phy_package_join(). Shared storage fetched by this function, + * phy_package_leave() is automatically called on driver detach. See + * of_phy_package_join() for more information. 
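+ * + * A minimal sketch (hypothetical driver code, reusing the foo_shared example + * from phy_package_join() above) of a DT-based join from .probe(), with the + * leave handled by devres: + * + *	ret = devm_of_phy_package_join(&phydev->mdio.dev, phydev, + *				       sizeof(struct foo_shared)); + *	if (ret) + *		return ret;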
+ */ +int devm_of_phy_package_join(struct device *dev, struct phy_device *phydev, + size_t priv_size) +{ + struct phy_device **ptr; + int ret; + + ptr = devres_alloc(devm_phy_package_leave, sizeof(*ptr), + GFP_KERNEL); + if (!ptr) + return -ENOMEM; + + ret = of_phy_package_join(phydev, priv_size); + + if (!ret) { + *ptr = phydev; + devres_add(dev, ptr); + } else { + devres_free(ptr); + } + + return ret; +} +EXPORT_SYMBOL_GPL(devm_of_phy_package_join); diff --git a/drivers/net/phy/phylib-internal.h b/drivers/net/phy/phylib-internal.h new file mode 100644 index 0000000000000..afac2bd15b502 --- /dev/null +++ b/drivers/net/phy/phylib-internal.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * phylib-internal header + */ + +#ifndef __PHYLIB_INTERNAL_H +#define __PHYLIB_INTERNAL_H + +struct phy_device; + +/* + * phy_supported_speeds - return all speeds currently supported by a PHY device + */ +unsigned int phy_supported_speeds(struct phy_device *phy, + unsigned int *speeds, + unsigned int size); +void of_set_phy_supported(struct phy_device *phydev); +void of_set_phy_eee_broken(struct phy_device *phydev); +void of_set_phy_timing_role(struct phy_device *phydev); +int phy_speed_down_core(struct phy_device *phydev); +void phy_check_downshift(struct phy_device *phydev); + +int phy_package_address(struct phy_device *phydev, unsigned int addr_offset); + +int genphy_c45_read_eee_adv(struct phy_device *phydev, unsigned long *adv); + +#endif /* __PHYLIB_INTERNAL_H */ diff --git a/drivers/net/phy/phylib.h b/drivers/net/phy/phylib.h new file mode 100644 index 0000000000000..c15484a805b39 --- /dev/null +++ b/drivers/net/phy/phylib.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * phylib header + */ + +#ifndef __PHYLIB_H +#define __PHYLIB_H + +struct device_node; +struct phy_device; + +struct device_node *phy_package_get_node(struct phy_device *phydev); +void *phy_package_get_priv(struct phy_device *phydev); +int __phy_package_read(struct phy_device *phydev, unsigned int addr_offset, + u32 regnum); +int __phy_package_write(struct phy_device *phydev, unsigned int addr_offset, + u32 regnum, u16 val); +int __phy_package_read_mmd(struct phy_device *phydev, + unsigned int addr_offset, int devad, + u32 regnum); +int __phy_package_write_mmd(struct phy_device *phydev, + unsigned int addr_offset, int devad, + u32 regnum, u16 val); +bool phy_package_init_once(struct phy_device *phydev); +bool phy_package_probe_once(struct phy_device *phydev); +int phy_package_join(struct phy_device *phydev, int base_addr, size_t priv_size); +int of_phy_package_join(struct phy_device *phydev, size_t priv_size); +void phy_package_leave(struct phy_device *phydev); +int devm_phy_package_join(struct device *dev, struct phy_device *phydev, + int base_addr, size_t priv_size); +int devm_of_phy_package_join(struct device *dev, struct phy_device *phydev, + size_t priv_size); + +#endif /* __PHYLIB_H */ diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index b00a315de0601..69ca765485db5 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -20,6 +20,7 @@ #include #include +#include "phy-caps.h" #include "sfp.h" #include "swphy.h" @@ -71,8 +72,6 @@ struct phylink { struct gpio_desc *link_gpio; unsigned int link_irq; struct timer_list link_poll; - void (*get_fixed_state)(struct net_device *dev, - struct phylink_link_state *s); struct mutex state_mutex; struct phylink_link_state phy_state; @@ -82,12 +81,14 @@ struct phylink { unsigned int pcs_state; bool 
link_failed; + bool major_config_failed; bool mac_supports_eee_ops; bool mac_supports_eee; bool phy_enable_tx_lpi; bool mac_enable_tx_lpi; bool mac_tx_clk_stop; u32 mac_tx_lpi_timer; + u8 mac_rx_clk_stop_blocked; struct sfp_bus *sfp_bus; bool sfp_may_have_phy; @@ -291,6 +292,61 @@ static int phylink_interface_max_speed(phy_interface_t interface) return SPEED_UNKNOWN; } +static struct { + unsigned long mask; + int speed; + unsigned int duplex; + unsigned int caps_bit; +} phylink_caps_params[] = { + { MAC_400000FD, SPEED_400000, DUPLEX_FULL, BIT(LINK_CAPA_400000FD) }, + { MAC_200000FD, SPEED_200000, DUPLEX_FULL, BIT(LINK_CAPA_200000FD) }, + { MAC_100000FD, SPEED_100000, DUPLEX_FULL, BIT(LINK_CAPA_100000FD) }, + { MAC_56000FD, SPEED_56000, DUPLEX_FULL, BIT(LINK_CAPA_56000FD) }, + { MAC_50000FD, SPEED_50000, DUPLEX_FULL, BIT(LINK_CAPA_50000FD) }, + { MAC_40000FD, SPEED_40000, DUPLEX_FULL, BIT(LINK_CAPA_40000FD) }, + { MAC_25000FD, SPEED_25000, DUPLEX_FULL, BIT(LINK_CAPA_25000FD) }, + { MAC_20000FD, SPEED_20000, DUPLEX_FULL, BIT(LINK_CAPA_20000FD) }, + { MAC_10000FD, SPEED_10000, DUPLEX_FULL, BIT(LINK_CAPA_10000FD) }, + { MAC_5000FD, SPEED_5000, DUPLEX_FULL, BIT(LINK_CAPA_5000FD) }, + { MAC_2500FD, SPEED_2500, DUPLEX_FULL, BIT(LINK_CAPA_2500FD) }, + { MAC_1000FD, SPEED_1000, DUPLEX_FULL, BIT(LINK_CAPA_1000FD) }, + { MAC_1000HD, SPEED_1000, DUPLEX_HALF, BIT(LINK_CAPA_1000HD) }, + { MAC_100FD, SPEED_100, DUPLEX_FULL, BIT(LINK_CAPA_100FD) }, + { MAC_100HD, SPEED_100, DUPLEX_HALF, BIT(LINK_CAPA_100HD) }, + { MAC_10FD, SPEED_10, DUPLEX_FULL, BIT(LINK_CAPA_10FD) }, + { MAC_10HD, SPEED_10, DUPLEX_HALF, BIT(LINK_CAPA_10HD) }, +}; + +/** + * phylink_caps_to_link_caps() - Convert a set of MAC capabilities to LINK caps + * @caps: A set of MAC capabilities + * + * Returns: The corresponding set of LINK_CAPA as defined in phy-caps.h + */ +static unsigned long phylink_caps_to_link_caps(unsigned long caps) +{ + unsigned long link_caps = 0; + int i; + + for (i = 0; i < ARRAY_SIZE(phylink_caps_params); i++) + if (caps & phylink_caps_params[i].mask) + link_caps |= phylink_caps_params[i].caps_bit; + + return link_caps; +} + +static unsigned long phylink_link_caps_to_mac_caps(unsigned long link_caps) +{ + unsigned long caps = 0; + int i; + + for (i = 0; i < ARRAY_SIZE(phylink_caps_params); i++) + if (link_caps & phylink_caps_params[i].caps_bit) + caps |= phylink_caps_params[i].mask; + + return caps; +} + /** * phylink_caps_to_linkmodes() - Convert capabilities to ethtool link modes * @linkmodes: ethtool linkmode mask (must be already initialised) @@ -302,172 +358,17 @@ static int phylink_interface_max_speed(phy_interface_t interface) static void phylink_caps_to_linkmodes(unsigned long *linkmodes, unsigned long caps) { + unsigned long link_caps = phylink_caps_to_link_caps(caps); + if (caps & MAC_SYM_PAUSE) __set_bit(ETHTOOL_LINK_MODE_Pause_BIT, linkmodes); if (caps & MAC_ASYM_PAUSE) __set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, linkmodes); - if (caps & MAC_10HD) { - __set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, linkmodes); - __set_bit(ETHTOOL_LINK_MODE_10baseT1S_Half_BIT, linkmodes); - __set_bit(ETHTOOL_LINK_MODE_10baseT1S_P2MP_Half_BIT, linkmodes); - } - - if (caps & MAC_10FD) { - __set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, linkmodes); - __set_bit(ETHTOOL_LINK_MODE_10baseT1L_Full_BIT, linkmodes); - __set_bit(ETHTOOL_LINK_MODE_10baseT1S_Full_BIT, linkmodes); - } - - if (caps & MAC_100HD) { - __set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, linkmodes); - __set_bit(ETHTOOL_LINK_MODE_100baseFX_Half_BIT, linkmodes); - } - -
if (caps & MAC_100FD) { - __set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, linkmodes); - __set_bit(ETHTOOL_LINK_MODE_100baseT1_Full_BIT, linkmodes); - __set_bit(ETHTOOL_LINK_MODE_100baseFX_Full_BIT, linkmodes); - } - - if (caps & MAC_1000HD) - __set_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, linkmodes); - - if (caps & MAC_1000FD) { - __set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, linkmodes); - __set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, linkmodes); - __set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT, linkmodes); - __set_bit(ETHTOOL_LINK_MODE_1000baseT1_Full_BIT, linkmodes); - } - - if (caps & MAC_2500FD) { - __set_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, linkmodes); - __set_bit(ETHTOOL_LINK_MODE_2500baseX_Full_BIT, linkmodes); - } - - if (caps & MAC_5000FD) - __set_bit(ETHTOOL_LINK_MODE_5000baseT_Full_BIT, linkmodes); - - if (caps & MAC_10000FD) { - __set_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, linkmodes); - __set_bit(ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT, linkmodes); - __set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, linkmodes); - __set_bit(ETHTOOL_LINK_MODE_10000baseR_FEC_BIT, linkmodes); - __set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT, linkmodes); - __set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT, linkmodes); - __set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT, linkmodes); - __set_bit(ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT, linkmodes); - __set_bit(ETHTOOL_LINK_MODE_10000baseER_Full_BIT, linkmodes); - } - - if (caps & MAC_25000FD) { - __set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT, linkmodes); - __set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT, linkmodes); - __set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT, linkmodes); - } - - if (caps & MAC_40000FD) { - __set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT, linkmodes); - __set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, linkmodes); - __set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT, linkmodes); - __set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT, linkmodes); - } - - if (caps & MAC_50000FD) { - __set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT, linkmodes); - __set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT, linkmodes); - __set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT, linkmodes); - __set_bit(ETHTOOL_LINK_MODE_50000baseKR_Full_BIT, linkmodes); - __set_bit(ETHTOOL_LINK_MODE_50000baseSR_Full_BIT, linkmodes); - __set_bit(ETHTOOL_LINK_MODE_50000baseCR_Full_BIT, linkmodes); - __set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT, - linkmodes); - __set_bit(ETHTOOL_LINK_MODE_50000baseDR_Full_BIT, linkmodes); - } - - if (caps & MAC_56000FD) { - __set_bit(ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT, linkmodes); - __set_bit(ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT, linkmodes); - __set_bit(ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT, linkmodes); - __set_bit(ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT, linkmodes); - } - - if (caps & MAC_100000FD) { - __set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT, linkmodes); - __set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT, linkmodes); - __set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT, linkmodes); - __set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT, - linkmodes); - __set_bit(ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT, linkmodes); - __set_bit(ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT, linkmodes); - __set_bit(ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT, linkmodes); - __set_bit(ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT, - linkmodes); - __set_bit(ETHTOOL_LINK_MODE_100000baseDR2_Full_BIT, linkmodes); - __set_bit(ETHTOOL_LINK_MODE_100000baseKR_Full_BIT, linkmodes); - 
__set_bit(ETHTOOL_LINK_MODE_100000baseSR_Full_BIT, linkmodes); - __set_bit(ETHTOOL_LINK_MODE_100000baseLR_ER_FR_Full_BIT, - linkmodes); - __set_bit(ETHTOOL_LINK_MODE_100000baseCR_Full_BIT, linkmodes); - __set_bit(ETHTOOL_LINK_MODE_100000baseDR_Full_BIT, linkmodes); - } - - if (caps & MAC_200000FD) { - __set_bit(ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT, linkmodes); - __set_bit(ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT, linkmodes); - __set_bit(ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT, - linkmodes); - __set_bit(ETHTOOL_LINK_MODE_200000baseDR4_Full_BIT, linkmodes); - __set_bit(ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT, linkmodes); - __set_bit(ETHTOOL_LINK_MODE_200000baseKR2_Full_BIT, linkmodes); - __set_bit(ETHTOOL_LINK_MODE_200000baseSR2_Full_BIT, linkmodes); - __set_bit(ETHTOOL_LINK_MODE_200000baseLR2_ER2_FR2_Full_BIT, - linkmodes); - __set_bit(ETHTOOL_LINK_MODE_200000baseDR2_Full_BIT, linkmodes); - __set_bit(ETHTOOL_LINK_MODE_200000baseCR2_Full_BIT, linkmodes); - } - - if (caps & MAC_400000FD) { - __set_bit(ETHTOOL_LINK_MODE_400000baseKR8_Full_BIT, linkmodes); - __set_bit(ETHTOOL_LINK_MODE_400000baseSR8_Full_BIT, linkmodes); - __set_bit(ETHTOOL_LINK_MODE_400000baseLR8_ER8_FR8_Full_BIT, - linkmodes); - __set_bit(ETHTOOL_LINK_MODE_400000baseDR8_Full_BIT, linkmodes); - __set_bit(ETHTOOL_LINK_MODE_400000baseCR8_Full_BIT, linkmodes); - __set_bit(ETHTOOL_LINK_MODE_400000baseKR4_Full_BIT, linkmodes); - __set_bit(ETHTOOL_LINK_MODE_400000baseSR4_Full_BIT, linkmodes); - __set_bit(ETHTOOL_LINK_MODE_400000baseLR4_ER4_FR4_Full_BIT, - linkmodes); - __set_bit(ETHTOOL_LINK_MODE_400000baseDR4_Full_BIT, linkmodes); - __set_bit(ETHTOOL_LINK_MODE_400000baseCR4_Full_BIT, linkmodes); - } + phy_caps_linkmodes(link_caps, linkmodes); } -static struct { - unsigned long mask; - int speed; - unsigned int duplex; -} phylink_caps_params[] = { - { MAC_400000FD, SPEED_400000, DUPLEX_FULL }, - { MAC_200000FD, SPEED_200000, DUPLEX_FULL }, - { MAC_100000FD, SPEED_100000, DUPLEX_FULL }, - { MAC_56000FD, SPEED_56000, DUPLEX_FULL }, - { MAC_50000FD, SPEED_50000, DUPLEX_FULL }, - { MAC_40000FD, SPEED_40000, DUPLEX_FULL }, - { MAC_25000FD, SPEED_25000, DUPLEX_FULL }, - { MAC_20000FD, SPEED_20000, DUPLEX_FULL }, - { MAC_10000FD, SPEED_10000, DUPLEX_FULL }, - { MAC_5000FD, SPEED_5000, DUPLEX_FULL }, - { MAC_2500FD, SPEED_2500, DUPLEX_FULL }, - { MAC_1000FD, SPEED_1000, DUPLEX_FULL }, - { MAC_1000HD, SPEED_1000, DUPLEX_HALF }, - { MAC_100FD, SPEED_100, DUPLEX_FULL }, - { MAC_100HD, SPEED_100, DUPLEX_HALF }, - { MAC_10FD, SPEED_10, DUPLEX_FULL }, - { MAC_10HD, SPEED_10, DUPLEX_HALF }, -}; - /** * phylink_limit_mac_speed - limit the phylink_config to a maximum speed * @config: pointer to a &struct phylink_config @@ -523,86 +424,12 @@ static unsigned long phylink_get_capabilities(phy_interface_t interface, unsigned long mac_capabilities, int rate_matching) { + unsigned long link_caps = phy_caps_from_interface(interface); int max_speed = phylink_interface_max_speed(interface); unsigned long caps = MAC_SYM_PAUSE | MAC_ASYM_PAUSE; unsigned long matched_caps = 0; - switch (interface) { - case PHY_INTERFACE_MODE_USXGMII: - caps |= MAC_10000FD | MAC_5000FD; - fallthrough; - - case PHY_INTERFACE_MODE_10G_QXGMII: - caps |= MAC_2500FD; - fallthrough; - - case PHY_INTERFACE_MODE_RGMII_TXID: - case PHY_INTERFACE_MODE_RGMII_RXID: - case PHY_INTERFACE_MODE_RGMII_ID: - case PHY_INTERFACE_MODE_RGMII: - case PHY_INTERFACE_MODE_PSGMII: - case PHY_INTERFACE_MODE_QSGMII: - case PHY_INTERFACE_MODE_QUSGMII: - case PHY_INTERFACE_MODE_SGMII: - case 
PHY_INTERFACE_MODE_GMII: - caps |= MAC_1000HD | MAC_1000FD; - fallthrough; - - case PHY_INTERFACE_MODE_REVRMII: - case PHY_INTERFACE_MODE_RMII: - case PHY_INTERFACE_MODE_SMII: - case PHY_INTERFACE_MODE_REVMII: - case PHY_INTERFACE_MODE_MII: - caps |= MAC_10HD | MAC_10FD; - fallthrough; - - case PHY_INTERFACE_MODE_100BASEX: - caps |= MAC_100HD | MAC_100FD; - break; - - case PHY_INTERFACE_MODE_TBI: - case PHY_INTERFACE_MODE_MOCA: - case PHY_INTERFACE_MODE_RTBI: - case PHY_INTERFACE_MODE_1000BASEX: - caps |= MAC_1000HD; - fallthrough; - case PHY_INTERFACE_MODE_1000BASEKX: - case PHY_INTERFACE_MODE_TRGMII: - caps |= MAC_1000FD; - break; - - case PHY_INTERFACE_MODE_2500BASEX: - caps |= MAC_2500FD; - break; - - case PHY_INTERFACE_MODE_5GBASER: - caps |= MAC_5000FD; - break; - - case PHY_INTERFACE_MODE_XGMII: - case PHY_INTERFACE_MODE_RXAUI: - case PHY_INTERFACE_MODE_XAUI: - case PHY_INTERFACE_MODE_10GBASER: - case PHY_INTERFACE_MODE_10GKR: - caps |= MAC_10000FD; - break; - - case PHY_INTERFACE_MODE_25GBASER: - caps |= MAC_25000FD; - break; - - case PHY_INTERFACE_MODE_XLGMII: - caps |= MAC_40000FD; - break; - - case PHY_INTERFACE_MODE_INTERNAL: - caps |= ~0; - break; - - case PHY_INTERFACE_MODE_NA: - case PHY_INTERFACE_MODE_MAX: - break; - } + caps |= phylink_link_caps_to_mac_caps(link_caps); switch (rate_matching) { case RATE_MATCH_OPEN_LOOP: @@ -801,12 +628,26 @@ static int phylink_validate(struct phylink *pl, unsigned long *supported, return phylink_validate_mac_and_pcs(pl, supported, state); } +static void phylink_fill_fixedlink_supported(unsigned long *supported) +{ + linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_5000baseT_Full_BIT, supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, supported); +} + static int phylink_parse_fixedlink(struct phylink *pl, const struct fwnode_handle *fwnode) { + __ETHTOOL_DECLARE_LINK_MODE_MASK(match) = { 0, }; __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; + const struct link_capabilities *c; struct fwnode_handle *fixed_node; - const struct phy_setting *s; struct gpio_desc *desc; u32 speed; int ret; @@ -874,12 +715,16 @@ static int phylink_parse_fixedlink(struct phylink *pl, phylink_warn(pl, "fixed link specifies half duplex for %dMbps link?\n", pl->link_config.speed); - linkmode_fill(pl->supported); + linkmode_zero(pl->supported); + phylink_fill_fixedlink_supported(pl->supported); + linkmode_copy(pl->link_config.advertising, pl->supported); phylink_validate(pl, pl->supported, &pl->link_config); - s = phy_lookup_setting(pl->link_config.speed, pl->link_config.duplex, - pl->supported, true); + c = phy_caps_lookup(pl->link_config.speed, pl->link_config.duplex, + pl->supported, true); + if (c) + linkmode_and(match, pl->supported, c->linkmodes); linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mask); linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, mask); @@ -888,9 +733,10 @@ static int phylink_parse_fixedlink(struct phylink *pl, phylink_set(pl->supported, MII); - if (s) { - __set_bit(s->bit, pl->supported); - __set_bit(s->bit, pl->link_config.lp_advertising); + if (c) { + 
linkmode_or(pl->supported, pl->supported, match); + linkmode_or(pl->link_config.lp_advertising, + pl->link_config.lp_advertising, match); } else { phylink_warn(pl, "fixed link %s duplex %dMbps not recognised\n", pl->link_config.duplex == DUPLEX_FULL ? "full" : "half", @@ -1073,6 +919,18 @@ static void phylink_pcs_link_up(struct phylink_pcs *pcs, unsigned int neg_mode, pcs->ops->pcs_link_up(pcs, neg_mode, interface, speed, duplex); } +static void phylink_pcs_disable_eee(struct phylink_pcs *pcs) +{ + if (pcs && pcs->ops->pcs_disable_eee) + pcs->ops->pcs_disable_eee(pcs); +} + +static void phylink_pcs_enable_eee(struct phylink_pcs *pcs) +{ + if (pcs && pcs->ops->pcs_enable_eee) + pcs->ops->pcs_enable_eee(pcs); +} + /* Query inband for a specific interface mode, asking the MAC for the * PCS which will be used to handle the interface mode. */ @@ -1353,19 +1211,22 @@ static void phylink_major_config(struct phylink *pl, bool restart, struct phylink_pcs *pcs = NULL; bool pcs_changed = false; unsigned int rate_kbd; - unsigned int neg_mode; int err; phylink_dbg(pl, "major config, requested %s/%s\n", phylink_an_mode_str(pl->req_link_an_mode), phy_modes(state->interface)); + pl->major_config_failed = false; + if (pl->mac_ops->mac_select_pcs) { pcs = pl->mac_ops->mac_select_pcs(pl->config, state->interface); if (IS_ERR(pcs)) { phylink_err(pl, "mac_select_pcs unexpectedly failed: %pe\n", pcs); + + pl->major_config_failed = true; return; } @@ -1387,6 +1248,7 @@ static void phylink_major_config(struct phylink *pl, bool restart, if (err < 0) { phylink_err(pl, "mac_prepare failed: %pe\n", ERR_PTR(err)); + pl->major_config_failed = true; return; } } @@ -1410,23 +1272,27 @@ static void phylink_major_config(struct phylink *pl, bool restart, phylink_mac_config(pl, state); - if (pl->pcs) - phylink_pcs_post_config(pl->pcs, state->interface); + if (pl->pcs) { + err = phylink_pcs_post_config(pl->pcs, state->interface); + if (err < 0) { + phylink_err(pl, "pcs_post_config failed: %pe\n", + ERR_PTR(err)); + + pl->major_config_failed = true; + } + } if (pl->pcs_state == PCS_STATE_STARTING || pcs_changed) phylink_pcs_enable(pl->pcs); - neg_mode = pl->act_link_an_mode; - if (pl->pcs && pl->pcs->neg_mode) - neg_mode = pl->pcs_neg_mode; - - err = phylink_pcs_config(pl->pcs, neg_mode, state, + err = phylink_pcs_config(pl->pcs, pl->pcs_neg_mode, state, !!(pl->link_config.pause & MLO_PAUSE_AN)); - if (err < 0) - phylink_err(pl, "pcs_config failed: %pe\n", - ERR_PTR(err)); - else if (err > 0) + if (err < 0) { + phylink_err(pl, "pcs_config failed: %pe\n", ERR_PTR(err)); + pl->major_config_failed = true; + } else if (err > 0) { restart = true; + } if (restart) phylink_pcs_an_restart(pl); @@ -1434,16 +1300,22 @@ static void phylink_major_config(struct phylink *pl, bool restart, if (pl->mac_ops->mac_finish) { err = pl->mac_ops->mac_finish(pl->config, pl->act_link_an_mode, state->interface); - if (err < 0) + if (err < 0) { phylink_err(pl, "mac_finish failed: %pe\n", ERR_PTR(err)); + + pl->major_config_failed = true; + } } if (pl->phydev && pl->phy_ib_mode) { err = phy_config_inband(pl->phydev, pl->phy_ib_mode); - if (err < 0) + if (err < 0) { phylink_err(pl, "phy_config_inband: %pe\n", ERR_PTR(err)); + + pl->major_config_failed = true; + } } if (pl->sfp_bus) { @@ -1463,7 +1335,6 @@ static void phylink_major_config(struct phylink *pl, bool restart, */ static int phylink_change_inband_advert(struct phylink *pl) { - unsigned int neg_mode; int ret; if (test_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state)) @@ -1479,15 +1350,11 
@@ static int phylink_change_inband_advert(struct phylink *pl) phylink_pcs_neg_mode(pl, pl->pcs, pl->link_config.interface, pl->link_config.advertising); - neg_mode = pl->act_link_an_mode; - if (pl->pcs->neg_mode) - neg_mode = pl->pcs_neg_mode; - /* Modern PCS-based method; update the advert at the PCS, and * restart negotiation if the pcs_config() helper indicates that * the programmed advertisement has changed. */ - ret = phylink_pcs_config(pl->pcs, neg_mode, &pl->link_config, + ret = phylink_pcs_config(pl->pcs, pl->pcs_neg_mode, &pl->link_config, !!(pl->link_config.pause & MLO_PAUSE_AN)); if (ret < 0) return ret; @@ -1511,13 +1378,7 @@ static void phylink_mac_pcs_get_state(struct phylink *pl, state->an_complete = 0; state->link = 1; - pcs = pl->pcs; - if (!pcs || pcs->neg_mode) - autoneg = pl->pcs_neg_mode == PHYLINK_PCS_NEG_INBAND_ENABLED; - else - autoneg = linkmode_test_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, - state->advertising); - + autoneg = pl->pcs_neg_mode == PHYLINK_PCS_NEG_INBAND_ENABLED; if (autoneg) { state->speed = SPEED_UNKNOWN; state->duplex = DUPLEX_UNKNOWN; @@ -1528,6 +1389,7 @@ static void phylink_mac_pcs_get_state(struct phylink *pl, state->pause = pl->link_config.pause; } + pcs = pl->pcs; if (pcs) pcs->ops->pcs_get_state(pcs, pl->pcs_neg_mode, state); else @@ -1601,6 +1463,8 @@ static void phylink_deactivate_lpi(struct phylink *pl) phylink_dbg(pl, "disabling LPI\n"); pl->mac_ops->mac_disable_tx_lpi(pl->config); + + phylink_pcs_disable_eee(pl->pcs); } } @@ -1617,20 +1481,24 @@ static void phylink_activate_lpi(struct phylink *pl) phylink_dbg(pl, "LPI timer %uus, tx clock stop %u\n", pl->mac_tx_lpi_timer, pl->mac_tx_clk_stop); + phylink_pcs_enable_eee(pl->pcs); + err = pl->mac_ops->mac_enable_tx_lpi(pl->config, pl->mac_tx_lpi_timer, pl->mac_tx_clk_stop); - if (!err) - pl->mac_enable_tx_lpi = true; - else + if (err) { + phylink_pcs_disable_eee(pl->pcs); phylink_err(pl, "%ps() failed: %pe\n", pl->mac_ops->mac_enable_tx_lpi, ERR_PTR(err)); + return; + } + + pl->mac_enable_tx_lpi = true; } static void phylink_link_up(struct phylink *pl, struct phylink_link_state link_state) { struct net_device *ndev = pl->netdev; - unsigned int neg_mode; int speed, duplex; bool rx_pause; @@ -1661,11 +1529,7 @@ static void phylink_link_up(struct phylink *pl, pl->cur_interface = link_state.interface; - neg_mode = pl->act_link_an_mode; - if (pl->pcs && pl->pcs->neg_mode) - neg_mode = pl->pcs_neg_mode; - - phylink_pcs_link_up(pl->pcs, neg_mode, pl->cur_interface, speed, + phylink_pcs_link_up(pl->pcs, pl->pcs_neg_mode, pl->cur_interface, speed, duplex); pl->mac_ops->mac_link_up(pl->config, pl->phydev, pl->act_link_an_mode, @@ -1795,6 +1659,12 @@ static void phylink_resolve(struct work_struct *w) } } + /* If configuration of the interface failed, force the link down + * until we get a successful configuration. 
+ */ + if (pl->major_config_failed) + link_state.link = false; + if (link_state.link != cur_link_state) { pl->old_link_state = link_state.link; if (!link_state.link) @@ -1879,21 +1749,20 @@ static int phylink_register_sfp(struct phylink *pl, int phylink_set_fixed_link(struct phylink *pl, const struct phylink_link_state *state) { - const struct phy_setting *s; + const struct link_capabilities *c; unsigned long *adv; if (pl->cfg_link_an_mode != MLO_AN_PHY || !state || !test_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state)) return -EINVAL; - s = phy_lookup_setting(state->speed, state->duplex, - pl->supported, true); - if (!s) + c = phy_caps_lookup(state->speed, state->duplex, + pl->supported, true); + if (!c) return -EINVAL; adv = pl->link_config.advertising; - linkmode_zero(adv); - linkmode_set_bit(s->bit, adv); + linkmode_and(adv, pl->supported, c->linkmodes); linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, adv); pl->link_config.speed = state->speed; @@ -1957,8 +1826,7 @@ struct phylink *phylink_create(struct phylink_config *config, return ERR_PTR(-EINVAL); } - pl->mac_supports_eee_ops = mac_ops->mac_disable_tx_lpi && - mac_ops->mac_enable_tx_lpi; + pl->mac_supports_eee_ops = phylink_mac_implements_lpi(mac_ops); pl->mac_supports_eee = pl->mac_supports_eee_ops && pl->config->lpi_capabilities && !phy_interface_empty(pl->config->lpi_interfaces); @@ -2046,7 +1914,7 @@ bool phylink_expects_phy(struct phylink *pl) { if (pl->cfg_link_an_mode == MLO_AN_FIXED || (pl->cfg_link_an_mode == MLO_AN_INBAND && - phy_interface_mode_is_8023z(pl->link_config.interface))) + phy_interface_mode_is_8023z(pl->link_interface))) return false; return true; } @@ -2594,6 +2462,64 @@ void phylink_stop(struct phylink *pl) } EXPORT_SYMBOL_GPL(phylink_stop); +/** + * phylink_rx_clk_stop_block() - block PHY ability to stop receive clock in LPI + * @pl: a pointer to a &struct phylink returned from phylink_create() + * + * Disable the PHY's ability to stop the receive clock while the receive path + * is in EEE LPI state, until the calls to phylink_rx_clk_stop_block() are + * balanced by calls to phylink_rx_clk_stop_unblock(). + */ +void phylink_rx_clk_stop_block(struct phylink *pl) +{ + ASSERT_RTNL(); + + if (pl->mac_rx_clk_stop_blocked == U8_MAX) { + phylink_warn(pl, "%s called too many times - ignoring\n", + __func__); + dump_stack(); + return; + } + + /* Disable PHY receive clock stop if this is the first time this + * function has been called and clock-stop was previously enabled. + */ + if (pl->mac_rx_clk_stop_blocked++ == 0 && + pl->mac_supports_eee_ops && pl->phydev && + pl->config->eee_rx_clk_stop_enable) + phy_eee_rx_clock_stop(pl->phydev, false); +} +EXPORT_SYMBOL_GPL(phylink_rx_clk_stop_block); + +/** + * phylink_rx_clk_stop_unblock() - unblock PHY ability to stop receive clock + * @pl: a pointer to a &struct phylink returned from phylink_create() + * + * All calls to phylink_rx_clk_stop_block() must be balanced with a + * corresponding call to phylink_rx_clk_stop_unblock() to restore the PHY's + * ability to stop the receive clock when the receive path is in EEE LPI mode. + */ +void phylink_rx_clk_stop_unblock(struct phylink *pl) +{ + ASSERT_RTNL(); + + if (pl->mac_rx_clk_stop_blocked == 0) { + phylink_warn(pl, "%s called too many times - ignoring\n", + __func__); + dump_stack(); + return; + } + + /* Re-enable PHY receive clock stop if the number of unblocks matches + * the number of calls to the block function above.
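+	 * A typical (driver-specific) usage pattern is to call the block + * function before hardware accesses that need a running receive clock + * and to call the unblock function once they have completed.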
+ */ + if (--pl->mac_rx_clk_stop_blocked == 0 && + pl->mac_supports_eee_ops && pl->phydev && + pl->config->eee_rx_clk_stop_enable) + phy_eee_rx_clock_stop(pl->phydev, true); +} +EXPORT_SYMBOL_GPL(phylink_rx_clk_stop_unblock); + /** * phylink_suspend() - handle a network device suspend event * @pl: a pointer to a &struct phylink returned from phylink_create() @@ -2638,6 +2564,31 @@ void phylink_suspend(struct phylink *pl, bool mac_wol) } EXPORT_SYMBOL_GPL(phylink_suspend); +/** + * phylink_prepare_resume() - prepare to resume a network device + * @pl: a pointer to a &struct phylink returned from phylink_create() + * + * Optional, but if called must be called prior to phylink_resume(). + * + * Prepare to resume a network device, preparing the PHY as necessary. + */ +void phylink_prepare_resume(struct phylink *pl) +{ + struct phy_device *phydev = pl->phydev; + + ASSERT_RTNL(); + + /* IEEE 802.3 22.2.4.1.5 allows PHYs to stop their receive clock + * when PDOWN is set. However, some MACs require RXC to be running + * in order to resume. If the MAC requires RXC, and we have a PHY, + * then resume the PHY. Note that 802.3 allows PHYs 500ms before + * the clock meets requirements. We do not implement this delay. + */ + if (pl->config->mac_requires_rxc && phydev && phydev->suspended) + phy_resume(phydev); +} +EXPORT_SYMBOL_GPL(phylink_prepare_resume); + /** * phylink_resume() - handle a network device resume event * @pl: a pointer to a &struct phylink returned from phylink_create() @@ -2854,8 +2805,8 @@ int phylink_ethtool_ksettings_set(struct phylink *pl, const struct ethtool_link_ksettings *kset) { __ETHTOOL_DECLARE_LINK_MODE_MASK(support); + const struct link_capabilities *c; struct phylink_link_state config; - const struct phy_setting *s; ASSERT_RTNL(); @@ -2898,23 +2849,23 @@ int phylink_ethtool_ksettings_set(struct phylink *pl, /* Autonegotiation disabled, select a suitable speed and * duplex. */ - s = phy_lookup_setting(kset->base.speed, kset->base.duplex, - pl->supported, false); - if (!s) + c = phy_caps_lookup(kset->base.speed, kset->base.duplex, + pl->supported, false); + if (!c) return -EINVAL; /* If we have a fixed link, refuse to change link parameters. * If the link parameters match, accept them but do nothing. 
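+	 * For example (illustrative command), with a fixed 1000/full link, + * "ethtool -s eth0 speed 1000 duplex full autoneg off" succeeds without + * reconfiguring anything, while any other speed/duplex request fails + * with -EINVAL.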
*/ if (pl->req_link_an_mode == MLO_AN_FIXED) { - if (s->speed != pl->link_config.speed || - s->duplex != pl->link_config.duplex) + if (c->speed != pl->link_config.speed || + c->duplex != pl->link_config.duplex) return -EINVAL; return 0; } - config.speed = s->speed; - config.duplex = s->duplex; + config.speed = c->speed; + config.duplex = c->duplex; break; case AUTONEG_ENABLE: @@ -3159,24 +3110,6 @@ int phylink_get_eee_err(struct phylink *pl) } EXPORT_SYMBOL_GPL(phylink_get_eee_err); -/** - * phylink_init_eee() - init and check the EEE features - * @pl: a pointer to a &struct phylink returned from phylink_create() - * @clk_stop_enable: allow PHY to stop receive clock - * - * Must be called either with RTNL held or within mac_link_up() - */ -int phylink_init_eee(struct phylink *pl, bool clk_stop_enable) -{ - int ret = -EOPNOTSUPP; - - if (pl->phydev) - ret = phy_init_eee(pl->phydev, clk_stop_enable); - - return ret; -} -EXPORT_SYMBOL_GPL(phylink_init_eee); - /** * phylink_ethtool_get_eee() - read the energy efficient ethernet parameters * @pl: a pointer to a &struct phylink returned from phylink_create() diff --git a/drivers/net/phy/qcom/qca807x.c b/drivers/net/phy/qcom/qca807x.c index 3279de857b474..1af6b5ead74bf 100644 --- a/drivers/net/phy/qcom/qca807x.c +++ b/drivers/net/phy/qcom/qca807x.c @@ -15,6 +15,7 @@ #include #include +#include "../phylib.h" #include "qcom.h" #define QCA807X_CHIP_CONFIGURATION 0x1f @@ -486,13 +487,13 @@ static int qca807x_read_status(struct phy_device *phydev) static int qca807x_phy_package_probe_once(struct phy_device *phydev) { - struct phy_package_shared *shared = phydev->shared; - struct qca807x_shared_priv *priv = shared->priv; + struct qca807x_shared_priv *priv = phy_package_get_priv(phydev); + struct device_node *np = phy_package_get_node(phydev); unsigned int tx_drive_strength; const char *package_mode_name; /* Default to 600mw if not defined */ - if (of_property_read_u32(shared->np, "qcom,tx-drive-strength-milliwatt", + if (of_property_read_u32(np, "qcom,tx-drive-strength-milliwatt", &tx_drive_strength)) tx_drive_strength = 600; @@ -541,7 +542,7 @@ static int qca807x_phy_package_probe_once(struct phy_device *phydev) } priv->package_mode = PHY_INTERFACE_MODE_NA; - if (!of_property_read_string(shared->np, "qcom,package-mode", + if (!of_property_read_string(np, "qcom,package-mode", &package_mode_name)) { if (!strcasecmp(package_mode_name, phy_modes(PHY_INTERFACE_MODE_PSGMII))) @@ -558,8 +559,7 @@ static int qca807x_phy_package_probe_once(struct phy_device *phydev) static int qca807x_phy_package_config_init_once(struct phy_device *phydev) { - struct phy_package_shared *shared = phydev->shared; - struct qca807x_shared_priv *priv = shared->priv; + struct qca807x_shared_priv *priv = phy_package_get_priv(phydev); int val, ret; /* Make sure PHY follow PHY package mode if enforced */ @@ -708,7 +708,6 @@ static int qca807x_probe(struct phy_device *phydev) struct device_node *node = phydev->mdio.dev.of_node; struct qca807x_shared_priv *shared_priv; struct device *dev = &phydev->mdio.dev; - struct phy_package_shared *shared; struct qca807x_priv *priv; int ret; @@ -722,8 +721,7 @@ static int qca807x_probe(struct phy_device *phydev) return ret; } - shared = phydev->shared; - shared_priv = shared->priv; + shared_priv = phy_package_get_priv(phydev); priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); if (!priv) @@ -774,7 +772,7 @@ static int qca807x_config_init(struct phy_device *phydev) control_dac &= ~QCA807X_CONTROL_DAC_MASK; if (!priv->dac_full_amplitude) control_dac 
|= QCA807X_CONTROL_DAC_DSP_AMPLITUDE; - if (!priv->dac_full_amplitude) + if (!priv->dac_full_bias_current) control_dac |= QCA807X_CONTROL_DAC_DSP_BIAS_CURRENT; if (!priv->dac_disable_bias_current_tweak) control_dac |= QCA807X_CONTROL_DAC_BIAS_CURRENT_TWEAK; diff --git a/drivers/net/phy/qt2025.rs b/drivers/net/phy/qt2025.rs index 1ab065798175b..7e754d5d71544 100644 --- a/drivers/net/phy/qt2025.rs +++ b/drivers/net/phy/qt2025.rs @@ -41,7 +41,7 @@ impl Driver for PhyQT2025 { fn probe(dev: &mut phy::Device) -> Result<()> { // Check the hardware revision code. - // Only 0x3b works with this driver and firmware. + // Only 0xb3 works with this driver and firmware. let hw_rev = dev.read(C45::new(Mmd::PMAPMD, 0xd001))?; if (hw_rev >> 8) != 0xb3 { return Err(code::ENODEV); } diff --git a/drivers/net/phy/realtek/Kconfig b/drivers/net/phy/realtek/Kconfig index 31935f147d879..b05c2a1e90243 100644 --- a/drivers/net/phy/realtek/Kconfig +++ b/drivers/net/phy/realtek/Kconfig @@ -4,8 +4,12 @@ config REALTEK_PHY help Currently supports RTL821x/RTL822x and fast ethernet PHYs +if REALTEK_PHY + config REALTEK_PHY_HWMON - def_bool REALTEK_PHY && HWMON - depends on !(REALTEK_PHY=y && HWMON=m) + bool "HWMON support for Realtek PHYs" + depends on HWMON && !(REALTEK_PHY=y && HWMON=m) help Optional hwmon support for the temperature sensor + +endif # REALTEK_PHY diff --git a/drivers/net/phy/realtek/realtek_hwmon.c b/drivers/net/phy/realtek/realtek_hwmon.c index 1ecb410bb9418..ac96e2d1ebe82 100644 --- a/drivers/net/phy/realtek/realtek_hwmon.c +++ b/drivers/net/phy/realtek/realtek_hwmon.c @@ -63,16 +63,11 @@ static const struct hwmon_chip_info rtl822x_hwmon_chip_info = { int rtl822x_hwmon_init(struct phy_device *phydev) { struct device *hwdev, *dev = &phydev->mdio.dev; - const char *name; /* Ensure over-temp alarm is reset. */ phy_clear_bits_mmd(phydev, MDIO_MMD_VEND2, RTL822X_VND2_TSALRM, 3); - name = devm_hwmon_sanitize_name(dev, dev_name(dev)); - if (IS_ERR(name)) - return PTR_ERR(name); - - hwdev = devm_hwmon_device_register_with_info(dev, name, phydev, + hwdev = devm_hwmon_device_register_with_info(dev, NULL, phydev, &rtl822x_hwmon_chip_info, NULL); return PTR_ERR_OR_ZERO(hwdev); diff --git a/drivers/net/phy/realtek/realtek_main.c b/drivers/net/phy/realtek/realtek_main.c index 572a933636b01..893c824796715 100644 --- a/drivers/net/phy/realtek/realtek_main.c +++ b/drivers/net/phy/realtek/realtek_main.c @@ -13,6 +13,7 @@ #include #include #include +#include <linux/string_choices.h> #include "realtek.h" @@ -32,6 +33,9 @@ #define RTL8211F_PHYCR1 0x18 #define RTL8211F_PHYCR2 0x19 +#define RTL8211F_CLKOUT_EN BIT(0) +#define RTL8211F_PHYCR2_PHY_EEE_ENABLE BIT(5) + #define RTL8211F_INSR 0x1d #define RTL8211F_LEDCR 0x10 @@ -54,8 +58,6 @@ #define RTL8211E_TX_DELAY BIT(12) #define RTL8211E_RX_DELAY BIT(11) -#define RTL8211F_CLKOUT_EN BIT(0) - #define RTL8201F_ISR 0x1e #define RTL8201F_ISR_ANERR BIT(15) #define RTL8201F_ISR_DUPLEX BIT(13) @@ -78,9 +80,7 @@ /* RTL822X_VND2_XXXXX registers are only accessible when phydev->is_c45 * is set; they cannot be accessed by C45-over-C22.
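+ * The RTL822X_VND2_C22_REG() macro below computes the vendor MMD address + * of a mirrored C22 register as 0xa400 + 2 * reg; e.g. MII_CTRL1000 (0x09) + * maps to 0xa412, the register formerly defined as RTL822X_VND2_GBCR.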
*/ -#define RTL822X_VND2_GBCR 0xa412 - -#define RTL822X_VND2_GANLPAR 0xa414 +#define RTL822X_VND2_C22_REG(reg) (0xa400 + 2 * (reg)) #define RTL8366RB_POWER_SAVE 0x15 #define RTL8366RB_POWER_SAVE_ON BIT(12) @@ -95,6 +95,16 @@ #define RTL_VND2_PHYSR_MASTER BIT(11) #define RTL_VND2_PHYSR_SPEED_MASK (RTL_VND2_PHYSR_SPEEDL | RTL_VND2_PHYSR_SPEEDH) +#define RTL_MDIO_PCS_EEE_ABLE 0xa5c4 +#define RTL_MDIO_AN_EEE_ADV 0xa5d0 +#define RTL_MDIO_AN_EEE_LPABLE 0xa5d2 +#define RTL_MDIO_AN_10GBT_CTRL 0xa5d4 +#define RTL_MDIO_AN_10GBT_STAT 0xa5d6 +#define RTL_MDIO_PMA_SPEED 0xa616 +#define RTL_MDIO_AN_EEE_LPABLE2 0xa6d0 +#define RTL_MDIO_AN_EEE_ADV2 0xa6d4 +#define RTL_MDIO_PCS_EEE_ABLE2 0xa6ec + #define RTL_GENERIC_PHYID 0x001cc800 #define RTL_8211FVD_PHYID 0x001cc878 #define RTL_8221B 0x001cc840 @@ -422,11 +432,11 @@ static int rtl8211f_config_init(struct phy_device *phydev) } else if (ret) { dev_dbg(dev, "%s 2ns TX delay (and changing the value from pin-strapping RXD1 or the bootloader)\n", - val_txdly ? "Enabling" : "Disabling"); + str_enable_disable(val_txdly)); } else { dev_dbg(dev, "2ns TX delay was already %s (by pin-strapping RXD1 or bootloader configuration)\n", - val_txdly ? "enabled" : "disabled"); + str_enabled_disabled(val_txdly)); } ret = phy_modify_paged_changed(phydev, 0xd08, 0x15, RTL8211F_RX_DELAY, @@ -437,13 +447,19 @@ static int rtl8211f_config_init(struct phy_device *phydev) } else if (ret) { dev_dbg(dev, "%s 2ns RX delay (and changing the value from pin-strapping RXD0 or the bootloader)\n", - val_rxdly ? "Enabling" : "Disabling"); + str_enable_disable(val_rxdly)); } else { dev_dbg(dev, "2ns RX delay was already %s (by pin-strapping RXD0 or bootloader configuration)\n", - val_rxdly ? "enabled" : "disabled"); + str_enabled_disabled(val_rxdly)); } + /* Disable PHY-mode EEE so LPI is passed to the MAC */ + ret = phy_modify_paged(phydev, 0xa43, RTL8211F_PHYCR2, + RTL8211F_PHYCR2_PHY_EEE_ENABLE, 0); + if (ret) + return ret; + if (priv->has_phycr2) { ret = phy_modify_paged(phydev, 0xa43, RTL8211F_PHYCR2, RTL8211F_CLKOUT_EN, priv->phycr2); @@ -734,29 +750,31 @@ static int rtlgen_read_status(struct phy_device *phydev) return 0; } +static int rtlgen_read_vend2(struct phy_device *phydev, int regnum) +{ + return __mdiobus_c45_read(phydev->mdio.bus, 0, MDIO_MMD_VEND2, regnum); +} + +static int rtlgen_write_vend2(struct phy_device *phydev, int regnum, u16 val) +{ + return __mdiobus_c45_write(phydev->mdio.bus, 0, MDIO_MMD_VEND2, regnum, + val); +} + static int rtlgen_read_mmd(struct phy_device *phydev, int devnum, u16 regnum) { int ret; - if (devnum == MDIO_MMD_VEND2) { - rtl821x_write_page(phydev, regnum >> 4); - ret = __phy_read(phydev, 0x10 + ((regnum & 0xf) >> 1)); - rtl821x_write_page(phydev, 0); - } else if (devnum == MDIO_MMD_PCS && regnum == MDIO_PCS_EEE_ABLE) { - rtl821x_write_page(phydev, 0xa5c); - ret = __phy_read(phydev, 0x12); - rtl821x_write_page(phydev, 0); - } else if (devnum == MDIO_MMD_AN && regnum == MDIO_AN_EEE_ADV) { - rtl821x_write_page(phydev, 0xa5d); - ret = __phy_read(phydev, 0x10); - rtl821x_write_page(phydev, 0); - } else if (devnum == MDIO_MMD_AN && regnum == MDIO_AN_EEE_LPABLE) { - rtl821x_write_page(phydev, 0xa5d); - ret = __phy_read(phydev, 0x11); - rtl821x_write_page(phydev, 0); - } else { + if (devnum == MDIO_MMD_VEND2) + ret = rtlgen_read_vend2(phydev, regnum); + else if (devnum == MDIO_MMD_PCS && regnum == MDIO_PCS_EEE_ABLE) + ret = rtlgen_read_vend2(phydev, RTL_MDIO_PCS_EEE_ABLE); + else if (devnum == MDIO_MMD_AN && regnum == MDIO_AN_EEE_ADV) + ret = 
rtlgen_read_vend2(phydev, RTL_MDIO_AN_EEE_ADV); + else if (devnum == MDIO_MMD_AN && regnum == MDIO_AN_EEE_LPABLE) + ret = rtlgen_read_vend2(phydev, RTL_MDIO_AN_EEE_LPABLE); + else ret = -EOPNOTSUPP; - } return ret; } @@ -766,17 +784,12 @@ static int rtlgen_write_mmd(struct phy_device *phydev, int devnum, u16 regnum, { int ret; - if (devnum == MDIO_MMD_VEND2) { - rtl821x_write_page(phydev, regnum >> 4); - ret = __phy_write(phydev, 0x10 + ((regnum & 0xf) >> 1), val); - rtl821x_write_page(phydev, 0); - } else if (devnum == MDIO_MMD_AN && regnum == MDIO_AN_EEE_ADV) { - rtl821x_write_page(phydev, 0xa5d); - ret = __phy_write(phydev, 0x10, val); - rtl821x_write_page(phydev, 0); - } else { + if (devnum == MDIO_MMD_VEND2) + ret = rtlgen_write_vend2(phydev, regnum, val); + else if (devnum == MDIO_MMD_AN && regnum == MDIO_AN_EEE_ADV) + ret = rtlgen_write_vend2(phydev, RTL_MDIO_AN_EEE_ADV, val); + else ret = -EOPNOTSUPP; - } return ret; } @@ -788,19 +801,12 @@ static int rtl822x_read_mmd(struct phy_device *phydev, int devnum, u16 regnum) if (ret != -EOPNOTSUPP) return ret; - if (devnum == MDIO_MMD_PCS && regnum == MDIO_PCS_EEE_ABLE2) { - rtl821x_write_page(phydev, 0xa6e); - ret = __phy_read(phydev, 0x16); - rtl821x_write_page(phydev, 0); - } else if (devnum == MDIO_MMD_AN && regnum == MDIO_AN_EEE_ADV2) { - rtl821x_write_page(phydev, 0xa6d); - ret = __phy_read(phydev, 0x12); - rtl821x_write_page(phydev, 0); - } else if (devnum == MDIO_MMD_AN && regnum == MDIO_AN_EEE_LPABLE2) { - rtl821x_write_page(phydev, 0xa6d); - ret = __phy_read(phydev, 0x10); - rtl821x_write_page(phydev, 0); - } + if (devnum == MDIO_MMD_PCS && regnum == MDIO_PCS_EEE_ABLE2) + ret = rtlgen_read_vend2(phydev, RTL_MDIO_PCS_EEE_ABLE2); + else if (devnum == MDIO_MMD_AN && regnum == MDIO_AN_EEE_ADV2) + ret = rtlgen_read_vend2(phydev, RTL_MDIO_AN_EEE_ADV2); + else if (devnum == MDIO_MMD_AN && regnum == MDIO_AN_EEE_LPABLE2) + ret = rtlgen_read_vend2(phydev, RTL_MDIO_AN_EEE_LPABLE2); return ret; } @@ -813,11 +819,8 @@ static int rtl822x_write_mmd(struct phy_device *phydev, int devnum, u16 regnum, if (ret != -EOPNOTSUPP) return ret; - if (devnum == MDIO_MMD_AN && regnum == MDIO_AN_EEE_ADV2) { - rtl821x_write_page(phydev, 0xa6d); - ret = __phy_write(phydev, 0x12, val); - rtl821x_write_page(phydev, 0); - } + if (devnum == MDIO_MMD_AN && regnum == MDIO_AN_EEE_ADV2) + ret = rtlgen_write_vend2(phydev, RTL_MDIO_AN_EEE_ADV2, val); return ret; } @@ -913,7 +916,7 @@ static int rtl822x_get_features(struct phy_device *phydev) { int val; - val = phy_read_paged(phydev, 0xa61, 0x13); + val = phy_read_mmd(phydev, MDIO_MMD_VEND2, RTL_MDIO_PMA_SPEED); if (val < 0) return val; @@ -934,10 +937,10 @@ static int rtl822x_config_aneg(struct phy_device *phydev) if (phydev->autoneg == AUTONEG_ENABLE) { u16 adv = linkmode_adv_to_mii_10gbt_adv_t(phydev->advertising); - ret = phy_modify_paged_changed(phydev, 0xa5d, 0x12, - MDIO_AN_10GBT_CTRL_ADV2_5G | - MDIO_AN_10GBT_CTRL_ADV5G, - adv); + ret = phy_modify_mmd_changed(phydev, MDIO_MMD_VEND2, + RTL_MDIO_AN_10GBT_CTRL, + MDIO_AN_10GBT_CTRL_ADV2_5G | + MDIO_AN_10GBT_CTRL_ADV5G, adv); if (ret < 0) return ret; } @@ -981,7 +984,7 @@ static int rtl822x_read_status(struct phy_device *phydev) !phydev->autoneg_complete) return 0; - lpadv = phy_read_paged(phydev, 0xa5d, 0x13); + lpadv = phy_read_mmd(phydev, MDIO_MMD_VEND2, RTL_MDIO_AN_10GBT_STAT); if (lpadv < 0) return lpadv; @@ -1028,7 +1031,8 @@ static int rtl822x_c45_config_aneg(struct phy_device *phydev) val = linkmode_adv_to_mii_ctrl1000_t(phydev->advertising);
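
(Editorial aside on the hunk below.) The new RTL822X_VND2_C22_REG() macro encodes how the RTL822x family mirrors the standard Clause-22 registers into the VEND2 MMD at a fixed stride: Clause-22 register reg lives at VEND2 address 0xa400 + 2 * reg. A minimal sketch showing that the two ad-hoc defines removed earlier fall out of the same formula (illustration only, not part of the patch; MII_CTRL1000 and MII_STAT1000 are the standard constants from linux/mii.h):

	#include <linux/build_bug.h>
	#include <linux/mii.h>	/* MII_CTRL1000 = 0x09, MII_STAT1000 = 0x0a */

	#define RTL822X_VND2_C22_REG(reg)	(0xa400 + 2 * (reg))

	static_assert(RTL822X_VND2_C22_REG(MII_CTRL1000) == 0xa412);	/* was RTL822X_VND2_GBCR */
	static_assert(RTL822X_VND2_C22_REG(MII_STAT1000) == 0xa414);	/* was RTL822X_VND2_GANLPAR */
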
/* Vendor register as C45 has no standardized support for 1000BaseT */ - ret = phy_modify_mmd_changed(phydev, MDIO_MMD_VEND2, RTL822X_VND2_GBCR, + ret = phy_modify_mmd_changed(phydev, MDIO_MMD_VEND2, + RTL822X_VND2_C22_REG(MII_CTRL1000), ADVERTISE_1000FULL, val); if (ret < 0) return ret; @@ -1045,7 +1049,7 @@ static int rtl822x_c45_read_status(struct phy_device *phydev) /* Vendor register as C45 has no standardized support for 1000BaseT */ if (phydev->autoneg == AUTONEG_ENABLE && genphy_c45_aneg_done(phydev)) { val = phy_read_mmd(phydev, MDIO_MMD_VEND2, - RTL822X_VND2_GANLPAR); + RTL822X_VND2_C22_REG(MII_STAT1000)); if (val < 0) return val; } else { diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c index 7dbcbf0a4ee26..347c1e0e94d95 100644 --- a/drivers/net/phy/sfp.c +++ b/drivers/net/phy/sfp.c @@ -234,6 +234,7 @@ struct sfp { enum mdio_i2c_proto mdio_protocol; struct phy_device *mod_phy; const struct sff_data *type; + size_t i2c_max_block_size; size_t i2c_block_size; u32 max_power_mW; @@ -385,7 +386,7 @@ static void sfp_fixup_rollball(struct sfp *sfp) sfp->phy_t_retry = msecs_to_jiffies(1000); } -static void sfp_fixup_fs_2_5gt(struct sfp *sfp) +static void sfp_fixup_rollball_wait4s(struct sfp *sfp) { sfp_fixup_rollball(sfp); @@ -399,7 +400,7 @@ static void sfp_fixup_fs_10gt(struct sfp *sfp) { sfp_fixup_10gbaset_30m(sfp); - sfp_fixup_fs_2_5gt(sfp); + sfp_fixup_rollball_wait4s(sfp); } static void sfp_fixup_halny_gsfp(struct sfp *sfp) @@ -479,9 +480,10 @@ static const struct sfp_quirk sfp_quirks[] = { // PHY. SFP_QUIRK_F("FS", "SFP-10G-T", sfp_fixup_fs_10gt), - // Fiberstore SFP-2.5G-T uses Rollball protocol to talk to the PHY and - // needs 4 sec wait before probing the PHY. - SFP_QUIRK_F("FS", "SFP-2.5G-T", sfp_fixup_fs_2_5gt), + // Fiberstore SFP-2.5G-T and SFP-10GM-T use the Rollball protocol to talk + // to the PHY and need a 4 sec wait before probing the PHY. + SFP_QUIRK_F("FS", "SFP-2.5G-T", sfp_fixup_rollball_wait4s), + SFP_QUIRK_F("FS", "SFP-10GM-T", sfp_fixup_rollball_wait4s), // Fiberstore GPON-ONU-34-20BI can operate at 2500base-X, but report 1.2GBd NRZ in their EEPROM @@ -515,6 +517,8 @@ static const struct sfp_quirk sfp_quirks[] = { SFP_QUIRK_F("OEM", "SFP-10G-T", sfp_fixup_rollball_cc), SFP_QUIRK_M("OEM", "SFP-2.5G-T", sfp_quirk_oem_2_5g), + SFP_QUIRK_M("OEM", "SFP-2.5G-BX10-D", sfp_quirk_2500basex), + SFP_QUIRK_M("OEM", "SFP-2.5G-BX10-U", sfp_quirk_2500basex), SFP_QUIRK_F("OEM", "RTSFP-10", sfp_fixup_rollball_cc), SFP_QUIRK_F("OEM", "RTSFP-10G", sfp_fixup_rollball_cc), SFP_QUIRK_F("Turris", "RTSFP-2.5G", sfp_fixup_rollball), @@ -688,14 +692,71 @@ static int sfp_i2c_write(struct sfp *sfp, bool a2, u8 dev_addr, void *buf, return ret == ARRAY_SIZE(msgs) ? len : 0; } -static int sfp_i2c_configure(struct sfp *sfp, struct i2c_adapter *i2c) +static int sfp_smbus_byte_read(struct sfp *sfp, bool a2, u8 dev_addr, + void *buf, size_t len) { - if (!i2c_check_functionality(i2c, I2C_FUNC_I2C)) - return -EINVAL; + union i2c_smbus_data smbus_data; + u8 bus_addr = a2 ? 0x51 : 0x50; + u8 *data = buf; + int ret; + + while (len) { + ret = i2c_smbus_xfer(sfp->i2c, bus_addr, 0, + I2C_SMBUS_READ, dev_addr, + I2C_SMBUS_BYTE_DATA, &smbus_data); + if (ret < 0) + return ret; + + *data = smbus_data.byte; + + len--; + data++; + dev_addr++; + } + + return data - (u8 *)buf; +} + +static int sfp_smbus_byte_write(struct sfp *sfp, bool a2, u8 dev_addr, + void *buf, size_t len) +{ + union i2c_smbus_data smbus_data; + u8 bus_addr = a2 ?
0x51 : 0x50; + u8 *data = buf; + int ret; while (len) { + smbus_data.byte = *data; + ret = i2c_smbus_xfer(sfp->i2c, bus_addr, 0, + I2C_SMBUS_WRITE, dev_addr, + I2C_SMBUS_BYTE_DATA, &smbus_data); + if (ret) + return ret; + + len--; + data++; + dev_addr++; + } + + return 0; +} + +static int sfp_i2c_configure(struct sfp *sfp, struct i2c_adapter *i2c) +{ sfp->i2c = i2c; - sfp->read = sfp_i2c_read; - sfp->write = sfp_i2c_write; + + if (i2c_check_functionality(i2c, I2C_FUNC_I2C)) { + sfp->read = sfp_i2c_read; + sfp->write = sfp_i2c_write; + sfp->i2c_max_block_size = SFP_EEPROM_BLOCK_SIZE; + } else if (i2c_check_functionality(i2c, I2C_FUNC_SMBUS_BYTE_DATA)) { + sfp->read = sfp_smbus_byte_read; + sfp->write = sfp_smbus_byte_write; + sfp->i2c_max_block_size = 1; + } else { + sfp->i2c = NULL; + return -EINVAL; + } return 0; } @@ -1591,7 +1652,7 @@ static void sfp_hwmon_probe(struct work_struct *work) */ if (sfp->i2c_block_size < 2) { dev_info(sfp->dev, - "skipping hwmon device registration due to broken EEPROM\n"); + "skipping hwmon device registration\n"); dev_info(sfp->dev, "diagnostic EEPROM area cannot be read atomically to guarantee data coherency\n"); return; @@ -2198,7 +2259,7 @@ static int sfp_sm_mod_probe(struct sfp *sfp, bool report) u8 check; int ret; - sfp->i2c_block_size = SFP_EEPROM_BLOCK_SIZE; + sfp->i2c_block_size = sfp->i2c_max_block_size; ret = sfp_read(sfp, false, 0, &id.base, sizeof(id.base)); if (ret < 0) { @@ -2938,7 +2999,6 @@ static struct sfp *sfp_alloc(struct device *dev) return ERR_PTR(-ENOMEM); sfp->dev = dev; - sfp->i2c_block_size = SFP_EEPROM_BLOCK_SIZE; mutex_init(&sfp->sm_mutex); mutex_init(&sfp->st_mutex); @@ -3112,6 +3172,15 @@ static int sfp_probe(struct platform_device *pdev) if (!sfp->sfp_bus) return -ENOMEM; + if (sfp->i2c_max_block_size < 2) + dev_warn(sfp->dev, + "Please note:\n" + "This SFP cage is accessed via an SMBus only capable of single byte\n" + "transactions. Some features are disabled, others may be unreliable or\n" + "sporadically fail. Use with caution. There is nothing that the kernel\n" + "or community can do to fix it, the kernel will try its best. Please\n" + "verify any problems on hardware that supports multi-byte I2C transactions.\n");
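
(Editorial aside on the warning above.) With the new fall-back, an adapter that only offers I2C_FUNC_SMBUS_BYTE_DATA fetches a 16-bit diagnostic word as two independent transactions, and the module may refresh its diagnostics between them. A minimal sketch of the failure mode (illustration only, not part of the patch; SFP_TEMP is the A2h temperature word from linux/sfp.h, read through the driver-internal sfp_read() helper):

	u8 hi, lo;
	int err;

	/* Two separate SMBus byte reads: each half of the word may come
	 * from a different measurement sample.
	 */
	err = sfp_read(sfp, true, SFP_TEMP, &hi, sizeof(hi));
	if (err < 0)
		return err;
	err = sfp_read(sfp, true, SFP_TEMP + 1, &lo, sizeof(lo));
	if (err < 0)
		return err;

	/* (hi << 8) | lo can mix two samples, which is why hwmon
	 * registration is skipped when i2c_block_size < 2.
	 */
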
Please\n" + "verify any problems on hardware that supports multi-byte I2C transactions.\n"); + sfp_debugfs_init(sfp); return 0; diff --git a/drivers/net/phy/xilinx_gmii2rgmii.c b/drivers/net/phy/xilinx_gmii2rgmii.c index 7c51daecf18ee..2024d8ef36d98 100644 --- a/drivers/net/phy/xilinx_gmii2rgmii.c +++ b/drivers/net/phy/xilinx_gmii2rgmii.c @@ -64,15 +64,16 @@ static int xgmiitorgmii_read_status(struct phy_device *phydev) return 0; } -static int xgmiitorgmii_set_loopback(struct phy_device *phydev, bool enable) +static int xgmiitorgmii_set_loopback(struct phy_device *phydev, bool enable, + int speed) { struct gmii2rgmii *priv = mdiodev_get_drvdata(&phydev->mdio); int err; if (priv->phy_drv->set_loopback) - err = priv->phy_drv->set_loopback(phydev, enable); + err = priv->phy_drv->set_loopback(phydev, enable, speed); else - err = genphy_loopback(phydev, enable); + err = genphy_loopback(phydev, enable, speed); if (err < 0) return err; diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c index 4583e15ad03a0..53463767cc43e 100644 --- a/drivers/net/ppp/ppp_generic.c +++ b/drivers/net/ppp/ppp_generic.c @@ -45,6 +45,7 @@ #include #include #include +#include #include #include #include @@ -72,6 +73,17 @@ #define PPP_PROTO_LEN 2 #define PPP_LCP_HDRLEN 4 +/* The filter instructions generated by libpcap are constructed + * assuming a four-byte PPP header on each packet, where the last + * 2 bytes are the protocol field defined in the RFC and the first + * byte of the first 2 bytes indicates the direction. + * The second byte is currently unused, but we still need to initialize + * it to prevent crafted BPF programs from reading them which would + * cause reading of uninitialized data. + */ +#define PPP_FILTER_OUTBOUND_TAG 0x0100 +#define PPP_FILTER_INBOUND_TAG 0x0000 + /* * An instance of /dev/ppp can be associated with either a ppp * interface unit or a ppp channel. In both cases, file->private_data @@ -1303,10 +1315,13 @@ static int ppp_nl_validate(struct nlattr *tb[], struct nlattr *data[], return 0; } -static int ppp_nl_newlink(struct net *src_net, struct net_device *dev, - struct nlattr *tb[], struct nlattr *data[], +static int ppp_nl_newlink(struct net_device *dev, + struct rtnl_newlink_params *params, struct netlink_ext_ack *extack) { + struct net *link_net = rtnl_newlink_link_net(params); + struct nlattr **data = params->data; + struct nlattr **tb = params->tb; struct ppp_config conf = { .unit = -1, .ifname_is_set = true, @@ -1343,7 +1358,7 @@ static int ppp_nl_newlink(struct net *src_net, struct net_device *dev, if (!tb[IFLA_IFNAME] || !nla_len(tb[IFLA_IFNAME]) || !*(char *)nla_data(tb[IFLA_IFNAME])) conf.ifname_is_set = false; - err = ppp_dev_configure(src_net, dev, &conf); + err = ppp_dev_configure(link_net, dev, &conf); out_unlock: mutex_unlock(&ppp_mutex); @@ -1762,10 +1777,10 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb) if (proto < 0x8000) { #ifdef CONFIG_PPP_FILTER - /* check if we should pass this packet */ - /* the filter instructions are constructed assuming - a four-byte PPP header on each packet */ - *(u8 *)skb_push(skb, 2) = 1; + /* check if the packet passes the pass and active filters. + * See comment for PPP_FILTER_OUTBOUND_TAG above. 
+ */ + *(__be16 *)skb_push(skb, 2) = htons(PPP_FILTER_OUTBOUND_TAG); if (ppp->pass_filter && bpf_prog_run(ppp->pass_filter, skb) == 0) { if (ppp->debug & 1) @@ -2482,14 +2497,13 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb) /* network protocol frame - give it to the kernel */ #ifdef CONFIG_PPP_FILTER - /* check if the packet passes the pass and active filters */ - /* the filter instructions are constructed assuming - a four-byte PPP header on each packet */ if (ppp->pass_filter || ppp->active_filter) { if (skb_unclone(skb, GFP_ATOMIC)) goto err; - - *(u8 *)skb_push(skb, 2) = 0; + /* Check if the packet passes the pass and active filters. + * See comment for PPP_FILTER_INBOUND_TAG above. + */ + *(__be16 *)skb_push(skb, 2) = htons(PPP_FILTER_INBOUND_TAG); if (ppp->pass_filter && bpf_prog_run(ppp->pass_filter, skb) == 0) { if (ppp->debug & 1) @@ -3490,6 +3504,10 @@ ppp_connect_channel(struct channel *pch, int unit) ret = -ENOTCONN; goto outl; } + if (pch->chan->direct_xmit) + ppp->dev->priv_flags |= IFF_NO_QUEUE; + else + ppp->dev->priv_flags &= ~IFF_NO_QUEUE; spin_unlock_bh(&pch->downl); if (pch->file.hdrlen > ppp->file.hdrlen) ppp->file.hdrlen = pch->file.hdrlen; diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c index 2ea4f4890d23b..68e631718ab0b 100644 --- a/drivers/net/ppp/pppoe.c +++ b/drivers/net/ppp/pppoe.c @@ -693,6 +693,7 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr, po->chan.mtu = dev->mtu - sizeof(struct pppoe_hdr) - 2; po->chan.private = sk; po->chan.ops = &pppoe_chan_ops; + po->chan.direct_xmit = true; error = ppp_register_net_channel(dev_net(dev), &po->chan); if (error) { diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c index 689687bd2574b..5feaa70b5f47e 100644 --- a/drivers/net/ppp/pptp.c +++ b/drivers/net/ppp/pptp.c @@ -465,6 +465,7 @@ static int pptp_connect(struct socket *sock, struct sockaddr *uservaddr, po->chan.mtu -= PPTP_HEADER_OVERHEAD; po->chan.hdrlen = 2 + sizeof(struct pptp_gre_header); + po->chan.direct_xmit = true; error = ppp_register_channel(&po->chan); if (error) { pr_err("PPTP: failed to register PPP channel (%d)\n", error); diff --git a/drivers/net/sb1000.c b/drivers/net/sb1000.c deleted file mode 100644 index c3f8020571add..0000000000000 --- a/drivers/net/sb1000.c +++ /dev/null @@ -1,1179 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* sb1000.c: A General Instruments SB1000 driver for linux. */ -/* - Written 1998 by Franco Venturi. - - Copyright 1998 by Franco Venturi. - Copyright 1994,1995 by Donald Becker. - Copyright 1993 United States Government as represented by the - Director, National Security Agency. - - This driver is for the General Instruments SB1000 (internal SURFboard) - - The author may be reached as fventuri@mediaone.net - - - Changes: - - 981115 Steven Hirsch - - Linus changed the timer interface. Should work on all recent - development kernels. - - 980608 Steven Hirsch - - Small changes to make it work with 2.1.x kernels. Hopefully, - nothing major will change before official release of Linux 2.2. 
- - Merged with 2.2 - Alan Cox -*/ - -static char version[] = "sb1000.c:v1.1.2 6/01/98 (fventuri@mediaone.net)\n"; - -#include -#include -#include -#include -#include -#include -#include /* for SIOGCM/SIOSCM stuff */ -#include -#include -#include -#include -#include -#include /* for udelay() */ -#include -#include -#include -#include -#include - -#include -#include -#include - -#ifdef SB1000_DEBUG -static int sb1000_debug = SB1000_DEBUG; -#else -static const int sb1000_debug = 1; -#endif - -static const int SB1000_IO_EXTENT = 8; -/* SB1000 Maximum Receive Unit */ -static const int SB1000_MRU = 1500; /* octects */ - -#define NPIDS 4 -struct sb1000_private { - struct sk_buff *rx_skb[NPIDS]; - short rx_dlen[NPIDS]; - unsigned int rx_frames; - short rx_error_count; - short rx_error_dpc_count; - unsigned char rx_session_id[NPIDS]; - unsigned char rx_frame_id[NPIDS]; - unsigned char rx_pkt_type[NPIDS]; -}; - -/* prototypes for Linux interface */ -extern int sb1000_probe(struct net_device *dev); -static int sb1000_open(struct net_device *dev); -static int sb1000_siocdevprivate(struct net_device *dev, struct ifreq *ifr, - void __user *data, int cmd); -static netdev_tx_t sb1000_start_xmit(struct sk_buff *skb, - struct net_device *dev); -static irqreturn_t sb1000_interrupt(int irq, void *dev_id); -static int sb1000_close(struct net_device *dev); - - -/* SB1000 hardware routines to be used during open/configuration phases */ -static int card_wait_for_busy_clear(const int ioaddr[], - const char* name); -static int card_wait_for_ready(const int ioaddr[], const char* name, - unsigned char in[]); -static int card_send_command(const int ioaddr[], const char* name, - const unsigned char out[], unsigned char in[]); - -/* SB1000 hardware routines to be used during frame rx interrupt */ -static int sb1000_wait_for_ready(const int ioaddr[], const char* name); -static int sb1000_wait_for_ready_clear(const int ioaddr[], - const char* name); -static void sb1000_send_command(const int ioaddr[], const char* name, - const unsigned char out[]); -static void sb1000_read_status(const int ioaddr[], unsigned char in[]); -static void sb1000_issue_read_command(const int ioaddr[], - const char* name); - -/* SB1000 commands for open/configuration */ -static int sb1000_reset(const int ioaddr[], const char* name); -static int sb1000_check_CRC(const int ioaddr[], const char* name); -static inline int sb1000_start_get_set_command(const int ioaddr[], - const char* name); -static int sb1000_end_get_set_command(const int ioaddr[], - const char* name); -static int sb1000_activate(const int ioaddr[], const char* name); -static int sb1000_get_firmware_version(const int ioaddr[], - const char* name, unsigned char version[], int do_end); -static int sb1000_get_frequency(const int ioaddr[], const char* name, - int* frequency); -static int sb1000_set_frequency(const int ioaddr[], const char* name, - int frequency); -static int sb1000_get_PIDs(const int ioaddr[], const char* name, - short PID[]); -static int sb1000_set_PIDs(const int ioaddr[], const char* name, - const short PID[]); - -/* SB1000 commands for frame rx interrupt */ -static int sb1000_rx(struct net_device *dev); -static void sb1000_error_dpc(struct net_device *dev); - -static const struct pnp_device_id sb1000_pnp_ids[] = { - { "GIC1000", 0 }, - { "", 0 } -}; -MODULE_DEVICE_TABLE(pnp, sb1000_pnp_ids); - -static const struct net_device_ops sb1000_netdev_ops = { - .ndo_open = sb1000_open, - .ndo_start_xmit = sb1000_start_xmit, - .ndo_siocdevprivate = sb1000_siocdevprivate, - 
.ndo_stop = sb1000_close, - .ndo_set_mac_address = eth_mac_addr, - .ndo_validate_addr = eth_validate_addr, -}; - -static int -sb1000_probe_one(struct pnp_dev *pdev, const struct pnp_device_id *id) -{ - struct net_device *dev; - unsigned short ioaddr[2], irq; - unsigned int serial_number; - int error = -ENODEV; - u8 addr[ETH_ALEN]; - - if (pnp_device_attach(pdev) < 0) - return -ENODEV; - if (pnp_activate_dev(pdev) < 0) - goto out_detach; - - if (!pnp_port_valid(pdev, 0) || !pnp_port_valid(pdev, 1)) - goto out_disable; - if (!pnp_irq_valid(pdev, 0)) - goto out_disable; - - serial_number = pdev->card->serial; - - ioaddr[0] = pnp_port_start(pdev, 0); - ioaddr[1] = pnp_port_start(pdev, 0); - - irq = pnp_irq(pdev, 0); - - if (!request_region(ioaddr[0], 16, "sb1000")) - goto out_disable; - if (!request_region(ioaddr[1], 16, "sb1000")) - goto out_release_region0; - - dev = alloc_etherdev(sizeof(struct sb1000_private)); - if (!dev) { - error = -ENOMEM; - goto out_release_regions; - } - - - dev->base_addr = ioaddr[0]; - /* mem_start holds the second I/O address */ - dev->mem_start = ioaddr[1]; - dev->irq = irq; - - if (sb1000_debug > 0) - printk(KERN_NOTICE "%s: sb1000 at (%#3.3lx,%#3.3lx), " - "S/N %#8.8x, IRQ %d.\n", dev->name, dev->base_addr, - dev->mem_start, serial_number, dev->irq); - - /* - * The SB1000 is an rx-only cable modem device. The uplink is a modem - * and we do not want to arp on it. - */ - dev->flags = IFF_POINTOPOINT|IFF_NOARP; - - SET_NETDEV_DEV(dev, &pdev->dev); - - if (sb1000_debug > 0) - printk(KERN_NOTICE "%s", version); - - dev->netdev_ops = &sb1000_netdev_ops; - - /* hardware address is 0:0:serial_number */ - addr[0] = 0; - addr[1] = 0; - addr[2] = serial_number >> 24 & 0xff; - addr[3] = serial_number >> 16 & 0xff; - addr[4] = serial_number >> 8 & 0xff; - addr[5] = serial_number >> 0 & 0xff; - eth_hw_addr_set(dev, addr); - - pnp_set_drvdata(pdev, dev); - - error = register_netdev(dev); - if (error) - goto out_free_netdev; - return 0; - - out_free_netdev: - free_netdev(dev); - out_release_regions: - release_region(ioaddr[1], 16); - out_release_region0: - release_region(ioaddr[0], 16); - out_disable: - pnp_disable_dev(pdev); - out_detach: - pnp_device_detach(pdev); - return error; -} - -static void -sb1000_remove_one(struct pnp_dev *pdev) -{ - struct net_device *dev = pnp_get_drvdata(pdev); - - unregister_netdev(dev); - release_region(dev->base_addr, 16); - release_region(dev->mem_start, 16); - free_netdev(dev); -} - -static struct pnp_driver sb1000_driver = { - .name = "sb1000", - .id_table = sb1000_pnp_ids, - .probe = sb1000_probe_one, - .remove = sb1000_remove_one, -}; - - -/* - * SB1000 hardware routines to be used during open/configuration phases - */ - -static const int TimeOutJiffies = (875 * HZ) / 100; - -/* Card Wait For Busy Clear (cannot be used during an interrupt) */ -static int -card_wait_for_busy_clear(const int ioaddr[], const char* name) -{ - unsigned char a; - unsigned long timeout; - - a = inb(ioaddr[0] + 7); - timeout = jiffies + TimeOutJiffies; - while (a & 0x80 || a & 0x40) { - /* a little sleep */ - yield(); - - a = inb(ioaddr[0] + 7); - if (time_after_eq(jiffies, timeout)) { - printk(KERN_WARNING "%s: card_wait_for_busy_clear timeout\n", - name); - return -ETIME; - } - } - - return 0; -} - -/* Card Wait For Ready (cannot be used during an interrupt) */ -static int -card_wait_for_ready(const int ioaddr[], const char* name, unsigned char in[]) -{ - unsigned char a; - unsigned long timeout; - - a = inb(ioaddr[1] + 6); - timeout = jiffies + 
TimeOutJiffies; - while (a & 0x80 || !(a & 0x40)) { - /* a little sleep */ - yield(); - - a = inb(ioaddr[1] + 6); - if (time_after_eq(jiffies, timeout)) { - printk(KERN_WARNING "%s: card_wait_for_ready timeout\n", - name); - return -ETIME; - } - } - - in[1] = inb(ioaddr[0] + 1); - in[2] = inb(ioaddr[0] + 2); - in[3] = inb(ioaddr[0] + 3); - in[4] = inb(ioaddr[0] + 4); - in[0] = inb(ioaddr[0] + 5); - in[6] = inb(ioaddr[0] + 6); - in[5] = inb(ioaddr[1] + 6); - return 0; -} - -/* Card Send Command (cannot be used during an interrupt) */ -static int -card_send_command(const int ioaddr[], const char* name, - const unsigned char out[], unsigned char in[]) -{ - int status; - - if ((status = card_wait_for_busy_clear(ioaddr, name))) - return status; - outb(0xa0, ioaddr[0] + 6); - outb(out[2], ioaddr[0] + 1); - outb(out[3], ioaddr[0] + 2); - outb(out[4], ioaddr[0] + 3); - outb(out[5], ioaddr[0] + 4); - outb(out[1], ioaddr[0] + 5); - outb(0xa0, ioaddr[0] + 6); - outb(out[0], ioaddr[0] + 7); - if (out[0] != 0x20 && out[0] != 0x30) { - if ((status = card_wait_for_ready(ioaddr, name, in))) - return status; - inb(ioaddr[0] + 7); - if (sb1000_debug > 3) - printk(KERN_DEBUG "%s: card_send_command " - "out: %02x%02x%02x%02x%02x%02x " - "in: %02x%02x%02x%02x%02x%02x%02x\n", name, - out[0], out[1], out[2], out[3], out[4], out[5], - in[0], in[1], in[2], in[3], in[4], in[5], in[6]); - } else { - if (sb1000_debug > 3) - printk(KERN_DEBUG "%s: card_send_command " - "out: %02x%02x%02x%02x%02x%02x\n", name, - out[0], out[1], out[2], out[3], out[4], out[5]); - } - - if (out[1] != 0x1b) { - if (out[0] >= 0x80 && in[0] != (out[1] | 0x80)) - return -EIO; - } - return 0; -} - - -/* - * SB1000 hardware routines to be used during frame rx interrupt - */ -static const int Sb1000TimeOutJiffies = 7 * HZ; - -/* Card Wait For Ready (to be used during frame rx) */ -static int -sb1000_wait_for_ready(const int ioaddr[], const char* name) -{ - unsigned long timeout; - - timeout = jiffies + Sb1000TimeOutJiffies; - while (inb(ioaddr[1] + 6) & 0x80) { - if (time_after_eq(jiffies, timeout)) { - printk(KERN_WARNING "%s: sb1000_wait_for_ready timeout\n", - name); - return -ETIME; - } - } - timeout = jiffies + Sb1000TimeOutJiffies; - while (!(inb(ioaddr[1] + 6) & 0x40)) { - if (time_after_eq(jiffies, timeout)) { - printk(KERN_WARNING "%s: sb1000_wait_for_ready timeout\n", - name); - return -ETIME; - } - } - inb(ioaddr[0] + 7); - return 0; -} - -/* Card Wait For Ready Clear (to be used during frame rx) */ -static int -sb1000_wait_for_ready_clear(const int ioaddr[], const char* name) -{ - unsigned long timeout; - - timeout = jiffies + Sb1000TimeOutJiffies; - while (inb(ioaddr[1] + 6) & 0x80) { - if (time_after_eq(jiffies, timeout)) { - printk(KERN_WARNING "%s: sb1000_wait_for_ready_clear timeout\n", - name); - return -ETIME; - } - } - timeout = jiffies + Sb1000TimeOutJiffies; - while (inb(ioaddr[1] + 6) & 0x40) { - if (time_after_eq(jiffies, timeout)) { - printk(KERN_WARNING "%s: sb1000_wait_for_ready_clear timeout\n", - name); - return -ETIME; - } - } - return 0; -} - -/* Card Send Command (to be used during frame rx) */ -static void -sb1000_send_command(const int ioaddr[], const char* name, - const unsigned char out[]) -{ - outb(out[2], ioaddr[0] + 1); - outb(out[3], ioaddr[0] + 2); - outb(out[4], ioaddr[0] + 3); - outb(out[5], ioaddr[0] + 4); - outb(out[1], ioaddr[0] + 5); - outb(out[0], ioaddr[0] + 7); - if (sb1000_debug > 3) - printk(KERN_DEBUG "%s: sb1000_send_command out: %02x%02x%02x%02x" - "%02x%02x\n", name, out[0], out[1], 
out[2], out[3], out[4], out[5]); -} - -/* Card Read Status (to be used during frame rx) */ -static void -sb1000_read_status(const int ioaddr[], unsigned char in[]) -{ - in[1] = inb(ioaddr[0] + 1); - in[2] = inb(ioaddr[0] + 2); - in[3] = inb(ioaddr[0] + 3); - in[4] = inb(ioaddr[0] + 4); - in[0] = inb(ioaddr[0] + 5); -} - -/* Issue Read Command (to be used during frame rx) */ -static void -sb1000_issue_read_command(const int ioaddr[], const char* name) -{ - static const unsigned char Command0[6] = {0x20, 0x00, 0x00, 0x01, 0x00, 0x00}; - - sb1000_wait_for_ready_clear(ioaddr, name); - outb(0xa0, ioaddr[0] + 6); - sb1000_send_command(ioaddr, name, Command0); -} - - -/* - * SB1000 commands for open/configuration - */ -/* reset SB1000 card */ -static int -sb1000_reset(const int ioaddr[], const char* name) -{ - static const unsigned char Command0[6] = {0x80, 0x16, 0x00, 0x00, 0x00, 0x00}; - - unsigned char st[7]; - int port, status; - - port = ioaddr[1] + 6; - outb(0x4, port); - inb(port); - udelay(1000); - outb(0x0, port); - inb(port); - ssleep(1); - outb(0x4, port); - inb(port); - udelay(1000); - outb(0x0, port); - inb(port); - udelay(0); - - if ((status = card_send_command(ioaddr, name, Command0, st))) - return status; - if (st[3] != 0xf0) - return -EIO; - return 0; -} - -/* check SB1000 firmware CRC */ -static int -sb1000_check_CRC(const int ioaddr[], const char* name) -{ - static const unsigned char Command0[6] = {0x80, 0x1f, 0x00, 0x00, 0x00, 0x00}; - - unsigned char st[7]; - int status; - - /* check CRC */ - if ((status = card_send_command(ioaddr, name, Command0, st))) - return status; - if (st[1] != st[3] || st[2] != st[4]) - return -EIO; - return 0; -} - -static inline int -sb1000_start_get_set_command(const int ioaddr[], const char* name) -{ - static const unsigned char Command0[6] = {0x80, 0x1b, 0x00, 0x00, 0x00, 0x00}; - - unsigned char st[7]; - - return card_send_command(ioaddr, name, Command0, st); -} - -static int -sb1000_end_get_set_command(const int ioaddr[], const char* name) -{ - static const unsigned char Command0[6] = {0x80, 0x1b, 0x02, 0x00, 0x00, 0x00}; - static const unsigned char Command1[6] = {0x20, 0x00, 0x00, 0x00, 0x00, 0x00}; - - unsigned char st[7]; - int status; - - if ((status = card_send_command(ioaddr, name, Command0, st))) - return status; - return card_send_command(ioaddr, name, Command1, st); -} - -static int -sb1000_activate(const int ioaddr[], const char* name) -{ - static const unsigned char Command0[6] = {0x80, 0x11, 0x00, 0x00, 0x00, 0x00}; - static const unsigned char Command1[6] = {0x80, 0x16, 0x00, 0x00, 0x00, 0x00}; - - unsigned char st[7]; - int status; - - ssleep(1); - status = card_send_command(ioaddr, name, Command0, st); - if (status) - return status; - status = card_send_command(ioaddr, name, Command1, st); - if (status) - return status; - if (st[3] != 0xf1) { - status = sb1000_start_get_set_command(ioaddr, name); - if (status) - return status; - return -EIO; - } - udelay(1000); - return sb1000_start_get_set_command(ioaddr, name); -} - -/* get SB1000 firmware version */ -static int -sb1000_get_firmware_version(const int ioaddr[], const char* name, - unsigned char version[], int do_end) -{ - static const unsigned char Command0[6] = {0x80, 0x23, 0x00, 0x00, 0x00, 0x00}; - - unsigned char st[7]; - int status; - - if ((status = sb1000_start_get_set_command(ioaddr, name))) - return status; - if ((status = card_send_command(ioaddr, name, Command0, st))) - return status; - if (st[0] != 0xa3) - return -EIO; - version[0] = st[1]; - version[1] = st[2]; - 
if (do_end) - return sb1000_end_get_set_command(ioaddr, name); - else - return 0; -} - -/* get SB1000 frequency */ -static int -sb1000_get_frequency(const int ioaddr[], const char* name, int* frequency) -{ - static const unsigned char Command0[6] = {0x80, 0x44, 0x00, 0x00, 0x00, 0x00}; - - unsigned char st[7]; - int status; - - udelay(1000); - if ((status = sb1000_start_get_set_command(ioaddr, name))) - return status; - if ((status = card_send_command(ioaddr, name, Command0, st))) - return status; - *frequency = ((st[1] << 8 | st[2]) << 8 | st[3]) << 8 | st[4]; - return sb1000_end_get_set_command(ioaddr, name); -} - -/* set SB1000 frequency */ -static int -sb1000_set_frequency(const int ioaddr[], const char* name, int frequency) -{ - unsigned char st[7]; - int status; - unsigned char Command0[6] = {0x80, 0x29, 0x00, 0x00, 0x00, 0x00}; - - const int FrequencyLowerLimit = 57000; - const int FrequencyUpperLimit = 804000; - - if (frequency < FrequencyLowerLimit || frequency > FrequencyUpperLimit) { - printk(KERN_ERR "%s: frequency chosen (%d kHz) is not in the range " - "[%d,%d] kHz\n", name, frequency, FrequencyLowerLimit, - FrequencyUpperLimit); - return -EINVAL; - } - udelay(1000); - if ((status = sb1000_start_get_set_command(ioaddr, name))) - return status; - Command0[5] = frequency & 0xff; - frequency >>= 8; - Command0[4] = frequency & 0xff; - frequency >>= 8; - Command0[3] = frequency & 0xff; - frequency >>= 8; - Command0[2] = frequency & 0xff; - return card_send_command(ioaddr, name, Command0, st); -} - -/* get SB1000 PIDs */ -static int -sb1000_get_PIDs(const int ioaddr[], const char* name, short PID[]) -{ - static const unsigned char Command0[6] = {0x80, 0x40, 0x00, 0x00, 0x00, 0x00}; - static const unsigned char Command1[6] = {0x80, 0x41, 0x00, 0x00, 0x00, 0x00}; - static const unsigned char Command2[6] = {0x80, 0x42, 0x00, 0x00, 0x00, 0x00}; - static const unsigned char Command3[6] = {0x80, 0x43, 0x00, 0x00, 0x00, 0x00}; - - unsigned char st[7]; - int status; - - udelay(1000); - if ((status = sb1000_start_get_set_command(ioaddr, name))) - return status; - - if ((status = card_send_command(ioaddr, name, Command0, st))) - return status; - PID[0] = st[1] << 8 | st[2]; - - if ((status = card_send_command(ioaddr, name, Command1, st))) - return status; - PID[1] = st[1] << 8 | st[2]; - - if ((status = card_send_command(ioaddr, name, Command2, st))) - return status; - PID[2] = st[1] << 8 | st[2]; - - if ((status = card_send_command(ioaddr, name, Command3, st))) - return status; - PID[3] = st[1] << 8 | st[2]; - - return sb1000_end_get_set_command(ioaddr, name); -} - -/* set SB1000 PIDs */ -static int -sb1000_set_PIDs(const int ioaddr[], const char* name, const short PID[]) -{ - static const unsigned char Command4[6] = {0x80, 0x2e, 0x00, 0x00, 0x00, 0x00}; - - unsigned char st[7]; - short p; - int status; - unsigned char Command0[6] = {0x80, 0x31, 0x00, 0x00, 0x00, 0x00}; - unsigned char Command1[6] = {0x80, 0x32, 0x00, 0x00, 0x00, 0x00}; - unsigned char Command2[6] = {0x80, 0x33, 0x00, 0x00, 0x00, 0x00}; - unsigned char Command3[6] = {0x80, 0x34, 0x00, 0x00, 0x00, 0x00}; - - udelay(1000); - if ((status = sb1000_start_get_set_command(ioaddr, name))) - return status; - - p = PID[0]; - Command0[3] = p & 0xff; - p >>= 8; - Command0[2] = p & 0xff; - if ((status = card_send_command(ioaddr, name, Command0, st))) - return status; - - p = PID[1]; - Command1[3] = p & 0xff; - p >>= 8; - Command1[2] = p & 0xff; - if ((status = card_send_command(ioaddr, name, Command1, st))) - return status; - - p = 
PID[2]; - Command2[3] = p & 0xff; - p >>= 8; - Command2[2] = p & 0xff; - if ((status = card_send_command(ioaddr, name, Command2, st))) - return status; - - p = PID[3]; - Command3[3] = p & 0xff; - p >>= 8; - Command3[2] = p & 0xff; - if ((status = card_send_command(ioaddr, name, Command3, st))) - return status; - - if ((status = card_send_command(ioaddr, name, Command4, st))) - return status; - return sb1000_end_get_set_command(ioaddr, name); -} - - -static void -sb1000_print_status_buffer(const char* name, unsigned char st[], - unsigned char buffer[], int size) -{ - int i, j, k; - - printk(KERN_DEBUG "%s: status: %02x %02x\n", name, st[0], st[1]); - if (buffer[24] == 0x08 && buffer[25] == 0x00 && buffer[26] == 0x45) { - printk(KERN_DEBUG "%s: length: %d protocol: %d from: %d.%d.%d.%d:%d " - "to %d.%d.%d.%d:%d\n", name, buffer[28] << 8 | buffer[29], - buffer[35], buffer[38], buffer[39], buffer[40], buffer[41], - buffer[46] << 8 | buffer[47], - buffer[42], buffer[43], buffer[44], buffer[45], - buffer[48] << 8 | buffer[49]); - } else { - for (i = 0, k = 0; i < (size + 7) / 8; i++) { - printk(KERN_DEBUG "%s: %s", name, i ? " " : "buffer:"); - for (j = 0; j < 8 && k < size; j++, k++) - printk(" %02x", buffer[k]); - printk("\n"); - } - } -} - -/* - * SB1000 commands for frame rx interrupt - */ -/* receive a single frame and assemble datagram - * (this is the heart of the interrupt routine) - */ -static int -sb1000_rx(struct net_device *dev) -{ - -#define FRAMESIZE 184 - unsigned char st[2], buffer[FRAMESIZE], session_id, frame_id; - short dlen; - int ioaddr, ns; - unsigned int skbsize; - struct sk_buff *skb; - struct sb1000_private *lp = netdev_priv(dev); - struct net_device_stats *stats = &dev->stats; - - /* SB1000 frame constants */ - const int FrameSize = FRAMESIZE; - const int NewDatagramHeaderSkip = 8; - const int NewDatagramHeaderSize = NewDatagramHeaderSkip + 18; - const int NewDatagramDataSize = FrameSize - NewDatagramHeaderSize; - const int ContDatagramHeaderSkip = 7; - const int ContDatagramHeaderSize = ContDatagramHeaderSkip + 1; - const int ContDatagramDataSize = FrameSize - ContDatagramHeaderSize; - const int TrailerSize = 4; - - ioaddr = dev->base_addr; - - insw(ioaddr, (unsigned short*) st, 1); -#ifdef XXXDEBUG -printk("cm0: received: %02x %02x\n", st[0], st[1]); -#endif /* XXXDEBUG */ - lp->rx_frames++; - - /* decide if it is a good or bad frame */ - for (ns = 0; ns < NPIDS; ns++) { - session_id = lp->rx_session_id[ns]; - frame_id = lp->rx_frame_id[ns]; - if (st[0] == session_id) { - if (st[1] == frame_id || (!frame_id && (st[1] & 0xf0) == 0x30)) { - goto good_frame; - } else if ((st[1] & 0xf0) == 0x30 && (st[0] & 0x40)) { - goto skipped_frame; - } else { - goto bad_frame; - } - } else if (st[0] == (session_id | 0x40)) { - if ((st[1] & 0xf0) == 0x30) { - goto skipped_frame; - } else { - goto bad_frame; - } - } - } - goto bad_frame; - -skipped_frame: - stats->rx_frame_errors++; - skb = lp->rx_skb[ns]; - if (sb1000_debug > 1) - printk(KERN_WARNING "%s: missing frame(s): got %02x %02x " - "expecting %02x %02x\n", dev->name, st[0], st[1], - skb ? 
session_id : session_id | 0x40, frame_id); - if (skb) { - dev_kfree_skb(skb); - skb = NULL; - } - -good_frame: - lp->rx_frame_id[ns] = 0x30 | ((st[1] + 1) & 0x0f); - /* new datagram */ - if (st[0] & 0x40) { - /* get data length */ - insw(ioaddr, buffer, NewDatagramHeaderSize / 2); -#ifdef XXXDEBUG -printk("cm0: IP identification: %02x%02x fragment offset: %02x%02x\n", buffer[30], buffer[31], buffer[32], buffer[33]); -#endif /* XXXDEBUG */ - if (buffer[0] != NewDatagramHeaderSkip) { - if (sb1000_debug > 1) - printk(KERN_WARNING "%s: new datagram header skip error: " - "got %02x expecting %02x\n", dev->name, buffer[0], - NewDatagramHeaderSkip); - stats->rx_length_errors++; - insw(ioaddr, buffer, NewDatagramDataSize / 2); - goto bad_frame_next; - } - dlen = ((buffer[NewDatagramHeaderSkip + 3] & 0x0f) << 8 | - buffer[NewDatagramHeaderSkip + 4]) - 17; - if (dlen > SB1000_MRU) { - if (sb1000_debug > 1) - printk(KERN_WARNING "%s: datagram length (%d) greater " - "than MRU (%d)\n", dev->name, dlen, SB1000_MRU); - stats->rx_length_errors++; - insw(ioaddr, buffer, NewDatagramDataSize / 2); - goto bad_frame_next; - } - lp->rx_dlen[ns] = dlen; - /* compute size to allocate for datagram */ - skbsize = dlen + FrameSize; - if ((skb = alloc_skb(skbsize, GFP_ATOMIC)) == NULL) { - if (sb1000_debug > 1) - printk(KERN_WARNING "%s: can't allocate %d bytes long " - "skbuff\n", dev->name, skbsize); - stats->rx_dropped++; - insw(ioaddr, buffer, NewDatagramDataSize / 2); - goto dropped_frame; - } - skb->dev = dev; - skb_reset_mac_header(skb); - skb->protocol = (unsigned short) buffer[NewDatagramHeaderSkip + 16]; - insw(ioaddr, skb_put(skb, NewDatagramDataSize), - NewDatagramDataSize / 2); - lp->rx_skb[ns] = skb; - } else { - /* continuation of previous datagram */ - insw(ioaddr, buffer, ContDatagramHeaderSize / 2); - if (buffer[0] != ContDatagramHeaderSkip) { - if (sb1000_debug > 1) - printk(KERN_WARNING "%s: cont datagram header skip error: " - "got %02x expecting %02x\n", dev->name, buffer[0], - ContDatagramHeaderSkip); - stats->rx_length_errors++; - insw(ioaddr, buffer, ContDatagramDataSize / 2); - goto bad_frame_next; - } - skb = lp->rx_skb[ns]; - insw(ioaddr, skb_put(skb, ContDatagramDataSize), - ContDatagramDataSize / 2); - dlen = lp->rx_dlen[ns]; - } - if (skb->len < dlen + TrailerSize) { - lp->rx_session_id[ns] &= ~0x40; - return 0; - } - - /* datagram completed: send to upper level */ - skb_trim(skb, dlen); - __netif_rx(skb); - stats->rx_bytes+=dlen; - stats->rx_packets++; - lp->rx_skb[ns] = NULL; - lp->rx_session_id[ns] |= 0x40; - return 0; - -bad_frame: - insw(ioaddr, buffer, FrameSize / 2); - if (sb1000_debug > 1) - printk(KERN_WARNING "%s: frame error: got %02x %02x\n", - dev->name, st[0], st[1]); - stats->rx_frame_errors++; -bad_frame_next: - if (sb1000_debug > 2) - sb1000_print_status_buffer(dev->name, st, buffer, FrameSize); -dropped_frame: - stats->rx_errors++; - if (ns < NPIDS) { - if ((skb = lp->rx_skb[ns])) { - dev_kfree_skb(skb); - lp->rx_skb[ns] = NULL; - } - lp->rx_session_id[ns] |= 0x40; - } - return -1; -} - -static void -sb1000_error_dpc(struct net_device *dev) -{ - static const unsigned char Command0[6] = {0x80, 0x26, 0x00, 0x00, 0x00, 0x00}; - - char *name; - unsigned char st[5]; - int ioaddr[2]; - struct sb1000_private *lp = netdev_priv(dev); - const int ErrorDpcCounterInitialize = 200; - - ioaddr[0] = dev->base_addr; - /* mem_start holds the second I/O address */ - ioaddr[1] = dev->mem_start; - name = dev->name; - - sb1000_wait_for_ready_clear(ioaddr, name); - 
sb1000_send_command(ioaddr, name, Command0); - sb1000_wait_for_ready(ioaddr, name); - sb1000_read_status(ioaddr, st); - if (st[1] & 0x10) - lp->rx_error_dpc_count = ErrorDpcCounterInitialize; -} - - -/* - * Linux interface functions - */ -static int -sb1000_open(struct net_device *dev) -{ - char *name; - int ioaddr[2], status; - struct sb1000_private *lp = netdev_priv(dev); - const unsigned short FirmwareVersion[] = {0x01, 0x01}; - - ioaddr[0] = dev->base_addr; - /* mem_start holds the second I/O address */ - ioaddr[1] = dev->mem_start; - name = dev->name; - - /* initialize sb1000 */ - if ((status = sb1000_reset(ioaddr, name))) - return status; - ssleep(1); - if ((status = sb1000_check_CRC(ioaddr, name))) - return status; - - /* initialize private data before board can catch interrupts */ - lp->rx_skb[0] = NULL; - lp->rx_skb[1] = NULL; - lp->rx_skb[2] = NULL; - lp->rx_skb[3] = NULL; - lp->rx_dlen[0] = 0; - lp->rx_dlen[1] = 0; - lp->rx_dlen[2] = 0; - lp->rx_dlen[3] = 0; - lp->rx_frames = 0; - lp->rx_error_count = 0; - lp->rx_error_dpc_count = 0; - lp->rx_session_id[0] = 0x50; - lp->rx_session_id[1] = 0x48; - lp->rx_session_id[2] = 0x44; - lp->rx_session_id[3] = 0x42; - lp->rx_frame_id[0] = 0; - lp->rx_frame_id[1] = 0; - lp->rx_frame_id[2] = 0; - lp->rx_frame_id[3] = 0; - if (request_irq(dev->irq, sb1000_interrupt, 0, "sb1000", dev)) { - return -EAGAIN; - } - - if (sb1000_debug > 2) - printk(KERN_DEBUG "%s: Opening, IRQ %d\n", name, dev->irq); - - /* Activate board and check firmware version */ - udelay(1000); - if ((status = sb1000_activate(ioaddr, name))) - return status; - udelay(0); - if ((status = sb1000_get_firmware_version(ioaddr, name, version, 0))) - return status; - if (version[0] != FirmwareVersion[0] || version[1] != FirmwareVersion[1]) - printk(KERN_WARNING "%s: found firmware version %x.%02x " - "(should be %x.%02x)\n", name, version[0], version[1], - FirmwareVersion[0], FirmwareVersion[1]); - - - netif_start_queue(dev); - return 0; /* Always succeed */ -} - -static int sb1000_siocdevprivate(struct net_device *dev, struct ifreq *ifr, - void __user *data, int cmd) -{ - char* name; - unsigned char version[2]; - short PID[4]; - int ioaddr[2], status, frequency; - unsigned int stats[5]; - struct sb1000_private *lp = netdev_priv(dev); - - if (!(dev && dev->flags & IFF_UP)) - return -ENODEV; - - ioaddr[0] = dev->base_addr; - /* mem_start holds the second I/O address */ - ioaddr[1] = dev->mem_start; - name = dev->name; - - switch (cmd) { - case SIOCGCMSTATS: /* get statistics */ - stats[0] = dev->stats.rx_bytes; - stats[1] = lp->rx_frames; - stats[2] = dev->stats.rx_packets; - stats[3] = dev->stats.rx_errors; - stats[4] = dev->stats.rx_dropped; - if (copy_to_user(data, stats, sizeof(stats))) - return -EFAULT; - status = 0; - break; - - case SIOCGCMFIRMWARE: /* get firmware version */ - if ((status = sb1000_get_firmware_version(ioaddr, name, version, 1))) - return status; - if (copy_to_user(data, version, sizeof(version))) - return -EFAULT; - break; - - case SIOCGCMFREQUENCY: /* get frequency */ - if ((status = sb1000_get_frequency(ioaddr, name, &frequency))) - return status; - if (put_user(frequency, (int __user *)data)) - return -EFAULT; - break; - - case SIOCSCMFREQUENCY: /* set frequency */ - if (!capable(CAP_NET_ADMIN)) - return -EPERM; - if (get_user(frequency, (int __user *)data)) - return -EFAULT; - if ((status = sb1000_set_frequency(ioaddr, name, frequency))) - return status; - break; - - case SIOCGCMPIDS: /* get PIDs */ - if ((status = sb1000_get_PIDs(ioaddr, name, PID))) - 
return status; - if (copy_to_user(data, PID, sizeof(PID))) - return -EFAULT; - break; - - case SIOCSCMPIDS: /* set PIDs */ - if (!capable(CAP_NET_ADMIN)) - return -EPERM; - if (copy_from_user(PID, data, sizeof(PID))) - return -EFAULT; - if ((status = sb1000_set_PIDs(ioaddr, name, PID))) - return status; - /* set session_id, frame_id and pkt_type too */ - lp->rx_session_id[0] = 0x50 | (PID[0] & 0x0f); - lp->rx_session_id[1] = 0x48; - lp->rx_session_id[2] = 0x44; - lp->rx_session_id[3] = 0x42; - lp->rx_frame_id[0] = 0; - lp->rx_frame_id[1] = 0; - lp->rx_frame_id[2] = 0; - lp->rx_frame_id[3] = 0; - break; - - default: - status = -EINVAL; - break; - } - return status; -} - -/* transmit function: do nothing since SB1000 can't send anything out */ -static netdev_tx_t -sb1000_start_xmit(struct sk_buff *skb, struct net_device *dev) -{ - printk(KERN_WARNING "%s: trying to transmit!!!\n", dev->name); - /* sb1000 can't xmit datagrams */ - dev_kfree_skb(skb); - return NETDEV_TX_OK; -} - -/* SB1000 interrupt handler. */ -static irqreturn_t sb1000_interrupt(int irq, void *dev_id) -{ - static const unsigned char Command0[6] = {0x80, 0x2c, 0x00, 0x00, 0x00, 0x00}; - static const unsigned char Command1[6] = {0x80, 0x2e, 0x00, 0x00, 0x00, 0x00}; - - char *name; - unsigned char st; - int ioaddr[2]; - struct net_device *dev = dev_id; - struct sb1000_private *lp = netdev_priv(dev); - - const int MaxRxErrorCount = 6; - - ioaddr[0] = dev->base_addr; - /* mem_start holds the second I/O address */ - ioaddr[1] = dev->mem_start; - name = dev->name; - - /* is it a good interrupt? */ - st = inb(ioaddr[1] + 6); - if (!(st & 0x08 && st & 0x20)) { - return IRQ_NONE; - } - - if (sb1000_debug > 3) - printk(KERN_DEBUG "%s: entering interrupt\n", dev->name); - - st = inb(ioaddr[0] + 7); - if (sb1000_rx(dev)) - lp->rx_error_count++; -#ifdef SB1000_DELAY - udelay(SB1000_DELAY); -#endif /* SB1000_DELAY */ - sb1000_issue_read_command(ioaddr, name); - if (st & 0x01) { - sb1000_error_dpc(dev); - sb1000_issue_read_command(ioaddr, name); - } - if (lp->rx_error_dpc_count && !(--lp->rx_error_dpc_count)) { - sb1000_wait_for_ready_clear(ioaddr, name); - sb1000_send_command(ioaddr, name, Command0); - sb1000_wait_for_ready(ioaddr, name); - sb1000_issue_read_command(ioaddr, name); - } - if (lp->rx_error_count >= MaxRxErrorCount) { - sb1000_wait_for_ready_clear(ioaddr, name); - sb1000_send_command(ioaddr, name, Command1); - sb1000_wait_for_ready(ioaddr, name); - sb1000_issue_read_command(ioaddr, name); - lp->rx_error_count = 0; - } - - return IRQ_HANDLED; -} - -static int sb1000_close(struct net_device *dev) -{ - int i; - int ioaddr[2]; - struct sb1000_private *lp = netdev_priv(dev); - - if (sb1000_debug > 2) - printk(KERN_DEBUG "%s: Shutting down sb1000.\n", dev->name); - - netif_stop_queue(dev); - - ioaddr[0] = dev->base_addr; - /* mem_start holds the second I/O address */ - ioaddr[1] = dev->mem_start; - - free_irq(dev->irq, dev); - /* If we don't do this, we can't re-insmod it later. 
*/ - release_region(ioaddr[1], SB1000_IO_EXTENT); - release_region(ioaddr[0], SB1000_IO_EXTENT); - - /* free rx_skb's if needed */ - for (i=0; i<4; i++) { - if (lp->rx_skb[i]) { - dev_kfree_skb(lp->rx_skb[i]); - } - } - return 0; -} - -MODULE_AUTHOR("Franco Venturi "); -MODULE_DESCRIPTION("General Instruments SB1000 driver"); -MODULE_LICENSE("GPL"); - -module_pnp_driver(sb1000_driver); diff --git a/drivers/net/tap.c b/drivers/net/tap.c index 5ca6ecf0ce5fb..d4ece538f1b23 100644 --- a/drivers/net/tap.c +++ b/drivers/net/tap.c @@ -26,74 +26,9 @@ #include #include -#define TAP_IFFEATURES (IFF_VNET_HDR | IFF_MULTI_QUEUE) - -#define TAP_VNET_LE 0x80000000 -#define TAP_VNET_BE 0x40000000 - -#ifdef CONFIG_TUN_VNET_CROSS_LE -static inline bool tap_legacy_is_little_endian(struct tap_queue *q) -{ - return q->flags & TAP_VNET_BE ? false : - virtio_legacy_is_little_endian(); -} - -static long tap_get_vnet_be(struct tap_queue *q, int __user *sp) -{ - int s = !!(q->flags & TAP_VNET_BE); - - if (put_user(s, sp)) - return -EFAULT; - - return 0; -} - -static long tap_set_vnet_be(struct tap_queue *q, int __user *sp) -{ - int s; - - if (get_user(s, sp)) - return -EFAULT; - - if (s) - q->flags |= TAP_VNET_BE; - else - q->flags &= ~TAP_VNET_BE; - - return 0; -} -#else -static inline bool tap_legacy_is_little_endian(struct tap_queue *q) -{ - return virtio_legacy_is_little_endian(); -} - -static long tap_get_vnet_be(struct tap_queue *q, int __user *argp) -{ - return -EINVAL; -} - -static long tap_set_vnet_be(struct tap_queue *q, int __user *argp) -{ - return -EINVAL; -} -#endif /* CONFIG_TUN_VNET_CROSS_LE */ - -static inline bool tap_is_little_endian(struct tap_queue *q) -{ - return q->flags & TAP_VNET_LE || - tap_legacy_is_little_endian(q); -} - -static inline u16 tap16_to_cpu(struct tap_queue *q, __virtio16 val) -{ - return __virtio16_to_cpu(tap_is_little_endian(q), val); -} +#include "tun_vnet.h" -static inline __virtio16 cpu_to_tap16(struct tap_queue *q, u16 val) -{ - return __cpu_to_virtio16(tap_is_little_endian(q), val); -} +#define TAP_IFFEATURES (IFF_VNET_HDR | IFF_MULTI_QUEUE) static struct proto tap_proto = { .name = "tap", @@ -645,6 +580,7 @@ static ssize_t tap_get_user(struct tap_queue *q, void *msg_control, int err; struct virtio_net_hdr vnet_hdr = { 0 }; int vnet_hdr_len = 0; + int hdr_len = 0; int copylen = 0; int depth; bool zerocopy = false; @@ -654,25 +590,13 @@ static ssize_t tap_get_user(struct tap_queue *q, void *msg_control, if (q->flags & IFF_VNET_HDR) { vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz); - err = -EINVAL; - if (len < vnet_hdr_len) + hdr_len = tun_vnet_hdr_get(vnet_hdr_len, q->flags, from, &vnet_hdr); + if (hdr_len < 0) { + err = hdr_len; goto err; - len -= vnet_hdr_len; + } - err = -EFAULT; - if (!copy_from_iter_full(&vnet_hdr, sizeof(vnet_hdr), from)) - goto err; - iov_iter_advance(from, vnet_hdr_len - sizeof(vnet_hdr)); - if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && - tap16_to_cpu(q, vnet_hdr.csum_start) + - tap16_to_cpu(q, vnet_hdr.csum_offset) + 2 > - tap16_to_cpu(q, vnet_hdr.hdr_len)) - vnet_hdr.hdr_len = cpu_to_tap16(q, - tap16_to_cpu(q, vnet_hdr.csum_start) + - tap16_to_cpu(q, vnet_hdr.csum_offset) + 2); - err = -EINVAL; - if (tap16_to_cpu(q, vnet_hdr.hdr_len) > len) - goto err; + len -= vnet_hdr_len; } err = -EINVAL; @@ -682,12 +606,7 @@ static ssize_t tap_get_user(struct tap_queue *q, void *msg_control, if (msg_control && sock_flag(&q->sk, SOCK_ZEROCOPY)) { struct iov_iter i; - copylen = vnet_hdr.hdr_len ? 
- tap16_to_cpu(q, vnet_hdr.hdr_len) : GOODCOPY_LEN; - if (copylen > good_linear) - copylen = good_linear; - else if (copylen < ETH_HLEN) - copylen = ETH_HLEN; + copylen = clamp(hdr_len ?: GOODCOPY_LEN, ETH_HLEN, good_linear); linear = copylen; i = *from; iov_iter_advance(&i, copylen); @@ -697,11 +616,7 @@ static ssize_t tap_get_user(struct tap_queue *q, void *msg_control, if (!zerocopy) { copylen = len; - linear = tap16_to_cpu(q, vnet_hdr.hdr_len); - if (linear > good_linear) - linear = good_linear; - else if (linear < ETH_HLEN) - linear = ETH_HLEN; + linear = clamp(hdr_len, ETH_HLEN, good_linear); } skb = tap_alloc_skb(&q->sk, TAP_RESERVE, copylen, @@ -733,8 +648,7 @@ static ssize_t tap_get_user(struct tap_queue *q, void *msg_control, skb->dev = tap->dev; if (vnet_hdr_len) { - err = virtio_net_hdr_to_skb(skb, &vnet_hdr, - tap_is_little_endian(q)); + err = tun_vnet_hdr_to_skb(q->flags, skb, &vnet_hdr); if (err) { rcu_read_unlock(); drop_reason = SKB_DROP_REASON_DEV_HDR; @@ -797,23 +711,17 @@ static ssize_t tap_put_user(struct tap_queue *q, int total; if (q->flags & IFF_VNET_HDR) { - int vlan_hlen = skb_vlan_tag_present(skb) ? VLAN_HLEN : 0; struct virtio_net_hdr vnet_hdr; vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz); - if (iov_iter_count(iter) < vnet_hdr_len) - return -EINVAL; - if (virtio_net_hdr_from_skb(skb, &vnet_hdr, - tap_is_little_endian(q), true, - vlan_hlen)) - BUG(); - - if (copy_to_iter(&vnet_hdr, sizeof(vnet_hdr), iter) != - sizeof(vnet_hdr)) - return -EFAULT; + ret = tun_vnet_hdr_from_skb(q->flags, NULL, skb, &vnet_hdr); + if (ret) + return ret; - iov_iter_advance(iter, vnet_hdr_len - sizeof(vnet_hdr)); + ret = tun_vnet_hdr_put(vnet_hdr_len, iter, &vnet_hdr); + if (ret) + return ret; } total = vnet_hdr_len; total += skb->len; @@ -1072,42 +980,6 @@ static long tap_ioctl(struct file *file, unsigned int cmd, q->sk.sk_sndbuf = s; return 0; - case TUNGETVNETHDRSZ: - s = q->vnet_hdr_sz; - if (put_user(s, sp)) - return -EFAULT; - return 0; - - case TUNSETVNETHDRSZ: - if (get_user(s, sp)) - return -EFAULT; - if (s < (int)sizeof(struct virtio_net_hdr)) - return -EINVAL; - - q->vnet_hdr_sz = s; - return 0; - - case TUNGETVNETLE: - s = !!(q->flags & TAP_VNET_LE); - if (put_user(s, sp)) - return -EFAULT; - return 0; - - case TUNSETVNETLE: - if (get_user(s, sp)) - return -EFAULT; - if (s) - q->flags |= TAP_VNET_LE; - else - q->flags &= ~TAP_VNET_LE; - return 0; - - case TUNGETVNETBE: - return tap_get_vnet_be(q, sp); - - case TUNSETVNETBE: - return tap_set_vnet_be(q, sp); - case TUNSETOFFLOAD: /* let the user check for future flags */ if (arg & ~(TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 | @@ -1151,7 +1023,7 @@ static long tap_ioctl(struct file *file, unsigned int cmd, return ret; default: - return -EINVAL; + return tun_vnet_ioctl(&q->vnet_hdr_sz, &q->flags, cmd, sp); } } @@ -1198,7 +1070,7 @@ static int tap_get_user_xdp(struct tap_queue *q, struct xdp_buff *xdp) skb->protocol = eth_hdr(skb)->h_proto; if (vnet_hdr_len) { - err = virtio_net_hdr_to_skb(skb, gso, tap_is_little_endian(q)); + err = tun_vnet_hdr_to_skb(q->flags, skb, gso); if (err) goto err_kfree; } diff --git a/drivers/net/team/team_core.c b/drivers/net/team/team_core.c index f4019815f4736..d8fc0c79745d3 100644 --- a/drivers/net/team/team_core.c +++ b/drivers/net/team/team_core.c @@ -23,6 +23,7 @@ #include #include #include +#include #include #include #include @@ -2203,7 +2204,7 @@ static void team_setup(struct net_device *dev) dev->lltx = true; /* Don't allow team devices to change network namespaces. 
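 * (netns_immutable below is the renamed successor of the old netns_local
 * flag; the semantics are unchanged - the core rejects any attempt to move
 * the device into another network namespace.)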
*/ - dev->netns_local = true; + dev->netns_immutable = true; dev->features |= NETIF_F_GRO; @@ -2218,10 +2219,12 @@ static void team_setup(struct net_device *dev) dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX; } -static int team_newlink(struct net *src_net, struct net_device *dev, - struct nlattr *tb[], struct nlattr *data[], +static int team_newlink(struct net_device *dev, + struct rtnl_newlink_params *params, struct netlink_ext_ack *extack) { + struct nlattr **tb = params->tb; + if (tb[IFLA_ADDRESS] == NULL) eth_hw_addr_random(dev); diff --git a/drivers/net/tun.c b/drivers/net/tun.c index acf96f2624887..f75f912a0225f 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -83,6 +83,8 @@ #include #include +#include "tun_vnet.h" + static void tun_default_link_ksettings(struct net_device *dev, struct ethtool_link_ksettings *cmd); @@ -94,9 +96,6 @@ static void tun_default_link_ksettings(struct net_device *dev, * overload it to mean fasync when stored there. */ #define TUN_FASYNC IFF_ATTACH_QUEUE -/* High bits in flags field are unused. */ -#define TUN_VNET_LE 0x80000000 -#define TUN_VNET_BE 0x40000000 #define TUN_FEATURES (IFF_NO_PI | IFF_ONE_QUEUE | IFF_VNET_HDR | \ IFF_MULTI_QUEUE | IFF_NAPI | IFF_NAPI_FRAGS) @@ -298,70 +297,6 @@ static bool tun_napi_frags_enabled(const struct tun_file *tfile) return tfile->napi_frags_enabled; } -#ifdef CONFIG_TUN_VNET_CROSS_LE -static inline bool tun_legacy_is_little_endian(struct tun_struct *tun) -{ - return tun->flags & TUN_VNET_BE ? false : - virtio_legacy_is_little_endian(); -} - -static long tun_get_vnet_be(struct tun_struct *tun, int __user *argp) -{ - int be = !!(tun->flags & TUN_VNET_BE); - - if (put_user(be, argp)) - return -EFAULT; - - return 0; -} - -static long tun_set_vnet_be(struct tun_struct *tun, int __user *argp) -{ - int be; - - if (get_user(be, argp)) - return -EFAULT; - - if (be) - tun->flags |= TUN_VNET_BE; - else - tun->flags &= ~TUN_VNET_BE; - - return 0; -} -#else -static inline bool tun_legacy_is_little_endian(struct tun_struct *tun) -{ - return virtio_legacy_is_little_endian(); -} - -static long tun_get_vnet_be(struct tun_struct *tun, int __user *argp) -{ - return -EINVAL; -} - -static long tun_set_vnet_be(struct tun_struct *tun, int __user *argp) -{ - return -EINVAL; -} -#endif /* CONFIG_TUN_VNET_CROSS_LE */ - -static inline bool tun_is_little_endian(struct tun_struct *tun) -{ - return tun->flags & TUN_VNET_LE || - tun_legacy_is_little_endian(tun); -} - -static inline u16 tun16_to_cpu(struct tun_struct *tun, __virtio16 val) -{ - return __virtio16_to_cpu(tun_is_little_endian(tun), val); -} - -static inline __virtio16 cpu_to_tun16(struct tun_struct *tun, u16 val) -{ - return __cpu_to_virtio16(tun_is_little_endian(tun), val); -} - static inline u32 tun_hashfn(u32 rxhash) { return rxhash & TUN_MASK_FLOW_ENTRIES; @@ -1600,7 +1535,8 @@ static bool tun_can_build_skb(struct tun_struct *tun, struct tun_file *tfile, static struct sk_buff *__tun_build_skb(struct tun_file *tfile, struct page_frag *alloc_frag, char *buf, - int buflen, int len, int pad) + int buflen, int len, int pad, + int metasize) { struct sk_buff *skb = build_skb(buf, buflen); @@ -1609,6 +1545,8 @@ static struct sk_buff *__tun_build_skb(struct tun_file *tfile, skb_reserve(skb, pad); skb_put(skb, len); + if (metasize) + skb_metadata_set(skb, metasize); skb_set_owner_w(skb, tfile->socket.sk); get_page(alloc_frag->page); @@ -1668,6 +1606,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun, char *buf; size_t copied; int pad = TUN_RX_PAD; + 
int metasize = 0; int err = 0; rcu_read_lock(); @@ -1695,7 +1634,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun, if (hdr->gso_type || !xdp_prog) { *skb_xdp = 1; return __tun_build_skb(tfile, alloc_frag, buf, buflen, len, - pad); + pad, metasize); } *skb_xdp = 0; @@ -1709,7 +1648,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun, u32 act; xdp_init_buff(&xdp, buflen, &tfile->xdp_rxq); - xdp_prepare_buff(&xdp, buf, pad, len, false); + xdp_prepare_buff(&xdp, buf, pad, len, true); act = bpf_prog_run_xdp(xdp_prog, &xdp); if (act == XDP_REDIRECT || act == XDP_TX) { @@ -1730,12 +1669,18 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun, pad = xdp.data - xdp.data_hard_start; len = xdp.data_end - xdp.data; + + /* It is known that the xdp_buff was prepared with metadata + * support, so the metasize will never be negative. + */ + metasize = xdp.data - xdp.data_meta; } bpf_net_ctx_clear(bpf_net_ctx); rcu_read_unlock(); local_bh_enable(); - return __tun_build_skb(tfile, alloc_frag, buf, buflen, len, pad); + return __tun_build_skb(tfile, alloc_frag, buf, buflen, len, pad, + metasize); out: bpf_net_ctx_clear(bpf_net_ctx); @@ -1756,6 +1701,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, struct virtio_net_hdr gso = { 0 }; int good_linear; int copylen; + int hdr_len = 0; bool zerocopy = false; int err; u32 rxhash = 0; @@ -1775,26 +1721,16 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, if (tun->flags & IFF_VNET_HDR) { int vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz); - if (len < vnet_hdr_sz) - return -EINVAL; - len -= vnet_hdr_sz; + hdr_len = tun_vnet_hdr_get(vnet_hdr_sz, tun->flags, from, &gso); + if (hdr_len < 0) + return hdr_len; - if (!copy_from_iter_full(&gso, sizeof(gso), from)) - return -EFAULT; - - if ((gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && - tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2 > tun16_to_cpu(tun, gso.hdr_len)) - gso.hdr_len = cpu_to_tun16(tun, tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2); - - if (tun16_to_cpu(tun, gso.hdr_len) > len) - return -EINVAL; - iov_iter_advance(from, vnet_hdr_sz - sizeof(gso)); + len -= vnet_hdr_sz; } if ((tun->flags & TUN_TYPE_MASK) == IFF_TAP) { align += NET_IP_ALIGN; - if (unlikely(len < ETH_HLEN || - (gso.hdr_len && tun16_to_cpu(tun, gso.hdr_len) < ETH_HLEN))) + if (unlikely(len < ETH_HLEN || (hdr_len && hdr_len < ETH_HLEN))) return -EINVAL; } @@ -1807,9 +1743,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, * enough room for skb expand head in case it is used. * The rest of the buffer is mapped from userspace. */ - copylen = gso.hdr_len ? tun16_to_cpu(tun, gso.hdr_len) : GOODCOPY_LEN; - if (copylen > good_linear) - copylen = good_linear; + copylen = min(hdr_len ? 
hdr_len : GOODCOPY_LEN, good_linear); linear = copylen; iov_iter_advance(&i, copylen); if (iov_iter_npages(&i, INT_MAX) <= MAX_SKB_FRAGS) @@ -1830,10 +1764,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, } else { if (!zerocopy) { copylen = len; - if (tun16_to_cpu(tun, gso.hdr_len) > good_linear) - linear = good_linear; - else - linear = tun16_to_cpu(tun, gso.hdr_len); + linear = min(hdr_len, good_linear); } if (frags) { @@ -1868,7 +1799,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, } } - if (virtio_net_hdr_to_skb(skb, &gso, tun_is_little_endian(tun))) { + if (tun_vnet_hdr_to_skb(tun->flags, skb, &gso)) { atomic_long_inc(&tun->rx_frame_errors); err = -EINVAL; goto free_skb; @@ -2063,18 +1994,15 @@ static ssize_t tun_put_user_xdp(struct tun_struct *tun, { int vnet_hdr_sz = 0; size_t size = xdp_frame->len; - size_t ret; + ssize_t ret; if (tun->flags & IFF_VNET_HDR) { struct virtio_net_hdr gso = { 0 }; vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz); - if (unlikely(iov_iter_count(iter) < vnet_hdr_sz)) - return -EINVAL; - if (unlikely(copy_to_iter(&gso, sizeof(gso), iter) != - sizeof(gso))) - return -EFAULT; - iov_iter_advance(iter, vnet_hdr_sz - sizeof(gso)); + ret = tun_vnet_hdr_put(vnet_hdr_sz, iter, &gso); + if (ret) + return ret; } ret = copy_to_iter(xdp_frame->data, size, iter) + vnet_hdr_sz; @@ -2097,6 +2025,7 @@ static ssize_t tun_put_user(struct tun_struct *tun, int vlan_offset = 0; int vlan_hlen = 0; int vnet_hdr_sz = 0; + int ret; if (skb_vlan_tag_present(skb)) vlan_hlen = VLAN_HLEN; @@ -2123,31 +2052,13 @@ static ssize_t tun_put_user(struct tun_struct *tun, if (vnet_hdr_sz) { struct virtio_net_hdr gso; - if (iov_iter_count(iter) < vnet_hdr_sz) - return -EINVAL; - - if (virtio_net_hdr_from_skb(skb, &gso, - tun_is_little_endian(tun), true, - vlan_hlen)) { - struct skb_shared_info *sinfo = skb_shinfo(skb); - - if (net_ratelimit()) { - netdev_err(tun->dev, "unexpected GSO type: 0x%x, gso_size %d, hdr_len %d\n", - sinfo->gso_type, tun16_to_cpu(tun, gso.gso_size), - tun16_to_cpu(tun, gso.hdr_len)); - print_hex_dump(KERN_ERR, "tun: ", - DUMP_PREFIX_NONE, - 16, 1, skb->head, - min((int)tun16_to_cpu(tun, gso.hdr_len), 64), true); - } - WARN_ON_ONCE(1); - return -EINVAL; - } - - if (copy_to_iter(&gso, sizeof(gso), iter) != sizeof(gso)) - return -EFAULT; + ret = tun_vnet_hdr_from_skb(tun->flags, tun->dev, skb, &gso); + if (ret) + return ret; - iov_iter_advance(iter, vnet_hdr_sz - sizeof(gso)); + ret = tun_vnet_hdr_put(vnet_hdr_sz, iter, &gso); + if (ret) + return ret; } if (vlan_hlen) { @@ -2452,6 +2363,7 @@ static int tun_xdp_one(struct tun_struct *tun, struct sk_buff_head *queue; u32 rxhash = 0, act; int buflen = hdr->buflen; + int metasize = 0; int ret = 0; bool skb_xdp = false; struct page *page; @@ -2467,7 +2379,6 @@ static int tun_xdp_one(struct tun_struct *tun, } xdp_init_buff(xdp, buflen, &tfile->xdp_rxq); - xdp_set_data_meta_invalid(xdp); act = bpf_prog_run_xdp(xdp_prog, xdp); ret = tun_xdp_act(tun, xdp_prog, xdp, act); @@ -2507,7 +2418,15 @@ static int tun_xdp_one(struct tun_struct *tun, skb_reserve(skb, xdp->data - xdp->data_hard_start); skb_put(skb, xdp->data_end - xdp->data); - if (virtio_net_hdr_to_skb(skb, gso, tun_is_little_endian(tun))) { + /* The externally provided xdp_buff may have no metadata support, which + * is marked by xdp->data_meta being xdp->data + 1. This will lead to a + * metasize of -1 and is the reason why the condition checks for > 0. 
+ */ + metasize = xdp->data - xdp->data_meta; + if (metasize > 0) + skb_metadata_set(skb, metasize); + + if (tun_vnet_hdr_to_skb(tun->flags, skb, gso)) { atomic_long_inc(&tun->rx_frame_errors); kfree_skb(skb); ret = -EINVAL; @@ -3091,8 +3010,6 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd, kgid_t group; int ifindex; int sndbuf; - int vnet_hdr_sz; - int le; int ret; bool do_notify = false; @@ -3299,50 +3216,6 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd, tun_set_sndbuf(tun); break; - case TUNGETVNETHDRSZ: - vnet_hdr_sz = tun->vnet_hdr_sz; - if (copy_to_user(argp, &vnet_hdr_sz, sizeof(vnet_hdr_sz))) - ret = -EFAULT; - break; - - case TUNSETVNETHDRSZ: - if (copy_from_user(&vnet_hdr_sz, argp, sizeof(vnet_hdr_sz))) { - ret = -EFAULT; - break; - } - if (vnet_hdr_sz < (int)sizeof(struct virtio_net_hdr)) { - ret = -EINVAL; - break; - } - - tun->vnet_hdr_sz = vnet_hdr_sz; - break; - - case TUNGETVNETLE: - le = !!(tun->flags & TUN_VNET_LE); - if (put_user(le, (int __user *)argp)) - ret = -EFAULT; - break; - - case TUNSETVNETLE: - if (get_user(le, (int __user *)argp)) { - ret = -EFAULT; - break; - } - if (le) - tun->flags |= TUN_VNET_LE; - else - tun->flags &= ~TUN_VNET_LE; - break; - - case TUNGETVNETBE: - ret = tun_get_vnet_be(tun, argp); - break; - - case TUNSETVNETBE: - ret = tun_set_vnet_be(tun, argp); - break; - case TUNATTACHFILTER: /* Can be set only for TAPs */ ret = -EINVAL; @@ -3398,7 +3271,7 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd, break; default: - ret = -EINVAL; + ret = tun_vnet_ioctl(&tun->vnet_hdr_sz, &tun->flags, cmd, argp); break; } diff --git a/drivers/net/tun_vnet.h b/drivers/net/tun_vnet.h new file mode 100644 index 0000000000000..58b9ac7a5fc40 --- /dev/null +++ b/drivers/net/tun_vnet.h @@ -0,0 +1,186 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +#ifndef TUN_VNET_H +#define TUN_VNET_H + +/* High bits in flags field are unused. 
*/ +#define TUN_VNET_LE 0x80000000 +#define TUN_VNET_BE 0x40000000 + +static inline bool tun_vnet_legacy_is_little_endian(unsigned int flags) +{ + bool be = IS_ENABLED(CONFIG_TUN_VNET_CROSS_LE) && + (flags & TUN_VNET_BE); + + return !be && virtio_legacy_is_little_endian(); +} + +static inline long tun_get_vnet_be(unsigned int flags, int __user *argp) +{ + int be = !!(flags & TUN_VNET_BE); + + if (!IS_ENABLED(CONFIG_TUN_VNET_CROSS_LE)) + return -EINVAL; + + if (put_user(be, argp)) + return -EFAULT; + + return 0; +} + +static inline long tun_set_vnet_be(unsigned int *flags, int __user *argp) +{ + int be; + + if (!IS_ENABLED(CONFIG_TUN_VNET_CROSS_LE)) + return -EINVAL; + + if (get_user(be, argp)) + return -EFAULT; + + if (be) + *flags |= TUN_VNET_BE; + else + *flags &= ~TUN_VNET_BE; + + return 0; +} + +static inline bool tun_vnet_is_little_endian(unsigned int flags) +{ + return flags & TUN_VNET_LE || tun_vnet_legacy_is_little_endian(flags); +} + +static inline u16 tun_vnet16_to_cpu(unsigned int flags, __virtio16 val) +{ + return __virtio16_to_cpu(tun_vnet_is_little_endian(flags), val); +} + +static inline __virtio16 cpu_to_tun_vnet16(unsigned int flags, u16 val) +{ + return __cpu_to_virtio16(tun_vnet_is_little_endian(flags), val); +} + +static inline long tun_vnet_ioctl(int *vnet_hdr_sz, unsigned int *flags, + unsigned int cmd, int __user *sp) +{ + int s; + + switch (cmd) { + case TUNGETVNETHDRSZ: + s = *vnet_hdr_sz; + if (put_user(s, sp)) + return -EFAULT; + return 0; + + case TUNSETVNETHDRSZ: + if (get_user(s, sp)) + return -EFAULT; + if (s < (int)sizeof(struct virtio_net_hdr)) + return -EINVAL; + + *vnet_hdr_sz = s; + return 0; + + case TUNGETVNETLE: + s = !!(*flags & TUN_VNET_LE); + if (put_user(s, sp)) + return -EFAULT; + return 0; + + case TUNSETVNETLE: + if (get_user(s, sp)) + return -EFAULT; + if (s) + *flags |= TUN_VNET_LE; + else + *flags &= ~TUN_VNET_LE; + return 0; + + case TUNGETVNETBE: + return tun_get_vnet_be(*flags, sp); + + case TUNSETVNETBE: + return tun_set_vnet_be(flags, sp); + + default: + return -EINVAL; + } +} + +static inline int tun_vnet_hdr_get(int sz, unsigned int flags, + struct iov_iter *from, + struct virtio_net_hdr *hdr) +{ + u16 hdr_len; + + if (iov_iter_count(from) < sz) + return -EINVAL; + + if (!copy_from_iter_full(hdr, sizeof(*hdr), from)) + return -EFAULT; + + hdr_len = tun_vnet16_to_cpu(flags, hdr->hdr_len); + + if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) { + hdr_len = max(tun_vnet16_to_cpu(flags, hdr->csum_start) + tun_vnet16_to_cpu(flags, hdr->csum_offset) + 2, hdr_len); + hdr->hdr_len = cpu_to_tun_vnet16(flags, hdr_len); + } + + if (hdr_len > iov_iter_count(from)) + return -EINVAL; + + iov_iter_advance(from, sz - sizeof(*hdr)); + + return hdr_len; +} + +static inline int tun_vnet_hdr_put(int sz, struct iov_iter *iter, + const struct virtio_net_hdr *hdr) +{ + if (unlikely(iov_iter_count(iter) < sz)) + return -EINVAL; + + if (unlikely(copy_to_iter(hdr, sizeof(*hdr), iter) != sizeof(*hdr))) + return -EFAULT; + + if (iov_iter_zero(sz - sizeof(*hdr), iter) != sz - sizeof(*hdr)) + return -EFAULT; + + return 0; +} + +static inline int tun_vnet_hdr_to_skb(unsigned int flags, struct sk_buff *skb, + const struct virtio_net_hdr *hdr) +{ + return virtio_net_hdr_to_skb(skb, hdr, tun_vnet_is_little_endian(flags)); +} + +static inline int tun_vnet_hdr_from_skb(unsigned int flags, + const struct net_device *dev, + const struct sk_buff *skb, + struct virtio_net_hdr *hdr) +{ + int vlan_hlen = skb_vlan_tag_present(skb) ? 
VLAN_HLEN : 0; + + if (virtio_net_hdr_from_skb(skb, hdr, + tun_vnet_is_little_endian(flags), true, + vlan_hlen)) { + struct skb_shared_info *sinfo = skb_shinfo(skb); + + if (net_ratelimit()) { + netdev_err(dev, "unexpected GSO type: 0x%x, gso_size %d, hdr_len %d\n", + sinfo->gso_type, tun_vnet16_to_cpu(flags, hdr->gso_size), + tun_vnet16_to_cpu(flags, hdr->hdr_len)); + print_hex_dump(KERN_ERR, "tun: ", + DUMP_PREFIX_NONE, + 16, 1, skb->head, + min(tun_vnet16_to_cpu(flags, hdr->hdr_len), 64), true); + } + WARN_ON_ONCE(1); + return -EINVAL; + } + + return 0; +} + +#endif /* TUN_VNET_H */ diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c index 57d6e5abc30e8..da24941a6e444 100644 --- a/drivers/net/usb/asix_devices.c +++ b/drivers/net/usb/asix_devices.c @@ -1421,6 +1421,19 @@ static const struct driver_info hg20f9_info = { .data = FLAG_EEPROM_MAC, }; +static const struct driver_info lyconsys_fibergecko100_info = { + .description = "LyconSys FiberGecko 100 USB 2.0 to SFP Adapter", + .bind = ax88178_bind, + .status = asix_status, + .link_reset = ax88178_link_reset, + .reset = ax88178_link_reset, + .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR | + FLAG_MULTI_PACKET, + .rx_fixup = asix_rx_fixup_common, + .tx_fixup = asix_tx_fixup, + .data = 0x20061201, +}; + static const struct usb_device_id products [] = { { // Linksys USB200M @@ -1578,6 +1591,10 @@ static const struct usb_device_id products [] = { // Linux Automation GmbH USB 10Base-T1L USB_DEVICE(0x33f7, 0x0004), .driver_info = (unsigned long) &lxausb_t1l_info, +}, { + /* LyconSys FiberGecko 100 */ + USB_DEVICE(0x1d2a, 0x0801), + .driver_info = (unsigned long) &lyconsys_fibergecko100_info, }, { }, // END }; diff --git a/drivers/net/usb/ax88172a.c b/drivers/net/usb/ax88172a.c index e47bb125048d4..f613e4bc68c85 100644 --- a/drivers/net/usb/ax88172a.c +++ b/drivers/net/usb/ax88172a.c @@ -18,8 +18,8 @@ struct ax88172a_private { struct mii_bus *mdio; struct phy_device *phydev; - char phy_name[20]; - u16 phy_addr; + char phy_name[PHY_ID_SIZE]; + u8 phy_addr; u16 oldmode; int use_embdphy; struct asix_rx_fixup_info rx_fixup_info; @@ -210,7 +210,11 @@ static int ax88172a_bind(struct usbnet *dev, struct usb_interface *intf) ret = asix_read_phy_addr(dev, priv->use_embdphy); if (ret < 0) goto free; - + if (ret >= PHY_MAX_ADDR) { + netdev_err(dev->net, "Invalid PHY address %#x\n", ret); + ret = -ENODEV; + goto free; + } priv->phy_addr = ret; ax88172a_reset_phy(dev, priv->use_embdphy); @@ -308,7 +312,7 @@ static int ax88172a_reset(struct usbnet *dev) rx_ctl); /* Connect to PHY */ - snprintf(priv->phy_name, 20, PHY_ID_FMT, + snprintf(priv->phy_name, sizeof(priv->phy_name), PHY_ID_FMT, priv->mdio->id, priv->phy_addr); priv->phydev = phy_connect(dev->net, priv->phy_name, diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c index a6469235d904e..a032c1ded4063 100644 --- a/drivers/net/usb/cdc_ether.c +++ b/drivers/net/usb/cdc_ether.c @@ -783,6 +783,13 @@ static const struct usb_device_id products[] = { .driver_info = 0, }, +/* Lenovo ThinkPad Hybrid USB-C with USB-A Dock (40af0135eu, based on Realtek RTL8153) */ +{ + USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0xa359, USB_CLASS_COMM, + USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), + .driver_info = 0, +}, + /* Aquantia AQtion USB to 5GbE Controller (based on AQC111U) */ { USB_DEVICE_AND_INTERFACE_INFO(AQUANTIA_VENDOR_ID, 0xc101, diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c index e13e4920ee9b2..dbf01210b0e78 100644 --- 
a/drivers/net/usb/cdc_mbim.c +++ b/drivers/net/usb/cdc_mbim.c @@ -660,12 +660,12 @@ static const struct usb_device_id mbim_devs[] = { .driver_info = (unsigned long)&cdc_mbim_info_avoid_altsetting_toggle, }, - /* Telit FN990 */ + /* Telit FN990A */ { USB_DEVICE_AND_INTERFACE_INFO(0x1bc7, 0x1071, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE), .driver_info = (unsigned long)&cdc_mbim_info_avoid_altsetting_toggle, }, - /* Telit FE990 */ + /* Telit FE990A */ { USB_DEVICE_AND_INTERFACE_INFO(0x1bc7, 0x1081, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE), .driver_info = (unsigned long)&cdc_mbim_info_avoid_altsetting_toggle, }, diff --git a/drivers/net/usb/ch9200.c b/drivers/net/usb/ch9200.c index f69d9b902da04..957d186e324af 100644 --- a/drivers/net/usb/ch9200.c +++ b/drivers/net/usb/ch9200.c @@ -168,8 +168,6 @@ static int control_write(struct usbnet *dev, unsigned char request, err = -EINVAL; kfree(buf); - return 0; - err_out: return err; } @@ -178,6 +176,7 @@ static int ch9200_mdio_read(struct net_device *netdev, int phy_id, int loc) { struct usbnet *dev = netdev_priv(netdev); unsigned char buff[2]; + int ret; netdev_dbg(netdev, "%s phy_id:%02x loc:%02x\n", __func__, phy_id, loc); @@ -185,8 +184,10 @@ static int ch9200_mdio_read(struct net_device *netdev, int phy_id, int loc) if (phy_id != 0) return -ENODEV; - control_read(dev, REQUEST_READ, 0, loc * 2, buff, 0x02, - CONTROL_TIMEOUT_MS); + ret = control_read(dev, REQUEST_READ, 0, loc * 2, buff, 0x02, + CONTROL_TIMEOUT_MS); + if (ret < 0) + return ret; return (buff[0] | buff[1] << 8); } @@ -303,24 +304,27 @@ static int ch9200_rx_fixup(struct usbnet *dev, struct sk_buff *skb) static int get_mac_address(struct usbnet *dev, unsigned char *data) { - int err = 0; unsigned char mac_addr[0x06]; - int rd_mac_len = 0; + int rd_mac_len; netdev_dbg(dev->net, "%s:\n\tusbnet VID:%0x PID:%0x\n", __func__, le16_to_cpu(dev->udev->descriptor.idVendor), le16_to_cpu(dev->udev->descriptor.idProduct)); - memset(mac_addr, 0, sizeof(mac_addr)); - rd_mac_len = control_read(dev, REQUEST_READ, 0, - MAC_REG_STATION_L, mac_addr, 0x02, - CONTROL_TIMEOUT_MS); - rd_mac_len += control_read(dev, REQUEST_READ, 0, MAC_REG_STATION_M, - mac_addr + 2, 0x02, CONTROL_TIMEOUT_MS); - rd_mac_len += control_read(dev, REQUEST_READ, 0, MAC_REG_STATION_H, - mac_addr + 4, 0x02, CONTROL_TIMEOUT_MS); - if (rd_mac_len != ETH_ALEN) - err = -EINVAL; + rd_mac_len = control_read(dev, REQUEST_READ, 0, MAC_REG_STATION_L, + mac_addr, 0x02, CONTROL_TIMEOUT_MS); + if (rd_mac_len < 0) + return rd_mac_len; + + rd_mac_len = control_read(dev, REQUEST_READ, 0, MAC_REG_STATION_M, + mac_addr + 2, 0x02, CONTROL_TIMEOUT_MS); + if (rd_mac_len < 0) + return rd_mac_len; + + rd_mac_len = control_read(dev, REQUEST_READ, 0, MAC_REG_STATION_H, + mac_addr + 4, 0x02, CONTROL_TIMEOUT_MS); + if (rd_mac_len < 0) + return rd_mac_len; data[0] = mac_addr[5]; data[1] = mac_addr[4]; @@ -329,17 +333,17 @@ static int get_mac_address(struct usbnet *dev, unsigned char *data) data[4] = mac_addr[1]; data[5] = mac_addr[0]; - return err; + return 0; } static int ch9200_bind(struct usbnet *dev, struct usb_interface *intf) { - int retval = 0; + int retval; unsigned char data[2]; u8 addr[ETH_ALEN]; retval = usbnet_get_endpoints(dev, intf); - if (retval) + if (retval < 0) return retval; dev->mii.dev = dev->net; @@ -357,37 +361,52 @@ static int ch9200_bind(struct usbnet *dev, struct usb_interface *intf) data[1] = 0x0F; retval = control_write(dev, REQUEST_WRITE, 0, MAC_REG_THRESHOLD, data, 0x02, CONTROL_TIMEOUT_MS); + if 
(retval < 0) + return retval; data[0] = 0xA0; data[1] = 0x90; retval = control_write(dev, REQUEST_WRITE, 0, MAC_REG_FIFO_DEPTH, data, 0x02, CONTROL_TIMEOUT_MS); + if (retval < 0) + return retval; data[0] = 0x30; data[1] = 0x00; retval = control_write(dev, REQUEST_WRITE, 0, MAC_REG_PAUSE, data, 0x02, CONTROL_TIMEOUT_MS); + if (retval < 0) + return retval; data[0] = 0x17; data[1] = 0xD8; retval = control_write(dev, REQUEST_WRITE, 0, MAC_REG_FLOW_CONTROL, data, 0x02, CONTROL_TIMEOUT_MS); + if (retval < 0) + return retval; /* Undocumented register */ data[0] = 0x01; data[1] = 0x00; retval = control_write(dev, REQUEST_WRITE, 0, 254, data, 0x02, CONTROL_TIMEOUT_MS); + if (retval < 0) + return retval; data[0] = 0x5F; data[1] = 0x0D; retval = control_write(dev, REQUEST_WRITE, 0, MAC_REG_CTRL, data, 0x02, CONTROL_TIMEOUT_MS); + if (retval < 0) + return retval; retval = get_mac_address(dev, addr); + if (retval < 0) + return retval; + eth_hw_addr_set(dev->net, addr); - return retval; + return 0; } static const struct driver_info ch9200_info = { diff --git a/drivers/net/usb/gl620a.c b/drivers/net/usb/gl620a.c index 46af78caf457a..0bfa37c140591 100644 --- a/drivers/net/usb/gl620a.c +++ b/drivers/net/usb/gl620a.c @@ -179,9 +179,7 @@ static int genelink_bind(struct usbnet *dev, struct usb_interface *intf) { dev->hard_mtu = GL_RCV_BUF_SIZE; dev->net->hard_header_len += 4; - dev->in = usb_rcvbulkpipe(dev->udev, dev->driver_info->in); - dev->out = usb_sndbulkpipe(dev->udev, dev->driver_info->out); - return 0; + return usbnet_get_endpoints(dev, intf); } static const struct driver_info genelink_info = { diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c index a91bf9c7e31d2..137adf6d5b08a 100644 --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c @@ -627,7 +627,7 @@ static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data) kfree(buf); - return ret; + return ret < 0 ? ret : 0; } static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data) @@ -658,7 +658,7 @@ static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data) kfree(buf); - return ret; + return ret < 0 ? 
ret : 0; } static int lan78xx_update_reg(struct lan78xx_net *dev, u32 reg, u32 mask, diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index e9208a8d2bfa6..b586b1c13a47f 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -1360,14 +1360,16 @@ static const struct usb_device_id products[] = { {QMI_QUIRK_SET_DTR(0x1bc7, 0x1050, 2)}, /* Telit FN980 */ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1057, 2)}, /* Telit FN980 */ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1060, 2)}, /* Telit LN920 */ - {QMI_QUIRK_SET_DTR(0x1bc7, 0x1070, 2)}, /* Telit FN990 */ - {QMI_QUIRK_SET_DTR(0x1bc7, 0x1080, 2)}, /* Telit FE990 */ + {QMI_QUIRK_SET_DTR(0x1bc7, 0x1070, 2)}, /* Telit FN990A */ + {QMI_QUIRK_SET_DTR(0x1bc7, 0x1080, 2)}, /* Telit FE990A */ {QMI_QUIRK_SET_DTR(0x1bc7, 0x10a0, 0)}, /* Telit FN920C04 */ {QMI_QUIRK_SET_DTR(0x1bc7, 0x10a4, 0)}, /* Telit FN920C04 */ {QMI_QUIRK_SET_DTR(0x1bc7, 0x10a9, 0)}, /* Telit FN920C04 */ + {QMI_QUIRK_SET_DTR(0x1bc7, 0x10b0, 0)}, /* Telit FE990B */ {QMI_QUIRK_SET_DTR(0x1bc7, 0x10c0, 0)}, /* Telit FE910C04 */ {QMI_QUIRK_SET_DTR(0x1bc7, 0x10c4, 0)}, /* Telit FE910C04 */ {QMI_QUIRK_SET_DTR(0x1bc7, 0x10c8, 0)}, /* Telit FE910C04 */ + {QMI_QUIRK_SET_DTR(0x1bc7, 0x10d0, 0)}, /* Telit FN990B */ {QMI_FIXED_INTF(0x1bc7, 0x1100, 3)}, /* Telit ME910 */ {QMI_FIXED_INTF(0x1bc7, 0x1101, 3)}, /* Telit ME910 dual modem */ {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */ diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 468c739740463..2cab046749a92 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -785,6 +785,7 @@ enum rtl8152_flags { #define DEVICE_ID_THINKPAD_USB_C_DONGLE 0x720c #define DEVICE_ID_THINKPAD_USB_C_DOCK_GEN2 0xa387 #define DEVICE_ID_THINKPAD_USB_C_DOCK_GEN3 0x3062 +#define DEVICE_ID_THINKPAD_HYBRID_USB_C_DOCK 0xa359 struct tally_counter { __le64 tx_packets; @@ -9787,6 +9788,7 @@ static bool rtl8152_supports_lenovo_macpassthru(struct usb_device *udev) case DEVICE_ID_THINKPAD_USB_C_DOCK_GEN2: case DEVICE_ID_THINKPAD_USB_C_DOCK_GEN3: case DEVICE_ID_THINKPAD_USB_C_DONGLE: + case DEVICE_ID_THINKPAD_HYBRID_USB_C_DOCK: return 1; } } else if (vendor_id == VENDOR_ID_REALTEK && parent_vendor_id == VENDOR_ID_LENOVO) { @@ -10064,6 +10066,8 @@ static const struct usb_device_id rtl8152_table[] = { { USB_DEVICE(VENDOR_ID_MICROSOFT, 0x0927) }, { USB_DEVICE(VENDOR_ID_MICROSOFT, 0x0c5e) }, { USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101) }, + + /* Lenovo */ { USB_DEVICE(VENDOR_ID_LENOVO, 0x304f) }, { USB_DEVICE(VENDOR_ID_LENOVO, 0x3054) }, { USB_DEVICE(VENDOR_ID_LENOVO, 0x3062) }, @@ -10074,11 +10078,14 @@ static const struct usb_device_id rtl8152_table[] = { { USB_DEVICE(VENDOR_ID_LENOVO, 0x720c) }, { USB_DEVICE(VENDOR_ID_LENOVO, 0x7214) }, { USB_DEVICE(VENDOR_ID_LENOVO, 0x721e) }, + { USB_DEVICE(VENDOR_ID_LENOVO, 0xa359) }, { USB_DEVICE(VENDOR_ID_LENOVO, 0xa387) }, + { USB_DEVICE(VENDOR_ID_LINKSYS, 0x0041) }, { USB_DEVICE(VENDOR_ID_NVIDIA, 0x09ff) }, { USB_DEVICE(VENDOR_ID_TPLINK, 0x0601) }, { USB_DEVICE(VENDOR_ID_DLINK, 0xb301) }, + { USB_DEVICE(VENDOR_ID_DELL, 0xb097) }, { USB_DEVICE(VENDOR_ID_ASUS, 0x1976) }, {} }; diff --git a/drivers/net/usb/r8153_ecm.c b/drivers/net/usb/r8153_ecm.c index 20b2df8d74ae1..8d860dacdf49b 100644 --- a/drivers/net/usb/r8153_ecm.c +++ b/drivers/net/usb/r8153_ecm.c @@ -135,6 +135,12 @@ static const struct usb_device_id products[] = { USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), .driver_info = (unsigned long)&r8153_info, }, +/* Lenovo ThinkPad Hybrid USB-C with USB-A Dock (40af0135eu, based on Realtek 
RTL8153) */ +{ + USB_DEVICE_AND_INTERFACE_INFO(VENDOR_ID_LENOVO, 0xa359, USB_CLASS_COMM, + USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), + .driver_info = (unsigned long)&r8153_info, +}, { }, /* END */ }; diff --git a/drivers/net/veth.c b/drivers/net/veth.c index 01251868a9c27..7bb53961c0eaf 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c @@ -17,6 +17,7 @@ #include #include +#include #include #include #include @@ -684,8 +685,7 @@ static void veth_xdp_rcv_bulk_skb(struct veth_rq *rq, void **frames, void *skbs[VETH_XDP_BATCH]; int i; - if (xdp_alloc_skb_bulk(skbs, n_xdpf, - GFP_ATOMIC | __GFP_ZERO) < 0) { + if (unlikely(!napi_skb_cache_get_bulk(skbs, n_xdpf))) { for (i = 0; i < n_xdpf; i++) xdp_return_frame(frames[i]); stats->rx_drops += n_xdpf; @@ -1765,10 +1765,13 @@ static int veth_init_queues(struct net_device *dev, struct nlattr *tb[]) return 0; } -static int veth_newlink(struct net *peer_net, struct net_device *dev, - struct nlattr *tb[], struct nlattr *data[], +static int veth_newlink(struct net_device *dev, + struct rtnl_newlink_params *params, struct netlink_ext_ack *extack) { + struct net *peer_net = rtnl_newlink_peer_net(params); + struct nlattr **data = params->data; + struct nlattr **tb = params->tb; int err; struct net_device *peer; struct veth_priv *priv; diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 7646ddd9bef70..aac5a5184ba95 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -360,24 +360,7 @@ struct receive_queue { struct xdp_buff **xsk_buffs; }; -/* This structure can contain rss message with maximum settings for indirection table and keysize - * Note, that default structure that describes RSS configuration virtio_net_rss_config - * contains same info but can't handle table values. - * In any case, structure would be passed to virtio hw through sg_buf split by parts - * because table sizes may be differ according to the device configuration. - */ #define VIRTIO_NET_RSS_MAX_KEY_SIZE 40 -struct virtio_net_ctrl_rss { - u32 hash_types; - u16 indirection_table_mask; - u16 unclassified_queue; - u16 hash_cfg_reserved; /* for HASH_CONFIG (see virtio_net_hash_config for details) */ - u16 max_tx_vq; - u8 hash_key_length; - u8 key[VIRTIO_NET_RSS_MAX_KEY_SIZE]; - - u16 *indirection_table; -}; /* Control VQ buffers: protected by the rtnl lock */ struct control_buf { @@ -421,7 +404,9 @@ struct virtnet_info { u16 rss_indir_table_size; u32 rss_hash_types_supported; u32 rss_hash_types_saved; - struct virtio_net_ctrl_rss rss; + struct virtio_net_rss_config_hdr *rss_hdr; + struct virtio_net_rss_config_trailer rss_trailer; + u8 rss_hash_key_data[VIRTIO_NET_RSS_MAX_KEY_SIZE]; /* Has control virtqueue */ bool has_cvq; @@ -523,23 +508,16 @@ enum virtnet_xmit_type { VIRTNET_XMIT_TYPE_XSK, }; -static int rss_indirection_table_alloc(struct virtio_net_ctrl_rss *rss, u16 indir_table_size) +static size_t virtnet_rss_hdr_size(const struct virtnet_info *vi) { - if (!indir_table_size) { - rss->indirection_table = NULL; - return 0; - } + u16 indir_table_size = vi->has_rss ? 
vi->rss_indir_table_size : 1; - rss->indirection_table = kmalloc_array(indir_table_size, sizeof(u16), GFP_KERNEL); - if (!rss->indirection_table) - return -ENOMEM; - - return 0; + return struct_size(vi->rss_hdr, indirection_table, indir_table_size); } -static void rss_indirection_table_free(struct virtio_net_ctrl_rss *rss) +static size_t virtnet_rss_trailer_size(const struct virtnet_info *vi) { - kfree(rss->indirection_table); + return struct_size(&vi->rss_trailer, hash_key_data, vi->rss_key_size); } /* We use the last two bits of the pointer to distinguish the xmit type. */ @@ -1088,11 +1066,10 @@ static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q) return false; } -static void check_sq_full_and_disable(struct virtnet_info *vi, - struct net_device *dev, - struct send_queue *sq) +static bool tx_may_stop(struct virtnet_info *vi, + struct net_device *dev, + struct send_queue *sq) { - bool use_napi = sq->napi.weight; int qnum; qnum = sq - vi->sq; @@ -1114,6 +1091,25 @@ static void check_sq_full_and_disable(struct virtnet_info *vi, u64_stats_update_begin(&sq->stats.syncp); u64_stats_inc(&sq->stats.stop); u64_stats_update_end(&sq->stats.syncp); + + return true; + } + + return false; +} + +static void check_sq_full_and_disable(struct virtnet_info *vi, + struct net_device *dev, + struct send_queue *sq) +{ + bool use_napi = sq->napi.weight; + int qnum; + + qnum = sq - vi->sq; + + if (tx_may_stop(vi, dev, sq)) { + struct netdev_queue *txq = netdev_get_tx_queue(dev, qnum); + if (use_napi) { if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) virtqueue_napi_schedule(&sq->napi, sq->vq); @@ -2789,7 +2785,8 @@ static void skb_recv_done(struct virtqueue *rvq) virtqueue_napi_schedule(&rq->napi, rvq); } -static void virtnet_napi_enable(struct virtqueue *vq, struct napi_struct *napi) +static void virtnet_napi_do_enable(struct virtqueue *vq, + struct napi_struct *napi) { napi_enable(napi); @@ -2802,10 +2799,21 @@ static void virtnet_napi_enable(struct virtqueue *vq, struct napi_struct *napi) local_bh_enable(); } -static void virtnet_napi_tx_enable(struct virtnet_info *vi, - struct virtqueue *vq, - struct napi_struct *napi) +static void virtnet_napi_enable(struct receive_queue *rq) +{ + struct virtnet_info *vi = rq->vq->vdev->priv; + int qidx = vq2rxq(rq->vq); + + virtnet_napi_do_enable(rq->vq, &rq->napi); + netif_queue_set_napi(vi->dev, qidx, NETDEV_QUEUE_TYPE_RX, &rq->napi); +} + +static void virtnet_napi_tx_enable(struct send_queue *sq) { + struct virtnet_info *vi = sq->vq->vdev->priv; + struct napi_struct *napi = &sq->napi; + int qidx = vq2txq(sq->vq); + if (!napi->weight) return; @@ -2817,13 +2825,30 @@ static void virtnet_napi_tx_enable(struct virtnet_info *vi, return; } - return virtnet_napi_enable(vq, napi); + virtnet_napi_do_enable(sq->vq, napi); + netif_queue_set_napi(vi->dev, qidx, NETDEV_QUEUE_TYPE_TX, napi); } -static void virtnet_napi_tx_disable(struct napi_struct *napi) +static void virtnet_napi_tx_disable(struct send_queue *sq) { - if (napi->weight) + struct virtnet_info *vi = sq->vq->vdev->priv; + struct napi_struct *napi = &sq->napi; + int qidx = vq2txq(sq->vq); + + if (napi->weight) { + netif_queue_set_napi(vi->dev, qidx, NETDEV_QUEUE_TYPE_TX, NULL); napi_disable(napi); + } +} + +static void virtnet_napi_disable(struct receive_queue *rq) +{ + struct virtnet_info *vi = rq->vq->vdev->priv; + struct napi_struct *napi = &rq->napi; + int qidx = vq2rxq(rq->vq); + + netif_queue_set_napi(vi->dev, qidx, NETDEV_QUEUE_TYPE_RX, NULL); + napi_disable(napi); } static void 
refill_work(struct work_struct *work) @@ -2836,9 +2861,23 @@ static void refill_work(struct work_struct *work) for (i = 0; i < vi->curr_queue_pairs; i++) { struct receive_queue *rq = &vi->rq[i]; + /* + * When queue API support is added in the future and the call + * below becomes napi_disable_locked, this driver will need to + * be refactored. + * + * One possible solution would be to: + * - cancel refill_work with cancel_delayed_work (note: + * non-sync) + * - cancel refill_work with cancel_delayed_work_sync in + * virtnet_remove after the netdev is unregistered + * - wrap all of the work in a lock (perhaps the netdev + * instance lock) + * - check netif_running() and return early to avoid a race + */ napi_disable(&rq->napi); still_empty = !try_fill_recv(vi, rq, GFP_KERNEL); - virtnet_napi_enable(rq->vq, &rq->napi); + virtnet_napi_do_enable(rq->vq, &rq->napi); /* In theory, this can happen: if we don't get any buffers in * we will *never* try to fill again. @@ -3035,8 +3074,8 @@ static int virtnet_poll(struct napi_struct *napi, int budget) static void virtnet_disable_queue_pair(struct virtnet_info *vi, int qp_index) { - virtnet_napi_tx_disable(&vi->sq[qp_index].napi); - napi_disable(&vi->rq[qp_index].napi); + virtnet_napi_tx_disable(&vi->sq[qp_index]); + virtnet_napi_disable(&vi->rq[qp_index]); xdp_rxq_info_unreg(&vi->rq[qp_index].xdp_rxq); } @@ -3055,8 +3094,8 @@ static int virtnet_enable_queue_pair(struct virtnet_info *vi, int qp_index) if (err < 0) goto err_xdp_reg_mem_model; - virtnet_napi_enable(vi->rq[qp_index].vq, &vi->rq[qp_index].napi); - virtnet_napi_tx_enable(vi, vi->sq[qp_index].vq, &vi->sq[qp_index].napi); + virtnet_napi_enable(&vi->rq[qp_index]); + virtnet_napi_tx_enable(&vi->sq[qp_index]); return 0; @@ -3253,15 +3292,10 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev) bool use_napi = sq->napi.weight; bool kick; - /* Free up any pending old buffers before queueing new ones. */ - do { - if (use_napi) - virtqueue_disable_cb(sq->vq); - + if (!use_napi) free_old_xmit(sq, txq, false); - - } while (use_napi && !xmit_more && - unlikely(!virtqueue_enable_cb_delayed(sq->vq))); + else + virtqueue_disable_cb(sq->vq); /* timestamp packet in software */ skb_tx_timestamp(skb); @@ -3287,7 +3321,10 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev) nf_reset_ct(skb); } - check_sq_full_and_disable(vi, dev, sq); + if (use_napi) + tx_may_stop(vi, dev, sq); + else + check_sq_full_and_disable(vi, dev, sq); kick = use_napi ? 
__netdev_tx_sent_queue(txq, skb->len, xmit_more) : !xmit_more || netif_xmit_stopped(txq); @@ -3299,6 +3336,9 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev) } } + if (use_napi && kick && unlikely(!virtqueue_enable_cb_delayed(sq->vq))) + virtqueue_napi_schedule(&sq->napi, sq->vq); + return NETDEV_TX_OK; } @@ -3307,7 +3347,7 @@ static void virtnet_rx_pause(struct virtnet_info *vi, struct receive_queue *rq) bool running = netif_running(vi->dev); if (running) { - napi_disable(&rq->napi); + virtnet_napi_disable(rq); virtnet_cancel_dim(vi, &rq->dim); } } @@ -3320,7 +3360,7 @@ static void virtnet_rx_resume(struct virtnet_info *vi, struct receive_queue *rq) schedule_delayed_work(&vi->refill, 0); if (running) - virtnet_napi_enable(rq->vq, &rq->napi); + virtnet_napi_enable(rq); } static int virtnet_rx_resize(struct virtnet_info *vi, @@ -3349,7 +3389,7 @@ static void virtnet_tx_pause(struct virtnet_info *vi, struct send_queue *sq) qindex = sq - vi->sq; if (running) - virtnet_napi_tx_disable(&sq->napi); + virtnet_napi_tx_disable(sq); txq = netdev_get_tx_queue(vi->dev, qindex); @@ -3383,7 +3423,7 @@ static void virtnet_tx_resume(struct virtnet_info *vi, struct send_queue *sq) __netif_tx_unlock_bh(txq); if (running) - virtnet_napi_tx_enable(vi, sq->vq, &sq->napi); + virtnet_napi_tx_enable(sq); } static int virtnet_tx_resize(struct virtnet_info *vi, struct send_queue *sq, @@ -3576,15 +3616,16 @@ static void virtnet_rss_update_by_qpairs(struct virtnet_info *vi, u16 queue_pair for (; i < vi->rss_indir_table_size; ++i) { indir_val = ethtool_rxfh_indir_default(i, queue_pairs); - vi->rss.indirection_table[i] = indir_val; + vi->rss_hdr->indirection_table[i] = cpu_to_le16(indir_val); } - vi->rss.max_tx_vq = queue_pairs; + vi->rss_trailer.max_tx_vq = cpu_to_le16(queue_pairs); } static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs) { struct virtio_net_ctrl_mq *mq __free(kfree) = NULL; - struct virtio_net_ctrl_rss old_rss; + struct virtio_net_rss_config_hdr *old_rss_hdr; + struct virtio_net_rss_config_trailer old_rss_trailer; struct net_device *dev = vi->dev; struct scatterlist sg; @@ -3599,24 +3640,28 @@ static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs) * update (VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET below) and return directly. 
*/ if (vi->has_rss && !netif_is_rxfh_configured(dev)) { - memcpy(&old_rss, &vi->rss, sizeof(old_rss)); - if (rss_indirection_table_alloc(&vi->rss, vi->rss_indir_table_size)) { - vi->rss.indirection_table = old_rss.indirection_table; + old_rss_hdr = vi->rss_hdr; + old_rss_trailer = vi->rss_trailer; + vi->rss_hdr = devm_kzalloc(&dev->dev, virtnet_rss_hdr_size(vi), GFP_KERNEL); + if (!vi->rss_hdr) { + vi->rss_hdr = old_rss_hdr; return -ENOMEM; } + *vi->rss_hdr = *old_rss_hdr; virtnet_rss_update_by_qpairs(vi, queue_pairs); if (!virtnet_commit_rss_command(vi)) { /* restore ctrl_rss if commit_rss_command failed */ - rss_indirection_table_free(&vi->rss); - memcpy(&vi->rss, &old_rss, sizeof(old_rss)); + devm_kfree(&dev->dev, vi->rss_hdr); + vi->rss_hdr = old_rss_hdr; + vi->rss_trailer = old_rss_trailer; dev_warn(&dev->dev, "Fail to set num of queue pairs to %d, because committing RSS failed\n", queue_pairs); return -EINVAL; } - rss_indirection_table_free(&old_rss); + devm_kfree(&dev->dev, old_rss_hdr); goto succ; } @@ -4059,28 +4104,12 @@ static int virtnet_set_ringparam(struct net_device *dev, static bool virtnet_commit_rss_command(struct virtnet_info *vi) { struct net_device *dev = vi->dev; - struct scatterlist sgs[4]; - unsigned int sg_buf_size; + struct scatterlist sgs[2]; /* prepare sgs */ - sg_init_table(sgs, 4); - - sg_buf_size = offsetof(struct virtio_net_ctrl_rss, hash_cfg_reserved); - sg_set_buf(&sgs[0], &vi->rss, sg_buf_size); - - if (vi->has_rss) { - sg_buf_size = sizeof(uint16_t) * vi->rss_indir_table_size; - sg_set_buf(&sgs[1], vi->rss.indirection_table, sg_buf_size); - } else { - sg_set_buf(&sgs[1], &vi->rss.hash_cfg_reserved, sizeof(uint16_t)); - } - - sg_buf_size = offsetof(struct virtio_net_ctrl_rss, key) - - offsetof(struct virtio_net_ctrl_rss, max_tx_vq); - sg_set_buf(&sgs[2], &vi->rss.max_tx_vq, sg_buf_size); - - sg_buf_size = vi->rss_key_size; - sg_set_buf(&sgs[3], vi->rss.key, sg_buf_size); + sg_init_table(sgs, 2); + sg_set_buf(&sgs[0], vi->rss_hdr, virtnet_rss_hdr_size(vi)); + sg_set_buf(&sgs[1], &vi->rss_trailer, virtnet_rss_trailer_size(vi)); if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ, vi->has_rss ? VIRTIO_NET_CTRL_MQ_RSS_CONFIG @@ -4097,17 +4126,17 @@ static bool virtnet_commit_rss_command(struct virtnet_info *vi) static void virtnet_init_default_rss(struct virtnet_info *vi) { - vi->rss.hash_types = vi->rss_hash_types_supported; + vi->rss_hdr->hash_types = cpu_to_le32(vi->rss_hash_types_supported); vi->rss_hash_types_saved = vi->rss_hash_types_supported; - vi->rss.indirection_table_mask = vi->rss_indir_table_size - ? vi->rss_indir_table_size - 1 : 0; - vi->rss.unclassified_queue = 0; + vi->rss_hdr->indirection_table_mask = vi->rss_indir_table_size + ? 
cpu_to_le16(vi->rss_indir_table_size - 1) : 0; + vi->rss_hdr->unclassified_queue = 0; virtnet_rss_update_by_qpairs(vi, vi->curr_queue_pairs); - vi->rss.hash_key_length = vi->rss_key_size; + vi->rss_trailer.hash_key_length = vi->rss_key_size; - netdev_rss_key_fill(vi->rss.key, vi->rss_key_size); + netdev_rss_key_fill(vi->rss_hash_key_data, vi->rss_key_size); } static void virtnet_get_hashflow(const struct virtnet_info *vi, struct ethtool_rxnfc *info) @@ -4218,7 +4247,7 @@ static bool virtnet_set_hashflow(struct virtnet_info *vi, struct ethtool_rxnfc * if (new_hashtypes != vi->rss_hash_types_saved) { vi->rss_hash_types_saved = new_hashtypes; - vi->rss.hash_types = vi->rss_hash_types_saved; + vi->rss_hdr->hash_types = cpu_to_le32(vi->rss_hash_types_saved); if (vi->dev->features & NETIF_F_RXHASH) return virtnet_commit_rss_command(vi); } @@ -5398,11 +5427,11 @@ static int virtnet_get_rxfh(struct net_device *dev, if (rxfh->indir) { for (i = 0; i < vi->rss_indir_table_size; ++i) - rxfh->indir[i] = vi->rss.indirection_table[i]; + rxfh->indir[i] = le16_to_cpu(vi->rss_hdr->indirection_table[i]); } if (rxfh->key) - memcpy(rxfh->key, vi->rss.key, vi->rss_key_size); + memcpy(rxfh->key, vi->rss_hash_key_data, vi->rss_key_size); rxfh->hfunc = ETH_RSS_HASH_TOP; @@ -5426,7 +5455,7 @@ static int virtnet_set_rxfh(struct net_device *dev, return -EOPNOTSUPP; for (i = 0; i < vi->rss_indir_table_size; ++i) - vi->rss.indirection_table[i] = rxfh->indir[i]; + vi->rss_hdr->indirection_table[i] = cpu_to_le16(rxfh->indir[i]); update = true; } @@ -5438,7 +5467,7 @@ static int virtnet_set_rxfh(struct net_device *dev, if (!vi->has_rss && !vi->has_rss_hash_report) return -EOPNOTSUPP; - memcpy(vi->rss.key, rxfh->key, vi->rss_key_size); + memcpy(vi->rss_hash_key_data, rxfh->key, vi->rss_key_size); update = true; } @@ -5615,8 +5644,11 @@ static void virtnet_freeze_down(struct virtio_device *vdev) netif_tx_lock_bh(vi->dev); netif_device_detach(vi->dev); netif_tx_unlock_bh(vi->dev); - if (netif_running(vi->dev)) + if (netif_running(vi->dev)) { + rtnl_lock(); virtnet_close(vi->dev); + rtnl_unlock(); + } } static int init_vqs(struct virtnet_info *vi); @@ -5636,7 +5668,9 @@ static int virtnet_restore_up(struct virtio_device *vdev) enable_rx_mode_work(vi); if (netif_running(vi->dev)) { + rtnl_lock(); err = virtnet_open(vi->dev); + rtnl_unlock(); if (err) return err; } @@ -5926,8 +5960,8 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog, /* Make sure NAPI is not using any XDP TX queues for RX. 
*/ if (netif_running(dev)) { for (i = 0; i < vi->max_queue_pairs; i++) { - napi_disable(&vi->rq[i].napi); - virtnet_napi_tx_disable(&vi->sq[i].napi); + virtnet_napi_disable(&vi->rq[i]); + virtnet_napi_tx_disable(&vi->sq[i]); } } @@ -5964,9 +5998,8 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog, if (old_prog) bpf_prog_put(old_prog); if (netif_running(dev)) { - virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); - virtnet_napi_tx_enable(vi, vi->sq[i].vq, - &vi->sq[i].napi); + virtnet_napi_enable(&vi->rq[i]); + virtnet_napi_tx_enable(&vi->sq[i]); } } @@ -5981,9 +6014,8 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog, if (netif_running(dev)) { for (i = 0; i < vi->max_queue_pairs; i++) { - virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); - virtnet_napi_tx_enable(vi, vi->sq[i].vq, - &vi->sq[i].napi); + virtnet_napi_enable(&vi->rq[i]); + virtnet_napi_tx_enable(&vi->sq[i]); } } if (prog) @@ -6044,9 +6076,9 @@ static int virtnet_set_features(struct net_device *dev, if ((dev->features ^ features) & NETIF_F_RXHASH) { if (features & NETIF_F_RXHASH) - vi->rss.hash_types = vi->rss_hash_types_saved; + vi->rss_hdr->hash_types = cpu_to_le32(vi->rss_hash_types_saved); else - vi->rss.hash_types = VIRTIO_NET_HASH_REPORT_NONE; + vi->rss_hdr->hash_types = cpu_to_le32(VIRTIO_NET_HASH_REPORT_NONE); if (!virtnet_commit_rss_command(vi)) return -EINVAL; @@ -6390,8 +6422,9 @@ static int virtnet_alloc_queues(struct virtnet_info *vi) INIT_DELAYED_WORK(&vi->refill, refill_work); for (i = 0; i < vi->max_queue_pairs; i++) { vi->rq[i].pages = NULL; - netif_napi_add_weight(vi->dev, &vi->rq[i].napi, virtnet_poll, - napi_weight); + netif_napi_add_config(vi->dev, &vi->rq[i].napi, virtnet_poll, + i); + vi->rq[i].napi.weight = napi_weight; netif_napi_add_tx_weight(vi->dev, &vi->sq[i].napi, virtnet_poll_tx, napi_tx ? 
napi_weight : 0); @@ -6735,9 +6768,11 @@ static int virtnet_probe(struct virtio_device *vdev) virtio_cread16(vdev, offsetof(struct virtio_net_config, rss_max_indirection_table_length)); } - err = rss_indirection_table_alloc(&vi->rss, vi->rss_indir_table_size); - if (err) + vi->rss_hdr = devm_kzalloc(&vdev->dev, virtnet_rss_hdr_size(vi), GFP_KERNEL); + if (!vi->rss_hdr) { + err = -ENOMEM; goto free; + } if (vi->has_rss || vi->has_rss_hash_report) { vi->rss_key_size = @@ -7016,8 +7051,6 @@ static void virtnet_remove(struct virtio_device *vdev) remove_vq_common(vi); - rss_indirection_table_free(&vi->rss); - free_netdev(vi->dev); } diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index 6793fa09f9d1a..3df6aabc7e339 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c @@ -2033,6 +2033,11 @@ vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq, rq->comp_ring.gen = VMXNET3_INIT_GEN; rq->comp_ring.next2proc = 0; + + if (xdp_rxq_info_is_reg(&rq->xdp_rxq)) + xdp_rxq_info_unreg(&rq->xdp_rxq); + page_pool_destroy(rq->page_pool); + rq->page_pool = NULL; } @@ -2073,11 +2078,6 @@ static void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq, } } - if (xdp_rxq_info_is_reg(&rq->xdp_rxq)) - xdp_rxq_info_unreg(&rq->xdp_rxq); - page_pool_destroy(rq->page_pool); - rq->page_pool = NULL; - if (rq->data_ring.base) { dma_free_coherent(&adapter->pdev->dev, rq->rx_ring[0].size * rq->data_ring.desc_size, diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c index ca81b212a246b..7168b33adadb0 100644 --- a/drivers/net/vrf.c +++ b/drivers/net/vrf.c @@ -34,6 +34,7 @@ #include #include #include +#include #include #include #include @@ -1537,14 +1538,12 @@ static int vrf_fib_rule(const struct net_device *dev, __u8 family, bool add_it) nlmsg_end(skb, nlh); - /* fib_nl_{new,del}rule handling looks for net from skb->sk */ - skb->sk = dev_net(dev)->rtnl; if (add_it) { - err = fib_nl_newrule(skb, nlh, NULL); + err = fib_newrule(dev_net(dev), skb, nlh, NULL, true); if (err == -EEXIST) err = 0; } else { - err = fib_nl_delrule(skb, nlh, NULL); + err = fib_delrule(dev_net(dev), skb, nlh, NULL, true); if (err == -ENOENT) err = 0; } @@ -1619,7 +1618,7 @@ static void vrf_setup(struct net_device *dev) dev->lltx = true; /* don't allow vrf devices to change network namespaces. 
*/ - dev->netns_local = true; + dev->netns_immutable = true; /* does not make sense for a VLAN to be added to a vrf device */ dev->features |= NETIF_F_VLAN_CHALLENGED; @@ -1677,11 +1676,12 @@ static void vrf_dellink(struct net_device *dev, struct list_head *head) unregister_netdevice_queue(dev, head); } -static int vrf_newlink(struct net *src_net, struct net_device *dev, - struct nlattr *tb[], struct nlattr *data[], +static int vrf_newlink(struct net_device *dev, + struct rtnl_newlink_params *params, struct netlink_ext_ack *extack) { struct net_vrf *vrf = netdev_priv(dev); + struct nlattr **data = params->data; struct netns_vrf *nn_vrf; bool *add_fib_rules; struct net *net; diff --git a/drivers/net/vxlan/vxlan_core.c b/drivers/net/vxlan/vxlan_core.c index 92516189e792f..8c49e903cb3a1 100644 --- a/drivers/net/vxlan/vxlan_core.c +++ b/drivers/net/vxlan/vxlan_core.c @@ -25,6 +25,7 @@ #include #include #include +#include #include #include #include @@ -227,9 +228,9 @@ static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan, be32_to_cpu(fdb->vni))) goto nla_put_failure; - ci.ndm_used = jiffies_to_clock_t(now - fdb->used); + ci.ndm_used = jiffies_to_clock_t(now - READ_ONCE(fdb->used)); ci.ndm_confirmed = 0; - ci.ndm_updated = jiffies_to_clock_t(now - fdb->updated); + ci.ndm_updated = jiffies_to_clock_t(now - READ_ONCE(fdb->updated)); ci.ndm_refcnt = 0; if (nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci)) @@ -434,8 +435,12 @@ static struct vxlan_fdb *vxlan_find_mac(struct vxlan_dev *vxlan, struct vxlan_fdb *f; f = __vxlan_find_mac(vxlan, mac, vni); - if (f && f->used != jiffies) - f->used = jiffies; + if (f) { + unsigned long now = jiffies; + + if (READ_ONCE(f->used) != now) + WRITE_ONCE(f->used, now); + } return f; } @@ -1009,12 +1014,10 @@ static int vxlan_fdb_update_existing(struct vxlan_dev *vxlan, !(f->flags & NTF_VXLAN_ADDED_BY_USER)) { if (f->state != state) { f->state = state; - f->updated = jiffies; notify = 1; } if (f->flags != fdb_flags) { f->flags = fdb_flags; - f->updated = jiffies; notify = 1; } } @@ -1048,12 +1051,13 @@ static int vxlan_fdb_update_existing(struct vxlan_dev *vxlan, } if (ndm_flags & NTF_USE) - f->used = jiffies; + WRITE_ONCE(f->updated, jiffies); if (notify) { if (rd == NULL) rd = first_remote_rtnl(f); + WRITE_ONCE(f->updated, jiffies); err = vxlan_fdb_notify(vxlan, f, rd, RTM_NEWNEIGH, swdev_notify, extack); if (err) @@ -1292,7 +1296,7 @@ int __vxlan_fdb_delete(struct vxlan_dev *vxlan, struct vxlan_fdb *f; int err = -ENOENT; - f = vxlan_find_mac(vxlan, addr, src_vni); + f = __vxlan_find_mac(vxlan, addr, src_vni); if (!f) return err; @@ -1459,9 +1463,13 @@ static enum skb_drop_reason vxlan_snoop(struct net_device *dev, ifindex = src_ifindex; #endif - f = vxlan_find_mac(vxlan, src_mac, vni); + f = __vxlan_find_mac(vxlan, src_mac, vni); if (likely(f)) { struct vxlan_rdst *rdst = first_remote_rcu(f); + unsigned long now = jiffies; + + if (READ_ONCE(f->updated) != now) + WRITE_ONCE(f->updated, now); if (likely(vxlan_addr_equal(&rdst->remote_ip, src_ip) && rdst->remote_ifindex == ifindex)) @@ -1481,7 +1489,6 @@ static enum skb_drop_reason vxlan_snoop(struct net_device *dev, src_mac, &rdst->remote_ip.sa, &src_ip->sa); rdst->remote_ip = *src_ip; - f->updated = jiffies; vxlan_fdb_notify(vxlan, f, rdst, RTM_NEWNEIGH, true, NULL); } else { u32 hash_index = fdb_head_index(vxlan, src_mac, vni); @@ -1664,7 +1671,6 @@ static bool vxlan_ecn_decapsulate(struct vxlan_sock *vs, void *oiph, return err <= 1; } -/* Callback from net/ipv4/udp.c to receive packets */ static int 
vxlan_rcv(struct sock *sk, struct sk_buff *skb) { struct vxlan_vni_node *vninode = NULL; @@ -1834,7 +1840,6 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb) return 0; } -/* Callback from net/ipv{4,6}/udp.c to check that we have a VNI for errors */ static int vxlan_err_lookup(struct sock *sk, struct sk_buff *skb) { struct vxlan_dev *vxlan; @@ -2852,7 +2857,7 @@ static void vxlan_cleanup(struct timer_list *t) if (f->flags & NTF_EXT_LEARNED) continue; - timeout = f->used + vxlan->cfg.age_interval * HZ; + timeout = READ_ONCE(f->updated) + vxlan->cfg.age_interval * HZ; if (time_before_eq(timeout, jiffies)) { netdev_dbg(vxlan->dev, "garbage collect %pM\n", @@ -3932,7 +3937,7 @@ static void vxlan_config_apply(struct net_device *dev, } static int vxlan_dev_configure(struct net *src_net, struct net_device *dev, - struct vxlan_config *conf, bool changelink, + struct vxlan_config *conf, struct netlink_ext_ack *extack) { struct vxlan_dev *vxlan = netdev_priv(dev); @@ -3943,7 +3948,7 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev, if (ret) return ret; - vxlan_config_apply(dev, conf, lowerdev, src_net, changelink); + vxlan_config_apply(dev, conf, lowerdev, src_net, false); return 0; } @@ -3961,7 +3966,7 @@ static int __vxlan_dev_create(struct net *net, struct net_device *dev, int err; dst = &vxlan->default_dst; - err = vxlan_dev_configure(net, dev, conf, false, extack); + err = vxlan_dev_configure(net, dev, conf, extack); if (err) return err; @@ -4396,10 +4401,13 @@ static int vxlan_nl2conf(struct nlattr *tb[], struct nlattr *data[], return 0; } -static int vxlan_newlink(struct net *src_net, struct net_device *dev, - struct nlattr *tb[], struct nlattr *data[], +static int vxlan_newlink(struct net_device *dev, + struct rtnl_newlink_params *params, struct netlink_ext_ack *extack) { + struct net *link_net = rtnl_newlink_link_net(params); + struct nlattr **data = params->data; + struct nlattr **tb = params->tb; struct vxlan_config conf; int err; @@ -4407,7 +4415,7 @@ static int vxlan_newlink(struct net *src_net, struct net_device *dev, if (err) return err; - return __vxlan_dev_create(src_net, dev, &conf, extack); + return __vxlan_dev_create(link_net, dev, &conf, extack); } static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[], @@ -4415,6 +4423,7 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[], struct netlink_ext_ack *extack) { struct vxlan_dev *vxlan = netdev_priv(dev); + bool rem_ip_changed, change_igmp; struct net_device *lowerdev; struct vxlan_config conf; struct vxlan_rdst *dst; @@ -4438,8 +4447,13 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[], if (err) return err; + rem_ip_changed = !vxlan_addr_equal(&conf.remote_ip, &dst->remote_ip); + change_igmp = vxlan->dev->flags & IFF_UP && + (rem_ip_changed || + dst->remote_ifindex != conf.remote_ifindex); + /* handle default dst entry */ - if (!vxlan_addr_equal(&conf.remote_ip, &dst->remote_ip)) { + if (rem_ip_changed) { u32 hash_index = fdb_head_index(vxlan, all_zeros_mac, conf.vni); spin_lock_bh(&vxlan->hash_lock[hash_index]); @@ -4483,6 +4497,9 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[], } } + if (change_igmp && vxlan_addr_multicast(&dst->remote_ip)) + err = vxlan_multicast_leave(vxlan); + if (conf.age_interval != vxlan->cfg.age_interval) mod_timer(&vxlan->age_timer, jiffies); @@ -4490,7 +4507,12 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[], if (lowerdev && lowerdev != 
dst->remote_dev) dst->remote_dev = lowerdev; vxlan_config_apply(dev, &conf, lowerdev, vxlan->net, true); - return 0; + + if (!err && change_igmp && + vxlan_addr_multicast(&dst->remote_ip)) + err = vxlan_multicast_join(vxlan); + + return err; } static void vxlan_dellink(struct net_device *dev, struct list_head *head) @@ -4768,7 +4790,7 @@ vxlan_fdb_offloaded_set(struct net_device *dev, spin_lock_bh(&vxlan->hash_lock[hash_index]); - f = vxlan_find_mac(vxlan, fdb_info->eth_addr, fdb_info->vni); + f = __vxlan_find_mac(vxlan, fdb_info->eth_addr, fdb_info->vni); if (!f) goto out; @@ -4824,7 +4846,7 @@ vxlan_fdb_external_learn_del(struct net_device *dev, hash_index = fdb_head_index(vxlan, fdb_info->eth_addr, fdb_info->vni); spin_lock_bh(&vxlan->hash_lock[hash_index]); - f = vxlan_find_mac(vxlan, fdb_info->eth_addr, fdb_info->vni); + f = __vxlan_find_mac(vxlan, fdb_info->eth_addr, fdb_info->vni); if (!f) err = -ENOENT; else if (f->flags & NTF_EXT_LEARNED) diff --git a/drivers/net/wireguard/device.c b/drivers/net/wireguard/device.c index 6cf173a008e78..c496d35b266dd 100644 --- a/drivers/net/wireguard/device.c +++ b/drivers/net/wireguard/device.c @@ -307,14 +307,15 @@ static void wg_setup(struct net_device *dev) wg->dev = dev; } -static int wg_newlink(struct net *src_net, struct net_device *dev, - struct nlattr *tb[], struct nlattr *data[], +static int wg_newlink(struct net_device *dev, + struct rtnl_newlink_params *params, struct netlink_ext_ack *extack) { + struct net *link_net = rtnl_newlink_link_net(params); struct wg_device *wg = netdev_priv(dev); int ret = -ENOMEM; - rcu_assign_pointer(wg->creating_net, src_net); + rcu_assign_pointer(wg->creating_net, link_net); init_rwsem(&wg->static_identity.lock); mutex_init(&wg->socket_update_lock); mutex_init(&wg->device_update_lock); diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c index b3294287bce1f..6d336e39d6738 100644 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c @@ -1163,8 +1163,11 @@ int ath10k_core_check_dt(struct ath10k *ar) if (!node) return -ENOENT; - of_property_read_string(node, "qcom,ath10k-calibration-variant", + of_property_read_string(node, "qcom,calibration-variant", &variant); + if (!variant) + of_property_read_string(node, "qcom,ath10k-calibration-variant", + &variant); if (!variant) return -ENODATA; @@ -2259,7 +2262,9 @@ static int ath10k_core_pre_cal_download(struct ath10k *ar) "boot did not find a pre calibration file, try DT next: %d\n", ret); - ret = ath10k_download_cal_dt(ar, "qcom,ath10k-pre-calibration-data"); + ret = ath10k_download_cal_dt(ar, "qcom,pre-calibration-data"); + if (ret == -ENOENT) + ret = ath10k_download_cal_dt(ar, "qcom,ath10k-pre-calibration-data"); if (ret) { ath10k_dbg(ar, ATH10K_DBG_BOOT, "unable to load pre cal data from DT: %d\n", ret); @@ -2337,7 +2342,9 @@ static int ath10k_download_cal_data(struct ath10k *ar) "boot did not find a calibration file, try DT next: %d\n", ret); - ret = ath10k_download_cal_dt(ar, "qcom,ath10k-calibration-data"); + ret = ath10k_download_cal_dt(ar, "qcom,calibration-data"); + if (ret == -ENOENT) + ret = ath10k_download_cal_dt(ar, "qcom,ath10k-calibration-data"); if (ret == 0) { ar->cal_mode = ATH10K_CAL_MODE_DT; goto done; diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c index d436a874cd5af..52e0de8c30696 100644 --- a/drivers/net/wireless/ath/ath10k/snoc.c +++ b/drivers/net/wireless/ath/ath10k/snoc.c @@ -935,7 +935,7 @@ static int 
ath10k_snoc_hif_start(struct ath10k *ar) bitmap_clear(ar_snoc->pending_ce_irqs, 0, CE_COUNT_MAX); - dev_set_threaded(ar->napi_dev, true); + dev_set_threaded(ar->napi_dev, NETDEV_NAPI_THREADED_ENABLE); ath10k_core_napi_enable(ar); ath10k_snoc_irq_enable(ar); ath10k_snoc_rx_post(ar); diff --git a/drivers/net/wireless/ath/ath11k/Makefile b/drivers/net/wireless/ath/ath11k/Makefile index 43d2d8ddcdc05..d9092414b362d 100644 --- a/drivers/net/wireless/ath/ath11k/Makefile +++ b/drivers/net/wireless/ath/ath11k/Makefile @@ -27,6 +27,7 @@ ath11k-$(CONFIG_ATH11K_TRACING) += trace.o ath11k-$(CONFIG_THERMAL) += thermal.o ath11k-$(CONFIG_ATH11K_SPECTRAL) += spectral.o ath11k-$(CONFIG_PM) += wow.o +ath11k-$(CONFIG_DEV_COREDUMP) += coredump.o obj-$(CONFIG_ATH11K_AHB) += ath11k_ahb.o ath11k_ahb-y += ahb.o diff --git a/drivers/net/wireless/ath/ath11k/ahb.c b/drivers/net/wireless/ath/ath11k/ahb.c index f2fc04596d481..eedba3766ba24 100644 --- a/drivers/net/wireless/ath/ath11k/ahb.c +++ b/drivers/net/wireless/ath/ath11k/ahb.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: BSD-3-Clause-Clear /* * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. - * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2022-2025 Qualcomm Innovation Center, Inc. All rights reserved. */ #include @@ -1290,6 +1290,7 @@ static void ath11k_ahb_remove(struct platform_device *pdev) ath11k_core_deinit(ab); qmi_fail: + ath11k_fw_destroy(ab); ath11k_ahb_free_resources(ab); } @@ -1309,6 +1310,7 @@ static void ath11k_ahb_shutdown(struct platform_device *pdev) ath11k_core_deinit(ab); free_resources: + ath11k_fw_destroy(ab); ath11k_ahb_free_resources(ab); } diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c index c576bbba52bf1..3d39ff85ba94a 100644 --- a/drivers/net/wireless/ath/ath11k/core.c +++ b/drivers/net/wireless/ath/ath11k/core.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: BSD-3-Clause-Clear /* * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. - * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved. 
*/ #include @@ -1175,8 +1175,11 @@ int ath11k_core_check_dt(struct ath11k_base *ab) if (!node) return -ENOENT; - of_property_read_string(node, "qcom,ath11k-calibration-variant", + of_property_read_string(node, "qcom,calibration-variant", &variant); + if (!variant) + of_property_read_string(node, "qcom,ath11k-calibration-variant", + &variant); if (!variant) return -ENODATA; @@ -2056,6 +2059,7 @@ void ath11k_core_halt(struct ath11k *ar) ath11k_mac_scan_finish(ar); ath11k_mac_peer_cleanup_all(ar); cancel_delayed_work_sync(&ar->scan.timeout); + cancel_work_sync(&ar->channel_update_work); cancel_work_sync(&ar->regd_update_work); cancel_work_sync(&ab->update_11d_work); @@ -2257,6 +2261,7 @@ static void ath11k_core_reset(struct work_struct *work) reinit_completion(&ab->recovery_start); atomic_set(&ab->recovery_start_count, 0); + ath11k_coredump_collect(ab); ath11k_core_pre_reconfigure_recovery(ab); reinit_completion(&ab->reconfigure_complete); @@ -2346,7 +2351,6 @@ void ath11k_core_deinit(struct ath11k_base *ab) ath11k_hif_power_down(ab); ath11k_mac_destroy(ab); ath11k_core_soc_destroy(ab); - ath11k_fw_destroy(ab); } EXPORT_SYMBOL(ath11k_core_deinit); @@ -2393,6 +2397,7 @@ struct ath11k_base *ath11k_core_alloc(struct device *dev, size_t priv_size, INIT_WORK(&ab->restart_work, ath11k_core_restart); INIT_WORK(&ab->update_11d_work, ath11k_update_11d); INIT_WORK(&ab->reset_work, ath11k_core_reset); + INIT_WORK(&ab->dump_work, ath11k_coredump_upload); timer_setup(&ab->rx_replenish_retry, ath11k_ce_rx_replenish_retry, 0); init_completion(&ab->htc_suspend); init_completion(&ab->wow.wakeup_completed); diff --git a/drivers/net/wireless/ath/ath11k/core.h b/drivers/net/wireless/ath/ath11k/core.h index a9dc7fe7765a1..1a3d0de4afde8 100644 --- a/drivers/net/wireless/ath/ath11k/core.h +++ b/drivers/net/wireless/ath/ath11k/core.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: BSD-3-Clause-Clear */ /* * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. - * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved. */ #ifndef ATH11K_CORE_H @@ -32,6 +32,7 @@ #include "spectral.h" #include "wow.h" #include "fw.h" +#include "coredump.h" #define SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK) @@ -370,6 +371,7 @@ struct ath11k_vif { struct ieee80211_vif *vif; struct wmi_wmm_params_all_arg wmm_params; + struct wmi_wmm_params_all_arg muedca_params; struct list_head list; union { struct { @@ -685,7 +687,7 @@ struct ath11k { struct mutex conf_mutex; /* protects the radio specific data like debug stats, ppdu_stats_info stats, * vdev_stop_status info, scan data, ath11k_sta info, ath11k_vif info, - * channel context data, survey info, test mode data. + * channel context data, survey info, test mode data, channel_update_queue. 
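Both the ath10k hunks earlier and the ath11k_core_check_dt() hunk above follow the same device-tree migration: prefer the new generic property name, then fall back to the deprecated driver-specific one so existing DTBs keep working. A minimal sketch of the pattern; the helper name is illustrative, not from the patch:

#include <linux/of.h>

static const char *example_read_cal_variant(struct device_node *node)
{
	const char *variant = NULL;

	/* prefer the generic name... */
	of_property_read_string(node, "qcom,calibration-variant", &variant);
	if (!variant)
		/* ...then fall back to the deprecated per-driver name */
		of_property_read_string(node,
					"qcom,ath11k-calibration-variant",
					&variant);

	return variant;
}

of_property_read_string() leaves the output pointer untouched on failure, which is what makes the NULL check between the two reads work.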
*/ spinlock_t data_lock; @@ -743,6 +745,9 @@ struct ath11k { struct completion bss_survey_done; struct work_struct regd_update_work; + struct work_struct channel_update_work; + /* protected with data_lock */ + struct list_head channel_update_queue; struct work_struct wmi_mgmt_tx_work; struct sk_buff_head wmi_mgmt_tx_queue; @@ -900,6 +905,10 @@ struct ath11k_base { /* HW channel counters frequency value in hertz common to all MACs */ u32 cc_freq_hz; + struct ath11k_dump_file_data *dump_data; + size_t ath11k_coredump_len; + struct work_struct dump_work; + struct ath11k_htc htc; struct ath11k_dp dp; diff --git a/drivers/net/wireless/ath/ath11k/coredump.c b/drivers/net/wireless/ath/ath11k/coredump.c new file mode 100644 index 0000000000000..b8bad358cebec --- /dev/null +++ b/drivers/net/wireless/ath/ath11k/coredump.c @@ -0,0 +1,52 @@ +// SPDX-License-Identifier: BSD-3-Clause-Clear +/* + * Copyright (c) 2020 The Linux Foundation. All rights reserved. + * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. + */ +#include +#include "hif.h" +#include "coredump.h" +#include "debug.h" + +enum +ath11k_fw_crash_dump_type ath11k_coredump_get_dump_type(int type) +{ + enum ath11k_fw_crash_dump_type dump_type; + + switch (type) { + case HOST_DDR_REGION_TYPE: + dump_type = FW_CRASH_DUMP_REMOTE_MEM_DATA; + break; + case M3_DUMP_REGION_TYPE: + dump_type = FW_CRASH_DUMP_M3_DUMP; + break; + case PAGEABLE_MEM_REGION_TYPE: + dump_type = FW_CRASH_DUMP_PAGEABLE_DATA; + break; + case BDF_MEM_REGION_TYPE: + case CALDB_MEM_REGION_TYPE: + dump_type = FW_CRASH_DUMP_NONE; + break; + default: + dump_type = FW_CRASH_DUMP_TYPE_MAX; + break; + } + + return dump_type; +} +EXPORT_SYMBOL(ath11k_coredump_get_dump_type); + +void ath11k_coredump_upload(struct work_struct *work) +{ + struct ath11k_base *ab = container_of(work, struct ath11k_base, dump_work); + + ath11k_info(ab, "Uploading coredump\n"); + /* dev_coredumpv() takes ownership of the buffer */ + dev_coredumpv(ab->dev, ab->dump_data, ab->ath11k_coredump_len, GFP_KERNEL); + ab->dump_data = NULL; +} + +void ath11k_coredump_collect(struct ath11k_base *ab) +{ + ath11k_hif_coredump_download(ab); +} diff --git a/drivers/net/wireless/ath/ath11k/coredump.h b/drivers/net/wireless/ath/ath11k/coredump.h new file mode 100644 index 0000000000000..3960d93852617 --- /dev/null +++ b/drivers/net/wireless/ath/ath11k/coredump.h @@ -0,0 +1,79 @@ +/* SPDX-License-Identifier: BSD-3-Clause-Clear */ +/* + * Copyright (c) 2020 The Linux Foundation. All rights reserved. + * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
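The new ath11k_coredump_upload() worker above leans on the devcoredump ownership rule spelled out in its comment: dev_coredumpv() takes the buffer and frees it itself, which is why ab->dump_data is cleared rather than freed. A minimal sketch of that contract, assuming only the documented dev_coredumpv() semantics:

#include <linux/devcoredump.h>
#include <linux/vmalloc.h>

static void example_submit_dump(struct device *dev, size_t len)
{
	void *buf = vzalloc(len); /* dev_coredumpv() expects vmalloc memory */

	if (!buf)
		return;

	/* ... fill buf with the dump contents ... */

	/* devcoredump owns buf from here on and will free it; the caller
	 * only drops its own reference and must never vfree() it
	 */
	dev_coredumpv(dev, buf, len, GFP_KERNEL);
}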
+ */ +#ifndef _ATH11K_COREDUMP_H_ +#define _ATH11K_COREDUMP_H_ + +#define ATH11K_FW_CRASH_DUMP_V2 2 + +enum ath11k_fw_crash_dump_type { + FW_CRASH_DUMP_PAGING_DATA, + FW_CRASH_DUMP_RDDM_DATA, + FW_CRASH_DUMP_REMOTE_MEM_DATA, + FW_CRASH_DUMP_PAGEABLE_DATA, + FW_CRASH_DUMP_M3_DUMP, + FW_CRASH_DUMP_NONE, + + /* keep last */ + FW_CRASH_DUMP_TYPE_MAX, +}; + +#define COREDUMP_TLV_HDR_SIZE 8 + +struct ath11k_tlv_dump_data { + /* see ath11k_fw_crash_dump_type above */ + __le32 type; + + /* in bytes */ + __le32 tlv_len; + + /* pad to 32-bit boundaries as needed */ + u8 tlv_data[]; +} __packed; + +struct ath11k_dump_file_data { + /* "ATH11K-FW-DUMP" */ + char df_magic[16]; + /* total dump len in bytes */ + __le32 len; + /* file dump version */ + __le32 version; + /* pci device id */ + __le32 chip_id; + /* qrtr instance id */ + __le32 qrtr_id; + /* pci domain id */ + __le32 bus_id; + guid_t guid; + /* time-of-day stamp */ + __le64 tv_sec; + /* time-of-day stamp, nano-seconds */ + __le64 tv_nsec; + /* room for growth w/out changing binary format */ + u8 unused[128]; + u8 data[]; +} __packed; + +#ifdef CONFIG_DEV_COREDUMP +enum ath11k_fw_crash_dump_type ath11k_coredump_get_dump_type(int type); +void ath11k_coredump_upload(struct work_struct *work); +void ath11k_coredump_collect(struct ath11k_base *ab); +#else +static inline enum +ath11k_fw_crash_dump_type ath11k_coredump_get_dump_type(int type) +{ + return FW_CRASH_DUMP_TYPE_MAX; +} + +static inline void ath11k_coredump_upload(struct work_struct *work) +{ +} + +static inline void ath11k_coredump_collect(struct ath11k_base *ab) +{ +} +#endif + +#endif diff --git a/drivers/net/wireless/ath/ath11k/dp.c b/drivers/net/wireless/ath/ath11k/dp.c index fbf666d0ecf1d..f124b7329e1ac 100644 --- a/drivers/net/wireless/ath/ath11k/dp.c +++ b/drivers/net/wireless/ath/ath11k/dp.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: BSD-3-Clause-Clear /* * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. - * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved. 
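coredump.h above fixes a stable binary layout: a fixed header (struct ath11k_dump_file_data) followed by type/length TLV records (struct ath11k_tlv_dump_data, COREDUMP_TLV_HDR_SIZE bytes of header each). A hypothetical user-space walker, assuming a little-endian host and the packed offsets implied by the struct (16 magic bytes + five u32 + 16-byte guid + two u64 + 128 reserved = 196 bytes before data[]):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void example_walk_dump(const uint8_t *buf, size_t len)
{
	size_t off = 16 + 5 * 4 + 16 + 2 * 8 + 128; /* start of data[] */

	while (off + 8 <= len) { /* 8 == COREDUMP_TLV_HDR_SIZE */
		uint32_t type, tlv_len;

		memcpy(&type, buf + off, 4);
		memcpy(&tlv_len, buf + off + 4, 4);
		printf("tlv type %u, %u bytes\n", type, tlv_len);
		off += 8 + tlv_len;
	}
}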
*/ #include @@ -104,14 +104,12 @@ void ath11k_dp_srng_cleanup(struct ath11k_base *ab, struct dp_srng *ring) if (!ring->vaddr_unaligned) return; - if (ring->cached) { - dma_unmap_single(ab->dev, ring->paddr_unaligned, ring->size, - DMA_FROM_DEVICE); - kfree(ring->vaddr_unaligned); - } else { + if (ring->cached) + dma_free_noncoherent(ab->dev, ring->size, ring->vaddr_unaligned, + ring->paddr_unaligned, DMA_FROM_DEVICE); + else dma_free_coherent(ab->dev, ring->size, ring->vaddr_unaligned, ring->paddr_unaligned); - } ring->vaddr_unaligned = NULL; } @@ -249,25 +247,14 @@ int ath11k_dp_srng_setup(struct ath11k_base *ab, struct dp_srng *ring, default: cached = false; } - - if (cached) { - ring->vaddr_unaligned = kzalloc(ring->size, GFP_KERNEL); - if (!ring->vaddr_unaligned) - return -ENOMEM; - - ring->paddr_unaligned = dma_map_single(ab->dev, - ring->vaddr_unaligned, - ring->size, - DMA_FROM_DEVICE); - if (dma_mapping_error(ab->dev, ring->paddr_unaligned)) { - kfree(ring->vaddr_unaligned); - ring->vaddr_unaligned = NULL; - return -ENOMEM; - } - } } - if (!cached) + if (cached) + ring->vaddr_unaligned = dma_alloc_noncoherent(ab->dev, ring->size, + &ring->paddr_unaligned, + DMA_FROM_DEVICE, + GFP_KERNEL); + else ring->vaddr_unaligned = dma_alloc_coherent(ab->dev, ring->size, &ring->paddr_unaligned, GFP_KERNEL); diff --git a/drivers/net/wireless/ath/ath11k/dp.h b/drivers/net/wireless/ath/ath11k/dp.h index f777314db8b36..7a55afd33be82 100644 --- a/drivers/net/wireless/ath/ath11k/dp.h +++ b/drivers/net/wireless/ath/ath11k/dp.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: BSD-3-Clause-Clear */ /* * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. - * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021-2023, 2025 Qualcomm Innovation Center, Inc. All rights reserved. */ #ifndef ATH11K_DP_H @@ -20,7 +20,6 @@ struct ath11k_ext_irq_grp; struct dp_rx_tid { u8 tid; - u32 *vaddr; dma_addr_t paddr; u32 size; u32 ba_win_sz; @@ -37,6 +36,9 @@ struct dp_rx_tid { /* Timer info related to fragments */ struct timer_list frag_timer; struct ath11k_base *ab; + u32 *vaddr_unaligned; + dma_addr_t paddr_unaligned; + u32 unaligned_size; }; #define DP_REO_DESC_FREE_THRESHOLD 64 diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.c b/drivers/net/wireless/ath/ath11k/dp_rx.c index 029ecf51c9efd..f2bdbac2a0b7f 100644 --- a/drivers/net/wireless/ath/ath11k/dp_rx.c +++ b/drivers/net/wireless/ath/ath11k/dp_rx.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: BSD-3-Clause-Clear /* * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. - * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved. 
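The dp.c hunk above, and the dp_rx.c hunks that follow, replace the kzalloc() + dma_map_single() pairs with dma_alloc_noncoherent()/dma_free_noncoherent(), keeping one allocation per ring or REO queue descriptor and making the lifetime explicit. Because the CPU still writes the descriptor after allocation, the patch adds an explicit dma_sync_single_for_device(). A condensed sketch of the pattern, with illustrative function names:

#include <linux/dma-mapping.h>

static void *example_alloc_desc(struct device *dev, size_t size,
				dma_addr_t *paddr)
{
	void *vaddr = dma_alloc_noncoherent(dev, size, paddr,
					    DMA_BIDIRECTIONAL, GFP_KERNEL);

	if (!vaddr)
		return NULL;

	/* ... CPU fills the descriptor here ... */

	/* push the CPU writes out before the device reads them */
	dma_sync_single_for_device(dev, *paddr, size, DMA_TO_DEVICE);

	return vaddr;
}

static void example_free_desc(struct device *dev, size_t size,
			      void *vaddr, dma_addr_t paddr)
{
	dma_free_noncoherent(dev, size, vaddr, paddr, DMA_BIDIRECTIONAL);
}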
*/ #include @@ -675,11 +675,11 @@ void ath11k_dp_reo_cmd_list_cleanup(struct ath11k_base *ab) list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) { list_del(&cmd->list); rx_tid = &cmd->data; - if (rx_tid->vaddr) { - dma_unmap_single(ab->dev, rx_tid->paddr, - rx_tid->size, DMA_BIDIRECTIONAL); - kfree(rx_tid->vaddr); - rx_tid->vaddr = NULL; + if (rx_tid->vaddr_unaligned) { + dma_free_noncoherent(ab->dev, rx_tid->unaligned_size, + rx_tid->vaddr_unaligned, + rx_tid->paddr_unaligned, DMA_BIDIRECTIONAL); + rx_tid->vaddr_unaligned = NULL; } kfree(cmd); } @@ -689,11 +689,11 @@ void ath11k_dp_reo_cmd_list_cleanup(struct ath11k_base *ab) list_del(&cmd_cache->list); dp->reo_cmd_cache_flush_count--; rx_tid = &cmd_cache->data; - if (rx_tid->vaddr) { - dma_unmap_single(ab->dev, rx_tid->paddr, - rx_tid->size, DMA_BIDIRECTIONAL); - kfree(rx_tid->vaddr); - rx_tid->vaddr = NULL; + if (rx_tid->vaddr_unaligned) { + dma_free_noncoherent(ab->dev, rx_tid->unaligned_size, + rx_tid->vaddr_unaligned, + rx_tid->paddr_unaligned, DMA_BIDIRECTIONAL); + rx_tid->vaddr_unaligned = NULL; } kfree(cmd_cache); } @@ -708,11 +708,11 @@ static void ath11k_dp_reo_cmd_free(struct ath11k_dp *dp, void *ctx, if (status != HAL_REO_CMD_SUCCESS) ath11k_warn(dp->ab, "failed to flush rx tid hw desc, tid %d status %d\n", rx_tid->tid, status); - if (rx_tid->vaddr) { - dma_unmap_single(dp->ab->dev, rx_tid->paddr, rx_tid->size, - DMA_BIDIRECTIONAL); - kfree(rx_tid->vaddr); - rx_tid->vaddr = NULL; + if (rx_tid->vaddr_unaligned) { + dma_free_noncoherent(dp->ab->dev, rx_tid->unaligned_size, + rx_tid->vaddr_unaligned, + rx_tid->paddr_unaligned, DMA_BIDIRECTIONAL); + rx_tid->vaddr_unaligned = NULL; } } @@ -749,10 +749,10 @@ static void ath11k_dp_reo_cache_flush(struct ath11k_base *ab, if (ret) { ath11k_err(ab, "failed to send HAL_REO_CMD_FLUSH_CACHE cmd, tid %d (%d)\n", rx_tid->tid, ret); - dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size, - DMA_BIDIRECTIONAL); - kfree(rx_tid->vaddr); - rx_tid->vaddr = NULL; + dma_free_noncoherent(ab->dev, rx_tid->unaligned_size, + rx_tid->vaddr_unaligned, + rx_tid->paddr_unaligned, DMA_BIDIRECTIONAL); + rx_tid->vaddr_unaligned = NULL; } } @@ -802,10 +802,10 @@ static void ath11k_dp_rx_tid_del_func(struct ath11k_dp *dp, void *ctx, return; free_desc: - dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size, - DMA_BIDIRECTIONAL); - kfree(rx_tid->vaddr); - rx_tid->vaddr = NULL; + dma_free_noncoherent(ab->dev, rx_tid->unaligned_size, + rx_tid->vaddr_unaligned, + rx_tid->paddr_unaligned, DMA_BIDIRECTIONAL); + rx_tid->vaddr_unaligned = NULL; } void ath11k_peer_rx_tid_delete(struct ath11k *ar, @@ -831,14 +831,16 @@ void ath11k_peer_rx_tid_delete(struct ath11k *ar, if (ret != -ESHUTDOWN) ath11k_err(ar->ab, "failed to send HAL_REO_CMD_UPDATE_RX_QUEUE cmd, tid %d (%d)\n", tid, ret); - dma_unmap_single(ar->ab->dev, rx_tid->paddr, rx_tid->size, - DMA_BIDIRECTIONAL); - kfree(rx_tid->vaddr); - rx_tid->vaddr = NULL; + dma_free_noncoherent(ar->ab->dev, rx_tid->unaligned_size, + rx_tid->vaddr_unaligned, + rx_tid->paddr_unaligned, DMA_BIDIRECTIONAL); + rx_tid->vaddr_unaligned = NULL; } rx_tid->paddr = 0; + rx_tid->paddr_unaligned = 0; rx_tid->size = 0; + rx_tid->unaligned_size = 0; } static int ath11k_dp_rx_link_desc_return(struct ath11k_base *ab, @@ -982,10 +984,9 @@ static void ath11k_dp_rx_tid_mem_free(struct ath11k_base *ab, if (!rx_tid->active) goto unlock_exit; - dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size, - DMA_BIDIRECTIONAL); - kfree(rx_tid->vaddr); - rx_tid->vaddr = NULL; + 
dma_free_noncoherent(ab->dev, rx_tid->unaligned_size, rx_tid->vaddr_unaligned, + rx_tid->paddr_unaligned, DMA_BIDIRECTIONAL); + rx_tid->vaddr_unaligned = NULL; rx_tid->active = false; @@ -1000,9 +1001,8 @@ int ath11k_peer_rx_tid_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id, struct ath11k_base *ab = ar->ab; struct ath11k_peer *peer; struct dp_rx_tid *rx_tid; - u32 hw_desc_sz; - u32 *addr_aligned; - void *vaddr; + u32 hw_desc_sz, *vaddr; + void *vaddr_unaligned; dma_addr_t paddr; int ret; @@ -1050,49 +1050,40 @@ int ath11k_peer_rx_tid_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id, else hw_desc_sz = ath11k_hal_reo_qdesc_size(DP_BA_WIN_SZ_MAX, tid); - vaddr = kzalloc(hw_desc_sz + HAL_LINK_DESC_ALIGN - 1, GFP_ATOMIC); - if (!vaddr) { + rx_tid->unaligned_size = hw_desc_sz + HAL_LINK_DESC_ALIGN - 1; + vaddr_unaligned = dma_alloc_noncoherent(ab->dev, rx_tid->unaligned_size, &paddr, + DMA_BIDIRECTIONAL, GFP_ATOMIC); + if (!vaddr_unaligned) { spin_unlock_bh(&ab->base_lock); return -ENOMEM; } - addr_aligned = PTR_ALIGN(vaddr, HAL_LINK_DESC_ALIGN); - - ath11k_hal_reo_qdesc_setup(addr_aligned, tid, ba_win_sz, - ssn, pn_type); - - paddr = dma_map_single(ab->dev, addr_aligned, hw_desc_sz, - DMA_BIDIRECTIONAL); - - ret = dma_mapping_error(ab->dev, paddr); - if (ret) { - spin_unlock_bh(&ab->base_lock); - ath11k_warn(ab, "failed to setup dma map for peer %pM rx tid %d: %d\n", - peer_mac, tid, ret); - goto err_mem_free; - } - - rx_tid->vaddr = vaddr; - rx_tid->paddr = paddr; + rx_tid->vaddr_unaligned = vaddr_unaligned; + vaddr = PTR_ALIGN(vaddr_unaligned, HAL_LINK_DESC_ALIGN); + rx_tid->paddr_unaligned = paddr; + rx_tid->paddr = rx_tid->paddr_unaligned + ((unsigned long)vaddr - + (unsigned long)rx_tid->vaddr_unaligned); + ath11k_hal_reo_qdesc_setup(vaddr, tid, ba_win_sz, ssn, pn_type); rx_tid->size = hw_desc_sz; rx_tid->active = true; + /* After dma_alloc_noncoherent, vaddr is being modified for reo qdesc setup. + * Since these changes are not reflected in the device, driver now needs to + * explicitly call dma_sync_single_for_device. 
+ */ + dma_sync_single_for_device(ab->dev, rx_tid->paddr, + rx_tid->size, + DMA_TO_DEVICE); spin_unlock_bh(&ab->base_lock); - ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id, peer_mac, - paddr, tid, 1, ba_win_sz); + ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id, peer_mac, rx_tid->paddr, + tid, 1, ba_win_sz); if (ret) { ath11k_warn(ar->ab, "failed to setup rx reorder queue for peer %pM tid %d: %d\n", peer_mac, tid, ret); ath11k_dp_rx_tid_mem_free(ab, peer_mac, vdev_id, tid); } - return ret; - -err_mem_free: - kfree(rx_tid->vaddr); - rx_tid->vaddr = NULL; - return ret; } @@ -2831,8 +2822,6 @@ static void ath11k_dp_rx_update_peer_stats(struct ath11k_sta *arsta, rx_stats->dcm_count += ppdu_info->dcm; rx_stats->ru_alloc_cnt[ppdu_info->ru_alloc] += num_msdu; - arsta->rssi_comb = ppdu_info->rssi_comb; - BUILD_BUG_ON(ARRAY_SIZE(arsta->chain_signal) > ARRAY_SIZE(ppdu_info->rssi_chain_pri20)); @@ -4783,7 +4772,7 @@ u32 ath11k_dp_rx_mon_mpdu_pop(struct ath11k *ar, int mac_id, if (!msdu) { ath11k_dbg(ar->ab, ATH11K_DBG_DATA, "msdu_pop: invalid buf_id %d\n", buf_id); - break; + goto next_msdu; } rxcb = ATH11K_SKB_RXCB(msdu); if (!rxcb->unmapped) { @@ -5148,7 +5137,7 @@ static void ath11k_dp_rx_mon_dest_process(struct ath11k *ar, int mac_id, struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data; const struct ath11k_hw_hal_params *hal_params; void *ring_entry; - void *mon_dst_srng; + struct hal_srng *mon_dst_srng; u32 ppdu_id; u32 rx_bufs_used; u32 ring_id; @@ -5165,6 +5154,7 @@ static void ath11k_dp_rx_mon_dest_process(struct ath11k *ar, int mac_id, spin_lock_bh(&pmon->mon_lock); + spin_lock_bh(&mon_dst_srng->lock); ath11k_hal_srng_access_begin(ar->ab, mon_dst_srng); ppdu_id = pmon->mon_ppdu_info.ppdu_id; @@ -5223,6 +5213,7 @@ static void ath11k_dp_rx_mon_dest_process(struct ath11k *ar, int mac_id, mon_dst_srng); } ath11k_hal_srng_access_end(ar->ab, mon_dst_srng); + spin_unlock_bh(&mon_dst_srng->lock); spin_unlock_bh(&pmon->mon_lock); @@ -5410,7 +5401,7 @@ ath11k_dp_rx_full_mon_mpdu_pop(struct ath11k *ar, "full mon msdu_pop: invalid buf_id %d\n", buf_id); spin_unlock_bh(&rx_ring->idr_lock); - break; + goto next_msdu; } idr_remove(&rx_ring->bufs_idr, buf_id); spin_unlock_bh(&rx_ring->idr_lock); @@ -5612,7 +5603,7 @@ static int ath11k_dp_full_mon_process_rx(struct ath11k_base *ab, int mac_id, struct hal_sw_mon_ring_entries *sw_mon_entries; struct ath11k_pdev_mon_stats *rx_mon_stats; struct sk_buff *head_msdu, *tail_msdu; - void *mon_dst_srng = &ar->ab->hal.srng_list[dp->rxdma_mon_dst_ring.ring_id]; + struct hal_srng *mon_dst_srng; void *ring_entry; u32 rx_bufs_used = 0, mpdu_rx_bufs_used; int quota = 0, ret; @@ -5628,6 +5619,9 @@ static int ath11k_dp_full_mon_process_rx(struct ath11k_base *ab, int mac_id, goto reap_status_ring; } + mon_dst_srng = &ar->ab->hal.srng_list[dp->rxdma_mon_dst_ring.ring_id]; + spin_lock_bh(&mon_dst_srng->lock); + ath11k_hal_srng_access_begin(ar->ab, mon_dst_srng); while ((ring_entry = ath11k_hal_srng_dst_peek(ar->ab, mon_dst_srng))) { head_msdu = NULL; @@ -5671,6 +5665,7 @@ static int ath11k_dp_full_mon_process_rx(struct ath11k_base *ab, int mac_id, } ath11k_hal_srng_access_end(ar->ab, mon_dst_srng); + spin_unlock_bh(&mon_dst_srng->lock); spin_unlock_bh(&pmon->mon_lock); if (rx_bufs_used) { diff --git a/drivers/net/wireless/ath/ath11k/fw.c b/drivers/net/wireless/ath/ath11k/fw.c index 4e36292a79db8..cbbd8e57119f2 100644 --- a/drivers/net/wireless/ath/ath11k/fw.c +++ b/drivers/net/wireless/ath/ath11k/fw.c @@ -1,6 +1,6 @@ // 
SPDX-License-Identifier: BSD-3-Clause-Clear /* - * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2022-2025 Qualcomm Innovation Center, Inc. All rights reserved. */ #include "core.h" @@ -166,3 +166,4 @@ void ath11k_fw_destroy(struct ath11k_base *ab) { release_firmware(ab->fw.fw); } +EXPORT_SYMBOL(ath11k_fw_destroy); diff --git a/drivers/net/wireless/ath/ath11k/hif.h b/drivers/net/wireless/ath/ath11k/hif.h index 674ff772b181b..770c39ff99b45 100644 --- a/drivers/net/wireless/ath/ath11k/hif.h +++ b/drivers/net/wireless/ath/ath11k/hif.h @@ -31,6 +31,7 @@ struct ath11k_hif_ops { void (*ce_irq_enable)(struct ath11k_base *ab); void (*ce_irq_disable)(struct ath11k_base *ab); void (*get_ce_msi_idx)(struct ath11k_base *ab, u32 ce_id, u32 *msi_idx); + void (*coredump_download)(struct ath11k_base *ab); }; static inline void ath11k_hif_ce_irq_enable(struct ath11k_base *ab) @@ -146,4 +147,10 @@ static inline void ath11k_get_ce_msi_idx(struct ath11k_base *ab, u32 ce_id, *msi_data_idx = ce_id; } +static inline void ath11k_hif_coredump_download(struct ath11k_base *ab) +{ + if (ab->hif.ops->coredump_download) + ab->hif.ops->coredump_download(ab); +} + #endif /* _HIF_H_ */ diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c index 1556392f7ad48..97816916abac9 100644 --- a/drivers/net/wireless/ath/ath11k/mac.c +++ b/drivers/net/wireless/ath/ath11k/mac.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: BSD-3-Clause-Clear /* * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. - * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved. */ #include @@ -1529,17 +1529,23 @@ static int ath11k_mac_set_vif_params(struct ath11k_vif *arvif, return ret; } -static int ath11k_mac_setup_bcn_tmpl_ema(struct ath11k_vif *arvif) +static struct ath11k_vif *ath11k_mac_get_tx_arvif(struct ath11k_vif *arvif) +{ + if (arvif->vif->mbssid_tx_vif) + return ath11k_vif_to_arvif(arvif->vif->mbssid_tx_vif); + + return NULL; +} + +static int ath11k_mac_setup_bcn_tmpl_ema(struct ath11k_vif *arvif, + struct ath11k_vif *tx_arvif) { - struct ath11k_vif *tx_arvif; struct ieee80211_ema_beacons *beacons; int ret = 0; bool nontx_vif_params_set = false; u32 params = 0; u8 i = 0; - tx_arvif = ath11k_vif_to_arvif(arvif->vif->mbssid_tx_vif); - beacons = ieee80211_beacon_get_template_ema_list(tx_arvif->ar->hw, tx_arvif->vif, 0); if (!beacons || !beacons->cnt) { @@ -1585,25 +1591,22 @@ static int ath11k_mac_setup_bcn_tmpl_ema(struct ath11k_vif *arvif) return ret; } -static int ath11k_mac_setup_bcn_tmpl_mbssid(struct ath11k_vif *arvif) +static int ath11k_mac_setup_bcn_tmpl_mbssid(struct ath11k_vif *arvif, + struct ath11k_vif *tx_arvif) { struct ath11k *ar = arvif->ar; struct ath11k_base *ab = ar->ab; - struct ath11k_vif *tx_arvif = arvif; struct ieee80211_hw *hw = ar->hw; struct ieee80211_vif *vif = arvif->vif; struct ieee80211_mutable_offsets offs = {}; struct sk_buff *bcn; int ret; - if (vif->mbssid_tx_vif) { - tx_arvif = ath11k_vif_to_arvif(vif->mbssid_tx_vif); - if (tx_arvif != arvif) { - ar = tx_arvif->ar; - ab = ar->ab; - hw = ar->hw; - vif = tx_arvif->vif; - } + if (tx_arvif != arvif) { + ar = tx_arvif->ar; + ab = ar->ab; + hw = ar->hw; + vif = tx_arvif->vif; } bcn = ieee80211_beacon_get_template(hw, vif, &offs, 0); @@ -1632,6 +1635,7 @@ static int ath11k_mac_setup_bcn_tmpl_mbssid(struct ath11k_vif *arvif) static int 
ath11k_mac_setup_bcn_tmpl(struct ath11k_vif *arvif) { struct ieee80211_vif *vif = arvif->vif; + struct ath11k_vif *tx_arvif; if (arvif->vdev_type != WMI_VDEV_TYPE_AP) return 0; @@ -1639,14 +1643,18 @@ static int ath11k_mac_setup_bcn_tmpl(struct ath11k_vif *arvif) /* Target does not expect beacon templates for the already up * non-transmitting interfaces, and results in a crash if sent. */ - if (vif->mbssid_tx_vif && - arvif != ath11k_vif_to_arvif(vif->mbssid_tx_vif) && arvif->is_up) - return 0; + tx_arvif = ath11k_mac_get_tx_arvif(arvif); + if (tx_arvif) { + if (arvif != tx_arvif && arvif->is_up) + return 0; - if (vif->bss_conf.ema_ap && vif->mbssid_tx_vif) - return ath11k_mac_setup_bcn_tmpl_ema(arvif); + if (vif->bss_conf.ema_ap) + return ath11k_mac_setup_bcn_tmpl_ema(arvif, tx_arvif); + } else { + tx_arvif = arvif; + } - return ath11k_mac_setup_bcn_tmpl_mbssid(arvif); + return ath11k_mac_setup_bcn_tmpl_mbssid(arvif, tx_arvif); } void ath11k_mac_bcn_tx_event(struct ath11k_vif *arvif) @@ -1674,7 +1682,7 @@ static void ath11k_control_beaconing(struct ath11k_vif *arvif, struct ieee80211_bss_conf *info) { struct ath11k *ar = arvif->ar; - struct ath11k_vif *tx_arvif = NULL; + struct ath11k_vif *tx_arvif; int ret = 0; lockdep_assert_held(&arvif->ar->conf_mutex); @@ -1701,9 +1709,7 @@ static void ath11k_control_beaconing(struct ath11k_vif *arvif, ether_addr_copy(arvif->bssid, info->bssid); - if (arvif->vif->mbssid_tx_vif) - tx_arvif = ath11k_vif_to_arvif(arvif->vif->mbssid_tx_vif); - + tx_arvif = ath11k_mac_get_tx_arvif(arvif); ret = ath11k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid, arvif->bssid, tx_arvif ? tx_arvif->bssid : NULL, @@ -5204,6 +5210,45 @@ static int ath11k_conf_tx_uapsd(struct ath11k *ar, struct ieee80211_vif *vif, return ret; } +static int ath11k_mac_op_conf_tx_mu_edca(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + unsigned int link_id, u16 ac, + const struct ieee80211_tx_queue_params *params) +{ + struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); + struct ath11k *ar = hw->priv; + struct wmi_wmm_params_arg *p; + int ret; + + switch (ac) { + case IEEE80211_AC_VO: + p = &arvif->muedca_params.ac_vo; + break; + case IEEE80211_AC_VI: + p = &arvif->muedca_params.ac_vi; + break; + case IEEE80211_AC_BE: + p = &arvif->muedca_params.ac_be; + break; + case IEEE80211_AC_BK: + p = &arvif->muedca_params.ac_bk; + break; + default: + ath11k_warn(ar->ab, "error ac: %d", ac); + return -EINVAL; + } + + p->cwmin = u8_get_bits(params->mu_edca_param_rec.ecw_min_max, GENMASK(3, 0)); + p->cwmax = u8_get_bits(params->mu_edca_param_rec.ecw_min_max, GENMASK(7, 4)); + p->aifs = u8_get_bits(params->mu_edca_param_rec.aifsn, GENMASK(3, 0)); + p->txop = params->mu_edca_param_rec.mu_edca_timer; + + ret = ath11k_wmi_send_wmm_update_cmd_tlv(ar, arvif->vdev_id, + &arvif->muedca_params, + WMI_WMM_PARAM_TYPE_11AX_MU_EDCA); + return ret; +} + static int ath11k_mac_op_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, unsigned int link_id, u16 ac, @@ -5242,12 +5287,22 @@ static int ath11k_mac_op_conf_tx(struct ieee80211_hw *hw, p->txop = params->txop; ret = ath11k_wmi_send_wmm_update_cmd_tlv(ar, arvif->vdev_id, - &arvif->wmm_params); + &arvif->wmm_params, + WMI_WMM_PARAM_TYPE_LEGACY); if (ret) { ath11k_warn(ar->ab, "failed to set wmm params: %d\n", ret); goto exit; } + if (params->mu_edca) { + ret = ath11k_mac_op_conf_tx_mu_edca(hw, vif, link_id, ac, + params); + if (ret) { + ath11k_warn(ar->ab, "failed to set mu_edca params: %d\n", ret); + goto exit; + } + } + ret = 
ath11k_conf_tx_uapsd(ar, vif, ac, params->uapsd); if (ret) @@ -5336,8 +5391,6 @@ static int ath11k_mac_set_txbf_conf(struct ath11k_vif *arvif) if (vht_cap & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE)) { nsts = vht_cap & IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK; nsts >>= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT; - if (nsts > (ar->num_rx_chains - 1)) - nsts = ar->num_rx_chains - 1; value |= SM(nsts, WMI_TXBF_STS_CAP_OFFSET); } @@ -5421,9 +5474,6 @@ static void ath11k_set_vht_txbf_cap(struct ath11k *ar, u32 *vht_cap) /* Enable Beamformee STS Field only if SU BF is enabled */ if (subfee) { - if (nsts > (ar->num_rx_chains - 1)) - nsts = ar->num_rx_chains - 1; - nsts <<= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT; nsts &= IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK; *vht_cap |= nsts; @@ -6288,6 +6338,7 @@ static void ath11k_mac_op_stop(struct ieee80211_hw *hw, bool suspend) { struct ath11k *ar = hw->priv; struct htt_ppdu_stats_info *ppdu_stats, *tmp; + struct scan_chan_list_params *params; int ret; ath11k_mac_drain_tx(ar); @@ -6303,6 +6354,7 @@ static void ath11k_mac_op_stop(struct ieee80211_hw *hw, bool suspend) mutex_unlock(&ar->conf_mutex); cancel_delayed_work_sync(&ar->scan.timeout); + cancel_work_sync(&ar->channel_update_work); cancel_work_sync(&ar->regd_update_work); cancel_work_sync(&ar->ab->update_11d_work); @@ -6312,10 +6364,19 @@ static void ath11k_mac_op_stop(struct ieee80211_hw *hw, bool suspend) } spin_lock_bh(&ar->data_lock); + list_for_each_entry_safe(ppdu_stats, tmp, &ar->ppdu_stats_info, list) { list_del(&ppdu_stats->list); kfree(ppdu_stats); } + + while ((params = list_first_entry_or_null(&ar->channel_update_queue, + struct scan_chan_list_params, + list))) { + list_del(¶ms->list); + kfree(params); + } + spin_unlock_bh(&ar->data_lock); rcu_assign_pointer(ar->ab->pdevs_active[ar->pdev_idx], NULL); @@ -6330,23 +6391,20 @@ static int ath11k_mac_setup_vdev_params_mbssid(struct ath11k_vif *arvif, { struct ath11k *ar = arvif->ar; struct ath11k_vif *tx_arvif; - struct ieee80211_vif *tx_vif; *tx_vdev_id = 0; - tx_vif = arvif->vif->mbssid_tx_vif; - if (!tx_vif) { + tx_arvif = ath11k_mac_get_tx_arvif(arvif); + if (!tx_arvif) { *flags = WMI_HOST_VDEV_FLAGS_NON_MBSSID_AP; return 0; } - tx_arvif = ath11k_vif_to_arvif(tx_vif); - if (arvif->vif->bss_conf.nontransmitted) { - if (ar->hw->wiphy != ieee80211_vif_to_wdev(tx_vif)->wiphy) + if (ar->hw->wiphy != tx_arvif->ar->hw->wiphy) return -EINVAL; *flags = WMI_HOST_VDEV_FLAGS_NON_TRANSMIT_AP; - *tx_vdev_id = ath11k_vif_to_arvif(tx_vif)->vdev_id; + *tx_vdev_id = tx_arvif->vdev_id; } else if (tx_arvif == arvif) { *flags = WMI_HOST_VDEV_FLAGS_TRANSMIT_AP; } else { @@ -7306,8 +7364,7 @@ ath11k_mac_update_vif_chan(struct ath11k *ar, int n_vifs) { struct ath11k_base *ab = ar->ab; - struct ath11k_vif *arvif, *tx_arvif = NULL; - struct ieee80211_vif *mbssid_tx_vif; + struct ath11k_vif *arvif, *tx_arvif; int ret; int i; bool monitor_vif = false; @@ -7361,10 +7418,7 @@ ath11k_mac_update_vif_chan(struct ath11k *ar, ath11k_warn(ab, "failed to update bcn tmpl during csa: %d\n", ret); - mbssid_tx_vif = arvif->vif->mbssid_tx_vif; - if (mbssid_tx_vif) - tx_arvif = ath11k_vif_to_arvif(mbssid_tx_vif); - + tx_arvif = ath11k_mac_get_tx_arvif(arvif); ret = ath11k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid, arvif->bssid, tx_arvif ? 
tx_arvif->bssid : NULL, @@ -10019,6 +10073,7 @@ static const struct wiphy_iftype_ext_capab ath11k_iftypes_ext_capa[] = { static void __ath11k_mac_unregister(struct ath11k *ar) { + cancel_work_sync(&ar->channel_update_work); cancel_work_sync(&ar->regd_update_work); ieee80211_unregister_hw(ar->hw); @@ -10418,6 +10473,8 @@ int ath11k_mac_allocate(struct ath11k_base *ab) init_completion(&ar->thermal.wmi_sync); INIT_DELAYED_WORK(&ar->scan.timeout, ath11k_scan_timeout_work); + INIT_WORK(&ar->channel_update_work, ath11k_regd_update_chan_list_work); + INIT_LIST_HEAD(&ar->channel_update_queue); INIT_WORK(&ar->regd_update_work, ath11k_regd_update_work); INIT_WORK(&ar->wmi_mgmt_tx_work, ath11k_mgmt_over_wmi_tx_work); diff --git a/drivers/net/wireless/ath/ath11k/mhi.c b/drivers/net/wireless/ath/ath11k/mhi.c index 6e45f464a429b..fc77eac83e953 100644 --- a/drivers/net/wireless/ath/ath11k/mhi.c +++ b/drivers/net/wireless/ath/ath11k/mhi.c @@ -491,3 +491,8 @@ int ath11k_mhi_resume(struct ath11k_pci *ab_pci) return 0; } + +void ath11k_mhi_coredump(struct mhi_controller *mhi_ctrl, bool in_panic) +{ + mhi_download_rddm_image(mhi_ctrl, in_panic); +} diff --git a/drivers/net/wireless/ath/ath11k/mhi.h b/drivers/net/wireless/ath/ath11k/mhi.h index a682aad52fc51..651470091bd5a 100644 --- a/drivers/net/wireless/ath/ath11k/mhi.h +++ b/drivers/net/wireless/ath/ath11k/mhi.h @@ -26,5 +26,6 @@ void ath11k_mhi_clear_vector(struct ath11k_base *ab); int ath11k_mhi_suspend(struct ath11k_pci *ar_pci); int ath11k_mhi_resume(struct ath11k_pci *ar_pci); +void ath11k_mhi_coredump(struct mhi_controller *mhi_ctrl, bool in_panic); #endif diff --git a/drivers/net/wireless/ath/ath11k/pci.c b/drivers/net/wireless/ath/ath11k/pci.c index b93f04973ad79..412f4a134e4a8 100644 --- a/drivers/net/wireless/ath/ath11k/pci.c +++ b/drivers/net/wireless/ath/ath11k/pci.c @@ -1,13 +1,15 @@ // SPDX-License-Identifier: BSD-3-Clause-Clear /* * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved. - * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved. 
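The channel_update_work initialized above pairs with ath11k_regd_update_chan_list_work() later in this patch: producers append scan_chan_list_params to ar->channel_update_queue under ar->data_lock and the worker drains the queue, so the long 11d/hw-scan waits happen in process context instead of in the regulatory notifier. The drain idiom, in generic form (all names here are illustrative):

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct example_item {
	struct list_head list;
};

struct example_ctx {
	spinlock_t lock;
	struct list_head queue;
	struct work_struct work;
};

static void example_drain_worker(struct work_struct *work)
{
	struct example_ctx *ctx = container_of(work, struct example_ctx, work);
	struct example_item *item, *tmp;
	LIST_HEAD(local);

	/* detach the shared queue under the lock... */
	spin_lock_bh(&ctx->lock);
	list_splice_tail_init(&ctx->queue, &local);
	spin_unlock_bh(&ctx->lock);

	/* ...then process it without the lock held, free to sleep */
	list_for_each_entry_safe(item, tmp, &local, list) {
		list_del(&item->list);
		kfree(item);
	}
}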
*/ #include #include #include #include +#include +#include #include "pci.h" #include "core.h" @@ -610,6 +612,187 @@ static void ath11k_pci_aspm_restore(struct ath11k_pci *ab_pci) PCI_EXP_LNKCTL_ASPMC); } +#ifdef CONFIG_DEV_COREDUMP +static int ath11k_pci_coredump_calculate_size(struct ath11k_base *ab, u32 *dump_seg_sz) +{ + struct ath11k_pci *ab_pci = ath11k_pci_priv(ab); + struct mhi_controller *mhi_ctrl = ab_pci->mhi_ctrl; + struct image_info *rddm_img, *fw_img; + struct ath11k_tlv_dump_data *dump_tlv; + enum ath11k_fw_crash_dump_type mem_type; + u32 len = 0, rddm_tlv_sz = 0, paging_tlv_sz = 0; + struct ath11k_dump_file_data *file_data; + int i; + + rddm_img = mhi_ctrl->rddm_image; + if (!rddm_img) { + ath11k_err(ab, "No RDDM dump found\n"); + return 0; + } + + fw_img = mhi_ctrl->fbc_image; + + for (i = 0; i < fw_img->entries ; i++) { + if (!fw_img->mhi_buf[i].buf) + continue; + + paging_tlv_sz += fw_img->mhi_buf[i].len; + } + dump_seg_sz[FW_CRASH_DUMP_PAGING_DATA] = paging_tlv_sz; + + for (i = 0; i < rddm_img->entries; i++) { + if (!rddm_img->mhi_buf[i].buf) + continue; + + rddm_tlv_sz += rddm_img->mhi_buf[i].len; + } + dump_seg_sz[FW_CRASH_DUMP_RDDM_DATA] = rddm_tlv_sz; + + for (i = 0; i < ab->qmi.mem_seg_count; i++) { + mem_type = ath11k_coredump_get_dump_type(ab->qmi.target_mem[i].type); + + if (mem_type == FW_CRASH_DUMP_NONE) + continue; + + if (mem_type == FW_CRASH_DUMP_TYPE_MAX) { + ath11k_dbg(ab, ATH11K_DBG_PCI, + "target mem region type %d not supported", + ab->qmi.target_mem[i].type); + continue; + } + + if (!ab->qmi.target_mem[i].anyaddr) + continue; + + dump_seg_sz[mem_type] += ab->qmi.target_mem[i].size; + } + + for (i = 0; i < FW_CRASH_DUMP_TYPE_MAX; i++) { + if (!dump_seg_sz[i]) + continue; + + len += sizeof(*dump_tlv) + dump_seg_sz[i]; + } + + if (len) + len += sizeof(*file_data); + + return len; +} + +static void ath11k_pci_coredump_download(struct ath11k_base *ab) +{ + struct ath11k_pci *ab_pci = ath11k_pci_priv(ab); + struct mhi_controller *mhi_ctrl = ab_pci->mhi_ctrl; + struct image_info *rddm_img, *fw_img; + struct timespec64 timestamp; + int i, len, mem_idx; + enum ath11k_fw_crash_dump_type mem_type; + struct ath11k_dump_file_data *file_data; + struct ath11k_tlv_dump_data *dump_tlv; + size_t hdr_len = sizeof(*file_data); + void *buf; + u32 dump_seg_sz[FW_CRASH_DUMP_TYPE_MAX] = { 0 }; + + ath11k_mhi_coredump(mhi_ctrl, false); + + len = ath11k_pci_coredump_calculate_size(ab, dump_seg_sz); + if (!len) { + ath11k_warn(ab, "No crash dump data found for devcoredump"); + return; + } + + rddm_img = mhi_ctrl->rddm_image; + fw_img = mhi_ctrl->fbc_image; + + /* dev_coredumpv() requires vmalloc data */ + buf = vzalloc(len); + if (!buf) + return; + + ab->dump_data = buf; + ab->ath11k_coredump_len = len; + file_data = ab->dump_data; + strscpy(file_data->df_magic, "ATH11K-FW-DUMP", sizeof(file_data->df_magic)); + file_data->len = cpu_to_le32(len); + file_data->version = cpu_to_le32(ATH11K_FW_CRASH_DUMP_V2); + file_data->chip_id = cpu_to_le32(ab_pci->dev_id); + file_data->qrtr_id = cpu_to_le32(ab_pci->ab->qmi.service_ins_id); + file_data->bus_id = cpu_to_le32(pci_domain_nr(ab_pci->pdev->bus)); + guid_gen(&file_data->guid); + ktime_get_real_ts64(×tamp); + file_data->tv_sec = cpu_to_le64(timestamp.tv_sec); + file_data->tv_nsec = cpu_to_le64(timestamp.tv_nsec); + buf += hdr_len; + dump_tlv = buf; + dump_tlv->type = cpu_to_le32(FW_CRASH_DUMP_PAGING_DATA); + dump_tlv->tlv_len = cpu_to_le32(dump_seg_sz[FW_CRASH_DUMP_PAGING_DATA]); + buf += COREDUMP_TLV_HDR_SIZE; + + /* append all 
segments together as they are all part of a single contiguous + * block of memory + */ + for (i = 0; i < fw_img->entries ; i++) { + if (!fw_img->mhi_buf[i].buf) + continue; + + memcpy_fromio(buf, (void const __iomem *)fw_img->mhi_buf[i].buf, + fw_img->mhi_buf[i].len); + buf += fw_img->mhi_buf[i].len; + } + + dump_tlv = buf; + dump_tlv->type = cpu_to_le32(FW_CRASH_DUMP_RDDM_DATA); + dump_tlv->tlv_len = cpu_to_le32(dump_seg_sz[FW_CRASH_DUMP_RDDM_DATA]); + buf += COREDUMP_TLV_HDR_SIZE; + + /* append all segments together as they are all part of a single contiguous + * block of memory + */ + for (i = 0; i < rddm_img->entries; i++) { + if (!rddm_img->mhi_buf[i].buf) + continue; + + memcpy_fromio(buf, (void const __iomem *)rddm_img->mhi_buf[i].buf, + rddm_img->mhi_buf[i].len); + buf += rddm_img->mhi_buf[i].len; + } + + mem_idx = FW_CRASH_DUMP_REMOTE_MEM_DATA; + for (; mem_idx < FW_CRASH_DUMP_TYPE_MAX; mem_idx++) { + if (mem_idx == FW_CRASH_DUMP_NONE) + continue; + + for (i = 0; i < ab->qmi.mem_seg_count; i++) { + mem_type = ath11k_coredump_get_dump_type + (ab->qmi.target_mem[i].type); + + if (mem_type != mem_idx) + continue; + + if (!ab->qmi.target_mem[i].anyaddr) { + ath11k_dbg(ab, ATH11K_DBG_PCI, + "Skipping mem region type %d", + ab->qmi.target_mem[i].type); + continue; + } + + dump_tlv = buf; + dump_tlv->type = cpu_to_le32(mem_idx); + dump_tlv->tlv_len = cpu_to_le32(dump_seg_sz[mem_idx]); + buf += COREDUMP_TLV_HDR_SIZE; + + memcpy_fromio(buf, ab->qmi.target_mem[i].iaddr, + ab->qmi.target_mem[i].size); + + buf += ab->qmi.target_mem[i].size; + } + } + + queue_work(ab->workqueue, &ab->dump_work); +} +#endif + static int ath11k_pci_power_up(struct ath11k_base *ab) { struct ath11k_pci *ab_pci = ath11k_pci_priv(ab); @@ -713,6 +896,9 @@ static const struct ath11k_hif_ops ath11k_pci_hif_ops = { .ce_irq_enable = ath11k_pci_hif_ce_irq_enable, .ce_irq_disable = ath11k_pci_hif_ce_irq_disable, .get_ce_msi_idx = ath11k_pcic_get_ce_msi_idx, +#ifdef CONFIG_DEV_COREDUMP + .coredump_download = ath11k_pci_coredump_download, +#endif }; static void ath11k_pci_read_hw_version(struct ath11k_base *ab, u32 *major, u32 *minor) @@ -735,7 +921,7 @@ static int ath11k_pci_set_irq_affinity_hint(struct ath11k_pci *ab_pci, if (test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab_pci->ab->dev_flags)) return 0; - return irq_set_affinity_hint(ab_pci->pdev->irq, m); + return irq_set_affinity_and_hint(ab_pci->pdev->irq, m); } static int ath11k_pci_probe(struct pci_dev *pdev, @@ -939,6 +1125,8 @@ static int ath11k_pci_probe(struct pci_dev *pdev, return 0; err_free_irq: + /* __free_irq() expects the caller to have cleared the affinity hint */ + ath11k_pci_set_irq_affinity_hint(ab_pci, NULL); ath11k_pcic_free_irq(ab); err_ce_free: @@ -981,9 +1169,12 @@ static void ath11k_pci_remove(struct pci_dev *pdev) set_bit(ATH11K_FLAG_UNREGISTERING, &ab->dev_flags); + cancel_work_sync(&ab->reset_work); + cancel_work_sync(&ab->dump_work); ath11k_core_deinit(ab); qmi_fail: + ath11k_fw_destroy(ab); ath11k_mhi_unregister(ab_pci); ath11k_pcic_free_irq(ab); diff --git a/drivers/net/wireless/ath/ath11k/qmi.c b/drivers/net/wireless/ath/ath11k/qmi.c index 5759fc5213162..4f8b08ed1bbc6 100644 --- a/drivers/net/wireless/ath/ath11k/qmi.c +++ b/drivers/net/wireless/ath/ath11k/qmi.c @@ -1957,13 +1957,15 @@ static void ath11k_qmi_free_target_mem_chunk(struct ath11k_base *ab) int i; for (i = 0; i < ab->qmi.mem_seg_count; i++) { - if ((ab->hw_params.fixed_mem_region || - test_bit(ATH11K_FLAG_FIXED_MEM_RGN, &ab->dev_flags)) && - ab->qmi.target_mem[i].iaddr) - 
iounmap(ab->qmi.target_mem[i].iaddr); + if (!ab->qmi.target_mem[i].anyaddr) + continue; - if (!ab->qmi.target_mem[i].vaddr) + if (ab->hw_params.fixed_mem_region || + test_bit(ATH11K_FLAG_FIXED_MEM_RGN, &ab->dev_flags)) { + iounmap(ab->qmi.target_mem[i].iaddr); + ab->qmi.target_mem[i].iaddr = NULL; continue; + } dma_free_coherent(ab->dev, ab->qmi.target_mem[i].prev_size, @@ -2070,7 +2072,7 @@ static int ath11k_qmi_assign_target_mem_chunk(struct ath11k_base *ab) break; case BDF_MEM_REGION_TYPE: ab->qmi.target_mem[idx].paddr = ab->hw_params.bdf_addr; - ab->qmi.target_mem[idx].vaddr = NULL; + ab->qmi.target_mem[idx].iaddr = NULL; ab->qmi.target_mem[idx].size = ab->qmi.target_mem[i].size; ab->qmi.target_mem[idx].type = ab->qmi.target_mem[i].type; idx++; @@ -2093,10 +2095,11 @@ static int ath11k_qmi_assign_target_mem_chunk(struct ath11k_base *ab) } else { ab->qmi.target_mem[idx].paddr = ATH11K_QMI_CALDB_ADDRESS; + ab->qmi.target_mem[idx].iaddr = NULL; } } else { ab->qmi.target_mem[idx].paddr = 0; - ab->qmi.target_mem[idx].vaddr = NULL; + ab->qmi.target_mem[idx].iaddr = NULL; } ab->qmi.target_mem[idx].size = ab->qmi.target_mem[i].size; ab->qmi.target_mem[idx].type = ab->qmi.target_mem[i].type; diff --git a/drivers/net/wireless/ath/ath11k/qmi.h b/drivers/net/wireless/ath/ath11k/qmi.h index 7e06d100af575..7968ab122b65d 100644 --- a/drivers/net/wireless/ath/ath11k/qmi.h +++ b/drivers/net/wireless/ath/ath11k/qmi.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: BSD-3-Clause-Clear */ /* * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. - * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. */ #ifndef ATH11K_QMI_H @@ -102,8 +102,11 @@ struct target_mem_chunk { u32 prev_size; u32 prev_type; dma_addr_t paddr; - u32 *vaddr; - void __iomem *iaddr; + union { + u32 *vaddr; + void __iomem *iaddr; + void *anyaddr; + }; }; struct target_info { @@ -154,6 +157,7 @@ struct ath11k_qmi { #define BDF_MEM_REGION_TYPE 0x2 #define M3_DUMP_REGION_TYPE 0x3 #define CALDB_MEM_REGION_TYPE 0x4 +#define PAGEABLE_MEM_REGION_TYPE 0x9 struct qmi_wlanfw_host_cap_req_msg_v01 { u8 num_clients_valid; diff --git a/drivers/net/wireless/ath/ath11k/reg.c b/drivers/net/wireless/ath/ath11k/reg.c index b0f289784dd3a..d62a2014315a0 100644 --- a/drivers/net/wireless/ath/ath11k/reg.c +++ b/drivers/net/wireless/ath/ath11k/reg.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: BSD-3-Clause-Clear /* * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. - * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved. */ #include @@ -55,6 +55,19 @@ ath11k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request) ath11k_dbg(ar->ab, ATH11K_DBG_REG, "Regulatory Notification received for %s\n", wiphy_name(wiphy)); + if (request->initiator == NL80211_REGDOM_SET_BY_DRIVER) { + ath11k_dbg(ar->ab, ATH11K_DBG_REG, + "driver initiated regd update\n"); + if (ar->state != ATH11K_STATE_ON) + return; + + ret = ath11k_reg_update_chan_list(ar, true); + if (ret) + ath11k_warn(ar->ab, "failed to update channel list: %d\n", ret); + + return; + } + /* Currently supporting only General User Hints. Cell base user * hints to be handled later. 
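The qmi.h hunk above turns the separate vaddr/iaddr members into an anonymous union with a third view, anyaddr, so the cleanup path can ask "is any mapping present?" once and then branch on which kind it was. A minimal sketch of the idea (struct and helper names are illustrative):

#include <linux/types.h>

struct example_mem_chunk {
	union {
		u32 *vaddr;		/* dma_alloc_coherent() mapping */
		void __iomem *iaddr;	/* ioremap() of a fixed region */
		void *anyaddr;		/* either view, for presence checks */
	};
};

static bool example_chunk_mapped(const struct example_mem_chunk *c)
{
	/* one NULL check covers both mapping flavours */
	return c->anyaddr != NULL;
}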
* Hints from other sources like Core, Beacons are not expected for @@ -111,32 +124,7 @@ int ath11k_reg_update_chan_list(struct ath11k *ar, bool wait) struct channel_param *ch; enum nl80211_band band; int num_channels = 0; - int i, ret, left; - - if (wait && ar->state_11d != ATH11K_11D_IDLE) { - left = wait_for_completion_timeout(&ar->completed_11d_scan, - ATH11K_SCAN_TIMEOUT_HZ); - if (!left) { - ath11k_dbg(ar->ab, ATH11K_DBG_REG, - "failed to receive 11d scan complete: timed out\n"); - ar->state_11d = ATH11K_11D_IDLE; - } - ath11k_dbg(ar->ab, ATH11K_DBG_REG, - "11d scan wait left time %d\n", left); - } - - if (wait && - (ar->scan.state == ATH11K_SCAN_STARTING || - ar->scan.state == ATH11K_SCAN_RUNNING)) { - left = wait_for_completion_timeout(&ar->scan.completed, - ATH11K_SCAN_TIMEOUT_HZ); - if (!left) - ath11k_dbg(ar->ab, ATH11K_DBG_REG, - "failed to receive hw scan complete: timed out\n"); - - ath11k_dbg(ar->ab, ATH11K_DBG_REG, - "hw scan wait left time %d\n", left); - } + int i, ret = 0; if (ar->state == ATH11K_STATE_RESTARTING) return 0; @@ -218,6 +206,16 @@ int ath11k_reg_update_chan_list(struct ath11k *ar, bool wait) } } + if (wait) { + spin_lock_bh(&ar->data_lock); + list_add_tail(¶ms->list, &ar->channel_update_queue); + spin_unlock_bh(&ar->data_lock); + + queue_work(ar->ab->workqueue, &ar->channel_update_work); + + return 0; + } + ret = ath11k_wmi_send_scan_chan_list_cmd(ar, params); kfree(params); @@ -293,12 +291,6 @@ int ath11k_regd_update(struct ath11k *ar) if (ret) goto err; - if (ar->state == ATH11K_STATE_ON) { - ret = ath11k_reg_update_chan_list(ar, true); - if (ret) - goto err; - } - return 0; err: ath11k_warn(ab, "failed to perform regd update : %d\n", ret); @@ -804,6 +796,54 @@ ath11k_reg_build_regd(struct ath11k_base *ab, return new_regd; } +void ath11k_regd_update_chan_list_work(struct work_struct *work) +{ + struct ath11k *ar = container_of(work, struct ath11k, + channel_update_work); + struct scan_chan_list_params *params; + struct list_head local_update_list; + int left; + + INIT_LIST_HEAD(&local_update_list); + + spin_lock_bh(&ar->data_lock); + list_splice_tail_init(&ar->channel_update_queue, &local_update_list); + spin_unlock_bh(&ar->data_lock); + + while ((params = list_first_entry_or_null(&local_update_list, + struct scan_chan_list_params, + list))) { + if (ar->state_11d != ATH11K_11D_IDLE) { + left = wait_for_completion_timeout(&ar->completed_11d_scan, + ATH11K_SCAN_TIMEOUT_HZ); + if (!left) { + ath11k_dbg(ar->ab, ATH11K_DBG_REG, + "failed to receive 11d scan complete: timed out\n"); + ar->state_11d = ATH11K_11D_IDLE; + } + + ath11k_dbg(ar->ab, ATH11K_DBG_REG, + "reg 11d scan wait left time %d\n", left); + } + + if ((ar->scan.state == ATH11K_SCAN_STARTING || + ar->scan.state == ATH11K_SCAN_RUNNING)) { + left = wait_for_completion_timeout(&ar->scan.completed, + ATH11K_SCAN_TIMEOUT_HZ); + if (!left) + ath11k_dbg(ar->ab, ATH11K_DBG_REG, + "failed to receive hw scan complete: timed out\n"); + + ath11k_dbg(ar->ab, ATH11K_DBG_REG, + "reg hw scan wait left time %d\n", left); + } + + ath11k_wmi_send_scan_chan_list_cmd(ar, params); + list_del(¶ms->list); + kfree(params); + } +} + static bool ath11k_reg_is_world_alpha(char *alpha) { if (alpha[0] == '0' && alpha[1] == '0') @@ -977,6 +1017,7 @@ void ath11k_regd_update_work(struct work_struct *work) void ath11k_reg_init(struct ath11k *ar) { ar->hw->wiphy->regulatory_flags = REGULATORY_WIPHY_SELF_MANAGED; + ar->hw->wiphy->flags |= WIPHY_FLAG_NOTIFY_REGDOM_BY_DRIVER; ar->hw->wiphy->reg_notifier = ath11k_reg_notifier; } diff 
--git a/drivers/net/wireless/ath/ath11k/reg.h b/drivers/net/wireless/ath/ath11k/reg.h index 263ea90619483..72b4835940157 100644 --- a/drivers/net/wireless/ath/ath11k/reg.h +++ b/drivers/net/wireless/ath/ath11k/reg.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: BSD-3-Clause-Clear */ /* * Copyright (c) 2019 The Linux Foundation. All rights reserved. - * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2022-2025 Qualcomm Innovation Center, Inc. All rights reserved. */ #ifndef ATH11K_REG_H @@ -33,6 +33,7 @@ void ath11k_reg_init(struct ath11k *ar); void ath11k_reg_reset_info(struct cur_regulatory_info *reg_info); void ath11k_reg_free(struct ath11k_base *ab); void ath11k_regd_update_work(struct work_struct *work); +void ath11k_regd_update_chan_list_work(struct work_struct *work); struct ieee80211_regdomain * ath11k_reg_build_regd(struct ath11k_base *ab, struct cur_regulatory_info *reg_info, bool intersect, diff --git a/drivers/net/wireless/ath/ath11k/testmode.c b/drivers/net/wireless/ath/ath11k/testmode.c index 302d66092b973..9be1cd742339c 100644 --- a/drivers/net/wireless/ath/ath11k/testmode.c +++ b/drivers/net/wireless/ath/ath11k/testmode.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: BSD-3-Clause-Clear /* * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. - * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2023-2025 Qualcomm Innovation Center, Inc. All rights reserved. */ #include "testmode.h" @@ -10,18 +10,18 @@ #include "wmi.h" #include "hw.h" #include "core.h" -#include "testmode_i.h" +#include "../testmode_i.h" #define ATH11K_FTM_SEGHDR_CURRENT_SEQ GENMASK(3, 0) #define ATH11K_FTM_SEGHDR_TOTAL_SEGMENTS GENMASK(7, 4) -static const struct nla_policy ath11k_tm_policy[ATH11K_TM_ATTR_MAX + 1] = { - [ATH11K_TM_ATTR_CMD] = { .type = NLA_U32 }, - [ATH11K_TM_ATTR_DATA] = { .type = NLA_BINARY, - .len = ATH11K_TM_DATA_MAX_LEN }, - [ATH11K_TM_ATTR_WMI_CMDID] = { .type = NLA_U32 }, - [ATH11K_TM_ATTR_VERSION_MAJOR] = { .type = NLA_U32 }, - [ATH11K_TM_ATTR_VERSION_MINOR] = { .type = NLA_U32 }, +static const struct nla_policy ath11k_tm_policy[ATH_TM_ATTR_MAX + 1] = { + [ATH_TM_ATTR_CMD] = { .type = NLA_U32 }, + [ATH_TM_ATTR_DATA] = { .type = NLA_BINARY, + .len = ATH_TM_DATA_MAX_LEN }, + [ATH_TM_ATTR_WMI_CMDID] = { .type = NLA_U32 }, + [ATH_TM_ATTR_VERSION_MAJOR] = { .type = NLA_U32 }, + [ATH_TM_ATTR_VERSION_MINOR] = { .type = NLA_U32 }, }; static struct ath11k *ath11k_tm_get_ar(struct ath11k_base *ab) @@ -73,9 +73,9 @@ static void ath11k_tm_wmi_event_unsegmented(struct ath11k_base *ab, u32 cmd_id, goto out; } - if (nla_put_u32(nl_skb, ATH11K_TM_ATTR_CMD, ATH11K_TM_CMD_WMI) || - nla_put_u32(nl_skb, ATH11K_TM_ATTR_WMI_CMDID, cmd_id) || - nla_put(nl_skb, ATH11K_TM_ATTR_DATA, skb->len, skb->data)) { + if (nla_put_u32(nl_skb, ATH_TM_ATTR_CMD, ATH_TM_CMD_WMI) || + nla_put_u32(nl_skb, ATH_TM_ATTR_WMI_CMDID, cmd_id) || + nla_put(nl_skb, ATH_TM_ATTR_DATA, skb->len, skb->data)) { ath11k_warn(ab, "failed to populate testmode unsegmented event\n"); kfree_skb(nl_skb); goto out; @@ -140,7 +140,7 @@ static int ath11k_tm_process_event(struct ath11k_base *ab, u32 cmd_id, data_pos = ab->testmode.data_pos; - if ((data_pos + datalen) > ATH11K_FTM_EVENT_MAX_BUF_LENGTH) { + if ((data_pos + datalen) > ATH_FTM_EVENT_MAX_BUF_LENGTH) { ath11k_warn(ab, "Invalid ftm event length at %d: %d\n", data_pos, datalen); ret = -EINVAL; @@ -172,10 +172,10 @@ static int ath11k_tm_process_event(struct ath11k_base *ab, u32 cmd_id, goto out; 
} - if (nla_put_u32(nl_skb, ATH11K_TM_ATTR_CMD, - ATH11K_TM_CMD_WMI_FTM) || - nla_put_u32(nl_skb, ATH11K_TM_ATTR_WMI_CMDID, cmd_id) || - nla_put(nl_skb, ATH11K_TM_ATTR_DATA, data_pos, + if (nla_put_u32(nl_skb, ATH_TM_ATTR_CMD, + ATH_TM_CMD_WMI_FTM) || + nla_put_u32(nl_skb, ATH_TM_ATTR_WMI_CMDID, cmd_id) || + nla_put(nl_skb, ATH_TM_ATTR_DATA, data_pos, &ab->testmode.eventdata[0])) { ath11k_warn(ab, "failed to populate segmented testmode event"); kfree_skb(nl_skb); @@ -235,23 +235,23 @@ static int ath11k_tm_cmd_get_version(struct ath11k *ar, struct nlattr *tb[]) ath11k_dbg(ar->ab, ATH11K_DBG_TESTMODE, "cmd get version_major %d version_minor %d\n", - ATH11K_TESTMODE_VERSION_MAJOR, - ATH11K_TESTMODE_VERSION_MINOR); + ATH_TESTMODE_VERSION_MAJOR, + ATH_TESTMODE_VERSION_MINOR); skb = cfg80211_testmode_alloc_reply_skb(ar->hw->wiphy, nla_total_size(sizeof(u32))); if (!skb) return -ENOMEM; - ret = nla_put_u32(skb, ATH11K_TM_ATTR_VERSION_MAJOR, - ATH11K_TESTMODE_VERSION_MAJOR); + ret = nla_put_u32(skb, ATH_TM_ATTR_VERSION_MAJOR, + ATH_TESTMODE_VERSION_MAJOR); if (ret) { kfree_skb(skb); return ret; } - ret = nla_put_u32(skb, ATH11K_TM_ATTR_VERSION_MINOR, - ATH11K_TESTMODE_VERSION_MINOR); + ret = nla_put_u32(skb, ATH_TM_ATTR_VERSION_MINOR, + ATH_TESTMODE_VERSION_MINOR); if (ret) { kfree_skb(skb); return ret; @@ -277,7 +277,7 @@ static int ath11k_tm_cmd_testmode_start(struct ath11k *ar, struct nlattr *tb[]) goto err; } - ar->ab->testmode.eventdata = kzalloc(ATH11K_FTM_EVENT_MAX_BUF_LENGTH, + ar->ab->testmode.eventdata = kzalloc(ATH_FTM_EVENT_MAX_BUF_LENGTH, GFP_KERNEL); if (!ar->ab->testmode.eventdata) { ret = -ENOMEM; @@ -310,25 +310,25 @@ static int ath11k_tm_cmd_wmi(struct ath11k *ar, struct nlattr *tb[], mutex_lock(&ar->conf_mutex); - if (!tb[ATH11K_TM_ATTR_DATA]) { + if (!tb[ATH_TM_ATTR_DATA]) { ret = -EINVAL; goto out; } - if (!tb[ATH11K_TM_ATTR_WMI_CMDID]) { + if (!tb[ATH_TM_ATTR_WMI_CMDID]) { ret = -EINVAL; goto out; } - buf = nla_data(tb[ATH11K_TM_ATTR_DATA]); - buf_len = nla_len(tb[ATH11K_TM_ATTR_DATA]); + buf = nla_data(tb[ATH_TM_ATTR_DATA]); + buf_len = nla_len(tb[ATH_TM_ATTR_DATA]); if (!buf_len) { ath11k_warn(ar->ab, "No data present in testmode wmi command\n"); ret = -EINVAL; goto out; } - cmd_id = nla_get_u32(tb[ATH11K_TM_ATTR_WMI_CMDID]); + cmd_id = nla_get_u32(tb[ATH_TM_ATTR_WMI_CMDID]); /* Make sure that the buffer length is long enough to * hold TLV and pdev/vdev id. 
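The testmode rework above moves ath11k onto the shared ATH_TM_* attribute namespace from ../testmode_i.h, so ath11k and ath12k can speak one interface. Segmented FTM events keep their ath11k-local header bits (ATH11K_FTM_SEGHDR_CURRENT_SEQ and ATH11K_FTM_SEGHDR_TOTAL_SEGMENTS near the top of the hunk). A sketch of unpacking them; the u8 carrier is an assumption, since the hunk does not show the field's actual width:

#include <linux/bitfield.h>
#include <linux/printk.h>

static void example_decode_seghdr(u8 seginfo)
{
	u8 cur_seq = FIELD_GET(GENMASK(3, 0), seginfo); /* CURRENT_SEQ */
	u8 total = FIELD_GET(GENMASK(7, 4), seginfo);   /* TOTAL_SEGMENTS */

	pr_debug("ftm segment %u of %u\n", cur_seq, total);
}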
@@ -409,13 +409,13 @@ static int ath11k_tm_cmd_wmi_ftm(struct ath11k *ar, struct nlattr *tb[]) goto out; } - if (!tb[ATH11K_TM_ATTR_DATA]) { + if (!tb[ATH_TM_ATTR_DATA]) { ret = -EINVAL; goto out; } - buf = nla_data(tb[ATH11K_TM_ATTR_DATA]); - buf_len = nla_len(tb[ATH11K_TM_ATTR_DATA]); + buf = nla_data(tb[ATH_TM_ATTR_DATA]); + buf_len = nla_len(tb[ATH_TM_ATTR_DATA]); cmd_id = WMI_PDEV_UTF_CMDID; ath11k_dbg(ar->ab, ATH11K_DBG_TESTMODE, @@ -476,25 +476,25 @@ int ath11k_tm_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif, void *data, int len) { struct ath11k *ar = hw->priv; - struct nlattr *tb[ATH11K_TM_ATTR_MAX + 1]; + struct nlattr *tb[ATH_TM_ATTR_MAX + 1]; int ret; - ret = nla_parse(tb, ATH11K_TM_ATTR_MAX, data, len, ath11k_tm_policy, + ret = nla_parse(tb, ATH_TM_ATTR_MAX, data, len, ath11k_tm_policy, NULL); if (ret) return ret; - if (!tb[ATH11K_TM_ATTR_CMD]) + if (!tb[ATH_TM_ATTR_CMD]) return -EINVAL; - switch (nla_get_u32(tb[ATH11K_TM_ATTR_CMD])) { - case ATH11K_TM_CMD_GET_VERSION: + switch (nla_get_u32(tb[ATH_TM_ATTR_CMD])) { + case ATH_TM_CMD_GET_VERSION: return ath11k_tm_cmd_get_version(ar, tb); - case ATH11K_TM_CMD_WMI: + case ATH_TM_CMD_WMI: return ath11k_tm_cmd_wmi(ar, tb, vif); - case ATH11K_TM_CMD_TESTMODE_START: + case ATH_TM_CMD_TESTMODE_START: return ath11k_tm_cmd_testmode_start(ar, tb); - case ATH11K_TM_CMD_WMI_FTM: + case ATH_TM_CMD_WMI_FTM: return ath11k_tm_cmd_wmi_ftm(ar, tb); default: return -EOPNOTSUPP; diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c index 87abfa5475295..d7f852bebf4aa 100644 --- a/drivers/net/wireless/ath/ath11k/wmi.c +++ b/drivers/net/wireless/ath/ath11k/wmi.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: BSD-3-Clause-Clear /* * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. - * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved. */ #include #include @@ -2662,7 +2662,8 @@ int ath11k_wmi_send_scan_chan_list_cmd(struct ath11k *ar, } int ath11k_wmi_send_wmm_update_cmd_tlv(struct ath11k *ar, u32 vdev_id, - struct wmi_wmm_params_all_arg *param) + struct wmi_wmm_params_all_arg *param, + enum wmi_wmm_params_type wmm_param_type) { struct ath11k_pdev_wmi *wmi = ar->wmi; struct wmi_vdev_set_wmm_params_cmd *cmd; @@ -2681,7 +2682,7 @@ int ath11k_wmi_send_wmm_update_cmd_tlv(struct ath11k *ar, u32 vdev_id, FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); cmd->vdev_id = vdev_id; - cmd->wmm_param_type = 0; + cmd->wmm_param_type = wmm_param_type; for (ac = 0; ac < WME_NUM_AC; ac++) { switch (ac) { @@ -2714,8 +2715,8 @@ int ath11k_wmi_send_wmm_update_cmd_tlv(struct ath11k *ar, u32 vdev_id, wmm_param->no_ack = wmi_wmm_arg->no_ack; ath11k_dbg(ar->ab, ATH11K_DBG_WMI, - "wmm set ac %d aifs %d cwmin %d cwmax %d txop %d acm %d no_ack %d\n", - ac, wmm_param->aifs, wmm_param->cwmin, + "wmm set type %d ac %d aifs %d cwmin %d cwmax %d txop %d acm %d no_ack %d\n", + wmm_param_type, ac, wmm_param->aifs, wmm_param->cwmin, wmm_param->cwmax, wmm_param->txoplimit, wmm_param->acm, wmm_param->no_ack); } diff --git a/drivers/net/wireless/ath/ath11k/wmi.h b/drivers/net/wireless/ath/ath11k/wmi.h index 8982b909c821e..9fcffaa2f383c 100644 --- a/drivers/net/wireless/ath/ath11k/wmi.h +++ b/drivers/net/wireless/ath/ath11k/wmi.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: BSD-3-Clause-Clear */ /* * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. 
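ath11k_wmi_send_wmm_update_cmd_tlv() now takes a wmi_wmm_params_type (defined in the wmi.h hunk that follows), so the same WMI command can carry either legacy EDCA or 11ax MU-EDCA parameters; the mac.c hunk earlier fills arvif->muedca_params with the ECW nibbles unpacked from ecw_min_max. A sketch of how the two calls pair up at a conf_tx site:

static int example_push_wmm(struct ath11k *ar, struct ath11k_vif *arvif)
{
	int ret;

	ret = ath11k_wmi_send_wmm_update_cmd_tlv(ar, arvif->vdev_id,
						 &arvif->wmm_params,
						 WMI_WMM_PARAM_TYPE_LEGACY);
	if (ret)
		return ret;

	/* muedca_params carries the raw ECW exponents from the IE; the
	 * firmware is presumably expected to interpret them for the
	 * 11AX_MU_EDCA type
	 */
	return ath11k_wmi_send_wmm_update_cmd_tlv(ar, arvif->vdev_id,
						  &arvif->muedca_params,
						  WMI_WMM_PARAM_TYPE_11AX_MU_EDCA);
}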
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved. */ #ifndef ATH11K_WMI_H @@ -3817,6 +3817,7 @@ struct wmi_stop_scan_cmd { }; struct scan_chan_list_params { + struct list_head list; u32 pdev_id; u16 nallchans; struct channel_param ch_param[]; @@ -6346,6 +6347,11 @@ enum wmi_sta_keepalive_method { #define WMI_STA_KEEPALIVE_INTERVAL_DEFAULT 30 #define WMI_STA_KEEPALIVE_INTERVAL_DISABLE 0 +enum wmi_wmm_params_type { + WMI_WMM_PARAM_TYPE_LEGACY = 0, + WMI_WMM_PARAM_TYPE_11AX_MU_EDCA = 1, +}; + const void **ath11k_wmi_tlv_parse_alloc(struct ath11k_base *ab, struct sk_buff *skb, gfp_t gfp); int ath11k_wmi_cmd_send(struct ath11k_pdev_wmi *wmi, struct sk_buff *skb, @@ -6402,7 +6408,8 @@ int ath11k_wmi_send_scan_start_cmd(struct ath11k *ar, int ath11k_wmi_send_scan_stop_cmd(struct ath11k *ar, struct scan_cancel_param *param); int ath11k_wmi_send_wmm_update_cmd_tlv(struct ath11k *ar, u32 vdev_id, - struct wmi_wmm_params_all_arg *param); + struct wmi_wmm_params_all_arg *param, + enum wmi_wmm_params_type wmm_param_type); int ath11k_wmi_pdev_suspend(struct ath11k *ar, u32 suspend_opt, u32 pdev_id); int ath11k_wmi_pdev_resume(struct ath11k *ar, u32 pdev_id); diff --git a/drivers/net/wireless/ath/ath12k/Makefile b/drivers/net/wireless/ath/ath12k/Makefile index b5bb3e2599cd0..60644cb42c76e 100644 --- a/drivers/net/wireless/ath/ath12k/Makefile +++ b/drivers/net/wireless/ath/ath12k/Makefile @@ -23,11 +23,12 @@ ath12k-y += core.o \ fw.o \ p2p.o -ath12k-$(CONFIG_ATH12K_DEBUGFS) += debugfs.o debugfs_htt_stats.o +ath12k-$(CONFIG_ATH12K_DEBUGFS) += debugfs.o debugfs_htt_stats.o debugfs_sta.o ath12k-$(CONFIG_ACPI) += acpi.o ath12k-$(CONFIG_ATH12K_TRACING) += trace.o ath12k-$(CONFIG_PM) += wow.o ath12k-$(CONFIG_ATH12K_COREDUMP) += coredump.o +ath12k-$(CONFIG_NL80211_TESTMODE) += testmode.o # for tracing framework to find trace.h CFLAGS_trace.o := -I$(src) diff --git a/drivers/net/wireless/ath/ath12k/acpi.c b/drivers/net/wireless/ath/ath12k/acpi.c index 0555d35aab477..d81367ce69294 100644 --- a/drivers/net/wireless/ath/ath12k/acpi.c +++ b/drivers/net/wireless/ath/ath12k/acpi.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: BSD-3-Clause-Clear /* * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. - * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved. 
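The ath12k ACPI rework below dispatches on the _DSM result's object type (integer, string, or buffer) per function index. The evaluation helper itself, ath12k_acpi_dsm_get_data(), is only partially visible in this hunk; a hedged sketch of the usual evaluation call it presumably wraps:

#include <linux/acpi.h>

static union acpi_object *example_eval_dsm(struct device *dev,
					   const guid_t *guid, u64 func)
{
	acpi_handle handle = ACPI_HANDLE(dev);

	if (!handle)
		return NULL;

	/* revision 0, no package argument; the caller must ACPI_FREE()
	 * the returned object when done
	 */
	return acpi_evaluate_dsm(handle, guid, 0, func, NULL);
}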
*/ #include "core.h" @@ -12,7 +12,7 @@ static int ath12k_acpi_dsm_get_data(struct ath12k_base *ab, int func) { union acpi_object *obj; acpi_handle root_handle; - int ret; + int ret, i; root_handle = ACPI_HANDLE(ab->dev); if (!root_handle) { @@ -29,9 +29,48 @@ static int ath12k_acpi_dsm_get_data(struct ath12k_base *ab, int func) } if (obj->type == ACPI_TYPE_INTEGER) { - ab->acpi.func_bit = obj->integer.value; + switch (func) { + case ATH12K_ACPI_DSM_FUNC_SUPPORT_FUNCS: + ab->acpi.func_bit = obj->integer.value; + break; + case ATH12K_ACPI_DSM_FUNC_DISABLE_FLAG: + ab->acpi.bit_flag = obj->integer.value; + break; + } + } else if (obj->type == ACPI_TYPE_STRING) { + switch (func) { + case ATH12K_ACPI_DSM_FUNC_BDF_EXT: + if (obj->string.length <= ATH12K_ACPI_BDF_ANCHOR_STRING_LEN || + obj->string.length > ATH12K_ACPI_BDF_MAX_LEN || + memcmp(obj->string.pointer, ATH12K_ACPI_BDF_ANCHOR_STRING, + ATH12K_ACPI_BDF_ANCHOR_STRING_LEN)) { + ath12k_warn(ab, "invalid ACPI DSM BDF size: %d\n", + obj->string.length); + ret = -EINVAL; + goto out; + } + + memcpy(ab->acpi.bdf_string, obj->string.pointer, + obj->buffer.length); + + break; + } } else if (obj->type == ACPI_TYPE_BUFFER) { switch (func) { + case ATH12K_ACPI_DSM_FUNC_SUPPORT_FUNCS: + if (obj->buffer.length < ATH12K_ACPI_DSM_FUNC_MIN_BITMAP_SIZE || + obj->buffer.length > ATH12K_ACPI_DSM_FUNC_MAX_BITMAP_SIZE) { + ath12k_warn(ab, "invalid ACPI DSM func size: %d\n", + obj->buffer.length); + ret = -EINVAL; + goto out; + } + + ab->acpi.func_bit = 0; + for (i = 0; i < obj->buffer.length; i++) + ab->acpi.func_bit += obj->buffer.pointer[i] << (i * 8); + + break; case ATH12K_ACPI_DSM_FUNC_TAS_CFG: if (obj->buffer.length != ATH12K_ACPI_DSM_TAS_CFG_SIZE) { ath12k_warn(ab, "invalid ACPI DSM TAS config size: %d\n", @@ -247,24 +286,118 @@ static int ath12k_acpi_set_tas_params(struct ath12k_base *ab) return 0; } +bool ath12k_acpi_get_disable_rfkill(struct ath12k_base *ab) +{ + return ab->acpi.acpi_disable_rfkill; +} + +bool ath12k_acpi_get_disable_11be(struct ath12k_base *ab) +{ + return ab->acpi.acpi_disable_11be; +} + +void ath12k_acpi_set_dsm_func(struct ath12k_base *ab) +{ + int ret; + u8 *buf; + + if (!ab->hw_params->acpi_guid) + /* not supported with this hardware */ + return; + + if (ab->acpi.acpi_tas_enable) { + ret = ath12k_acpi_set_tas_params(ab); + if (ret) { + ath12k_warn(ab, "failed to send ACPI TAS parameters: %d\n", ret); + return; + } + } + + if (ab->acpi.acpi_bios_sar_enable) { + ret = ath12k_acpi_set_bios_sar_params(ab); + if (ret) { + ath12k_warn(ab, "failed to send ACPI BIOS SAR: %d\n", ret); + return; + } + } + + if (ab->acpi.acpi_cca_enable) { + buf = ab->acpi.cca_data + ATH12K_ACPI_CCA_THR_OFFSET_DATA_OFFSET; + ret = ath12k_wmi_set_bios_cmd(ab, + WMI_BIOS_PARAM_CCA_THRESHOLD_TYPE, + buf, + ATH12K_ACPI_CCA_THR_OFFSET_LEN); + if (ret) { + ath12k_warn(ab, "failed to set ACPI DSM CCA threshold: %d\n", + ret); + return; + } + } + + if (ab->acpi.acpi_band_edge_enable) { + ret = ath12k_wmi_set_bios_cmd(ab, + WMI_BIOS_PARAM_TYPE_BANDEDGE, + ab->acpi.band_edge_power, + sizeof(ab->acpi.band_edge_power)); + if (ret) { + ath12k_warn(ab, + "failed to set ACPI DSM band edge channel power: %d\n", + ret); + return; + } + } +} + int ath12k_acpi_start(struct ath12k_base *ab) { acpi_status status; - u8 *buf; int ret; + ab->acpi.acpi_tas_enable = false; + ab->acpi.acpi_disable_11be = false; + ab->acpi.acpi_disable_rfkill = false; + ab->acpi.acpi_bios_sar_enable = false; + ab->acpi.acpi_cca_enable = false; + ab->acpi.acpi_band_edge_enable = false; + 
ab->acpi.acpi_enable_bdf = false; + ab->acpi.bdf_string[0] = '\0'; + if (!ab->hw_params->acpi_guid) /* not supported with this hardware */ return 0; - ab->acpi.acpi_tas_enable = false; - ret = ath12k_acpi_dsm_get_data(ab, ATH12K_ACPI_DSM_FUNC_SUPPORT_FUNCS); if (ret) { ath12k_dbg(ab, ATH12K_DBG_BOOT, "failed to get ACPI DSM data: %d\n", ret); return ret; } + if (ATH12K_ACPI_FUNC_BIT_VALID(ab->acpi, ATH12K_ACPI_FUNC_BIT_DISABLE_FLAG)) { + ret = ath12k_acpi_dsm_get_data(ab, ATH12K_ACPI_DSM_FUNC_DISABLE_FLAG); + if (ret) { + ath12k_warn(ab, "failed to get ACPI DISABLE FLAG: %d\n", ret); + return ret; + } + + if (ATH12K_ACPI_CHEK_BIT_VALID(ab->acpi, + ATH12K_ACPI_DSM_DISABLE_11BE_BIT)) + ab->acpi.acpi_disable_11be = true; + + if (!ATH12K_ACPI_CHEK_BIT_VALID(ab->acpi, + ATH12K_ACPI_DSM_DISABLE_RFKILL_BIT)) + ab->acpi.acpi_disable_rfkill = true; + } + + if (ATH12K_ACPI_FUNC_BIT_VALID(ab->acpi, ATH12K_ACPI_FUNC_BIT_BDF_EXT)) { + ret = ath12k_acpi_dsm_get_data(ab, ATH12K_ACPI_DSM_FUNC_BDF_EXT); + if (ret || ab->acpi.bdf_string[0] == '\0') { + ath12k_warn(ab, "failed to get ACPI BDF EXT: %d\n", ret); + return ret; + } + + ab->acpi.acpi_enable_bdf = true; + } + if (ATH12K_ACPI_FUNC_BIT_VALID(ab->acpi, ATH12K_ACPI_FUNC_BIT_TAS_CFG)) { ret = ath12k_acpi_dsm_get_data(ab, ATH12K_ACPI_DSM_FUNC_TAS_CFG); if (ret) { @@ -308,20 +441,6 @@ int ath12k_acpi_start(struct ath12k_base *ab) ab->acpi.acpi_bios_sar_enable = true; } - if (ab->acpi.acpi_tas_enable) { - ret = ath12k_acpi_set_tas_params(ab); - if (ret) { - ath12k_warn(ab, "failed to send ACPI parameters: %d\n", ret); - return ret; - } - } - - if (ab->acpi.acpi_bios_sar_enable) { - ret = ath12k_acpi_set_bios_sar_params(ab); - if (ret) - return ret; - } - if (ATH12K_ACPI_FUNC_BIT_VALID(ab->acpi, ATH12K_ACPI_FUNC_BIT_CCA)) { ret = ath12k_acpi_dsm_get_data(ab, ATH12K_ACPI_DSM_FUNC_INDEX_CCA); if (ret) { @@ -332,18 +451,8 @@ int ath12k_acpi_start(struct ath12k_base *ab) if (ab->acpi.cca_data[0] == ATH12K_ACPI_CCA_THR_VERSION && ab->acpi.cca_data[ATH12K_ACPI_CCA_THR_OFFSET_DATA_OFFSET] == - ATH12K_ACPI_CCA_THR_ENABLE_FLAG) { - buf = ab->acpi.cca_data + ATH12K_ACPI_CCA_THR_OFFSET_DATA_OFFSET; - ret = ath12k_wmi_set_bios_cmd(ab, - WMI_BIOS_PARAM_CCA_THRESHOLD_TYPE, - buf, - ATH12K_ACPI_CCA_THR_OFFSET_LEN); - if (ret) { - ath12k_warn(ab, "failed to set ACPI DSM CCA threshold: %d\n", - ret); - return ret; - } - } + ATH12K_ACPI_CCA_THR_ENABLE_FLAG) + ab->acpi.acpi_cca_enable = true; } if (ATH12K_ACPI_FUNC_BIT_VALID(ab->acpi, @@ -356,18 +465,8 @@ int ath12k_acpi_start(struct ath12k_base *ab) } if (ab->acpi.band_edge_power[0] == ATH12K_ACPI_BAND_EDGE_VERSION && - ab->acpi.band_edge_power[1] == ATH12K_ACPI_BAND_EDGE_ENABLE_FLAG) { - ret = ath12k_wmi_set_bios_cmd(ab, - WMI_BIOS_PARAM_TYPE_BANDEDGE, - ab->acpi.band_edge_power, - sizeof(ab->acpi.band_edge_power)); - if (ret) { - ath12k_warn(ab, - "failed to set ACPI DSM band edge channel power: %d\n", - ret); - return ret; - } - } + ab->acpi.band_edge_power[1] == ATH12K_ACPI_BAND_EDGE_ENABLE_FLAG) + ab->acpi.acpi_band_edge_enable = true; } status = acpi_install_notify_handler(ACPI_HANDLE(ab->dev), @@ -383,6 +482,21 @@ int ath12k_acpi_start(struct ath12k_base *ab) return 0; } +int ath12k_acpi_check_bdf_variant_name(struct ath12k_base *ab) +{ + size_t max_len = sizeof(ab->qmi.target.bdf_ext); + + if (!ab->acpi.acpi_enable_bdf) + return -ENODATA; + + if (strscpy(ab->qmi.target.bdf_ext, ab->acpi.bdf_string + 4, max_len) < 0) + ath12k_dbg(ab, ATH12K_DBG_BOOT, + "acpi bdf variant longer than the buffer (variant: %s)\n", + 
ab->acpi.bdf_string); + + return 0; +} + void ath12k_acpi_stop(struct ath12k_base *ab) { if (!ab->acpi.started) diff --git a/drivers/net/wireless/ath/ath12k/acpi.h b/drivers/net/wireless/ath/ath12k/acpi.h index 39e003d86a48b..3a26fea6af1a9 100644 --- a/drivers/net/wireless/ath/ath12k/acpi.h +++ b/drivers/net/wireless/ath/ath12k/acpi.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: BSD-3-Clause-Clear */ /* * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. - * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved. */ #ifndef ATH12K_ACPI_H #define ATH12K_ACPI_H @@ -9,6 +9,8 @@ #include #define ATH12K_ACPI_DSM_FUNC_SUPPORT_FUNCS 0 +#define ATH12K_ACPI_DSM_FUNC_DISABLE_FLAG 2 +#define ATH12K_ACPI_DSM_FUNC_BDF_EXT 3 #define ATH12K_ACPI_DSM_FUNC_BIOS_SAR 4 #define ATH12K_ACPI_DSM_FUNC_GEO_OFFSET 5 #define ATH12K_ACPI_DSM_FUNC_INDEX_CCA 6 @@ -16,6 +18,8 @@ #define ATH12K_ACPI_DSM_FUNC_TAS_DATA 9 #define ATH12K_ACPI_DSM_FUNC_INDEX_BAND_EDGE 10 +#define ATH12K_ACPI_FUNC_BIT_DISABLE_FLAG BIT(1) +#define ATH12K_ACPI_FUNC_BIT_BDF_EXT BIT(2) #define ATH12K_ACPI_FUNC_BIT_BIOS_SAR BIT(3) #define ATH12K_ACPI_FUNC_BIT_GEO_OFFSET BIT(4) #define ATH12K_ACPI_FUNC_BIT_CCA BIT(5) @@ -25,6 +29,7 @@ #define ATH12K_ACPI_NOTIFY_EVENT 0x86 #define ATH12K_ACPI_FUNC_BIT_VALID(_acdata, _func) (((_acdata).func_bit) & (_func)) +#define ATH12K_ACPI_CHEK_BIT_VALID(_acdata, _func) (((_acdata).bit_flag) & (_func)) #define ATH12K_ACPI_TAS_DATA_VERSION 0x1 #define ATH12K_ACPI_TAS_DATA_ENABLE 0x1 @@ -48,6 +53,16 @@ #define ATH12K_ACPI_DSM_BAND_EDGE_DATA_SIZE 100 #define ATH12K_ACPI_DSM_TAS_CFG_SIZE 108 +#define ATH12K_ACPI_DSM_FUNC_MIN_BITMAP_SIZE 1 +#define ATH12K_ACPI_DSM_FUNC_MAX_BITMAP_SIZE 4 + +#define ATH12K_ACPI_DSM_DISABLE_11BE_BIT BIT(0) +#define ATH12K_ACPI_DSM_DISABLE_RFKILL_BIT BIT(2) + +#define ATH12K_ACPI_BDF_ANCHOR_STRING_LEN 3 +#define ATH12K_ACPI_BDF_ANCHOR_STRING "BDF" +#define ATH12K_ACPI_BDF_MAX_LEN 100 + #define ATH12K_ACPI_DSM_GEO_OFFSET_DATA_SIZE (ATH12K_ACPI_GEO_OFFSET_DATA_OFFSET + \ ATH12K_ACPI_BIOS_SAR_GEO_OFFSET_LEN) #define ATH12K_ACPI_DSM_BIOS_SAR_DATA_SIZE (ATH12K_ACPI_POWER_LIMIT_DATA_OFFSET + \ @@ -59,6 +74,10 @@ int ath12k_acpi_start(struct ath12k_base *ab); void ath12k_acpi_stop(struct ath12k_base *ab); +bool ath12k_acpi_get_disable_rfkill(struct ath12k_base *ab); +bool ath12k_acpi_get_disable_11be(struct ath12k_base *ab); +void ath12k_acpi_set_dsm_func(struct ath12k_base *ab); +int ath12k_acpi_check_bdf_variant_name(struct ath12k_base *ab); #else @@ -71,6 +90,25 @@ static inline void ath12k_acpi_stop(struct ath12k_base *ab) { } +static inline bool ath12k_acpi_get_disable_rfkill(struct ath12k_base *ab) +{ + return false; +} + +static inline bool ath12k_acpi_get_disable_11be(struct ath12k_base *ab) +{ + return false; +} + +static inline void ath12k_acpi_set_dsm_func(struct ath12k_base *ab) +{ +} + +static inline int ath12k_acpi_check_bdf_variant_name(struct ath12k_base *ab) +{ + return 0; +} + #endif /* CONFIG_ACPI */ #endif /* ATH12K_ACPI_H */ diff --git a/drivers/net/wireless/ath/ath12k/core.c b/drivers/net/wireless/ath/ath12k/core.c index 0606116d6b9c4..0b2dec081c6ee 100644 --- a/drivers/net/wireless/ath/ath12k/core.c +++ b/drivers/net/wireless/ath/ath12k/core.c @@ -23,6 +23,10 @@ unsigned int ath12k_debug_mask; module_param_named(debug_mask, ath12k_debug_mask, uint, 0644); MODULE_PARM_DESC(debug_mask, "Debugging mask"); +bool ath12k_ftm_mode; +module_param_named(ftm_mode, 
ath12k_ftm_mode, bool, 0444); +MODULE_PARM_DESC(ftm_mode, "Boots up in factory test mode"); + /* protected with ath12k_hw_group_mutex */ static struct list_head ath12k_hw_group_list = LIST_HEAD_INIT(ath12k_hw_group_list); @@ -36,6 +40,9 @@ static int ath12k_core_rfkill_config(struct ath12k_base *ab) if (!(ab->target_caps.sys_cap_info & WMI_SYS_CAP_INFO_RFKILL)) return 0; + if (ath12k_acpi_get_disable_rfkill(ab)) + return 0; + for (i = 0; i < ab->num_radios; i++) { ar = ab->pdevs[i].ar; @@ -173,7 +180,7 @@ EXPORT_SYMBOL(ath12k_core_resume); static int __ath12k_core_create_board_name(struct ath12k_base *ab, char *name, size_t name_len, bool with_variant, - bool bus_type_mode) + bool bus_type_mode, bool with_default) { /* strlen(',variant=') + strlen(ab->qmi.target.bdf_ext) */ char variant[9 + ATH12K_QMI_BDF_EXT_STR_LENGTH] = { 0 }; @@ -204,7 +211,9 @@ static int __ath12k_core_create_board_name(struct ath12k_base *ab, char *name, "bus=%s,qmi-chip-id=%d,qmi-board-id=%d%s", ath12k_bus_str(ab->hif.bus), ab->qmi.target.chip_id, - ab->qmi.target.board_id, variant); + with_default ? + ATH12K_BOARD_ID_DEFAULT : ab->qmi.target.board_id, + variant); break; } @@ -216,19 +225,19 @@ static int __ath12k_core_create_board_name(struct ath12k_base *ab, char *name, static int ath12k_core_create_board_name(struct ath12k_base *ab, char *name, size_t name_len) { - return __ath12k_core_create_board_name(ab, name, name_len, true, false); + return __ath12k_core_create_board_name(ab, name, name_len, true, false, false); } static int ath12k_core_create_fallback_board_name(struct ath12k_base *ab, char *name, size_t name_len) { - return __ath12k_core_create_board_name(ab, name, name_len, false, false); + return __ath12k_core_create_board_name(ab, name, name_len, false, false, true); } static int ath12k_core_create_bus_type_board_name(struct ath12k_base *ab, char *name, size_t name_len) { - return __ath12k_core_create_board_name(ab, name, name_len, false, true); + return __ath12k_core_create_board_name(ab, name, name_len, false, true, true); } const struct firmware *ath12k_core_firmware_request(struct ath12k_base *ab, @@ -693,6 +702,11 @@ static int ath12k_core_soc_create(struct ath12k_base *ab) { int ret; + if (ath12k_ftm_mode) { + ab->fw_mode = ATH12K_FIRMWARE_MODE_FTM; + ath12k_info(ab, "Booting in ftm mode\n"); + } + ret = ath12k_qmi_init_service(ab); if (ret) { ath12k_err(ab, "failed to initialize qmi :%d\n", ret); @@ -741,8 +755,7 @@ static void ath12k_core_pdev_destroy(struct ath12k_base *ab) ath12k_dp_pdev_free(ab); } -static int ath12k_core_start(struct ath12k_base *ab, - enum ath12k_firmware_mode mode) +static int ath12k_core_start(struct ath12k_base *ab) { int ret; @@ -836,10 +849,7 @@ static int ath12k_core_start(struct ath12k_base *ab, goto err_reo_cleanup; } - ret = ath12k_acpi_start(ab); - if (ret) - /* ACPI is optional so continue in case of an error */ - ath12k_dbg(ab, ATH12K_DBG_BOOT, "acpi failed: %d\n", ret); + ath12k_acpi_set_dsm_func(ab); if (!test_bit(ATH12K_FLAG_RECOVERY, &ab->dev_flags)) /* Indicate the core start in the appropriate group */ @@ -887,10 +897,41 @@ static void ath12k_core_hw_group_stop(struct ath12k_hw_group *ag) ath12k_mac_destroy(ag); } +u8 ath12k_get_num_partner_link(struct ath12k *ar) +{ + struct ath12k_base *partner_ab, *ab = ar->ab; + struct ath12k_hw_group *ag = ab->ag; + struct ath12k_pdev *pdev; + u8 num_link = 0; + int i, j; + + lockdep_assert_held(&ag->mutex); + + for (i = 0; i < ag->num_devices; i++) { + partner_ab = ag->ab[i]; + + for (j = 0; j < 
partner_ab->num_radios; j++) { + pdev = &partner_ab->pdevs[j]; + + /* Avoid the self link */ + if (ar == pdev->ar) + continue; + + num_link++; + } + } + + return num_link; +} + static int __ath12k_mac_mlo_ready(struct ath12k *ar) { + u8 num_link = ath12k_get_num_partner_link(ar); int ret; + if (num_link == 0) + return 0; + ret = ath12k_wmi_mlo_ready(ar); if (ret) { ath12k_err(ar->ab, "MLO ready failed for pdev %d: %d\n", @@ -920,19 +961,18 @@ int ath12k_mac_mlo_ready(struct ath12k_hw_group *ag) ar = &ah->radio[j]; ret = __ath12k_mac_mlo_ready(ar); if (ret) - goto out; + return ret; } } -out: - return ret; + return 0; } static int ath12k_core_mlo_setup(struct ath12k_hw_group *ag) { int ret, i; - if (!ag->mlo_capable || ag->num_devices == 1) + if (!ag->mlo_capable) return 0; ret = ath12k_mac_mlo_setup(ag); @@ -1068,7 +1108,7 @@ int ath12k_core_qmi_firmware_ready(struct ath12k_base *ab) struct ath12k_hw_group *ag = ath12k_ab_to_ag(ab); int ret, i; - ret = ath12k_core_start_firmware(ab, ATH12K_FIRMWARE_MODE_NORMAL); + ret = ath12k_core_start_firmware(ab, ab->fw_mode); if (ret) { ath12k_err(ab, "failed to start firmware: %d\n", ret); return ret; @@ -1089,7 +1129,7 @@ int ath12k_core_qmi_firmware_ready(struct ath12k_base *ab) mutex_lock(&ag->mutex); mutex_lock(&ab->core_lock); - ret = ath12k_core_start(ab, ATH12K_FIRMWARE_MODE_NORMAL); + ret = ath12k_core_start(ab); if (ret) { ath12k_err(ab, "failed to start core: %d\n", ret); goto err_dp_free; @@ -1122,16 +1162,18 @@ int ath12k_core_qmi_firmware_ready(struct ath12k_base *ab) ath12k_core_stop(ab); mutex_unlock(&ab->core_lock); } + mutex_unlock(&ag->mutex); goto exit; err_dp_free: ath12k_dp_free(ab); mutex_unlock(&ab->core_lock); + mutex_unlock(&ag->mutex); + err_firmware_stop: ath12k_qmi_firmware_stop(ab); exit: - mutex_unlock(&ag->mutex); return ret; } @@ -1239,7 +1281,8 @@ static void ath12k_core_pre_reconfigure_recovery(struct ath12k_base *ab) for (i = 0; i < ag->num_hw; i++) { ah = ath12k_ag_to_ah(ag, i); - if (!ah || ah->state == ATH12K_HW_STATE_OFF) + if (!ah || ah->state == ATH12K_HW_STATE_OFF || + ah->state == ATH12K_HW_STATE_TM) continue; ieee80211_stop_queues(ah->hw); @@ -1309,6 +1352,9 @@ static void ath12k_core_post_reconfigure_recovery(struct ath12k_base *ab) ath12k_warn(ab, "device is wedged, will not restart hw %d\n", i); break; + case ATH12K_HW_STATE_TM: + ath12k_warn(ab, "fw mode reset done radio %d\n", i); + break; } mutex_unlock(&ah->hw_mutex); @@ -1602,6 +1648,9 @@ static struct ath12k_hw_group *ath12k_core_hw_group_assign(struct ath12k_base *a lockdep_assert_held(&ath12k_hw_group_mutex); + if (ath12k_ftm_mode) + goto invalid_group; + /* The grouping of multiple devices will be done based on device tree file. * The platforms that do not have any valid group information would have * each device to be part of its own invalid group. @@ -1789,19 +1838,19 @@ void ath12k_core_hw_group_set_mlo_capable(struct ath12k_hw_group *ag) struct ath12k_base *ab; int i; + if (ath12k_ftm_mode) + return; + lockdep_assert_held(&ag->mutex); /* If more than one device is grouped, then inter MLO * functionality can still work independent of whether internally * each device supports single_chip_mlo or not. - * Only when there is one device, then it depends whether the - * device can support intra chip MLO or not + * Only when there is one device, MLO is disabled for WCN chipsets + * till the required driver implementation is in place.
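+ * (Note: with the ath12k_ftm_mode check at the top of this function, + * MLO is never enabled when booting in factory test mode.)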
*/ - if (ag->num_devices > 1) { - ag->mlo_capable = true; - } else { + if (ag->num_devices == 1) { ab = ag->ab[0]; - ag->mlo_capable = ab->single_chip_mlo_supp; /* WCN chipsets do not advertise in firmware features * hence skip checking */ @@ -1810,8 +1859,7 @@ void ath12k_core_hw_group_set_mlo_capable(struct ath12k_hw_group *ag) return; } - if (!ag->mlo_capable) - return; + ag->mlo_capable = true; for (i = 0; i < ag->num_devices; i++) { ab = ag->ab[i]; @@ -1927,7 +1975,6 @@ struct ath12k_base *ath12k_core_alloc(struct device *dev, size_t priv_size, ab->dev = dev; ab->hif.bus = bus; ab->qmi.num_radios = U8_MAX; - ab->single_chip_mlo_supp = false; /* Device index used to identify the devices in a group. * diff --git a/drivers/net/wireless/ath/ath12k/core.h b/drivers/net/wireless/ath/ath12k/core.h index ee595794a7aee..3fac4f00d3832 100644 --- a/drivers/net/wireless/ath/ath12k/core.h +++ b/drivers/net/wireless/ath/ath12k/core.h @@ -15,6 +15,7 @@ #include #include #include +#include <linux/average.h> #include "qmi.h" #include "htc.h" #include "wmi.h" @@ -52,8 +53,6 @@ #define ATH12K_INVALID_HW_MAC_ID 0xFF #define ATH12K_CONNECTION_LOSS_HZ (3 * HZ) -#define ATH12K_RX_RATE_TABLE_NUM 320 -#define ATH12K_RX_RATE_TABLE_11AX_NUM 576 #define ATH12K_MON_TIMER_INTERVAL 10 #define ATH12K_RESET_TIMEOUT_HZ (20 * HZ) @@ -87,6 +86,7 @@ enum wme_ac { #define ATH12K_HT_MCS_MAX 7 #define ATH12K_VHT_MCS_MAX 9 #define ATH12K_HE_MCS_MAX 11 +#define ATH12K_EHT_MCS_MAX 15 enum ath12k_crypt_mode { /* Only use hardware crypto engine */ @@ -141,6 +141,7 @@ struct ath12k_skb_rxcb { u8 is_frag; u8 tid; u16 peer_id; + bool is_end_of_ppdu; }; enum ath12k_hw_rev { @@ -166,6 +167,7 @@ struct ath12k_ext_irq_grp { u32 num_irq; u32 grp_id; u64 timestamp; + bool napi_enabled; struct napi_struct napi; struct net_device *napi_ndev; }; @@ -235,6 +237,7 @@ enum ath12k_dev_flags { ATH12K_FLAG_CE_IRQ_ENABLED, ATH12K_FLAG_EXT_IRQ_ENABLED, ATH12K_FLAG_QMI_FW_READY_COMPLETE, + ATH12K_FLAG_FTM_SEGMENTED, }; struct ath12k_tx_conf { @@ -298,6 +301,8 @@ struct ath12k_link_vif { u8 link_id; struct ath12k_vif *ahvif; struct ath12k_rekey_data rekey_data; + + u8 current_cntdown_counter; }; struct ath12k_vif { @@ -327,6 +332,7 @@ struct ath12k_vif { u32 key_cipher; u8 tx_encap_type; bool ps; + atomic_t mcbc_gsn; struct ath12k_link_vif deflink; struct ath12k_link_vif __rcu *link[ATH12K_NUM_MAX_LINKS]; @@ -354,20 +360,20 @@ struct ath12k_vif_iter { #define HAL_RX_MAX_MCS_HT 31 #define HAL_RX_MAX_MCS_VHT 9 #define HAL_RX_MAX_MCS_HE 11 +#define HAL_RX_MAX_MCS_BE 15 #define HAL_RX_MAX_NSS 8 #define HAL_RX_MAX_NUM_LEGACY_RATES 12 -#define ATH12K_RX_RATE_TABLE_11AX_NUM 576 -#define ATH12K_RX_RATE_TABLE_NUM 320 struct ath12k_rx_peer_rate_stats { u64 ht_mcs_count[HAL_RX_MAX_MCS_HT + 1]; u64 vht_mcs_count[HAL_RX_MAX_MCS_VHT + 1]; u64 he_mcs_count[HAL_RX_MAX_MCS_HE + 1]; + u64 be_mcs_count[HAL_RX_MAX_MCS_BE + 1]; u64 nss_count[HAL_RX_MAX_NSS]; u64 bw_count[HAL_RX_BW_MAX]; u64 gi_count[HAL_RX_GI_MAX]; u64 legacy_count[HAL_RX_MAX_NUM_LEGACY_RATES]; - u64 rx_rate[ATH12K_RX_RATE_TABLE_11AX_NUM]; + u64 rx_rate[HAL_RX_BW_MAX][HAL_RX_GI_MAX][HAL_RX_MAX_NSS][HAL_RX_MAX_MCS_HT + 1]; }; struct ath12k_rx_peer_stats { @@ -477,6 +483,8 @@ struct ath12k_wbm_tx_stats { u64 wbm_tx_comp_stats[HAL_WBM_REL_HTT_TX_COMP_STATUS_MAX]; }; +DECLARE_EWMA(avg_rssi, 10, 8) + struct ath12k_link_sta { struct ath12k_link_vif *arvif; struct ath12k_sta *ahsta; @@ -496,10 +504,13 @@ struct ath12k_link_sta { u64 rx_duration; u64 tx_duration; u8 rssi_comb; + struct ewma_avg_rssi avg_rssi; u8 link_id; struct
ath12k_rx_peer_stats *rx_stats; struct ath12k_wbm_tx_stats *wbm_tx_stats; u32 bw_prev; + u32 peer_nss; + s8 rssi_beacon; /* For now the assoc link will be considered primary */ bool is_assoc_link; @@ -534,18 +545,26 @@ enum ath12k_hw_state { ATH12K_HW_STATE_RESTARTING, ATH12K_HW_STATE_RESTARTED, ATH12K_HW_STATE_WEDGED, + ATH12K_HW_STATE_TM, /* Add other states as required */ }; /* Antenna noise floor */ #define ATH12K_DEFAULT_NOISE_FLOOR -95 +struct ath12k_ftm_event_obj { + u32 data_pos; + u32 expected_seq; + u8 *eventdata; +}; + struct ath12k_fw_stats { u32 pdev_id; u32 stats_id; struct list_head pdevs; struct list_head vdevs; struct list_head bcn; + bool fw_stats_done; }; struct ath12k_dbg_htt_stats { @@ -559,6 +578,12 @@ struct ath12k_debug { struct dentry *debugfs_pdev; struct dentry *debugfs_pdev_symlink; struct ath12k_dbg_htt_stats htt_stats; + enum wmi_halphy_ctrl_path_stats_id tpc_stats_type; + bool tpc_request; + struct completion tpc_complete; + struct wmi_tpc_stats_arg *tpc_stats; + u32 rx_filter; + bool extd_rx_stats; }; struct ath12k_per_peer_tx_stats { @@ -712,8 +737,12 @@ struct ath12k { bool nlo_enabled; + struct completion fw_stats_complete; + struct completion mlo_setup_done; u32 mlo_setup_status; + u8 ftm_msgref; + struct ath12k_fw_stats fw_stats; }; struct ath12k_hw { @@ -1026,9 +1055,6 @@ struct ath12k_base { const struct hal_rx_ops *hal_rx_ops; - /* Denotes the whether MLO is possible within the chip */ - bool single_chip_mlo_supp; - struct completion restart_completed; #ifdef CONFIG_ACPI @@ -1038,6 +1064,13 @@ struct ath12k_base { u32 func_bit; bool acpi_tas_enable; bool acpi_bios_sar_enable; + bool acpi_disable_11be; + bool acpi_disable_rfkill; + bool acpi_cca_enable; + bool acpi_band_edge_enable; + bool acpi_enable_bdf; + u32 bit_flag; + char bdf_string[ATH12K_ACPI_BDF_MAX_LEN]; u8 tas_cfg[ATH12K_ACPI_DSM_TAS_CFG_SIZE]; u8 tas_sar_power_table[ATH12K_ACPI_DSM_TAS_DATA_SIZE]; u8 bios_sar_data[ATH12K_ACPI_DSM_BIOS_SAR_DATA_SIZE]; @@ -1052,6 +1085,8 @@ struct ath12k_base { struct ath12k_hw_group *ag; struct ath12k_wsi_info wsi_info; + enum ath12k_firmware_mode fw_mode; + struct ath12k_ftm_event_obj ftm_event_obj; /* must be last */ u8 drv_priv[] __aligned(sizeof(void *)); @@ -1062,6 +1097,93 @@ struct ath12k_pdev_map { u8 pdev_idx; }; +struct ath12k_fw_stats_vdev { + struct list_head list; + + u32 vdev_id; + u32 beacon_snr; + u32 data_snr; + u32 num_tx_frames[WLAN_MAX_AC]; + u32 num_rx_frames; + u32 num_tx_frames_retries[WLAN_MAX_AC]; + u32 num_tx_frames_failures[WLAN_MAX_AC]; + u32 num_rts_fail; + u32 num_rts_success; + u32 num_rx_err; + u32 num_rx_discard; + u32 num_tx_not_acked; + u32 tx_rate_history[MAX_TX_RATE_VALUES]; + u32 beacon_rssi_history[MAX_TX_RATE_VALUES]; +}; + +struct ath12k_fw_stats_bcn { + struct list_head list; + + u32 vdev_id; + u32 tx_bcn_succ_cnt; + u32 tx_bcn_outage_cnt; +}; + +struct ath12k_fw_stats_pdev { + struct list_head list; + + /* PDEV stats */ + s32 ch_noise_floor; + u32 tx_frame_count; + u32 rx_frame_count; + u32 rx_clear_count; + u32 cycle_count; + u32 phy_err_count; + u32 chan_tx_power; + u32 ack_rx_bad; + u32 rts_bad; + u32 rts_good; + u32 fcs_bad; + u32 no_beacons; + u32 mib_int_count; + + /* PDEV TX stats */ + s32 comp_queued; + s32 comp_delivered; + s32 msdu_enqued; + s32 mpdu_enqued; + s32 wmm_drop; + s32 local_enqued; + s32 local_freed; + s32 hw_queued; + s32 hw_reaped; + s32 underrun; + s32 tx_abort; + s32 mpdus_requed; + u32 tx_ko; + u32 data_rc; + u32 self_triggers; + u32 sw_retry_failure; + u32 illgl_rate_phy_err; + u32 
pdev_cont_xretry; + u32 pdev_tx_timeout; + u32 pdev_resets; + u32 stateless_tid_alloc_failure; + u32 phy_underrun; + u32 txop_ovf; + + /* PDEV RX stats */ + s32 mid_ppdu_route_change; + s32 status_rcvd; + s32 r0_frags; + s32 r1_frags; + s32 r2_frags; + s32 r3_frags; + s32 htt_msdus; + s32 htt_mpdus; + s32 loc_msdus; + s32 loc_mpdus; + s32 oversize_amsdu; + s32 phy_errs; + s32 phy_err_drop; + s32 mpdu_errs; +}; + int ath12k_core_qmi_firmware_ready(struct ath12k_base *ab); int ath12k_core_pre_init(struct ath12k_base *ab); int ath12k_core_init(struct ath12k_base *ath12k); @@ -1084,6 +1206,7 @@ int ath12k_core_resume(struct ath12k_base *ab); int ath12k_core_suspend(struct ath12k_base *ab); int ath12k_core_suspend_late(struct ath12k_base *ab); void ath12k_core_hw_group_unassign(struct ath12k_base *ab); +u8 ath12k_get_num_partner_link(struct ath12k *ar); const struct firmware *ath12k_core_firmware_request(struct ath12k_base *ab, const char *filename); diff --git a/drivers/net/wireless/ath/ath12k/debug.c b/drivers/net/wireless/ath/ath12k/debug.c index ff6eaeafa092c..5ce100cd9a9d1 100644 --- a/drivers/net/wireless/ath/ath12k/debug.c +++ b/drivers/net/wireless/ath/ath12k/debug.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: BSD-3-Clause-Clear /* * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. - * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved. */ #include @@ -63,8 +63,10 @@ void __ath12k_dbg(struct ath12k_base *ab, enum ath12k_debug_mask mask, vaf.fmt = fmt; vaf.va = &args; - if (ath12k_debug_mask & mask) + if (likely(ab)) dev_printk(KERN_DEBUG, ab->dev, "%pV", &vaf); + else + printk(KERN_DEBUG "ath12k: %pV", &vaf); /* TODO: trace log */ diff --git a/drivers/net/wireless/ath/ath12k/debug.h b/drivers/net/wireless/ath/ath12k/debug.h index 90e801136bc65..48916e4e1f601 100644 --- a/drivers/net/wireless/ath/ath12k/debug.h +++ b/drivers/net/wireless/ath/ath12k/debug.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: BSD-3-Clause-Clear */ /* * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. - * Copyright (c) 2021-2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021-2022, 2024-2025 Qualcomm Innovation Center, Inc. All rights reserved. */ #ifndef _ATH12K_DEBUG_H_ @@ -37,6 +37,7 @@ __printf(2, 3) void __ath12k_warn(struct device *dev, const char *fmt, ...); #define ath12k_hw_warn(ah, fmt, ...) __ath12k_warn((ah)->dev, fmt, ##__VA_ARGS__) extern unsigned int ath12k_debug_mask; +extern bool ath12k_ftm_mode; #ifdef CONFIG_ATH12K_DEBUG __printf(3, 4) void __ath12k_dbg(struct ath12k_base *ab, @@ -61,11 +62,14 @@ static inline void ath12k_dbg_dump(struct ath12k_base *ab, } #endif /* CONFIG_ATH12K_DEBUG */ -#define ath12k_dbg(ar, dbg_mask, fmt, ...) \ +#define ath12k_dbg(ab, dbg_mask, fmt, ...) \ do { \ typeof(dbg_mask) mask = (dbg_mask); \ if (ath12k_debug_mask & mask) \ - __ath12k_dbg(ar, mask, fmt, ##__VA_ARGS__); \ + __ath12k_dbg(ab, mask, fmt, ##__VA_ARGS__); \ } while (0) +#define ath12k_generic_dbg(dbg_mask, fmt, ...) 
\ + ath12k_dbg(NULL, dbg_mask, fmt, ##__VA_ARGS__) + #endif /* _ATH12K_DEBUG_H_ */ diff --git a/drivers/net/wireless/ath/ath12k/debugfs.c b/drivers/net/wireless/ath/ath12k/debugfs.c index d4b32d1a431c0..57002215ddf16 100644 --- a/drivers/net/wireless/ath/ath12k/debugfs.c +++ b/drivers/net/wireless/ath/ath12k/debugfs.c @@ -1,10 +1,12 @@ // SPDX-License-Identifier: BSD-3-Clause-Clear /* * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. - * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved. */ #include "core.h" +#include "dp_tx.h" +#include "debug.h" #include "debugfs.h" #include "debugfs_htt_stats.h" @@ -31,6 +33,806 @@ static const struct file_operations fops_simulate_radar = { .open = simple_open }; +static ssize_t ath12k_write_tpc_stats_type(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath12k *ar = file->private_data; + u8 type; + int ret; + + ret = kstrtou8_from_user(user_buf, count, 0, &type); + if (ret) + return ret; + + if (type >= WMI_HALPHY_PDEV_TX_STATS_MAX) + return -EINVAL; + + spin_lock_bh(&ar->data_lock); + ar->debug.tpc_stats_type = type; + spin_unlock_bh(&ar->data_lock); + + return count; +} + +static int ath12k_debug_tpc_stats_request(struct ath12k *ar) +{ + enum wmi_halphy_ctrl_path_stats_id tpc_stats_sub_id; + struct ath12k_base *ab = ar->ab; + int ret; + + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); + + reinit_completion(&ar->debug.tpc_complete); + + spin_lock_bh(&ar->data_lock); + ar->debug.tpc_request = true; + tpc_stats_sub_id = ar->debug.tpc_stats_type; + spin_unlock_bh(&ar->data_lock); + + ret = ath12k_wmi_send_tpc_stats_request(ar, tpc_stats_sub_id); + if (ret) { + ath12k_warn(ab, "failed to request pdev tpc stats: %d\n", ret); + spin_lock_bh(&ar->data_lock); + ar->debug.tpc_request = false; + spin_unlock_bh(&ar->data_lock); + return ret; + } + + return 0; +} + +static int ath12k_get_tpc_ctl_mode_idx(struct wmi_tpc_stats_arg *tpc_stats, + enum wmi_tpc_pream_bw pream_bw, int *mode_idx) +{ + u32 chan_freq = le32_to_cpu(tpc_stats->tpc_config.chan_freq); + u8 band; + + band = ((chan_freq > ATH12K_MIN_6G_FREQ) ? NL80211_BAND_6GHZ : + ((chan_freq > ATH12K_MIN_5G_FREQ) ? 
NL80211_BAND_5GHZ : + NL80211_BAND_2GHZ)); + + if (band == NL80211_BAND_5GHZ || band == NL80211_BAND_6GHZ) { + switch (pream_bw) { + case WMI_TPC_PREAM_HT20: + case WMI_TPC_PREAM_VHT20: + *mode_idx = ATH12K_TPC_STATS_CTL_MODE_HT_VHT20_5GHZ_6GHZ; + break; + case WMI_TPC_PREAM_HE20: + case WMI_TPC_PREAM_EHT20: + *mode_idx = ATH12K_TPC_STATS_CTL_MODE_HE_EHT20_5GHZ_6GHZ; + break; + case WMI_TPC_PREAM_HT40: + case WMI_TPC_PREAM_VHT40: + *mode_idx = ATH12K_TPC_STATS_CTL_MODE_HT_VHT40_5GHZ_6GHZ; + break; + case WMI_TPC_PREAM_HE40: + case WMI_TPC_PREAM_EHT40: + *mode_idx = ATH12K_TPC_STATS_CTL_MODE_HE_EHT40_5GHZ_6GHZ; + break; + case WMI_TPC_PREAM_VHT80: + *mode_idx = ATH12K_TPC_STATS_CTL_MODE_VHT80_5GHZ_6GHZ; + break; + case WMI_TPC_PREAM_EHT60: + *mode_idx = ATH12K_TPC_STATS_CTL_MODE_EHT80_SU_PUNC20; + break; + case WMI_TPC_PREAM_HE80: + case WMI_TPC_PREAM_EHT80: + *mode_idx = ATH12K_TPC_STATS_CTL_MODE_HE_EHT80_5GHZ_6GHZ; + break; + case WMI_TPC_PREAM_VHT160: + *mode_idx = ATH12K_TPC_STATS_CTL_MODE_VHT160_5GHZ_6GHZ; + break; + case WMI_TPC_PREAM_EHT120: + case WMI_TPC_PREAM_EHT140: + *mode_idx = ATH12K_TPC_STATS_CTL_MODE_EHT160_SU_PUNC20; + break; + case WMI_TPC_PREAM_HE160: + case WMI_TPC_PREAM_EHT160: + *mode_idx = ATH12K_TPC_STATS_CTL_MODE_HE_EHT160_5GHZ_6GHZ; + break; + case WMI_TPC_PREAM_EHT200: + *mode_idx = ATH12K_TPC_STATS_CTL_MODE_EHT320_SU_PUNC120; + break; + case WMI_TPC_PREAM_EHT240: + *mode_idx = ATH12K_TPC_STATS_CTL_MODE_EHT320_SU_PUNC80; + break; + case WMI_TPC_PREAM_EHT280: + *mode_idx = ATH12K_TPC_STATS_CTL_MODE_EHT320_SU_PUNC40; + break; + case WMI_TPC_PREAM_EHT320: + *mode_idx = ATH12K_TPC_STATS_CTL_MODE_HE_EHT320_5GHZ_6GHZ; + break; + default: + /* for 5GHZ and 6GHZ, default case will be for OFDM */ + *mode_idx = ATH12K_TPC_STATS_CTL_MODE_LEGACY_5GHZ_6GHZ; + break; + } + } else { + switch (pream_bw) { + case WMI_TPC_PREAM_OFDM: + *mode_idx = ATH12K_TPC_STATS_CTL_MODE_LEGACY_2GHZ; + break; + case WMI_TPC_PREAM_HT20: + case WMI_TPC_PREAM_VHT20: + case WMI_TPC_PREAM_HE20: + case WMI_TPC_PREAM_EHT20: + *mode_idx = ATH12K_TPC_STATS_CTL_MODE_HT20_2GHZ; + break; + case WMI_TPC_PREAM_HT40: + case WMI_TPC_PREAM_VHT40: + case WMI_TPC_PREAM_HE40: + case WMI_TPC_PREAM_EHT40: + *mode_idx = ATH12K_TPC_STATS_CTL_MODE_HT40_2GHZ; + break; + default: + /* for 2GHZ, default case will be CCK */ + *mode_idx = ATH12K_TPC_STATS_CTL_MODE_CCK_2GHZ; + break; + } + } + + return 0; +} + +static s16 ath12k_tpc_get_rate(struct ath12k *ar, + struct wmi_tpc_stats_arg *tpc_stats, + u32 rate_idx, u32 num_chains, u32 rate_code, + enum wmi_tpc_pream_bw pream_bw, + enum wmi_halphy_ctrl_path_stats_id type, + u32 eht_rate_idx) +{ + u32 tot_nss, tot_modes, txbf_on_off, index_offset1, index_offset2, index_offset3; + u8 chain_idx, stm_idx, num_streams; + bool is_mu, txbf_enabled = 0; + s8 rates_ctl_min, tpc_ctl; + s16 rates, tpc, reg_pwr; + u16 rate1, rate2; + int mode, ret; + + num_streams = 1 + ATH12K_HW_NSS(rate_code); + chain_idx = num_chains - 1; + stm_idx = num_streams - 1; + mode = -1; + + ret = ath12k_get_tpc_ctl_mode_idx(tpc_stats, pream_bw, &mode); + if (ret) { + ath12k_warn(ar->ab, "Invalid mode index received\n"); + tpc = TPC_INVAL; + goto out; + } + + if (num_chains < num_streams) { + tpc = TPC_INVAL; + goto out; + } + + if (le32_to_cpu(tpc_stats->tpc_config.num_tx_chain) <= 1) { + tpc = TPC_INVAL; + goto out; + } + + if (type == WMI_HALPHY_PDEV_TX_SUTXBF_STATS || + type == WMI_HALPHY_PDEV_TX_MUTXBF_STATS) + txbf_enabled = 1; + + if (type == WMI_HALPHY_PDEV_TX_MU_STATS || + type == 
WMI_HALPHY_PDEV_TX_MUTXBF_STATS) { + is_mu = true; + } else { + is_mu = false; + } + + /* Below is the min calculation of the ctl array, rates array and + * regulatory power table; tpc is the minimum of all 3 + */ + if (pream_bw >= WMI_TPC_PREAM_EHT20 && pream_bw <= WMI_TPC_PREAM_EHT320) { + rate2 = tpc_stats->rates_array2.rate_array[eht_rate_idx]; + if (is_mu) + rates = u32_get_bits(rate2, ATH12K_TPC_RATE_ARRAY_MU); + else + rates = u32_get_bits(rate2, ATH12K_TPC_RATE_ARRAY_SU); + } else { + rate1 = tpc_stats->rates_array1.rate_array[rate_idx]; + if (is_mu) + rates = u32_get_bits(rate1, ATH12K_TPC_RATE_ARRAY_MU); + else + rates = u32_get_bits(rate1, ATH12K_TPC_RATE_ARRAY_SU); + } + + if (tpc_stats->tlvs_rcvd & WMI_TPC_CTL_PWR_ARRAY) { + tot_nss = le32_to_cpu(tpc_stats->ctl_array.tpc_ctl_pwr.d1); + tot_modes = le32_to_cpu(tpc_stats->ctl_array.tpc_ctl_pwr.d2); + txbf_on_off = le32_to_cpu(tpc_stats->ctl_array.tpc_ctl_pwr.d3); + index_offset1 = txbf_on_off * tot_modes * tot_nss; + index_offset2 = tot_modes * tot_nss; + index_offset3 = tot_nss; + + tpc_ctl = *(tpc_stats->ctl_array.ctl_pwr_table + + chain_idx * index_offset1 + txbf_enabled * index_offset2 + + mode * index_offset3 + stm_idx); + } else { + tpc_ctl = TPC_MAX; + ath12k_warn(ar->ab, + "ctl array for tpc stats not received from fw\n"); + } + + rates_ctl_min = min_t(s16, rates, tpc_ctl); + + reg_pwr = tpc_stats->max_reg_allowed_power.reg_pwr_array[chain_idx]; + + if (reg_pwr < 0) + reg_pwr = TPC_INVAL; + + tpc = min_t(s16, rates_ctl_min, reg_pwr); + + /* MODULATION_LIMIT is the maximum power limit; tpc should not exceed + * the modulation limit even if the min tpc of all three arrays is + * greater than the modulation limit + */ + tpc = min_t(s16, tpc, MODULATION_LIMIT); + +out: + return tpc; +} + +static u16 ath12k_get_ratecode(u16 pream_idx, u16 nss, u16 mcs_rate) +{ + u16 mode_type = ~0; + + /* Below assignments are for printing purposes only */ + switch (pream_idx) { + case WMI_TPC_PREAM_CCK: + mode_type = WMI_RATE_PREAMBLE_CCK; + break; + case WMI_TPC_PREAM_OFDM: + mode_type = WMI_RATE_PREAMBLE_OFDM; + break; + case WMI_TPC_PREAM_HT20: + case WMI_TPC_PREAM_HT40: + mode_type = WMI_RATE_PREAMBLE_HT; + break; + case WMI_TPC_PREAM_VHT20: + case WMI_TPC_PREAM_VHT40: + case WMI_TPC_PREAM_VHT80: + case WMI_TPC_PREAM_VHT160: + mode_type = WMI_RATE_PREAMBLE_VHT; + break; + case WMI_TPC_PREAM_HE20: + case WMI_TPC_PREAM_HE40: + case WMI_TPC_PREAM_HE80: + case WMI_TPC_PREAM_HE160: + mode_type = WMI_RATE_PREAMBLE_HE; + break; + case WMI_TPC_PREAM_EHT20: + case WMI_TPC_PREAM_EHT40: + case WMI_TPC_PREAM_EHT60: + case WMI_TPC_PREAM_EHT80: + case WMI_TPC_PREAM_EHT120: + case WMI_TPC_PREAM_EHT140: + case WMI_TPC_PREAM_EHT160: + case WMI_TPC_PREAM_EHT200: + case WMI_TPC_PREAM_EHT240: + case WMI_TPC_PREAM_EHT280: + case WMI_TPC_PREAM_EHT320: + mode_type = WMI_RATE_PREAMBLE_EHT; + if (mcs_rate == 0 || mcs_rate == 1) + mcs_rate += 14; + else + mcs_rate -= 2; + break; + default: + return mode_type; + } + return ((mode_type << 8) | ((nss & 0x7) << 5) | (mcs_rate & 0x1F)); +} + +static bool ath12k_he_supports_extra_mcs(struct ath12k *ar, int freq) +{ + struct ath12k_pdev_cap *cap = &ar->pdev->cap; + struct ath12k_band_cap *cap_band; + bool extra_mcs_supported; + + if (freq <= ATH12K_2GHZ_MAX_FREQUENCY) + cap_band = &cap->band[NL80211_BAND_2GHZ]; + else if (freq <= ATH12K_5GHZ_MAX_FREQUENCY) + cap_band = &cap->band[NL80211_BAND_5GHZ]; + else + cap_band = &cap->band[NL80211_BAND_6GHZ]; + + extra_mcs_supported = u32_get_bits(cap_band->he_cap_info[1], + HE_EXTRA_MCS_SUPPORT); +
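/* true when the band's HE caps advertise support for the extra MCS rates */ +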
return extra_mcs_supported; +} + +static int ath12k_tpc_fill_pream(struct ath12k *ar, char *buf, int buf_len, int len, + enum wmi_tpc_pream_bw pream_bw, u32 max_rix, + int max_nss, int max_rates, int pream_type, + enum wmi_halphy_ctrl_path_stats_id tpc_type, + int rate_idx, int eht_rate_idx) +{ + struct wmi_tpc_stats_arg *tpc_stats = ar->debug.tpc_stats; + int nss, rates, chains; + u8 active_tx_chains; + u16 rate_code; + s16 tpc; + + static const char *const pream_str[] = { + [WMI_TPC_PREAM_CCK] = "CCK", + [WMI_TPC_PREAM_OFDM] = "OFDM", + [WMI_TPC_PREAM_HT20] = "HT20", + [WMI_TPC_PREAM_HT40] = "HT40", + [WMI_TPC_PREAM_VHT20] = "VHT20", + [WMI_TPC_PREAM_VHT40] = "VHT40", + [WMI_TPC_PREAM_VHT80] = "VHT80", + [WMI_TPC_PREAM_VHT160] = "VHT160", + [WMI_TPC_PREAM_HE20] = "HE20", + [WMI_TPC_PREAM_HE40] = "HE40", + [WMI_TPC_PREAM_HE80] = "HE80", + [WMI_TPC_PREAM_HE160] = "HE160", + [WMI_TPC_PREAM_EHT20] = "EHT20", + [WMI_TPC_PREAM_EHT40] = "EHT40", + [WMI_TPC_PREAM_EHT60] = "EHT60", + [WMI_TPC_PREAM_EHT80] = "EHT80", + [WMI_TPC_PREAM_EHT120] = "EHT120", + [WMI_TPC_PREAM_EHT140] = "EHT140", + [WMI_TPC_PREAM_EHT160] = "EHT160", + [WMI_TPC_PREAM_EHT200] = "EHT200", + [WMI_TPC_PREAM_EHT240] = "EHT240", + [WMI_TPC_PREAM_EHT280] = "EHT280", + [WMI_TPC_PREAM_EHT320] = "EHT320"}; + + active_tx_chains = ar->num_tx_chains; + + for (nss = 0; nss < max_nss; nss++) { + for (rates = 0; rates < max_rates; rates++, rate_idx++, max_rix++) { + /* FW sends extra MCS (10&11) for VHT and HE rates; + * these are not used, hence skipping them here + */ + if (pream_type == WMI_RATE_PREAMBLE_VHT && + rates > ATH12K_VHT_MCS_MAX) + continue; + + if (pream_type == WMI_RATE_PREAMBLE_HE && + rates > ATH12K_HE_MCS_MAX) + continue; + + if (pream_type == WMI_RATE_PREAMBLE_EHT && + rates > ATH12K_EHT_MCS_MAX) + continue; + + rate_code = ath12k_get_ratecode(pream_bw, nss, rates); + len += scnprintf(buf + len, buf_len - len, + "%d\t %s\t 0x%03x\t", max_rix, + pream_str[pream_bw], rate_code); + + for (chains = 0; chains < active_tx_chains; chains++) { + if (nss > chains) { + len += scnprintf(buf + len, + buf_len - len, + "\t%s", "NA"); + } else { + tpc = ath12k_tpc_get_rate(ar, tpc_stats, + rate_idx, chains + 1, + rate_code, pream_bw, + tpc_type, + eht_rate_idx); + + if (tpc == TPC_INVAL) { + len += scnprintf(buf + len, + buf_len - len, "\tNA"); + } else { + len += scnprintf(buf + len, + buf_len - len, "\t%d", + tpc); + } + } + } + len += scnprintf(buf + len, buf_len - len, "\n"); + + if (pream_type == WMI_RATE_PREAMBLE_EHT) + /* For fetching the next EHT rates pwr from rates_array2 */ + ++eht_rate_idx; + } + } + + return len; +} + +static int ath12k_tpc_stats_print(struct ath12k *ar, + struct wmi_tpc_stats_arg *tpc_stats, + char *buf, size_t len, + enum wmi_halphy_ctrl_path_stats_id type) +{ + u32 eht_idx = 0, pream_idx = 0, rate_pream_idx = 0, total_rates = 0, max_rix = 0; + u32 chan_freq, num_tx_chain, caps, i, j = 1; + size_t buf_len = ATH12K_TPC_STATS_BUF_SIZE; + u8 nss, active_tx_chains; + bool he_ext_mcs; + static const char *const type_str[WMI_HALPHY_PDEV_TX_STATS_MAX] = { + [WMI_HALPHY_PDEV_TX_SU_STATS] = "SU", + [WMI_HALPHY_PDEV_TX_SUTXBF_STATS] = "SU WITH TXBF", + [WMI_HALPHY_PDEV_TX_MU_STATS] = "MU", + [WMI_HALPHY_PDEV_TX_MUTXBF_STATS] = "MU WITH TXBF"}; + + u8 max_rates[WMI_TPC_PREAM_MAX] = { + [WMI_TPC_PREAM_CCK] = ATH12K_CCK_RATES, + [WMI_TPC_PREAM_OFDM] = ATH12K_OFDM_RATES, + [WMI_TPC_PREAM_HT20] = ATH12K_HT_RATES, + [WMI_TPC_PREAM_HT40] = ATH12K_HT_RATES, + [WMI_TPC_PREAM_VHT20] = ATH12K_VHT_RATES, + [WMI_TPC_PREAM_VHT40] =
ATH12K_VHT_RATES, + [WMI_TPC_PREAM_VHT80] = ATH12K_VHT_RATES, + [WMI_TPC_PREAM_VHT160] = ATH12K_VHT_RATES, + [WMI_TPC_PREAM_HE20] = ATH12K_HE_RATES, + [WMI_TPC_PREAM_HE40] = ATH12K_HE_RATES, + [WMI_TPC_PREAM_HE80] = ATH12K_HE_RATES, + [WMI_TPC_PREAM_HE160] = ATH12K_HE_RATES, + [WMI_TPC_PREAM_EHT20] = ATH12K_EHT_RATES, + [WMI_TPC_PREAM_EHT40] = ATH12K_EHT_RATES, + [WMI_TPC_PREAM_EHT60] = ATH12K_EHT_RATES, + [WMI_TPC_PREAM_EHT80] = ATH12K_EHT_RATES, + [WMI_TPC_PREAM_EHT120] = ATH12K_EHT_RATES, + [WMI_TPC_PREAM_EHT140] = ATH12K_EHT_RATES, + [WMI_TPC_PREAM_EHT160] = ATH12K_EHT_RATES, + [WMI_TPC_PREAM_EHT200] = ATH12K_EHT_RATES, + [WMI_TPC_PREAM_EHT240] = ATH12K_EHT_RATES, + [WMI_TPC_PREAM_EHT280] = ATH12K_EHT_RATES, + [WMI_TPC_PREAM_EHT320] = ATH12K_EHT_RATES}; + static const u8 max_nss[WMI_TPC_PREAM_MAX] = { + [WMI_TPC_PREAM_CCK] = ATH12K_NSS_1, + [WMI_TPC_PREAM_OFDM] = ATH12K_NSS_1, + [WMI_TPC_PREAM_HT20] = ATH12K_NSS_4, + [WMI_TPC_PREAM_HT40] = ATH12K_NSS_4, + [WMI_TPC_PREAM_VHT20] = ATH12K_NSS_8, + [WMI_TPC_PREAM_VHT40] = ATH12K_NSS_8, + [WMI_TPC_PREAM_VHT80] = ATH12K_NSS_8, + [WMI_TPC_PREAM_VHT160] = ATH12K_NSS_4, + [WMI_TPC_PREAM_HE20] = ATH12K_NSS_8, + [WMI_TPC_PREAM_HE40] = ATH12K_NSS_8, + [WMI_TPC_PREAM_HE80] = ATH12K_NSS_8, + [WMI_TPC_PREAM_HE160] = ATH12K_NSS_4, + [WMI_TPC_PREAM_EHT20] = ATH12K_NSS_4, + [WMI_TPC_PREAM_EHT40] = ATH12K_NSS_4, + [WMI_TPC_PREAM_EHT60] = ATH12K_NSS_4, + [WMI_TPC_PREAM_EHT80] = ATH12K_NSS_4, + [WMI_TPC_PREAM_EHT120] = ATH12K_NSS_4, + [WMI_TPC_PREAM_EHT140] = ATH12K_NSS_4, + [WMI_TPC_PREAM_EHT160] = ATH12K_NSS_4, + [WMI_TPC_PREAM_EHT200] = ATH12K_NSS_4, + [WMI_TPC_PREAM_EHT240] = ATH12K_NSS_4, + [WMI_TPC_PREAM_EHT280] = ATH12K_NSS_4, + [WMI_TPC_PREAM_EHT320] = ATH12K_NSS_4}; + + u16 rate_idx[WMI_TPC_PREAM_MAX] = {}, eht_rate_idx[WMI_TPC_PREAM_MAX] = {}; + static const u8 pream_type[WMI_TPC_PREAM_MAX] = { + [WMI_TPC_PREAM_CCK] = WMI_RATE_PREAMBLE_CCK, + [WMI_TPC_PREAM_OFDM] = WMI_RATE_PREAMBLE_OFDM, + [WMI_TPC_PREAM_HT20] = WMI_RATE_PREAMBLE_HT, + [WMI_TPC_PREAM_HT40] = WMI_RATE_PREAMBLE_HT, + [WMI_TPC_PREAM_VHT20] = WMI_RATE_PREAMBLE_VHT, + [WMI_TPC_PREAM_VHT40] = WMI_RATE_PREAMBLE_VHT, + [WMI_TPC_PREAM_VHT80] = WMI_RATE_PREAMBLE_VHT, + [WMI_TPC_PREAM_VHT160] = WMI_RATE_PREAMBLE_VHT, + [WMI_TPC_PREAM_HE20] = WMI_RATE_PREAMBLE_HE, + [WMI_TPC_PREAM_HE40] = WMI_RATE_PREAMBLE_HE, + [WMI_TPC_PREAM_HE80] = WMI_RATE_PREAMBLE_HE, + [WMI_TPC_PREAM_HE160] = WMI_RATE_PREAMBLE_HE, + [WMI_TPC_PREAM_EHT20] = WMI_RATE_PREAMBLE_EHT, + [WMI_TPC_PREAM_EHT40] = WMI_RATE_PREAMBLE_EHT, + [WMI_TPC_PREAM_EHT60] = WMI_RATE_PREAMBLE_EHT, + [WMI_TPC_PREAM_EHT80] = WMI_RATE_PREAMBLE_EHT, + [WMI_TPC_PREAM_EHT120] = WMI_RATE_PREAMBLE_EHT, + [WMI_TPC_PREAM_EHT140] = WMI_RATE_PREAMBLE_EHT, + [WMI_TPC_PREAM_EHT160] = WMI_RATE_PREAMBLE_EHT, + [WMI_TPC_PREAM_EHT200] = WMI_RATE_PREAMBLE_EHT, + [WMI_TPC_PREAM_EHT240] = WMI_RATE_PREAMBLE_EHT, + [WMI_TPC_PREAM_EHT280] = WMI_RATE_PREAMBLE_EHT, + [WMI_TPC_PREAM_EHT320] = WMI_RATE_PREAMBLE_EHT}; + + chan_freq = le32_to_cpu(tpc_stats->tpc_config.chan_freq); + num_tx_chain = le32_to_cpu(tpc_stats->tpc_config.num_tx_chain); + caps = le32_to_cpu(tpc_stats->tpc_config.caps); + + active_tx_chains = ar->num_tx_chains; + he_ext_mcs = ath12k_he_supports_extra_mcs(ar, chan_freq); + + /* MCS 12&13 are sent by FW for certain HWs in the rate array; skipping them + * as they are not supported + */ + if (he_ext_mcs) { + for (i = WMI_TPC_PREAM_HE20; i <= WMI_TPC_PREAM_HE160; ++i) + max_rates[i] = ATH12K_HE_RATES; + } + + if (type == WMI_HALPHY_PDEV_TX_MU_STATS || + type
== WMI_HALPHY_PDEV_TX_MUTXBF_STATS) { + pream_idx = WMI_TPC_PREAM_VHT20; + + for (i = WMI_TPC_PREAM_CCK; i <= WMI_TPC_PREAM_HT40; ++i) + max_rix += max_nss[i] * max_rates[i]; + } + /* Enumerate all the rate indices */ + for (i = rate_pream_idx + 1; i < WMI_TPC_PREAM_MAX; i++) { + nss = (max_nss[i - 1] < num_tx_chain ? + max_nss[i - 1] : num_tx_chain); + + rate_idx[i] = rate_idx[i - 1] + max_rates[i - 1] * nss; + + if (pream_type[i] == WMI_RATE_PREAMBLE_EHT) { + eht_rate_idx[j] = eht_rate_idx[j - 1] + max_rates[i] * nss; + ++j; + } + } + + for (i = 0; i < WMI_TPC_PREAM_MAX; i++) { + nss = (max_nss[i] < num_tx_chain ? + max_nss[i] : num_tx_chain); + total_rates += max_rates[i] * nss; + } + + len += scnprintf(buf + len, buf_len - len, + "No.of rates-%d\n", total_rates); + + len += scnprintf(buf + len, buf_len - len, + "**************** %s ****************\n", + type_str[type]); + len += scnprintf(buf + len, buf_len - len, + "\t\t\t\tTPC values for Active chains\n"); + len += scnprintf(buf + len, buf_len - len, + "Rate idx Preamble Rate code"); + + for (i = 1; i <= active_tx_chains; ++i) { + len += scnprintf(buf + len, buf_len - len, + "\t%d-Chain", i); + } + + len += scnprintf(buf + len, buf_len - len, "\n"); + for (i = pream_idx; i < WMI_TPC_PREAM_MAX; i++) { + if (chan_freq <= 2483) { + if (i == WMI_TPC_PREAM_VHT80 || + i == WMI_TPC_PREAM_VHT160 || + i == WMI_TPC_PREAM_HE80 || + i == WMI_TPC_PREAM_HE160 || + (i >= WMI_TPC_PREAM_EHT60 && + i <= WMI_TPC_PREAM_EHT320)) { + max_rix += max_nss[i] * max_rates[i]; + continue; + } + } else { + if (i == WMI_TPC_PREAM_CCK) { + max_rix += max_rates[i]; + continue; + } + } + + nss = (max_nss[i] < ar->num_tx_chains ? max_nss[i] : ar->num_tx_chains); + + if (!(caps & + (1 << ATH12K_TPC_STATS_SUPPORT_BE_PUNC))) { + if (i == WMI_TPC_PREAM_EHT60 || i == WMI_TPC_PREAM_EHT120 || + i == WMI_TPC_PREAM_EHT140 || i == WMI_TPC_PREAM_EHT200 || + i == WMI_TPC_PREAM_EHT240 || i == WMI_TPC_PREAM_EHT280) { + max_rix += max_nss[i] * max_rates[i]; + continue; + } + } + + len = ath12k_tpc_fill_pream(ar, buf, buf_len, len, i, max_rix, nss, + max_rates[i], pream_type[i], + type, rate_idx[i], eht_rate_idx[eht_idx]); + + if (pream_type[i] == WMI_RATE_PREAMBLE_EHT) + /* For fetching the next index of EHT rates from rates_array2 */ + ++eht_idx; + + max_rix += max_nss[i] * max_rates[i]; + } + return len; +} + +static void ath12k_tpc_stats_fill(struct ath12k *ar, + struct wmi_tpc_stats_arg *tpc_stats, + char *buf) +{ + size_t buf_len = ATH12K_TPC_STATS_BUF_SIZE; + struct wmi_tpc_config_params *tpc; + size_t len = 0; + + if (!tpc_stats) { + ath12k_warn(ar->ab, "failed to find tpc stats\n"); + return; + } + + spin_lock_bh(&ar->data_lock); + + tpc = &tpc_stats->tpc_config; + len += scnprintf(buf + len, buf_len - len, "\n"); + len += scnprintf(buf + len, buf_len - len, + "*************** TPC config **************\n"); + len += scnprintf(buf + len, buf_len - len, + "* powers are in 0.25 dBm steps\n"); + len += scnprintf(buf + len, buf_len - len, + "reg domain-%d\t\tchan freq-%d\n", + tpc->reg_domain, tpc->chan_freq); + len += scnprintf(buf + len, buf_len - len, + "power limit-%d\t\tmax reg-domain Power-%d\n", + le32_to_cpu(tpc->twice_max_reg_power) / 2, tpc->power_limit); + len += scnprintf(buf + len, buf_len - len, + "No.of tx chain-%d\t", + ar->num_tx_chains); + + ath12k_tpc_stats_print(ar, tpc_stats, buf, len, + ar->debug.tpc_stats_type); + + spin_unlock_bh(&ar->data_lock); +} + +static int ath12k_open_tpc_stats(struct inode *inode, struct file *file) +{ + struct ath12k *ar =
inode->i_private; + struct ath12k_hw *ah = ath12k_ar_to_ah(ar); + int ret; + + guard(wiphy)(ath12k_ar_to_hw(ar)->wiphy); + + if (ah->state != ATH12K_HW_STATE_ON) { + ath12k_warn(ar->ab, "Interface not up\n"); + return -ENETDOWN; + } + + void *buf __free(kfree) = kzalloc(ATH12K_TPC_STATS_BUF_SIZE, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + ret = ath12k_debug_tpc_stats_request(ar); + if (ret) { + ath12k_warn(ar->ab, "failed to request tpc stats: %d\n", + ret); + return ret; + } + + if (!wait_for_completion_timeout(&ar->debug.tpc_complete, TPC_STATS_WAIT_TIME)) { + spin_lock_bh(&ar->data_lock); + ath12k_wmi_free_tpc_stats_mem(ar); + ar->debug.tpc_request = false; + spin_unlock_bh(&ar->data_lock); + return -ETIMEDOUT; + } + + ath12k_tpc_stats_fill(ar, ar->debug.tpc_stats, buf); + file->private_data = no_free_ptr(buf); + + spin_lock_bh(&ar->data_lock); + ath12k_wmi_free_tpc_stats_mem(ar); + spin_unlock_bh(&ar->data_lock); + + return 0; +} + +static ssize_t ath12k_read_tpc_stats(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + const char *buf = file->private_data; + size_t len = strlen(buf); + + return simple_read_from_buffer(user_buf, count, ppos, buf, len); +} + +static int ath12k_release_tpc_stats(struct inode *inode, + struct file *file) +{ + kfree(file->private_data); + return 0; +} + +static const struct file_operations fops_tpc_stats = { + .open = ath12k_open_tpc_stats, + .release = ath12k_release_tpc_stats, + .read = ath12k_read_tpc_stats, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +static const struct file_operations fops_tpc_stats_type = { + .write = ath12k_write_tpc_stats_type, + .open = simple_open, + .llseek = default_llseek, +}; + +static ssize_t ath12k_write_extd_rx_stats(struct file *file, + const char __user *ubuf, + size_t count, loff_t *ppos) +{ + struct ath12k *ar = file->private_data; + struct htt_rx_ring_tlv_filter tlv_filter = {0}; + u32 ring_id, rx_filter = 0; + bool enable; + int ret, i; + + if (kstrtobool_from_user(ubuf, count, &enable)) + return -EINVAL; + + wiphy_lock(ath12k_ar_to_hw(ar)->wiphy); + + if (!ar->ab->hw_params->rxdma1_enable) { + ret = count; + goto exit; + } + + if (ar->ah->state != ATH12K_HW_STATE_ON) { + ret = -ENETDOWN; + goto exit; + } + + if (enable == ar->debug.extd_rx_stats) { + ret = count; + goto exit; + } + + if (enable) { + rx_filter = HTT_RX_FILTER_TLV_FLAGS_MPDU_START; + rx_filter |= HTT_RX_FILTER_TLV_FLAGS_PPDU_START; + rx_filter |= HTT_RX_FILTER_TLV_FLAGS_PPDU_END; + rx_filter |= HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS; + rx_filter |= HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS_EXT; + rx_filter |= HTT_RX_FILTER_TLV_FLAGS_PPDU_END_STATUS_DONE; + rx_filter |= HTT_RX_FILTER_TLV_FLAGS_PPDU_START_USER_INFO; + + tlv_filter.rx_filter = rx_filter; + tlv_filter.pkt_filter_flags0 = HTT_RX_FP_MGMT_FILTER_FLAGS0; + tlv_filter.pkt_filter_flags1 = HTT_RX_FP_MGMT_FILTER_FLAGS1; + tlv_filter.pkt_filter_flags2 = HTT_RX_FP_CTRL_FILTER_FLASG2; + tlv_filter.pkt_filter_flags3 = HTT_RX_FP_CTRL_FILTER_FLASG3 | + HTT_RX_FP_DATA_FILTER_FLASG3; + } else { + tlv_filter = ath12k_mac_mon_status_filter_default; + } + + ar->debug.rx_filter = tlv_filter.rx_filter; + + for (i = 0; i < ar->ab->hw_params->num_rxdma_per_pdev; i++) { + ring_id = ar->dp.rxdma_mon_dst_ring[i].ring_id; + ret = ath12k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id, ar->dp.mac_id + i, + HAL_RXDMA_MONITOR_DST, + DP_RXDMA_REFILL_RING_SIZE, + &tlv_filter); + if (ret) { + ath12k_warn(ar->ab, "failed to set rx filter for monitor status ring\n"); + goto 
exit; + } + } + + ar->debug.extd_rx_stats = !!enable; + ret = count; +exit: + wiphy_unlock(ath12k_ar_to_hw(ar)->wiphy); + return ret; +} + +static ssize_t ath12k_read_extd_rx_stats(struct file *file, + char __user *ubuf, + size_t count, loff_t *ppos) +{ + struct ath12k *ar = file->private_data; + char buf[32]; + int len = 0; + + wiphy_lock(ath12k_ar_to_hw(ar)->wiphy); + len = scnprintf(buf, sizeof(buf) - len, "%d\n", + ar->debug.extd_rx_stats); + wiphy_unlock(ath12k_ar_to_hw(ar)->wiphy); + + return simple_read_from_buffer(ubuf, count, ppos, buf, len); +} + +static const struct file_operations fops_extd_rx_stats = { + .read = ath12k_read_extd_rx_stats, + .write = ath12k_write_extd_rx_stats, + .open = simple_open, +}; + void ath12k_debugfs_soc_create(struct ath12k_base *ab) { bool dput_needed; @@ -68,6 +870,382 @@ void ath12k_debugfs_soc_destroy(struct ath12k_base *ab) */ } +static void ath12k_fw_stats_pdevs_free(struct list_head *head) +{ + struct ath12k_fw_stats_pdev *i, *tmp; + + list_for_each_entry_safe(i, tmp, head, list) { + list_del(&i->list); + kfree(i); + } +} + +static void ath12k_fw_stats_bcn_free(struct list_head *head) +{ + struct ath12k_fw_stats_bcn *i, *tmp; + + list_for_each_entry_safe(i, tmp, head, list) { + list_del(&i->list); + kfree(i); + } +} + +static void ath12k_fw_stats_vdevs_free(struct list_head *head) +{ + struct ath12k_fw_stats_vdev *i, *tmp; + + list_for_each_entry_safe(i, tmp, head, list) { + list_del(&i->list); + kfree(i); + } +} + +void ath12k_debugfs_fw_stats_reset(struct ath12k *ar) +{ + spin_lock_bh(&ar->data_lock); + ar->fw_stats.fw_stats_done = false; + ath12k_fw_stats_vdevs_free(&ar->fw_stats.vdevs); + ath12k_fw_stats_bcn_free(&ar->fw_stats.bcn); + ath12k_fw_stats_pdevs_free(&ar->fw_stats.pdevs); + spin_unlock_bh(&ar->data_lock); +} + +static int ath12k_debugfs_fw_stats_request(struct ath12k *ar, + struct ath12k_fw_stats_req_params *param) +{ + struct ath12k_base *ab = ar->ab; + unsigned long timeout, time_left; + int ret; + + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); + + /* FW stats can get split when exceeding the stats data buffer limit. + * In that case, since there is no end marking for the back-to-back + * received 'update stats' event, we keep a 3 second timeout in case + * fw_stats_done is not marked yet + */ + timeout = jiffies + msecs_to_jiffies(3 * 1000); + + ath12k_debugfs_fw_stats_reset(ar); + + reinit_completion(&ar->fw_stats_complete); + + ret = ath12k_wmi_send_stats_request_cmd(ar, param->stats_id, + param->vdev_id, param->pdev_id); + + if (ret) { + ath12k_warn(ab, "could not request fw stats (%d)\n", + ret); + return ret; + } + + time_left = wait_for_completion_timeout(&ar->fw_stats_complete, + 1 * HZ); + /* If the wait timed out, return -ETIMEDOUT */ + if (!time_left) + return -ETIMEDOUT; + + /* Firmware sends WMI_UPDATE_STATS_EVENTID back-to-back + * when stats data buffer limit is reached. fw_stats_complete + * is completed once host receives first event from firmware, but + * still the end might not be marked in the TLV.
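+ * (The 1 sec completion wait above covers only that first event; the + * 3 sec timeout set at the top of this function bounds the polling below.)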
+ * Below loop is to confirm that firmware completed sending all the events + * and fw_stats_done is marked true when the end is marked in the TLV + */ + for (;;) { + if (time_after(jiffies, timeout)) + break; + + spin_lock_bh(&ar->data_lock); + if (ar->fw_stats.fw_stats_done) { + spin_unlock_bh(&ar->data_lock); + break; + } + spin_unlock_bh(&ar->data_lock); + } + return 0; +} + +void +ath12k_debugfs_fw_stats_process(struct ath12k *ar, + struct ath12k_fw_stats *stats) +{ + struct ath12k_base *ab = ar->ab; + struct ath12k_pdev *pdev; + bool is_end; + static unsigned int num_vdev, num_bcn; + size_t total_vdevs_started = 0; + int i; + + if (stats->stats_id == WMI_REQUEST_VDEV_STAT) { + if (list_empty(&stats->vdevs)) { + ath12k_warn(ab, "empty vdev stats"); + return; + } + /* FW sends all the active VDEV stats irrespective of PDEV, + * hence limit to the count of all started VDEVs + */ + rcu_read_lock(); + for (i = 0; i < ab->num_radios; i++) { + pdev = rcu_dereference(ab->pdevs_active[i]); + if (pdev && pdev->ar) + total_vdevs_started += pdev->ar->num_started_vdevs; + } + rcu_read_unlock(); + + is_end = ((++num_vdev) == total_vdevs_started); + + list_splice_tail_init(&stats->vdevs, + &ar->fw_stats.vdevs); + + if (is_end) { + ar->fw_stats.fw_stats_done = true; + num_vdev = 0; + } + return; + } + if (stats->stats_id == WMI_REQUEST_BCN_STAT) { + if (list_empty(&stats->bcn)) { + ath12k_warn(ab, "empty beacon stats"); + return; + } + /* Mark end once we have reached the count of all started VDEVs + * within the PDEV + */ + is_end = ((++num_bcn) == ar->num_started_vdevs); + + list_splice_tail_init(&stats->bcn, + &ar->fw_stats.bcn); + + if (is_end) { + ar->fw_stats.fw_stats_done = true; + num_bcn = 0; + } + } + if (stats->stats_id == WMI_REQUEST_PDEV_STAT) { + list_splice_tail_init(&stats->pdevs, &ar->fw_stats.pdevs); + ar->fw_stats.fw_stats_done = true; + } +} + +static int ath12k_open_vdev_stats(struct inode *inode, struct file *file) +{ + struct ath12k *ar = inode->i_private; + struct ath12k_fw_stats_req_params param; + struct ath12k_hw *ah = ath12k_ar_to_ah(ar); + int ret; + + guard(wiphy)(ath12k_ar_to_hw(ar)->wiphy); + + if (!ah) + return -ENETDOWN; + + if (ah->state != ATH12K_HW_STATE_ON) + return -ENETDOWN; + + void *buf __free(kfree) = kzalloc(ATH12K_FW_STATS_BUF_SIZE, GFP_ATOMIC); + if (!buf) + return -ENOMEM; + + param.pdev_id = ath12k_mac_get_target_pdev_id(ar); + /* VDEV stats are always sent for all active VDEVs from FW */ + param.vdev_id = 0; + param.stats_id = WMI_REQUEST_VDEV_STAT; + + ret = ath12k_debugfs_fw_stats_request(ar, &param); + if (ret) { + ath12k_warn(ar->ab, "failed to request fw vdev stats: %d\n", ret); + return ret; + } + + ath12k_wmi_fw_stats_dump(ar, &ar->fw_stats, param.stats_id, + buf); + + file->private_data = no_free_ptr(buf); + + return 0; +} + +static int ath12k_release_vdev_stats(struct inode *inode, struct file *file) +{ + kfree(file->private_data); + + return 0; +} + +static ssize_t ath12k_read_vdev_stats(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + const char *buf = file->private_data; + size_t len = strlen(buf); + + return simple_read_from_buffer(user_buf, count, ppos, buf, len); +} + +static const struct file_operations fops_vdev_stats = { + .open = ath12k_open_vdev_stats, + .release = ath12k_release_vdev_stats, + .read = ath12k_read_vdev_stats, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +static int ath12k_open_bcn_stats(struct inode *inode, struct file *file) +{ + struct ath12k *ar = inode->i_private; + struct
+static int ath12k_open_bcn_stats(struct inode *inode, struct file *file)
+{
+	struct ath12k *ar = inode->i_private;
+	struct ath12k_link_vif *arvif;
+	struct ath12k_fw_stats_req_params param;
+	struct ath12k_hw *ah = ath12k_ar_to_ah(ar);
+	int ret;
+
+	guard(wiphy)(ath12k_ar_to_hw(ar)->wiphy);
+
+	if (ah && ah->state != ATH12K_HW_STATE_ON)
+		return -ENETDOWN;
+
+	void *buf __free(kfree) = kzalloc(ATH12K_FW_STATS_BUF_SIZE, GFP_ATOMIC);
+	if (!buf)
+		return -ENOMEM;
+
+	param.pdev_id = ath12k_mac_get_target_pdev_id(ar);
+	param.stats_id = WMI_REQUEST_BCN_STAT;
+
+	/* loop over all active VDEVs for bcn stats */
+	list_for_each_entry(arvif, &ar->arvifs, list) {
+		if (!arvif->is_up)
+			continue;
+
+		param.vdev_id = arvif->vdev_id;
+		ret = ath12k_debugfs_fw_stats_request(ar, &param);
+		if (ret) {
+			ath12k_warn(ar->ab, "failed to request fw bcn stats: %d\n", ret);
+			return ret;
+		}
+	}
+
+	ath12k_wmi_fw_stats_dump(ar, &ar->fw_stats, param.stats_id,
+				 buf);
+	/* Since the beacon stats request loops over all active VDEVs, the
+	 * saved fw stats are freed only after all active VDEVs are done,
+	 * not after each request
+	 */
+	spin_lock_bh(&ar->data_lock);
+	ath12k_fw_stats_bcn_free(&ar->fw_stats.bcn);
+	spin_unlock_bh(&ar->data_lock);
+
+	file->private_data = no_free_ptr(buf);
+
+	return 0;
+}
+
+static int ath12k_release_bcn_stats(struct inode *inode, struct file *file)
+{
+	kfree(file->private_data);
+
+	return 0;
+}
+
+static ssize_t ath12k_read_bcn_stats(struct file *file,
+				     char __user *user_buf,
+				     size_t count, loff_t *ppos)
+{
+	const char *buf = file->private_data;
+	size_t len = strlen(buf);
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_bcn_stats = {
+	.open = ath12k_open_bcn_stats,
+	.release = ath12k_release_bcn_stats,
+	.read = ath12k_read_bcn_stats,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+
+static int ath12k_open_pdev_stats(struct inode *inode, struct file *file)
+{
+	struct ath12k *ar = inode->i_private;
+	struct ath12k_hw *ah = ath12k_ar_to_ah(ar);
+	struct ath12k_base *ab = ar->ab;
+	struct ath12k_fw_stats_req_params param;
+	int ret;
+
+	guard(wiphy)(ath12k_ar_to_hw(ar)->wiphy);
+
+	if (ah && ah->state != ATH12K_HW_STATE_ON)
+		return -ENETDOWN;
+
+	void *buf __free(kfree) = kzalloc(ATH12K_FW_STATS_BUF_SIZE, GFP_ATOMIC);
+	if (!buf)
+		return -ENOMEM;
+
+	param.pdev_id = ath12k_mac_get_target_pdev_id(ar);
+	param.vdev_id = 0;
+	param.stats_id = WMI_REQUEST_PDEV_STAT;
+
+	ret = ath12k_debugfs_fw_stats_request(ar, &param);
+	if (ret) {
+		ath12k_warn(ab, "failed to request fw pdev stats: %d\n", ret);
+		return ret;
+	}
+
+	ath12k_wmi_fw_stats_dump(ar, &ar->fw_stats, param.stats_id,
+				 buf);
+
+	file->private_data = no_free_ptr(buf);
+
+	return 0;
+}
+
+static int ath12k_release_pdev_stats(struct inode *inode, struct file *file)
+{
+	kfree(file->private_data);
+
+	return 0;
+}
+
+static ssize_t ath12k_read_pdev_stats(struct file *file,
+				      char __user *user_buf,
+				      size_t count, loff_t *ppos)
+{
+	const char *buf = file->private_data;
+	size_t len = strlen(buf);
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_pdev_stats = {
+	.open = ath12k_open_pdev_stats,
+	.release = ath12k_release_pdev_stats,
+	.read = ath12k_read_pdev_stats,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+
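All three open handlers above lean on scope-based cleanup from linux/cleanup.h: the __free(kfree) annotation frees buf automatically on every early return, and no_free_ptr() disarms that cleanup once ownership moves to file->private_data, which the matching release handler later kfree()s. The idiom in isolation, where example_fill() is a hypothetical helper and not a driver function:

	#include <linux/cleanup.h>
	#include <linux/fs.h>
	#include <linux/slab.h>

	static int example_fill(void *buf, size_t len);	/* hypothetical */

	static int example_open(struct inode *inode, struct file *file)
	{
		/* Freed automatically when buf goes out of scope... */
		void *buf __free(kfree) = kzalloc(4096, GFP_KERNEL);

		if (!buf)
			return -ENOMEM;

		if (example_fill(buf, 4096))
			return -EIO;		/* buf is freed on this path */

		/* ...unless ownership is explicitly handed off. */
		file->private_data = no_free_ptr(buf);
		return 0;
	}
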
+static
+void ath12k_debugfs_fw_stats_register(struct ath12k *ar)
+{
+	struct dentry *fwstats_dir = debugfs_create_dir("fw_stats",
+							ar->debug.debugfs_pdev);
+
+	/* All stats debugfs files are created under the "fw_stats"
+	 * directory created per PDEV
+	 */
+	debugfs_create_file("vdev_stats", 0600, fwstats_dir, ar,
+			    &fops_vdev_stats);
+	debugfs_create_file("beacon_stats", 0600, fwstats_dir, ar,
+			    &fops_bcn_stats);
+	debugfs_create_file("pdev_stats", 0600, fwstats_dir, ar,
+			    &fops_pdev_stats);
+
+	INIT_LIST_HEAD(&ar->fw_stats.vdevs);
+	INIT_LIST_HEAD(&ar->fw_stats.bcn);
+	INIT_LIST_HEAD(&ar->fw_stats.pdevs);
+
+	init_completion(&ar->fw_stats_complete);
+}
+
 void ath12k_debugfs_register(struct ath12k *ar)
 {
 	struct ath12k_base *ab = ar->ab;
@@ -91,7 +1269,18 @@ void ath12k_debugfs_register(struct ath12k *ar)
 				    &fops_simulate_radar);
 	}
 
+	debugfs_create_file("tpc_stats", 0400, ar->debug.debugfs_pdev, ar,
+			    &fops_tpc_stats);
+	debugfs_create_file("tpc_stats_type", 0200, ar->debug.debugfs_pdev,
+			    ar, &fops_tpc_stats_type);
+	init_completion(&ar->debug.tpc_complete);
+
 	ath12k_debugfs_htt_stats_register(ar);
+	ath12k_debugfs_fw_stats_register(ar);
+
+	debugfs_create_file("ext_rx_stats", 0644,
+			    ar->debug.debugfs_pdev, ar,
+			    &fops_extd_rx_stats);
 }
 
 void ath12k_debugfs_unregister(struct ath12k *ar)
diff --git a/drivers/net/wireless/ath/ath12k/debugfs.h b/drivers/net/wireless/ath/ath12k/debugfs.h
index 8d64ba03aa9ad..d7041297d5d88 100644
--- a/drivers/net/wireless/ath/ath12k/debugfs.h
+++ b/drivers/net/wireless/ath/ath12k/debugfs.h
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: BSD-3-Clause-Clear */
 /*
  * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
 #ifndef _ATH12K_DEBUGFS_H_
@@ -12,6 +12,100 @@ void ath12k_debugfs_soc_create(struct ath12k_base *ab);
 void ath12k_debugfs_soc_destroy(struct ath12k_base *ab);
 void ath12k_debugfs_register(struct ath12k *ar);
 void ath12k_debugfs_unregister(struct ath12k *ar);
+void ath12k_debugfs_fw_stats_process(struct ath12k *ar,
+				     struct ath12k_fw_stats *stats);
+void ath12k_debugfs_fw_stats_reset(struct ath12k *ar);
+
+static inline bool ath12k_debugfs_is_extd_rx_stats_enabled(struct ath12k *ar)
+{
+	return ar->debug.extd_rx_stats;
+}
+
+static inline int ath12k_debugfs_rx_filter(struct ath12k *ar)
+{
+	return ar->debug.rx_filter;
+}
+
+#define ATH12K_CCK_RATES 4
+#define ATH12K_OFDM_RATES 8
+#define ATH12K_HT_RATES 8
+#define ATH12K_VHT_RATES 12
+#define ATH12K_HE_RATES 12
+#define ATH12K_HE_RATES_WITH_EXTRA_MCS 14
+#define ATH12K_EHT_RATES 16
+#define HE_EXTRA_MCS_SUPPORT GENMASK(31, 16)
+#define ATH12K_NSS_1 1
+#define ATH12K_NSS_4 4
+#define ATH12K_NSS_8 8
+#define ATH12K_HW_NSS(_rcode) (((_rcode) >> 5) & 0x7)
+#define TPC_STATS_WAIT_TIME (1 * HZ)
+#define MAX_TPC_PREAM_STR_LEN 7
+#define TPC_INVAL -128
+#define TPC_MAX 127
+#define TPC_STATS_TOT_ROW 700
+#define TPC_STATS_TOT_COLUMN 100
+#define MODULATION_LIMIT 126
+
+#define ATH12K_TPC_STATS_BUF_SIZE (TPC_STATS_TOT_ROW * TPC_STATS_TOT_COLUMN)
+
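A quick worked example of the ATH12K_HW_NSS() helper defined above: it extracts bits 7:5 of a rate code, nothing more. For a rate code of 0x45 (binary 0100 0101) the field value is 0b010, i.e. 2. A self-contained userspace check, with the macro copied verbatim from this header:

	#include <stdio.h>

	#define ATH12K_HW_NSS(_rcode) (((_rcode) >> 5) & 0x7)

	int main(void)
	{
		/* 0x45 = 0b01000101; bits 7:5 hold 0b010 */
		printf("ATH12K_HW_NSS(0x45) = %d\n", ATH12K_HW_NSS(0x45));	/* 2 */
		return 0;
	}
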
+enum wmi_tpc_pream_bw {
+	WMI_TPC_PREAM_CCK,
+	WMI_TPC_PREAM_OFDM,
+	WMI_TPC_PREAM_HT20,
+	WMI_TPC_PREAM_HT40,
+	WMI_TPC_PREAM_VHT20,
+	WMI_TPC_PREAM_VHT40,
+	WMI_TPC_PREAM_VHT80,
+	WMI_TPC_PREAM_VHT160,
+	WMI_TPC_PREAM_HE20,
+	WMI_TPC_PREAM_HE40,
+	WMI_TPC_PREAM_HE80,
+	WMI_TPC_PREAM_HE160,
+	WMI_TPC_PREAM_EHT20,
+	WMI_TPC_PREAM_EHT40,
+	WMI_TPC_PREAM_EHT60,
+	WMI_TPC_PREAM_EHT80,
+	WMI_TPC_PREAM_EHT120,
+	WMI_TPC_PREAM_EHT140,
+	WMI_TPC_PREAM_EHT160,
+	WMI_TPC_PREAM_EHT200,
+	WMI_TPC_PREAM_EHT240,
+	WMI_TPC_PREAM_EHT280,
+	WMI_TPC_PREAM_EHT320,
+	WMI_TPC_PREAM_MAX
+};
+
+enum ath12k_debug_tpc_stats_ctl_mode {
+	ATH12K_TPC_STATS_CTL_MODE_LEGACY_5GHZ_6GHZ,
+	ATH12K_TPC_STATS_CTL_MODE_HT_VHT20_5GHZ_6GHZ,
+	ATH12K_TPC_STATS_CTL_MODE_HE_EHT20_5GHZ_6GHZ,
+	ATH12K_TPC_STATS_CTL_MODE_HT_VHT40_5GHZ_6GHZ,
+	ATH12K_TPC_STATS_CTL_MODE_HE_EHT40_5GHZ_6GHZ,
+	ATH12K_TPC_STATS_CTL_MODE_VHT80_5GHZ_6GHZ,
+	ATH12K_TPC_STATS_CTL_MODE_HE_EHT80_5GHZ_6GHZ,
+	ATH12K_TPC_STATS_CTL_MODE_VHT160_5GHZ_6GHZ,
+	ATH12K_TPC_STATS_CTL_MODE_HE_EHT160_5GHZ_6GHZ,
+	ATH12K_TPC_STATS_CTL_MODE_HE_EHT320_5GHZ_6GHZ,
+	ATH12K_TPC_STATS_CTL_MODE_CCK_2GHZ,
+	ATH12K_TPC_STATS_CTL_MODE_LEGACY_2GHZ,
+	ATH12K_TPC_STATS_CTL_MODE_HT20_2GHZ,
+	ATH12K_TPC_STATS_CTL_MODE_HT40_2GHZ,
+
+	ATH12K_TPC_STATS_CTL_MODE_EHT80_SU_PUNC20 = 23,
+	ATH12K_TPC_STATS_CTL_MODE_EHT160_SU_PUNC20,
+	ATH12K_TPC_STATS_CTL_MODE_EHT320_SU_PUNC40,
+	ATH12K_TPC_STATS_CTL_MODE_EHT320_SU_PUNC80,
+	ATH12K_TPC_STATS_CTL_MODE_EHT320_SU_PUNC120
+};
+
+enum ath12k_debug_tpc_stats_support_modes {
+	ATH12K_TPC_STATS_SUPPORT_160 = 0,
+	ATH12K_TPC_STATS_SUPPORT_320,
+	ATH12K_TPC_STATS_SUPPORT_AX,
+	ATH12K_TPC_STATS_SUPPORT_AX_EXTRA_MCS,
+	ATH12K_TPC_STATS_SUPPORT_BE,
+	ATH12K_TPC_STATS_SUPPORT_BE_PUNC,
+};
 #else
 static inline void ath12k_debugfs_soc_create(struct ath12k_base *ab)
 {
@@ -29,6 +124,24 @@ static inline void ath12k_debugfs_unregister(struct ath12k *ar)
 {
 }
 
+static inline void ath12k_debugfs_fw_stats_process(struct ath12k *ar,
+						   struct ath12k_fw_stats *stats)
+{
+}
+
+static inline void ath12k_debugfs_fw_stats_reset(struct ath12k *ar)
+{
+}
+
+static inline bool ath12k_debugfs_is_extd_rx_stats_enabled(struct ath12k *ar)
+{
+	return false;
+}
+
+static inline int ath12k_debugfs_rx_filter(struct ath12k *ar)
+{
+	return 0;
+}
 #endif /* CONFIG_ATH12K_DEBUGFS */
 
 #endif /* _ATH12K_DEBUGFS_H_ */
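The #else branch above is the usual kernel config-stub idiom: with CONFIG_ATH12K_DEBUGFS disabled, every entry point collapses to an empty static inline, so callers compile unchanged and need no #ifdef guards of their own. The generic shape of the pattern, where CONFIG_EXAMPLE_FEATURE and all names are illustrative:

	struct example_dev;

	#ifdef CONFIG_EXAMPLE_FEATURE
	void example_feature_register(struct example_dev *dev);
	#else
	static inline void example_feature_register(struct example_dev *dev)
	{
	}
	#endif
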
diff --git a/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.c b/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.c
index 41e4ef2ef3af6..1c0d5fa39a8dc 100644
--- a/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.c
+++ b/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.c
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: BSD-3-Clause-Clear
 /*
  * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
 #include <linux/vmalloc.h>
@@ -48,6 +48,34 @@ print_array_to_buf(u8 *buf, u32 offset, const char *header,
 			       footer);
 }
 
+static u32
+print_array_to_buf_s8(u8 *buf, u32 offset, const char *header, u32 stats_index,
+		      const s8 *array, u32 array_len, const char *footer)
+{
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	int index = 0;
+	u8 i;
+
+	if (header)
+		index += scnprintf(buf + offset, buf_len - offset, "%s = ", header);
+
+	for (i = 0; i < array_len; i++) {
+		index += scnprintf(buf + offset + index, (buf_len - offset) - index,
+				   " %u:%d,", stats_index++, array[i]);
+	}
+
+	index--;
+	if ((offset + index) < buf_len)
+		buf[offset + index] = '\0';
+
+	if (footer) {
+		index += scnprintf(buf + offset + index, (buf_len - offset) - index,
+				   "%s", footer);
+	}
+
+	return index;
+}
+
 static const char *ath12k_htt_ax_tx_rx_ru_size_to_str(u8 ru_size)
 {
 	switch (ru_size) {
@@ -2511,6 +2539,268 @@ ath12k_htt_print_pdev_stats_cca_counters_tlv(const void *tag_buf, u16 tag_len,
 	stats_req->buf_len = len;
 }
 
+static void
+ath12k_htt_print_tx_sounding_stats_tlv(const void *tag_buf, u16 tag_len,
+				       struct debug_htt_stats_req *stats_req)
+{
+	const struct ath12k_htt_tx_sounding_stats_tlv *htt_stats_buf = tag_buf;
+	const __le32 *cbf_20, *cbf_40, *cbf_80, *cbf_160, *cbf_320;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	u32 len = stats_req->buf_len;
+	u8 *buf = stats_req->buf;
+	u32 tx_sounding_mode;
+	u8 i, u;
+
+	if (tag_len < sizeof(*htt_stats_buf))
+		return;
+
+	cbf_20 = htt_stats_buf->cbf_20;
+	cbf_40 = htt_stats_buf->cbf_40;
+	cbf_80 = htt_stats_buf->cbf_80;
+	cbf_160 = htt_stats_buf->cbf_160;
+	cbf_320 = htt_stats_buf->cbf_320;
+	tx_sounding_mode = le32_to_cpu(htt_stats_buf->tx_sounding_mode);
+
+	if (tx_sounding_mode == ATH12K_HTT_TX_AC_SOUNDING_MODE) {
+		len += scnprintf(buf + len, buf_len - len,
+				 "HTT_TX_AC_SOUNDING_STATS_TLV:\n");
+		len += scnprintf(buf + len, buf_len - len,
+				 "ac_cbf_20 = IBF: %u, SU_SIFS: %u, SU_RBO: %u, ",
+				 le32_to_cpu(cbf_20[ATH12K_HTT_IMPL_STEER_STATS]),
+				 le32_to_cpu(cbf_20[ATH12K_HTT_EXPL_SUSIFS_STEER_STATS]),
+				 le32_to_cpu(cbf_20[ATH12K_HTT_EXPL_SURBO_STEER_STATS]));
+		len += scnprintf(buf + len, buf_len - len, "MU_SIFS: %u, MU_RBO: %u\n",
+				 le32_to_cpu(cbf_20[ATH12K_HTT_EXPL_MUSIFS_STEER_STATS]),
+				 le32_to_cpu(cbf_20[ATH12K_HTT_EXPL_MURBO_STEER_STATS]));
+		len += scnprintf(buf + len, buf_len - len,
+				 "ac_cbf_40 = IBF: %u, SU_SIFS: %u, SU_RBO: %u, ",
+				 le32_to_cpu(cbf_40[ATH12K_HTT_IMPL_STEER_STATS]),
+				 le32_to_cpu(cbf_40[ATH12K_HTT_EXPL_SUSIFS_STEER_STATS]),
+				 le32_to_cpu(cbf_40[ATH12K_HTT_EXPL_SURBO_STEER_STATS]));
+		len += scnprintf(buf + len, buf_len - len, "MU_SIFS: %u, MU_RBO: %u\n",
+				 le32_to_cpu(cbf_40[ATH12K_HTT_EXPL_MUSIFS_STEER_STATS]),
+				 le32_to_cpu(cbf_40[ATH12K_HTT_EXPL_MURBO_STEER_STATS]));
+		len += scnprintf(buf + len, buf_len - len,
+				 "ac_cbf_80 = IBF: %u, SU_SIFS: %u, SU_RBO: %u, ",
+				 le32_to_cpu(cbf_80[ATH12K_HTT_IMPL_STEER_STATS]),
+				 le32_to_cpu(cbf_80[ATH12K_HTT_EXPL_SUSIFS_STEER_STATS]),
+				 le32_to_cpu(cbf_80[ATH12K_HTT_EXPL_SURBO_STEER_STATS]));
+		len += scnprintf(buf + len, buf_len - len, "MU_SIFS: %u, MU_RBO: %u\n",
+				 le32_to_cpu(cbf_80[ATH12K_HTT_EXPL_MUSIFS_STEER_STATS]),
+				 le32_to_cpu(cbf_80[ATH12K_HTT_EXPL_MURBO_STEER_STATS]));
+		len += scnprintf(buf + len, buf_len - len,
+				 "ac_cbf_160 = IBF: %u, SU_SIFS: %u, SU_RBO: %u, ",
+				 le32_to_cpu(cbf_160[ATH12K_HTT_IMPL_STEER_STATS]),
+				 le32_to_cpu(cbf_160[ATH12K_HTT_EXPL_SUSIFS_STEER_STATS]),
+				 le32_to_cpu(cbf_160[ATH12K_HTT_EXPL_SURBO_STEER_STATS]));
+		len += scnprintf(buf + len, buf_len - len, "MU_SIFS: %u, MU_RBO: %u\n",
+ le32_to_cpu(cbf_160[ATH12K_HTT_EXPL_MUSIFS_STEER_STATS]), + le32_to_cpu(cbf_160[ATH12K_HTT_EXPL_MURBO_STEER_STATS])); + + for (u = 0, i = 0; u < ATH12K_HTT_TX_NUM_AC_MUMIMO_USER_STATS; u++) { + len += scnprintf(buf + len, buf_len - len, + "Sounding User_%u = 20MHz: %u, ", u, + le32_to_cpu(htt_stats_buf->sounding[i++])); + len += scnprintf(buf + len, buf_len - len, "40MHz: %u, ", + le32_to_cpu(htt_stats_buf->sounding[i++])); + len += scnprintf(buf + len, buf_len - len, "80MHz: %u, ", + le32_to_cpu(htt_stats_buf->sounding[i++])); + len += scnprintf(buf + len, buf_len - len, "160MHz: %u\n", + le32_to_cpu(htt_stats_buf->sounding[i++])); + } + } else if (tx_sounding_mode == ATH12K_HTT_TX_AX_SOUNDING_MODE) { + len += scnprintf(buf + len, buf_len - len, + "\nHTT_TX_AX_SOUNDING_STATS_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, + "ax_cbf_20 = IBF: %u, SU_SIFS: %u, SU_RBO: %u, ", + le32_to_cpu(cbf_20[ATH12K_HTT_IMPL_STEER_STATS]), + le32_to_cpu(cbf_20[ATH12K_HTT_EXPL_SUSIFS_STEER_STATS]), + le32_to_cpu(cbf_20[ATH12K_HTT_EXPL_SURBO_STEER_STATS])); + len += scnprintf(buf + len, buf_len - len, "MU_SIFS: %u, MU_RBO: %u\n", + le32_to_cpu(cbf_20[ATH12K_HTT_EXPL_MUSIFS_STEER_STATS]), + le32_to_cpu(cbf_20[ATH12K_HTT_EXPL_MURBO_STEER_STATS])); + len += scnprintf(buf + len, buf_len - len, + "ax_cbf_40 = IBF: %u, SU_SIFS: %u, SU_RBO: %u, ", + le32_to_cpu(cbf_40[ATH12K_HTT_IMPL_STEER_STATS]), + le32_to_cpu(cbf_40[ATH12K_HTT_EXPL_SUSIFS_STEER_STATS]), + le32_to_cpu(cbf_40[ATH12K_HTT_EXPL_SURBO_STEER_STATS])); + len += scnprintf(buf + len, buf_len - len, "MU_SIFS: %u, MU_RBO: %u\n", + le32_to_cpu(cbf_40[ATH12K_HTT_EXPL_MUSIFS_STEER_STATS]), + le32_to_cpu(cbf_40[ATH12K_HTT_EXPL_MURBO_STEER_STATS])); + len += scnprintf(buf + len, buf_len - len, + "ax_cbf_80 = IBF: %u, SU_SIFS: %u, SU_RBO: %u, ", + le32_to_cpu(cbf_80[ATH12K_HTT_IMPL_STEER_STATS]), + le32_to_cpu(cbf_80[ATH12K_HTT_EXPL_SUSIFS_STEER_STATS]), + le32_to_cpu(cbf_80[ATH12K_HTT_EXPL_SURBO_STEER_STATS])); + len += scnprintf(buf + len, buf_len - len, "MU_SIFS: %u, MU_RBO: %u\n", + le32_to_cpu(cbf_80[ATH12K_HTT_EXPL_MUSIFS_STEER_STATS]), + le32_to_cpu(cbf_80[ATH12K_HTT_EXPL_MURBO_STEER_STATS])); + len += scnprintf(buf + len, buf_len - len, + "ax_cbf_160 = IBF: %u, SU_SIFS: %u, SU_RBO: %u, ", + le32_to_cpu(cbf_160[ATH12K_HTT_IMPL_STEER_STATS]), + le32_to_cpu(cbf_160[ATH12K_HTT_EXPL_SUSIFS_STEER_STATS]), + le32_to_cpu(cbf_160[ATH12K_HTT_EXPL_SURBO_STEER_STATS])); + len += scnprintf(buf + len, buf_len - len, "MU_SIFS: %u, MU_RBO: %u\n", + le32_to_cpu(cbf_160[ATH12K_HTT_EXPL_MUSIFS_STEER_STATS]), + le32_to_cpu(cbf_160[ATH12K_HTT_EXPL_MURBO_STEER_STATS])); + + for (u = 0, i = 0; u < ATH12K_HTT_TX_NUM_AX_MUMIMO_USER_STATS; u++) { + len += scnprintf(buf + len, buf_len - len, + "Sounding User_%u = 20MHz: %u, ", u, + le32_to_cpu(htt_stats_buf->sounding[i++])); + len += scnprintf(buf + len, buf_len - len, "40MHz: %u, ", + le32_to_cpu(htt_stats_buf->sounding[i++])); + len += scnprintf(buf + len, buf_len - len, "80MHz: %u, ", + le32_to_cpu(htt_stats_buf->sounding[i++])); + len += scnprintf(buf + len, buf_len - len, "160MHz: %u\n", + le32_to_cpu(htt_stats_buf->sounding[i++])); + } + } else if (tx_sounding_mode == ATH12K_HTT_TX_BE_SOUNDING_MODE) { + len += scnprintf(buf + len, buf_len - len, + "\nHTT_TX_BE_SOUNDING_STATS_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, + "be_cbf_20 = IBF: %u, SU_SIFS: %u, SU_RBO: %u, ", + le32_to_cpu(cbf_20[ATH12K_HTT_IMPL_STEER_STATS]), + le32_to_cpu(cbf_20[ATH12K_HTT_EXPL_SUSIFS_STEER_STATS]), + 
le32_to_cpu(cbf_20[ATH12K_HTT_EXPL_SURBO_STEER_STATS])); + len += scnprintf(buf + len, buf_len - len, "MU_SIFS: %u, MU_RBO: %u\n", + le32_to_cpu(cbf_20[ATH12K_HTT_EXPL_MUSIFS_STEER_STATS]), + le32_to_cpu(cbf_20[ATH12K_HTT_EXPL_MURBO_STEER_STATS])); + len += scnprintf(buf + len, buf_len - len, + "be_cbf_40 = IBF: %u, SU_SIFS: %u, SU_RBO: %u, ", + le32_to_cpu(cbf_40[ATH12K_HTT_IMPL_STEER_STATS]), + le32_to_cpu(cbf_40[ATH12K_HTT_EXPL_SUSIFS_STEER_STATS]), + le32_to_cpu(cbf_40[ATH12K_HTT_EXPL_SURBO_STEER_STATS])); + len += scnprintf(buf + len, buf_len - len, "MU_SIFS: %u, MU_RBO: %u\n", + le32_to_cpu(cbf_40[ATH12K_HTT_EXPL_MUSIFS_STEER_STATS]), + le32_to_cpu(cbf_40[ATH12K_HTT_EXPL_MURBO_STEER_STATS])); + len += scnprintf(buf + len, buf_len - len, + "be_cbf_80 = IBF: %u, SU_SIFS: %u, SU_RBO: %u, ", + le32_to_cpu(cbf_80[ATH12K_HTT_IMPL_STEER_STATS]), + le32_to_cpu(cbf_80[ATH12K_HTT_EXPL_SUSIFS_STEER_STATS]), + le32_to_cpu(cbf_80[ATH12K_HTT_EXPL_SURBO_STEER_STATS])); + len += scnprintf(buf + len, buf_len - len, "MU_SIFS: %u, MU_RBO: %u\n", + le32_to_cpu(cbf_80[ATH12K_HTT_EXPL_MUSIFS_STEER_STATS]), + le32_to_cpu(cbf_80[ATH12K_HTT_EXPL_MURBO_STEER_STATS])); + len += scnprintf(buf + len, buf_len - len, + "be_cbf_160 = IBF: %u, SU_SIFS: %u, SU_RBO: %u, ", + le32_to_cpu(cbf_160[ATH12K_HTT_IMPL_STEER_STATS]), + le32_to_cpu(cbf_160[ATH12K_HTT_EXPL_SUSIFS_STEER_STATS]), + le32_to_cpu(cbf_160[ATH12K_HTT_EXPL_SURBO_STEER_STATS])); + len += scnprintf(buf + len, buf_len - len, "MU_SIFS: %u, MU_RBO: %u\n", + le32_to_cpu(cbf_160[ATH12K_HTT_EXPL_MUSIFS_STEER_STATS]), + le32_to_cpu(cbf_160[ATH12K_HTT_EXPL_MURBO_STEER_STATS])); + len += scnprintf(buf + len, buf_len - len, + "be_cbf_320 = IBF: %u, SU_SIFS: %u, SU_RBO: %u, ", + le32_to_cpu(cbf_320[ATH12K_HTT_IMPL_STEER_STATS]), + le32_to_cpu(cbf_320[ATH12K_HTT_EXPL_SUSIFS_STEER_STATS]), + le32_to_cpu(cbf_320[ATH12K_HTT_EXPL_SURBO_STEER_STATS])); + len += scnprintf(buf + len, buf_len - len, "MU_SIFS: %u, MU_RBO: %u\n", + le32_to_cpu(cbf_320[ATH12K_HTT_EXPL_MUSIFS_STEER_STATS]), + le32_to_cpu(cbf_320[ATH12K_HTT_EXPL_MURBO_STEER_STATS])); + for (u = 0, i = 0; u < ATH12K_HTT_TX_NUM_BE_MUMIMO_USER_STATS; u++) { + len += scnprintf(buf + len, buf_len - len, + "Sounding User_%u = 20MHz: %u, ", u, + le32_to_cpu(htt_stats_buf->sounding[i++])); + len += scnprintf(buf + len, buf_len - len, "40MHz: %u, ", + le32_to_cpu(htt_stats_buf->sounding[i++])); + len += scnprintf(buf + len, buf_len - len, "80MHz: %u, ", + le32_to_cpu(htt_stats_buf->sounding[i++])); + len += scnprintf(buf + len, buf_len - len, + "160MHz: %u, 320MHz: %u\n", + le32_to_cpu(htt_stats_buf->sounding[i++]), + le32_to_cpu(htt_stats_buf->sounding_320[u])); + } + } else if (tx_sounding_mode == ATH12K_HTT_TX_CMN_SOUNDING_MODE) { + len += scnprintf(buf + len, buf_len - len, + "\nCV UPLOAD HANDLER STATS:\n"); + len += scnprintf(buf + len, buf_len - len, "cv_nc_mismatch_err = %u\n", + le32_to_cpu(htt_stats_buf->cv_nc_mismatch_err)); + len += scnprintf(buf + len, buf_len - len, "cv_fcs_err = %u\n", + le32_to_cpu(htt_stats_buf->cv_fcs_err)); + len += scnprintf(buf + len, buf_len - len, "cv_frag_idx_mismatch = %u\n", + le32_to_cpu(htt_stats_buf->cv_frag_idx_mismatch)); + len += scnprintf(buf + len, buf_len - len, "cv_invalid_peer_id = %u\n", + le32_to_cpu(htt_stats_buf->cv_invalid_peer_id)); + len += scnprintf(buf + len, buf_len - len, "cv_no_txbf_setup = %u\n", + le32_to_cpu(htt_stats_buf->cv_no_txbf_setup)); + len += scnprintf(buf + len, buf_len - len, "cv_expiry_in_update = %u\n", + 
le32_to_cpu(htt_stats_buf->cv_expiry_in_update)); + len += scnprintf(buf + len, buf_len - len, "cv_pkt_bw_exceed = %u\n", + le32_to_cpu(htt_stats_buf->cv_pkt_bw_exceed)); + len += scnprintf(buf + len, buf_len - len, "cv_dma_not_done_err = %u\n", + le32_to_cpu(htt_stats_buf->cv_dma_not_done_err)); + len += scnprintf(buf + len, buf_len - len, "cv_update_failed = %u\n", + le32_to_cpu(htt_stats_buf->cv_update_failed)); + len += scnprintf(buf + len, buf_len - len, "cv_dma_timeout_error = %u\n", + le32_to_cpu(htt_stats_buf->cv_dma_timeout_error)); + len += scnprintf(buf + len, buf_len - len, "cv_buf_ibf_uploads = %u\n", + le32_to_cpu(htt_stats_buf->cv_buf_ibf_uploads)); + len += scnprintf(buf + len, buf_len - len, "cv_buf_ebf_uploads = %u\n", + le32_to_cpu(htt_stats_buf->cv_buf_ebf_uploads)); + len += scnprintf(buf + len, buf_len - len, "cv_buf_received = %u\n", + le32_to_cpu(htt_stats_buf->cv_buf_received)); + len += scnprintf(buf + len, buf_len - len, "cv_buf_fed_back = %u\n\n", + le32_to_cpu(htt_stats_buf->cv_buf_fed_back)); + + len += scnprintf(buf + len, buf_len - len, "CV QUERY STATS:\n"); + len += scnprintf(buf + len, buf_len - len, "cv_total_query = %u\n", + le32_to_cpu(htt_stats_buf->cv_total_query)); + len += scnprintf(buf + len, buf_len - len, + "cv_total_pattern_query = %u\n", + le32_to_cpu(htt_stats_buf->cv_total_pattern_query)); + len += scnprintf(buf + len, buf_len - len, "cv_total_bw_query = %u\n", + le32_to_cpu(htt_stats_buf->cv_total_bw_query)); + len += scnprintf(buf + len, buf_len - len, "cv_invalid_bw_coding = %u\n", + le32_to_cpu(htt_stats_buf->cv_invalid_bw_coding)); + len += scnprintf(buf + len, buf_len - len, "cv_forced_sounding = %u\n", + le32_to_cpu(htt_stats_buf->cv_forced_sounding)); + len += scnprintf(buf + len, buf_len - len, + "cv_standalone_sounding = %u\n", + le32_to_cpu(htt_stats_buf->cv_standalone_sounding)); + len += scnprintf(buf + len, buf_len - len, "cv_nc_mismatch = %u\n", + le32_to_cpu(htt_stats_buf->cv_nc_mismatch)); + len += scnprintf(buf + len, buf_len - len, "cv_fb_type_mismatch = %u\n", + le32_to_cpu(htt_stats_buf->cv_fb_type_mismatch)); + len += scnprintf(buf + len, buf_len - len, "cv_ofdma_bw_mismatch = %u\n", + le32_to_cpu(htt_stats_buf->cv_ofdma_bw_mismatch)); + len += scnprintf(buf + len, buf_len - len, "cv_bw_mismatch = %u\n", + le32_to_cpu(htt_stats_buf->cv_bw_mismatch)); + len += scnprintf(buf + len, buf_len - len, "cv_pattern_mismatch = %u\n", + le32_to_cpu(htt_stats_buf->cv_pattern_mismatch)); + len += scnprintf(buf + len, buf_len - len, "cv_preamble_mismatch = %u\n", + le32_to_cpu(htt_stats_buf->cv_preamble_mismatch)); + len += scnprintf(buf + len, buf_len - len, "cv_nr_mismatch = %u\n", + le32_to_cpu(htt_stats_buf->cv_nr_mismatch)); + len += scnprintf(buf + len, buf_len - len, + "cv_in_use_cnt_exceeded = %u\n", + le32_to_cpu(htt_stats_buf->cv_in_use_cnt_exceeded)); + len += scnprintf(buf + len, buf_len - len, "cv_ntbr_sounding = %u\n", + le32_to_cpu(htt_stats_buf->cv_ntbr_sounding)); + len += scnprintf(buf + len, buf_len - len, + "cv_found_upload_in_progress = %u\n", + le32_to_cpu(htt_stats_buf->cv_found_upload_in_progress)); + len += scnprintf(buf + len, buf_len - len, + "cv_expired_during_query = %u\n", + le32_to_cpu(htt_stats_buf->cv_expired_during_query)); + len += scnprintf(buf + len, buf_len - len, "cv_found = %u\n", + le32_to_cpu(htt_stats_buf->cv_found)); + len += scnprintf(buf + len, buf_len - len, "cv_not_found = %u\n", + le32_to_cpu(htt_stats_buf->cv_not_found)); + len += scnprintf(buf + len, buf_len - len, "cv_total_query_ibf 
= %u\n", + le32_to_cpu(htt_stats_buf->cv_total_query_ibf)); + len += scnprintf(buf + len, buf_len - len, "cv_found_ibf = %u\n", + le32_to_cpu(htt_stats_buf->cv_found_ibf)); + len += scnprintf(buf + len, buf_len - len, "cv_not_found_ibf = %u\n", + le32_to_cpu(htt_stats_buf->cv_not_found_ibf)); + len += scnprintf(buf + len, buf_len - len, + "cv_expired_during_query_ibf = %u\n\n", + le32_to_cpu(htt_stats_buf->cv_expired_during_query_ibf)); + } + + stats_req->buf_len = len; +} + static void ath12k_htt_print_pdev_obss_pd_stats_tlv(const void *tag_buf, u16 tag_len, struct debug_htt_stats_req *stats_req) @@ -2576,6 +2866,428 @@ ath12k_htt_print_pdev_obss_pd_stats_tlv(const void *tag_buf, u16 tag_len, stats_req->buf_len = len; } +static void +ath12k_htt_print_latency_prof_ctx_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_latency_prof_ctx_tlv *htt_stats_buf = tag_buf; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + u32 len = stats_req->buf_len; + u8 *buf = stats_req->buf; + + if (tag_len < sizeof(*htt_stats_buf)) + return; + + len += scnprintf(buf + len, buf_len - len, "HTT_STATS_LATENCY_CTX_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "duration = %u\n", + le32_to_cpu(htt_stats_buf->duration)); + len += scnprintf(buf + len, buf_len - len, "tx_msdu_cnt = %u\n", + le32_to_cpu(htt_stats_buf->tx_msdu_cnt)); + len += scnprintf(buf + len, buf_len - len, "tx_mpdu_cnt = %u\n", + le32_to_cpu(htt_stats_buf->tx_mpdu_cnt)); + len += scnprintf(buf + len, buf_len - len, "rx_msdu_cnt = %u\n", + le32_to_cpu(htt_stats_buf->rx_msdu_cnt)); + len += scnprintf(buf + len, buf_len - len, "rx_mpdu_cnt = %u\n\n", + le32_to_cpu(htt_stats_buf->rx_mpdu_cnt)); + + stats_req->buf_len = len; +} + +static void +ath12k_htt_print_latency_prof_cnt(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_latency_prof_cnt_tlv *htt_stats_buf = tag_buf; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + u32 len = stats_req->buf_len; + u8 *buf = stats_req->buf; + + if (tag_len < sizeof(*htt_stats_buf)) + return; + + len += scnprintf(buf + len, buf_len - len, "HTT_STATS_LATENCY_CNT_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "prof_enable_cnt = %u\n\n", + le32_to_cpu(htt_stats_buf->prof_enable_cnt)); + + stats_req->buf_len = len; +} + +static void +ath12k_htt_print_latency_prof_stats_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_latency_prof_stats_tlv *htt_stats_buf = tag_buf; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + u32 len = stats_req->buf_len; + u8 *buf = stats_req->buf; + + if (tag_len < sizeof(*htt_stats_buf)) + return; + + if (le32_to_cpu(htt_stats_buf->print_header) == 1) { + len += scnprintf(buf + len, buf_len - len, + "HTT_STATS_LATENCY_PROF_TLV:\n"); + } + + len += scnprintf(buf + len, buf_len - len, "Latency name = %s\n", + htt_stats_buf->latency_prof_name); + len += scnprintf(buf + len, buf_len - len, "count = %u\n", + le32_to_cpu(htt_stats_buf->cnt)); + len += scnprintf(buf + len, buf_len - len, "minimum = %u\n", + le32_to_cpu(htt_stats_buf->min)); + len += scnprintf(buf + len, buf_len - len, "maximum = %u\n", + le32_to_cpu(htt_stats_buf->max)); + len += scnprintf(buf + len, buf_len - len, "last = %u\n", + le32_to_cpu(htt_stats_buf->last)); + len += scnprintf(buf + len, buf_len - len, "total = %u\n", + le32_to_cpu(htt_stats_buf->tot)); + len += scnprintf(buf + len, buf_len - len, "average = %u\n", + le32_to_cpu(htt_stats_buf->avg)); + len += 
scnprintf(buf + len, buf_len - len, "histogram interval = %u\n", + le32_to_cpu(htt_stats_buf->hist_intvl)); + len += print_array_to_buf(buf, len, "histogram", htt_stats_buf->hist, + ATH12K_HTT_LATENCY_PROFILE_NUM_MAX_HIST, "\n\n"); + + stats_req->buf_len = len; +} + +static void +ath12k_htt_print_ul_ofdma_trigger_stats(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_rx_pdev_ul_trigger_stats_tlv *htt_stats_buf = tag_buf; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + u32 len = stats_req->buf_len; + u8 *buf = stats_req->buf; + u32 mac_id; + u8 j; + + if (tag_len < sizeof(*htt_stats_buf)) + return; + + mac_id = __le32_to_cpu(htt_stats_buf->mac_id__word); + + len += scnprintf(buf + len, buf_len - len, + "HTT_RX_PDEV_UL_TRIGGER_STATS_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n", + u32_get_bits(mac_id, ATH12K_HTT_STATS_MAC_ID)); + len += scnprintf(buf + len, buf_len - len, "rx_11ax_ul_ofdma = %u\n", + le32_to_cpu(htt_stats_buf->rx_11ax_ul_ofdma)); + len += print_array_to_buf(buf, len, "ul_ofdma_rx_mcs", + htt_stats_buf->ul_ofdma_rx_mcs, + ATH12K_HTT_RX_NUM_MCS_CNTRS, "\n"); + for (j = 0; j < ATH12K_HTT_RX_NUM_GI_CNTRS; j++) { + len += scnprintf(buf + len, buf_len - len, "ul_ofdma_rx_gi[%u]", j); + len += print_array_to_buf(buf, len, "", + htt_stats_buf->ul_ofdma_rx_gi[j], + ATH12K_HTT_RX_NUM_MCS_CNTRS, "\n"); + } + + len += print_array_to_buf_index(buf, len, "ul_ofdma_rx_nss", 1, + htt_stats_buf->ul_ofdma_rx_nss, + ATH12K_HTT_RX_NUM_SPATIAL_STREAMS, "\n"); + len += print_array_to_buf(buf, len, "ul_ofdma_rx_bw", + htt_stats_buf->ul_ofdma_rx_bw, + ATH12K_HTT_RX_NUM_BW_CNTRS, "\n"); + + for (j = 0; j < ATH12K_HTT_RX_NUM_REDUCED_CHAN_TYPES; j++) { + len += scnprintf(buf + len, buf_len - len, j == 0 ? 
+ "half_ul_ofdma_rx_bw" : + "quarter_ul_ofdma_rx_bw"); + len += print_array_to_buf(buf, len, "", htt_stats_buf->red_bw[j], + ATH12K_HTT_RX_NUM_BW_CNTRS, "\n"); + } + len += scnprintf(buf + len, buf_len - len, "ul_ofdma_rx_stbc = %u\n", + le32_to_cpu(htt_stats_buf->ul_ofdma_rx_stbc)); + len += scnprintf(buf + len, buf_len - len, "ul_ofdma_rx_ldpc = %u\n", + le32_to_cpu(htt_stats_buf->ul_ofdma_rx_ldpc)); + + len += scnprintf(buf + len, buf_len - len, "rx_ulofdma_data_ru_size_ppdu = "); + for (j = 0; j < ATH12K_HTT_RX_NUM_RU_SIZE_CNTRS; j++) + len += scnprintf(buf + len, buf_len - len, " %s:%u ", + ath12k_htt_ax_tx_rx_ru_size_to_str(j), + le32_to_cpu(htt_stats_buf->data_ru_size_ppdu[j])); + len += scnprintf(buf + len, buf_len - len, "\n"); + + len += scnprintf(buf + len, buf_len - len, + "rx_ulofdma_non_data_ru_size_ppdu = "); + for (j = 0; j < ATH12K_HTT_RX_NUM_RU_SIZE_CNTRS; j++) + len += scnprintf(buf + len, buf_len - len, " %s:%u ", + ath12k_htt_ax_tx_rx_ru_size_to_str(j), + le32_to_cpu(htt_stats_buf->non_data_ru_size_ppdu[j])); + len += scnprintf(buf + len, buf_len - len, "\n"); + + len += print_array_to_buf(buf, len, "rx_rssi_track_sta_aid", + htt_stats_buf->uplink_sta_aid, + ATH12K_HTT_RX_UL_MAX_UPLINK_RSSI_TRACK, "\n"); + len += print_array_to_buf(buf, len, "rx_sta_target_rssi", + htt_stats_buf->uplink_sta_target_rssi, + ATH12K_HTT_RX_UL_MAX_UPLINK_RSSI_TRACK, "\n"); + len += print_array_to_buf(buf, len, "rx_sta_fd_rssi", + htt_stats_buf->uplink_sta_fd_rssi, + ATH12K_HTT_RX_UL_MAX_UPLINK_RSSI_TRACK, "\n"); + len += print_array_to_buf(buf, len, "rx_sta_power_headroom", + htt_stats_buf->uplink_sta_power_headroom, + ATH12K_HTT_RX_UL_MAX_UPLINK_RSSI_TRACK, "\n"); + len += scnprintf(buf + len, buf_len - len, + "ul_ofdma_basic_trigger_rx_qos_null_only = %u\n\n", + le32_to_cpu(htt_stats_buf->ul_ofdma_bsc_trig_rx_qos_null_only)); + + stats_req->buf_len = len; +} + +static void +ath12k_htt_print_ul_ofdma_user_stats(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_rx_pdev_ul_ofdma_user_stats_tlv *htt_stats_buf = tag_buf; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + u32 len = stats_req->buf_len; + u8 *buf = stats_req->buf; + u32 user_index; + + if (tag_len < sizeof(*htt_stats_buf)) + return; + + user_index = __le32_to_cpu(htt_stats_buf->user_index); + + if (!user_index) + len += scnprintf(buf + len, buf_len - len, + "HTT_RX_PDEV_UL_OFDMA_USER_STAS_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "rx_ulofdma_non_data_ppdu_%u = %u\n", + user_index, + le32_to_cpu(htt_stats_buf->rx_ulofdma_non_data_ppdu)); + len += scnprintf(buf + len, buf_len - len, "rx_ulofdma_data_ppdu_%u = %u\n", + user_index, + le32_to_cpu(htt_stats_buf->rx_ulofdma_data_ppdu)); + len += scnprintf(buf + len, buf_len - len, "rx_ulofdma_mpdu_ok_%u = %u\n", + user_index, + le32_to_cpu(htt_stats_buf->rx_ulofdma_mpdu_ok)); + len += scnprintf(buf + len, buf_len - len, "rx_ulofdma_mpdu_fail_%u = %u\n", + user_index, + le32_to_cpu(htt_stats_buf->rx_ulofdma_mpdu_fail)); + len += scnprintf(buf + len, buf_len - len, + "rx_ulofdma_non_data_nusers_%u = %u\n", user_index, + le32_to_cpu(htt_stats_buf->rx_ulofdma_non_data_nusers)); + len += scnprintf(buf + len, buf_len - len, "rx_ulofdma_data_nusers_%u = %u\n\n", + user_index, + le32_to_cpu(htt_stats_buf->rx_ulofdma_data_nusers)); + + stats_req->buf_len = len; +} + +static void +ath12k_htt_print_ul_mumimo_trig_stats(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct 
ath12k_htt_rx_ul_mumimo_trig_stats_tlv *htt_stats_buf = tag_buf; + char str_buf[ATH12K_HTT_MAX_STRING_LEN] = {0}; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + u32 len = stats_req->buf_len; + u8 *buf = stats_req->buf; + u32 mac_id; + u16 index; + u8 i, j; + + if (tag_len < sizeof(*htt_stats_buf)) + return; + + mac_id = __le32_to_cpu(htt_stats_buf->mac_id__word); + + len += scnprintf(buf + len, buf_len - len, + "HTT_RX_PDEV_UL_MUMIMO_TRIG_STATS_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n", + u32_get_bits(mac_id, ATH12K_HTT_STATS_MAC_ID)); + len += scnprintf(buf + len, buf_len - len, "rx_11ax_ul_mumimo = %u\n", + le32_to_cpu(htt_stats_buf->rx_11ax_ul_mumimo)); + index = 0; + memset(str_buf, 0x0, ATH12K_HTT_MAX_STRING_LEN); + for (i = 0; i < ATH12K_HTT_RX_NUM_MCS_CNTRS; i++) + index += scnprintf(&str_buf[index], ATH12K_HTT_MAX_STRING_LEN - index, + " %u:%u,", i, + le32_to_cpu(htt_stats_buf->ul_mumimo_rx_mcs[i])); + + for (i = 0; i < ATH12K_HTT_RX_NUM_EXTRA_MCS_CNTRS; i++) + index += scnprintf(&str_buf[index], ATH12K_HTT_MAX_STRING_LEN - index, + " %u:%u,", i + ATH12K_HTT_RX_NUM_MCS_CNTRS, + le32_to_cpu(htt_stats_buf->ul_mumimo_rx_mcs_ext[i])); + str_buf[--index] = '\0'; + len += scnprintf(buf + len, buf_len - len, "ul_mumimo_rx_mcs = %s\n", str_buf); + + for (j = 0; j < ATH12K_HTT_RX_NUM_GI_CNTRS; j++) { + index = 0; + memset(&str_buf[index], 0x0, ATH12K_HTT_MAX_STRING_LEN); + for (i = 0; i < ATH12K_HTT_RX_NUM_MCS_CNTRS; i++) + index += scnprintf(&str_buf[index], + ATH12K_HTT_MAX_STRING_LEN - index, + " %u:%u,", i, + le32_to_cpu(htt_stats_buf->ul_rx_gi[j][i])); + + for (i = 0; i < ATH12K_HTT_RX_NUM_EXTRA_MCS_CNTRS; i++) + index += scnprintf(&str_buf[index], + ATH12K_HTT_MAX_STRING_LEN - index, + " %u:%u,", i + ATH12K_HTT_RX_NUM_MCS_CNTRS, + le32_to_cpu(htt_stats_buf->ul_gi_ext[j][i])); + str_buf[--index] = '\0'; + len += scnprintf(buf + len, buf_len - len, + "ul_mumimo_rx_gi_%u = %s\n", j, str_buf); + } + + index = 0; + memset(str_buf, 0x0, ATH12K_HTT_MAX_STRING_LEN); + len += print_array_to_buf_index(buf, len, "ul_mumimo_rx_nss", 1, + htt_stats_buf->ul_mumimo_rx_nss, + ATH12K_HTT_RX_NUM_SPATIAL_STREAMS, "\n"); + + len += print_array_to_buf(buf, len, "ul_mumimo_rx_bw", + htt_stats_buf->ul_mumimo_rx_bw, + ATH12K_HTT_RX_NUM_BW_CNTRS, "\n"); + for (i = 0; i < ATH12K_HTT_RX_NUM_REDUCED_CHAN_TYPES; i++) { + index = 0; + memset(str_buf, 0x0, ATH12K_HTT_MAX_STRING_LEN); + for (j = 0; j < ATH12K_HTT_RX_NUM_BW_CNTRS; j++) + index += scnprintf(&str_buf[index], + ATH12K_HTT_MAX_STRING_LEN - index, + " %u:%u,", j, + le32_to_cpu(htt_stats_buf->red_bw[i][j])); + str_buf[--index] = '\0'; + len += scnprintf(buf + len, buf_len - len, "%s = %s\n", + i == 0 ? 
"half_ul_mumimo_rx_bw" : + "quarter_ul_mumimo_rx_bw", str_buf); + } + + len += scnprintf(buf + len, buf_len - len, "ul_mumimo_rx_stbc = %u\n", + le32_to_cpu(htt_stats_buf->ul_mumimo_rx_stbc)); + len += scnprintf(buf + len, buf_len - len, "ul_mumimo_rx_ldpc = %u\n", + le32_to_cpu(htt_stats_buf->ul_mumimo_rx_ldpc)); + + for (j = 0; j < ATH12K_HTT_RX_NUM_SPATIAL_STREAMS; j++) { + len += scnprintf(buf + len, buf_len - len, + "rx_ul_mumimo_rssi_in_dbm: chain%u ", j); + len += print_array_to_buf_s8(buf, len, "", 0, + htt_stats_buf->ul_rssi[j], + ATH12K_HTT_RX_NUM_BW_CNTRS, "\n"); + } + + for (j = 0; j < ATH12K_HTT_TX_UL_MUMIMO_USER_STATS; j++) { + len += scnprintf(buf + len, buf_len - len, + "rx_ul_mumimo_target_rssi: user_%u ", j); + len += print_array_to_buf_s8(buf, len, "", 0, + htt_stats_buf->tgt_rssi[j], + ATH12K_HTT_RX_NUM_BW_CNTRS, "\n"); + } + + for (j = 0; j < ATH12K_HTT_TX_UL_MUMIMO_USER_STATS; j++) { + len += scnprintf(buf + len, buf_len - len, + "rx_ul_mumimo_fd_rssi: user_%u ", j); + len += print_array_to_buf_s8(buf, len, "", 0, + htt_stats_buf->fd[j], + ATH12K_HTT_RX_NUM_SPATIAL_STREAMS, "\n"); + } + + for (j = 0; j < ATH12K_HTT_TX_UL_MUMIMO_USER_STATS; j++) { + len += scnprintf(buf + len, buf_len - len, + "rx_ulmumimo_pilot_evm_db_mean: user_%u ", j); + len += print_array_to_buf_s8(buf, len, "", 0, + htt_stats_buf->db[j], + ATH12K_HTT_RX_NUM_SPATIAL_STREAMS, "\n"); + } + + len += scnprintf(buf + len, buf_len - len, + "ul_mumimo_basic_trigger_rx_qos_null_only = %u\n\n", + le32_to_cpu(htt_stats_buf->mumimo_bsc_trig_rx_qos_null_only)); + + stats_req->buf_len = len; +} + +static void +ath12k_htt_print_rx_fse_stats_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_rx_fse_stats_tlv *htt_stats_buf = tag_buf; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + u32 len = stats_req->buf_len; + u8 *buf = stats_req->buf; + + if (tag_len < sizeof(*htt_stats_buf)) + return; + + len += scnprintf(buf + len, buf_len - len, "HTT_STATS_RX_FSE_STATS_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "=== Software RX FSE STATS ===\n"); + len += scnprintf(buf + len, buf_len - len, "Enable count = %u\n", + le32_to_cpu(htt_stats_buf->fse_enable_cnt)); + len += scnprintf(buf + len, buf_len - len, "Disable count = %u\n", + le32_to_cpu(htt_stats_buf->fse_disable_cnt)); + len += scnprintf(buf + len, buf_len - len, "Cache invalidate entry count = %u\n", + le32_to_cpu(htt_stats_buf->fse_cache_invalidate_entry_cnt)); + len += scnprintf(buf + len, buf_len - len, "Full cache invalidate count = %u\n", + le32_to_cpu(htt_stats_buf->fse_full_cache_invalidate_cnt)); + + len += scnprintf(buf + len, buf_len - len, "\n=== Hardware RX FSE STATS ===\n"); + len += scnprintf(buf + len, buf_len - len, "Cache hits count = %u\n", + le32_to_cpu(htt_stats_buf->fse_num_cache_hits_cnt)); + len += scnprintf(buf + len, buf_len - len, "Cache no. 
of searches = %u\n", + le32_to_cpu(htt_stats_buf->fse_num_searches_cnt)); + len += scnprintf(buf + len, buf_len - len, "Cache occupancy peak count:\n"); + len += scnprintf(buf + len, buf_len - len, "[0] = %u [1-16] = %u [17-32] = %u ", + le32_to_cpu(htt_stats_buf->fse_cache_occupancy_peak_cnt[0]), + le32_to_cpu(htt_stats_buf->fse_cache_occupancy_peak_cnt[1]), + le32_to_cpu(htt_stats_buf->fse_cache_occupancy_peak_cnt[2])); + len += scnprintf(buf + len, buf_len - len, "[33-48] = %u [49-64] = %u ", + le32_to_cpu(htt_stats_buf->fse_cache_occupancy_peak_cnt[3]), + le32_to_cpu(htt_stats_buf->fse_cache_occupancy_peak_cnt[4])); + len += scnprintf(buf + len, buf_len - len, "[65-80] = %u [81-96] = %u ", + le32_to_cpu(htt_stats_buf->fse_cache_occupancy_peak_cnt[5]), + le32_to_cpu(htt_stats_buf->fse_cache_occupancy_peak_cnt[6])); + len += scnprintf(buf + len, buf_len - len, "[97-112] = %u [113-127] = %u ", + le32_to_cpu(htt_stats_buf->fse_cache_occupancy_peak_cnt[7]), + le32_to_cpu(htt_stats_buf->fse_cache_occupancy_peak_cnt[8])); + len += scnprintf(buf + len, buf_len - len, "[128] = %u\n", + le32_to_cpu(htt_stats_buf->fse_cache_occupancy_peak_cnt[9])); + len += scnprintf(buf + len, buf_len - len, "Cache occupancy current count:\n"); + len += scnprintf(buf + len, buf_len - len, "[0] = %u [1-16] = %u [17-32] = %u ", + le32_to_cpu(htt_stats_buf->fse_cache_occupancy_curr_cnt[0]), + le32_to_cpu(htt_stats_buf->fse_cache_occupancy_curr_cnt[1]), + le32_to_cpu(htt_stats_buf->fse_cache_occupancy_curr_cnt[2])); + len += scnprintf(buf + len, buf_len - len, "[33-48] = %u [49-64] = %u ", + le32_to_cpu(htt_stats_buf->fse_cache_occupancy_curr_cnt[3]), + le32_to_cpu(htt_stats_buf->fse_cache_occupancy_curr_cnt[4])); + len += scnprintf(buf + len, buf_len - len, "[65-80] = %u [81-96] = %u ", + le32_to_cpu(htt_stats_buf->fse_cache_occupancy_curr_cnt[5]), + le32_to_cpu(htt_stats_buf->fse_cache_occupancy_curr_cnt[6])); + len += scnprintf(buf + len, buf_len - len, "[97-112] = %u [113-127] = %u ", + le32_to_cpu(htt_stats_buf->fse_cache_occupancy_curr_cnt[7]), + le32_to_cpu(htt_stats_buf->fse_cache_occupancy_curr_cnt[8])); + len += scnprintf(buf + len, buf_len - len, "[128] = %u\n", + le32_to_cpu(htt_stats_buf->fse_cache_occupancy_curr_cnt[9])); + len += scnprintf(buf + len, buf_len - len, "Cache search square count:\n"); + len += scnprintf(buf + len, buf_len - len, "[0] = %u [1-50] = %u [51-100] = %u ", + le32_to_cpu(htt_stats_buf->fse_search_stat_square_cnt[0]), + le32_to_cpu(htt_stats_buf->fse_search_stat_square_cnt[1]), + le32_to_cpu(htt_stats_buf->fse_search_stat_square_cnt[2])); + len += scnprintf(buf + len, buf_len - len, "[101-200] = %u [201-255] = %u ", + le32_to_cpu(htt_stats_buf->fse_search_stat_square_cnt[3]), + le32_to_cpu(htt_stats_buf->fse_search_stat_square_cnt[4])); + len += scnprintf(buf + len, buf_len - len, "[256] = %u\n", + le32_to_cpu(htt_stats_buf->fse_search_stat_square_cnt[5])); + len += scnprintf(buf + len, buf_len - len, "Cache search peak pending count:\n"); + len += scnprintf(buf + len, buf_len - len, "[0] = %u [1-2] = %u [3-4] = %u ", + le32_to_cpu(htt_stats_buf->fse_search_stat_peak_cnt[0]), + le32_to_cpu(htt_stats_buf->fse_search_stat_peak_cnt[1]), + le32_to_cpu(htt_stats_buf->fse_search_stat_peak_cnt[2])); + len += scnprintf(buf + len, buf_len - len, "[Greater/Equal to 5] = %u\n", + le32_to_cpu(htt_stats_buf->fse_search_stat_peak_cnt[3])); + len += scnprintf(buf + len, buf_len - len, "Cache search tot pending count:\n"); + len += scnprintf(buf + len, buf_len - len, "[0] = %u [1-2] = %u [3-4] = 
%u ", + le32_to_cpu(htt_stats_buf->fse_search_stat_pending_cnt[0]), + le32_to_cpu(htt_stats_buf->fse_search_stat_pending_cnt[1]), + le32_to_cpu(htt_stats_buf->fse_search_stat_pending_cnt[2])); + len += scnprintf(buf + len, buf_len - len, "[Greater/Equal to 5] = %u\n\n", + le32_to_cpu(htt_stats_buf->fse_search_stat_pending_cnt[3])); + + stats_req->buf_len = len; +} + static void ath12k_htt_print_pdev_tx_rate_txbf_stats_tlv(const void *tag_buf, u16 tag_len, struct debug_htt_stats_req *stats_req) @@ -3812,6 +4524,497 @@ ath12k_htt_print_pdev_mbssid_ctrl_frame_stats_tlv(const void *tag_buf, u16 tag_l stats_req->buf_len = len; } +static inline void +ath12k_htt_print_tx_pdev_rate_stats_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_tx_pdev_rate_stats_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + u8 i, j; + u32 mac_id_word; + + if (tag_len < sizeof(*htt_stats_buf)) + return; + + mac_id_word = le32_to_cpu(htt_stats_buf->mac_id_word); + + len += scnprintf(buf + len, buf_len - len, "HTT_TX_PDEV_RATE_STATS_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n", + u32_get_bits(mac_id_word, ATH12K_HTT_STATS_MAC_ID)); + len += scnprintf(buf + len, buf_len - len, "tx_ldpc = %u\n", + le32_to_cpu(htt_stats_buf->tx_ldpc)); + len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_tx_ldpc = %u\n", + le32_to_cpu(htt_stats_buf->ac_mu_mimo_tx_ldpc)); + len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_tx_ldpc = %u\n", + le32_to_cpu(htt_stats_buf->ax_mu_mimo_tx_ldpc)); + len += scnprintf(buf + len, buf_len - len, "ofdma_tx_ldpc = %u\n", + le32_to_cpu(htt_stats_buf->ofdma_tx_ldpc)); + len += scnprintf(buf + len, buf_len - len, "rts_cnt = %u\n", + le32_to_cpu(htt_stats_buf->rts_cnt)); + len += scnprintf(buf + len, buf_len - len, "rts_success = %u\n", + le32_to_cpu(htt_stats_buf->rts_success)); + len += scnprintf(buf + len, buf_len - len, "ack_rssi = %u\n", + le32_to_cpu(htt_stats_buf->ack_rssi)); + len += scnprintf(buf + len, buf_len - len, + "Legacy CCK Rates: 1 Mbps: %u, 2 Mbps: %u, 5.5 Mbps: %u, 12 Mbps: %u\n", + le32_to_cpu(htt_stats_buf->tx_legacy_cck_rate[0]), + le32_to_cpu(htt_stats_buf->tx_legacy_cck_rate[1]), + le32_to_cpu(htt_stats_buf->tx_legacy_cck_rate[2]), + le32_to_cpu(htt_stats_buf->tx_legacy_cck_rate[3])); + len += scnprintf(buf + len, buf_len - len, + "Legacy OFDM Rates: 6 Mbps: %u, 9 Mbps: %u, 12 Mbps: %u, 18 Mbps: %u\n" + " 24 Mbps: %u, 36 Mbps: %u, 48 Mbps: %u, 54 Mbps: %u\n", + le32_to_cpu(htt_stats_buf->tx_legacy_ofdm_rate[0]), + le32_to_cpu(htt_stats_buf->tx_legacy_ofdm_rate[1]), + le32_to_cpu(htt_stats_buf->tx_legacy_ofdm_rate[2]), + le32_to_cpu(htt_stats_buf->tx_legacy_ofdm_rate[3]), + le32_to_cpu(htt_stats_buf->tx_legacy_ofdm_rate[4]), + le32_to_cpu(htt_stats_buf->tx_legacy_ofdm_rate[5]), + le32_to_cpu(htt_stats_buf->tx_legacy_ofdm_rate[6]), + le32_to_cpu(htt_stats_buf->tx_legacy_ofdm_rate[7])); + len += scnprintf(buf + len, buf_len - len, "HE LTF: 1x: %u, 2x: %u, 4x: %u\n", + le32_to_cpu(htt_stats_buf->tx_he_ltf[1]), + le32_to_cpu(htt_stats_buf->tx_he_ltf[2]), + le32_to_cpu(htt_stats_buf->tx_he_ltf[3])); + + len += print_array_to_buf(buf, len, "tx_mcs", htt_stats_buf->tx_mcs, + ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, NULL); + for (j = 0; j < ATH12K_HTT_TX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS; j++) + len += scnprintf(buf + len, buf_len - len, ", %u:%u", + j + ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, + 
le32_to_cpu(htt_stats_buf->tx_mcs_ext[j])); + for (j = 0; j < ATH12K_HTT_TX_PDEV_STATS_NUM_EXTRA2_MCS_COUNTERS; j++) + len += scnprintf(buf + len, buf_len - len, ", %u:%u", + j + ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS + + ATH12K_HTT_TX_PDEV_STATS_NUM_EXTRA2_MCS_COUNTERS, + le32_to_cpu(htt_stats_buf->tx_mcs_ext_2[j])); + len += scnprintf(buf + len, buf_len - len, "\n"); + + len += print_array_to_buf(buf, len, "ax_mu_mimo_tx_mcs", + htt_stats_buf->ax_mu_mimo_tx_mcs, + ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, NULL); + for (j = 0; j < ATH12K_HTT_TX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS; j++) + len += scnprintf(buf + len, buf_len - len, ", %u:%u", + j + ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, + le32_to_cpu(htt_stats_buf->ax_mu_mimo_tx_mcs_ext[j])); + len += scnprintf(buf + len, buf_len - len, "\n"); + + len += print_array_to_buf(buf, len, "ofdma_tx_mcs", + htt_stats_buf->ofdma_tx_mcs, + ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, NULL); + for (j = 0; j < ATH12K_HTT_TX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS; j++) + len += scnprintf(buf + len, buf_len - len, ", %u:%u", + j + ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, + le32_to_cpu(htt_stats_buf->ofdma_tx_mcs_ext[j])); + len += scnprintf(buf + len, buf_len - len, "\n"); + + len += scnprintf(buf + len, buf_len - len, "tx_nss ="); + for (j = 1; j <= ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++) + len += scnprintf(buf + len, buf_len - len, " %u:%u,", + j, le32_to_cpu(htt_stats_buf->tx_nss[j - 1])); + len--; + len += scnprintf(buf + len, buf_len - len, "\n"); + + len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_tx_nss ="); + for (j = 1; j <= ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++) + len += scnprintf(buf + len, buf_len - len, " %u:%u,", + j, le32_to_cpu(htt_stats_buf->ac_mu_mimo_tx_nss[j - 1])); + len--; + len += scnprintf(buf + len, buf_len - len, "\n"); + + len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_tx_nss ="); + for (j = 1; j <= ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++) + len += scnprintf(buf + len, buf_len - len, " %u:%u,", + j, le32_to_cpu(htt_stats_buf->ax_mu_mimo_tx_nss[j - 1])); + len--; + len += scnprintf(buf + len, buf_len - len, "\n"); + + len += scnprintf(buf + len, buf_len - len, "ofdma_tx_nss ="); + for (j = 1; j <= ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++) + len += scnprintf(buf + len, buf_len - len, " %u:%u,", + j, le32_to_cpu(htt_stats_buf->ofdma_tx_nss[j - 1])); + len--; + len += scnprintf(buf + len, buf_len - len, "\n"); + + len += print_array_to_buf(buf, len, "tx_bw", htt_stats_buf->tx_bw, + ATH12K_HTT_TX_PDEV_STATS_NUM_BW_COUNTERS, NULL); + len += scnprintf(buf + len, buf_len - len, ", %u:%u\n", + ATH12K_HTT_TX_PDEV_STATS_NUM_BW_COUNTERS, + le32_to_cpu(htt_stats_buf->tx_bw_320mhz)); + + len += print_array_to_buf(buf, len, "tx_stbc", + htt_stats_buf->tx_stbc, + ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, NULL); + for (j = 0; j < ATH12K_HTT_TX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS; j++) + len += scnprintf(buf + len, buf_len - len, ", %u:%u", + j + ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, + le32_to_cpu(htt_stats_buf->tx_stbc_ext[j])); + len += scnprintf(buf + len, buf_len - len, "\n"); + + for (j = 0; j < ATH12K_HTT_TX_PDEV_STATS_NUM_GI_COUNTERS; j++) { + len += scnprintf(buf + len, (buf_len - len), + "tx_gi[%u] =", j); + len += print_array_to_buf(buf, len, NULL, htt_stats_buf->tx_gi[j], + ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, + NULL); + for (i = 0; i < ATH12K_HTT_TX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS; i++) + len += scnprintf(buf + len, buf_len - len, ", %u:%u", + i + 
ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, + le32_to_cpu(htt_stats_buf->tx_gi_ext[j][i])); + len += scnprintf(buf + len, buf_len - len, "\n"); + } + + for (j = 0; j < ATH12K_HTT_TX_PDEV_STATS_NUM_GI_COUNTERS; j++) { + len += scnprintf(buf + len, (buf_len - len), + "ac_mu_mimo_tx_gi[%u] =", j); + len += print_array_to_buf(buf, len, NULL, + htt_stats_buf->ac_mu_mimo_tx_gi[j], + ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, + "\n"); + } + + for (j = 0; j < ATH12K_HTT_TX_PDEV_STATS_NUM_GI_COUNTERS; j++) { + len += scnprintf(buf + len, (buf_len - len), + "ax_mu_mimo_tx_gi[%u] =", j); + len += print_array_to_buf(buf, len, NULL, htt_stats_buf->ax_mimo_tx_gi[j], + ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, + NULL); + for (i = 0; i < ATH12K_HTT_TX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS; i++) + len += scnprintf(buf + len, buf_len - len, ", %u:%u", + i + ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, + le32_to_cpu(htt_stats_buf->ax_tx_gi_ext[j][i])); + len += scnprintf(buf + len, buf_len - len, "\n"); + } + + for (j = 0; j < ATH12K_HTT_TX_PDEV_STATS_NUM_GI_COUNTERS; j++) { + len += scnprintf(buf + len, (buf_len - len), + "ofdma_tx_gi[%u] = ", j); + len += print_array_to_buf(buf, len, NULL, htt_stats_buf->ofdma_tx_gi[j], + ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, + NULL); + for (i = 0; i < ATH12K_HTT_TX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS; i++) + len += scnprintf(buf + len, buf_len - len, ", %u:%u", + i + ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, + le32_to_cpu(htt_stats_buf->ofd_tx_gi_ext[j][i])); + len += scnprintf(buf + len, buf_len - len, "\n"); + } + + len += print_array_to_buf(buf, len, "tx_su_mcs", htt_stats_buf->tx_su_mcs, + ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n"); + len += print_array_to_buf(buf, len, "tx_mu_mcs", htt_stats_buf->tx_mu_mcs, + ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n"); + len += print_array_to_buf(buf, len, "ac_mu_mimo_tx_mcs", + htt_stats_buf->ac_mu_mimo_tx_mcs, + ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n"); + len += print_array_to_buf(buf, len, "ac_mu_mimo_tx_bw", + htt_stats_buf->ac_mu_mimo_tx_bw, + ATH12K_HTT_TX_PDEV_STATS_NUM_BW_COUNTERS, "\n"); + len += print_array_to_buf(buf, len, "ax_mu_mimo_tx_bw", + htt_stats_buf->ax_mu_mimo_tx_bw, + ATH12K_HTT_TX_PDEV_STATS_NUM_BW_COUNTERS, "\n"); + len += print_array_to_buf(buf, len, "ofdma_tx_bw", + htt_stats_buf->ofdma_tx_bw, + ATH12K_HTT_TX_PDEV_STATS_NUM_BW_COUNTERS, "\n"); + len += print_array_to_buf(buf, len, "tx_pream", htt_stats_buf->tx_pream, + ATH12K_HTT_TX_PDEV_STATS_NUM_PREAMBLE_TYPES, "\n"); + len += print_array_to_buf(buf, len, "tx_dcm", htt_stats_buf->tx_dcm, + ATH12K_HTT_TX_PDEV_STATS_NUM_DCM_COUNTERS, "\n"); + + stats_req->buf_len = len; +} + +static inline void +ath12k_htt_print_rx_pdev_rate_stats_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_rx_pdev_rate_stats_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + u8 i, j; + u32 mac_id_word; + + if (tag_len < sizeof(*htt_stats_buf)) + return; + + mac_id_word = le32_to_cpu(htt_stats_buf->mac_id_word); + + len += scnprintf(buf + len, buf_len - len, "HTT_RX_PDEV_RATE_STATS_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n", + u32_get_bits(mac_id_word, ATH12K_HTT_STATS_MAC_ID)); + len += scnprintf(buf + len, buf_len - len, "nsts = %u\n", + le32_to_cpu(htt_stats_buf->nsts)); + len += scnprintf(buf + len, buf_len - len, "rx_ldpc = %u\n", + le32_to_cpu(htt_stats_buf->rx_ldpc)); + len += scnprintf(buf + 
len, buf_len - len, "rts_cnt = %u\n", + le32_to_cpu(htt_stats_buf->rts_cnt)); + len += scnprintf(buf + len, buf_len - len, "rssi_mgmt = %u\n", + le32_to_cpu(htt_stats_buf->rssi_mgmt)); + len += scnprintf(buf + len, buf_len - len, "rssi_data = %u\n", + le32_to_cpu(htt_stats_buf->rssi_data)); + len += scnprintf(buf + len, buf_len - len, "rssi_comb = %u\n", + le32_to_cpu(htt_stats_buf->rssi_comb)); + len += scnprintf(buf + len, buf_len - len, "rssi_in_dbm = %d\n", + le32_to_cpu(htt_stats_buf->rssi_in_dbm)); + len += scnprintf(buf + len, buf_len - len, "rx_evm_nss_count = %u\n", + le32_to_cpu(htt_stats_buf->nss_count)); + len += scnprintf(buf + len, buf_len - len, "rx_evm_pilot_count = %u\n", + le32_to_cpu(htt_stats_buf->pilot_count)); + len += scnprintf(buf + len, buf_len - len, "rx_11ax_su_ext = %u\n", + le32_to_cpu(htt_stats_buf->rx_11ax_su_ext)); + len += scnprintf(buf + len, buf_len - len, "rx_11ac_mumimo = %u\n", + le32_to_cpu(htt_stats_buf->rx_11ac_mumimo)); + len += scnprintf(buf + len, buf_len - len, "rx_11ax_mumimo = %u\n", + le32_to_cpu(htt_stats_buf->rx_11ax_mumimo)); + len += scnprintf(buf + len, buf_len - len, "rx_11ax_ofdma = %u\n", + le32_to_cpu(htt_stats_buf->rx_11ax_ofdma)); + len += scnprintf(buf + len, buf_len - len, "txbf = %u\n", + le32_to_cpu(htt_stats_buf->txbf)); + len += scnprintf(buf + len, buf_len - len, "rx_su_ndpa = %u\n", + le32_to_cpu(htt_stats_buf->rx_su_ndpa)); + len += scnprintf(buf + len, buf_len - len, "rx_mu_ndpa = %u\n", + le32_to_cpu(htt_stats_buf->rx_mu_ndpa)); + len += scnprintf(buf + len, buf_len - len, "rx_br_poll = %u\n", + le32_to_cpu(htt_stats_buf->rx_br_poll)); + len += scnprintf(buf + len, buf_len - len, "rx_active_dur_us_low = %u\n", + le32_to_cpu(htt_stats_buf->rx_active_dur_us_low)); + len += scnprintf(buf + len, buf_len - len, "rx_active_dur_us_high = %u\n", + le32_to_cpu(htt_stats_buf->rx_active_dur_us_high)); + len += scnprintf(buf + len, buf_len - len, "rx_11ax_ul_ofdma = %u\n", + le32_to_cpu(htt_stats_buf->rx_11ax_ul_ofdma)); + len += scnprintf(buf + len, buf_len - len, "ul_ofdma_rx_stbc = %u\n", + le32_to_cpu(htt_stats_buf->ul_ofdma_rx_stbc)); + len += scnprintf(buf + len, buf_len - len, "ul_ofdma_rx_ldpc = %u\n", + le32_to_cpu(htt_stats_buf->ul_ofdma_rx_ldpc)); + len += scnprintf(buf + len, buf_len - len, "per_chain_rssi_pkt_type = %#x\n", + le32_to_cpu(htt_stats_buf->per_chain_rssi_pkt_type)); + + len += print_array_to_buf(buf, len, "rx_nss", htt_stats_buf->rx_nss, + ATH12K_HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS, "\n"); + len += print_array_to_buf(buf, len, "rx_dcm", htt_stats_buf->rx_dcm, + ATH12K_HTT_RX_PDEV_STATS_NUM_DCM_COUNTERS, "\n"); + len += print_array_to_buf(buf, len, "rx_stbc", htt_stats_buf->rx_stbc, + ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS, "\n"); + len += print_array_to_buf(buf, len, "rx_bw", htt_stats_buf->rx_bw, + ATH12K_HTT_RX_PDEV_STATS_NUM_BW_COUNTERS, "\n"); + len += print_array_to_buf(buf, len, "rx_pream", htt_stats_buf->rx_pream, + ATH12K_HTT_RX_PDEV_STATS_NUM_PREAMBLE_TYPES, "\n"); + len += print_array_to_buf(buf, len, "rx_11ax_su_txbf_mcs", + htt_stats_buf->rx_11ax_su_txbf_mcs, + ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS, "\n"); + len += print_array_to_buf(buf, len, "rx_11ax_mu_txbf_mcs", + htt_stats_buf->rx_11ax_mu_txbf_mcs, + ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS, "\n"); + len += print_array_to_buf(buf, len, "rx_legacy_cck_rate", + htt_stats_buf->rx_legacy_cck_rate, + ATH12K_HTT_RX_PDEV_STATS_NUM_LEGACY_CCK_STATS, "\n"); + len += print_array_to_buf(buf, len, "rx_legacy_ofdm_rate", + 
htt_stats_buf->rx_legacy_ofdm_rate, + ATH12K_HTT_RX_PDEV_STATS_NUM_LEGACY_OFDM_STATS, "\n"); + len += print_array_to_buf(buf, len, "ul_ofdma_rx_mcs", + htt_stats_buf->ul_ofdma_rx_mcs, + ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS, "\n"); + len += print_array_to_buf(buf, len, "ul_ofdma_rx_nss", + htt_stats_buf->ul_ofdma_rx_nss, + ATH12K_HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS, "\n"); + len += print_array_to_buf(buf, len, "ul_ofdma_rx_bw", + htt_stats_buf->ul_ofdma_rx_bw, + ATH12K_HTT_RX_PDEV_STATS_NUM_BW_COUNTERS, "\n"); + len += print_array_to_buf(buf, len, "rx_ulofdma_non_data_ppdu", + htt_stats_buf->rx_ulofdma_non_data_ppdu, + ATH12K_HTT_RX_PDEV_MAX_OFDMA_NUM_USER, "\n"); + len += print_array_to_buf(buf, len, "rx_ulofdma_data_ppdu", + htt_stats_buf->rx_ulofdma_data_ppdu, + ATH12K_HTT_RX_PDEV_MAX_OFDMA_NUM_USER, "\n"); + len += print_array_to_buf(buf, len, "rx_ulofdma_mpdu_ok", + htt_stats_buf->rx_ulofdma_mpdu_ok, + ATH12K_HTT_RX_PDEV_MAX_OFDMA_NUM_USER, "\n"); + len += print_array_to_buf(buf, len, "rx_ulofdma_mpdu_fail", + htt_stats_buf->rx_ulofdma_mpdu_fail, + ATH12K_HTT_RX_PDEV_MAX_OFDMA_NUM_USER, "\n"); + len += print_array_to_buf(buf, len, "rx_ulofdma_non_data_nusers", + htt_stats_buf->rx_ulofdma_non_data_nusers, + ATH12K_HTT_RX_PDEV_MAX_OFDMA_NUM_USER, "\n"); + len += print_array_to_buf(buf, len, "rx_ulofdma_data_nusers", + htt_stats_buf->rx_ulofdma_data_nusers, + ATH12K_HTT_RX_PDEV_MAX_OFDMA_NUM_USER, "\n"); + len += print_array_to_buf(buf, len, "rx_11ax_dl_ofdma_mcs", + htt_stats_buf->rx_11ax_dl_ofdma_mcs, + ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS, "\n"); + len += print_array_to_buf(buf, len, "rx_11ax_dl_ofdma_ru", + htt_stats_buf->rx_11ax_dl_ofdma_ru, + ATH12K_HTT_RX_PDEV_STATS_NUM_RU_SIZE_COUNTERS, "\n"); + len += print_array_to_buf(buf, len, "rx_ulmumimo_non_data_ppdu", + htt_stats_buf->rx_ulmumimo_non_data_ppdu, + ATH12K_HTT_RX_PDEV_MAX_ULMUMIMO_NUM_USER, "\n"); + len += print_array_to_buf(buf, len, "rx_ulmumimo_data_ppdu", + htt_stats_buf->rx_ulmumimo_data_ppdu, + ATH12K_HTT_RX_PDEV_MAX_ULMUMIMO_NUM_USER, "\n"); + len += print_array_to_buf(buf, len, "rx_ulmumimo_mpdu_ok", + htt_stats_buf->rx_ulmumimo_mpdu_ok, + ATH12K_HTT_RX_PDEV_MAX_ULMUMIMO_NUM_USER, "\n"); + len += print_array_to_buf(buf, len, "rx_ulmumimo_mpdu_fail", + htt_stats_buf->rx_ulmumimo_mpdu_fail, + ATH12K_HTT_RX_PDEV_MAX_ULMUMIMO_NUM_USER, "\n"); + + len += print_array_to_buf(buf, len, "rx_mcs", + htt_stats_buf->rx_mcs, + ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS, NULL); + for (j = 0; j < ATH12K_HTT_TX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS; j++) + len += scnprintf(buf + len, buf_len - len, ", %u:%u", + j + ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS, + le32_to_cpu(htt_stats_buf->rx_mcs_ext[j])); + len += scnprintf(buf + len, buf_len - len, "\n"); + + for (j = 0; j < ATH12K_HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++) { + len += scnprintf(buf + len, buf_len - len, + "pilot_evm_db[%u] =", j); + len += print_array_to_buf(buf, len, NULL, + htt_stats_buf->rx_pil_evm_db[j], + ATH12K_HTT_RX_PDEV_STATS_RXEVM_MAX_PILOTS_NSS, + "\n"); + } + + len += scnprintf(buf + len, buf_len - len, "pilot_evm_db_mean ="); + for (i = 0; i < ATH12K_HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; i++) + len += scnprintf(buf + len, + buf_len - len, + " %u:%d,", i, + le32_to_cpu(htt_stats_buf->rx_pilot_evm_db_mean[i])); + len--; + len += scnprintf(buf + len, buf_len - len, "\n"); + + for (j = 0; j < ATH12K_HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++) { + len += scnprintf(buf + len, buf_len - len, + "rssi_chain_in_db[%u] = ", j); + for (i = 0; i < 
ATH12K_HTT_RX_PDEV_STATS_NUM_BW_COUNTERS; i++) + len += scnprintf(buf + len, + buf_len - len, + " %u: %d,", i, + htt_stats_buf->rssi_chain_in_db[j][i]); + len--; + len += scnprintf(buf + len, buf_len - len, "\n"); + } + + for (j = 0; j < ATH12K_HTT_RX_PDEV_STATS_NUM_GI_COUNTERS; j++) { + len += scnprintf(buf + len, buf_len - len, + "rx_gi[%u] = ", j); + len += print_array_to_buf(buf, len, NULL, + htt_stats_buf->rx_gi[j], + ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS, + "\n"); + } + + for (j = 0; j < ATH12K_HTT_RX_PDEV_STATS_NUM_GI_COUNTERS; j++) { + len += scnprintf(buf + len, buf_len - len, + "ul_ofdma_rx_gi[%u] = ", j); + len += print_array_to_buf(buf, len, NULL, + htt_stats_buf->ul_ofdma_rx_gi[j], + ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS, + "\n"); + } + + for (j = 0; j < ATH12K_HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++) { + len += scnprintf(buf + len, buf_len - len, + "rx_ul_fd_rssi: nss[%u] = ", j); + for (i = 0; i < ATH12K_HTT_RX_PDEV_MAX_OFDMA_NUM_USER; i++) + len += scnprintf(buf + len, + buf_len - len, + " %u:%d,", + i, htt_stats_buf->rx_ul_fd_rssi[j][i]); + len--; + len += scnprintf(buf + len, buf_len - len, "\n"); + } + + for (j = 0; j < ATH12K_HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++) { + len += scnprintf(buf + len, buf_len - len, + "rx_per_chain_rssi_in_dbm[%u] =", j); + for (i = 0; i < ATH12K_HTT_RX_PDEV_STATS_NUM_BW_COUNTERS; i++) + len += scnprintf(buf + len, + buf_len - len, + " %u:%d,", + i, + htt_stats_buf->rx_per_chain_rssi_in_dbm[j][i]); + len--; + len += scnprintf(buf + len, buf_len - len, "\n"); + } + + stats_req->buf_len = len; +} + +static inline void +ath12k_htt_print_rx_pdev_rate_ext_stats_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_rx_pdev_rate_ext_stats_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + u8 j; + + if (tag_len < sizeof(*htt_stats_buf)) + return; + + len += scnprintf(buf + len, buf_len - len, "HTT_RX_PDEV_RATE_EXT_STATS_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "rssi_mgmt_in_dbm = %d\n", + le32_to_cpu(htt_stats_buf->rssi_mgmt_in_dbm)); + + len += print_array_to_buf(buf, len, "rx_stbc_ext", + htt_stats_buf->rx_stbc_ext, + ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS_EXT, "\n"); + len += print_array_to_buf(buf, len, "ul_ofdma_rx_mcs_ext", + htt_stats_buf->ul_ofdma_rx_mcs_ext, + ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS_EXT, "\n"); + len += print_array_to_buf(buf, len, "rx_11ax_su_txbf_mcs_ext", + htt_stats_buf->rx_11ax_su_txbf_mcs_ext, + ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS_EXT, "\n"); + len += print_array_to_buf(buf, len, "rx_11ax_mu_txbf_mcs_ext", + htt_stats_buf->rx_11ax_mu_txbf_mcs_ext, + ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS_EXT, "\n"); + len += print_array_to_buf(buf, len, "rx_11ax_dl_ofdma_mcs_ext", + htt_stats_buf->rx_11ax_dl_ofdma_mcs_ext, + ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS_EXT, "\n"); + len += print_array_to_buf(buf, len, "rx_bw_ext", + htt_stats_buf->rx_bw_ext, + ATH12K_HTT_RX_PDEV_STATS_NUM_BW_EXT2_COUNTERS, "\n"); + len += print_array_to_buf(buf, len, "rx_su_punctured_mode", + htt_stats_buf->rx_su_punctured_mode, + ATH12K_HTT_RX_PDEV_STATS_NUM_PUNCTURED_MODE_COUNTERS, + "\n"); + + len += print_array_to_buf(buf, len, "rx_mcs_ext", + htt_stats_buf->rx_mcs_ext, + ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS_EXT, + NULL); + for (j = 0; j < ATH12K_HTT_RX_PDEV_STATS_NUM_EXTRA2_MCS_COUNTERS; j++) + len += scnprintf(buf + len, buf_len - len, ", %u:%u", + j + 
ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS_EXT, + le32_to_cpu(htt_stats_buf->rx_mcs_ext_2[j])); + len += scnprintf(buf + len, buf_len - len, "\n"); + + for (j = 0; j < ATH12K_HTT_RX_PDEV_STATS_NUM_GI_COUNTERS; j++) { + len += scnprintf(buf + len, buf_len - len, + "rx_gi_ext[%u] = ", j); + len += print_array_to_buf(buf, len, NULL, + htt_stats_buf->rx_gi_ext[j], + ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS_EXT, + "\n"); + } + + for (j = 0; j < ATH12K_HTT_RX_PDEV_STATS_NUM_GI_COUNTERS; j++) { + len += scnprintf(buf + len, buf_len - len, + "ul_ofdma_rx_gi_ext[%u] = ", j); + len += print_array_to_buf(buf, len, NULL, + htt_stats_buf->ul_ofdma_rx_gi_ext[j], + ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS_EXT, + "\n"); + } + + stats_req->buf_len = len; +} + static int ath12k_dbg_htt_ext_stats_parse(struct ath12k_base *ab, u16 tag, u16 len, const void *tag_buf, void *user_data) @@ -3982,9 +5185,33 @@ static int ath12k_dbg_htt_ext_stats_parse(struct ath12k_base *ab, case HTT_STATS_PDEV_CCA_COUNTERS_TAG: ath12k_htt_print_pdev_stats_cca_counters_tlv(tag_buf, len, stats_req); break; + case HTT_STATS_TX_SOUNDING_STATS_TAG: + ath12k_htt_print_tx_sounding_stats_tlv(tag_buf, len, stats_req); + break; case HTT_STATS_PDEV_OBSS_PD_TAG: ath12k_htt_print_pdev_obss_pd_stats_tlv(tag_buf, len, stats_req); break; + case HTT_STATS_LATENCY_CTX_TAG: + ath12k_htt_print_latency_prof_ctx_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_LATENCY_CNT_TAG: + ath12k_htt_print_latency_prof_cnt(tag_buf, len, stats_req); + break; + case HTT_STATS_LATENCY_PROF_STATS_TAG: + ath12k_htt_print_latency_prof_stats_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_RX_PDEV_UL_TRIG_STATS_TAG: + ath12k_htt_print_ul_ofdma_trigger_stats(tag_buf, len, stats_req); + break; + case HTT_STATS_RX_PDEV_UL_OFDMA_USER_STATS_TAG: + ath12k_htt_print_ul_ofdma_user_stats(tag_buf, len, stats_req); + break; + case HTT_STATS_RX_PDEV_UL_MUMIMO_TRIG_STATS_TAG: + ath12k_htt_print_ul_mumimo_trig_stats(tag_buf, len, stats_req); + break; + case HTT_STATS_RX_FSE_STATS_TAG: + ath12k_htt_print_rx_fse_stats_tlv(tag_buf, len, stats_req); + break; case HTT_STATS_PDEV_TX_RATE_TXBF_STATS_TAG: ath12k_htt_print_pdev_tx_rate_txbf_stats_tlv(tag_buf, len, stats_req); break; @@ -4047,6 +5274,15 @@ static int ath12k_dbg_htt_ext_stats_parse(struct ath12k_base *ab, ath12k_htt_print_pdev_mbssid_ctrl_frame_stats_tlv(tag_buf, len, stats_req); break; + case HTT_STATS_TX_PDEV_RATE_STATS_TAG: + ath12k_htt_print_tx_pdev_rate_stats_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_RX_PDEV_RATE_STATS_TAG: + ath12k_htt_print_rx_pdev_rate_stats_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_RX_PDEV_RATE_EXT_STATS_TAG: + ath12k_htt_print_rx_pdev_rate_ext_stats_tlv(tag_buf, len, stats_req); + break; default: break; } diff --git a/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.h b/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.h index 4b59976fbc350..c2a02cf8a38b7 100644 --- a/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.h +++ b/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: BSD-3-Clause-Clear */ /* * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. - * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved. 
*/ #ifndef DEBUG_HTT_STATS_H @@ -123,30 +123,38 @@ struct ath12k_htt_extd_stats_msg { /* htt_dbg_ext_stats_type */ enum ath12k_dbg_htt_ext_stats_type { - ATH12K_DBG_HTT_EXT_STATS_RESET = 0, - ATH12K_DBG_HTT_EXT_STATS_PDEV_TX = 1, - ATH12K_DBG_HTT_EXT_STATS_PDEV_TX_SCHED = 4, - ATH12K_DBG_HTT_EXT_STATS_PDEV_ERROR = 5, - ATH12K_DBG_HTT_EXT_STATS_PDEV_TQM = 6, - ATH12K_DBG_HTT_EXT_STATS_TX_DE_INFO = 8, - ATH12K_DBG_HTT_EXT_STATS_TX_SELFGEN_INFO = 12, - ATH12K_DBG_HTT_EXT_STATS_SRNG_INFO = 15, - ATH12K_DBG_HTT_EXT_STATS_SFM_INFO = 16, - ATH12K_DBG_HTT_EXT_STATS_PDEV_TX_MU = 17, - ATH12K_DBG_HTT_EXT_STATS_PDEV_CCA_STATS = 19, - ATH12K_DBG_HTT_EXT_STATS_PDEV_OBSS_PD_STATS = 23, - ATH12K_DBG_HTT_EXT_STATS_PDEV_TX_RATE_TXBF = 31, - ATH12K_DBG_HTT_EXT_STATS_TXBF_OFDMA = 32, - ATH12K_DBG_HTT_EXT_STATS_DLPAGER_STATS = 36, - ATH12K_DBG_HTT_EXT_PHY_COUNTERS_AND_PHY_STATS = 37, - ATH12K_DBG_HTT_EXT_VDEVS_TXRX_STATS = 38, - ATH12K_DBG_HTT_EXT_PDEV_PER_STATS = 40, - ATH12K_DBG_HTT_EXT_AST_ENTRIES = 41, - ATH12K_DBG_HTT_EXT_STATS_SOC_ERROR = 45, - ATH12K_DBG_HTT_DBG_PDEV_PUNCTURE_STATS = 46, - ATH12K_DBG_HTT_EXT_STATS_PDEV_SCHED_ALGO = 49, - ATH12K_DBG_HTT_EXT_STATS_MANDATORY_MUOFDMA = 51, - ATH12K_DGB_HTT_EXT_STATS_PDEV_MBSSID_CTRL_FRAME = 54, + ATH12K_DBG_HTT_EXT_STATS_RESET = 0, + ATH12K_DBG_HTT_EXT_STATS_PDEV_TX = 1, + ATH12K_DBG_HTT_EXT_STATS_PDEV_TX_SCHED = 4, + ATH12K_DBG_HTT_EXT_STATS_PDEV_ERROR = 5, + ATH12K_DBG_HTT_EXT_STATS_PDEV_TQM = 6, + ATH12K_DBG_HTT_EXT_STATS_TX_DE_INFO = 8, + ATH12K_DBG_HTT_EXT_STATS_PDEV_TX_RATE = 9, + ATH12K_DBG_HTT_EXT_STATS_PDEV_RX_RATE = 10, + ATH12K_DBG_HTT_EXT_STATS_TX_SELFGEN_INFO = 12, + ATH12K_DBG_HTT_EXT_STATS_SRNG_INFO = 15, + ATH12K_DBG_HTT_EXT_STATS_SFM_INFO = 16, + ATH12K_DBG_HTT_EXT_STATS_PDEV_TX_MU = 17, + ATH12K_DBG_HTT_EXT_STATS_PDEV_CCA_STATS = 19, + ATH12K_DBG_HTT_EXT_STATS_TX_SOUNDING_INFO = 22, + ATH12K_DBG_HTT_EXT_STATS_PDEV_OBSS_PD_STATS = 23, + ATH12K_DBG_HTT_EXT_STATS_LATENCY_PROF_STATS = 25, + ATH12K_DBG_HTT_EXT_STATS_PDEV_UL_TRIG_STATS = 26, + ATH12K_DBG_HTT_EXT_STATS_PDEV_UL_MUMIMO_TRIG_STATS = 27, + ATH12K_DBG_HTT_EXT_STATS_FSE_RX = 28, + ATH12K_DBG_HTT_EXT_STATS_PDEV_RX_RATE_EXT = 30, + ATH12K_DBG_HTT_EXT_STATS_PDEV_TX_RATE_TXBF = 31, + ATH12K_DBG_HTT_EXT_STATS_TXBF_OFDMA = 32, + ATH12K_DBG_HTT_EXT_STATS_DLPAGER_STATS = 36, + ATH12K_DBG_HTT_EXT_PHY_COUNTERS_AND_PHY_STATS = 37, + ATH12K_DBG_HTT_EXT_VDEVS_TXRX_STATS = 38, + ATH12K_DBG_HTT_EXT_PDEV_PER_STATS = 40, + ATH12K_DBG_HTT_EXT_AST_ENTRIES = 41, + ATH12K_DBG_HTT_EXT_STATS_SOC_ERROR = 45, + ATH12K_DBG_HTT_DBG_PDEV_PUNCTURE_STATS = 46, + ATH12K_DBG_HTT_EXT_STATS_PDEV_SCHED_ALGO = 49, + ATH12K_DBG_HTT_EXT_STATS_MANDATORY_MUOFDMA = 51, + ATH12K_DGB_HTT_EXT_STATS_PDEV_MBSSID_CTRL_FRAME = 54, /* keep this last */ ATH12K_DBG_HTT_NUM_EXT_STATS, @@ -173,6 +181,8 @@ enum ath12k_dbg_htt_tlv_tag { HTT_STATS_TX_PDEV_MU_MIMO_STATS_TAG = 25, HTT_STATS_SFM_CMN_TAG = 26, HTT_STATS_SRING_STATS_TAG = 27, + HTT_STATS_TX_PDEV_RATE_STATS_TAG = 34, + HTT_STATS_RX_PDEV_RATE_STATS_TAG = 35, HTT_STATS_TX_PDEV_SCHEDULER_TXQ_STATS_TAG = 36, HTT_STATS_TX_SCHED_CMN_TAG = 37, HTT_STATS_SCHED_TXQ_CMD_POSTED_TAG = 39, @@ -195,12 +205,21 @@ enum ath12k_dbg_htt_tlv_tag { HTT_STATS_PDEV_CCA_STAT_CUMULATIVE_TAG = 72, HTT_STATS_PDEV_CCA_COUNTERS_TAG = 73, HTT_STATS_TX_PDEV_MPDU_STATS_TAG = 74, + HTT_STATS_TX_SOUNDING_STATS_TAG = 80, HTT_STATS_SCHED_TXQ_SCHED_ORDER_SU_TAG = 86, HTT_STATS_SCHED_TXQ_SCHED_INELIGIBILITY_TAG = 87, HTT_STATS_PDEV_OBSS_PD_TAG = 88, HTT_STATS_HW_WAR_TAG = 89, + HTT_STATS_LATENCY_PROF_STATS_TAG = 91, 
+ HTT_STATS_LATENCY_CTX_TAG = 92, + HTT_STATS_LATENCY_CNT_TAG = 93, + HTT_STATS_RX_PDEV_UL_TRIG_STATS_TAG = 94, + HTT_STATS_RX_PDEV_UL_OFDMA_USER_STATS_TAG = 95, + HTT_STATS_RX_PDEV_UL_MUMIMO_TRIG_STATS_TAG = 97, + HTT_STATS_RX_FSE_STATS_TAG = 98, HTT_STATS_SCHED_TXQ_SUPERCYCLE_TRIGGER_TAG = 100, HTT_STATS_PDEV_CTRL_PATH_TX_STATS_TAG = 102, + HTT_STATS_RX_PDEV_RATE_EXT_STATS_TAG = 103, HTT_STATS_PDEV_TX_RATE_TXBF_STATS_TAG = 108, HTT_STATS_TX_SELFGEN_AC_SCHED_STATUS_STATS_TAG = 111, HTT_STATS_TX_SELFGEN_AX_SCHED_STATUS_STATS_TAG = 112, @@ -387,6 +406,182 @@ struct ath12k_htt_tx_pdev_mu_ppdu_dist_stats_tlv { __le32 num_ppdu_posted_per_burst[ATH12K_HTT_STATS_MU_PPDU_PER_BURST_WORDS]; } __packed; +#define ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS 12 +#define ATH12K_HTT_TX_PDEV_STATS_NUM_GI_COUNTERS 4 +#define ATH12K_HTT_TX_PDEV_STATS_NUM_DCM_COUNTERS 5 +#define ATH12K_HTT_TX_PDEV_STATS_NUM_BW_COUNTERS 4 +#define ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS 8 +#define ATH12K_HTT_TX_PDEV_STATS_NUM_PREAMBLE_TYPES 7 +#define ATH12K_HTT_TX_PDEV_STATS_NUM_LEGACY_CCK_STATS 4 +#define ATH12K_HTT_TX_PDEV_STATS_NUM_LEGACY_OFDM_STATS 8 +#define ATH12K_HTT_TX_PDEV_STATS_NUM_LTF 4 +#define ATH12K_HTT_TX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS 2 +#define ATH12K_HTT_TX_PDEV_STATS_NUM_EXTRA2_MCS_COUNTERS 2 +#define ATH12K_HTT_TX_PDEV_STATS_NUM_11AX_TRIGGER_TYPES 6 + +struct ath12k_htt_tx_pdev_rate_stats_tlv { + __le32 mac_id_word; + __le32 tx_ldpc; + __le32 rts_cnt; + __le32 ack_rssi; + __le32 tx_mcs[ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS]; + __le32 tx_su_mcs[ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS]; + __le32 tx_mu_mcs[ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS]; + __le32 tx_nss[ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS]; + __le32 tx_bw[ATH12K_HTT_TX_PDEV_STATS_NUM_BW_COUNTERS]; + __le32 tx_stbc[ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS]; + __le32 tx_pream[ATH12K_HTT_TX_PDEV_STATS_NUM_PREAMBLE_TYPES]; + __le32 tx_gi[ATH12K_HTT_TX_PDEV_STATS_NUM_GI_COUNTERS] + [ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS]; + __le32 tx_dcm[ATH12K_HTT_TX_PDEV_STATS_NUM_DCM_COUNTERS]; + __le32 rts_success; + __le32 tx_legacy_cck_rate[ATH12K_HTT_TX_PDEV_STATS_NUM_LEGACY_CCK_STATS]; + __le32 tx_legacy_ofdm_rate[ATH12K_HTT_TX_PDEV_STATS_NUM_LEGACY_OFDM_STATS]; + __le32 ac_mu_mimo_tx_ldpc; + __le32 ax_mu_mimo_tx_ldpc; + __le32 ofdma_tx_ldpc; + __le32 tx_he_ltf[ATH12K_HTT_TX_PDEV_STATS_NUM_LTF]; + __le32 ac_mu_mimo_tx_mcs[ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS]; + __le32 ax_mu_mimo_tx_mcs[ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS]; + __le32 ofdma_tx_mcs[ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS]; + __le32 ac_mu_mimo_tx_nss[ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS]; + __le32 ax_mu_mimo_tx_nss[ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS]; + __le32 ofdma_tx_nss[ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS]; + __le32 ac_mu_mimo_tx_bw[ATH12K_HTT_TX_PDEV_STATS_NUM_BW_COUNTERS]; + __le32 ax_mu_mimo_tx_bw[ATH12K_HTT_TX_PDEV_STATS_NUM_BW_COUNTERS]; + __le32 ofdma_tx_bw[ATH12K_HTT_TX_PDEV_STATS_NUM_BW_COUNTERS]; + __le32 ac_mu_mimo_tx_gi[ATH12K_HTT_TX_PDEV_STATS_NUM_GI_COUNTERS] + [ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS]; + __le32 ax_mimo_tx_gi[ATH12K_HTT_TX_PDEV_STATS_NUM_GI_COUNTERS] + [ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS]; + __le32 ofdma_tx_gi[ATH12K_HTT_TX_PDEV_STATS_NUM_GI_COUNTERS] + [ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS]; + __le32 trigger_type_11ax[ATH12K_HTT_TX_PDEV_STATS_NUM_11AX_TRIGGER_TYPES]; + __le32 tx_11ax_su_ext; + __le32 tx_mcs_ext[ATH12K_HTT_TX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS]; + __le32 
tx_stbc_ext[ATH12K_HTT_TX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS]; + __le32 tx_gi_ext[ATH12K_HTT_TX_PDEV_STATS_NUM_GI_COUNTERS] + [ATH12K_HTT_TX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS]; + __le32 ax_mu_mimo_tx_mcs_ext[ATH12K_HTT_TX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS]; + __le32 ofdma_tx_mcs_ext[ATH12K_HTT_TX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS]; + __le32 ax_tx_gi_ext[ATH12K_HTT_TX_PDEV_STATS_NUM_GI_COUNTERS] + [ATH12K_HTT_TX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS]; + __le32 ofd_tx_gi_ext[ATH12K_HTT_TX_PDEV_STATS_NUM_GI_COUNTERS] + [ATH12K_HTT_TX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS]; + __le32 tx_mcs_ext_2[ATH12K_HTT_TX_PDEV_STATS_NUM_EXTRA2_MCS_COUNTERS]; + __le32 tx_bw_320mhz; +}; + +#define ATH12K_HTT_RX_PDEV_STATS_NUM_LEGACY_CCK_STATS 4 +#define ATH12K_HTT_RX_PDEV_STATS_NUM_LEGACY_OFDM_STATS 8 +#define ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS 12 +#define ATH12K_HTT_RX_PDEV_STATS_NUM_GI_COUNTERS 4 +#define ATH12K_HTT_RX_PDEV_STATS_NUM_DCM_COUNTERS 5 +#define ATH12K_HTT_RX_PDEV_STATS_NUM_BW_COUNTERS 4 +#define ATH12K_HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS 8 +#define ATH12K_HTT_RX_PDEV_STATS_NUM_PREAMBLE_TYPES 7 +#define ATH12K_HTT_RX_PDEV_MAX_OFDMA_NUM_USER 8 +#define ATH12K_HTT_RX_PDEV_STATS_RXEVM_MAX_PILOTS_NSS 16 +#define ATH12K_HTT_RX_PDEV_STATS_NUM_RU_SIZE_COUNTERS 6 +#define ATH12K_HTT_RX_PDEV_MAX_ULMUMIMO_NUM_USER 8 +#define ATH12K_HTT_RX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS 2 + +struct ath12k_htt_rx_pdev_rate_stats_tlv { + __le32 mac_id_word; + __le32 nsts; + __le32 rx_ldpc; + __le32 rts_cnt; + __le32 rssi_mgmt; + __le32 rssi_data; + __le32 rssi_comb; + __le32 rx_mcs[ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS]; + __le32 rx_nss[ATH12K_HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS]; + __le32 rx_dcm[ATH12K_HTT_RX_PDEV_STATS_NUM_DCM_COUNTERS]; + __le32 rx_stbc[ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS]; + __le32 rx_bw[ATH12K_HTT_RX_PDEV_STATS_NUM_BW_COUNTERS]; + __le32 rx_pream[ATH12K_HTT_RX_PDEV_STATS_NUM_PREAMBLE_TYPES]; + u8 rssi_chain_in_db[ATH12K_HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS] + [ATH12K_HTT_RX_PDEV_STATS_NUM_BW_COUNTERS]; + __le32 rx_gi[ATH12K_HTT_RX_PDEV_STATS_NUM_GI_COUNTERS] + [ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS]; + __le32 rssi_in_dbm; + __le32 rx_11ax_su_ext; + __le32 rx_11ac_mumimo; + __le32 rx_11ax_mumimo; + __le32 rx_11ax_ofdma; + __le32 txbf; + __le32 rx_legacy_cck_rate[ATH12K_HTT_RX_PDEV_STATS_NUM_LEGACY_CCK_STATS]; + __le32 rx_legacy_ofdm_rate[ATH12K_HTT_RX_PDEV_STATS_NUM_LEGACY_OFDM_STATS]; + __le32 rx_active_dur_us_low; + __le32 rx_active_dur_us_high; + __le32 rx_11ax_ul_ofdma; + __le32 ul_ofdma_rx_mcs[ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS]; + __le32 ul_ofdma_rx_gi[ATH12K_HTT_TX_PDEV_STATS_NUM_GI_COUNTERS] + [ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS]; + __le32 ul_ofdma_rx_nss[ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS]; + __le32 ul_ofdma_rx_bw[ATH12K_HTT_TX_PDEV_STATS_NUM_BW_COUNTERS]; + __le32 ul_ofdma_rx_stbc; + __le32 ul_ofdma_rx_ldpc; + __le32 rx_ulofdma_non_data_ppdu[ATH12K_HTT_RX_PDEV_MAX_OFDMA_NUM_USER]; + __le32 rx_ulofdma_data_ppdu[ATH12K_HTT_RX_PDEV_MAX_OFDMA_NUM_USER]; + __le32 rx_ulofdma_mpdu_ok[ATH12K_HTT_RX_PDEV_MAX_OFDMA_NUM_USER]; + __le32 rx_ulofdma_mpdu_fail[ATH12K_HTT_RX_PDEV_MAX_OFDMA_NUM_USER]; + __le32 nss_count; + __le32 pilot_count; + __le32 rx_pil_evm_db[ATH12K_HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS] + [ATH12K_HTT_RX_PDEV_STATS_RXEVM_MAX_PILOTS_NSS]; + __le32 rx_pilot_evm_db_mean[ATH12K_HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS]; + s8 rx_ul_fd_rssi[ATH12K_HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS] + [ATH12K_HTT_RX_PDEV_MAX_OFDMA_NUM_USER]; + __le32 
per_chain_rssi_pkt_type; + s8 rx_per_chain_rssi_in_dbm[ATH12K_HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS] + [ATH12K_HTT_RX_PDEV_STATS_NUM_BW_COUNTERS]; + __le32 rx_su_ndpa; + __le32 rx_11ax_su_txbf_mcs[ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS]; + __le32 rx_mu_ndpa; + __le32 rx_11ax_mu_txbf_mcs[ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS]; + __le32 rx_br_poll; + __le32 rx_11ax_dl_ofdma_mcs[ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS]; + __le32 rx_11ax_dl_ofdma_ru[ATH12K_HTT_RX_PDEV_STATS_NUM_RU_SIZE_COUNTERS]; + __le32 rx_ulmumimo_non_data_ppdu[ATH12K_HTT_RX_PDEV_MAX_ULMUMIMO_NUM_USER]; + __le32 rx_ulmumimo_data_ppdu[ATH12K_HTT_RX_PDEV_MAX_ULMUMIMO_NUM_USER]; + __le32 rx_ulmumimo_mpdu_ok[ATH12K_HTT_RX_PDEV_MAX_ULMUMIMO_NUM_USER]; + __le32 rx_ulmumimo_mpdu_fail[ATH12K_HTT_RX_PDEV_MAX_ULMUMIMO_NUM_USER]; + __le32 rx_ulofdma_non_data_nusers[ATH12K_HTT_RX_PDEV_MAX_OFDMA_NUM_USER]; + __le32 rx_ulofdma_data_nusers[ATH12K_HTT_RX_PDEV_MAX_OFDMA_NUM_USER]; + __le32 rx_mcs_ext[ATH12K_HTT_RX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS]; +}; + +#define ATH12K_HTT_RX_PDEV_STATS_NUM_BW_EXT_COUNTERS 4 +#define ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS_EXT 14 +#define ATH12K_HTT_RX_PDEV_STATS_NUM_EXTRA2_MCS_COUNTERS 2 +#define ATH12K_HTT_RX_PDEV_STATS_NUM_BW_EXT2_COUNTERS 5 +#define ATH12K_HTT_RX_PDEV_STATS_NUM_PUNCTURED_MODE_COUNTERS 5 + +struct ath12k_htt_rx_pdev_rate_ext_stats_tlv { + u8 rssi_chain_ext[ATH12K_HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS] + [ATH12K_HTT_RX_PDEV_STATS_NUM_BW_EXT_COUNTERS]; + s8 rx_per_chain_rssi_ext_in_dbm[ATH12K_HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS] + [ATH12K_HTT_RX_PDEV_STATS_NUM_BW_EXT_COUNTERS]; + __le32 rssi_mcast_in_dbm; + __le32 rssi_mgmt_in_dbm; + __le32 rx_mcs_ext[ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS_EXT]; + __le32 rx_stbc_ext[ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS_EXT]; + __le32 rx_gi_ext[ATH12K_HTT_RX_PDEV_STATS_NUM_GI_COUNTERS] + [ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS_EXT]; + __le32 ul_ofdma_rx_mcs_ext[ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS_EXT]; + __le32 ul_ofdma_rx_gi_ext[ATH12K_HTT_TX_PDEV_STATS_NUM_GI_COUNTERS] + [ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS_EXT]; + __le32 rx_11ax_su_txbf_mcs_ext[ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS_EXT]; + __le32 rx_11ax_mu_txbf_mcs_ext[ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS_EXT]; + __le32 rx_11ax_dl_ofdma_mcs_ext[ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS_EXT]; + __le32 rx_mcs_ext_2[ATH12K_HTT_RX_PDEV_STATS_NUM_EXTRA2_MCS_COUNTERS]; + __le32 rx_bw_ext[ATH12K_HTT_RX_PDEV_STATS_NUM_BW_EXT2_COUNTERS]; + __le32 rx_gi_ext_2[ATH12K_HTT_RX_PDEV_STATS_NUM_GI_COUNTERS] + [ATH12K_HTT_RX_PDEV_STATS_NUM_EXTRA2_MCS_COUNTERS]; + __le32 rx_su_punctured_mode[ATH12K_HTT_RX_PDEV_STATS_NUM_PUNCTURED_MODE_COUNTERS]; +}; + #define ATH12K_HTT_TX_PDEV_STATS_SCHED_PER_TXQ_MAC_ID GENMASK(7, 0) #define ATH12K_HTT_TX_PDEV_STATS_SCHED_PER_TXQ_ID GENMASK(15, 8) @@ -1058,6 +1253,82 @@ struct ath12k_htt_pdev_cca_stats_hist_v1_tlv { __le32 collection_interval; } __packed; +#define ATH12K_HTT_TX_CV_CORR_MAX_NUM_COLUMNS 8 +#define ATH12K_HTT_TX_NUM_AC_MUMIMO_USER_STATS 4 +#define ATH12K_HTT_TX_NUM_AX_MUMIMO_USER_STATS 8 +#define ATH12K_HTT_TX_NUM_BE_MUMIMO_USER_STATS 8 +#define ATH12K_HTT_TX_PDEV_STATS_NUM_BW_COUNTERS 4 +#define ATH12K_HTT_TX_NUM_MCS_CNTRS 12 +#define ATH12K_HTT_TX_NUM_EXTRA_MCS_CNTRS 2 + +#define ATH12K_HTT_TX_NUM_OF_SOUNDING_STATS_WORDS \ + (ATH12K_HTT_TX_PDEV_STATS_NUM_BW_COUNTERS * \ + ATH12K_HTT_TX_NUM_AX_MUMIMO_USER_STATS) + +enum ath12k_htt_txbf_sound_steer_modes { + ATH12K_HTT_IMPL_STEER_STATS = 0, + ATH12K_HTT_EXPL_SUSIFS_STEER_STATS 
= 1, + ATH12K_HTT_EXPL_SURBO_STEER_STATS = 2, + ATH12K_HTT_EXPL_MUSIFS_STEER_STATS = 3, + ATH12K_HTT_EXPL_MURBO_STEER_STATS = 4, + ATH12K_HTT_TXBF_MAX_NUM_OF_MODES = 5 +}; + +enum ath12k_htt_stats_sounding_tx_mode { + ATH12K_HTT_TX_AC_SOUNDING_MODE = 0, + ATH12K_HTT_TX_AX_SOUNDING_MODE = 1, + ATH12K_HTT_TX_BE_SOUNDING_MODE = 2, + ATH12K_HTT_TX_CMN_SOUNDING_MODE = 3, +}; + +struct ath12k_htt_tx_sounding_stats_tlv { + __le32 tx_sounding_mode; + __le32 cbf_20[ATH12K_HTT_TXBF_MAX_NUM_OF_MODES]; + __le32 cbf_40[ATH12K_HTT_TXBF_MAX_NUM_OF_MODES]; + __le32 cbf_80[ATH12K_HTT_TXBF_MAX_NUM_OF_MODES]; + __le32 cbf_160[ATH12K_HTT_TXBF_MAX_NUM_OF_MODES]; + __le32 sounding[ATH12K_HTT_TX_NUM_OF_SOUNDING_STATS_WORDS]; + __le32 cv_nc_mismatch_err; + __le32 cv_fcs_err; + __le32 cv_frag_idx_mismatch; + __le32 cv_invalid_peer_id; + __le32 cv_no_txbf_setup; + __le32 cv_expiry_in_update; + __le32 cv_pkt_bw_exceed; + __le32 cv_dma_not_done_err; + __le32 cv_update_failed; + __le32 cv_total_query; + __le32 cv_total_pattern_query; + __le32 cv_total_bw_query; + __le32 cv_invalid_bw_coding; + __le32 cv_forced_sounding; + __le32 cv_standalone_sounding; + __le32 cv_nc_mismatch; + __le32 cv_fb_type_mismatch; + __le32 cv_ofdma_bw_mismatch; + __le32 cv_bw_mismatch; + __le32 cv_pattern_mismatch; + __le32 cv_preamble_mismatch; + __le32 cv_nr_mismatch; + __le32 cv_in_use_cnt_exceeded; + __le32 cv_found; + __le32 cv_not_found; + __le32 sounding_320[ATH12K_HTT_TX_NUM_BE_MUMIMO_USER_STATS]; + __le32 cbf_320[ATH12K_HTT_TXBF_MAX_NUM_OF_MODES]; + __le32 cv_ntbr_sounding; + __le32 cv_found_upload_in_progress; + __le32 cv_expired_during_query; + __le32 cv_dma_timeout_error; + __le32 cv_buf_ibf_uploads; + __le32 cv_buf_ebf_uploads; + __le32 cv_buf_received; + __le32 cv_buf_fed_back; + __le32 cv_total_query_ibf; + __le32 cv_found_ibf; + __le32 cv_not_found_ibf; + __le32 cv_expired_during_query_ibf; +} __packed; + struct ath12k_htt_pdev_obss_pd_stats_tlv { __le32 num_obss_tx_ppdu_success; __le32 num_obss_tx_ppdu_failure; @@ -1080,6 +1351,127 @@ struct ath12k_htt_pdev_obss_pd_stats_tlv { __le32 num_sr_ppdu_abort_flush_cnt; } __packed; +#define ATH12K_HTT_STATS_MAX_PROF_STATS_NAME_LEN 32 +#define ATH12K_HTT_LATENCY_PROFILE_NUM_MAX_HIST 3 +#define ATH12K_HTT_INTERRUPTS_LATENCY_PROFILE_MAX_HIST 3 + +struct ath12k_htt_latency_prof_stats_tlv { + __le32 print_header; + s8 latency_prof_name[ATH12K_HTT_STATS_MAX_PROF_STATS_NAME_LEN]; + __le32 cnt; + __le32 min; + __le32 max; + __le32 last; + __le32 tot; + __le32 avg; + __le32 hist_intvl; + __le32 hist[ATH12K_HTT_LATENCY_PROFILE_NUM_MAX_HIST]; +} __packed; + +struct ath12k_htt_latency_prof_ctx_tlv { + __le32 duration; + __le32 tx_msdu_cnt; + __le32 tx_mpdu_cnt; + __le32 tx_ppdu_cnt; + __le32 rx_msdu_cnt; + __le32 rx_mpdu_cnt; +} __packed; + +struct ath12k_htt_latency_prof_cnt_tlv { + __le32 prof_enable_cnt; +} __packed; + +#define ATH12K_HTT_RX_NUM_MCS_CNTRS 12 +#define ATH12K_HTT_RX_NUM_GI_CNTRS 4 +#define ATH12K_HTT_RX_NUM_SPATIAL_STREAMS 8 +#define ATH12K_HTT_RX_NUM_BW_CNTRS 4 +#define ATH12K_HTT_RX_NUM_RU_SIZE_CNTRS 6 +#define ATH12K_HTT_RX_NUM_RU_SIZE_160MHZ_CNTRS 7 +#define ATH12K_HTT_RX_UL_MAX_UPLINK_RSSI_TRACK 5 +#define ATH12K_HTT_RX_NUM_REDUCED_CHAN_TYPES 2 +#define ATH12K_HTT_RX_NUM_EXTRA_MCS_CNTRS 2 + +enum ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE { + ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_26, + ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_52, + ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_106, + ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_242, + ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_484, + 
ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_996, + ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_996x2, + ATH12K_HTT_TX_RX_PDEV_STATS_NUM_AX_RU_SIZE_CNTRS, +}; + +struct ath12k_htt_rx_pdev_ul_ofdma_user_stats_tlv { + __le32 user_index; + __le32 rx_ulofdma_non_data_ppdu; + __le32 rx_ulofdma_data_ppdu; + __le32 rx_ulofdma_mpdu_ok; + __le32 rx_ulofdma_mpdu_fail; + __le32 rx_ulofdma_non_data_nusers; + __le32 rx_ulofdma_data_nusers; +} __packed; + +struct ath12k_htt_rx_pdev_ul_trigger_stats_tlv { + __le32 mac_id__word; + __le32 rx_11ax_ul_ofdma; + __le32 ul_ofdma_rx_mcs[ATH12K_HTT_RX_NUM_MCS_CNTRS]; + __le32 ul_ofdma_rx_gi[ATH12K_HTT_RX_NUM_GI_CNTRS][ATH12K_HTT_RX_NUM_MCS_CNTRS]; + __le32 ul_ofdma_rx_nss[ATH12K_HTT_RX_NUM_SPATIAL_STREAMS]; + __le32 ul_ofdma_rx_bw[ATH12K_HTT_RX_NUM_BW_CNTRS]; + __le32 ul_ofdma_rx_stbc; + __le32 ul_ofdma_rx_ldpc; + __le32 data_ru_size_ppdu[ATH12K_HTT_RX_NUM_RU_SIZE_160MHZ_CNTRS]; + __le32 non_data_ru_size_ppdu[ATH12K_HTT_RX_NUM_RU_SIZE_160MHZ_CNTRS]; + __le32 uplink_sta_aid[ATH12K_HTT_RX_UL_MAX_UPLINK_RSSI_TRACK]; + __le32 uplink_sta_target_rssi[ATH12K_HTT_RX_UL_MAX_UPLINK_RSSI_TRACK]; + __le32 uplink_sta_fd_rssi[ATH12K_HTT_RX_UL_MAX_UPLINK_RSSI_TRACK]; + __le32 uplink_sta_power_headroom[ATH12K_HTT_RX_UL_MAX_UPLINK_RSSI_TRACK]; + __le32 red_bw[ATH12K_HTT_RX_NUM_REDUCED_CHAN_TYPES][ATH12K_HTT_RX_NUM_BW_CNTRS]; + __le32 ul_ofdma_bsc_trig_rx_qos_null_only; +} __packed; + +#define ATH12K_HTT_TX_UL_MUMIMO_USER_STATS 8 + +struct ath12k_htt_rx_ul_mumimo_trig_stats_tlv { + __le32 mac_id__word; + __le32 rx_11ax_ul_mumimo; + __le32 ul_mumimo_rx_mcs[ATH12K_HTT_RX_NUM_MCS_CNTRS]; + __le32 ul_rx_gi[ATH12K_HTT_RX_NUM_GI_CNTRS][ATH12K_HTT_RX_NUM_MCS_CNTRS]; + __le32 ul_mumimo_rx_nss[ATH12K_HTT_RX_NUM_SPATIAL_STREAMS]; + __le32 ul_mumimo_rx_bw[ATH12K_HTT_RX_NUM_BW_CNTRS]; + __le32 ul_mumimo_rx_stbc; + __le32 ul_mumimo_rx_ldpc; + __le32 ul_mumimo_rx_mcs_ext[ATH12K_HTT_RX_NUM_EXTRA_MCS_CNTRS]; + __le32 ul_gi_ext[ATH12K_HTT_RX_NUM_GI_CNTRS][ATH12K_HTT_RX_NUM_EXTRA_MCS_CNTRS]; + s8 ul_rssi[ATH12K_HTT_RX_NUM_SPATIAL_STREAMS][ATH12K_HTT_RX_NUM_BW_CNTRS]; + s8 tgt_rssi[ATH12K_HTT_TX_UL_MUMIMO_USER_STATS][ATH12K_HTT_RX_NUM_BW_CNTRS]; + s8 fd[ATH12K_HTT_TX_UL_MUMIMO_USER_STATS][ATH12K_HTT_RX_NUM_SPATIAL_STREAMS]; + s8 db[ATH12K_HTT_TX_UL_MUMIMO_USER_STATS][ATH12K_HTT_RX_NUM_SPATIAL_STREAMS]; + __le32 red_bw[ATH12K_HTT_RX_NUM_REDUCED_CHAN_TYPES][ATH12K_HTT_RX_NUM_BW_CNTRS]; + __le32 mumimo_bsc_trig_rx_qos_null_only; +} __packed; + +#define ATH12K_HTT_RX_NUM_MAX_PEAK_OCCUPANCY_INDEX 10 +#define ATH12K_HTT_RX_NUM_MAX_CURR_OCCUPANCY_INDEX 10 +#define ATH12K_HTT_RX_NUM_SQUARE_INDEX 6 +#define ATH12K_HTT_RX_NUM_MAX_PEAK_SEARCH_INDEX 4 +#define ATH12K_HTT_RX_NUM_MAX_PENDING_SEARCH_INDEX 4 + +struct ath12k_htt_rx_fse_stats_tlv { + __le32 fse_enable_cnt; + __le32 fse_disable_cnt; + __le32 fse_cache_invalidate_entry_cnt; + __le32 fse_full_cache_invalidate_cnt; + __le32 fse_num_cache_hits_cnt; + __le32 fse_num_searches_cnt; + __le32 fse_cache_occupancy_peak_cnt[ATH12K_HTT_RX_NUM_MAX_PEAK_OCCUPANCY_INDEX]; + __le32 fse_cache_occupancy_curr_cnt[ATH12K_HTT_RX_NUM_MAX_CURR_OCCUPANCY_INDEX]; + __le32 fse_search_stat_square_cnt[ATH12K_HTT_RX_NUM_SQUARE_INDEX]; + __le32 fse_search_stat_peak_cnt[ATH12K_HTT_RX_NUM_MAX_PEAK_SEARCH_INDEX]; + __le32 fse_search_stat_pending_cnt[ATH12K_HTT_RX_NUM_MAX_PENDING_SEARCH_INDEX]; +} __packed; + #define ATH12K_HTT_TX_BF_RATE_STATS_NUM_MCS_COUNTERS 14 #define ATH12K_HTT_TX_PDEV_STATS_NUM_LEGACY_OFDM_STATS 8 #define ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS 8 @@ -1417,17 +1809,6 
@@ enum ATH12K_HTT_RC_MODE { ATH12K_HTT_RC_MODE_2D_COUNT }; -enum ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE { - ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_26, - ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_52, - ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_106, - ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_242, - ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_484, - ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_996, - ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_996x2, - ATH12K_HTT_TX_RX_PDEV_STATS_NUM_AX_RU_SIZE_CNTRS -}; - enum ath12k_htt_stats_rc_mode { ATH12K_HTT_STATS_RC_MODE_DLSU = 0, ATH12K_HTT_STATS_RC_MODE_DLMUMIMO = 1, diff --git a/drivers/net/wireless/ath/ath12k/debugfs_sta.c b/drivers/net/wireless/ath/ath12k/debugfs_sta.c new file mode 100644 index 0000000000000..5bd2bf4c9dac3 --- /dev/null +++ b/drivers/net/wireless/ath/ath12k/debugfs_sta.c @@ -0,0 +1,337 @@ +// SPDX-License-Identifier: BSD-3-Clause-Clear +/* + * Copyright (c) 2024-2025 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include <linux/vmalloc.h> + +#include "debugfs_sta.h" +#include "core.h" +#include "peer.h" +#include "debug.h" +#include "debugfs_htt_stats.h" +#include "debugfs.h" + +static +u32 ath12k_dbg_sta_dump_rate_stats(u8 *buf, u32 offset, const int size, + bool he_rates_avail, + const struct ath12k_rx_peer_rate_stats *stats) +{ + static const char *legacy_rate_str[HAL_RX_MAX_NUM_LEGACY_RATES] = { + "1 Mbps", "2 Mbps", "5.5 Mbps", "6 Mbps", + "9 Mbps", "11 Mbps", "12 Mbps", "18 Mbps", + "24 Mbps", "36 Mbps", "48 Mbps", "54 Mbps"}; + u8 max_bw = HAL_RX_BW_MAX, max_gi = HAL_RX_GI_MAX, max_mcs = HAL_RX_MAX_NSS; + int mcs = 0, bw = 0, nss = 0, gi = 0, bw_num = 0; + u32 i, len = offset, max = max_bw * max_gi * max_mcs; + bool found; + + len += scnprintf(buf + len, size - len, "\nEHT stats:\n"); + for (i = 0; i <= HAL_RX_MAX_MCS_BE; i++) + len += scnprintf(buf + len, size - len, + "MCS %d: %llu%s", i, stats->be_mcs_count[i], + (i + 1) % 8 ? "\t" : "\n"); + + len += scnprintf(buf + len, size - len, "\nHE stats:\n"); + for (i = 0; i <= HAL_RX_MAX_MCS_HE; i++) + len += scnprintf(buf + len, size - len, + "MCS %d: %llu%s", i, stats->he_mcs_count[i], + (i + 1) % 6 ? "\t" : "\n"); + + len += scnprintf(buf + len, size - len, "\nVHT stats:\n"); + for (i = 0; i <= HAL_RX_MAX_MCS_VHT; i++) + len += scnprintf(buf + len, size - len, + "MCS %d: %llu%s", i, stats->vht_mcs_count[i], + (i + 1) % 5 ? "\t" : "\n"); + + len += scnprintf(buf + len, size - len, "\nHT stats:\n"); + for (i = 0; i <= HAL_RX_MAX_MCS_HT; i++) + len += scnprintf(buf + len, size - len, + "MCS %d: %llu%s", i, stats->ht_mcs_count[i], + (i + 1) % 8 ? "\t" : "\n"); + + len += scnprintf(buf + len, size - len, "\nLegacy stats:\n"); + for (i = 0; i < HAL_RX_MAX_NUM_LEGACY_RATES; i++) + len += scnprintf(buf + len, size - len, + "%s: %llu%s", legacy_rate_str[i], + stats->legacy_count[i], + (i + 1) % 4 ? 
"\t" : "\n"); + + len += scnprintf(buf + len, size - len, "\nNSS stats:\n"); + for (i = 0; i < HAL_RX_MAX_NSS; i++) + len += scnprintf(buf + len, size - len, + "%dx%d: %llu ", i + 1, i + 1, + stats->nss_count[i]); + + len += scnprintf(buf + len, size - len, + "\n\nGI: 0.8 us %llu 0.4 us %llu 1.6 us %llu 3.2 us %llu\n", + stats->gi_count[0], + stats->gi_count[1], + stats->gi_count[2], + stats->gi_count[3]); + + len += scnprintf(buf + len, size - len, + "BW: 20 MHz %llu 40 MHz %llu 80 MHz %llu 160 MHz %llu 320 MHz %llu\n", + stats->bw_count[0], + stats->bw_count[1], + stats->bw_count[2], + stats->bw_count[3], + stats->bw_count[4]); + + for (i = 0; i < max; i++) { + found = false; + + for (mcs = 0; mcs <= HAL_RX_MAX_MCS_HT; mcs++) { + if (stats->rx_rate[bw][gi][nss][mcs]) { + found = true; + break; + } + } + + if (!found) + goto skip_report; + + switch (bw) { + case HAL_RX_BW_20MHZ: + bw_num = 20; + break; + case HAL_RX_BW_40MHZ: + bw_num = 40; + break; + case HAL_RX_BW_80MHZ: + bw_num = 80; + break; + case HAL_RX_BW_160MHZ: + bw_num = 160; + break; + case HAL_RX_BW_320MHZ: + bw_num = 320; + break; + } + + len += scnprintf(buf + len, size - len, "\n%d Mhz gi %d us %dx%d : ", + bw_num, gi, nss + 1, nss + 1); + + for (mcs = 0; mcs <= HAL_RX_MAX_MCS_HT; mcs++) { + if (stats->rx_rate[bw][gi][nss][mcs]) + len += scnprintf(buf + len, size - len, + " %d:%llu", mcs, + stats->rx_rate[bw][gi][nss][mcs]); + } + +skip_report: + if (nss++ >= max_mcs - 1) { + nss = 0; + if (gi++ >= max_gi - 1) { + gi = 0; + if (bw < max_bw - 1) + bw++; + } + } + } + + len += scnprintf(buf + len, size - len, "\n"); + + return len - offset; +} + +static ssize_t ath12k_dbg_sta_dump_rx_stats(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ieee80211_link_sta *link_sta = file->private_data; + struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(link_sta->sta); + const int size = ATH12K_STA_RX_STATS_BUF_SIZE; + struct ath12k_hw *ah = ahsta->ahvif->ah; + struct ath12k_rx_peer_stats *rx_stats; + struct ath12k_link_sta *arsta; + u8 link_id = link_sta->link_id; + int len = 0, i, ret = 0; + bool he_rates_avail; + struct ath12k *ar; + + wiphy_lock(ah->hw->wiphy); + + if (!(BIT(link_id) & ahsta->links_map)) { + wiphy_unlock(ah->hw->wiphy); + return -ENOENT; + } + + arsta = wiphy_dereference(ah->hw->wiphy, ahsta->link[link_id]); + if (!arsta || !arsta->arvif->ar) { + wiphy_unlock(ah->hw->wiphy); + return -ENOENT; + } + + ar = arsta->arvif->ar; + + u8 *buf __free(kfree) = kzalloc(size, GFP_KERNEL); + if (!buf) { + ret = -ENOENT; + goto out; + } + + spin_lock_bh(&ar->ab->base_lock); + + rx_stats = arsta->rx_stats; + if (!rx_stats) { + ret = -ENOENT; + goto unlock; + } + + len += scnprintf(buf + len, size - len, "RX peer stats:\n\n"); + len += scnprintf(buf + len, size - len, "Num of MSDUs: %llu\n", + rx_stats->num_msdu); + len += scnprintf(buf + len, size - len, "Num of MSDUs with TCP L4: %llu\n", + rx_stats->tcp_msdu_count); + len += scnprintf(buf + len, size - len, "Num of MSDUs with UDP L4: %llu\n", + rx_stats->udp_msdu_count); + len += scnprintf(buf + len, size - len, "Num of other MSDUs: %llu\n", + rx_stats->other_msdu_count); + len += scnprintf(buf + len, size - len, "Num of MSDUs part of AMPDU: %llu\n", + rx_stats->ampdu_msdu_count); + len += scnprintf(buf + len, size - len, "Num of MSDUs not part of AMPDU: %llu\n", + rx_stats->non_ampdu_msdu_count); + len += scnprintf(buf + len, size - len, "Num of MSDUs using STBC: %llu\n", + rx_stats->stbc_count); + len += scnprintf(buf + len, size - len, "Num of 
MSDUs beamformed: %llu\n", + rx_stats->beamformed_count); + len += scnprintf(buf + len, size - len, "Num of MPDUs with FCS ok: %llu\n", + rx_stats->num_mpdu_fcs_ok); + len += scnprintf(buf + len, size - len, "Num of MPDUs with FCS error: %llu\n", + rx_stats->num_mpdu_fcs_err); + + he_rates_avail = (rx_stats->pream_cnt[HAL_RX_PREAMBLE_11AX] > 1) ? true : false; + + len += scnprintf(buf + len, size - len, + "preamble: 11A %llu 11B %llu 11N %llu 11AC %llu 11AX %llu 11BE %llu\n", + rx_stats->pream_cnt[0], rx_stats->pream_cnt[1], + rx_stats->pream_cnt[2], rx_stats->pream_cnt[3], + rx_stats->pream_cnt[4], rx_stats->pream_cnt[6]); + len += scnprintf(buf + len, size - len, + "reception type: SU %llu MU_MIMO %llu MU_OFDMA %llu MU_OFDMA_MIMO %llu\n", + rx_stats->reception_type[0], rx_stats->reception_type[1], + rx_stats->reception_type[2], rx_stats->reception_type[3]); + + len += scnprintf(buf + len, size - len, "TID(0-15) Legacy TID(16):"); + for (i = 0; i <= IEEE80211_NUM_TIDS; i++) + len += scnprintf(buf + len, size - len, "%llu ", rx_stats->tid_count[i]); + + len += scnprintf(buf + len, size - len, "\nRX Duration:%llu\n", + rx_stats->rx_duration); + + len += scnprintf(buf + len, size - len, + "\nDCM: %llu\nRU26: %llu\nRU52: %llu\nRU106: %llu\nRU242: %llu\nRU484: %llu\nRU996: %llu\nRU996x2: %llu\n", + rx_stats->dcm_count, rx_stats->ru_alloc_cnt[0], + rx_stats->ru_alloc_cnt[1], rx_stats->ru_alloc_cnt[2], + rx_stats->ru_alloc_cnt[3], rx_stats->ru_alloc_cnt[4], + rx_stats->ru_alloc_cnt[5], rx_stats->ru_alloc_cnt[6]); + + len += scnprintf(buf + len, size - len, "\nRX success packet stats:\n"); + len += ath12k_dbg_sta_dump_rate_stats(buf, len, size, he_rates_avail, + &rx_stats->pkt_stats); + + len += scnprintf(buf + len, size - len, "\n"); + + len += scnprintf(buf + len, size - len, "\nRX success byte stats:\n"); + len += ath12k_dbg_sta_dump_rate_stats(buf, len, size, he_rates_avail, + &rx_stats->byte_stats); + +unlock: + spin_unlock_bh(&ar->ab->base_lock); + + if (len) + ret = simple_read_from_buffer(user_buf, count, ppos, buf, len); +out: + wiphy_unlock(ah->hw->wiphy); + return ret; +} + +static const struct file_operations fops_rx_stats = { + .read = ath12k_dbg_sta_dump_rx_stats, + .open = simple_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +static ssize_t ath12k_dbg_sta_reset_rx_stats(struct file *file, + const char __user *buf, + size_t count, loff_t *ppos) +{ + struct ieee80211_link_sta *link_sta = file->private_data; + struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(link_sta->sta); + struct ath12k_hw *ah = ahsta->ahvif->ah; + struct ath12k_rx_peer_stats *rx_stats; + struct ath12k_link_sta *arsta; + u8 link_id = link_sta->link_id; + struct ath12k *ar; + bool reset; + int ret; + + ret = kstrtobool_from_user(buf, count, &reset); + if (ret) + return ret; + + if (!reset) + return -EINVAL; + + wiphy_lock(ah->hw->wiphy); + + if (!(BIT(link_id) & ahsta->links_map)) { + ret = -ENOENT; + goto out; + } + + arsta = wiphy_dereference(ah->hw->wiphy, ahsta->link[link_id]); + if (!arsta || !arsta->arvif->ar) { + ret = -ENOENT; + goto out; + } + + ar = arsta->arvif->ar; + + spin_lock_bh(&ar->ab->base_lock); + + rx_stats = arsta->rx_stats; + if (!rx_stats) { + spin_unlock_bh(&ar->ab->base_lock); + ret = -ENOENT; + goto out; + } + + memset(rx_stats, 0, sizeof(*rx_stats)); + spin_unlock_bh(&ar->ab->base_lock); + + ret = count; +out: + wiphy_unlock(ah->hw->wiphy); + return ret; +} + +static const struct file_operations fops_reset_rx_stats = { + .write = ath12k_dbg_sta_reset_rx_stats, + .open = 
simple_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +void ath12k_debugfs_link_sta_op_add(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_link_sta *link_sta, + struct dentry *dir) +{ + struct ath12k *ar; + + lockdep_assert_wiphy(hw->wiphy); + + ar = ath12k_get_ar_by_vif(hw, vif, link_sta->link_id); + if (!ar) + return; + + if (ath12k_debugfs_is_extd_rx_stats_enabled(ar)) { + debugfs_create_file("rx_stats", 0400, dir, link_sta, + &fops_rx_stats); + debugfs_create_file("reset_rx_stats", 0200, dir, link_sta, + &fops_reset_rx_stats); + } +} diff --git a/drivers/net/wireless/ath/ath12k/debugfs_sta.h b/drivers/net/wireless/ath/ath12k/debugfs_sta.h new file mode 100644 index 0000000000000..8de924f4d7d52 --- /dev/null +++ b/drivers/net/wireless/ath/ath12k/debugfs_sta.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: BSD-3-Clause-Clear */ +/* + * Copyright (c) 2024-2025 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef _ATH12K_DEBUGFS_STA_H_ +#define _ATH12K_DEBUGFS_STA_H_ + +#include <net/mac80211.h> + +#include "core.h" + +#define ATH12K_STA_RX_STATS_BUF_SIZE (1024 * 16) + +#ifdef CONFIG_ATH12K_DEBUGFS + +void ath12k_debugfs_link_sta_op_add(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_link_sta *link_sta, + struct dentry *dir); + +#endif /* CONFIG_ATH12K_DEBUGFS */ + +#endif /* _ATH12K_DEBUGFS_STA_H_ */ diff --git a/drivers/net/wireless/ath/ath12k/dp.c b/drivers/net/wireless/ath/ath12k/dp.c index 9e5a4e75f2f62..b1f27c3ac723e 100644 --- a/drivers/net/wireless/ath/ath12k/dp.c +++ b/drivers/net/wireless/ath/ath12k/dp.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: BSD-3-Clause-Clear /* * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. - * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved. */ #include <crypto/hash.h> @@ -1315,6 +1315,9 @@ void ath12k_dp_cc_config(struct ath12k_base *ab) u32 wbm_base = HAL_SEQ_WCSS_UMAC_WBM_REG; u32 val = 0; + if (ath12k_ftm_mode) + return; + ath12k_hif_write32(ab, reo_base + HAL_REO1_SW_COOKIE_CFG0(ab), cmem_base); val |= u32_encode_bits(ATH12K_CMEM_ADDR_MSB, diff --git a/drivers/net/wireless/ath/ath12k/dp.h b/drivers/net/wireless/ath/ath12k/dp.h index 7ac3143de0168..75435a931548c 100644 --- a/drivers/net/wireless/ath/ath12k/dp.h +++ b/drivers/net/wireless/ath/ath12k/dp.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: BSD-3-Clause-Clear */ /* * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. - * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved. 
*/ #ifndef ATH12K_DP_H @@ -125,7 +125,6 @@ struct ath12k_mon_data { struct sk_buff_head rx_status_q; struct dp_mon_mpdu *mon_mpdu; struct list_head dp_rx_mon_mpdu_list; - struct sk_buff *dest_skb_q[DP_MON_MAX_STATUS_BUF]; struct dp_mon_tx_ppdu_info *tx_prot_ppdu_info; struct dp_mon_tx_ppdu_info *tx_data_ppdu_info; }; @@ -176,7 +175,7 @@ struct ath12k_pdev_dp { #define DP_RXDMA_ERR_DST_RING_SIZE 1024 #define DP_RXDMA_MON_STATUS_RING_SIZE 1024 #define DP_RXDMA_MONITOR_BUF_RING_SIZE 4096 -#define DP_RXDMA_MONITOR_DST_RING_SIZE 2048 +#define DP_RXDMA_MONITOR_DST_RING_SIZE 8092 #define DP_RXDMA_MONITOR_DESC_RING_SIZE 4096 #define DP_TX_MONITOR_BUF_RING_SIZE 4096 #define DP_TX_MONITOR_DEST_RING_SIZE 2048 @@ -373,17 +372,24 @@ struct ath12k_dp { }; /* HTT definitions */ +#define HTT_TAG_TCL_METADATA_VERSION 5 -#define HTT_TCL_META_DATA_TYPE BIT(0) -#define HTT_TCL_META_DATA_VALID_HTT BIT(1) +#define HTT_TCL_META_DATA_TYPE GENMASK(1, 0) +#define HTT_TCL_META_DATA_VALID_HTT BIT(2) /* vdev meta data */ -#define HTT_TCL_META_DATA_VDEV_ID GENMASK(9, 2) -#define HTT_TCL_META_DATA_PDEV_ID GENMASK(11, 10) -#define HTT_TCL_META_DATA_HOST_INSPECTED BIT(12) +#define HTT_TCL_META_DATA_VDEV_ID GENMASK(10, 3) +#define HTT_TCL_META_DATA_PDEV_ID GENMASK(12, 11) +#define HTT_TCL_META_DATA_HOST_INSPECTED_MISSION BIT(13) /* peer meta data */ -#define HTT_TCL_META_DATA_PEER_ID GENMASK(15, 2) +#define HTT_TCL_META_DATA_PEER_ID GENMASK(15, 3) + +/* Global sequence number */ +#define HTT_TCL_META_DATA_TYPE_GLOBAL_SEQ_NUM 3 +#define HTT_TCL_META_DATA_GLOBAL_SEQ_HOST_INSPECTED BIT(2) +#define HTT_TCL_META_DATA_GLOBAL_SEQ_NUM GENMASK(14, 3) +#define HTT_TX_MLO_MCAST_HOST_REINJECT_BASE_VDEV_ID 128 /* HTT tx completion is overlaid in wbm_release_ring */ #define HTT_TX_WBM_COMP_INFO0_STATUS GENMASK(16, 13) @@ -414,9 +420,15 @@ enum htt_h2t_msg_type { }; #define HTT_VER_REQ_INFO_MSG_ID GENMASK(7, 0) +#define HTT_OPTION_TCL_METADATA_VER_V2 2 +#define HTT_OPTION_TAG GENMASK(7, 0) +#define HTT_OPTION_LEN GENMASK(15, 8) +#define HTT_OPTION_VALUE GENMASK(31, 16) +#define HTT_TCL_METADATA_VER_SZ 4 struct htt_ver_req_cmd { __le32 ver_reg_info; + __le32 tcl_metadata_version; } __packed; enum htt_srng_ring_type { @@ -434,8 +446,11 @@ enum htt_srng_ring_id { HTT_HOST1_TO_FW_RXBUF_RING, HTT_HOST2_TO_FW_RXBUF_RING, HTT_RXDMA_NON_MONITOR_DEST_RING, + HTT_RXDMA_HOST_BUF_RING2, HTT_TX_MON_HOST2MON_BUF_RING, HTT_TX_MON_MON2HOST_DEST_RING, + HTT_RX_MON_HOST2MON_BUF_RING, + HTT_RX_MON_MON2HOST_DEST_RING, }; /* host -> target HTT_SRING_SETUP message @@ -767,8 +782,22 @@ enum htt_stats_internal_ppdu_frametype { #define HTT_RX_RING_SELECTION_CFG_CMD_INFO0_RING_ID GENMASK(23, 16) #define HTT_RX_RING_SELECTION_CFG_CMD_INFO0_SS BIT(24) #define HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PS BIT(25) -#define HTT_RX_RING_SELECTION_CFG_CMD_INFO1_BUF_SIZE GENMASK(15, 0) -#define HTT_RX_RING_SELECTION_CFG_CMD_OFFSET_VALID BIT(26) +#define HTT_RX_RING_SELECTION_CFG_CMD_INFO0_OFFSET_VALID BIT(26) +#define HTT_RX_RING_SELECTION_CFG_CMD_INFO0_DROP_THRES_VAL BIT(27) +#define HTT_RX_RING_SELECTION_CFG_CMD_INFO0_EN_RXMON BIT(28) + +#define HTT_RX_RING_SELECTION_CFG_CMD_INFO1_BUF_SIZE GENMASK(15, 0) +#define HTT_RX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_MGMT GENMASK(18, 16) +#define HTT_RX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_CTRL GENMASK(21, 19) +#define HTT_RX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_DATA GENMASK(24, 22) + +#define HTT_RX_RING_SELECTION_CFG_CMD_INFO2_DROP_THRESHOLD GENMASK(9, 0) +#define HTT_RX_RING_SELECTION_CFG_CMD_INFO2_EN_LOG_MGMT_TYPE BIT(17) 
+#define HTT_RX_RING_SELECTION_CFG_CMD_INFO2_EN_CTRL_TYPE BIT(18) +#define HTT_RX_RING_SELECTION_CFG_CMD_INFO2_EN_LOG_DATA_TYPE BIT(19) + +#define HTT_RX_RING_SELECTION_CFG_CMD_INFO3_EN_TLV_PKT_OFFSET BIT(0) +#define HTT_RX_RING_SELECTION_CFG_CMD_INFO3_PKT_TLV_OFFSET GENMASK(14, 1) #define HTT_RX_RING_SELECTION_CFG_RX_PACKET_OFFSET GENMASK(15, 0) #define HTT_RX_RING_SELECTION_CFG_RX_HEADER_OFFSET GENMASK(31, 16) @@ -797,6 +826,7 @@ enum htt_rx_filter_tlv_flags { HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS = BIT(10), HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS_EXT = BIT(11), HTT_RX_FILTER_TLV_FLAGS_PPDU_END_STATUS_DONE = BIT(12), + HTT_RX_FILTER_TLV_FLAGS_PPDU_START_USER_INFO = BIT(13), }; enum htt_rx_mgmt_pkt_filter_tlv_flags0 { @@ -1085,6 +1115,21 @@ enum htt_rx_data_pkt_filter_tlv_flasg3 { HTT_RX_FILTER_TLV_FLAGS_PER_MSDU_HEADER | \ HTT_RX_FILTER_TLV_FLAGS_ATTENTION) +#define HTT_RX_MON_FILTER_TLV_FLAGS_MON_DEST_RING \ + (HTT_RX_FILTER_TLV_FLAGS_MPDU_START | \ + HTT_RX_FILTER_TLV_FLAGS_MSDU_START | \ + HTT_RX_FILTER_TLV_FLAGS_RX_PACKET | \ + HTT_RX_FILTER_TLV_FLAGS_MSDU_END | \ + HTT_RX_FILTER_TLV_FLAGS_MPDU_END | \ + HTT_RX_FILTER_TLV_FLAGS_PACKET_HEADER | \ + HTT_RX_FILTER_TLV_FLAGS_PER_MSDU_HEADER | \ + HTT_RX_FILTER_TLV_FLAGS_PPDU_START | \ + HTT_RX_FILTER_TLV_FLAGS_PPDU_END | \ + HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS | \ + HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS_EXT | \ + HTT_RX_FILTER_TLV_FLAGS_PPDU_END_STATUS_DONE | \ + HTT_RX_FILTER_TLV_FLAGS_PPDU_START_USER_INFO) + /* msdu start. mpdu end, attention, rx hdr tlv's are not subscribed */ #define HTT_RX_TLV_FLAGS_RXDMA_RING \ (HTT_RX_FILTER_TLV_FLAGS_MPDU_START | \ @@ -1113,6 +1158,10 @@ struct htt_rx_ring_selection_cfg_cmd { __le32 info3; } __packed; +#define HTT_RX_RING_TLV_DROP_THRESHOLD_VALUE 32 +#define HTT_RX_RING_DEFAULT_DMA_LENGTH 0x7 +#define HTT_RX_RING_PKT_TLV_OFFSET 0x1 + struct htt_rx_ring_tlv_filter { u32 rx_filter; /* see htt_rx_filter_tlv_flags */ u32 pkt_filter_flags0; /* MGMT */ @@ -1130,6 +1179,17 @@ struct htt_rx_ring_tlv_filter { u16 rx_mpdu_start_wmask; u16 rx_mpdu_end_wmask; u32 rx_msdu_end_wmask; + u32 conf_len_ctrl; + u32 conf_len_mgmt; + u32 conf_len_data; + u16 rx_drop_threshold; + bool enable_log_mgmt_type; + bool enable_log_ctrl_type; + bool enable_log_data_type; + bool enable_rx_tlv_offset; + u16 rx_tlv_offset; + bool drop_threshold_valid; + bool rxmon_disable; }; #define HTT_STATS_FRAME_CTRL_TYPE_MGMT 0x0 diff --git a/drivers/net/wireless/ath/ath12k/dp_mon.c b/drivers/net/wireless/ath/ath12k/dp_mon.c index 5a21961cfd465..d22800e894850 100644 --- a/drivers/net/wireless/ath/ath12k/dp_mon.c +++ b/drivers/net/wireless/ath/ath12k/dp_mon.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: BSD-3-Clause-Clear /* * Copyright (c) 2019-2021 The Linux Foundation. All rights reserved. - * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved. 
*/ #include "dp_mon.h" @@ -10,6 +10,12 @@ #include "dp_tx.h" #include "peer.h" +#define ATH12K_LE32_DEC_ENC(value, dec_bits, enc_bits) \ + u32_encode_bits(le32_get_bits(value, dec_bits), enc_bits) + +#define ATH12K_LE64_DEC_ENC(value, dec_bits, enc_bits) \ + u32_encode_bits(le64_get_bits(value, dec_bits), enc_bits) + static void ath12k_dp_mon_rx_handle_ofdma_info(const struct hal_rx_ppdu_end_user_stats *ppdu_end_user, struct hal_rx_user_status *rx_user_status) @@ -75,7 +81,7 @@ ath12k_dp_mon_rx_populate_mu_user_info(const struct hal_rx_ppdu_end_user_stats * static void ath12k_dp_mon_parse_vht_sig_a(const struct hal_rx_vht_sig_a_info *vht_sig, struct hal_rx_mon_ppdu_info *ppdu_info) { - u32 nsts, group_id, info0, info1; + u32 nsts, info0, info1; u8 gi_setting; info0 = __le32_to_cpu(vht_sig->info0); @@ -103,12 +109,8 @@ static void ath12k_dp_mon_parse_vht_sig_a(const struct hal_rx_vht_sig_a_info *vh ppdu_info->bw = u32_get_bits(info0, HAL_RX_VHT_SIG_A_INFO_INFO0_BW); ppdu_info->beamformed = u32_get_bits(info1, HAL_RX_VHT_SIG_A_INFO_INFO1_BEAMFORMED); - group_id = u32_get_bits(info0, HAL_RX_VHT_SIG_A_INFO_INFO0_GROUP_ID); - if (group_id == 0 || group_id == 63) - ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU; - else - ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_MIMO; - ppdu_info->vht_flag_values5 = group_id; + ppdu_info->vht_flag_values5 = u32_get_bits(info0, + HAL_RX_VHT_SIG_A_INFO_INFO0_GROUP_ID); ppdu_info->vht_flag_values3[0] = (((ppdu_info->mcs) << 4) | ppdu_info->nss); ppdu_info->vht_flag_values2 = ppdu_info->bw; @@ -128,7 +130,6 @@ static void ath12k_dp_mon_parse_ht_sig(const struct hal_rx_ht_sig_info *ht_sig, ppdu_info->ldpc = u32_get_bits(info1, HAL_RX_HT_SIG_INFO_INFO1_FEC_CODING); ppdu_info->gi = u32_get_bits(info1, HAL_RX_HT_SIG_INFO_INFO1_GI); ppdu_info->nss = (ppdu_info->mcs >> 3); - ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU; } static void ath12k_dp_mon_parse_l_sig_b(const struct hal_rx_lsig_b_info *lsigb, @@ -160,7 +161,6 @@ static void ath12k_dp_mon_parse_l_sig_b(const struct hal_rx_lsig_b_info *lsigb, ppdu_info->rate = rate; ppdu_info->cck_flag = 1; - ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU; } static void ath12k_dp_mon_parse_l_sig_a(const struct hal_rx_lsig_a_info *lsiga, @@ -200,7 +200,6 @@ static void ath12k_dp_mon_parse_l_sig_a(const struct hal_rx_lsig_a_info *lsiga, } ppdu_info->rate = rate; - ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU; } static void @@ -237,7 +236,6 @@ ath12k_dp_mon_parse_he_sig_b2_ofdma(const struct hal_rx_he_sig_b2_ofdma_info *of ppdu_info->nss = u32_get_bits(info0, HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_NSTS); ppdu_info->beamformed = u32_get_bits(info0, HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_TXBF); - ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_OFDMA; } static void @@ -277,7 +275,6 @@ ath12k_dp_mon_parse_he_sig_b1_mu(const struct hal_rx_he_sig_b1_mu_info *he_sig_b HAL_RX_HE_SIG_B1_MU_INFO_INFO0_RU_ALLOCATION); ppdu_info->ru_alloc = ath12k_he_ru_tones_to_nl80211_he_ru_alloc(ru_tones); ppdu_info->he_RU[0] = ru_tones; - ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_MIMO; } static void @@ -411,7 +408,6 @@ ath12k_dp_mon_parse_he_sig_mu(const struct hal_rx_he_sig_a_mu_dl_info *he_sig_a_ ppdu_info->is_stbc = info1 & HAL_RX_HE_SIG_A_MU_DL_INFO1_STBC; - ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_MIMO; } static void ath12k_dp_mon_parse_he_sig_su(const struct hal_rx_he_sig_a_su_info *he_sig_a, @@ -559,17 +555,887 @@ static void ath12k_dp_mon_parse_he_sig_su(const struct 
hal_rx_he_sig_a_su_info * dcm = u32_get_bits(info0, HAL_RX_HE_SIG_A_SU_INFO_INFO0_DCM); ppdu_info->nss = u32_get_bits(info0, HAL_RX_HE_SIG_A_SU_INFO_INFO0_NSTS); ppdu_info->dcm = dcm; - ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU; +} + +static void +ath12k_dp_mon_hal_rx_parse_u_sig_cmn(const struct hal_mon_usig_cmn *cmn, + struct hal_rx_mon_ppdu_info *ppdu_info) +{ + u32 common; + + ppdu_info->u_sig_info.bw = le32_get_bits(cmn->info0, + HAL_RX_USIG_CMN_INFO0_BW); + ppdu_info->u_sig_info.ul_dl = le32_get_bits(cmn->info0, + HAL_RX_USIG_CMN_INFO0_UL_DL); + + common = __le32_to_cpu(ppdu_info->u_sig_info.usig.common); + common |= IEEE80211_RADIOTAP_EHT_USIG_COMMON_PHY_VER_KNOWN | + IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW_KNOWN | + IEEE80211_RADIOTAP_EHT_USIG_COMMON_UL_DL_KNOWN | + IEEE80211_RADIOTAP_EHT_USIG_COMMON_BSS_COLOR_KNOWN | + IEEE80211_RADIOTAP_EHT_USIG_COMMON_TXOP_KNOWN | + ATH12K_LE32_DEC_ENC(cmn->info0, + HAL_RX_USIG_CMN_INFO0_PHY_VERSION, + IEEE80211_RADIOTAP_EHT_USIG_COMMON_PHY_VER) | + u32_encode_bits(ppdu_info->u_sig_info.bw, + IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW) | + u32_encode_bits(ppdu_info->u_sig_info.ul_dl, + IEEE80211_RADIOTAP_EHT_USIG_COMMON_UL_DL) | + ATH12K_LE32_DEC_ENC(cmn->info0, + HAL_RX_USIG_CMN_INFO0_BSS_COLOR, + IEEE80211_RADIOTAP_EHT_USIG_COMMON_BSS_COLOR) | + ATH12K_LE32_DEC_ENC(cmn->info0, + HAL_RX_USIG_CMN_INFO0_TXOP, + IEEE80211_RADIOTAP_EHT_USIG_COMMON_TXOP); + ppdu_info->u_sig_info.usig.common = cpu_to_le32(common); + + switch (ppdu_info->u_sig_info.bw) { + default: + fallthrough; + case HAL_EHT_BW_20: + ppdu_info->bw = HAL_RX_BW_20MHZ; + break; + case HAL_EHT_BW_40: + ppdu_info->bw = HAL_RX_BW_40MHZ; + break; + case HAL_EHT_BW_80: + ppdu_info->bw = HAL_RX_BW_80MHZ; + break; + case HAL_EHT_BW_160: + ppdu_info->bw = HAL_RX_BW_160MHZ; + break; + case HAL_EHT_BW_320_1: + case HAL_EHT_BW_320_2: + ppdu_info->bw = HAL_RX_BW_320MHZ; + break; + } +} + +static void +ath12k_dp_mon_hal_rx_parse_u_sig_tb(const struct hal_mon_usig_tb *usig_tb, + struct hal_rx_mon_ppdu_info *ppdu_info) +{ + struct ieee80211_radiotap_eht_usig *usig = &ppdu_info->u_sig_info.usig; + enum ieee80211_radiotap_eht_usig_tb spatial_reuse1, spatial_reuse2; + u32 common, value, mask; + + spatial_reuse1 = IEEE80211_RADIOTAP_EHT_USIG2_TB_B3_B6_SPATIAL_REUSE_1; + spatial_reuse2 = IEEE80211_RADIOTAP_EHT_USIG2_TB_B7_B10_SPATIAL_REUSE_2; + + common = __le32_to_cpu(usig->common); + value = __le32_to_cpu(usig->value); + mask = __le32_to_cpu(usig->mask); + + ppdu_info->u_sig_info.ppdu_type_comp_mode = + le32_get_bits(usig_tb->info0, + HAL_RX_USIG_TB_INFO0_PPDU_TYPE_COMP_MODE); + + common |= ATH12K_LE32_DEC_ENC(usig_tb->info0, + HAL_RX_USIG_TB_INFO0_RX_INTEG_CHECK_PASS, + IEEE80211_RADIOTAP_EHT_USIG_COMMON_BAD_USIG_CRC); + + value |= IEEE80211_RADIOTAP_EHT_USIG1_TB_B20_B25_DISREGARD | + u32_encode_bits(ppdu_info->u_sig_info.ppdu_type_comp_mode, + IEEE80211_RADIOTAP_EHT_USIG2_TB_B0_B1_PPDU_TYPE) | + IEEE80211_RADIOTAP_EHT_USIG2_TB_B2_VALIDATE | + ATH12K_LE32_DEC_ENC(usig_tb->info0, + HAL_RX_USIG_TB_INFO0_SPATIAL_REUSE_1, + spatial_reuse1) | + ATH12K_LE32_DEC_ENC(usig_tb->info0, + HAL_RX_USIG_TB_INFO0_SPATIAL_REUSE_2, + spatial_reuse2) | + IEEE80211_RADIOTAP_EHT_USIG2_TB_B11_B15_DISREGARD | + ATH12K_LE32_DEC_ENC(usig_tb->info0, + HAL_RX_USIG_TB_INFO0_CRC, + IEEE80211_RADIOTAP_EHT_USIG2_TB_B16_B19_CRC) | + ATH12K_LE32_DEC_ENC(usig_tb->info0, + HAL_RX_USIG_TB_INFO0_TAIL, + IEEE80211_RADIOTAP_EHT_USIG2_TB_B20_B25_TAIL); + + mask |= IEEE80211_RADIOTAP_EHT_USIG1_TB_B20_B25_DISREGARD | + 
IEEE80211_RADIOTAP_EHT_USIG2_TB_B0_B1_PPDU_TYPE | + IEEE80211_RADIOTAP_EHT_USIG2_TB_B2_VALIDATE | + spatial_reuse1 | spatial_reuse2 | + IEEE80211_RADIOTAP_EHT_USIG2_TB_B11_B15_DISREGARD | + IEEE80211_RADIOTAP_EHT_USIG2_TB_B16_B19_CRC | + IEEE80211_RADIOTAP_EHT_USIG2_TB_B20_B25_TAIL; + + usig->common = cpu_to_le32(common); + usig->value = cpu_to_le32(value); + usig->mask = cpu_to_le32(mask); +} + +static void +ath12k_dp_mon_hal_rx_parse_u_sig_mu(const struct hal_mon_usig_mu *usig_mu, + struct hal_rx_mon_ppdu_info *ppdu_info) +{ + struct ieee80211_radiotap_eht_usig *usig = &ppdu_info->u_sig_info.usig; + enum ieee80211_radiotap_eht_usig_mu sig_symb, punc; + u32 common, value, mask; + + sig_symb = IEEE80211_RADIOTAP_EHT_USIG2_MU_B11_B15_EHT_SIG_SYMBOLS; + punc = IEEE80211_RADIOTAP_EHT_USIG2_MU_B3_B7_PUNCTURED_INFO; + + common = __le32_to_cpu(usig->common); + value = __le32_to_cpu(usig->value); + mask = __le32_to_cpu(usig->mask); + + ppdu_info->u_sig_info.ppdu_type_comp_mode = + le32_get_bits(usig_mu->info0, + HAL_RX_USIG_MU_INFO0_PPDU_TYPE_COMP_MODE); + ppdu_info->u_sig_info.eht_sig_mcs = + le32_get_bits(usig_mu->info0, + HAL_RX_USIG_MU_INFO0_EHT_SIG_MCS); + ppdu_info->u_sig_info.num_eht_sig_sym = + le32_get_bits(usig_mu->info0, + HAL_RX_USIG_MU_INFO0_NUM_EHT_SIG_SYM); + + common |= ATH12K_LE32_DEC_ENC(usig_mu->info0, + HAL_RX_USIG_MU_INFO0_RX_INTEG_CHECK_PASS, + IEEE80211_RADIOTAP_EHT_USIG_COMMON_BAD_USIG_CRC); + + value |= IEEE80211_RADIOTAP_EHT_USIG1_MU_B20_B24_DISREGARD | + IEEE80211_RADIOTAP_EHT_USIG1_MU_B25_VALIDATE | + u32_encode_bits(ppdu_info->u_sig_info.ppdu_type_comp_mode, + IEEE80211_RADIOTAP_EHT_USIG2_MU_B0_B1_PPDU_TYPE) | + IEEE80211_RADIOTAP_EHT_USIG2_MU_B2_VALIDATE | + ATH12K_LE32_DEC_ENC(usig_mu->info0, + HAL_RX_USIG_MU_INFO0_PUNC_CH_INFO, + punc) | + IEEE80211_RADIOTAP_EHT_USIG2_MU_B8_VALIDATE | + u32_encode_bits(ppdu_info->u_sig_info.eht_sig_mcs, + IEEE80211_RADIOTAP_EHT_USIG2_MU_B9_B10_SIG_MCS) | + u32_encode_bits(ppdu_info->u_sig_info.num_eht_sig_sym, + sig_symb) | + ATH12K_LE32_DEC_ENC(usig_mu->info0, + HAL_RX_USIG_MU_INFO0_CRC, + IEEE80211_RADIOTAP_EHT_USIG2_MU_B16_B19_CRC) | + ATH12K_LE32_DEC_ENC(usig_mu->info0, + HAL_RX_USIG_MU_INFO0_TAIL, + IEEE80211_RADIOTAP_EHT_USIG2_MU_B20_B25_TAIL); + + mask |= IEEE80211_RADIOTAP_EHT_USIG1_MU_B20_B24_DISREGARD | + IEEE80211_RADIOTAP_EHT_USIG1_MU_B25_VALIDATE | + IEEE80211_RADIOTAP_EHT_USIG2_MU_B0_B1_PPDU_TYPE | + IEEE80211_RADIOTAP_EHT_USIG2_MU_B2_VALIDATE | + punc | + IEEE80211_RADIOTAP_EHT_USIG2_MU_B8_VALIDATE | + IEEE80211_RADIOTAP_EHT_USIG2_MU_B9_B10_SIG_MCS | + sig_symb | + IEEE80211_RADIOTAP_EHT_USIG2_MU_B16_B19_CRC | + IEEE80211_RADIOTAP_EHT_USIG2_MU_B20_B25_TAIL; + + usig->common = cpu_to_le32(common); + usig->value = cpu_to_le32(value); + usig->mask = cpu_to_le32(mask); +} + +static void +ath12k_dp_mon_hal_rx_parse_u_sig_hdr(const struct hal_mon_usig_hdr *usig, + struct hal_rx_mon_ppdu_info *ppdu_info) +{ + u8 comp_mode; + + ppdu_info->eht_usig = true; + + ath12k_dp_mon_hal_rx_parse_u_sig_cmn(&usig->cmn, ppdu_info); + + comp_mode = le32_get_bits(usig->non_cmn.mu.info0, + HAL_RX_USIG_MU_INFO0_PPDU_TYPE_COMP_MODE); + + if (comp_mode == 0 && ppdu_info->u_sig_info.ul_dl) + ath12k_dp_mon_hal_rx_parse_u_sig_tb(&usig->non_cmn.tb, ppdu_info); + else + ath12k_dp_mon_hal_rx_parse_u_sig_mu(&usig->non_cmn.mu, ppdu_info); +} + +static void +ath12k_dp_mon_hal_aggr_tlv(struct hal_rx_mon_ppdu_info *ppdu_info, + u16 tlv_len, const void *tlv_data) +{ + if (tlv_len <= HAL_RX_MON_MAX_AGGR_SIZE - ppdu_info->tlv_aggr.cur_len) { + 
memcpy(ppdu_info->tlv_aggr.buf + ppdu_info->tlv_aggr.cur_len, + tlv_data, tlv_len); + ppdu_info->tlv_aggr.cur_len += tlv_len; + } +} + +static inline bool +ath12k_dp_mon_hal_rx_is_frame_type_ndp(const struct hal_rx_u_sig_info *usig_info) +{ + if (usig_info->ppdu_type_comp_mode == 1 && + usig_info->eht_sig_mcs == 0 && + usig_info->num_eht_sig_sym == 0) + return true; + + return false; +} + +static inline bool +ath12k_dp_mon_hal_rx_is_non_ofdma(const struct hal_rx_u_sig_info *usig_info) +{ + u32 ppdu_type_comp_mode = usig_info->ppdu_type_comp_mode; + u32 ul_dl = usig_info->ul_dl; + + if ((ppdu_type_comp_mode == HAL_RX_RECEPTION_TYPE_MU_MIMO && ul_dl == 0) || + (ppdu_type_comp_mode == HAL_RX_RECEPTION_TYPE_MU_OFDMA && ul_dl == 0) || + (ppdu_type_comp_mode == HAL_RX_RECEPTION_TYPE_MU_MIMO && ul_dl == 1)) + return true; + + return false; +} + +static inline bool +ath12k_dp_mon_hal_rx_is_ofdma(const struct hal_rx_u_sig_info *usig_info) +{ + if (usig_info->ppdu_type_comp_mode == 0 && usig_info->ul_dl == 0) + return true; + + return false; +} + +static void +ath12k_dp_mon_hal_rx_parse_eht_sig_ndp(const struct hal_eht_sig_ndp_cmn_eb *eht_sig_ndp, + struct hal_rx_mon_ppdu_info *ppdu_info) +{ + struct hal_rx_radiotap_eht *eht = &ppdu_info->eht_info.eht; + u32 known, data; + + known = __le32_to_cpu(eht->known); + known |= IEEE80211_RADIOTAP_EHT_KNOWN_SPATIAL_REUSE | + IEEE80211_RADIOTAP_EHT_KNOWN_EHT_LTF | + IEEE80211_RADIOTAP_EHT_KNOWN_NSS_S | + IEEE80211_RADIOTAP_EHT_KNOWN_BEAMFORMED_S | + IEEE80211_RADIOTAP_EHT_KNOWN_DISREGARD_S | + IEEE80211_RADIOTAP_EHT_KNOWN_CRC1 | + IEEE80211_RADIOTAP_EHT_KNOWN_TAIL1; + eht->known = cpu_to_le32(known); + + data = __le32_to_cpu(eht->data[0]); + data |= ATH12K_LE32_DEC_ENC(eht_sig_ndp->info0, + HAL_RX_EHT_SIG_NDP_CMN_INFO0_SPATIAL_REUSE, + IEEE80211_RADIOTAP_EHT_DATA0_SPATIAL_REUSE); + /* GI and LTF size are indicated separately in the radiotap header + * and hence will be parsed from another TLV + */ + data |= ATH12K_LE32_DEC_ENC(eht_sig_ndp->info0, + HAL_RX_EHT_SIG_NDP_CMN_INFO0_NUM_LTF_SYM, + IEEE80211_RADIOTAP_EHT_DATA0_EHT_LTF); + + data |= ATH12K_LE32_DEC_ENC(eht_sig_ndp->info0, + HAL_RX_EHT_SIG_NDP_CMN_INFO0_CRC, + IEEE80211_RADIOTAP_EHT_DATA0_CRC1_O); + + data |= ATH12K_LE32_DEC_ENC(eht_sig_ndp->info0, + HAL_RX_EHT_SIG_NDP_CMN_INFO0_DISREGARD, + IEEE80211_RADIOTAP_EHT_DATA0_DISREGARD_S); + eht->data[0] = cpu_to_le32(data); + + data = __le32_to_cpu(eht->data[7]); + data |= ATH12K_LE32_DEC_ENC(eht_sig_ndp->info0, + HAL_RX_EHT_SIG_NDP_CMN_INFO0_NSS, + IEEE80211_RADIOTAP_EHT_DATA7_NSS_S); + + data |= ATH12K_LE32_DEC_ENC(eht_sig_ndp->info0, + HAL_RX_EHT_SIG_NDP_CMN_INFO0_BEAMFORMED, + IEEE80211_RADIOTAP_EHT_DATA7_BEAMFORMED_S); + eht->data[7] = cpu_to_le32(data); +} + +static void +ath12k_dp_mon_hal_rx_parse_usig_overflow(const struct hal_eht_sig_usig_overflow *ovflow, + struct hal_rx_mon_ppdu_info *ppdu_info) +{ + struct hal_rx_radiotap_eht *eht = &ppdu_info->eht_info.eht; + u32 known, data; + + known = __le32_to_cpu(eht->known); + known |= IEEE80211_RADIOTAP_EHT_KNOWN_SPATIAL_REUSE | + IEEE80211_RADIOTAP_EHT_KNOWN_EHT_LTF | + IEEE80211_RADIOTAP_EHT_KNOWN_LDPC_EXTRA_SYM_OM | + IEEE80211_RADIOTAP_EHT_KNOWN_PRE_PADD_FACOR_OM | + IEEE80211_RADIOTAP_EHT_KNOWN_PE_DISAMBIGUITY_OM | + IEEE80211_RADIOTAP_EHT_KNOWN_DISREGARD_O; + eht->known = cpu_to_le32(known); + + data = __le32_to_cpu(eht->data[0]); + data |= ATH12K_LE32_DEC_ENC(ovflow->info0, + HAL_RX_EHT_SIG_OVERFLOW_INFO0_SPATIAL_REUSE, + IEEE80211_RADIOTAP_EHT_DATA0_SPATIAL_REUSE); + + /* GI and LTF size are indicated separately in the radiotap header + * and hence will be parsed from another TLV + */ + data |= ATH12K_LE32_DEC_ENC(ovflow->info0, + HAL_RX_EHT_SIG_OVERFLOW_INFO0_NUM_LTF_SYM, + IEEE80211_RADIOTAP_EHT_DATA0_EHT_LTF); + + data |= ATH12K_LE32_DEC_ENC(ovflow->info0, + HAL_RX_EHT_SIG_OVERFLOW_INFO0_LDPC_EXTA_SYM, + IEEE80211_RADIOTAP_EHT_DATA0_LDPC_EXTRA_SYM_OM); + + data |= ATH12K_LE32_DEC_ENC(ovflow->info0, + HAL_RX_EHT_SIG_OVERFLOW_INFO0_PRE_FEC_PAD_FACTOR, + IEEE80211_RADIOTAP_EHT_DATA0_PRE_PADD_FACOR_OM); + + data |= ATH12K_LE32_DEC_ENC(ovflow->info0, + HAL_RX_EHT_SIG_OVERFLOW_INFO0_DISAMBIGUITY, + IEEE80211_RADIOTAP_EHT_DATA0_PE_DISAMBIGUITY_OM); + + data |= ATH12K_LE32_DEC_ENC(ovflow->info0, + HAL_RX_EHT_SIG_OVERFLOW_INFO0_DISREGARD, + IEEE80211_RADIOTAP_EHT_DATA0_DISREGARD_O); + eht->data[0] = cpu_to_le32(data); +} + +static void +ath12k_dp_mon_hal_rx_parse_non_ofdma_users(const struct hal_eht_sig_non_ofdma_cmn_eb *eb, + struct hal_rx_mon_ppdu_info *ppdu_info) +{ + struct hal_rx_radiotap_eht *eht = &ppdu_info->eht_info.eht; + u32 known, data; + + known = __le32_to_cpu(eht->known); + known |= IEEE80211_RADIOTAP_EHT_KNOWN_NR_NON_OFDMA_USERS_M; + eht->known = cpu_to_le32(known); + + data = __le32_to_cpu(eht->data[7]); + data |= ATH12K_LE32_DEC_ENC(eb->info0, + HAL_RX_EHT_SIG_NON_OFDMA_INFO0_NUM_USERS, + IEEE80211_RADIOTAP_EHT_DATA7_NUM_OF_NON_OFDMA_USERS); + eht->data[7] = cpu_to_le32(data); +} + +static void +ath12k_dp_mon_hal_rx_parse_eht_mumimo_user(const struct hal_eht_sig_mu_mimo *user, + struct hal_rx_mon_ppdu_info *ppdu_info) +{ + struct hal_rx_eht_info *eht_info = &ppdu_info->eht_info; + u32 user_idx; + + if (eht_info->num_user_info >= ARRAY_SIZE(eht_info->user_info)) + return; + + user_idx = eht_info->num_user_info++; + + eht_info->user_info[user_idx] |= + IEEE80211_RADIOTAP_EHT_USER_INFO_STA_ID_KNOWN | + IEEE80211_RADIOTAP_EHT_USER_INFO_MCS_KNOWN | + IEEE80211_RADIOTAP_EHT_USER_INFO_CODING_KNOWN | + IEEE80211_RADIOTAP_EHT_USER_INFO_SPATIAL_CONFIG_KNOWN_M | + ATH12K_LE32_DEC_ENC(user->info0, + HAL_RX_EHT_SIG_MUMIMO_USER_INFO0_STA_ID, + IEEE80211_RADIOTAP_EHT_USER_INFO_STA_ID) | + ATH12K_LE32_DEC_ENC(user->info0, + HAL_RX_EHT_SIG_MUMIMO_USER_INFO0_CODING, + IEEE80211_RADIOTAP_EHT_USER_INFO_CODING) | + ATH12K_LE32_DEC_ENC(user->info0, + HAL_RX_EHT_SIG_MUMIMO_USER_INFO0_MCS, + IEEE80211_RADIOTAP_EHT_USER_INFO_MCS) | + ATH12K_LE32_DEC_ENC(user->info0, + HAL_RX_EHT_SIG_MUMIMO_USER_INFO0_SPATIAL_CODING, + IEEE80211_RADIOTAP_EHT_USER_INFO_SPATIAL_CONFIG_M); + + ppdu_info->mcs = le32_get_bits(user->info0, + HAL_RX_EHT_SIG_MUMIMO_USER_INFO0_MCS); +} + +static void +ath12k_dp_mon_hal_rx_parse_eht_non_mumimo_user(const struct hal_eht_sig_non_mu_mimo *user, + struct hal_rx_mon_ppdu_info *ppdu_info) +{ + struct hal_rx_eht_info *eht_info = &ppdu_info->eht_info; + u32 user_idx; + + if (eht_info->num_user_info >= ARRAY_SIZE(eht_info->user_info)) + return; + + user_idx = eht_info->num_user_info++; + + eht_info->user_info[user_idx] |= + IEEE80211_RADIOTAP_EHT_USER_INFO_STA_ID_KNOWN | + IEEE80211_RADIOTAP_EHT_USER_INFO_MCS_KNOWN | + IEEE80211_RADIOTAP_EHT_USER_INFO_CODING_KNOWN | + IEEE80211_RADIOTAP_EHT_USER_INFO_NSS_KNOWN_O | + IEEE80211_RADIOTAP_EHT_USER_INFO_BEAMFORMING_KNOWN_O | + ATH12K_LE32_DEC_ENC(user->info0, + HAL_RX_EHT_SIG_NON_MUMIMO_USER_INFO0_STA_ID, + IEEE80211_RADIOTAP_EHT_USER_INFO_STA_ID) | + ATH12K_LE32_DEC_ENC(user->info0, + HAL_RX_EHT_SIG_NON_MUMIMO_USER_INFO0_CODING, + IEEE80211_RADIOTAP_EHT_USER_INFO_CODING) | + ATH12K_LE32_DEC_ENC(user->info0, + 
HAL_RX_EHT_SIG_NON_MUMIMO_USER_INFO0_MCS, + IEEE80211_RADIOTAP_EHT_USER_INFO_MCS) | + ATH12K_LE32_DEC_ENC(user->info0, + HAL_RX_EHT_SIG_NON_MUMIMO_USER_INFO0_NSS, + IEEE80211_RADIOTAP_EHT_USER_INFO_NSS_O) | + ATH12K_LE32_DEC_ENC(user->info0, + HAL_RX_EHT_SIG_NON_MUMIMO_USER_INFO0_BEAMFORMED, + IEEE80211_RADIOTAP_EHT_USER_INFO_BEAMFORMING_O); + + ppdu_info->mcs = le32_get_bits(user->info0, + HAL_RX_EHT_SIG_NON_MUMIMO_USER_INFO0_MCS); + + ppdu_info->nss = le32_get_bits(user->info0, + HAL_RX_EHT_SIG_NON_MUMIMO_USER_INFO0_NSS) + 1; +} + +static inline bool +ath12k_dp_mon_hal_rx_is_mu_mimo_user(const struct hal_rx_u_sig_info *usig_info) +{ + if (usig_info->ppdu_type_comp_mode == HAL_RX_RECEPTION_TYPE_SU && + usig_info->ul_dl == 1) + return true; + + return false; +} + +static void +ath12k_dp_mon_hal_rx_parse_eht_sig_non_ofdma(const void *tlv, + struct hal_rx_mon_ppdu_info *ppdu_info) +{ + const struct hal_eht_sig_non_ofdma_cmn_eb *eb = tlv; + + ath12k_dp_mon_hal_rx_parse_usig_overflow(tlv, ppdu_info); + ath12k_dp_mon_hal_rx_parse_non_ofdma_users(eb, ppdu_info); + + if (ath12k_dp_mon_hal_rx_is_mu_mimo_user(&ppdu_info->u_sig_info)) + ath12k_dp_mon_hal_rx_parse_eht_mumimo_user(&eb->user_field.mu_mimo, + ppdu_info); + else + ath12k_dp_mon_hal_rx_parse_eht_non_mumimo_user(&eb->user_field.n_mu_mimo, + ppdu_info); +} + +static void +ath12k_dp_mon_hal_rx_parse_ru_allocation(const struct hal_eht_sig_ofdma_cmn_eb *eb, + struct hal_rx_mon_ppdu_info *ppdu_info) +{ + const struct hal_eht_sig_ofdma_cmn_eb1 *ofdma_cmn_eb1 = &eb->eb1; + const struct hal_eht_sig_ofdma_cmn_eb2 *ofdma_cmn_eb2 = &eb->eb2; + struct hal_rx_radiotap_eht *eht = &ppdu_info->eht_info.eht; + enum ieee80211_radiotap_eht_data ru_123, ru_124, ru_125, ru_126; + enum ieee80211_radiotap_eht_data ru_121, ru_122, ru_112, ru_111; + u32 data; + + ru_123 = IEEE80211_RADIOTAP_EHT_DATA4_RU_ALLOC_CC_1_2_3; + ru_124 = IEEE80211_RADIOTAP_EHT_DATA5_RU_ALLOC_CC_1_2_4; + ru_125 = IEEE80211_RADIOTAP_EHT_DATA5_RU_ALLOC_CC_1_2_5; + ru_126 = IEEE80211_RADIOTAP_EHT_DATA6_RU_ALLOC_CC_1_2_6; + ru_121 = IEEE80211_RADIOTAP_EHT_DATA3_RU_ALLOC_CC_1_2_1; + ru_122 = IEEE80211_RADIOTAP_EHT_DATA3_RU_ALLOC_CC_1_2_2; + ru_112 = IEEE80211_RADIOTAP_EHT_DATA2_RU_ALLOC_CC_1_1_2; + ru_111 = IEEE80211_RADIOTAP_EHT_DATA1_RU_ALLOC_CC_1_1_1; + + switch (ppdu_info->u_sig_info.bw) { + case HAL_EHT_BW_320_2: + case HAL_EHT_BW_320_1: + data = __le32_to_cpu(eht->data[4]); + /* CC1 2::3 */ + data |= IEEE80211_RADIOTAP_EHT_DATA4_RU_ALLOC_CC_1_2_3_KNOWN | + ATH12K_LE64_DEC_ENC(ofdma_cmn_eb2->info0, + HAL_RX_EHT_SIG_OFDMA_EB2_RU_ALLOC_2_3, + ru_123); + eht->data[4] = cpu_to_le32(data); + + data = __le32_to_cpu(eht->data[5]); + /* CC1 2::4 */ + data |= IEEE80211_RADIOTAP_EHT_DATA5_RU_ALLOC_CC_1_2_4_KNOWN | + ATH12K_LE64_DEC_ENC(ofdma_cmn_eb2->info0, + HAL_RX_EHT_SIG_OFDMA_EB2_RU_ALLOC_2_4, + ru_124); + + /* CC1 2::5 */ + data |= IEEE80211_RADIOTAP_EHT_DATA5_RU_ALLOC_CC_1_2_5_KNOWN | + ATH12K_LE64_DEC_ENC(ofdma_cmn_eb2->info0, + HAL_RX_EHT_SIG_OFDMA_EB2_RU_ALLOC_2_5, + ru_125); + eht->data[5] = cpu_to_le32(data); + + data = __le32_to_cpu(eht->data[6]); + /* CC1 2::6 */ + data |= IEEE80211_RADIOTAP_EHT_DATA6_RU_ALLOC_CC_1_2_6_KNOWN | + ATH12K_LE64_DEC_ENC(ofdma_cmn_eb2->info0, + HAL_RX_EHT_SIG_OFDMA_EB2_RU_ALLOC_2_6, + ru_126); + eht->data[6] = cpu_to_le32(data); + + fallthrough; + case HAL_EHT_BW_160: + data = __le32_to_cpu(eht->data[3]); + /* CC1 2::1 */ + data |= IEEE80211_RADIOTAP_EHT_DATA3_RU_ALLOC_CC_1_2_1_KNOWN | + ATH12K_LE64_DEC_ENC(ofdma_cmn_eb2->info0, + 
HAL_RX_EHT_SIG_OFDMA_EB2_RU_ALLOC_2_1, + ru_121); + /* CC1 2::2 */ + data |= IEEE80211_RADIOTAP_EHT_DATA3_RU_ALLOC_CC_1_2_2_KNOWN | + ATH12K_LE64_DEC_ENC(ofdma_cmn_eb2->info0, + HAL_RX_EHT_SIG_OFDMA_EB2_RU_ALLOC_2_2, + ru_122); + eht->data[3] = cpu_to_le32(data); + + fallthrough; + case HAL_EHT_BW_80: + data = __le32_to_cpu(eht->data[2]); + /* CC1 1::2 */ + data |= IEEE80211_RADIOTAP_EHT_DATA2_RU_ALLOC_CC_1_1_2_KNOWN | + ATH12K_LE64_DEC_ENC(ofdma_cmn_eb1->info0, + HAL_RX_EHT_SIG_OFDMA_EB1_RU_ALLOC_1_2, + ru_112); + eht->data[2] = cpu_to_le32(data); + + fallthrough; + case HAL_EHT_BW_40: + fallthrough; + case HAL_EHT_BW_20: + data = __le32_to_cpu(eht->data[1]); + /* CC1 1::1 */ + data |= IEEE80211_RADIOTAP_EHT_DATA1_RU_ALLOC_CC_1_1_1_KNOWN | + ATH12K_LE64_DEC_ENC(ofdma_cmn_eb1->info0, + HAL_RX_EHT_SIG_OFDMA_EB1_RU_ALLOC_1_1, + ru_111); + eht->data[1] = cpu_to_le32(data); + break; + default: + break; + } +} + +static void +ath12k_dp_mon_hal_rx_parse_eht_sig_ofdma(const void *tlv, + struct hal_rx_mon_ppdu_info *ppdu_info) +{ + const struct hal_eht_sig_ofdma_cmn_eb *ofdma = tlv; + + ath12k_dp_mon_hal_rx_parse_usig_overflow(tlv, ppdu_info); + ath12k_dp_mon_hal_rx_parse_ru_allocation(ofdma, ppdu_info); + + ath12k_dp_mon_hal_rx_parse_eht_non_mumimo_user(&ofdma->user_field.n_mu_mimo, + ppdu_info); +} + +static void +ath12k_dp_mon_parse_eht_sig_hdr(struct hal_rx_mon_ppdu_info *ppdu_info, + const void *tlv_data) +{ + ppdu_info->is_eht = true; + + if (ath12k_dp_mon_hal_rx_is_frame_type_ndp(&ppdu_info->u_sig_info)) + ath12k_dp_mon_hal_rx_parse_eht_sig_ndp(tlv_data, ppdu_info); + else if (ath12k_dp_mon_hal_rx_is_non_ofdma(&ppdu_info->u_sig_info)) + ath12k_dp_mon_hal_rx_parse_eht_sig_non_ofdma(tlv_data, ppdu_info); + else if (ath12k_dp_mon_hal_rx_is_ofdma(&ppdu_info->u_sig_info)) + ath12k_dp_mon_hal_rx_parse_eht_sig_ofdma(tlv_data, ppdu_info); +} + +static inline enum ath12k_eht_ru_size +hal_rx_mon_hal_ru_size_to_ath12k_ru_size(u32 hal_ru_size) +{ + switch (hal_ru_size) { + case HAL_EHT_RU_26: + return ATH12K_EHT_RU_26; + case HAL_EHT_RU_52: + return ATH12K_EHT_RU_52; + case HAL_EHT_RU_78: + return ATH12K_EHT_RU_52_26; + case HAL_EHT_RU_106: + return ATH12K_EHT_RU_106; + case HAL_EHT_RU_132: + return ATH12K_EHT_RU_106_26; + case HAL_EHT_RU_242: + return ATH12K_EHT_RU_242; + case HAL_EHT_RU_484: + return ATH12K_EHT_RU_484; + case HAL_EHT_RU_726: + return ATH12K_EHT_RU_484_242; + case HAL_EHT_RU_996: + return ATH12K_EHT_RU_996; + case HAL_EHT_RU_996x2: + return ATH12K_EHT_RU_996x2; + case HAL_EHT_RU_996x3: + return ATH12K_EHT_RU_996x3; + case HAL_EHT_RU_996x4: + return ATH12K_EHT_RU_996x4; + case HAL_EHT_RU_NONE: + return ATH12K_EHT_RU_INVALID; + case HAL_EHT_RU_996_484: + return ATH12K_EHT_RU_996_484; + case HAL_EHT_RU_996x2_484: + return ATH12K_EHT_RU_996x2_484; + case HAL_EHT_RU_996x3_484: + return ATH12K_EHT_RU_996x3_484; + case HAL_EHT_RU_996_484_242: + return ATH12K_EHT_RU_996_484_242; + default: + return ATH12K_EHT_RU_INVALID; + } +} + +static inline u32 +hal_rx_ul_ofdma_ru_size_to_width(enum ath12k_eht_ru_size ru_size) +{ + switch (ru_size) { + case ATH12K_EHT_RU_26: + return RU_26; + case ATH12K_EHT_RU_52: + return RU_52; + case ATH12K_EHT_RU_52_26: + return RU_52_26; + case ATH12K_EHT_RU_106: + return RU_106; + case ATH12K_EHT_RU_106_26: + return RU_106_26; + case ATH12K_EHT_RU_242: + return RU_242; + case ATH12K_EHT_RU_484: + return RU_484; + case ATH12K_EHT_RU_484_242: + return RU_484_242; + case ATH12K_EHT_RU_996: + return RU_996; + case ATH12K_EHT_RU_996_484: + return RU_996_484; + case 
ATH12K_EHT_RU_996_484_242: + return RU_996_484_242; + case ATH12K_EHT_RU_996x2: + return RU_2X996; + case ATH12K_EHT_RU_996x2_484: + return RU_2X996_484; + case ATH12K_EHT_RU_996x3: + return RU_3X996; + case ATH12K_EHT_RU_996x3_484: + return RU_3X996_484; + case ATH12K_EHT_RU_996x4: + return RU_4X996; + default: + return RU_INVALID; + } +} + +static void +ath12k_dp_mon_hal_rx_parse_user_info(const struct hal_receive_user_info *rx_usr_info, + u16 user_id, + struct hal_rx_mon_ppdu_info *ppdu_info) +{ + struct hal_rx_user_status *mon_rx_user_status = NULL; + struct hal_rx_radiotap_eht *eht = &ppdu_info->eht_info.eht; + enum ath12k_eht_ru_size rtap_ru_size = ATH12K_EHT_RU_INVALID; + u32 ru_width, reception_type, ru_index = HAL_EHT_RU_INVALID; + u32 ru_type_80_0, ru_start_index_80_0; + u32 ru_type_80_1, ru_start_index_80_1; + u32 ru_type_80_2, ru_start_index_80_2; + u32 ru_type_80_3, ru_start_index_80_3; + u32 ru_size = 0, num_80mhz_with_ru = 0; + u64 ru_index_320mhz = 0; + u32 ru_index_per80mhz; + + reception_type = le32_get_bits(rx_usr_info->info0, + HAL_RX_USR_INFO0_RECEPTION_TYPE); + + switch (reception_type) { + case HAL_RECEPTION_TYPE_SU: + ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU; + break; + case HAL_RECEPTION_TYPE_DL_MU_MIMO: + case HAL_RECEPTION_TYPE_UL_MU_MIMO: + ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_MIMO; + break; + case HAL_RECEPTION_TYPE_DL_MU_OFMA: + case HAL_RECEPTION_TYPE_UL_MU_OFDMA: + ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_OFDMA; + break; + case HAL_RECEPTION_TYPE_DL_MU_OFDMA_MIMO: + case HAL_RECEPTION_TYPE_UL_MU_OFDMA_MIMO: + ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_OFDMA_MIMO; + } + + ppdu_info->is_stbc = le32_get_bits(rx_usr_info->info0, HAL_RX_USR_INFO0_STBC); + ppdu_info->ldpc = le32_get_bits(rx_usr_info->info2, HAL_RX_USR_INFO2_LDPC); + ppdu_info->dcm = le32_get_bits(rx_usr_info->info2, HAL_RX_USR_INFO2_STA_DCM); + ppdu_info->bw = le32_get_bits(rx_usr_info->info1, HAL_RX_USR_INFO1_RX_BW); + ppdu_info->mcs = le32_get_bits(rx_usr_info->info1, HAL_RX_USR_INFO1_MCS); + ppdu_info->nss = le32_get_bits(rx_usr_info->info2, HAL_RX_USR_INFO2_NSS) + 1; + + if (user_id < HAL_MAX_UL_MU_USERS) { + mon_rx_user_status = &ppdu_info->userstats[user_id]; + mon_rx_user_status->mcs = ppdu_info->mcs; + mon_rx_user_status->nss = ppdu_info->nss; + } + + if (!(ppdu_info->reception_type == HAL_RX_RECEPTION_TYPE_MU_MIMO || + ppdu_info->reception_type == HAL_RX_RECEPTION_TYPE_MU_OFDMA || + ppdu_info->reception_type == HAL_RX_RECEPTION_TYPE_MU_OFDMA_MIMO)) + return; + + /* RU allocation present only for OFDMA reception */ + ru_type_80_0 = le32_get_bits(rx_usr_info->info2, HAL_RX_USR_INFO2_RU_TYPE_80_0); + ru_start_index_80_0 = le32_get_bits(rx_usr_info->info3, + HAL_RX_USR_INFO3_RU_START_IDX_80_0); + if (ru_type_80_0 != HAL_EHT_RU_NONE) { + ru_size += ru_type_80_0; + ru_index_per80mhz = ru_start_index_80_0; + ru_index = ru_index_per80mhz; + ru_index_320mhz |= HAL_RU_PER80(ru_type_80_0, 0, ru_index_per80mhz); + num_80mhz_with_ru++; + } + + ru_type_80_1 = le32_get_bits(rx_usr_info->info2, HAL_RX_USR_INFO2_RU_TYPE_80_1); + ru_start_index_80_1 = le32_get_bits(rx_usr_info->info3, + HAL_RX_USR_INFO3_RU_START_IDX_80_1); + if (ru_type_80_1 != HAL_EHT_RU_NONE) { + ru_size += ru_type_80_1; + ru_index_per80mhz = ru_start_index_80_1; + ru_index = ru_index_per80mhz; + ru_index_320mhz |= HAL_RU_PER80(ru_type_80_1, 1, ru_index_per80mhz); + num_80mhz_with_ru++; + } + + ru_type_80_2 = le32_get_bits(rx_usr_info->info2, HAL_RX_USR_INFO2_RU_TYPE_80_2); + 
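+ /* Editor's note, not part of the original patch: each 80 MHz segment + * reports its own RU type and start index; segments that actually carry + * an RU are counted in num_80mhz_with_ru so that a multi-RU (MRU) + * allocation spanning several segments, e.g. 996+484, can be recognised + * and mapped to an MRU index further below. */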
ru_start_index_80_2 = le32_get_bits(rx_usr_info->info3, + HAL_RX_USR_INFO3_RU_START_IDX_80_2); + if (ru_type_80_2 != HAL_EHT_RU_NONE) { + ru_size += ru_type_80_2; + ru_index_per80mhz = ru_start_index_80_2; + ru_index = ru_index_per80mhz; + ru_index_320mhz |= HAL_RU_PER80(ru_type_80_2, 2, ru_index_per80mhz); + num_80mhz_with_ru++; + } + + ru_type_80_3 = le32_get_bits(rx_usr_info->info2, HAL_RX_USR_INFO2_RU_TYPE_80_3); + ru_start_index_80_3 = le32_get_bits(rx_usr_info->info3, + HAL_RX_USR_INFO3_RU_START_IDX_80_3); + if (ru_type_80_3 != HAL_EHT_RU_NONE) { + ru_size += ru_type_80_3; + ru_index_per80mhz = ru_start_index_80_3; + ru_index = ru_index_per80mhz; + ru_index_320mhz |= HAL_RU_PER80(ru_type_80_3, 3, ru_index_per80mhz); + num_80mhz_with_ru++; + } + + if (num_80mhz_with_ru > 1) { + /* Calculate the MRU index */ + switch (ru_index_320mhz) { + case HAL_EHT_RU_996_484_0: + case HAL_EHT_RU_996x2_484_0: + case HAL_EHT_RU_996x3_484_0: + ru_index = 0; + break; + case HAL_EHT_RU_996_484_1: + case HAL_EHT_RU_996x2_484_1: + case HAL_EHT_RU_996x3_484_1: + ru_index = 1; + break; + case HAL_EHT_RU_996_484_2: + case HAL_EHT_RU_996x2_484_2: + case HAL_EHT_RU_996x3_484_2: + ru_index = 2; + break; + case HAL_EHT_RU_996_484_3: + case HAL_EHT_RU_996x2_484_3: + case HAL_EHT_RU_996x3_484_3: + ru_index = 3; + break; + case HAL_EHT_RU_996_484_4: + case HAL_EHT_RU_996x2_484_4: + case HAL_EHT_RU_996x3_484_4: + ru_index = 4; + break; + case HAL_EHT_RU_996_484_5: + case HAL_EHT_RU_996x2_484_5: + case HAL_EHT_RU_996x3_484_5: + ru_index = 5; + break; + case HAL_EHT_RU_996_484_6: + case HAL_EHT_RU_996x2_484_6: + case HAL_EHT_RU_996x3_484_6: + ru_index = 6; + break; + case HAL_EHT_RU_996_484_7: + case HAL_EHT_RU_996x2_484_7: + case HAL_EHT_RU_996x3_484_7: + ru_index = 7; + break; + case HAL_EHT_RU_996x2_484_8: + ru_index = 8; + break; + case HAL_EHT_RU_996x2_484_9: + ru_index = 9; + break; + case HAL_EHT_RU_996x2_484_10: + ru_index = 10; + break; + case HAL_EHT_RU_996x2_484_11: + ru_index = 11; + break; + default: + ru_index = HAL_EHT_RU_INVALID; + break; + } + + ru_size += 4; + } + + rtap_ru_size = hal_rx_mon_hal_ru_size_to_ath12k_ru_size(ru_size); + if (rtap_ru_size != ATH12K_EHT_RU_INVALID) { + u32 known, data; + + known = __le32_to_cpu(eht->known); + known |= IEEE80211_RADIOTAP_EHT_KNOWN_RU_MRU_SIZE_OM; + eht->known = cpu_to_le32(known); + + data = __le32_to_cpu(eht->data[1]); + data |= u32_encode_bits(rtap_ru_size, + IEEE80211_RADIOTAP_EHT_DATA1_RU_SIZE); + eht->data[1] = cpu_to_le32(data); + } + + if (ru_index != HAL_EHT_RU_INVALID) { + u32 known, data; + + known = __le32_to_cpu(eht->known); + known |= IEEE80211_RADIOTAP_EHT_KNOWN_RU_MRU_INDEX_OM; + eht->known = cpu_to_le32(known); + + data = __le32_to_cpu(eht->data[1]); + data |= u32_encode_bits(ru_index, + IEEE80211_RADIOTAP_EHT_DATA1_RU_INDEX); + eht->data[1] = cpu_to_le32(data); + } + + if (mon_rx_user_status && ru_index != HAL_EHT_RU_INVALID && + rtap_ru_size != ATH12K_EHT_RU_INVALID) { + mon_rx_user_status->ul_ofdma_ru_start_index = ru_index; + mon_rx_user_status->ul_ofdma_ru_size = rtap_ru_size; + + ru_width = hal_rx_ul_ofdma_ru_size_to_width(rtap_ru_size); + + mon_rx_user_status->ul_ofdma_ru_width = ru_width; + mon_rx_user_status->ofdma_info_valid = 1; + } } static enum hal_rx_mon_status -ath12k_dp_mon_rx_parse_status_tlv(struct ath12k_base *ab, +ath12k_dp_mon_rx_parse_status_tlv(struct ath12k *ar, struct ath12k_mon_data *pmon, - u32 tlv_tag, const void *tlv_data, - u32 userid) + const struct hal_tlv_64_hdr *tlv) { struct hal_rx_mon_ppdu_info 
*ppdu_info = &pmon->mon_ppdu_info; - u32 info[7]; + const void *tlv_data = tlv->value; + u32 info[7], userid; + u16 tlv_tag, tlv_len; + + tlv_tag = le64_get_bits(tlv->tl, HAL_TLV_64_HDR_TAG); + tlv_len = le64_get_bits(tlv->tl, HAL_TLV_64_HDR_LEN); + userid = le64_get_bits(tlv->tl, HAL_TLV_64_USR_ID); + + if (ppdu_info->tlv_aggr.in_progress && ppdu_info->tlv_aggr.tlv_tag != tlv_tag) { + ath12k_dp_mon_parse_eht_sig_hdr(ppdu_info, ppdu_info->tlv_aggr.buf); + + ppdu_info->tlv_aggr.in_progress = false; + ppdu_info->tlv_aggr.cur_len = 0; + } switch (tlv_tag) { case HAL_RX_PPDU_START: { @@ -638,6 +1504,9 @@ ath12k_dp_mon_rx_parse_status_tlv(struct ath12k_base *ab, ppdu_info->num_mpdu_fcs_err = u32_get_bits(info[0], HAL_RX_PPDU_END_USER_STATS_INFO0_MPDU_CNT_FCS_ERR); + ppdu_info->peer_id = + u32_get_bits(info[0], HAL_RX_PPDU_END_USER_STATS_INFO0_PEER_ID); + switch (ppdu_info->preamble_type) { case HAL_RX_PREAMBLE_11N: ppdu_info->ht_flags = 1; @@ -648,6 +1517,9 @@ ath12k_dp_mon_rx_parse_status_tlv(struct ath12k_base *ab, case HAL_RX_PREAMBLE_11AX: ppdu_info->he_flags = 1; break; + case HAL_RX_PREAMBLE_11BE: + ppdu_info->is_eht = true; + break; default: break; } @@ -655,6 +1527,11 @@ ath12k_dp_mon_rx_parse_status_tlv(struct ath12k_base *ab, if (userid < HAL_MAX_UL_MU_USERS) { struct hal_rx_user_status *rxuser_stats = &ppdu_info->userstats[userid]; + + if (ppdu_info->num_mpdu_fcs_ok > 1 || + ppdu_info->num_mpdu_fcs_err > 1) + ppdu_info->userstats[userid].ampdu_present = true; + ppdu_info->num_users += 1; ath12k_dp_mon_rx_handle_ofdma_info(eu_stats, rxuser_stats); @@ -730,6 +1607,17 @@ ath12k_dp_mon_rx_parse_status_tlv(struct ath12k_base *ab, HAL_RX_PHYRX_RSSI_LEGACY_INFO_INFO0_RX_BW); break; } + case HAL_PHYRX_OTHER_RECEIVE_INFO: { + const struct hal_phyrx_common_user_info *cmn_usr_info = tlv_data; + + ppdu_info->gi = le32_get_bits(cmn_usr_info->info0, + HAL_RX_PHY_CMN_USER_INFO0_GI); + break; + } + case HAL_RX_PPDU_START_USER_INFO: + ath12k_dp_mon_hal_rx_parse_user_info(tlv_data, userid, ppdu_info); + break; + case HAL_RXPCU_PPDU_END_INFO: { const struct hal_rx_ppdu_end_duration *ppdu_rx_duration = tlv_data; @@ -743,7 +1631,6 @@ ath12k_dp_mon_rx_parse_status_tlv(struct ath12k_base *ab, } case HAL_RX_MPDU_START: { const struct hal_rx_mpdu_start *mpdu_start = tlv_data; - struct dp_mon_mpdu *mon_mpdu = pmon->mon_mpdu; u16 peer_id; info[1] = __le32_to_cpu(mpdu_start->info1); @@ -756,68 +1643,38 @@ ath12k_dp_mon_rx_parse_status_tlv(struct ath12k_base *ab, if (userid < HAL_MAX_UL_MU_USERS) { info[0] = __le32_to_cpu(mpdu_start->info0); ppdu_info->userid = userid; - ppdu_info->ampdu_id[userid] = - u32_get_bits(info[0], HAL_RX_MPDU_START_INFO1_PEERID); + ppdu_info->userstats[userid].ampdu_id = + u32_get_bits(info[0], HAL_RX_MPDU_START_INFO0_PPDU_ID); } - mon_mpdu = kzalloc(sizeof(*mon_mpdu), GFP_ATOMIC); - if (!mon_mpdu) - return HAL_RX_MON_STATUS_PPDU_NOT_DONE; - break; } case HAL_RX_MSDU_START: /* TODO: add msdu start parsing logic */ break; - case HAL_MON_BUF_ADDR: { - struct dp_rxdma_mon_ring *buf_ring = &ab->dp.rxdma_mon_buf_ring; - const struct dp_mon_packet_info *packet_info = tlv_data; - int buf_id = u32_get_bits(packet_info->cookie, - DP_RXDMA_BUF_COOKIE_BUF_ID); - struct sk_buff *msdu; - struct dp_mon_mpdu *mon_mpdu = pmon->mon_mpdu; - struct ath12k_skb_rxcb *rxcb; - - spin_lock_bh(&buf_ring->idr_lock); - msdu = idr_remove(&buf_ring->bufs_idr, buf_id); - spin_unlock_bh(&buf_ring->idr_lock); - - if (unlikely(!msdu)) { - ath12k_warn(ab, "monitor destination with invalid buf_id %d\n", - buf_id); - 
return HAL_RX_MON_STATUS_PPDU_NOT_DONE; + case HAL_MON_BUF_ADDR: + return HAL_RX_MON_STATUS_BUF_ADDR; + case HAL_RX_MSDU_END: + return HAL_RX_MON_STATUS_MSDU_END; + case HAL_RX_MPDU_END: + return HAL_RX_MON_STATUS_MPDU_END; + case HAL_PHYRX_GENERIC_U_SIG: + ath12k_dp_mon_hal_rx_parse_u_sig_hdr(tlv_data, ppdu_info); + break; + case HAL_PHYRX_GENERIC_EHT_SIG: + /* Handle the case where aggregation is in progress + * or the current TLV is one of the TLVs which should be + * aggregated + */ + if (!ppdu_info->tlv_aggr.in_progress) { + ppdu_info->tlv_aggr.in_progress = true; + ppdu_info->tlv_aggr.tlv_tag = tlv_tag; + ppdu_info->tlv_aggr.cur_len = 0; } - rxcb = ATH12K_SKB_RXCB(msdu); - dma_unmap_single(ab->dev, rxcb->paddr, - msdu->len + skb_tailroom(msdu), - DMA_FROM_DEVICE); - - if (mon_mpdu->tail) - mon_mpdu->tail->next = msdu; - else - mon_mpdu->tail = msdu; - - ath12k_dp_mon_buf_replenish(ab, buf_ring, 1); + ppdu_info->is_eht = true; - break; - } - case HAL_RX_MSDU_END: { - const struct rx_msdu_end_qcn9274 *msdu_end = tlv_data; - bool is_first_msdu_in_mpdu; - u16 msdu_end_info; - - msdu_end_info = __le16_to_cpu(msdu_end->info5); - is_first_msdu_in_mpdu = u32_get_bits(msdu_end_info, - RX_MSDU_END_INFO5_FIRST_MSDU); - if (is_first_msdu_in_mpdu) { - pmon->mon_mpdu->head = pmon->mon_mpdu->tail; - pmon->mon_mpdu->tail = NULL; - } - break; - } - case HAL_RX_MPDU_END: - list_add_tail(&pmon->mon_mpdu->list, &pmon->dp_rx_mon_mpdu_list); + ath12k_dp_mon_hal_aggr_tlv(ppdu_info, tlv_len, tlv_data); break; case HAL_DUMMY: return HAL_RX_MON_STATUS_BUF_DONE; @@ -844,7 +1701,7 @@ static void ath12k_dp_mon_rx_msdus_set_payload(struct ath12k *ar, } static struct sk_buff * -ath12k_dp_mon_rx_merg_msdus(struct ath12k *ar, u32 mac_id, +ath12k_dp_mon_rx_merg_msdus(struct ath12k *ar, struct sk_buff *head_msdu, struct sk_buff *tail_msdu, struct ieee80211_rx_status *rxs, bool *fcs_err) { @@ -1005,18 +1862,70 @@ static void ath12k_dp_mon_update_radiotap(struct ath12k *ar, { struct ieee80211_supported_band *sband; u8 *ptr = NULL; - u16 ampdu_id = ppduinfo->ampdu_id[ppduinfo->userid]; rxs->flag |= RX_FLAG_MACTIME_START; rxs->signal = ppduinfo->rssi_comb + ATH12K_DEFAULT_NOISE_FLOOR; rxs->nss = ppduinfo->nss + 1; - if (ampdu_id) { + if (ppduinfo->userstats[ppduinfo->userid].ampdu_present) { rxs->flag |= RX_FLAG_AMPDU_DETAILS; - rxs->ampdu_reference = ampdu_id; + rxs->ampdu_reference = ppduinfo->userstats[ppduinfo->userid].ampdu_id; } - if (ppduinfo->he_mu_flags) { + if (ppduinfo->is_eht || ppduinfo->eht_usig) { + struct ieee80211_radiotap_tlv *tlv; + struct ieee80211_radiotap_eht *eht; + struct ieee80211_radiotap_eht_usig *usig; + u16 len = 0, i, eht_len, usig_len; + u8 user; + + if (ppduinfo->is_eht) { + eht_len = struct_size(eht, + user_info, + ppduinfo->eht_info.num_user_info); + len += sizeof(*tlv) + eht_len; + } + + if (ppduinfo->eht_usig) { + usig_len = sizeof(*usig); + len += sizeof(*tlv) + usig_len; + } + + rxs->flag |= RX_FLAG_RADIOTAP_TLV_AT_END; + rxs->encoding = RX_ENC_EHT; + + skb_reset_mac_header(mon_skb); + + tlv = skb_push(mon_skb, len); + + if (ppduinfo->is_eht) { + tlv->type = cpu_to_le16(IEEE80211_RADIOTAP_EHT); + tlv->len = cpu_to_le16(eht_len); + + eht = (struct ieee80211_radiotap_eht *)tlv->data; + eht->known = ppduinfo->eht_info.eht.known; + + for (i = 0; + i < ARRAY_SIZE(eht->data) && + i < ARRAY_SIZE(ppduinfo->eht_info.eht.data); + i++) + eht->data[i] = ppduinfo->eht_info.eht.data[i]; + + for (user = 0; user < ppduinfo->eht_info.num_user_info; user++) + 
put_unaligned_le32(ppduinfo->eht_info.user_info[user], + &eht->user_info[user]); + + tlv = (struct ieee80211_radiotap_tlv *)&tlv->data[eht_len]; + } + + if (ppduinfo->eht_usig) { + tlv->type = cpu_to_le16(IEEE80211_RADIOTAP_EHT_USIG); + tlv->len = cpu_to_le16(usig_len); + + usig = (struct ieee80211_radiotap_eht_usig *)tlv->data; + *usig = ppduinfo->u_sig_info.usig; + } + } else if (ppduinfo->he_mu_flags) { rxs->flag |= RX_FLAG_RADIOTAP_HE_MU; rxs->encoding = RX_ENC_HE; ptr = skb_push(mon_skb, sizeof(struct ieee80211_radiotap_he_mu)); @@ -1125,7 +2034,7 @@ static void ath12k_dp_mon_rx_deliver_msdu(struct ath12k *ar, struct napi_struct ieee80211_rx_napi(ath12k_ar_to_hw(ar), pubsta, msdu, napi); } -static int ath12k_dp_mon_rx_deliver(struct ath12k *ar, u32 mac_id, +static int ath12k_dp_mon_rx_deliver(struct ath12k *ar, struct sk_buff *head_msdu, struct sk_buff *tail_msdu, struct hal_rx_mon_ppdu_info *ppduinfo, struct napi_struct *napi) @@ -1135,7 +2044,7 @@ static int ath12k_dp_mon_rx_deliver(struct ath12k *ar, u32 mac_id, struct ieee80211_rx_status *rxs = &dp->rx_status; bool fcs_err = false; - mon_skb = ath12k_dp_mon_rx_merg_msdus(ar, mac_id, + mon_skb = ath12k_dp_mon_rx_merg_msdus(ar, head_msdu, tail_msdu, rxs, &fcs_err); if (!mon_skb) @@ -1180,24 +2089,18 @@ static int ath12k_dp_mon_rx_deliver(struct ath12k *ar, u32 mac_id, } static enum hal_rx_mon_status -ath12k_dp_mon_parse_rx_dest(struct ath12k_base *ab, struct ath12k_mon_data *pmon, +ath12k_dp_mon_parse_rx_dest(struct ath12k *ar, struct ath12k_mon_data *pmon, struct sk_buff *skb) { - struct hal_rx_mon_ppdu_info *ppdu_info = &pmon->mon_ppdu_info; struct hal_tlv_64_hdr *tlv; + struct ath12k_skb_rxcb *rxcb; enum hal_rx_mon_status hal_status; - u32 tlv_userid; u16 tlv_tag, tlv_len; u8 *ptr = skb->data; - memset(ppdu_info, 0, sizeof(struct hal_rx_mon_ppdu_info)); - do { tlv = (struct hal_tlv_64_hdr *)ptr; tlv_tag = le64_get_bits(tlv->tl, HAL_TLV_64_HDR_TAG); - tlv_len = le64_get_bits(tlv->tl, HAL_TLV_64_HDR_LEN); - tlv_userid = le64_get_bits(tlv->tl, HAL_TLV_64_USR_ID); - ptr += sizeof(*tlv); /* The actual length of PPDU_END is the combined length of many PHY * TLVs that follow. 
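(Editor's clarification, inferred from the code below: the length field in the PPDU_END header itself therefore cannot be used to step to the next TLV, so a fixed size is substituted for this one tag.) 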
Skip the TLV header and @@ -1207,16 +2110,24 @@ ath12k_dp_mon_parse_rx_dest(struct ath12k_base *ab, struct ath12k_mon_data *pmon if (tlv_tag == HAL_RX_PPDU_END) tlv_len = sizeof(struct hal_rx_rxpcu_classification_overview); + else + tlv_len = le64_get_bits(tlv->tl, HAL_TLV_64_HDR_LEN); - hal_status = ath12k_dp_mon_rx_parse_status_tlv(ab, pmon, - tlv_tag, ptr, tlv_userid); - ptr += tlv_len; + hal_status = ath12k_dp_mon_rx_parse_status_tlv(ar, pmon, tlv); + ptr += sizeof(*tlv) + tlv_len; ptr = PTR_ALIGN(ptr, HAL_TLV_64_ALIGN); if ((ptr - skb->data) >= DP_RX_BUFFER_SIZE) break; - } while (hal_status == HAL_RX_MON_STATUS_PPDU_NOT_DONE); + } while ((hal_status == HAL_RX_MON_STATUS_PPDU_NOT_DONE) || + (hal_status == HAL_RX_MON_STATUS_BUF_ADDR) || + (hal_status == HAL_RX_MON_STATUS_MPDU_END) || + (hal_status == HAL_RX_MON_STATUS_MSDU_END)); + + rxcb = ATH12K_SKB_RXCB(skb); + if (rxcb->is_end_of_ppdu) + hal_status = HAL_RX_MON_STATUS_PPDU_DONE; return hal_status; } @@ -1224,18 +2135,16 @@ ath12k_dp_mon_parse_rx_dest(struct ath12k_base *ab, struct ath12k_mon_data *pmon enum hal_rx_mon_status ath12k_dp_mon_rx_parse_mon_status(struct ath12k *ar, struct ath12k_mon_data *pmon, - int mac_id, struct sk_buff *skb, struct napi_struct *napi) { - struct ath12k_base *ab = ar->ab; struct hal_rx_mon_ppdu_info *ppdu_info = &pmon->mon_ppdu_info; struct dp_mon_mpdu *tmp; struct dp_mon_mpdu *mon_mpdu = pmon->mon_mpdu; struct sk_buff *head_msdu, *tail_msdu; enum hal_rx_mon_status hal_status = HAL_RX_MON_STATUS_BUF_DONE; - ath12k_dp_mon_parse_rx_dest(ab, pmon, skb); + ath12k_dp_mon_parse_rx_dest(ar, pmon, skb); list_for_each_entry_safe(mon_mpdu, tmp, &pmon->dp_rx_mon_mpdu_list, list) { list_del(&mon_mpdu->list); @@ -1243,7 +2152,7 @@ ath12k_dp_mon_rx_parse_mon_status(struct ath12k *ar, tail_msdu = mon_mpdu->tail; if (head_msdu && tail_msdu) { - ath12k_dp_mon_rx_deliver(ar, mac_id, head_msdu, + ath12k_dp_mon_rx_deliver(ar, head_msdu, tail_msdu, ppdu_info, napi); } @@ -1924,7 +2833,7 @@ ath12k_dp_mon_tx_status_get_num_user(u16 tlv_tag, } static void -ath12k_dp_mon_tx_process_ppdu_info(struct ath12k *ar, int mac_id, +ath12k_dp_mon_tx_process_ppdu_info(struct ath12k *ar, struct napi_struct *napi, struct dp_mon_tx_ppdu_info *tx_ppdu_info) { @@ -1938,7 +2847,7 @@ ath12k_dp_mon_tx_process_ppdu_info(struct ath12k *ar, int mac_id, tail_msdu = mon_mpdu->tail; if (head_msdu) - ath12k_dp_mon_rx_deliver(ar, mac_id, head_msdu, tail_msdu, + ath12k_dp_mon_rx_deliver(ar, head_msdu, tail_msdu, &tx_ppdu_info->rx_status, napi); kfree(mon_mpdu); @@ -1948,7 +2857,6 @@ ath12k_dp_mon_tx_process_ppdu_info(struct ath12k *ar, int mac_id, enum hal_rx_mon_status ath12k_dp_mon_tx_parse_mon_status(struct ath12k *ar, struct ath12k_mon_data *pmon, - int mac_id, struct sk_buff *skb, struct napi_struct *napi, u32 ppdu_id) @@ -1995,155 +2903,43 @@ ath12k_dp_mon_tx_parse_mon_status(struct ath12k *ar, break; } while (tlv_status != DP_MON_TX_FES_STATUS_END); - ath12k_dp_mon_tx_process_ppdu_info(ar, mac_id, napi, tx_data_ppdu_info); - ath12k_dp_mon_tx_process_ppdu_info(ar, mac_id, napi, tx_prot_ppdu_info); + ath12k_dp_mon_tx_process_ppdu_info(ar, napi, tx_data_ppdu_info); + ath12k_dp_mon_tx_process_ppdu_info(ar, napi, tx_prot_ppdu_info); return tlv_status; } -int ath12k_dp_mon_srng_process(struct ath12k *ar, int mac_id, int *budget, - enum dp_monitor_mode monitor_mode, - struct napi_struct *napi) -{ - struct hal_mon_dest_desc *mon_dst_desc; - struct ath12k_pdev_dp *pdev_dp = &ar->dp; - struct ath12k_mon_data *pmon = (struct ath12k_mon_data 
*)&pdev_dp->mon_data; - struct ath12k_base *ab = ar->ab; - struct ath12k_dp *dp = &ab->dp; - struct sk_buff *skb; - struct ath12k_skb_rxcb *rxcb; - struct dp_srng *mon_dst_ring; - struct hal_srng *srng; - struct dp_rxdma_mon_ring *buf_ring; - u64 cookie; - u32 ppdu_id; - int num_buffs_reaped = 0, srng_id, buf_id; - u8 dest_idx = 0, i; - bool end_of_ppdu; - struct hal_rx_mon_ppdu_info *ppdu_info; - struct ath12k_peer *peer = NULL; - - ppdu_info = &pmon->mon_ppdu_info; - memset(ppdu_info, 0, sizeof(*ppdu_info)); - ppdu_info->peer_id = HAL_INVALID_PEERID; - - srng_id = ath12k_hw_mac_id_to_srng_id(ab->hw_params, mac_id); - - if (monitor_mode == ATH12K_DP_RX_MONITOR_MODE) { - mon_dst_ring = &pdev_dp->rxdma_mon_dst_ring[srng_id]; - buf_ring = &dp->rxdma_mon_buf_ring; - } else { - return 0; - } - - srng = &ab->hal.srng_list[mon_dst_ring->ring_id]; - - spin_lock_bh(&srng->lock); - ath12k_hal_srng_access_begin(ab, srng); - - while (likely(*budget)) { - *budget -= 1; - mon_dst_desc = ath12k_hal_srng_dst_peek(ab, srng); - if (unlikely(!mon_dst_desc)) - break; - - cookie = le32_to_cpu(mon_dst_desc->cookie); - buf_id = u32_get_bits(cookie, DP_RXDMA_BUF_COOKIE_BUF_ID); - - spin_lock_bh(&buf_ring->idr_lock); - skb = idr_remove(&buf_ring->bufs_idr, buf_id); - spin_unlock_bh(&buf_ring->idr_lock); - - if (unlikely(!skb)) { - ath12k_warn(ab, "monitor destination with invalid buf_id %d\n", - buf_id); - goto move_next; - } - - rxcb = ATH12K_SKB_RXCB(skb); - dma_unmap_single(ab->dev, rxcb->paddr, - skb->len + skb_tailroom(skb), - DMA_FROM_DEVICE); - - pmon->dest_skb_q[dest_idx] = skb; - dest_idx++; - ppdu_id = le32_to_cpu(mon_dst_desc->ppdu_id); - end_of_ppdu = le32_get_bits(mon_dst_desc->info0, - HAL_MON_DEST_INFO0_END_OF_PPDU); - if (!end_of_ppdu) - continue; - - for (i = 0; i < dest_idx; i++) { - skb = pmon->dest_skb_q[i]; - - if (monitor_mode == ATH12K_DP_RX_MONITOR_MODE) - ath12k_dp_mon_rx_parse_mon_status(ar, pmon, mac_id, - skb, napi); - else - ath12k_dp_mon_tx_parse_mon_status(ar, pmon, mac_id, - skb, napi, ppdu_id); - - peer = ath12k_peer_find_by_id(ab, ppdu_info->peer_id); - - if (!peer || !peer->sta) { - ath12k_dbg(ab, ATH12K_DBG_DATA, - "failed to find the peer with peer_id %d\n", - ppdu_info->peer_id); - dev_kfree_skb_any(skb); - continue; - } - - dev_kfree_skb_any(skb); - pmon->dest_skb_q[i] = NULL; - } - - dest_idx = 0; -move_next: - ath12k_dp_mon_buf_replenish(ab, buf_ring, 1); - ath12k_hal_srng_src_get_next_entry(ab, srng); - num_buffs_reaped++; - } - - ath12k_hal_srng_access_end(ab, srng); - spin_unlock_bh(&srng->lock); - - return num_buffs_reaped; -} - static void ath12k_dp_mon_rx_update_peer_rate_table_stats(struct ath12k_rx_peer_stats *rx_stats, struct hal_rx_mon_ppdu_info *ppdu_info, struct hal_rx_user_status *user_stats, u32 num_msdu) { - u32 rate_idx = 0; + struct ath12k_rx_peer_rate_stats *stats; u32 mcs_idx = (user_stats) ? user_stats->mcs : ppdu_info->mcs; u32 nss_idx = (user_stats) ? 
user_stats->nss - 1 : ppdu_info->nss - 1; u32 bw_idx = ppdu_info->bw; u32 gi_idx = ppdu_info->gi; + u32 len; - if ((mcs_idx > HAL_RX_MAX_MCS_HE) || (nss_idx >= HAL_RX_MAX_NSS) || - (bw_idx >= HAL_RX_BW_MAX) || (gi_idx >= HAL_RX_GI_MAX)) { + if (mcs_idx > HAL_RX_MAX_MCS_HT || nss_idx >= HAL_RX_MAX_NSS || + bw_idx >= HAL_RX_BW_MAX || gi_idx >= HAL_RX_GI_MAX) { return; } - if (ppdu_info->preamble_type == HAL_RX_PREAMBLE_11N || - ppdu_info->preamble_type == HAL_RX_PREAMBLE_11AC) { - rate_idx = mcs_idx * 8 + 8 * 10 * nss_idx; - rate_idx += bw_idx * 2 + gi_idx; - } else if (ppdu_info->preamble_type == HAL_RX_PREAMBLE_11AX) { + if (ppdu_info->preamble_type == HAL_RX_PREAMBLE_11AX || + ppdu_info->preamble_type == HAL_RX_PREAMBLE_11BE) gi_idx = ath12k_he_gi_to_nl80211_he_gi(ppdu_info->gi); - rate_idx = mcs_idx * 12 + 12 * 12 * nss_idx; - rate_idx += bw_idx * 3 + gi_idx; - } else { - return; - } - rx_stats->pkt_stats.rx_rate[rate_idx] += num_msdu; + rx_stats->pkt_stats.rx_rate[bw_idx][gi_idx][nss_idx][mcs_idx] += num_msdu; + stats = &rx_stats->byte_stats; + if (user_stats) - rx_stats->byte_stats.rx_rate[rate_idx] += user_stats->mpdu_ok_byte_count; + len = user_stats->mpdu_ok_byte_count; else - rx_stats->byte_stats.rx_rate[rate_idx] += ppdu_info->mpdu_len; + len = ppdu_info->mpdu_len; + + stats->rx_rate[bw_idx][gi_idx][nss_idx][mcs_idx] += len; } static void ath12k_dp_mon_rx_update_peer_su_stats(struct ath12k *ar, @@ -2157,6 +2953,7 @@ static void ath12k_dp_mon_rx_update_peer_su_stats(struct ath12k *ar, return; arsta->rssi_comb = ppdu_info->rssi_comb; + ewma_avg_rssi_add(&arsta->avg_rssi, ppdu_info->rssi_comb); num_msdu = ppdu_info->tcp_msdu_count + ppdu_info->tcp_ack_msdu_count + ppdu_info->udp_msdu_count + ppdu_info->other_msdu_count; @@ -2229,6 +3026,12 @@ static void ath12k_dp_mon_rx_update_peer_su_stats(struct ath12k *ar, rx_stats->byte_stats.he_mcs_count[ppdu_info->mcs] += ppdu_info->mpdu_len; } + if (ppdu_info->preamble_type == HAL_RX_PREAMBLE_11BE && + ppdu_info->mcs <= HAL_RX_MAX_MCS_BE) { + rx_stats->pkt_stats.be_mcs_count[ppdu_info->mcs] += num_msdu; + rx_stats->byte_stats.be_mcs_count[ppdu_info->mcs] += ppdu_info->mpdu_len; + } + if ((ppdu_info->preamble_type == HAL_RX_PREAMBLE_11A || ppdu_info->preamble_type == HAL_RX_PREAMBLE_11B) && ppdu_info->rate < HAL_RX_LEGACY_RATE_INVALID) { @@ -2329,6 +3132,7 @@ ath12k_dp_mon_rx_update_user_stats(struct ath12k *ar, return; arsta->rssi_comb = ppdu_info->rssi_comb; + ewma_avg_rssi_add(&arsta->avg_rssi, ppdu_info->rssi_comb); num_msdu = user_stats->tcp_msdu_count + user_stats->tcp_ack_msdu_count + user_stats->udp_msdu_count + user_stats->other_msdu_count; @@ -2415,8 +3219,15 @@ ath12k_dp_mon_rx_update_peer_mu_stats(struct ath12k *ar, ath12k_dp_mon_rx_update_user_stats(ar, ppdu_info, i); } -int ath12k_dp_mon_rx_process_stats(struct ath12k *ar, int mac_id, - struct napi_struct *napi, int *budget) +static void +ath12k_dp_mon_rx_memset_ppdu_info(struct hal_rx_mon_ppdu_info *ppdu_info) +{ + memset(ppdu_info, 0, sizeof(*ppdu_info)); + ppdu_info->peer_id = HAL_INVALID_PEERID; +} + +int ath12k_dp_mon_srng_process(struct ath12k *ar, int *budget, + struct napi_struct *napi) { struct ath12k_base *ab = ar->ab; struct ath12k_pdev_dp *pdev_dp = &ar->dp; @@ -2432,13 +3243,14 @@ int ath12k_dp_mon_rx_process_stats(struct ath12k *ar, int mac_id, struct ath12k_sta *ahsta = NULL; struct ath12k_link_sta *arsta; struct ath12k_peer *peer; + struct sk_buff_head skb_list; u64 cookie; int num_buffs_reaped = 0, srng_id, buf_id; - u8 dest_idx = 0, i; - bool end_of_ppdu; - u32 
hal_status; + u32 hal_status, end_offset, info0, end_reason; + u8 pdev_idx = ath12k_hw_mac_id_to_pdev_id(ab->hw_params, ar->pdev_idx); - srng_id = ath12k_hw_mac_id_to_srng_id(ab->hw_params, mac_id); + __skb_queue_head_init(&skb_list); + srng_id = ath12k_hw_mac_id_to_srng_id(ab->hw_params, pdev_idx); mon_dst_ring = &pdev_dp->rxdma_mon_dst_ring[srng_id]; buf_ring = &dp->rxdma_mon_buf_ring; @@ -2451,6 +3263,15 @@ int ath12k_dp_mon_rx_process_stats(struct ath12k *ar, int mac_id, mon_dst_desc = ath12k_hal_srng_dst_peek(ab, srng); if (unlikely(!mon_dst_desc)) break; + + /* In case of an empty descriptor, the cookie in the ring descriptor + * is invalid. Therefore, this entry is skipped, and ring processing + * continues. + */ + info0 = le32_to_cpu(mon_dst_desc->info0); + if (u32_get_bits(info0, HAL_MON_DEST_INFO0_EMPTY_DESC)) + goto move_next; + cookie = le32_to_cpu(mon_dst_desc->cookie); buf_id = u32_get_bits(cookie, DP_RXDMA_BUF_COOKIE_BUF_ID); @@ -2468,63 +3289,102 @@ int ath12k_dp_mon_rx_process_stats(struct ath12k *ar, int mac_id, dma_unmap_single(ab->dev, rxcb->paddr, skb->len + skb_tailroom(skb), DMA_FROM_DEVICE); - pmon->dest_skb_q[dest_idx] = skb; - dest_idx++; - end_of_ppdu = le32_get_bits(mon_dst_desc->info0, - HAL_MON_DEST_INFO0_END_OF_PPDU); - if (!end_of_ppdu) - continue; - for (i = 0; i < dest_idx; i++) { - skb = pmon->dest_skb_q[i]; - hal_status = ath12k_dp_mon_parse_rx_dest(ab, pmon, skb); + end_reason = u32_get_bits(info0, HAL_MON_DEST_INFO0_END_REASON); - if (ppdu_info->peer_id == HAL_INVALID_PEERID || - hal_status != HAL_RX_MON_STATUS_PPDU_DONE) { - dev_kfree_skb_any(skb); - continue; - } - - rcu_read_lock(); - spin_lock_bh(&ab->base_lock); - peer = ath12k_peer_find_by_id(ab, ppdu_info->peer_id); - if (!peer || !peer->sta) { - ath12k_dbg(ab, ATH12K_DBG_DATA, - "failed to find the peer with peer_id %d\n", - ppdu_info->peer_id); - spin_unlock_bh(&ab->base_lock); - rcu_read_unlock(); - dev_kfree_skb_any(skb); - continue; - } + /* HAL_MON_FLUSH_DETECTED implies that an rx flush was received at the + * end of an rx PPDU, and HAL_MON_PPDU_TRUNCATED implies that the PPDU + * got truncated due to a system level error. In both cases, the buffer + * data can be discarded. + */ + if ((end_reason == HAL_MON_FLUSH_DETECTED) || + (end_reason == HAL_MON_PPDU_TRUNCATED)) { + ath12k_dbg(ab, ATH12K_DBG_DATA, + "Monitor dest descriptor end reason %d", end_reason); + dev_kfree_skb_any(skb); + goto move_next; + } - if (ppdu_info->reception_type == HAL_RX_RECEPTION_TYPE_SU) { - ahsta = ath12k_sta_to_ahsta(peer->sta); - arsta = &ahsta->deflink; - ath12k_dp_mon_rx_update_peer_su_stats(ar, arsta, - ppdu_info); - } else if ((ppdu_info->fc_valid) && - (ppdu_info->ast_index != HAL_AST_IDX_INVALID)) { - ath12k_dp_mon_rx_process_ulofdma(ppdu_info); - ath12k_dp_mon_rx_update_peer_mu_stats(ar, ppdu_info); - } + /* Decrement the budget only when the ring descriptor carries the + * HAL_MON_END_OF_PPDU end reason, to ensure that one PPDU worth of data + * is always reaped. This helps to efficiently utilize the NAPI budget. 
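+ * (Editor's illustration, not part of the original patch: with a NAPI + * budget of four, up to four complete PPDUs are reaped per poll, however + * many ring buffers each PPDU happens to span.)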
+ */ + if (end_reason == HAL_MON_END_OF_PPDU) { + *budget -= 1; + rxcb->is_end_of_ppdu = true; + } - spin_unlock_bh(&ab->base_lock); - rcu_read_unlock(); - dev_kfree_skb_any(skb); - memset(ppdu_info, 0, sizeof(*ppdu_info)); - ppdu_info->peer_id = HAL_INVALID_PEERID; + end_offset = u32_get_bits(info0, HAL_MON_DEST_INFO0_END_OFFSET); + if (likely(end_offset <= DP_RX_BUFFER_SIZE)) { + skb_put(skb, end_offset); + } else { + ath12k_warn(ab, + "invalid offset on mon stats destination %u\n", + end_offset); + skb_put(skb, DP_RX_BUFFER_SIZE); } - dest_idx = 0; + __skb_queue_tail(&skb_list, skb); + move_next: ath12k_dp_mon_buf_replenish(ab, buf_ring, 1); - ath12k_hal_srng_src_get_next_entry(ab, srng); + ath12k_hal_srng_dst_get_next_entry(ab, srng); num_buffs_reaped++; } ath12k_hal_srng_access_end(ab, srng); spin_unlock_bh(&srng->lock); + + if (!num_buffs_reaped) + return 0; + + /* In some cases, one PPDU worth of data can be spread across multiple NAPI + * schedules. To avoid losing the already parsed ppdu_info information, skip + * the memset of the ppdu_info structure and continue processing it. + */ + if (!ppdu_info->ppdu_continuation) + ath12k_dp_mon_rx_memset_ppdu_info(ppdu_info); + + while ((skb = __skb_dequeue(&skb_list))) { + hal_status = ath12k_dp_mon_parse_rx_dest(ar, pmon, skb); + if (hal_status != HAL_RX_MON_STATUS_PPDU_DONE) { + ppdu_info->ppdu_continuation = true; + dev_kfree_skb_any(skb); + continue; + } + + if (ppdu_info->peer_id == HAL_INVALID_PEERID) + goto free_skb; + + rcu_read_lock(); + spin_lock_bh(&ab->base_lock); + peer = ath12k_peer_find_by_id(ab, ppdu_info->peer_id); + if (!peer || !peer->sta) { + ath12k_dbg(ab, ATH12K_DBG_DATA, + "failed to find the peer with monitor peer_id %d\n", + ppdu_info->peer_id); + goto next_skb; + } + + if (ppdu_info->reception_type == HAL_RX_RECEPTION_TYPE_SU) { + ahsta = ath12k_sta_to_ahsta(peer->sta); + arsta = &ahsta->deflink; + ath12k_dp_mon_rx_update_peer_su_stats(ar, arsta, + ppdu_info); + } else if ((ppdu_info->fc_valid) && + (ppdu_info->ast_index != HAL_AST_IDX_INVALID)) { + ath12k_dp_mon_rx_process_ulofdma(ppdu_info); + ath12k_dp_mon_rx_update_peer_mu_stats(ar, ppdu_info); + } + +next_skb: + spin_unlock_bh(&ab->base_lock); + rcu_read_unlock(); +free_skb: + dev_kfree_skb_any(skb); + ath12k_dp_mon_rx_memset_ppdu_info(ppdu_info); + } + return num_buffs_reaped; } @@ -2535,11 +3395,10 @@ int ath12k_dp_mon_process_ring(struct ath12k_base *ab, int mac_id, struct ath12k *ar = ath12k_ab_to_ar(ab, mac_id); int num_buffs_reaped = 0; - if (!ar->monitor_started) - ath12k_dp_mon_rx_process_stats(ar, mac_id, napi, &budget); - else - num_buffs_reaped = ath12k_dp_mon_srng_process(ar, mac_id, &budget, - monitor_mode, napi); + if (ab->hw_params->rxdma1_enable) { + if (monitor_mode == ATH12K_DP_RX_MONITOR_MODE) + num_buffs_reaped = ath12k_dp_mon_srng_process(ar, &budget, napi); + } return num_buffs_reaped; } diff --git a/drivers/net/wireless/ath/ath12k/dp_mon.h b/drivers/net/wireless/ath/ath12k/dp_mon.h index fb9e9c176ce57..e4368eb42aca8 100644 --- a/drivers/net/wireless/ath/ath12k/dp_mon.h +++ b/drivers/net/wireless/ath/ath12k/dp_mon.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: BSD-3-Clause-Clear */ /* * Copyright (c) 2019-2021 The Linux Foundation. All rights reserved. - * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. 
*/ #ifndef ATH12K_DP_MON_H @@ -77,14 +77,11 @@ struct dp_mon_tx_ppdu_info { enum hal_rx_mon_status ath12k_dp_mon_rx_parse_mon_status(struct ath12k *ar, struct ath12k_mon_data *pmon, - int mac_id, struct sk_buff *skb, + struct sk_buff *skb, struct napi_struct *napi); int ath12k_dp_mon_buf_replenish(struct ath12k_base *ab, struct dp_rxdma_mon_ring *buf_ring, int req_entries); -int ath12k_dp_mon_srng_process(struct ath12k *ar, int mac_id, - int *budget, enum dp_monitor_mode monitor_mode, - struct napi_struct *napi); int ath12k_dp_mon_process_ring(struct ath12k_base *ab, int mac_id, struct napi_struct *napi, int budget, enum dp_monitor_mode monitor_mode); @@ -96,11 +93,9 @@ ath12k_dp_mon_tx_status_get_num_user(u16 tlv_tag, enum hal_rx_mon_status ath12k_dp_mon_tx_parse_mon_status(struct ath12k *ar, struct ath12k_mon_data *pmon, - int mac_id, struct sk_buff *skb, struct napi_struct *napi, u32 ppdu_id); void ath12k_dp_mon_rx_process_ulofdma(struct hal_rx_mon_ppdu_info *ppdu_info); -int ath12k_dp_mon_rx_process_stats(struct ath12k *ar, int mac_id, - struct napi_struct *napi, int *budget); +int ath12k_dp_mon_srng_process(struct ath12k *ar, int *budget, struct napi_struct *napi); #endif diff --git a/drivers/net/wireless/ath/ath12k/dp_rx.c b/drivers/net/wireless/ath/ath12k/dp_rx.c index dad35bfd83f62..ff6a709b5042c 100644 --- a/drivers/net/wireless/ath/ath12k/dp_rx.c +++ b/drivers/net/wireless/ath/ath12k/dp_rx.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: BSD-3-Clause-Clear /* * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. - * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved. */ #include @@ -2392,6 +2392,23 @@ static void ath12k_dp_rx_h_rate(struct ath12k *ar, struct hal_rx_desc *rx_desc, rx_status->he_gi = ath12k_he_gi_to_nl80211_he_gi(sgi); rx_status->bw = ath12k_mac_bw_to_mac80211_bw(bw); break; + case RX_MSDU_START_PKT_TYPE_11BE: + rx_status->rate_idx = rate_mcs; + + if (rate_mcs > ATH12K_EHT_MCS_MAX) { + ath12k_warn(ar->ab, + "Received with invalid mcs in EHT mode %d\n", + rate_mcs); + break; + } + + rx_status->encoding = RX_ENC_EHT; + rx_status->nss = nss; + rx_status->eht.gi = ath12k_mac_eht_gi_to_nl80211_eht_gi(sgi); + rx_status->bw = ath12k_mac_bw_to_mac80211_bw(bw); + break; + default: + break; } } @@ -2486,7 +2503,7 @@ static void ath12k_dp_rx_deliver_msdu(struct ath12k *ar, struct napi_struct *nap spin_unlock_bh(&ab->base_lock); ath12k_dbg(ab, ATH12K_DBG_DATA, - "rx skb %p len %u peer %pM %d %s sn %u %s%s%s%s%s%s%s%s%s rate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n", + "rx skb %p len %u peer %pM %d %s sn %u %s%s%s%s%s%s%s%s%s%s rate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n", msdu, msdu->len, peer ? peer->addr : NULL, @@ -2497,6 +2514,7 @@ static void ath12k_dp_rx_deliver_msdu(struct ath12k *ar, struct napi_struct *nap (status->encoding == RX_ENC_HT) ? "ht" : "", (status->encoding == RX_ENC_VHT) ? "vht" : "", (status->encoding == RX_ENC_HE) ? "he" : "", + (status->encoding == RX_ENC_EHT) ? "eht" : "", (status->bw == RATE_INFO_BW_40) ? "40" : "", (status->bw == RATE_INFO_BW_80) ? "80" : "", (status->bw == RATE_INFO_BW_160) ? 
"160" : "", @@ -2530,6 +2548,29 @@ static void ath12k_dp_rx_deliver_msdu(struct ath12k *ar, struct napi_struct *nap ieee80211_rx_napi(ath12k_ar_to_hw(ar), pubsta, msdu, napi); } +static bool ath12k_dp_rx_check_nwifi_hdr_len_valid(struct ath12k_base *ab, + struct hal_rx_desc *rx_desc, + struct sk_buff *msdu) +{ + struct ieee80211_hdr *hdr; + u8 decap_type; + u32 hdr_len; + + decap_type = ath12k_dp_rx_h_decap_type(ab, rx_desc); + if (decap_type != DP_RX_DECAP_TYPE_NATIVE_WIFI) + return true; + + hdr = (struct ieee80211_hdr *)msdu->data; + hdr_len = ieee80211_hdrlen(hdr->frame_control); + + if ((likely(hdr_len <= DP_MAX_NWIFI_HDR_LEN))) + return true; + + ab->soc_stats.invalid_rbm++; + WARN_ON_ONCE(1); + return false; +} + static int ath12k_dp_rx_process_msdu(struct ath12k *ar, struct sk_buff *msdu, struct sk_buff_head *msdu_list, @@ -2588,6 +2629,11 @@ static int ath12k_dp_rx_process_msdu(struct ath12k *ar, } } + if (unlikely(!ath12k_dp_rx_check_nwifi_hdr_len_valid(ab, rx_desc, msdu))) { + ret = -EINVAL; + goto free_out; + } + ath12k_dp_rx_h_ppdu(ar, rx_desc, rx_status); ath12k_dp_rx_h_mpdu(ar, msdu, rx_desc, rx_status); @@ -2978,6 +3024,9 @@ static int ath12k_dp_rx_h_verify_tkip_mic(struct ath12k *ar, struct ath12k_peer RX_FLAG_IV_STRIPPED | RX_FLAG_DECRYPTED; skb_pull(msdu, hal_rx_desc_sz); + if (unlikely(!ath12k_dp_rx_check_nwifi_hdr_len_valid(ab, rx_desc, msdu))) + return -EINVAL; + ath12k_dp_rx_h_ppdu(ar, rx_desc, rxs); ath12k_dp_rx_h_undecap(ar, msdu, rx_desc, HAL_ENCRYPT_TYPE_TKIP_MIC, rxs, true); @@ -3720,6 +3769,9 @@ static int ath12k_dp_rx_h_null_q_desc(struct ath12k *ar, struct sk_buff *msdu, skb_put(msdu, hal_rx_desc_sz + l3pad_bytes + msdu_len); skb_pull(msdu, hal_rx_desc_sz + l3pad_bytes); } + if (unlikely(!ath12k_dp_rx_check_nwifi_hdr_len_valid(ab, desc, msdu))) + return -EINVAL; + ath12k_dp_rx_h_ppdu(ar, desc, status); ath12k_dp_rx_h_mpdu(ar, msdu, desc, status); @@ -3764,7 +3816,7 @@ static bool ath12k_dp_rx_h_reo_err(struct ath12k *ar, struct sk_buff *msdu, return drop; } -static void ath12k_dp_rx_h_tkip_mic_err(struct ath12k *ar, struct sk_buff *msdu, +static bool ath12k_dp_rx_h_tkip_mic_err(struct ath12k *ar, struct sk_buff *msdu, struct ieee80211_rx_status *status) { struct ath12k_base *ab = ar->ab; @@ -3782,6 +3834,9 @@ static void ath12k_dp_rx_h_tkip_mic_err(struct ath12k *ar, struct sk_buff *msdu, skb_put(msdu, hal_rx_desc_sz + l3pad_bytes + msdu_len); skb_pull(msdu, hal_rx_desc_sz + l3pad_bytes); + if (unlikely(!ath12k_dp_rx_check_nwifi_hdr_len_valid(ab, desc, msdu))) + return true; + ath12k_dp_rx_h_ppdu(ar, desc, status); status->flag |= (RX_FLAG_MMIC_STRIPPED | RX_FLAG_MMIC_ERROR | @@ -3789,6 +3844,7 @@ static void ath12k_dp_rx_h_tkip_mic_err(struct ath12k *ar, struct sk_buff *msdu, ath12k_dp_rx_h_undecap(ar, msdu, desc, HAL_ENCRYPT_TYPE_TKIP_MIC, status, false); + return false; } static bool ath12k_dp_rx_h_rxdma_err(struct ath12k *ar, struct sk_buff *msdu, @@ -3807,7 +3863,7 @@ static bool ath12k_dp_rx_h_rxdma_err(struct ath12k *ar, struct sk_buff *msdu, case HAL_REO_ENTR_RING_RXDMA_ECODE_TKIP_MIC_ERR: err_bitmap = ath12k_dp_rx_h_mpdu_err(ab, rx_desc); if (err_bitmap & HAL_RX_MPDU_ERR_TKIP_MIC) { - ath12k_dp_rx_h_tkip_mic_err(ar, msdu, status); + drop = ath12k_dp_rx_h_tkip_mic_err(ar, msdu, status); break; } fallthrough; @@ -4032,7 +4088,7 @@ int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab, hw_links[hw_link_id].pdev_idx); ar = partner_ab->pdevs[pdev_id].ar; - if (!ar || !rcu_dereference(ar->ab->pdevs_active[hw_link_id])) { + if (!ar || 
!rcu_dereference(ar->ab->pdevs_active[pdev_id])) { dev_kfree_skb_any(msdu); continue; } diff --git a/drivers/net/wireless/ath/ath12k/dp_rx.h b/drivers/net/wireless/ath/ath12k/dp_rx.h index 1ce82088c9540..88e42365a9d8b 100644 --- a/drivers/net/wireless/ath/ath12k/dp_rx.h +++ b/drivers/net/wireless/ath/ath12k/dp_rx.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: BSD-3-Clause-Clear */ /* * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. - * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved. */ #ifndef ATH12K_DP_RX_H #define ATH12K_DP_RX_H @@ -79,6 +79,9 @@ static inline u32 ath12k_he_gi_to_nl80211_he_gi(u8 sgi) case RX_MSDU_START_SGI_3_2_US: ret = NL80211_RATE_INFO_HE_GI_3_2; break; + default: + ret = NL80211_RATE_INFO_HE_GI_0_8; + break; } return ret; @@ -135,9 +138,6 @@ u32 ath12k_dp_rx_h_mpdu_err(struct ath12k_base *ab, struct hal_rx_desc *desc); void ath12k_dp_rx_h_ppdu(struct ath12k *ar, struct hal_rx_desc *rx_desc, struct ieee80211_rx_status *rx_status); -struct ath12k_peer * -ath12k_dp_rx_h_find_peer(struct ath12k_base *ab, struct sk_buff *msdu); - int ath12k_dp_rxdma_ring_sel_config_qcn9274(struct ath12k_base *ab); int ath12k_dp_rxdma_ring_sel_config_wcn7850(struct ath12k_base *ab); diff --git a/drivers/net/wireless/ath/ath12k/dp_tx.c b/drivers/net/wireless/ath/ath12k/dp_tx.c index a8d341a6df01e..ced232bf4aed0 100644 --- a/drivers/net/wireless/ath/ath12k/dp_tx.c +++ b/drivers/net/wireless/ath/ath12k/dp_tx.c @@ -1,13 +1,15 @@ // SPDX-License-Identifier: BSD-3-Clause-Clear /* * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. - * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved. */ #include "core.h" #include "dp_tx.h" #include "debug.h" #include "hw.h" +#include "peer.h" +#include "mac.h" static enum hal_tcl_encap_type ath12k_dp_tx_get_encap_type(struct ath12k_link_vif *arvif, struct sk_buff *skb) @@ -117,7 +119,7 @@ static void ath12k_hal_tx_cmd_ext_desc_setup(struct ath12k_base *ab, le32_encode_bits(ti->data_len, HAL_TX_MSDU_EXT_INFO1_BUF_LEN); - tcl_ext_cmd->info1 = le32_encode_bits(1, HAL_TX_MSDU_EXT_INFO1_EXTN_OVERRIDE) | + tcl_ext_cmd->info1 |= le32_encode_bits(1, HAL_TX_MSDU_EXT_INFO1_EXTN_OVERRIDE) | le32_encode_bits(ti->encap_type, HAL_TX_MSDU_EXT_INFO1_ENCAP_TYPE) | le32_encode_bits(ti->encrypt_type, @@ -217,7 +219,7 @@ static int ath12k_dp_tx_align_payload(struct ath12k_base *ab, } int ath12k_dp_tx(struct ath12k *ar, struct ath12k_link_vif *arvif, - struct sk_buff *skb) + struct sk_buff *skb, bool gsn_valid, int mcbc_gsn) { struct ath12k_base *ab = ar->ab; struct ath12k_dp *dp = &ab->dp; @@ -290,13 +292,27 @@ int ath12k_dp_tx(struct ath12k *ar, struct ath12k_link_vif *arvif, msdu_ext_desc = true; } + if (gsn_valid) { + /* Reset and Initialize meta_data_flags with Global Sequence + * Number (GSN) info. 
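+ * (Editor's note, an inference from the surrounding code rather than the + * original patch text: the GSN gives host-reinjected multicast/broadcast + * frames a single sequence space, which pairs with the + * HTT_TX_MLO_MCAST_HOST_REINJECT_BASE_VDEV_ID offset applied to + * ti.vdev_id below.)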
+ */ + ti.meta_data_flags = + u32_encode_bits(HTT_TCL_META_DATA_TYPE_GLOBAL_SEQ_NUM, + HTT_TCL_META_DATA_TYPE) | + u32_encode_bits(mcbc_gsn, HTT_TCL_META_DATA_GLOBAL_SEQ_NUM); + } + ti.encap_type = ath12k_dp_tx_get_encap_type(arvif, skb); ti.addr_search_flags = arvif->hal_addr_search_flags; ti.search_type = arvif->search_type; ti.type = HAL_TCL_DESC_TYPE_BUFFER; ti.pkt_offset = 0; ti.lmac_id = ar->lmac_id; + ti.vdev_id = arvif->vdev_id; + if (gsn_valid) + ti.vdev_id += HTT_TX_MLO_MCAST_HOST_REINJECT_BASE_VDEV_ID; + ti.bss_ast_hash = arvif->ast_hash; ti.bss_ast_idx = arvif->ast_idx; ti.dscp_tid_tbl_idx = 0; @@ -368,6 +384,7 @@ int ath12k_dp_tx(struct ath12k *ar, struct ath12k_link_vif *arvif, add_htt_metadata = true; msdu_ext_desc = true; ti.flags0 |= u32_encode_bits(1, HAL_TCL_DATA_CMD_INFO2_TO_FW); + ti.meta_data_flags |= HTT_TCL_META_DATA_VALID_HTT; ti.encap_type = HAL_TCL_ENCAP_TYPE_RAW; ti.encrypt_type = HAL_ENCRYPT_TYPE_OPEN; } @@ -398,6 +415,7 @@ int ath12k_dp_tx(struct ath12k *ar, struct ath12k_link_vif *arvif, if (ret < 0) { ath12k_dbg(ab, ATH12K_DBG_DP_TX, "Failed to add HTT meta data, dropping packet\n"); + kfree_skb(skb_ext_desc); goto fail_unmap_dma; } } @@ -558,13 +576,13 @@ ath12k_dp_tx_process_htt_tx_complete(struct ath12k_base *ab, switch (wbm_status) { case HAL_WBM_REL_HTT_TX_COMP_STATUS_OK: - case HAL_WBM_REL_HTT_TX_COMP_STATUS_DROP: - case HAL_WBM_REL_HTT_TX_COMP_STATUS_TTL: ts.acked = (wbm_status == HAL_WBM_REL_HTT_TX_COMP_STATUS_OK); ts.ack_rssi = le32_get_bits(status_desc->info2, HTT_TX_WBM_COMP_INFO2_ACK_RSSI); ath12k_dp_tx_htt_tx_complete_buf(ab, msdu, tx_ring, &ts); break; + case HAL_WBM_REL_HTT_TX_COMP_STATUS_DROP: + case HAL_WBM_REL_HTT_TX_COMP_STATUS_TTL: case HAL_WBM_REL_HTT_TX_COMP_STATUS_REINJ: case HAL_WBM_REL_HTT_TX_COMP_STATUS_INSPECT: ath12k_dp_tx_free_txbuf(ab, msdu, mac_id, tx_ring); @@ -580,6 +598,124 @@ ath12k_dp_tx_process_htt_tx_complete(struct ath12k_base *ab, } } +static void ath12k_dp_tx_update_txcompl(struct ath12k *ar, struct hal_tx_status *ts) +{ + struct ath12k_base *ab = ar->ab; + struct ath12k_peer *peer; + struct ieee80211_sta *sta; + struct ath12k_sta *ahsta; + struct ath12k_link_sta *arsta; + struct rate_info txrate = {0}; + u16 rate, ru_tones; + u8 rate_idx = 0; + int ret; + + spin_lock_bh(&ab->base_lock); + peer = ath12k_peer_find_by_id(ab, ts->peer_id); + if (!peer || !peer->sta) { + ath12k_dbg(ab, ATH12K_DBG_DP_TX, + "failed to find the peer by id %u\n", ts->peer_id); + spin_unlock_bh(&ab->base_lock); + return; + } + sta = peer->sta; + ahsta = ath12k_sta_to_ahsta(sta); + arsta = &ahsta->deflink; + + /* Prefer the real NSS value from arsta->last_txrate.nss; if it is + * invalid, fall back to the NSS value negotiated at assoc. 
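+ * (Editor's note: peer_nss is captured at association time, so it is a + * safe fallback before any tx completion has reported a real rate.)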
+ */ + if (arsta->last_txrate.nss) + txrate.nss = arsta->last_txrate.nss; + else + txrate.nss = arsta->peer_nss; + spin_unlock_bh(&ab->base_lock); + + switch (ts->pkt_type) { + case HAL_TX_RATE_STATS_PKT_TYPE_11A: + case HAL_TX_RATE_STATS_PKT_TYPE_11B: + ret = ath12k_mac_hw_ratecode_to_legacy_rate(ts->mcs, + ts->pkt_type, + &rate_idx, + &rate); + if (ret < 0) { + ath12k_warn(ab, "Invalid tx legacy rate %d\n", ret); + return; + } + + txrate.legacy = rate; + break; + case HAL_TX_RATE_STATS_PKT_TYPE_11N: + if (ts->mcs > ATH12K_HT_MCS_MAX) { + ath12k_warn(ab, "Invalid HT mcs index %d\n", ts->mcs); + return; + } + + if (txrate.nss != 0) + txrate.mcs = ts->mcs + 8 * (txrate.nss - 1); + + txrate.flags = RATE_INFO_FLAGS_MCS; + + if (ts->sgi) + txrate.flags |= RATE_INFO_FLAGS_SHORT_GI; + break; + case HAL_TX_RATE_STATS_PKT_TYPE_11AC: + if (ts->mcs > ATH12K_VHT_MCS_MAX) { + ath12k_warn(ab, "Invalid VHT mcs index %d\n", ts->mcs); + return; + } + + txrate.mcs = ts->mcs; + txrate.flags = RATE_INFO_FLAGS_VHT_MCS; + + if (ts->sgi) + txrate.flags |= RATE_INFO_FLAGS_SHORT_GI; + break; + case HAL_TX_RATE_STATS_PKT_TYPE_11AX: + if (ts->mcs > ATH12K_HE_MCS_MAX) { + ath12k_warn(ab, "Invalid HE mcs index %d\n", ts->mcs); + return; + } + + txrate.mcs = ts->mcs; + txrate.flags = RATE_INFO_FLAGS_HE_MCS; + txrate.he_gi = ath12k_he_gi_to_nl80211_he_gi(ts->sgi); + break; + case HAL_TX_RATE_STATS_PKT_TYPE_11BE: + if (ts->mcs > ATH12K_EHT_MCS_MAX) { + ath12k_warn(ab, "Invalid EHT mcs index %d\n", ts->mcs); + return; + } + + txrate.mcs = ts->mcs; + txrate.flags = RATE_INFO_FLAGS_EHT_MCS; + txrate.eht_gi = ath12k_mac_eht_gi_to_nl80211_eht_gi(ts->sgi); + break; + default: + ath12k_warn(ab, "Invalid tx pkt type: %d\n", ts->pkt_type); + return; + } + + txrate.bw = ath12k_mac_bw_to_mac80211_bw(ts->bw); + + if (ts->ofdma && ts->pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11AX) { + txrate.bw = RATE_INFO_BW_HE_RU; + ru_tones = ath12k_mac_he_convert_tones_to_ru_tones(ts->tones); + txrate.he_ru_alloc = + ath12k_he_ru_tones_to_nl80211_he_ru_alloc(ru_tones); + } + + if (ts->ofdma && ts->pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11BE) { + txrate.bw = RATE_INFO_BW_EHT_RU; + txrate.eht_ru_alloc = + ath12k_mac_eht_ru_tones_to_nl80211_eht_ru_alloc(ts->tones); + } + + spin_lock_bh(&ab->base_lock); + arsta->txrate = txrate; + spin_unlock_bh(&ab->base_lock); +} + static void ath12k_dp_tx_complete_msdu(struct ath12k *ar, struct sk_buff *msdu, struct hal_tx_status *ts) @@ -658,6 +794,8 @@ static void ath12k_dp_tx_complete_msdu(struct ath12k *ar, * Might end up reporting it out-of-band from HTT stats. 
*/ + ath12k_dp_tx_update_txcompl(ar, ts); + ieee80211_tx_status_skb(ath12k_ar_to_hw(ar), msdu); exit: @@ -668,6 +806,8 @@ static void ath12k_dp_tx_status_parse(struct ath12k_base *ab, struct hal_wbm_completion_ring_tx *desc, struct hal_tx_status *ts) { + u32 info0 = le32_to_cpu(desc->rate_stats.info0); + ts->buf_rel_source = le32_get_bits(desc->info0, HAL_WBM_COMPL_TX_INFO0_REL_SRC_MODULE); if (ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_FW && @@ -682,10 +822,17 @@ static void ath12k_dp_tx_status_parse(struct ath12k_base *ab, ts->ppdu_id = le32_get_bits(desc->info1, HAL_WBM_COMPL_TX_INFO1_TQM_STATUS_NUMBER); - if (le32_to_cpu(desc->rate_stats.info0) & HAL_TX_RATE_STATS_INFO0_VALID) - ts->rate_stats = le32_to_cpu(desc->rate_stats.info0); - else - ts->rate_stats = 0; + + ts->peer_id = le32_get_bits(desc->info3, HAL_WBM_COMPL_TX_INFO3_PEER_ID); + + if (info0 & HAL_TX_RATE_STATS_INFO0_VALID) { + ts->pkt_type = u32_get_bits(info0, HAL_TX_RATE_STATS_INFO0_PKT_TYPE); + ts->mcs = u32_get_bits(info0, HAL_TX_RATE_STATS_INFO0_MCS); + ts->sgi = u32_get_bits(info0, HAL_TX_RATE_STATS_INFO0_SGI); + ts->bw = u32_get_bits(info0, HAL_TX_RATE_STATS_INFO0_BW); + ts->tones = u32_get_bits(info0, HAL_TX_RATE_STATS_INFO0_TONES_IN_RU); + ts->ofdma = u32_get_bits(info0, HAL_TX_RATE_STATS_INFO0_OFDMA_TX); + } } void ath12k_dp_tx_completion_handler(struct ath12k_base *ab, int ring_id) @@ -814,7 +961,7 @@ ath12k_dp_tx_get_ring_id_type(struct ath12k_base *ab, *htt_ring_type = HTT_HW_TO_SW_RING; break; case HAL_RXDMA_MONITOR_BUF: - *htt_ring_id = HTT_RXDMA_MONITOR_BUF_RING; + *htt_ring_id = HTT_RX_MON_HOST2MON_BUF_RING; *htt_ring_type = HTT_SW_TO_HW_RING; break; case HAL_RXDMA_MONITOR_STATUS: @@ -822,7 +969,7 @@ ath12k_dp_tx_get_ring_id_type(struct ath12k_base *ab, *htt_ring_type = HTT_SW_TO_HW_RING; break; case HAL_RXDMA_MONITOR_DST: - *htt_ring_id = HTT_RXDMA_MONITOR_DEST_RING; + *htt_ring_id = HTT_RX_MON_MON2HOST_DEST_RING; *htt_ring_type = HTT_HW_TO_SW_RING; break; case HAL_RXDMA_MONITOR_DESC: @@ -971,7 +1118,14 @@ int ath12k_dp_tx_htt_h2t_ver_req_msg(struct ath12k_base *ab) skb_put(skb, len); cmd = (struct htt_ver_req_cmd *)skb->data; cmd->ver_reg_info = le32_encode_bits(HTT_H2T_MSG_TYPE_VERSION_REQ, - HTT_VER_REQ_INFO_MSG_ID); + HTT_OPTION_TAG); + + cmd->tcl_metadata_version = le32_encode_bits(HTT_TAG_TCL_METADATA_VERSION, + HTT_OPTION_TAG) | + le32_encode_bits(HTT_TCL_METADATA_VER_SZ, + HTT_OPTION_LEN) | + le32_encode_bits(HTT_OPTION_TCL_METADATA_VER_V2, + HTT_OPTION_VALUE); ret = ath12k_htc_send(&ab->htc, dp->eid, skb); if (ret) { @@ -1077,15 +1231,46 @@ int ath12k_dp_tx_htt_rx_filter_setup(struct ath12k_base *ab, u32 ring_id, cmd->info0 |= le32_encode_bits(!!(params.flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP), HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PS); cmd->info0 |= le32_encode_bits(tlv_filter->offset_valid, - HTT_RX_RING_SELECTION_CFG_CMD_OFFSET_VALID); + HTT_RX_RING_SELECTION_CFG_CMD_INFO0_OFFSET_VALID); + cmd->info0 |= + le32_encode_bits(tlv_filter->drop_threshold_valid, + HTT_RX_RING_SELECTION_CFG_CMD_INFO0_DROP_THRES_VAL); + cmd->info0 |= le32_encode_bits(!tlv_filter->rxmon_disable, + HTT_RX_RING_SELECTION_CFG_CMD_INFO0_EN_RXMON); + cmd->info1 = le32_encode_bits(rx_buf_size, HTT_RX_RING_SELECTION_CFG_CMD_INFO1_BUF_SIZE); + cmd->info1 |= le32_encode_bits(tlv_filter->conf_len_mgmt, + HTT_RX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_MGMT); + cmd->info1 |= le32_encode_bits(tlv_filter->conf_len_ctrl, + HTT_RX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_CTRL); + cmd->info1 |= le32_encode_bits(tlv_filter->conf_len_data, + 
HTT_RX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_DATA); cmd->pkt_type_en_flags0 = cpu_to_le32(tlv_filter->pkt_filter_flags0); cmd->pkt_type_en_flags1 = cpu_to_le32(tlv_filter->pkt_filter_flags1); cmd->pkt_type_en_flags2 = cpu_to_le32(tlv_filter->pkt_filter_flags2); cmd->pkt_type_en_flags3 = cpu_to_le32(tlv_filter->pkt_filter_flags3); cmd->rx_filter_tlv = cpu_to_le32(tlv_filter->rx_filter); + cmd->info2 = le32_encode_bits(tlv_filter->rx_drop_threshold, + HTT_RX_RING_SELECTION_CFG_CMD_INFO2_DROP_THRESHOLD); + cmd->info2 |= + le32_encode_bits(tlv_filter->enable_log_mgmt_type, + HTT_RX_RING_SELECTION_CFG_CMD_INFO2_EN_LOG_MGMT_TYPE); + cmd->info2 |= + le32_encode_bits(tlv_filter->enable_log_ctrl_type, + HTT_RX_RING_SELECTION_CFG_CMD_INFO2_EN_CTRL_TYPE); + cmd->info2 |= + le32_encode_bits(tlv_filter->enable_log_data_type, + HTT_RX_RING_SELECTION_CFG_CMD_INFO2_EN_LOG_DATA_TYPE); + + cmd->info3 = + le32_encode_bits(tlv_filter->enable_rx_tlv_offset, + HTT_RX_RING_SELECTION_CFG_CMD_INFO3_EN_TLV_PKT_OFFSET); + cmd->info3 |= + le32_encode_bits(tlv_filter->rx_tlv_offset, + HTT_RX_RING_SELECTION_CFG_CMD_INFO3_PKT_TLV_OFFSET); + if (tlv_filter->offset_valid) { cmd->rx_packet_offset = le32_encode_bits(tlv_filter->rx_packet_offset, @@ -1210,15 +1395,28 @@ int ath12k_dp_tx_htt_monitor_mode_ring_config(struct ath12k *ar, bool reset) int ath12k_dp_tx_htt_rx_monitor_mode_ring_config(struct ath12k *ar, bool reset) { struct ath12k_base *ab = ar->ab; - struct ath12k_dp *dp = &ab->dp; struct htt_rx_ring_tlv_filter tlv_filter = {0}; - int ret, ring_id; + int ret, ring_id, i; - ring_id = dp->rxdma_mon_buf_ring.refill_buf_ring.ring_id; tlv_filter.offset_valid = false; if (!reset) { - tlv_filter.rx_filter = HTT_RX_MON_FILTER_TLV_FLAGS_MON_BUF_RING; + tlv_filter.rx_filter = HTT_RX_MON_FILTER_TLV_FLAGS_MON_DEST_RING; + + tlv_filter.drop_threshold_valid = true; + tlv_filter.rx_drop_threshold = HTT_RX_RING_TLV_DROP_THRESHOLD_VALUE; + + tlv_filter.enable_log_mgmt_type = true; + tlv_filter.enable_log_ctrl_type = true; + tlv_filter.enable_log_data_type = true; + + tlv_filter.conf_len_ctrl = HTT_RX_RING_DEFAULT_DMA_LENGTH; + tlv_filter.conf_len_mgmt = HTT_RX_RING_DEFAULT_DMA_LENGTH; + tlv_filter.conf_len_data = HTT_RX_RING_DEFAULT_DMA_LENGTH; + + tlv_filter.enable_rx_tlv_offset = true; + tlv_filter.rx_tlv_offset = HTT_RX_RING_PKT_TLV_OFFSET; + tlv_filter.pkt_filter_flags0 = HTT_RX_MON_FP_MGMT_FILTER_FLAGS0 | HTT_RX_MON_MO_MGMT_FILTER_FLAGS0; @@ -1236,14 +1434,19 @@ int ath12k_dp_tx_htt_rx_monitor_mode_ring_config(struct ath12k *ar, bool reset) } if (ab->hw_params->rxdma1_enable) { - ret = ath12k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id, 0, - HAL_RXDMA_MONITOR_BUF, - DP_RXDMA_REFILL_RING_SIZE, - &tlv_filter); - if (ret) { - ath12k_err(ab, - "failed to setup filter for monitor buf %d\n", ret); - return ret; + for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) { + ring_id = ar->dp.rxdma_mon_dst_ring[i].ring_id; + ret = ath12k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id, + ar->dp.mac_id + i, + HAL_RXDMA_MONITOR_DST, + DP_RXDMA_REFILL_RING_SIZE, + &tlv_filter); + if (ret) { + ath12k_err(ab, + "failed to setup filter for monitor buf %d\n", + ret); + return ret; + } } } diff --git a/drivers/net/wireless/ath/ath12k/dp_tx.h b/drivers/net/wireless/ath/ath12k/dp_tx.h index 46dce23501f38..a5904097dc342 100644 --- a/drivers/net/wireless/ath/ath12k/dp_tx.h +++ b/drivers/net/wireless/ath/ath12k/dp_tx.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: BSD-3-Clause-Clear */ /* * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. 
- * Copyright (c) 2021-2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021-2022, 2024-2025 Qualcomm Innovation Center, Inc. All rights reserved. */ #ifndef ATH12K_DP_TX_H @@ -17,7 +17,7 @@ struct ath12k_dp_htt_wbm_tx_status { int ath12k_dp_tx_htt_h2t_ver_req_msg(struct ath12k_base *ab); int ath12k_dp_tx(struct ath12k *ar, struct ath12k_link_vif *arvif, - struct sk_buff *skb); + struct sk_buff *skb, bool gsn_valid, int mcbc_gsn); void ath12k_dp_tx_completion_handler(struct ath12k_base *ab, int ring_id); int ath12k_dp_tx_htt_h2t_ppdu_stats_req(struct ath12k *ar, u32 mask); diff --git a/drivers/net/wireless/ath/ath12k/hal_desc.h b/drivers/net/wireless/ath/ath12k/hal_desc.h index 7b0403d245e59..3e8983b85de86 100644 --- a/drivers/net/wireless/ath/ath12k/hal_desc.h +++ b/drivers/net/wireless/ath/ath12k/hal_desc.h @@ -2968,9 +2968,8 @@ struct hal_mon_buf_ring { #define HAL_MON_DEST_COOKIE_BUF_ID GENMASK(17, 0) -#define HAL_MON_DEST_INFO0_END_OFFSET GENMASK(15, 0) -#define HAL_MON_DEST_INFO0_FLUSH_DETECTED BIT(16) -#define HAL_MON_DEST_INFO0_END_OF_PPDU BIT(17) +#define HAL_MON_DEST_INFO0_END_OFFSET GENMASK(11, 0) +#define HAL_MON_DEST_INFO0_END_REASON GENMASK(17, 16) #define HAL_MON_DEST_INFO0_INITIATOR BIT(18) #define HAL_MON_DEST_INFO0_EMPTY_DESC BIT(19) #define HAL_MON_DEST_INFO0_RING_ID GENMASK(27, 20) diff --git a/drivers/net/wireless/ath/ath12k/hal_rx.h b/drivers/net/wireless/ath/ath12k/hal_rx.h index b08aa2e79f411..6bdcd0867d86e 100644 --- a/drivers/net/wireless/ath/ath12k/hal_rx.h +++ b/drivers/net/wireless/ath/ath12k/hal_rx.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: BSD-3-Clause-Clear */ /* * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. - * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved. 
*/ #ifndef ATH12K_HAL_RX_H @@ -22,9 +22,6 @@ struct hal_rx_wbm_rel_info { #define HAL_INVALID_PEERID 0x3fff #define VHT_SIG_SU_NSS_MASK 0x7 -#define HAL_RX_MAX_MCS 12 -#define HAL_RX_MAX_NSS 8 - #define HAL_RX_MPDU_INFO_PN_GET_BYTE1(__val) \ le32_get_bits((__val), GENMASK(7, 0)) @@ -71,6 +68,8 @@ enum hal_rx_preamble { HAL_RX_PREAMBLE_11N, HAL_RX_PREAMBLE_11AC, HAL_RX_PREAMBLE_11AX, + HAL_RX_PREAMBLE_11BA, + HAL_RX_PREAMBLE_11BE, HAL_RX_PREAMBLE_MAX, }; @@ -108,6 +107,9 @@ enum hal_rx_mon_status { HAL_RX_MON_STATUS_PPDU_NOT_DONE, HAL_RX_MON_STATUS_PPDU_DONE, HAL_RX_MON_STATUS_BUF_DONE, + HAL_RX_MON_STATUS_BUF_ADDR, + HAL_RX_MON_STATUS_MPDU_END, + HAL_RX_MON_STATUS_MSDU_END, }; #define HAL_RX_MAX_MPDU 256 @@ -143,10 +145,43 @@ struct hal_rx_user_status { u32 mpdu_fcs_ok_bitmap[HAL_RX_NUM_WORDS_PER_PPDU_BITMAP]; u32 mpdu_ok_byte_count; u32 mpdu_err_byte_count; + bool ampdu_present; + u16 ampdu_id; }; #define HAL_MAX_UL_MU_USERS 37 +struct hal_rx_u_sig_info { + bool ul_dl; + u8 bw; + u8 ppdu_type_comp_mode; + u8 eht_sig_mcs; + u8 num_eht_sig_sym; + struct ieee80211_radiotap_eht_usig usig; +}; + +#define HAL_RX_MON_MAX_AGGR_SIZE 128 + +struct hal_rx_tlv_aggr_info { + bool in_progress; + u16 cur_len; + u16 tlv_tag; + u8 buf[HAL_RX_MON_MAX_AGGR_SIZE]; +}; + +struct hal_rx_radiotap_eht { + __le32 known; + __le32 data[9]; +}; + +#define EHT_MAX_USER_INFO 4 + +struct hal_rx_eht_info { + u8 num_user_info; + struct hal_rx_radiotap_eht eht; + u32 user_info[EHT_MAX_USER_INFO]; +}; + struct hal_rx_mon_ppdu_info { u32 ppdu_id; u32 last_ppdu_id; @@ -227,10 +262,15 @@ struct hal_rx_mon_ppdu_info { u8 addr4[ETH_ALEN]; struct hal_rx_user_status userstats[HAL_MAX_UL_MU_USERS]; u8 userid; - u16 ampdu_id[HAL_MAX_UL_MU_USERS]; bool first_msdu_in_mpdu; bool is_ampdu; u8 medium_prot_type; + bool ppdu_continuation; + bool eht_usig; + struct hal_rx_u_sig_info u_sig_info; + bool is_eht; + struct hal_rx_eht_info eht_info; + struct hal_rx_tlv_aggr_info tlv_aggr; }; #define HAL_RX_PPDU_START_INFO0_PPDU_ID GENMASK(15, 0) @@ -641,6 +681,395 @@ struct hal_rx_resp_req_info { #define HAL_RX_MPDU_ERR_MPDU_LEN BIT(6) #define HAL_RX_MPDU_ERR_UNENCRYPTED_FRAME BIT(7) +#define HAL_RX_PHY_CMN_USER_INFO0_GI GENMASK(17, 16) + +struct hal_phyrx_common_user_info { + __le32 rsvd[2]; + __le32 info0; + __le32 rsvd1; +} __packed; + +#define HAL_RX_EHT_SIG_NDP_CMN_INFO0_SPATIAL_REUSE GENMASK(3, 0) +#define HAL_RX_EHT_SIG_NDP_CMN_INFO0_GI_LTF GENMASK(5, 4) +#define HAL_RX_EHT_SIG_NDP_CMN_INFO0_NUM_LTF_SYM GENMASK(8, 6) +#define HAL_RX_EHT_SIG_NDP_CMN_INFO0_NSS GENMASK(10, 7) +#define HAL_RX_EHT_SIG_NDP_CMN_INFO0_BEAMFORMED BIT(11) +#define HAL_RX_EHT_SIG_NDP_CMN_INFO0_DISREGARD GENMASK(13, 12) +#define HAL_RX_EHT_SIG_NDP_CMN_INFO0_CRC GENMASK(17, 14) + +struct hal_eht_sig_ndp_cmn_eb { + __le32 info0; +} __packed; + +#define HAL_RX_EHT_SIG_OVERFLOW_INFO0_SPATIAL_REUSE GENMASK(3, 0) +#define HAL_RX_EHT_SIG_OVERFLOW_INFO0_GI_LTF GENMASK(5, 4) +#define HAL_RX_EHT_SIG_OVERFLOW_INFO0_NUM_LTF_SYM GENMASK(8, 6) +#define HAL_RX_EHT_SIG_OVERFLOW_INFO0_LDPC_EXTA_SYM BIT(9) +#define HAL_RX_EHT_SIG_OVERFLOW_INFO0_PRE_FEC_PAD_FACTOR GENMASK(11, 10) +#define HAL_RX_EHT_SIG_OVERFLOW_INFO0_DISAMBIGUITY BIT(12) +#define HAL_RX_EHT_SIG_OVERFLOW_INFO0_DISREGARD GENMASK(16, 13) + +struct hal_eht_sig_usig_overflow { + __le32 info0; +} __packed; + +#define HAL_RX_EHT_SIG_NON_MUMIMO_USER_INFO0_STA_ID GENMASK(10, 0) +#define HAL_RX_EHT_SIG_NON_MUMIMO_USER_INFO0_MCS GENMASK(14, 11) +#define HAL_RX_EHT_SIG_NON_MUMIMO_USER_INFO0_VALIDATE BIT(15) +#define 
HAL_RX_EHT_SIG_NON_MUMIMO_USER_INFO0_NSS GENMASK(19, 16) +#define HAL_RX_EHT_SIG_NON_MUMIMO_USER_INFO0_BEAMFORMED BIT(20) +#define HAL_RX_EHT_SIG_NON_MUMIMO_USER_INFO0_CODING BIT(21) +#define HAL_RX_EHT_SIG_NON_MUMIMO_USER_INFO0_CRC GENMASK(25, 22) + +struct hal_eht_sig_non_mu_mimo { + __le32 info0; +} __packed; + +#define HAL_RX_EHT_SIG_MUMIMO_USER_INFO0_STA_ID GENMASK(10, 0) +#define HAL_RX_EHT_SIG_MUMIMO_USER_INFO0_MCS GENMASK(14, 11) +#define HAL_RX_EHT_SIG_MUMIMO_USER_INFO0_CODING BIT(15) +#define HAL_RX_EHT_SIG_MUMIMO_USER_INFO0_SPATIAL_CODING GENMASK(22, 16) +#define HAL_RX_EHT_SIG_MUMIMO_USER_INFO0_CRC GENMASK(26, 23) + +struct hal_eht_sig_mu_mimo { + __le32 info0; +} __packed; + +union hal_eht_sig_user_field { + struct hal_eht_sig_mu_mimo mu_mimo; + struct hal_eht_sig_non_mu_mimo n_mu_mimo; +}; + +#define HAL_RX_EHT_SIG_NON_OFDMA_INFO0_SPATIAL_REUSE GENMASK(3, 0) +#define HAL_RX_EHT_SIG_NON_OFDMA_INFO0_GI_LTF GENMASK(5, 4) +#define HAL_RX_EHT_SIG_NON_OFDMA_INFO0_NUM_LTF_SYM GENMASK(8, 6) +#define HAL_RX_EHT_SIG_NON_OFDMA_INFO0_LDPC_EXTA_SYM BIT(9) +#define HAL_RX_EHT_SIG_NON_OFDMA_INFO0_PRE_FEC_PAD_FACTOR GENMASK(11, 10) +#define HAL_RX_EHT_SIG_NON_OFDMA_INFO0_DISAMBIGUITY BIT(12) +#define HAL_RX_EHT_SIG_NON_OFDMA_INFO0_DISREGARD GENMASK(16, 13) +#define HAL_RX_EHT_SIG_NON_OFDMA_INFO0_NUM_USERS GENMASK(19, 17) + +struct hal_eht_sig_non_ofdma_cmn_eb { + __le32 info0; + union hal_eht_sig_user_field user_field; +} __packed; + +#define HAL_RX_EHT_SIG_OFDMA_EB1_SPATIAL_REUSE GENMASK_ULL(3, 0) +#define HAL_RX_EHT_SIG_OFDMA_EB1_GI_LTF GENMASK_ULL(5, 4) +#define HAL_RX_EHT_SIG_OFDMA_EB1_NUM_LFT_SYM GENMASK_ULL(8, 6) +#define HAL_RX_EHT_SIG_OFDMA_EB1_LDPC_EXTRA_SYM BIT(9) +#define HAL_RX_EHT_SIG_OFDMA_EB1_PRE_FEC_PAD_FACTOR GENMASK_ULL(11, 10) +#define HAL_RX_EHT_SIG_OFDMA_EB1_PRE_DISAMBIGUITY BIT(12) +#define HAL_RX_EHT_SIG_OFDMA_EB1_DISREGARD GENMASK_ULL(16, 13) +#define HAL_RX_EHT_SIG_OFDMA_EB1_RU_ALLOC_1_1 GENMASK_ULL(25, 17) +#define HAL_RX_EHT_SIG_OFDMA_EB1_RU_ALLOC_1_2 GENMASK_ULL(34, 26) +#define HAL_RX_EHT_SIG_OFDMA_EB1_CRC GENMASK_ULL(38, 35) + +struct hal_eht_sig_ofdma_cmn_eb1 { + __le64 info0; +} __packed; + +#define HAL_RX_EHT_SIG_OFDMA_EB2_RU_ALLOC_2_1 GENMASK_ULL(8, 0) +#define HAL_RX_EHT_SIG_OFDMA_EB2_RU_ALLOC_2_2 GENMASK_ULL(17, 9) +#define HAL_RX_EHT_SIG_OFDMA_EB2_RU_ALLOC_2_3 GENMASK_ULL(26, 18) +#define HAL_RX_EHT_SIG_OFDMA_EB2_RU_ALLOC_2_4 GENMASK_ULL(35, 27) +#define HAL_RX_EHT_SIG_OFDMA_EB2_RU_ALLOC_2_5 GENMASK_ULL(44, 36) +#define HAL_RX_EHT_SIG_OFDMA_EB2_RU_ALLOC_2_6 GENMASK_ULL(53, 45) +#define HAL_RX_EHT_SIG_OFDMA_EB2_MCS GENMASK_ULL(57, 54) + +struct hal_eht_sig_ofdma_cmn_eb2 { + __le64 info0; +} __packed; + +struct hal_eht_sig_ofdma_cmn_eb { + struct hal_eht_sig_ofdma_cmn_eb1 eb1; + struct hal_eht_sig_ofdma_cmn_eb2 eb2; + union hal_eht_sig_user_field user_field; +} __packed; + +enum hal_eht_bw { + HAL_EHT_BW_20, + HAL_EHT_BW_40, + HAL_EHT_BW_80, + HAL_EHT_BW_160, + HAL_EHT_BW_320_1, + HAL_EHT_BW_320_2, +}; + +#define HAL_RX_USIG_CMN_INFO0_PHY_VERSION GENMASK(2, 0) +#define HAL_RX_USIG_CMN_INFO0_BW GENMASK(5, 3) +#define HAL_RX_USIG_CMN_INFO0_UL_DL BIT(6) +#define HAL_RX_USIG_CMN_INFO0_BSS_COLOR GENMASK(12, 7) +#define HAL_RX_USIG_CMN_INFO0_TXOP GENMASK(19, 13) +#define HAL_RX_USIG_CMN_INFO0_DISREGARD GENMASK(25, 20) +#define HAL_RX_USIG_CMN_INFO0_VALIDATE BIT(26) + +struct hal_mon_usig_cmn { + __le32 info0; +} __packed; + +#define HAL_RX_USIG_TB_INFO0_PPDU_TYPE_COMP_MODE GENMASK(1, 0) +#define HAL_RX_USIG_TB_INFO0_VALIDATE BIT(2) +#define 
HAL_RX_USIG_TB_INFO0_SPATIAL_REUSE_1 GENMASK(6, 3) +#define HAL_RX_USIG_TB_INFO0_SPATIAL_REUSE_2 GENMASK(10, 7) +#define HAL_RX_USIG_TB_INFO0_DISREGARD_1 GENMASK(15, 11) +#define HAL_RX_USIG_TB_INFO0_CRC GENMASK(19, 16) +#define HAL_RX_USIG_TB_INFO0_TAIL GENMASK(25, 20) +#define HAL_RX_USIG_TB_INFO0_RX_INTEG_CHECK_PASS BIT(31) + +struct hal_mon_usig_tb { + __le32 info0; +} __packed; + +#define HAL_RX_USIG_MU_INFO0_PPDU_TYPE_COMP_MODE GENMASK(1, 0) +#define HAL_RX_USIG_MU_INFO0_VALIDATE_1 BIT(2) +#define HAL_RX_USIG_MU_INFO0_PUNC_CH_INFO GENMASK(7, 3) +#define HAL_RX_USIG_MU_INFO0_VALIDATE_2 BIT(8) +#define HAL_RX_USIG_MU_INFO0_EHT_SIG_MCS GENMASK(10, 9) +#define HAL_RX_USIG_MU_INFO0_NUM_EHT_SIG_SYM GENMASK(15, 11) +#define HAL_RX_USIG_MU_INFO0_CRC GENMASK(20, 16) +#define HAL_RX_USIG_MU_INFO0_TAIL GENMASK(26, 21) +#define HAL_RX_USIG_MU_INFO0_RX_INTEG_CHECK_PASS BIT(31) + +struct hal_mon_usig_mu { + __le32 info0; +} __packed; + +union hal_mon_usig_non_cmn { + struct hal_mon_usig_tb tb; + struct hal_mon_usig_mu mu; +}; + +struct hal_mon_usig_hdr { + struct hal_mon_usig_cmn cmn; + union hal_mon_usig_non_cmn non_cmn; +} __packed; + +#define HAL_RX_USR_INFO0_PHY_PPDU_ID GENMASK(15, 0) +#define HAL_RX_USR_INFO0_USR_RSSI GENMASK(23, 16) +#define HAL_RX_USR_INFO0_PKT_TYPE GENMASK(27, 24) +#define HAL_RX_USR_INFO0_STBC BIT(28) +#define HAL_RX_USR_INFO0_RECEPTION_TYPE GENMASK(31, 29) + +#define HAL_RX_USR_INFO1_MCS GENMASK(3, 0) +#define HAL_RX_USR_INFO1_SGI GENMASK(5, 4) +#define HAL_RX_USR_INFO1_HE_RANGING_NDP BIT(6) +#define HAL_RX_USR_INFO1_MIMO_SS_BITMAP GENMASK(15, 8) +#define HAL_RX_USR_INFO1_RX_BW GENMASK(18, 16) +#define HAL_RX_USR_INFO1_DL_OFMDA_USR_IDX GENMASK(31, 24) + +#define HAL_RX_USR_INFO2_DL_OFDMA_CONTENT_CHAN BIT(0) +#define HAL_RX_USR_INFO2_NSS GENMASK(10, 8) +#define HAL_RX_USR_INFO2_STREAM_OFFSET GENMASK(13, 11) +#define HAL_RX_USR_INFO2_STA_DCM BIT(14) +#define HAL_RX_USR_INFO2_LDPC BIT(15) +#define HAL_RX_USR_INFO2_RU_TYPE_80_0 GENMASK(19, 16) +#define HAL_RX_USR_INFO2_RU_TYPE_80_1 GENMASK(23, 20) +#define HAL_RX_USR_INFO2_RU_TYPE_80_2 GENMASK(27, 24) +#define HAL_RX_USR_INFO2_RU_TYPE_80_3 GENMASK(31, 28) + +#define HAL_RX_USR_INFO3_RU_START_IDX_80_0 GENMASK(5, 0) +#define HAL_RX_USR_INFO3_RU_START_IDX_80_1 GENMASK(13, 8) +#define HAL_RX_USR_INFO3_RU_START_IDX_80_2 GENMASK(21, 16) +#define HAL_RX_USR_INFO3_RU_START_IDX_80_3 GENMASK(29, 24) + +struct hal_receive_user_info { + __le32 info0; + __le32 info1; + __le32 info2; + __le32 info3; + __le32 user_fd_rssi_seg0; + __le32 user_fd_rssi_seg1; + __le32 user_fd_rssi_seg2; + __le32 user_fd_rssi_seg3; +} __packed; + +enum hal_mon_reception_type { + HAL_RECEPTION_TYPE_SU, + HAL_RECEPTION_TYPE_DL_MU_MIMO, + HAL_RECEPTION_TYPE_DL_MU_OFMA, + HAL_RECEPTION_TYPE_DL_MU_OFDMA_MIMO, + HAL_RECEPTION_TYPE_UL_MU_MIMO, + HAL_RECEPTION_TYPE_UL_MU_OFDMA, + HAL_RECEPTION_TYPE_UL_MU_OFDMA_MIMO, +}; + +/* Different allowed RU in 11BE */ +#define HAL_EHT_RU_26 0ULL +#define HAL_EHT_RU_52 1ULL +#define HAL_EHT_RU_78 2ULL +#define HAL_EHT_RU_106 3ULL +#define HAL_EHT_RU_132 4ULL +#define HAL_EHT_RU_242 5ULL +#define HAL_EHT_RU_484 6ULL +#define HAL_EHT_RU_726 7ULL +#define HAL_EHT_RU_996 8ULL +#define HAL_EHT_RU_996x2 9ULL +#define HAL_EHT_RU_996x3 10ULL +#define HAL_EHT_RU_996x4 11ULL +#define HAL_EHT_RU_NONE 15ULL +#define HAL_EHT_RU_INVALID 31ULL +/* MRUs spanning above 80Mhz + * HAL_EHT_RU_996_484 = HAL_EHT_RU_484 + HAL_EHT_RU_996 + 4 (reserved) + */ +#define HAL_EHT_RU_996_484 18ULL +#define HAL_EHT_RU_996x2_484 28ULL +#define 
HAL_EHT_RU_996x3_484 40ULL +#define HAL_EHT_RU_996_484_242 23ULL + +#define NUM_RU_BITS_PER80 16 +#define NUM_RU_BITS_PER20 4 + +/* Different per_80Mhz band in 320Mhz bandwidth */ +#define HAL_80_0 0 +#define HAL_80_1 1 +#define HAL_80_2 2 +#define HAL_80_3 3 + +#define HAL_RU_80MHZ(num_band) ((num_band) * NUM_RU_BITS_PER80) +#define HAL_RU_20MHZ(idx_per_80) ((idx_per_80) * NUM_RU_BITS_PER20) + +#define HAL_RU_SHIFT(num_band, idx_per_80) \ + (HAL_RU_80MHZ(num_band) + HAL_RU_20MHZ(idx_per_80)) + +#define HAL_RU(ru, num_band, idx_per_80) \ + ((u64)(ru) << HAL_RU_SHIFT(num_band, idx_per_80)) + +/* MRU-996+484 */ +#define HAL_EHT_RU_996_484_0 (HAL_RU(HAL_EHT_RU_484, HAL_80_0, 1) | \ + HAL_RU(HAL_EHT_RU_996, HAL_80_1, 0)) +#define HAL_EHT_RU_996_484_1 (HAL_RU(HAL_EHT_RU_484, HAL_80_0, 0) | \ + HAL_RU(HAL_EHT_RU_996, HAL_80_1, 0)) +#define HAL_EHT_RU_996_484_2 (HAL_RU(HAL_EHT_RU_996, HAL_80_0, 0) | \ + HAL_RU(HAL_EHT_RU_484, HAL_80_1, 1)) +#define HAL_EHT_RU_996_484_3 (HAL_RU(HAL_EHT_RU_996, HAL_80_0, 0) | \ + HAL_RU(HAL_EHT_RU_484, HAL_80_1, 0)) +#define HAL_EHT_RU_996_484_4 (HAL_RU(HAL_EHT_RU_484, HAL_80_2, 1) | \ + HAL_RU(HAL_EHT_RU_996, HAL_80_3, 0)) +#define HAL_EHT_RU_996_484_5 (HAL_RU(HAL_EHT_RU_484, HAL_80_2, 0) | \ + HAL_RU(HAL_EHT_RU_996, HAL_80_3, 0)) +#define HAL_EHT_RU_996_484_6 (HAL_RU(HAL_EHT_RU_996, HAL_80_2, 0) | \ + HAL_RU(HAL_EHT_RU_484, HAL_80_3, 1)) +#define HAL_EHT_RU_996_484_7 (HAL_RU(HAL_EHT_RU_996, HAL_80_2, 0) | \ + HAL_RU(HAL_EHT_RU_484, HAL_80_3, 0)) + +/* MRU-996x2+484 */ +#define HAL_EHT_RU_996x2_484_0 (HAL_RU(HAL_EHT_RU_484, HAL_80_0, 1) | \ + HAL_RU(HAL_EHT_RU_996x2, HAL_80_1, 0) | \ + HAL_RU(HAL_EHT_RU_996x2, HAL_80_2, 0)) +#define HAL_EHT_RU_996x2_484_1 (HAL_RU(HAL_EHT_RU_484, HAL_80_0, 0) | \ + HAL_RU(HAL_EHT_RU_996x2, HAL_80_1, 0) | \ + HAL_RU(HAL_EHT_RU_996x2, HAL_80_2, 0)) +#define HAL_EHT_RU_996x2_484_2 (HAL_RU(HAL_EHT_RU_996x2, HAL_80_0, 0) | \ + HAL_RU(HAL_EHT_RU_484, HAL_80_1, 1) | \ + HAL_RU(HAL_EHT_RU_996x2, HAL_80_2, 0)) +#define HAL_EHT_RU_996x2_484_3 (HAL_RU(HAL_EHT_RU_996x2, HAL_80_0, 0) | \ + HAL_RU(HAL_EHT_RU_484, HAL_80_1, 0) | \ + HAL_RU(HAL_EHT_RU_996x2, HAL_80_2, 0)) +#define HAL_EHT_RU_996x2_484_4 (HAL_RU(HAL_EHT_RU_996x2, HAL_80_0, 0) | \ + HAL_RU(HAL_EHT_RU_996x2, HAL_80_1, 0) | \ + HAL_RU(HAL_EHT_RU_484, HAL_80_2, 1)) +#define HAL_EHT_RU_996x2_484_5 (HAL_RU(HAL_EHT_RU_996x2, HAL_80_0, 0) | \ + HAL_RU(HAL_EHT_RU_996x2, HAL_80_1, 0) | \ + HAL_RU(HAL_EHT_RU_484, HAL_80_2, 0)) +#define HAL_EHT_RU_996x2_484_6 (HAL_RU(HAL_EHT_RU_484, HAL_80_1, 1) | \ + HAL_RU(HAL_EHT_RU_996x2, HAL_80_2, 0) | \ + HAL_RU(HAL_EHT_RU_996x2, HAL_80_3, 0)) +#define HAL_EHT_RU_996x2_484_7 (HAL_RU(HAL_EHT_RU_484, HAL_80_1, 0) | \ + HAL_RU(HAL_EHT_RU_996x2, HAL_80_2, 0) | \ + HAL_RU(HAL_EHT_RU_996x2, HAL_80_3, 0)) +#define HAL_EHT_RU_996x2_484_8 (HAL_RU(HAL_EHT_RU_996x2, HAL_80_1, 0) | \ + HAL_RU(HAL_EHT_RU_484, HAL_80_2, 1) | \ + HAL_RU(HAL_EHT_RU_996x2, HAL_80_3, 0)) +#define HAL_EHT_RU_996x2_484_9 (HAL_RU(HAL_EHT_RU_996x2, HAL_80_1, 0) | \ + HAL_RU(HAL_EHT_RU_484, HAL_80_2, 0) | \ + HAL_RU(HAL_EHT_RU_996x2, HAL_80_3, 0)) +#define HAL_EHT_RU_996x2_484_10 (HAL_RU(HAL_EHT_RU_996x2, HAL_80_1, 0) | \ + HAL_RU(HAL_EHT_RU_996x2, HAL_80_2, 0) | \ + HAL_RU(HAL_EHT_RU_484, HAL_80_3, 1)) +#define HAL_EHT_RU_996x2_484_11 (HAL_RU(HAL_EHT_RU_996x2, HAL_80_1, 0) | \ + HAL_RU(HAL_EHT_RU_996x2, HAL_80_2, 0) | \ + HAL_RU(HAL_EHT_RU_484, HAL_80_3, 0)) + +/* MRU-996x3+484 */ +#define HAL_EHT_RU_996x3_484_0 (HAL_RU(HAL_EHT_RU_484, HAL_80_0, 1) | \ + HAL_RU(HAL_EHT_RU_996x3, HAL_80_1, 0) 
| \ + HAL_RU(HAL_EHT_RU_996x3, HAL_80_2, 0) | \ + HAL_RU(HAL_EHT_RU_996x3, HAL_80_3, 0)) +#define HAL_EHT_RU_996x3_484_1 (HAL_RU(HAL_EHT_RU_484, HAL_80_0, 0) | \ + HAL_RU(HAL_EHT_RU_996x3, HAL_80_1, 0) | \ + HAL_RU(HAL_EHT_RU_996x3, HAL_80_2, 0) | \ + HAL_RU(HAL_EHT_RU_996x3, HAL_80_3, 0)) +#define HAL_EHT_RU_996x3_484_2 (HAL_RU(HAL_EHT_RU_996x3, HAL_80_0, 0) | \ + HAL_RU(HAL_EHT_RU_484, HAL_80_1, 1) | \ + HAL_RU(HAL_EHT_RU_996x3, HAL_80_2, 0) | \ + HAL_RU(HAL_EHT_RU_996x3, HAL_80_3, 0)) +#define HAL_EHT_RU_996x3_484_3 (HAL_RU(HAL_EHT_RU_996x3, HAL_80_0, 0) | \ + HAL_RU(HAL_EHT_RU_484, HAL_80_1, 0) | \ + HAL_RU(HAL_EHT_RU_996x3, HAL_80_2, 0) | \ + HAL_RU(HAL_EHT_RU_996x3, HAL_80_3, 0)) +#define HAL_EHT_RU_996x3_484_4 (HAL_RU(HAL_EHT_RU_996x3, HAL_80_0, 0) | \ + HAL_RU(HAL_EHT_RU_996x3, HAL_80_1, 0) | \ + HAL_RU(HAL_EHT_RU_484, HAL_80_2, 1) | \ + HAL_RU(HAL_EHT_RU_996x3, HAL_80_3, 0)) +#define HAL_EHT_RU_996x3_484_5 (HAL_RU(HAL_EHT_RU_996x3, HAL_80_0, 0) | \ + HAL_RU(HAL_EHT_RU_996x3, HAL_80_1, 0) | \ + HAL_RU(HAL_EHT_RU_484, HAL_80_2, 0) | \ + HAL_RU(HAL_EHT_RU_996x3, HAL_80_3, 0)) +#define HAL_EHT_RU_996x3_484_6 (HAL_RU(HAL_EHT_RU_996x3, HAL_80_0, 0) | \ + HAL_RU(HAL_EHT_RU_996x3, HAL_80_1, 0) | \ + HAL_RU(HAL_EHT_RU_996x3, HAL_80_2, 0) | \ + HAL_RU(HAL_EHT_RU_484, HAL_80_3, 1)) +#define HAL_EHT_RU_996x3_484_7 (HAL_RU(HAL_EHT_RU_996x3, HAL_80_0, 0) | \ + HAL_RU(HAL_EHT_RU_996x3, HAL_80_1, 0) | \ + HAL_RU(HAL_EHT_RU_996x3, HAL_80_2, 0) | \ + HAL_RU(HAL_EHT_RU_484, HAL_80_3, 0)) + +#define HAL_RU_PER80(ru_per80, num_80mhz, ru_idx_per80mhz) \ + (HAL_RU(ru_per80, num_80mhz, ru_idx_per80mhz)) + +#define RU_INVALID 0 +#define RU_26 1 +#define RU_52 2 +#define RU_106 4 +#define RU_242 9 +#define RU_484 18 +#define RU_996 37 +#define RU_2X996 74 +#define RU_3X996 111 +#define RU_4X996 148 +#define RU_52_26 (RU_52 + RU_26) +#define RU_106_26 (RU_106 + RU_26) +#define RU_484_242 (RU_484 + RU_242) +#define RU_996_484 (RU_996 + RU_484) +#define RU_996_484_242 (RU_996 + RU_484_242) +#define RU_2X996_484 (RU_2X996 + RU_484) +#define RU_3X996_484 (RU_3X996 + RU_484) + +enum ath12k_eht_ru_size { + ATH12K_EHT_RU_26, + ATH12K_EHT_RU_52, + ATH12K_EHT_RU_106, + ATH12K_EHT_RU_242, + ATH12K_EHT_RU_484, + ATH12K_EHT_RU_996, + ATH12K_EHT_RU_996x2, + ATH12K_EHT_RU_996x4, + ATH12K_EHT_RU_52_26, + ATH12K_EHT_RU_106_26, + ATH12K_EHT_RU_484_242, + ATH12K_EHT_RU_996_484, + ATH12K_EHT_RU_996_484_242, + ATH12K_EHT_RU_996x2_484, + ATH12K_EHT_RU_996x3, + ATH12K_EHT_RU_996x3_484, + + /* Keep last */ + ATH12K_EHT_RU_INVALID, +}; + +#define HAL_RX_RU_ALLOC_TYPE_MAX ATH12K_EHT_RU_INVALID + static inline enum nl80211_he_ru_alloc ath12k_he_ru_tones_to_nl80211_he_ru_alloc(u16 ru_tones) { @@ -662,6 +1091,9 @@ enum nl80211_he_ru_alloc ath12k_he_ru_tones_to_nl80211_he_ru_alloc(u16 ru_tones) case RU_996: ret = NL80211_RATE_INFO_HE_RU_ALLOC_996; break; + case RU_2X996: + ret = NL80211_RATE_INFO_HE_RU_ALLOC_2x996; + break; case RU_26: fallthrough; default: diff --git a/drivers/net/wireless/ath/ath12k/hal_tx.h b/drivers/net/wireless/ath/ath12k/hal_tx.h index 3cf5973771d78..eb065a79f6c64 100644 --- a/drivers/net/wireless/ath/ath12k/hal_tx.h +++ b/drivers/net/wireless/ath/ath12k/hal_tx.h @@ -1,7 +1,8 @@ /* SPDX-License-Identifier: BSD-3-Clause-Clear */ /* * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. - * Copyright (c) 2021-2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021-2022, 2024-2025 Qualcomm Innovation Center, Inc. + * All rights reserved. 
*/ #ifndef ATH12K_HAL_TX_H @@ -63,7 +64,12 @@ struct hal_tx_status { u8 try_cnt; u8 tid; u16 peer_id; - u32 rate_stats; + enum hal_tx_rate_stats_pkt_type pkt_type; + enum hal_tx_rate_stats_sgi sgi; + enum ath12k_supported_bw bw; + u8 mcs; + u16 tones; + u8 ofdma; }; #define HAL_TX_PHY_DESC_INFO0_BF_TYPE GENMASK(17, 16) diff --git a/drivers/net/wireless/ath/ath12k/hw.c b/drivers/net/wireless/ath/ath12k/hw.c index b7b583fadb5ac..a106ebed7870d 100644 --- a/drivers/net/wireless/ath/ath12k/hw.c +++ b/drivers/net/wireless/ath/ath12k/hw.c @@ -543,7 +543,11 @@ static const struct ath12k_hw_ring_mask ath12k_hw_ring_mask_qcn9274 = { ATH12K_TX_RING_MASK_3, }, .rx_mon_dest = { - 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0, + ATH12K_RX_MON_RING_MASK_0, + ATH12K_RX_MON_RING_MASK_1, + ATH12K_RX_MON_RING_MASK_2, }, .rx = { 0, 0, 0, 0, @@ -1035,7 +1039,7 @@ static const struct ath12k_hw_params ath12k_hw_params[] = { .hal_params = &ath12k_hw_hal_params_qcn9274, - .rxdma1_enable = false, + .rxdma1_enable = true, .num_rxdma_per_pdev = 1, .num_rxdma_dst_ring = 0, .rx_mac_buf_ring = false, diff --git a/drivers/net/wireless/ath/ath12k/mac.c b/drivers/net/wireless/ath/ath12k/mac.c index 2d062b5904a8e..dfa05f0ee6c9f 100644 --- a/drivers/net/wireless/ath/ath12k/mac.c +++ b/drivers/net/wireless/ath/ath12k/mac.c @@ -15,10 +15,12 @@ #include "hw.h" #include "dp_tx.h" #include "dp_rx.h" +#include "testmode.h" #include "peer.h" #include "debugfs.h" #include "hif.h" #include "wow.h" +#include "debugfs_sta.h" #define CHAN2G(_channel, _freq, _flags) { \ .band = NL80211_BAND_2GHZ, \ @@ -337,6 +339,82 @@ static const char *ath12k_mac_phymode_str(enum wmi_phy_mode mode) return ""; } +u16 ath12k_mac_he_convert_tones_to_ru_tones(u16 tones) +{ + switch (tones) { + case 26: + return RU_26; + case 52: + return RU_52; + case 106: + return RU_106; + case 242: + return RU_242; + case 484: + return RU_484; + case 996: + return RU_996; + case (996 * 2): + return RU_2X996; + default: + return RU_26; + } +} + +enum nl80211_eht_gi ath12k_mac_eht_gi_to_nl80211_eht_gi(u8 sgi) +{ + switch (sgi) { + case RX_MSDU_START_SGI_0_8_US: + return NL80211_RATE_INFO_EHT_GI_0_8; + case RX_MSDU_START_SGI_1_6_US: + return NL80211_RATE_INFO_EHT_GI_1_6; + case RX_MSDU_START_SGI_3_2_US: + return NL80211_RATE_INFO_EHT_GI_3_2; + default: + return NL80211_RATE_INFO_EHT_GI_0_8; + } +} + +enum nl80211_eht_ru_alloc ath12k_mac_eht_ru_tones_to_nl80211_eht_ru_alloc(u16 ru_tones) +{ + switch (ru_tones) { + case 26: + return NL80211_RATE_INFO_EHT_RU_ALLOC_26; + case 52: + return NL80211_RATE_INFO_EHT_RU_ALLOC_52; + case (52 + 26): + return NL80211_RATE_INFO_EHT_RU_ALLOC_52P26; + case 106: + return NL80211_RATE_INFO_EHT_RU_ALLOC_106; + case (106 + 26): + return NL80211_RATE_INFO_EHT_RU_ALLOC_106P26; + case 242: + return NL80211_RATE_INFO_EHT_RU_ALLOC_242; + case 484: + return NL80211_RATE_INFO_EHT_RU_ALLOC_484; + case (484 + 242): + return NL80211_RATE_INFO_EHT_RU_ALLOC_484P242; + case 996: + return NL80211_RATE_INFO_EHT_RU_ALLOC_996; + case (996 + 484): + return NL80211_RATE_INFO_EHT_RU_ALLOC_996P484; + case (996 + 484 + 242): + return NL80211_RATE_INFO_EHT_RU_ALLOC_996P484P242; + case (2 * 996): + return NL80211_RATE_INFO_EHT_RU_ALLOC_2x996; + case (2 * 996 + 484): + return NL80211_RATE_INFO_EHT_RU_ALLOC_2x996P484; + case (3 * 996): + return NL80211_RATE_INFO_EHT_RU_ALLOC_3x996; + case (3 * 996 + 484): + return NL80211_RATE_INFO_EHT_RU_ALLOC_3x996P484; + case (4 * 996): + return NL80211_RATE_INFO_EHT_RU_ALLOC_4x996; + default: + return NL80211_RATE_INFO_EHT_RU_ALLOC_26; + } 
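The three helpers just added (HE tones-to-RU conversion, EHT GI mapping, and EHT RU-allocation mapping) all funnel raw PHY numbers from the tx completion into the nl80211 rate-info enums that user space sees. As a rough illustration of the RU mapping, the following minimal, self-contained C sketch mirrors the switch logic in user space; it is hypothetical illustration only, and the string names stand in for the NL80211_RATE_INFO_EHT_RU_ALLOC_* enums:

    #include <stdio.h>

    /* Mirrors the tone-count -> EHT RU allocation switch added above;
     * unknown sizes fall back to the smallest RU, as in the driver.
     */
    static const char *eht_ru_alloc_name(unsigned int tones)
    {
            switch (tones) {
            case 26:                return "RU_ALLOC_26";
            case 52:                return "RU_ALLOC_52";
            case 52 + 26:           return "RU_ALLOC_52P26";
            case 106:               return "RU_ALLOC_106";
            case 106 + 26:          return "RU_ALLOC_106P26";
            case 242:               return "RU_ALLOC_242";
            case 484:               return "RU_ALLOC_484";
            case 484 + 242:         return "RU_ALLOC_484P242";
            case 996:               return "RU_ALLOC_996";
            case 996 + 484:         return "RU_ALLOC_996P484";
            case 996 + 484 + 242:   return "RU_ALLOC_996P484P242";
            case 2 * 996:           return "RU_ALLOC_2x996";
            case 2 * 996 + 484:     return "RU_ALLOC_2x996P484";
            case 3 * 996:           return "RU_ALLOC_3x996";
            case 3 * 996 + 484:     return "RU_ALLOC_3x996P484";
            case 4 * 996:           return "RU_ALLOC_4x996";
            default:                return "RU_ALLOC_26 (fallback)";
            }
    }

    int main(void)
    {
            unsigned int sizes[] = { 26, 484 + 242, 2 * 996, 3 * 996 + 484 };
            unsigned int i;

            for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
                    printf("%4u tones -> %s\n", sizes[i], eht_ru_alloc_name(sizes[i]));
            return 0;
    }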
+} + enum rate_info_bw ath12k_mac_bw_to_mac80211_bw(enum ath12k_supported_bw bw) { @@ -502,7 +580,20 @@ static int ath12k_mac_vif_link_chan(struct ieee80211_vif *vif, u8 link_id, return 0; } -static struct ieee80211_bss_conf * +static struct ath12k_link_vif *ath12k_mac_get_tx_arvif(struct ath12k_link_vif *arvif) +{ + struct ath12k_vif *tx_ahvif; + + if (arvif->ahvif->vif->mbssid_tx_vif) { + tx_ahvif = ath12k_vif_to_ahvif(arvif->ahvif->vif->mbssid_tx_vif); + if (tx_ahvif) + return &tx_ahvif->deflink; + } + + return NULL; +} + +struct ieee80211_bss_conf * ath12k_mac_get_link_bss_conf(struct ath12k_link_vif *arvif) { struct ieee80211_vif *vif = arvif->ahvif->vif; @@ -675,7 +766,10 @@ struct ath12k *ath12k_mac_get_ar_by_pdev_id(struct ath12k_base *ab, u32 pdev_id) return NULL; for (i = 0; i < ab->num_radios; i++) { - pdev = rcu_dereference(ab->pdevs_active[i]); + if (ab->fw_mode == ATH12K_FIRMWARE_MODE_FTM) + pdev = &ab->pdevs[i]; + else + pdev = rcu_dereference(ab->pdevs_active[i]); if (pdev && pdev->pdev_id == pdev_id) return (pdev->ar ? pdev->ar : NULL); @@ -725,9 +819,9 @@ static struct ath12k *ath12k_get_ar_by_ctx(struct ieee80211_hw *hw, return ath12k_mac_get_ar_by_chan(hw, ctx->def.chan); } -static struct ath12k *ath12k_get_ar_by_vif(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - u8 link_id) +struct ath12k *ath12k_get_ar_by_vif(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + u8 link_id) { struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif); struct ath12k_hw *ah = ath12k_hw_to_ah(hw); @@ -1549,30 +1643,18 @@ static void ath12k_mac_set_arvif_ies(struct ath12k_link_vif *arvif, struct sk_bu } } -static int ath12k_mac_setup_bcn_tmpl_ema(struct ath12k_link_vif *arvif) +static int ath12k_mac_setup_bcn_tmpl_ema(struct ath12k_link_vif *arvif, + struct ath12k_link_vif *tx_arvif, + u8 bssid_index) { - struct ath12k_vif *ahvif = arvif->ahvif; - struct ieee80211_bss_conf *bss_conf; struct ath12k_wmi_bcn_tmpl_ema_arg ema_args; struct ieee80211_ema_beacons *beacons; - struct ath12k_link_vif *tx_arvif; bool nontx_profile_found = false; - struct ath12k_vif *tx_ahvif; int ret = 0; u8 i; - bss_conf = ath12k_mac_get_link_bss_conf(arvif); - if (!bss_conf) { - ath12k_warn(arvif->ar->ab, - "failed to get link bss conf to update bcn tmpl for vif %pM link %u\n", - ahvif->vif->addr, arvif->link_id); - return -ENOLINK; - } - - tx_ahvif = ath12k_vif_to_ahvif(ahvif->vif->mbssid_tx_vif); - tx_arvif = &tx_ahvif->deflink; beacons = ieee80211_beacon_get_template_ema_list(ath12k_ar_to_hw(tx_arvif->ar), - tx_ahvif->vif, + tx_arvif->ahvif->vif, tx_arvif->link_id); if (!beacons || !beacons->cnt) { ath12k_warn(arvif->ar->ab, @@ -1586,13 +1668,12 @@ static int ath12k_mac_setup_bcn_tmpl_ema(struct ath12k_link_vif *arvif) for (i = 0; i < beacons->cnt; i++) { if (tx_arvif != arvif && !nontx_profile_found) ath12k_mac_set_arvif_ies(arvif, beacons->bcn[i].skb, - bss_conf->bssid_index, + bssid_index, &nontx_profile_found); ema_args.bcn_cnt = beacons->cnt; ema_args.bcn_index = i; - ret = ath12k_wmi_bcn_tmpl(tx_arvif->ar, tx_arvif->vdev_id, - &beacons->bcn[i].offs, + ret = ath12k_wmi_bcn_tmpl(tx_arvif, &beacons->bcn[i].offs, beacons->bcn[i].skb, &ema_args); if (ret) { ath12k_warn(tx_arvif->ar->ab, @@ -1605,7 +1686,7 @@ static int ath12k_mac_setup_bcn_tmpl_ema(struct ath12k_link_vif *arvif) if (tx_arvif != arvif && !nontx_profile_found) ath12k_warn(arvif->ar->ab, "nontransmitted bssid index %u not found in beacon template\n", - bss_conf->bssid_index); + bssid_index); ieee80211_beacon_free_ema_list(beacons); return 
ret; @@ -1616,11 +1697,10 @@ static int ath12k_mac_setup_bcn_tmpl(struct ath12k_link_vif *arvif) struct ath12k_vif *ahvif = arvif->ahvif; struct ieee80211_vif *vif = ath12k_ahvif_to_vif(ahvif); struct ieee80211_bss_conf *link_conf; - struct ath12k_link_vif *tx_arvif = arvif; + struct ath12k_link_vif *tx_arvif; struct ath12k *ar = arvif->ar; struct ath12k_base *ab = ar->ab; struct ieee80211_mutable_offsets offs = {}; - struct ath12k_vif *tx_ahvif = ahvif; bool nontx_profile_found = false; struct sk_buff *bcn; int ret; @@ -1635,17 +1715,20 @@ static int ath12k_mac_setup_bcn_tmpl(struct ath12k_link_vif *arvif) return -ENOLINK; } - if (vif->mbssid_tx_vif) { - tx_ahvif = ath12k_vif_to_ahvif(vif->mbssid_tx_vif); - tx_arvif = &tx_ahvif->deflink; + tx_arvif = ath12k_mac_get_tx_arvif(arvif); + if (tx_arvif) { if (tx_arvif != arvif && arvif->is_up) return 0; if (link_conf->ema_ap) - return ath12k_mac_setup_bcn_tmpl_ema(arvif); + return ath12k_mac_setup_bcn_tmpl_ema(arvif, tx_arvif, + link_conf->bssid_index); + } else { + tx_arvif = arvif; } - bcn = ieee80211_beacon_get_template(ath12k_ar_to_hw(tx_arvif->ar), tx_ahvif->vif, + bcn = ieee80211_beacon_get_template(ath12k_ar_to_hw(tx_arvif->ar), + tx_arvif->ahvif->vif, &offs, tx_arvif->link_id); if (!bcn) { ath12k_warn(ab, "failed to get beacon template from mac80211\n"); @@ -1686,7 +1769,7 @@ static int ath12k_mac_setup_bcn_tmpl(struct ath12k_link_vif *arvif) } } - ret = ath12k_wmi_bcn_tmpl(ar, arvif->vdev_id, &offs, bcn, NULL); + ret = ath12k_wmi_bcn_tmpl(arvif, &offs, bcn, NULL); if (ret) ath12k_warn(ab, "failed to submit beacon template command: %d\n", @@ -1702,6 +1785,7 @@ static void ath12k_control_beaconing(struct ath12k_link_vif *arvif, { struct ath12k_wmi_vdev_up_params params = {}; struct ath12k_vif *ahvif = arvif->ahvif; + struct ath12k_link_vif *tx_arvif; struct ath12k *ar = arvif->ar; int ret; @@ -1732,11 +1816,9 @@ static void ath12k_control_beaconing(struct ath12k_link_vif *arvif, params.vdev_id = arvif->vdev_id; params.aid = ahvif->aid; params.bssid = arvif->bssid; - if (ahvif->vif->mbssid_tx_vif) { - struct ath12k_vif *tx_ahvif = - ath12k_vif_to_ahvif(ahvif->vif->mbssid_tx_vif); - struct ath12k_link_vif *tx_arvif = &tx_ahvif->deflink; + tx_arvif = ath12k_mac_get_tx_arvif(arvif); + if (tx_arvif) { params.tx_bssid = tx_arvif->bssid; params.nontx_profile_idx = info->bssid_index; params.nontx_profile_cnt = 1 << info->bssid_indicator; @@ -3116,6 +3198,7 @@ static void ath12k_peer_assoc_prepare(struct ath12k *ar, ath12k_peer_assoc_h_smps(arsta, arg); ath12k_peer_assoc_h_mlo(arsta, arg); + arsta->peer_nss = arg->peer_nss; /* TODO: amsdu_disable req? 
*/ } @@ -3138,6 +3221,37 @@ static int ath12k_setup_peer_smps(struct ath12k *ar, struct ath12k_link_vif *arv ath12k_smps_map[smps]); } +static u32 ath12k_mac_ieee80211_sta_bw_to_wmi(struct ath12k *ar, + struct ieee80211_link_sta *link_sta) +{ + u32 bw; + + switch (link_sta->bandwidth) { + case IEEE80211_STA_RX_BW_20: + bw = WMI_PEER_CHWIDTH_20MHZ; + break; + case IEEE80211_STA_RX_BW_40: + bw = WMI_PEER_CHWIDTH_40MHZ; + break; + case IEEE80211_STA_RX_BW_80: + bw = WMI_PEER_CHWIDTH_80MHZ; + break; + case IEEE80211_STA_RX_BW_160: + bw = WMI_PEER_CHWIDTH_160MHZ; + break; + case IEEE80211_STA_RX_BW_320: + bw = WMI_PEER_CHWIDTH_320MHZ; + break; + default: + ath12k_warn(ar->ab, "Invalid bandwidth %d for link station %pM\n", + link_sta->bandwidth, link_sta->addr); + bw = WMI_PEER_CHWIDTH_20MHZ; + break; + } + + return bw; +} + static void ath12k_bss_assoc(struct ath12k *ar, struct ath12k_link_vif *arvif, struct ieee80211_bss_conf *bss_conf) @@ -3358,12 +3472,178 @@ static void ath12k_recalculate_mgmt_rate(struct ath12k *ar, ath12k_warn(ar->ab, "failed to set beacon tx rate %d\n", ret); } +static void ath12k_mac_init_arvif(struct ath12k_vif *ahvif, + struct ath12k_link_vif *arvif, int link_id) +{ + struct ath12k_hw *ah = ahvif->ah; + u8 _link_id; + int i; + + lockdep_assert_wiphy(ah->hw->wiphy); + + if (WARN_ON(!arvif)) + return; + + if (WARN_ON(link_id >= ATH12K_NUM_MAX_LINKS)) + return; + + if (link_id < 0) + _link_id = 0; + else + _link_id = link_id; + + arvif->ahvif = ahvif; + arvif->link_id = _link_id; + + INIT_LIST_HEAD(&arvif->list); + INIT_DELAYED_WORK(&arvif->connection_loss_work, + ath12k_mac_vif_sta_connection_loss_work); + + for (i = 0; i < ARRAY_SIZE(arvif->bitrate_mask.control); i++) { + arvif->bitrate_mask.control[i].legacy = 0xffffffff; + memset(arvif->bitrate_mask.control[i].ht_mcs, 0xff, + sizeof(arvif->bitrate_mask.control[i].ht_mcs)); + memset(arvif->bitrate_mask.control[i].vht_mcs, 0xff, + sizeof(arvif->bitrate_mask.control[i].vht_mcs)); + } + + /* Handle MLO related assignments */ + if (link_id >= 0) { + rcu_assign_pointer(ahvif->link[arvif->link_id], arvif); + ahvif->links_map |= BIT(_link_id); + } + + ath12k_generic_dbg(ATH12K_DBG_MAC, + "mac init link arvif (link_id %d%s) for vif %pM. links_map 0x%x", + _link_id, (link_id < 0) ? 
" deflink" : "", ahvif->vif->addr, + ahvif->links_map); +} + +static void ath12k_mac_remove_link_interface(struct ieee80211_hw *hw, + struct ath12k_link_vif *arvif) +{ + struct ath12k_vif *ahvif = arvif->ahvif; + struct ath12k_hw *ah = hw->priv; + struct ath12k *ar = arvif->ar; + int ret; + + lockdep_assert_wiphy(ah->hw->wiphy); + + cancel_delayed_work_sync(&arvif->connection_loss_work); + + ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac remove link interface (vdev %d link id %d)", + arvif->vdev_id, arvif->link_id); + + if (ahvif->vdev_type == WMI_VDEV_TYPE_AP) { + ret = ath12k_peer_delete(ar, arvif->vdev_id, arvif->bssid); + if (ret) + ath12k_warn(ar->ab, "failed to submit AP self-peer removal on vdev %d link id %d: %d", + arvif->vdev_id, arvif->link_id, ret); + } + ath12k_mac_vdev_delete(ar, arvif); +} + +static struct ath12k_link_vif *ath12k_mac_assign_link_vif(struct ath12k_hw *ah, + struct ieee80211_vif *vif, + u8 link_id) +{ + struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif); + struct ath12k_link_vif *arvif; + + lockdep_assert_wiphy(ah->hw->wiphy); + + arvif = wiphy_dereference(ah->hw->wiphy, ahvif->link[link_id]); + if (arvif) + return arvif; + + if (!vif->valid_links) { + /* Use deflink for Non-ML VIFs and mark the link id as 0 + */ + link_id = 0; + arvif = &ahvif->deflink; + } else { + /* If this is the first link arvif being created for an ML VIF + * use the preallocated deflink memory except for scan arvifs + */ + if (!ahvif->links_map && link_id != ATH12K_DEFAULT_SCAN_LINK) { + arvif = &ahvif->deflink; + } else { + arvif = (struct ath12k_link_vif *) + kzalloc(sizeof(struct ath12k_link_vif), GFP_KERNEL); + if (!arvif) + return NULL; + } + } + + ath12k_mac_init_arvif(ahvif, arvif, link_id); + + return arvif; +} + +static void ath12k_mac_unassign_link_vif(struct ath12k_link_vif *arvif) +{ + struct ath12k_vif *ahvif = arvif->ahvif; + struct ath12k_hw *ah = ahvif->ah; + + lockdep_assert_wiphy(ah->hw->wiphy); + + rcu_assign_pointer(ahvif->link[arvif->link_id], NULL); + synchronize_rcu(); + ahvif->links_map &= ~BIT(arvif->link_id); + + if (arvif != &ahvif->deflink) + kfree(arvif); + else + memset(arvif, 0, sizeof(*arvif)); +} + static int ath12k_mac_op_change_vif_links(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 old_links, u16 new_links, struct ieee80211_bss_conf *ol[IEEE80211_MLD_MAX_NUM_LINKS]) { + struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif); + unsigned long to_remove = old_links & ~new_links; + unsigned long to_add = ~old_links & new_links; + struct ath12k_hw *ah = ath12k_hw_to_ah(hw); + struct ath12k_link_vif *arvif; + u8 link_id; + + lockdep_assert_wiphy(hw->wiphy); + + ath12k_generic_dbg(ATH12K_DBG_MAC, + "mac vif link changed for MLD %pM old_links 0x%x new_links 0x%x\n", + vif->addr, old_links, new_links); + + for_each_set_bit(link_id, &to_add, IEEE80211_MLD_MAX_NUM_LINKS) { + arvif = wiphy_dereference(hw->wiphy, ahvif->link[link_id]); + /* mac80211 wants to add link but driver already has the + * link. This should not happen ideally. 
+ */ + if (WARN_ON(arvif)) + return -EINVAL; + + arvif = ath12k_mac_assign_link_vif(ah, vif, link_id); + if (WARN_ON(!arvif)) + return -EINVAL; + } + + for_each_set_bit(link_id, &to_remove, IEEE80211_MLD_MAX_NUM_LINKS) { + arvif = wiphy_dereference(hw->wiphy, ahvif->link[link_id]); + if (WARN_ON(!arvif)) + return -EINVAL; + + if (!arvif->is_created) + continue; + + if (WARN_ON(!arvif->ar)) + return -EINVAL; + + ath12k_mac_remove_link_interface(hw, arvif); + ath12k_mac_unassign_link_vif(arvif); + } + return 0; } @@ -3862,109 +4142,6 @@ static void ath12k_mac_op_link_info_changed(struct ieee80211_hw *hw, ath12k_mac_bss_info_changed(ar, arvif, info, changed); } -static struct ath12k_link_vif *ath12k_mac_assign_link_vif(struct ath12k_hw *ah, - struct ieee80211_vif *vif, - u8 link_id) -{ - struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif); - struct ath12k_link_vif *arvif; - int i; - - lockdep_assert_wiphy(ah->hw->wiphy); - - arvif = wiphy_dereference(ah->hw->wiphy, ahvif->link[link_id]); - if (arvif) - return arvif; - - if (!vif->valid_links) { - /* Use deflink for Non-ML VIFs and mark the link id as 0 - */ - link_id = 0; - arvif = &ahvif->deflink; - } else { - /* If this is the first link arvif being created for an ML VIF - * use the preallocated deflink memory except for scan arvifs - */ - if (!ahvif->links_map && link_id != ATH12K_DEFAULT_SCAN_LINK) { - arvif = &ahvif->deflink; - } else { - arvif = (struct ath12k_link_vif *) - kzalloc(sizeof(struct ath12k_link_vif), GFP_KERNEL); - if (!arvif) - return NULL; - } - } - - arvif->ahvif = ahvif; - arvif->link_id = link_id; - ahvif->links_map |= BIT(link_id); - - INIT_LIST_HEAD(&arvif->list); - INIT_DELAYED_WORK(&arvif->connection_loss_work, - ath12k_mac_vif_sta_connection_loss_work); - - for (i = 0; i < ARRAY_SIZE(arvif->bitrate_mask.control); i++) { - arvif->bitrate_mask.control[i].legacy = 0xffffffff; - memset(arvif->bitrate_mask.control[i].ht_mcs, 0xff, - sizeof(arvif->bitrate_mask.control[i].ht_mcs)); - memset(arvif->bitrate_mask.control[i].vht_mcs, 0xff, - sizeof(arvif->bitrate_mask.control[i].vht_mcs)); - } - - /* Allocate Default Queue now and reassign during actual vdev create */ - vif->cab_queue = ATH12K_HW_DEFAULT_QUEUE; - for (i = 0; i < ARRAY_SIZE(vif->hw_queue); i++) - vif->hw_queue[i] = ATH12K_HW_DEFAULT_QUEUE; - - vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD; - - rcu_assign_pointer(ahvif->link[arvif->link_id], arvif); - ahvif->links_map |= BIT(link_id); - synchronize_rcu(); - return arvif; -} - -static void ath12k_mac_unassign_link_vif(struct ath12k_link_vif *arvif) -{ - struct ath12k_vif *ahvif = arvif->ahvif; - struct ath12k_hw *ah = ahvif->ah; - - lockdep_assert_wiphy(ah->hw->wiphy); - - rcu_assign_pointer(ahvif->link[arvif->link_id], NULL); - synchronize_rcu(); - ahvif->links_map &= ~BIT(arvif->link_id); - - if (arvif != &ahvif->deflink) - kfree(arvif); - else - memset(arvif, 0, sizeof(*arvif)); -} - -static void ath12k_mac_remove_link_interface(struct ieee80211_hw *hw, - struct ath12k_link_vif *arvif) -{ - struct ath12k_vif *ahvif = arvif->ahvif; - struct ath12k_hw *ah = hw->priv; - struct ath12k *ar = arvif->ar; - int ret; - - lockdep_assert_wiphy(ah->hw->wiphy); - - cancel_delayed_work_sync(&arvif->connection_loss_work); - - ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac remove link interface (vdev %d link id %d)", - arvif->vdev_id, arvif->link_id); - - if (ahvif->vdev_type == WMI_VDEV_TYPE_AP) { - ret = ath12k_peer_delete(ar, arvif->vdev_id, arvif->bssid); - if (ret) - ath12k_warn(ar->ab, "failed to submit AP self-peer 
removal on vdev %d link id %d: %d", - arvif->vdev_id, arvif->link_id, ret); - } - ath12k_mac_vdev_delete(ar, arvif); -} - static struct ath12k* ath12k_mac_select_scan_device(struct ieee80211_hw *hw, struct ieee80211_vif *vif, @@ -4534,9 +4711,6 @@ static int ath12k_mac_set_key(struct ath12k *ar, enum set_key_cmd cmd, struct ath12k_link_sta *arsta, struct ieee80211_key_conf *key) { - struct ath12k_vif *ahvif = arvif->ahvif; - struct ieee80211_vif *vif = ath12k_ahvif_to_vif(ahvif); - struct ieee80211_bss_conf *link_conf; struct ieee80211_sta *sta = NULL; struct ath12k_base *ab = ar->ab; struct ath12k_peer *peer; @@ -4553,19 +4727,10 @@ static int ath12k_mac_set_key(struct ath12k *ar, enum set_key_cmd cmd, if (test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, &ab->dev_flags)) return 1; - link_conf = ath12k_mac_get_link_bss_conf(arvif); - if (!link_conf) { - ath12k_warn(ab, "unable to access bss link conf in set key for vif %pM link %u\n", - vif->addr, arvif->link_id); - return -ENOLINK; - } - if (sta) peer_addr = arsta->addr; - else if (ahvif->vdev_type == WMI_VDEV_TYPE_STA) - peer_addr = link_conf->bssid; else - peer_addr = link_conf->addr; + peer_addr = arvif->bssid; key->hw_key_idx = key->keyidx; @@ -4909,6 +5074,11 @@ static int ath12k_mac_station_assoc(struct ath12k *ar, return -EINVAL; } + spin_lock_bh(&ar->data_lock); + arsta->bw = ath12k_mac_ieee80211_sta_bw_to_wmi(ar, link_sta); + arsta->bw_prev = link_sta->bandwidth; + spin_unlock_bh(&ar->data_lock); + if (link_sta->vht_cap.vht_supported && num_vht_rates == 1) { ret = ath12k_mac_set_peer_vht_fixed_rate(arvif, arsta, mask, band); @@ -5403,6 +5573,7 @@ static int ath12k_mac_station_add(struct ath12k *ar, } } + ewma_avg_rssi_init(&arsta->avg_rssi); return 0; free_peer: @@ -5415,37 +5586,6 @@ static int ath12k_mac_station_add(struct ath12k *ar, return ret; } -static u32 ath12k_mac_ieee80211_sta_bw_to_wmi(struct ath12k *ar, - struct ieee80211_sta *sta) -{ - u32 bw = WMI_PEER_CHWIDTH_20MHZ; - - switch (sta->deflink.bandwidth) { - case IEEE80211_STA_RX_BW_20: - bw = WMI_PEER_CHWIDTH_20MHZ; - break; - case IEEE80211_STA_RX_BW_40: - bw = WMI_PEER_CHWIDTH_40MHZ; - break; - case IEEE80211_STA_RX_BW_80: - bw = WMI_PEER_CHWIDTH_80MHZ; - break; - case IEEE80211_STA_RX_BW_160: - bw = WMI_PEER_CHWIDTH_160MHZ; - break; - case IEEE80211_STA_RX_BW_320: - bw = WMI_PEER_CHWIDTH_320MHZ; - break; - default: - ath12k_warn(ar->ab, "Invalid bandwidth %d in rc update for %pM\n", - sta->deflink.bandwidth, sta->addr); - bw = WMI_PEER_CHWIDTH_20MHZ; - break; - } - - return bw; -} - static int ath12k_mac_assign_link_sta(struct ath12k_hw *ah, struct ath12k_sta *ahsta, struct ath12k_link_sta *arsta, @@ -5529,7 +5669,6 @@ static int ath12k_mac_handle_link_sta_state(struct ieee80211_hw *hw, enum ieee80211_sta_state new_state) { struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif); - struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta); struct ath12k *ar = arvif->ar; int ret = 0; @@ -5572,13 +5711,6 @@ static int ath12k_mac_handle_link_sta_state(struct ieee80211_hw *hw, ath12k_warn(ar->ab, "Failed to associate station: %pM\n", arsta->addr); - spin_lock_bh(&ar->data_lock); - - arsta->bw = ath12k_mac_ieee80211_sta_bw_to_wmi(ar, sta); - arsta->bw_prev = sta->deflink.bandwidth; - - spin_unlock_bh(&ar->data_lock); - /* IEEE80211_STA_ASSOC -> IEEE80211_STA_AUTHORIZED: set peer status as * authorized */ @@ -5846,7 +5978,7 @@ static void ath12k_mac_op_link_sta_rc_update(struct ieee80211_hw *hw, spin_lock_bh(&ar->data_lock); if (changed & IEEE80211_RC_BW_CHANGED) { - 
bw = ath12k_mac_ieee80211_sta_bw_to_wmi(ar, sta); + bw = ath12k_mac_ieee80211_sta_bw_to_wmi(ar, link_sta); arsta->bw_prev = arsta->bw; arsta->bw = bw; } @@ -6674,7 +6806,8 @@ static void ath12k_mac_copy_eht_cap(struct ath12k *ar, memset(eht_cap, 0, sizeof(struct ieee80211_sta_eht_cap)); - if (!(test_bit(WMI_TLV_SERVICE_11BE, ar->ab->wmi_ab.svc_map))) + if (!(test_bit(WMI_TLV_SERVICE_11BE, ar->ab->wmi_ab.svc_map)) || + ath12k_acpi_get_disable_11be(ar->ab)) return; eht_cap->has_eht = true; @@ -7070,6 +7203,22 @@ static void ath12k_mac_add_p2p_noa_ie(struct ath12k *ar, spin_unlock_bh(&ar->data_lock); } +/* Note: called under rcu_read_lock() */ +static void ath12k_mlo_mcast_update_tx_link_address(struct ieee80211_vif *vif, + u8 link_id, struct sk_buff *skb, + u32 info_flags) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + struct ieee80211_bss_conf *bss_conf; + + if (info_flags & IEEE80211_TX_CTL_HW_80211_ENCAP) + return; + + bss_conf = rcu_dereference(vif->link_conf[link_id]); + if (bss_conf) + ether_addr_copy(hdr->addr2, bss_conf->addr); +} + /* Note: called under rcu_read_lock() */ static u8 ath12k_mac_get_tx_link(struct ieee80211_sta *sta, struct ieee80211_vif *vif, u8 link, struct sk_buff *skb, u32 info_flags) @@ -7181,9 +7330,16 @@ static void ath12k_mac_op_tx(struct ieee80211_hw *hw, struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; struct ieee80211_key_conf *key = info->control.hw_key; struct ieee80211_sta *sta = control->sta; + struct ath12k_link_vif *tmp_arvif; u32 info_flags = info->flags; - struct ath12k *ar; + struct sk_buff *msdu_copied; + struct ath12k *ar, *tmp_ar; + struct ath12k_peer *peer; + unsigned long links_map; + bool is_mcast = false; + struct ethhdr *eth; bool is_prb_rsp; + u16 mcbc_gsn; u8 link_id; int ret; @@ -7220,6 +7376,9 @@ static void ath12k_mac_op_tx(struct ieee80211_hw *hw, is_prb_rsp = ieee80211_is_probe_resp(hdr->frame_control); if (info_flags & IEEE80211_TX_CTL_HW_80211_ENCAP) { + eth = (struct ethhdr *)skb->data; + is_mcast = is_multicast_ether_addr(eth->h_dest); + skb_cb->flags |= ATH12K_SKB_HW_80211_ENCAP; } else if (ieee80211_is_mgmt(hdr->frame_control)) { ret = ath12k_mac_mgmt_tx(ar, skb, is_prb_rsp); @@ -7231,14 +7390,99 @@ static void ath12k_mac_op_tx(struct ieee80211_hw *hw, return; } + if (!(info_flags & IEEE80211_TX_CTL_HW_80211_ENCAP)) + is_mcast = is_multicast_ether_addr(hdr->addr1); + /* This is case only for P2P_GO */ if (vif->type == NL80211_IFTYPE_AP && vif->p2p) ath12k_mac_add_p2p_noa_ie(ar, vif, skb, is_prb_rsp); - ret = ath12k_dp_tx(ar, arvif, skb); - if (ret) { - ath12k_warn(ar->ab, "failed to transmit frame %d\n", ret); - ieee80211_free_txskb(hw, skb); + if (!vif->valid_links || !is_mcast || + test_bit(ATH12K_FLAG_RAW_MODE, &ar->ab->dev_flags)) { + ret = ath12k_dp_tx(ar, arvif, skb, false, 0); + if (unlikely(ret)) { + ath12k_warn(ar->ab, "failed to transmit frame %d\n", ret); + ieee80211_free_txskb(ar->ah->hw, skb); + return; + } + } else { + mcbc_gsn = atomic_inc_return(&ahvif->mcbc_gsn) & 0xfff; + + links_map = ahvif->links_map; + for_each_set_bit(link_id, &links_map, + IEEE80211_MLD_MAX_NUM_LINKS) { + tmp_arvif = rcu_dereference(ahvif->link[link_id]); + if (!tmp_arvif || !tmp_arvif->is_up) + continue; + + tmp_ar = tmp_arvif->ar; + msdu_copied = skb_copy(skb, GFP_ATOMIC); + if (!msdu_copied) { + ath12k_err(ar->ab, + "skb copy failure link_id 0x%X vdevid 0x%X\n", + link_id, tmp_arvif->vdev_id); + continue; + } + + ath12k_mlo_mcast_update_tx_link_address(vif, link_id, + msdu_copied, + info_flags); + + 
skb_cb = ATH12K_SKB_CB(msdu_copied); + info = IEEE80211_SKB_CB(msdu_copied); + skb_cb->link_id = link_id; + + /* For open mode, skip peer find logic */ + if (unlikely(ahvif->key_cipher == WMI_CIPHER_NONE)) + goto skip_peer_find; + + spin_lock_bh(&tmp_ar->ab->base_lock); + peer = ath12k_peer_find_by_addr(tmp_ar->ab, tmp_arvif->bssid); + if (!peer) { + spin_unlock_bh(&tmp_ar->ab->base_lock); + ath12k_warn(tmp_ar->ab, + "failed to find peer for vdev_id 0x%X addr %pM link_map 0x%X\n", + tmp_arvif->vdev_id, tmp_arvif->bssid, + ahvif->links_map); + dev_kfree_skb_any(msdu_copied); + continue; + } + + key = peer->keys[peer->mcast_keyidx]; + if (key) { + skb_cb->cipher = key->cipher; + skb_cb->flags |= ATH12K_SKB_CIPHER_SET; + info->control.hw_key = key; + + hdr = (struct ieee80211_hdr *)msdu_copied->data; + if (!ieee80211_has_protected(hdr->frame_control)) + hdr->frame_control |= + cpu_to_le16(IEEE80211_FCTL_PROTECTED); + } + spin_unlock_bh(&tmp_ar->ab->base_lock); + +skip_peer_find: + ret = ath12k_dp_tx(tmp_ar, tmp_arvif, + msdu_copied, true, mcbc_gsn); + if (unlikely(ret)) { + if (ret == -ENOMEM) { + /* Drops are expected during heavy multicast + * frame flood. Print with debug log + * level to avoid a lot of console prints + */ + ath12k_dbg(ar->ab, ATH12K_DBG_MAC, + "failed to transmit frame %d\n", + ret); + } else { + ath12k_warn(ar->ab, + "failed to transmit frame %d\n", + ret); + } + + dev_kfree_skb_any(msdu_copied); + } + } + ieee80211_free_txskb(ar->ah->hw, skb); } } @@ -7255,8 +7499,40 @@ void ath12k_mac_drain_tx(struct ath12k *ar) static int ath12k_mac_config_mon_status_default(struct ath12k *ar, bool enable) { - return -EOPNOTSUPP; - /* TODO: Need to support new monitor mode */ + struct htt_rx_ring_tlv_filter tlv_filter = {}; + struct ath12k_base *ab = ar->ab; + u32 ring_id, i; + int ret = 0; + + lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); + + if (!ab->hw_params->rxdma1_enable) + return ret; + + if (enable) { + tlv_filter = ath12k_mac_mon_status_filter_default; + + if (ath12k_debugfs_rx_filter(ar)) + tlv_filter.rx_filter = ath12k_debugfs_rx_filter(ar); + } else { + tlv_filter.rxmon_disable = true; + } + + for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) { + ring_id = ar->dp.rxdma_mon_dst_ring[i].ring_id; + ret = ath12k_dp_tx_htt_rx_filter_setup(ab, ring_id, + ar->dp.mac_id + i, + HAL_RXDMA_MONITOR_DST, + DP_RXDMA_REFILL_RING_SIZE, + &tlv_filter); + if (ret) { + ath12k_err(ab, + "failed to setup filter for monitor buf %d\n", + ret); + } + } + + return ret; } static int ath12k_mac_start(struct ath12k *ar) @@ -7363,9 +7639,14 @@ static void ath12k_drain_tx(struct ath12k_hw *ah) { - struct ath12k *ar; + struct ath12k *ar = ah->radio; int i; + if (ath12k_ftm_mode) { + ath12k_err(ar->ab, "failed to start mac operations in ftm mode\n"); + return; + } + lockdep_assert_wiphy(ah->hw->wiphy); for_each_ar(ah, ar, i) @@ -7394,6 +7675,7 @@ static int ath12k_mac_op_start(struct ieee80211_hw *hw) case ATH12K_HW_STATE_RESTARTED: case ATH12K_HW_STATE_WEDGED: case ATH12K_HW_STATE_ON: + case ATH12K_HW_STATE_TM: ah->state = ATH12K_HW_STATE_OFF; WARN_ON(1); @@ -7561,14 +7843,9 @@ static int ath12k_mac_setup_vdev_params_mbssid(struct ath12k_link_vif *arvif, u32 *flags, u32 *tx_vdev_id) { struct ath12k_vif *ahvif = arvif->ahvif; - struct ieee80211_vif *tx_vif = ahvif->vif->mbssid_tx_vif; struct ieee80211_bss_conf *link_conf; struct ath12k *ar = arvif->ar; struct ath12k_link_vif *tx_arvif; - struct ath12k_vif *tx_ahvif; - - if (!tx_vif) - return 0; 
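The reworked ath12k_mac_config_mon_status_default() above follows a simple pattern: on enable it starts from the default TLV filter (optionally overridden by the debugfs rx_filter), on disable it programs a zeroed filter with only rxmon_disable set, and either way it writes the result to every rxdma monitor destination ring. A reduced standalone C sketch of that enable/disable decision follows; the struct and the default value are modeled for illustration and are not the kernel definitions:

    #include <stdbool.h>
    #include <stdio.h>

    struct tlv_filter {                     /* modeled subset of the fields */
            unsigned int rx_filter;
            bool rxmon_disable;
    };

    #define DEFAULT_RX_FILTER 0x1ffu        /* placeholder default filter set */

    static struct tlv_filter make_filter(bool enable, unsigned int debugfs_override)
    {
            struct tlv_filter f = {0};

            if (enable) {
                    f.rx_filter = DEFAULT_RX_FILTER;
                    if (debugfs_override)   /* debugfs override wins, as in the patch */
                            f.rx_filter = debugfs_override;
            } else {
                    f.rxmon_disable = true; /* only the disable flag is set */
            }
            return f;
    }

    int main(void)
    {
            struct tlv_filter on = make_filter(true, 0);
            struct tlv_filter off = make_filter(false, 0);

            printf("enable:  rx_filter=0x%x rxmon_disable=%d\n", on.rx_filter, on.rxmon_disable);
            printf("disable: rx_filter=0x%x rxmon_disable=%d\n", off.rx_filter, off.rxmon_disable);
            return 0;
    }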
link_conf = ath12k_mac_get_link_bss_conf(arvif); if (!link_conf) { @@ -7577,11 +7854,13 @@ static int ath12k_mac_setup_vdev_params_mbssid(struct ath12k_link_vif *arvif, return -ENOLINK; } - tx_ahvif = ath12k_vif_to_ahvif(tx_vif); - tx_arvif = &tx_ahvif->deflink; + tx_arvif = ath12k_mac_get_tx_arvif(arvif); + if (!tx_arvif) + return 0; if (link_conf->nontransmitted) { - if (ar->ah->hw->wiphy != ieee80211_vif_to_wdev(tx_vif)->wiphy) + if (ath12k_ar_to_hw(ar)->wiphy != + ath12k_ar_to_hw(tx_arvif->ar)->wiphy) return -EINVAL; *flags = WMI_VDEV_MBSSID_FLAGS_NON_TRANSMIT_AP; @@ -8066,6 +8345,7 @@ static void ath12k_mac_vif_cache_flush(struct ath12k *ar, struct ath12k_link_vif struct ieee80211_vif *vif = ath12k_ahvif_to_vif(ahvif); struct ath12k_vif_cache *cache = ahvif->cache[arvif->link_id]; struct ath12k_base *ab = ar->ab; + struct ieee80211_bss_conf *link_conf; int ret; @@ -8084,7 +8364,13 @@ static void ath12k_mac_vif_cache_flush(struct ath12k *ar, struct ath12k_link_vif } if (cache->bss_conf_changed) { - ath12k_mac_bss_info_changed(ar, arvif, &vif->bss_conf, + link_conf = ath12k_mac_get_link_bss_conf(arvif); + if (!link_conf) { + ath12k_warn(ar->ab, "unable to access bss link conf in cache flush for vif %pM link %u\n", + vif->addr, arvif->link_id); + return; + } + ath12k_mac_bss_info_changed(ar, arvif, link_conf, cache->bss_conf_changed); } @@ -8207,19 +8493,8 @@ static int ath12k_mac_op_add_interface(struct ieee80211_hw *hw, ahvif->ah = ah; ahvif->vif = vif; arvif = &ahvif->deflink; - arvif->ahvif = ahvif; - INIT_LIST_HEAD(&arvif->list); - INIT_DELAYED_WORK(&arvif->connection_loss_work, - ath12k_mac_vif_sta_connection_loss_work); - - for (i = 0; i < ARRAY_SIZE(arvif->bitrate_mask.control); i++) { - arvif->bitrate_mask.control[i].legacy = 0xffffffff; - memset(arvif->bitrate_mask.control[i].ht_mcs, 0xff, - sizeof(arvif->bitrate_mask.control[i].ht_mcs)); - memset(arvif->bitrate_mask.control[i].vht_mcs, 0xff, - sizeof(arvif->bitrate_mask.control[i].vht_mcs)); - } + ath12k_mac_init_arvif(ahvif, arvif, -1); /* Allocate Default Queue now and reassign during actual vdev create */ vif->cab_queue = ATH12K_HW_DEFAULT_QUEUE; @@ -8381,29 +8656,6 @@ static void ath12k_mac_op_remove_interface(struct ieee80211_hw *hw, FIF_PROBE_REQ | \ FIF_FCSFAIL) -static void ath12k_mac_configure_filter(struct ath12k *ar, - unsigned int total_flags) -{ - bool reset_flag; - int ret; - - lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy); - - ar->filter_flags = total_flags; - - /* For monitor mode */ - reset_flag = !(ar->filter_flags & FIF_BCN_PRBRESP_PROMISC); - - ret = ath12k_dp_tx_htt_monitor_mode_ring_config(ar, reset_flag); - if (ret) - ath12k_warn(ar->ab, - "fail to set monitor filter: %d\n", ret); - - ath12k_dbg(ar->ab, ATH12K_DBG_MAC, - "total_flags:0x%x, reset_flag:%d\n", - total_flags, reset_flag); -} - static void ath12k_mac_op_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags, unsigned int *total_flags, @@ -8417,7 +8669,7 @@ static void ath12k_mac_op_configure_filter(struct ieee80211_hw *hw, ar = ath12k_ah_to_ar(ah, 0); *total_flags &= SUPPORTED_FILTERS; - ath12k_mac_configure_filter(ar, *total_flags); + ar->filter_flags = *total_flags; } static int ath12k_mac_op_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant) @@ -8677,6 +8929,9 @@ ath12k_mac_mlo_get_vdev_args(struct ath12k_link_vif *arvif, if (arvif == arvif_p) continue; + if (!arvif_p->is_created) + continue; + link_conf = wiphy_dereference(ahvif->ah->hw->wiphy, ahvif->vif->link_conf[arvif_p->link_id]); @@ -8982,9 +9237,9 @@ 
ath12k_mac_update_vif_chan(struct ath12k *ar,
 			   int n_vifs)
 {
 	struct ath12k_wmi_vdev_up_params params = {};
+	struct ath12k_link_vif *arvif, *tx_arvif;
 	struct ieee80211_bss_conf *link_conf;
 	struct ath12k_base *ab = ar->ab;
-	struct ath12k_link_vif *arvif;
 	struct ieee80211_vif *vif;
 	struct ath12k_vif *ahvif;
 	u8 link_id;
@@ -9052,11 +9307,9 @@ ath12k_mac_update_vif_chan(struct ath12k *ar,
 		params.vdev_id = arvif->vdev_id;
 		params.aid = ahvif->aid;
 		params.bssid = arvif->bssid;
-		if (vif->mbssid_tx_vif) {
-			struct ath12k_vif *tx_ahvif =
-				ath12k_vif_to_ahvif(vif->mbssid_tx_vif);
-			struct ath12k_link_vif *tx_arvif = &tx_ahvif->deflink;
 
+		tx_arvif = ath12k_mac_get_tx_arvif(arvif);
+		if (tx_arvif) {
 			params.tx_bssid = tx_arvif->bssid;
 			params.nontx_profile_idx = link_conf->bssid_index;
 			params.nontx_profile_cnt = 1 << link_conf->bssid_indicator;
@@ -9322,9 +9575,6 @@ ath12k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
 	if (ahvif->vdev_type != WMI_VDEV_TYPE_MONITOR &&
 	    ar->num_started_vdevs == 1 && ar->monitor_vdev_created)
 		ath12k_mac_monitor_stop(ar);
-
-	ath12k_mac_remove_link_interface(hw, arvif);
-	ath12k_mac_unassign_link_vif(arvif);
 }
 
 static int
@@ -10017,6 +10267,40 @@ static int ath12k_mac_op_get_survey(struct ieee80211_hw *hw, int idx,
 	return 0;
 }
 
+static int ath12k_mac_get_fw_stats(struct ath12k *ar, u32 pdev_id,
+				   u32 vdev_id, u32 stats_id)
+{
+	struct ath12k_base *ab = ar->ab;
+	struct ath12k_hw *ah = ath12k_ar_to_ah(ar);
+	unsigned long time_left;
+	int ret;
+
+	guard(mutex)(&ah->hw_mutex);
+
+	if (ah->state != ATH12K_HW_STATE_ON)
+		return -ENETDOWN;
+
+	reinit_completion(&ar->fw_stats_complete);
+
+	ret = ath12k_wmi_send_stats_request_cmd(ar, stats_id, vdev_id, pdev_id);
+
+	if (ret) {
+		ath12k_warn(ab, "failed to request fw stats: %d\n", ret);
+		return ret;
+	}
+
+	ath12k_dbg(ab, ATH12K_DBG_WMI,
+		   "get fw stat pdev id %d vdev id %d stats id 0x%x\n",
+		   pdev_id, vdev_id, stats_id);
+
+	time_left = wait_for_completion_timeout(&ar->fw_stats_complete, 1 * HZ);
+
+	if (!time_left)
+		ath12k_warn(ab, "timed out while waiting for fw stats\n");
+
+	return ret;
+}
+
 static void ath12k_mac_op_sta_statistics(struct ieee80211_hw *hw,
 					 struct ieee80211_vif *vif,
 					 struct ieee80211_sta *sta,
@@ -10024,10 +10308,19 @@ static void ath12k_mac_op_sta_statistics(struct ieee80211_hw *hw,
 {
 	struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta);
 	struct ath12k_link_sta *arsta;
+	struct ath12k *ar;
+	s8 signal;
+	bool db2dbm;
 
 	lockdep_assert_wiphy(hw->wiphy);
 
 	arsta = &ahsta->deflink;
+	ar = ath12k_get_ar_by_vif(hw, vif, arsta->link_id);
+	if (!ar)
+		return;
+
+	db2dbm = test_bit(WMI_TLV_SERVICE_HW_DB2DBM_CONVERSION_SUPPORT,
+			  ar->ab->wmi_ab.svc_map);
 
 	sinfo->rx_duration = arsta->rx_duration;
 	sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_DURATION);
@@ -10035,25 +10328,43 @@ static void ath12k_mac_op_sta_statistics(struct ieee80211_hw *hw,
 	sinfo->tx_duration = arsta->tx_duration;
 	sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_DURATION);
 
-	if (!arsta->txrate.legacy && !arsta->txrate.nss)
-		return;
-
-	if (arsta->txrate.legacy) {
-		sinfo->txrate.legacy = arsta->txrate.legacy;
-	} else {
-		sinfo->txrate.mcs = arsta->txrate.mcs;
-		sinfo->txrate.nss = arsta->txrate.nss;
-		sinfo->txrate.bw = arsta->txrate.bw;
-		sinfo->txrate.he_gi = arsta->txrate.he_gi;
-		sinfo->txrate.he_dcm = arsta->txrate.he_dcm;
-		sinfo->txrate.he_ru_alloc = arsta->txrate.he_ru_alloc;
+	if (arsta->txrate.legacy || arsta->txrate.nss) {
+		if (arsta->txrate.legacy) {
+			sinfo->txrate.legacy = arsta->txrate.legacy;
+		} else {
+			sinfo->txrate.mcs =
arsta->txrate.mcs; + sinfo->txrate.nss = arsta->txrate.nss; + sinfo->txrate.bw = arsta->txrate.bw; + sinfo->txrate.he_gi = arsta->txrate.he_gi; + sinfo->txrate.he_dcm = arsta->txrate.he_dcm; + sinfo->txrate.he_ru_alloc = arsta->txrate.he_ru_alloc; + sinfo->txrate.eht_gi = arsta->txrate.eht_gi; + sinfo->txrate.eht_ru_alloc = arsta->txrate.eht_ru_alloc; + } + sinfo->txrate.flags = arsta->txrate.flags; + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE); } - sinfo->txrate.flags = arsta->txrate.flags; - sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE); /* TODO: Use real NF instead of default one. */ - sinfo->signal = arsta->rssi_comb + ATH12K_DEFAULT_NOISE_FLOOR; - sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL); + signal = arsta->rssi_comb; + + if (!signal && + ahsta->ahvif->vdev_type == WMI_VDEV_TYPE_STA && + !(ath12k_mac_get_fw_stats(ar, ar->pdev->pdev_id, 0, + WMI_REQUEST_VDEV_STAT))) + signal = arsta->rssi_beacon; + + if (signal) { + sinfo->signal = db2dbm ? signal : signal + ATH12K_DEFAULT_NOISE_FLOOR; + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL); + } + + sinfo->signal_avg = ewma_avg_rssi_read(&arsta->avg_rssi); + + if (!db2dbm) + sinfo->signal_avg += ATH12K_DEFAULT_NOISE_FLOOR; + + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG); } static int ath12k_mac_op_cancel_remain_on_channel(struct ieee80211_hw *hw, @@ -10298,6 +10609,10 @@ static const struct ieee80211_ops ath12k_ops = { .suspend = ath12k_wow_op_suspend, .resume = ath12k_wow_op_resume, .set_wakeup = ath12k_wow_op_set_wakeup, +#endif + CFG80211_TESTMODE_CMD(ath12k_tm_cmd) +#ifdef CONFIG_ATH12K_DEBUGFS + .link_sta_add_debugfs = ath12k_debugfs_link_sta_op_add, #endif }; @@ -10950,6 +11265,8 @@ static int ath12k_mac_hw_register(struct ath12k_hw *ah) ath12k_iftypes_ext_capa[2].eml_capabilities = cap->eml_cap; ath12k_iftypes_ext_capa[2].mld_capa_and_ops = cap->mld_cap; wiphy->flags |= WIPHY_FLAG_SUPPORTS_MLO; + + ieee80211_hw_set(hw, MLO_MCAST_MULTI_LINK_TX); } hw->queues = ATH12K_HW_MAX_QUEUES; @@ -11033,6 +11350,8 @@ static int ath12k_mac_hw_register(struct ath12k_hw *ah) ath12k_debugfs_register(ar); } + init_completion(&ar->fw_stats_complete); + return 0; err_unregister_hw: @@ -11133,6 +11452,9 @@ static int __ath12k_mac_mlo_setup(struct ath12k *ar) } } + if (num_link == 0) + return 0; + mlo.group_id = cpu_to_le32(ag->id); mlo.partner_link_id = partner_link_id; mlo.num_partner_links = num_link; @@ -11162,10 +11484,16 @@ static int __ath12k_mac_mlo_teardown(struct ath12k *ar) { struct ath12k_base *ab = ar->ab; int ret; + u8 num_link; if (test_bit(ATH12K_FLAG_RECOVERY, &ab->dev_flags)) return 0; + num_link = ath12k_get_num_partner_link(ar); + + if (num_link == 0) + return 0; + ret = ath12k_wmi_mlo_teardown(ar); if (ret) { ath12k_warn(ab, "failed to send MLO teardown WMI command for pdev %d: %d\n", diff --git a/drivers/net/wireless/ath/ath12k/mac.h b/drivers/net/wireless/ath/ath12k/mac.h index 3594729b63974..ae35b73312bf2 100644 --- a/drivers/net/wireless/ath/ath12k/mac.h +++ b/drivers/net/wireless/ath/ath12k/mac.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: BSD-3-Clause-Clear */ /* * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. - * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved. 
*/ #ifndef ATH12K_MAC_H @@ -108,5 +108,11 @@ int ath12k_mac_vdev_stop(struct ath12k_link_vif *arvif); void ath12k_mac_get_any_chanctx_conf_iter(struct ieee80211_hw *hw, struct ieee80211_chanctx_conf *conf, void *data); - +u16 ath12k_mac_he_convert_tones_to_ru_tones(u16 tones); +enum nl80211_eht_ru_alloc ath12k_mac_eht_ru_tones_to_nl80211_eht_ru_alloc(u16 ru_tones); +enum nl80211_eht_gi ath12k_mac_eht_gi_to_nl80211_eht_gi(u8 sgi); +struct ieee80211_bss_conf *ath12k_mac_get_link_bss_conf(struct ath12k_link_vif *arvif); +struct ath12k *ath12k_get_ar_by_vif(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + u8 link_id); #endif diff --git a/drivers/net/wireless/ath/ath12k/pci.c b/drivers/net/wireless/ath/ath12k/pci.c index 06cff3849ab8d..b474696ac6d8c 100644 --- a/drivers/net/wireless/ath/ath12k/pci.c +++ b/drivers/net/wireless/ath/ath12k/pci.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: BSD-3-Clause-Clear /* * Copyright (c) 2019-2021 The Linux Foundation. All rights reserved. - * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved. */ #include @@ -483,8 +483,11 @@ static void __ath12k_pci_ext_irq_disable(struct ath12k_base *ab) ath12k_pci_ext_grp_disable(irq_grp); - napi_synchronize(&irq_grp->napi); - napi_disable(&irq_grp->napi); + if (irq_grp->napi_enabled) { + napi_synchronize(&irq_grp->napi); + napi_disable(&irq_grp->napi); + irq_grp->napi_enabled = false; + } } } @@ -646,7 +649,7 @@ static int ath12k_pci_set_irq_affinity_hint(struct ath12k_pci *ab_pci, if (test_bit(ATH12K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags)) return 0; - return irq_set_affinity_hint(ab_pci->pdev->irq, m); + return irq_set_affinity_and_hint(ab_pci->pdev->irq, m); } static int ath12k_pci_config_irq(struct ath12k_base *ab) @@ -1114,7 +1117,11 @@ void ath12k_pci_ext_irq_enable(struct ath12k_base *ab) for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) { struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i]; - napi_enable(&irq_grp->napi); + if (!irq_grp->napi_enabled) { + napi_enable(&irq_grp->napi); + irq_grp->napi_enabled = true; + } + ath12k_pci_ext_grp_enable(irq_grp); } @@ -1561,6 +1568,7 @@ static int ath12k_pci_probe(struct pci_dev *pdev, ab_pci->ab = ab; ab_pci->pdev = pdev; ab->hif.ops = &ath12k_pci_hif_ops; + ab->fw_mode = ATH12K_FIRMWARE_MODE_NORMAL; pci_set_drvdata(pdev, ab); spin_lock_init(&ab_pci->window_lock); @@ -1689,6 +1697,8 @@ static int ath12k_pci_probe(struct pci_dev *pdev, return 0; err_free_irq: + /* __free_irq() expects the caller to have cleared the affinity hint */ + ath12k_pci_set_irq_affinity_hint(ab_pci, NULL); ath12k_pci_free_irq(ab); err_ce_free: @@ -1734,9 +1744,9 @@ static void ath12k_pci_remove(struct pci_dev *pdev) cancel_work_sync(&ab->reset_work); cancel_work_sync(&ab->dump_work); ath12k_core_deinit(ab); - ath12k_fw_unmap(ab); qmi_fail: + ath12k_fw_unmap(ab); ath12k_mhi_unregister(ab_pci); ath12k_pci_free_irq(ab); diff --git a/drivers/net/wireless/ath/ath12k/qmi.c b/drivers/net/wireless/ath/ath12k/qmi.c index 5c3563383fabb..348dbc81bad8c 100644 --- a/drivers/net/wireless/ath/ath12k/qmi.c +++ b/drivers/net/wireless/ath/ath12k/qmi.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: BSD-3-Clause-Clear /* * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. - * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved. 
*/ #include @@ -2056,8 +2056,7 @@ static int ath12k_host_cap_parse_mlo(struct ath12k_base *ab, } if (!ab->qmi.num_radios || ab->qmi.num_radios == U8_MAX) { - ab->single_chip_mlo_supp = false; - + ag->mlo_capable = false; ath12k_dbg(ab, ATH12K_DBG_QMI, "skip QMI MLO cap due to invalid num_radio %d\n", ab->qmi.num_radios); @@ -2265,10 +2264,6 @@ static void ath12k_qmi_phy_cap_send(struct ath12k_base *ab) goto out; } - if (resp.single_chip_mlo_support_valid && - resp.single_chip_mlo_support) - ab->single_chip_mlo_supp = true; - if (!resp.num_phy_valid) { ret = -ENODATA; goto out; @@ -2277,10 +2272,9 @@ static void ath12k_qmi_phy_cap_send(struct ath12k_base *ab) ab->qmi.num_radios = resp.num_phy; ath12k_dbg(ab, ATH12K_DBG_QMI, - "phy capability resp valid %d num_phy %d valid %d board_id %d valid %d single_chip_mlo_support %d\n", + "phy capability resp valid %d num_phy %d valid %d board_id %d\n", resp.num_phy_valid, resp.num_phy, - resp.board_id_valid, resp.board_id, - resp.single_chip_mlo_support_valid, resp.single_chip_mlo_support); + resp.board_id_valid, resp.board_id); return; @@ -2740,6 +2734,15 @@ int ath12k_qmi_request_target_cap(struct ath12k_base *ab) if (r) ath12k_dbg(ab, ATH12K_DBG_QMI, "SMBIOS bdf variant name not set.\n"); + r = ath12k_acpi_start(ab); + if (r) + /* ACPI is optional so continue in case of an error */ + ath12k_dbg(ab, ATH12K_DBG_BOOT, "acpi failed: %d\n", r); + + r = ath12k_acpi_check_bdf_variant_name(ab); + if (r) + ath12k_dbg(ab, ATH12K_DBG_BOOT, "ACPI bdf variant name not set.\n"); + out: return ret; } diff --git a/drivers/net/wireless/ath/ath12k/reg.h b/drivers/net/wireless/ath/ath12k/reg.h index 29c7ec3260daf..75f80df2aa0c7 100644 --- a/drivers/net/wireless/ath/ath12k/reg.h +++ b/drivers/net/wireless/ath/ath12k/reg.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: BSD-3-Clause-Clear */ /* * Copyright (c) 2019-2021 The Linux Foundation. All rights reserved. - * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved. */ #ifndef ATH12K_REG_H @@ -13,6 +13,9 @@ struct ath12k_base; struct ath12k; +#define ATH12K_2GHZ_MAX_FREQUENCY 2495 +#define ATH12K_5GHZ_MAX_FREQUENCY 5920 + /* DFS regdomains supported by Firmware */ enum ath12k_dfs_region { ATH12K_DFS_REG_UNSET, diff --git a/drivers/net/wireless/ath/ath12k/rx_desc.h b/drivers/net/wireless/ath/ath12k/rx_desc.h index 10366bbe99999..6c600473b4021 100644 --- a/drivers/net/wireless/ath/ath12k/rx_desc.h +++ b/drivers/net/wireless/ath/ath12k/rx_desc.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: BSD-3-Clause-Clear */ /* * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. - * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved. 
 */
 
 #ifndef ATH12K_RX_DESC_H
 #define ATH12K_RX_DESC_H
@@ -637,6 +637,8 @@ enum rx_msdu_start_pkt_type {
 	RX_MSDU_START_PKT_TYPE_11N,
 	RX_MSDU_START_PKT_TYPE_11AC,
 	RX_MSDU_START_PKT_TYPE_11AX,
+	RX_MSDU_START_PKT_TYPE_11BA,
+	RX_MSDU_START_PKT_TYPE_11BE,
 };
 
 enum rx_msdu_start_sgi {
@@ -1539,12 +1541,4 @@ struct hal_rx_desc {
 #define MAX_MU_GROUP_SHOW 16
 #define MAX_MU_GROUP_LENGTH (6 * MAX_MU_GROUP_SHOW)
 
-#define HAL_RX_RU_ALLOC_TYPE_MAX 6
-#define RU_26 1
-#define RU_52 2
-#define RU_106 4
-#define RU_242 9
-#define RU_484 18
-#define RU_996 37
-
 #endif /* ATH12K_RX_DESC_H */
diff --git a/drivers/net/wireless/ath/ath12k/testmode.c b/drivers/net/wireless/ath/ath12k/testmode.c
new file mode 100644
index 0000000000000..18d56a976dc74
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/testmode.c
@@ -0,0 +1,395 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include "testmode.h"
+#include <net/netlink.h>
+#include "debug.h"
+#include "wmi.h"
+#include "hw.h"
+#include "core.h"
+#include "hif.h"
+#include "../testmode_i.h"
+
+#define ATH12K_FTM_SEGHDR_CURRENT_SEQ		GENMASK(3, 0)
+#define ATH12K_FTM_SEGHDR_TOTAL_SEGMENTS	GENMASK(7, 4)
+
+static const struct nla_policy ath12k_tm_policy[ATH_TM_ATTR_MAX + 1] = {
+	[ATH_TM_ATTR_CMD]		= { .type = NLA_U32 },
+	[ATH_TM_ATTR_DATA]		= { .type = NLA_BINARY,
+					    .len = ATH_TM_DATA_MAX_LEN },
+	[ATH_TM_ATTR_WMI_CMDID]		= { .type = NLA_U32 },
+	[ATH_TM_ATTR_VERSION_MAJOR]	= { .type = NLA_U32 },
+	[ATH_TM_ATTR_VERSION_MINOR]	= { .type = NLA_U32 },
+};
+
+static struct ath12k *ath12k_tm_get_ar(struct ath12k_base *ab)
+{
+	struct ath12k_pdev *pdev;
+	struct ath12k *ar;
+	int i;
+
+	for (i = 0; i < ab->num_radios; i++) {
+		pdev = &ab->pdevs[i];
+		ar = pdev->ar;
+
+		if (ar && ar->ah->state == ATH12K_HW_STATE_TM)
+			return ar;
+	}
+
+	return NULL;
+}
+
+void ath12k_tm_wmi_event_unsegmented(struct ath12k_base *ab, u32 cmd_id,
+				     struct sk_buff *skb)
+{
+	struct sk_buff *nl_skb;
+	struct ath12k *ar;
+
+	ath12k_dbg(ab, ATH12K_DBG_TESTMODE,
+		   "testmode event wmi cmd_id %d skb length %d\n",
+		   cmd_id, skb->len);
+
+	ath12k_dbg_dump(ab, ATH12K_DBG_TESTMODE, NULL, "", skb->data, skb->len);
+
+	ar = ath12k_tm_get_ar(ab);
+	if (!ar) {
+		ath12k_warn(ab, "testmode event not handled due to invalid pdev\n");
+		return;
+	}
+
+	spin_lock_bh(&ar->data_lock);
+
+	nl_skb = cfg80211_testmode_alloc_event_skb(ar->ah->hw->wiphy,
+						   2 * nla_total_size(sizeof(u32)) +
+						   nla_total_size(skb->len),
+						   GFP_ATOMIC);
+	spin_unlock_bh(&ar->data_lock);
+
+	if (!nl_skb) {
+		ath12k_warn(ab,
+			    "failed to allocate skb for unsegmented testmode wmi event\n");
+		return;
+	}
+
+	if (nla_put_u32(nl_skb, ATH_TM_ATTR_CMD, ATH_TM_CMD_WMI) ||
+	    nla_put_u32(nl_skb, ATH_TM_ATTR_WMI_CMDID, cmd_id) ||
+	    nla_put(nl_skb, ATH_TM_ATTR_DATA, skb->len, skb->data)) {
+		ath12k_warn(ab, "failed to populate testmode unsegmented event\n");
+		kfree_skb(nl_skb);
+		return;
+	}
+
+	cfg80211_testmode_event(nl_skb, GFP_ATOMIC);
+}
+
+void ath12k_tm_process_event(struct ath12k_base *ab, u32 cmd_id,
+			     const struct ath12k_wmi_ftm_event *ftm_msg,
+			     u16 length)
+{
+	struct sk_buff *nl_skb;
+	struct ath12k *ar;
+	u32 data_pos, pdev_id;
+	u16 datalen;
+	u8 total_segments, current_seq;
+	u8 const *buf_pos;
+
+	ath12k_dbg(ab, ATH12K_DBG_TESTMODE,
+		   "testmode event wmi cmd_id %d ftm event msg %pK datalen %d\n",
+		   cmd_id, ftm_msg, length);
+	ath12k_dbg_dump(ab, ATH12K_DBG_TESTMODE, NULL, "", ftm_msg,
+			length);
+	pdev_id = DP_HW2SW_MACID(le32_to_cpu(ftm_msg->seg_hdr.pdev_id));
+
+	if (pdev_id >= ab->num_radios) {
+		ath12k_warn(ab, "testmode event not handled due to invalid pdev id\n");
+		return;
+	}
+
+	ar = ab->pdevs[pdev_id].ar;
+
+	if (!ar) {
+		ath12k_warn(ab, "testmode event not handled due to absence of pdev\n");
+		return;
+	}
+
+	current_seq = le32_get_bits(ftm_msg->seg_hdr.segmentinfo,
+				    ATH12K_FTM_SEGHDR_CURRENT_SEQ);
+	total_segments = le32_get_bits(ftm_msg->seg_hdr.segmentinfo,
+				       ATH12K_FTM_SEGHDR_TOTAL_SEGMENTS);
+	datalen = length - (sizeof(struct ath12k_wmi_ftm_seg_hdr_params));
+	buf_pos = ftm_msg->data;
+
+	if (current_seq == 0) {
+		ab->ftm_event_obj.expected_seq = 0;
+		ab->ftm_event_obj.data_pos = 0;
+	}
+
+	data_pos = ab->ftm_event_obj.data_pos;
+
+	if ((data_pos + datalen) > ATH_FTM_EVENT_MAX_BUF_LENGTH) {
+		ath12k_warn(ab,
+			    "Invalid event length data_pos[%d] datalen[%d]\n",
+			    data_pos, datalen);
+		return;
+	}
+
+	memcpy(&ab->ftm_event_obj.eventdata[data_pos], buf_pos, datalen);
+	data_pos += datalen;
+
+	if (++ab->ftm_event_obj.expected_seq != total_segments) {
+		ab->ftm_event_obj.data_pos = data_pos;
+		ath12k_dbg(ab, ATH12K_DBG_TESTMODE,
+			   "partial data received current_seq[%d], total_seg[%d]\n",
+			   current_seq, total_segments);
+		return;
+	}
+
+	ath12k_dbg(ab, ATH12K_DBG_TESTMODE,
+		   "total data length[%d] = [%d]\n",
+		   data_pos, ftm_msg->seg_hdr.len);
+
+	spin_lock_bh(&ar->data_lock);
+	nl_skb = cfg80211_testmode_alloc_event_skb(ar->ah->hw->wiphy,
+						   2 * nla_total_size(sizeof(u32)) +
+						   nla_total_size(data_pos),
+						   GFP_ATOMIC);
+	spin_unlock_bh(&ar->data_lock);
+
+	if (!nl_skb) {
+		ath12k_warn(ab,
+			    "failed to allocate skb for testmode wmi event\n");
+		return;
+	}
+
+	if (nla_put_u32(nl_skb, ATH_TM_ATTR_CMD,
+			ATH_TM_CMD_WMI_FTM) ||
+	    nla_put_u32(nl_skb, ATH_TM_ATTR_WMI_CMDID, cmd_id) ||
+	    nla_put(nl_skb, ATH_TM_ATTR_DATA, data_pos,
+		    &ab->ftm_event_obj.eventdata[0])) {
+		ath12k_warn(ab, "failed to populate testmode event");
+		kfree_skb(nl_skb);
+		return;
+	}
+
+	cfg80211_testmode_event(nl_skb, GFP_ATOMIC);
+}
+
+static int ath12k_tm_cmd_get_version(struct ath12k *ar, struct nlattr *tb[])
+{
+	struct sk_buff *skb;
+
+	ath12k_dbg(ar->ab, ATH12K_DBG_TESTMODE,
+		   "testmode cmd get version_major %d version_minor %d\n",
+		   ATH_TESTMODE_VERSION_MAJOR,
+		   ATH_TESTMODE_VERSION_MINOR);
+
+	spin_lock_bh(&ar->data_lock);
+	skb = cfg80211_testmode_alloc_reply_skb(ar->ah->hw->wiphy,
+						2 * nla_total_size(sizeof(u32)));
+	spin_unlock_bh(&ar->data_lock);
+
+	if (!skb)
+		return -ENOMEM;
+
+	if (nla_put_u32(skb, ATH_TM_ATTR_VERSION_MAJOR,
+			ATH_TESTMODE_VERSION_MAJOR) ||
+	    nla_put_u32(skb, ATH_TM_ATTR_VERSION_MINOR,
+			ATH_TESTMODE_VERSION_MINOR)) {
+		kfree_skb(skb);
+		return -ENOBUFS;
+	}
+
+	return cfg80211_testmode_reply(skb);
+}
+
+static int ath12k_tm_cmd_process_ftm(struct ath12k *ar, struct nlattr *tb[])
+{
+	struct ath12k_wmi_pdev *wmi = ar->wmi;
+	struct sk_buff *skb;
+	struct ath12k_wmi_ftm_cmd *ftm_cmd;
+	int ret = 0;
+	void *buf;
+	size_t aligned_len;
+	u32 cmd_id, buf_len;
+	u16 chunk_len, total_bytes, num_segments;
+	u8 segnumber = 0, *bufpos;
+
+	ath12k_dbg(ar->ab, ATH12K_DBG_TESTMODE, "ah->state %d\n", ar->ah->state);
+	if (ar->ah->state != ATH12K_HW_STATE_TM)
+		return -ENETDOWN;
+
+	if (!tb[ATH_TM_ATTR_DATA])
+		return -EINVAL;
+
+	buf = nla_data(tb[ATH_TM_ATTR_DATA]);
+	buf_len = nla_len(tb[ATH_TM_ATTR_DATA]);
+	cmd_id = WMI_PDEV_UTF_CMDID;
+	ath12k_dbg(ar->ab, ATH12K_DBG_TESTMODE,
+		   "testmode cmd wmi cmd_id %d buf %pK buf_len %d\n",
+		   cmd_id, buf, buf_len);
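+	/* FTM payloads larger than MAX_WMI_UTF_LEN are sent as a series of
+	 * segments below; each segment carries a header with the total
+	 * length, the segment count and its own sequence number so that
+	 * firmware can reassemble the original command.
+	 */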
+	ath12k_dbg_dump(ar->ab, ATH12K_DBG_TESTMODE, NULL, "", buf, buf_len);
+	bufpos = buf;
+	total_bytes = buf_len;
+	num_segments = total_bytes / MAX_WMI_UTF_LEN;
+
+	if (buf_len - (num_segments * MAX_WMI_UTF_LEN))
+		num_segments++;
+
+	while (buf_len) {
+		if (buf_len > MAX_WMI_UTF_LEN)
+			chunk_len = MAX_WMI_UTF_LEN; /* MAX message */
+		else
+			chunk_len = buf_len;
+
+		skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, (chunk_len +
+					   sizeof(struct ath12k_wmi_ftm_cmd)));
+
+		if (!skb)
+			return -ENOMEM;
+
+		ftm_cmd = (struct ath12k_wmi_ftm_cmd *)skb->data;
+		aligned_len = chunk_len + sizeof(struct ath12k_wmi_ftm_seg_hdr_params);
+		ftm_cmd->tlv_header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, aligned_len);
+		ftm_cmd->seg_hdr.len = cpu_to_le32(total_bytes);
+		ftm_cmd->seg_hdr.msgref = cpu_to_le32(ar->ftm_msgref);
+		ftm_cmd->seg_hdr.segmentinfo =
+			le32_encode_bits(num_segments,
+					 ATH12K_FTM_SEGHDR_TOTAL_SEGMENTS) |
+			le32_encode_bits(segnumber,
+					 ATH12K_FTM_SEGHDR_CURRENT_SEQ);
+		ftm_cmd->seg_hdr.pdev_id = cpu_to_le32(ar->pdev->pdev_id);
+		segnumber++;
+		memcpy(&ftm_cmd->data, bufpos, chunk_len);
+		ret = ath12k_wmi_cmd_send(wmi, skb, cmd_id);
+
+		if (ret) {
+			ath12k_warn(ar->ab, "ftm wmi command failed: %d\n", ret);
+			kfree_skb(skb);
+			return ret;
+		}
+
+		buf_len -= chunk_len;
+		bufpos += chunk_len;
+	}
+
+	++ar->ftm_msgref;
+	return ret;
+}
+
+static int ath12k_tm_cmd_testmode_start(struct ath12k *ar, struct nlattr *tb[])
+{
+	if (ar->ah->state == ATH12K_HW_STATE_TM)
+		return -EALREADY;
+
+	if (ar->ah->state != ATH12K_HW_STATE_OFF)
+		return -EBUSY;
+
+	ar->ab->ftm_event_obj.eventdata = kzalloc(ATH_FTM_EVENT_MAX_BUF_LENGTH,
+						  GFP_KERNEL);
+
+	if (!ar->ab->ftm_event_obj.eventdata)
+		return -ENOMEM;
+
+	ar->ah->state = ATH12K_HW_STATE_TM;
+	ar->ftm_msgref = 0;
+	return 0;
+}
+
+static int ath12k_tm_cmd_wmi(struct ath12k *ar, struct nlattr *tb[])
+{
+	struct ath12k_wmi_pdev *wmi = ar->wmi;
+	struct sk_buff *skb;
+	struct wmi_pdev_set_param_cmd *cmd;
+	int ret = 0, tag;
+	void *buf;
+	u32 cmd_id, buf_len;
+
+	if (!tb[ATH_TM_ATTR_DATA])
+		return -EINVAL;
+
+	if (!tb[ATH_TM_ATTR_WMI_CMDID])
+		return -EINVAL;
+
+	buf = nla_data(tb[ATH_TM_ATTR_DATA]);
+	buf_len = nla_len(tb[ATH_TM_ATTR_DATA]);
+
+	if (!buf_len) {
+		ath12k_warn(ar->ab, "No data present in testmode command\n");
+		return -EINVAL;
+	}
+
+	cmd_id = nla_get_u32(tb[ATH_TM_ATTR_WMI_CMDID]);
+
+	cmd = buf;
+	tag = le32_get_bits(cmd->tlv_header, WMI_TLV_TAG);
+
+	if (tag == WMI_TAG_PDEV_SET_PARAM_CMD)
+		cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);
+
+	ath12k_dbg(ar->ab, ATH12K_DBG_TESTMODE,
+		   "testmode cmd wmi cmd_id %d buf length %d\n",
+		   cmd_id, buf_len);
+
+	ath12k_dbg_dump(ar->ab, ATH12K_DBG_TESTMODE, NULL, "", buf, buf_len);
+
+	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, buf_len);
+
+	if (!skb)
+		return -ENOMEM;
+
+	memcpy(skb->data, buf, buf_len);
+
+	ret = ath12k_wmi_cmd_send(wmi, skb, cmd_id);
+	if (ret) {
+		dev_kfree_skb(skb);
+		ath12k_warn(ar->ab, "failed to transmit wmi command (testmode): %d\n",
+			    ret);
+	}
+
+	return ret;
+}
+
+int ath12k_tm_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+		  void *data, int len)
+{
+	struct ath12k_hw *ah = hw->priv;
+	struct ath12k *ar = NULL;
+	struct nlattr *tb[ATH_TM_ATTR_MAX + 1];
+	struct ath12k_base *ab;
+	struct wiphy *wiphy = hw->wiphy;
+	int ret;
+
+	lockdep_assert_held(&wiphy->mtx);
+
+	ret = nla_parse(tb, ATH_TM_ATTR_MAX, data, len, ath12k_tm_policy,
+			NULL);
+	if (ret)
+		return ret;
+
+	if (!tb[ATH_TM_ATTR_CMD])
+		return -EINVAL;
+
+	/* TODO: have to handle ar for MLO case */
+	if (ah->num_radio)
+
ar = ah->radio; + + if (!ar) + return -EINVAL; + + ab = ar->ab; + switch (nla_get_u32(tb[ATH_TM_ATTR_CMD])) { + case ATH_TM_CMD_WMI: + return ath12k_tm_cmd_wmi(ar, tb); + case ATH_TM_CMD_TESTMODE_START: + return ath12k_tm_cmd_testmode_start(ar, tb); + case ATH_TM_CMD_GET_VERSION: + return ath12k_tm_cmd_get_version(ar, tb); + case ATH_TM_CMD_WMI_FTM: + set_bit(ATH12K_FLAG_FTM_SEGMENTED, &ab->dev_flags); + return ath12k_tm_cmd_process_ftm(ar, tb); + default: + return -EOPNOTSUPP; + } +} diff --git a/drivers/net/wireless/ath/ath12k/testmode.h b/drivers/net/wireless/ath/ath12k/testmode.h new file mode 100644 index 0000000000000..ef6ab21d19b82 --- /dev/null +++ b/drivers/net/wireless/ath/ath12k/testmode.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: BSD-3-Clause-Clear */ +/* + * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. + * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include "core.h" +#include "hif.h" + +#ifdef CONFIG_NL80211_TESTMODE + +void ath12k_tm_wmi_event_unsegmented(struct ath12k_base *ab, u32 cmd_id, + struct sk_buff *skb); +void ath12k_tm_process_event(struct ath12k_base *ab, u32 cmd_id, + const struct ath12k_wmi_ftm_event *ftm_msg, + u16 length); +int ath12k_tm_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + void *data, int len); + +#else + +static inline void ath12k_tm_wmi_event_unsegmented(struct ath12k_base *ab, u32 cmd_id, + struct sk_buff *skb) +{ +} + +static inline void ath12k_tm_process_event(struct ath12k_base *ab, u32 cmd_id, + const struct ath12k_wmi_ftm_event *msg, + u16 length) +{ +} + +static inline int ath12k_tm_cmd(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + void *data, int len) +{ + return 0; +} + +#endif diff --git a/drivers/net/wireless/ath/ath12k/wmi.c b/drivers/net/wireless/ath/ath12k/wmi.c index abb510d235a52..6d1ea5f3a791b 100644 --- a/drivers/net/wireless/ath/ath12k/wmi.c +++ b/drivers/net/wireless/ath/ath12k/wmi.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: BSD-3-Clause-Clear /* * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. - * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved. 
*/ #include #include @@ -15,16 +15,22 @@ #include #include #include "core.h" +#include "debugfs.h" #include "debug.h" #include "mac.h" #include "hw.h" #include "peer.h" #include "p2p.h" +#include "testmode.h" struct ath12k_wmi_svc_ready_parse { bool wmi_svc_bitmap_done; }; +struct wmi_tlv_fw_stats_parse { + const struct wmi_stats_event *ev; +}; + struct ath12k_wmi_dma_ring_caps_parse { struct ath12k_wmi_dma_ring_caps_params *dma_ring_caps; u32 n_dma_ring_caps; @@ -173,7 +179,7 @@ static const struct ath12k_wmi_tlv_policy ath12k_wmi_tlv_policies[] = { .min_len = sizeof(struct wmi_p2p_noa_event) }, }; -static __le32 ath12k_wmi_tlv_hdr(u32 cmd, u32 len) +__le32 ath12k_wmi_tlv_hdr(u32 cmd, u32 len) { return le32_encode_bits(cmd, WMI_TLV_TAG) | le32_encode_bits(len, WMI_TLV_LEN); @@ -814,6 +820,39 @@ int ath12k_wmi_mgmt_send(struct ath12k *ar, u32 vdev_id, u32 buf_id, return ret; } +int ath12k_wmi_send_stats_request_cmd(struct ath12k *ar, u32 stats_id, + u32 vdev_id, u32 pdev_id) +{ + struct ath12k_wmi_pdev *wmi = ar->wmi; + struct wmi_request_stats_cmd *cmd; + struct sk_buff *skb; + int ret; + + skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd)); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_request_stats_cmd *)skb->data; + cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_REQUEST_STATS_CMD, + sizeof(*cmd)); + + cmd->stats_id = cpu_to_le32(stats_id); + cmd->vdev_id = cpu_to_le32(vdev_id); + cmd->pdev_id = cpu_to_le32(pdev_id); + + ret = ath12k_wmi_cmd_send(wmi, skb, WMI_REQUEST_STATS_CMDID); + if (ret) { + ath12k_warn(ar->ab, "failed to send WMI_REQUEST_STATS cmd\n"); + dev_kfree_skb(skb); + } + + ath12k_dbg(ar->ab, ATH12K_DBG_WMI, + "WMI request stats 0x%x vdev id %d pdev id %d\n", + stats_id, vdev_id, pdev_id); + + return ret; +} + int ath12k_wmi_vdev_create(struct ath12k *ar, u8 *macaddr, struct ath12k_wmi_vdev_create_arg *args) { @@ -1888,14 +1927,19 @@ int ath12k_wmi_p2p_go_bcn_ie(struct ath12k *ar, u32 vdev_id, return ret; } -int ath12k_wmi_bcn_tmpl(struct ath12k *ar, u32 vdev_id, +int ath12k_wmi_bcn_tmpl(struct ath12k_link_vif *arvif, struct ieee80211_mutable_offsets *offs, struct sk_buff *bcn, struct ath12k_wmi_bcn_tmpl_ema_arg *ema_args) { + struct ath12k *ar = arvif->ar; struct ath12k_wmi_pdev *wmi = ar->wmi; + struct ath12k_base *ab = ar->ab; struct wmi_bcn_tmpl_cmd *cmd; struct ath12k_wmi_bcn_prb_info_params *bcn_prb_info; + struct ath12k_vif *ahvif = arvif->ahvif; + struct ieee80211_bss_conf *conf; + u32 vdev_id = arvif->vdev_id; struct wmi_tlv *tlv; struct sk_buff *skb; u32 ema_params = 0; @@ -1903,6 +1947,14 @@ int ath12k_wmi_bcn_tmpl(struct ath12k *ar, u32 vdev_id, int ret, len; size_t aligned_len = roundup(bcn->len, 4); + conf = ath12k_mac_get_link_bss_conf(arvif); + if (!conf) { + ath12k_warn(ab, + "unable to access bss link conf in beacon template command for vif %pM link %u\n", + ahvif->vif->addr, arvif->link_id); + return -EINVAL; + } + len = sizeof(*cmd) + sizeof(*bcn_prb_info) + TLV_HDR_SIZE + aligned_len; skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len); @@ -1914,8 +1966,16 @@ int ath12k_wmi_bcn_tmpl(struct ath12k *ar, u32 vdev_id, sizeof(*cmd)); cmd->vdev_id = cpu_to_le32(vdev_id); cmd->tim_ie_offset = cpu_to_le32(offs->tim_offset); - cmd->csa_switch_count_offset = cpu_to_le32(offs->cntdwn_counter_offs[0]); - cmd->ext_csa_switch_count_offset = cpu_to_le32(offs->cntdwn_counter_offs[1]); + + if (conf->csa_active) { + cmd->csa_switch_count_offset = + cpu_to_le32(offs->cntdwn_counter_offs[0]); + cmd->ext_csa_switch_count_offset = + 
cpu_to_le32(offs->cntdwn_counter_offs[1]); + cmd->csa_event_bitmap = cpu_to_le32(0xFFFFFFFF); + arvif->current_cntdown_counter = bcn->data[offs->cntdwn_counter_offs[0]]; + } + cmd->buf_len = cpu_to_le32(bcn->len); cmd->mbssid_ie_offset = cpu_to_le32(offs->mbssid_off); if (ema_args) { @@ -1945,7 +2005,7 @@ int ath12k_wmi_bcn_tmpl(struct ath12k *ar, u32 vdev_id, ret = ath12k_wmi_cmd_send(wmi, skb, WMI_BCN_TMPL_CMDID); if (ret) { - ath12k_warn(ar->ab, "failed to send WMI_BCN_TMPL_CMDID\n"); + ath12k_warn(ab, "failed to send WMI_BCN_TMPL_CMDID\n"); dev_kfree_skb(skb); } @@ -2373,8 +2433,8 @@ void ath12k_wmi_start_scan_init(struct ath12k *ar, arg->dwell_time_active = 50; arg->dwell_time_active_2g = 0; arg->dwell_time_passive = 150; - arg->dwell_time_active_6g = 40; - arg->dwell_time_passive_6g = 30; + arg->dwell_time_active_6g = 70; + arg->dwell_time_passive_6g = 70; arg->min_rest_time = 50; arg->max_rest_time = 500; arg->repeat_probe_time = 0; @@ -2794,6 +2854,8 @@ int ath12k_wmi_send_scan_chan_list_cmd(struct ath12k *ar, WMI_CHAN_REG_INFO1_REG_CLS); *reg2 |= le32_encode_bits(channel_arg->antennamax, WMI_CHAN_REG_INFO2_ANT_MAX); + *reg2 |= le32_encode_bits(channel_arg->maxregpower, + WMI_CHAN_REG_INFO2_MAX_TX_PWR); ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "WMI chan scan list chan[%d] = %u, chan_info->info %8x\n", @@ -6842,8 +6904,584 @@ static void ath12k_peer_assoc_conf_event(struct ath12k_base *ab, struct sk_buff rcu_read_unlock(); } +static void +ath12k_wmi_fw_vdev_stats_dump(struct ath12k *ar, + struct ath12k_fw_stats *fw_stats, + char *buf, u32 *length) +{ + const struct ath12k_fw_stats_vdev *vdev; + u32 buf_len = ATH12K_FW_STATS_BUF_SIZE; + struct ath12k_link_vif *arvif; + u32 len = *length; + u8 *vif_macaddr; + int i; + + len += scnprintf(buf + len, buf_len - len, "\n"); + len += scnprintf(buf + len, buf_len - len, "%30s\n", + "ath12k VDEV stats"); + len += scnprintf(buf + len, buf_len - len, "%30s\n\n", + "================="); + + list_for_each_entry(vdev, &fw_stats->vdevs, list) { + arvif = ath12k_mac_get_arvif(ar, vdev->vdev_id); + if (!arvif) + continue; + vif_macaddr = arvif->ahvif->vif->addr; + + len += scnprintf(buf + len, buf_len - len, "%30s %u\n", + "VDEV ID", vdev->vdev_id); + len += scnprintf(buf + len, buf_len - len, "%30s %pM\n", + "VDEV MAC address", vif_macaddr); + len += scnprintf(buf + len, buf_len - len, "%30s %u\n", + "beacon snr", vdev->beacon_snr); + len += scnprintf(buf + len, buf_len - len, "%30s %u\n", + "data snr", vdev->data_snr); + len += scnprintf(buf + len, buf_len - len, "%30s %u\n", + "num rx frames", vdev->num_rx_frames); + len += scnprintf(buf + len, buf_len - len, "%30s %u\n", + "num rts fail", vdev->num_rts_fail); + len += scnprintf(buf + len, buf_len - len, "%30s %u\n", + "num rts success", vdev->num_rts_success); + len += scnprintf(buf + len, buf_len - len, "%30s %u\n", + "num rx err", vdev->num_rx_err); + len += scnprintf(buf + len, buf_len - len, "%30s %u\n", + "num rx discard", vdev->num_rx_discard); + len += scnprintf(buf + len, buf_len - len, "%30s %u\n", + "num tx not acked", vdev->num_tx_not_acked); + + for (i = 0 ; i < WLAN_MAX_AC; i++) + len += scnprintf(buf + len, buf_len - len, + "%25s [%02d] %u\n", + "num tx frames", i, + vdev->num_tx_frames[i]); + + for (i = 0 ; i < WLAN_MAX_AC; i++) + len += scnprintf(buf + len, buf_len - len, + "%25s [%02d] %u\n", + "num tx frames retries", i, + vdev->num_tx_frames_retries[i]); + + for (i = 0 ; i < WLAN_MAX_AC; i++) + len += scnprintf(buf + len, buf_len - len, + "%25s [%02d] %u\n", + "num tx frames 
failures", i, + vdev->num_tx_frames_failures[i]); + + for (i = 0 ; i < MAX_TX_RATE_VALUES; i++) + len += scnprintf(buf + len, buf_len - len, + "%25s [%02d] 0x%08x\n", + "tx rate history", i, + vdev->tx_rate_history[i]); + for (i = 0 ; i < MAX_TX_RATE_VALUES; i++) + len += scnprintf(buf + len, buf_len - len, + "%25s [%02d] %u\n", + "beacon rssi history", i, + vdev->beacon_rssi_history[i]); + + len += scnprintf(buf + len, buf_len - len, "\n"); + *length = len; + } +} + +static void +ath12k_wmi_fw_bcn_stats_dump(struct ath12k *ar, + struct ath12k_fw_stats *fw_stats, + char *buf, u32 *length) +{ + const struct ath12k_fw_stats_bcn *bcn; + u32 buf_len = ATH12K_FW_STATS_BUF_SIZE; + struct ath12k_link_vif *arvif; + u32 len = *length; + size_t num_bcn; + + num_bcn = list_count_nodes(&fw_stats->bcn); + + len += scnprintf(buf + len, buf_len - len, "\n"); + len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n", + "ath12k Beacon stats", num_bcn); + len += scnprintf(buf + len, buf_len - len, "%30s\n\n", + "==================="); + + list_for_each_entry(bcn, &fw_stats->bcn, list) { + arvif = ath12k_mac_get_arvif(ar, bcn->vdev_id); + if (!arvif) + continue; + len += scnprintf(buf + len, buf_len - len, "%30s %u\n", + "VDEV ID", bcn->vdev_id); + len += scnprintf(buf + len, buf_len - len, "%30s %pM\n", + "VDEV MAC address", arvif->ahvif->vif->addr); + len += scnprintf(buf + len, buf_len - len, "%30s\n\n", + "================"); + len += scnprintf(buf + len, buf_len - len, "%30s %u\n", + "Num of beacon tx success", bcn->tx_bcn_succ_cnt); + len += scnprintf(buf + len, buf_len - len, "%30s %u\n", + "Num of beacon tx failures", bcn->tx_bcn_outage_cnt); + + len += scnprintf(buf + len, buf_len - len, "\n"); + *length = len; + } +} + +static void +ath12k_wmi_fw_pdev_base_stats_dump(const struct ath12k_fw_stats_pdev *pdev, + char *buf, u32 *length, u64 fw_soc_drop_cnt) +{ + u32 len = *length; + u32 buf_len = ATH12K_FW_STATS_BUF_SIZE; + + len = scnprintf(buf + len, buf_len - len, "\n"); + len += scnprintf(buf + len, buf_len - len, "%30s\n", + "ath12k PDEV stats"); + len += scnprintf(buf + len, buf_len - len, "%30s\n\n", + "================="); + + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", + "Channel noise floor", pdev->ch_noise_floor); + len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", + "Channel TX power", pdev->chan_tx_power); + len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", + "TX frame count", pdev->tx_frame_count); + len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", + "RX frame count", pdev->rx_frame_count); + len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", + "RX clear count", pdev->rx_clear_count); + len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", + "Cycle count", pdev->cycle_count); + len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", + "PHY error count", pdev->phy_err_count); + len += scnprintf(buf + len, buf_len - len, "%30s %10llu\n", + "soc drop count", fw_soc_drop_cnt); + + *length = len; +} + +static void +ath12k_wmi_fw_pdev_tx_stats_dump(const struct ath12k_fw_stats_pdev *pdev, + char *buf, u32 *length) +{ + u32 len = *length; + u32 buf_len = ATH12K_FW_STATS_BUF_SIZE; + + len += scnprintf(buf + len, buf_len - len, "\n%30s\n", + "ath12k PDEV TX stats"); + len += scnprintf(buf + len, buf_len - len, "%30s\n\n", + "===================="); + + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", + "HTT cookies queued", pdev->comp_queued); + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", + "HTT cookies disp.", 
pdev->comp_delivered); + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", + "MSDU queued", pdev->msdu_enqued); + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", + "MPDU queued", pdev->mpdu_enqued); + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", + "MSDUs dropped", pdev->wmm_drop); + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", + "Local enqued", pdev->local_enqued); + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", + "Local freed", pdev->local_freed); + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", + "HW queued", pdev->hw_queued); + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", + "PPDUs reaped", pdev->hw_reaped); + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", + "Num underruns", pdev->underrun); + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", + "PPDUs cleaned", pdev->tx_abort); + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", + "MPDUs requeued", pdev->mpdus_requed); + len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", + "Excessive retries", pdev->tx_ko); + len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", + "HW rate", pdev->data_rc); + len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", + "Sched self triggers", pdev->self_triggers); + len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", + "Dropped due to SW retries", + pdev->sw_retry_failure); + len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", + "Illegal rate phy errors", + pdev->illgl_rate_phy_err); + len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", + "PDEV continuous xretry", pdev->pdev_cont_xretry); + len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", + "TX timeout", pdev->pdev_tx_timeout); + len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", + "PDEV resets", pdev->pdev_resets); + len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", + "Stateless TIDs alloc failures", + pdev->stateless_tid_alloc_failure); + len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", + "PHY underrun", pdev->phy_underrun); + len += scnprintf(buf + len, buf_len - len, "%30s %10u\n", + "MPDU is more than txop limit", pdev->txop_ovf); + *length = len; +} + +static void +ath12k_wmi_fw_pdev_rx_stats_dump(const struct ath12k_fw_stats_pdev *pdev, + char *buf, u32 *length) +{ + u32 len = *length; + u32 buf_len = ATH12K_FW_STATS_BUF_SIZE; + + len += scnprintf(buf + len, buf_len - len, "\n%30s\n", + "ath12k PDEV RX stats"); + len += scnprintf(buf + len, buf_len - len, "%30s\n\n", + "===================="); + + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", + "Mid PPDU route change", + pdev->mid_ppdu_route_change); + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", + "Tot. 
number of statuses", pdev->status_rcvd); + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", + "Extra frags on rings 0", pdev->r0_frags); + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", + "Extra frags on rings 1", pdev->r1_frags); + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", + "Extra frags on rings 2", pdev->r2_frags); + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", + "Extra frags on rings 3", pdev->r3_frags); + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", + "MSDUs delivered to HTT", pdev->htt_msdus); + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", + "MPDUs delivered to HTT", pdev->htt_mpdus); + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", + "MSDUs delivered to stack", pdev->loc_msdus); + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", + "MPDUs delivered to stack", pdev->loc_mpdus); + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", + "Oversized AMSUs", pdev->oversize_amsdu); + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", + "PHY errors", pdev->phy_errs); + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", + "PHY errors drops", pdev->phy_err_drop); + len += scnprintf(buf + len, buf_len - len, "%30s %10d\n", + "MPDU errors (FCS, MIC, ENC)", pdev->mpdu_errs); + *length = len; +} + +static void +ath12k_wmi_fw_pdev_stats_dump(struct ath12k *ar, + struct ath12k_fw_stats *fw_stats, + char *buf, u32 *length) +{ + const struct ath12k_fw_stats_pdev *pdev; + u32 len = *length; + + pdev = list_first_entry_or_null(&fw_stats->pdevs, + struct ath12k_fw_stats_pdev, list); + if (!pdev) { + ath12k_warn(ar->ab, "failed to get pdev stats\n"); + return; + } + + ath12k_wmi_fw_pdev_base_stats_dump(pdev, buf, &len, + ar->ab->fw_soc_drop_count); + ath12k_wmi_fw_pdev_tx_stats_dump(pdev, buf, &len); + ath12k_wmi_fw_pdev_rx_stats_dump(pdev, buf, &len); + + *length = len; +} + +void ath12k_wmi_fw_stats_dump(struct ath12k *ar, + struct ath12k_fw_stats *fw_stats, + u32 stats_id, char *buf) +{ + u32 len = 0; + u32 buf_len = ATH12K_FW_STATS_BUF_SIZE; + + spin_lock_bh(&ar->data_lock); + + switch (stats_id) { + case WMI_REQUEST_VDEV_STAT: + ath12k_wmi_fw_vdev_stats_dump(ar, fw_stats, buf, &len); + break; + case WMI_REQUEST_BCN_STAT: + ath12k_wmi_fw_bcn_stats_dump(ar, fw_stats, buf, &len); + break; + case WMI_REQUEST_PDEV_STAT: + ath12k_wmi_fw_pdev_stats_dump(ar, fw_stats, buf, &len); + break; + default: + break; + } + + spin_unlock_bh(&ar->data_lock); + + if (len >= buf_len) + buf[len - 1] = 0; + else + buf[len] = 0; + + ath12k_debugfs_fw_stats_reset(ar); +} + +static void +ath12k_wmi_pull_vdev_stats(const struct wmi_vdev_stats_params *src, + struct ath12k_fw_stats_vdev *dst) +{ + int i; + + dst->vdev_id = le32_to_cpu(src->vdev_id); + dst->beacon_snr = le32_to_cpu(src->beacon_snr); + dst->data_snr = le32_to_cpu(src->data_snr); + dst->num_rx_frames = le32_to_cpu(src->num_rx_frames); + dst->num_rts_fail = le32_to_cpu(src->num_rts_fail); + dst->num_rts_success = le32_to_cpu(src->num_rts_success); + dst->num_rx_err = le32_to_cpu(src->num_rx_err); + dst->num_rx_discard = le32_to_cpu(src->num_rx_discard); + dst->num_tx_not_acked = le32_to_cpu(src->num_tx_not_acked); + + for (i = 0; i < WLAN_MAX_AC; i++) + dst->num_tx_frames[i] = + le32_to_cpu(src->num_tx_frames[i]); + + for (i = 0; i < WLAN_MAX_AC; i++) + dst->num_tx_frames_retries[i] = + le32_to_cpu(src->num_tx_frames_retries[i]); + + for (i = 0; i < WLAN_MAX_AC; i++) + dst->num_tx_frames_failures[i] = + le32_to_cpu(src->num_tx_frames_failures[i]); + + for 
(i = 0; i < MAX_TX_RATE_VALUES; i++) + dst->tx_rate_history[i] = + le32_to_cpu(src->tx_rate_history[i]); + + for (i = 0; i < MAX_TX_RATE_VALUES; i++) + dst->beacon_rssi_history[i] = + le32_to_cpu(src->beacon_rssi_history[i]); +} + +static void +ath12k_wmi_pull_bcn_stats(const struct ath12k_wmi_bcn_stats_params *src, + struct ath12k_fw_stats_bcn *dst) +{ + dst->vdev_id = le32_to_cpu(src->vdev_id); + dst->tx_bcn_succ_cnt = le32_to_cpu(src->tx_bcn_succ_cnt); + dst->tx_bcn_outage_cnt = le32_to_cpu(src->tx_bcn_outage_cnt); +} + +static void +ath12k_wmi_pull_pdev_stats_base(const struct ath12k_wmi_pdev_base_stats_params *src, + struct ath12k_fw_stats_pdev *dst) +{ + dst->ch_noise_floor = a_sle32_to_cpu(src->chan_nf); + dst->tx_frame_count = __le32_to_cpu(src->tx_frame_count); + dst->rx_frame_count = __le32_to_cpu(src->rx_frame_count); + dst->rx_clear_count = __le32_to_cpu(src->rx_clear_count); + dst->cycle_count = __le32_to_cpu(src->cycle_count); + dst->phy_err_count = __le32_to_cpu(src->phy_err_count); + dst->chan_tx_power = __le32_to_cpu(src->chan_tx_pwr); +} + +static void +ath12k_wmi_pull_pdev_stats_tx(const struct ath12k_wmi_pdev_tx_stats_params *src, + struct ath12k_fw_stats_pdev *dst) +{ + dst->comp_queued = a_sle32_to_cpu(src->comp_queued); + dst->comp_delivered = a_sle32_to_cpu(src->comp_delivered); + dst->msdu_enqued = a_sle32_to_cpu(src->msdu_enqued); + dst->mpdu_enqued = a_sle32_to_cpu(src->mpdu_enqued); + dst->wmm_drop = a_sle32_to_cpu(src->wmm_drop); + dst->local_enqued = a_sle32_to_cpu(src->local_enqued); + dst->local_freed = a_sle32_to_cpu(src->local_freed); + dst->hw_queued = a_sle32_to_cpu(src->hw_queued); + dst->hw_reaped = a_sle32_to_cpu(src->hw_reaped); + dst->underrun = a_sle32_to_cpu(src->underrun); + dst->tx_abort = a_sle32_to_cpu(src->tx_abort); + dst->mpdus_requed = a_sle32_to_cpu(src->mpdus_requed); + dst->tx_ko = __le32_to_cpu(src->tx_ko); + dst->data_rc = __le32_to_cpu(src->data_rc); + dst->self_triggers = __le32_to_cpu(src->self_triggers); + dst->sw_retry_failure = __le32_to_cpu(src->sw_retry_failure); + dst->illgl_rate_phy_err = __le32_to_cpu(src->illgl_rate_phy_err); + dst->pdev_cont_xretry = __le32_to_cpu(src->pdev_cont_xretry); + dst->pdev_tx_timeout = __le32_to_cpu(src->pdev_tx_timeout); + dst->pdev_resets = __le32_to_cpu(src->pdev_resets); + dst->stateless_tid_alloc_failure = + __le32_to_cpu(src->stateless_tid_alloc_failure); + dst->phy_underrun = __le32_to_cpu(src->phy_underrun); + dst->txop_ovf = __le32_to_cpu(src->txop_ovf); +} + +static void +ath12k_wmi_pull_pdev_stats_rx(const struct ath12k_wmi_pdev_rx_stats_params *src, + struct ath12k_fw_stats_pdev *dst) +{ + dst->mid_ppdu_route_change = + a_sle32_to_cpu(src->mid_ppdu_route_change); + dst->status_rcvd = a_sle32_to_cpu(src->status_rcvd); + dst->r0_frags = a_sle32_to_cpu(src->r0_frags); + dst->r1_frags = a_sle32_to_cpu(src->r1_frags); + dst->r2_frags = a_sle32_to_cpu(src->r2_frags); + dst->r3_frags = a_sle32_to_cpu(src->r3_frags); + dst->htt_msdus = a_sle32_to_cpu(src->htt_msdus); + dst->htt_mpdus = a_sle32_to_cpu(src->htt_mpdus); + dst->loc_msdus = a_sle32_to_cpu(src->loc_msdus); + dst->loc_mpdus = a_sle32_to_cpu(src->loc_mpdus); + dst->oversize_amsdu = a_sle32_to_cpu(src->oversize_amsdu); + dst->phy_errs = a_sle32_to_cpu(src->phy_errs); + dst->phy_err_drop = a_sle32_to_cpu(src->phy_err_drop); + dst->mpdu_errs = a_sle32_to_cpu(src->mpdu_errs); +} + +static int ath12k_wmi_tlv_fw_stats_data_parse(struct ath12k_base *ab, + struct wmi_tlv_fw_stats_parse *parse, + const void *ptr, + u16 len) +{ + const struct 
wmi_stats_event *ev = parse->ev;
+	struct ath12k_fw_stats stats = {0};
+	struct ath12k *ar;
+	struct ath12k_link_vif *arvif;
+	struct ieee80211_sta *sta;
+	struct ath12k_sta *ahsta;
+	struct ath12k_link_sta *arsta;
+	int i, ret = 0;
+	const void *data = ptr;
+
+	INIT_LIST_HEAD(&stats.vdevs);
+	INIT_LIST_HEAD(&stats.bcn);
+	INIT_LIST_HEAD(&stats.pdevs);
+
+	if (!ev) {
+		ath12k_warn(ab, "failed to fetch update stats ev");
+		return -EPROTO;
+	}
+
+	rcu_read_lock();
+
+	ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(ev->pdev_id));
+	if (!ar) {
+		ath12k_warn(ab, "invalid pdev id %d in update stats event\n",
+			    le32_to_cpu(ev->pdev_id));
+		ret = -EPROTO;
+		goto exit;
+	}
+
+	for (i = 0; i < le32_to_cpu(ev->num_vdev_stats); i++) {
+		const struct wmi_vdev_stats_params *src;
+		struct ath12k_fw_stats_vdev *dst;
+
+		src = data;
+		if (len < sizeof(*src)) {
+			ret = -EPROTO;
+			goto exit;
+		}
+
+		arvif = ath12k_mac_get_arvif(ar, le32_to_cpu(src->vdev_id));
+		if (arvif) {
+			sta = ieee80211_find_sta_by_ifaddr(ath12k_ar_to_hw(ar),
+							   arvif->bssid,
+							   NULL);
+			if (sta) {
+				ahsta = ath12k_sta_to_ahsta(sta);
+				arsta = &ahsta->deflink;
+				arsta->rssi_beacon = le32_to_cpu(src->beacon_snr);
+				ath12k_dbg(ab, ATH12K_DBG_WMI,
+					   "wmi stats vdev id %d snr %d\n",
+					   src->vdev_id, src->beacon_snr);
+			} else {
+				ath12k_dbg(ab, ATH12K_DBG_WMI,
+					   "station with bssid %pM not found for vdev stat\n",
+					   arvif->bssid);
+			}
+		}
+
+		data += sizeof(*src);
+		len -= sizeof(*src);
+		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
+		if (!dst)
+			continue;
+		ath12k_wmi_pull_vdev_stats(src, dst);
+		stats.stats_id = WMI_REQUEST_VDEV_STAT;
+		list_add_tail(&dst->list, &stats.vdevs);
+	}
+	for (i = 0; i < le32_to_cpu(ev->num_bcn_stats); i++) {
+		const struct ath12k_wmi_bcn_stats_params *src;
+		struct ath12k_fw_stats_bcn *dst;
+
+		src = data;
+		if (len < sizeof(*src)) {
+			ret = -EPROTO;
+			goto exit;
+		}
+
+		data += sizeof(*src);
+		len -= sizeof(*src);
+		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
+		if (!dst)
+			continue;
+		ath12k_wmi_pull_bcn_stats(src, dst);
+		stats.stats_id = WMI_REQUEST_BCN_STAT;
+		list_add_tail(&dst->list, &stats.bcn);
+	}
+	for (i = 0; i < le32_to_cpu(ev->num_pdev_stats); i++) {
+		const struct ath12k_wmi_pdev_stats_params *src;
+		struct ath12k_fw_stats_pdev *dst;
+
+		src = data;
+		if (len < sizeof(*src)) {
+			ret = -EPROTO;
+			goto exit;
+		}
+
+		stats.stats_id = WMI_REQUEST_PDEV_STAT;
+
+		data += sizeof(*src);
+		len -= sizeof(*src);
+
+		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
+		if (!dst)
+			continue;
+
+		ath12k_wmi_pull_pdev_stats_base(&src->base, dst);
+		ath12k_wmi_pull_pdev_stats_tx(&src->tx, dst);
+		ath12k_wmi_pull_pdev_stats_rx(&src->rx, dst);
+		list_add_tail(&dst->list, &stats.pdevs);
+	}
+
+	complete(&ar->fw_stats_complete);
+	ath12k_debugfs_fw_stats_process(ar, &stats);
+exit:
+	rcu_read_unlock();
+	return ret;
+}
+
+static int ath12k_wmi_tlv_fw_stats_parse(struct ath12k_base *ab,
+					 u16 tag, u16 len,
+					 const void *ptr, void *data)
+{
+	struct wmi_tlv_fw_stats_parse *parse = data;
+	int ret = 0;
+
+	switch (tag) {
+	case WMI_TAG_STATS_EVENT:
+		parse->ev = ptr;
+		break;
+	case WMI_TAG_ARRAY_BYTE:
+		ret = ath12k_wmi_tlv_fw_stats_data_parse(ab, parse, ptr, len);
+		break;
+	default:
+		break;
+	}
+	return ret;
+}
+
 static void ath12k_update_stats_event(struct ath12k_base *ab, struct sk_buff *skb)
 {
+	int ret;
+	struct wmi_tlv_fw_stats_parse parse = {};
+
+	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
+				  ath12k_wmi_tlv_fw_stats_parse,
+				  &parse);
+	if (ret)
+		ath12k_warn(ab, "failed to parse fw stats %d\n", ret);
 }
 
/* PDEV_CTL_FAILSAFE_CHECK_EVENT is received from FW when the frequency scanned
@@ -6889,17 +7527,15 @@ ath12k_wmi_process_csa_switch_count_event(struct ath12k_base *ab,
 					  const struct ath12k_wmi_pdev_csa_event *ev,
 					  const u32 *vdev_ids)
 {
-	int i;
+	u32 current_switch_count = le32_to_cpu(ev->current_switch_count);
+	u32 num_vdevs = le32_to_cpu(ev->num_vdevs);
 	struct ieee80211_bss_conf *conf;
 	struct ath12k_link_vif *arvif;
 	struct ath12k_vif *ahvif;
-
-	/* Finish CSA once the switch count becomes NULL */
-	if (ev->current_switch_count)
-		return;
+	int i;
 
 	rcu_read_lock();
-	for (i = 0; i < le32_to_cpu(ev->num_vdevs); i++) {
+	for (i = 0; i < num_vdevs; i++) {
 		arvif = ath12k_mac_get_arvif_by_vdev_id(ab, vdev_ids[i]);
 
 		if (!arvif) {
@@ -6922,8 +7558,26 @@ ath12k_wmi_process_csa_switch_count_event(struct ath12k_base *ab,
 			continue;
 		}
 
-		if (arvif->is_up && conf->csa_active)
-			ieee80211_csa_finish(ahvif->vif, 0);
+		if (!arvif->is_up || !conf->csa_active)
+			continue;
+
+		/* Finish CSA when counter reaches zero */
+		if (!current_switch_count) {
+			ieee80211_csa_finish(ahvif->vif, arvif->link_id);
+			arvif->current_cntdown_counter = 0;
+		} else if (current_switch_count > 1) {
+			/* If the count in the event is not what we expect, don't
+			 * update the mac80211 count: on beacon Tx failure the
+			 * firmware does not decrement its count, and this event
+			 * comes again carrying the previous count value
+			 */
+			if (current_switch_count != arvif->current_cntdown_counter)
+				continue;
+
+			arvif->current_cntdown_counter =
+				ieee80211_beacon_update_cntdwn(ahvif->vif,
+							       arvif->link_id);
+		}
 	}
 	rcu_read_unlock();
 }
@@ -7026,6 +7680,35 @@ ath12k_wmi_pdev_dfs_radar_detected_event(struct ath12k_base *ab, struct sk_buff
 	kfree(tb);
 }
 
+static void ath12k_tm_wmi_event_segmented(struct ath12k_base *ab, u32 cmd_id,
+					  struct sk_buff *skb)
+{
+	const struct ath12k_wmi_ftm_event *ev;
+	const void **tb;
+	int ret;
+	u16 length;
+
+	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
+
+	if (IS_ERR(tb)) {
+		ret = PTR_ERR(tb);
+		ath12k_warn(ab, "failed to parse ftm event tlv: %d\n", ret);
+		return;
+	}
+
+	ev = tb[WMI_TAG_ARRAY_BYTE];
+	if (!ev) {
+		ath12k_warn(ab, "failed to fetch ftm msg\n");
+		kfree(tb);
+		return;
+	}
+
+	length = skb->len - TLV_HDR_SIZE;
+	ath12k_tm_process_event(ab, cmd_id, ev, length);
+	kfree(tb);
+	tb = NULL;
+}
+
 static void
 ath12k_wmi_pdev_temperature_event(struct ath12k_base *ab,
 				  struct sk_buff *skb)
@@ -7446,6 +8129,389 @@ static void ath12k_wmi_event_teardown_complete(struct ath12k_base *ab,
 	kfree(tb);
 }
 
+#ifdef CONFIG_ATH12K_DEBUGFS
+static int ath12k_wmi_tpc_stats_copy_buffer(struct ath12k_base *ab,
+					    const void *ptr, u16 tag, u16 len,
+					    struct wmi_tpc_stats_arg *tpc_stats)
+{
+	u32 len1, len2, len3, len4;
+	s16 *dst_ptr;
+	s8 *dst_ptr_ctl;
+
+	len1 = le32_to_cpu(tpc_stats->max_reg_allowed_power.tpc_reg_pwr.reg_array_len);
+	len2 = le32_to_cpu(tpc_stats->rates_array1.tpc_rates_array.rate_array_len);
+	len3 = le32_to_cpu(tpc_stats->rates_array2.tpc_rates_array.rate_array_len);
+	len4 = le32_to_cpu(tpc_stats->ctl_array.tpc_ctl_pwr.ctl_array_len);
+
+	switch (tpc_stats->event_count) {
+	case ATH12K_TPC_STATS_CONFIG_REG_PWR_EVENT:
+		if (len1 > len)
+			return -ENOBUFS;
+
+		if (tpc_stats->tlvs_rcvd & WMI_TPC_REG_PWR_ALLOWED) {
+			dst_ptr = tpc_stats->max_reg_allowed_power.reg_pwr_array;
+			memcpy(dst_ptr, ptr, len1);
+		}
+		break;
+	case ATH12K_TPC_STATS_RATES_EVENT1:
+		if (len2 > len)
+			return -ENOBUFS;
+
+		if (tpc_stats->tlvs_rcvd & WMI_TPC_RATES_ARRAY1) {
+			dst_ptr = tpc_stats->rates_array1.rate_array;
memcpy(dst_ptr, ptr, len2); + } + break; + case ATH12K_TPC_STATS_RATES_EVENT2: + if (len3 > len) + return -ENOBUFS; + + if (tpc_stats->tlvs_rcvd & WMI_TPC_RATES_ARRAY2) { + dst_ptr = tpc_stats->rates_array2.rate_array; + memcpy(dst_ptr, ptr, len3); + } + break; + case ATH12K_TPC_STATS_CTL_TABLE_EVENT: + if (len4 > len) + return -ENOBUFS; + + if (tpc_stats->tlvs_rcvd & WMI_TPC_CTL_PWR_ARRAY) { + dst_ptr_ctl = tpc_stats->ctl_array.ctl_pwr_table; + memcpy(dst_ptr_ctl, ptr, len4); + } + break; + } + return 0; +} + +static int ath12k_tpc_get_reg_pwr(struct ath12k_base *ab, + struct wmi_tpc_stats_arg *tpc_stats, + struct wmi_max_reg_power_fixed_params *ev) +{ + struct wmi_max_reg_power_allowed_arg *reg_pwr; + u32 total_size; + + ath12k_dbg(ab, ATH12K_DBG_WMI, + "Received reg power array type %d length %d for tpc stats\n", + ev->reg_power_type, ev->reg_array_len); + + switch (le32_to_cpu(ev->reg_power_type)) { + case TPC_STATS_REG_PWR_ALLOWED_TYPE: + reg_pwr = &tpc_stats->max_reg_allowed_power; + break; + default: + return -EINVAL; + } + + /* Each entry is 2 byte hence multiplying the indices with 2 */ + total_size = le32_to_cpu(ev->d1) * le32_to_cpu(ev->d2) * + le32_to_cpu(ev->d3) * le32_to_cpu(ev->d4) * 2; + if (le32_to_cpu(ev->reg_array_len) != total_size) { + ath12k_warn(ab, + "Total size and reg_array_len doesn't match for tpc stats\n"); + return -EINVAL; + } + + memcpy(&reg_pwr->tpc_reg_pwr, ev, sizeof(struct wmi_max_reg_power_fixed_params)); + + reg_pwr->reg_pwr_array = kzalloc(le32_to_cpu(reg_pwr->tpc_reg_pwr.reg_array_len), + GFP_ATOMIC); + if (!reg_pwr->reg_pwr_array) + return -ENOMEM; + + tpc_stats->tlvs_rcvd |= WMI_TPC_REG_PWR_ALLOWED; + + return 0; +} + +static int ath12k_tpc_get_rate_array(struct ath12k_base *ab, + struct wmi_tpc_stats_arg *tpc_stats, + struct wmi_tpc_rates_array_fixed_params *ev) +{ + struct wmi_tpc_rates_array_arg *rates_array; + u32 flag = 0, rate_array_len; + + ath12k_dbg(ab, ATH12K_DBG_WMI, + "Received rates array type %d length %d for tpc stats\n", + ev->rate_array_type, ev->rate_array_len); + + switch (le32_to_cpu(ev->rate_array_type)) { + case ATH12K_TPC_STATS_RATES_ARRAY1: + rates_array = &tpc_stats->rates_array1; + flag = WMI_TPC_RATES_ARRAY1; + break; + case ATH12K_TPC_STATS_RATES_ARRAY2: + rates_array = &tpc_stats->rates_array2; + flag = WMI_TPC_RATES_ARRAY2; + break; + default: + ath12k_warn(ab, + "Received invalid type of rates array for tpc stats\n"); + return -EINVAL; + } + memcpy(&rates_array->tpc_rates_array, ev, + sizeof(struct wmi_tpc_rates_array_fixed_params)); + rate_array_len = le32_to_cpu(rates_array->tpc_rates_array.rate_array_len); + rates_array->rate_array = kzalloc(rate_array_len, GFP_ATOMIC); + if (!rates_array->rate_array) + return -ENOMEM; + + tpc_stats->tlvs_rcvd |= flag; + return 0; +} + +static int ath12k_tpc_get_ctl_pwr_tbl(struct ath12k_base *ab, + struct wmi_tpc_stats_arg *tpc_stats, + struct wmi_tpc_ctl_pwr_fixed_params *ev) +{ + struct wmi_tpc_ctl_pwr_table_arg *ctl_array; + u32 total_size, ctl_array_len, flag = 0; + + ath12k_dbg(ab, ATH12K_DBG_WMI, + "Received ctl array type %d length %d for tpc stats\n", + ev->ctl_array_type, ev->ctl_array_len); + + switch (le32_to_cpu(ev->ctl_array_type)) { + case ATH12K_TPC_STATS_CTL_ARRAY: + ctl_array = &tpc_stats->ctl_array; + flag = WMI_TPC_CTL_PWR_ARRAY; + break; + default: + ath12k_warn(ab, + "Received invalid type of ctl pwr table for tpc stats\n"); + return -EINVAL; + } + + total_size = le32_to_cpu(ev->d1) * le32_to_cpu(ev->d2) * + le32_to_cpu(ev->d3) * le32_to_cpu(ev->d4); + if
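/* Worked example of the dimension checks used in this file (values and
 * their meanings hypothetical): d1=2 chains, d2=4 bandwidths, d3=2
 * bands and d4=1 give 16 entries. The reg-power and rates arrays store
 * s16 entries, hence the "* 2" in ath12k_tpc_get_reg_pwr() above,
 * while the ctl power table checked just below stores s8 entries, so
 * ctl_array_len must equal d1 * d2 * d3 * d4 exactly.
 */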
(le32_to_cpu(ev->ctl_array_len) != total_size) { + ath12k_warn(ab, + "Total size and ctl_array_len doesn't match for tpc stats\n"); + return -EINVAL; + } + + memcpy(&ctl_array->tpc_ctl_pwr, ev, sizeof(struct wmi_tpc_ctl_pwr_fixed_params)); + ctl_array_len = le32_to_cpu(ctl_array->tpc_ctl_pwr.ctl_array_len); + ctl_array->ctl_pwr_table = kzalloc(ctl_array_len, GFP_ATOMIC); + if (!ctl_array->ctl_pwr_table) + return -ENOMEM; + + tpc_stats->tlvs_rcvd |= flag; + return 0; +} + +static int ath12k_wmi_tpc_stats_subtlv_parser(struct ath12k_base *ab, + u16 tag, u16 len, + const void *ptr, void *data) +{ + struct wmi_tpc_rates_array_fixed_params *tpc_rates_array; + struct wmi_max_reg_power_fixed_params *tpc_reg_pwr; + struct wmi_tpc_ctl_pwr_fixed_params *tpc_ctl_pwr; + struct wmi_tpc_stats_arg *tpc_stats = data; + struct wmi_tpc_config_params *tpc_config; + int ret = 0; + + if (!tpc_stats) { + ath12k_warn(ab, "tpc stats memory unavailable\n"); + return -EINVAL; + } + + switch (tag) { + case WMI_TAG_TPC_STATS_CONFIG_EVENT: + tpc_config = (struct wmi_tpc_config_params *)ptr; + memcpy(&tpc_stats->tpc_config, tpc_config, + sizeof(struct wmi_tpc_config_params)); + break; + case WMI_TAG_TPC_STATS_REG_PWR_ALLOWED: + tpc_reg_pwr = (struct wmi_max_reg_power_fixed_params *)ptr; + ret = ath12k_tpc_get_reg_pwr(ab, tpc_stats, tpc_reg_pwr); + break; + case WMI_TAG_TPC_STATS_RATES_ARRAY: + tpc_rates_array = (struct wmi_tpc_rates_array_fixed_params *)ptr; + ret = ath12k_tpc_get_rate_array(ab, tpc_stats, tpc_rates_array); + break; + case WMI_TAG_TPC_STATS_CTL_PWR_TABLE_EVENT: + tpc_ctl_pwr = (struct wmi_tpc_ctl_pwr_fixed_params *)ptr; + ret = ath12k_tpc_get_ctl_pwr_tbl(ab, tpc_stats, tpc_ctl_pwr); + break; + default: + ath12k_warn(ab, + "Received invalid tag for tpc stats in subtlvs\n"); + return -EINVAL; + } + return ret; +} + +static int ath12k_wmi_tpc_stats_event_parser(struct ath12k_base *ab, + u16 tag, u16 len, + const void *ptr, void *data) +{ + struct wmi_tpc_stats_arg *tpc_stats = (struct wmi_tpc_stats_arg *)data; + int ret; + + switch (tag) { + case WMI_TAG_HALPHY_CTRL_PATH_EVENT_FIXED_PARAM: + ret = 0; + /* Fixed param is already processed*/ + break; + case WMI_TAG_ARRAY_STRUCT: + /* len 0 is expected for array of struct when there + * is no content of that type to pack inside that tlv + */ + if (len == 0) + return 0; + ret = ath12k_wmi_tlv_iter(ab, ptr, len, + ath12k_wmi_tpc_stats_subtlv_parser, + tpc_stats); + break; + case WMI_TAG_ARRAY_INT16: + if (len == 0) + return 0; + ret = ath12k_wmi_tpc_stats_copy_buffer(ab, ptr, + WMI_TAG_ARRAY_INT16, + len, tpc_stats); + break; + case WMI_TAG_ARRAY_BYTE: + if (len == 0) + return 0; + ret = ath12k_wmi_tpc_stats_copy_buffer(ab, ptr, + WMI_TAG_ARRAY_BYTE, + len, tpc_stats); + break; + default: + ath12k_warn(ab, "Received invalid tag for tpc stats\n"); + ret = -EINVAL; + break; + } + return ret; +} + +void ath12k_wmi_free_tpc_stats_mem(struct ath12k *ar) +{ + struct wmi_tpc_stats_arg *tpc_stats = ar->debug.tpc_stats; + + lockdep_assert_held(&ar->data_lock); + ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "tpc stats mem free\n"); + if (tpc_stats) { + kfree(tpc_stats->max_reg_allowed_power.reg_pwr_array); + kfree(tpc_stats->rates_array1.rate_array); + kfree(tpc_stats->rates_array2.rate_array); + kfree(tpc_stats->ctl_array.ctl_pwr_table); + kfree(tpc_stats); + ar->debug.tpc_stats = NULL; + } +} + +static void ath12k_wmi_process_tpc_stats(struct ath12k_base *ab, + struct sk_buff *skb) +{ + struct ath12k_wmi_pdev_tpc_stats_event_fixed_params *fixed_param; + struct wmi_tpc_stats_arg 
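/* Hedged usage sketch for ath12k_wmi_free_tpc_stats_mem() above: the
 * lockdep assertion means callers must hold ar->data_lock, e.g.
 *
 *	spin_lock_bh(&ar->data_lock);
 *	ath12k_wmi_free_tpc_stats_mem(ar);
 *	spin_unlock_bh(&ar->data_lock);
 *
 * which matches how ath12k_wmi_process_tpc_stats() below takes the
 * lock around allocation, parsing and free.
 */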
*tpc_stats; + const struct wmi_tlv *tlv; + void *ptr = skb->data; + struct ath12k *ar; + u16 tlv_tag; + u32 event_count; + int ret; + + if (!skb->data) { + ath12k_warn(ab, "No data present in tpc stats event\n"); + return; + } + + if (skb->len < (sizeof(*fixed_param) + TLV_HDR_SIZE)) { + ath12k_warn(ab, "TPC stats event size invalid\n"); + return; + } + + tlv = (struct wmi_tlv *)ptr; + tlv_tag = le32_get_bits(tlv->header, WMI_TLV_TAG); + ptr += sizeof(*tlv); + + if (tlv_tag != WMI_TAG_HALPHY_CTRL_PATH_EVENT_FIXED_PARAM) { + ath12k_warn(ab, "TPC stats without fixed param tlv at start\n"); + return; + } + + fixed_param = (struct ath12k_wmi_pdev_tpc_stats_event_fixed_params *)ptr; + rcu_read_lock(); + ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(fixed_param->pdev_id) + 1); + if (!ar) { + ath12k_warn(ab, "Failed to get ar for tpc stats\n"); + rcu_read_unlock(); + return; + } + spin_lock_bh(&ar->data_lock); + if (!ar->debug.tpc_request) { + /* Event is received either without request or the + * timeout, if memory is already allocated free it + */ + if (ar->debug.tpc_stats) { + ath12k_warn(ab, "Freeing memory for tpc_stats\n"); + ath12k_wmi_free_tpc_stats_mem(ar); + } + goto unlock; + } + + event_count = le32_to_cpu(fixed_param->event_count); + if (event_count == 0) { + if (ar->debug.tpc_stats) { + ath12k_warn(ab, + "Invalid tpc memory present\n"); + goto unlock; + } + ar->debug.tpc_stats = + kzalloc(sizeof(struct wmi_tpc_stats_arg), + GFP_ATOMIC); + if (!ar->debug.tpc_stats) { + ath12k_warn(ab, + "Failed to allocate memory for tpc stats\n"); + goto unlock; + } + } + + tpc_stats = ar->debug.tpc_stats; + if (!tpc_stats) { + ath12k_warn(ab, "tpc stats memory unavailable\n"); + goto unlock; + } + + if (!(event_count == 0)) { + if (event_count != tpc_stats->event_count + 1) { + ath12k_warn(ab, + "Invalid tpc event received\n"); + goto unlock; + } + } + tpc_stats->pdev_id = le32_to_cpu(fixed_param->pdev_id); + tpc_stats->end_of_event = le32_to_cpu(fixed_param->end_of_event); + tpc_stats->event_count = le32_to_cpu(fixed_param->event_count); + ath12k_dbg(ab, ATH12K_DBG_WMI, + "tpc stats event_count %d\n", + tpc_stats->event_count); + ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len, + ath12k_wmi_tpc_stats_event_parser, + tpc_stats); + if (ret) { + ath12k_wmi_free_tpc_stats_mem(ar); + ath12k_warn(ab, "failed to parse tpc_stats tlv: %d\n", ret); + goto unlock; + } + + if (tpc_stats->end_of_event) + complete(&ar->debug.tpc_complete); + +unlock: + spin_unlock_bh(&ar->data_lock); + rcu_read_unlock(); +} +#else +static void ath12k_wmi_process_tpc_stats(struct ath12k_base *ab, + struct sk_buff *skb) +{ +} +#endif + static void ath12k_wmi_op_rx(struct ath12k_base *ab, struct sk_buff *skb) { struct wmi_cmd_hdr *cmd_hdr; @@ -7571,6 +8637,9 @@ static void ath12k_wmi_op_rx(struct ath12k_base *ab, struct sk_buff *skb) case WMI_MLO_TEARDOWN_COMPLETE_EVENTID: ath12k_wmi_event_teardown_complete(ab, skb); break; + case WMI_HALPHY_STATS_CTRL_PATH_EVENTID: + ath12k_wmi_process_tpc_stats(ab, skb); + break; /* add Unsupported events (rare) here */ case WMI_TBTTOFFSET_EXT_UPDATE_EVENTID: case WMI_PEER_OPER_MODE_CHANGE_EVENTID: @@ -7584,7 +8653,12 @@ static void ath12k_wmi_op_rx(struct ath12k_base *ab, struct sk_buff *skb) case WMI_OBSS_COLOR_COLLISION_DETECTION_EVENTID: /* debug might flood hence silently ignore (no-op) */ break; - /* TODO: Add remaining events */ + case WMI_PDEV_UTF_EVENTID: + if (test_bit(ATH12K_FLAG_FTM_SEGMENTED, &ab->dev_flags)) + ath12k_tm_wmi_event_segmented(ab, id, skb); + else + 
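/* When ATH12K_FLAG_FTM_SEGMENTED is not set, the whole UTF payload is
 * handed to the unsegmented testmode handler below; the segmented path
 * above instead expects each event to carry an
 * ath12k_wmi_ftm_seg_hdr_params header (see wmi.h) ahead of the data.
 */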
ath12k_tm_wmi_event_unsegmented(ab, id, skb); + break; default: ath12k_dbg(ab, ATH12K_DBG_WMI, "Unknown eventid: 0x%x\n", id); break; @@ -7721,6 +8795,74 @@ int ath12k_wmi_simulate_radar(struct ath12k *ar) return ath12k_wmi_send_unit_test_cmd(ar, wmi_ut, dfs_args); } +int ath12k_wmi_send_tpc_stats_request(struct ath12k *ar, + enum wmi_halphy_ctrl_path_stats_id tpc_stats_type) +{ + struct wmi_request_halphy_ctrl_path_stats_cmd_fixed_params *cmd; + struct ath12k_wmi_pdev *wmi = ar->wmi; + struct sk_buff *skb; + struct wmi_tlv *tlv; + __le32 *pdev_id; + u32 buf_len; + void *ptr; + int ret; + + buf_len = sizeof(*cmd) + TLV_HDR_SIZE + sizeof(u32) + TLV_HDR_SIZE + TLV_HDR_SIZE; + + skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, buf_len); + if (!skb) + return -ENOMEM; + cmd = (struct wmi_request_halphy_ctrl_path_stats_cmd_fixed_params *)skb->data; + cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_HALPHY_CTRL_PATH_CMD_FIXED_PARAM, + sizeof(*cmd)); + + cmd->stats_id_mask = cpu_to_le32(WMI_REQ_CTRL_PATH_PDEV_TX_STAT); + cmd->action = cpu_to_le32(WMI_REQUEST_CTRL_PATH_STAT_GET); + cmd->subid = cpu_to_le32(tpc_stats_type); + + ptr = skb->data + sizeof(*cmd); + + /* The below TLV arrays optionally follow this fixed param TLV structure + * 1. ARRAY_UINT32 pdev_ids[] + * If this array is present and non-zero length, stats should only + * be provided from the pdevs identified in the array. + * 2. ARRAY_UNIT32 vdev_ids[] + * If this array is present and non-zero length, stats should only + * be provided from the vdevs identified in the array. + * 3. ath12k_wmi_mac_addr_params peer_macaddr[]; + * If this array is present and non-zero length, stats should only + * be provided from the peers with the MAC addresses specified + * in the array + */ + tlv = ptr; + tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, sizeof(u32)); + ptr += TLV_HDR_SIZE; + + pdev_id = ptr; + *pdev_id = cpu_to_le32(ath12k_mac_get_target_pdev_id(ar)); + ptr += sizeof(*pdev_id); + + tlv = ptr; + tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, 0); + ptr += TLV_HDR_SIZE; + + tlv = ptr; + tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_FIXED_STRUCT, 0); + ptr += TLV_HDR_SIZE; + + ret = ath12k_wmi_cmd_send(wmi, skb, WMI_REQUEST_HALPHY_CTRL_PATH_STATS_CMDID); + if (ret) { + ath12k_warn(ar->ab, + "failed to submit WMI_REQUEST_STATS_CTRL_PATH_CMDID\n"); + dev_kfree_skb(skb); + return ret; + } + ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "WMI get TPC STATS sent on pdev %d\n", + ar->pdev->pdev_id); + + return ret; +} + int ath12k_wmi_connect(struct ath12k_base *ab) { u32 i; diff --git a/drivers/net/wireless/ath/ath12k/wmi.h b/drivers/net/wireless/ath/ath12k/wmi.h index 45fe699ce8a5d..1ba33e30ddd27 100644 --- a/drivers/net/wireless/ath/ath12k/wmi.h +++ b/drivers/net/wireless/ath/ath12k/wmi.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: BSD-3-Clause-Clear */ /* * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. - * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved. */ #ifndef ATH12K_WMI_H @@ -25,6 +25,7 @@ struct ath12k_base; struct ath12k; struct ath12k_link_vif; +struct ath12k_fw_stats; /* There is no signed version of __le32, so for a temporary solution come * up with our own version. The idea is from fs/ntfs/endian.h. 
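For reference, ath12k_wmi_send_tpc_stats_request() above sizes its buffer to match the comment about the three optional TLV arrays: only the pdev list carries a value, the other two are zero-length placeholders. The length math, restated from the code above:

	buf_len = sizeof(*cmd)			/* fixed params */
		+ TLV_HDR_SIZE + sizeof(u32)	/* pdev_ids[1] */
		+ TLV_HDR_SIZE			/* vdev_ids[], empty */
		+ TLV_HDR_SIZE;			/* peer_macaddr[], empty */
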
@@ -516,6 +517,9 @@ enum wmi_tlv_cmd_id { WMI_REQUEST_RCPI_CMDID, WMI_REQUEST_PEER_STATS_INFO_CMDID, WMI_REQUEST_RADIO_CHAN_STATS_CMDID, + WMI_REQUEST_WLM_STATS_CMDID, + WMI_REQUEST_CTRL_PATH_STATS_CMDID, + WMI_REQUEST_HALPHY_CTRL_PATH_STATS_CMDID = WMI_REQUEST_CTRL_PATH_STATS_CMDID + 3, WMI_SET_ARP_NS_OFFLOAD_CMDID = WMI_TLV_CMD(WMI_GRP_ARP_NS_OFL), WMI_ADD_PROACTIVE_ARP_RSP_PATTERN_CMDID, WMI_DEL_PROACTIVE_ARP_RSP_PATTERN_CMDID, @@ -785,6 +789,9 @@ enum wmi_tlv_event_id { WMI_UPDATE_RCPI_EVENTID, WMI_PEER_STATS_INFO_EVENTID, WMI_RADIO_CHAN_STATS_EVENTID, + WMI_WLM_STATS_EVENTID, + WMI_CTRL_PATH_STATS_EVENTID, + WMI_HALPHY_STATS_CTRL_PATH_EVENTID, WMI_NLO_MATCH_EVENTID = WMI_TLV_CMD(WMI_GRP_NLO_OFL), WMI_NLO_SCAN_COMPLETE_EVENTID, WMI_APFIND_EVENTID, @@ -1191,6 +1198,7 @@ enum wmi_tlv_tag { WMI_TAG_ARRAY_BYTE, WMI_TAG_ARRAY_STRUCT, WMI_TAG_ARRAY_FIXED_STRUCT, + WMI_TAG_ARRAY_INT16, WMI_TAG_LAST_ARRAY_ENUM = 31, WMI_TAG_SERVICE_READY_EVENT, WMI_TAG_HAL_REG_CAPABILITIES, @@ -1941,6 +1949,12 @@ enum wmi_tlv_tag { WMI_TAG_MAC_PHY_CAPABILITIES_EXT = 0x36F, WMI_TAG_REGULATORY_RULE_EXT_STRUCT = 0x3A9, WMI_TAG_REG_CHAN_LIST_CC_EXT_EVENT, + WMI_TAG_TPC_STATS_GET_CMD = 0x38B, + WMI_TAG_TPC_STATS_EVENT_FIXED_PARAM, + WMI_TAG_TPC_STATS_CONFIG_EVENT, + WMI_TAG_TPC_STATS_REG_PWR_ALLOWED, + WMI_TAG_TPC_STATS_RATES_ARRAY, + WMI_TAG_TPC_STATS_CTL_PWR_TABLE_EVENT, WMI_TAG_EHT_RATE_SET = 0x3C4, WMI_TAG_DCS_AWGN_INT_TYPE = 0x3C5, WMI_TAG_MLO_TX_SEND_PARAMS, @@ -1958,6 +1972,8 @@ enum wmi_tlv_tag { WMI_TAG_PDEV_SET_BIOS_SAR_TABLE_CMD = 0x3D8, WMI_TAG_PDEV_SET_BIOS_GEO_TABLE_CMD = 0x3D9, WMI_TAG_PDEV_SET_BIOS_INTERFACE_CMD = 0x3FB, + WMI_TAG_HALPHY_CTRL_PATH_CMD_FIXED_PARAM = 0x442, + WMI_TAG_HALPHY_CTRL_PATH_EVENT_FIXED_PARAM, WMI_TAG_MAX }; @@ -3624,6 +3640,26 @@ struct ath12k_wmi_p2p_noa_info { struct ath12k_wmi_p2p_noa_descriptor descriptors[WMI_P2P_MAX_NOA_DESCRIPTORS]; } __packed; +#define MAX_WMI_UTF_LEN 252 + +struct ath12k_wmi_ftm_seg_hdr_params { + __le32 len; + __le32 msgref; + __le32 segmentinfo; + __le32 pdev_id; +} __packed; + +struct ath12k_wmi_ftm_cmd { + __le32 tlv_header; + struct ath12k_wmi_ftm_seg_hdr_params seg_hdr; + u8 data[]; +} __packed; + +struct ath12k_wmi_ftm_event { + struct ath12k_wmi_ftm_seg_hdr_params seg_hdr; + u8 data[]; +} __packed; + #define WMI_BEACON_TX_BUFFER_SIZE 512 #define WMI_EMA_BEACON_CNT GENMASK(7, 0) @@ -4604,6 +4640,7 @@ enum wmi_rate_preamble { WMI_RATE_PREAMBLE_HT, WMI_RATE_PREAMBLE_VHT, WMI_RATE_PREAMBLE_HE, + WMI_RATE_PREAMBLE_EHT, }; /** @@ -5628,6 +5665,245 @@ enum wmi_sta_keepalive_method { #define WMI_STA_KEEPALIVE_INTERVAL_DEFAULT 30 #define WMI_STA_KEEPALIVE_INTERVAL_DISABLE 0 +struct wmi_stats_event { + __le32 stats_id; + __le32 num_pdev_stats; + __le32 num_vdev_stats; + __le32 num_peer_stats; + __le32 num_bcnflt_stats; + __le32 num_chan_stats; + __le32 num_mib_stats; + __le32 pdev_id; + __le32 num_bcn_stats; + __le32 num_peer_extd_stats; + __le32 num_peer_extd2_stats; +} __packed; + +enum wmi_stats_id { + WMI_REQUEST_PDEV_STAT = BIT(2), + WMI_REQUEST_VDEV_STAT = BIT(3), + WMI_REQUEST_BCN_STAT = BIT(11), +}; + +struct wmi_request_stats_cmd { + __le32 tlv_header; + __le32 stats_id; + __le32 vdev_id; + struct ath12k_wmi_mac_addr_params peer_macaddr; + __le32 pdev_id; +} __packed; + +#define WLAN_MAX_AC 4 +#define MAX_TX_RATE_VALUES 10 + +struct wmi_vdev_stats_params { + __le32 vdev_id; + __le32 beacon_snr; + __le32 data_snr; + __le32 num_tx_frames[WLAN_MAX_AC]; + __le32 num_rx_frames; + __le32 num_tx_frames_retries[WLAN_MAX_AC]; + __le32 
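/* Hedged sketch of the segmentation implied by MAX_WMI_UTF_LEN and the
 * FTM structs above: a payload of total_len bytes would be sent as
 * DIV_ROUND_UP(total_len, MAX_WMI_UTF_LEN) chunks, each wrapped in an
 * ath12k_wmi_ftm_cmd whose seg_hdr identifies the sequence; the exact
 * bit packing of segmentinfo is an assumption not spelled out here.
 */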
num_tx_frames_failures[WLAN_MAX_AC]; + __le32 num_rts_fail; + __le32 num_rts_success; + __le32 num_rx_err; + __le32 num_rx_discard; + __le32 num_tx_not_acked; + __le32 tx_rate_history[MAX_TX_RATE_VALUES]; + __le32 beacon_rssi_history[MAX_TX_RATE_VALUES]; +} __packed; + +struct ath12k_wmi_bcn_stats_params { + __le32 vdev_id; + __le32 tx_bcn_succ_cnt; + __le32 tx_bcn_outage_cnt; +} __packed; + +struct ath12k_wmi_pdev_base_stats_params { + a_sle32 chan_nf; + __le32 tx_frame_count; /* Cycles spent transmitting frames */ + __le32 rx_frame_count; /* Cycles spent receiving frames */ + __le32 rx_clear_count; /* Total channel busy time, evidently */ + __le32 cycle_count; /* Total on-channel time */ + __le32 phy_err_count; + __le32 chan_tx_pwr; +} __packed; + +struct ath12k_wmi_pdev_tx_stats_params { + a_sle32 comp_queued; + a_sle32 comp_delivered; + a_sle32 msdu_enqued; + a_sle32 mpdu_enqued; + a_sle32 wmm_drop; + a_sle32 local_enqued; + a_sle32 local_freed; + a_sle32 hw_queued; + a_sle32 hw_reaped; + a_sle32 underrun; + a_sle32 tx_abort; + a_sle32 mpdus_requed; + __le32 tx_ko; + __le32 data_rc; + __le32 self_triggers; + __le32 sw_retry_failure; + __le32 illgl_rate_phy_err; + __le32 pdev_cont_xretry; + __le32 pdev_tx_timeout; + __le32 pdev_resets; + __le32 stateless_tid_alloc_failure; + __le32 phy_underrun; + __le32 txop_ovf; +} __packed; + +struct ath12k_wmi_pdev_rx_stats_params { + a_sle32 mid_ppdu_route_change; + a_sle32 status_rcvd; + a_sle32 r0_frags; + a_sle32 r1_frags; + a_sle32 r2_frags; + a_sle32 r3_frags; + a_sle32 htt_msdus; + a_sle32 htt_mpdus; + a_sle32 loc_msdus; + a_sle32 loc_mpdus; + a_sle32 oversize_amsdu; + a_sle32 phy_errs; + a_sle32 phy_err_drop; + a_sle32 mpdu_errs; +} __packed; + +struct ath12k_wmi_pdev_stats_params { + struct ath12k_wmi_pdev_base_stats_params base; + struct ath12k_wmi_pdev_tx_stats_params tx; + struct ath12k_wmi_pdev_rx_stats_params rx; +} __packed; + +struct ath12k_fw_stats_req_params { + u32 stats_id; + u32 vdev_id; + u32 pdev_id; +}; + +#define WMI_REQ_CTRL_PATH_PDEV_TX_STAT 1 +#define WMI_REQUEST_CTRL_PATH_STAT_GET 1 + +#define WMI_TPC_CONFIG BIT(1) +#define WMI_TPC_REG_PWR_ALLOWED BIT(2) +#define WMI_TPC_RATES_ARRAY1 BIT(3) +#define WMI_TPC_RATES_ARRAY2 BIT(4) +#define WMI_TPC_RATES_DL_OFDMA_ARRAY BIT(5) +#define WMI_TPC_CTL_PWR_ARRAY BIT(6) +#define WMI_TPC_CONFIG_PARAM 0x1 +#define ATH12K_TPC_RATE_ARRAY_MU GENMASK(15, 8) +#define ATH12K_TPC_RATE_ARRAY_SU GENMASK(7, 0) +#define TPC_STATS_REG_PWR_ALLOWED_TYPE 0 + +enum wmi_halphy_ctrl_path_stats_id { + WMI_HALPHY_PDEV_TX_SU_STATS = 0, + WMI_HALPHY_PDEV_TX_SUTXBF_STATS, + WMI_HALPHY_PDEV_TX_MU_STATS, + WMI_HALPHY_PDEV_TX_MUTXBF_STATS, + WMI_HALPHY_PDEV_TX_STATS_MAX, +}; + +enum ath12k_wmi_tpc_stats_rates_array { + ATH12K_TPC_STATS_RATES_ARRAY1, + ATH12K_TPC_STATS_RATES_ARRAY2, +}; + +enum ath12k_wmi_tpc_stats_ctl_array { + ATH12K_TPC_STATS_CTL_ARRAY, + ATH12K_TPC_STATS_CTL_160ARRAY, +}; + +enum ath12k_wmi_tpc_stats_events { + ATH12K_TPC_STATS_CONFIG_REG_PWR_EVENT, + ATH12K_TPC_STATS_RATES_EVENT1, + ATH12K_TPC_STATS_RATES_EVENT2, + ATH12K_TPC_STATS_CTL_TABLE_EVENT +}; + +struct wmi_request_halphy_ctrl_path_stats_cmd_fixed_params { + __le32 tlv_header; + __le32 stats_id_mask; + __le32 request_id; + __le32 action; + __le32 subid; +} __packed; + +struct ath12k_wmi_pdev_tpc_stats_event_fixed_params { + __le32 pdev_id; + __le32 end_of_event; + __le32 event_count; +} __packed; + +struct wmi_tpc_config_params { + __le32 reg_domain; + __le32 chan_freq; + __le32 phy_mode; + __le32 twice_antenna_reduction; + __le32 
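/* Hedged sketch for the ATH12K_TPC_RATE_ARRAY_* masks above: the SU
 * and MU rate-array sizes appear to share one packed word and would be
 * unpacked with the bitfield helpers, e.g.
 *
 *	su = u32_get_bits(val, ATH12K_TPC_RATE_ARRAY_SU);
 *	mu = u32_get_bits(val, ATH12K_TPC_RATE_ARRAY_MU);
 *
 * which event field carries that word is an assumption left open here.
 */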
twice_max_reg_power; + __le32 twice_antenna_gain; + __le32 power_limit; + __le32 rate_max; + __le32 num_tx_chain; + __le32 ctl; + __le32 flags; + __le32 caps; +} __packed; + +struct wmi_max_reg_power_fixed_params { + __le32 reg_power_type; + __le32 reg_array_len; + __le32 d1; + __le32 d2; + __le32 d3; + __le32 d4; +} __packed; + +struct wmi_max_reg_power_allowed_arg { + struct wmi_max_reg_power_fixed_params tpc_reg_pwr; + s16 *reg_pwr_array; +}; + +struct wmi_tpc_rates_array_fixed_params { + __le32 rate_array_type; + __le32 rate_array_len; +} __packed; + +struct wmi_tpc_rates_array_arg { + struct wmi_tpc_rates_array_fixed_params tpc_rates_array; + s16 *rate_array; +}; + +struct wmi_tpc_ctl_pwr_fixed_params { + __le32 ctl_array_type; + __le32 ctl_array_len; + __le32 end_of_ctl_pwr; + __le32 ctl_pwr_count; + __le32 d1; + __le32 d2; + __le32 d3; + __le32 d4; +} __packed; + +struct wmi_tpc_ctl_pwr_table_arg { + struct wmi_tpc_ctl_pwr_fixed_params tpc_ctl_pwr; + s8 *ctl_pwr_table; +}; + +struct wmi_tpc_stats_arg { + u32 pdev_id; + u32 event_count; + u32 end_of_event; + u32 tlvs_rcvd; + struct wmi_max_reg_power_allowed_arg max_reg_allowed_power; + struct wmi_tpc_rates_array_arg rates_array1; + struct wmi_tpc_rates_array_arg rates_array2; + struct wmi_tpc_config_params tpc_config; + struct wmi_tpc_ctl_pwr_table_arg ctl_array; +}; + void ath12k_wmi_init_qcn9274(struct ath12k_base *ab, struct ath12k_wmi_resource_config_arg *config); void ath12k_wmi_init_wcn7850(struct ath12k_base *ab, @@ -5639,7 +5915,7 @@ int ath12k_wmi_mgmt_send(struct ath12k *ar, u32 vdev_id, u32 buf_id, struct sk_buff *frame); int ath12k_wmi_p2p_go_bcn_ie(struct ath12k *ar, u32 vdev_id, const u8 *p2p_ie); -int ath12k_wmi_bcn_tmpl(struct ath12k *ar, u32 vdev_id, +int ath12k_wmi_bcn_tmpl(struct ath12k_link_vif *arvif, struct ieee80211_mutable_offsets *offs, struct sk_buff *bcn, struct ath12k_wmi_bcn_tmpl_ema_arg *ema_args); @@ -5753,6 +6029,13 @@ int ath12k_wmi_set_bios_cmd(struct ath12k_base *ab, u32 param_id, const u8 *buf, size_t buf_len); int ath12k_wmi_set_bios_sar_cmd(struct ath12k_base *ab, const u8 *psar_table); int ath12k_wmi_set_bios_geo_cmd(struct ath12k_base *ab, const u8 *pgeo_table); +int ath12k_wmi_send_stats_request_cmd(struct ath12k *ar, u32 stats_id, + u32 vdev_id, u32 pdev_id); +__le32 ath12k_wmi_tlv_hdr(u32 cmd, u32 len); + +int ath12k_wmi_send_tpc_stats_request(struct ath12k *ar, + enum wmi_halphy_ctrl_path_stats_id tpc_stats_type); +void ath12k_wmi_free_tpc_stats_mem(struct ath12k *ar); static inline u32 ath12k_wmi_caps_ext_get_pdev_id(const struct ath12k_wmi_caps_ext_params *param) @@ -5806,5 +6089,8 @@ int ath12k_wmi_sta_keepalive(struct ath12k *ar, int ath12k_wmi_mlo_setup(struct ath12k *ar, struct wmi_mlo_setup_arg *mlo_params); int ath12k_wmi_mlo_ready(struct ath12k *ar); int ath12k_wmi_mlo_teardown(struct ath12k *ar); +void ath12k_wmi_fw_stats_dump(struct ath12k *ar, + struct ath12k_fw_stats *fw_stats, u32 stats_id, + char *buf); #endif diff --git a/drivers/net/wireless/ath/ath12k/wow.c b/drivers/net/wireless/ath/ath12k/wow.c index 9e1c0bfd212f5..dce9bd0bcaefb 100644 --- a/drivers/net/wireless/ath/ath12k/wow.c +++ b/drivers/net/wireless/ath/ath12k/wow.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: BSD-3-Clause-Clear /* * Copyright (c) 2020 The Linux Foundation. All rights reserved. - * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2022-2025 Qualcomm Innovation Center, Inc. All rights reserved. 
*/ #include @@ -990,6 +990,7 @@ int ath12k_wow_op_resume(struct ieee80211_hw *hw) case ATH12K_HW_STATE_RESTARTING: case ATH12K_HW_STATE_RESTARTED: case ATH12K_HW_STATE_WEDGED: + case ATH12K_HW_STATE_TM: ath12k_warn(ar->ab, "encountered unexpected device state %d on resume, cannot recover\n", ah->state); ret = -EIO; diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h index a728cc0387df8..6e38aa7351e33 100644 --- a/drivers/net/wireless/ath/ath9k/ath9k.h +++ b/drivers/net/wireless/ath/ath9k/ath9k.h @@ -274,7 +274,6 @@ struct ath_node { struct ath_tx_control { struct ath_txq *txq; - struct ath_node *an; struct ieee80211_sta *sta; u8 paprd; }; @@ -1018,7 +1017,7 @@ struct ath_softc { u8 gtt_cnt; u32 intrstatus; - u32 rx_active_check_time; + unsigned long rx_active_check_time; u32 rx_active_count; u16 ps_flags; /* PS_* */ bool ps_enabled; diff --git a/drivers/net/wireless/ath/ath9k/common-spectral.c b/drivers/net/wireless/ath/ath9k/common-spectral.c index 628eeec4b82fe..300d178830adf 100644 --- a/drivers/net/wireless/ath/ath9k/common-spectral.c +++ b/drivers/net/wireless/ath/ath9k/common-spectral.c @@ -628,12 +628,12 @@ int ath_cmn_process_fft(struct ath_spec_scan_priv *spec_priv, struct ieee80211_h else RX_STAT_INC(sc, rx_spectral_sample_err); - memset(sample_buf, 0, SPECTRAL_SAMPLE_MAX_LEN); - /* Mix the received bins to the /dev/random * pool */ add_device_randomness(sample_buf, num_bins); + + memset(sample_buf, 0, SPECTRAL_SAMPLE_MAX_LEN); } /* Process a normal frame */ diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c index f9e77c4624d99..01e0dffbf57ed 100644 --- a/drivers/net/wireless/ath/ath9k/init.c +++ b/drivers/net/wireless/ath/ath9k/init.c @@ -647,7 +647,9 @@ static int ath9k_of_init(struct ath_softc *sc) ah->ah_flags |= AH_NO_EEP_SWAP; } - of_get_mac_address(np, common->macaddr); + ret = of_get_mac_address(np, common->macaddr); + if (ret == -EPROBE_DEFER) + return ret; return 0; } diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index db07ce6dbc080..0ac9212e42f75 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c @@ -2291,19 +2291,10 @@ static int ath_tx_prepare(struct ieee80211_hw *hw, struct sk_buff *skb, struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_sta *sta = txctl->sta; struct ieee80211_vif *vif = info->control.vif; - struct ath_vif *avp; struct ath_softc *sc = hw->priv; int frmlen = skb->len + FCS_LEN; int padpos, padsize; - /* NOTE: sta can be NULL according to net/mac80211.h */ - if (sta) - txctl->an = (struct ath_node *)sta->drv_priv; - else if (vif && ieee80211_is_data(hdr->frame_control)) { - avp = (void *)vif->drv_priv; - txctl->an = &avp->mcast_node; - } - if (info->control.hw_key) frmlen += info->control.hw_key->icv_len; diff --git a/drivers/net/wireless/ath/ath11k/testmode_i.h b/drivers/net/wireless/ath/testmode_i.h similarity index 50% rename from drivers/net/wireless/ath/ath11k/testmode_i.h rename to drivers/net/wireless/ath/testmode_i.h index 91b83873d6608..980ef2f3f05f8 100644 --- a/drivers/net/wireless/ath/ath11k/testmode_i.h +++ b/drivers/net/wireless/ath/testmode_i.h @@ -1,59 +1,59 @@ /* SPDX-License-Identifier: BSD-3-Clause-Clear */ /* * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. - * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2023-2025 Qualcomm Innovation Center, Inc. All rights reserved. 
*/ -/* "API" level of the ath11k testmode interface. Bump it after every +/* "API" level of the ath testmode interface. Bump it after every * incompatible interface change. */ -#define ATH11K_TESTMODE_VERSION_MAJOR 1 +#define ATH_TESTMODE_VERSION_MAJOR 1 /* Bump this after every _compatible_ interface change, for example * addition of a new command or an attribute. */ -#define ATH11K_TESTMODE_VERSION_MINOR 1 +#define ATH_TESTMODE_VERSION_MINOR 1 -#define ATH11K_TM_DATA_MAX_LEN 5000 -#define ATH11K_FTM_EVENT_MAX_BUF_LENGTH 2048 +#define ATH_TM_DATA_MAX_LEN 5000 +#define ATH_FTM_EVENT_MAX_BUF_LENGTH 2048 -enum ath11k_tm_attr { - __ATH11K_TM_ATTR_INVALID = 0, - ATH11K_TM_ATTR_CMD = 1, - ATH11K_TM_ATTR_DATA = 2, - ATH11K_TM_ATTR_WMI_CMDID = 3, - ATH11K_TM_ATTR_VERSION_MAJOR = 4, - ATH11K_TM_ATTR_VERSION_MINOR = 5, - ATH11K_TM_ATTR_WMI_OP_VERSION = 6, +enum ath_tm_attr { + __ATH_TM_ATTR_INVALID = 0, + ATH_TM_ATTR_CMD = 1, + ATH_TM_ATTR_DATA = 2, + ATH_TM_ATTR_WMI_CMDID = 3, + ATH_TM_ATTR_VERSION_MAJOR = 4, + ATH_TM_ATTR_VERSION_MINOR = 5, + ATH_TM_ATTR_WMI_OP_VERSION = 6, /* keep last */ - __ATH11K_TM_ATTR_AFTER_LAST, - ATH11K_TM_ATTR_MAX = __ATH11K_TM_ATTR_AFTER_LAST - 1, + __ATH_TM_ATTR_AFTER_LAST, + ATH_TM_ATTR_MAX = __ATH_TM_ATTR_AFTER_LAST - 1, }; -/* All ath11k testmode interface commands specified in - * ATH11K_TM_ATTR_CMD +/* All ath testmode interface commands specified in + * ATH_TM_ATTR_CMD */ -enum ath11k_tm_cmd { - /* Returns the supported ath11k testmode interface version in - * ATH11K_TM_ATTR_VERSION. Always guaranteed to work. User space +enum ath_tm_cmd { + /* Returns the supported ath testmode interface version in + * ATH_TM_ATTR_VERSION. Always guaranteed to work. User space * uses this to verify it's using the correct version of the * testmode interface */ - ATH11K_TM_CMD_GET_VERSION = 0, + ATH_TM_CMD_GET_VERSION = 0, /* The command used to transmit a WMI command to the firmware and * the event to receive WMI events from the firmware. Without * struct wmi_cmd_hdr header, only the WMI payload. Command id is - * provided with ATH11K_TM_ATTR_WMI_CMDID and payload in - * ATH11K_TM_ATTR_DATA. + * provided with ATH_TM_ATTR_WMI_CMDID and payload in + * ATH_TM_ATTR_DATA. */ - ATH11K_TM_CMD_WMI = 1, + ATH_TM_CMD_WMI = 1, /* Boots the UTF firmware, the netdev interface must be down at the * time. */ - ATH11K_TM_CMD_TESTMODE_START = 2, + ATH_TM_CMD_TESTMODE_START = 2, /* The command used to transmit a FTM WMI command to the firmware * and the event to receive WMI events from the firmware. The data @@ -62,5 +62,5 @@ enum ath11k_tm_cmd { * The data payload size could be large and the driver needs to * send segmented data to firmware. */ - ATH11K_TM_CMD_WMI_FTM = 3, + ATH_TM_CMD_WMI_FTM = 3, }; diff --git a/drivers/net/wireless/broadcom/b43/main.c b/drivers/net/wireless/broadcom/b43/main.c index 25b4ef9d3c9a1..7529afd24aed1 100644 --- a/drivers/net/wireless/broadcom/b43/main.c +++ b/drivers/net/wireless/broadcom/b43/main.c @@ -2166,7 +2166,7 @@ static void b43_print_fw_helptext(struct b43_wl *wl, bool error) { const char text[] = "You must go to " \ - "https://wireless.wiki.kernel.org/en/users/Drivers/b43#devicefirmware " \ + "https://wireless.docs.kernel.org/en/latest/en/users/drivers/b43/developers.html#list-of-firmware " \ "and download the correct firmware for this driver version. 
" \ "Please carefully read all instructions on this website.\n"; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c index 60eb95fc19a5a..6bc107476a2a3 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c @@ -1172,6 +1172,7 @@ static int brcmf_ops_sdio_suspend(struct device *dev) struct brcmf_bus *bus_if; struct brcmf_sdio_dev *sdiodev; mmc_pm_flag_t sdio_flags; + bool cap_power_off; int ret = 0; func = container_of(dev, struct sdio_func, dev); @@ -1179,19 +1180,23 @@ static int brcmf_ops_sdio_suspend(struct device *dev) if (func->num != 1) return 0; + cap_power_off = !!(func->card->host->caps & MMC_CAP_POWER_OFF_CARD); bus_if = dev_get_drvdata(dev); sdiodev = bus_if->bus_priv.sdio; - if (sdiodev->wowl_enabled) { + if (sdiodev->wowl_enabled || !cap_power_off) { brcmf_sdiod_freezer_on(sdiodev); brcmf_sdio_wd_timer(sdiodev->bus, 0); sdio_flags = MMC_PM_KEEP_POWER; - if (sdiodev->settings->bus.sdio.oob_irq_supported) - enable_irq_wake(sdiodev->settings->bus.sdio.oob_irq_nr); - else - sdio_flags |= MMC_PM_WAKE_SDIO_IRQ; + + if (sdiodev->wowl_enabled) { + if (sdiodev->settings->bus.sdio.oob_irq_supported) + enable_irq_wake(sdiodev->settings->bus.sdio.oob_irq_nr); + else + sdio_flags |= MMC_PM_WAKE_SDIO_IRQ; + } if (sdio_set_host_pm_flags(sdiodev->func1, sdio_flags)) brcmf_err("Failed to set pm_flags %x\n", sdio_flags); @@ -1213,18 +1218,19 @@ static int brcmf_ops_sdio_resume(struct device *dev) struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio; struct sdio_func *func = container_of(dev, struct sdio_func, dev); int ret = 0; + bool cap_power_off = !!(func->card->host->caps & MMC_CAP_POWER_OFF_CARD); brcmf_dbg(SDIO, "Enter: F%d\n", func->num); if (func->num != 2) return 0; - if (!sdiodev->wowl_enabled) { + if (!sdiodev->wowl_enabled && cap_power_off) { /* bus was powered off and device removed, probe again */ ret = brcmf_sdiod_probe(sdiodev); if (ret) brcmf_err("Failed to probe device on resume\n"); } else { - if (sdiodev->settings->bus.sdio.oob_irq_supported) + if (sdiodev->wowl_enabled && sdiodev->settings->bus.sdio.oob_irq_supported) disable_irq_wake(sdiodev->settings->bus.sdio.oob_irq_nr); brcmf_sdiod_freezer_off(sdiodev); diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2200.c b/drivers/net/wireless/intel/ipw2x00/ipw2200.c index be1d971b3d32d..24a5624ef2071 100644 --- a/drivers/net/wireless/intel/ipw2x00/ipw2200.c +++ b/drivers/net/wireless/intel/ipw2x00/ipw2200.c @@ -3295,7 +3295,7 @@ static int ipw_init_nic(struct ipw_priv *priv) rc = ipw_poll_bit(priv, IPW_GP_CNTRL_RW, IPW_GP_CNTRL_BIT_CLOCK_READY, 250); if (rc < 0) - IPW_DEBUG_INFO("FAILED wait for clock stablization\n"); + IPW_DEBUG_INFO("FAILED wait for clock stabilization\n"); /* assert SW reset */ ipw_set_bit(priv, IPW_RESET_REG, IPW_RESET_REG_SW_RESET); diff --git a/drivers/net/wireless/intel/ipw2x00/libipw.h b/drivers/net/wireless/intel/ipw2x00/libipw.h index 3c20353e5a412..e031e8692ca64 100644 --- a/drivers/net/wireless/intel/ipw2x00/libipw.h +++ b/drivers/net/wireless/intel/ipw2x00/libipw.h @@ -1011,8 +1011,6 @@ netdev_tx_t libipw_xmit(struct sk_buff *skb, struct net_device *dev); void libipw_txb_free(struct libipw_txb *); /* libipw_rx.c */ -void libipw_rx_any(struct libipw_device *ieee, struct sk_buff *skb, - struct libipw_rx_stats *stats); int libipw_rx(struct libipw_device *ieee, struct sk_buff *skb, struct libipw_rx_stats *rx_stats); /* make sure to set 
stats->len */ diff --git a/drivers/net/wireless/intel/ipw2x00/libipw_rx.c b/drivers/net/wireless/intel/ipw2x00/libipw_rx.c index dc4e91f58bb4a..b7bc94f7abd8a 100644 --- a/drivers/net/wireless/intel/ipw2x00/libipw_rx.c +++ b/drivers/net/wireless/intel/ipw2x00/libipw_rx.c @@ -823,96 +823,6 @@ int libipw_rx(struct libipw_device *ieee, struct sk_buff *skb, return 0; } -/* Filter out unrelated packets, call libipw_rx[_mgt] - * This function takes over the skb, it should not be used again after calling - * this function. */ -void libipw_rx_any(struct libipw_device *ieee, - struct sk_buff *skb, struct libipw_rx_stats *stats) -{ - struct libipw_hdr_4addr *hdr; - int is_packet_for_us; - u16 fc; - - if (ieee->iw_mode == IW_MODE_MONITOR) { - if (!libipw_rx(ieee, skb, stats)) - dev_kfree_skb_irq(skb); - return; - } - - if (skb->len < sizeof(struct ieee80211_hdr)) - goto drop_free; - - hdr = (struct libipw_hdr_4addr *)skb->data; - fc = le16_to_cpu(hdr->frame_ctl); - - if ((fc & IEEE80211_FCTL_VERS) != 0) - goto drop_free; - - switch (fc & IEEE80211_FCTL_FTYPE) { - case IEEE80211_FTYPE_MGMT: - if (skb->len < sizeof(struct libipw_hdr_3addr)) - goto drop_free; - libipw_rx_mgt(ieee, hdr, stats); - dev_kfree_skb_irq(skb); - return; - case IEEE80211_FTYPE_DATA: - break; - case IEEE80211_FTYPE_CTL: - return; - default: - return; - } - - is_packet_for_us = 0; - switch (ieee->iw_mode) { - case IW_MODE_ADHOC: - /* our BSS and not from/to DS */ - if (ether_addr_equal(hdr->addr3, ieee->bssid) && - ((fc & (IEEE80211_FCTL_TODS + IEEE80211_FCTL_FROMDS)) == 0)) { - /* promisc: get all */ - if (ieee->dev->flags & IFF_PROMISC) - is_packet_for_us = 1; - /* to us */ - else if (ether_addr_equal(hdr->addr1, ieee->dev->dev_addr)) - is_packet_for_us = 1; - /* mcast */ - else if (is_multicast_ether_addr(hdr->addr1)) - is_packet_for_us = 1; - } - break; - case IW_MODE_INFRA: - /* our BSS (== from our AP) and from DS */ - if (ether_addr_equal(hdr->addr2, ieee->bssid) && - ((fc & (IEEE80211_FCTL_TODS + IEEE80211_FCTL_FROMDS)) == IEEE80211_FCTL_FROMDS)) { - /* promisc: get all */ - if (ieee->dev->flags & IFF_PROMISC) - is_packet_for_us = 1; - /* to us */ - else if (ether_addr_equal(hdr->addr1, ieee->dev->dev_addr)) - is_packet_for_us = 1; - /* mcast */ - else if (is_multicast_ether_addr(hdr->addr1)) { - /* not our own packet bcasted from AP */ - if (!ether_addr_equal(hdr->addr3, ieee->dev->dev_addr)) - is_packet_for_us = 1; - } - } - break; - default: - /* ? 
*/ - break; - } - - if (is_packet_for_us) - if (!libipw_rx(ieee, skb, stats)) - dev_kfree_skb_irq(skb); - return; - -drop_free: - dev_kfree_skb_irq(skb); - ieee->dev->stats.rx_dropped++; -} - #define MGMT_FRAME_FIXED_PART_LENGTH 0x24 static u8 qos_oui[QOS_OUI_LEN] = { 0x00, 0x50, 0xF2 }; @@ -1729,6 +1639,5 @@ void libipw_rx_mgt(struct libipw_device *ieee, } } -EXPORT_SYMBOL_GPL(libipw_rx_any); EXPORT_SYMBOL(libipw_rx_mgt); EXPORT_SYMBOL(libipw_rx); diff --git a/drivers/net/wireless/intel/iwlegacy/4965-rs.c b/drivers/net/wireless/intel/iwlegacy/4965-rs.c index 718efb1aa1b0c..0e5130d1fccd3 100644 --- a/drivers/net/wireless/intel/iwlegacy/4965-rs.c +++ b/drivers/net/wireless/intel/iwlegacy/4965-rs.c @@ -132,15 +132,8 @@ static void il4965_rs_fill_link_cmd(struct il_priv *il, static void il4965_rs_stay_in_table(struct il_lq_sta *lq_sta, bool force_search); -#ifdef CONFIG_MAC80211_DEBUGFS static void il4965_rs_dbgfs_set_mcs(struct il_lq_sta *lq_sta, u32 *rate_n_flags, int idx); -#else -static void -il4965_rs_dbgfs_set_mcs(struct il_lq_sta *lq_sta, u32 * rate_n_flags, int idx) -{ -} -#endif /* * The following tables contain the expected throughput metrics for all rates @@ -2495,8 +2488,6 @@ il4965_rs_free_sta(void *il_r, struct ieee80211_sta *sta, void *il_sta) D_RATE("leave\n"); } -#ifdef CONFIG_MAC80211_DEBUGFS - static void il4965_rs_dbgfs_set_mcs(struct il_lq_sta *lq_sta, u32 * rate_n_flags, int idx) { @@ -2504,6 +2495,9 @@ il4965_rs_dbgfs_set_mcs(struct il_lq_sta *lq_sta, u32 * rate_n_flags, int idx) u8 valid_tx_ant; u8 ant_sel_tx; + if (!IS_ENABLED(CONFIG_MAC80211_DEBUGFS)) + return; + il = lq_sta->drv; valid_tx_ant = il->hw_params.valid_tx_ant; if (lq_sta->dbg_fixed_rate) { @@ -2758,7 +2752,6 @@ il4965_rs_add_debugfs(void *il, void *il_sta, struct dentry *dir) debugfs_create_u8("tx_agg_tid_enable", 0600, dir, &lq_sta->tx_agg_tid_en); } -#endif /* * Initialization of rate scaling information is done by driver after @@ -2781,9 +2774,8 @@ static const struct rate_control_ops rs_4965_ops = { .free = il4965_rs_free, .alloc_sta = il4965_rs_alloc_sta, .free_sta = il4965_rs_free_sta, -#ifdef CONFIG_MAC80211_DEBUGFS - .add_sta_debugfs = il4965_rs_add_debugfs, -#endif + .add_sta_debugfs = PTR_IF(IS_ENABLED(CONFIG_MAC80211_DEBUGFS), + il4965_rs_add_debugfs), }; int diff --git a/drivers/net/wireless/intel/iwlegacy/common.h b/drivers/net/wireless/intel/iwlegacy/common.h index 92285412ab109..52610f5e57a39 100644 --- a/drivers/net/wireless/intel/iwlegacy/common.h +++ b/drivers/net/wireless/intel/iwlegacy/common.h @@ -2815,9 +2815,7 @@ struct il_lq_sta { struct il_scale_tbl_info lq_info[LQ_SIZE]; /* "active", "search" */ struct il_traffic_load load[TID_MAX_LOAD_COUNT]; u8 tx_agg_tid_en; -#ifdef CONFIG_MAC80211_DEBUGFS u32 dbg_fixed_rate; -#endif struct il_priv *drv; /* used to be in sta_info */ diff --git a/drivers/net/wireless/intel/iwlwifi/Kconfig b/drivers/net/wireless/intel/iwlwifi/Kconfig index 4b04865fc2c9e..82f577da1a8b5 100644 --- a/drivers/net/wireless/intel/iwlwifi/Kconfig +++ b/drivers/net/wireless/intel/iwlwifi/Kconfig @@ -81,14 +81,25 @@ config IWLMVM of the devices that use this firmware is available here: https://wireless.wiki.kernel.org/en/users/drivers/iwlwifi#firmware +config IWLMLD + tristate "Intel Wireless WiFi MLD Firmware support" + select WANT_DEV_COREDUMP + depends on MAC80211 + depends on PTP_1588_CLOCK_OPTIONAL + help + This is the driver that supports firmwares of MLD capable devices. 
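(As a hedged aside to the iwlegacy hunk above: PTR_IF comes from linux/kernel.h and simply expands to ((cond) ? (ptr) : NULL), so

	.add_sta_debugfs = PTR_IF(IS_ENABLED(CONFIG_MAC80211_DEBUGFS),
				  il4965_rs_add_debugfs),

registers the callback only when debugfs support is built in, while keeping the function compiled and type-checked in every configuration.)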
+ The list of the devices that use this firmware is available here: + https://wireless.wiki.kernel.org/en/users/drivers/iwlwifi#firmware + # don't call it _MODULE -- will confuse Kconfig/fixdep/... config IWLWIFI_OPMODE_MODULAR bool default y if IWLDVM=m default y if IWLMVM=m + default y if IWLMLD=m -comment "WARNING: iwlwifi is useless without IWLDVM or IWLMVM" - depends on IWLDVM=n && IWLMVM=n +comment "WARNING: iwlwifi is useless without IWLDVM or IWLMVM or IWLMLD" + depends on IWLDVM=n && IWLMVM=n && IWLMLD=n menu "Debugging Options" diff --git a/drivers/net/wireless/intel/iwlwifi/Makefile b/drivers/net/wireless/intel/iwlwifi/Makefile index 19c4ce6f2465f..9546ceeaf5e37 100644 --- a/drivers/net/wireless/intel/iwlwifi/Makefile +++ b/drivers/net/wireless/intel/iwlwifi/Makefile @@ -12,7 +12,8 @@ iwlwifi-objs += pcie/ctxt-info.o pcie/ctxt-info-gen3.o iwlwifi-objs += pcie/trans-gen2.o pcie/tx-gen2.o iwlwifi-$(CONFIG_IWLDVM) += cfg/1000.o cfg/2000.o cfg/5000.o cfg/6000.o iwlwifi-$(CONFIG_IWLMVM) += cfg/7000.o cfg/8000.o cfg/9000.o cfg/22000.o -iwlwifi-$(CONFIG_IWLMVM) += cfg/ax210.o cfg/bz.o cfg/sc.o cfg/dr.o +iwlwifi-$(CONFIG_IWLMVM) += cfg/ax210.o +iwlwifi-$(CONFIG_IWLMLD) += cfg/bz.o cfg/sc.o cfg/dr.o iwlwifi-objs += iwl-dbg-tlv.o iwlwifi-objs += iwl-trans.o @@ -20,6 +21,7 @@ iwlwifi-objs += fw/img.o fw/notif-wait.o fw/rs.o iwlwifi-objs += fw/dbg.o fw/pnvm.o fw/dump.o iwlwifi-objs += fw/regulatory.o iwlwifi-$(CONFIG_IWLMVM) += fw/paging.o fw/smem.o fw/init.o +iwlwifi-$(CONFIG_IWLMLD) += fw/smem.o fw/init.o iwlwifi-$(CONFIG_ACPI) += fw/acpi.o iwlwifi-$(CONFIG_EFI) += fw/uefi.o iwlwifi-$(CONFIG_IWLWIFI_DEBUGFS) += fw/debugfs.o @@ -33,6 +35,7 @@ ccflags-y += -I$(src) obj-$(CONFIG_IWLDVM) += dvm/ obj-$(CONFIG_IWLMVM) += mvm/ obj-$(CONFIG_IWLMEI) += mei/ +obj-$(CONFIG_IWLMLD) += mld/ obj-$(CONFIG_IWLWIFI_KUNIT_TESTS) += tests/ diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c index 2e2fcb3807efb..130b9a8aa7ebe 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c @@ -205,7 +205,6 @@ const char iwl_ax101_name[] = "Intel(R) Wi-Fi 6 AX101"; const char iwl_ax200_name[] = "Intel(R) Wi-Fi 6 AX200 160MHz"; const char iwl_ax201_name[] = "Intel(R) Wi-Fi 6 AX201 160MHz"; const char iwl_ax203_name[] = "Intel(R) Wi-Fi 6 AX203"; -const char iwl_ax204_name[] = "Intel(R) Wi-Fi 6 AX204 160MHz"; const char iwl_ax200_killer_1650w_name[] = "Killer(R) Wi-Fi 6 AX1650w 160MHz Wireless Network Adapter (200D2W)"; diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/ax210.c b/drivers/net/wireless/intel/iwlwifi/cfg/ax210.c index dcba1a5d793b7..e87b57b9e2c04 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/ax210.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/ax210.c @@ -180,7 +180,6 @@ const struct iwl_cfg_trans_params iwl_ma_trans_cfg = { }; const char iwl_ax211_name[] = "Intel(R) Wi-Fi 6E AX211 160MHz"; -const char iwl_ax221_name[] = "Intel(R) Wi-Fi 6E AX221 160MHz"; const char iwl_ax231_name[] = "Intel(R) Wi-Fi 6E AX231 160MHz"; const char iwl_ax411_name[] = "Intel(R) Wi-Fi 6E AX411 160MHz"; diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/bz.c b/drivers/net/wireless/intel/iwlwifi/cfg/bz.c index efa3e0e35f799..f055255a7c937 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/bz.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/bz.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright (C) 2015-2017 Intel Deutschland GmbH - * Copyright (C) 2018-2024 Intel Corporation + * 
Copyright (C) 2018-2025 Intel Corporation */ #include #include @@ -10,10 +10,10 @@ #include "fw/api/txq.h" /* Highest firmware API version supported */ -#define IWL_BZ_UCODE_API_MAX 96 +#define IWL_BZ_UCODE_API_MAX 98 /* Lowest firmware API version supported */ -#define IWL_BZ_UCODE_API_MIN 92 +#define IWL_BZ_UCODE_API_MIN 93 /* NVM versions */ #define IWL_BZ_NVM_VERSION 0x0a1d @@ -38,6 +38,11 @@ #define IWL_BZ_A_HR_B_MODULE_FIRMWARE(api) \ IWL_BZ_A_HR_B_FW_PRE "-" __stringify(api) ".ucode" +#if !IS_ENABLED(CONFIG_IWLMVM) +const char iwl_ax211_name[] = "Intel(R) Wi-Fi 6E AX211 160MHz"; +const char iwl_ax201_name[] = "Intel(R) Wi-Fi 6 AX201 160MHz"; +#endif + static const struct iwl_base_params iwl_bz_base_params = { .eeprom_size = OTP_LOW_IMAGE_SIZE_32K, .num_of_queues = 512, @@ -50,6 +55,13 @@ static const struct iwl_base_params iwl_bz_base_params = { .pcie_l1_allowed = true, }; +const struct iwl_ht_params iwl_bz_ht_params = { + .stbc = true, + .ldpc = true, + .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ) | + BIT(NL80211_BAND_6GHZ), +}; + #define IWL_DEVICE_BZ_COMMON \ .ucode_api_max = IWL_BZ_UCODE_API_MAX, \ .ucode_api_min = IWL_BZ_UCODE_API_MIN, \ @@ -113,7 +125,7 @@ static const struct iwl_base_params iwl_bz_base_params = { #define IWL_DEVICE_BZ \ IWL_DEVICE_BZ_COMMON, \ - .ht_params = &iwl_22000_ht_params + .ht_params = &iwl_bz_ht_params /* * This size was picked according to 8 MSDUs inside 512 A-MSDUs in an @@ -145,7 +157,6 @@ const struct iwl_cfg_trans_params iwl_gl_trans_cfg = { .low_latency_xtal = true, }; -const char iwl_bz_name[] = "Intel(R) TBD Bz device"; const char iwl_fm_name[] = "Intel(R) Wi-Fi 7 BE201 320MHz"; const char iwl_wh_name[] = "Intel(R) Wi-Fi 7 BE211 320MHz"; const char iwl_gl_name[] = "Intel(R) Wi-Fi 7 BE200 320MHz"; diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/dr.c b/drivers/net/wireless/intel/iwlwifi/cfg/dr.c index ab7c0f8d54f42..282b9b846c3ac 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/dr.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/dr.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2024 Intel Corporation + * Copyright (C) 2024-2025 Intel Corporation */ #include #include @@ -9,7 +9,7 @@ #include "fw/api/txq.h" /* Highest firmware API version supported */ -#define IWL_DR_UCODE_API_MAX 96 +#define IWL_DR_UCODE_API_MAX 98 /* Lowest firmware API version supported */ #define IWL_DR_UCODE_API_MIN 96 @@ -114,7 +114,7 @@ static const struct iwl_base_params iwl_dr_base_params = { .uhb_supported = true, \ .features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM, \ .num_rbds = IWL_NUM_RBDS_DR_EHT, \ - .ht_params = &iwl_22000_ht_params + .ht_params = &iwl_bz_ht_params /* * This size was picked according to 8 MSDUs inside 512 A-MSDUs in an @@ -148,11 +148,9 @@ const struct iwl_cfg_trans_params iwl_br_trans_cfg = { .mq_rx_supported = true, .rf_id = true, .gen2 = true, - .integrated = true, .umac_prph_offset = 0x300000, .xtal_latency = 12000, .low_latency_xtal = true, - .ltr_delay = IWL_CFG_TRANS_LTR_DELAY_2500US, }; const char iwl_br_name[] = "Intel(R) TBD Br device"; diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/sc.c b/drivers/net/wireless/intel/iwlwifi/cfg/sc.c index c9eeb3f6704e2..670031fd60dc9 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/sc.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/sc.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright (C) 2015-2017 Intel Deutschland GmbH - * Copyright (C) 2018-2024 Intel Corporation + * Copyright (C) 2018-2025 
Intel Corporation */ #include #include @@ -10,10 +10,10 @@ #include "fw/api/txq.h" /* Highest firmware API version supported */ -#define IWL_SC_UCODE_API_MAX 96 +#define IWL_SC_UCODE_API_MAX 98 /* Lowest firmware API version supported */ -#define IWL_SC_UCODE_API_MIN 92 +#define IWL_SC_UCODE_API_MIN 93 /* NVM versions */ #define IWL_SC_NVM_VERSION 0x0a1d @@ -121,7 +121,7 @@ static const struct iwl_base_params iwl_sc_base_params = { .uhb_supported = true, \ .features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM, \ .num_rbds = IWL_NUM_RBDS_SC_EHT, \ - .ht_params = &iwl_22000_ht_params + .ht_params = &iwl_bz_ht_params /* * This size was picked according to 8 MSDUs inside 512 A-MSDUs in an @@ -142,22 +142,18 @@ const struct iwl_cfg_trans_params iwl_sc_trans_cfg = { .ltr_delay = IWL_CFG_TRANS_LTR_DELAY_2500US, }; -const char iwl_sc_name[] = "Intel(R) TBD Sc device"; +const char iwl_sp_name[] = "Intel(R) Wi-Fi 7 BE213 160MHz"; const struct iwl_cfg iwl_cfg_sc = { .fw_name_mac = "sc", IWL_DEVICE_SC, }; -const char iwl_sc2_name[] = "Intel(R) TBD Sc2 device"; - const struct iwl_cfg iwl_cfg_sc2 = { .fw_name_mac = "sc2", IWL_DEVICE_SC, }; -const char iwl_sc2f_name[] = "Intel(R) TBD Sc2f device"; - const struct iwl_cfg iwl_cfg_sc2f = { .fw_name_mac = "sc2f", IWL_DEVICE_SC, diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/commands.h b/drivers/net/wireless/intel/iwlwifi/dvm/commands.h index 3f49c0bccb286..96ea6c8dfc890 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/commands.h +++ b/drivers/net/wireless/intel/iwlwifi/dvm/commands.h @@ -1180,85 +1180,87 @@ struct iwl_dram_scratch { } __packed; struct iwl_tx_cmd { - /* - * MPDU byte count: - * MAC header (24/26/30/32 bytes) + 2 bytes pad if 26/30 header size, - * + 8 byte IV for CCM or TKIP (not used for WEP) - * + Data payload - * + 8-byte MIC (not used for CCM/WEP) - * NOTE: Does not include Tx command bytes, post-MAC pad bytes, - * MIC (CCM) 8 bytes, ICV (WEP/TKIP/CKIP) 4 bytes, CRC 4 bytes.i - * Range: 14-2342 bytes. - */ - __le16 len; - - /* - * MPDU or MSDU byte count for next frame. - * Used for fragmentation and bursting, but not 11n aggregation. - * Same as "len", but for next frame. Set to 0 if not applicable. - */ - __le16 next_frame_len; - - __le32 tx_flags; /* TX_CMD_FLG_* */ - - /* uCode may modify this field of the Tx command (in host DRAM!). - * Driver must also set dram_lsb_ptr and dram_msb_ptr in this cmd. */ - struct iwl_dram_scratch scratch; - - /* Rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is cleared. */ - __le32 rate_n_flags; /* RATE_MCS_* */ - - /* Index of destination station in uCode's station table */ - u8 sta_id; - - /* Type of security encryption: CCM or TKIP */ - u8 sec_ctl; /* TX_CMD_SEC_* */ - - /* - * Index into rate table (see REPLY_TX_LINK_QUALITY_CMD) for initial - * Tx attempt, if TX_CMD_FLG_STA_RATE_MSK is set. Normally "0" for - * data frames, this field may be used to selectively reduce initial - * rate (via non-0 value) for special frames (e.g. management), while - * still supporting rate scaling for all frames. - */ - u8 initial_rate_index; - u8 reserved; - u8 key[16]; - __le16 next_frame_flags; - __le16 reserved2; - union { - __le32 life_time; - __le32 attempt; - } stop_time; - - /* Host DRAM physical address pointer to "scratch" in this command. - * Must be dword aligned. "0" in dram_lsb_ptr disables usage. 
*/ - __le32 dram_lsb_ptr; - u8 dram_msb_ptr; - - u8 rts_retry_limit; /*byte 50 */ - u8 data_retry_limit; /*byte 51 */ - u8 tid_tspec; - union { - __le16 pm_frame_timeout; - __le16 attempt_duration; - } timeout; - - /* - * Duration of EDCA burst Tx Opportunity, in 32-usec units. - * Set this if txop time is not specified by HCCA protocol (e.g. by AP). - */ - __le16 driver_txop; - + /* New members MUST be added within the __struct_group() macro below. */ + __struct_group(iwl_tx_cmd_hdr, __hdr, __packed, + /* + * MPDU byte count: + * MAC header (24/26/30/32 bytes) + 2 bytes pad if 26/30 header size, + * + 8 byte IV for CCM or TKIP (not used for WEP) + * + Data payload + * + 8-byte MIC (not used for CCM/WEP) + * NOTE: Does not include Tx command bytes, post-MAC pad bytes, + * MIC (CCM) 8 bytes, ICV (WEP/TKIP/CKIP) 4 bytes, CRC 4 bytes.i + * Range: 14-2342 bytes. + */ + __le16 len; + + /* + * MPDU or MSDU byte count for next frame. + * Used for fragmentation and bursting, but not 11n aggregation. + * Same as "len", but for next frame. Set to 0 if not applicable. + */ + __le16 next_frame_len; + + __le32 tx_flags; /* TX_CMD_FLG_* */ + + /* uCode may modify this field of the Tx command (in host DRAM!). + * Driver must also set dram_lsb_ptr and dram_msb_ptr in this cmd. */ + struct iwl_dram_scratch scratch; + + /* Rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is cleared. */ + __le32 rate_n_flags; /* RATE_MCS_* */ + + /* Index of destination station in uCode's station table */ + u8 sta_id; + + /* Type of security encryption: CCM or TKIP */ + u8 sec_ctl; /* TX_CMD_SEC_* */ + + /* + * Index into rate table (see REPLY_TX_LINK_QUALITY_CMD) for initial + * Tx attempt, if TX_CMD_FLG_STA_RATE_MSK is set. Normally "0" for + * data frames, this field may be used to selectively reduce initial + * rate (via non-0 value) for special frames (e.g. management), while + * still supporting rate scaling for all frames. + */ + u8 initial_rate_index; + u8 reserved; + u8 key[16]; + __le16 next_frame_flags; + __le16 reserved2; + union { + __le32 life_time; + __le32 attempt; + } stop_time; + + /* Host DRAM physical address pointer to "scratch" in this command. + * Must be dword aligned. "0" in dram_lsb_ptr disables usage. */ + __le32 dram_lsb_ptr; + u8 dram_msb_ptr; + + u8 rts_retry_limit; /*byte 50 */ + u8 data_retry_limit; /*byte 51 */ + u8 tid_tspec; + union { + __le16 pm_frame_timeout; + __le16 attempt_duration; + } timeout; + + /* + * Duration of EDCA burst Tx Opportunity, in 32-usec units. + * Set this if txop time is not specified by HCCA protocol (e.g. by AP). + */ + __le16 driver_txop; + + ); /* * MAC header goes here, followed by 2 bytes padding if MAC header * length is 26 or 30 bytes, followed by payload data */ - union { - DECLARE_FLEX_ARRAY(u8, payload); - DECLARE_FLEX_ARRAY(struct ieee80211_hdr, hdr); - }; + struct ieee80211_hdr hdr[]; } __packed; +static_assert(offsetof(struct iwl_tx_cmd, hdr) == sizeof(struct iwl_tx_cmd_hdr), + "struct member likely outside of __struct_group()"); /* * TX command response is sent after *agn* transmission attempts. @@ -2312,7 +2314,7 @@ struct iwl_scan_cmd { /* For active scans (set to all-0s for passive scans). * Does not include payload. Must specify Tx rate; no rate scaling. 
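As a hedged aside (demo types hypothetical): __struct_group(), from include/uapi/linux/stddef.h, emits both a standalone struct for the grouped leading members and an anonymous mirror of them inside the enclosing struct, which is what lets commands embed the slimmer iwl_tx_cmd_hdr instead of the full iwl_tx_cmd:

	struct demo_cmd {
		__struct_group(demo_cmd_hdr, __hdr, __packed,
			__le16 len;
			__le16 flags;
		);
		u8 payload[];
	} __packed;

	static_assert(offsetof(struct demo_cmd, payload) ==
		      sizeof(struct demo_cmd_hdr));

The static_assert mirrors the one added to iwl_tx_cmd above and fires if a new member is accidentally added outside the group.
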
*/ - struct iwl_tx_cmd tx_cmd; + struct iwl_tx_cmd_hdr tx_cmd; /* For directed active scans (set to all-0s otherwise) */ struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX]; @@ -2423,7 +2425,7 @@ struct iwlagn_beacon_notif { */ struct iwl_tx_beacon_cmd { - struct iwl_tx_cmd tx; + struct iwl_tx_cmd_hdr tx; __le16 tim_idx; u8 tim_size; u8 reserved1; diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/tt.c b/drivers/net/wireless/intel/iwlwifi/dvm/tt.c index 43e8d04d5a8b2..e1d78550e4439 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/tt.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/tt.c @@ -124,17 +124,6 @@ enum iwl_antenna_ok iwl_tx_ant_restriction(struct iwl_priv *priv) return restriction->tx_stream; } -enum iwl_antenna_ok iwl_rx_ant_restriction(struct iwl_priv *priv) -{ - struct iwl_tt_mgmt *tt = &priv->thermal_throttle; - struct iwl_tt_restriction *restriction; - - if (!priv->thermal_throttle.advanced_tt) - return IWL_ANT_OK_MULTI; - restriction = tt->restriction + tt->state; - return restriction->rx_stream; -} - #define CT_KILL_EXIT_DURATION (5) /* 5 seconds duration */ #define CT_KILL_WAITING_DURATION (300) /* 300ms duration */ diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/tt.h b/drivers/net/wireless/intel/iwlwifi/dvm/tt.h index 4af792d41dce6..198f934a0d166 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/tt.h +++ b/drivers/net/wireless/intel/iwlwifi/dvm/tt.h @@ -100,7 +100,6 @@ u8 iwl_tt_current_power_mode(struct iwl_priv *priv); bool iwl_tt_is_low_power_state(struct iwl_priv *priv); bool iwl_ht_enabled(struct iwl_priv *priv); enum iwl_antenna_ok iwl_tx_ant_restriction(struct iwl_priv *priv); -enum iwl_antenna_ok iwl_rx_ant_restriction(struct iwl_priv *priv); void iwl_tt_enter_ct_kill(struct iwl_priv *priv); void iwl_tt_exit_ct_kill(struct iwl_priv *priv); void iwl_tt_handler(struct iwl_priv *priv); diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h b/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h index ebe85fdf08d31..42360a8f23aaf 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2012-2014, 2018, 2020-2021, 2024 Intel Corporation + * Copyright (C) 2012-2014, 2018, 2020-2021, 2024-2025 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -117,7 +117,7 @@ struct iwl_alive_ntf_v6 { * finishing init flow * @IWL_INIT_DEBUG_CFG: driver is going to send debug config command * @IWL_INIT_NVM: driver is going to send NVM_ACCESS commands - * @IWL_INIT_PHY: driver is going to send PHY_DB commands + * @IWL_INIT_PHY: driver is going to send the PHY_CONFIGURATION_CMD */ enum iwl_extended_cfg_flags { IWL_INIT_DEBUG_CFG, diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h index 34a1f97653c06..d43adb6f92200 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h @@ -502,10 +502,15 @@ enum iwl_legacy_cmds { /** * @DTS_MEASUREMENT_NOTIFICATION: * &struct iwl_dts_measurement_notif_v1 or - * &struct iwl_dts_measurement_notif_v2 + * &struct iwl_dts_measurement_notif */ DTS_MEASUREMENT_NOTIFICATION = 0xdd, + /** + * @DEBUG_HOST_COMMAND: &struct iwl_dhc_cmd + */ + DEBUG_HOST_COMMAND = 0xf1, + /** * @LDBG_CONFIG_CMD: configure continuous trace recording */ diff --git 
a/drivers/net/wireless/intel/iwlwifi/fw/api/context.h b/drivers/net/wireless/intel/iwlwifi/fw/api/context.h index a9fa5f054ce08..464eed9b5e71d 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/context.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/context.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2012-2014, 2022 Intel Corporation + * Copyright (C) 2012-2014, 2022, 2024 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -14,6 +14,9 @@ * @FW_CTXT_COLOR_POS: position of the color * @FW_CTXT_COLOR_MSK: mask of the color * @FW_CTXT_INVALID: value used to indicate unused/invalid + * @FW_CTXT_ID_INVALID: value used to indicate unused/invalid. This can be + * used with newer firmware which no longer uses the color. Typically, + * firmware versions supported by iwlmld can use this value. */ enum iwl_ctxt_id_and_color { FW_CTXT_ID_POS = 0, @@ -21,6 +24,7 @@ enum iwl_ctxt_id_and_color { FW_CTXT_COLOR_POS = 8, FW_CTXT_COLOR_MSK = 0xff << FW_CTXT_COLOR_POS, FW_CTXT_INVALID = 0xffffffff, + FW_CTXT_ID_INVALID = 0xff, }; #define FW_CMD_ID_AND_COLOR(_id, _color) (((_id) << FW_CTXT_ID_POS) |\ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h b/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h index c2362bc786b28..9c271ea67155c 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h @@ -1006,7 +1006,7 @@ struct iwl_wowlan_wake_pkt_notif { * struct iwl_mvm_d3_end_notif - d3 end notification * @flags: See &enum iwl_d0i3_flags */ -struct iwl_mvm_d3_end_notif { +struct iwl_d3_end_notif { __le32 flags; } __packed; diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h index 570a3f7225103..c139b965980d5 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2024 Intel Corporation + * Copyright (C) 2024-2025 Intel Corporation * Copyright (C) 2012-2014, 2018-2022 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH @@ -90,9 +90,15 @@ enum iwl_data_path_subcmd_ids { */ SEC_KEY_CMD = 0x18, + /** + * @OMI_SEND_STATUS_NOTIF: notification after OMI was sent + * uses &struct iwl_omi_send_status_notif + */ + OMI_SEND_STATUS_NOTIF = 0xF2, + /** * @ESR_MODE_NOTIF: notification to recommend/force a wanted esr mode, - * uses &struct iwl_mvm_esr_mode_notif + * uses &struct iwl_esr_mode_notif */ ESR_MODE_NOTIF = 0xF3, @@ -688,4 +694,13 @@ struct iwl_sec_key_cmd { } __packed u; /* SEC_KEY_OPERATION_API_U_VER_1 */ } __packed; /* SEC_KEY_CMD_API_S_VER_1 */ +/** + * struct iwl_omi_send_status_notif - OMI status notification + * @success: indicates that the OMI was sent successfully + * (currently always set) + */ +struct iwl_omi_send_status_notif { + __le32 success; +} __packed; /* OMI_SEND_STATUS_NTFY_API_S_VER_1 */ + #endif /* __iwl_fw_api_datapath_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h b/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h index 550de6db18340..4fab6c66994e5 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2018-2024 Intel 
Corporation + * Copyright (C) 2018-2025 Intel Corporation */ #ifndef __iwl_fw_dbg_tlv_h__ #define __iwl_fw_dbg_tlv_h__ @@ -527,6 +527,8 @@ enum iwl_fw_ini_time_point { * @IWL_FW_INI_APPLY_POLICY_OVERRIDE_DATA: override trigger data. * Append otherwise * @IWL_FW_INI_APPLY_POLICY_DUMP_COMPLETE_CMD: send cmd once dump collected + * @IWL_FW_INI_APPLY_POLICY_RESET_HANDSHAKE: perform reset handshake and + * split dump to before/after with region marking */ enum iwl_fw_ini_trigger_apply_policy { IWL_FW_INI_APPLY_POLICY_MATCH_TIME_POINT = BIT(0), @@ -535,6 +537,7 @@ enum iwl_fw_ini_trigger_apply_policy { IWL_FW_INI_APPLY_POLICY_OVERRIDE_CFG = BIT(9), IWL_FW_INI_APPLY_POLICY_OVERRIDE_DATA = BIT(10), IWL_FW_INI_APPLY_POLICY_DUMP_COMPLETE_CMD = BIT(16), + IWL_FW_INI_APPLY_POLICY_RESET_HANDSHAKE = BIT(17), }; /** @@ -556,12 +559,14 @@ enum iwl_fw_ini_trigger_reset_fw_policy { * @IWL_FW_INI_DEBUG_DUMP_POLICY_NO_LIMIT: OS has no limit of dump size * @IWL_FW_INI_DEBUG_DUMP_POLICY_MAX_LIMIT_600KB: mini dump only 600KB region dump * @IWL_FW_IWL_DEBUG_DUMP_POLICY_MAX_LIMIT_5MB: mini dump 5MB size dump + * @IWL_FW_IWL_DEBUG_DUMP_POLICY_BEFORE_RESET: dump this region before reset + * handshake (if requested by %IWL_FW_INI_APPLY_POLICY_RESET_HANDSHAKE) */ enum iwl_fw_ini_dump_policy { IWL_FW_INI_DEBUG_DUMP_POLICY_NO_LIMIT = BIT(0), IWL_FW_INI_DEBUG_DUMP_POLICY_MAX_LIMIT_600KB = BIT(1), IWL_FW_IWL_DEBUG_DUMP_POLICY_MAX_LIMIT_5MB = BIT(2), - + IWL_FW_IWL_DEBUG_DUMP_POLICY_BEFORE_RESET = BIT(3), }; /** diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h b/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h index aa88e91d117ee..0cf1e5124fba7 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2005-2014, 2018-2024 Intel Corporation + * Copyright (C) 2005-2014, 2018-2025 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -51,7 +51,7 @@ enum iwl_debug_cmds { /** * @GET_TAS_STATUS: * sends command to fw to get TAS status - * the response is &struct iwl_mvm_tas_status_resp + * the response is &struct iwl_tas_status_resp */ GET_TAS_STATUS = 0xA, /** @@ -439,25 +439,20 @@ struct iwl_dbg_dump_complete_cmd { __le32 tp_data; } __packed; /* FW_DUMP_COMPLETE_CMD_API_S_VER_1 */ -#define TAS_LMAC_BAND_HB 0 -#define TAS_LMAC_BAND_LB 1 -#define TAS_LMAC_BAND_UHB 2 -#define TAS_LMAC_BAND_INVALID 3 - /** - * struct iwl_mvm_tas_status_per_mac - tas status per lmac + * struct iwl_tas_status_per_mac - tas status per lmac * @static_status: tas statically enabled or disabled per lmac - TRUE/FALSE * @static_dis_reason: TAS static disable reason, uses - * &enum iwl_mvm_tas_statically_disabled_reason + * &enum iwl_tas_statically_disabled_reason * @dynamic_status: Current TAS status. uses - * &enum iwl_mvm_tas_dyna_status + * &enum iwl_tas_dyna_status * @near_disconnection: is TAS currently near disconnection per lmac? 
- TRUE/FALSE * @max_reg_pwr_limit: Regulatory power limits in dBm * @sar_limit: SAR limits per lmac in dBm * @band: Band per lmac * @reserved: reserved */ -struct iwl_mvm_tas_status_per_mac { u8 static_status; u8 static_dis_reason; u8 dynamic_status; @@ -466,35 +461,35 @@ struct iwl_mvm_tas_status_resp { __le16 sar_limit; u8 band; u8 reserved[3]; -} __packed; /*DEBUG_GET_TAS_STATUS_PER_MAC_S_VER_1*/ +} __packed; /* DEBUG_GET_TAS_STATUS_PER_MAC_S_VER_1 */ /** - * struct iwl_mvm_tas_status_resp - Response to GET_TAS_STATUS + * struct iwl_tas_status_resp - Response to GET_TAS_STATUS * @tas_fw_version: TAS FW version * @is_uhb_for_usa_enable: is UHB enabled in USA? - TRUE/FALSE * @curr_mcc: current mcc * @block_list: country block list * @tas_status_mac: TAS status per lmac, uses - * &struct iwl_mvm_tas_status_per_mac + * &struct iwl_tas_status_per_mac * @in_dual_radio: is TAS in dual radio? - TRUE/FALSE * @uhb_allowed_flags: see &enum iwl_tas_uhb_allowed_flags. * This member is valid only when fw has * %IWL_UCODE_TLV_CAPA_UHB_CANADA_TAS_SUPPORT capability. * @reserved: reserved */ -struct iwl_mvm_tas_status_resp { u8 tas_fw_version; u8 is_uhb_for_usa_enable; __le16 curr_mcc; __le16 block_list[16]; - struct iwl_mvm_tas_status_per_mac tas_status_mac[2]; u8 in_dual_radio; u8 uhb_allowed_flags; u8 reserved[2]; -} __packed; /*DEBUG_GET_TAS_STATUS_RSP_API_S_VER_3*/ +struct iwl_tas_status_resp { u8 tas_fw_version; u8 is_uhb_for_usa_enable; __le16 curr_mcc; __le16 block_list[16]; + struct iwl_tas_status_per_mac tas_status_mac[2]; u8 in_dual_radio; u8 uhb_allowed_flags; u8 reserved[2]; +} __packed; /* DEBUG_GET_TAS_STATUS_RSP_API_S_VER_3 */ /** - * enum iwl_mvm_tas_dyna_status - TAS current running status + * enum iwl_tas_dyna_status - TAS current running status * @TAS_DYNA_INACTIVE: TAS status is inactive * @TAS_DYNA_INACTIVE_MVM_MODE: TAS is disabled because FW is in MVM mode * or is in softap mode.
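/*
 * A minimal consumer sketch for the renamed response (hypothetical, not
 * part of this patch); it assumes the GET_TAS_STATUS notification payload
 * is the struct iwl_tas_status_resp laid out above:
 */
static void iwl_print_tas_status(const struct iwl_tas_status_resp *rsp)
{
	int i;

	/* one entry per lmac, matching tas_status_mac[2] above */
	for (i = 0; i < ARRAY_SIZE(rsp->tas_status_mac); i++) {
		const struct iwl_tas_status_per_mac *mac =
			&rsp->tas_status_mac[i];

		pr_info("lmac %d: band=%u dynamic=%u sar=%u dBm\n",
			i, mac->band, mac->dynamic_status,
			le16_to_cpu(mac->sar_limit));
	}
}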
@@ -507,7 +502,7 @@ struct iwl_mvm_tas_status_resp { * @TAS_DYNA_ACTIVE: TAS is currently active * @TAS_DYNA_STATUS_MAX: TAS status max value */ -enum iwl_mvm_tas_dyna_status { +enum iwl_tas_dyna_status { TAS_DYNA_INACTIVE, TAS_DYNA_INACTIVE_MVM_MODE, TAS_DYNA_INACTIVE_TRIGGER_MODE, @@ -516,19 +511,22 @@ struct iwl_mvm_tas_status_resp { TAS_DYNA_ACTIVE, TAS_DYNA_STATUS_MAX, -}; /*_TAS_DYNA_STATUS_E*/ +}; /** - * enum iwl_mvm_tas_statically_disabled_reason - TAS statically disabled reason + * enum iwl_tas_statically_disabled_reason - TAS statically disabled reason * @TAS_DISABLED_DUE_TO_BIOS: TAS is disabled because TAS is disabled in BIOS * @TAS_DISABLED_DUE_TO_SAR_6DBM: TAS is disabled because SAR limit is less than 6 dBm * @TAS_DISABLED_REASON_INVALID: TAS disable reason is invalid + * @TAS_DISABLED_DUE_TO_TABLE_SOURCE_INVALID: TAS is disabled due to + * an invalid table source * @TAS_DISABLED_REASON_MAX: TAS disable reason max value */ -enum iwl_mvm_tas_statically_disabled_reason { +enum iwl_tas_statically_disabled_reason { TAS_DISABLED_DUE_TO_BIOS, TAS_DISABLED_DUE_TO_SAR_6DBM, TAS_DISABLED_REASON_INVALID, + TAS_DISABLED_DUE_TO_TABLE_SOURCE_INVALID, TAS_DISABLED_REASON_MAX, }; /*_TAS_STATICALLY_DISABLED_REASON_E*/ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/dhc.h b/drivers/net/wireless/intel/iwlwifi/fw/api/dhc.h new file mode 100644 index 0000000000000..b6d79c678cd89 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/dhc.h @@ -0,0 +1,226 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * Copyright (C) 2025 Intel Corporation + */ +#ifndef __iwl_fw_api_dhc_h__ +#define __iwl_fw_api_dhc_h__ + +#define DHC_TABLE_MASK_POS (28) + +/** + * enum iwl_dhc_table_id - DHC table operations index + */ +enum iwl_dhc_table_id { + /** + * @DHC_TABLE_INTEGRATION: select the integration table + */ + DHC_TABLE_INTEGRATION = 2 << DHC_TABLE_MASK_POS, + /** + * @DHC_TABLE_TOOLS: select the tools table + */ + DHC_TABLE_TOOLS = 0, }; + +/** + * enum iwl_dhc_umac_tools_table - tools operations + * @DHC_TOOLS_UMAC_GET_TAS_STATUS: Get TAS status. + * See &struct iwl_dhc_tas_status_resp + */ +enum iwl_dhc_umac_tools_table { + DHC_TOOLS_UMAC_GET_TAS_STATUS = 0, +}; + +/** + * enum iwl_dhc_umac_integration_table - integration operations + */ +enum iwl_dhc_umac_integration_table { + /** + * @DHC_INT_UMAC_TWT_OPERATION: trigger a TWT operation + */ + DHC_INT_UMAC_TWT_OPERATION = 4, + /** + * @DHC_INTEGRATION_TLC_DEBUG_CONFIG: TLC debug + */ + DHC_INTEGRATION_TLC_DEBUG_CONFIG = 1, + /** + * @DHC_INTEGRATION_MAX: Maximum UMAC integration table entries + */ + DHC_INTEGRATION_MAX +}; + +#define DHC_TARGET_UMAC BIT(27) + +/** + * struct iwl_dhc_cmd - debug host command + * @length: length in DWs of the data structure that is concatenated to the end + * of this struct + * @index_and_mask: bit 31 is 1 for data set operation else it's 0 + * bits 28-30 are the index of the table of the operation - + * &enum iwl_dhc_table_id + * bit 27 is 0 if the cmd is targeted to LMAC and 1 if targeted to UMAC, + * (LMAC is 0 for backward compatibility) + * bit 26 is 0 if the cmd is targeted to LMAC0 and 1 if targeted to LMAC1, + * relevant only if bit 27 set to 0 + * bits 0-25 are the entry index in the table specified in bits 28-30 + + * @data: the concatenated data.
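+ *
+ *	For example (illustrative only, not defined by this header), a UMAC
+ *	TLC debug config operation would use
+ *	index_and_mask = cpu_to_le32(DHC_TABLE_INTEGRATION |
+ *				     DHC_TARGET_UMAC |
+ *				     DHC_INTEGRATION_TLC_DEBUG_CONFIG);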
+ */ +struct iwl_dhc_cmd { + __le32 length; + __le32 index_and_mask; + __le32 data[]; +} __packed; /* DHC_CMD_API_S */ + +/** + * struct iwl_dhc_payload_hdr - DHC payload header + * @version: a version of a payload + * @reserved: reserved for alignment + */ +struct iwl_dhc_payload_hdr { + u8 version; + u8 reserved[3]; +} __packed; /* DHC_PAYLOAD_HDR_API_S_VER_1 */ + +/** + * struct iwl_dhc_tas_status_per_radio - TAS status per radio + * @band: &PHY_BAND_5 for high band, PHY_BAND_24 for low band and + * &PHY_BAND_6 for ultra high band. + * @static_status: TAS statically enabled or disabled + * @static_disable_reason: TAS static disable reason, uses + * &enum iwl_tas_statically_disabled_reason + * @near_disconnection: is TAS currently near disconnection per radio + * @dynamic_status_ant_a: Antenna A current TAS status. + * uses &enum iwl_tas_dyna_status + * @dynamic_status_ant_b: Antenna B current TAS status. + * uses &enum iwl_tas_dyna_status + * @max_reg_pwr_limit_ant_a: Antenna A regulatory power limits in dBm + * @max_reg_pwr_limit_ant_b: Antenna B regulatory power limits in dBm + * @sar_limit_ant_a: Antenna A SAR limit per radio in dBm + * @sar_limit_ant_b: Antenna B SAR limit per radio in dBm + * @reserved: reserved for alignment + */ +struct iwl_dhc_tas_status_per_radio { + u8 band; + u8 static_status; + u8 static_disable_reason; + u8 near_disconnection; + u8 dynamic_status_ant_a; + u8 dynamic_status_ant_b; + __le16 max_reg_pwr_limit_ant_a; + __le16 max_reg_pwr_limit_ant_b; + __le16 sar_limit_ant_a; + __le16 sar_limit_ant_b; + u8 reserved[2]; +} __packed; /* DHC_TAS_STATUS_PER_RADIO_S_VER_1 */ + +/** + * struct iwl_dhc_tas_status_resp - Response to DHC_TOOLS_UMAC_GET_TAS_STATUS + * @header: DHC payload header, uses &struct iwl_dhc_payload_hdr + * @tas_config_info: see &struct bios_value_u32 + * @mcc_block_list: block listed country codes + * @tas_status_radio: TAS status, uses &struct iwl_dhc_tas_status_per_radio + * @curr_mcc: current mcc + * @valid_radio_mask: indicates which entries in @tas_status_radio are valid.
+ * @reserved: reserved for alignment + */ +struct iwl_dhc_tas_status_resp { + struct iwl_dhc_payload_hdr header; + struct bios_value_u32 tas_config_info; + __le16 mcc_block_list[IWL_WTAS_BLACK_LIST_MAX]; + struct iwl_dhc_tas_status_per_radio tas_status_radio[2]; + __le16 curr_mcc; + u8 valid_radio_mask; + u8 reserved; +} __packed; /* DHC_TAS_STATUS_RSP_API_S_VER_1 */ + +/** + * struct iwl_dhc_cmd_resp_v1 - debug host command response + * @status: status of the command + * @data: the response data + */ +struct iwl_dhc_cmd_resp_v1 { + __le32 status; + __le32 data[]; +} __packed; /* DHC_RESP_API_S_VER_1 */ + +/** + * struct iwl_dhc_cmd_resp - debug host command response + * @status: status of the command + * @descriptor: command descriptor (index_and_mask) returned + * @data: the response data + */ +struct iwl_dhc_cmd_resp { + __le32 status; + __le32 descriptor; + __le32 data[]; +} __packed; /* DHC_RESP_API_S_VER_2 and DHC_RESP_API_S_VER_3 */ + +/** + * enum iwl_dhc_twt_operation_type - describes the TWT operation type + * + * @DHC_TWT_REQUEST: Send a Request TWT command + * @DHC_TWT_SUGGEST: Send a Suggest TWT command + * @DHC_TWT_DEMAND: Send a Demand TWT command + * @DHC_TWT_GROUPING: Send a Grouping TWT command + * @DHC_TWT_ACCEPT: Send an Accept TWT command + * @DHC_TWT_ALTERNATE: Send an Alternate TWT command + * @DHC_TWT_DICTATE: Send a Dictate TWT command + * @DHC_TWT_REJECT: Send a Reject TWT command + * @DHC_TWT_TEARDOWN: Send a TearDown TWT command + */ +enum iwl_dhc_twt_operation_type { + DHC_TWT_REQUEST, + DHC_TWT_SUGGEST, + DHC_TWT_DEMAND, + DHC_TWT_GROUPING, + DHC_TWT_ACCEPT, + DHC_TWT_ALTERNATE, + DHC_TWT_DICTATE, + DHC_TWT_REJECT, + DHC_TWT_TEARDOWN, +}; /* DHC_TWT_OPERATION_TYPE_E */ + +/** + * struct iwl_dhc_twt_operation - trigger a TWT operation + * + * @mac_id: the mac Id on which to trigger TWT operation + * @twt_operation: see &enum iwl_dhc_twt_operation_type + * @target_wake_time: when should we be on channel + * @interval_exp: the exponent for the interval + * @interval_mantissa: the mantissa for the interval + * @min_wake_duration: the minimum duration for the wake period + * @trigger: is the TWT triggered or not + * @flow_type: is the TWT announced or not + * @flow_id: the TWT flow identifier from 0 to 7 + * @protection: is the TWT protected + * @ndo_paging_indicator: is ndo_paging_indicator set + * @responder_pm_mode: is responder_pm_mode set + * @negotiation_type: if the responder wants to doze outside the TWT SP + * @twt_request: 1 for TWT request, 0 otherwise + * @implicit: is TWT implicit + * @twt_group_assignment: the TWT group assignment + * @twt_channel: the TWT channel + * @reserved: reserved + */ +struct iwl_dhc_twt_operation { + __le32 mac_id; + __le32 twt_operation; + __le64 target_wake_time; + __le32 interval_exp; + __le32 interval_mantissa; + __le32 min_wake_duration; + u8 trigger; + u8 flow_type; + u8 flow_id; + u8 protection; + u8 ndo_paging_indicator; + u8 responder_pm_mode; + u8 negotiation_type; + u8 twt_request; + u8 implicit; + u8 twt_group_assignment; + u8 twt_channel; + u8 reserved; +}; /* DHC_TWT_OPERATION_API_S */ + +#endif /* __iwl_fw_api_dhc_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/location.h b/drivers/net/wireless/intel/iwlwifi/fw/api/location.h index b8dff139aa05f..e1952fc6d1490 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/location.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/location.h @@ -2,7 +2,7 @@ /* * Copyright (C) 2015-2017 Intel Deutschland GmbH * Copyright (C) 2018-2022 Intel Corporation - 
Copyright (C) 2024 Intel Corporation + * Copyright (C) 2024-2025 Intel Corporation */ #ifndef __iwl_fw_api_location_h__ #define __iwl_fw_api_location_h__ @@ -1015,7 +1015,7 @@ struct iwl_tof_range_req_ap_entry_v9 { } __packed; /* LOCATION_RANGE_REQ_AP_ENTRY_CMD_API_S_VER_9 */ /** - * struct iwl_tof_range_req_ap_entry_v10 - AP configuration parameters + * struct iwl_tof_range_req_ap_entry - AP configuration parameters * @initiator_ap_flags: see &enum iwl_initiator_ap_flags. * @band: 0 for 5.2 GHz, 1 for 2.4 GHz, 2 for 6GHz * @channel_num: AP Channel number @@ -1063,7 +1063,7 @@ struct iwl_tof_range_req_ap_entry_v9 { * @min_time_between_msr: For non trigger based NDP ranging, the minimum time * between measurements in units of milliseconds */ -struct iwl_tof_range_req_ap_entry_v10 { +struct iwl_tof_range_req_ap_entry { __le32 initiator_ap_flags; u8 band; u8 channel_num; @@ -1134,7 +1134,7 @@ enum iwl_tof_initiator_flags { IWL_TOF_INITIATOR_FLAGS_NON_ASAP_SUPPORT = BIT(20), }; /* LOCATION_RANGE_REQ_CMD_API_S_VER_5 */ -#define IWL_MVM_TOF_MAX_APS 5 +#define IWL_TOF_MAX_APS 5 #define IWL_MVM_TOF_MAX_TWO_SIDED_APS 5 /** @@ -1153,7 +1153,7 @@ enum iwl_tof_initiator_flags { * when the session is done (successfully / partially). * one of iwl_tof_response_mode. * @reserved0: reserved - * @num_of_ap: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS) + * @num_of_ap: Number of APs to measure (error if > IWL_TOF_MAX_APS) * @macaddr_random: '0' Use default source MAC address (i.e. p2_p), * '1' Use MAC Address randomization according to the below * @range_req_bssid: ranging request BSSID @@ -1183,7 +1183,7 @@ struct iwl_tof_range_req_cmd_v5 { u8 ftm_tx_chains; __le16 common_calib; __le16 specific_calib; - struct iwl_tof_range_req_ap_entry_v2 ap[IWL_MVM_TOF_MAX_APS]; + struct iwl_tof_range_req_ap_entry_v2 ap[IWL_TOF_MAX_APS]; } __packed; /* LOCATION_RANGE_REQ_CMD_API_S_VER_5 */ @@ -1192,7 +1192,7 @@ struct iwl_tof_range_req_cmd_v5 { * @initiator_flags: see flags @ iwl_tof_initiator_flags * @request_id: A Token incremented per request. The same Token will be * sent back in the range response - * @num_of_ap: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS) + * @num_of_ap: Number of APs to measure (error if > IWL_TOF_MAX_APS) * @range_req_bssid: ranging request BSSID * @macaddr_mask: Bits set to 0 shall be copied from the MAC address template. * Bits set to 1 shall be randomized by the UMAC @@ -1216,7 +1216,7 @@ struct iwl_tof_range_req_cmd_v7 { __le32 tsf_mac_id; __le16 common_calib; __le16 specific_calib; - struct iwl_tof_range_req_ap_entry_v3 ap[IWL_MVM_TOF_MAX_APS]; + struct iwl_tof_range_req_ap_entry_v3 ap[IWL_TOF_MAX_APS]; } __packed; /* LOCATION_RANGE_REQ_CMD_API_S_VER_7 */ /** @@ -1224,7 +1224,7 @@ struct iwl_tof_range_req_cmd_v7 { * @initiator_flags: see flags @ iwl_tof_initiator_flags * @request_id: A Token incremented per request. The same Token will be * sent back in the range response - * @num_of_ap: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS) + * @num_of_ap: Number of APs to measure (error if > IWL_TOF_MAX_APS) * @range_req_bssid: ranging request BSSID * @macaddr_mask: Bits set to 0 shall be copied from the MAC address template. 
* Bits set to 1 shall be randomized by the UMAC @@ -1248,7 +1248,7 @@ struct iwl_tof_range_req_cmd_v8 { __le32 tsf_mac_id; __le16 common_calib; __le16 specific_calib; - struct iwl_tof_range_req_ap_entry_v4 ap[IWL_MVM_TOF_MAX_APS]; + struct iwl_tof_range_req_ap_entry_v4 ap[IWL_TOF_MAX_APS]; } __packed; /* LOCATION_RANGE_REQ_CMD_API_S_VER_8 */ /** @@ -1256,7 +1256,7 @@ struct iwl_tof_range_req_cmd_v8 { * @initiator_flags: see flags @ iwl_tof_initiator_flags * @request_id: A Token incremented per request. The same Token will be * sent back in the range response - * @num_of_ap: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS) + * @num_of_ap: Number of APs to measure (error if > IWL_TOF_MAX_APS) * @range_req_bssid: ranging request BSSID * @macaddr_mask: Bits set to 0 shall be copied from the MAC address template. * Bits set to 1 shall be randomized by the UMAC @@ -1276,7 +1276,7 @@ struct iwl_tof_range_req_cmd_v9 { u8 macaddr_template[ETH_ALEN]; __le32 req_timeout_ms; __le32 tsf_mac_id; - struct iwl_tof_range_req_ap_entry_v6 ap[IWL_MVM_TOF_MAX_APS]; + struct iwl_tof_range_req_ap_entry_v6 ap[IWL_TOF_MAX_APS]; } __packed; /* LOCATION_RANGE_REQ_CMD_API_S_VER_9 */ /** @@ -1284,7 +1284,7 @@ struct iwl_tof_range_req_cmd_v9 { * @initiator_flags: see flags @ iwl_tof_initiator_flags * @request_id: A Token incremented per request. The same Token will be * sent back in the range response - * @num_of_ap: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS) + * @num_of_ap: Number of APs to measure (error if > IWL_TOF_MAX_APS) * @range_req_bssid: ranging request BSSID * @macaddr_mask: Bits set to 0 shall be copied from the MAC address template. * Bits set to 1 shall be randomized by the UMAC @@ -1304,7 +1304,7 @@ struct iwl_tof_range_req_cmd_v11 { u8 macaddr_template[ETH_ALEN]; __le32 req_timeout_ms; __le32 tsf_mac_id; - struct iwl_tof_range_req_ap_entry_v7 ap[IWL_MVM_TOF_MAX_APS]; + struct iwl_tof_range_req_ap_entry_v7 ap[IWL_TOF_MAX_APS]; } __packed; /* LOCATION_RANGE_REQ_CMD_API_S_VER_11 */ /** @@ -1312,7 +1312,7 @@ struct iwl_tof_range_req_cmd_v11 { * @initiator_flags: see flags @ iwl_tof_initiator_flags * @request_id: A Token incremented per request. The same Token will be * sent back in the range response - * @num_of_ap: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS) + * @num_of_ap: Number of APs to measure (error if > IWL_TOF_MAX_APS) * @range_req_bssid: ranging request BSSID * @macaddr_mask: Bits set to 0 shall be copied from the MAC address template. * Bits set to 1 shall be randomized by the UMAC @@ -1332,7 +1332,7 @@ struct iwl_tof_range_req_cmd_v12 { u8 macaddr_template[ETH_ALEN]; __le32 req_timeout_ms; __le32 tsf_mac_id; - struct iwl_tof_range_req_ap_entry_v8 ap[IWL_MVM_TOF_MAX_APS]; + struct iwl_tof_range_req_ap_entry_v8 ap[IWL_TOF_MAX_APS]; } __packed; /* LOCATION_RANGE_REQ_CMD_API_S_VER_12 */ /** @@ -1340,7 +1340,7 @@ struct iwl_tof_range_req_cmd_v12 { * @initiator_flags: see flags @ iwl_tof_initiator_flags * @request_id: A Token incremented per request. The same Token will be * sent back in the range response - * @num_of_ap: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS) + * @num_of_ap: Number of APs to measure (error if > IWL_TOF_MAX_APS) * @range_req_bssid: ranging request BSSID * @macaddr_mask: Bits set to 0 shall be copied from the MAC address template. 
* Bits set to 1 shall be randomized by the UMAC @@ -1360,15 +1360,15 @@ struct iwl_tof_range_req_cmd_v13 { u8 macaddr_template[ETH_ALEN]; __le32 req_timeout_ms; __le32 tsf_mac_id; - struct iwl_tof_range_req_ap_entry_v9 ap[IWL_MVM_TOF_MAX_APS]; + struct iwl_tof_range_req_ap_entry_v9 ap[IWL_TOF_MAX_APS]; } __packed; /* LOCATION_RANGE_REQ_CMD_API_S_VER_13 */ /** - * struct iwl_tof_range_req_cmd_v14 - start measurement cmd + * struct iwl_tof_range_req_cmd - start measurement cmd * @initiator_flags: see flags @ iwl_tof_initiator_flags * @request_id: A Token incremented per request. The same Token will be * sent back in the range response - * @num_of_ap: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS) + * @num_of_ap: Number of APs to measure (error if > IWL_TOF_MAX_APS) * @range_req_bssid: ranging request BSSID * @macaddr_mask: Bits set to 0 shall be copied from the MAC address template. * Bits set to 1 shall be randomized by the UMAC @@ -1377,9 +1377,9 @@ struct iwl_tof_range_req_cmd_v13 { * This is the session time for completing the measurement. * @tsf_mac_id: report the measurement start time for each ap in terms of the * TSF of this mac id. 0xff to disable TSF reporting. - * @ap: per-AP request data, see &struct iwl_tof_range_req_ap_entry_v10. + * @ap: per-AP request data, see &struct iwl_tof_range_req_ap_entry. */ -struct iwl_tof_range_req_cmd_v14 { +struct iwl_tof_range_req_cmd { __le32 initiator_flags; u8 request_id; u8 num_of_ap; @@ -1388,8 +1388,8 @@ struct iwl_tof_range_req_cmd_v14 { u8 macaddr_template[ETH_ALEN]; __le32 req_timeout_ms; __le32 tsf_mac_id; - struct iwl_tof_range_req_ap_entry_v10 ap[IWL_MVM_TOF_MAX_APS]; -} __packed; /* LOCATION_RANGE_REQ_CMD_API_S_VER_13 */ + struct iwl_tof_range_req_ap_entry ap[IWL_TOF_MAX_APS]; +} __packed; /* LOCATION_RANGE_REQ_CMD_API_S_VER_15 */ /* * enum iwl_tof_range_request_status - status of the sent request @@ -1609,7 +1609,7 @@ struct iwl_tof_range_rsp_ap_entry_ntfy_v5 { } __packed; /* LOCATION_RANGE_RSP_AP_ETRY_NTFY_API_S_VER_5 */ /** - * struct iwl_tof_range_rsp_ap_entry_ntfy_v6 - AP parameters (response) + * struct iwl_tof_range_rsp_ap_entry_ntfy - AP parameters (response) * @bssid: BSSID of the AP * @measure_status: current APs measurement status, one of * &enum iwl_tof_entry_status. @@ -1645,7 +1645,7 @@ struct iwl_tof_range_rsp_ap_entry_ntfy_v5 { * @tx_pn: the last PN used for this responder Tx in case PMF is configured in * LE byte order. */ -struct iwl_tof_range_rsp_ap_entry_ntfy_v6 { +struct iwl_tof_range_rsp_ap_entry_ntfy { u8 bssid[ETH_ALEN]; u8 measure_status; u8 measure_bw; @@ -1695,7 +1695,7 @@ enum iwl_tof_response_status { * @request_status: status of current measurement session, one of * &enum iwl_tof_response_status. 
* @last_in_batch: report policy (when not all responses are uploaded at once) - * @num_of_aps: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS) + * @num_of_aps: Number of APs to measure (error if > IWL_TOF_MAX_APS) * @ap: per-AP data */ struct iwl_tof_range_rsp_ntfy_v5 { @@ -1703,7 +1703,7 @@ struct iwl_tof_range_rsp_ntfy_v5 { u8 request_status; u8 last_in_batch; u8 num_of_aps; - struct iwl_tof_range_rsp_ap_entry_ntfy_v3 ap[IWL_MVM_TOF_MAX_APS]; + struct iwl_tof_range_rsp_ap_entry_ntfy_v3 ap[IWL_TOF_MAX_APS]; } __packed; /* LOCATION_RANGE_RSP_NTFY_API_S_VER_5 */ /** @@ -1719,7 +1719,7 @@ struct iwl_tof_range_rsp_ntfy_v6 { u8 num_of_aps; u8 last_report; u8 reserved; - struct iwl_tof_range_rsp_ap_entry_ntfy_v4 ap[IWL_MVM_TOF_MAX_APS]; + struct iwl_tof_range_rsp_ap_entry_ntfy_v4 ap[IWL_TOF_MAX_APS]; } __packed; /* LOCATION_RANGE_RSP_NTFY_API_S_VER_6 */ /** @@ -1735,23 +1735,23 @@ struct iwl_tof_range_rsp_ntfy_v7 { u8 num_of_aps; u8 last_report; u8 reserved; - struct iwl_tof_range_rsp_ap_entry_ntfy_v5 ap[IWL_MVM_TOF_MAX_APS]; + struct iwl_tof_range_rsp_ap_entry_ntfy_v5 ap[IWL_TOF_MAX_APS]; } __packed; /* LOCATION_RANGE_RSP_NTFY_API_S_VER_7 */ /** - * struct iwl_tof_range_rsp_ntfy_v8 - ranging response notification + * struct iwl_tof_range_rsp_ntfy - ranging response notification * @request_id: A Token ID of the corresponding Range request * @num_of_aps: Number of APs results * @last_report: 1 if no more FTM sessions are scheduled, 0 otherwise. * @reserved: reserved * @ap: per-AP data */ -struct iwl_tof_range_rsp_ntfy_v8 { +struct iwl_tof_range_rsp_ntfy { u8 request_id; u8 num_of_aps; u8 last_report; u8 reserved; - struct iwl_tof_range_rsp_ap_entry_ntfy_v6 ap[IWL_MVM_TOF_MAX_APS]; + struct iwl_tof_range_rsp_ap_entry_ntfy ap[IWL_TOF_MAX_APS]; } __packed; /* LOCATION_RANGE_RSP_NTFY_API_S_VER_8, LOCATION_RANGE_RSP_NTFY_API_S_VER_9 */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h b/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h index 37bb7002c1c93..b511e3aa6bb2f 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2012-2014, 2018-2019, 2021-2024 Intel Corporation + * Copyright (C) 2012-2014, 2018-2019, 2021-2025 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -46,7 +46,7 @@ enum iwl_mac_conf_subcmd_ids { */ STA_CONFIG_CMD = 0xA, /** - * @AUX_STA_CMD: &struct iwl_mvm_aux_sta_cmd + * @AUX_STA_CMD: &struct iwl_aux_sta_cmd */ AUX_STA_CMD = 0xB, /** @@ -61,6 +61,10 @@ enum iwl_mac_conf_subcmd_ids { * @ROC_CMD: &struct iwl_roc_req */ ROC_CMD = 0xE, + /** + * @TWT_OPERATION_CMD: &struct iwl_twt_operation_cmd + */ + TWT_OPERATION_CMD = 0x10, /** * @MISSED_BEACONS_NOTIF: &struct iwl_missed_beacons_notif */ @@ -641,7 +645,7 @@ struct iwl_sta_cfg_cmd { } __packed; /* STA_CMD_API_S_VER_1 */ /** - * struct iwl_mvm_aux_sta_cmd - command for AUX STA configuration + * struct iwl_aux_sta_cmd - command for AUX STA configuration * ( AUX_STA_CMD = 0xB ) * * @sta_id: index of aux sta to configure @@ -649,7 +653,7 @@ struct iwl_sta_cfg_cmd { * @mac_addr: mac addr of the auxiliary sta * @reserved_for_mac_addr: reserved */ -struct iwl_mvm_aux_sta_cmd { +struct iwl_aux_sta_cmd { __le32 sta_id; __le32 lmac_id; u8 mac_addr[ETH_ALEN]; @@ -693,11 +697,11 @@ enum iwl_mvm_fw_esr_recommendation { }; /* ESR_MODE_RECOMMENDATION_CODE_API_E_VER_1 */ /** * - 
struct iwl_mvm_esr_mode_notif - FWs recommendation/force for esr mode + * struct iwl_esr_mode_notif - FWs recommendation/force for esr mode * * @action: the action to apply on esr state. See &iwl_mvm_fw_esr_recommendation */ -struct iwl_mvm_esr_mode_notif { +struct iwl_esr_mode_notif { __le32 action; } __packed; /* ESR_MODE_RECOMMENDATION_NTFY_API_S_VER_1 */ @@ -748,4 +752,83 @@ struct iwl_esr_trans_fail_notif { __le32 err_code; } __packed; /* ESR_TRANSITION_FAILED_NTFY_API_S_VER_1 */ +/* + * enum iwl_twt_operation_type: TWT operation in a TWT action frame + * + * @TWT_OPERATION_REQUEST: TWT Request + * @TWT_OPERATION_SUGGEST: TWT Suggest + * @TWT_OPERATION_DEMAND: TWT Demand + * @TWT_OPERATION_GROUPING: TWT Grouping + * @TWT_OPERATION_ACCEPT: TWT Accept + * @TWT_OPERATION_ALTERNATE: TWT Alternate + * @TWT_OPERATION_DICTATE: TWT Dictate + * @TWT_OPERATION_REJECT: TWT Reject + * @TWT_OPERATION_TEARDOWN: TWT Teardown + * @TWT_OPERATION_UNAVAILABILITY: TWT Unavailability + */ +enum iwl_twt_operation_type { + TWT_OPERATION_REQUEST, + TWT_OPERATION_SUGGEST, + TWT_OPERATION_DEMAND, + TWT_OPERATION_GROUPING, + TWT_OPERATION_ACCEPT, + TWT_OPERATION_ALTERNATE, + TWT_OPERATION_DICTATE, + TWT_OPERATION_REJECT, + TWT_OPERATION_TEARDOWN, + TWT_OPERATION_UNAVAILABILITY, + TWT_OPERATION_MAX, +}; /* TWT_OPERATION_TYPE_E_VER_1 */ + +/** + * struct iwl_twt_operation_cmd - initiate a TWT session from driver + * + * @link_id: FW link id to initiate the TWT + * @twt_operation: &enum iwl_twt_operation_type + * @target_wake_time: TSF time to start the TWT + * @interval_exponent: the exponent for the interval + * @interval_mantissa: the mantissa for the interval + * @minimum_wake_duration: the minimum duration for the wake period + * @trigger: is the TWT triggered or not + * @flow_type: is the TWT announced (0) or not (1) + * @flow_id: the TWT flow identifier 0 - 7 + * @twt_protection: is the TWT protected + * @ndp_paging_indicator: is ndp paging indicator set + * @responder_pm_mode: is responder pm mode set + * @negotiation_type: if the responder wants to doze outside the TWT SP + * @twt_request: 1 for TWT request (STA), 0 for TWT response (AP) + * @implicit: is TWT implicit + * @twt_group_assignment: the TWT group assignment + * @twt_channel: the TWT channel + * @restricted_info_present: is this a restricted TWT + * @dl_bitmap_valid: is DL (downlink) bitmap valid (restricted TWT) + * @ul_bitmap_valid: is UL (uplink) bitmap valid (restricted TWT) + * @dl_tid_bitmap: DL TID bitmap (restricted TWT) + * @ul_tid_bitmap: UL TID bitmap (restricted TWT) + */ +struct iwl_twt_operation_cmd { + __le32 link_id; + __le32 twt_operation; + __le64 target_wake_time; + __le32 interval_exponent; + __le32 interval_mantissa; + __le32 minimum_wake_duration; + u8 trigger; + u8 flow_type; + u8 flow_id; + u8 twt_protection; + u8 ndp_paging_indicator; + u8 responder_pm_mode; + u8 negotiation_type; + u8 twt_request; + u8 implicit; + u8 twt_group_assignment; + u8 twt_channel; + u8 restricted_info_present; + u8 dl_bitmap_valid; + u8 ul_bitmap_valid; + u8 dl_tid_bitmap; + u8 ul_tid_bitmap; +} __packed; /* TWT_OPERATION_API_S_VER_1 */ + #endif /* __iwl_fw_api_mac_cfg_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/phy.h b/drivers/net/wireless/intel/iwlwifi/fw/api/phy.h index c73d4d5978572..f63b25b03b7e4 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/phy.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/phy.h @@ -1,11 +1,13 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2012-2014, 
2019-2022, 2024 Intel Corporation + * Copyright (C) 2012-2014, 2019-2022, 2024-2025 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ #ifndef __iwl_fw_api_phy_h__ #define __iwl_fw_api_phy_h__ +#include <linux/types.h> +#include <linux/bits.h> /** * enum iwl_phy_ops_subcmd_ids - PHY group commands @@ -19,7 +21,7 @@ enum iwl_phy_ops_subcmd_ids { CMD_DTS_MEASUREMENT_TRIGGER_WIDE = 0x0, /** - * @CTDP_CONFIG_CMD: &struct iwl_mvm_ctdp_cmd + * @CTDP_CONFIG_CMD: &struct iwl_ctdp_cmd */ CTDP_CONFIG_CMD = 0x03, @@ -55,7 +57,7 @@ /** * @DTS_MEASUREMENT_NOTIF_WIDE: * &struct iwl_dts_measurement_notif_v1 or - * &struct iwl_dts_measurement_notif_v2 + * &struct iwl_dts_measurement_notif */ DTS_MEASUREMENT_NOTIF_WIDE = 0xFF, }; @@ -152,13 +154,13 @@ struct iwl_dts_measurement_notif_v1 { } __packed; /* TEMPERATURE_MEASUREMENT_TRIGGER_NTFY_S_VER_1*/ /** - * struct iwl_dts_measurement_notif_v2 - measurements notification + * struct iwl_dts_measurement_notif - measurements notification * * @temp: the measured temperature * @voltage: the measured voltage * @threshold_idx: the trip index that was crossed */ -struct iwl_dts_measurement_notif_v2 { +struct iwl_dts_measurement_notif { __le32 temp; __le32 voltage; __le32 threshold_idx; @@ -195,25 +197,25 @@ struct ct_kill_notif { } __packed; /* CT_KILL_NOTIFICATION_API_S_VER_1, CT_KILL_NOTIFICATION_API_S_VER_2 */ /** -* enum iwl_mvm_ctdp_cmd_operation - CTDP command operations +* enum iwl_ctdp_cmd_operation - CTDP command operations * @CTDP_CMD_OPERATION_START: update the current budget * @CTDP_CMD_OPERATION_STOP: stop ctdp * @CTDP_CMD_OPERATION_REPORT: get the average budget */ -enum iwl_mvm_ctdp_cmd_operation { +enum iwl_ctdp_cmd_operation { CTDP_CMD_OPERATION_START = 0x1, CTDP_CMD_OPERATION_STOP = 0x2, CTDP_CMD_OPERATION_REPORT = 0x4, };/* CTDP_CMD_OPERATION_TYPE_E */ /** - * struct iwl_mvm_ctdp_cmd - track and manage the FW power consumption budget + * struct iwl_ctdp_cmd - track and manage the FW power consumption budget * - * @operation: see &enum iwl_mvm_ctdp_cmd_operation + * @operation: see &enum iwl_ctdp_cmd_operation * @budget: the budget in milliwatt * @window_size: defined in API but not used */ -struct iwl_mvm_ctdp_cmd { +struct iwl_ctdp_cmd { __le32 operation; __le32 budget; __le32 window_size; diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h index 1a60f0cdf9728..c2f806cbab59d 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h @@ -226,6 +226,58 @@ struct iwl_tlc_update_notif { __le32 amsdu_enabled; } __packed; /* TLC_MNG_UPDATE_NTFY_API_S_VER_2 */ +/** + * enum iwl_tlc_debug_types - debug options + */ +enum iwl_tlc_debug_types { + /** + * @IWL_TLC_DEBUG_FIXED_RATE: set fixed rate for rate scaling + */ + IWL_TLC_DEBUG_FIXED_RATE, + /** + * @IWL_TLC_DEBUG_AGG_DURATION_LIM: time limit for a BA + * session, in usec + */ + IWL_TLC_DEBUG_AGG_DURATION_LIM, + /** + * @IWL_TLC_DEBUG_AGG_FRAME_CNT_LIM: set max number of frames + * in an aggregation + */ + IWL_TLC_DEBUG_AGG_FRAME_CNT_LIM, + /** + * @IWL_TLC_DEBUG_TPC_ENABLED: enable or disable tpc + */ + IWL_TLC_DEBUG_TPC_ENABLED, + /** + * @IWL_TLC_DEBUG_TPC_STATS: get number of frames Tx'ed in each + * tpc step + */ + IWL_TLC_DEBUG_TPC_STATS, + /** + * @IWL_TLC_DEBUG_RTS_DISABLE: disable RTS (bool true/false). 
+ */ + IWL_TLC_DEBUG_RTS_DISABLE, + /** + * @IWL_TLC_DEBUG_PARTIAL_FIXED_RATE: set partial fixed rate to fw + */ + IWL_TLC_DEBUG_PARTIAL_FIXED_RATE, +}; /* TLC_MNG_DEBUG_TYPES_API_E */ + +#define MAX_DATA_IN_DHC_TLC_CMD 10 + +/** + * struct iwl_dhc_tlc_cmd - fixed debug config + * @sta_id: bit 0 - enable/disable, bits 1 - 7 hold station id + * @reserved1: reserved + * @type: type id of %enum iwl_tlc_debug_types + * @data: data to send + */ +struct iwl_dhc_tlc_cmd { + u8 sta_id; + u8 reserved1[3]; + __le32 type; + __le32 data[MAX_DATA_IN_DHC_TLC_CMD]; +} __packed; /* TLC_MNG_DEBUG_CMD_S */ #define IWL_MAX_MCS_DISPLAY_SIZE 12 diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h b/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h index d7f8a276b6833..ecbcd5084cd8d 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h @@ -191,6 +191,7 @@ enum iwl_sta_sleep_flag { #define STA_KEY_IDX_INVALID (0xff) #define STA_KEY_MAX_DATA_KEY_NUM (4) #define IWL_MAX_GLOBAL_KEYS (4) +#define IWL_MAX_NUM_IGTKS 2 #define STA_KEY_LEN_WEP40 (5) #define STA_KEY_LEN_WEP104 (13) diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h b/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h index 18d030334a6a6..f586379d66ddd 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2012-2014, 2018-2020, 2022-2024 Intel Corporation + * Copyright (C) 2012-2014, 2018-2020, 2022-2025 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -351,7 +351,7 @@ enum iwl_roc_activity { }; /* ROC_ACTIVITY_API_E_VER_1 */ /* - * ROC command + * ROC command v5 * * Command requests the firmware to remain on a channel for a certain duration. * @@ -366,7 +366,7 @@ enum iwl_roc_activity { * @max_delay: max delay the ROC can start in TU * @duration: remain on channel duration in TU */ -struct iwl_roc_req { +struct iwl_roc_req_v5 { __le32 action; __le32 activity; __le32 sta_id; @@ -375,7 +375,41 @@ struct iwl_roc_req { __le16 reserved; __le32 max_delay; __le32 duration; -} __packed; /* ROC_CMD_API_S_VER_3 */ +} __packed; /* ROC_CMD_API_S_VER_5 */ + +/* + * ROC command + * + * Command requests the firmware to remain on a channel for a certain duration. + * + * ( MAC_CONF_GROUP 0x3, ROC_CMD 0xE ) + * + * @action: action to perform, see &enum iwl_ctxt_action + * @activity: type of activity, see &enum iwl_roc_activity + * @sta_id: station id, resumed during "Remain On Channel" activity. + * @channel_info: &struct iwl_fw_channel_info + * @node_addr: node MAC address for Rx filtering + * @reserved1: align to a dword + * @max_delay: max delay the ROC can start in TU + * @duration: remain on channel duration in TU + * @interval: interval between repetitions (when repetitions > 1). + * @repetitions: number of repetitions + * 0xFF: infinite repetitions. 0 or 1: single repetition. 
+ * @reserved2: align to a dword + */ +struct iwl_roc_req { + __le32 action; + __le32 activity; + __le32 sta_id; + struct iwl_fw_channel_info channel_info; + u8 node_addr[ETH_ALEN]; + __le16 reserved1; + __le32 max_delay; + __le32 duration; + __le32 interval; + u8 repetitions; + u8 reserved2[3]; +} __packed; /* ROC_CMD_API_S_VER_6 */ /* * ROC notification diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c index fb2ea38e89aca..03f639fbf9b61 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2005-2014, 2018-2024 Intel Corporation + * Copyright (C) 2005-2014, 2018-2025 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2015-2017 Intel Deutschland GmbH */ @@ -558,41 +558,71 @@ static void iwl_dump_prph(struct iwl_fw_runtime *fwrt, } /* - * alloc_sgtable - allocates scallerlist table in the given size, - * fills it with pages and returns it + * alloc_sgtable - allocates a (chained) scatterlist of the given size, + * fills it with pages and returns it * @size: the size (in bytes) of the table -*/ -static struct scatterlist *alloc_sgtable(int size) + */ +static struct scatterlist *alloc_sgtable(ssize_t size) { - int alloc_size, nents, i; - struct page *new_page; - struct scatterlist *iter; - struct scatterlist *table; + struct scatterlist *result = NULL, *prev; + int nents, i, n_prev; nents = DIV_ROUND_UP(size, PAGE_SIZE); - table = kcalloc(nents, sizeof(*table), GFP_KERNEL); - if (!table) - return NULL; - sg_init_table(table, nents); - iter = table; - for_each_sg(table, iter, sg_nents(table), i) { - new_page = alloc_page(GFP_KERNEL); - if (!new_page) { - /* release all previous allocated pages in the table */ - iter = table; - for_each_sg(table, iter, sg_nents(table), i) { - new_page = sg_page(iter); - if (new_page) - __free_page(new_page); - } - kfree(table); + +#define N_ENTRIES_PER_PAGE (PAGE_SIZE / sizeof(*result)) + /* + * We need an additional entry for table chaining, + * this ensures the loop can finish i.e. we can + * fit at least two entries per page (obviously, + * many more really fit.)
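+ * As an illustration (assuming a typical 4 KiB page and a
+ * 32-byte struct scatterlist), N_ENTRIES_PER_PAGE is 128, so
+ * each chained table carries 127 data entries plus one entry
+ * reserved for the chain link.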
+ */ + BUILD_BUG_ON(N_ENTRIES_PER_PAGE < 2); + + while (nents > 0) { + struct scatterlist *new, *iter; + int n_fill, n_alloc; + + if (nents <= N_ENTRIES_PER_PAGE) { + /* last needed table */ + n_fill = nents; + n_alloc = nents; + nents = 0; + } else { + /* fill a page with entries */ + n_alloc = N_ENTRIES_PER_PAGE; + /* reserve one for chaining */ + n_fill = n_alloc - 1; + nents -= n_fill; + } + + new = kcalloc(n_alloc, sizeof(*new), GFP_KERNEL); + if (!new) { + if (result) + _devcd_free_sgtable(result); return NULL; } - alloc_size = min_t(int, size, PAGE_SIZE); - size -= PAGE_SIZE; - sg_set_page(iter, new_page, alloc_size, 0); + sg_init_table(new, n_alloc); + + if (!result) + result = new; + else + sg_chain(prev, n_prev, new); + prev = new; + n_prev = n_alloc; + + for_each_sg(new, iter, n_fill, i) { + struct page *new_page = alloc_page(GFP_KERNEL); + + if (!new_page) { + _devcd_free_sgtable(result); + return NULL; + } + + sg_set_page(iter, new_page, PAGE_SIZE, 0); + } } - return table; + + return result; } static void iwl_fw_get_prph_len(struct iwl_fw_runtime *fwrt, @@ -2588,29 +2618,28 @@ static const struct iwl_dump_ini_mem_ops iwl_dump_ini_region_ops[] = { }, }; -static u32 iwl_dump_ini_trigger(struct iwl_fw_runtime *fwrt, - struct iwl_fwrt_dump_data *dump_data, - struct list_head *list) +enum iwl_dump_ini_region_selector { + IWL_INI_DUMP_ALL_REGIONS, + IWL_INI_DUMP_EARLY_REGIONS, + IWL_INI_DUMP_LATE_REGIONS, +}; + +static u32 +iwl_dump_ini_dump_regions(struct iwl_fw_runtime *fwrt, + struct iwl_fwrt_dump_data *dump_data, + struct list_head *list, + enum iwl_fw_ini_time_point tp_id, + u64 regions_mask, + struct iwl_dump_ini_region_data *imr_reg_data, + enum iwl_dump_ini_region_selector which) { - struct iwl_fw_ini_trigger_tlv *trigger = dump_data->trig; - enum iwl_fw_ini_time_point tp_id = le32_to_cpu(trigger->time_point); - struct iwl_dump_ini_region_data reg_data = { - .dump_data = dump_data, - }; - struct iwl_dump_ini_region_data imr_reg_data = { - .dump_data = dump_data, - }; - int i; u32 size = 0; - u64 regions_mask = le64_to_cpu(trigger->regions_mask) & - ~(fwrt->trans->dbg.unsupported_region_msk); - - BUILD_BUG_ON(sizeof(trigger->regions_mask) != sizeof(regions_mask)); - BUILD_BUG_ON((sizeof(trigger->regions_mask) * BITS_PER_BYTE) < - ARRAY_SIZE(fwrt->trans->dbg.active_regions)); - for (i = 0; i < ARRAY_SIZE(fwrt->trans->dbg.active_regions); i++) { - u32 reg_type; + for (int i = 0; i < ARRAY_SIZE(fwrt->trans->dbg.active_regions); i++) { + struct iwl_dump_ini_region_data reg_data = { + .dump_data = dump_data, + }; + u32 reg_type, dp; struct iwl_fw_ini_region_tlv *reg; if (!(BIT_ULL(i) & regions_mask)) @@ -2628,6 +2657,8 @@ static u32 iwl_dump_ini_trigger(struct iwl_fw_runtime *fwrt, if (reg_type >= ARRAY_SIZE(iwl_dump_ini_region_ops)) continue; + dp = le32_get_bits(reg->id, IWL_FW_INI_REGION_DUMP_POLICY_MASK); + if ((reg_type == IWL_FW_INI_REGION_PERIPHERY_PHY || reg_type == IWL_FW_INI_REGION_PERIPHERY_PHY_RANGE || reg_type == IWL_FW_INI_REGION_PERIPHERY_SNPS_DPHYIP) && @@ -2637,6 +2668,20 @@ static u32 iwl_dump_ini_trigger(struct iwl_fw_runtime *fwrt, tp_id); continue; } + + switch (which) { + case IWL_INI_DUMP_ALL_REGIONS: + break; + case IWL_INI_DUMP_EARLY_REGIONS: + if (!(dp & IWL_FW_IWL_DEBUG_DUMP_POLICY_BEFORE_RESET)) + continue; + break; + case IWL_INI_DUMP_LATE_REGIONS: + if (dp & IWL_FW_IWL_DEBUG_DUMP_POLICY_BEFORE_RESET) + continue; + break; + } + /* * DRAM_IMR can be collected only for FW/HW error timepoint * when fw is not alive. 
In addition, it must be collected @@ -2646,7 +2691,8 @@ static u32 iwl_dump_ini_trigger(struct iwl_fw_runtime *fwrt, if (reg_type == IWL_FW_INI_REGION_DRAM_IMR) { if (tp_id == IWL_FW_INI_TIME_POINT_FW_ASSERT || tp_id == IWL_FW_INI_TIME_POINT_FW_HW_ERROR) - imr_reg_data.reg_tlv = fwrt->trans->dbg.active_regions[i]; + imr_reg_data->reg_tlv = + fwrt->trans->dbg.active_regions[i]; else IWL_INFO(fwrt, "WRT: trying to collect DRAM_IMR at time point: %d, skipping\n", @@ -2659,9 +2705,44 @@ static u32 iwl_dump_ini_trigger(struct iwl_fw_runtime *fwrt, size += iwl_dump_ini_mem(fwrt, list, ®_data, &iwl_dump_ini_region_ops[reg_type]); } + + return size; +} + +static u32 iwl_dump_ini_trigger(struct iwl_fw_runtime *fwrt, + struct iwl_fwrt_dump_data *dump_data, + struct list_head *list) +{ + struct iwl_fw_ini_trigger_tlv *trigger = dump_data->trig; + enum iwl_fw_ini_time_point tp_id = le32_to_cpu(trigger->time_point); + struct iwl_dump_ini_region_data imr_reg_data = { + .dump_data = dump_data, + }; + u32 size = 0; + u64 regions_mask = le64_to_cpu(trigger->regions_mask) & + ~(fwrt->trans->dbg.unsupported_region_msk); + + BUILD_BUG_ON(sizeof(trigger->regions_mask) != sizeof(regions_mask)); + BUILD_BUG_ON((sizeof(trigger->regions_mask) * BITS_PER_BYTE) < + ARRAY_SIZE(fwrt->trans->dbg.active_regions)); + + if (trigger->time_point & + cpu_to_le32(IWL_FW_INI_APPLY_POLICY_RESET_HANDSHAKE)) { + size += iwl_dump_ini_dump_regions(fwrt, dump_data, list, tp_id, + regions_mask, &imr_reg_data, + IWL_INI_DUMP_EARLY_REGIONS); + iwl_trans_pcie_fw_reset_handshake(fwrt->trans); + size += iwl_dump_ini_dump_regions(fwrt, dump_data, list, tp_id, + regions_mask, &imr_reg_data, + IWL_INI_DUMP_LATE_REGIONS); + } else { + size += iwl_dump_ini_dump_regions(fwrt, dump_data, list, tp_id, + regions_mask, &imr_reg_data, + IWL_INI_DUMP_ALL_REGIONS); + } /* collect DRAM_IMR region in the last */ if (imr_reg_data.reg_tlv) - size += iwl_dump_ini_mem(fwrt, list, ®_data, + size += iwl_dump_ini_mem(fwrt, list, &imr_reg_data, &iwl_dump_ini_region_ops[IWL_FW_INI_REGION_DRAM_IMR]); if (size) { @@ -3042,9 +3123,8 @@ int iwl_fw_start_dbg_conf(struct iwl_fw_runtime *fwrt, u8 conf_id) } IWL_EXPORT_SYMBOL(iwl_fw_start_dbg_conf); -void iwl_send_dbg_dump_complete_cmd(struct iwl_fw_runtime *fwrt, - u32 timepoint, - u32 timepoint_data) +static void iwl_send_dbg_dump_complete_cmd(struct iwl_fw_runtime *fwrt, + u32 timepoint, u32 timepoint_data) { struct iwl_dbg_dump_complete_cmd hcmd_data; struct iwl_host_cmd hcmd = { @@ -3072,6 +3152,7 @@ static void iwl_fw_dbg_collect_sync(struct iwl_fw_runtime *fwrt, u8 wk_idx) struct iwl_fw_dbg_params params = {0}; struct iwl_fwrt_dump_data *dump_data = &fwrt->dump.wks[wk_idx].dump_data; + if (!test_bit(wk_idx, &fwrt->dump.active_wks)) return; @@ -3096,9 +3177,9 @@ static void iwl_fw_dbg_collect_sync(struct iwl_fw_runtime *fwrt, u8 wk_idx) IWL_DEBUG_FW_INFO(fwrt, "WRT: Data collection start\n"); if (iwl_trans_dbg_ini_valid(fwrt->trans)) - iwl_fw_error_ini_dump(fwrt, &fwrt->dump.wks[wk_idx].dump_data); + iwl_fw_error_ini_dump(fwrt, dump_data); else - iwl_fw_error_dump(fwrt, &fwrt->dump.wks[wk_idx].dump_data); + iwl_fw_error_dump(fwrt, dump_data); IWL_DEBUG_FW_INFO(fwrt, "WRT: Data collection done\n"); iwl_fw_dbg_stop_restart_recording(fwrt, ¶ms, false); @@ -3115,7 +3196,6 @@ static void iwl_fw_dbg_collect_sync(struct iwl_fw_runtime *fwrt, u8 wk_idx) if (fwrt->trans->dbg.last_tp_resetfw == IWL_FW_INI_RESET_FW_MODE_STOP_FW_ONLY) iwl_force_nmi(fwrt->trans); - out: if (iwl_trans_dbg_ini_valid(fwrt->trans)) { 
iwl_fw_error_dump_data_free(dump_data); diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h index 87998374f459e..35e30e5db462d 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h @@ -324,9 +324,6 @@ static inline void iwl_fwrt_update_fw_versions(struct iwl_fw_runtime *fwrt, } void iwl_fwrt_dump_error_logs(struct iwl_fw_runtime *fwrt); -void iwl_send_dbg_dump_complete_cmd(struct iwl_fw_runtime *fwrt, - u32 timepoint, - u32 timepoint_data); bool iwl_fwrt_read_err_table(struct iwl_trans *trans, u32 base, u32 *err_id); void iwl_fw_disable_dbg_asserts(struct iwl_fw_runtime *fwrt); void iwl_fw_dbg_clear_monitor_buf(struct iwl_fw_runtime *fwrt); diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dhc-utils.h b/drivers/net/wireless/intel/iwlwifi/fw/dhc-utils.h new file mode 100644 index 0000000000000..983acee5cd7d8 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/fw/dhc-utils.h @@ -0,0 +1,75 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * Copyright (C) 2021, 2025 Intel Corporation + */ +#ifndef __iwl_fw_dhc_utils_h__ +#define __iwl_fw_dhc_utils_h__ + +#include <linux/types.h> +#include "fw/img.h" +#include "api/commands.h" +#include "api/dhc.h" + +/** + * iwl_dhc_resp_status - return status of DHC response + * @fw: firmware image information + * @pkt: response packet, must not be %NULL + * + * Returns: the status value of the DHC command or (u32)-1 if the + * response was too short. + */ +static inline u32 iwl_dhc_resp_status(const struct iwl_fw *fw, + struct iwl_rx_packet *pkt) +{ + if (iwl_fw_lookup_notif_ver(fw, IWL_ALWAYS_LONG_GROUP, + DEBUG_HOST_COMMAND, 1) >= 2) { + struct iwl_dhc_cmd_resp *resp = (void *)pkt->data; + + if (iwl_rx_packet_payload_len(pkt) < sizeof(*resp)) + return (u32)-1; + + return le32_to_cpu(resp->status); + } else { + struct iwl_dhc_cmd_resp_v1 *resp = (void *)pkt->data; + + if (iwl_rx_packet_payload_len(pkt) < sizeof(*resp)) + return (u32)-1; + + return le32_to_cpu(resp->status); + } +} + +/** + * iwl_dhc_resp_data - return data pointer of DHC response + * @fw: firmware image information + * @pkt: response packet, must not be %NULL + * @len: where to store the length + * + * Returns: The data pointer, or an ERR_PTR() if the data was + * not valid (too short).
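+ *
+ * A caller sketch (hypothetical; it assumes @pkt holds the synchronous
+ * response to a DEBUG_HOST_COMMAND):
+ *
+ *	unsigned int len;
+ *	void *data = iwl_dhc_resp_data(fw, pkt, &len);
+ *
+ *	if (IS_ERR(data))
+ *		return PTR_ERR(data);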
+ */ +static inline void *iwl_dhc_resp_data(const struct iwl_fw *fw, + struct iwl_rx_packet *pkt, + unsigned int *len) +{ + if (iwl_fw_lookup_notif_ver(fw, IWL_ALWAYS_LONG_GROUP, + DEBUG_HOST_COMMAND, 1) >= 2) { + struct iwl_dhc_cmd_resp *resp = (void *)pkt->data; + + if (iwl_rx_packet_payload_len(pkt) < sizeof(*resp)) + return ERR_PTR(-EINVAL); + + *len = iwl_rx_packet_payload_len(pkt) - sizeof(*resp); + return (void *)&resp->data; + } else { + struct iwl_dhc_cmd_resp_v1 *resp = (void *)pkt->data; + + if (iwl_rx_packet_payload_len(pkt) < sizeof(*resp)) + return ERR_PTR(-EINVAL); + + *len = iwl_rx_packet_payload_len(pkt) - sizeof(*resp); + return (void *)&resp->data; + } +} + +#endif /* __iwl_fw_dhc_utils_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dump.c b/drivers/net/wireless/intel/iwlwifi/fw/dump.c index 8e0c85a1240d7..c7b261c8ec969 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/dump.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/dump.c @@ -540,6 +540,9 @@ bool iwl_fwrt_read_err_table(struct iwl_trans *trans, u32 base, u32 *err_id) } err_info = {}; int ret; + if (err_id) + *err_id = 0; + if (!base) return false; diff --git a/drivers/net/wireless/intel/iwlwifi/fw/regulatory.c b/drivers/net/wireless/intel/iwlwifi/fw/regulatory.c index ea435ee94312b..6adcfa6e214a0 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/regulatory.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/regulatory.c @@ -456,7 +456,25 @@ iwl_parse_tas_selection(const u32 tas_selection_in, const u8 tbl_rev) } IWL_EXPORT_SYMBOL(iwl_parse_tas_selection); -static __le32 iwl_get_lari_config_bitmap(struct iwl_fw_runtime *fwrt) +bool iwl_add_mcc_to_tas_block_list(u16 *list, u8 *size, u16 mcc) +{ + for (int i = 0; i < *size; i++) { + if (list[i] == mcc) + return true; + } + + /* Verify that there is room for another country + * If *size == IWL_WTAS_BLACK_LIST_MAX, then the table is full. + */ + if (*size >= IWL_WTAS_BLACK_LIST_MAX) + return false; + + list[(*size)++] = mcc; + return true; +} +IWL_EXPORT_SYMBOL(iwl_add_mcc_to_tas_block_list); + +__le32 iwl_get_lari_config_bitmap(struct iwl_fw_runtime *fwrt) { int ret; u32 val; @@ -503,6 +521,7 @@ static __le32 iwl_get_lari_config_bitmap(struct iwl_fw_runtime *fwrt) return config_bitmap; } +IWL_EXPORT_SYMBOL(iwl_get_lari_config_bitmap); static size_t iwl_get_lari_config_cmd_size(u8 cmd_ver) { @@ -553,6 +572,10 @@ int iwl_fill_lari_config(struct iwl_fw_runtime *fwrt, WIDE_ID(REGULATORY_AND_NVM_GROUP, LARI_CONFIG_CHANGE), 1); + if (WARN_ONCE(cmd_ver > 12, + "Don't add newer versions to this function\n")) + return -EINVAL; + memset(cmd, 0, sizeof(*cmd)); *cmd_size = iwl_get_lari_config_cmd_size(cmd_ver); @@ -674,3 +697,34 @@ bool iwl_puncturing_is_allowed_in_bios(u32 puncturing, u16 mcc) } } IWL_EXPORT_SYMBOL(iwl_puncturing_is_allowed_in_bios); + +bool iwl_rfi_is_enabled_in_bios(struct iwl_fw_runtime *fwrt) +{ + /* default behaviour is disabled */ + u32 value = 0; + int ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_RFI_CONFIG, &value); + + if (ret < 0) { + IWL_DEBUG_RADIO(fwrt, "Failed to get DSM RFI, ret=%d\n", ret); + return false; + } + + value &= DSM_VALUE_RFI_DISABLE; + /* RFI BIOS CONFIG value can be 0 or 3 only. + * i.e. 0 means DDR and DLVR enabled. 3 means DDR and DLVR disabled. + * 1 and 2 are invalid BIOS configurations, so it's not possible to + * disable ddr/dlvr separately.
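+ * In short (restating the checks below): 0 means RFI enabled, 3
+ * (DSM_VALUE_RFI_DISABLE) means disabled, and 1 or 2 are invalid and
+ * treated as disabled.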
+ */ + if (!value) { + IWL_DEBUG_RADIO(fwrt, "DSM RFI is evaluated to enable\n"); + return true; + } else if (value == DSM_VALUE_RFI_DISABLE) { + IWL_DEBUG_RADIO(fwrt, "DSM RFI is evaluated to disable\n"); + } else { + IWL_DEBUG_RADIO(fwrt, + "DSM RFI got invalid value, value=%d\n", value); + } + + return false; +} +IWL_EXPORT_SYMBOL(iwl_rfi_is_enabled_in_bios); diff --git a/drivers/net/wireless/intel/iwlwifi/fw/regulatory.h b/drivers/net/wireless/intel/iwlwifi/fw/regulatory.h index b355d7bef14cd..53693314d5059 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/regulatory.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/regulatory.h @@ -167,6 +167,8 @@ enum iwl_dsm_values_rfi { #define DSM_VALUE_RFI_DISABLE (DSM_VALUE_RFI_DLVR_DISABLE |\ DSM_VALUE_RFI_DDR_DISABLE) +bool iwl_rfi_is_enabled_in_bios(struct iwl_fw_runtime *fwrt); + enum iwl_dsm_masks_reg { DSM_MASK_CHINA_22_REG = BIT(2) }; @@ -190,6 +192,7 @@ int iwl_fill_ppag_table(struct iwl_fw_runtime *fwrt, bool iwl_is_ppag_approved(struct iwl_fw_runtime *fwrt); bool iwl_is_tas_approved(void); +bool iwl_add_mcc_to_tas_block_list(u16 *list, u8 *size, u16 mcc); struct iwl_tas_selection_data iwl_parse_tas_selection(const u32 tas_selection, const u8 tbl_rev); @@ -212,6 +215,7 @@ int iwl_bios_get_mcc(struct iwl_fw_runtime *fwrt, char *mcc); int iwl_bios_get_eckv(struct iwl_fw_runtime *fwrt, u32 *ext_clk); int iwl_bios_get_wbem(struct iwl_fw_runtime *fwrt, u32 *value); +__le32 iwl_get_lari_config_bitmap(struct iwl_fw_runtime *fwrt); int iwl_fill_lari_config(struct iwl_fw_runtime *fwrt, struct iwl_lari_config_change_cmd *cmd, size_t *cmd_size); diff --git a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h index 048877fa7c71a..a9e6bba2419e5 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h @@ -104,6 +104,7 @@ struct iwl_txf_iter_data { * the driver by calling &iwl_fw_set_current_image() * @dump: debug dump data * @uats_table: AP type table + * @uats_valid: is AP type table valid * @uefi_tables_lock_status: The status of the WIFI GUID UEFI variables lock: * 0: Unlocked, 1 and 2: Locked. * Only read the UEFI variables if locked. 
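/*
 * Usage sketch (hypothetical) for iwl_add_mcc_to_tas_block_list(), which
 * is added in fw/regulatory.c above; the array bound mirrors the
 * IWL_WTAS_BLACK_LIST_MAX check inside the helper:
 *
 *	u16 block_list[IWL_WTAS_BLACK_LIST_MAX] = {};
 *	u8 size = 0;
 *
 *	iwl_add_mcc_to_tas_block_list(block_list, &size, 0x5553); // "US"
 *	iwl_add_mcc_to_tas_block_list(block_list, &size, 0x5553);
 *	// the duplicate still returns true, but size stays at 1
 */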
@@ -181,6 +182,7 @@ struct iwl_fw_runtime { struct iwl_sar_offset_mapping_cmd sgom_table; bool sgom_enabled; struct iwl_mcc_allowed_ap_type_cmd uats_table; + bool uats_valid; u8 uefi_tables_lock_status; }; diff --git a/drivers/net/wireless/intel/iwlwifi/fw/uefi.c b/drivers/net/wireless/intel/iwlwifi/fw/uefi.c index 434eed4130b90..386aadbce2a2d 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/uefi.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/uefi.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright(c) 2021-2024 Intel Corporation + * Copyright(c) 2021-2025 Intel Corporation */ #include "iwl-drv.h" @@ -402,6 +402,9 @@ static int iwl_uefi_uats_parse(struct uefi_cnv_wlan_uats_data *uats_data, memcpy(fwrt->uats_table.offset_map, uats_data->offset_map, sizeof(fwrt->uats_table.offset_map)); + + fwrt->uats_valid = true; + return 0; } @@ -678,14 +681,16 @@ int iwl_uefi_get_eckv(struct iwl_fw_runtime *fwrt, u32 *extl_clk) struct uefi_cnv_var_eckv *data; int ret = 0; - data = iwl_uefi_get_verified_variable(fwrt->trans, IWL_UEFI_ECKV_NAME, - "ECKV", sizeof(*data), NULL); + data = iwl_uefi_get_verified_variable_guid(fwrt->trans, + &IWL_EFI_WIFI_BT_GUID, + IWL_UEFI_ECKV_NAME, + "ECKV", sizeof(*data), NULL); if (IS_ERR(data)) return -EINVAL; if (data->revision != IWL_UEFI_ECKV_REVISION) { ret = -EINVAL; - IWL_DEBUG_RADIO(fwrt, "Unsupported UEFI WRDD revision:%d\n", + IWL_DEBUG_RADIO(fwrt, "Unsupported UEFI ECKV revision:%d\n", data->revision); goto out; } diff --git a/drivers/net/wireless/intel/iwlwifi/fw/uefi.h b/drivers/net/wireless/intel/iwlwifi/fw/uefi.h index 0c8943a8bd011..eb3c05417da37 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/uefi.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/uefi.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright(c) 2021-2024 Intel Corporation + * Copyright(c) 2021-2025 Intel Corporation */ #ifndef __iwl_fw_uefi__ #define __iwl_fw_uefi__ @@ -19,7 +19,7 @@ #define IWL_UEFI_WTAS_NAME L"UefiCnvWlanWTAS" #define IWL_UEFI_SPLC_NAME L"UefiCnvWlanSPLC" #define IWL_UEFI_WRDD_NAME L"UefiCnvWlanWRDD" -#define IWL_UEFI_ECKV_NAME L"UefiCnvWlanECKV" +#define IWL_UEFI_ECKV_NAME L"UefiCnvCommonECKV" #define IWL_UEFI_DSM_NAME L"UefiCnvWlanGeneralCfg" #define IWL_UEFI_WBEM_NAME L"UefiCnvWlanWBEM" #define IWL_UEFI_PUNCTURING_NAME L"UefiCnvWlanPuncturing" diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h index 2b6a80142aba9..b9bd89bfdd748 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h @@ -2,7 +2,7 @@ /* * Copyright (C) 2005-2014, 2018-2021 Intel Corporation * Copyright (C) 2016-2017 Intel Deutschland GmbH - * Copyright (C) 2018-2024 Intel Corporation + * Copyright (C) 2018-2025 Intel Corporation */ #ifndef __IWL_CONFIG_H__ #define __IWL_CONFIG_H__ @@ -451,11 +451,8 @@ struct iwl_cfg { #define IWL_CFG_RF_ID_HR 0x7 #define IWL_CFG_RF_ID_HR1 0x4 -#define IWL_CFG_NO_160 0x1 -#define IWL_CFG_160 0x0 - -#define IWL_CFG_NO_320 0x1 -#define IWL_CFG_320 0x0 +#define IWL_CFG_BW_NO_LIM (U16_MAX - 1) +#define IWL_CFG_BW_ANY U16_MAX #define IWL_CFG_CORES_BT 0x0 #define IWL_CFG_CORES_BT_GNSS 0x5 @@ -467,7 +464,7 @@ struct iwl_cfg { #define IWL_CFG_IS_JACKET 0x1 #define IWL_SUBDEVICE_RF_ID(subdevice) ((u16)((subdevice) & 0x00F0) >> 4) -#define IWL_SUBDEVICE_NO_160(subdevice) ((u16)((subdevice) & 0x0200) >> 9) +#define IWL_SUBDEVICE_BW_LIM(subdevice) ((u16)((subdevice) & 0x0200) >> 9) #define 
IWL_SUBDEVICE_CORES(subdevice) ((u16)((subdevice) & 0x1C00) >> 10) struct iwl_dev_info { @@ -475,10 +472,10 @@ struct iwl_dev_info { u16 subdevice; u16 mac_type; u16 rf_type; + u16 bw_limit; u8 mac_step; u8 rf_step; u8 rf_id; - u8 no_160; u8 cores; u8 cdb; u8 jacket; @@ -492,7 +489,7 @@ extern const unsigned int iwl_dev_info_table_size; const struct iwl_dev_info * iwl_pci_find_dev_info(u16 device, u16 subsystem_device, u16 mac_type, u8 mac_step, u16 rf_type, u8 cdb, - u8 jacket, u8 rf_id, u8 no_160, u8 cores, u8 rf_step); + u8 jacket, u8 rf_id, u8 bw_limit, u8 cores, u8 rf_step); extern const struct pci_device_id iwl_hw_card_ids[]; #endif @@ -534,7 +531,6 @@ extern const char iwl9560_killer_1550i_name[]; extern const char iwl9560_killer_1550s_name[]; extern const char iwl_ax200_name[]; extern const char iwl_ax203_name[]; -extern const char iwl_ax204_name[]; extern const char iwl_ax201_name[]; extern const char iwl_ax101_name[]; extern const char iwl_ax200_killer_1650w_name[]; @@ -550,17 +546,13 @@ extern const char iwl_ax211_killer_1675i_name[]; extern const char iwl_ax411_killer_1690s_name[]; extern const char iwl_ax411_killer_1690i_name[]; extern const char iwl_ax211_name[]; -extern const char iwl_ax221_name[]; extern const char iwl_ax231_name[]; extern const char iwl_ax411_name[]; -extern const char iwl_bz_name[]; extern const char iwl_fm_name[]; extern const char iwl_wh_name[]; +extern const char iwl_sp_name[]; extern const char iwl_gl_name[]; extern const char iwl_mtp_name[]; -extern const char iwl_sc_name[]; -extern const char iwl_sc2_name[]; -extern const char iwl_sc2f_name[]; extern const char iwl_dr_name[]; extern const char iwl_br_name[]; #if IS_ENABLED(CONFIG_IWLDVM) @@ -662,6 +654,12 @@ extern const struct iwl_cfg iwl_cfg_ma; extern const struct iwl_cfg iwl_cfg_so_a0_hr_a0; extern const struct iwl_cfg iwl_cfg_quz_a0_hr_b0; +#endif /* CONFIG_IWLMVM */ + +#if IS_ENABLED(CONFIG_IWLMLD) +extern const struct iwl_ht_params iwl_bz_ht_params; + extern const struct iwl_cfg iwl_cfg_bz; extern const struct iwl_cfg iwl_cfg_gl; @@ -671,6 +669,6 @@ extern const struct iwl_cfg iwl_cfg_sc2; extern const struct iwl_cfg iwl_cfg_sc2f; extern const struct iwl_cfg iwl_cfg_dr; extern const struct iwl_cfg iwl_cfg_br; -#endif /* CONFIG_IWLMVM */ +#endif /* CONFIG_IWLMLD */ #endif /* __IWL_CONFIG_H__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h b/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h index cd25a1b9f2ff4..20563a32a21a8 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2018, 2020-2024 Intel Corporation + * Copyright (C) 2018, 2020-2025 Intel Corporation */ #ifndef __iwl_context_info_file_gen3_h__ #define __iwl_context_info_file_gen3_h__ @@ -80,10 +80,12 @@ enum iwl_prph_scratch_flags { * enum iwl_prph_scratch_ext_flags - PRPH scratch control ext flags * @IWL_PRPH_SCRATCH_EXT_URM_FW: switch to URM mode based on fw setting * @IWL_PRPH_SCRATCH_EXT_URM_PERM: switch to permanent URM mode + * @IWL_PRPH_SCRATCH_EXT_32KHZ_CLK_VALID: use external 32 KHz clock */ enum iwl_prph_scratch_ext_flags { - IWL_PRPH_SCRATCH_EXT_URM_FW = BIT(4), - IWL_PRPH_SCRATCH_EXT_URM_PERM = BIT(5), + IWL_PRPH_SCRATCH_EXT_URM_FW = BIT(4), + IWL_PRPH_SCRATCH_EXT_URM_PERM = BIT(5), + IWL_PRPH_SCRATCH_EXT_32KHZ_CLK_VALID = BIT(8), }; /** diff --git 
a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c index 08d990ba8a794..ce787326aa69d 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2018-2024 Intel Corporation + * Copyright (C) 2018-2025 Intel Corporation */ #include <linux/firmware.h> #include "iwl-drv.h" @@ -1372,15 +1372,15 @@ void _iwl_dbg_tlv_time_point(struct iwl_fw_runtime *fwrt, switch (tp_id) { case IWL_FW_INI_TIME_POINT_EARLY: iwl_dbg_tlv_init_cfg(fwrt); - iwl_dbg_tlv_apply_config(fwrt, conf_list); iwl_dbg_tlv_update_drams(fwrt); iwl_dbg_tlv_tp_trigger(fwrt, sync, trig_list, tp_data, NULL); + iwl_dbg_tlv_apply_config(fwrt, conf_list); break; case IWL_FW_INI_TIME_POINT_AFTER_ALIVE: iwl_dbg_tlv_apply_buffers(fwrt); iwl_dbg_tlv_send_hcmds(fwrt, hcmd_list); - iwl_dbg_tlv_apply_config(fwrt, conf_list); iwl_dbg_tlv_tp_trigger(fwrt, sync, trig_list, tp_data, NULL); + iwl_dbg_tlv_apply_config(fwrt, conf_list); break; case IWL_FW_INI_TIME_POINT_PERIODIC: iwl_dbg_tlv_set_periodic_trigs(fwrt); @@ -1390,14 +1390,14 @@ void _iwl_dbg_tlv_time_point(struct iwl_fw_runtime *fwrt, case IWL_FW_INI_TIME_POINT_MISSED_BEACONS: case IWL_FW_INI_TIME_POINT_FW_DHC_NOTIFICATION: iwl_dbg_tlv_send_hcmds(fwrt, hcmd_list); - iwl_dbg_tlv_apply_config(fwrt, conf_list); iwl_dbg_tlv_tp_trigger(fwrt, sync, trig_list, tp_data, iwl_dbg_tlv_check_fw_pkt); + iwl_dbg_tlv_apply_config(fwrt, conf_list); break; default: iwl_dbg_tlv_send_hcmds(fwrt, hcmd_list); - iwl_dbg_tlv_apply_config(fwrt, conf_list); iwl_dbg_tlv_tp_trigger(fwrt, sync, trig_list, tp_data, NULL); + iwl_dbg_tlv_apply_config(fwrt, conf_list); break; } } diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-debug.h b/drivers/net/wireless/intel/iwlwifi/iwl-debug.h index bf52c2edaad12..ea32dc88584f5 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-debug.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-debug.h @@ -2,7 +2,7 @@ /****************************************************************************** * * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2018 - 2021, 2024 Intel Corporation + * Copyright(c) 2018 - 2021, 2024-2025 Intel Corporation * * Portions of this file are derived from the ipw3945 project. *****************************************************************************/ @@ -156,6 +156,7 @@ do { \ #define IWL_DL_FW 0x00010000 #define IWL_DL_RF_KILL 0x00020000 #define IWL_DL_TPT 0x00040000 +#define IWL_DL_PTP 0x00080000 /* 0x00F00000 - 0x00100000 */ #define IWL_DL_RATE 0x00100000 #define IWL_DL_CALIB 0x00200000 @@ -165,7 +166,7 @@ do { \ #define IWL_DL_RX 0x01000000 #define IWL_DL_ISR 0x02000000 #define IWL_DL_HT 0x04000000 -#define IWL_DL_EXTERNAL 0x08000000 +#define IWL_DL_EHT 0x08000000 /* 0xF0000000 - 0x10000000 */ #define IWL_DL_11H 0x10000000 #define IWL_DL_STATS 0x20000000 @@ -175,7 +176,6 @@ do { \ #define IWL_DEBUG_INFO(p, f, a...) IWL_DEBUG(p, IWL_DL_INFO, f, ## a) #define IWL_DEBUG_TDLS(p, f, a...) IWL_DEBUG(p, IWL_DL_TDLS, f, ## a) #define IWL_DEBUG_MAC80211(p, f, a...) IWL_DEBUG(p, IWL_DL_MAC80211, f, ## a) -#define IWL_DEBUG_EXTERNAL(p, f, a...) IWL_DEBUG(p, IWL_DL_EXTERNAL, f, ## a) #define IWL_DEBUG_TEMP(p, f, a...) IWL_DEBUG(p, IWL_DL_TEMP, f, ## a) #define IWL_DEBUG_SCAN(p, f, a...) IWL_DEBUG(p, IWL_DL_SCAN, f, ## a) #define IWL_DEBUG_RX(p, f, a...) IWL_DEBUG(p, IWL_DL_RX, f, ## a) @@ -216,5 +216,6 @@ do { \ #define IWL_DEBUG_LAR(p, f, a...) 
IWL_DEBUG(p, IWL_DL_LAR, f, ## a) #define IWL_DEBUG_FW_INFO(p, f, a...) \ IWL_DEBUG(p, IWL_DL_INFO | IWL_DL_FW, f, ## a) - +#define IWL_DEBUG_PTP(p, f, a...) IWL_DEBUG(p, IWL_DL_PTP, f, ## a) +#define IWL_DEBUG_EHT(p, f, a...) IWL_DEBUG(p, IWL_DL_EHT, f, ## a) #endif diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c index d3a65f33097cb..d36837501e08a 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2005-2014, 2018-2024 Intel Corporation + * Copyright (C) 2005-2014, 2018-2025 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -75,6 +75,9 @@ struct iwl_drv { enum { DVM_OP_MODE, MVM_OP_MODE, +#if IS_ENABLED(CONFIG_IWLMLD) + MLD_OP_MODE, +#endif }; /* Protects the table contents, i.e. the ops pointer & drv list */ @@ -86,6 +89,9 @@ static struct iwlwifi_opmode_table { } iwlwifi_opmode_table[] = { /* ops set when driver is initialized */ [DVM_OP_MODE] = { .name = "iwldvm", .ops = NULL }, [MVM_OP_MODE] = { .name = "iwlmvm", .ops = NULL }, +#if IS_ENABLED(CONFIG_IWLMLD) + [MLD_OP_MODE] = { .name = "iwlmld", .ops = NULL }, +#endif }; #define IWL_DEFAULT_SCAN_CHANNELS 40 @@ -168,6 +174,11 @@ static inline char iwl_drv_get_step(int step) return 'a' + step; } +static bool iwl_drv_is_wifi7_supported(struct iwl_trans *trans) +{ + return CSR_HW_RFID_TYPE(trans->hw_rf_id) >= IWL_CFG_RF_TYPE_FM; +} + const char *iwl_drv_get_fwname_pre(struct iwl_trans *trans, char *buf) { char mac_step, rf_step; @@ -316,6 +327,7 @@ struct iwl_firmware_pieces { size_t dbg_trigger_tlv_len[FW_DBG_TRIGGER_MAX]; struct iwl_fw_dbg_mem_seg_tlv *dbg_mem_tlv; size_t n_mem_tlv; + u32 major; }; static void alloc_sec_data(struct iwl_firmware_pieces *pieces, @@ -957,19 +969,19 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv, break; case IWL_UCODE_TLV_FW_VERSION: { const __le32 *ptr = (const void *)tlv_data; - u32 major, minor; + u32 minor; u8 local_comp; if (tlv_len != sizeof(u32) * 3) goto invalid_tlv_len; - major = le32_to_cpup(ptr++); + pieces->major = le32_to_cpup(ptr++); minor = le32_to_cpup(ptr++); local_comp = le32_to_cpup(ptr); snprintf(drv->fw.fw_version, sizeof(drv->fw.fw_version), - "%u.%08x.%u %s", major, minor, + "%u.%08x.%u %s", pieces->major, minor, local_comp, iwl_reduced_fw_name(drv)); break; } @@ -1181,7 +1193,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv, if (tlv_len != sizeof(*fseq_ver)) goto invalid_tlv_len; - IWL_INFO(drv, "TLV_FW_FSEQ_VERSION: %s\n", + IWL_INFO(drv, "TLV_FW_FSEQ_VERSION: %.32s\n", fseq_ver->version); } break; @@ -1468,6 +1480,8 @@ static void _iwl_op_mode_stop(struct iwl_drv *drv) } } +#define IWL_MLD_SUPPORTED_FW_VERSION 97 + /* * iwl_req_fw_callback - callback when firmware was loaded * @@ -1720,6 +1734,20 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context) break; } +#if IS_ENABLED(CONFIG_IWLMLD) + if (pieces->major >= IWL_MLD_SUPPORTED_FW_VERSION && + iwl_drv_is_wifi7_supported(drv->trans)) + op = &iwlwifi_opmode_table[MLD_OP_MODE]; +#else + if (pieces->major >= IWL_MLD_SUPPORTED_FW_VERSION && + iwl_drv_is_wifi7_supported(drv->trans)) { + IWL_ERR(drv, + "IWLMLD needs to be compiled to support this firmware\n"); + mutex_unlock(&iwlwifi_opmode_table_mtx); + goto out_unbind; + } +#endif + IWL_INFO(drv, "loaded firmware version %s op_mode %s\n", 
drv->fw.fw_version, op->name); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c index 9f7e013252feb..cd1b0048bb6da 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2005-2014, 2018-2023 Intel Corporation + * Copyright (C) 2005-2014, 2018-2023, 2025 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -684,10 +684,13 @@ static const struct ieee80211_sband_iftype_data iwl_he_eht_capa[] = { .has_eht = true, .eht_cap_elem = { .mac_cap_info[0] = + IEEE80211_EHT_MAC_CAP0_EPCS_PRIO_ACCESS | IEEE80211_EHT_MAC_CAP0_OM_CONTROL | IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE1 | IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE2 | IEEE80211_EHT_MAC_CAP0_SCS_TRAFFIC_DESC, + .mac_cap_info[1] = + IEEE80211_EHT_MAC_CAP1_UNSOL_EPCS_PRIO_ACCESS, .phy_cap_info[0] = IEEE80211_EHT_PHY_CAP0_242_TONE_RU_GT20MHZ | IEEE80211_EHT_PHY_CAP0_NDP_4_EHT_LFT_32_GI | @@ -913,11 +916,8 @@ iwl_nvm_fixup_sband_iftd(struct iwl_trans *trans, { bool is_ap = iftype_data->types_mask & (BIT(NL80211_IFTYPE_AP) | BIT(NL80211_IFTYPE_P2P_GO)); - bool no_320; - - no_320 = (!trans->trans_cfg->integrated && - trans->pcie_link_speed < PCI_EXP_LNKSTA_CLS_8_0GB) || - trans->reduced_cap_sku; + bool slow_pcie = (!trans->trans_cfg->integrated && + trans->pcie_link_speed < PCI_EXP_LNKSTA_CLS_8_0GB); if (!data->sku_cap_11be_enable || iwlwifi_mod_params.disable_11be) iftype_data->eht_cap.has_eht = false; @@ -944,7 +944,8 @@ iwl_nvm_fixup_sband_iftd(struct iwl_trans *trans, IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_MASK); break; case NL80211_BAND_6GHZ: - if (!no_320) { + if (!trans->reduced_cap_sku && + trans->bw_limit >= 320) { iftype_data->eht_cap.eht_cap_elem.phy_cap_info[0] |= IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ; iftype_data->eht_cap.eht_cap_elem.phy_cap_info[1] |= @@ -986,6 +987,14 @@ iwl_nvm_fixup_sband_iftd(struct iwl_trans *trans, iftype_data->eht_cap.eht_cap_elem.phy_cap_info[4] |= 0x10; } } + + if (slow_pcie) { + struct ieee80211_eht_mcs_nss_supp *mcs_nss = + &iftype_data->eht_cap.eht_mcs_nss_supp; + + mcs_nss->bw._320.rx_tx_mcs11_max_nss = 0; + mcs_nss->bw._320.rx_tx_mcs13_max_nss = 0; + } } else { struct ieee80211_he_mcs_nss_supp *he_mcs_nss_supp = &iftype_data->he_cap.he_mcs_nss_supp; @@ -1086,19 +1095,22 @@ iwl_nvm_fixup_sband_iftd(struct iwl_trans *trans, iftype_data->eht_cap.eht_mcs_nss_supp.bw._320.rx_tx_mcs13_max_nss = 0; } - if (trans->no_160) + if (trans->bw_limit < 160) iftype_data->he_cap.he_cap_elem.phy_cap_info[0] &= ~IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G; - if (trans->reduced_cap_sku) { + if (trans->bw_limit < 320 || trans->reduced_cap_sku) { memset(&iftype_data->eht_cap.eht_mcs_nss_supp.bw._320, 0, sizeof(iftype_data->eht_cap.eht_mcs_nss_supp.bw._320)); + iftype_data->eht_cap.eht_cap_elem.phy_cap_info[2] &= + ~IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_320MHZ_MASK; + } + + if (trans->reduced_cap_sku) { iftype_data->eht_cap.eht_mcs_nss_supp.bw._80.rx_tx_mcs13_max_nss = 0; iftype_data->eht_cap.eht_mcs_nss_supp.bw._160.rx_tx_mcs13_max_nss = 0; iftype_data->eht_cap.eht_cap_elem.phy_cap_info[8] &= ~IEEE80211_EHT_PHY_CAP8_RX_4096QAM_WIDER_BW_DL_OFDMA; - iftype_data->eht_cap.eht_cap_elem.phy_cap_info[2] &= - ~IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_320MHZ_MASK; } } diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c 
b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c index 49c8507d1a6b1..c1607b6d0759e 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c @@ -2,7 +2,7 @@ /* * Copyright (C) 2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH - * Copyright (C) 2019-2021, 2023-2024 Intel Corporation + * Copyright (C) 2019-2021, 2023-2025 Intel Corporation */ #include <linux/kernel.h> #include <linux/bsearch.h> @@ -97,7 +97,7 @@ static void iwl_trans_reprobe_wk(struct work_struct *wk) module_put(THIS_MODULE); } -#define IWL_TRANS_RESET_OK_TIME 180 /* seconds */ +#define IWL_TRANS_RESET_OK_TIME 7 /* seconds */ static enum iwl_reset_mode iwl_trans_determine_restart_mode(struct iwl_trans *trans) @@ -403,6 +403,8 @@ void iwl_trans_op_mode_leave(struct iwl_trans *trans) iwl_trans_pcie_op_mode_leave(trans); + cancel_work_sync(&trans->restart.wk); + trans->op_mode = NULL; trans->state = IWL_TRANS_NO_FW; @@ -639,6 +641,9 @@ IWL_EXPORT_SYMBOL(iwl_trans_tx); void iwl_trans_reclaim(struct iwl_trans *trans, int queue, int ssn, struct sk_buff_head *skbs, bool is_flush) { + if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status))) + return; + if (WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE, "bad state = %d\n", trans->state)) return; @@ -671,6 +676,9 @@ IWL_EXPORT_SYMBOL(iwl_trans_txq_enable_cfg); int iwl_trans_wait_txq_empty(struct iwl_trans *trans, int queue) { + if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status))) + return -EIO; + if (WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE, "bad state = %d\n", trans->state)) return -EIO; diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h index f6234065dbdde..25fb4c50e38b1 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2005-2014, 2018-2023 Intel Corporation + * Copyright (C) 2005-2014, 2018-2023, 2025 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -876,7 +876,7 @@ struct iwl_txq { * only valid for discrete (not integrated) NICs * @invalid_tx_cmd: invalid TX command buffer * @reduced_cap_sku: reduced capability supported SKU - * @no_160: device not supporting 160 MHz + * @bw_limit: the maximum allowed bandwidth, in MHz * @step_urm: STEP is in URM, no support for MCS>9 in 320 MHz * @restart: restart worker data * @restart.wk: restart worker @@ -888,6 +888,7 @@ struct iwl_txq { * @trans_specific: data for the specific transport this is allocated for/with * @dsbr_urm_fw_dependent: switch to URM based on fw settings * @dsbr_urm_permanent: switch to URM permanently + * @ext_32khz_clock_valid: if true, the external 32 KHz clock can be used */ struct iwl_trans { bool csme_own; @@ -910,11 +911,14 @@ struct iwl_trans { char hw_id_str[52]; u32 sku_id[3]; bool reduced_cap_sku; - u8 no_160:1, step_urm:1; + u16 bw_limit; + bool step_urm; u8 dsbr_urm_fw_dependent:1, dsbr_urm_permanent:1; + bool ext_32khz_clock_valid; + u8 rx_mpdu_cmd, rx_mpdu_cmd_hdr_size; bool pm_support; @@ -1261,6 +1265,7 @@ enum iwl_reset_mode { }; void iwl_trans_pcie_reset(struct iwl_trans *trans, enum iwl_reset_mode mode); +void iwl_trans_pcie_fw_reset_handshake(struct iwl_trans *trans); int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd); diff --git a/drivers/net/wireless/intel/iwlwifi/mld/Makefile b/drivers/net/wireless/intel/iwlwifi/mld/Makefile new file 
mode 100644 index 0000000000000..ece66e7a9be41 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mld/Makefile @@ -0,0 +1,16 @@ +# SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +obj-$(CONFIG_IWLMLD) += iwlmld.o +obj-$(CONFIG_IWLWIFI_KUNIT_TESTS) += tests/ + +iwlmld-y += mld.o notif.o mac80211.o fw.o power.o iface.o link.o rx.o mcc.o session-protect.o phy.o +iwlmld-y += scan.o sta.o tx.o coex.o tlc.o agg.o key.o regulatory.o ap.o thermal.o roc.o stats.o +iwlmld-y += low_latency.o mlo.o ptp.o time_sync.o ftm-initiator.o +iwlmld-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o +iwlmld-$(CONFIG_IWLWIFI_LEDS) += led.o +iwlmld-$(CONFIG_PM_SLEEP) += d3.o + +# non-upstream things +iwlmld-$(CONFIG_IWL_VENDOR_CMDS) += vendor-cmd.o +iwlmld-$(CONFIG_IWLMVM_AX_SOFTAP_TESTMODE) += ax-softap-testmode.o + +subdir-ccflags-y += -I$(src)/../ diff --git a/drivers/net/wireless/intel/iwlwifi/mld/agg.c b/drivers/net/wireless/intel/iwlwifi/mld/agg.c new file mode 100644 index 0000000000000..db9e0f04f4b77 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mld/agg.c @@ -0,0 +1,670 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2024-2025 Intel Corporation + */ +#include "agg.h" +#include "sta.h" +#include "hcmd.h" + +static void +iwl_mld_reorder_release_frames(struct iwl_mld *mld, struct ieee80211_sta *sta, + struct napi_struct *napi, + struct iwl_mld_baid_data *baid_data, + struct iwl_mld_reorder_buffer *reorder_buf, + u16 nssn) +{ + struct iwl_mld_reorder_buf_entry *entries = + &baid_data->entries[reorder_buf->queue * + baid_data->entries_per_queue]; + u16 ssn = reorder_buf->head_sn; + + while (ieee80211_sn_less(ssn, nssn)) { + int index = ssn % baid_data->buf_size; + struct sk_buff_head *skb_list = &entries[index].frames; + struct sk_buff *skb; + + ssn = ieee80211_sn_inc(ssn); + + /* Empty the list. Will have more than one frame for A-MSDU. + * Empty list is valid as well since nssn indicates frames were + * received. 
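+ * For example, a three-subframe A-MSDU queues three skbs under a single
+ * SN, and the loop below drains all of them in one pass.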
 + */ + while ((skb = __skb_dequeue(skb_list))) { + iwl_mld_pass_packet_to_mac80211(mld, napi, skb, + reorder_buf->queue, + sta); + reorder_buf->num_stored--; + } + } + reorder_buf->head_sn = nssn; +} + +static void iwl_mld_release_frames_from_notif(struct iwl_mld *mld, + struct napi_struct *napi, + u8 baid, u16 nssn, int queue) +{ + struct iwl_mld_reorder_buffer *reorder_buf; + struct iwl_mld_baid_data *ba_data; + struct ieee80211_link_sta *link_sta; + u32 sta_id; + + IWL_DEBUG_HT(mld, "Frame release notification for BAID %u, NSSN %d\n", + baid, nssn); + + if (WARN_ON_ONCE(baid == IWL_RX_REORDER_DATA_INVALID_BAID || + baid >= ARRAY_SIZE(mld->fw_id_to_ba))) + return; + + rcu_read_lock(); + + ba_data = rcu_dereference(mld->fw_id_to_ba[baid]); + if (!ba_data) { + IWL_DEBUG_HT(mld, "BAID %d not found in map\n", baid); + goto out_unlock; + } + + /* pick any STA ID to find the pointer */ + sta_id = ffs(ba_data->sta_mask) - 1; + link_sta = rcu_dereference(mld->fw_id_to_link_sta[sta_id]); + if (WARN_ON_ONCE(IS_ERR_OR_NULL(link_sta) || !link_sta->sta)) + goto out_unlock; + + reorder_buf = &ba_data->reorder_buf[queue]; + + iwl_mld_reorder_release_frames(mld, link_sta->sta, napi, ba_data, + reorder_buf, nssn); +out_unlock: + rcu_read_unlock(); +} + +void iwl_mld_handle_frame_release_notif(struct iwl_mld *mld, + struct napi_struct *napi, + struct iwl_rx_packet *pkt, int queue) +{ + struct iwl_frame_release *release = (void *)pkt->data; + u32 pkt_len = iwl_rx_packet_payload_len(pkt); + + if (IWL_FW_CHECK(mld, pkt_len < sizeof(*release), + "Unexpected frame release notif size %u (expected %zu)\n", + pkt_len, sizeof(*release))) + return; + + iwl_mld_release_frames_from_notif(mld, napi, release->baid, + le16_to_cpu(release->nssn), + queue); +} + +void iwl_mld_handle_bar_frame_release_notif(struct iwl_mld *mld, + struct napi_struct *napi, + struct iwl_rx_packet *pkt, + int queue) +{ + struct iwl_bar_frame_release *release = (void *)pkt->data; + struct iwl_mld_baid_data *baid_data; + unsigned int baid, nssn, sta_id, tid; + u32 pkt_len = iwl_rx_packet_payload_len(pkt); + + if (IWL_FW_CHECK(mld, pkt_len < sizeof(*release), + "Unexpected frame release notif size %u (expected %zu)\n", + pkt_len, sizeof(*release))) + return; + + baid = le32_get_bits(release->ba_info, + IWL_BAR_FRAME_RELEASE_BAID_MASK); + nssn = le32_get_bits(release->ba_info, + IWL_BAR_FRAME_RELEASE_NSSN_MASK); + sta_id = le32_get_bits(release->sta_tid, + IWL_BAR_FRAME_RELEASE_STA_MASK); + tid = le32_get_bits(release->sta_tid, + IWL_BAR_FRAME_RELEASE_TID_MASK); + + if (IWL_FW_CHECK(mld, baid >= ARRAY_SIZE(mld->fw_id_to_ba), + "BAR release: invalid BAID (%x)\n", baid)) + return; + + rcu_read_lock(); + baid_data = rcu_dereference(mld->fw_id_to_ba[baid]); + if (IWL_FW_CHECK(mld, !baid_data, + "Got valid BAID %d but not allocated, invalid BAR release!\n", + baid)) + goto out_unlock; + + if (IWL_FW_CHECK(mld, tid != baid_data->tid || + sta_id > mld->fw->ucode_capa.num_stations || + !(baid_data->sta_mask & BIT(sta_id)), + "BAID 0x%x is mapped to sta_mask:0x%x tid:%d, but BAR release received for sta:%d tid:%d\n", + baid, baid_data->sta_mask, baid_data->tid, sta_id, + tid)) + goto out_unlock; + + IWL_DEBUG_DROP(mld, "Received a BAR, expect packet loss: nssn %d\n", + nssn); + + iwl_mld_release_frames_from_notif(mld, napi, baid, nssn, queue); +out_unlock: + rcu_read_unlock(); +} + +void iwl_mld_del_ba(struct iwl_mld *mld, int queue, + struct iwl_mld_delba_data *data) +{ + struct iwl_mld_baid_data *ba_data; + struct iwl_mld_reorder_buffer *reorder_buf; + 
struct ieee80211_link_sta *link_sta; + u8 baid = data->baid; + u32 sta_id; + + if (WARN_ONCE(baid >= IWL_MAX_BAID, "invalid BAID: %x\n", baid)) + return; + + rcu_read_lock(); + + ba_data = rcu_dereference(mld->fw_id_to_ba[baid]); + if (WARN_ON_ONCE(!ba_data)) + goto out_unlock; + + /* pick any STA ID to find the pointer */ + sta_id = ffs(ba_data->sta_mask) - 1; + link_sta = rcu_dereference(mld->fw_id_to_link_sta[sta_id]); + if (WARN_ON_ONCE(IS_ERR_OR_NULL(link_sta) || !link_sta->sta)) + goto out_unlock; + + reorder_buf = &ba_data->reorder_buf[queue]; + + /* release all frames that are in the reorder buffer to the stack */ + iwl_mld_reorder_release_frames(mld, link_sta->sta, NULL, + ba_data, reorder_buf, + ieee80211_sn_add(reorder_buf->head_sn, + ba_data->buf_size)); +out_unlock: + rcu_read_unlock(); +} + +/* Returns a &enum iwl_mld_reorder_result value: whether the MPDU was + * buffered/dropped, or whether it should be passed to the upper layer. + */ +enum iwl_mld_reorder_result +iwl_mld_reorder(struct iwl_mld *mld, struct napi_struct *napi, + int queue, struct ieee80211_sta *sta, + struct sk_buff *skb, struct iwl_rx_mpdu_desc *desc) +{ + struct ieee80211_hdr *hdr = (void *)skb_mac_header(skb); + struct iwl_mld_baid_data *baid_data; + struct iwl_mld_reorder_buffer *buffer; + struct iwl_mld_reorder_buf_entry *entries; + struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta); + struct iwl_mld_link_sta *mld_link_sta; + u32 reorder = le32_to_cpu(desc->reorder_data); + bool amsdu, last_subframe, is_old_sn, is_dup; + u8 tid = ieee80211_get_tid(hdr); + u8 baid; + u16 nssn, sn; + u32 sta_mask = 0; + int index; + u8 link_id; + + baid = u32_get_bits(reorder, IWL_RX_MPDU_REORDER_BAID_MASK); + + /* This also covers the case of receiving a Block Ack Request + * outside a BA session; we'll pass it to mac80211 and that + * then sends a delBA action frame. + * This also covers pure monitor mode, in which case we won't + * have any BA sessions. 
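+ * (In both of these cases the reorder data carries the invalid BAID, so
+ * the early return below hands the frame to mac80211 untouched.)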
+ */ + if (baid == IWL_RX_REORDER_DATA_INVALID_BAID) + return IWL_MLD_PASS_SKB; + + /* no sta yet */ + if (WARN_ONCE(!sta, + "Got valid BAID without a valid station assigned\n")) + return IWL_MLD_PASS_SKB; + + /* not a data packet */ + if (!ieee80211_is_data_qos(hdr->frame_control) || + is_multicast_ether_addr(hdr->addr1)) + return IWL_MLD_PASS_SKB; + + if (unlikely(!ieee80211_is_data_present(hdr->frame_control))) + return IWL_MLD_PASS_SKB; + + baid_data = rcu_dereference(mld->fw_id_to_ba[baid]); + if (!baid_data) { + IWL_DEBUG_HT(mld, + "Got valid BAID but no baid allocated, bypass re-ordering (BAID=%d reorder=0x%x)\n", + baid, reorder); + return IWL_MLD_PASS_SKB; + } + + for_each_mld_link_sta(mld_sta, mld_link_sta, link_id) + sta_mask |= BIT(mld_link_sta->fw_id); + + /* verify the BAID is correctly mapped to the sta and tid */ + if (IWL_FW_CHECK(mld, + tid != baid_data->tid || + !(sta_mask & baid_data->sta_mask), + "BAID 0x%x is mapped to sta_mask:0x%x tid:%d, but was received for sta_mask:0x%x tid:%d\n", + baid, baid_data->sta_mask, baid_data->tid, + sta_mask, tid)) + return IWL_MLD_PASS_SKB; + + buffer = &baid_data->reorder_buf[queue]; + entries = &baid_data->entries[queue * baid_data->entries_per_queue]; + + is_old_sn = !!(reorder & IWL_RX_MPDU_REORDER_BA_OLD_SN); + + if (!buffer->valid && is_old_sn) + return IWL_MLD_PASS_SKB; + + buffer->valid = true; + + is_dup = !!(desc->status & cpu_to_le32(IWL_RX_MPDU_STATUS_DUPLICATE)); + + /* drop any duplicated or outdated packets */ + if (is_dup || is_old_sn) + return IWL_MLD_DROP_SKB; + + sn = u32_get_bits(reorder, IWL_RX_MPDU_REORDER_SN_MASK); + nssn = u32_get_bits(reorder, IWL_RX_MPDU_REORDER_NSSN_MASK); + amsdu = desc->mac_flags2 & IWL_RX_MPDU_MFLG2_AMSDU; + last_subframe = desc->amsdu_info & IWL_RX_MPDU_AMSDU_LAST_SUBFRAME; + + /* release immediately if allowed by nssn and no stored frames */ + if (!buffer->num_stored && ieee80211_sn_less(sn, nssn)) { + if (!amsdu || last_subframe) + buffer->head_sn = nssn; + return IWL_MLD_PASS_SKB; + } + + /* release immediately if there are no stored frames, and the sn is + * equal to the head. + * This can happen due to reorder timer, where NSSN is behind head_sn. + * When we released everything, and we got the next frame in the + * sequence, according to the NSSN we can't release immediately, + * while technically there is no hole and we can move forward. + */ + if (!buffer->num_stored && sn == buffer->head_sn) { + if (!amsdu || last_subframe) + buffer->head_sn = ieee80211_sn_inc(buffer->head_sn); + return IWL_MLD_PASS_SKB; + } + + /* put in reorder buffer */ + index = sn % baid_data->buf_size; + __skb_queue_tail(&entries[index].frames, skb); + buffer->num_stored++; + + /* We cannot trust NSSN for AMSDU sub-frames that are not the last. The + * reason is that NSSN advances on the first sub-frame, and may cause + * the reorder buffer to advance before all the sub-frames arrive. + * + * Example: reorder buffer contains SN 0 & 2, and we receive AMSDU with + * SN 1. NSSN for first sub frame will be 3 with the result of driver + * releasing SN 0,1, 2. When sub-frame 1 arrives - reorder buffer is + * already ahead and it will be dropped. + * If the last sub-frame is not on this queue - we will get frame + * release notification with up to date NSSN. 
+ */ + if (!amsdu || last_subframe) + iwl_mld_reorder_release_frames(mld, sta, napi, baid_data, + buffer, nssn); + + return IWL_MLD_BUFFERED_SKB; +} +EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_mld_reorder); + +static void iwl_mld_rx_agg_session_expired(struct timer_list *t) +{ + struct iwl_mld_baid_data *data = + from_timer(data, t, session_timer); + struct iwl_mld_baid_data __rcu **rcu_ptr = data->rcu_ptr; + struct iwl_mld_baid_data *ba_data; + struct ieee80211_link_sta *link_sta; + struct iwl_mld_sta *mld_sta; + unsigned long timeout; + unsigned int sta_id; + + rcu_read_lock(); + + ba_data = rcu_dereference(*rcu_ptr); + if (WARN_ON(!ba_data)) + goto unlock; + + if (WARN_ON(!ba_data->timeout)) + goto unlock; + + timeout = ba_data->last_rx_timestamp + + TU_TO_JIFFIES(ba_data->timeout * 2); + if (time_is_after_jiffies(timeout)) { + mod_timer(&ba_data->session_timer, timeout); + goto unlock; + } + + /* timer expired, pick any STA ID to find the pointer */ + sta_id = ffs(ba_data->sta_mask) - 1; + link_sta = rcu_dereference(ba_data->mld->fw_id_to_link_sta[sta_id]); + + /* sta should be valid unless the following happens: + * The firmware asserts which triggers a reconfig flow, but + * the reconfig fails before we set the pointer to sta into + * the fw_id_to_link_sta pointer table. mac80211 can't stop + * A-MPDU and hence the timer continues to run. Then, the + * timer expires and sta is NULL. + */ + if (IS_ERR_OR_NULL(link_sta) || WARN_ON(!link_sta->sta)) + goto unlock; + + mld_sta = iwl_mld_sta_from_mac80211(link_sta->sta); + ieee80211_rx_ba_timer_expired(mld_sta->vif, link_sta->sta->addr, + ba_data->tid); +unlock: + rcu_read_unlock(); +} + +static int +iwl_mld_stop_ba_in_fw(struct iwl_mld *mld, struct ieee80211_sta *sta, int tid) +{ + struct iwl_rx_baid_cfg_cmd cmd = { + .action = cpu_to_le32(IWL_RX_BAID_ACTION_REMOVE), + .remove.sta_id_mask = + cpu_to_le32(iwl_mld_fw_sta_id_mask(mld, sta)), + .remove.tid = cpu_to_le32(tid), + + }; + int ret; + + ret = iwl_mld_send_cmd_pdu(mld, + WIDE_ID(DATA_PATH_GROUP, + RX_BAID_ALLOCATION_CONFIG_CMD), + &cmd); + if (ret) + return ret; + + IWL_DEBUG_HT(mld, "RX BA Session stopped in fw\n"); + + return ret; +} + +static int +iwl_mld_start_ba_in_fw(struct iwl_mld *mld, struct ieee80211_sta *sta, + int tid, u16 ssn, u16 buf_size) +{ + struct iwl_rx_baid_cfg_cmd cmd = { + .action = cpu_to_le32(IWL_RX_BAID_ACTION_ADD), + .alloc.sta_id_mask = + cpu_to_le32(iwl_mld_fw_sta_id_mask(mld, sta)), + .alloc.tid = tid, + .alloc.ssn = cpu_to_le16(ssn), + .alloc.win_size = cpu_to_le16(buf_size), + }; + struct iwl_host_cmd hcmd = { + .id = WIDE_ID(DATA_PATH_GROUP, RX_BAID_ALLOCATION_CONFIG_CMD), + .flags = CMD_WANT_SKB, + .len[0] = sizeof(cmd), + .data[0] = &cmd, + }; + struct iwl_rx_baid_cfg_resp *resp; + struct iwl_rx_packet *pkt; + u32 resp_len; + int ret, baid; + + BUILD_BUG_ON(sizeof(*resp) != sizeof(baid)); + + ret = iwl_mld_send_cmd(mld, &hcmd); + if (ret) + return ret; + + pkt = hcmd.resp_pkt; + + resp_len = iwl_rx_packet_payload_len(pkt); + if (IWL_FW_CHECK(mld, resp_len != sizeof(*resp), + "BAID_ALLOC_CMD: unexpected response length %d\n", + resp_len)) { + ret = -EIO; + goto out; + } + + IWL_DEBUG_HT(mld, "RX BA Session started in fw\n"); + + resp = (void *)pkt->data; + baid = le32_to_cpu(resp->baid); + + if (IWL_FW_CHECK(mld, baid < 0 || baid >= ARRAY_SIZE(mld->fw_id_to_ba), + "BAID_ALLOC_CMD: invalid BAID response %d\n", baid)) { + ret = -EINVAL; + goto out; + } + + ret = baid; +out: + iwl_free_resp(&hcmd); + return ret; +} + +static void 
iwl_mld_init_reorder_buffer(struct iwl_mld *mld, + struct iwl_mld_baid_data *data, + u16 ssn) +{ + for (int i = 0; i < mld->trans->num_rx_queues; i++) { + struct iwl_mld_reorder_buffer *reorder_buf = + &data->reorder_buf[i]; + struct iwl_mld_reorder_buf_entry *entries = + &data->entries[i * data->entries_per_queue]; + + reorder_buf->head_sn = ssn; + reorder_buf->queue = i; + + for (int j = 0; j < data->buf_size; j++) + __skb_queue_head_init(&entries[j].frames); + } +} + +static void iwl_mld_free_reorder_buffer(struct iwl_mld *mld, + struct iwl_mld_baid_data *data) +{ + struct iwl_mld_delba_data delba_data = { + .baid = data->baid, + }; + + iwl_mld_sync_rx_queues(mld, IWL_MLD_RXQ_NOTIF_DEL_BA, + &delba_data, sizeof(delba_data)); + + for (int i = 0; i < mld->trans->num_rx_queues; i++) { + struct iwl_mld_reorder_buffer *reorder_buf = + &data->reorder_buf[i]; + struct iwl_mld_reorder_buf_entry *entries = + &data->entries[i * data->entries_per_queue]; + + if (likely(!reorder_buf->num_stored)) + continue; + + /* This shouldn't happen in regular DELBA since the RX queues + * sync internal DELBA notification should trigger a release + * of all frames in the reorder buffer. + */ + WARN_ON(1); + + for (int j = 0; j < data->buf_size; j++) + __skb_queue_purge(&entries[j].frames); + } +} + +int iwl_mld_ampdu_rx_start(struct iwl_mld *mld, struct ieee80211_sta *sta, + int tid, u16 ssn, u16 buf_size, u16 timeout) +{ + struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta); + struct iwl_mld_baid_data *baid_data = NULL; + u32 reorder_buf_size = buf_size * sizeof(baid_data->entries[0]); + int ret, baid; + u32 sta_mask; + + lockdep_assert_wiphy(mld->wiphy); + + if (mld->num_rx_ba_sessions >= IWL_MAX_BAID) { + IWL_DEBUG_HT(mld, + "Max num of RX BA sessions reached; blocking new session\n"); + return -ENOSPC; + } + + sta_mask = iwl_mld_fw_sta_id_mask(mld, sta); + if (WARN_ON(!sta_mask)) + return -EINVAL; + + /* sparse doesn't like the __align() so don't check */ +#ifndef __CHECKER__ + /* The division below will be OK if either the cache line size + * can be divided by the entry size (ALIGN will round up) or if + * the entry size can be divided by the cache line size, in which + * case the ALIGN() will do nothing. + */ + BUILD_BUG_ON(SMP_CACHE_BYTES % sizeof(baid_data->entries[0]) && + sizeof(baid_data->entries[0]) % SMP_CACHE_BYTES); +#endif + + /* Upward align the reorder buffer size to fill an entire cache + * line for each queue, to avoid sharing cache lines between + * different queues. + */ + reorder_buf_size = ALIGN(reorder_buf_size, SMP_CACHE_BYTES); + + /* Allocate here so if allocation fails we can bail out early + * before starting the BA session in the firmware + */ + baid_data = kzalloc(sizeof(*baid_data) + + mld->trans->num_rx_queues * reorder_buf_size, + GFP_KERNEL); + if (!baid_data) + return -ENOMEM; + + /* This division is why we need the above BUILD_BUG_ON(), + * if that doesn't hold then this will not be right. 
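+ * As a worked example (the sizes are hypothetical): with 64-byte cache
+ * lines and 32-byte entries, ALIGN() leaves reorder_buf_size a whole
+ * multiple of the entry size, so the division below is exact.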
+ */ + baid_data->entries_per_queue = + reorder_buf_size / sizeof(baid_data->entries[0]); + + baid = iwl_mld_start_ba_in_fw(mld, sta, tid, ssn, buf_size); + if (baid < 0) { + ret = baid; + goto out_free; + } + + mld->num_rx_ba_sessions++; + mld_sta->tid_to_baid[tid] = baid; + + baid_data->baid = baid; + baid_data->mld = mld; + baid_data->tid = tid; + baid_data->buf_size = buf_size; + baid_data->sta_mask = sta_mask; + baid_data->timeout = timeout; + baid_data->last_rx_timestamp = jiffies; + baid_data->rcu_ptr = &mld->fw_id_to_ba[baid]; + + iwl_mld_init_reorder_buffer(mld, baid_data, ssn); + + timer_setup(&baid_data->session_timer, iwl_mld_rx_agg_session_expired, + 0); + if (timeout) + mod_timer(&baid_data->session_timer, + TU_TO_EXP_TIME(timeout * 2)); + + IWL_DEBUG_HT(mld, "STA mask=0x%x (tid=%d) is assigned to BAID %d\n", + baid_data->sta_mask, tid, baid); + + /* protect the BA data with RCU to cover a case where our + * internal RX sync mechanism will timeout (not that it's + * supposed to happen) and we will free the session data while + * RX is being processed in parallel + */ + WARN_ON(rcu_access_pointer(mld->fw_id_to_ba[baid])); + rcu_assign_pointer(mld->fw_id_to_ba[baid], baid_data); + + return 0; + +out_free: + kfree(baid_data); + return ret; +} + +int iwl_mld_ampdu_rx_stop(struct iwl_mld *mld, struct ieee80211_sta *sta, + int tid) +{ + struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta); + int baid = mld_sta->tid_to_baid[tid]; + struct iwl_mld_baid_data *baid_data; + int ret; + + lockdep_assert_wiphy(mld->wiphy); + + /* during firmware restart, do not send the command as the firmware no + * longer recognizes the session. instead, only clear the driver BA + * session data. + */ + if (!mld->fw_status.in_hw_restart) { + ret = iwl_mld_stop_ba_in_fw(mld, sta, tid); + if (ret) + return ret; + } + + if (!WARN_ON(mld->num_rx_ba_sessions == 0)) + mld->num_rx_ba_sessions--; + + baid_data = wiphy_dereference(mld->wiphy, mld->fw_id_to_ba[baid]); + if (WARN_ON(!baid_data)) + return -EINVAL; + + if (timer_pending(&baid_data->session_timer)) + timer_shutdown_sync(&baid_data->session_timer); + + iwl_mld_free_reorder_buffer(mld, baid_data); + + RCU_INIT_POINTER(mld->fw_id_to_ba[baid], NULL); + kfree_rcu(baid_data, rcu_head); + + IWL_DEBUG_HT(mld, "BAID %d is free\n", baid); + + return 0; +} + +int iwl_mld_update_sta_baids(struct iwl_mld *mld, + u32 old_sta_mask, + u32 new_sta_mask) +{ + struct iwl_rx_baid_cfg_cmd cmd = { + .action = cpu_to_le32(IWL_RX_BAID_ACTION_MODIFY), + .modify.old_sta_id_mask = cpu_to_le32(old_sta_mask), + .modify.new_sta_id_mask = cpu_to_le32(new_sta_mask), + }; + u32 cmd_id = WIDE_ID(DATA_PATH_GROUP, RX_BAID_ALLOCATION_CONFIG_CMD); + int baid; + + /* mac80211 will remove sessions later, but we ignore all that */ + if (mld->fw_status.in_hw_restart) + return 0; + + BUILD_BUG_ON(sizeof(struct iwl_rx_baid_cfg_resp) != sizeof(baid)); + + for (baid = 0; baid < ARRAY_SIZE(mld->fw_id_to_ba); baid++) { + struct iwl_mld_baid_data *data; + int ret; + + data = wiphy_dereference(mld->wiphy, mld->fw_id_to_ba[baid]); + if (!data) + continue; + + if (!(data->sta_mask & old_sta_mask)) + continue; + + WARN_ONCE(data->sta_mask != old_sta_mask, + "BAID data for %d corrupted - expected 0x%x found 0x%x\n", + baid, old_sta_mask, data->sta_mask); + + cmd.modify.tid = cpu_to_le32(data->tid); + + ret = iwl_mld_send_cmd_pdu(mld, cmd_id, &cmd); + if (ret) + return ret; + data->sta_mask = new_sta_mask; + } + + return 0; +} diff --git a/drivers/net/wireless/intel/iwlwifi/mld/agg.h 
b/drivers/net/wireless/intel/iwlwifi/mld/agg.h new file mode 100644 index 0000000000000..651c80d1c7cda --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mld/agg.h @@ -0,0 +1,127 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * Copyright (C) 2024 Intel Corporation + */ +#ifndef __iwl_agg_h__ +#define __iwl_agg_h__ + +#include "mld.h" +#include "fw/api/rx.h" + +/** + * struct iwl_mld_reorder_buffer - per ra/tid/queue reorder buffer + * @head_sn: reorder window head sequence number + * @num_stored: number of MPDUs stored in the buffer + * @queue: queue of this reorder buffer + * @valid: true if reordering is valid for this queue + */ +struct iwl_mld_reorder_buffer { + u16 head_sn; + u16 num_stored; + int queue; + bool valid; +} ____cacheline_aligned_in_smp; + +/** + * struct iwl_mld_reorder_buf_entry - reorder buffer entry per-queue/per-seqno + * @frames: list of skbs stored. a list is necessary because in an A-MSDU, + * all sub-frames share the same sequence number, so they are stored + * together in the same list. + */ +struct iwl_mld_reorder_buf_entry { + struct sk_buff_head frames; +} +#ifndef __CHECKER__ +/* sparse doesn't like this construct: "bad integer constant expression" */ +__aligned(roundup_pow_of_two(sizeof(struct sk_buff_head))) +#endif +; + +/** + * struct iwl_mld_baid_data - Block Ack session data + * @rcu_head: RCU head for freeing this data + * @sta_mask: station mask for the BAID + * @tid: tid of the session + * @baid: baid of the session + * @buf_size: the reorder buffer size as set by the last ADDBA request + * @entries_per_queue: number of buffers per queue, this actually gets + * aligned up to avoid cache line sharing between queues + * @timeout: the timeout value specified in the ADDBA request. + * @last_rx_timestamp: timestamp of the last received packet (in jiffies). This + * value is updated only when the configured @timeout has passed since + * the last update to minimize cache bouncing between RX queues. + * @session_timer: timer is set to expire after 2 * @timeout (since we want + * to minimize the cache bouncing by updating @last_rx_timestamp only once + * after @timeout has passed). If no packets are received within this + * period, it informs mac80211 to initiate delBA flow, terminating the + * BA session. + * @rcu_ptr: BA data RCU protected access + * @mld: mld pointer, needed for timer context + * @reorder_buf: reorder buffer, allocated per queue + * @entries: data + */ +struct iwl_mld_baid_data { + struct rcu_head rcu_head; + u32 sta_mask; + u8 tid; + u8 baid; + u16 buf_size; + u16 entries_per_queue; + u16 timeout; + struct timer_list session_timer; + unsigned long last_rx_timestamp; + struct iwl_mld_baid_data __rcu **rcu_ptr; + struct iwl_mld *mld; + struct iwl_mld_reorder_buffer reorder_buf[IWL_MAX_RX_HW_QUEUES]; + struct iwl_mld_reorder_buf_entry entries[] ____cacheline_aligned_in_smp; +}; + +/** + * struct iwl_mld_delba_data - RX queue sync data for %IWL_MLD_RXQ_NOTIF_DEL_BA + * + * @baid: Block Ack id, used to identify the BA session to be removed + */ +struct iwl_mld_delba_data { + u32 baid; +} __packed; + +/** + * enum iwl_mld_reorder_result - Possible return values for iwl_mld_reorder() + * indicating how the caller should handle the skb based on the result. + * + * @IWL_MLD_PASS_SKB: skb should be passed to upper layer. + * @IWL_MLD_BUFFERED_SKB: skb has been buffered, don't pass it to upper layer. + * @IWL_MLD_DROP_SKB: skb should be dropped and freed by the caller. 
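+ *
+ * A typical RX-path caller (a sketch, not mandated by this header) passes
+ * the skb to mac80211 on %IWL_MLD_PASS_SKB, frees it on %IWL_MLD_DROP_SKB,
+ * and does nothing on %IWL_MLD_BUFFERED_SKB, since the reorder buffer has
+ * taken ownership of it.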
 + */ +enum iwl_mld_reorder_result { + IWL_MLD_PASS_SKB, + IWL_MLD_BUFFERED_SKB, + IWL_MLD_DROP_SKB +}; + +int iwl_mld_ampdu_rx_start(struct iwl_mld *mld, struct ieee80211_sta *sta, + int tid, u16 ssn, u16 buf_size, u16 timeout); +int iwl_mld_ampdu_rx_stop(struct iwl_mld *mld, struct ieee80211_sta *sta, + int tid); + +enum iwl_mld_reorder_result +iwl_mld_reorder(struct iwl_mld *mld, struct napi_struct *napi, + int queue, struct ieee80211_sta *sta, + struct sk_buff *skb, struct iwl_rx_mpdu_desc *desc); + +void iwl_mld_handle_frame_release_notif(struct iwl_mld *mld, + struct napi_struct *napi, + struct iwl_rx_packet *pkt, int queue); +void iwl_mld_handle_bar_frame_release_notif(struct iwl_mld *mld, + struct napi_struct *napi, + struct iwl_rx_packet *pkt, + int queue); + +void iwl_mld_del_ba(struct iwl_mld *mld, int queue, + struct iwl_mld_delba_data *data); + +int iwl_mld_update_sta_baids(struct iwl_mld *mld, + u32 old_sta_mask, + u32 new_sta_mask); + +#endif /* __iwl_agg_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mld/ap.c b/drivers/net/wireless/intel/iwlwifi/mld/ap.c new file mode 100644 index 0000000000000..571eabd0b511f --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mld/ap.c @@ -0,0 +1,344 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2024 Intel Corporation + */ +#include <linux/crc32.h> + +#include <net/mac80211.h> + +#include "ap.h" +#include "hcmd.h" +#include "tx.h" +#include "power.h" +#include "key.h" +#include "iwl-utils.h" + +#include "fw/api/sta.h" + +void iwl_mld_set_tim_idx(struct iwl_mld *mld, __le32 *tim_index, + u8 *beacon, u32 frame_size) +{ + u32 tim_idx; + struct ieee80211_mgmt *mgmt = (void *)beacon; + + /* The index is relative to frame start but we start looking at the + * variable-length part of the beacon. + */ + tim_idx = mgmt->u.beacon.variable - beacon; + + /* Parse variable-length elements of beacon to find WLAN_EID_TIM */ + while ((tim_idx < (frame_size - 2)) && + (beacon[tim_idx] != WLAN_EID_TIM)) + tim_idx += beacon[tim_idx + 1] + 2; + + /* If TIM field was found, set variables */ + if ((tim_idx < (frame_size - 1)) && beacon[tim_idx] == WLAN_EID_TIM) + *tim_index = cpu_to_le32(tim_idx); + else + IWL_WARN(mld, "Unable to find TIM Element in beacon\n"); +} + +u8 iwl_mld_get_rate_flags(struct iwl_mld *mld, + struct ieee80211_tx_info *info, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link, + enum nl80211_band band) +{ + u32 legacy = link->beacon_tx_rate.control[band].legacy; + u32 rate_idx, rate_flags = 0, fw_rate; + + /* if beacon rate was configured try using it */ + if (hweight32(legacy) == 1) { + u32 rate = ffs(legacy) - 1; + struct ieee80211_supported_band *sband = + mld->hw->wiphy->bands[band]; + + rate_idx = sband->bitrates[rate].hw_value; + } else { + rate_idx = iwl_mld_get_lowest_rate(mld, info, vif); + } + + if (rate_idx <= IWL_LAST_CCK_RATE) + rate_flags = IWL_MAC_BEACON_CCK; + + /* Legacy rates are indexed as follows: + * 0 - 3 for CCK and 0 - 7 for OFDM. + */ + fw_rate = (rate_idx >= IWL_FIRST_OFDM_RATE ? 
+ rate_idx - IWL_FIRST_OFDM_RATE : rate_idx); + + return fw_rate | rate_flags; +} + +int iwl_mld_send_beacon_template_cmd(struct iwl_mld *mld, + struct sk_buff *beacon, + struct iwl_mac_beacon_cmd *cmd) +{ + struct iwl_host_cmd hcmd = { + .id = BEACON_TEMPLATE_CMD, + }; + + hcmd.len[0] = sizeof(*cmd); + hcmd.data[0] = cmd; + + hcmd.len[1] = beacon->len; + hcmd.data[1] = beacon->data; + hcmd.dataflags[1] = IWL_HCMD_DFL_DUP; + + return iwl_mld_send_cmd(mld, &hcmd); +} + +static int iwl_mld_fill_beacon_template_cmd(struct iwl_mld *mld, + struct ieee80211_vif *vif, + struct sk_buff *beacon, + struct iwl_mac_beacon_cmd *cmd, + struct ieee80211_bss_conf *link) +{ + struct iwl_mld_link *mld_link = iwl_mld_link_from_mac80211(link); + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(beacon); + struct ieee80211_chanctx_conf *ctx; + bool enable_fils; + u16 flags = 0; + + lockdep_assert_wiphy(mld->wiphy); + + if (WARN_ON(!mld_link)) + return -EINVAL; + + cmd->link_id = cpu_to_le32(mld_link->fw_id); + + ctx = wiphy_dereference(mld->wiphy, link->chanctx_conf); + if (WARN_ON(!ctx || !ctx->def.chan)) + return -EINVAL; + + enable_fils = cfg80211_channel_is_psc(ctx->def.chan) || + (ctx->def.chan->band == NL80211_BAND_6GHZ && + ctx->def.width >= NL80211_CHAN_WIDTH_80); + + if (enable_fils) { + flags |= IWL_MAC_BEACON_FILS; + cmd->short_ssid = cpu_to_le32(~crc32_le(~0, vif->cfg.ssid, + vif->cfg.ssid_len)); + } + + cmd->byte_cnt = cpu_to_le16((u16)beacon->len); + + flags |= iwl_mld_get_rate_flags(mld, info, vif, link, + ctx->def.chan->band); + + cmd->flags = cpu_to_le16(flags); + + if (vif->type == NL80211_IFTYPE_AP) { + iwl_mld_set_tim_idx(mld, &cmd->tim_idx, + beacon->data, beacon->len); + + cmd->btwt_offset = + cpu_to_le32(iwl_find_ie_offset(beacon->data, + WLAN_EID_S1G_TWT, + beacon->len)); + } + + cmd->csa_offset = + cpu_to_le32(iwl_find_ie_offset(beacon->data, + WLAN_EID_CHANNEL_SWITCH, + beacon->len)); + cmd->ecsa_offset = + cpu_to_le32(iwl_find_ie_offset(beacon->data, + WLAN_EID_EXT_CHANSWITCH_ANN, + beacon->len)); + + return 0; +} + +/* The beacon template for the AP/GO/IBSS has changed and needs update */ +int iwl_mld_update_beacon_template(struct iwl_mld *mld, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf) +{ + struct iwl_mac_beacon_cmd cmd = {}; + struct sk_buff *beacon; + int ret; +#ifdef CONFIG_IWLWIFI_DEBUGFS + struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif); +#endif + + WARN_ON(vif->type != NL80211_IFTYPE_AP && + vif->type != NL80211_IFTYPE_ADHOC); + + if (IWL_MLD_NON_TRANSMITTING_AP) + return 0; + +#ifdef CONFIG_IWLWIFI_DEBUGFS + if (mld_vif->beacon_inject_active) { + IWL_DEBUG_INFO(mld, + "Can't update template, beacon injection's active\n"); + return -EBUSY; + } + +#endif + beacon = ieee80211_beacon_get_template(mld->hw, vif, NULL, + link_conf->link_id); + if (!beacon) + return -ENOMEM; + + ret = iwl_mld_fill_beacon_template_cmd(mld, vif, beacon, &cmd, + link_conf); + + if (!ret) + ret = iwl_mld_send_beacon_template_cmd(mld, beacon, &cmd); + + dev_kfree_skb(beacon); + + return ret; +} + +void iwl_mld_free_ap_early_key(struct iwl_mld *mld, + struct ieee80211_key_conf *key, + struct iwl_mld_vif *mld_vif) +{ + struct iwl_mld_link *link; + + if (WARN_ON(key->link_id < 0)) + return; + + link = iwl_mld_link_dereference_check(mld_vif, key->link_id); + if (WARN_ON(!link)) + return; + + for (int i = 0; i < ARRAY_SIZE(link->ap_early_keys); i++) { + if (link->ap_early_keys[i] != key) + continue; + /* Those weren't sent to FW, so should be marked as INVALID */ + if 
(WARN_ON(key->hw_key_idx != STA_KEY_IDX_INVALID)) + key->hw_key_idx = STA_KEY_IDX_INVALID; + link->ap_early_keys[i] = NULL; + } +} + +int iwl_mld_store_ap_early_key(struct iwl_mld *mld, + struct ieee80211_key_conf *key, + struct iwl_mld_vif *mld_vif) +{ + struct iwl_mld_link *link; + + if (WARN_ON(key->link_id < 0)) + return -EINVAL; + + link = iwl_mld_link_dereference_check(mld_vif, key->link_id); + if (WARN_ON(!link)) + return -EINVAL; + + for (int i = 0; i < ARRAY_SIZE(link->ap_early_keys); i++) { + if (!link->ap_early_keys[i]) { + link->ap_early_keys[i] = key; + return 0; + } + } + + return -ENOSPC; +} + +static int iwl_mld_send_ap_early_keys(struct iwl_mld *mld, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link) +{ + struct iwl_mld_link *mld_link = iwl_mld_link_from_mac80211(link); + int ret = 0; + + if (WARN_ON(!link)) + return -EINVAL; + + for (int i = 0; i < ARRAY_SIZE(mld_link->ap_early_keys); i++) { + struct ieee80211_key_conf *key = mld_link->ap_early_keys[i]; + + if (!key) + continue; + + mld_link->ap_early_keys[i] = NULL; + + ret = iwl_mld_add_key(mld, vif, NULL, key); + if (ret) + break; + } + return ret; +} + +int iwl_mld_start_ap_ibss(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link) +{ + struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw); + struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif); + int ret; + + if (vif->type == NL80211_IFTYPE_AP) + iwl_mld_send_ap_tx_power_constraint_cmd(mld, vif, link); + + ret = iwl_mld_update_beacon_template(mld, vif, link); + if (ret) + return ret; + + /* the link should be already activated when assigning chan context, + * and LINK_CONTEXT_MODIFY_EHT_PARAMS is deprecated + */ + ret = iwl_mld_change_link_in_fw(mld, link, + LINK_CONTEXT_MODIFY_ALL & + ~(LINK_CONTEXT_MODIFY_ACTIVE | + LINK_CONTEXT_MODIFY_EHT_PARAMS)); + if (ret) + return ret; + + ret = iwl_mld_add_mcast_sta(mld, vif, link); + if (ret) + return ret; + + ret = iwl_mld_add_bcast_sta(mld, vif, link); + if (ret) + goto rm_mcast; + + /* Those keys were configured by the upper layers before starting the + * AP. Now that it is started and the bcast and mcast sta were added to + * the FW, we can add the keys too. 
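+ * (These are the keys stashed earlier by iwl_mld_store_ap_early_key(),
+ * which could not be installed while the mcast/bcast stations did not
+ * yet exist in the firmware.)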
+ */ + ret = iwl_mld_send_ap_early_keys(mld, vif, link); + if (ret) + goto rm_bcast; + + if (ieee80211_vif_type_p2p(vif) == NL80211_IFTYPE_AP) + iwl_mld_vif_update_low_latency(mld, vif, true, + LOW_LATENCY_VIF_TYPE); + + mld_vif->ap_ibss_active = true; + + if (vif->p2p && mld->p2p_device_vif) + return iwl_mld_mac_fw_action(mld, mld->p2p_device_vif, + FW_CTXT_ACTION_MODIFY); + + return 0; +rm_bcast: + iwl_mld_remove_bcast_sta(mld, vif, link); +rm_mcast: + iwl_mld_remove_mcast_sta(mld, vif, link); + return ret; +} + +void iwl_mld_stop_ap_ibss(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link) +{ + struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw); + struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif); + + mld_vif->ap_ibss_active = false; + + if (vif->p2p && mld->p2p_device_vif) + iwl_mld_mac_fw_action(mld, mld->p2p_device_vif, + FW_CTXT_ACTION_MODIFY); + + if (ieee80211_vif_type_p2p(vif) == NL80211_IFTYPE_AP) + iwl_mld_vif_update_low_latency(mld, vif, false, + LOW_LATENCY_VIF_TYPE); + + iwl_mld_remove_bcast_sta(mld, vif, link); + + iwl_mld_remove_mcast_sta(mld, vif, link); +} diff --git a/drivers/net/wireless/intel/iwlwifi/mld/ap.h b/drivers/net/wireless/intel/iwlwifi/mld/ap.h new file mode 100644 index 0000000000000..4a6f52b9552d7 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mld/ap.h @@ -0,0 +1,45 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * Copyright (C) 2024 Intel Corporation + */ +#ifndef __iwl_ap_h__ +#define __iwl_ap_h__ + +#include "mld.h" +#include "iface.h" + +#include "fw/api/tx.h" + +int iwl_mld_update_beacon_template(struct iwl_mld *mld, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf); + +int iwl_mld_start_ap_ibss(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link); + +void iwl_mld_stop_ap_ibss(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link); + +int iwl_mld_store_ap_early_key(struct iwl_mld *mld, + struct ieee80211_key_conf *key, + struct iwl_mld_vif *mld_vif); + +void iwl_mld_free_ap_early_key(struct iwl_mld *mld, + struct ieee80211_key_conf *key, + struct iwl_mld_vif *mld_vif); + +u8 iwl_mld_get_rate_flags(struct iwl_mld *mld, + struct ieee80211_tx_info *info, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link, + enum nl80211_band band); + +void iwl_mld_set_tim_idx(struct iwl_mld *mld, __le32 *tim_index, + u8 *beacon, u32 frame_size); + +int iwl_mld_send_beacon_template_cmd(struct iwl_mld *mld, + struct sk_buff *beacon, + struct iwl_mac_beacon_cmd *cmd); + +#endif /* __iwl_ap_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mld/coex.c b/drivers/net/wireless/intel/iwlwifi/mld/coex.c new file mode 100644 index 0000000000000..5f262bd43f21d --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mld/coex.c @@ -0,0 +1,40 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2024-2025 Intel Corporation + */ + +#include "fw/api/coex.h" + +#include "coex.h" +#include "mld.h" +#include "hcmd.h" +#include "mlo.h" + +int iwl_mld_send_bt_init_conf(struct iwl_mld *mld) +{ + struct iwl_bt_coex_cmd cmd = { + .mode = cpu_to_le32(BT_COEX_NW), + .enabled_modules = cpu_to_le32(BT_COEX_MPLUT_ENABLED | + BT_COEX_HIGH_BAND_RET), + }; + + return iwl_mld_send_cmd_pdu(mld, BT_CONFIG, &cmd); +} + +void iwl_mld_handle_bt_coex_notif(struct iwl_mld *mld, + struct iwl_rx_packet *pkt) +{ + const struct iwl_bt_coex_profile_notif *notif = (void *)pkt->data; + const struct iwl_bt_coex_profile_notif 
zero_notif = {}; + /* zeroed structure means that BT is OFF */ + bool bt_is_active = memcmp(notif, &zero_notif, sizeof(*notif)); + + if (bt_is_active == mld->bt_is_active) + return; + + IWL_DEBUG_INFO(mld, "BT was turned %s\n", bt_is_active ? "ON" : "OFF"); + + mld->bt_is_active = bt_is_active; + + iwl_mld_emlsr_check_bt(mld); +} diff --git a/drivers/net/wireless/intel/iwlwifi/mld/coex.h b/drivers/net/wireless/intel/iwlwifi/mld/coex.h new file mode 100644 index 0000000000000..a77c5dc9613c0 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mld/coex.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * Copyright (C) 2024 Intel Corporation + */ +#ifndef __iwl_mld_coex_h__ +#define __iwl_mld_coex_h__ + +#include "mld.h" + +int iwl_mld_send_bt_init_conf(struct iwl_mld *mld); + +void iwl_mld_handle_bt_coex_notif(struct iwl_mld *mld, + struct iwl_rx_packet *pkt); + +#endif /* __iwl_mld_coex_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mld/constants.h b/drivers/net/wireless/intel/iwlwifi/mld/constants.h new file mode 100644 index 0000000000000..2a59b29b75cb2 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mld/constants.h @@ -0,0 +1,88 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * Copyright (C) 2024-2025 Intel Corporation + */ +#ifndef __iwl_mld_constants_h__ +#define __iwl_mld_constants_h__ + +#define IWL_MLD_MISSED_BEACONS_SINCE_RX_THOLD 4 +#define IWL_MLD_MISSED_BEACONS_THRESHOLD 8 +#define IWL_MLD_MISSED_BEACONS_THRESHOLD_LONG 19 +#define IWL_MLD_BCN_LOSS_EXIT_ESR_THRESH_2_LINKS 5 +#define IWL_MLD_BCN_LOSS_EXIT_ESR_THRESH 15 +#define IWL_MLD_BCN_LOSS_EXIT_ESR_THRESH_BSS_PARAM_CHANGED 11 +#define IWL_MLD_LOW_RSSI_MLO_SCAN_THRESH -72 + +#define IWL_MLD_DEFAULT_PS_TX_DATA_TIMEOUT (100 * USEC_PER_MSEC) +#define IWL_MLD_DEFAULT_PS_RX_DATA_TIMEOUT (100 * USEC_PER_MSEC) +#define IWL_MLD_WOWLAN_PS_TX_DATA_TIMEOUT (10 * USEC_PER_MSEC) +#define IWL_MLD_WOWLAN_PS_RX_DATA_TIMEOUT (10 * USEC_PER_MSEC) +#define IWL_MLD_SHORT_PS_TX_DATA_TIMEOUT (2 * 1024) /* defined in TU */ +#define IWL_MLD_SHORT_PS_RX_DATA_TIMEOUT (40 * 1024) /* defined in TU */ + +#define IWL_MLD_UAPSD_RX_DATA_TIMEOUT (50 * USEC_PER_MSEC) +#define IWL_MLD_UAPSD_TX_DATA_TIMEOUT (50 * USEC_PER_MSEC) + +#define IWL_MLD_PS_SNOOZE_INTERVAL 25 +#define IWL_MLD_PS_SNOOZE_WINDOW 50 + +#define IWL_MLD_PS_SNOOZE_HEAVY_TX_THLD_PACKETS 30 +#define IWL_MLD_PS_SNOOZE_HEAVY_RX_THLD_PACKETS 20 + +#define IWL_MLD_PS_HEAVY_TX_THLD_PERCENT 50 +#define IWL_MLD_PS_HEAVY_RX_THLD_PERCENT 50 +#define IWL_MLD_PS_HEAVY_TX_THLD_PACKETS 20 +#define IWL_MLD_PS_HEAVY_RX_THLD_PACKETS 8 + +#define IWL_MLD_TRIGGER_LINK_SEL_TIME_SEC 30 +#define IWL_MLD_SCAN_EXPIRE_TIME_SEC 20 + +#define IWL_MLD_TPT_COUNT_WINDOW (5 * HZ) + +/* OMI reduced BW thresholds (channel load percentage) */ +#define IWL_MLD_OMI_ENTER_CHAN_LOAD 10 +#define IWL_MLD_OMI_EXIT_CHAN_LOAD_160 20 +#define IWL_MLD_OMI_EXIT_CHAN_LOAD_320 30 +/* time (in milliseconds) to let AP "settle" the OMI */ +#define IWL_MLD_OMI_AP_SETTLE_DELAY 27 +/* time (in milliseconds) to not enter OMI reduced BW after leaving */ +#define IWL_MLD_OMI_EXIT_PROTECTION 5000 + +#define IWL_MLD_DIS_RANDOM_FW_ID false +#define IWL_MLD_D3_DEBUG false +#define IWL_MLD_NON_TRANSMITTING_AP false +#define IWL_MLD_6GHZ_PASSIVE_SCAN_TIMEOUT 3000 /* in seconds */ +#define IWL_MLD_6GHZ_PASSIVE_SCAN_ASSOC_TIMEOUT 60 /* in seconds */ +#define IWL_MLD_CONN_LISTEN_INTERVAL 10 +#define IWL_MLD_ADAPTIVE_DWELL_NUM_APS_OVERRIDE 0 +#define IWL_MLD_AUTO_EML_ENABLE true
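/*
 * Illustrative sketch, not part of the patch: iwl_mld_handle_bt_coex_notif()
 * above detects BT activity by comparing the notification against a zeroed
 * instance of the same struct, since the firmware reports "BT off" as an
 * all-zero payload. A minimal standalone version of that idiom; the struct
 * and function names here are invented for the example:
 */
#include <stdbool.h>
#include <string.h>

struct example_bt_notif {
	unsigned int activity_grading;
	unsigned char ttc_status;
	unsigned char rrc_status;
};

static bool example_bt_is_active(const struct example_bt_notif *notif)
{
	static const struct example_bt_notif zero; /* all-zero => BT is OFF */

	/* memcmp() != 0 means at least one field was set by the firmware */
	return memcmp(notif, &zero, sizeof(zero)) != 0;
}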
+ +#define IWL_MLD_HIGH_RSSI_THRESH_20MHZ -67 +#define IWL_MLD_LOW_RSSI_THRESH_20MHZ -72 +#define IWL_MLD_HIGH_RSSI_THRESH_40MHZ -64 +#define IWL_MLD_LOW_RSSI_THRESH_40MHZ -72 +#define IWL_MLD_HIGH_RSSI_THRESH_80MHZ -61 +#define IWL_MLD_LOW_RSSI_THRESH_80MHZ -72 +#define IWL_MLD_HIGH_RSSI_THRESH_160MHZ -58 +#define IWL_MLD_LOW_RSSI_THRESH_160MHZ -72 + +#define IWL_MLD_ENTER_EMLSR_TPT_THRESH 400 +#define IWL_MLD_EXIT_EMLSR_CHAN_LOAD 2 /* in percentage */ + +#define IWL_MLD_FTM_INITIATOR_ALGO IWL_TOF_ALGO_TYPE_MAX_LIKE +#define IWL_MLD_FTM_INITIATOR_DYNACK true +#define IWL_MLD_FTM_LMR_FEEDBACK_TERMINATE false +#define IWL_MLD_FTM_TEST_INCORRECT_SAC false +#define IWL_MLD_FTM_R2I_MAX_REP 7 +#define IWL_MLD_FTM_I2R_MAX_REP 7 +#define IWL_MLD_FTM_R2I_MAX_STS 1 +#define IWL_MLD_FTM_I2R_MAX_STS 1 +#define IWL_MLD_FTM_R2I_MAX_TOTAL_LTF 3 +#define IWL_MLD_FTM_I2R_MAX_TOTAL_LTF 3 +#define IWL_MLD_FTM_RESP_NDP_SUPPORT true +#define IWL_MLD_FTM_RESP_LMR_FEEDBACK_SUPPORT true +#define IWL_MLD_FTM_NON_TB_MIN_TIME_BETWEEN_MSR 7 +#define IWL_MLD_FTM_NON_TB_MAX_TIME_BETWEEN_MSR 1000 + +#endif /* __iwl_mld_constants_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mld/d3.c b/drivers/net/wireless/intel/iwlwifi/mld/d3.c new file mode 100644 index 0000000000000..5a7207accd867 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mld/d3.c @@ -0,0 +1,1998 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2024-2025 Intel Corporation + */ +#include "mld.h" + +#include "d3.h" +#include "power.h" +#include "hcmd.h" +#include "iface.h" +#include "mcc.h" +#include "sta.h" +#include "mlo.h" + +#include "fw/api/d3.h" +#include "fw/api/offload.h" +#include "fw/api/sta.h" +#include "fw/dbg.h" + +#include <net/ipv6.h> +#include <net/addrconf.h> +#include <linux/bitops.h> + +/** + * enum iwl_mld_d3_notif - d3 notifications + * @IWL_D3_NOTIF_WOWLAN_INFO: WOWLAN_INFO_NOTIF is expected/was received + * @IWL_D3_NOTIF_WOWLAN_WAKE_PKT: WOWLAN_WAKE_PKT_NOTIF is expected/was received + * @IWL_D3_NOTIF_PROT_OFFLOAD: PROT_OFFLOAD_NOTIF is expected/was received + * @IWL_D3_ND_MATCH_INFO: OFFLOAD_MATCH_INFO_NOTIF is expected/was received + * @IWL_D3_NOTIF_D3_END_NOTIF: D3_END_NOTIF is expected/was received + */ +enum iwl_mld_d3_notif { + IWL_D3_NOTIF_WOWLAN_INFO = BIT(0), + IWL_D3_NOTIF_WOWLAN_WAKE_PKT = BIT(1), + IWL_D3_NOTIF_PROT_OFFLOAD = BIT(2), + IWL_D3_ND_MATCH_INFO = BIT(3), + IWL_D3_NOTIF_D3_END_NOTIF = BIT(4) +}; + +struct iwl_mld_resume_key_iter_data { + struct iwl_mld *mld; + struct iwl_mld_wowlan_status *wowlan_status; + u32 num_keys, gtk_cipher, igtk_cipher, bigtk_cipher; + bool unhandled_cipher; +}; + +struct iwl_mld_suspend_key_iter_data { + struct iwl_wowlan_rsc_tsc_params_cmd *rsc; + bool have_rsc; + int gtks; + int found_gtk_idx[4]; + __le32 gtk_cipher; + __le32 igtk_cipher; + __le32 bigtk_cipher; +}; + +struct iwl_mld_mcast_key_data { + u8 key[WOWLAN_KEY_MAX_SIZE]; + u8 len; + u8 flags; + u8 id; + union { + struct { + struct ieee80211_key_seq aes_seq[IWL_MAX_TID_COUNT]; + struct ieee80211_key_seq tkip_seq[IWL_MAX_TID_COUNT]; + } gtk; + struct { + struct ieee80211_key_seq cmac_gmac_seq; + } igtk_bigtk; + }; + +};
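/*
 * Illustrative sketch, not part of the patch: the enum above drives a small
 * completion handshake. Each notification handler sets its bit in
 * notifs_received, and the wait finishes once the received bitmap equals the
 * expected one (iwl_mld_handle_d3_notif() below returns exactly that
 * comparison). A minimal standalone version, with invented names:
 */
#include <stdbool.h>
#include <stdint.h>

struct example_notif_wait {
	uint32_t expected; /* bits that must be seen before resume can finish */
	uint32_t received; /* bits seen so far */
};

/* record one notification; returns true once the handshake is complete */
static bool example_notif_mark(struct example_notif_wait *w, uint32_t bit)
{
	w->received |= bit;

	/* equality rather than a subset test: an unexpected bit keeps the
	 * bitmaps unequal, so it is easy to spot while debugging */
	return w->received == w->expected;
}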
+ +/** + * struct iwl_mld_wowlan_status - contains wowlan status data from + * all wowlan notifications + * @wakeup_reasons: wakeup reasons, see &enum iwl_wowlan_wakeup_reason + * @replay_ctr: GTK rekey replay counter + * @pattern_number: number of the matched patterns on packets + * @last_qos_seq: QoS sequence counter of offloaded tid + * @num_of_gtk_rekeys: number of GTK rekeys during D3 + * @tid_offloaded_tx: tid used by the firmware to transmit data packets + * while in wowlan + * @wake_packet: wakeup packet received + * @wake_packet_length: wake packet length + * @wake_packet_bufsize: wake packet bufsize + * @gtk: data of the last two used gtk's by the FW upon resume + * @igtk: data of the last used igtk by the FW upon resume + * @bigtk: data of the last two used bigtk's by the FW upon resume + * @ptk: last seq numbers per tid passed by the FW, + * held in both tkip and aes formats + */ +struct iwl_mld_wowlan_status { + u32 wakeup_reasons; + u64 replay_ctr; + u16 pattern_number; + u16 last_qos_seq; + u32 num_of_gtk_rekeys; + u8 tid_offloaded_tx; + u8 *wake_packet; + u32 wake_packet_length; + u32 wake_packet_bufsize; + struct iwl_mld_mcast_key_data gtk[WOWLAN_GTK_KEYS_NUM]; + struct iwl_mld_mcast_key_data igtk; + struct iwl_mld_mcast_key_data bigtk[WOWLAN_BIGTK_KEYS_NUM]; + struct { + struct ieee80211_key_seq aes_seq[IWL_MAX_TID_COUNT]; + struct ieee80211_key_seq tkip_seq[IWL_MAX_TID_COUNT]; + + } ptk; +}; + +#define NETDETECT_QUERY_BUF_LEN \ + (sizeof(struct iwl_scan_offload_profile_match) * \ + IWL_SCAN_MAX_PROFILES_V2) + +/** + * struct iwl_mld_netdetect_res - contains netdetect results from + * match_info_notif + * @matched_profiles: bitmap of matched profiles, referencing the + * matches passed in the scan offload request + * @matches: array of match information, one for each match + */ +struct iwl_mld_netdetect_res { + u32 matched_profiles; + u8 matches[NETDETECT_QUERY_BUF_LEN]; +}; + +/** + * struct iwl_mld_resume_data - d3 resume flow data + * @notifs_expected: bitmap of expected notifications from fw, + * see &enum iwl_mld_d3_notif + * @notifs_received: bitmap of received notifications from fw, + * see &enum iwl_mld_d3_notif + * @d3_end_flags: bitmap of flags from d3_end_notif + * @notif_handling_err: error handling one of the resume notifications + * @wowlan_status: wowlan status data from all wowlan notifications + * @netdetect_res: contains netdetect results from match_info_notif + */ +struct iwl_mld_resume_data { + u32 notifs_expected; + u32 notifs_received; + u32 d3_end_flags; + bool notif_handling_err; + struct iwl_mld_wowlan_status *wowlan_status; + struct iwl_mld_netdetect_res *netdetect_res; +}; + +#define IWL_WOWLAN_WAKEUP_REASON_HAS_WAKEUP_PKT \ + (IWL_WOWLAN_WAKEUP_BY_MAGIC_PACKET | \ + IWL_WOWLAN_WAKEUP_BY_PATTERN | \ + IWL_WAKEUP_BY_PATTERN_IPV4_TCP_SYN |\ + IWL_WAKEUP_BY_PATTERN_IPV4_TCP_SYN_WILDCARD |\ + IWL_WAKEUP_BY_PATTERN_IPV6_TCP_SYN |\ + IWL_WAKEUP_BY_PATTERN_IPV6_TCP_SYN_WILDCARD) + +#define IWL_WOWLAN_OFFLOAD_TID 0 + +void iwl_mld_set_rekey_data(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct cfg80211_gtk_rekey_data *data) +{ + struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw); + struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif); + struct iwl_mld_wowlan_data *wowlan_data = &mld_vif->wowlan_data; + + lockdep_assert_wiphy(mld->wiphy); + + wowlan_data->rekey_data.kek_len = data->kek_len; + wowlan_data->rekey_data.kck_len = data->kck_len; + memcpy(wowlan_data->rekey_data.kek, data->kek, data->kek_len); + memcpy(wowlan_data->rekey_data.kck, data->kck, data->kck_len); + wowlan_data->rekey_data.akm = data->akm & 0xFF; + wowlan_data->rekey_data.replay_ctr = + cpu_to_le64(be64_to_cpup((const __be64 *)data->replay_ctr)); + wowlan_data->rekey_data.valid = true; +} + +#if IS_ENABLED(CONFIG_IPV6) +void iwl_mld_ipv6_addr_change(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct inet6_dev *idev) +{ + struct iwl_mld_vif
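/* (Illustrative note, not part of the patch: the bitmap filled in below
 * marks addresses still undergoing duplicate address detection; the NS
 * offload setup in iwl_mld_send_proto_offload() later skips them, since
 * answering neighbour solicitations for a tentative address would violate
 * RFC 4862.) */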
*mld_vif = iwl_mld_vif_from_mac80211(vif); + struct iwl_mld_wowlan_data *wowlan_data = &mld_vif->wowlan_data; + struct inet6_ifaddr *ifa; + int idx = 0; + + memset(wowlan_data->tentative_addrs, 0, + sizeof(wowlan_data->tentative_addrs)); + + read_lock_bh(&idev->lock); + list_for_each_entry(ifa, &idev->addr_list, if_list) { + wowlan_data->target_ipv6_addrs[idx] = ifa->addr; + if (ifa->flags & IFA_F_TENTATIVE) + __set_bit(idx, wowlan_data->tentative_addrs); + idx++; + if (idx >= IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_MAX) + break; + } + read_unlock_bh(&idev->lock); + + wowlan_data->num_target_ipv6_addrs = idx; +} +#endif + +enum rt_status { + FW_ALIVE, + FW_NEEDS_RESET, + FW_ERROR, +}; + +static enum rt_status iwl_mld_check_err_tables(struct iwl_mld *mld, + struct ieee80211_vif *vif) +{ + u32 err_id; + + /* check for lmac1 error */ + if (iwl_fwrt_read_err_table(mld->trans, + mld->trans->dbg.lmac_error_event_table[0], + &err_id)) { + if (err_id == RF_KILL_INDICATOR_FOR_WOWLAN && vif) { + struct cfg80211_wowlan_wakeup wakeup = { + .rfkill_release = true, + }; + ieee80211_report_wowlan_wakeup(vif, &wakeup, + GFP_KERNEL); + + return FW_NEEDS_RESET; + } + return FW_ERROR; + } + + /* check if we have lmac2 set and check for error */ + if (iwl_fwrt_read_err_table(mld->trans, + mld->trans->dbg.lmac_error_event_table[1], + NULL)) + return FW_ERROR; + + /* check for umac error */ + if (iwl_fwrt_read_err_table(mld->trans, + mld->trans->dbg.umac_error_event_table, + NULL)) + return FW_ERROR; + + return FW_ALIVE; +} + +static bool iwl_mld_fw_needs_restart(struct iwl_mld *mld, + struct ieee80211_vif *vif) +{ + enum rt_status rt_status = iwl_mld_check_err_tables(mld, vif); + + if (rt_status == FW_ALIVE) + return false; + + if (rt_status == FW_ERROR) { + IWL_ERR(mld, "FW Error occurred during suspend\n"); + iwl_fwrt_dump_error_logs(&mld->fwrt); + iwl_dbg_tlv_time_point(&mld->fwrt, + IWL_FW_INI_TIME_POINT_FW_ASSERT, NULL); + } + + return true; +} + +static int +iwl_mld_netdetect_config(struct iwl_mld *mld, + struct ieee80211_vif *vif, + const struct cfg80211_wowlan *wowlan) +{ + int ret; + struct cfg80211_sched_scan_request *netdetect_cfg = + wowlan->nd_config; + struct ieee80211_scan_ies ies = {}; + + ret = iwl_mld_scan_stop(mld, IWL_MLD_SCAN_SCHED, true); + if (ret) + return ret; + + ret = iwl_mld_sched_scan_start(mld, vif, netdetect_cfg, &ies, + IWL_MLD_SCAN_NETDETECT); + return ret; +} + +static void +iwl_mld_le64_to_tkip_seq(__le64 le_pn, struct ieee80211_key_seq *seq) +{ + u64 pn = le64_to_cpu(le_pn); + + seq->tkip.iv16 = (u16)pn; + seq->tkip.iv32 = (u32)(pn >> 16); +} + +static void +iwl_mld_le64_to_aes_seq(__le64 le_pn, struct ieee80211_key_seq *seq) +{ + u64 pn = le64_to_cpu(le_pn); + + seq->ccmp.pn[0] = pn >> 40; + seq->ccmp.pn[1] = pn >> 32; + seq->ccmp.pn[2] = pn >> 24; + seq->ccmp.pn[3] = pn >> 16; + seq->ccmp.pn[4] = pn >> 8; + seq->ccmp.pn[5] = pn; +} + +static void +iwl_mld_convert_gtk_resume_seq(struct iwl_mld_mcast_key_data *gtk_data, + const struct iwl_wowlan_all_rsc_tsc_v5 *sc, + int rsc_idx) +{ + struct ieee80211_key_seq *aes_seq = gtk_data->gtk.aes_seq; + struct ieee80211_key_seq *tkip_seq = gtk_data->gtk.tkip_seq; + + if (rsc_idx >= ARRAY_SIZE(sc->mcast_rsc)) + return; + + /* We store both the TKIP and AES representations coming from the + * FW because we decode the data from there before we iterate + * the keys and know which type is used. 
+ */ + for (int tid = 0; tid < IWL_MAX_TID_COUNT; tid++) { + iwl_mld_le64_to_tkip_seq(sc->mcast_rsc[rsc_idx][tid], + &tkip_seq[tid]); + iwl_mld_le64_to_aes_seq(sc->mcast_rsc[rsc_idx][tid], + &aes_seq[tid]); + } +} + +static void +iwl_mld_convert_gtk_resume_data(struct iwl_mld *mld, + struct iwl_mld_wowlan_status *wowlan_status, + const struct iwl_wowlan_gtk_status_v3 *gtk_data, + const struct iwl_wowlan_all_rsc_tsc_v5 *sc) +{ + int status_idx = 0; + + BUILD_BUG_ON(sizeof(wowlan_status->gtk[0].key) < + sizeof(gtk_data[0].key)); + BUILD_BUG_ON(ARRAY_SIZE(wowlan_status->gtk) < WOWLAN_GTK_KEYS_NUM); + + for (int notif_idx = 0; notif_idx < ARRAY_SIZE(wowlan_status->gtk); + notif_idx++) { + int rsc_idx; + + if (!(gtk_data[notif_idx].key_len)) + continue; + + wowlan_status->gtk[status_idx].len = + gtk_data[notif_idx].key_len; + wowlan_status->gtk[status_idx].flags = + gtk_data[notif_idx].key_flags; + wowlan_status->gtk[status_idx].id = + wowlan_status->gtk[status_idx].flags & + IWL_WOWLAN_GTK_IDX_MASK; + memcpy(wowlan_status->gtk[status_idx].key, + gtk_data[notif_idx].key, + sizeof(gtk_data[notif_idx].key)); + + /* The rsc for both gtk keys are stored in gtk[0]->sc->mcast_rsc + * The gtk ids can be any two numbers between 0 and 3, + * the id_map maps between the key id and the index in sc->mcast + */ + rsc_idx = + sc->mcast_key_id_map[wowlan_status->gtk[status_idx].id]; + iwl_mld_convert_gtk_resume_seq(&wowlan_status->gtk[status_idx], + sc, rsc_idx); + + /* if it's as long as the TKIP encryption key, copy MIC key */ + if (wowlan_status->gtk[status_idx].len == + NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY) + memcpy(wowlan_status->gtk[status_idx].key + + NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY, + gtk_data[notif_idx].tkip_mic_key, + sizeof(gtk_data[notif_idx].tkip_mic_key)); + status_idx++; + } +} + +static void +iwl_mld_convert_ptk_resume_seq(struct iwl_mld *mld, + struct iwl_mld_wowlan_status *wowlan_status, + const struct iwl_wowlan_all_rsc_tsc_v5 *sc) +{ + struct ieee80211_key_seq *aes_seq = wowlan_status->ptk.aes_seq; + struct ieee80211_key_seq *tkip_seq = wowlan_status->ptk.tkip_seq; + + BUILD_BUG_ON(ARRAY_SIZE(sc->ucast_rsc) != IWL_MAX_TID_COUNT); + + for (int tid = 0; tid < IWL_MAX_TID_COUNT; tid++) { + iwl_mld_le64_to_aes_seq(sc->ucast_rsc[tid], &aes_seq[tid]); + iwl_mld_le64_to_tkip_seq(sc->ucast_rsc[tid], &tkip_seq[tid]); + } +} + +static void +iwl_mld_convert_mcast_ipn(struct iwl_mld_mcast_key_data *key_status, + const struct iwl_wowlan_igtk_status *key) +{ + struct ieee80211_key_seq *seq = + &key_status->igtk_bigtk.cmac_gmac_seq; + u8 ipn_len = ARRAY_SIZE(key->ipn); + + BUILD_BUG_ON(ipn_len != ARRAY_SIZE(seq->aes_gmac.pn)); + BUILD_BUG_ON(ipn_len != ARRAY_SIZE(seq->aes_cmac.pn)); + BUILD_BUG_ON(offsetof(struct ieee80211_key_seq, aes_gmac) != + offsetof(struct ieee80211_key_seq, aes_cmac)); + + /* mac80211 expects big endian for memcmp() to work, convert. 
+ * We don't have the key cipher yet so copy to both cmac and gmac + */ + for (int i = 0; i < ipn_len; i++) { + seq->aes_gmac.pn[i] = key->ipn[ipn_len - i - 1]; + seq->aes_cmac.pn[i] = key->ipn[ipn_len - i - 1]; + } +} + +static void +iwl_mld_convert_igtk_resume_data(struct iwl_mld_wowlan_status *wowlan_status, + const struct iwl_wowlan_igtk_status *igtk) +{ + BUILD_BUG_ON(sizeof(wowlan_status->igtk.key) < sizeof(igtk->key)); + + if (!igtk->key_len) + return; + + wowlan_status->igtk.len = igtk->key_len; + wowlan_status->igtk.flags = igtk->key_flags; + wowlan_status->igtk.id = + u32_get_bits(igtk->key_flags, + IWL_WOWLAN_IGTK_BIGTK_IDX_MASK) + + WOWLAN_IGTK_MIN_INDEX; + + memcpy(wowlan_status->igtk.key, igtk->key, sizeof(igtk->key)); + iwl_mld_convert_mcast_ipn(&wowlan_status->igtk, igtk); +} + +static void +iwl_mld_convert_bigtk_resume_data(struct iwl_mld_wowlan_status *wowlan_status, + const struct iwl_wowlan_igtk_status *bigtk) +{ + int status_idx = 0; + + BUILD_BUG_ON(ARRAY_SIZE(wowlan_status->bigtk) < WOWLAN_BIGTK_KEYS_NUM); + + for (int notif_idx = 0; notif_idx < WOWLAN_BIGTK_KEYS_NUM; + notif_idx++) { + if (!bigtk[notif_idx].key_len) + continue; + + wowlan_status->bigtk[status_idx].len = bigtk[notif_idx].key_len; + wowlan_status->bigtk[status_idx].flags = + bigtk[notif_idx].key_flags; + wowlan_status->bigtk[status_idx].id = + u32_get_bits(bigtk[notif_idx].key_flags, + IWL_WOWLAN_IGTK_BIGTK_IDX_MASK) + + WOWLAN_BIGTK_MIN_INDEX; + + BUILD_BUG_ON(sizeof(wowlan_status->bigtk[status_idx].key) < + sizeof(bigtk[notif_idx].key)); + memcpy(wowlan_status->bigtk[status_idx].key, + bigtk[notif_idx].key, sizeof(bigtk[notif_idx].key)); + iwl_mld_convert_mcast_ipn(&wowlan_status->bigtk[status_idx], + &bigtk[notif_idx]); + status_idx++; + } +} + +static bool +iwl_mld_handle_wowlan_info_notif(struct iwl_mld *mld, + struct iwl_mld_wowlan_status *wowlan_status, + struct iwl_rx_packet *pkt) +{ + const struct iwl_wowlan_info_notif *notif = (void *)pkt->data; + u32 expected_len, len = iwl_rx_packet_payload_len(pkt); + + expected_len = sizeof(*notif); + + if (IWL_FW_CHECK(mld, len < expected_len, + "Invalid wowlan_info_notif (expected=%u got=%u)\n", + expected_len, len)) + return true; + + if (IWL_FW_CHECK(mld, notif->tid_offloaded_tx != IWL_WOWLAN_OFFLOAD_TID, + "Invalid tid_offloaded_tx %d\n", + notif->tid_offloaded_tx)) + return true; + + iwl_mld_convert_gtk_resume_data(mld, wowlan_status, notif->gtk, + &notif->gtk[0].sc); + iwl_mld_convert_ptk_resume_seq(mld, wowlan_status, &notif->gtk[0].sc); + /* only one igtk is passed by FW */ + iwl_mld_convert_igtk_resume_data(wowlan_status, &notif->igtk[0]); + iwl_mld_convert_bigtk_resume_data(wowlan_status, notif->bigtk); + + wowlan_status->replay_ctr = le64_to_cpu(notif->replay_ctr); + wowlan_status->pattern_number = le16_to_cpu(notif->pattern_number); + + wowlan_status->tid_offloaded_tx = notif->tid_offloaded_tx; + wowlan_status->last_qos_seq = le16_to_cpu(notif->qos_seq_ctr); + wowlan_status->num_of_gtk_rekeys = + le32_to_cpu(notif->num_of_gtk_rekeys); + wowlan_status->wakeup_reasons = le32_to_cpu(notif->wakeup_reasons); + return false; + /* TODO: mlo_links (task=MLO) */ +} + +static bool +iwl_mld_handle_wake_pkt_notif(struct iwl_mld *mld, + struct iwl_mld_wowlan_status *wowlan_status, + struct iwl_rx_packet *pkt) +{ + const struct iwl_wowlan_wake_pkt_notif *notif = (void *)pkt->data; + u32 actual_size, len = iwl_rx_packet_payload_len(pkt); + u32 expected_size = le32_to_cpu(notif->wake_packet_length); + + if (IWL_FW_CHECK(mld, len < sizeof(*notif),
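/* (Note, not part of the patch: IWL_FW_CHECK() evaluates to the checked
 * condition while also warning when it is true, so every notification
 * handler can validate the payload length before dereferencing any field;
 * a true return value from a handler ends up in
 * resume_data->notif_handling_err.) */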
"Invalid WoWLAN wake packet notification (expected size=%zu got=%u)\n", + sizeof(*notif), len)) + return true; + + if (IWL_FW_CHECK(mld, !(wowlan_status->wakeup_reasons & + IWL_WOWLAN_WAKEUP_REASON_HAS_WAKEUP_PKT), + "Got wake packet but wakeup reason is %x\n", + wowlan_status->wakeup_reasons)) + return true; + + actual_size = len - offsetof(struct iwl_wowlan_wake_pkt_notif, + wake_packet); + + /* actual_size got the padding from the notification, remove it. */ + if (expected_size < actual_size) + actual_size = expected_size; + wowlan_status->wake_packet = kmemdup(notif->wake_packet, actual_size, + GFP_ATOMIC); + if (!wowlan_status->wake_packet) + return true; + + wowlan_status->wake_packet_length = expected_size; + wowlan_status->wake_packet_bufsize = actual_size; + + return false; +} + +static void +iwl_mld_set_wake_packet(struct iwl_mld *mld, + struct ieee80211_vif *vif, + const struct iwl_mld_wowlan_status *wowlan_status, + struct cfg80211_wowlan_wakeup *wakeup, + struct sk_buff **_pkt) +{ + int pkt_bufsize = wowlan_status->wake_packet_bufsize; + int expected_pktlen = wowlan_status->wake_packet_length; + const u8 *pktdata = wowlan_status->wake_packet; + const struct ieee80211_hdr *hdr = (const void *)pktdata; + int truncated = expected_pktlen - pkt_bufsize; + + if (ieee80211_is_data(hdr->frame_control)) { + int hdrlen = ieee80211_hdrlen(hdr->frame_control); + int ivlen = 0, icvlen = 4; /* also FCS */ + + struct sk_buff *pkt = alloc_skb(pkt_bufsize, GFP_KERNEL); + *_pkt = pkt; + if (!pkt) + return; + + skb_put_data(pkt, pktdata, hdrlen); + pktdata += hdrlen; + pkt_bufsize -= hdrlen; + + /* if truncated, FCS/ICV is (partially) gone */ + if (truncated >= icvlen) { + truncated -= icvlen; + icvlen = 0; + } else { + icvlen -= truncated; + truncated = 0; + } + + pkt_bufsize -= ivlen + icvlen; + pktdata += ivlen; + + skb_put_data(pkt, pktdata, pkt_bufsize); + + if (ieee80211_data_to_8023(pkt, vif->addr, vif->type)) + return; + wakeup->packet = pkt->data; + wakeup->packet_present_len = pkt->len; + wakeup->packet_len = pkt->len - truncated; + wakeup->packet_80211 = false; + } else { + int fcslen = 4; + + if (truncated >= 4) { + truncated -= 4; + fcslen = 0; + } else { + fcslen -= truncated; + truncated = 0; + } + pkt_bufsize -= fcslen; + wakeup->packet = wowlan_status->wake_packet; + wakeup->packet_present_len = pkt_bufsize; + wakeup->packet_len = expected_pktlen - truncated; + wakeup->packet_80211 = true; + } +} + +static void +iwl_mld_report_wowlan_wakeup(struct iwl_mld *mld, + struct ieee80211_vif *vif, + struct iwl_mld_wowlan_status *wowlan_status) +{ + struct sk_buff *pkt = NULL; + struct cfg80211_wowlan_wakeup wakeup = { + .pattern_idx = -1, + }; + u32 reasons = wowlan_status->wakeup_reasons; + + if (reasons == IWL_WOWLAN_WAKEUP_BY_NON_WIRELESS) { + ieee80211_report_wowlan_wakeup(vif, NULL, GFP_KERNEL); + return; + } + + pm_wakeup_event(mld->dev, 0); + + if (reasons & IWL_WOWLAN_WAKEUP_BY_MAGIC_PACKET) + wakeup.magic_pkt = true; + + if (reasons & IWL_WOWLAN_WAKEUP_BY_PATTERN) + wakeup.pattern_idx = + wowlan_status->pattern_number; + + if (reasons & (IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON | + IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH | + IWL_WOWLAN_WAKEUP_BY_GTK_REKEY_FAILURE)) + wakeup.disconnect = true; + + if (reasons & IWL_WOWLAN_WAKEUP_BY_GTK_REKEY_FAILURE) + wakeup.gtk_rekey_failure = true; + + if (reasons & IWL_WOWLAN_WAKEUP_BY_RFKILL_DEASSERTED) + wakeup.rfkill_release = true; + + if (reasons & IWL_WOWLAN_WAKEUP_BY_EAPOL_REQUEST) + wakeup.eap_identity_req = true; + + 
if (reasons & IWL_WOWLAN_WAKEUP_BY_FOUR_WAY_HANDSHAKE) + wakeup.four_way_handshake = true; + + if (reasons & IWL_WOWLAN_WAKEUP_BY_REM_WAKE_LINK_LOSS) + wakeup.tcp_connlost = true; + + if (reasons & IWL_WOWLAN_WAKEUP_BY_REM_WAKE_SIGNATURE_TABLE) + wakeup.tcp_nomoretokens = true; + + if (reasons & IWL_WOWLAN_WAKEUP_BY_REM_WAKE_WAKEUP_PACKET) + wakeup.tcp_match = true; + + if (reasons & IWL_WAKEUP_BY_11W_UNPROTECTED_DEAUTH_OR_DISASSOC) + wakeup.unprot_deauth_disassoc = true; + + if (wowlan_status->wake_packet) + iwl_mld_set_wake_packet(mld, vif, wowlan_status, &wakeup, &pkt); + + ieee80211_report_wowlan_wakeup(vif, &wakeup, GFP_KERNEL); + kfree_skb(pkt); +} + +static void +iwl_mld_set_key_rx_seq_tids(struct ieee80211_key_conf *key, + struct ieee80211_key_seq *seq) +{ + int tid; + + for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) + ieee80211_set_key_rx_seq(key, tid, &seq[tid]); +} + +static void +iwl_mld_set_key_rx_seq(struct ieee80211_key_conf *key, + struct iwl_mld_mcast_key_data *key_data) +{ + switch (key->cipher) { + case WLAN_CIPHER_SUITE_CCMP: + case WLAN_CIPHER_SUITE_GCMP: + case WLAN_CIPHER_SUITE_GCMP_256: + iwl_mld_set_key_rx_seq_tids(key, + key_data->gtk.aes_seq); + break; + case WLAN_CIPHER_SUITE_TKIP: + iwl_mld_set_key_rx_seq_tids(key, + key_data->gtk.tkip_seq); + break; + case WLAN_CIPHER_SUITE_BIP_GMAC_128: + case WLAN_CIPHER_SUITE_BIP_GMAC_256: + case WLAN_CIPHER_SUITE_BIP_CMAC_256: + case WLAN_CIPHER_SUITE_AES_CMAC: + /* igtk/bigtk ciphers */ + ieee80211_set_key_rx_seq(key, 0, + &key_data->igtk_bigtk.cmac_gmac_seq); + break; + default: + WARN_ON(1); + } +} + +static void +iwl_mld_d3_update_mcast_key(struct iwl_mld *mld, + struct ieee80211_vif *vif, + struct iwl_mld_wowlan_status *wowlan_status, + struct ieee80211_key_conf *key, + struct iwl_mld_mcast_key_data *key_data) +{ + if (key->keyidx != key_data->id && + (key->keyidx < 4 || key->keyidx > 5)) { + IWL_ERR(mld, + "Unexpected keyId mismatch. Old keyId:%d, New keyId:%d\n", + key->keyidx, key_data->id); + return; + } + + /* All installed keys are sent by the FW, even if they weren't + * rekeyed during D3. + * We remove an existing key if it has the same index as + * a new key and a rekey has occurred during d3 + */ + if (wowlan_status->num_of_gtk_rekeys && key_data->len) { + if (key->keyidx == 4 || key->keyidx == 5) { + struct iwl_mld_vif *mld_vif = + iwl_mld_vif_from_mac80211(vif); + struct iwl_mld_link *mld_link; + int link_id = vif->active_links ? + __ffs(vif->active_links) : 0; + + mld_link = iwl_mld_link_dereference_check(mld_vif, + link_id); + if (WARN_ON(!mld_link)) + return; + + if (mld_link->igtk == key) + mld_link->igtk = NULL; + mld->num_igtks--; + } + + ieee80211_remove_key(key); + return; + } + + iwl_mld_set_key_rx_seq(key, key_data); +} + +static void +iwl_mld_update_ptk_rx_seq(struct iwl_mld *mld, + struct iwl_mld_wowlan_status *wowlan_status, + struct ieee80211_sta *sta, + struct ieee80211_key_conf *key, + bool is_tkip) +{ + struct iwl_mld_sta *mld_sta = + iwl_mld_sta_from_mac80211(sta); + struct iwl_mld_ptk_pn *mld_ptk_pn = + wiphy_dereference(mld->wiphy, + mld_sta->ptk_pn[key->keyidx]); + + iwl_mld_set_key_rx_seq_tids(key, is_tkip ?
+ wowlan_status->ptk.tkip_seq : + wowlan_status->ptk.aes_seq); + if (is_tkip) + return; + + if (WARN_ON(!mld_ptk_pn)) + return; + + for (int tid = 0; tid < IWL_MAX_TID_COUNT; tid++) { + for (int i = 1; i < mld->trans->num_rx_queues; i++) + memcpy(mld_ptk_pn->q[i].pn[tid], + wowlan_status->ptk.aes_seq[tid].ccmp.pn, + IEEE80211_CCMP_PN_LEN); + } +} + +static void +iwl_mld_resume_keys_iter(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ieee80211_key_conf *key, + void *_data) +{ + struct iwl_mld_resume_key_iter_data *data = _data; + struct iwl_mld_wowlan_status *wowlan_status = data->wowlan_status; + u8 status_idx; + + /* TODO: check key link id (task=MLO) */ + if (data->unhandled_cipher) + return; + + switch (key->cipher) { + case WLAN_CIPHER_SUITE_WEP40: + case WLAN_CIPHER_SUITE_WEP104: + /* ignore WEP completely, nothing to do */ + return; + case WLAN_CIPHER_SUITE_CCMP: + case WLAN_CIPHER_SUITE_GCMP: + case WLAN_CIPHER_SUITE_GCMP_256: + case WLAN_CIPHER_SUITE_TKIP: + if (sta) { + iwl_mld_update_ptk_rx_seq(data->mld, wowlan_status, + sta, key, + key->cipher == + WLAN_CIPHER_SUITE_TKIP); + return; + } + + if (WARN_ON(data->gtk_cipher && + data->gtk_cipher != key->cipher)) + return; + + data->gtk_cipher = key->cipher; + status_idx = key->keyidx == wowlan_status->gtk[1].id; + iwl_mld_d3_update_mcast_key(data->mld, vif, wowlan_status, key, + &wowlan_status->gtk[status_idx]); + break; + case WLAN_CIPHER_SUITE_BIP_GMAC_128: + case WLAN_CIPHER_SUITE_BIP_GMAC_256: + case WLAN_CIPHER_SUITE_BIP_CMAC_256: + case WLAN_CIPHER_SUITE_AES_CMAC: + if (key->keyidx == 4 || key->keyidx == 5) { + if (WARN_ON(data->igtk_cipher && + data->igtk_cipher != key->cipher)) + return; + + data->igtk_cipher = key->cipher; + iwl_mld_d3_update_mcast_key(data->mld, vif, + wowlan_status, + key, &wowlan_status->igtk); + } + if (key->keyidx == 6 || key->keyidx == 7) { + if (WARN_ON(data->bigtk_cipher && + data->bigtk_cipher != key->cipher)) + return; + + data->bigtk_cipher = key->cipher; + status_idx = key->keyidx == wowlan_status->bigtk[1].id; + iwl_mld_d3_update_mcast_key(data->mld, vif, + wowlan_status, key, + &wowlan_status->bigtk[status_idx]); + } + break; + default: + data->unhandled_cipher = true; + return; + } + data->num_keys++; +} + +static bool +iwl_mld_add_mcast_rekey(struct ieee80211_vif *vif, + struct iwl_mld *mld, + struct iwl_mld_mcast_key_data *key_data, + struct ieee80211_bss_conf *link_conf, + u32 cipher) +{ + struct ieee80211_key_conf *key_config; + struct { + struct ieee80211_key_conf conf; + u8 key[WOWLAN_KEY_MAX_SIZE]; + } conf = { + .conf.cipher = cipher, + .conf.keyidx = key_data->id, + }; + int link_id = vif->active_links ? 
__ffs(vif->active_links) : -1; + + BUILD_BUG_ON(WLAN_KEY_LEN_CCMP != WLAN_KEY_LEN_GCMP); + BUILD_BUG_ON(sizeof(conf.key) < WLAN_KEY_LEN_CCMP); + BUILD_BUG_ON(sizeof(conf.key) < WLAN_KEY_LEN_GCMP_256); + BUILD_BUG_ON(sizeof(conf.key) < WLAN_KEY_LEN_TKIP); + BUILD_BUG_ON(sizeof(conf.key) < WLAN_KEY_LEN_BIP_GMAC_128); + BUILD_BUG_ON(sizeof(conf.key) < WLAN_KEY_LEN_BIP_GMAC_256); + BUILD_BUG_ON(sizeof(conf.key) < WLAN_KEY_LEN_AES_CMAC); + BUILD_BUG_ON(sizeof(conf.key) < sizeof(key_data->key)); + + if (!key_data->len) + return true; + + switch (cipher) { + case WLAN_CIPHER_SUITE_CCMP: + case WLAN_CIPHER_SUITE_GCMP: + conf.conf.keylen = WLAN_KEY_LEN_CCMP; + break; + case WLAN_CIPHER_SUITE_GCMP_256: + conf.conf.keylen = WLAN_KEY_LEN_GCMP_256; + break; + case WLAN_CIPHER_SUITE_TKIP: + conf.conf.keylen = WLAN_KEY_LEN_TKIP; + break; + case WLAN_CIPHER_SUITE_BIP_GMAC_128: + conf.conf.keylen = WLAN_KEY_LEN_BIP_GMAC_128; + break; + case WLAN_CIPHER_SUITE_BIP_GMAC_256: + conf.conf.keylen = WLAN_KEY_LEN_BIP_GMAC_256; + break; + case WLAN_CIPHER_SUITE_AES_CMAC: + conf.conf.keylen = WLAN_KEY_LEN_AES_CMAC; + break; + case WLAN_CIPHER_SUITE_BIP_CMAC_256: + conf.conf.keylen = WLAN_KEY_LEN_BIP_CMAC_256; + break; + default: + WARN_ON(1); + } + + memcpy(conf.conf.key, key_data->key, conf.conf.keylen); + key_config = ieee80211_gtk_rekey_add(vif, &conf.conf, link_id); + if (IS_ERR(key_config)) + return false; + + iwl_mld_set_key_rx_seq(key_config, key_data); + + /* The FW holds only one igtk so we keep track of the valid one */ + if (key_config->keyidx == 4 || key_config->keyidx == 5) { + struct iwl_mld_link *mld_link = + iwl_mld_link_from_mac80211(link_conf); + mld_link->igtk = key_config; + mld->num_igtks++; + } + return true; +} + +static bool +iwl_mld_add_all_rekeys(struct ieee80211_vif *vif, + struct iwl_mld_wowlan_status *wowlan_status, + struct iwl_mld_resume_key_iter_data *key_iter_data, + struct ieee80211_bss_conf *link_conf) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(wowlan_status->gtk); i++) + if (!iwl_mld_add_mcast_rekey(vif, key_iter_data->mld, + &wowlan_status->gtk[i], + link_conf, + key_iter_data->gtk_cipher)) + return false; + + if (!iwl_mld_add_mcast_rekey(vif, key_iter_data->mld, + &wowlan_status->igtk, + link_conf, key_iter_data->igtk_cipher)) + return false; + + for (i = 0; i < ARRAY_SIZE(wowlan_status->bigtk); i++) + if (!iwl_mld_add_mcast_rekey(vif, key_iter_data->mld, + &wowlan_status->bigtk[i], + link_conf, + key_iter_data->bigtk_cipher)) + return false; + + return true; +} + +static bool +iwl_mld_update_sec_keys(struct iwl_mld *mld, + struct ieee80211_vif *vif, + struct iwl_mld_wowlan_status *wowlan_status) +{ + int link_id = vif->active_links ? 
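/* (Illustrative note, not part of the patch: this function first replays
 * the firmware-reported sequence counters onto the keys mac80211 already
 * holds, and only when a rekey actually happened during D3 does it install
 * the new GTK/IGTK/BIGTK material and signal the supplicant through
 * ieee80211_gtk_rekey_notify() with the replay counter in big-endian
 * form.) */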
__ffs(vif->active_links) : 0; + struct ieee80211_bss_conf *link_conf = + link_conf_dereference_protected(vif, link_id); + __be64 replay_ctr = cpu_to_be64(wowlan_status->replay_ctr); + struct iwl_mld_resume_key_iter_data key_iter_data = { + .mld = mld, + .wowlan_status = wowlan_status, + }; + + if (WARN_ON(!link_conf)) + return false; + + ieee80211_iter_keys(mld->hw, vif, iwl_mld_resume_keys_iter, + &key_iter_data); + + if (key_iter_data.unhandled_cipher) + return false; + + IWL_DEBUG_WOWLAN(mld, + "Number of installed keys: %d, Number of rekeys: %d\n", + key_iter_data.num_keys, + wowlan_status->num_of_gtk_rekeys); + + if (!key_iter_data.num_keys || !wowlan_status->num_of_gtk_rekeys) + return true; + + iwl_mld_add_all_rekeys(vif, wowlan_status, &key_iter_data, + link_conf); + + ieee80211_gtk_rekey_notify(vif, link_conf->bssid, + (void *)&replay_ctr, GFP_KERNEL); + /* TODO: MLO rekey (task=MLO) */ + return true; +} + +static bool +iwl_mld_process_wowlan_status(struct iwl_mld *mld, + struct ieee80211_vif *vif, + struct iwl_mld_wowlan_status *wowlan_status) +{ + struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif); + struct ieee80211_sta *ap_sta = mld_vif->ap_sta; + struct iwl_mld_txq *mld_txq; + + iwl_mld_report_wowlan_wakeup(mld, vif, wowlan_status); + + if (WARN_ON(!ap_sta)) + return false; + + mld_txq = + iwl_mld_txq_from_mac80211(ap_sta->txq[wowlan_status->tid_offloaded_tx]); + + /* Update the pointers of the Tx queue that may have moved during + * suspend if the firmware sent frames. + * The firmware stores last-used value, we store next value. + */ + WARN_ON(!mld_txq->status.allocated); + iwl_trans_set_q_ptrs(mld->trans, mld_txq->fw_id, + (wowlan_status->last_qos_seq + + 0x10) >> 4); + + if (!iwl_mld_update_sec_keys(mld, vif, wowlan_status)) + return false; + + if (wowlan_status->wakeup_reasons & + (IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON | + IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH | + IWL_WOWLAN_WAKEUP_BY_GTK_REKEY_FAILURE)) + return false; + + return true; +} + +static bool +iwl_mld_netdetect_match_info_handler(struct iwl_mld *mld, + struct iwl_mld_resume_data *resume_data, + struct iwl_rx_packet *pkt) +{ + struct iwl_mld_netdetect_res *results = resume_data->netdetect_res; + const struct iwl_scan_offload_match_info *notif = (void *)pkt->data; + u32 len = iwl_rx_packet_payload_len(pkt); + + if (IWL_FW_CHECK(mld, !mld->netdetect, + "Got scan match info notif when mld->netdetect==%d\n", + mld->netdetect)) + return true; + + if (IWL_FW_CHECK(mld, len < sizeof(*notif), + "Invalid scan offload match notif of length: %d\n", + len)) + return true; + + if (IWL_FW_CHECK(mld, resume_data->wowlan_status->wakeup_reasons != + IWL_WOWLAN_WAKEUP_BY_NON_WIRELESS, + "Ignore scan match info: unexpected wakeup reason (expected=0x%x got=0x%x)\n", + IWL_WOWLAN_WAKEUP_BY_NON_WIRELESS, + resume_data->wowlan_status->wakeup_reasons)) + return true; + + results->matched_profiles = le32_to_cpu(notif->matched_profiles); + IWL_DEBUG_WOWLAN(mld, "number of matched profiles=%u\n", + results->matched_profiles); + + if (results->matched_profiles) + memcpy(results->matches, notif->matches, + NETDETECT_QUERY_BUF_LEN); + + /* No scan should be active at this point */ + mld->scan.status = 0; + memset(mld->scan.uid_status, 0, sizeof(mld->scan.uid_status)); + return false; +} + +static void +iwl_mld_set_netdetect_info(struct iwl_mld *mld, + const struct cfg80211_sched_scan_request *netdetect_cfg, + struct cfg80211_wowlan_nd_info *netdetect_info, + struct iwl_mld_netdetect_res *netdetect_res, + 
unsigned long matched_profiles) +{ + int i; + + for_each_set_bit(i, &matched_profiles, netdetect_cfg->n_match_sets) { + struct cfg80211_wowlan_nd_match *match; + int idx, j, n_channels = 0; + struct iwl_scan_offload_profile_match *matches = + (void *)netdetect_res->matches; + + for (int k = 0; k < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN; k++) + n_channels += + hweight8(matches[i].matching_channels[k]); + match = kzalloc(struct_size(match, channels, n_channels), + GFP_KERNEL); + if (!match) + return; + + netdetect_info->matches[netdetect_info->n_matches++] = match; + + /* We inverted the order of the SSIDs in the scan + * request, so invert the index here. + */ + idx = netdetect_cfg->n_match_sets - i - 1; + match->ssid.ssid_len = + netdetect_cfg->match_sets[idx].ssid.ssid_len; + memcpy(match->ssid.ssid, + netdetect_cfg->match_sets[idx].ssid.ssid, + match->ssid.ssid_len); + + if (netdetect_cfg->n_channels < n_channels) + continue; + + for_each_set_bit(j, + (unsigned long *)&matches[i].matching_channels[0], + sizeof(matches[i].matching_channels)) + match->channels[match->n_channels++] = + netdetect_cfg->channels[j]->center_freq; + } +} + +static void +iwl_mld_process_netdetect_res(struct iwl_mld *mld, + struct ieee80211_vif *vif, + struct iwl_mld_resume_data *resume_data) +{ + struct cfg80211_wowlan_nd_info *netdetect_info = NULL; + const struct cfg80211_sched_scan_request *netdetect_cfg; + struct cfg80211_wowlan_wakeup wakeup = { + .pattern_idx = -1, + }; + struct cfg80211_wowlan_wakeup *wakeup_report = &wakeup; + unsigned long matched_profiles; + u32 wakeup_reasons; + int n_matches; + + lockdep_assert_wiphy(mld->wiphy); + + if (WARN_ON(!mld->wiphy->wowlan_config || + !mld->wiphy->wowlan_config->nd_config)) { + IWL_DEBUG_WOWLAN(mld, + "Netdetect isn't configured on resume flow\n"); + goto out; + } + + netdetect_cfg = mld->wiphy->wowlan_config->nd_config; + wakeup_reasons = resume_data->wowlan_status->wakeup_reasons; + + if (wakeup_reasons & IWL_WOWLAN_WAKEUP_BY_RFKILL_DEASSERTED) + wakeup.rfkill_release = true; + + if (wakeup_reasons != IWL_WOWLAN_WAKEUP_BY_NON_WIRELESS) + goto out; + + if (!resume_data->netdetect_res->matched_profiles) { + IWL_DEBUG_WOWLAN(mld, + "Netdetect results aren't valid\n"); + wakeup_report = NULL; + goto out; + } + + matched_profiles = resume_data->netdetect_res->matched_profiles; + if (!netdetect_cfg->n_match_sets) { + IWL_DEBUG_WOWLAN(mld, + "No netdetect match sets are configured\n"); + goto out; + } + n_matches = hweight_long(matched_profiles); + netdetect_info = kzalloc(struct_size(netdetect_info, matches, + n_matches), GFP_KERNEL); + if (netdetect_info) + iwl_mld_set_netdetect_info(mld, netdetect_cfg, netdetect_info, + resume_data->netdetect_res, + matched_profiles); + + wakeup.net_detect = netdetect_info; + out: + ieee80211_report_wowlan_wakeup(vif, wakeup_report, GFP_KERNEL); + if (netdetect_info) { + for (int i = 0; i < netdetect_info->n_matches; i++) + kfree(netdetect_info->matches[i]); + kfree(netdetect_info); + } +} + +static bool iwl_mld_handle_d3_notif(struct iwl_notif_wait_data *notif_wait, + struct iwl_rx_packet *pkt, void *data) +{ + struct iwl_mld_resume_data *resume_data = data; + struct iwl_mld *mld = + container_of(notif_wait, struct iwl_mld, notif_wait); + + switch (WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd)) { + case WIDE_ID(PROT_OFFLOAD_GROUP, WOWLAN_INFO_NOTIFICATION): { + if (resume_data->notifs_received & IWL_D3_NOTIF_WOWLAN_INFO) { + IWL_DEBUG_WOWLAN(mld, + "got additional wowlan_info notif\n"); + break; + } + resume_data->notif_handling_err = 
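/* (Worked example, not part of the patch, for iwl_mld_set_netdetect_info()
 * above: with n_match_sets = 3, a firmware match on profile bit i = 0 maps
 * back to cfg80211 match set idx = 3 - 0 - 1 = 2, undoing the SSID-order
 * inversion applied when the scan request was built.) */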
+ iwl_mld_handle_wowlan_info_notif(mld, + resume_data->wowlan_status, + pkt); + resume_data->notifs_received |= IWL_D3_NOTIF_WOWLAN_INFO; + + if (resume_data->wowlan_status->wakeup_reasons & + IWL_WOWLAN_WAKEUP_REASON_HAS_WAKEUP_PKT) + resume_data->notifs_expected |= + IWL_D3_NOTIF_WOWLAN_WAKE_PKT; + break; + } + case WIDE_ID(PROT_OFFLOAD_GROUP, WOWLAN_WAKE_PKT_NOTIFICATION): { + if (resume_data->notifs_received & + IWL_D3_NOTIF_WOWLAN_WAKE_PKT) { + /* We shouldn't get two wake packet notifications */ + IWL_DEBUG_WOWLAN(mld, + "Got additional wowlan wake packet notification\n"); + break; + } + resume_data->notif_handling_err = + iwl_mld_handle_wake_pkt_notif(mld, + resume_data->wowlan_status, + pkt); + resume_data->notifs_received |= IWL_D3_NOTIF_WOWLAN_WAKE_PKT; + break; + } + case WIDE_ID(SCAN_GROUP, OFFLOAD_MATCH_INFO_NOTIF): { + if (resume_data->notifs_received & IWL_D3_ND_MATCH_INFO) { + IWL_ERR(mld, + "Got additional netdetect match info\n"); + break; + } + + resume_data->notif_handling_err = + iwl_mld_netdetect_match_info_handler(mld, resume_data, + pkt); + resume_data->notifs_received |= IWL_D3_ND_MATCH_INFO; + break; + } + case WIDE_ID(PROT_OFFLOAD_GROUP, D3_END_NOTIFICATION): { + struct iwl_d3_end_notif *notif = (void *)pkt->data; + + resume_data->d3_end_flags = le32_to_cpu(notif->flags); + resume_data->notifs_received |= IWL_D3_NOTIF_D3_END_NOTIF; + break; + } + default: + WARN_ON(1); + } + + return resume_data->notifs_received == resume_data->notifs_expected; +} + +#define IWL_MLD_D3_NOTIF_TIMEOUT (HZ / 3) + +static int iwl_mld_wait_d3_notif(struct iwl_mld *mld, + struct iwl_mld_resume_data *resume_data, + bool with_wowlan) +{ + static const u16 wowlan_resume_notif[] = { + WIDE_ID(PROT_OFFLOAD_GROUP, WOWLAN_INFO_NOTIFICATION), + WIDE_ID(PROT_OFFLOAD_GROUP, WOWLAN_WAKE_PKT_NOTIFICATION), + WIDE_ID(SCAN_GROUP, OFFLOAD_MATCH_INFO_NOTIF), + WIDE_ID(PROT_OFFLOAD_GROUP, D3_END_NOTIFICATION) + }; + static const u16 d3_resume_notif[] = { + WIDE_ID(PROT_OFFLOAD_GROUP, D3_END_NOTIFICATION) + }; + struct iwl_notification_wait wait_d3_notif; + enum iwl_d3_status d3_status; + int ret; + + if (with_wowlan) + iwl_init_notification_wait(&mld->notif_wait, &wait_d3_notif, + wowlan_resume_notif, + ARRAY_SIZE(wowlan_resume_notif), + iwl_mld_handle_d3_notif, + resume_data); + else + iwl_init_notification_wait(&mld->notif_wait, &wait_d3_notif, + d3_resume_notif, + ARRAY_SIZE(d3_resume_notif), + iwl_mld_handle_d3_notif, + resume_data); + + ret = iwl_trans_d3_resume(mld->trans, &d3_status, false, false); + if (ret || d3_status != IWL_D3_STATUS_ALIVE) { + if (d3_status != IWL_D3_STATUS_ALIVE) { + IWL_INFO(mld, "Device was reset during suspend\n"); + ret = -ENOENT; + } else { + IWL_ERR(mld, "Transport resume failed\n"); + } + iwl_remove_notification(&mld->notif_wait, &wait_d3_notif); + return ret; + } + + ret = iwl_wait_notification(&mld->notif_wait, &wait_d3_notif, + IWL_MLD_D3_NOTIF_TIMEOUT); + if (ret) + IWL_ERR(mld, "Couldn't get the d3 notif %d\n", ret); + + if (resume_data->notif_handling_err) + ret = -EIO; + + return ret; +} + +int iwl_mld_no_wowlan_suspend(struct iwl_mld *mld) +{ + struct iwl_d3_manager_config d3_cfg_cmd_data = {}; + int ret; + + lockdep_assert_wiphy(mld->wiphy); + + IWL_DEBUG_WOWLAN(mld, "Starting the no wowlan suspend flow\n"); + + iwl_mld_low_latency_stop(mld); + + /* This will happen if iwl_mld_suspend failed with FW error */ + if (mld->trans->state == IWL_TRANS_NO_FW && + test_bit(STATUS_FW_ERROR, &mld->trans->status)) + return -ENODEV; + + ret =
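/* (Illustrative note, not part of the patch: the no-wowlan suspend order is
 * device power configuration, then D3_CONFIG_CMD, then the transport-level
 * D3 entry; any failure below marks the firmware as errored so the resume
 * path knows a full restart is required.) */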
iwl_mld_update_device_power(mld, true); + if (ret) { + IWL_ERR(mld, + "d3 suspend: couldn't send power_device %d\n", ret); + goto out; + } + + ret = iwl_mld_send_cmd_pdu(mld, D3_CONFIG_CMD, + &d3_cfg_cmd_data); + if (ret) { + IWL_ERR(mld, + "d3 suspend: couldn't send D3_CONFIG_CMD %d\n", ret); + goto out; + } + + ret = iwl_trans_d3_suspend(mld->trans, false, false); + if (ret) { + IWL_ERR(mld, "d3 suspend: trans_d3_suspend failed %d\n", ret); + } else { + mld->trans->system_pm_mode = IWL_PLAT_PM_MODE_D3; + mld->fw_status.in_d3 = true; + } + + out: + if (ret) { + mld->trans->state = IWL_TRANS_NO_FW; + set_bit(STATUS_FW_ERROR, &mld->trans->status); + } + + return ret; +} + +int iwl_mld_no_wowlan_resume(struct iwl_mld *mld) +{ + struct iwl_mld_resume_data resume_data = { + .notifs_expected = + IWL_D3_NOTIF_D3_END_NOTIF, + }; + int ret; + + lockdep_assert_wiphy(mld->wiphy); + + IWL_DEBUG_WOWLAN(mld, "Starting the no wowlan resume flow\n"); + + mld->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED; + mld->fw_status.in_d3 = false; + iwl_fw_dbg_read_d3_debug_data(&mld->fwrt); + + if (iwl_mld_fw_needs_restart(mld, NULL)) + ret = -ENODEV; + else + ret = iwl_mld_wait_d3_notif(mld, &resume_data, false); + + if (!ret && (resume_data.d3_end_flags & IWL_D0I3_RESET_REQUIRE)) + return -ENODEV; + + if (ret) { + mld->trans->state = IWL_TRANS_NO_FW; + set_bit(STATUS_FW_ERROR, &mld->trans->status); + return ret; + } + iwl_mld_low_latency_restart(mld); + + return iwl_mld_update_device_power(mld, false); +} + +static void +iwl_mld_aes_seq_to_le64_pn(struct ieee80211_key_conf *key, + __le64 *key_rsc) +{ + for (int i = 0; i < IWL_MAX_TID_COUNT; i++) { + struct ieee80211_key_seq seq; + u8 *pn = key->cipher == WLAN_CIPHER_SUITE_CCMP ? seq.ccmp.pn : + seq.gcmp.pn; + + ieee80211_get_key_rx_seq(key, i, &seq); + key_rsc[i] = cpu_to_le64((u64)pn[5] | + ((u64)pn[4] << 8) | + ((u64)pn[3] << 16) | + ((u64)pn[2] << 24) | + ((u64)pn[1] << 32) | + ((u64)pn[0] << 40)); + } +} + +static void +iwl_mld_suspend_set_ucast_pn(struct iwl_mld *mld, struct ieee80211_sta *sta, + struct ieee80211_key_conf *key, __le64 *key_rsc) +{ + struct iwl_mld_sta *mld_sta = + iwl_mld_sta_from_mac80211(sta); + struct iwl_mld_ptk_pn *mld_ptk_pn; + + if (WARN_ON(key->keyidx >= ARRAY_SIZE(mld_sta->ptk_pn))) + return; + + mld_ptk_pn = wiphy_dereference(mld->wiphy, + mld_sta->ptk_pn[key->keyidx]); + if (WARN_ON(!mld_ptk_pn)) + return; + + for (int tid = 0; tid < IWL_MAX_TID_COUNT; tid++) { + struct ieee80211_key_seq seq; + u8 *max_pn = seq.ccmp.pn; + + /* get the PN from mac80211, used on the default queue */ + ieee80211_get_key_rx_seq(key, tid, &seq); + + /* and use the internal data for all queues */ + for (int que = 1; que < mld->trans->num_rx_queues; que++) { + u8 *cur_pn = mld_ptk_pn->q[que].pn[tid]; + + if (memcmp(max_pn, cur_pn, IEEE80211_CCMP_PN_LEN) < 0) + max_pn = cur_pn; + } + key_rsc[tid] = cpu_to_le64((u64)max_pn[5] | + ((u64)max_pn[4] << 8) | + ((u64)max_pn[3] << 16) | + ((u64)max_pn[2] << 24) | + ((u64)max_pn[1] << 32) | + ((u64)max_pn[0] << 40)); + } +} + +static void +iwl_mld_suspend_convert_tkip_ipn(struct ieee80211_key_conf *key, + __le64 *rsc) +{ + struct ieee80211_key_seq seq; + + for (int i = 0; i < IWL_MAX_TID_COUNT; i++) { + ieee80211_get_key_rx_seq(key, i, &seq); + rsc[i] = + cpu_to_le64(((u64)seq.tkip.iv32 << 16) | + seq.tkip.iv16); + } +} + +static void +iwl_mld_suspend_key_data_iter(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ieee80211_key_conf *key, + void *_data) +{ + struct 
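/* (Note, not part of the patch: CCMP receive PNs are tracked per RX queue,
 * so iwl_mld_suspend_set_ucast_pn() above hands the firmware the highest PN
 * seen on any queue; replay protection must use the maximum, not the
 * default queue's value, and the big-endian PN byte layout is what makes
 * the plain memcmp() comparison valid.) */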
iwl_mld *mld = IWL_MAC80211_GET_MLD(hw); + struct iwl_mld_suspend_key_iter_data *data = _data; + __le64 *key_rsc; + __le32 cipher = 0; + + switch (key->cipher) { + case WLAN_CIPHER_SUITE_CCMP: + cipher = cpu_to_le32(STA_KEY_FLG_CCM); + fallthrough; + case WLAN_CIPHER_SUITE_GCMP: + case WLAN_CIPHER_SUITE_GCMP_256: + if (!cipher) + cipher = cpu_to_le32(STA_KEY_FLG_GCMP); + fallthrough; + case WLAN_CIPHER_SUITE_TKIP: + if (!cipher) + cipher = cpu_to_le32(STA_KEY_FLG_TKIP); + if (sta) { + key_rsc = data->rsc->ucast_rsc; + if (key->cipher == WLAN_CIPHER_SUITE_TKIP) + iwl_mld_suspend_convert_tkip_ipn(key, key_rsc); + else + iwl_mld_suspend_set_ucast_pn(mld, sta, key, + key_rsc); + + data->have_rsc = true; + return; + } + /* We're iterating from old to new, there're 4 possible + * gtk ids, and only the last two keys matter + */ + if (WARN_ON(data->gtks >= + ARRAY_SIZE(data->found_gtk_idx))) + return; + + if (WARN_ON(key->keyidx >= + ARRAY_SIZE(data->rsc->mcast_key_id_map))) + return; + data->gtk_cipher = cipher; + data->found_gtk_idx[data->gtks] = key->keyidx; + key_rsc = data->rsc->mcast_rsc[data->gtks % 2]; + data->rsc->mcast_key_id_map[key->keyidx] = + data->gtks % 2; + + if (data->gtks >= 2) { + int prev = data->gtks % 2; + int prev_idx = data->found_gtk_idx[prev]; + + data->rsc->mcast_key_id_map[prev_idx] = + IWL_MCAST_KEY_MAP_INVALID; + } + + if (key->cipher == WLAN_CIPHER_SUITE_TKIP) + iwl_mld_suspend_convert_tkip_ipn(key, key_rsc); + else + iwl_mld_aes_seq_to_le64_pn(key, key_rsc); + + data->gtks++; + data->have_rsc = true; + break; + case WLAN_CIPHER_SUITE_BIP_GMAC_128: + case WLAN_CIPHER_SUITE_BIP_GMAC_256: + cipher = cpu_to_le32(STA_KEY_FLG_GCMP); + fallthrough; + case WLAN_CIPHER_SUITE_BIP_CMAC_256: + case WLAN_CIPHER_SUITE_AES_CMAC: + if (!cipher) + cipher = cpu_to_le32(STA_KEY_FLG_CCM); + if (key->keyidx == 4 || key->keyidx == 5) + data->igtk_cipher = cipher; + + if (key->keyidx == 6 || key->keyidx == 7) + data->bigtk_cipher = cipher; + + break; + } +} + +static int +iwl_mld_send_kek_kck_cmd(struct iwl_mld *mld, + struct iwl_mld_vif *mld_vif, + struct iwl_mld_suspend_key_iter_data data, + int ap_sta_id) +{ + struct iwl_wowlan_kek_kck_material_cmd_v4 kek_kck_cmd = {}; + struct iwl_mld_rekey_data *rekey_data = + &mld_vif->wowlan_data.rekey_data; + + memcpy(kek_kck_cmd.kck, rekey_data->kck, + rekey_data->kck_len); + kek_kck_cmd.kck_len = cpu_to_le16(rekey_data->kck_len); + memcpy(kek_kck_cmd.kek, rekey_data->kek, + rekey_data->kek_len); + kek_kck_cmd.kek_len = cpu_to_le16(rekey_data->kek_len); + kek_kck_cmd.replay_ctr = rekey_data->replay_ctr; + kek_kck_cmd.akm = cpu_to_le32(rekey_data->akm); + kek_kck_cmd.sta_id = cpu_to_le32(ap_sta_id); + kek_kck_cmd.gtk_cipher = data.gtk_cipher; + kek_kck_cmd.igtk_cipher = data.igtk_cipher; + kek_kck_cmd.bigtk_cipher = data.bigtk_cipher; + + IWL_DEBUG_WOWLAN(mld, "setting akm %d\n", + rekey_data->akm); + + return iwl_mld_send_cmd_pdu(mld, WOWLAN_KEK_KCK_MATERIAL, + &kek_kck_cmd); +} + +static int +iwl_mld_suspend_send_security_cmds(struct iwl_mld *mld, + struct ieee80211_vif *vif, + struct iwl_mld_vif *mld_vif, + int ap_sta_id) +{ + struct iwl_mld_suspend_key_iter_data data = {}; + int ret; + + data.rsc = kzalloc(sizeof(*data.rsc), GFP_KERNEL); + if (!data.rsc) + return -ENOMEM; + + memset(data.rsc->mcast_key_id_map, IWL_MCAST_KEY_MAP_INVALID, + ARRAY_SIZE(data.rsc->mcast_key_id_map)); + + data.rsc->sta_id = cpu_to_le32(ap_sta_id); + ieee80211_iter_keys(mld->hw, vif, + iwl_mld_suspend_key_data_iter, + &data); + + if (data.have_rsc) + ret = 
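/* (Worked example, illustrative only, for the GTK bookkeeping in
 * iwl_mld_suspend_key_data_iter() above: iterating keys with ids 1, 2, 3
 * from oldest to newest gives map[1]=0 and map[2]=1, then key 3 reuses
 * RSC slot 0, so map[1] is marked IWL_MCAST_KEY_MAP_INVALID and only the
 * two newest keys, ids 2 and 3, keep valid slots.) */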
iwl_mld_send_cmd_pdu(mld, WOWLAN_TSC_RSC_PARAM, + data.rsc); + else + ret = 0; + + if (!ret && mld_vif->wowlan_data.rekey_data.valid) + ret = iwl_mld_send_kek_kck_cmd(mld, mld_vif, data, ap_sta_id); + + kfree(data.rsc); + + return ret; +} + +static void +iwl_mld_set_wowlan_config_cmd(struct iwl_mld *mld, + struct cfg80211_wowlan *wowlan, + struct iwl_wowlan_config_cmd *wowlan_config_cmd, + struct ieee80211_sta *ap_sta) +{ + wowlan_config_cmd->is_11n_connection = + ap_sta->deflink.ht_cap.ht_supported; + wowlan_config_cmd->flags = ENABLE_L3_FILTERING | + ENABLE_NBNS_FILTERING | ENABLE_DHCP_FILTERING; + + if (ap_sta->mfp) + wowlan_config_cmd->flags |= IS_11W_ASSOC; + + if (wowlan->disconnect) + wowlan_config_cmd->wakeup_filter |= + cpu_to_le32(IWL_WOWLAN_WAKEUP_BEACON_MISS | + IWL_WOWLAN_WAKEUP_LINK_CHANGE); + if (wowlan->magic_pkt) + wowlan_config_cmd->wakeup_filter |= + cpu_to_le32(IWL_WOWLAN_WAKEUP_MAGIC_PACKET); + if (wowlan->gtk_rekey_failure) + wowlan_config_cmd->wakeup_filter |= + cpu_to_le32(IWL_WOWLAN_WAKEUP_GTK_REKEY_FAIL); + if (wowlan->eap_identity_req) + wowlan_config_cmd->wakeup_filter |= + cpu_to_le32(IWL_WOWLAN_WAKEUP_EAP_IDENT_REQ); + if (wowlan->four_way_handshake) + wowlan_config_cmd->wakeup_filter |= + cpu_to_le32(IWL_WOWLAN_WAKEUP_4WAY_HANDSHAKE); + if (wowlan->n_patterns) + wowlan_config_cmd->wakeup_filter |= + cpu_to_le32(IWL_WOWLAN_WAKEUP_PATTERN_MATCH); + + if (wowlan->rfkill_release) + wowlan_config_cmd->wakeup_filter |= + cpu_to_le32(IWL_WOWLAN_WAKEUP_RF_KILL_DEASSERT); + + if (wowlan->any) { + wowlan_config_cmd->wakeup_filter |= + cpu_to_le32(IWL_WOWLAN_WAKEUP_BEACON_MISS | + IWL_WOWLAN_WAKEUP_LINK_CHANGE | + IWL_WOWLAN_WAKEUP_RX_FRAME | + IWL_WOWLAN_WAKEUP_BCN_FILTERING); + } +} + +static int iwl_mld_send_patterns(struct iwl_mld *mld, + struct cfg80211_wowlan *wowlan, + int ap_sta_id) +{ + struct iwl_wowlan_patterns_cmd *pattern_cmd; + struct iwl_host_cmd cmd = { + .id = WOWLAN_PATTERNS, + .dataflags[0] = IWL_HCMD_DFL_NOCOPY, + }; + int ret; + + if (!wowlan->n_patterns) + return 0; + + cmd.len[0] = struct_size(pattern_cmd, patterns, wowlan->n_patterns); + + pattern_cmd = kzalloc(cmd.len[0], GFP_KERNEL); + if (!pattern_cmd) + return -ENOMEM; + + pattern_cmd->n_patterns = wowlan->n_patterns; + pattern_cmd->sta_id = ap_sta_id; + + for (int i = 0; i < wowlan->n_patterns; i++) { + int mask_len = DIV_ROUND_UP(wowlan->patterns[i].pattern_len, 8); + + pattern_cmd->patterns[i].pattern_type = + WOWLAN_PATTERN_TYPE_BITMASK; + + memcpy(&pattern_cmd->patterns[i].u.bitmask.mask, + wowlan->patterns[i].mask, mask_len); + memcpy(&pattern_cmd->patterns[i].u.bitmask.pattern, + wowlan->patterns[i].pattern, + wowlan->patterns[i].pattern_len); + pattern_cmd->patterns[i].u.bitmask.mask_size = mask_len; + pattern_cmd->patterns[i].u.bitmask.pattern_size = + wowlan->patterns[i].pattern_len; + } + + cmd.data[0] = pattern_cmd; + ret = iwl_mld_send_cmd(mld, &cmd); + kfree(pattern_cmd); + return ret; +} + +static int +iwl_mld_send_proto_offload(struct iwl_mld *mld, + struct ieee80211_vif *vif, + u8 ap_sta_id) +{ + struct iwl_proto_offload_cmd_v4 *cmd __free(kfree); + struct iwl_host_cmd hcmd = { + .id = PROT_OFFLOAD_CONFIG_CMD, + .dataflags[0] = IWL_HCMD_DFL_NOCOPY, + .len[0] = sizeof(*cmd), + }; + u32 enabled = 0; + + cmd = kzalloc(hcmd.len[0], GFP_KERNEL); + +#if IS_ENABLED(CONFIG_IPV6) + struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif); + struct iwl_mld_wowlan_data *wowlan_data = &mld_vif->wowlan_data; + struct iwl_ns_config *nsc; + struct iwl_targ_addr *addrs; + int n_nsc, 
n_addrs; + int i, c; + int num_skipped = 0; + + nsc = cmd->ns_config; + n_nsc = IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3L; + addrs = cmd->targ_addrs; + n_addrs = IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3L; + + /* For each address we have (and that will fit) fill a target + * address struct and combine for NS offload structs with the + * solicited node addresses. + */ + for (i = 0, c = 0; + i < wowlan_data->num_target_ipv6_addrs && + i < n_addrs && c < n_nsc; i++) { + int j; + struct in6_addr solicited_addr; + + /* Because ns is offloaded skip tentative address to avoid + * violating RFC4862. + */ + if (test_bit(i, wowlan_data->tentative_addrs)) { + num_skipped++; + continue; + } + + addrconf_addr_solict_mult(&wowlan_data->target_ipv6_addrs[i], + &solicited_addr); + for (j = 0; j < c; j++) + if (ipv6_addr_cmp(&nsc[j].dest_ipv6_addr, + &solicited_addr) == 0) + break; + if (j == c) + c++; + addrs[i].addr = wowlan_data->target_ipv6_addrs[i]; + addrs[i].config_num = cpu_to_le32(j); + nsc[j].dest_ipv6_addr = solicited_addr; + memcpy(nsc[j].target_mac_addr, vif->addr, ETH_ALEN); + } + + if (wowlan_data->num_target_ipv6_addrs - num_skipped) + enabled |= IWL_D3_PROTO_IPV6_VALID; + + cmd->num_valid_ipv6_addrs = cpu_to_le32(i - num_skipped); + if (enabled & IWL_D3_PROTO_IPV6_VALID) + enabled |= IWL_D3_PROTO_OFFLOAD_NS; +#endif + + if (vif->cfg.arp_addr_cnt) { + enabled |= IWL_D3_PROTO_OFFLOAD_ARP | IWL_D3_PROTO_IPV4_VALID; + cmd->common.host_ipv4_addr = vif->cfg.arp_addr_list[0]; + ether_addr_copy(cmd->common.arp_mac_addr, vif->addr); + } + + enabled |= IWL_D3_PROTO_OFFLOAD_BTM; + cmd->common.enabled = cpu_to_le32(enabled); + cmd->sta_id = cpu_to_le32(ap_sta_id); + hcmd.data[0] = cmd; + return iwl_mld_send_cmd(mld, &hcmd); +} + +static int +iwl_mld_wowlan_config(struct iwl_mld *mld, struct ieee80211_vif *bss_vif, + struct cfg80211_wowlan *wowlan) +{ + struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(bss_vif); + struct ieee80211_sta *ap_sta = mld_vif->ap_sta; + struct iwl_wowlan_config_cmd wowlan_config_cmd = { + .offloading_tid = IWL_WOWLAN_OFFLOAD_TID, + }; + u32 sta_id_mask; + int ap_sta_id, ret; + int link_id = iwl_mld_get_primary_link(bss_vif); + struct ieee80211_bss_conf *link_conf; + + ret = iwl_mld_block_emlsr_sync(mld, bss_vif, + IWL_MLD_EMLSR_BLOCKED_WOWLAN, link_id); + if (ret) + return ret; + + link_conf = link_conf_dereference_protected(bss_vif, link_id); + + if (WARN_ON(!ap_sta || !link_conf)) + return -EINVAL; + + sta_id_mask = iwl_mld_fw_sta_id_mask(mld, ap_sta); + if (WARN_ON(hweight32(sta_id_mask) != 1)) + return -EINVAL; + + ap_sta_id = __ffs(sta_id_mask); + wowlan_config_cmd.sta_id = ap_sta_id; + + ret = iwl_mld_ensure_queue(mld, + ap_sta->txq[wowlan_config_cmd.offloading_tid]); + if (ret) + return ret; + + iwl_mld_set_wowlan_config_cmd(mld, wowlan, + &wowlan_config_cmd, ap_sta); + ret = iwl_mld_send_cmd_pdu(mld, WOWLAN_CONFIGURATION, + &wowlan_config_cmd); + if (ret) + return ret; + + ret = iwl_mld_suspend_send_security_cmds(mld, bss_vif, mld_vif, + ap_sta_id); + if (ret) + return ret; + + ret = iwl_mld_send_patterns(mld, wowlan, ap_sta_id); + if (ret) + return ret; + + ret = iwl_mld_send_proto_offload(mld, bss_vif, ap_sta_id); + if (ret) + return ret; + + iwl_mld_enable_beacon_filter(mld, link_conf, true); + return iwl_mld_update_mac_power(mld, bss_vif, true); +} + +int iwl_mld_wowlan_suspend(struct iwl_mld *mld, struct cfg80211_wowlan *wowlan) +{ + struct ieee80211_vif *bss_vif; + + lockdep_assert_wiphy(mld->wiphy); + + if (WARN_ON(!wowlan)) + return 1; + + IWL_DEBUG_WOWLAN(mld, 
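/* (Illustrative note, not part of the patch: in iwl_mld_send_proto_offload()
 * above, IPv6 addresses that share the same solicited-node multicast
 * address, i.e. addresses whose last three bytes match, are folded into a
 * single ns_config entry, and each targ_addr records which entry it belongs
 * to via config_num.) */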
"Starting the wowlan suspend flow\n"); + + bss_vif = iwl_mld_get_bss_vif(mld); + if (WARN_ON(!bss_vif)) + return 1; + + if (!bss_vif->cfg.assoc) { + int ret; + /* If we're not associated, this must be netdetect */ + if (WARN_ON(!wowlan->nd_config)) + return 1; + + ret = iwl_mld_netdetect_config(mld, bss_vif, wowlan); + if (!ret) + mld->netdetect = true; + + return ret; + } + + return iwl_mld_wowlan_config(mld, bss_vif, wowlan); +} + +/* Returns 0 on success, 1 if an error occurred in firmware during d3, + * A negative value is expected only in unrecovreable cases. + */ +int iwl_mld_wowlan_resume(struct iwl_mld *mld) +{ + struct ieee80211_vif *bss_vif; + struct ieee80211_bss_conf *link_conf; + struct iwl_mld_netdetect_res netdetect_res; + struct iwl_mld_resume_data resume_data = { + .notifs_expected = + IWL_D3_NOTIF_WOWLAN_INFO | + IWL_D3_NOTIF_D3_END_NOTIF, + .netdetect_res = &netdetect_res, + }; + int link_id; + int ret; + bool fw_err = false; + bool keep_connection; + + lockdep_assert_wiphy(mld->wiphy); + + IWL_DEBUG_WOWLAN(mld, "Starting the wowlan resume flow\n"); + + mld->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED; + if (!mld->fw_status.in_d3) { + IWL_DEBUG_WOWLAN(mld, + "Device_powered_off() was called during wowlan\n"); + goto err; + } + + mld->fw_status.in_d3 = false; + mld->scan.last_start_time_jiffies = jiffies; + + bss_vif = iwl_mld_get_bss_vif(mld); + if (WARN_ON(!bss_vif)) + goto err; + + /* We can't have several links upon wowlan entry, + * this is enforced in the suspend flow. + */ + WARN_ON(hweight16(bss_vif->active_links) > 1); + link_id = bss_vif->active_links ? __ffs(bss_vif->active_links) : 0; + link_conf = link_conf_dereference_protected(bss_vif, link_id); + + if (WARN_ON(!link_conf)) + goto err; + + iwl_fw_dbg_read_d3_debug_data(&mld->fwrt); + + if (iwl_mld_fw_needs_restart(mld, bss_vif)) { + fw_err = true; + goto err; + } + + resume_data.wowlan_status = kzalloc(sizeof(*resume_data.wowlan_status), + GFP_KERNEL); + if (!resume_data.wowlan_status) + return -1; + + if (mld->netdetect) + resume_data.notifs_expected |= IWL_D3_ND_MATCH_INFO; + + ret = iwl_mld_wait_d3_notif(mld, &resume_data, true); + if (ret) { + IWL_ERR(mld, "Couldn't get the d3 notifs %d\n", ret); + fw_err = true; + goto err; + } + + if (resume_data.d3_end_flags & IWL_D0I3_RESET_REQUIRE) { + mld->fw_status.in_hw_restart = true; + goto process_wakeup_results; + } + + iwl_mld_update_changed_regdomain(mld); + iwl_mld_update_mac_power(mld, bss_vif, false); + iwl_mld_enable_beacon_filter(mld, link_conf, false); + iwl_mld_update_device_power(mld, false); + + if (mld->netdetect) + ret = iwl_mld_scan_stop(mld, IWL_MLD_SCAN_NETDETECT, false); + + process_wakeup_results: + if (mld->netdetect) { + iwl_mld_process_netdetect_res(mld, bss_vif, &resume_data); + mld->netdetect = false; + } else { + keep_connection = + iwl_mld_process_wowlan_status(mld, bss_vif, + resume_data.wowlan_status); + + /* EMLSR state will be cleared if the connection is not kept */ + if (keep_connection) + iwl_mld_unblock_emlsr(mld, bss_vif, + IWL_MLD_EMLSR_BLOCKED_WOWLAN); + } + + if (!mld->netdetect && !keep_connection) + ieee80211_resume_disconnect(bss_vif); + + goto out; + + err: + if (fw_err) { + mld->trans->state = IWL_TRANS_NO_FW; + set_bit(STATUS_FW_ERROR, &mld->trans->status); + } + + mld->fw_status.in_hw_restart = true; + ret = 1; + out: + if (resume_data.wowlan_status) { + kfree(resume_data.wowlan_status->wake_packet); + kfree(resume_data.wowlan_status); + } + + return ret; +} diff --git 
a/drivers/net/wireless/intel/iwlwifi/mld/d3.h b/drivers/net/wireless/intel/iwlwifi/mld/d3.h new file mode 100644 index 0000000000000..618d6fb3c7964 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mld/d3.h @@ -0,0 +1,51 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * Copyright (C) 2024 Intel Corporation + */ +#ifndef __iwl_mld_d3_h__ +#define __iwl_mld_d3_h__ + +#include "fw/api/d3.h" + +struct iwl_mld_rekey_data { + bool valid; + u8 kck[NL80211_KCK_EXT_LEN]; + u8 kek[NL80211_KEK_EXT_LEN]; + size_t kck_len; + size_t kek_len; + __le64 replay_ctr; + u32 akm; +}; + +/** + * struct iwl_mld_wowlan_data - data used by the wowlan suspend flow + * + * @target_ipv6_addrs: IPv6 addresses on this interface for offload + * @tentative_addrs: bitmap of tentative IPv6 addresses in @target_ipv6_addrs + * @num_target_ipv6_addrs: number of @target_ipv6_addrs + * @rekey_data: security key data used for rekeying during D3 + */ +struct iwl_mld_wowlan_data { +#if IS_ENABLED(CONFIG_IPV6) + struct in6_addr target_ipv6_addrs[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_MAX]; + unsigned long tentative_addrs[BITS_TO_LONGS(IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_MAX)]; + int num_target_ipv6_addrs; +#endif + struct iwl_mld_rekey_data rekey_data; +}; + +int iwl_mld_no_wowlan_resume(struct iwl_mld *mld); +int iwl_mld_no_wowlan_suspend(struct iwl_mld *mld); +int iwl_mld_wowlan_suspend(struct iwl_mld *mld, + struct cfg80211_wowlan *wowlan); +int iwl_mld_wowlan_resume(struct iwl_mld *mld); +void iwl_mld_set_rekey_data(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct cfg80211_gtk_rekey_data *data); +#if IS_ENABLED(CONFIG_IPV6) +void iwl_mld_ipv6_addr_change(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct inet6_dev *idev); +#endif + +#endif /* __iwl_mld_d3_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mld/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mld/debugfs.c new file mode 100644 index 0000000000000..453ce2ba39d1f --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mld/debugfs.c @@ -0,0 +1,1082 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2024-2025 Intel Corporation + */ + +#include "mld.h" +#include "debugfs.h" +#include "iwl-io.h" +#include "hcmd.h" +#include "iface.h" +#include "sta.h" +#include "tlc.h" +#include "power.h" +#include "notif.h" +#include "ap.h" +#include "iwl-utils.h" +#include "scan.h" +#ifdef CONFIG_THERMAL +#include "thermal.h" +#endif + +#include "fw/api/rs.h" +#include "fw/api/dhc.h" +#include "fw/api/rfi.h" +#include "fw/dhc-utils.h" +#include <linux/dmi.h> + +#define MLD_DEBUGFS_READ_FILE_OPS(name, bufsz) \ + _MLD_DEBUGFS_READ_FILE_OPS(name, bufsz, struct iwl_mld) + +#define MLD_DEBUGFS_ADD_FILE_ALIAS(alias, name, parent, mode) \ + debugfs_create_file(alias, mode, parent, mld, \ + &iwl_dbgfs_##name##_ops) +#define MLD_DEBUGFS_ADD_FILE(name, parent, mode) \ + MLD_DEBUGFS_ADD_FILE_ALIAS(#name, name, parent, mode) + +static bool iwl_mld_dbgfs_fw_cmd_disabled(struct iwl_mld *mld) +{ +#ifdef CONFIG_PM_SLEEP + return !mld->fw_status.running || mld->fw_status.in_d3; +#else + return !mld->fw_status.running; +#endif /* CONFIG_PM_SLEEP */ +} + +static ssize_t iwl_dbgfs_fw_dbg_clear_write(struct iwl_mld *mld, + char *buf, size_t count) +{ + /* If the firmware is not running, silently succeed since there is + * no data to clear.
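+ * Note: unlike the handlers below, which fail with -EIO while the
+ * firmware is down, this one reports success with 0 bytes consumed
+ * to userspace rather than returning an error.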
+ */ + if (iwl_mld_dbgfs_fw_cmd_disabled(mld)) + return 0; + + iwl_fw_dbg_clear_monitor_buf(&mld->fwrt); + + return count; +} + +static ssize_t iwl_dbgfs_fw_nmi_write(struct iwl_mld *mld, char *buf, + size_t count) +{ + if (iwl_mld_dbgfs_fw_cmd_disabled(mld)) + return -EIO; + + IWL_ERR(mld, "Triggering an NMI from debugfs\n"); + + if (count == 6 && !strcmp(buf, "nolog\n")) + mld->fw_status.do_not_dump_once = true; + + iwl_force_nmi(mld->trans); + + return count; +} + +static ssize_t iwl_dbgfs_fw_restart_write(struct iwl_mld *mld, char *buf, + size_t count) +{ + int __maybe_unused ret; + + if (!iwlwifi_mod_params.fw_restart) + return -EPERM; + + if (iwl_mld_dbgfs_fw_cmd_disabled(mld)) + return -EIO; + + if (count == 6 && !strcmp(buf, "nolog\n")) { + mld->fw_status.do_not_dump_once = true; + set_bit(STATUS_SUPPRESS_CMD_ERROR_ONCE, &mld->trans->status); + } + + /* take the return value to make compiler happy - it will + * fail anyway + */ + ret = iwl_mld_send_cmd_empty(mld, WIDE_ID(LONG_GROUP, REPLY_ERROR)); + + return count; +} + +static ssize_t iwl_dbgfs_send_echo_cmd_write(struct iwl_mld *mld, char *buf, + size_t count) +{ + if (iwl_mld_dbgfs_fw_cmd_disabled(mld)) + return -EIO; + + return iwl_mld_send_cmd_empty(mld, ECHO_CMD) ?: count; +} + +struct iwl_mld_sniffer_apply { + struct iwl_mld *mld; + const u8 *bssid; + u16 aid; +}; + +static bool iwl_mld_sniffer_apply(struct iwl_notif_wait_data *notif_data, + struct iwl_rx_packet *pkt, void *data) +{ + struct iwl_mld_sniffer_apply *apply = data; + + apply->mld->monitor.cur_aid = cpu_to_le16(apply->aid); + memcpy(apply->mld->monitor.cur_bssid, apply->bssid, + sizeof(apply->mld->monitor.cur_bssid)); + + return true; +} + +static ssize_t +iwl_dbgfs_he_sniffer_params_write(struct iwl_mld *mld, char *buf, + size_t count) +{ + struct iwl_notification_wait wait; + struct iwl_he_monitor_cmd he_mon_cmd = {}; + struct iwl_mld_sniffer_apply apply = { + .mld = mld, + }; + u16 wait_cmds[] = { + WIDE_ID(DATA_PATH_GROUP, HE_AIR_SNIFFER_CONFIG_CMD), + }; + u32 aid; + int ret; + + if (iwl_mld_dbgfs_fw_cmd_disabled(mld)) + return -EIO; + + if (!mld->monitor.on) + return -ENODEV; + + ret = sscanf(buf, "%x %2hhx:%2hhx:%2hhx:%2hhx:%2hhx:%2hhx", &aid, + &he_mon_cmd.bssid[0], &he_mon_cmd.bssid[1], + &he_mon_cmd.bssid[2], &he_mon_cmd.bssid[3], + &he_mon_cmd.bssid[4], &he_mon_cmd.bssid[5]); + if (ret != 7) + return -EINVAL; + + he_mon_cmd.aid = cpu_to_le16(aid); + + apply.aid = aid; + apply.bssid = (void *)he_mon_cmd.bssid; + + /* Use the notification waiter to get our function triggered + * in sequence with other RX. This ensures that frames we get + * on the RX queue _before_ the new configuration is applied + * still have mld->cur_aid pointing to the old AID, and that + * frames on the RX queue _after_ the firmware processed the + * new configuration (and sent the response, synchronously) + * get mld->cur_aid correctly set to the new AID. 
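+ *
+ * A hypothetical timeline (assuming a single RX queue) of the ordering
+ * this guarantees:
+ *   1. frame A dequeued -> handled with the old cur_aid/cur_bssid
+ *   2. HE_AIR_SNIFFER_CONFIG_CMD response dequeued ->
+ *      iwl_mld_sniffer_apply() updates cur_aid/cur_bssid
+ *   3. frame B dequeued -> handled with the new cur_aid/cur_bssid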
+ */ + iwl_init_notification_wait(&mld->notif_wait, &wait, + wait_cmds, ARRAY_SIZE(wait_cmds), + iwl_mld_sniffer_apply, &apply); + + ret = iwl_mld_send_cmd_pdu(mld, + WIDE_ID(DATA_PATH_GROUP, + HE_AIR_SNIFFER_CONFIG_CMD), + &he_mon_cmd); + + /* no need to really wait, we already did anyway */ + iwl_remove_notification(&mld->notif_wait, &wait); + + return ret ?: count; +} + +static ssize_t +iwl_dbgfs_he_sniffer_params_read(struct iwl_mld *mld, char *buf, size_t count) +{ + return scnprintf(buf, count, + "%d %02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx\n", + le16_to_cpu(mld->monitor.cur_aid), + mld->monitor.cur_bssid[0], mld->monitor.cur_bssid[1], + mld->monitor.cur_bssid[2], mld->monitor.cur_bssid[3], + mld->monitor.cur_bssid[4], mld->monitor.cur_bssid[5]); +} + +/* The size computation is as follows: + * each number needs at most 3 characters, number of rows is the size of + * the table; So, need 5 chars for the "freq: " part and each tuple afterwards + * needs 6 characters for numbers and 5 for the punctuation around. 32 bytes + * for feature support message. + */ +#define IWL_RFI_DDR_BUF_SIZE (IWL_RFI_DDR_LUT_INSTALLED_SIZE *\ + (5 + IWL_RFI_DDR_LUT_ENTRY_CHANNELS_NUM *\ + (6 + 5)) + 32) +#define IWL_RFI_DLVR_BUF_SIZE (IWL_RFI_DLVR_LUT_INSTALLED_SIZE *\ + (5 + IWL_RFI_DLVR_LUT_ENTRY_CHANNELS_NUM *\ + (6 + 5)) + 32) +#define IWL_RFI_DESENSE_BUF_SIZE IWL_RFI_DDR_BUF_SIZE + +/* Extra 32 for "DDR and DLVR table" message */ +#define IWL_RFI_BUF_SIZE (IWL_RFI_DDR_BUF_SIZE + IWL_RFI_DLVR_BUF_SIZE +\ + IWL_RFI_DESENSE_BUF_SIZE + 32) + +static size_t iwl_mld_dump_tas_resp(struct iwl_dhc_tas_status_resp *resp, + size_t count, u8 *buf) +{ + const char * const tas_dis_reason[TAS_DISABLED_REASON_MAX] = { + [TAS_DISABLED_DUE_TO_BIOS] = + "Due To BIOS", + [TAS_DISABLED_DUE_TO_SAR_6DBM] = + "Due To SAR Limit Less Than 6 dBm", + [TAS_DISABLED_REASON_INVALID] = + "N/A", + [TAS_DISABLED_DUE_TO_TABLE_SOURCE_INVALID] = + "Due to table source invalid" + }; + const char * const tas_current_status[TAS_DYNA_STATUS_MAX] = { + [TAS_DYNA_INACTIVE] = "INACTIVE", + [TAS_DYNA_INACTIVE_MVM_MODE] = + "inactive due to mvm mode", + [TAS_DYNA_INACTIVE_TRIGGER_MODE] = + "inactive due to trigger mode", + [TAS_DYNA_INACTIVE_BLOCK_LISTED] = + "inactive due to block listed", + [TAS_DYNA_INACTIVE_UHB_NON_US] = + "inactive due to uhb non US", + [TAS_DYNA_ACTIVE] = "ACTIVE", + }; + ssize_t pos = 0; + + if (resp->header.version != 1) { + pos += scnprintf(buf + pos, count - pos, + "Unsupported TAS response version:%d", + resp->header.version); + return pos; + } + + pos += scnprintf(buf + pos, count - pos, "TAS Report\n"); + switch (resp->tas_config_info.table_source) { + case BIOS_SOURCE_NONE: + pos += scnprintf(buf + pos, count - pos, + "BIOS SOURCE NONE "); + break; + case BIOS_SOURCE_ACPI: + pos += scnprintf(buf + pos, count - pos, + "BIOS SOURCE ACPI "); + break; + case BIOS_SOURCE_UEFI: + pos += scnprintf(buf + pos, count - pos, + "BIOS SOURCE UEFI "); + break; + default: + pos += scnprintf(buf + pos, count - pos, + "BIOS SOURCE UNKNOWN (%d) ", + resp->tas_config_info.table_source); + break; + } + + pos += scnprintf(buf + pos, count - pos, + "revision is: %d data is: 0x%08x\n", + resp->tas_config_info.table_revision, + resp->tas_config_info.value); + pos += scnprintf(buf + pos, count - pos, "Current MCC: 0x%x\n", + le16_to_cpu(resp->curr_mcc)); + + pos += scnprintf(buf + pos, count - pos, "Block list entries:"); + for (int i = 0; i < ARRAY_SIZE(resp->mcc_block_list); i++) + pos += scnprintf(buf + pos, count - pos, " 0x%x", + 
le16_to_cpu(resp->mcc_block_list[i])); + + pos += scnprintf(buf + pos, count - pos, + "\nDo TAS Support Dual Radio?: %s\n", + hweight8(resp->valid_radio_mask) > 1 ? + "TRUE" : "FALSE"); + + for (int i = 0; i < ARRAY_SIZE(resp->tas_status_radio); i++) { + int tmp; + unsigned long dynamic_status; + + if (!(resp->valid_radio_mask & BIT(i))) + continue; + + pos += scnprintf(buf + pos, count - pos, + "TAS report for radio:%d\n", i + 1); + pos += scnprintf(buf + pos, count - pos, + "Static status: %sabled\n", + resp->tas_status_radio[i].static_status ? + "En" : "Dis"); + if (!resp->tas_status_radio[i].static_status) { + u8 static_disable_reason = + resp->tas_status_radio[i].static_disable_reason; + + pos += scnprintf(buf + pos, count - pos, + "\tStatic Disabled Reason: "); + if (static_disable_reason >= TAS_DISABLED_REASON_MAX) { + pos += scnprintf(buf + pos, count - pos, + "unsupported value (%d)\n", + static_disable_reason); + continue; + } + + pos += scnprintf(buf + pos, count - pos, + "%s (%d)\n", + tas_dis_reason[static_disable_reason], + static_disable_reason); + continue; + } + + pos += scnprintf(buf + pos, count - pos, "\tANT A %s and ", + (resp->tas_status_radio[i].dynamic_status_ant_a + & BIT(TAS_DYNA_ACTIVE)) ? "ON" : "OFF"); + + pos += scnprintf(buf + pos, count - pos, "ANT B %s for ", + (resp->tas_status_radio[i].dynamic_status_ant_b + & BIT(TAS_DYNA_ACTIVE)) ? "ON" : "OFF"); + + switch (resp->tas_status_radio[i].band) { + case PHY_BAND_5: + pos += scnprintf(buf + pos, count - pos, "HB\n"); + break; + case PHY_BAND_24: + pos += scnprintf(buf + pos, count - pos, "LB\n"); + break; + case PHY_BAND_6: + pos += scnprintf(buf + pos, count - pos, "UHB\n"); + break; + default: + pos += scnprintf(buf + pos, count - pos, + "Unsupported band (%d)\n", + resp->tas_status_radio[i].band); + break; + } + + pos += scnprintf(buf + pos, count - pos, + "Is near disconnection?: %s\n", + resp->tas_status_radio[i].near_disconnection ? 
+ "True" : "False"); + + pos += scnprintf(buf + pos, count - pos, + "Dynamic status antenna A:\n"); + dynamic_status = resp->tas_status_radio[i].dynamic_status_ant_a; + for_each_set_bit(tmp, &dynamic_status, TAS_DYNA_STATUS_MAX) { + pos += scnprintf(buf + pos, count - pos, "\t%s (%d)\n", + tas_current_status[tmp], tmp); + } + pos += scnprintf(buf + pos, count - pos, + "\nDynamic status antenna B:\n"); + dynamic_status = resp->tas_status_radio[i].dynamic_status_ant_b; + for_each_set_bit(tmp, &dynamic_status, TAS_DYNA_STATUS_MAX) { + pos += scnprintf(buf + pos, count - pos, "\t%s (%d)\n", + tas_current_status[tmp], tmp); + } + + tmp = le16_to_cpu(resp->tas_status_radio[i].max_reg_pwr_limit_ant_a); + pos += scnprintf(buf + pos, count - pos, + "Max antenna A regulatory pwr limit (dBm): %d.%03d\n", + tmp / 8, 125 * (tmp % 8)); + tmp = le16_to_cpu(resp->tas_status_radio[i].max_reg_pwr_limit_ant_b); + pos += scnprintf(buf + pos, count - pos, + "Max antenna B regulatory pwr limit (dBm): %d.%03d\n", + tmp / 8, 125 * (tmp % 8)); + + tmp = le16_to_cpu(resp->tas_status_radio[i].sar_limit_ant_a); + pos += scnprintf(buf + pos, count - pos, + "Antenna A SAR limit (dBm): %d.%03d\n", + tmp / 8, 125 * (tmp % 8)); + tmp = le16_to_cpu(resp->tas_status_radio[i].sar_limit_ant_b); + pos += scnprintf(buf + pos, count - pos, + "Antenna B SAR limit (dBm): %d.%03d\n", + tmp / 8, 125 * (tmp % 8)); + } + + return pos; +} + +static ssize_t iwl_dbgfs_tas_get_status_read(struct iwl_mld *mld, char *buf, + size_t count) +{ + struct iwl_dhc_cmd cmd = { + .index_and_mask = cpu_to_le32(DHC_TABLE_TOOLS | + DHC_TARGET_UMAC | + DHC_TOOLS_UMAC_GET_TAS_STATUS), + }; + struct iwl_host_cmd hcmd = { + .id = WIDE_ID(LEGACY_GROUP, DEBUG_HOST_COMMAND), + .flags = CMD_WANT_SKB, + .len[0] = sizeof(cmd), + .data[0] = &cmd, + }; + struct iwl_dhc_tas_status_resp *resp = NULL; + ssize_t pos = 0; + u32 resp_len; + u32 status; + int ret; + + if (iwl_mld_dbgfs_fw_cmd_disabled(mld)) + return -EIO; + + ret = iwl_mld_send_cmd(mld, &hcmd); + if (ret) + return ret; + + pos += scnprintf(buf + pos, count - pos, "\nOEM name: %s\n", + dmi_get_system_info(DMI_SYS_VENDOR) ?: ""); + pos += scnprintf(buf + pos, count - pos, + "\tVendor In Approved List: %s\n", + iwl_is_tas_approved() ? 
"YES" : "NO"); + + status = iwl_dhc_resp_status(mld->fwrt.fw, hcmd.resp_pkt); + if (status != 1) { + pos += scnprintf(buf + pos, count - pos, + "response status is not success: %d\n", + status); + goto out; + } + + resp = iwl_dhc_resp_data(mld->fwrt.fw, hcmd.resp_pkt, &resp_len); + if (IS_ERR(resp) || resp_len != sizeof(*resp)) { + pos += scnprintf(buf + pos, count - pos, + "Invalid size for TAS response (%u instead of %zd)\n", + resp_len, sizeof(*resp)); + goto out; + } + + pos += iwl_mld_dump_tas_resp(resp, count - pos, buf + pos); + +out: + iwl_free_resp(&hcmd); + return pos; +} + +WIPHY_DEBUGFS_WRITE_FILE_OPS_MLD(fw_nmi, 10); +WIPHY_DEBUGFS_WRITE_FILE_OPS_MLD(fw_restart, 10); +WIPHY_DEBUGFS_READ_WRITE_FILE_OPS_MLD(he_sniffer_params, 32); +WIPHY_DEBUGFS_WRITE_FILE_OPS_MLD(fw_dbg_clear, 10); +WIPHY_DEBUGFS_WRITE_FILE_OPS_MLD(send_echo_cmd, 8); +WIPHY_DEBUGFS_READ_FILE_OPS_MLD(tas_get_status, 2048); + +static ssize_t iwl_dbgfs_wifi_6e_enable_read(struct iwl_mld *mld, + size_t count, u8 *buf) +{ + int err; + u32 value; + + err = iwl_bios_get_dsm(&mld->fwrt, DSM_FUNC_ENABLE_6E, &value); + if (err) + return err; + + return scnprintf(buf, count, "0x%08x\n", value); +} + +MLD_DEBUGFS_READ_FILE_OPS(wifi_6e_enable, 64); + +static ssize_t iwl_dbgfs_inject_packet_write(struct iwl_mld *mld, + char *buf, size_t count) +{ + struct iwl_op_mode *opmode = container_of((void *)mld, + struct iwl_op_mode, + op_mode_specific); + struct iwl_rx_cmd_buffer rxb = {}; + struct iwl_rx_packet *pkt; + int n_bytes = count / 2; + int ret = -EINVAL; + + if (iwl_mld_dbgfs_fw_cmd_disabled(mld)) + return -EIO; + + rxb._page = alloc_pages(GFP_KERNEL, 0); + if (!rxb._page) + return -ENOMEM; + pkt = rxb_addr(&rxb); + + ret = hex2bin(page_address(rxb._page), buf, n_bytes); + if (ret) + goto out; + + /* avoid invalid memory access and malformed packet */ + if (n_bytes < sizeof(*pkt) || + n_bytes != sizeof(*pkt) + iwl_rx_packet_payload_len(pkt)) + goto out; + + local_bh_disable(); + iwl_mld_rx(opmode, NULL, &rxb); + local_bh_enable(); + ret = 0; + +out: + iwl_free_rxb(&rxb); + + return ret ?: count; +} + +WIPHY_DEBUGFS_WRITE_FILE_OPS_MLD(inject_packet, 512); + +#ifdef CONFIG_THERMAL + +static ssize_t iwl_dbgfs_stop_ctdp_write(struct iwl_mld *mld, + char *buf, size_t count) +{ + if (iwl_mld_dbgfs_fw_cmd_disabled(mld)) + return -EIO; + + return iwl_mld_config_ctdp(mld, mld->cooling_dev.cur_state, + CTDP_CMD_OPERATION_STOP) ? : count; +} + +WIPHY_DEBUGFS_WRITE_FILE_OPS_MLD(stop_ctdp, 8); + +static ssize_t iwl_dbgfs_start_ctdp_write(struct iwl_mld *mld, + char *buf, size_t count) +{ + if (iwl_mld_dbgfs_fw_cmd_disabled(mld)) + return -EIO; + + return iwl_mld_config_ctdp(mld, mld->cooling_dev.cur_state, + CTDP_CMD_OPERATION_START) ? 
: count; +} + +WIPHY_DEBUGFS_WRITE_FILE_OPS_MLD(start_ctdp, 8); + +#endif /* CONFIG_THERMAL */ + +void +iwl_mld_add_debugfs_files(struct iwl_mld *mld, struct dentry *debugfs_dir) +{ + /* Add debugfs files here */ + + MLD_DEBUGFS_ADD_FILE(fw_nmi, debugfs_dir, 0200); + MLD_DEBUGFS_ADD_FILE(fw_restart, debugfs_dir, 0200); + MLD_DEBUGFS_ADD_FILE(wifi_6e_enable, debugfs_dir, 0400); + MLD_DEBUGFS_ADD_FILE(he_sniffer_params, debugfs_dir, 0600); + MLD_DEBUGFS_ADD_FILE(fw_dbg_clear, debugfs_dir, 0200); + MLD_DEBUGFS_ADD_FILE(send_echo_cmd, debugfs_dir, 0200); + MLD_DEBUGFS_ADD_FILE(tas_get_status, debugfs_dir, 0400); +#ifdef CONFIG_THERMAL + MLD_DEBUGFS_ADD_FILE(start_ctdp, debugfs_dir, 0200); + MLD_DEBUGFS_ADD_FILE(stop_ctdp, debugfs_dir, 0200); +#endif + MLD_DEBUGFS_ADD_FILE(inject_packet, debugfs_dir, 0200); + + /* Create a symlink with mac80211. It will be removed when mac80211 + * exits (before the opmode exits which removes the target.) + */ + if (!IS_ERR(debugfs_dir)) { + char buf[100]; + + snprintf(buf, 100, "../../%pd2", debugfs_dir->d_parent); + debugfs_create_symlink("iwlwifi", mld->wiphy->debugfsdir, + buf); + } +} + +#define VIF_DEBUGFS_WRITE_FILE_OPS(name, bufsz) \ + WIPHY_DEBUGFS_WRITE_FILE_OPS(vif_##name, bufsz, vif) + +#define VIF_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \ + IEEE80211_WIPHY_DEBUGFS_READ_WRITE_FILE_OPS(vif_##name, bufsz, vif) \ + +#define VIF_DEBUGFS_ADD_FILE_ALIAS(alias, name, parent, mode) \ + debugfs_create_file(alias, mode, parent, vif, \ + &iwl_dbgfs_vif_##name##_ops) +#define VIF_DEBUGFS_ADD_FILE(name, parent, mode) \ + VIF_DEBUGFS_ADD_FILE_ALIAS(#name, name, parent, mode) + +static ssize_t iwl_dbgfs_vif_bf_params_write(struct iwl_mld *mld, char *buf, + size_t count, void *data) +{ + struct ieee80211_vif *vif = data; + struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif); + int link_id = vif->active_links ? 
__ffs(vif->active_links) : 0; + struct ieee80211_bss_conf *link_conf; + int val; + + if (!strncmp("bf_enable_beacon_filter=", buf, 24)) { + if (sscanf(buf + 24, "%d", &val) != 1) + return -EINVAL; + } else { + return -EINVAL; + } + + if (val != 0 && val != 1) + return -EINVAL; + + link_conf = link_conf_dereference_protected(vif, link_id); + if (WARN_ON(!link_conf)) + return -ENODEV; + + if (iwl_mld_dbgfs_fw_cmd_disabled(mld)) + return -EIO; + + mld_vif->disable_bf = !val; + + if (val) + return iwl_mld_enable_beacon_filter(mld, link_conf, + false) ?: count; + else + return iwl_mld_disable_beacon_filter(mld, vif) ?: count; +} + +static ssize_t iwl_dbgfs_vif_pm_params_write(struct iwl_mld *mld, + char *buf, + size_t count, void *data) +{ + struct ieee80211_vif *vif = data; + struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif); + int val; + + if (!strncmp("use_ps_poll=", buf, 12)) { + if (sscanf(buf + 12, "%d", &val) != 1) + return -EINVAL; + } else { + return -EINVAL; + } + + if (iwl_mld_dbgfs_fw_cmd_disabled(mld)) + return -EIO; + + mld_vif->use_ps_poll = val; + + return iwl_mld_update_mac_power(mld, vif, false) ?: count; +} + +static ssize_t iwl_dbgfs_vif_low_latency_write(struct iwl_mld *mld, + char *buf, size_t count, + void *data) +{ + struct ieee80211_vif *vif = data; + u8 value; + int ret; + + ret = kstrtou8(buf, 0, &value); + if (ret) + return ret; + + if (value > 1) + return -EINVAL; + + iwl_mld_vif_update_low_latency(mld, vif, value, LOW_LATENCY_DEBUGFS); + + return count; +} + +static ssize_t iwl_dbgfs_vif_low_latency_read(struct ieee80211_vif *vif, + size_t count, char *buf) +{ + struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif); + char format[] = "traffic=%d\ndbgfs=%d\nvif_type=%d\nactual=%d\n"; + u8 ll_causes; + + if (WARN_ON(count < sizeof(format))) + return -EINVAL; + + ll_causes = READ_ONCE(mld_vif->low_latency_causes); + + /* all values in format are boolean so the size of format is enough + * for holding the result string + */ + return scnprintf(buf, count, format, + !!(ll_causes & LOW_LATENCY_TRAFFIC), + !!(ll_causes & LOW_LATENCY_DEBUGFS), + !!(ll_causes & LOW_LATENCY_VIF_TYPE), + !!(ll_causes)); +} + +VIF_DEBUGFS_WRITE_FILE_OPS(pm_params, 32); +VIF_DEBUGFS_WRITE_FILE_OPS(bf_params, 32); +VIF_DEBUGFS_READ_WRITE_FILE_OPS(low_latency, 45); + +static int +_iwl_dbgfs_inject_beacon_ie(struct iwl_mld *mld, struct ieee80211_vif *vif, + char *bin, ssize_t len, + bool restore) +{ + struct iwl_mld_vif *mld_vif; + struct iwl_mld_link *mld_link; + struct iwl_mac_beacon_cmd beacon_cmd = {}; + int n_bytes = len / 2; + + /* Element len should be represented by u8 */ + if (n_bytes >= U8_MAX) + return -EINVAL; + + if (iwl_mld_dbgfs_fw_cmd_disabled(mld)) + return -EIO; + + if (!vif) + return -EINVAL; + + mld_vif = iwl_mld_vif_from_mac80211(vif); + mld_vif->beacon_inject_active = true; + mld->hw->extra_beacon_tailroom = n_bytes; + + for_each_mld_vif_valid_link(mld_vif, mld_link) { + u32 offset; + struct ieee80211_tx_info *info; + struct ieee80211_bss_conf *link_conf = + link_conf_dereference_protected(vif, link_id); + struct ieee80211_chanctx_conf *ctx = + wiphy_dereference(mld->wiphy, link_conf->chanctx_conf); + struct sk_buff *beacon = + ieee80211_beacon_get_template(mld->hw, vif, + NULL, link_id); + + if (!beacon) + return -EINVAL; + + if (!restore && (WARN_ON(!n_bytes || !bin) || + hex2bin(skb_put_zero(beacon, n_bytes), + bin, n_bytes))) { + dev_kfree_skb(beacon); + return -EINVAL; + } + + info = IEEE80211_SKB_CB(beacon); + + beacon_cmd.flags = + 
cpu_to_le16(iwl_mld_get_rate_flags(mld, info, vif, + link_conf, + ctx->def.chan->band)); + beacon_cmd.byte_cnt = cpu_to_le16((u16)beacon->len); + beacon_cmd.link_id = + cpu_to_le32(mld_link->fw_id); + + iwl_mld_set_tim_idx(mld, &beacon_cmd.tim_idx, + beacon->data, beacon->len); + + offset = iwl_find_ie_offset(beacon->data, + WLAN_EID_S1G_TWT, + beacon->len); + + beacon_cmd.btwt_offset = cpu_to_le32(offset); + + iwl_mld_send_beacon_template_cmd(mld, beacon, &beacon_cmd); + dev_kfree_skb(beacon); + } + + if (restore) + mld_vif->beacon_inject_active = false; + + return 0; +} + +static ssize_t +iwl_dbgfs_vif_inject_beacon_ie_write(struct iwl_mld *mld, + char *buf, size_t count, + void *data) +{ + struct ieee80211_vif *vif = data; + int ret = _iwl_dbgfs_inject_beacon_ie(mld, vif, buf, + count, false); + + mld->hw->extra_beacon_tailroom = 0; + return ret ?: count; +} + +VIF_DEBUGFS_WRITE_FILE_OPS(inject_beacon_ie, 512); + +static ssize_t +iwl_dbgfs_vif_inject_beacon_ie_restore_write(struct iwl_mld *mld, + char *buf, + size_t count, + void *data) +{ + struct ieee80211_vif *vif = data; + int ret = _iwl_dbgfs_inject_beacon_ie(mld, vif, NULL, + 0, true); + + mld->hw->extra_beacon_tailroom = 0; + return ret ?: count; +} + +VIF_DEBUGFS_WRITE_FILE_OPS(inject_beacon_ie_restore, 512); + +static ssize_t +iwl_dbgfs_vif_twt_setup_write(struct iwl_mld *mld, char *buf, size_t count, + void *data) +{ + struct iwl_host_cmd hcmd = { + .id = WIDE_ID(IWL_ALWAYS_LONG_GROUP, DEBUG_HOST_COMMAND), + }; + struct ieee80211_vif *vif = data; + struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif); + struct iwl_dhc_cmd *cmd __free(kfree) = NULL; + struct iwl_dhc_twt_operation *dhc_twt_cmd; + u64 target_wake_time; + u32 twt_operation, interval_exp, interval_mantissa, min_wake_duration; + u8 trigger, flow_type, flow_id, protection, tenth_param; + u8 twt_request = 1, broadcast = 0; + int ret; + + if (iwl_mld_dbgfs_fw_cmd_disabled(mld)) + return -EIO; + + ret = sscanf(buf, "%u %llu %u %u %u %hhu %hhu %hhu %hhu %hhu", + &twt_operation, &target_wake_time, &interval_exp, + &interval_mantissa, &min_wake_duration, &trigger, + &flow_type, &flow_id, &protection, &tenth_param); + + /* the new twt_request parameter is optional for station */ + if ((ret != 9 && ret != 10) || + (ret == 10 && vif->type != NL80211_IFTYPE_STATION && + tenth_param == 1)) + return -EINVAL; + + /* The 10th parameter: + * In STA mode - the TWT type (broadcast or individual) + * In AP mode - the role (0 responder, 2 unsolicited) + */ + if (ret == 10) { + if (vif->type == NL80211_IFTYPE_STATION) + broadcast = tenth_param; + else + twt_request = tenth_param; + } + + cmd = kzalloc(sizeof(*cmd) + sizeof(*dhc_twt_cmd), GFP_KERNEL); + if (!cmd) + return -ENOMEM; + + dhc_twt_cmd = (void *)cmd->data; + dhc_twt_cmd->mac_id = cpu_to_le32(mld_vif->fw_id); + dhc_twt_cmd->twt_operation = cpu_to_le32(twt_operation); + dhc_twt_cmd->target_wake_time = cpu_to_le64(target_wake_time); + dhc_twt_cmd->interval_exp = cpu_to_le32(interval_exp); + dhc_twt_cmd->interval_mantissa = cpu_to_le32(interval_mantissa); + dhc_twt_cmd->min_wake_duration = cpu_to_le32(min_wake_duration); + dhc_twt_cmd->trigger = trigger; + dhc_twt_cmd->flow_type = flow_type; + dhc_twt_cmd->flow_id = flow_id; + dhc_twt_cmd->protection = protection; + dhc_twt_cmd->twt_request = twt_request; + dhc_twt_cmd->negotiation_type = broadcast ? 
3 : 0; + + cmd->length = cpu_to_le32(sizeof(*dhc_twt_cmd) >> 2); + cmd->index_and_mask = + cpu_to_le32(DHC_TABLE_INTEGRATION | DHC_TARGET_UMAC | + DHC_INT_UMAC_TWT_OPERATION); + + hcmd.len[0] = sizeof(*cmd) + sizeof(*dhc_twt_cmd); + hcmd.data[0] = cmd; + + ret = iwl_mld_send_cmd(mld, &hcmd); + + return ret ?: count; +} + +VIF_DEBUGFS_WRITE_FILE_OPS(twt_setup, 256); + +static ssize_t +iwl_dbgfs_vif_twt_operation_write(struct iwl_mld *mld, char *buf, size_t count, + void *data) +{ + struct ieee80211_vif *vif = data; + struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif); + struct iwl_twt_operation_cmd twt_cmd = {}; + int link_id = vif->active_links ? __ffs(vif->active_links) : 0; + struct iwl_mld_link *mld_link = iwl_mld_link_dereference_check(mld_vif, + link_id); + int ret; + + if (WARN_ON(!mld_link)) + return -ENODEV; + + if (iwl_mld_dbgfs_fw_cmd_disabled(mld)) + return -EIO; + + if (hweight16(vif->active_links) > 1) + return -EOPNOTSUPP; + + ret = sscanf(buf, + "%u %llu %u %u %u %hhu %hhu %hhu %hhu %hhu %hhu %hhu %hhu %hhu %hhu %hhu %hhu %hhu %hhu %hhu %hhu", + &twt_cmd.twt_operation, &twt_cmd.target_wake_time, + &twt_cmd.interval_exponent, &twt_cmd.interval_mantissa, + &twt_cmd.minimum_wake_duration, &twt_cmd.trigger, + &twt_cmd.flow_type, &twt_cmd.flow_id, + &twt_cmd.twt_protection, &twt_cmd.ndp_paging_indicator, + &twt_cmd.responder_pm_mode, &twt_cmd.negotiation_type, + &twt_cmd.twt_request, &twt_cmd.implicit, + &twt_cmd.twt_group_assignment, &twt_cmd.twt_channel, + &twt_cmd.restricted_info_present, &twt_cmd.dl_bitmap_valid, + &twt_cmd.ul_bitmap_valid, &twt_cmd.dl_tid_bitmap, + &twt_cmd.ul_tid_bitmap); + + if (ret != 21) + return -EINVAL; + + twt_cmd.link_id = cpu_to_le32(mld_link->fw_id); + + ret = iwl_mld_send_cmd_pdu(mld, + WIDE_ID(MAC_CONF_GROUP, TWT_OPERATION_CMD), + &twt_cmd); + return ret ?: count; +} + +VIF_DEBUGFS_WRITE_FILE_OPS(twt_operation, 256); + +static ssize_t iwl_dbgfs_vif_int_mlo_scan_write(struct iwl_mld *mld, char *buf, + size_t count, void *data) +{ + struct ieee80211_vif *vif = data; + u32 action; + int ret; + + if (!vif->cfg.assoc || !ieee80211_vif_is_mld(vif)) + return -EINVAL; + + if (kstrtou32(buf, 0, &action)) + return -EINVAL; + + if (action == 0) { + ret = iwl_mld_scan_stop(mld, IWL_MLD_SCAN_INT_MLO, false); + } else if (action == 1) { + iwl_mld_int_mlo_scan(mld, vif); + ret = 0; + } else { + ret = -EINVAL; + } + + return ret ?: count; +} + +VIF_DEBUGFS_WRITE_FILE_OPS(int_mlo_scan, 32); + +void iwl_mld_add_vif_debugfs(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct dentry *mld_vif_dbgfs = + debugfs_create_dir("iwlmld", vif->debugfs_dir); + struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif); + struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw); + char target[3 * 3 + 11 + (NL80211_WIPHY_NAME_MAXLEN + 1) + + (7 + IFNAMSIZ + 1) + 6 + 1]; + char name[7 + IFNAMSIZ + 1]; + + /* Create symlink for convenience pointing to interface specific + * debugfs entries for the driver. 
For example, under + * /sys/kernel/debug/iwlwifi/0000\:02\:00.0/iwlmld/ + * find + * netdev:wlan0 -> ../../../ieee80211/phy0/netdev:wlan0/iwlmld/ + */ + snprintf(name, sizeof(name), "%pd", vif->debugfs_dir); + snprintf(target, sizeof(target), "../../../%pd3/iwlmld", + vif->debugfs_dir); + mld_vif->dbgfs_slink = + debugfs_create_symlink(name, mld->debugfs_dir, target); + + if (iwlmld_mod_params.power_scheme != IWL_POWER_SCHEME_CAM && + vif->type == NL80211_IFTYPE_STATION) { + VIF_DEBUGFS_ADD_FILE(pm_params, mld_vif_dbgfs, 0200); + VIF_DEBUGFS_ADD_FILE(bf_params, mld_vif_dbgfs, 0200); + } + + if (vif->type == NL80211_IFTYPE_AP) { + VIF_DEBUGFS_ADD_FILE(inject_beacon_ie, mld_vif_dbgfs, 0200); + VIF_DEBUGFS_ADD_FILE(inject_beacon_ie_restore, + mld_vif_dbgfs, 0200); + } + + VIF_DEBUGFS_ADD_FILE(low_latency, mld_vif_dbgfs, 0600); + VIF_DEBUGFS_ADD_FILE(twt_setup, mld_vif_dbgfs, 0200); + VIF_DEBUGFS_ADD_FILE(twt_operation, mld_vif_dbgfs, 0200); + VIF_DEBUGFS_ADD_FILE(int_mlo_scan, mld_vif_dbgfs, 0200); +} +#define LINK_DEBUGFS_WRITE_FILE_OPS(name, bufsz) \ + WIPHY_DEBUGFS_WRITE_FILE_OPS(link_##name, bufsz, bss_conf) + +#define LINK_DEBUGFS_ADD_FILE_ALIAS(alias, name, parent, mode) \ + debugfs_create_file(alias, mode, parent, link_conf, \ + &iwl_dbgfs_link_##name##_ops) +#define LINK_DEBUGFS_ADD_FILE(name, parent, mode) \ + LINK_DEBUGFS_ADD_FILE_ALIAS(#name, name, parent, mode) + +void iwl_mld_add_link_debugfs(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf, + struct dentry *dir) +{ + struct dentry *mld_link_dir; + + mld_link_dir = debugfs_lookup("iwlmld", dir); + + /* For non-MLO vifs, the dir of deflink is the same as the vif's one, + * so if the iwlmld dir already exists, this means that this is deflink. + * If not, this is a per-link dir of an MLO vif, so add the iwlmld + * dir in it. + */ + if (!mld_link_dir) + mld_link_dir = debugfs_create_dir("iwlmld", dir); +} + +static ssize_t iwl_dbgfs_fixed_rate_write(struct iwl_mld *mld, char *buf, + size_t count, void *data) +{ + struct ieee80211_link_sta *link_sta = data; + struct iwl_mld_link_sta *mld_link_sta; + u32 rate; + u32 partial = 0; + char pretty_rate[100]; + int ret; + u8 fw_sta_id; + + mld_link_sta = iwl_mld_link_sta_from_mac80211(link_sta); + if (WARN_ON(!mld_link_sta)) + return -EINVAL; + + fw_sta_id = mld_link_sta->fw_id; + + if (sscanf(buf, "%i %i", &rate, &partial) < 1) + return -EINVAL; + + if (iwl_mld_dbgfs_fw_cmd_disabled(mld)) + return -EIO; + + ret = iwl_mld_send_tlc_dhc(mld, fw_sta_id, + partial ? IWL_TLC_DEBUG_PARTIAL_FIXED_RATE : + IWL_TLC_DEBUG_FIXED_RATE, + rate); + + rs_pretty_print_rate(pretty_rate, sizeof(pretty_rate), rate); + + IWL_DEBUG_RATE(mld, "sta_id %d rate %s partial: %d, ret:%d\n", + fw_sta_id, pretty_rate, partial, ret); + + return ret ? : count; +} + +static ssize_t iwl_dbgfs_tlc_dhc_write(struct iwl_mld *mld, char *buf, + size_t count, void *data) +{ + struct ieee80211_link_sta *link_sta = data; + struct iwl_mld_link_sta *mld_link_sta; + u32 type, value; + int ret; + u8 fw_sta_id; + + mld_link_sta = iwl_mld_link_sta_from_mac80211(link_sta); + if (WARN_ON(!mld_link_sta)) + return -EINVAL; + + fw_sta_id = mld_link_sta->fw_id; + + if (sscanf(buf, "%i %i", &type, &value) != 2) { + IWL_DEBUG_RATE(mld, "usage: <type> <value>\n"); + return -EINVAL; + } + + if (iwl_mld_dbgfs_fw_cmd_disabled(mld)) + return -EIO; + + ret = iwl_mld_send_tlc_dhc(mld, fw_sta_id, type, value); + + return ret ?
: count; +} + +#define LINK_STA_DEBUGFS_ADD_FILE_ALIAS(alias, name, parent, mode) \ + debugfs_create_file(alias, mode, parent, link_sta, \ + &iwl_dbgfs_##name##_ops) +#define LINK_STA_DEBUGFS_ADD_FILE(name, parent, mode) \ + LINK_STA_DEBUGFS_ADD_FILE_ALIAS(#name, name, parent, mode) + +#define LINK_STA_WIPHY_DEBUGFS_WRITE_OPS(name, bufsz) \ + WIPHY_DEBUGFS_WRITE_FILE_OPS(name, bufsz, link_sta) + +LINK_STA_WIPHY_DEBUGFS_WRITE_OPS(tlc_dhc, 64); +LINK_STA_WIPHY_DEBUGFS_WRITE_OPS(fixed_rate, 64); + +void iwl_mld_add_link_sta_debugfs(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_link_sta *link_sta, + struct dentry *dir) +{ + LINK_STA_DEBUGFS_ADD_FILE(fixed_rate, dir, 0200); + LINK_STA_DEBUGFS_ADD_FILE(tlc_dhc, dir, 0200); +} diff --git a/drivers/net/wireless/intel/iwlwifi/mld/debugfs.h b/drivers/net/wireless/intel/iwlwifi/mld/debugfs.h new file mode 100644 index 0000000000000..eeba35342ba1b --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mld/debugfs.h @@ -0,0 +1,244 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * Copyright (C) 2024-2025 Intel Corporation + */ +#include "iface.h" +#include "sta.h" + +#define MLD_DEBUGFS_OPEN_WRAPPER(name, buflen, argtype) \ +struct dbgfs_##name##_data { \ + argtype *arg; \ + bool read_done; \ + ssize_t rlen; \ + char buf[buflen]; \ +}; \ +static int _iwl_dbgfs_##name##_open(struct inode *inode, \ + struct file *file) \ +{ \ + struct dbgfs_##name##_data *data; \ + \ + if ((file->f_flags & O_ACCMODE) == O_RDWR) \ + return -EOPNOTSUPP; \ + \ + data = kzalloc(sizeof(*data), GFP_KERNEL); \ + if (!data) \ + return -ENOMEM; \ + \ + data->read_done = false; \ + data->arg = inode->i_private; \ + file->private_data = data; \ + \ + return 0; \ +} + +#define MLD_DEBUGFS_READ_WRAPPER(name) \ +static ssize_t _iwl_dbgfs_##name##_read(struct file *file, \ + char __user *user_buf, \ + size_t count, loff_t *ppos) \ +{ \ + struct dbgfs_##name##_data *data = file->private_data; \ + \ + if (!data->read_done) { \ + data->read_done = true; \ + data->rlen = iwl_dbgfs_##name##_read(data->arg, \ + sizeof(data->buf),\ + data->buf); \ + } \ + \ + if (data->rlen < 0) \ + return data->rlen; \ + return simple_read_from_buffer(user_buf, count, ppos, \ + data->buf, data->rlen); \ +} + +static int _iwl_dbgfs_release(struct inode *inode, struct file *file) +{ + kfree(file->private_data); + return 0; +} + +#define _MLD_DEBUGFS_READ_FILE_OPS(name, buflen, argtype) \ +MLD_DEBUGFS_OPEN_WRAPPER(name, buflen, argtype) \ +MLD_DEBUGFS_READ_WRAPPER(name) \ +static const struct file_operations iwl_dbgfs_##name##_ops = { \ + .read = _iwl_dbgfs_##name##_read, \ + .open = _iwl_dbgfs_##name##_open, \ + .llseek = generic_file_llseek, \ + .release = _iwl_dbgfs_release, \ +} + +#define WIPHY_DEBUGFS_WRITE_HANDLER_WRAPPER(name) \ +static ssize_t iwl_dbgfs_##name##_write_handler(struct wiphy *wiphy, \ + struct file *file, char *buf, \ + size_t count, void *data) \ +{ \ + struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); \ + struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw); \ + return iwl_dbgfs_##name##_write(mld, buf, count, data); \ +} + +static inline struct iwl_mld * +iwl_mld_from_link_sta(struct ieee80211_link_sta *link_sta) +{ + struct ieee80211_vif *vif = + iwl_mld_sta_from_mac80211(link_sta->sta)->vif; + return iwl_mld_vif_from_mac80211(vif)->mld; +} + +static inline struct iwl_mld * +iwl_mld_from_bss_conf(struct ieee80211_bss_conf *link) +{ + return iwl_mld_vif_from_mac80211(link->vif)->mld; +} + +static inline struct iwl_mld *iwl_mld_from_vif(struct 
ieee80211_vif *vif) +{ + return iwl_mld_vif_from_mac80211(vif)->mld; +} + +#define WIPHY_DEBUGFS_WRITE_WRAPPER(name, bufsz, objtype) \ +WIPHY_DEBUGFS_WRITE_HANDLER_WRAPPER(name) \ +static ssize_t __iwl_dbgfs_##name##_write(struct file *file, \ + const char __user *user_buf, \ + size_t count, loff_t *ppos) \ +{ \ + struct ieee80211_##objtype *arg = file->private_data; \ + struct iwl_mld *mld = iwl_mld_from_##objtype(arg); \ + char buf[bufsz] = {}; \ + \ + return wiphy_locked_debugfs_write(mld->wiphy, file, \ + buf, sizeof(buf), \ + user_buf, count, \ + iwl_dbgfs_##name##_write_handler, \ + arg); \ +} + +#define WIPHY_DEBUGFS_WRITE_FILE_OPS(name, bufsz, objtype) \ + WIPHY_DEBUGFS_WRITE_WRAPPER(name, bufsz, objtype) \ + static const struct file_operations iwl_dbgfs_##name##_ops = { \ + .write = __iwl_dbgfs_##name##_write, \ + .open = simple_open, \ + .llseek = generic_file_llseek, \ + } + +#define WIPHY_DEBUGFS_READ_HANDLER_WRAPPER_MLD(name) \ +static ssize_t iwl_dbgfs_##name##_read_handler(struct wiphy *wiphy, \ + struct file *file, char *buf, \ + size_t count, void *data) \ +{ \ + struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); \ + struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw); \ + return iwl_dbgfs_##name##_read(mld, buf, count); \ +} + +#define WIPHY_DEBUGFS_WRITE_HANDLER_WRAPPER_MLD(name) \ +static ssize_t iwl_dbgfs_##name##_write_handler(struct wiphy *wiphy, \ + struct file *file, char *buf, \ + size_t count, void *data) \ +{ \ + struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); \ + struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw); \ + return iwl_dbgfs_##name##_write(mld, buf, count); \ +} + +#define WIPHY_DEBUGFS_WRITE_WRAPPER_MLD(name) \ +WIPHY_DEBUGFS_WRITE_HANDLER_WRAPPER_MLD(name) \ +static ssize_t __iwl_dbgfs_##name##_write(struct file *file, \ + const char __user *user_buf, \ + size_t count, loff_t *ppos) \ +{ \ + struct dbgfs_##name##_data *data = file->private_data; \ + struct iwl_mld *mld = data->arg; \ + \ + return wiphy_locked_debugfs_write(mld->wiphy, file, \ + data->buf, sizeof(data->buf), \ + user_buf, count, \ + iwl_dbgfs_##name##_write_handler, \ + NULL); \ +} + +#define WIPHY_DEBUGFS_READ_WRAPPER_MLD(name) \ +WIPHY_DEBUGFS_READ_HANDLER_WRAPPER_MLD(name) \ +static ssize_t __iwl_dbgfs_##name##_read(struct file *file, \ + char __user *user_buf, \ + size_t count, loff_t *ppos) \ +{ \ + struct dbgfs_##name##_data *data = file->private_data; \ + struct iwl_mld *mld = data->arg; \ + \ + if (!data->read_done) { \ + data->read_done = true; \ + data->rlen = wiphy_locked_debugfs_read(mld->wiphy, \ + file, data->buf, sizeof(data->buf), \ + user_buf, count, ppos, \ + iwl_dbgfs_##name##_read_handler, NULL); \ + return data->rlen; \ + } \ + \ + if (data->rlen < 0) \ + return data->rlen; \ + return simple_read_from_buffer(user_buf, count, ppos, \ + data->buf, data->rlen); \ +} + +#define WIPHY_DEBUGFS_READ_FILE_OPS_MLD(name, bufsz) \ + MLD_DEBUGFS_OPEN_WRAPPER(name, bufsz, struct iwl_mld) \ + WIPHY_DEBUGFS_READ_WRAPPER_MLD(name) \ + static const struct file_operations iwl_dbgfs_##name##_ops = { \ + .read = __iwl_dbgfs_##name##_read, \ + .open = _iwl_dbgfs_##name##_open, \ + .llseek = generic_file_llseek, \ + .release = _iwl_dbgfs_release, \ + } + +#define WIPHY_DEBUGFS_WRITE_FILE_OPS_MLD(name, bufsz) \ + MLD_DEBUGFS_OPEN_WRAPPER(name, bufsz, struct iwl_mld) \ + WIPHY_DEBUGFS_WRITE_WRAPPER_MLD(name) \ + static const struct file_operations iwl_dbgfs_##name##_ops = { \ + .write = __iwl_dbgfs_##name##_write, \ + .open = _iwl_dbgfs_##name##_open, \ + .llseek = 
generic_file_llseek, \ + .release = _iwl_dbgfs_release, \ + } + +#define WIPHY_DEBUGFS_READ_WRITE_FILE_OPS_MLD(name, bufsz) \ + MLD_DEBUGFS_OPEN_WRAPPER(name, bufsz, struct iwl_mld) \ + WIPHY_DEBUGFS_WRITE_WRAPPER_MLD(name) \ + WIPHY_DEBUGFS_READ_WRAPPER_MLD(name) \ + static const struct file_operations iwl_dbgfs_##name##_ops = { \ + .write = __iwl_dbgfs_##name##_write, \ + .read = __iwl_dbgfs_##name##_read, \ + .open = _iwl_dbgfs_##name##_open, \ + .llseek = generic_file_llseek, \ + .release = _iwl_dbgfs_release, \ + } + +#define WIPHY_DEBUGFS_WRITE_WRAPPER_IEEE80211(name, bufsz, objtype) \ +WIPHY_DEBUGFS_WRITE_HANDLER_WRAPPER(name) \ +static ssize_t _iwl_dbgfs_##name##_write(struct file *file, \ + const char __user *user_buf, \ + size_t count, loff_t *ppos) \ +{ \ + struct dbgfs_##name##_data *data = file->private_data; \ + struct ieee80211_##objtype *arg = data->arg; \ + struct iwl_mld *mld = iwl_mld_from_##objtype(arg); \ + char buf[bufsz] = {}; \ + \ + return wiphy_locked_debugfs_write(mld->wiphy, file, \ + buf, sizeof(buf), \ + user_buf, count, \ + iwl_dbgfs_##name##_write_handler, \ + arg); \ +} + +#define IEEE80211_WIPHY_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz, objtype) \ + MLD_DEBUGFS_OPEN_WRAPPER(name, bufsz, struct ieee80211_##objtype) \ + WIPHY_DEBUGFS_WRITE_WRAPPER_IEEE80211(name, bufsz, objtype) \ + MLD_DEBUGFS_READ_WRAPPER(name) \ + static const struct file_operations iwl_dbgfs_##name##_ops = { \ + .write = _iwl_dbgfs_##name##_write, \ + .read = _iwl_dbgfs_##name##_read, \ + .open = _iwl_dbgfs_##name##_open, \ + .llseek = generic_file_llseek, \ + .release = _iwl_dbgfs_release, \ + } diff --git a/drivers/net/wireless/intel/iwlwifi/mld/ftm-initiator.c b/drivers/net/wireless/intel/iwlwifi/mld/ftm-initiator.c new file mode 100644 index 0000000000000..f77ba21a174dd --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mld/ftm-initiator.c @@ -0,0 +1,451 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2025 Intel Corporation + */ +#include <linux/etherdevice.h> +#include <linux/math64.h> +#include <net/cfg80211.h> +#include "mld.h" +#include "iface.h" +#include "phy.h" +#include "iwl-io.h" +#include "iwl-prph.h" +#include "constants.h" +#include "fw/api/location.h" +#include "ftm-initiator.h" + +static void iwl_mld_ftm_cmd_common(struct iwl_mld *mld, + struct ieee80211_vif *vif, + struct iwl_tof_range_req_cmd *cmd, + struct cfg80211_pmsr_request *req) +{ + int i; + + cmd->initiator_flags = + cpu_to_le32(IWL_TOF_INITIATOR_FLAGS_MACADDR_RANDOM | + IWL_TOF_INITIATOR_FLAGS_NON_ASAP_SUPPORT); + cmd->request_id = req->cookie; + cmd->num_of_ap = req->n_peers; + + /* Use a large value for "no timeout". Don't use the maximum value + * because of fw limitations.
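+ * (0xfffff ms is roughly 17.5 minutes, far longer than any practical
+ * measurement session.)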
+ */ + if (req->timeout) + cmd->req_timeout_ms = cpu_to_le32(min(req->timeout, 0xfffff)); + else + cmd->req_timeout_ms = cpu_to_le32(0xfffff); + + memcpy(cmd->macaddr_template, req->mac_addr, ETH_ALEN); + for (i = 0; i < ETH_ALEN; i++) + cmd->macaddr_mask[i] = ~req->mac_addr_mask[i]; + + if (vif->cfg.assoc) { + memcpy(cmd->range_req_bssid, vif->bss_conf.bssid, ETH_ALEN); + + /* AP's TSF is only relevant if associated */ + for (i = 0; i < req->n_peers; i++) { + if (req->peers[i].report_ap_tsf) { + struct iwl_mld_vif *mld_vif = + iwl_mld_vif_from_mac80211(vif); + + cmd->tsf_mac_id = cpu_to_le32(mld_vif->fw_id); + return; + } + } + } else { + eth_broadcast_addr(cmd->range_req_bssid); + } + + /* Don't report AP's TSF */ + cmd->tsf_mac_id = cpu_to_le32(0xff); +} + +static int +iwl_mld_ftm_set_target_chandef(struct iwl_mld *mld, + struct cfg80211_pmsr_request_peer *peer, + struct iwl_tof_range_req_ap_entry *target) +{ + u32 freq = peer->chandef.chan->center_freq; + + target->channel_num = ieee80211_frequency_to_channel(freq); + + switch (peer->chandef.width) { + case NL80211_CHAN_WIDTH_20_NOHT: + target->format_bw = IWL_LOCATION_FRAME_FORMAT_LEGACY; + target->format_bw |= IWL_LOCATION_BW_20MHZ << LOCATION_BW_POS; + break; + case NL80211_CHAN_WIDTH_20: + target->format_bw = IWL_LOCATION_FRAME_FORMAT_HT; + target->format_bw |= IWL_LOCATION_BW_20MHZ << LOCATION_BW_POS; + break; + case NL80211_CHAN_WIDTH_40: + target->format_bw = IWL_LOCATION_FRAME_FORMAT_HT; + target->format_bw |= IWL_LOCATION_BW_40MHZ << LOCATION_BW_POS; + break; + case NL80211_CHAN_WIDTH_80: + target->format_bw = IWL_LOCATION_FRAME_FORMAT_VHT; + target->format_bw |= IWL_LOCATION_BW_80MHZ << LOCATION_BW_POS; + break; + case NL80211_CHAN_WIDTH_160: + target->format_bw = IWL_LOCATION_FRAME_FORMAT_HE; + target->format_bw |= IWL_LOCATION_BW_160MHZ << LOCATION_BW_POS; + break; + default: + IWL_ERR(mld, "Unsupported BW in FTM request (%d)\n", + peer->chandef.width); + return -EINVAL; +} + + /* non EDCA based measurement must use HE preamble */ + if (peer->ftm.trigger_based || peer->ftm.non_trigger_based) + target->format_bw |= IWL_LOCATION_FRAME_FORMAT_HE; + + target->ctrl_ch_position = + (peer->chandef.width > NL80211_CHAN_WIDTH_20) ? 
+ iwl_mld_get_fw_ctrl_pos(&peer->chandef) : 0; + + target->band = iwl_mld_nl80211_band_to_fw(peer->chandef.chan->band); + return 0; +} + +#define FTM_SET_FLAG(flag) (target->initiator_ap_flags |= \ + cpu_to_le32(IWL_INITIATOR_AP_FLAGS_##flag)) + +static void +iwl_mld_ftm_set_target_flags(struct iwl_mld *mld, + struct cfg80211_pmsr_request_peer *peer, + struct iwl_tof_range_req_ap_entry *target) +{ + target->initiator_ap_flags = cpu_to_le32(0); + + if (peer->ftm.asap) + FTM_SET_FLAG(ASAP); + + if (peer->ftm.request_lci) + FTM_SET_FLAG(LCI_REQUEST); + + if (peer->ftm.request_civicloc) + FTM_SET_FLAG(CIVIC_REQUEST); + + if (IWL_MLD_FTM_INITIATOR_DYNACK) + FTM_SET_FLAG(DYN_ACK); + + if (IWL_MLD_FTM_INITIATOR_ALGO == IWL_TOF_ALGO_TYPE_LINEAR_REG) + FTM_SET_FLAG(ALGO_LR); + else if (IWL_MLD_FTM_INITIATOR_ALGO == IWL_TOF_ALGO_TYPE_FFT) + FTM_SET_FLAG(ALGO_FFT); + + if (peer->ftm.trigger_based) + FTM_SET_FLAG(TB); + else if (peer->ftm.non_trigger_based) + FTM_SET_FLAG(NON_TB); + + if ((peer->ftm.trigger_based || peer->ftm.non_trigger_based) && + peer->ftm.lmr_feedback) + FTM_SET_FLAG(LMR_FEEDBACK); +} + +static void iwl_mld_ftm_set_sta(struct iwl_mld *mld, struct ieee80211_vif *vif, + struct cfg80211_pmsr_request_peer *peer, + struct iwl_tof_range_req_ap_entry *target) +{ + struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif); + u32 sta_id_mask; + + target->sta_id = IWL_INVALID_STA; + + /* TODO: add ftm_unprotected debugfs support */ + + if (!vif->cfg.assoc || !mld_vif->ap_sta) + return; + + sta_id_mask = iwl_mld_fw_sta_id_mask(mld, mld_vif->ap_sta); + if (WARN_ON(hweight32(sta_id_mask) != 1)) + return; + + target->sta_id = __ffs(sta_id_mask); + + if (mld_vif->ap_sta->mfp && + (peer->ftm.trigger_based || peer->ftm.non_trigger_based)) + FTM_SET_FLAG(PMF); +} + +static int +iwl_mld_ftm_set_target(struct iwl_mld *mld, struct ieee80211_vif *vif, + struct cfg80211_pmsr_request_peer *peer, + struct iwl_tof_range_req_ap_entry *target) +{ + u32 i2r_max_sts; + int ret; + + ret = iwl_mld_ftm_set_target_chandef(mld, peer, target); + if (ret) + return ret; + + memcpy(target->bssid, peer->addr, ETH_ALEN); + target->burst_period = cpu_to_le16(peer->ftm.burst_period); + target->samples_per_burst = peer->ftm.ftms_per_burst; + target->num_of_bursts = peer->ftm.num_bursts_exp; + iwl_mld_ftm_set_target_flags(mld, peer, target); + iwl_mld_ftm_set_sta(mld, vif, peer, target); + + /* TODO: add secured ranging support */ + + i2r_max_sts = IWL_MLD_FTM_I2R_MAX_STS > 1 ? 1 : + IWL_MLD_FTM_I2R_MAX_STS; + + target->r2i_ndp_params = IWL_MLD_FTM_R2I_MAX_REP | + (IWL_MLD_FTM_R2I_MAX_STS << IWL_LOCATION_MAX_STS_POS) | + (IWL_MLD_FTM_R2I_MAX_TOTAL_LTF << IWL_LOCATION_TOTAL_LTF_POS); + target->i2r_ndp_params = IWL_MLD_FTM_I2R_MAX_REP | + (i2r_max_sts << IWL_LOCATION_MAX_STS_POS) | + (IWL_MLD_FTM_I2R_MAX_TOTAL_LTF << IWL_LOCATION_TOTAL_LTF_POS); + + if (peer->ftm.non_trigger_based) { + target->min_time_between_msr = + cpu_to_le16(IWL_MLD_FTM_NON_TB_MIN_TIME_BETWEEN_MSR); + target->burst_period = + cpu_to_le16(IWL_MLD_FTM_NON_TB_MAX_TIME_BETWEEN_MSR); + } else { + target->min_time_between_msr = cpu_to_le16(0); + } + + /* TODO: Beacon interval is currently unknown, so use the common value + * of 100 TUs. 
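+ * (1 TU = 1024 usec, so 100 TUs = 102.4 ms.)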
+ */ + target->beacon_interval = cpu_to_le16(100); + + return 0; +} + +int iwl_mld_ftm_start(struct iwl_mld *mld, struct ieee80211_vif *vif, + struct cfg80211_pmsr_request *req) +{ + struct iwl_tof_range_req_cmd cmd; + struct iwl_host_cmd hcmd = { + .id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD), + .dataflags[0] = IWL_HCMD_DFL_DUP, + .data[0] = &cmd, + .len[0] = sizeof(cmd), + }; + u8 i; + int ret; + + lockdep_assert_wiphy(mld->wiphy); + + if (mld->ftm_initiator.req) + return -EBUSY; + + if (req->n_peers > ARRAY_SIZE(cmd.ap)) + return -EINVAL; + + memset(&cmd, 0, sizeof(cmd)); + + iwl_mld_ftm_cmd_common(mld, vif, (void *)&cmd, req); + + for (i = 0; i < cmd.num_of_ap; i++) { + struct cfg80211_pmsr_request_peer *peer = &req->peers[i]; + struct iwl_tof_range_req_ap_entry *target = &cmd.ap[i]; + + ret = iwl_mld_ftm_set_target(mld, vif, peer, target); + if (ret) + return ret; + } + + /* TODO: get the status from the response*/ + ret = iwl_mld_send_cmd(mld, &hcmd); + if (!ret) { + mld->ftm_initiator.req = req; + mld->ftm_initiator.req_wdev = ieee80211_vif_to_wdev(vif); + } + + return ret; +} + +static void iwl_mld_ftm_reset(struct iwl_mld *mld) +{ + lockdep_assert_wiphy(mld->wiphy); + + mld->ftm_initiator.req = NULL; + mld->ftm_initiator.req_wdev = NULL; + memset(mld->ftm_initiator.responses, 0, + sizeof(mld->ftm_initiator.responses)); +} + +static int iwl_mld_ftm_range_resp_valid(struct iwl_mld *mld, u8 request_id, + u8 num_of_aps) +{ + if (IWL_FW_CHECK(mld, request_id != (u8)mld->ftm_initiator.req->cookie, + "Request ID mismatch, got %u, active %u\n", + request_id, (u8)mld->ftm_initiator.req->cookie)) + return -EINVAL; + + if (IWL_FW_CHECK(mld, num_of_aps > mld->ftm_initiator.req->n_peers || + num_of_aps > IWL_TOF_MAX_APS, + "FTM range response: invalid num of APs (%u)\n", + num_of_aps)) + return -EINVAL; + + return 0; +} + +static int iwl_mld_ftm_find_peer(struct cfg80211_pmsr_request *req, + const u8 *addr) +{ + for (int i = 0; i < req->n_peers; i++) { + struct cfg80211_pmsr_request_peer *peer = &req->peers[i]; + + if (ether_addr_equal_unaligned(peer->addr, addr)) + return i; + } + + return -ENOENT; +} + +static void iwl_mld_debug_range_resp(struct iwl_mld *mld, u8 index, + struct cfg80211_pmsr_result *res) +{ + s64 rtt_avg = div_s64(res->ftm.rtt_avg * 100, 6666); + + IWL_DEBUG_INFO(mld, "entry %d\n", index); + IWL_DEBUG_INFO(mld, "\tstatus: %d\n", res->status); + IWL_DEBUG_INFO(mld, "\tBSSID: %pM\n", res->addr); + IWL_DEBUG_INFO(mld, "\thost time: %llu\n", res->host_time); + IWL_DEBUG_INFO(mld, "\tburst index: %d\n", res->ftm.burst_index); + IWL_DEBUG_INFO(mld, "\tsuccess num: %u\n", res->ftm.num_ftmr_successes); + IWL_DEBUG_INFO(mld, "\trssi: %d\n", res->ftm.rssi_avg); + IWL_DEBUG_INFO(mld, "\trssi spread: %d\n", res->ftm.rssi_spread); + IWL_DEBUG_INFO(mld, "\trtt: %lld\n", res->ftm.rtt_avg); + IWL_DEBUG_INFO(mld, "\trtt var: %llu\n", res->ftm.rtt_variance); + IWL_DEBUG_INFO(mld, "\trtt spread: %llu\n", res->ftm.rtt_spread); + IWL_DEBUG_INFO(mld, "\tdistance: %lld\n", rtt_avg); +} + +void iwl_mld_handle_ftm_resp_notif(struct iwl_mld *mld, + struct iwl_rx_packet *pkt) +{ + struct iwl_tof_range_rsp_ntfy *fw_resp = (void *)pkt->data; + u8 num_of_aps, last_in_batch; + + if (IWL_FW_CHECK(mld, !mld->ftm_initiator.req, + "FTM response without a pending request\n")) + return; + + if (iwl_mld_ftm_range_resp_valid(mld, fw_resp->request_id, + fw_resp->num_of_aps)) + return; + + num_of_aps = fw_resp->num_of_aps; + last_in_batch = fw_resp->last_report; + + IWL_DEBUG_INFO(mld, "Range response 
received\n"); + IWL_DEBUG_INFO(mld, "request id: %llu, num of entries: %u\n", + mld->ftm_initiator.req->cookie, num_of_aps); + + for (int i = 0; i < num_of_aps; i++) { + struct cfg80211_pmsr_result result = {}; + struct iwl_tof_range_rsp_ap_entry_ntfy *fw_ap; + int peer_idx; + + fw_ap = &fw_resp->ap[i]; + result.final = fw_ap->last_burst; + result.ap_tsf = le32_to_cpu(fw_ap->start_tsf); + result.ap_tsf_valid = 1; + + peer_idx = iwl_mld_ftm_find_peer(mld->ftm_initiator.req, + fw_ap->bssid); + if (peer_idx < 0) { + IWL_WARN(mld, + "Unknown address (%pM, target #%d) in FTM response\n", + fw_ap->bssid, i); + continue; + } + + switch (fw_ap->measure_status) { + case IWL_TOF_ENTRY_SUCCESS: + result.status = NL80211_PMSR_STATUS_SUCCESS; + break; + case IWL_TOF_ENTRY_TIMING_MEASURE_TIMEOUT: + result.status = NL80211_PMSR_STATUS_TIMEOUT; + break; + case IWL_TOF_ENTRY_NO_RESPONSE: + result.status = NL80211_PMSR_STATUS_FAILURE; + result.ftm.failure_reason = + NL80211_PMSR_FTM_FAILURE_NO_RESPONSE; + break; + case IWL_TOF_ENTRY_REQUEST_REJECTED: + result.status = NL80211_PMSR_STATUS_FAILURE; + result.ftm.failure_reason = + NL80211_PMSR_FTM_FAILURE_PEER_BUSY; + result.ftm.busy_retry_time = fw_ap->refusal_period; + break; + default: + result.status = NL80211_PMSR_STATUS_FAILURE; + result.ftm.failure_reason = + NL80211_PMSR_FTM_FAILURE_UNSPECIFIED; + break; + } + memcpy(result.addr, fw_ap->bssid, ETH_ALEN); + + /* TODO: convert the timestamp from the result to systime */ + result.host_time = ktime_get_boottime_ns(); + + result.type = NL80211_PMSR_TYPE_FTM; + result.ftm.burst_index = mld->ftm_initiator.responses[peer_idx]; + mld->ftm_initiator.responses[peer_idx]++; + result.ftm.rssi_avg = fw_ap->rssi; + result.ftm.rssi_avg_valid = 1; + result.ftm.rssi_spread = fw_ap->rssi_spread; + result.ftm.rssi_spread_valid = 1; + result.ftm.rtt_avg = (s32)le32_to_cpu(fw_ap->rtt); + result.ftm.rtt_avg_valid = 1; + result.ftm.rtt_variance = le32_to_cpu(fw_ap->rtt_variance); + result.ftm.rtt_variance_valid = 1; + result.ftm.rtt_spread = le32_to_cpu(fw_ap->rtt_spread); + result.ftm.rtt_spread_valid = 1; + + cfg80211_pmsr_report(mld->ftm_initiator.req_wdev, + mld->ftm_initiator.req, + &result, GFP_KERNEL); + + if (fw_has_api(&mld->fw->ucode_capa, + IWL_UCODE_TLV_API_FTM_RTT_ACCURACY)) + IWL_DEBUG_INFO(mld, "RTT confidence: %u\n", + fw_ap->rttConfidence); + + iwl_mld_debug_range_resp(mld, i, &result); + } + + if (last_in_batch) { + cfg80211_pmsr_complete(mld->ftm_initiator.req_wdev, + mld->ftm_initiator.req, + GFP_KERNEL); + iwl_mld_ftm_reset(mld); + } +} + +void iwl_mld_ftm_restart_cleanup(struct iwl_mld *mld) +{ + struct cfg80211_pmsr_result result = { + .status = NL80211_PMSR_STATUS_FAILURE, + .final = 1, + .host_time = ktime_get_boottime_ns(), + .type = NL80211_PMSR_TYPE_FTM, + }; + + if (!mld->ftm_initiator.req) + return; + + for (int i = 0; i < mld->ftm_initiator.req->n_peers; i++) { + memcpy(result.addr, mld->ftm_initiator.req->peers[i].addr, + ETH_ALEN); + + cfg80211_pmsr_report(mld->ftm_initiator.req_wdev, + mld->ftm_initiator.req, + &result, GFP_KERNEL); + } + + cfg80211_pmsr_complete(mld->ftm_initiator.req_wdev, + mld->ftm_initiator.req, GFP_KERNEL); + iwl_mld_ftm_reset(mld); +} diff --git a/drivers/net/wireless/intel/iwlwifi/mld/ftm-initiator.h b/drivers/net/wireless/intel/iwlwifi/mld/ftm-initiator.h new file mode 100644 index 0000000000000..3fab25a52508a --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mld/ftm-initiator.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * 
Copyright (C) 2025 Intel Corporation + */ +#ifndef __iwl_mld_ftm_initiator_h__ +#define __iwl_mld_ftm_initiator_h__ + +/** + * struct ftm_initiator_data - FTM initiator data + * + * @req: a pointer to cfg80211 FTM request + * @req_wdev: a pointer to the wdev that requested the current FTM request + * @responses: the number of responses received for the current FTM session. + * Used for tracking the burst index in a periodic request. + */ +struct ftm_initiator_data { + struct cfg80211_pmsr_request *req; + struct wireless_dev *req_wdev; + int responses[IWL_TOF_MAX_APS]; +}; + +int iwl_mld_ftm_start(struct iwl_mld *mld, struct ieee80211_vif *vif, + struct cfg80211_pmsr_request *req); + +void iwl_mld_handle_ftm_resp_notif(struct iwl_mld *mld, + struct iwl_rx_packet *pkt); +void iwl_mld_ftm_restart_cleanup(struct iwl_mld *mld); + +#endif /* __iwl_mld_ftm_initiator_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mld/fw.c b/drivers/net/wireless/intel/iwlwifi/mld/fw.c new file mode 100644 index 0000000000000..62da137e10247 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mld/fw.c @@ -0,0 +1,536 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2024-2025 Intel Corporation + */ + +#include "mld.h" + +#include "fw/api/alive.h" +#include "fw/api/scan.h" +#include "fw/api/rx.h" +#include "fw/dbg.h" +#include "fw/pnvm.h" +#include "hcmd.h" +#include "iwl-nvm-parse.h" +#include "power.h" +#include "mcc.h" +#include "led.h" +#include "coex.h" +#include "regulatory.h" +#include "thermal.h" + +static int iwl_mld_send_tx_ant_cfg(struct iwl_mld *mld) +{ + struct iwl_tx_ant_cfg_cmd cmd; + + lockdep_assert_wiphy(mld->wiphy); + + cmd.valid = cpu_to_le32(iwl_mld_get_valid_tx_ant(mld)); + + IWL_DEBUG_FW(mld, "select valid tx ant: %u\n", cmd.valid); + + return iwl_mld_send_cmd_pdu(mld, TX_ANT_CONFIGURATION_CMD, &cmd); +} + +static int iwl_mld_send_rss_cfg_cmd(struct iwl_mld *mld) +{ + struct iwl_rss_config_cmd cmd = { + .flags = cpu_to_le32(IWL_RSS_ENABLE), + .hash_mask = BIT(IWL_RSS_HASH_TYPE_IPV4_TCP) | + BIT(IWL_RSS_HASH_TYPE_IPV4_UDP) | + BIT(IWL_RSS_HASH_TYPE_IPV4_PAYLOAD) | + BIT(IWL_RSS_HASH_TYPE_IPV6_TCP) | + BIT(IWL_RSS_HASH_TYPE_IPV6_UDP) | + BIT(IWL_RSS_HASH_TYPE_IPV6_PAYLOAD), + }; + + lockdep_assert_wiphy(mld->wiphy); + + /* Do not direct RSS traffic to Q 0 which is our fallback queue */ + for (int i = 0; i < ARRAY_SIZE(cmd.indirection_table); i++) + cmd.indirection_table[i] = + 1 + (i % (mld->trans->num_rx_queues - 1)); + netdev_rss_key_fill(cmd.secret_key, sizeof(cmd.secret_key)); + + return iwl_mld_send_cmd_pdu(mld, RSS_CONFIG_CMD, &cmd); +} + +static int iwl_mld_config_scan(struct iwl_mld *mld) +{ + struct iwl_scan_config cmd = { + .tx_chains = cpu_to_le32(iwl_mld_get_valid_tx_ant(mld)), + .rx_chains = cpu_to_le32(iwl_mld_get_valid_rx_ant(mld)) + }; + + return iwl_mld_send_cmd_pdu(mld, WIDE_ID(LONG_GROUP, SCAN_CFG_CMD), + &cmd); +} + +static void iwl_mld_alive_imr_data(struct iwl_trans *trans, + const struct iwl_imr_alive_info *imr_info) +{ + struct iwl_imr_data *imr_data = &trans->dbg.imr_data; + + imr_data->imr_enable = le32_to_cpu(imr_info->enabled); + imr_data->imr_size = le32_to_cpu(imr_info->size); + imr_data->imr2sram_remainbyte = imr_data->imr_size; + imr_data->imr_base_addr = imr_info->base_addr; + imr_data->imr_curr_addr = le64_to_cpu(imr_data->imr_base_addr); + + if (imr_data->imr_enable) + return; + + for (int i = 0; i < ARRAY_SIZE(trans->dbg.active_regions); i++) { + struct iwl_fw_ini_region_tlv *reg; + + if (!trans->dbg.active_regions[i]) + 
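/* skip unused region slots */ +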
continue; + + reg = (void *)trans->dbg.active_regions[i]->data; + + /* We have only one DRAM IMR region, so we + * can break as soon as we find the first + * one. + */ + if (reg->type == IWL_FW_INI_REGION_DRAM_IMR) { + trans->dbg.unsupported_region_msk |= BIT(i); + break; + } + } +} + +static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait, + struct iwl_rx_packet *pkt, void *data) +{ + unsigned int pkt_len = iwl_rx_packet_payload_len(pkt); + struct iwl_mld *mld = + container_of(notif_wait, struct iwl_mld, notif_wait); + struct iwl_trans *trans = mld->trans; + u32 version = iwl_fw_lookup_notif_ver(mld->fw, LEGACY_GROUP, + UCODE_ALIVE_NTFY, 0); + struct iwl_alive_ntf_v6 *palive; + bool *alive_valid = data; + struct iwl_umac_alive *umac; + struct iwl_lmac_alive *lmac1; + struct iwl_lmac_alive *lmac2 = NULL; + u32 lmac_error_event_table; + u32 umac_error_table; + u16 status; + + if (version < 6 || version > 7 || pkt_len != sizeof(*palive)) + return false; + + palive = (void *)pkt->data; + + iwl_mld_alive_imr_data(trans, &palive->imr); + + umac = &palive->umac_data; + lmac1 = &palive->lmac_data[0]; + lmac2 = &palive->lmac_data[1]; + status = le16_to_cpu(palive->status); + + trans->sku_id[0] = le32_to_cpu(palive->sku_id.data[0]); + trans->sku_id[1] = le32_to_cpu(palive->sku_id.data[1]); + trans->sku_id[2] = le32_to_cpu(palive->sku_id.data[2]); + + IWL_DEBUG_FW(mld, "Got sku_id: 0x0%x 0x0%x 0x0%x\n", + trans->sku_id[0], trans->sku_id[1], trans->sku_id[2]); + + lmac_error_event_table = + le32_to_cpu(lmac1->dbg_ptrs.error_event_table_ptr); + iwl_fw_lmac1_set_alive_err_table(trans, lmac_error_event_table); + + if (lmac2) + trans->dbg.lmac_error_event_table[1] = + le32_to_cpu(lmac2->dbg_ptrs.error_event_table_ptr); + + umac_error_table = le32_to_cpu(umac->dbg_ptrs.error_info_addr) & + ~FW_ADDR_CACHE_CONTROL; + + if (umac_error_table >= trans->cfg->min_umac_error_event_table) + iwl_fw_umac_set_alive_err_table(trans, umac_error_table); + else + IWL_ERR(mld, "Not valid error log pointer 0x%08X\n", + umac_error_table); + + *alive_valid = status == IWL_ALIVE_STATUS_OK; + + IWL_DEBUG_FW(mld, + "Alive ucode status 0x%04x revision 0x%01X 0x%01X\n", + status, lmac1->ver_type, lmac1->ver_subtype); + + if (lmac2) + IWL_DEBUG_FW(mld, "Alive ucode CDB\n"); + + IWL_DEBUG_FW(mld, + "UMAC version: Major - 0x%x, Minor - 0x%x\n", + le32_to_cpu(umac->umac_major), + le32_to_cpu(umac->umac_minor)); + + if (version >= 7) + IWL_DEBUG_FW(mld, "FW alive flags 0x%x\n", + le16_to_cpu(palive->flags)); + + iwl_fwrt_update_fw_versions(&mld->fwrt, lmac1, umac); + + return true; +} + +#define MLD_ALIVE_TIMEOUT (2 * HZ) +#define MLD_INIT_COMPLETE_TIMEOUT (2 * HZ) + +static void iwl_mld_print_alive_notif_timeout(struct iwl_mld *mld) +{ + struct iwl_trans *trans = mld->trans; + struct iwl_pc_data *pc_data; + u8 count; + + IWL_ERR(mld, + "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n", + iwl_read_umac_prph(trans, UMAG_SB_CPU_1_STATUS), + iwl_read_umac_prph(trans, + UMAG_SB_CPU_2_STATUS)); +#define IWL_FW_PRINT_REG_INFO(reg_name) \ + IWL_ERR(mld, #reg_name ": 0x%x\n", iwl_read_umac_prph(trans, reg_name)) + + IWL_FW_PRINT_REG_INFO(WFPM_LMAC1_PD_NOTIFICATION); + + IWL_FW_PRINT_REG_INFO(HPM_SECONDARY_DEVICE_STATE); + + /* print OTP info */ + IWL_FW_PRINT_REG_INFO(WFPM_MAC_OTP_CFG7_ADDR); + IWL_FW_PRINT_REG_INFO(WFPM_MAC_OTP_CFG7_DATA); +#undef IWL_FW_PRINT_REG_INFO + + pc_data = trans->dbg.pc_data; + for (count = 0; count < trans->dbg.num_pc; count++, pc_data++) + IWL_ERR(mld, "%s: 0x%x\n", pc_data->pc_name, + 
pc_data->pc_address); +} + +static int iwl_mld_load_fw_wait_alive(struct iwl_mld *mld) +{ + const struct fw_img *fw = + iwl_get_ucode_image(mld->fw, IWL_UCODE_REGULAR); + static const u16 alive_cmd[] = { UCODE_ALIVE_NTFY }; + struct iwl_notification_wait alive_wait; + bool alive_valid = false; + int ret; + + lockdep_assert_wiphy(mld->wiphy); + + iwl_init_notification_wait(&mld->notif_wait, &alive_wait, + alive_cmd, ARRAY_SIZE(alive_cmd), + iwl_alive_fn, &alive_valid); + + iwl_dbg_tlv_time_point(&mld->fwrt, IWL_FW_INI_TIME_POINT_EARLY, NULL); + + ret = iwl_trans_start_fw(mld->trans, fw, true); + if (ret) { + iwl_remove_notification(&mld->notif_wait, &alive_wait); + return ret; + } + + ret = iwl_wait_notification(&mld->notif_wait, &alive_wait, + MLD_ALIVE_TIMEOUT); + + if (ret) { + if (ret == -ETIMEDOUT) + iwl_fw_dbg_error_collect(&mld->fwrt, + FW_DBG_TRIGGER_ALIVE_TIMEOUT); + iwl_mld_print_alive_notif_timeout(mld); + goto alive_failure; + } + + if (!alive_valid) { + IWL_ERR(mld, "Loaded firmware is not valid!\n"); + ret = -EIO; + goto alive_failure; + } + + iwl_trans_fw_alive(mld->trans, 0); + + return 0; + +alive_failure: + iwl_trans_stop_device(mld->trans); + return ret; +} + +static int iwl_mld_run_fw_init_sequence(struct iwl_mld *mld) +{ + struct iwl_notification_wait init_wait; + struct iwl_init_extended_cfg_cmd init_cfg = {}; + static const u16 init_complete[] = { + INIT_COMPLETE_NOTIF, + }; + int ret; + + lockdep_assert_wiphy(mld->wiphy); + + ret = iwl_mld_load_fw_wait_alive(mld); + if (ret) + return ret; + + mld->trans->step_urm = + !!(iwl_read_umac_prph(mld->trans, CNVI_PMU_STEP_FLOW) & + CNVI_PMU_STEP_FLOW_FORCE_URM); + + ret = iwl_pnvm_load(mld->trans, &mld->notif_wait, + &mld->fw->ucode_capa); + if (ret) { + IWL_ERR(mld, "Timeout waiting for PNVM load %d\n", ret); + goto init_failure; + } + + iwl_dbg_tlv_time_point(&mld->fwrt, IWL_FW_INI_TIME_POINT_AFTER_ALIVE, + NULL); + + iwl_init_notification_wait(&mld->notif_wait, + &init_wait, + init_complete, + ARRAY_SIZE(init_complete), + NULL, NULL); + + ret = iwl_mld_send_cmd_pdu(mld, + WIDE_ID(SYSTEM_GROUP, INIT_EXTENDED_CFG_CMD), + &init_cfg); + if (ret) { + IWL_ERR(mld, "Failed to send init config command: %d\n", ret); + iwl_remove_notification(&mld->notif_wait, &init_wait); + goto init_failure; + } + + ret = iwl_wait_notification(&mld->notif_wait, &init_wait, + MLD_INIT_COMPLETE_TIMEOUT); + if (ret) { + IWL_ERR(mld, "Failed to get INIT_COMPLETE %d\n", ret); + goto init_failure; + } + + if (!mld->nvm_data) { + mld->nvm_data = iwl_get_nvm(mld->trans, mld->fw, 0, 0); + if (IS_ERR(mld->nvm_data)) { + ret = PTR_ERR(mld->nvm_data); + mld->nvm_data = NULL; + IWL_ERR(mld, "Failed to read NVM: %d\n", ret); + goto init_failure; + } + } + + return 0; + +init_failure: + iwl_trans_stop_device(mld->trans); + return ret; +} + +int iwl_mld_load_fw(struct iwl_mld *mld) +{ + int ret; + + lockdep_assert_wiphy(mld->wiphy); + + ret = iwl_trans_start_hw(mld->trans); + if (ret) + return ret; + + ret = iwl_mld_run_fw_init_sequence(mld); + if (ret) + return ret; + + ret = iwl_mld_init_mcc(mld); + if (ret) + return ret; + + mld->fw_status.running = true; + + return 0; +} + +void iwl_mld_stop_fw(struct iwl_mld *mld) +{ + lockdep_assert_wiphy(mld->wiphy); + + iwl_abort_notification_waits(&mld->notif_wait); + + iwl_fw_dbg_stop_sync(&mld->fwrt); + + iwl_trans_stop_device(mld->trans); + + mld->fw_status.running = false; +} + +static void iwl_mld_restart_disconnect_iter(void *data, u8 *mac, + struct ieee80211_vif *vif) +{ + if (vif->type == NL80211_IFTYPE_STATION) 
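+ /* only station interfaces have a connection to re-establish */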
+ ieee80211_hw_restart_disconnect(vif); +} + +void iwl_mld_send_recovery_cmd(struct iwl_mld *mld, u32 flags) +{ + u32 error_log_size = mld->fw->ucode_capa.error_log_size; + struct iwl_fw_error_recovery_cmd recovery_cmd = { + .flags = cpu_to_le32(flags), + }; + struct iwl_host_cmd cmd = { + .id = WIDE_ID(SYSTEM_GROUP, FW_ERROR_RECOVERY_CMD), + .flags = CMD_WANT_SKB, + .data = {&recovery_cmd, }, + .len = {sizeof(recovery_cmd), }, + }; + int ret; + + /* no error log was defined in TLV */ + if (!error_log_size) + return; + + if (flags & ERROR_RECOVERY_UPDATE_DB) { + /* no buf was allocated upon NIC error */ + if (!mld->error_recovery_buf) + return; + + cmd.data[1] = mld->error_recovery_buf; + cmd.len[1] = error_log_size; + cmd.dataflags[1] = IWL_HCMD_DFL_NOCOPY; + recovery_cmd.buf_size = cpu_to_le32(error_log_size); + } + + ret = iwl_mld_send_cmd(mld, &cmd); + + /* we no longer need the recovery buffer */ + kfree(mld->error_recovery_buf); + mld->error_recovery_buf = NULL; + + if (ret) { + IWL_ERR(mld, "Failed to send recovery cmd %d\n", ret); + return; + } + + if (flags & ERROR_RECOVERY_UPDATE_DB) { + struct iwl_rx_packet *pkt = cmd.resp_pkt; + u32 pkt_len = iwl_rx_packet_payload_len(pkt); + u32 resp; + + if (IWL_FW_CHECK(mld, pkt_len != sizeof(resp), + "Unexpected recovery cmd response size %u (expected %zu)\n", + pkt_len, sizeof(resp))) + goto out; + + resp = le32_to_cpup((__le32 *)cmd.resp_pkt->data); + if (!resp) + goto out; + + IWL_ERR(mld, + "Sent recovery cmd, but the blob was invalid %d\n", + resp); + + ieee80211_iterate_interfaces(mld->hw, 0, + iwl_mld_restart_disconnect_iter, + NULL); + } + +out: + iwl_free_resp(&cmd); +} + +static int iwl_mld_config_fw(struct iwl_mld *mld) +{ + int ret; + + lockdep_assert_wiphy(mld->wiphy); + + iwl_fw_disable_dbg_asserts(&mld->fwrt); + iwl_get_shared_mem_conf(&mld->fwrt); + + ret = iwl_mld_send_tx_ant_cfg(mld); + if (ret) + return ret; + + ret = iwl_mld_send_bt_init_conf(mld); + if (ret) + return ret; + + ret = iwl_set_soc_latency(&mld->fwrt); + if (ret) + return ret; + + iwl_mld_configure_lari(mld); + + ret = iwl_mld_config_temp_report_ths(mld); + if (ret) + return ret; + +#ifdef CONFIG_THERMAL + ret = iwl_mld_config_ctdp(mld, mld->cooling_dev.cur_state, + CTDP_CMD_OPERATION_START); + if (ret) + return ret; +#endif + + ret = iwl_configure_rxq(&mld->fwrt); + if (ret) + return ret; + + ret = iwl_mld_send_rss_cfg_cmd(mld); + if (ret) + return ret; + + ret = iwl_mld_config_scan(mld); + if (ret) + return ret; + + ret = iwl_mld_update_device_power(mld, false); + if (ret) + return ret; + + if (mld->fw_status.in_hw_restart) { + iwl_mld_send_recovery_cmd(mld, ERROR_RECOVERY_UPDATE_DB); + iwl_mld_time_sync_fw_config(mld); + } + + iwl_mld_led_config_fw(mld); + + ret = iwl_mld_init_ppag(mld); + if (ret) + return ret; + + ret = iwl_mld_init_sar(mld); + if (ret) + return ret; + + ret = iwl_mld_init_sgom(mld); + if (ret) + return ret; + + iwl_mld_init_tas(mld); + iwl_mld_init_uats(mld); + + return 0; +} + +int iwl_mld_start_fw(struct iwl_mld *mld) +{ + int ret; + + lockdep_assert_wiphy(mld->wiphy); + + ret = iwl_mld_load_fw(mld); + if (IWL_FW_CHECK(mld, ret, "Failed to start firmware %d\n", ret)) { + iwl_fw_dbg_error_collect(&mld->fwrt, FW_DBG_TRIGGER_DRIVER); + goto error; + } + + IWL_DEBUG_INFO(mld, "uCode started.\n"); + + ret = iwl_mld_config_fw(mld); + if (ret) + goto error; + + return 0; + +error: + iwl_mld_stop_fw(mld); + return ret; +} diff --git a/drivers/net/wireless/intel/iwlwifi/mld/hcmd.h b/drivers/net/wireless/intel/iwlwifi/mld/hcmd.h new file mode
100644 index 0000000000000..64a8d42483241 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mld/hcmd.h @@ -0,0 +1,56 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * Copyright (C) 2024-2025 Intel Corporation + */ +#ifndef __iwl_mld_hcmd_h__ +#define __iwl_mld_hcmd_h__ + +static inline int iwl_mld_send_cmd(struct iwl_mld *mld, struct iwl_host_cmd *cmd) +{ + /* No commands, including the d3 related commands, should be sent + * after entering d3 + */ +#ifdef CONFIG_PM_SLEEP + if (WARN_ON(mld->fw_status.in_d3)) + return -EIO; +#endif + + if (!(cmd->flags & CMD_ASYNC)) + lockdep_assert_wiphy(mld->wiphy); + + /* Devices that need to shut down immediately on rfkill are not + * supported, so we can send all the cmds in rfkill + */ + cmd->flags |= CMD_SEND_IN_RFKILL; + + return iwl_trans_send_cmd(mld->trans, cmd); +} + +static inline int +__iwl_mld_send_cmd_with_flags_pdu(struct iwl_mld *mld, u32 id, + u32 flags, const void *data, u16 len) +{ + struct iwl_host_cmd cmd = { + .id = id, + .len = { data ? len : 0, }, + .data = { data, }, + .flags = flags, + }; + + return iwl_mld_send_cmd(mld, &cmd); +} + +#define _iwl_mld_send_cmd_with_flags_pdu(mld, id, flags, data, len, \ + ignored...) \ + __iwl_mld_send_cmd_with_flags_pdu(mld, id, flags, data, len) +#define iwl_mld_send_cmd_with_flags_pdu(mld, id, flags, data, len...) \ + _iwl_mld_send_cmd_with_flags_pdu(mld, id, flags, data, ##len, \ + sizeof(*(data))) + +#define iwl_mld_send_cmd_pdu(mld, id, ...) \ + iwl_mld_send_cmd_with_flags_pdu(mld, id, 0, __VA_ARGS__) + +#define iwl_mld_send_cmd_empty(mld, id) \ + iwl_mld_send_cmd_with_flags_pdu(mld, id, 0, NULL, 0) + +#endif /* __iwl_mld_hcmd_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mld/iface.c b/drivers/net/wireless/intel/iwlwifi/mld/iface.c new file mode 100644 index 0000000000000..e49e2260ac05e --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mld/iface.c @@ -0,0 +1,671 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2024-2025 Intel Corporation + */ +#include <net/mac80211.h> + +#include "iface.h" +#include "hcmd.h" +#include "key.h" +#include "mlo.h" +#include "mac80211.h" + +#include "fw/api/context.h" +#include "fw/api/mac.h" +#include "fw/api/time-event.h" +#include "fw/api/datapath.h" + +/* Cleanup function for struct iwl_mld_vif, will be called in restart */ +void iwl_mld_cleanup_vif(void *data, u8 *mac, struct ieee80211_vif *vif) +{ + struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif); + struct iwl_mld *mld = mld_vif->mld; + struct iwl_mld_link *link; + + /* EMLSR is turned back on during recovery */ + vif->driver_flags &= ~IEEE80211_VIF_EML_ACTIVE; + + mld_vif->roc_activity = ROC_NUM_ACTIVITIES; + + for_each_mld_vif_valid_link(mld_vif, link) { + iwl_mld_cleanup_link(mld_vif->mld, link); + + /* Correctly allocated primary link in non-MLO mode */ + if (!ieee80211_vif_is_mld(vif) && + link_id == 0 && link == &mld_vif->deflink) + continue; + + if (vif->active_links & BIT(link_id)) + continue; + + /* Should not happen as link removal should always succeed */ + WARN_ON(1); + if (link != &mld_vif->deflink) + kfree_rcu(link, rcu_head); + RCU_INIT_POINTER(mld_vif->link[link_id], NULL); + } + + ieee80211_iter_keys(mld->hw, vif, iwl_mld_cleanup_keys_iter, NULL); + + CLEANUP_STRUCT(mld_vif); +} + +static int iwl_mld_send_mac_cmd(struct iwl_mld *mld, + struct iwl_mac_config_cmd *cmd) +{ + int ret; + + lockdep_assert_wiphy(mld->wiphy); + + ret = iwl_mld_send_cmd_pdu(mld, + WIDE_ID(MAC_CONF_GROUP, MAC_CONFIG_CMD), + cmd); + if (ret) + IWL_ERR(mld,
"Failed to send MAC_CONFIG_CMD ret = %d\n", ret); + + return ret; +} + +int iwl_mld_mac80211_iftype_to_fw(const struct ieee80211_vif *vif) +{ + switch (vif->type) { + case NL80211_IFTYPE_STATION: + return vif->p2p ? FW_MAC_TYPE_P2P_STA : FW_MAC_TYPE_BSS_STA; + case NL80211_IFTYPE_AP: + return FW_MAC_TYPE_GO; + case NL80211_IFTYPE_MONITOR: + return FW_MAC_TYPE_LISTENER; + case NL80211_IFTYPE_P2P_DEVICE: + return FW_MAC_TYPE_P2P_DEVICE; + case NL80211_IFTYPE_ADHOC: + return FW_MAC_TYPE_IBSS; + default: + WARN_ON_ONCE(1); + } + return FW_MAC_TYPE_BSS_STA; +} + +static bool iwl_mld_is_nic_ack_enabled(struct iwl_mld *mld, + struct ieee80211_vif *vif) +{ + const struct ieee80211_supported_band *sband; + const struct ieee80211_sta_he_cap *own_he_cap; + + lockdep_assert_wiphy(mld->wiphy); + + /* This capability is the same for all bands, + * so take it from one of them. + */ + sband = mld->hw->wiphy->bands[NL80211_BAND_2GHZ]; + own_he_cap = ieee80211_get_he_iftype_cap_vif(sband, vif); + + return own_he_cap && (own_he_cap->he_cap_elem.mac_cap_info[2] & + IEEE80211_HE_MAC_CAP2_ACK_EN); +} + +/* fill the common part for all interface types */ +static void iwl_mld_mac_cmd_fill_common(struct iwl_mld *mld, + struct ieee80211_vif *vif, + struct iwl_mac_config_cmd *cmd, + u32 action) +{ + struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif); + struct ieee80211_bss_conf *link_conf; + unsigned int link_id; + + lockdep_assert_wiphy(mld->wiphy); + + cmd->id_and_color = cpu_to_le32(mld_vif->fw_id); + cmd->action = cpu_to_le32(action); + + cmd->mac_type = + cpu_to_le32(iwl_mld_mac80211_iftype_to_fw(vif)); + + memcpy(cmd->local_mld_addr, vif->addr, ETH_ALEN); + + if (iwlwifi_mod_params.disable_11ax) + return; + + cmd->nic_not_ack_enabled = + cpu_to_le32(!iwl_mld_is_nic_ack_enabled(mld, vif)); + + /* If we have MLO enabled, then the firmware needs to enable + * address translation for the station(s) we add. That depends + * on having EHT enabled in firmware, which in turn depends on + * mac80211 in the code below. + * However, mac80211 doesn't enable HE/EHT until it has parsed + * the association response successfully, so just skip all that + * and enable both when we have MLO. + */ + if (ieee80211_vif_is_mld(vif)) { + if (vif->type == NL80211_IFTYPE_AP) + cmd->he_ap_support = cpu_to_le16(1); + else + cmd->he_support = cpu_to_le16(1); + + cmd->eht_support = cpu_to_le32(1); + return; + } + + for_each_vif_active_link(vif, link_conf, link_id) { + if (!link_conf->he_support) + continue; + + if (vif->type == NL80211_IFTYPE_AP) + cmd->he_ap_support = cpu_to_le16(1); + else + cmd->he_support = cpu_to_le16(1); + + /* EHT, if supported, was already set above */ + break; + } +} + +static void iwl_mld_fill_mac_cmd_sta(struct iwl_mld *mld, + struct ieee80211_vif *vif, u32 action, + struct iwl_mac_config_cmd *cmd) +{ + struct ieee80211_bss_conf *link; + u32 twt_policy = 0; + int link_id; + + lockdep_assert_wiphy(mld->wiphy); + + WARN_ON(vif->type != NL80211_IFTYPE_STATION); + + /* We always want to hear MCAST frames, if we're not authorized yet, + * we'll drop them. 
+ */ + cmd->filter_flags |= cpu_to_le32(MAC_CFG_FILTER_ACCEPT_GRP); + + /* Adding a MAC ctxt with is_assoc set is not allowed in fw + * (and shouldn't happen) + */ + if (vif->cfg.assoc && action != FW_CTXT_ACTION_ADD) { + cmd->client.is_assoc = 1; + + if (!iwl_mld_vif_from_mac80211(vif)->authorized) + cmd->client.data_policy |= + cpu_to_le16(COEX_HIGH_PRIORITY_ENABLE); + } else { + /* Allow beacons to pass through as long as we are not + * associated + */ + cmd->filter_flags |= cpu_to_le32(MAC_CFG_FILTER_ACCEPT_BEACON); + } + + cmd->client.assoc_id = cpu_to_le16(vif->cfg.aid); + + if (ieee80211_vif_is_mld(vif)) { + u16 esr_transition_timeout = + u16_get_bits(vif->cfg.eml_cap, + IEEE80211_EML_CAP_TRANSITION_TIMEOUT); + + cmd->client.esr_transition_timeout = + min_t(u16, IEEE80211_EML_CAP_TRANSITION_TIMEOUT_128TU, + esr_transition_timeout); + cmd->client.medium_sync_delay = + cpu_to_le16(vif->cfg.eml_med_sync_delay); + } + + for_each_vif_active_link(vif, link, link_id) { + if (!link->he_support) + continue; + + if (link->twt_requester) + twt_policy |= TWT_SUPPORTED; + if (link->twt_protected) + twt_policy |= PROTECTED_TWT_SUPPORTED; + if (link->twt_broadcast) + twt_policy |= BROADCAST_TWT_SUPPORTED; + } + + if (!iwlwifi_mod_params.disable_11ax) + cmd->client.data_policy |= cpu_to_le16(twt_policy); + + if (vif->probe_req_reg && vif->cfg.assoc && vif->p2p) + cmd->filter_flags |= + cpu_to_le32(MAC_CFG_FILTER_ACCEPT_PROBE_REQ); + + if (vif->p2p) + cmd->client.ctwin = + cpu_to_le32(vif->bss_conf.p2p_noa_attr.oppps_ctwindow & + IEEE80211_P2P_OPPPS_CTWINDOW_MASK); +} + +static void iwl_mld_fill_mac_cmd_ap(struct iwl_mld *mld, + struct ieee80211_vif *vif, + struct iwl_mac_config_cmd *cmd) +{ + struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif); + + lockdep_assert_wiphy(mld->wiphy); + + WARN_ON(vif->type != NL80211_IFTYPE_AP); + + cmd->filter_flags |= cpu_to_le32(MAC_CFG_FILTER_ACCEPT_PROBE_REQ); + + /* in AP mode, pass beacons from other APs (needed for HT protection). + * When there are no associated stations we are not TXing anyway, so + * don't ask the FW to pass beacons, to prevent unnecessary wake-ups. + */ + if (mld_vif->num_associated_stas) + cmd->filter_flags |= cpu_to_le32(MAC_CFG_FILTER_ACCEPT_BEACON); +} + +static void iwl_mld_go_iterator(void *_data, u8 *mac, struct ieee80211_vif *vif) +{ + bool *go_active = _data; + + if (ieee80211_vif_type_p2p(vif) == NL80211_IFTYPE_P2P_GO && + iwl_mld_vif_from_mac80211(vif)->ap_ibss_active) + *go_active = true; +} + +static bool iwl_mld_p2p_dev_has_extended_disc(struct iwl_mld *mld) +{ + bool go_active = false; + + /* This flag should be set to true when the P2P Device is + * discoverable and there is at least one P2P GO. Setting + * this flag will allow the P2P Device to be discoverable on other + * channels in addition to its listen channel. + * Note that this flag should not be set in other cases as it opens the + * Rx filters on all MACs and increases the number of interrupts. + */ + ieee80211_iterate_active_interfaces(mld->hw, + IEEE80211_IFACE_ITER_RESUME_ALL, + iwl_mld_go_iterator, &go_active); + + return go_active; +} + +static void iwl_mld_fill_mac_cmd_p2p_dev(struct iwl_mld *mld, + struct ieee80211_vif *vif, + struct iwl_mac_config_cmd *cmd) +{ + bool ext_disc = iwl_mld_p2p_dev_has_extended_disc(mld); + + lockdep_assert_wiphy(mld->wiphy); + + /* Override the filter flags to accept all management frames.
This is + * needed to support both P2P device discovery using probe requests and + * P2P service discovery using action frames + */ + cmd->filter_flags = cpu_to_le32(MAC_CFG_FILTER_ACCEPT_CONTROL_AND_MGMT); + + if (ext_disc) + cmd->p2p_dev.is_disc_extended = cpu_to_le32(1); +} + +static void iwl_mld_fill_mac_cmd_ibss(struct iwl_mld *mld, + struct ieee80211_vif *vif, + struct iwl_mac_config_cmd *cmd) +{ + lockdep_assert_wiphy(mld->wiphy); + + WARN_ON(vif->type != NL80211_IFTYPE_ADHOC); + + cmd->filter_flags |= cpu_to_le32(MAC_CFG_FILTER_ACCEPT_BEACON | + MAC_CFG_FILTER_ACCEPT_PROBE_REQ | + MAC_CFG_FILTER_ACCEPT_GRP); +} + +static int +iwl_mld_rm_mac_from_fw(struct iwl_mld *mld, struct ieee80211_vif *vif) +{ + struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif); + struct iwl_mac_config_cmd cmd = { + .action = cpu_to_le32(FW_CTXT_ACTION_REMOVE), + .id_and_color = cpu_to_le32(mld_vif->fw_id), + }; + + return iwl_mld_send_mac_cmd(mld, &cmd); +} + +int iwl_mld_mac_fw_action(struct iwl_mld *mld, struct ieee80211_vif *vif, + u32 action) +{ + struct iwl_mac_config_cmd cmd = {}; + + lockdep_assert_wiphy(mld->wiphy); + + if (action == FW_CTXT_ACTION_REMOVE) + return iwl_mld_rm_mac_from_fw(mld, vif); + + iwl_mld_mac_cmd_fill_common(mld, vif, &cmd, action); + + switch (vif->type) { + case NL80211_IFTYPE_STATION: + iwl_mld_fill_mac_cmd_sta(mld, vif, action, &cmd); + break; + case NL80211_IFTYPE_AP: + iwl_mld_fill_mac_cmd_ap(mld, vif, &cmd); + break; + case NL80211_IFTYPE_MONITOR: + cmd.filter_flags = + cpu_to_le32(MAC_CFG_FILTER_PROMISC | + MAC_CFG_FILTER_ACCEPT_CONTROL_AND_MGMT | + MAC_CFG_FILTER_ACCEPT_BEACON | + MAC_CFG_FILTER_ACCEPT_PROBE_REQ | + MAC_CFG_FILTER_ACCEPT_GRP); + break; + case NL80211_IFTYPE_P2P_DEVICE: + iwl_mld_fill_mac_cmd_p2p_dev(mld, vif, &cmd); + break; + case NL80211_IFTYPE_ADHOC: + iwl_mld_fill_mac_cmd_ibss(mld, vif, &cmd); + break; + default: + WARN(1, "not supported yet\n"); + return -EOPNOTSUPP; + } + + return iwl_mld_send_mac_cmd(mld, &cmd); +} + +IWL_MLD_ALLOC_FN(vif, vif) + +/* Constructor function for struct iwl_mld_vif */ +static int +iwl_mld_init_vif(struct iwl_mld *mld, struct ieee80211_vif *vif) +{ + struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif); + int ret; + + lockdep_assert_wiphy(mld->wiphy); + + mld_vif->mld = mld; + mld_vif->roc_activity = ROC_NUM_ACTIVITIES; + + ret = iwl_mld_allocate_vif_fw_id(mld, &mld_vif->fw_id, vif); + if (ret) + return ret; + + if (!mld->fw_status.in_hw_restart) { + wiphy_work_init(&mld_vif->emlsr.unblock_tpt_wk, + iwl_mld_emlsr_unblock_tpt_wk); + wiphy_delayed_work_init(&mld_vif->emlsr.check_tpt_wk, + iwl_mld_emlsr_check_tpt); + wiphy_delayed_work_init(&mld_vif->emlsr.prevent_done_wk, + iwl_mld_emlsr_prevent_done_wk); + wiphy_delayed_work_init(&mld_vif->emlsr.tmp_non_bss_done_wk, + iwl_mld_emlsr_tmp_non_bss_done_wk); + } + + return 0; +} + +int iwl_mld_add_vif(struct iwl_mld *mld, struct ieee80211_vif *vif) +{ + struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif); + int ret; + + lockdep_assert_wiphy(mld->wiphy); + + ret = iwl_mld_init_vif(mld, vif); + if (ret) + return ret; + + ret = iwl_mld_mac_fw_action(mld, vif, FW_CTXT_ACTION_ADD); + if (ret) + RCU_INIT_POINTER(mld->fw_id_to_vif[mld_vif->fw_id], NULL); + + return ret; +} + +int iwl_mld_rm_vif(struct iwl_mld *mld, struct ieee80211_vif *vif) +{ + struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif); + int ret; + + lockdep_assert_wiphy(mld->wiphy); + + ret = iwl_mld_mac_fw_action(mld, vif, FW_CTXT_ACTION_REMOVE); + + if (WARN_ON(mld_vif->fw_id >= 
ARRAY_SIZE(mld->fw_id_to_vif))) + return -EINVAL; + + RCU_INIT_POINTER(mld->fw_id_to_vif[mld_vif->fw_id], NULL); + + iwl_mld_cancel_notifications_of_object(mld, IWL_MLD_OBJECT_TYPE_VIF, + mld_vif->fw_id); + + return ret; +} + +void iwl_mld_set_vif_associated(struct iwl_mld *mld, + struct ieee80211_vif *vif) +{ + struct ieee80211_bss_conf *link; + unsigned int link_id; + + for_each_vif_active_link(vif, link, link_id) { + if (iwl_mld_link_set_associated(mld, vif, link)) + IWL_ERR(mld, "failed to update link %d\n", link_id); + } + + iwl_mld_recalc_multicast_filter(mld); +} + +static void iwl_mld_get_fw_id_bss_bitmap_iter(void *_data, u8 *mac, + struct ieee80211_vif *vif) +{ + u8 *fw_id_bitmap = _data; + struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif); + + if (ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_STATION) + return; + + *fw_id_bitmap |= BIT(mld_vif->fw_id); +} + +u8 iwl_mld_get_fw_bss_vifs_ids(struct iwl_mld *mld) +{ + u8 fw_id_bitmap = 0; + + ieee80211_iterate_active_interfaces_mtx(mld->hw, + IEEE80211_IFACE_SKIP_SDATA_NOT_IN_DRIVER, + iwl_mld_get_fw_id_bss_bitmap_iter, + &fw_id_bitmap); + + return fw_id_bitmap; +} + +void iwl_mld_handle_probe_resp_data_notif(struct iwl_mld *mld, + struct iwl_rx_packet *pkt) +{ + const struct iwl_probe_resp_data_notif *notif = (void *)pkt->data; + struct iwl_probe_resp_data *old_data, *new_data; + struct ieee80211_vif *vif; + struct iwl_mld_link *mld_link; + + IWL_DEBUG_INFO(mld, "Probe response data notif: noa %d, csa %d\n", + notif->noa_active, notif->csa_counter); + + if (IWL_FW_CHECK(mld, le32_to_cpu(notif->mac_id) >= + ARRAY_SIZE(mld->fw_id_to_vif), + "mac id is invalid: %d\n", + le32_to_cpu(notif->mac_id))) + return; + + vif = wiphy_dereference(mld->wiphy, + mld->fw_id_to_vif[le32_to_cpu(notif->mac_id)]); + + /* the firmware gives us the mac_id (and not the link_id); mac80211 + * gets a vif and not a link. Bottom line: this flow is not MLD ready + * yet. + */ + if (WARN_ON(!vif) || ieee80211_vif_is_mld(vif)) + return; + + if (notif->csa_counter != IWL_PROBE_RESP_DATA_NO_CSA && + notif->csa_counter >= 1) + ieee80211_beacon_set_cntdwn(vif, notif->csa_counter); + + if (!vif->p2p) + return; + + mld_link = &iwl_mld_vif_from_mac80211(vif)->deflink; + + new_data = kzalloc(sizeof(*new_data), GFP_KERNEL); + if (!new_data) + return; + + memcpy(&new_data->notif, notif, sizeof(new_data->notif)); + + /* noa_attr contains 1 reserved byte, need to subtract it */ + new_data->noa_len = sizeof(struct ieee80211_vendor_ie) + + sizeof(new_data->notif.noa_attr) - 1; + + /* + * If it's a one time NoA, only one descriptor is needed, + * adjust the length according to len_low.
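+ * (a len_low of sizeof(desc) + 2 means the attribute carries a single + * descriptor after the index and oppps/ctwindow octets)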
+ */ + if (new_data->notif.noa_attr.len_low == + sizeof(struct ieee80211_p2p_noa_desc) + 2) + new_data->noa_len -= sizeof(struct ieee80211_p2p_noa_desc); + + old_data = wiphy_dereference(mld->wiphy, mld_link->probe_resp_data); + rcu_assign_pointer(mld_link->probe_resp_data, new_data); + + if (old_data) + kfree_rcu(old_data, rcu_head); +} + +void iwl_mld_handle_uapsd_misbehaving_ap_notif(struct iwl_mld *mld, + struct iwl_rx_packet *pkt) +{ + struct iwl_uapsd_misbehaving_ap_notif *notif = (void *)pkt->data; + struct ieee80211_vif *vif; + + if (IWL_FW_CHECK(mld, notif->mac_id >= ARRAY_SIZE(mld->fw_id_to_vif), + "mac id is invalid: %d\n", notif->mac_id)) + return; + + vif = wiphy_dereference(mld->wiphy, mld->fw_id_to_vif[notif->mac_id]); + + if (WARN_ON(!vif) || ieee80211_vif_is_mld(vif)) + return; + + IWL_WARN(mld, "uapsd misbehaving AP: %pM\n", vif->bss_conf.bssid); +} + +void iwl_mld_handle_datapath_monitor_notif(struct iwl_mld *mld, + struct iwl_rx_packet *pkt) +{ + struct iwl_datapath_monitor_notif *notif = (void *)pkt->data; + struct ieee80211_bss_conf *link; + struct ieee80211_supported_band *sband; + const struct ieee80211_sta_he_cap *he_cap; + struct ieee80211_vif *vif; + struct iwl_mld_vif *mld_vif; + + if (notif->type != cpu_to_le32(IWL_DP_MON_NOTIF_TYPE_EXT_CCA)) + return; + + link = iwl_mld_fw_id_to_link_conf(mld, notif->link_id); + if (WARN_ON(!link)) + return; + + vif = link->vif; + if (WARN_ON(!vif) || vif->type != NL80211_IFTYPE_STATION || + !vif->cfg.assoc) + return; + + if (!link->chanreq.oper.chan || + link->chanreq.oper.chan->band != NL80211_BAND_2GHZ || + link->chanreq.oper.width < NL80211_CHAN_WIDTH_40) + return; + + mld_vif = iwl_mld_vif_from_mac80211(vif); + + /* this shouldn't happen *again*, ignore it */ + if (mld_vif->cca_40mhz_workaround != CCA_40_MHZ_WA_NONE) + return; + + mld_vif->cca_40mhz_workaround = CCA_40_MHZ_WA_RECONNECT; + + /* + * This capability manipulation isn't really ideal, but it's the + * easiest choice - otherwise we'd have to do some major changes + * in mac80211 to support this, which isn't worth it. This does + * mean that userspace may have outdated information, but that's + * actually not an issue at all. 
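+ * (The reconnect triggered below then advertises the narrowed 20 MHz + * capabilities; iwl_mld_reset_cca_40mhz_workaround restores them once + * we disconnect for real.)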
+ */ + sband = mld->wiphy->bands[NL80211_BAND_2GHZ]; + + WARN_ON(!sband->ht_cap.ht_supported); + WARN_ON(!(sband->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40)); + sband->ht_cap.cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40; + + he_cap = ieee80211_get_he_iftype_cap_vif(sband, vif); + + if (he_cap) { + /* we know that ours is writable */ + struct ieee80211_sta_he_cap *he = (void *)(uintptr_t)he_cap; + + WARN_ON(!he->has_he); + WARN_ON(!(he->he_cap_elem.phy_cap_info[0] & + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G)); + he->he_cap_elem.phy_cap_info[0] &= + ~IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G; + } + + ieee80211_disconnect(vif, true); +} + +void iwl_mld_reset_cca_40mhz_workaround(struct iwl_mld *mld, + struct ieee80211_vif *vif) +{ + struct ieee80211_supported_band *sband; + const struct ieee80211_sta_he_cap *he_cap; + struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif); + + if (vif->type != NL80211_IFTYPE_STATION) + return; + + if (mld_vif->cca_40mhz_workaround == CCA_40_MHZ_WA_NONE) + return; + + /* Now we are just reconnecting with the new capabilities, + * but remember to reset the capabilities when we disconnect for real + */ + if (mld_vif->cca_40mhz_workaround == CCA_40_MHZ_WA_RECONNECT) { + mld_vif->cca_40mhz_workaround = CCA_40_MHZ_WA_RESET; + return; + } + + /* Now cca_40mhz_workaround == CCA_40_MHZ_WA_RESET */ + + sband = mld->wiphy->bands[NL80211_BAND_2GHZ]; + + sband->ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40; + + he_cap = ieee80211_get_he_iftype_cap_vif(sband, vif); + + if (he_cap) { + /* we know that ours is writable */ + struct ieee80211_sta_he_cap *he = (void *)(uintptr_t)he_cap; + + he->he_cap_elem.phy_cap_info[0] |= + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G; + } + + mld_vif->cca_40mhz_workaround = CCA_40_MHZ_WA_NONE; +} + +struct ieee80211_vif *iwl_mld_get_bss_vif(struct iwl_mld *mld) +{ + unsigned long fw_id_bitmap = iwl_mld_get_fw_bss_vifs_ids(mld); + int fw_id; + + if (hweight8(fw_id_bitmap) != 1) + return NULL; + + fw_id = __ffs(fw_id_bitmap); + + return wiphy_dereference(mld->wiphy, + mld->fw_id_to_vif[fw_id]); +} diff --git a/drivers/net/wireless/intel/iwlwifi/mld/iface.h b/drivers/net/wireless/intel/iwlwifi/mld/iface.h new file mode 100644 index 0000000000000..d1d56b081bf63 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mld/iface.h @@ -0,0 +1,233 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * Copyright (C) 2024-2025 Intel Corporation + */ +#ifndef __iwl_mld_iface_h__ +#define __iwl_mld_iface_h__ + +#include <net/mac80211.h> + +#include "link.h" +#include "session-protect.h" +#include "d3.h" + +enum iwl_mld_cca_40mhz_wa_status { + CCA_40_MHZ_WA_NONE, + CCA_40_MHZ_WA_RESET, + CCA_40_MHZ_WA_RECONNECT, +}; + +/** + * enum iwl_mld_emlsr_blocked - defines reasons for which EMLSR is blocked + * + * These blocks are applied/stored per-VIF. + * + * @IWL_MLD_EMLSR_BLOCKED_PREVENTION: Prevent repeated EMLSR enter/exit + * @IWL_MLD_EMLSR_BLOCKED_WOWLAN: WOWLAN is preventing EMLSR + * @IWL_MLD_EMLSR_BLOCKED_ROC: remain-on-channel is preventing EMLSR + * @IWL_MLD_EMLSR_BLOCKED_NON_BSS: An active non-BSS interface's link is + * preventing EMLSR + * @IWL_MLD_EMLSR_BLOCKED_TMP_NON_BSS: An expected active non-BSS interface's + * link is preventing EMLSR. This is a temporary blocking that is set when + * there is an indication that a non-BSS interface is to be added.
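+ * The delayed tmp_non_bss_done_wk worker removes this temporary block.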
+ * @IWL_MLD_EMLSR_BLOCKED_TPT: throughput is too low to make EMLSR worthwhile + */ +enum iwl_mld_emlsr_blocked { + IWL_MLD_EMLSR_BLOCKED_PREVENTION = 0x1, + IWL_MLD_EMLSR_BLOCKED_WOWLAN = 0x2, + IWL_MLD_EMLSR_BLOCKED_ROC = 0x4, + IWL_MLD_EMLSR_BLOCKED_NON_BSS = 0x8, + IWL_MLD_EMLSR_BLOCKED_TMP_NON_BSS = 0x10, + IWL_MLD_EMLSR_BLOCKED_TPT = 0x20, +}; + +/** + * enum iwl_mld_emlsr_exit - defines reasons for exiting EMLSR + * + * Reasons to exit EMLSR may be either link specific or even specific to a + * combination of links. + * + * @IWL_MLD_EMLSR_EXIT_BLOCK: Exit due to a block reason being set + * @IWL_MLD_EMLSR_EXIT_MISSED_BEACON: Exit due to missed beacons + * @IWL_MLD_EMLSR_EXIT_FAIL_ENTRY: FW failed to enter EMLSR + * @IWL_MLD_EMLSR_EXIT_CSA: EMLSR prevented due to channel switch on link + * @IWL_MLD_EMLSR_EXIT_EQUAL_BAND: EMLSR prevented as both links share the band + * @IWL_MLD_EMLSR_EXIT_LOW_RSSI: Link RSSI is unsuitable for EMLSR + * @IWL_MLD_EMLSR_EXIT_LINK_USAGE: Exit EMLSR due to low TPT on secondary link + * @IWL_MLD_EMLSR_EXIT_BT_COEX: Exit EMLSR due to BT coexistence + * @IWL_MLD_EMLSR_EXIT_CHAN_LOAD: Exit EMLSR because the primary channel is not + * loaded enough to justify EMLSR. + * @IWL_MLD_EMLSR_EXIT_RFI: Exit EMLSR due to RFI + * @IWL_MLD_EMLSR_EXIT_FW_REQUEST: Exit EMLSR because the FW requested it + */ +enum iwl_mld_emlsr_exit { + IWL_MLD_EMLSR_EXIT_BLOCK = 0x1, + IWL_MLD_EMLSR_EXIT_MISSED_BEACON = 0x2, + IWL_MLD_EMLSR_EXIT_FAIL_ENTRY = 0x4, + IWL_MLD_EMLSR_EXIT_CSA = 0x8, + IWL_MLD_EMLSR_EXIT_EQUAL_BAND = 0x10, + IWL_MLD_EMLSR_EXIT_LOW_RSSI = 0x20, + IWL_MLD_EMLSR_EXIT_LINK_USAGE = 0x40, + IWL_MLD_EMLSR_EXIT_BT_COEX = 0x80, + IWL_MLD_EMLSR_EXIT_CHAN_LOAD = 0x100, + IWL_MLD_EMLSR_EXIT_RFI = 0x200, + IWL_MLD_EMLSR_EXIT_FW_REQUEST = 0x400, +}; + +/** + * struct iwl_mld_emlsr - per-VIF data about EMLSR operation + * + * @primary: The current primary link + * @selected_primary: Primary link as selected during the last link selection + * @selected_links: Links as selected during the last link selection + * @blocked_reasons: Reasons preventing EMLSR from being enabled + * @last_exit_reason: Reason for the last EMLSR exit + * @last_exit_ts: Time of the last EMLSR exit (if @last_exit_reason is non-zero) + * @exit_repeat_count: Number of times EMLSR was exited for the same reason + * @unblock_tpt_wk: Unblock EMLSR because the throughput limit was reached + * @check_tpt_wk: a worker to check if IWL_MLD_EMLSR_BLOCKED_TPT should be + * added, for example if there is no longer enough traffic. + * @prevent_done_wk: Worker to remove %IWL_MLD_EMLSR_BLOCKED_PREVENTION + * @tmp_non_bss_done_wk: Worker to remove %IWL_MLD_EMLSR_BLOCKED_TMP_NON_BSS + */ +struct iwl_mld_emlsr { + struct_group(zeroed_on_not_authorized, + u8 primary; + + u8 selected_primary; + u16 selected_links; + + enum iwl_mld_emlsr_blocked blocked_reasons; + + enum iwl_mld_emlsr_exit last_exit_reason; + unsigned long last_exit_ts; + u8 exit_repeat_count; + ); + + struct wiphy_work unblock_tpt_wk; + struct wiphy_delayed_work check_tpt_wk; + + struct wiphy_delayed_work prevent_done_wk; + struct wiphy_delayed_work tmp_non_bss_done_wk; +}; + +/** + * struct iwl_mld_vif - virtual interface (MAC context) configuration parameters + * + * @fw_id: fw id of the mac context. + * @session_protect: session protection parameters + * @ap_sta: pointer to AP sta, for easier access to it. + * Relevant only for STA vifs. + * @authorized: indicates the AP station was set to authorized + * @bigtks: BIGTKs of the AP, for beacon protection. 
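+ * The two entries correspond to the two BIGTK key indices, 6 and 7.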
+ * Only valid for STA. (FIXME: needs to be per link) + * @num_associated_stas: number of associated STAs. Relevant only for AP mode. + * @ap_ibss_active: whether the AP/IBSS was started + * @roc_activity: the id of the roc_activity running. Relevant for p2p device + * only. Set to %ROC_NUM_ACTIVITIES when not in use. + * @cca_40mhz_workaround: When we are connected in 2.4 GHz and 40 MHz, and the + * environment is too loaded, we work around this by reconnecting to the + * same AP with 20 MHz. This manages the status of the workaround. + * @beacon_inject_active: indicates an active debugfs beacon ie injection + * @low_latency_causes: bit flags, indicating the causes for low-latency, + * see @iwl_mld_low_latency_cause. + * @ps_disabled: indicates that PS is disabled for this interface + * @mld: pointer to the mld structure. + * @deflink: default link data, for use in non-MLO. + * @link: reference to link data for each valid link, for use in MLO. + * @emlsr: information related to EMLSR + * @wowlan_data: data used by the wowlan suspend flow + * @use_ps_poll: use ps_poll frames + * @disable_bf: disable beacon filter + * @dbgfs_slink: debugfs symlink for this interface + */ +struct iwl_mld_vif { + /* Add here fields that need clean up on restart */ + struct_group(zeroed_on_hw_restart, + u8 fw_id; + struct iwl_mld_session_protect session_protect; + struct ieee80211_sta *ap_sta; + bool authorized; + struct ieee80211_key_conf __rcu *bigtks[2]; + u8 num_associated_stas; + bool ap_ibss_active; + u32 roc_activity; + enum iwl_mld_cca_40mhz_wa_status cca_40mhz_workaround; +#ifdef CONFIG_IWLWIFI_DEBUGFS + bool beacon_inject_active; +#endif + u8 low_latency_causes; + bool ps_disabled; + ); + /* And here fields that survive a fw restart */ + struct iwl_mld *mld; + struct iwl_mld_link deflink; + struct iwl_mld_link __rcu *link[IEEE80211_MLD_MAX_NUM_LINKS]; + + struct iwl_mld_emlsr emlsr; + +#ifdef CONFIG_PM_SLEEP + struct iwl_mld_wowlan_data wowlan_data; +#endif +#ifdef CONFIG_IWLWIFI_DEBUGFS + bool use_ps_poll; + bool disable_bf; + struct dentry *dbgfs_slink; +#endif +}; + +static inline struct iwl_mld_vif * +iwl_mld_vif_from_mac80211(struct ieee80211_vif *vif) +{ + return (void *)vif->drv_priv; +} + +#define iwl_mld_link_dereference_check(mld_vif, link_id) \ + rcu_dereference_check((mld_vif)->link[link_id], \ + lockdep_is_held(&mld_vif->mld->wiphy->mtx)) + +#define for_each_mld_vif_valid_link(mld_vif, mld_link) \ + for (int link_id = 0; link_id < ARRAY_SIZE((mld_vif)->link); \ + link_id++) \ + if ((mld_link = iwl_mld_link_dereference_check(mld_vif, link_id))) + +/* Retrieve pointer to mld link from mac80211 structures */ +static inline struct iwl_mld_link * +iwl_mld_link_from_mac80211(struct ieee80211_bss_conf *bss_conf) +{ + struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(bss_conf->vif); + + return iwl_mld_link_dereference_check(mld_vif, bss_conf->link_id); +} + +int iwl_mld_mac80211_iftype_to_fw(const struct ieee80211_vif *vif); + +/* Cleanup function for struct iwl_mld_vif, will be called in restart */ +void iwl_mld_cleanup_vif(void *data, u8 *mac, struct ieee80211_vif *vif); +int iwl_mld_mac_fw_action(struct iwl_mld *mld, struct ieee80211_vif *vif, + u32 action); +int iwl_mld_add_vif(struct iwl_mld *mld, struct ieee80211_vif *vif); +int iwl_mld_rm_vif(struct iwl_mld *mld, struct ieee80211_vif *vif); +void iwl_mld_set_vif_associated(struct iwl_mld *mld, + struct ieee80211_vif *vif); +u8 iwl_mld_get_fw_bss_vifs_ids(struct iwl_mld *mld); +void iwl_mld_handle_probe_resp_data_notif(struct iwl_mld
*mld, + struct iwl_rx_packet *pkt); + +void iwl_mld_handle_datapath_monitor_notif(struct iwl_mld *mld, + struct iwl_rx_packet *pkt); + +void iwl_mld_handle_uapsd_misbehaving_ap_notif(struct iwl_mld *mld, + struct iwl_rx_packet *pkt); + +void iwl_mld_reset_cca_40mhz_workaround(struct iwl_mld *mld, + struct ieee80211_vif *vif); + +static inline bool iwl_mld_vif_low_latency(const struct iwl_mld_vif *mld_vif) +{ + return !!mld_vif->low_latency_causes; +} + +struct ieee80211_vif *iwl_mld_get_bss_vif(struct iwl_mld *mld); + +#endif /* __iwl_mld_iface_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mld/key.c b/drivers/net/wireless/intel/iwlwifi/mld/key.c new file mode 100644 index 0000000000000..0eff13e5ffd57 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mld/key.c @@ -0,0 +1,358 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2024 Intel Corporation + */ +#include "key.h" +#include "iface.h" +#include "sta.h" +#include "fw/api/datapath.h" + +static u32 iwl_mld_get_key_flags(struct iwl_mld *mld, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ieee80211_key_conf *key) +{ + struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif); + bool pairwise = key->flags & IEEE80211_KEY_FLAG_PAIRWISE; + bool igtk = key->keyidx == 4 || key->keyidx == 5; + u32 flags = 0; + + if (!pairwise) + flags |= IWL_SEC_KEY_FLAG_MCAST_KEY; + + switch (key->cipher) { + case WLAN_CIPHER_SUITE_TKIP: + flags |= IWL_SEC_KEY_FLAG_CIPHER_TKIP; + break; + case WLAN_CIPHER_SUITE_AES_CMAC: + case WLAN_CIPHER_SUITE_CCMP: + flags |= IWL_SEC_KEY_FLAG_CIPHER_CCMP; + break; + case WLAN_CIPHER_SUITE_GCMP_256: + case WLAN_CIPHER_SUITE_BIP_GMAC_256: + flags |= IWL_SEC_KEY_FLAG_KEY_SIZE; + fallthrough; + case WLAN_CIPHER_SUITE_GCMP: + case WLAN_CIPHER_SUITE_BIP_GMAC_128: + flags |= IWL_SEC_KEY_FLAG_CIPHER_GCMP; + break; + } + + if (!sta && vif->type == NL80211_IFTYPE_STATION) + sta = mld_vif->ap_sta; + + /* If we are installing an iGTK (in AP or STA mode), we need to tell + * the firmware this key will en/decrypt MGMT frames. + * Same goes if we are installing a pairwise key for an MFP station. + * If we're installing a group key that is not an iGTK, we will not + * use this key for MGMT frames.
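+ * (the fw learns this via the IWL_SEC_KEY_FLAG_MFP flag set below)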
+ */ + if ((sta && sta->mfp && pairwise) || igtk) + flags |= IWL_SEC_KEY_FLAG_MFP; + + if (key->flags & IEEE80211_KEY_FLAG_SPP_AMSDU) + flags |= IWL_SEC_KEY_FLAG_SPP_AMSDU; + + return flags; +} + +static u32 iwl_mld_get_key_sta_mask(struct iwl_mld *mld, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ieee80211_key_conf *key) +{ + struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif); + struct ieee80211_link_sta *link_sta; + int sta_id; + + lockdep_assert_wiphy(mld->wiphy); + + /* AP group keys are per link and should be on the mcast/bcast STA */ + if (vif->type == NL80211_IFTYPE_AP && + !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) { + struct iwl_mld_link *link = NULL; + + if (key->link_id >= 0) + link = iwl_mld_link_dereference_check(mld_vif, + key->link_id); + + if (WARN_ON(!link)) + return 0; + + /* In this stage we should have both the bcast and mcast STAs */ + if (WARN_ON(link->bcast_sta.sta_id == IWL_INVALID_STA || + link->mcast_sta.sta_id == IWL_INVALID_STA)) + return 0; + + /* IGTK/BIGTK to bcast STA */ + if (key->keyidx >= 4) + return BIT(link->bcast_sta.sta_id); + + /* GTK for data to mcast STA */ + return BIT(link->mcast_sta.sta_id); + } + + /* for client mode use the AP STA also for group keys */ + if (!sta && vif->type == NL80211_IFTYPE_STATION) + sta = mld_vif->ap_sta; + + /* STA should be non-NULL now */ + if (WARN_ON(!sta)) + return 0; + + /* Key is not per-link, get the full sta mask */ + if (key->link_id < 0) + return iwl_mld_fw_sta_id_mask(mld, sta); + + /* The link_sta shouldn't be NULL now, but this is checked in + * iwl_mld_fw_sta_id_mask + */ + link_sta = link_sta_dereference_check(sta, key->link_id); + + sta_id = iwl_mld_fw_sta_id_from_link_sta(mld, link_sta); + if (sta_id < 0) + return 0; + + return BIT(sta_id); +} + +static int iwl_mld_add_key_to_fw(struct iwl_mld *mld, u32 sta_mask, + u32 key_flags, struct ieee80211_key_conf *key) +{ + struct iwl_sec_key_cmd cmd = { + .action = cpu_to_le32(FW_CTXT_ACTION_ADD), + .u.add.sta_mask = cpu_to_le32(sta_mask), + .u.add.key_id = cpu_to_le32(key->keyidx), + .u.add.key_flags = cpu_to_le32(key_flags), + .u.add.tx_seq = cpu_to_le64(atomic64_read(&key->tx_pn)), + }; + bool tkip = key->cipher == WLAN_CIPHER_SUITE_TKIP; + int max_key_len = sizeof(cmd.u.add.key); + + if (WARN_ON(!sta_mask)) + return -EINVAL; + + if (WARN_ON(key->keylen > max_key_len)) + return -EINVAL; + + memcpy(cmd.u.add.key, key->key, key->keylen); + + if (tkip) { + memcpy(cmd.u.add.tkip_mic_rx_key, + key->key + NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY, + 8); + memcpy(cmd.u.add.tkip_mic_tx_key, + key->key + NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY, + 8); + } + + return iwl_mld_send_cmd_pdu(mld, WIDE_ID(DATA_PATH_GROUP, SEC_KEY_CMD), + &cmd); +} + +static void iwl_mld_remove_key_from_fw(struct iwl_mld *mld, u32 sta_mask, + u32 key_flags, u32 keyidx) +{ + struct iwl_sec_key_cmd cmd = { + .action = cpu_to_le32(FW_CTXT_ACTION_REMOVE), + .u.remove.sta_mask = cpu_to_le32(sta_mask), + .u.remove.key_id = cpu_to_le32(keyidx), + .u.remove.key_flags = cpu_to_le32(key_flags), + }; + + if (WARN_ON(!sta_mask)) + return; + + iwl_mld_send_cmd_pdu(mld, WIDE_ID(DATA_PATH_GROUP, SEC_KEY_CMD), &cmd); +} + +void iwl_mld_remove_key(struct iwl_mld *mld, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ieee80211_key_conf *key) +{ + u32 sta_mask = iwl_mld_get_key_sta_mask(mld, vif, sta, key); + u32 key_flags = iwl_mld_get_key_flags(mld, vif, sta, key); + struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif); + + lockdep_assert_wiphy(mld->wiphy); + 
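/* no fw station is mapped for this key, so there is nothing to remove */ +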
+ if (!sta_mask) + return; + + if (key->keyidx == 4 || key->keyidx == 5) { + struct iwl_mld_link *mld_link; + unsigned int link_id = 0; + + /* set to -1 for non-MLO right now */ + if (key->link_id >= 0) + link_id = key->link_id; + + mld_link = iwl_mld_link_dereference_check(mld_vif, link_id); + if (WARN_ON(!mld_link)) + return; + + if (mld_link->igtk == key) + mld_link->igtk = NULL; + + mld->num_igtks--; + } + + iwl_mld_remove_key_from_fw(mld, sta_mask, key_flags, key->keyidx); + + /* no longer in HW */ + key->hw_key_idx = STA_KEY_IDX_INVALID; +} + +int iwl_mld_add_key(struct iwl_mld *mld, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ieee80211_key_conf *key) +{ + u32 sta_mask = iwl_mld_get_key_sta_mask(mld, vif, sta, key); + u32 key_flags = iwl_mld_get_key_flags(mld, vif, sta, key); + struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif); + struct iwl_mld_link *mld_link = NULL; + bool igtk = key->keyidx == 4 || key->keyidx == 5; + int ret; + + lockdep_assert_wiphy(mld->wiphy); + + if (!sta_mask) + return -EINVAL; + + if (igtk) { + if (mld->num_igtks == IWL_MAX_NUM_IGTKS) + return -EOPNOTSUPP; + + u8 link_id = 0; + + /* set to -1 for non-MLO right now */ + if (key->link_id >= 0) + link_id = key->link_id; + + mld_link = iwl_mld_link_dereference_check(mld_vif, link_id); + + if (WARN_ON(!mld_link)) + return -EINVAL; + + if (mld_link->igtk) { + IWL_DEBUG_MAC80211(mld, "remove old IGTK %d\n", + mld_link->igtk->keyidx); + iwl_mld_remove_key(mld, vif, sta, mld_link->igtk); + } + + WARN_ON(mld_link->igtk); + } + + ret = iwl_mld_add_key_to_fw(mld, sta_mask, key_flags, key); + if (ret) + return ret; + + if (mld_link) { + mld_link->igtk = key; + mld->num_igtks++; + } + + /* We don't really need this, but set it to a valid value so we + * can tell that the key is in fw.
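+ * (e.g. iwl_mld_remove_ap_keys_iter skips keys left at + * STA_KEY_IDX_INVALID)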
+ */ + key->hw_key_idx = 0; + + return 0; +} + +struct remove_ap_keys_iter_data { + u8 link_id; + struct ieee80211_sta *sta; +}; + +static void iwl_mld_remove_ap_keys_iter(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ieee80211_key_conf *key, + void *_data) +{ + struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw); + struct remove_ap_keys_iter_data *data = _data; + + if (key->hw_key_idx == STA_KEY_IDX_INVALID) + return; + + /* All the pairwise keys should have been removed by now */ + if (WARN_ON(sta)) + return; + + if (key->link_id >= 0 && key->link_id != data->link_id) + return; + + iwl_mld_remove_key(mld, vif, data->sta, key); +} + +void iwl_mld_remove_ap_keys(struct iwl_mld *mld, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, unsigned int link_id) +{ + struct remove_ap_keys_iter_data iter_data = { + .link_id = link_id, + .sta = sta, + }; + + if (WARN_ON_ONCE(vif->type != NL80211_IFTYPE_STATION)) + return; + + ieee80211_iter_keys(mld->hw, vif, + iwl_mld_remove_ap_keys_iter, + &iter_data); +} + +struct iwl_mvm_sta_key_update_data { + struct ieee80211_sta *sta; + u32 old_sta_mask; + u32 new_sta_mask; + int err; +}; + +static void iwl_mld_update_sta_key_iter(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ieee80211_key_conf *key, + void *_data) +{ + struct iwl_mvm_sta_key_update_data *data = _data; + struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw); + struct iwl_sec_key_cmd cmd = { + .action = cpu_to_le32(FW_CTXT_ACTION_MODIFY), + .u.modify.old_sta_mask = cpu_to_le32(data->old_sta_mask), + .u.modify.new_sta_mask = cpu_to_le32(data->new_sta_mask), + .u.modify.key_id = cpu_to_le32(key->keyidx), + .u.modify.key_flags = + cpu_to_le32(iwl_mld_get_key_flags(mld, vif, sta, key)), + }; + int err; + + /* only need to do this for pairwise keys (link_id == -1) */ + if (sta != data->sta || key->link_id >= 0) + return; + + err = iwl_mld_send_cmd_pdu(mld, WIDE_ID(DATA_PATH_GROUP, SEC_KEY_CMD), + &cmd); + + if (err) + data->err = err; +} + +int iwl_mld_update_sta_keys(struct iwl_mld *mld, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + u32 old_sta_mask, + u32 new_sta_mask) +{ + struct iwl_mvm_sta_key_update_data data = { + .sta = sta, + .old_sta_mask = old_sta_mask, + .new_sta_mask = new_sta_mask, + }; + + ieee80211_iter_keys(mld->hw, vif, iwl_mld_update_sta_key_iter, + &data); + return data.err; +} diff --git a/drivers/net/wireless/intel/iwlwifi/mld/key.h b/drivers/net/wireless/intel/iwlwifi/mld/key.h new file mode 100644 index 0000000000000..a68ea48913be6 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mld/key.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * Copyright (C) 2024 Intel Corporation + */ +#ifndef __iwl_mld_key_h__ +#define __iwl_mld_key_h__ + +#include "mld.h" +#include <net/mac80211.h> +#include "fw/api/sta.h" + +void iwl_mld_remove_key(struct iwl_mld *mld, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ieee80211_key_conf *key); +int iwl_mld_add_key(struct iwl_mld *mld, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ieee80211_key_conf *key); +void iwl_mld_remove_ap_keys(struct iwl_mld *mld, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + unsigned int link_id); + +int iwl_mld_update_sta_keys(struct iwl_mld *mld, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + u32 old_sta_mask, + u32 new_sta_mask); + +static inline void +iwl_mld_cleanup_keys_iter(struct ieee80211_hw *hw, struct ieee80211_vif *vif, +
struct ieee80211_sta *sta, + struct ieee80211_key_conf *key, void *data) +{ + key->hw_key_idx = STA_KEY_IDX_INVALID; +} + +#endif /* __iwl_mld_key_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mld/led.c b/drivers/net/wireless/intel/iwlwifi/mld/led.c new file mode 100644 index 0000000000000..a37b32cbc6e65 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mld/led.c @@ -0,0 +1,100 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2024 Intel Corporation + */ +#include <linux/leds.h> +#include <net/mac80211.h> + +#include "fw/api/led.h" +#include "mld.h" +#include "led.h" +#include "hcmd.h" + +static void iwl_mld_send_led_fw_cmd(struct iwl_mld *mld, bool on) +{ + struct iwl_led_cmd led_cmd = { + .status = cpu_to_le32(on), + }; + int err; + + if (WARN_ON(!mld->fw_status.running)) + return; + + err = iwl_mld_send_cmd_with_flags_pdu(mld, WIDE_ID(LONG_GROUP, + LEDS_CMD), + CMD_ASYNC, &led_cmd); + + if (err) + IWL_WARN(mld, "LED command failed: %d\n", err); +} + +static void iwl_led_brightness_set(struct led_classdev *led_cdev, + enum led_brightness brightness) +{ + struct iwl_mld *mld = container_of(led_cdev, struct iwl_mld, led); + + if (!mld->fw_status.running) + return; + + iwl_mld_send_led_fw_cmd(mld, brightness > 0); +} + +int iwl_mld_leds_init(struct iwl_mld *mld) +{ + int mode = iwlwifi_mod_params.led_mode; + int ret; + + switch (mode) { + case IWL_LED_BLINK: + IWL_ERR(mld, "Blink led mode not supported, using default\n"); + fallthrough; + case IWL_LED_DEFAULT: + case IWL_LED_RF_STATE: + mode = IWL_LED_RF_STATE; + break; + case IWL_LED_DISABLE: + IWL_INFO(mld, "Led disabled\n"); + return 0; + default: + return -EINVAL; + } + + mld->led.name = kasprintf(GFP_KERNEL, "%s-led", + wiphy_name(mld->hw->wiphy)); + if (!mld->led.name) + return -ENOMEM; + + mld->led.brightness_set = iwl_led_brightness_set; + mld->led.max_brightness = 1; + + if (mode == IWL_LED_RF_STATE) + mld->led.default_trigger = + ieee80211_get_radio_led_name(mld->hw); + + ret = led_classdev_register(mld->trans->dev, &mld->led); + if (ret) { + kfree(mld->led.name); + mld->led.name = NULL; + IWL_INFO(mld, "Failed to enable led\n"); + } + + return ret; +} + +void iwl_mld_led_config_fw(struct iwl_mld *mld) +{ + if (!mld->led.name) + return; + + iwl_mld_send_led_fw_cmd(mld, mld->led.brightness > 0); +} + +void iwl_mld_leds_exit(struct iwl_mld *mld) +{ + if (!mld->led.name) + return; + + led_classdev_unregister(&mld->led); + kfree(mld->led.name); + mld->led.name = NULL; +} diff --git a/drivers/net/wireless/intel/iwlwifi/mld/led.h b/drivers/net/wireless/intel/iwlwifi/mld/led.h new file mode 100644 index 0000000000000..0954e214e73d6 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mld/led.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * Copyright (C) 2024 Intel Corporation + */ +#ifndef __iwl_mld_led_h__ +#define __iwl_mld_led_h__ + +#include "mld.h" + +#ifdef CONFIG_IWLWIFI_LEDS +int iwl_mld_leds_init(struct iwl_mld *mld); +void iwl_mld_leds_exit(struct iwl_mld *mld); +void iwl_mld_led_config_fw(struct iwl_mld *mld); +#else +static inline int iwl_mld_leds_init(struct iwl_mld *mld) +{ + return 0; +} + +static inline void iwl_mld_leds_exit(struct iwl_mld *mld) +{ +} + +static inline void iwl_mld_led_config_fw(struct iwl_mld *mld) +{ +} +#endif + +#endif /* __iwl_mld_led_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mld/link.c b/drivers/net/wireless/intel/iwlwifi/mld/link.c new file mode 100644 index 0000000000000..82a4979a3af3c --- /dev/null +++
b/drivers/net/wireless/intel/iwlwifi/mld/link.c @@ -0,0 +1,1213 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2024-2025 Intel Corporation + */ + +#include "constants.h" +#include "link.h" +#include "iface.h" +#include "mlo.h" +#include "hcmd.h" +#include "phy.h" +#include "fw/api/rs.h" +#include "fw/api/txq.h" +#include "fw/api/mac.h" + +#include "fw/api/context.h" +#include "fw/dbg.h" + +static int iwl_mld_send_link_cmd(struct iwl_mld *mld, + struct iwl_link_config_cmd *cmd, + enum iwl_ctxt_action action) +{ + int ret; + + lockdep_assert_wiphy(mld->wiphy); + + cmd->action = cpu_to_le32(action); + ret = iwl_mld_send_cmd_pdu(mld, + WIDE_ID(MAC_CONF_GROUP, LINK_CONFIG_CMD), + cmd); + if (ret) + IWL_ERR(mld, "Failed to send LINK_CONFIG_CMD (action:%d): %d\n", + action, ret); + return ret; +} + +static int iwl_mld_add_link_to_fw(struct iwl_mld *mld, + struct ieee80211_bss_conf *link_conf) +{ + struct ieee80211_vif *vif = link_conf->vif; + struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif); + struct iwl_mld_link *link = iwl_mld_link_from_mac80211(link_conf); + struct iwl_link_config_cmd cmd = {}; + + lockdep_assert_wiphy(mld->wiphy); + + if (WARN_ON(!link)) + return -EINVAL; + + cmd.link_id = cpu_to_le32(link->fw_id); + cmd.mac_id = cpu_to_le32(mld_vif->fw_id); + cmd.spec_link_id = link_conf->link_id; + cmd.phy_id = cpu_to_le32(FW_CTXT_ID_INVALID); + + ether_addr_copy(cmd.local_link_addr, link_conf->addr); + + if (vif->type == NL80211_IFTYPE_ADHOC && link_conf->bssid) + ether_addr_copy(cmd.ibss_bssid_addr, link_conf->bssid); + + return iwl_mld_send_link_cmd(mld, &cmd, FW_CTXT_ACTION_ADD); +} + +/* Get the basic rates of the used band and add the mandatory ones */ +static void iwl_mld_fill_rates(struct iwl_mld *mld, + struct ieee80211_bss_conf *link, + struct ieee80211_chanctx_conf *chan_ctx, + __le32 *cck_rates, __le32 *ofdm_rates) +{ + struct cfg80211_chan_def *chandef = + iwl_mld_get_chandef_from_chanctx(mld, chan_ctx); + struct ieee80211_supported_band *sband = + mld->hw->wiphy->bands[chandef->chan->band]; + unsigned long basic = link->basic_rates; + int lowest_present_ofdm = 100; + int lowest_present_cck = 100; + u32 cck = 0; + u32 ofdm = 0; + int i; + + for_each_set_bit(i, &basic, BITS_PER_LONG) { + int hw = sband->bitrates[i].hw_value; + + if (hw >= IWL_FIRST_OFDM_RATE) { + ofdm |= BIT(hw - IWL_FIRST_OFDM_RATE); + if (lowest_present_ofdm > hw) + lowest_present_ofdm = hw; + } else { + BUILD_BUG_ON(IWL_FIRST_CCK_RATE != 0); + + cck |= BIT(hw); + if (lowest_present_cck > hw) + lowest_present_cck = hw; + } + } + + /* Now we've got the basic rates as bitmaps in the ofdm and cck + * variables. This isn't sufficient though, as there might not + * be all the right rates in the bitmap. E.g. if the only basic + * rates are 5.5 Mbps and 11 Mbps, we still need to add 1 Mbps + * and 6 Mbps because the 802.11-2007 standard says in 9.6: + * + * [...] a STA responding to a received frame shall transmit + * its Control Response frame [...] at the highest rate in the + * BSSBasicRateSet parameter that is less than or equal to the + * rate of the immediately previous frame in the frame exchange + * sequence ([...]) and that is of the same modulation class + * ([...]) as the received frame. 
If no rate contained in the + * BSSBasicRateSet parameter meets these conditions, then the + * control frame sent in response to a received frame shall be + * transmitted at the highest mandatory rate of the PHY that is + * less than or equal to the rate of the received frame, and + * that is of the same modulation class as the received frame. + * + * As a consequence, we need to add all mandatory rates that are + * lower than all of the basic rates to these bitmaps. + */ + + if (lowest_present_ofdm > IWL_RATE_24M_INDEX) + ofdm |= IWL_RATE_BIT_MSK(24) >> IWL_FIRST_OFDM_RATE; + if (lowest_present_ofdm > IWL_RATE_12M_INDEX) + ofdm |= IWL_RATE_BIT_MSK(12) >> IWL_FIRST_OFDM_RATE; + /* 6M already there or needed so always add */ + ofdm |= IWL_RATE_BIT_MSK(6) >> IWL_FIRST_OFDM_RATE; + + /* CCK is a bit more complex with DSSS vs. HR/DSSS vs. ERP. + * Note, however: + * - if no CCK rates are basic, it must be ERP since there must + * be some basic rates at all, so they're OFDM => ERP PHY + * (or we're in 5 GHz, and the cck bitmap will never be used) + * - if 11M is a basic rate, it must be ERP as well, so add 5.5M + * - if 5.5M is basic, 1M and 2M are mandatory + * - if 2M is basic, 1M is mandatory + * - if 1M is basic, that's the only valid ACK rate. + * As a consequence, it's not as complicated as it sounds, just add + * any lower rates to the ACK rate bitmap. + */ + if (lowest_present_cck > IWL_RATE_11M_INDEX) + cck |= IWL_RATE_BIT_MSK(11) >> IWL_FIRST_CCK_RATE; + if (lowest_present_cck > IWL_RATE_5M_INDEX) + cck |= IWL_RATE_BIT_MSK(5) >> IWL_FIRST_CCK_RATE; + if (lowest_present_cck > IWL_RATE_2M_INDEX) + cck |= IWL_RATE_BIT_MSK(2) >> IWL_FIRST_CCK_RATE; + /* 1M already there or needed so always add */ + cck |= IWL_RATE_BIT_MSK(1) >> IWL_FIRST_CCK_RATE; + + *cck_rates = cpu_to_le32((u32)cck); + *ofdm_rates = cpu_to_le32((u32)ofdm); +} + +static void iwl_mld_fill_protection_flags(struct iwl_mld *mld, + struct ieee80211_bss_conf *link, + __le32 *protection_flags) +{ + u8 protection_mode = link->ht_operation_mode & + IEEE80211_HT_OP_MODE_PROTECTION; + u8 ht_flag = LINK_PROT_FLG_HT_PROT | LINK_PROT_FLG_FAT_PROT; + + IWL_DEBUG_RATE(mld, "HT protection mode: %d\n", protection_mode); + + if (link->use_cts_prot) + *protection_flags |= cpu_to_le32(LINK_PROT_FLG_TGG_PROTECT); + + /* See section 9.23.3.1 of IEEE 80211-2012. + * Nongreenfield HT STAs Present is not supported. 
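
To make the CCK fallback rules in iwl_mld_fill_rates() above concrete, here is a small standalone sketch (not part of the patch; plain bit positions stand in for the IWL_RATE_*_INDEX and IWL_RATE_BIT_MSK() helpers, with the CCK ordering 1M=0, 2M=1, 5.5M=2, 11M=3):

	/* sketch: expand a CCK basic-rate bitmap with the mandatory lower rates */
	enum { R1M, R2M, R5M, R11M };

	static unsigned int cck_response_rates(unsigned int cck, int lowest_present_cck)
	{
		if (lowest_present_cck > R11M)
			cck |= 1u << R11M;
		if (lowest_present_cck > R5M)
			cck |= 1u << R5M;
		if (lowest_present_cck > R2M)
			cck |= 1u << R2M;
		/* 1M is always a valid response rate, so always add it */
		return cck | (1u << R1M);
	}

For basic rates {5.5, 11} Mbps, cck_response_rates((1u << R5M) | (1u << R11M), R5M) yields 0x0f: 5.5M is the lowest basic rate, so the mandatory 1M and 2M are added, exactly as the comment's rule list says.
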
+ */ + switch (protection_mode) { + case IEEE80211_HT_OP_MODE_PROTECTION_NONE: + break; + case IEEE80211_HT_OP_MODE_PROTECTION_NONMEMBER: + case IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED: + *protection_flags |= cpu_to_le32(ht_flag); + break; + case IEEE80211_HT_OP_MODE_PROTECTION_20MHZ: + /* Protect when channel wider than 20MHz */ + if (link->chanreq.oper.width > NL80211_CHAN_WIDTH_20) + *protection_flags |= cpu_to_le32(ht_flag); + break; + } +} + +static u8 iwl_mld_mac80211_ac_to_fw_ac(enum ieee80211_ac_numbers ac) +{ + static const u8 mac80211_ac_to_fw[] = { + AC_VO, + AC_VI, + AC_BE, + AC_BK + }; + + return mac80211_ac_to_fw[ac]; +} + +static void iwl_mld_fill_qos_params(struct ieee80211_bss_conf *link, + struct iwl_ac_qos *ac, __le32 *qos_flags) +{ + struct iwl_mld_link *mld_link = iwl_mld_link_from_mac80211(link); + + /* no need to check mld_link since it is done in the caller */ + + for (int mac_ac = 0; mac_ac < IEEE80211_NUM_ACS; mac_ac++) { + u8 txf = iwl_mld_mac80211_ac_to_fw_tx_fifo(mac_ac); + u8 fw_ac = iwl_mld_mac80211_ac_to_fw_ac(mac_ac); + + ac[fw_ac].cw_min = + cpu_to_le16(mld_link->queue_params[mac_ac].cw_min); + ac[fw_ac].cw_max = + cpu_to_le16(mld_link->queue_params[mac_ac].cw_max); + ac[fw_ac].edca_txop = + cpu_to_le16(mld_link->queue_params[mac_ac].txop * 32); + ac[fw_ac].aifsn = mld_link->queue_params[mac_ac].aifs; + ac[fw_ac].fifos_mask = BIT(txf); + } + + if (link->qos) + *qos_flags |= cpu_to_le32(MAC_QOS_FLG_UPDATE_EDCA); + + if (link->chanreq.oper.width != NL80211_CHAN_WIDTH_20_NOHT) + *qos_flags |= cpu_to_le32(MAC_QOS_FLG_TGN); +} + +static bool iwl_mld_fill_mu_edca(struct iwl_mld *mld, + const struct iwl_mld_link *mld_link, + struct iwl_he_backoff_conf *trig_based_txf) +{ + for (int mac_ac = 0; mac_ac < IEEE80211_NUM_ACS; mac_ac++) { + const struct ieee80211_he_mu_edca_param_ac_rec *mu_edca = + &mld_link->queue_params[mac_ac].mu_edca_param_rec; + u8 fw_ac = iwl_mld_mac80211_ac_to_fw_ac(mac_ac); + + if (!mld_link->queue_params[mac_ac].mu_edca) + return false; + + trig_based_txf[fw_ac].cwmin = + cpu_to_le16(mu_edca->ecw_min_max & 0xf); + trig_based_txf[fw_ac].cwmax = + cpu_to_le16((mu_edca->ecw_min_max & 0xf0) >> 4); + trig_based_txf[fw_ac].aifsn = + cpu_to_le16(mu_edca->aifsn & 0xf); + trig_based_txf[fw_ac].mu_time = + cpu_to_le16(mu_edca->mu_edca_timer); + } + return true; +} + +static u8 iwl_mld_sta_rx_bw_to_fw(enum ieee80211_sta_rx_bandwidth bw) +{ + switch (bw) { + default: /* potential future values not supported by this hw/driver */ + case IEEE80211_STA_RX_BW_20: + return IWL_LINK_MODIFY_BW_20; + case IEEE80211_STA_RX_BW_40: + return IWL_LINK_MODIFY_BW_40; + case IEEE80211_STA_RX_BW_80: + return IWL_LINK_MODIFY_BW_80; + case IEEE80211_STA_RX_BW_160: + return IWL_LINK_MODIFY_BW_160; + case IEEE80211_STA_RX_BW_320: + return IWL_LINK_MODIFY_BW_320; + } +} + +static int _iwl_mld_change_link_in_fw(struct iwl_mld *mld, + struct ieee80211_bss_conf *link, + enum ieee80211_sta_rx_bandwidth bw, + u32 changes) +{ + struct iwl_mld_link *mld_link = iwl_mld_link_from_mac80211(link); + struct ieee80211_vif *vif = link->vif; + struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif); + struct ieee80211_chanctx_conf *chan_ctx; + struct iwl_link_config_cmd cmd = {}; + u32 flags = 0; + + lockdep_assert_wiphy(mld->wiphy); + + if (WARN_ON(!mld_link)) + return -EINVAL; + + cmd.link_id = cpu_to_le32(mld_link->fw_id); + cmd.spec_link_id = link->link_id; + cmd.mac_id = cpu_to_le32(mld_vif->fw_id); + + chan_ctx = wiphy_dereference(mld->wiphy, mld_link->chan_ctx); + + 
cmd.phy_id = cpu_to_le32(chan_ctx ? + iwl_mld_phy_from_mac80211(chan_ctx)->fw_id : + FW_CTXT_ID_INVALID); + + ether_addr_copy(cmd.local_link_addr, link->addr); + + cmd.active = cpu_to_le32(mld_link->active); + + if ((changes & LINK_CONTEXT_MODIFY_ACTIVE) && !mld_link->active && + mld_link->silent_deactivation) { + /* We are de-activating a link that is having CSA with + * immediate quiet in EMLSR. Tell the firmware not to send any + * frame. + */ + cmd.block_tx = 1; + mld_link->silent_deactivation = false; + } + + if (vif->type == NL80211_IFTYPE_ADHOC && link->bssid) + ether_addr_copy(cmd.ibss_bssid_addr, link->bssid); + + /* Channel context is needed to get the rates */ + if (chan_ctx) + iwl_mld_fill_rates(mld, link, chan_ctx, &cmd.cck_rates, + &cmd.ofdm_rates); + + cmd.cck_short_preamble = cpu_to_le32(link->use_short_preamble); + cmd.short_slot = cpu_to_le32(link->use_short_slot); + + iwl_mld_fill_protection_flags(mld, link, &cmd.protection_flags); + + iwl_mld_fill_qos_params(link, cmd.ac, &cmd.qos_flags); + + cmd.bi = cpu_to_le32(link->beacon_int); + cmd.dtim_interval = cpu_to_le32(link->beacon_int * link->dtim_period); + + if (changes & LINK_CONTEXT_MODIFY_BANDWIDTH) + cmd.modify_bandwidth = iwl_mld_sta_rx_bw_to_fw(bw); + + /* Configure HE parameters only if HE is supported, and only after + * the parameters are set in mac80211 (meaning after assoc) + */ + if (!link->he_support || iwlwifi_mod_params.disable_11ax || + (vif->type == NL80211_IFTYPE_STATION && !vif->cfg.assoc)) { + changes &= ~LINK_CONTEXT_MODIFY_HE_PARAMS; + goto send_cmd; + } + + /* ap_sta may be NULL if we're disconnecting */ + if (mld_vif->ap_sta) { + struct ieee80211_link_sta *link_sta = + link_sta_dereference_check(mld_vif->ap_sta, + link->link_id); + + if (!WARN_ON(!link_sta) && link_sta->he_cap.has_he && + link_sta->he_cap.he_cap_elem.mac_cap_info[5] & + IEEE80211_HE_MAC_CAP5_OM_CTRL_UL_MU_DATA_DIS_RX) + cmd.ul_mu_data_disable = 1; + } + + cmd.htc_trig_based_pkt_ext = link->htc_trig_based_pkt_ext; + + if (link->uora_exists) { + cmd.rand_alloc_ecwmin = link->uora_ocw_range & 0x7; + cmd.rand_alloc_ecwmax = (link->uora_ocw_range >> 3) & 0x7; + } + + if (iwl_mld_fill_mu_edca(mld, mld_link, cmd.trig_based_txf)) + flags |= LINK_FLG_MU_EDCA_CW; + + cmd.bss_color = link->he_bss_color.color; + + if (!link->he_bss_color.enabled) + flags |= LINK_FLG_BSS_COLOR_DIS; + + cmd.frame_time_rts_th = cpu_to_le16(link->frame_time_rts_th); + + /* Block 26-tone RU OFDMA transmissions */ + if (mld_link->he_ru_2mhz_block) + flags |= LINK_FLG_RU_2MHZ_BLOCK; + + if (link->nontransmitted) { + ether_addr_copy(cmd.ref_bssid_addr, link->transmitter_bssid); + cmd.bssid_index = link->bssid_index; + } + + /* The only EHT parameter is puncturing, and starting from PHY cmd + * version 6 - it is sent there. For older versions of the PHY cmd, + * puncturing is not needed at all. 
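
As a quick check of the uora_ocw_range unpacking above (in the 802.11ax UORA Parameter Set element, the OCW Range field carries EOCWmin in bits 0-2 and EOCWmax in bits 3-5, with OCW = 2^EOCW - 1):

	/* worked example: uora_ocw_range = 0x2a = 0b101010 */
	u8 ocw_range = 0x2a;
	u8 ecwmin = ocw_range & 0x7;		/* 0b010 = 2, so OCWmin = (1 << 2) - 1 = 3 */
	u8 ecwmax = (ocw_range >> 3) & 0x7;	/* 0b101 = 5, so OCWmax = (1 << 5) - 1 = 31 */
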
+ */ + if (WARN_ON(changes & LINK_CONTEXT_MODIFY_EHT_PARAMS)) + changes &= ~LINK_CONTEXT_MODIFY_EHT_PARAMS; + +send_cmd: + cmd.modify_mask = cpu_to_le32(changes); + cmd.flags = cpu_to_le32(flags); + + return iwl_mld_send_link_cmd(mld, &cmd, FW_CTXT_ACTION_MODIFY); +} + +int iwl_mld_change_link_in_fw(struct iwl_mld *mld, + struct ieee80211_bss_conf *link, + u32 changes) +{ + if (WARN_ON(changes & LINK_CONTEXT_MODIFY_BANDWIDTH)) + changes &= ~LINK_CONTEXT_MODIFY_BANDWIDTH; + + return _iwl_mld_change_link_in_fw(mld, link, 0, changes); +} + +int iwl_mld_change_link_omi_bw(struct iwl_mld *mld, + struct ieee80211_bss_conf *link, + enum ieee80211_sta_rx_bandwidth bw) +{ + return _iwl_mld_change_link_in_fw(mld, link, bw, + LINK_CONTEXT_MODIFY_BANDWIDTH); +} + +int iwl_mld_activate_link(struct iwl_mld *mld, + struct ieee80211_bss_conf *link) +{ + struct iwl_mld_link *mld_link = iwl_mld_link_from_mac80211(link); + int ret; + + lockdep_assert_wiphy(mld->wiphy); + + if (WARN_ON(!mld_link || mld_link->active)) + return -EINVAL; + + mld_link->rx_omi.exit_ts = jiffies; + mld_link->active = true; + + ret = iwl_mld_change_link_in_fw(mld, link, + LINK_CONTEXT_MODIFY_ACTIVE); + if (ret) + mld_link->active = false; + + return ret; +} + +void iwl_mld_deactivate_link(struct iwl_mld *mld, + struct ieee80211_bss_conf *link) +{ + struct iwl_mld_link *mld_link = iwl_mld_link_from_mac80211(link); + struct iwl_probe_resp_data *probe_data; + + lockdep_assert_wiphy(mld->wiphy); + + if (WARN_ON(!mld_link || !mld_link->active)) + return; + + iwl_mld_cancel_session_protection(mld, link->vif, link->link_id); + + /* If we deactivate the link, we will probably remove it, or switch + * channel. In both cases, the CSA or Notice of Absence information is + * now irrelevant. Remove the data here. + */ + probe_data = wiphy_dereference(mld->wiphy, mld_link->probe_resp_data); + RCU_INIT_POINTER(mld_link->probe_resp_data, NULL); + if (probe_data) + kfree_rcu(probe_data, rcu_head); + + mld_link->active = false; + + iwl_mld_change_link_in_fw(mld, link, LINK_CONTEXT_MODIFY_ACTIVE); + + /* Now that the link is not active in FW, we don't expect any new + * notifications for it. Cancel the ones that are already pending. + */ + iwl_mld_cancel_notifications_of_object(mld, IWL_MLD_OBJECT_TYPE_LINK, + mld_link->fw_id); +} + +static void +iwl_mld_rm_link_from_fw(struct iwl_mld *mld, struct ieee80211_bss_conf *link) +{ + struct iwl_mld_link *mld_link = iwl_mld_link_from_mac80211(link); + struct iwl_link_config_cmd cmd = {}; + + lockdep_assert_wiphy(mld->wiphy); + + if (WARN_ON(!mld_link)) + return; + + cmd.link_id = cpu_to_le32(mld_link->fw_id); + cmd.spec_link_id = link->link_id; + cmd.phy_id = cpu_to_le32(FW_CTXT_ID_INVALID); + + iwl_mld_send_link_cmd(mld, &cmd, FW_CTXT_ACTION_REMOVE); +} + +static void iwl_mld_omi_bw_update(struct iwl_mld *mld, + struct ieee80211_bss_conf *link_conf, + struct iwl_mld_link *mld_link, + struct ieee80211_link_sta *link_sta, + enum ieee80211_sta_rx_bandwidth bw, + bool ap_update) +{ + enum ieee80211_sta_rx_bandwidth apply_bw; + + mld_link->rx_omi.desired_bw = bw; + + /* Can't update OMI while an update is already in progress; desired_bw + * was set above, so on the FW notification the worker will see the + * change and apply the new desired bw.
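
The deferral described in the comment above is a "latest request wins" pattern. A minimal standalone sketch of the same logic (hypothetical types, not the driver's structures):

	/* sketch: remember the newest request while an update is in flight */
	struct omi_sm {
		int in_progress;	/* 0 when idle, else the BW being applied */
		int desired;
	};

	static void omi_request(struct omi_sm *sm, int bw)
	{
		sm->desired = bw;
		if (sm->in_progress)
			return;		/* the completion handler re-checks desired */
		sm->in_progress = bw;
		/* ... send the bandwidth update to the firmware here ... */
	}

	static void omi_completed(struct omi_sm *sm)
	{
		int applied = sm->in_progress;

		sm->in_progress = 0;
		if (sm->desired != applied)
			omi_request(sm, sm->desired);	/* apply the newer request */
	}
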
+ */ + if (mld_link->rx_omi.bw_in_progress) + return; + + if (bw == IEEE80211_STA_RX_BW_MAX) + apply_bw = ieee80211_chan_width_to_rx_bw(link_conf->chanreq.oper.width); + else + apply_bw = bw; + + if (!ap_update) { + /* The update isn't due to AP tracking after leaving OMI, + * where the AP could increase BW and then we must tell + * it that we can do the increased BW as well, if we did + * update the chandef. + * In this case, if we want MAX, then we will need to send + * a new OMI to the AP if it increases its own bandwidth as + * we can (due to internal and FW limitations, and being + * worried the AP might break) only send to what we're doing + * at the moment. In this case, set last_max_bw; otherwise + * if we really want to decrease our bandwidth set it to 0 + * to indicate no updates are needed if the AP changes. + */ + if (bw != IEEE80211_STA_RX_BW_MAX) + mld_link->rx_omi.last_max_bw = apply_bw; + else + mld_link->rx_omi.last_max_bw = 0; + } else { + /* Otherwise, if we're already trying to do maximum and + * the AP is changing, set last_max_bw to the new max the + * AP is using, we'll only get to this code path if the + * new bandwidth of the AP is bigger than what we sent it + * previously. This avoids repeatedly sending updates if + * it changes bandwidth, only doing it once on an increase. + */ + mld_link->rx_omi.last_max_bw = apply_bw; + } + + if (ieee80211_prepare_rx_omi_bw(link_sta, bw)) { + mld_link->rx_omi.bw_in_progress = apply_bw; + iwl_mld_change_link_omi_bw(mld, link_conf, apply_bw); + } +} + +static void iwl_mld_omi_bw_finished_work(struct wiphy *wiphy, + struct wiphy_work *work) +{ + struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); + struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw); + struct iwl_mld_link *mld_link = + container_of(work, typeof(*mld_link), rx_omi.finished_work.work); + enum ieee80211_sta_rx_bandwidth desired_bw, switched_to_bw; + struct ieee80211_vif *vif = mld_link->vif; + struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif); + struct ieee80211_bss_conf *link_conf; + struct ieee80211_link_sta *link_sta; + + if (!mld_vif->ap_sta) + return; + + link_sta = wiphy_dereference(mld->wiphy, + mld_vif->ap_sta->link[mld_link->link_id]); + if (WARN_ON_ONCE(!link_sta)) + return; + + link_conf = link_conf_dereference_protected(vif, link_sta->link_id); + if (WARN_ON_ONCE(!link_conf)) + return; + + if (WARN_ON(!mld_link->rx_omi.bw_in_progress)) + return; + + desired_bw = mld_link->rx_omi.desired_bw; + switched_to_bw = mld_link->rx_omi.bw_in_progress; + + ieee80211_finalize_rx_omi_bw(link_sta); + mld_link->rx_omi.bw_in_progress = 0; + + if (desired_bw != switched_to_bw) + iwl_mld_omi_bw_update(mld, link_conf, mld_link, link_sta, + desired_bw, false); +} + +static struct ieee80211_vif * +iwl_mld_get_omi_bw_reduction_pointers(struct iwl_mld *mld, + struct ieee80211_link_sta **link_sta, + struct iwl_mld_link **mld_link) +{ + struct iwl_mld_vif *mld_vif; + struct ieee80211_vif *vif; + int n_link_stas = 0; + + *link_sta = NULL; + + if (mld->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_SC) + return NULL; + + vif = iwl_mld_get_bss_vif(mld); + if (!vif) + return NULL; + + for (int i = 0; i < ARRAY_SIZE(mld->fw_id_to_link_sta); i++) { + struct ieee80211_link_sta *tmp; + + tmp = wiphy_dereference(mld->wiphy, mld->fw_id_to_link_sta[i]); + if (IS_ERR_OR_NULL(tmp)) + continue; + + n_link_stas++; + *link_sta = tmp; + } + + /* can't do anything if we have TDLS peers or EMLSR */ + if (n_link_stas != 1) + return NULL; + + mld_vif = iwl_mld_vif_from_mac80211(vif); 
+ *mld_link = iwl_mld_link_dereference_check(mld_vif, + (*link_sta)->link_id); + if (WARN_ON(!*mld_link)) + return NULL; + + return vif; +} + +void iwl_mld_omi_ap_changed_bw(struct iwl_mld *mld, + struct ieee80211_bss_conf *link_conf, + enum ieee80211_sta_rx_bandwidth bw) +{ + struct ieee80211_link_sta *link_sta; + struct iwl_mld_link *mld_link; + struct ieee80211_vif *vif; + + vif = iwl_mld_get_omi_bw_reduction_pointers(mld, &link_sta, &mld_link); + if (!vif) + return; + + if (WARN_ON(link_conf->vif != vif)) + return; + + /* This is 0 if we requested an OMI BW reduction and don't want to + * be sending an OMI when the AP's bandwidth changes. + */ + if (!mld_link->rx_omi.last_max_bw) + return; + + /* We only need to tell the AP if it increases BW over what we last + * told it we were using, if it reduces then our last OMI to it will + * not get used anyway (e.g. we said we want 160 but it's doing 80.) + */ + if (bw < mld_link->rx_omi.last_max_bw) + return; + + iwl_mld_omi_bw_update(mld, link_conf, mld_link, link_sta, bw, true); +} + +void iwl_mld_handle_omi_status_notif(struct iwl_mld *mld, + struct iwl_rx_packet *pkt) +{ + struct ieee80211_link_sta *link_sta; + struct iwl_mld_link *mld_link; + struct ieee80211_vif *vif; + + vif = iwl_mld_get_omi_bw_reduction_pointers(mld, &link_sta, &mld_link); + if (IWL_FW_CHECK(mld, !vif, "unexpected OMI notification\n")) + return; + + if (IWL_FW_CHECK(mld, !mld_link->rx_omi.bw_in_progress, + "OMI notification when not requested\n")) + return; + + wiphy_delayed_work_queue(mld->hw->wiphy, + &mld_link->rx_omi.finished_work, + msecs_to_jiffies(IWL_MLD_OMI_AP_SETTLE_DELAY)); +} + +void iwl_mld_leave_omi_bw_reduction(struct iwl_mld *mld) +{ + struct ieee80211_bss_conf *link_conf; + struct ieee80211_link_sta *link_sta; + struct iwl_mld_link *mld_link; + struct ieee80211_vif *vif; + + vif = iwl_mld_get_omi_bw_reduction_pointers(mld, &link_sta, &mld_link); + if (!vif) + return; + + link_conf = link_conf_dereference_protected(vif, link_sta->link_id); + if (WARN_ON_ONCE(!link_conf)) + return; + + if (!link_conf->he_support) + return; + + mld_link->rx_omi.exit_ts = jiffies; + + iwl_mld_omi_bw_update(mld, link_conf, mld_link, link_sta, + IEEE80211_STA_RX_BW_MAX, false); +} + +void iwl_mld_check_omi_bw_reduction(struct iwl_mld *mld) +{ + enum ieee80211_sta_rx_bandwidth bw = IEEE80211_STA_RX_BW_MAX; + struct ieee80211_chanctx_conf *chanctx; + struct ieee80211_bss_conf *link_conf; + struct ieee80211_link_sta *link_sta; + struct cfg80211_chan_def chandef; + struct iwl_mld_link *mld_link; + struct iwl_mld_vif *mld_vif; + struct ieee80211_vif *vif; + struct iwl_mld_phy *phy; + u16 punctured; + int exit_thr; + + /* not allowed in CAM mode */ + if (iwlmld_mod_params.power_scheme == IWL_POWER_SCHEME_CAM) + return; + + /* must have one BSS connection (no P2P), no TDLS, nor EMLSR */ + vif = iwl_mld_get_omi_bw_reduction_pointers(mld, &link_sta, &mld_link); + if (!vif) + return; + + link_conf = link_conf_dereference_protected(vif, link_sta->link_id); + if (WARN_ON_ONCE(!link_conf)) + return; + + if (!link_conf->he_support) + return; + + chanctx = wiphy_dereference(mld->wiphy, mld_link->chan_ctx); + if (WARN_ON(!chanctx)) + return; + + mld_vif = iwl_mld_vif_from_mac80211(vif); + if (!mld_vif->authorized) + goto apply; + + /* must not be in low-latency mode */ + if (iwl_mld_vif_low_latency(mld_vif)) + goto apply; + + chandef = link_conf->chanreq.oper; + + switch (chandef.width) { + case NL80211_CHAN_WIDTH_320: + exit_thr = IWL_MLD_OMI_EXIT_CHAN_LOAD_320; + break; + case 
NL80211_CHAN_WIDTH_160: + exit_thr = IWL_MLD_OMI_EXIT_CHAN_LOAD_160; + break; + default: + /* since we reduce to 80 MHz, must have more to start with */ + goto apply; + } + + /* not to be done if primary 80 MHz is punctured */ + if (cfg80211_chandef_primary(&chandef, NL80211_CHAN_WIDTH_80, + &punctured) < 0 || + punctured != 0) + goto apply; + + phy = iwl_mld_phy_from_mac80211(chanctx); + + if (phy->channel_load_by_us > exit_thr) { + /* send OMI for max bandwidth */ + goto apply; + } + + if (phy->channel_load_by_us > IWL_MLD_OMI_ENTER_CHAN_LOAD) { + /* no changes between enter/exit thresholds */ + return; + } + + if (time_is_after_jiffies(mld_link->rx_omi.exit_ts + + msecs_to_jiffies(IWL_MLD_OMI_EXIT_PROTECTION))) + return; + + /* reduce bandwidth to 80 MHz to save power */ + bw = IEEE80211_STA_RX_BW_80; +apply: + iwl_mld_omi_bw_update(mld, link_conf, mld_link, link_sta, bw, false); +} + +IWL_MLD_ALLOC_FN(link, bss_conf) + +/* Constructor function for struct iwl_mld_link */ +static int +iwl_mld_init_link(struct iwl_mld *mld, struct ieee80211_bss_conf *link, + struct iwl_mld_link *mld_link) +{ + mld_link->vif = link->vif; + mld_link->link_id = link->link_id; + + iwl_mld_init_internal_sta(&mld_link->bcast_sta); + iwl_mld_init_internal_sta(&mld_link->mcast_sta); + iwl_mld_init_internal_sta(&mld_link->aux_sta); + + wiphy_delayed_work_init(&mld_link->rx_omi.finished_work, + iwl_mld_omi_bw_finished_work); + + return iwl_mld_allocate_link_fw_id(mld, &mld_link->fw_id, link); +} + +/* Initializes the link structure, maps fw id to the ieee80211_bss_conf, and + * adds a link to the fw + */ +int iwl_mld_add_link(struct iwl_mld *mld, + struct ieee80211_bss_conf *bss_conf) +{ + struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(bss_conf->vif); + struct iwl_mld_link *link = iwl_mld_link_from_mac80211(bss_conf); + bool is_deflink = bss_conf == &bss_conf->vif->bss_conf; + int ret; + + if (!link) { + if (is_deflink) + link = &mld_vif->deflink; + else + link = kzalloc(sizeof(*link), GFP_KERNEL); + } else { + WARN_ON(!mld->fw_status.in_hw_restart); + } + + ret = iwl_mld_init_link(mld, bss_conf, link); + if (ret) + goto free; + + rcu_assign_pointer(mld_vif->link[bss_conf->link_id], link); + + ret = iwl_mld_add_link_to_fw(mld, bss_conf); + if (ret) { + RCU_INIT_POINTER(mld->fw_id_to_bss_conf[link->fw_id], NULL); + RCU_INIT_POINTER(mld_vif->link[bss_conf->link_id], NULL); + goto free; + } + + return ret; + +free: + if (!is_deflink) + kfree(link); + return ret; +} + +/* Remove link from fw, unmap the bss_conf, and destroy the link structure */ +void iwl_mld_remove_link(struct iwl_mld *mld, + struct ieee80211_bss_conf *bss_conf) +{ + struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(bss_conf->vif); + struct iwl_mld_link *link = iwl_mld_link_from_mac80211(bss_conf); + bool is_deflink = link == &mld_vif->deflink; + + if (WARN_ON(!link || link->active)) + return; + + iwl_mld_rm_link_from_fw(mld, bss_conf); + /* Continue cleanup on failure */ + + if (!is_deflink) + kfree_rcu(link, rcu_head); + + RCU_INIT_POINTER(mld_vif->link[bss_conf->link_id], NULL); + + wiphy_delayed_work_cancel(mld->wiphy, &link->rx_omi.finished_work); + + if (WARN_ON(link->fw_id >= mld->fw->ucode_capa.num_links)) + return; + + RCU_INIT_POINTER(mld->fw_id_to_bss_conf[link->fw_id], NULL); +} + +void iwl_mld_handle_missed_beacon_notif(struct iwl_mld *mld, + struct iwl_rx_packet *pkt) +{ + const struct iwl_missed_beacons_notif *notif = (const void *)pkt->data; + union iwl_dbg_tlv_tp_data tp_data = { .fw_pkt = pkt }; + u32 link_id = 
le32_to_cpu(notif->link_id); + u32 missed_bcon = le32_to_cpu(notif->consec_missed_beacons); + u32 missed_bcon_since_rx = + le32_to_cpu(notif->consec_missed_beacons_since_last_rx); + u32 scnd_lnk_bcn_lost = + le32_to_cpu(notif->consec_missed_beacons_other_link); + struct ieee80211_bss_conf *link_conf = + iwl_mld_fw_id_to_link_conf(mld, link_id); + u32 bss_param_ch_cnt_link_id; + struct ieee80211_vif *vif; + + if (WARN_ON(!link_conf)) + return; + + vif = link_conf->vif; + bss_param_ch_cnt_link_id = link_conf->bss_param_ch_cnt_link_id; + + IWL_DEBUG_INFO(mld, + "missed bcn link_id=%u: %u consecutive (%u since last rx)\n", + link_id, missed_bcon, missed_bcon_since_rx); + + if (WARN_ON(!vif)) + return; + + mld->trans->dbg.dump_file_name_ext_valid = true; + snprintf(mld->trans->dbg.dump_file_name_ext, IWL_FW_INI_MAX_NAME, + "LinkId_%d_MacType_%d", link_id, + iwl_mld_mac80211_iftype_to_fw(vif)); + + iwl_dbg_tlv_time_point(&mld->fwrt, + IWL_FW_INI_TIME_POINT_MISSED_BEACONS, &tp_data); + + if (missed_bcon >= IWL_MLD_MISSED_BEACONS_THRESHOLD_LONG) { + if (missed_bcon_since_rx >= + IWL_MLD_MISSED_BEACONS_SINCE_RX_THOLD) { + ieee80211_connection_loss(vif); + return; + } + IWL_WARN(mld, + "missed beacons exceed threshold, but receiving data. Stay connected, expect bugs.\n"); + return; + } + + if (missed_bcon_since_rx > IWL_MLD_MISSED_BEACONS_THRESHOLD) { + ieee80211_cqm_beacon_loss_notify(vif, GFP_ATOMIC); + + /* try to switch links, no-op if we don't have MLO */ + iwl_mld_int_mlo_scan(mld, vif); + } + + /* no more logic if we're not in EMLSR */ + if (hweight16(vif->active_links) <= 1) + return; + + /* We are processing a notification before link activation */ + if (le32_to_cpu(notif->other_link_id) == FW_CTXT_ID_INVALID) + return; + + /* Exit EMLSR if we lost more than + * IWL_MLD_BCN_LOSS_EXIT_ESR_THRESH_2_LINKS beacons on both links + * OR more than IWL_MLD_BCN_LOSS_EXIT_ESR_THRESH on current link. + * OR more than IWL_MLD_BCN_LOSS_EXIT_ESR_THRESH_BSS_PARAM_CHANGED + * on current link and the link's bss_param_ch_count has changed on + * the other link's beacon. + */ + if ((missed_bcon >= IWL_MLD_BCN_LOSS_EXIT_ESR_THRESH_2_LINKS && + scnd_lnk_bcn_lost >= IWL_MLD_BCN_LOSS_EXIT_ESR_THRESH_2_LINKS) || + missed_bcon >= IWL_MLD_BCN_LOSS_EXIT_ESR_THRESH || + (bss_param_ch_cnt_link_id != link_id && + missed_bcon >= + IWL_MLD_BCN_LOSS_EXIT_ESR_THRESH_BSS_PARAM_CHANGED)) { + iwl_mld_exit_emlsr(mld, vif, IWL_MLD_EMLSR_EXIT_MISSED_BEACON, + iwl_mld_get_primary_link(vif)); + } +} +EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_mld_handle_missed_beacon_notif); + +bool iwl_mld_cancel_missed_beacon_notif(struct iwl_mld *mld, + struct iwl_rx_packet *pkt, + u32 removed_link_id) +{ + struct iwl_missed_beacons_notif *notif = (void *)pkt->data; + + if (le32_to_cpu(notif->other_link_id) == removed_link_id) { + /* Second link is being removed. Don't cancel the notification, + * but mark second link as invalid.
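
Condensed into a standalone predicate, the EMLSR exit conditions above read as follows (a sketch reusing the patch's threshold macros, which are defined in a constants header not shown here):

	/* sketch: should EMLSR be left because of beacon loss? */
	static bool emlsr_beacon_loss_exit(u32 missed, u32 missed_other_link,
					   bool bss_param_ch_cnt_from_other_link)
	{
		if (missed >= IWL_MLD_BCN_LOSS_EXIT_ESR_THRESH_2_LINKS &&
		    missed_other_link >= IWL_MLD_BCN_LOSS_EXIT_ESR_THRESH_2_LINKS)
			return true;	/* heavy loss on both links */
		if (missed >= IWL_MLD_BCN_LOSS_EXIT_ESR_THRESH)
			return true;	/* heavy loss on the current link */
		/* lighter loss, but the other link signalled a BSS parameter change */
		return bss_param_ch_cnt_from_other_link &&
		       missed >= IWL_MLD_BCN_LOSS_EXIT_ESR_THRESH_BSS_PARAM_CHANGED;
	}
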
+ */ + notif->other_link_id = cpu_to_le32(FW_CTXT_ID_INVALID); + } + + /* If the primary link is removed, cancel the notification */ + return le32_to_cpu(notif->link_id) == removed_link_id; +} + +int iwl_mld_link_set_associated(struct iwl_mld *mld, struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link) +{ + return iwl_mld_change_link_in_fw(mld, link, LINK_CONTEXT_MODIFY_ALL & + ~(LINK_CONTEXT_MODIFY_ACTIVE | + LINK_CONTEXT_MODIFY_EHT_PARAMS)); +} + +struct iwl_mld_rssi_to_grade { + s8 rssi[2]; + u16 grade; +}; + +#define RSSI_TO_GRADE_LINE(_lb, _hb_uhb, _grade) \ + { \ + .rssi = {_lb, _hb_uhb}, \ + .grade = _grade \ + } + +/* + * This array must be sorted by increasing RSSI for proper functionality. + * The grades are actually estimated throughput, represented as fixed-point + * with a scale factor of 1/10. + */ +static const struct iwl_mld_rssi_to_grade rssi_to_grade_map[] = { + RSSI_TO_GRADE_LINE(-85, -89, 172), + RSSI_TO_GRADE_LINE(-83, -86, 344), + RSSI_TO_GRADE_LINE(-82, -85, 516), + RSSI_TO_GRADE_LINE(-80, -83, 688), + RSSI_TO_GRADE_LINE(-77, -79, 1032), + RSSI_TO_GRADE_LINE(-73, -76, 1376), + RSSI_TO_GRADE_LINE(-70, -74, 1548), + RSSI_TO_GRADE_LINE(-69, -72, 1720), + RSSI_TO_GRADE_LINE(-65, -68, 2064), + RSSI_TO_GRADE_LINE(-61, -66, 2294), + RSSI_TO_GRADE_LINE(-58, -61, 2580), + RSSI_TO_GRADE_LINE(-55, -58, 2868), + RSSI_TO_GRADE_LINE(-46, -55, 3098), + RSSI_TO_GRADE_LINE(-43, -54, 3442) +}; + +#define MAX_GRADE (rssi_to_grade_map[ARRAY_SIZE(rssi_to_grade_map) - 1].grade) + +#define DEFAULT_CHAN_LOAD_2GHZ 30 +#define DEFAULT_CHAN_LOAD_5GHZ 15 +#define DEFAULT_CHAN_LOAD_6GHZ 0 + +/* Factors calculation is done with fixed-point with a scaling factor of 1/256 */ +#define SCALE_FACTOR 256 +#define MAX_CHAN_LOAD 256 + +static unsigned int +iwl_mld_get_n_subchannels(const struct ieee80211_bss_conf *link_conf) +{ + enum nl80211_chan_width chan_width = + link_conf->chanreq.oper.width; + int mhz = nl80211_chan_width_to_mhz(chan_width); + unsigned int n_subchannels; + + if (WARN_ONCE(mhz < 20 || mhz > 320, + "Invalid channel width : (%d)\n", mhz)) + return 1; + + /* total number of subchannels */ + n_subchannels = mhz / 20; + + /* No puncturing if less than 80 MHz */ + if (mhz >= 80) + n_subchannels -= hweight16(link_conf->chanreq.oper.punctured); + + return n_subchannels; +} + +static int +iwl_mld_get_chan_load_from_element(struct iwl_mld *mld, + struct ieee80211_bss_conf *link_conf) +{ + struct ieee80211_vif *vif = link_conf->vif; + const struct cfg80211_bss_ies *ies; + const struct element *bss_load_elem = NULL; + const struct ieee80211_bss_load_elem *bss_load; + + guard(rcu)(); + + if (ieee80211_vif_link_active(vif, link_conf->link_id)) + ies = rcu_dereference(link_conf->bss->beacon_ies); + else + ies = rcu_dereference(link_conf->bss->ies); + + if (ies) + bss_load_elem = cfg80211_find_elem(WLAN_EID_QBSS_LOAD, + ies->data, ies->len); + + if (!bss_load_elem || + bss_load_elem->datalen != sizeof(*bss_load)) + return -EINVAL; + + bss_load = (const void *)bss_load_elem->data; + + return bss_load->channel_util; +} + +static unsigned int +iwl_mld_get_chan_load_by_us(struct iwl_mld *mld, + struct ieee80211_bss_conf *link_conf, + bool expect_active_link) +{ + struct iwl_mld_link *mld_link = iwl_mld_link_from_mac80211(link_conf); + struct ieee80211_chanctx_conf *chan_ctx; + struct iwl_mld_phy *phy; + + if (!mld_link || !mld_link->active) { + WARN_ON(expect_active_link); + return 0; + } + + if (WARN_ONCE(!rcu_access_pointer(mld_link->chan_ctx), + "Active link (%u) without channel ctxt 
assigned!\n", + link_conf->link_id)) + return 0; + + chan_ctx = wiphy_dereference(mld->wiphy, mld_link->chan_ctx); + phy = iwl_mld_phy_from_mac80211(chan_ctx); + + return phy->channel_load_by_us; +} + +/* Returns error if the channel utilization element is invalid/unavailable */ +int iwl_mld_get_chan_load_by_others(struct iwl_mld *mld, + struct ieee80211_bss_conf *link_conf, + bool expect_active_link) +{ + int chan_load; + unsigned int chan_load_by_us; + + /* get overall load */ + chan_load = iwl_mld_get_chan_load_from_element(mld, link_conf); + if (chan_load < 0) + return chan_load; + + chan_load_by_us = iwl_mld_get_chan_load_by_us(mld, link_conf, + expect_active_link); + + /* channel load by us is given in percentage */ + chan_load_by_us = + NORMALIZE_PERCENT_TO_255(chan_load_by_us); + + /* Use only values that firmware sends that can possibly be valid */ + if (chan_load_by_us <= chan_load) + chan_load -= chan_load_by_us; + + return chan_load; +} + +static unsigned int +iwl_mld_get_default_chan_load(struct ieee80211_bss_conf *link_conf) +{ + enum nl80211_band band = link_conf->chanreq.oper.chan->band; + + switch (band) { + case NL80211_BAND_2GHZ: + return DEFAULT_CHAN_LOAD_2GHZ; + case NL80211_BAND_5GHZ: + return DEFAULT_CHAN_LOAD_5GHZ; + case NL80211_BAND_6GHZ: + return DEFAULT_CHAN_LOAD_6GHZ; + default: + WARN_ON(1); + return 0; + } +} + +unsigned int iwl_mld_get_chan_load(struct iwl_mld *mld, + struct ieee80211_bss_conf *link_conf) +{ + int chan_load; + + chan_load = iwl_mld_get_chan_load_by_others(mld, link_conf, false); + if (chan_load >= 0) + return chan_load; + + /* No information from the element, take the defaults */ + chan_load = iwl_mld_get_default_chan_load(link_conf); + + /* The defaults are given in percentage */ + return NORMALIZE_PERCENT_TO_255(chan_load); +} + +static unsigned int +iwl_mld_get_avail_chan_load(struct iwl_mld *mld, + struct ieee80211_bss_conf *link_conf) +{ + return MAX_CHAN_LOAD - iwl_mld_get_chan_load(mld, link_conf); +} + +/* This function calculates the grade of a link. Returns 0 in error case */ +unsigned int iwl_mld_get_link_grade(struct iwl_mld *mld, + struct ieee80211_bss_conf *link_conf) +{ + enum nl80211_band band; + int rssi_idx; + s32 link_rssi; + unsigned int grade = MAX_GRADE; + + if (WARN_ON_ONCE(!link_conf)) + return 0; + + band = link_conf->chanreq.oper.chan->band; + if (WARN_ONCE(band != NL80211_BAND_2GHZ && + band != NL80211_BAND_5GHZ && + band != NL80211_BAND_6GHZ, + "Invalid band (%u)\n", band)) + return 0; + + link_rssi = MBM_TO_DBM(link_conf->bss->signal); + /* + * For 6 GHz the RSSI of the beacons is lower than + * the RSSI of the data. + */ + if (band == NL80211_BAND_6GHZ && link_rssi) + link_rssi += 4; + + rssi_idx = band == NL80211_BAND_2GHZ ? 
0 : 1; + + /* No valid RSSI - take the lowest grade */ + if (!link_rssi) + link_rssi = rssi_to_grade_map[0].rssi[rssi_idx]; + + IWL_DEBUG_EHT(mld, + "Calculating grade of link %d: band = %d, bandwidth = %d, punctured subchannels = 0x%x, RSSI = %d\n", + link_conf->link_id, band, + link_conf->chanreq.oper.width, + link_conf->chanreq.oper.punctured, link_rssi); + + /* Get grade based on RSSI */ + for (int i = 0; i < ARRAY_SIZE(rssi_to_grade_map); i++) { + const struct iwl_mld_rssi_to_grade *line = + &rssi_to_grade_map[i]; + + if (link_rssi > line->rssi[rssi_idx]) + continue; + grade = line->grade; + break; + } + + /* Apply the channel load and puncturing factors */ + grade = grade * iwl_mld_get_avail_chan_load(mld, link_conf) / SCALE_FACTOR; + grade = grade * iwl_mld_get_n_subchannels(link_conf); + + IWL_DEBUG_EHT(mld, "Link %d's grade: %d\n", link_conf->link_id, grade); + + return grade; +} +EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_mld_get_link_grade); diff --git a/drivers/net/wireless/intel/iwlwifi/mld/link.h b/drivers/net/wireless/intel/iwlwifi/mld/link.h new file mode 100644 index 0000000000000..42b7bdcbd7412 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mld/link.h @@ -0,0 +1,153 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * Copyright (C) 2024-2025 Intel Corporation + */ +#ifndef __iwl_mld_link_h__ +#define __iwl_mld_link_h__ + +#include <net/mac80211.h> + +#include "mld.h" +#include "sta.h" + +/** + * struct iwl_probe_resp_data - data for NoA/CSA updates + * @rcu_head: used for freeing the data on update + * @notif: notification data + * @noa_len: length of NoA attribute, calculated from the notification + */ +struct iwl_probe_resp_data { + struct rcu_head rcu_head; + struct iwl_probe_resp_data_notif notif; + int noa_len; +}; + +/** + * struct iwl_mld_link - link configuration parameters + * + * @rcu_head: RCU head for freeing this data. + * @fw_id: the fw id of the link. + * @active: if the link is active or not. + * @queue_params: QoS data from mac80211. This is updated with a call to + * drv_conf_tx per each AC, and then notified once with BSS_CHANGED_QOS. + * So we store it here and then send one link cmd for all the ACs. + * @chan_ctx: pointer to the channel context assigned to the link. If a link + * has an assigned channel context it means that it is active. + * @he_ru_2mhz_block: 26-tone RU OFDMA transmissions should be blocked. + * @igtk: fw can only have one IGTK at a time, whereas mac80211 can have two. + * This tracks the one IGTK that currently exists in FW. + * @vif: the vif this link belongs to + * @bcast_sta: station used for broadcast packets. Used in AP, GO and IBSS. + * @mcast_sta: station used for multicast packets. Used in AP, GO and IBSS. + * @aux_sta: station used for remain on channel. Used in P2P device. + * @link_id: over the air link ID + * @ap_early_keys: The firmware cannot install keys before bcast/mcast STAs, + * but higher layers work differently, so we store the keys here for + * later installation. + * @silent_deactivation: next deactivation needs to be silent. + * @probe_resp_data: data from FW notification to store NOA related data to be + * inserted into probe response. + * @rx_omi: data for BW reduction with OMI + * @rx_omi.bw_in_progress: update is in progress (indicates target BW) + * @rx_omi.exit_ts: timestamp of last exit + * @rx_omi.finished_work: work for the delayed reaction to the firmware saying + * the change was applied, and for then applying a new mode if it was + * updated while waiting for firmware/AP settle delay.
+ * @rx_omi.desired_bw: desired bandwidth + * @rx_omi.last_max_bw: last maximum BW used by firmware, for AP BW changes + */ +struct iwl_mld_link { + struct rcu_head rcu_head; + + /* Add here fields that need clean up on restart */ + struct_group(zeroed_on_hw_restart, + u8 fw_id; + bool active; + struct ieee80211_tx_queue_params queue_params[IEEE80211_NUM_ACS]; + struct ieee80211_chanctx_conf __rcu *chan_ctx; + bool he_ru_2mhz_block; + struct ieee80211_key_conf *igtk; + ); + /* And here fields that survive a fw restart */ + struct ieee80211_vif *vif; + struct iwl_mld_int_sta bcast_sta; + struct iwl_mld_int_sta mcast_sta; + struct iwl_mld_int_sta aux_sta; + u8 link_id; + + struct { + struct wiphy_delayed_work finished_work; + unsigned long exit_ts; + enum ieee80211_sta_rx_bandwidth bw_in_progress, + desired_bw, + last_max_bw; + } rx_omi; + + /* we can only have 2 GTK + 2 IGTK + 2 BIGTK active at a time */ + struct ieee80211_key_conf *ap_early_keys[6]; + bool silent_deactivation; + struct iwl_probe_resp_data __rcu *probe_resp_data; +}; + +/* Cleanup function for struct iwl_mld_link, will be called on restart */ +static inline void +iwl_mld_cleanup_link(struct iwl_mld *mld, struct iwl_mld_link *link) +{ + struct iwl_probe_resp_data *probe_data; + + probe_data = wiphy_dereference(mld->wiphy, link->probe_resp_data); + RCU_INIT_POINTER(link->probe_resp_data, NULL); + if (probe_data) + kfree_rcu(probe_data, rcu_head); + + CLEANUP_STRUCT(link); + if (link->bcast_sta.sta_id != IWL_INVALID_STA) + iwl_mld_free_internal_sta(mld, &link->bcast_sta); + if (link->mcast_sta.sta_id != IWL_INVALID_STA) + iwl_mld_free_internal_sta(mld, &link->mcast_sta); + if (link->aux_sta.sta_id != IWL_INVALID_STA) + iwl_mld_free_internal_sta(mld, &link->aux_sta); +} + +/* Convert a percentage from [0,100] to [0,255] */ +#define NORMALIZE_PERCENT_TO_255(percentage) ((percentage) * 256 / 100) + +int iwl_mld_add_link(struct iwl_mld *mld, + struct ieee80211_bss_conf *bss_conf); +void iwl_mld_remove_link(struct iwl_mld *mld, + struct ieee80211_bss_conf *bss_conf); +int iwl_mld_activate_link(struct iwl_mld *mld, + struct ieee80211_bss_conf *link); +void iwl_mld_deactivate_link(struct iwl_mld *mld, + struct ieee80211_bss_conf *link); +int iwl_mld_change_link_omi_bw(struct iwl_mld *mld, + struct ieee80211_bss_conf *link, + enum ieee80211_sta_rx_bandwidth bw); +int iwl_mld_change_link_in_fw(struct iwl_mld *mld, + struct ieee80211_bss_conf *link, u32 changes); +void iwl_mld_handle_missed_beacon_notif(struct iwl_mld *mld, + struct iwl_rx_packet *pkt); +bool iwl_mld_cancel_missed_beacon_notif(struct iwl_mld *mld, + struct iwl_rx_packet *pkt, + u32 removed_link_id); +int iwl_mld_link_set_associated(struct iwl_mld *mld, struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link); + +unsigned int iwl_mld_get_link_grade(struct iwl_mld *mld, + struct ieee80211_bss_conf *link_conf); + +unsigned int iwl_mld_get_chan_load(struct iwl_mld *mld, + struct ieee80211_bss_conf *link_conf); + +int iwl_mld_get_chan_load_by_others(struct iwl_mld *mld, + struct ieee80211_bss_conf *link_conf, + bool expect_active_link); +void iwl_mld_handle_omi_status_notif(struct iwl_mld *mld, + struct iwl_rx_packet *pkt); +void iwl_mld_leave_omi_bw_reduction(struct iwl_mld *mld); +void iwl_mld_check_omi_bw_reduction(struct iwl_mld *mld); +void iwl_mld_omi_ap_changed_bw(struct iwl_mld *mld, + struct ieee80211_bss_conf *link_conf, + enum ieee80211_sta_rx_bandwidth bw); + +#endif /* __iwl_mld_link_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mld/low_latency.c
b/drivers/net/wireless/intel/iwlwifi/mld/low_latency.c new file mode 100644 index 0000000000000..a4a612afb3b30 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mld/low_latency.c @@ -0,0 +1,339 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2024-2025 Intel Corporation + */ +#include "mld.h" +#include "iface.h" +#include "low_latency.h" +#include "hcmd.h" +#include "power.h" +#include "mlo.h" + +#define MLD_LL_WK_INTERVAL_MSEC 500 +#define MLD_LL_PERIOD (HZ * MLD_LL_WK_INTERVAL_MSEC / 1000) +#define MLD_LL_ACTIVE_WK_PERIOD (HZ * 10) + +/* packets/MLD_LL_WK_PERIOD seconds */ +#define MLD_LL_ENABLE_THRESH 100 + +static bool iwl_mld_calc_low_latency(struct iwl_mld *mld, + unsigned long timestamp) +{ + struct iwl_mld_low_latency *ll = &mld->low_latency; + bool global_low_latency = false; + u8 num_rx_q = mld->trans->num_rx_queues; + + for (int mac_id = 0; mac_id < NUM_MAC_INDEX_DRIVER; mac_id++) { + u32 total_vo_vi_pkts = 0; + bool ll_period_expired; + + /* If it's not initialized yet, it means we have not yet + * received/transmitted any vo/vi packet on this MAC. + */ + if (!ll->window_start[mac_id]) + continue; + + ll_period_expired = + time_after(timestamp, ll->window_start[mac_id] + + MLD_LL_ACTIVE_WK_PERIOD); + + if (ll_period_expired) + ll->window_start[mac_id] = timestamp; + + for (int q = 0; q < num_rx_q; q++) { + struct iwl_mld_low_latency_packets_counters *counters = + &mld->low_latency.pkts_counters[q]; + + spin_lock_bh(&counters->lock); + + total_vo_vi_pkts += counters->vo_vi[mac_id]; + + if (ll_period_expired) + counters->vo_vi[mac_id] = 0; + + spin_unlock_bh(&counters->lock); + } + + /* enable immediately with enough packets but defer + * disabling only if the low-latency period expired and + * below threshold. + */ + if (total_vo_vi_pkts > MLD_LL_ENABLE_THRESH) + mld->low_latency.result[mac_id] = true; + else if (ll_period_expired) + mld->low_latency.result[mac_id] = false; + + global_low_latency |= mld->low_latency.result[mac_id]; + } + + return global_low_latency; +} + +static void iwl_mld_low_latency_iter(void *_data, u8 *mac, + struct ieee80211_vif *vif) +{ + struct iwl_mld *mld = _data; + struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif); + bool prev = mld_vif->low_latency_causes & LOW_LATENCY_TRAFFIC; + bool low_latency; + + if (WARN_ON(mld_vif->fw_id >= ARRAY_SIZE(mld->low_latency.result))) + return; + + low_latency = mld->low_latency.result[mld_vif->fw_id]; + + if (prev != low_latency) + iwl_mld_vif_update_low_latency(mld, vif, low_latency, + LOW_LATENCY_TRAFFIC); +} + +static void iwl_mld_low_latency_wk(struct wiphy *wiphy, struct wiphy_work *wk) +{ + struct iwl_mld *mld = container_of(wk, struct iwl_mld, + low_latency.work.work); + unsigned long timestamp = jiffies; + bool low_latency_active; + + if (mld->fw_status.in_hw_restart) + return; + + /* It is assumed that the work was scheduled only after checking + * at least MLD_LL_PERIOD has passed since the last update. + */ + + low_latency_active = iwl_mld_calc_low_latency(mld, timestamp); + + /* Update the timestamp now after the low-latency calculation */ + mld->low_latency.timestamp = timestamp; + + /* If low-latency is active we need to force re-evaluation after + * 10 seconds, so that we can disable low-latency when + * the low-latency traffic ends. + * + * Otherwise, we don't need to run the work because there is nothing to + * disable. 
+ * + * Note that this has no impact on the regular scheduling of the + * updates triggered by traffic - those happen whenever the + * MLD_LL_PERIOD timeout expires. + */ + if (low_latency_active) + wiphy_delayed_work_queue(mld->wiphy, &mld->low_latency.work, + MLD_LL_ACTIVE_WK_PERIOD); + + ieee80211_iterate_active_interfaces_mtx(mld->hw, + IEEE80211_IFACE_ITER_NORMAL, + iwl_mld_low_latency_iter, mld); +} + +int iwl_mld_low_latency_init(struct iwl_mld *mld) +{ + struct iwl_mld_low_latency *ll = &mld->low_latency; + unsigned long ts = jiffies; + + ll->pkts_counters = kcalloc(mld->trans->num_rx_queues, + sizeof(*ll->pkts_counters), GFP_KERNEL); + if (!ll->pkts_counters) + return -ENOMEM; + + for (int q = 0; q < mld->trans->num_rx_queues; q++) + spin_lock_init(&ll->pkts_counters[q].lock); + + wiphy_delayed_work_init(&ll->work, iwl_mld_low_latency_wk); + + ll->timestamp = ts; + + /* The low-latency window_start will be initialized per-MAC on + * the first vo/vi packet received/transmitted. + */ + + return 0; +} + +void iwl_mld_low_latency_free(struct iwl_mld *mld) +{ + struct iwl_mld_low_latency *ll = &mld->low_latency; + + kfree(ll->pkts_counters); + ll->pkts_counters = NULL; +} + +void iwl_mld_low_latency_restart_cleanup(struct iwl_mld *mld) +{ + struct iwl_mld_low_latency *ll = &mld->low_latency; + + ll->timestamp = jiffies; + + memset(ll->window_start, 0, sizeof(ll->window_start)); + memset(ll->result, 0, sizeof(ll->result)); + + for (int q = 0; q < mld->trans->num_rx_queues; q++) + memset(ll->pkts_counters[q].vo_vi, 0, + sizeof(ll->pkts_counters[q].vo_vi)); +} + +static int iwl_mld_send_low_latency_cmd(struct iwl_mld *mld, bool low_latency, + u16 mac_id) +{ + struct iwl_mac_low_latency_cmd cmd = { + .mac_id = cpu_to_le32(mac_id) + }; + u16 cmd_id = WIDE_ID(MAC_CONF_GROUP, LOW_LATENCY_CMD); + int ret; + + if (low_latency) { + /* Currently we don't care about the direction */ + cmd.low_latency_rx = 1; + cmd.low_latency_tx = 1; + } + + ret = iwl_mld_send_cmd_pdu(mld, cmd_id, &cmd); + if (ret) + IWL_ERR(mld, "Failed to send low latency command\n"); + + return ret; +} + +static void iwl_mld_vif_set_low_latency(struct iwl_mld_vif *mld_vif, bool set, + enum iwl_mld_low_latency_cause cause) +{ + if (set) + mld_vif->low_latency_causes |= cause; + else + mld_vif->low_latency_causes &= ~cause; +} + +void iwl_mld_vif_update_low_latency(struct iwl_mld *mld, + struct ieee80211_vif *vif, + bool low_latency, + enum iwl_mld_low_latency_cause cause) +{ + struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif); + bool prev; + + prev = iwl_mld_vif_low_latency(mld_vif); + iwl_mld_vif_set_low_latency(mld_vif, low_latency, cause); + + low_latency = iwl_mld_vif_low_latency(mld_vif); + if (low_latency == prev) + return; + + if (iwl_mld_send_low_latency_cmd(mld, low_latency, mld_vif->fw_id)) { + /* revert to previous low-latency state */ + iwl_mld_vif_set_low_latency(mld_vif, prev, cause); + return; + } + + if (low_latency) + iwl_mld_leave_omi_bw_reduction(mld); + + if (ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_P2P_CLIENT) + return; + + iwl_mld_update_mac_power(mld, vif, false); + + if (low_latency) + iwl_mld_retry_emlsr(mld, vif); +} + +static bool iwl_mld_is_vo_vi_pkt(struct ieee80211_hdr *hdr) +{ + u8 tid; + static const u8 tid_to_mac80211_ac[] = { + IEEE80211_AC_BE, + IEEE80211_AC_BK, + IEEE80211_AC_BK, + IEEE80211_AC_BE, + IEEE80211_AC_VI, + IEEE80211_AC_VI, + IEEE80211_AC_VO, + IEEE80211_AC_VO, + }; + + if (!hdr || !ieee80211_is_data_qos(hdr->frame_control)) + return false; + + tid =
ieee80211_get_tid(hdr); + if (tid >= IWL_MAX_TID_COUNT) + return false; + + return tid_to_mac80211_ac[tid] <= IEEE80211_AC_VI; +} + +void iwl_mld_low_latency_update_counters(struct iwl_mld *mld, + struct ieee80211_hdr *hdr, + struct ieee80211_sta *sta, + u8 queue) +{ + struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta); + struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(mld_sta->vif); + struct iwl_mld_low_latency_packets_counters *counters; + unsigned long ts = jiffies ? jiffies : 1; + u8 fw_id = mld_vif->fw_id; + + /* we should have failed op mode init if NULL */ + if (WARN_ON_ONCE(!mld->low_latency.pkts_counters)) + return; + + if (WARN_ON_ONCE(fw_id >= ARRAY_SIZE(counters->vo_vi) || + queue >= mld->trans->num_rx_queues)) + return; + + if (mld->low_latency.stopped) + return; + + if (!iwl_mld_is_vo_vi_pkt(hdr)) + return; + + counters = &mld->low_latency.pkts_counters[queue]; + + spin_lock_bh(&counters->lock); + counters->vo_vi[fw_id]++; + spin_unlock_bh(&counters->lock); + + /* Initialize the window_start on the first vo/vi packet */ + if (!mld->low_latency.window_start[fw_id]) + mld->low_latency.window_start[fw_id] = ts; + + if (time_is_before_jiffies(mld->low_latency.timestamp + MLD_LL_PERIOD)) + wiphy_delayed_work_queue(mld->wiphy, &mld->low_latency.work, + 0); +} + +void iwl_mld_low_latency_stop(struct iwl_mld *mld) +{ + lockdep_assert_wiphy(mld->wiphy); + + mld->low_latency.stopped = true; + + wiphy_delayed_work_cancel(mld->wiphy, &mld->low_latency.work); +} + +void iwl_mld_low_latency_restart(struct iwl_mld *mld) +{ + struct iwl_mld_low_latency *ll = &mld->low_latency; + bool low_latency = false; + unsigned long ts = jiffies; + + lockdep_assert_wiphy(mld->wiphy); + + ll->timestamp = ts; + mld->low_latency.stopped = false; + + for (int mac = 0; mac < NUM_MAC_INDEX_DRIVER; mac++) { + ll->window_start[mac] = 0; + low_latency |= ll->result[mac]; + + for (int q = 0; q < mld->trans->num_rx_queues; q++) { + spin_lock_bh(&ll->pkts_counters[q].lock); + ll->pkts_counters[q].vo_vi[mac] = 0; + spin_unlock_bh(&ll->pkts_counters[q].lock); + } + } + + /* if low latency is active, force re-evaluation to cover the case of + * no traffic.
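
The detection scheme implemented by iwl_mld_calc_low_latency() earlier in this file reduces to a windowed hysteresis per MAC. A condensed single-MAC, single-queue sketch (constants taken from the defines at the top of this file):

	/* sketch: enable fast on a traffic burst, disable only after a quiet window */
	struct ll_state {
		unsigned long window_start;	/* jiffies */
		u32 vo_vi_pkts;
		bool active;
	};

	static void ll_evaluate(struct ll_state *s, unsigned long now)
	{
		/* MLD_LL_ACTIVE_WK_PERIOD is 10 seconds */
		bool expired = time_after(now, s->window_start + MLD_LL_ACTIVE_WK_PERIOD);

		if (s->vo_vi_pkts > MLD_LL_ENABLE_THRESH)
			s->active = true;		/* enable immediately */
		else if (expired)
			s->active = false;		/* defer disabling to window end */

		if (expired) {
			s->window_start = now;
			s->vo_vi_pkts = 0;
		}
	}
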
+ */ + if (low_latency) + wiphy_delayed_work_queue(mld->wiphy, &ll->work, MLD_LL_PERIOD); +} diff --git a/drivers/net/wireless/intel/iwlwifi/mld/low_latency.h b/drivers/net/wireless/intel/iwlwifi/mld/low_latency.h new file mode 100644 index 0000000000000..f59684d235af1 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mld/low_latency.h @@ -0,0 +1,68 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * Copyright (C) 2024 Intel Corporation + */ +#ifndef __iwl_mld_low_latency_h__ +#define __iwl_mld_low_latency_h__ + +/** + * struct iwl_mld_low_latency_packets_counters - Packets counters + * @lock: synchronize the counting in data path against the worker + * @vo_vi: per-mac, counts the number of TX and RX voice and video packets + */ +struct iwl_mld_low_latency_packets_counters { + spinlock_t lock; + u32 vo_vi[NUM_MAC_INDEX_DRIVER]; +} ____cacheline_aligned_in_smp; + +/** + * enum iwl_mld_low_latency_cause - low-latency set causes + * + * @LOW_LATENCY_TRAFFIC: indicates low-latency traffic was detected + * @LOW_LATENCY_DEBUGFS: low-latency mode set from debugfs + * @LOW_LATENCY_VIF_TYPE: low-latency mode set because of vif type (AP) + */ +enum iwl_mld_low_latency_cause { + LOW_LATENCY_TRAFFIC = BIT(0), + LOW_LATENCY_DEBUGFS = BIT(1), + LOW_LATENCY_VIF_TYPE = BIT(2), +}; + +/** + * struct iwl_mld_low_latency - Manage low-latency detection and activation. + * @work: this work is used to detect low-latency by monitoring the number of + * voice and video packets transmitted in a period of time. If the + * threshold is reached, low-latency is activated. When active, + * it is deactivated if the threshold is not reached within a + * 10-second period. + * @timestamp: timestamp of the last update. + * @window_start: per-mac, timestamp of the start of the current window. when + * the window is over, the counters are reset. 
+ * @pkts_counters: per-queue array voice/video packet counters + * @result: per-mac latest low-latency result + * @stopped: if true, ignore the requests to update the counters + */ +struct iwl_mld_low_latency { + struct wiphy_delayed_work work; + unsigned long timestamp; + unsigned long window_start[NUM_MAC_INDEX_DRIVER]; + struct iwl_mld_low_latency_packets_counters *pkts_counters; + bool result[NUM_MAC_INDEX_DRIVER]; + bool stopped; +}; + +int iwl_mld_low_latency_init(struct iwl_mld *mld); +void iwl_mld_low_latency_free(struct iwl_mld *mld); +void iwl_mld_low_latency_restart_cleanup(struct iwl_mld *mld); +void iwl_mld_vif_update_low_latency(struct iwl_mld *mld, + struct ieee80211_vif *vif, + bool low_latency, + enum iwl_mld_low_latency_cause cause); +void iwl_mld_low_latency_update_counters(struct iwl_mld *mld, + struct ieee80211_hdr *hdr, + struct ieee80211_sta *sta, + u8 queue); +void iwl_mld_low_latency_stop(struct iwl_mld *mld); +void iwl_mld_low_latency_restart(struct iwl_mld *mld); + +#endif /* __iwl_mld_low_latency_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mld/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mld/mac80211.c new file mode 100644 index 0000000000000..6851064b82da1 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mld/mac80211.c @@ -0,0 +1,2670 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2024-2025 Intel Corporation + */ + +#include +#include + +#include "mld.h" +#include "mac80211.h" +#include "phy.h" +#include "iface.h" +#include "power.h" +#include "sta.h" +#include "agg.h" +#include "scan.h" +#include "d3.h" +#include "tlc.h" +#include "key.h" +#include "ap.h" +#include "tx.h" +#include "roc.h" +#include "mlo.h" +#include "stats.h" +#include "ftm-initiator.h" +#include "low_latency.h" +#include "fw/api/scan.h" +#include "fw/api/context.h" +#include "fw/api/filter.h" +#include "fw/api/sta.h" +#include "fw/api/tdls.h" +#ifdef CONFIG_PM_SLEEP +#include "fw/api/d3.h" +#endif /* CONFIG_PM_SLEEP */ +#include "iwl-trans.h" + +#define IWL_MLD_LIMITS(ap) \ + { \ + .max = 2, \ + .types = BIT(NL80211_IFTYPE_STATION), \ + }, \ + { \ + .max = 1, \ + .types = ap | \ + BIT(NL80211_IFTYPE_P2P_CLIENT) | \ + BIT(NL80211_IFTYPE_P2P_GO), \ + }, \ + { \ + .max = 1, \ + .types = BIT(NL80211_IFTYPE_P2P_DEVICE), \ + } + +static const struct ieee80211_iface_limit iwl_mld_limits[] = { + IWL_MLD_LIMITS(0) +}; + +static const struct ieee80211_iface_limit iwl_mld_limits_ap[] = { + IWL_MLD_LIMITS(BIT(NL80211_IFTYPE_AP)) +}; + +static const struct ieee80211_iface_combination +iwl_mld_iface_combinations[] = { + { + .num_different_channels = 2, + .max_interfaces = 4, + .limits = iwl_mld_limits, + .n_limits = ARRAY_SIZE(iwl_mld_limits), + }, + { + .num_different_channels = 1, + .max_interfaces = 4, + .limits = iwl_mld_limits_ap, + .n_limits = ARRAY_SIZE(iwl_mld_limits_ap), + }, +}; + +static const u8 if_types_ext_capa_sta[] = { + [0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING, + [2] = WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT, + [7] = WLAN_EXT_CAPA8_OPMODE_NOTIF | + WLAN_EXT_CAPA8_MAX_MSDU_IN_AMSDU_LSB, + [8] = WLAN_EXT_CAPA9_MAX_MSDU_IN_AMSDU_MSB, + [9] = WLAN_EXT_CAPA10_TWT_REQUESTER_SUPPORT, +}; + +#define IWL_MLD_EMLSR_CAPA (IEEE80211_EML_CAP_EMLSR_SUPP | \ + IEEE80211_EML_CAP_EMLSR_PADDING_DELAY_32US << \ + __bf_shf(IEEE80211_EML_CAP_EMLSR_PADDING_DELAY) | \ + IEEE80211_EML_CAP_EMLSR_TRANSITION_DELAY_64US << \ + __bf_shf(IEEE80211_EML_CAP_EMLSR_TRANSITION_DELAY)) +#define IWL_MLD_CAPA_OPS (FIELD_PREP_CONST( \ + 
IEEE80211_MLD_CAP_OP_TID_TO_LINK_MAP_NEG_SUPP, \ + IEEE80211_MLD_CAP_OP_TID_TO_LINK_MAP_NEG_SUPP_SAME) | \ + IEEE80211_MLD_CAP_OP_LINK_RECONF_SUPPORT) + +static const struct wiphy_iftype_ext_capab iftypes_ext_capa[] = { + { + .iftype = NL80211_IFTYPE_STATION, + .extended_capabilities = if_types_ext_capa_sta, + .extended_capabilities_mask = if_types_ext_capa_sta, + .extended_capabilities_len = sizeof(if_types_ext_capa_sta), + /* relevant only if EHT is supported */ + .eml_capabilities = IWL_MLD_EMLSR_CAPA, + .mld_capa_and_ops = IWL_MLD_CAPA_OPS, + }, +}; + +static void iwl_mld_hw_set_addresses(struct iwl_mld *mld) +{ + struct wiphy *wiphy = mld->wiphy; + int num_addrs = 1; + + /* Extract MAC address */ + memcpy(mld->addresses[0].addr, mld->nvm_data->hw_addr, ETH_ALEN); + wiphy->addresses = mld->addresses; + wiphy->n_addresses = 1; + + /* Extract additional MAC addresses if available */ + if (mld->nvm_data->n_hw_addrs > 1) + num_addrs = min(mld->nvm_data->n_hw_addrs, + IWL_MLD_MAX_ADDRESSES); + + for (int i = 1; i < num_addrs; i++) { + memcpy(mld->addresses[i].addr, + mld->addresses[i - 1].addr, + ETH_ALEN); + mld->addresses[i].addr[ETH_ALEN - 1]++; + wiphy->n_addresses++; + } +} + +static void iwl_mld_hw_set_channels(struct iwl_mld *mld) +{ + struct wiphy *wiphy = mld->wiphy; + struct ieee80211_supported_band *bands = mld->nvm_data->bands; + + wiphy->bands[NL80211_BAND_2GHZ] = &bands[NL80211_BAND_2GHZ]; + wiphy->bands[NL80211_BAND_5GHZ] = &bands[NL80211_BAND_5GHZ]; + + if (bands[NL80211_BAND_6GHZ].n_channels) + wiphy->bands[NL80211_BAND_6GHZ] = &bands[NL80211_BAND_6GHZ]; +} + +static void iwl_mld_hw_set_security(struct iwl_mld *mld) +{ + struct ieee80211_hw *hw = mld->hw; + static const u32 mld_ciphers[] = { + WLAN_CIPHER_SUITE_WEP40, + WLAN_CIPHER_SUITE_WEP104, + WLAN_CIPHER_SUITE_TKIP, + WLAN_CIPHER_SUITE_CCMP, + WLAN_CIPHER_SUITE_GCMP, + WLAN_CIPHER_SUITE_GCMP_256, + WLAN_CIPHER_SUITE_AES_CMAC, + WLAN_CIPHER_SUITE_BIP_GMAC_128, + WLAN_CIPHER_SUITE_BIP_GMAC_256 + }; + + hw->wiphy->n_cipher_suites = ARRAY_SIZE(mld_ciphers); + hw->wiphy->cipher_suites = mld_ciphers; + + ieee80211_hw_set(hw, MFP_CAPABLE); + wiphy_ext_feature_set(hw->wiphy, + NL80211_EXT_FEATURE_BEACON_PROTECTION); +} + +static void iwl_mld_hw_set_antennas(struct iwl_mld *mld) +{ + struct wiphy *wiphy = mld->wiphy; + + wiphy->available_antennas_tx = iwl_mld_get_valid_tx_ant(mld); + wiphy->available_antennas_rx = iwl_mld_get_valid_rx_ant(mld); +} + +static void iwl_mld_hw_set_pm(struct iwl_mld *mld) +{ +#ifdef CONFIG_PM_SLEEP + struct wiphy *wiphy = mld->wiphy; + + if (!device_can_wakeup(mld->trans->dev)) + return; + + mld->wowlan.flags |= WIPHY_WOWLAN_MAGIC_PKT | + WIPHY_WOWLAN_DISCONNECT | + WIPHY_WOWLAN_EAP_IDENTITY_REQ | + WIPHY_WOWLAN_RFKILL_RELEASE | + WIPHY_WOWLAN_NET_DETECT | + WIPHY_WOWLAN_SUPPORTS_GTK_REKEY | + WIPHY_WOWLAN_GTK_REKEY_FAILURE | + WIPHY_WOWLAN_4WAY_HANDSHAKE; + + mld->wowlan.n_patterns = IWL_WOWLAN_MAX_PATTERNS; + mld->wowlan.pattern_min_len = IWL_WOWLAN_MIN_PATTERN_LEN; + mld->wowlan.pattern_max_len = IWL_WOWLAN_MAX_PATTERN_LEN; + mld->wowlan.max_nd_match_sets = IWL_SCAN_MAX_PROFILES_V2; + + wiphy->wowlan = &mld->wowlan; +#endif /* CONFIG_PM_SLEEP */ +} + +static void iwl_mac_hw_set_radiotap(struct iwl_mld *mld) +{ + struct ieee80211_hw *hw = mld->hw; + + hw->radiotap_mcs_details |= IEEE80211_RADIOTAP_MCS_HAVE_FEC | + IEEE80211_RADIOTAP_MCS_HAVE_STBC; + + hw->radiotap_vht_details |= IEEE80211_RADIOTAP_VHT_KNOWN_STBC | + IEEE80211_RADIOTAP_VHT_KNOWN_BEAMFORMED; + + hw->radiotap_timestamp.units_pos = + 
IEEE80211_RADIOTAP_TIMESTAMP_UNIT_US |
+ IEEE80211_RADIOTAP_TIMESTAMP_SPOS_PLCP_SIG_ACQ;
+
+ /* this is the case for CCK frames, it's better (only 8) for OFDM */
+ hw->radiotap_timestamp.accuracy = 22;
+}
+
+static void iwl_mac_hw_set_flags(struct iwl_mld *mld)
+{
+ struct ieee80211_hw *hw = mld->hw;
+
+ ieee80211_hw_set(hw, USES_RSS);
+ ieee80211_hw_set(hw, HANDLES_QUIET_CSA);
+ ieee80211_hw_set(hw, AP_LINK_PS);
+ ieee80211_hw_set(hw, SIGNAL_DBM);
+ ieee80211_hw_set(hw, SPECTRUM_MGMT);
+ ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
+ ieee80211_hw_set(hw, WANT_MONITOR_VIF);
+ ieee80211_hw_set(hw, SUPPORTS_PS);
+ ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS);
+ ieee80211_hw_set(hw, AMPDU_AGGREGATION);
+ ieee80211_hw_set(hw, CONNECTION_MONITOR);
+ ieee80211_hw_set(hw, CHANCTX_STA_CSA);
+ ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
+ ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS);
+ ieee80211_hw_set(hw, NEEDS_UNIQUE_STA_ADDR);
+ ieee80211_hw_set(hw, SUPPORTS_VHT_EXT_NSS_BW);
+ ieee80211_hw_set(hw, BUFF_MMPDU_TXQ);
+ ieee80211_hw_set(hw, STA_MMPDU_TXQ);
+ ieee80211_hw_set(hw, TX_AMSDU);
+ ieee80211_hw_set(hw, TX_FRAG_LIST);
+ ieee80211_hw_set(hw, TX_AMPDU_SETUP_IN_HW);
+ ieee80211_hw_set(hw, HAS_RATE_CONTROL);
+ ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
+ ieee80211_hw_set(hw, DISALLOW_PUNCTURING_5GHZ);
+ ieee80211_hw_set(hw, SINGLE_SCAN_ON_ALL_BANDS);
+ ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU);
+ ieee80211_hw_set(hw, TDLS_WIDER_BW);
+}
+
+static void iwl_mac_hw_set_wiphy(struct iwl_mld *mld)
+{
+ struct ieee80211_hw *hw = mld->hw;
+ struct wiphy *wiphy = hw->wiphy;
+ const struct iwl_ucode_capabilities *ucode_capa = &mld->fw->ucode_capa;
+
+ snprintf(wiphy->fw_version,
+ sizeof(wiphy->fw_version),
+ "%.31s", mld->fw->fw_version);
+
+ wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_P2P_CLIENT) |
+ BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_P2P_GO) |
+ BIT(NL80211_IFTYPE_P2P_DEVICE) |
+ BIT(NL80211_IFTYPE_ADHOC);
+
+ wiphy->features |= NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR |
+ NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR |
+ NL80211_FEATURE_ND_RANDOM_MAC_ADDR |
+ NL80211_FEATURE_HT_IBSS |
+ NL80211_FEATURE_P2P_GO_CTWIN |
+ NL80211_FEATURE_LOW_PRIORITY_SCAN |
+ NL80211_FEATURE_P2P_GO_OPPPS |
+ NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE |
+ NL80211_FEATURE_SUPPORTS_WMM_ADMISSION |
+ NL80211_FEATURE_TX_POWER_INSERTION |
+ NL80211_FEATURE_DS_PARAM_SET_IE_IN_PROBES;
+
+ wiphy->flags |= WIPHY_FLAG_IBSS_RSN |
+ WIPHY_FLAG_AP_UAPSD |
+ WIPHY_FLAG_HAS_CHANNEL_SWITCH |
+ WIPHY_FLAG_SPLIT_SCAN_6GHZ |
+ WIPHY_FLAG_SUPPORTS_TDLS |
+ WIPHY_FLAG_SUPPORTS_EXT_KEK_KCK;
+
+ if (mld->nvm_data->sku_cap_11be_enable &&
+ !iwlwifi_mod_params.disable_11ax &&
+ !iwlwifi_mod_params.disable_11be)
+ wiphy->flags |= WIPHY_FLAG_SUPPORTS_MLO;
+
+ /* the firmware uses u8 for the number of iterations, but 0xff is
+ * reserved for an infinite loop, so the maximum number of iterations
+ * is actually 254.
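+ * As an illustration of the limit: a plan asking for up to 254
+ * iterations is passed through as-is, while only an unbounded plan
+ * maps to the firmware's 0xff encoding.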
+ */ + wiphy->max_sched_scan_plan_iterations = 254; + wiphy->max_sched_scan_ie_len = iwl_mld_scan_max_template_size(); + wiphy->max_scan_ie_len = iwl_mld_scan_max_template_size(); + wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX; + wiphy->max_scan_ssids = PROBE_OPTION_MAX; + wiphy->max_sched_scan_plans = IWL_MAX_SCHED_SCAN_PLANS; + wiphy->max_sched_scan_reqs = 1; + wiphy->max_sched_scan_plan_interval = U16_MAX; + wiphy->max_match_sets = IWL_SCAN_MAX_PROFILES_V2; + + wiphy->max_remain_on_channel_duration = 10000; + + wiphy->hw_version = mld->trans->hw_id; + + wiphy->hw_timestamp_max_peers = 1; + + wiphy->iface_combinations = iwl_mld_iface_combinations; + wiphy->n_iface_combinations = ARRAY_SIZE(iwl_mld_iface_combinations); + + wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_VHT_IBSS); + wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_DFS_CONCURRENT); + wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_BEACON_RATE_LEGACY); + wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_SCAN_START_TIME); + wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_BSS_PARENT_TSF); + wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_SCAN_MIN_PREQ_CONTENT); + wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_ACCEPT_BCAST_PROBE_RESP); + wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_FILS_MAX_CHANNEL_TIME); + wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_OCE_PROBE_REQ_HIGH_TX_RATE); + wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_MU_MIMO_AIR_SNIFFER); + wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_SPP_AMSDU_SUPPORT); + + if (fw_has_capa(ucode_capa, IWL_UCODE_TLV_CAPA_PROTECTED_TWT)) + wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_PROTECTED_TWT); + + wiphy->iftype_ext_capab = NULL; + wiphy->num_iftype_ext_capab = 0; + + if (!iwlwifi_mod_params.disable_11ax) { + wiphy->iftype_ext_capab = iftypes_ext_capa; + wiphy->num_iftype_ext_capab = ARRAY_SIZE(iftypes_ext_capa); + + ieee80211_hw_set(hw, SUPPORTS_MULTI_BSSID); + ieee80211_hw_set(hw, SUPPORTS_ONLY_HE_MULTI_BSSID); + } + + if (iwlmld_mod_params.power_scheme != IWL_POWER_SCHEME_CAM) + wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT; + else + wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; +} + +static void iwl_mac_hw_set_misc(struct iwl_mld *mld) +{ + struct ieee80211_hw *hw = mld->hw; + + hw->queues = IEEE80211_NUM_ACS; + + hw->netdev_features = NETIF_F_HIGHDMA | NETIF_F_SG; + hw->netdev_features |= mld->cfg->features; + + hw->max_tx_fragments = mld->trans->max_skb_frags; + hw->max_listen_interval = IWL_MLD_CONN_LISTEN_INTERVAL; + + hw->uapsd_max_sp_len = IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL; + hw->uapsd_queues = IEEE80211_WMM_IE_STA_QOSINFO_AC_VO | + IEEE80211_WMM_IE_STA_QOSINFO_AC_VI | + IEEE80211_WMM_IE_STA_QOSINFO_AC_BK | + IEEE80211_WMM_IE_STA_QOSINFO_AC_BE; + + hw->chanctx_data_size = sizeof(struct iwl_mld_phy); + hw->vif_data_size = sizeof(struct iwl_mld_vif); + hw->sta_data_size = sizeof(struct iwl_mld_sta); + hw->txq_data_size = sizeof(struct iwl_mld_txq); + + /* TODO: Remove this division when IEEE80211_MAX_AMPDU_BUF_EHT size + * is supported. + * Note: ensure that IWL_DEFAULT_QUEUE_SIZE_EHT is updated accordingly. 
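+ * (IEEE80211_MAX_AMPDU_BUF_EHT is 1024 subframes, so the value
+ * advertised below is 512 until the full EHT buffer size is handled.)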
+ */
+ hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_EHT / 2;
+}
+
+static int iwl_mld_hw_verify_preconditions(struct iwl_mld *mld)
+{
+ /* 11ax is expected to be enabled for all supported devices */
+ if (WARN_ON(!mld->nvm_data->sku_cap_11ax_enable))
+ return -EINVAL;
+
+ /* LAR is expected to be enabled for all supported devices */
+ if (WARN_ON(!mld->nvm_data->lar_enabled))
+ return -EINVAL;
+
+ /* All supported devices are currently using version 3 of the cmd.
+ * Since version 3, IWL_SCAN_MAX_PROFILES_V2 shall be used where
+ * necessary.
+ */
+ if (WARN_ON(iwl_fw_lookup_cmd_ver(mld->fw,
+ SCAN_OFFLOAD_UPDATE_PROFILES_CMD,
+ IWL_FW_CMD_VER_UNKNOWN) != 3))
+ return -EINVAL;
+
+ return 0;
+}
+
+int iwl_mld_register_hw(struct iwl_mld *mld)
+{
+ /* Verify once the essential preconditions required for setting
+ * the hw capabilities.
+ */
+ if (iwl_mld_hw_verify_preconditions(mld))
+ return -EINVAL;
+
+ iwl_mld_hw_set_addresses(mld);
+ iwl_mld_hw_set_channels(mld);
+ iwl_mld_hw_set_security(mld);
+ iwl_mld_hw_set_pm(mld);
+ iwl_mld_hw_set_antennas(mld);
+ iwl_mac_hw_set_radiotap(mld);
+ iwl_mac_hw_set_flags(mld);
+ iwl_mac_hw_set_wiphy(mld);
+ iwl_mac_hw_set_misc(mld);
+
+ SET_IEEE80211_DEV(mld->hw, mld->trans->dev);
+
+ return ieee80211_register_hw(mld->hw);
+}
+
+static void
+iwl_mld_mac80211_tx(struct ieee80211_hw *hw,
+ struct ieee80211_tx_control *control, struct sk_buff *skb)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+ struct ieee80211_sta *sta = control->sta;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+ u32 link_id = u32_get_bits(info->control.flags,
+ IEEE80211_TX_CTRL_MLO_LINK);
+
+ /* In AP mode, mgmt frames are sent on the bcast station,
+ * so the FW can't translate the MLD addr to the link addr. Do it here.
+ */
+ if (ieee80211_is_mgmt(hdr->frame_control) && sta &&
+ link_id != IEEE80211_LINK_UNSPECIFIED &&
+ !ieee80211_is_probe_resp(hdr->frame_control)) {
+ /* translate MLD addresses to LINK addresses */
+ struct ieee80211_link_sta *link_sta =
+ rcu_dereference(sta->link[link_id]);
+ struct ieee80211_bss_conf *link_conf =
+ rcu_dereference(info->control.vif->link_conf[link_id]);
+ struct ieee80211_mgmt *mgmt;
+
+ if (WARN_ON(!link_sta || !link_conf)) {
+ ieee80211_free_txskb(hw, skb);
+ return;
+ }
+
+ mgmt = (void *)hdr;
+ memcpy(mgmt->da, link_sta->addr, ETH_ALEN);
+ memcpy(mgmt->sa, link_conf->addr, ETH_ALEN);
+ memcpy(mgmt->bssid, link_conf->bssid, ETH_ALEN);
+ }
+
+ iwl_mld_tx_skb(mld, skb, NULL);
+}
+
+static void
+iwl_mld_restart_cleanup(struct iwl_mld *mld)
+{
+ iwl_cleanup_mld(mld);
+
+ ieee80211_iterate_interfaces(mld->hw, IEEE80211_IFACE_ITER_ACTIVE,
+ iwl_mld_cleanup_vif, NULL);
+
+ ieee80211_iterate_stations_atomic(mld->hw,
+ iwl_mld_cleanup_sta, NULL);
+
+ iwl_mld_ftm_restart_cleanup(mld);
+}
+
+static
+int iwl_mld_mac80211_start(struct ieee80211_hw *hw)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+ int ret;
+ bool in_d3 = false;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+#ifdef CONFIG_PM_SLEEP
+ /* Unless the host goes into hibernate, the FW always stays on and
+ * the d3_resume flow is used. When wowlan is configured, mac80211
+ * will call its resume callback and the wowlan_resume flow will
+ * be used.
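+ * In short: after a non-wowlan suspend we reach this point with in_d3
+ * still set and run iwl_mld_no_wowlan_resume() below; after a wowlan
+ * suspend, iwl_mld_wowlan_resume() (see iwl_mld_resume()) takes care
+ * of the firmware instead.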
+ */
+
+ in_d3 = mld->fw_status.in_d3;
+ if (in_d3) {
+ /* mac80211 already cleaned up the state, no need for cleanup */
+ ret = iwl_mld_no_wowlan_resume(mld);
+ if (ret)
+ iwl_mld_stop_fw(mld);
+ }
+#endif /* CONFIG_PM_SLEEP */
+
+ if (mld->fw_status.in_hw_restart) {
+ iwl_mld_stop_fw(mld);
+ iwl_mld_restart_cleanup(mld);
+ }
+
+ if (!in_d3 || ret) {
+ ret = iwl_mld_start_fw(mld);
+ if (ret)
+ goto error;
+ }
+
+ mld->scan.last_start_time_jiffies = jiffies;
+
+ iwl_dbg_tlv_time_point(&mld->fwrt, IWL_FW_INI_TIME_POINT_POST_INIT,
+ NULL);
+ iwl_dbg_tlv_time_point(&mld->fwrt, IWL_FW_INI_TIME_POINT_PERIODIC,
+ NULL);
+
+ return 0;
+
+error:
+ /* If we failed to restart the hw, there is nothing useful
+ * we can do but indicate we are no longer in restart.
+ */
+ mld->fw_status.in_hw_restart = false;
+
+ return ret;
+}
+
+static
+void iwl_mld_mac80211_stop(struct ieee80211_hw *hw, bool suspend)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ wiphy_work_cancel(mld->wiphy, &mld->add_txqs_wk);
+
+ /* if the suspend flow fails the fw is in error. Stop it here, and it
+ * will be started upon wakeup
+ */
+ if (!suspend || iwl_mld_no_wowlan_suspend(mld))
+ iwl_mld_stop_fw(mld);
+
+ /* The HW is stopped, so no more RX is coming. OTOH, the worker can't
+ * run as the wiphy lock is held. Cancel it in case it was scheduled
+ * just before we stopped the HW.
+ */
+ wiphy_work_cancel(mld->wiphy, &mld->async_handlers_wk);
+
+ /* Empty out the list, as the worker won't do that */
+ iwl_mld_purge_async_handlers_list(mld);
+
+ /* Clear the in_hw_restart flag when stopping the hw, as mac80211
+ * won't execute the restart.
+ */
+ mld->fw_status.in_hw_restart = false;
+
+ /* We shouldn't have any UIDs still set. Loop over all the UIDs to
+ * make sure there's nothing left there and warn if any is found.
+ */
+ for (int i = 0; i < ARRAY_SIZE(mld->scan.uid_status); i++)
+ if (WARN_ONCE(mld->scan.uid_status[i],
+ "UMAC scan UID %d status was not cleaned (0x%x 0x%x)\n",
+ i, mld->scan.uid_status[i], mld->scan.status))
+ mld->scan.uid_status[i] = 0;
+}
+
+static
+int iwl_mld_mac80211_config(struct ieee80211_hw *hw, u32 changed)
+{
+ return 0;
+}
+
+static
+int iwl_mld_mac80211_add_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+ int ret;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ /* Construct mld_vif, add it to fw, and map its ID to ieee80211_vif */
+ ret = iwl_mld_add_vif(mld, vif);
+ if (ret)
+ return ret;
+
+ /*
+ * Add the default link, but not if this is an MLD vif as that implies
+ * the HW is restarting and it will be configured by change_vif_links.
+ */
+ if (!ieee80211_vif_is_mld(vif))
+ ret = iwl_mld_add_link(mld, &vif->bss_conf);
+ if (ret)
+ goto err;
+
+ if (vif->type == NL80211_IFTYPE_STATION) {
+ vif->driver_flags |= IEEE80211_VIF_REMOVE_AP_AFTER_DISASSOC;
+ if (!vif->p2p)
+ vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
+ IEEE80211_VIF_SUPPORTS_CQM_RSSI;
+ }
+
+ if (vif->p2p || iwl_fw_lookup_cmd_ver(mld->fw, PHY_CONTEXT_CMD, 0) < 5)
+ vif->driver_flags |= IEEE80211_VIF_IGNORE_OFDMA_WIDER_BW;
+
+ /*
+ * For an MLD vif (in restart) we may not have a link; delay the call
+ * until the initial change_vif_links().
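+ * (Hence the power update below runs only for a non-MLD station vif;
+ * for an MLD vif it happens once the links are added.)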
+ */ + if (vif->type == NL80211_IFTYPE_STATION && + !ieee80211_vif_is_mld(vif)) + iwl_mld_update_mac_power(mld, vif, false); + + if (vif->type == NL80211_IFTYPE_MONITOR) { + mld->monitor.on = true; + ieee80211_hw_set(mld->hw, RX_INCLUDES_FCS); + } + + if (vif->type == NL80211_IFTYPE_P2P_DEVICE) + mld->p2p_device_vif = vif; + + return 0; + +err: + iwl_mld_rm_vif(mld, vif); + return ret; +} + +static +void iwl_mld_mac80211_remove_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw); + + lockdep_assert_wiphy(mld->wiphy); + + if (ieee80211_vif_type_p2p(vif) == NL80211_IFTYPE_STATION) + vif->driver_flags &= ~(IEEE80211_VIF_BEACON_FILTER | + IEEE80211_VIF_SUPPORTS_CQM_RSSI); + + if (vif->type == NL80211_IFTYPE_MONITOR) { + __clear_bit(IEEE80211_HW_RX_INCLUDES_FCS, mld->hw->flags); + mld->monitor.on = false; + } + + if (vif->type == NL80211_IFTYPE_P2P_DEVICE) + mld->p2p_device_vif = NULL; + + iwl_mld_remove_link(mld, &vif->bss_conf); + +#ifdef CONFIG_IWLWIFI_DEBUGFS + debugfs_remove(iwl_mld_vif_from_mac80211(vif)->dbgfs_slink); +#endif + + iwl_mld_rm_vif(mld, vif); +} + +struct iwl_mld_mc_iter_data { + struct iwl_mld *mld; + int port_id; +}; + +static void iwl_mld_mc_iface_iterator(void *data, u8 *mac, + struct ieee80211_vif *vif) +{ + struct iwl_mld_mc_iter_data *mc_data = data; + struct iwl_mld *mld = mc_data->mld; + struct iwl_mcast_filter_cmd *cmd = mld->mcast_filter_cmd; + struct iwl_host_cmd hcmd = { + .id = MCAST_FILTER_CMD, + .dataflags[0] = IWL_HCMD_DFL_NOCOPY, + }; + int ret, len; + + /* If we don't have free ports, mcast frames will be dropped */ + if (WARN_ON_ONCE(mc_data->port_id >= MAX_PORT_ID_NUM)) + return; + + if (vif->type != NL80211_IFTYPE_STATION || !vif->cfg.assoc) + return; + + cmd->port_id = mc_data->port_id++; + ether_addr_copy(cmd->bssid, vif->bss_conf.bssid); + len = roundup(sizeof(*cmd) + cmd->count * ETH_ALEN, 4); + + hcmd.len[0] = len; + hcmd.data[0] = cmd; + + ret = iwl_mld_send_cmd(mld, &hcmd); + if (ret) + IWL_ERR(mld, "mcast filter cmd error. 
ret=%d\n", ret); +} + +void iwl_mld_recalc_multicast_filter(struct iwl_mld *mld) +{ + struct iwl_mld_mc_iter_data iter_data = { + .mld = mld, + }; + + if (WARN_ON_ONCE(!mld->mcast_filter_cmd)) + return; + + ieee80211_iterate_active_interfaces(mld->hw, + IEEE80211_IFACE_ITER_NORMAL, + iwl_mld_mc_iface_iterator, + &iter_data); +} + +static u64 +iwl_mld_mac80211_prepare_multicast(struct ieee80211_hw *hw, + struct netdev_hw_addr_list *mc_list) +{ + struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw); + struct iwl_mcast_filter_cmd *cmd; + struct netdev_hw_addr *addr; + int addr_count = netdev_hw_addr_list_count(mc_list); + bool pass_all = addr_count > MAX_MCAST_FILTERING_ADDRESSES; + int len; + + if (pass_all) + addr_count = 0; + + /* len must be a multiple of 4 */ + len = roundup(sizeof(*cmd) + addr_count * ETH_ALEN, 4); + cmd = kzalloc(len, GFP_ATOMIC); + if (!cmd) + return 0; + + if (pass_all) { + cmd->pass_all = 1; + goto out; + } + + netdev_hw_addr_list_for_each(addr, mc_list) { + IWL_DEBUG_MAC80211(mld, "mcast addr (%d): %pM\n", + cmd->count, addr->addr); + ether_addr_copy(&cmd->addr_list[cmd->count * ETH_ALEN], + addr->addr); + cmd->count++; + } + +out: + return (u64)(unsigned long)cmd; +} + +static +void iwl_mld_mac80211_configure_filter(struct ieee80211_hw *hw, + unsigned int changed_flags, + unsigned int *total_flags, + u64 multicast) +{ + struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw); + struct iwl_mcast_filter_cmd *cmd = (void *)(unsigned long)multicast; + + /* Replace previous configuration */ + kfree(mld->mcast_filter_cmd); + mld->mcast_filter_cmd = cmd; + + if (!cmd) + goto out; + + if (changed_flags & FIF_ALLMULTI) + cmd->pass_all = !!(*total_flags & FIF_ALLMULTI); + + if (cmd->pass_all) + cmd->count = 0; + + iwl_mld_recalc_multicast_filter(mld); +out: + *total_flags = 0; +} + +static +void iwl_mld_mac80211_wake_tx_queue(struct ieee80211_hw *hw, + struct ieee80211_txq *txq) +{ + struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw); + struct iwl_mld_txq *mld_txq = iwl_mld_txq_from_mac80211(txq); + + if (likely(mld_txq->status.allocated) || !txq->sta) { + iwl_mld_tx_from_txq(mld, txq); + return; + } + + /* We don't support TSPEC tids. %IEEE80211_NUM_TIDS is for mgmt */ + if (txq->tid != IEEE80211_NUM_TIDS && txq->tid >= IWL_MAX_TID_COUNT) { + IWL_DEBUG_MAC80211(mld, "TID %d is not supported\n", txq->tid); + return; + } + + /* The worker will handle any packets we leave on the txq now */ + + spin_lock_bh(&mld->add_txqs_lock); + /* The list is being deleted only after the queue is fully allocated. 
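+ * Re-checking status.allocated under add_txqs_lock closes the race
+ * with the worker: if the queue was allocated between the unlocked
+ * check above and this point, the txq must not be queued a second
+ * time.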
*/
+ if (list_empty(&mld_txq->list) &&
+ /* recheck under lock, otherwise it can be added twice */
+ !mld_txq->status.allocated) {
+ list_add_tail(&mld_txq->list, &mld->txqs_to_add);
+ wiphy_work_queue(mld->wiphy, &mld->add_txqs_wk);
+ }
+ spin_unlock_bh(&mld->add_txqs_lock);
+}
+
+static void iwl_mld_teardown_tdls_peers(struct iwl_mld *mld)
+{
+ lockdep_assert_wiphy(mld->wiphy);
+
+ for (int i = 0; i < mld->fw->ucode_capa.num_stations; i++) {
+ struct ieee80211_link_sta *link_sta;
+ struct iwl_mld_sta *mld_sta;
+
+ link_sta = wiphy_dereference(mld->wiphy,
+ mld->fw_id_to_link_sta[i]);
+ if (IS_ERR_OR_NULL(link_sta))
+ continue;
+
+ if (!link_sta->sta->tdls)
+ continue;
+
+ mld_sta = iwl_mld_sta_from_mac80211(link_sta->sta);
+
+ ieee80211_tdls_oper_request(mld_sta->vif, link_sta->addr,
+ NL80211_TDLS_TEARDOWN,
+ WLAN_REASON_TDLS_TEARDOWN_UNSPECIFIED,
+ GFP_KERNEL);
+ }
+}
+
+static
+int iwl_mld_add_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+ struct iwl_mld_phy *phy = iwl_mld_phy_from_mac80211(ctx);
+ int fw_id = iwl_mld_allocate_fw_phy_id(mld);
+ int ret;
+
+ if (fw_id < 0)
+ return fw_id;
+
+ phy->mld = mld;
+ phy->fw_id = fw_id;
+ phy->chandef = *iwl_mld_get_chandef_from_chanctx(mld, ctx);
+
+ ret = iwl_mld_phy_fw_action(mld, ctx, FW_CTXT_ACTION_ADD);
+ if (ret) {
+ mld->used_phy_ids &= ~BIT(phy->fw_id);
+ return ret;
+ }
+
+ if (hweight8(mld->used_phy_ids) > 1)
+ iwl_mld_teardown_tdls_peers(mld);
+
+ return 0;
+}
+
+static
+void iwl_mld_remove_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+ struct iwl_mld_phy *phy = iwl_mld_phy_from_mac80211(ctx);
+
+ iwl_mld_phy_fw_action(mld, ctx, FW_CTXT_ACTION_REMOVE);
+ mld->used_phy_ids &= ~BIT(phy->fw_id);
+}
+
+static
+void iwl_mld_change_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx, u32 changed)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+ struct iwl_mld_phy *phy = iwl_mld_phy_from_mac80211(ctx);
+ struct cfg80211_chan_def *chandef =
+ iwl_mld_get_chandef_from_chanctx(mld, ctx);
+
+ /* We don't care about these */
+ if (!(changed & ~(IEEE80211_CHANCTX_CHANGE_RX_CHAINS |
+ IEEE80211_CHANCTX_CHANGE_RADAR |
+ IEEE80211_CHANCTX_CHANGE_CHANNEL)))
+ return;
+
+ /* Check if a FW update is required */
+
+ if (changed & IEEE80211_CHANCTX_CHANGE_AP)
+ goto update;
+
+ if (chandef->chan == phy->chandef.chan &&
+ chandef->center_freq1 == phy->chandef.center_freq1 &&
+ chandef->punctured == phy->chandef.punctured) {
+ /* Toggling between HT and non-HT on the same 20 MHz channel
+ * is a no-op.
+ */
+ if (phy->chandef.width == chandef->width ||
+ (phy->chandef.width <= NL80211_CHAN_WIDTH_20 &&
+ chandef->width <= NL80211_CHAN_WIDTH_20))
+ return;
+ }
+update:
+ phy->chandef = *chandef;
+
+ iwl_mld_phy_fw_action(mld, ctx, FW_CTXT_ACTION_MODIFY);
+}
+
+static u8
+iwl_mld_chandef_get_primary_80(struct cfg80211_chan_def *chandef)
+{
+ int data_start;
+ int control_start;
+ int bw;
+
+ if (chandef->width == NL80211_CHAN_WIDTH_320)
+ bw = 320;
+ else if (chandef->width == NL80211_CHAN_WIDTH_160)
+ bw = 160;
+ else
+ return 0;
+
+ /* the data portion is bw wide, so it starts half the width below
+ * the center frequency
+ */
+ data_start = chandef->center_freq1 - bw / 2;
+ /* the control channel is 20 MHz wide */
+ control_start = chandef->chan->center_freq - 10;
+
+ return (control_start - data_start) / 80;
+}
+
+static bool iwl_mld_can_activate_link(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link)
+{
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ struct iwl_mld_sta *mld_sta;
+ struct iwl_mld_link_sta *link_sta;
+
+ /* In association, we activate the assoc link before adding the STA. */
+ if (!mld_vif->ap_sta || !vif->cfg.assoc)
+ return true;
+
+ mld_sta = iwl_mld_sta_from_mac80211(mld_vif->ap_sta);
+
+ /* When switching links, we need to wait with the activation until the
+ * STA was added to the FW. It'll be activated in
+ * iwl_mld_update_link_stas.
+ */
+ link_sta = wiphy_dereference(mld->wiphy, mld_sta->link[link->link_id]);
+
+ /* In restart we can have a link_sta that doesn't exist in FW yet */
+ return link_sta && link_sta->in_fw;
+}
+
+static
+int iwl_mld_assign_vif_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+ struct iwl_mld_link *mld_link = iwl_mld_link_from_mac80211(link);
+ unsigned int n_active = iwl_mld_count_active_links(mld, vif);
+ int ret;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ if (WARN_ON(!mld_link))
+ return -EINVAL;
+
+ /* if the assigned one was not counted yet, count it now */
+ if (!rcu_access_pointer(mld_link->chan_ctx)) {
+ n_active++;
+
+ /* Track addition of non-BSS link */
+ if (ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_STATION) {
+ ret = iwl_mld_emlsr_check_non_bss_block(mld, 1);
+ if (ret)
+ return ret;
+ }
+ }
+
+ /* For AP, mac parameters such as HE support are updated at this stage. */
+ if (vif->type == NL80211_IFTYPE_AP) {
+ ret = iwl_mld_mac_fw_action(mld, vif, FW_CTXT_ACTION_MODIFY);
+
+ if (ret) {
+ IWL_ERR(mld, "failed to update MAC %pM\n", vif->addr);
+ return -EINVAL;
+ }
+ }
+
+ rcu_assign_pointer(mld_link->chan_ctx, ctx);
+
+ if (n_active > 1) {
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+
+ iwl_mld_leave_omi_bw_reduction(mld);
+
+ /* Indicate to mac80211 that EML is enabled */
+ vif->driver_flags |= IEEE80211_VIF_EML_ACTIVE;
+
+ if (vif->active_links & BIT(mld_vif->emlsr.selected_primary))
+ mld_vif->emlsr.primary = mld_vif->emlsr.selected_primary;
+ else
+ mld_vif->emlsr.primary = __ffs(vif->active_links);
+
+ iwl_dbg_tlv_time_point(&mld->fwrt, IWL_FW_INI_TIME_ESR_LINK_UP,
+ NULL);
+ }
+
+ /* First send the link command with the phy context ID.
+ * Now that we have the phy, we know the band and thus also the rates.
+ */
+ ret = iwl_mld_change_link_in_fw(mld, link,
+ LINK_CONTEXT_MODIFY_RATES_INFO);
+ if (ret)
+ goto err;
+
+ /* TODO: Initialize rate control for the AP station, since we might be
+ * doing a link switch here - we cannot initialize it before since
+ * this needs the phy context assigned (and in FW?), and we cannot
+ * do it later because it needs to be initialized as soon as we're
+ * able to TX on the link, i.e. when active.
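+ * In other words, rate-control init is tied to the phy context: the
+ * band, and with it the usable rate set, is only known once the
+ * chanctx is assigned.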
(task=link-switch) + */ + + /* Now activate the link */ + if (iwl_mld_can_activate_link(mld, vif, link)) { + ret = iwl_mld_activate_link(mld, link); + if (ret) + goto err; + } + + if (vif->type == NL80211_IFTYPE_STATION) + iwl_mld_send_ap_tx_power_constraint_cmd(mld, vif, link); + + if (vif->type == NL80211_IFTYPE_MONITOR) { + /* TODO: task=sniffer add sniffer station */ + mld->monitor.p80 = + iwl_mld_chandef_get_primary_80(&vif->bss_conf.chanreq.oper); + } + + return 0; +err: + RCU_INIT_POINTER(mld_link->chan_ctx, NULL); + return ret; +} + +static +void iwl_mld_unassign_vif_chanctx(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link, + struct ieee80211_chanctx_conf *ctx) +{ + struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw); + struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif); + struct iwl_mld_link *mld_link = iwl_mld_link_from_mac80211(link); + unsigned int n_active = iwl_mld_count_active_links(mld, vif); + + if (WARN_ON(!mld_link)) + return; + + /* Track removal of non-BSS link */ + if (ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_STATION) + iwl_mld_emlsr_check_non_bss_block(mld, -1); + + iwl_mld_deactivate_link(mld, link); + + /* TODO: task=sniffer remove sniffer station */ + + if (n_active > 1) { + /* Indicate to mac80211 that EML is disabled */ + vif->driver_flags &= ~IEEE80211_VIF_EML_ACTIVE; + + iwl_dbg_tlv_time_point(&mld->fwrt, + IWL_FW_INI_TIME_ESR_LINK_DOWN, + NULL); + } + + RCU_INIT_POINTER(mld_link->chan_ctx, NULL); + + /* in the non-MLO case, remove/re-add the link to clean up FW state. + * In MLO, it'll be done in drv_change_vif_link + */ + if (!ieee80211_vif_is_mld(vif) && !mld_vif->ap_sta && + !WARN_ON_ONCE(vif->cfg.assoc) && + vif->type != NL80211_IFTYPE_AP && !mld->fw_status.in_hw_restart) { + iwl_mld_remove_link(mld, link); + iwl_mld_add_link(mld, link); + } +} + +static +int iwl_mld_mac80211_set_rts_threshold(struct ieee80211_hw *hw, u32 value) +{ + return 0; +} + +static void +iwl_mld_link_info_changed_ap_ibss(struct iwl_mld *mld, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link, + u64 changes) +{ + u32 link_changes = 0; + + if (changes & BSS_CHANGED_ERP_SLOT) + link_changes |= LINK_CONTEXT_MODIFY_RATES_INFO; + + if (changes & (BSS_CHANGED_ERP_CTS_PROT | BSS_CHANGED_HT)) + link_changes |= LINK_CONTEXT_MODIFY_PROTECT_FLAGS; + + if (changes & (BSS_CHANGED_QOS | BSS_CHANGED_BANDWIDTH)) + link_changes |= LINK_CONTEXT_MODIFY_QOS_PARAMS; + + if (changes & BSS_CHANGED_HE_BSS_COLOR) + link_changes |= LINK_CONTEXT_MODIFY_HE_PARAMS; + + if (link_changes) + iwl_mld_change_link_in_fw(mld, link, link_changes); + + if (changes & BSS_CHANGED_BEACON) + iwl_mld_update_beacon_template(mld, vif, link); +} + +static +u32 iwl_mld_link_changed_mapping(struct iwl_mld *mld, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf, + u64 changes) +{ + u32 link_changes = 0; + bool has_he, has_eht; + + if (changes & BSS_CHANGED_QOS && vif->cfg.assoc && link_conf->qos) + link_changes |= LINK_CONTEXT_MODIFY_QOS_PARAMS; + + if (changes & (BSS_CHANGED_ERP_PREAMBLE | BSS_CHANGED_BASIC_RATES | + BSS_CHANGED_ERP_SLOT)) + link_changes |= LINK_CONTEXT_MODIFY_RATES_INFO; + + if (changes & (BSS_CHANGED_HT | BSS_CHANGED_ERP_CTS_PROT)) + link_changes |= LINK_CONTEXT_MODIFY_PROTECT_FLAGS; + + /* TODO: task=MLO check mac80211's HE flags and if command is needed + * every time there's a link change. Currently used flags are + * BSS_CHANGED_HE_OBSS_PD and BSS_CHANGED_HE_BSS_COLOR. 
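+ * For now, only assoc-time HE/EHT support (modulo the disable_11ax /
+ * disable_11be module parameters) is mapped below, to
+ * LINK_CONTEXT_MODIFY_HE_PARAMS.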
+ */ + has_he = link_conf->he_support && !iwlwifi_mod_params.disable_11ax; + has_eht = link_conf->eht_support && !iwlwifi_mod_params.disable_11be; + + if (vif->cfg.assoc && (has_he || has_eht)) { + IWL_DEBUG_MAC80211(mld, "Associated in HE mode\n"); + link_changes |= LINK_CONTEXT_MODIFY_HE_PARAMS; + } + + return link_changes; +} + +static void +iwl_mld_mac80211_link_info_changed_sta(struct iwl_mld *mld, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf, + u64 changes) +{ + u32 link_changes = iwl_mld_link_changed_mapping(mld, vif, link_conf, + changes); + + if (link_changes) + iwl_mld_change_link_in_fw(mld, link_conf, link_changes); + + if (changes & BSS_CHANGED_TPE) + iwl_mld_send_ap_tx_power_constraint_cmd(mld, vif, link_conf); + + if (changes & BSS_CHANGED_BEACON_INFO) + iwl_mld_update_mac_power(mld, vif, false); + + /* The firmware will wait quite a while after association before it + * starts filtering the beacons. We can safely enable beacon filtering + * upon CQM configuration, even if we didn't get a beacon yet. + */ + if (changes & (BSS_CHANGED_CQM | BSS_CHANGED_BEACON_INFO)) + iwl_mld_enable_beacon_filter(mld, link_conf, false); + + /* If we have used OMI before to reduce bandwidth to 80 MHz and then + * increased to 160 MHz again, and then the AP changes to 320 MHz, it + * will think that we're limited to 160 MHz right now. Update it by + * requesting a new OMI bandwidth. + */ + if (changes & BSS_CHANGED_BANDWIDTH) { + enum ieee80211_sta_rx_bandwidth bw; + + bw = ieee80211_chan_width_to_rx_bw(link_conf->chanreq.oper.width); + + iwl_mld_omi_ap_changed_bw(mld, link_conf, bw); + + } + + if (changes & BSS_CHANGED_BANDWIDTH) + iwl_mld_retry_emlsr(mld, vif); +} + +static int iwl_mld_update_mu_groups(struct iwl_mld *mld, + struct ieee80211_bss_conf *link_conf) +{ + struct iwl_mu_group_mgmt_cmd cmd = {}; + + BUILD_BUG_ON(sizeof(cmd.membership_status) != + sizeof(link_conf->mu_group.membership)); + BUILD_BUG_ON(sizeof(cmd.user_position) != + sizeof(link_conf->mu_group.position)); + + memcpy(cmd.membership_status, link_conf->mu_group.membership, + WLAN_MEMBERSHIP_LEN); + memcpy(cmd.user_position, link_conf->mu_group.position, + WLAN_USER_POSITION_LEN); + + return iwl_mld_send_cmd_pdu(mld, + WIDE_ID(DATA_PATH_GROUP, + UPDATE_MU_GROUPS_CMD), + &cmd); +} + +static void +iwl_mld_mac80211_link_info_changed(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf, + u64 changes) +{ + struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw); + + switch (vif->type) { + case NL80211_IFTYPE_STATION: + iwl_mld_mac80211_link_info_changed_sta(mld, vif, link_conf, + changes); + break; + case NL80211_IFTYPE_AP: + case NL80211_IFTYPE_ADHOC: + iwl_mld_link_info_changed_ap_ibss(mld, vif, link_conf, + changes); + break; + case NL80211_IFTYPE_MONITOR: + /* The firmware tracks this on its own in STATION mode, but + * obviously not in sniffer mode. + */ + if (changes & BSS_CHANGED_MU_GROUPS) + iwl_mld_update_mu_groups(mld, link_conf); + break; + default: + /* shouldn't happen */ + WARN_ON_ONCE(1); + } + + /* We now know our BSSID, we can configure the MAC context with + * eht_support if needed. 
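+ * (The MAC context carries peer-dependent capabilities, hence the
+ * FW_CTXT_ACTION_MODIFY below once the BSSID is known.)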
+ */ + if (changes & BSS_CHANGED_BSSID) + iwl_mld_mac_fw_action(mld, vif, FW_CTXT_ACTION_MODIFY); + + if (changes & BSS_CHANGED_TXPOWER) + iwl_mld_set_tx_power(mld, link_conf, link_conf->txpower); +} + +static void +iwl_mld_smps_wa(struct iwl_mld *mld, struct ieee80211_vif *vif, bool enable) +{ + struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif); + + /* Send the device-level power commands since the + * firmware checks the POWER_TABLE_CMD's POWER_SAVE_EN bit to + * determine SMPS mode. + */ + if (mld_vif->ps_disabled == !enable) + return; + + mld_vif->ps_disabled = !enable; + + iwl_mld_update_device_power(mld, false); +} + +static +void iwl_mld_mac80211_vif_cfg_changed(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + u64 changes) +{ + struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw); + int ret; + + lockdep_assert_wiphy(mld->wiphy); + + if (vif->type != NL80211_IFTYPE_STATION) + return; + + if (changes & BSS_CHANGED_ASSOC) { + ret = iwl_mld_mac_fw_action(mld, vif, FW_CTXT_ACTION_MODIFY); + if (ret) + IWL_ERR(mld, "failed to update context\n"); + + if (vif->cfg.assoc) { + /* Clear statistics to get clean beacon counter, and + * ask for periodic statistics, as they are needed for + * link selection and RX OMI decisions. + */ + iwl_mld_clear_stats_in_fw(mld); + iwl_mld_request_periodic_fw_stats(mld, true); + + iwl_mld_set_vif_associated(mld, vif); + } else { + iwl_mld_request_periodic_fw_stats(mld, false); + } + } + + if (changes & BSS_CHANGED_PS) { + iwl_mld_smps_wa(mld, vif, vif->cfg.ps); + iwl_mld_update_mac_power(mld, vif, false); + } + + /* TODO: task=MLO BSS_CHANGED_MLD_VALID_LINKS/CHANGED_MLD_TTLM */ +} + +static int +iwl_mld_mac80211_hw_scan(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_scan_request *hw_req) +{ + struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw); + + if (WARN_ON(!hw_req->req.n_channels || + hw_req->req.n_channels > + mld->fw->ucode_capa.n_scan_channels)) + return -EINVAL; + + return iwl_mld_regular_scan_start(mld, vif, &hw_req->req, &hw_req->ies); +} + +static void +iwl_mld_mac80211_cancel_hw_scan(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw); + + /* Due to a race condition, it's possible that mac80211 asks + * us to stop a hw_scan when it's already stopped. This can + * happen, for instance, if we stopped the scan ourselves, + * called ieee80211_scan_completed() and the userspace called + * cancel scan before ieee80211_scan_work() could run. + * To handle that, simply return if the scan is not running. + */ + if (mld->scan.status & IWL_MLD_SCAN_REGULAR) + iwl_mld_scan_stop(mld, IWL_MLD_SCAN_REGULAR, true); +} + +static int +iwl_mld_mac80211_sched_scan_start(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct cfg80211_sched_scan_request *req, + struct ieee80211_scan_ies *ies) +{ + struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw); + + return iwl_mld_sched_scan_start(mld, vif, req, ies, IWL_MLD_SCAN_SCHED); +} + +static int +iwl_mld_mac80211_sched_scan_stop(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw); + + /* Due to a race condition, it's possible that mac80211 asks + * us to stop a sched_scan when it's already stopped. This + * can happen, for instance, if we stopped the scan ourselves, + * called ieee80211_sched_scan_stopped() and the userspace called + * stop sched scan before ieee80211_sched_scan_stopped_work() + * could run. To handle this, simply return if the scan is + * not running. 
+ */
+ if (!(mld->scan.status & IWL_MLD_SCAN_SCHED))
+ return 0;
+
+ return iwl_mld_scan_stop(mld, IWL_MLD_SCAN_SCHED, false);
+}
+
+static void
+iwl_mld_restart_complete_vif(void *data, u8 *mac, struct ieee80211_vif *vif)
+{
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ struct ieee80211_bss_conf *link_conf;
+ struct iwl_mld *mld = data;
+ int link_id;
+
+ for_each_vif_active_link(vif, link_conf, link_id) {
+ enum ieee80211_sta_rx_bandwidth bw;
+ struct iwl_mld_link *mld_link;
+
+ mld_link = wiphy_dereference(mld->wiphy,
+ mld_vif->link[link_id]);
+
+ if (WARN_ON_ONCE(!mld_link))
+ continue;
+
+ bw = mld_link->rx_omi.bw_in_progress;
+ if (bw)
+ iwl_mld_change_link_omi_bw(mld, link_conf, bw);
+ }
+}
+
+static void
+iwl_mld_mac80211_reconfig_complete(struct ieee80211_hw *hw,
+ enum ieee80211_reconfig_type reconfig_type)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+
+ switch (reconfig_type) {
+ case IEEE80211_RECONFIG_TYPE_RESTART:
+ mld->fw_status.in_hw_restart = false;
+ iwl_mld_send_recovery_cmd(mld, ERROR_RECOVERY_END_OF_RECOVERY);
+
+ ieee80211_iterate_interfaces(mld->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mld_restart_complete_vif, mld);
+
+ iwl_trans_finish_sw_reset(mld->trans);
+ /* no need to lock, adding in parallel would schedule too */
+ if (!list_empty(&mld->txqs_to_add))
+ wiphy_work_queue(mld->wiphy, &mld->add_txqs_wk);
+
+ IWL_INFO(mld, "restart completed\n");
+ break;
+ case IEEE80211_RECONFIG_TYPE_SUSPEND:
+ break;
+ }
+}
+
+static
+void iwl_mld_mac80211_mgd_prepare_tx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_prep_tx_info *info)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+ u32 duration = IWL_MLD_SESSION_PROTECTION_ASSOC_TIME_MS;
+
+ /* After a successful association the connection is established
+ * and we can rely on the quota to send the disassociation frame.
+ */
+ if (info->was_assoc)
+ return;
+
+ if (info->duration > duration)
+ duration = info->duration;
+
+ iwl_mld_schedule_session_protection(mld, vif, duration,
+ IWL_MLD_SESSION_PROTECTION_MIN_TIME_MS,
+ info->link_id);
+}
+
+static
+void iwl_mld_mac_mgd_complete_tx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_prep_tx_info *info)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+
+ /* Successful authentication is the only case that requires letting
+ * the session protection live on, since we'll need it for the
+ * upcoming association. For all the other cases, we need to cancel
+ * the session protection.
+ * After successful association the connection is established and
+ * further mgd tx can rely on the quota.
+ */
+ if (info->success && info->subtype == IEEE80211_STYPE_AUTH)
+ return;
+
+ /* The firmware will be on medium after we configure the vif as
+ * associated. Removing the session protection allows the firmware
+ * to stop being on medium. In order to ensure the continuity of our
+ * presence on medium, we need first to configure the vif as associated
+ * and only then, remove the session protection.
+ * Currently, mac80211 calls vif_cfg_changed() first and then,
+ * drv_mgd_complete_tx(). Ensure that this assumption stays true with
+ * a warning.
+ */
+ WARN_ON(info->success &&
+ (info->subtype == IEEE80211_STYPE_ASSOC_REQ ||
+ info->subtype == IEEE80211_STYPE_REASSOC_REQ) &&
+ !vif->cfg.assoc);
+
+ iwl_mld_cancel_session_protection(mld, vif, info->link_id);
+}
+
+static int
+iwl_mld_mac80211_conf_tx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ unsigned int link_id, u16 ac,
+ const struct ieee80211_tx_queue_params *params)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ struct iwl_mld_link *link;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ link = iwl_mld_link_dereference_check(mld_vif, link_id);
+ if (!link)
+ return -EINVAL;
+
+ link->queue_params[ac] = *params;
+
+ /* No need to update right away, we'll get BSS_CHANGED_QOS.
+ * The exception is P2P_DEVICE interface which needs immediate update.
+ */
+ if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
+ iwl_mld_change_link_in_fw(mld, &vif->bss_conf,
+ LINK_CONTEXT_MODIFY_QOS_PARAMS);
+
+ return 0;
+}
+
+static void iwl_mld_set_uapsd(struct iwl_mld *mld, struct ieee80211_vif *vif)
+{
+ vif->driver_flags &= ~IEEE80211_VIF_SUPPORTS_UAPSD;
+
+ if (vif->type != NL80211_IFTYPE_STATION)
+ return;
+
+ if (vif->p2p &&
+ !(iwlwifi_mod_params.uapsd_disable & IWL_DISABLE_UAPSD_P2P_CLIENT))
+ vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD;
+
+ if (!vif->p2p &&
+ !(iwlwifi_mod_params.uapsd_disable & IWL_DISABLE_UAPSD_BSS))
+ vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD;
+}
+
+int iwl_mld_tdls_sta_count(struct iwl_mld *mld)
+{
+ int count = 0;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ for (int i = 0; i < mld->fw->ucode_capa.num_stations; i++) {
+ struct ieee80211_link_sta *link_sta;
+
+ link_sta = wiphy_dereference(mld->wiphy,
+ mld->fw_id_to_link_sta[i]);
+ if (IS_ERR_OR_NULL(link_sta))
+ continue;
+
+ if (!link_sta->sta->tdls)
+ continue;
+
+ count++;
+ }
+
+ return count;
+}
+
+static void iwl_mld_check_he_obss_narrow_bw_ru_iter(struct wiphy *wiphy,
+ struct cfg80211_bss *bss,
+ void *_data)
+{
+ bool *tolerated = _data;
+ const struct cfg80211_bss_ies *ies;
+ const struct element *elem;
+
+ rcu_read_lock();
+ ies = rcu_dereference(bss->ies);
+ elem = cfg80211_find_elem(WLAN_EID_EXT_CAPABILITY, ies->data,
+ ies->len);
+
+ /* capability octet 10 must be present before it can be read */
+ if (!elem || elem->datalen <= 10 ||
+ !(elem->data[10] &
+ WLAN_EXT_CAPA10_OBSS_NARROW_BW_RU_TOLERANCE_SUPPORT)) {
+ *tolerated = false;
+ }
+ rcu_read_unlock();
+}
+
+static void
+iwl_mld_check_he_obss_narrow_bw_ru(struct iwl_mld *mld,
+ struct iwl_mld_link *mld_link,
+ struct ieee80211_bss_conf *link_conf)
+{
+ bool tolerated = true;
+
+ if (WARN_ON_ONCE(!link_conf->chanreq.oper.chan))
+ return;
+
+ if (!(link_conf->chanreq.oper.chan->flags & IEEE80211_CHAN_RADAR)) {
+ mld_link->he_ru_2mhz_block = false;
+ return;
+ }
+
+ cfg80211_bss_iter(mld->wiphy, &link_conf->chanreq.oper,
+ iwl_mld_check_he_obss_narrow_bw_ru_iter, &tolerated);
+
+ /* If there is at least one AP on the radar channel that cannot
+ * tolerate 26-tone RU UL OFDMA transmissions using HE TB PPDU,
+ * block such transmissions for the entire link.
+ */
+ mld_link->he_ru_2mhz_block = !tolerated;
+}
+
+static void iwl_mld_link_set_2mhz_block(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct ieee80211_link_sta *link_sta;
+ unsigned int link_id;
+
+ for_each_sta_active_link(vif, sta, link_sta, link_id) {
+ struct ieee80211_bss_conf *link_conf =
+ link_conf_dereference_protected(vif, link_id);
+ struct iwl_mld_link *mld_link =
+ iwl_mld_link_from_mac80211(link_conf);
+
+ if (WARN_ON(!link_conf || !mld_link))
+ continue;
+
+ if (link_sta->he_cap.has_he)
+ iwl_mld_check_he_obss_narrow_bw_ru(mld, mld_link,
+ link_conf);
+ }
+}
+
+static int iwl_mld_move_sta_state_up(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ enum ieee80211_sta_state old_state,
+ enum ieee80211_sta_state new_state)
+{
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ int tdls_count = 0;
+ int ret;
+
+ if (old_state == IEEE80211_STA_NOTEXIST &&
+ new_state == IEEE80211_STA_NONE) {
+ if (sta->tdls) {
+ if (vif->p2p || hweight8(mld->used_phy_ids) != 1)
+ return -EBUSY;
+
+ tdls_count = iwl_mld_tdls_sta_count(mld);
+ if (tdls_count >= IWL_TDLS_STA_COUNT)
+ return -EBUSY;
+ }
+
+ /*
+ * If this is the first STA (i.e. the AP) it won't do
+ * anything, otherwise must leave for any new STA on
+ * any other interface, or for TDLS, etc.
+ * Need to call this _before_ adding the STA so it can
+ * look up the one STA to use to ask mac80211 to leave
+ * OMI; in the unlikely event that adding the new STA
+ * then fails we'll just re-enter OMI later (via the
+ * statistics notification handling.)
+ */
+ iwl_mld_leave_omi_bw_reduction(mld);
+
+ ret = iwl_mld_add_sta(mld, sta, vif, STATION_TYPE_PEER);
+ if (ret)
+ return ret;
+
+ /* just added first TDLS STA, so disable PM */
+ if (sta->tdls && tdls_count == 0)
+ iwl_mld_update_mac_power(mld, vif, false);
+
+ if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls)
+ mld_vif->ap_sta = sta;
+
+ /* Initialize TLC here already - this really tells
+ * the firmware only what the supported legacy rates are
+ * (maybe) since it's initialized already from what the
+ * AP advertised in the beacon/probe response. This will
+ * allow the firmware to send auth/assoc frames with one
+ * of the supported rates already, rather than having to
+ * use a mandatory rate.
+ * If we're the AP, we'll just assume mandatory rates at
+ * this point, but we know nothing about the STA anyway.
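+ * (e.g. the auth frame can then already go out at a supported OFDM
+ * rate rather than at the lowest mandatory rate.)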
+ */
+ iwl_mld_config_tlc(mld, vif, sta);
+
+ return ret;
+ } else if (old_state == IEEE80211_STA_NONE &&
+ new_state == IEEE80211_STA_AUTH) {
+ iwl_mld_set_uapsd(mld, vif);
+ return 0;
+ } else if (old_state == IEEE80211_STA_AUTH &&
+ new_state == IEEE80211_STA_ASSOC) {
+ ret = iwl_mld_update_all_link_stations(mld, sta);
+
+ if (vif->type == NL80211_IFTYPE_STATION)
+ iwl_mld_link_set_2mhz_block(mld, vif, sta);
+ /* Now the link_sta's capabilities are set, update the FW */
+ iwl_mld_config_tlc(mld, vif, sta);
+
+ if (vif->type == NL80211_IFTYPE_AP) {
+ /* Update MAC_CFG_FILTER_ACCEPT_BEACON if at least
+ * one sta is associated
+ */
+ if (++mld_vif->num_associated_stas == 1)
+ ret = iwl_mld_mac_fw_action(mld, vif,
+ FW_CTXT_ACTION_MODIFY);
+ }
+
+ return ret;
+ } else if (old_state == IEEE80211_STA_ASSOC &&
+ new_state == IEEE80211_STA_AUTHORIZED) {
+ ret = 0;
+
+ if (!sta->tdls) {
+ mld_vif->authorized = true;
+
+ /* Ensure any block due to a non-BSS link is synced */
+ iwl_mld_emlsr_check_non_bss_block(mld, 0);
+
+ /* Block EMLSR until a certain throughput is reached */
+ if (!mld->fw_status.in_hw_restart &&
+ IWL_MLD_ENTER_EMLSR_TPT_THRESH > 0)
+ iwl_mld_block_emlsr(mld_vif->mld, vif,
+ IWL_MLD_EMLSR_BLOCKED_TPT,
+ 0);
+
+ /* clear COEX_HIGH_PRIORITY_ENABLE */
+ ret = iwl_mld_mac_fw_action(mld, vif,
+ FW_CTXT_ACTION_MODIFY);
+ if (ret)
+ return ret;
+ iwl_mld_smps_wa(mld, vif, vif->cfg.ps);
+ }
+
+ /* MFP is set by default before the station is authorized.
+ * Clear it here in case it's not used.
+ */
+ if (!sta->mfp)
+ ret = iwl_mld_update_all_link_stations(mld, sta);
+
+ /* We can use wide bandwidth now, not only 20 MHz */
+ iwl_mld_config_tlc(mld, vif, sta);
+
+ return ret;
+ } else {
+ return -EINVAL;
+ }
+}
+
+static int iwl_mld_move_sta_state_down(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ enum ieee80211_sta_state old_state,
+ enum ieee80211_sta_state new_state)
+{
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+
+ if (old_state == IEEE80211_STA_AUTHORIZED &&
+ new_state == IEEE80211_STA_ASSOC) {
+ if (!sta->tdls) {
+ mld_vif->authorized = false;
+
+ memset(&mld_vif->emlsr.zeroed_on_not_authorized, 0,
+ sizeof(mld_vif->emlsr.zeroed_on_not_authorized));
+
+ wiphy_delayed_work_cancel(mld->wiphy,
+ &mld_vif->emlsr.prevent_done_wk);
+ wiphy_delayed_work_cancel(mld->wiphy,
+ &mld_vif->emlsr.tmp_non_bss_done_wk);
+ wiphy_work_cancel(mld->wiphy, &mld_vif->emlsr.unblock_tpt_wk);
+ wiphy_delayed_work_cancel(mld->wiphy,
+ &mld_vif->emlsr.check_tpt_wk);
+
+ iwl_mld_reset_cca_40mhz_workaround(mld, vif);
+ iwl_mld_smps_wa(mld, vif, true);
+ }
+
+ /* once we move into assoc state, need to update the FW to
+ * stop using wide bandwidth
+ */
+ iwl_mld_config_tlc(mld, vif, sta);
+ } else if (old_state == IEEE80211_STA_ASSOC &&
+ new_state == IEEE80211_STA_AUTH) {
+ if (vif->type == NL80211_IFTYPE_AP &&
+ !WARN_ON(!mld_vif->num_associated_stas)) {
+ /* Update MAC_CFG_FILTER_ACCEPT_BEACON if the last sta
+ * is disassociating
+ */
+ if (--mld_vif->num_associated_stas == 0)
+ iwl_mld_mac_fw_action(mld, vif,
+ FW_CTXT_ACTION_MODIFY);
+ }
+ } else if (old_state == IEEE80211_STA_AUTH &&
+ new_state == IEEE80211_STA_NONE) {
+ /* nothing */
+ } else if (old_state == IEEE80211_STA_NONE &&
+ new_state == IEEE80211_STA_NOTEXIST) {
+ iwl_mld_remove_sta(mld, sta);
+
+ if (sta->tdls && iwl_mld_tdls_sta_count(mld) == 0) {
+ /* just removed last TDLS STA, so enable PM */
+ iwl_mld_update_mac_power(mld, vif, false);
+ }
+ } else {
+ return -EINVAL;
+ }
+
+ /* a valid state transition was handled above */
+
return 0; +} + +static int iwl_mld_mac80211_sta_state(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + enum ieee80211_sta_state old_state, + enum ieee80211_sta_state new_state) +{ + struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw); + struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta); + + IWL_DEBUG_MAC80211(mld, "station %pM state change %d->%d\n", + sta->addr, old_state, new_state); + + mld_sta->sta_state = new_state; + + if (old_state < new_state) + return iwl_mld_move_sta_state_up(mld, vif, sta, old_state, + new_state); + else + return iwl_mld_move_sta_state_down(mld, vif, sta, old_state, + new_state); +} + +static void iwl_mld_mac80211_flush(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + u32 queues, bool drop) +{ + struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw); + + /* Make sure we're done with the deferred traffic before flushing */ + iwl_mld_add_txq_list(mld); + + for (int i = 0; i < mld->fw->ucode_capa.num_stations; i++) { + struct ieee80211_link_sta *link_sta = + wiphy_dereference(mld->wiphy, + mld->fw_id_to_link_sta[i]); + + if (IS_ERR_OR_NULL(link_sta)) + continue; + + /* Check that the sta belongs to the given vif */ + if (vif && vif != iwl_mld_sta_from_mac80211(link_sta->sta)->vif) + continue; + + if (drop) + iwl_mld_flush_sta_txqs(mld, link_sta->sta); + else + iwl_mld_wait_sta_txqs_empty(mld, link_sta->sta); + } +} + +static void iwl_mld_mac80211_flush_sta(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw); + + iwl_mld_flush_sta_txqs(mld, sta); +} + +static int +iwl_mld_mac80211_ampdu_action(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_ampdu_params *params) +{ + struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw); + struct ieee80211_sta *sta = params->sta; + enum ieee80211_ampdu_mlme_action action = params->action; + u16 tid = params->tid; + u16 ssn = params->ssn; + u16 buf_size = params->buf_size; + u16 timeout = params->timeout; + int ret; + + IWL_DEBUG_HT(mld, "A-MPDU action on addr %pM tid: %d action: %d\n", + sta->addr, tid, action); + + switch (action) { + case IEEE80211_AMPDU_RX_START: + ret = iwl_mld_ampdu_rx_start(mld, sta, tid, ssn, buf_size, + timeout); + break; + case IEEE80211_AMPDU_RX_STOP: + ret = iwl_mld_ampdu_rx_stop(mld, sta, tid); + break; + default: + /* The mac80211 TX_AMPDU_SETUP_IN_HW flag is set for all + * devices, since all support TX A-MPDU offload in hardware. + * Therefore, no TX action should be requested here. + */ + WARN_ON_ONCE(1); + return -EINVAL; + } + + return ret; +} + +static bool iwl_mld_can_hw_csum(struct sk_buff *skb) +{ + u8 protocol = ip_hdr(skb)->protocol; + + return protocol == IPPROTO_TCP || protocol == IPPROTO_UDP; +} + +static bool iwl_mld_mac80211_can_aggregate(struct ieee80211_hw *hw, + struct sk_buff *head, + struct sk_buff *skb) +{ + if (!IS_ENABLED(CONFIG_INET)) + return false; + + /* For now don't aggregate IPv6 in AMSDU */ + if (skb->protocol != htons(ETH_P_IP)) + return false; + + /* Allow aggregation only if both frames have the same HW csum offload + * capability, ensuring consistent HW or SW csum handling in A-MSDU. 
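+ * (e.g. two TCP frames, or a TCP and a UDP frame, may be aggregated;
+ * TCP together with, say, ICMP may not, since only TCP and UDP are
+ * checksummed in HW here.)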
+ */
+ return iwl_mld_can_hw_csum(skb) == iwl_mld_can_hw_csum(head);
+}
+
+static void iwl_mld_mac80211_sync_rx_queues(struct ieee80211_hw *hw)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+
+ iwl_mld_sync_rx_queues(mld, IWL_MLD_RXQ_EMPTY, NULL, 0);
+}
+
+static void iwl_mld_sta_rc_update(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_link_sta *link_sta,
+ u32 changed)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+
+ if (changed & (IEEE80211_RC_BW_CHANGED |
+ IEEE80211_RC_SUPP_RATES_CHANGED |
+ IEEE80211_RC_NSS_CHANGED)) {
+ struct ieee80211_bss_conf *link =
+ link_conf_dereference_check(vif, link_sta->link_id);
+
+ if (WARN_ON(!link))
+ return;
+
+ iwl_mld_config_tlc_link(mld, vif, link, link_sta);
+ }
+}
+
+static void iwl_mld_set_wakeup(struct ieee80211_hw *hw, bool enabled)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+
+ device_set_wakeup_enable(mld->trans->dev, enabled);
+}
+
+/* Returns 0 on success, or 1 if we failed to suspend with wowlan:
+ * if the circumstances didn't satisfy the conditions for suspension
+ * with wowlan, mac80211 will use the no_wowlan flow.
+ * If an error occurred, we update the trans status and state here,
+ * and the result will be stopping the FW.
+ */
+static int
+iwl_mld_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+ int ret;
+
+ iwl_fw_runtime_suspend(&mld->fwrt);
+
+ ret = iwl_mld_wowlan_suspend(mld, wowlan);
+ if (ret) {
+ if (ret < 0) {
+ mld->trans->state = IWL_TRANS_NO_FW;
+ set_bit(STATUS_FW_ERROR, &mld->trans->status);
+ }
+ return 1;
+ }
+
+ if (iwl_mld_no_wowlan_suspend(mld))
+ return 1;
+
+ return 0;
+}
+
+static int iwl_mld_resume(struct ieee80211_hw *hw)
+{
+ struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw);
+ int ret;
+
+ ret = iwl_mld_wowlan_resume(mld);
+ if (ret)
+ return ret;
+
+ iwl_fw_runtime_resume(&mld->fwrt);
+
+ iwl_mld_low_latency_restart(mld);
+
+ return 0;
+}
+
+static int iwl_mld_alloc_ptk_pn(struct iwl_mld *mld,
+ struct iwl_mld_sta *mld_sta,
+ struct ieee80211_key_conf *key,
+ struct iwl_mld_ptk_pn **ptk_pn)
+{
+ u8 num_rx_queues = mld->trans->num_rx_queues;
+ int keyidx = key->keyidx;
+ struct ieee80211_key_seq seq;
+
+ if (WARN_ON(keyidx >= ARRAY_SIZE(mld_sta->ptk_pn)))
+ return -EINVAL;
+
+ WARN_ON(rcu_access_pointer(mld_sta->ptk_pn[keyidx]));
+ *ptk_pn = kzalloc(struct_size(*ptk_pn, q, num_rx_queues),
+ GFP_KERNEL);
+ if (!*ptk_pn)
+ return -ENOMEM;
+
+ for (u8 tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
+ ieee80211_get_key_rx_seq(key, tid, &seq);
+ for (u8 q = 0; q < num_rx_queues; q++)
+ memcpy((*ptk_pn)->q[q].pn[tid], seq.ccmp.pn,
+ IEEE80211_CCMP_PN_LEN);
+ }
+
+ rcu_assign_pointer(mld_sta->ptk_pn[keyidx], *ptk_pn);
+
+ return 0;
+}
+
+static int iwl_mld_set_key_add(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ struct iwl_mld_sta *mld_sta =
+ sta ? iwl_mld_sta_from_mac80211(sta) : NULL;
+ struct iwl_mld_ptk_pn *ptk_pn = NULL;
+ int keyidx = key->keyidx;
+ int ret;
+
+ /* Will be set to 0 if added successfully */
+ key->hw_key_idx = STA_KEY_IDX_INVALID;
+
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ IWL_DEBUG_MAC80211(mld, "Use SW encryption for WEP\n");
+ return -EOPNOTSUPP;
+ case WLAN_CIPHER_SUITE_TKIP:
+ if (vif->type == NL80211_IFTYPE_STATION) {
+ key->flags |= IEEE80211_KEY_FLAG_PUT_MIC_SPACE;
+ break;
+ }
+ IWL_DEBUG_MAC80211(mld, "Use SW encryption for TKIP\n");
+ return -EOPNOTSUPP;
+ case WLAN_CIPHER_SUITE_CCMP:
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+ case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ if (vif->type == NL80211_IFTYPE_STATION &&
+ (keyidx == 6 || keyidx == 7))
+ rcu_assign_pointer(mld_vif->bigtks[keyidx - 6], key);
+
+ /* After exiting from RFKILL, hostapd configures the GTK/IGTK before
+ * the AP is started, but those keys can't be sent to the FW before
+ * the MCAST/BCAST STAs are added to it (which happens upon AP start).
+ * Store them here to be sent later when the AP is started.
+ */
+ if ((vif->type == NL80211_IFTYPE_ADHOC ||
+ vif->type == NL80211_IFTYPE_AP) && !sta &&
+ !mld_vif->ap_ibss_active)
+ return iwl_mld_store_ap_early_key(mld, key, mld_vif);
+
+ if (!mld->fw_status.in_hw_restart && mld_sta &&
+ key->flags & IEEE80211_KEY_FLAG_PAIRWISE &&
+ (key->cipher == WLAN_CIPHER_SUITE_CCMP ||
+ key->cipher == WLAN_CIPHER_SUITE_GCMP ||
+ key->cipher == WLAN_CIPHER_SUITE_GCMP_256)) {
+ ret = iwl_mld_alloc_ptk_pn(mld, mld_sta, key, &ptk_pn);
+ if (ret)
+ return ret;
+ }
+
+ IWL_DEBUG_MAC80211(mld, "set hwcrypto key (sta:%pM, id:%d)\n",
+ sta ? sta->addr : NULL, keyidx);
+
+ ret = iwl_mld_add_key(mld, vif, sta, key);
+ if (ret) {
+ IWL_WARN(mld, "set key failed (%d)\n", ret);
+ if (ptk_pn) {
+ RCU_INIT_POINTER(mld_sta->ptk_pn[keyidx], NULL);
+ kfree(ptk_pn);
+ }
+
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static void iwl_mld_set_key_remove(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ struct iwl_mld_sta *mld_sta =
+ sta ?
iwl_mld_sta_from_mac80211(sta) : NULL; + int keyidx = key->keyidx; + + if (vif->type == NL80211_IFTYPE_STATION && + (keyidx == 6 || keyidx == 7)) + RCU_INIT_POINTER(mld_vif->bigtks[keyidx - 6], NULL); + + if (mld_sta && key->flags & IEEE80211_KEY_FLAG_PAIRWISE && + (key->cipher == WLAN_CIPHER_SUITE_CCMP || + key->cipher == WLAN_CIPHER_SUITE_GCMP || + key->cipher == WLAN_CIPHER_SUITE_GCMP_256)) { + struct iwl_mld_ptk_pn *ptk_pn; + + if (WARN_ON(keyidx >= ARRAY_SIZE(mld_sta->ptk_pn))) + return; + + ptk_pn = wiphy_dereference(mld->wiphy, + mld_sta->ptk_pn[keyidx]); + RCU_INIT_POINTER(mld_sta->ptk_pn[keyidx], NULL); + if (!WARN_ON(!ptk_pn)) + kfree_rcu(ptk_pn, rcu_head); + } + + /* if this key was stored to be added later to the FW - free it here */ + if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) + iwl_mld_free_ap_early_key(mld, key, mld_vif); + + /* We already removed it */ + if (key->hw_key_idx == STA_KEY_IDX_INVALID) + return; + + IWL_DEBUG_MAC80211(mld, "disable hwcrypto key\n"); + + iwl_mld_remove_key(mld, vif, sta, key); +} + +static int iwl_mld_mac80211_set_key(struct ieee80211_hw *hw, + enum set_key_cmd cmd, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ieee80211_key_conf *key) +{ + struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw); + int ret; + + switch (cmd) { + case SET_KEY: + ret = iwl_mld_set_key_add(mld, vif, sta, key); + if (ret) + ret = -EOPNOTSUPP; + break; + case DISABLE_KEY: + iwl_mld_set_key_remove(mld, vif, sta, key); + ret = 0; + break; + default: + ret = -EINVAL; + break; + } + + return ret; +} + +static int +iwl_mld_pre_channel_switch(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_channel_switch *chsw) +{ + struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw); + struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif); + struct iwl_mld_link *mld_link = + iwl_mld_link_dereference_check(mld_vif, chsw->link_id); + u8 primary; + int selected; + + lockdep_assert_wiphy(mld->wiphy); + + if (WARN_ON(!mld_link)) + return -EINVAL; + + IWL_DEBUG_MAC80211(mld, "pre CSA to freq %d\n", + chsw->chandef.center_freq1); + + if (!iwl_mld_emlsr_active(vif)) + return 0; + + primary = iwl_mld_get_primary_link(vif); + + /* stay on the primary link unless it is undergoing a CSA with quiet */ + if (chsw->link_id == primary && chsw->block_tx) + selected = iwl_mld_get_other_link(vif, primary); + else + selected = primary; + + /* Remember to tell the firmware that this link can't tx + * Note that this logic seems to be unrelated to emlsr, but it + * really is needed only when emlsr is active. When we have a + * single link, the firmware will handle all this on its own. + * In multi-link scenarios, we can learn about the CSA from + * another link and this logic is too complex for the firmware + * to track. + * Since we want to de-activate the link that got a CSA with mode=1, + * we need to tell the firmware not to send any frame on that link + * as the firmware may not be aware that link is under a CSA + * with mode=1 (no Tx allowed). 
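+ * The silent_deactivation flag set below records that state; it is
+ * expected to be clear again by the time post_channel_switch runs,
+ * and abort_channel_switch resets it explicitly.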
+ */ + mld_link->silent_deactivation = chsw->block_tx; + iwl_mld_exit_emlsr(mld, vif, IWL_MLD_EMLSR_EXIT_CSA, selected); + + return 0; +} + +static void +iwl_mld_channel_switch(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_channel_switch *chsw) +{ + struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw); + + /* By implementing this operation, we prevent mac80211 from + * starting its own channel switch timer, so that we can call + * ieee80211_chswitch_done() ourselves at the right time + * (Upon receiving the channel_switch_start notification from the fw) + */ + IWL_DEBUG_MAC80211(mld, + "dummy channel switch op\n"); +} + +static int +iwl_mld_post_channel_switch(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf) +{ + struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw); + struct iwl_mld_link *mld_link = iwl_mld_link_from_mac80211(link_conf); + + lockdep_assert_wiphy(mld->wiphy); + + WARN_ON(mld_link->silent_deactivation); + + return 0; +} + +static void +iwl_mld_abort_channel_switch(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf) +{ + struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw); + struct iwl_mld_link *mld_link = iwl_mld_link_from_mac80211(link_conf); + + IWL_DEBUG_MAC80211(mld, + "abort channel switch op\n"); + mld_link->silent_deactivation = false; +} + +static int +iwl_mld_switch_vif_chanctx_swap(struct ieee80211_hw *hw, + struct ieee80211_vif_chanctx_switch *vifs) +{ + struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw); + int ret; + + iwl_mld_unassign_vif_chanctx(hw, vifs[0].vif, vifs[0].link_conf, + vifs[0].old_ctx); + iwl_mld_remove_chanctx(hw, vifs[0].old_ctx); + + ret = iwl_mld_add_chanctx(hw, vifs[0].new_ctx); + if (ret) { + IWL_ERR(mld, "failed to add new_ctx during channel switch\n"); + goto out_reassign; + } + + ret = iwl_mld_assign_vif_chanctx(hw, vifs[0].vif, vifs[0].link_conf, + vifs[0].new_ctx); + if (ret) { + IWL_ERR(mld, + "failed to assign new_ctx during channel switch\n"); + goto out_remove; + } + + return 0; + + out_remove: + iwl_mld_remove_chanctx(hw, vifs[0].new_ctx); + out_reassign: + if (iwl_mld_add_chanctx(hw, vifs[0].old_ctx)) { + IWL_ERR(mld, "failed to add old_ctx after failure\n"); + return ret; + } + + if (iwl_mld_assign_vif_chanctx(hw, vifs[0].vif, vifs[0].link_conf, + vifs[0].old_ctx)) + IWL_ERR(mld, "failed to reassign old_ctx after failure\n"); + + return ret; +} + +static int +iwl_mld_switch_vif_chanctx_reassign(struct ieee80211_hw *hw, + struct ieee80211_vif_chanctx_switch *vifs) +{ + struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw); + int ret; + + iwl_mld_unassign_vif_chanctx(hw, vifs[0].vif, vifs[0].link_conf, + vifs[0].old_ctx); + ret = iwl_mld_assign_vif_chanctx(hw, vifs[0].vif, vifs[0].link_conf, + vifs[0].new_ctx); + if (ret) { + IWL_ERR(mld, + "failed to assign new_ctx during channel switch\n"); + goto out_reassign; + } + + return 0; + +out_reassign: + if (iwl_mld_assign_vif_chanctx(hw, vifs[0].vif, vifs[0].link_conf, + vifs[0].old_ctx)) + IWL_ERR(mld, "failed to reassign old_ctx after failure\n"); + + return ret; +} + +static int +iwl_mld_switch_vif_chanctx(struct ieee80211_hw *hw, + struct ieee80211_vif_chanctx_switch *vifs, + int n_vifs, + enum ieee80211_chanctx_switch_mode mode) +{ + int ret; + + /* we only support a single-vif right now */ + if (n_vifs > 1) + return -EOPNOTSUPP; + + switch (mode) { + case CHANCTX_SWMODE_SWAP_CONTEXTS: + ret = iwl_mld_switch_vif_chanctx_swap(hw, vifs); + break; + case CHANCTX_SWMODE_REASSIGN_VIF: + ret = 
iwl_mld_switch_vif_chanctx_reassign(hw, vifs); + break; + default: + ret = -EOPNOTSUPP; + break; + } + + return ret; +} + +static void iwl_mld_sta_pre_rcu_remove(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw); + struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta); + struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif); + struct iwl_mld_link_sta *mld_link_sta; + u8 link_id; + + lockdep_assert_wiphy(mld->wiphy); + + /* This is called before mac80211 does RCU synchronisation, + * so here we already invalidate our internal RCU-protected + * station pointer. The rest of the code will thus no longer + * be able to find the station this way, and we don't rely + * on further RCU synchronisation after the sta_state() + * callback deleted the station. + */ + for_each_mld_link_sta(mld_sta, mld_link_sta, link_id) + RCU_INIT_POINTER(mld->fw_id_to_link_sta[mld_link_sta->fw_id], + NULL); + + if (sta == mld_vif->ap_sta) + mld_vif->ap_sta = NULL; +} + +static void +iwl_mld_mac80211_mgd_protect_tdls_discover(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + unsigned int link_id) +{ + struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw); + struct ieee80211_bss_conf *link_conf; + u32 duration; + int ret; + + link_conf = wiphy_dereference(hw->wiphy, vif->link_conf[link_id]); + if (WARN_ON_ONCE(!link_conf)) + return; + + /* Protect the session to hear the TDLS setup response on the channel */ + + duration = 2 * link_conf->dtim_period * link_conf->beacon_int; + + ret = iwl_mld_start_session_protection(mld, vif, duration, duration, + link_id, HZ / 5); + if (ret) + IWL_ERR(mld, + "Failed to start session protection for TDLS: %d\n", + ret); +} + +static bool iwl_mld_can_activate_links(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + u16 desired_links) +{ + struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw); + int n_links = hweight16(desired_links); + + /* Check if HW supports the wanted number of links */ + return n_links <= iwl_mld_max_active_links(mld, vif); +} + +static int +iwl_mld_change_vif_links(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + u16 old_links, u16 new_links, + struct ieee80211_bss_conf *old[IEEE80211_MLD_MAX_NUM_LINKS]) +{ + struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif); + struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw); + struct ieee80211_bss_conf *link_conf; + u16 removed = old_links & ~new_links; + u16 added = new_links & ~old_links; + int err; + + lockdep_assert_wiphy(mld->wiphy); + + /* + * A zero links bitmap (no bits set) designates non-MLO mode. We can + * handle MLO exit/enter by simply mapping that to link ID zero + * internally. + * Note that mac80211 does such a non-MLO to MLO switch during restart + * if it was in MLO before. In that case, we do not have a link to + * remove. + */ + if (old_links == 0 && !mld->fw_status.in_hw_restart) + removed |= BIT(0); + + if (new_links == 0) + added |= BIT(0); + + for (int i = 0; i < IEEE80211_MLD_MAX_NUM_LINKS; i++) { + if (removed & BIT(i)) + iwl_mld_remove_link(mld, old[i]); + } + + for (int i = 0; i < IEEE80211_MLD_MAX_NUM_LINKS; i++) { + if (added & BIT(i)) { + link_conf = link_conf_dereference_protected(vif, i); + if (WARN_ON(!link_conf)) + return -EINVAL; + + err = iwl_mld_add_link(mld, link_conf); + if (err) + goto remove_added_links; + } + } + + /* + * Ensure we always have a valid primary_link. When using multiple + * links the proper value is set in assign_vif_chanctx. + */ + mld_vif->emlsr.primary = new_links ?
__ffs(new_links) : 0; + + /* + * Special MLO restart case. We did not have a link when the interface + * was added, so do the power configuration now. + */ + if (old_links == 0 && mld->fw_status.in_hw_restart) + iwl_mld_update_mac_power(mld, vif, false); + + return 0; + +remove_added_links: + for (int i = 0; i < IEEE80211_MLD_MAX_NUM_LINKS; i++) { + if (!(added & BIT(i))) + continue; + + link_conf = link_conf_dereference_protected(vif, i); + if (!link_conf || !iwl_mld_link_from_mac80211(link_conf)) + continue; + + iwl_mld_remove_link(mld, link_conf); + } + + return err; +} + +static int iwl_mld_change_sta_links(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + u16 old_links, u16 new_links) +{ + struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw); + + return iwl_mld_update_link_stas(mld, vif, sta, old_links, new_links); +} + +static int iwl_mld_mac80211_join_ibss(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + return iwl_mld_start_ap_ibss(hw, vif, &vif->bss_conf); +} + +static void iwl_mld_mac80211_leave_ibss(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + return iwl_mld_stop_ap_ibss(hw, vif, &vif->bss_conf); +} + +static int iwl_mld_mac80211_tx_last_beacon(struct ieee80211_hw *hw) +{ + struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw); + + return mld->ibss_manager; +} + +#define IWL_MLD_EMLSR_BLOCKED_TMP_NON_BSS_TIMEOUT (5 * HZ) + +static void iwl_mld_vif_iter_emlsr_block_tmp_non_bss(void *_data, u8 *mac, + struct ieee80211_vif *vif) +{ + struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif); + int ret; + + if (!iwl_mld_vif_has_emlsr_cap(vif)) + return; + + ret = iwl_mld_block_emlsr_sync(mld_vif->mld, vif, + IWL_MLD_EMLSR_BLOCKED_TMP_NON_BSS, + iwl_mld_get_primary_link(vif)); + if (ret) + return; + + wiphy_delayed_work_queue(mld_vif->mld->wiphy, + &mld_vif->emlsr.tmp_non_bss_done_wk, + IWL_MLD_EMLSR_BLOCKED_TMP_NON_BSS_TIMEOUT); +} + +static void iwl_mld_prep_add_interface(struct ieee80211_hw *hw, + enum nl80211_iftype type) +{ + struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw); + + IWL_DEBUG_MAC80211(mld, "prep_add_interface: type=%u\n", type); + + if (!(type == NL80211_IFTYPE_AP || + type == NL80211_IFTYPE_P2P_GO || + type == NL80211_IFTYPE_P2P_CLIENT)) + return; + + ieee80211_iterate_active_interfaces_mtx(mld->hw, + IEEE80211_IFACE_ITER_NORMAL, + iwl_mld_vif_iter_emlsr_block_tmp_non_bss, + NULL); +} + +static int iwl_mld_set_hw_timestamp(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct cfg80211_set_hw_timestamp *hwts) +{ + struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw); + u32 protocols = 0; + + /* HW timestamping is only supported for a specific station */ + if (!hwts->macaddr) + return -EOPNOTSUPP; + + if (hwts->enable) + protocols = + IWL_TIME_SYNC_PROTOCOL_TM | IWL_TIME_SYNC_PROTOCOL_FTM; + + return iwl_mld_time_sync_config(mld, hwts->macaddr, protocols); +} + +static int iwl_mld_start_pmsr(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct cfg80211_pmsr_request *request) +{ + struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw); + + return iwl_mld_ftm_start(mld, vif, request); +} + +const struct ieee80211_ops iwl_mld_hw_ops = { + .tx = iwl_mld_mac80211_tx, + .start = iwl_mld_mac80211_start, + .stop = iwl_mld_mac80211_stop, + .config = iwl_mld_mac80211_config, + .add_interface = iwl_mld_mac80211_add_interface, + .remove_interface = iwl_mld_mac80211_remove_interface, + .conf_tx = iwl_mld_mac80211_conf_tx, + .prepare_multicast = iwl_mld_mac80211_prepare_multicast, + .configure_filter = 
iwl_mld_mac80211_configure_filter, + .reconfig_complete = iwl_mld_mac80211_reconfig_complete, + .wake_tx_queue = iwl_mld_mac80211_wake_tx_queue, + .add_chanctx = iwl_mld_add_chanctx, + .remove_chanctx = iwl_mld_remove_chanctx, + .change_chanctx = iwl_mld_change_chanctx, + .assign_vif_chanctx = iwl_mld_assign_vif_chanctx, + .unassign_vif_chanctx = iwl_mld_unassign_vif_chanctx, + .set_rts_threshold = iwl_mld_mac80211_set_rts_threshold, + .link_info_changed = iwl_mld_mac80211_link_info_changed, + .vif_cfg_changed = iwl_mld_mac80211_vif_cfg_changed, + .set_key = iwl_mld_mac80211_set_key, + .hw_scan = iwl_mld_mac80211_hw_scan, + .cancel_hw_scan = iwl_mld_mac80211_cancel_hw_scan, + .sched_scan_start = iwl_mld_mac80211_sched_scan_start, + .sched_scan_stop = iwl_mld_mac80211_sched_scan_stop, + .mgd_prepare_tx = iwl_mld_mac80211_mgd_prepare_tx, + .mgd_complete_tx = iwl_mld_mac_mgd_complete_tx, + .sta_state = iwl_mld_mac80211_sta_state, + .sta_statistics = iwl_mld_mac80211_sta_statistics, + .flush = iwl_mld_mac80211_flush, + .flush_sta = iwl_mld_mac80211_flush_sta, + .ampdu_action = iwl_mld_mac80211_ampdu_action, + .can_aggregate_in_amsdu = iwl_mld_mac80211_can_aggregate, + .sync_rx_queues = iwl_mld_mac80211_sync_rx_queues, + .link_sta_rc_update = iwl_mld_sta_rc_update, + .start_ap = iwl_mld_start_ap_ibss, + .stop_ap = iwl_mld_stop_ap_ibss, + .pre_channel_switch = iwl_mld_pre_channel_switch, + .channel_switch = iwl_mld_channel_switch, + .post_channel_switch = iwl_mld_post_channel_switch, + .abort_channel_switch = iwl_mld_abort_channel_switch, + .switch_vif_chanctx = iwl_mld_switch_vif_chanctx, + .sta_pre_rcu_remove = iwl_mld_sta_pre_rcu_remove, + .remain_on_channel = iwl_mld_start_roc, + .cancel_remain_on_channel = iwl_mld_cancel_roc, + .can_activate_links = iwl_mld_can_activate_links, + .change_vif_links = iwl_mld_change_vif_links, + .change_sta_links = iwl_mld_change_sta_links, +#ifdef CONFIG_PM_SLEEP + .suspend = iwl_mld_suspend, + .resume = iwl_mld_resume, + .set_wakeup = iwl_mld_set_wakeup, + .set_rekey_data = iwl_mld_set_rekey_data, +#if IS_ENABLED(CONFIG_IPV6) + .ipv6_addr_change = iwl_mld_ipv6_addr_change, +#endif /* IS_ENABLED(CONFIG_IPV6) */ +#endif /* CONFIG_PM_SLEEP */ +#ifdef CONFIG_IWLWIFI_DEBUGFS + .vif_add_debugfs = iwl_mld_add_vif_debugfs, + .link_add_debugfs = iwl_mld_add_link_debugfs, + .link_sta_add_debugfs = iwl_mld_add_link_sta_debugfs, +#endif + .mgd_protect_tdls_discover = iwl_mld_mac80211_mgd_protect_tdls_discover, + .join_ibss = iwl_mld_mac80211_join_ibss, + .leave_ibss = iwl_mld_mac80211_leave_ibss, + .tx_last_beacon = iwl_mld_mac80211_tx_last_beacon, + .prep_add_interface = iwl_mld_prep_add_interface, + .set_hw_timestamp = iwl_mld_set_hw_timestamp, + .start_pmsr = iwl_mld_start_pmsr, +}; diff --git a/drivers/net/wireless/intel/iwlwifi/mld/mac80211.h b/drivers/net/wireless/intel/iwlwifi/mld/mac80211.h new file mode 100644 index 0000000000000..aad04d7b2617d --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mld/mac80211.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * Copyright (C) 2024 Intel Corporation + */ +#ifndef __iwl_mld_mac80211_h__ +#define __iwl_mld_mac80211_h__ + +#include "mld.h" + +int iwl_mld_register_hw(struct iwl_mld *mld); +void iwl_mld_recalc_multicast_filter(struct iwl_mld *mld); + +#endif /* __iwl_mld_mac80211_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mld/mcc.c b/drivers/net/wireless/intel/iwlwifi/mld/mcc.c new file mode 100644 index 0000000000000..daca14e208bde --- /dev/null +++ 
b/drivers/net/wireless/intel/iwlwifi/mld/mcc.c @@ -0,0 +1,329 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2024 Intel Corporation + */ + +#include +#include + +#include +#include + +#include "mld.h" +#include "hcmd.h" +#include "mcc.h" + +/* It is the caller's responsibility to free the pointer returned here */ +static struct iwl_mcc_update_resp_v8 * +iwl_mld_parse_mcc_update_resp_v8(const struct iwl_rx_packet *pkt) +{ + const struct iwl_mcc_update_resp_v8 *mcc_resp_v8 = (const void *)pkt->data; + int n_channels = __le32_to_cpu(mcc_resp_v8->n_channels); + struct iwl_mcc_update_resp_v8 *resp_cp; + int notif_len = struct_size(resp_cp, channels, n_channels); + + if (iwl_rx_packet_payload_len(pkt) != notif_len) + return ERR_PTR(-EINVAL); + + resp_cp = kmemdup(mcc_resp_v8, notif_len, GFP_KERNEL); + if (!resp_cp) + return ERR_PTR(-ENOMEM); + + return resp_cp; +} + +/* It is the caller's responsibility to free the pointer returned here */ +static struct iwl_mcc_update_resp_v8 * +iwl_mld_parse_mcc_update_resp_v5_v6(const struct iwl_rx_packet *pkt) +{ + const struct iwl_mcc_update_resp_v4 *mcc_resp_v4 = (const void *)pkt->data; + struct iwl_mcc_update_resp_v8 *resp_cp; + int n_channels = __le32_to_cpu(mcc_resp_v4->n_channels); + int resp_len; + + if (iwl_rx_packet_payload_len(pkt) != + struct_size(mcc_resp_v4, channels, n_channels)) + return ERR_PTR(-EINVAL); + + resp_len = struct_size(resp_cp, channels, n_channels); + resp_cp = kzalloc(resp_len, GFP_KERNEL); + if (!resp_cp) + return ERR_PTR(-ENOMEM); + + resp_cp->status = mcc_resp_v4->status; + resp_cp->mcc = mcc_resp_v4->mcc; + resp_cp->cap = cpu_to_le32(le16_to_cpu(mcc_resp_v4->cap)); + resp_cp->source_id = mcc_resp_v4->source_id; + resp_cp->geo_info = mcc_resp_v4->geo_info; + resp_cp->n_channels = mcc_resp_v4->n_channels; + memcpy(resp_cp->channels, mcc_resp_v4->channels, + n_channels * sizeof(__le32)); + + return resp_cp; +} + +/* It is the caller's responsibility to free the pointer returned here */ +static struct iwl_mcc_update_resp_v8 * +iwl_mld_update_mcc(struct iwl_mld *mld, const char *alpha2, + enum iwl_mcc_source src_id) +{ + int resp_ver = iwl_fw_lookup_notif_ver(mld->fw, LONG_GROUP, + MCC_UPDATE_CMD, 0); + struct iwl_mcc_update_cmd mcc_update_cmd = { + .mcc = cpu_to_le16(alpha2[0] << 8 | alpha2[1]), + .source_id = (u8)src_id, + }; + struct iwl_mcc_update_resp_v8 *resp_cp; + struct iwl_rx_packet *pkt; + struct iwl_host_cmd cmd = { + .id = MCC_UPDATE_CMD, + .flags = CMD_WANT_SKB, + .data = { &mcc_update_cmd }, + .len[0] = sizeof(mcc_update_cmd), + }; + int ret; + u16 mcc; + + IWL_DEBUG_LAR(mld, "send MCC update to FW with '%c%c' src = %d\n", + alpha2[0], alpha2[1], src_id); + + ret = iwl_mld_send_cmd(mld, &cmd); + if (ret) + return ERR_PTR(ret); + + pkt = cmd.resp_pkt; + + /* For Wifi-7 radios, we get version 8 + * For Wifi-6E radios, we get version 6 + * For Wifi-6 radios, we get version 5, but 5, 6, and 4 are compatible. + */ + switch (resp_ver) { + case 5: + case 6: + resp_cp = iwl_mld_parse_mcc_update_resp_v5_v6(pkt); + break; + case 8: + resp_cp = iwl_mld_parse_mcc_update_resp_v8(pkt); + break; + default: + IWL_FW_CHECK_FAILED(mld, "Unknown MCC_UPDATE_CMD version %d\n", resp_ver); + resp_cp = ERR_PTR(-EINVAL); + } + + if (IS_ERR(resp_cp)) + goto exit; + + mcc = le16_to_cpu(resp_cp->mcc); + + IWL_FW_CHECK(mld, !mcc, "mcc can't be 0: %d\n", mcc); + + IWL_DEBUG_LAR(mld, + "MCC response status: 0x%x. 
new MCC: 0x%x ('%c%c')\n", + le32_to_cpu(resp_cp->status), mcc, mcc >> 8, mcc & 0xff); + +exit: + iwl_free_resp(&cmd); + return resp_cp; +} + +/* It is the caller's responsibility to free the pointer returned here */ +struct ieee80211_regdomain * +iwl_mld_get_regdomain(struct iwl_mld *mld, + const char *alpha2, + enum iwl_mcc_source src_id, + bool *changed) +{ + struct ieee80211_regdomain *regd = NULL; + struct iwl_mcc_update_resp_v8 *resp; + u8 resp_ver = iwl_fw_lookup_notif_ver(mld->fw, IWL_ALWAYS_LONG_GROUP, + MCC_UPDATE_CMD, 0); + + IWL_DEBUG_LAR(mld, "Getting regdomain data for %s from FW\n", alpha2); + + lockdep_assert_wiphy(mld->wiphy); + + resp = iwl_mld_update_mcc(mld, alpha2, src_id); + if (IS_ERR(resp)) { + IWL_DEBUG_LAR(mld, "Could not get update from FW %ld\n", + PTR_ERR(resp)); + resp = NULL; + goto out; + } + + if (changed) { + u32 status = le32_to_cpu(resp->status); + + *changed = (status == MCC_RESP_NEW_CHAN_PROFILE || + status == MCC_RESP_ILLEGAL); + } + IWL_DEBUG_LAR(mld, "MCC update response version: %d\n", resp_ver); + + regd = iwl_parse_nvm_mcc_info(mld->trans->dev, mld->cfg, + __le32_to_cpu(resp->n_channels), + resp->channels, + __le16_to_cpu(resp->mcc), + __le16_to_cpu(resp->geo_info), + le32_to_cpu(resp->cap), resp_ver); + + if (IS_ERR(regd)) { + IWL_DEBUG_LAR(mld, "Could not parse update from FW %ld\n", + PTR_ERR(regd)); + goto out; + } + + IWL_DEBUG_LAR(mld, "setting alpha2 from FW to %s (0x%x, 0x%x) src=%d\n", + regd->alpha2, regd->alpha2[0], + regd->alpha2[1], resp->source_id); + + mld->mcc_src = resp->source_id; + + if (!iwl_puncturing_is_allowed_in_bios(mld->bios_enable_puncturing, + le16_to_cpu(resp->mcc))) + ieee80211_hw_set(mld->hw, DISALLOW_PUNCTURING); + else + __clear_bit(IEEE80211_HW_DISALLOW_PUNCTURING, mld->hw->flags); + +out: + kfree(resp); + return regd; +} + +/* It is the caller's responsibility to free the pointer returned here */ +static struct ieee80211_regdomain * +iwl_mld_get_current_regdomain(struct iwl_mld *mld, + bool *changed) +{ + return iwl_mld_get_regdomain(mld, "ZZ", + MCC_SOURCE_GET_CURRENT, changed); +} + +void iwl_mld_update_changed_regdomain(struct iwl_mld *mld) +{ + struct ieee80211_regdomain *regd; + bool changed; + + regd = iwl_mld_get_current_regdomain(mld, &changed); + + if (IS_ERR_OR_NULL(regd)) + return; + + if (changed) + regulatory_set_wiphy_regd(mld->wiphy, regd); + kfree(regd); +} + +static int iwl_mld_apply_last_mcc(struct iwl_mld *mld, + const char *alpha2) +{ + struct ieee80211_regdomain *regd; + u32 used_src; + bool changed; + int ret; + + /* save the last source in case we overwrite it below */ + used_src = mld->mcc_src; + + /* Notify the firmware we support wifi location updates */ + regd = iwl_mld_get_current_regdomain(mld, NULL); + if (!IS_ERR_OR_NULL(regd)) + kfree(regd); + + /* Now set our last stored MCC and source */ + regd = iwl_mld_get_regdomain(mld, alpha2, used_src, + &changed); + if (IS_ERR_OR_NULL(regd)) + return -EIO; + + /* update cfg80211 if the regdomain was changed */ + if (changed) + ret = regulatory_set_wiphy_regd_sync(mld->wiphy, regd); + else + ret = 0; + + kfree(regd); + return ret; +} + +int iwl_mld_init_mcc(struct iwl_mld *mld) +{ + const struct ieee80211_regdomain *r; + struct ieee80211_regdomain *regd; + char mcc[3]; + int retval; + + /* try to replay the last set MCC to FW */ + r = wiphy_dereference(mld->wiphy, mld->wiphy->regd); + + if (r) + return iwl_mld_apply_last_mcc(mld, r->alpha2); + + regd = iwl_mld_get_current_regdomain(mld, NULL); + if (IS_ERR_OR_NULL(regd)) + return -EIO;
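+ + /* No regdomain was stored from a previous run; use the current + * regdomain retrieved above unless the BIOS provides an MCC to + * prefer below. + */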
+ + if (!iwl_bios_get_mcc(&mld->fwrt, mcc)) { + kfree(regd); + regd = iwl_mld_get_regdomain(mld, mcc, MCC_SOURCE_BIOS, NULL); + if (IS_ERR_OR_NULL(regd)) + return -EIO; + } + + retval = regulatory_set_wiphy_regd_sync(mld->wiphy, regd); + + kfree(regd); + return retval; +} + +static void iwl_mld_find_assoc_vif_iterator(void *data, u8 *mac, + struct ieee80211_vif *vif) +{ + bool *assoc = data; + + if (vif->type == NL80211_IFTYPE_STATION && + vif->cfg.assoc) + *assoc = true; +} + +static bool iwl_mld_is_a_vif_assoc(struct iwl_mld *mld) +{ + bool assoc = false; + + ieee80211_iterate_active_interfaces_atomic(mld->hw, + IEEE80211_IFACE_ITER_NORMAL, + iwl_mld_find_assoc_vif_iterator, + &assoc); + return assoc; +} + +void iwl_mld_handle_update_mcc(struct iwl_mld *mld, struct iwl_rx_packet *pkt) +{ + struct iwl_mcc_chub_notif *notif = (void *)pkt->data; + enum iwl_mcc_source src; + char mcc[3]; + struct ieee80211_regdomain *regd; + bool changed; + + lockdep_assert_wiphy(mld->wiphy); + + if (iwl_mld_is_a_vif_assoc(mld) && + notif->source_id == MCC_SOURCE_WIFI) { + IWL_DEBUG_LAR(mld, "Ignore mcc update while associated\n"); + return; + } + + mcc[0] = le16_to_cpu(notif->mcc) >> 8; + mcc[1] = le16_to_cpu(notif->mcc) & 0xff; + mcc[2] = '\0'; + src = notif->source_id; + + IWL_DEBUG_LAR(mld, + "RX: received chub update mcc cmd (mcc '%s' src %d)\n", + mcc, src); + regd = iwl_mld_get_regdomain(mld, mcc, src, &changed); + if (IS_ERR_OR_NULL(regd)) + return; + + if (changed) + regulatory_set_wiphy_regd(mld->hw->wiphy, regd); + kfree(regd); +} diff --git a/drivers/net/wireless/intel/iwlwifi/mld/mcc.h b/drivers/net/wireless/intel/iwlwifi/mld/mcc.h new file mode 100644 index 0000000000000..2b31e5b5e2edf --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mld/mcc.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * Copyright (C) 2024 Intel Corporation + */ +#ifndef __iwl_mld_mcc_h__ +#define __iwl_mld_mcc_h__ + +int iwl_mld_init_mcc(struct iwl_mld *mld); +void iwl_mld_handle_update_mcc(struct iwl_mld *mld, struct iwl_rx_packet *pkt); +void iwl_mld_update_changed_regdomain(struct iwl_mld *mld); +struct ieee80211_regdomain * +iwl_mld_get_regdomain(struct iwl_mld *mld, + const char *alpha2, + enum iwl_mcc_source src_id, + bool *changed); + +#endif /* __iwl_mld_mcc_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mld/mld.c b/drivers/net/wireless/intel/iwlwifi/mld/mld.c new file mode 100644 index 0000000000000..d4a99ae64074f --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mld/mld.c @@ -0,0 +1,720 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2024-2025 Intel Corporation + */ +#include +#include + +#include "fw/api/rx.h" +#include "fw/api/datapath.h" +#include "fw/api/commands.h" +#include "fw/api/offload.h" +#include "fw/api/coex.h" +#include "fw/dbg.h" +#include "fw/uefi.h" + +#include "mld.h" +#include "mlo.h" +#include "mac80211.h" +#include "led.h" +#include "scan.h" +#include "tx.h" +#include "sta.h" +#include "regulatory.h" +#include "thermal.h" +#include "low_latency.h" +#include "hcmd.h" +#include "fw/api/location.h" + +#define DRV_DESCRIPTION "Intel(R) MLD wireless driver for Linux" +MODULE_DESCRIPTION(DRV_DESCRIPTION); +MODULE_LICENSE("GPL"); +MODULE_IMPORT_NS("IWLWIFI"); + +static const struct iwl_op_mode_ops iwl_mld_ops; + +static int __init iwl_mld_init(void) +{ + int ret = iwl_opmode_register("iwlmld", &iwl_mld_ops); + + if (ret) + pr_err("Unable to register MLD op_mode: %d\n", ret); + + return ret; +} 
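+ +/* Module init registers the "iwlmld" op_mode with iwl-drv; iwl-drv then + * uses iwl_mld_ops (defined below) to start and stop devices that use it. + */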
+module_init(iwl_mld_init); + +static void __exit iwl_mld_exit(void) +{ + iwl_opmode_deregister("iwlmld"); +} +module_exit(iwl_mld_exit); + +static void iwl_mld_hw_set_regulatory(struct iwl_mld *mld) +{ + struct wiphy *wiphy = mld->wiphy; + + wiphy->regulatory_flags |= REGULATORY_WIPHY_SELF_MANAGED; + wiphy->regulatory_flags |= REGULATORY_ENABLE_RELAX_NO_IR; +} + +VISIBLE_IF_IWLWIFI_KUNIT +void iwl_construct_mld(struct iwl_mld *mld, struct iwl_trans *trans, + const struct iwl_cfg *cfg, const struct iwl_fw *fw, + struct ieee80211_hw *hw, struct dentry *dbgfs_dir) +{ + mld->dev = trans->dev; + mld->trans = trans; + mld->cfg = cfg; + mld->fw = fw; + mld->hw = hw; + mld->wiphy = hw->wiphy; + mld->debugfs_dir = dbgfs_dir; + + iwl_notification_wait_init(&mld->notif_wait); + + /* Setup async RX handling */ + spin_lock_init(&mld->async_handlers_lock); + wiphy_work_init(&mld->async_handlers_wk, + iwl_mld_async_handlers_wk); + + /* Dynamic Queue Allocation */ + spin_lock_init(&mld->add_txqs_lock); + INIT_LIST_HEAD(&mld->txqs_to_add); + wiphy_work_init(&mld->add_txqs_wk, iwl_mld_add_txqs_wk); + + /* Setup RX queues sync wait queue */ + init_waitqueue_head(&mld->rxq_sync.waitq); +} +EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_construct_mld); + +static void __acquires(&mld->wiphy->mtx) +iwl_mld_fwrt_dump_start(void *ctx) +{ + struct iwl_mld *mld = ctx; + + wiphy_lock(mld->wiphy); +} + +static void __releases(&mld->wiphy->mtx) +iwl_mld_fwrt_dump_end(void *ctx) +{ + struct iwl_mld *mld = ctx; + + wiphy_unlock(mld->wiphy); +} + +static bool iwl_mld_d3_debug_enable(void *ctx) +{ + return IWL_MLD_D3_DEBUG; +} + +static int iwl_mld_fwrt_send_hcmd(void *ctx, struct iwl_host_cmd *host_cmd) +{ + struct iwl_mld *mld = (struct iwl_mld *)ctx; + int ret; + + wiphy_lock(mld->wiphy); + ret = iwl_mld_send_cmd(mld, host_cmd); + wiphy_unlock(mld->wiphy); + + return ret; +} + +static const struct iwl_fw_runtime_ops iwl_mld_fwrt_ops = { + .dump_start = iwl_mld_fwrt_dump_start, + .dump_end = iwl_mld_fwrt_dump_end, + .send_hcmd = iwl_mld_fwrt_send_hcmd, + .d3_debug_enable = iwl_mld_d3_debug_enable, +}; + +static void +iwl_mld_construct_fw_runtime(struct iwl_mld *mld, struct iwl_trans *trans, + const struct iwl_fw *fw, + struct dentry *debugfs_dir) +{ + iwl_fw_runtime_init(&mld->fwrt, trans, fw, &iwl_mld_fwrt_ops, mld, + NULL, NULL, debugfs_dir); + + iwl_fw_set_current_image(&mld->fwrt, IWL_UCODE_REGULAR); +} + +/* Please keep this array *SORTED* by hex value. 
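+ * HCMD_NAME() pairs each command id with its stringified name, so these + * tables also provide the id-to-name mapping used for debug prints.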
+ * Access is done through binary search + */ +static const struct iwl_hcmd_names iwl_mld_legacy_names[] = { + HCMD_NAME(UCODE_ALIVE_NTFY), + HCMD_NAME(INIT_COMPLETE_NOTIF), + HCMD_NAME(PHY_CONTEXT_CMD), + HCMD_NAME(SCAN_CFG_CMD), + HCMD_NAME(SCAN_REQ_UMAC), + HCMD_NAME(SCAN_ABORT_UMAC), + HCMD_NAME(SCAN_COMPLETE_UMAC), + HCMD_NAME(TX_CMD), + HCMD_NAME(TXPATH_FLUSH), + HCMD_NAME(LEDS_CMD), + HCMD_NAME(WNM_80211V_TIMING_MEASUREMENT_NOTIFICATION), + HCMD_NAME(WNM_80211V_TIMING_MEASUREMENT_CONFIRM_NOTIFICATION), + HCMD_NAME(SCAN_OFFLOAD_UPDATE_PROFILES_CMD), + HCMD_NAME(POWER_TABLE_CMD), + HCMD_NAME(PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION), + HCMD_NAME(BEACON_NOTIFICATION), + HCMD_NAME(BEACON_TEMPLATE_CMD), + HCMD_NAME(TX_ANT_CONFIGURATION_CMD), + HCMD_NAME(REDUCE_TX_POWER_CMD), + HCMD_NAME(MISSED_BEACONS_NOTIFICATION), + HCMD_NAME(MAC_PM_POWER_TABLE), + HCMD_NAME(MFUART_LOAD_NOTIFICATION), + HCMD_NAME(RSS_CONFIG_CMD), + HCMD_NAME(SCAN_ITERATION_COMPLETE_UMAC), + HCMD_NAME(REPLY_RX_MPDU_CMD), + HCMD_NAME(BA_NOTIF), + HCMD_NAME(MCC_UPDATE_CMD), + HCMD_NAME(MCC_CHUB_UPDATE_CMD), + HCMD_NAME(MCAST_FILTER_CMD), + HCMD_NAME(REPLY_BEACON_FILTERING_CMD), + HCMD_NAME(PROT_OFFLOAD_CONFIG_CMD), + HCMD_NAME(MATCH_FOUND_NOTIFICATION), + HCMD_NAME(WOWLAN_PATTERNS), + HCMD_NAME(WOWLAN_CONFIGURATION), + HCMD_NAME(WOWLAN_TSC_RSC_PARAM), + HCMD_NAME(WOWLAN_KEK_KCK_MATERIAL), + HCMD_NAME(DEBUG_HOST_COMMAND), + HCMD_NAME(LDBG_CONFIG_CMD), +}; + +/* Please keep this array *SORTED* by hex value. + * Access is done through binary search + */ +static const struct iwl_hcmd_names iwl_mld_system_names[] = { + HCMD_NAME(SHARED_MEM_CFG_CMD), + HCMD_NAME(SOC_CONFIGURATION_CMD), + HCMD_NAME(INIT_EXTENDED_CFG_CMD), + HCMD_NAME(FW_ERROR_RECOVERY_CMD), + HCMD_NAME(RFI_GET_FREQ_TABLE_CMD), + HCMD_NAME(SYSTEM_STATISTICS_CMD), + HCMD_NAME(SYSTEM_STATISTICS_END_NOTIF), +}; + +/* Please keep this array *SORTED* by hex value. + * Access is done through binary search + */ +static const struct iwl_hcmd_names iwl_mld_reg_and_nvm_names[] = { + HCMD_NAME(LARI_CONFIG_CHANGE), + HCMD_NAME(NVM_GET_INFO), + HCMD_NAME(TAS_CONFIG), + HCMD_NAME(SAR_OFFSET_MAPPING_TABLE_CMD), + HCMD_NAME(MCC_ALLOWED_AP_TYPE_CMD), +}; + +/* Please keep this array *SORTED* by hex value. + * Access is done through binary search + */ +static const struct iwl_hcmd_names iwl_mld_debug_names[] = { + HCMD_NAME(HOST_EVENT_CFG), + HCMD_NAME(DBGC_SUSPEND_RESUME), +}; + +/* Please keep this array *SORTED* by hex value. + * Access is done through binary search + */ +static const struct iwl_hcmd_names iwl_mld_mac_conf_names[] = { + HCMD_NAME(LOW_LATENCY_CMD), + HCMD_NAME(SESSION_PROTECTION_CMD), + HCMD_NAME(MAC_CONFIG_CMD), + HCMD_NAME(LINK_CONFIG_CMD), + HCMD_NAME(STA_CONFIG_CMD), + HCMD_NAME(AUX_STA_CMD), + HCMD_NAME(STA_REMOVE_CMD), + HCMD_NAME(ROC_CMD), + HCMD_NAME(MISSED_BEACONS_NOTIF), + HCMD_NAME(EMLSR_TRANS_FAIL_NOTIF), + HCMD_NAME(ROC_NOTIF), + HCMD_NAME(CHANNEL_SWITCH_ERROR_NOTIF), + HCMD_NAME(SESSION_PROTECTION_NOTIF), + HCMD_NAME(PROBE_RESPONSE_DATA_NOTIF), + HCMD_NAME(CHANNEL_SWITCH_START_NOTIF), +}; + +/* Please keep this array *SORTED* by hex value. 
+ * Access is done through binary search + */ +static const struct iwl_hcmd_names iwl_mld_data_path_names[] = { + HCMD_NAME(TRIGGER_RX_QUEUES_NOTIF_CMD), + HCMD_NAME(WNM_PLATFORM_PTM_REQUEST_CMD), + HCMD_NAME(WNM_80211V_TIMING_MEASUREMENT_CONFIG_CMD), + HCMD_NAME(RFH_QUEUE_CONFIG_CMD), + HCMD_NAME(TLC_MNG_CONFIG_CMD), + HCMD_NAME(RX_BAID_ALLOCATION_CONFIG_CMD), + HCMD_NAME(SCD_QUEUE_CONFIG_CMD), + HCMD_NAME(OMI_SEND_STATUS_NOTIF), + HCMD_NAME(ESR_MODE_NOTIF), + HCMD_NAME(MONITOR_NOTIF), + HCMD_NAME(TLC_MNG_UPDATE_NOTIF), + HCMD_NAME(MU_GROUP_MGMT_NOTIF), +}; + +/* Please keep this array *SORTED* by hex value. + * Access is done through binary search + */ +static const struct iwl_hcmd_names iwl_mld_location_names[] = { + HCMD_NAME(TOF_RANGE_REQ_CMD), + HCMD_NAME(TOF_RANGE_RESPONSE_NOTIF), +}; + +/* Please keep this array *SORTED* by hex value. + * Access is done through binary search + */ +static const struct iwl_hcmd_names iwl_mld_phy_names[] = { + HCMD_NAME(CMD_DTS_MEASUREMENT_TRIGGER_WIDE), + HCMD_NAME(CTDP_CONFIG_CMD), + HCMD_NAME(TEMP_REPORTING_THRESHOLDS_CMD), + HCMD_NAME(PER_CHAIN_LIMIT_OFFSET_CMD), + HCMD_NAME(CT_KILL_NOTIFICATION), + HCMD_NAME(DTS_MEASUREMENT_NOTIF_WIDE), +}; + +/* Please keep this array *SORTED* by hex value. + * Access is done through binary search + */ +static const struct iwl_hcmd_names iwl_mld_statistics_names[] = { + HCMD_NAME(STATISTICS_OPER_NOTIF), + HCMD_NAME(STATISTICS_OPER_PART1_NOTIF), +}; + +/* Please keep this array *SORTED* by hex value. + * Access is done through binary search + */ +static const struct iwl_hcmd_names iwl_mld_prot_offload_names[] = { + HCMD_NAME(STORED_BEACON_NTF), +}; + +/* Please keep this array *SORTED* by hex value. + * Access is done through binary search + */ +static const struct iwl_hcmd_names iwl_mld_coex_names[] = { + HCMD_NAME(PROFILE_NOTIF), +}; + +VISIBLE_IF_IWLWIFI_KUNIT +const struct iwl_hcmd_arr iwl_mld_groups[] = { + [LEGACY_GROUP] = HCMD_ARR(iwl_mld_legacy_names), + [LONG_GROUP] = HCMD_ARR(iwl_mld_legacy_names), + [SYSTEM_GROUP] = HCMD_ARR(iwl_mld_system_names), + [MAC_CONF_GROUP] = HCMD_ARR(iwl_mld_mac_conf_names), + [DATA_PATH_GROUP] = HCMD_ARR(iwl_mld_data_path_names), + [LOCATION_GROUP] = HCMD_ARR(iwl_mld_location_names), + [REGULATORY_AND_NVM_GROUP] = HCMD_ARR(iwl_mld_reg_and_nvm_names), + [DEBUG_GROUP] = HCMD_ARR(iwl_mld_debug_names), + [PHY_OPS_GROUP] = HCMD_ARR(iwl_mld_phy_names), + [STATISTICS_GROUP] = HCMD_ARR(iwl_mld_statistics_names), + [PROT_OFFLOAD_GROUP] = HCMD_ARR(iwl_mld_prot_offload_names), + [BT_COEX_GROUP] = HCMD_ARR(iwl_mld_coex_names), +}; +EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_mld_groups); + +#if IS_ENABLED(CONFIG_IWLWIFI_KUNIT_TESTS) +const unsigned int global_iwl_mld_goups_size = ARRAY_SIZE(iwl_mld_groups); +EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(global_iwl_mld_goups_size); +#endif + +static void +iwl_mld_configure_trans(struct iwl_op_mode *op_mode) +{ + const struct iwl_mld *mld = IWL_OP_MODE_GET_MLD(op_mode); + static const u8 no_reclaim_cmds[] = {TX_CMD}; + struct iwl_trans_config trans_cfg = { + .op_mode = op_mode, + /* Rx is not supported yet, but add it to avoid warnings */ + .rx_buf_size = iwl_amsdu_size_to_rxb_size(), + .command_groups = iwl_mld_groups, + .command_groups_size = ARRAY_SIZE(iwl_mld_groups), + .fw_reset_handshake = true, + .queue_alloc_cmd_ver = + iwl_fw_lookup_cmd_ver(mld->fw, + WIDE_ID(DATA_PATH_GROUP, + SCD_QUEUE_CONFIG_CMD), + 0), + .no_reclaim_cmds = no_reclaim_cmds, + .n_no_reclaim_cmds = ARRAY_SIZE(no_reclaim_cmds), + .cb_data_offs = offsetof(struct ieee80211_tx_info, + 
driver_data[2]), + }; + struct iwl_trans *trans = mld->trans; + + trans->rx_mpdu_cmd = REPLY_RX_MPDU_CMD; + trans->iml = mld->fw->iml; + trans->iml_len = mld->fw->iml_len; + trans->wide_cmd_header = true; + + iwl_trans_configure(trans, &trans_cfg); +} + +/* + ***************************************************** + * op mode ops functions + ***************************************************** + */ + +#define NUM_FW_LOAD_RETRIES 3 +static struct iwl_op_mode * +iwl_op_mode_mld_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, + const struct iwl_fw *fw, struct dentry *dbgfs_dir) +{ + struct ieee80211_hw *hw; + struct iwl_op_mode *op_mode; + struct iwl_mld *mld; + u32 eckv_value; + int ret; + + /* Allocate and initialize a new hardware device */ + hw = ieee80211_alloc_hw(sizeof(struct iwl_op_mode) + + sizeof(struct iwl_mld), + &iwl_mld_hw_ops); + if (!hw) + return ERR_PTR(-ENOMEM); + + op_mode = hw->priv; + + op_mode->ops = &iwl_mld_ops; + + mld = IWL_OP_MODE_GET_MLD(op_mode); + + iwl_construct_mld(mld, trans, cfg, fw, hw, dbgfs_dir); + + iwl_mld_construct_fw_runtime(mld, trans, fw, dbgfs_dir); + + iwl_mld_get_bios_tables(mld); + iwl_uefi_get_sgom_table(trans, &mld->fwrt); + iwl_uefi_get_step_table(trans); + if (iwl_bios_get_eckv(&mld->fwrt, &eckv_value)) + IWL_DEBUG_RADIO(mld, "ECKV table doesn't exist in BIOS\n"); + else + trans->ext_32khz_clock_valid = !!eckv_value; + iwl_bios_setup_step(trans, &mld->fwrt); + mld->bios_enable_puncturing = iwl_uefi_get_puncturing(&mld->fwrt); + + iwl_mld_hw_set_regulatory(mld); + + /* Configure transport layer with the opmode specific params */ + iwl_mld_configure_trans(op_mode); + + /* needed for regulatory init */ + rtnl_lock(); + /* Needed for sending commands */ + wiphy_lock(mld->wiphy); + + for (int i = 0; i < NUM_FW_LOAD_RETRIES; i++) { + ret = iwl_mld_load_fw(mld); + if (!ret) + break; + } + + if (ret) { + wiphy_unlock(mld->wiphy); + rtnl_unlock(); + iwl_fw_flush_dumps(&mld->fwrt); + goto free_hw; + } + + iwl_mld_stop_fw(mld); + + wiphy_unlock(mld->wiphy); + rtnl_unlock(); + + ret = iwl_mld_leds_init(mld); + if (ret) + goto free_nvm; + + ret = iwl_mld_alloc_scan_cmd(mld); + if (ret) + goto leds_exit; + + ret = iwl_mld_low_latency_init(mld); + if (ret) + goto free_scan_cmd; + + ret = iwl_mld_register_hw(mld); + if (ret) + goto low_latency_free; + + iwl_mld_toggle_tx_ant(mld, &mld->mgmt_tx_ant); + + iwl_mld_add_debugfs_files(mld, dbgfs_dir); + iwl_mld_thermal_initialize(mld); + + iwl_mld_ptp_init(mld); + + return op_mode; + +low_latency_free: + iwl_mld_low_latency_free(mld); +free_scan_cmd: + kfree(mld->scan.cmd); +leds_exit: + iwl_mld_leds_exit(mld); +free_nvm: + kfree(mld->nvm_data); +free_hw: + ieee80211_free_hw(mld->hw); + return ERR_PTR(ret); +} + +static void +iwl_op_mode_mld_stop(struct iwl_op_mode *op_mode) +{ + struct iwl_mld *mld = IWL_OP_MODE_GET_MLD(op_mode); + + iwl_mld_ptp_remove(mld); + iwl_mld_leds_exit(mld); + + wiphy_lock(mld->wiphy); + iwl_mld_thermal_exit(mld); + iwl_mld_low_latency_stop(mld); + iwl_mld_deinit_time_sync(mld); + wiphy_unlock(mld->wiphy); + + ieee80211_unregister_hw(mld->hw); + + iwl_fw_runtime_free(&mld->fwrt); + iwl_mld_low_latency_free(mld); + + iwl_trans_op_mode_leave(mld->trans); + + kfree(mld->nvm_data); + kfree(mld->scan.cmd); + kfree(mld->error_recovery_buf); + kfree(mld->mcast_filter_cmd); + + ieee80211_free_hw(mld->hw); +} + +static void iwl_mld_queue_state_change(struct iwl_op_mode *op_mode, + int hw_queue, bool queue_full) +{ + struct iwl_mld *mld = IWL_OP_MODE_GET_MLD(op_mode); + struct 
ieee80211_txq *txq; + struct iwl_mld_sta *mld_sta; + struct iwl_mld_txq *mld_txq; + + rcu_read_lock(); + + txq = rcu_dereference(mld->fw_id_to_txq[hw_queue]); + if (!txq) { + rcu_read_unlock(); + + if (queue_full) { + /* An internal queue is not expected to become full */ + IWL_WARN(mld, + "Internal hw_queue %d is full! stopping all queues\n", + hw_queue); + /* Stop all queues, as an internal queue is not + * mapped to a mac80211 one + */ + ieee80211_stop_queues(mld->hw); + } else { + ieee80211_wake_queues(mld->hw); + } + + return; + } + + mld_txq = iwl_mld_txq_from_mac80211(txq); + mld_sta = txq->sta ? iwl_mld_sta_from_mac80211(txq->sta) : NULL; + + mld_txq->status.stop_full = queue_full; + + if (!queue_full && mld_sta && + mld_sta->sta_state != IEEE80211_STA_NOTEXIST) { + local_bh_disable(); + iwl_mld_tx_from_txq(mld, txq); + local_bh_enable(); + } + + rcu_read_unlock(); +} + +static void +iwl_mld_queue_full(struct iwl_op_mode *op_mode, int hw_queue) +{ + iwl_mld_queue_state_change(op_mode, hw_queue, true); +} + +static void +iwl_mld_queue_not_full(struct iwl_op_mode *op_mode, int hw_queue) +{ + iwl_mld_queue_state_change(op_mode, hw_queue, false); +} + +static bool +iwl_mld_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state) +{ + struct iwl_mld *mld = IWL_OP_MODE_GET_MLD(op_mode); + + iwl_mld_set_hwkill(mld, state); + + return false; +} + +static void +iwl_mld_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb) +{ + struct iwl_mld *mld = IWL_OP_MODE_GET_MLD(op_mode); + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + + iwl_trans_free_tx_cmd(mld->trans, info->driver_data[1]); + ieee80211_free_txskb(mld->hw, skb); +} + +static void iwl_mld_read_error_recovery_buffer(struct iwl_mld *mld) +{ + u32 src_size = mld->fw->ucode_capa.error_log_size; + u32 src_addr = mld->fw->ucode_capa.error_log_addr; + u8 *recovery_buf; + int ret; + + /* no recovery buffer size defined in a TLV */ + if (!src_size) + return; + + recovery_buf = kzalloc(src_size, GFP_ATOMIC); + if (!recovery_buf) + return; + + ret = iwl_trans_read_mem_bytes(mld->trans, src_addr, + recovery_buf, src_size); + if (ret) { + IWL_ERR(mld, "Failed to read error recovery buffer (%d)\n", + ret); + kfree(recovery_buf); + return; + } + + mld->error_recovery_buf = recovery_buf; +} + +static void iwl_mld_restart_nic(struct iwl_mld *mld) +{ + iwl_mld_read_error_recovery_buffer(mld); + + mld->fwrt.trans->dbg.restart_required = false; + + ieee80211_restart_hw(mld->hw); +} + +static void +iwl_mld_nic_error(struct iwl_op_mode *op_mode, + enum iwl_fw_error_type type) +{ + struct iwl_mld *mld = IWL_OP_MODE_GET_MLD(op_mode); + bool trans_dead = test_bit(STATUS_TRANS_DEAD, &mld->trans->status); + + if (type == IWL_ERR_TYPE_CMD_QUEUE_FULL) + IWL_ERR(mld, "Command queue full!\n"); + else if (!trans_dead && !mld->fw_status.do_not_dump_once) + iwl_fwrt_dump_error_logs(&mld->fwrt); + + mld->fw_status.do_not_dump_once = false; + + /* It is necessary to abort any OS scan here because mac80211 requires + * having the scan cleared before restarting. + * We'll reset the scan_status to NONE in restart cleanup in + * the next drv_start() call from mac80211. If ieee80211_hw_restart + * isn't called, the scan status will stay busy. + */ + iwl_mld_report_scan_aborted(mld); + + /* + * This should be first thing before trying to collect any + * data to avoid endless loops if any HW error happens while + * collecting debug data.
+ * It might not actually be true that we'll restart, but the + * setting doesn't matter if we're going to be unbound either. + */ + if (type != IWL_ERR_TYPE_RESET_HS_TIMEOUT) + mld->fw_status.in_hw_restart = true; +} + +static void iwl_mld_dump_error(struct iwl_op_mode *op_mode, + struct iwl_fw_error_dump_mode *mode) +{ + struct iwl_mld *mld = IWL_OP_MODE_GET_MLD(op_mode); + + /* if we come in from opmode we have the mutex held */ + if (mode->context == IWL_ERR_CONTEXT_FROM_OPMODE) { + lockdep_assert_wiphy(mld->wiphy); + iwl_fw_error_collect(&mld->fwrt); + } else { + wiphy_lock(mld->wiphy); + if (mode->context != IWL_ERR_CONTEXT_ABORT) + iwl_fw_error_collect(&mld->fwrt); + wiphy_unlock(mld->wiphy); + } +} + +static bool iwl_mld_sw_reset(struct iwl_op_mode *op_mode, + enum iwl_fw_error_type type) +{ + struct iwl_mld *mld = IWL_OP_MODE_GET_MLD(op_mode); + + /* Do restart only if the following conditions are met: + * - we consider the FW as running + * - The trigger that brought us here is defined as one that requires + * a restart (in the debug TLVs) + */ + if (!mld->fw_status.running || !mld->fwrt.trans->dbg.restart_required) + return false; + + iwl_mld_restart_nic(mld); + return true; +} + +static void +iwl_mld_time_point(struct iwl_op_mode *op_mode, + enum iwl_fw_ini_time_point tp_id, + union iwl_dbg_tlv_tp_data *tp_data) +{ + struct iwl_mld *mld = IWL_OP_MODE_GET_MLD(op_mode); + + iwl_dbg_tlv_time_point(&mld->fwrt, tp_id, tp_data); +} + +#ifdef CONFIG_PM_SLEEP +static void iwl_mld_device_powered_off(struct iwl_op_mode *op_mode) +{ + struct iwl_mld *mld = IWL_OP_MODE_GET_MLD(op_mode); + + wiphy_lock(mld->wiphy); + mld->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED; + iwl_mld_stop_fw(mld); + mld->fw_status.in_d3 = false; + wiphy_unlock(mld->wiphy); +} +#else +static void iwl_mld_device_powered_off(struct iwl_op_mode *op_mode) +{} +#endif + +static const struct iwl_op_mode_ops iwl_mld_ops = { + .start = iwl_op_mode_mld_start, + .stop = iwl_op_mode_mld_stop, + .rx = iwl_mld_rx, + .rx_rss = iwl_mld_rx_rss, + .queue_full = iwl_mld_queue_full, + .queue_not_full = iwl_mld_queue_not_full, + .hw_rf_kill = iwl_mld_set_hw_rfkill_state, + .free_skb = iwl_mld_free_skb, + .nic_error = iwl_mld_nic_error, + .dump_error = iwl_mld_dump_error, + .sw_reset = iwl_mld_sw_reset, + .time_point = iwl_mld_time_point, + .device_powered_off = pm_sleep_ptr(iwl_mld_device_powered_off), +}; + +struct iwl_mld_mod_params iwlmld_mod_params = { + .power_scheme = IWL_POWER_SCHEME_BPS, +}; + +module_param_named(power_scheme, iwlmld_mod_params.power_scheme, int, 0444); +MODULE_PARM_DESC(power_scheme, + "power management scheme: 1-active, 2-balanced, default: 2"); diff --git a/drivers/net/wireless/intel/iwlwifi/mld/mld.h b/drivers/net/wireless/intel/iwlwifi/mld/mld.h new file mode 100644 index 0000000000000..5eceaaf7696db --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mld/mld.h @@ -0,0 +1,582 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * Copyright (C) 2024-2025 Intel Corporation + */ +#ifndef __iwl_mld_h__ +#define __iwl_mld_h__ + +#include +#include + +#include "iwl-trans.h" +#include "iwl-op-mode.h" +#include "fw/runtime.h" +#include "fw/notif-wait.h" +#include "fw/api/commands.h" +#include "fw/api/scan.h" +#include "fw/api/mac-cfg.h" +#include "fw/api/mac.h" +#include "fw/api/phy-ctxt.h" +#include "fw/api/datapath.h" +#include "fw/api/rx.h" +#include "fw/api/rs.h" +#include "fw/api/context.h" +#include "fw/api/coex.h" +#include "fw/api/location.h" + +#include "fw/dbg.h" + +#include
"notif.h" +#include "scan.h" +#include "rx.h" +#include "thermal.h" +#include "low_latency.h" +#include "constants.h" +#include "ptp.h" +#include "time_sync.h" +#include "ftm-initiator.h" + +/** + * DOC: Introduction + * + * iwlmld is an operation mode (a.k.a. op_mode) for Intel wireless devices. + * It is used for devices that ship after 2024 which typically support + * the WiFi-7 features. MLD stands for multi-link device. Note that there are + * devices that do not support WiFi-7 or even WiFi 6E and yet use iwlmld, but + * the firmware APIs used in this driver are WiFi-7 compatible. + * + * In the architecture of iwlwifi, an op_mode is a layer that translates + * mac80211's APIs into commands for the firmware and, of course, notifications + * from the firmware to mac80211's APIs. An op_mode must implement the + * interface defined in iwl-op-mode.h to interact with the transport layer + * which allows to send and receive data to the device, start the hardware, + * etc... + */ + +/** + * DOC: Locking policy + * + * iwlmld has a very simple locking policy: it doesn't have any mutexes. It + * relies on cfg80211's wiphy->mtx and takes the lock when needed. All the + * control flows originating from mac80211 already acquired the lock, so that + * part is trivial, but also notifications that are received from the firmware + * and handled asynchronously are handled only after having taken the lock. + * This is described in notif.c. + * There are spin_locks needed to synchronize with the data path, around the + * allocation of the queues, for example. + */ + +/** + * DOC: Debugfs + * + * iwlmld adds its share of debugfs hooks and its handlers are synchronized + * with the wiphy_lock using wiphy_locked_debugfs. This avoids races against + * resources deletion while the debugfs hook is being used. + */ + +/** + * DOC: Main resources + * + * iwlmld is designed with the life cycle of the resource in mind. The + * resources are: + * + * - struct iwl_mld (matches mac80211's struct ieee80211_hw) + * + * - struct iwl_mld_vif (matches macu80211's struct ieee80211_vif) + * iwl_mld_vif contains an array of pointers to struct iwl_mld_link + * which describe the links for this vif. + * + * - struct iwl_mld_sta (matches mac80211's struct ieee80211_sta) + * iwl_mld_sta contains an array of points to struct iwl_mld_link_sta + * which describes the link stations for this station + * + * Each object has properties that can survive a firmware reset or not. + * Asynchronous firmware notifications can declare themselves as dependent on a + * certain instance of those resources and that means that the notifications + * will be cancelled once the instance is destroyed. + */ + +#define IWL_MLD_MAX_ADDRESSES 5 + +/** + * struct iwl_mld - MLD op mode + * + * @fw_id_to_bss_conf: maps a fw id of a link to the corresponding + * ieee80211_bss_conf. + * @fw_id_to_vif: maps a fw id of a MAC context to the corresponding + * ieee80211_vif. Mapping is valid only when the MAC exists in the fw. + * @fw_id_to_txq: maps a fw id of a txq to the corresponding + * ieee80211_txq. + * @used_phy_ids: a bitmap of the phy IDs used. If a bit is set, it means + * that the index of this bit is already used as a PHY id. + * @num_igtks: the number if iGTKs that were sent to the FW. 
+ * @monitor: monitor related data + * @monitor.on: does a monitor vif exist (singleton hence bool) + * @monitor.ampdu_ref: the id of the A-MPDU for sniffer + * @monitor.ampdu_toggle: the state of the previous packet to track A-MPDU + * @monitor.cur_aid: current association id tracked by the sniffer + * @monitor.cur_bssid: current bssid tracked by the sniffer + * @monitor.p80: primary channel position relative to the whole bandwidth, in + * steps of 80 MHz + * @fw_id_to_link_sta: maps a fw id of a sta to the corresponding + * ieee80211_link_sta. This is not cleaned up on restart since we want to + * preserve the fw sta ids during a restart (for SN/PN restoring). + * FW ids of internal stations will be mapped to ERR_PTR, and will be + * re-allocated during a restart, so make sure to free them in restart + * cleanup using iwl_mld_free_internal_sta + * @netdetect: indicates the FW is in suspend mode with netdetect configured + * @p2p_device_vif: points to the p2p device vif if it exists + * @bt_is_active: indicates that BT is active + * @dev: pointer to device struct. For printing purposes + * @trans: pointer to the transport layer + * @cfg: pointer to the device configuration + * @fw: a pointer to the fw object + * @hw: pointer to the hw object. + * @wiphy: a pointer to the wiphy struct, for easier access to it. + * @nvm_data: pointer to the nvm_data that includes all our capabilities + * @fwrt: fw runtime data + * @debugfs_dir: debugfs directory + * @notif_wait: notification wait related data. + * @async_handlers_list: a list of all async RX handlers. When a notification + * with an async handler is received, it is added to this list. + * When &async_handlers_wk runs, it runs these handlers one by one. + * @async_handlers_lock: a lock for &async_handlers_list. Syncs + * &async_handlers_wk and the RX notification path. + * @async_handlers_wk: A work to run all async RX handlers from + * &async_handlers_list. + * @ct_kill_exit_wk: worker to exit thermal kill + * @fw_status: bitmap of fw status bits + * @running: true if the firmware is running + * @do_not_dump_once: true if firmware dump must be prevented once + * @in_d3: indicates FW is in suspend mode and should be resumed + * @in_hw_restart: indicates that we are currently in the restart flow, + * rather than fully restarted. Should be unset upon restart. + * @radio_kill: bitmap of radio kill status + * @radio_kill.hw: radio is killed by hw switch + * @radio_kill.ct: radio is killed because the device is too hot + * @addresses: device MAC addresses. + * @scan: instance of the scan object + * @wowlan: WoWLAN support data. + * @led: the led device + * @mcc_src: the source id of the MCC, comes from the firmware + * @bios_enable_puncturing: is puncturing enabled by bios + * @fw_id_to_ba: maps a fw (BA) id to the corresponding Block Ack session data. + * @num_rx_ba_sessions: tracks the number of active Rx Block Ack (BA) sessions. + * The driver ensures that new BA sessions are blocked once the maximum + * supported by the firmware is reached, preventing firmware asserts. + * @rxq_sync: manages RX queue sync state + * @txqs_to_add: a list of &ieee80211_txq's to allocate in &add_txqs_wk + * @add_txqs_wk: a worker to allocate txqs. + * @add_txqs_lock: to lock the &txqs_to_add list. + * @error_recovery_buf: pointer to the recovery buffer that will be read + * from firmware upon fw/hw error and sent back to the firmware in + * reconfig flow (after NIC reset). + * @mcast_filter_cmd: pointer to the multicast filter command.
+ * @mgmt_tx_ant: stores the last TX antenna index; used for setting + * TX rate_n_flags for non-STA mgmt frames (toggles on every TX failure). + * @low_latency: low-latency manager. + * @tzone: thermal zone device's data + * @cooling_dev: cooling device's related data + * @ibss_manager: in IBSS mode (only one vif can be active), indicates + * whether the firmware reported that we transmitted the last beacon, + * i.e. whether we are the IBSS manager for that time and need to + * respond to probe requests + * @ptp_data: data of the PTP clock + * @time_sync: time sync data. + * @ftm_initiator: FTM initiator data + */ +struct iwl_mld { + /* Add here fields that need clean up on restart */ + struct_group(zeroed_on_hw_restart, + struct ieee80211_bss_conf __rcu *fw_id_to_bss_conf[IWL_FW_MAX_LINK_ID + 1]; + struct ieee80211_vif __rcu *fw_id_to_vif[NUM_MAC_INDEX_DRIVER]; + struct ieee80211_txq __rcu *fw_id_to_txq[IWL_MAX_TVQM_QUEUES]; + u8 used_phy_ids: NUM_PHY_CTX; + u8 num_igtks; + struct { + bool on; + u32 ampdu_ref; + bool ampdu_toggle; + u8 p80; +#ifdef CONFIG_IWLWIFI_DEBUGFS + __le16 cur_aid; + u8 cur_bssid[ETH_ALEN]; +#endif + } monitor; +#ifdef CONFIG_PM_SLEEP + bool netdetect; +#endif /* CONFIG_PM_SLEEP */ + struct ieee80211_vif *p2p_device_vif; + bool bt_is_active; + ); + struct ieee80211_link_sta __rcu *fw_id_to_link_sta[IWL_STATION_COUNT_MAX]; + /* And here fields that survive a fw restart */ + struct device *dev; + struct iwl_trans *trans; + const struct iwl_cfg *cfg; + const struct iwl_fw *fw; + struct ieee80211_hw *hw; + struct wiphy *wiphy; + struct iwl_nvm_data *nvm_data; + struct iwl_fw_runtime fwrt; + struct dentry *debugfs_dir; + struct iwl_notif_wait_data notif_wait; + struct list_head async_handlers_list; + spinlock_t async_handlers_lock; + struct wiphy_work async_handlers_wk; + struct wiphy_delayed_work ct_kill_exit_wk; + + struct { + u32 running:1, + do_not_dump_once:1, +#ifdef CONFIG_PM_SLEEP + in_d3:1, +#endif + in_hw_restart:1; + + } fw_status; + + struct { + u32 hw:1, + ct:1; + } radio_kill; + + struct mac_address addresses[IWL_MLD_MAX_ADDRESSES]; + struct iwl_mld_scan scan; +#ifdef CONFIG_PM_SLEEP + struct wiphy_wowlan_support wowlan; +#endif /* CONFIG_PM_SLEEP */ +#ifdef CONFIG_IWLWIFI_LEDS + struct led_classdev led; +#endif + enum iwl_mcc_source mcc_src; + bool bios_enable_puncturing; + + struct iwl_mld_baid_data __rcu *fw_id_to_ba[IWL_MAX_BAID]; + u8 num_rx_ba_sessions; + + struct iwl_mld_rx_queues_sync rxq_sync; + + struct list_head txqs_to_add; + struct wiphy_work add_txqs_wk; + spinlock_t add_txqs_lock; + + u8 *error_recovery_buf; + struct iwl_mcast_filter_cmd *mcast_filter_cmd; + + u8 mgmt_tx_ant; + + struct iwl_mld_low_latency low_latency; + + bool ibss_manager; +#ifdef CONFIG_THERMAL + struct thermal_zone_device *tzone; + struct iwl_mld_cooling_device cooling_dev; +#endif + + struct ptp_data ptp_data; + + struct iwl_mld_time_sync_data __rcu *time_sync; + + struct ftm_initiator_data ftm_initiator; +}; + +/* memset the part of the struct that requires cleanup on restart */ +#define CLEANUP_STRUCT(_ptr) \ + memset((void *)&(_ptr)->zeroed_on_hw_restart, 0, \ + sizeof((_ptr)->zeroed_on_hw_restart)) + +/* Cleanup function for struct iwl_mld, will be called on restart */ +static inline void +iwl_cleanup_mld(struct iwl_mld *mld) +{ + CLEANUP_STRUCT(mld); + CLEANUP_STRUCT(&mld->scan); + +#ifdef CONFIG_PM_SLEEP + mld->fw_status.in_d3 = false; +#endif + + iwl_mld_low_latency_restart_cleanup(mld); + + /* Empty the list of async notification handlers so we won't process + *
notifications from the dead fw after the reconfig flow. + */ + iwl_mld_purge_async_handlers_list(mld); +} + +enum iwl_power_scheme { + IWL_POWER_SCHEME_CAM = 1, + IWL_POWER_SCHEME_BPS, +}; + +/** + * struct iwl_mld_mod_params - module parameters for iwlmld + * @power_scheme: one of enum iwl_power_scheme + */ +struct iwl_mld_mod_params { + int power_scheme; +}; + +extern struct iwl_mld_mod_params iwlmld_mod_params; + +/* Extract MLD priv from op_mode */ +#define IWL_OP_MODE_GET_MLD(_iwl_op_mode) \ + ((struct iwl_mld *)(_iwl_op_mode)->op_mode_specific) + +#define IWL_MAC80211_GET_MLD(_hw) \ + IWL_OP_MODE_GET_MLD((struct iwl_op_mode *)((_hw)->priv)) + +#ifdef CONFIG_IWLWIFI_DEBUGFS +void +iwl_mld_add_debugfs_files(struct iwl_mld *mld, struct dentry *debugfs_dir); +#else +static inline void +iwl_mld_add_debugfs_files(struct iwl_mld *mld, struct dentry *debugfs_dir) +{} +#endif + +int iwl_mld_load_fw(struct iwl_mld *mld); +void iwl_mld_stop_fw(struct iwl_mld *mld); +int iwl_mld_start_fw(struct iwl_mld *mld); +void iwl_mld_send_recovery_cmd(struct iwl_mld *mld, u32 flags); + +static inline void iwl_mld_set_ctkill(struct iwl_mld *mld, bool state) +{ + mld->radio_kill.ct = state; + + wiphy_rfkill_set_hw_state(mld->wiphy, + mld->radio_kill.hw || mld->radio_kill.ct); +} + +static inline void iwl_mld_set_hwkill(struct iwl_mld *mld, bool state) +{ + mld->radio_kill.hw = state; + + wiphy_rfkill_set_hw_state(mld->wiphy, + mld->radio_kill.hw || mld->radio_kill.ct); +} + +static inline u8 iwl_mld_get_valid_tx_ant(const struct iwl_mld *mld) +{ + u8 tx_ant = mld->fw->valid_tx_ant; + + if (mld->nvm_data && mld->nvm_data->valid_tx_ant) + tx_ant &= mld->nvm_data->valid_tx_ant; + + return tx_ant; +} + +static inline u8 iwl_mld_get_valid_rx_ant(const struct iwl_mld *mld) +{ + u8 rx_ant = mld->fw->valid_rx_ant; + + if (mld->nvm_data && mld->nvm_data->valid_rx_ant) + rx_ant &= mld->nvm_data->valid_rx_ant; + + return rx_ant; +} + +static inline u8 iwl_mld_nl80211_band_to_fw(enum nl80211_band band) +{ + switch (band) { + case NL80211_BAND_2GHZ: + return PHY_BAND_24; + case NL80211_BAND_5GHZ: + return PHY_BAND_5; + case NL80211_BAND_6GHZ: + return PHY_BAND_6; + default: + WARN_ONCE(1, "Unsupported band (%u)\n", band); + return PHY_BAND_5; + } +} + +static inline u8 iwl_mld_phy_band_to_nl80211(u8 phy_band) +{ + switch (phy_band) { + case PHY_BAND_24: + return NL80211_BAND_2GHZ; + case PHY_BAND_5: + return NL80211_BAND_5GHZ; + case PHY_BAND_6: + return NL80211_BAND_6GHZ; + default: + WARN_ONCE(1, "Unsupported phy band (%u)\n", phy_band); + return NL80211_BAND_5GHZ; + } +} + +static inline int +iwl_mld_legacy_hw_idx_to_mac80211_idx(u32 rate_n_flags, + enum nl80211_band band) +{ + int format = rate_n_flags & RATE_MCS_MOD_TYPE_MSK; + int rate = rate_n_flags & RATE_LEGACY_RATE_MSK; + bool is_lb = band == NL80211_BAND_2GHZ; + + if (format == RATE_MCS_LEGACY_OFDM_MSK) + return is_lb ? rate + IWL_FIRST_OFDM_RATE : rate; + + /* CCK is not allowed in 5 GHz */ + return is_lb ? rate : -1; +} + +extern const struct ieee80211_ops iwl_mld_hw_ops; + +/** + * enum iwl_rx_handler_context: context for Rx handler + * @RX_HANDLER_SYNC: this means that it will be called in the Rx path + * which can't acquire the wiphy->mutex. + * @RX_HANDLER_ASYNC: If the handler needs to hold wiphy->mutex + * (and only in this case!), it should be set as ASYNC. In that case, + * it will be called from a worker with wiphy->mutex held. 
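+ * Async handlers are queued on &iwl_mld.async_handlers_list and run + * from &iwl_mld.async_handlers_wk, as described in notif.c.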
+ */
+enum iwl_rx_handler_context {
+ RX_HANDLER_SYNC,
+ RX_HANDLER_ASYNC,
+};
+
+/**
+ * struct iwl_rx_handler: handler for FW notification
+ * @val_fn: input validation function.
+ * @sizes: an array that maps a version to the expected size.
+ * @fn: the function that is called to handle the notification
+ * @cmd_id: command id
+ * @n_sizes: number of elements in &sizes.
+ * @context: see &iwl_rx_handler_context
+ * @obj_type: the type of the object that this handler is related to.
+ * See &iwl_mld_object_type. Use IWL_MLD_OBJECT_TYPE_NONE if not related.
+ * @cancel: function to cancel the notification. Valid only if obj_type is not
+ * IWL_MLD_OBJECT_TYPE_NONE.
+ */
+struct iwl_rx_handler {
+ union {
+ bool (*val_fn)(struct iwl_mld *mld, struct iwl_rx_packet *pkt);
+ const struct iwl_notif_struct_size *sizes;
+ };
+ void (*fn)(struct iwl_mld *mld, struct iwl_rx_packet *pkt);
+ u16 cmd_id;
+ u8 n_sizes;
+ u8 context;
+ enum iwl_mld_object_type obj_type;
+ bool (*cancel)(struct iwl_mld *mld, struct iwl_rx_packet *pkt,
+ u32 obj_id);
+};
+
+/**
+ * struct iwl_notif_struct_size: map a notif ver to the expected size
+ *
+ * @size: the size to expect
+ * @ver: the version of the notification
+ */
+struct iwl_notif_struct_size {
+ u32 size:24, ver:8;
+};
+
+#if IS_ENABLED(CONFIG_IWLWIFI_KUNIT_TESTS)
+extern const struct iwl_hcmd_arr iwl_mld_groups[];
+extern const unsigned int global_iwl_mld_goups_size;
+extern const struct iwl_rx_handler iwl_mld_rx_handlers[];
+extern const unsigned int iwl_mld_rx_handlers_num;
+
+bool
+iwl_mld_is_dup(struct iwl_mld *mld, struct ieee80211_sta *sta,
+ struct ieee80211_hdr *hdr,
+ const struct iwl_rx_mpdu_desc *mpdu_desc,
+ struct ieee80211_rx_status *rx_status, int queue);
+
+void iwl_construct_mld(struct iwl_mld *mld, struct iwl_trans *trans,
+ const struct iwl_cfg *cfg, const struct iwl_fw *fw,
+ struct ieee80211_hw *hw, struct dentry *dbgfs_dir);
+#endif
+
+#define IWL_MLD_INVALID_FW_ID 0xff
+
+#define IWL_MLD_ALLOC_FN(_type, _mac80211_type) \
+static int \
+iwl_mld_allocate_##_type##_fw_id(struct iwl_mld *mld, \
+ u8 *fw_id, \
+ struct ieee80211_##_mac80211_type *mac80211_ptr) \
+{ \
+ u8 rand = IWL_MLD_DIS_RANDOM_FW_ID ? 
0 : get_random_u8(); \ + u8 arr_sz = ARRAY_SIZE(mld->fw_id_to_##_mac80211_type); \ + if (__builtin_types_compatible_p(typeof(*mac80211_ptr), \ + struct ieee80211_link_sta)) \ + arr_sz = mld->fw->ucode_capa.num_stations; \ + if (__builtin_types_compatible_p(typeof(*mac80211_ptr), \ + struct ieee80211_bss_conf)) \ + arr_sz = mld->fw->ucode_capa.num_links; \ + for (int i = 0; i < arr_sz; i++) { \ + u8 idx = (i + rand) % arr_sz; \ + if (rcu_access_pointer(mld->fw_id_to_##_mac80211_type[idx])) \ + continue; \ + IWL_DEBUG_INFO(mld, "Allocated at index %d / %d\n", idx, arr_sz); \ + *fw_id = idx; \ + rcu_assign_pointer(mld->fw_id_to_##_mac80211_type[idx], mac80211_ptr); \ + return 0; \ + } \ + return -ENOSPC; \ +} + +static inline struct ieee80211_bss_conf * +iwl_mld_fw_id_to_link_conf(struct iwl_mld *mld, u8 fw_link_id) +{ + if (IWL_FW_CHECK(mld, fw_link_id >= mld->fw->ucode_capa.num_links, + "Invalid fw_link_id: %d\n", fw_link_id)) + return NULL; + + return wiphy_dereference(mld->wiphy, + mld->fw_id_to_bss_conf[fw_link_id]); +} + +#define MSEC_TO_TU(_msec) ((_msec) * 1000 / 1024) + +void iwl_mld_add_vif_debugfs(struct ieee80211_hw *hw, + struct ieee80211_vif *vif); +void iwl_mld_add_link_debugfs(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf, + struct dentry *dir); +void iwl_mld_add_link_sta_debugfs(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_link_sta *link_sta, + struct dentry *dir); + +/* Utilities */ + +static inline u8 iwl_mld_mac80211_ac_to_fw_tx_fifo(enum ieee80211_ac_numbers ac) +{ + static const u8 mac80211_ac_to_fw_tx_fifo[] = { + IWL_BZ_EDCA_TX_FIFO_VO, + IWL_BZ_EDCA_TX_FIFO_VI, + IWL_BZ_EDCA_TX_FIFO_BE, + IWL_BZ_EDCA_TX_FIFO_BK, + IWL_BZ_TRIG_TX_FIFO_VO, + IWL_BZ_TRIG_TX_FIFO_VI, + IWL_BZ_TRIG_TX_FIFO_BE, + IWL_BZ_TRIG_TX_FIFO_BK, + }; + return mac80211_ac_to_fw_tx_fifo[ac]; +} + +static inline u32 +iwl_mld_get_lmac_id(struct iwl_mld *mld, enum nl80211_band band) +{ + if (!fw_has_capa(&mld->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_CDB_SUPPORT) || + band == NL80211_BAND_2GHZ) + return IWL_LMAC_24G_INDEX; + return IWL_LMAC_5G_INDEX; +} + +/* Check if we had an error, but reconfig flow didn't start yet */ +static inline bool iwl_mld_error_before_recovery(struct iwl_mld *mld) +{ + return mld->fw_status.in_hw_restart && + !iwl_trans_fw_running(mld->trans); +} + +int iwl_mld_tdls_sta_count(struct iwl_mld *mld); + +#endif /* __iwl_mld_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mld/mlo.c b/drivers/net/wireless/intel/iwlwifi/mld/mlo.c new file mode 100644 index 0000000000000..a870e169e2655 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mld/mlo.c @@ -0,0 +1,1076 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2024-2025 Intel Corporation + */ +#include "mlo.h" +#include "phy.h" + +/* Block reasons helper */ +#define HANDLE_EMLSR_BLOCKED_REASONS(HOW) \ + HOW(PREVENTION) \ + HOW(WOWLAN) \ + HOW(ROC) \ + HOW(NON_BSS) \ + HOW(TMP_NON_BSS) \ + HOW(TPT) + +static const char * +iwl_mld_get_emlsr_blocked_string(enum iwl_mld_emlsr_blocked blocked) +{ + /* Using switch without "default" will warn about missing entries */ + switch (blocked) { +#define REASON_CASE(x) case IWL_MLD_EMLSR_BLOCKED_##x: return #x; + HANDLE_EMLSR_BLOCKED_REASONS(REASON_CASE) +#undef REASON_CASE + } + + return "ERROR"; +} + +static void iwl_mld_print_emlsr_blocked(struct iwl_mld *mld, u32 mask) +{ +#define NAME_FMT(x) "%s" +#define NAME_PR(x) (mask & IWL_MLD_EMLSR_BLOCKED_##x) ? 
"[" #x "]" : "", + IWL_DEBUG_INFO(mld, + "EMLSR blocked = " HANDLE_EMLSR_BLOCKED_REASONS(NAME_FMT) + " (0x%x)\n", + HANDLE_EMLSR_BLOCKED_REASONS(NAME_PR) + mask); +#undef NAME_FMT +#undef NAME_PR +} + +/* Exit reasons helper */ +#define HANDLE_EMLSR_EXIT_REASONS(HOW) \ + HOW(BLOCK) \ + HOW(MISSED_BEACON) \ + HOW(FAIL_ENTRY) \ + HOW(CSA) \ + HOW(EQUAL_BAND) \ + HOW(LOW_RSSI) \ + HOW(LINK_USAGE) \ + HOW(BT_COEX) \ + HOW(CHAN_LOAD) \ + HOW(RFI) \ + HOW(FW_REQUEST) + +static const char * +iwl_mld_get_emlsr_exit_string(enum iwl_mld_emlsr_exit exit) +{ + /* Using switch without "default" will warn about missing entries */ + switch (exit) { +#define REASON_CASE(x) case IWL_MLD_EMLSR_EXIT_##x: return #x; + HANDLE_EMLSR_EXIT_REASONS(REASON_CASE) +#undef REASON_CASE + } + + return "ERROR"; +} + +static void iwl_mld_print_emlsr_exit(struct iwl_mld *mld, u32 mask) +{ +#define NAME_FMT(x) "%s" +#define NAME_PR(x) (mask & IWL_MLD_EMLSR_EXIT_##x) ? "[" #x "]" : "", + IWL_DEBUG_INFO(mld, + "EMLSR exit = " HANDLE_EMLSR_EXIT_REASONS(NAME_FMT) + " (0x%x)\n", + HANDLE_EMLSR_EXIT_REASONS(NAME_PR) + mask); +#undef NAME_FMT +#undef NAME_PR +} + +void iwl_mld_emlsr_prevent_done_wk(struct wiphy *wiphy, struct wiphy_work *wk) +{ + struct iwl_mld_vif *mld_vif = container_of(wk, struct iwl_mld_vif, + emlsr.prevent_done_wk.work); + struct ieee80211_vif *vif = + container_of((void *)mld_vif, struct ieee80211_vif, drv_priv); + + if (WARN_ON(!(mld_vif->emlsr.blocked_reasons & + IWL_MLD_EMLSR_BLOCKED_PREVENTION))) + return; + + iwl_mld_unblock_emlsr(mld_vif->mld, vif, + IWL_MLD_EMLSR_BLOCKED_PREVENTION); +} + +void iwl_mld_emlsr_tmp_non_bss_done_wk(struct wiphy *wiphy, + struct wiphy_work *wk) +{ + struct iwl_mld_vif *mld_vif = container_of(wk, struct iwl_mld_vif, + emlsr.tmp_non_bss_done_wk.work); + struct ieee80211_vif *vif = + container_of((void *)mld_vif, struct ieee80211_vif, drv_priv); + + if (WARN_ON(!(mld_vif->emlsr.blocked_reasons & + IWL_MLD_EMLSR_BLOCKED_TMP_NON_BSS))) + return; + + iwl_mld_unblock_emlsr(mld_vif->mld, vif, + IWL_MLD_EMLSR_BLOCKED_TMP_NON_BSS); +} + +#define IWL_MLD_TRIGGER_LINK_SEL_TIME (HZ * IWL_MLD_TRIGGER_LINK_SEL_TIME_SEC) +#define IWL_MLD_SCAN_EXPIRE_TIME (HZ * IWL_MLD_SCAN_EXPIRE_TIME_SEC) + +/* Exit reasons that can cause longer EMLSR prevention */ +#define IWL_MLD_PREVENT_EMLSR_REASONS (IWL_MLD_EMLSR_EXIT_MISSED_BEACON | \ + IWL_MLD_EMLSR_EXIT_LINK_USAGE | \ + IWL_MLD_EMLSR_EXIT_FW_REQUEST) +#define IWL_MLD_PREVENT_EMLSR_TIMEOUT (HZ * 400) + +#define IWL_MLD_EMLSR_PREVENT_SHORT (HZ * 300) +#define IWL_MLD_EMLSR_PREVENT_LONG (HZ * 600) + +static void iwl_mld_check_emlsr_prevention(struct iwl_mld *mld, + struct iwl_mld_vif *mld_vif, + enum iwl_mld_emlsr_exit reason) +{ + unsigned long delay; + + /* + * Reset the counter if more than 400 seconds have passed between one + * exit and the other, or if we exited due to a different reason. + * Will also reset the counter after the long prevention is done. + */ + if (time_after(jiffies, mld_vif->emlsr.last_exit_ts + + IWL_MLD_PREVENT_EMLSR_TIMEOUT) || + mld_vif->emlsr.last_exit_reason != reason) + mld_vif->emlsr.exit_repeat_count = 0; + + mld_vif->emlsr.last_exit_reason = reason; + mld_vif->emlsr.last_exit_ts = jiffies; + mld_vif->emlsr.exit_repeat_count++; + + /* + * Do not add a prevention when the reason was a block. For a block, + * EMLSR will be enabled again on unblock. 
+ */ + if (reason == IWL_MLD_EMLSR_EXIT_BLOCK) + return; + + /* Set prevention for a minimum of 30 seconds */ + mld_vif->emlsr.blocked_reasons |= IWL_MLD_EMLSR_BLOCKED_PREVENTION; + delay = IWL_MLD_TRIGGER_LINK_SEL_TIME; + + /* Handle repeats for reasons that can cause long prevention */ + if (mld_vif->emlsr.exit_repeat_count > 1 && + reason & IWL_MLD_PREVENT_EMLSR_REASONS) { + if (mld_vif->emlsr.exit_repeat_count == 2) + delay = IWL_MLD_EMLSR_PREVENT_SHORT; + else + delay = IWL_MLD_EMLSR_PREVENT_LONG; + + /* + * The timeouts are chosen so that this will not happen, i.e. + * IWL_MLD_EMLSR_PREVENT_LONG > IWL_MLD_PREVENT_EMLSR_TIMEOUT + */ + WARN_ON(mld_vif->emlsr.exit_repeat_count > 3); + } + + IWL_DEBUG_INFO(mld, + "Preventing EMLSR for %ld seconds due to %u exits with the reason = %s (0x%x)\n", + delay / HZ, mld_vif->emlsr.exit_repeat_count, + iwl_mld_get_emlsr_exit_string(reason), reason); + + wiphy_delayed_work_queue(mld->wiphy, + &mld_vif->emlsr.prevent_done_wk, delay); +} + +static void iwl_mld_clear_avg_chan_load_iter(struct ieee80211_hw *hw, + struct ieee80211_chanctx_conf *ctx, + void *dat) +{ + struct iwl_mld_phy *phy = iwl_mld_phy_from_mac80211(ctx); + + /* It is ok to do it for all chanctx (and not only for the ones that + * belong to the EMLSR vif) since EMLSR is not allowed if there is + * another vif. + */ + phy->avg_channel_load_not_by_us = 0; +} + +static int _iwl_mld_exit_emlsr(struct iwl_mld *mld, struct ieee80211_vif *vif, + enum iwl_mld_emlsr_exit exit, u8 link_to_keep, + bool sync) +{ + struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif); + u16 new_active_links; + int ret = 0; + + lockdep_assert_wiphy(mld->wiphy); + + /* On entry failure need to exit anyway, even if entered from debugfs */ + if (exit != IWL_MLD_EMLSR_EXIT_FAIL_ENTRY && !IWL_MLD_AUTO_EML_ENABLE) + return 0; + + /* Ignore exit request if EMLSR is not active */ + if (!iwl_mld_emlsr_active(vif)) + return 0; + + if (WARN_ON(!ieee80211_vif_is_mld(vif) || !mld_vif->authorized)) + return 0; + + if (WARN_ON(!(vif->active_links & BIT(link_to_keep)))) + link_to_keep = __ffs(vif->active_links); + + new_active_links = BIT(link_to_keep); + IWL_DEBUG_INFO(mld, + "Exiting EMLSR. reason = %s (0x%x). Current active links=0x%x, new active links = 0x%x\n", + iwl_mld_get_emlsr_exit_string(exit), exit, + vif->active_links, new_active_links); + + if (sync) + ret = ieee80211_set_active_links(vif, new_active_links); + else + ieee80211_set_active_links_async(vif, new_active_links); + + /* Update latest exit reason and check EMLSR prevention */ + iwl_mld_check_emlsr_prevention(mld, mld_vif, exit); + + /* channel_load_not_by_us is invalid when in EMLSR. + * Clear it so wrong values won't be used. + */ + ieee80211_iter_chan_contexts_atomic(mld->hw, + iwl_mld_clear_avg_chan_load_iter, + NULL); + + return ret; +} + +void iwl_mld_exit_emlsr(struct iwl_mld *mld, struct ieee80211_vif *vif, + enum iwl_mld_emlsr_exit exit, u8 link_to_keep) +{ + _iwl_mld_exit_emlsr(mld, vif, exit, link_to_keep, false); +} + +static int _iwl_mld_emlsr_block(struct iwl_mld *mld, struct ieee80211_vif *vif, + enum iwl_mld_emlsr_blocked reason, + u8 link_to_keep, bool sync) +{ + struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif); + + lockdep_assert_wiphy(mld->wiphy); + + if (!IWL_MLD_AUTO_EML_ENABLE || !iwl_mld_vif_has_emlsr_cap(vif)) + return 0; + + if (mld_vif->emlsr.blocked_reasons & reason) + return 0; + + mld_vif->emlsr.blocked_reasons |= reason; + + IWL_DEBUG_INFO(mld, + "Blocking EMLSR mode. 
reason = %s (0x%x)\n", + iwl_mld_get_emlsr_blocked_string(reason), reason); + iwl_mld_print_emlsr_blocked(mld, mld_vif->emlsr.blocked_reasons); + + if (reason == IWL_MLD_EMLSR_BLOCKED_TPT) + wiphy_delayed_work_cancel(mld_vif->mld->wiphy, + &mld_vif->emlsr.check_tpt_wk); + + return _iwl_mld_exit_emlsr(mld, vif, IWL_MLD_EMLSR_EXIT_BLOCK, + link_to_keep, sync); +} + +void iwl_mld_block_emlsr(struct iwl_mld *mld, struct ieee80211_vif *vif, + enum iwl_mld_emlsr_blocked reason, u8 link_to_keep) +{ + _iwl_mld_emlsr_block(mld, vif, reason, link_to_keep, false); +} + +int iwl_mld_block_emlsr_sync(struct iwl_mld *mld, struct ieee80211_vif *vif, + enum iwl_mld_emlsr_blocked reason, u8 link_to_keep) +{ + return _iwl_mld_emlsr_block(mld, vif, reason, link_to_keep, true); +} + +static void _iwl_mld_select_links(struct iwl_mld *mld, + struct ieee80211_vif *vif); + +void iwl_mld_unblock_emlsr(struct iwl_mld *mld, struct ieee80211_vif *vif, + enum iwl_mld_emlsr_blocked reason) +{ + struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif); + + lockdep_assert_wiphy(mld->wiphy); + + if (!IWL_MLD_AUTO_EML_ENABLE || !iwl_mld_vif_has_emlsr_cap(vif)) + return; + + if (!(mld_vif->emlsr.blocked_reasons & reason)) + return; + + mld_vif->emlsr.blocked_reasons &= ~reason; + + IWL_DEBUG_INFO(mld, + "Unblocking EMLSR mode. reason = %s (0x%x)\n", + iwl_mld_get_emlsr_blocked_string(reason), reason); + iwl_mld_print_emlsr_blocked(mld, mld_vif->emlsr.blocked_reasons); + + if (reason == IWL_MLD_EMLSR_BLOCKED_TPT) + wiphy_delayed_work_queue(mld_vif->mld->wiphy, + &mld_vif->emlsr.check_tpt_wk, + round_jiffies_relative(IWL_MLD_TPT_COUNT_WINDOW)); + + if (mld_vif->emlsr.blocked_reasons) + return; + + IWL_DEBUG_INFO(mld, "EMLSR is unblocked\n"); + iwl_mld_int_mlo_scan(mld, vif); +} + +static void +iwl_mld_vif_iter_emlsr_mode_notif(void *data, u8 *mac, + struct ieee80211_vif *vif) +{ + struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif); + struct iwl_esr_mode_notif *notif = (void *)data; + + if (!iwl_mld_vif_has_emlsr_cap(vif)) + return; + + switch (le32_to_cpu(notif->action)) { + case ESR_RECOMMEND_LEAVE: + iwl_mld_exit_emlsr(mld_vif->mld, vif, + IWL_MLD_EMLSR_EXIT_FW_REQUEST, + iwl_mld_get_primary_link(vif)); + break; + case ESR_RECOMMEND_ENTER: + case ESR_FORCE_LEAVE: + default: + IWL_WARN(mld_vif->mld, "Unexpected EMLSR notification: %d\n", + le32_to_cpu(notif->action)); + } +} + +void iwl_mld_handle_emlsr_mode_notif(struct iwl_mld *mld, + struct iwl_rx_packet *pkt) +{ + ieee80211_iterate_active_interfaces_mtx(mld->hw, + IEEE80211_IFACE_ITER_NORMAL, + iwl_mld_vif_iter_emlsr_mode_notif, + pkt->data); +} + +static void +iwl_mld_vif_iter_disconnect_emlsr(void *data, u8 *mac, + struct ieee80211_vif *vif) +{ + if (!iwl_mld_vif_has_emlsr_cap(vif)) + return; + + ieee80211_connection_loss(vif); +} + +void iwl_mld_handle_emlsr_trans_fail_notif(struct iwl_mld *mld, + struct iwl_rx_packet *pkt) +{ + const struct iwl_esr_trans_fail_notif *notif = (const void *)pkt->data; + u32 fw_link_id = le32_to_cpu(notif->link_id); + struct ieee80211_bss_conf *bss_conf = + iwl_mld_fw_id_to_link_conf(mld, fw_link_id); + + IWL_DEBUG_INFO(mld, "Failed to %s EMLSR on link %d (FW: %d), reason %d\n", + le32_to_cpu(notif->activation) ? "enter" : "exit", + bss_conf ? bss_conf->link_id : -1, + le32_to_cpu(notif->link_id), + le32_to_cpu(notif->err_code)); + + if (IWL_FW_CHECK(mld, !bss_conf, + "FW reported failure to %sactivate EMLSR on a non-existing link: %d\n", + le32_to_cpu(notif->activation) ? 
"" : "de", + fw_link_id)) { + ieee80211_iterate_active_interfaces_mtx( + mld->hw, IEEE80211_IFACE_ITER_NORMAL, + iwl_mld_vif_iter_disconnect_emlsr, NULL); + return; + } + + /* Disconnect if we failed to deactivate a link */ + if (!le32_to_cpu(notif->activation)) { + ieee80211_connection_loss(bss_conf->vif); + return; + } + + /* + * We failed to activate the second link, go back to the link specified + * by the firmware as that is the one that is still valid now. + */ + iwl_mld_exit_emlsr(mld, bss_conf->vif, IWL_MLD_EMLSR_EXIT_FAIL_ENTRY, + bss_conf->link_id); +} + +/* Active non-station link tracking */ +static void iwl_mld_count_non_bss_links(void *_data, u8 *mac, + struct ieee80211_vif *vif) +{ + struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif); + int *count = _data; + + if (ieee80211_vif_type_p2p(vif) == NL80211_IFTYPE_STATION) + return; + + *count += iwl_mld_count_active_links(mld_vif->mld, vif); +} + +struct iwl_mld_update_emlsr_block_data { + bool block; + int result; +}; + +static void +iwl_mld_vif_iter_update_emlsr_non_bss_block(void *_data, u8 *mac, + struct ieee80211_vif *vif) +{ + struct iwl_mld_update_emlsr_block_data *data = _data; + struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif); + int ret; + + if (data->block) { + ret = iwl_mld_block_emlsr_sync(mld_vif->mld, vif, + IWL_MLD_EMLSR_BLOCKED_NON_BSS, + iwl_mld_get_primary_link(vif)); + if (ret) + data->result = ret; + } else { + iwl_mld_unblock_emlsr(mld_vif->mld, vif, + IWL_MLD_EMLSR_BLOCKED_NON_BSS); + } +} + +int iwl_mld_emlsr_check_non_bss_block(struct iwl_mld *mld, + int pending_link_changes) +{ + /* An active link of a non-station vif blocks EMLSR. Upon activation + * block EMLSR on the bss vif. Upon deactivation, check if this link + * was the last non-station link active, and if so unblock the bss vif + */ + struct iwl_mld_update_emlsr_block_data block_data = {}; + int count = pending_link_changes; + + /* No need to count if we are activating a non-BSS link */ + if (count <= 0) + ieee80211_iterate_active_interfaces_mtx(mld->hw, + IEEE80211_IFACE_ITER_NORMAL, + iwl_mld_count_non_bss_links, + &count); + + /* + * We could skip updating it if the block change did not change (and + * pending_link_changes is non-zero). + */ + block_data.block = !!count; + + ieee80211_iterate_active_interfaces_mtx(mld->hw, + IEEE80211_IFACE_ITER_NORMAL, + iwl_mld_vif_iter_update_emlsr_non_bss_block, + &block_data); + + return block_data.result; +} + +#define EMLSR_SEC_LINK_MIN_PERC 10 +#define EMLSR_MIN_TX 3000 +#define EMLSR_MIN_RX 400 + +void iwl_mld_emlsr_check_tpt(struct wiphy *wiphy, struct wiphy_work *wk) +{ + struct iwl_mld_vif *mld_vif = container_of(wk, struct iwl_mld_vif, + emlsr.check_tpt_wk.work); + struct ieee80211_vif *vif = + container_of((void *)mld_vif, struct ieee80211_vif, drv_priv); + struct iwl_mld *mld = mld_vif->mld; + struct iwl_mld_sta *mld_sta; + struct iwl_mld_link *sec_link; + unsigned long total_tx = 0, total_rx = 0; + unsigned long sec_link_tx = 0, sec_link_rx = 0; + u8 sec_link_tx_perc, sec_link_rx_perc; + s8 sec_link_id; + + if (!iwl_mld_vif_has_emlsr_cap(vif) || !mld_vif->ap_sta) + return; + + mld_sta = iwl_mld_sta_from_mac80211(mld_vif->ap_sta); + + /* We only count for the AP sta in a MLO connection */ + if (!mld_sta->mpdu_counters) + return; + + /* This wk should only run when the TPT blocker isn't set. + * When the blocker is set, the decision to remove it, as well as + * clearing the counters is done in DP (to avoid having a wk every + * 5 seconds when idle. 
When the blocker is unset, we are not idle anyway)
+ */
+ if (WARN_ON(mld_vif->emlsr.blocked_reasons & IWL_MLD_EMLSR_BLOCKED_TPT))
+ return;
+ /*
+ * TPT is unblocked, need to check if the TPT criteria are still met.
+ *
+ * If EMLSR is active, then we also need to check the secondary link
+ * requirements.
+ */
+ if (iwl_mld_emlsr_active(vif)) {
+ sec_link_id = iwl_mld_get_other_link(vif, iwl_mld_get_primary_link(vif));
+ sec_link = iwl_mld_link_dereference_check(mld_vif, sec_link_id);
+ if (WARN_ON_ONCE(!sec_link))
+ return;
+ /* We need the FW ID here */
+ sec_link_id = sec_link->fw_id;
+ } else {
+ sec_link_id = -1;
+ }
+
+ /* Sum up RX and TX MPDUs from the different queues/links */
+ for (int q = 0; q < mld->trans->num_rx_queues; q++) {
+ struct iwl_mld_per_q_mpdu_counter *queue_counter =
+ &mld_sta->mpdu_counters[q];
+
+ spin_lock_bh(&queue_counter->lock);
+
+ /* The link IDs that don't exist will contain 0 */
+ for (int link = 0;
+ link < ARRAY_SIZE(queue_counter->per_link);
+ link++) {
+ total_tx += queue_counter->per_link[link].tx;
+ total_rx += queue_counter->per_link[link].rx;
+ }
+
+ if (sec_link_id != -1) {
+ sec_link_tx += queue_counter->per_link[sec_link_id].tx;
+ sec_link_rx += queue_counter->per_link[sec_link_id].rx;
+ }
+
+ memset(queue_counter->per_link, 0,
+ sizeof(queue_counter->per_link));
+
+ spin_unlock_bh(&queue_counter->lock);
+ }
+
+ IWL_DEBUG_INFO(mld, "total Tx MPDUs: %ld. total Rx MPDUs: %ld\n",
+ total_tx, total_rx);
+
+ /* If we don't have enough MPDUs - block EMLSR (this also exits it) */
+ if (total_tx < IWL_MLD_ENTER_EMLSR_TPT_THRESH &&
+ total_rx < IWL_MLD_ENTER_EMLSR_TPT_THRESH) {
+ iwl_mld_block_emlsr(mld, vif, IWL_MLD_EMLSR_BLOCKED_TPT,
+ iwl_mld_get_primary_link(vif));
+ return;
+ }
+
+ /* EMLSR is not active */
+ if (sec_link_id == -1)
+ return;
+
+ IWL_DEBUG_INFO(mld, "Secondary Link %d: Tx MPDUs: %ld. Rx MPDUs: %ld\n",
+ sec_link_id, sec_link_tx, sec_link_rx);
+
+ /* Calculate the percentage of the secondary link TX/RX */
+ sec_link_tx_perc = total_tx ? sec_link_tx * 100 / total_tx : 0;
+ sec_link_rx_perc = total_rx ? sec_link_rx * 100 / total_rx : 0;
+
+ /*
+ * The TX/RX percentage is checked only if it exceeds the required
+ * minimum. In addition, RX is checked only if the TX check failed.
+ */
+ if ((total_tx > EMLSR_MIN_TX &&
+ sec_link_tx_perc < EMLSR_SEC_LINK_MIN_PERC) ||
+ (total_rx > EMLSR_MIN_RX &&
+ sec_link_rx_perc < EMLSR_SEC_LINK_MIN_PERC)) {
+ iwl_mld_exit_emlsr(mld, vif, IWL_MLD_EMLSR_EXIT_LINK_USAGE,
+ iwl_mld_get_primary_link(vif));
+ return;
+ }
+
+ /* Check again when the next window ends */
+ wiphy_delayed_work_queue(mld_vif->mld->wiphy,
+ &mld_vif->emlsr.check_tpt_wk,
+ round_jiffies_relative(IWL_MLD_TPT_COUNT_WINDOW));
+}
+
+void iwl_mld_emlsr_unblock_tpt_wk(struct wiphy *wiphy, struct wiphy_work *wk)
+{
+ struct iwl_mld_vif *mld_vif = container_of(wk, struct iwl_mld_vif,
+ emlsr.unblock_tpt_wk);
+ struct ieee80211_vif *vif =
+ container_of((void *)mld_vif, struct ieee80211_vif, drv_priv);
+
+ iwl_mld_unblock_emlsr(mld_vif->mld, vif, IWL_MLD_EMLSR_BLOCKED_TPT);
+}
+
+/*
+ * Link selection
+ */
+
+s8 iwl_mld_get_emlsr_rssi_thresh(struct iwl_mld *mld,
+ const struct cfg80211_chan_def *chandef,
+ bool low)
+{
+ if (WARN_ON(chandef->chan->band != NL80211_BAND_2GHZ &&
+ chandef->chan->band != NL80211_BAND_5GHZ &&
+ chandef->chan->band != NL80211_BAND_6GHZ))
+ return S8_MAX;
+
+#define RSSI_THRESHOLD(_low, _bw) \
+ (_low) ? 
IWL_MLD_LOW_RSSI_THRESH_##_bw##MHZ \ + : IWL_MLD_HIGH_RSSI_THRESH_##_bw##MHZ + + switch (chandef->width) { + case NL80211_CHAN_WIDTH_20_NOHT: + case NL80211_CHAN_WIDTH_20: + /* 320 MHz has the same thresholds as 20 MHz */ + case NL80211_CHAN_WIDTH_320: + return RSSI_THRESHOLD(low, 20); + case NL80211_CHAN_WIDTH_40: + return RSSI_THRESHOLD(low, 40); + case NL80211_CHAN_WIDTH_80: + return RSSI_THRESHOLD(low, 80); + case NL80211_CHAN_WIDTH_160: + return RSSI_THRESHOLD(low, 160); + default: + WARN_ON(1); + return S8_MAX; + } +#undef RSSI_THRESHOLD +} + +static u32 +iwl_mld_emlsr_disallowed_with_link(struct iwl_mld *mld, + struct ieee80211_vif *vif, + struct iwl_mld_link_sel_data *link, + bool primary) +{ + struct wiphy *wiphy = mld->wiphy; + struct ieee80211_bss_conf *conf; + enum iwl_mld_emlsr_exit ret = 0; + + conf = wiphy_dereference(wiphy, vif->link_conf[link->link_id]); + if (WARN_ON_ONCE(!conf)) + return false; + + if (link->chandef->chan->band == NL80211_BAND_2GHZ && mld->bt_is_active) + ret |= IWL_MLD_EMLSR_EXIT_BT_COEX; + + if (link->signal < + iwl_mld_get_emlsr_rssi_thresh(mld, link->chandef, false)) + ret |= IWL_MLD_EMLSR_EXIT_LOW_RSSI; + + if (conf->csa_active) + ret |= IWL_MLD_EMLSR_EXIT_CSA; + + if (ret) { + IWL_DEBUG_INFO(mld, + "Link %d is not allowed for EMLSR as %s\n", + link->link_id, + primary ? "primary" : "secondary"); + iwl_mld_print_emlsr_exit(mld, ret); + } + + return ret; +} + +static u8 +iwl_mld_set_link_sel_data(struct iwl_mld *mld, + struct ieee80211_vif *vif, + struct iwl_mld_link_sel_data *data, + unsigned long usable_links, + u8 *best_link_idx) +{ + u8 n_data = 0; + u16 max_grade = 0; + unsigned long link_id; + + /* + * TODO: don't select links that weren't discovered in the last scan + * This requires mac80211 (or cfg80211) changes to forward/track when + * a BSS was last updated. cfg80211 already tracks this information but + * it is not exposed within the kernel. 
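+ * For now, this is approximated below by comparing each BSS's
+ * ts_boottime against the time of the last internal MLO scan.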
+ */ + for_each_set_bit(link_id, &usable_links, IEEE80211_MLD_MAX_NUM_LINKS) { + struct ieee80211_bss_conf *link_conf = + link_conf_dereference_protected(vif, link_id); + + if (WARN_ON_ONCE(!link_conf)) + continue; + + /* Ignore any BSS that was not seen in the last MLO scan */ + if (ktime_before(link_conf->bss->ts_boottime, + mld->scan.last_mlo_scan_time)) + continue; + + data[n_data].link_id = link_id; + data[n_data].chandef = &link_conf->chanreq.oper; + data[n_data].signal = MBM_TO_DBM(link_conf->bss->signal); + data[n_data].grade = iwl_mld_get_link_grade(mld, link_conf); + + if (n_data == 0 || data[n_data].grade > max_grade) { + max_grade = data[n_data].grade; + *best_link_idx = n_data; + } + n_data++; + } + + return n_data; +} + +static u32 +iwl_mld_get_min_chan_load_thresh(struct ieee80211_chanctx_conf *chanctx) +{ + const struct iwl_mld_phy *phy = iwl_mld_phy_from_mac80211(chanctx); + + switch (phy->chandef.width) { + case NL80211_CHAN_WIDTH_320: + case NL80211_CHAN_WIDTH_160: + return 5; + case NL80211_CHAN_WIDTH_80: + return 7; + default: + break; + } + return 10; +} + +VISIBLE_IF_IWLWIFI_KUNIT bool +iwl_mld_channel_load_allows_emlsr(struct iwl_mld *mld, + struct ieee80211_vif *vif, + const struct iwl_mld_link_sel_data *a, + const struct iwl_mld_link_sel_data *b) +{ + struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif); + struct iwl_mld_link *link_a = + iwl_mld_link_dereference_check(mld_vif, a->link_id); + struct ieee80211_chanctx_conf *chanctx_a = NULL; + u32 bw_a, bw_b, ratio; + u32 primary_load_perc; + + if (!link_a || !link_a->active) { + IWL_DEBUG_EHT(mld, "Primary link is not active. Can't enter EMLSR\n"); + return false; + } + + chanctx_a = wiphy_dereference(mld->wiphy, link_a->chan_ctx); + + if (WARN_ON(!chanctx_a)) + return false; + + primary_load_perc = + iwl_mld_phy_from_mac80211(chanctx_a)->avg_channel_load_not_by_us; + + IWL_DEBUG_EHT(mld, "Average channel load not by us: %u\n", primary_load_perc); + + if (primary_load_perc < iwl_mld_get_min_chan_load_thresh(chanctx_a)) { + IWL_DEBUG_EHT(mld, "Channel load is below the minimum threshold\n"); + return false; + } + + if (iwl_mld_vif_low_latency(mld_vif)) { + IWL_DEBUG_EHT(mld, "Low latency vif, EMLSR is allowed\n"); + return true; + } + + if (a->chandef->width <= b->chandef->width) + return true; + + bw_a = nl80211_chan_width_to_mhz(a->chandef->width); + bw_b = nl80211_chan_width_to_mhz(b->chandef->width); + ratio = bw_a / bw_b; + + switch (ratio) { + case 2: + return primary_load_perc > 25; + case 4: + return primary_load_perc > 40; + case 8: + case 16: + return primary_load_perc > 50; + } + + return false; +} +EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_mld_channel_load_allows_emlsr); + +static bool +iwl_mld_valid_emlsr_pair(struct ieee80211_vif *vif, + struct iwl_mld_link_sel_data *a, + struct iwl_mld_link_sel_data *b) +{ + struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif); + struct iwl_mld *mld = mld_vif->mld; + u32 reason_mask = 0; + + /* Per-link considerations */ + if (iwl_mld_emlsr_disallowed_with_link(mld, vif, a, true) || + iwl_mld_emlsr_disallowed_with_link(mld, vif, b, false)) + return false; + + if (a->chandef->chan->band == b->chandef->chan->band) + reason_mask |= IWL_MLD_EMLSR_EXIT_EQUAL_BAND; + if (!iwl_mld_channel_load_allows_emlsr(mld, vif, a, b)) + reason_mask |= IWL_MLD_EMLSR_EXIT_CHAN_LOAD; + + if (reason_mask) { + IWL_DEBUG_INFO(mld, + "Links %d and %d are not a valid pair for EMLSR\n", + a->link_id, b->link_id); + IWL_DEBUG_INFO(mld, + "Links bandwidth are: %d and %d\n", + 
nl80211_chan_width_to_mhz(a->chandef->width),
+ nl80211_chan_width_to_mhz(b->chandef->width));
+ iwl_mld_print_emlsr_exit(mld, reason_mask);
+ return false;
+ }
+
+ return true;
+}
+
+/* Calculation is done with fixed-point with a scaling factor of 1/256 */
+#define SCALE_FACTOR 256
+
+/*
+ * Returns the combined grade of two given links.
+ * Returns 0 if EMLSR is not allowed with these 2 links.
+ */
+static
+unsigned int iwl_mld_get_emlsr_grade(struct iwl_mld *mld,
+ struct ieee80211_vif *vif,
+ struct iwl_mld_link_sel_data *a,
+ struct iwl_mld_link_sel_data *b,
+ u8 *primary_id)
+{
+ struct ieee80211_bss_conf *primary_conf;
+ struct wiphy *wiphy = ieee80211_vif_to_wdev(vif)->wiphy;
+ unsigned int primary_load;
+
+ lockdep_assert_wiphy(wiphy);
+
+ /* a is always primary, b is always secondary */
+ if (b->grade > a->grade)
+ swap(a, b);
+
+ *primary_id = a->link_id;
+
+ if (!iwl_mld_valid_emlsr_pair(vif, a, b))
+ return 0;
+
+ primary_conf = wiphy_dereference(wiphy, vif->link_conf[*primary_id]);
+
+ if (WARN_ON_ONCE(!primary_conf))
+ return 0;
+
+ primary_load = iwl_mld_get_chan_load(mld, primary_conf);
+
+ /* The more the primary link is loaded, the more worthwhile EMLSR becomes */
+ return a->grade + ((b->grade * primary_load) / SCALE_FACTOR);
+}
+
+static void _iwl_mld_select_links(struct iwl_mld *mld,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mld_link_sel_data data[IEEE80211_MLD_MAX_NUM_LINKS];
+ struct iwl_mld_link_sel_data *best_link;
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ int max_active_links = iwl_mld_max_active_links(mld, vif);
+ u16 new_active, usable_links = ieee80211_vif_usable_links(vif);
+ u8 best_idx, new_primary, n_data;
+ u16 max_grade;
+
+ lockdep_assert_wiphy(mld->wiphy);
+
+ if (!mld_vif->authorized || hweight16(usable_links) <= 1)
+ return;
+
+ if (WARN(ktime_before(mld->scan.last_mlo_scan_time,
+ ktime_sub_ns(ktime_get_boottime_ns(),
+ 5ULL * NSEC_PER_SEC)),
+ "Last MLO scan was too long ago, can't select links\n"))
+ return;
+
+ /* The logic below is simple and not suited for more than 2 links */
+ WARN_ON_ONCE(max_active_links > 2);
+
+ n_data = iwl_mld_set_link_sel_data(mld, vif, data, usable_links,
+ &best_idx);
+
+ if (WARN(!n_data, "Couldn't find a valid grade for any link!\n"))
+ return;
+
+ /* Default to selecting the single best link */
+ best_link = &data[best_idx];
+ new_primary = best_link->link_id;
+ new_active = BIT(best_link->link_id);
+ max_grade = best_link->grade;
+
+ /* If EMLSR is not possible, activate the best link */
+ if (max_active_links == 1 || n_data == 1 ||
+ !iwl_mld_vif_has_emlsr_cap(vif) || !IWL_MLD_AUTO_EML_ENABLE ||
+ mld_vif->emlsr.blocked_reasons)
+ goto set_active;
+
+ /* Try to find the best link combination */
+ for (u8 a = 0; a < n_data; a++) {
+ for (u8 b = a + 1; b < n_data; b++) {
+ u8 best_in_pair;
+ u16 emlsr_grade =
+ iwl_mld_get_emlsr_grade(mld, vif,
+ &data[a], &data[b],
+ &best_in_pair);
+
+ /*
+ * Prefer the (new) EMLSR combination on ties, i.e.
+ * prefer EMLSR over a single link.
+ */
+ if (emlsr_grade < max_grade)
+ continue;
+
+ max_grade = emlsr_grade;
+ new_primary = best_in_pair;
+ new_active = BIT(data[a].link_id) |
+ BIT(data[b].link_id);
+ }
+ }
+
+set_active:
+ IWL_DEBUG_INFO(mld, "Link selection result: 0x%x. 
Primary = %d\n", + new_active, new_primary); + + mld_vif->emlsr.selected_primary = new_primary; + mld_vif->emlsr.selected_links = new_active; + + ieee80211_set_active_links_async(vif, new_active); +} + +static void iwl_mld_vif_iter_select_links(void *_data, u8 *mac, + struct ieee80211_vif *vif) +{ + struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif); + struct iwl_mld *mld = mld_vif->mld; + + _iwl_mld_select_links(mld, vif); +} + +void iwl_mld_select_links(struct iwl_mld *mld) +{ + ieee80211_iterate_active_interfaces_mtx(mld->hw, + IEEE80211_IFACE_ITER_NORMAL, + iwl_mld_vif_iter_select_links, + NULL); +} + +static void iwl_mld_emlsr_check_bt_iter(void *_data, u8 *mac, + struct ieee80211_vif *vif) +{ + struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif); + struct iwl_mld *mld = mld_vif->mld; + struct ieee80211_bss_conf *link; + unsigned int link_id; + + if (!mld->bt_is_active) { + iwl_mld_retry_emlsr(mld, vif); + return; + } + + /* BT is turned ON but we are not in EMLSR, nothing to do */ + if (!iwl_mld_emlsr_active(vif)) + return; + + /* In EMLSR and BT is turned ON */ + + for_each_vif_active_link(vif, link, link_id) { + if (WARN_ON(!link->chanreq.oper.chan)) + continue; + + if (link->chanreq.oper.chan->band == NL80211_BAND_2GHZ) { + iwl_mld_exit_emlsr(mld, vif, IWL_MLD_EMLSR_EXIT_BT_COEX, + iwl_mld_get_primary_link(vif)); + return; + } + } +} + +void iwl_mld_emlsr_check_bt(struct iwl_mld *mld) +{ + ieee80211_iterate_active_interfaces_mtx(mld->hw, + IEEE80211_IFACE_ITER_NORMAL, + iwl_mld_emlsr_check_bt_iter, + NULL); +} + +struct iwl_mld_chan_load_data { + struct iwl_mld_phy *phy; + u32 prev_chan_load_not_by_us; +}; + +static void iwl_mld_chan_load_update_iter(void *_data, u8 *mac, + struct ieee80211_vif *vif) +{ + struct iwl_mld_chan_load_data *data = _data; + const struct iwl_mld_phy *phy = data->phy; + struct ieee80211_chanctx_conf *chanctx = + container_of((const void *)phy, struct ieee80211_chanctx_conf, + drv_priv); + struct iwl_mld *mld = iwl_mld_vif_from_mac80211(vif)->mld; + struct ieee80211_bss_conf *prim_link; + unsigned int prim_link_id; + + prim_link_id = iwl_mld_get_primary_link(vif); + prim_link = link_conf_dereference_protected(vif, prim_link_id); + + if (WARN_ON(!prim_link)) + return; + + if (chanctx != rcu_access_pointer(prim_link->chanctx_conf)) + return; + + if (iwl_mld_emlsr_active(vif)) { + int chan_load = iwl_mld_get_chan_load_by_others(mld, prim_link, + true); + + if (chan_load < 0) + return; + + /* chan_load is in range [0,255] */ + if (chan_load < NORMALIZE_PERCENT_TO_255(IWL_MLD_EXIT_EMLSR_CHAN_LOAD)) + iwl_mld_exit_emlsr(mld, vif, + IWL_MLD_EMLSR_EXIT_CHAN_LOAD, + prim_link_id); + } else { + u32 old_chan_load = data->prev_chan_load_not_by_us; + u32 new_chan_load = phy->avg_channel_load_not_by_us; + u32 min_thresh = iwl_mld_get_min_chan_load_thresh(chanctx); + +#define THRESHOLD_CROSSED(threshold) \ + (old_chan_load <= (threshold) && new_chan_load > (threshold)) + + if (THRESHOLD_CROSSED(min_thresh) || THRESHOLD_CROSSED(25) || + THRESHOLD_CROSSED(40) || THRESHOLD_CROSSED(50)) + iwl_mld_retry_emlsr(mld, vif); +#undef THRESHOLD_CROSSED + } +} + +void iwl_mld_emlsr_check_chan_load(struct ieee80211_hw *hw, + struct iwl_mld_phy *phy, + u32 prev_chan_load_not_by_us) +{ + struct iwl_mld_chan_load_data data = { + .phy = phy, + .prev_chan_load_not_by_us = prev_chan_load_not_by_us, + }; + + ieee80211_iterate_active_interfaces_mtx(hw, + IEEE80211_IFACE_ITER_NORMAL, + iwl_mld_chan_load_update_iter, + &data); +} + +void iwl_mld_retry_emlsr(struct iwl_mld 
*mld, struct ieee80211_vif *vif)
+{
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+
+ if (!iwl_mld_vif_has_emlsr_cap(vif) || iwl_mld_emlsr_active(vif) ||
+ mld_vif->emlsr.blocked_reasons)
+ return;
+
+ iwl_mld_int_mlo_scan(mld, vif);
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/mlo.h b/drivers/net/wireless/intel/iwlwifi/mld/mlo.h
new file mode 100644
index 0000000000000..4fb1fdbe3df9c
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/mlo.h
@@ -0,0 +1,167 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2024-2025 Intel Corporation
+ */
+#ifndef __iwl_mld_mlo_h__
+#define __iwl_mld_mlo_h__
+
+#include <linux/ieee80211.h>
+#include <linux/types.h>
+#include <net/mac80211.h>
+#include "iwl-config.h"
+#include "iwl-trans.h"
+#include "iface.h"
+#include "phy.h"
+
+struct iwl_mld;
+
+void iwl_mld_emlsr_prevent_done_wk(struct wiphy *wiphy, struct wiphy_work *wk);
+void iwl_mld_emlsr_tmp_non_bss_done_wk(struct wiphy *wiphy,
+ struct wiphy_work *wk);
+
+static inline bool iwl_mld_emlsr_active(struct ieee80211_vif *vif)
+{
+ /* Set on phy context activation, so should be a good proxy */
+ return !!(vif->driver_flags & IEEE80211_VIF_EML_ACTIVE);
+}
+
+static inline bool iwl_mld_vif_has_emlsr_cap(struct ieee80211_vif *vif)
+{
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+
+ /* We only track/permit EMLSR state once authorized */
+ if (!mld_vif->authorized)
+ return false;
+
+ /* No EMLSR on dual radio devices */
+ return ieee80211_vif_type_p2p(vif) == NL80211_IFTYPE_STATION &&
+ ieee80211_vif_is_mld(vif) &&
+ vif->cfg.eml_cap & IEEE80211_EML_CAP_EMLSR_SUPP &&
+ !CSR_HW_RFID_IS_CDB(mld_vif->mld->trans->hw_rf_id);
+}
+
+static inline int
+iwl_mld_max_active_links(struct iwl_mld *mld, struct ieee80211_vif *vif)
+{
+ if (vif->type == NL80211_IFTYPE_AP)
+ return mld->fw->ucode_capa.num_beacons;
+
+ if (ieee80211_vif_type_p2p(vif) == NL80211_IFTYPE_STATION)
+ return IWL_FW_MAX_ACTIVE_LINKS_NUM;
+
+ /* For now, do not accept more links on other interface types */
+ return 1;
+}
+
+static inline int
+iwl_mld_count_active_links(struct iwl_mld *mld, struct ieee80211_vif *vif)
+{
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+ struct iwl_mld_link *mld_link;
+ int n_active = 0;
+
+ for_each_mld_vif_valid_link(mld_vif, mld_link) {
+ if (rcu_access_pointer(mld_link->chan_ctx))
+ n_active++;
+ }
+
+ return n_active;
+}
+
+static inline u8 iwl_mld_get_primary_link(struct ieee80211_vif *vif)
+{
+ struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+
+ lockdep_assert_wiphy(mld_vif->mld->wiphy);
+
+ if (!ieee80211_vif_is_mld(vif) || WARN_ON(!vif->active_links))
+ return 0;
+
+ /* In AP mode, there is no primary link */
+ if (vif->type == NL80211_IFTYPE_AP)
+ return __ffs(vif->active_links);
+
+ if (iwl_mld_emlsr_active(vif) &&
+ !WARN_ON(!(BIT(mld_vif->emlsr.primary) & vif->active_links)))
+ return mld_vif->emlsr.primary;
+
+ return __ffs(vif->active_links);
+}
+
+/*
+ * For non-MLO/single link, this will return the deflink/single active link,
+ * respectively
+ */
+static inline u8 iwl_mld_get_other_link(struct ieee80211_vif *vif, u8 link_id)
+{
+ switch (hweight16(vif->active_links)) {
+ case 0:
+ return 0;
+ default:
+ WARN_ON(1);
+ fallthrough;
+ case 1:
+ return __ffs(vif->active_links);
+ case 2:
+ return __ffs(vif->active_links & ~BIT(link_id));
+ }
+}
+
+s8 iwl_mld_get_emlsr_rssi_thresh(struct iwl_mld *mld,
+ const struct cfg80211_chan_def *chandef,
+ bool low);
+
+/* EMLSR block/unblock and exit */
+void iwl_mld_block_emlsr(struct iwl_mld *mld, 
struct ieee80211_vif *vif, + enum iwl_mld_emlsr_blocked reason, u8 link_to_keep); +int iwl_mld_block_emlsr_sync(struct iwl_mld *mld, struct ieee80211_vif *vif, + enum iwl_mld_emlsr_blocked reason, u8 link_to_keep); +void iwl_mld_unblock_emlsr(struct iwl_mld *mld, struct ieee80211_vif *vif, + enum iwl_mld_emlsr_blocked reason); +void iwl_mld_exit_emlsr(struct iwl_mld *mld, struct ieee80211_vif *vif, + enum iwl_mld_emlsr_exit exit, u8 link_to_keep); + +int iwl_mld_emlsr_check_non_bss_block(struct iwl_mld *mld, + int pending_link_changes); + +void iwl_mld_handle_emlsr_mode_notif(struct iwl_mld *mld, + struct iwl_rx_packet *pkt); +void iwl_mld_handle_emlsr_trans_fail_notif(struct iwl_mld *mld, + struct iwl_rx_packet *pkt); + +void iwl_mld_emlsr_check_tpt(struct wiphy *wiphy, struct wiphy_work *wk); +void iwl_mld_emlsr_unblock_tpt_wk(struct wiphy *wiphy, struct wiphy_work *wk); + +void iwl_mld_select_links(struct iwl_mld *mld); + +void iwl_mld_emlsr_check_bt(struct iwl_mld *mld); + +void iwl_mld_emlsr_check_chan_load(struct ieee80211_hw *hw, + struct iwl_mld_phy *phy, + u32 prev_chan_load_not_by_us); + +/** + * iwl_mld_retry_emlsr - Retry entering EMLSR + * @mld: MLD context + * @vif: VIF to retry EMLSR on + * + * Retry entering EMLSR on the given VIF. + * Use this if one of the parameters that can prevent EMLSR has changed. + */ +void iwl_mld_retry_emlsr(struct iwl_mld *mld, struct ieee80211_vif *vif); + +struct iwl_mld_link_sel_data { + u8 link_id; + const struct cfg80211_chan_def *chandef; + s32 signal; + u16 grade; +}; + +#if IS_ENABLED(CONFIG_IWLWIFI_KUNIT_TESTS) +bool iwl_mld_channel_load_allows_emlsr(struct iwl_mld *mld, + struct ieee80211_vif *vif, + const struct iwl_mld_link_sel_data *a, + const struct iwl_mld_link_sel_data *b); +#endif + +#endif /* __iwl_mld_mlo_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mld/notif.c b/drivers/net/wireless/intel/iwlwifi/mld/notif.c new file mode 100644 index 0000000000000..fc18cba8aaa8d --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mld/notif.c @@ -0,0 +1,759 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2024-2025 Intel Corporation + */ + +#include "mld.h" +#include "notif.h" +#include "scan.h" +#include "iface.h" +#include "mlo.h" +#include "iwl-trans.h" +#include "fw/file.h" +#include "fw/dbg.h" +#include "fw/api/cmdhdr.h" +#include "fw/api/mac-cfg.h" +#include "session-protect.h" +#include "fw/api/time-event.h" +#include "fw/api/tx.h" +#include "fw/api/rs.h" +#include "fw/api/offload.h" +#include "fw/api/stats.h" +#include "fw/api/rfi.h" +#include "fw/api/coex.h" + +#include "mcc.h" +#include "link.h" +#include "tx.h" +#include "rx.h" +#include "tlc.h" +#include "agg.h" +#include "mac80211.h" +#include "thermal.h" +#include "roc.h" +#include "stats.h" +#include "coex.h" +#include "time_sync.h" +#include "ftm-initiator.h" + +/* Please use this in an increasing order of the versions */ +#define CMD_VER_ENTRY(_ver, _struct) \ + { .size = sizeof(struct _struct), .ver = _ver }, +#define CMD_VERSIONS(name, ...) 
\ + static const struct iwl_notif_struct_size \ + iwl_notif_struct_sizes_##name[] = { __VA_ARGS__ }; + +#define RX_HANDLER_NO_OBJECT(_grp, _cmd, _name, _context) \ + {.cmd_id = WIDE_ID(_grp, _cmd), \ + .context = _context, \ + .fn = iwl_mld_handle_##_name, \ + .sizes = iwl_notif_struct_sizes_##_name, \ + .n_sizes = ARRAY_SIZE(iwl_notif_struct_sizes_##_name), \ + }, + +/* Use this for Rx handlers that do not need notification validation */ +#define RX_HANDLER_NO_VAL(_grp, _cmd, _name, _context) \ + {.cmd_id = WIDE_ID(_grp, _cmd), \ + .context = _context, \ + .fn = iwl_mld_handle_##_name, \ + }, + +#define RX_HANDLER_VAL_FN(_grp, _cmd, _name, _context) \ + { .cmd_id = WIDE_ID(_grp, _cmd), \ + .context = _context, \ + .fn = iwl_mld_handle_##_name, \ + .val_fn = iwl_mld_validate_##_name, \ + }, + +#define DEFINE_SIMPLE_CANCELLATION(name, notif_struct, id_member) \ +static bool iwl_mld_cancel_##name##_notif(struct iwl_mld *mld, \ + struct iwl_rx_packet *pkt, \ + u32 obj_id) \ +{ \ + const struct notif_struct *notif = (const void *)pkt->data; \ + \ + return obj_id == _Generic((notif)->id_member, \ + __le32: le32_to_cpu((notif)->id_member), \ + __le16: le16_to_cpu((notif)->id_member), \ + u8: (notif)->id_member); \ +} + +static bool iwl_mld_always_cancel(struct iwl_mld *mld, + struct iwl_rx_packet *pkt, + u32 obj_id) +{ + return true; +} + +/* Currently only defined for the RX_HANDLER_SIZES options. Use this for + * notifications that belong to a specific object, and that should be + * canceled when the object is removed + */ +#define RX_HANDLER_OF_OBJ(_grp, _cmd, _name, _obj_type) \ + {.cmd_id = WIDE_ID(_grp, _cmd), \ + /* Only async handlers can be canceled */ \ + .context = RX_HANDLER_ASYNC, \ + .fn = iwl_mld_handle_##_name, \ + .sizes = iwl_notif_struct_sizes_##_name, \ + .n_sizes = ARRAY_SIZE(iwl_notif_struct_sizes_##_name), \ + .obj_type = IWL_MLD_OBJECT_TYPE_##_obj_type, \ + .cancel = iwl_mld_cancel_##_name, \ + }, + +#define RX_HANDLER_OF_LINK(_grp, _cmd, _name) \ + RX_HANDLER_OF_OBJ(_grp, _cmd, _name, LINK) \ + +#define RX_HANDLER_OF_VIF(_grp, _cmd, _name) \ + RX_HANDLER_OF_OBJ(_grp, _cmd, _name, VIF) \ + +#define RX_HANDLER_OF_STA(_grp, _cmd, _name) \ + RX_HANDLER_OF_OBJ(_grp, _cmd, _name, STA) \ + +#define RX_HANDLER_OF_ROC(_grp, _cmd, _name) \ + RX_HANDLER_OF_OBJ(_grp, _cmd, _name, ROC) + +#define RX_HANDLER_OF_SCAN(_grp, _cmd, _name) \ + RX_HANDLER_OF_OBJ(_grp, _cmd, _name, SCAN) + +#define RX_HANDLER_OF_FTM_REQ(_grp, _cmd, _name) \ + RX_HANDLER_OF_OBJ(_grp, _cmd, _name, FTM_REQ) + +static void iwl_mld_handle_mfuart_notif(struct iwl_mld *mld, + struct iwl_rx_packet *pkt) +{ + struct iwl_mfuart_load_notif *mfuart_notif = (void *)pkt->data; + + IWL_DEBUG_INFO(mld, + "MFUART: installed ver: 0x%08x, external ver: 0x%08x\n", + le32_to_cpu(mfuart_notif->installed_ver), + le32_to_cpu(mfuart_notif->external_ver)); + IWL_DEBUG_INFO(mld, + "MFUART: status: 0x%08x, duration: 0x%08x image size: 0x%08x\n", + le32_to_cpu(mfuart_notif->status), + le32_to_cpu(mfuart_notif->duration), + le32_to_cpu(mfuart_notif->image_size)); +} + +static void iwl_mld_mu_mimo_iface_iterator(void *_data, u8 *mac, + struct ieee80211_vif *vif) +{ + struct ieee80211_bss_conf *bss_conf = &vif->bss_conf; + unsigned int link_id = 0; + + if (WARN(hweight16(vif->active_links) > 1, + "no support for this notif while in EMLSR 0x%x\n", + vif->active_links)) + return; + + if (ieee80211_vif_is_mld(vif)) { + link_id = __ffs(vif->active_links); + bss_conf = link_conf_dereference_check(vif, link_id); + } + + if (!WARN_ON(!bss_conf) 
&& bss_conf->mu_mimo_owner) { + const struct iwl_mu_group_mgmt_notif *notif = _data; + + BUILD_BUG_ON(sizeof(notif->membership_status) != + WLAN_MEMBERSHIP_LEN); + BUILD_BUG_ON(sizeof(notif->user_position) != + WLAN_USER_POSITION_LEN); + + /* MU-MIMO Group Id action frame is little endian. We treat + * the data received from firmware as if it came from the + * action frame, so no conversion is needed. + */ + ieee80211_update_mu_groups(vif, link_id, + (u8 *)¬if->membership_status, + (u8 *)¬if->user_position); + } +} + +/* This handler is called in SYNC mode because it needs to be serialized with + * Rx as specified in ieee80211_update_mu_groups()'s documentation. + */ +static void iwl_mld_handle_mu_mimo_grp_notif(struct iwl_mld *mld, + struct iwl_rx_packet *pkt) +{ + struct iwl_mu_group_mgmt_notif *notif = (void *)pkt->data; + + ieee80211_iterate_active_interfaces_atomic(mld->hw, + IEEE80211_IFACE_ITER_NORMAL, + iwl_mld_mu_mimo_iface_iterator, + notif); +} + +static void +iwl_mld_handle_stored_beacon_notif(struct iwl_mld *mld, + struct iwl_rx_packet *pkt) +{ + unsigned int pkt_len = iwl_rx_packet_payload_len(pkt); + struct iwl_stored_beacon_notif *sb = (void *)pkt->data; + struct ieee80211_rx_status rx_status = {}; + struct sk_buff *skb; + u32 size = le32_to_cpu(sb->common.byte_count); + + if (size == 0) + return; + + if (pkt_len < struct_size(sb, data, size)) + return; + + skb = alloc_skb(size, GFP_ATOMIC); + if (!skb) { + IWL_ERR(mld, "alloc_skb failed\n"); + return; + } + + /* update rx_status according to the notification's metadata */ + rx_status.mactime = le64_to_cpu(sb->common.tsf); + /* TSF as indicated by the firmware is at INA time */ + rx_status.flag |= RX_FLAG_MACTIME_PLCP_START; + rx_status.device_timestamp = le32_to_cpu(sb->common.system_time); + rx_status.band = + iwl_mld_phy_band_to_nl80211(le16_to_cpu(sb->common.band)); + rx_status.freq = + ieee80211_channel_to_frequency(le16_to_cpu(sb->common.channel), + rx_status.band); + + /* copy the data */ + skb_put_data(skb, sb->data, size); + memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status)); + + /* pass it as regular rx to mac80211 */ + ieee80211_rx_napi(mld->hw, NULL, skb, NULL); +} + +static void +iwl_mld_handle_channel_switch_start_notif(struct iwl_mld *mld, + struct iwl_rx_packet *pkt) +{ + struct iwl_channel_switch_start_notif *notif = (void *)pkt->data; + u32 link_id = le32_to_cpu(notif->link_id); + struct ieee80211_bss_conf *link_conf = + iwl_mld_fw_id_to_link_conf(mld, link_id); + struct ieee80211_vif *vif; + + if (WARN_ON(!link_conf)) + return; + + vif = link_conf->vif; + + IWL_DEBUG_INFO(mld, + "CSA Start Notification with vif type: %d, link_id: %d\n", + vif->type, + link_conf->link_id); + + switch (vif->type) { + case NL80211_IFTYPE_AP: + /* We don't support canceling a CSA as it was advertised + * by the AP itself + */ + if (!link_conf->csa_active) + return; + + ieee80211_csa_finish(vif, link_conf->link_id); + break; + case NL80211_IFTYPE_STATION: + if (!link_conf->csa_active) { + /* Either unexpected cs notif or mac80211 chose to + * ignore, for example in channel switch to same channel + */ + struct iwl_cancel_channel_switch_cmd cmd = { + .id = cpu_to_le32(link_id), + }; + + if (iwl_mld_send_cmd_pdu(mld, + WIDE_ID(MAC_CONF_GROUP, + CANCEL_CHANNEL_SWITCH_CMD), + &cmd)) + IWL_ERR(mld, + "Failed to cancel the channel switch\n"); + return; + } + + ieee80211_chswitch_done(vif, true, link_conf->link_id); + break; + + default: + WARN(1, "CSA on invalid vif type: %d", vif->type); + } +} + +static void 
+iwl_mld_handle_channel_switch_error_notif(struct iwl_mld *mld,
+ struct iwl_rx_packet *pkt)
+{
+ struct iwl_channel_switch_error_notif *notif = (void *)pkt->data;
+ struct ieee80211_bss_conf *link_conf;
+ struct ieee80211_vif *vif;
+ u32 link_id = le32_to_cpu(notif->link_id);
+ u32 csa_err_mask = le32_to_cpu(notif->csa_err_mask);
+
+ link_conf = iwl_mld_fw_id_to_link_conf(mld, link_id);
+ if (WARN_ON(!link_conf))
+ return;
+
+ vif = link_conf->vif;
+
+ IWL_DEBUG_INFO(mld, "FW reports CSA error: id=%u, csa_err_mask=%u\n",
+ link_id, csa_err_mask);
+
+ if (csa_err_mask & (CS_ERR_COUNT_ERROR |
+ CS_ERR_LONG_DELAY_AFTER_CS |
+ CS_ERR_TX_BLOCK_TIMER_EXPIRED))
+ ieee80211_channel_switch_disconnect(vif);
+}
+
+static void iwl_mld_handle_beacon_notification(struct iwl_mld *mld,
+ struct iwl_rx_packet *pkt)
+{
+ struct iwl_extended_beacon_notif *beacon = (void *)pkt->data;
+
+ mld->ibss_manager = !!beacon->ibss_mgr_status;
+}
+
+/**
+ * DOC: Notification versioning
+ *
+ * The firmware's notifications change from time to time. In order to
+ * differentiate between different versions of the same notification, the
+ * firmware advertises the version of each notification.
+ * Here are listed all the notifications that are supported. Several versions
+ * of the same notification can be allowed at the same time:
+ *
+ * CMD_VERSIONS(my_multi_version_notif,
+ * CMD_VER_ENTRY(1, iwl_my_multi_version_notif_ver1)
+ * CMD_VER_ENTRY(2, iwl_my_multi_version_notif_ver2))
+ *
+ * etc...
+ *
+ * The driver will enforce that the notification coming from the firmware
+ * has its version listed here and it'll also enforce that the firmware sent
+ * at least enough bytes to cover the structure listed in the CMD_VER_ENTRY.
+ */
+
+CMD_VERSIONS(scan_complete_notif,
+ CMD_VER_ENTRY(1, iwl_umac_scan_complete))
+CMD_VERSIONS(scan_iter_complete_notif,
+ CMD_VER_ENTRY(2, iwl_umac_scan_iter_complete_notif))
+CMD_VERSIONS(mfuart_notif,
+ CMD_VER_ENTRY(2, iwl_mfuart_load_notif))
+CMD_VERSIONS(update_mcc,
+ CMD_VER_ENTRY(1, iwl_mcc_chub_notif))
+CMD_VERSIONS(session_prot_notif,
+ CMD_VER_ENTRY(3, iwl_session_prot_notif))
+CMD_VERSIONS(missed_beacon_notif,
+ CMD_VER_ENTRY(5, iwl_missed_beacons_notif))
+CMD_VERSIONS(tx_resp_notif,
+ CMD_VER_ENTRY(8, iwl_tx_resp))
+CMD_VERSIONS(compressed_ba_notif,
+ CMD_VER_ENTRY(5, iwl_compressed_ba_notif)
+ CMD_VER_ENTRY(6, iwl_compressed_ba_notif))
+CMD_VERSIONS(tlc_notif,
+ CMD_VER_ENTRY(3, iwl_tlc_update_notif))
+CMD_VERSIONS(mu_mimo_grp_notif,
+ CMD_VER_ENTRY(1, iwl_mu_group_mgmt_notif))
+CMD_VERSIONS(channel_switch_start_notif,
+ CMD_VER_ENTRY(3, iwl_channel_switch_start_notif))
+CMD_VERSIONS(channel_switch_error_notif,
+ CMD_VER_ENTRY(2, iwl_channel_switch_error_notif))
+CMD_VERSIONS(ct_kill_notif,
+ CMD_VER_ENTRY(2, ct_kill_notif))
+CMD_VERSIONS(temp_notif,
+ CMD_VER_ENTRY(2, iwl_dts_measurement_notif))
+CMD_VERSIONS(stored_beacon_notif,
+ CMD_VER_ENTRY(4, iwl_stored_beacon_notif))
+CMD_VERSIONS(roc_notif,
+ CMD_VER_ENTRY(1, iwl_roc_notif))
+CMD_VERSIONS(probe_resp_data_notif,
+ CMD_VER_ENTRY(1, iwl_probe_resp_data_notif))
+CMD_VERSIONS(datapath_monitor_notif,
+ CMD_VER_ENTRY(1, iwl_datapath_monitor_notif))
+CMD_VERSIONS(stats_oper_notif,
+ CMD_VER_ENTRY(3, iwl_system_statistics_notif_oper))
+CMD_VERSIONS(stats_oper_part1_notif,
+ CMD_VER_ENTRY(4, iwl_system_statistics_part1_notif_oper))
+CMD_VERSIONS(bt_coex_notif,
+ CMD_VER_ENTRY(1, iwl_bt_coex_profile_notif))
+CMD_VERSIONS(beacon_notification,
+ CMD_VER_ENTRY(6, iwl_extended_beacon_notif))
+CMD_VERSIONS(emlsr_mode_notif,
+ 
CMD_VER_ENTRY(1, iwl_esr_mode_notif))
+CMD_VERSIONS(emlsr_trans_fail_notif,
+ CMD_VER_ENTRY(1, iwl_esr_trans_fail_notif))
+CMD_VERSIONS(uapsd_misbehaving_ap_notif,
+ CMD_VER_ENTRY(1, iwl_uapsd_misbehaving_ap_notif))
+CMD_VERSIONS(time_msmt_notif,
+ CMD_VER_ENTRY(1, iwl_time_msmt_notify))
+CMD_VERSIONS(time_sync_confirm_notif,
+ CMD_VER_ENTRY(1, iwl_time_msmt_cfm_notify))
+CMD_VERSIONS(omi_status_notif,
+ CMD_VER_ENTRY(1, iwl_omi_send_status_notif))
+CMD_VERSIONS(ftm_resp_notif, CMD_VER_ENTRY(9, iwl_tof_range_rsp_ntfy))
+
+DEFINE_SIMPLE_CANCELLATION(session_prot, iwl_session_prot_notif, mac_link_id)
+DEFINE_SIMPLE_CANCELLATION(tlc, iwl_tlc_update_notif, sta_id)
+DEFINE_SIMPLE_CANCELLATION(channel_switch_start,
+ iwl_channel_switch_start_notif, link_id)
+DEFINE_SIMPLE_CANCELLATION(channel_switch_error,
+ iwl_channel_switch_error_notif, link_id)
+DEFINE_SIMPLE_CANCELLATION(datapath_monitor, iwl_datapath_monitor_notif,
+ link_id)
+DEFINE_SIMPLE_CANCELLATION(roc, iwl_roc_notif, activity)
+DEFINE_SIMPLE_CANCELLATION(scan_complete, iwl_umac_scan_complete, uid)
+DEFINE_SIMPLE_CANCELLATION(probe_resp_data, iwl_probe_resp_data_notif,
+ mac_id)
+DEFINE_SIMPLE_CANCELLATION(uapsd_misbehaving_ap, iwl_uapsd_misbehaving_ap_notif,
+ mac_id)
+#define iwl_mld_cancel_omi_status_notif iwl_mld_always_cancel
+DEFINE_SIMPLE_CANCELLATION(ftm_resp, iwl_tof_range_rsp_ntfy, request_id)
+
+/**
+ * DOC: Handlers for fw notifications
+ *
+ * Here are listed the notification IDs (including the group ID), the handler
+ * of the notification and how it should be called:
+ *
+ * - RX_HANDLER_SYNC: will be called as part of the Rx path
+ * - RX_HANDLER_ASYNC: will be handled in a worker with the wiphy_lock held
+ *
+ * This means that if the firmware sends two notifications A and B in that
+ * order and notification A is RX_HANDLER_ASYNC and notification B is
+ * RX_HANDLER_SYNC, the handler of B will likely be called before the handler
+ * of A.
+ *
+ * This list should be in order of frequency for performance purposes.
+ * The handler can be one of two contexts, see &iwl_rx_handler_context
+ *
+ * A handler can declare that it relies on a specific object in which case it
+ * can be cancelled in case the object is deleted. In order to use this
+ * mechanism, a cancellation function is needed. The cancellation function must
+ * receive an object id (the index of that object in the firmware) and a
+ * notification payload. It'll return true if that specific notification should
+ * be cancelled upon the removal of the specific instance of the object.
+ *
+ * DEFINE_SIMPLE_CANCELLATION makes it easy to create a cancellation function
+ * that will simply return true if a given object id matches the object id in
+ * the firmware notification. 
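+ * For example, DEFINE_SIMPLE_CANCELLATION(tlc, iwl_tlc_update_notif, sta_id)
+ * above generates iwl_mld_cancel_tlc_notif(), which matches a pending TLC
+ * notification against the firmware ID of the station being removed.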
+ */ + +VISIBLE_IF_IWLWIFI_KUNIT +const struct iwl_rx_handler iwl_mld_rx_handlers[] = { + RX_HANDLER_NO_OBJECT(LEGACY_GROUP, TX_CMD, tx_resp_notif, + RX_HANDLER_SYNC) + RX_HANDLER_NO_OBJECT(LEGACY_GROUP, BA_NOTIF, compressed_ba_notif, + RX_HANDLER_SYNC) + RX_HANDLER_OF_SCAN(LEGACY_GROUP, SCAN_COMPLETE_UMAC, + scan_complete_notif) + RX_HANDLER_NO_OBJECT(LEGACY_GROUP, SCAN_ITERATION_COMPLETE_UMAC, + scan_iter_complete_notif, + RX_HANDLER_SYNC) + RX_HANDLER_NO_VAL(LEGACY_GROUP, MATCH_FOUND_NOTIFICATION, + match_found_notif, RX_HANDLER_SYNC) + + RX_HANDLER_NO_OBJECT(STATISTICS_GROUP, STATISTICS_OPER_NOTIF, + stats_oper_notif, RX_HANDLER_ASYNC) + RX_HANDLER_NO_OBJECT(STATISTICS_GROUP, STATISTICS_OPER_PART1_NOTIF, + stats_oper_part1_notif, RX_HANDLER_ASYNC) + + RX_HANDLER_NO_OBJECT(LEGACY_GROUP, MFUART_LOAD_NOTIFICATION, + mfuart_notif, RX_HANDLER_SYNC) + + RX_HANDLER_NO_OBJECT(PHY_OPS_GROUP, DTS_MEASUREMENT_NOTIF_WIDE, + temp_notif, RX_HANDLER_ASYNC) + RX_HANDLER_OF_LINK(MAC_CONF_GROUP, SESSION_PROTECTION_NOTIF, + session_prot_notif) + RX_HANDLER_OF_LINK(MAC_CONF_GROUP, MISSED_BEACONS_NOTIF, + missed_beacon_notif) + RX_HANDLER_OF_STA(DATA_PATH_GROUP, TLC_MNG_UPDATE_NOTIF, tlc_notif) + RX_HANDLER_OF_LINK(MAC_CONF_GROUP, CHANNEL_SWITCH_START_NOTIF, + channel_switch_start_notif) + RX_HANDLER_OF_LINK(MAC_CONF_GROUP, CHANNEL_SWITCH_ERROR_NOTIF, + channel_switch_error_notif) + RX_HANDLER_OF_ROC(MAC_CONF_GROUP, ROC_NOTIF, roc_notif) + RX_HANDLER_NO_OBJECT(DATA_PATH_GROUP, MU_GROUP_MGMT_NOTIF, + mu_mimo_grp_notif, RX_HANDLER_SYNC) + RX_HANDLER_NO_OBJECT(PROT_OFFLOAD_GROUP, STORED_BEACON_NTF, + stored_beacon_notif, RX_HANDLER_SYNC) + RX_HANDLER_OF_VIF(MAC_CONF_GROUP, PROBE_RESPONSE_DATA_NOTIF, + probe_resp_data_notif) + RX_HANDLER_NO_OBJECT(PHY_OPS_GROUP, CT_KILL_NOTIFICATION, + ct_kill_notif, RX_HANDLER_ASYNC) + RX_HANDLER_OF_LINK(DATA_PATH_GROUP, MONITOR_NOTIF, + datapath_monitor_notif) + RX_HANDLER_NO_OBJECT(LEGACY_GROUP, MCC_CHUB_UPDATE_CMD, update_mcc, + RX_HANDLER_ASYNC) + RX_HANDLER_NO_OBJECT(BT_COEX_GROUP, PROFILE_NOTIF, + bt_coex_notif, RX_HANDLER_ASYNC) + RX_HANDLER_NO_OBJECT(LEGACY_GROUP, BEACON_NOTIFICATION, + beacon_notification, RX_HANDLER_ASYNC) + RX_HANDLER_NO_OBJECT(DATA_PATH_GROUP, ESR_MODE_NOTIF, + emlsr_mode_notif, RX_HANDLER_ASYNC) + RX_HANDLER_NO_OBJECT(MAC_CONF_GROUP, EMLSR_TRANS_FAIL_NOTIF, + emlsr_trans_fail_notif, RX_HANDLER_ASYNC) + RX_HANDLER_OF_VIF(LEGACY_GROUP, PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION, + uapsd_misbehaving_ap_notif) + RX_HANDLER_NO_OBJECT(LEGACY_GROUP, + WNM_80211V_TIMING_MEASUREMENT_NOTIFICATION, + time_msmt_notif, RX_HANDLER_SYNC) + RX_HANDLER_NO_OBJECT(LEGACY_GROUP, + WNM_80211V_TIMING_MEASUREMENT_CONFIRM_NOTIFICATION, + time_sync_confirm_notif, RX_HANDLER_ASYNC) + RX_HANDLER_OF_LINK(DATA_PATH_GROUP, OMI_SEND_STATUS_NOTIF, + omi_status_notif) + RX_HANDLER_OF_FTM_REQ(LOCATION_GROUP, TOF_RANGE_RESPONSE_NOTIF, + ftm_resp_notif) +}; +EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_mld_rx_handlers); + +#if IS_ENABLED(CONFIG_IWLWIFI_KUNIT_TESTS) +const unsigned int iwl_mld_rx_handlers_num = ARRAY_SIZE(iwl_mld_rx_handlers); +EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_mld_rx_handlers_num); +#endif + +static bool +iwl_mld_notif_is_valid(struct iwl_mld *mld, struct iwl_rx_packet *pkt, + const struct iwl_rx_handler *handler) +{ + unsigned int size = iwl_rx_packet_payload_len(pkt); + size_t notif_ver; + + /* If n_sizes == 0, it indicates that a validation function may be used + * or that no validation is required. 
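+ * (Handlers registered with RX_HANDLER_VAL_FN provide val_fn; those
+ * registered with RX_HANDLER_NO_VAL provide neither.)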
+ */ + if (!handler->n_sizes) { + if (handler->val_fn) + return handler->val_fn(mld, pkt); + return true; + } + + notif_ver = iwl_fw_lookup_notif_ver(mld->fw, + iwl_cmd_groupid(handler->cmd_id), + iwl_cmd_opcode(handler->cmd_id), + IWL_FW_CMD_VER_UNKNOWN); + + for (int i = 0; i < handler->n_sizes; i++) { + if (handler->sizes[i].ver != notif_ver) + continue; + + if (IWL_FW_CHECK(mld, size < handler->sizes[i].size, + "unexpected notification 0x%04x size %d, need %d\n", + handler->cmd_id, size, handler->sizes[i].size)) + return false; + return true; + } + + IWL_FW_CHECK_FAILED(mld, + "notif 0x%04x ver %zu missing expected size, use version %u size\n", + handler->cmd_id, notif_ver, + handler->sizes[handler->n_sizes - 1].ver); + + return size < handler->sizes[handler->n_sizes - 1].size; +} + +struct iwl_async_handler_entry { + struct list_head list; + struct iwl_rx_cmd_buffer rxb; + const struct iwl_rx_handler *rx_h; +}; + +static void +iwl_mld_log_async_handler_op(struct iwl_mld *mld, const char *op, + struct iwl_rx_cmd_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + + IWL_DEBUG_HC(mld, + "%s async handler for notif %s (%.2x.%2x, seq 0x%x)\n", + op, iwl_get_cmd_string(mld->trans, + WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd)), + pkt->hdr.group_id, pkt->hdr.cmd, + le16_to_cpu(pkt->hdr.sequence)); +} + +static void iwl_mld_rx_notif(struct iwl_mld *mld, + struct iwl_rx_cmd_buffer *rxb, + struct iwl_rx_packet *pkt) +{ + for (int i = 0; i < ARRAY_SIZE(iwl_mld_rx_handlers); i++) { + const struct iwl_rx_handler *rx_h = &iwl_mld_rx_handlers[i]; + struct iwl_async_handler_entry *entry; + + if (rx_h->cmd_id != WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd)) + continue; + + if (!iwl_mld_notif_is_valid(mld, pkt, rx_h)) + return; + + if (rx_h->context == RX_HANDLER_SYNC) { + rx_h->fn(mld, pkt); + break; + } + + entry = kzalloc(sizeof(*entry), GFP_ATOMIC); + /* we can't do much... 
*/ + if (!entry) + return; + + /* Set the async handler entry */ + entry->rxb._page = rxb_steal_page(rxb); + entry->rxb._offset = rxb->_offset; + entry->rxb._rx_page_order = rxb->_rx_page_order; + + entry->rx_h = rx_h; + + /* Add it to the list and queue the work */ + spin_lock(&mld->async_handlers_lock); + list_add_tail(&entry->list, &mld->async_handlers_list); + spin_unlock(&mld->async_handlers_lock); + + wiphy_work_queue(mld->hw->wiphy, + &mld->async_handlers_wk); + + iwl_mld_log_async_handler_op(mld, "Queued", rxb); + break; + } + + iwl_notification_wait_notify(&mld->notif_wait, pkt); +} + +void iwl_mld_rx(struct iwl_op_mode *op_mode, struct napi_struct *napi, + struct iwl_rx_cmd_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_mld *mld = IWL_OP_MODE_GET_MLD(op_mode); + u16 cmd_id = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd); + + if (likely(cmd_id == WIDE_ID(LEGACY_GROUP, REPLY_RX_MPDU_CMD))) + iwl_mld_rx_mpdu(mld, napi, rxb, 0); + else if (cmd_id == WIDE_ID(LEGACY_GROUP, FRAME_RELEASE)) + iwl_mld_handle_frame_release_notif(mld, napi, pkt, 0); + else if (cmd_id == WIDE_ID(LEGACY_GROUP, BAR_FRAME_RELEASE)) + iwl_mld_handle_bar_frame_release_notif(mld, napi, pkt, 0); + else if (unlikely(cmd_id == WIDE_ID(DATA_PATH_GROUP, + RX_QUEUES_NOTIFICATION))) + iwl_mld_handle_rx_queues_sync_notif(mld, napi, pkt, 0); + else if (cmd_id == WIDE_ID(DATA_PATH_GROUP, RX_NO_DATA_NOTIF)) + iwl_mld_rx_monitor_no_data(mld, napi, pkt, 0); + else + iwl_mld_rx_notif(mld, rxb, pkt); +} + +void iwl_mld_rx_rss(struct iwl_op_mode *op_mode, struct napi_struct *napi, + struct iwl_rx_cmd_buffer *rxb, unsigned int queue) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_mld *mld = IWL_OP_MODE_GET_MLD(op_mode); + u16 cmd_id = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd); + + if (unlikely(queue >= mld->trans->num_rx_queues)) + return; + + if (likely(cmd_id == WIDE_ID(LEGACY_GROUP, REPLY_RX_MPDU_CMD))) + iwl_mld_rx_mpdu(mld, napi, rxb, queue); + else if (unlikely(cmd_id == WIDE_ID(DATA_PATH_GROUP, + RX_QUEUES_NOTIFICATION))) + iwl_mld_handle_rx_queues_sync_notif(mld, napi, pkt, queue); + else if (unlikely(cmd_id == WIDE_ID(LEGACY_GROUP, FRAME_RELEASE))) + iwl_mld_handle_frame_release_notif(mld, napi, pkt, queue); +} + +void iwl_mld_delete_handlers(struct iwl_mld *mld, const u16 *cmds, int n_cmds) +{ + struct iwl_async_handler_entry *entry, *tmp; + + spin_lock_bh(&mld->async_handlers_lock); + list_for_each_entry_safe(entry, tmp, &mld->async_handlers_list, list) { + bool match = false; + + for (int i = 0; i < n_cmds; i++) { + if (entry->rx_h->cmd_id == cmds[i]) { + match = true; + break; + } + } + + if (!match) + continue; + + iwl_mld_log_async_handler_op(mld, "Delete", &entry->rxb); + iwl_free_rxb(&entry->rxb); + list_del(&entry->list); + kfree(entry); + } + spin_unlock_bh(&mld->async_handlers_lock); +} + +void iwl_mld_async_handlers_wk(struct wiphy *wiphy, struct wiphy_work *wk) +{ + struct iwl_mld *mld = + container_of(wk, struct iwl_mld, async_handlers_wk); + struct iwl_async_handler_entry *entry, *tmp; + LIST_HEAD(local_list); + + /* Sync with Rx path with a lock. Remove all the entries from this + * list, add them to a local one (lock free), and then handle them. 
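+ * The detour through a local list is needed because the handlers run in
+ * wiphy_work context with the wiphy lock held and may sleep, so they
+ * cannot be called with the async_handlers_lock spinlock held.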
+ */ + spin_lock_bh(&mld->async_handlers_lock); + list_splice_init(&mld->async_handlers_list, &local_list); + spin_unlock_bh(&mld->async_handlers_lock); + + list_for_each_entry_safe(entry, tmp, &local_list, list) { + iwl_mld_log_async_handler_op(mld, "Handle", &entry->rxb); + entry->rx_h->fn(mld, rxb_addr(&entry->rxb)); + iwl_free_rxb(&entry->rxb); + list_del(&entry->list); + kfree(entry); + } +} + +void iwl_mld_purge_async_handlers_list(struct iwl_mld *mld) +{ + struct iwl_async_handler_entry *entry, *tmp; + + spin_lock_bh(&mld->async_handlers_lock); + list_for_each_entry_safe(entry, tmp, &mld->async_handlers_list, list) { + iwl_mld_log_async_handler_op(mld, "Purged", &entry->rxb); + iwl_free_rxb(&entry->rxb); + list_del(&entry->list); + kfree(entry); + } + spin_unlock_bh(&mld->async_handlers_lock); +} + +void iwl_mld_cancel_notifications_of_object(struct iwl_mld *mld, + enum iwl_mld_object_type obj_type, + u32 obj_id) +{ + struct iwl_async_handler_entry *entry, *tmp; + LIST_HEAD(cancel_list); + + lockdep_assert_wiphy(mld->wiphy); + + if (WARN_ON(obj_type == IWL_MLD_OBJECT_TYPE_NONE)) + return; + + /* Sync with RX path and remove matching entries from the async list */ + spin_lock_bh(&mld->async_handlers_lock); + list_for_each_entry_safe(entry, tmp, &mld->async_handlers_list, list) { + const struct iwl_rx_handler *rx_h = entry->rx_h; + + if (rx_h->obj_type != obj_type || WARN_ON(!rx_h->cancel)) + continue; + + if (rx_h->cancel(mld, rxb_addr(&entry->rxb), obj_id)) { + iwl_mld_log_async_handler_op(mld, "Cancel", &entry->rxb); + list_del(&entry->list); + list_add_tail(&entry->list, &cancel_list); + } + } + + spin_unlock_bh(&mld->async_handlers_lock); + + /* Free the matching entries outside of the spinlock */ + list_for_each_entry_safe(entry, tmp, &cancel_list, list) { + iwl_free_rxb(&entry->rxb); + list_del(&entry->list); + kfree(entry); + } +} diff --git a/drivers/net/wireless/intel/iwlwifi/mld/notif.h b/drivers/net/wireless/intel/iwlwifi/mld/notif.h new file mode 100644 index 0000000000000..2eaa1d4e138ee --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mld/notif.h @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * Copyright (C) 2024-2025 Intel Corporation + */ +#ifndef __iwl_mld_notif_h__ +#define __iwl_mld_notif_h__ + +struct iwl_mld; + +void iwl_mld_rx(struct iwl_op_mode *op_mode, struct napi_struct *napi, + struct iwl_rx_cmd_buffer *rxb); + +void iwl_mld_rx_rss(struct iwl_op_mode *op_mode, struct napi_struct *napi, + struct iwl_rx_cmd_buffer *rxb, unsigned int queue); + +void iwl_mld_async_handlers_wk(struct wiphy *wiphy, struct wiphy_work *wk); + +void iwl_mld_purge_async_handlers_list(struct iwl_mld *mld); + +enum iwl_mld_object_type { + IWL_MLD_OBJECT_TYPE_NONE, + IWL_MLD_OBJECT_TYPE_LINK, + IWL_MLD_OBJECT_TYPE_STA, + IWL_MLD_OBJECT_TYPE_VIF, + IWL_MLD_OBJECT_TYPE_ROC, + IWL_MLD_OBJECT_TYPE_SCAN, + IWL_MLD_OBJECT_TYPE_FTM_REQ, +}; + +void iwl_mld_cancel_notifications_of_object(struct iwl_mld *mld, + enum iwl_mld_object_type obj_type, + u32 obj_id); +void iwl_mld_delete_handlers(struct iwl_mld *mld, const u16 *cmds, int n_cmds); + +#endif /* __iwl_mld_notif_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mld/phy.c b/drivers/net/wireless/intel/iwlwifi/mld/phy.c new file mode 100644 index 0000000000000..2fbc8090088be --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mld/phy.c @@ -0,0 +1,155 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2024-2025 Intel Corporation + */ +#include + +#include "phy.h" +#include 
"hcmd.h" +#include "fw/api/phy-ctxt.h" + +int iwl_mld_allocate_fw_phy_id(struct iwl_mld *mld) +{ + int id; + unsigned long used = mld->used_phy_ids; + + for_each_clear_bit(id, &used, NUM_PHY_CTX) { + mld->used_phy_ids |= BIT(id); + return id; + } + + return -ENOSPC; +} +EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_mld_allocate_fw_phy_id); + +struct iwl_mld_chanctx_usage_data { + struct iwl_mld *mld; + struct ieee80211_chanctx_conf *ctx; + bool use_def; +}; + +static bool iwl_mld_chanctx_fils_enabled(struct ieee80211_vif *vif, + struct ieee80211_chanctx_conf *ctx) +{ + if (vif->type != NL80211_IFTYPE_AP) + return false; + + return cfg80211_channel_is_psc(ctx->def.chan) || + (ctx->def.chan->band == NL80211_BAND_6GHZ && + ctx->def.width >= NL80211_CHAN_WIDTH_80); +} + +static void iwl_mld_chanctx_usage_iter(void *_data, u8 *mac, + struct ieee80211_vif *vif) +{ + struct iwl_mld_chanctx_usage_data *data = _data; + struct ieee80211_bss_conf *link_conf; + int link_id; + + for_each_vif_active_link(vif, link_conf, link_id) { + if (rcu_access_pointer(link_conf->chanctx_conf) != data->ctx) + continue; + + if (iwl_mld_chanctx_fils_enabled(vif, data->ctx)) + data->use_def = true; + } +} + +struct cfg80211_chan_def * +iwl_mld_get_chandef_from_chanctx(struct iwl_mld *mld, + struct ieee80211_chanctx_conf *ctx) +{ + struct iwl_mld_chanctx_usage_data data = { + .mld = mld, + .ctx = ctx, + }; + + ieee80211_iterate_active_interfaces_mtx(mld->hw, + IEEE80211_IFACE_ITER_NORMAL, + iwl_mld_chanctx_usage_iter, + &data); + + return data.use_def ? &ctx->def : &ctx->min_def; +} + +static u8 +iwl_mld_nl80211_width_to_fw(enum nl80211_chan_width width) +{ + switch (width) { + case NL80211_CHAN_WIDTH_20_NOHT: + case NL80211_CHAN_WIDTH_20: + return IWL_PHY_CHANNEL_MODE20; + case NL80211_CHAN_WIDTH_40: + return IWL_PHY_CHANNEL_MODE40; + case NL80211_CHAN_WIDTH_80: + return IWL_PHY_CHANNEL_MODE80; + case NL80211_CHAN_WIDTH_160: + return IWL_PHY_CHANNEL_MODE160; + case NL80211_CHAN_WIDTH_320: + return IWL_PHY_CHANNEL_MODE320; + default: + WARN(1, "Invalid channel width=%u", width); + return IWL_PHY_CHANNEL_MODE20; + } +} + +/* Maps the driver specific control channel position (relative to the center + * freq) definitions to the fw values + */ +u8 iwl_mld_get_fw_ctrl_pos(const struct cfg80211_chan_def *chandef) +{ + int offs = chandef->chan->center_freq - chandef->center_freq1; + int abs_offs = abs(offs); + u8 ret; + + if (offs == 0) { + /* The FW is expected to check the control channel position only + * when in HT/VHT and the channel width is not 20MHz. Return + * this value as the default one. + */ + return 0; + } + + /* this results in a value 0-7, i.e. 
fitting into 0b0111 */ + ret = (abs_offs - 10) / 20; + /* But we need the value to be in 0b1011 because 0b0100 is + * IWL_PHY_CTRL_POS_ABOVE, so shift bit 2 up to land in + * IWL_PHY_CTRL_POS_OFFS_EXT (0b1000) + */ + ret = (ret & IWL_PHY_CTRL_POS_OFFS_MSK) | + ((ret & BIT(2)) << 1); + /* and add the above bit */ + ret |= (offs > 0) * IWL_PHY_CTRL_POS_ABOVE; + + return ret; +} + +int iwl_mld_phy_fw_action(struct iwl_mld *mld, + struct ieee80211_chanctx_conf *ctx, u32 action) +{ + struct iwl_mld_phy *phy = iwl_mld_phy_from_mac80211(ctx); + struct cfg80211_chan_def *chandef = &phy->chandef; + struct iwl_phy_context_cmd cmd = { + .id_and_color = cpu_to_le32(phy->fw_id), + .action = cpu_to_le32(action), + .puncture_mask = cpu_to_le16(chandef->punctured), + /* Channel info */ + .ci.channel = cpu_to_le32(chandef->chan->hw_value), + .ci.band = iwl_mld_nl80211_band_to_fw(chandef->chan->band), + .ci.width = iwl_mld_nl80211_width_to_fw(chandef->width), + .ci.ctrl_pos = iwl_mld_get_fw_ctrl_pos(chandef), + }; + int ret; + + if (ctx->ap.chan) { + cmd.sbb_bandwidth = + iwl_mld_nl80211_width_to_fw(ctx->ap.width); + cmd.sbb_ctrl_channel_loc = iwl_mld_get_fw_ctrl_pos(&ctx->ap); + } + + ret = iwl_mld_send_cmd_pdu(mld, PHY_CONTEXT_CMD, &cmd); + if (ret) + IWL_ERR(mld, "Failed to send PHY_CONTEXT_CMD ret = %d\n", ret); + + return ret; +} diff --git a/drivers/net/wireless/intel/iwlwifi/mld/phy.h b/drivers/net/wireless/intel/iwlwifi/mld/phy.h new file mode 100644 index 0000000000000..2212a89321b7e --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mld/phy.h @@ -0,0 +1,55 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * Copyright (C) 2024-2025 Intel Corporation + */ +#ifndef __iwl_mld_phy_h__ +#define __iwl_mld_phy_h__ + +#include "mld.h" + +/** + * struct iwl_mld_phy - PHY configuration parameters + * + * @fw_id: fw id of the phy. + * @chandef: the last chandef that mac80211 configured the driver + * with. Used to detect a no-op when the chanctx changes. + * @channel_load_by_us: channel load on this channel caused by + * the NIC itself, as indicated by firmware + * @avg_channel_load_not_by_us: averaged channel load on this channel caused by + * others. 
This value is invalid when in EMLSR (due to FW limitations) + * @mld: pointer to the MLD context + */ +struct iwl_mld_phy { + /* Add here fields that need clean up on hw restart */ + struct_group(zeroed_on_hw_restart, + u8 fw_id; + struct cfg80211_chan_def chandef; + ); + /* And here fields that survive a hw restart */ + u32 channel_load_by_us; + u32 avg_channel_load_not_by_us; + struct iwl_mld *mld; +}; + +static inline struct iwl_mld_phy * +iwl_mld_phy_from_mac80211(struct ieee80211_chanctx_conf *channel) +{ + return (void *)channel->drv_priv; +} + +/* Cleanup function for struct iwl_mld_phy, will be called in restart */ +static inline void +iwl_mld_cleanup_phy(struct iwl_mld *mld, struct iwl_mld_phy *phy) +{ + CLEANUP_STRUCT(phy); +} + +int iwl_mld_allocate_fw_phy_id(struct iwl_mld *mld); +int iwl_mld_phy_fw_action(struct iwl_mld *mld, + struct ieee80211_chanctx_conf *ctx, u32 action); +struct cfg80211_chan_def * +iwl_mld_get_chandef_from_chanctx(struct iwl_mld *mld, + struct ieee80211_chanctx_conf *ctx); +u8 iwl_mld_get_fw_ctrl_pos(const struct cfg80211_chan_def *chandef); + +#endif /* __iwl_mld_phy_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mld/power.c b/drivers/net/wireless/intel/iwlwifi/mld/power.c new file mode 100644 index 0000000000000..2f16c174b57e5 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mld/power.c @@ -0,0 +1,396 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2024-2025 Intel Corporation + */ +#include + +#include "mld.h" +#include "hcmd.h" +#include "power.h" +#include "iface.h" +#include "link.h" +#include "constants.h" + +static void iwl_mld_vif_ps_iterator(void *data, u8 *mac, + struct ieee80211_vif *vif) +{ + bool *ps_enable = (bool *)data; + struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif); + + if (vif->type != NL80211_IFTYPE_STATION) + return; + + *ps_enable &= !mld_vif->ps_disabled; +} + +int iwl_mld_update_device_power(struct iwl_mld *mld, bool d3) +{ + struct iwl_device_power_cmd cmd = {}; + bool enable_ps = false; + + if (iwlmld_mod_params.power_scheme != IWL_POWER_SCHEME_CAM) { + enable_ps = true; + + /* Disable power save if any STA interface has + * power save turned off + */ + ieee80211_iterate_active_interfaces_mtx(mld->hw, + IEEE80211_IFACE_ITER_NORMAL, + iwl_mld_vif_ps_iterator, + &enable_ps); + } + + if (enable_ps) + cmd.flags |= + cpu_to_le16(DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK); + + if (d3) + cmd.flags |= + cpu_to_le16(DEVICE_POWER_FLAGS_NO_SLEEP_TILL_D3_MSK); + + IWL_DEBUG_POWER(mld, + "Sending device power command with flags = 0x%X\n", + cmd.flags); + + return iwl_mld_send_cmd_pdu(mld, POWER_TABLE_CMD, &cmd); +} + +int iwl_mld_enable_beacon_filter(struct iwl_mld *mld, + const struct ieee80211_bss_conf *link_conf, + bool d3) +{ + struct iwl_beacon_filter_cmd cmd = { + IWL_BF_CMD_CONFIG_DEFAULTS, + .bf_enable_beacon_filter = cpu_to_le32(1), + .ba_enable_beacon_abort = cpu_to_le32(1), + }; + + if (ieee80211_vif_type_p2p(link_conf->vif) != NL80211_IFTYPE_STATION) + return 0; + +#ifdef CONFIG_IWLWIFI_DEBUGFS + if (iwl_mld_vif_from_mac80211(link_conf->vif)->disable_bf) + return 0; +#endif + + if (link_conf->cqm_rssi_thold) { + cmd.bf_energy_delta = + cpu_to_le32(link_conf->cqm_rssi_hyst); + /* fw uses an absolute value for this */ + cmd.bf_roaming_state = + cpu_to_le32(-link_conf->cqm_rssi_thold); + } + + if (d3) + cmd.ba_escape_timer = cpu_to_le32(IWL_BA_ESCAPE_TIMER_D3); + + return iwl_mld_send_cmd_pdu(mld, REPLY_BEACON_FILTERING_CMD, + &cmd); +} + +int 
iwl_mld_disable_beacon_filter(struct iwl_mld *mld, + struct ieee80211_vif *vif) +{ + struct iwl_beacon_filter_cmd cmd = {}; + + if (ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_STATION) + return 0; + + return iwl_mld_send_cmd_pdu(mld, REPLY_BEACON_FILTERING_CMD, + &cmd); +} + +static bool iwl_mld_power_is_radar(struct iwl_mld *mld, + const struct ieee80211_bss_conf *link_conf) +{ + const struct ieee80211_chanctx_conf *chanctx_conf; + + chanctx_conf = wiphy_dereference(mld->wiphy, link_conf->chanctx_conf); + + if (WARN_ON(!chanctx_conf)) + return false; + + return chanctx_conf->def.chan->flags & IEEE80211_CHAN_RADAR; +} + +static void iwl_mld_power_configure_uapsd(struct iwl_mld *mld, + struct iwl_mld_link *link, + struct iwl_mac_power_cmd *cmd, + bool ps_poll) +{ + bool tid_found = false; + + cmd->rx_data_timeout_uapsd = + cpu_to_le32(IWL_MLD_UAPSD_RX_DATA_TIMEOUT); + cmd->tx_data_timeout_uapsd = + cpu_to_le32(IWL_MLD_UAPSD_TX_DATA_TIMEOUT); + + /* set advanced pm flag with no uapsd ACs to enable ps-poll */ + if (ps_poll) { + cmd->flags |= cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK); + return; + } + + for (enum ieee80211_ac_numbers ac = IEEE80211_AC_VO; + ac <= IEEE80211_AC_BK; + ac++) { + if (!link->queue_params[ac].uapsd) + continue; + + cmd->flags |= + cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK | + POWER_FLAGS_UAPSD_MISBEHAVING_ENA_MSK); + + cmd->uapsd_ac_flags |= BIT(ac); + + /* QNDP TID - the highest TID with no admission control */ + if (!tid_found && !link->queue_params[ac].acm) { + tid_found = true; + switch (ac) { + case IEEE80211_AC_VO: + cmd->qndp_tid = 6; + break; + case IEEE80211_AC_VI: + cmd->qndp_tid = 5; + break; + case IEEE80211_AC_BE: + cmd->qndp_tid = 0; + break; + case IEEE80211_AC_BK: + cmd->qndp_tid = 1; + break; + } + } + } + + if (cmd->uapsd_ac_flags == (BIT(IEEE80211_AC_VO) | + BIT(IEEE80211_AC_VI) | + BIT(IEEE80211_AC_BE) | + BIT(IEEE80211_AC_BK))) { + cmd->flags |= cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK); + cmd->snooze_interval = cpu_to_le16(IWL_MLD_PS_SNOOZE_INTERVAL); + cmd->snooze_window = cpu_to_le16(IWL_MLD_PS_SNOOZE_WINDOW); + } + + cmd->uapsd_max_sp = mld->hw->uapsd_max_sp_len; +} + +static void +iwl_mld_power_config_skip_dtim(struct iwl_mld *mld, + const struct ieee80211_bss_conf *link_conf, + struct iwl_mac_power_cmd *cmd) +{ + unsigned int dtimper_tu; + unsigned int dtimper; + unsigned int skip; + + dtimper = link_conf->dtim_period ?: 1; + dtimper_tu = dtimper * link_conf->beacon_int; + + if (dtimper >= 10 || iwl_mld_power_is_radar(mld, link_conf)) + return; + + if (WARN_ON(!dtimper_tu)) + return; + + /* configure skip over dtim up to 900 TU DTIM interval */ + skip = max_t(int, 1, 900 / dtimper_tu); + + cmd->skip_dtim_periods = skip; + cmd->flags |= cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK); +} + +#define POWER_KEEP_ALIVE_PERIOD_SEC 25 +static void iwl_mld_power_build_cmd(struct iwl_mld *mld, + struct ieee80211_vif *vif, + struct iwl_mac_power_cmd *cmd, + bool d3) +{ + int dtimper, bi; + int keep_alive; + struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif); + struct ieee80211_bss_conf *link_conf = &vif->bss_conf; + struct iwl_mld_link *link = &mld_vif->deflink; + bool ps_poll = false; + + cmd->id_and_color = cpu_to_le32(mld_vif->fw_id); + + if (ieee80211_vif_is_mld(vif)) { + int link_id; + + if (WARN_ON(!vif->active_links)) + return; + + /* The firmware consumes one single configuration for the vif + * and can't differentiate between links, just pick the lowest + * link_id's configuration and use that. 
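+ * For example, if active_links is 0b1010 (link IDs 1 and 3 active),
+ * __ffs() returns 1, so link 1's DTIM/beacon parameters feed the command.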
+ */ + link_id = __ffs(vif->active_links); + link_conf = link_conf_dereference_check(vif, link_id); + link = iwl_mld_link_dereference_check(mld_vif, link_id); + + if (WARN_ON(!link_conf || !link)) + return; + } + dtimper = link_conf->dtim_period; + bi = link_conf->beacon_int; + + /* Regardless of power management state the driver must set + * keep alive period. FW will use it for sending keep alive NDPs + * immediately after association. Check that keep alive period + * is at least 3 * DTIM + */ + keep_alive = DIV_ROUND_UP(ieee80211_tu_to_usec(3 * dtimper * bi), + USEC_PER_SEC); + keep_alive = max(keep_alive, POWER_KEEP_ALIVE_PERIOD_SEC); + cmd->keep_alive_seconds = cpu_to_le16(keep_alive); + + if (iwlmld_mod_params.power_scheme != IWL_POWER_SCHEME_CAM) + cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK); + + if (!vif->cfg.ps || iwl_mld_tdls_sta_count(mld) > 0) + return; + + cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK); + + /* firmware supports LPRX for beacons at rate 1 Mbps or 6 Mbps only */ + if (link_conf->beacon_rate && + (link_conf->beacon_rate->bitrate == 10 || + link_conf->beacon_rate->bitrate == 60)) { + cmd->flags |= cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK); + cmd->lprx_rssi_threshold = POWER_LPRX_RSSI_THRESHOLD; + } + + if (d3) { + iwl_mld_power_config_skip_dtim(mld, link_conf, cmd); + cmd->rx_data_timeout = + cpu_to_le32(IWL_MLD_WOWLAN_PS_RX_DATA_TIMEOUT); + cmd->tx_data_timeout = + cpu_to_le32(IWL_MLD_WOWLAN_PS_TX_DATA_TIMEOUT); + } else if (iwl_mld_vif_low_latency(mld_vif) && vif->p2p) { + cmd->tx_data_timeout = + cpu_to_le32(IWL_MLD_SHORT_PS_TX_DATA_TIMEOUT); + cmd->rx_data_timeout = + cpu_to_le32(IWL_MLD_SHORT_PS_RX_DATA_TIMEOUT); + } else { + cmd->rx_data_timeout = + cpu_to_le32(IWL_MLD_DEFAULT_PS_RX_DATA_TIMEOUT); + cmd->tx_data_timeout = + cpu_to_le32(IWL_MLD_DEFAULT_PS_TX_DATA_TIMEOUT); + } + + /* uAPSD is only enabled for specific certifications. For those cases, + * mac80211 will allow uAPSD. Always call iwl_mld_power_configure_uapsd + * which will look at what mac80211 is saying. 
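+ * As a concrete case: only when all four ACs (VO/VI/BE/BK) are
+ * uAPSD-enabled does that function also turn on snoozing
+ * (POWER_FLAGS_SNOOZE_ENA_MSK), as can be seen above.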
+ */ +#ifdef CONFIG_IWLWIFI_DEBUGFS + ps_poll = mld_vif->use_ps_poll; +#endif + iwl_mld_power_configure_uapsd(mld, link, cmd, ps_poll); +} + +int iwl_mld_update_mac_power(struct iwl_mld *mld, struct ieee80211_vif *vif, + bool d3) +{ + struct iwl_mac_power_cmd cmd = {}; + + iwl_mld_power_build_cmd(mld, vif, &cmd, d3); + + return iwl_mld_send_cmd_pdu(mld, MAC_PM_POWER_TABLE, &cmd); +} + +static void +iwl_mld_tpe_sta_cmd_data(struct iwl_txpower_constraints_cmd *cmd, + const struct ieee80211_bss_conf *link) +{ + u8 i; + + /* NOTE: the 0 here is IEEE80211_TPE_CAT_6GHZ_DEFAULT, + * we fully ignore IEEE80211_TPE_CAT_6GHZ_SUBORDINATE + */ + + BUILD_BUG_ON(ARRAY_SIZE(cmd->psd_pwr) != + ARRAY_SIZE(link->tpe.psd_local[0].power)); + + /* if not valid, mac80211 puts default (max value) */ + for (i = 0; i < ARRAY_SIZE(cmd->psd_pwr); i++) + cmd->psd_pwr[i] = min(link->tpe.psd_local[0].power[i], + link->tpe.psd_reg_client[0].power[i]); + + BUILD_BUG_ON(ARRAY_SIZE(cmd->eirp_pwr) != + ARRAY_SIZE(link->tpe.max_local[0].power)); + + for (i = 0; i < ARRAY_SIZE(cmd->eirp_pwr); i++) + cmd->eirp_pwr[i] = min(link->tpe.max_local[0].power[i], + link->tpe.max_reg_client[0].power[i]); +} + +void +iwl_mld_send_ap_tx_power_constraint_cmd(struct iwl_mld *mld, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link) +{ + struct iwl_txpower_constraints_cmd cmd = {}; + struct iwl_mld_link *mld_link = iwl_mld_link_from_mac80211(link); + int ret; + + lockdep_assert_wiphy(mld->wiphy); + + if (!mld_link->active) + return; + + if (link->chanreq.oper.chan->band != NL80211_BAND_6GHZ) + return; + + cmd.link_id = cpu_to_le16(mld_link->fw_id); + memset(cmd.psd_pwr, DEFAULT_TPE_TX_POWER, sizeof(cmd.psd_pwr)); + memset(cmd.eirp_pwr, DEFAULT_TPE_TX_POWER, sizeof(cmd.eirp_pwr)); + + if (vif->type == NL80211_IFTYPE_AP) { + cmd.ap_type = cpu_to_le16(IWL_6GHZ_AP_TYPE_VLP); + } else if (link->power_type == IEEE80211_REG_UNSET_AP) { + return; + } else { + cmd.ap_type = cpu_to_le16(link->power_type - 1); + iwl_mld_tpe_sta_cmd_data(&cmd, link); + } + + ret = iwl_mld_send_cmd_pdu(mld, + WIDE_ID(PHY_OPS_GROUP, + AP_TX_POWER_CONSTRAINTS_CMD), + &cmd); + if (ret) + IWL_ERR(mld, + "failed to send AP_TX_POWER_CONSTRAINTS_CMD (%d)\n", + ret); +} + +int iwl_mld_set_tx_power(struct iwl_mld *mld, + struct ieee80211_bss_conf *link_conf, + s16 tx_power) +{ + u32 cmd_id = REDUCE_TX_POWER_CMD; + struct iwl_mld_link *mld_link = iwl_mld_link_from_mac80211(link_conf); + u16 u_tx_power = tx_power == IWL_DEFAULT_MAX_TX_POWER ? 
+ IWL_DEV_MAX_TX_POWER : 8 * tx_power; + struct iwl_dev_tx_power_cmd cmd = { + /* Those fields sit in the same place for v9 and v10 */ + .common.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_LINK), + .common.pwr_restriction = cpu_to_le16(u_tx_power), + }; + u8 cmd_ver = iwl_fw_lookup_cmd_ver(mld->fw, cmd_id, + IWL_FW_CMD_VER_UNKNOWN); + int len = sizeof(cmd.common); + + if (WARN_ON(!mld_link)) + return -ENODEV; + + cmd.common.link_id = cpu_to_le32(mld_link->fw_id); + + if (cmd_ver == 10) + len += sizeof(cmd.v10); + else if (cmd_ver == 9) + len += sizeof(cmd.v9); + + return iwl_mld_send_cmd_pdu(mld, cmd_id, &cmd, len); +} diff --git a/drivers/net/wireless/intel/iwlwifi/mld/power.h b/drivers/net/wireless/intel/iwlwifi/mld/power.h new file mode 100644 index 0000000000000..05ce27bef1067 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mld/power.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * Copyright (C) 2024 Intel Corporation + */ +#ifndef __iwl_mld_power_h__ +#define __iwl_mld_power_h__ + +#include + +#include "mld.h" + +int iwl_mld_update_device_power(struct iwl_mld *mld, bool d3); + +int iwl_mld_enable_beacon_filter(struct iwl_mld *mld, + const struct ieee80211_bss_conf *link_conf, + bool d3); + +int iwl_mld_disable_beacon_filter(struct iwl_mld *mld, + struct ieee80211_vif *vif); + +int iwl_mld_update_mac_power(struct iwl_mld *mld, struct ieee80211_vif *vif, + bool d3); + +void +iwl_mld_send_ap_tx_power_constraint_cmd(struct iwl_mld *mld, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link); + +int iwl_mld_set_tx_power(struct iwl_mld *mld, + struct ieee80211_bss_conf *link_conf, + s16 tx_power); + +#endif /* __iwl_mld_power_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mld/ptp.c b/drivers/net/wireless/intel/iwlwifi/mld/ptp.c new file mode 100644 index 0000000000000..d5c3f853d96c1 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mld/ptp.c @@ -0,0 +1,321 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2025 Intel Corporation + */ + +#include "mld.h" +#include "iwl-debug.h" +#include "hcmd.h" +#include "ptp.h" +#include + +/* The scaled_ppm parameter is ppm (parts per million) with a 16-bit fractional + * part, which means that a value of 1 in one of those fields actually means + * 2^-16 ppm, and 2^16=65536 is 1 ppm. + */ +#define PTP_SCALE_FACTOR 65536000000ULL + +#define IWL_PTP_GP2_WRAP 0x100000000ULL +#define IWL_PTP_WRAP_TIME (3600 * HZ) +#define IWL_PTP_WRAP_THRESHOLD_USEC (5000) + +static int iwl_mld_get_systime(struct iwl_mld *mld, u32 *gp2) +{ + *gp2 = iwl_read_prph(mld->trans, mld->trans->cfg->gp2_reg_addr); + + if (*gp2 == 0x5a5a5a5a) + return -EINVAL; + + return 0; +} + +static void iwl_mld_ptp_update_new_read(struct iwl_mld *mld, u32 gp2) +{ + IWL_DEBUG_PTP(mld, "PTP: last_gp2=%u, new gp2 read=%u\n", + mld->ptp_data.last_gp2, gp2); + + /* If the difference is above the threshold, assume it's a wraparound. + * Otherwise assume it's an old read and ignore it.
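+ * As a worked example: with last_gp2 == 0xfffff000, a new read of 0x10
+ * is a backwards jump far larger than the 5000 usec threshold, so it is
+ * counted as a 32-bit wraparound; a new read of 0xffffee00 differs by
+ * only 512 usec and is discarded as a stale read.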
+ */ + if (gp2 < mld->ptp_data.last_gp2) { + if (mld->ptp_data.last_gp2 - gp2 < + IWL_PTP_WRAP_THRESHOLD_USEC) { + IWL_DEBUG_PTP(mld, + "PTP: ignore old read (gp2=%u, last_gp2=%u)\n", + gp2, mld->ptp_data.last_gp2); + return; + } + + mld->ptp_data.wrap_counter++; + IWL_DEBUG_PTP(mld, + "PTP: wraparound detected (new counter=%u)\n", + mld->ptp_data.wrap_counter); + } + + mld->ptp_data.last_gp2 = gp2; + schedule_delayed_work(&mld->ptp_data.dwork, IWL_PTP_WRAP_TIME); +} + +u64 iwl_mld_ptp_get_adj_time(struct iwl_mld *mld, u64 base_time_ns) +{ + struct ptp_data *data = &mld->ptp_data; + u64 scale_time_gp2_ns = mld->ptp_data.scale_update_gp2 * NSEC_PER_USEC; + u64 res; + u64 diff; + s64 scaled_diff; + + lockdep_assert_held(&data->lock); + + iwl_mld_ptp_update_new_read(mld, + div64_u64(base_time_ns, NSEC_PER_USEC)); + + base_time_ns = base_time_ns + + (data->wrap_counter * IWL_PTP_GP2_WRAP * NSEC_PER_USEC); + + /* It is possible that a GP2 timestamp was received from fw before the + * last scale update. + */ + if (base_time_ns < scale_time_gp2_ns) { + diff = scale_time_gp2_ns - base_time_ns; + scaled_diff = -mul_u64_u64_div_u64(diff, + data->scaled_freq, + PTP_SCALE_FACTOR); + } else { + diff = base_time_ns - scale_time_gp2_ns; + scaled_diff = mul_u64_u64_div_u64(diff, + data->scaled_freq, + PTP_SCALE_FACTOR); + } + + IWL_DEBUG_PTP(mld, "base_time=%llu diff ns=%llu scaled_diff_ns=%lld\n", + (unsigned long long)base_time_ns, + (unsigned long long)diff, (long long)scaled_diff); + + res = data->scale_update_adj_time_ns + data->delta + scaled_diff; + + IWL_DEBUG_PTP(mld, "scale_update_ns=%llu delta=%lld adj=%llu\n", + (unsigned long long)data->scale_update_adj_time_ns, + (long long)data->delta, (unsigned long long)res); + return res; +} + +static int iwl_mld_ptp_gettime(struct ptp_clock_info *ptp, + struct timespec64 *ts) +{ + struct iwl_mld *mld = container_of(ptp, struct iwl_mld, + ptp_data.ptp_clock_info); + struct ptp_data *data = &mld->ptp_data; + u32 gp2; + u64 ns; + + if (iwl_mld_get_systime(mld, &gp2)) { + IWL_DEBUG_PTP(mld, "PTP: gettime: failed to read systime\n"); + return -EIO; + } + + spin_lock_bh(&data->lock); + ns = iwl_mld_ptp_get_adj_time(mld, (u64)gp2 * NSEC_PER_USEC); + spin_unlock_bh(&data->lock); + + *ts = ns_to_timespec64(ns); + return 0; +} + +static int iwl_mld_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) +{ + struct iwl_mld *mld = container_of(ptp, struct iwl_mld, + ptp_data.ptp_clock_info); + struct ptp_data *data = &mld->ptp_data; + + spin_lock_bh(&data->lock); + data->delta += delta; + IWL_DEBUG_PTP(mld, "delta=%lld, new delta=%lld\n", (long long)delta, + (long long)data->delta); + spin_unlock_bh(&data->lock); + return 0; +} + +static int iwl_mld_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm) +{ + struct iwl_mld *mld = container_of(ptp, struct iwl_mld, + ptp_data.ptp_clock_info); + struct ptp_data *data = &mld->ptp_data; + u32 gp2; + + /* Must call iwl_mld_ptp_get_adj_time() before updating + * data->scale_update_gp2 or data->scaled_freq since + * scale_update_adj_time_ns should reflect the previous scaled_freq. + */ + if (iwl_mld_get_systime(mld, &gp2)) { + IWL_DEBUG_PTP(mld, "adjfine: failed to read systime\n"); + return -EBUSY; + } + + spin_lock_bh(&data->lock); + data->scale_update_adj_time_ns = + iwl_mld_ptp_get_adj_time(mld, gp2 * NSEC_PER_USEC); + data->scale_update_gp2 = gp2; + + /* scale_update_adj_time_ns now reflects the configured delta, the + * wrap_counter and the previous scaled frequency.
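+ * (Worked example: scaled_ppm == 65536 means +1 ppm, so scaled_freq
+ * becomes PTP_SCALE_FACTOR + 65536 and subsequent GP2 deltas are
+ * multiplied by scaled_freq / PTP_SCALE_FACTOR, i.e. stretched by
+ * exactly one part per million.)
+ *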
Thus delta and + * wrap_counter should be reset, and the scale frequency is updated + * to the new frequency. + */ + data->delta = 0; + data->wrap_counter = 0; + data->scaled_freq = PTP_SCALE_FACTOR + scaled_ppm; + IWL_DEBUG_PTP(mld, "adjfine: scaled_ppm=%ld new=%llu\n", + scaled_ppm, (unsigned long long)data->scaled_freq); + spin_unlock_bh(&data->lock); + return 0; +} + +static void iwl_mld_ptp_work(struct work_struct *wk) +{ + struct iwl_mld *mld = container_of(wk, struct iwl_mld, + ptp_data.dwork.work); + struct ptp_data *data = &mld->ptp_data; + u32 gp2; + + spin_lock_bh(&data->lock); + if (!iwl_mld_get_systime(mld, &gp2)) + iwl_mld_ptp_update_new_read(mld, gp2); + else + IWL_DEBUG_PTP(mld, "PTP work: failed to read GP2\n"); + spin_unlock_bh(&data->lock); +} + +static int +iwl_mld_get_crosstimestamp_fw(struct iwl_mld *mld, u32 *gp2, u64 *sys_time) +{ + struct iwl_synced_time_cmd synced_time_cmd = { + .operation = cpu_to_le32(IWL_SYNCED_TIME_OPERATION_READ_BOTH) + }; + struct iwl_host_cmd cmd = { + .id = WIDE_ID(DATA_PATH_GROUP, WNM_PLATFORM_PTM_REQUEST_CMD), + .flags = CMD_WANT_SKB, + .data[0] = &synced_time_cmd, + .len[0] = sizeof(synced_time_cmd), + }; + struct iwl_synced_time_rsp *resp; + struct iwl_rx_packet *pkt; + int ret; + u64 gp2_10ns; + + wiphy_lock(mld->wiphy); + ret = iwl_mld_send_cmd(mld, &cmd); + wiphy_unlock(mld->wiphy); + if (ret) + return ret; + + pkt = cmd.resp_pkt; + + if (iwl_rx_packet_payload_len(pkt) != sizeof(*resp)) { + IWL_DEBUG_PTP(mld, "PTP: Invalid PTM command response\n"); + iwl_free_resp(&cmd); + return -EIO; + } + + resp = (void *)pkt->data; + + gp2_10ns = (u64)le32_to_cpu(resp->gp2_timestamp_hi) << 32 | + le32_to_cpu(resp->gp2_timestamp_lo); + *gp2 = div_u64(gp2_10ns, 100); + + *sys_time = (u64)le32_to_cpu(resp->platform_timestamp_hi) << 32 | + le32_to_cpu(resp->platform_timestamp_lo); + + iwl_free_resp(&cmd); + return ret; +} + +static int +iwl_mld_phc_get_crosstimestamp(struct ptp_clock_info *ptp, + struct system_device_crosststamp *xtstamp) +{ + struct iwl_mld *mld = container_of(ptp, struct iwl_mld, + ptp_data.ptp_clock_info); + struct ptp_data *data = &mld->ptp_data; + int ret = 0; + /* Raw value read from GP2 register in usec */ + u32 gp2; + /* GP2 value in ns*/ + s64 gp2_ns; + /* System (wall) time */ + ktime_t sys_time; + + memset(xtstamp, 0, sizeof(struct system_device_crosststamp)); + + ret = iwl_mld_get_crosstimestamp_fw(mld, &gp2, &sys_time); + if (ret) { + IWL_DEBUG_PTP(mld, + "PTP: fw get_crosstimestamp failed (ret=%d)\n", + ret); + return ret; + } + + spin_lock_bh(&data->lock); + gp2_ns = iwl_mld_ptp_get_adj_time(mld, (u64)gp2 * NSEC_PER_USEC); + spin_unlock_bh(&data->lock); + + IWL_DEBUG_PTP(mld, + "Got Sync Time: GP2:%u, last_GP2: %u, GP2_ns: %lld, sys_time: %lld\n", + gp2, mld->ptp_data.last_gp2, gp2_ns, (s64)sys_time); + + /* System monotonic raw time is not used */ + xtstamp->device = ns_to_ktime(gp2_ns); + xtstamp->sys_realtime = sys_time; + + return ret; +} + +void iwl_mld_ptp_init(struct iwl_mld *mld) +{ + if (WARN_ON(mld->ptp_data.ptp_clock)) + return; + + spin_lock_init(&mld->ptp_data.lock); + INIT_DELAYED_WORK(&mld->ptp_data.dwork, iwl_mld_ptp_work); + + mld->ptp_data.ptp_clock_info.owner = THIS_MODULE; + mld->ptp_data.ptp_clock_info.gettime64 = iwl_mld_ptp_gettime; + mld->ptp_data.ptp_clock_info.max_adj = 0x7fffffff; + mld->ptp_data.ptp_clock_info.adjtime = iwl_mld_ptp_adjtime; + mld->ptp_data.ptp_clock_info.adjfine = iwl_mld_ptp_adjfine; + mld->ptp_data.scaled_freq = PTP_SCALE_FACTOR; + 
mld->ptp_data.ptp_clock_info.getcrosststamp = + iwl_mld_phc_get_crosstimestamp; + + /* Give a short 'friendly name' to identify the PHC clock */ + snprintf(mld->ptp_data.ptp_clock_info.name, + sizeof(mld->ptp_data.ptp_clock_info.name), + "%s", "iwlwifi-PTP"); + + mld->ptp_data.ptp_clock = + ptp_clock_register(&mld->ptp_data.ptp_clock_info, mld->dev); + + if (IS_ERR_OR_NULL(mld->ptp_data.ptp_clock)) { + IWL_ERR(mld, "Failed to register PHC clock (%ld)\n", + PTR_ERR(mld->ptp_data.ptp_clock)); + mld->ptp_data.ptp_clock = NULL; + } else { + IWL_INFO(mld, "Registered PHC clock: %s, with index: %d\n", + mld->ptp_data.ptp_clock_info.name, + ptp_clock_index(mld->ptp_data.ptp_clock)); + } +} + +void iwl_mld_ptp_remove(struct iwl_mld *mld) +{ + if (mld->ptp_data.ptp_clock) { + IWL_INFO(mld, "Unregistering PHC clock: %s, with index: %d\n", + mld->ptp_data.ptp_clock_info.name, + ptp_clock_index(mld->ptp_data.ptp_clock)); + + ptp_clock_unregister(mld->ptp_data.ptp_clock); + mld->ptp_data.ptp_clock = NULL; + mld->ptp_data.last_gp2 = 0; + mld->ptp_data.wrap_counter = 0; + cancel_delayed_work_sync(&mld->ptp_data.dwork); + } +} diff --git a/drivers/net/wireless/intel/iwlwifi/mld/ptp.h b/drivers/net/wireless/intel/iwlwifi/mld/ptp.h new file mode 100644 index 0000000000000..f3d18dd304e53 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mld/ptp.h @@ -0,0 +1,45 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * Copyright (C) 2025 Intel Corporation + */ +#ifndef __iwl_mld_ptp_h__ +#define __iwl_mld_ptp_h__ + +#include + +/** + * struct ptp_data - PTP hardware clock data + * + * @ptp_clock: struct ptp_clock pointer returned by the ptp_clock_register() + * function. + * @ptp_clock_info: struct ptp_clock_info that describes a PTP hardware clock + * @lock: protects the time adjustments data + * @delta: delta between hardware clock and ptp clock in nanoseconds + * @scale_update_gp2: GP2 time when the scale was last updated + * @scale_update_adj_time_ns: adjusted time when the scale was last updated, + * in nanoseconds + * @scaled_freq: clock frequency offset, scaled to 65536000000 + * @last_gp2: the last GP2 reading from the hardware, used for tracking GP2 + * wraparounds + * @wrap_counter: number of wraparounds since scale_update_adj_time_ns + * @dwork: worker scheduled every 1 hour to detect wraparounds + */ +struct ptp_data { + struct ptp_clock *ptp_clock; + struct ptp_clock_info ptp_clock_info; + + spinlock_t lock; + s64 delta; + u32 scale_update_gp2; + u64 scale_update_adj_time_ns; + u64 scaled_freq; + u32 last_gp2; + u32 wrap_counter; + struct delayed_work dwork; +}; + +void iwl_mld_ptp_init(struct iwl_mld *mld); +void iwl_mld_ptp_remove(struct iwl_mld *mld); +u64 iwl_mld_ptp_get_adj_time(struct iwl_mld *mld, u64 base_time_ns); + +#endif /* __iwl_mld_ptp_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mld/regulatory.c b/drivers/net/wireless/intel/iwlwifi/mld/regulatory.c new file mode 100644 index 0000000000000..a75af8c1e8ab0 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mld/regulatory.c @@ -0,0 +1,393 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2024-2025 Intel Corporation + */ + +#include + +#include "fw/regulatory.h" +#include "fw/acpi.h" +#include "fw/uefi.h" + +#include "regulatory.h" +#include "mld.h" +#include "hcmd.h" + +void iwl_mld_get_bios_tables(struct iwl_mld *mld) +{ + int ret; + + iwl_acpi_get_guid_lock_status(&mld->fwrt); + + ret = iwl_bios_get_ppag_table(&mld->fwrt); + if (ret < 0) { + IWL_DEBUG_RADIO(mld, + "PPAG BIOS
table invalid or unavailable. (%d)\n", + ret); + } + + ret = iwl_bios_get_wrds_table(&mld->fwrt); + if (ret < 0) { + IWL_DEBUG_RADIO(mld, + "WRDS SAR BIOS table invalid or unavailable. (%d)\n", + ret); + + /* If not available, don't fail and don't bother with EWRD and + * WGDS + */ + + if (!iwl_bios_get_wgds_table(&mld->fwrt)) { + /* If basic SAR is not available, we check for WGDS, + * which should *not* be available either. If it is + * available, issue an error, because we can't use SAR + * Geo without basic SAR. + */ + IWL_ERR(mld, "BIOS contains WGDS but no WRDS\n"); + } + + } else { + ret = iwl_bios_get_ewrd_table(&mld->fwrt); + /* If EWRD is not available, we can still use + * WRDS, so don't fail. + */ + if (ret < 0) + IWL_DEBUG_RADIO(mld, + "EWRD SAR BIOS table invalid or unavailable. (%d)\n", + ret); + + ret = iwl_bios_get_wgds_table(&mld->fwrt); + if (ret < 0) + IWL_DEBUG_RADIO(mld, + "Geo SAR BIOS table invalid or unavailable. (%d)\n", + ret); + /* we don't fail if the table is not available */ + } + + ret = iwl_uefi_get_uats_table(mld->trans, &mld->fwrt); + if (ret) + IWL_DEBUG_RADIO(mld, "failed to read UATS table (%d)\n", ret); +} + +static int iwl_mld_geo_sar_init(struct iwl_mld *mld) +{ + u32 cmd_id = WIDE_ID(PHY_OPS_GROUP, PER_CHAIN_LIMIT_OFFSET_CMD); + union iwl_geo_tx_power_profiles_cmd cmd; + u16 len; + u32 n_bands; + __le32 sk = cpu_to_le32(0); + int ret; + u8 cmd_ver = iwl_fw_lookup_cmd_ver(mld->fw, cmd_id, + IWL_FW_CMD_VER_UNKNOWN); + + BUILD_BUG_ON(offsetof(struct iwl_geo_tx_power_profiles_cmd_v4, ops) != + offsetof(struct iwl_geo_tx_power_profiles_cmd_v5, ops)); + + cmd.v4.ops = cpu_to_le32(IWL_PER_CHAIN_OFFSET_SET_TABLES); + + /* Only set to South Korea if the table revision is 1 */ + if (mld->fwrt.geo_rev == 1) + sk = cpu_to_le32(1); + + if (cmd_ver == 5) { + len = sizeof(cmd.v5); + n_bands = ARRAY_SIZE(cmd.v5.table[0]); + cmd.v5.table_revision = sk; + } else if (cmd_ver == 4) { + len = sizeof(cmd.v4); + n_bands = ARRAY_SIZE(cmd.v4.table[0]); + cmd.v4.table_revision = sk; + } else { + return -EOPNOTSUPP; + } + + BUILD_BUG_ON(offsetof(struct iwl_geo_tx_power_profiles_cmd_v4, table) != + offsetof(struct iwl_geo_tx_power_profiles_cmd_v5, table)); + /* the table is at the same position for all versions, so just use v4 */ + ret = iwl_sar_geo_fill_table(&mld->fwrt, &cmd.v4.table[0][0], + n_bands, BIOS_GEO_MAX_PROFILE_NUM); + + /* It is a valid scenario to not support SAR, or to miss the wgds table, + * but in that case there is no need to send the command.
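+ * (iwl_sar_geo_fill_table() returns a non-zero value in exactly that
+ * case, which is why a non-zero return is treated as success right below.)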
+ */ + if (ret) + return 0; + + return iwl_mld_send_cmd_pdu(mld, cmd_id, &cmd, len); +} + +int iwl_mld_config_sar_profile(struct iwl_mld *mld, int prof_a, int prof_b) +{ + u32 cmd_id = REDUCE_TX_POWER_CMD; + struct iwl_dev_tx_power_cmd cmd = { + .common.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_CHAINS), + }; + __le16 *per_chain; + int ret; + u16 len = sizeof(cmd.common); + u32 n_subbands; + u8 cmd_ver = iwl_fw_lookup_cmd_ver(mld->fw, cmd_id, + IWL_FW_CMD_VER_UNKNOWN); + + if (cmd_ver == 10) { + len += sizeof(cmd.v10); + n_subbands = IWL_NUM_SUB_BANDS_V2; + per_chain = &cmd.v10.per_chain[0][0][0]; + cmd.v10.flags = + cpu_to_le32(mld->fwrt.reduced_power_flags); + } else if (cmd_ver == 9) { + len += sizeof(cmd.v9); + n_subbands = IWL_NUM_SUB_BANDS_V1; + per_chain = &cmd.v9.per_chain[0][0]; + } else { + return -EOPNOTSUPP; + } + + /* TODO: CDB - support IWL_NUM_CHAIN_TABLES_V2 */ + ret = iwl_sar_fill_profile(&mld->fwrt, per_chain, + IWL_NUM_CHAIN_TABLES, + n_subbands, prof_a, prof_b); + /* return on error or if the profile is disabled (positive number) */ + if (ret) + return ret; + + return iwl_mld_send_cmd_pdu(mld, cmd_id, &cmd, len); +} + +int iwl_mld_init_sar(struct iwl_mld *mld) +{ + int chain_a_prof = 1; + int chain_b_prof = 1; + int ret; + + /* If no profile was chosen by the user yet, choose profile 1 (WRDS) as + * default for both chains + */ + if (mld->fwrt.sar_chain_a_profile && mld->fwrt.sar_chain_b_profile) { + chain_a_prof = mld->fwrt.sar_chain_a_profile; + chain_b_prof = mld->fwrt.sar_chain_b_profile; + } + + ret = iwl_mld_config_sar_profile(mld, chain_a_prof, chain_b_prof); + if (ret < 0) + return ret; + + if (ret) + return 0; + + return iwl_mld_geo_sar_init(mld); +} + +int iwl_mld_init_sgom(struct iwl_mld *mld) +{ + int ret; + struct iwl_host_cmd cmd = { + .id = WIDE_ID(REGULATORY_AND_NVM_GROUP, + SAR_OFFSET_MAPPING_TABLE_CMD), + .data[0] = &mld->fwrt.sgom_table, + .len[0] = sizeof(mld->fwrt.sgom_table), + .dataflags[0] = IWL_HCMD_DFL_NOCOPY, + }; + + if (!mld->fwrt.sgom_enabled) { + IWL_DEBUG_RADIO(mld, "SGOM table is disabled\n"); + return 0; + } + + ret = iwl_mld_send_cmd(mld, &cmd); + if (ret) + IWL_ERR(mld, + "failed to send SAR_OFFSET_MAPPING_CMD (%d)\n", ret); + + return ret; +} + +static int iwl_mld_ppag_send_cmd(struct iwl_mld *mld) +{ + union iwl_ppag_table_cmd cmd = {}; + int ret, len; + + ret = iwl_fill_ppag_table(&mld->fwrt, &cmd, &len); + /* Not supporting PPAG table is a valid scenario */ + if (ret < 0) + return 0; + + IWL_DEBUG_RADIO(mld, "Sending PER_PLATFORM_ANT_GAIN_CMD\n"); + ret = iwl_mld_send_cmd_pdu(mld, WIDE_ID(PHY_OPS_GROUP, + PER_PLATFORM_ANT_GAIN_CMD), + &cmd, len); + if (ret < 0) + IWL_ERR(mld, "failed to send PER_PLATFORM_ANT_GAIN_CMD (%d)\n", + ret); + + return ret; +} + +int iwl_mld_init_ppag(struct iwl_mld *mld) +{ + /* no need to read the table, done in INIT stage */ + + if (!(iwl_is_ppag_approved(&mld->fwrt))) + return 0; + + return iwl_mld_ppag_send_cmd(mld); +} + +void iwl_mld_configure_lari(struct iwl_mld *mld) +{ + struct iwl_fw_runtime *fwrt = &mld->fwrt; + struct iwl_lari_config_change_cmd cmd = { + .config_bitmap = iwl_get_lari_config_bitmap(fwrt), + }; + int ret; + u32 value; + + ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_11AX_ENABLEMENT, &value); + if (!ret) + cmd.oem_11ax_allow_bitmap = cpu_to_le32(value); + + ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_ENABLE_UNII4_CHAN, &value); + if (!ret) + cmd.oem_unii4_allow_bitmap = + cpu_to_le32(value &= DSM_UNII4_ALLOW_BITMAP); + + ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_ACTIVATE_CHANNEL, &value); + if 
(!ret) + cmd.chan_state_active_bitmap = cpu_to_le32(value); + + ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_ENABLE_6E, &value); + if (!ret) + cmd.oem_uhb_allow_bitmap = cpu_to_le32(value); + + ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_FORCE_DISABLE_CHANNELS, &value); + if (!ret) + cmd.force_disable_channels_bitmap = cpu_to_le32(value); + + ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_ENERGY_DETECTION_THRESHOLD, + &value); + if (!ret) + cmd.edt_bitmap = cpu_to_le32(value); + + ret = iwl_bios_get_wbem(fwrt, &value); + if (!ret) + cmd.oem_320mhz_allow_bitmap = cpu_to_le32(value); + + ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_ENABLE_11BE, &value); + if (!ret) + cmd.oem_11be_allow_bitmap = cpu_to_le32(value); + + if (!cmd.config_bitmap && + !cmd.oem_uhb_allow_bitmap && + !cmd.oem_11ax_allow_bitmap && + !cmd.oem_unii4_allow_bitmap && + !cmd.chan_state_active_bitmap && + !cmd.force_disable_channels_bitmap && + !cmd.edt_bitmap && + !cmd.oem_320mhz_allow_bitmap && + !cmd.oem_11be_allow_bitmap) + return; + + IWL_DEBUG_RADIO(mld, + "sending LARI_CONFIG_CHANGE, config_bitmap=0x%x, oem_11ax_allow_bitmap=0x%x\n", + le32_to_cpu(cmd.config_bitmap), + le32_to_cpu(cmd.oem_11ax_allow_bitmap)); + IWL_DEBUG_RADIO(mld, + "sending LARI_CONFIG_CHANGE, oem_unii4_allow_bitmap=0x%x, chan_state_active_bitmap=0x%x\n", + le32_to_cpu(cmd.oem_unii4_allow_bitmap), + le32_to_cpu(cmd.chan_state_active_bitmap)); + IWL_DEBUG_RADIO(mld, + "sending LARI_CONFIG_CHANGE, oem_uhb_allow_bitmap=0x%x, force_disable_channels_bitmap=0x%x\n", + le32_to_cpu(cmd.oem_uhb_allow_bitmap), + le32_to_cpu(cmd.force_disable_channels_bitmap)); + IWL_DEBUG_RADIO(mld, + "sending LARI_CONFIG_CHANGE, edt_bitmap=0x%x, oem_320mhz_allow_bitmap=0x%x\n", + le32_to_cpu(cmd.edt_bitmap), + le32_to_cpu(cmd.oem_320mhz_allow_bitmap)); + IWL_DEBUG_RADIO(mld, + "sending LARI_CONFIG_CHANGE, oem_11be_allow_bitmap=0x%x\n", + le32_to_cpu(cmd.oem_11be_allow_bitmap)); + + ret = iwl_mld_send_cmd_pdu(mld, WIDE_ID(REGULATORY_AND_NVM_GROUP, + LARI_CONFIG_CHANGE), &cmd); + if (ret) + IWL_DEBUG_RADIO(mld, + "Failed to send LARI_CONFIG_CHANGE (%d)\n", + ret); +} + +void iwl_mld_init_uats(struct iwl_mld *mld) +{ + int ret; + struct iwl_host_cmd cmd = { + .id = WIDE_ID(REGULATORY_AND_NVM_GROUP, + MCC_ALLOWED_AP_TYPE_CMD), + .data[0] = &mld->fwrt.uats_table, + .len[0] = sizeof(mld->fwrt.uats_table), + .dataflags[0] = IWL_HCMD_DFL_NOCOPY, + }; + + if (!mld->fwrt.uats_valid) + return; + + ret = iwl_mld_send_cmd(mld, &cmd); + if (ret) + IWL_ERR(mld, "failed to send MCC_ALLOWED_AP_TYPE_CMD (%d)\n", + ret); +} + +void iwl_mld_init_tas(struct iwl_mld *mld) +{ + int ret; + struct iwl_tas_data data = {}; + struct iwl_tas_config_cmd cmd = {}; + u32 cmd_id = WIDE_ID(REGULATORY_AND_NVM_GROUP, TAS_CONFIG); + + BUILD_BUG_ON(ARRAY_SIZE(data.block_list_array) != + IWL_WTAS_BLACK_LIST_MAX); + BUILD_BUG_ON(ARRAY_SIZE(cmd.block_list_array) != + IWL_WTAS_BLACK_LIST_MAX); + + if (!fw_has_capa(&mld->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TAS_CFG)) { + IWL_DEBUG_RADIO(mld, "TAS not enabled in FW\n"); + return; + } + + ret = iwl_bios_get_tas_table(&mld->fwrt, &data); + if (ret < 0) { + IWL_DEBUG_RADIO(mld, + "TAS table invalid or unavailable. 
(%d)\n", + ret); + return; + } + + if (!iwl_is_tas_approved()) { + IWL_DEBUG_RADIO(mld, + "System vendor '%s' is not in the approved list, disabling TAS in US and Canada.\n", + dmi_get_system_info(DMI_SYS_VENDOR) ?: ""); + if ((!iwl_add_mcc_to_tas_block_list(data.block_list_array, + &data.block_list_size, + IWL_MCC_US)) || + (!iwl_add_mcc_to_tas_block_list(data.block_list_array, + &data.block_list_size, + IWL_MCC_CANADA))) { + IWL_DEBUG_RADIO(mld, + "Unable to add US/Canada to TAS block list, disabling TAS\n"); + return; + } + } else { + IWL_DEBUG_RADIO(mld, + "System vendor '%s' is in the approved list.\n", + dmi_get_system_info(DMI_SYS_VENDOR) ?: ""); + } + + cmd.block_list_size = cpu_to_le16(data.block_list_size); + for (u8 i = 0; i < data.block_list_size; i++) + cmd.block_list_array[i] = + cpu_to_le16(data.block_list_array[i]); + cmd.tas_config_info.table_source = data.table_source; + cmd.tas_config_info.table_revision = data.table_revision; + cmd.tas_config_info.value = cpu_to_le32(data.tas_selection); + + ret = iwl_mld_send_cmd_pdu(mld, cmd_id, &cmd); + if (ret) + IWL_DEBUG_RADIO(mld, "failed to send TAS_CONFIG (%d)\n", ret); +} diff --git a/drivers/net/wireless/intel/iwlwifi/mld/regulatory.h b/drivers/net/wireless/intel/iwlwifi/mld/regulatory.h new file mode 100644 index 0000000000000..3b01c645adda3 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mld/regulatory.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * Copyright (C) 2024-2025 Intel Corporation + */ +#ifndef __iwl_mld_regulatory_h__ +#define __iwl_mld_regulatory_h__ + +#include "mld.h" + +void iwl_mld_get_bios_tables(struct iwl_mld *mld); +void iwl_mld_configure_lari(struct iwl_mld *mld); +void iwl_mld_init_uats(struct iwl_mld *mld); +void iwl_mld_init_tas(struct iwl_mld *mld); + +int iwl_mld_init_ppag(struct iwl_mld *mld); + +int iwl_mld_init_sgom(struct iwl_mld *mld); + +int iwl_mld_init_sar(struct iwl_mld *mld); + +int iwl_mld_config_sar_profile(struct iwl_mld *mld, int prof_a, int prof_b); + +#endif /* __iwl_mld_regulatory_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mld/roc.c b/drivers/net/wireless/intel/iwlwifi/mld/roc.c new file mode 100644 index 0000000000000..b87faca23cebe --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mld/roc.c @@ -0,0 +1,224 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2024 - 2025 Intel Corporation + */ +#include +#include + +#include "mld.h" +#include "roc.h" +#include "hcmd.h" +#include "iface.h" +#include "sta.h" +#include "mlo.h" + +#include "fw/api/context.h" +#include "fw/api/time-event.h" + +#define AUX_ROC_MAX_DELAY MSEC_TO_TU(200) + +static void +iwl_mld_vif_iter_emlsr_block_roc(void *data, u8 *mac, struct ieee80211_vif *vif) +{ + struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif); + int *result = data; + int ret; + + ret = iwl_mld_block_emlsr_sync(mld_vif->mld, vif, + IWL_MLD_EMLSR_BLOCKED_ROC, + iwl_mld_get_primary_link(vif)); + if (ret) + *result = ret; +} + +int iwl_mld_start_roc(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_channel *channel, int duration, + enum ieee80211_roc_type type) +{ + struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw); + struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif); + struct iwl_mld_int_sta *aux_sta; + struct iwl_roc_req cmd = { + .action = cpu_to_le32(FW_CTXT_ACTION_ADD), + }; + u8 ver = iwl_fw_lookup_cmd_ver(mld->fw, + WIDE_ID(MAC_CONF_GROUP, ROC_CMD), 0); + u16 cmd_len = ver < 6 ? 
sizeof(struct iwl_roc_req_v5) : sizeof(cmd); + enum iwl_roc_activity activity; + int ret = 0; + + lockdep_assert_wiphy(mld->wiphy); + + ieee80211_iterate_active_interfaces_mtx(mld->hw, + IEEE80211_IFACE_ITER_NORMAL, + iwl_mld_vif_iter_emlsr_block_roc, + &ret); + if (ret) + return ret; + + /* TODO: task=Hotspot 2.0 */ + if (vif->type != NL80211_IFTYPE_P2P_DEVICE) { + IWL_ERR(mld, "NOT SUPPORTED: ROC on vif->type %d\n", + vif->type); + + return -EOPNOTSUPP; + } + + switch (type) { + case IEEE80211_ROC_TYPE_NORMAL: + activity = ROC_ACTIVITY_P2P_DISC; + break; + case IEEE80211_ROC_TYPE_MGMT_TX: + activity = ROC_ACTIVITY_P2P_NEG; + break; + default: + WARN_ONCE(1, "Got an invalid P2P ROC type\n"); + return -EINVAL; + } + + if (WARN_ON(mld_vif->roc_activity != ROC_NUM_ACTIVITIES)) + return -EBUSY; + + /* No MLO on P2P device */ + aux_sta = &mld_vif->deflink.aux_sta; + + ret = iwl_mld_add_aux_sta(mld, aux_sta); + if (ret) + return ret; + + cmd.activity = cpu_to_le32(activity); + cmd.sta_id = cpu_to_le32(aux_sta->sta_id); + cmd.channel_info.channel = cpu_to_le32(channel->hw_value); + cmd.channel_info.band = iwl_mld_nl80211_band_to_fw(channel->band); + cmd.channel_info.width = IWL_PHY_CHANNEL_MODE20; + /* TODO: task=Hotspot 2.0, revisit those parameters when we add an ROC + * on the BSS vif + */ + cmd.max_delay = cpu_to_le32(AUX_ROC_MAX_DELAY); + cmd.duration = cpu_to_le32(MSEC_TO_TU(duration)); + + memcpy(cmd.node_addr, vif->addr, ETH_ALEN); + + ret = iwl_mld_send_cmd_pdu(mld, WIDE_ID(MAC_CONF_GROUP, ROC_CMD), + &cmd, cmd_len); + if (ret) { + IWL_ERR(mld, "Couldn't send the ROC_CMD\n"); + return ret; + } + mld_vif->roc_activity = activity; + + return 0; +} + +static void +iwl_mld_vif_iter_emlsr_unblock_roc(void *data, u8 *mac, + struct ieee80211_vif *vif) +{ + struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif); + + iwl_mld_unblock_emlsr(mld_vif->mld, vif, IWL_MLD_EMLSR_BLOCKED_ROC); +} + +static void iwl_mld_destroy_roc(struct iwl_mld *mld, + struct ieee80211_vif *vif, + struct iwl_mld_vif *mld_vif) +{ + mld_vif->roc_activity = ROC_NUM_ACTIVITIES; + + ieee80211_iterate_active_interfaces_mtx(mld->hw, + IEEE80211_IFACE_ITER_NORMAL, + iwl_mld_vif_iter_emlsr_unblock_roc, + NULL); + + /* wait until every tx has seen that roc_activity has been reset */ + synchronize_net(); + /* from here, no new tx will be added; + * we can flush the Tx on the queues + */ + + iwl_mld_flush_link_sta_txqs(mld, mld_vif->deflink.aux_sta.sta_id); + + iwl_mld_remove_aux_sta(mld, vif, &vif->bss_conf); +} + +int iwl_mld_cancel_roc(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct iwl_mld *mld = IWL_MAC80211_GET_MLD(hw); + struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif); + struct iwl_roc_req cmd = { + .action = cpu_to_le32(FW_CTXT_ACTION_REMOVE), + }; + u8 ver = iwl_fw_lookup_cmd_ver(mld->fw, + WIDE_ID(MAC_CONF_GROUP, ROC_CMD), 0); + u16 cmd_len = ver < 6 ? sizeof(struct iwl_roc_req_v5) : sizeof(cmd); + int ret; + + lockdep_assert_wiphy(mld->wiphy); + + /* TODO: task=Hotspot 2.0 */ + if (WARN_ON(vif->type != NL80211_IFTYPE_P2P_DEVICE)) + return -EOPNOTSUPP; + + /* No ROC activity is running; it's probably already done */ + if (mld_vif->roc_activity == ROC_NUM_ACTIVITIES) + return 0; + + cmd.activity = cpu_to_le32(mld_vif->roc_activity); + + ret = iwl_mld_send_cmd_pdu(mld, WIDE_ID(MAC_CONF_GROUP, ROC_CMD), + &cmd, cmd_len); + if (ret) + IWL_ERR(mld, "Couldn't send the command to cancel the ROC\n"); + + /* We may have raced with the firmware expiring the ROC instance at + * this very moment.
In that case, we can have a notification in the + * async processing queue. However, none can arrive _after_ this as + * ROC_CMD was sent synchronously, i.e. we waited for a response and + * the firmware cannot refer to this ROC after the response. Thus, + * if we just cancel the notification (if there's one) we'll be at a + * clean state for any possible next ROC. + */ + iwl_mld_cancel_notifications_of_object(mld, IWL_MLD_OBJECT_TYPE_ROC, + mld_vif->roc_activity); + + iwl_mld_destroy_roc(mld, vif, mld_vif); + + return 0; +} + +void iwl_mld_handle_roc_notif(struct iwl_mld *mld, + struct iwl_rx_packet *pkt) +{ + const struct iwl_roc_notif *notif = (void *)pkt->data; + u32 activity = le32_to_cpu(notif->activity); + /* TODO: task=Hotspot 2.0 - roc can run on BSS */ + struct ieee80211_vif *vif = mld->p2p_device_vif; + struct iwl_mld_vif *mld_vif; + + if (WARN_ON(!vif)) + return; + + mld_vif = iwl_mld_vif_from_mac80211(vif); + /* It is possible that the ROC was canceled + * but the notification was already fired. + */ + if (mld_vif->roc_activity != activity) + return; + + if (le32_to_cpu(notif->success) && + le32_to_cpu(notif->started)) { + /* We had a successful start */ + ieee80211_ready_on_channel(mld->hw); + } else { + /* ROC was not successful, tell the firmware to remove it */ + if (le32_to_cpu(notif->started)) + iwl_mld_cancel_roc(mld->hw, vif); + else + iwl_mld_destroy_roc(mld, vif, mld_vif); + /* we need to let mac80211 know about the end OR + * an unsuccessful start + */ + ieee80211_remain_on_channel_expired(mld->hw); + } +} diff --git a/drivers/net/wireless/intel/iwlwifi/mld/roc.h b/drivers/net/wireless/intel/iwlwifi/mld/roc.h new file mode 100644 index 0000000000000..985d7f20a3d7f --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mld/roc.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * Copyright (C) 2024 Intel Corporation + */ +#ifndef __iwl_mld_roc_h__ +#define __iwl_mld_roc_h__ + +#include + +int iwl_mld_start_roc(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_channel *channel, int duration, + enum ieee80211_roc_type type); + +int iwl_mld_cancel_roc(struct ieee80211_hw *hw, + struct ieee80211_vif *vif); + +void iwl_mld_handle_roc_notif(struct iwl_mld *mld, + struct iwl_rx_packet *pkt); + +#endif /* __iwl_mld_roc_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mld/rx.c b/drivers/net/wireless/intel/iwlwifi/mld/rx.c new file mode 100644 index 0000000000000..c4f189bcece21 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mld/rx.c @@ -0,0 +1,2060 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2024-2025 Intel Corporation + */ + +#include +#include + +#include "mld.h" +#include "sta.h" +#include "agg.h" +#include "rx.h" +#include "hcmd.h" +#include "iface.h" +#include "time_sync.h" +#include "fw/dbg.h" +#include "fw/api/rx.h" + +/* stores relevant PHY data fields extracted from iwl_rx_mpdu_desc */ +struct iwl_mld_rx_phy_data { + enum iwl_rx_phy_info_type info_type; + __le32 data0; + __le32 data1; + __le32 data2; + __le32 data3; + __le32 eht_data4; + __le32 data5; + __le16 data4; + bool first_subframe; + bool with_data; + __le32 rx_vec[4]; + u32 rate_n_flags; + u32 gp2_on_air_rise; + u16 phy_info; + u8 energy_a, energy_b; + u8 channel; +}; + +static void +iwl_mld_fill_phy_data(struct iwl_rx_mpdu_desc *desc, + struct iwl_mld_rx_phy_data *phy_data) +{ + phy_data->phy_info = le16_to_cpu(desc->phy_info); + phy_data->rate_n_flags = le32_to_cpu(desc->v3.rate_n_flags); +
phy_data->gp2_on_air_rise = le32_to_cpu(desc->v3.gp2_on_air_rise); + phy_data->channel = desc->v3.channel; + phy_data->energy_a = desc->v3.energy_a; + phy_data->energy_b = desc->v3.energy_b; + phy_data->data0 = desc->v3.phy_data0; + phy_data->data1 = desc->v3.phy_data1; + phy_data->data2 = desc->v3.phy_data2; + phy_data->data3 = desc->v3.phy_data3; + phy_data->data4 = desc->phy_data4; + phy_data->eht_data4 = desc->phy_eht_data4; + phy_data->data5 = desc->v3.phy_data5; + phy_data->with_data = true; +} + +static inline int iwl_mld_check_pn(struct iwl_mld *mld, struct sk_buff *skb, + int queue, struct ieee80211_sta *sta) +{ + struct ieee80211_hdr *hdr = (void *)skb_mac_header(skb); + struct ieee80211_rx_status *stats = IEEE80211_SKB_RXCB(skb); + struct iwl_mld_sta *mld_sta; + struct iwl_mld_ptk_pn *ptk_pn; + int res; + u8 tid, keyidx; + u8 pn[IEEE80211_CCMP_PN_LEN]; + u8 *extiv; + + /* multicast and non-data only arrives on default queue; avoid checking + * for default queue - we don't want to replicate all the logic that's + * necessary for checking the PN on fragmented frames, leave that + * to mac80211 + */ + if (queue == 0 || !ieee80211_is_data(hdr->frame_control) || + is_multicast_ether_addr(hdr->addr1)) + return 0; + + if (!(stats->flag & RX_FLAG_DECRYPTED)) + return 0; + + /* if we are here - this for sure is either CCMP or GCMP */ + if (!sta) { + IWL_DEBUG_DROP(mld, + "expected hw-decrypted unicast frame for station\n"); + return -1; + } + + mld_sta = iwl_mld_sta_from_mac80211(sta); + + extiv = (u8 *)hdr + ieee80211_hdrlen(hdr->frame_control); + keyidx = extiv[3] >> 6; + + ptk_pn = rcu_dereference(mld_sta->ptk_pn[keyidx]); + if (!ptk_pn) + return -1; + + if (ieee80211_is_data_qos(hdr->frame_control)) + tid = ieee80211_get_tid(hdr); + else + tid = 0; + + /* we don't use HCCA/802.11 QoS TSPECs, so drop such frames */ + if (tid >= IWL_MAX_TID_COUNT) + return -1; + + /* load pn */ + pn[0] = extiv[7]; + pn[1] = extiv[6]; + pn[2] = extiv[5]; + pn[3] = extiv[4]; + pn[4] = extiv[1]; + pn[5] = extiv[0]; + + res = memcmp(pn, ptk_pn->q[queue].pn[tid], IEEE80211_CCMP_PN_LEN); + if (res < 0) + return -1; + if (!res && !(stats->flag & RX_FLAG_ALLOW_SAME_PN)) + return -1; + + memcpy(ptk_pn->q[queue].pn[tid], pn, IEEE80211_CCMP_PN_LEN); + stats->flag |= RX_FLAG_PN_VALIDATED; + + return 0; +} + +/* iwl_mld_pass_packet_to_mac80211 - passes the packet for mac80211 */ +void iwl_mld_pass_packet_to_mac80211(struct iwl_mld *mld, + struct napi_struct *napi, + struct sk_buff *skb, int queue, + struct ieee80211_sta *sta) +{ + KUNIT_STATIC_STUB_REDIRECT(iwl_mld_pass_packet_to_mac80211, + mld, napi, skb, queue, sta); + + if (unlikely(iwl_mld_check_pn(mld, skb, queue, sta))) { + kfree_skb(skb); + return; + } + + ieee80211_rx_napi(mld->hw, sta, skb, napi); +} +EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_mld_pass_packet_to_mac80211); + +static void iwl_mld_fill_signal(struct iwl_mld *mld, + struct ieee80211_rx_status *rx_status, + struct iwl_mld_rx_phy_data *phy_data) +{ + u32 rate_n_flags = phy_data->rate_n_flags; + int energy_a = phy_data->energy_a; + int energy_b = phy_data->energy_b; + int max_energy; + + energy_a = energy_a ? -energy_a : S8_MIN; + energy_b = energy_b ? 
-energy_b : S8_MIN;
+ max_energy = max(energy_a, energy_b);
+
+ IWL_DEBUG_STATS(mld, "energy in A %d B %d, and max %d\n",
+ energy_a, energy_b, max_energy);
+
+ rx_status->signal = max_energy;
+ rx_status->chains =
+ (rate_n_flags & RATE_MCS_ANT_AB_MSK) >> RATE_MCS_ANT_POS;
+ rx_status->chain_signal[0] = energy_a;
+ rx_status->chain_signal[1] = energy_b;
+}
+
+static void
+iwl_mld_decode_he_phy_ru_alloc(struct iwl_mld_rx_phy_data *phy_data,
+ struct ieee80211_radiotap_he *he,
+ struct ieee80211_radiotap_he_mu *he_mu,
+ struct ieee80211_rx_status *rx_status)
+{
+ /* Unfortunately, we have to leave the mac80211 data
+ * incorrect for the case that we receive an HE-MU
+ * transmission and *don't* have the HE phy data (due
+ * to the bits being used for TSF). This shouldn't
+ * happen though as management frames where we need
+ * the TSF/timers are not transmitted in HE-MU.
+ */
+ u8 ru = le32_get_bits(phy_data->data1, IWL_RX_PHY_DATA1_HE_RU_ALLOC_MASK);
+ u32 rate_n_flags = phy_data->rate_n_flags;
+ u32 he_type = rate_n_flags & RATE_MCS_HE_TYPE_MSK;
+ u8 offs = 0;
+
+ rx_status->bw = RATE_INFO_BW_HE_RU;
+
+ he->data1 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN);
+
+ switch (ru) {
+ case 0 ... 36:
+ rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_26;
+ offs = ru;
+ break;
+ case 37 ... 52:
+ rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_52;
+ offs = ru - 37;
+ break;
+ case 53 ... 60:
+ rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_106;
+ offs = ru - 53;
+ break;
+ case 61 ... 64:
+ rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_242;
+ offs = ru - 61;
+ break;
+ case 65 ... 66:
+ rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_484;
+ offs = ru - 65;
+ break;
+ case 67:
+ rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_996;
+ break;
+ case 68:
+ rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_2x996;
+ break;
+ }
+ he->data2 |= le16_encode_bits(offs,
+ IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET);
+ he->data2 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_PRISEC_80_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET_KNOWN);
+ if (phy_data->data1 & cpu_to_le32(IWL_RX_PHY_DATA1_HE_RU_ALLOC_SEC80))
+ he->data2 |=
+ cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_PRISEC_80_SEC);
+
+#define CHECK_BW(bw) \
+ BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_ ## bw ## MHZ != \
+ RATE_MCS_CHAN_WIDTH_##bw >> RATE_MCS_CHAN_WIDTH_POS); \
+ BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA6_TB_PPDU_BW_ ## bw ## MHZ != \
+ RATE_MCS_CHAN_WIDTH_##bw >> RATE_MCS_CHAN_WIDTH_POS)
+ CHECK_BW(20);
+ CHECK_BW(40);
+ CHECK_BW(80);
+ CHECK_BW(160);
+
+ if (he_mu)
+ he_mu->flags2 |=
+ le16_encode_bits(u32_get_bits(rate_n_flags,
+ RATE_MCS_CHAN_WIDTH_MSK),
+ IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW);
+ else if (he_type == RATE_MCS_HE_TYPE_TRIG)
+ he->data6 |=
+ cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA6_TB_PPDU_BW_KNOWN) |
+ le16_encode_bits(u32_get_bits(rate_n_flags,
+ RATE_MCS_CHAN_WIDTH_MSK),
+ IEEE80211_RADIOTAP_HE_DATA6_TB_PPDU_BW);
+}
+
+static void
+iwl_mld_decode_he_mu_ext(struct iwl_mld_rx_phy_data *phy_data,
+ struct ieee80211_radiotap_he_mu *he_mu)
+{
+ u32 phy_data2 = le32_to_cpu(phy_data->data2);
+ u32 phy_data3 = le32_to_cpu(phy_data->data3);
+ u16 phy_data4 = le16_to_cpu(phy_data->data4);
+ u32 rate_n_flags = phy_data->rate_n_flags;
+
+ if (u32_get_bits(phy_data4, IWL_RX_PHY_DATA4_HE_MU_EXT_CH1_CRC_OK)) {
+ he_mu->flags1 |=
+ cpu_to_le16(IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH1_RU_KNOWN |
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH1_CTR_26T_RU_KNOWN);
+
+ he_mu->flags1 |=
+
le16_encode_bits(u32_get_bits(phy_data4, + IWL_RX_PHY_DATA4_HE_MU_EXT_CH1_CTR_RU), + IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH1_CTR_26T_RU); + + he_mu->ru_ch1[0] = u32_get_bits(phy_data2, + IWL_RX_PHY_DATA2_HE_MU_EXT_CH1_RU0); + he_mu->ru_ch1[1] = u32_get_bits(phy_data3, + IWL_RX_PHY_DATA3_HE_MU_EXT_CH1_RU1); + he_mu->ru_ch1[2] = u32_get_bits(phy_data2, + IWL_RX_PHY_DATA2_HE_MU_EXT_CH1_RU2); + he_mu->ru_ch1[3] = u32_get_bits(phy_data3, + IWL_RX_PHY_DATA3_HE_MU_EXT_CH1_RU3); + } + + if (u32_get_bits(phy_data4, IWL_RX_PHY_DATA4_HE_MU_EXT_CH2_CRC_OK) && + (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) != RATE_MCS_CHAN_WIDTH_20) { + he_mu->flags1 |= + cpu_to_le16(IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH2_RU_KNOWN | + IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH2_CTR_26T_RU_KNOWN); + + he_mu->flags2 |= + le16_encode_bits(u32_get_bits(phy_data4, + IWL_RX_PHY_DATA4_HE_MU_EXT_CH2_CTR_RU), + IEEE80211_RADIOTAP_HE_MU_FLAGS2_CH2_CTR_26T_RU); + + he_mu->ru_ch2[0] = u32_get_bits(phy_data2, + IWL_RX_PHY_DATA2_HE_MU_EXT_CH2_RU0); + he_mu->ru_ch2[1] = u32_get_bits(phy_data3, + IWL_RX_PHY_DATA3_HE_MU_EXT_CH2_RU1); + he_mu->ru_ch2[2] = u32_get_bits(phy_data2, + IWL_RX_PHY_DATA2_HE_MU_EXT_CH2_RU2); + he_mu->ru_ch2[3] = u32_get_bits(phy_data3, + IWL_RX_PHY_DATA3_HE_MU_EXT_CH2_RU3); + } +} + +static void +iwl_mld_decode_he_phy_data(struct iwl_mld_rx_phy_data *phy_data, + struct ieee80211_radiotap_he *he, + struct ieee80211_radiotap_he_mu *he_mu, + struct ieee80211_rx_status *rx_status, + int queue) +{ + switch (phy_data->info_type) { + case IWL_RX_PHY_INFO_TYPE_NONE: + case IWL_RX_PHY_INFO_TYPE_CCK: + case IWL_RX_PHY_INFO_TYPE_OFDM_LGCY: + case IWL_RX_PHY_INFO_TYPE_HT: + case IWL_RX_PHY_INFO_TYPE_VHT_SU: + case IWL_RX_PHY_INFO_TYPE_VHT_MU: + case IWL_RX_PHY_INFO_TYPE_EHT_MU: + case IWL_RX_PHY_INFO_TYPE_EHT_TB: + case IWL_RX_PHY_INFO_TYPE_EHT_MU_EXT: + case IWL_RX_PHY_INFO_TYPE_EHT_TB_EXT: + return; + case IWL_RX_PHY_INFO_TYPE_HE_TB_EXT: + he->data1 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE_KNOWN | + IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE2_KNOWN | + IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE3_KNOWN | + IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE4_KNOWN); + he->data4 |= le16_encode_bits(le32_get_bits(phy_data->data2, + IWL_RX_PHY_DATA2_HE_TB_EXT_SPTL_REUSE1), + IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE1); + he->data4 |= le16_encode_bits(le32_get_bits(phy_data->data2, + IWL_RX_PHY_DATA2_HE_TB_EXT_SPTL_REUSE2), + IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE2); + he->data4 |= le16_encode_bits(le32_get_bits(phy_data->data2, + IWL_RX_PHY_DATA2_HE_TB_EXT_SPTL_REUSE3), + IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE3); + he->data4 |= le16_encode_bits(le32_get_bits(phy_data->data2, + IWL_RX_PHY_DATA2_HE_TB_EXT_SPTL_REUSE4), + IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE4); + fallthrough; + case IWL_RX_PHY_INFO_TYPE_HE_SU: + case IWL_RX_PHY_INFO_TYPE_HE_MU: + case IWL_RX_PHY_INFO_TYPE_HE_MU_EXT: + case IWL_RX_PHY_INFO_TYPE_HE_TB: + /* HE common */ + he->data1 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_LDPC_XSYMSEG_KNOWN | + IEEE80211_RADIOTAP_HE_DATA1_DOPPLER_KNOWN | + IEEE80211_RADIOTAP_HE_DATA1_BSS_COLOR_KNOWN); + he->data2 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_PRE_FEC_PAD_KNOWN | + IEEE80211_RADIOTAP_HE_DATA2_PE_DISAMBIG_KNOWN | + IEEE80211_RADIOTAP_HE_DATA2_TXOP_KNOWN | + IEEE80211_RADIOTAP_HE_DATA2_NUM_LTF_SYMS_KNOWN); + he->data3 |= le16_encode_bits(le32_get_bits(phy_data->data0, + IWL_RX_PHY_DATA0_HE_BSS_COLOR_MASK), + IEEE80211_RADIOTAP_HE_DATA3_BSS_COLOR); + if (phy_data->info_type != IWL_RX_PHY_INFO_TYPE_HE_TB && + phy_data->info_type != 
IWL_RX_PHY_INFO_TYPE_HE_TB_EXT) { + he->data1 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_UL_DL_KNOWN); + he->data3 |= le16_encode_bits(le32_get_bits(phy_data->data0, + IWL_RX_PHY_DATA0_HE_UPLINK), + IEEE80211_RADIOTAP_HE_DATA3_UL_DL); + } + he->data3 |= le16_encode_bits(le32_get_bits(phy_data->data0, + IWL_RX_PHY_DATA0_HE_LDPC_EXT_SYM), + IEEE80211_RADIOTAP_HE_DATA3_LDPC_XSYMSEG); + he->data5 |= le16_encode_bits(le32_get_bits(phy_data->data0, + IWL_RX_PHY_DATA0_HE_PRE_FEC_PAD_MASK), + IEEE80211_RADIOTAP_HE_DATA5_PRE_FEC_PAD); + he->data5 |= le16_encode_bits(le32_get_bits(phy_data->data0, + IWL_RX_PHY_DATA0_HE_PE_DISAMBIG), + IEEE80211_RADIOTAP_HE_DATA5_PE_DISAMBIG); + he->data5 |= le16_encode_bits(le32_get_bits(phy_data->data1, + IWL_RX_PHY_DATA1_HE_LTF_NUM_MASK), + IEEE80211_RADIOTAP_HE_DATA5_NUM_LTF_SYMS); + he->data6 |= le16_encode_bits(le32_get_bits(phy_data->data0, + IWL_RX_PHY_DATA0_HE_TXOP_DUR_MASK), + IEEE80211_RADIOTAP_HE_DATA6_TXOP); + he->data6 |= le16_encode_bits(le32_get_bits(phy_data->data0, + IWL_RX_PHY_DATA0_HE_DOPPLER), + IEEE80211_RADIOTAP_HE_DATA6_DOPPLER); + break; + } + + switch (phy_data->info_type) { + case IWL_RX_PHY_INFO_TYPE_HE_MU_EXT: + case IWL_RX_PHY_INFO_TYPE_HE_MU: + case IWL_RX_PHY_INFO_TYPE_HE_SU: + he->data1 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE_KNOWN); + he->data4 |= le16_encode_bits(le32_get_bits(phy_data->data0, + IWL_RX_PHY_DATA0_HE_SPATIAL_REUSE_MASK), + IEEE80211_RADIOTAP_HE_DATA4_SU_MU_SPTL_REUSE); + break; + default: + /* nothing here */ + break; + } + + switch (phy_data->info_type) { + case IWL_RX_PHY_INFO_TYPE_HE_MU_EXT: + he_mu->flags1 |= + le16_encode_bits(le16_get_bits(phy_data->data4, + IWL_RX_PHY_DATA4_HE_MU_EXT_SIGB_DCM), + IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_DCM); + he_mu->flags1 |= + le16_encode_bits(le16_get_bits(phy_data->data4, + IWL_RX_PHY_DATA4_HE_MU_EXT_SIGB_MCS_MASK), + IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_MCS); + he_mu->flags2 |= + le16_encode_bits(le16_get_bits(phy_data->data4, + IWL_RX_PHY_DATA4_HE_MU_EXT_PREAMBLE_PUNC_TYPE_MASK), + IEEE80211_RADIOTAP_HE_MU_FLAGS2_PUNC_FROM_SIG_A_BW); + iwl_mld_decode_he_mu_ext(phy_data, he_mu); + fallthrough; + case IWL_RX_PHY_INFO_TYPE_HE_MU: + he_mu->flags2 |= + le16_encode_bits(le32_get_bits(phy_data->data1, + IWL_RX_PHY_DATA1_HE_MU_SIBG_SYM_OR_USER_NUM_MASK), + IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_SYMS_USERS); + he_mu->flags2 |= + le16_encode_bits(le32_get_bits(phy_data->data1, + IWL_RX_PHY_DATA1_HE_MU_SIGB_COMPRESSION), + IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_COMP); + fallthrough; + case IWL_RX_PHY_INFO_TYPE_HE_TB: + case IWL_RX_PHY_INFO_TYPE_HE_TB_EXT: + iwl_mld_decode_he_phy_ru_alloc(phy_data, he, he_mu, rx_status); + break; + case IWL_RX_PHY_INFO_TYPE_HE_SU: + he->data1 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_BEAM_CHANGE_KNOWN); + he->data3 |= le16_encode_bits(le32_get_bits(phy_data->data0, + IWL_RX_PHY_DATA0_HE_BEAM_CHNG), + IEEE80211_RADIOTAP_HE_DATA3_BEAM_CHANGE); + break; + default: + /* nothing */ + break; + } +} + +static void iwl_mld_rx_he(struct iwl_mld *mld, struct sk_buff *skb, + struct iwl_mld_rx_phy_data *phy_data, + int queue) +{ + struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb); + struct ieee80211_radiotap_he *he = NULL; + struct ieee80211_radiotap_he_mu *he_mu = NULL; + u32 rate_n_flags = phy_data->rate_n_flags; + u32 he_type = rate_n_flags & RATE_MCS_HE_TYPE_MSK; + u8 ltf; + static const struct ieee80211_radiotap_he known = { + .data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN | + 
IEEE80211_RADIOTAP_HE_DATA1_DATA_DCM_KNOWN | + IEEE80211_RADIOTAP_HE_DATA1_STBC_KNOWN | + IEEE80211_RADIOTAP_HE_DATA1_CODING_KNOWN), + .data2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN | + IEEE80211_RADIOTAP_HE_DATA2_TXBF_KNOWN), + }; + static const struct ieee80211_radiotap_he_mu mu_known = { + .flags1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_MCS_KNOWN | + IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_DCM_KNOWN | + IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_SYMS_USERS_KNOWN | + IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_COMP_KNOWN), + .flags2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_MU_FLAGS2_PUNC_FROM_SIG_A_BW_KNOWN | + IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_KNOWN), + }; + u16 phy_info = phy_data->phy_info; + + he = skb_put_data(skb, &known, sizeof(known)); + rx_status->flag |= RX_FLAG_RADIOTAP_HE; + + if (phy_data->info_type == IWL_RX_PHY_INFO_TYPE_HE_MU || + phy_data->info_type == IWL_RX_PHY_INFO_TYPE_HE_MU_EXT) { + he_mu = skb_put_data(skb, &mu_known, sizeof(mu_known)); + rx_status->flag |= RX_FLAG_RADIOTAP_HE_MU; + } + + /* report the AMPDU-EOF bit on single frames */ + if (!queue && !(phy_info & IWL_RX_MPDU_PHY_AMPDU)) { + rx_status->flag |= RX_FLAG_AMPDU_DETAILS; + rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT_KNOWN; + if (phy_data->data0 & cpu_to_le32(IWL_RX_PHY_DATA0_HE_DELIM_EOF)) + rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT; + } + + if (phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD) + iwl_mld_decode_he_phy_data(phy_data, he, he_mu, rx_status, + queue); + + /* update aggregation data for monitor sake on default queue */ + if (!queue && (phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD) && + (phy_info & IWL_RX_MPDU_PHY_AMPDU) && phy_data->first_subframe) { + rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT_KNOWN; + if (phy_data->data0 & cpu_to_le32(IWL_RX_PHY_DATA0_EHT_DELIM_EOF)) + rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT; + } + + if (he_type == RATE_MCS_HE_TYPE_EXT_SU && + rate_n_flags & RATE_MCS_HE_106T_MSK) { + rx_status->bw = RATE_INFO_BW_HE_RU; + rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_106; + } + + /* actually data is filled in mac80211 */ + if (he_type == RATE_MCS_HE_TYPE_SU || + he_type == RATE_MCS_HE_TYPE_EXT_SU) + he->data1 |= + cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN); + +#define CHECK_TYPE(F) \ + BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA1_FORMAT_ ## F != \ + (RATE_MCS_HE_TYPE_ ## F >> RATE_MCS_HE_TYPE_POS)) + + CHECK_TYPE(SU); + CHECK_TYPE(EXT_SU); + CHECK_TYPE(MU); + CHECK_TYPE(TRIG); + + he->data1 |= cpu_to_le16(he_type >> RATE_MCS_HE_TYPE_POS); + + if (rate_n_flags & RATE_MCS_BF_MSK) + he->data5 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA5_TXBF); + + switch ((rate_n_flags & RATE_MCS_HE_GI_LTF_MSK) >> + RATE_MCS_HE_GI_LTF_POS) { + case 0: + if (he_type == RATE_MCS_HE_TYPE_TRIG) + rx_status->he_gi = NL80211_RATE_INFO_HE_GI_1_6; + else + rx_status->he_gi = NL80211_RATE_INFO_HE_GI_0_8; + if (he_type == RATE_MCS_HE_TYPE_MU) + ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_4X; + else + ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_1X; + break; + case 1: + if (he_type == RATE_MCS_HE_TYPE_TRIG) + rx_status->he_gi = NL80211_RATE_INFO_HE_GI_1_6; + else + rx_status->he_gi = NL80211_RATE_INFO_HE_GI_0_8; + ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_2X; + break; + case 2: + if (he_type == RATE_MCS_HE_TYPE_TRIG) { + rx_status->he_gi = NL80211_RATE_INFO_HE_GI_3_2; + ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_4X; + } else { + rx_status->he_gi = NL80211_RATE_INFO_HE_GI_1_6; + ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_2X; + } + break; + case 3: + rx_status->he_gi = 
NL80211_RATE_INFO_HE_GI_3_2; + ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_4X; + break; + case 4: + rx_status->he_gi = NL80211_RATE_INFO_HE_GI_0_8; + ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_4X; + break; + default: + ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_UNKNOWN; + } + + he->data5 |= le16_encode_bits(ltf, + IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE); +} + +static void iwl_mld_decode_lsig(struct sk_buff *skb, + struct iwl_mld_rx_phy_data *phy_data) +{ + struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb); + struct ieee80211_radiotap_lsig *lsig; + + switch (phy_data->info_type) { + case IWL_RX_PHY_INFO_TYPE_HT: + case IWL_RX_PHY_INFO_TYPE_VHT_SU: + case IWL_RX_PHY_INFO_TYPE_VHT_MU: + case IWL_RX_PHY_INFO_TYPE_HE_TB_EXT: + case IWL_RX_PHY_INFO_TYPE_HE_SU: + case IWL_RX_PHY_INFO_TYPE_HE_MU: + case IWL_RX_PHY_INFO_TYPE_HE_MU_EXT: + case IWL_RX_PHY_INFO_TYPE_HE_TB: + case IWL_RX_PHY_INFO_TYPE_EHT_MU: + case IWL_RX_PHY_INFO_TYPE_EHT_TB: + case IWL_RX_PHY_INFO_TYPE_EHT_MU_EXT: + case IWL_RX_PHY_INFO_TYPE_EHT_TB_EXT: + lsig = skb_put(skb, sizeof(*lsig)); + lsig->data1 = cpu_to_le16(IEEE80211_RADIOTAP_LSIG_DATA1_LENGTH_KNOWN); + lsig->data2 = le16_encode_bits(le32_get_bits(phy_data->data1, + IWL_RX_PHY_DATA1_LSIG_LEN_MASK), + IEEE80211_RADIOTAP_LSIG_DATA2_LENGTH); + rx_status->flag |= RX_FLAG_RADIOTAP_LSIG; + break; + default: + break; + } +} + +/* Put a TLV on the skb and return data pointer + * + * Also pad the len to 4 and zero out all data part + */ +static void * +iwl_mld_radiotap_put_tlv(struct sk_buff *skb, u16 type, u16 len) +{ + struct ieee80211_radiotap_tlv *tlv; + + tlv = skb_put(skb, sizeof(*tlv)); + tlv->type = cpu_to_le16(type); + tlv->len = cpu_to_le16(len); + return skb_put_zero(skb, ALIGN(len, 4)); +} + +#define LE32_DEC_ENC(value, dec_bits, enc_bits) \ + le32_encode_bits(le32_get_bits(value, dec_bits), enc_bits) + +#define IWL_MLD_ENC_USIG_VALUE_MASK(usig, in_value, dec_bits, enc_bits) do { \ + typeof(enc_bits) _enc_bits = enc_bits; \ + typeof(usig) _usig = usig; \ + (_usig)->mask |= cpu_to_le32(_enc_bits); \ + (_usig)->value |= LE32_DEC_ENC(in_value, dec_bits, _enc_bits); \ +} while (0) + +#define __IWL_MLD_ENC_EHT_RU(rt_data, rt_ru, fw_data, fw_ru) \ + eht->data[(rt_data)] |= \ + (cpu_to_le32 \ + (IEEE80211_RADIOTAP_EHT_DATA ## rt_data ## _RU_ALLOC_CC_ ## rt_ru ## _KNOWN) | \ + LE32_DEC_ENC(data ## fw_data, \ + IWL_RX_PHY_DATA ## fw_data ## _EHT_MU_EXT_RU_ALLOC_ ## fw_ru, \ + IEEE80211_RADIOTAP_EHT_DATA ## rt_data ## _RU_ALLOC_CC_ ## rt_ru)) + +#define _IWL_MLD_ENC_EHT_RU(rt_data, rt_ru, fw_data, fw_ru) \ + __IWL_MLD_ENC_EHT_RU(rt_data, rt_ru, fw_data, fw_ru) + +#define IEEE80211_RADIOTAP_RU_DATA_1_1_1 1 +#define IEEE80211_RADIOTAP_RU_DATA_2_1_1 2 +#define IEEE80211_RADIOTAP_RU_DATA_1_1_2 2 +#define IEEE80211_RADIOTAP_RU_DATA_2_1_2 2 +#define IEEE80211_RADIOTAP_RU_DATA_1_2_1 3 +#define IEEE80211_RADIOTAP_RU_DATA_2_2_1 3 +#define IEEE80211_RADIOTAP_RU_DATA_1_2_2 3 +#define IEEE80211_RADIOTAP_RU_DATA_2_2_2 4 + +#define IWL_RX_RU_DATA_A1 2 +#define IWL_RX_RU_DATA_A2 2 +#define IWL_RX_RU_DATA_B1 2 +#define IWL_RX_RU_DATA_B2 4 +#define IWL_RX_RU_DATA_C1 3 +#define IWL_RX_RU_DATA_C2 3 +#define IWL_RX_RU_DATA_D1 4 +#define IWL_RX_RU_DATA_D2 4 + +#define IWL_MLD_ENC_EHT_RU(rt_ru, fw_ru) \ + _IWL_MLD_ENC_EHT_RU(IEEE80211_RADIOTAP_RU_DATA_ ## rt_ru, \ + rt_ru, \ + IWL_RX_RU_DATA_ ## fw_ru, \ + fw_ru) + +static void iwl_mld_decode_eht_ext_mu(struct iwl_mld *mld, + struct iwl_mld_rx_phy_data *phy_data, + struct ieee80211_rx_status *rx_status, + struct ieee80211_radiotap_eht 
*eht, + struct ieee80211_radiotap_eht_usig *usig) +{ + if (phy_data->with_data) { + __le32 data1 = phy_data->data1; + __le32 data2 = phy_data->data2; + __le32 data3 = phy_data->data3; + __le32 data4 = phy_data->eht_data4; + __le32 data5 = phy_data->data5; + u32 phy_bw = phy_data->rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK; + + IWL_MLD_ENC_USIG_VALUE_MASK(usig, data5, + IWL_RX_PHY_DATA5_EHT_TYPE_AND_COMP, + IEEE80211_RADIOTAP_EHT_USIG2_MU_B0_B1_PPDU_TYPE); + IWL_MLD_ENC_USIG_VALUE_MASK(usig, data5, + IWL_RX_PHY_DATA5_EHT_MU_PUNC_CH_CODE, + IEEE80211_RADIOTAP_EHT_USIG2_MU_B3_B7_PUNCTURED_INFO); + IWL_MLD_ENC_USIG_VALUE_MASK(usig, data4, + IWL_RX_PHY_DATA4_EHT_MU_EXT_SIGB_MCS, + IEEE80211_RADIOTAP_EHT_USIG2_MU_B9_B10_SIG_MCS); + IWL_MLD_ENC_USIG_VALUE_MASK + (usig, data1, IWL_RX_PHY_DATA1_EHT_MU_NUM_SIG_SYM_USIGA2, + IEEE80211_RADIOTAP_EHT_USIG2_MU_B11_B15_EHT_SIG_SYMBOLS); + + eht->user_info[0] |= + cpu_to_le32(IEEE80211_RADIOTAP_EHT_USER_INFO_STA_ID_KNOWN) | + LE32_DEC_ENC(data5, IWL_RX_PHY_DATA5_EHT_MU_STA_ID_USR, + IEEE80211_RADIOTAP_EHT_USER_INFO_STA_ID); + + eht->known |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_KNOWN_NR_NON_OFDMA_USERS_M); + eht->data[7] |= LE32_DEC_ENC + (data5, IWL_RX_PHY_DATA5_EHT_MU_NUM_USR_NON_OFDMA, + IEEE80211_RADIOTAP_EHT_DATA7_NUM_OF_NON_OFDMA_USERS); + + /* + * Hardware labels the content channels/RU allocation values + * as follows: + * Content Channel 1 Content Channel 2 + * 20 MHz: A1 + * 40 MHz: A1 B1 + * 80 MHz: A1 C1 B1 D1 + * 160 MHz: A1 C1 A2 C2 B1 D1 B2 D2 + * 320 MHz: A1 C1 A2 C2 A3 C3 A4 C4 B1 D1 B2 D2 B3 D3 B4 D4 + * + * However firmware can only give us A1-D2, so the higher + * frequencies are missing. + */ + + switch (phy_bw) { + case RATE_MCS_CHAN_WIDTH_320: + /* additional values are missing in RX metadata */ + fallthrough; + case RATE_MCS_CHAN_WIDTH_160: + /* content channel 1 */ + IWL_MLD_ENC_EHT_RU(1_2_1, A2); + IWL_MLD_ENC_EHT_RU(1_2_2, C2); + /* content channel 2 */ + IWL_MLD_ENC_EHT_RU(2_2_1, B2); + IWL_MLD_ENC_EHT_RU(2_2_2, D2); + fallthrough; + case RATE_MCS_CHAN_WIDTH_80: + /* content channel 1 */ + IWL_MLD_ENC_EHT_RU(1_1_2, C1); + /* content channel 2 */ + IWL_MLD_ENC_EHT_RU(2_1_2, D1); + fallthrough; + case RATE_MCS_CHAN_WIDTH_40: + /* content channel 2 */ + IWL_MLD_ENC_EHT_RU(2_1_1, B1); + fallthrough; + case RATE_MCS_CHAN_WIDTH_20: + IWL_MLD_ENC_EHT_RU(1_1_1, A1); + break; + } + } else { + __le32 usig_a1 = phy_data->rx_vec[0]; + __le32 usig_a2 = phy_data->rx_vec[1]; + + IWL_MLD_ENC_USIG_VALUE_MASK(usig, usig_a1, + IWL_RX_USIG_A1_DISREGARD, + IEEE80211_RADIOTAP_EHT_USIG1_MU_B20_B24_DISREGARD); + IWL_MLD_ENC_USIG_VALUE_MASK(usig, usig_a1, + IWL_RX_USIG_A1_VALIDATE, + IEEE80211_RADIOTAP_EHT_USIG1_MU_B25_VALIDATE); + IWL_MLD_ENC_USIG_VALUE_MASK(usig, usig_a2, + IWL_RX_USIG_A2_EHT_PPDU_TYPE, + IEEE80211_RADIOTAP_EHT_USIG2_MU_B0_B1_PPDU_TYPE); + IWL_MLD_ENC_USIG_VALUE_MASK(usig, usig_a2, + IWL_RX_USIG_A2_EHT_USIG2_VALIDATE_B2, + IEEE80211_RADIOTAP_EHT_USIG2_MU_B2_VALIDATE); + IWL_MLD_ENC_USIG_VALUE_MASK(usig, usig_a2, + IWL_RX_USIG_A2_EHT_PUNC_CHANNEL, + IEEE80211_RADIOTAP_EHT_USIG2_MU_B3_B7_PUNCTURED_INFO); + IWL_MLD_ENC_USIG_VALUE_MASK(usig, usig_a2, + IWL_RX_USIG_A2_EHT_USIG2_VALIDATE_B8, + IEEE80211_RADIOTAP_EHT_USIG2_MU_B8_VALIDATE); + IWL_MLD_ENC_USIG_VALUE_MASK(usig, usig_a2, + IWL_RX_USIG_A2_EHT_SIG_MCS, + IEEE80211_RADIOTAP_EHT_USIG2_MU_B9_B10_SIG_MCS); + IWL_MLD_ENC_USIG_VALUE_MASK + (usig, usig_a2, IWL_RX_USIG_A2_EHT_SIG_SYM_NUM, + IEEE80211_RADIOTAP_EHT_USIG2_MU_B11_B15_EHT_SIG_SYMBOLS); + IWL_MLD_ENC_USIG_VALUE_MASK(usig, usig_a2, + 
IWL_RX_USIG_A2_EHT_CRC_OK, + IEEE80211_RADIOTAP_EHT_USIG2_MU_B16_B19_CRC); + } +} + +static void iwl_mld_decode_eht_ext_tb(struct iwl_mld *mld, + struct iwl_mld_rx_phy_data *phy_data, + struct ieee80211_rx_status *rx_status, + struct ieee80211_radiotap_eht *eht, + struct ieee80211_radiotap_eht_usig *usig) +{ + if (phy_data->with_data) { + __le32 data5 = phy_data->data5; + + IWL_MLD_ENC_USIG_VALUE_MASK(usig, data5, + IWL_RX_PHY_DATA5_EHT_TYPE_AND_COMP, + IEEE80211_RADIOTAP_EHT_USIG2_TB_B0_B1_PPDU_TYPE); + IWL_MLD_ENC_USIG_VALUE_MASK(usig, data5, + IWL_RX_PHY_DATA5_EHT_TB_SPATIAL_REUSE1, + IEEE80211_RADIOTAP_EHT_USIG2_TB_B3_B6_SPATIAL_REUSE_1); + + IWL_MLD_ENC_USIG_VALUE_MASK(usig, data5, + IWL_RX_PHY_DATA5_EHT_TB_SPATIAL_REUSE2, + IEEE80211_RADIOTAP_EHT_USIG2_TB_B7_B10_SPATIAL_REUSE_2); + } else { + __le32 usig_a1 = phy_data->rx_vec[0]; + __le32 usig_a2 = phy_data->rx_vec[1]; + + IWL_MLD_ENC_USIG_VALUE_MASK(usig, usig_a1, + IWL_RX_USIG_A1_DISREGARD, + IEEE80211_RADIOTAP_EHT_USIG1_TB_B20_B25_DISREGARD); + IWL_MLD_ENC_USIG_VALUE_MASK(usig, usig_a2, + IWL_RX_USIG_A2_EHT_PPDU_TYPE, + IEEE80211_RADIOTAP_EHT_USIG2_TB_B0_B1_PPDU_TYPE); + IWL_MLD_ENC_USIG_VALUE_MASK(usig, usig_a2, + IWL_RX_USIG_A2_EHT_USIG2_VALIDATE_B2, + IEEE80211_RADIOTAP_EHT_USIG2_TB_B2_VALIDATE); + IWL_MLD_ENC_USIG_VALUE_MASK(usig, usig_a2, + IWL_RX_USIG_A2_EHT_TRIG_SPATIAL_REUSE_1, + IEEE80211_RADIOTAP_EHT_USIG2_TB_B3_B6_SPATIAL_REUSE_1); + IWL_MLD_ENC_USIG_VALUE_MASK(usig, usig_a2, + IWL_RX_USIG_A2_EHT_TRIG_SPATIAL_REUSE_2, + IEEE80211_RADIOTAP_EHT_USIG2_TB_B7_B10_SPATIAL_REUSE_2); + IWL_MLD_ENC_USIG_VALUE_MASK(usig, usig_a2, + IWL_RX_USIG_A2_EHT_TRIG_USIG2_DISREGARD, + IEEE80211_RADIOTAP_EHT_USIG2_TB_B11_B15_DISREGARD); + IWL_MLD_ENC_USIG_VALUE_MASK(usig, usig_a2, + IWL_RX_USIG_A2_EHT_CRC_OK, + IEEE80211_RADIOTAP_EHT_USIG2_TB_B16_B19_CRC); + } +} + +static void iwl_mld_decode_eht_ru(struct iwl_mld *mld, + struct ieee80211_rx_status *rx_status, + struct ieee80211_radiotap_eht *eht) +{ + u32 ru = le32_get_bits(eht->data[8], + IEEE80211_RADIOTAP_EHT_DATA8_RU_ALLOC_TB_FMT_B7_B1); + enum nl80211_eht_ru_alloc nl_ru; + + /* Using D1.5 Table 9-53a - Encoding of PS160 and RU Allocation subfields + * in an EHT variant User Info field + */ + + switch (ru) { + case 0 ... 36: + nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_26; + break; + case 37 ... 52: + nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_52; + break; + case 53 ... 60: + nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_106; + break; + case 61 ... 64: + nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_242; + break; + case 65 ... 66: + nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_484; + break; + case 67: + nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_996; + break; + case 68: + nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_2x996; + break; + case 69: + nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_4x996; + break; + case 70 ... 81: + nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_52P26; + break; + case 82 ... 89: + nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_106P26; + break; + case 90 ... 93: + nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_484P242; + break; + case 94 ... 95: + nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_996P484; + break; + case 96 ... 99: + nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_996P484P242; + break; + case 100 ... 103: + nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_2x996P484; + break; + case 104: + nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_3x996; + break; + case 105 ... 
106: + nl_ru = NL80211_RATE_INFO_EHT_RU_ALLOC_3x996P484; + break; + default: + return; + } + + rx_status->bw = RATE_INFO_BW_EHT_RU; + rx_status->eht.ru = nl_ru; +} + +static void iwl_mld_decode_eht_phy_data(struct iwl_mld *mld, + struct iwl_mld_rx_phy_data *phy_data, + struct ieee80211_rx_status *rx_status, + struct ieee80211_radiotap_eht *eht, + struct ieee80211_radiotap_eht_usig *usig) + +{ + __le32 data0 = phy_data->data0; + __le32 data1 = phy_data->data1; + __le32 usig_a1 = phy_data->rx_vec[0]; + u8 info_type = phy_data->info_type; + + /* Not in EHT range */ + if (info_type < IWL_RX_PHY_INFO_TYPE_EHT_MU || + info_type > IWL_RX_PHY_INFO_TYPE_EHT_TB_EXT) + return; + + usig->common |= cpu_to_le32 + (IEEE80211_RADIOTAP_EHT_USIG_COMMON_UL_DL_KNOWN | + IEEE80211_RADIOTAP_EHT_USIG_COMMON_BSS_COLOR_KNOWN); + if (phy_data->with_data) { + usig->common |= LE32_DEC_ENC(data0, + IWL_RX_PHY_DATA0_EHT_UPLINK, + IEEE80211_RADIOTAP_EHT_USIG_COMMON_UL_DL); + usig->common |= LE32_DEC_ENC(data0, + IWL_RX_PHY_DATA0_EHT_BSS_COLOR_MASK, + IEEE80211_RADIOTAP_EHT_USIG_COMMON_BSS_COLOR); + } else { + usig->common |= LE32_DEC_ENC(usig_a1, + IWL_RX_USIG_A1_UL_FLAG, + IEEE80211_RADIOTAP_EHT_USIG_COMMON_UL_DL); + usig->common |= LE32_DEC_ENC(usig_a1, + IWL_RX_USIG_A1_BSS_COLOR, + IEEE80211_RADIOTAP_EHT_USIG_COMMON_BSS_COLOR); + } + + usig->common |= + cpu_to_le32(IEEE80211_RADIOTAP_EHT_USIG_COMMON_VALIDATE_BITS_CHECKED); + usig->common |= + LE32_DEC_ENC(data0, IWL_RX_PHY_DATA0_EHT_VALIDATE, + IEEE80211_RADIOTAP_EHT_USIG_COMMON_VALIDATE_BITS_OK); + + eht->known |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_KNOWN_SPATIAL_REUSE); + eht->data[0] |= LE32_DEC_ENC(data0, + IWL_RX_PHY_DATA0_ETH_SPATIAL_REUSE_MASK, + IEEE80211_RADIOTAP_EHT_DATA0_SPATIAL_REUSE); + + /* All RU allocating size/index is in TB format */ + eht->known |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_KNOWN_RU_ALLOC_TB_FMT); + eht->data[8] |= LE32_DEC_ENC(data0, IWL_RX_PHY_DATA0_EHT_PS160, + IEEE80211_RADIOTAP_EHT_DATA8_RU_ALLOC_TB_FMT_PS_160); + eht->data[8] |= LE32_DEC_ENC(data1, IWL_RX_PHY_DATA1_EHT_RU_ALLOC_B0, + IEEE80211_RADIOTAP_EHT_DATA8_RU_ALLOC_TB_FMT_B0); + eht->data[8] |= LE32_DEC_ENC(data1, IWL_RX_PHY_DATA1_EHT_RU_ALLOC_B1_B7, + IEEE80211_RADIOTAP_EHT_DATA8_RU_ALLOC_TB_FMT_B7_B1); + + iwl_mld_decode_eht_ru(mld, rx_status, eht); + + /* We only get here in case of IWL_RX_MPDU_PHY_TSF_OVERLOAD is set + * which is on only in case of monitor mode so no need to check monitor + * mode + */ + eht->known |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_KNOWN_PRIMARY_80); + eht->data[1] |= + le32_encode_bits(mld->monitor.p80, + IEEE80211_RADIOTAP_EHT_DATA1_PRIMARY_80); + + usig->common |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_USIG_COMMON_TXOP_KNOWN); + if (phy_data->with_data) + usig->common |= LE32_DEC_ENC(data0, IWL_RX_PHY_DATA0_EHT_TXOP_DUR_MASK, + IEEE80211_RADIOTAP_EHT_USIG_COMMON_TXOP); + else + usig->common |= LE32_DEC_ENC(usig_a1, IWL_RX_USIG_A1_TXOP_DURATION, + IEEE80211_RADIOTAP_EHT_USIG_COMMON_TXOP); + + eht->known |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_KNOWN_LDPC_EXTRA_SYM_OM); + eht->data[0] |= LE32_DEC_ENC(data0, IWL_RX_PHY_DATA0_EHT_LDPC_EXT_SYM, + IEEE80211_RADIOTAP_EHT_DATA0_LDPC_EXTRA_SYM_OM); + + eht->known |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_KNOWN_PRE_PADD_FACOR_OM); + eht->data[0] |= LE32_DEC_ENC(data0, IWL_RX_PHY_DATA0_EHT_PRE_FEC_PAD_MASK, + IEEE80211_RADIOTAP_EHT_DATA0_PRE_PADD_FACOR_OM); + + eht->known |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_KNOWN_PE_DISAMBIGUITY_OM); + eht->data[0] |= LE32_DEC_ENC(data0, IWL_RX_PHY_DATA0_EHT_PE_DISAMBIG, + 
IEEE80211_RADIOTAP_EHT_DATA0_PE_DISAMBIGUITY_OM); + + /* TODO: what about IWL_RX_PHY_DATA0_EHT_BW320_SLOT */ + + if (!le32_get_bits(data0, IWL_RX_PHY_DATA0_EHT_SIGA_CRC_OK)) + usig->common |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_USIG_COMMON_BAD_USIG_CRC); + + usig->common |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_USIG_COMMON_PHY_VER_KNOWN); + usig->common |= LE32_DEC_ENC(data0, IWL_RX_PHY_DATA0_EHT_PHY_VER, + IEEE80211_RADIOTAP_EHT_USIG_COMMON_PHY_VER); + + /* + * TODO: what about TB - IWL_RX_PHY_DATA1_EHT_TB_PILOT_TYPE, + * IWL_RX_PHY_DATA1_EHT_TB_LOW_SS + */ + + eht->known |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_KNOWN_EHT_LTF); + eht->data[0] |= LE32_DEC_ENC(data1, IWL_RX_PHY_DATA1_EHT_SIG_LTF_NUM, + IEEE80211_RADIOTAP_EHT_DATA0_EHT_LTF); + + if (info_type == IWL_RX_PHY_INFO_TYPE_EHT_TB_EXT || + info_type == IWL_RX_PHY_INFO_TYPE_EHT_TB) + iwl_mld_decode_eht_ext_tb(mld, phy_data, rx_status, eht, usig); + + if (info_type == IWL_RX_PHY_INFO_TYPE_EHT_MU_EXT || + info_type == IWL_RX_PHY_INFO_TYPE_EHT_MU) + iwl_mld_decode_eht_ext_mu(mld, phy_data, rx_status, eht, usig); +} + +static void iwl_mld_rx_eht(struct iwl_mld *mld, struct sk_buff *skb, + struct iwl_mld_rx_phy_data *phy_data, + int queue) +{ + struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb); + struct ieee80211_radiotap_eht *eht; + struct ieee80211_radiotap_eht_usig *usig; + size_t eht_len = sizeof(*eht); + + u32 rate_n_flags = phy_data->rate_n_flags; + u32 he_type = rate_n_flags & RATE_MCS_HE_TYPE_MSK; + /* EHT and HE have the same values for LTF */ + u8 ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_UNKNOWN; + u16 phy_info = phy_data->phy_info; + u32 bw; + + /* u32 for 1 user_info */ + if (phy_data->with_data) + eht_len += sizeof(u32); + + eht = iwl_mld_radiotap_put_tlv(skb, IEEE80211_RADIOTAP_EHT, eht_len); + + usig = iwl_mld_radiotap_put_tlv(skb, IEEE80211_RADIOTAP_EHT_USIG, + sizeof(*usig)); + rx_status->flag |= RX_FLAG_RADIOTAP_TLV_AT_END; + usig->common |= + cpu_to_le32(IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW_KNOWN); + + /* specific handling for 320MHz */ + bw = u32_get_bits(rate_n_flags, RATE_MCS_CHAN_WIDTH_MSK); + if (bw == RATE_MCS_CHAN_WIDTH_320_VAL) + bw += le32_get_bits(phy_data->data0, + IWL_RX_PHY_DATA0_EHT_BW320_SLOT); + + usig->common |= cpu_to_le32 + (FIELD_PREP(IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW, bw)); + + /* report the AMPDU-EOF bit on single frames */ + if (!queue && !(phy_info & IWL_RX_MPDU_PHY_AMPDU)) { + rx_status->flag |= RX_FLAG_AMPDU_DETAILS; + rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT_KNOWN; + if (phy_data->data0 & + cpu_to_le32(IWL_RX_PHY_DATA0_EHT_DELIM_EOF)) + rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT; + } + + if (phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD) + iwl_mld_decode_eht_phy_data(mld, phy_data, rx_status, eht, usig); + +#define CHECK_TYPE(F) \ + BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA1_FORMAT_ ## F != \ + (RATE_MCS_HE_TYPE_ ## F >> RATE_MCS_HE_TYPE_POS)) + + CHECK_TYPE(SU); + CHECK_TYPE(EXT_SU); + CHECK_TYPE(MU); + CHECK_TYPE(TRIG); + + switch (u32_get_bits(rate_n_flags, RATE_MCS_HE_GI_LTF_MSK)) { + case 0: + if (he_type == RATE_MCS_HE_TYPE_TRIG) { + rx_status->eht.gi = NL80211_RATE_INFO_EHT_GI_1_6; + ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_1X; + } else { + rx_status->eht.gi = NL80211_RATE_INFO_EHT_GI_0_8; + ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_2X; + } + break; + case 1: + rx_status->eht.gi = NL80211_RATE_INFO_EHT_GI_1_6; + ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_2X; + break; + case 2: + ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_4X; + if (he_type == RATE_MCS_HE_TYPE_TRIG) + 
rx_status->eht.gi = NL80211_RATE_INFO_EHT_GI_3_2; + else + rx_status->eht.gi = NL80211_RATE_INFO_EHT_GI_0_8; + break; + case 3: + if (he_type != RATE_MCS_HE_TYPE_TRIG) { + ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_4X; + rx_status->eht.gi = NL80211_RATE_INFO_EHT_GI_3_2; + } + break; + default: + /* nothing here */ + break; + } + + if (ltf != IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_UNKNOWN) { + eht->known |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_KNOWN_GI); + eht->data[0] |= cpu_to_le32 + (FIELD_PREP(IEEE80211_RADIOTAP_EHT_DATA0_LTF, + ltf) | + FIELD_PREP(IEEE80211_RADIOTAP_EHT_DATA0_GI, + rx_status->eht.gi)); + } + + if (!phy_data->with_data) { + eht->known |= cpu_to_le32(IEEE80211_RADIOTAP_EHT_KNOWN_NSS_S | + IEEE80211_RADIOTAP_EHT_KNOWN_BEAMFORMED_S); + eht->data[7] |= + le32_encode_bits(le32_get_bits(phy_data->rx_vec[2], + RX_NO_DATA_RX_VEC2_EHT_NSTS_MSK), + IEEE80211_RADIOTAP_EHT_DATA7_NSS_S); + if (rate_n_flags & RATE_MCS_BF_MSK) + eht->data[7] |= + cpu_to_le32(IEEE80211_RADIOTAP_EHT_DATA7_BEAMFORMED_S); + } else { + eht->user_info[0] |= + cpu_to_le32(IEEE80211_RADIOTAP_EHT_USER_INFO_MCS_KNOWN | + IEEE80211_RADIOTAP_EHT_USER_INFO_CODING_KNOWN | + IEEE80211_RADIOTAP_EHT_USER_INFO_NSS_KNOWN_O | + IEEE80211_RADIOTAP_EHT_USER_INFO_BEAMFORMING_KNOWN_O | + IEEE80211_RADIOTAP_EHT_USER_INFO_DATA_FOR_USER); + + if (rate_n_flags & RATE_MCS_BF_MSK) + eht->user_info[0] |= + cpu_to_le32(IEEE80211_RADIOTAP_EHT_USER_INFO_BEAMFORMING_O); + + if (rate_n_flags & RATE_MCS_LDPC_MSK) + eht->user_info[0] |= + cpu_to_le32(IEEE80211_RADIOTAP_EHT_USER_INFO_CODING); + + eht->user_info[0] |= cpu_to_le32 + (FIELD_PREP(IEEE80211_RADIOTAP_EHT_USER_INFO_MCS, + u32_get_bits(rate_n_flags, + RATE_VHT_MCS_RATE_CODE_MSK)) | + FIELD_PREP(IEEE80211_RADIOTAP_EHT_USER_INFO_NSS_O, + u32_get_bits(rate_n_flags, + RATE_MCS_NSS_MSK))); + } +} + +#ifdef CONFIG_IWLWIFI_DEBUGFS +static void iwl_mld_add_rtap_sniffer_config(struct iwl_mld *mld, + struct sk_buff *skb) +{ + struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb); + struct ieee80211_radiotap_vendor_content *radiotap; + const u16 vendor_data_len = sizeof(mld->monitor.cur_aid); + + if (!mld->monitor.cur_aid) + return; + + radiotap = + iwl_mld_radiotap_put_tlv(skb, + IEEE80211_RADIOTAP_VENDOR_NAMESPACE, + sizeof(*radiotap) + vendor_data_len); + + /* Intel OUI */ + radiotap->oui[0] = 0xf6; + radiotap->oui[1] = 0x54; + radiotap->oui[2] = 0x25; + /* radiotap sniffer config sub-namespace */ + radiotap->oui_subtype = 1; + radiotap->vendor_type = 0; + + /* fill the data now */ + memcpy(radiotap->data, &mld->monitor.cur_aid, + sizeof(mld->monitor.cur_aid)); + + rx_status->flag |= RX_FLAG_RADIOTAP_TLV_AT_END; +} +#endif + +static void iwl_mld_rx_fill_status(struct iwl_mld *mld, struct sk_buff *skb, + struct iwl_mld_rx_phy_data *phy_data, + struct iwl_rx_mpdu_desc *mpdu_desc, + struct ieee80211_hdr *hdr, + int queue) +{ + struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb); + u32 format = phy_data->rate_n_flags & RATE_MCS_MOD_TYPE_MSK; + u32 rate_n_flags = phy_data->rate_n_flags; + u8 stbc = u32_get_bits(rate_n_flags, RATE_MCS_STBC_MSK); + bool is_sgi = rate_n_flags & RATE_MCS_SGI_MSK; + + if (WARN_ON_ONCE(phy_data->with_data && (!mpdu_desc || !hdr))) + return; + + /* Keep packets with CRC errors (and with overrun) for monitor mode + * (otherwise the firmware discards them) but mark them as bad. 
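+ * mac80211 will still show such frames (flagged with
+ * RX_FLAG_FAILED_FCS_CRC) on monitor interfaces, while dropping
+ * them for regular interfaces.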
+ */ + if (phy_data->with_data && + (!(mpdu_desc->status & cpu_to_le32(IWL_RX_MPDU_STATUS_CRC_OK)) || + !(mpdu_desc->status & cpu_to_le32(IWL_RX_MPDU_STATUS_OVERRUN_OK)))) { + IWL_DEBUG_RX(mld, "Bad CRC or FIFO: 0x%08X.\n", + le32_to_cpu(mpdu_desc->status)); + rx_status->flag |= RX_FLAG_FAILED_FCS_CRC; + } + + phy_data->info_type = IWL_RX_PHY_INFO_TYPE_NONE; + + if (phy_data->with_data && + likely(!(phy_data->phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD))) { + rx_status->mactime = + le64_to_cpu(mpdu_desc->v3.tsf_on_air_rise); + + /* TSF as indicated by the firmware is at INA time */ + rx_status->flag |= RX_FLAG_MACTIME_PLCP_START; + } else { + phy_data->info_type = + le32_get_bits(phy_data->data1, + IWL_RX_PHY_DATA1_INFO_TYPE_MASK); + } + + /* management stuff on default queue */ + if (!queue && phy_data->with_data && + unlikely(ieee80211_is_beacon(hdr->frame_control) || + ieee80211_is_probe_resp(hdr->frame_control))) { + rx_status->boottime_ns = ktime_get_boottime_ns(); + + if (mld->scan.pass_all_sched_res == SCHED_SCAN_PASS_ALL_STATE_ENABLED) + mld->scan.pass_all_sched_res = SCHED_SCAN_PASS_ALL_STATE_FOUND; + } + + /* set the preamble flag if appropriate */ + if (format == RATE_MCS_CCK_MSK && + phy_data->phy_info & IWL_RX_MPDU_PHY_SHORT_PREAMBLE) + rx_status->enc_flags |= RX_ENC_FLAG_SHORTPRE; + + iwl_mld_fill_signal(mld, rx_status, phy_data); + + /* This may be overridden by iwl_mld_rx_he() to HE_RU */ + switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) { + case RATE_MCS_CHAN_WIDTH_20: + break; + case RATE_MCS_CHAN_WIDTH_40: + rx_status->bw = RATE_INFO_BW_40; + break; + case RATE_MCS_CHAN_WIDTH_80: + rx_status->bw = RATE_INFO_BW_80; + break; + case RATE_MCS_CHAN_WIDTH_160: + rx_status->bw = RATE_INFO_BW_160; + break; + case RATE_MCS_CHAN_WIDTH_320: + rx_status->bw = RATE_INFO_BW_320; + break; + } + + /* must be before L-SIG data */ + if (format == RATE_MCS_HE_MSK) + iwl_mld_rx_he(mld, skb, phy_data, queue); + + iwl_mld_decode_lsig(skb, phy_data); + + rx_status->device_timestamp = phy_data->gp2_on_air_rise; + + /* using TLV format and must be after all fixed len fields */ + if (format == RATE_MCS_EHT_MSK) + iwl_mld_rx_eht(mld, skb, phy_data, queue); + +#ifdef CONFIG_IWLWIFI_DEBUGFS + if (unlikely(mld->monitor.on)) + iwl_mld_add_rtap_sniffer_config(mld, skb); +#endif + + if (format != RATE_MCS_CCK_MSK && is_sgi) + rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI; + + if (rate_n_flags & RATE_MCS_LDPC_MSK) + rx_status->enc_flags |= RX_ENC_FLAG_LDPC; + + switch (format) { + case RATE_MCS_HT_MSK: + rx_status->encoding = RX_ENC_HT; + rx_status->rate_idx = RATE_HT_MCS_INDEX(rate_n_flags); + rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT; + break; + case RATE_MCS_VHT_MSK: + case RATE_MCS_HE_MSK: + case RATE_MCS_EHT_MSK: + if (format == RATE_MCS_VHT_MSK) { + rx_status->encoding = RX_ENC_VHT; + } else if (format == RATE_MCS_HE_MSK) { + rx_status->encoding = RX_ENC_HE; + rx_status->he_dcm = + !!(rate_n_flags & RATE_HE_DUAL_CARRIER_MODE_MSK); + } else if (format == RATE_MCS_EHT_MSK) { + rx_status->encoding = RX_ENC_EHT; + } + + rx_status->nss = u32_get_bits(rate_n_flags, RATE_MCS_NSS_MSK) + 1; + rx_status->rate_idx = rate_n_flags & RATE_MCS_CODE_MSK; + rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT; + break; + default: { + int rate = + iwl_mld_legacy_hw_idx_to_mac80211_idx(rate_n_flags, + rx_status->band); + + /* valid rate */ + if (rate >= 0 && rate <= 0xFF) { + rx_status->rate_idx = rate; + break; + } + + /* invalid rate */ + rx_status->rate_idx = 0; + + if (net_ratelimit()) + IWL_ERR(mld, 
"invalid rate_n_flags=0x%x, band=%d\n", + rate_n_flags, rx_status->band); + break; + } + } +} + +/* iwl_mld_create_skb adds the rxb to a new skb */ +static int iwl_mld_build_rx_skb(struct iwl_mld *mld, struct sk_buff *skb, + struct ieee80211_hdr *hdr, u16 len, + u8 crypt_len, struct iwl_rx_cmd_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_rx_mpdu_desc *desc = (void *)pkt->data; + unsigned int headlen, fraglen, pad_len = 0; + unsigned int hdrlen = ieee80211_hdrlen(hdr->frame_control); + u8 mic_crc_len = u8_get_bits(desc->mac_flags1, + IWL_RX_MPDU_MFLG1_MIC_CRC_LEN_MASK) << 1; + + if (desc->mac_flags2 & IWL_RX_MPDU_MFLG2_PAD) { + len -= 2; + pad_len = 2; + } + + /* For non monitor interface strip the bytes the RADA might not have + * removed (it might be disabled, e.g. for mgmt frames). As a monitor + * interface cannot exist with other interfaces, this removal is safe + * and sufficient, in monitor mode there's no decryption being done. + */ + if (len > mic_crc_len && !ieee80211_hw_check(mld->hw, RX_INCLUDES_FCS)) + len -= mic_crc_len; + + /* If frame is small enough to fit in skb->head, pull it completely. + * If not, only pull ieee80211_hdr (including crypto if present, and + * an additional 8 bytes for SNAP/ethertype, see below) so that + * splice() or TCP coalesce are more efficient. + * + * Since, in addition, ieee80211_data_to_8023() always pull in at + * least 8 bytes (possibly more for mesh) we can do the same here + * to save the cost of doing it later. That still doesn't pull in + * the actual IP header since the typical case has a SNAP header. + * If the latter changes (there are efforts in the standards group + * to do so) we should revisit this and ieee80211_data_to_8023(). + */ + headlen = (len <= skb_tailroom(skb)) ? len : hdrlen + crypt_len + 8; + + /* The firmware may align the packet to DWORD. + * The padding is inserted after the IV. + * After copying the header + IV skip the padding if + * present before copying packet data. + */ + hdrlen += crypt_len; + + if (unlikely(headlen < hdrlen)) + return -EINVAL; + + /* Since data doesn't move data while putting data on skb and that is + * the only way we use, data + len is the next place that hdr would + * be put + */ + skb_set_mac_header(skb, skb->len); + skb_put_data(skb, hdr, hdrlen); + skb_put_data(skb, (u8 *)hdr + hdrlen + pad_len, headlen - hdrlen); + + if (skb->ip_summed == CHECKSUM_COMPLETE) { + struct { + u8 hdr[6]; + __be16 type; + } __packed *shdr = (void *)((u8 *)hdr + hdrlen + pad_len); + + if (unlikely(headlen - hdrlen < sizeof(*shdr) || + !ether_addr_equal(shdr->hdr, rfc1042_header) || + (shdr->type != htons(ETH_P_IP) && + shdr->type != htons(ETH_P_ARP) && + shdr->type != htons(ETH_P_IPV6) && + shdr->type != htons(ETH_P_8021Q) && + shdr->type != htons(ETH_P_PAE) && + shdr->type != htons(ETH_P_TDLS)))) + skb->ip_summed = CHECKSUM_NONE; + } + + fraglen = len - headlen; + + if (fraglen) { + int offset = (u8 *)hdr + headlen + pad_len - + (u8 *)rxb_addr(rxb) + rxb_offset(rxb); + + skb_add_rx_frag(skb, 0, rxb_steal_page(rxb), offset, + fraglen, rxb->truesize); + } + + return 0; +} + +/* returns true if a packet is a duplicate or invalid tid and + * should be dropped. 
Updates AMSDU PN tracking info + */ +VISIBLE_IF_IWLWIFI_KUNIT +bool +iwl_mld_is_dup(struct iwl_mld *mld, struct ieee80211_sta *sta, + struct ieee80211_hdr *hdr, + const struct iwl_rx_mpdu_desc *mpdu_desc, + struct ieee80211_rx_status *rx_status, int queue) +{ + struct iwl_mld_sta *mld_sta; + struct iwl_mld_rxq_dup_data *dup_data; + u8 tid, sub_frame_idx; + + if (WARN_ON(!sta)) + return false; + + mld_sta = iwl_mld_sta_from_mac80211(sta); + + if (WARN_ON_ONCE(!mld_sta->dup_data)) + return false; + + dup_data = &mld_sta->dup_data[queue]; + + /* Drop duplicate 802.11 retransmissions + * (IEEE 802.11-2020: 10.3.2.14 "Duplicate detection and recovery") + */ + if (ieee80211_is_ctl(hdr->frame_control) || + ieee80211_is_any_nullfunc(hdr->frame_control) || + is_multicast_ether_addr(hdr->addr1)) + return false; + + if (ieee80211_is_data_qos(hdr->frame_control)) { + /* frame has qos control */ + tid = ieee80211_get_tid(hdr); + if (tid >= IWL_MAX_TID_COUNT) + return true; + } else { + tid = IWL_MAX_TID_COUNT; + } + + /* If this wasn't a part of an A-MSDU the sub-frame index will be 0 */ + sub_frame_idx = mpdu_desc->amsdu_info & + IWL_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK; + + if (IWL_FW_CHECK(mld, + sub_frame_idx > 0 && + !(mpdu_desc->mac_flags2 & IWL_RX_MPDU_MFLG2_AMSDU), + "got sub_frame_idx=%d but A-MSDU flag is not set\n", + sub_frame_idx)) + return true; + + if (unlikely(ieee80211_has_retry(hdr->frame_control) && + dup_data->last_seq[tid] == hdr->seq_ctrl && + dup_data->last_sub_frame_idx[tid] >= sub_frame_idx)) + return true; + + /* Allow same PN as the first subframe for following sub frames */ + if (dup_data->last_seq[tid] == hdr->seq_ctrl && + sub_frame_idx > dup_data->last_sub_frame_idx[tid]) + rx_status->flag |= RX_FLAG_ALLOW_SAME_PN; + + dup_data->last_seq[tid] = hdr->seq_ctrl; + dup_data->last_sub_frame_idx[tid] = sub_frame_idx; + + rx_status->flag |= RX_FLAG_DUP_VALIDATED; + + return false; +} +EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_mld_is_dup); + +static void iwl_mld_update_last_rx_timestamp(struct iwl_mld *mld, u8 baid) +{ + unsigned long now = jiffies; + unsigned long timeout; + struct iwl_mld_baid_data *ba_data; + + ba_data = rcu_dereference(mld->fw_id_to_ba[baid]); + if (!ba_data) { + IWL_DEBUG_HT(mld, "BAID %d not found in map\n", baid); + return; + } + + if (!ba_data->timeout) + return; + + /* To minimize cache bouncing between RX queues, avoid frequent updates + * to last_rx_timestamp. update it only when the timeout period has + * passed. The worst-case scenario is the session expiring after + * approximately 2 * timeout, which is negligible (the update is + * atomic). + */ + timeout = TU_TO_JIFFIES(ba_data->timeout); + if (time_is_before_jiffies(ba_data->last_rx_timestamp + timeout)) + ba_data->last_rx_timestamp = now; +} + +/* Processes received packets for a station. + * Sets *drop to true if the packet should be dropped. + * Returns the station if found, or NULL otherwise. 
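+ * Must be called with rcu_read_lock() held, since both the returned
+ * station and the link_sta looked up here are RCU-protected.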
+ */ +static struct ieee80211_sta * +iwl_mld_rx_with_sta(struct iwl_mld *mld, struct ieee80211_hdr *hdr, + struct sk_buff *skb, + const struct iwl_rx_mpdu_desc *mpdu_desc, + const struct iwl_rx_packet *pkt, int queue, bool *drop) +{ + struct ieee80211_sta *sta = NULL; + struct ieee80211_link_sta *link_sta = NULL; + struct ieee80211_rx_status *rx_status; + u8 baid; + + if (mpdu_desc->status & cpu_to_le32(IWL_RX_MPDU_STATUS_SRC_STA_FOUND)) { + u8 sta_id = le32_get_bits(mpdu_desc->status, + IWL_RX_MPDU_STATUS_STA_ID); + + if (IWL_FW_CHECK(mld, + sta_id >= mld->fw->ucode_capa.num_stations, + "rx_mpdu: invalid sta_id %d\n", sta_id)) + return NULL; + + link_sta = rcu_dereference(mld->fw_id_to_link_sta[sta_id]); + if (!IS_ERR_OR_NULL(link_sta)) + sta = link_sta->sta; + } else if (!is_multicast_ether_addr(hdr->addr2)) { + /* Passing NULL is fine since we prevent two stations with the + * same address from being added. + */ + sta = ieee80211_find_sta_by_ifaddr(mld->hw, hdr->addr2, NULL); + } + + /* we may not have any station yet */ + if (!sta) + return NULL; + + rx_status = IEEE80211_SKB_RXCB(skb); + + if (link_sta && sta->valid_links) { + rx_status->link_valid = true; + rx_status->link_id = link_sta->link_id; + } + + /* fill checksum */ + if (ieee80211_is_data(hdr->frame_control) && + pkt->len_n_flags & cpu_to_le32(FH_RSCSR_RPA_EN)) { + u16 hwsum = be16_to_cpu(mpdu_desc->v3.raw_xsum); + + skb->ip_summed = CHECKSUM_COMPLETE; + skb->csum = csum_unfold(~(__force __sum16)hwsum); + } + + if (iwl_mld_is_dup(mld, sta, hdr, mpdu_desc, rx_status, queue)) { + IWL_DEBUG_DROP(mld, "Dropping duplicate packet 0x%x\n", + le16_to_cpu(hdr->seq_ctrl)); + *drop = true; + return NULL; + } + + baid = le32_get_bits(mpdu_desc->reorder_data, + IWL_RX_MPDU_REORDER_BAID_MASK); + if (baid != IWL_RX_REORDER_DATA_INVALID_BAID) + iwl_mld_update_last_rx_timestamp(mld, baid); + + if (link_sta && ieee80211_is_data(hdr->frame_control)) { + u8 sub_frame_idx = mpdu_desc->amsdu_info & + IWL_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK; + + /* 0 means not an A-MSDU, and 1 means a new A-MSDU */ + if (!sub_frame_idx || sub_frame_idx == 1) + iwl_mld_count_mpdu_rx(link_sta, queue, 1); + + if (!is_multicast_ether_addr(hdr->addr1)) + iwl_mld_low_latency_update_counters(mld, hdr, sta, + queue); + } + + return sta; +} + +#define KEY_IDX_LEN 2 + +static int iwl_mld_rx_mgmt_prot(struct ieee80211_sta *sta, + struct ieee80211_hdr *hdr, + struct ieee80211_rx_status *rx_status, + u32 mpdu_status, + u32 mpdu_len) +{ + struct wireless_dev *wdev; + struct iwl_mld_sta *mld_sta; + struct iwl_mld_vif *mld_vif; + u8 keyidx; + struct ieee80211_key_conf *key; + const u8 *frame = (void *)hdr; + + if ((mpdu_status & IWL_RX_MPDU_STATUS_SEC_MASK) == + IWL_RX_MPDU_STATUS_SEC_NONE) + return 0; + + /* For non-beacon, we don't really care. But beacons may + * be filtered out, and we thus need the firmware's replay + * detection, otherwise beacons the firmware previously + * filtered could be replayed, or something like that, and + * it can filter a lot - though usually only if nothing has + * changed. 
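+ * Non-beacon frames are thus accepted here as-is and left to
+ * mac80211's own protection checks.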
+ */ + if (!ieee80211_is_beacon(hdr->frame_control)) + return 0; + + if (!sta) + return -1; + + mld_sta = iwl_mld_sta_from_mac80211(sta); + mld_vif = iwl_mld_vif_from_mac80211(mld_sta->vif); + + /* key mismatch - will also report !MIC_OK but we shouldn't count it */ + if (!(mpdu_status & IWL_RX_MPDU_STATUS_KEY_VALID)) + goto report; + + /* good cases */ + if (likely(mpdu_status & IWL_RX_MPDU_STATUS_MIC_OK && + !(mpdu_status & IWL_RX_MPDU_STATUS_REPLAY_ERROR))) { + rx_status->flag |= RX_FLAG_DECRYPTED; + return 0; + } + + /* both keys will have the same cipher and MIC length, use + * whichever one is available + */ + key = rcu_dereference(mld_vif->bigtks[0]); + if (!key) { + key = rcu_dereference(mld_vif->bigtks[1]); + if (!key) + goto report; + } + + if (mpdu_len < key->icv_len + IEEE80211_GMAC_PN_LEN + KEY_IDX_LEN) + goto report; + + /* get the real key ID */ + keyidx = frame[mpdu_len - key->icv_len - IEEE80211_GMAC_PN_LEN - KEY_IDX_LEN]; + /* and if that's the other key, look it up */ + if (keyidx != key->keyidx) { + /* shouldn't happen since firmware checked, but be safe + * in case the MIC length is wrong too, for example + */ + if (keyidx != 6 && keyidx != 7) + return -1; + + key = rcu_dereference(mld_vif->bigtks[keyidx - 6]); + if (!key) + goto report; + } + + /* Report status to mac80211 */ + if (!(mpdu_status & IWL_RX_MPDU_STATUS_MIC_OK)) + ieee80211_key_mic_failure(key); + else if (mpdu_status & IWL_RX_MPDU_STATUS_REPLAY_ERROR) + ieee80211_key_replay(key); +report: + wdev = ieee80211_vif_to_wdev(mld_sta->vif); + if (wdev->netdev) + cfg80211_rx_unprot_mlme_mgmt(wdev->netdev, (void *)hdr, + mpdu_len); + + return -1; +} + +static int iwl_mld_rx_crypto(struct iwl_mld *mld, + struct ieee80211_sta *sta, + struct ieee80211_hdr *hdr, + struct ieee80211_rx_status *rx_status, + struct iwl_rx_mpdu_desc *desc, int queue, + u32 pkt_flags, u8 *crypto_len) +{ + u32 status = le32_to_cpu(desc->status); + + if (unlikely(ieee80211_is_mgmt(hdr->frame_control) && + !ieee80211_has_protected(hdr->frame_control))) + return iwl_mld_rx_mgmt_prot(sta, hdr, rx_status, status, + le16_to_cpu(desc->mpdu_len)); + + if (!ieee80211_has_protected(hdr->frame_control) || + (status & IWL_RX_MPDU_STATUS_SEC_MASK) == + IWL_RX_MPDU_STATUS_SEC_NONE) + return 0; + + switch (status & IWL_RX_MPDU_STATUS_SEC_MASK) { + case IWL_RX_MPDU_STATUS_SEC_CCM: + case IWL_RX_MPDU_STATUS_SEC_GCM: + BUILD_BUG_ON(IEEE80211_CCMP_PN_LEN != IEEE80211_GCMP_PN_LEN); + if (!(status & IWL_RX_MPDU_STATUS_MIC_OK)) { + IWL_DEBUG_DROP(mld, + "Dropping packet, bad MIC (CCM/GCM)\n"); + return -1; + } + + rx_status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_MIC_STRIPPED; + *crypto_len = IEEE80211_CCMP_HDR_LEN; + return 0; + case IWL_RX_MPDU_STATUS_SEC_TKIP: + if (!(status & IWL_RX_MPDU_STATUS_ICV_OK)) + return -1; + + if (!(status & RX_MPDU_RES_STATUS_MIC_OK)) + rx_status->flag |= RX_FLAG_MMIC_ERROR; + + if (pkt_flags & FH_RSCSR_RADA_EN) { + rx_status->flag |= RX_FLAG_ICV_STRIPPED; + rx_status->flag |= RX_FLAG_MMIC_STRIPPED; + } + + *crypto_len = IEEE80211_TKIP_IV_LEN; + rx_status->flag |= RX_FLAG_DECRYPTED; + return 0; + default: + break; + } + + return 0; +} + +static void iwl_mld_rx_update_ampdu_ref(struct iwl_mld *mld, + struct iwl_mld_rx_phy_data *phy_data, + struct ieee80211_rx_status *rx_status) +{ + bool toggle_bit = + phy_data->phy_info & IWL_RX_MPDU_PHY_AMPDU_TOGGLE; + + rx_status->flag |= RX_FLAG_AMPDU_DETAILS; + /* Toggle is switched whenever new aggregation starts. 
Make + * sure ampdu_reference is never 0 so we can later use it to + * see if the frame was really part of an A-MPDU or not. + */ + if (toggle_bit != mld->monitor.ampdu_toggle) { + mld->monitor.ampdu_ref++; + if (mld->monitor.ampdu_ref == 0) + mld->monitor.ampdu_ref++; + mld->monitor.ampdu_toggle = toggle_bit; + phy_data->first_subframe = true; + } + rx_status->ampdu_reference = mld->monitor.ampdu_ref; +} + +static void +iwl_mld_fill_rx_status_band_freq(struct iwl_mld_rx_phy_data *phy_data, + struct iwl_rx_mpdu_desc *mpdu_desc, + struct ieee80211_rx_status *rx_status) +{ + enum nl80211_band band; + + band = BAND_IN_RX_STATUS(mpdu_desc->mac_phy_idx); + rx_status->band = iwl_mld_phy_band_to_nl80211(band); + rx_status->freq = ieee80211_channel_to_frequency(phy_data->channel, + rx_status->band); +} + +void iwl_mld_rx_mpdu(struct iwl_mld *mld, struct napi_struct *napi, + struct iwl_rx_cmd_buffer *rxb, int queue) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_mld_rx_phy_data phy_data = {}; + struct iwl_rx_mpdu_desc *mpdu_desc = (void *)pkt->data; + struct ieee80211_sta *sta; + struct ieee80211_hdr *hdr; + struct sk_buff *skb; + size_t mpdu_desc_size = sizeof(*mpdu_desc); + bool drop = false; + u8 crypto_len = 0; + u32 pkt_len = iwl_rx_packet_payload_len(pkt); + u32 mpdu_len; + enum iwl_mld_reorder_result reorder_res; + struct ieee80211_rx_status *rx_status; + + if (unlikely(mld->fw_status.in_hw_restart)) + return; + + if (IWL_FW_CHECK(mld, pkt_len < mpdu_desc_size, + "Bad REPLY_RX_MPDU_CMD size (%d)\n", pkt_len)) + return; + + mpdu_len = le16_to_cpu(mpdu_desc->mpdu_len); + + if (IWL_FW_CHECK(mld, mpdu_len + mpdu_desc_size > pkt_len, + "FW lied about packet len (%d)\n", pkt_len)) + return; + + /* Don't use dev_alloc_skb(), we'll have enough headroom once + * ieee80211_hdr pulled. + */ + skb = alloc_skb(128, GFP_ATOMIC); + if (!skb) { + IWL_ERR(mld, "alloc_skb failed\n"); + return; + } + + hdr = (void *)(pkt->data + mpdu_desc_size); + + iwl_mld_fill_phy_data(mpdu_desc, &phy_data); + + if (mpdu_desc->mac_flags2 & IWL_RX_MPDU_MFLG2_PAD) { + /* If the device inserted padding it means that (it thought) + * the 802.11 header wasn't a multiple of 4 bytes long. In + * this case, reserve two bytes at the start of the SKB to + * align the payload properly in case we end up copying it. + */ + skb_reserve(skb, 2); + } + + rx_status = IEEE80211_SKB_RXCB(skb); + + /* this is needed early */ + iwl_mld_fill_rx_status_band_freq(&phy_data, mpdu_desc, rx_status); + + rcu_read_lock(); + + sta = iwl_mld_rx_with_sta(mld, hdr, skb, mpdu_desc, pkt, queue, &drop); + if (drop) + goto drop; + + /* update aggregation data for monitor sake on default queue */ + if (!queue && (phy_data.phy_info & IWL_RX_MPDU_PHY_AMPDU)) + iwl_mld_rx_update_ampdu_ref(mld, &phy_data, rx_status); + + iwl_mld_rx_fill_status(mld, skb, &phy_data, mpdu_desc, hdr, queue); + + if (iwl_mld_rx_crypto(mld, sta, hdr, rx_status, mpdu_desc, queue, + le32_to_cpu(pkt->len_n_flags), &crypto_len)) + goto drop; + + if (iwl_mld_build_rx_skb(mld, skb, hdr, mpdu_len, crypto_len, rxb)) + goto drop; + + /* time sync frame is saved and will be released later when the + * notification with the timestamps arrives. 
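+ * When iwl_mld_time_sync_frame() returns true it has taken ownership
+ * of the skb, so it is neither passed up to mac80211 nor freed here.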
+ */ + if (iwl_mld_time_sync_frame(mld, skb, hdr->addr2)) + goto out; + + reorder_res = iwl_mld_reorder(mld, napi, queue, sta, skb, mpdu_desc); + switch (reorder_res) { + case IWL_MLD_PASS_SKB: + break; + case IWL_MLD_DROP_SKB: + goto drop; + case IWL_MLD_BUFFERED_SKB: + goto out; + default: + WARN_ON(1); + goto drop; + } + + iwl_mld_pass_packet_to_mac80211(mld, napi, skb, queue, sta); + + goto out; + +drop: + kfree_skb(skb); +out: + rcu_read_unlock(); +} + +#define SYNC_RX_QUEUE_TIMEOUT (HZ) +void iwl_mld_sync_rx_queues(struct iwl_mld *mld, + enum iwl_mld_internal_rxq_notif_type type, + const void *notif_payload, u32 notif_payload_size) +{ + u8 num_rx_queues = mld->trans->num_rx_queues; + struct { + struct iwl_rxq_sync_cmd sync_cmd; + struct iwl_mld_internal_rxq_notif notif; + } __packed cmd = { + .sync_cmd.rxq_mask = cpu_to_le32(BIT(num_rx_queues) - 1), + .sync_cmd.count = + cpu_to_le32(sizeof(struct iwl_mld_internal_rxq_notif) + + notif_payload_size), + .notif.type = type, + .notif.cookie = mld->rxq_sync.cookie, + }; + struct iwl_host_cmd hcmd = { + .id = WIDE_ID(DATA_PATH_GROUP, TRIGGER_RX_QUEUES_NOTIF_CMD), + .data[0] = &cmd, + .len[0] = sizeof(cmd), + .data[1] = notif_payload, + .len[1] = notif_payload_size, + }; + int ret; + + /* size must be a multiple of DWORD */ + if (WARN_ON(cmd.sync_cmd.count & cpu_to_le32(3))) + return; + + mld->rxq_sync.state = (1 << num_rx_queues) - 1; + + ret = iwl_mld_send_cmd(mld, &hcmd); + if (ret) { + IWL_ERR(mld, "Failed to trigger RX queues sync (%d)\n", ret); + goto out; + } + + ret = wait_event_timeout(mld->rxq_sync.waitq, + READ_ONCE(mld->rxq_sync.state) == 0, + SYNC_RX_QUEUE_TIMEOUT); + WARN_ONCE(!ret, "RXQ sync failed: state=0x%lx, cookie=%d\n", + mld->rxq_sync.state, mld->rxq_sync.cookie); + +out: + mld->rxq_sync.state = 0; + mld->rxq_sync.cookie++; +} + +void iwl_mld_handle_rx_queues_sync_notif(struct iwl_mld *mld, + struct napi_struct *napi, + struct iwl_rx_packet *pkt, int queue) +{ + struct iwl_rxq_sync_notification *notif; + struct iwl_mld_internal_rxq_notif *internal_notif; + u32 len = iwl_rx_packet_payload_len(pkt); + size_t combined_notif_len = sizeof(*notif) + sizeof(*internal_notif); + + notif = (void *)pkt->data; + internal_notif = (void *)notif->payload; + + if (IWL_FW_CHECK(mld, len < combined_notif_len, + "invalid notification size %u (%zu)\n", + len, combined_notif_len)) + return; + + len -= combined_notif_len; + + if (IWL_FW_CHECK(mld, mld->rxq_sync.cookie != internal_notif->cookie, + "received expired RX queue sync message (cookie=%d expected=%d q[%d])\n", + internal_notif->cookie, mld->rxq_sync.cookie, queue)) + return; + + switch (internal_notif->type) { + case IWL_MLD_RXQ_EMPTY: + IWL_FW_CHECK(mld, len, + "invalid empty notification size %d\n", len); + break; + case IWL_MLD_RXQ_NOTIF_DEL_BA: + if (IWL_FW_CHECK(mld, len != sizeof(struct iwl_mld_delba_data), + "invalid delba notification size %u (%zu)\n", + len, sizeof(struct iwl_mld_delba_data))) + break; + iwl_mld_del_ba(mld, queue, (void *)internal_notif->payload); + break; + default: + WARN_ON_ONCE(1); + } + + IWL_FW_CHECK(mld, !test_and_clear_bit(queue, &mld->rxq_sync.state), + "RXQ sync: queue %d responded a second time!\n", queue); + + if (READ_ONCE(mld->rxq_sync.state) == 0) + wake_up(&mld->rxq_sync.waitq); +} + +void iwl_mld_rx_monitor_no_data(struct iwl_mld *mld, struct napi_struct *napi, + struct iwl_rx_packet *pkt, int queue) +{ + struct iwl_rx_no_data_ver_3 *desc; + struct iwl_mld_rx_phy_data phy_data; + struct ieee80211_rx_status *rx_status; + struct sk_buff *skb; 
+ u32 format, rssi; + + if (unlikely(mld->fw_status.in_hw_restart)) + return; + + if (IWL_FW_CHECK(mld, iwl_rx_packet_payload_len(pkt) < sizeof(*desc), + "Bad RX_NO_DATA_NOTIF size (%d)\n", + iwl_rx_packet_payload_len(pkt))) + return; + + desc = (void *)pkt->data; + + rssi = le32_to_cpu(desc->rssi); + phy_data.energy_a = u32_get_bits(rssi, RX_NO_DATA_CHAIN_A_MSK); + phy_data.energy_b = u32_get_bits(rssi, RX_NO_DATA_CHAIN_B_MSK); + phy_data.channel = u32_get_bits(rssi, RX_NO_DATA_CHANNEL_MSK); + phy_data.data0 = desc->phy_info[0]; + phy_data.data1 = desc->phy_info[1]; + phy_data.phy_info = IWL_RX_MPDU_PHY_TSF_OVERLOAD; + phy_data.gp2_on_air_rise = le32_to_cpu(desc->on_air_rise_time); + phy_data.rate_n_flags = le32_to_cpu(desc->rate); + phy_data.with_data = false; + + BUILD_BUG_ON(sizeof(phy_data.rx_vec) != sizeof(desc->rx_vec)); + memcpy(phy_data.rx_vec, desc->rx_vec, sizeof(phy_data.rx_vec)); + + format = phy_data.rate_n_flags & RATE_MCS_MOD_TYPE_MSK; + + /* Don't use dev_alloc_skb(), we'll have enough headroom once + * ieee80211_hdr pulled. + */ + skb = alloc_skb(128, GFP_ATOMIC); + if (!skb) { + IWL_ERR(mld, "alloc_skb failed\n"); + return; + } + + rx_status = IEEE80211_SKB_RXCB(skb); + + /* 0-length PSDU */ + rx_status->flag |= RX_FLAG_NO_PSDU; + + /* mark as failed PLCP on any errors to skip checks in mac80211 */ + if (le32_get_bits(desc->info, RX_NO_DATA_INFO_ERR_MSK) != + RX_NO_DATA_INFO_ERR_NONE) + rx_status->flag |= RX_FLAG_FAILED_PLCP_CRC; + + switch (le32_get_bits(desc->info, RX_NO_DATA_INFO_TYPE_MSK)) { + case RX_NO_DATA_INFO_TYPE_NDP: + rx_status->zero_length_psdu_type = + IEEE80211_RADIOTAP_ZERO_LEN_PSDU_SOUNDING; + break; + case RX_NO_DATA_INFO_TYPE_MU_UNMATCHED: + case RX_NO_DATA_INFO_TYPE_TB_UNMATCHED: + rx_status->zero_length_psdu_type = + IEEE80211_RADIOTAP_ZERO_LEN_PSDU_NOT_CAPTURED; + break; + default: + rx_status->zero_length_psdu_type = + IEEE80211_RADIOTAP_ZERO_LEN_PSDU_VENDOR; + break; + } + + rx_status->band = phy_data.channel > 14 ? NL80211_BAND_5GHZ : + NL80211_BAND_2GHZ; + + rx_status->freq = ieee80211_channel_to_frequency(phy_data.channel, + rx_status->band); + + iwl_mld_rx_fill_status(mld, skb, &phy_data, NULL, NULL, queue); + + /* No more radiotap info should be added after this point. + * Mark it as mac header for upper layers to know where + * the radiotap header ends. + */ + skb_set_mac_header(skb, skb->len); + + /* Override the nss from the rx_vec since the rate_n_flags has + * only 1 bit for the nss which gives a max of 2 ss but there + * may be up to 8 spatial streams. 
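The u32_get_bits()/le32_get_bits() helpers used throughout this path extract a field by its mask. A standalone sketch of the same contract, with made-up masks (the firmware's real bit layout lives in the fw/api headers); note the "+ 1" mirrors the NSTS convention of storing "streams - 1":

#include <stdint.h>

/* illustrative masks only, not the firmware's real layout */
#define DEMO_CHAIN_A_MSK 0x000000ffu
#define DEMO_NSTS_MSK    0x00000380u

/* same contract as the kernel's u32_get_bits(): mask out the field
 * and shift it down by the mask's lowest set bit */
static uint32_t demo_get_bits(uint32_t val, uint32_t mask)
{
    return (val & mask) / (mask & ~(mask - 1));
}

/* NSTS fields are "streams - 1", hence the +1 when deriving nss */
static unsigned int demo_nss(uint32_t rx_vec0)
{
    return demo_get_bits(rx_vec0, DEMO_NSTS_MSK) + 1;
}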
+ */
+    switch (format) {
+    case RATE_MCS_VHT_MSK:
+        rx_status->nss =
+            le32_get_bits(desc->rx_vec[0],
+                          RX_NO_DATA_RX_VEC0_VHT_NSTS_MSK) + 1;
+        break;
+    case RATE_MCS_HE_MSK:
+        rx_status->nss =
+            le32_get_bits(desc->rx_vec[0],
+                          RX_NO_DATA_RX_VEC0_HE_NSTS_MSK) + 1;
+        break;
+    case RATE_MCS_EHT_MSK:
+        rx_status->nss =
+            le32_get_bits(desc->rx_vec[2],
+                          RX_NO_DATA_RX_VEC2_EHT_NSTS_MSK) + 1;
+    }
+
+    /* pass the packet to mac80211 */
+    rcu_read_lock();
+    ieee80211_rx_napi(mld->hw, NULL, skb, napi);
+    rcu_read_unlock();
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/rx.h b/drivers/net/wireless/intel/iwlwifi/mld/rx.h
new file mode 100644
index 0000000000000..2beabd7e70b15
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/rx.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2024-2025 Intel Corporation
+ */
+#ifndef __iwl_mld_rx_h__
+#define __iwl_mld_rx_h__
+
+#include "mld.h"
+
+/**
+ * enum iwl_mld_internal_rxq_notif_type - RX queue sync notif types
+ *
+ * @IWL_MLD_RXQ_EMPTY: empty sync notification
+ * @IWL_MLD_RXQ_NOTIF_DEL_BA: notify RSS queues of delBA
+ */
+enum iwl_mld_internal_rxq_notif_type {
+    IWL_MLD_RXQ_EMPTY,
+    IWL_MLD_RXQ_NOTIF_DEL_BA,
+};
+
+/**
+ * struct iwl_mld_internal_rxq_notif - @iwl_rxq_sync_cmd internal data.
+ * This data is echoed by the firmware to all RSS queues and should be DWORD
+ * aligned. FW is agnostic to the data, so there are no endianness requirements
+ *
+ * @type: one of &iwl_mld_internal_rxq_notif_type
+ * @cookie: unique internal cookie to identify old notifications
+ * @reserved: reserved for alignment
+ * @payload: data to send to RX queues based on the type (may be empty)
+ */
+struct iwl_mld_internal_rxq_notif {
+    u8 type;
+    u8 reserved[3];
+    u32 cookie;
+    u8 payload[];
+} __packed;
+
+/**
+ * struct iwl_mld_rx_queues_sync - RX queues sync data
+ *
+ * @waitq: wait queue for RX queues sync completion
+ * @cookie: unique id to correlate sync requests with responses
+ * @state: bitmask representing the sync state of RX queues
+ *    all RX queues bits are set before sending the command, and the
+ *    corresponding queue bit cleared upon handling the notification
+ */
+struct iwl_mld_rx_queues_sync {
+    wait_queue_head_t waitq;
+    u32 cookie;
+    unsigned long state;
+};
+
+void iwl_mld_rx_mpdu(struct iwl_mld *mld, struct napi_struct *napi,
+                     struct iwl_rx_cmd_buffer *rxb, int queue);
+
+void iwl_mld_sync_rx_queues(struct iwl_mld *mld,
+                            enum iwl_mld_internal_rxq_notif_type type,
+                            const void *notif_payload, u32 notif_payload_size);
+
+void iwl_mld_handle_rx_queues_sync_notif(struct iwl_mld *mld,
+                                         struct napi_struct *napi,
+                                         struct iwl_rx_packet *pkt, int queue);
+
+void iwl_mld_pass_packet_to_mac80211(struct iwl_mld *mld,
+                                     struct napi_struct *napi,
+                                     struct sk_buff *skb, int queue,
+                                     struct ieee80211_sta *sta);
+
+void iwl_mld_rx_monitor_no_data(struct iwl_mld *mld, struct napi_struct *napi,
+                                struct iwl_rx_packet *pkt, int queue);
+
+#endif /* __iwl_mld_rx_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/scan.c b/drivers/net/wireless/intel/iwlwifi/mld/scan.c
new file mode 100644
index 0000000000000..7ec04318ec2fc
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/scan.c
@@ -0,0 +1,2008 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2024-2025 Intel Corporation
+ */
+#include <linux/crc32.h>
+
+#include "mld.h"
+#include "scan.h"
+#include "hcmd.h"
+#include "iface.h"
+#include "phy.h"
+#include "mlo.h"
+
+#include "fw/api/scan.h"
+#include "fw/dbg.h"
+
+#define
IWL_SCAN_DWELL_ACTIVE 10 +#define IWL_SCAN_DWELL_PASSIVE 110 +#define IWL_SCAN_NUM_OF_FRAGS 3 + +/* adaptive dwell max budget time [TU] for full scan */ +#define IWL_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN 300 + +/* adaptive dwell max budget time [TU] for directed scan */ +#define IWL_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN 100 + +/* adaptive dwell default high band APs number */ +#define IWL_SCAN_ADWELL_DEFAULT_HB_N_APS 8 + +/* adaptive dwell default low band APs number */ +#define IWL_SCAN_ADWELL_DEFAULT_LB_N_APS 2 + +/* adaptive dwell default APs number for P2P social channels (1, 6, 11) */ +#define IWL_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL 10 + +/* adaptive dwell number of APs override for P2P friendly GO channels */ +#define IWL_SCAN_ADWELL_N_APS_GO_FRIENDLY 10 + +/* adaptive dwell number of APs override for P2P social channels */ +#define IWL_SCAN_ADWELL_N_APS_SOCIAL_CHS 2 + +/* adaptive dwell number of APs override mask for p2p friendly GO */ +#define IWL_SCAN_ADWELL_N_APS_GO_FRIENDLY_BIT BIT(20) + +/* adaptive dwell number of APs override mask for social channels */ +#define IWL_SCAN_ADWELL_N_APS_SOCIAL_CHS_BIT BIT(21) + +#define SCAN_TIMEOUT_MSEC (30000 * HZ) + +/* minimal number of 2GHz and 5GHz channels in the regular scan request */ +#define IWL_MLD_6GHZ_PASSIVE_SCAN_MIN_CHANS 4 + +enum iwl_mld_scan_type { + IWL_SCAN_TYPE_NOT_SET, + IWL_SCAN_TYPE_UNASSOC, + IWL_SCAN_TYPE_WILD, + IWL_SCAN_TYPE_MILD, + IWL_SCAN_TYPE_FRAGMENTED, + IWL_SCAN_TYPE_FAST_BALANCE, +}; + +struct iwl_mld_scan_timing_params { + u32 suspend_time; + u32 max_out_time; +}; + +static const struct iwl_mld_scan_timing_params scan_timing[] = { + [IWL_SCAN_TYPE_UNASSOC] = { + .suspend_time = 0, + .max_out_time = 0, + }, + [IWL_SCAN_TYPE_WILD] = { + .suspend_time = 30, + .max_out_time = 120, + }, + [IWL_SCAN_TYPE_MILD] = { + .suspend_time = 120, + .max_out_time = 120, + }, + [IWL_SCAN_TYPE_FRAGMENTED] = { + .suspend_time = 95, + .max_out_time = 44, + }, + [IWL_SCAN_TYPE_FAST_BALANCE] = { + .suspend_time = 30, + .max_out_time = 37, + }, +}; + +struct iwl_mld_scan_params { + enum iwl_mld_scan_type type; + u32 n_channels; + u16 delay; + int n_ssids; + struct cfg80211_ssid *ssids; + struct ieee80211_channel **channels; + u32 flags; + u8 *mac_addr; + u8 *mac_addr_mask; + bool no_cck; + bool pass_all; + int n_match_sets; + struct iwl_scan_probe_req preq; + struct cfg80211_match_set *match_sets; + int n_scan_plans; + struct cfg80211_sched_scan_plan *scan_plans; + bool iter_notif; + bool respect_p2p_go; + u8 fw_link_id; + struct cfg80211_scan_6ghz_params *scan_6ghz_params; + u32 n_6ghz_params; + bool scan_6ghz; + bool enable_6ghz_passive; + u8 bssid[ETH_ALEN] __aligned(2); +}; + +struct iwl_mld_scan_respect_p2p_go_iter_data { + struct ieee80211_vif *current_vif; + bool p2p_go; +}; + +static void iwl_mld_scan_respect_p2p_go_iter(void *_data, u8 *mac, + struct ieee80211_vif *vif) +{ + struct iwl_mld_scan_respect_p2p_go_iter_data *data = _data; + + /* exclude the given vif */ + if (vif == data->current_vif) + return; + + /* TODO: CDB check the band of the GO */ + if (ieee80211_vif_type_p2p(vif) == NL80211_IFTYPE_P2P_GO && + iwl_mld_vif_from_mac80211(vif)->ap_ibss_active) + data->p2p_go = true; +} + +static bool iwl_mld_get_respect_p2p_go(struct iwl_mld *mld, + struct ieee80211_vif *vif, + bool low_latency) +{ + struct iwl_mld_scan_respect_p2p_go_iter_data data = { + .current_vif = vif, + .p2p_go = false, + }; + + if (!low_latency) + return false; + + ieee80211_iterate_active_interfaces_mtx(mld->hw, + IEEE80211_IFACE_ITER_NORMAL, + 
iwl_mld_scan_respect_p2p_go_iter, + &data); + + return data.p2p_go; +} + +struct iwl_mld_scan_iter_data { + struct ieee80211_vif *current_vif; + bool active_vif; + bool is_dcm_with_p2p_go; + bool global_low_latency; +}; + +static void iwl_mld_scan_iterator(void *_data, u8 *mac, + struct ieee80211_vif *vif) +{ + struct iwl_mld_scan_iter_data *data = _data; + struct ieee80211_vif *curr_vif = data->current_vif; + struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif); + struct iwl_mld_vif *curr_mld_vif; + unsigned long curr_vif_active_links; + u16 link_id; + + data->global_low_latency |= iwl_mld_vif_low_latency(mld_vif); + + if ((ieee80211_vif_is_mld(vif) && vif->active_links) || + (vif->type != NL80211_IFTYPE_P2P_DEVICE && + mld_vif->deflink.active)) + data->active_vif = true; + + if (vif == curr_vif) + return; + + if (ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_P2P_GO) + return; + + /* Currently P2P GO can't be AP MLD so the logic below assumes that */ + WARN_ON_ONCE(ieee80211_vif_is_mld(vif)); + + curr_vif_active_links = + ieee80211_vif_is_mld(curr_vif) ? curr_vif->active_links : 1; + + curr_mld_vif = iwl_mld_vif_from_mac80211(curr_vif); + + for_each_set_bit(link_id, &curr_vif_active_links, + IEEE80211_MLD_MAX_NUM_LINKS) { + struct iwl_mld_link *curr_mld_link = + iwl_mld_link_dereference_check(curr_mld_vif, link_id); + + if (WARN_ON(!curr_mld_link)) + return; + + if (rcu_access_pointer(curr_mld_link->chan_ctx) && + rcu_access_pointer(mld_vif->deflink.chan_ctx) != + rcu_access_pointer(curr_mld_link->chan_ctx)) { + data->is_dcm_with_p2p_go = true; + return; + } + } +} + +static enum +iwl_mld_scan_type iwl_mld_get_scan_type(struct iwl_mld *mld, + struct ieee80211_vif *vif, + struct iwl_mld_scan_iter_data *data) +{ + enum iwl_mld_traffic_load load = mld->scan.traffic_load.status; + + /* A scanning AP interface probably wants to generate a survey to do + * ACS (automatic channel selection). + * Force a non-fragmented scan in that case. 
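The scan-type decision that follows is a pure function of the state gathered by the interface iterators. A hedged standalone restatement of the same ladder (enum names simplified for illustration):

#include <stdbool.h>

enum scan_type { SCAN_UNASSOC, SCAN_WILD, SCAN_MILD,
                 SCAN_FRAGMENTED, SCAN_FAST_BALANCE };
enum traffic_load { LOAD_LOW, LOAD_MEDIUM, LOAD_HIGH };

/* Decision ladder as in the code below: an AP vif scanning for ACS
 * forces a non-fragmented scan so the survey stays usable. */
static enum scan_type pick_scan_type(bool ap_scan, bool active_vif,
                                     bool low_latency, bool dcm_with_go,
                                     bool p2p_device, bool station,
                                     enum traffic_load load)
{
    if (ap_scan)
        return SCAN_WILD;
    if (!active_vif)
        return SCAN_UNASSOC;
    if ((load == LOAD_HIGH || low_latency) && !p2p_device)
        return SCAN_FRAGMENTED;
    if (station && dcm_with_go)
        return SCAN_FAST_BALANCE;
    if (load >= LOAD_MEDIUM || low_latency)
        return SCAN_MILD;
    return SCAN_WILD;
}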
+ */
+    if (ieee80211_vif_type_p2p(vif) == NL80211_IFTYPE_AP)
+        return IWL_SCAN_TYPE_WILD;
+
+    if (!data->active_vif)
+        return IWL_SCAN_TYPE_UNASSOC;
+
+    if ((load == IWL_MLD_TRAFFIC_HIGH || data->global_low_latency) &&
+        vif->type != NL80211_IFTYPE_P2P_DEVICE)
+        return IWL_SCAN_TYPE_FRAGMENTED;
+
+    /* In case of DCM with P2P GO set all scan requests as
+     * fast-balance scan
+     */
+    if (vif->type == NL80211_IFTYPE_STATION &&
+        data->is_dcm_with_p2p_go)
+        return IWL_SCAN_TYPE_FAST_BALANCE;
+
+    if (load >= IWL_MLD_TRAFFIC_MEDIUM || data->global_low_latency)
+        return IWL_SCAN_TYPE_MILD;
+
+    return IWL_SCAN_TYPE_WILD;
+}
+
+static u8 *
+iwl_mld_scan_add_2ghz_elems(struct iwl_mld *mld, const u8 *ies,
+                            size_t len, u8 *const pos)
+{
+    static const u8 before_ds_params[] = {
+        WLAN_EID_SSID,
+        WLAN_EID_SUPP_RATES,
+        WLAN_EID_REQUEST,
+        WLAN_EID_EXT_SUPP_RATES,
+    };
+    size_t offs;
+    u8 *newpos = pos;
+
+    offs = ieee80211_ie_split(ies, len,
+                              before_ds_params,
+                              ARRAY_SIZE(before_ds_params),
+                              0);
+
+    memcpy(newpos, ies, offs);
+    newpos += offs;
+
+    /* Add a placeholder for DS Parameter Set element */
+    *newpos++ = WLAN_EID_DS_PARAMS;
+    *newpos++ = 1;
+    *newpos++ = 0;
+
+    memcpy(newpos, ies + offs, len - offs);
+    newpos += len - offs;
+
+    return newpos;
+}
+
+static void
+iwl_mld_scan_add_tpc_report_elem(u8 *pos)
+{
+    pos[0] = WLAN_EID_VENDOR_SPECIFIC;
+    pos[1] = WFA_TPC_IE_LEN - 2;
+    pos[2] = (WLAN_OUI_MICROSOFT >> 16) & 0xff;
+    pos[3] = (WLAN_OUI_MICROSOFT >> 8) & 0xff;
+    pos[4] = WLAN_OUI_MICROSOFT & 0xff;
+    pos[5] = WLAN_OUI_TYPE_MICROSOFT_TPC;
+    pos[6] = 0;
+    /* pos[7] - tx power will be inserted by the FW */
+    pos[7] = 0;
+    pos[8] = 0;
+}
+
+static u32
+iwl_mld_scan_ooc_priority(enum iwl_mld_scan_status scan_status)
+{
+    if (scan_status == IWL_MLD_SCAN_REGULAR)
+        return IWL_SCAN_PRIORITY_EXT_6;
+    if (scan_status == IWL_MLD_SCAN_INT_MLO)
+        return IWL_SCAN_PRIORITY_EXT_4;
+
+    return IWL_SCAN_PRIORITY_EXT_2;
+}
+
+static bool
+iwl_mld_scan_is_regular(struct iwl_mld_scan_params *params)
+{
+    return params->n_scan_plans == 1 &&
+           params->scan_plans[0].iterations == 1;
+}
+
+static bool
+iwl_mld_scan_is_fragmented(enum iwl_mld_scan_type type)
+{
+    return (type == IWL_SCAN_TYPE_FRAGMENTED ||
+            type == IWL_SCAN_TYPE_FAST_BALANCE);
+}
+
+static int
+iwl_mld_scan_uid_by_status(struct iwl_mld *mld, int status)
+{
+    for (int i = 0; i < ARRAY_SIZE(mld->scan.uid_status); i++)
+        if (mld->scan.uid_status[i] == status)
+            return i;
+
+    return -ENOENT;
+}
+
+static const char *
+iwl_mld_scan_ebs_status_str(enum iwl_scan_ebs_status status)
+{
+    switch (status) {
+    case IWL_SCAN_EBS_SUCCESS:
+        return "successful";
+    case IWL_SCAN_EBS_INACTIVE:
+        return "inactive";
+    case IWL_SCAN_EBS_FAILED:
+    case IWL_SCAN_EBS_CHAN_NOT_FOUND:
+    default:
+        return "failed";
+    }
+}
+
+static int
+iwl_mld_scan_ssid_exist(u8 *ssid, u8 ssid_len, struct iwl_ssid_ie *ssid_list)
+{
+    for (int i = 0; i < PROBE_OPTION_MAX; i++) {
+        if (!ssid_list[i].len)
+            return -1;
+        if (ssid_list[i].len == ssid_len &&
+            !memcmp(ssid_list[i].ssid, ssid, ssid_len))
+            return i;
+    }
+
+    return -1;
+}
+
+static bool
+iwl_mld_scan_fits(struct iwl_mld *mld, int n_ssids,
+                  struct ieee80211_scan_ies *ies, int n_channels)
+{
+    return ((n_ssids <= PROBE_OPTION_MAX) &&
+            (n_channels <= mld->fw->ucode_capa.n_scan_channels) &&
+            (ies->common_ie_len + ies->len[NL80211_BAND_2GHZ] +
+             ies->len[NL80211_BAND_5GHZ] + ies->len[NL80211_BAND_6GHZ] <=
+             iwl_mld_scan_max_template_size()));
+}
+
+static void
+iwl_mld_scan_build_probe_req(struct iwl_mld *mld, struct ieee80211_vif
*vif, + struct ieee80211_scan_ies *ies, + struct iwl_mld_scan_params *params) +{ + struct ieee80211_mgmt *frame = (void *)params->preq.buf; + u8 *pos, *newpos; + const u8 *mac_addr = params->flags & NL80211_SCAN_FLAG_RANDOM_ADDR ? + params->mac_addr : NULL; + + if (mac_addr) + get_random_mask_addr(frame->sa, mac_addr, + params->mac_addr_mask); + else + memcpy(frame->sa, vif->addr, ETH_ALEN); + + frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ); + eth_broadcast_addr(frame->da); + ether_addr_copy(frame->bssid, params->bssid); + frame->seq_ctrl = 0; + + pos = frame->u.probe_req.variable; + *pos++ = WLAN_EID_SSID; + *pos++ = 0; + + params->preq.mac_header.offset = 0; + params->preq.mac_header.len = cpu_to_le16(24 + 2); + + /* Insert DS parameter set element on 2.4 GHz band */ + newpos = iwl_mld_scan_add_2ghz_elems(mld, + ies->ies[NL80211_BAND_2GHZ], + ies->len[NL80211_BAND_2GHZ], + pos); + params->preq.band_data[0].offset = cpu_to_le16(pos - params->preq.buf); + params->preq.band_data[0].len = cpu_to_le16(newpos - pos); + pos = newpos; + + memcpy(pos, ies->ies[NL80211_BAND_5GHZ], + ies->len[NL80211_BAND_5GHZ]); + params->preq.band_data[1].offset = cpu_to_le16(pos - params->preq.buf); + params->preq.band_data[1].len = + cpu_to_le16(ies->len[NL80211_BAND_5GHZ]); + pos += ies->len[NL80211_BAND_5GHZ]; + + memcpy(pos, ies->ies[NL80211_BAND_6GHZ], + ies->len[NL80211_BAND_6GHZ]); + params->preq.band_data[2].offset = cpu_to_le16(pos - params->preq.buf); + params->preq.band_data[2].len = + cpu_to_le16(ies->len[NL80211_BAND_6GHZ]); + pos += ies->len[NL80211_BAND_6GHZ]; + + memcpy(pos, ies->common_ies, ies->common_ie_len); + params->preq.common_data.offset = cpu_to_le16(pos - params->preq.buf); + + iwl_mld_scan_add_tpc_report_elem(pos + ies->common_ie_len); + params->preq.common_data.len = cpu_to_le16(ies->common_ie_len + + WFA_TPC_IE_LEN); +} + +static u16 +iwl_mld_scan_get_cmd_gen_flags(struct iwl_mld *mld, + struct iwl_mld_scan_params *params, + struct ieee80211_vif *vif, + enum iwl_mld_scan_status scan_status) +{ + u16 flags = 0; + + /* If no direct SSIDs are provided perform a passive scan. Otherwise, + * if there is a single SSID which is not the broadcast SSID, assume + * that the scan is intended for roaming purposes and thus enable Rx on + * all chains to improve chances of hearing the beacons/probe responses. 
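The template builder above packs all probe-request IEs into one flat buffer and records (offset, len) slices per band, so the firmware can splice the right IEs for each channel. A rough standalone sketch of that bookkeeping (buffer size, struct, and names are illustrative, not the firmware ABI):

#include <stdint.h>
#include <string.h>

struct slice { uint16_t offset, len; };

struct probe_template {
    uint8_t buf[512];          /* illustrative size */
    struct slice band_data[3]; /* 2.4 GHz, 5 GHz, 6 GHz */
    struct slice common_data;
};

/* append one band's IEs and record where they landed in the buffer */
static uint8_t *append_band(struct probe_template *t, int band,
                            const uint8_t *ies, uint16_t len, uint8_t *pos)
{
    t->band_data[band].offset = (uint16_t)(pos - t->buf);
    t->band_data[band].len = len;
    memcpy(pos, ies, len);
    return pos + len;
}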
+ */ + if (params->n_ssids == 0) + flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_FORCE_PASSIVE; + else if (params->n_ssids == 1 && params->ssids[0].ssid_len) + flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_USE_ALL_RX_CHAINS; + + if (params->pass_all) + flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_PASS_ALL; + else + flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_MATCH; + + if (iwl_mld_scan_is_fragmented(params->type)) + flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC1; + + if (!iwl_mld_scan_is_regular(params)) + flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_PERIODIC; + + if (params->iter_notif || + mld->scan.pass_all_sched_res == SCHED_SCAN_PASS_ALL_STATE_ENABLED) + flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_NTFY_ITER_COMPLETE; + + if (scan_status == IWL_MLD_SCAN_SCHED || + scan_status == IWL_MLD_SCAN_NETDETECT) + flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_PREEMPTIVE; + + if (params->flags & (NL80211_SCAN_FLAG_ACCEPT_BCAST_PROBE_RESP | + NL80211_SCAN_FLAG_OCE_PROBE_REQ_HIGH_TX_RATE | + NL80211_SCAN_FLAG_FILS_MAX_CHANNEL_TIME)) + flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_OCE; + + if ((scan_status == IWL_MLD_SCAN_SCHED || + scan_status == IWL_MLD_SCAN_NETDETECT) && + params->flags & NL80211_SCAN_FLAG_COLOCATED_6GHZ) + flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_TRIGGER_UHB_SCAN; + + if (params->enable_6ghz_passive) + flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_6GHZ_PASSIVE_SCAN; + + flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_ADAPTIVE_DWELL; + + return flags; +} + +static u8 +iwl_mld_scan_get_cmd_gen_flags2(struct iwl_mld *mld, + struct iwl_mld_scan_params *params, + struct ieee80211_vif *vif, u16 gen_flags) +{ + u8 flags = 0; + + /* TODO: CDB */ + if (params->respect_p2p_go) + flags |= IWL_UMAC_SCAN_GEN_PARAMS_FLAGS2_RESPECT_P2P_GO_LB | + IWL_UMAC_SCAN_GEN_PARAMS_FLAGS2_RESPECT_P2P_GO_HB; + + if (params->scan_6ghz) + flags |= IWL_UMAC_SCAN_GEN_PARAMS_FLAGS2_DONT_TOGGLE_ANT; + + return flags; +} + +static void +iwl_mld_scan_cmd_set_dwell(struct iwl_mld *mld, + struct iwl_scan_general_params_v11 *gp, + struct iwl_mld_scan_params *params) +{ + const struct iwl_mld_scan_timing_params *timing = + &scan_timing[params->type]; + + gp->adwell_default_social_chn = + IWL_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL; + gp->adwell_default_2g = IWL_SCAN_ADWELL_DEFAULT_LB_N_APS; + gp->adwell_default_5g = IWL_SCAN_ADWELL_DEFAULT_HB_N_APS; + + if (params->n_ssids && params->ssids[0].ssid_len) + gp->adwell_max_budget = + cpu_to_le16(IWL_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN); + else + gp->adwell_max_budget = + cpu_to_le16(IWL_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN); + + gp->scan_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6); + + gp->max_out_of_time[SCAN_LB_LMAC_IDX] = cpu_to_le32(timing->max_out_time); + gp->suspend_time[SCAN_LB_LMAC_IDX] = cpu_to_le32(timing->suspend_time); + + gp->active_dwell[SCAN_LB_LMAC_IDX] = IWL_SCAN_DWELL_ACTIVE; + gp->passive_dwell[SCAN_LB_LMAC_IDX] = IWL_SCAN_DWELL_PASSIVE; + gp->active_dwell[SCAN_HB_LMAC_IDX] = IWL_SCAN_DWELL_ACTIVE; + gp->passive_dwell[SCAN_HB_LMAC_IDX] = IWL_SCAN_DWELL_PASSIVE; + + IWL_DEBUG_SCAN(mld, + "Scan: adwell_max_budget=%d max_out_of_time=%d suspend_time=%d\n", + gp->adwell_max_budget, + gp->max_out_of_time[SCAN_LB_LMAC_IDX], + gp->suspend_time[SCAN_LB_LMAC_IDX]); +} + +static void +iwl_mld_scan_cmd_set_gen_params(struct iwl_mld *mld, + struct iwl_mld_scan_params *params, + struct ieee80211_vif *vif, + struct iwl_scan_general_params_v11 *gp, + enum iwl_mld_scan_status scan_status) +{ + u16 gen_flags = iwl_mld_scan_get_cmd_gen_flags(mld, params, vif, + scan_status); + u8 gen_flags2 = iwl_mld_scan_get_cmd_gen_flags2(mld, params, vif, + gen_flags); + + 
IWL_DEBUG_SCAN(mld, "General: flags=0x%x, flags2=0x%x\n",
+                   gen_flags, gen_flags2);
+
+    gp->flags = cpu_to_le16(gen_flags);
+    gp->flags2 = gen_flags2;
+
+    iwl_mld_scan_cmd_set_dwell(mld, gp, params);
+
+    if (gen_flags & IWL_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC1)
+        gp->num_of_fragments[SCAN_LB_LMAC_IDX] = IWL_SCAN_NUM_OF_FRAGS;
+
+    if (params->fw_link_id != IWL_MLD_INVALID_FW_ID)
+        gp->scan_start_mac_or_link_id = params->fw_link_id;
+}
+
+static int
+iwl_mld_scan_cmd_set_sched_params(struct iwl_mld_scan_params *params,
+                                  struct iwl_scan_umac_schedule *schedule,
+                                  __le16 *delay)
+{
+    if (WARN_ON(!params->n_scan_plans ||
+                params->n_scan_plans > IWL_MAX_SCHED_SCAN_PLANS))
+        return -EINVAL;
+
+    for (int i = 0; i < params->n_scan_plans; i++) {
+        struct cfg80211_sched_scan_plan *scan_plan =
+            &params->scan_plans[i];
+
+        schedule[i].iter_count = scan_plan->iterations;
+        schedule[i].interval =
+            cpu_to_le16(scan_plan->interval);
+    }
+
+    /* If the number of iterations of the last scan plan is set to zero,
+     * it should run infinitely. However, this is not always the case.
+     * For example, when regular scan is requested the driver sets one scan
+     * plan with one iteration.
+     */
+    if (!schedule[params->n_scan_plans - 1].iter_count)
+        schedule[params->n_scan_plans - 1].iter_count = 0xff;
+
+    *delay = cpu_to_le16(params->delay);
+
+    return 0;
+}
+
+/* We insert the SSIDs in an inverted order, because the FW will
+ * invert it back.
+ */
+static void
+iwl_mld_scan_cmd_build_ssids(struct iwl_mld_scan_params *params,
+                             struct iwl_ssid_ie *ssids, u32 *ssid_bitmap)
+{
+    int i, j;
+    int index;
+    u32 tmp_bitmap = 0;
+
+    /* copy SSIDs from match list. iwl_config_sched_scan_profiles()
+     * uses the order of these ssids to config match list.
+     */
+    for (i = 0, j = params->n_match_sets - 1;
+         j >= 0 && i < PROBE_OPTION_MAX;
+         i++, j--) {
+        /* skip empty SSID match_sets */
+        if (!params->match_sets[j].ssid.ssid_len)
+            continue;
+
+        ssids[i].id = WLAN_EID_SSID;
+        ssids[i].len = params->match_sets[j].ssid.ssid_len;
+        memcpy(ssids[i].ssid, params->match_sets[j].ssid.ssid,
+               ssids[i].len);
+    }
+
+    /* add SSIDs from scan SSID list */
+    for (j = params->n_ssids - 1;
+         j >= 0 && i < PROBE_OPTION_MAX;
+         i++, j--) {
+        index = iwl_mld_scan_ssid_exist(params->ssids[j].ssid,
+                                        params->ssids[j].ssid_len,
+                                        ssids);
+        if (index < 0) {
+            ssids[i].id = WLAN_EID_SSID;
+            ssids[i].len = params->ssids[j].ssid_len;
+            memcpy(ssids[i].ssid, params->ssids[j].ssid,
+                   ssids[i].len);
+            tmp_bitmap |= BIT(i);
+        } else {
+            tmp_bitmap |= BIT(index);
+        }
+    }
+
+    if (ssid_bitmap)
+        *ssid_bitmap = tmp_bitmap;
+}
+
+static void
+iwl_mld_scan_fill_6g_chan_list(struct iwl_mld_scan_params *params,
+                               struct iwl_scan_probe_params_v4 *pp)
+{
+    int j, idex_s = 0, idex_b = 0;
+    struct cfg80211_scan_6ghz_params *scan_6ghz_params =
+        params->scan_6ghz_params;
+
+    for (j = 0;
+         j < params->n_ssids && idex_s < SCAN_SHORT_SSID_MAX_SIZE;
+         j++) {
+        if (!params->ssids[j].ssid_len)
+            continue;
+
+        pp->short_ssid[idex_s] =
+            cpu_to_le32(~crc32_le(~0, params->ssids[j].ssid,
+                                  params->ssids[j].ssid_len));
+
+        /* hidden 6ghz scan */
+        pp->direct_scan[idex_s].id = WLAN_EID_SSID;
+        pp->direct_scan[idex_s].len = params->ssids[j].ssid_len;
+        memcpy(pp->direct_scan[idex_s].ssid, params->ssids[j].ssid,
+               params->ssids[j].ssid_len);
+        idex_s++;
+    }
+
+    /* Populate the arrays of the short SSIDs and the BSSIDs using the 6GHz
+     * collocated parameters.
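The short SSID computed above is the standard CRC-32 of the SSID bytes; ~crc32_le(~0, ...) is the usual init/final-xor pairing. A self-contained sketch with a bitwise CRC (the kernel's table-driven crc32_le() gives the same result):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* bitwise CRC-32 (reflected, poly 0xEDB88320), equivalent to the
 * kernel's crc32_le() for this use */
static uint32_t crc32_le_sw(uint32_t crc, const uint8_t *p, size_t len)
{
    while (len--) {
        crc ^= *p++;
        for (int i = 0; i < 8; i++)
            crc = (crc >> 1) ^ (0xEDB88320u & -(crc & 1));
    }
    return crc;
}

/* 802.11ax short SSID: standard CRC-32 of the SSID bytes */
static uint32_t short_ssid(const char *ssid)
{
    return ~crc32_le_sw(~0u, (const uint8_t *)ssid, strlen(ssid));
}

int main(void)
{
    printf("0x%08x\n", short_ssid("example-net")); /* hypothetical SSID */
    return 0;
}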
This might not be optimal, as this processing + * does not (yet) correspond to the actual channels, so it is possible + * that some entries would be left out. + */ + for (j = 0; j < params->n_6ghz_params; j++) { + int k; + + /* First, try to place the short SSID */ + if (scan_6ghz_params[j].short_ssid_valid) { + for (k = 0; k < idex_s; k++) { + if (pp->short_ssid[k] == + cpu_to_le32(scan_6ghz_params[j].short_ssid)) + break; + } + + if (k == idex_s && idex_s < SCAN_SHORT_SSID_MAX_SIZE) { + pp->short_ssid[idex_s++] = + cpu_to_le32(scan_6ghz_params[j].short_ssid); + } + } + + /* try to place BSSID for the same entry */ + for (k = 0; k < idex_b; k++) { + if (!memcmp(&pp->bssid_array[k], + scan_6ghz_params[j].bssid, ETH_ALEN)) + break; + } + + if (k == idex_b && idex_b < SCAN_BSSID_MAX_SIZE && + !WARN_ONCE(!is_valid_ether_addr(scan_6ghz_params[j].bssid), + "scan: invalid BSSID at index %u, index_b=%u\n", + j, idex_b)) { + memcpy(&pp->bssid_array[idex_b++], + scan_6ghz_params[j].bssid, ETH_ALEN); + } + } + + pp->short_ssid_num = idex_s; + pp->bssid_num = idex_b; +} + +static void +iwl_mld_scan_cmd_set_probe_params(struct iwl_mld_scan_params *params, + struct iwl_scan_probe_params_v4 *pp, + u32 *bitmap_ssid) +{ + pp->preq = params->preq; + + if (params->scan_6ghz) { + iwl_mld_scan_fill_6g_chan_list(params, pp); + return; + } + + /* relevant only for 2.4 GHz /5 GHz scan */ + iwl_mld_scan_cmd_build_ssids(params, pp->direct_scan, bitmap_ssid); +} + +static bool +iwl_mld_scan_use_ebs(struct iwl_mld *mld, struct ieee80211_vif *vif, + bool low_latency) +{ + const struct iwl_ucode_capabilities *capa = &mld->fw->ucode_capa; + + /* We can only use EBS if: + * 1. the feature is supported. + * 2. the last EBS was successful. + * 3. it's not a p2p find operation. + * 4. we are not in low latency mode, + * or if fragmented ebs is supported by the FW + * 5. the VIF is not an AP interface (scan wants survey results) + */ + return ((capa->flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT) && + !mld->scan.last_ebs_failed && + vif->type != NL80211_IFTYPE_P2P_DEVICE && + (!low_latency || fw_has_api(capa, IWL_UCODE_TLV_API_FRAG_EBS)) && + ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_AP); +} + +static u8 +iwl_mld_scan_cmd_set_chan_flags(struct iwl_mld *mld, + struct iwl_mld_scan_params *params, + struct ieee80211_vif *vif, + bool low_latency) +{ + u8 flags = 0; + + flags |= IWL_SCAN_CHANNEL_FLAG_ENABLE_CHAN_ORDER; + + if (iwl_mld_scan_use_ebs(mld, vif, low_latency)) + flags |= IWL_SCAN_CHANNEL_FLAG_EBS | + IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE | + IWL_SCAN_CHANNEL_FLAG_CACHE_ADD; + + /* set fragmented ebs for fragmented scan */ + if (iwl_mld_scan_is_fragmented(params->type)) + flags |= IWL_SCAN_CHANNEL_FLAG_EBS_FRAG; + + /* Force EBS in case the scan is a fragmented and there is a need + * to take P2P GO operation into consideration during scan operation. + */ + /* TODO: CDB */ + if (iwl_mld_scan_is_fragmented(params->type) && + params->respect_p2p_go) { + IWL_DEBUG_SCAN(mld, "Respect P2P GO. 
Force EBS\n"); + flags |= IWL_SCAN_CHANNEL_FLAG_FORCE_EBS; + } + + return flags; +} + +static const u8 p2p_go_friendly_chs[] = { + 36, 40, 44, 48, 149, 153, 157, 161, 165, +}; + +static const u8 social_chs[] = { + 1, 6, 11 +}; + +static u32 iwl_mld_scan_ch_n_aps_flag(enum nl80211_iftype vif_type, u8 ch_id) +{ + if (vif_type != NL80211_IFTYPE_P2P_DEVICE) + return 0; + + for (int i = 0; i < ARRAY_SIZE(p2p_go_friendly_chs); i++) { + if (ch_id == p2p_go_friendly_chs[i]) + return IWL_SCAN_ADWELL_N_APS_GO_FRIENDLY_BIT; + } + + for (int i = 0; i < ARRAY_SIZE(social_chs); i++) { + if (ch_id == social_chs[i]) + return IWL_SCAN_ADWELL_N_APS_SOCIAL_CHS_BIT; + } + + return 0; +} + +static void +iwl_mld_scan_cmd_set_channels(struct iwl_mld *mld, + struct ieee80211_channel **channels, + struct iwl_scan_channel_params_v7 *cp, + int n_channels, u32 flags, + enum nl80211_iftype vif_type) +{ + for (int i = 0; i < n_channels; i++) { + enum nl80211_band band = channels[i]->band; + struct iwl_scan_channel_cfg_umac *cfg = &cp->channel_config[i]; + u8 iwl_band = iwl_mld_nl80211_band_to_fw(band); + u32 n_aps_flag = + iwl_mld_scan_ch_n_aps_flag(vif_type, + channels[i]->hw_value); + + if (IWL_MLD_ADAPTIVE_DWELL_NUM_APS_OVERRIDE) + n_aps_flag = IWL_SCAN_ADWELL_N_APS_GO_FRIENDLY_BIT; + + cfg->flags = cpu_to_le32(flags | n_aps_flag); + cfg->channel_num = channels[i]->hw_value; + if (cfg80211_channel_is_psc(channels[i])) + cfg->flags = 0; + + if (band == NL80211_BAND_6GHZ) { + /* 6 GHz channels should only appear in a scan request + * that has scan_6ghz set. The only exception is MLO + * scan, which has to be passive. + */ + WARN_ON_ONCE(cfg->flags != 0); + cfg->flags = + cpu_to_le32(IWL_UHB_CHAN_CFG_FLAG_FORCE_PASSIVE); + } + + cfg->v2.iter_count = 1; + cfg->v2.iter_interval = 0; + cfg->flags |= cpu_to_le32(iwl_band << + IWL_CHAN_CFG_FLAGS_BAND_POS); + } +} + +static u8 +iwl_mld_scan_cfg_channels_6g(struct iwl_mld *mld, + struct iwl_mld_scan_params *params, + u32 n_channels, + struct iwl_scan_probe_params_v4 *pp, + struct iwl_scan_channel_params_v7 *cp, + enum nl80211_iftype vif_type) +{ + struct cfg80211_scan_6ghz_params *scan_6ghz_params = + params->scan_6ghz_params; + u32 i; + u8 ch_cnt; + + for (i = 0, ch_cnt = 0; i < params->n_channels; i++) { + struct iwl_scan_channel_cfg_umac *cfg = + &cp->channel_config[ch_cnt]; + + u32 s_ssid_bitmap = 0, bssid_bitmap = 0, flags = 0; + u8 k, n_s_ssids = 0, n_bssids = 0; + u8 max_s_ssids, max_bssids; + bool force_passive = false, found = false, allow_passive = true, + unsolicited_probe_on_chan = false, psc_no_listen = false; + s8 psd_20 = IEEE80211_RNR_TBTT_PARAMS_PSD_RESERVED; + + /* Avoid performing passive scan on non PSC channels unless the + * scan is specifically a passive scan, i.e., no SSIDs + * configured in the scan command. 
+ */ + if (!cfg80211_channel_is_psc(params->channels[i]) && + !params->n_6ghz_params && params->n_ssids) + continue; + + cfg->channel_num = params->channels[i]->hw_value; + cfg->flags |= + cpu_to_le32(PHY_BAND_6 << IWL_CHAN_CFG_FLAGS_BAND_POS); + + cfg->v5.iter_count = 1; + cfg->v5.iter_interval = 0; + + for (u32 j = 0; j < params->n_6ghz_params; j++) { + s8 tmp_psd_20; + + if (!(scan_6ghz_params[j].channel_idx == i)) + continue; + + unsolicited_probe_on_chan |= + scan_6ghz_params[j].unsolicited_probe; + + /* Use the highest PSD value allowed as advertised by + * APs for this channel + */ + tmp_psd_20 = scan_6ghz_params[j].psd_20; + if (tmp_psd_20 != + IEEE80211_RNR_TBTT_PARAMS_PSD_RESERVED && + (psd_20 == + IEEE80211_RNR_TBTT_PARAMS_PSD_RESERVED || + psd_20 < tmp_psd_20)) + psd_20 = tmp_psd_20; + + psc_no_listen |= scan_6ghz_params[j].psc_no_listen; + } + + /* In the following cases apply passive scan: + * 1. Non fragmented scan: + * - PSC channel with NO_LISTEN_FLAG on should be treated + * like non PSC channel + * - Non PSC channel with more than 3 short SSIDs or more + * than 9 BSSIDs. + * - Non PSC Channel with unsolicited probe response and + * more than 2 short SSIDs or more than 6 BSSIDs. + * - PSC channel with more than 2 short SSIDs or more than + * 6 BSSIDs. + * 2. Fragmented scan: + * - PSC channel with more than 1 SSID or 3 BSSIDs. + * - Non PSC channel with more than 2 SSIDs or 6 BSSIDs. + * - Non PSC channel with unsolicited probe response and + * more than 1 SSID or more than 3 BSSIDs. + */ + if (!iwl_mld_scan_is_fragmented(params->type)) { + if (!cfg80211_channel_is_psc(params->channels[i]) || + psc_no_listen) { + if (unsolicited_probe_on_chan) { + max_s_ssids = 2; + max_bssids = 6; + } else { + max_s_ssids = 3; + max_bssids = 9; + } + } else { + max_s_ssids = 2; + max_bssids = 6; + } + } else if (cfg80211_channel_is_psc(params->channels[i])) { + max_s_ssids = 1; + max_bssids = 3; + } else { + if (unsolicited_probe_on_chan) { + max_s_ssids = 1; + max_bssids = 3; + } else { + max_s_ssids = 2; + max_bssids = 6; + } + } + + /* To optimize the scan time, i.e., reduce the scan dwell time + * on each channel, the below logic tries to set 3 direct BSSID + * probe requests for each broadcast probe request with a short + * SSID. + */ + for (u32 j = 0; j < params->n_6ghz_params; j++) { + if (!(scan_6ghz_params[j].channel_idx == i)) + continue; + + found = false; + + for (k = 0; + k < pp->short_ssid_num && n_s_ssids < max_s_ssids; + k++) { + if (!scan_6ghz_params[j].unsolicited_probe && + le32_to_cpu(pp->short_ssid[k]) == + scan_6ghz_params[j].short_ssid) { + /* Relevant short SSID bit set */ + if (s_ssid_bitmap & BIT(k)) { + found = true; + break; + } + + /* Prefer creating BSSID entries unless + * the short SSID probe can be done in + * the same channel dwell iteration. + * + * We also need to create a short SSID + * entry for any hidden AP. 
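The passive-scan ladder spelled out in the comment above reduces to a small per-channel budget table. A hedged restatement (struct and names invented for illustration):

#include <stdbool.h>

struct budget { int max_short_ssids, max_bssids; };

/* dwell budget per 6 GHz channel, mirroring the comment above */
static struct budget budget_for(bool fragmented, bool psc,
                                bool unsolicited_probe, bool psc_no_listen)
{
    if (!fragmented) {
        if (!psc || psc_no_listen)
            return unsolicited_probe ?
                (struct budget){2, 6} : (struct budget){3, 9};
        return (struct budget){2, 6};
    }
    if (psc)
        return (struct budget){1, 3};
    return unsolicited_probe ?
        (struct budget){1, 3} : (struct budget){2, 6};
}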
+ */
+                if (3 * n_s_ssids > n_bssids &&
+                    !pp->direct_scan[k].len)
+                    break;
+
+                /* Hidden AP, cannot do passive scan */
+                if (pp->direct_scan[k].len)
+                    allow_passive = false;
+
+                s_ssid_bitmap |= BIT(k);
+                n_s_ssids++;
+                found = true;
+                break;
+            }
+        }
+
+        if (found)
+            continue;
+
+        for (k = 0; k < pp->bssid_num; k++) {
+            if (memcmp(&pp->bssid_array[k],
+                       scan_6ghz_params[j].bssid,
+                       ETH_ALEN))
+                continue;
+
+            if (bssid_bitmap & BIT(k))
+                break;
+
+            if (n_bssids < max_bssids) {
+                bssid_bitmap |= BIT(k);
+                n_bssids++;
+            } else {
+                force_passive = true;
+            }
+
+            break;
+        }
+    }
+
+    if (cfg80211_channel_is_psc(params->channels[i]) &&
+        psc_no_listen)
+        flags |= IWL_UHB_CHAN_CFG_FLAG_PSC_CHAN_NO_LISTEN;
+
+    if (unsolicited_probe_on_chan)
+        flags |= IWL_UHB_CHAN_CFG_FLAG_UNSOLICITED_PROBE_RES;
+
+    if ((allow_passive && force_passive) ||
+        (!(bssid_bitmap | s_ssid_bitmap) &&
+         !cfg80211_channel_is_psc(params->channels[i])))
+        flags |= IWL_UHB_CHAN_CFG_FLAG_FORCE_PASSIVE;
+    else
+        flags |= bssid_bitmap | (s_ssid_bitmap << 16);
+
+    cfg->flags |= cpu_to_le32(flags);
+    cfg->v5.psd_20 = psd_20;
+
+    ch_cnt++;
+    }
+
+    if (params->n_channels > ch_cnt)
+        IWL_DEBUG_SCAN(mld,
+                       "6GHz: reducing number channels: (%u->%u)\n",
+                       params->n_channels, ch_cnt);
+
+    return ch_cnt;
+}
+
+static int
+iwl_mld_scan_cmd_set_6ghz_chan_params(struct iwl_mld *mld,
+                                      struct iwl_mld_scan_params *params,
+                                      struct ieee80211_vif *vif,
+                                      struct iwl_scan_req_params_v17 *scan_p,
+                                      enum iwl_mld_scan_status scan_status)
+{
+    struct iwl_scan_channel_params_v7 *chan_p = &scan_p->channel_params;
+    struct iwl_scan_probe_params_v4 *probe_p = &scan_p->probe_params;
+
+    chan_p->flags = iwl_mld_scan_get_cmd_gen_flags(mld, params, vif,
+                                                   scan_status);
+    chan_p->count = iwl_mld_scan_cfg_channels_6g(mld, params,
+                                                 params->n_channels,
+                                                 probe_p, chan_p,
+                                                 vif->type);
+    if (!chan_p->count)
+        return -EINVAL;
+
+    if (!params->n_ssids ||
+        (params->n_ssids == 1 && !params->ssids[0].ssid_len))
+        chan_p->flags |= IWL_SCAN_CHANNEL_FLAG_6G_PSC_NO_FILTER;
+
+    return 0;
+}
+
+static int
+iwl_mld_scan_cmd_set_chan_params(struct iwl_mld *mld,
+                                 struct iwl_mld_scan_params *params,
+                                 struct ieee80211_vif *vif,
+                                 struct iwl_scan_req_params_v17 *scan_p,
+                                 bool low_latency,
+                                 enum iwl_mld_scan_status scan_status,
+                                 u32 channel_cfg_flags)
+{
+    struct iwl_scan_channel_params_v7 *cp = &scan_p->channel_params;
+    struct ieee80211_supported_band *sband =
+        &mld->nvm_data->bands[NL80211_BAND_6GHZ];
+
+    cp->n_aps_override[0] = IWL_SCAN_ADWELL_N_APS_GO_FRIENDLY;
+    cp->n_aps_override[1] = IWL_SCAN_ADWELL_N_APS_SOCIAL_CHS;
+
+    if (IWL_MLD_ADAPTIVE_DWELL_NUM_APS_OVERRIDE)
+        cp->n_aps_override[0] = IWL_MLD_ADAPTIVE_DWELL_NUM_APS_OVERRIDE;
+
+    if (params->scan_6ghz)
+        return iwl_mld_scan_cmd_set_6ghz_chan_params(mld, params,
+                                                     vif, scan_p,
+                                                     scan_status);
+
+    /* relevant only for 2.4 GHz/5 GHz scan */
+    cp->flags = iwl_mld_scan_cmd_set_chan_flags(mld, params, vif,
+                                                low_latency);
+    cp->count = params->n_channels;
+
+    iwl_mld_scan_cmd_set_channels(mld, params->channels, cp,
+                                  params->n_channels, channel_cfg_flags,
+                                  vif->type);
+
+    if (!params->enable_6ghz_passive)
+        return 0;
+
+    /* fill 6 GHz passive scan cfg */
+    for (int i = 0; i < sband->n_channels; i++) {
+        struct ieee80211_channel *channel =
+            &sband->channels[i];
+        struct iwl_scan_channel_cfg_umac *cfg =
+            &cp->channel_config[cp->count];
+
+        if (!cfg80211_channel_is_psc(channel))
+            continue;
+
+        cfg->channel_num = channel->hw_value;
+        cfg->v5.iter_count = 1;
+        cfg->v5.iter_interval = 0;
+        cfg->v5.psd_20 =
IEEE80211_RNR_TBTT_PARAMS_PSD_RESERVED; + cfg->flags = cpu_to_le32(PHY_BAND_6 << + IWL_CHAN_CFG_FLAGS_BAND_POS); + cp->count++; + } + + return 0; +} + +static int +iwl_mld_scan_build_cmd(struct iwl_mld *mld, struct ieee80211_vif *vif, + struct iwl_mld_scan_params *params, + enum iwl_mld_scan_status scan_status, + bool low_latency) +{ + struct iwl_scan_req_umac_v17 *cmd = mld->scan.cmd; + struct iwl_scan_req_params_v17 *scan_p = &cmd->scan_params; + u32 bitmap_ssid = 0; + int uid, ret; + + memset(mld->scan.cmd, 0, mld->scan.cmd_size); + + /* find a free UID entry */ + uid = iwl_mld_scan_uid_by_status(mld, IWL_MLD_SCAN_NONE); + if (uid < 0) + return uid; + + cmd->uid = cpu_to_le32(uid); + cmd->ooc_priority = + cpu_to_le32(iwl_mld_scan_ooc_priority(scan_status)); + + iwl_mld_scan_cmd_set_gen_params(mld, params, vif, + &scan_p->general_params, scan_status); + + ret = iwl_mld_scan_cmd_set_sched_params(params, + scan_p->periodic_params.schedule, + &scan_p->periodic_params.delay); + if (ret) + return ret; + + iwl_mld_scan_cmd_set_probe_params(params, &scan_p->probe_params, + &bitmap_ssid); + + ret = iwl_mld_scan_cmd_set_chan_params(mld, params, vif, scan_p, + low_latency, scan_status, + bitmap_ssid); + if (ret) + return ret; + + return uid; +} + +static bool +iwl_mld_scan_pass_all(struct iwl_mld *mld, + struct cfg80211_sched_scan_request *req) +{ + if (req->n_match_sets && req->match_sets[0].ssid.ssid_len) { + IWL_DEBUG_SCAN(mld, + "Sending scheduled scan with filtering, n_match_sets %d\n", + req->n_match_sets); + mld->scan.pass_all_sched_res = SCHED_SCAN_PASS_ALL_STATE_DISABLED; + return false; + } + + IWL_DEBUG_SCAN(mld, "Sending Scheduled scan without filtering\n"); + mld->scan.pass_all_sched_res = SCHED_SCAN_PASS_ALL_STATE_ENABLED; + + return true; +} + +static int +iwl_mld_config_sched_scan_profiles(struct iwl_mld *mld, + struct cfg80211_sched_scan_request *req) +{ + struct iwl_host_cmd hcmd = { + .id = SCAN_OFFLOAD_UPDATE_PROFILES_CMD, + .dataflags[0] = IWL_HCMD_DFL_NOCOPY, + }; + struct iwl_scan_offload_profile *profile; + struct iwl_scan_offload_profile_cfg_data *cfg_data; + struct iwl_scan_offload_profile_cfg *profile_cfg; + struct iwl_scan_offload_blocklist *blocklist; + u32 blocklist_size = IWL_SCAN_MAX_BLACKLIST_LEN * sizeof(*blocklist); + u32 cmd_size = blocklist_size + sizeof(*profile_cfg); + u8 *cmd; + int ret; + + if (WARN_ON(req->n_match_sets > IWL_SCAN_MAX_PROFILES_V2)) + return -EIO; + + cmd = kzalloc(cmd_size, GFP_KERNEL); + if (!cmd) + return -ENOMEM; + + hcmd.data[0] = cmd; + hcmd.len[0] = cmd_size; + + blocklist = (struct iwl_scan_offload_blocklist *)cmd; + profile_cfg = (struct iwl_scan_offload_profile_cfg *)(cmd + blocklist_size); + + /* No blocklist configuration */ + cfg_data = &profile_cfg->data; + cfg_data->num_profiles = req->n_match_sets; + cfg_data->active_clients = SCAN_CLIENT_SCHED_SCAN; + cfg_data->pass_match = SCAN_CLIENT_SCHED_SCAN; + cfg_data->match_notify = SCAN_CLIENT_SCHED_SCAN; + + if (!req->n_match_sets || !req->match_sets[0].ssid.ssid_len) + cfg_data->any_beacon_notify = SCAN_CLIENT_SCHED_SCAN; + + for (int i = 0; i < req->n_match_sets; i++) { + profile = &profile_cfg->profiles[i]; + + /* Support any cipher and auth algorithm */ + profile->unicast_cipher = 0xff; + profile->auth_alg = IWL_AUTH_ALGO_UNSUPPORTED | + IWL_AUTH_ALGO_NONE | IWL_AUTH_ALGO_PSK | + IWL_AUTH_ALGO_8021X | IWL_AUTH_ALGO_SAE | + IWL_AUTH_ALGO_8021X_SHA384 | IWL_AUTH_ALGO_OWE; + profile->network_type = IWL_NETWORK_TYPE_ANY; + profile->band_selection = IWL_SCAN_OFFLOAD_SELECT_ANY; + 
profile->client_bitmap = SCAN_CLIENT_SCHED_SCAN;
+        profile->ssid_index = i;
+    }
+
+    IWL_DEBUG_SCAN(mld,
+                   "Sending scheduled scan profile config (n_match_sets=%u)\n",
+                   req->n_match_sets);
+
+    ret = iwl_mld_send_cmd(mld, &hcmd);
+
+    kfree(cmd);
+
+    return ret;
+}
+
+static int
+iwl_mld_sched_scan_handle_non_psc_channels(struct iwl_mld_scan_params *params,
+                                           bool *non_psc_included)
+{
+    int i, j;
+
+    *non_psc_included = false;
+    /* for 6 GHz band only PSC channels need to be added */
+    for (i = 0; i < params->n_channels; i++) {
+        struct ieee80211_channel *channel = params->channels[i];
+
+        if (channel->band == NL80211_BAND_6GHZ &&
+            !cfg80211_channel_is_psc(channel)) {
+            *non_psc_included = true;
+            break;
+        }
+    }
+
+    if (!*non_psc_included)
+        return 0;
+
+    params->channels =
+        kmemdup(params->channels,
+                sizeof(params->channels[0]) * params->n_channels,
+                GFP_KERNEL);
+    if (!params->channels)
+        return -ENOMEM;
+
+    for (i = j = 0; i < params->n_channels; i++) {
+        if (params->channels[i]->band == NL80211_BAND_6GHZ &&
+            !cfg80211_channel_is_psc(params->channels[i]))
+            continue;
+        params->channels[j++] = params->channels[i];
+    }
+
+    params->n_channels = j;
+
+    return 0;
+}
+
+static void
+iwl_mld_scan_6ghz_passive_scan(struct iwl_mld *mld,
+                               struct iwl_mld_scan_params *params,
+                               struct ieee80211_vif *vif)
+{
+    struct ieee80211_supported_band *sband =
+        &mld->nvm_data->bands[NL80211_BAND_6GHZ];
+    u32 n_disabled, i;
+
+    params->enable_6ghz_passive = false;
+
+    /* 6 GHz passive scan may be enabled in the first 2.4 GHz/5 GHz scan
+     * phase to discover geo location if no APs are found. Skip it when
+     * we're in the 6 GHz scan phase.
+     */
+    if (params->scan_6ghz)
+        return;
+
+    /* 6 GHz passive scan allowed only on station interface */
+    if (vif->type != NL80211_IFTYPE_STATION) {
+        IWL_DEBUG_SCAN(mld,
+                       "6GHz passive scan: not station interface\n");
+        return;
+    }
+
+    /* 6 GHz passive scan is allowed in a defined time interval following
+     * HW reset or resume flow, or while not associated and a large
+     * interval has passed since the last 6 GHz passive scan.
+     */
+    if ((vif->cfg.assoc ||
+         time_after(mld->scan.last_6ghz_passive_jiffies +
+                    (IWL_MLD_6GHZ_PASSIVE_SCAN_TIMEOUT * HZ), jiffies)) &&
+        (time_before(mld->scan.last_start_time_jiffies +
+                     (IWL_MLD_6GHZ_PASSIVE_SCAN_ASSOC_TIMEOUT * HZ),
+                     jiffies))) {
+        IWL_DEBUG_SCAN(mld, "6GHz passive scan: %s\n",
+                       vif->cfg.assoc ? "associated" :
"associated" : + "timeout did not expire"); + return; + } + + /* not enough channels in the regular scan request */ + if (params->n_channels < IWL_MLD_6GHZ_PASSIVE_SCAN_MIN_CHANS) { + IWL_DEBUG_SCAN(mld, + "6GHz passive scan: not enough channels %d\n", + params->n_channels); + return; + } + + for (i = 0; i < params->n_ssids; i++) { + if (!params->ssids[i].ssid_len) + break; + } + + /* not a wildcard scan, so cannot enable passive 6 GHz scan */ + if (i == params->n_ssids) { + IWL_DEBUG_SCAN(mld, + "6GHz passive scan: no wildcard SSID\n"); + return; + } + + if (!sband || !sband->n_channels) { + IWL_DEBUG_SCAN(mld, + "6GHz passive scan: no 6GHz channels\n"); + return; + } + + for (i = 0, n_disabled = 0; i < sband->n_channels; i++) { + if (sband->channels[i].flags & (IEEE80211_CHAN_DISABLED)) + n_disabled++; + } + + /* Not all the 6 GHz channels are disabled, so no need for 6 GHz + * passive scan + */ + if (n_disabled != sband->n_channels) { + IWL_DEBUG_SCAN(mld, + "6GHz passive scan: 6GHz channels enabled\n"); + return; + } + + /* all conditions to enable 6 GHz passive scan are satisfied */ + IWL_DEBUG_SCAN(mld, "6GHz passive scan: can be enabled\n"); + params->enable_6ghz_passive = true; +} + +static void +iwl_mld_scan_set_link_id(struct iwl_mld *mld, struct ieee80211_vif *vif, + struct iwl_mld_scan_params *params, + s8 tsf_report_link_id, + enum iwl_mld_scan_status scan_status) +{ + struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif); + struct iwl_mld_link *link; + + if (tsf_report_link_id < 0) { + if (vif->active_links) + tsf_report_link_id = __ffs(vif->active_links); + else + tsf_report_link_id = 0; + } + + link = iwl_mld_link_dereference_check(mld_vif, tsf_report_link_id); + if (!WARN_ON(!link)) { + params->fw_link_id = link->fw_id; + /* we to store fw_link_id only for regular scan, + * and use it in scan complete notif + */ + if (scan_status == IWL_MLD_SCAN_REGULAR) + mld->scan.fw_link_id = link->fw_id; + } else { + mld->scan.fw_link_id = IWL_MLD_INVALID_FW_ID; + params->fw_link_id = IWL_MLD_INVALID_FW_ID; + } +} + +static int +_iwl_mld_single_scan_start(struct iwl_mld *mld, struct ieee80211_vif *vif, + struct cfg80211_scan_request *req, + struct ieee80211_scan_ies *ies, + enum iwl_mld_scan_status scan_status) +{ + struct iwl_host_cmd hcmd = { + .id = WIDE_ID(LONG_GROUP, SCAN_REQ_UMAC), + .len = { mld->scan.cmd_size, }, + .data = { mld->scan.cmd, }, + .dataflags = { IWL_HCMD_DFL_NOCOPY, }, + }; + struct iwl_mld_scan_iter_data scan_iter_data = { + .current_vif = vif, + }; + struct cfg80211_sched_scan_plan scan_plan = {.iterations = 1}; + struct iwl_mld_scan_params params = {}; + int ret, uid; + + /* we should have failed registration if scan_cmd was NULL */ + if (WARN_ON(!mld->scan.cmd)) + return -ENOMEM; + + if (!iwl_mld_scan_fits(mld, req->n_ssids, ies, req->n_channels)) + return -ENOBUFS; + + ieee80211_iterate_active_interfaces_mtx(mld->hw, + IEEE80211_IFACE_ITER_NORMAL, + iwl_mld_scan_iterator, + &scan_iter_data); + + params.type = iwl_mld_get_scan_type(mld, vif, &scan_iter_data); + params.n_ssids = req->n_ssids; + params.flags = req->flags; + params.n_channels = req->n_channels; + params.delay = 0; + params.ssids = req->ssids; + params.channels = req->channels; + params.mac_addr = req->mac_addr; + params.mac_addr_mask = req->mac_addr_mask; + params.no_cck = req->no_cck; + params.pass_all = true; + params.n_match_sets = 0; + params.match_sets = NULL; + params.scan_plans = &scan_plan; + params.n_scan_plans = 1; + + params.n_6ghz_params = req->n_6ghz_params; + 
+    params.scan_6ghz_params = req->scan_6ghz_params;
+    params.scan_6ghz = req->scan_6ghz;
+
+    ether_addr_copy(params.bssid, req->bssid);
+    /* TODO: CDB - per-band flag */
+    params.respect_p2p_go =
+        iwl_mld_get_respect_p2p_go(mld, vif,
+                                   scan_iter_data.global_low_latency);
+
+    if (req->duration)
+        params.iter_notif = true;
+
+    iwl_mld_scan_set_link_id(mld, vif, &params, req->tsf_report_link_id,
+                             scan_status);
+
+    iwl_mld_scan_build_probe_req(mld, vif, ies, &params);
+
+    iwl_mld_scan_6ghz_passive_scan(mld, &params, vif);
+
+    uid = iwl_mld_scan_build_cmd(mld, vif, &params, scan_status,
+                                 scan_iter_data.global_low_latency);
+    if (uid < 0)
+        return uid;
+
+    ret = iwl_mld_send_cmd(mld, &hcmd);
+    if (ret) {
+        IWL_ERR(mld, "Scan failed! ret %d\n", ret);
+        return ret;
+    }
+
+    IWL_DEBUG_SCAN(mld, "Scan request send success: status=%u, uid=%u\n",
+                   scan_status, uid);
+
+    mld->scan.uid_status[uid] = scan_status;
+    mld->scan.status |= scan_status;
+
+    if (params.enable_6ghz_passive)
+        mld->scan.last_6ghz_passive_jiffies = jiffies;
+
+    return 0;
+}
+
+static int
+iwl_mld_scan_send_abort_cmd_status(struct iwl_mld *mld, int uid, u32 *status)
+{
+    struct iwl_umac_scan_abort abort_cmd = {
+        .uid = cpu_to_le32(uid),
+    };
+    struct iwl_host_cmd cmd = {
+        .id = WIDE_ID(LONG_GROUP, SCAN_ABORT_UMAC),
+        .flags = CMD_WANT_SKB,
+        .data = { &abort_cmd },
+        .len[0] = sizeof(abort_cmd),
+    };
+    struct iwl_rx_packet *pkt;
+    struct iwl_cmd_response *resp;
+    u32 resp_len;
+    int ret;
+
+    ret = iwl_mld_send_cmd(mld, &cmd);
+    if (ret)
+        return ret;
+
+    pkt = cmd.resp_pkt;
+
+    resp_len = iwl_rx_packet_payload_len(pkt);
+    if (IWL_FW_CHECK(mld, resp_len != sizeof(*resp),
+                     "Scan Abort: unexpected response length %d\n",
+                     resp_len)) {
+        ret = -EIO;
+        goto out;
+    }
+
+    resp = (void *)pkt->data;
+    *status = le32_to_cpu(resp->status);
+
+out:
+    iwl_free_resp(&cmd);
+    return ret;
+}
+
+static int
+iwl_mld_scan_abort(struct iwl_mld *mld, int type, int uid, bool *wait)
+{
+    enum iwl_umac_scan_abort_status status;
+    int ret;
+
+    *wait = true;
+
+    IWL_DEBUG_SCAN(mld, "Sending scan abort, uid %u\n", uid);
+
+    ret = iwl_mld_scan_send_abort_cmd_status(mld, uid, &status);
+
+    IWL_DEBUG_SCAN(mld, "Scan abort: ret=%d status=%u\n", ret, status);
+
+    /* We don't need to wait to scan complete in the following cases:
+     * 1. Driver failed to send the scan abort cmd.
+     * 2. The FW is no longer familiar with the scan that needs to be
+     *    stopped. It is expected that the scan complete notification was
+     *    already received but not yet processed.
+     *
+     * In both cases the flow should continue similar to the case that the
+     * scan was really aborted.
+     */
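The two-case rule in the comment above boils down to a single predicate; a hedged standalone restatement (enum names invented):

#include <stdbool.h>

enum abort_status { ABORT_OK, ABORT_IN_PROGRESS, ABORT_NOT_FOUND };

/* Whether to block for the SCAN_COMPLETE notification after an abort:
 * if the command never made it out, or the firmware no longer knows
 * the scan (the completion is already in flight), waiting would only
 * time out. */
static bool wait_for_scan_complete(int cmd_ret, enum abort_status status)
{
    return cmd_ret == 0 && status != ABORT_NOT_FOUND;
}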
+    if (ret || status == IWL_UMAC_SCAN_ABORT_STATUS_NOT_FOUND)
+        *wait = false;
+
+    return ret;
+}
+
+static int
+iwl_mld_scan_stop_wait(struct iwl_mld *mld, int type, int uid)
+{
+    struct iwl_notification_wait wait_scan_done;
+    static const u16 scan_comp_notif[] = { SCAN_COMPLETE_UMAC };
+    bool wait = true;
+    int ret;
+
+    iwl_init_notification_wait(&mld->notif_wait, &wait_scan_done,
+                               scan_comp_notif,
+                               ARRAY_SIZE(scan_comp_notif),
+                               NULL, NULL);
+
+    IWL_DEBUG_SCAN(mld, "Preparing to stop scan, type=%x\n", type);
+
+    ret = iwl_mld_scan_abort(mld, type, uid, &wait);
+    if (ret) {
+        IWL_DEBUG_SCAN(mld, "couldn't stop scan type=%d\n", type);
+        goto return_no_wait;
+    }
+
+    if (!wait) {
+        IWL_DEBUG_SCAN(mld, "no need to wait for scan type=%d\n", type);
+        goto return_no_wait;
+    }
+
+    return iwl_wait_notification(&mld->notif_wait, &wait_scan_done, HZ);
+
+return_no_wait:
+    iwl_remove_notification(&mld->notif_wait, &wait_scan_done);
+    return ret;
+}
+
+int iwl_mld_sched_scan_start(struct iwl_mld *mld,
+                             struct ieee80211_vif *vif,
+                             struct cfg80211_sched_scan_request *req,
+                             struct ieee80211_scan_ies *ies,
+                             int type)
+{
+    struct iwl_host_cmd hcmd = {
+        .id = WIDE_ID(LONG_GROUP, SCAN_REQ_UMAC),
+        .len = { mld->scan.cmd_size, },
+        .data = { mld->scan.cmd, },
+        .dataflags = { IWL_HCMD_DFL_NOCOPY, },
+    };
+    struct iwl_mld_scan_params params = {};
+    struct iwl_mld_scan_iter_data scan_iter_data = {
+        .current_vif = vif,
+    };
+    bool non_psc_included = false;
+    int ret, uid;
+
+    /* we should have failed registration if scan_cmd was NULL */
+    if (WARN_ON(!mld->scan.cmd))
+        return -ENOMEM;
+
+    /* FW supports only a single periodic scan */
+    if (mld->scan.status & (IWL_MLD_SCAN_SCHED | IWL_MLD_SCAN_NETDETECT))
+        return -EBUSY;
+
+    ieee80211_iterate_active_interfaces_mtx(mld->hw,
+                                            IEEE80211_IFACE_ITER_NORMAL,
+                                            iwl_mld_scan_iterator,
+                                            &scan_iter_data);
+
+    params.type = iwl_mld_get_scan_type(mld, vif, &scan_iter_data);
+    params.flags = req->flags;
+    params.n_ssids = req->n_ssids;
+    params.ssids = req->ssids;
+    params.n_channels = req->n_channels;
+    params.channels = req->channels;
+    params.mac_addr = req->mac_addr;
+    params.mac_addr_mask = req->mac_addr_mask;
+    params.no_cck = false;
+    params.pass_all = iwl_mld_scan_pass_all(mld, req);
+    params.n_match_sets = req->n_match_sets;
+    params.match_sets = req->match_sets;
+    params.n_scan_plans = req->n_scan_plans;
+    params.scan_plans = req->scan_plans;
+    /* TODO: CDB - per-band flag */
+    params.respect_p2p_go =
+        iwl_mld_get_respect_p2p_go(mld, vif,
+                                   scan_iter_data.global_low_latency);
+
+    /* UMAC scan supports up to 16-bit delays, trim it down to 16-bits */
+    params.delay = req->delay > U16_MAX ? U16_MAX : req->delay;
+
+    eth_broadcast_addr(params.bssid);
+
+    ret = iwl_mld_config_sched_scan_profiles(mld, req);
+    if (ret)
+        return ret;
+
+    iwl_mld_scan_build_probe_req(mld, vif, ies, &params);
+
+    ret = iwl_mld_sched_scan_handle_non_psc_channels(&params,
+                                                     &non_psc_included);
+    if (ret)
+        goto out;
+
+    if (!iwl_mld_scan_fits(mld, req->n_ssids, ies, params.n_channels)) {
+        ret = -ENOBUFS;
+        goto out;
+    }
+
+    uid = iwl_mld_scan_build_cmd(mld, vif, &params, type,
+                                 scan_iter_data.global_low_latency);
+    if (uid < 0) {
+        ret = uid;
+        goto out;
+    }
+
+    ret = iwl_mld_send_cmd(mld, &hcmd);
+    if (!ret) {
+        IWL_DEBUG_SCAN(mld,
+                       "Sched scan request send success: type=%u, uid=%u\n",
+                       type, uid);
+        mld->scan.uid_status[uid] = type;
+        mld->scan.status |= type;
+    } else {
+        IWL_ERR(mld, "Sched scan failed!
ret %d\n", ret); + mld->scan.pass_all_sched_res = SCHED_SCAN_PASS_ALL_STATE_DISABLED; + } + +out: + if (non_psc_included) + kfree(params.channels); + return ret; +} + +int iwl_mld_scan_stop(struct iwl_mld *mld, int type, bool notify) +{ + int uid, ret; + + IWL_DEBUG_SCAN(mld, + "Request to stop scan: type=0x%x, status=0x%x\n", + type, mld->scan.status); + + if (!(mld->scan.status & type)) + return 0; + + uid = iwl_mld_scan_uid_by_status(mld, type); + /* must be valid, we just checked it's running */ + if (WARN_ON_ONCE(uid < 0)) + return uid; + + ret = iwl_mld_scan_stop_wait(mld, type, uid); + if (ret) + IWL_DEBUG_SCAN(mld, "Failed to stop scan\n"); + + /* Clear the scan status so the next scan requests will + * succeed and mark the scan as stopping, so that the Rx + * handler doesn't do anything, as the scan was stopped from + * above. Also remove the handler to not notify mac80211 + * erroneously after a new scan starts, for example. + */ + mld->scan.status &= ~type; + mld->scan.uid_status[uid] = IWL_MLD_SCAN_NONE; + iwl_mld_cancel_notifications_of_object(mld, IWL_MLD_OBJECT_TYPE_SCAN, + uid); + + if (type == IWL_MLD_SCAN_REGULAR) { + if (notify) { + struct cfg80211_scan_info info = { + .aborted = true, + }; + + ieee80211_scan_completed(mld->hw, &info); + } + } else if (notify) { + ieee80211_sched_scan_stopped(mld->hw); + mld->scan.pass_all_sched_res = SCHED_SCAN_PASS_ALL_STATE_DISABLED; + } + + return ret; +} + +int iwl_mld_regular_scan_start(struct iwl_mld *mld, struct ieee80211_vif *vif, + struct cfg80211_scan_request *req, + struct ieee80211_scan_ies *ies) +{ + return _iwl_mld_single_scan_start(mld, vif, req, ies, + IWL_MLD_SCAN_REGULAR); +} + +static void iwl_mld_int_mlo_scan_start(struct iwl_mld *mld, + struct ieee80211_vif *vif, + struct ieee80211_channel **channels, + size_t n_channels) +{ + struct cfg80211_scan_request *req __free(kfree) = NULL; + struct ieee80211_scan_ies ies = {}; + size_t size; + int ret; + + IWL_DEBUG_SCAN(mld, "Starting Internal MLO scan: n_channels=%zu\n", + n_channels); + + size = struct_size(req, channels, n_channels); + req = kzalloc(size, GFP_KERNEL); + if (!req) + return; + + /* set the requested channels */ + for (int i = 0; i < n_channels; i++) + req->channels[i] = channels[i]; + + req->n_channels = n_channels; + + /* set the rates */ + for (int i = 0; i < NUM_NL80211_BANDS; i++) + if (mld->wiphy->bands[i]) + req->rates[i] = + (1 << mld->wiphy->bands[i]->n_bitrates) - 1; + + req->wdev = ieee80211_vif_to_wdev(vif); + req->wiphy = mld->wiphy; + req->scan_start = jiffies; + req->tsf_report_link_id = -1; + + ret = _iwl_mld_single_scan_start(mld, vif, req, &ies, + IWL_MLD_SCAN_INT_MLO); + + if (!ret) + mld->scan.last_mlo_scan_time = ktime_get_boottime_ns(); + + IWL_DEBUG_SCAN(mld, "Internal MLO scan: ret=%d\n", ret); +} + +void iwl_mld_int_mlo_scan(struct iwl_mld *mld, struct ieee80211_vif *vif) +{ + struct ieee80211_channel *channels[IEEE80211_MLD_MAX_NUM_LINKS]; + unsigned long usable_links = ieee80211_vif_usable_links(vif); + size_t n_channels = 0; + u8 link_id; + + lockdep_assert_wiphy(mld->wiphy); + + if (!vif->cfg.assoc || !ieee80211_vif_is_mld(vif) || + hweight16(vif->valid_links) == 1) + return; + + if (mld->scan.status & IWL_MLD_SCAN_INT_MLO) { + IWL_DEBUG_SCAN(mld, "Internal MLO scan is already running\n"); + return; + } + + for_each_set_bit(link_id, &usable_links, IEEE80211_MLD_MAX_NUM_LINKS) { + struct ieee80211_bss_conf *link_conf = + link_conf_dereference_check(vif, link_id); + + if (WARN_ON_ONCE(!link_conf)) + continue; + + 
channels[n_channels++] = link_conf->chanreq.oper.chan; + } + + if (!n_channels) + return; + + iwl_mld_int_mlo_scan_start(mld, vif, channels, n_channels); +} + +void iwl_mld_handle_scan_iter_complete_notif(struct iwl_mld *mld, + struct iwl_rx_packet *pkt) +{ + struct iwl_umac_scan_iter_complete_notif *notif = (void *)pkt->data; + u32 uid = __le32_to_cpu(notif->uid); + + if (IWL_FW_CHECK(mld, uid >= ARRAY_SIZE(mld->scan.uid_status), + "FW reports out-of-range scan UID %d\n", uid)) + return; + + if (mld->scan.uid_status[uid] == IWL_MLD_SCAN_REGULAR) + mld->scan.start_tsf = le64_to_cpu(notif->start_tsf); + + IWL_DEBUG_SCAN(mld, + "UMAC Scan iteration complete: status=0x%x scanned_channels=%d\n", + notif->status, notif->scanned_channels); + + if (mld->scan.pass_all_sched_res == SCHED_SCAN_PASS_ALL_STATE_FOUND) { + IWL_DEBUG_SCAN(mld, "Pass all scheduled scan results found\n"); + ieee80211_sched_scan_results(mld->hw); + mld->scan.pass_all_sched_res = SCHED_SCAN_PASS_ALL_STATE_ENABLED; + } + + IWL_DEBUG_SCAN(mld, + "UMAC Scan iteration complete: scan started at %llu (TSF)\n", + le64_to_cpu(notif->start_tsf)); +} + +void iwl_mld_handle_match_found_notif(struct iwl_mld *mld, + struct iwl_rx_packet *pkt) +{ + IWL_DEBUG_SCAN(mld, "Scheduled scan results\n"); + ieee80211_sched_scan_results(mld->hw); +} + +void iwl_mld_handle_scan_complete_notif(struct iwl_mld *mld, + struct iwl_rx_packet *pkt) +{ + struct iwl_umac_scan_complete *notif = (void *)pkt->data; + bool aborted = (notif->status == IWL_SCAN_OFFLOAD_ABORTED); + u32 uid = __le32_to_cpu(notif->uid); + + if (IWL_FW_CHECK(mld, uid >= ARRAY_SIZE(mld->scan.uid_status), + "FW reports out-of-range scan UID %d\n", uid)) + return; + + IWL_DEBUG_SCAN(mld, + "Scan completed: uid=%u type=%u, status=%s, EBS=%s\n", + uid, mld->scan.uid_status[uid], + notif->status == IWL_SCAN_OFFLOAD_COMPLETED ? + "completed" : "aborted", + iwl_mld_scan_ebs_status_str(notif->ebs_status)); + IWL_DEBUG_SCAN(mld, "Scan completed: scan_status=0x%x\n", + mld->scan.status); + IWL_DEBUG_SCAN(mld, + "Scan completed: line=%u, iter=%u, elapsed time=%u\n", + notif->last_schedule, notif->last_iter, + __le32_to_cpu(notif->time_from_last_iter)); + + if (IWL_FW_CHECK(mld, !(mld->scan.uid_status[uid] & mld->scan.status), + "FW reports scan UID %d we didn't trigger\n", uid)) + return; + + /* if the scan is already stopping, we don't need to notify mac80211 */ + if (mld->scan.uid_status[uid] == IWL_MLD_SCAN_REGULAR) { + struct cfg80211_scan_info info = { + .aborted = aborted, + .scan_start_tsf = mld->scan.start_tsf, + }; + int fw_link_id = mld->scan.fw_link_id; + struct ieee80211_bss_conf *link_conf = NULL; + + if (fw_link_id != IWL_MLD_INVALID_FW_ID) + link_conf = + wiphy_dereference(mld->wiphy, + mld->fw_id_to_bss_conf[fw_link_id]); + + /* It is possible that by the time the scan is complete the + * link was already removed and is not valid. + */ + if (link_conf) + ether_addr_copy(info.tsf_bssid, link_conf->bssid); + else + IWL_DEBUG_SCAN(mld, "Scan link is no longer valid\n"); + + ieee80211_scan_completed(mld->hw, &info); + } else if (mld->scan.uid_status[uid] == IWL_MLD_SCAN_SCHED) { + ieee80211_sched_scan_stopped(mld->hw); + mld->scan.pass_all_sched_res = SCHED_SCAN_PASS_ALL_STATE_DISABLED; + } else if (mld->scan.uid_status[uid] == IWL_MLD_SCAN_INT_MLO) { + IWL_DEBUG_SCAN(mld, "Internal MLO scan completed\n"); + + /* + * We limit link selection to internal MLO scans as otherwise + * we do not know whether all channels were covered. 
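+		 * (Illustrative note, inferred from the code: a regular or
+		 * scheduled scan may legitimately skip channels, so acting on
+		 * its completion could base link selection on partial
+		 * measurements.)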
+		 */
+		iwl_mld_select_links(mld);
+	}
+
+	mld->scan.status &= ~mld->scan.uid_status[uid];
+
+	IWL_DEBUG_SCAN(mld, "Scan completed: after update: scan_status=0x%x\n",
+		       mld->scan.status);
+
+	mld->scan.uid_status[uid] = IWL_MLD_SCAN_NONE;
+
+	if (notif->ebs_status != IWL_SCAN_EBS_SUCCESS &&
+	    notif->ebs_status != IWL_SCAN_EBS_INACTIVE)
+		mld->scan.last_ebs_failed = true;
+}
+
+/* This function is used in the nic restart flow, to inform mac80211 about
+ * scans that were aborted by the restart flow or by an assert.
+ */
+void iwl_mld_report_scan_aborted(struct iwl_mld *mld)
+{
+	int uid;
+
+	uid = iwl_mld_scan_uid_by_status(mld, IWL_MLD_SCAN_REGULAR);
+	if (uid >= 0) {
+		struct cfg80211_scan_info info = {
+			.aborted = true,
+		};
+
+		ieee80211_scan_completed(mld->hw, &info);
+		mld->scan.uid_status[uid] = IWL_MLD_SCAN_NONE;
+	}
+
+	uid = iwl_mld_scan_uid_by_status(mld, IWL_MLD_SCAN_SCHED);
+	if (uid >= 0) {
+		mld->scan.pass_all_sched_res = SCHED_SCAN_PASS_ALL_STATE_DISABLED;
+		mld->scan.uid_status[uid] = IWL_MLD_SCAN_NONE;
+
+		/* sched scan will be restarted by mac80211 in reconfig.
+		 * report to mac80211 that sched scan stopped only if we won't
+		 * restart the firmware.
+		 */
+		if (!iwlwifi_mod_params.fw_restart)
+			ieee80211_sched_scan_stopped(mld->hw);
+	}
+
+	uid = iwl_mld_scan_uid_by_status(mld, IWL_MLD_SCAN_INT_MLO);
+	if (uid >= 0) {
+		IWL_DEBUG_SCAN(mld, "Internal MLO scan aborted\n");
+		mld->scan.uid_status[uid] = IWL_MLD_SCAN_NONE;
+	}
+
+	BUILD_BUG_ON(IWL_MLD_SCAN_NONE != 0);
+	memset(mld->scan.uid_status, 0, sizeof(mld->scan.uid_status));
+}
+
+int iwl_mld_alloc_scan_cmd(struct iwl_mld *mld)
+{
+	u8 scan_cmd_ver = iwl_fw_lookup_cmd_ver(mld->fw, SCAN_REQ_UMAC,
+						IWL_FW_CMD_VER_UNKNOWN);
+	size_t scan_cmd_size;
+
+	if (scan_cmd_ver == 17) {
+		scan_cmd_size = sizeof(struct iwl_scan_req_umac_v17);
+	} else {
+		IWL_ERR(mld, "Unexpected scan cmd version %d\n", scan_cmd_ver);
+		return -EINVAL;
+	}
+
+	mld->scan.cmd = kmalloc(scan_cmd_size, GFP_KERNEL);
+	if (!mld->scan.cmd)
+		return -ENOMEM;
+
+	mld->scan.cmd_size = scan_cmd_size;
+
+	return 0;
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/scan.h b/drivers/net/wireless/intel/iwlwifi/mld/scan.h
new file mode 100644
index 0000000000000..3ae940d55065c
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/scan.h
@@ -0,0 +1,136 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2024-2025 Intel Corporation
+ */
+#ifndef __iwl_mld_scan_h__
+#define __iwl_mld_scan_h__
+
+int iwl_mld_alloc_scan_cmd(struct iwl_mld *mld);
+
+int iwl_mld_regular_scan_start(struct iwl_mld *mld, struct ieee80211_vif *vif,
+			       struct cfg80211_scan_request *req,
+			       struct ieee80211_scan_ies *ies);
+
+void iwl_mld_int_mlo_scan(struct iwl_mld *mld, struct ieee80211_vif *vif);
+
+void iwl_mld_handle_scan_iter_complete_notif(struct iwl_mld *mld,
+					     struct iwl_rx_packet *pkt);
+
+int iwl_mld_scan_stop(struct iwl_mld *mld, int type, bool notify);
+
+int iwl_mld_sched_scan_start(struct iwl_mld *mld,
+			     struct ieee80211_vif *vif,
+			     struct cfg80211_sched_scan_request *req,
+			     struct ieee80211_scan_ies *ies,
+			     int type);
+
+void iwl_mld_handle_match_found_notif(struct iwl_mld *mld,
+				      struct iwl_rx_packet *pkt);
+
+void iwl_mld_handle_scan_complete_notif(struct iwl_mld *mld,
+					struct iwl_rx_packet *pkt);
+
+#define WFA_TPC_IE_LEN 9
+
+static inline int iwl_mld_scan_max_template_size(void)
+{
+#define MAC_HDR_LEN 24
+#define DS_IE_LEN 3
+#define SSID_IE_LEN 2
+
+/* the driver creates the 802.11 header, WFA TPC IE, DS parameter and SSID IE */
+#define
DRIVER_TOTAL_IES_LEN \ + (MAC_HDR_LEN + WFA_TPC_IE_LEN + DS_IE_LEN + SSID_IE_LEN) + + BUILD_BUG_ON(SCAN_OFFLOAD_PROBE_REQ_SIZE < DRIVER_TOTAL_IES_LEN); + + return SCAN_OFFLOAD_PROBE_REQ_SIZE - DRIVER_TOTAL_IES_LEN; +} + +void iwl_mld_report_scan_aborted(struct iwl_mld *mld); + +enum iwl_mld_scan_status { + IWL_MLD_SCAN_NONE = 0, + IWL_MLD_SCAN_REGULAR = BIT(0), + IWL_MLD_SCAN_SCHED = BIT(1), + IWL_MLD_SCAN_NETDETECT = BIT(2), + IWL_MLD_SCAN_INT_MLO = BIT(3), +}; + +/* enum iwl_mld_pass_all_sched_results_states - Defines the states for + * handling/passing scheduled scan results to mac80211 + * @SCHED_SCAN_PASS_ALL_STATE_DISABLED: Don't pass all scan results, only when + * a match found. + * @SCHED_SCAN_PASS_ALL_STATE_ENABLED: Pass all scan results is enabled + * (no filtering). + * @SCHED_SCAN_PASS_ALL_STATE_FOUND: A scan result is found, pass it on the + * next scan iteration complete notification. + */ +enum iwl_mld_pass_all_sched_results_states { + SCHED_SCAN_PASS_ALL_STATE_DISABLED, + SCHED_SCAN_PASS_ALL_STATE_ENABLED, + SCHED_SCAN_PASS_ALL_STATE_FOUND, +}; + +/** + * enum iwl_mld_traffic_load - Levels of traffic load + * + * @IWL_MLD_TRAFFIC_LOW: low traffic load + * @IWL_MLD_TRAFFIC_MEDIUM: medium traffic load + * @IWL_MLD_TRAFFIC_HIGH: high traffic load + */ +enum iwl_mld_traffic_load { + IWL_MLD_TRAFFIC_LOW, + IWL_MLD_TRAFFIC_MEDIUM, + IWL_MLD_TRAFFIC_HIGH, +}; + +/** + * struct iwl_mld_scan - Scan data + * @status: scan status, a combination of %enum iwl_mld_scan_status, + * reflects the %scan.uid_status array. + * @uid_status: array to track the scan status per uid. + * @start_tsf: start time of last scan in TSF of the link that requested + * the scan. + * @last_ebs_failed: true if the last EBS (Energy Based Scan) failed. + * @pass_all_sched_res: see %enum iwl_mld_pass_all_sched_results_states. + * @fw_link_id: the current (regular) scan fw link id, used by scan + * complete notif. + * @traffic_load: traffic load related data + * @traffic_load.last_stats_ts_usec: The timestamp of the last statistics + * notification, used to calculate the elapsed time between two + * notifications and determine the traffic load + * @traffic_load.status: The current traffic load status, see + * &enum iwl_mld_traffic_load + * @cmd_size: size of %cmd. + * @cmd: pointer to scan cmd buffer (allocated once in op mode start). + * @last_6ghz_passive_jiffies: stores the last 6GHz passive scan time + * in jiffies. + * @last_start_time_jiffies: stores the last start time in jiffies + * (interface up/reset/resume). + * @last_mlo_scan_time: start time of the last MLO scan in nanoseconds since + * boot. 
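+ *
+ * (Informative note, inferred from the grouping below: fields inside the
+ * zeroed_on_hw_restart group are cleared when the firmware restarts, as
+ * done via CLEANUP_STRUCT() for the STA structures later in this series,
+ * while the fields after the group, e.g. @cmd and @cmd_size, survive a fw
+ * restart.)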
+ */
+struct iwl_mld_scan {
+	/* Add here fields that need clean up on restart */
+	struct_group(zeroed_on_hw_restart,
+		unsigned int status;
+		u32 uid_status[IWL_MAX_UMAC_SCANS];
+		u64 start_tsf;
+		bool last_ebs_failed;
+		enum iwl_mld_pass_all_sched_results_states pass_all_sched_res;
+		u8 fw_link_id;
+		struct {
+			u32 last_stats_ts_usec;
+			enum iwl_mld_traffic_load status;
+		} traffic_load;
+	);
+	/* And here fields that survive a fw restart */
+	size_t cmd_size;
+	void *cmd;
+	unsigned long last_6ghz_passive_jiffies;
+	unsigned long last_start_time_jiffies;
+	unsigned long last_mlo_scan_time;
+};
+
+#endif /* __iwl_mld_scan_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/session-protect.c b/drivers/net/wireless/intel/iwlwifi/mld/session-protect.c
new file mode 100644
index 0000000000000..dbb5615dc3f64
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/session-protect.c
@@ -0,0 +1,222 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2024 Intel Corporation
+ */
+#include "session-protect.h"
+#include "fw/api/time-event.h"
+#include "fw/api/context.h"
+#include "iface.h"
+#include
+
+void iwl_mld_handle_session_prot_notif(struct iwl_mld *mld,
+				       struct iwl_rx_packet *pkt)
+{
+	struct iwl_session_prot_notif *notif = (void *)pkt->data;
+	int fw_link_id = le32_to_cpu(notif->mac_link_id);
+	struct ieee80211_bss_conf *link_conf =
+		iwl_mld_fw_id_to_link_conf(mld, fw_link_id);
+	struct ieee80211_vif *vif;
+	struct iwl_mld_vif *mld_vif;
+	struct iwl_mld_session_protect *session_protect;
+
+	if (WARN_ON(!link_conf))
+		return;
+
+	vif = link_conf->vif;
+	mld_vif = iwl_mld_vif_from_mac80211(vif);
+	session_protect = &mld_vif->session_protect;
+
+	if (!le32_to_cpu(notif->status)) {
+		memset(session_protect, 0, sizeof(*session_protect));
+	} else if (le32_to_cpu(notif->start)) {
+		/* End_jiffies indicates an active session */
+		session_protect->session_requested = false;
+		session_protect->end_jiffies =
+			TU_TO_EXP_TIME(session_protect->duration);
+		/* !session_protect->end_jiffies means inactive session */
+		if (!session_protect->end_jiffies)
+			session_protect->end_jiffies = 1;
+	} else {
+		memset(session_protect, 0, sizeof(*session_protect));
+	}
+}
+
+static int _iwl_mld_schedule_session_protection(struct iwl_mld *mld,
+						struct ieee80211_vif *vif,
+						u32 duration, u32 min_duration,
+						int link_id)
+{
+	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+	struct iwl_mld_link *link =
+		iwl_mld_link_dereference_check(mld_vif, link_id);
+	struct iwl_mld_session_protect *session_protect =
+		&mld_vif->session_protect;
+	struct iwl_session_prot_cmd cmd = {
+		.id_and_color = cpu_to_le32(link->fw_id),
+		.action = cpu_to_le32(FW_CTXT_ACTION_ADD),
+		.conf_id = cpu_to_le32(SESSION_PROTECT_CONF_ASSOC),
+		.duration_tu = cpu_to_le32(MSEC_TO_TU(duration)),
+	};
+	int ret;
+
+	lockdep_assert_wiphy(mld->wiphy);
+
+	WARN(hweight16(vif->active_links) > 1,
+	     "Session protection isn't allowed with more than one active link");
+
+	if (session_protect->end_jiffies &&
+	    time_after(session_protect->end_jiffies,
+		       TU_TO_EXP_TIME(min_duration))) {
+		IWL_DEBUG_TE(mld, "We have ample time in the current session: %u\n",
+			     jiffies_to_msecs(session_protect->end_jiffies -
+					      jiffies));
+		return -EALREADY;
+	}
+
+	IWL_DEBUG_TE(mld, "Add a new session protection, duration %d TU\n",
+		     le32_to_cpu(cmd.duration_tu));
+
+	ret = iwl_mld_send_cmd_pdu(mld, WIDE_ID(MAC_CONF_GROUP,
+						SESSION_PROTECTION_CMD), &cmd);
+
+	if (ret)
+		return ret;
+
+	/* end_jiffies will be updated when handling
session_prot_notif */ + session_protect->end_jiffies = 0; + session_protect->duration = duration; + session_protect->session_requested = true; + + return 0; +} + +void iwl_mld_schedule_session_protection(struct iwl_mld *mld, + struct ieee80211_vif *vif, + u32 duration, u32 min_duration, + int link_id) +{ + int ret; + + ret = _iwl_mld_schedule_session_protection(mld, vif, duration, + min_duration, link_id); + if (ret && ret != -EALREADY) + IWL_ERR(mld, + "Couldn't send the SESSION_PROTECTION_CMD (%d)\n", + ret); +} + +struct iwl_mld_session_start_data { + struct iwl_mld *mld; + struct ieee80211_bss_conf *link_conf; + bool success; +}; + +static bool iwl_mld_session_start_fn(struct iwl_notif_wait_data *notif_wait, + struct iwl_rx_packet *pkt, void *_data) +{ + struct iwl_session_prot_notif *notif = (void *)pkt->data; + unsigned int pkt_len = iwl_rx_packet_payload_len(pkt); + struct iwl_mld_session_start_data *data = _data; + struct ieee80211_bss_conf *link_conf; + struct iwl_mld *mld = data->mld; + int fw_link_id; + + if (IWL_FW_CHECK(mld, pkt_len < sizeof(*notif), + "short session prot notif (%d)\n", + pkt_len)) + return false; + + fw_link_id = le32_to_cpu(notif->mac_link_id); + link_conf = iwl_mld_fw_id_to_link_conf(mld, fw_link_id); + + if (link_conf != data->link_conf) + return false; + + if (!le32_to_cpu(notif->status)) + return true; + + if (notif->start) { + data->success = true; + return true; + } + + return false; +} + +int iwl_mld_start_session_protection(struct iwl_mld *mld, + struct ieee80211_vif *vif, + u32 duration, u32 min_duration, + int link_id, unsigned long timeout) +{ + static const u16 start_notif[] = { SESSION_PROTECTION_NOTIF }; + struct iwl_notification_wait start_wait; + struct iwl_mld_session_start_data data = { + .mld = mld, + .link_conf = wiphy_dereference(mld->wiphy, + vif->link_conf[link_id]), + }; + int ret; + + if (WARN_ON(!data.link_conf)) + return -EINVAL; + + iwl_init_notification_wait(&mld->notif_wait, &start_wait, + start_notif, ARRAY_SIZE(start_notif), + iwl_mld_session_start_fn, &data); + + ret = _iwl_mld_schedule_session_protection(mld, vif, duration, + min_duration, link_id); + + if (ret) { + iwl_remove_notification(&mld->notif_wait, &start_wait); + return ret == -EALREADY ? 0 : ret; + } + + ret = iwl_wait_notification(&mld->notif_wait, &start_wait, timeout); + if (ret) + return ret; + return data.success ? 
0 : -EIO;
+}
+
+int iwl_mld_cancel_session_protection(struct iwl_mld *mld,
+				      struct ieee80211_vif *vif,
+				      int link_id)
+{
+	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
+	struct iwl_mld_link *link =
+		iwl_mld_link_dereference_check(mld_vif, link_id);
+	struct iwl_mld_session_protect *session_protect =
+		&mld_vif->session_protect;
+	struct iwl_session_prot_cmd cmd = {
+		.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE),
+		.conf_id = cpu_to_le32(SESSION_PROTECT_CONF_ASSOC),
+	};
+	int ret;
+
+	lockdep_assert_wiphy(mld->wiphy);
+
+	/* If there isn't an active session or a requested one for this
+	 * link, do nothing
+	 */
+	if (!session_protect->session_requested &&
+	    !session_protect->end_jiffies)
+		return 0;
+
+	if (WARN_ON(!link))
+		return -EINVAL;
+
+	cmd.id_and_color = cpu_to_le32(link->fw_id);
+
+	ret = iwl_mld_send_cmd_pdu(mld,
+				   WIDE_ID(MAC_CONF_GROUP,
+					   SESSION_PROTECTION_CMD), &cmd);
+	if (ret) {
+		IWL_ERR(mld,
+			"Couldn't send the SESSION_PROTECTION_CMD\n");
+		return ret;
+	}
+
+	memset(session_protect, 0, sizeof(*session_protect));
+
+	return 0;
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/session-protect.h b/drivers/net/wireless/intel/iwlwifi/mld/session-protect.h
new file mode 100644
index 0000000000000..642bec8451a16
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/session-protect.h
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2024-2025 Intel Corporation
+ */
+
+#ifndef __session_protect_h__
+#define __session_protect_h__
+
+#include "mld.h"
+#include "hcmd.h"
+#include
+#include "fw/api/mac-cfg.h"
+
+/**
+ * DOC: session protection
+ *
+ * Session protection is an API from the firmware that allows the driver to
+ * request time on medium. This is needed before the association when we need
+ * to be on medium for the association frame exchange. Once we configure the
+ * firmware as 'associated', the firmware will allocate time on medium without
+ * needing a session protection.
+ *
+ * TDLS discovery uses this API as well even after association to ensure that
+ * other activities internal to the firmware will not interrupt our presence
+ * on medium.
+ */
+
+/**
+ * struct iwl_mld_session_protect - session protection parameters
+ * @end_jiffies: expected end_jiffies of current session protection.
+ *	0 if not active
+ * @duration: the duration in TU of the current session
+ * @session_requested: A session protection command was sent and wasn't yet
+ *	answered
+ */
+struct iwl_mld_session_protect {
+	unsigned long end_jiffies;
+	u32 duration;
+	bool session_requested;
+};
+
+#define IWL_MLD_SESSION_PROTECTION_ASSOC_TIME_MS 900
+#define IWL_MLD_SESSION_PROTECTION_MIN_TIME_MS 400
+
+/**
+ * iwl_mld_handle_session_prot_notif - handles %SESSION_PROTECTION_NOTIF
+ * @mld: the mld component
+ * @pkt: the RX packet containing the notification
+ */
+void iwl_mld_handle_session_prot_notif(struct iwl_mld *mld,
+				       struct iwl_rx_packet *pkt);
+
+/**
+ * iwl_mld_schedule_session_protection - schedule a session protection
+ * @mld: the mld component
+ * @vif: the virtual interface for which the protection is issued
+ * @duration: the requested duration of the protection
+ * @min_duration: the minimum duration of the protection
+ * @link_id: The link to schedule a session protection for
+ */
+void iwl_mld_schedule_session_protection(struct iwl_mld *mld,
+					 struct ieee80211_vif *vif,
+					 u32 duration, u32 min_duration,
+					 int link_id);
+
+/**
+ * iwl_mld_start_session_protection - start a session protection
+ * @mld: the mld component
+ * @vif: the virtual interface for which the protection is issued
+ * @duration: the requested duration of the protection
+ * @min_duration: the minimum duration of the protection
+ * @link_id: The link to schedule a session protection for
+ * @timeout: timeout for waiting
+ *
+ * This schedules the session protection, and waits for it to start
+ * (with timeout).
+ *
+ * Returns: 0 if successful, error code otherwise
+ */
+int iwl_mld_start_session_protection(struct iwl_mld *mld,
+				     struct ieee80211_vif *vif,
+				     u32 duration, u32 min_duration,
+				     int link_id, unsigned long timeout);
+
+/**
+ * iwl_mld_cancel_session_protection - cancel the session protection.
+ * @mld: the mld component
+ * @vif: the virtual interface for which the session is issued
+ * @link_id: cancel the session protection for given link
+ *
+ * This function cancels the session protection, which is an act of good
+ * citizenship. If it is not needed anymore, it should be canceled, because
+ * the other mac contexts wait for the medium during that time.
+ *
+ * Returns: 0 if successful, error code otherwise
+ */
+int iwl_mld_cancel_session_protection(struct iwl_mld *mld,
+				      struct ieee80211_vif *vif,
+				      int link_id);
+
+#endif /* __session_protect_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/sta.c b/drivers/net/wireless/intel/iwlwifi/mld/sta.c
new file mode 100644
index 0000000000000..332a7aecec2d3
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/sta.c
@@ -0,0 +1,1289 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2024-2025 Intel Corporation
+ */
+
+#include
+#include
+
+#include "sta.h"
+#include "hcmd.h"
+#include "iface.h"
+#include "mlo.h"
+#include "key.h"
+#include "agg.h"
+#include "tlc.h"
+#include "fw/api/sta.h"
+#include "fw/api/mac.h"
+#include "fw/api/rx.h"
+
+int iwl_mld_fw_sta_id_from_link_sta(struct iwl_mld *mld,
+				    struct ieee80211_link_sta *link_sta)
+{
+	struct iwl_mld_link_sta *mld_link_sta;
+
+	/* This function should only be used with the wiphy lock held.
+	 * In other cases, it is not guaranteed that the link_sta will exist
+	 * in the driver too, and it is checked here.
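+	 * A negative return value (-ENOENT) therefore just means "no FW
+	 * station"; this is expected while recovering from a FW error, and
+	 * callers such as iwl_mld_fw_sta_id_mask() simply skip such link
+	 * stations. (Informative note, inferred from the code below.)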
+ */ + lockdep_assert_wiphy(mld->wiphy); + + /* This is not meant to be called with a NULL pointer */ + if (WARN_ON(!link_sta)) + return -ENOENT; + + mld_link_sta = iwl_mld_link_sta_from_mac80211(link_sta); + if (!mld_link_sta) { + WARN_ON(!iwl_mld_error_before_recovery(mld)); + return -ENOENT; + } + + return mld_link_sta->fw_id; +} + +static void +iwl_mld_fill_ampdu_size_and_dens(struct ieee80211_link_sta *link_sta, + struct ieee80211_bss_conf *link, + __le32 *tx_ampdu_max_size, + __le32 *tx_ampdu_spacing) +{ + u32 agg_size = 0, mpdu_dens = 0; + + if (WARN_ON(!link_sta || !link)) + return; + + /* Note that we always use only legacy & highest supported PPDUs, so + * of Draft P802.11be D.30 Table 10-12a--Fields used for calculating + * the maximum A-MPDU size of various PPDU types in different bands, + * we only need to worry about the highest supported PPDU type here. + */ + + if (link_sta->ht_cap.ht_supported) { + agg_size = link_sta->ht_cap.ampdu_factor; + mpdu_dens = link_sta->ht_cap.ampdu_density; + } + + if (link->chanreq.oper.chan->band == NL80211_BAND_6GHZ) { + /* overwrite HT values on 6 GHz */ + mpdu_dens = + le16_get_bits(link_sta->he_6ghz_capa.capa, + IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START); + agg_size = + le16_get_bits(link_sta->he_6ghz_capa.capa, + IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP); + } else if (link_sta->vht_cap.vht_supported) { + /* if VHT supported overwrite HT value */ + agg_size = + u32_get_bits(link_sta->vht_cap.cap, + IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK); + } + + /* D6.0 10.12.2 A-MPDU length limit rules + * A STA indicates the maximum length of the A-MPDU preEOF padding + * that it can receive in an HE PPDU in the Maximum A-MPDU Length + * Exponent field in its HT Capabilities, VHT Capabilities, + * and HE 6 GHz Band Capabilities elements (if present) and the + * Maximum AMPDU Length Exponent Extension field in its HE + * Capabilities element + */ + if (link_sta->he_cap.has_he) + agg_size += + u8_get_bits(link_sta->he_cap.he_cap_elem.mac_cap_info[3], + IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK); + + if (link_sta->eht_cap.has_eht) + agg_size += + u8_get_bits(link_sta->eht_cap.eht_cap_elem.mac_cap_info[1], + IEEE80211_EHT_MAC_CAP1_MAX_AMPDU_LEN_MASK); + + /* Limit to max A-MPDU supported by FW */ + agg_size = min_t(u32, agg_size, + STA_FLG_MAX_AGG_SIZE_4M >> STA_FLG_MAX_AGG_SIZE_SHIFT); + + *tx_ampdu_max_size = cpu_to_le32(agg_size); + *tx_ampdu_spacing = cpu_to_le32(mpdu_dens); +} + +static u8 iwl_mld_get_uapsd_acs(struct ieee80211_sta *sta) +{ + u8 uapsd_acs = 0; + + if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK) + uapsd_acs |= BIT(AC_BK); + if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE) + uapsd_acs |= BIT(AC_BE); + if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI) + uapsd_acs |= BIT(AC_VI); + if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO) + uapsd_acs |= BIT(AC_VO); + + return uapsd_acs | uapsd_acs << 4; +} + +static u8 iwl_mld_he_get_ppe_val(u8 *ppe, u8 ppe_pos_bit) +{ + u8 byte_num = ppe_pos_bit / 8; + u8 bit_num = ppe_pos_bit % 8; + u8 residue_bits; + u8 res; + + if (bit_num <= 5) + return (ppe[byte_num] >> bit_num) & + (BIT(IEEE80211_PPE_THRES_INFO_PPET_SIZE) - 1); + + /* If bit_num > 5, we have to combine bits with next byte. + * Calculate how many bits we need to take from current byte (called + * here "residue_bits"), and add them to bits from next byte. 
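+	 *
+	 * Worked example (informative, assuming
+	 * IEEE80211_PPE_THRES_INFO_PPET_SIZE == 3): for ppe_pos_bit == 14,
+	 * byte_num == 1 and bit_num == 6, so residue_bits == 2; the low two
+	 * bits of the result come from ppe[1] >> 6 and the remaining high
+	 * bit comes from ppe[2] & 0x1, shifted left by residue_bits.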
+ */ + + residue_bits = 8 - bit_num; + + res = (ppe[byte_num + 1] & + (BIT(IEEE80211_PPE_THRES_INFO_PPET_SIZE - residue_bits) - 1)) << + residue_bits; + res += (ppe[byte_num] >> bit_num) & (BIT(residue_bits) - 1); + + return res; +} + +static void iwl_mld_parse_ppe(struct iwl_mld *mld, + struct iwl_he_pkt_ext_v2 *pkt_ext, u8 nss, + u8 ru_index_bitmap, u8 *ppe, u8 ppe_pos_bit, + bool inheritance) +{ + /* FW currently supports only nss == MAX_HE_SUPP_NSS + * + * If nss > MAX: we can ignore values we don't support + * If nss < MAX: we can set zeros in other streams + */ + if (nss > MAX_HE_SUPP_NSS) { + IWL_DEBUG_INFO(mld, "Got NSS = %d - trimming to %d\n", nss, + MAX_HE_SUPP_NSS); + nss = MAX_HE_SUPP_NSS; + } + + for (int i = 0; i < nss; i++) { + u8 ru_index_tmp = ru_index_bitmap << 1; + u8 low_th = IWL_HE_PKT_EXT_NONE, high_th = IWL_HE_PKT_EXT_NONE; + + for (u8 bw = 0; + bw < ARRAY_SIZE(pkt_ext->pkt_ext_qam_th[i]); + bw++) { + ru_index_tmp >>= 1; + + /* According to the 11be spec, if for a specific BW the PPE Thresholds + * isn't present - it should inherit the thresholds from the last + * BW for which we had PPE Thresholds. In 11ax though, we don't have + * this inheritance - continue in this case + */ + if (!(ru_index_tmp & 1)) { + if (inheritance) + goto set_thresholds; + else + continue; + } + + high_th = iwl_mld_he_get_ppe_val(ppe, ppe_pos_bit); + ppe_pos_bit += IEEE80211_PPE_THRES_INFO_PPET_SIZE; + low_th = iwl_mld_he_get_ppe_val(ppe, ppe_pos_bit); + ppe_pos_bit += IEEE80211_PPE_THRES_INFO_PPET_SIZE; + +set_thresholds: + pkt_ext->pkt_ext_qam_th[i][bw][0] = low_th; + pkt_ext->pkt_ext_qam_th[i][bw][1] = high_th; + } + } +} + +static void iwl_mld_set_pkt_ext_from_he_ppe(struct iwl_mld *mld, + struct ieee80211_link_sta *link_sta, + struct iwl_he_pkt_ext_v2 *pkt_ext, + bool inheritance) +{ + u8 nss = (link_sta->he_cap.ppe_thres[0] & + IEEE80211_PPE_THRES_NSS_MASK) + 1; + u8 *ppe = &link_sta->he_cap.ppe_thres[0]; + u8 ru_index_bitmap = + u8_get_bits(*ppe, + IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK); + /* Starting after PPE header */ + u8 ppe_pos_bit = IEEE80211_HE_PPE_THRES_INFO_HEADER_SIZE; + + iwl_mld_parse_ppe(mld, pkt_ext, nss, ru_index_bitmap, ppe, ppe_pos_bit, + inheritance); +} + +static int +iwl_mld_set_pkt_ext_from_nominal_padding(struct iwl_he_pkt_ext_v2 *pkt_ext, + u8 nominal_padding) +{ + int low_th = -1; + int high_th = -1; + + /* all the macros are the same for EHT and HE */ + switch (nominal_padding) { + case IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_0US: + low_th = IWL_HE_PKT_EXT_NONE; + high_th = IWL_HE_PKT_EXT_NONE; + break; + case IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_8US: + low_th = IWL_HE_PKT_EXT_BPSK; + high_th = IWL_HE_PKT_EXT_NONE; + break; + case IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_16US: + case IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_20US: + low_th = IWL_HE_PKT_EXT_NONE; + high_th = IWL_HE_PKT_EXT_BPSK; + break; + } + + if (low_th < 0 || high_th < 0) + return -EINVAL; + + /* Set the PPE thresholds accordingly */ + for (int i = 0; i < MAX_HE_SUPP_NSS; i++) { + for (u8 bw = 0; + bw < ARRAY_SIZE(pkt_ext->pkt_ext_qam_th[i]); + bw++) { + pkt_ext->pkt_ext_qam_th[i][bw][0] = low_th; + pkt_ext->pkt_ext_qam_th[i][bw][1] = high_th; + } + } + + return 0; +} + +static void iwl_mld_get_optimal_ppe_info(struct iwl_he_pkt_ext_v2 *pkt_ext, + u8 nominal_padding) +{ + for (int i = 0; i < MAX_HE_SUPP_NSS; i++) { + for (u8 bw = 0; bw < ARRAY_SIZE(pkt_ext->pkt_ext_qam_th[i]); + bw++) { + u8 *qam_th = &pkt_ext->pkt_ext_qam_th[i][bw][0]; + + if (nominal_padding 
>
+			    IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_8US &&
+			    qam_th[1] == IWL_HE_PKT_EXT_NONE)
+				qam_th[1] = IWL_HE_PKT_EXT_4096QAM;
+			else if (nominal_padding ==
+				 IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_8US &&
+				 qam_th[0] == IWL_HE_PKT_EXT_NONE &&
+				 qam_th[1] == IWL_HE_PKT_EXT_NONE)
+				qam_th[0] = IWL_HE_PKT_EXT_4096QAM;
+		}
+	}
+}
+
+static void iwl_mld_fill_pkt_ext(struct iwl_mld *mld,
+				 struct ieee80211_link_sta *link_sta,
+				 struct iwl_he_pkt_ext_v2 *pkt_ext)
+{
+	if (WARN_ON(!link_sta))
+		return;
+
+	/* Initialize the PPE thresholds to "None" (7), as described in Table
+	 * 9-262ac of 802.11ax/D3.0.
+	 */
+	memset(pkt_ext, IWL_HE_PKT_EXT_NONE, sizeof(*pkt_ext));
+
+	if (link_sta->eht_cap.has_eht) {
+		u8 nominal_padding =
+			u8_get_bits(link_sta->eht_cap.eht_cap_elem.phy_cap_info[5],
+				    IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_MASK);
+
+		/* If PPE Thresholds exist, parse them into a FW-familiar
+		 * format.
+		 */
+		if (link_sta->eht_cap.eht_cap_elem.phy_cap_info[5] &
+		    IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT) {
+			u8 nss = (link_sta->eht_cap.eht_ppe_thres[0] &
+				  IEEE80211_EHT_PPE_THRES_NSS_MASK) + 1;
+			u8 *ppe = &link_sta->eht_cap.eht_ppe_thres[0];
+			u8 ru_index_bitmap =
+				u16_get_bits(*ppe,
+					     IEEE80211_EHT_PPE_THRES_RU_INDEX_BITMASK_MASK);
+			/* Starting after PPE header */
+			u8 ppe_pos_bit = IEEE80211_EHT_PPE_THRES_INFO_HEADER_SIZE;
+
+			iwl_mld_parse_ppe(mld, pkt_ext, nss, ru_index_bitmap,
+					  ppe, ppe_pos_bit, true);
+		/* EHT PPE Thresholds don't exist - set the API according to
+		 * the HE PPE Thresholds
+		 */
+		} else if (link_sta->he_cap.he_cap_elem.phy_cap_info[6] &
+			   IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) {
+			/* Even though the HE Capabilities IE doesn't contain
+			 * PPE Thresholds for BW 320 MHz, thresholds for this
+			 * BW will be filled in with the same values as
+			 * 160 MHz, due to the inheritance, as required.
+			 */
+			iwl_mld_set_pkt_ext_from_he_ppe(mld, link_sta, pkt_ext,
+							true);
+
+			/* According to the requirements, for MCSs 12-13 the
+			 * maximum value between HE PPE Threshold and Common
+			 * Nominal Packet Padding needs to be taken
+			 */
+			iwl_mld_get_optimal_ppe_info(pkt_ext, nominal_padding);
+
+		/* If PPE Thresholds are present in neither the EHT IE nor
+		 * the HE IE - take the Thresholds from the Common Nominal
+		 * Packet Padding field
+		 */
+		} else {
+			iwl_mld_set_pkt_ext_from_nominal_padding(pkt_ext,
+								 nominal_padding);
+		}
+	} else if (link_sta->he_cap.has_he) {
+		/* If PPE Thresholds exist, parse them into a FW-familiar format. */
+		if (link_sta->he_cap.he_cap_elem.phy_cap_info[6] &
+		    IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) {
+			iwl_mld_set_pkt_ext_from_he_ppe(mld, link_sta, pkt_ext,
+							false);
+		/* PPE Thresholds don't exist - set the API PPE values
+		 * according to the Common Nominal Packet Padding field.
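+		 * (Informative summary: in this HE-only path the order of
+		 * preference is the HE PPE Thresholds when present, and
+		 * otherwise the Common Nominal Packet Padding field below.)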
+ */ + } else { + u8 nominal_padding = + u8_get_bits(link_sta->he_cap.he_cap_elem.phy_cap_info[9], + IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK); + if (nominal_padding != IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_RESERVED) + iwl_mld_set_pkt_ext_from_nominal_padding(pkt_ext, + nominal_padding); + } + } + + for (int i = 0; i < MAX_HE_SUPP_NSS; i++) { + for (int bw = 0; + bw < ARRAY_SIZE(*pkt_ext->pkt_ext_qam_th[i]); + bw++) { + u8 *qam_th = + &pkt_ext->pkt_ext_qam_th[i][bw][0]; + + IWL_DEBUG_HT(mld, + "PPE table: nss[%d] bw[%d] PPET8 = %d, PPET16 = %d\n", + i, bw, qam_th[0], qam_th[1]); + } + } +} + +static u32 iwl_mld_get_htc_flags(struct ieee80211_link_sta *link_sta) +{ + u8 *mac_cap_info = + &link_sta->he_cap.he_cap_elem.mac_cap_info[0]; + u32 htc_flags = 0; + + if (mac_cap_info[0] & IEEE80211_HE_MAC_CAP0_HTC_HE) + htc_flags |= IWL_HE_HTC_SUPPORT; + if ((mac_cap_info[1] & IEEE80211_HE_MAC_CAP1_LINK_ADAPTATION) || + (mac_cap_info[2] & IEEE80211_HE_MAC_CAP2_LINK_ADAPTATION)) { + u8 link_adap = + ((mac_cap_info[2] & + IEEE80211_HE_MAC_CAP2_LINK_ADAPTATION) << 1) + + (mac_cap_info[1] & + IEEE80211_HE_MAC_CAP1_LINK_ADAPTATION); + + if (link_adap == 2) + htc_flags |= + IWL_HE_HTC_LINK_ADAP_UNSOLICITED; + else if (link_adap == 3) + htc_flags |= IWL_HE_HTC_LINK_ADAP_BOTH; + } + if (mac_cap_info[2] & IEEE80211_HE_MAC_CAP2_BSR) + htc_flags |= IWL_HE_HTC_BSR_SUPP; + if (mac_cap_info[3] & IEEE80211_HE_MAC_CAP3_OMI_CONTROL) + htc_flags |= IWL_HE_HTC_OMI_SUPP; + if (mac_cap_info[4] & IEEE80211_HE_MAC_CAP4_BQR) + htc_flags |= IWL_HE_HTC_BQR_SUPP; + + return htc_flags; +} + +static int iwl_mld_send_sta_cmd(struct iwl_mld *mld, + const struct iwl_sta_cfg_cmd *cmd) +{ + int ret = iwl_mld_send_cmd_pdu(mld, + WIDE_ID(MAC_CONF_GROUP, STA_CONFIG_CMD), + cmd); + if (ret) + IWL_ERR(mld, "STA_CONFIG_CMD send failed, ret=0x%x\n", ret); + return ret; +} + +static int +iwl_mld_add_modify_sta_cmd(struct iwl_mld *mld, + struct ieee80211_link_sta *link_sta) +{ + struct ieee80211_sta *sta = link_sta->sta; + struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta); + struct ieee80211_bss_conf *link; + struct iwl_mld_link *mld_link; + struct iwl_sta_cfg_cmd cmd = {}; + int fw_id = iwl_mld_fw_sta_id_from_link_sta(mld, link_sta); + + lockdep_assert_wiphy(mld->wiphy); + + link = link_conf_dereference_protected(mld_sta->vif, + link_sta->link_id); + + mld_link = iwl_mld_link_from_mac80211(link); + + if (WARN_ON(!link || !mld_link) || fw_id < 0) + return -EINVAL; + + cmd.sta_id = cpu_to_le32(fw_id); + cmd.station_type = cpu_to_le32(mld_sta->sta_type); + cmd.link_id = cpu_to_le32(mld_link->fw_id); + + memcpy(&cmd.peer_mld_address, sta->addr, ETH_ALEN); + memcpy(&cmd.peer_link_address, link_sta->addr, ETH_ALEN); + + if (mld_sta->sta_state >= IEEE80211_STA_ASSOC) + cmd.assoc_id = cpu_to_le32(sta->aid); + + if (sta->mfp || mld_sta->sta_state < IEEE80211_STA_AUTHORIZED) + cmd.mfp = cpu_to_le32(1); + + switch (link_sta->rx_nss) { + case 1: + cmd.mimo = cpu_to_le32(0); + break; + case 2 ... 
8: + cmd.mimo = cpu_to_le32(1); + break; + } + + switch (link_sta->smps_mode) { + case IEEE80211_SMPS_AUTOMATIC: + case IEEE80211_SMPS_NUM_MODES: + WARN_ON(1); + break; + case IEEE80211_SMPS_STATIC: + /* override NSS */ + cmd.mimo = cpu_to_le32(0); + break; + case IEEE80211_SMPS_DYNAMIC: + cmd.mimo_protection = cpu_to_le32(1); + break; + case IEEE80211_SMPS_OFF: + /* nothing */ + break; + } + + iwl_mld_fill_ampdu_size_and_dens(link_sta, link, + &cmd.tx_ampdu_max_size, + &cmd.tx_ampdu_spacing); + + if (sta->wme) { + cmd.sp_length = + cpu_to_le32(sta->max_sp ? sta->max_sp * 2 : 128); + cmd.uapsd_acs = cpu_to_le32(iwl_mld_get_uapsd_acs(sta)); + } + + if (link_sta->he_cap.has_he) { + cmd.trig_rnd_alloc = + cpu_to_le32(link->uora_exists ? 1 : 0); + + /* PPE Thresholds */ + iwl_mld_fill_pkt_ext(mld, link_sta, &cmd.pkt_ext); + + /* HTC flags */ + cmd.htc_flags = + cpu_to_le32(iwl_mld_get_htc_flags(link_sta)); + + if (link_sta->he_cap.he_cap_elem.mac_cap_info[2] & + IEEE80211_HE_MAC_CAP2_ACK_EN) + cmd.ack_enabled = cpu_to_le32(1); + } + + return iwl_mld_send_sta_cmd(mld, &cmd); +} + +IWL_MLD_ALLOC_FN(link_sta, link_sta) + +static int +iwl_mld_add_link_sta(struct iwl_mld *mld, struct ieee80211_link_sta *link_sta) +{ + struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(link_sta->sta); + struct iwl_mld_link_sta *mld_link_sta; + int ret; + u8 fw_id; + + lockdep_assert_wiphy(mld->wiphy); + + /* We will fail to add it to the FW anyway */ + if (iwl_mld_error_before_recovery(mld)) + return -ENODEV; + + mld_link_sta = iwl_mld_link_sta_from_mac80211(link_sta); + + /* We need to preserve the fw sta ids during a restart, since the fw + * will recover SN/PN for them, this is why the mld_link_sta exists. + */ + if (mld_link_sta) { + /* But if we are not restarting, this is not OK */ + WARN_ON(!mld->fw_status.in_hw_restart); + + /* Avoid adding a STA that is already in FW to avoid an assert */ + if (WARN_ON(mld_link_sta->in_fw)) + return -EINVAL; + + fw_id = mld_link_sta->fw_id; + goto add_to_fw; + } + + /* Allocate a fw id and map it to the link_sta */ + ret = iwl_mld_allocate_link_sta_fw_id(mld, &fw_id, link_sta); + if (ret) + return ret; + + if (link_sta == &link_sta->sta->deflink) { + mld_link_sta = &mld_sta->deflink; + } else { + mld_link_sta = kzalloc(sizeof(*mld_link_sta), GFP_KERNEL); + if (!mld_link_sta) + return -ENOMEM; + } + + mld_link_sta->fw_id = fw_id; + rcu_assign_pointer(mld_sta->link[link_sta->link_id], mld_link_sta); + +add_to_fw: + ret = iwl_mld_add_modify_sta_cmd(mld, link_sta); + if (ret) { + RCU_INIT_POINTER(mld->fw_id_to_link_sta[fw_id], NULL); + RCU_INIT_POINTER(mld_sta->link[link_sta->link_id], NULL); + if (link_sta != &link_sta->sta->deflink) + kfree(mld_link_sta); + return ret; + } + mld_link_sta->in_fw = true; + + return 0; +} + +static int iwl_mld_rm_sta_from_fw(struct iwl_mld *mld, u8 fw_sta_id) +{ + struct iwl_remove_sta_cmd cmd = { + .sta_id = cpu_to_le32(fw_sta_id), + }; + int ret; + + ret = iwl_mld_send_cmd_pdu(mld, + WIDE_ID(MAC_CONF_GROUP, STA_REMOVE_CMD), + &cmd); + if (ret) + IWL_ERR(mld, "Failed to remove station. 
Id=%d\n", fw_sta_id); + + return ret; +} + +static void +iwl_mld_remove_link_sta(struct iwl_mld *mld, + struct ieee80211_link_sta *link_sta) +{ + struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(link_sta->sta); + struct iwl_mld_link_sta *mld_link_sta = + iwl_mld_link_sta_from_mac80211(link_sta); + + if (WARN_ON(!mld_link_sta)) + return; + + iwl_mld_rm_sta_from_fw(mld, mld_link_sta->fw_id); + mld_link_sta->in_fw = false; + + /* Now that the STA doesn't exist in FW, we don't expect any new + * notifications for it. Cancel the ones that are already pending + */ + iwl_mld_cancel_notifications_of_object(mld, IWL_MLD_OBJECT_TYPE_STA, + mld_link_sta->fw_id); + + /* This will not be done upon reconfig, so do it also when + * failed to remove from fw + */ + RCU_INIT_POINTER(mld->fw_id_to_link_sta[mld_link_sta->fw_id], NULL); + RCU_INIT_POINTER(mld_sta->link[link_sta->link_id], NULL); + if (mld_link_sta != &mld_sta->deflink) + kfree_rcu(mld_link_sta, rcu_head); +} + +static void iwl_mld_set_max_amsdu_len(struct iwl_mld *mld, + struct ieee80211_link_sta *link_sta) +{ + const struct ieee80211_sta_ht_cap *ht_cap = &link_sta->ht_cap; + + /* For EHT, HE and VHT we can use the value as it was calculated by + * mac80211. For HT, mac80211 doesn't enforce to 4095, so force it + * here + */ + if (link_sta->eht_cap.has_eht || link_sta->he_cap.has_he || + link_sta->vht_cap.vht_supported || + !ht_cap->ht_supported || + !(ht_cap->cap & IEEE80211_HT_CAP_MAX_AMSDU)) + return; + + link_sta->agg.max_amsdu_len = IEEE80211_MAX_MPDU_LEN_HT_BA; + ieee80211_sta_recalc_aggregates(link_sta->sta); +} + +int iwl_mld_update_all_link_stations(struct iwl_mld *mld, + struct ieee80211_sta *sta) +{ + struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta); + struct ieee80211_link_sta *link_sta; + int link_id; + + for_each_sta_active_link(mld_sta->vif, sta, link_sta, link_id) { + int ret = iwl_mld_add_modify_sta_cmd(mld, link_sta); + + if (ret) + return ret; + + if (mld_sta->sta_state == IEEE80211_STA_ASSOC) + iwl_mld_set_max_amsdu_len(mld, link_sta); + } + return 0; +} + +static void iwl_mld_destroy_sta(struct ieee80211_sta *sta) +{ + struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta); + + kfree(mld_sta->dup_data); + kfree(mld_sta->mpdu_counters); +} + +static int +iwl_mld_alloc_dup_data(struct iwl_mld *mld, struct iwl_mld_sta *mld_sta) +{ + struct iwl_mld_rxq_dup_data *dup_data; + + if (mld->fw_status.in_hw_restart) + return 0; + + dup_data = kcalloc(mld->trans->num_rx_queues, sizeof(*dup_data), + GFP_KERNEL); + if (!dup_data) + return -ENOMEM; + + /* Initialize all the last_seq values to 0xffff which can never + * compare equal to the frame's seq_ctrl in the check in + * iwl_mld_is_dup() since the lower 4 bits are the fragment + * number and fragmented packets don't reach that function. + * + * This thus allows receiving a packet with seqno 0 and the + * retry bit set as the very first packet on a new TID. 
+	 */
+	for (int q = 0; q < mld->trans->num_rx_queues; q++)
+		memset(dup_data[q].last_seq, 0xff,
+		       sizeof(dup_data[q].last_seq));
+	mld_sta->dup_data = dup_data;
+
+	return 0;
+}
+
+static void iwl_mld_alloc_mpdu_counters(struct iwl_mld *mld,
+					struct ieee80211_sta *sta)
+{
+	struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta);
+	struct ieee80211_vif *vif = mld_sta->vif;
+
+	if (mld->fw_status.in_hw_restart)
+		return;
+
+	/* MPDUs are counted only when EMLSR is possible */
+	if (ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_STATION ||
+	    sta->tdls || !ieee80211_vif_is_mld(vif))
+		return;
+
+	mld_sta->mpdu_counters = kcalloc(mld->trans->num_rx_queues,
+					 sizeof(*mld_sta->mpdu_counters),
+					 GFP_KERNEL);
+	if (!mld_sta->mpdu_counters)
+		return;
+
+	for (int q = 0; q < mld->trans->num_rx_queues; q++)
+		spin_lock_init(&mld_sta->mpdu_counters[q].lock);
+}
+
+static int
+iwl_mld_init_sta(struct iwl_mld *mld, struct ieee80211_sta *sta,
+		 struct ieee80211_vif *vif, enum iwl_fw_sta_type type)
+{
+	struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta);
+
+	mld_sta->vif = vif;
+	mld_sta->sta_type = type;
+	mld_sta->mld = mld;
+
+	if (!mld->fw_status.in_hw_restart)
+		for (int i = 0; i < ARRAY_SIZE(sta->txq); i++)
+			iwl_mld_init_txq(iwl_mld_txq_from_mac80211(sta->txq[i]));
+
+	iwl_mld_alloc_mpdu_counters(mld, sta);
+
+	iwl_mld_toggle_tx_ant(mld, &mld_sta->data_tx_ant);
+
+	return iwl_mld_alloc_dup_data(mld, mld_sta);
+}
+
+int iwl_mld_add_sta(struct iwl_mld *mld, struct ieee80211_sta *sta,
+		    struct ieee80211_vif *vif, enum iwl_fw_sta_type type)
+{
+	struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta);
+	struct ieee80211_link_sta *link_sta;
+	int link_id;
+	int ret;
+
+	ret = iwl_mld_init_sta(mld, sta, vif, type);
+	if (ret)
+		return ret;
+
+	/* We could have added only the deflink link_sta, but it would not
+	 * work in the restart case if the single link that is active during
+	 * reconfig is not the deflink one.
+	 */
+	for_each_sta_active_link(mld_sta->vif, sta, link_sta, link_id) {
+		ret = iwl_mld_add_link_sta(mld, link_sta);
+		if (ret)
+			goto destroy_sta;
+	}
+
+	return 0;
+
+destroy_sta:
+	iwl_mld_destroy_sta(sta);
+
+	return ret;
+}
+
+void iwl_mld_flush_sta_txqs(struct iwl_mld *mld, struct ieee80211_sta *sta)
+{
+	struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta);
+	struct ieee80211_link_sta *link_sta;
+	int link_id;
+
+	for_each_sta_active_link(mld_sta->vif, sta, link_sta, link_id) {
+		int fw_sta_id = iwl_mld_fw_sta_id_from_link_sta(mld, link_sta);
+
+		if (fw_sta_id < 0)
+			continue;
+
+		iwl_mld_flush_link_sta_txqs(mld, fw_sta_id);
+	}
+}
+
+void iwl_mld_wait_sta_txqs_empty(struct iwl_mld *mld, struct ieee80211_sta *sta)
+{
+	/* Avoid a warning in iwl_trans_wait_txq_empty if we are anyway on
+	 * the way to a restart.
+	 */
+	if (iwl_mld_error_before_recovery(mld))
+		return;
+
+	for (int i = 0; i < ARRAY_SIZE(sta->txq); i++) {
+		struct iwl_mld_txq *mld_txq =
+			iwl_mld_txq_from_mac80211(sta->txq[i]);
+
+		if (!mld_txq->status.allocated)
+			continue;
+
+		iwl_trans_wait_txq_empty(mld->trans, mld_txq->fw_id);
+	}
+}
+
+void iwl_mld_remove_sta(struct iwl_mld *mld, struct ieee80211_sta *sta)
+{
+	struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta);
+	struct ieee80211_vif *vif = mld_sta->vif;
+	struct ieee80211_link_sta *link_sta;
+	u8 link_id;
+
+	lockdep_assert_wiphy(mld->wiphy);
+
+	/* Tell the HW to flush the queues */
+	iwl_mld_flush_sta_txqs(mld, sta);
+
+	/* Wait for trans to empty its queues */
+	iwl_mld_wait_sta_txqs_empty(mld, sta);
+
+	/* Now we can remove the queues */
+	for (int i = 0; i < ARRAY_SIZE(sta->txq); i++)
+		iwl_mld_remove_txq(mld, sta->txq[i]);
+
+	for_each_sta_active_link(vif, sta, link_sta, link_id) {
+		/* mac80211 will remove the groupwise keys after the sta is
+		 * removed, but FW expects all the keys to be removed before
+		 * the STA is, so remove them all here.
+		 */
+		if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls)
+			iwl_mld_remove_ap_keys(mld, vif, sta, link_id);
+
+		/* Remove the link_sta */
+		iwl_mld_remove_link_sta(mld, link_sta);
+	}
+
+	iwl_mld_destroy_sta(sta);
+}
+
+u32 iwl_mld_fw_sta_id_mask(struct iwl_mld *mld, struct ieee80211_sta *sta)
+{
+	struct ieee80211_vif *vif = iwl_mld_sta_from_mac80211(sta)->vif;
+	struct ieee80211_link_sta *link_sta;
+	unsigned int link_id;
+	u32 result = 0;
+
+	KUNIT_STATIC_STUB_REDIRECT(iwl_mld_fw_sta_id_mask, mld, sta);
+
+	/* This function should only be used with the wiphy lock held.
+	 * In other cases, it is not guaranteed that the link_sta will exist
+	 * in the driver too, and it is checked in
+	 * iwl_mld_fw_sta_id_from_link_sta.
+	 */
+	lockdep_assert_wiphy(mld->wiphy);
+
+	for_each_sta_active_link(vif, sta, link_sta, link_id) {
+		int fw_id = iwl_mld_fw_sta_id_from_link_sta(mld, link_sta);
+
+		if (!(fw_id < 0))
+			result |= BIT(fw_id);
+	}
+
+	return result;
+}
+EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_mld_fw_sta_id_mask);
+
+static void iwl_mld_count_mpdu(struct ieee80211_link_sta *link_sta, int queue,
+			       u32 count, bool tx)
+{
+	struct iwl_mld_per_q_mpdu_counter *queue_counter;
+	struct iwl_mld_per_link_mpdu_counter *link_counter;
+	struct iwl_mld_vif *mld_vif;
+	struct iwl_mld_sta *mld_sta;
+	struct iwl_mld_link *mld_link;
+	struct iwl_mld *mld;
+	int total_mpdus = 0;
+
+	if (WARN_ON(!link_sta))
+		return;
+
+	mld_sta = iwl_mld_sta_from_mac80211(link_sta->sta);
+	if (!mld_sta->mpdu_counters)
+		return;
+
+	mld_vif = iwl_mld_vif_from_mac80211(mld_sta->vif);
+	mld_link = iwl_mld_link_dereference_check(mld_vif, link_sta->link_id);
+
+	if (WARN_ON_ONCE(!mld_link))
+		return;
+
+	queue_counter = &mld_sta->mpdu_counters[queue];
+
+	mld = mld_vif->mld;
+
+	/* If the window is over, first clear the counters.
+	 * When we are not blocked by TPT, the window is managed by
+	 * check_tpt_wk.
+	 */
+	if ((mld_vif->emlsr.blocked_reasons & IWL_MLD_EMLSR_BLOCKED_TPT) &&
+	    time_is_before_jiffies(queue_counter->window_start_time +
+				   IWL_MLD_TPT_COUNT_WINDOW)) {
+		memset(queue_counter->per_link, 0,
+		       sizeof(queue_counter->per_link));
+		queue_counter->window_start_time = jiffies;
+
+		IWL_DEBUG_INFO(mld, "MPDU counters are cleared\n");
+	}
+
+	link_counter = &queue_counter->per_link[mld_link->fw_id];
+
+	spin_lock_bh(&queue_counter->lock);
+
+	/* Update the statistics for this TPT measurement window */
+	if (tx)
+		link_counter->tx += count;
+	else
+		link_counter->rx += count;
+
+	/*
+	 * Next, evaluate whether we should queue an unblock;
+	 * skip this if we are not blocked due to low throughput.
+	 */
+	if (!(mld_vif->emlsr.blocked_reasons & IWL_MLD_EMLSR_BLOCKED_TPT))
+		goto unlock;
+
+	for (int i = 0; i <= IWL_FW_MAX_LINK_ID; i++)
+		total_mpdus += tx ? queue_counter->per_link[i].tx :
+				    queue_counter->per_link[i].rx;
+
+	/* Unblock is already queued if the threshold was reached before */
+	if (total_mpdus - count >= IWL_MLD_ENTER_EMLSR_TPT_THRESH)
+		goto unlock;
+
+	if (total_mpdus >= IWL_MLD_ENTER_EMLSR_TPT_THRESH)
+		wiphy_work_queue(mld->wiphy, &mld_vif->emlsr.unblock_tpt_wk);
+
+unlock:
+	spin_unlock_bh(&queue_counter->lock);
+}
+
+/* must be called under rcu_read_lock() */
+void iwl_mld_count_mpdu_rx(struct ieee80211_link_sta *link_sta, int queue,
+			   u32 count)
+{
+	iwl_mld_count_mpdu(link_sta, queue, count, false);
+}
+
+/* must be called under rcu_read_lock() */
+void iwl_mld_count_mpdu_tx(struct ieee80211_link_sta *link_sta, u32 count)
+{
+	/* use queue 0 for all TX */
+	iwl_mld_count_mpdu(link_sta, 0, count, true);
+}
+
+static int iwl_mld_allocate_internal_txq(struct iwl_mld *mld,
+					 struct iwl_mld_int_sta *internal_sta,
+					 u8 tid)
+{
+	u32 sta_mask = BIT(internal_sta->sta_id);
+	int queue, size;
+
+	size = max_t(u32, IWL_MGMT_QUEUE_SIZE,
+		     mld->trans->cfg->min_txq_size);
+
+	queue = iwl_trans_txq_alloc(mld->trans, 0, sta_mask, tid, size,
+				    IWL_WATCHDOG_DISABLED);
+
+	if (queue >= 0)
+		IWL_DEBUG_TX_QUEUES(mld,
+				    "Enabling TXQ #%d for sta mask 0x%x tid %d\n",
+				    queue, sta_mask, tid);
+	return queue;
+}
+
+static int iwl_mld_send_aux_sta_cmd(struct iwl_mld *mld,
+				    const struct iwl_mld_int_sta *internal_sta)
+{
+	struct iwl_aux_sta_cmd cmd = {
+		.sta_id = cpu_to_le32(internal_sta->sta_id),
+		/* TODO: CDB - properly set the lmac_id */
+		.lmac_id = cpu_to_le32(IWL_LMAC_24G_INDEX),
+	};
+
+	return iwl_mld_send_cmd_pdu(mld, WIDE_ID(MAC_CONF_GROUP, AUX_STA_CMD),
+				    &cmd);
+}
+
+static int
+iwl_mld_add_internal_sta_to_fw(struct iwl_mld *mld,
+			       const struct iwl_mld_int_sta *internal_sta,
+			       u8 fw_link_id,
+			       const u8 *addr)
+{
+	struct iwl_sta_cfg_cmd cmd = {};
+
+	if (internal_sta->sta_type == STATION_TYPE_AUX)
+		return iwl_mld_send_aux_sta_cmd(mld, internal_sta);
+
+	cmd.sta_id = cpu_to_le32((u8)internal_sta->sta_id);
+	cmd.link_id = cpu_to_le32(fw_link_id);
+	cmd.station_type = cpu_to_le32(internal_sta->sta_type);
+
+	/* FW doesn't allow adding an IGTK/BIGTK if the sta isn't marked as
+	 * MFP. On the other hand, FW will never check this flag during RX
+	 * since an AP/GO doesn't receive protected broadcast management
+	 * frames. So, we can set it unconditionally.
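+	 * (Informative note: the flag therefore only has to be correct for
+	 * key installation; it has no RX-path effect for these station
+	 * types.)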
+ */ + if (internal_sta->sta_type == STATION_TYPE_BCAST_MGMT) + cmd.mfp = cpu_to_le32(1); + + if (addr) { + memcpy(cmd.peer_mld_address, addr, ETH_ALEN); + memcpy(cmd.peer_link_address, addr, ETH_ALEN); + } + + return iwl_mld_send_sta_cmd(mld, &cmd); +} + +static int iwl_mld_add_internal_sta(struct iwl_mld *mld, + struct iwl_mld_int_sta *internal_sta, + enum iwl_fw_sta_type sta_type, + u8 fw_link_id, const u8 *addr, u8 tid) +{ + int ret, queue_id; + + ret = iwl_mld_allocate_link_sta_fw_id(mld, + &internal_sta->sta_id, + ERR_PTR(-EINVAL)); + if (ret) + return ret; + + internal_sta->sta_type = sta_type; + + ret = iwl_mld_add_internal_sta_to_fw(mld, internal_sta, fw_link_id, + addr); + if (ret) + goto err; + + queue_id = iwl_mld_allocate_internal_txq(mld, internal_sta, tid); + if (queue_id < 0) { + iwl_mld_rm_sta_from_fw(mld, internal_sta->sta_id); + ret = queue_id; + goto err; + } + + internal_sta->queue_id = queue_id; + + return 0; +err: + iwl_mld_free_internal_sta(mld, internal_sta); + return ret; +} + +int iwl_mld_add_bcast_sta(struct iwl_mld *mld, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link) +{ + struct iwl_mld_link *mld_link = iwl_mld_link_from_mac80211(link); + const u8 bcast_addr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}; + const u8 *addr; + + if (WARN_ON(!mld_link)) + return -EINVAL; + + if (WARN_ON(vif->type != NL80211_IFTYPE_AP && + vif->type != NL80211_IFTYPE_ADHOC)) + return -EINVAL; + + addr = vif->type == NL80211_IFTYPE_ADHOC ? link->bssid : bcast_addr; + + return iwl_mld_add_internal_sta(mld, &mld_link->bcast_sta, + STATION_TYPE_BCAST_MGMT, + mld_link->fw_id, addr, + IWL_MGMT_TID); +} + +int iwl_mld_add_mcast_sta(struct iwl_mld *mld, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link) +{ + struct iwl_mld_link *mld_link = iwl_mld_link_from_mac80211(link); + const u8 mcast_addr[] = {0x03, 0x00, 0x00, 0x00, 0x00, 0x00}; + + if (WARN_ON(!mld_link)) + return -EINVAL; + + if (WARN_ON(vif->type != NL80211_IFTYPE_AP && + vif->type != NL80211_IFTYPE_ADHOC)) + return -EINVAL; + + return iwl_mld_add_internal_sta(mld, &mld_link->mcast_sta, + STATION_TYPE_MCAST, + mld_link->fw_id, mcast_addr, 0); +} + +int iwl_mld_add_aux_sta(struct iwl_mld *mld, + struct iwl_mld_int_sta *internal_sta) +{ + return iwl_mld_add_internal_sta(mld, internal_sta, STATION_TYPE_AUX, + 0, NULL, IWL_MAX_TID_COUNT); +} + +static void iwl_mld_remove_internal_sta(struct iwl_mld *mld, + struct iwl_mld_int_sta *internal_sta, + bool flush, u8 tid) +{ + if (WARN_ON_ONCE(internal_sta->sta_id == IWL_INVALID_STA || + internal_sta->queue_id == IWL_MLD_INVALID_QUEUE)) + return; + + if (flush) + iwl_mld_flush_link_sta_txqs(mld, internal_sta->sta_id); + + iwl_mld_free_txq(mld, BIT(internal_sta->sta_id), + tid, internal_sta->queue_id); + + iwl_mld_rm_sta_from_fw(mld, internal_sta->sta_id); + + iwl_mld_free_internal_sta(mld, internal_sta); +} + +void iwl_mld_remove_bcast_sta(struct iwl_mld *mld, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link) +{ + struct iwl_mld_link *mld_link = iwl_mld_link_from_mac80211(link); + + if (WARN_ON(!mld_link)) + return; + + if (WARN_ON(vif->type != NL80211_IFTYPE_AP && + vif->type != NL80211_IFTYPE_ADHOC)) + return; + + iwl_mld_remove_internal_sta(mld, &mld_link->bcast_sta, true, + IWL_MGMT_TID); +} + +void iwl_mld_remove_mcast_sta(struct iwl_mld *mld, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link) +{ + struct iwl_mld_link *mld_link = iwl_mld_link_from_mac80211(link); + + if (WARN_ON(!mld_link)) + return; + + if (WARN_ON(vif->type != 
NL80211_IFTYPE_AP && + vif->type != NL80211_IFTYPE_ADHOC)) + return; + + iwl_mld_remove_internal_sta(mld, &mld_link->mcast_sta, true, 0); +} + +void iwl_mld_remove_aux_sta(struct iwl_mld *mld, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link) +{ + struct iwl_mld_link *mld_link = iwl_mld_link_from_mac80211(link); + + if (WARN_ON(!mld_link)) + return; + + /* TODO: Hotspot 2.0 */ + if (WARN_ON(vif->type != NL80211_IFTYPE_P2P_DEVICE)) + return; + + iwl_mld_remove_internal_sta(mld, &mld_link->aux_sta, false, + IWL_MAX_TID_COUNT); +} + +static int iwl_mld_update_sta_resources(struct iwl_mld *mld, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + u32 old_sta_mask, + u32 new_sta_mask) +{ + int ret; + + ret = iwl_mld_update_sta_txqs(mld, sta, old_sta_mask, new_sta_mask); + if (ret) + return ret; + + ret = iwl_mld_update_sta_keys(mld, vif, sta, old_sta_mask, new_sta_mask); + if (ret) + return ret; + + return iwl_mld_update_sta_baids(mld, old_sta_mask, new_sta_mask); +} + +int iwl_mld_update_link_stas(struct iwl_mld *mld, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + u16 old_links, u16 new_links) +{ + struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta); + struct iwl_mld_link_sta *mld_link_sta; + unsigned long links_to_add = ~old_links & new_links; + unsigned long links_to_rem = old_links & ~new_links; + unsigned long old_links_long = old_links; + unsigned long sta_mask_added = 0; + u32 current_sta_mask = 0, sta_mask_to_rem = 0; + unsigned int link_id, sta_id; + int ret; + + lockdep_assert_wiphy(mld->wiphy); + + for_each_set_bit(link_id, &old_links_long, + IEEE80211_MLD_MAX_NUM_LINKS) { + mld_link_sta = + iwl_mld_link_sta_dereference_check(mld_sta, link_id); + + if (WARN_ON(!mld_link_sta)) + return -EINVAL; + + current_sta_mask |= BIT(mld_link_sta->fw_id); + if (links_to_rem & BIT(link_id)) + sta_mask_to_rem |= BIT(mld_link_sta->fw_id); + } + + if (sta_mask_to_rem) { + ret = iwl_mld_update_sta_resources(mld, vif, sta, + current_sta_mask, + current_sta_mask & + ~sta_mask_to_rem); + if (ret) + return ret; + + current_sta_mask &= ~sta_mask_to_rem; + } + + for_each_set_bit(link_id, &links_to_rem, IEEE80211_MLD_MAX_NUM_LINKS) { + struct ieee80211_link_sta *link_sta = + link_sta_dereference_protected(sta, link_id); + + if (WARN_ON(!link_sta)) + return -EINVAL; + + iwl_mld_remove_link_sta(mld, link_sta); + } + + for_each_set_bit(link_id, &links_to_add, IEEE80211_MLD_MAX_NUM_LINKS) { + struct ieee80211_link_sta *link_sta = + link_sta_dereference_protected(sta, link_id); + struct ieee80211_bss_conf *link; + + if (WARN_ON(!link_sta)) + return -EINVAL; + + ret = iwl_mld_add_link_sta(mld, link_sta); + if (ret) + goto remove_added_link_stas; + + mld_link_sta = + iwl_mld_link_sta_dereference_check(mld_sta, + link_id); + + link = link_conf_dereference_protected(mld_sta->vif, + link_sta->link_id); + + iwl_mld_set_max_amsdu_len(mld, link_sta); + iwl_mld_config_tlc_link(mld, vif, link, link_sta); + + sta_mask_added |= BIT(mld_link_sta->fw_id); + } + + if (sta_mask_added) { + ret = iwl_mld_update_sta_resources(mld, vif, sta, + current_sta_mask, + current_sta_mask | + sta_mask_added); + if (ret) + goto remove_added_link_stas; + } + + /* We couldn't activate the links before it has a STA. 
Now we can */ + for_each_set_bit(link_id, &links_to_add, IEEE80211_MLD_MAX_NUM_LINKS) { + struct ieee80211_bss_conf *link = + link_conf_dereference_protected(mld_sta->vif, link_id); + + if (WARN_ON(!link)) + continue; + + iwl_mld_activate_link(mld, link); + } + + return 0; + +remove_added_link_stas: + for_each_set_bit(sta_id, &sta_mask_added, mld->fw->ucode_capa.num_stations) { + struct ieee80211_link_sta *link_sta = + wiphy_dereference(mld->wiphy, + mld->fw_id_to_link_sta[sta_id]); + + if (WARN_ON(!link_sta)) + continue; + + iwl_mld_remove_link_sta(mld, link_sta); + } + + return ret; +} diff --git a/drivers/net/wireless/intel/iwlwifi/mld/sta.h b/drivers/net/wireless/intel/iwlwifi/mld/sta.h new file mode 100644 index 0000000000000..ddcffd7b9fde1 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mld/sta.h @@ -0,0 +1,266 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * Copyright (C) 2024-2025 Intel Corporation + */ + +#ifndef __iwl_mld_sta_h__ +#define __iwl_mld_sta_h__ + +#include + +#include "mld.h" +#include "tx.h" + +/** + * struct iwl_mld_rxq_dup_data - Duplication detection data, per STA & Rx queue + * @last_seq: last sequence per tid. + * @last_sub_frame_idx: the index of the last subframe in an A-MSDU. This value + * will be zero if the packet is not part of an A-MSDU. + */ +struct iwl_mld_rxq_dup_data { + __le16 last_seq[IWL_MAX_TID_COUNT + 1]; + u8 last_sub_frame_idx[IWL_MAX_TID_COUNT + 1]; +} ____cacheline_aligned_in_smp; + +/** + * struct iwl_mld_link_sta - link-level station + * + * This represents the link-level sta - the driver level equivalent to the + * ieee80211_link_sta + * + * @last_rate_n_flags: rate_n_flags from the last &iwl_tlc_update_notif + * @signal_avg: the signal average coming from the firmware + * @in_fw: whether the link STA is uploaded to the FW (false during restart) + * @rcu_head: RCU head for freeing this object + * @fw_id: the FW id of this link sta. + */ +struct iwl_mld_link_sta { + /* Add here fields that need clean up on restart */ + struct_group(zeroed_on_hw_restart, + u32 last_rate_n_flags; + bool in_fw; + s8 signal_avg; + ); + /* And here fields that survive a fw restart */ + struct rcu_head rcu_head; + u32 fw_id; +}; + +#define iwl_mld_link_sta_dereference_check(mld_sta, link_id) \ + rcu_dereference_check((mld_sta)->link[link_id], \ + lockdep_is_held(&mld_sta->mld->wiphy->mtx)) + +#define for_each_mld_link_sta(mld_sta, link_sta, link_id) \ + for (link_id = 0; link_id < ARRAY_SIZE((mld_sta)->link); \ + link_id++) \ + if ((link_sta = \ + iwl_mld_link_sta_dereference_check(mld_sta, link_id))) + +#define IWL_NUM_DEFAULT_KEYS 4 + +/* struct iwl_mld_ptk_pn - Holds Packet Number (PN) per TID. + * @rcu_head: RCU head for freeing this data. + * @pn: Array storing PN for each TID. + */ +struct iwl_mld_ptk_pn { + struct rcu_head rcu_head; + struct { + u8 pn[IWL_MAX_TID_COUNT][IEEE80211_CCMP_PN_LEN]; + } ____cacheline_aligned_in_smp q[]; +}; + +/** + * struct iwl_mld_per_link_mpdu_counter - per-link TX/RX MPDU counters + * + * @tx: Number of TX MPDUs. + * @rx: Number of RX MPDUs. + */ +struct iwl_mld_per_link_mpdu_counter { + u32 tx; + u32 rx; +}; + +/** + * struct iwl_mld_per_q_mpdu_counter - per-queue MPDU counter + * + * @lock: Needed to protect the counters when modified from statistics. + * @per_link: per-link counters. 
+ * @window_start_time: timestamp of the counting-window start
+ */
+struct iwl_mld_per_q_mpdu_counter {
+	spinlock_t lock;
+	struct iwl_mld_per_link_mpdu_counter per_link[IWL_FW_MAX_LINK_ID + 1];
+	unsigned long window_start_time;
+} ____cacheline_aligned_in_smp;
+
+/**
+ * struct iwl_mld_sta - representation of a station in the driver.
+ *
+ * This represents the MLD-level sta, and will not be added to the FW.
+ * Embedded in ieee80211_sta.
+ *
+ * @vif: a pointer to the vif object.
+ * @sta_state: station state according to enum %ieee80211_sta_state
+ * @sta_type: type of this station. See &enum iwl_fw_sta_type
+ * @mld: a pointer to the iwl_mld object
+ * @dup_data: per-queue duplicate packet detection data
+ * @data_tx_ant: stores the last TX antenna index; used for setting
+ *	TX rate_n_flags for injected data frames (toggles on every TX failure).
+ * @tid_to_baid: a simple map of TID to Block-Ack fw id
+ * @deflink: holds the default link STA information. For a non-MLO STA, all
+ *	link-specific STA information is accessed through @deflink or through
+ *	link[0], which points to the address of @deflink. For an MLO STA,
+ *	the first added link STA will point to @deflink.
+ * @link: references to the link STA entries. For a non-MLO STA, all entries
+ *	except link[0] are assigned to NULL by default and link information is
+ *	accessed via @deflink or link[0]. For an MLO STA, the first link STA
+ *	being added will point its link pointer to the @deflink address, and
+ *	the remaining ones are allocated and assigned to link[link_id], where
+ *	link_id is the id assigned by the AP.
+ * @ptk_pn: Array of pointers to PTK PN data, used to track the Packet Number
+ *	per key index and per queue (TID).
+ * @mpdu_counters: RX/TX MPDUs counters for each queue.
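The deflink/link[] arrangement documented above can be reduced to a short sketch (a simplified standalone model that keys the deflink to link 0 and omits the RCU protection the driver actually uses):

#include <stdio.h>
#include <stdlib.h>

#define MAX_LINKS 15

struct link_sta {
	int fw_id;
};

struct sta {
	struct link_sta deflink;          /* always embedded in the STA */
	struct link_sta *link[MAX_LINKS]; /* NULL until the link exists */
};

static void sta_init(struct sta *s)
{
	/* a non-MLO STA only ever uses the embedded deflink via link[0] */
	s->link[0] = &s->deflink;
}

static struct link_sta *sta_add_link(struct sta *s, int link_id)
{
	/* additional MLO links get their own allocation */
	if (!s->link[link_id])
		s->link[link_id] = calloc(1, sizeof(*s->link[link_id]));

	return s->link[link_id];
}

int main(void)
{
	static struct sta s;
	struct link_sta *ls;

	sta_init(&s);
	s.deflink.fw_id = 3;

	ls = sta_add_link(&s, 2);
	if (!ls)
		return 1;
	ls->fw_id = 7;

	printf("link0 fw_id=%d, link2 fw_id=%d\n",
	       s.link[0]->fw_id, s.link[2]->fw_id);

	free(s.link[2]);
	return 0;
}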
+ */ +struct iwl_mld_sta { + /* Add here fields that need clean up on restart */ + struct_group(zeroed_on_hw_restart, + enum ieee80211_sta_state sta_state; + enum iwl_fw_sta_type sta_type; + ); + /* And here fields that survive a fw restart */ + struct iwl_mld *mld; + struct ieee80211_vif *vif; + struct iwl_mld_rxq_dup_data *dup_data; + u8 tid_to_baid[IWL_MAX_TID_COUNT]; + u8 data_tx_ant; + + struct iwl_mld_link_sta deflink; + struct iwl_mld_link_sta __rcu *link[IEEE80211_MLD_MAX_NUM_LINKS]; + struct iwl_mld_ptk_pn __rcu *ptk_pn[IWL_NUM_DEFAULT_KEYS]; + struct iwl_mld_per_q_mpdu_counter *mpdu_counters; +}; + +static inline struct iwl_mld_sta * +iwl_mld_sta_from_mac80211(struct ieee80211_sta *sta) +{ + return (void *)sta->drv_priv; +} + +static inline void +iwl_mld_cleanup_sta(void *data, struct ieee80211_sta *sta) +{ + struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta); + struct iwl_mld_link_sta *mld_link_sta; + u8 link_id; + + for (int i = 0; i < ARRAY_SIZE(sta->txq); i++) + CLEANUP_STRUCT(iwl_mld_txq_from_mac80211(sta->txq[i])); + + for_each_mld_link_sta(mld_sta, mld_link_sta, link_id) { + CLEANUP_STRUCT(mld_link_sta); + + if (!ieee80211_vif_is_mld(mld_sta->vif)) { + /* not an MLD STA; only has the deflink with ID zero */ + WARN_ON(link_id); + continue; + } + + if (mld_sta->vif->active_links & BIT(link_id)) + continue; + + /* Should not happen as link removal should always succeed */ + WARN_ON(1); + RCU_INIT_POINTER(mld_sta->link[link_id], NULL); + RCU_INIT_POINTER(mld_sta->mld->fw_id_to_link_sta[mld_link_sta->fw_id], + NULL); + if (mld_link_sta != &mld_sta->deflink) + kfree_rcu(mld_link_sta, rcu_head); + } + + CLEANUP_STRUCT(mld_sta); +} + +static inline struct iwl_mld_link_sta * +iwl_mld_link_sta_from_mac80211(struct ieee80211_link_sta *link_sta) +{ + struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(link_sta->sta); + + return iwl_mld_link_sta_dereference_check(mld_sta, link_sta->link_id); +} + +int iwl_mld_add_sta(struct iwl_mld *mld, struct ieee80211_sta *sta, + struct ieee80211_vif *vif, enum iwl_fw_sta_type type); +void iwl_mld_remove_sta(struct iwl_mld *mld, struct ieee80211_sta *sta); +int iwl_mld_fw_sta_id_from_link_sta(struct iwl_mld *mld, + struct ieee80211_link_sta *link_sta); +u32 iwl_mld_fw_sta_id_mask(struct iwl_mld *mld, struct ieee80211_sta *sta); +int iwl_mld_update_all_link_stations(struct iwl_mld *mld, + struct ieee80211_sta *sta); +void iwl_mld_flush_sta_txqs(struct iwl_mld *mld, struct ieee80211_sta *sta); +void iwl_mld_wait_sta_txqs_empty(struct iwl_mld *mld, + struct ieee80211_sta *sta); +void iwl_mld_count_mpdu_rx(struct ieee80211_link_sta *link_sta, int queue, + u32 count); +void iwl_mld_count_mpdu_tx(struct ieee80211_link_sta *link_sta, u32 count); + +/** + * struct iwl_mld_int_sta - representation of an internal station + * (a station that exist in FW and in driver, but not in mac80211) + * + * @sta_id: the index of the station in the fw + * @queue_id: the if of the queue used by the station + * @sta_type: station type. 
One of &iwl_fw_sta_type + */ +struct iwl_mld_int_sta { + u8 sta_id; + u32 queue_id; + enum iwl_fw_sta_type sta_type; +}; + +static inline void +iwl_mld_init_internal_sta(struct iwl_mld_int_sta *internal_sta) +{ + internal_sta->sta_id = IWL_INVALID_STA; + internal_sta->queue_id = IWL_MLD_INVALID_QUEUE; +} + +static inline void +iwl_mld_free_internal_sta(struct iwl_mld *mld, + struct iwl_mld_int_sta *internal_sta) +{ + if (WARN_ON(internal_sta->sta_id == IWL_INVALID_STA)) + return; + + RCU_INIT_POINTER(mld->fw_id_to_link_sta[internal_sta->sta_id], NULL); + iwl_mld_init_internal_sta(internal_sta); +} + +int iwl_mld_add_bcast_sta(struct iwl_mld *mld, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link); + +int iwl_mld_add_mcast_sta(struct iwl_mld *mld, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link); + +int iwl_mld_add_aux_sta(struct iwl_mld *mld, + struct iwl_mld_int_sta *internal_sta); + +void iwl_mld_remove_bcast_sta(struct iwl_mld *mld, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link); + +void iwl_mld_remove_mcast_sta(struct iwl_mld *mld, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link); + +void iwl_mld_remove_aux_sta(struct iwl_mld *mld, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link); + +int iwl_mld_update_link_stas(struct iwl_mld *mld, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + u16 old_links, u16 new_links); +#endif /* __iwl_mld_sta_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mld/stats.c b/drivers/net/wireless/intel/iwlwifi/mld/stats.c new file mode 100644 index 0000000000000..0715bbc310319 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mld/stats.c @@ -0,0 +1,513 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2024-2025 Intel Corporation + */ + +#include "mld.h" +#include "stats.h" +#include "sta.h" +#include "mlo.h" +#include "hcmd.h" +#include "iface.h" +#include "scan.h" +#include "phy.h" +#include "fw/api/stats.h" + +static int iwl_mld_send_fw_stats_cmd(struct iwl_mld *mld, u32 cfg_mask, + u32 cfg_time, u32 type_mask) +{ + u32 cmd_id = WIDE_ID(SYSTEM_GROUP, SYSTEM_STATISTICS_CMD); + struct iwl_system_statistics_cmd stats_cmd = { + .cfg_mask = cpu_to_le32(cfg_mask), + .config_time_sec = cpu_to_le32(cfg_time), + .type_id_mask = cpu_to_le32(type_mask), + }; + + return iwl_mld_send_cmd_pdu(mld, cmd_id, &stats_cmd); +} + +int iwl_mld_clear_stats_in_fw(struct iwl_mld *mld) +{ + u32 cfg_mask = IWL_STATS_CFG_FLG_ON_DEMAND_NTFY_MSK; + u32 type_mask = IWL_STATS_NTFY_TYPE_ID_OPER | + IWL_STATS_NTFY_TYPE_ID_OPER_PART1; + + return iwl_mld_send_fw_stats_cmd(mld, cfg_mask, 0, type_mask); +} + +static void +iwl_mld_fill_stats_from_oper_notif(struct iwl_mld *mld, + struct iwl_rx_packet *pkt, + u8 fw_sta_id, struct station_info *sinfo) +{ + const struct iwl_system_statistics_notif_oper *notif = + (void *)&pkt->data; + const struct iwl_stats_ntfy_per_sta *per_sta = + ¬if->per_sta[fw_sta_id]; + struct ieee80211_link_sta *link_sta; + struct iwl_mld_link_sta *mld_link_sta; + + /* 0 isn't a valid value, but FW might send 0. 
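+	 * (the firmware reports average_energy as a positive magnitude and
+	 * the driver stores it negated, so a genuine RSSI is never 0)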
+ * In that case, set the latest non-zero value we stored + */ + rcu_read_lock(); + + link_sta = rcu_dereference(mld->fw_id_to_link_sta[fw_sta_id]); + if (IS_ERR_OR_NULL(link_sta)) + goto unlock; + + mld_link_sta = iwl_mld_link_sta_from_mac80211(link_sta); + if (WARN_ON(!mld_link_sta)) + goto unlock; + + if (per_sta->average_energy) + mld_link_sta->signal_avg = + -(s8)le32_to_cpu(per_sta->average_energy); + + sinfo->signal_avg = mld_link_sta->signal_avg; + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG); + +unlock: + rcu_read_unlock(); +} + +struct iwl_mld_stats_data { + u8 fw_sta_id; + struct station_info *sinfo; + struct iwl_mld *mld; +}; + +static bool iwl_mld_wait_stats_handler(struct iwl_notif_wait_data *notif_data, + struct iwl_rx_packet *pkt, void *data) +{ + struct iwl_mld_stats_data *stats_data = data; + u16 cmd = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd); + + switch (cmd) { + case WIDE_ID(STATISTICS_GROUP, STATISTICS_OPER_NOTIF): + iwl_mld_fill_stats_from_oper_notif(stats_data->mld, pkt, + stats_data->fw_sta_id, + stats_data->sinfo); + break; + case WIDE_ID(STATISTICS_GROUP, STATISTICS_OPER_PART1_NOTIF): + break; + case WIDE_ID(SYSTEM_GROUP, SYSTEM_STATISTICS_END_NOTIF): + return true; + } + + return false; +} + +static int +iwl_mld_fw_stats_to_mac80211(struct iwl_mld *mld, struct iwl_mld_sta *mld_sta, + struct station_info *sinfo) +{ + u32 cfg_mask = IWL_STATS_CFG_FLG_ON_DEMAND_NTFY_MSK | + IWL_STATS_CFG_FLG_RESET_MSK; + u32 type_mask = IWL_STATS_NTFY_TYPE_ID_OPER | + IWL_STATS_NTFY_TYPE_ID_OPER_PART1; + static const u16 notifications[] = { + WIDE_ID(STATISTICS_GROUP, STATISTICS_OPER_NOTIF), + WIDE_ID(STATISTICS_GROUP, STATISTICS_OPER_PART1_NOTIF), + WIDE_ID(SYSTEM_GROUP, SYSTEM_STATISTICS_END_NOTIF), + }; + struct iwl_mld_stats_data wait_stats_data = { + /* We don't support drv_sta_statistics in EMLSR */ + .fw_sta_id = mld_sta->deflink.fw_id, + .sinfo = sinfo, + .mld = mld, + }; + struct iwl_notification_wait stats_wait; + int ret; + + iwl_init_notification_wait(&mld->notif_wait, &stats_wait, + notifications, ARRAY_SIZE(notifications), + iwl_mld_wait_stats_handler, + &wait_stats_data); + + ret = iwl_mld_send_fw_stats_cmd(mld, cfg_mask, 0, type_mask); + if (ret) { + iwl_remove_notification(&mld->notif_wait, &stats_wait); + return ret; + } + + /* Wait 500ms for OPERATIONAL, PART1, and END notifications, + * which should be sufficient for the firmware to gather data + * from all LMACs and send notifications to the host. + */ + ret = iwl_wait_notification(&mld->notif_wait, &stats_wait, HZ / 2); + if (ret) + return ret; + + /* When periodic statistics are sent, FW will clear its statistics DB. + * If the statistics request here happens shortly afterwards, + * the response will contain data collected over a short time + * interval. The response we got here shouldn't be processed by + * the general statistics processing because it's incomplete. + * So, we delete it from the list so it won't be processed. + */ + iwl_mld_delete_handlers(mld, notifications, ARRAY_SIZE(notifications)); + + return 0; +} + +#define PERIODIC_STATS_SECONDS 5 + +int iwl_mld_request_periodic_fw_stats(struct iwl_mld *mld, bool enable) +{ + u32 cfg_mask = enable ? 0 : IWL_STATS_CFG_FLG_DISABLE_NTFY_MSK; + u32 type_mask = enable ? (IWL_STATS_NTFY_TYPE_ID_OPER | + IWL_STATS_NTFY_TYPE_ID_OPER_PART1) : 0; + u32 cfg_time = enable ? 
PERIODIC_STATS_SECONDS : 0; + + return iwl_mld_send_fw_stats_cmd(mld, cfg_mask, cfg_time, type_mask); +} + +static void iwl_mld_sta_stats_fill_txrate(struct iwl_mld_sta *mld_sta, + struct station_info *sinfo) +{ + struct rate_info *rinfo = &sinfo->txrate; + u32 rate_n_flags = mld_sta->deflink.last_rate_n_flags; + u32 format = rate_n_flags & RATE_MCS_MOD_TYPE_MSK; + u32 gi_ltf; + + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE); + + switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) { + case RATE_MCS_CHAN_WIDTH_20: + rinfo->bw = RATE_INFO_BW_20; + break; + case RATE_MCS_CHAN_WIDTH_40: + rinfo->bw = RATE_INFO_BW_40; + break; + case RATE_MCS_CHAN_WIDTH_80: + rinfo->bw = RATE_INFO_BW_80; + break; + case RATE_MCS_CHAN_WIDTH_160: + rinfo->bw = RATE_INFO_BW_160; + break; + case RATE_MCS_CHAN_WIDTH_320: + rinfo->bw = RATE_INFO_BW_320; + break; + } + + if (format == RATE_MCS_CCK_MSK || format == RATE_MCS_LEGACY_OFDM_MSK) { + int rate = u32_get_bits(rate_n_flags, RATE_LEGACY_RATE_MSK); + + /* add the offset needed to get to the legacy ofdm indices */ + if (format == RATE_MCS_LEGACY_OFDM_MSK) + rate += IWL_FIRST_OFDM_RATE; + + switch (rate) { + case IWL_RATE_1M_INDEX: + rinfo->legacy = 10; + break; + case IWL_RATE_2M_INDEX: + rinfo->legacy = 20; + break; + case IWL_RATE_5M_INDEX: + rinfo->legacy = 55; + break; + case IWL_RATE_11M_INDEX: + rinfo->legacy = 110; + break; + case IWL_RATE_6M_INDEX: + rinfo->legacy = 60; + break; + case IWL_RATE_9M_INDEX: + rinfo->legacy = 90; + break; + case IWL_RATE_12M_INDEX: + rinfo->legacy = 120; + break; + case IWL_RATE_18M_INDEX: + rinfo->legacy = 180; + break; + case IWL_RATE_24M_INDEX: + rinfo->legacy = 240; + break; + case IWL_RATE_36M_INDEX: + rinfo->legacy = 360; + break; + case IWL_RATE_48M_INDEX: + rinfo->legacy = 480; + break; + case IWL_RATE_54M_INDEX: + rinfo->legacy = 540; + } + return; + } + + rinfo->nss = u32_get_bits(rate_n_flags, RATE_MCS_NSS_MSK) + 1; + + if (format == RATE_MCS_HT_MSK) + rinfo->mcs = RATE_HT_MCS_INDEX(rate_n_flags); + else + rinfo->mcs = u32_get_bits(rate_n_flags, RATE_MCS_CODE_MSK); + + if (rate_n_flags & RATE_MCS_SGI_MSK) + rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI; + + switch (format) { + case RATE_MCS_EHT_MSK: + rinfo->flags |= RATE_INFO_FLAGS_EHT_MCS; + break; + case RATE_MCS_HE_MSK: + gi_ltf = u32_get_bits(rate_n_flags, RATE_MCS_HE_GI_LTF_MSK); + + rinfo->flags |= RATE_INFO_FLAGS_HE_MCS; + + if (rate_n_flags & RATE_MCS_HE_106T_MSK) { + rinfo->bw = RATE_INFO_BW_HE_RU; + rinfo->he_ru_alloc = NL80211_RATE_INFO_HE_RU_ALLOC_106; + } + + switch (rate_n_flags & RATE_MCS_HE_TYPE_MSK) { + case RATE_MCS_HE_TYPE_SU: + case RATE_MCS_HE_TYPE_EXT_SU: + if (gi_ltf == 0 || gi_ltf == 1) + rinfo->he_gi = NL80211_RATE_INFO_HE_GI_0_8; + else if (gi_ltf == 2) + rinfo->he_gi = NL80211_RATE_INFO_HE_GI_1_6; + else if (gi_ltf == 3) + rinfo->he_gi = NL80211_RATE_INFO_HE_GI_3_2; + else + rinfo->he_gi = NL80211_RATE_INFO_HE_GI_0_8; + break; + case RATE_MCS_HE_TYPE_MU: + if (gi_ltf == 0 || gi_ltf == 1) + rinfo->he_gi = NL80211_RATE_INFO_HE_GI_0_8; + else if (gi_ltf == 2) + rinfo->he_gi = NL80211_RATE_INFO_HE_GI_1_6; + else + rinfo->he_gi = NL80211_RATE_INFO_HE_GI_3_2; + break; + case RATE_MCS_HE_TYPE_TRIG: + if (gi_ltf == 0 || gi_ltf == 1) + rinfo->he_gi = NL80211_RATE_INFO_HE_GI_1_6; + else + rinfo->he_gi = NL80211_RATE_INFO_HE_GI_3_2; + break; + } + + if (rate_n_flags & RATE_HE_DUAL_CARRIER_MODE_MSK) + rinfo->he_dcm = 1; + break; + case RATE_MCS_HT_MSK: + rinfo->flags |= RATE_INFO_FLAGS_MCS; + break; + case RATE_MCS_VHT_MSK: + rinfo->flags |= 
RATE_INFO_FLAGS_VHT_MCS; + break; + } +} + +void iwl_mld_mac80211_sta_statistics(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct station_info *sinfo) +{ + struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta); + + /* This API is not EMLSR ready, so we cannot provide complete + * information if EMLSR is active + */ + if (hweight16(vif->active_links) > 1) + return; + + if (iwl_mld_fw_stats_to_mac80211(mld_sta->mld, mld_sta, sinfo)) + return; + + iwl_mld_sta_stats_fill_txrate(mld_sta, sinfo); + + /* TODO: NL80211_STA_INFO_BEACON_RX */ + + /* TODO: NL80211_STA_INFO_BEACON_SIGNAL_AVG */ +} + +#define IWL_MLD_TRAFFIC_LOAD_MEDIUM_THRESH 10 /* percentage */ +#define IWL_MLD_TRAFFIC_LOAD_HIGH_THRESH 50 /* percentage */ +#define IWL_MLD_TRAFFIC_LOAD_MIN_WINDOW_USEC (500 * 1000) + +static u8 iwl_mld_stats_load_percentage(u32 last_ts_usec, u32 curr_ts_usec, + u32 total_airtime_usec) +{ + u32 elapsed_usec = curr_ts_usec - last_ts_usec; + + if (elapsed_usec < IWL_MLD_TRAFFIC_LOAD_MIN_WINDOW_USEC) + return 0; + + return (100 * total_airtime_usec / elapsed_usec); +} + +static void iwl_mld_stats_recalc_traffic_load(struct iwl_mld *mld, + u32 total_airtime_usec, + u32 curr_ts_usec) +{ + u32 last_ts_usec = mld->scan.traffic_load.last_stats_ts_usec; + u8 load_prec; + + /* Skip the calculation as this is the first notification received */ + if (!last_ts_usec) + goto out; + + load_prec = iwl_mld_stats_load_percentage(last_ts_usec, curr_ts_usec, + total_airtime_usec); + + if (load_prec > IWL_MLD_TRAFFIC_LOAD_HIGH_THRESH) + mld->scan.traffic_load.status = IWL_MLD_TRAFFIC_HIGH; + else if (load_prec > IWL_MLD_TRAFFIC_LOAD_MEDIUM_THRESH) + mld->scan.traffic_load.status = IWL_MLD_TRAFFIC_MEDIUM; + else + mld->scan.traffic_load.status = IWL_MLD_TRAFFIC_LOW; + +out: + mld->scan.traffic_load.last_stats_ts_usec = curr_ts_usec; +} + +static void iwl_mld_update_link_sig(struct ieee80211_vif *vif, int sig, + struct ieee80211_bss_conf *bss_conf) +{ + struct iwl_mld *mld = iwl_mld_vif_from_mac80211(vif)->mld; + int exit_emlsr_thresh; + + if (sig == 0) { + IWL_DEBUG_RX(mld, "RSSI is 0 - skip signal based decision\n"); + return; + } + + /* TODO: task=statistics handle CQM notifications */ + + if (sig < IWL_MLD_LOW_RSSI_MLO_SCAN_THRESH) + iwl_mld_int_mlo_scan(mld, vif); + + if (!iwl_mld_emlsr_active(vif)) + return; + + /* We are in EMLSR, check if we need to exit */ + exit_emlsr_thresh = + iwl_mld_get_emlsr_rssi_thresh(mld, &bss_conf->chanreq.oper, + true); + + if (sig < exit_emlsr_thresh) + iwl_mld_exit_emlsr(mld, vif, IWL_MLD_EMLSR_EXIT_LOW_RSSI, + iwl_mld_get_other_link(vif, + bss_conf->link_id)); +} + +static void +iwl_mld_process_per_link_stats(struct iwl_mld *mld, + const struct iwl_stats_ntfy_per_link *per_link, + u32 curr_ts_usec) +{ + u32 total_airtime_usec = 0; + + for (u32 fw_id = 0; + fw_id < ARRAY_SIZE(mld->fw_id_to_bss_conf); + fw_id++) { + const struct iwl_stats_ntfy_per_link *link_stats; + struct ieee80211_bss_conf *bss_conf; + int sig; + + bss_conf = wiphy_dereference(mld->wiphy, + mld->fw_id_to_bss_conf[fw_id]); + if (!bss_conf || bss_conf->vif->type != NL80211_IFTYPE_STATION) + continue; + + link_stats = &per_link[fw_id]; + + total_airtime_usec += le32_to_cpu(link_stats->air_time); + + sig = -le32_to_cpu(link_stats->beacon_filter_average_energy); + iwl_mld_update_link_sig(bss_conf->vif, sig, bss_conf); + + /* TODO: parse more fields here (task=statistics)*/ + } + + iwl_mld_stats_recalc_traffic_load(mld, total_airtime_usec, + curr_ts_usec); +} + +static void 
+iwl_mld_process_per_sta_stats(struct iwl_mld *mld, + const struct iwl_stats_ntfy_per_sta *per_sta) +{ + for (int i = 0; i < mld->fw->ucode_capa.num_stations; i++) { + struct ieee80211_link_sta *link_sta = + wiphy_dereference(mld->wiphy, + mld->fw_id_to_link_sta[i]); + struct iwl_mld_link_sta *mld_link_sta; + s8 avg_energy = + -(s8)le32_to_cpu(per_sta[i].average_energy); + + if (IS_ERR_OR_NULL(link_sta) || !avg_energy) + continue; + + mld_link_sta = iwl_mld_link_sta_from_mac80211(link_sta); + if (WARN_ON(!mld_link_sta)) + continue; + + mld_link_sta->signal_avg = avg_energy; + } +} + +static void iwl_mld_fill_chanctx_stats(struct ieee80211_hw *hw, + struct ieee80211_chanctx_conf *ctx, + void *data) +{ + struct iwl_mld_phy *phy = iwl_mld_phy_from_mac80211(ctx); + const struct iwl_stats_ntfy_per_phy *per_phy = data; + u32 new_load, old_load; + + if (WARN_ON(phy->fw_id >= IWL_STATS_MAX_PHY_OPERATIONAL)) + return; + + phy->channel_load_by_us = + le32_to_cpu(per_phy[phy->fw_id].channel_load_by_us); + + old_load = phy->avg_channel_load_not_by_us; + new_load = le32_to_cpu(per_phy[phy->fw_id].channel_load_not_by_us); + if (IWL_FW_CHECK(phy->mld, new_load > 100, "Invalid channel load %u\n", + new_load)) + return; + + /* give a weight of 0.5 for the old value */ + phy->avg_channel_load_not_by_us = (new_load >> 1) + (old_load >> 1); + + iwl_mld_emlsr_check_chan_load(hw, phy, old_load); +} + +static void +iwl_mld_process_per_phy_stats(struct iwl_mld *mld, + const struct iwl_stats_ntfy_per_phy *per_phy) +{ + ieee80211_iter_chan_contexts_mtx(mld->hw, + iwl_mld_fill_chanctx_stats, + (void *)(uintptr_t)per_phy); + +} + +void iwl_mld_handle_stats_oper_notif(struct iwl_mld *mld, + struct iwl_rx_packet *pkt) +{ + const struct iwl_system_statistics_notif_oper *stats = + (void *)&pkt->data; + u32 curr_ts_usec = le32_to_cpu(stats->time_stamp); + + BUILD_BUG_ON(ARRAY_SIZE(stats->per_sta) != IWL_STATION_COUNT_MAX); + BUILD_BUG_ON(ARRAY_SIZE(stats->per_link) < + ARRAY_SIZE(mld->fw_id_to_bss_conf)); + + iwl_mld_process_per_link_stats(mld, stats->per_link, curr_ts_usec); + iwl_mld_process_per_sta_stats(mld, stats->per_sta); + iwl_mld_process_per_phy_stats(mld, stats->per_phy); + + iwl_mld_check_omi_bw_reduction(mld); +} + +void iwl_mld_handle_stats_oper_part1_notif(struct iwl_mld *mld, + struct iwl_rx_packet *pkt) +{ + /* TODO */ +} + diff --git a/drivers/net/wireless/intel/iwlwifi/mld/stats.h b/drivers/net/wireless/intel/iwlwifi/mld/stats.h new file mode 100644 index 0000000000000..de434e66d5555 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mld/stats.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * Copyright (C) 2024 Intel Corporation + */ +#ifndef __iwl_mld_stats_h__ +#define __iwl_mld_stats_h__ + +int iwl_mld_request_periodic_fw_stats(struct iwl_mld *mld, bool enable); + +void iwl_mld_mac80211_sta_statistics(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct station_info *sinfo); + +void iwl_mld_handle_stats_oper_notif(struct iwl_mld *mld, + struct iwl_rx_packet *pkt); +void iwl_mld_handle_stats_oper_part1_notif(struct iwl_mld *mld, + struct iwl_rx_packet *pkt); + +int iwl_mld_clear_stats_in_fw(struct iwl_mld *mld); + +#endif /* __iwl_mld_stats_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mld/tests/Makefile b/drivers/net/wireless/intel/iwlwifi/mld/tests/Makefile new file mode 100644 index 0000000000000..36317feb923ba --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mld/tests/Makefile @@ -0,0 +1,5 @@ +# 
SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +iwlmld-tests-y += module.o hcmd.o utils.o link.o rx.o agg.o link-selection.o + +ccflags-y += -I$(src)/../ +obj-$(CONFIG_IWLWIFI_KUNIT_TESTS) += iwlmld-tests.o diff --git a/drivers/net/wireless/intel/iwlwifi/mld/tests/agg.c b/drivers/net/wireless/intel/iwlwifi/mld/tests/agg.c new file mode 100644 index 0000000000000..1fd664be1a7c2 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mld/tests/agg.c @@ -0,0 +1,663 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * KUnit tests for channel helper functions + * + * Copyright (C) 2024 Intel Corporation + */ +#include +#include +#include + +#include "utils.h" +#include "mld.h" +#include "sta.h" +#include "agg.h" +#include "rx.h" + +#define FC_QOS_DATA (IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_DATA) +#define BA_WINDOW_SIZE 64 +#define QUEUE 0 + +static const struct reorder_buffer_case { + const char *desc; + struct { + /* ieee80211_hdr fields */ + u16 fc; + u8 tid; + bool multicast; + /* iwl_rx_mpdu_desc fields */ + u16 nssn; + /* used also for setting hdr::seq_ctrl */ + u16 sn; + u8 baid; + bool amsdu; + bool last_subframe; + bool old_sn; + bool dup; + } rx_pkt; + struct { + bool valid; + u16 head_sn; + u8 baid; + u16 num_entries; + /* The test prepares the reorder buffer with fake skbs based + * on the sequence numbers provided in @entries array. + */ + struct { + u16 sn; + /* Set add_subframes > 0 to simulate an A-MSDU by + * queueing additional @add_subframes skbs in the + * appropriate reorder buffer entry (based on the @sn) + */ + u8 add_subframes; + } entries[BA_WINDOW_SIZE]; + } reorder_buf_state; + struct { + enum iwl_mld_reorder_result reorder_res; + u16 head_sn; + u16 num_stored; + u16 skb_release_order[BA_WINDOW_SIZE]; + u16 skb_release_order_count; + } expected; +} reorder_buffer_cases[] = { + { + .desc = "RX packet with invalid BAID", + .rx_pkt = { + .fc = FC_QOS_DATA, + .baid = IWL_RX_REORDER_DATA_INVALID_BAID, + }, + .reorder_buf_state = { + .valid = true, + }, + .expected = { + /* Invalid BAID should not be buffered. The frame is + * passed to the network stack immediately. + */ + .reorder_res = IWL_MLD_PASS_SKB, + .num_stored = 0, + }, + }, + { + .desc = "RX Multicast packet", + .rx_pkt = { + .fc = FC_QOS_DATA, + .multicast = true, + }, + .reorder_buf_state = { + .valid = true, + }, + .expected = { + /* Multicast packets are not buffered. The packet is + * passed to the network stack immediately. + */ + .reorder_res = IWL_MLD_PASS_SKB, + .num_stored = 0, + }, + }, + { + .desc = "RX non-QoS data", + .rx_pkt = { + .fc = IEEE80211_FTYPE_DATA, + }, + .reorder_buf_state = { + .valid = true, + }, + .expected = { + /* non-QoS data frames do not require reordering. + * The packet is passed to the network stack + * immediately. + */ + .reorder_res = IWL_MLD_PASS_SKB, + }, + }, + { + .desc = "RX old SN packet, reorder buffer is not yet valid", + .rx_pkt = { + .fc = FC_QOS_DATA, + .old_sn = true, + }, + .reorder_buf_state = { + .valid = false, + }, + .expected = { + /* The buffer is invalid and the RX packet has an old + * SN. The packet is passed to the network stack + * immediately. + */ + .reorder_res = IWL_MLD_PASS_SKB, + }, + }, + { + .desc = "RX old SN packet, reorder buffer valid", + .rx_pkt = { + .fc = FC_QOS_DATA, + .old_sn = true, + }, + .reorder_buf_state = { + .valid = true, + .head_sn = 100, + }, + .expected = { + /* The buffer is valid and the RX packet has an old SN. + * The packet should be dropped. 
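+			 * (head_sn has already advanced past this SN, so the
+			 * frame is outside the BA window)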
+ */ + .reorder_res = IWL_MLD_DROP_SKB, + .num_stored = 0, + .head_sn = 100, + }, + }, + { + .desc = "RX duplicate packet", + .rx_pkt = { + .fc = FC_QOS_DATA, + .dup = true, + }, + .reorder_buf_state = { + .valid = true, + .head_sn = 100, + }, + .expected = { + /* Duplicate packets should be dropped */ + .reorder_res = IWL_MLD_DROP_SKB, + .num_stored = 0, + .head_sn = 100, + }, + }, + { + .desc = "RX In-order packet, sn < nssn", + .rx_pkt = { + .fc = FC_QOS_DATA, + .sn = 100, + .nssn = 101, + }, + .reorder_buf_state = { + .valid = true, + .head_sn = 100, + }, + .expected = { + /* 1. Reorder buffer is empty. + * 2. RX packet SN is in order and less than NSSN. + * Packet is released to the network stack immediately + * and buffer->head_sn is updated to NSSN. + */ + .reorder_res = IWL_MLD_PASS_SKB, + .num_stored = 0, + .head_sn = 101, + }, + }, + { + .desc = "RX In-order packet, sn == head_sn", + .rx_pkt = { + .fc = FC_QOS_DATA, + .sn = 101, + .nssn = 100, + }, + .reorder_buf_state = { + .valid = true, + .head_sn = 101, + }, + .expected = { + /* 1. Reorder buffer is empty. + * 2. RX packet SN is equal to buffer->head_sn. + * Packet is released to the network stack immediately + * and buffer->head_sn is incremented. + */ + .reorder_res = IWL_MLD_PASS_SKB, + .num_stored = 0, + .head_sn = 102, + }, + }, + { + .desc = "RX In-order packet, IEEE80211_MAX_SN wrap around", + .rx_pkt = { + .fc = FC_QOS_DATA, + .sn = IEEE80211_MAX_SN, + .nssn = IEEE80211_MAX_SN - 1, + }, + .reorder_buf_state = { + .valid = true, + .head_sn = IEEE80211_MAX_SN, + }, + .expected = { + /* 1. Reorder buffer is empty. + * 2. RX SN == buffer->head_sn == IEEE80211_MAX_SN + * Packet is released to the network stack immediately + * and buffer->head_sn is incremented correctly (wraps + * around to 0). + */ + .reorder_res = IWL_MLD_PASS_SKB, + .num_stored = 0, + .head_sn = 0, + }, + }, + { + .desc = "RX Out-of-order packet, pending packet in buffer", + .rx_pkt = { + .fc = FC_QOS_DATA, + .sn = 100, + .nssn = 102, + }, + .reorder_buf_state = { + .valid = true, + .head_sn = 100, + .num_entries = 1, + .entries[0].sn = 101, + }, + .expected = { + /* 1. Reorder buffer contains one packet with SN=101. + * 2. RX packet SN = buffer->head_sn. + * Both packets are released (in order) to the network + * stack as there are no gaps. + */ + .reorder_res = IWL_MLD_BUFFERED_SKB, + .num_stored = 0, + .head_sn = 102, + .skb_release_order = {100, 101}, + .skb_release_order_count = 2, + }, + }, + { + .desc = "RX Out-of-order packet, pending packet in buffer (wrap around)", + .rx_pkt = { + .fc = FC_QOS_DATA, + .sn = 0, + .nssn = 1, + }, + .reorder_buf_state = { + .valid = true, + .head_sn = IEEE80211_MAX_SN - 1, + .num_entries = 1, + .entries[0].sn = IEEE80211_MAX_SN, + }, + .expected = { + /* 1. Reorder buffer contains one packet with + * SN=IEEE80211_MAX_SN. + * 2. RX Packet SN = 0 (after wrap around) + * Both packets are released (in order) to the network + * stack as there are no gaps. + */ + .reorder_res = IWL_MLD_BUFFERED_SKB, + .num_stored = 0, + .head_sn = 1, + .skb_release_order = { 4095, 0 }, + .skb_release_order_count = 2, + }, + }, + { + .desc = "RX Out-of-order packet, filling 1/2 holes in buffer, release RX packet", + .rx_pkt = { + .fc = FC_QOS_DATA, + .sn = 100, + .nssn = 101, + }, + .reorder_buf_state = { + .valid = true, + .head_sn = 100, + .num_entries = 1, + .entries[0].sn = 102, + }, + .expected = { + /* 1. Reorder buffer contains one packet with SN=102. + * 2. There are 2 holes at SN={100, 101}. 
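+			 * (the release can advance only to NSSN=101, where
+			 * the next hole begins)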
+ * Only the RX packet (SN=100) is released, there is + * still a hole at 101. + */ + .reorder_res = IWL_MLD_BUFFERED_SKB, + .num_stored = 1, + .head_sn = 101, + .skb_release_order = {100}, + .skb_release_order_count = 1, + }, + }, + { + .desc = "RX Out-of-order packet, filling 1/2 holes, release 2 packets", + .rx_pkt = { + .fc = FC_QOS_DATA, + .sn = 102, + .nssn = 103, + }, + .reorder_buf_state = { + .valid = true, + .head_sn = 100, + .num_entries = 3, + .entries[0].sn = 101, + .entries[1].sn = 104, + .entries[2].sn = 105, + }, + .expected = { + /* 1. Reorder buffer contains three packets. + * 2. RX packet fills one of two holes (at SN=102). + * Two packets are released (until the next hole at + * SN=103). + */ + .reorder_res = IWL_MLD_BUFFERED_SKB, + .num_stored = 2, + .head_sn = 103, + .skb_release_order = {101, 102}, + .skb_release_order_count = 2, + }, + }, + { + .desc = "RX Out-of-order packet, filling 1/1 holes, no packets released", + .rx_pkt = { + .fc = FC_QOS_DATA, + .sn = 102, + .nssn = 100, + }, + .reorder_buf_state = { + .valid = true, + .head_sn = 100, + .num_entries = 3, + .entries[0].sn = 101, + .entries[1].sn = 103, + .entries[2].sn = 104, + }, + .expected = { + /* 1. Reorder buffer contains three packets: + * SN={101, 103, 104}. + * 2. RX packet fills a hole (SN=102), but NSSN is + * smaller than buffered frames. + * No packets can be released yet and buffer->head_sn + * is not updated. + */ + .reorder_res = IWL_MLD_BUFFERED_SKB, + .num_stored = 4, + .head_sn = 100, + }, + }, + { + .desc = "RX In-order A-MSDU, last subframe", + .rx_pkt = { + .fc = FC_QOS_DATA, + .sn = 100, + .nssn = 101, + .amsdu = true, + .last_subframe = true, + }, + .reorder_buf_state = { + .valid = true, + .head_sn = 100, + .num_entries = 1, + .entries[0] = { + .sn = 100, + .add_subframes = 1, + }, + }, + .expected = { + /* 1. Reorder buffer contains a 2-sub frames A-MSDU + * at SN=100. + * 2. RX packet is the last SN=100 A-MSDU subframe + * All packets are released in order (3 x SN=100). + */ + .reorder_res = IWL_MLD_BUFFERED_SKB, + .num_stored = 0, + .head_sn = 101, + .skb_release_order = {100, 100, 100}, + .skb_release_order_count = 3, + }, + }, + { + .desc = "RX In-order A-MSDU, not the last subframe", + .rx_pkt = { + .fc = FC_QOS_DATA, + .sn = 100, + .nssn = 101, + .amsdu = true, + .last_subframe = false, + }, + .reorder_buf_state = { + .valid = true, + .head_sn = 100, + .num_entries = 1, + .entries[0] = { + .sn = 100, + .add_subframes = 1, + }, + }, + .expected = { + /* 1. Reorder buffer contains a 2-sub frames A-MSDU + * at SN=100. + * 2. RX packet additional SN=100 A-MSDU subframe, + * but not the last one + * No packets are released and head_sn is not updated. + */ + .reorder_res = IWL_MLD_BUFFERED_SKB, + .num_stored = 3, + .head_sn = 100, + }, + }, +}; + +KUNIT_ARRAY_PARAM_DESC(test_reorder_buffer, reorder_buffer_cases, desc); + +static struct sk_buff_head g_released_skbs; +static u16 g_num_released_skbs; + +/* Add released skbs from reorder buffer to a global list; This allows us + * to verify the correct release order of packets after they pass through the + * simulated reorder logic. 
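The wrap-around behaviour exercised by these cases follows from 12-bit sequence-number arithmetic. A standalone sketch of the comparison helpers, modelled on mac80211's ieee80211_sn_less() and ieee80211_sn_inc() (illustrative, not the driver code):

#include <stdio.h>

#define SN_MASK   0xfff            /* 12-bit sequence number space */
#define SN_MODULO (SN_MASK + 1)

static int sn_less(int a, int b)
{
	/* true if a precedes b, accounting for wrap-around */
	return ((a - b) & SN_MASK) > (SN_MODULO >> 1);
}

static int sn_inc(int sn)
{
	return (sn + 1) & SN_MASK;
}

int main(void)
{
	/* 4095 precedes 0 after the wrap, exactly as the test case expects */
	printf("sn_less(4095, 0) = %d\n", sn_less(4095, 0)); /* prints 1 */
	printf("sn_inc(4095)     = %d\n", sn_inc(4095));     /* prints 0 */
	return 0;
}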
+ */ +static void +fake_iwl_mld_pass_packet_to_mac80211(struct iwl_mld *mld, + struct napi_struct *napi, + struct sk_buff *skb, int queue, + struct ieee80211_sta *sta) +{ + __skb_queue_tail(&g_released_skbs, skb); + g_num_released_skbs++; +} + +static u32 +fake_iwl_mld_fw_sta_id_mask(struct iwl_mld *mld, struct ieee80211_sta *sta) +{ + struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta); + struct iwl_mld_link_sta *mld_link_sta; + u8 link_id; + u32 sta_mask = 0; + + /* This is the expectation in the real function */ + lockdep_assert_wiphy(mld->wiphy); + + /* We can't use for_each_sta_active_link */ + for_each_mld_link_sta(mld_sta, mld_link_sta, link_id) + sta_mask |= BIT(mld_link_sta->fw_id); + return sta_mask; +} + +static struct iwl_rx_mpdu_desc *setup_mpdu_desc(void) +{ + struct kunit *test = kunit_get_current_test(); + const struct reorder_buffer_case *param = + (const void *)(test->param_value); + struct iwl_rx_mpdu_desc *mpdu_desc; + + KUNIT_ALLOC_AND_ASSERT(test, mpdu_desc); + + mpdu_desc->reorder_data |= + cpu_to_le32(FIELD_PREP(IWL_RX_MPDU_REORDER_BAID_MASK, + param->rx_pkt.baid)); + mpdu_desc->reorder_data |= + cpu_to_le32(FIELD_PREP(IWL_RX_MPDU_REORDER_SN_MASK, + param->rx_pkt.sn)); + mpdu_desc->reorder_data |= + cpu_to_le32(FIELD_PREP(IWL_RX_MPDU_REORDER_NSSN_MASK, + param->rx_pkt.nssn)); + if (param->rx_pkt.old_sn) + mpdu_desc->reorder_data |= + cpu_to_le32(IWL_RX_MPDU_REORDER_BA_OLD_SN); + + if (param->rx_pkt.dup) + mpdu_desc->status |= cpu_to_le32(IWL_RX_MPDU_STATUS_DUPLICATE); + + if (param->rx_pkt.amsdu) { + mpdu_desc->mac_flags2 |= IWL_RX_MPDU_MFLG2_AMSDU; + if (param->rx_pkt.last_subframe) + mpdu_desc->amsdu_info |= + IWL_RX_MPDU_AMSDU_LAST_SUBFRAME; + } + + return mpdu_desc; +} + +static struct sk_buff *alloc_and_setup_skb(u16 fc, u16 seq_ctrl, u8 tid, + bool mcast) +{ + struct kunit *test = kunit_get_current_test(); + struct ieee80211_hdr hdr = { + .frame_control = cpu_to_le16(fc), + .seq_ctrl = cpu_to_le16(seq_ctrl), + }; + struct sk_buff *skb; + + skb = kunit_zalloc_skb(test, 128, GFP_KERNEL); + KUNIT_ASSERT_NOT_NULL(test, skb); + + if (ieee80211_is_data_qos(hdr.frame_control)) { + u8 *qc = ieee80211_get_qos_ctl(&hdr); + + qc[0] = tid & IEEE80211_QOS_CTL_TID_MASK; + } + + if (mcast) + hdr.addr1[0] = 0x1; + + skb_set_mac_header(skb, skb->len); + skb_put_data(skb, &hdr, ieee80211_hdrlen(hdr.frame_control)); + + return skb; +} + +static struct iwl_mld_reorder_buffer * +setup_reorder_buffer(struct iwl_mld_baid_data *baid_data) +{ + struct kunit *test = kunit_get_current_test(); + const struct reorder_buffer_case *param = + (const void *)(test->param_value); + struct iwl_mld_reorder_buffer *buffer = baid_data->reorder_buf; + struct iwl_mld_reorder_buf_entry *entries = baid_data->entries; + struct sk_buff *fake_skb; + + buffer->valid = param->reorder_buf_state.valid; + buffer->head_sn = param->reorder_buf_state.head_sn; + buffer->queue = QUEUE; + + for (int i = 0; i < baid_data->buf_size; i++) + __skb_queue_head_init(&entries[i].frames); + + for (int i = 0; i < param->reorder_buf_state.num_entries; i++) { + u16 sn = param->reorder_buf_state.entries[i].sn; + int index = sn % baid_data->buf_size; + u8 add_subframes = + param->reorder_buf_state.entries[i].add_subframes; + /* create 1 skb per entry + additional skbs per num of + * requested subframes + */ + u8 num_skbs = 1 + add_subframes; + + for (int j = 0; j < num_skbs; j++) { + fake_skb = alloc_and_setup_skb(FC_QOS_DATA, sn, 0, + false); + __skb_queue_tail(&entries[index].frames, fake_skb); + buffer->num_stored++; 
+ } + } + + return buffer; +} + +static struct iwl_mld_reorder_buffer *setup_ba_data(struct ieee80211_sta *sta) +{ + struct kunit *test = kunit_get_current_test(); + struct iwl_mld *mld = test->priv; + const struct reorder_buffer_case *param = + (const void *)(test->param_value); + struct iwl_mld_baid_data *baid_data = NULL; + struct iwl_mld_reorder_buffer *buffer; + u32 reorder_buf_size = BA_WINDOW_SIZE * sizeof(baid_data->entries[0]); + u8 baid = param->reorder_buf_state.baid; + + /* Assuming only 1 RXQ */ + KUNIT_ALLOC_AND_ASSERT_SIZE(test, baid_data, + sizeof(*baid_data) + reorder_buf_size); + + baid_data->baid = baid; + baid_data->tid = param->rx_pkt.tid; + baid_data->buf_size = BA_WINDOW_SIZE; + + wiphy_lock(mld->wiphy); + baid_data->sta_mask = iwl_mld_fw_sta_id_mask(mld, sta); + wiphy_unlock(mld->wiphy); + + baid_data->entries_per_queue = BA_WINDOW_SIZE; + + buffer = setup_reorder_buffer(baid_data); + + KUNIT_EXPECT_NULL(test, rcu_access_pointer(mld->fw_id_to_ba[baid])); + rcu_assign_pointer(mld->fw_id_to_ba[baid], baid_data); + + return buffer; +} + +static void test_reorder_buffer(struct kunit *test) +{ + struct iwl_mld *mld = test->priv; + const struct reorder_buffer_case *param = + (const void *)(test->param_value); + struct iwl_rx_mpdu_desc *mpdu_desc; + struct ieee80211_vif *vif; + struct ieee80211_sta *sta; + struct sk_buff *skb; + struct iwl_mld_reorder_buffer *buffer; + enum iwl_mld_reorder_result reorder_res; + u16 skb_release_order_count = param->expected.skb_release_order_count; + u16 skb_idx = 0; + + /* Init globals and activate stubs */ + __skb_queue_head_init(&g_released_skbs); + g_num_released_skbs = 0; + kunit_activate_static_stub(test, iwl_mld_fw_sta_id_mask, + fake_iwl_mld_fw_sta_id_mask); + kunit_activate_static_stub(test, iwl_mld_pass_packet_to_mac80211, + fake_iwl_mld_pass_packet_to_mac80211); + + vif = iwlmld_kunit_add_vif(false, NL80211_IFTYPE_STATION); + sta = iwlmld_kunit_setup_sta(vif, IEEE80211_STA_AUTHORIZED, -1); + + /* Prepare skb, mpdu_desc, BA data and the reorder buffer */ + skb = alloc_and_setup_skb(param->rx_pkt.fc, param->rx_pkt.sn, + param->rx_pkt.tid, param->rx_pkt.multicast); + buffer = setup_ba_data(sta); + mpdu_desc = setup_mpdu_desc(); + + rcu_read_lock(); + reorder_res = iwl_mld_reorder(mld, NULL, QUEUE, sta, skb, mpdu_desc); + rcu_read_unlock(); + + KUNIT_ASSERT_EQ(test, reorder_res, param->expected.reorder_res); + KUNIT_ASSERT_EQ(test, buffer->num_stored, param->expected.num_stored); + KUNIT_ASSERT_EQ(test, buffer->head_sn, param->expected.head_sn); + + /* Verify skbs release order */ + KUNIT_ASSERT_EQ(test, skb_release_order_count, g_num_released_skbs); + while ((skb = __skb_dequeue(&g_released_skbs))) { + struct ieee80211_hdr *hdr = (void *)skb_mac_header(skb); + + KUNIT_ASSERT_EQ(test, le16_to_cpu(hdr->seq_ctrl), + param->expected.skb_release_order[skb_idx]); + skb_idx++; + } + KUNIT_ASSERT_EQ(test, skb_idx, skb_release_order_count); +} + +static struct kunit_case reorder_buffer_test_cases[] = { + KUNIT_CASE_PARAM(test_reorder_buffer, test_reorder_buffer_gen_params), + {}, +}; + +static struct kunit_suite reorder_buffer = { + .name = "iwlmld-reorder-buffer", + .test_cases = reorder_buffer_test_cases, + .init = iwlmld_kunit_test_init, +}; + +kunit_test_suite(reorder_buffer); diff --git a/drivers/net/wireless/intel/iwlwifi/mld/tests/hcmd.c b/drivers/net/wireless/intel/iwlwifi/mld/tests/hcmd.c new file mode 100644 index 0000000000000..4e189bf8b3fb4 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mld/tests/hcmd.c @@ -0,0 +1,62 @@ 
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * KUnit tests for channel helper functions + * + * Copyright (C) 2024 Intel Corporation + */ +#include + +#include +#include "mld.h" + +MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING"); + +static void test_hcmd_names_sorted(struct kunit *test) +{ + int i; + + for (i = 0; i < global_iwl_mld_goups_size; i++) { + const struct iwl_hcmd_arr *arr = &iwl_mld_groups[i]; + int j; + + if (!arr->arr) + continue; + for (j = 0; j < arr->size - 1; j++) + KUNIT_EXPECT_LE(test, arr->arr[j].cmd_id, + arr->arr[j + 1].cmd_id); + } +} + +static void test_hcmd_names_for_rx(struct kunit *test) +{ + static struct iwl_trans t = { + .command_groups = iwl_mld_groups, + }; + + t.command_groups_size = global_iwl_mld_goups_size; + + for (unsigned int i = 0; i < iwl_mld_rx_handlers_num; i++) { + const struct iwl_rx_handler *rxh; + const char *name; + + rxh = &iwl_mld_rx_handlers[i]; + + name = iwl_get_cmd_string(&t, rxh->cmd_id); + KUNIT_EXPECT_NOT_NULL(test, name); + KUNIT_EXPECT_NE_MSG(test, strcmp(name, "UNKNOWN"), 0, + "ID 0x%04x is UNKNOWN", rxh->cmd_id); + } +} + +static struct kunit_case hcmd_names_cases[] = { + KUNIT_CASE(test_hcmd_names_sorted), + KUNIT_CASE(test_hcmd_names_for_rx), + {}, +}; + +static struct kunit_suite hcmd_names = { + .name = "iwlmld-hcmd-names", + .test_cases = hcmd_names_cases, +}; + +kunit_test_suite(hcmd_names); diff --git a/drivers/net/wireless/intel/iwlwifi/mld/tests/link-selection.c b/drivers/net/wireless/intel/iwlwifi/mld/tests/link-selection.c new file mode 100644 index 0000000000000..295dcfd3f85d1 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mld/tests/link-selection.c @@ -0,0 +1,303 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * KUnit tests for link selection functions + * + * Copyright (C) 2025 Intel Corporation + */ +#include + +#include "utils.h" +#include "mld.h" +#include "link.h" +#include "iface.h" +#include "phy.h" +#include "mlo.h" + +static const struct link_grading_test_case { + const char *desc; + struct { + struct { + u8 link_id; + const struct cfg80211_chan_def *chandef; + bool active; + s32 signal; + bool has_chan_util_elem; + u8 chan_util; /* 0-255 , used only if has_chan_util_elem is true */ + u8 chan_load_by_us; /* 0-100, used only if active is true */; + } link; + } input; + unsigned int expected_grade; +} link_grading_cases[] = { + { + .desc = "channel util of 128 (50%)", + .input.link = { + .link_id = 0, + .chandef = &chandef_2ghz, + .active = false, + .has_chan_util_elem = true, + .chan_util = 128, + }, + .expected_grade = 86, + }, + { + .desc = "channel util of 180 (70%)", + .input.link = { + .link_id = 0, + .chandef = &chandef_2ghz, + .active = false, + .has_chan_util_elem = true, + .chan_util = 180, + }, + .expected_grade = 51, + }, + { + .desc = "channel util of 180 (70%), channel load by us of 10%", + .input.link = { + .link_id = 0, + .chandef = &chandef_2ghz, + .has_chan_util_elem = true, + .chan_util = 180, + .active = true, + .chan_load_by_us = 10, + }, + .expected_grade = 67, + }, + { + .desc = "no channel util element", + .input.link = { + .link_id = 0, + .chandef = &chandef_2ghz, + .active = true, + }, + .expected_grade = 120, + }, +}; + +KUNIT_ARRAY_PARAM_DESC(link_grading, link_grading_cases, desc); + +static void setup_link(struct ieee80211_bss_conf *link) +{ + struct kunit *test = kunit_get_current_test(); + struct iwl_mld *mld = test->priv; + const struct link_grading_test_case *test_param = + (const void *)(test->param_value); + + KUNIT_ALLOC_AND_ASSERT(test, 
link->bss); + + link->bss->signal = DBM_TO_MBM(test_param->input.link.signal); + + link->chanreq.oper = *test_param->input.link.chandef; + + if (test_param->input.link.has_chan_util_elem) { + struct cfg80211_bss_ies *ies; + struct ieee80211_bss_load_elem bss_load = { + .channel_util = test_param->input.link.chan_util, + }; + struct element *elem = + iwlmld_kunit_gen_element(WLAN_EID_QBSS_LOAD, + &bss_load, + sizeof(bss_load)); + unsigned int elem_len = sizeof(*elem) + sizeof(bss_load); + + KUNIT_ALLOC_AND_ASSERT_SIZE(test, ies, sizeof(*ies) + elem_len); + memcpy(ies->data, elem, elem_len); + ies->len = elem_len; + rcu_assign_pointer(link->bss->beacon_ies, ies); + rcu_assign_pointer(link->bss->ies, ies); + } + + if (test_param->input.link.active) { + struct ieee80211_chanctx_conf *chan_ctx = + wiphy_dereference(mld->wiphy, link->chanctx_conf); + struct iwl_mld_phy *phy; + + KUNIT_ASSERT_NOT_NULL(test, chan_ctx); + + phy = iwl_mld_phy_from_mac80211(chan_ctx); + + phy->channel_load_by_us = test_param->input.link.chan_load_by_us; + } +} + +static void test_link_grading(struct kunit *test) +{ + struct iwl_mld *mld = test->priv; + const struct link_grading_test_case *test_param = + (const void *)(test->param_value); + struct ieee80211_vif *vif; + struct ieee80211_bss_conf *link; + unsigned int actual_grade; + /* Extract test case parameters */ + u8 link_id = test_param->input.link.link_id; + bool active = test_param->input.link.active; + u16 valid_links; + struct iwl_mld_kunit_link assoc_link = { + .band = test_param->input.link.chandef->chan->band, + }; + + /* If the link is not active, use a different link as the assoc link */ + if (active) { + assoc_link.id = link_id; + valid_links = BIT(link_id); + } else { + assoc_link.id = BIT(ffz(BIT(link_id))); + valid_links = BIT(assoc_link.id) | BIT(link_id); + } + + vif = iwlmld_kunit_setup_mlo_assoc(valid_links, &assoc_link); + + wiphy_lock(mld->wiphy); + link = wiphy_dereference(mld->wiphy, vif->link_conf[link_id]); + KUNIT_ASSERT_NOT_NULL(test, link); + + setup_link(link); + + actual_grade = iwl_mld_get_link_grade(mld, link); + wiphy_unlock(mld->wiphy); + + /* Assert that the returned grade matches the expected grade */ + KUNIT_EXPECT_EQ(test, actual_grade, test_param->expected_grade); +} + +static struct kunit_case link_selection_cases[] = { + KUNIT_CASE_PARAM(test_link_grading, link_grading_gen_params), + {}, +}; + +static struct kunit_suite link_selection = { + .name = "iwlmld-link-selection-tests", + .test_cases = link_selection_cases, + .init = iwlmld_kunit_test_init, +}; + +kunit_test_suite(link_selection); + +static const struct channel_load_case { + const char *desc; + bool low_latency_vif; + u32 chan_load_not_by_us; + enum nl80211_chan_width bw_a; + enum nl80211_chan_width bw_b; + bool primary_link_active; + bool expected_result; +} channel_load_cases[] = { + { + .desc = "Unequal bandwidth, primary link inactive, EMLSR not allowed", + .low_latency_vif = false, + .primary_link_active = false, + .bw_a = NL80211_CHAN_WIDTH_40, + .bw_b = NL80211_CHAN_WIDTH_20, + .expected_result = false, + }, + { + .desc = "Equal bandwidths, sufficient channel load, EMLSR allowed", + .low_latency_vif = false, + .primary_link_active = true, + .chan_load_not_by_us = 11, + .bw_a = NL80211_CHAN_WIDTH_40, + .bw_b = NL80211_CHAN_WIDTH_40, + .expected_result = true, + }, + { + .desc = "Equal bandwidths, insufficient channel load, EMLSR not allowed", + .low_latency_vif = false, + .primary_link_active = true, + .chan_load_not_by_us = 6, + .bw_a = NL80211_CHAN_WIDTH_80, 
+ .bw_b = NL80211_CHAN_WIDTH_80, + .expected_result = false, + }, + { + .desc = "Low latency VIF, sufficient channel load, EMLSR allowed", + .low_latency_vif = true, + .primary_link_active = true, + .chan_load_not_by_us = 6, + .bw_a = NL80211_CHAN_WIDTH_160, + .bw_b = NL80211_CHAN_WIDTH_160, + .expected_result = true, + }, + { + .desc = "Different bandwidths (2x ratio), primary link load permits EMLSR", + .low_latency_vif = false, + .primary_link_active = true, + .chan_load_not_by_us = 30, + .bw_a = NL80211_CHAN_WIDTH_40, + .bw_b = NL80211_CHAN_WIDTH_20, + .expected_result = true, + }, + { + .desc = "Different bandwidths (4x ratio), primary link load permits EMLSR", + .low_latency_vif = false, + .primary_link_active = true, + .chan_load_not_by_us = 45, + .bw_a = NL80211_CHAN_WIDTH_80, + .bw_b = NL80211_CHAN_WIDTH_20, + .expected_result = true, + }, + { + .desc = "Different bandwidths (16x ratio), primary link load insufficient", + .low_latency_vif = false, + .primary_link_active = true, + .chan_load_not_by_us = 45, + .bw_a = NL80211_CHAN_WIDTH_320, + .bw_b = NL80211_CHAN_WIDTH_20, + .expected_result = false, + }, +}; + +KUNIT_ARRAY_PARAM_DESC(channel_load, channel_load_cases, desc); + +static void test_iwl_mld_channel_load_allows_emlsr(struct kunit *test) +{ + const struct channel_load_case *params = test->param_value; + struct iwl_mld *mld = test->priv; + struct ieee80211_vif *vif; + struct cfg80211_chan_def chandef_a, chandef_b; + struct iwl_mld_link_sel_data a = {.chandef = &chandef_a, + .link_id = 4}; + struct iwl_mld_link_sel_data b = {.chandef = &chandef_b, + .link_id = 5}; + struct iwl_mld_kunit_link assoc_link = { + .id = params->primary_link_active ? a.link_id : b.link_id, + .bandwidth = params->primary_link_active ? params->bw_a : params->bw_b, + }; + bool result; + + vif = iwlmld_kunit_setup_mlo_assoc(BIT(a.link_id) | BIT(b.link_id), + &assoc_link); + + chandef_a.width = params->bw_a; + chandef_b.width = params->bw_b; + + if (params->low_latency_vif) + iwl_mld_vif_from_mac80211(vif)->low_latency_causes = 1; + + wiphy_lock(mld->wiphy); + + /* Simulate channel load */ + if (params->primary_link_active) { + struct iwl_mld_phy *phy = + iwlmld_kunit_get_phy_of_link(vif, a.link_id); + + phy->avg_channel_load_not_by_us = params->chan_load_not_by_us; + } + + result = iwl_mld_channel_load_allows_emlsr(mld, vif, &a, &b); + + wiphy_unlock(mld->wiphy); + + KUNIT_EXPECT_EQ(test, result, params->expected_result); +} + +static struct kunit_case channel_load_criteria_test_cases[] = { + KUNIT_CASE_PARAM(test_iwl_mld_channel_load_allows_emlsr, channel_load_gen_params), + {} +}; + +static struct kunit_suite channel_load_criteria_tests = { + .name = "iwlmld_channel_load_allows_emlsr", + .test_cases = channel_load_criteria_test_cases, + .init = iwlmld_kunit_test_init, +}; + +kunit_test_suite(channel_load_criteria_tests); diff --git a/drivers/net/wireless/intel/iwlwifi/mld/tests/link.c b/drivers/net/wireless/intel/iwlwifi/mld/tests/link.c new file mode 100644 index 0000000000000..4a4eaa134bd3c --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mld/tests/link.c @@ -0,0 +1,110 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * KUnit tests for channel helper functions + * + * Copyright (C) 2024-2025 Intel Corporation + */ +#include + +#include "utils.h" +#include "mld.h" +#include "link.h" +#include "iface.h" +#include "fw/api/mac-cfg.h" + +static const struct missed_beacon_test_case { + const char *desc; + struct { + struct iwl_missed_beacons_notif notif; + bool emlsr; + } input; + 
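+	/* expected driver state after the notification is handled */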
struct { + bool disconnected; + bool emlsr; + } output; +} missed_beacon_cases[] = { + { + .desc = "no EMLSR, no disconnect", + .input.notif = { + .consec_missed_beacons = cpu_to_le32(4), + }, + }, + { + .desc = "no EMLSR, no beacon loss since Rx, no disconnect", + .input.notif = { + .consec_missed_beacons = cpu_to_le32(20), + }, + }, + { + .desc = "no EMLSR, beacon loss since Rx, disconnect", + .input.notif = { + .consec_missed_beacons = cpu_to_le32(20), + .consec_missed_beacons_since_last_rx = + cpu_to_le32(10), + }, + .output.disconnected = true, + }, +}; + +KUNIT_ARRAY_PARAM_DESC(test_missed_beacon, missed_beacon_cases, desc); + +static void fake_ieee80211_connection_loss(struct ieee80211_vif *vif) +{ + vif->cfg.assoc = false; +} + +static void test_missed_beacon(struct kunit *test) +{ + struct iwl_mld *mld = test->priv; + struct iwl_missed_beacons_notif *notif; + const struct missed_beacon_test_case *test_param = + (const void *)(test->param_value); + struct ieee80211_vif *vif; + struct iwl_rx_packet *pkt; + struct iwl_mld_kunit_link link1 = { + .id = 0, + .band = NL80211_BAND_6GHZ, + }; + struct iwl_mld_kunit_link link2 = { + .id = 1, + .band = NL80211_BAND_5GHZ, + }; + + kunit_activate_static_stub(test, ieee80211_connection_loss, + fake_ieee80211_connection_loss); + pkt = iwl_mld_kunit_create_pkt(test_param->input.notif); + notif = (void *)pkt->data; + + if (test_param->input.emlsr) { + vif = iwlmld_kunit_assoc_emlsr(&link1, &link2); + } else { + struct iwl_mld_vif *mld_vif; + + vif = iwlmld_kunit_setup_non_mlo_assoc(&link1); + mld_vif = iwl_mld_vif_from_mac80211(vif); + notif->link_id = cpu_to_le32(mld_vif->deflink.fw_id); + } + + wiphy_lock(mld->wiphy); + + iwl_mld_handle_missed_beacon_notif(mld, pkt); + + wiphy_unlock(mld->wiphy); + + KUNIT_ASSERT_NE(test, vif->cfg.assoc, test_param->output.disconnected); + + /* TODO: add test cases for esr and check */ +} + +static struct kunit_case link_cases[] = { + KUNIT_CASE_PARAM(test_missed_beacon, test_missed_beacon_gen_params), + {}, +}; + +static struct kunit_suite link = { + .name = "iwlmld-link", + .test_cases = link_cases, + .init = iwlmld_kunit_test_init, +}; + +kunit_test_suite(link); diff --git a/drivers/net/wireless/intel/iwlwifi/mld/tests/module.c b/drivers/net/wireless/intel/iwlwifi/mld/tests/module.c new file mode 100644 index 0000000000000..5d9818587b238 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mld/tests/module.c @@ -0,0 +1,11 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * This is just module boilerplate for the iwlmld kunit module. 
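Each suite in this patch follows the same table-driven shape: an array of named cases, a parameter generator, and one test body. A minimal plain-C illustration of that pattern (hypothetical case table and unit under test, not KUnit API):

#include <stddef.h>
#include <stdio.h>

/* hypothetical case table, mirroring the KUNIT_ARRAY_PARAM_DESC() style */
struct case_entry {
	const char *desc;
	int in;
	int expected;
};

static const struct case_entry cases[] = {
	{ .desc = "zero stays zero", .in = 0, .expected = 0 },
	{ .desc = "doubling",        .in = 2, .expected = 4 },
};

static int unit_under_test(int x)
{
	return x * 2; /* stand-in for the driver function being tested */
}

int main(void)
{
	for (size_t i = 0; i < sizeof(cases) / sizeof(cases[0]); i++) {
		int got = unit_under_test(cases[i].in);

		printf("%-16s: %s\n", cases[i].desc,
		       got == cases[i].expected ? "ok" : "FAIL");
	}
	return 0;
}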
+ * + * Copyright (C) 2024 Intel Corporation + */ +#include + +MODULE_IMPORT_NS("IWLWIFI"); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("kunit tests for iwlmld"); diff --git a/drivers/net/wireless/intel/iwlwifi/mld/tests/rx.c b/drivers/net/wireless/intel/iwlwifi/mld/tests/rx.c new file mode 100644 index 0000000000000..20cb4e03ab413 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mld/tests/rx.c @@ -0,0 +1,353 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * KUnit tests for channel helper functions + * + * Copyright (C) 2024 Intel Corporation + */ +#include +#include "utils.h" +#include "iwl-trans.h" +#include "mld.h" +#include "sta.h" + +static const struct is_dup_case { + const char *desc; + struct { + /* ieee80211_hdr fields */ + __le16 fc; + __le16 seq; + u8 tid; + bool multicast; + /* iwl_rx_mpdu_desc fields */ + bool is_amsdu; + u8 sub_frame_idx; + } rx_pkt; + struct { + __le16 last_seq; + u8 last_sub_frame_idx; + u8 tid; + } dup_data_state; + struct { + bool is_dup; + u32 rx_status_flag; + } result; +} is_dup_cases[] = { + { + .desc = "Control frame", + .rx_pkt = { + .fc = __cpu_to_le16(IEEE80211_FTYPE_CTL), + }, + .result = { + .is_dup = false, + .rx_status_flag = 0, + } + }, + { + .desc = "Null func frame", + .rx_pkt = { + .fc = __cpu_to_le16(IEEE80211_FTYPE_DATA | + IEEE80211_STYPE_NULLFUNC), + }, + .result = { + .is_dup = false, + .rx_status_flag = 0, + } + }, + { + .desc = "Multicast data", + .rx_pkt = { + .fc = __cpu_to_le16(IEEE80211_FTYPE_DATA), + .multicast = true, + }, + .result = { + .is_dup = false, + .rx_status_flag = 0, + } + }, + { + .desc = "QoS null func frame", + .rx_pkt = { + .fc = __cpu_to_le16(IEEE80211_FTYPE_DATA | + IEEE80211_STYPE_QOS_NULLFUNC), + }, + .result = { + .is_dup = false, + .rx_status_flag = 0, + } + }, + { + .desc = "QoS data new sequence", + .rx_pkt = { + .fc = __cpu_to_le16(IEEE80211_FTYPE_DATA | + IEEE80211_STYPE_QOS_DATA), + .seq = __cpu_to_le16(0x101), + }, + .dup_data_state = { + .last_seq = __cpu_to_le16(0x100), + .last_sub_frame_idx = 0, + }, + .result = { + .is_dup = false, + .rx_status_flag = RX_FLAG_DUP_VALIDATED, + }, + }, + { + .desc = "QoS data same sequence, no retry", + .rx_pkt = { + .fc = __cpu_to_le16(IEEE80211_FTYPE_DATA | + IEEE80211_STYPE_QOS_DATA), + .seq = __cpu_to_le16(0x100), + }, + .dup_data_state = { + .last_seq = __cpu_to_le16(0x100), + .last_sub_frame_idx = 0, + }, + .result = { + .is_dup = false, + .rx_status_flag = RX_FLAG_DUP_VALIDATED, + }, + }, + { + .desc = "QoS data same sequence, has retry", + .rx_pkt = { + .fc = __cpu_to_le16(IEEE80211_FTYPE_DATA | + IEEE80211_STYPE_QOS_DATA | + IEEE80211_FCTL_RETRY), + .seq = __cpu_to_le16(0x100), + }, + .dup_data_state = { + .last_seq = __cpu_to_le16(0x100), + .last_sub_frame_idx = 0, + }, + .result = { + .is_dup = true, + .rx_status_flag = 0, + }, + }, + { + .desc = "QoS data invalid tid", + .rx_pkt = { + .fc = __cpu_to_le16(IEEE80211_FTYPE_DATA | + IEEE80211_STYPE_QOS_DATA), + .seq = __cpu_to_le16(0x100), + .tid = IWL_MAX_TID_COUNT + 1, + }, + .result = { + .is_dup = true, + .rx_status_flag = 0, + }, + }, + { + .desc = "non-QoS data, same sequence, same tid, no retry", + .rx_pkt = { + /* Driver will use tid = IWL_MAX_TID_COUNT */ + .fc = __cpu_to_le16(IEEE80211_FTYPE_DATA), + .seq = __cpu_to_le16(0x100), + }, + .dup_data_state = { + .tid = IWL_MAX_TID_COUNT, + .last_seq = __cpu_to_le16(0x100), + .last_sub_frame_idx = 0, + }, + .result = { + .is_dup = false, + .rx_status_flag = RX_FLAG_DUP_VALIDATED, + }, + }, + { + .desc = "non-QoS data, same sequence, 
same tid, has retry", + .rx_pkt = { + /* Driver will use tid = IWL_MAX_TID_COUNT */ + .fc = __cpu_to_le16(IEEE80211_FTYPE_DATA | + IEEE80211_FCTL_RETRY), + .seq = __cpu_to_le16(0x100), + }, + .dup_data_state = { + .tid = IWL_MAX_TID_COUNT, + .last_seq = __cpu_to_le16(0x100), + .last_sub_frame_idx = 0, + }, + .result = { + .is_dup = true, + .rx_status_flag = 0, + }, + }, + { + .desc = "non-QoS data, same sequence on different tid's", + .rx_pkt = { + /* Driver will use tid = IWL_MAX_TID_COUNT */ + .fc = __cpu_to_le16(IEEE80211_FTYPE_DATA), + .seq = __cpu_to_le16(0x100), + }, + .dup_data_state = { + .tid = 7, + .last_seq = __cpu_to_le16(0x100), + .last_sub_frame_idx = 0, + }, + .result = { + .is_dup = false, + .rx_status_flag = RX_FLAG_DUP_VALIDATED, + }, + }, + { + .desc = "A-MSDU new subframe, allow same PN", + .rx_pkt = { + .fc = __cpu_to_le16(IEEE80211_FTYPE_DATA | + IEEE80211_STYPE_QOS_DATA), + .seq = __cpu_to_le16(0x100), + .is_amsdu = true, + .sub_frame_idx = 1, + }, + .dup_data_state = { + .last_seq = __cpu_to_le16(0x100), + .last_sub_frame_idx = 0, + }, + .result = { + .is_dup = false, + .rx_status_flag = RX_FLAG_ALLOW_SAME_PN | + RX_FLAG_DUP_VALIDATED, + }, + }, + { + .desc = "A-MSDU subframe with smaller idx, disallow same PN", + .rx_pkt = { + .fc = __cpu_to_le16(IEEE80211_FTYPE_DATA | + IEEE80211_STYPE_QOS_DATA), + .seq = __cpu_to_le16(0x100), + .is_amsdu = true, + .sub_frame_idx = 1, + }, + .dup_data_state = { + .last_seq = __cpu_to_le16(0x100), + .last_sub_frame_idx = 2, + }, + .result = { + .is_dup = false, + .rx_status_flag = RX_FLAG_DUP_VALIDATED, + }, + }, + { + .desc = "A-MSDU same subframe, no retry, disallow same PN", + .rx_pkt = { + .fc = __cpu_to_le16(IEEE80211_FTYPE_DATA | + IEEE80211_STYPE_QOS_DATA), + .seq = __cpu_to_le16(0x100), + .is_amsdu = true, + .sub_frame_idx = 0, + }, + .dup_data_state = { + .last_seq = __cpu_to_le16(0x100), + .last_sub_frame_idx = 0, + }, + .result = { + .is_dup = false, + .rx_status_flag = RX_FLAG_DUP_VALIDATED, + }, + }, + { + .desc = "A-MSDU same subframe, has retry", + .rx_pkt = { + .fc = __cpu_to_le16(IEEE80211_FTYPE_DATA | + IEEE80211_STYPE_QOS_DATA | + IEEE80211_FCTL_RETRY), + .seq = __cpu_to_le16(0x100), + .is_amsdu = true, + .sub_frame_idx = 0, + }, + .dup_data_state = { + .last_seq = __cpu_to_le16(0x100), + .last_sub_frame_idx = 0, + }, + .result = { + .is_dup = true, + .rx_status_flag = 0, + }, + }, +}; + +KUNIT_ARRAY_PARAM_DESC(test_is_dup, is_dup_cases, desc); + +static void +setup_dup_data_state(struct ieee80211_sta *sta) +{ + struct kunit *test = kunit_get_current_test(); + const struct is_dup_case *param = (const void *)(test->param_value); + struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta); + u8 tid = param->dup_data_state.tid; + struct iwl_mld_rxq_dup_data *dup_data; + + /* Allocate dup_data only for 1 queue */ + KUNIT_ALLOC_AND_ASSERT(test, dup_data); + + /* Initialize dup data, see iwl_mld_alloc_dup_data */ + memset(dup_data->last_seq, 0xff, sizeof(dup_data->last_seq)); + + dup_data->last_seq[tid] = param->dup_data_state.last_seq; + dup_data->last_sub_frame_idx[tid] = + param->dup_data_state.last_sub_frame_idx; + + mld_sta->dup_data = dup_data; +} + +static void setup_rx_pkt(const struct is_dup_case *param, + struct ieee80211_hdr *hdr, + struct iwl_rx_mpdu_desc *mpdu_desc) +{ + u8 tid = param->rx_pkt.tid; + + /* Set "new rx packet" header */ + hdr->frame_control = param->rx_pkt.fc; + hdr->seq_ctrl = param->rx_pkt.seq; + + if (ieee80211_is_data_qos(hdr->frame_control)) { + u8 *qc = ieee80211_get_qos_ctl(hdr); + 
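+		/* the TID lives in the low bits of the first QoS control byte */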
+ qc[0] = tid & IEEE80211_QOS_CTL_TID_MASK; + } + + if (param->rx_pkt.multicast) + hdr->addr1[0] = 0x1; + + /* Set mpdu_desc */ + mpdu_desc->amsdu_info = param->rx_pkt.sub_frame_idx & + IWL_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK; + if (param->rx_pkt.is_amsdu) + mpdu_desc->mac_flags2 |= IWL_RX_MPDU_MFLG2_AMSDU; +} + +static void test_is_dup(struct kunit *test) +{ + const struct is_dup_case *param = (const void *)(test->param_value); + struct iwl_mld *mld = test->priv; + struct iwl_rx_mpdu_desc mpdu_desc = { }; + struct ieee80211_rx_status rx_status = { }; + struct ieee80211_vif *vif; + struct ieee80211_sta *sta; + struct ieee80211_hdr hdr; + + vif = iwlmld_kunit_add_vif(false, NL80211_IFTYPE_STATION); + sta = iwlmld_kunit_setup_sta(vif, IEEE80211_STA_AUTHORIZED, -1); + + /* Prepare test case state */ + setup_dup_data_state(sta); + setup_rx_pkt(param, &hdr, &mpdu_desc); + + KUNIT_EXPECT_EQ(test, + iwl_mld_is_dup(mld, sta, &hdr, &mpdu_desc, &rx_status, + 0), /* assuming only 1 queue */ + param->result.is_dup); + KUNIT_EXPECT_EQ(test, rx_status.flag, param->result.rx_status_flag); +} + +static struct kunit_case is_dup_test_cases[] = { + KUNIT_CASE_PARAM(test_is_dup, test_is_dup_gen_params), + {}, +}; + +static struct kunit_suite is_dup = { + .name = "iwlmld-rx-is-dup", + .test_cases = is_dup_test_cases, + .init = iwlmld_kunit_test_init, +}; + +kunit_test_suite(is_dup); diff --git a/drivers/net/wireless/intel/iwlwifi/mld/tests/utils.c b/drivers/net/wireless/intel/iwlwifi/mld/tests/utils.c new file mode 100644 index 0000000000000..9712ee6965093 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mld/tests/utils.c @@ -0,0 +1,474 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * KUnit tests for channel helper functions + * + * Copyright (C) 2024-2025 Intel Corporation + */ +#include +#include + +#include "utils.h" + +#include + +#include "fw/api/scan.h" +#include "fw/api/mac-cfg.h" +#include "iwl-trans.h" +#include "mld.h" +#include "iface.h" +#include "link.h" +#include "phy.h" +#include "sta.h" + +int iwlmld_kunit_test_init(struct kunit *test) +{ + struct iwl_mld *mld; + struct iwl_trans *trans; + const struct iwl_cfg *cfg; + struct iwl_fw *fw; + struct ieee80211_hw *hw; + + KUNIT_ALLOC_AND_ASSERT(test, trans); + KUNIT_ALLOC_AND_ASSERT(test, trans->dev); + KUNIT_ALLOC_AND_ASSERT(test, cfg); + KUNIT_ALLOC_AND_ASSERT(test, fw); + KUNIT_ALLOC_AND_ASSERT(test, hw); + KUNIT_ALLOC_AND_ASSERT(test, hw->wiphy); + + mutex_init(&hw->wiphy->mtx); + + /* Allocate and initialize the mld structure */ + KUNIT_ALLOC_AND_ASSERT(test, mld); + iwl_construct_mld(mld, trans, cfg, fw, hw, NULL); + + fw->ucode_capa.num_stations = IWL_STATION_COUNT_MAX; + fw->ucode_capa.num_links = IWL_FW_MAX_LINK_ID + 1; + + mld->fwrt.trans = trans; + mld->fwrt.fw = fw; + mld->fwrt.dev = trans->dev; + + /* TODO: add priv_size to hw allocation and setup hw->priv to enable + * testing mac80211 callbacks + */ + + KUNIT_ALLOC_AND_ASSERT(test, mld->nvm_data); + KUNIT_ALLOC_AND_ASSERT_SIZE(test, mld->scan.cmd, + sizeof(struct iwl_scan_req_umac_v17)); + mld->scan.cmd_size = sizeof(struct iwl_scan_req_umac_v17); + + /* This is not the state at the end of the regular opmode_start, + * but it is more common to need it. Explicitly undo this if needed. 
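+	 * For example, a test that exercises "firmware not running" error
+	 * paths would undo it with something like:
+	 *
+	 *	mld->fw_status.running = false;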
+ */ + mld->trans->state = IWL_TRANS_FW_ALIVE; + mld->fw_status.running = true; + + /* Avoid passing mld struct around */ + test->priv = mld; + return 0; +} + +IWL_MLD_ALLOC_FN(link, bss_conf) + +static void iwlmld_kunit_init_link(struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link, + struct iwl_mld_link *mld_link, int link_id) +{ + struct kunit *test = kunit_get_current_test(); + struct iwl_mld *mld = test->priv; + struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif); + int ret; + + /* setup mac80211 link */ + rcu_assign_pointer(vif->link_conf[link_id], link); + link->link_id = link_id; + link->vif = vif; + link->beacon_int = 100; + link->dtim_period = 3; + link->qos = true; + + /* and mld_link */ + ret = iwl_mld_allocate_link_fw_id(mld, &mld_link->fw_id, link); + KUNIT_ASSERT_EQ(test, ret, 0); + rcu_assign_pointer(mld_vif->link[link_id], mld_link); + rcu_assign_pointer(vif->link_conf[link_id], link); +} + +IWL_MLD_ALLOC_FN(vif, vif) + +/* Helper function to add and initialize a VIF for KUnit tests */ +struct ieee80211_vif *iwlmld_kunit_add_vif(bool mlo, enum nl80211_iftype type) +{ + struct kunit *test = kunit_get_current_test(); + struct iwl_mld *mld = test->priv; + struct ieee80211_vif *vif; + struct iwl_mld_vif *mld_vif; + int ret; + + /* TODO: support more types */ + KUNIT_ASSERT_EQ(test, type, NL80211_IFTYPE_STATION); + + KUNIT_ALLOC_AND_ASSERT_SIZE(test, vif, + sizeof(*vif) + sizeof(*mld_vif)); + + vif->type = type; + mld_vif = iwl_mld_vif_from_mac80211(vif); + mld_vif->mld = mld; + + ret = iwl_mld_allocate_vif_fw_id(mld, &mld_vif->fw_id, vif); + KUNIT_ASSERT_EQ(test, ret, 0); + + /* TODO: revisit (task=EHT) */ + if (mlo) + return vif; + + /* Initialize the default link */ + iwlmld_kunit_init_link(vif, &vif->bss_conf, &mld_vif->deflink, 0); + + return vif; +} + +/* Use only for MLO vif */ +struct ieee80211_bss_conf * +iwlmld_kunit_add_link(struct ieee80211_vif *vif, int link_id) +{ + struct kunit *test = kunit_get_current_test(); + struct ieee80211_bss_conf *link; + struct iwl_mld_link *mld_link; + + KUNIT_ALLOC_AND_ASSERT(test, link); + KUNIT_ALLOC_AND_ASSERT(test, mld_link); + + iwlmld_kunit_init_link(vif, link, mld_link, link_id); + vif->valid_links |= BIT(link_id); + + return link; +} + +struct ieee80211_chanctx_conf * +iwlmld_kunit_add_chanctx_from_def(struct cfg80211_chan_def *def) +{ + struct kunit *test = kunit_get_current_test(); + struct iwl_mld *mld = test->priv; + struct ieee80211_chanctx_conf *ctx; + struct iwl_mld_phy *phy; + int fw_id; + + KUNIT_ALLOC_AND_ASSERT_SIZE(test, ctx, sizeof(*ctx) + sizeof(*phy)); + + /* Setup the chanctx conf */ + ctx->def = *def; + ctx->min_def = *def; + ctx->ap = *def; + + /* and the iwl_mld_phy */ + phy = iwl_mld_phy_from_mac80211(ctx); + + fw_id = iwl_mld_allocate_fw_phy_id(mld); + KUNIT_ASSERT_GE(test, fw_id, 0); + + phy->fw_id = fw_id; + phy->mld = mld; + phy->chandef = *def; + + return ctx; +} + +void iwlmld_kunit_assign_chanctx_to_link(struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link, + struct ieee80211_chanctx_conf *ctx) +{ + struct kunit *test = kunit_get_current_test(); + struct iwl_mld *mld = test->priv; + struct iwl_mld_link *mld_link; + + KUNIT_EXPECT_NULL(test, rcu_access_pointer(link->chanctx_conf)); + rcu_assign_pointer(link->chanctx_conf, ctx); + + lockdep_assert_wiphy(mld->wiphy); + + mld_link = iwl_mld_link_from_mac80211(link); + + KUNIT_EXPECT_NULL(test, rcu_access_pointer(mld_link->chan_ctx)); + KUNIT_EXPECT_FALSE(test, mld_link->active); + + rcu_assign_pointer(mld_link->chan_ctx, ctx); + 
mld_link->active = true; + + if (ieee80211_vif_is_mld(vif)) + vif->active_links |= BIT(link->link_id); +} + +IWL_MLD_ALLOC_FN(link_sta, link_sta) + +static void iwlmld_kunit_add_link_sta(struct ieee80211_sta *sta, + struct ieee80211_link_sta *link_sta, + struct iwl_mld_link_sta *mld_link_sta, + u8 link_id) +{ + struct kunit *test = kunit_get_current_test(); + struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta); + struct iwl_mld *mld = test->priv; + u8 fw_id; + int ret; + + /* initialize mac80211's link_sta */ + link_sta->link_id = link_id; + rcu_assign_pointer(sta->link[link_id], link_sta); + + link_sta->sta = sta; + + /* and the iwl_mld_link_sta */ + ret = iwl_mld_allocate_link_sta_fw_id(mld, &fw_id, link_sta); + KUNIT_ASSERT_EQ(test, ret, 0); + mld_link_sta->fw_id = fw_id; + + rcu_assign_pointer(mld_sta->link[link_id], mld_link_sta); +} + +static struct ieee80211_link_sta * +iwlmld_kunit_alloc_link_sta(struct ieee80211_sta *sta, int link_id) +{ + struct kunit *test = kunit_get_current_test(); + struct ieee80211_link_sta *link_sta; + struct iwl_mld_link_sta *mld_link_sta; + + /* Only valid for MLO */ + KUNIT_ASSERT_TRUE(test, sta->valid_links); + + KUNIT_ALLOC_AND_ASSERT(test, link_sta); + KUNIT_ALLOC_AND_ASSERT(test, mld_link_sta); + + iwlmld_kunit_add_link_sta(sta, link_sta, mld_link_sta, link_id); + + sta->valid_links |= BIT(link_id); + + return link_sta; +} + +/* Allocate and initialize a STA with the first link_sta */ +static struct ieee80211_sta * +iwlmld_kunit_add_sta(struct ieee80211_vif *vif, int link_id) +{ + struct kunit *test = kunit_get_current_test(); + struct ieee80211_sta *sta; + struct iwl_mld_sta *mld_sta; + + /* Allocate memory for ieee80211_sta with embedded iwl_mld_sta */ + KUNIT_ALLOC_AND_ASSERT_SIZE(test, sta, sizeof(*sta) + sizeof(*mld_sta)); + + /* TODO: allocate and initialize the TXQs ? 
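+	 * (each TID's ieee80211_txq embeds an iwl_mld_txq in its drv_priv;
+	 * a test exercising the TX path would need to initialize those
+	 * before transmitting)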
+	 */
+
+	mld_sta = iwl_mld_sta_from_mac80211(sta);
+	mld_sta->vif = vif;
+	mld_sta->mld = test->priv;
+
+	/* TODO: adjust for internal stations */
+	mld_sta->sta_type = STATION_TYPE_PEER;
+
+	if (link_id >= 0) {
+		iwlmld_kunit_add_link_sta(sta, &sta->deflink,
+					  &mld_sta->deflink, link_id);
+		sta->valid_links = BIT(link_id);
+	} else {
+		iwlmld_kunit_add_link_sta(sta, &sta->deflink,
+					  &mld_sta->deflink, 0);
+	}
+	return sta;
+}
+
+/* Move a STA to a state */
+static void iwlmld_kunit_move_sta_state(struct ieee80211_vif *vif,
+					struct ieee80211_sta *sta,
+					enum ieee80211_sta_state state)
+{
+	struct kunit *test = kunit_get_current_test();
+	struct iwl_mld_sta *mld_sta;
+	struct iwl_mld_vif *mld_vif;
+
+	/* The sta will be removed automatically at the end of the test */
+	KUNIT_ASSERT_NE(test, state, IEEE80211_STA_NOTEXIST);
+
+	mld_sta = iwl_mld_sta_from_mac80211(sta);
+	mld_sta->sta_state = state;
+
+	mld_vif = iwl_mld_vif_from_mac80211(mld_sta->vif);
+	mld_vif->authorized = state == IEEE80211_STA_AUTHORIZED;
+
+	if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls)
+		mld_vif->ap_sta = sta;
+}
+
+struct ieee80211_sta *iwlmld_kunit_setup_sta(struct ieee80211_vif *vif,
+					     enum ieee80211_sta_state state,
+					     int link_id)
+{
+	struct kunit *test = kunit_get_current_test();
+	struct ieee80211_sta *sta;
+
+	/* The sta will be removed automatically at the end of the test */
+	KUNIT_ASSERT_NE(test, state, IEEE80211_STA_NOTEXIST);
+
+	/* First - allocate and init the STA */
+	sta = iwlmld_kunit_add_sta(vif, link_id);
+
+	/* Now move it all the way to the wanted state */
+	for (enum ieee80211_sta_state _state = IEEE80211_STA_NONE;
+	     _state <= state; _state++)
+		iwlmld_kunit_move_sta_state(vif, sta, _state);
+
+	return sta;
+}
+
+static void iwlmld_kunit_set_vif_associated(struct ieee80211_vif *vif)
+{
+	/* TODO: setup chanreq */
+	/* TODO setup capabilities */
+
+	vif->cfg.assoc = 1;
+}
+
+static struct ieee80211_vif *
+iwlmld_kunit_setup_assoc(bool mlo, struct iwl_mld_kunit_link *assoc_link)
+{
+	struct kunit *test = kunit_get_current_test();
+	struct iwl_mld *mld = test->priv;
+	struct ieee80211_vif *vif;
+	struct ieee80211_bss_conf *link;
+	struct ieee80211_chanctx_conf *chan_ctx;
+
+	KUNIT_ASSERT_TRUE(test, mlo || assoc_link->id == 0);
+
+	vif = iwlmld_kunit_add_vif(mlo, NL80211_IFTYPE_STATION);
+
+	if (mlo)
+		link = iwlmld_kunit_add_link(vif, assoc_link->id);
+	else
+		link = &vif->bss_conf;
+
+	chan_ctx = iwlmld_kunit_add_chanctx(assoc_link->band,
+					    assoc_link->bandwidth);
+
+	wiphy_lock(mld->wiphy);
+	iwlmld_kunit_assign_chanctx_to_link(vif, link, chan_ctx);
+	wiphy_unlock(mld->wiphy);
+
+	/* The AP sta will now be pointed to by mld_vif->ap_sta */
+	iwlmld_kunit_setup_sta(vif, IEEE80211_STA_AUTHORIZED, assoc_link->id);
+
+	iwlmld_kunit_set_vif_associated(vif);
+
+	return vif;
+}
+
+struct ieee80211_vif *
+iwlmld_kunit_setup_mlo_assoc(u16 valid_links,
+			     struct iwl_mld_kunit_link *assoc_link)
+{
+	struct kunit *test = kunit_get_current_test();
+	struct ieee80211_vif *vif;
+
+	KUNIT_ASSERT_TRUE(test,
+			  hweight16(valid_links) == 1 ||
+			  hweight16(valid_links) == 2);
+	KUNIT_ASSERT_TRUE(test, valid_links & BIT(assoc_link->id));
+
+	vif = iwlmld_kunit_setup_assoc(true, assoc_link);
+
+	/* Add the other link, if applicable */
+	if (hweight16(valid_links) > 1) {
+		u8 other_link_id = ffs(valid_links & ~BIT(assoc_link->id)) - 1;
+
+		iwlmld_kunit_add_link(vif, other_link_id);
+	}
+
+	return vif;
+}
+
+struct ieee80211_vif *
+iwlmld_kunit_setup_non_mlo_assoc(struct iwl_mld_kunit_link *assoc_link)
+{
+	return iwlmld_kunit_setup_assoc(false, assoc_link);
+}
+
+struct iwl_rx_packet *
+_iwl_mld_kunit_create_pkt(const void *notif, size_t notif_sz)
+{
+	struct kunit *test = kunit_get_current_test();
+	struct iwl_rx_packet *pkt;
+
+	KUNIT_ALLOC_AND_ASSERT_SIZE(test, pkt, sizeof(*pkt) + notif_sz);
+
+	memcpy(pkt->data, notif, notif_sz);
+	pkt->len_n_flags = cpu_to_le32(sizeof(pkt->hdr) + notif_sz);
+
+	return pkt;
+}
+
+struct ieee80211_vif *iwlmld_kunit_assoc_emlsr(struct iwl_mld_kunit_link *link1,
+					       struct iwl_mld_kunit_link *link2)
+{
+	struct kunit *test = kunit_get_current_test();
+	struct iwl_mld *mld = test->priv;
+	struct ieee80211_vif *vif;
+	struct ieee80211_bss_conf *link;
+	struct ieee80211_chanctx_conf *chan_ctx;
+	struct ieee80211_sta *sta;
+	struct iwl_mld_vif *mld_vif;
+	u16 valid_links = BIT(link1->id) | BIT(link2->id);
+
+	KUNIT_ASSERT_TRUE(test, hweight16(valid_links) == 2);
+
+	vif = iwlmld_kunit_setup_mlo_assoc(valid_links, link1);
+	mld_vif = iwl_mld_vif_from_mac80211(vif);
+
+	/* Activate second link */
+	wiphy_lock(mld->wiphy);
+
+	link = wiphy_dereference(mld->wiphy, vif->link_conf[link2->id]);
+	KUNIT_EXPECT_NOT_NULL(test, link);
+
+	chan_ctx = iwlmld_kunit_add_chanctx(link2->band, link2->bandwidth);
+	iwlmld_kunit_assign_chanctx_to_link(vif, link, chan_ctx);
+
+	wiphy_unlock(mld->wiphy);
+
+	/* And other link sta */
+	sta = mld_vif->ap_sta;
+	KUNIT_EXPECT_NOT_NULL(test, sta);
+
+	iwlmld_kunit_alloc_link_sta(sta, link2->id);
+
+	return vif;
+}
+
+struct element *iwlmld_kunit_gen_element(u8 id, const void *data, size_t len)
+{
+	struct kunit *test = kunit_get_current_test();
+	struct element *elem;
+
+	KUNIT_ALLOC_AND_ASSERT_SIZE(test, elem, sizeof(*elem) + len);
+
+	elem->id = id;
+	elem->datalen = len;
+	memcpy(elem->data, data, len);
+
+	return elem;
+}
+
+struct iwl_mld_phy *iwlmld_kunit_get_phy_of_link(struct ieee80211_vif *vif,
+						 u8 link_id)
+{
+	struct kunit *test = kunit_get_current_test();
+	struct iwl_mld *mld = test->priv;
+	struct ieee80211_chanctx_conf *chanctx;
+	struct ieee80211_bss_conf *link =
+		wiphy_dereference(mld->wiphy, vif->link_conf[link_id]);
+
+	KUNIT_EXPECT_NOT_NULL(test, link);
+
+	chanctx = wiphy_dereference(mld->wiphy, link->chanctx_conf);
+	KUNIT_EXPECT_NOT_NULL(test, chanctx);
+
+	return iwl_mld_phy_from_mac80211(chanctx);
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/tests/utils.h b/drivers/net/wireless/intel/iwlwifi/mld/tests/utils.h
new file mode 100644
index 0000000000000..d3723653cf1b3
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/tests/utils.h
@@ -0,0 +1,134 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * Copyright (C) 2024-2025 Intel Corporation
+ */
+
+#ifndef __iwl_mld_kunit_utils_h__
+#define __iwl_mld_kunit_utils_h__
+
+#include <kunit/test.h>
+#include <net/mac80211.h>
+
+struct iwl_mld;
+
+int iwlmld_kunit_test_init(struct kunit *test);
+
+struct iwl_mld_kunit_link {
+	u8 id;
+	enum nl80211_band band;
+	enum nl80211_chan_width bandwidth;
+};
+
+enum nl80211_iftype;
+
+struct ieee80211_vif *iwlmld_kunit_add_vif(bool mlo, enum nl80211_iftype type);
+
+struct ieee80211_bss_conf *
+iwlmld_kunit_add_link(struct ieee80211_vif *vif, int link_id);
+
+#define KUNIT_ALLOC_AND_ASSERT_SIZE(test, ptr, size)			\
+do {									\
+	(ptr) = kunit_kzalloc((test), (size), GFP_KERNEL);		\
+	KUNIT_ASSERT_NOT_NULL((test), (ptr));				\
+} while (0)
+
+#define KUNIT_ALLOC_AND_ASSERT(test, ptr)				\
+	KUNIT_ALLOC_AND_ASSERT_SIZE(test, ptr, sizeof(*(ptr)))
+
+#define CHANNEL(_name, _band, _freq)					\
+static struct ieee80211_channel _name = {				\
+	.band = (_band),						\
+	.center_freq = (_freq),						\
+	.hw_value = (_freq),						\
+}
+
+#define CHANDEF(_name, _channel, _freq1, _width)			\
+__maybe_unused static struct cfg80211_chan_def _name = {		\
+	.chan = &(_channel),						\
+	.center_freq1 = (_freq1),					\
+	.width = (_width),						\
+}
+
+CHANNEL(chan_2ghz, NL80211_BAND_2GHZ, 2412);
+CHANNEL(chan_5ghz, NL80211_BAND_5GHZ, 5200);
+CHANNEL(chan_6ghz, NL80211_BAND_6GHZ, 6115);
+/* Feel free to add more */
+
+CHANDEF(chandef_2ghz, chan_2ghz, 2412, NL80211_CHAN_WIDTH_20);
+CHANDEF(chandef_5ghz, chan_5ghz, 5200, NL80211_CHAN_WIDTH_40);
+CHANDEF(chandef_6ghz, chan_6ghz, 6115, NL80211_CHAN_WIDTH_160);
+/* Feel free to add more */
+
+struct ieee80211_chanctx_conf *
+iwlmld_kunit_add_chanctx_from_def(struct cfg80211_chan_def *def);
+
+static inline struct ieee80211_chanctx_conf *
+iwlmld_kunit_add_chanctx(enum nl80211_band band, enum nl80211_chan_width width)
+{
+	struct cfg80211_chan_def chandef;
+
+	switch (band) {
+	case NL80211_BAND_2GHZ:
+		chandef = chandef_2ghz;
+		break;
+	case NL80211_BAND_5GHZ:
+		chandef = chandef_5ghz;
+		break;
+	default:
+	case NL80211_BAND_6GHZ:
+		chandef = chandef_6ghz;
+		break;
+	}
+
+	chandef.width = width;
+
+	return iwlmld_kunit_add_chanctx_from_def(&chandef);
+}
+
+void iwlmld_kunit_assign_chanctx_to_link(struct ieee80211_vif *vif,
+					 struct ieee80211_bss_conf *link,
+					 struct ieee80211_chanctx_conf *ctx);
+
+/* Allocate a sta, initialize it and move it to the wanted state */
+struct ieee80211_sta *iwlmld_kunit_setup_sta(struct ieee80211_vif *vif,
+					     enum ieee80211_sta_state state,
+					     int link_id);
+
+struct ieee80211_vif *
+iwlmld_kunit_setup_mlo_assoc(u16 valid_links,
+			     struct iwl_mld_kunit_link *assoc_link);
+
+struct ieee80211_vif *
+iwlmld_kunit_setup_non_mlo_assoc(struct iwl_mld_kunit_link *assoc_link);
+
+struct iwl_rx_packet *
+_iwl_mld_kunit_create_pkt(const void *notif, size_t notif_sz);
+
+#define iwl_mld_kunit_create_pkt(_notif)				\
+	_iwl_mld_kunit_create_pkt(&(_notif), sizeof(_notif))
+
+struct ieee80211_vif *
+iwlmld_kunit_assoc_emlsr(struct iwl_mld_kunit_link *link1,
+			 struct iwl_mld_kunit_link *link2);
+
+struct element *iwlmld_kunit_gen_element(u8 id, const void *data, size_t len);
+
+/**
+ * iwlmld_kunit_get_phy_of_link - Get the phy of a link
+ *
+ * @vif: The vif to get the phy from.
+ * @link_id: The id of the link to get the phy for.
+ *
+ * Given a vif and link id, return the phy pointer of that link.
+ * This assumes that the link exists, and that it had a chanctx
+ * assigned.
+ * If this is not the case, the test will fail.
+ *
+ * Return: phy pointer.
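+ *
+ * Usage sketch:
+ *
+ *	struct iwl_mld_phy *phy = iwlmld_kunit_get_phy_of_link(vif, 0);
+ *
+ *	KUNIT_EXPECT_EQ(test, phy->chandef.width, NL80211_CHAN_WIDTH_40);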
+ */
+struct iwl_mld_phy *iwlmld_kunit_get_phy_of_link(struct ieee80211_vif *vif,
+						 u8 link_id);
+
+#endif /* __iwl_mld_kunit_utils_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/thermal.c b/drivers/net/wireless/intel/iwlwifi/mld/thermal.c
new file mode 100644
index 0000000000000..1909953a9be98
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mld/thermal.c
@@ -0,0 +1,438 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2024-2025 Intel Corporation
+ */
+#ifdef CONFIG_THERMAL
+#include <linux/sort.h>
+#include <linux/thermal.h>
+#endif
+
+#include "fw/api/phy.h"
+
+#include "thermal.h"
+#include "mld.h"
+#include "hcmd.h"
+
+#define IWL_MLD_CT_KILL_DURATION (5 * HZ)
+
+void iwl_mld_handle_ct_kill_notif(struct iwl_mld *mld,
+				  struct iwl_rx_packet *pkt)
+{
+	const struct ct_kill_notif *notif = (const void *)pkt->data;
+
+	IWL_ERR(mld,
+		"CT Kill notification: temp = %d, DTS = 0x%x, Scheme 0x%x - Enter CT Kill\n",
+		le16_to_cpu(notif->temperature), notif->dts,
+		notif->scheme);
+
+	iwl_mld_set_ctkill(mld, true);
+
+	wiphy_delayed_work_queue(mld->wiphy, &mld->ct_kill_exit_wk,
+				 round_jiffies_relative(IWL_MLD_CT_KILL_DURATION));
+}
+
+static void iwl_mld_exit_ctkill(struct wiphy *wiphy, struct wiphy_work *wk)
+{
+	struct iwl_mld *mld;
+
+	mld = container_of(wk, struct iwl_mld, ct_kill_exit_wk.work);
+
+	IWL_ERR(mld, "Exit CT Kill\n");
+	iwl_mld_set_ctkill(mld, false);
+}
+
+void iwl_mld_handle_temp_notif(struct iwl_mld *mld, struct iwl_rx_packet *pkt)
+{
+	const struct iwl_dts_measurement_notif *notif =
+		(const void *)pkt->data;
+	int temp;
+	u32 ths_crossed;
+
+	temp = le32_to_cpu(notif->temp);
+
+	/* shouldn't be negative, but since it's s32, make sure it isn't */
+	if (IWL_FW_CHECK(mld, temp < 0, "negative temperature %d\n", temp))
+		return;
+
+	ths_crossed = le32_to_cpu(notif->threshold_idx);
+
+	/* 0xFF in ths_crossed means the notification is not related
+	 * to a trip, so we can ignore it here.
+	 */
+	if (ths_crossed == 0xFF)
+		return;
+
+	IWL_DEBUG_TEMP(mld, "Temp = %d Threshold crossed = %d\n",
+		       temp, ths_crossed);
+
+	if (IWL_FW_CHECK(mld, ths_crossed >= IWL_MAX_DTS_TRIPS,
+			 "bad threshold: %d\n", ths_crossed))
+		return;
+
+#ifdef CONFIG_THERMAL
+	if (mld->tzone)
+		thermal_zone_device_update(mld->tzone, THERMAL_TRIP_VIOLATED);
+#endif /* CONFIG_THERMAL */
+}
+
+#ifdef CONFIG_THERMAL
+static int iwl_mld_get_temp(struct iwl_mld *mld, s32 *temp)
+{
+	struct iwl_host_cmd cmd = {
+		.id = WIDE_ID(PHY_OPS_GROUP, CMD_DTS_MEASUREMENT_TRIGGER_WIDE),
+		.flags = CMD_WANT_SKB,
+	};
+	const struct iwl_dts_measurement_resp *resp;
+	int ret;
+
+	lockdep_assert_wiphy(mld->wiphy);
+
+	ret = iwl_mld_send_cmd(mld, &cmd);
+	if (ret) {
+		IWL_ERR(mld,
+			"Failed to send the temperature measurement command (err=%d)\n",
+			ret);
+		return ret;
+	}
+
+	if (iwl_rx_packet_payload_len(cmd.resp_pkt) < sizeof(*resp)) {
+		IWL_ERR(mld,
+			"Failed to get a valid response to DTS measurement\n");
+		ret = -EIO;
+		goto free_resp;
+	}
+
+	resp = (const void *)cmd.resp_pkt->data;
+	*temp = le32_to_cpu(resp->temp);
+
+	IWL_DEBUG_TEMP(mld,
+		       "Got temperature measurement response: temp=%d\n",
+		       *temp);
+
+free_resp:
+	iwl_free_resp(&cmd);
+	return ret;
+}
+
+static int compare_temps(const void *a, const void *b)
+{
+	return ((s16)le16_to_cpu(*(__le16 *)a) -
+		(s16)le16_to_cpu(*(__le16 *)b));
+}
+
+struct iwl_trip_walk_data {
+	__le16 *thresholds;
+	int count;
+};
+
+static int iwl_trip_temp_iter(struct thermal_trip *trip, void *arg)
+{
+	struct iwl_trip_walk_data *twd = arg;
+
+	if (trip->temperature == THERMAL_TEMP_INVALID)
+		return 0;
+
+	twd->thresholds[twd->count++] =
+		cpu_to_le16((s16)(trip->temperature / 1000));
+	return 0;
+}
+#endif
+
+int iwl_mld_config_temp_report_ths(struct iwl_mld *mld)
+{
+	struct temp_report_ths_cmd cmd = {0};
+	int ret;
+#ifdef CONFIG_THERMAL
+	struct iwl_trip_walk_data twd = {
+		.thresholds = cmd.thresholds,
+		.count = 0
+	};
+
+	if (!mld->tzone)
+		goto send;
+
+	/* The thermal core holds an array of temperature trips that are
+	 * unsorted and uncompressed; the FW should get it compressed and
+	 * sorted.
+	 */
+
+	/* compress trips to cmd array, remove uninitialized values */
+	for_each_thermal_trip(mld->tzone, iwl_trip_temp_iter, &twd);
+
+	cmd.num_temps = cpu_to_le32(twd.count);
+	if (twd.count)
+		sort(cmd.thresholds, twd.count, sizeof(s16),
+		     compare_temps, NULL);
+
+send:
+#endif
+	lockdep_assert_wiphy(mld->wiphy);
+
+	ret = iwl_mld_send_cmd_pdu(mld, WIDE_ID(PHY_OPS_GROUP,
+						TEMP_REPORTING_THRESHOLDS_CMD),
+				   &cmd);
+	if (ret)
+		IWL_ERR(mld, "TEMP_REPORT_THS_CMD command failed (err=%d)\n",
+			ret);
+
+	return ret;
+}
+
+#ifdef CONFIG_THERMAL
+static int iwl_mld_tzone_get_temp(struct thermal_zone_device *device,
+				  int *temperature)
+{
+	struct iwl_mld *mld = thermal_zone_device_priv(device);
+	int temp;
+	int ret = 0;
+
+	wiphy_lock(mld->wiphy);
+
+	if (!mld->fw_status.running) {
+		/* Tell the core that there is no valid temperature value to
+		 * return, but it need not worry about this.
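+		 * THERMAL_TEMP_INVALID is the thermal core's sentinel for
+		 * "no reading available", so the read is reported as
+		 * successful-but-empty rather than as an error.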
+ */ + *temperature = THERMAL_TEMP_INVALID; + goto unlock; + } + + ret = iwl_mld_get_temp(mld, &temp); + if (ret) + goto unlock; + + *temperature = temp * 1000; +unlock: + wiphy_unlock(mld->wiphy); + return ret; +} + +static int iwl_mld_tzone_set_trip_temp(struct thermal_zone_device *device, + const struct thermal_trip *trip, + int temp) +{ + struct iwl_mld *mld = thermal_zone_device_priv(device); + int ret; + + wiphy_lock(mld->wiphy); + + if (!mld->fw_status.running) { + ret = -EIO; + goto unlock; + } + + if ((temp / 1000) > S16_MAX) { + ret = -EINVAL; + goto unlock; + } + + ret = iwl_mld_config_temp_report_ths(mld); +unlock: + wiphy_unlock(mld->wiphy); + return ret; +} + +static struct thermal_zone_device_ops tzone_ops = { + .get_temp = iwl_mld_tzone_get_temp, + .set_trip_temp = iwl_mld_tzone_set_trip_temp, +}; + +static void iwl_mld_thermal_zone_register(struct iwl_mld *mld) +{ + int ret; + char name[16]; + static atomic_t counter = ATOMIC_INIT(0); + struct thermal_trip trips[IWL_MAX_DTS_TRIPS] = { + [0 ... IWL_MAX_DTS_TRIPS - 1] = { + .temperature = THERMAL_TEMP_INVALID, + .type = THERMAL_TRIP_PASSIVE, + .flags = THERMAL_TRIP_FLAG_RW_TEMP, + }, + }; + + BUILD_BUG_ON(ARRAY_SIZE(name) >= THERMAL_NAME_LENGTH); + + sprintf(name, "iwlwifi_%u", atomic_inc_return(&counter) & 0xFF); + mld->tzone = + thermal_zone_device_register_with_trips(name, trips, + IWL_MAX_DTS_TRIPS, + mld, &tzone_ops, + NULL, 0, 0); + if (IS_ERR(mld->tzone)) { + IWL_DEBUG_TEMP(mld, + "Failed to register to thermal zone (err = %ld)\n", + PTR_ERR(mld->tzone)); + mld->tzone = NULL; + return; + } + + ret = thermal_zone_device_enable(mld->tzone); + if (ret) { + IWL_DEBUG_TEMP(mld, "Failed to enable thermal zone\n"); + thermal_zone_device_unregister(mld->tzone); + } +} + +/* budget in mWatt */ +static const u32 iwl_mld_cdev_budgets[] = { + 2400, /* cooling state 0 */ + 2000, /* cooling state 1 */ + 1800, /* cooling state 2 */ + 1600, /* cooling state 3 */ + 1400, /* cooling state 4 */ + 1200, /* cooling state 5 */ + 1000, /* cooling state 6 */ + 900, /* cooling state 7 */ + 800, /* cooling state 8 */ + 700, /* cooling state 9 */ + 650, /* cooling state 10 */ + 600, /* cooling state 11 */ + 550, /* cooling state 12 */ + 500, /* cooling state 13 */ + 450, /* cooling state 14 */ + 400, /* cooling state 15 */ + 350, /* cooling state 16 */ + 300, /* cooling state 17 */ + 250, /* cooling state 18 */ + 200, /* cooling state 19 */ + 150, /* cooling state 20 */ +}; + +int iwl_mld_config_ctdp(struct iwl_mld *mld, u32 state, + enum iwl_ctdp_cmd_operation op) +{ + struct iwl_ctdp_cmd cmd = { + .operation = cpu_to_le32(op), + .budget = cpu_to_le32(iwl_mld_cdev_budgets[state]), + .window_size = 0, + }; + int ret; + + lockdep_assert_wiphy(mld->wiphy); + + ret = iwl_mld_send_cmd_pdu(mld, WIDE_ID(PHY_OPS_GROUP, CTDP_CONFIG_CMD), + &cmd); + + if (ret) { + IWL_ERR(mld, "cTDP command failed (err=%d)\n", ret); + return ret; + } + + if (op == CTDP_CMD_OPERATION_START) + mld->cooling_dev.cur_state = state; + + return 0; +} + +static int iwl_mld_tcool_get_max_state(struct thermal_cooling_device *cdev, + unsigned long *state) +{ + *state = ARRAY_SIZE(iwl_mld_cdev_budgets) - 1; + + return 0; +} + +static int iwl_mld_tcool_get_cur_state(struct thermal_cooling_device *cdev, + unsigned long *state) +{ + struct iwl_mld *mld = (struct iwl_mld *)(cdev->devdata); + + *state = mld->cooling_dev.cur_state; + + return 0; +} + +static int iwl_mld_tcool_set_cur_state(struct thermal_cooling_device *cdev, + unsigned long new_state) +{ + struct iwl_mld *mld = (struct 
iwl_mld *)(cdev->devdata); + int ret; + + wiphy_lock(mld->wiphy); + + if (!mld->fw_status.running) { + ret = -EIO; + goto unlock; + } + + if (new_state >= ARRAY_SIZE(iwl_mld_cdev_budgets)) { + ret = -EINVAL; + goto unlock; + } + + ret = iwl_mld_config_ctdp(mld, new_state, CTDP_CMD_OPERATION_START); + +unlock: + wiphy_unlock(mld->wiphy); + return ret; +} + +static const struct thermal_cooling_device_ops tcooling_ops = { + .get_max_state = iwl_mld_tcool_get_max_state, + .get_cur_state = iwl_mld_tcool_get_cur_state, + .set_cur_state = iwl_mld_tcool_set_cur_state, +}; + +static void iwl_mld_cooling_device_register(struct iwl_mld *mld) +{ + char name[] = "iwlwifi"; + + BUILD_BUG_ON(ARRAY_SIZE(name) >= THERMAL_NAME_LENGTH); + + mld->cooling_dev.cdev = + thermal_cooling_device_register(name, + mld, + &tcooling_ops); + + if (IS_ERR(mld->cooling_dev.cdev)) { + IWL_DEBUG_TEMP(mld, + "Failed to register to cooling device (err = %ld)\n", + PTR_ERR(mld->cooling_dev.cdev)); + mld->cooling_dev.cdev = NULL; + return; + } +} + +static void iwl_mld_thermal_zone_unregister(struct iwl_mld *mld) +{ + if (!mld->tzone) + return; + + IWL_DEBUG_TEMP(mld, "Thermal zone device unregister\n"); + if (mld->tzone) { + thermal_zone_device_unregister(mld->tzone); + mld->tzone = NULL; + } +} + +static void iwl_mld_cooling_device_unregister(struct iwl_mld *mld) +{ + if (!mld->cooling_dev.cdev) + return; + + IWL_DEBUG_TEMP(mld, "Cooling device unregister\n"); + if (mld->cooling_dev.cdev) { + thermal_cooling_device_unregister(mld->cooling_dev.cdev); + mld->cooling_dev.cdev = NULL; + } +} +#endif /* CONFIG_THERMAL */ + +void iwl_mld_thermal_initialize(struct iwl_mld *mld) +{ + wiphy_delayed_work_init(&mld->ct_kill_exit_wk, iwl_mld_exit_ctkill); + +#ifdef CONFIG_THERMAL + iwl_mld_cooling_device_register(mld); + iwl_mld_thermal_zone_register(mld); +#endif +} + +void iwl_mld_thermal_exit(struct iwl_mld *mld) +{ + wiphy_delayed_work_cancel(mld->wiphy, &mld->ct_kill_exit_wk); + +#ifdef CONFIG_THERMAL + iwl_mld_cooling_device_unregister(mld); + iwl_mld_thermal_zone_unregister(mld); +#endif +} diff --git a/drivers/net/wireless/intel/iwlwifi/mld/thermal.h b/drivers/net/wireless/intel/iwlwifi/mld/thermal.h new file mode 100644 index 0000000000000..8c8893331b05c --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mld/thermal.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * Copyright (C) 2024 Intel Corporation + */ +#ifndef __iwl_mld_thermal_h__ +#define __iwl_mld_thermal_h__ + +#include "iwl-trans.h" + +struct iwl_mld; + +#ifdef CONFIG_THERMAL +#include + +/* + * struct iwl_mld_cooling_device + * @cur_state: current state + * @cdev: struct thermal cooling device + */ +struct iwl_mld_cooling_device { + u32 cur_state; + struct thermal_cooling_device *cdev; +}; + +int iwl_mld_config_ctdp(struct iwl_mld *mld, u32 state, + enum iwl_ctdp_cmd_operation op); +#endif + +void iwl_mld_handle_temp_notif(struct iwl_mld *mld, struct iwl_rx_packet *pkt); +void iwl_mld_handle_ct_kill_notif(struct iwl_mld *mld, + struct iwl_rx_packet *pkt); +int iwl_mld_config_temp_report_ths(struct iwl_mld *mld); +void iwl_mld_thermal_initialize(struct iwl_mld *mld); +void iwl_mld_thermal_exit(struct iwl_mld *mld); + +#endif /* __iwl_mld_thermal_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mld/time_sync.c b/drivers/net/wireless/intel/iwlwifi/mld/time_sync.c new file mode 100644 index 0000000000000..50799f9bfccba --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mld/time_sync.c @@ -0,0 +1,240 @@ +// 
SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2025 Intel Corporation + */ + +#include "mld.h" +#include "hcmd.h" +#include "ptp.h" +#include "time_sync.h" +#include + +static int iwl_mld_init_time_sync(struct iwl_mld *mld, u32 protocols, + const u8 *addr) +{ + struct iwl_mld_time_sync_data *time_sync = kzalloc(sizeof(*time_sync), + GFP_KERNEL); + + if (!time_sync) + return -ENOMEM; + + time_sync->active_protocols = protocols; + ether_addr_copy(time_sync->peer_addr, addr); + skb_queue_head_init(&time_sync->frame_list); + rcu_assign_pointer(mld->time_sync, time_sync); + + return 0; +} + +int iwl_mld_time_sync_fw_config(struct iwl_mld *mld) +{ + struct iwl_time_sync_cfg_cmd cmd = {}; + struct iwl_mld_time_sync_data *time_sync; + int err; + + time_sync = wiphy_dereference(mld->wiphy, mld->time_sync); + if (!time_sync) + return -EINVAL; + + cmd.protocols = cpu_to_le32(time_sync->active_protocols); + ether_addr_copy(cmd.peer_addr, time_sync->peer_addr); + + err = iwl_mld_send_cmd_pdu(mld, + WIDE_ID(DATA_PATH_GROUP, + WNM_80211V_TIMING_MEASUREMENT_CONFIG_CMD), + &cmd); + if (err) + IWL_ERR(mld, "Failed to send time sync cfg cmd: %d\n", err); + + return err; +} + +int iwl_mld_time_sync_config(struct iwl_mld *mld, const u8 *addr, u32 protocols) +{ + struct iwl_mld_time_sync_data *time_sync; + int err; + + time_sync = wiphy_dereference(mld->wiphy, mld->time_sync); + + /* The fw only supports one peer. We do allow reconfiguration of the + * same peer for cases of fw reset etc. + */ + if (time_sync && time_sync->active_protocols && + !ether_addr_equal(addr, time_sync->peer_addr)) { + IWL_DEBUG_INFO(mld, "Time sync: reject config for peer: %pM\n", + addr); + return -ENOBUFS; + } + + if (protocols & ~(IWL_TIME_SYNC_PROTOCOL_TM | + IWL_TIME_SYNC_PROTOCOL_FTM)) + return -EINVAL; + + IWL_DEBUG_INFO(mld, "Time sync: set peer addr=%pM\n", addr); + + iwl_mld_deinit_time_sync(mld); + err = iwl_mld_init_time_sync(mld, protocols, addr); + if (err) + return err; + + err = iwl_mld_time_sync_fw_config(mld); + return err; +} + +void iwl_mld_deinit_time_sync(struct iwl_mld *mld) +{ + struct iwl_mld_time_sync_data *time_sync = + wiphy_dereference(mld->wiphy, mld->time_sync); + + if (!time_sync) + return; + + RCU_INIT_POINTER(mld->time_sync, NULL); + skb_queue_purge(&time_sync->frame_list); + kfree_rcu(time_sync, rcu_head); +} + +bool iwl_mld_time_sync_frame(struct iwl_mld *mld, struct sk_buff *skb, u8 *addr) +{ + struct iwl_mld_time_sync_data *time_sync; + + rcu_read_lock(); + time_sync = rcu_dereference(mld->time_sync); + if (time_sync && ether_addr_equal(time_sync->peer_addr, addr) && + (ieee80211_is_timing_measurement(skb) || ieee80211_is_ftm(skb))) { + skb_queue_tail(&time_sync->frame_list, skb); + rcu_read_unlock(); + return true; + } + rcu_read_unlock(); + + return false; +} + +static bool iwl_mld_is_skb_match(struct sk_buff *skb, u8 *addr, u8 dialog_token) +{ + struct ieee80211_mgmt *mgmt = (void *)skb->data; + u8 skb_dialog_token; + + if (ieee80211_is_timing_measurement(skb)) + skb_dialog_token = mgmt->u.action.u.wnm_timing_msr.dialog_token; + else + skb_dialog_token = mgmt->u.action.u.ftm.dialog_token; + + if ((ether_addr_equal(mgmt->sa, addr) || + ether_addr_equal(mgmt->da, addr)) && + skb_dialog_token == dialog_token) + return true; + + return false; +} + +static struct sk_buff *iwl_mld_time_sync_find_skb(struct iwl_mld *mld, u8 *addr, + u8 dialog_token) +{ + struct iwl_mld_time_sync_data *time_sync; + struct sk_buff *skb; + + rcu_read_lock(); + + time_sync = 
rcu_dereference(mld->time_sync); + if (IWL_FW_CHECK(mld, !time_sync, + "Time sync notification but time sync is not initialized\n")) { + rcu_read_unlock(); + return NULL; + } + + /* The notifications are expected to arrive in the same order of the + * frames. If the incoming notification doesn't match the first SKB + * in the queue, it means there was no time sync notification for this + * SKB and it can be dropped. + */ + while ((skb = skb_dequeue(&time_sync->frame_list))) { + if (iwl_mld_is_skb_match(skb, addr, dialog_token)) + break; + + kfree_skb(skb); + skb = NULL; + IWL_DEBUG_DROP(mld, + "Time sync: drop SKB without matching notification\n"); + } + rcu_read_unlock(); + + return skb; +} + +static u64 iwl_mld_get_64_bit(__le32 high, __le32 low) +{ + return ((u64)le32_to_cpu(high) << 32) | le32_to_cpu(low); +} + +void iwl_mld_handle_time_msmt_notif(struct iwl_mld *mld, + struct iwl_rx_packet *pkt) +{ + struct ptp_data *data = &mld->ptp_data; + struct iwl_time_msmt_notify *notif = (void *)pkt->data; + struct ieee80211_rx_status *rx_status; + struct skb_shared_hwtstamps *shwt; + u64 ts_10ns; + struct sk_buff *skb = + iwl_mld_time_sync_find_skb(mld, notif->peer_addr, + le32_to_cpu(notif->dialog_token)); + u64 adj_time; + + if (IWL_FW_CHECK(mld, !skb, "Time sync event but no pending skb\n")) + return; + + spin_lock_bh(&data->lock); + ts_10ns = iwl_mld_get_64_bit(notif->t2_hi, notif->t2_lo); + adj_time = iwl_mld_ptp_get_adj_time(mld, ts_10ns * 10); + shwt = skb_hwtstamps(skb); + shwt->hwtstamp = ktime_set(0, adj_time); + + ts_10ns = iwl_mld_get_64_bit(notif->t3_hi, notif->t3_lo); + adj_time = iwl_mld_ptp_get_adj_time(mld, ts_10ns * 10); + rx_status = IEEE80211_SKB_RXCB(skb); + rx_status->ack_tx_hwtstamp = ktime_set(0, adj_time); + spin_unlock_bh(&data->lock); + + IWL_DEBUG_INFO(mld, + "Time sync: RX event - report frame t2=%llu t3=%llu\n", + ktime_to_ns(shwt->hwtstamp), + ktime_to_ns(rx_status->ack_tx_hwtstamp)); + ieee80211_rx_napi(mld->hw, NULL, skb, NULL); +} + +void iwl_mld_handle_time_sync_confirm_notif(struct iwl_mld *mld, + struct iwl_rx_packet *pkt) +{ + struct ptp_data *data = &mld->ptp_data; + struct iwl_time_msmt_cfm_notify *notif = (void *)pkt->data; + struct ieee80211_tx_status status = {}; + struct skb_shared_hwtstamps *shwt; + u64 ts_10ns, adj_time; + + status.skb = + iwl_mld_time_sync_find_skb(mld, notif->peer_addr, + le32_to_cpu(notif->dialog_token)); + + if (IWL_FW_CHECK(mld, !status.skb, + "Time sync confirm but no pending skb\n")) + return; + + spin_lock_bh(&data->lock); + ts_10ns = iwl_mld_get_64_bit(notif->t1_hi, notif->t1_lo); + adj_time = iwl_mld_ptp_get_adj_time(mld, ts_10ns * 10); + shwt = skb_hwtstamps(status.skb); + shwt->hwtstamp = ktime_set(0, adj_time); + + ts_10ns = iwl_mld_get_64_bit(notif->t4_hi, notif->t4_lo); + adj_time = iwl_mld_ptp_get_adj_time(mld, ts_10ns * 10); + status.info = IEEE80211_SKB_CB(status.skb); + status.ack_hwtstamp = ktime_set(0, adj_time); + spin_unlock_bh(&data->lock); + + IWL_DEBUG_INFO(mld, + "Time sync: TX event - report frame t1=%llu t4=%llu\n", + ktime_to_ns(shwt->hwtstamp), + ktime_to_ns(status.ack_hwtstamp)); + ieee80211_tx_status_ext(mld->hw, &status); +} diff --git a/drivers/net/wireless/intel/iwlwifi/mld/time_sync.h b/drivers/net/wireless/intel/iwlwifi/mld/time_sync.h new file mode 100644 index 0000000000000..2d4c5512e9615 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mld/time_sync.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * Copyright (C) 2025 Intel Corporation + */ +#ifndef 
__iwl_mld_time_sync_h__ +#define __iwl_mld_time_sync_h__ + +struct iwl_mld_time_sync_data { + struct rcu_head rcu_head; + u8 peer_addr[ETH_ALEN]; + u32 active_protocols; + struct sk_buff_head frame_list; +}; + +int iwl_mld_time_sync_config(struct iwl_mld *mld, const u8 *addr, + u32 protocols); +int iwl_mld_time_sync_fw_config(struct iwl_mld *mld); +void iwl_mld_deinit_time_sync(struct iwl_mld *mld); +void iwl_mld_handle_time_msmt_notif(struct iwl_mld *mld, + struct iwl_rx_packet *pkt); +bool iwl_mld_time_sync_frame(struct iwl_mld *mld, struct sk_buff *skb, + u8 *addr); +void iwl_mld_handle_time_sync_confirm_notif(struct iwl_mld *mld, + struct iwl_rx_packet *pkt); + +#endif /* __iwl_mld_time_sync_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mld/tlc.c b/drivers/net/wireless/intel/iwlwifi/mld/tlc.c new file mode 100644 index 0000000000000..f054cc921d9d4 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mld/tlc.c @@ -0,0 +1,700 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2024-2025 Intel Corporation + */ + +#include + +#include "tlc.h" +#include "hcmd.h" +#include "sta.h" + +#include "fw/api/rs.h" +#include "fw/api/context.h" +#include "fw/api/dhc.h" + +static u8 iwl_mld_fw_bw_from_sta_bw(const struct ieee80211_link_sta *link_sta) +{ + switch (link_sta->bandwidth) { + case IEEE80211_STA_RX_BW_320: + return IWL_TLC_MNG_CH_WIDTH_320MHZ; + case IEEE80211_STA_RX_BW_160: + return IWL_TLC_MNG_CH_WIDTH_160MHZ; + case IEEE80211_STA_RX_BW_80: + return IWL_TLC_MNG_CH_WIDTH_80MHZ; + case IEEE80211_STA_RX_BW_40: + return IWL_TLC_MNG_CH_WIDTH_40MHZ; + case IEEE80211_STA_RX_BW_20: + default: + return IWL_TLC_MNG_CH_WIDTH_20MHZ; + } +} + +static __le16 +iwl_mld_get_tlc_cmd_flags(struct iwl_mld *mld, + struct ieee80211_vif *vif, + struct ieee80211_link_sta *link_sta, + const struct ieee80211_sta_he_cap *own_he_cap, + const struct ieee80211_sta_eht_cap *own_eht_cap) +{ + struct ieee80211_sta_ht_cap *ht_cap = &link_sta->ht_cap; + struct ieee80211_sta_vht_cap *vht_cap = &link_sta->vht_cap; + struct ieee80211_sta_he_cap *he_cap = &link_sta->he_cap; + bool has_vht = vht_cap->vht_supported; + u16 flags = 0; + + /* STBC flags */ + if (mld->cfg->ht_params->stbc && + (hweight8(iwl_mld_get_valid_tx_ant(mld)) > 1)) { + if (he_cap->has_he && he_cap->he_cap_elem.phy_cap_info[2] & + IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ) + flags |= IWL_TLC_MNG_CFG_FLAGS_STBC_MSK; + else if (vht_cap->cap & IEEE80211_VHT_CAP_RXSTBC_MASK) + flags |= IWL_TLC_MNG_CFG_FLAGS_STBC_MSK; + else if (ht_cap->cap & IEEE80211_HT_CAP_RX_STBC) + flags |= IWL_TLC_MNG_CFG_FLAGS_STBC_MSK; + } + + /* LDPC */ + if (mld->cfg->ht_params->ldpc && + ((ht_cap->cap & IEEE80211_HT_CAP_LDPC_CODING) || + (has_vht && (vht_cap->cap & IEEE80211_VHT_CAP_RXLDPC)))) + flags |= IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK; + + if (he_cap->has_he && (he_cap->he_cap_elem.phy_cap_info[1] & + IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD)) + flags |= IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK; + + if (own_he_cap && + !(own_he_cap->he_cap_elem.phy_cap_info[1] & + IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD)) + flags &= ~IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK; + + /* DCM */ + if (he_cap->has_he && + (he_cap->he_cap_elem.phy_cap_info[3] & + IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_MASK && + own_he_cap && + own_he_cap->he_cap_elem.phy_cap_info[3] & + IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_MASK)) + flags |= IWL_TLC_MNG_CFG_FLAGS_HE_DCM_NSS_1_MSK; + + /* Extra EHT LTF */ + if (own_eht_cap && + own_eht_cap->eht_cap_elem.phy_cap_info[5] & + 
IEEE80211_EHT_PHY_CAP5_SUPP_EXTRA_EHT_LTF &&
+	    link_sta->eht_cap.has_eht &&
+	    link_sta->eht_cap.eht_cap_elem.phy_cap_info[5] &
+	    IEEE80211_EHT_PHY_CAP5_SUPP_EXTRA_EHT_LTF) {
+		flags |= IWL_TLC_MNG_CFG_FLAGS_EHT_EXTRA_LTF_MSK;
+	}
+
+	return cpu_to_le16(flags);
+}
+
+static u8 iwl_mld_get_fw_chains(struct iwl_mld *mld)
+{
+	u8 chains = iwl_mld_get_valid_tx_ant(mld);
+	u8 fw_chains = 0;
+
+	if (chains & ANT_A)
+		fw_chains |= IWL_TLC_MNG_CHAIN_A_MSK;
+	if (chains & ANT_B)
+		fw_chains |= IWL_TLC_MNG_CHAIN_B_MSK;
+
+	return fw_chains;
+}
+
+static u8 iwl_mld_get_fw_sgi(struct ieee80211_link_sta *link_sta)
+{
+	struct ieee80211_sta_ht_cap *ht_cap = &link_sta->ht_cap;
+	struct ieee80211_sta_vht_cap *vht_cap = &link_sta->vht_cap;
+	struct ieee80211_sta_he_cap *he_cap = &link_sta->he_cap;
+	u8 sgi_chwidths = 0;
+
+	/* If the association supports HE, HT/VHT rates will never be used for
+	 * Tx and therefore there's no need to set the
+	 * sgi-per-channel-width-support bits
+	 */
+	if (he_cap->has_he)
+		return 0;
+
+	if (ht_cap->cap & IEEE80211_HT_CAP_SGI_20)
+		sgi_chwidths |= BIT(IWL_TLC_MNG_CH_WIDTH_20MHZ);
+	if (ht_cap->cap & IEEE80211_HT_CAP_SGI_40)
+		sgi_chwidths |= BIT(IWL_TLC_MNG_CH_WIDTH_40MHZ);
+	if (vht_cap->cap & IEEE80211_VHT_CAP_SHORT_GI_80)
+		sgi_chwidths |= BIT(IWL_TLC_MNG_CH_WIDTH_80MHZ);
+	if (vht_cap->cap & IEEE80211_VHT_CAP_SHORT_GI_160)
+		sgi_chwidths |= BIT(IWL_TLC_MNG_CH_WIDTH_160MHZ);
+
+	return sgi_chwidths;
+}
+
+static int
+iwl_mld_get_highest_fw_mcs(const struct ieee80211_sta_vht_cap *vht_cap,
+			   int nss)
+{
+	u16 rx_mcs = le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map) &
+		     (0x3 << (2 * (nss - 1)));
+	rx_mcs >>= (2 * (nss - 1));
+
+	switch (rx_mcs) {
+	case IEEE80211_VHT_MCS_SUPPORT_0_7:
+		return IWL_TLC_MNG_HT_RATE_MCS7;
+	case IEEE80211_VHT_MCS_SUPPORT_0_8:
+		return IWL_TLC_MNG_HT_RATE_MCS8;
+	case IEEE80211_VHT_MCS_SUPPORT_0_9:
+		return IWL_TLC_MNG_HT_RATE_MCS9;
+	default:
+		WARN_ON_ONCE(1);
+		break;
+	}
+
+	return 0;
+}
+
+static void
+iwl_mld_fill_vht_rates(const struct ieee80211_link_sta *link_sta,
+		       const struct ieee80211_sta_vht_cap *vht_cap,
+		       struct iwl_tlc_config_cmd_v4 *cmd)
+{
+	u16 supp;
+	int i, highest_mcs;
+	u8 max_nss = link_sta->rx_nss;
+	struct ieee80211_vht_cap ieee_vht_cap = {
+		.vht_cap_info = cpu_to_le32(vht_cap->cap),
+		.supp_mcs = vht_cap->vht_mcs,
+	};
+
+	/* the station supports only a single receive chain */
+	if (link_sta->smps_mode == IEEE80211_SMPS_STATIC)
+		max_nss = 1;
+
+	for (i = 0; i < max_nss && i < IWL_TLC_NSS_MAX; i++) {
+		int nss = i + 1;
+
+		highest_mcs = iwl_mld_get_highest_fw_mcs(vht_cap, nss);
+		if (!highest_mcs)
+			continue;
+
+		supp = BIT(highest_mcs + 1) - 1;
+		if (link_sta->bandwidth == IEEE80211_STA_RX_BW_20)
+			supp &= ~BIT(IWL_TLC_MNG_HT_RATE_MCS9);
+
+		cmd->ht_rates[i][IWL_TLC_MCS_PER_BW_80] = cpu_to_le16(supp);
+		/* Check if VHT extended NSS indicates that the bandwidth/NSS
+		 * configuration is supported - only for MCS 0 since we already
+		 * decoded the MCS bits anyway ourselves.
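+		 * e.g. a peer advertising 160 MHz only via extended NSS
+		 * still gets its 80 MHz MCS map copied into the 160 MHz
+		 * entry below.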
+		 */
+		if (link_sta->bandwidth == IEEE80211_STA_RX_BW_160 &&
+		    ieee80211_get_vht_max_nss(&ieee_vht_cap,
+					      IEEE80211_VHT_CHANWIDTH_160MHZ,
+					      0, true, nss) >= nss)
+			cmd->ht_rates[i][IWL_TLC_MCS_PER_BW_160] =
+				cmd->ht_rates[i][IWL_TLC_MCS_PER_BW_80];
+	}
+}
+
+static u16 iwl_mld_he_mac80211_mcs_to_fw_mcs(u16 mcs)
+{
+	switch (mcs) {
+	case IEEE80211_HE_MCS_SUPPORT_0_7:
+		return BIT(IWL_TLC_MNG_HT_RATE_MCS7 + 1) - 1;
+	case IEEE80211_HE_MCS_SUPPORT_0_9:
+		return BIT(IWL_TLC_MNG_HT_RATE_MCS9 + 1) - 1;
+	case IEEE80211_HE_MCS_SUPPORT_0_11:
+		return BIT(IWL_TLC_MNG_HT_RATE_MCS11 + 1) - 1;
+	case IEEE80211_HE_MCS_NOT_SUPPORTED:
+		return 0;
+	}
+
+	WARN(1, "invalid HE MCS %d\n", mcs);
+	return 0;
+}
+
+static void
+iwl_mld_fill_he_rates(const struct ieee80211_link_sta *link_sta,
+		      const struct ieee80211_sta_he_cap *own_he_cap,
+		      struct iwl_tlc_config_cmd_v4 *cmd)
+{
+	const struct ieee80211_sta_he_cap *he_cap = &link_sta->he_cap;
+	u16 mcs_160 = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_160);
+	u16 mcs_80 = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_80);
+	u16 tx_mcs_80 = le16_to_cpu(own_he_cap->he_mcs_nss_supp.tx_mcs_80);
+	u16 tx_mcs_160 = le16_to_cpu(own_he_cap->he_mcs_nss_supp.tx_mcs_160);
+	int i;
+	u8 nss = link_sta->rx_nss;
+
+	/* the station supports only a single receive chain */
+	if (link_sta->smps_mode == IEEE80211_SMPS_STATIC)
+		nss = 1;
+
+	for (i = 0; i < nss && i < IWL_TLC_NSS_MAX; i++) {
+		u16 _mcs_160 = (mcs_160 >> (2 * i)) & 0x3;
+		u16 _mcs_80 = (mcs_80 >> (2 * i)) & 0x3;
+		u16 _tx_mcs_160 = (tx_mcs_160 >> (2 * i)) & 0x3;
+		u16 _tx_mcs_80 = (tx_mcs_80 >> (2 * i)) & 0x3;
+
+		/* If one side doesn't support - mark both as not supporting */
+		if (_mcs_80 == IEEE80211_HE_MCS_NOT_SUPPORTED ||
+		    _tx_mcs_80 == IEEE80211_HE_MCS_NOT_SUPPORTED) {
+			_mcs_80 = IEEE80211_HE_MCS_NOT_SUPPORTED;
+			_tx_mcs_80 = IEEE80211_HE_MCS_NOT_SUPPORTED;
+		}
+		if (_mcs_80 > _tx_mcs_80)
+			_mcs_80 = _tx_mcs_80;
+		cmd->ht_rates[i][IWL_TLC_MCS_PER_BW_80] =
+			cpu_to_le16(iwl_mld_he_mac80211_mcs_to_fw_mcs(_mcs_80));
+
+		/* If one side doesn't support - mark both as not supporting */
+		if (_mcs_160 == IEEE80211_HE_MCS_NOT_SUPPORTED ||
+		    _tx_mcs_160 == IEEE80211_HE_MCS_NOT_SUPPORTED) {
+			_mcs_160 = IEEE80211_HE_MCS_NOT_SUPPORTED;
+			_tx_mcs_160 = IEEE80211_HE_MCS_NOT_SUPPORTED;
+		}
+		if (_mcs_160 > _tx_mcs_160)
+			_mcs_160 = _tx_mcs_160;
+		cmd->ht_rates[i][IWL_TLC_MCS_PER_BW_160] =
+			cpu_to_le16(iwl_mld_he_mac80211_mcs_to_fw_mcs(_mcs_160));
+	}
+}
+
+static void iwl_mld_set_eht_mcs(__le16 ht_rates[][3],
+				enum IWL_TLC_MCS_PER_BW bw,
+				u8 max_nss, u16 mcs_msk)
+{
+	if (max_nss >= 2)
+		ht_rates[IWL_TLC_NSS_2][bw] |= cpu_to_le16(mcs_msk);
+
+	if (max_nss >= 1)
+		ht_rates[IWL_TLC_NSS_1][bw] |= cpu_to_le16(mcs_msk);
+}
+
+static const
+struct ieee80211_eht_mcs_nss_supp_bw *
+iwl_mld_get_eht_mcs_of_bw(enum IWL_TLC_MCS_PER_BW bw,
+			  const struct ieee80211_eht_mcs_nss_supp *eht_mcs)
+{
+	switch (bw) {
+	case IWL_TLC_MCS_PER_BW_80:
+		return &eht_mcs->bw._80;
+	case IWL_TLC_MCS_PER_BW_160:
+		return &eht_mcs->bw._160;
+	case IWL_TLC_MCS_PER_BW_320:
+		return &eht_mcs->bw._320;
+	default:
+		return NULL;
+	}
+}
+
+static u8 iwl_mld_get_eht_max_nss(u8 rx_nss, u8 tx_nss)
+{
+	u8 tx = u8_get_bits(tx_nss, IEEE80211_EHT_MCS_NSS_TX);
+	u8 rx = u8_get_bits(rx_nss, IEEE80211_EHT_MCS_NSS_RX);
+	/* The max NSS that can be used is the min of our TX capability
+	 * and the peer's RX capability;
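+	 * e.g. our TX advertising 2 spatial streams against a peer RX
+	 * of 1 gives min(2, 1) = 1.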
+	 */
+	return min(tx, rx);
+}
+
+#define MAX_NSS_MCS(mcs_num, rx, tx) \
+	iwl_mld_get_eht_max_nss((rx)->rx_tx_mcs ##mcs_num## _max_nss,	\
+				(tx)->rx_tx_mcs ##mcs_num## _max_nss)
+
+static void
+iwl_mld_fill_eht_rates(struct ieee80211_vif *vif,
+		       const struct ieee80211_link_sta *link_sta,
+		       const struct ieee80211_sta_he_cap *own_he_cap,
+		       const struct ieee80211_sta_eht_cap *own_eht_cap,
+		       struct iwl_tlc_config_cmd_v4 *cmd)
+{
+	/* peer RX mcs capa */
+	const struct ieee80211_eht_mcs_nss_supp *eht_rx_mcs =
+		&link_sta->eht_cap.eht_mcs_nss_supp;
+	/* our TX mcs capa */
+	const struct ieee80211_eht_mcs_nss_supp *eht_tx_mcs =
+		&own_eht_cap->eht_mcs_nss_supp;
+
+	enum IWL_TLC_MCS_PER_BW bw;
+	struct ieee80211_eht_mcs_nss_supp_20mhz_only mcs_rx_20;
+	struct ieee80211_eht_mcs_nss_supp_20mhz_only mcs_tx_20;
+
+	/* peer is 20 MHz only */
+	if (vif->type == NL80211_IFTYPE_AP &&
+	    !(link_sta->he_cap.he_cap_elem.phy_cap_info[0] &
+	      IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_MASK_ALL)) {
+		mcs_rx_20 = eht_rx_mcs->only_20mhz;
+	} else {
+		mcs_rx_20.rx_tx_mcs7_max_nss =
+			eht_rx_mcs->bw._80.rx_tx_mcs9_max_nss;
+		mcs_rx_20.rx_tx_mcs9_max_nss =
+			eht_rx_mcs->bw._80.rx_tx_mcs9_max_nss;
+		mcs_rx_20.rx_tx_mcs11_max_nss =
+			eht_rx_mcs->bw._80.rx_tx_mcs11_max_nss;
+		mcs_rx_20.rx_tx_mcs13_max_nss =
+			eht_rx_mcs->bw._80.rx_tx_mcs13_max_nss;
+	}
+
+	/* NIC is capable of 20 MHz only */
+	if (!(own_he_cap->he_cap_elem.phy_cap_info[0] &
+	      IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_MASK_ALL)) {
+		mcs_tx_20 = eht_tx_mcs->only_20mhz;
+	} else {
+		mcs_tx_20.rx_tx_mcs7_max_nss =
+			eht_tx_mcs->bw._80.rx_tx_mcs9_max_nss;
+		mcs_tx_20.rx_tx_mcs9_max_nss =
+			eht_tx_mcs->bw._80.rx_tx_mcs9_max_nss;
+		mcs_tx_20.rx_tx_mcs11_max_nss =
+			eht_tx_mcs->bw._80.rx_tx_mcs11_max_nss;
+		mcs_tx_20.rx_tx_mcs13_max_nss =
+			eht_tx_mcs->bw._80.rx_tx_mcs13_max_nss;
+	}
+
+	/* rates for 20/40/80 MHz */
+	bw = IWL_TLC_MCS_PER_BW_80;
+	iwl_mld_set_eht_mcs(cmd->ht_rates, bw,
+			    MAX_NSS_MCS(7, &mcs_rx_20, &mcs_tx_20),
+			    GENMASK(7, 0));
+	iwl_mld_set_eht_mcs(cmd->ht_rates, bw,
+			    MAX_NSS_MCS(9, &mcs_rx_20, &mcs_tx_20),
+			    GENMASK(9, 8));
+	iwl_mld_set_eht_mcs(cmd->ht_rates, bw,
+			    MAX_NSS_MCS(11, &mcs_rx_20, &mcs_tx_20),
+			    GENMASK(11, 10));
+	iwl_mld_set_eht_mcs(cmd->ht_rates, bw,
+			    MAX_NSS_MCS(13, &mcs_rx_20, &mcs_tx_20),
+			    GENMASK(13, 12));
+
+	/* rates for 160/320 MHz */
+	for (bw = IWL_TLC_MCS_PER_BW_160; bw <= IWL_TLC_MCS_PER_BW_320; bw++) {
+		const struct ieee80211_eht_mcs_nss_supp_bw *mcs_rx =
+			iwl_mld_get_eht_mcs_of_bw(bw, eht_rx_mcs);
+		const struct ieee80211_eht_mcs_nss_supp_bw *mcs_tx =
+			iwl_mld_get_eht_mcs_of_bw(bw, eht_tx_mcs);
+
+		/* got unsupported index for bw */
+		if (!mcs_rx || !mcs_tx)
+			continue;
+
+		/* break out if we don't support the bandwidth */
+		if (cmd->max_ch_width < (bw + IWL_TLC_MNG_CH_WIDTH_80MHZ))
+			break;
+
+		iwl_mld_set_eht_mcs(cmd->ht_rates, bw,
+				    MAX_NSS_MCS(9, mcs_rx, mcs_tx),
+				    GENMASK(9, 0));
+		iwl_mld_set_eht_mcs(cmd->ht_rates, bw,
+				    MAX_NSS_MCS(11, mcs_rx, mcs_tx),
+				    GENMASK(11, 10));
+		iwl_mld_set_eht_mcs(cmd->ht_rates, bw,
+				    MAX_NSS_MCS(13, mcs_rx, mcs_tx),
+				    GENMASK(13, 12));
+	}
+
+	/* the station supports only a single receive chain */
+	if (link_sta->smps_mode == IEEE80211_SMPS_STATIC ||
+	    link_sta->rx_nss < 2)
+		memset(cmd->ht_rates[IWL_TLC_NSS_2], 0,
+		       sizeof(cmd->ht_rates[IWL_TLC_NSS_2]));
+}
+
+static void
+iwl_mld_fill_supp_rates(struct iwl_mld *mld, struct ieee80211_vif *vif,
+			struct ieee80211_link_sta *link_sta,
+			struct ieee80211_supported_band *sband,
+			const struct ieee80211_sta_he_cap *own_he_cap,
+			const struct ieee80211_sta_eht_cap *own_eht_cap,
+			struct iwl_tlc_config_cmd_v4 *cmd)
+{
+	int i;
+	u16 non_ht_rates = 0;
+	unsigned long rates_bitmap;
+	const struct ieee80211_sta_ht_cap *ht_cap = &link_sta->ht_cap;
+	const struct ieee80211_sta_vht_cap *vht_cap = &link_sta->vht_cap;
+	const struct ieee80211_sta_he_cap *he_cap = &link_sta->he_cap;
+
+	/* non HT rates */
+	rates_bitmap = link_sta->supp_rates[sband->band];
+	for_each_set_bit(i, &rates_bitmap, BITS_PER_LONG)
+		non_ht_rates |= BIT(sband->bitrates[i].hw_value);
+
+	cmd->non_ht_rates = cpu_to_le16(non_ht_rates);
+	cmd->mode = IWL_TLC_MNG_MODE_NON_HT;
+
+	if (link_sta->eht_cap.has_eht && own_he_cap && own_eht_cap) {
+		cmd->mode = IWL_TLC_MNG_MODE_EHT;
+		iwl_mld_fill_eht_rates(vif, link_sta, own_he_cap,
+				       own_eht_cap, cmd);
+	} else if (he_cap->has_he && own_he_cap) {
+		cmd->mode = IWL_TLC_MNG_MODE_HE;
+		iwl_mld_fill_he_rates(link_sta, own_he_cap, cmd);
+	} else if (vht_cap->vht_supported) {
+		cmd->mode = IWL_TLC_MNG_MODE_VHT;
+		iwl_mld_fill_vht_rates(link_sta, vht_cap, cmd);
+	} else if (ht_cap->ht_supported) {
+		cmd->mode = IWL_TLC_MNG_MODE_HT;
+		cmd->ht_rates[IWL_TLC_NSS_1][IWL_TLC_MCS_PER_BW_80] =
+			cpu_to_le16(ht_cap->mcs.rx_mask[0]);
+
+		/* the station supports only a single receive chain */
+		if (link_sta->smps_mode == IEEE80211_SMPS_STATIC)
+			cmd->ht_rates[IWL_TLC_NSS_2][IWL_TLC_MCS_PER_BW_80] =
+				0;
+		else
+			cmd->ht_rates[IWL_TLC_NSS_2][IWL_TLC_MCS_PER_BW_80] =
+				cpu_to_le16(ht_cap->mcs.rx_mask[1]);
+	}
+}
+
+static void iwl_mld_send_tlc_cmd(struct iwl_mld *mld,
+				 struct ieee80211_vif *vif,
+				 struct ieee80211_link_sta *link_sta,
+				 enum nl80211_band band)
+{
+	struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(link_sta->sta);
+	struct ieee80211_supported_band *sband = mld->hw->wiphy->bands[band];
+	const struct ieee80211_sta_he_cap *own_he_cap =
+		ieee80211_get_he_iftype_cap_vif(sband, vif);
+	const struct ieee80211_sta_eht_cap *own_eht_cap =
+		ieee80211_get_eht_iftype_cap_vif(sband, vif);
+	struct iwl_tlc_config_cmd_v4 cmd = {
+		/* For AP mode, use 20 MHz until the STA is authorized */
+		.max_ch_width = mld_sta->sta_state > IEEE80211_STA_ASSOC ?
+			iwl_mld_fw_bw_from_sta_bw(link_sta) :
+			IWL_TLC_MNG_CH_WIDTH_20MHZ,
+		.flags = iwl_mld_get_tlc_cmd_flags(mld, vif, link_sta,
+						   own_he_cap, own_eht_cap),
+		.chains = iwl_mld_get_fw_chains(mld),
+		.sgi_ch_width_supp = iwl_mld_get_fw_sgi(link_sta),
+		.max_mpdu_len = cpu_to_le16(link_sta->agg.max_amsdu_len),
+	};
+	int fw_sta_id = iwl_mld_fw_sta_id_from_link_sta(mld, link_sta);
+	int ret;
+
+	if (fw_sta_id < 0)
+		return;
+
+	cmd.sta_id = fw_sta_id;
+
+	iwl_mld_fill_supp_rates(mld, vif, link_sta, sband,
+				own_he_cap, own_eht_cap,
+				&cmd);
+
+	IWL_DEBUG_RATE(mld,
+		       "TLC CONFIG CMD, sta_id=%d, max_ch_width=%d, mode=%d\n",
+		       cmd.sta_id, cmd.max_ch_width, cmd.mode);
+
+	/* Send async since this can be called within an RCU read-side section */
+	ret = iwl_mld_send_cmd_with_flags_pdu(mld, WIDE_ID(DATA_PATH_GROUP,
+							   TLC_MNG_CONFIG_CMD),
+					      CMD_ASYNC, &cmd);
+	if (ret)
+		IWL_ERR(mld, "Failed to send TLC cmd (%d)\n", ret);
+}
+
+int iwl_mld_send_tlc_dhc(struct iwl_mld *mld, u8 sta_id, u32 type, u32 data)
+{
+	struct {
+		struct iwl_dhc_cmd dhc;
+		struct iwl_dhc_tlc_cmd tlc;
+	} __packed cmd = {
+		.tlc.sta_id = sta_id,
+		.tlc.type = cpu_to_le32(type),
+		.tlc.data[0] = cpu_to_le32(data),
+		.dhc.length = cpu_to_le32(sizeof(cmd.tlc) >> 2),
+		.dhc.index_and_mask =
+			cpu_to_le32(DHC_TABLE_INTEGRATION | DHC_TARGET_UMAC |
+				    DHC_INTEGRATION_TLC_DEBUG_CONFIG),
+	};
+	int ret;
+
+	ret = iwl_mld_send_cmd_with_flags_pdu(mld,
+					      WIDE_ID(IWL_ALWAYS_LONG_GROUP,
+						      DEBUG_HOST_COMMAND),
+					      CMD_ASYNC, &cmd);
+	IWL_DEBUG_RATE(mld, "sta_id %d, type: 0x%X, value: 0x%X, ret: %d\n",
+		       sta_id, type, data, ret);
+	return ret;
+}
+
+void iwl_mld_config_tlc_link(struct iwl_mld *mld,
+			     struct ieee80211_vif *vif,
+			     struct ieee80211_bss_conf *link_conf,
+			     struct ieee80211_link_sta *link_sta)
+{
+	struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(link_sta->sta);
+	enum nl80211_band band;
+
+	if (WARN_ON_ONCE(!link_conf->chanreq.oper.chan))
+		return;
+
+	/* Before we have information about a station, configure the A-MSDU RC
+	 * limit such that iwlmld and mac80211 would not be allowed to build
+	 * A-MSDUs.
+	 */
+	if (mld_sta->sta_state < IEEE80211_STA_ASSOC) {
+		link_sta->agg.max_rc_amsdu_len = 1;
+		ieee80211_sta_recalc_aggregates(link_sta->sta);
+	}
+
+	band = link_conf->chanreq.oper.chan->band;
+	iwl_mld_send_tlc_cmd(mld, vif, link_sta, band);
+}
+
+void iwl_mld_config_tlc(struct iwl_mld *mld, struct ieee80211_vif *vif,
+			struct ieee80211_sta *sta)
+{
+	struct ieee80211_bss_conf *link;
+	int link_id;
+
+	lockdep_assert_wiphy(mld->wiphy);
+
+	for_each_vif_active_link(vif, link, link_id) {
+		struct ieee80211_link_sta *link_sta =
+			link_sta_dereference_check(sta, link_id);
+
+		if (!link || !link_sta)
+			continue;
+
+		iwl_mld_config_tlc_link(mld, vif, link, link_sta);
+	}
+}
+
+static u16
+iwl_mld_get_amsdu_size_of_tid(struct iwl_mld *mld,
+			      struct ieee80211_link_sta *link_sta,
+			      unsigned int tid)
+{
+	struct ieee80211_sta *sta = link_sta->sta;
+	struct ieee80211_vif *vif = iwl_mld_sta_from_mac80211(sta)->vif;
+	const u8 tid_to_mac80211_ac[] = {
+		IEEE80211_AC_BE,
+		IEEE80211_AC_BK,
+		IEEE80211_AC_BK,
+		IEEE80211_AC_BE,
+		IEEE80211_AC_VI,
+		IEEE80211_AC_VI,
+		IEEE80211_AC_VO,
+		IEEE80211_AC_VO,
+	};
+	unsigned int result = link_sta->agg.max_rc_amsdu_len;
+	u8 ac, txf, lmac;
+
+	lockdep_assert_wiphy(mld->wiphy);
+
+	/* Don't send an AMSDU that will be longer than the TXF.
+	 * Add a security margin of 256 for the TX command + headers.
+	 * We also want to have the start of the next packet inside the
+	 * fifo to be able to send bursts.
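+	 * e.g. with a 16384-byte TXF, the per-TID A-MSDU limit becomes
+	 * 16384 - 256 = 16128 bytes (illustrative numbers).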
+ */ + + if (WARN_ON(tid >= ARRAY_SIZE(tid_to_mac80211_ac))) + return 0; + + ac = tid_to_mac80211_ac[tid]; + + /* For HE redirect to trigger based fifos */ + if (link_sta->he_cap.has_he) + ac += 4; + + txf = iwl_mld_mac80211_ac_to_fw_tx_fifo(ac); + + /* Only one link: take the lmac according to the band */ + if (hweight16(sta->valid_links) <= 1) { + enum nl80211_band band; + struct ieee80211_bss_conf *link = + wiphy_dereference(mld->wiphy, + vif->link_conf[link_sta->link_id]); + + if (WARN_ON(!link || !link->chanreq.oper.chan)) + band = NL80211_BAND_2GHZ; + else + band = link->chanreq.oper.chan->band; + lmac = iwl_mld_get_lmac_id(mld, band); + + /* More than one link but with 2 lmacs: take the minimum */ + } else if (fw_has_capa(&mld->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_CDB_SUPPORT)) { + lmac = IWL_LMAC_5G_INDEX; + result = min_t(unsigned int, result, + mld->fwrt.smem_cfg.lmac[lmac].txfifo_size[txf] - 256); + lmac = IWL_LMAC_24G_INDEX; + /* More than one link but only one lmac */ + } else { + lmac = IWL_LMAC_24G_INDEX; + } + + return min_t(unsigned int, result, + mld->fwrt.smem_cfg.lmac[lmac].txfifo_size[txf] - 256); +} + +void iwl_mld_handle_tlc_notif(struct iwl_mld *mld, + struct iwl_rx_packet *pkt) +{ + struct iwl_tlc_update_notif *notif = (void *)pkt->data; + struct ieee80211_link_sta *link_sta; + u32 flags = le32_to_cpu(notif->flags); + u32 enabled; + u16 size; + + if (IWL_FW_CHECK(mld, notif->sta_id >= mld->fw->ucode_capa.num_stations, + "Invalid sta id (%d) in TLC notification\n", + notif->sta_id)) + return; + + link_sta = wiphy_dereference(mld->wiphy, + mld->fw_id_to_link_sta[notif->sta_id]); + + if (WARN(IS_ERR_OR_NULL(link_sta), + "link_sta of sta id (%d) doesn't exist\n", notif->sta_id)) + return; + + if (flags & IWL_TLC_NOTIF_FLAG_RATE) { + struct iwl_mld_link_sta *mld_link_sta = + iwl_mld_link_sta_from_mac80211(link_sta); + char pretty_rate[100]; + + if (WARN_ON(!mld_link_sta)) + return; + + mld_link_sta->last_rate_n_flags = le32_to_cpu(notif->rate); + + rs_pretty_print_rate(pretty_rate, sizeof(pretty_rate), + mld_link_sta->last_rate_n_flags); + IWL_DEBUG_RATE(mld, "TLC notif: new rate = %s\n", pretty_rate); + } + + /* We are done processing the notif */ + if (!(flags & IWL_TLC_NOTIF_FLAG_AMSDU)) + return; + + enabled = le32_to_cpu(notif->amsdu_enabled); + size = le32_to_cpu(notif->amsdu_size); + + if (size < 2000) { + size = 0; + enabled = 0; + } + + if (IWL_FW_CHECK(mld, size > link_sta->agg.max_amsdu_len, + "Invalid AMSDU len in TLC notif: %d (Max AMSDU len: %d)\n", + size, link_sta->agg.max_amsdu_len)) + return; + + link_sta->agg.max_rc_amsdu_len = size; + + for (int i = 0; i < IWL_MAX_TID_COUNT; i++) { + if (enabled & BIT(i)) + link_sta->agg.max_tid_amsdu_len[i] = + iwl_mld_get_amsdu_size_of_tid(mld, link_sta, i); + else + link_sta->agg.max_tid_amsdu_len[i] = 1; + } + + ieee80211_sta_recalc_aggregates(link_sta->sta); + + IWL_DEBUG_RATE(mld, + "AMSDU update. 
AMSDU size: %d, AMSDU selected size: %d, AMSDU TID bitmap 0x%X\n", + le32_to_cpu(notif->amsdu_size), size, enabled); +} diff --git a/drivers/net/wireless/intel/iwlwifi/mld/tlc.h b/drivers/net/wireless/intel/iwlwifi/mld/tlc.h new file mode 100644 index 0000000000000..c32f42e8840bb --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mld/tlc.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * Copyright (C) 2024 Intel Corporation + */ +#ifndef __iwl_mld_tlc_h__ +#define __iwl_mld_tlc_h__ + +#include "mld.h" + +void iwl_mld_config_tlc_link(struct iwl_mld *mld, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf, + struct ieee80211_link_sta *link_sta); + +void iwl_mld_config_tlc(struct iwl_mld *mld, struct ieee80211_vif *vif, + struct ieee80211_sta *sta); + +void iwl_mld_handle_tlc_notif(struct iwl_mld *mld, + struct iwl_rx_packet *pkt); + +int iwl_mld_send_tlc_dhc(struct iwl_mld *mld, u8 sta_id, u32 type, u32 data); + +#endif /* __iwl_mld_tlc_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mld/tx.c b/drivers/net/wireless/intel/iwlwifi/mld/tx.c new file mode 100644 index 0000000000000..543abe72e4652 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mld/tx.c @@ -0,0 +1,1374 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2024 - 2025 Intel Corporation + */ +#include + +#include "tx.h" +#include "sta.h" +#include "hcmd.h" +#include "iwl-utils.h" +#include "iface.h" + +#include "fw/dbg.h" + +#include "fw/api/tx.h" +#include "fw/api/rs.h" +#include "fw/api/txq.h" +#include "fw/api/datapath.h" +#include "fw/api/time-event.h" + +#define MAX_ANT_NUM 2 + +/* Toggles between TX antennas. Receives the bitmask of valid TX antennas and + * the *index* used for the last TX, and returns the next valid *index* to use. + * In order to set it in the tx_cmd, must do BIT(idx). + */ +static u8 iwl_mld_next_ant(u8 valid, u8 last_idx) +{ + u8 index = last_idx; + + for (int i = 0; i < MAX_ANT_NUM; i++) { + index = (index + 1) % MAX_ANT_NUM; + if (valid & BIT(index)) + return index; + } + + WARN_ONCE(1, "Failed to toggle between antennas 0x%x", valid); + + return last_idx; +} + +void iwl_mld_toggle_tx_ant(struct iwl_mld *mld, u8 *ant) +{ + *ant = iwl_mld_next_ant(iwl_mld_get_valid_tx_ant(mld), *ant); +} + +static int +iwl_mld_get_queue_size(struct iwl_mld *mld, struct ieee80211_txq *txq) +{ + struct ieee80211_sta *sta = txq->sta; + struct ieee80211_link_sta *link_sta; + unsigned int link_id; + int max_size = IWL_DEFAULT_QUEUE_SIZE; + + lockdep_assert_wiphy(mld->wiphy); + + for_each_sta_active_link(txq->vif, sta, link_sta, link_id) { + if (link_sta->eht_cap.has_eht) { + max_size = IWL_DEFAULT_QUEUE_SIZE_EHT; + break; + } + + if (link_sta->he_cap.has_he) + max_size = IWL_DEFAULT_QUEUE_SIZE_HE; + } + + return max_size; +} + +static int iwl_mld_allocate_txq(struct iwl_mld *mld, struct ieee80211_txq *txq) +{ + u8 tid = txq->tid == IEEE80211_NUM_TIDS ? IWL_MGMT_TID : txq->tid; + u32 fw_sta_mask = iwl_mld_fw_sta_id_mask(mld, txq->sta); + /* We can't know when the station is asleep or awake, so we + * must disable the queue hang detection. + */ + unsigned int watchdog_timeout = txq->vif->type == NL80211_IFTYPE_AP ? 
+					IWL_WATCHDOG_DISABLED :
+					mld->trans->trans_cfg->base_params->wd_timeout;
+	int queue, size;
+
+	lockdep_assert_wiphy(mld->wiphy);
+
+	if (tid == IWL_MGMT_TID)
+		size = max_t(u32, IWL_MGMT_QUEUE_SIZE,
+			     mld->trans->cfg->min_txq_size);
+	else
+		size = iwl_mld_get_queue_size(mld, txq);
+
+	queue = iwl_trans_txq_alloc(mld->trans, 0, fw_sta_mask, tid, size,
+				    watchdog_timeout);
+
+	if (queue >= 0)
+		IWL_DEBUG_TX_QUEUES(mld,
+				    "Enabling TXQ #%d for sta mask 0x%x tid %d\n",
+				    queue, fw_sta_mask, tid);
+	return queue;
+}
+
+static int iwl_mld_add_txq(struct iwl_mld *mld, struct ieee80211_txq *txq)
+{
+	struct iwl_mld_txq *mld_txq = iwl_mld_txq_from_mac80211(txq);
+	int id;
+
+	lockdep_assert_wiphy(mld->wiphy);
+
+	/* This will also send the SCD_QUEUE_CONFIG_CMD */
+	id = iwl_mld_allocate_txq(mld, txq);
+	if (id < 0)
+		return id;
+
+	mld_txq->fw_id = id;
+	mld_txq->status.allocated = true;
+
+	rcu_assign_pointer(mld->fw_id_to_txq[id], txq);
+
+	return 0;
+}
+
+void iwl_mld_add_txq_list(struct iwl_mld *mld)
+{
+	lockdep_assert_wiphy(mld->wiphy);
+
+	while (!list_empty(&mld->txqs_to_add)) {
+		struct ieee80211_txq *txq;
+		struct iwl_mld_txq *mld_txq =
+			list_first_entry(&mld->txqs_to_add, struct iwl_mld_txq,
+					 list);
+		int failed;
+
+		txq = container_of((void *)mld_txq, struct ieee80211_txq,
+				   drv_priv);
+
+		failed = iwl_mld_add_txq(mld, txq);
+
+		local_bh_disable();
+		spin_lock(&mld->add_txqs_lock);
+		list_del_init(&mld_txq->list);
+		spin_unlock(&mld->add_txqs_lock);
+		/* If the queue allocation failed, we can't transmit. Leave the
+		 * frames on the txq; maybe a later attempt to allocate the
+		 * queue will succeed.
+		 */
+		if (!failed)
+			iwl_mld_tx_from_txq(mld, txq);
+		local_bh_enable();
+	}
+}
+
+void iwl_mld_add_txqs_wk(struct wiphy *wiphy, struct wiphy_work *wk)
+{
+	struct iwl_mld *mld = container_of(wk, struct iwl_mld,
+					   add_txqs_wk);
+
+	/* will reschedule to run after restart */
+	if (mld->fw_status.in_hw_restart)
+		return;
+
+	iwl_mld_add_txq_list(mld);
+}
+
+void
+iwl_mld_free_txq(struct iwl_mld *mld, u32 fw_sta_mask, u32 tid, u32 queue_id)
+{
+	struct iwl_scd_queue_cfg_cmd remove_cmd = {
+		.operation = cpu_to_le32(IWL_SCD_QUEUE_REMOVE),
+		.u.remove.tid = cpu_to_le32(tid),
+		.u.remove.sta_mask = cpu_to_le32(fw_sta_mask),
+	};
+
+	iwl_mld_send_cmd_pdu(mld,
+			     WIDE_ID(DATA_PATH_GROUP, SCD_QUEUE_CONFIG_CMD),
+			     &remove_cmd);
+
+	iwl_trans_txq_free(mld->trans, queue_id);
+}
+
+void iwl_mld_remove_txq(struct iwl_mld *mld, struct ieee80211_txq *txq)
+{
+	struct iwl_mld_txq *mld_txq = iwl_mld_txq_from_mac80211(txq);
+	u32 sta_msk, tid;
+
+	lockdep_assert_wiphy(mld->wiphy);
+
+	spin_lock_bh(&mld->add_txqs_lock);
+	if (!list_empty(&mld_txq->list))
+		list_del_init(&mld_txq->list);
+	spin_unlock_bh(&mld->add_txqs_lock);
+
+	if (!mld_txq->status.allocated ||
+	    WARN_ON(mld_txq->fw_id >= ARRAY_SIZE(mld->fw_id_to_txq)))
+		return;
+
+	sta_msk = iwl_mld_fw_sta_id_mask(mld, txq->sta);
+
+	tid = txq->tid == IEEE80211_NUM_TIDS ? IWL_MGMT_TID :
+					       txq->tid;
+
+	iwl_mld_free_txq(mld, sta_msk, tid, mld_txq->fw_id);
+
+	RCU_INIT_POINTER(mld->fw_id_to_txq[mld_txq->fw_id], NULL);
+	mld_txq->status.allocated = false;
+}
+
+#define OPT_HDR(type, skb, off) \
+	(type *)(skb_network_header(skb) + (off))
+
+static __le32
+iwl_mld_get_offload_assist(struct sk_buff *skb, bool amsdu)
+{
+	struct ieee80211_hdr *hdr = (void *)skb->data;
+	u16 mh_len = ieee80211_hdrlen(hdr->frame_control);
+	u16 offload_assist = 0;
+#if IS_ENABLED(CONFIG_INET)
+	u8 protocol = 0;
+
+	/* Do not compute checksum if already computed */
+	if (skb->ip_summed != CHECKSUM_PARTIAL)
+		goto out;
+
+	/* We do not expect to be requested to csum stuff we do not support */
+
+	/* TBD: do we also need to check
+	 * !(mvm->hw->netdev_features & IWL_TX_CSUM_NETIF_FLAGS) now that all
+	 * the devices we support have this flag?
+	 */
+	if (WARN_ONCE(skb->protocol != htons(ETH_P_IP) &&
+		      skb->protocol != htons(ETH_P_IPV6),
+		      "No support for requested checksum\n")) {
+		skb_checksum_help(skb);
+		goto out;
+	}
+
+	if (skb->protocol == htons(ETH_P_IP)) {
+		protocol = ip_hdr(skb)->protocol;
+	} else {
+#if IS_ENABLED(CONFIG_IPV6)
+		struct ipv6hdr *ipv6h =
+			(struct ipv6hdr *)skb_network_header(skb);
+		unsigned int off = sizeof(*ipv6h);
+
+		protocol = ipv6h->nexthdr;
+		while (protocol != NEXTHDR_NONE && ipv6_ext_hdr(protocol)) {
+			struct ipv6_opt_hdr *hp;
+
+			/* only supported extension headers */
+			if (protocol != NEXTHDR_ROUTING &&
+			    protocol != NEXTHDR_HOP &&
+			    protocol != NEXTHDR_DEST) {
+				skb_checksum_help(skb);
+				goto out;
+			}
+
+			hp = OPT_HDR(struct ipv6_opt_hdr, skb, off);
+			protocol = hp->nexthdr;
+			off += ipv6_optlen(hp);
+		}
+		/* if we get here - protocol now should be TCP/UDP */
+#endif
+	}
+
+	if (protocol != IPPROTO_TCP && protocol != IPPROTO_UDP) {
+		WARN_ON_ONCE(1);
+		skb_checksum_help(skb);
+		goto out;
+	}
+
+	/* enable L4 csum */
+	offload_assist |= BIT(TX_CMD_OFFLD_L4_EN);
+
+	/* Set offset to IP header (snap).
+	 * We don't support tunneling so no need to take care of inner header.
+	 * Size is in words.
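+	 * The SNAP header is 8 bytes long, so the fixed offset written
+	 * below is 8 / 2 = 4 words.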
+	 */
+	offload_assist |= (4 << TX_CMD_OFFLD_IP_HDR);
+
+	/* Do IPv4 csum for AMSDU only (no IP csum for IPv6) */
+	if (skb->protocol == htons(ETH_P_IP) && amsdu) {
+		ip_hdr(skb)->check = 0;
+		offload_assist |= BIT(TX_CMD_OFFLD_L3_EN);
+	}
+
+	/* reset UDP/TCP header csum */
+	if (protocol == IPPROTO_TCP)
+		tcp_hdr(skb)->check = 0;
+	else
+		udp_hdr(skb)->check = 0;
+
+out:
+#endif
+	mh_len /= 2;
+	offload_assist |= mh_len << TX_CMD_OFFLD_MH_SIZE;
+
+	if (amsdu)
+		offload_assist |= BIT(TX_CMD_OFFLD_AMSDU);
+	else if (ieee80211_hdrlen(hdr->frame_control) % 4)
+		/* padding is inserted later in transport */
+		offload_assist |= BIT(TX_CMD_OFFLD_PAD);
+
+	return cpu_to_le32(offload_assist);
+}
+
+static void iwl_mld_get_basic_rates_and_band(struct iwl_mld *mld,
+					     struct ieee80211_vif *vif,
+					     struct ieee80211_tx_info *info,
+					     unsigned long *basic_rates,
+					     u8 *band)
+{
+	u32 link_id = u32_get_bits(info->control.flags,
+				   IEEE80211_TX_CTRL_MLO_LINK);
+
+	*basic_rates = vif->bss_conf.basic_rates;
+	*band = info->band;
+
+	if (link_id == IEEE80211_LINK_UNSPECIFIED &&
+	    ieee80211_vif_is_mld(vif)) {
+		/* shouldn't do this when >1 link is active */
+		WARN_ON(hweight16(vif->active_links) != 1);
+		link_id = __ffs(vif->active_links);
+	}
+
+	if (link_id < IEEE80211_LINK_UNSPECIFIED) {
+		struct ieee80211_bss_conf *link_conf;
+
+		rcu_read_lock();
+		link_conf = rcu_dereference(vif->link_conf[link_id]);
+		if (link_conf) {
+			*basic_rates = link_conf->basic_rates;
+			if (link_conf->chanreq.oper.chan)
+				*band = link_conf->chanreq.oper.chan->band;
+		}
+		rcu_read_unlock();
+	}
+}
+
+u8 iwl_mld_get_lowest_rate(struct iwl_mld *mld,
+			   struct ieee80211_tx_info *info,
+			   struct ieee80211_vif *vif)
+{
+	struct ieee80211_supported_band *sband;
+	u16 lowest_cck = IWL_RATE_COUNT, lowest_ofdm = IWL_RATE_COUNT;
+	unsigned long basic_rates;
+	u8 band, rate;
+	u32 i;
+
+	iwl_mld_get_basic_rates_and_band(mld, vif, info, &basic_rates, &band);
+
+	sband = mld->hw->wiphy->bands[band];
+	for_each_set_bit(i, &basic_rates, BITS_PER_LONG) {
+		u16 hw = sband->bitrates[i].hw_value;
+
+		if (hw >= IWL_FIRST_OFDM_RATE) {
+			if (lowest_ofdm > hw)
+				lowest_ofdm = hw;
+		} else if (lowest_cck > hw) {
+			lowest_cck = hw;
+		}
+	}
+
+	if (band == NL80211_BAND_2GHZ && !vif->p2p &&
+	    vif->type != NL80211_IFTYPE_P2P_DEVICE &&
+	    !(info->flags & IEEE80211_TX_CTL_NO_CCK_RATE)) {
+		if (lowest_cck != IWL_RATE_COUNT)
+			rate = lowest_cck;
+		else if (lowest_ofdm != IWL_RATE_COUNT)
+			rate = lowest_ofdm;
+		else
+			rate = IWL_FIRST_CCK_RATE;
+	} else if (lowest_ofdm != IWL_RATE_COUNT) {
+		rate = lowest_ofdm;
+	} else {
+		rate = IWL_FIRST_OFDM_RATE;
+	}
+
+	return rate;
+}
+
+static u32 iwl_mld_mac80211_rate_idx_to_fw(struct iwl_mld *mld,
+					   struct ieee80211_tx_info *info,
+					   int rate_idx)
+{
+	u32 rate_flags = 0;
+	u8 rate_plcp;
+
+	/* if the rate isn't a well-known legacy rate, take the lowest one */
+	if (rate_idx < 0 || rate_idx >= IWL_RATE_COUNT_LEGACY)
+		rate_idx = iwl_mld_get_lowest_rate(mld, info,
+						   info->control.vif);
+
+	WARN_ON_ONCE(rate_idx < 0);
+
+	/* Set CCK or OFDM flag */
+	if (rate_idx <= IWL_LAST_CCK_RATE)
+		rate_flags |= RATE_MCS_CCK_MSK;
+	else
+		rate_flags |= RATE_MCS_LEGACY_OFDM_MSK;
+
+	/* Legacy rates are indexed:
+	 * 0 - 3 for CCK and 0 - 7 for OFDM
+	 */
+	rate_plcp = (rate_idx >= IWL_FIRST_OFDM_RATE ?
+		     rate_idx - IWL_FIRST_OFDM_RATE : rate_idx);
+
+	return (u32)rate_plcp | rate_flags;
+}
+
+static u32 iwl_mld_get_tx_ant(struct iwl_mld *mld,
+			      struct ieee80211_tx_info *info,
+			      struct ieee80211_sta *sta, __le16 fc)
+{
+	if (sta && ieee80211_is_data(fc)) {
+		struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta);
+
+		return BIT(mld_sta->data_tx_ant) << RATE_MCS_ANT_POS;
+	}
+
+	return BIT(mld->mgmt_tx_ant) << RATE_MCS_ANT_POS;
+}
+
+static u32 iwl_mld_get_inject_tx_rate(struct iwl_mld *mld,
+				      struct ieee80211_tx_info *info,
+				      struct ieee80211_sta *sta,
+				      __le16 fc)
+{
+	struct ieee80211_tx_rate *rate = &info->control.rates[0];
+	u32 result;
+
+	/* we only care about legacy/HT/VHT so far, so we can
+	 * build in v1 and use iwl_new_rate_from_v1()
+	 * FIXME: in newer devices we only support the new rates, build
+	 * the rate_n_flags in the new format here instead of using v1 and
+	 * converting it.
+	 */
+
+	if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
+		u8 mcs = ieee80211_rate_get_vht_mcs(rate);
+		u8 nss = ieee80211_rate_get_vht_nss(rate);
+
+		result = RATE_MCS_VHT_MSK_V1;
+		result |= u32_encode_bits(mcs, RATE_VHT_MCS_RATE_CODE_MSK);
+		result |= u32_encode_bits(nss, RATE_MCS_NSS_MSK);
+
+		if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
+			result |= RATE_MCS_SGI_MSK_V1;
+
+		if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
+			result |= u32_encode_bits(1, RATE_MCS_CHAN_WIDTH_MSK_V1);
+		else if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH)
+			result |= u32_encode_bits(2, RATE_MCS_CHAN_WIDTH_MSK_V1);
+		else if (rate->flags & IEEE80211_TX_RC_160_MHZ_WIDTH)
+			result |= u32_encode_bits(3, RATE_MCS_CHAN_WIDTH_MSK_V1);
+
+		result = iwl_new_rate_from_v1(result);
+	} else if (rate->flags & IEEE80211_TX_RC_MCS) {
+		result = RATE_MCS_HT_MSK_V1;
+		result |= u32_encode_bits(rate->idx,
+					  RATE_HT_MCS_RATE_CODE_MSK_V1 |
+					  RATE_HT_MCS_NSS_MSK_V1);
+		if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
+			result |= RATE_MCS_SGI_MSK_V1;
+		if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
+			result |= u32_encode_bits(1, RATE_MCS_CHAN_WIDTH_MSK_V1);
+		if (info->flags & IEEE80211_TX_CTL_LDPC)
+			result |= RATE_MCS_LDPC_MSK_V1;
+		if (u32_get_bits(info->flags, IEEE80211_TX_CTL_STBC))
+			result |= RATE_MCS_STBC_MSK;
+
+		result = iwl_new_rate_from_v1(result);
+	} else {
+		result = iwl_mld_mac80211_rate_idx_to_fw(mld, info, rate->idx);
+	}
+
+	if (info->control.antennas)
+		result |= u32_encode_bits(info->control.antennas,
+					  RATE_MCS_ANT_AB_MSK);
+	else
+		result |= iwl_mld_get_tx_ant(mld, info, sta, fc);
+
+	return result;
+}
+
+static u32 iwl_mld_get_tx_rate_n_flags(struct iwl_mld *mld,
+				       struct ieee80211_tx_info *info,
+				       struct ieee80211_sta *sta, __le16 fc)
+{
+	if (unlikely(info->control.flags & IEEE80211_TX_CTRL_RATE_INJECT))
+		return iwl_mld_get_inject_tx_rate(mld, info, sta, fc);
+
+	return iwl_mld_mac80211_rate_idx_to_fw(mld, info, -1) |
+		iwl_mld_get_tx_ant(mld, info, sta, fc);
+}
+
+static void
+iwl_mld_fill_tx_cmd_hdr(struct iwl_tx_cmd_gen3 *tx_cmd,
+			struct sk_buff *skb, bool amsdu)
+{
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	struct ieee80211_hdr *hdr = (void *)skb->data;
+	struct ieee80211_vif *vif;
+
+	/* Copy MAC header from skb into command buffer */
+	memcpy(tx_cmd->hdr, hdr, ieee80211_hdrlen(hdr->frame_control));
+
+	if (!amsdu || !skb_is_gso(skb))
+		return;
+
+	/* As described in IEEE Std 802.11-2020, table 9-30 (Address
+	 * field contents), A-MSDU address 3 should contain the BSSID
+	 * address.
+ * + * In TSO, the skb header address 3 contains the original address 3 to + * correctly create all the A-MSDU subframes headers from it. + * Override now the address 3 in the command header with the BSSID. + * + * Note: we fill in the MLD address, but the firmware will do the + * necessary translation to link address after encryption. + */ + vif = info->control.vif; + switch (vif->type) { + case NL80211_IFTYPE_STATION: + ether_addr_copy(tx_cmd->hdr->addr3, vif->cfg.ap_addr); + break; + case NL80211_IFTYPE_AP: + ether_addr_copy(tx_cmd->hdr->addr3, vif->addr); + break; + default: + break; + } +} + +static void +iwl_mld_fill_tx_cmd(struct iwl_mld *mld, struct sk_buff *skb, + struct iwl_device_tx_cmd *dev_tx_cmd, + struct ieee80211_sta *sta) +{ + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ieee80211_hdr *hdr = (void *)skb->data; + struct iwl_mld_sta *mld_sta = sta ? iwl_mld_sta_from_mac80211(sta) : + NULL; + struct iwl_tx_cmd_gen3 *tx_cmd; + bool amsdu = ieee80211_is_data_qos(hdr->frame_control) && + (*ieee80211_get_qos_ctl(hdr) & + IEEE80211_QOS_CTL_A_MSDU_PRESENT); + u32 rate_n_flags = 0; + u16 flags = 0; + + dev_tx_cmd->hdr.cmd = TX_CMD; + + if (!info->control.hw_key) + flags |= IWL_TX_FLAGS_ENCRYPT_DIS; + + /* For data and mgmt packets rate info comes from the fw. + * Only set rate/antenna for injected frames with fixed rate, or + * when no sta is given. + */ + if (unlikely(!sta || + info->control.flags & IEEE80211_TX_CTRL_RATE_INJECT)) { + flags |= IWL_TX_FLAGS_CMD_RATE; + rate_n_flags = iwl_mld_get_tx_rate_n_flags(mld, info, sta, + hdr->frame_control); + } else if (!ieee80211_is_data(hdr->frame_control) || + (mld_sta && + mld_sta->sta_state < IEEE80211_STA_AUTHORIZED)) { + /* These are important frames */ + flags |= IWL_TX_FLAGS_HIGH_PRI; + } + + tx_cmd = (void *)dev_tx_cmd->payload; + + iwl_mld_fill_tx_cmd_hdr(tx_cmd, skb, amsdu); + + tx_cmd->offload_assist = iwl_mld_get_offload_assist(skb, amsdu); + + /* Total # bytes to be transmitted */ + tx_cmd->len = cpu_to_le16((u16)skb->len); + + tx_cmd->flags = cpu_to_le16(flags); + + tx_cmd->rate_n_flags = cpu_to_le32(rate_n_flags); +} + +/* Caller of this need to check that info->control.vif is not NULL */ +static struct iwl_mld_link * +iwl_mld_get_link_from_tx_info(struct ieee80211_tx_info *info) +{ + struct iwl_mld_vif *mld_vif = + iwl_mld_vif_from_mac80211(info->control.vif); + u32 link_id = u32_get_bits(info->control.flags, + IEEE80211_TX_CTRL_MLO_LINK); + + if (link_id == IEEE80211_LINK_UNSPECIFIED) { + if (info->control.vif->active_links) + link_id = ffs(info->control.vif->active_links) - 1; + else + link_id = 0; + } + + return rcu_dereference(mld_vif->link[link_id]); +} + +static int +iwl_mld_get_tx_queue_id(struct iwl_mld *mld, struct ieee80211_txq *txq, + struct sk_buff *skb) +{ + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ieee80211_hdr *hdr = (void *)skb->data; + __le16 fc = hdr->frame_control; + struct iwl_mld_vif *mld_vif; + struct iwl_mld_link *link; + + if (txq && txq->sta) + return iwl_mld_txq_from_mac80211(txq)->fw_id; + + if (!info->control.vif) + return IWL_MLD_INVALID_QUEUE; + + switch (info->control.vif->type) { + case NL80211_IFTYPE_AP: + case NL80211_IFTYPE_ADHOC: + link = iwl_mld_get_link_from_tx_info(info); + + if (WARN_ON(!link)) + break; + + /* ucast disassociate/deauth frames without a station might + * happen, especially with reason 7 ("Class 3 frame received + * from nonassociated STA"). 
+ */ + if (ieee80211_is_mgmt(fc) && + (!ieee80211_is_bufferable_mmpdu(skb) || + ieee80211_is_deauth(fc) || ieee80211_is_disassoc(fc))) + return link->bcast_sta.queue_id; + + if (is_multicast_ether_addr(hdr->addr1) && + !ieee80211_has_order(fc)) + return link->mcast_sta.queue_id; + + WARN_ONCE(info->control.vif->type != NL80211_IFTYPE_ADHOC, + "Couldn't find a TXQ. fc=0x%02x", le16_to_cpu(fc)); + return link->bcast_sta.queue_id; + case NL80211_IFTYPE_P2P_DEVICE: + mld_vif = iwl_mld_vif_from_mac80211(info->control.vif); + + if (mld_vif->roc_activity == ROC_NUM_ACTIVITIES) { + IWL_DEBUG_DROP(mld, "Drop tx outside ROC\n"); + return IWL_MLD_INVALID_DROP_TX; + } + + WARN_ON(!ieee80211_is_mgmt(fc)); + + return mld_vif->deflink.aux_sta.queue_id; + default: + /* TODO: consider monitor (task=monitor) */ + WARN_ONCE(1, "Unsupported vif type\n"); + break; + } + + return IWL_MLD_INVALID_QUEUE; +} + +static void iwl_mld_probe_resp_set_noa(struct iwl_mld *mld, + struct sk_buff *skb) +{ + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct iwl_mld_link *mld_link = + &iwl_mld_vif_from_mac80211(info->control.vif)->deflink; + struct iwl_probe_resp_data *resp_data; + u8 *pos; + + if (!info->control.vif->p2p) + return; + + rcu_read_lock(); + + resp_data = rcu_dereference(mld_link->probe_resp_data); + if (!resp_data) + goto out; + + if (!resp_data->notif.noa_active) + goto out; + + if (skb_tailroom(skb) < resp_data->noa_len) { + if (pskb_expand_head(skb, 0, resp_data->noa_len, GFP_ATOMIC)) { + IWL_ERR(mld, + "Failed to reallocate probe resp\n"); + goto out; + } + } + + pos = skb_put(skb, resp_data->noa_len); + + *pos++ = WLAN_EID_VENDOR_SPECIFIC; + /* Set length of IE body (not including ID and length itself) */ + *pos++ = resp_data->noa_len - 2; + *pos++ = (WLAN_OUI_WFA >> 16) & 0xff; + *pos++ = (WLAN_OUI_WFA >> 8) & 0xff; + *pos++ = WLAN_OUI_WFA & 0xff; + *pos++ = WLAN_OUI_TYPE_WFA_P2P; + + memcpy(pos, &resp_data->notif.noa_attr, + resp_data->noa_len - sizeof(struct ieee80211_vendor_ie)); + +out: + rcu_read_unlock(); +} + +/* This function must be called with BHs disabled */ +static int iwl_mld_tx_mpdu(struct iwl_mld *mld, struct sk_buff *skb, + struct ieee80211_txq *txq) +{ + struct ieee80211_hdr *hdr = (void *)skb->data; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ieee80211_sta *sta = txq ? 
txq->sta : NULL;
+	struct iwl_device_tx_cmd *dev_tx_cmd;
+	int queue = iwl_mld_get_tx_queue_id(mld, txq, skb);
+	u8 tid = IWL_MAX_TID_COUNT;
+
+	if (WARN_ONCE(queue == IWL_MLD_INVALID_QUEUE, "Invalid TX Queue id") ||
+	    queue == IWL_MLD_INVALID_DROP_TX)
+		return -1;
+
+	if (unlikely(ieee80211_is_any_nullfunc(hdr->frame_control)))
+		return -1;
+
+	dev_tx_cmd = iwl_trans_alloc_tx_cmd(mld->trans);
+	if (unlikely(!dev_tx_cmd))
+		return -1;
+
+	if (unlikely(ieee80211_is_probe_resp(hdr->frame_control))) {
+		if (IWL_MLD_NON_TRANSMITTING_AP)
+			return -1;
+
+		iwl_mld_probe_resp_set_noa(mld, skb);
+	}
+
+	iwl_mld_fill_tx_cmd(mld, skb, dev_tx_cmd, sta);
+
+	if (ieee80211_is_data(hdr->frame_control)) {
+		if (ieee80211_is_data_qos(hdr->frame_control))
+			tid = ieee80211_get_tid(hdr);
+		else
+			tid = IWL_TID_NON_QOS;
+	}
+
+	IWL_DEBUG_TX(mld, "TX TID:%d from Q:%d len %d\n",
+		     tid, queue, skb->len);
+
+	/* From now on, we cannot access info->control */
+	memset(&info->status, 0, sizeof(info->status));
+	memset(info->driver_data, 0, sizeof(info->driver_data));
+
+	info->driver_data[1] = dev_tx_cmd;
+
+	if (iwl_trans_tx(mld->trans, skb, dev_tx_cmd, queue))
+		goto err;
+
+	/* Update the low-latency counters when a packet is queued instead
+	 * of after TX; this makes sense for early low-latency detection
+	 */
+	if (sta)
+		iwl_mld_low_latency_update_counters(mld, hdr, sta, 0);
+
+	return 0;
+
+err:
+	iwl_trans_free_tx_cmd(mld->trans, dev_tx_cmd);
+	IWL_DEBUG_TX(mld, "TX from Q:%d dropped\n", queue);
+	return -1;
+}
+
+#ifdef CONFIG_INET
+
+/* This function handles the segmentation of a large TSO packet into multiple
+ * MPDUs, ensuring that the resulting segments conform to AMSDU limits and
+ * constraints.
+ */
+static int iwl_mld_tx_tso_segment(struct iwl_mld *mld, struct sk_buff *skb,
+				  struct ieee80211_sta *sta,
+				  struct sk_buff_head *mpdus_skbs)
+{
+	struct ieee80211_hdr *hdr = (void *)skb->data;
+	netdev_features_t netdev_flags = NETIF_F_CSUM_MASK | NETIF_F_SG;
+	unsigned int mss = skb_shinfo(skb)->gso_size;
+	unsigned int num_subframes, tcp_payload_len, subf_len;
+	u16 snap_ip_tcp, pad, max_tid_amsdu_len;
+	u8 tid;
+
+	snap_ip_tcp = 8 + skb_network_header_len(skb) + tcp_hdrlen(skb);
+
+	if (!ieee80211_is_data_qos(hdr->frame_control) ||
+	    !sta->cur->max_rc_amsdu_len)
+		return iwl_tx_tso_segment(skb, 1, netdev_flags, mpdus_skbs);
+
+	/* Do not build AMSDU for IPv6 with extension headers.
+	 * Ask the stack to segment and checksum the generated MPDUs for us.
+	 */
+	if (skb->protocol == htons(ETH_P_IPV6) &&
+	    ((struct ipv6hdr *)skb_network_header(skb))->nexthdr !=
+	    IPPROTO_TCP) {
+		netdev_flags &= ~NETIF_F_CSUM_MASK;
+		return iwl_tx_tso_segment(skb, 1, netdev_flags, mpdus_skbs);
+	}
+
+	tid = ieee80211_get_tid(hdr);
+	if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
+		return -EINVAL;
+
+	max_tid_amsdu_len = sta->cur->max_tid_amsdu_len[tid];
+	if (!max_tid_amsdu_len)
+		return iwl_tx_tso_segment(skb, 1, netdev_flags, mpdus_skbs);
+
+	/* Sub frame header + SNAP + IP header + TCP header + MSS */
+	subf_len = sizeof(struct ethhdr) + snap_ip_tcp + mss;
+	pad = (4 - subf_len) & 0x3;
+
+	/* If we have N subframes in the A-MSDU, then the A-MSDU's size is
+	 * N * subf_len + (N - 1) * pad.
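+	 * Requiring N * subf_len + (N - 1) * pad <= max_tid_amsdu_len and
+	 * solving for N gives N <= (max_tid_amsdu_len + pad) / (subf_len + pad),
+	 * which is the computation done below.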
+	 */
+	num_subframes = (max_tid_amsdu_len + pad) / (subf_len + pad);
+
+	if (sta->max_amsdu_subframes &&
+	    num_subframes > sta->max_amsdu_subframes)
+		num_subframes = sta->max_amsdu_subframes;
+
+	tcp_payload_len = skb_tail_pointer(skb) - skb_transport_header(skb) -
+		tcp_hdrlen(skb) + skb->data_len;
+
+	/* Make sure we have enough TBs for the A-MSDU:
+	 *	2 for each subframe
+	 *	1 more for each fragment
+	 *	1 more for the potential data in the header
+	 */
+	if ((num_subframes * 2 + skb_shinfo(skb)->nr_frags + 1) >
+	    mld->trans->max_skb_frags)
+		num_subframes = 1;
+
+	if (num_subframes > 1)
+		*ieee80211_get_qos_ctl(hdr) |= IEEE80211_QOS_CTL_A_MSDU_PRESENT;
+
+	/* This skb fits in one single A-MSDU */
+	if (tcp_payload_len <= num_subframes * mss) {
+		__skb_queue_tail(mpdus_skbs, skb);
+		return 0;
+	}
+
+	/* Trick the segmentation function to make it create SKBs that can fit
+	 * into one A-MSDU.
+	 */
+	return iwl_tx_tso_segment(skb, num_subframes, netdev_flags, mpdus_skbs);
+}
+
+/* Manages TSO (TCP Segmentation Offload) packet transmission by segmenting
+ * large packets when necessary and transmitting each segment as an MPDU.
+ */
+static int iwl_mld_tx_tso(struct iwl_mld *mld, struct sk_buff *skb,
+			  struct ieee80211_txq *txq)
+{
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	struct sk_buff *orig_skb = skb;
+	struct sk_buff_head mpdus_skbs;
+	unsigned int payload_len;
+	int ret;
+
+	if (WARN_ON(!txq || !txq->sta))
+		return -1;
+
+	payload_len = skb_tail_pointer(skb) - skb_transport_header(skb) -
+		tcp_hdrlen(skb) + skb->data_len;
+
+	if (payload_len <= skb_shinfo(skb)->gso_size)
+		return iwl_mld_tx_mpdu(mld, skb, txq);
+
+	if (!info->control.vif)
+		return -1;
+
+	__skb_queue_head_init(&mpdus_skbs);
+
+	ret = iwl_mld_tx_tso_segment(mld, skb, txq->sta, &mpdus_skbs);
+	if (ret)
+		return ret;
+
+	WARN_ON(skb_queue_empty(&mpdus_skbs));
+
+	while (!skb_queue_empty(&mpdus_skbs)) {
+		skb = __skb_dequeue(&mpdus_skbs);
+
+		ret = iwl_mld_tx_mpdu(mld, skb, txq);
+		if (!ret)
+			continue;
+
+		/* Free skbs created as part of TSO logic that have not yet
+		 * been dequeued
+		 */
+		__skb_queue_purge(&mpdus_skbs);
+
+		/* The skb here is not necessarily the same as the skb that
+		 * entered this method, so free it explicitly.
+		 */
+		if (skb == orig_skb)
+			ieee80211_free_txskb(mld->hw, skb);
+		else
+			kfree_skb(skb);
+
+		/* there was an error, but we consumed the skb one way or
+		 * another, so return 0
+		 */
+		return 0;
+	}
+
+	return 0;
+}
+#else
+static int iwl_mld_tx_tso(struct iwl_mld *mld, struct sk_buff *skb,
+			  struct ieee80211_txq *txq)
+{
+	/* Impossible to get TSO without CONFIG_INET */
+	WARN_ON(1);
+
+	return -1;
+}
+#endif /* CONFIG_INET */
+
+void iwl_mld_tx_skb(struct iwl_mld *mld, struct sk_buff *skb,
+		    struct ieee80211_txq *txq)
+{
+	if (skb_is_gso(skb)) {
+		if (!iwl_mld_tx_tso(mld, skb, txq))
+			return;
+		goto err;
+	}
+
+	if (likely(!iwl_mld_tx_mpdu(mld, skb, txq)))
+		return;
+
+err:
+	ieee80211_free_txskb(mld->hw, skb);
+}
+
+void iwl_mld_tx_from_txq(struct iwl_mld *mld, struct ieee80211_txq *txq)
+{
+	struct iwl_mld_txq *mld_txq = iwl_mld_txq_from_mac80211(txq);
+	struct sk_buff *skb = NULL;
+	u8 zero_addr[ETH_ALEN] = {};
+
+	/*
+	 * No need for threads to be pending here; they can leave the first
+	 * taker all the work.
+	 *
+	 * mld_txq->tx_request logic:
+	 *
+	 * If 0, no one is currently TXing, set to 1 to indicate current thread
+	 * will now start TX and other threads should quit.
+	 *
+	 * If 1, another thread is currently TXing, set to 2 to indicate to
+	 * that thread that there was another request.
Since that request may + * have raced with the check whether the queue is empty, the TXing + * thread should check the queue's status one more time before leaving. + * This check is done in order to not leave any TX hanging in the queue + * until the next TX invocation (which may not even happen). + * + * If 2, another thread is currently TXing, and it will already double + * check the queue, so do nothing. + */ + if (atomic_fetch_add_unless(&mld_txq->tx_request, 1, 2)) + return; + + rcu_read_lock(); + do { + while (likely(!mld_txq->status.stop_full) && + (skb = ieee80211_tx_dequeue(mld->hw, txq))) + iwl_mld_tx_skb(mld, skb, txq); + } while (atomic_dec_return(&mld_txq->tx_request)); + + IWL_DEBUG_TX(mld, "TXQ of sta %pM tid %d is now empty\n", + txq->sta ? txq->sta->addr : zero_addr, txq->tid); + + rcu_read_unlock(); +} + +static void iwl_mld_hwrate_to_tx_rate(u32 rate_n_flags, + struct ieee80211_tx_info *info) +{ + enum nl80211_band band = info->band; + struct ieee80211_tx_rate *tx_rate = &info->status.rates[0]; + u32 sgi = rate_n_flags & RATE_MCS_SGI_MSK; + u32 chan_width = rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK; + u32 format = rate_n_flags & RATE_MCS_MOD_TYPE_MSK; + + if (sgi) + tx_rate->flags |= IEEE80211_TX_RC_SHORT_GI; + + switch (chan_width) { + case RATE_MCS_CHAN_WIDTH_20: + break; + case RATE_MCS_CHAN_WIDTH_40: + tx_rate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH; + break; + case RATE_MCS_CHAN_WIDTH_80: + tx_rate->flags |= IEEE80211_TX_RC_80_MHZ_WIDTH; + break; + case RATE_MCS_CHAN_WIDTH_160: + tx_rate->flags |= IEEE80211_TX_RC_160_MHZ_WIDTH; + break; + default: + break; + } + + switch (format) { + case RATE_MCS_HT_MSK: + tx_rate->flags |= IEEE80211_TX_RC_MCS; + tx_rate->idx = RATE_HT_MCS_INDEX(rate_n_flags); + break; + case RATE_MCS_VHT_MSK: + ieee80211_rate_set_vht(tx_rate, + rate_n_flags & RATE_MCS_CODE_MSK, + FIELD_GET(RATE_MCS_NSS_MSK, + rate_n_flags) + 1); + tx_rate->flags |= IEEE80211_TX_RC_VHT_MCS; + break; + case RATE_MCS_HE_MSK: + /* mac80211 cannot do this without ieee80211_tx_status_ext() + * but it only matters for radiotap + */ + tx_rate->idx = 0; + break; + default: + tx_rate->idx = + iwl_mld_legacy_hw_idx_to_mac80211_idx(rate_n_flags, + band); + break; + } +} + +void iwl_mld_handle_tx_resp_notif(struct iwl_mld *mld, + struct iwl_rx_packet *pkt) +{ + struct iwl_tx_resp *tx_resp = (void *)pkt->data; + int txq_id = le16_to_cpu(tx_resp->tx_queue); + struct agg_tx_status *agg_status = &tx_resp->status; + u32 status = le16_to_cpu(agg_status->status); + u32 pkt_len = iwl_rx_packet_payload_len(pkt); + size_t notif_size = sizeof(*tx_resp) + sizeof(u32); + int sta_id = IWL_TX_RES_GET_RA(tx_resp->ra_tid); + int tid = IWL_TX_RES_GET_TID(tx_resp->ra_tid); + struct ieee80211_link_sta *link_sta; + struct iwl_mld_sta *mld_sta; + u16 ssn; + struct sk_buff_head skbs; + u8 skb_freed = 0; + bool mgmt = false; + bool tx_failure = (status & TX_STATUS_MSK) != TX_STATUS_SUCCESS; + + if (IWL_FW_CHECK(mld, tx_resp->frame_count != 1, + "Invalid tx_resp notif frame_count (%d)\n", + tx_resp->frame_count)) + return; + + /* validate the size of the variable part of the notif */ + if (IWL_FW_CHECK(mld, notif_size != pkt_len, + "Invalid tx_resp notif size (expected=%zu got=%u)\n", + notif_size, pkt_len)) + return; + + ssn = le32_to_cpup((__le32 *)agg_status + + tx_resp->frame_count) & 0xFFFF; + + __skb_queue_head_init(&skbs); + + /* we can free until ssn % q.n_bd not inclusive */ + iwl_trans_reclaim(mld->trans, txq_id, ssn, &skbs, false); + + while (!skb_queue_empty(&skbs)) { + struct sk_buff *skb = 
__skb_dequeue(&skbs); + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ieee80211_hdr *hdr = (void *)skb->data; + + skb_freed++; + + iwl_trans_free_tx_cmd(mld->trans, info->driver_data[1]); + + memset(&info->status, 0, sizeof(info->status)); + + info->flags &= ~(IEEE80211_TX_STAT_ACK | IEEE80211_TX_STAT_TX_FILTERED); + + /* inform mac80211 about what happened with the frame */ + switch (status & TX_STATUS_MSK) { + case TX_STATUS_SUCCESS: + case TX_STATUS_DIRECT_DONE: + info->flags |= IEEE80211_TX_STAT_ACK; + break; + default: + break; + } + + /* If we are freeing multiple frames, mark all the frames + * but the first one as acked, since they were acknowledged + * before + */ + if (skb_freed > 1) + info->flags |= IEEE80211_TX_STAT_ACK; + + if (tx_failure) { + enum iwl_fw_ini_time_point tp = + IWL_FW_INI_TIME_POINT_TX_FAILED; + + if (ieee80211_is_action(hdr->frame_control)) + tp = IWL_FW_INI_TIME_POINT_TX_WFD_ACTION_FRAME_FAILED; + else if (ieee80211_is_mgmt(hdr->frame_control)) + mgmt = true; + + iwl_dbg_tlv_time_point(&mld->fwrt, tp, NULL); + } + + iwl_mld_hwrate_to_tx_rate(le32_to_cpu(tx_resp->initial_rate), + info); + + if (likely(!iwl_mld_time_sync_frame(mld, skb, hdr->addr1))) + ieee80211_tx_status_skb(mld->hw, skb); + } + + IWL_DEBUG_TX_REPLY(mld, + "TXQ %d status 0x%08x ssn=%d initial_rate 0x%x retries %d\n", + txq_id, status, ssn, le32_to_cpu(tx_resp->initial_rate), + tx_resp->failure_frame); + + if (tx_failure && mgmt) + iwl_mld_toggle_tx_ant(mld, &mld->mgmt_tx_ant); + + if (IWL_FW_CHECK(mld, sta_id >= mld->fw->ucode_capa.num_stations, + "Got invalid sta_id (%d)\n", sta_id)) + return; + + rcu_read_lock(); + + link_sta = rcu_dereference(mld->fw_id_to_link_sta[sta_id]); + if (!link_sta) { + /* This can happen if the TX cmd was sent before pre_rcu_remove + * but the TX response was received after + */ + IWL_DEBUG_TX_REPLY(mld, + "Got valid sta_id (%d) but sta is NULL\n", + sta_id); + goto out; + } + + if (IS_ERR(link_sta)) + goto out; + + mld_sta = iwl_mld_sta_from_mac80211(link_sta->sta); + + if (tx_failure && mld_sta->sta_state < IEEE80211_STA_AUTHORIZED) + iwl_mld_toggle_tx_ant(mld, &mld_sta->data_tx_ant); + + if (tid < IWL_MAX_TID_COUNT) + iwl_mld_count_mpdu_tx(link_sta, 1); + +out: + rcu_read_unlock(); +} + +static void iwl_mld_tx_reclaim_txq(struct iwl_mld *mld, int txq, int index, + bool in_flush) +{ + struct sk_buff_head reclaimed_skbs; + + __skb_queue_head_init(&reclaimed_skbs); + + iwl_trans_reclaim(mld->trans, txq, index, &reclaimed_skbs, in_flush); + + while (!skb_queue_empty(&reclaimed_skbs)) { + struct sk_buff *skb = __skb_dequeue(&reclaimed_skbs); + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + + iwl_trans_free_tx_cmd(mld->trans, info->driver_data[1]); + + memset(&info->status, 0, sizeof(info->status)); + + /* Packet was transmitted successfully, failures come as single + * frames because before failing a frame the firmware transmits + * it without aggregation at least once. 
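+	 * In other words, anything reclaimed here outside of a flush was
+	 * acknowledged, so only the flush path below clears the ACK flag.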
+ */ + if (!in_flush) + info->flags |= IEEE80211_TX_STAT_ACK; + else + info->flags &= ~IEEE80211_TX_STAT_ACK; + + ieee80211_tx_status_skb(mld->hw, skb); + } +} + +int iwl_mld_flush_link_sta_txqs(struct iwl_mld *mld, u32 fw_sta_id) +{ + struct iwl_tx_path_flush_cmd_rsp *rsp; + struct iwl_tx_path_flush_cmd flush_cmd = { + .sta_id = cpu_to_le32(fw_sta_id), + .tid_mask = cpu_to_le16(0xffff), + }; + struct iwl_host_cmd cmd = { + .id = TXPATH_FLUSH, + .len = { sizeof(flush_cmd), }, + .data = { &flush_cmd, }, + .flags = CMD_WANT_SKB, + }; + int ret, num_flushed_queues; + u32 resp_len; + + IWL_DEBUG_TX_QUEUES(mld, "flush for sta id %d tid mask 0x%x\n", + fw_sta_id, 0xffff); + + ret = iwl_mld_send_cmd(mld, &cmd); + if (ret) { + IWL_ERR(mld, "Failed to send flush command (%d)\n", ret); + return ret; + } + + resp_len = iwl_rx_packet_payload_len(cmd.resp_pkt); + if (IWL_FW_CHECK(mld, resp_len != sizeof(*rsp), + "Invalid TXPATH_FLUSH response len: %d\n", + resp_len)) { + ret = -EIO; + goto free_rsp; + } + + rsp = (void *)cmd.resp_pkt->data; + + if (IWL_FW_CHECK(mld, le16_to_cpu(rsp->sta_id) != fw_sta_id, + "sta_id %d != rsp_sta_id %d\n", fw_sta_id, + le16_to_cpu(rsp->sta_id))) { + ret = -EIO; + goto free_rsp; + } + + num_flushed_queues = le16_to_cpu(rsp->num_flushed_queues); + if (IWL_FW_CHECK(mld, num_flushed_queues > IWL_TX_FLUSH_QUEUE_RSP, + "num_flushed_queues %d\n", num_flushed_queues)) { + ret = -EIO; + goto free_rsp; + } + + for (int i = 0; i < num_flushed_queues; i++) { + struct iwl_flush_queue_info *queue_info = &rsp->queues[i]; + int read_after = le16_to_cpu(queue_info->read_after_flush); + int txq_id = le16_to_cpu(queue_info->queue_num); + + if (IWL_FW_CHECK(mld, + txq_id >= ARRAY_SIZE(mld->fw_id_to_txq), + "Invalid txq id %d\n", txq_id)) + continue; + + IWL_DEBUG_TX_QUEUES(mld, + "tid %d txq_id %d read-before %d read-after %d\n", + le16_to_cpu(queue_info->tid), txq_id, + le16_to_cpu(queue_info->read_before_flush), + read_after); + + iwl_mld_tx_reclaim_txq(mld, txq_id, read_after, true); + } + +free_rsp: + iwl_free_resp(&cmd); + return ret; +} + +int iwl_mld_ensure_queue(struct iwl_mld *mld, struct ieee80211_txq *txq) +{ + struct iwl_mld_txq *mld_txq = iwl_mld_txq_from_mac80211(txq); + int ret; + + lockdep_assert_wiphy(mld->wiphy); + + if (likely(mld_txq->status.allocated)) + return 0; + + ret = iwl_mld_add_txq(mld, txq); + + spin_lock_bh(&mld->add_txqs_lock); + if (!list_empty(&mld_txq->list)) + list_del_init(&mld_txq->list); + spin_unlock_bh(&mld->add_txqs_lock); + + return ret; +} + +int iwl_mld_update_sta_txqs(struct iwl_mld *mld, + struct ieee80211_sta *sta, + u32 old_sta_mask, u32 new_sta_mask) +{ + struct iwl_scd_queue_cfg_cmd cmd = { + .operation = cpu_to_le32(IWL_SCD_QUEUE_MODIFY), + .u.modify.old_sta_mask = cpu_to_le32(old_sta_mask), + .u.modify.new_sta_mask = cpu_to_le32(new_sta_mask), + }; + + lockdep_assert_wiphy(mld->wiphy); + + for (int tid = 0; tid <= IWL_MAX_TID_COUNT; tid++) { + struct ieee80211_txq *txq = + sta->txq[tid != IWL_MAX_TID_COUNT ? 
+ tid : IEEE80211_NUM_TIDS]; + struct iwl_mld_txq *mld_txq = + iwl_mld_txq_from_mac80211(txq); + int ret; + + if (!mld_txq->status.allocated) + continue; + + if (tid == IWL_MAX_TID_COUNT) + cmd.u.modify.tid = cpu_to_le32(IWL_MGMT_TID); + else + cmd.u.modify.tid = cpu_to_le32(tid); + + ret = iwl_mld_send_cmd_pdu(mld, + WIDE_ID(DATA_PATH_GROUP, + SCD_QUEUE_CONFIG_CMD), + &cmd); + if (ret) + return ret; + } + + return 0; +} + +void iwl_mld_handle_compressed_ba_notif(struct iwl_mld *mld, + struct iwl_rx_packet *pkt) +{ + struct iwl_compressed_ba_notif *ba_res = (void *)pkt->data; + u32 pkt_len = iwl_rx_packet_payload_len(pkt); + u16 tfd_cnt = le16_to_cpu(ba_res->tfd_cnt); + u8 sta_id = ba_res->sta_id; + struct ieee80211_link_sta *link_sta; + + if (!tfd_cnt) + return; + + if (IWL_FW_CHECK(mld, struct_size(ba_res, tfd, tfd_cnt) > pkt_len, + "Short BA notif (tfd_cnt=%d, size:0x%x)\n", + tfd_cnt, pkt_len)) + return; + + IWL_DEBUG_TX_REPLY(mld, + "BA notif received from sta_id=%d, flags=0x%x, sent:%d, acked:%d\n", + sta_id, le32_to_cpu(ba_res->flags), + le16_to_cpu(ba_res->txed), + le16_to_cpu(ba_res->done)); + + for (int i = 0; i < tfd_cnt; i++) { + struct iwl_compressed_ba_tfd *ba_tfd = &ba_res->tfd[i]; + int txq_id = le16_to_cpu(ba_tfd->q_num); + int index = le16_to_cpu(ba_tfd->tfd_index); + + if (IWL_FW_CHECK(mld, + txq_id >= ARRAY_SIZE(mld->fw_id_to_txq), + "Invalid txq id %d\n", txq_id)) + continue; + + iwl_mld_tx_reclaim_txq(mld, txq_id, index, false); + } + + if (IWL_FW_CHECK(mld, sta_id >= mld->fw->ucode_capa.num_stations, + "Got invalid sta_id (%d)\n", sta_id)) + return; + + rcu_read_lock(); + + link_sta = rcu_dereference(mld->fw_id_to_link_sta[sta_id]); + if (IWL_FW_CHECK(mld, IS_ERR_OR_NULL(link_sta), + "Got valid sta_id (%d) but link_sta is NULL\n", + sta_id)) + goto out; + + iwl_mld_count_mpdu_tx(link_sta, le16_to_cpu(ba_res->txed)); +out: + rcu_read_unlock(); +} diff --git a/drivers/net/wireless/intel/iwlwifi/mld/tx.h b/drivers/net/wireless/intel/iwlwifi/mld/tx.h new file mode 100644 index 0000000000000..520f15f9d33ca --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mld/tx.h @@ -0,0 +1,77 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* + * Copyright (C) 2024 Intel Corporation + */ +#ifndef __iwl_mld_tx_h__ +#define __iwl_mld_tx_h__ + +#include "mld.h" + +#define IWL_MLD_INVALID_QUEUE 0xFFFF +#define IWL_MLD_INVALID_DROP_TX 0xFFFE + +/** + * struct iwl_mld_txq - TX Queue data + * + * @fw_id: the fw id of this txq. Only valid when &status.allocated is true. + * @status: bitmap of the txq status + * @status.allocated: Indicates that the queue was allocated. + * @status.stop_full: Indicates that the queue is full and should stop TXing. + * @list: list pointer, for &mld::txqs_to_add + * @tx_request: makes sure that if there are multiple threads that want to tx + * from this txq, only one of them will do all the TXing. 
+ * This is needed to avoid spinning the trans txq lock, which is expensive + */ +struct iwl_mld_txq { + /* Add here fields that need clean up on restart */ + struct_group(zeroed_on_hw_restart, + u16 fw_id; + struct { + u8 allocated:1; + u8 stop_full:1; + } status; + ); + struct list_head list; + atomic_t tx_request; + /* And here fields that survive a fw restart */ +}; + +static inline void iwl_mld_init_txq(struct iwl_mld_txq *mld_txq) +{ + INIT_LIST_HEAD(&mld_txq->list); + atomic_set(&mld_txq->tx_request, 0); +} + +static inline struct iwl_mld_txq * +iwl_mld_txq_from_mac80211(struct ieee80211_txq *txq) +{ + return (void *)txq->drv_priv; +} + +void iwl_mld_add_txqs_wk(struct wiphy *wiphy, struct wiphy_work *wk); +void iwl_mld_remove_txq(struct iwl_mld *mld, struct ieee80211_txq *txq); +void iwl_mld_add_txq_list(struct iwl_mld *mld); +void +iwl_mld_free_txq(struct iwl_mld *mld, u32 fw_sta_mask, u32 tid, u32 queue_id); +void iwl_mld_tx_from_txq(struct iwl_mld *mld, struct ieee80211_txq *txq); +void iwl_mld_handle_tx_resp_notif(struct iwl_mld *mld, + struct iwl_rx_packet *pkt); +int iwl_mld_flush_link_sta_txqs(struct iwl_mld *mld, u32 fw_sta_id); +int iwl_mld_ensure_queue(struct iwl_mld *mld, struct ieee80211_txq *txq); + +int iwl_mld_update_sta_txqs(struct iwl_mld *mld, + struct ieee80211_sta *sta, + u32 old_sta_mask, u32 new_sta_mask); + +void iwl_mld_handle_compressed_ba_notif(struct iwl_mld *mld, + struct iwl_rx_packet *pkt); +void iwl_mld_toggle_tx_ant(struct iwl_mld *mld, u8 *ant); + +u8 iwl_mld_get_lowest_rate(struct iwl_mld *mld, + struct ieee80211_tx_info *info, + struct ieee80211_vif *vif); + +void iwl_mld_tx_skb(struct iwl_mld *mld, struct sk_buff *skb, + struct ieee80211_txq *txq); + +#endif /* __iwl_mld_tx_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c index 129b6bdf9ef90..3e8b7168af01c 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c @@ -3092,8 +3092,14 @@ static void iwl_mvm_d3_disconnect_iter(void *data, u8 *mac, ieee80211_resume_disconnect(vif); } -static bool iwl_mvm_check_rt_status(struct iwl_mvm *mvm, - struct ieee80211_vif *vif) +enum rt_status { + FW_ALIVE, + FW_NEEDS_RESET, + FW_ERROR, +}; + +static enum rt_status iwl_mvm_check_rt_status(struct iwl_mvm *mvm, + struct ieee80211_vif *vif) { u32 err_id; @@ -3101,29 +3107,35 @@ static bool iwl_mvm_check_rt_status(struct iwl_mvm *mvm, if (iwl_fwrt_read_err_table(mvm->trans, mvm->trans->dbg.lmac_error_event_table[0], &err_id)) { - if (err_id == RF_KILL_INDICATOR_FOR_WOWLAN && vif) { - struct cfg80211_wowlan_wakeup wakeup = { - .rfkill_release = true, - }; - ieee80211_report_wowlan_wakeup(vif, &wakeup, - GFP_KERNEL); + if (err_id == RF_KILL_INDICATOR_FOR_WOWLAN) { + IWL_WARN(mvm, "Rfkill was toggled during suspend\n"); + if (vif) { + struct cfg80211_wowlan_wakeup wakeup = { + .rfkill_release = true, + }; + + ieee80211_report_wowlan_wakeup(vif, &wakeup, + GFP_KERNEL); + } + + return FW_NEEDS_RESET; } - return true; + return FW_ERROR; } /* check if we have lmac2 set and check for error */ if (iwl_fwrt_read_err_table(mvm->trans, mvm->trans->dbg.lmac_error_event_table[1], NULL)) - return true; + return FW_ERROR; /* check for umac error */ if (iwl_fwrt_read_err_table(mvm->trans, mvm->trans->dbg.umac_error_event_table, NULL)) - return true; + return FW_ERROR; - return false; + return FW_ALIVE; } /* @@ -3376,7 +3388,7 @@ static bool iwl_mvm_wait_d3_notif(struct iwl_notif_wait_data *notif_wait, break; } case 
WIDE_ID(PROT_OFFLOAD_GROUP, D3_END_NOTIFICATION): { - struct iwl_mvm_d3_end_notif *notif = (void *)pkt->data; + struct iwl_d3_end_notif *notif = (void *)pkt->data; d3_data->d3_end_flags = __le32_to_cpu(notif->flags); d3_data->notif_received |= IWL_D3_NOTIF_D3_END_NOTIF; @@ -3492,6 +3504,7 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test) bool d0i3_first = fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_D0I3_END_FIRST); bool resume_notif_based = iwl_mvm_d3_resume_notif_based(mvm); + enum rt_status rt_status; bool keep = false; mutex_lock(&mvm->mutex); @@ -3515,14 +3528,19 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test) iwl_fw_dbg_read_d3_debug_data(&mvm->fwrt); - if (iwl_mvm_check_rt_status(mvm, vif)) { - IWL_ERR(mvm, "FW Error occurred during suspend. Restarting.\n"); + rt_status = iwl_mvm_check_rt_status(mvm, vif); + if (rt_status != FW_ALIVE) { set_bit(STATUS_FW_ERROR, &mvm->trans->status); - iwl_mvm_dump_nic_error_log(mvm); - iwl_dbg_tlv_time_point(&mvm->fwrt, - IWL_FW_INI_TIME_POINT_FW_ASSERT, NULL); - iwl_fw_dbg_collect_desc(&mvm->fwrt, &iwl_dump_desc_assert, - false, 0); + if (rt_status == FW_ERROR) { + IWL_ERR(mvm, "FW Error occurred during suspend. Restarting.\n"); + iwl_mvm_dump_nic_error_log(mvm); + iwl_dbg_tlv_time_point(&mvm->fwrt, + IWL_FW_INI_TIME_POINT_FW_ASSERT, + NULL); + iwl_fw_dbg_collect_desc(&mvm->fwrt, + &iwl_dump_desc_assert, + false, 0); + } ret = 1; goto err; } @@ -3679,6 +3697,7 @@ int iwl_mvm_fast_resume(struct iwl_mvm *mvm) .notif_expected = IWL_D3_NOTIF_D3_END_NOTIF, }; + enum rt_status rt_status; int ret; lockdep_assert_held(&mvm->mutex); @@ -3688,14 +3707,20 @@ int iwl_mvm_fast_resume(struct iwl_mvm *mvm) mvm->last_reset_or_resume_time_jiffies = jiffies; iwl_fw_dbg_read_d3_debug_data(&mvm->fwrt); - if (iwl_mvm_check_rt_status(mvm, NULL)) { - IWL_ERR(mvm, "FW Error occurred during suspend. 
Restarting.\n"); + rt_status = iwl_mvm_check_rt_status(mvm, NULL); + if (rt_status != FW_ALIVE) { set_bit(STATUS_FW_ERROR, &mvm->trans->status); - iwl_mvm_dump_nic_error_log(mvm); - iwl_dbg_tlv_time_point(&mvm->fwrt, - IWL_FW_INI_TIME_POINT_FW_ASSERT, NULL); - iwl_fw_dbg_collect_desc(&mvm->fwrt, &iwl_dump_desc_assert, - false, 0); + if (rt_status == FW_ERROR) { + IWL_ERR(mvm, + "iwl_mvm_check_rt_status failed, device is gone during suspend\n"); + iwl_mvm_dump_nic_error_log(mvm); + iwl_dbg_tlv_time_point(&mvm->fwrt, + IWL_FW_INI_TIME_POINT_FW_ASSERT, + NULL); + iwl_fw_dbg_collect_desc(&mvm->fwrt, + &iwl_dump_desc_assert, + false, 0); + } mvm->trans->state = IWL_TRANS_NO_FW; ret = -ENODEV; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c index 83e3c11603622..671d3f8d79c19 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2018-2023 Intel Corporation + * Copyright (C) 2012-2014, 2018-2023, 2025 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -542,7 +542,7 @@ static ssize_t iwl_dbgfs_tas_get_status_read(struct file *file, size_t count, loff_t *ppos) { struct iwl_mvm *mvm = file->private_data; - struct iwl_mvm_tas_status_resp *rsp = NULL; + struct iwl_tas_status_resp *rsp = NULL; static const size_t bufsz = 1024; char *buff, *pos, *endpos; const char * const tas_dis_reason[TAS_DISABLED_REASON_MAX] = { @@ -552,6 +552,8 @@ static ssize_t iwl_dbgfs_tas_get_status_read(struct file *file, "Due To SAR Limit Less Than 6 dBm", [TAS_DISABLED_REASON_INVALID] = "N/A", + [TAS_DISABLED_DUE_TO_TABLE_SOURCE_INVALID] = + "Due to table source invalid", }; const char * const tas_current_status[TAS_DYNA_STATUS_MAX] = { [TAS_DYNA_INACTIVE] = "INACTIVE", @@ -598,23 +600,19 @@ static ssize_t iwl_dbgfs_tas_get_status_read(struct file *file, pos += scnprintf(pos, endpos - pos, "TAS Conclusion:\n"); for (i = 0; i < rsp->in_dual_radio + 1; i++) { - if (rsp->tas_status_mac[i].band != TAS_LMAC_BAND_INVALID && - rsp->tas_status_mac[i].dynamic_status & BIT(TAS_DYNA_ACTIVE)) { + if (rsp->tas_status_mac[i].dynamic_status & + BIT(TAS_DYNA_ACTIVE)) { pos += scnprintf(pos, endpos - pos, "\tON for "); switch (rsp->tas_status_mac[i].band) { - case TAS_LMAC_BAND_HB: + case PHY_BAND_5: pos += scnprintf(pos, endpos - pos, "HB\n"); break; - case TAS_LMAC_BAND_LB: + case PHY_BAND_24: pos += scnprintf(pos, endpos - pos, "LB\n"); break; - case TAS_LMAC_BAND_UHB: + case PHY_BAND_6: pos += scnprintf(pos, endpos - pos, "UHB\n"); break; - case TAS_LMAC_BAND_INVALID: - pos += scnprintf(pos, endpos - pos, - "INVALID BAND\n"); - break; default: pos += scnprintf(pos, endpos - pos, "Unsupported band (%d)\n", @@ -668,20 +666,16 @@ static ssize_t iwl_dbgfs_tas_get_status_read(struct file *file, pos += scnprintf(pos, endpos - pos, "TAS status for "); switch (rsp->tas_status_mac[i].band) { - case TAS_LMAC_BAND_HB: + case PHY_BAND_5: pos += scnprintf(pos, endpos - pos, "High band\n"); break; - case TAS_LMAC_BAND_LB: + case PHY_BAND_24: pos += scnprintf(pos, endpos - pos, "Low band\n"); break; - case TAS_LMAC_BAND_UHB: + case PHY_BAND_6: pos += scnprintf(pos, endpos - pos, "Ultra high band\n"); break; - case TAS_LMAC_BAND_INVALID: - pos += scnprintf(pos, endpos - pos, - "INVALID band\n"); - break; default: pos += scnprintf(pos, endpos - pos, 
"Unsupported band (%d)\n", @@ -704,11 +698,9 @@ static ssize_t iwl_dbgfs_tas_get_status_read(struct file *file, pos += scnprintf(pos, endpos - pos, "Dynamic status:\n"); dyn_status = (rsp->tas_status_mac[i].dynamic_status); - for_each_set_bit(tmp, &dyn_status, sizeof(dyn_status)) { - if (tmp >= 0 && tmp < TAS_DYNA_STATUS_MAX) - pos += scnprintf(pos, endpos - pos, - "\t%s (%d)\n", - tas_current_status[tmp], tmp); + for_each_set_bit(tmp, &dyn_status, TAS_DYNA_STATUS_MAX) { + pos += scnprintf(pos, endpos - pos, "\t%s (%d)\n", + tas_current_status[tmp], tmp); } pos += scnprintf(pos, endpos - pos, @@ -1479,6 +1471,13 @@ static ssize_t iwl_dbgfs_fw_dbg_clear_write(struct iwl_mvm *mvm, if (mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_9000) return -EOPNOTSUPP; + /* + * If the firmware is not running, silently succeed since there is + * no data to clear. + */ + if (!iwl_mvm_firmware_running(mvm)) + return count; + mutex_lock(&mvm->mutex); iwl_fw_dbg_clear_monitor_buf(&mvm->fwrt); mutex_unlock(&mvm->mutex); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c index b26141c30c61c..a493ef6bedc3e 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright (C) 2015-2017 Intel Deutschland GmbH - * Copyright (C) 2018-2024 Intel Corporation + * Copyright (C) 2018-2025 Intel Corporation */ #include #include @@ -46,107 +46,6 @@ struct iwl_mvm_ftm_iter_data { u8 *tk; }; -int iwl_mvm_ftm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - u8 *addr, u32 cipher, u8 *tk, u32 tk_len, - u8 *hltk, u32 hltk_len) -{ - struct iwl_mvm_ftm_pasn_entry *pasn = kzalloc(sizeof(*pasn), - GFP_KERNEL); - u32 expected_tk_len; - - lockdep_assert_held(&mvm->mutex); - - if (!pasn) - return -ENOBUFS; - - iwl_mvm_ftm_remove_pasn_sta(mvm, addr); - - pasn->cipher = iwl_mvm_cipher_to_location_cipher(cipher); - - switch (pasn->cipher) { - case IWL_LOCATION_CIPHER_CCMP_128: - case IWL_LOCATION_CIPHER_GCMP_128: - expected_tk_len = WLAN_KEY_LEN_CCMP; - break; - case IWL_LOCATION_CIPHER_GCMP_256: - expected_tk_len = WLAN_KEY_LEN_GCMP_256; - break; - default: - goto out; - } - - /* - * If associated to this AP and already have security context, - * the TK is already configured for this station, so it - * shouldn't be set again here. 
- */ - if (vif->cfg.assoc) { - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct ieee80211_bss_conf *link_conf; - unsigned int link_id; - struct ieee80211_sta *sta; - u8 sta_id; - - rcu_read_lock(); - for_each_vif_active_link(vif, link_conf, link_id) { - if (memcmp(addr, link_conf->bssid, ETH_ALEN)) - continue; - - sta_id = mvmvif->link[link_id]->ap_sta_id; - sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); - if (!IS_ERR_OR_NULL(sta) && sta->mfp) - expected_tk_len = 0; - break; - } - rcu_read_unlock(); - } - - if (tk_len != expected_tk_len || - (hltk_len && hltk_len != sizeof(pasn->hltk))) { - IWL_ERR(mvm, "Invalid key length: tk_len=%u hltk_len=%u\n", - tk_len, hltk_len); - goto out; - } - - if (!expected_tk_len && !hltk_len) { - IWL_ERR(mvm, "TK and HLTK not set\n"); - goto out; - } - - memcpy(pasn->addr, addr, sizeof(pasn->addr)); - - if (hltk_len) { - memcpy(pasn->hltk, hltk, sizeof(pasn->hltk)); - pasn->flags |= IWL_MVM_PASN_FLAG_HAS_HLTK; - } - - if (tk && tk_len) - memcpy(pasn->tk, tk, sizeof(pasn->tk)); - - list_add_tail(&pasn->list, &mvm->ftm_initiator.pasn_list); - return 0; -out: - kfree(pasn); - return -EINVAL; -} - -void iwl_mvm_ftm_remove_pasn_sta(struct iwl_mvm *mvm, u8 *addr) -{ - struct iwl_mvm_ftm_pasn_entry *entry, *prev; - - lockdep_assert_held(&mvm->mutex); - - list_for_each_entry_safe(entry, prev, &mvm->ftm_initiator.pasn_list, - list) { - if (memcmp(entry->addr, addr, sizeof(entry->addr))) - continue; - - list_del(&entry->list); - kfree(entry); - return; - } -} - static void iwl_mvm_ftm_reset(struct iwl_mvm *mvm) { struct iwl_mvm_loc_entry *e, *t; @@ -773,7 +672,11 @@ iwl_mvm_ftm_set_secured_ranging(struct iwl_mvm *mvm, struct ieee80211_vif *vif, target.bssid = bssid; target.cipher = cipher; + target.tk = NULL; ieee80211_iter_keys(mvm->hw, vif, iter, &target); + + if (!WARN_ON(!target.tk)) + memcpy(tk, target.tk, TK_11AZ_LEN); } else { memcpy(tk, entry->tk, sizeof(entry->tk)); } @@ -949,7 +852,7 @@ static int iwl_mvm_ftm_start_v13(struct iwl_mvm *mvm, static int iwl_mvm_ftm_put_target_v10(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct cfg80211_pmsr_request_peer *peer, - struct iwl_tof_range_req_ap_entry_v10 *target) + struct iwl_tof_range_req_ap_entry *target) { u32 i2r_max_sts, flags; int ret; @@ -1021,7 +924,7 @@ static int iwl_mvm_ftm_start_v14(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct cfg80211_pmsr_request *req) { - struct iwl_tof_range_req_cmd_v14 cmd; + struct iwl_tof_range_req_cmd cmd; struct iwl_host_cmd hcmd = { .id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD), .dataflags[0] = IWL_HCMD_DFL_DUP, @@ -1035,7 +938,7 @@ static int iwl_mvm_ftm_start_v14(struct iwl_mvm *mvm, for (i = 0; i < cmd.num_of_ap; i++) { struct cfg80211_pmsr_request_peer *peer = &req->peers[i]; - struct iwl_tof_range_req_ap_entry_v10 *target = &cmd.ap[i]; + struct iwl_tof_range_req_ap_entry *target = &cmd.ap[i]; err = iwl_mvm_ftm_put_target_v10(mvm, vif, peer, target); if (err) @@ -1301,7 +1204,7 @@ static void iwl_mvm_debug_range_resp(struct iwl_mvm *mvm, u8 index, static void iwl_mvm_ftm_pasn_update_pn(struct iwl_mvm *mvm, - struct iwl_tof_range_rsp_ap_entry_ntfy_v6 *fw_ap) + struct iwl_tof_range_rsp_ap_entry_ntfy *fw_ap) { struct iwl_mvm_ftm_pasn_entry *entry; @@ -1339,7 +1242,7 @@ static bool iwl_mvm_ftm_resp_size_validation(u8 ver, unsigned int pkt_len) switch (ver) { case 9: case 8: - return pkt_len == sizeof(struct iwl_tof_range_rsp_ntfy_v8); + return pkt_len == sizeof(struct iwl_tof_range_rsp_ntfy); case 7: return pkt_len == sizeof(struct 
iwl_tof_range_rsp_ntfy_v7); case 6: @@ -1359,7 +1262,7 @@ void iwl_mvm_ftm_range_resp(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) struct iwl_tof_range_rsp_ntfy_v5 *fw_resp_v5 = (void *)pkt->data; struct iwl_tof_range_rsp_ntfy_v6 *fw_resp_v6 = (void *)pkt->data; struct iwl_tof_range_rsp_ntfy_v7 *fw_resp_v7 = (void *)pkt->data; - struct iwl_tof_range_rsp_ntfy_v8 *fw_resp_v8 = (void *)pkt->data; + struct iwl_tof_range_rsp_ntfy *fw_resp_v8 = (void *)pkt->data; int i; bool new_api = fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_FTM_NEW_RANGE_REQ); @@ -1395,9 +1298,9 @@ void iwl_mvm_ftm_range_resp(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) IWL_DEBUG_INFO(mvm, "request id: %lld, num of entries: %u\n", mvm->ftm_initiator.req->cookie, num_of_aps); - for (i = 0; i < num_of_aps && i < IWL_MVM_TOF_MAX_APS; i++) { + for (i = 0; i < num_of_aps && i < IWL_TOF_MAX_APS; i++) { struct cfg80211_pmsr_result result = {}; - struct iwl_tof_range_rsp_ap_entry_ntfy_v6 *fw_ap; + struct iwl_tof_range_rsp_ap_entry_ntfy *fw_ap; int peer_idx; if (new_api) { diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c index e6e468e81ab3a..83f6e508a094c 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c @@ -324,92 +324,6 @@ static void iwl_mvm_resp_del_pasn_sta(struct iwl_mvm *mvm, kfree(sta); } -int iwl_mvm_ftm_respoder_add_pasn_sta(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - u8 *addr, u32 cipher, u8 *tk, u32 tk_len, - u8 *hltk, u32 hltk_len) -{ - int ret; - struct iwl_mvm_pasn_sta *sta = NULL; - struct iwl_mvm_pasn_hltk_data hltk_data = { - .addr = addr, - .hltk = hltk, - }; - struct iwl_mvm_pasn_hltk_data *hltk_data_ptr = NULL; - - u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, - WIDE_ID(LOCATION_GROUP, TOF_RESPONDER_DYN_CONFIG_CMD), - 2); - - lockdep_assert_held(&mvm->mutex); - - if (cmd_ver < 3) { - IWL_ERR(mvm, "Adding PASN station not supported by FW\n"); - return -EOPNOTSUPP; - } - - if ((!hltk || !hltk_len) && (!tk || !tk_len)) { - IWL_ERR(mvm, "TK and HLTK not set\n"); - return -EINVAL; - } - - if (hltk && hltk_len) { - if (!fw_has_capa(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_CAPA_SECURE_LTF_SUPPORT)) { - IWL_ERR(mvm, "No support for secure LTF measurement\n"); - return -EINVAL; - } - - hltk_data.cipher = iwl_mvm_cipher_to_location_cipher(cipher); - if (hltk_data.cipher == IWL_LOCATION_CIPHER_INVALID) { - IWL_ERR(mvm, "invalid cipher: %u\n", cipher); - return -EINVAL; - } - - hltk_data_ptr = &hltk_data; - } - - if (tk && tk_len) { - sta = kzalloc(sizeof(*sta) + tk_len, GFP_KERNEL); - if (!sta) - return -ENOBUFS; - - ret = iwl_mvm_add_pasn_sta(mvm, vif, &sta->int_sta, addr, - cipher, tk, tk_len, &sta->keyconf); - if (ret) { - kfree(sta); - return ret; - } - - memcpy(sta->addr, addr, ETH_ALEN); - list_add_tail(&sta->list, &mvm->resp_pasn_list); - } - - ret = iwl_mvm_ftm_responder_dyn_cfg_v3(mvm, vif, NULL, hltk_data_ptr); - if (ret && sta) - iwl_mvm_resp_del_pasn_sta(mvm, vif, sta); - - return ret; -} - -int iwl_mvm_ftm_resp_remove_pasn_sta(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, u8 *addr) -{ - struct iwl_mvm_pasn_sta *sta, *prev; - - lockdep_assert_held(&mvm->mutex); - - list_for_each_entry_safe(sta, prev, &mvm->resp_pasn_list, list) { - if (!memcmp(sta->addr, addr, ETH_ALEN)) { - iwl_mvm_resp_del_pasn_sta(mvm, vif, sta); - return 0; - } - } - - IWL_ERR(mvm, "FTM: PASN station %pM not found\n", addr); - return -EINVAL; -} - int 
iwl_mvm_ftm_start_responder(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_bss_conf *bss_conf) { diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c index df49dd2e2026d..2b5a62604fc40 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2018-2024 Intel Corporation + * Copyright (C) 2012-2014, 2018-2025 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -422,6 +422,8 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm, /* if reached this point, Alive notification was received */ iwl_mei_alive_notif(true); + iwl_trans_fw_alive(mvm->trans, alive_data.scd_base_addr); + ret = iwl_pnvm_load(mvm->trans, &mvm->notif_wait, &mvm->fw->ucode_capa); if (ret) { @@ -430,8 +432,6 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm, return ret; } - iwl_trans_fw_alive(mvm->trans, alive_data.scd_base_addr); - /* * Note: all the queues are enabled as part of the interface * initialization, but in firmware restart scenarios they @@ -1094,22 +1094,6 @@ static int iwl_mvm_ppag_init(struct iwl_mvm *mvm) return iwl_mvm_ppag_send_cmd(mvm); } -static bool -iwl_mvm_add_to_tas_block_list(u16 *list, u8 *size, u16 mcc) -{ - /* Verify that there is room for another country */ - if (*size >= IWL_WTAS_BLACK_LIST_MAX) - return false; - - for (u8 i = 0; i < *size; i++) { - if (list[i] == mcc) - return true; - } - - list[*size++] = mcc; - return true; -} - static void iwl_mvm_tas_init(struct iwl_mvm *mvm) { u32 cmd_id = WIDE_ID(REGULATORY_AND_NVM_GROUP, TAS_CONFIG); @@ -1150,10 +1134,10 @@ static void iwl_mvm_tas_init(struct iwl_mvm *mvm) IWL_DEBUG_RADIO(mvm, "System vendor '%s' is not in the approved list, disabling TAS in US and Canada.\n", dmi_get_system_info(DMI_SYS_VENDOR) ?: ""); - if ((!iwl_mvm_add_to_tas_block_list(data.block_list_array, + if ((!iwl_add_mcc_to_tas_block_list(data.block_list_array, &data.block_list_size, IWL_MCC_US)) || - (!iwl_mvm_add_to_tas_block_list(data.block_list_array, + (!iwl_add_mcc_to_tas_block_list(data.block_list_array, &data.block_list_size, IWL_MCC_CANADA))) { IWL_DEBUG_RADIO(mvm, @@ -1213,38 +1197,6 @@ static void iwl_mvm_tas_init(struct iwl_mvm *mvm) IWL_DEBUG_RADIO(mvm, "failed to send TAS_CONFIG (%d)\n", ret); } -static bool iwl_mvm_eval_dsm_rfi(struct iwl_mvm *mvm) -{ - u32 value = 0; - /* default behaviour is disabled */ - bool bios_enable_rfi = false; - int ret = iwl_bios_get_dsm(&mvm->fwrt, DSM_FUNC_RFI_CONFIG, &value); - - - if (ret < 0) { - IWL_DEBUG_RADIO(mvm, "Failed to get DSM RFI, ret=%d\n", ret); - return bios_enable_rfi; - } - - value &= DSM_VALUE_RFI_DISABLE; - /* RFI BIOS CONFIG value can be 0 or 3 only. - * i.e 0 means DDR and DLVR enabled. 3 means DDR and DLVR disabled. - * 1 and 2 are invalid BIOS configurations, So, it's not possible to - * disable ddr/dlvr separately. 
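[Editor's aside] The TAS hunk above drops the local iwl_mvm_add_to_tas_block_list() in favor of a shared iwl_add_mcc_to_tas_block_list() helper. A userspace sketch of what such a dedup-append helper has to do; the list size and MCC encoding are illustrative. Note the parenthesised (*size)++: the *size++ spelling visible in the removed copy increments the local pointer rather than the count stored at the caller:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BLOCK_LIST_MAX 16  /* stand-in for IWL_WTAS_BLACK_LIST_MAX */

/* Append an MCC unless it is already present; fail only when full. */
static bool add_mcc_to_block_list(uint16_t *list, uint8_t *size, uint16_t mcc)
{
	if (*size >= BLOCK_LIST_MAX)
		return false;

	for (uint8_t i = 0; i < *size; i++) {
		if (list[i] == mcc)
			return true;  /* already blocked, nothing to do */
	}

	list[(*size)++] = mcc;
	return true;
}

int main(void)
{
	uint16_t list[BLOCK_LIST_MAX];
	uint8_t size = 0;

	add_mcc_to_block_list(list, &size, 0x5553);  /* illustrative "US" */
	add_mcc_to_block_list(list, &size, 0x5553);  /* duplicate, ignored */
	printf("entries=%u\n", (unsigned)size);      /* prints entries=1 */
	return 0;
}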
- */ - if (!value) { - IWL_DEBUG_RADIO(mvm, "DSM RFI is evaluated to enable\n"); - bios_enable_rfi = true; - } else if (value == DSM_VALUE_RFI_DISABLE) { - IWL_DEBUG_RADIO(mvm, "DSM RFI is evaluated to disable\n"); - } else { - IWL_DEBUG_RADIO(mvm, - "DSM RFI got invalid value, value=%d\n", value); - } - - return bios_enable_rfi; -} - static void iwl_mvm_lari_cfg(struct iwl_mvm *mvm) { struct iwl_lari_config_change_cmd cmd; @@ -1631,7 +1583,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm) iwl_mvm_uats_init(mvm); if (iwl_rfi_supported(mvm)) { - if (iwl_mvm_eval_dsm_rfi(mvm)) + if (iwl_rfi_is_enabled_in_bios(&mvm->fwrt)) iwl_rfi_send_config_cmd(mvm, NULL); } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c index 6b06732441c35..bec18d197f310 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c @@ -1960,26 +1960,3 @@ void iwl_mvm_channel_switch_error_notif(struct iwl_mvm *mvm, ieee80211_channel_switch_disconnect(vif); rcu_read_unlock(); } - -void iwl_mvm_rx_missed_vap_notif(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb) -{ - struct iwl_rx_packet *pkt = rxb_addr(rxb); - struct iwl_missed_vap_notif *mb = (void *)pkt->data; - struct ieee80211_vif *vif; - u32 id = le32_to_cpu(mb->mac_id); - - IWL_DEBUG_INFO(mvm, - "missed_vap notify mac_id=%u, num_beacon_intervals_elapsed=%u, profile_periodicity=%u\n", - le32_to_cpu(mb->mac_id), - mb->num_beacon_intervals_elapsed, - mb->profile_periodicity); - - rcu_read_lock(); - - vif = iwl_mvm_rcu_dereference_vif_id(mvm, id, true); - if (vif) - iwl_mvm_connection_loss(mvm, vif, "missed vap beacon"); - - rcu_read_unlock(); -} diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index af6644b7e95fb..1e916a0ce0824 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2018-2024 Intel Corporation + * Copyright (C) 2012-2014, 2018-2025 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -70,7 +70,7 @@ static const struct ieee80211_iface_combination iwl_mvm_iface_combinations[] = { }; static const struct cfg80211_pmsr_capabilities iwl_mvm_pmsr_capa = { - .max_peers = IWL_MVM_TOF_MAX_APS, + .max_peers = IWL_TOF_MAX_APS, .report_ap_tsf = 1, .randomize_mac_addr = 1, @@ -272,9 +272,10 @@ static const u8 tm_if_types_ext_capa_sta[] = { __bf_shf(IEEE80211_EML_CAP_EMLSR_PADDING_DELAY) | \ IEEE80211_EML_CAP_EMLSR_TRANSITION_DELAY_64US << \ __bf_shf(IEEE80211_EML_CAP_EMLSR_TRANSITION_DELAY)) -#define IWL_MVM_MLD_CAPA_OPS FIELD_PREP_CONST( \ +#define IWL_MVM_MLD_CAPA_OPS (FIELD_PREP_CONST( \ IEEE80211_MLD_CAP_OP_TID_TO_LINK_MAP_NEG_SUPP, \ - IEEE80211_MLD_CAP_OP_TID_TO_LINK_MAP_NEG_SUPP_SAME) + IEEE80211_MLD_CAP_OP_TID_TO_LINK_MAP_NEG_SUPP_SAME) | \ + IEEE80211_MLD_CAP_OP_LINK_RECONF_SUPPORT) static const struct wiphy_iftype_ext_capab add_iftypes_ext_capa[] = { { @@ -4099,6 +4100,20 @@ iwl_mvm_sta_state_authorized_to_assoc(struct iwl_mvm *mvm, return 0; } +void iwl_mvm_smps_workaround(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + bool update) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + + if (!iwl_mvm_has_rlc_offload(mvm)) + return; + + mvmvif->ps_disabled = !vif->cfg.ps; + + if (update) + iwl_mvm_power_update_mac(mvm); +} 
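[Editor's aside] The removed iwl_mvm_eval_dsm_rfi() above now lives in shared code as iwl_rfi_is_enabled_in_bios(). A standalone model of its tri-state decision, where 0 enables DDR/DLVR interference mitigation, 3 disables both, and 1 or 2 are invalid BIOS settings; the mask value mirrors DSM_VALUE_RFI_DISABLE but everything else is illustrative:

#include <stdbool.h>
#include <stdio.h>

#define DSM_VALUE_RFI_DISABLE 3  /* two-bit DDR|DLVR disable mask */

/* 0 = DDR and DLVR mitigation enabled, 3 = both disabled;
 * 1 and 2 are invalid BIOS configurations. */
static bool rfi_enabled_in_bios(int bios_ret, unsigned int value)
{
	if (bios_ret < 0)
		return false;  /* default behaviour is disabled */

	value &= DSM_VALUE_RFI_DISABLE;
	if (!value)
		return true;
	if (value != DSM_VALUE_RFI_DISABLE)
		fprintf(stderr, "invalid RFI BIOS value %u\n", value);
	return false;
}

int main(void)
{
	printf("enabled=%d disabled=%d invalid=%d\n",
	       rfi_enabled_in_bios(0, 0),
	       rfi_enabled_in_bios(0, 3),
	       rfi_enabled_in_bios(0, 2));
	return 0;
}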
+ /* Common part for MLD and non-MLD modes */ int iwl_mvm_mac_sta_state_common(struct ieee80211_hw *hw, struct ieee80211_vif *vif, @@ -4191,6 +4206,7 @@ int iwl_mvm_mac_sta_state_common(struct ieee80211_hw *hw, new_state == IEEE80211_STA_AUTHORIZED) { ret = iwl_mvm_sta_state_assoc_to_authorized(mvm, vif, sta, callbacks); + iwl_mvm_smps_workaround(mvm, vif, true); } else if (old_state == IEEE80211_STA_AUTHORIZED && new_state == IEEE80211_STA_ASSOC) { ret = iwl_mvm_sta_state_authorized_to_assoc(mvm, vif, sta, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c index 341a2a7a49ec9..78d7153a0cfca 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2022-2024 Intel Corporation + * Copyright (C) 2022-2025 Intel Corporation */ #include "mvm.h" @@ -887,6 +887,7 @@ static void iwl_mvm_mld_vif_cfg_changed_station(struct iwl_mvm *mvm, } if (changes & BSS_CHANGED_PS) { + iwl_mvm_smps_workaround(mvm, vif, false); ret = iwl_mvm_power_update_mac(mvm); if (ret) IWL_ERR(mvm, "failed to update power mode\n"); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c index 2f159024eeb89..9dd6700411370 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c @@ -121,7 +121,7 @@ static int iwl_mvm_add_aux_sta_to_fw(struct iwl_mvm *mvm, { int ret; - struct iwl_mvm_aux_sta_cmd cmd = { + struct iwl_aux_sta_cmd cmd = { .sta_id = cpu_to_le32(sta->sta_id), .lmac_id = cpu_to_le32(lmac_id), }; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index ee769da72e68c..f6391c7a3e296 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2012-2014, 2018-2024 Intel Corporation + * Copyright (C) 2012-2014, 2018-2025 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -1310,7 +1310,7 @@ struct iwl_mvm { struct cfg80211_pmsr_request *req; struct wireless_dev *req_wdev; struct list_head loc_list; - int responses[IWL_MVM_TOF_MAX_APS]; + int responses[IWL_TOF_MAX_APS]; struct { struct list_head resp; } smooth; @@ -2095,8 +2095,6 @@ void iwl_mvm_mac_ctxt_recalc_tsf_id(struct iwl_mvm *mvm, struct ieee80211_vif *vif); void iwl_mvm_probe_resp_data_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); -void iwl_mvm_rx_missed_vap_notif(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_channel_switch_start_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_channel_switch_error_notif(struct iwl_mvm *mvm, @@ -2520,12 +2518,6 @@ void iwl_mvm_ftm_restart_responder(struct iwl_mvm *mvm, struct ieee80211_bss_conf *bss_conf); void iwl_mvm_ftm_responder_stats(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); -int iwl_mvm_ftm_resp_remove_pasn_sta(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, u8 *addr); -int iwl_mvm_ftm_respoder_add_pasn_sta(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - u8 *addr, u32 cipher, u8 *tk, u32 tk_len, - u8 *hltk, u32 hltk_len); void iwl_mvm_ftm_responder_clear(struct iwl_mvm *mvm, struct ieee80211_vif *vif); @@ -2540,10 +2532,6 @@ int 
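[Editor's aside] iwl_mvm_smps_workaround(), added above, mirrors the vif's power-save setting into ps_disabled and triggers a firmware power update only when the caller will not issue one itself, which is why the BSS_CHANGED_PS path passes update=false. A toy model of that calling convention, with the rlc-offload gate omitted:

#include <stdbool.h>
#include <stdio.h>

struct vif { bool ps; bool ps_disabled; };

static void power_update(struct vif *v)
{
	printf("power update: ps_disabled=%d\n", v->ps_disabled);
}

/* Mirror the PS setting into ps_disabled; only push the update when
 * the caller is not going to do it right afterwards anyway. */
static void smps_workaround(struct vif *v, bool update)
{
	v->ps_disabled = !v->ps;
	if (update)
		power_update(v);
}

int main(void)
{
	struct vif v = { .ps = true };

	smps_workaround(&v, true);   /* station-state path updates here */

	v.ps = false;
	smps_workaround(&v, false);  /* BSS_CHANGED_PS path... */
	power_update(&v);            /* ...updates immediately after */
	return 0;
}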
iwl_mvm_ftm_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, void iwl_mvm_ftm_abort(struct iwl_mvm *mvm, struct cfg80211_pmsr_request *req); void iwl_mvm_ftm_initiator_smooth_config(struct iwl_mvm *mvm); void iwl_mvm_ftm_initiator_smooth_stop(struct iwl_mvm *mvm); -int iwl_mvm_ftm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - u8 *addr, u32 cipher, u8 *tk, u32 tk_len, - u8 *hltk, u32 hltk_len); -void iwl_mvm_ftm_remove_pasn_sta(struct iwl_mvm *mvm, u8 *addr); /* TDLS */ @@ -3043,4 +3031,7 @@ iwl_mvm_send_ap_tx_power_constraint_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_bss_conf *bss_conf, bool is_ap); + +void iwl_mvm_smps_workaround(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + bool update); #endif /* __IWL_MVM_H__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index 984f407f70272..76603ef02704a 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2018-2024 Intel Corporation + * Copyright (C) 2012-2014, 2018-2025 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -145,7 +145,7 @@ static void iwl_mvm_rx_esr_mode_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); - struct iwl_mvm_esr_mode_notif *notif = (void *)pkt->data; + struct iwl_esr_mode_notif *notif = (void *)pkt->data; struct ieee80211_vif *vif = iwl_mvm_get_bss_vif(mvm); /* FW recommendations is only for entering EMLSR */ @@ -495,7 +495,7 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = { RX_HANDLER_GRP(DATA_PATH_GROUP, ESR_MODE_NOTIF, iwl_mvm_rx_esr_mode_notif, RX_HANDLER_ASYNC_LOCKED_WIPHY, - struct iwl_mvm_esr_mode_notif), + struct iwl_esr_mode_notif), RX_HANDLER_GRP(DATA_PATH_GROUP, MONITOR_NOTIF, iwl_mvm_rx_monitor_notif, RX_HANDLER_ASYNC_LOCKED, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c index a8c4e354e2ce7..068c58e9c1eb4 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c @@ -1783,7 +1783,7 @@ static enum rs_action rs_get_rate_action(struct iwl_mvm *mvm, if ((high_tpt != IWL_INVALID_VALUE) && (high_tpt > current_tpt)) { IWL_DEBUG_RATE(mvm, - "Higher rate is better. Increate rate\n"); + "Higher rate is better. 
Increase rate\n"); return RS_ACTION_UPSCALE; } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c index 09fd8752046ee..14ea89f931bbf 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c @@ -995,7 +995,7 @@ iwl_mvm_decode_he_phy_ru_alloc(struct iwl_mvm_rx_phy_data *phy_data, */ u8 ru = le32_get_bits(phy_data->d1, IWL_RX_PHY_DATA1_HE_RU_ALLOC_MASK); u32 rate_n_flags = phy_data->rate_n_flags; - u32 he_type = rate_n_flags & RATE_MCS_HE_TYPE_MSK_V1; + u32 he_type = rate_n_flags & RATE_MCS_HE_TYPE_MSK; u8 offs = 0; rx_status->bw = RATE_INFO_BW_HE_RU; @@ -1050,13 +1050,13 @@ iwl_mvm_decode_he_phy_ru_alloc(struct iwl_mvm_rx_phy_data *phy_data, if (he_mu) he_mu->flags2 |= - le16_encode_bits(FIELD_GET(RATE_MCS_CHAN_WIDTH_MSK_V1, + le16_encode_bits(FIELD_GET(RATE_MCS_CHAN_WIDTH_MSK, rate_n_flags), IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW); - else if (he_type == RATE_MCS_HE_TYPE_TRIG_V1) + else if (he_type == RATE_MCS_HE_TYPE_TRIG) he->data6 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA6_TB_PPDU_BW_KNOWN) | - le16_encode_bits(FIELD_GET(RATE_MCS_CHAN_WIDTH_MSK_V1, + le16_encode_bits(FIELD_GET(RATE_MCS_CHAN_WIDTH_MSK, rate_n_flags), IEEE80211_RADIOTAP_HE_DATA6_TB_PPDU_BW); } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c index 7a4844ec3c104..78fd7faaed97d 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c @@ -4311,67 +4311,6 @@ u16 iwl_mvm_tid_queued(struct iwl_mvm *mvm, struct iwl_mvm_tid_data *tid_data) return ieee80211_sn_sub(sn, tid_data->next_reclaimed); } -int iwl_mvm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - struct iwl_mvm_int_sta *sta, u8 *addr, u32 cipher, - u8 *key, u32 key_len, - struct ieee80211_key_conf *keyconf) -{ - int ret; - u16 queue; - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - unsigned int wdg_timeout = - iwl_mvm_get_wd_timeout(mvm, vif); - bool mld = iwl_mvm_has_mld_api(mvm->fw); - u32 type = IWL_STA_LINK; - - if (mld) - type = STATION_TYPE_PEER; - - ret = iwl_mvm_allocate_int_sta(mvm, sta, 0, - NL80211_IFTYPE_UNSPECIFIED, type); - if (ret) - return ret; - - if (mld) - ret = iwl_mvm_mld_add_int_sta_with_queue(mvm, sta, addr, - mvmvif->deflink.fw_link_id, - &queue, - IWL_MAX_TID_COUNT, - &wdg_timeout); - else - ret = iwl_mvm_add_int_sta_with_queue(mvm, mvmvif->id, - mvmvif->color, addr, sta, - &queue, - IWL_MVM_TX_FIFO_BE); - if (ret) - goto out; - - keyconf->cipher = cipher; - memcpy(keyconf->key, key, key_len); - keyconf->keylen = key_len; - keyconf->flags = IEEE80211_KEY_FLAG_PAIRWISE; - - if (mld) { - /* The MFP flag is set according to the station mfp field. Since - * we don't have a station, set it manually. 
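[Editor's aside] The rs.c hunk above only corrects the "Increate"/"Increase" typo in the debug string, but the branch it sits in is easy to state in one piece: the rate scaler upscales when the measured throughput of the next-higher rate beats the current one. A simplified standalone model of just that branch; the constant and enum are stand-ins for the driver's IWL_INVALID_VALUE and rs_action, and the other branches are omitted:

#include <stdio.h>

#define INVALID_TPT -1  /* stand-in for IWL_INVALID_VALUE */

enum rs_action { RS_ACTION_STAY, RS_ACTION_UPSCALE };

static enum rs_action rate_action(int high_tpt, int current_tpt)
{
	if (high_tpt != INVALID_TPT && high_tpt > current_tpt)
		return RS_ACTION_UPSCALE;  /* "Higher rate is better. Increase rate" */
	return RS_ACTION_STAY;             /* remaining branches omitted */
}

int main(void)
{
	printf("action=%d\n", rate_action(120, 100));  /* 1: upscale */
	return 0;
}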
- */ - u32 key_flags = - iwl_mvm_get_sec_flags(mvm, vif, NULL, keyconf) | - IWL_SEC_KEY_FLAG_MFP; - u32 sta_mask = BIT(sta->sta_id); - - ret = iwl_mvm_mld_send_key(mvm, sta_mask, key_flags, keyconf); - } else { - ret = iwl_mvm_send_sta_key(mvm, sta->sta_id, keyconf, false, - 0, NULL, 0, 0, true); - } - -out: - if (ret) - iwl_mvm_dealloc_int_sta(mvm, sta); - return ret; -} - void iwl_mvm_cancel_channel_switch(struct iwl_mvm *mvm, struct ieee80211_vif *vif, u32 id) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h index 6856f7440ef3e..19c905b641e2a 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h @@ -597,10 +597,6 @@ void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm, void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif); int iwl_mvm_sta_ensure_queue(struct iwl_mvm *mvm, struct ieee80211_txq *txq); void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk); -int iwl_mvm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - struct iwl_mvm_int_sta *sta, u8 *addr, u32 cipher, - u8 *key, u32 key_len, - struct ieee80211_key_conf *key_conf_out); void iwl_mvm_cancel_channel_switch(struct iwl_mvm *mvm, struct ieee80211_vif *vif, u32 id); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c index 9216c43a35c4d..1a30bb1ff8caf 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2018-2024 Intel Corporation + * Copyright (C) 2012-2014, 2018-2025 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2017 Intel Deutschland GmbH */ @@ -771,7 +771,7 @@ static void iwl_mvm_cancel_session_protection(struct iwl_mvm *mvm, static void iwl_mvm_roc_rm_cmd(struct iwl_mvm *mvm, u32 activity) { - struct iwl_roc_req roc_cmd = { + struct iwl_roc_req_v5 roc_cmd = { .action = cpu_to_le32(FW_CTXT_ACTION_REMOVE), .activity = cpu_to_le32(activity), }; @@ -1030,6 +1030,8 @@ void iwl_mvm_rx_session_protect_notif(struct iwl_mvm *mvm, /* End TE, notify mac80211 */ mvmvif->time_event_data.id = SESSION_PROTECT_CONF_MAX_ID; mvmvif->time_event_data.link_id = -1; + /* set the bit so the ROC cleanup will actually clean up */ + set_bit(IWL_MVM_STATUS_ROC_P2P_RUNNING, &mvm->status); iwl_mvm_roc_finished(mvm); ieee80211_remain_on_channel_expired(mvm->hw); } else if (le32_to_cpu(notif->start)) { @@ -1100,7 +1102,7 @@ int iwl_mvm_roc_add_cmd(struct iwl_mvm *mvm, { int res; u32 duration_tu, delay; - struct iwl_roc_req roc_req = { + struct iwl_roc_req_v5 roc_req = { .action = cpu_to_le32(FW_CTXT_ACTION_ADD), .activity = cpu_to_le32(activity), .sta_id = cpu_to_le32(mvm->aux_sta.sta_id), diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c index d92470960b38b..c851290e75a24 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c @@ -105,7 +105,7 @@ static bool iwl_mvm_temp_notif_wait(struct iwl_notif_wait_data *notif_wait, void iwl_mvm_temp_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); - struct iwl_dts_measurement_notif_v2 *notif_v2; + struct iwl_dts_measurement_notif *notif_v2; int len = iwl_rx_packet_payload_len(pkt); int temp; u32 ths_crossed; @@ -506,7 +506,7 
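[Editor's aside] The session-protection hunk above sets IWL_MVM_STATUS_ROC_P2P_RUNNING before calling iwl_mvm_roc_finished() so that the cleanup path sees an active session. A toy illustration of why a flag-gated cleanup silently becomes a no-op when the bit is not set first:

#include <stdio.h>

#define STATUS_ROC_P2P_RUNNING (1u << 0)

static unsigned long status;

static void roc_finished(void)
{
	/* Cleanup only runs if it believes a ROC session is active. */
	if (status & STATUS_ROC_P2P_RUNNING) {
		status &= ~STATUS_ROC_P2P_RUNNING;
		printf("ROC cleaned up\n");
	} else {
		printf("nothing to clean\n");
	}
}

int main(void)
{
	roc_finished();                    /* bit clear: cleanup skipped */
	status |= STATUS_ROC_P2P_RUNNING;  /* set the bit before finishing */
	roc_finished();                    /* now the cleanup runs */
	return 0;
}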
@@ static const u32 iwl_mvm_cdev_budgets[] = { int iwl_mvm_ctdp_command(struct iwl_mvm *mvm, u32 op, u32 state) { - struct iwl_mvm_ctdp_cmd cmd = { + struct iwl_ctdp_cmd cmd = { .operation = cpu_to_le32(op), .budget = cpu_to_le32(iwl_mvm_cdev_budgets[state]), .window_size = 0, diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c index 838c426db7f05..8aa7c455bdee3 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2018-2024 Intel Corporation + * Copyright (C) 2018-2025 Intel Corporation */ #include #include "iwl-trans.h" @@ -137,6 +137,9 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans, if (trans->dsbr_urm_permanent) control_flags_ext |= IWL_PRPH_SCRATCH_EXT_URM_PERM; + if (trans->ext_32khz_clock_valid) + control_flags_ext |= IWL_PRPH_SCRATCH_EXT_32KHZ_CLK_VALID; + /* Allocate prph scratch */ prph_scratch = dma_alloc_coherent(trans->dev, sizeof(*prph_scratch), &trans_pcie->prph_scratch_dma_addr, diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c index e0b657b2f74b0..93446c3740081 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2005-2014, 2018-2024 Intel Corporation + * Copyright (C) 2005-2014, 2018-2025 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -498,7 +498,8 @@ VISIBLE_IF_IWLWIFI_KUNIT const struct pci_device_id iwl_hw_card_ids[] = { /* Ma devices */ {IWL_PCI_DEVICE(0x2729, PCI_ANY_ID, iwl_ma_trans_cfg)}, {IWL_PCI_DEVICE(0x7E40, PCI_ANY_ID, iwl_ma_trans_cfg)}, - +#endif /* CONFIG_IWLMVM */ +#if IS_ENABLED(CONFIG_IWLMLD) /* Bz devices */ {IWL_PCI_DEVICE(0x272b, PCI_ANY_ID, iwl_gl_trans_cfg)}, {IWL_PCI_DEVICE(0xA840, 0x0000, iwl_bz_trans_cfg)}, @@ -543,7 +544,7 @@ VISIBLE_IF_IWLWIFI_KUNIT const struct pci_device_id iwl_hw_card_ids[] = { /* Dr devices */ {IWL_PCI_DEVICE(0x272F, PCI_ANY_ID, iwl_dr_trans_cfg)}, -#endif /* CONFIG_IWLMVM */ +#endif /* CONFIG_IWLMLD */ {0} }; @@ -551,16 +552,17 @@ MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids); EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_hw_card_ids); #define _IWL_DEV_INFO(_device, _subdevice, _mac_type, _mac_step, _rf_type, \ - _rf_id, _rf_step, _no_160, _cores, _cdb, _cfg, _name) \ + _rf_id, _rf_step, _bw_limit, _cores, _cdb, _cfg, _name) \ { .device = (_device), .subdevice = (_subdevice), .cfg = &(_cfg), \ .name = _name, .mac_type = _mac_type, .rf_type = _rf_type, .rf_step = _rf_step, \ - .no_160 = _no_160, .cores = _cores, .rf_id = _rf_id, \ + .bw_limit = _bw_limit, .cores = _cores, .rf_id = _rf_id, \ .mac_step = _mac_step, .cdb = _cdb, .jacket = IWL_CFG_ANY } #define IWL_DEV_INFO(_device, _subdevice, _cfg, _name) \ _IWL_DEV_INFO(_device, _subdevice, IWL_CFG_ANY, IWL_CFG_ANY, \ - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, \ - IWL_CFG_ANY, _cfg, _name) + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, \ + IWL_CFG_BW_NO_LIM, IWL_CFG_ANY, IWL_CFG_ANY, \ + _cfg, _name) VISIBLE_IF_IWLWIFI_KUNIT const struct iwl_dev_info iwl_dev_info_table[] = { #if IS_ENABLED(CONFIG_IWLMVM) @@ -723,66 +725,66 @@ VISIBLE_IF_IWLWIFI_KUNIT const struct iwl_dev_info iwl_dev_info_table[] = { _IWL_DEV_INFO(IWL_CFG_ANY, 
IWL_CFG_ANY, IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_2ac_cfg_soc, iwl9461_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_2ac_cfg_soc, iwl9461_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_2ac_cfg_soc, iwl9462_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_2ac_cfg_soc, iwl9462_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_2ac_cfg_soc, iwl9560_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_2ac_cfg_soc, iwl9560_name), _IWL_DEV_INFO(0x2526, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY, IWL_CFG_RF_TYPE_TH, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_160, IWL_CFG_CORES_BT_GNSS, IWL_CFG_NO_CDB, + IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT_GNSS, IWL_CFG_NO_CDB, iwl9260_2ac_cfg, iwl9270_160_name), _IWL_DEV_INFO(0x2526, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY, IWL_CFG_RF_TYPE_TH, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_NO_160, IWL_CFG_CORES_BT_GNSS, IWL_CFG_NO_CDB, + 80, IWL_CFG_CORES_BT_GNSS, IWL_CFG_NO_CDB, iwl9260_2ac_cfg, iwl9270_name), _IWL_DEV_INFO(0x271B, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY, IWL_CFG_RF_TYPE_TH1, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9260_2ac_cfg, iwl9162_160_name), _IWL_DEV_INFO(0x271B, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY, IWL_CFG_RF_TYPE_TH1, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9260_2ac_cfg, iwl9162_name), _IWL_DEV_INFO(0x2526, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY, IWL_CFG_RF_TYPE_TH, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9260_2ac_cfg, iwl9260_160_name), _IWL_DEV_INFO(0x2526, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY, IWL_CFG_RF_TYPE_TH, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9260_2ac_cfg, iwl9260_name), /* Qu with Jf */ @@ -790,132 +792,132 @@ VISIBLE_IF_IWLWIFI_KUNIT const struct iwl_dev_info iwl_dev_info_table[] = { _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_qu_b0_jf_b0_cfg, iwl9461_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP, IWL_CFG_RF_TYPE_JF1, 
IWL_CFG_RF_ID_JF1, IWL_CFG_ANY, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_qu_b0_jf_b0_cfg, iwl9461_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_qu_b0_jf_b0_cfg, iwl9462_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_qu_b0_jf_b0_cfg, iwl9462_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_qu_b0_jf_b0_cfg, iwl9560_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_qu_b0_jf_b0_cfg, iwl9560_name), _IWL_DEV_INFO(IWL_CFG_ANY, 0x1551, IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_qu_b0_jf_b0_cfg, iwl9560_killer_1550s_name), _IWL_DEV_INFO(IWL_CFG_ANY, 0x1552, IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_qu_b0_jf_b0_cfg, iwl9560_killer_1550i_name), /* Qu C step */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_qu_c0_jf_b0_cfg, iwl9461_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_qu_c0_jf_b0_cfg, iwl9461_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_qu_c0_jf_b0_cfg, iwl9462_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_qu_c0_jf_b0_cfg, iwl9462_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_qu_c0_jf_b0_cfg, iwl9560_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_qu_c0_jf_b0_cfg, iwl9560_name), _IWL_DEV_INFO(IWL_CFG_ANY, 0x1551, IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY, 
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_qu_c0_jf_b0_cfg, iwl9560_killer_1550s_name), _IWL_DEV_INFO(IWL_CFG_ANY, 0x1552, IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_qu_c0_jf_b0_cfg, iwl9560_killer_1550i_name), /* QuZ */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_quz_a0_jf_b0_cfg, iwl9461_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_quz_a0_jf_b0_cfg, iwl9461_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_quz_a0_jf_b0_cfg, iwl9462_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_quz_a0_jf_b0_cfg, iwl9462_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_quz_a0_jf_b0_cfg, iwl9560_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_quz_a0_jf_b0_cfg, iwl9560_name), _IWL_DEV_INFO(IWL_CFG_ANY, 0x1551, IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_quz_a0_jf_b0_cfg, iwl9560_killer_1550s_name), _IWL_DEV_INFO(IWL_CFG_ANY, 0x1552, IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwl9560_quz_a0_jf_b0_cfg, iwl9560_killer_1550i_name), /* Qu with Hr */ @@ -923,202 +925,204 @@ VISIBLE_IF_IWLWIFI_KUNIT const struct iwl_dev_info iwl_dev_info_table[] = { _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP, IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, + IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, iwl_qu_b0_hr1_b0, iwl_ax101_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP, IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, + 80, IWL_CFG_ANY, IWL_CFG_NO_CDB, iwl_qu_b0_hr_b0, iwl_ax203_name), /* Qu C step */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, + IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, iwl_qu_c0_hr1_b0, iwl_ax101_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, 
IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, + 80, IWL_CFG_ANY, IWL_CFG_NO_CDB, iwl_qu_c0_hr_b0, iwl_ax203_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, + IWL_CFG_BW_NO_LIM, IWL_CFG_ANY, IWL_CFG_NO_CDB, iwl_qu_c0_hr_b0, iwl_ax201_name), /* QuZ */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QUZ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, + IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, iwl_quz_a0_hr1_b0, iwl_ax101_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QUZ, SILICON_B_STEP, IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, + 80, IWL_CFG_ANY, IWL_CFG_NO_CDB, iwl_cfg_quz_a0_hr_b0, iwl_ax203_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_QUZ, SILICON_B_STEP, IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, + IWL_CFG_BW_NO_LIM, IWL_CFG_ANY, IWL_CFG_NO_CDB, iwl_cfg_quz_a0_hr_b0, iwl_ax201_name), /* Ma */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_MA, IWL_CFG_ANY, IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, + IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, iwl_cfg_ma, iwl_ax201_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_MA, IWL_CFG_ANY, IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_ANY, iwl_cfg_ma, iwl_ax211_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_MA, IWL_CFG_ANY, IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, + IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB, iwl_cfg_ma, iwl_ax231_name), /* So with Hr */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, + 80, IWL_CFG_ANY, IWL_CFG_NO_CDB, iwl_cfg_so_a0_hr_a0, iwl_ax203_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, + 80, IWL_CFG_ANY, IWL_CFG_NO_CDB, iwl_cfg_so_a0_hr_a0, iwl_ax101_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, + IWL_CFG_BW_NO_LIM, IWL_CFG_ANY, IWL_CFG_NO_CDB, iwl_cfg_so_a0_hr_a0, iwl_ax201_name), /* So-F with Hr */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, + 80, IWL_CFG_ANY, IWL_CFG_NO_CDB, iwl_cfg_so_a0_hr_a0, iwl_ax203_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, + 80, IWL_CFG_ANY, IWL_CFG_NO_CDB, iwl_cfg_so_a0_hr_a0, iwl_ax101_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, + IWL_CFG_BW_NO_LIM, IWL_CFG_ANY, IWL_CFG_NO_CDB, iwl_cfg_so_a0_hr_a0, iwl_ax201_name), /* So-F with Gf */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY, - 
IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, + IWL_CFG_BW_NO_LIM, IWL_CFG_ANY, IWL_CFG_NO_CDB, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_CDB, + IWL_CFG_BW_NO_LIM, IWL_CFG_ANY, IWL_CFG_CDB, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_name), /* SoF with JF2 */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwlax210_2ax_cfg_so_jf_b0, iwl9560_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwlax210_2ax_cfg_so_jf_b0, iwl9560_name), /* SoF with JF */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwlax210_2ax_cfg_so_jf_b0, iwl9461_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwlax210_2ax_cfg_so_jf_b0, iwl9462_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwlax210_2ax_cfg_so_jf_b0, iwl9461_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwlax210_2ax_cfg_so_jf_b0, iwl9462_name), /* So with GF */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB, + IWL_CFG_BW_NO_LIM, IWL_CFG_ANY, IWL_CFG_NO_CDB, iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_CDB, + IWL_CFG_BW_NO_LIM, IWL_CFG_ANY, IWL_CFG_CDB, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_name), /* So with JF2 */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwlax210_2ax_cfg_so_jf_b0, iwl9560_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF, IWL_CFG_ANY, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwlax210_2ax_cfg_so_jf_b0, iwl9560_name), /* So with JF */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwlax210_2ax_cfg_so_jf_b0, iwl9461_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, 
IWL_CFG_ANY, - IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + IWL_CFG_BW_NO_LIM, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwlax210_2ax_cfg_so_jf_b0, iwl9462_160_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1, IWL_CFG_ANY, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwlax210_2ax_cfg_so_jf_b0, iwl9461_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY, IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV, IWL_CFG_ANY, - IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, + 80, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwlax210_2ax_cfg_so_jf_b0, iwl9462_name), +#endif /* CONFIG_IWLMVM */ +#if IS_ENABLED(CONFIG_IWLMLD) /* Bz */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_ANY, iwl_cfg_bz, iwl_ax201_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_ANY, iwl_cfg_bz, iwl_ax211_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, @@ -1130,75 +1134,121 @@ VISIBLE_IF_IWLWIFI_KUNIT const struct iwl_dev_info iwl_dev_info_table[] = { _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY, IWL_CFG_RF_TYPE_WH, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_ANY, iwl_cfg_bz, iwl_wh_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_BZ_W, IWL_CFG_ANY, IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_ANY, iwl_cfg_bz, iwl_ax201_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_BZ_W, IWL_CFG_ANY, IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_ANY, iwl_cfg_bz, iwl_ax211_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_BZ_W, IWL_CFG_ANY, IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_ANY, iwl_cfg_bz, iwl_fm_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_BZ_W, IWL_CFG_ANY, IWL_CFG_RF_TYPE_WH, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_ANY, iwl_cfg_bz, iwl_wh_name), /* Ga (Gl) */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_GL, IWL_CFG_ANY, IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_320, IWL_CFG_ANY, IWL_CFG_NO_CDB, + IWL_CFG_BW_NO_LIM, IWL_CFG_ANY, IWL_CFG_NO_CDB, iwl_cfg_gl, iwl_gl_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_GL, IWL_CFG_ANY, IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_NO_320, IWL_CFG_ANY, IWL_CFG_NO_CDB, + 160, IWL_CFG_ANY, IWL_CFG_NO_CDB, iwl_cfg_gl, iwl_mtp_name), /* Sc */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SC, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, - iwl_cfg_sc, iwl_sc_name), + IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_ANY, + iwl_cfg_sc, iwl_ax211_name), + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_SC, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_ANY, + iwl_cfg_sc, iwl_fm_name), + 
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_SC, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_WH, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_BW_NO_LIM, IWL_CFG_ANY, IWL_CFG_ANY, + iwl_cfg_sc, iwl_wh_name), + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_SC, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_WH, IWL_CFG_ANY, IWL_CFG_ANY, + 160, IWL_CFG_ANY, IWL_CFG_ANY, + iwl_cfg_sc, iwl_sp_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SC2, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, - iwl_cfg_sc2, iwl_sc2_name), + IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_ANY, + iwl_cfg_sc2, iwl_ax211_name), + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_SC2, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_ANY, + iwl_cfg_sc2, iwl_fm_name), + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_SC2, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_WH, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_BW_NO_LIM, IWL_CFG_ANY, IWL_CFG_ANY, + iwl_cfg_sc2, iwl_wh_name), + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_SC2, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_WH, IWL_CFG_ANY, IWL_CFG_ANY, + 160, IWL_CFG_ANY, IWL_CFG_ANY, + iwl_cfg_sc2, iwl_sp_name), _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SC2F, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, - iwl_cfg_sc2f, iwl_sc2f_name), + IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_ANY, + iwl_cfg_sc2f, iwl_ax211_name), + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_SC2F, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_ANY, + iwl_cfg_sc2f, iwl_fm_name), + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_SC2F, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_WH, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_BW_NO_LIM, IWL_CFG_ANY, IWL_CFG_ANY, + iwl_cfg_sc2f, iwl_wh_name), + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_SC2F, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_WH, IWL_CFG_ANY, IWL_CFG_ANY, + 160, IWL_CFG_ANY, IWL_CFG_ANY, + iwl_cfg_sc2f, iwl_sp_name), + /* Dr */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_DR, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_ANY, iwl_cfg_dr, iwl_dr_name), /* Br */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_BR, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_BW_ANY, IWL_CFG_ANY, IWL_CFG_ANY, iwl_cfg_br, iwl_br_name), -#endif /* CONFIG_IWLMVM */ +#endif /* CONFIG_IWLMLD */ }; EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_dev_info_table); @@ -1348,7 +1398,7 @@ static int map_crf_id(struct iwl_trans *iwl_trans) VISIBLE_IF_IWLWIFI_KUNIT const struct iwl_dev_info * iwl_pci_find_dev_info(u16 device, u16 subsystem_device, u16 mac_type, u8 mac_step, u16 rf_type, u8 cdb, - u8 jacket, u8 rf_id, u8 no_160, u8 cores, u8 rf_step) + u8 jacket, u8 rf_id, u8 bw_limit, u8 cores, u8 rf_step) { int num_devices = ARRAY_SIZE(iwl_dev_info_table); int i; @@ -1391,8 +1441,15 @@ iwl_pci_find_dev_info(u16 device, u16 subsystem_device, dev_info->rf_id != rf_id) continue; - if (dev_info->no_160 != (u8)IWL_CFG_ANY && - dev_info->no_160 != no_160) + /* + * Check that bw_limit have the same "boolean" value since + * IWL_SUBDEVICE_BW_LIM can only return a boolean value and + * dev_info->bw_limit encodes a non-boolean value. 
+ * dev_info->bw_limit == IWL_CFG_BW_NO_LIM must be equal to + * !bw_limit to have a match. + */ + if (dev_info->bw_limit != IWL_CFG_BW_ANY && + (dev_info->bw_limit == IWL_CFG_BW_NO_LIM) == !!bw_limit) continue; if (dev_info->cores != (u8)IWL_CFG_ANY && @@ -1530,13 +1587,13 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) CSR_HW_RFID_IS_CDB(iwl_trans->hw_rf_id), CSR_HW_RFID_IS_JACKET(iwl_trans->hw_rf_id), IWL_SUBDEVICE_RF_ID(pdev->subsystem_device), - IWL_SUBDEVICE_NO_160(pdev->subsystem_device), + IWL_SUBDEVICE_BW_LIM(pdev->subsystem_device), IWL_SUBDEVICE_CORES(pdev->subsystem_device), CSR_HW_RFID_STEP(iwl_trans->hw_rf_id)); if (dev_info) { iwl_trans->cfg = dev_info->cfg; iwl_trans->name = dev_info->name; - iwl_trans->no_160 = dev_info->no_160 == IWL_CFG_NO_160; + iwl_trans->bw_limit = dev_info->bw_limit; } #if IS_ENABLED(CONFIG_IWLMVM) diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h index 856b7e9f717d5..45460f93d24ad 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h +++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2003-2015, 2018-2024 Intel Corporation + * Copyright (C) 2003-2015, 2018-2025 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -646,7 +646,8 @@ dma_addr_t iwl_pcie_get_sgt_tb_phys(struct sg_table *sgt, unsigned int offset, unsigned int len); struct sg_table *iwl_pcie_prep_tso(struct iwl_trans *trans, struct sk_buff *skb, struct iwl_cmd_meta *cmd_meta, - u8 **hdr, unsigned int hdr_room); + u8 **hdr, unsigned int hdr_room, + unsigned int offset); void iwl_pcie_free_tso_pages(struct iwl_trans *trans, struct sk_buff *skb, struct iwl_cmd_meta *cmd_meta); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c index 793514a1852a3..3ece34e30d580 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright (C) 2017 Intel Deutschland GmbH - * Copyright (C) 2018-2024 Intel Corporation + * Copyright (C) 2018-2025 Intel Corporation */ #include "iwl-trans.h" #include "iwl-prph.h" @@ -95,7 +95,7 @@ static void iwl_pcie_gen2_apm_stop(struct iwl_trans *trans, bool op_mode_leave) CSR_GP_CNTRL_REG_FLAG_INIT_DONE); } -static void iwl_trans_pcie_fw_reset_handshake(struct iwl_trans *trans) +void iwl_trans_pcie_fw_reset_handshake(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int ret; diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c index 1f483f15c2383..401919f9fe88e 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright (C) 2017 Intel Deutschland GmbH - * Copyright (C) 2018-2020, 2023-2024 Intel Corporation + * Copyright (C) 2018-2020, 2023-2025 Intel Corporation */ #include #include @@ -188,7 +188,8 @@ static int iwl_txq_gen2_build_amsdu(struct iwl_trans *trans, (3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr)); /* Our device supports 9 segments at most, it will fit in 1 page */ - sgt = iwl_pcie_prep_tso(trans, skb, out_meta, &start_hdr, 
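[Editor's aside] The comment above explains an asymmetric comparison: IWL_SUBDEVICE_BW_LIM yields only a boolean "has a limit" bit, while the table entry stores an actual limit in MHz or a sentinel. A standalone predicate showing when an entry matches; the sentinel values are stand-ins for IWL_CFG_BW_ANY and IWL_CFG_BW_NO_LIM, and the kernel code tests the negation to decide when to skip an entry:

#include <stdbool.h>
#include <stdio.h>

#define BW_ANY    0xffff
#define BW_NO_LIM 0xfffe

/* An entry matches when "table says unlimited" equals "device says
 * no limit" (or when the table does not care at all). */
static bool bw_limit_matches(unsigned int table_bw, bool dev_has_limit)
{
	if (table_bw == BW_ANY)
		return true;
	return (table_bw == BW_NO_LIM) == !dev_has_limit;
}

int main(void)
{
	printf("%d %d %d\n",
	       bw_limit_matches(BW_NO_LIM, false),  /* 1: both unlimited */
	       bw_limit_matches(80, true),          /* 1: both limited */
	       bw_limit_matches(80, false));        /* 0: mismatch */
	return 0;
}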
hdr_room); + sgt = iwl_pcie_prep_tso(trans, skb, out_meta, &start_hdr, hdr_room, + snap_ip_tcp_hdrlen + hdr_len); if (!sgt) return -ENOMEM; @@ -347,6 +348,7 @@ iwl_tfh_tfd *iwl_txq_gen2_build_tx_amsdu(struct iwl_trans *trans, return tfd; out_err: + iwl_pcie_free_tso_pages(trans, skb, out_meta); iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd); return NULL; } diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c index 334ebd4c12fa7..7c1dd5cc084ac 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2003-2014, 2018-2021, 2023-2024 Intel Corporation + * Copyright (C) 2003-2014, 2018-2021, 2023-2025 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -1855,6 +1855,7 @@ dma_addr_t iwl_pcie_get_sgt_tb_phys(struct sg_table *sgt, unsigned int offset, * @cmd_meta: command meta to store the scatter list information for unmapping * @hdr: output argument for TSO headers * @hdr_room: requested length for TSO headers + * @offset: offset into the data from which mapping should start * * Allocate space for a scatter gather list and TSO headers and map the SKB * using the scatter gather list. The SKB is unmapped again when the page is @@ -1864,9 +1865,12 @@ dma_addr_t iwl_pcie_get_sgt_tb_phys(struct sg_table *sgt, unsigned int offset, */ struct sg_table *iwl_pcie_prep_tso(struct iwl_trans *trans, struct sk_buff *skb, struct iwl_cmd_meta *cmd_meta, - u8 **hdr, unsigned int hdr_room) + u8 **hdr, unsigned int hdr_room, + unsigned int offset) { struct sg_table *sgt; + unsigned int n_segments = skb_shinfo(skb)->nr_frags + 1; + int orig_nents; if (WARN_ON_ONCE(skb_has_frag_list(skb))) return NULL; @@ -1874,8 +1878,7 @@ struct sg_table *iwl_pcie_prep_tso(struct iwl_trans *trans, struct sk_buff *skb, *hdr = iwl_pcie_get_page_hdr(trans, hdr_room + __alignof__(struct sg_table) + sizeof(struct sg_table) + - (skb_shinfo(skb)->nr_frags + 1) * - sizeof(struct scatterlist), + n_segments * sizeof(struct scatterlist), skb); if (!*hdr) return NULL; @@ -1883,14 +1886,15 @@ struct sg_table *iwl_pcie_prep_tso(struct iwl_trans *trans, struct sk_buff *skb, sgt = (void *)PTR_ALIGN(*hdr + hdr_room, __alignof__(struct sg_table)); sgt->sgl = (void *)(sgt + 1); - sg_init_table(sgt->sgl, skb_shinfo(skb)->nr_frags + 1); + sg_init_table(sgt->sgl, n_segments); /* Only map the data, not the header (it is copied to the TSO page) */ - sgt->orig_nents = skb_to_sgvec(skb, sgt->sgl, skb_headlen(skb), - skb->data_len); - if (WARN_ON_ONCE(sgt->orig_nents <= 0)) + orig_nents = skb_to_sgvec(skb, sgt->sgl, offset, skb->len - offset); + if (WARN_ON_ONCE(orig_nents <= 0)) return NULL; + sgt->orig_nents = orig_nents; + /* And map the entire SKB */ if (dma_map_sgtable(trans->dev, sgt, DMA_TO_DEVICE, 0) < 0) return NULL; @@ -1939,7 +1943,8 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb, (3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr)) + iv_len; /* Our device supports 9 segments at most, it will fit in 1 page */ - sgt = iwl_pcie_prep_tso(trans, skb, out_meta, &start_hdr, hdr_room); + sgt = iwl_pcie_prep_tso(trans, skb, out_meta, &start_hdr, hdr_room, + snap_ip_tcp_hdrlen + hdr_len + iv_len); if (!sgt) return -ENOMEM; diff --git a/drivers/net/wireless/intel/iwlwifi/tests/devinfo.c b/drivers/net/wireless/intel/iwlwifi/tests/devinfo.c index 
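[Editor's aside] iwl_pcie_prep_tso() above gains an offset parameter and sizes the TSO page for the copied headers, an aligned sg_table, and one scatterlist entry per segment (nr_frags + 1). A standalone sketch of that size computation; the entry layout here is illustrative, not the kernel's struct scatterlist:

#include <stdint.h>
#include <stdio.h>

struct sg_entry { uint64_t addr; uint32_t len; };
struct sg_table_hdr { struct sg_entry *sgl; int orig_nents; };

/* Room needed in one TSO page: headers first, then an aligned
 * sg_table, then one scatterlist entry per segment (frags + head). */
static size_t tso_page_room(size_t hdr_room, unsigned int nr_frags)
{
	unsigned int n_segments = nr_frags + 1;

	return hdr_room + _Alignof(struct sg_table_hdr) +
	       sizeof(struct sg_table_hdr) +
	       n_segments * sizeof(struct sg_entry);
}

int main(void)
{
	printf("need %zu bytes for hdr_room=256, 8 frags\n",
	       tso_page_room(256, 8));
	return 0;
}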
d0bda23c628aa..7ef5e89c6af27 100644 --- a/drivers/net/wireless/intel/iwlwifi/tests/devinfo.c +++ b/drivers/net/wireless/intel/iwlwifi/tests/devinfo.c @@ -2,7 +2,7 @@ /* * KUnit tests for the iwlwifi device info table * - * Copyright (C) 2023-2024 Intel Corporation + * Copyright (C) 2023-2025 Intel Corporation */ #include #include @@ -13,9 +13,9 @@ MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING"); static void iwl_pci_print_dev_info(const char *pfx, const struct iwl_dev_info *di) { - printk(KERN_DEBUG "%sdev=%.4x,subdev=%.4x,mac_type=%.4x,mac_step=%.4x,rf_type=%.4x,cdb=%d,jacket=%d,rf_id=%.2x,no_160=%d,cores=%.2x\n", + printk(KERN_DEBUG "%sdev=%.4x,subdev=%.4x,mac_type=%.4x,mac_step=%.4x,rf_type=%.4x,cdb=%d,jacket=%d,rf_id=%.2x,bw_limit=%d,cores=%.2x\n", pfx, di->device, di->subdevice, di->mac_type, di->mac_step, - di->rf_type, di->cdb, di->jacket, di->rf_id, di->no_160, + di->rf_type, di->cdb, di->jacket, di->rf_id, di->bw_limit, di->cores); } @@ -31,8 +31,13 @@ static void devinfo_table_order(struct kunit *test) di->mac_type, di->mac_step, di->rf_type, di->cdb, di->jacket, di->rf_id, - di->no_160, di->cores, di->rf_step); - if (ret != di) { + di->bw_limit != IWL_CFG_BW_NO_LIM, + di->cores, di->rf_step); + if (!ret) { + iwl_pci_print_dev_info("No entry found for: ", di); + KUNIT_FAIL(test, + "No entry found for entry at index %d\n", idx); + } else if (ret != di) { iwl_pci_print_dev_info("searched: ", di); iwl_pci_print_dev_info("found: ", ret); KUNIT_FAIL(test, diff --git a/drivers/net/wireless/marvell/libertas/cmd.c b/drivers/net/wireless/marvell/libertas/cmd.c index 5a525da434c28..21fde876bb0de 100644 --- a/drivers/net/wireless/marvell/libertas/cmd.c +++ b/drivers/net/wireless/marvell/libertas/cmd.c @@ -452,46 +452,6 @@ int lbs_set_snmp_mib(struct lbs_private *priv, u32 oid, u16 val) return ret; } -/** - * lbs_get_snmp_mib - Get an SNMP MIB value - * - * @priv: A pointer to &struct lbs_private structure - * @oid: The OID to retrieve from the firmware - * @out_val: Location for the returned value - * - * returns: 0 on success, error on failure - */ -int lbs_get_snmp_mib(struct lbs_private *priv, u32 oid, u16 *out_val) -{ - struct cmd_ds_802_11_snmp_mib cmd; - int ret; - - memset(&cmd, 0, sizeof (cmd)); - cmd.hdr.size = cpu_to_le16(sizeof(cmd)); - cmd.action = cpu_to_le16(CMD_ACT_GET); - cmd.oid = cpu_to_le16(oid); - - ret = lbs_cmd_with_response(priv, CMD_802_11_SNMP_MIB, &cmd); - if (ret) - goto out; - - switch (le16_to_cpu(cmd.bufsize)) { - case sizeof(u8): - *out_val = cmd.value[0]; - break; - case sizeof(u16): - *out_val = le16_to_cpu(*((__le16 *)(&cmd.value))); - break; - default: - lbs_deb_cmd("SNMP_CMD: (get) unhandled OID 0x%x size %d\n", - oid, le16_to_cpu(cmd.bufsize)); - break; - } - -out: - return ret; -} - /** * lbs_get_tx_power - Get the min, max, and current TX power * @@ -524,31 +484,6 @@ int lbs_get_tx_power(struct lbs_private *priv, s16 *curlevel, s16 *minlevel, return ret; } -/** - * lbs_set_tx_power - Set the TX power - * - * @priv: A pointer to &struct lbs_private structure - * @dbm: The desired power level in dBm - * - * returns: 0 on success, error on failure - */ -int lbs_set_tx_power(struct lbs_private *priv, s16 dbm) -{ - struct cmd_ds_802_11_rf_tx_power cmd; - int ret; - - memset(&cmd, 0, sizeof(cmd)); - cmd.hdr.size = cpu_to_le16(sizeof(cmd)); - cmd.action = cpu_to_le16(CMD_ACT_SET); - cmd.curlevel = cpu_to_le16(dbm); - - lbs_deb_cmd("SET_RF_TX_POWER: %d dBm\n", dbm); - - ret = lbs_cmd_with_response(priv, CMD_802_11_RF_TX_POWER, &cmd); - - return ret; -} - /** * 
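[Editor's aside] lbs_get_snmp_mib(), removed above, switched on the firmware-reported buffer size and byte-swapped 16-bit values from little-endian wire order. A self-contained model of that response parsing, with a portable le16 helper in place of the kernel's le16_to_cpu():

#include <stdint.h>
#include <stdio.h>

static uint16_t le16_load(const uint8_t *p)
{
	return (uint16_t)p[0] | ((uint16_t)p[1] << 8);
}

/* The firmware reports the value's size; 8-bit values are used as-is,
 * 16-bit values arrive little-endian, anything else is unhandled. */
static int parse_snmp_value(const uint8_t *buf, uint16_t bufsize,
			    uint16_t *out_val)
{
	switch (bufsize) {
	case sizeof(uint8_t):
		*out_val = buf[0];
		return 0;
	case sizeof(uint16_t):
		*out_val = le16_load(buf);
		return 0;
	default:
		return -1;  /* unhandled OID size */
	}
}

int main(void)
{
	uint8_t wire[2] = { 0x34, 0x12 };
	uint16_t v;

	if (!parse_snmp_value(wire, 2, &v))
		printf("value=0x%04x\n", v);  /* value=0x1234 */
	return 0;
}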
lbs_set_monitor_mode - Enable or disable monitor mode * (only implemented on OLPC usb8388 FW) @@ -968,10 +903,6 @@ static void lbs_submit_command(struct lbs_private *priv, } if (command == CMD_802_11_DEEP_SLEEP) { - if (priv->is_auto_deep_sleep_enabled) { - priv->wakeup_dev_required = 1; - priv->dnld_sent = 0; - } priv->is_deep_sleep = 1; lbs_complete_command(priv, cmdnode, 0); } else { @@ -1440,70 +1371,6 @@ void lbs_ps_confirm_sleep(struct lbs_private *priv) } -/** - * lbs_set_tpc_cfg - Configures the transmission power control functionality - * - * @priv: A pointer to &struct lbs_private structure - * @enable: Transmission power control enable - * @p0: Power level when link quality is good (dBm). - * @p1: Power level when link quality is fair (dBm). - * @p2: Power level when link quality is poor (dBm). - * @usesnr: Use Signal to Noise Ratio in TPC - * - * returns: 0 on success - */ -int lbs_set_tpc_cfg(struct lbs_private *priv, int enable, int8_t p0, int8_t p1, - int8_t p2, int usesnr) -{ - struct cmd_ds_802_11_tpc_cfg cmd; - int ret; - - memset(&cmd, 0, sizeof(cmd)); - cmd.hdr.size = cpu_to_le16(sizeof(cmd)); - cmd.action = cpu_to_le16(CMD_ACT_SET); - cmd.enable = !!enable; - cmd.usesnr = !!usesnr; - cmd.P0 = p0; - cmd.P1 = p1; - cmd.P2 = p2; - - ret = lbs_cmd_with_response(priv, CMD_802_11_TPC_CFG, &cmd); - - return ret; -} - -/** - * lbs_set_power_adapt_cfg - Configures the power adaptation settings - * - * @priv: A pointer to &struct lbs_private structure - * @enable: Power adaptation enable - * @p0: Power level for 1, 2, 5.5 and 11 Mbps (dBm). - * @p1: Power level for 6, 9, 12, 18, 22, 24 and 36 Mbps (dBm). - * @p2: Power level for 48 and 54 Mbps (dBm). - * - * returns: 0 on Success - */ - -int lbs_set_power_adapt_cfg(struct lbs_private *priv, int enable, int8_t p0, - int8_t p1, int8_t p2) -{ - struct cmd_ds_802_11_pa_cfg cmd; - int ret; - - memset(&cmd, 0, sizeof(cmd)); - cmd.hdr.size = cpu_to_le16(sizeof(cmd)); - cmd.action = cpu_to_le16(CMD_ACT_SET); - cmd.enable = !!enable; - cmd.P0 = p0; - cmd.P1 = p1; - cmd.P2 = p2; - - ret = lbs_cmd_with_response(priv, CMD_802_11_PA_CFG , &cmd); - - return ret; -} - - struct cmd_ctrl_node *__lbs_cmd_async(struct lbs_private *priv, uint16_t command, struct cmd_header *in_cmd, int in_cmd_size, int (*callback)(struct lbs_private *, unsigned long, struct cmd_header *), @@ -1520,12 +1387,10 @@ struct cmd_ctrl_node *__lbs_cmd_async(struct lbs_private *priv, /* No commands are allowed in Deep Sleep until we toggle the GPIO * to wake up the card and it has signaled that it's ready. */ - if (!priv->is_auto_deep_sleep_enabled) { - if (priv->is_deep_sleep) { - lbs_deb_cmd("command not allowed in deep sleep\n"); - cmdnode = ERR_PTR(-EBUSY); - goto done; - } + if (priv->is_deep_sleep) { + lbs_deb_cmd("command not allowed in deep sleep\n"); + cmdnode = ERR_PTR(-EBUSY); + goto done; } cmdnode = lbs_get_free_cmd_node(priv); diff --git a/drivers/net/wireless/marvell/libertas/cmd.h b/drivers/net/wireless/marvell/libertas/cmd.h index d7be232f57394..a95c2651e67f6 100644 --- a/drivers/net/wireless/marvell/libertas/cmd.h +++ b/drivers/net/wireless/marvell/libertas/cmd.h @@ -105,19 +105,9 @@ int lbs_get_tx_power(struct lbs_private *priv, s16 *curlevel, s16 *minlevel, int lbs_set_snmp_mib(struct lbs_private *priv, u32 oid, u16 val); -int lbs_get_snmp_mib(struct lbs_private *priv, u32 oid, u16 *out_val); - /* Commands only used in wext.c, assoc. 
and scan.c */ -int lbs_set_power_adapt_cfg(struct lbs_private *priv, int enable, int8_t p0, - int8_t p1, int8_t p2); - -int lbs_set_tpc_cfg(struct lbs_private *priv, int enable, int8_t p0, int8_t p1, - int8_t p2, int usesnr); - -int lbs_set_tx_power(struct lbs_private *priv, s16 dbm); - int lbs_set_deep_sleep(struct lbs_private *priv, int deep_sleep); int lbs_set_host_sleep(struct lbs_private *priv, int host_sleep); diff --git a/drivers/net/wireless/marvell/libertas/cmdresp.c b/drivers/net/wireless/marvell/libertas/cmdresp.c index f2aa659e77148..8393f396eebee 100644 --- a/drivers/net/wireless/marvell/libertas/cmdresp.c +++ b/drivers/net/wireless/marvell/libertas/cmdresp.c @@ -279,7 +279,6 @@ void lbs_process_event(struct lbs_private *priv, u32 event) priv->reset_deep_sleep_wakeup(priv); lbs_deb_cmd("EVENT: ds awake\n"); priv->is_deep_sleep = 0; - priv->wakeup_dev_required = 0; wake_up_interruptible(&priv->ds_awake_q); break; diff --git a/drivers/net/wireless/marvell/libertas/decl.h b/drivers/net/wireless/marvell/libertas/decl.h index c1e0388ef01da..ea69007a29584 100644 --- a/drivers/net/wireless/marvell/libertas/decl.h +++ b/drivers/net/wireless/marvell/libertas/decl.h @@ -64,11 +64,7 @@ int lbs_resume(struct lbs_private *priv); void lbs_queue_event(struct lbs_private *priv, u32 event); void lbs_notify_command_response(struct lbs_private *priv, u8 resp_idx); -int lbs_enter_auto_deep_sleep(struct lbs_private *priv); -int lbs_exit_auto_deep_sleep(struct lbs_private *priv); - u32 lbs_fw_index_to_data_rate(u8 index); -u8 lbs_data_rate_to_fw_index(u32 rate); int lbs_get_firmware(struct device *dev, u32 card_model, const struct lbs_fw_table *fw_table, diff --git a/drivers/net/wireless/marvell/libertas/dev.h b/drivers/net/wireless/marvell/libertas/dev.h index 4b6e05a8e5d54..c4708ce4eb835 100644 --- a/drivers/net/wireless/marvell/libertas/dev.h +++ b/drivers/net/wireless/marvell/libertas/dev.h @@ -83,12 +83,8 @@ struct lbs_private { /* Deep sleep */ int is_deep_sleep; int deep_sleep_required; - int is_auto_deep_sleep_enabled; - int wakeup_dev_required; int is_activity_detected; - int auto_deep_sleep_timeout; /* in ms */ wait_queue_head_t ds_awake_q; - struct timer_list auto_deepsleep_timer; /* Host sleep*/ int is_host_sleep_configured; diff --git a/drivers/net/wireless/marvell/libertas/main.c b/drivers/net/wireless/marvell/libertas/main.c index 78e8b5aecec0e..017e5c6bbade5 100644 --- a/drivers/net/wireless/marvell/libertas/main.c +++ b/drivers/net/wireless/marvell/libertas/main.c @@ -79,26 +79,6 @@ u32 lbs_fw_index_to_data_rate(u8 idx) return fw_data_rates[idx]; } -/** - * lbs_data_rate_to_fw_index - use rate to get the index - * - * @rate: data rate - * returns: index or 0 - */ -u8 lbs_data_rate_to_fw_index(u32 rate) -{ - u8 i; - - if (!rate) - return 0; - - for (i = 0; i < sizeof(fw_data_rates); i++) { - if (rate == fw_data_rates[i]) - return i; - } - return 0; -} - int lbs_set_iface_type(struct lbs_private *priv, enum nl80211_iftype type) { int ret = 0; @@ -276,8 +256,7 @@ void lbs_host_to_card_done(struct lbs_private *priv) /* Wake main thread if commands are pending */ if (!priv->cur_cmd || priv->tx_pending_len > 0) { - if (!priv->wakeup_dev_required) - wake_up(&priv->waitq); + wake_up(&priv->waitq); } spin_unlock_irqrestore(&priv->driver_lock, flags); @@ -468,8 +447,7 @@ static int lbs_thread(void *data) shouldsleep = 0; /* We have a command response */ else if (priv->cur_cmd) shouldsleep = 1; /* Can't send a command; one already running */ - else if (!list_empty(&priv->cmdpendingq) && - 
!(priv->wakeup_dev_required)) + else if (!list_empty(&priv->cmdpendingq)) shouldsleep = 0; /* We have a command to send */ else if (kfifo_len(&priv->event_fifo)) shouldsleep = 0; /* We have an event to process */ @@ -536,14 +514,6 @@ static int lbs_thread(void *data) } spin_unlock_irq(&priv->driver_lock); - if (priv->wakeup_dev_required) { - lbs_deb_thread("Waking up device...\n"); - /* Wake up device */ - if (priv->exit_deep_sleep(priv)) - lbs_deb_thread("Wakeup device failed\n"); - continue; - } - /* command timeout stuff */ if (priv->cmd_timed_out && priv->cur_cmd) { struct cmd_ctrl_node *cmdnode = priv->cur_cmd; @@ -626,7 +596,6 @@ static int lbs_thread(void *data) del_timer(&priv->command_timer); del_timer(&priv->tx_lockup_timer); - del_timer(&priv->auto_deepsleep_timer); return 0; } @@ -773,55 +742,6 @@ static void lbs_tx_lockup_handler(struct timer_list *t) spin_unlock_irqrestore(&priv->driver_lock, flags); } -/** - * auto_deepsleep_timer_fn - put the device back to deep sleep mode when - * timer expires and no activity (command, event, data etc.) is detected. - * @t: Context from which to retrieve a &struct lbs_private pointer - * returns: N/A - */ -static void auto_deepsleep_timer_fn(struct timer_list *t) -{ - struct lbs_private *priv = from_timer(priv, t, auto_deepsleep_timer); - - if (priv->is_activity_detected) { - priv->is_activity_detected = 0; - } else { - if (priv->is_auto_deep_sleep_enabled && - (!priv->wakeup_dev_required) && - (priv->connect_status != LBS_CONNECTED)) { - struct cmd_header cmd; - - lbs_deb_main("Entering auto deep sleep mode...\n"); - memset(&cmd, 0, sizeof(cmd)); - cmd.size = cpu_to_le16(sizeof(cmd)); - lbs_cmd_async(priv, CMD_802_11_DEEP_SLEEP, &cmd, - sizeof(cmd)); - } - } - mod_timer(&priv->auto_deepsleep_timer , jiffies + - (priv->auto_deep_sleep_timeout * HZ)/1000); -} - -int lbs_enter_auto_deep_sleep(struct lbs_private *priv) -{ - priv->is_auto_deep_sleep_enabled = 1; - if (priv->is_deep_sleep) - priv->wakeup_dev_required = 1; - mod_timer(&priv->auto_deepsleep_timer , - jiffies + (priv->auto_deep_sleep_timeout * HZ)/1000); - - return 0; -} - -int lbs_exit_auto_deep_sleep(struct lbs_private *priv) -{ - priv->is_auto_deep_sleep_enabled = 0; - priv->auto_deep_sleep_timeout = 0; - del_timer(&priv->auto_deepsleep_timer); - - return 0; -} - static int lbs_init_adapter(struct lbs_private *priv) { int ret; @@ -835,9 +755,7 @@ static int lbs_init_adapter(struct lbs_private *priv) priv->psmode = LBS802_11POWERMODECAM; priv->psstate = PS_STATE_FULL_POWER; priv->is_deep_sleep = 0; - priv->is_auto_deep_sleep_enabled = 0; priv->deep_sleep_required = 0; - priv->wakeup_dev_required = 0; init_waitqueue_head(&priv->ds_awake_q); init_waitqueue_head(&priv->scan_q); priv->authtype_auto = 1; @@ -849,7 +767,6 @@ static int lbs_init_adapter(struct lbs_private *priv) timer_setup(&priv->command_timer, lbs_cmd_timeout_handler, 0); timer_setup(&priv->tx_lockup_timer, lbs_tx_lockup_handler, 0); - timer_setup(&priv->auto_deepsleep_timer, auto_deepsleep_timer_fn, 0); INIT_LIST_HEAD(&priv->cmdfreeq); INIT_LIST_HEAD(&priv->cmdpendingq); @@ -883,7 +800,6 @@ static void lbs_free_adapter(struct lbs_private *priv) kfifo_free(&priv->event_fifo); del_timer(&priv->command_timer); del_timer(&priv->tx_lockup_timer); - del_timer(&priv->auto_deepsleep_timer); } static const struct net_device_ops lbs_netdev_ops = { diff --git a/drivers/net/wireless/marvell/mwifiex/11n.c b/drivers/net/wireless/marvell/mwifiex/11n.c index 66f0f5377ac18..738bafc3749b0 100644 --- 
a/drivers/net/wireless/marvell/mwifiex/11n.c +++ b/drivers/net/wireless/marvell/mwifiex/11n.c @@ -403,12 +403,14 @@ mwifiex_cmd_append_11n_tlv(struct mwifiex_private *priv, if (sband->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40 && bss_desc->bcn_ht_oper->ht_param & - IEEE80211_HT_PARAM_CHAN_WIDTH_ANY) + IEEE80211_HT_PARAM_CHAN_WIDTH_ANY) { + chan_list->chan_scan_param[0].radio_type |= + CHAN_BW_40MHZ << 2; SET_SECONDARYCHAN(chan_list->chan_scan_param[0]. radio_type, (bss_desc->bcn_ht_oper->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET)); - + } *buffer += struct_size(chan_list, chan_scan_param, 1); ret_len += struct_size(chan_list, chan_scan_param, 1); } diff --git a/drivers/net/wireless/marvell/mwifiex/cfp.c b/drivers/net/wireless/marvell/mwifiex/cfp.c index d39092b992129..d7fd79214bcfb 100644 --- a/drivers/net/wireless/marvell/mwifiex/cfp.c +++ b/drivers/net/wireless/marvell/mwifiex/cfp.c @@ -150,7 +150,7 @@ static const u16 ac_mcs_rate_nss2[8][10] = { struct region_code_mapping { u8 code; - u8 region[IEEE80211_COUNTRY_STRING_LEN]; + u8 region[IEEE80211_COUNTRY_STRING_LEN] __nonstring; }; static struct region_code_mapping region_code_mapping_t[] = { diff --git a/drivers/net/wireless/marvell/mwifiex/fw.h b/drivers/net/wireless/marvell/mwifiex/fw.h index 4a96281792cc1..91458f3bd14a5 100644 --- a/drivers/net/wireless/marvell/mwifiex/fw.h +++ b/drivers/net/wireless/marvell/mwifiex/fw.h @@ -454,6 +454,11 @@ enum mwifiex_channel_flags { #define HostCmd_RET_BIT 0x8000 #define HostCmd_ACT_GEN_GET 0x0000 #define HostCmd_ACT_GEN_SET 0x0001 +#define HOST_CMD_ACT_GEN_SET 0x0001 +/* Add this non-CamelCase-style macro to comply with checkpatch requirements. * It will eventually replace the existing CamelCase-style macros for * consistency. */ #define HostCmd_ACT_GEN_REMOVE 0x0004 #define HostCmd_ACT_BITWISE_SET 0x0002 #define HostCmd_ACT_BITWISE_CLR 0x0003 @@ -2352,6 +2357,14 @@ struct host_cmd_ds_add_station { u8 tlv[]; } __packed; +#define MWIFIEX_CFG_TYPE_CAL 0x2 + +struct host_cmd_ds_802_11_cfg_data { + __le16 action; + __le16 type; + __le16 data_len; +} __packed; + struct host_cmd_ds_command { __le16 command; __le16 size; @@ -2431,6 +2444,7 @@ struct host_cmd_ds_command { struct host_cmd_ds_pkt_aggr_ctrl pkt_aggr_ctrl; struct host_cmd_ds_sta_configure sta_cfg; struct host_cmd_ds_add_station sta_info; + struct host_cmd_ds_802_11_cfg_data cfg_data; } params; } __packed; diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c index 855019fe54858..b07cb302a00c6 100644 --- a/drivers/net/wireless/marvell/mwifiex/main.c +++ b/drivers/net/wireless/marvell/mwifiex/main.c @@ -54,7 +54,7 @@ const u16 mwifiex_1d_to_wmm_queue[8] = { 1, 0, 0, 1, 2, 2, 3, 3 }; * proper cleanup before exiting. 
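
A note on the __nonstring annotation added to region_code_mapping above: the region field is a fixed-size country code that is deliberately not NUL-terminated, and recent GCC warns under -Wunterminated-string-initialization when a string literal exactly fills such an array unless it carries the attribute. A compilable miniature (the 2-byte width is illustrative; mwifiex uses IEEE80211_COUNTRY_STRING_LEN):

#include <stdio.h>

#define __nonstring __attribute__((__nonstring__))

struct region_map {
	unsigned char code;
	unsigned char region[2] __nonstring;	/* "US" fits with no NUL */
};

static const struct region_map map[] = {
	{ 0x10, "US" },		/* literal exactly fills the array */
	{ 0x30, "EU" },
};

int main(void)
{
	/* the precision keeps printf from running past the array */
	printf("code 0x%02x -> %.2s\n", map[0].code, map[0].region);
	return 0;
}
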
*/ static int mwifiex_register(void *card, struct device *dev, - struct mwifiex_if_ops *if_ops, void **padapter) + const struct mwifiex_if_ops *if_ops, void **padapter) { struct mwifiex_adapter *adapter; int i; @@ -691,10 +691,6 @@ static int _mwifiex_fw_dpc(const struct firmware *firmware, void *context) init_failed = true; done: - if (adapter->cal_data) { - release_firmware(adapter->cal_data); - adapter->cal_data = NULL; - } if (adapter->firmware) { release_firmware(adapter->firmware); adapter->firmware = NULL; @@ -1713,7 +1709,7 @@ static void mwifiex_probe_of(struct mwifiex_adapter *adapter) */ int mwifiex_add_card(void *card, struct completion *fw_done, - struct mwifiex_if_ops *if_ops, u8 iface_type, + const struct mwifiex_if_ops *if_ops, u8 iface_type, struct device *dev) { struct mwifiex_adapter *adapter; diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h index 0674dcf7a5374..63f1c900e0967 100644 --- a/drivers/net/wireless/marvell/mwifiex/main.h +++ b/drivers/net/wireless/marvell/mwifiex/main.h @@ -1470,7 +1470,7 @@ int mwifiex_init_shutdown_fw(struct mwifiex_private *priv, u32 func_init_shutdown); int mwifiex_add_card(void *card, struct completion *fw_done, - struct mwifiex_if_ops *if_ops, u8 iface_type, + const struct mwifiex_if_ops *if_ops, u8 iface_type, struct device *dev); int mwifiex_remove_card(struct mwifiex_adapter *adapter); @@ -1571,8 +1571,6 @@ void mwifiex_uap_set_channel(struct mwifiex_private *priv, struct cfg80211_chan_def chandef); int mwifiex_config_start_uap(struct mwifiex_private *priv, struct mwifiex_uap_bss_param *bss_cfg); -void mwifiex_uap_del_sta_data(struct mwifiex_private *priv, - struct mwifiex_sta_node *node); void mwifiex_config_uap_11d(struct mwifiex_private *priv, struct cfg80211_beacon_data *beacon_data); diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c index 5f997becdbaa2..e11458fd4d50f 100644 --- a/drivers/net/wireless/marvell/mwifiex/pcie.c +++ b/drivers/net/wireless/marvell/mwifiex/pcie.c @@ -21,7 +21,7 @@ #define PCIE_VERSION "1.0" #define DRV_NAME "Marvell mwifiex PCIe" -static struct mwifiex_if_ops pcie_ops; +static const struct mwifiex_if_ops pcie_ops; static const struct mwifiex_pcie_card_reg mwifiex_reg_8766 = { .cmd_addr_lo = PCIE_SCRATCH_0_REG, @@ -3240,7 +3240,7 @@ static void mwifiex_pcie_down_dev(struct mwifiex_adapter *adapter) mwifiex_pcie_free_buffers(adapter); } -static struct mwifiex_if_ops pcie_ops = { +static const struct mwifiex_if_ops pcie_ops = { .init_if = mwifiex_init_pcie, .cleanup_if = mwifiex_cleanup_pcie, .check_fw_status = mwifiex_check_fw_status, diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c index 490ffd981164d..c1fe484488399 100644 --- a/drivers/net/wireless/marvell/mwifiex/sdio.c +++ b/drivers/net/wireless/marvell/mwifiex/sdio.c @@ -21,7 +21,7 @@ static void mwifiex_sdio_work(struct work_struct *work); -static struct mwifiex_if_ops sdio_ops; +static const struct mwifiex_if_ops sdio_ops; static const struct mwifiex_sdio_card_reg mwifiex_reg_sd87xx = { .start_rd_port = 1, @@ -3167,7 +3167,7 @@ static void mwifiex_sdio_up_dev(struct mwifiex_adapter *adapter) dev_err(&card->func->dev, "error enabling SDIO port\n"); } -static struct mwifiex_if_ops sdio_ops = { +static const struct mwifiex_if_ops sdio_ops = { .init_if = mwifiex_init_sdio, .cleanup_if = mwifiex_cleanup_sdio, .check_fw_status = mwifiex_check_fw_status, diff --git 
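
The mwifiex_if_ops constification above follows a common kernel recipe: a const forward declaration up top (legal as a C tentative definition for an object with internal linkage), the initialized const definition after the functions it references, and const-qualified pointers in every consumer, so the ops table can live in .rodata. A generic stand-alone version, with stand-in names rather than the mwifiex symbols:

#include <stdio.h>

struct if_ops {
	int (*init_if)(void);
};

static const struct if_ops demo_ops;		/* tentative declaration */

static int register_card(const struct if_ops *ops)
{
	return ops->init_if ? ops->init_if() : -1;
}

static int demo_init(void)
{
	return 0;
}

static const struct if_ops demo_ops = {	/* real definition */
	.init_if = demo_init,
};

int main(void)
{
	printf("register_card: %d\n", register_card(&demo_ops));
	return 0;
}
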
a/drivers/net/wireless/marvell/mwifiex/sta_cmd.c b/drivers/net/wireless/marvell/mwifiex/sta_cmd.c index e2800a831c8ed..c4689f5a1acc8 100644 --- a/drivers/net/wireless/marvell/mwifiex/sta_cmd.c +++ b/drivers/net/wireless/marvell/mwifiex/sta_cmd.c @@ -1507,6 +1507,7 @@ static int mwifiex_cmd_cfg_data(struct mwifiex_private *priv, u32 len; u8 *data = (u8 *)cmd + S_DS_GEN; int ret; + struct host_cmd_ds_802_11_cfg_data *pcfg_data; if (prop) { len = prop->length; @@ -1514,12 +1515,20 @@ static int mwifiex_cmd_cfg_data(struct mwifiex_private *priv, data, len); if (ret) return ret; + + cmd->size = cpu_to_le16(S_DS_GEN + len); mwifiex_dbg(adapter, INFO, "download cfg_data from device tree: %s\n", prop->name); } else if (adapter->cal_data->data && adapter->cal_data->size > 0) { len = mwifiex_parse_cal_cfg((u8 *)adapter->cal_data->data, - adapter->cal_data->size, data); + adapter->cal_data->size, + data + sizeof(*pcfg_data)); + pcfg_data = &cmd->params.cfg_data; + pcfg_data->action = cpu_to_le16(HOST_CMD_ACT_GEN_SET); + pcfg_data->type = cpu_to_le16(MWIFIEX_CFG_TYPE_CAL); + pcfg_data->data_len = cpu_to_le16(len); + cmd->size = cpu_to_le16(S_DS_GEN + sizeof(*pcfg_data) + len); mwifiex_dbg(adapter, INFO, "download cfg_data from config file\n"); } else { @@ -1527,7 +1536,6 @@ static int mwifiex_cmd_cfg_data(struct mwifiex_private *priv, } cmd->command = cpu_to_le16(HostCmd_CMD_CFG_DATA); - cmd->size = cpu_to_le16(S_DS_GEN + len); return 0; } @@ -2293,9 +2301,13 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta, bool init) "marvell,caldata"); } - if (adapter->cal_data) + if (adapter->cal_data) { mwifiex_send_cmd(priv, HostCmd_CMD_CFG_DATA, HostCmd_ACT_GEN_SET, 0, NULL, true); + release_firmware(adapter->cal_data); + adapter->cal_data = NULL; + } + /* Read MAC address from HW */ ret = mwifiex_send_cmd(priv, HostCmd_CMD_GET_HW_SPEC, diff --git a/drivers/net/wireless/marvell/mwifiex/uap_event.c b/drivers/net/wireless/marvell/mwifiex/uap_event.c index 58ef5020a46a7..245cb99a3daad 100644 --- a/drivers/net/wireless/marvell/mwifiex/uap_event.c +++ b/drivers/net/wireless/marvell/mwifiex/uap_event.c @@ -325,19 +325,3 @@ int mwifiex_process_uap_event(struct mwifiex_private *priv) return 0; } - -/* This function deletes station entry from associated station list. - * Also if both AP and STA are 11n enabled, RxReorder tables and TxBA stream - * tables created for this station are deleted. 
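
The size bookkeeping in the mwifiex_cmd_cfg_data() hunk above is worth spelling out: calibration data read from a file now gets a 6-byte action/type/length header in front of the payload, while the device-tree path stays header-less, so cmd->size must be computed per branch. A sketch of the arithmetic, taking S_DS_GEN (the generic command header) as 8 bytes:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct cfg_data_hdr {		/* mirrors host_cmd_ds_802_11_cfg_data */
	uint16_t action;
	uint16_t type;
	uint16_t data_len;
} __attribute__((packed));

#define S_DS_GEN 8		/* assumed generic header size */

int main(void)
{
	size_t len = 100;	/* parsed calibration payload */

	printf("device-tree path: %zu bytes\n", S_DS_GEN + len);
	printf("cal-file path:    %zu bytes\n",
	       S_DS_GEN + sizeof(struct cfg_data_hdr) + len);
	return 0;
}
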
- */ -void mwifiex_uap_del_sta_data(struct mwifiex_private *priv, - struct mwifiex_sta_node *node) -{ - if (priv->ap_11n_enabled && node->is_11n_enabled) { - mwifiex_11n_del_rx_reorder_tbl_by_ta(priv, node->mac_addr); - mwifiex_del_tx_ba_stream_tbl_by_ra(priv, node->mac_addr); - } - mwifiex_del_sta_entry(priv, node->mac_addr); - - return; -} diff --git a/drivers/net/wireless/marvell/mwifiex/usb.c b/drivers/net/wireless/marvell/mwifiex/usb.c index 6085cd50970d4..3034c4405cb57 100644 --- a/drivers/net/wireless/marvell/mwifiex/usb.c +++ b/drivers/net/wireless/marvell/mwifiex/usb.c @@ -10,7 +10,7 @@ #define USB_VERSION "1.0" -static struct mwifiex_if_ops usb_ops; +static const struct mwifiex_if_ops usb_ops; static const struct usb_device_id mwifiex_usb_table[] = { /* 8766 */ @@ -1585,7 +1585,7 @@ mwifiex_pm_wakeup_card_complete(struct mwifiex_adapter *adapter) return 0; } -static struct mwifiex_if_ops usb_ops = { +static const struct mwifiex_if_ops usb_ops = { .register_dev = mwifiex_register_dev, .unregister_dev = mwifiex_unregister_dev, .wakeup = mwifiex_pm_wakeup_card, diff --git a/drivers/net/wireless/mediatek/mt76/channel.c b/drivers/net/wireless/mediatek/mt76/channel.c index 6a35c6ebd823e..e7b839e742903 100644 --- a/drivers/net/wireless/mediatek/mt76/channel.c +++ b/drivers/net/wireless/mediatek/mt76/channel.c @@ -293,6 +293,7 @@ struct mt76_vif_link *mt76_get_vif_phy_link(struct mt76_phy *phy, kfree(mlink); return ERR_PTR(ret); } + rcu_assign_pointer(mvif->offchannel_link, mlink); return mlink; } @@ -301,10 +302,12 @@ void mt76_put_vif_phy_link(struct mt76_phy *phy, struct ieee80211_vif *vif, struct mt76_vif_link *mlink) { struct mt76_dev *dev = phy->dev; + struct mt76_vif_data *mvif = mlink->mvif; if (IS_ERR_OR_NULL(mlink) || !mlink->offchannel) return; + rcu_assign_pointer(mvif->offchannel_link, NULL); dev->drv->vif_link_remove(phy, vif, &vif->bss_conf, mlink); kfree(mlink); } diff --git a/drivers/net/wireless/mediatek/mt76/eeprom.c b/drivers/net/wireless/mediatek/mt76/eeprom.c index 0bc66cc19acd1..443517d06c9fa 100644 --- a/drivers/net/wireless/mediatek/mt76/eeprom.c +++ b/drivers/net/wireless/mediatek/mt76/eeprom.c @@ -95,6 +95,10 @@ int mt76_get_of_data_from_mtd(struct mt76_dev *dev, void *eep, int offset, int l #ifdef CONFIG_NL80211_TESTMODE dev->test_mtd.name = devm_kstrdup(dev->dev, part, GFP_KERNEL); + if (!dev->test_mtd.name) { + ret = -ENOMEM; + goto out_put_node; + } dev->test_mtd.offset = offset; #endif diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c index 508b472408c20..b88d7e10742ee 100644 --- a/drivers/net/wireless/mediatek/mt76/mac80211.c +++ b/drivers/net/wireless/mediatek/mt76/mac80211.c @@ -816,8 +816,8 @@ void mt76_free_device(struct mt76_dev *dev) } EXPORT_SYMBOL_GPL(mt76_free_device); -static struct mt76_phy * -mt76_vif_phy(struct ieee80211_hw *hw, struct ieee80211_vif *vif) +struct mt76_phy *mt76_vif_phy(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) { struct mt76_vif_link *mlink = (struct mt76_vif_link *)vif->drv_priv; struct mt76_chanctx *ctx; @@ -831,6 +831,7 @@ mt76_vif_phy(struct ieee80211_hw *hw, struct ieee80211_vif *vif) ctx = (struct mt76_chanctx *)mlink->ctx->drv_priv; return ctx->phy; } +EXPORT_SYMBOL_GPL(mt76_vif_phy); static void mt76_rx_release_amsdu(struct mt76_phy *phy, enum mt76_rxq_id q) { @@ -1697,6 +1698,17 @@ void mt76_wcid_add_poll(struct mt76_dev *dev, struct mt76_wcid *wcid) } EXPORT_SYMBOL_GPL(mt76_wcid_add_poll); +s8 mt76_get_power_bound(struct mt76_phy *phy, s8 txpower) 
+{ + int n_chains = hweight16(phy->chainmask); + + txpower = mt76_get_sar_power(phy, phy->chandef.chan, txpower * 2); + txpower -= mt76_tx_power_nss_delta(n_chains); + + return txpower; +} +EXPORT_SYMBOL_GPL(mt76_get_power_bound); + int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif, unsigned int link_id, int *dbm) { diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h index 132148f7b1070..d7cd467b812fc 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76.h +++ b/drivers/net/wireless/mediatek/mt76/mt76.h @@ -351,6 +351,7 @@ struct mt76_wcid { u8 hw_key_idx; u8 hw_key_idx2; + u8 offchannel:1; u8 sta:1; u8 sta_disabled:1; u8 amsdu:1; @@ -491,6 +492,7 @@ struct mt76_hw_cap { #define MT_DRV_RX_DMA_HDR BIT(3) #define MT_DRV_HW_MGMT_TXQ BIT(4) #define MT_DRV_AMSDU_OFFLOAD BIT(5) +#define MT_DRV_IGNORE_TXS_FAILED BIT(6) struct mt76_driver_ops { u32 drv_flags; @@ -769,6 +771,7 @@ struct mt76_testmode_data { struct mt76_vif_link { u8 idx; + u8 link_idx; u8 omac_idx; u8 band_idx; u8 wmm_idx; @@ -786,6 +789,7 @@ struct mt76_vif_link { struct mt76_vif_data { struct mt76_vif_link __rcu *link[IEEE80211_MLD_MAX_NUM_LINKS]; + struct mt76_vif_link __rcu *offchannel_link; struct mt76_phy *roc_phy; u16 valid_links; @@ -1224,6 +1228,8 @@ struct mt76_phy *mt76_alloc_phy(struct mt76_dev *dev, unsigned int size, u8 band_idx); int mt76_register_phy(struct mt76_phy *phy, bool vht, struct ieee80211_rate *rates, int n_rates); +struct mt76_phy *mt76_vif_phy(struct ieee80211_hw *hw, + struct ieee80211_vif *vif); struct dentry *mt76_register_debugfs_fops(struct mt76_phy *phy, const struct file_operations *ops); @@ -1482,6 +1488,8 @@ void mt76_sta_pre_rcu_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif, int mt76_get_min_avg_rssi(struct mt76_dev *dev, u8 phy_idx); +s8 mt76_get_power_bound(struct mt76_phy *phy, s8 txpower); + int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif, unsigned int link_id, int *dbm); int mt76_init_sar_power(struct ieee80211_hw *hw, diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h b/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h index db0c29e65185c..487ad716f872a 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h +++ b/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h @@ -314,6 +314,9 @@ enum tx_frag_idx { #define MT_TXFREE_INFO_COUNT GENMASK(27, 24) #define MT_TXFREE_INFO_STAT GENMASK(29, 28) +#define MT_TXS_HDR_SIZE 4 /* Unit: DW */ +#define MT_TXS_SIZE 12 /* Unit: DW */ + #define MT_TXS0_BW GENMASK(31, 29) #define MT_TXS0_TID GENMASK(28, 26) #define MT_TXS0_AMPDU BIT(25) diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c index f30cf9e716105..bafcf5a279e23 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c @@ -391,7 +391,7 @@ void mt76_connac_mcu_sta_basic_tlv(struct mt76_dev *dev, struct sk_buff *skb, basic->conn_type = cpu_to_le32(CONNECTION_INFRA_BC); if (vif->type == NL80211_IFTYPE_STATION && - !is_zero_ether_addr(link_conf->bssid)) { + link_conf && !is_zero_ether_addr(link_conf->bssid)) { memcpy(basic->peer_addr, link_conf->bssid, ETH_ALEN); basic->aid = cpu_to_le16(vif->cfg.aid); } else { @@ -1168,7 +1168,7 @@ int mt76_connac_mcu_uni_add_dev(struct mt76_phy *phy, .tag = cpu_to_le16(DEV_INFO_ACTIVE), .len = cpu_to_le16(sizeof(struct req_tlv)), .active = enable, - .link_idx = mvif->idx, + .link_idx = 
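
A worked example of the newly shared mt76_get_power_bound() above, assuming a 2x2 radio and no SAR cap: the requested dBm value is doubled into the half-dB units used throughout mt76 (the doubling happens at the mt76_get_sar_power() call), then the per-chain delta for two spatial streams (6 half-dB steps, i.e. 3 dB, per mt76_tx_power_nss_delta()) is subtracted:

#include <stdio.h>

int main(void)
{
	int requested_dbm = 20;
	int txpower = requested_dbm * 2;	/* 40 half-dB units */
	int nss_delta_2chain = 6;		/* assumption: 3 dB for 2 chains */

	txpower -= nss_delta_2chain;
	printf("firmware bound: %d half-dB (= %d.%d dBm)\n",
	       txpower, txpower / 2, (txpower % 2) * 5);
	return 0;
}
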
mvif->link_idx, }, }; struct { @@ -1191,7 +1191,7 @@ int mt76_connac_mcu_uni_add_dev(struct mt76_phy *phy, .bmc_tx_wlan_idx = cpu_to_le16(wcid->idx), .sta_idx = cpu_to_le16(wcid->idx), .conn_state = 1, - .link_idx = mvif->idx, + .link_idx = mvif->link_idx, }, }; int err, idx, cmd, len; diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c index b456ccd00d581..11c16d1fc70fc 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c @@ -156,7 +156,8 @@ mt76x0e_probe(struct pci_dev *pdev, const struct pci_device_id *id) static const struct mt76_driver_ops drv_ops = { .txwi_size = sizeof(struct mt76x02_txwi), .drv_flags = MT_DRV_TX_ALIGNED4_SKBS | - MT_DRV_SW_RX_AIRTIME, + MT_DRV_SW_RX_AIRTIME | + MT_DRV_IGNORE_TXS_FAILED, .survey_flags = SURVEY_INFO_TIME_TX, .update_survey = mt76x02_update_channel, .set_channel = mt76x0_set_channel, diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c index b031c500b7415..90e5666c0857d 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c @@ -214,7 +214,8 @@ static int mt76x0u_probe(struct usb_interface *usb_intf, const struct usb_device_id *id) { static const struct mt76_driver_ops drv_ops = { - .drv_flags = MT_DRV_SW_RX_AIRTIME, + .drv_flags = MT_DRV_SW_RX_AIRTIME | + MT_DRV_IGNORE_TXS_FAILED, .survey_flags = SURVEY_INFO_TIME_TX, .update_survey = mt76x02_update_channel, .set_channel = mt76x0_set_channel, diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c index 727bfdd00b400..2303019670e2c 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c @@ -22,7 +22,8 @@ mt76x2e_probe(struct pci_dev *pdev, const struct pci_device_id *id) static const struct mt76_driver_ops drv_ops = { .txwi_size = sizeof(struct mt76x02_txwi), .drv_flags = MT_DRV_TX_ALIGNED4_SKBS | - MT_DRV_SW_RX_AIRTIME, + MT_DRV_SW_RX_AIRTIME | + MT_DRV_IGNORE_TXS_FAILED, .survey_flags = SURVEY_INFO_TIME_TX, .update_survey = mt76x02_update_channel, .set_channel = mt76x2e_set_channel, diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c index e832ad53e2393..84ef80ab4afbf 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c @@ -22,6 +22,7 @@ static const struct usb_device_id mt76x2u_device_table[] = { { USB_DEVICE(0x0846, 0x9053) }, /* Netgear A6210 */ { USB_DEVICE(0x045e, 0x02e6) }, /* XBox One Wireless Adapter */ { USB_DEVICE(0x045e, 0x02fe) }, /* XBox One Wireless Adapter */ + { USB_DEVICE(0x2357, 0x0137) }, /* TP-Link TL-WDN6200 */ { }, }; @@ -29,7 +30,8 @@ static int mt76x2u_probe(struct usb_interface *intf, const struct usb_device_id *id) { static const struct mt76_driver_ops drv_ops = { - .drv_flags = MT_DRV_SW_RX_AIRTIME, + .drv_flags = MT_DRV_SW_RX_AIRTIME | + MT_DRV_IGNORE_TXS_FAILED, .survey_flags = SURVEY_INFO_TIME_TX, .update_survey = mt76x02_update_channel, .set_channel = mt76x2u_set_channel, diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c index 578013884e438..192e8eff970b9 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c @@ -303,9 +303,9 @@ static int mt7915_muru_stats_show(struct 
seq_file *file, void *data) phy->mib.dl_vht_3mu_cnt, phy->mib.dl_vht_4mu_cnt); - sub_total_cnt = phy->mib.dl_vht_2mu_cnt + - phy->mib.dl_vht_3mu_cnt + - phy->mib.dl_vht_4mu_cnt; + sub_total_cnt = (u64)phy->mib.dl_vht_2mu_cnt + + phy->mib.dl_vht_3mu_cnt + + phy->mib.dl_vht_4mu_cnt; seq_printf(file, "\nTotal non-HE MU-MIMO DL PPDU count: %lld", sub_total_cnt); @@ -353,26 +353,27 @@ static int mt7915_muru_stats_show(struct seq_file *file, void *data) phy->mib.dl_he_9to16ru_cnt, phy->mib.dl_he_gtr16ru_cnt); - sub_total_cnt = phy->mib.dl_he_2mu_cnt + - phy->mib.dl_he_3mu_cnt + - phy->mib.dl_he_4mu_cnt; + sub_total_cnt = (u64)phy->mib.dl_he_2mu_cnt + + phy->mib.dl_he_3mu_cnt + + phy->mib.dl_he_4mu_cnt; total_ppdu_cnt = sub_total_cnt; seq_printf(file, "\nTotal HE MU-MIMO DL PPDU count: %lld", sub_total_cnt); - sub_total_cnt = phy->mib.dl_he_2ru_cnt + - phy->mib.dl_he_3ru_cnt + - phy->mib.dl_he_4ru_cnt + - phy->mib.dl_he_5to8ru_cnt + - phy->mib.dl_he_9to16ru_cnt + - phy->mib.dl_he_gtr16ru_cnt; + sub_total_cnt = (u64)phy->mib.dl_he_2ru_cnt + + phy->mib.dl_he_3ru_cnt + + phy->mib.dl_he_4ru_cnt + + phy->mib.dl_he_5to8ru_cnt + + phy->mib.dl_he_9to16ru_cnt + + phy->mib.dl_he_gtr16ru_cnt; total_ppdu_cnt += sub_total_cnt; seq_printf(file, "\nTotal HE OFDMA DL PPDU count: %lld", sub_total_cnt); - total_ppdu_cnt += phy->mib.dl_he_su_cnt + phy->mib.dl_he_ext_su_cnt; + total_ppdu_cnt += (u64)phy->mib.dl_he_su_cnt + + phy->mib.dl_he_ext_su_cnt; seq_printf(file, "\nAll HE DL PPDU count: %lld", total_ppdu_cnt); @@ -404,20 +405,20 @@ static int mt7915_muru_stats_show(struct seq_file *file, void *data) phy->mib.ul_hetrig_9to16ru_cnt, phy->mib.ul_hetrig_gtr16ru_cnt); - sub_total_cnt = phy->mib.ul_hetrig_2mu_cnt + - phy->mib.ul_hetrig_3mu_cnt + - phy->mib.ul_hetrig_4mu_cnt; + sub_total_cnt = (u64)phy->mib.ul_hetrig_2mu_cnt + + phy->mib.ul_hetrig_3mu_cnt + + phy->mib.ul_hetrig_4mu_cnt; total_ppdu_cnt = sub_total_cnt; seq_printf(file, "\nTotal HE MU-MIMO UL TB PPDU count: %lld", sub_total_cnt); - sub_total_cnt = phy->mib.ul_hetrig_2ru_cnt + - phy->mib.ul_hetrig_3ru_cnt + - phy->mib.ul_hetrig_4ru_cnt + - phy->mib.ul_hetrig_5to8ru_cnt + - phy->mib.ul_hetrig_9to16ru_cnt + - phy->mib.ul_hetrig_gtr16ru_cnt; + sub_total_cnt = (u64)phy->mib.ul_hetrig_2ru_cnt + + phy->mib.ul_hetrig_3ru_cnt + + phy->mib.ul_hetrig_4ru_cnt + + phy->mib.ul_hetrig_5to8ru_cnt + + phy->mib.ul_hetrig_9to16ru_cnt + + phy->mib.ul_hetrig_gtr16ru_cnt; total_ppdu_cnt += sub_total_cnt; seq_printf(file, "\nTotal HE OFDMA UL TB PPDU count: %lld", @@ -1084,13 +1085,13 @@ mt7915_rate_txpower_set(struct file *file, const char __user *user_buf, return -EINVAL; if (pwr160) - pwr160 = mt7915_get_power_bound(phy, pwr160); + pwr160 = mt76_get_power_bound(mphy, pwr160); if (pwr80) - pwr80 = mt7915_get_power_bound(phy, pwr80); + pwr80 = mt76_get_power_bound(mphy, pwr80); if (pwr40) - pwr40 = mt7915_get_power_bound(phy, pwr40); + pwr40 = mt76_get_power_bound(mphy, pwr40); if (pwr20) - pwr20 = mt7915_get_power_bound(phy, pwr20); + pwr20 = mt76_get_power_bound(mphy, pwr20); if (pwr160 < 0 || pwr80 < 0 || pwr40 < 0 || pwr20 < 0) return -EINVAL; diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c index 9d790f234e82c..3643c72bb68d4 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c @@ -3323,7 +3323,7 @@ int mt7915_mcu_set_txpower_frame(struct mt7915_phy *phy, if (ret) return ret; - txpower = mt7915_get_power_bound(phy, txpower); + txpower = 
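
The (u64) casts sprinkled through the mt7915 debugfs hunks above fix a classic promotion bug: with plain u32 counters the additions are done in 32 bits and wrap before the sum is widened for the %lld print. Casting the first operand pushes the whole chain of additions into 64-bit arithmetic:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t a = 0xf0000000u, b = 0x20000000u;

	uint64_t wrapped = a + b;		/* 32-bit add: 0x10000000 */
	uint64_t correct = (uint64_t)a + b;	/* 64-bit add: 0x110000000 */

	printf("wrapped: 0x%llx\ncorrect: 0x%llx\n",
	       (unsigned long long)wrapped,
	       (unsigned long long)correct);
	return 0;
}
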
mt76_get_power_bound(mphy, txpower); if (txpower > mphy->txpower_cur || txpower < 0) return -EINVAL; @@ -3373,7 +3373,7 @@ int mt7915_mcu_set_txpower_sku(struct mt7915_phy *phy) int i, idx; int tx_power; - tx_power = mt7915_get_power_bound(phy, hw->conf.power_level); + tx_power = mt76_get_power_bound(mphy, hw->conf.power_level); tx_power = mt76_get_rate_power_limits(mphy, mphy->chandef.chan, &limits_array, tx_power); mphy->txpower_cur = tx_power; diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h index 49476a4182fd1..092ed504a8f26 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h @@ -515,16 +515,4 @@ enum { sizeof(struct bss_info_bmc_rate) +\ sizeof(struct bss_info_ext_bss)) -static inline s8 -mt7915_get_power_bound(struct mt7915_phy *phy, s8 txpower) -{ - struct mt76_phy *mphy = phy->mt76; - int n_chains = hweight16(mphy->chainmask); - - txpower = mt76_get_sar_power(mphy, mphy->chandef.chan, txpower * 2); - txpower -= mt76_tx_power_nss_delta(n_chains); - - return txpower; -} - #endif diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/main.c b/drivers/net/wireless/mediatek/mt76/mt7921/main.c index 13e58c328aff4..78b77a54d1957 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/main.c @@ -811,6 +811,7 @@ int mt7921_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif, msta->deflink.wcid.phy_idx = mvif->bss_conf.mt76.band_idx; msta->deflink.wcid.tx_info |= MT_WCID_TX_INFO_SET; msta->deflink.last_txs = jiffies; + msta->deflink.sta = msta; ret = mt76_connac_pm_wake(&dev->mphy, &dev->pm); if (ret) diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/init.c b/drivers/net/wireless/mediatek/mt76/mt7925/init.c index f41ca42484978..63cb08f4d87cc 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7925/init.c +++ b/drivers/net/wireless/mediatek/mt76/mt7925/init.c @@ -58,15 +58,110 @@ static int mt7925_thermal_init(struct mt792x_phy *phy) return PTR_ERR_OR_ZERO(hwmon); } +void mt7925_regd_be_ctrl(struct mt792x_dev *dev, u8 *alpha2) +{ + struct mt792x_phy *phy = &dev->phy; + struct mt7925_clc_rule_v2 *rule; + struct mt7925_clc *clc; + bool old = dev->has_eht, new = true; + u32 mtcl_conf = mt792x_acpi_get_mtcl_conf(&dev->phy, alpha2); + u8 *pos; + + if (mtcl_conf != MT792X_ACPI_MTCL_INVALID && + (((mtcl_conf >> 4) & 0x3) == 0)) { + new = false; + goto out; + } + + if (!phy->clc[MT792x_CLC_BE_CTRL]) + goto out; + + clc = (struct mt7925_clc *)phy->clc[MT792x_CLC_BE_CTRL]; + pos = clc->data; + + while (1) { + rule = (struct mt7925_clc_rule_v2 *)pos; + + if (rule->alpha2[0] == alpha2[0] && + rule->alpha2[1] == alpha2[1]) { + new = false; + break; + } + + /* Check the last one */ + if (rule->flag & BIT(0)) + break; + + pos += sizeof(*rule); + } + +out: + if (old == new) + return; + + dev->has_eht = new; + mt7925_set_stream_he_eht_caps(phy); +} + +static void +mt7925_regd_channel_update(struct wiphy *wiphy, struct mt792x_dev *dev) +{ +#define IS_UNII_INVALID(idx, sfreq, efreq, cfreq) \ + (!(dev->phy.clc_chan_conf & BIT(idx)) && (cfreq) >= (sfreq) && (cfreq) <= (efreq)) +#define MT7925_UNII_59G_IS_VALID 0x1 +#define MT7925_UNII_6G_IS_VALID 0x1e + struct ieee80211_supported_band *sband; + struct mt76_dev *mdev = &dev->mt76; + struct ieee80211_channel *ch; + u32 mtcl_conf = mt792x_acpi_get_mtcl_conf(&dev->phy, mdev->alpha2); + int i; + + if (mtcl_conf != MT792X_ACPI_MTCL_INVALID) { + if ((mtcl_conf & 0x3) == 0) 
dev->phy.clc_chan_conf &= ~MT7925_UNII_59G_IS_VALID; + if (((mtcl_conf >> 2) & 0x3) == 0) + dev->phy.clc_chan_conf &= ~MT7925_UNII_6G_IS_VALID; + } + + sband = wiphy->bands[NL80211_BAND_5GHZ]; + if (!sband) + return; + + for (i = 0; i < sband->n_channels; i++) { + ch = &sband->channels[i]; + + /* UNII-4 */ + if (IS_UNII_INVALID(0, 5845, 5925, ch->center_freq)) + ch->flags |= IEEE80211_CHAN_DISABLED; + } + + sband = wiphy->bands[NL80211_BAND_6GHZ]; + if (!sband) + return; + + for (i = 0; i < sband->n_channels; i++) { + ch = &sband->channels[i]; + + /* UNII-5/6/7/8 */ + if (IS_UNII_INVALID(1, 5925, 6425, ch->center_freq) || + IS_UNII_INVALID(2, 6425, 6525, ch->center_freq) || + IS_UNII_INVALID(3, 6525, 6875, ch->center_freq) || + IS_UNII_INVALID(4, 6875, 7125, ch->center_freq)) + ch->flags |= IEEE80211_CHAN_DISABLED; + } +} + void mt7925_regd_update(struct mt792x_dev *dev) { struct mt76_dev *mdev = &dev->mt76; struct ieee80211_hw *hw = mdev->hw; + struct wiphy *wiphy = hw->wiphy; if (!dev->regd_change) return; mt7925_mcu_set_clc(dev, mdev->alpha2, dev->country_ie_env); + mt7925_regd_channel_update(wiphy, dev); mt7925_mcu_set_channel_domain(hw->priv); mt7925_set_tx_sar_pwr(hw, NULL); dev->regd_change = false; @@ -244,6 +339,7 @@ int mt7925_register_device(struct mt792x_dev *dev) dev->mt76.tx_worker.fn = mt792x_tx_worker; INIT_DELAYED_WORK(&dev->pm.ps_work, mt792x_pm_power_save_work); + INIT_DELAYED_WORK(&dev->mlo_pm_work, mt7925_mlo_pm_work); INIT_WORK(&dev->pm.wake_work, mt792x_pm_wake_work); spin_lock_init(&dev->pm.wake.lock); mutex_init(&dev->pm.mutex); diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/main.c b/drivers/net/wireless/mediatek/mt76/mt7925/main.c index 98daf80ac1313..e79364ac129ee 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7925/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7925/main.c @@ -251,12 +251,12 @@ int mt7925_init_mlo_caps(struct mt792x_phy *phy) }, }; - if (!(phy->chip_cap & MT792x_CHIP_CAP_MLO_EVT_EN)) + if (!(phy->chip_cap & MT792x_CHIP_CAP_MLO_EN)) return 0; ext_capab[0].eml_capabilities = phy->eml_cap; ext_capab[0].mld_capa_and_ops = - u16_encode_bits(1, IEEE80211_MLD_CAP_OP_MAX_SIMUL_LINKS); + u16_encode_bits(0, IEEE80211_MLD_CAP_OP_MAX_SIMUL_LINKS); wiphy->flags |= WIPHY_FLAG_SUPPORTS_MLO; wiphy->iftype_ext_capab = ext_capab; @@ -360,10 +360,15 @@ static int mt7925_mac_link_bss_add(struct mt792x_dev *dev, struct mt76_txq *mtxq; int idx, ret = 0; - mconf->mt76.idx = __ffs64(~dev->mt76.vif_mask); - if (mconf->mt76.idx >= MT792x_MAX_INTERFACES) { - ret = -ENOSPC; - goto out; + if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { + mconf->mt76.idx = MT792x_MAX_INTERFACES; + } else { + mconf->mt76.idx = __ffs64(~dev->mt76.vif_mask); + + if (mconf->mt76.idx >= MT792x_MAX_INTERFACES) { + ret = -ENOSPC; + goto out; + } } mconf->mt76.omac_idx = ieee80211_vif_is_mld(vif) ? @@ -371,6 +376,7 @@ static int mt7925_mac_link_bss_add(struct mt792x_dev *dev, mconf->mt76.band_idx = 0xff; mconf->mt76.wmm_idx = ieee80211_vif_is_mld(vif) ? 
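
The channel knockout in mt7925_regd_channel_update() above boils down to: a channel is disabled when its UNII validity bit is clear in clc_chan_conf and its center frequency falls inside the matching range. A runnable miniature with the same bit/range pairing (the channel list is invented for the demo):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CHAN_DISABLED 0x1u

struct chan {
	int center_freq;
	unsigned int flags;
};

static bool unii_invalid(uint32_t conf, int bit, int sfreq, int efreq, int cfreq)
{
	return !(conf & (1u << bit)) && cfreq >= sfreq && cfreq <= efreq;
}

int main(void)
{
	struct chan chans[] = { { 5845, 0 }, { 5955, 0 }, { 6435, 0 } };
	uint32_t clc_chan_conf = 0x06;	/* UNII-4 invalid; UNII-5/6 valid */
	unsigned int i;

	for (i = 0; i < sizeof(chans) / sizeof(chans[0]); i++) {
		struct chan *ch = &chans[i];

		if (unii_invalid(clc_chan_conf, 0, 5845, 5925, ch->center_freq) ||
		    unii_invalid(clc_chan_conf, 1, 5925, 6425, ch->center_freq) ||
		    unii_invalid(clc_chan_conf, 2, 6425, 6525, ch->center_freq))
			ch->flags |= CHAN_DISABLED;

		printf("%d MHz: %s\n", ch->center_freq,
		       (ch->flags & CHAN_DISABLED) ? "disabled" : "enabled");
	}
	return 0;
}
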
0 : mconf->mt76.idx % MT76_CONNAC_MAX_WMM_SETS; + mconf->mt76.link_idx = hweight16(mvif->valid_links); if (mvif->phy->mt76->chandef.chan->band != NL80211_BAND_2GHZ) mconf->mt76.basic_rates_idx = MT792x_BASIC_RATES_TBL + 4; @@ -421,6 +427,7 @@ mt7925_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) mvif->bss_conf.vif = mvif; mvif->sta.vif = mvif; mvif->deflink_id = IEEE80211_LINK_UNSPECIFIED; + mvif->mlo_pm_state = MT792x_MLO_LINK_DISASSOC; ret = mt7925_mac_link_bss_add(dev, &vif->bss_conf, &mvif->sta.deflink); if (ret < 0) @@ -1149,7 +1156,12 @@ static void mt7925_mac_link_sta_remove(struct mt76_dev *mdev, struct mt792x_bss_conf *mconf; mconf = mt792x_link_conf_to_mconf(link_conf); - mt792x_mac_link_bss_remove(dev, mconf, mlink); + + if (ieee80211_vif_is_mld(vif)) + mt792x_mac_link_bss_remove(dev, mconf, mlink); + else + mt7925_mcu_add_bss_info(&dev->phy, mconf->mt76.ctx, link_conf, + link_sta, false); } spin_lock_bh(&mdev->sta_poll_lock); @@ -1169,6 +1181,31 @@ mt7925_mac_sta_remove_links(struct mt792x_dev *dev, struct ieee80211_vif *vif, struct mt76_wcid *wcid; unsigned int link_id; + /* clean up bss before starec */ + for_each_set_bit(link_id, &old_links, IEEE80211_MLD_MAX_NUM_LINKS) { + struct ieee80211_link_sta *link_sta; + struct ieee80211_bss_conf *link_conf; + struct mt792x_bss_conf *mconf; + struct mt792x_link_sta *mlink; + + link_sta = mt792x_sta_to_link_sta(vif, sta, link_id); + if (!link_sta) + continue; + + mlink = mt792x_sta_to_link(msta, link_id); + if (!mlink) + continue; + + link_conf = mt792x_vif_to_bss_conf(vif, link_id); + if (!link_conf) + continue; + + mconf = mt792x_link_conf_to_mconf(link_conf); + + mt7925_mcu_add_bss_info(&dev->phy, mconf->mt76.ctx, link_conf, + link_sta, false); + } + for_each_set_bit(link_id, &old_links, IEEE80211_MLD_MAX_NUM_LINKS) { struct ieee80211_link_sta *link_sta; struct mt792x_link_sta *mlink; @@ -1206,51 +1243,22 @@ void mt7925_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif, { struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76); struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv; - struct { - struct { - u8 omac_idx; - u8 band_idx; - __le16 pad; - } __packed hdr; - struct req_tlv { - __le16 tag; - __le16 len; - u8 active; - u8 link_idx; /* hw link idx */ - u8 omac_addr[ETH_ALEN]; - } __packed tlv; - } dev_req = { - .hdr = { - .omac_idx = 0, - .band_idx = 0, - }, - .tlv = { - .tag = cpu_to_le16(DEV_INFO_ACTIVE), - .len = cpu_to_le16(sizeof(struct req_tlv)), - .active = true, - }, - }; + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; unsigned long rem; rem = ieee80211_vif_is_mld(vif) ? 
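
The index allocation in mt7925_mac_link_bss_add() above uses the usual bitmap-allocator idiom: vif_mask keeps one bit per index in use, so the first set bit of ~vif_mask is the lowest free slot. A userspace equivalent, with __builtin_ctzll() standing in for the kernel's __ffs64():

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t vif_mask = 0x0b;	/* indices 0, 1 and 3 in use */
	unsigned int idx = (unsigned int)__builtin_ctzll(~vif_mask);

	printf("allocated index %u\n", idx);	/* prints 2 */
	vif_mask |= 1ull << idx;		/* mark it used */
	return 0;
}
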
msta->valid_links : BIT(0); mt7925_mac_sta_remove_links(dev, vif, sta, rem); - if (ieee80211_vif_is_mld(vif)) { - mt7925_mcu_set_dbdc(&dev->mphy, false); - - /* recovery omac address for the legacy interface */ - memcpy(dev_req.tlv.omac_addr, vif->addr, ETH_ALEN); - mt76_mcu_send_msg(mdev, MCU_UNI_CMD(DEV_INFO_UPDATE), - &dev_req, sizeof(dev_req), true); - } + if (ieee80211_vif_is_mld(vif)) + mt7925_mcu_del_dev(mdev, vif); if (vif->type == NL80211_IFTYPE_STATION) { - struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; - mvif->wep_sta = NULL; ewma_rssi_init(&mvif->bss_conf.rssi); } + + mvif->mlo_pm_state = MT792x_MLO_LINK_DISASSOC; } EXPORT_SYMBOL_GPL(mt7925_mac_sta_remove); @@ -1289,22 +1297,22 @@ mt7925_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, case IEEE80211_AMPDU_RX_START: mt76_rx_aggr_start(&dev->mt76, &msta->deflink.wcid, tid, ssn, params->buf_size); - mt7925_mcu_uni_rx_ba(dev, vif, params, true); + mt7925_mcu_uni_rx_ba(dev, params, true); break; case IEEE80211_AMPDU_RX_STOP: mt76_rx_aggr_stop(&dev->mt76, &msta->deflink.wcid, tid); - mt7925_mcu_uni_rx_ba(dev, vif, params, false); + mt7925_mcu_uni_rx_ba(dev, params, false); break; case IEEE80211_AMPDU_TX_OPERATIONAL: mtxq->aggr = true; mtxq->send_bar = false; - mt7925_mcu_uni_tx_ba(dev, vif, params, true); + mt7925_mcu_uni_tx_ba(dev, params, true); break; case IEEE80211_AMPDU_TX_STOP_FLUSH: case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT: mtxq->aggr = false; clear_bit(tid, &msta->deflink.wcid.ampdu_state); - mt7925_mcu_uni_tx_ba(dev, vif, params, false); + mt7925_mcu_uni_tx_ba(dev, params, false); break; case IEEE80211_AMPDU_TX_START: set_bit(tid, &msta->deflink.wcid.ampdu_state); @@ -1313,7 +1321,7 @@ mt7925_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, case IEEE80211_AMPDU_TX_STOP_CONT: mtxq->aggr = false; clear_bit(tid, &msta->deflink.wcid.ampdu_state); - mt7925_mcu_uni_tx_ba(dev, vif, params, false); + mt7925_mcu_uni_tx_ba(dev, params, false); ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); break; } @@ -1322,6 +1330,38 @@ mt7925_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, return ret; } +static void +mt7925_mlo_pm_iter(void *priv, u8 *mac, struct ieee80211_vif *vif) +{ + struct mt792x_dev *dev = priv; + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + unsigned long valid = ieee80211_vif_is_mld(vif) ? 
+ mvif->valid_links : BIT(0); + struct ieee80211_bss_conf *bss_conf; + int i; + + if (mvif->mlo_pm_state != MT792x_MLO_CHANGED_PS) + return; + + mt792x_mutex_acquire(dev); + for_each_set_bit(i, &valid, IEEE80211_MLD_MAX_NUM_LINKS) { + bss_conf = mt792x_vif_to_bss_conf(vif, i); + mt7925_mcu_uni_bss_ps(dev, bss_conf); + } + mt792x_mutex_release(dev); +} + +void mt7925_mlo_pm_work(struct work_struct *work) +{ + struct mt792x_dev *dev = container_of(work, struct mt792x_dev, + mlo_pm_work.work); + struct ieee80211_hw *hw = mt76_hw(dev); + + ieee80211_iterate_active_interfaces(hw, + IEEE80211_IFACE_ITER_RESUME_ALL, + mt7925_mlo_pm_iter, dev); +} + static bool is_valid_alpha2(const char *alpha2) { if (!alpha2) @@ -1378,6 +1418,8 @@ void mt7925_scan_work(struct work_struct *work) if (!is_valid_alpha2(evt->alpha2)) break; + mt7925_regd_be_ctrl(phy->dev, evt->alpha2); + if (mdev->alpha2[0] != '0' && mdev->alpha2[1] != '0') break; @@ -1871,6 +1913,9 @@ static void mt7925_vif_cfg_changed(struct ieee80211_hw *hw, mt7925_mcu_sta_update(dev, NULL, vif, true, MT76_STA_INFO_STATE_ASSOC); mt7925_mcu_set_beacon_filter(dev, vif, vif->cfg.assoc); + + if (ieee80211_vif_is_mld(vif)) + mvif->mlo_pm_state = MT792x_MLO_LINK_ASSOC; } if (changed & BSS_CHANGED_ARP_FILTER) { @@ -1881,9 +1926,19 @@ static void mt7925_vif_cfg_changed(struct ieee80211_hw *hw, } if (changed & BSS_CHANGED_PS) { - for_each_set_bit(i, &valid, IEEE80211_MLD_MAX_NUM_LINKS) { - bss_conf = mt792x_vif_to_bss_conf(vif, i); + if (hweight16(mvif->valid_links) < 2) { + /* legacy */ + bss_conf = &vif->bss_conf; mt7925_mcu_uni_bss_ps(dev, bss_conf); + } else { + if (mvif->mlo_pm_state == MT792x_MLO_LINK_ASSOC) { + mvif->mlo_pm_state = MT792x_MLO_CHANGED_PS_PENDING; + } else if (mvif->mlo_pm_state == MT792x_MLO_CHANGED_PS) { + for_each_set_bit(i, &valid, IEEE80211_MLD_MAX_NUM_LINKS) { + bss_conf = mt792x_vif_to_bss_conf(vif, i); + mt7925_mcu_uni_bss_ps(dev, bss_conf); + } + } } } @@ -1934,11 +1989,12 @@ static void mt7925_link_info_changed(struct ieee80211_hw *hw, if (changed & (BSS_CHANGED_QOS | BSS_CHANGED_BEACON_ENABLED)) mt7925_mcu_set_tx(dev, info); - if (changed & BSS_CHANGED_BSSID) { - if (ieee80211_vif_is_mld(vif) && - hweight16(mvif->valid_links) == 2) - /* Indicate the secondary setup done */ - mt7925_mcu_uni_bss_bcnft(dev, info, true); + if (mvif->mlo_pm_state == MT792x_MLO_CHANGED_PS_PENDING) { + /* Indicate the secondary setup done */ + mt7925_mcu_uni_bss_bcnft(dev, info, true); + + ieee80211_queue_delayed_work(hw, &dev->mlo_pm_work, 5 * HZ); + mvif->mlo_pm_state = MT792x_MLO_CHANGED_PS; } mt792x_mutex_release(dev); @@ -2022,8 +2078,6 @@ mt7925_change_vif_links(struct ieee80211_hw *hw, struct ieee80211_vif *vif, goto free; if (mconf != &mvif->bss_conf) { - mt7925_mcu_set_bss_pm(dev, link_conf, true); - err = mt7925_set_mlo_roc(phy, &mvif->bss_conf, vif->active_links); if (err < 0) diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c index 15815ad84713a..e61da76b2097b 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c @@ -348,14 +348,10 @@ mt7925_mcu_handle_hif_ctrl_basic(struct mt792x_dev *dev, struct tlv *tlv) basic = (struct mt7925_mcu_hif_ctrl_basic_tlv *)tlv; if (basic->hifsuspend) { - if (basic->hif_tx_traffic_status == HIF_TRAFFIC_IDLE && - basic->hif_rx_traffic_status == HIF_TRAFFIC_IDLE) - /* success */ - dev->hif_idle = true; - else - /* busy */ - /* invalid */ - dev->hif_idle = false; + dev->hif_idle = true; + 
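
The MLO power-save staging added across main.c above is easiest to read as a small state machine: a PS request that arrives right after association is parked in a pending state until the secondary link is set up, then flushed (and later refreshed from mlo_pm_work). A condensed, driver-free rendition, with state names mirroring the mt792x enum:

#include <stdio.h>

enum mlo_pm_state {
	MLO_LINK_DISASSOC,	/* no MLD association */
	MLO_LINK_ASSOC,		/* assoc done, PS must wait for 2nd link */
	MLO_CHANGED_PS_PENDING,	/* PS requested before 2nd link was up */
	MLO_CHANGED_PS,		/* PS programmed; delayed work refreshes it */
};

/* BSS_CHANGED_PS handling, as in mt7925_vif_cfg_changed() */
static enum mlo_pm_state on_ps_request(enum mlo_pm_state s)
{
	switch (s) {
	case MLO_LINK_ASSOC:
		return MLO_CHANGED_PS_PENDING;	/* defer */
	case MLO_CHANGED_PS:
		return MLO_CHANGED_PS;		/* apply on every link now */
	default:
		return s;
	}
}

/* secondary-link setup, as in mt7925_link_info_changed() */
static enum mlo_pm_state on_secondary_link_up(enum mlo_pm_state s)
{
	return s == MLO_CHANGED_PS_PENDING ? MLO_CHANGED_PS : s;
}

int main(void)
{
	enum mlo_pm_state s = MLO_LINK_ASSOC;

	s = on_ps_request(s);		/* -> pending */
	s = on_secondary_link_up(s);	/* -> programmed */
	printf("final state: %d\n", s);
	return 0;
}
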
if (!(basic->hif_tx_traffic_status == HIF_TRAFFIC_IDLE && + basic->hif_rx_traffic_status == HIF_TRAFFIC_IDLE)) + dev_info(dev->mt76.dev, "Hif traffic not idle.\n"); } else { dev->hif_resumed = true; } @@ -576,10 +572,10 @@ void mt7925_mcu_rx_event(struct mt792x_dev *dev, struct sk_buff *skb) static int mt7925_mcu_sta_ba(struct mt76_dev *dev, struct mt76_vif_link *mvif, - struct mt76_wcid *wcid, struct ieee80211_ampdu_params *params, bool enable, bool tx) { + struct mt76_wcid *wcid = (struct mt76_wcid *)params->sta->drv_priv; struct sta_rec_ba_uni *ba; struct sk_buff *skb; struct tlv *tlv; @@ -607,60 +603,76 @@ mt7925_mcu_sta_ba(struct mt76_dev *dev, struct mt76_vif_link *mvif, /** starec & wtbl **/ int mt7925_mcu_uni_tx_ba(struct mt792x_dev *dev, - struct ieee80211_vif *vif, struct ieee80211_ampdu_params *params, bool enable) { struct mt792x_sta *msta = (struct mt792x_sta *)params->sta->drv_priv; - struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; - struct mt792x_link_sta *mlink; - struct mt792x_bss_conf *mconf; - unsigned long usable_links = ieee80211_vif_usable_links(vif); - struct mt76_wcid *wcid; - u8 link_id, ret; - - for_each_set_bit(link_id, &usable_links, IEEE80211_MLD_MAX_NUM_LINKS) { - mconf = mt792x_vif_to_link(mvif, link_id); - mlink = mt792x_sta_to_link(msta, link_id); - wcid = &mlink->wcid; - - if (enable && !params->amsdu) - mlink->wcid.amsdu = false; + struct mt792x_vif *mvif = msta->vif; - ret = mt7925_mcu_sta_ba(&dev->mt76, &mconf->mt76, wcid, params, - enable, true); - if (ret < 0) - break; - } + if (enable && !params->amsdu) + msta->deflink.wcid.amsdu = false; - return ret; + return mt7925_mcu_sta_ba(&dev->mt76, &mvif->bss_conf.mt76, params, + enable, true); } int mt7925_mcu_uni_rx_ba(struct mt792x_dev *dev, - struct ieee80211_vif *vif, struct ieee80211_ampdu_params *params, bool enable) { struct mt792x_sta *msta = (struct mt792x_sta *)params->sta->drv_priv; - struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; - struct mt792x_link_sta *mlink; - struct mt792x_bss_conf *mconf; - unsigned long usable_links = ieee80211_vif_usable_links(vif); - struct mt76_wcid *wcid; - u8 link_id, ret; + struct mt792x_vif *mvif = msta->vif; - for_each_set_bit(link_id, &usable_links, IEEE80211_MLD_MAX_NUM_LINKS) { - mconf = mt792x_vif_to_link(mvif, link_id); - mlink = mt792x_sta_to_link(msta, link_id); - wcid = &mlink->wcid; + return mt7925_mcu_sta_ba(&dev->mt76, &mvif->bss_conf.mt76, params, + enable, false); +} - ret = mt7925_mcu_sta_ba(&dev->mt76, &mconf->mt76, wcid, params, - enable, false); - if (ret < 0) - break; - } +static int mt7925_mcu_read_eeprom(struct mt792x_dev *dev, u32 offset, u8 *val) +{ + struct { + u8 rsv[4]; - return ret; + __le16 tag; + __le16 len; + + __le32 addr; + __le32 valid; + u8 data[MT7925_EEPROM_BLOCK_SIZE]; + } __packed req = { + .tag = cpu_to_le16(1), + .len = cpu_to_le16(sizeof(req) - 4), + .addr = cpu_to_le32(round_down(offset, + MT7925_EEPROM_BLOCK_SIZE)), + }; + struct evt { + u8 rsv[4]; + + __le16 tag; + __le16 len; + + __le32 ver; + __le32 addr; + __le32 valid; + __le32 size; + __le32 magic_num; + __le32 type; + __le32 rsv1[4]; + u8 data[32]; + } __packed *res; + struct sk_buff *skb; + int ret; + + ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_WM_UNI_CMD_QUERY(EFUSE_CTRL), + &req, sizeof(req), true, &skb); + if (ret) + return ret; + + res = (struct evt *)skb->data; + *val = res->data[offset % MT7925_EEPROM_BLOCK_SIZE]; + + dev_kfree_skb(skb); + + return 0; } static int mt7925_load_clc(struct mt792x_dev *dev, const char *fw_name) 
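
mt7925_mcu_read_eeprom() above hides a small addressing detail: the firmware serves whole EEPROM blocks only, so the request rounds the offset down to a block boundary and the caller indexes the reply with the remainder. Assuming a 16-byte block (standing in for MT7925_EEPROM_BLOCK_SIZE):

#include <stdint.h>
#include <stdio.h>

#define BLOCK_SIZE 16	/* assumption, not read from the driver */

int main(void)
{
	uint32_t offset = 0xa71;	/* MT_EE_HW_TYPE */
	uint32_t base = offset / BLOCK_SIZE * BLOCK_SIZE;	/* round_down */

	printf("request block at 0x%x, read byte %u of the reply\n",
	       (unsigned int)base, (unsigned int)(offset % BLOCK_SIZE));
	return 0;
}
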
@@ -671,13 +683,21 @@ static int mt7925_load_clc(struct mt792x_dev *dev, const char *fw_name)
 	struct mt76_dev *mdev = &dev->mt76;
 	struct mt792x_phy *phy = &dev->phy;
 	const struct firmware *fw;
+	u8 *clc_base = NULL, hw_encap = 0;
 	int ret, i, len, offset = 0;
-	u8 *clc_base = NULL;
 
+	dev->phy.clc_chan_conf = 0xff;
 	if (mt7925_disable_clc || mt76_is_usb(&dev->mt76))
 		return 0;
 
+	if (mt76_is_mmio(&dev->mt76)) {
+		ret = mt7925_mcu_read_eeprom(dev, MT_EE_HW_TYPE, &hw_encap);
+		if (ret)
+			return ret;
+		hw_encap = u8_get_bits(hw_encap, MT_EE_HW_TYPE_ENCAP);
+	}
+
 	ret = request_firmware(&fw, fw_name, mdev->dev);
 	if (ret)
 		return ret;
@@ -722,6 +742,12 @@ static int mt7925_load_clc(struct mt792x_dev *dev, const char *fw_name)
 		if (phy->clc[clc->idx])
 			continue;
 
+		/* header content sanity */
+		if ((clc->idx == MT792x_CLC_BE_CTRL &&
+		     u8_get_bits(clc->t2.type, MT_EE_HW_TYPE_ENCAP) != hw_encap) ||
+		    u8_get_bits(clc->t0.type, MT_EE_HW_TYPE_ENCAP) != hw_encap)
+			continue;
+
 		phy->clc[clc->idx] = devm_kmemdup(mdev->dev, clc,
 						  le32_to_cpu(clc->len),
 						  GFP_KERNEL);
@@ -828,7 +854,6 @@ mt7925_mcu_parse_phy_cap(struct mt792x_dev *dev, char *data)
 	mdev->phy.chainmask = mdev->phy.antenna_mask;
 	mdev->phy.cap.has_2ghz = cap->hw_path & BIT(WF0_24G);
 	mdev->phy.cap.has_5ghz = cap->hw_path & BIT(WF0_5G);
-	dev->has_eht = cap->eht;
 }
 
 static void
@@ -1850,49 +1875,6 @@ mt7925_mcu_sta_mld_tlv(struct sk_buff *skb,
 	}
 }
 
-static int
-mt7925_mcu_sta_cmd(struct mt76_phy *phy,
-		   struct mt76_sta_cmd_info *info)
-{
-	struct mt76_vif_link *mvif = (struct mt76_vif_link *)info->vif->drv_priv;
-	struct mt76_dev *dev = phy->dev;
-	struct sk_buff *skb;
-	int conn_state;
-
-	skb = __mt76_connac_mcu_alloc_sta_req(dev, mvif, info->wcid,
-					      MT7925_STA_UPDATE_MAX_SIZE);
-	if (IS_ERR(skb))
-		return PTR_ERR(skb);
-
-	conn_state = info->enable ? CONN_STATE_PORT_SECURE :
-				    CONN_STATE_DISCONNECT;
-	if (info->link_sta)
-		mt76_connac_mcu_sta_basic_tlv(dev, skb, info->link_conf,
-					      info->link_sta,
-					      conn_state, info->newly);
-	if (info->link_sta && info->enable) {
-		mt7925_mcu_sta_phy_tlv(skb, info->vif, info->link_sta);
-		mt7925_mcu_sta_ht_tlv(skb, info->link_sta);
-		mt7925_mcu_sta_vht_tlv(skb, info->link_sta);
-		mt76_connac_mcu_sta_uapsd(skb, info->vif, info->link_sta->sta);
-		mt7925_mcu_sta_amsdu_tlv(skb, info->vif, info->link_sta);
-		mt7925_mcu_sta_he_tlv(skb, info->link_sta);
-		mt7925_mcu_sta_he_6g_tlv(skb, info->link_sta);
-		mt7925_mcu_sta_eht_tlv(skb, info->link_sta);
-		mt7925_mcu_sta_rate_ctrl_tlv(skb, info->vif,
-					     info->link_sta);
-		mt7925_mcu_sta_state_v2_tlv(phy, skb, info->link_sta,
-					    info->vif, info->rcpi,
-					    info->state);
-		mt7925_mcu_sta_mld_tlv(skb, info->vif, info->link_sta->sta);
-	}
-
-	if (info->enable)
-		mt7925_mcu_sta_hdr_trans_tlv(skb, info->vif, info->link_sta);
-
-	return mt76_mcu_skb_send_msg(dev, skb, info->cmd, true);
-}
-
 static void
 mt7925_mcu_sta_remove_tlv(struct sk_buff *skb)
 {
@@ -1905,8 +1887,8 @@ mt7925_mcu_sta_remove_tlv(struct sk_buff *skb)
 }
 
 static int
-mt7925_mcu_mlo_sta_cmd(struct mt76_phy *phy,
-		       struct mt76_sta_cmd_info *info)
+mt7925_mcu_sta_cmd(struct mt76_phy *phy,
+		   struct mt76_sta_cmd_info *info)
 {
 	struct mt792x_vif *mvif = (struct mt792x_vif *)info->vif->drv_priv;
 	struct mt76_dev *dev = phy->dev;
@@ -1920,12 +1902,10 @@ mt7925_mcu_mlo_sta_cmd(struct mt76_phy *phy,
 	if (IS_ERR(skb))
 		return PTR_ERR(skb);
 
-	if (info->enable)
+	if (info->enable && info->link_sta) {
 		mt76_connac_mcu_sta_basic_tlv(dev, skb, info->link_conf,
 					      info->link_sta,
 					      info->enable, info->newly);
-
-	if (info->enable && info->link_sta) {
 		mt7925_mcu_sta_phy_tlv(skb, info->vif, info->link_sta);
 		mt7925_mcu_sta_ht_tlv(skb, info->link_sta);
 		mt7925_mcu_sta_vht_tlv(skb, info->link_sta);
@@ -1976,25 +1956,15 @@ int mt7925_mcu_sta_update(struct mt792x_dev *dev,
 	};
 	struct mt792x_sta *msta;
 	struct mt792x_link_sta *mlink;
-	int err;
 
 	if (link_sta) {
 		msta = (struct mt792x_sta *)link_sta->sta->drv_priv;
 		mlink = mt792x_sta_to_link(msta, link_sta->link_id);
 	}
 	info.wcid = link_sta ? &mlink->wcid : &mvif->sta.deflink.wcid;
+	info.newly = state != MT76_STA_INFO_STATE_ASSOC;
 
-	if (link_sta)
-		info.newly = state != MT76_STA_INFO_STATE_ASSOC;
-	else
-		info.newly = state == MT76_STA_INFO_STATE_ASSOC ? false : true;
-
-	if (ieee80211_vif_is_mld(vif))
-		err = mt7925_mcu_mlo_sta_cmd(&dev->mphy, &info);
-	else
-		err = mt7925_mcu_sta_cmd(&dev->mphy, &info);
-
-	return err;
+	return mt7925_mcu_sta_cmd(&dev->mphy, &info);
 }
 
 int mt7925_mcu_set_beacon_filter(struct mt792x_dev *dev,
@@ -2559,6 +2529,7 @@ mt7925_mcu_bss_mld_tlv(struct sk_buff *skb,
 	struct ieee80211_vif *vif = link_conf->vif;
 	struct mt792x_bss_conf *mconf = mt792x_link_conf_to_mconf(link_conf);
 	struct mt792x_vif *mvif = (struct mt792x_vif *)link_conf->vif->drv_priv;
+	struct mt792x_phy *phy = mvif->phy;
 	struct bss_mld_tlv *mld;
 	struct tlv *tlv;
 	bool is_mld;
@@ -2574,8 +2545,13 @@ mt7925_mcu_bss_mld_tlv(struct sk_buff *skb,
 	mld->group_mld_id = is_mld ? mvif->bss_conf.mt76.idx : 0xff;
 	mld->own_mld_id = mconf->mt76.idx + 32;
 	mld->remap_idx = 0xff;
-	mld->eml_enable = !!(link_conf->vif->cfg.eml_cap &
-			     IEEE80211_EML_CAP_EMLSR_SUPP);
+
+	if (phy->chip_cap & MT792x_CHIP_CAP_MLO_EML_EN) {
+		mld->eml_enable = !!(link_conf->vif->cfg.eml_cap &
+				     IEEE80211_EML_CAP_EMLSR_SUPP);
+	} else {
+		mld->eml_enable = 0;
+	}
 
 	memcpy(mld->mac_addr, vif->addr, ETH_ALEN);
 }
@@ -2668,6 +2644,62 @@ int mt7925_mcu_set_timing(struct mt792x_phy *phy,
 				 MCU_UNI_CMD(BSS_INFO_UPDATE), true);
 }
 
+void mt7925_mcu_del_dev(struct mt76_dev *mdev,
+			struct ieee80211_vif *vif)
+{
+	struct mt76_vif_link *mvif = (struct mt76_vif_link *)vif->drv_priv;
+	struct {
+		struct {
+			u8 omac_idx;
+			u8 band_idx;
+			__le16 pad;
+		} __packed hdr;
+		struct req_tlv {
+			__le16 tag;
+			__le16 len;
+			u8 active;
+			u8 link_idx; /* hw link idx */
+			u8 omac_addr[ETH_ALEN];
+		} __packed tlv;
+	} dev_req = {
+		.tlv = {
+			.tag = cpu_to_le16(DEV_INFO_ACTIVE),
+			.len = cpu_to_le16(sizeof(struct req_tlv)),
+			.active = true,
+		},
+	};
+	struct {
+		struct {
+			u8 bss_idx;
+			u8 pad[3];
+		} __packed hdr;
+		struct mt76_connac_bss_basic_tlv basic;
+	} basic_req = {
+		.basic = {
+			.tag = cpu_to_le16(UNI_BSS_INFO_BASIC),
+			.len = cpu_to_le16(sizeof(struct mt76_connac_bss_basic_tlv)),
+			.active = true,
+			.conn_state = 1,
+		},
+	};
+
+	dev_req.hdr.omac_idx = mvif->omac_idx;
+	dev_req.hdr.band_idx = mvif->band_idx;
+
+	basic_req.hdr.bss_idx = mvif->idx;
+	basic_req.basic.omac_idx = mvif->omac_idx;
+	basic_req.basic.band_idx = mvif->band_idx;
+	basic_req.basic.link_idx = mvif->link_idx;
+
+	mt76_mcu_send_msg(mdev, MCU_UNI_CMD(BSS_INFO_UPDATE),
+			  &basic_req, sizeof(basic_req), true);
+
+	/* recover the omac address for the legacy interface */
+	memcpy(dev_req.tlv.omac_addr, vif->addr, ETH_ALEN);
+	mt76_mcu_send_msg(mdev, MCU_UNI_CMD(DEV_INFO_UPDATE),
+			  &dev_req, sizeof(dev_req), true);
+}
+
 int mt7925_mcu_add_bss_info(struct mt792x_phy *phy,
 			    struct ieee80211_chanctx_conf *ctx,
 			    struct ieee80211_bss_conf *link_conf,
@@ -3155,16 +3187,17 @@ __mt7925_mcu_set_clc(struct mt792x_dev *dev, u8 *alpha2,
 		.idx = idx,
 		.env = env_cap,
-		.acpi_conf = mt792x_acpi_get_flags(&dev->phy),
 	};
 	int ret, valid_cnt = 0;
-	u8 i, *pos;
+	u8 *pos, *last_pos;
 
 	if (!clc)
 		return 0;
 
-	pos = clc->data + sizeof(*seg) * clc->nr_seg;
-	for (i = 0; i < clc->nr_country; i++) {
+	req.ver = clc->ver;
+	pos = clc->data + sizeof(*seg) * clc->t0.nr_seg;
+	last_pos = clc->data + le32_to_cpu(*(__le32 *)(clc->data + 4));
+	while (pos < last_pos) {
 		struct mt7925_clc_rule *rule = (struct mt7925_clc_rule *)pos;
 
 		pos += sizeof(*rule);
@@ -3179,6 +3212,7 @@ __mt7925_mcu_set_clc(struct mt792x_dev *dev, u8 *alpha2,
 		memcpy(req.type, rule->type, 2);
 
 		req.size = cpu_to_le16(seg->len);
+		dev->phy.clc_chan_conf = clc->ver == 1 ? 0xff : rule->flag;
 		skb = __mt76_mcu_msg_alloc(&dev->mt76, &req,
 					   le16_to_cpu(req.size) + sizeof(req),
 					   sizeof(req), GFP_KERNEL);
@@ -3194,8 +3228,10 @@ __mt7925_mcu_set_clc(struct mt792x_dev *dev, u8 *alpha2,
 		valid_cnt++;
 	}
 
-	if (!valid_cnt)
+	if (!valid_cnt) {
+		dev->phy.clc_chan_conf = 0xff;
 		return -ENOENT;
+	}
 
 	return 0;
 }
@@ -3208,6 +3244,9 @@ int mt7925_mcu_set_clc(struct mt792x_dev *dev, u8 *alpha2,
 	/* submit all clc config */
 	for (i = 0; i < ARRAY_SIZE(phy->clc); i++) {
+		if (i == MT792x_CLC_BE_CTRL)
+			continue;
+
 		ret = __mt7925_mcu_set_clc(dev, alpha2, env_cap,
 					   phy->clc[i], i);
@@ -3266,6 +3305,9 @@ int mt7925_mcu_fill_message(struct mt76_dev *mdev, struct sk_buff *skb,
 		else
 			uni_txd->option = MCU_CMD_UNI_EXT_ACK;
 
+		if (cmd == MCU_UNI_CMD(HIF_CTRL))
+			uni_txd->option &= ~MCU_CMD_ACK;
+
 		goto exit;
 	}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h
index 1e47d2c61b545..8ac43feb26d64 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h
@@ -566,8 +566,8 @@ struct mt7925_wow_pattern_tlv {
 	u8 offset;
 	u8 mask[MT76_CONNAC_WOW_MASK_MAX_LEN];
 	u8 pattern[MT76_CONNAC_WOW_PATTEN_MAX_LEN];
-	u8 rsv[7];
-} __packed;
+	u8 rsv[4];
+};
 
 struct roc_acquire_tlv {
 	__le16 tag;
@@ -627,6 +627,8 @@ int mt7925_mcu_sched_scan_req(struct mt76_phy *phy,
 int mt7925_mcu_sched_scan_enable(struct mt76_phy *phy,
 				 struct ieee80211_vif *vif,
 				 bool enable);
+void mt7925_mcu_del_dev(struct mt76_dev *mdev,
+			struct ieee80211_vif *vif);
 int mt7925_mcu_add_bss_info(struct mt792x_phy *phy,
 			    struct ieee80211_chanctx_conf *ctx,
 			    struct ieee80211_bss_conf *link_conf,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h b/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h
index 8707b5d04743b..4e50f2597ccd4 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h
@@ -137,11 +137,18 @@ enum {
 	MT7925_CLC_MAX_NUM,
 };
 
+struct mt7925_clc_rule_v2 {
+	u32 flag;
+	u8 alpha2[2];
+	u8 rsv[10];
+} __packed;
+
 struct mt7925_clc_rule {
 	u8 alpha2[2];
 	u8 type[2];
 	u8 seg_idx;
-	u8 rsv[3];
+	u8 flag; /* UNII4~8 ctrl flag */
+	u8 rsv[2];
 } __packed;
 
 struct mt7925_clc_segment {
@@ -152,14 +159,26 @@ struct mt7925_clc_segment {
 	u8 rsv2[4];
 } __packed;
 
-struct mt7925_clc {
-	__le32 len;
-	u8 idx;
-	u8 ver;
+struct mt7925_clc_type0 {
 	u8 nr_country;
 	u8 type;
 	u8 nr_seg;
 	u8 rsv[7];
+} __packed;
+
+struct mt7925_clc_type2 {
+	u8 type;
+	u8 rsv[9];
+} __packed;
+
+struct mt7925_clc {
+	__le32 len;
+	u8 idx;
+	u8 ver;
+	union {
+		struct mt7925_clc_type0 t0;
+		struct mt7925_clc_type2 t2;
+	};
 	u8 data[];
 } __packed;
 
@@ -167,9 +186,12 @@ enum mt7925_eeprom_field {
 	MT_EE_CHIP_ID = 0x000,
 	MT_EE_VERSION = 0x002,
 	MT_EE_MAC_ADDR = 0x004,
+	MT_EE_HW_TYPE = 0xa71,
 	__MT_EE_MAX = 0x9ff
 };
 
+#define MT_EE_HW_TYPE_ENCAP	GENMASK(1, 0)
+
 enum {
 	TXPWR_USER,
 	TXPWR_EEPROM,
@@ -235,6 +257,7 @@ int mt7925_mcu_chip_config(struct mt792x_dev *dev, const char *cmd);
 int mt7925_mcu_set_rxfilter(struct mt792x_dev *dev, u32 fif,
 			    u8 bit_op, u32 bit_map);
 
+void mt7925_regd_be_ctrl(struct mt792x_dev *dev, u8 *alpha2);
 void mt7925_regd_update(struct mt792x_dev *dev);
 int mt7925_mac_init(struct mt792x_dev *dev);
 int mt7925_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
@@ -263,13 +286,12 @@ int mt7925_mcu_set_beacon_filter(struct mt792x_dev *dev,
 				 struct ieee80211_vif *vif,
 				 bool enable);
 int mt7925_mcu_uni_tx_ba(struct mt792x_dev *dev,
-			 struct ieee80211_vif *vif,
 			 struct ieee80211_ampdu_params *params,
 			 bool enable);
 int mt7925_mcu_uni_rx_ba(struct mt792x_dev *dev,
-			 struct ieee80211_vif *vif,
 			 struct ieee80211_ampdu_params *params,
 			 bool enable);
+void mt7925_mlo_pm_work(struct work_struct *work);
 void mt7925_scan_work(struct work_struct *work);
 void mt7925_roc_work(struct work_struct *work);
 int mt7925_mcu_uni_bss_ps(struct mt792x_dev *dev,
diff --git a/drivers/net/wireless/mediatek/mt76/mt792x.h b/drivers/net/wireless/mediatek/mt76/mt792x.h
index 32ed01a96bf7c..e0359d431eca9 100644
--- a/drivers/net/wireless/mediatek/mt76/mt792x.h
+++ b/drivers/net/wireless/mediatek/mt76/mt792x.h
@@ -27,8 +27,9 @@
 #define MT792x_CHIP_CAP_CLC_EVT_EN		BIT(0)
 #define MT792x_CHIP_CAP_RSSI_NOTIFY_EVT_EN	BIT(1)
-#define MT792x_CHIP_CAP_MLO_EVT_EN		BIT(2)
 #define MT792x_CHIP_CAP_WF_RF_PIN_CTRL_EVT_EN	BIT(3)
+#define MT792x_CHIP_CAP_MLO_EN			BIT(8)
+#define MT792x_CHIP_CAP_MLO_EML_EN		BIT(9)
 
 /* NOTE: used to map mt76_rates. idx may change if firmware expands table */
 #define MT792x_BASIC_RATES_TBL	11
@@ -70,6 +71,7 @@ struct mt792x_fw_features {
 enum {
 	MT792x_CLC_POWER,
 	MT792x_CLC_POWER_EXT,
+	MT792x_CLC_BE_CTRL,
 	MT792x_CLC_MAX_NUM,
 };
 
@@ -81,6 +83,13 @@ enum mt792x_reg_power_type {
 	MT_AP_VLP,
 };
 
+enum mt792x_mlo_pm_state {
+	MT792x_MLO_LINK_DISASSOC,
+	MT792x_MLO_LINK_ASSOC,
+	MT792x_MLO_CHANGED_PS_PENDING,
+	MT792x_MLO_CHANGED_PS,
+};
+
 DECLARE_EWMA(avg_signal, 10, 8)
 
 struct mt792x_link_sta {
@@ -134,6 +143,7 @@ struct mt792x_vif {
 	struct mt792x_phy *phy;
 	u16 valid_links;
 	u8 deflink_id;
+	enum mt792x_mlo_pm_state mlo_pm_state;
 
 	struct work_struct csa_work;
 	struct timer_list csa_timer;
@@ -239,6 +249,7 @@ struct mt792x_dev {
 	const struct mt792x_irq_map *irq_map;
 
 	struct work_struct ipv6_ns_work;
+	struct delayed_work mlo_pm_work;
 	/* IPv6 addresses for WoWLAN */
 	struct sk_buff_head ipv6_ns_list;
@@ -497,7 +508,7 @@ int mt792xe_mcu_fw_pmctrl(struct mt792x_dev *dev);
 int mt792x_init_acpi_sar(struct mt792x_dev *dev);
 int mt792x_init_acpi_sar_power(struct mt792x_phy *phy, bool set_default);
 u8 mt792x_acpi_get_flags(struct mt792x_phy *phy);
-u8 mt792x_acpi_get_mtcl_conf(struct mt792x_phy *phy, char *alpha2);
+u32 mt792x_acpi_get_mtcl_conf(struct mt792x_phy *phy, char *alpha2);
 #else
 static inline int mt792x_init_acpi_sar(struct mt792x_dev *dev)
 {
@@ -515,9 +526,9 @@ static inline u8 mt792x_acpi_get_flags(struct mt792x_phy *phy)
 	return 0;
 }
 
-static inline u8 mt792x_acpi_get_mtcl_conf(struct mt792x_phy *phy, char *alpha2)
+static inline u32 mt792x_acpi_get_mtcl_conf(struct mt792x_phy *phy, char *alpha2)
 {
-	return 0xf;
+	return MT792X_ACPI_MTCL_INVALID;
 }
 #endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt792x_acpi_sar.c b/drivers/net/wireless/mediatek/mt76/mt792x_acpi_sar.c
index 9317f8ff2070f..d1aebadd50aac 100644
--- a/drivers/net/wireless/mediatek/mt76/mt792x_acpi_sar.c
+++ b/drivers/net/wireless/mediatek/mt76/mt792x_acpi_sar.c
@@ -4,6 +4,28 @@
 #include <linux/acpi.h>
 #include "mt792x.h"
 
+static const char * const cc_list_all[] = {
+	"00", "EU", "AR", "AU", "AZ", "BY", "BO", "BR",
+	"CA", "CL", "CN", "ID", "JP", "MY", "MX", "ME",
+	"MA", "NZ", "NG", "PH", "RU", "RS", "SG", "KR",
+	"TW", "TH", "UA", "GB", "US", "VN", "KH", "PY",
+};
+
+static const char * const cc_list_eu[] = {
+	"AD", "AT", "BE", "BG", "CY", "CZ", "HR", "DK",
+	"EE", "FI", "FR", "DE", "GR", "HU", "IS", "IE",
+	"IT", "LV", "LI", "LT", "LU", "MC", "MT", "NL",
+	"NO", "PL", "PT", "RO", "SK", "SI", "ES", "SE",
+	"CH",
+};
+
+static const char * const cc_list_be[] = {
+	"AR", "BR", "BY", "CL", "IQ", "MX", "OM", "RU",
+	"RW", "VN", "KR", "UA", "", "", "", "",
+	"EU", "AT", "CN", "CA", "TW", "NZ", "PH", "UK",
+	"US",
+};
+
 static int
 mt792x_acpi_read(struct mt792x_dev *dev, u8 *method, u8 **tbl, u32 *len)
 {
@@ -66,13 +88,22 @@ mt792x_acpi_read(struct mt792x_dev *dev, u8 *method, u8 **tbl, u32 *len)
 }
 
 /* MTCL : Country List Table for 6G band */
+/* MTCL : Country List Table for 6G band and 11BE */
 static int
 mt792x_asar_acpi_read_mtcl(struct mt792x_dev *dev, u8 **table, u8 *version)
 {
-	int ret;
+	int len, ret;
 
-	*version = ((ret = mt792x_acpi_read(dev, MT792x_ACPI_MTCL, table, NULL)) < 0)
-		   ? 1 : 2;
+	ret = mt792x_acpi_read(dev, MT792x_ACPI_MTCL, table, &len);
+	if (ret)
+		return ret;
+
+	if (len == sizeof(struct mt792x_asar_cl))
+		*version = ((struct mt792x_asar_cl *)*table)->version;
+	else if (len == sizeof(struct mt792x_asar_cl_v3))
+		*version = ((struct mt792x_asar_cl_v3 *)*table)->version;
+	else
+		return -EINVAL;
 
 	return ret;
 }
@@ -351,10 +382,24 @@ u8 mt792x_acpi_get_flags(struct mt792x_phy *phy)
 }
 EXPORT_SYMBOL_GPL(mt792x_acpi_get_flags);
 
-static u8
+static u32
+mt792x_acpi_get_mtcl_map_v3(int row, int column, struct mt792x_asar_cl_v3 *cl)
+{
+	u32 config = 0;
+	u8 mode_be = 0;
+
+	mode_be = (cl->mode_be > 0x02) ? 0 : cl->mode_be;
+
+	if (cl->version > 2 && cl->clbe[row] & BIT(column))
+		config |= (mode_be & 0x3) << 4;
+
+	return config;
+}
+
+static u32
 mt792x_acpi_get_mtcl_map(int row, int column, struct mt792x_asar_cl *cl)
 {
-	u8 config = 0;
+	u32 config = 0;
 	u8 mode_6g, mode_5g9;
 
 	mode_6g = (cl->mode_6g > 0x02) ? 0 : cl->mode_6g;
@@ -368,30 +413,44 @@ mt792x_acpi_get_mtcl_map(int row, int column, struct mt792x_asar_cl *cl)
 	return config;
 }
 
-u8 mt792x_acpi_get_mtcl_conf(struct mt792x_phy *phy, char *alpha2)
+static u32
+mt792x_acpi_parse_mtcl_tbl_v3(struct mt792x_phy *phy, char *alpha2)
 {
-	static const char * const cc_list_all[] = {
-		"00", "EU", "AR", "AU", "AZ", "BY", "BO", "BR",
-		"CA", "CL", "CN", "ID", "JP", "MY", "MX", "ME",
-		"MA", "NZ", "NG", "PH", "RU", "RS", "SG", "KR",
-		"TW", "TH", "UA", "GB", "US", "VN", "KH", "PY",
-	};
-	static const char * const cc_list_eu[] = {
-		"AT", "BE", "BG", "CY", "CZ", "HR", "DK", "EE",
-		"FI", "FR", "DE", "GR", "HU", "IS", "IE", "IT",
-		"LV", "LI", "LT", "LU", "MT", "NL", "NO", "PL",
-		"PT", "RO", "SK", "SI", "ES", "SE", "CH",
-	};
 	struct mt792x_acpi_sar *sar = phy->acpisar;
-	struct mt792x_asar_cl *cl;
+	struct mt792x_asar_cl_v3 *cl = sar->countrylist_v3;
 	int col, row, i;
 
-	if (!sar)
-		return 0xf;
+	if (sar->ver != 3)
+		goto out;
 
-	cl = sar->countrylist;
 	if (!cl)
-		return 0xc;
+		return MT792X_ACPI_MTCL_INVALID;
+
+	for (i = 0; i < ARRAY_SIZE(cc_list_be); i++) {
+		col = 7 - i % 8;
+		row = i / 8;
+		if (!memcmp(cc_list_be[i], alpha2, 2))
+			return mt792x_acpi_get_mtcl_map_v3(row, col, cl);
+	}
+	for (i = 0; i < ARRAY_SIZE(cc_list_eu); i++) {
+		if (!memcmp(cc_list_eu[i], alpha2, 2))
+			return mt792x_acpi_get_mtcl_map_v3(3, 7, cl);
+	}
+
+out:
+	/* Depends on driver */
+	return 0x20;
+}
+
+static u32
+mt792x_acpi_parse_mtcl_tbl(struct mt792x_phy *phy, char *alpha2)
+{
+	struct mt792x_acpi_sar *sar = phy->acpisar;
+	struct mt792x_asar_cl *cl = sar->countrylist;
+	int col, row, i;
+
+	if (!cl)
+		return MT792X_ACPI_MTCL_INVALID;
 
 	for (i = 0; i < ARRAY_SIZE(cc_list_all); i++) {
 		col = 7 - i % 8;
@@ -406,4 +465,22 @@ u8 mt792x_acpi_get_mtcl_conf(struct mt792x_phy *phy, char *alpha2)
 
 	return mt792x_acpi_get_mtcl_map(0, 7, cl);
 }
+
+u32 mt792x_acpi_get_mtcl_conf(struct mt792x_phy *phy, char *alpha2)
+{
+	struct mt792x_acpi_sar *sar = phy->acpisar;
+	u32 config = 0;
+
+	if (!sar)
+		return MT792X_ACPI_MTCL_INVALID;
+
+	config = mt792x_acpi_parse_mtcl_tbl_v3(phy, alpha2);
+
+	if (config == MT792X_ACPI_MTCL_INVALID)
+		return MT792X_ACPI_MTCL_INVALID;
+
+	config |= mt792x_acpi_parse_mtcl_tbl(phy, alpha2);
+
+	return config;
+}
 EXPORT_SYMBOL_GPL(mt792x_acpi_get_mtcl_conf);
diff --git a/drivers/net/wireless/mediatek/mt76/mt792x_acpi_sar.h b/drivers/net/wireless/mediatek/mt76/mt792x_acpi_sar.h
index 2298983b6342f..e45dcd7fbdb18 100644
--- a/drivers/net/wireless/mediatek/mt76/mt792x_acpi_sar.h
+++ b/drivers/net/wireless/mediatek/mt76/mt792x_acpi_sar.h
@@ -15,6 +15,8 @@
 #define MT792x_ACPI_MTGS	"MTGS"
 #define MT792x_ACPI_MTFG	"MTFG"
 
+#define MT792X_ACPI_MTCL_INVALID	0xffffffff
+
 struct mt792x_asar_dyn_limit {
 	u8 idx;
 	u8 frp[5];
@@ -72,6 +74,17 @@ struct mt792x_asar_geo_v2 {
 	DECLARE_FLEX_ARRAY(struct mt792x_asar_geo_limit_v2, tbl);
 } __packed;
 
+struct mt792x_asar_cl_v3 {
+	u8 names[4];
+	u8 version;
+	u8 mode_6g;
+	u8 cl6g[6];
+	u8 mode_5g9;
+	u8 cl5g9[6];
+	u8 mode_be;
+	u8 clbe[6];
+} __packed;
+
 struct mt792x_asar_cl {
 	u8 names[4];
 	u8 version;
@@ -100,7 +113,10 @@ struct mt792x_acpi_sar {
 		struct mt792x_asar_geo *geo;
 		struct mt792x_asar_geo_v2 *geo_v2;
 	};
-	struct mt792x_asar_cl *countrylist;
+	union {
+		struct mt792x_asar_cl *countrylist;
+		struct mt792x_asar_cl_v3 *countrylist_v3;
+	};
 	struct mt792x_asar_fg *fg;
 };
diff --git a/drivers/net/wireless/mediatek/mt76/mt792x_core.c b/drivers/net/wireless/mediatek/mt76/mt792x_core.c
index 8799627f62926..0f7806f6338d0 100644
--- a/drivers/net/wireless/mediatek/mt76/mt792x_core.c
+++ b/drivers/net/wireless/mediatek/mt76/mt792x_core.c
@@ -665,7 +665,8 @@ int mt792x_init_wiphy(struct ieee80211_hw *hw)
 	ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS);
 	ieee80211_hw_set(hw, SUPPORTS_VHT_EXT_NSS_BW);
 	ieee80211_hw_set(hw, CONNECTION_MONITOR);
-	ieee80211_hw_set(hw, CHANCTX_STA_CSA);
+	if (is_mt7921(&dev->mt76))
+		ieee80211_hw_set(hw, CHANCTX_STA_CSA);
 
 	if (dev->pm.enable)
 		ieee80211_hw_set(hw, CONNECTION_MONITOR);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c
index 7b2bb72b407da..4a28db17a2874 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c
@@ -616,28 +616,51 @@ static void
 mt7996_sta_hw_queue_read(void *data, struct ieee80211_sta *sta)
 {
 	struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
-	struct mt7996_dev *dev = msta->vif->deflink.phy->dev;
+	struct mt7996_vif *mvif = msta->vif;
+	struct mt7996_dev *dev = mvif->deflink.phy->dev;
+	struct ieee80211_link_sta *link_sta;
 	struct seq_file *s = data;
-	u8 ac;
+	struct ieee80211_vif *vif;
+	unsigned int link_id;
+
+	vif = container_of((void *)mvif, struct ieee80211_vif, drv_priv);
+
+	rcu_read_lock();
 
-	for (ac = 0; ac < 4; ac++) {
-		u32 qlen, ctrl, val;
-		u32 idx = msta->wcid.idx >> 5;
-		u8 offs = msta->wcid.idx & GENMASK(4, 0);
+	for_each_sta_active_link(vif, sta, link_sta, link_id) {
+		struct mt7996_sta_link *msta_link;
+		struct mt76_vif_link *mlink;
+		u8 ac;
 
-		ctrl = BIT(31) | BIT(11) | (ac << 24);
-		val = mt76_rr(dev, MT_PLE_AC_QEMPTY(ac, idx));
+		mlink = rcu_dereference(mvif->mt76.link[link_id]);
+		if (!mlink)
+			continue;
 
-		if (val & BIT(offs))
+		msta_link = rcu_dereference(msta->link[link_id]);
+		if (!msta_link)
 			continue;
 
-		mt76_wr(dev, MT_FL_Q0_CTRL, ctrl | msta->wcid.idx);
-		qlen = mt76_get_field(dev, MT_FL_Q3_CTRL,
-				      GENMASK(11, 0));
-		seq_printf(s, "\tSTA %pM wcid %d: AC%d%d queued:%d\n",
-			   sta->addr, msta->wcid.idx,
-			   msta->vif->deflink.mt76.wmm_idx, ac, qlen);
+		for (ac = 0; ac < 4; ac++) {
+			u32 idx = msta_link->wcid.idx >> 5, qlen, ctrl, val;
+			u8 offs = msta_link->wcid.idx & GENMASK(4, 0);
+
+			ctrl = BIT(31) | BIT(11) | (ac << 24);
+			val = mt76_rr(dev, MT_PLE_AC_QEMPTY(ac, idx));
+
+			if (val & BIT(offs))
+				continue;
+
+			mt76_wr(dev,
+				MT_FL_Q0_CTRL, ctrl | msta_link->wcid.idx);
+			qlen = mt76_get_field(dev, MT_FL_Q3_CTRL,
+					      GENMASK(11, 0));
+			seq_printf(s, "\tSTA %pM wcid %d: AC%d%d queued:%d\n",
+				   sta->addr, msta_link->wcid.idx,
+				   mlink->wmm_idx, ac, qlen);
+		}
 	}
+
+	rcu_read_unlock();
 }
 
 static int
@@ -930,6 +953,7 @@ static ssize_t mt7996_sta_fixed_rate_set(struct file *file,
 	struct ieee80211_sta *sta = file->private_data;
 	struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
 	struct mt7996_dev *dev = msta->vif->deflink.phy->dev;
+	struct mt7996_sta_link *msta_link = &msta->deflink;
 	struct ra_rate phy = {};
 	char buf[100];
 	int ret;
@@ -964,7 +988,7 @@ static ssize_t mt7996_sta_fixed_rate_set(struct file *file,
 		goto out;
 	}
 
-	phy.wlan_idx = cpu_to_le16(msta->wcid.idx);
+	phy.wlan_idx = cpu_to_le16(msta_link->wcid.idx);
 	phy.gi = cpu_to_le16(gi);
 	phy.ltf = cpu_to_le16(ltf);
 	phy.ldpc = phy.ldpc ? 7 : 0;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mac.c b/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
index 019c925ae600e..d89c06f47997f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
@@ -53,26 +53,48 @@ static const struct mt7996_dfs_radar_spec jp_radar_specs = {
 };
 
 static struct mt76_wcid *mt7996_rx_get_wcid(struct mt7996_dev *dev,
-					    u16 idx, bool unicast)
+					    u16 idx, u8 band_idx)
 {
-	struct mt7996_sta *sta;
+	struct mt7996_sta_link *msta_link;
+	struct mt7996_sta *msta;
+	struct mt7996_vif *mvif;
 	struct mt76_wcid *wcid;
+	int i;
 
 	if (idx >= ARRAY_SIZE(dev->mt76.wcid))
 		return NULL;
 
 	wcid = rcu_dereference(dev->mt76.wcid[idx]);
-	if (unicast || !wcid)
-		return wcid;
+	if (!wcid)
+		return NULL;
 
-	if (!wcid->sta)
+	if (!mt7996_band_valid(dev, band_idx))
 		return NULL;
 
-	sta = container_of(wcid, struct mt7996_sta, wcid);
-	if (!sta->vif)
+	if (wcid->phy_idx == band_idx)
+		return wcid;
+
+	msta_link = container_of(wcid, struct mt7996_sta_link, wcid);
+	msta = msta_link->sta;
+	if (!msta || !msta->vif)
 		return NULL;
 
-	return &sta->vif->deflink.sta.wcid;
+	mvif = msta->vif;
+	for (i = 0; i < ARRAY_SIZE(mvif->mt76.link); i++) {
+		struct mt76_vif_link *mlink;
+
+		mlink = rcu_dereference(mvif->mt76.link[i]);
+		if (!mlink)
+			continue;
+
+		if (mlink->band_idx != band_idx)
+			continue;
+
+		msta_link = rcu_dereference(msta->link[i]);
+		break;
+	}
+
+	return &msta_link->wcid;
 }
 
 bool mt7996_mac_wtbl_update(struct mt7996_dev *dev, int idx, u32 mask)
@@ -100,10 +122,13 @@ static void mt7996_mac_sta_poll(struct mt7996_dev *dev)
 		[IEEE80211_AC_VI] = 4,
 		[IEEE80211_AC_VO] = 6
 	};
+	struct mt7996_sta_link *msta_link;
+	struct mt76_vif_link *mlink;
 	struct ieee80211_sta *sta;
 	struct mt7996_sta *msta;
 	u32 tx_time[IEEE80211_NUM_ACS], rx_time[IEEE80211_NUM_ACS];
 	LIST_HEAD(sta_poll_list);
+	struct mt76_wcid *wcid;
 	int i;
 
 	spin_lock_bh(&dev->mt76.sta_poll_lock);
@@ -123,25 +148,28 @@ static void mt7996_mac_sta_poll(struct mt7996_dev *dev)
 			spin_unlock_bh(&dev->mt76.sta_poll_lock);
 			break;
 		}
-		msta = list_first_entry(&sta_poll_list,
-					struct mt7996_sta, wcid.poll_list);
-		list_del_init(&msta->wcid.poll_list);
+		msta_link = list_first_entry(&sta_poll_list,
+					     struct mt7996_sta_link,
+					     wcid.poll_list);
+		msta = msta_link->sta;
+		wcid = &msta_link->wcid;
+		list_del_init(&wcid->poll_list);
 		spin_unlock_bh(&dev->mt76.sta_poll_lock);
 
-		idx = msta->wcid.idx;
+		idx = wcid->idx;
 
 		/* refresh peer's airtime reporting */
 		addr = mt7996_mac_wtbl_lmac_addr(dev, idx, 20);
 		for (i = 0; i < IEEE80211_NUM_ACS; i++) {
-			u32 tx_last = msta->airtime_ac[i];
-			u32 rx_last = msta->airtime_ac[i + 4];
+			u32 tx_last = msta_link->airtime_ac[i];
+			u32 rx_last = msta_link->airtime_ac[i + 4];
 
-			msta->airtime_ac[i] = mt76_rr(dev, addr);
-			msta->airtime_ac[i + 4] = mt76_rr(dev, addr + 4);
+			msta_link->airtime_ac[i] = mt76_rr(dev, addr);
+			msta_link->airtime_ac[i + 4] = mt76_rr(dev, addr + 4);
 
-			tx_time[i] = msta->airtime_ac[i] - tx_last;
-			rx_time[i] = msta->airtime_ac[i + 4] - rx_last;
+			tx_time[i] = msta_link->airtime_ac[i] - tx_last;
+			rx_time[i] = msta_link->airtime_ac[i + 4] - rx_last;
 
 			if ((tx_last | rx_last) & BIT(30))
 				clear = true;
@@ -152,10 +180,11 @@ static void mt7996_mac_sta_poll(struct mt7996_dev *dev)
 		if (clear) {
 			mt7996_mac_wtbl_update(dev, idx,
 					       MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
-			memset(msta->airtime_ac, 0, sizeof(msta->airtime_ac));
+			memset(msta_link->airtime_ac, 0,
+			       sizeof(msta_link->airtime_ac));
 		}
 
-		if (!msta->wcid.sta)
+		if (!wcid->sta)
 			continue;
 
 		sta = container_of((void *)msta, struct ieee80211_sta,
@@ -181,28 +210,23 @@ static void mt7996_mac_sta_poll(struct mt7996_dev *dev)
 		rssi[2] = to_rssi(GENMASK(23, 16), val);
 		rssi[3] = to_rssi(GENMASK(31, 24), val);
 
-		msta->ack_signal =
-			mt76_rx_signal(msta->vif->deflink.phy->mt76->antenna_mask, rssi);
+		mlink = rcu_dereference(msta->vif->mt76.link[wcid->link_id]);
+		if (mlink) {
+			struct mt76_phy *mphy = mt76_vif_link_phy(mlink);
+
+			if (mphy)
+				msta_link->ack_signal =
+					mt76_rx_signal(mphy->antenna_mask,
+						       rssi);
+		}
 
-		ewma_avg_signal_add(&msta->avg_ack_signal, -msta->ack_signal);
+		ewma_avg_signal_add(&msta_link->avg_ack_signal,
+				    -msta_link->ack_signal);
 	}
 
 	rcu_read_unlock();
 }
 
-void mt7996_mac_enable_rtscts(struct mt7996_dev *dev,
-			      struct ieee80211_vif *vif, bool enable)
-{
-	struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
-	u32 addr;
-
-	addr = mt7996_mac_wtbl_lmac_addr(dev, mvif->deflink.sta.wcid.idx, 5);
-	if (enable)
-		mt76_set(dev, addr, BIT(5));
-	else
-		mt76_clear(dev, addr, BIT(5));
-}
-
 /* The HW does not translate the mac header to 802.3 for mesh point */
 static int mt7996_reverse_frag0_hdr_trans(struct sk_buff *skb, u16 hdr_gap)
 {
@@ -474,11 +498,15 @@ mt7996_mac_fill_rx(struct mt7996_dev *dev, enum mt76_rxq_id q,
 	unicast = FIELD_GET(MT_RXD3_NORMAL_ADDR_TYPE, rxd3) == MT_RXD3_NORMAL_U2M;
 	idx = FIELD_GET(MT_RXD1_NORMAL_WLAN_IDX, rxd1);
-	status->wcid = mt7996_rx_get_wcid(dev, idx, unicast);
+	status->wcid = mt7996_rx_get_wcid(dev, idx, band_idx);
 
 	if (status->wcid) {
-		msta = container_of(status->wcid, struct mt7996_sta, wcid);
-		mt76_wcid_add_poll(&dev->mt76, &msta->wcid);
+		struct mt7996_sta_link *msta_link;
+
+		msta_link = container_of(status->wcid, struct mt7996_sta_link,
+					 wcid);
+		msta = msta_link->sta;
+		mt76_wcid_add_poll(&dev->mt76, &msta_link->wcid);
 	}
 
 	status->freq = mphy->chandef.chan->center_freq;
@@ -717,9 +745,8 @@ mt7996_mac_write_txwi_8023(struct mt7996_dev *dev, __le32 *txwi,
 	u32 val;
 
 	if (wcid->sta) {
-		struct ieee80211_sta *sta;
+		struct ieee80211_sta *sta = wcid_to_sta(wcid);
 
-		sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv);
 		wmm = sta->wme;
 	}
@@ -746,7 +773,9 @@ mt7996_mac_write_txwi_8023(struct mt7996_dev *dev, __le32 *txwi,
 
 static void
 mt7996_mac_write_txwi_80211(struct mt7996_dev *dev, __le32 *txwi,
-			    struct sk_buff *skb, struct ieee80211_key_conf *key)
+			    struct sk_buff *skb,
+			    struct ieee80211_key_conf *key,
+			    struct mt76_wcid *wcid)
 {
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
 	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
@@ -754,6 +783,7 @@ mt7996_mac_write_txwi_80211(struct mt7996_dev *dev, __le32 *txwi,
 	bool multicast = is_multicast_ether_addr(hdr->addr1);
 	u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
 	__le16 fc = hdr->frame_control, sc = hdr->seq_ctrl;
+	u16 seqno = le16_to_cpu(sc);
 	u8 fc_type, fc_stype;
 	u32 val;
@@ -773,8 +803,7 @@ mt7996_mac_write_txwi_80211(struct mt7996_dev *dev, __le32 *txwi,
 	    info->flags & IEEE80211_TX_CTL_USE_MINRATE)
 		val |= MT_TXD1_FIXED_RATE;
 
-	if (key && multicast && ieee80211_is_robust_mgmt_frame(skb) &&
-	    key->cipher == WLAN_CIPHER_SUITE_AES_CMAC) {
+	if (key && multicast && ieee80211_is_robust_mgmt_frame(skb)) {
 		val |= MT_TXD1_BIP;
 		txwi[3] &= ~cpu_to_le32(MT_TXD3_PROTECT_FRAME);
 	}
@@ -804,9 +833,13 @@ mt7996_mac_write_txwi_80211(struct mt7996_dev *dev, __le32 *txwi,
 		txwi[3] |= cpu_to_le32(MT_TXD3_REM_TX_COUNT);
 	}
 
-	if (info->flags & IEEE80211_TX_CTL_INJECTED) {
-		u16 seqno = le16_to_cpu(sc);
+	if (multicast && ieee80211_vif_is_mld(info->control.vif)) {
+		val = MT_TXD3_SN_VALID |
+		      FIELD_PREP(MT_TXD3_SEQ, IEEE80211_SEQ_TO_SN(seqno));
+		txwi[3] |= cpu_to_le32(val);
+	}
 
+	if (info->flags & IEEE80211_TX_CTL_INJECTED) {
 		if (ieee80211_is_back_req(hdr->frame_control)) {
 			struct ieee80211_bar *bar;
 
@@ -819,6 +852,19 @@ mt7996_mac_write_txwi_80211(struct mt7996_dev *dev, __le32 *txwi,
 		txwi[3] |= cpu_to_le32(val);
 		txwi[3] &= ~cpu_to_le32(MT_TXD3_HW_AMSDU);
 	}
+
+	if (ieee80211_vif_is_mld(info->control.vif) &&
+	    (multicast || unlikely(skb->protocol == cpu_to_be16(ETH_P_PAE))))
+		txwi[5] |= cpu_to_le32(MT_TXD5_FL);
+
+	if (ieee80211_is_nullfunc(fc) && ieee80211_has_a4(fc) &&
+	    ieee80211_vif_is_mld(info->control.vif)) {
+		txwi[5] |= cpu_to_le32(MT_TXD5_FL);
+		txwi[6] |= cpu_to_le32(MT_TXD6_DIS_MAT);
+	}
+
+	if (!wcid->sta && ieee80211_is_mgmt(fc))
+		txwi[6] |= cpu_to_le32(MT_TXD6_DIS_MAT);
 }
 
 void mt7996_mac_write_txwi(struct mt7996_dev *dev, __le32 *txwi,
@@ -832,7 +878,9 @@ void mt7996_mac_write_txwi(struct mt7996_dev *dev, __le32 *txwi,
 	u8 band_idx = (info->hw_queue & MT_TX_HW_QUEUE_PHY) >> 2;
 	u8 p_fmt, q_idx, omac_idx = 0, wmm_idx = 0;
 	bool is_8023 = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP;
-	struct mt76_vif_link *mvif;
+	struct mt76_vif_link *mlink = NULL;
+	struct mt7996_vif *mvif;
+	unsigned int link_id;
 	u16 tx_count = 15;
 	u32 val;
 	bool inband_disc = !!(changed & (BSS_CHANGED_UNSOL_BCAST_PROBE_RESP |
@@ -840,11 +888,20 @@ void mt7996_mac_write_txwi(struct mt7996_dev *dev, __le32 *txwi,
 	bool beacon = !!(changed & (BSS_CHANGED_BEACON |
 				    BSS_CHANGED_BEACON_ENABLED)) && (!inband_disc);
 
-	mvif = vif ? (struct mt76_vif_link *)vif->drv_priv : NULL;
-	if (mvif) {
-		omac_idx = mvif->omac_idx;
-		wmm_idx = mvif->wmm_idx;
-		band_idx = mvif->band_idx;
+	if (wcid != &dev->mt76.global_wcid)
+		link_id = wcid->link_id;
+	else
+		link_id = u32_get_bits(info->control.flags,
+				       IEEE80211_TX_CTRL_MLO_LINK);
+
+	mvif = vif ? (struct mt7996_vif *)vif->drv_priv : NULL;
+	if (mvif)
+		mlink = rcu_dereference(mvif->mt76.link[link_id]);
+
+	if (mlink) {
+		omac_idx = mlink->omac_idx;
+		wmm_idx = mlink->wmm_idx;
+		band_idx = mlink->band_idx;
 	}
 
 	if (inband_disc) {
@@ -891,7 +948,10 @@ void mt7996_mac_write_txwi(struct mt7996_dev *dev, __le32 *txwi,
 		val |= MT_TXD5_TX_STATUS_HOST;
 	txwi[5] = cpu_to_le32(val);
 
-	val = MT_TXD6_DIS_MAT | MT_TXD6_DAS;
+	val = MT_TXD6_DAS;
+	if (q_idx >= MT_LMAC_ALTX0 && q_idx <= MT_LMAC_BCN0)
+		val |= MT_TXD6_DIS_MAT;
+
 	if (is_mt7996(&dev->mt76))
 		val |= FIELD_PREP(MT_TXD6_MSDU_CNT, 1);
 	else if (is_8023 || !ieee80211_is_mgmt(hdr->frame_control))
@@ -903,23 +963,25 @@ void mt7996_mac_write_txwi(struct mt7996_dev *dev, __le32 *txwi,
 	if (is_8023)
 		mt7996_mac_write_txwi_8023(dev, txwi, skb, wcid);
 	else
-		mt7996_mac_write_txwi_80211(dev, txwi, skb, key);
+		mt7996_mac_write_txwi_80211(dev, txwi, skb, key, wcid);
 
 	if (txwi[1] & cpu_to_le32(MT_TXD1_FIXED_RATE)) {
 		bool mcast = ieee80211_is_data(hdr->frame_control) &&
 			     is_multicast_ether_addr(hdr->addr1);
 		u8 idx = MT7996_BASIC_RATES_TBL;
 
-		if (mvif) {
-			if (mcast && mvif->mcast_rates_idx)
-				idx = mvif->mcast_rates_idx;
-			else if (beacon && mvif->beacon_rates_idx)
-				idx = mvif->beacon_rates_idx;
+		if (mlink) {
+			if (mcast && mlink->mcast_rates_idx)
+				idx = mlink->mcast_rates_idx;
+			else if (beacon && mlink->beacon_rates_idx)
+				idx = mlink->beacon_rates_idx;
 			else
-				idx = mvif->basic_rates_idx;
+				idx = mlink->basic_rates_idx;
 		}
 
 		val = FIELD_PREP(MT_TXD6_TX_RATE, idx) | MT_TXD6_FIXED_BW;
+		if (mcast)
+			val |= MT_TXD6_DIS_MAT;
 		txwi[6] |= cpu_to_le32(val);
 		txwi[3] |= cpu_to_le32(MT_TXD3_BA_DISABLE);
 	}
@@ -984,8 +1046,14 @@ int mt7996_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
 	if (vif) {
 		struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
+		struct mt76_vif_link *mlink = NULL;
+
+		if (wcid->offchannel)
+			mlink = rcu_dereference(mvif->mt76.offchannel_link);
+		if (!mlink)
+			mlink = &mvif->deflink.mt76;
 
-		txp->fw.bss_idx = mvif->deflink.mt76.idx;
+		txp->fw.bss_idx = mlink->idx;
 	}
 
 	txp->fw.token = cpu_to_le16(id);
@@ -1027,9 +1095,10 @@ u32 mt7996_wed_init_buf(void *ptr, dma_addr_t phys, int token_id)
 static void
 mt7996_tx_check_aggr(struct ieee80211_sta *sta, struct sk_buff *skb)
 {
-	struct mt7996_sta *msta;
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 	bool is_8023 = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP;
+	struct mt7996_sta_link *msta_link;
+	struct mt7996_sta *msta;
 	u16 fc, tid;
 
 	if (!sta || !(sta->deflink.ht_cap.ht_supported || sta->deflink.he_cap.has_he))
@@ -1058,7 +1127,9 @@ mt7996_tx_check_aggr(struct ieee80211_sta *sta, struct sk_buff *skb)
 		return;
 
 	msta = (struct mt7996_sta *)sta->drv_priv;
-	if (!test_and_set_bit(tid, &msta->wcid.ampdu_state))
+	msta_link = &msta->deflink;
+
+	if (!test_and_set_bit(tid, &msta_link->wcid.ampdu_state))
 		ieee80211_start_tx_ba_session(sta, tid, 0);
 }
@@ -1136,7 +1207,7 @@ mt7996_mac_tx_free(struct mt7996_dev *dev, void *data, int len)
 		 */
 		info = le32_to_cpu(*cur_info);
 		if (info & MT_TXFREE_INFO_PAIR) {
-			struct mt7996_sta *msta;
+			struct mt7996_sta_link *msta_link;
 			u16 idx;
 
 			idx = FIELD_GET(MT_TXFREE_INFO_WLAN_ID, info);
@@ -1145,8 +1216,9 @@ mt7996_mac_tx_free(struct mt7996_dev *dev, void *data, int len)
 			if (!sta)
 				continue;
 
-			msta = container_of(wcid, struct mt7996_sta, wcid);
-			mt76_wcid_add_poll(&dev->mt76, &msta->wcid);
+			msta_link = container_of(wcid, struct mt7996_sta_link,
+						 wcid);
+			mt76_wcid_add_poll(&dev->mt76, &msta_link->wcid);
 			continue;
 		} else if (info & MT_TXFREE_INFO_HEADER) {
 			u32 tx_retries = 0, tx_failed = 0;
@@ -1230,7 +1302,7 @@ mt7996_mac_add_txs_skb(struct mt7996_dev *dev, struct mt76_wcid *wcid,
 		struct ieee80211_sta *sta;
 		u8 tid;
 
-		sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv);
+		sta = wcid_to_sta(wcid);
 		tid = FIELD_GET(MT_TXS0_TID, txs);
 		ieee80211_refresh_tx_agg_session_timer(sta, tid);
 	}
@@ -1344,7 +1416,7 @@ mt7996_mac_add_txs_skb(struct mt7996_dev *dev, struct mt76_wcid *wcid,
 
 static void mt7996_mac_add_txs(struct mt7996_dev *dev, void *data)
 {
-	struct mt7996_sta *msta = NULL;
+	struct mt7996_sta_link *msta_link;
 	struct mt76_wcid *wcid;
 	__le32 *txs_data = data;
 	u16 wcidx;
@@ -1365,14 +1437,13 @@ static void mt7996_mac_add_txs(struct mt7996_dev *dev, void *data)
 	if (!wcid)
 		goto out;
 
-	msta = container_of(wcid, struct mt7996_sta, wcid);
-
 	mt7996_mac_add_txs_skb(dev, wcid, pid, txs_data);
 
 	if (!wcid->sta)
 		goto out;
 
-	mt76_wcid_add_poll(&dev->mt76, &msta->wcid);
+	msta_link = container_of(wcid, struct mt7996_sta_link, wcid);
+	mt76_wcid_add_poll(&dev->mt76, &msta_link->wcid);
 
 out:
 	rcu_read_unlock();
@@ -1399,7 +1470,7 @@ bool mt7996_rx_check(struct mt76_dev *mdev, void *data, int len)
 		mt7996_mac_tx_free(dev, data, len);
 		return false;
 	case PKT_TYPE_TXS:
-		for (rxd += 4; rxd + 8 <= end; rxd += 8)
+		for (rxd += MT_TXS_HDR_SIZE; rxd + MT_TXS_SIZE <= end; rxd += MT_TXS_SIZE)
 			mt7996_mac_add_txs(dev, rxd);
 		return false;
 	case PKT_TYPE_RX_FW_MONITOR:
@@ -1442,7 +1513,7 @@ void mt7996_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
 		mt7996_mcu_rx_event(dev, skb);
 		break;
 	case PKT_TYPE_TXS:
-		for (rxd += 4; rxd + 8 <= end; rxd += 8)
+		for (rxd += MT_TXS_HDR_SIZE; rxd + MT_TXS_SIZE <= end; rxd += MT_TXS_SIZE)
 			mt7996_mac_add_txs(dev, rxd);
 		dev_kfree_skb(skb);
 		break;
@@ -2239,38 +2310,70 @@ void mt7996_mac_update_stats(struct mt7996_phy *phy)
 void mt7996_mac_sta_rc_work(struct work_struct *work)
 {
 	struct mt7996_dev *dev = container_of(work, struct mt7996_dev, rc_work);
+	struct ieee80211_bss_conf *link_conf;
+	struct ieee80211_link_sta *link_sta;
+	struct mt7996_sta_link *msta_link;
+	struct mt7996_vif_link *link;
+	struct mt76_vif_link *mlink;
 	struct ieee80211_sta *sta;
 	struct ieee80211_vif *vif;
 	struct mt7996_sta *msta;
-	u32 changed;
+	struct mt7996_vif *mvif;
 	LIST_HEAD(list);
+	u32 changed;
+	u8 link_id;
 
+	rcu_read_lock();
 	spin_lock_bh(&dev->mt76.sta_poll_lock);
 	list_splice_init(&dev->sta_rc_list, &list);
 
 	while (!list_empty(&list)) {
-		msta = list_first_entry(&list, struct mt7996_sta, rc_list);
-		list_del_init(&msta->rc_list);
-		changed = msta->changed;
-		msta->changed = 0;
+		msta_link = list_first_entry(&list, struct mt7996_sta_link,
+					     rc_list);
+		list_del_init(&msta_link->rc_list);
+
+		changed = msta_link->changed;
+		msta_link->changed = 0;
+
+		sta = wcid_to_sta(&msta_link->wcid);
+		link_id = msta_link->wcid.link_id;
+		msta = msta_link->sta;
+		mvif = msta->vif;
+		vif = container_of((void *)mvif, struct ieee80211_vif, drv_priv);
+
+		mlink = rcu_dereference(mvif->mt76.link[link_id]);
+		if (!mlink)
+			continue;
+
+		link_sta = rcu_dereference(sta->link[link_id]);
+		if (!link_sta)
+			continue;
+
+		link_conf = rcu_dereference(vif->link_conf[link_id]);
+		if (!link_conf)
+			continue;
+
 		spin_unlock_bh(&dev->mt76.sta_poll_lock);
 
-		sta = container_of((void *)msta, struct ieee80211_sta, drv_priv);
-		vif = container_of((void *)msta->vif, struct ieee80211_vif, drv_priv);
+		link = (struct mt7996_vif_link *)mlink;
 
 		if (changed & (IEEE80211_RC_SUPP_RATES_CHANGED |
 			       IEEE80211_RC_NSS_CHANGED |
 			       IEEE80211_RC_BW_CHANGED))
-			mt7996_mcu_add_rate_ctrl(dev, vif, sta, true);
+			mt7996_mcu_add_rate_ctrl(dev, vif, link_conf,
+						 link_sta, link, msta_link,
+						 true);
 
 		if (changed & IEEE80211_RC_SMPS_CHANGED)
-			mt7996_mcu_set_fixed_field(dev, vif, sta, NULL,
+			mt7996_mcu_set_fixed_field(dev, link_sta, link,
+						   msta_link, NULL,
 						   RATE_PARAM_MMPS_UPDATE);
 
 		spin_lock_bh(&dev->mt76.sta_poll_lock);
 	}
 
 	spin_unlock_bh(&dev->mt76.sta_poll_lock);
+	rcu_read_unlock();
 }
 
 void mt7996_mac_work(struct work_struct *work)
@@ -2538,7 +2641,7 @@ static int mt7996_mac_check_twt_req(struct ieee80211_twt_setup *twt)
 }
 
 static bool
-mt7996_mac_twt_param_equal(struct mt7996_sta *msta,
+mt7996_mac_twt_param_equal(struct mt7996_sta_link *msta_link,
 			   struct ieee80211_twt_params *twt_agrt)
 {
 	u16 type = le16_to_cpu(twt_agrt->req_type);
@@ -2549,10 +2652,10 @@ mt7996_mac_twt_param_equal(struct mt7996_sta_link *msta_link,
 	for (i = 0; i < MT7996_MAX_STA_TWT_AGRT; i++) {
 		struct mt7996_twt_flow *f;
 
-		if (!(msta->twt.flowid_mask & BIT(i)))
+		if (!(msta_link->twt.flowid_mask & BIT(i)))
 			continue;
 
-		f = &msta->twt.flow[i];
+		f = &msta_link->twt.flow[i];
 		if (f->duration == twt_agrt->min_twt_dur &&
 		    f->mantissa == twt_agrt->mantissa &&
 		    f->exp == exp &&
@@ -2572,6 +2675,7 @@ void mt7996_mac_add_twt_setup(struct ieee80211_hw *hw,
 	enum ieee80211_twt_setup_cmd setup_cmd = TWT_SETUP_CMD_REJECT;
 	struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
 	struct ieee80211_twt_params *twt_agrt = (void *)twt->params;
+	struct mt7996_sta_link *msta_link = &msta->deflink;
 	u16 req_type = le16_to_cpu(twt_agrt->req_type);
 	enum ieee80211_twt_setup_cmd sta_setup_cmd;
 	struct mt7996_dev *dev = mt7996_hw_dev(hw);
@@ -2586,7 +2690,8 @@ void mt7996_mac_add_twt_setup(struct ieee80211_hw *hw,
 	if (dev->twt.n_agrt == MT7996_MAX_TWT_AGRT)
 		goto unlock;
 
-	if (hweight8(msta->twt.flowid_mask) == ARRAY_SIZE(msta->twt.flow))
+	if (hweight8(msta_link->twt.flowid_mask) ==
+	    ARRAY_SIZE(msta_link->twt.flow))
 		goto unlock;
 
 	if (twt_agrt->min_twt_dur < MT7996_MIN_TWT_DUR) {
@@ -2595,10 +2700,10 @@ void mt7996_mac_add_twt_setup(struct ieee80211_hw *hw,
 		goto unlock;
 	}
 
-	if (mt7996_mac_twt_param_equal(msta, twt_agrt))
+	if (mt7996_mac_twt_param_equal(msta_link, twt_agrt))
 		goto unlock;
 
-	flowid = ffs(~msta->twt.flowid_mask) - 1;
+	flowid = ffs(~msta_link->twt.flowid_mask) - 1;
 	twt_agrt->req_type &= ~cpu_to_le16(IEEE80211_TWT_REQTYPE_FLOWID);
 	twt_agrt->req_type |= le16_encode_bits(flowid,
 					       IEEE80211_TWT_REQTYPE_FLOWID);
@@ -2607,10 +2712,10 @@ void mt7996_mac_add_twt_setup(struct ieee80211_hw *hw,
 	exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP, req_type);
 	sta_setup_cmd = FIELD_GET(IEEE80211_TWT_REQTYPE_SETUP_CMD, req_type);
 
-	flow = &msta->twt.flow[flowid];
+	flow = &msta_link->twt.flow[flowid];
 	memset(flow, 0, sizeof(*flow));
 	INIT_LIST_HEAD(&flow->list);
-	flow->wcid = msta->wcid.idx;
+	flow->wcid = msta_link->wcid.idx;
 	flow->table_id = table_id;
 	flow->id = flowid;
 	flow->duration = twt_agrt->min_twt_dur;
@@ -2628,7 +2733,7 @@ void mt7996_mac_add_twt_setup(struct ieee80211_hw *hw,
 		flow->sched = true;
 		flow->start_tsf = mt7996_mac_twt_sched_list_add(dev, flow);
-		curr_tsf = __mt7996_get_tsf(hw, msta->vif);
+		curr_tsf = __mt7996_get_tsf(hw, &msta->vif->deflink);
 		div_u64_rem(curr_tsf - flow->start_tsf, interval, &rem);
 		flow_tsf = curr_tsf + interval - rem;
 		twt_agrt->twt = cpu_to_le64(flow_tsf);
@@ -2637,12 +2742,13 @@ void mt7996_mac_add_twt_setup(struct ieee80211_hw *hw,
 	}
 	flow->tsf = le64_to_cpu(twt_agrt->twt);
 
-	if (mt7996_mcu_twt_agrt_update(dev, msta->vif, flow, MCU_TWT_AGRT_ADD))
+	if (mt7996_mcu_twt_agrt_update(dev, &msta->vif->deflink, flow,
+				       MCU_TWT_AGRT_ADD))
 		goto unlock;
 
 	setup_cmd = TWT_SETUP_CMD_ACCEPT;
 	dev->twt.table_mask |= BIT(table_id);
-	msta->twt.flowid_mask |= BIT(flowid);
+	msta_link->twt.flowid_mask |= BIT(flowid);
 	dev->twt.n_agrt++;
 
 unlock:
@@ -2655,26 +2761,26 @@ void mt7996_mac_add_twt_setup(struct ieee80211_hw *hw,
 }
 
 void mt7996_mac_twt_teardown_flow(struct mt7996_dev *dev,
-				  struct mt7996_sta *msta,
+				  struct mt7996_vif_link *link,
+				  struct mt7996_sta_link *msta_link,
 				  u8 flowid)
 {
 	struct mt7996_twt_flow *flow;
 
 	lockdep_assert_held(&dev->mt76.mutex);
 
-	if (flowid >= ARRAY_SIZE(msta->twt.flow))
+	if (flowid >= ARRAY_SIZE(msta_link->twt.flow))
 		return;
 
-	if (!(msta->twt.flowid_mask & BIT(flowid)))
+	if (!(msta_link->twt.flowid_mask & BIT(flowid)))
 		return;
 
-	flow = &msta->twt.flow[flowid];
-	if (mt7996_mcu_twt_agrt_update(dev, msta->vif, flow,
-				       MCU_TWT_AGRT_DELETE))
+	flow = &msta_link->twt.flow[flowid];
+	if (mt7996_mcu_twt_agrt_update(dev, link, flow, MCU_TWT_AGRT_DELETE))
 		return;
 
 	list_del_init(&flow->list);
-	msta->twt.flowid_mask &= ~BIT(flowid);
+	msta_link->twt.flowid_mask &= ~BIT(flowid);
 	dev->twt.table_mask &= ~BIT(flow->table_id);
 	dev->twt.n_agrt--;
 }
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/main.c b/drivers/net/wireless/mediatek/mt76/mt7996/main.c
index 69dd565d83190..91c64e3a0860f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/main.c
@@ -158,58 +158,101 @@ mt7996_init_bitrate_mask(struct ieee80211_vif *vif, struct mt7996_vif_link *mlin
 static int mt7996_set_hw_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
 			     struct ieee80211_vif *vif, struct ieee80211_sta *sta,
-			     struct mt7996_vif_link *mlink, struct ieee80211_key_conf *key)
+			     struct ieee80211_key_conf *key)
 {
 	struct mt7996_dev *dev = mt7996_hw_dev(hw);
-	struct mt7996_sta *msta = sta ? (struct mt7996_sta *)sta->drv_priv :
-					&mlink->sta;
-	struct mt76_wcid *wcid = &msta->wcid;
-	u8 *wcid_keyidx = &wcid->hw_key_idx;
-	struct mt7996_phy *phy;
 	int idx = key->keyidx;
+	unsigned int link_id;
+	unsigned long links;
+
+	if (key->link_id >= 0)
+		links = BIT(key->link_id);
+	else if (sta && sta->valid_links)
+		links = sta->valid_links;
+	else if (vif->valid_links)
+		links = vif->valid_links;
+	else
+		links = BIT(0);
 
-	phy = mt7996_vif_link_phy(mlink);
-	if (!phy)
-		return -EINVAL;
+	for_each_set_bit(link_id, &links, IEEE80211_MLD_MAX_NUM_LINKS) {
+		struct mt7996_sta_link *msta_link;
+		struct mt7996_vif_link *link;
+		u8 *wcid_keyidx;
+		int err;
 
-	if (sta && !wcid->sta)
-		return -EOPNOTSUPP;
+		link = mt7996_vif_link(dev, vif, link_id);
+		if (!link)
+			continue;
 
-	switch (key->cipher) {
-	case WLAN_CIPHER_SUITE_AES_CMAC:
-	case WLAN_CIPHER_SUITE_BIP_CMAC_256:
-	case WLAN_CIPHER_SUITE_BIP_GMAC_128:
-	case WLAN_CIPHER_SUITE_BIP_GMAC_256:
-		if (key->keyidx == 6 || key->keyidx == 7) {
-			wcid_keyidx = &wcid->hw_key_idx2;
-			key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIE;
+		if (sta) {
+			struct mt7996_sta *msta;
+
+			msta = (struct mt7996_sta *)sta->drv_priv;
+			msta_link = mt76_dereference(msta->link[link_id],
+						     &dev->mt76);
+			if (!msta_link)
+				continue;
+
+			if (!msta_link->wcid.sta)
+				return -EOPNOTSUPP;
+		} else {
+			msta_link = &link->msta_link;
+		}
+		wcid_keyidx = &msta_link->wcid.hw_key_idx;
+
+		switch (key->cipher) {
+		case WLAN_CIPHER_SUITE_AES_CMAC:
+		case WLAN_CIPHER_SUITE_BIP_CMAC_256:
+		case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+		case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+			if (key->keyidx == 6 || key->keyidx == 7) {
+				wcid_keyidx = &msta_link->wcid.hw_key_idx2;
+				key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIE;
+			}
+			break;
+		default:
+			break;
 		}
-		break;
-	default:
-		break;
-	}
 
-	if (cmd == SET_KEY && !sta && !mlink->mt76.cipher) {
-		mlink->mt76.cipher = mt76_connac_mcu_get_cipher(key->cipher);
-		mt7996_mcu_add_bss_info(phy, vif, &vif->bss_conf, &mlink->mt76, true);
-	}
+		if (cmd == SET_KEY && !sta && !link->mt76.cipher) {
+			struct ieee80211_bss_conf *link_conf;
 
-	if (cmd == SET_KEY) {
-		*wcid_keyidx = idx;
-	} else {
-		if (idx == *wcid_keyidx)
-			*wcid_keyidx = -1;
-		return 0;
-	}
+			link_conf = link_conf_dereference_protected(vif,
+								    link_id);
+			if (!link_conf)
+				link_conf = &vif->bss_conf;
+
+			link->mt76.cipher =
+				mt76_connac_mcu_get_cipher(key->cipher);
+			mt7996_mcu_add_bss_info(link->phy, vif, link_conf,
+						&link->mt76, msta_link, true);
+		}
 
-	mt76_wcid_key_setup(&dev->mt76, wcid, key);
+		if (cmd == SET_KEY) {
+			*wcid_keyidx = idx;
+		} else {
+			if (idx == *wcid_keyidx)
+				*wcid_keyidx = -1;
+			continue;
+		}
 
-	if (key->keyidx == 6 || key->keyidx == 7)
-		return mt7996_mcu_bcn_prot_enable(dev, vif, key);
+		mt76_wcid_key_setup(&dev->mt76, &msta_link->wcid, key);
+
+		if (key->keyidx == 6 || key->keyidx == 7) {
+			err = mt7996_mcu_bcn_prot_enable(dev, link,
+							 msta_link, key);
+			if (err)
+				return err;
+		}
+
+		err = mt7996_mcu_add_key(&dev->mt76, vif, key,
+					 MCU_WMWA_UNI_CMD(STA_REC_UPDATE),
+					 &msta_link->wcid, cmd);
+		if (err)
+			return err;
+	}
 
-	return mt7996_mcu_add_key(&dev->mt76, vif, key,
-				  MCU_WMWA_UNI_CMD(STA_REC_UPDATE),
-				  &msta->wcid, cmd);
+	return 0;
 }
 
 static void
@@ -217,12 +260,10 @@ mt7996_key_iter(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
 	       struct ieee80211_sta *sta, struct ieee80211_key_conf *key,
 	       void *data)
 {
-	struct mt7996_vif_link *mlink = data;
-
 	if (sta)
 		return;
 
-	WARN_ON(mt7996_set_hw_key(hw, SET_KEY, vif, NULL, mlink, key));
+	WARN_ON(mt7996_set_hw_key(hw, SET_KEY, vif, NULL, key));
 }
 
 int mt7996_vif_link_add(struct mt76_phy *mphy, struct ieee80211_vif *vif,
@@ -230,6 +271,8 @@ int mt7996_vif_link_add(struct mt76_phy *mphy, struct ieee80211_vif *vif,
 			struct mt76_vif_link *mlink)
 {
 	struct mt7996_vif_link *link = container_of(mlink, struct mt7996_vif_link, mt76);
+	struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
+	struct mt7996_sta_link *msta_link = &link->msta_link;
 	struct mt7996_phy *phy = mphy->priv;
 	struct mt7996_dev *dev = phy->dev;
 	u8 band_idx = phy->mt76->band_idx;
@@ -248,7 +291,8 @@ int mt7996_vif_link_add(struct mt76_phy *mphy, struct ieee80211_vif *vif,
 	mlink->omac_idx = idx;
 	mlink->band_idx = band_idx;
 	mlink->wmm_idx = vif->type == NL80211_IFTYPE_AP ? 0 : 3;
-	mlink->wcid = &link->sta.wcid;
+	mlink->wcid = &msta_link->wcid;
+	mlink->wcid->offchannel = mlink->offchannel;
 
 	ret = mt7996_mcu_add_dev_info(phy, vif, link_conf, mlink, true);
 	if (ret)
@@ -259,10 +303,11 @@ int mt7996_vif_link_add(struct mt76_phy *mphy, struct ieee80211_vif *vif,
 
 	idx = MT7996_WTBL_RESERVED - mlink->idx;
 
-	INIT_LIST_HEAD(&link->sta.rc_list);
-	link->sta.wcid.idx = idx;
-	link->sta.wcid.tx_info |= MT_WCID_TX_INFO_SET;
-	mt76_wcid_init(&link->sta.wcid, band_idx);
+	INIT_LIST_HEAD(&msta_link->rc_list);
+	msta_link->wcid.idx = idx;
+	msta_link->wcid.link_id = link_conf->link_id;
+	msta_link->wcid.tx_info |= MT_WCID_TX_INFO_SET;
+	mt76_wcid_init(&msta_link->wcid, band_idx);
 
 	mt7996_mac_wtbl_update(dev, idx,
 			       MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
@@ -283,15 +328,19 @@ int mt7996_vif_link_add(struct mt76_phy *mphy, struct ieee80211_vif *vif,
 
 	mt7996_init_bitrate_mask(vif, link);
 
-	mt7996_mcu_add_bss_info(phy, vif, link_conf, mlink, true);
+	mt7996_mcu_add_bss_info(phy, vif, link_conf, mlink, msta_link, true);
 	/* defer the first STA_REC of BMC entry to BSS_CHANGED_BSSID for STA
 	 * interface, since firmware only records BSSID when the entry is new
 	 */
 	if (vif->type != NL80211_IFTYPE_STATION)
-		mt7996_mcu_add_sta(dev, vif, mlink, NULL, CONN_STATE_PORT_SECURE, true);
-	rcu_assign_pointer(dev->mt76.wcid[idx], &link->sta.wcid);
+		mt7996_mcu_add_sta(dev, link_conf, NULL, link, NULL,
+				   CONN_STATE_PORT_SECURE, true);
+	rcu_assign_pointer(dev->mt76.wcid[idx], &msta_link->wcid);
 
-	ieee80211_iter_keys(mphy->hw, vif, mt7996_key_iter, link);
+	ieee80211_iter_keys(mphy->hw, vif, mt7996_key_iter, NULL);
+
+	if (mvif->mt76.deflink_id == IEEE80211_LINK_UNSPECIFIED)
+		mvif->mt76.deflink_id = link_conf->link_id;
 
 	return 0;
 }
@@ -301,29 +350,42 @@ void mt7996_vif_link_remove(struct mt76_phy *mphy, struct ieee80211_vif *vif,
 			    struct mt76_vif_link *mlink)
 {
 	struct mt7996_vif_link *link = container_of(mlink, struct mt7996_vif_link, mt76);
+	struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
+	struct mt7996_sta_link *msta_link = &link->msta_link;
 	struct mt7996_phy *phy = mphy->priv;
 	struct mt7996_dev *dev = phy->dev;
-	struct mt7996_sta *msta;
-	int idx;
+	int idx = msta_link->wcid.idx;
 
-	msta = &link->sta;
-	idx = msta->wcid.idx;
-	mt7996_mcu_add_sta(dev, vif, mlink, NULL, CONN_STATE_DISCONNECT, false);
-	mt7996_mcu_add_bss_info(phy, vif, link_conf, mlink, false);
+	mt7996_mcu_add_sta(dev, link_conf, NULL, link, NULL,
+			   CONN_STATE_DISCONNECT, false);
+	mt7996_mcu_add_bss_info(phy, vif, link_conf, mlink, msta_link, false);
 	mt7996_mcu_add_dev_info(phy, vif, link_conf, mlink, false);
 
 	rcu_assign_pointer(dev->mt76.wcid[idx], NULL);
 
+	if (mvif->mt76.deflink_id == link_conf->link_id) {
+		struct ieee80211_bss_conf *iter;
+		unsigned int link_id;
+
+		mvif->mt76.deflink_id = IEEE80211_LINK_UNSPECIFIED;
+		for_each_vif_active_link(vif, iter, link_id) {
+			if (link_id != IEEE80211_LINK_UNSPECIFIED) {
+				mvif->mt76.deflink_id = link_id;
+				break;
+			}
+		}
+	}
+
 	dev->mt76.vif_mask &= ~BIT_ULL(mlink->idx);
 	phy->omac_mask &= ~BIT_ULL(mlink->omac_idx);
 
 	spin_lock_bh(&dev->mt76.sta_poll_lock);
-	if (!list_empty(&msta->wcid.poll_list))
-		list_del_init(&msta->wcid.poll_list);
+	if (!list_empty(&msta_link->wcid.poll_list))
+		list_del_init(&msta_link->wcid.poll_list);
 	spin_unlock_bh(&dev->mt76.sta_poll_lock);
 
-	mt76_wcid_cleanup(&dev->mt76, &msta->wcid);
+	mt76_wcid_cleanup(&dev->mt76, &msta_link->wcid);
 }
 
 static void mt7996_phy_set_rxfilter(struct mt7996_phy *phy)
@@ -399,6 +461,7 @@ static int mt7996_add_interface(struct ieee80211_hw *hw,
 	mt76_vif_init(vif, &mvif->mt76);
 
 	vif->offload_flags |= IEEE80211_OFFLOAD_ENCAP_4ADDR;
+	mvif->mt76.deflink_id = IEEE80211_LINK_UNSPECIFIED;
 
 out:
 	mutex_unlock(&dev->mt76.mutex);
@@ -480,7 +543,6 @@ static int mt7996_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
 {
 	struct mt7996_dev *dev = mt7996_hw_dev(hw);
 	struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
-	struct mt7996_vif_link *mlink = &mvif->deflink;
 	int err;
 
 	/* The hardware does not support per-STA RX GTK, fallback
@@ -515,11 +577,11 @@ static int mt7996_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
 		return -EOPNOTSUPP;
 	}
 
-	if (!mt7996_vif_link_phy(mlink))
-		return 0; /* defer until after link add */
+	if (!mt7996_vif_link_phy(&mvif->deflink))
+		return 0; /* defer until after link add */
 
 	mutex_lock(&dev->mt76.mutex);
-	err = mt7996_set_hw_key(hw, cmd, vif, sta, mlink, key);
+	err = mt7996_set_hw_key(hw, cmd, vif, sta, key);
 	mutex_unlock(&dev->mt76.mutex);
 
 	return err;
@@ -601,6 +663,33 @@ static void mt7996_configure_filter(struct ieee80211_hw *hw,
 	mutex_unlock(&dev->mt76.mutex);
 }
 
+static int
+mt7996_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+		   unsigned int link_id, int *dbm)
+{
+	struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
+	struct mt7996_phy *phy = mt7996_vif_link_phy(&mvif->deflink);
+	struct mt7996_dev *dev = mt7996_hw_dev(hw);
+	struct wireless_dev *wdev;
+	int n_chains, delta, i;
+
+	if (!phy) {
+		wdev = ieee80211_vif_to_wdev(vif);
+		for (i = 0; i < hw->wiphy->n_radio; i++)
+			if (wdev->radio_mask & BIT(i))
+				phy = dev->radio_phy[i];
+
+		if (!phy)
+			return -EINVAL;
+	}
+
+	n_chains = hweight16(phy->mt76->chainmask);
+	delta = mt76_tx_power_nss_delta(n_chains);
+	*dbm = DIV_ROUND_UP(phy->mt76->txpower_cur + delta, 2);
+
+	return 0;
+}
+
 static u8
 mt7996_get_rates_table(struct mt7996_phy *phy, struct ieee80211_bss_conf *conf,
 		       bool beacon, bool mcast)
@@ -630,12 +719,11 @@ mt7996_get_rates_table(struct mt7996_phy *phy, struct ieee80211_bss_conf *conf,
 }
 
 static void
-mt7996_update_mu_group(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+mt7996_update_mu_group(struct ieee80211_hw *hw, struct mt7996_vif_link *link,
 		       struct ieee80211_bss_conf *info)
 {
-	struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
 	struct mt7996_dev *dev = mt7996_hw_dev(hw);
-	u8 band = mvif->deflink.mt76.band_idx;
+	u8 band = link->mt76.band_idx;
 	u32 *mu;
 
 	mu = (u32 *)info->mu_group.membership;
@@ -649,23 +737,56 @@ mt7996_update_mu_group(struct ieee80211_hw *hw, struct mt7996_vif_link *link,
 	mt76_wr(dev, MT_WF_PHYRX_BAND_GID_TAB_POS3(band), mu[3]);
 }
 
-static void mt7996_bss_info_changed(struct ieee80211_hw *hw,
-				    struct ieee80211_vif *vif,
-				    struct ieee80211_bss_conf *info,
-				    u64 changed)
+static void
+mt7996_vif_cfg_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+		       u64 changed)
 {
 	struct mt7996_dev *dev = mt7996_hw_dev(hw);
-	struct mt76_vif_link *mvif;
+
+	mutex_lock(&dev->mt76.mutex);
+
+	if ((changed & BSS_CHANGED_ASSOC) && vif->cfg.assoc) {
+		struct ieee80211_bss_conf *link_conf;
+		unsigned long link_id;
+
+		for_each_vif_active_link(vif, link_conf, link_id) {
+			struct mt7996_vif_link *link;
+
+			link = mt7996_vif_link(dev, vif, link_id);
+			if (!link)
+				continue;
+
+			if (!link->phy)
+				continue;
+
+			mt7996_mcu_add_bss_info(link->phy, vif, link_conf,
+						&link->mt76, &link->msta_link,
+						true);
+			mt7996_mcu_add_sta(dev, link_conf, NULL, link, NULL,
+					   CONN_STATE_PORT_SECURE,
+					   !!(changed & BSS_CHANGED_BSSID));
+		}
+	}
+
+	mutex_unlock(&dev->mt76.mutex);
+}
+
+static void
+mt7996_link_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+			 struct ieee80211_bss_conf *info, u64 changed)
+{
+	struct mt7996_dev *dev = mt7996_hw_dev(hw);
+	struct mt7996_vif_link *link;
 	struct mt7996_phy *phy;
 	struct mt76_phy *mphy;
 
 	mutex_lock(&dev->mt76.mutex);
 
-	mvif = mt76_vif_conf_link(&dev->mt76, vif, info);
-	if (!mvif)
+	link = mt7996_vif_conf_link(dev, vif, info);
+	if (!link)
 		goto out;
 
-	mphy = mt76_vif_link_phy(mvif);
+	mphy = mt76_vif_link_phy(&link->mt76);
 	if (!mphy)
 		goto out;
 
@@ -675,16 +796,14 @@ static void mt7996_bss_info_changed(struct ieee80211_hw *hw,
 	 * and then peer references bss_info_rfch to set bandwidth cap.
 	 */
 	if ((changed & BSS_CHANGED_BSSID && !is_zero_ether_addr(info->bssid)) ||
-	    (changed & BSS_CHANGED_ASSOC && vif->cfg.assoc) ||
 	    (changed & BSS_CHANGED_BEACON_ENABLED && info->enable_beacon)) {
-		mt7996_mcu_add_bss_info(phy, vif, info, mvif, true);
-		mt7996_mcu_add_sta(dev, vif, mvif, NULL, CONN_STATE_PORT_SECURE,
+		mt7996_mcu_add_bss_info(phy, vif, info, &link->mt76,
+					&link->msta_link, true);
+		mt7996_mcu_add_sta(dev, info, NULL, link, NULL,
+				   CONN_STATE_PORT_SECURE,
 				   !!(changed & BSS_CHANGED_BSSID));
 	}
 
-	if (changed & BSS_CHANGED_ERP_CTS_PROT)
-		mt7996_mac_enable_rtscts(dev, vif, info->use_cts_prot);
-
 	if (changed & BSS_CHANGED_ERP_SLOT) {
 		int slottime = info->use_short_slot ? 9 : 20;
 
@@ -695,11 +814,11 @@ static void mt7996_bss_info_changed(struct ieee80211_hw *hw,
 	}
 
 	if (changed & BSS_CHANGED_MCAST_RATE)
-		mvif->mcast_rates_idx =
+		link->mt76.mcast_rates_idx =
 			mt7996_get_rates_table(phy, info, false, true);
 
 	if (changed & BSS_CHANGED_BASIC_RATES)
-		mvif->basic_rates_idx =
+		link->mt76.basic_rates_idx =
 			mt7996_get_rates_table(phy, info, false, false);
 
 	/* ensure that enable txcmd_mode after bss_info */
@@ -707,19 +826,19 @@ static void mt7996_bss_info_changed(struct ieee80211_hw *hw,
 		mt7996_mcu_set_tx(dev, vif, info);
 
 	if (changed & BSS_CHANGED_HE_OBSS_PD)
-		mt7996_mcu_add_obss_spr(phy, vif, &info->he_obss_pd);
+		mt7996_mcu_add_obss_spr(phy, link, &info->he_obss_pd);
 
 	if (changed & BSS_CHANGED_HE_BSS_COLOR) {
 		if ((vif->type == NL80211_IFTYPE_AP &&
-		     mvif->omac_idx <= HW_BSSID_MAX) ||
+		     link->mt76.omac_idx <= HW_BSSID_MAX) ||
 		    vif->type == NL80211_IFTYPE_STATION)
-			mt7996_mcu_update_bss_color(dev, mvif,
+			mt7996_mcu_update_bss_color(dev, &link->mt76,
 						    &info->he_bss_color);
 	}
 
 	if (changed & (BSS_CHANGED_BEACON |
 		       BSS_CHANGED_BEACON_ENABLED)) {
-		mvif->beacon_rates_idx =
+		link->mt76.beacon_rates_idx =
 			mt7996_get_rates_table(phy, info, true, false);
 
 		mt7996_mcu_add_beacon(hw, vif, info);
@@ -727,10 +846,10 @@ static void mt7996_bss_info_changed(struct ieee80211_hw *hw,
 
 	if (changed & (BSS_CHANGED_UNSOL_BCAST_PROBE_RESP |
 		       BSS_CHANGED_FILS_DISCOVERY))
-		mt7996_mcu_beacon_inband_discov(dev, vif, changed);
+		mt7996_mcu_beacon_inband_discov(dev, info, link, changed);
 
 	if (changed & BSS_CHANGED_MU_GROUPS)
-		mt7996_update_mu_group(hw, vif, info);
+		mt7996_update_mu_group(hw, link, info);
 
 	if (changed & BSS_CHANGED_TXPOWER &&
 	    info->txpower != phy->txpower) {
@@ -754,96 +873,322 @@ mt7996_channel_switch_beacon(struct ieee80211_hw *hw,
 	mutex_unlock(&dev->mt76.mutex);
 }
 
-int mt7996_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
-		       struct ieee80211_sta *sta)
+static int
+mt7996_mac_sta_init_link(struct mt7996_dev *dev,
+			 struct ieee80211_bss_conf *link_conf,
+			 struct ieee80211_link_sta *link_sta,
+			 struct mt7996_vif_link *link, unsigned int link_id)
 {
-	struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
+	struct ieee80211_sta *sta = link_sta->sta;
 	struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
-	struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
-	struct mt7996_vif_link *link = &mvif->deflink;
-	u8 band_idx = link->phy->mt76->band_idx;
+	struct mt7996_phy *phy = link->phy;
+	struct mt7996_sta_link *msta_link;
 	int idx;
 
 	idx = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7996_WTBL_STA);
 	if (idx < 0)
 		return -ENOSPC;
 
-	INIT_LIST_HEAD(&msta->rc_list);
-	INIT_LIST_HEAD(&msta->wcid.poll_list);
-	msta->vif = mvif;
-	msta->wcid.sta = 1;
-	msta->wcid.idx = idx;
-	msta->wcid.phy_idx = band_idx;
+	if (msta->deflink_id == IEEE80211_LINK_UNSPECIFIED) {
+		int i;
 
-	ewma_avg_signal_init(&msta->avg_ack_signal);
+		msta_link = &msta->deflink;
+		msta->deflink_id = link_id;
 
-	mt7996_mac_wtbl_update(dev, idx,
+		for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
+			struct mt76_txq *mtxq;
+
+			if (!sta->txq[i])
+				continue;
+
+			mtxq = (struct mt76_txq *)sta->txq[i]->drv_priv;
+			mtxq->wcid = idx;
+		}
+	} else {
+		msta_link = kzalloc(sizeof(*msta_link), GFP_KERNEL);
+		if (!msta_link)
+			return -ENOMEM;
+	}
+
+	INIT_LIST_HEAD(&msta_link->rc_list);
+	INIT_LIST_HEAD(&msta_link->wcid.poll_list);
+	msta_link->sta = msta;
+	msta_link->wcid.sta = 1;
+	msta_link->wcid.idx = idx;
+	msta_link->wcid.link_id = link_id;
+
+	ewma_avg_signal_init(&msta_link->avg_ack_signal);
+	ewma_signal_init(&msta_link->wcid.rssi);
+
+	rcu_assign_pointer(msta->link[link_id], msta_link);
+
+	mt7996_mac_wtbl_update(dev, idx,
 			       MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
-	mt7996_mcu_add_sta(dev, vif, &link->mt76, sta, CONN_STATE_DISCONNECT,
-			   true);
+	mt7996_mcu_add_sta(dev, link_conf, link_sta, link, msta_link,
+			   CONN_STATE_DISCONNECT, true);
+
+	rcu_assign_pointer(dev->mt76.wcid[idx], &msta_link->wcid);
+	mt76_wcid_init(&msta_link->wcid, phy->mt76->band_idx);
+
+	return 0;
+}
+
+static void
+mt7996_mac_sta_deinit_link(struct mt7996_dev *dev,
+			   struct mt7996_sta_link *msta_link)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(msta_link->wcid.aggr); i++)
+		mt76_rx_aggr_stop(&dev->mt76, &msta_link->wcid, i);
+
+	mt7996_mac_wtbl_update(dev, msta_link->wcid.idx,
+			       MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
+
+	spin_lock_bh(&dev->mt76.sta_poll_lock);
+	if (!list_empty(&msta_link->wcid.poll_list))
+		list_del_init(&msta_link->wcid.poll_list);
+	if (!list_empty(&msta_link->rc_list))
+		list_del_init(&msta_link->rc_list);
+	spin_unlock_bh(&dev->mt76.sta_poll_lock);
+
+	mt76_wcid_cleanup(&dev->mt76, &msta_link->wcid);
+	mt76_wcid_mask_clear(dev->mt76.wcid_mask, msta_link->wcid.idx);
+}
+
+static void
+mt7996_mac_sta_remove_links(struct mt7996_dev *dev, struct ieee80211_sta *sta,
+			    unsigned long links)
+{
+	struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
+	struct mt76_dev *mdev = &dev->mt76;
+	unsigned int link_id;
+
+	for_each_set_bit(link_id, &links, IEEE80211_MLD_MAX_NUM_LINKS) {
+		struct mt7996_sta_link *msta_link = NULL;
+
+		msta_link = rcu_replace_pointer(msta->link[link_id], msta_link,
+						lockdep_is_held(&mdev->mutex));
+		if (!msta_link)
+			continue;
+
+		mt7996_mac_sta_deinit_link(dev, msta_link);
+		if (msta->deflink_id == link_id) {
+			msta->deflink_id = IEEE80211_LINK_UNSPECIFIED;
+			continue;
+		}
+
+		kfree_rcu(msta_link, rcu_head);
+	}
+}
+
+static int
+mt7996_mac_sta_add_links(struct mt7996_dev *dev, struct ieee80211_vif *vif,
+			 struct ieee80211_sta *sta, unsigned long new_links)
+{
+	struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
+	unsigned int link_id;
+	int err;
+
+	for_each_set_bit(link_id, &new_links, IEEE80211_MLD_MAX_NUM_LINKS) {
+		struct ieee80211_bss_conf *link_conf;
+		struct ieee80211_link_sta *link_sta;
+		struct mt7996_vif_link *link;
+
+		if (rcu_access_pointer(msta->link[link_id]))
+			continue;
+
+		link_conf = link_conf_dereference_protected(vif, link_id);
+		if (!link_conf)
+			goto error_unlink;
+
+		link = mt7996_vif_link(dev, vif, link_id);
+		if (!link)
+			goto error_unlink;
+
+		link_sta = link_sta_dereference_protected(sta, link_id);
+		if (!link_sta)
+			goto error_unlink;
+
+		err = mt7996_mac_sta_init_link(dev, link_conf, link_sta, link,
+					       link_id);
+		if (err)
+			goto error_unlink;
+	}
 
 	return 0;
+
+error_unlink:
+	mt7996_mac_sta_remove_links(dev, sta, new_links);
+
+	return err;
+}
+
+static int
+mt7996_mac_sta_change_links(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+			    struct ieee80211_sta *sta, u16 old_links,
+			    u16 new_links)
+{
+	struct mt7996_dev *dev = mt7996_hw_dev(hw);
+	unsigned long add = new_links & ~old_links;
+	unsigned long rem = old_links & ~new_links;
+	int ret;
+
+	mutex_lock(&dev->mt76.mutex);
+
+	mt7996_mac_sta_remove_links(dev, sta, rem);
+	ret = mt7996_mac_sta_add_links(dev, vif, sta, add);
+
+	mutex_unlock(&dev->mt76.mutex);
+
+	return ret;
 }
 
-int mt7996_mac_sta_event(struct mt76_dev *mdev, struct ieee80211_vif *vif,
-			 struct ieee80211_sta *sta, enum mt76_sta_event ev)
+static int
+mt7996_mac_sta_add(struct mt76_phy *mphy, struct ieee80211_vif *vif,
+		   struct ieee80211_sta *sta)
 {
+	struct mt76_dev *mdev = mphy->dev;
 	struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
 	struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
 	struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
-	struct mt7996_vif_link *link = &mvif->deflink;
-	int i, ret;
+	unsigned long links = sta->mlo ? sta->valid_links : BIT(0);
+	int err;
 
-	switch (ev) {
-	case MT76_STA_EVENT_ASSOC:
-		ret = mt7996_mcu_add_sta(dev, vif, &link->mt76, sta,
-					 CONN_STATE_CONNECT, true);
-		if (ret)
-			return ret;
+	mutex_lock(&mdev->mutex);
 
-		ret = mt7996_mcu_add_rate_ctrl(dev, vif, sta, false);
-		if (ret)
-			return ret;
+	msta->deflink_id = IEEE80211_LINK_UNSPECIFIED;
+	msta->vif = mvif;
+	err = mt7996_mac_sta_add_links(dev, vif, sta, links);
+	if (!err)
+		mphy->num_sta++;
 
-		msta->wcid.tx_info |= MT_WCID_TX_INFO_SET;
-		msta->wcid.sta = 1;
+	mutex_unlock(&mdev->mutex);
 
-		return 0;
+	return err;
+}
 
-	case MT76_STA_EVENT_AUTHORIZE:
-		return mt7996_mcu_add_sta(dev, vif, &link->mt76, sta,
-					  CONN_STATE_PORT_SECURE, false);
+static int
+mt7996_mac_sta_event(struct mt7996_dev *dev, struct ieee80211_vif *vif,
+		     struct ieee80211_sta *sta, enum mt76_sta_event ev)
+{
+	struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
+	unsigned long links = sta->valid_links;
+	struct ieee80211_link_sta *link_sta;
+	unsigned int link_id;
+
+	for_each_sta_active_link(vif, sta, link_sta, link_id) {
+		struct ieee80211_bss_conf *link_conf;
+		struct mt7996_sta_link *msta_link;
+		struct mt7996_vif_link *link;
+		int i, err;
+
+		link_conf = link_conf_dereference_protected(vif, link_id);
+		if (!link_conf)
+			continue;
 
-	case MT76_STA_EVENT_DISASSOC:
-		for (i = 0; i < ARRAY_SIZE(msta->twt.flow); i++)
-			mt7996_mac_twt_teardown_flow(dev, msta, i);
+		link = mt7996_vif_link(dev, vif, link_id);
+		if (!link)
+			continue;
 
-		mt7996_mcu_add_sta(dev, vif, &link->mt76, sta,
-				   CONN_STATE_DISCONNECT, false);
-		msta->wcid.sta_disabled = 1;
-		msta->wcid.sta = 0;
+		msta_link = mt76_dereference(msta->link[link_id], &dev->mt76);
+		if (!msta_link)
+			continue;
 
-		return 0;
+		switch (ev) {
+		case MT76_STA_EVENT_ASSOC:
+			err = mt7996_mcu_add_sta(dev, link_conf, link_sta,
+						 link, msta_link,
+						 CONN_STATE_CONNECT, true);
+			if (err)
+				return err;
+
+			err = mt7996_mcu_add_rate_ctrl(dev, vif, link_conf,
+						       link_sta, link,
+						       msta_link, false);
+			if (err)
+				return err;
+
+			msta_link->wcid.tx_info |= MT_WCID_TX_INFO_SET;
+			msta_link->wcid.sta = 1;
+			break;
+		case MT76_STA_EVENT_AUTHORIZE:
+			err = mt7996_mcu_add_sta(dev, link_conf, link_sta,
+						 link, msta_link,
+						 CONN_STATE_PORT_SECURE, false);
+			if (err)
+				return err;
+			break;
+		case MT76_STA_EVENT_DISASSOC:
+			for (i = 0; i < ARRAY_SIZE(msta_link->twt.flow); i++)
+				mt7996_mac_twt_teardown_flow(dev, link,
+							     msta_link, i);
+
+			if (sta->mlo && links == BIT(link_id)) /* last link */
+				mt7996_mcu_teardown_mld_sta(dev, link,
+							    msta_link);
+			else
+				mt7996_mcu_add_sta(dev, link_conf, link_sta,
+						   link, msta_link,
+						   CONN_STATE_DISCONNECT, false);
+			msta_link->wcid.sta_disabled = 1;
+			msta_link->wcid.sta = 0;
+			links = links & ~BIT(link_id);
+			break;
+		}
 	}
 
 	return 0;
 }
 
-void mt7996_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
-			   struct ieee80211_sta *sta)
+static void
+mt7996_mac_sta_remove(struct mt76_phy *mphy, struct ieee80211_vif *vif,
+		      struct ieee80211_sta *sta)
 {
+	struct mt76_dev *mdev = mphy->dev;
 	struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
-	struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
+	unsigned long links = sta->mlo ? sta->valid_links : BIT(0);
 
-	mt7996_mac_wtbl_update(dev, msta->wcid.idx,
-			       MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
+	mutex_lock(&mdev->mutex);
+
+	mt7996_mac_sta_remove_links(dev, sta, links);
+	mphy->num_sta--;
+
+	mutex_unlock(&mdev->mutex);
+}
+
+static int
+mt7996_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+		 struct ieee80211_sta *sta, enum ieee80211_sta_state old_state,
+		 enum ieee80211_sta_state new_state)
+{
+	struct mt76_phy *mphy = mt76_vif_phy(hw, vif);
+	struct mt7996_dev *dev = mt7996_hw_dev(hw);
+	enum mt76_sta_event ev;
 
-	spin_lock_bh(&mdev->sta_poll_lock);
-	if (!list_empty(&msta->wcid.poll_list))
-		list_del_init(&msta->wcid.poll_list);
-	if (!list_empty(&msta->rc_list))
-		list_del_init(&msta->rc_list);
-	spin_unlock_bh(&mdev->sta_poll_lock);
+	if (!mphy)
+		return -EINVAL;
+
+	if (old_state == IEEE80211_STA_NOTEXIST &&
+	    new_state == IEEE80211_STA_NONE)
+		return mt7996_mac_sta_add(mphy, vif, sta);
+
+	if (old_state == IEEE80211_STA_NONE &&
+	    new_state == IEEE80211_STA_NOTEXIST)
+		mt7996_mac_sta_remove(mphy, vif, sta);
+
+	if (old_state == IEEE80211_STA_AUTH &&
+	    new_state == IEEE80211_STA_ASSOC)
+		ev = MT76_STA_EVENT_ASSOC;
+	else if (old_state == IEEE80211_STA_ASSOC &&
+		 new_state == IEEE80211_STA_AUTHORIZED)
+		ev = MT76_STA_EVENT_AUTHORIZE;
+	else if (old_state == IEEE80211_STA_ASSOC &&
+		 new_state == IEEE80211_STA_AUTH)
+		ev = MT76_STA_EVENT_DISASSOC;
+	else
+		return 0;
+
+	return mt7996_mac_sta_event(dev, vif, sta, ev);
 }
 
 static void mt7996_tx(struct ieee80211_hw *hw,
@@ -855,12 +1200,18 @@ static void mt7996_tx(struct ieee80211_hw *hw,
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 	struct ieee80211_vif *vif = info->control.vif;
 	struct mt76_wcid *wcid = &dev->mt76.global_wcid;
+	u8 link_id = u32_get_bits(info->control.flags,
+				  IEEE80211_TX_CTRL_MLO_LINK);
+
+	rcu_read_lock();
 
 	if (vif) {
-		struct mt7996_vif *mvif;
+		struct mt7996_vif *mvif = (void *)vif->drv_priv;
+		struct mt76_vif_link *mlink;
 
-		mvif = (struct mt7996_vif *)vif->drv_priv;
-		wcid = &mvif->deflink.sta.wcid;
+		mlink = rcu_dereference(mvif->mt76.link[link_id]);
+		if (mlink && mlink->wcid)
+			wcid = mlink->wcid;
 
 		if (mvif->mt76.roc_phy &&
 		    (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)) {
@@ -872,19 +1223,22 @@ static void mt7996_tx(struct ieee80211_hw *hw,
 		}
 	}
 
-	if (control->sta) {
-		struct mt7996_sta *sta;
-
-		sta = (struct mt7996_sta *)control->sta->drv_priv;
-		wcid = &sta->wcid;
-	}
-
 	if (!mphy) {
 		ieee80211_free_txskb(hw, skb);
-		return;
+		goto unlock;
 	}
 
+	if (control->sta) {
+		struct mt7996_sta *msta = (void *)control->sta->drv_priv;
+		struct mt7996_sta_link *msta_link;
+
+		msta_link = rcu_dereference(msta->link[link_id]);
+		if (msta_link)
+			wcid = &msta_link->wcid;
+	}
 	mt76_tx(mphy, control->sta, wcid, skb);
+unlock:
+	rcu_read_unlock();
 }
 
 static int mt7996_set_rts_threshold(struct ieee80211_hw *hw, u32 val)
@@ -914,11 +1268,13 @@ mt7996_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
 	enum ieee80211_ampdu_mlme_action action = params->action;
 	struct mt7996_dev *dev = mt7996_hw_dev(hw);
 	struct ieee80211_sta *sta = params->sta;
-	struct ieee80211_txq *txq = sta->txq[params->tid];
 	struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
+	struct ieee80211_txq *txq = sta->txq[params->tid];
+	struct ieee80211_link_sta *link_sta;
 	u16 tid = params->tid;
 	u16 ssn = params->ssn;
 	struct mt76_txq *mtxq;
+	unsigned int link_id;
 	int ret = 0;
 
 	if (!txq)
		return -EINVAL;

	mtxq = (struct
mt76_txq *)txq->drv_priv; mutex_lock(&dev->mt76.mutex); - switch (action) { - case IEEE80211_AMPDU_RX_START: - mt76_rx_aggr_start(&dev->mt76, &msta->wcid, tid, ssn, - params->buf_size); - ret = mt7996_mcu_add_rx_ba(dev, params, true); - break; - case IEEE80211_AMPDU_RX_STOP: - mt76_rx_aggr_stop(&dev->mt76, &msta->wcid, tid); - ret = mt7996_mcu_add_rx_ba(dev, params, false); - break; - case IEEE80211_AMPDU_TX_OPERATIONAL: - mtxq->aggr = true; - mtxq->send_bar = false; - ret = mt7996_mcu_add_tx_ba(dev, params, true); - break; - case IEEE80211_AMPDU_TX_STOP_FLUSH: - case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT: - mtxq->aggr = false; - clear_bit(tid, &msta->wcid.ampdu_state); - ret = mt7996_mcu_add_tx_ba(dev, params, false); - break; - case IEEE80211_AMPDU_TX_START: - set_bit(tid, &msta->wcid.ampdu_state); - ret = IEEE80211_AMPDU_TX_START_IMMEDIATE; - break; - case IEEE80211_AMPDU_TX_STOP_CONT: - mtxq->aggr = false; - clear_bit(tid, &msta->wcid.ampdu_state); - ret = mt7996_mcu_add_tx_ba(dev, params, false); - ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); - break; + + for_each_sta_active_link(vif, sta, link_sta, link_id) { + struct mt7996_sta_link *msta_link; + struct mt7996_vif_link *link; + + msta_link = mt76_dereference(msta->link[link_id], &dev->mt76); + if (!msta_link) + continue; + + link = mt7996_vif_link(dev, vif, link_id); + if (!link) + continue; + + switch (action) { + case IEEE80211_AMPDU_RX_START: + mt76_rx_aggr_start(&dev->mt76, &msta_link->wcid, tid, + ssn, params->buf_size); + ret = mt7996_mcu_add_rx_ba(dev, params, link, true); + break; + case IEEE80211_AMPDU_RX_STOP: + mt76_rx_aggr_stop(&dev->mt76, &msta_link->wcid, tid); + ret = mt7996_mcu_add_rx_ba(dev, params, link, false); + break; + case IEEE80211_AMPDU_TX_OPERATIONAL: + mtxq->aggr = true; + mtxq->send_bar = false; + ret = mt7996_mcu_add_tx_ba(dev, params, link, + msta_link, true); + break; + case IEEE80211_AMPDU_TX_STOP_FLUSH: + case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT: + mtxq->aggr = false; + clear_bit(tid, &msta_link->wcid.ampdu_state); + ret = mt7996_mcu_add_tx_ba(dev, params, link, + msta_link, false); + break; + case IEEE80211_AMPDU_TX_START: + set_bit(tid, &msta_link->wcid.ampdu_state); + ret = IEEE80211_AMPDU_TX_START_IMMEDIATE; + break; + case IEEE80211_AMPDU_TX_STOP_CONT: + mtxq->aggr = false; + clear_bit(tid, &msta_link->wcid.ampdu_state); + ret = mt7996_mcu_add_tx_ba(dev, params, link, + msta_link, false); + break; + } + + if (ret) + break; } + + if (action == IEEE80211_AMPDU_TX_STOP_CONT) + ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); + mutex_unlock(&dev->mt76.mutex); return ret; @@ -989,10 +1368,10 @@ mt7996_get_stats(struct ieee80211_hw *hw, return 0; } -u64 __mt7996_get_tsf(struct ieee80211_hw *hw, struct mt7996_vif *mvif) +u64 __mt7996_get_tsf(struct ieee80211_hw *hw, struct mt7996_vif_link *link) { struct mt7996_dev *dev = mt7996_hw_dev(hw); - struct mt7996_phy *phy = mt7996_vif_link_phy(&mvif->deflink); + struct mt7996_phy *phy = link->phy; union { u64 t64; u32 t32[2]; @@ -1004,8 +1383,8 @@ u64 __mt7996_get_tsf(struct ieee80211_hw *hw, struct mt7996_vif *mvif) lockdep_assert_held(&dev->mt76.mutex); - n = mvif->deflink.mt76.omac_idx > HW_BSSID_MAX ? HW_BSSID_0 - : mvif->deflink.mt76.omac_idx; + n = link->mt76.omac_idx > HW_BSSID_MAX ? 
HW_BSSID_0 + : link->mt76.omac_idx; /* TSF software read */ mt76_rmw(dev, MT_LPON_TCR(phy->mt76->band_idx, n), MT_LPON_TCR_SW_MODE, MT_LPON_TCR_SW_READ); @@ -1023,7 +1402,7 @@ mt7996_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif) u64 ret; mutex_lock(&dev->mt76.mutex); - ret = __mt7996_get_tsf(hw, mvif); + ret = __mt7996_get_tsf(hw, &mvif->deflink); mutex_unlock(&dev->mt76.mutex); return ret; @@ -1035,26 +1414,33 @@ mt7996_set_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif, { struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv; struct mt7996_dev *dev = mt7996_hw_dev(hw); - struct mt7996_phy *phy = mt7996_vif_link_phy(&mvif->deflink); + struct mt7996_vif_link *link; + struct mt7996_phy *phy; union { u64 t64; u32 t32[2]; } tsf = { .t64 = timestamp, }; u16 n; - if (!phy) - return; - mutex_lock(&dev->mt76.mutex); - n = mvif->deflink.mt76.omac_idx > HW_BSSID_MAX ? HW_BSSID_0 - : mvif->deflink.mt76.omac_idx; + link = mt7996_vif_link(dev, vif, mvif->mt76.deflink_id); + if (!link) + goto unlock; + + n = link->mt76.omac_idx > HW_BSSID_MAX ? HW_BSSID_0 + : link->mt76.omac_idx; + phy = link->phy; + if (!phy) + goto unlock; + mt76_wr(dev, MT_LPON_UTTR0(phy->mt76->band_idx), tsf.t32[0]); mt76_wr(dev, MT_LPON_UTTR1(phy->mt76->band_idx), tsf.t32[1]); /* TSF software overwrite */ mt76_rmw(dev, MT_LPON_TCR(phy->mt76->band_idx, n), MT_LPON_TCR_SW_MODE, MT_LPON_TCR_SW_WRITE); +unlock: mutex_unlock(&dev->mt76.mutex); } @@ -1064,26 +1450,33 @@ mt7996_offset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif, { struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv; struct mt7996_dev *dev = mt7996_hw_dev(hw); - struct mt7996_phy *phy = mt7996_vif_link_phy(&mvif->deflink); + struct mt7996_vif_link *link; + struct mt7996_phy *phy; union { u64 t64; u32 t32[2]; } tsf = { .t64 = timestamp, }; u16 n; - if (!phy) - return; - mutex_lock(&dev->mt76.mutex); - n = mvif->deflink.mt76.omac_idx > HW_BSSID_MAX ? HW_BSSID_0 - : mvif->deflink.mt76.omac_idx; + link = mt7996_vif_link(dev, vif, mvif->mt76.deflink_id); + if (!link) + goto unlock; + + phy = link->phy; + if (!phy) + goto unlock; + + n = link->mt76.omac_idx > HW_BSSID_MAX ? 
HW_BSSID_0 + : link->mt76.omac_idx; mt76_wr(dev, MT_LPON_UTTR0(phy->mt76->band_idx), tsf.t32[0]); mt76_wr(dev, MT_LPON_UTTR1(phy->mt76->band_idx), tsf.t32[1]); /* TSF software adjust*/ mt76_rmw(dev, MT_LPON_TCR(phy->mt76->band_idx, n), MT_LPON_TCR_SW_MODE, MT_LPON_TCR_SW_ADJUST); +unlock: mutex_unlock(&dev->mt76.mutex); } @@ -1145,7 +1538,8 @@ static void mt7996_sta_statistics(struct ieee80211_hw *hw, { struct mt7996_dev *dev = mt7996_hw_dev(hw); struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv; - struct rate_info *txrate = &msta->wcid.rate; + struct mt7996_sta_link *msta_link = &msta->deflink; + struct rate_info *txrate = &msta_link->wcid.rate; if (txrate->legacy || txrate->flags) { if (txrate->legacy) { @@ -1165,55 +1559,67 @@ static void mt7996_sta_statistics(struct ieee80211_hw *hw, sinfo->txrate.flags = txrate->flags; sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE); - sinfo->tx_failed = msta->wcid.stats.tx_failed; + sinfo->tx_failed = msta_link->wcid.stats.tx_failed; sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_FAILED); - sinfo->tx_retries = msta->wcid.stats.tx_retries; + sinfo->tx_retries = msta_link->wcid.stats.tx_retries; sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_RETRIES); - sinfo->ack_signal = (s8)msta->ack_signal; + sinfo->ack_signal = (s8)msta_link->ack_signal; sinfo->filled |= BIT_ULL(NL80211_STA_INFO_ACK_SIGNAL); - sinfo->avg_ack_signal = -(s8)ewma_avg_signal_read(&msta->avg_ack_signal); + sinfo->avg_ack_signal = + -(s8)ewma_avg_signal_read(&msta_link->avg_ack_signal); sinfo->filled |= BIT_ULL(NL80211_STA_INFO_ACK_SIGNAL_AVG); if (mtk_wed_device_active(&dev->mt76.mmio.wed)) { - sinfo->tx_bytes = msta->wcid.stats.tx_bytes; + sinfo->tx_bytes = msta_link->wcid.stats.tx_bytes; sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BYTES64); - sinfo->rx_bytes = msta->wcid.stats.rx_bytes; + sinfo->rx_bytes = msta_link->wcid.stats.rx_bytes; sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BYTES64); - sinfo->tx_packets = msta->wcid.stats.tx_packets; + sinfo->tx_packets = msta_link->wcid.stats.tx_packets; sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_PACKETS); - sinfo->rx_packets = msta->wcid.stats.rx_packets; + sinfo->rx_packets = msta_link->wcid.stats.rx_packets; sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_PACKETS); } } -static void mt7996_sta_rc_work(void *data, struct ieee80211_sta *sta) +static void mt7996_link_rate_ctrl_update(void *data, struct ieee80211_sta *sta) { struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv; struct mt7996_dev *dev = msta->vif->deflink.phy->dev; + struct mt7996_sta_link *msta_link; u32 *changed = data; + rcu_read_lock(); + + msta_link = rcu_dereference(msta->link[msta->deflink_id]); + if (!msta_link) + goto out; + spin_lock_bh(&dev->mt76.sta_poll_lock); - msta->changed |= *changed; - if (list_empty(&msta->rc_list)) - list_add_tail(&msta->rc_list, &dev->sta_rc_list); + + msta_link->changed |= *changed; + if (list_empty(&msta_link->rc_list)) + list_add_tail(&msta_link->rc_list, &dev->sta_rc_list); + spin_unlock_bh(&dev->mt76.sta_poll_lock); +out: + rcu_read_unlock(); } -static void mt7996_sta_rc_update(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_link_sta *link_sta, - u32 changed) +static void mt7996_link_sta_rc_update(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_link_sta *link_sta, + u32 changed) { struct mt7996_dev *dev = mt7996_hw_dev(hw); struct ieee80211_sta *sta = link_sta->sta; - mt7996_sta_rc_work(&changed, sta); + mt7996_link_rate_ctrl_update(&changed, sta); 
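	/*
	 * The update is deferred: mt7996_link_rate_ctrl_update() above only
	 * tags msta_link->changed (on the station's deflink) and queues it
	 * on dev->sta_rc_list under sta_poll_lock; dev->rc_work, assumed to
	 * be handled in mac.c outside this patch, later drains that list
	 * and pushes the new rate parameters to the MCU.
	 */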
ieee80211_queue_work(hw, &dev->rc_work); } @@ -1235,7 +1641,8 @@ mt7996_set_bitrate_mask(struct ieee80211_hw *hw, struct ieee80211_vif *vif, * - multiple rates: if it's not in range format i.e 0-{7,8,9} for VHT * then multiple MCS setting (MCS 4,5,6) is not supported. */ - ieee80211_iterate_stations_atomic(hw, mt7996_sta_rc_work, &changed); + ieee80211_iterate_stations_atomic(hw, mt7996_link_rate_ctrl_update, + &changed); ieee80211_queue_work(hw, &dev->rc_work); return 0; @@ -1246,18 +1653,37 @@ static void mt7996_sta_set_4addr(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool enabled) { - struct mt7996_dev *dev = mt7996_hw_dev(hw); struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv; + struct mt7996_dev *dev = mt7996_hw_dev(hw); + struct ieee80211_link_sta *link_sta; + unsigned int link_id; - if (enabled) - set_bit(MT_WCID_FLAG_4ADDR, &msta->wcid.flags); - else - clear_bit(MT_WCID_FLAG_4ADDR, &msta->wcid.flags); + mutex_lock(&dev->mt76.mutex); - if (!msta->wcid.sta) - return; + for_each_sta_active_link(vif, sta, link_sta, link_id) { + struct mt7996_sta_link *msta_link; + struct mt7996_vif_link *link; + + link = mt7996_vif_link(dev, vif, link_id); + if (!link) + continue; - mt7996_mcu_wtbl_update_hdr_trans(dev, vif, sta); + msta_link = mt76_dereference(msta->link[link_id], &dev->mt76); + if (!msta_link) + continue; + + if (enabled) + set_bit(MT_WCID_FLAG_4ADDR, &msta_link->wcid.flags); + else + clear_bit(MT_WCID_FLAG_4ADDR, &msta_link->wcid.flags); + + if (!msta_link->wcid.sta) + continue; + + mt7996_mcu_wtbl_update_hdr_trans(dev, vif, link, msta_link); + } + + mutex_unlock(&dev->mt76.mutex); } static void mt7996_sta_set_decap_offload(struct ieee80211_hw *hw, @@ -1265,18 +1691,39 @@ static void mt7996_sta_set_decap_offload(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool enabled) { - struct mt7996_dev *dev = mt7996_hw_dev(hw); struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv; + struct mt7996_dev *dev = mt7996_hw_dev(hw); + struct ieee80211_link_sta *link_sta; + unsigned int link_id; - if (enabled) - set_bit(MT_WCID_FLAG_HDR_TRANS, &msta->wcid.flags); - else - clear_bit(MT_WCID_FLAG_HDR_TRANS, &msta->wcid.flags); + mutex_lock(&dev->mt76.mutex); - if (!msta->wcid.sta) - return; + for_each_sta_active_link(vif, sta, link_sta, link_id) { + struct mt7996_sta_link *msta_link; + struct mt7996_vif_link *link; + + link = mt7996_vif_link(dev, vif, link_id); + if (!link) + continue; + + msta_link = mt76_dereference(msta->link[link_id], &dev->mt76); + if (!msta_link) + continue; + + if (enabled) + set_bit(MT_WCID_FLAG_HDR_TRANS, + &msta_link->wcid.flags); + else + clear_bit(MT_WCID_FLAG_HDR_TRANS, + &msta_link->wcid.flags); - mt7996_mcu_wtbl_update_hdr_trans(dev, vif, sta); + if (!msta_link->wcid.sta) + continue; + + mt7996_mcu_wtbl_update_hdr_trans(dev, vif, link, msta_link); + } + + mutex_unlock(&dev->mt76.mutex); } static const char mt7996_gstrings_stats[][ETH_GSTRING_LEN] = { @@ -1408,11 +1855,12 @@ static void mt7996_ethtool_worker(void *wi_data, struct ieee80211_sta *sta) { struct mt76_ethtool_worker_info *wi = wi_data; struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv; + struct mt7996_sta_link *msta_link = &msta->deflink; if (msta->vif->deflink.mt76.idx != wi->idx) return; - mt76_ethtool_worker(wi, &msta->wcid.stats, true); + mt76_ethtool_worker(wi, &msta_link->wcid.stats, true); } static @@ -1516,10 +1964,12 @@ mt7996_twt_teardown_request(struct ieee80211_hw *hw, u8 flowid) { struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv; + struct 
mt7996_sta_link *msta_link = &msta->deflink; + struct mt7996_vif_link *link = &msta->vif->deflink; struct mt7996_dev *dev = mt7996_hw_dev(hw); mutex_lock(&dev->mt76.mutex); - mt7996_mac_twt_teardown_flow(dev, msta, flowid); + mt7996_mac_twt_teardown_flow(dev, link, msta_link, flowid); mutex_unlock(&dev->mt76.mutex); } @@ -1589,12 +2039,26 @@ mt7996_net_fill_forward_path(struct ieee80211_hw *hw, { struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv; struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv; - struct mt7996_vif_link *mlink = &mvif->deflink; struct mt7996_dev *dev = mt7996_hw_dev(hw); struct mtk_wed_device *wed = &dev->mt76.mmio.wed; + struct mt7996_sta_link *msta_link; + struct mt7996_vif_link *link; + struct mt76_vif_link *mlink; struct mt7996_phy *phy; - phy = mt7996_vif_link_phy(mlink); + mlink = rcu_dereference(mvif->mt76.link[msta->deflink_id]); + if (!mlink) + return -EIO; + + msta_link = rcu_dereference(msta->link[msta->deflink_id]); + if (!msta_link) + return -EIO; + + if (!msta_link->wcid.sta || msta_link->wcid.idx > MT7996_WTBL_STA) + return -EIO; + + link = (struct mt7996_vif_link *)mlink; + phy = mt7996_vif_link_phy(link); if (!phy) return -ENODEV; @@ -1604,15 +2068,12 @@ mt7996_net_fill_forward_path(struct ieee80211_hw *hw, if (!mtk_wed_device_active(wed)) return -ENODEV; - if (!msta->wcid.sta || msta->wcid.idx > MT7996_WTBL_STA) - return -EIO; - path->type = DEV_PATH_MTK_WDMA; path->dev = ctx->dev; path->mtk_wdma.wdma_idx = wed->wdma_idx; - path->mtk_wdma.bss = mvif->deflink.mt76.idx; + path->mtk_wdma.bss = mlink->idx; path->mtk_wdma.queue = 0; - path->mtk_wdma.wcid = msta->wcid.idx; + path->mtk_wdma.wcid = msta_link->wcid.idx; path->mtk_wdma.amsdu = mtk_wed_is_amsdu_supported(wed); ctx->dev = NULL; @@ -1622,6 +2083,14 @@ mt7996_net_fill_forward_path(struct ieee80211_hw *hw, #endif +static int +mt7996_change_vif_links(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + u16 old_links, u16 new_links, + struct ieee80211_bss_conf *old[IEEE80211_MLD_MAX_NUM_LINKS]) +{ + return 0; +} + const struct ieee80211_ops mt7996_ops = { .add_chanctx = mt76_add_chanctx, .remove_chanctx = mt76_remove_chanctx, @@ -1637,10 +2106,11 @@ const struct ieee80211_ops mt7996_ops = { .config = mt7996_config, .conf_tx = mt7996_conf_tx, .configure_filter = mt7996_configure_filter, - .bss_info_changed = mt7996_bss_info_changed, - .sta_state = mt76_sta_state, + .vif_cfg_changed = mt7996_vif_cfg_changed, + .link_info_changed = mt7996_link_info_changed, + .sta_state = mt7996_sta_state, .sta_pre_rcu_remove = mt76_sta_pre_rcu_remove, - .link_sta_rc_update = mt7996_sta_rc_update, + .link_sta_rc_update = mt7996_link_sta_rc_update, .set_key = mt7996_set_key, .ampdu_action = mt7996_ampdu_action, .set_rts_threshold = mt7996_set_rts_threshold, @@ -1650,7 +2120,7 @@ const struct ieee80211_ops mt7996_ops = { .remain_on_channel = mt76_remain_on_channel, .cancel_remain_on_channel = mt76_cancel_remain_on_channel, .release_buffered_frames = mt76_release_buffered_frames, - .get_txpower = mt76_get_txpower, + .get_txpower = mt7996_get_txpower, .channel_switch_beacon = mt7996_channel_switch_beacon, .get_stats = mt7996_get_stats, .get_et_sset_count = mt7996_get_et_sset_count, @@ -1677,4 +2147,6 @@ const struct ieee80211_ops mt7996_ops = { .net_fill_forward_path = mt7996_net_fill_forward_path, .net_setup_tc = mt76_wed_net_setup_tc, #endif + .change_vif_links = mt7996_change_vif_links, + .change_sta_links = mt7996_mac_sta_change_links, }; diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c 
b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c index e4569d0322214..ddd555942c738 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c @@ -118,13 +118,13 @@ mt7996_mcu_get_sta_nss(u16 mcs_map) } static void -mt7996_mcu_set_sta_he_mcs(struct ieee80211_sta *sta, __le16 *he_mcs, - u16 mcs_map) +mt7996_mcu_set_sta_he_mcs(struct ieee80211_link_sta *link_sta, + struct mt7996_vif_link *link, + __le16 *he_mcs, u16 mcs_map) { - struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv; - enum nl80211_band band = msta->vif->deflink.phy->mt76->chandef.chan->band; - const u16 *mask = msta->vif->deflink.bitrate_mask.control[band].he_mcs; - int nss, max_nss = sta->deflink.rx_nss > 3 ? 4 : sta->deflink.rx_nss; + int nss, max_nss = link_sta->rx_nss > 3 ? 4 : link_sta->rx_nss; + enum nl80211_band band = link->phy->mt76->chandef.chan->band; + const u16 *mask = link->bitrate_mask.control[band].he_mcs; for (nss = 0; nss < max_nss; nss++) { int mcs; @@ -167,11 +167,11 @@ mt7996_mcu_set_sta_he_mcs(struct ieee80211_sta *sta, __le16 *he_mcs, } static void -mt7996_mcu_set_sta_vht_mcs(struct ieee80211_sta *sta, __le16 *vht_mcs, - const u16 *mask) +mt7996_mcu_set_sta_vht_mcs(struct ieee80211_link_sta *link_sta, + __le16 *vht_mcs, const u16 *mask) { - u16 mcs, mcs_map = le16_to_cpu(sta->deflink.vht_cap.vht_mcs.rx_mcs_map); - int nss, max_nss = sta->deflink.rx_nss > 3 ? 4 : sta->deflink.rx_nss; + u16 mcs, mcs_map = le16_to_cpu(link_sta->vht_cap.vht_mcs.rx_mcs_map); + int nss, max_nss = link_sta->rx_nss > 3 ? 4 : link_sta->rx_nss; for (nss = 0; nss < max_nss; nss++, mcs_map >>= 2) { switch (mcs_map & 0x3) { @@ -193,13 +193,13 @@ mt7996_mcu_set_sta_vht_mcs(struct ieee80211_sta *sta, __le16 *vht_mcs, } static void -mt7996_mcu_set_sta_ht_mcs(struct ieee80211_sta *sta, u8 *ht_mcs, - const u8 *mask) +mt7996_mcu_set_sta_ht_mcs(struct ieee80211_link_sta *link_sta, + u8 *ht_mcs, const u8 *mask) { - int nss, max_nss = sta->deflink.rx_nss > 3 ? 4 : sta->deflink.rx_nss; + int nss, max_nss = link_sta->rx_nss > 3 ? 
4 : link_sta->rx_nss; for (nss = 0; nss < max_nss; nss++) - ht_mcs[nss] = sta->deflink.ht_cap.mcs.rx_mask[nss] & mask[nss]; + ht_mcs[nss] = link_sta->ht_cap.mcs.rx_mask[nss] & mask[nss]; } static int @@ -1064,7 +1064,8 @@ __mt7996_mcu_alloc_bss_req(struct mt76_dev *dev, struct mt76_vif_link *mvif, int int mt7996_mcu_add_bss_info(struct mt7996_phy *phy, struct ieee80211_vif *vif, struct ieee80211_bss_conf *link_conf, - struct mt76_vif_link *mlink, int enable) + struct mt76_vif_link *mlink, + struct mt7996_sta_link *msta_link, int enable) { struct mt7996_dev *dev = phy->dev; struct sk_buff *skb; @@ -1081,7 +1082,7 @@ int mt7996_mcu_add_bss_info(struct mt7996_phy *phy, struct ieee80211_vif *vif, /* bss_basic must be first */ mt7996_mcu_bss_basic_tlv(skb, vif, link_conf, mlink, phy->mt76, - mlink->wcid->idx, enable); + msta_link->wcid.idx, enable); mt7996_mcu_bss_sec_tlv(skb, mlink); if (vif->type == NL80211_IFTYPE_MONITOR) @@ -1159,37 +1160,34 @@ mt7996_mcu_sta_ba(struct mt7996_dev *dev, struct mt76_vif_link *mvif, /** starec & wtbl **/ int mt7996_mcu_add_tx_ba(struct mt7996_dev *dev, struct ieee80211_ampdu_params *params, - bool enable) + struct mt7996_vif_link *link, + struct mt7996_sta_link *msta_link, bool enable) { - struct mt7996_sta *msta = (struct mt7996_sta *)params->sta->drv_priv; - struct mt7996_vif *mvif = msta->vif; - if (enable && !params->amsdu) - msta->wcid.amsdu = false; + msta_link->wcid.amsdu = false; - return mt7996_mcu_sta_ba(dev, &mvif->deflink.mt76, params, enable, true); + return mt7996_mcu_sta_ba(dev, &link->mt76, params, enable, true); } int mt7996_mcu_add_rx_ba(struct mt7996_dev *dev, struct ieee80211_ampdu_params *params, - bool enable) + struct mt7996_vif_link *link, bool enable) { - struct mt7996_sta *msta = (struct mt7996_sta *)params->sta->drv_priv; - struct mt7996_vif *mvif = msta->vif; - - return mt7996_mcu_sta_ba(dev, &mvif->deflink.mt76, params, enable, false); + return mt7996_mcu_sta_ba(dev, &link->mt76, params, enable, false); } static void -mt7996_mcu_sta_he_tlv(struct sk_buff *skb, struct ieee80211_sta *sta) +mt7996_mcu_sta_he_tlv(struct sk_buff *skb, + struct ieee80211_link_sta *link_sta, + struct mt7996_vif_link *link) { - struct ieee80211_he_cap_elem *elem = &sta->deflink.he_cap.he_cap_elem; + struct ieee80211_he_cap_elem *elem = &link_sta->he_cap.he_cap_elem; struct ieee80211_he_mcs_nss_supp mcs_map; struct sta_rec_he_v2 *he; struct tlv *tlv; int i = 0; - if (!sta->deflink.he_cap.has_he) + if (!link_sta->he_cap.has_he) return; tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_HE_V2, sizeof(*he)); @@ -1201,21 +1199,21 @@ mt7996_mcu_sta_he_tlv(struct sk_buff *skb, struct ieee80211_sta *sta) he->he_phy_cap[i] = elem->phy_cap_info[i]; } - mcs_map = sta->deflink.he_cap.he_mcs_nss_supp; - switch (sta->deflink.bandwidth) { + mcs_map = link_sta->he_cap.he_mcs_nss_supp; + switch (link_sta->bandwidth) { case IEEE80211_STA_RX_BW_160: if (elem->phy_cap_info[0] & IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G) - mt7996_mcu_set_sta_he_mcs(sta, + mt7996_mcu_set_sta_he_mcs(link_sta, link, &he->max_nss_mcs[CMD_HE_MCS_BW8080], le16_to_cpu(mcs_map.rx_mcs_80p80)); - mt7996_mcu_set_sta_he_mcs(sta, + mt7996_mcu_set_sta_he_mcs(link_sta, link, &he->max_nss_mcs[CMD_HE_MCS_BW160], le16_to_cpu(mcs_map.rx_mcs_160)); fallthrough; default: - mt7996_mcu_set_sta_he_mcs(sta, + mt7996_mcu_set_sta_he_mcs(link_sta, link, &he->max_nss_mcs[CMD_HE_MCS_BW80], le16_to_cpu(mcs_map.rx_mcs_80)); break; @@ -1225,24 +1223,26 @@ mt7996_mcu_sta_he_tlv(struct sk_buff *skb, struct 
ieee80211_sta *sta) } static void -mt7996_mcu_sta_he_6g_tlv(struct sk_buff *skb, struct ieee80211_sta *sta) +mt7996_mcu_sta_he_6g_tlv(struct sk_buff *skb, + struct ieee80211_link_sta *link_sta) { struct sta_rec_he_6g_capa *he_6g; struct tlv *tlv; - if (!sta->deflink.he_6ghz_capa.capa) + if (!link_sta->he_6ghz_capa.capa) return; tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_HE_6G, sizeof(*he_6g)); he_6g = (struct sta_rec_he_6g_capa *)tlv; - he_6g->capa = sta->deflink.he_6ghz_capa.capa; + he_6g->capa = link_sta->he_6ghz_capa.capa; } static void -mt7996_mcu_sta_eht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta) +mt7996_mcu_sta_eht_tlv(struct sk_buff *skb, + struct ieee80211_link_sta *link_sta) { - struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv; + struct mt7996_sta *msta = (struct mt7996_sta *)link_sta->sta->drv_priv; struct ieee80211_vif *vif = container_of((void *)msta->vif, struct ieee80211_vif, drv_priv); struct ieee80211_eht_mcs_nss_supp *mcs_map; @@ -1250,11 +1250,11 @@ mt7996_mcu_sta_eht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta) struct sta_rec_eht *eht; struct tlv *tlv; - if (!sta->deflink.eht_cap.has_eht) + if (!link_sta->eht_cap.has_eht) return; - mcs_map = &sta->deflink.eht_cap.eht_mcs_nss_supp; - elem = &sta->deflink.eht_cap.eht_cap_elem; + mcs_map = &link_sta->eht_cap.eht_mcs_nss_supp; + elem = &link_sta->eht_cap.eht_cap_elem; tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_EHT, sizeof(*eht)); @@ -1265,7 +1265,7 @@ mt7996_mcu_sta_eht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta) eht->phy_cap_ext = cpu_to_le64(elem->phy_cap_info[8]); if (vif->type != NL80211_IFTYPE_STATION && - (sta->deflink.he_cap.he_cap_elem.phy_cap_info[0] & + (link_sta->he_cap.he_cap_elem.phy_cap_info[0] & (IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G | IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G | IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G | @@ -1281,47 +1281,48 @@ mt7996_mcu_sta_eht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta) } static void -mt7996_mcu_sta_ht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta) +mt7996_mcu_sta_ht_tlv(struct sk_buff *skb, struct ieee80211_link_sta *link_sta) { struct sta_rec_ht_uni *ht; struct tlv *tlv; - if (!sta->deflink.ht_cap.ht_supported) + if (!link_sta->ht_cap.ht_supported) return; tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_HT, sizeof(*ht)); ht = (struct sta_rec_ht_uni *)tlv; - ht->ht_cap = cpu_to_le16(sta->deflink.ht_cap.cap); - ht->ampdu_param = u8_encode_bits(sta->deflink.ht_cap.ampdu_factor, + ht->ht_cap = cpu_to_le16(link_sta->ht_cap.cap); + ht->ampdu_param = u8_encode_bits(link_sta->ht_cap.ampdu_factor, IEEE80211_HT_AMPDU_PARM_FACTOR) | - u8_encode_bits(sta->deflink.ht_cap.ampdu_density, + u8_encode_bits(link_sta->ht_cap.ampdu_density, IEEE80211_HT_AMPDU_PARM_DENSITY); } static void -mt7996_mcu_sta_vht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta) +mt7996_mcu_sta_vht_tlv(struct sk_buff *skb, struct ieee80211_link_sta *link_sta) { struct sta_rec_vht *vht; struct tlv *tlv; /* For 6G band, this tlv is necessary to let hw work normally */ - if (!sta->deflink.he_6ghz_capa.capa && !sta->deflink.vht_cap.vht_supported) + if (!link_sta->he_6ghz_capa.capa && !link_sta->vht_cap.vht_supported) return; tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_VHT, sizeof(*vht)); vht = (struct sta_rec_vht *)tlv; - vht->vht_cap = cpu_to_le32(sta->deflink.vht_cap.cap); - vht->vht_rx_mcs_map = sta->deflink.vht_cap.vht_mcs.rx_mcs_map; - vht->vht_tx_mcs_map = sta->deflink.vht_cap.vht_mcs.tx_mcs_map; + vht->vht_cap = 
cpu_to_le32(link_sta->vht_cap.cap); + vht->vht_rx_mcs_map = link_sta->vht_cap.vht_mcs.rx_mcs_map; + vht->vht_tx_mcs_map = link_sta->vht_cap.vht_mcs.tx_mcs_map; } static void mt7996_mcu_sta_amsdu_tlv(struct mt7996_dev *dev, struct sk_buff *skb, - struct ieee80211_vif *vif, struct ieee80211_sta *sta) + struct ieee80211_vif *vif, + struct ieee80211_link_sta *link_sta, + struct mt7996_sta_link *msta_link) { - struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv; struct sta_rec_amsdu *amsdu; struct tlv *tlv; @@ -1330,16 +1331,16 @@ mt7996_mcu_sta_amsdu_tlv(struct mt7996_dev *dev, struct sk_buff *skb, vif->type != NL80211_IFTYPE_AP) return; - if (!sta->deflink.agg.max_amsdu_len) + if (!link_sta->agg.max_amsdu_len) return; tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_HW_AMSDU, sizeof(*amsdu)); amsdu = (struct sta_rec_amsdu *)tlv; amsdu->max_amsdu_num = 8; amsdu->amsdu_en = true; - msta->wcid.amsdu = true; + msta_link->wcid.amsdu = true; - switch (sta->deflink.agg.max_amsdu_len) { + switch (link_sta->agg.max_amsdu_len) { case IEEE80211_MAX_MPDU_LEN_VHT_11454: amsdu->max_mpdu_size = IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454; @@ -1356,30 +1357,31 @@ mt7996_mcu_sta_amsdu_tlv(struct mt7996_dev *dev, struct sk_buff *skb, static void mt7996_mcu_sta_muru_tlv(struct mt7996_dev *dev, struct sk_buff *skb, - struct ieee80211_vif *vif, struct ieee80211_sta *sta) + struct ieee80211_bss_conf *link_conf, + struct ieee80211_link_sta *link_sta) { - struct ieee80211_he_cap_elem *elem = &sta->deflink.he_cap.he_cap_elem; + struct ieee80211_he_cap_elem *elem = &link_sta->he_cap.he_cap_elem; struct sta_rec_muru *muru; struct tlv *tlv; - if (vif->type != NL80211_IFTYPE_STATION && - vif->type != NL80211_IFTYPE_AP) + if (link_conf->vif->type != NL80211_IFTYPE_STATION && + link_conf->vif->type != NL80211_IFTYPE_AP) return; tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_MURU, sizeof(*muru)); muru = (struct sta_rec_muru *)tlv; - muru->cfg.mimo_dl_en = vif->bss_conf.eht_mu_beamformer || - vif->bss_conf.he_mu_beamformer || - vif->bss_conf.vht_mu_beamformer || - vif->bss_conf.vht_mu_beamformee; + muru->cfg.mimo_dl_en = link_conf->eht_mu_beamformer || + link_conf->he_mu_beamformer || + link_conf->vht_mu_beamformer || + link_conf->vht_mu_beamformee; muru->cfg.ofdma_dl_en = true; - if (sta->deflink.vht_cap.vht_supported) + if (link_sta->vht_cap.vht_supported) muru->mimo_dl.vht_mu_bfee = - !!(sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE); + !!(link_sta->vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE); - if (!sta->deflink.he_cap.has_he) + if (!link_sta->he_cap.has_he) return; muru->mimo_dl.partial_bw_dl_mimo = @@ -1410,49 +1412,50 @@ mt7996_mcu_sta_muru_tlv(struct mt7996_dev *dev, struct sk_buff *skb, } static inline bool -mt7996_is_ebf_supported(struct mt7996_phy *phy, struct ieee80211_vif *vif, - struct ieee80211_sta *sta, bool bfee) +mt7996_is_ebf_supported(struct mt7996_phy *phy, + struct ieee80211_bss_conf *link_conf, + struct ieee80211_link_sta *link_sta, bool bfee) { int sts = hweight16(phy->mt76->chainmask); - if (vif->type != NL80211_IFTYPE_STATION && - vif->type != NL80211_IFTYPE_AP) + if (link_conf->vif->type != NL80211_IFTYPE_STATION && + link_conf->vif->type != NL80211_IFTYPE_AP) return false; if (!bfee && sts < 2) return false; - if (sta->deflink.eht_cap.has_eht) { - struct ieee80211_sta_eht_cap *pc = &sta->deflink.eht_cap; + if (link_sta->eht_cap.has_eht) { + struct ieee80211_sta_eht_cap *pc = &link_sta->eht_cap; struct ieee80211_eht_cap_elem_fixed *pe = &pc->eht_cap_elem; if (bfee) - 
return vif->bss_conf.eht_su_beamformee && + return link_conf->eht_su_beamformee && EHT_PHY(CAP0_SU_BEAMFORMER, pe->phy_cap_info[0]); else - return vif->bss_conf.eht_su_beamformer && + return link_conf->eht_su_beamformer && EHT_PHY(CAP0_SU_BEAMFORMEE, pe->phy_cap_info[0]); } - if (sta->deflink.he_cap.has_he) { - struct ieee80211_he_cap_elem *pe = &sta->deflink.he_cap.he_cap_elem; + if (link_sta->he_cap.has_he) { + struct ieee80211_he_cap_elem *pe = &link_sta->he_cap.he_cap_elem; if (bfee) - return vif->bss_conf.he_su_beamformee && + return link_conf->he_su_beamformee && HE_PHY(CAP3_SU_BEAMFORMER, pe->phy_cap_info[3]); else - return vif->bss_conf.he_su_beamformer && + return link_conf->he_su_beamformer && HE_PHY(CAP4_SU_BEAMFORMEE, pe->phy_cap_info[4]); } - if (sta->deflink.vht_cap.vht_supported) { - u32 cap = sta->deflink.vht_cap.cap; + if (link_sta->vht_cap.vht_supported) { + u32 cap = link_sta->vht_cap.cap; if (bfee) - return vif->bss_conf.vht_su_beamformee && + return link_conf->vht_su_beamformee && (cap & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE); else - return vif->bss_conf.vht_su_beamformer && + return link_conf->vht_su_beamformer && (cap & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE); } @@ -1473,10 +1476,11 @@ mt7996_mcu_sta_sounding_rate(struct sta_rec_bf *bf, struct mt7996_phy *phy) } static void -mt7996_mcu_sta_bfer_ht(struct ieee80211_sta *sta, struct mt7996_phy *phy, - struct sta_rec_bf *bf, bool explicit) +mt7996_mcu_sta_bfer_ht(struct ieee80211_link_sta *link_sta, + struct mt7996_phy *phy, struct sta_rec_bf *bf, + bool explicit) { - struct ieee80211_mcs_info *mcs = &sta->deflink.ht_cap.mcs; + struct ieee80211_mcs_info *mcs = &link_sta->ht_cap.mcs; u8 n = 0; bf->tx_mode = MT_PHY_TYPE_HT; @@ -1499,10 +1503,11 @@ mt7996_mcu_sta_bfer_ht(struct ieee80211_sta *sta, struct mt7996_phy *phy, } static void -mt7996_mcu_sta_bfer_vht(struct ieee80211_sta *sta, struct mt7996_phy *phy, - struct sta_rec_bf *bf, bool explicit) +mt7996_mcu_sta_bfer_vht(struct ieee80211_link_sta *link_sta, + struct mt7996_phy *phy, struct sta_rec_bf *bf, + bool explicit) { - struct ieee80211_sta_vht_cap *pc = &sta->deflink.vht_cap; + struct ieee80211_sta_vht_cap *pc = &link_sta->vht_cap; struct ieee80211_sta_vht_cap *vc = &phy->mt76->sband_5g.sband.vht_cap; u16 mcs_map = le16_to_cpu(pc->vht_mcs.rx_mcs_map); u8 nss_mcs = mt7996_mcu_get_sta_nss(mcs_map); @@ -1523,24 +1528,24 @@ mt7996_mcu_sta_bfer_vht(struct ieee80211_sta *sta, struct mt7996_phy *phy, bf->ncol = min_t(u8, nss_mcs, bf->nrow); bf->ibf_ncol = min_t(u8, MT7996_IBF_MAX_NC, bf->ncol); - if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160) + if (link_sta->bandwidth == IEEE80211_STA_RX_BW_160) bf->nrow = 1; } else { bf->nrow = tx_ant; bf->ncol = min_t(u8, nss_mcs, bf->nrow); bf->ibf_ncol = min_t(u8, MT7996_IBF_MAX_NC, nss_mcs); - if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160) + if (link_sta->bandwidth == IEEE80211_STA_RX_BW_160) bf->ibf_nrow = 1; } } static void -mt7996_mcu_sta_bfer_he(struct ieee80211_sta *sta, struct ieee80211_vif *vif, - struct mt7996_phy *phy, struct sta_rec_bf *bf, - bool explicit) +mt7996_mcu_sta_bfer_he(struct ieee80211_link_sta *link_sta, + struct ieee80211_vif *vif, struct mt7996_phy *phy, + struct sta_rec_bf *bf, bool explicit) { - struct ieee80211_sta_he_cap *pc = &sta->deflink.he_cap; + struct ieee80211_sta_he_cap *pc = &link_sta->he_cap; struct ieee80211_he_cap_elem *pe = &pc->he_cap_elem; const struct ieee80211_sta_he_cap *vc = mt76_connac_get_he_phy_cap(phy->mt76, vif); @@ -1569,7 +1574,7 @@ 
mt7996_mcu_sta_bfer_he(struct ieee80211_sta *sta, struct ieee80211_vif *vif, bf->ibf_ncol = explicit ? min_t(u8, MT7996_IBF_MAX_NC, bf->ncol) : min_t(u8, MT7996_IBF_MAX_NC, nss_mcs); - if (sta->deflink.bandwidth != IEEE80211_STA_RX_BW_160) + if (link_sta->bandwidth != IEEE80211_STA_RX_BW_160) return; /* go over for 160MHz and 80p80 */ @@ -1601,11 +1606,11 @@ mt7996_mcu_sta_bfer_he(struct ieee80211_sta *sta, struct ieee80211_vif *vif, } static void -mt7996_mcu_sta_bfer_eht(struct ieee80211_sta *sta, struct ieee80211_vif *vif, - struct mt7996_phy *phy, struct sta_rec_bf *bf, - bool explicit) +mt7996_mcu_sta_bfer_eht(struct ieee80211_link_sta *link_sta, + struct ieee80211_vif *vif, struct mt7996_phy *phy, + struct sta_rec_bf *bf, bool explicit) { - struct ieee80211_sta_eht_cap *pc = &sta->deflink.eht_cap; + struct ieee80211_sta_eht_cap *pc = &link_sta->eht_cap; struct ieee80211_eht_cap_elem_fixed *pe = &pc->eht_cap_elem; struct ieee80211_eht_mcs_nss_supp *eht_nss = &pc->eht_mcs_nss_supp; const struct ieee80211_sta_eht_cap *vc = @@ -1629,10 +1634,10 @@ mt7996_mcu_sta_bfer_eht(struct ieee80211_sta *sta, struct ieee80211_vif *vif, bf->ibf_ncol = explicit ? min_t(u8, MT7996_IBF_MAX_NC, bf->ncol) : min_t(u8, MT7996_IBF_MAX_NC, nss_mcs); - if (sta->deflink.bandwidth < IEEE80211_STA_RX_BW_160) + if (link_sta->bandwidth < IEEE80211_STA_RX_BW_160) return; - switch (sta->deflink.bandwidth) { + switch (link_sta->bandwidth) { case IEEE80211_STA_RX_BW_160: snd_dim = EHT_PHY(CAP2_SOUNDING_DIM_160MHZ_MASK, ve->phy_cap_info[2]); sts = EHT_PHY(CAP1_BEAMFORMEE_SS_160MHZ_MASK, pe->phy_cap_info[1]); @@ -1660,13 +1665,15 @@ mt7996_mcu_sta_bfer_eht(struct ieee80211_sta *sta, struct ieee80211_vif *vif, static void mt7996_mcu_sta_bfer_tlv(struct mt7996_dev *dev, struct sk_buff *skb, - struct ieee80211_vif *vif, struct ieee80211_sta *sta) + struct ieee80211_bss_conf *link_conf, + struct ieee80211_link_sta *link_sta, + struct mt7996_vif_link *link) { #define EBF_MODE BIT(0) #define IBF_MODE BIT(1) #define BF_MAT_ORDER 4 - struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv; - struct mt7996_phy *phy = mvif->deflink.phy; + struct ieee80211_vif *vif = link_conf->vif; + struct mt7996_phy *phy = link->phy; int tx_ant = hweight16(phy->mt76->chainmask) - 1; struct sta_rec_bf *bf; struct tlv *tlv; @@ -1678,10 +1685,10 @@ mt7996_mcu_sta_bfer_tlv(struct mt7996_dev *dev, struct sk_buff *skb, }; bool ebf; - if (!(sta->deflink.ht_cap.ht_supported || sta->deflink.he_cap.has_he)) + if (!(link_sta->ht_cap.ht_supported || link_sta->he_cap.has_he)) return; - ebf = mt7996_is_ebf_supported(phy, vif, sta, false); + ebf = mt7996_is_ebf_supported(phy, link_conf, link_sta, false); if (!ebf && !dev->ibf) return; @@ -1692,28 +1699,29 @@ mt7996_mcu_sta_bfer_tlv(struct mt7996_dev *dev, struct sk_buff *skb, * vht: support eBF and iBF * ht: iBF only, since mac80211 lacks of eBF support */ - if (sta->deflink.eht_cap.has_eht) - mt7996_mcu_sta_bfer_eht(sta, vif, phy, bf, ebf); - else if (sta->deflink.he_cap.has_he) - mt7996_mcu_sta_bfer_he(sta, vif, phy, bf, ebf); - else if (sta->deflink.vht_cap.vht_supported) - mt7996_mcu_sta_bfer_vht(sta, phy, bf, ebf); - else if (sta->deflink.ht_cap.ht_supported) - mt7996_mcu_sta_bfer_ht(sta, phy, bf, ebf); + if (link_sta->eht_cap.has_eht) + mt7996_mcu_sta_bfer_eht(link_sta, vif, link->phy, bf, ebf); + else if (link_sta->he_cap.has_he) + mt7996_mcu_sta_bfer_he(link_sta, vif, link->phy, bf, ebf); + else if (link_sta->vht_cap.vht_supported) + mt7996_mcu_sta_bfer_vht(link_sta, link->phy, bf, ebf); + else if 
(link_sta->ht_cap.ht_supported) + mt7996_mcu_sta_bfer_ht(link_sta, link->phy, bf, ebf); else return; bf->bf_cap = ebf ? EBF_MODE : (dev->ibf ? IBF_MODE : 0); if (is_mt7992(&dev->mt76) && tx_ant == 4) bf->bf_cap |= IBF_MODE; - bf->bw = sta->deflink.bandwidth; - bf->ibf_dbw = sta->deflink.bandwidth; + + bf->bw = link_sta->bandwidth; + bf->ibf_dbw = link_sta->bandwidth; bf->ibf_nrow = tx_ant; - if (sta->deflink.eht_cap.has_eht || sta->deflink.he_cap.has_he) + if (link_sta->eht_cap.has_eht || link_sta->he_cap.has_he) bf->ibf_timeout = is_mt7996(&dev->mt76) ? MT7996_IBF_TIMEOUT : MT7992_IBF_TIMEOUT; - else if (!ebf && sta->deflink.bandwidth <= IEEE80211_STA_RX_BW_40 && !bf->ncol) + else if (!ebf && link_sta->bandwidth <= IEEE80211_STA_RX_BW_40 && !bf->ncol) bf->ibf_timeout = MT7996_IBF_TIMEOUT_LEGACY; else bf->ibf_timeout = MT7996_IBF_TIMEOUT; @@ -1727,7 +1735,7 @@ mt7996_mcu_sta_bfer_tlv(struct mt7996_dev *dev, struct sk_buff *skb, matrix[bf->nrow][bf->ncol] : 0; } - switch (sta->deflink.bandwidth) { + switch (link_sta->bandwidth) { case IEEE80211_STA_RX_BW_160: case IEEE80211_STA_RX_BW_80: bf->mem_total = bf->mem_20m * 2; @@ -1743,31 +1751,32 @@ mt7996_mcu_sta_bfer_tlv(struct mt7996_dev *dev, struct sk_buff *skb, static void mt7996_mcu_sta_bfee_tlv(struct mt7996_dev *dev, struct sk_buff *skb, - struct ieee80211_vif *vif, struct ieee80211_sta *sta) + struct ieee80211_bss_conf *link_conf, + struct ieee80211_link_sta *link_sta, + struct mt7996_vif_link *link) { - struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv; - struct mt7996_phy *phy = mvif->deflink.phy; + struct mt7996_phy *phy = link->phy; int tx_ant = hweight8(phy->mt76->antenna_mask) - 1; struct sta_rec_bfee *bfee; struct tlv *tlv; u8 nrow = 0; - if (!(sta->deflink.vht_cap.vht_supported || sta->deflink.he_cap.has_he)) + if (!(link_sta->vht_cap.vht_supported || link_sta->he_cap.has_he)) return; - if (!mt7996_is_ebf_supported(phy, vif, sta, true)) + if (!mt7996_is_ebf_supported(phy, link_conf, link_sta, true)) return; tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_BFEE, sizeof(*bfee)); bfee = (struct sta_rec_bfee *)tlv; - if (sta->deflink.he_cap.has_he) { - struct ieee80211_he_cap_elem *pe = &sta->deflink.he_cap.he_cap_elem; + if (link_sta->he_cap.has_he) { + struct ieee80211_he_cap_elem *pe = &link_sta->he_cap.he_cap_elem; nrow = HE_PHY(CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_MASK, pe->phy_cap_info[5]); - } else if (sta->deflink.vht_cap.vht_supported) { - struct ieee80211_sta_vht_cap *pc = &sta->deflink.vht_cap; + } else if (link_sta->vht_cap.vht_supported) { + struct ieee80211_sta_vht_cap *pc = &link_sta->vht_cap; nrow = FIELD_GET(IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK, pc->cap); @@ -1874,18 +1883,19 @@ int mt7996_mcu_set_fixed_rate_ctrl(struct mt7996_dev *dev, MCU_WM_UNI_CMD(RA), true); } -int mt7996_mcu_set_fixed_field(struct mt7996_dev *dev, struct ieee80211_vif *vif, - struct ieee80211_sta *sta, void *data, u32 field) +int mt7996_mcu_set_fixed_field(struct mt7996_dev *dev, + struct ieee80211_link_sta *link_sta, + struct mt7996_vif_link *link, + struct mt7996_sta_link *msta_link, + void *data, u32 field) { - struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv; - struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv; struct sta_phy_uni *phy = data; struct sta_rec_ra_fixed_uni *ra; struct sk_buff *skb; struct tlv *tlv; - skb = __mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->deflink.mt76, - &msta->wcid, + skb = __mt76_connac_mcu_alloc_sta_req(&dev->mt76, &link->mt76, + &msta_link->wcid, 
MT7996_STA_UPDATE_MAX_SIZE); if (IS_ERR(skb)) return PTR_ERR(skb); @@ -1904,7 +1914,7 @@ int mt7996_mcu_set_fixed_field(struct mt7996_dev *dev, struct ieee80211_vif *vif ra->phy = *phy; break; case RATE_PARAM_MMPS_UPDATE: - ra->mmps_mode = mt7996_mcu_get_mmps_mode(sta->deflink.smps_mode); + ra->mmps_mode = mt7996_mcu_get_mmps_mode(link_sta->smps_mode); break; default: break; @@ -1916,12 +1926,13 @@ int mt7996_mcu_set_fixed_field(struct mt7996_dev *dev, struct ieee80211_vif *vif } static int -mt7996_mcu_add_rate_ctrl_fixed(struct mt7996_dev *dev, struct ieee80211_vif *vif, - struct ieee80211_sta *sta) +mt7996_mcu_add_rate_ctrl_fixed(struct mt7996_dev *dev, + struct ieee80211_link_sta *link_sta, + struct mt7996_vif_link *link, + struct mt7996_sta_link *msta_link) { - struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv; - struct cfg80211_chan_def *chandef = &mvif->deflink.phy->mt76->chandef; - struct cfg80211_bitrate_mask *mask = &mvif->deflink.bitrate_mask; + struct cfg80211_chan_def *chandef = &link->phy->mt76->chandef; + struct cfg80211_bitrate_mask *mask = &link->bitrate_mask; enum nl80211_band band = chandef->chan->band; struct sta_phy_uni phy = {}; int ret, nrates = 0; @@ -1942,11 +1953,11 @@ mt7996_mcu_add_rate_ctrl_fixed(struct mt7996_dev *dev, struct ieee80211_vif *vif } \ } while (0) - if (sta->deflink.he_cap.has_he) { + if (link_sta->he_cap.has_he) { __sta_phy_bitrate_mask_check(he_mcs, he_gi, 0, 1); - } else if (sta->deflink.vht_cap.vht_supported) { + } else if (link_sta->vht_cap.vht_supported) { __sta_phy_bitrate_mask_check(vht_mcs, gi, 0, 0); - } else if (sta->deflink.ht_cap.ht_supported) { + } else if (link_sta->ht_cap.ht_supported) { __sta_phy_bitrate_mask_check(ht_mcs, gi, 1, 0); } else { nrates = hweight32(mask->control[band].legacy); @@ -1963,7 +1974,8 @@ mt7996_mcu_add_rate_ctrl_fixed(struct mt7996_dev *dev, struct ieee80211_vif *vif /* fixed single rate */ if (nrates == 1) { - ret = mt7996_mcu_set_fixed_field(dev, vif, sta, &phy, + ret = mt7996_mcu_set_fixed_field(dev, link_sta, link, + msta_link, &phy, RATE_PARAM_FIXED_MCS); if (ret) return ret; @@ -1972,20 +1984,20 @@ mt7996_mcu_add_rate_ctrl_fixed(struct mt7996_dev *dev, struct ieee80211_vif *vif /* fixed GI */ if (mask->control[band].gi != NL80211_TXRATE_DEFAULT_GI || mask->control[band].he_gi != GENMASK(7, 0)) { - struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv; u32 addr; /* firmware updates only TXCMD but doesn't take WTBL into * account, so driver should update here to reflect the * actual txrate hardware sends out. 
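	 * (Assumed layout: DW7 of the WTBL entry, the third argument to
	 * mt7996_mac_wtbl_lmac_addr() below, carries the short-GI bits:
	 * bits 31:24 for HE rates and bits 15:12 otherwise, matching the
	 * two mt76_rmw_field() masks.)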
*/ - addr = mt7996_mac_wtbl_lmac_addr(dev, msta->wcid.idx, 7); - if (sta->deflink.he_cap.has_he) + addr = mt7996_mac_wtbl_lmac_addr(dev, msta_link->wcid.idx, 7); + if (link_sta->he_cap.has_he) mt76_rmw_field(dev, addr, GENMASK(31, 24), phy.sgi); else mt76_rmw_field(dev, addr, GENMASK(15, 12), phy.sgi); - ret = mt7996_mcu_set_fixed_field(dev, vif, sta, &phy, + ret = mt7996_mcu_set_fixed_field(dev, link_sta, link, + msta_link, &phy, RATE_PARAM_FIXED_GI); if (ret) return ret; @@ -1993,7 +2005,8 @@ mt7996_mcu_add_rate_ctrl_fixed(struct mt7996_dev *dev, struct ieee80211_vif *vif /* fixed HE_LTF */ if (mask->control[band].he_ltf != GENMASK(7, 0)) { - ret = mt7996_mcu_set_fixed_field(dev, vif, sta, &phy, + ret = mt7996_mcu_set_fixed_field(dev, link_sta, link, + msta_link, &phy, RATE_PARAM_FIXED_HE_LTF); if (ret) return ret; @@ -2004,30 +2017,32 @@ mt7996_mcu_add_rate_ctrl_fixed(struct mt7996_dev *dev, struct ieee80211_vif *vif static void mt7996_mcu_sta_rate_ctrl_tlv(struct sk_buff *skb, struct mt7996_dev *dev, - struct ieee80211_vif *vif, struct ieee80211_sta *sta) + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf, + struct ieee80211_link_sta *link_sta, + struct mt7996_vif_link *link) { #define INIT_RCPI 180 - struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv; - struct mt76_phy *mphy = mvif->deflink.phy->mt76; + struct mt76_phy *mphy = link->phy->mt76; struct cfg80211_chan_def *chandef = &mphy->chandef; - struct cfg80211_bitrate_mask *mask = &mvif->deflink.bitrate_mask; + struct cfg80211_bitrate_mask *mask = &link->bitrate_mask; + u32 cap = link_sta->sta->wme ? STA_CAP_WMM : 0; enum nl80211_band band = chandef->chan->band; struct sta_rec_ra_uni *ra; struct tlv *tlv; - u32 supp_rate = sta->deflink.supp_rates[band]; - u32 cap = sta->wme ? STA_CAP_WMM : 0; + u32 supp_rate = link_sta->supp_rates[band]; tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_RA, sizeof(*ra)); ra = (struct sta_rec_ra_uni *)tlv; ra->valid = true; ra->auto_rate = true; - ra->phy_mode = mt76_connac_get_phy_mode(mphy, vif, band, &sta->deflink); + ra->phy_mode = mt76_connac_get_phy_mode(mphy, vif, band, link_sta); ra->channel = chandef->chan->hw_value; - ra->bw = (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_320) ? - CMD_CBW_320MHZ : sta->deflink.bandwidth; + ra->bw = (link_sta->bandwidth == IEEE80211_STA_RX_BW_320) ? 
+ CMD_CBW_320MHZ : link_sta->bandwidth; ra->phy.bw = ra->bw; - ra->mmps_mode = mt7996_mcu_get_mmps_mode(sta->deflink.smps_mode); + ra->mmps_mode = mt7996_mcu_get_mmps_mode(link_sta->smps_mode); if (supp_rate) { supp_rate &= mask->control[band].legacy; @@ -2047,60 +2062,60 @@ mt7996_mcu_sta_rate_ctrl_tlv(struct sk_buff *skb, struct mt7996_dev *dev, } } - if (sta->deflink.ht_cap.ht_supported) { + if (link_sta->ht_cap.ht_supported) { ra->supp_mode |= MODE_HT; - ra->af = sta->deflink.ht_cap.ampdu_factor; - ra->ht_gf = !!(sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD); + ra->af = link_sta->ht_cap.ampdu_factor; + ra->ht_gf = !!(link_sta->ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD); cap |= STA_CAP_HT; - if (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_20) + if (link_sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) cap |= STA_CAP_SGI_20; - if (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_40) + if (link_sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) cap |= STA_CAP_SGI_40; - if (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_TX_STBC) + if (link_sta->ht_cap.cap & IEEE80211_HT_CAP_TX_STBC) cap |= STA_CAP_TX_STBC; - if (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_RX_STBC) + if (link_sta->ht_cap.cap & IEEE80211_HT_CAP_RX_STBC) cap |= STA_CAP_RX_STBC; - if (vif->bss_conf.ht_ldpc && - (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING)) + if (link_conf->ht_ldpc && + (link_sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING)) cap |= STA_CAP_LDPC; - mt7996_mcu_set_sta_ht_mcs(sta, ra->ht_mcs, + mt7996_mcu_set_sta_ht_mcs(link_sta, ra->ht_mcs, mask->control[band].ht_mcs); ra->supp_ht_mcs = *(__le32 *)ra->ht_mcs; } - if (sta->deflink.vht_cap.vht_supported) { + if (link_sta->vht_cap.vht_supported) { u8 af; ra->supp_mode |= MODE_VHT; af = FIELD_GET(IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK, - sta->deflink.vht_cap.cap); + link_sta->vht_cap.cap); ra->af = max_t(u8, ra->af, af); cap |= STA_CAP_VHT; - if (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80) + if (link_sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80) cap |= STA_CAP_VHT_SGI_80; - if (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_160) + if (link_sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_160) cap |= STA_CAP_VHT_SGI_160; - if (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_TXSTBC) + if (link_sta->vht_cap.cap & IEEE80211_VHT_CAP_TXSTBC) cap |= STA_CAP_VHT_TX_STBC; - if (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_RXSTBC_1) + if (link_sta->vht_cap.cap & IEEE80211_VHT_CAP_RXSTBC_1) cap |= STA_CAP_VHT_RX_STBC; - if ((vif->type != NL80211_IFTYPE_AP || vif->bss_conf.vht_ldpc) && - (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC)) + if ((vif->type != NL80211_IFTYPE_AP || link_conf->vht_ldpc) && + (link_sta->vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC)) cap |= STA_CAP_VHT_LDPC; - mt7996_mcu_set_sta_vht_mcs(sta, ra->supp_vht_mcs, + mt7996_mcu_set_sta_vht_mcs(link_sta, ra->supp_vht_mcs, mask->control[band].vht_mcs); } - if (sta->deflink.he_cap.has_he) { + if (link_sta->he_cap.has_he) { ra->supp_mode |= MODE_HE; cap |= STA_CAP_HE; - if (sta->deflink.he_6ghz_capa.capa) - ra->af = le16_get_bits(sta->deflink.he_6ghz_capa.capa, + if (link_sta->he_6ghz_capa.capa) + ra->af = le16_get_bits(link_sta->he_6ghz_capa.capa, IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP); } ra->sta_cap = cpu_to_le32(cap); @@ -2108,16 +2123,18 @@ mt7996_mcu_sta_rate_ctrl_tlv(struct sk_buff *skb, struct mt7996_dev *dev, memset(ra->rx_rcpi, INIT_RCPI, sizeof(ra->rx_rcpi)); } -int mt7996_mcu_add_rate_ctrl(struct mt7996_dev *dev, struct ieee80211_vif *vif, - struct ieee80211_sta *sta, bool 
changed) +int mt7996_mcu_add_rate_ctrl(struct mt7996_dev *dev, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf, + struct ieee80211_link_sta *link_sta, + struct mt7996_vif_link *link, + struct mt7996_sta_link *msta_link, bool changed) { - struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv; - struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv; struct sk_buff *skb; int ret; - skb = __mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->deflink.mt76, - &msta->wcid, + skb = __mt76_connac_mcu_alloc_sta_req(&dev->mt76, &link->mt76, + &msta_link->wcid, MT7996_STA_UPDATE_MAX_SIZE); if (IS_ERR(skb)) return PTR_ERR(skb); @@ -2127,19 +2144,19 @@ int mt7996_mcu_add_rate_ctrl(struct mt7996_dev *dev, struct ieee80211_vif *vif, * update sta_rec_he here. */ if (changed) - mt7996_mcu_sta_he_tlv(skb, sta); + mt7996_mcu_sta_he_tlv(skb, link_sta, link); /* sta_rec_ra accommodates BW, NSS and only MCS range format * i.e 0-{7,8,9} for VHT. */ - mt7996_mcu_sta_rate_ctrl_tlv(skb, dev, vif, sta); + mt7996_mcu_sta_rate_ctrl_tlv(skb, dev, vif, link_conf, link_sta, link); ret = mt76_mcu_skb_send_msg(&dev->mt76, skb, MCU_WMWA_UNI_CMD(STA_REC_UPDATE), true); if (ret) return ret; - return mt7996_mcu_add_rate_ctrl_fixed(dev, vif, sta); + return mt7996_mcu_add_rate_ctrl_fixed(dev, link_sta, link, msta_link); } static int @@ -2148,6 +2165,7 @@ mt7996_mcu_add_group(struct mt7996_dev *dev, struct ieee80211_vif *vif, { #define MT_STA_BSS_GROUP 1 struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv; + struct mt7996_sta_link *msta_link; struct mt7996_sta *msta; struct { u8 __rsv1[4]; @@ -2166,73 +2184,154 @@ mt7996_mcu_add_group(struct mt7996_dev *dev, struct ieee80211_vif *vif, .val = cpu_to_le32(mvif->deflink.mt76.idx % 16), }; - msta = sta ? (struct mt7996_sta *)sta->drv_priv : &mvif->deflink.sta; - req.wlan_idx = cpu_to_le16(msta->wcid.idx); + msta = sta ? (struct mt7996_sta *)sta->drv_priv : NULL; + msta_link = msta ? 
&msta->deflink : &mvif->deflink.msta_link; + req.wlan_idx = cpu_to_le16(msta_link->wcid.idx); return mt76_mcu_send_msg(&dev->mt76, MCU_WM_UNI_CMD(VOW), &req, sizeof(req), true); } -int mt7996_mcu_add_sta(struct mt7996_dev *dev, struct ieee80211_vif *vif, - struct mt76_vif_link *mlink, - struct ieee80211_sta *sta, int conn_state, bool newly) +static void +mt7996_mcu_sta_mld_setup_tlv(struct mt7996_dev *dev, struct sk_buff *skb, + struct ieee80211_sta *sta) { - struct ieee80211_link_sta *link_sta = NULL; - struct mt76_wcid *wcid = mlink->wcid; - struct sk_buff *skb; - int ret; + struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv; + unsigned long links = sta->valid_links; + unsigned int nlinks = hweight16(links); + struct mld_setup_link *mld_setup_link; + struct sta_rec_mld_setup *mld_setup; + struct mt7996_sta_link *msta_link; + struct ieee80211_vif *vif; + unsigned int link_id; + struct tlv *tlv; + + msta_link = mt76_dereference(msta->link[msta->deflink_id], &dev->mt76); + if (!msta_link) + return; + + tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_MLD, + sizeof(struct sta_rec_mld_setup) + + sizeof(struct mld_setup_link) * nlinks); + + mld_setup = (struct sta_rec_mld_setup *)tlv; + memcpy(mld_setup->mld_addr, sta->addr, ETH_ALEN); + mld_setup->setup_wcid = cpu_to_le16(msta_link->wcid.idx); + mld_setup->primary_id = cpu_to_le16(msta_link->wcid.idx); + + if (nlinks > 1) { + link_id = __ffs(links & ~BIT(msta->deflink_id)); + msta_link = mt76_dereference(msta->link[link_id], + &dev->mt76); + if (!msta_link) + return; + } + mld_setup->seconed_id = cpu_to_le16(msta_link->wcid.idx); + mld_setup->link_num = nlinks; + + vif = container_of((void *)msta->vif, struct ieee80211_vif, drv_priv); + mld_setup_link = (struct mld_setup_link *)mld_setup->link_info; + for_each_set_bit(link_id, &links, IEEE80211_MLD_MAX_NUM_LINKS) { + struct mt7996_vif_link *link; + + msta_link = mt76_dereference(msta->link[link_id], &dev->mt76); + if (!msta_link) + continue; - if (sta) { - struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv; + link = mt7996_vif_link(dev, vif, link_id); + if (!link) + continue; - wcid = &msta->wcid; - link_sta = &sta->deflink; + if (!msta_link) + continue; + + mld_setup_link->wcid = cpu_to_le16(msta_link->wcid.idx); + mld_setup_link->bss_idx = link->mt76.idx; + mld_setup_link++; } +} + +static void +mt7996_mcu_sta_eht_mld_tlv(struct mt7996_dev *dev, struct sk_buff *skb, + struct ieee80211_sta *sta) +{ + struct sta_rec_eht_mld *eht_mld; + struct tlv *tlv; + int i; + + tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_EHT_MLD, sizeof(*eht_mld)); + eht_mld = (struct sta_rec_eht_mld *)tlv; + + for (i = 0; i < ARRAY_SIZE(eht_mld->str_cap); i++) + eht_mld->str_cap[i] = 0x7; +} - skb = __mt76_connac_mcu_alloc_sta_req(&dev->mt76, mlink, wcid, +int mt7996_mcu_add_sta(struct mt7996_dev *dev, + struct ieee80211_bss_conf *link_conf, + struct ieee80211_link_sta *link_sta, + struct mt7996_vif_link *link, + struct mt7996_sta_link *msta_link, + int conn_state, bool newly) +{ + struct mt76_wcid *wcid = msta_link ? &msta_link->wcid : link->mt76.wcid; + struct ieee80211_sta *sta = link_sta ? 
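The MLD setup TLV above picks the secondary link by clearing the primary out of the valid_links bitmap and taking the lowest remaining bit; hweight16() counts the links and __ffs() finds that bit. The same trick in portable C, using GCC builtins in place of the kernel helpers:

```c
#include <stdio.h>

#define BIT(n) (1u << (n))

int main(void)
{
	unsigned int valid_links = BIT(0) | BIT(2);	/* links 0 and 2 active */
	unsigned int deflink_id = 0;			/* primary link */
	unsigned int nlinks = __builtin_popcount(valid_links); /* hweight16() */

	if (nlinks > 1) {
		/* __ffs(links & ~BIT(deflink_id)): lowest set bit once
		 * the primary link is masked out */
		unsigned int second =
			__builtin_ctz(valid_links & ~BIT(deflink_id));

		printf("primary=%u secondary=%u (%u links)\n",
		       deflink_id, second, nlinks);
	}
	return 0;
}
```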
link_sta->sta : NULL; + struct sk_buff *skb; + int ret; + + skb = __mt76_connac_mcu_alloc_sta_req(&dev->mt76, &link->mt76, wcid, MT7996_STA_UPDATE_MAX_SIZE); if (IS_ERR(skb)) return PTR_ERR(skb); /* starec basic */ - mt76_connac_mcu_sta_basic_tlv(&dev->mt76, skb, &vif->bss_conf, link_sta, + mt76_connac_mcu_sta_basic_tlv(&dev->mt76, skb, link_conf, link_sta, conn_state, newly); if (conn_state == CONN_STATE_DISCONNECT) goto out; /* starec hdr trans */ - mt7996_mcu_sta_hdr_trans_tlv(dev, skb, vif, wcid); + mt7996_mcu_sta_hdr_trans_tlv(dev, skb, link_conf->vif, wcid); /* starec tx proc */ mt7996_mcu_sta_tx_proc_tlv(skb); /* tag order is in accordance with firmware dependency. */ - if (sta) { + if (link_sta) { /* starec hdrt mode */ mt7996_mcu_sta_hdrt_tlv(dev, skb); - /* starec bfer */ - mt7996_mcu_sta_bfer_tlv(dev, skb, vif, sta); + if (conn_state == CONN_STATE_CONNECT) { + /* starec bfer */ + mt7996_mcu_sta_bfer_tlv(dev, skb, link_conf, link_sta, + link); + /* starec bfee */ + mt7996_mcu_sta_bfee_tlv(dev, skb, link_conf, link_sta, + link); + } /* starec ht */ - mt7996_mcu_sta_ht_tlv(skb, sta); + mt7996_mcu_sta_ht_tlv(skb, link_sta); /* starec vht */ - mt7996_mcu_sta_vht_tlv(skb, sta); + mt7996_mcu_sta_vht_tlv(skb, link_sta); /* starec uapsd */ - mt76_connac_mcu_sta_uapsd(skb, vif, sta); + mt76_connac_mcu_sta_uapsd(skb, link_conf->vif, sta); /* starec amsdu */ - mt7996_mcu_sta_amsdu_tlv(dev, skb, vif, sta); + mt7996_mcu_sta_amsdu_tlv(dev, skb, link_conf->vif, link_sta, + msta_link); /* starec he */ - mt7996_mcu_sta_he_tlv(skb, sta); + mt7996_mcu_sta_he_tlv(skb, link_sta, link); /* starec he 6g*/ - mt7996_mcu_sta_he_6g_tlv(skb, sta); + mt7996_mcu_sta_he_6g_tlv(skb, link_sta); /* starec eht */ - mt7996_mcu_sta_eht_tlv(skb, sta); + mt7996_mcu_sta_eht_tlv(skb, link_sta); /* starec muru */ - mt7996_mcu_sta_muru_tlv(dev, skb, vif, sta); - /* starec bfee */ - mt7996_mcu_sta_bfee_tlv(dev, skb, vif, sta); + mt7996_mcu_sta_muru_tlv(dev, skb, link_conf, link_sta); + + if (sta->mlo) { + mt7996_mcu_sta_mld_setup_tlv(dev, skb, sta); + mt7996_mcu_sta_eht_mld_tlv(dev, skb, sta); + } } - ret = mt7996_mcu_add_group(dev, vif, sta); + ret = mt7996_mcu_add_group(dev, link_conf->vif, sta); if (ret) { dev_kfree_skb(skb); return ret; @@ -2242,6 +2341,24 @@ int mt7996_mcu_add_sta(struct mt7996_dev *dev, struct ieee80211_vif *vif, MCU_WMWA_UNI_CMD(STA_REC_UPDATE), true); } +int mt7996_mcu_teardown_mld_sta(struct mt7996_dev *dev, + struct mt7996_vif_link *link, + struct mt7996_sta_link *msta_link) +{ + struct sk_buff *skb; + + skb = __mt76_connac_mcu_alloc_sta_req(&dev->mt76, &link->mt76, + &msta_link->wcid, + MT7996_STA_UPDATE_MAX_SIZE); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + mt76_connac_mcu_add_tlv(skb, STA_REC_MLD_OFF, sizeof(struct tlv)); + + return mt76_mcu_skb_send_msg(&dev->mt76, skb, + MCU_WMWA_UNI_CMD(STA_REC_UPDATE), true); +} + static int mt7996_mcu_sta_key_tlv(struct mt76_wcid *wcid, struct sk_buff *skb, @@ -2307,17 +2424,18 @@ int mt7996_mcu_add_key(struct mt76_dev *dev, struct ieee80211_vif *vif, return mt76_mcu_skb_send_msg(dev, skb, mcu_cmd, true); } -static int mt7996_mcu_get_pn(struct mt7996_dev *dev, struct ieee80211_vif *vif, - u8 *pn) +static int mt7996_mcu_get_pn(struct mt7996_dev *dev, + struct mt7996_vif_link *link, + struct mt7996_sta_link *msta_link, u8 *pn) { #define TSC_TYPE_BIGTK_PN 2 - struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv; struct sta_rec_pn_info *pn_info; struct sk_buff *skb, *rskb; struct tlv *tlv; int ret; - skb = mt76_connac_mcu_alloc_sta_req(&dev->mt76, 
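mt7996_mcu_add_sta builds one request out of many tagged records, appended in the order the comment calls out ("tag order is in accordance with firmware dependency"). A loose, self-contained model of that append pattern (buffer handling and tag numbers here are invented; the driver does this on an skb via mt76_connac_mcu_add_tlv()):

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct tlv {
	uint16_t tag;
	uint16_t len;	/* payload length in this simplified model */
};

/* Append one tagged record, return a pointer to its payload. */
static void *add_tlv(uint8_t *buf, size_t *off, uint16_t tag, uint16_t len)
{
	struct tlv hdr = { .tag = tag, .len = len };
	void *payload = buf + *off + sizeof(hdr);

	memcpy(buf + *off, &hdr, sizeof(hdr));
	*off += sizeof(hdr) + len;
	return payload;
}

int main(void)
{
	uint8_t buf[256];
	size_t off = 0;

	/* Records appended in dependency order, as the driver does for
	 * basic -> ht -> vht -> he -> eht -> mld. */
	add_tlv(buf, &off, 0 /* basic */, 16);
	add_tlv(buf, &off, 2 /* ht */, 8);
	printf("message length: %zu bytes\n", off);
	return 0;
}
```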
&mvif->deflink.mt76, &mvif->deflink.sta.wcid); + skb = mt76_connac_mcu_alloc_sta_req(&dev->mt76, &link->mt76, + &msta_link->wcid); if (IS_ERR(skb)) return PTR_ERR(skb); @@ -2341,10 +2459,11 @@ static int mt7996_mcu_get_pn(struct mt7996_dev *dev, struct ieee80211_vif *vif, return 0; } -int mt7996_mcu_bcn_prot_enable(struct mt7996_dev *dev, struct ieee80211_vif *vif, +int mt7996_mcu_bcn_prot_enable(struct mt7996_dev *dev, + struct mt7996_vif_link *link, + struct mt7996_sta_link *msta_link, struct ieee80211_key_conf *key) { - struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv; struct mt7996_mcu_bcn_prot_tlv *bcn_prot; struct sk_buff *skb; struct tlv *tlv; @@ -2353,7 +2472,7 @@ int mt7996_mcu_bcn_prot_enable(struct mt7996_dev *dev, struct ieee80211_vif *vif sizeof(struct mt7996_mcu_bcn_prot_tlv); int ret; - skb = __mt7996_mcu_alloc_bss_req(&dev->mt76, &mvif->deflink.mt76, len); + skb = __mt7996_mcu_alloc_bss_req(&dev->mt76, &link->mt76, len); if (IS_ERR(skb)) return PTR_ERR(skb); @@ -2361,7 +2480,7 @@ int mt7996_mcu_bcn_prot_enable(struct mt7996_dev *dev, struct ieee80211_vif *vif bcn_prot = (struct mt7996_mcu_bcn_prot_tlv *)tlv; - ret = mt7996_mcu_get_pn(dev, vif, pn); + ret = mt7996_mcu_get_pn(dev, link, msta_link, pn); if (ret) { dev_kfree_skb(skb); return ret; @@ -2592,13 +2711,14 @@ int mt7996_mcu_add_beacon(struct ieee80211_hw *hw, struct ieee80211_vif *vif, } int mt7996_mcu_beacon_inband_discov(struct mt7996_dev *dev, - struct ieee80211_vif *vif, u32 changed) + struct ieee80211_bss_conf *link_conf, + struct mt7996_vif_link *link, u32 changed) { #define OFFLOAD_TX_MODE_SU BIT(0) #define OFFLOAD_TX_MODE_MU BIT(1) + struct ieee80211_vif *vif = link_conf->vif; struct ieee80211_hw *hw = mt76_hw(dev); - struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv; - struct mt7996_phy *phy = mt7996_vif_link_phy(&mvif->deflink); + struct mt7996_phy *phy = link->phy; struct mt76_wcid *wcid = &dev->mt76.global_wcid; struct bss_inband_discovery_tlv *discov; struct ieee80211_tx_info *info; @@ -2615,21 +2735,21 @@ int mt7996_mcu_beacon_inband_discov(struct mt7996_dev *dev, chandef = &phy->mt76->chandef; band = chandef->chan->band; - if (vif->bss_conf.nontransmitted) + if (link_conf->nontransmitted) return 0; - rskb = __mt7996_mcu_alloc_bss_req(&dev->mt76, &mvif->deflink.mt76, + rskb = __mt7996_mcu_alloc_bss_req(&dev->mt76, &link->mt76, MT7996_MAX_BSS_OFFLOAD_SIZE); if (IS_ERR(rskb)) return PTR_ERR(rskb); if (changed & BSS_CHANGED_FILS_DISCOVERY && - vif->bss_conf.fils_discovery.max_interval) { - interval = vif->bss_conf.fils_discovery.max_interval; + link_conf->fils_discovery.max_interval) { + interval = link_conf->fils_discovery.max_interval; skb = ieee80211_get_fils_discovery_tmpl(hw, vif); } else if (changed & BSS_CHANGED_UNSOL_BCAST_PROBE_RESP && - vif->bss_conf.unsol_bcast_probe_resp_interval) { - interval = vif->bss_conf.unsol_bcast_probe_resp_interval; + link_conf->unsol_bcast_probe_resp_interval) { + interval = link_conf->unsol_bcast_probe_resp_interval; skb = ieee80211_get_unsol_bcast_probe_resp_tmpl(hw, vif); } @@ -4068,12 +4188,12 @@ mt7996_mcu_set_obss_spr_pd(struct mt7996_phy *phy, } static int -mt7996_mcu_set_obss_spr_siga(struct mt7996_phy *phy, struct ieee80211_vif *vif, +mt7996_mcu_set_obss_spr_siga(struct mt7996_phy *phy, + struct mt7996_vif_link *link, struct ieee80211_he_obss_pd *he_obss_pd) { - struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv; struct mt7996_dev *dev = phy->dev; - u8 omac = mvif->deflink.mt76.omac_idx; + u8 omac = link->mt76.omac_idx; struct 
{ u8 band_idx; u8 __rsv[3]; @@ -4145,7 +4265,8 @@ mt7996_mcu_set_obss_spr_bitmap(struct mt7996_phy *phy, sizeof(req), true); } -int mt7996_mcu_add_obss_spr(struct mt7996_phy *phy, struct ieee80211_vif *vif, +int mt7996_mcu_add_obss_spr(struct mt7996_phy *phy, + struct mt7996_vif_link *link, struct ieee80211_he_obss_pd *he_obss_pd) { int ret; @@ -4179,7 +4300,7 @@ int mt7996_mcu_add_obss_spr(struct mt7996_phy *phy, struct ieee80211_vif *vif, return ret; /* Set SR prohibit */ - ret = mt7996_mcu_set_obss_spr_siga(phy, vif, he_obss_pd); + ret = mt7996_mcu_set_obss_spr_siga(phy, link, he_obss_pd); if (ret) return ret; @@ -4215,7 +4336,7 @@ int mt7996_mcu_update_bss_color(struct mt7996_dev *dev, #define TWT_AGRT_PROTECT BIT(2) int mt7996_mcu_twt_agrt_update(struct mt7996_dev *dev, - struct mt7996_vif *mvif, + struct mt7996_vif_link *link, struct mt7996_twt_flow *flow, int cmd) { @@ -4246,12 +4367,12 @@ int mt7996_mcu_twt_agrt_update(struct mt7996_dev *dev, .len = cpu_to_le16(sizeof(req) - 4), .tbl_idx = flow->table_id, .cmd = cmd, - .own_mac_idx = mvif->deflink.mt76.omac_idx, + .own_mac_idx = link->mt76.omac_idx, .flowid = flow->id, .peer_id = cpu_to_le16(flow->wcid), .duration = flow->duration, - .bss = mvif->deflink.mt76.idx, - .bss_idx = mvif->deflink.mt76.idx, + .bss = link->mt76.idx, + .bss_idx = link->mt76.idx, .start_tsf = cpu_to_le64(flow->tsf), .mantissa = flow->mantissa, .exponent = flow->exp, @@ -4341,22 +4462,19 @@ int mt7996_mcu_rdd_cmd(struct mt7996_dev *dev, int cmd, u8 index, int mt7996_mcu_wtbl_update_hdr_trans(struct mt7996_dev *dev, struct ieee80211_vif *vif, - struct ieee80211_sta *sta) + struct mt7996_vif_link *link, + struct mt7996_sta_link *msta_link) { - struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv; - struct mt7996_sta *msta; struct sk_buff *skb; - msta = sta ? 
(struct mt7996_sta *)sta->drv_priv : &mvif->deflink.sta; - - skb = __mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->deflink.mt76, - &msta->wcid, + skb = __mt76_connac_mcu_alloc_sta_req(&dev->mt76, &link->mt76, + &msta_link->wcid, MT7996_STA_UPDATE_MAX_SIZE); if (IS_ERR(skb)) return PTR_ERR(skb); /* starec hdr trans */ - mt7996_mcu_sta_hdr_trans_tlv(dev, skb, vif, &msta->wcid); + mt7996_mcu_sta_hdr_trans_tlv(dev, skb, vif, &msta_link->wcid); return mt76_mcu_skb_send_msg(&dev->mt76, skb, MCU_WMWA_UNI_CMD(STA_REC_UPDATE), true); } @@ -4579,7 +4697,7 @@ int mt7996_mcu_set_txpower_sku(struct mt7996_phy *phy) struct sk_buff *skb; int i, tx_power; - tx_power = mt7996_get_power_bound(phy, phy->txpower); + tx_power = mt76_get_power_bound(mphy, phy->txpower); tx_power = mt76_get_rate_power_limits(mphy, mphy->chandef.chan, &la, tx_power); mphy->txpower_cur = tx_power; diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.h index 43468bcaffc6d..2ab6a53bee869 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.h +++ b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.h @@ -625,6 +625,35 @@ struct sta_rec_hdr_trans { u8 mesh; } __packed; +struct sta_rec_mld_setup { + __le16 tag; + __le16 len; + u8 mld_addr[ETH_ALEN]; + __le16 primary_id; + __le16 seconed_id; + __le16 setup_wcid; + u8 link_num; + u8 info; + u8 __rsv[2]; + u8 link_info[]; +} __packed; + +struct sta_rec_eht_mld { + __le16 tag; + __le16 len; + u8 nsep; + u8 __rsv1[2]; + u8 str_cap[__MT_MAX_BAND]; + __le16 eml_cap; + u8 __rsv2[4]; +} __packed; + +struct mld_setup_link { + __le16 wcid; + u8 bss_idx; + u8 __rsv; +} __packed; + struct hdr_trans_en { __le16 tag; __le16 len; @@ -798,6 +827,9 @@ enum { sizeof(struct sta_rec_eht) + \ sizeof(struct sta_rec_hdrt) + \ sizeof(struct sta_rec_hdr_trans) + \ + sizeof(struct sta_rec_mld_setup) + \ + sizeof(struct mld_setup_link) * 3 + \ + sizeof(struct sta_rec_eht_mld) + \ sizeof(struct tlv)) #define MT7996_MAX_BEACON_SIZE 1338 @@ -809,18 +841,6 @@ enum { #define MT7996_MAX_BSS_OFFLOAD_SIZE (MT7996_MAX_BEACON_SIZE + \ MT7996_BEACON_UPDATE_SIZE) -static inline s8 -mt7996_get_power_bound(struct mt7996_phy *phy, s8 txpower) -{ - struct mt76_phy *mphy = phy->mt76; - int n_chains = hweight16(mphy->chainmask); - - txpower = mt76_get_sar_power(mphy, mphy->chandef.chan, txpower * 2); - txpower -= mt76_tx_power_nss_delta(n_chains); - - return txpower; -} - enum { UNI_BAND_CONFIG_RADIO_ENABLE, UNI_BAND_CONFIG_RTS_THRESHOLD = 0x08, @@ -908,7 +928,8 @@ enum { UNI_CMD_SER_SET_RECOVER_L3_TX_DISABLE, UNI_CMD_SER_SET_RECOVER_L3_BF, UNI_CMD_SER_SET_RECOVER_L4_MDP, - UNI_CMD_SER_SET_RECOVER_FULL, + UNI_CMD_SER_SET_RECOVER_FROM_ETH, + UNI_CMD_SER_SET_RECOVER_FULL = 8, UNI_CMD_SER_SET_SYSTEM_ASSERT, /* action */ UNI_CMD_SER_ENABLE = 1, diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c index 7a8ee6c75cf2b..13b188e281bdb 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c +++ b/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c @@ -281,7 +281,7 @@ static int mt7996_mmio_wed_reset(struct mtk_wed_device *wed) if (test_and_set_bit(MT76_STATE_WED_RESET, &mphy->state)) return -EBUSY; - ret = mt7996_mcu_set_ser(dev, UNI_CMD_SER_TRIGGER, UNI_CMD_SER_SET_RECOVER_L1, + ret = mt7996_mcu_set_ser(dev, UNI_CMD_SER_TRIGGER, UNI_CMD_SER_SET_RECOVER_FROM_ETH, mphy->band_idx); if (ret) goto out; @@ -618,9 +618,6 @@ struct mt7996_dev *mt7996_mmio_probe(struct device *pdev, .rx_skb = mt7996_queue_rx_skb, .rx_check = 
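Note how MT7996_STA_UPDATE_MAX_SIZE budgets the new records: the MLD setup TLV ends in a flexible array, so its on-the-wire size is the fixed header plus one mld_setup_link per active link (three are reserved in the macro). The sizing arithmetic, with the layout condensed from the header above (seconed_id spelled as in the driver):

```c
#include <stdint.h>
#include <stdio.h>

struct mld_setup_link {
	uint16_t wcid;
	uint8_t bss_idx;
	uint8_t rsv;
};

struct sta_rec_mld_setup {
	uint16_t tag;
	uint16_t len;
	uint8_t mld_addr[6];
	uint16_t primary_id;
	uint16_t seconed_id;	/* (sic) spelling from the driver header */
	uint16_t setup_wcid;
	uint8_t link_num;
	uint8_t info;
	uint8_t rsv[2];
	struct mld_setup_link link_info[];	/* one entry per link */
};

int main(void)
{
	for (unsigned int nlinks = 1; nlinks <= 3; nlinks++) {
		size_t sz = sizeof(struct sta_rec_mld_setup) +
			    nlinks * sizeof(struct mld_setup_link);

		printf("MLD setup TLV, %u link(s): %zu bytes\n", nlinks, sz);
	}
	return 0;
}
```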
mt7996_rx_check, .rx_poll_complete = mt7996_rx_poll_complete, - .sta_add = mt7996_mac_sta_add, - .sta_event = mt7996_mac_sta_event, - .sta_remove = mt7996_mac_sta_remove, .update_survey = mt7996_update_channel, .set_channel = mt7996_set_channel, .vif_link_add = mt7996_vif_link_add, diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h b/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h index 29fabb9b04ae3..43e646ed6094c 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h +++ b/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h @@ -185,10 +185,10 @@ struct mt7996_twt_flow { DECLARE_EWMA(avg_signal, 10, 8) -struct mt7996_sta { +struct mt7996_sta_link { struct mt76_wcid wcid; /* must be first */ - struct mt7996_vif *vif; + struct mt7996_sta *sta; struct list_head rc_list; u32 airtime_ac[8]; @@ -204,12 +204,22 @@ struct mt7996_sta { u8 flowid_mask; struct mt7996_twt_flow flow[MT7996_MAX_STA_TWT_AGRT]; } twt; + + struct rcu_head rcu_head; +}; + +struct mt7996_sta { + struct mt7996_sta_link deflink; /* must be first */ + struct mt7996_sta_link __rcu *link[IEEE80211_MLD_MAX_NUM_LINKS]; + u8 deflink_id; + + struct mt7996_vif *vif; }; struct mt7996_vif_link { struct mt76_vif_link mt76; /* must be first */ - struct mt7996_sta sta; + struct mt7996_sta_link msta_link; struct mt7996_phy *phy; struct ieee80211_tx_queue_params queue_params[IEEE80211_NUM_ACS]; @@ -525,7 +535,7 @@ struct mt7996_dev *mt7996_mmio_probe(struct device *pdev, void __iomem *mem_base, u32 device_id); void mt7996_wfsys_reset(struct mt7996_dev *dev); irqreturn_t mt7996_irq_handler(int irq, void *dev_instance); -u64 __mt7996_get_tsf(struct ieee80211_hw *hw, struct mt7996_vif *mvif); +u64 __mt7996_get_tsf(struct ieee80211_hw *hw, struct mt7996_vif_link *link); int mt7996_register_device(struct mt7996_dev *dev); void mt7996_unregister_device(struct mt7996_dev *dev); int mt7996_vif_link_add(struct mt76_phy *mphy, struct ieee80211_vif *vif, @@ -553,7 +563,7 @@ int mt7996_run(struct mt7996_phy *phy); int mt7996_mcu_init(struct mt7996_dev *dev); int mt7996_mcu_init_firmware(struct mt7996_dev *dev); int mt7996_mcu_twt_agrt_update(struct mt7996_dev *dev, - struct mt7996_vif *mvif, + struct mt7996_vif_link *link, struct mt7996_twt_flow *flow, int cmd); int mt7996_mcu_add_dev_info(struct mt7996_phy *phy, struct ieee80211_vif *vif, @@ -561,35 +571,52 @@ int mt7996_mcu_add_dev_info(struct mt7996_phy *phy, struct ieee80211_vif *vif, struct mt76_vif_link *mlink, bool enable); int mt7996_mcu_add_bss_info(struct mt7996_phy *phy, struct ieee80211_vif *vif, struct ieee80211_bss_conf *link_conf, - struct mt76_vif_link *mlink, int enable); -int mt7996_mcu_add_sta(struct mt7996_dev *dev, struct ieee80211_vif *vif, - struct mt76_vif_link *mlink, - struct ieee80211_sta *sta, int conn_state, bool newly); + struct mt76_vif_link *mlink, + struct mt7996_sta_link *msta_link, int enable); +int mt7996_mcu_add_sta(struct mt7996_dev *dev, + struct ieee80211_bss_conf *link_conf, + struct ieee80211_link_sta *link_sta, + struct mt7996_vif_link *link, + struct mt7996_sta_link *msta_link, + int conn_state, bool newly); +int mt7996_mcu_teardown_mld_sta(struct mt7996_dev *dev, + struct mt7996_vif_link *link, + struct mt7996_sta_link *msta_link); int mt7996_mcu_add_tx_ba(struct mt7996_dev *dev, struct ieee80211_ampdu_params *params, - bool add); + struct mt7996_vif_link *link, + struct mt7996_sta_link *msta_link, bool enable); int mt7996_mcu_add_rx_ba(struct mt7996_dev *dev, struct ieee80211_ampdu_params *params, - bool add); + struct 
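The mt7996_sta split above hangs each link off an __rcu pointer array (with an rcu_head for deferred freeing), and readers go through mt76_dereference(). That is the classic RCU publish/subscribe shape; modeled very loosely in C11 with release/acquire atomics standing in for the kernel primitives:

```c
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_LINKS 15	/* mirrors IEEE80211_MLD_MAX_NUM_LINKS */

struct sta_link { int wcid_idx; };

struct sta {
	/* stands in for: struct mt7996_sta_link __rcu *link[MAX_LINKS] */
	_Atomic(struct sta_link *) link[MAX_LINKS];
};

int main(void)
{
	struct sta sta = {0};
	struct sta_link *l = malloc(sizeof(*l));

	l->wcid_idx = 42;
	/* publish: rcu_assign_pointer() is (at least) a release store */
	atomic_store_explicit(&sta.link[0], l, memory_order_release);

	/* subscribe: mt76_dereference() reads the pointer under the RCU
	 * read lock; an acquire load approximates it here */
	struct sta_link *cur =
		atomic_load_explicit(&sta.link[0], memory_order_acquire);
	if (cur)
		printf("link 0 -> wcid %d\n", cur->wcid_idx);

	free(cur);
	return 0;
}
```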
mt7996_vif_link *link, bool enable); int mt7996_mcu_update_bss_color(struct mt7996_dev *dev, struct mt76_vif_link *mlink, struct cfg80211_he_bss_color *he_bss_color); int mt7996_mcu_add_beacon(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_bss_conf *link_conf); int mt7996_mcu_beacon_inband_discov(struct mt7996_dev *dev, - struct ieee80211_vif *vif, u32 changed); -int mt7996_mcu_add_obss_spr(struct mt7996_phy *phy, struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf, + struct mt7996_vif_link *link, u32 changed); +int mt7996_mcu_add_obss_spr(struct mt7996_phy *phy, + struct mt7996_vif_link *link, struct ieee80211_he_obss_pd *he_obss_pd); -int mt7996_mcu_add_rate_ctrl(struct mt7996_dev *dev, struct ieee80211_vif *vif, - struct ieee80211_sta *sta, bool changed); +int mt7996_mcu_add_rate_ctrl(struct mt7996_dev *dev, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf, + struct ieee80211_link_sta *link_sta, + struct mt7996_vif_link *link, + struct mt7996_sta_link *msta_link, bool changed); int mt7996_set_channel(struct mt76_phy *mphy); int mt7996_mcu_set_chan_info(struct mt7996_phy *phy, u16 tag); int mt7996_mcu_set_tx(struct mt7996_dev *dev, struct ieee80211_vif *vif, struct ieee80211_bss_conf *link_conf); int mt7996_mcu_set_fixed_rate_ctrl(struct mt7996_dev *dev, void *data, u16 version); -int mt7996_mcu_set_fixed_field(struct mt7996_dev *dev, struct ieee80211_vif *vif, - struct ieee80211_sta *sta, void *data, u32 field); +int mt7996_mcu_set_fixed_field(struct mt7996_dev *dev, + struct ieee80211_link_sta *link_sta, + struct mt7996_vif_link *link, + struct mt7996_sta_link *msta_link, + void *data, u32 field); int mt7996_mcu_set_eeprom(struct mt7996_dev *dev); int mt7996_mcu_get_eeprom(struct mt7996_dev *dev, u32 offset, u8 *buf, u32 buf_len); int mt7996_mcu_get_eeprom_free_block(struct mt7996_dev *dev, u8 *block_num); @@ -683,26 +710,19 @@ bool mt7996_mac_wtbl_update(struct mt7996_dev *dev, int idx, u32 mask); void mt7996_mac_reset_counters(struct mt7996_phy *phy); void mt7996_mac_cca_stats_reset(struct mt7996_phy *phy); void mt7996_mac_enable_nf(struct mt7996_dev *dev, u8 band); -void mt7996_mac_enable_rtscts(struct mt7996_dev *dev, - struct ieee80211_vif *vif, bool enable); void mt7996_mac_write_txwi(struct mt7996_dev *dev, __le32 *txwi, struct sk_buff *skb, struct mt76_wcid *wcid, struct ieee80211_key_conf *key, int pid, enum mt76_txq_id qid, u32 changed); void mt7996_mac_set_coverage_class(struct mt7996_phy *phy); -int mt7996_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif, - struct ieee80211_sta *sta); -int mt7996_mac_sta_event(struct mt76_dev *mdev, struct ieee80211_vif *vif, - struct ieee80211_sta *sta, enum mt76_sta_event ev); -void mt7996_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif, - struct ieee80211_sta *sta); void mt7996_mac_work(struct work_struct *work); void mt7996_mac_reset_work(struct work_struct *work); void mt7996_mac_dump_work(struct work_struct *work); void mt7996_mac_sta_rc_work(struct work_struct *work); void mt7996_mac_update_stats(struct mt7996_phy *phy); void mt7996_mac_twt_teardown_flow(struct mt7996_dev *dev, - struct mt7996_sta *msta, + struct mt7996_vif_link *link, + struct mt7996_sta_link *msta_link, u8 flowid); void mt7996_mac_add_twt_setup(struct ieee80211_hw *hw, struct ieee80211_sta *sta, @@ -727,11 +747,14 @@ bool mt7996_debugfs_rx_log(struct mt7996_dev *dev, const void *data, int len); int mt7996_mcu_add_key(struct mt76_dev *dev, struct ieee80211_vif *vif, struct 
ieee80211_key_conf *key, int mcu_cmd, struct mt76_wcid *wcid, enum set_key_cmd cmd); -int mt7996_mcu_bcn_prot_enable(struct mt7996_dev *dev, struct ieee80211_vif *vif, +int mt7996_mcu_bcn_prot_enable(struct mt7996_dev *dev, + struct mt7996_vif_link *link, + struct mt7996_sta_link *msta_link, struct ieee80211_key_conf *key); int mt7996_mcu_wtbl_update_hdr_trans(struct mt7996_dev *dev, struct ieee80211_vif *vif, - struct ieee80211_sta *sta); + struct mt7996_vif_link *link, + struct mt7996_sta_link *msta_link); int mt7996_mcu_cp_support(struct mt7996_dev *dev, u8 mode); #ifdef CONFIG_MAC80211_DEBUGFS void mt7996_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif, diff --git a/drivers/net/wireless/mediatek/mt76/scan.c b/drivers/net/wireless/mediatek/mt76/scan.c index 1c4f9deaaada5..9b20ccbeb8cf1 100644 --- a/drivers/net/wireless/mediatek/mt76/scan.c +++ b/drivers/net/wireless/mediatek/mt76/scan.c @@ -52,11 +52,6 @@ mt76_scan_send_probe(struct mt76_dev *dev, struct cfg80211_ssid *ssid) ether_addr_copy(hdr->addr3, req->bssid); } - info = IEEE80211_SKB_CB(skb); - if (req->no_cck) - info->flags |= IEEE80211_TX_CTL_NO_CCK_RATE; - info->control.flags |= IEEE80211_TX_CTRL_DONT_USE_RATE_MASK; - if (req->ie_len) skb_put_data(skb, req->ie, req->ie_len); @@ -64,10 +59,20 @@ mt76_scan_send_probe(struct mt76_dev *dev, struct cfg80211_ssid *ssid) skb_set_queue_mapping(skb, IEEE80211_AC_VO); rcu_read_lock(); - if (ieee80211_tx_prepare_skb(phy->hw, vif, skb, band, NULL)) - mt76_tx(phy, NULL, mvif->wcid, skb); - else + + if (!ieee80211_tx_prepare_skb(phy->hw, vif, skb, band, NULL)) { ieee80211_free_txskb(phy->hw, skb); + goto out; + } + + info = IEEE80211_SKB_CB(skb); + if (req->no_cck) + info->flags |= IEEE80211_TX_CTL_NO_CCK_RATE; + info->control.flags |= IEEE80211_TX_CTRL_DONT_USE_RATE_MASK; + + mt76_tx(phy, NULL, mvif->wcid, skb); + +out: rcu_read_unlock(); } diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c index af0c50c983ec1..513916469ca2f 100644 --- a/drivers/net/wireless/mediatek/mt76/tx.c +++ b/drivers/net/wireless/mediatek/mt76/tx.c @@ -100,7 +100,8 @@ __mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb, u8 flags, return; /* Tx status can be unreliable. if it fails, mark the frame as ACKed */ - if (flags & MT_TX_CB_TXS_FAILED) { + if (flags & MT_TX_CB_TXS_FAILED && + (dev->drv->drv_flags & MT_DRV_IGNORE_TXS_FAILED)) { info->status.rates[0].count = 0; info->status.rates[0].idx = -1; info->flags |= IEEE80211_TX_STAT_ACK; diff --git a/drivers/net/wireless/quantenna/qtnfmac/qlink.h b/drivers/net/wireless/quantenna/qtnfmac/qlink.h index 674461fa7fb34..eae35b678952d 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/qlink.h +++ b/drivers/net/wireless/quantenna/qtnfmac/qlink.h @@ -1510,10 +1510,15 @@ enum qlink_tlv_id { }; struct qlink_tlv_hdr { - __le16 type; - __le16 len; + /* New members MUST be added within the struct_group() macro below. */ + __struct_group(qlink_tlv_hdr_fixed, __hdr, __packed, + __le16 type; + __le16 len; + ); u8 val[]; } __packed; +static_assert(offsetof(struct qlink_tlv_hdr, val) == sizeof(struct qlink_tlv_hdr_fixed), + "struct member likely outside of __struct_group()"); struct qlink_iface_limit { __le16 max_num; @@ -1567,7 +1572,7 @@ enum qlink_reg_rule_flags { * @dfs_cac_ms: DFS CAC period. 
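The qlink.h hunk is the kernel's stock __struct_group() conversion: the fixed part of the flex-array TLV header gets its own tag (qlink_tlv_hdr_fixed) so other structs can embed it without dragging in the flexible member, and the static_assert trips if someone later adds a field outside the group. Hand-expanded, the macro boils down to an anonymous union like this (a sketch of the expansion, not the kernel macro itself):

```c
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

struct qlink_tlv_hdr_fixed {
	uint16_t type;
	uint16_t len;
};

struct qlink_tlv_hdr {
	union {
		struct {		/* members visible directly... */
			uint16_t type;
			uint16_t len;
		};
		struct qlink_tlv_hdr_fixed hdr; /* ...and as a named header */
	};
	uint8_t val[];	/* flexible payload must stay outside the group */
};

/* Same check the patch adds: the flex array must begin exactly where
 * the fixed header ends, or a member escaped the group. */
static_assert(offsetof(struct qlink_tlv_hdr, val) ==
	      sizeof(struct qlink_tlv_hdr_fixed),
	      "struct member likely outside of __struct_group()");

int main(void)
{
	return 0;
}
```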
*/ struct qlink_tlv_reg_rule { - struct qlink_tlv_hdr hdr; + struct qlink_tlv_hdr_fixed hdr; __le32 start_freq_khz; __le32 end_freq_khz; __le32 max_bandwidth_khz; @@ -1606,7 +1611,7 @@ enum qlink_dfs_state { * @channel: ieee80211 channel settings. */ struct qlink_tlv_channel { - struct qlink_tlv_hdr hdr; + struct qlink_tlv_hdr_fixed hdr; struct qlink_channel chan; } __packed; @@ -1618,7 +1623,7 @@ struct qlink_tlv_channel { * @chan: channel definition data. */ struct qlink_tlv_chandef { - struct qlink_tlv_hdr hdr; + struct qlink_tlv_hdr_fixed hdr; struct qlink_chandef chdef; } __packed; @@ -1643,7 +1648,7 @@ enum qlink_ie_set_type { * @ie_data: IEs data. */ struct qlink_tlv_ie_set { - struct qlink_tlv_hdr hdr; + struct qlink_tlv_hdr_fixed hdr; u8 type; u8 flags; u8 rsvd[2]; @@ -1657,7 +1662,7 @@ struct qlink_tlv_ie_set { * @ie_data: IEs data. */ struct qlink_tlv_ext_ie { - struct qlink_tlv_hdr hdr; + struct qlink_tlv_hdr_fixed hdr; u8 eid_ext; u8 rsvd[3]; u8 ie_data[]; @@ -1678,7 +1683,7 @@ struct qlink_sband_iftype_data { * @iftype_data: interface type data entries. */ struct qlink_tlv_iftype_data { - struct qlink_tlv_hdr hdr; + struct qlink_tlv_hdr_fixed hdr; u8 n_iftype_data; u8 rsvd[3]; struct qlink_sband_iftype_data iftype_data[]; diff --git a/drivers/net/wireless/realtek/rtl8xxxu/8192c.c b/drivers/net/wireless/realtek/rtl8xxxu/8192c.c index 0abb1b092bc20..73034e7e41d14 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/8192c.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/8192c.c @@ -644,6 +644,8 @@ struct rtl8xxxu_fileops rtl8192cu_fops = { .rx_agg_buf_size = 16000, .tx_desc_size = sizeof(struct rtl8xxxu_txdesc32), .rx_desc_size = sizeof(struct rtl8xxxu_rxdesc16), + .supports_ap = 1, + .max_macid_num = 32, .max_sec_cam_num = 32, .adda_1t_init = 0x0b1b25a0, .adda_1t_path_on = 0x0bdb25a0, diff --git a/drivers/net/wireless/realtek/rtl8xxxu/core.c b/drivers/net/wireless/realtek/rtl8xxxu/core.c index 4ce0c05c51291..569856ca677f6 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/core.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/core.c @@ -860,9 +860,10 @@ rtl8xxxu_writeN(struct rtl8xxxu_priv *priv, u16 addr, u8 *buf, u16 len) return len; write_error: - dev_info(&udev->dev, - "%s: Failed to write block at addr: %04x size: %04x\n", - __func__, addr, blocksize); + if (rtl8xxxu_debug & RTL8XXXU_DEBUG_REG_WRITE) + dev_info(&udev->dev, + "%s: Failed to write block at addr: %04x size: %04x\n", + __func__, addr, blocksize); return -EAGAIN; } @@ -4064,8 +4065,14 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw) */ rtl8xxxu_write16(priv, REG_TRXFF_BNDY + 2, fops->trxff_boundary); - ret = rtl8xxxu_download_firmware(priv); - dev_dbg(dev, "%s: download_firmware %i\n", __func__, ret); + for (int retry = 5; retry >= 0 ; retry--) { + ret = rtl8xxxu_download_firmware(priv); + dev_dbg(dev, "%s: download_firmware %i\n", __func__, ret); + if (ret != -EAGAIN) + break; + if (retry) + dev_dbg(dev, "%s: retry firmware download\n", __func__); + } if (ret) goto exit; ret = rtl8xxxu_start_firmware(priv); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c index d429560009bb6..e07402e73ba32 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c @@ -484,7 +484,7 @@ bool rtl92d_phy_config_rf_with_headerfile(struct ieee80211_hw *hw, * pathA or mac1 has to set phy0&phy1 pathA */ if ((content == radiob_txt) && (rfpath == RF90_PATH_A)) { rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, 
- " ===> althougth Path A, we load radiob.txt\n"); + " ===> although Path A, we load radiob.txt\n"); radioa_arraylen = radiob_arraylen; radioa_array_table = radiob_array_table; } @@ -750,7 +750,7 @@ static void _rtl92d_phy_switch_rf_setting(struct ieee80211_hw *hw, u8 channel) && rtlhal->interfaceindex == 1) { need_pwr_down = rtl92d_phy_enable_anotherphy(hw, false); rtlhal->during_mac1init_radioa = true; - /* asume no this case */ + /* assume no this case */ if (need_pwr_down) rtl92d_phy_enable_rf_env(hw, path, &u4regvalue); @@ -1885,7 +1885,7 @@ static void _rtl92d_phy_reload_lck_setting(struct ieee80211_hw *hw, bneed_powerdown_radio = rtl92d_phy_enable_anotherphy(hw, false); rtlpriv->rtlhal.during_mac1init_radioa = true; - /* asume no this case */ + /* assume no this case */ if (bneed_powerdown_radio) rtl92d_phy_enable_rf_env(hw, erfpath, &u4regvalue); diff --git a/drivers/net/wireless/realtek/rtw88/Kconfig b/drivers/net/wireless/realtek/rtw88/Kconfig index ab21b8059e0bf..3736f290bd426 100644 --- a/drivers/net/wireless/realtek/rtw88/Kconfig +++ b/drivers/net/wireless/realtek/rtw88/Kconfig @@ -54,6 +54,9 @@ config RTW88_8812A tristate select RTW88_88XXA +config RTW88_8814A + tristate + config RTW88_8822BE tristate "Realtek 8822BE PCI wireless network adapter" depends on PCI @@ -222,6 +225,28 @@ config RTW88_8812AU 802.11ac USB wireless network adapter +config RTW88_8814AE + tristate "Realtek 8814AE PCI wireless network adapter" + depends on PCI + select RTW88_CORE + select RTW88_PCI + select RTW88_8814A + help + Select this option will enable support for 8814AE chipset + + 802.11ac PCIe wireless network adapter + +config RTW88_8814AU + tristate "Realtek 8814AU USB wireless network adapter" + depends on USB + select RTW88_CORE + select RTW88_USB + select RTW88_8814A + help + Select this option will enable support for 8814AU chipset + + 802.11ac USB wireless network adapter + config RTW88_DEBUG bool "Realtek rtw88 debug support" depends on RTW88_CORE diff --git a/drivers/net/wireless/realtek/rtw88/Makefile b/drivers/net/wireless/realtek/rtw88/Makefile index 0cbbb366e3930..0b3da05a29380 100644 --- a/drivers/net/wireless/realtek/rtw88/Makefile +++ b/drivers/net/wireless/realtek/rtw88/Makefile @@ -94,6 +94,15 @@ rtw88_8821au-objs := rtw8821au.o obj-$(CONFIG_RTW88_8812AU) += rtw88_8812au.o rtw88_8812au-objs := rtw8812au.o +obj-$(CONFIG_RTW88_8814A) += rtw88_8814a.o +rtw88_8814a-objs := rtw8814a.o rtw8814a_table.o + +obj-$(CONFIG_RTW88_8814AE) += rtw88_8814ae.o +rtw88_8814ae-objs := rtw8814ae.o + +obj-$(CONFIG_RTW88_8814AU) += rtw88_8814au.o +rtw88_8814au-objs := rtw8814au.o + obj-$(CONFIG_RTW88_PCI) += rtw88_pci.o rtw88_pci-objs := pci.o diff --git a/drivers/net/wireless/realtek/rtw88/debug.c b/drivers/net/wireless/realtek/rtw88/debug.c index 364ec0436d0fe..b67d69b01f879 100644 --- a/drivers/net/wireless/realtek/rtw88/debug.c +++ b/drivers/net/wireless/realtek/rtw88/debug.c @@ -654,10 +654,10 @@ static void rtw_print_rate(struct seq_file *m, u8 rate) case DESC_RATE6M...DESC_RATE54M: rtw_print_ofdm_rate_txt(m, rate); break; - case DESC_RATEMCS0...DESC_RATEMCS15: + case DESC_RATEMCS0...DESC_RATEMCS31: rtw_print_ht_rate_txt(m, rate); break; - case DESC_RATEVHT1SS_MCS0...DESC_RATEVHT2SS_MCS9: + case DESC_RATEVHT1SS_MCS0...DESC_RATEVHT4SS_MCS9: rtw_print_vht_rate_txt(m, rate); break; default: @@ -692,9 +692,11 @@ static int rtw_debugfs_get_tx_pwr_tbl(struct seq_file *m, void *v) { struct rtw_debugfs_priv *debugfs_priv = m->private; struct rtw_dev *rtwdev = debugfs_priv->rtwdev; + struct 
rtw_power_params pwr_param = {0}; struct rtw_hal *hal = &rtwdev->hal; + u8 nss = rtwdev->efuse.hw_cap.nss; u8 path, rate, bw, ch, regd; - struct rtw_power_params pwr_param = {0}; + u8 max_ht_rate, max_rate; mutex_lock(&rtwdev->mutex); bw = hal->current_band_width; @@ -707,19 +709,23 @@ static int rtw_debugfs_get_tx_pwr_tbl(struct seq_file *m, void *v) seq_printf(m, "%-4s %-10s %-9s %-9s (%-4s %-4s %-4s) %-4s\n", "path", "rate", "pwr", "base", "byr", "lmt", "sar", "rem"); + max_ht_rate = DESC_RATEMCS0 + nss * 8 - 1; + + if (rtwdev->chip->vht_supported) + max_rate = DESC_RATEVHT1SS_MCS0 + nss * 10 - 1; + else + max_rate = max_ht_rate; + mutex_lock(&hal->tx_power_mutex); - for (path = RF_PATH_A; path <= RF_PATH_B; path++) { + for (path = RF_PATH_A; path < hal->rf_path_num; path++) { /* there is no CCK rates used in 5G */ if (hal->current_band_type == RTW_BAND_5G) rate = DESC_RATE6M; else rate = DESC_RATE1M; - /* now, not support vht 3ss and vht 4ss*/ - for (; rate <= DESC_RATEVHT2SS_MCS9; rate++) { - /* now, not support ht 3ss and ht 4ss*/ - if (rate > DESC_RATEMCS15 && - rate < DESC_RATEVHT1SS_MCS0) + for (; rate <= max_rate; rate++) { + if (rate > max_ht_rate && rate <= DESC_RATEMCS31) continue; rtw_get_tx_power_params(rtwdev, path, rate, bw, @@ -849,20 +855,28 @@ static int rtw_debugfs_get_phy_info(struct seq_file *m, void *v) last_cnt->num_qry_pkt[rate_id + 9]); } - seq_printf(m, "[RSSI(dBm)] = {%d, %d}\n", + seq_printf(m, "[RSSI(dBm)] = {%d, %d, %d, %d}\n", dm_info->rssi[RF_PATH_A] - 100, - dm_info->rssi[RF_PATH_B] - 100); - seq_printf(m, "[Rx EVM(dB)] = {-%d, -%d}\n", + dm_info->rssi[RF_PATH_B] - 100, + dm_info->rssi[RF_PATH_C] - 100, + dm_info->rssi[RF_PATH_D] - 100); + seq_printf(m, "[Rx EVM(dB)] = {-%d, -%d, -%d, -%d}\n", dm_info->rx_evm_dbm[RF_PATH_A], - dm_info->rx_evm_dbm[RF_PATH_B]); - seq_printf(m, "[Rx SNR] = {%d, %d}\n", + dm_info->rx_evm_dbm[RF_PATH_B], + dm_info->rx_evm_dbm[RF_PATH_C], + dm_info->rx_evm_dbm[RF_PATH_D]); + seq_printf(m, "[Rx SNR] = {%d, %d, %d, %d}\n", dm_info->rx_snr[RF_PATH_A], - dm_info->rx_snr[RF_PATH_B]); - seq_printf(m, "[CFO_tail(KHz)] = {%d, %d}\n", + dm_info->rx_snr[RF_PATH_B], + dm_info->rx_snr[RF_PATH_C], + dm_info->rx_snr[RF_PATH_D]); + seq_printf(m, "[CFO_tail(KHz)] = {%d, %d, %d, %d}\n", dm_info->cfo_tail[RF_PATH_A], - dm_info->cfo_tail[RF_PATH_B]); + dm_info->cfo_tail[RF_PATH_B], + dm_info->cfo_tail[RF_PATH_C], + dm_info->cfo_tail[RF_PATH_D]); - if (dm_info->curr_rx_rate >= DESC_RATE11M) { + if (dm_info->curr_rx_rate >= DESC_RATE6M) { seq_puts(m, "[Rx Average Status]:\n"); seq_printf(m, " * OFDM, EVM: {-%d}, SNR: {%d}\n", (u8)ewma_evm_read(&ewma_evm[RTW_EVM_OFDM]), @@ -875,6 +889,13 @@ static int rtw_debugfs_get_phy_info(struct seq_file *m, void *v) (u8)ewma_evm_read(&ewma_evm[RTW_EVM_2SS_B]), (u8)ewma_snr_read(&ewma_snr[RTW_SNR_2SS_A]), (u8)ewma_snr_read(&ewma_snr[RTW_SNR_2SS_B])); + seq_printf(m, " * 3SS, EVM: {-%d, -%d, -%d}, SNR: {%d, %d, %d}\n", + (u8)ewma_evm_read(&ewma_evm[RTW_EVM_3SS_A]), + (u8)ewma_evm_read(&ewma_evm[RTW_EVM_3SS_B]), + (u8)ewma_evm_read(&ewma_evm[RTW_EVM_3SS_C]), + (u8)ewma_snr_read(&ewma_snr[RTW_SNR_3SS_A]), + (u8)ewma_snr_read(&ewma_snr[RTW_SNR_3SS_B]), + (u8)ewma_snr_read(&ewma_snr[RTW_SNR_3SS_C])); } seq_puts(m, "[Rx Counter]:\n"); diff --git a/drivers/net/wireless/realtek/rtw88/fw.c b/drivers/net/wireless/realtek/rtw88/fw.c index 02389b7c68768..6b563ac489a74 100644 --- a/drivers/net/wireless/realtek/rtw88/fw.c +++ b/drivers/net/wireless/realtek/rtw88/fw.c @@ -735,6 +735,7 @@ void rtw_fw_send_ra_info(struct rtw_dev 
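The debugfs change above stops hard-coding two streams: the highest HT index is nss * 8 rates past MCS0, and the highest VHT index is nss * 10 past VHT 1SS MCS0. With stand-in index values that follow the driver's enum ordering (4 CCK plus 8 OFDM rates before MCS0, 32 HT rates before VHT):

```c
#include <stdio.h>

/* Stand-ins consistent with rtw88's DESC_RATE* ordering. */
#define DESC_RATEMCS0        12	/* after 4 CCK + 8 OFDM rates */
#define DESC_RATEVHT1SS_MCS0 44	/* after 32 HT MCS rates */

int main(void)
{
	for (unsigned int nss = 1; nss <= 4; nss++) {
		unsigned int max_ht_rate = DESC_RATEMCS0 + nss * 8 - 1;
		unsigned int max_vht_rate = DESC_RATEVHT1SS_MCS0 + nss * 10 - 1;

		printf("nss=%u: HT up to idx %u, VHT up to idx %u\n",
		       nss, max_ht_rate, max_vht_rate);
	}
	return 0;
}
```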
*rtwdev, struct rtw_sta_info *si, { u8 h2c_pkt[H2C_PKT_SIZE] = {0}; bool disable_pt = true; + u32 mask_hi; SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_RA_INFO); @@ -755,6 +756,20 @@ void rtw_fw_send_ra_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si, si->init_ra_lv = 0; rtw_fw_send_h2c_command(rtwdev, h2c_pkt); + + if (rtwdev->chip->id != RTW_CHIP_TYPE_8814A) + return; + + SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_RA_INFO_HI); + + mask_hi = si->ra_mask >> 32; + + SET_RA_INFO_RA_MASK0(h2c_pkt, (mask_hi & 0xff)); + SET_RA_INFO_RA_MASK1(h2c_pkt, (mask_hi & 0xff00) >> 8); + SET_RA_INFO_RA_MASK2(h2c_pkt, (mask_hi & 0xff0000) >> 16); + SET_RA_INFO_RA_MASK3(h2c_pkt, (mask_hi & 0xff000000) >> 24); + + rtw_fw_send_h2c_command(rtwdev, h2c_pkt); } void rtw_fw_media_status_report(struct rtw_dev *rtwdev, u8 mac_id, bool connect) diff --git a/drivers/net/wireless/realtek/rtw88/fw.h b/drivers/net/wireless/realtek/rtw88/fw.h index 404de1b0c407b..48ad9ceab6ea1 100644 --- a/drivers/net/wireless/realtek/rtw88/fw.h +++ b/drivers/net/wireless/realtek/rtw88/fw.h @@ -557,6 +557,7 @@ static inline void rtw_h2c_pkt_set_header(u8 *h2c_pkt, u8 sub_id) #define H2C_CMD_DEFAULT_PORT 0x2c #define H2C_CMD_RA_INFO 0x40 #define H2C_CMD_RSSI_MONITOR 0x42 +#define H2C_CMD_RA_INFO_HI 0x46 #define H2C_CMD_BCN_FILTER_OFFLOAD_P0 0x56 #define H2C_CMD_BCN_FILTER_OFFLOAD_P1 0x57 #define H2C_CMD_WL_PHY_INFO 0x58 diff --git a/drivers/net/wireless/realtek/rtw88/mac.c b/drivers/net/wireless/realtek/rtw88/mac.c index cae9cca6dca3d..0491f501c1383 100644 --- a/drivers/net/wireless/realtek/rtw88/mac.c +++ b/drivers/net/wireless/realtek/rtw88/mac.c @@ -291,6 +291,7 @@ static int rtw_mac_power_switch(struct rtw_dev *rtwdev, bool pwr_on) if (rtw_read8(rtwdev, REG_CR) == 0xea) cur_pwr = false; else if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_USB && + chip->id != RTW_CHIP_TYPE_8814A && (rtw_read8(rtwdev, REG_SYS_STATUS1 + 1) & BIT(0))) cur_pwr = false; else @@ -784,7 +785,8 @@ static int __rtw_download_firmware(struct rtw_dev *rtwdev, if (!check_firmware_size(data, size)) return -EINVAL; - if (!ltecoex_read_reg(rtwdev, 0x38, <ecoex_bckp)) + if (rtwdev->chip->ltecoex_addr && + !ltecoex_read_reg(rtwdev, 0x38, <ecoex_bckp)) return -EBUSY; wlan_cpu_enable(rtwdev, false); @@ -802,7 +804,8 @@ static int __rtw_download_firmware(struct rtw_dev *rtwdev, wlan_cpu_enable(rtwdev, true); - if (!ltecoex_reg_write(rtwdev, 0x38, ltecoex_bckp)) { + if (rtwdev->chip->ltecoex_addr && + !ltecoex_reg_write(rtwdev, 0x38, ltecoex_bckp)) { ret = -EBUSY; goto dlfw_fail; } diff --git a/drivers/net/wireless/realtek/rtw88/main.c b/drivers/net/wireless/realtek/rtw88/main.c index 0cee0fd8c0ef0..959f56a3cc1ab 100644 --- a/drivers/net/wireless/realtek/rtw88/main.c +++ b/drivers/net/wireless/realtek/rtw88/main.c @@ -136,7 +136,7 @@ u16 rtw_desc_to_bitrate(u8 desc_rate) return rate.bitrate; } -static struct ieee80211_supported_band rtw_band_2ghz = { +static const struct ieee80211_supported_band rtw_band_2ghz = { .band = NL80211_BAND_2GHZ, .channels = rtw_channeltable_2g, @@ -149,7 +149,7 @@ static struct ieee80211_supported_band rtw_band_2ghz = { .vht_cap = {0}, }; -static struct ieee80211_supported_band rtw_band_5ghz = { +static const struct ieee80211_supported_band rtw_band_5ghz = { .band = NL80211_BAND_5GHZ, .channels = rtw_channeltable_5g, @@ -1234,7 +1234,9 @@ void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si, if (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC) ldpc_en = VHT_LDPC_EN; } else if (sta->deflink.ht_cap.ht_supported) { - ra_mask |= 
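For the 8814A, rtw_fw_send_ra_info now follows the usual H2C_CMD_RA_INFO message with an H2C_CMD_RA_INFO_HI carrying bits 32..63 of the rate mask, since one payload only holds four mask bytes. The slicing is plain shift-and-mask:

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t ra_mask = 0x000000ffff0ff0f0ULL;	/* example 64-bit mask */
	uint32_t mask_lo = (uint32_t)ra_mask;		/* first H2C message */
	uint32_t mask_hi = ra_mask >> 32;		/* H2C_CMD_RA_INFO_HI */

	/* Four bytes per message, as SET_RA_INFO_RA_MASK0..3 slice them. */
	for (int i = 0; i < 4; i++)
		printf("lo[%d]=0x%02x hi[%d]=0x%02x\n",
		       i, (unsigned int)((mask_lo >> (8 * i)) & 0xff),
		       i, (unsigned int)((mask_hi >> (8 * i)) & 0xff));
	return 0;
}
```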
(sta->deflink.ht_cap.mcs.rx_mask[1] << 20) | + ra_mask |= ((u64)sta->deflink.ht_cap.mcs.rx_mask[3] << 36) | + ((u64)sta->deflink.ht_cap.mcs.rx_mask[2] << 28) | + (sta->deflink.ht_cap.mcs.rx_mask[1] << 20) | (sta->deflink.ht_cap.mcs.rx_mask[0] << 12); if (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_RX_STBC) stbc_en = HT_STBC_EN; @@ -1244,6 +1246,9 @@ void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si, if (efuse->hw_cap.nss == 1 || rtwdev->hal.txrx_1ss) ra_mask &= RA_MASK_VHT_RATES_1SS | RA_MASK_HT_RATES_1SS; + else if (efuse->hw_cap.nss == 2) + ra_mask &= RA_MASK_VHT_RATES_2SS | RA_MASK_HT_RATES_2SS | + RA_MASK_VHT_RATES_1SS | RA_MASK_HT_RATES_1SS; if (hal->current_band_type == RTW_BAND_5G) { ra_mask |= (u64)sta->deflink.supp_rates[NL80211_BAND_5GHZ] << 4; @@ -1302,10 +1307,9 @@ void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si, break; } - if (sta->deflink.vht_cap.vht_supported && ra_mask & 0xffc00000) - tx_num = 2; - else if (sta->deflink.ht_cap.ht_supported && ra_mask & 0xfff00000) - tx_num = 2; + if (sta->deflink.vht_cap.vht_supported || + sta->deflink.ht_cap.ht_supported) + tx_num = efuse->hw_cap.nss; rate_id = get_rate_id(wireless_set, bw_mode, tx_num); @@ -1561,6 +1565,7 @@ static void rtw_init_ht_cap(struct rtw_dev *rtwdev, { const struct rtw_chip_info *chip = rtwdev->chip; struct rtw_efuse *efuse = &rtwdev->efuse; + int i; ht_cap->ht_supported = true; ht_cap->cap = 0; @@ -1580,25 +1585,20 @@ static void rtw_init_ht_cap(struct rtw_dev *rtwdev, ht_cap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K; ht_cap->ampdu_density = chip->ampdu_density; ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED; - if (efuse->hw_cap.nss > 1) { - ht_cap->mcs.rx_mask[0] = 0xFF; - ht_cap->mcs.rx_mask[1] = 0xFF; - ht_cap->mcs.rx_mask[4] = 0x01; - ht_cap->mcs.rx_highest = cpu_to_le16(300); - } else { - ht_cap->mcs.rx_mask[0] = 0xFF; - ht_cap->mcs.rx_mask[1] = 0x00; - ht_cap->mcs.rx_mask[4] = 0x01; - ht_cap->mcs.rx_highest = cpu_to_le16(150); - } + + for (i = 0; i < efuse->hw_cap.nss; i++) + ht_cap->mcs.rx_mask[i] = 0xFF; + ht_cap->mcs.rx_mask[4] = 0x01; + ht_cap->mcs.rx_highest = cpu_to_le16(150 * efuse->hw_cap.nss); } static void rtw_init_vht_cap(struct rtw_dev *rtwdev, struct ieee80211_sta_vht_cap *vht_cap) { struct rtw_efuse *efuse = &rtwdev->efuse; - u16 mcs_map; + u16 mcs_map = 0; __le16 highest; + int i; if (efuse->hw_cap.ptcl != EFUSE_HW_CAP_IGNORE && efuse->hw_cap.ptcl != EFUSE_HW_CAP_PTCL_VHT) @@ -1621,21 +1621,15 @@ static void rtw_init_vht_cap(struct rtw_dev *rtwdev, if (rtw_chip_has_rx_ldpc(rtwdev)) vht_cap->cap |= IEEE80211_VHT_CAP_RXLDPC; - mcs_map = IEEE80211_VHT_MCS_SUPPORT_0_9 << 0 | - IEEE80211_VHT_MCS_NOT_SUPPORTED << 4 | - IEEE80211_VHT_MCS_NOT_SUPPORTED << 6 | - IEEE80211_VHT_MCS_NOT_SUPPORTED << 8 | - IEEE80211_VHT_MCS_NOT_SUPPORTED << 10 | - IEEE80211_VHT_MCS_NOT_SUPPORTED << 12 | - IEEE80211_VHT_MCS_NOT_SUPPORTED << 14; - if (efuse->hw_cap.nss > 1) { - highest = cpu_to_le16(780); - mcs_map |= IEEE80211_VHT_MCS_SUPPORT_0_9 << 2; - } else { - highest = cpu_to_le16(390); - mcs_map |= IEEE80211_VHT_MCS_NOT_SUPPORTED << 2; + for (i = 0; i < 8; i++) { + if (i < efuse->hw_cap.nss) + mcs_map |= IEEE80211_VHT_MCS_SUPPORT_0_9 << (i * 2); + else + mcs_map |= IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2); } + highest = cpu_to_le16(390 * efuse->hw_cap.nss); + vht_cap->vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map); vht_cap->vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map); vht_cap->vht_mcs.rx_highest = highest; diff --git a/drivers/net/wireless/realtek/rtw88/main.h 
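rtw_init_vht_cap's rewrite leans on the VHT MCS map encoding: two bits per spatial stream, where code 2 means MCS 0-9 supported and code 3 means the stream is absent (these are the IEEE80211_VHT_MCS_* values). The loop, standalone:

```c
#include <stdint.h>
#include <stdio.h>

#define VHT_MCS_SUPPORT_0_9   2	/* two-bit codes from the VHT spec */
#define VHT_MCS_NOT_SUPPORTED 3

static uint16_t build_vht_mcs_map(unsigned int nss)
{
	uint16_t mcs_map = 0;

	/* Two bits per spatial stream, eight streams total. */
	for (unsigned int i = 0; i < 8; i++) {
		unsigned int code = i < nss ? VHT_MCS_SUPPORT_0_9
					    : VHT_MCS_NOT_SUPPORTED;

		mcs_map |= code << (i * 2);
	}
	return mcs_map;
}

int main(void)
{
	/* nss=4 -> 0xffaa: streams 0-3 carry MCS 0-9, streams 4-7 absent */
	printf("mcs_map(nss=4) = 0x%04x\n", build_vht_mcs_map(4));
	return 0;
}
```

The same per-nss idea drives rx_highest: 390 Mb/s (one-stream VHT MCS9 at 80 MHz, long GI) scales linearly with the stream count.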
b/drivers/net/wireless/realtek/rtw88/main.h index 62cd4c5263019..02343e059fd97 100644 --- a/drivers/net/wireless/realtek/rtw88/main.h +++ b/drivers/net/wireless/realtek/rtw88/main.h @@ -61,7 +61,7 @@ enum rtw_hci_type { }; struct rtw_hci { - struct rtw_hci_ops *ops; + const struct rtw_hci_ops *ops; enum rtw_hci_type type; u32 rpwm_addr; @@ -166,9 +166,14 @@ enum rtw_rate_section { RTW_RATE_SECTION_HT_2S, RTW_RATE_SECTION_VHT_1S, RTW_RATE_SECTION_VHT_2S, + __RTW_RATE_SECTION_2SS_MAX = RTW_RATE_SECTION_VHT_2S, + RTW_RATE_SECTION_HT_3S, + RTW_RATE_SECTION_HT_4S, + RTW_RATE_SECTION_VHT_3S, + RTW_RATE_SECTION_VHT_4S, /* keep last */ - RTW_RATE_SECTION_MAX, + RTW_RATE_SECTION_NUM, }; enum rtw_wireless_set { @@ -191,6 +196,7 @@ enum rtw_chip_type { RTW_CHIP_TYPE_8703B, RTW_CHIP_TYPE_8821A, RTW_CHIP_TYPE_8812A, + RTW_CHIP_TYPE_8814A, }; enum rtw_tx_queue_type { @@ -380,6 +386,9 @@ enum rtw_evm { RTW_EVM_1SS, RTW_EVM_2SS_A, RTW_EVM_2SS_B, + RTW_EVM_3SS_A, + RTW_EVM_3SS_B, + RTW_EVM_3SS_C, /* keep it last */ RTW_EVM_NUM }; @@ -397,6 +406,10 @@ enum rtw_snr { RTW_SNR_2SS_B, RTW_SNR_2SS_C, RTW_SNR_2SS_D, + RTW_SNR_3SS_A, + RTW_SNR_3SS_B, + RTW_SNR_3SS_C, + RTW_SNR_3SS_D, /* keep it last */ RTW_SNR_NUM }; @@ -822,7 +835,7 @@ struct rtw_vif { }; struct rtw_regulatory { - char alpha2[2]; + char alpha2[2] __nonstring; u8 txpwr_regd_2g; u8 txpwr_regd_5g; }; @@ -1130,14 +1143,26 @@ struct rtw_rfe_def { * For 2G there are cck rate and ofdm rate with different settings. */ struct rtw_pwr_track_tbl { + const u8 *pwrtrk_5gd_n[RTW_PWR_TRK_5G_NUM]; + const u8 *pwrtrk_5gd_p[RTW_PWR_TRK_5G_NUM]; + const u8 *pwrtrk_5gc_n[RTW_PWR_TRK_5G_NUM]; + const u8 *pwrtrk_5gc_p[RTW_PWR_TRK_5G_NUM]; const u8 *pwrtrk_5gb_n[RTW_PWR_TRK_5G_NUM]; const u8 *pwrtrk_5gb_p[RTW_PWR_TRK_5G_NUM]; const u8 *pwrtrk_5ga_n[RTW_PWR_TRK_5G_NUM]; const u8 *pwrtrk_5ga_p[RTW_PWR_TRK_5G_NUM]; + const u8 *pwrtrk_2gd_n; + const u8 *pwrtrk_2gd_p; + const u8 *pwrtrk_2gc_n; + const u8 *pwrtrk_2gc_p; const u8 *pwrtrk_2gb_n; const u8 *pwrtrk_2gb_p; const u8 *pwrtrk_2ga_n; const u8 *pwrtrk_2ga_p; + const u8 *pwrtrk_2g_cckd_n; + const u8 *pwrtrk_2g_cckd_p; + const u8 *pwrtrk_2g_cckc_n; + const u8 *pwrtrk_2g_cckc_p; const u8 *pwrtrk_2g_cckb_n; const u8 *pwrtrk_2g_cckb_p; const u8 *pwrtrk_2g_ccka_n; @@ -1227,8 +1252,8 @@ struct rtw_chip_info { const struct rtw_hw_reg *dig; const struct rtw_hw_reg *dig_cck; - u32 rf_base_addr[2]; - u32 rf_sipi_addr[2]; + u32 rf_base_addr[RTW_RF_PATH_MAX]; + u32 rf_sipi_addr[RTW_RF_PATH_MAX]; const struct rtw_rf_sipi_addr *rf_sipi_read_addr; u8 fix_rf_phy_num; const struct rtw_ltecoex_addr *ltecoex_addr; @@ -1924,7 +1949,7 @@ union rtw_sar_cfg { struct rtw_sar { enum rtw_sar_sources src; - union rtw_sar_cfg cfg[RTW_RF_PATH_MAX][RTW_RATE_SECTION_MAX]; + union rtw_sar_cfg cfg[RTW_RF_PATH_MAX][RTW_RATE_SECTION_NUM]; }; struct rtw_hal { @@ -1968,16 +1993,16 @@ struct rtw_hal { s8 tx_pwr_by_rate_offset_5g[RTW_RF_PATH_MAX] [DESC_RATE_MAX]; s8 tx_pwr_by_rate_base_2g[RTW_RF_PATH_MAX] - [RTW_RATE_SECTION_MAX]; + [RTW_RATE_SECTION_NUM]; s8 tx_pwr_by_rate_base_5g[RTW_RF_PATH_MAX] - [RTW_RATE_SECTION_MAX]; + [RTW_RATE_SECTION_NUM]; s8 tx_pwr_limit_2g[RTW_REGD_MAX] [RTW_CHANNEL_WIDTH_MAX] - [RTW_RATE_SECTION_MAX] + [RTW_RATE_SECTION_NUM] [RTW_MAX_CHANNEL_NUM_2G]; s8 tx_pwr_limit_5g[RTW_REGD_MAX] [RTW_CHANNEL_WIDTH_MAX] - [RTW_RATE_SECTION_MAX] + [RTW_RATE_SECTION_NUM] [RTW_MAX_CHANNEL_NUM_5G]; s8 tx_pwr_tbl[RTW_RF_PATH_MAX] [DESC_RATE_MAX]; diff --git a/drivers/net/wireless/realtek/rtw88/pci.c b/drivers/net/wireless/realtek/rtw88/pci.c index 
0ecaefc4c83dd..bb4c4ccb31d41 100644 --- a/drivers/net/wireless/realtek/rtw88/pci.c +++ b/drivers/net/wireless/realtek/rtw88/pci.c @@ -20,7 +20,7 @@ module_param_named(disable_aspm, rtw_pci_disable_aspm, bool, 0644); MODULE_PARM_DESC(disable_msi, "Set Y to disable MSI interrupt support"); MODULE_PARM_DESC(disable_aspm, "Set Y to disable PCI ASPM support"); -static u32 rtw_pci_tx_queue_idx_addr[] = { +static const u32 rtw_pci_tx_queue_idx_addr[] = { [RTW_TX_QUEUE_BK] = RTK_PCI_TXBD_IDX_BKQ, [RTW_TX_QUEUE_BE] = RTK_PCI_TXBD_IDX_BEQ, [RTW_TX_QUEUE_VI] = RTK_PCI_TXBD_IDX_VIQ, @@ -1591,7 +1591,7 @@ static void rtw_pci_destroy(struct rtw_dev *rtwdev, struct pci_dev *pdev) rtw_pci_io_unmapping(rtwdev, pdev); } -static struct rtw_hci_ops rtw_pci_ops = { +static const struct rtw_hci_ops rtw_pci_ops = { .tx_write = rtw_pci_tx_write, .tx_kick_off = rtw_pci_tx_kick_off, .flush_queues = rtw_pci_flush_queues, diff --git a/drivers/net/wireless/realtek/rtw88/phy.c b/drivers/net/wireless/realtek/rtw88/phy.c index 8ed20c89d2163..55be0d8e0c28d 100644 --- a/drivers/net/wireless/realtek/rtw88/phy.c +++ b/drivers/net/wireless/realtek/rtw88/phy.c @@ -52,60 +52,93 @@ static const u32 db_invert_table[12][8] = { 1995262315, 2511886432U, 3162277660U, 3981071706U} }; -u8 rtw_cck_rates[] = { DESC_RATE1M, DESC_RATE2M, DESC_RATE5_5M, DESC_RATE11M }; -u8 rtw_ofdm_rates[] = { +const u8 rtw_cck_rates[] = { DESC_RATE1M, DESC_RATE2M, DESC_RATE5_5M, DESC_RATE11M }; + +const u8 rtw_ofdm_rates[] = { DESC_RATE6M, DESC_RATE9M, DESC_RATE12M, DESC_RATE18M, DESC_RATE24M, DESC_RATE36M, DESC_RATE48M, DESC_RATE54M }; -u8 rtw_ht_1s_rates[] = { + +const u8 rtw_ht_1s_rates[] = { DESC_RATEMCS0, DESC_RATEMCS1, DESC_RATEMCS2, DESC_RATEMCS3, DESC_RATEMCS4, DESC_RATEMCS5, DESC_RATEMCS6, DESC_RATEMCS7 }; -u8 rtw_ht_2s_rates[] = { + +const u8 rtw_ht_2s_rates[] = { DESC_RATEMCS8, DESC_RATEMCS9, DESC_RATEMCS10, DESC_RATEMCS11, DESC_RATEMCS12, DESC_RATEMCS13, DESC_RATEMCS14, DESC_RATEMCS15 }; -u8 rtw_vht_1s_rates[] = { + +const u8 rtw_vht_1s_rates[] = { DESC_RATEVHT1SS_MCS0, DESC_RATEVHT1SS_MCS1, DESC_RATEVHT1SS_MCS2, DESC_RATEVHT1SS_MCS3, DESC_RATEVHT1SS_MCS4, DESC_RATEVHT1SS_MCS5, DESC_RATEVHT1SS_MCS6, DESC_RATEVHT1SS_MCS7, DESC_RATEVHT1SS_MCS8, DESC_RATEVHT1SS_MCS9 }; -u8 rtw_vht_2s_rates[] = { + +const u8 rtw_vht_2s_rates[] = { DESC_RATEVHT2SS_MCS0, DESC_RATEVHT2SS_MCS1, DESC_RATEVHT2SS_MCS2, DESC_RATEVHT2SS_MCS3, DESC_RATEVHT2SS_MCS4, DESC_RATEVHT2SS_MCS5, DESC_RATEVHT2SS_MCS6, DESC_RATEVHT2SS_MCS7, DESC_RATEVHT2SS_MCS8, DESC_RATEVHT2SS_MCS9 }; -u8 *rtw_rate_section[RTW_RATE_SECTION_MAX] = { + +const u8 rtw_ht_3s_rates[] = { + DESC_RATEMCS16, DESC_RATEMCS17, DESC_RATEMCS18, + DESC_RATEMCS19, DESC_RATEMCS20, DESC_RATEMCS21, + DESC_RATEMCS22, DESC_RATEMCS23 +}; + +const u8 rtw_ht_4s_rates[] = { + DESC_RATEMCS24, DESC_RATEMCS25, DESC_RATEMCS26, + DESC_RATEMCS27, DESC_RATEMCS28, DESC_RATEMCS29, + DESC_RATEMCS30, DESC_RATEMCS31 +}; + +const u8 rtw_vht_3s_rates[] = { + DESC_RATEVHT3SS_MCS0, DESC_RATEVHT3SS_MCS1, + DESC_RATEVHT3SS_MCS2, DESC_RATEVHT3SS_MCS3, + DESC_RATEVHT3SS_MCS4, DESC_RATEVHT3SS_MCS5, + DESC_RATEVHT3SS_MCS6, DESC_RATEVHT3SS_MCS7, + DESC_RATEVHT3SS_MCS8, DESC_RATEVHT3SS_MCS9 +}; + +const u8 rtw_vht_4s_rates[] = { + DESC_RATEVHT4SS_MCS0, DESC_RATEVHT4SS_MCS1, + DESC_RATEVHT4SS_MCS2, DESC_RATEVHT4SS_MCS3, + DESC_RATEVHT4SS_MCS4, DESC_RATEVHT4SS_MCS5, + DESC_RATEVHT4SS_MCS6, DESC_RATEVHT4SS_MCS7, + DESC_RATEVHT4SS_MCS8, DESC_RATEVHT4SS_MCS9 +}; + +const u8 * const rtw_rate_section[RTW_RATE_SECTION_NUM] = { rtw_cck_rates, rtw_ofdm_rates, 
rtw_ht_1s_rates, rtw_ht_2s_rates, - rtw_vht_1s_rates, rtw_vht_2s_rates + rtw_vht_1s_rates, rtw_vht_2s_rates, + rtw_ht_3s_rates, rtw_ht_4s_rates, + rtw_vht_3s_rates, rtw_vht_4s_rates }; EXPORT_SYMBOL(rtw_rate_section); -u8 rtw_rate_size[RTW_RATE_SECTION_MAX] = { +const u8 rtw_rate_size[RTW_RATE_SECTION_NUM] = { ARRAY_SIZE(rtw_cck_rates), ARRAY_SIZE(rtw_ofdm_rates), ARRAY_SIZE(rtw_ht_1s_rates), ARRAY_SIZE(rtw_ht_2s_rates), ARRAY_SIZE(rtw_vht_1s_rates), - ARRAY_SIZE(rtw_vht_2s_rates) + ARRAY_SIZE(rtw_vht_2s_rates), + ARRAY_SIZE(rtw_ht_3s_rates), + ARRAY_SIZE(rtw_ht_4s_rates), + ARRAY_SIZE(rtw_vht_3s_rates), + ARRAY_SIZE(rtw_vht_4s_rates) }; EXPORT_SYMBOL(rtw_rate_size); -static const u8 rtw_cck_size = ARRAY_SIZE(rtw_cck_rates); -static const u8 rtw_ofdm_size = ARRAY_SIZE(rtw_ofdm_rates); -static const u8 rtw_ht_1s_size = ARRAY_SIZE(rtw_ht_1s_rates); -static const u8 rtw_ht_2s_size = ARRAY_SIZE(rtw_ht_2s_rates); -static const u8 rtw_vht_1s_size = ARRAY_SIZE(rtw_vht_1s_rates); -static const u8 rtw_vht_2s_size = ARRAY_SIZE(rtw_vht_2s_rates); - enum rtw_phy_band_type { PHY_BAND_2G = 0, PHY_BAND_5G = 1, @@ -1590,7 +1623,7 @@ static void rtw_phy_set_tx_power_limit(struct rtw_dev *rtwdev, u8 regd, u8 band, ch_idx = rtw_channel_to_idx(band, ch); if (regd >= RTW_REGD_MAX || bw >= RTW_CHANNEL_WIDTH_MAX || - rs >= RTW_RATE_SECTION_MAX || ch_idx < 0) { + rs >= RTW_RATE_SECTION_NUM || ch_idx < 0) { WARN(1, "wrong txpwr_lmt regd=%u, band=%u bw=%u, rs=%u, ch_idx=%u, pwr_limit=%d\n", regd, band, bw, rs, ch_idx, pwr_limit); @@ -1634,11 +1667,15 @@ rtw_xref_5g_txpwr_lmt(struct rtw_dev *rtwdev, u8 regd, static void rtw_xref_txpwr_lmt_by_rs(struct rtw_dev *rtwdev, u8 regd, u8 bw, u8 ch_idx) { + static const u8 rs_cmp[4][2] = { + {RTW_RATE_SECTION_HT_1S, RTW_RATE_SECTION_VHT_1S}, + {RTW_RATE_SECTION_HT_2S, RTW_RATE_SECTION_VHT_2S}, + {RTW_RATE_SECTION_HT_3S, RTW_RATE_SECTION_VHT_3S}, + {RTW_RATE_SECTION_HT_4S, RTW_RATE_SECTION_VHT_4S} + }; u8 rs_idx, rs_ht, rs_vht; - u8 rs_cmp[2][2] = {{RTW_RATE_SECTION_HT_1S, RTW_RATE_SECTION_VHT_1S}, - {RTW_RATE_SECTION_HT_2S, RTW_RATE_SECTION_VHT_2S} }; - for (rs_idx = 0; rs_idx < 2; rs_idx++) { + for (rs_idx = 0; rs_idx < 4; rs_idx++) { rs_ht = rs_cmp[rs_idx][0]; rs_vht = rs_cmp[rs_idx][1]; @@ -1695,7 +1732,7 @@ rtw_cfg_txpwr_lmt_by_alt(struct rtw_dev *rtwdev, u8 regd, u8 regd_alt) u8 bw, rs; for (bw = 0; bw < RTW_CHANNEL_WIDTH_MAX; bw++) - for (rs = 0; rs < RTW_RATE_SECTION_MAX; rs++) + for (rs = 0; rs < RTW_RATE_SECTION_NUM; rs++) __cfg_txpwr_lmt_by_alt(&rtwdev->hal, regd, regd_alt, bw, rs); } @@ -1959,10 +1996,10 @@ static u8 rtw_phy_get_2g_tx_power_index(struct rtw_dev *rtwdev, u8 rate, u8 group) { const struct rtw_chip_info *chip = rtwdev->chip; - u8 tx_power; - bool mcs_rate; - bool above_2ss; + bool above_2ss, above_3ss, above_4ss; u8 factor = chip->txgi_factor; + bool mcs_rate; + u8 tx_power; if (rate <= DESC_RATE11M) tx_power = pwr_idx_2g->cck_base[group]; @@ -1972,11 +2009,15 @@ static u8 rtw_phy_get_2g_tx_power_index(struct rtw_dev *rtwdev, if (rate >= DESC_RATE6M && rate <= DESC_RATE54M) tx_power += pwr_idx_2g->ht_1s_diff.ofdm * factor; - mcs_rate = (rate >= DESC_RATEMCS0 && rate <= DESC_RATEMCS15) || + mcs_rate = (rate >= DESC_RATEMCS0 && rate <= DESC_RATEMCS31) || (rate >= DESC_RATEVHT1SS_MCS0 && - rate <= DESC_RATEVHT2SS_MCS9); - above_2ss = (rate >= DESC_RATEMCS8 && rate <= DESC_RATEMCS15) || + rate <= DESC_RATEVHT4SS_MCS9); + above_2ss = (rate >= DESC_RATEMCS8 && rate <= DESC_RATEMCS31) || (rate >= DESC_RATEVHT2SS_MCS0); + above_3ss = (rate >= DESC_RATEMCS16 
&& rate <= DESC_RATEMCS31) || + (rate >= DESC_RATEVHT3SS_MCS0); + above_4ss = (rate >= DESC_RATEMCS24 && rate <= DESC_RATEMCS31) || + (rate >= DESC_RATEVHT4SS_MCS0); if (!mcs_rate) return tx_power; @@ -1989,11 +2030,19 @@ static u8 rtw_phy_get_2g_tx_power_index(struct rtw_dev *rtwdev, tx_power += pwr_idx_2g->ht_1s_diff.bw20 * factor; if (above_2ss) tx_power += pwr_idx_2g->ht_2s_diff.bw20 * factor; + if (above_3ss) + tx_power += pwr_idx_2g->ht_3s_diff.bw20 * factor; + if (above_4ss) + tx_power += pwr_idx_2g->ht_4s_diff.bw20 * factor; break; case RTW_CHANNEL_WIDTH_40: /* bw40 is the base power */ if (above_2ss) tx_power += pwr_idx_2g->ht_2s_diff.bw40 * factor; + if (above_3ss) + tx_power += pwr_idx_2g->ht_3s_diff.bw40 * factor; + if (above_4ss) + tx_power += pwr_idx_2g->ht_4s_diff.bw40 * factor; break; } @@ -2006,19 +2055,23 @@ static u8 rtw_phy_get_5g_tx_power_index(struct rtw_dev *rtwdev, u8 rate, u8 group) { const struct rtw_chip_info *chip = rtwdev->chip; - u8 tx_power; + bool above_2ss, above_3ss, above_4ss; + u8 factor = chip->txgi_factor; u8 upper, lower; bool mcs_rate; - bool above_2ss; - u8 factor = chip->txgi_factor; + u8 tx_power; tx_power = pwr_idx_5g->bw40_base[group]; - mcs_rate = (rate >= DESC_RATEMCS0 && rate <= DESC_RATEMCS15) || + mcs_rate = (rate >= DESC_RATEMCS0 && rate <= DESC_RATEMCS31) || (rate >= DESC_RATEVHT1SS_MCS0 && - rate <= DESC_RATEVHT2SS_MCS9); - above_2ss = (rate >= DESC_RATEMCS8 && rate <= DESC_RATEMCS15) || + rate <= DESC_RATEVHT4SS_MCS9); + above_2ss = (rate >= DESC_RATEMCS8 && rate <= DESC_RATEMCS31) || (rate >= DESC_RATEVHT2SS_MCS0); + above_3ss = (rate >= DESC_RATEMCS16 && rate <= DESC_RATEMCS31) || + (rate >= DESC_RATEVHT3SS_MCS0); + above_4ss = (rate >= DESC_RATEMCS24 && rate <= DESC_RATEMCS31) || + (rate >= DESC_RATEVHT4SS_MCS0); if (!mcs_rate) { tx_power += pwr_idx_5g->ht_1s_diff.ofdm * factor; @@ -2033,11 +2086,19 @@ static u8 rtw_phy_get_5g_tx_power_index(struct rtw_dev *rtwdev, tx_power += pwr_idx_5g->ht_1s_diff.bw20 * factor; if (above_2ss) tx_power += pwr_idx_5g->ht_2s_diff.bw20 * factor; + if (above_3ss) + tx_power += pwr_idx_5g->ht_3s_diff.bw20 * factor; + if (above_4ss) + tx_power += pwr_idx_5g->ht_4s_diff.bw20 * factor; break; case RTW_CHANNEL_WIDTH_40: /* bw40 is the base power */ if (above_2ss) tx_power += pwr_idx_5g->ht_2s_diff.bw40 * factor; + if (above_3ss) + tx_power += pwr_idx_5g->ht_3s_diff.bw40 * factor; + if (above_4ss) + tx_power += pwr_idx_5g->ht_4s_diff.bw40 * factor; break; case RTW_CHANNEL_WIDTH_80: /* the base idx of bw80 is the average of bw40+/bw40- */ @@ -2048,13 +2109,17 @@ static u8 rtw_phy_get_5g_tx_power_index(struct rtw_dev *rtwdev, tx_power += pwr_idx_5g->vht_1s_diff.bw80 * factor; if (above_2ss) tx_power += pwr_idx_5g->vht_2s_diff.bw80 * factor; + if (above_3ss) + tx_power += pwr_idx_5g->vht_3s_diff.bw80 * factor; + if (above_4ss) + tx_power += pwr_idx_5g->vht_4s_diff.bw80 * factor; break; } return tx_power; } -/* return RTW_RATE_SECTION_MAX to indicate rate is invalid */ +/* return RTW_RATE_SECTION_NUM to indicate rate is invalid */ static u8 rtw_phy_rate_to_rate_section(u8 rate) { if (rate >= DESC_RATE1M && rate <= DESC_RATE11M) @@ -2065,12 +2130,20 @@ static u8 rtw_phy_rate_to_rate_section(u8 rate) return RTW_RATE_SECTION_HT_1S; else if (rate >= DESC_RATEMCS8 && rate <= DESC_RATEMCS15) return RTW_RATE_SECTION_HT_2S; + else if (rate >= DESC_RATEMCS16 && rate <= DESC_RATEMCS23) + return RTW_RATE_SECTION_HT_3S; + else if (rate >= DESC_RATEMCS24 && rate <= DESC_RATEMCS31) + return RTW_RATE_SECTION_HT_4S; else if 
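One subtlety in the 2G/5G power-index helpers above: the per-stream diffs are cumulative, so a 4SS rate picks up the 2S, 3S and 4S corrections on top of the 1S base (each scaled by txgi_factor in the driver). Sketched with made-up diff values:

```c
#include <stdio.h>

/* Hypothetical per-section bw20 diffs; real tables come from efuse. */
struct pwr_diff { int bw20_2s, bw20_3s, bw20_4s; };

static int tx_power_index(int base, unsigned int nss,
			  const struct pwr_diff *d)
{
	int pwr = base;

	/* Chained like above_2ss/above_3ss/above_4ss: every threshold
	 * the rate crosses adds its own correction. */
	if (nss >= 2)
		pwr += d->bw20_2s;
	if (nss >= 3)
		pwr += d->bw20_3s;
	if (nss >= 4)
		pwr += d->bw20_4s;
	return pwr;
}

int main(void)
{
	struct pwr_diff d = { .bw20_2s = -2, .bw20_3s = -2, .bw20_4s = -4 };

	printf("1SS=%d 2SS=%d 4SS=%d\n",
	       tx_power_index(40, 1, &d),
	       tx_power_index(40, 2, &d),
	       tx_power_index(40, 4, &d));
	return 0;
}
```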
(rate >= DESC_RATEVHT1SS_MCS0 && rate <= DESC_RATEVHT1SS_MCS9) return RTW_RATE_SECTION_VHT_1S; else if (rate >= DESC_RATEVHT2SS_MCS0 && rate <= DESC_RATEVHT2SS_MCS9) return RTW_RATE_SECTION_VHT_2S; + else if (rate >= DESC_RATEVHT3SS_MCS0 && rate <= DESC_RATEVHT3SS_MCS9) + return RTW_RATE_SECTION_VHT_3S; + else if (rate >= DESC_RATEVHT4SS_MCS0 && rate <= DESC_RATEVHT4SS_MCS9) + return RTW_RATE_SECTION_VHT_4S; else - return RTW_RATE_SECTION_MAX; + return RTW_RATE_SECTION_NUM; } static s8 rtw_phy_get_tx_power_limit(struct rtw_dev *rtwdev, u8 band, @@ -2088,7 +2161,7 @@ static s8 rtw_phy_get_tx_power_limit(struct rtw_dev *rtwdev, u8 band, if (regd > RTW_REGD_WW) return power_limit; - if (rs == RTW_RATE_SECTION_MAX) + if (rs == RTW_RATE_SECTION_NUM) goto err; /* only 20M BW with cck and ofdm */ @@ -2096,7 +2169,7 @@ static s8 rtw_phy_get_tx_power_limit(struct rtw_dev *rtwdev, u8 band, bw = RTW_CHANNEL_WIDTH_20; /* only 20/40M BW with ht */ - if (rs == RTW_RATE_SECTION_HT_1S || rs == RTW_RATE_SECTION_HT_2S) + if (rate >= DESC_RATEMCS0 && rate <= DESC_RATEMCS31) bw = min_t(u8, bw, RTW_CHANNEL_WIDTH_40); /* select min power limit among [20M BW ~ current BW] */ @@ -2132,7 +2205,7 @@ static s8 rtw_phy_get_tx_power_sar(struct rtw_dev *rtwdev, u8 sar_band, .rs = rs, }; - if (rs == RTW_RATE_SECTION_MAX) + if (rs == RTW_RATE_SECTION_NUM) goto err; return rtw_query_sar(rtwdev, &arg); @@ -2214,14 +2287,14 @@ static void rtw_phy_set_tx_power_index_by_rs(struct rtw_dev *rtwdev, { struct rtw_hal *hal = &rtwdev->hal; u8 regd = rtw_regd_get(rtwdev); - u8 *rates; + const u8 *rates; u8 size; u8 rate; u8 pwr_idx; u8 bw; int i; - if (rs >= RTW_RATE_SECTION_MAX) + if (rs >= RTW_RATE_SECTION_NUM) return; rates = rtw_rate_section[rs]; @@ -2252,7 +2325,7 @@ static void rtw_phy_set_tx_power_level_by_path(struct rtw_dev *rtwdev, else rs = RTW_RATE_SECTION_OFDM; - for (; rs < RTW_RATE_SECTION_MAX; rs++) + for (; rs < RTW_RATE_SECTION_NUM; rs++) rtw_phy_set_tx_power_index_by_rs(rtwdev, ch, path, rs); } @@ -2274,13 +2347,13 @@ EXPORT_SYMBOL(rtw_phy_set_tx_power_level); static void rtw_phy_tx_power_by_rate_config_by_path(struct rtw_hal *hal, u8 path, - u8 rs, u8 size, u8 *rates) + u8 rs, u8 size, const u8 *rates) { u8 rate; u8 base_idx, rate_idx; s8 base_2g, base_5g; - if (rs >= RTW_RATE_SECTION_VHT_1S) + if (size == 10) /* VHT rates */ base_idx = rates[size - 3]; else base_idx = rates[size - 1]; @@ -2297,28 +2370,12 @@ rtw_phy_tx_power_by_rate_config_by_path(struct rtw_hal *hal, u8 path, void rtw_phy_tx_power_by_rate_config(struct rtw_hal *hal) { - u8 path; + u8 path, rs; - for (path = 0; path < RTW_RF_PATH_MAX; path++) { - rtw_phy_tx_power_by_rate_config_by_path(hal, path, - RTW_RATE_SECTION_CCK, - rtw_cck_size, rtw_cck_rates); - rtw_phy_tx_power_by_rate_config_by_path(hal, path, - RTW_RATE_SECTION_OFDM, - rtw_ofdm_size, rtw_ofdm_rates); - rtw_phy_tx_power_by_rate_config_by_path(hal, path, - RTW_RATE_SECTION_HT_1S, - rtw_ht_1s_size, rtw_ht_1s_rates); - rtw_phy_tx_power_by_rate_config_by_path(hal, path, - RTW_RATE_SECTION_HT_2S, - rtw_ht_2s_size, rtw_ht_2s_rates); - rtw_phy_tx_power_by_rate_config_by_path(hal, path, - RTW_RATE_SECTION_VHT_1S, - rtw_vht_1s_size, rtw_vht_1s_rates); - rtw_phy_tx_power_by_rate_config_by_path(hal, path, - RTW_RATE_SECTION_VHT_2S, - rtw_vht_2s_size, rtw_vht_2s_rates); - } + for (path = 0; path < RTW_RF_PATH_MAX; path++) + for (rs = 0; rs < RTW_RATE_SECTION_NUM; rs++) + rtw_phy_tx_power_by_rate_config_by_path(hal, path, rs, + rtw_rate_size[rs], rtw_rate_section[rs]); } static void @@ -2347,7 
+2404,7 @@ void rtw_phy_tx_power_limit_config(struct rtw_hal *hal) for (regd = 0; regd < RTW_REGD_MAX; regd++) for (bw = 0; bw < RTW_CHANNEL_WIDTH_MAX; bw++) - for (rs = 0; rs < RTW_RATE_SECTION_MAX; rs++) + for (rs = 0; rs < RTW_RATE_SECTION_NUM; rs++) __rtw_phy_tx_power_limit_config(hal, regd, bw, rs); } @@ -2383,7 +2440,7 @@ void rtw_phy_init_tx_power(struct rtw_dev *rtwdev) /* init tx power limit */ for (regd = 0; regd < RTW_REGD_MAX; regd++) for (bw = 0; bw < RTW_CHANNEL_WIDTH_MAX; bw++) - for (rs = 0; rs < RTW_RATE_SECTION_MAX; rs++) + for (rs = 0; rs < RTW_RATE_SECTION_NUM; rs++) rtw_phy_init_tx_power_limit(rtwdev, regd, bw, rs); } @@ -2401,32 +2458,56 @@ void rtw_phy_config_swing_table(struct rtw_dev *rtwdev, swing_table->n[RF_PATH_A] = tbl->pwrtrk_2g_ccka_n; swing_table->p[RF_PATH_B] = tbl->pwrtrk_2g_cckb_p; swing_table->n[RF_PATH_B] = tbl->pwrtrk_2g_cckb_n; + swing_table->p[RF_PATH_C] = tbl->pwrtrk_2g_cckc_p; + swing_table->n[RF_PATH_C] = tbl->pwrtrk_2g_cckc_n; + swing_table->p[RF_PATH_D] = tbl->pwrtrk_2g_cckd_p; + swing_table->n[RF_PATH_D] = tbl->pwrtrk_2g_cckd_n; } else { swing_table->p[RF_PATH_A] = tbl->pwrtrk_2ga_p; swing_table->n[RF_PATH_A] = tbl->pwrtrk_2ga_n; swing_table->p[RF_PATH_B] = tbl->pwrtrk_2gb_p; swing_table->n[RF_PATH_B] = tbl->pwrtrk_2gb_n; + swing_table->p[RF_PATH_C] = tbl->pwrtrk_2gc_p; + swing_table->n[RF_PATH_C] = tbl->pwrtrk_2gc_n; + swing_table->p[RF_PATH_D] = tbl->pwrtrk_2gd_p; + swing_table->n[RF_PATH_D] = tbl->pwrtrk_2gd_n; } } else if (IS_CH_5G_BAND_1(channel) || IS_CH_5G_BAND_2(channel)) { swing_table->p[RF_PATH_A] = tbl->pwrtrk_5ga_p[RTW_PWR_TRK_5G_1]; swing_table->n[RF_PATH_A] = tbl->pwrtrk_5ga_n[RTW_PWR_TRK_5G_1]; swing_table->p[RF_PATH_B] = tbl->pwrtrk_5gb_p[RTW_PWR_TRK_5G_1]; swing_table->n[RF_PATH_B] = tbl->pwrtrk_5gb_n[RTW_PWR_TRK_5G_1]; + swing_table->p[RF_PATH_C] = tbl->pwrtrk_5gc_p[RTW_PWR_TRK_5G_1]; + swing_table->n[RF_PATH_C] = tbl->pwrtrk_5gc_n[RTW_PWR_TRK_5G_1]; + swing_table->p[RF_PATH_D] = tbl->pwrtrk_5gd_p[RTW_PWR_TRK_5G_1]; + swing_table->n[RF_PATH_D] = tbl->pwrtrk_5gd_n[RTW_PWR_TRK_5G_1]; } else if (IS_CH_5G_BAND_3(channel)) { swing_table->p[RF_PATH_A] = tbl->pwrtrk_5ga_p[RTW_PWR_TRK_5G_2]; swing_table->n[RF_PATH_A] = tbl->pwrtrk_5ga_n[RTW_PWR_TRK_5G_2]; swing_table->p[RF_PATH_B] = tbl->pwrtrk_5gb_p[RTW_PWR_TRK_5G_2]; swing_table->n[RF_PATH_B] = tbl->pwrtrk_5gb_n[RTW_PWR_TRK_5G_2]; + swing_table->p[RF_PATH_C] = tbl->pwrtrk_5gc_p[RTW_PWR_TRK_5G_2]; + swing_table->n[RF_PATH_C] = tbl->pwrtrk_5gc_n[RTW_PWR_TRK_5G_2]; + swing_table->p[RF_PATH_D] = tbl->pwrtrk_5gd_p[RTW_PWR_TRK_5G_2]; + swing_table->n[RF_PATH_D] = tbl->pwrtrk_5gd_n[RTW_PWR_TRK_5G_2]; } else if (IS_CH_5G_BAND_4(channel)) { swing_table->p[RF_PATH_A] = tbl->pwrtrk_5ga_p[RTW_PWR_TRK_5G_3]; swing_table->n[RF_PATH_A] = tbl->pwrtrk_5ga_n[RTW_PWR_TRK_5G_3]; swing_table->p[RF_PATH_B] = tbl->pwrtrk_5gb_p[RTW_PWR_TRK_5G_3]; swing_table->n[RF_PATH_B] = tbl->pwrtrk_5gb_n[RTW_PWR_TRK_5G_3]; + swing_table->p[RF_PATH_C] = tbl->pwrtrk_5gc_p[RTW_PWR_TRK_5G_3]; + swing_table->n[RF_PATH_C] = tbl->pwrtrk_5gc_n[RTW_PWR_TRK_5G_3]; + swing_table->p[RF_PATH_D] = tbl->pwrtrk_5gd_p[RTW_PWR_TRK_5G_3]; + swing_table->n[RF_PATH_D] = tbl->pwrtrk_5gd_n[RTW_PWR_TRK_5G_3]; } else { swing_table->p[RF_PATH_A] = tbl->pwrtrk_2ga_p; swing_table->n[RF_PATH_A] = tbl->pwrtrk_2ga_n; swing_table->p[RF_PATH_B] = tbl->pwrtrk_2gb_p; swing_table->n[RF_PATH_B] = tbl->pwrtrk_2gb_n; + swing_table->p[RF_PATH_C] = tbl->pwrtrk_2gc_p; + swing_table->n[RF_PATH_C] = tbl->pwrtrk_2gc_n; + swing_table->p[RF_PATH_D] = 
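/* unexpected channel: fall back to the 2G power tracking tables */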
tbl->pwrtrk_2gd_p; + swing_table->n[RF_PATH_D] = tbl->pwrtrk_2gd_n; } } EXPORT_SYMBOL(rtw_phy_config_swing_table); diff --git a/drivers/net/wireless/realtek/rtw88/phy.h b/drivers/net/wireless/realtek/rtw88/phy.h index ccfcbd3ced03a..c9e6b869661d6 100644 --- a/drivers/net/wireless/realtek/rtw88/phy.h +++ b/drivers/net/wireless/realtek/rtw88/phy.h @@ -7,14 +7,18 @@ #include "debug.h" -extern u8 rtw_cck_rates[]; -extern u8 rtw_ofdm_rates[]; -extern u8 rtw_ht_1s_rates[]; -extern u8 rtw_ht_2s_rates[]; -extern u8 rtw_vht_1s_rates[]; -extern u8 rtw_vht_2s_rates[]; -extern u8 *rtw_rate_section[]; -extern u8 rtw_rate_size[]; +extern const u8 rtw_cck_rates[]; +extern const u8 rtw_ofdm_rates[]; +extern const u8 rtw_ht_1s_rates[]; +extern const u8 rtw_ht_2s_rates[]; +extern const u8 rtw_vht_1s_rates[]; +extern const u8 rtw_vht_2s_rates[]; +extern const u8 rtw_ht_3s_rates[]; +extern const u8 rtw_ht_4s_rates[]; +extern const u8 rtw_vht_3s_rates[]; +extern const u8 rtw_vht_4s_rates[]; +extern const u8 * const rtw_rate_section[]; +extern const u8 rtw_rate_size[]; void rtw_phy_init(struct rtw_dev *rtwdev); void rtw_phy_dynamic_mechanism(struct rtw_dev *rtwdev); diff --git a/drivers/net/wireless/realtek/rtw88/reg.h b/drivers/net/wireless/realtek/rtw88/reg.h index e438405fba566..08e9494977e06 100644 --- a/drivers/net/wireless/realtek/rtw88/reg.h +++ b/drivers/net/wireless/realtek/rtw88/reg.h @@ -8,6 +8,7 @@ #define REG_SYS_FUNC_EN 0x0002 #define BIT_FEN_EN_25_1 BIT(13) #define BIT_FEN_ELDR BIT(12) +#define BIT_FEN_PCIEA BIT(6) #define BIT_FEN_CPUEN BIT(2) #define BIT_FEN_USBA BIT(2) #define BIT_FEN_BB_GLB_RST BIT(1) @@ -39,6 +40,9 @@ #define BIT_RF_RSTB BIT(1) #define BIT_RF_EN BIT(0) +#define REG_RF_CTRL1 0x0020 +#define REG_RF_CTRL2 0x0021 + #define REG_AFE_CTRL1 0x0024 #define BIT_MAC_CLK_SEL (BIT(20) | BIT(21)) #define REG_EFUSE_CTRL 0x0030 @@ -73,6 +77,8 @@ #define BIT_BT_PTA_EN BIT(5) #define BIT_WLRFE_4_5_EN BIT(2) +#define REG_GPIO_PIN_CTRL 0x0044 + #define REG_LED_CFG 0x004C #define BIT_LNAON_SEL_EN BIT(26) #define BIT_PAPE_SEL_EN BIT(25) @@ -110,6 +116,7 @@ #define BIT_SDIO_PAD_E5 BIT(18) #define REG_RF_B_CTRL 0x76 +#define REG_RF_CTRL3 0x0076 #define REG_AFE_CTRL_4 0x0078 #define BIT_CK320M_AFE_EN BIT(4) @@ -130,6 +137,7 @@ #define BIT_SHIFT_ROM_PGE 16 #define BIT_FW_INIT_RDY BIT(15) #define BIT_FW_DW_RDY BIT(14) +#define BIT_CPU_CLK_SEL (BIT(12) | BIT(13)) #define BIT_RPWM_TOGGLE BIT(7) #define BIT_RAM_DL_SEL BIT(7) /* legacy only */ #define BIT_DMEM_CHKSUM_OK BIT(6) @@ -147,7 +155,7 @@ BIT_CHECK_SUM_OK) #define FW_READY_LEGACY (BIT_MCUFWDL_RDY | BIT_FWDL_CHK_RPT | \ BIT_WINTINI_RDY | BIT_RAM_DL_SEL) -#define FW_READY_MASK 0xffff +#define FW_READY_MASK (0xffff & ~BIT_CPU_CLK_SEL) #define REG_MCU_TST_CFG 0x84 #define VAL_FW_TRIGGER 0x1 @@ -602,15 +610,25 @@ #define REG_CCA2ND 0x0838 #define REG_L1PKTH 0x0848 #define REG_CLKTRK 0x0860 +#define REG_CSI_MASK_SETTING1 0x0874 +#define REG_NBI_SETTING 0x087c +#define BIT_NBI_ENABLE BIT(13) +#define REG_CSI_FIX_MASK0 0x0880 +#define REG_CSI_FIX_MASK1 0x0884 +#define REG_CSI_FIX_MASK6 0x0898 +#define REG_CSI_FIX_MASK7 0x089c #define REG_ADCCLK 0x08AC #define REG_HSSI_READ 0x08B0 #define REG_FPGA0_XCD_RF_PARA 0x08B4 #define REG_RX_MCS_LIMIT 0x08BC #define REG_ADC160 0x08C4 +#define REG_DBGSEL 0x08fc #define REG_ANTSEL_SW 0x0900 #define REG_DAC_RSTB 0x090c +#define REG_PSD 0x0910 +#define BIT_PSD_INI GENMASK(23, 22) #define REG_SINGLE_TONE_CONT_TX 0x0914 - +#define REG_AGC_TABLE 0x0958 #define REG_RFE_CTRL_E 0x0974 #define REG_2ND_CCA_CTRL 0x0976 #define 
REG_IQK_COM00 0x0978 @@ -620,10 +638,18 @@ #define REG_FAS 0x09a4 #define REG_RXSB 0x0a00 +#define BIT_RXSB_ANA_DIV BIT(15) #define REG_CCK_RX 0x0a04 #define REG_CCK_PD_TH 0x0a0a - -#define REG_CCK0_FAREPORT 0xa2c +#define REG_PRECTRL 0x0a14 +#define BIT_DIS_CO_PATHSEL BIT(7) +#define BIT_IQ_WGT GENMASK(9, 8) +#define REG_CCA_MF 0x0a20 +#define BIT_MBC_WIN GENMASK(5, 4) +#define REG_CCK0_TX_FILTER1 0x0a20 +#define REG_CCK0_TX_FILTER2 0x0a24 +#define REG_CCK0_DEBUG_PORT 0x0a28 +#define REG_CCK0_FAREPORT 0x0a2c #define BIT_CCK0_2RX BIT(18) #define BIT_CCK0_MRC BIT(22) #define REG_FA_CCK 0x0a5c @@ -642,10 +668,18 @@ #define DIS_DPD_RATEVHT2SS_MCS1 BIT(9) #define DIS_DPD_RATEALL GENMASK(9, 0) +#define REG_CCA 0x0a70 +#define BIT_CCA_CO BIT(7) +#define REG_ANTSEL 0x0a74 +#define BIT_ANT_BYCO BIT(8) +#define REG_CCKTX 0x0a84 +#define BIT_CMB_CCA_2R BIT(28) + #define REG_CNTRST 0x0b58 #define REG_3WIRE_SWA 0x0c00 #define REG_RX_IQC_AB_A 0x0c10 +#define REG_RX_IQC_CD_A 0x0c14 #define REG_TXSCALE_A 0x0c1c #define BB_SWING_MASK GENMASK(31, 21) #define REG_TX_AGC_A_CCK_11_CCK_1 0xc20 @@ -673,7 +707,7 @@ #define REG_LSSI_WRITE_A 0x0c90 #define REG_PREDISTA 0x0c90 #define REG_TXAGCIDX 0x0c94 - +#define REG_TX_AGC_A 0x0c94 #define REG_RFE_PINMUX_A 0x0cb0 #define REG_RFE_INV_A 0x0cb4 #define REG_RFE_CTRL8 0x0cb4 @@ -682,6 +716,7 @@ #define DPDT_CTRL_PIN 0x77 #define RFE_INV_MASK 0x3ff00000 #define REG_RFECTL_A 0x0cb8 +#define REG_RFE_INV0 0x0cbc #define REG_RFE_INV8 0x0cbd #define BIT_MASK_RFE_INV89 GENMASK(1, 0) #define REG_RFE_INV16 0x0cbe @@ -702,6 +737,7 @@ #define REG_3WIRE_SWB 0x0e00 #define REG_RX_IQC_AB_B 0x0e10 +#define REG_RX_IQC_CD_B 0x0e14 #define REG_TXSCALE_B 0x0e1c #define REG_TX_AGC_B_CCK_11_CCK_1 0xe20 #define REG_TX_AGC_B_OFDM18_OFDM6 0xe24 @@ -728,6 +764,7 @@ #define REG_LSSI_WRITE_B 0x0e90 #define REG_PREDISTB 0x0e90 #define REG_INIDLYB 0x0e94 +#define REG_TX_AGC_B 0x0e94 #define REG_RFE_PINMUX_B 0x0eb0 #define REG_RFE_INV_B 0x0eb4 #define REG_RFECTL_B 0x0eb8 @@ -743,8 +780,11 @@ #define REG_CRC_HT 0x0f10 #define REG_CRC_OFDM 0x0f14 #define REG_FA_OFDM 0x0f48 +#define REG_DBGRPT 0x0fa0 #define REG_CCA_CCK 0x0fcc +#define REG_SYS_CFG3_8814A 0x1000 + #define REG_ANAPARSW_MAC_0 0x1010 #define BIT_CF_L_V2 GENMASK(29, 28) @@ -862,9 +902,27 @@ #define LTECOEX_WRITE_DATA REG_WL2LTECOEX_INDIRECT_ACCESS_WRITE_DATA_V1 #define LTECOEX_READ_DATA REG_WL2LTECOEX_INDIRECT_ACCESS_READ_DATA_V1 +#define REG_RX_IQC_AB_C 0x1810 +#define REG_RX_IQC_CD_C 0x1814 +#define REG_TXSCALE_C 0x181c +#define REG_CK_MONHC 0x185c +#define REG_AFE_PWR1_C 0x1860 #define REG_IGN_GNT_BT1 0x1860 +#define REG_TX_AGC_C 0x1894 +#define REG_RFE_PINMUX_C 0x18b4 #define REG_RFESEL_CTRL 0x1990 +#define REG_AGC_TBL 0x1998 + +#define REG_RX_IQC_AB_D 0x1a10 +#define REG_RX_IQC_CD_D 0x1a14 +#define REG_TXSCALE_D 0x1a1c +#define REG_CK_MONHD 0x1a5c +#define REG_AFE_PWR1_D 0x1a60 +#define REG_TX_AGC_D 0x1a94 +#define REG_RFE_PINMUX_D 0x1ab4 +#define REG_RFE_INVSEL_D 0x1abc +#define BIT_RFE_SELSW0_D GENMASK(27, 20) #define REG_NOMASK_TXBT 0x1ca7 #define REG_ANAPAR 0x1c30 @@ -905,6 +963,7 @@ #define RF18_BAND_MASK (BIT(16) | BIT(9) | BIT(8)) #define RF18_CHANNEL_MASK (MASKBYTE0) #define RF18_RFSI_MASK (BIT(18) | BIT(17)) +#define RF_RCK1_V1 0x1c #define RF_RCK 0x1d #define RF_MODE_TABLE_ADDR 0x30 #define RF_MODE_TABLE_DATA0 0x31 diff --git a/drivers/net/wireless/realtek/rtw88/rtw8723d.c b/drivers/net/wireless/realtek/rtw88/rtw8723d.c index eeca31bf71f16..87715bd54860a 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8723d.c +++ 
b/drivers/net/wireless/realtek/rtw88/rtw8723d.c @@ -444,7 +444,7 @@ static u8 rtw8723d_iqk_check_tx_failed(struct rtw_dev *rtwdev, rtw_read32(rtwdev, REG_IQK_RES_TX), rtw_read32(rtwdev, REG_IQK_RES_TY)); rtw_dbg(rtwdev, RTW_DBG_RFK, - "[IQK] 0xe90(before IQK)= 0x%x, 0xe98(afer IQK) = 0x%x\n", + "[IQK] 0xe90(before IQK)= 0x%x, 0xe98(after IQK) = 0x%x\n", rtw_read32(rtwdev, 0xe90), rtw_read32(rtwdev, 0xe98)); @@ -472,7 +472,7 @@ static u8 rtw8723d_iqk_check_rx_failed(struct rtw_dev *rtwdev, rtw_read32(rtwdev, REG_IQK_RES_RY)); rtw_dbg(rtwdev, RTW_DBG_RFK, - "[IQK] 0xea0(before IQK)= 0x%x, 0xea8(afer IQK) = 0x%x\n", + "[IQK] 0xea0(before IQK)= 0x%x, 0xea8(after IQK) = 0x%x\n", rtw_read32(rtwdev, 0xea0), rtw_read32(rtwdev, 0xea8)); diff --git a/drivers/net/wireless/realtek/rtw88/rtw8814a.c b/drivers/net/wireless/realtek/rtw88/rtw8814a.c new file mode 100644 index 0000000000000..cfd35d40d46e2 --- /dev/null +++ b/drivers/net/wireless/realtek/rtw88/rtw8814a.c @@ -0,0 +1,2257 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* Copyright(c) 2025 Realtek Corporation + */ + +#include +#include "main.h" +#include "coex.h" +#include "tx.h" +#include "phy.h" +#include "rtw8814a.h" +#include "rtw8814a_table.h" +#include "rtw88xxa.h" +#include "reg.h" +#include "debug.h" +#include "efuse.h" +#include "regd.h" +#include "usb.h" + +static void rtw8814a_efuse_grant(struct rtw_dev *rtwdev, bool on) +{ + if (on) { + rtw_write8(rtwdev, REG_EFUSE_ACCESS, EFUSE_ACCESS_ON); + + rtw_write16_set(rtwdev, REG_SYS_FUNC_EN, BIT_FEN_ELDR); + rtw_write16_set(rtwdev, REG_SYS_CLKR, + BIT_LOADER_CLK_EN | BIT_ANA8M); + } else { + rtw_write8(rtwdev, REG_EFUSE_ACCESS, EFUSE_ACCESS_OFF); + } +} + +static void rtw8814a_read_rfe_type(struct rtw_dev *rtwdev) +{ + struct rtw_efuse *efuse = &rtwdev->efuse; + + if (!(efuse->rfe_option & BIT(7))) + return; + + if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_PCIE) + efuse->rfe_option = 0; + else if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_USB) + efuse->rfe_option = 1; +} + +static void rtw8814a_read_amplifier_type(struct rtw_dev *rtwdev) +{ + struct rtw_efuse *efuse = &rtwdev->efuse; + + switch (efuse->rfe_option) { + case 1: + /* Internal 2G */ + efuse->pa_type_2g = 0; + efuse->lna_type_2g = 0; + /* External 5G */ + efuse->pa_type_5g = BIT(0); + efuse->lna_type_5g = BIT(3); + break; + case 2 ... 
5: + /* External everything */ + efuse->pa_type_2g = BIT(4); + efuse->lna_type_2g = BIT(3); + efuse->pa_type_5g = BIT(0); + efuse->lna_type_5g = BIT(3); + break; + case 6: + efuse->lna_type_5g = BIT(3); + break; + default: + break; + } +} + +static void rtw8814a_read_rf_type(struct rtw_dev *rtwdev, + struct rtw8814a_efuse *map) +{ + struct rtw_usb *rtwusb = rtw_get_usb_priv(rtwdev); + struct rtw_hal *hal = &rtwdev->hal; + + switch (map->trx_antenna_option) { + case 0xff: /* 4T4R */ + case 0xee: /* 3T3R */ + if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_USB && + rtwusb->udev->speed != USB_SPEED_SUPER) + hal->rf_type = RF_2T2R; + else + hal->rf_type = RF_3T3R; + + break; + case 0x66: /* 2T2R */ + case 0x6f: /* 2T4R */ + default: + hal->rf_type = RF_2T2R; + break; + } + + hal->rf_path_num = 4; + hal->rf_phy_num = 4; + + if (hal->rf_type == RF_3T3R) { + hal->antenna_rx = BB_PATH_ABC; + hal->antenna_tx = BB_PATH_ABC; + } else { + hal->antenna_rx = BB_PATH_AB; + hal->antenna_tx = BB_PATH_AB; + } +} + +static void rtw8814a_init_hwcap(struct rtw_dev *rtwdev) +{ + struct rtw_efuse *efuse = &rtwdev->efuse; + struct rtw_hal *hal = &rtwdev->hal; + + efuse->hw_cap.bw = BIT(RTW_CHANNEL_WIDTH_20) | + BIT(RTW_CHANNEL_WIDTH_40) | + BIT(RTW_CHANNEL_WIDTH_80); + efuse->hw_cap.ptcl = EFUSE_HW_CAP_PTCL_VHT; + + if (hal->rf_type == RF_3T3R) + efuse->hw_cap.nss = 3; + else + efuse->hw_cap.nss = 2; + + rtw_dbg(rtwdev, RTW_DBG_EFUSE, + "hw cap: hci=0x%02x, bw=0x%02x, ptcl=0x%02x, ant_num=%d, nss=%d\n", + efuse->hw_cap.hci, efuse->hw_cap.bw, efuse->hw_cap.ptcl, + efuse->hw_cap.ant_num, efuse->hw_cap.nss); +} + +static int rtw8814a_read_efuse(struct rtw_dev *rtwdev, u8 *log_map) +{ + struct rtw_efuse *efuse = &rtwdev->efuse; + struct rtw8814a_efuse *map; + int i; + + if (rtw_dbg_is_enabled(rtwdev, RTW_DBG_EFUSE)) + print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 1, + log_map, rtwdev->chip->log_efuse_size, true); + + map = (struct rtw8814a_efuse *)log_map; + + efuse->usb_mode_switch = u8_get_bits(map->usb_mode, BIT(4)); + efuse->rfe_option = map->rfe_option; + efuse->rf_board_option = map->rf_board_option; + efuse->crystal_cap = map->xtal_k; + efuse->channel_plan = map->channel_plan; + efuse->country_code[0] = map->country_code[0]; + efuse->country_code[1] = map->country_code[1]; + efuse->bt_setting = map->rf_bt_setting; + efuse->regd = map->rf_board_option & 0x7; + efuse->thermal_meter[RF_PATH_A] = map->thermal_meter; + efuse->thermal_meter_k = map->thermal_meter; + efuse->tx_bb_swing_setting_2g = map->tx_bb_swing_setting_2g; + efuse->tx_bb_swing_setting_5g = map->tx_bb_swing_setting_5g; + + rtw8814a_read_rfe_type(rtwdev); + rtw8814a_read_amplifier_type(rtwdev); + + /* Override rtw_chip_parameter_setup() */ + rtw8814a_read_rf_type(rtwdev, map); + + rtw8814a_init_hwcap(rtwdev); + + for (i = 0; i < 4; i++) + efuse->txpwr_idx_table[i] = map->txpwr_idx_table[i]; + + switch (rtw_hci_type(rtwdev)) { + case RTW_HCI_TYPE_USB: + ether_addr_copy(efuse->addr, map->u.mac_addr); + break; + case RTW_HCI_TYPE_PCIE: + ether_addr_copy(efuse->addr, map->e.mac_addr); + break; + case RTW_HCI_TYPE_SDIO: + default: + /* unsupported now */ + return -EOPNOTSUPP; + } + + return 0; +} + +static void rtw8814a_init_rfe_reg(struct rtw_dev *rtwdev) +{ + u8 rfe_option = rtwdev->efuse.rfe_option; + + if (rfe_option == 2 || rfe_option == 1) { + rtw_write32_mask(rtwdev, 0x1994, 0xf, 0xf); + rtw_write8_set(rtwdev, REG_GPIO_MUXCFG + 2, 0xf0); + } else if (rfe_option == 0) { + rtw_write32_mask(rtwdev, 0x1994, 0xf, 0xf); + rtw_write8_set(rtwdev, 
REG_GPIO_MUXCFG + 2, 0xc0); + } +} + +#define RTW_TXSCALE_SIZE 37 +static const u32 rtw8814a_txscale_tbl[RTW_TXSCALE_SIZE] = { + 0x081, 0x088, 0x090, 0x099, 0x0a2, 0x0ac, 0x0b6, 0x0c0, 0x0cc, 0x0d8, + 0x0e5, 0x0f2, 0x101, 0x110, 0x120, 0x131, 0x143, 0x156, 0x16a, 0x180, + 0x197, 0x1af, 0x1c8, 0x1e3, 0x200, 0x21e, 0x23e, 0x261, 0x285, 0x2ab, + 0x2d3, 0x2fe, 0x32b, 0x35c, 0x38e, 0x3c4, 0x3fe +}; + +static u32 rtw8814a_get_bb_swing(struct rtw_dev *rtwdev, u8 band, u8 rf_path) +{ + static const u32 swing2setting[4] = {0x200, 0x16a, 0x101, 0x0b6}; + struct rtw_efuse *efuse = &rtwdev->efuse; + u8 tx_bb_swing; + + if (band == RTW_BAND_2G) + tx_bb_swing = efuse->tx_bb_swing_setting_2g; + else + tx_bb_swing = efuse->tx_bb_swing_setting_5g; + + tx_bb_swing >>= 2 * rf_path; + tx_bb_swing &= 0x3; + + return swing2setting[tx_bb_swing]; +} + +static u8 rtw8814a_get_swing_index(struct rtw_dev *rtwdev) +{ + u32 swing, table_value; + u8 i; + + swing = rtw8814a_get_bb_swing(rtwdev, rtwdev->hal.current_band_type, + RF_PATH_A); + + for (i = 0; i < ARRAY_SIZE(rtw8814a_txscale_tbl); i++) { + table_value = rtw8814a_txscale_tbl[i]; + if (swing == table_value) + return i; + } + + return 24; +} + +static void rtw8814a_pwrtrack_init(struct rtw_dev *rtwdev) +{ + struct rtw_dm_info *dm_info = &rtwdev->dm_info; + u8 path; + + dm_info->default_ofdm_index = rtw8814a_get_swing_index(rtwdev); + + for (path = RF_PATH_A; path < rtwdev->hal.rf_path_num; path++) { + ewma_thermal_init(&dm_info->avg_thermal[path]); + dm_info->delta_power_index[path] = 0; + dm_info->delta_power_index_last[path] = 0; + } + dm_info->pwr_trk_triggered = false; + dm_info->pwr_trk_init_trigger = true; + dm_info->thermal_meter_k = rtwdev->efuse.thermal_meter_k; +} + +static void rtw8814a_config_trx_path(struct rtw_dev *rtwdev) +{ + /* RX CCK disable 2R CCA */ + rtw_write32_clr(rtwdev, REG_CCK0_FAREPORT, + BIT_CCK0_2RX | BIT_CCK0_MRC); + /* pathB tx on, path A/C/D tx off */ + rtw_write32_mask(rtwdev, REG_CCK_RX, 0xf0000000, 0x4); + /* pathB rx */ + rtw_write32_mask(rtwdev, REG_CCK_RX, 0x0f000000, 0x5); +} + +static void rtw8814a_config_cck_rx_antenna_init(struct rtw_dev *rtwdev) +{ + /* CCK 2R CCA parameters */ + + /* Disable Ant diversity */ + rtw_write32_mask(rtwdev, REG_RXSB, BIT_RXSB_ANA_DIV, 0x0); + /* Concurrent CCA at LSB & USB */ + rtw_write32_mask(rtwdev, REG_CCA, BIT_CCA_CO, 0); + /* RX path diversity enable */ + rtw_write32_mask(rtwdev, REG_ANTSEL, BIT_ANT_BYCO, 0); + /* r_en_mrc_antsel */ + rtw_write32_mask(rtwdev, REG_PRECTRL, BIT_DIS_CO_PATHSEL, 0); + /* MBC weighting */ + rtw_write32_mask(rtwdev, REG_CCA_MF, BIT_MBC_WIN, 1); + /* 2R CCA only */ + rtw_write32_mask(rtwdev, REG_CCKTX, BIT_CMB_CCA_2R, 1); +} + +static void rtw8814a_phy_set_param(struct rtw_dev *rtwdev) +{ + u32 crystal_cap, val32; + u8 val8, rf_path; + + /* power on BB/RF domain */ + if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_USB) + rtw_write8_set(rtwdev, REG_SYS_FUNC_EN, BIT_FEN_USBA); + else if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_PCIE) + rtw_write8_set(rtwdev, REG_SYS_FUNC_EN, BIT_FEN_PCIEA); + + rtw_write8_set(rtwdev, REG_SYS_CFG3_8814A + 2, + BIT_FEN_BB_GLB_RST | BIT_FEN_BB_RSTB); + + /* Power on RF paths A..D */ + val8 = BIT_RF_EN | BIT_RF_RSTB | BIT_RF_SDM_RSTB; + rtw_write8(rtwdev, REG_RF_CTRL, val8); + rtw_write8(rtwdev, REG_RF_CTRL1, val8); + rtw_write8(rtwdev, REG_RF_CTRL2, val8); + rtw_write8(rtwdev, REG_RF_CTRL3, val8); + + rtw_load_table(rtwdev, rtwdev->chip->bb_tbl); + rtw_load_table(rtwdev, rtwdev->chip->agc_tbl); + + crystal_cap = rtwdev->efuse.crystal_cap & 
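/* 6-bit XTAL cap code from the efuse, duplicated into the two crystal trim fields covered by the 0x07ff8000 mask */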
0x3F; + crystal_cap |= crystal_cap << 6; + rtw_write32_mask(rtwdev, REG_AFE_CTRL3, 0x07ff8000, crystal_cap); + + rtw8814a_config_trx_path(rtwdev); + + for (rf_path = 0; rf_path < rtwdev->hal.rf_path_num; rf_path++) + rtw_load_table(rtwdev, rtwdev->chip->rf_tbl[rf_path]); + + val32 = rtw_read_rf(rtwdev, RF_PATH_A, RF_RCK1_V1, RFREG_MASK); + rtw_write_rf(rtwdev, RF_PATH_B, RF_RCK1_V1, RFREG_MASK, val32); + rtw_write_rf(rtwdev, RF_PATH_C, RF_RCK1_V1, RFREG_MASK, val32); + rtw_write_rf(rtwdev, RF_PATH_D, RF_RCK1_V1, RFREG_MASK, val32); + + rtw_write32_set(rtwdev, REG_RXPSEL, BIT_RX_PSEL_RST); + + rtw_write8(rtwdev, REG_HWSEQ_CTRL, 0xFF); + + rtw_write32(rtwdev, REG_BAR_MODE_CTRL, 0x0201ffff); + + rtw_write8(rtwdev, REG_MISC_CTRL, BIT_DIS_SECOND_CCA); + + rtw_write8(rtwdev, REG_NAV_CTRL + 2, 0); + + rtw_write8_clr(rtwdev, REG_GPIO_MUXCFG, BIT(5)); + + rtw8814a_config_cck_rx_antenna_init(rtwdev); + + rtw_phy_init(rtwdev); + rtw8814a_pwrtrack_init(rtwdev); + + rtw8814a_init_rfe_reg(rtwdev); + + rtw_write8_clr(rtwdev, REG_QUEUE_CTRL, BIT(3)); + + rtw_write8(rtwdev, REG_NAV_CTRL + 2, 235); + + /* enable Tx report. */ + rtw_write8(rtwdev, REG_FWHW_TXQ_CTRL + 1, 0x1F); + + if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_USB) { + /* Reset USB mode switch setting */ + rtw_write8(rtwdev, REG_SYS_SDIO_CTRL, 0x0); + rtw_write8(rtwdev, REG_ACLK_MON, 0x0); + } +} + +static void rtw8814ae_enable_rf_1_2v(struct rtw_dev *rtwdev) +{ + /* This is for fullsize card, because GPIO7 there is floating. + * We should pull GPIO7 high to enable RF 1.2V Switch Power Supply + */ + + /* 1. set 0x40[1:0] to 0, BIT_GPIOSEL=0, select pin as GPIO */ + rtw_write8_clr(rtwdev, REG_GPIO_MUXCFG, BIT(1) | BIT(0)); + + /* 2. set 0x44[31] to 0 + * mode=0: data port; + * mode=1 and BIT_GPIO_IO_SEL=0: interrupt mode; + */ + rtw_write8_clr(rtwdev, REG_GPIO_PIN_CTRL + 3, BIT(7)); + + /* 3. 
data mode + * 3.1 set 0x44[23] to 1 + * sel=0: input; + * sel=1: output; + */ + rtw_write8_set(rtwdev, REG_GPIO_PIN_CTRL + 2, BIT(7)); + + /* 3.2 set 0x44[15] to 1 + * output high value; + */ + rtw_write8_set(rtwdev, REG_GPIO_PIN_CTRL + 1, BIT(7)); +} + +static int rtw8814a_mac_init(struct rtw_dev *rtwdev) +{ + struct rtw_usb *rtwusb = rtw_get_usb_priv(rtwdev); + + rtw_write16(rtwdev, REG_CR, + MAC_TRX_ENABLE | BIT_MAC_SEC_EN | BIT_32K_CAL_TMR_EN); + + rtw_load_table(rtwdev, rtwdev->chip->mac_tbl); + + if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_USB) + rtw_write8(rtwdev, REG_AUTO_LLT_V1 + 3, + rtwdev->chip->usb_tx_agg_desc_num << 1); + + rtw_write32(rtwdev, REG_HIMR0, 0); + rtw_write32(rtwdev, REG_HIMR1, 0); + + rtw_write32_mask(rtwdev, REG_RRSR, 0xfffff, 0xfffff); + + rtw_write16(rtwdev, REG_RETRY_LIMIT, 0x3030); + + rtw_write16(rtwdev, REG_RXFLTMAP0, 0xffff); + rtw_write16(rtwdev, REG_RXFLTMAP1, 0x0400); + rtw_write16(rtwdev, REG_RXFLTMAP2, 0xffff); + + rtw_write8(rtwdev, REG_MAX_AGGR_NUM, 0x36); + rtw_write8(rtwdev, REG_MAX_AGGR_NUM + 1, 0x36); + + /* Set Spec SIFS (used in NAV) */ + rtw_write16(rtwdev, REG_SPEC_SIFS, 0x100a); + rtw_write16(rtwdev, REG_MAC_SPEC_SIFS, 0x100a); + + /* Set SIFS for CCK */ + rtw_write16(rtwdev, REG_SIFS, 0x100a); + + /* Set SIFS for OFDM */ + rtw_write16(rtwdev, REG_SIFS + 2, 0x100a); + + /* TXOP */ + rtw_write32(rtwdev, REG_EDCA_BE_PARAM, 0x005EA42B); + rtw_write32(rtwdev, REG_EDCA_BK_PARAM, 0x0000A44F); + rtw_write32(rtwdev, REG_EDCA_VI_PARAM, 0x005EA324); + rtw_write32(rtwdev, REG_EDCA_VO_PARAM, 0x002FA226); + + rtw_write8_set(rtwdev, REG_FWHW_TXQ_CTRL, BIT(7)); + + rtw_write8(rtwdev, REG_ACKTO, 0x80); + + rtw_write16(rtwdev, REG_BCN_CTRL, + BIT_DIS_TSF_UDT | (BIT_DIS_TSF_UDT << 8)); + rtw_write32_mask(rtwdev, REG_TBTT_PROHIBIT, 0xfffff, WLAN_TBTT_TIME); + rtw_write8(rtwdev, REG_DRVERLYINT, 0x05); + rtw_write8(rtwdev, REG_BCNDMATIM, WLAN_BCN_DMA_TIME); + rtw_write16(rtwdev, REG_BCNTCFG, 0x4413); + rtw_write8(rtwdev, REG_BCN_MAX_ERR, 0xFF); + + rtw_write32(rtwdev, REG_FAST_EDCA_VOVI_SETTING, 0x08070807); + rtw_write32(rtwdev, REG_FAST_EDCA_BEBK_SETTING, 0x08070807); + + if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_USB && + rtwusb->udev->speed == USB_SPEED_SUPER) { + /* Disable U1/U2 Mode to avoid 2.5G spur in USB 3.0. */ + rtw_write8_clr(rtwdev, REG_USB_MOD, BIT(4) | BIT(3)); + /* To avoid USB 3.0 H2C failures. */ + rtw_write16(rtwdev, 0xf002, 0); + + rtw_write8_clr(rtwdev, REG_SW_AMPDU_BURST_MODE_CTRL, + BIT_PRE_TX_CMD); + } else if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_PCIE) { + rtw8814ae_enable_rf_1_2v(rtwdev); + + /* Force antenna B to WiFi.
*/ + rtw_write8_set(rtwdev, REG_PAD_CTRL1, BIT(2)); + rtw_write8_set(rtwdev, REG_PAD_CTRL1 + 1, BIT(0)); + rtw_write8_set(rtwdev, REG_LED_CFG + 3, + (BIT(27) | BIT_DPDT_WL_SEL) >> 24); + } + + return 0; +} + +static void rtw8814a_set_rfe_reg_24g(struct rtw_dev *rtwdev) +{ + switch (rtwdev->efuse.rfe_option) { + case 2: + rtw_write32(rtwdev, REG_RFE_PINMUX_A, 0x72707270); + rtw_write32(rtwdev, REG_RFE_PINMUX_B, 0x72707270); + rtw_write32(rtwdev, REG_RFE_PINMUX_C, 0x72707270); + rtw_write32(rtwdev, REG_RFE_PINMUX_D, 0x77707770); + + rtw_write32_mask(rtwdev, REG_RFE_INVSEL_D, + BIT_RFE_SELSW0_D, 0x72); + + break; + case 1: + rtw_write32(rtwdev, REG_RFE_PINMUX_A, 0x77777777); + rtw_write32(rtwdev, REG_RFE_PINMUX_B, 0x77777777); + rtw_write32(rtwdev, REG_RFE_PINMUX_C, 0x77777777); + rtw_write32(rtwdev, REG_RFE_PINMUX_D, 0x77777777); + + rtw_write32_mask(rtwdev, REG_RFE_INVSEL_D, + BIT_RFE_SELSW0_D, 0x77); + + break; + case 0: + default: + rtw_write32(rtwdev, REG_RFE_PINMUX_A, 0x77777777); + rtw_write32(rtwdev, REG_RFE_PINMUX_B, 0x77777777); + rtw_write32(rtwdev, REG_RFE_PINMUX_C, 0x77777777); + /* Is it not necessary to set REG_RFE_PINMUX_D ? */ + + rtw_write32_mask(rtwdev, REG_RFE_INVSEL_D, + BIT_RFE_SELSW0_D, 0x77); + + break; + } +} + +static void rtw8814a_set_rfe_reg_5g(struct rtw_dev *rtwdev) +{ + switch (rtwdev->efuse.rfe_option) { + case 2: + rtw_write32(rtwdev, REG_RFE_PINMUX_A, 0x37173717); + rtw_write32(rtwdev, REG_RFE_PINMUX_B, 0x37173717); + rtw_write32(rtwdev, REG_RFE_PINMUX_C, 0x37173717); + rtw_write32(rtwdev, REG_RFE_PINMUX_D, 0x77177717); + + rtw_write32_mask(rtwdev, REG_RFE_INVSEL_D, + BIT_RFE_SELSW0_D, 0x37); + + break; + case 1: + rtw_write32(rtwdev, REG_RFE_PINMUX_A, 0x33173317); + rtw_write32(rtwdev, REG_RFE_PINMUX_B, 0x33173317); + rtw_write32(rtwdev, REG_RFE_PINMUX_C, 0x33173317); + rtw_write32(rtwdev, REG_RFE_PINMUX_D, 0x77177717); + + rtw_write32_mask(rtwdev, REG_RFE_INVSEL_D, + BIT_RFE_SELSW0_D, 0x33); + + break; + case 0: + default: + rtw_write32(rtwdev, REG_RFE_PINMUX_A, 0x54775477); + rtw_write32(rtwdev, REG_RFE_PINMUX_B, 0x54775477); + rtw_write32(rtwdev, REG_RFE_PINMUX_C, 0x54775477); + rtw_write32(rtwdev, REG_RFE_PINMUX_D, 0x54775477); + + rtw_write32_mask(rtwdev, REG_RFE_INVSEL_D, + BIT_RFE_SELSW0_D, 0x54); + + break; + } +} + +static void rtw8814a_set_channel_bb_swing(struct rtw_dev *rtwdev, u8 band) +{ + rtw_write32_mask(rtwdev, REG_TXSCALE_A, BB_SWING_MASK, + rtw8814a_get_bb_swing(rtwdev, band, RF_PATH_A)); + rtw_write32_mask(rtwdev, REG_TXSCALE_B, BB_SWING_MASK, + rtw8814a_get_bb_swing(rtwdev, band, RF_PATH_B)); + rtw_write32_mask(rtwdev, REG_TXSCALE_C, BB_SWING_MASK, + rtw8814a_get_bb_swing(rtwdev, band, RF_PATH_C)); + rtw_write32_mask(rtwdev, REG_TXSCALE_D, BB_SWING_MASK, + rtw8814a_get_bb_swing(rtwdev, band, RF_PATH_D)); + rtw8814a_pwrtrack_init(rtwdev); +} + +static void rtw8814a_set_bw_reg_adc(struct rtw_dev *rtwdev, u8 bw) +{ + u32 adc = 0; + + if (bw == RTW_CHANNEL_WIDTH_20) + adc = 0; + else if (bw == RTW_CHANNEL_WIDTH_40) + adc = 1; + else if (bw == RTW_CHANNEL_WIDTH_80) + adc = 2; + + rtw_write32_mask(rtwdev, REG_ADCCLK, BIT(1) | BIT(0), adc); +} + +static void rtw8814a_set_bw_reg_agc(struct rtw_dev *rtwdev, u8 new_band, u8 bw) +{ + u32 agc = 7; + + if (bw == RTW_CHANNEL_WIDTH_20) { + agc = 6; + } else if (bw == RTW_CHANNEL_WIDTH_40) { + if (new_band == RTW_BAND_5G) + agc = 8; + else + agc = 7; + } else if (bw == RTW_CHANNEL_WIDTH_80) { + agc = 3; + } + + rtw_write32_mask(rtwdev, REG_CCASEL, 0xf000, agc); +} + +static void 
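/* switch the BB between the 2 GHz and 5 GHz register configurations while the BB reset bit is held cleared */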
rtw8814a_switch_band(struct rtw_dev *rtwdev, u8 new_band, u8 bw) +{ + /* Clear 0x1000[16]. When this bit is set to 0, CCK and OFDM + * are disabled and their clocks are gated. Otherwise, CCK and + * OFDM are enabled. + */ + rtw_write8_clr(rtwdev, REG_SYS_CFG3_8814A + 2, BIT_FEN_BB_RSTB); + + if (new_band == RTW_BAND_2G) { + rtw_write32_mask(rtwdev, REG_AGC_TABLE, 0x1f, 0); + + rtw8814a_set_rfe_reg_24g(rtwdev); + + rtw_write32_mask(rtwdev, REG_TXPSEL, 0xf0, 0x2); + rtw_write32_mask(rtwdev, REG_CCK_RX, 0x0f000000, 0x5); + + rtw_write32_mask(rtwdev, REG_RXPSEL, BIT_RX_PSEL_RST, 0x3); + + rtw_write8(rtwdev, REG_CCK_CHECK, 0); + + rtw_write32_mask(rtwdev, 0xa80, BIT(18), 0); + } else { + rtw_write8(rtwdev, REG_CCK_CHECK, BIT_CHECK_CCK_EN); + + /* Enable CCK Tx function, even when CCK is off */ + rtw_write32_mask(rtwdev, 0xa80, BIT(18), 1); + + rtw8814a_set_rfe_reg_5g(rtwdev); + + rtw_write32_mask(rtwdev, REG_TXPSEL, 0xf0, 0x0); + rtw_write32_mask(rtwdev, REG_CCK_RX, 0x0f000000, 0xf); + + rtw_write32_mask(rtwdev, REG_RXPSEL, BIT_RX_PSEL_RST, 0x2); + } + + rtw8814a_set_channel_bb_swing(rtwdev, new_band); + + rtw8814a_set_bw_reg_adc(rtwdev, bw); + rtw8814a_set_bw_reg_agc(rtwdev, new_band, bw); + + rtw_write8_set(rtwdev, REG_SYS_CFG3_8814A + 2, BIT_FEN_BB_RSTB); +} + +static void rtw8814a_switch_channel(struct rtw_dev *rtwdev, u8 channel) +{ + struct rtw_hal *hal = &rtwdev->hal; + u32 fc_area, rf_mod_ag, cfgch; + u8 path; + + switch (channel) { + case 36 ... 48: + fc_area = 0x494; + break; + case 50 ... 64: + fc_area = 0x453; + break; + case 100 ... 116: + fc_area = 0x452; + break; + default: + if (channel >= 118) + fc_area = 0x412; + else + fc_area = 0x96a; + break; + } + + rtw_write32_mask(rtwdev, REG_CLKTRK, 0x1ffe0000, fc_area); + + for (path = 0; path < hal->rf_path_num; path++) { + switch (channel) { + case 36 ... 64: + rf_mod_ag = 0x101; + break; + case 100 ... 140: + rf_mod_ag = 0x301; + break; + default: + if (channel > 140) + rf_mod_ag = 0x501; + else + rf_mod_ag = 0x000; + break; + } + + cfgch = (rf_mod_ag << 8) | channel; + + rtw_write_rf(rtwdev, path, RF_CFGCH, + RF18_RFSI_MASK | RF18_BAND_MASK | RF18_CHANNEL_MASK, cfgch); + } + + switch (channel) { + case 36 ... 64: + rtw_write32_mask(rtwdev, REG_AGC_TABLE, 0x1f, 1); + break; + case 100 ...
144: + rtw_write32_mask(rtwdev, REG_AGC_TABLE, 0x1f, 2); + break; + default: + if (channel >= 149) + rtw_write32_mask(rtwdev, REG_AGC_TABLE, 0x1f, 3); + + break; + } +} + +static void rtw8814a_24g_cck_tx_dfir(struct rtw_dev *rtwdev, u8 channel) +{ + if (channel >= 1 && channel <= 11) { + rtw_write32(rtwdev, REG_CCK0_TX_FILTER1, 0x1a1b0030); + rtw_write32(rtwdev, REG_CCK0_TX_FILTER2, 0x090e1317); + rtw_write32(rtwdev, REG_CCK0_DEBUG_PORT, 0x00000204); + } else if (channel >= 12 && channel <= 13) { + rtw_write32(rtwdev, REG_CCK0_TX_FILTER1, 0x1a1b0030); + rtw_write32(rtwdev, REG_CCK0_TX_FILTER2, 0x090e1217); + rtw_write32(rtwdev, REG_CCK0_DEBUG_PORT, 0x00000305); + } else if (channel == 14) { + rtw_write32(rtwdev, REG_CCK0_TX_FILTER1, 0x1a1b0030); + rtw_write32(rtwdev, REG_CCK0_TX_FILTER2, 0x00000E17); + rtw_write32(rtwdev, REG_CCK0_DEBUG_PORT, 0x00000000); + } +} + +static void rtw8814a_set_bw_reg_mac(struct rtw_dev *rtwdev, u8 bw) +{ + u16 val16 = rtw_read16(rtwdev, REG_WMAC_TRXPTCL_CTL); + + val16 &= ~BIT_RFMOD; + if (bw == RTW_CHANNEL_WIDTH_80) + val16 |= BIT_RFMOD_80M; + else if (bw == RTW_CHANNEL_WIDTH_40) + val16 |= BIT_RFMOD_40M; + + rtw_write16(rtwdev, REG_WMAC_TRXPTCL_CTL, val16); +} + +static void rtw8814a_set_bw_rf(struct rtw_dev *rtwdev, u8 bw) +{ + u8 path; + + for (path = RF_PATH_A; path < rtwdev->hal.rf_path_num; path++) { + switch (bw) { + case RTW_CHANNEL_WIDTH_5: + case RTW_CHANNEL_WIDTH_10: + case RTW_CHANNEL_WIDTH_20: + default: + rtw_write_rf(rtwdev, path, RF_CFGCH, RF18_BW_MASK, 3); + break; + case RTW_CHANNEL_WIDTH_40: + rtw_write_rf(rtwdev, path, RF_CFGCH, RF18_BW_MASK, 1); + break; + case RTW_CHANNEL_WIDTH_80: + rtw_write_rf(rtwdev, path, RF_CFGCH, RF18_BW_MASK, 0); + break; + } + } +} + +static void rtw8814a_adc_clk(struct rtw_dev *rtwdev) +{ + static const u32 rxiqc_reg[2][4] = { + { REG_RX_IQC_AB_A, REG_RX_IQC_AB_B, + REG_RX_IQC_AB_C, REG_RX_IQC_AB_D }, + { REG_RX_IQC_CD_A, REG_RX_IQC_CD_B, + REG_RX_IQC_CD_C, REG_RX_IQC_CD_D } + }; + u32 bb_reg_8fc, bb_reg_808, rxiqc[4]; + u32 i = 0, mac_active = 1; + u8 mac_reg_522; + + if (rtwdev->hal.cut_version != RTW_CHIP_VER_CUT_A) + return; + + /* 1 Step1. MAC TX pause */ + mac_reg_522 = rtw_read8(rtwdev, REG_TXPAUSE); + bb_reg_8fc = rtw_read32(rtwdev, REG_DBGSEL); + bb_reg_808 = rtw_read32(rtwdev, REG_RXPSEL); + rtw_write8(rtwdev, REG_TXPAUSE, 0x3f); + + /* 1 Step 2. Backup rxiqc & rxiqc = 0 */ + for (i = 0; i < 4; i++) { + rxiqc[i] = rtw_read32(rtwdev, rxiqc_reg[0][i]); + rtw_write32(rtwdev, rxiqc_reg[0][i], 0x0); + rtw_write32(rtwdev, rxiqc_reg[1][i], 0x0); + } + rtw_write32_mask(rtwdev, REG_PRECTRL, BIT_IQ_WGT, 0x3); + i = 0; + + /* 1 Step 3. Monitor MAC IDLE */ + rtw_write32(rtwdev, REG_DBGSEL, 0x0); + while (mac_active) { + mac_active = rtw_read32(rtwdev, REG_DBGRPT) & 0x803e0008; + i++; + if (i > 1000) + break; + } + + /* 1 Step 4. 
ADC clk flow */ + rtw_write8(rtwdev, REG_RXPSEL, 0x11); + rtw_write32_mask(rtwdev, REG_DAC_RSTB, BIT(13), 0x1); + rtw_write8_mask(rtwdev, REG_GNT_BT, BIT(2) | BIT(1), 0x3); + rtw_write32_mask(rtwdev, REG_CCK_RPT_FORMAT, BIT(2), 0x1); + + /* 0xc1c/0xe1c/0x181c/0x1a1c[4] must=1 to ensure table can be + * written when bbrstb=0 + * 0xc60/0xe60/0x1860/0x1a60[15] always = 1 after this line + * 0xc60/0xe60/0x1860/0x1a60[14] always = 0 because it is broken in the A-cut + */ + + /* power_off/clk_off @ anapar_state=idle mode */ + rtw_write32(rtwdev, REG_AFE_PWR1_A, 0x15800002); + rtw_write32(rtwdev, REG_AFE_PWR1_A, 0x01808003); + rtw_write32(rtwdev, REG_AFE_PWR1_B, 0x15800002); + rtw_write32(rtwdev, REG_AFE_PWR1_B, 0x01808003); + rtw_write32(rtwdev, REG_AFE_PWR1_C, 0x15800002); + rtw_write32(rtwdev, REG_AFE_PWR1_C, 0x01808003); + rtw_write32(rtwdev, REG_AFE_PWR1_D, 0x15800002); + rtw_write32(rtwdev, REG_AFE_PWR1_D, 0x01808003); + + rtw_write8_mask(rtwdev, REG_GNT_BT, BIT(2), 0x0); + rtw_write32_mask(rtwdev, REG_CCK_RPT_FORMAT, BIT(2), 0x0); + /* [19] = 1 to turn off ADC */ + rtw_write32(rtwdev, REG_CK_MONHA, 0x0D080058); + rtw_write32(rtwdev, REG_CK_MONHB, 0x0D080058); + rtw_write32(rtwdev, REG_CK_MONHC, 0x0D080058); + rtw_write32(rtwdev, REG_CK_MONHD, 0x0D080058); + + /* power_on/clk_off */ + /* [19] = 0 to turn on ADC */ + rtw_write32(rtwdev, REG_CK_MONHA, 0x0D000058); + rtw_write32(rtwdev, REG_CK_MONHB, 0x0D000058); + rtw_write32(rtwdev, REG_CK_MONHC, 0x0D000058); + rtw_write32(rtwdev, REG_CK_MONHD, 0x0D000058); + + /* power_on/clk_on @ anapar_state=BT mode */ + rtw_write32(rtwdev, REG_AFE_PWR1_A, 0x05808032); + rtw_write32(rtwdev, REG_AFE_PWR1_B, 0x05808032); + rtw_write32(rtwdev, REG_AFE_PWR1_C, 0x05808032); + rtw_write32(rtwdev, REG_AFE_PWR1_D, 0x05808032); + rtw_write8_mask(rtwdev, REG_GNT_BT, BIT(2), 0x1); + rtw_write32_mask(rtwdev, REG_CCK_RPT_FORMAT, BIT(2), 0x1); + + /* recover original setting @ anapar_state=BT mode */ + rtw_write32(rtwdev, REG_AFE_PWR1_A, 0x05808032); + rtw_write32(rtwdev, REG_AFE_PWR1_B, 0x05808032); + rtw_write32(rtwdev, REG_AFE_PWR1_C, 0x05808032); + rtw_write32(rtwdev, REG_AFE_PWR1_D, 0x05808032); + + rtw_write32(rtwdev, REG_AFE_PWR1_A, 0x05800002); + rtw_write32(rtwdev, REG_AFE_PWR1_A, 0x07808003); + rtw_write32(rtwdev, REG_AFE_PWR1_B, 0x05800002); + rtw_write32(rtwdev, REG_AFE_PWR1_B, 0x07808003); + rtw_write32(rtwdev, REG_AFE_PWR1_C, 0x05800002); + rtw_write32(rtwdev, REG_AFE_PWR1_C, 0x07808003); + rtw_write32(rtwdev, REG_AFE_PWR1_D, 0x05800002); + rtw_write32(rtwdev, REG_AFE_PWR1_D, 0x07808003); + + rtw_write8_mask(rtwdev, REG_GNT_BT, BIT(2) | BIT(1), 0x0); + rtw_write32_mask(rtwdev, REG_CCK_RPT_FORMAT, BIT(2), 0x0); + rtw_write32_mask(rtwdev, REG_DAC_RSTB, BIT(13), 0x0); + + /* 1 Step 5.
Recover MAC TX & IQC */ + rtw_write8(rtwdev, REG_TXPAUSE, mac_reg_522); + rtw_write32(rtwdev, REG_DBGSEL, bb_reg_8fc); + rtw_write32(rtwdev, REG_RXPSEL, bb_reg_808); + for (i = 0; i < 4; i++) { + rtw_write32(rtwdev, rxiqc_reg[0][i], rxiqc[i]); + rtw_write32(rtwdev, rxiqc_reg[1][i], 0x01000000); + } + rtw_write32_mask(rtwdev, REG_PRECTRL, BIT_IQ_WGT, 0x0); +} + +static void rtw8814a_spur_calibration_ch140(struct rtw_dev *rtwdev, u8 channel) +{ + struct rtw_hal *hal = &rtwdev->hal; + + /* Add for 8814AE module ch140 MP Rx */ + if (channel == 140) { + if (hal->ch_param[0] == 0) + hal->ch_param[0] = rtw_read32(rtwdev, REG_CCASEL); + if (hal->ch_param[1] == 0) + hal->ch_param[1] = rtw_read32(rtwdev, REG_PDMFTH); + + rtw_write32(rtwdev, REG_CCASEL, 0x75438170); + rtw_write32(rtwdev, REG_PDMFTH, 0x79a18a0a); + } else { + if (rtw_read32(rtwdev, REG_CCASEL) == 0x75438170 && + hal->ch_param[0] != 0) + rtw_write32(rtwdev, REG_CCASEL, hal->ch_param[0]); + + if (rtw_read32(rtwdev, REG_PDMFTH) == 0x79a18a0a && + hal->ch_param[1] != 0) + rtw_write32(rtwdev, REG_PDMFTH, hal->ch_param[1]); + + hal->ch_param[0] = rtw_read32(rtwdev, REG_CCASEL); + hal->ch_param[1] = rtw_read32(rtwdev, REG_PDMFTH); + } +} + +static void rtw8814a_set_nbi_reg(struct rtw_dev *rtwdev, u32 tone_idx) +{ + /* tone_idx X 10 */ + static const u32 nbi_128[] = { + 25, 55, 85, 115, 135, + 155, 185, 205, 225, 245, + 265, 285, 305, 335, 355, + 375, 395, 415, 435, 455, + 485, 505, 525, 555, 585, 615, 635 + }; + u32 reg_idx = 0; + u32 i; + + for (i = 0; i < ARRAY_SIZE(nbi_128); i++) { + if (tone_idx < nbi_128[i]) { + reg_idx = i + 1; + break; + } + } + + rtw_write32_mask(rtwdev, REG_NBI_SETTING, 0xfc000, reg_idx); +} + +static void rtw8814a_nbi_setting(struct rtw_dev *rtwdev, u32 ch, u32 f_intf) +{ + u32 fc, int_distance, tone_idx; + + fc = 2412 + (ch - 1) * 5; + int_distance = abs_diff(fc, f_intf); + + /* 10 * (int_distance / 0.3125) */ + tone_idx = int_distance << 5; + + rtw8814a_set_nbi_reg(rtwdev, tone_idx); + + rtw_write32_mask(rtwdev, REG_NBI_SETTING, BIT_NBI_ENABLE, 1); +} + +static void rtw8814a_spur_nbi_setting(struct rtw_dev *rtwdev) +{ + u8 primary_channel = rtwdev->hal.primary_channel; + u8 rfe_type = rtwdev->efuse.rfe_option; + + if (rfe_type != 0 && rfe_type != 1 && rfe_type != 6 && rfe_type != 7) + return; + + if (primary_channel == 14) + rtw8814a_nbi_setting(rtwdev, primary_channel, 2480); + else if (primary_channel >= 4 && primary_channel <= 8) + rtw8814a_nbi_setting(rtwdev, primary_channel, 2440); + else + rtw_write32_mask(rtwdev, REG_NBI_SETTING, BIT_NBI_ENABLE, 0); +} + +/* A workaround to eliminate the 5280 MHz & 5600 MHz & 5760 MHz spur of 8814A */ +static void rtw8814a_spur_calibration(struct rtw_dev *rtwdev, u8 channel, u8 bw) +{ + u8 rfe_type = rtwdev->efuse.rfe_option; + bool reset_nbi_csi = true; + + if (rfe_type == 0) { + switch (bw) { + case RTW_CHANNEL_WIDTH_40: + if (channel == 54 || channel == 118) { + rtw_write32_mask(rtwdev, REG_NBI_SETTING, + 0x000fe000, 0x3e >> 1); + rtw_write32_mask(rtwdev, REG_CSI_MASK_SETTING1, + BIT(0), 1); + rtw_write32(rtwdev, REG_CSI_FIX_MASK0, 0); + rtw_write32_mask(rtwdev, REG_CSI_FIX_MASK1, + BIT(0), 1); + rtw_write32(rtwdev, REG_CSI_FIX_MASK6, 0); + rtw_write32(rtwdev, REG_CSI_FIX_MASK7, 0); + + reset_nbi_csi = false; + } else if (channel == 151) { + rtw_write32_mask(rtwdev, REG_NBI_SETTING, + 0x000fe000, 0x1e >> 1); + rtw_write32_mask(rtwdev, REG_CSI_MASK_SETTING1, + BIT(0), 1); + rtw_write32_mask(rtwdev, REG_CSI_FIX_MASK0, + BIT(16), 1); + rtw_write32(rtwdev, 
REG_CSI_FIX_MASK1, 0); + rtw_write32(rtwdev, REG_CSI_FIX_MASK6, 0); + rtw_write32(rtwdev, REG_CSI_FIX_MASK7, 0); + + reset_nbi_csi = false; + } + break; + case RTW_CHANNEL_WIDTH_80: + if (channel == 58 || channel == 122) { + rtw_write32_mask(rtwdev, REG_NBI_SETTING, + 0x000fe000, 0x3a >> 1); + rtw_write32_mask(rtwdev, REG_CSI_MASK_SETTING1, + BIT(0), 1); + rtw_write32(rtwdev, REG_CSI_FIX_MASK0, 0); + rtw_write32(rtwdev, REG_CSI_FIX_MASK1, 0); + rtw_write32(rtwdev, REG_CSI_FIX_MASK6, 0); + rtw_write32_mask(rtwdev, REG_CSI_FIX_MASK7, + BIT(0), 1); + + reset_nbi_csi = false; + } else if (channel == 155) { + rtw_write32_mask(rtwdev, REG_NBI_SETTING, + 0x000fe000, 0x5a >> 1); + rtw_write32_mask(rtwdev, REG_CSI_MASK_SETTING1, + BIT(0), 1); + rtw_write32(rtwdev, REG_CSI_FIX_MASK0, 0); + rtw_write32(rtwdev, REG_CSI_FIX_MASK1, 0); + rtw_write32_mask(rtwdev, REG_CSI_FIX_MASK6, + BIT(16), 1); + rtw_write32(rtwdev, REG_CSI_FIX_MASK7, 0); + + reset_nbi_csi = false; + } + break; + case RTW_CHANNEL_WIDTH_20: + if (channel == 153) { + rtw_write32_mask(rtwdev, REG_NBI_SETTING, + 0x000fe000, 0x1e >> 1); + rtw_write32_mask(rtwdev, REG_CSI_MASK_SETTING1, + BIT(0), 1); + rtw_write32(rtwdev, REG_CSI_FIX_MASK0, 0); + rtw_write32(rtwdev, REG_CSI_FIX_MASK1, 0); + rtw_write32(rtwdev, REG_CSI_FIX_MASK6, 0); + rtw_write32_mask(rtwdev, REG_CSI_FIX_MASK7, + BIT(16), 1); + + reset_nbi_csi = false; + } + + rtw8814a_spur_calibration_ch140(rtwdev, channel); + break; + default: + break; + } + } else if (rfe_type == 1 || rfe_type == 2) { + switch (bw) { + case RTW_CHANNEL_WIDTH_20: + if (channel == 153) { + rtw_write32_mask(rtwdev, REG_NBI_SETTING, + 0x000fe000, 0x1E >> 1); + rtw_write32_mask(rtwdev, REG_CSI_MASK_SETTING1, + BIT(0), 1); + rtw_write32(rtwdev, REG_CSI_FIX_MASK0, 0); + rtw_write32(rtwdev, REG_CSI_FIX_MASK1, 0); + rtw_write32(rtwdev, REG_CSI_FIX_MASK6, 0); + rtw_write32_mask(rtwdev, REG_CSI_FIX_MASK7, + BIT(16), 1); + + reset_nbi_csi = false; + } + break; + case RTW_CHANNEL_WIDTH_40: + if (channel == 151) { + rtw_write32_mask(rtwdev, REG_NBI_SETTING, + 0x000fe000, 0x1e >> 1); + rtw_write32_mask(rtwdev, REG_CSI_MASK_SETTING1, + BIT(0), 1); + rtw_write32_mask(rtwdev, REG_CSI_FIX_MASK0, + BIT(16), 1); + rtw_write32(rtwdev, REG_CSI_FIX_MASK1, 0); + rtw_write32(rtwdev, REG_CSI_FIX_MASK6, 0); + rtw_write32(rtwdev, REG_CSI_FIX_MASK7, 0); + + reset_nbi_csi = false; + } + break; + case RTW_CHANNEL_WIDTH_80: + if (channel == 155) { + rtw_write32_mask(rtwdev, REG_NBI_SETTING, + 0x000fe000, 0x5a >> 1); + rtw_write32_mask(rtwdev, REG_CSI_MASK_SETTING1, + BIT(0), 1); + rtw_write32(rtwdev, REG_CSI_FIX_MASK0, 0); + rtw_write32(rtwdev, REG_CSI_FIX_MASK1, 0); + rtw_write32_mask(rtwdev, REG_CSI_FIX_MASK6, + BIT(16), 1); + rtw_write32(rtwdev, REG_CSI_FIX_MASK7, 0); + + reset_nbi_csi = false; + } + break; + default: + break; + } + } + + if (reset_nbi_csi) { + rtw_write32_mask(rtwdev, REG_NBI_SETTING, + 0x000fe000, 0xfc >> 1); + rtw_write32_mask(rtwdev, REG_CSI_MASK_SETTING1, BIT(0), 0); + rtw_write32(rtwdev, REG_CSI_FIX_MASK0, 0); + rtw_write32(rtwdev, REG_CSI_FIX_MASK1, 0); + rtw_write32(rtwdev, REG_CSI_FIX_MASK6, 0); + rtw_write32(rtwdev, REG_CSI_FIX_MASK7, 0); + } + + rtw8814a_spur_nbi_setting(rtwdev); +} + +static void rtw8814a_set_bw_mode(struct rtw_dev *rtwdev, u8 new_band, + u8 channel, u8 bw, u8 primary_chan_idx) +{ + u8 txsc40 = 0, txsc20, txsc; + + rtw8814a_set_bw_reg_mac(rtwdev, bw); + + txsc20 = primary_chan_idx; + if (bw == RTW_CHANNEL_WIDTH_80) { + if (txsc20 == RTW_SC_20_UPPER || txsc20 == RTW_SC_20_UPMOST) + txsc40 = 
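/* the primary 20 MHz channel sits in the upper 40 MHz half of the 80 MHz channel */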
RTW_SC_40_UPPER; + else + txsc40 = RTW_SC_40_LOWER; + } + + txsc = BIT_TXSC_20M(txsc20) | BIT_TXSC_40M(txsc40); + rtw_write8(rtwdev, REG_DATA_SC, txsc); + + rtw8814a_set_bw_reg_adc(rtwdev, bw); + rtw8814a_set_bw_reg_agc(rtwdev, new_band, bw); + + if (bw == RTW_CHANNEL_WIDTH_80) { + rtw_write32_mask(rtwdev, REG_ADCCLK, 0x3c, txsc); + } else if (bw == RTW_CHANNEL_WIDTH_40) { + rtw_write32_mask(rtwdev, REG_ADCCLK, 0x3c, txsc); + + if (txsc == RTW_SC_20_UPPER) + rtw_write32_set(rtwdev, REG_RXSB, BIT(4)); + else + rtw_write32_clr(rtwdev, REG_RXSB, BIT(4)); + } + + rtw8814a_set_bw_rf(rtwdev, bw); + + rtw8814a_adc_clk(rtwdev); + + rtw8814a_spur_calibration(rtwdev, channel, bw); +} + +static void rtw8814a_set_channel(struct rtw_dev *rtwdev, u8 channel, u8 bw, + u8 primary_chan_idx) +{ + u8 old_band, new_band; + + if (rtw_read8(rtwdev, REG_CCK_CHECK) & BIT_CHECK_CCK_EN) + old_band = RTW_BAND_5G; + else + old_band = RTW_BAND_2G; + + if (channel > 14) + new_band = RTW_BAND_5G; + else + new_band = RTW_BAND_2G; + + if (new_band != old_band) + rtw8814a_switch_band(rtwdev, new_band, bw); + + rtw8814a_switch_channel(rtwdev, channel); + + rtw8814a_24g_cck_tx_dfir(rtwdev, channel); + + rtw8814a_set_bw_mode(rtwdev, new_band, channel, bw, primary_chan_idx); +} + +static s8 rtw8814a_cck_rx_pwr(u8 lna_idx, u8 vga_idx) +{ + s8 rx_pwr_all = 0; + + switch (lna_idx) { + case 7: + rx_pwr_all = -38 - 2 * vga_idx; + break; + case 5: + rx_pwr_all = -28 - 2 * vga_idx; + break; + case 3: + rx_pwr_all = -8 - 2 * vga_idx; + break; + case 2: + rx_pwr_all = -1 - 2 * vga_idx; + break; + default: + break; + } + + return rx_pwr_all; +} + +static void rtw8814a_query_phy_status(struct rtw_dev *rtwdev, u8 *phy_status, + struct rtw_rx_pkt_stat *pkt_stat) +{ + struct rtw_dm_info *dm_info = &rtwdev->dm_info; + struct rtw_jaguar_phy_status_rpt *rpt; + u8 gain[RTW_RF_PATH_MAX], rssi, i; + s8 rx_pwr_db, middle1, middle2; + s8 snr[RTW_RF_PATH_MAX]; + s8 evm[RTW_RF_PATH_MAX]; + u8 rfmode, subchannel; + u8 lna, vga; + s8 cfo[2]; + + rpt = (struct rtw_jaguar_phy_status_rpt *)phy_status; + + pkt_stat->bw = RTW_CHANNEL_WIDTH_20; + + if (pkt_stat->rate <= DESC_RATE11M) { + lna = le32_get_bits(rpt->w1, RTW_JGRPHY_W1_AGC_RPT_LNA_IDX); + vga = le32_get_bits(rpt->w1, RTW_JGRPHY_W1_AGC_RPT_VGA_IDX); + + rx_pwr_db = rtw8814a_cck_rx_pwr(lna, vga); + + pkt_stat->rx_power[RF_PATH_A] = rx_pwr_db; + pkt_stat->rssi = rtw_phy_rf_power_2_rssi(pkt_stat->rx_power, 1); + dm_info->rssi[RF_PATH_A] = pkt_stat->rssi; + pkt_stat->signal_power = rx_pwr_db; + } else { /* OFDM rate */ + gain[RF_PATH_A] = le32_get_bits(rpt->w0, RTW_JGRPHY_W0_GAIN_A); + gain[RF_PATH_B] = le32_get_bits(rpt->w0, RTW_JGRPHY_W0_GAIN_B); + gain[RF_PATH_C] = le32_get_bits(rpt->w5, RTW_JGRPHY_W5_GAIN_C); + gain[RF_PATH_D] = le32_get_bits(rpt->w6, RTW_JGRPHY_W6_GAIN_D); + + snr[RF_PATH_A] = le32_get_bits(rpt->w3, RTW_JGRPHY_W3_RXSNR_A); + snr[RF_PATH_B] = le32_get_bits(rpt->w4, RTW_JGRPHY_W4_RXSNR_B); + snr[RF_PATH_C] = le32_get_bits(rpt->w5, RTW_JGRPHY_W5_RXSNR_C); + snr[RF_PATH_D] = le32_get_bits(rpt->w5, RTW_JGRPHY_W5_RXSNR_D); + + evm[RF_PATH_A] = le32_get_bits(rpt->w3, RTW_JGRPHY_W3_RXEVM_1); + evm[RF_PATH_B] = le32_get_bits(rpt->w3, RTW_JGRPHY_W3_RXEVM_2); + evm[RF_PATH_C] = le32_get_bits(rpt->w4, RTW_JGRPHY_W4_RXEVM_3); + evm[RF_PATH_D] = le32_get_bits(rpt->w5, RTW_JGRPHY_W5_RXEVM_4); + + if (pkt_stat->rate <= DESC_RATE54M) + evm[RF_PATH_A] = le32_get_bits(rpt->w6, + RTW_JGRPHY_W6_SIGEVM); + + for (i = RF_PATH_A; i < RTW_RF_PATH_MAX; i++) { + pkt_stat->rx_power[i] = gain[i] - 110; + + 
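/* gain[] is the raw AGC report; subtracting 110 converts it to an rx power in dBm */ +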
rssi = rtw_phy_rf_power_2_rssi(&pkt_stat->rx_power[i], 1); + dm_info->rssi[i] = rssi; + + pkt_stat->rx_snr[i] = snr[i]; + dm_info->rx_snr[i] = snr[i] >> 1; + + pkt_stat->rx_evm[i] = evm[i]; + evm[i] = max_t(s8, -127, evm[i]); + dm_info->rx_evm_dbm[i] = abs(evm[i]) >> 1; + } + + rssi = rtw_phy_rf_power_2_rssi(pkt_stat->rx_power, + RTW_RF_PATH_MAX); + pkt_stat->rssi = rssi; + + /* When power saving is enabled the hardware sometimes + * reports unbelievably high gain for paths A and C + * (e.g. one frame 64 68 68 72, the next frame 106 66 88 72, + * the next 66 66 68 72), so use the second lowest gain + * instead of the highest. + */ + middle1 = max(min(gain[RF_PATH_A], gain[RF_PATH_B]), + min(gain[RF_PATH_C], gain[RF_PATH_D])); + middle2 = min(max(gain[RF_PATH_A], gain[RF_PATH_B]), + max(gain[RF_PATH_C], gain[RF_PATH_D])); + rx_pwr_db = min(middle1, middle2); + rx_pwr_db -= 110; + pkt_stat->signal_power = rx_pwr_db; + + rfmode = le32_get_bits(rpt->w0, RTW_JGRPHY_W0_R_RFMOD); + subchannel = le32_get_bits(rpt->w0, RTW_JGRPHY_W0_SUB_CHNL); + + if (rfmode == 1 && subchannel == 0) { + pkt_stat->bw = RTW_CHANNEL_WIDTH_40; + } else if (rfmode == 2) { + if (subchannel == 0) + pkt_stat->bw = RTW_CHANNEL_WIDTH_80; + else if (subchannel == 9 || subchannel == 10) + pkt_stat->bw = RTW_CHANNEL_WIDTH_40; + } + + cfo[RF_PATH_A] = le32_get_bits(rpt->w2, RTW_JGRPHY_W2_CFO_TAIL_A); + cfo[RF_PATH_B] = le32_get_bits(rpt->w2, RTW_JGRPHY_W2_CFO_TAIL_B); + + for (i = RF_PATH_A; i < 2; i++) { + pkt_stat->cfo_tail[i] = cfo[i]; + dm_info->cfo_tail[i] = (cfo[i] * 5) >> 1; + } + } +} + +static void +rtw8814a_set_tx_power_index_by_rate(struct rtw_dev *rtwdev, u8 path, u8 rs) +{ + struct rtw_hal *hal = &rtwdev->hal; + u32 txagc_table_wd; + u8 rate, pwr_index; + int j; + + for (j = 0; j < rtw_rate_size[rs]; j++) { + rate = rtw_rate_section[rs][j]; + + pwr_index = hal->tx_pwr_tbl[path][rate] + 2; + if (pwr_index > rtwdev->chip->max_power_index) + pwr_index = rtwdev->chip->max_power_index; + + txagc_table_wd = 0x00801000; + txagc_table_wd |= (pwr_index << 24) | (path << 8) | rate; + + rtw_write32(rtwdev, REG_AGC_TBL, txagc_table_wd); + + /* first time to turn on the txagc table + * second to write the addr0 + */ + if (rate == DESC_RATE1M) + rtw_write32(rtwdev, REG_AGC_TBL, txagc_table_wd); + } +} + +static void rtw8814a_set_tx_power_index(struct rtw_dev *rtwdev) +{ + struct rtw_hal *hal = &rtwdev->hal; + int path; + + for (path = 0; path < hal->rf_path_num; path++) { + if (hal->current_band_type == RTW_BAND_2G) + rtw8814a_set_tx_power_index_by_rate(rtwdev, path, + RTW_RATE_SECTION_CCK); + + rtw8814a_set_tx_power_index_by_rate(rtwdev, path, + RTW_RATE_SECTION_OFDM); + + if (test_bit(RTW_FLAG_SCANNING, rtwdev->flags)) + continue; + + rtw8814a_set_tx_power_index_by_rate(rtwdev, path, + RTW_RATE_SECTION_HT_1S); + rtw8814a_set_tx_power_index_by_rate(rtwdev, path, + RTW_RATE_SECTION_VHT_1S); + + rtw8814a_set_tx_power_index_by_rate(rtwdev, path, + RTW_RATE_SECTION_HT_2S); + rtw8814a_set_tx_power_index_by_rate(rtwdev, path, + RTW_RATE_SECTION_VHT_2S); + + rtw8814a_set_tx_power_index_by_rate(rtwdev, path, + RTW_RATE_SECTION_HT_3S); + rtw8814a_set_tx_power_index_by_rate(rtwdev, path, + RTW_RATE_SECTION_VHT_3S); + } +} + +static void rtw8814a_cfg_ldo25(struct rtw_dev *rtwdev, bool enable) +{ +} + +static void rtw8814a_false_alarm_statistics(struct rtw_dev *rtwdev) +{ + struct rtw_dm_info *dm_info = &rtwdev->dm_info; + u32 cck_fa_cnt, ofdm_fa_cnt; + u32 crc32_cnt, cca32_cnt; + u32 cck_enable; + + cck_enable = rtw_read32(rtwdev, 
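/* bit 28 of REG_RXPSEL enables the CCK block */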
REG_RXPSEL) & BIT(28); + cck_fa_cnt = rtw_read16(rtwdev, REG_FA_CCK); + ofdm_fa_cnt = rtw_read16(rtwdev, REG_FA_OFDM); + + dm_info->cck_fa_cnt = cck_fa_cnt; + dm_info->ofdm_fa_cnt = ofdm_fa_cnt; + dm_info->total_fa_cnt = ofdm_fa_cnt; + if (cck_enable) + dm_info->total_fa_cnt += cck_fa_cnt; + + crc32_cnt = rtw_read32(rtwdev, REG_CRC_CCK); + dm_info->cck_ok_cnt = u32_get_bits(crc32_cnt, MASKLWORD); + dm_info->cck_err_cnt = u32_get_bits(crc32_cnt, MASKHWORD); + + crc32_cnt = rtw_read32(rtwdev, REG_CRC_OFDM); + dm_info->ofdm_ok_cnt = u32_get_bits(crc32_cnt, MASKLWORD); + dm_info->ofdm_err_cnt = u32_get_bits(crc32_cnt, MASKHWORD); + + crc32_cnt = rtw_read32(rtwdev, REG_CRC_HT); + dm_info->ht_ok_cnt = u32_get_bits(crc32_cnt, MASKLWORD); + dm_info->ht_err_cnt = u32_get_bits(crc32_cnt, MASKHWORD); + + crc32_cnt = rtw_read32(rtwdev, REG_CRC_VHT); + dm_info->vht_ok_cnt = u32_get_bits(crc32_cnt, MASKLWORD); + dm_info->vht_err_cnt = u32_get_bits(crc32_cnt, MASKHWORD); + + cca32_cnt = rtw_read32(rtwdev, REG_CCA_OFDM); + dm_info->ofdm_cca_cnt = u32_get_bits(cca32_cnt, MASKHWORD); + dm_info->total_cca_cnt = dm_info->ofdm_cca_cnt; + if (cck_enable) { + cca32_cnt = rtw_read32(rtwdev, REG_CCA_CCK); + dm_info->cck_cca_cnt = u32_get_bits(cca32_cnt, MASKLWORD); + dm_info->total_cca_cnt += dm_info->cck_cca_cnt; + } + + rtw_write32_set(rtwdev, REG_FAS, BIT(17)); + rtw_write32_clr(rtwdev, REG_FAS, BIT(17)); + rtw_write32_clr(rtwdev, REG_CCK0_FAREPORT, BIT(15)); + rtw_write32_set(rtwdev, REG_CCK0_FAREPORT, BIT(15)); + rtw_write32_set(rtwdev, REG_CNTRST, BIT(0)); + rtw_write32_clr(rtwdev, REG_CNTRST, BIT(0)); +} + +#define MAC_REG_NUM_8814 2 +#define BB_REG_NUM_8814 14 +#define RF_REG_NUM_8814 1 + +static void rtw8814a_iqk_backup_mac_bb(struct rtw_dev *rtwdev, + u32 *mac_backup, u32 *bb_backup, + const u32 *mac_regs, + const u32 *bb_regs) +{ + u32 i; + + /* save MACBB default value */ + for (i = 0; i < MAC_REG_NUM_8814; i++) + mac_backup[i] = rtw_read32(rtwdev, mac_regs[i]); + + for (i = 0; i < BB_REG_NUM_8814; i++) + bb_backup[i] = rtw_read32(rtwdev, bb_regs[i]); +} + +static void rtw8814a_iqk_backup_rf(struct rtw_dev *rtwdev, + u32 rf_backup[][4], const u32 *rf_regs) +{ + u32 i; + + /* Save RF Parameters */ + for (i = 0; i < RF_REG_NUM_8814; i++) { + rf_backup[i][RF_PATH_A] = rtw_read_rf(rtwdev, RF_PATH_A, + rf_regs[i], RFREG_MASK); + rf_backup[i][RF_PATH_B] = rtw_read_rf(rtwdev, RF_PATH_B, + rf_regs[i], RFREG_MASK); + rf_backup[i][RF_PATH_C] = rtw_read_rf(rtwdev, RF_PATH_C, + rf_regs[i], RFREG_MASK); + rf_backup[i][RF_PATH_D] = rtw_read_rf(rtwdev, RF_PATH_D, + rf_regs[i], RFREG_MASK); + } +} + +static void rtw8814a_iqk_afe_setting(struct rtw_dev *rtwdev, bool do_iqk) +{ + if (do_iqk) { + /* IQK AFE setting RX_WAIT_CCA mode */ + rtw_write32(rtwdev, REG_AFE_PWR1_A, 0x0e808003); + rtw_write32(rtwdev, REG_AFE_PWR1_B, 0x0e808003); + rtw_write32(rtwdev, REG_AFE_PWR1_C, 0x0e808003); + rtw_write32(rtwdev, REG_AFE_PWR1_D, 0x0e808003); + } else { + rtw_write32(rtwdev, REG_AFE_PWR1_A, 0x07808003); + rtw_write32(rtwdev, REG_AFE_PWR1_B, 0x07808003); + rtw_write32(rtwdev, REG_AFE_PWR1_C, 0x07808003); + rtw_write32(rtwdev, REG_AFE_PWR1_D, 0x07808003); + } + + rtw_write32_mask(rtwdev, REG_DAC_RSTB, BIT(13), 0x1); + + rtw_write8_set(rtwdev, REG_GNT_BT, BIT(2) | BIT(1)); + rtw_write8_clr(rtwdev, REG_GNT_BT, BIT(2) | BIT(1)); + + rtw_write32_set(rtwdev, REG_CCK_RPT_FORMAT, BIT(2)); + rtw_write32_clr(rtwdev, REG_CCK_RPT_FORMAT, BIT(2)); +} + +static void rtw8814a_iqk_restore_mac_bb(struct rtw_dev *rtwdev, + u32 *mac_backup, u32 
*bb_backup, + const u32 *mac_regs, + const u32 *bb_regs) +{ + u32 i; + + /* Reload MacBB Parameters */ + for (i = 0; i < MAC_REG_NUM_8814; i++) + rtw_write32(rtwdev, mac_regs[i], mac_backup[i]); + + for (i = 0; i < BB_REG_NUM_8814; i++) + rtw_write32(rtwdev, bb_regs[i], bb_backup[i]); +} + +static void rtw8814a_iqk_restore_rf(struct rtw_dev *rtwdev, + const u32 rf_backup[][4], + const u32 *rf_regs) +{ + u32 i; + + rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWE, RFREG_MASK, 0x0); + rtw_write_rf(rtwdev, RF_PATH_B, RF_LUTWE, RFREG_MASK, 0x0); + rtw_write_rf(rtwdev, RF_PATH_C, RF_LUTWE, RFREG_MASK, 0x0); + rtw_write_rf(rtwdev, RF_PATH_D, RF_LUTWE, RFREG_MASK, 0x0); + + rtw_write_rf(rtwdev, RF_PATH_A, RF_RXBB2, RFREG_MASK, 0x88001); + rtw_write_rf(rtwdev, RF_PATH_B, RF_RXBB2, RFREG_MASK, 0x88001); + rtw_write_rf(rtwdev, RF_PATH_C, RF_RXBB2, RFREG_MASK, 0x88001); + rtw_write_rf(rtwdev, RF_PATH_D, RF_RXBB2, RFREG_MASK, 0x88001); + + for (i = 0; i < RF_REG_NUM_8814; i++) { + rtw_write_rf(rtwdev, RF_PATH_A, rf_regs[i], + RFREG_MASK, rf_backup[i][RF_PATH_A]); + rtw_write_rf(rtwdev, RF_PATH_B, rf_regs[i], + RFREG_MASK, rf_backup[i][RF_PATH_B]); + rtw_write_rf(rtwdev, RF_PATH_C, rf_regs[i], + RFREG_MASK, rf_backup[i][RF_PATH_C]); + rtw_write_rf(rtwdev, RF_PATH_D, rf_regs[i], + RFREG_MASK, rf_backup[i][RF_PATH_D]); + } +} + +static void rtw8814a_iqk_reset_nctl(struct rtw_dev *rtwdev) +{ + rtw_write32(rtwdev, 0x1b00, 0xf8000000); + rtw_write32(rtwdev, 0x1b80, 0x00000006); + + rtw_write32(rtwdev, 0x1b00, 0xf8000000); + rtw_write32(rtwdev, 0x1b80, 0x00000002); +} + +static void rtw8814a_iqk_configure_mac(struct rtw_dev *rtwdev) +{ + rtw_write8(rtwdev, REG_TXPAUSE, 0x3f); + rtw_write32_clr(rtwdev, REG_BCN_CTRL, + (BIT_EN_BCN_FUNCTION << 8) | BIT_EN_BCN_FUNCTION); + + /* RX ante off */ + rtw_write8(rtwdev, REG_RXPSEL, 0x00); + /* CCA off */ + rtw_write32_mask(rtwdev, REG_CCA2ND, 0xf, 0xe); + /* CCK RX path off */ + rtw_write32_set(rtwdev, REG_PRECTRL, BIT_IQ_WGT); + rtw_write32(rtwdev, REG_RFE_PINMUX_A, 0x77777777); + rtw_write32(rtwdev, REG_RFE_PINMUX_B, 0x77777777); + rtw_write32(rtwdev, REG_RFE_PINMUX_C, 0x77777777); + rtw_write32(rtwdev, REG_RFE_PINMUX_D, 0x77777777); + rtw_write32_mask(rtwdev, REG_RFE_INVSEL_D, BIT_RFE_SELSW0_D, 0x77); + rtw_write32_mask(rtwdev, REG_PSD, BIT_PSD_INI, 0x0); + + rtw_write32_mask(rtwdev, REG_RFE_INV0, 0xf, 0x0); +} + +static void rtw8814a_lok_one_shot(struct rtw_dev *rtwdev, u8 path) +{ + u32 lok_temp1, lok_temp2; + bool lok_ready; + u8 ii; + + /* ADC Clock source */ + rtw_write32_mask(rtwdev, REG_FAS, BIT(21) | BIT(20), path); + /* LOK: CMD ID = 0 + * {0xf8000011, 0xf8000021, 0xf8000041, 0xf8000081} + */ + rtw_write32(rtwdev, 0x1b00, 0xf8000001 | (BIT(path) << 4)); + + usleep_range(1000, 1100); + + if (read_poll_timeout(!rtw_read32_mask, lok_ready, lok_ready, + 1000, 10000, false, + rtwdev, 0x1b00, BIT(0))) { + rtw_dbg(rtwdev, RTW_DBG_RFK, "==>S%d LOK timed out\n", path); + + rtw8814a_iqk_reset_nctl(rtwdev); + + rtw_write_rf(rtwdev, path, RF_DTXLOK, RFREG_MASK, 0x08400); + + return; + } + + rtw_write32(rtwdev, 0x1b00, 0xf8000000 | (path << 1)); + rtw_write32(rtwdev, 0x1bd4, 0x003f0001); + + lok_temp2 = rtw_read32_mask(rtwdev, 0x1bfc, 0x003e0000); + lok_temp2 = (lok_temp2 + 0x10) & 0x1f; + + lok_temp1 = rtw_read32_mask(rtwdev, 0x1bfc, 0x0000003e); + lok_temp1 = (lok_temp1 + 0x10) & 0x1f; + + for (ii = 1; ii < 5; ii++) { + lok_temp1 += (lok_temp1 & BIT(4 - ii)) << (ii * 2); + lok_temp2 += (lok_temp2 & BIT(4 - ii)) << (ii * 2); + } + + rtw_dbg(rtwdev, RTW_DBG_RFK, + "path %d 
lok_temp1 = %#x, lok_temp2 = %#x\n", + path, lok_temp1 >> 4, lok_temp2 >> 4); + + rtw_write_rf(rtwdev, path, RF_DTXLOK, 0x07c00, lok_temp1 >> 4); + rtw_write_rf(rtwdev, path, RF_DTXLOK, 0xf8000, lok_temp2 >> 4); +} + +static void rtw8814a_iqk_tx_one_shot(struct rtw_dev *rtwdev, u8 path, + u32 *tx_matrix, bool *tx_ok) +{ + u8 bw = rtwdev->hal.current_band_width; + u8 cal_retry; + u32 iqk_cmd; + + for (cal_retry = 0; cal_retry < 4; cal_retry++) { + rtw_write32_mask(rtwdev, REG_FAS, BIT(21) | BIT(20), path); + + iqk_cmd = 0xf8000001 | ((bw + 3) << 8) | (BIT(path) << 4); + + rtw_dbg(rtwdev, RTW_DBG_RFK, "TXK_Trigger = %#x\n", iqk_cmd); + + rtw_write32(rtwdev, 0x1b00, iqk_cmd); + + usleep_range(10000, 11000); + + if (read_poll_timeout(!rtw_read32_mask, *tx_ok, *tx_ok, + 1000, 20000, false, + rtwdev, 0x1b00, BIT(0))) { + rtw_dbg(rtwdev, RTW_DBG_RFK, + "tx iqk S%d timed out\n", path); + + rtw8814a_iqk_reset_nctl(rtwdev); + } else { + *tx_ok = !rtw_read32_mask(rtwdev, 0x1b08, BIT(26)); + + if (*tx_ok) + break; + } + } + + rtw_dbg(rtwdev, RTW_DBG_RFK, "S%d tx ==> 0x1b00 = 0x%x\n", + path, rtw_read32(rtwdev, 0x1b00)); + rtw_dbg(rtwdev, RTW_DBG_RFK, "S%d tx ==> 0x1b08 = 0x%x\n", + path, rtw_read32(rtwdev, 0x1b08)); + rtw_dbg(rtwdev, RTW_DBG_RFK, "S%d tx ==> cal_retry = %x\n", + path, cal_retry); + + rtw_write32(rtwdev, 0x1b00, 0xf8000000 | (path << 1)); + + if (*tx_ok) { + *tx_matrix = rtw_read32(rtwdev, 0x1b38); + + rtw_dbg(rtwdev, RTW_DBG_RFK, "S%d_IQC = 0x%x\n", + path, *tx_matrix); + } +} + +static void rtw8814a_iqk_rx_one_shot(struct rtw_dev *rtwdev, u8 path, + u32 *tx_matrix, bool *tx_ok) +{ + static const u16 iqk_apply[RTW_RF_PATH_MAX] = { + REG_TXAGCIDX, REG_TX_AGC_B, REG_TX_AGC_C, REG_TX_AGC_D + }; + u8 band = rtwdev->hal.current_band_type; + u8 bw = rtwdev->hal.current_band_width; + u32 rx_matrix; + u8 cal_retry; + u32 iqk_cmd; + bool rx_ok; + + for (cal_retry = 0; cal_retry < 4; cal_retry++) { + rtw_write32_mask(rtwdev, REG_FAS, BIT(21) | BIT(20), path); + + if (band == RTW_BAND_2G) { + rtw_write_rf(rtwdev, path, RF_LUTDBG, BIT(11), 0x1); + rtw_write_rf(rtwdev, path, RF_GAINTX, 0xfffff, 0x51ce1); + + switch (path) { + case 0: + case 1: + rtw_write32(rtwdev, REG_RFE_PINMUX_B, + 0x54775477); + break; + case 2: + rtw_write32(rtwdev, REG_RFE_PINMUX_C, + 0x54775477); + break; + case 3: + rtw_write32(rtwdev, REG_RFE_INVSEL_D, 0x75400000); + rtw_write32(rtwdev, REG_RFE_PINMUX_D, + 0x77777777); + break; + } + } + + iqk_cmd = 0xf8000001 | ((9 - bw) << 8) | (BIT(path) << 4); + + rtw_dbg(rtwdev, RTW_DBG_RFK, "RXK_Trigger = 0x%x\n", iqk_cmd); + + rtw_write32(rtwdev, 0x1b00, iqk_cmd); + + usleep_range(10000, 11000); + + if (read_poll_timeout(!rtw_read32_mask, rx_ok, rx_ok, + 1000, 20000, false, + rtwdev, 0x1b00, BIT(0))) { + rtw_dbg(rtwdev, RTW_DBG_RFK, + "rx iqk S%d timed out\n", path); + + rtw8814a_iqk_reset_nctl(rtwdev); + } else { + rx_ok = !rtw_read32_mask(rtwdev, 0x1b08, BIT(26)); + + if (rx_ok) + break; + } + } + + rtw_dbg(rtwdev, RTW_DBG_RFK, "S%d rx ==> 0x1b00 = 0x%x\n", + path, rtw_read32(rtwdev, 0x1b00)); + rtw_dbg(rtwdev, RTW_DBG_RFK, "S%d rx ==> 0x1b08 = 0x%x\n", + path, rtw_read32(rtwdev, 0x1b08)); + rtw_dbg(rtwdev, RTW_DBG_RFK, "S%d rx ==> cal_retry = %x\n", + path, cal_retry); + + rtw_write32(rtwdev, 0x1b00, 0xf8000000 | (path << 1)); + + if (rx_ok) { + rtw_write32(rtwdev, 0x1b3c, 0x20000000); + rx_matrix = rtw_read32(rtwdev, 0x1b3c); + + rtw_dbg(rtwdev, RTW_DBG_RFK, "S%d_IQC = 0x%x\n", + path, rx_matrix); + } + + if (*tx_ok) + rtw_write32(rtwdev, 0x1b38, *tx_matrix); + else + 
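/* TX IQK failed for this path: don't apply a TX IQ correction. */ +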
rtw_write32_mask(rtwdev, iqk_apply[path], BIT(0), 0x0); + + if (!rx_ok) + rtw_write32_mask(rtwdev, iqk_apply[path], + BIT(11) | BIT(10), 0x0); + + if (band == RTW_BAND_2G) + rtw_write_rf(rtwdev, path, RF_LUTDBG, BIT(11), 0x0); +} + +static void rtw8814a_iqk(struct rtw_dev *rtwdev) +{ + u8 band = rtwdev->hal.current_band_type; + u8 bw = rtwdev->hal.current_band_width; + u32 tx_matrix[RTW_RF_PATH_MAX]; + bool tx_ok[RTW_RF_PATH_MAX]; + u8 path; + + rtw_dbg(rtwdev, RTW_DBG_RFK, "IQK band = %d GHz bw = %d MHz\n", + band == RTW_BAND_2G ? 2 : 5, (1 << (bw + 1)) * 10); + + rtw_write_rf(rtwdev, RF_PATH_A, RF_TXMOD, BIT(19), 0x1); + rtw_write_rf(rtwdev, RF_PATH_B, RF_TXMOD, BIT(19), 0x1); + rtw_write_rf(rtwdev, RF_PATH_C, RF_TXMOD, BIT(19), 0x1); + rtw_write_rf(rtwdev, RF_PATH_D, RF_TXMOD, BIT(19), 0x1); + + rtw_write32_mask(rtwdev, REG_TXAGCIDX, + (BIT(11) | BIT(10) | BIT(0)), 0x401); + rtw_write32_mask(rtwdev, REG_TX_AGC_B, + (BIT(11) | BIT(10) | BIT(0)), 0x401); + rtw_write32_mask(rtwdev, REG_TX_AGC_C, + (BIT(11) | BIT(10) | BIT(0)), 0x401); + rtw_write32_mask(rtwdev, REG_TX_AGC_D, + (BIT(11) | BIT(10) | BIT(0)), 0x401); + + if (band == RTW_BAND_5G) + rtw_write32(rtwdev, 0x1b00, 0xf8000ff1); + else + rtw_write32(rtwdev, 0x1b00, 0xf8000ef1); + + usleep_range(1000, 1100); + + rtw_write32(rtwdev, 0x810, 0x20101063); + rtw_write32(rtwdev, REG_DAC_RSTB, 0x0B00C000); + + for (path = RF_PATH_A; path < RTW_RF_PATH_MAX; path++) + rtw8814a_lok_one_shot(rtwdev, path); + + for (path = RF_PATH_A; path < RTW_RF_PATH_MAX; path++) + rtw8814a_iqk_tx_one_shot(rtwdev, path, + &tx_matrix[path], &tx_ok[path]); + + for (path = RF_PATH_A; path < RTW_RF_PATH_MAX; path++) + rtw8814a_iqk_rx_one_shot(rtwdev, path, + &tx_matrix[path], &tx_ok[path]); +} + +static void rtw8814a_do_iqk(struct rtw_dev *rtwdev) +{ + static const u32 backup_mac_reg[MAC_REG_NUM_8814] = {0x520, 0x550}; + static const u32 backup_bb_reg[BB_REG_NUM_8814] = { + 0xa14, 0x808, 0x838, 0x90c, 0x810, 0xcb0, 0xeb0, + 0x18b4, 0x1ab4, 0x1abc, 0x9a4, 0x764, 0xcbc, 0x910 + }; + static const u32 backup_rf_reg[RF_REG_NUM_8814] = {0x0}; + u32 rf_backup[RF_REG_NUM_8814][RTW_RF_PATH_MAX]; + u32 mac_backup[MAC_REG_NUM_8814]; + u32 bb_backup[BB_REG_NUM_8814]; + + rtw8814a_iqk_backup_mac_bb(rtwdev, mac_backup, bb_backup, + backup_mac_reg, backup_bb_reg); + rtw8814a_iqk_afe_setting(rtwdev, true); + rtw8814a_iqk_backup_rf(rtwdev, rf_backup, backup_rf_reg); + rtw8814a_iqk_configure_mac(rtwdev); + rtw8814a_iqk(rtwdev); + rtw8814a_iqk_reset_nctl(rtwdev); /* for 3-wire to BB use */ + rtw8814a_iqk_afe_setting(rtwdev, false); + rtw8814a_iqk_restore_mac_bb(rtwdev, mac_backup, bb_backup, + backup_mac_reg, backup_bb_reg); + rtw8814a_iqk_restore_rf(rtwdev, rf_backup, backup_rf_reg); +} + +static void rtw8814a_phy_calibration(struct rtw_dev *rtwdev) +{ + rtw8814a_do_iqk(rtwdev); +} + +static void rtw8814a_coex_cfg_init(struct rtw_dev *rtwdev) +{ +} + +static void rtw8814a_coex_cfg_ant_switch(struct rtw_dev *rtwdev, u8 ctrl_type, + u8 pos_type) +{ + /* Override rtw_coex_coex_ctrl_owner(). RF path C does not + * function when BIT_LTE_MUX_CTRL_PATH is set. 
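+ * Keeping the bit cleared keeps all four RF paths, including + * path C, usable.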
+ */ + rtw_write8_clr(rtwdev, REG_SYS_SDIO_CTRL + 3, + BIT_LTE_MUX_CTRL_PATH >> 24); +} + +static void rtw8814a_coex_cfg_gnt_fix(struct rtw_dev *rtwdev) +{ +} + +static void rtw8814a_coex_cfg_gnt_debug(struct rtw_dev *rtwdev) +{ +} + +static void rtw8814a_coex_cfg_rfe_type(struct rtw_dev *rtwdev) +{ + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_rfe *coex_rfe = &coex->rfe; + + /* Only needed to make rtw8814a_coex_cfg_ant_switch() run. */ + coex_rfe->ant_switch_exist = true; +} + +static void rtw8814a_coex_cfg_wl_tx_power(struct rtw_dev *rtwdev, u8 wl_pwr) +{ +} + +static void rtw8814a_coex_cfg_wl_rx_gain(struct rtw_dev *rtwdev, bool low_gain) +{ +} + +static void rtw8814a_txagc_swing_offset(struct rtw_dev *rtwdev, u8 path, + u8 tx_pwr_idx_offset, + s8 *txagc_idx, u8 *swing_idx) +{ + struct rtw_dm_info *dm_info = &rtwdev->dm_info; + u8 swing_upper_bound = dm_info->default_ofdm_index + 10; + s8 delta_pwr_idx = dm_info->delta_power_index[path]; + u8 swing_index = dm_info->default_ofdm_index; + u8 max_tx_pwr_idx_offset = 0xf; + u8 swing_lower_bound = 0; + s8 agc_index = 0; + + tx_pwr_idx_offset = min_t(u8, tx_pwr_idx_offset, max_tx_pwr_idx_offset); + + if (delta_pwr_idx >= 0) { + if (delta_pwr_idx <= tx_pwr_idx_offset) { + agc_index = delta_pwr_idx; + swing_index = dm_info->default_ofdm_index; + } else if (delta_pwr_idx > tx_pwr_idx_offset) { + agc_index = tx_pwr_idx_offset; + swing_index = dm_info->default_ofdm_index + + delta_pwr_idx - tx_pwr_idx_offset; + swing_index = min_t(u8, swing_index, swing_upper_bound); + } + } else { + if (dm_info->default_ofdm_index > abs(delta_pwr_idx)) + swing_index = + dm_info->default_ofdm_index + delta_pwr_idx; + else + swing_index = swing_lower_bound; + swing_index = max_t(u8, swing_index, swing_lower_bound); + + agc_index = 0; + } + + if (swing_index >= RTW_TXSCALE_SIZE) { + rtw_warn(rtwdev, "swing index overflow\n"); + swing_index = RTW_TXSCALE_SIZE - 1; + } + *txagc_idx = agc_index; + *swing_idx = swing_index; +} + +static void rtw8814a_pwrtrack_set_pwr(struct rtw_dev *rtwdev, u8 path, + u8 pwr_idx_offset) +{ + static const u32 txagc_reg[RTW_RF_PATH_MAX] = { + REG_TX_AGC_A, REG_TX_AGC_B, REG_TX_AGC_C, REG_TX_AGC_D + }; + static const u32 txscale_reg[RTW_RF_PATH_MAX] = { + REG_TXSCALE_A, REG_TXSCALE_B, REG_TXSCALE_C, REG_TXSCALE_D + }; + s8 txagc_idx; + u8 swing_idx; + + rtw8814a_txagc_swing_offset(rtwdev, path, pwr_idx_offset, + &txagc_idx, &swing_idx); + rtw_write32_mask(rtwdev, txagc_reg[path], GENMASK(29, 25), + txagc_idx); + rtw_write32_mask(rtwdev, txscale_reg[path], BB_SWING_MASK, + rtw8814a_txscale_tbl[swing_idx]); +} + +static void rtw8814a_pwrtrack_set(struct rtw_dev *rtwdev, u8 path) +{ + u8 max_pwr_idx = rtwdev->chip->max_power_index; + u8 band_width = rtwdev->hal.current_band_width; + u8 channel = rtwdev->hal.current_channel; + u8 tx_rate = rtwdev->dm_info.tx_rate; + u8 regd = rtw_regd_get(rtwdev); + u8 pwr_idx_offset, tx_pwr_idx; + + tx_pwr_idx = rtw_phy_get_tx_power_index(rtwdev, path, tx_rate, + band_width, channel, regd); + + tx_pwr_idx = min_t(u8, tx_pwr_idx, max_pwr_idx); + + pwr_idx_offset = max_pwr_idx - tx_pwr_idx; + + rtw8814a_pwrtrack_set_pwr(rtwdev, path, pwr_idx_offset); +} + +static void rtw8814a_phy_pwrtrack_path(struct rtw_dev *rtwdev, + struct rtw_swing_table *swing_table, + u8 path) +{ + struct rtw_dm_info *dm_info = &rtwdev->dm_info; + u8 power_idx_cur, power_idx_last; + u8 delta; + + /* 8814A only has one thermal meter at PATH A */ + delta = rtw_phy_pwrtrack_get_delta(rtwdev, RF_PATH_A); + + power_idx_last = 
dm_info->delta_power_index[path]; + power_idx_cur = rtw_phy_pwrtrack_get_pwridx(rtwdev, swing_table, + path, RF_PATH_A, delta); + + /* if delta of power indexes are the same, just skip */ + if (power_idx_cur == power_idx_last) + return; + + dm_info->delta_power_index[path] = power_idx_cur; + rtw8814a_pwrtrack_set(rtwdev, path); +} + +static void rtw8814a_phy_pwrtrack(struct rtw_dev *rtwdev) +{ + struct rtw_dm_info *dm_info = &rtwdev->dm_info; + struct rtw_swing_table swing_table; + u8 thermal_value, path; + + rtw_phy_config_swing_table(rtwdev, &swing_table); + + if (rtwdev->efuse.thermal_meter[RF_PATH_A] == 0xff) + return; + + thermal_value = rtw_read_rf(rtwdev, RF_PATH_A, RF_T_METER, 0xfc00); + + rtw_phy_pwrtrack_avg(rtwdev, thermal_value, RF_PATH_A); + + if (dm_info->pwr_trk_init_trigger) + dm_info->pwr_trk_init_trigger = false; + else if (!rtw_phy_pwrtrack_thermal_changed(rtwdev, thermal_value, + RF_PATH_A)) + goto iqk; + + for (path = RF_PATH_A; path < rtwdev->hal.rf_path_num; path++) + rtw8814a_phy_pwrtrack_path(rtwdev, &swing_table, path); + +iqk: + if (rtw_phy_pwrtrack_need_iqk(rtwdev)) + rtw8814a_do_iqk(rtwdev); +} + +static void rtw8814a_pwr_track(struct rtw_dev *rtwdev) +{ + struct rtw_dm_info *dm_info = &rtwdev->dm_info; + + if (!dm_info->pwr_trk_triggered) { + rtw_write_rf(rtwdev, RF_PATH_A, RF_T_METER, + GENMASK(17, 16), 0x03); + dm_info->pwr_trk_triggered = true; + return; + } + + rtw8814a_phy_pwrtrack(rtwdev); + dm_info->pwr_trk_triggered = false; +} + +static void rtw8814a_phy_cck_pd_set(struct rtw_dev *rtwdev, u8 new_lvl) +{ + static const u8 pd[CCK_PD_LV_MAX] = {0x40, 0x83, 0xcd, 0xdd, 0xed}; + struct rtw_dm_info *dm_info = &rtwdev->dm_info; + + /* Override rtw_phy_cck_pd_lv_link(). It implements something + * like type 2/3/4. We need type 1 here. 
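+ * Type 1 picks the level from min_rssi: above 60 use LV3, above + * 35 use LV2, at or below 20 use LV1. In between, the CCK false + * alarm average picks LV2 (> 500) or LV1 (< 250), or keeps the + * current level.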
+ */ + if (rtw_is_assoc(rtwdev)) { + if (dm_info->min_rssi > 60) { + new_lvl = CCK_PD_LV3; + } else if (dm_info->min_rssi > 35) { + new_lvl = CCK_PD_LV2; + } else if (dm_info->min_rssi > 20) { + if (dm_info->cck_fa_avg > 500) + new_lvl = CCK_PD_LV2; + else if (dm_info->cck_fa_avg < 250) + new_lvl = CCK_PD_LV1; + else + return; + } else { + new_lvl = CCK_PD_LV1; + } + } + + rtw_dbg(rtwdev, RTW_DBG_PHY, "lv: (%d) -> (%d)\n", + dm_info->cck_pd_lv[RTW_CHANNEL_WIDTH_20][RF_PATH_A], new_lvl); + + if (dm_info->cck_pd_lv[RTW_CHANNEL_WIDTH_20][RF_PATH_A] == new_lvl) + return; + + dm_info->cck_fa_avg = CCK_FA_AVG_RESET; + dm_info->cck_pd_lv[RTW_CHANNEL_WIDTH_20][RF_PATH_A] = new_lvl; + + rtw_write8(rtwdev, REG_CCK_PD_TH, pd[new_lvl]); +} + +static void rtw8814a_led_set(struct led_classdev *led, + enum led_brightness brightness) +{ + struct rtw_dev *rtwdev = container_of(led, struct rtw_dev, led_cdev); + u32 led_gpio_cfg; + + led_gpio_cfg = rtw_read32(rtwdev, REG_GPIO_PIN_CTRL_2); + led_gpio_cfg |= BIT(16) | BIT(17) | BIT(21) | BIT(22); + + if (brightness == LED_OFF) { + led_gpio_cfg |= BIT(8) | BIT(9) | BIT(13) | BIT(14); + } else { + led_gpio_cfg &= ~(BIT(8) | BIT(9) | BIT(13) | BIT(14)); + led_gpio_cfg &= ~(BIT(0) | BIT(1) | BIT(5) | BIT(6)); + } + + rtw_write32(rtwdev, REG_GPIO_PIN_CTRL_2, led_gpio_cfg); +} + +static void rtw8814a_fill_txdesc_checksum(struct rtw_dev *rtwdev, + struct rtw_tx_pkt_info *pkt_info, + u8 *txdesc) +{ + size_t words = 32 / 2; /* calculate the first 32 bytes (16 words) */ + + fill_txdesc_checksum_common(txdesc, words); +} + +static const struct rtw_chip_ops rtw8814a_ops = { + .power_on = rtw_power_on, + .power_off = rtw_power_off, + .phy_set_param = rtw8814a_phy_set_param, + .read_efuse = rtw8814a_read_efuse, + .query_phy_status = rtw8814a_query_phy_status, + .set_channel = rtw8814a_set_channel, + .mac_init = rtw8814a_mac_init, + .read_rf = rtw_phy_read_rf, + .write_rf = rtw_phy_write_rf_reg_sipi, + .set_tx_power_index = rtw8814a_set_tx_power_index, + .set_antenna = NULL, + .cfg_ldo25 = rtw8814a_cfg_ldo25, + .efuse_grant = rtw8814a_efuse_grant, + .false_alarm_statistics = rtw8814a_false_alarm_statistics, + .phy_calibration = rtw8814a_phy_calibration, + .cck_pd_set = rtw8814a_phy_cck_pd_set, + .pwr_track = rtw8814a_pwr_track, + .config_bfee = NULL, + .set_gid_table = NULL, + .cfg_csi_rate = NULL, + .led_set = rtw8814a_led_set, + .fill_txdesc_checksum = rtw8814a_fill_txdesc_checksum, + + .coex_set_init = rtw8814a_coex_cfg_init, + .coex_set_ant_switch = rtw8814a_coex_cfg_ant_switch, + .coex_set_gnt_fix = rtw8814a_coex_cfg_gnt_fix, + .coex_set_gnt_debug = rtw8814a_coex_cfg_gnt_debug, + .coex_set_rfe_type = rtw8814a_coex_cfg_rfe_type, + .coex_set_wl_tx_power = rtw8814a_coex_cfg_wl_tx_power, + .coex_set_wl_rx_gain = rtw8814a_coex_cfg_wl_rx_gain, +}; + +static const struct rtw_rqpn rqpn_table_8814a[] = { + /* SDIO */ + {RTW_DMA_MAPPING_NORMAL, RTW_DMA_MAPPING_NORMAL, /* vo vi */ + RTW_DMA_MAPPING_LOW, RTW_DMA_MAPPING_LOW, /* be bk */ + RTW_DMA_MAPPING_EXTRA, RTW_DMA_MAPPING_HIGH}, /* mg hi */ + /* PCIE */ + {RTW_DMA_MAPPING_HIGH, RTW_DMA_MAPPING_NORMAL, + RTW_DMA_MAPPING_LOW, RTW_DMA_MAPPING_LOW, + RTW_DMA_MAPPING_HIGH, RTW_DMA_MAPPING_HIGH}, + /* USB, 2 bulk out */ + {RTW_DMA_MAPPING_HIGH, RTW_DMA_MAPPING_HIGH, + RTW_DMA_MAPPING_NORMAL, RTW_DMA_MAPPING_NORMAL, + RTW_DMA_MAPPING_HIGH, RTW_DMA_MAPPING_HIGH}, + /* USB, 3 bulk out */ + {RTW_DMA_MAPPING_HIGH, RTW_DMA_MAPPING_NORMAL, + RTW_DMA_MAPPING_LOW, RTW_DMA_MAPPING_LOW, + RTW_DMA_MAPPING_HIGH, RTW_DMA_MAPPING_HIGH}, + /* USB, 
4 bulk out */ + {RTW_DMA_MAPPING_HIGH, RTW_DMA_MAPPING_NORMAL, + RTW_DMA_MAPPING_LOW, RTW_DMA_MAPPING_LOW, + RTW_DMA_MAPPING_HIGH, RTW_DMA_MAPPING_HIGH}, +}; + +static const struct rtw_prioq_addrs prioq_addrs_8814a = { + .prio[RTW_DMA_MAPPING_EXTRA] = { + .rsvd = REG_FIFOPAGE_INFO_4, .avail = REG_FIFOPAGE_INFO_4 + 2, + }, + .prio[RTW_DMA_MAPPING_LOW] = { + .rsvd = REG_FIFOPAGE_INFO_2, .avail = REG_FIFOPAGE_INFO_2 + 2, + }, + .prio[RTW_DMA_MAPPING_NORMAL] = { + .rsvd = REG_FIFOPAGE_INFO_3, .avail = REG_FIFOPAGE_INFO_3 + 2, + }, + .prio[RTW_DMA_MAPPING_HIGH] = { + .rsvd = REG_FIFOPAGE_INFO_1, .avail = REG_FIFOPAGE_INFO_1 + 2, + }, + .wsize = true, +}; + +static const struct rtw_page_table page_table_8814a[] = { + /* SDIO */ + {0, 0, 0, 0, 0}, /* hq nq lq exq gapq */ + /* PCIE */ + {32, 32, 32, 32, 0}, + /* USB, 2 bulk out */ + {32, 32, 32, 32, 0}, + /* USB, 3 bulk out */ + {32, 32, 32, 32, 0}, + /* USB, 4 bulk out */ + {32, 32, 32, 32, 0}, +}; + +static const struct rtw_intf_phy_para_table phy_para_table_8814a = {}; + +static const struct rtw_hw_reg rtw8814a_dig[] = { + [0] = { .addr = 0xc50, .mask = 0x7f }, + [1] = { .addr = 0xe50, .mask = 0x7f }, + [2] = { .addr = 0x1850, .mask = 0x7f }, + [3] = { .addr = 0x1a50, .mask = 0x7f }, +}; + +static const struct rtw_rfe_def rtw8814a_rfe_defs[] = { + [0] = { .phy_pg_tbl = &rtw8814a_bb_pg_type0_tbl, + .txpwr_lmt_tbl = &rtw8814a_txpwr_lmt_type0_tbl, + .pwr_track_tbl = &rtw8814a_rtw_pwrtrk_type0_tbl }, + [1] = { .phy_pg_tbl = &rtw8814a_bb_pg_tbl, + .txpwr_lmt_tbl = &rtw8814a_txpwr_lmt_type1_tbl, + .pwr_track_tbl = &rtw8814a_rtw_pwrtrk_tbl }, +}; + +/* rssi in percentage % (dbm = % - 100) */ +static const u8 wl_rssi_step_8814a[] = {60, 50, 44, 30}; +static const u8 bt_rssi_step_8814a[] = {30, 30, 30, 30}; + +/* wl_tx_dec_power, bt_tx_dec_power, wl_rx_gain, bt_rx_lna_constrain */ +static const struct coex_rf_para rf_para_tx_8814a[] = { + {0, 0, false, 7}, /* for normal */ + {0, 16, false, 7}, /* for WL-CPT */ + {4, 0, true, 1}, + {3, 6, true, 1}, + {2, 9, true, 1}, + {1, 13, true, 1} +}; + +static const struct coex_rf_para rf_para_rx_8814a[] = { + {0, 0, false, 7}, /* for normal */ + {0, 16, false, 7}, /* for WL-CPT */ + {4, 0, true, 1}, + {3, 6, true, 1}, + {2, 9, true, 1}, + {1, 13, true, 1} +}; + +static_assert(ARRAY_SIZE(rf_para_tx_8814a) == ARRAY_SIZE(rf_para_rx_8814a)); + +const struct rtw_chip_info rtw8814a_hw_spec = { + .ops = &rtw8814a_ops, + .id = RTW_CHIP_TYPE_8814A, + .fw_name = "rtw88/rtw8814a_fw.bin", + .wlan_cpu = RTW_WCPU_11AC, + .tx_pkt_desc_sz = 40, + .tx_buf_desc_sz = 16, + .rx_pkt_desc_sz = 24, + .rx_buf_desc_sz = 8, + .phy_efuse_size = 1024, + .log_efuse_size = 512, + .ptct_efuse_size = 0, + .txff_size = (2048 - 10) * TX_PAGE_SIZE, + .rxff_size = 23552, + .rsvd_drv_pg_num = 8, + .band = RTW_BAND_2G | RTW_BAND_5G, + .page_size = TX_PAGE_SIZE, + .csi_buf_pg_num = 0, + .dig_min = 0x1c, + .txgi_factor = 1, + .is_pwr_by_rate_dec = true, + .rx_ldpc = true, + .max_power_index = 0x3f, + .ampdu_density = IEEE80211_HT_MPDU_DENSITY_2, + .usb_tx_agg_desc_num = 3, + .hw_feature_report = false, + .c2h_ra_report_size = 6, + .old_datarate_fb_limit = false, + .ht_supported = true, + .vht_supported = true, + .lps_deep_mode_supported = BIT(LPS_DEEP_MODE_LCLK), + .sys_func_en = 0xDC, + .pwr_on_seq = card_enable_flow_8814a, + .pwr_off_seq = card_disable_flow_8814a, + .rqpn_table = rqpn_table_8814a, + .prioq_addrs = &prioq_addrs_8814a, + .page_table = page_table_8814a, + .intf_table = &phy_para_table_8814a, + .dig = rtw8814a_dig, + .dig_cck = NULL, + 
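/* Per-path RF register access: direct read bases and SIPI write addresses. */ +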
.rf_base_addr = {0x2800, 0x2c00, 0x3800, 0x3c00}, + .rf_sipi_addr = {0xc90, 0xe90, 0x1890, 0x1a90}, + .ltecoex_addr = NULL, + .mac_tbl = &rtw8814a_mac_tbl, + .agc_tbl = &rtw8814a_agc_tbl, + .bb_tbl = &rtw8814a_bb_tbl, + .rf_tbl = {&rtw8814a_rf_a_tbl, &rtw8814a_rf_b_tbl, + &rtw8814a_rf_c_tbl, &rtw8814a_rf_d_tbl}, + .rfe_defs = rtw8814a_rfe_defs, + .rfe_defs_size = ARRAY_SIZE(rtw8814a_rfe_defs), + .iqk_threshold = 8, + .max_scan_ie_len = IEEE80211_MAX_DATA_LEN, + + .coex_para_ver = 0, + .bt_desired_ver = 0, + .scbd_support = false, + .new_scbd10_def = false, + .ble_hid_profile_support = false, + .wl_mimo_ps_support = false, + .pstdma_type = COEX_PSTDMA_FORCE_LPSOFF, + .bt_rssi_type = COEX_BTRSSI_RATIO, + .ant_isolation = 15, + .rssi_tolerance = 2, + .wl_rssi_step = wl_rssi_step_8814a, + .bt_rssi_step = bt_rssi_step_8814a, + .table_sant_num = 0, + .table_sant = NULL, + .table_nsant_num = 0, + .table_nsant = NULL, + .tdma_sant_num = 0, + .tdma_sant = NULL, + .tdma_nsant_num = 0, + .tdma_nsant = NULL, + .wl_rf_para_num = ARRAY_SIZE(rf_para_tx_8814a), + .wl_rf_para_tx = rf_para_tx_8814a, + .wl_rf_para_rx = rf_para_rx_8814a, + .bt_afh_span_bw20 = 0x24, + .bt_afh_span_bw40 = 0x36, + .afh_5g_num = 0, + .afh_5g = NULL, + .coex_info_hw_regs_num = 0, + .coex_info_hw_regs = NULL, +}; +EXPORT_SYMBOL(rtw8814a_hw_spec); + +MODULE_FIRMWARE("rtw88/rtw8814a_fw.bin"); + +MODULE_AUTHOR("Bitterblue Smith "); +MODULE_DESCRIPTION("Realtek 802.11ac wireless 8814a driver"); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/realtek/rtw88/rtw8814a.h b/drivers/net/wireless/realtek/rtw88/rtw8814a.h new file mode 100644 index 0000000000000..c57c7c8f915e2 --- /dev/null +++ b/drivers/net/wireless/realtek/rtw88/rtw8814a.h @@ -0,0 +1,62 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* Copyright(c) 2025 Realtek Corporation + */ + +#ifndef __RTW8814A_H__ +#define __RTW8814A_H__ + +struct rtw8814au_efuse { + u8 vid[2]; /* 0xd0 */ + u8 pid[2]; /* 0xd2 */ + u8 res[4]; /* 0xd4 */ + u8 mac_addr[ETH_ALEN]; /* 0xd8 */ +} __packed; + +struct rtw8814ae_efuse { + u8 mac_addr[ETH_ALEN]; /* 0xd0 */ + u8 vid[2]; /* 0xd6 */ + u8 did[2]; /* 0xd8 */ + u8 svid[2]; /* 0xda */ + u8 smid[2]; /* 0xdc */ +} __packed; + +struct rtw8814a_efuse { + __le16 rtl_id; + u8 res0[0x0c]; + u8 usb_mode; /* 0x0e */ + u8 res1; + + /* power index for four RF paths */ + struct rtw_txpwr_idx txpwr_idx_table[4]; + + u8 channel_plan; /* 0xb8 */ + u8 xtal_k; /* 0xb9 */ + u8 thermal_meter; /* 0xba */ + u8 iqk_lck; /* 0xbb */ + u8 pa_type; /* 0xbc */ + u8 lna_type_2g[2]; /* 0xbd */ + u8 lna_type_5g[2]; /* 0xbf */ + u8 rf_board_option; /* 0xc1 */ + u8 res2; + u8 rf_bt_setting; /* 0xc3 */ + u8 eeprom_version; /* 0xc4 */ + u8 eeprom_customer_id; /* 0xc5 */ + u8 tx_bb_swing_setting_2g; /* 0xc6 */ + u8 tx_bb_swing_setting_5g; /* 0xc7 */ + u8 res3; + u8 trx_antenna_option; /* 0xc9 */ + u8 rfe_option; /* 0xca */ + u8 country_code[2]; /* 0xcb */ + u8 res4[3]; + union { + struct rtw8814au_efuse u; + struct rtw8814ae_efuse e; + }; + u8 res5[0x122]; /* 0xde */ +} __packed; + +static_assert(sizeof(struct rtw8814a_efuse) == 512); + +extern const struct rtw_chip_info rtw8814a_hw_spec; + +#endif diff --git a/drivers/net/wireless/realtek/rtw88/rtw8814a_table.c b/drivers/net/wireless/realtek/rtw88/rtw8814a_table.c new file mode 100644 index 0000000000000..b9ce51a09fc85 --- /dev/null +++ b/drivers/net/wireless/realtek/rtw88/rtw8814a_table.c @@ -0,0 +1,23930 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* Copyright(c) 2025 Realtek Corporation + 
*/ + +#include "main.h" +#include "phy.h" +#include "rtw8814a_table.h" + +static const u32 rtw8814a_mac[] = { + 0x010, 0x0000007C, + 0x014, 0x000000DB, + 0x016, 0x00000002, + 0x073, 0x00000010, + 0x420, 0x00000080, + 0x421, 0x0000000F, + 0x428, 0x0000000A, + 0x429, 0x00000010, + 0x430, 0x00000000, + 0x431, 0x00000000, + 0x432, 0x00000000, + 0x433, 0x00000001, + 0x434, 0x00000004, + 0x435, 0x00000005, + 0x436, 0x00000007, + 0x437, 0x00000008, + 0x43C, 0x00000004, + 0x43D, 0x00000005, + 0x43E, 0x00000007, + 0x43F, 0x00000008, + 0x440, 0x0000005D, + 0x441, 0x00000001, + 0x442, 0x00000000, + 0x444, 0x00000010, + 0x445, 0x000000F0, + 0x446, 0x00000001, + 0x447, 0x000000FE, + 0x448, 0x00000000, + 0x449, 0x00000000, + 0x44A, 0x00000000, + 0x44B, 0x00000040, + 0x44C, 0x00000010, + 0x44D, 0x000000F0, + 0x44E, 0x0000003F, + 0x44F, 0x00000000, + 0x450, 0x00000000, + 0x451, 0x00000000, + 0x452, 0x00000000, + 0x453, 0x00000040, + 0x45E, 0x00000004, + 0x49C, 0x00000010, + 0x49D, 0x000000F0, + 0x49E, 0x00000000, + 0x49F, 0x00000006, + 0x4A0, 0x000000E0, + 0x4A1, 0x00000003, + 0x4A2, 0x00000000, + 0x4A3, 0x00000040, + 0x4A4, 0x00000015, + 0x4A5, 0x000000F0, + 0x4A6, 0x00000000, + 0x4A7, 0x00000006, + 0x4A8, 0x000000E0, + 0x4A9, 0x00000000, + 0x4AA, 0x00000000, + 0x4AB, 0x00000000, + 0x7DA, 0x00000008, + 0x1448, 0x00000006, + 0x144A, 0x00000006, + 0x144C, 0x00000006, + 0x144E, 0x00000006, + 0x4C8, 0x000000FF, + 0x4C9, 0x00000008, + 0x4CA, 0x0000003C, + 0x4CB, 0x0000003C, + 0x4CC, 0x000000FF, + 0x4CD, 0x000000FF, + 0x4CE, 0x00000001, + 0x4CF, 0x00000008, + 0x500, 0x00000026, + 0x501, 0x000000A2, + 0x502, 0x0000002F, + 0x503, 0x00000000, + 0x504, 0x00000028, + 0x505, 0x000000A3, + 0x506, 0x0000005E, + 0x507, 0x00000000, + 0x508, 0x0000002B, + 0x509, 0x000000A4, + 0x50A, 0x0000005E, + 0x50B, 0x00000000, + 0x50C, 0x0000004F, + 0x50D, 0x000000A4, + 0x50E, 0x00000000, + 0x50F, 0x00000000, + 0x512, 0x0000001C, + 0x514, 0x0000000A, + 0x516, 0x0000000A, + 0x521, 0x0000002F, + 0x525, 0x00000047, + 0x550, 0x00000010, + 0x551, 0x00000010, + 0x559, 0x00000002, + 0x55C, 0x00000064, + 0x55D, 0x000000FF, + 0x577, 0x00000003, + 0x5BE, 0x00000064, + 0x604, 0x00000001, + 0x605, 0x00000030, + 0x607, 0x00000001, + 0x608, 0x0000000E, + 0x609, 0x0000002A, + 0x60A, 0x00000000, + 0x60C, 0x00000018, + 0x60D, 0x00000050, + 0x6A0, 0x000000FF, + 0x6A1, 0x000000FF, + 0x6A2, 0x000000FF, + 0x6A3, 0x000000FF, + 0x6A4, 0x000000FF, + 0x6A5, 0x000000FF, + 0x6DE, 0x00000084, + 0x620, 0x000000FF, + 0x621, 0x000000FF, + 0x622, 0x000000FF, + 0x623, 0x000000FF, + 0x624, 0x000000FF, + 0x625, 0x000000FF, + 0x626, 0x000000FF, + 0x627, 0x000000FF, + 0x638, 0x00000064, + 0x63C, 0x0000000A, + 0x63D, 0x0000000A, + 0x63E, 0x0000000E, + 0x63F, 0x0000000E, + 0x640, 0x00000040, + 0x642, 0x00000040, + 0x643, 0x00000000, + 0x652, 0x000000C8, + 0x66E, 0x00000005, + 0x700, 0x00000021, + 0x701, 0x00000043, + 0x702, 0x00000065, + 0x703, 0x00000087, + 0x708, 0x00000021, + 0x709, 0x00000043, + 0x70A, 0x00000065, + 0x70B, 0x00000087, + 0x718, 0x00000040, + 0x7D5, 0x000000BC, + 0x7D8, 0x00000028, + 0x7D9, 0x00000000, + 0x7DA, 0x0000000B, +}; + +RTW_DECL_TABLE_PHY_COND(rtw8814a_mac, rtw_phy_cfg_mac); + +static const u32 rtw8814a_agc[] = { + 0x80000002, 0x00000000, 0x40000000, 0x00000000, + 0x81C, 0xFE000003, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x81C, 0xFE000003, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x81C, 0xFE000003, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x81C, 0xFE000003, + 0x90000007, 0x00000000, 0x40000000, 0x00000000, + 
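/* Rows whose top nibble is 0x8/0x9/0xA/0xB are if/else-if/else/endif directives, matched against the hardware variant (e.g. RFE option) by rtw_parse_tbl_phy_cond(); the 0x81C pairs between them are the AGC gain entries loaded for the matching variant. */ +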
0x81C, 0xFF000003, + 0x90000008, 0x00000000, 0x40000000, 0x00000000, + 0x81C, 0xFE000003, + 0x90000009, 0x00000000, 0x40000000, 0x00000000, + 0x81C, 0xFF000003, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x81C, 0xFE000003, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0x81C, 0xFE000003, + 0xA0000000, 0x00000000, + 0x81C, 0xFF000003, + 0xB0000000, 0x00000000, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x81C, 0xFE000003, + 0x81C, 0xFD020003, + 0x81C, 0xFC040003, + 0x81C, 0xFB060003, + 0x81C, 0xFA080003, + 0x81C, 0xF90A0003, + 0x81C, 0xF80C0003, + 0x81C, 0xF70E0003, + 0x81C, 0xF6100003, + 0x81C, 0xF5120003, + 0x81C, 0xF4140003, + 0x81C, 0xF3160003, + 0x81C, 0xF2180003, + 0x81C, 0xF11A0003, + 0x81C, 0xF01C0003, + 0x81C, 0xEF1E0003, + 0x81C, 0xEE200003, + 0x81C, 0xED220003, + 0x81C, 0xCF240003, + 0x81C, 0xCE260003, + 0x81C, 0xCD280003, + 0x81C, 0xCC2A0003, + 0x81C, 0xCB2C0003, + 0x81C, 0xCA2E0003, + 0x81C, 0xC9300003, + 0x81C, 0xC8320003, + 0x81C, 0xC7340003, + 0x81C, 0xC6360003, + 0x81C, 0xC5380003, + 0x81C, 0xC43A0003, + 0x81C, 0xA63C0003, + 0x81C, 0xA53E0003, + 0x81C, 0xA4400003, + 0x81C, 0xA3420003, + 0x81C, 0xA2440003, + 0x81C, 0xA1460003, + 0x81C, 0x86480003, + 0x81C, 0x854A0003, + 0x81C, 0x844C0003, + 0x81C, 0x834E0003, + 0x81C, 0x66500003, + 0x81C, 0x65520003, + 0x81C, 0x64540003, + 0x81C, 0x63560003, + 0x81C, 0x62580003, + 0x81C, 0x615A0003, + 0x81C, 0x435C0003, + 0x81C, 0x425E0003, + 0x81C, 0x41600003, + 0x81C, 0x27620003, + 0x81C, 0x26640003, + 0x81C, 0x25660003, + 0x81C, 0x24680003, + 0x81C, 0x236A0003, + 0x81C, 0x226C0003, + 0x81C, 0x216E0003, + 0x81C, 0x21700003, + 0x81C, 0x21720003, + 0x81C, 0x21740003, + 0x81C, 0x21760003, + 0x81C, 0x21780003, + 0x81C, 0x217A0003, + 0x81C, 0x217C0003, + 0x81C, 0x217E0003, + 0x90000002, 0x00000000, 0x40000000, 0x00000000, + 0x81C, 0xFF000003, + 0x81C, 0xFE020003, + 0x81C, 0xFD040003, + 0x81C, 0xFC060003, + 0x81C, 0xFB080003, + 0x81C, 0xFA0A0003, + 0x81C, 0xF90C0003, + 0x81C, 0xF80E0003, + 0x81C, 0xF7100003, + 0x81C, 0xF6120003, + 0x81C, 0xF5140003, + 0x81C, 0xF4160003, + 0x81C, 0xF3180003, + 0x81C, 0xF21A0003, + 0x81C, 0xF11C0003, + 0x81C, 0xF01E0003, + 0x81C, 0xEF200003, + 0x81C, 0xEE220003, + 0x81C, 0xED240003, + 0x81C, 0xEC260003, + 0x81C, 0xEB280003, + 0x81C, 0xEA2A0003, + 0x81C, 0xE92C0003, + 0x81C, 0xE82E0003, + 0x81C, 0xE7300003, + 0x81C, 0xE6320003, + 0x81C, 0xE5340003, + 0x81C, 0xE4360003, + 0x81C, 0xE3380003, + 0x81C, 0xC53A0003, + 0x81C, 0xC43C0003, + 0x81C, 0xC33E0003, + 0x81C, 0xC2400003, + 0x81C, 0xC1420003, + 0x81C, 0xA8440003, + 0x81C, 0xA7460003, + 0x81C, 0xA6480003, + 0x81C, 0xA54A0003, + 0x81C, 0xA44C0003, + 0x81C, 0xA34E0003, + 0x81C, 0xA2500003, + 0x81C, 0x65520003, + 0x81C, 0x64540003, + 0x81C, 0x63560003, + 0x81C, 0x62580003, + 0x81C, 0x615A0003, + 0x81C, 0x475C0003, + 0x81C, 0x465E0003, + 0x81C, 0x45600003, + 0x81C, 0x44620003, + 0x81C, 0x43640003, + 0x81C, 0x42660003, + 0x81C, 0x41680003, + 0x81C, 0x416A0003, + 0x81C, 0x416C0003, + 0x81C, 0x416E0003, + 0x81C, 0x41700003, + 0x81C, 0x41720003, + 0x81C, 0x41740003, + 0x81C, 0x41760003, + 0x81C, 0x41780003, + 0x81C, 0x417A0003, + 0x81C, 0x417C0003, + 0x81C, 0x417E0003, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x81C, 0xFE000003, + 0x81C, 0xFD020003, + 0x81C, 0xFC040003, + 0x81C, 0xFB060003, + 0x81C, 0xFA080003, + 0x81C, 0xF90A0003, + 0x81C, 0xF80C0003, + 0x81C, 0xF70E0003, + 0x81C, 0xF6100003, + 0x81C, 0xF5120003, + 0x81C, 0xF4140003, + 0x81C, 0xF3160003, + 0x81C, 0xF2180003, + 0x81C, 0xF11A0003, + 0x81C, 0xF01C0003, + 0x81C, 0xEF1E0003, + 
0x81C, 0xEE200003, + 0x81C, 0xED220003, + 0x81C, 0xEC240003, + 0x81C, 0xEB260003, + 0x81C, 0xEA280003, + 0x81C, 0xE92A0003, + 0x81C, 0xE82C0003, + 0x81C, 0xE72E0003, + 0x81C, 0xE6300003, + 0x81C, 0xE5320003, + 0x81C, 0xE4340003, + 0x81C, 0xE3360003, + 0x81C, 0xC6380003, + 0x81C, 0xC53A0003, + 0x81C, 0xC43C0003, + 0x81C, 0xC33E0003, + 0x81C, 0xC2400003, + 0x81C, 0xA9420003, + 0x81C, 0xA8440003, + 0x81C, 0xA7460003, + 0x81C, 0xA6480003, + 0x81C, 0xA54A0003, + 0x81C, 0xA44C0003, + 0x81C, 0xA34E0003, + 0x81C, 0x66500003, + 0x81C, 0x65520003, + 0x81C, 0x64540003, + 0x81C, 0x63560003, + 0x81C, 0x49580003, + 0x81C, 0x485A0003, + 0x81C, 0x475C0003, + 0x81C, 0x465E0003, + 0x81C, 0x45600003, + 0x81C, 0x44620003, + 0x81C, 0x43640003, + 0x81C, 0x42660003, + 0x81C, 0x41680003, + 0x81C, 0x416A0003, + 0x81C, 0x416C0003, + 0x81C, 0x416E0003, + 0x81C, 0x41700003, + 0x81C, 0x41720003, + 0x81C, 0x41740003, + 0x81C, 0x41760003, + 0x81C, 0x41780003, + 0x81C, 0x417A0003, + 0x81C, 0x417C0003, + 0x81C, 0x417E0003, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x81C, 0xFF000003, + 0x81C, 0xFE020003, + 0x81C, 0xFD040003, + 0x81C, 0xFC060003, + 0x81C, 0xFB080003, + 0x81C, 0xFA0A0003, + 0x81C, 0xF90C0003, + 0x81C, 0xF80E0003, + 0x81C, 0xF7100003, + 0x81C, 0xF6120003, + 0x81C, 0xF5140003, + 0x81C, 0xF4160003, + 0x81C, 0xF3180003, + 0x81C, 0xF21A0003, + 0x81C, 0xF11C0003, + 0x81C, 0xF01E0003, + 0x81C, 0xEF200003, + 0x81C, 0xEE220003, + 0x81C, 0xED240003, + 0x81C, 0xEC260003, + 0x81C, 0xEB280003, + 0x81C, 0xEA2A0003, + 0x81C, 0xE92C0003, + 0x81C, 0xE82E0003, + 0x81C, 0xE7300003, + 0x81C, 0xE6320003, + 0x81C, 0xE5340003, + 0x81C, 0xE4360003, + 0x81C, 0xE3380003, + 0x81C, 0xC53A0003, + 0x81C, 0xC43C0003, + 0x81C, 0xC33E0003, + 0x81C, 0xC2400003, + 0x81C, 0xC1420003, + 0x81C, 0xA8440003, + 0x81C, 0xA7460003, + 0x81C, 0xA6480003, + 0x81C, 0xA54A0003, + 0x81C, 0xA44C0003, + 0x81C, 0xA34E0003, + 0x81C, 0xA2500003, + 0x81C, 0x65520003, + 0x81C, 0x64540003, + 0x81C, 0x63560003, + 0x81C, 0x62580003, + 0x81C, 0x615A0003, + 0x81C, 0x475C0003, + 0x81C, 0x465E0003, + 0x81C, 0x45600003, + 0x81C, 0x44620003, + 0x81C, 0x43640003, + 0x81C, 0x42660003, + 0x81C, 0x41680003, + 0x81C, 0x416A0003, + 0x81C, 0x416C0003, + 0x81C, 0x416E0003, + 0x81C, 0x41700003, + 0x81C, 0x41720003, + 0x81C, 0x41740003, + 0x81C, 0x41760003, + 0x81C, 0x41780003, + 0x81C, 0x417A0003, + 0x81C, 0x417C0003, + 0x81C, 0x417E0003, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x81C, 0xFE000003, + 0x81C, 0xFD020003, + 0x81C, 0xFC040003, + 0x81C, 0xFB060003, + 0x81C, 0xFA080003, + 0x81C, 0xF90A0003, + 0x81C, 0xF80C0003, + 0x81C, 0xF70E0003, + 0x81C, 0xF6100003, + 0x81C, 0xF5120003, + 0x81C, 0xF4140003, + 0x81C, 0xF3160003, + 0x81C, 0xF2180003, + 0x81C, 0xF11A0003, + 0x81C, 0xF01C0003, + 0x81C, 0xEF1E0003, + 0x81C, 0xEE200003, + 0x81C, 0xED220003, + 0x81C, 0xEC240003, + 0x81C, 0xEB260003, + 0x81C, 0xEA280003, + 0x81C, 0xE92A0003, + 0x81C, 0xE82C0003, + 0x81C, 0xE72E0003, + 0x81C, 0xE6300003, + 0x81C, 0xE5320003, + 0x81C, 0xE4340003, + 0x81C, 0xE3360003, + 0x81C, 0xC6380003, + 0x81C, 0xC53A0003, + 0x81C, 0xC43C0003, + 0x81C, 0xC33E0003, + 0x81C, 0xC2400003, + 0x81C, 0xA9420003, + 0x81C, 0xA8440003, + 0x81C, 0xA7460003, + 0x81C, 0xA6480003, + 0x81C, 0xA54A0003, + 0x81C, 0xA44C0003, + 0x81C, 0xA34E0003, + 0x81C, 0x66500003, + 0x81C, 0x65520003, + 0x81C, 0x64540003, + 0x81C, 0x63560003, + 0x81C, 0x49580003, + 0x81C, 0x485A0003, + 0x81C, 0x475C0003, + 0x81C, 0x465E0003, + 0x81C, 0x45600003, + 0x81C, 0x44620003, + 0x81C, 0x43640003, + 0x81C, 0x42660003, + 0x81C, 
0x41680003, + 0x81C, 0x416A0003, + 0x81C, 0x416C0003, + 0x81C, 0x416E0003, + 0x81C, 0x41700003, + 0x81C, 0x41720003, + 0x81C, 0x41740003, + 0x81C, 0x41760003, + 0x81C, 0x41780003, + 0x81C, 0x417A0003, + 0x81C, 0x417C0003, + 0x81C, 0x417E0003, + 0x90000007, 0x00000000, 0x40000000, 0x00000000, + 0x81C, 0xDF000003, + 0x81C, 0xDF020003, + 0x81C, 0xDF040003, + 0x81C, 0xDE060003, + 0x81C, 0xDD080003, + 0x81C, 0xDC0A0003, + 0x81C, 0xDB0C0003, + 0x81C, 0xDA0E0003, + 0x81C, 0xD9100003, + 0x81C, 0xD8120003, + 0x81C, 0xD7140003, + 0x81C, 0xD6160003, + 0x81C, 0xD5180003, + 0x81C, 0xD41A0003, + 0x81C, 0xD31C0003, + 0x81C, 0xD21E0003, + 0x81C, 0xD1200003, + 0x81C, 0xD0220003, + 0x81C, 0xCF240003, + 0x81C, 0xCE260003, + 0x81C, 0xCD280003, + 0x81C, 0xCC2A0003, + 0x81C, 0xCB2C0003, + 0x81C, 0xCA2E0003, + 0x81C, 0xC9300003, + 0x81C, 0xC8320003, + 0x81C, 0xC7340003, + 0x81C, 0xC6360003, + 0x81C, 0xC5380003, + 0x81C, 0xA73A0003, + 0x81C, 0xA63C0003, + 0x81C, 0xA53E0003, + 0x81C, 0xA4400003, + 0x81C, 0xA3420003, + 0x81C, 0xA2440003, + 0x81C, 0x87460003, + 0x81C, 0x86480003, + 0x81C, 0x854A0003, + 0x81C, 0x844C0003, + 0x81C, 0x834E0003, + 0x81C, 0x82500003, + 0x81C, 0x81520003, + 0x81C, 0x64540003, + 0x81C, 0x63560003, + 0x81C, 0x62580003, + 0x81C, 0x615A0003, + 0x81C, 0x445C0003, + 0x81C, 0x435E0003, + 0x81C, 0x42600003, + 0x81C, 0x41620003, + 0x81C, 0x27640003, + 0x81C, 0x26660003, + 0x81C, 0x25680003, + 0x81C, 0x246A0003, + 0x81C, 0x236C0003, + 0x81C, 0x226E0003, + 0x81C, 0x21700003, + 0x81C, 0x21720003, + 0x81C, 0x21740003, + 0x81C, 0x21760003, + 0x81C, 0x21780003, + 0x81C, 0x217A0003, + 0x81C, 0x217C0003, + 0x81C, 0x217E0003, + 0x90000008, 0x00000000, 0x40000000, 0x00000000, + 0x81C, 0xFF000003, + 0x81C, 0xFE020003, + 0x81C, 0xFD040003, + 0x81C, 0xFC060003, + 0x81C, 0xFB080003, + 0x81C, 0xFA0A0003, + 0x81C, 0xF90C0003, + 0x81C, 0xF80E0003, + 0x81C, 0xF7100003, + 0x81C, 0xF6120003, + 0x81C, 0xF5140003, + 0x81C, 0xF4160003, + 0x81C, 0xF3180003, + 0x81C, 0xF21A0003, + 0x81C, 0xF11C0003, + 0x81C, 0xF01E0003, + 0x81C, 0xEF200003, + 0x81C, 0xEE220003, + 0x81C, 0xED240003, + 0x81C, 0xEC260003, + 0x81C, 0xEB280003, + 0x81C, 0xEA2A0003, + 0x81C, 0xE92C0003, + 0x81C, 0xE82E0003, + 0x81C, 0xE7300003, + 0x81C, 0xE6320003, + 0x81C, 0xE5340003, + 0x81C, 0xE4360003, + 0x81C, 0xE3380003, + 0x81C, 0xC53A0003, + 0x81C, 0xC43C0003, + 0x81C, 0xC33E0003, + 0x81C, 0xC2400003, + 0x81C, 0xC1420003, + 0x81C, 0xA8440003, + 0x81C, 0xA7460003, + 0x81C, 0xA6480003, + 0x81C, 0xA54A0003, + 0x81C, 0xA44C0003, + 0x81C, 0xA34E0003, + 0x81C, 0xA2500003, + 0x81C, 0x65520003, + 0x81C, 0x64540003, + 0x81C, 0x63560003, + 0x81C, 0x62580003, + 0x81C, 0x615A0003, + 0x81C, 0x475C0003, + 0x81C, 0x465E0003, + 0x81C, 0x45600003, + 0x81C, 0x44620003, + 0x81C, 0x43640003, + 0x81C, 0x42660003, + 0x81C, 0x41680003, + 0x81C, 0x416A0003, + 0x81C, 0x416C0003, + 0x81C, 0x416E0003, + 0x81C, 0x41700003, + 0x81C, 0x41720003, + 0x81C, 0x41740003, + 0x81C, 0x41760003, + 0x81C, 0x41780003, + 0x81C, 0x417A0003, + 0x81C, 0x417C0003, + 0x81C, 0x417E0003, + 0x90000009, 0x00000000, 0x40000000, 0x00000000, + 0x81C, 0xDF000003, + 0x81C, 0xDF020003, + 0x81C, 0xDF040003, + 0x81C, 0xDE060003, + 0x81C, 0xDD080003, + 0x81C, 0xDC0A0003, + 0x81C, 0xDB0C0003, + 0x81C, 0xDA0E0003, + 0x81C, 0xD9100003, + 0x81C, 0xD8120003, + 0x81C, 0xD7140003, + 0x81C, 0xD6160003, + 0x81C, 0xD5180003, + 0x81C, 0xD41A0003, + 0x81C, 0xD31C0003, + 0x81C, 0xD21E0003, + 0x81C, 0xD1200003, + 0x81C, 0xD0220003, + 0x81C, 0xCF240003, + 0x81C, 0xCE260003, + 0x81C, 0xCD280003, + 0x81C, 0xCC2A0003, + 0x81C, 
0xCB2C0003, + 0x81C, 0xCA2E0003, + 0x81C, 0xC9300003, + 0x81C, 0xC8320003, + 0x81C, 0xC7340003, + 0x81C, 0xC6360003, + 0x81C, 0xC5380003, + 0x81C, 0xA73A0003, + 0x81C, 0xA63C0003, + 0x81C, 0xA53E0003, + 0x81C, 0xA4400003, + 0x81C, 0xA3420003, + 0x81C, 0xA2440003, + 0x81C, 0x87460003, + 0x81C, 0x86480003, + 0x81C, 0x854A0003, + 0x81C, 0x844C0003, + 0x81C, 0x834E0003, + 0x81C, 0x82500003, + 0x81C, 0x81520003, + 0x81C, 0x64540003, + 0x81C, 0x63560003, + 0x81C, 0x62580003, + 0x81C, 0x615A0003, + 0x81C, 0x445C0003, + 0x81C, 0x435E0003, + 0x81C, 0x42600003, + 0x81C, 0x41620003, + 0x81C, 0x27640003, + 0x81C, 0x26660003, + 0x81C, 0x25680003, + 0x81C, 0x246A0003, + 0x81C, 0x236C0003, + 0x81C, 0x226E0003, + 0x81C, 0x21700003, + 0x81C, 0x21720003, + 0x81C, 0x21740003, + 0x81C, 0x21760003, + 0x81C, 0x21780003, + 0x81C, 0x217A0003, + 0x81C, 0x217C0003, + 0x81C, 0x217E0003, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x81C, 0xFE000003, + 0x81C, 0xFE020003, + 0x81C, 0xFD040003, + 0x81C, 0xFC060003, + 0x81C, 0xFB080003, + 0x81C, 0xFA0A0003, + 0x81C, 0xF90C0003, + 0x81C, 0xF80E0003, + 0x81C, 0xF7100003, + 0x81C, 0xF6120003, + 0x81C, 0xF5140003, + 0x81C, 0xF4160003, + 0x81C, 0xF3180003, + 0x81C, 0xF21A0003, + 0x81C, 0xF11C0003, + 0x81C, 0xF01E0003, + 0x81C, 0xEF200003, + 0x81C, 0xEE220003, + 0x81C, 0xED240003, + 0x81C, 0x0F260003, + 0x81C, 0x0E280003, + 0x81C, 0x0D2A0003, + 0x81C, 0x0C2C0003, + 0x81C, 0x0B2E0003, + 0x81C, 0x0A300003, + 0x81C, 0x09320003, + 0x81C, 0x08340003, + 0x81C, 0x07360003, + 0x81C, 0x06380003, + 0x81C, 0x053A0003, + 0x81C, 0x043C0003, + 0x81C, 0x033E0003, + 0x81C, 0x23400003, + 0x81C, 0x22420003, + 0x81C, 0xA8440003, + 0x81C, 0xA7460003, + 0x81C, 0xA6480003, + 0x81C, 0xA54A0003, + 0x81C, 0xA44C0003, + 0x81C, 0x684E0003, + 0x81C, 0x67500003, + 0x81C, 0x66520003, + 0x81C, 0x65540003, + 0x81C, 0x64560003, + 0x81C, 0x63580003, + 0x81C, 0x625A0003, + 0x81C, 0x615C0003, + 0x81C, 0x475E0003, + 0x81C, 0x46600003, + 0x81C, 0x45620003, + 0x81C, 0x44640003, + 0x81C, 0x43660003, + 0x81C, 0x42680003, + 0x81C, 0x416A0003, + 0x81C, 0x416C0003, + 0x81C, 0x416E0003, + 0x81C, 0x41700003, + 0x81C, 0x41720003, + 0x81C, 0x41740003, + 0x81C, 0x41760003, + 0x81C, 0x41780003, + 0x81C, 0x417A0003, + 0x81C, 0x417C0003, + 0x81C, 0x417E0003, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0x81C, 0xFE000003, + 0x81C, 0xFE020003, + 0x81C, 0xFD040003, + 0x81C, 0xFC060003, + 0x81C, 0xFB080003, + 0x81C, 0xFA0A0003, + 0x81C, 0xF90C0003, + 0x81C, 0xF80E0003, + 0x81C, 0xF7100003, + 0x81C, 0xF6120003, + 0x81C, 0xF5140003, + 0x81C, 0xF4160003, + 0x81C, 0xF3180003, + 0x81C, 0xF21A0003, + 0x81C, 0xF11C0003, + 0x81C, 0xF01E0003, + 0x81C, 0xEF200003, + 0x81C, 0xEE220003, + 0x81C, 0xED240003, + 0x81C, 0xEC260003, + 0x81C, 0xEB280003, + 0x81C, 0xEA2A0003, + 0x81C, 0xE92C0003, + 0x81C, 0xE72E0003, + 0x81C, 0xE6300003, + 0x81C, 0xE5320003, + 0x81C, 0x08340003, + 0x81C, 0x07360003, + 0x81C, 0x06380003, + 0x81C, 0x053A0003, + 0x81C, 0x043C0003, + 0x81C, 0x033E0003, + 0x81C, 0x02400003, + 0x81C, 0xA9420003, + 0x81C, 0xA8440003, + 0x81C, 0xA7460003, + 0x81C, 0xA6480003, + 0x81C, 0xA54A0003, + 0x81C, 0x684C0003, + 0x81C, 0x674E0003, + 0x81C, 0x66500003, + 0x81C, 0x65520003, + 0x81C, 0x64540003, + 0x81C, 0x63560003, + 0x81C, 0x62580003, + 0x81C, 0x615A0003, + 0x81C, 0x475C0003, + 0x81C, 0x465E0003, + 0x81C, 0x45600003, + 0x81C, 0x44620003, + 0x81C, 0x43640003, + 0x81C, 0x42660003, + 0x81C, 0x41680003, + 0x81C, 0x416A0003, + 0x81C, 0x416C0003, + 0x81C, 0x416E0003, + 0x81C, 0x41700003, + 0x81C, 0x41720003, + 0x81C, 
0x41740003, + 0x81C, 0x41760003, + 0x81C, 0x41780003, + 0x81C, 0x417A0003, + 0x81C, 0x417C0003, + 0x81C, 0x417E0003, + 0xA0000000, 0x00000000, + 0x81C, 0xFE000003, + 0x81C, 0xFD020003, + 0x81C, 0xFC040003, + 0x81C, 0xFB060003, + 0x81C, 0xFA080003, + 0x81C, 0xF90A0003, + 0x81C, 0xF80C0003, + 0x81C, 0xF70E0003, + 0x81C, 0xF6100003, + 0x81C, 0xF5120003, + 0x81C, 0xF4140003, + 0x81C, 0xF3160003, + 0x81C, 0xF2180003, + 0x81C, 0xF11A0003, + 0x81C, 0xF01C0003, + 0x81C, 0xEF1E0003, + 0x81C, 0xEE200003, + 0x81C, 0xED220003, + 0x81C, 0xCF240003, + 0x81C, 0xCE260003, + 0x81C, 0xCD280003, + 0x81C, 0xCC2A0003, + 0x81C, 0xCB2C0003, + 0x81C, 0xCA2E0003, + 0x81C, 0xC9300003, + 0x81C, 0xC8320003, + 0x81C, 0xC7340003, + 0x81C, 0xC6360003, + 0x81C, 0xC5380003, + 0x81C, 0xC43A0003, + 0x81C, 0xA63C0003, + 0x81C, 0xA53E0003, + 0x81C, 0xA4400003, + 0x81C, 0xA3420003, + 0x81C, 0xA2440003, + 0x81C, 0xA1460003, + 0x81C, 0x86480003, + 0x81C, 0x854A0003, + 0x81C, 0x844C0003, + 0x81C, 0x834E0003, + 0x81C, 0x66500003, + 0x81C, 0x65520003, + 0x81C, 0x64540003, + 0x81C, 0x63560003, + 0x81C, 0x62580003, + 0x81C, 0x615A0003, + 0x81C, 0x435C0003, + 0x81C, 0x425E0003, + 0x81C, 0x41600003, + 0x81C, 0x27620003, + 0x81C, 0x26640003, + 0x81C, 0x25660003, + 0x81C, 0x24680003, + 0x81C, 0x236A0003, + 0x81C, 0x226C0003, + 0x81C, 0x216E0003, + 0x81C, 0x21700003, + 0x81C, 0x21720003, + 0x81C, 0x21740003, + 0x81C, 0x21760003, + 0x81C, 0x21780003, + 0x81C, 0x217A0003, + 0x81C, 0x217C0003, + 0x81C, 0x217E0003, + 0xB0000000, 0x00000000, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x81C, 0xF9000103, + 0x81C, 0xF8020103, + 0x81C, 0xF7040103, + 0x81C, 0xF6060103, + 0x81C, 0xF5080103, + 0x81C, 0xF40A0103, + 0x81C, 0xF30C0103, + 0x81C, 0xF20E0103, + 0x81C, 0xF1100103, + 0x81C, 0xF0120103, + 0x81C, 0xEF140103, + 0x81C, 0xEE160103, + 0x81C, 0xED180103, + 0x81C, 0xEC1A0103, + 0x81C, 0xEB1C0103, + 0x81C, 0xEA1E0103, + 0x81C, 0xE9200103, + 0x81C, 0xE8220103, + 0x81C, 0xE7240103, + 0x81C, 0xE6260103, + 0x81C, 0xE5280103, + 0x81C, 0xE42A0103, + 0x81C, 0xE32C0103, + 0x81C, 0xC32E0103, + 0x81C, 0xC2300103, + 0x81C, 0xC1320103, + 0x81C, 0xA5340103, + 0x81C, 0xA4360103, + 0x81C, 0xA3380103, + 0x81C, 0xA23A0103, + 0x81C, 0xA13C0103, + 0x81C, 0x843E0103, + 0x81C, 0x83400103, + 0x81C, 0x82420103, + 0x81C, 0x81440103, + 0x81C, 0x64460103, + 0x81C, 0x63480103, + 0x81C, 0x624A0103, + 0x81C, 0x614C0103, + 0x81C, 0x444E0103, + 0x81C, 0x43500103, + 0x81C, 0x42520103, + 0x81C, 0x41540103, + 0x81C, 0x25560103, + 0x81C, 0x24580103, + 0x81C, 0x235A0103, + 0x81C, 0x065C0103, + 0x81C, 0x055E0103, + 0x81C, 0x04600103, + 0x81C, 0x03620103, + 0x81C, 0x02640103, + 0x81C, 0x01660103, + 0x81C, 0x01680103, + 0x81C, 0x016A0103, + 0x81C, 0x016C0103, + 0x81C, 0x016E0103, + 0x81C, 0x01700103, + 0x81C, 0x01720103, + 0x81C, 0x01740103, + 0x81C, 0x01760103, + 0x81C, 0x01780103, + 0x81C, 0x017A0103, + 0x81C, 0x017C0103, + 0x81C, 0x017E0103, + 0x90000002, 0x00000000, 0x40000000, 0x00000000, + 0x81C, 0xF8000103, + 0x81C, 0xF7020103, + 0x81C, 0xF6040103, + 0x81C, 0xF5060103, + 0x81C, 0xF4080103, + 0x81C, 0xF30A0103, + 0x81C, 0xF20C0103, + 0x81C, 0xF10E0103, + 0x81C, 0xF0100103, + 0x81C, 0xEF120103, + 0x81C, 0xEE140103, + 0x81C, 0xED160103, + 0x81C, 0xEC180103, + 0x81C, 0xEB1A0103, + 0x81C, 0xEA1C0103, + 0x81C, 0xE91E0103, + 0x81C, 0xE8200103, + 0x81C, 0xE7220103, + 0x81C, 0xE6240103, + 0x81C, 0xE5260103, + 0x81C, 0xE4280103, + 0x81C, 0xE32A0103, + 0x81C, 0xE22C0103, + 0x81C, 0xE12E0103, + 0x81C, 0xA5300103, + 0x81C, 0xA4320103, + 0x81C, 0xA3340103, + 0x81C, 0xA2360103, + 0x81C, 
0xA1380103, + 0x81C, 0x843A0103, + 0x81C, 0x833C0103, + 0x81C, 0x823E0103, + 0x81C, 0x81400103, + 0x81C, 0x64420103, + 0x81C, 0x63440103, + 0x81C, 0x62460103, + 0x81C, 0x61480103, + 0x81C, 0x454A0103, + 0x81C, 0x444C0103, + 0x81C, 0x434E0103, + 0x81C, 0x42500103, + 0x81C, 0x25520103, + 0x81C, 0x24540103, + 0x81C, 0x23560103, + 0x81C, 0x06580103, + 0x81C, 0x055A0103, + 0x81C, 0x045C0103, + 0x81C, 0x035E0103, + 0x81C, 0x02600103, + 0x81C, 0x01620103, + 0x81C, 0x01640103, + 0x81C, 0x01660103, + 0x81C, 0x01680103, + 0x81C, 0x016A0103, + 0x81C, 0x016C0103, + 0x81C, 0x016E0103, + 0x81C, 0x01700103, + 0x81C, 0x01720103, + 0x81C, 0x01740103, + 0x81C, 0x01760103, + 0x81C, 0x01780103, + 0x81C, 0x017A0103, + 0x81C, 0x017C0103, + 0x81C, 0x017E0103, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x81C, 0xFC000103, + 0x81C, 0xFB020103, + 0x81C, 0xFA040103, + 0x81C, 0xF9060103, + 0x81C, 0xF8080103, + 0x81C, 0xF70A0103, + 0x81C, 0xF60C0103, + 0x81C, 0xF50E0103, + 0x81C, 0xF4100103, + 0x81C, 0xF3120103, + 0x81C, 0xF2140103, + 0x81C, 0xF1160103, + 0x81C, 0xF0180103, + 0x81C, 0xEF1A0103, + 0x81C, 0xEE1C0103, + 0x81C, 0xED1E0103, + 0x81C, 0xEC200103, + 0x81C, 0xEB220103, + 0x81C, 0xEA240103, + 0x81C, 0xE9260103, + 0x81C, 0xE8280103, + 0x81C, 0xE72A0103, + 0x81C, 0xE62C0103, + 0x81C, 0xE52E0103, + 0x81C, 0xE4300103, + 0x81C, 0xE3320103, + 0x81C, 0xE2340103, + 0x81C, 0xE1360103, + 0x81C, 0x87380103, + 0x81C, 0x863A0103, + 0x81C, 0x853C0103, + 0x81C, 0x843E0103, + 0x81C, 0x83400103, + 0x81C, 0x82420103, + 0x81C, 0x81440103, + 0x81C, 0x64460103, + 0x81C, 0x63480103, + 0x81C, 0x624A0103, + 0x81C, 0x464C0103, + 0x81C, 0x454E0103, + 0x81C, 0x44500103, + 0x81C, 0x43520103, + 0x81C, 0x26540103, + 0x81C, 0x25560103, + 0x81C, 0x24580103, + 0x81C, 0x075A0103, + 0x81C, 0x065C0103, + 0x81C, 0x055E0103, + 0x81C, 0x04600103, + 0x81C, 0x03620103, + 0x81C, 0x02640103, + 0x81C, 0x01660103, + 0x81C, 0x01680103, + 0x81C, 0x016A0103, + 0x81C, 0x016C0103, + 0x81C, 0x016E0103, + 0x81C, 0x01700103, + 0x81C, 0x01720103, + 0x81C, 0x01740103, + 0x81C, 0x01760103, + 0x81C, 0x01780103, + 0x81C, 0x017A0103, + 0x81C, 0x017C0103, + 0x81C, 0x017E0103, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x81C, 0xF9000103, + 0x81C, 0xF8020103, + 0x81C, 0xF7040103, + 0x81C, 0xF6060103, + 0x81C, 0xF5080103, + 0x81C, 0xF40A0103, + 0x81C, 0xF30C0103, + 0x81C, 0xF20E0103, + 0x81C, 0xF1100103, + 0x81C, 0xF0120103, + 0x81C, 0xEF140103, + 0x81C, 0xEE160103, + 0x81C, 0xED180103, + 0x81C, 0xEC1A0103, + 0x81C, 0xEB1C0103, + 0x81C, 0xEA1E0103, + 0x81C, 0xE9200103, + 0x81C, 0xE8220103, + 0x81C, 0xE7240103, + 0x81C, 0xE6260103, + 0x81C, 0xE5280103, + 0x81C, 0xE42A0103, + 0x81C, 0xE32C0103, + 0x81C, 0xE22E0103, + 0x81C, 0xA6300103, + 0x81C, 0xA5320103, + 0x81C, 0xA4340103, + 0x81C, 0xA3360103, + 0x81C, 0xA2380103, + 0x81C, 0xA13A0103, + 0x81C, 0x843C0103, + 0x81C, 0x833E0103, + 0x81C, 0x82400103, + 0x81C, 0x81420103, + 0x81C, 0x64440103, + 0x81C, 0x63460103, + 0x81C, 0x62480103, + 0x81C, 0x614A0103, + 0x81C, 0x444C0103, + 0x81C, 0x434E0103, + 0x81C, 0x42500103, + 0x81C, 0x41520103, + 0x81C, 0x25540103, + 0x81C, 0x24560103, + 0x81C, 0x23580103, + 0x81C, 0x225A0103, + 0x81C, 0x055C0103, + 0x81C, 0x045E0103, + 0x81C, 0x03600103, + 0x81C, 0x02620103, + 0x81C, 0x01640103, + 0x81C, 0x01660103, + 0x81C, 0x01680103, + 0x81C, 0x016A0103, + 0x81C, 0x016C0103, + 0x81C, 0x016E0103, + 0x81C, 0x01700103, + 0x81C, 0x01720103, + 0x81C, 0x01740103, + 0x81C, 0x01760103, + 0x81C, 0x01780103, + 0x81C, 0x017A0103, + 0x81C, 0x017C0103, + 0x81C, 0x017E0103, + 0x90000005, 
0x00000000, 0x40000000, 0x00000000, + 0x81C, 0xFD000103, + 0x81C, 0xFC020103, + 0x81C, 0xFB040103, + 0x81C, 0xFA060103, + 0x81C, 0xF9080103, + 0x81C, 0xF80A0103, + 0x81C, 0xF70C0103, + 0x81C, 0xF60E0103, + 0x81C, 0xF5100103, + 0x81C, 0xF4120103, + 0x81C, 0xF3140103, + 0x81C, 0xF2160103, + 0x81C, 0xF1180103, + 0x81C, 0xF01A0103, + 0x81C, 0xEF1C0103, + 0x81C, 0xEE1E0103, + 0x81C, 0xED200103, + 0x81C, 0xEC220103, + 0x81C, 0xEB240103, + 0x81C, 0xEA260103, + 0x81C, 0xE9280103, + 0x81C, 0xE82A0103, + 0x81C, 0xE72C0103, + 0x81C, 0xE62E0103, + 0x81C, 0xE5300103, + 0x81C, 0xE4320103, + 0x81C, 0xE3340103, + 0x81C, 0xE2360103, + 0x81C, 0xE1380103, + 0x81C, 0xA33A0103, + 0x81C, 0xA23C0103, + 0x81C, 0xA13E0103, + 0x81C, 0x84400103, + 0x81C, 0x83420103, + 0x81C, 0x82440103, + 0x81C, 0x81460103, + 0x81C, 0x64480103, + 0x81C, 0x634A0103, + 0x81C, 0x624C0103, + 0x81C, 0x614E0103, + 0x81C, 0x45500103, + 0x81C, 0x44520103, + 0x81C, 0x43540103, + 0x81C, 0x42560103, + 0x81C, 0x25580103, + 0x81C, 0x245A0103, + 0x81C, 0x235C0103, + 0x81C, 0x065E0103, + 0x81C, 0x05600103, + 0x81C, 0x04620103, + 0x81C, 0x03640103, + 0x81C, 0x02660103, + 0x81C, 0x01680103, + 0x81C, 0x016A0103, + 0x81C, 0x016C0103, + 0x81C, 0x016E0103, + 0x81C, 0x01700103, + 0x81C, 0x01720103, + 0x81C, 0x01740103, + 0x81C, 0x01760103, + 0x81C, 0x01780103, + 0x81C, 0x017A0103, + 0x81C, 0x017C0103, + 0x81C, 0x017E0103, + 0x90000006, 0x00000000, 0x40000000, 0x00000000, + 0x81C, 0xFA000103, + 0x81C, 0xF9020103, + 0x81C, 0xF8040103, + 0x81C, 0xF7060103, + 0x81C, 0xF6080103, + 0x81C, 0xF50A0103, + 0x81C, 0xF40C0103, + 0x81C, 0xF30E0103, + 0x81C, 0xF2100103, + 0x81C, 0xF1120103, + 0x81C, 0xF0140103, + 0x81C, 0xEF160103, + 0x81C, 0xEE180103, + 0x81C, 0xED1A0103, + 0x81C, 0xEC1C0103, + 0x81C, 0xEB1E0103, + 0x81C, 0xEA200103, + 0x81C, 0xE9220103, + 0x81C, 0xE8240103, + 0x81C, 0xE7260103, + 0x81C, 0xE6280103, + 0x81C, 0xE52A0103, + 0x81C, 0xE42C0103, + 0x81C, 0xE32E0103, + 0x81C, 0xE2300103, + 0x81C, 0xE1320103, + 0x81C, 0xA5340103, + 0x81C, 0xA4360103, + 0x81C, 0xA3380103, + 0x81C, 0xA23A0103, + 0x81C, 0xA13C0103, + 0x81C, 0x843E0103, + 0x81C, 0x83400103, + 0x81C, 0x82420103, + 0x81C, 0x81440103, + 0x81C, 0x64460103, + 0x81C, 0x63480103, + 0x81C, 0x624A0103, + 0x81C, 0x614C0103, + 0x81C, 0x454E0103, + 0x81C, 0x44500103, + 0x81C, 0x43520103, + 0x81C, 0x42540103, + 0x81C, 0x41560103, + 0x81C, 0x24580103, + 0x81C, 0x235A0103, + 0x81C, 0x225C0103, + 0x81C, 0x055E0103, + 0x81C, 0x04600103, + 0x81C, 0x03620103, + 0x81C, 0x02640103, + 0x81C, 0x01660103, + 0x81C, 0x01680103, + 0x81C, 0x016A0103, + 0x81C, 0x016C0103, + 0x81C, 0x016E0103, + 0x81C, 0x01700103, + 0x81C, 0x01720103, + 0x81C, 0x01740103, + 0x81C, 0x01760103, + 0x81C, 0x01780103, + 0x81C, 0x017A0103, + 0x81C, 0x017C0103, + 0x81C, 0x017E0103, + 0x90000007, 0x00000000, 0x40000000, 0x00000000, + 0x81C, 0xFF000103, + 0x81C, 0xFF020103, + 0x81C, 0xFE040103, + 0x81C, 0xFD060103, + 0x81C, 0xFC080103, + 0x81C, 0xFB0A0103, + 0x81C, 0xFA0C0103, + 0x81C, 0xF90E0103, + 0x81C, 0xF8100103, + 0x81C, 0xF7120103, + 0x81C, 0xF6140103, + 0x81C, 0xF5160103, + 0x81C, 0xF4180103, + 0x81C, 0xF31A0103, + 0x81C, 0xF21C0103, + 0x81C, 0xF11E0103, + 0x81C, 0xF0200103, + 0x81C, 0xEF220103, + 0x81C, 0xEE240103, + 0x81C, 0xED260103, + 0x81C, 0xEC280103, + 0x81C, 0xEB2A0103, + 0x81C, 0xEA2C0103, + 0x81C, 0xE92E0103, + 0x81C, 0xE8300103, + 0x81C, 0xE7320103, + 0x81C, 0xE6340103, + 0x81C, 0xE5360103, + 0x81C, 0xE4380103, + 0x81C, 0xE33A0103, + 0x81C, 0xA53C0103, + 0x81C, 0xA43E0103, + 0x81C, 0xA3400103, + 0x81C, 0xA2420103, + 0x81C, 
0xA1440103, + 0x81C, 0x85460103, + 0x81C, 0x84480103, + 0x81C, 0x834A0103, + 0x81C, 0x824C0103, + 0x81C, 0x814E0103, + 0x81C, 0x64500103, + 0x81C, 0x63520103, + 0x81C, 0x62540103, + 0x81C, 0x44560103, + 0x81C, 0x43580103, + 0x81C, 0x425A0103, + 0x81C, 0x265C0103, + 0x81C, 0x255E0103, + 0x81C, 0x24600103, + 0x81C, 0x07620103, + 0x81C, 0x06640103, + 0x81C, 0x05660103, + 0x81C, 0x04680103, + 0x81C, 0x036A0103, + 0x81C, 0x026C0103, + 0x81C, 0x016E0103, + 0x81C, 0x01700103, + 0x81C, 0x01720103, + 0x81C, 0x01740103, + 0x81C, 0x01760103, + 0x81C, 0x01780103, + 0x81C, 0x017A0103, + 0x81C, 0x017C0103, + 0x81C, 0x017E0103, + 0x90000008, 0x00000000, 0x40000000, 0x00000000, + 0x81C, 0xF8000103, + 0x81C, 0xF7020103, + 0x81C, 0xF6040103, + 0x81C, 0xF5060103, + 0x81C, 0xF4080103, + 0x81C, 0xF30A0103, + 0x81C, 0xF20C0103, + 0x81C, 0xF10E0103, + 0x81C, 0xF0100103, + 0x81C, 0xEF120103, + 0x81C, 0xEE140103, + 0x81C, 0xED160103, + 0x81C, 0xEC180103, + 0x81C, 0xEB1A0103, + 0x81C, 0xEA1C0103, + 0x81C, 0xE91E0103, + 0x81C, 0xE8200103, + 0x81C, 0xE7220103, + 0x81C, 0xE6240103, + 0x81C, 0xE5260103, + 0x81C, 0xE4280103, + 0x81C, 0xE32A0103, + 0x81C, 0xE22C0103, + 0x81C, 0xE12E0103, + 0x81C, 0xA4300103, + 0x81C, 0xA3320103, + 0x81C, 0xA2340103, + 0x81C, 0xA1360103, + 0x81C, 0x85380103, + 0x81C, 0x843A0103, + 0x81C, 0x833C0103, + 0x81C, 0x823E0103, + 0x81C, 0x65400103, + 0x81C, 0x64420103, + 0x81C, 0x63440103, + 0x81C, 0x62460103, + 0x81C, 0x45480103, + 0x81C, 0x444A0103, + 0x81C, 0x434C0103, + 0x81C, 0x264E0103, + 0x81C, 0x25500103, + 0x81C, 0x24520103, + 0x81C, 0x08540103, + 0x81C, 0x07560103, + 0x81C, 0x06580103, + 0x81C, 0x055A0103, + 0x81C, 0x045C0103, + 0x81C, 0x035E0103, + 0x81C, 0x02600103, + 0x81C, 0x01620103, + 0x81C, 0x01640103, + 0x81C, 0x01660103, + 0x81C, 0x01680103, + 0x81C, 0x016A0103, + 0x81C, 0x016C0103, + 0x81C, 0x016E0103, + 0x81C, 0x01700103, + 0x81C, 0x01720103, + 0x81C, 0x01740103, + 0x81C, 0x01760103, + 0x81C, 0x01780103, + 0x81C, 0x017A0103, + 0x81C, 0x017C0103, + 0x81C, 0x017E0103, + 0x90000009, 0x00000000, 0x40000000, 0x00000000, + 0x81C, 0xFF000103, + 0x81C, 0xFF020103, + 0x81C, 0xFE040103, + 0x81C, 0xFD060103, + 0x81C, 0xFC080103, + 0x81C, 0xFB0A0103, + 0x81C, 0xFA0C0103, + 0x81C, 0xF90E0103, + 0x81C, 0xF8100103, + 0x81C, 0xF7120103, + 0x81C, 0xF6140103, + 0x81C, 0xF5160103, + 0x81C, 0xF4180103, + 0x81C, 0xF31A0103, + 0x81C, 0xF21C0103, + 0x81C, 0xF11E0103, + 0x81C, 0xF0200103, + 0x81C, 0xEF220103, + 0x81C, 0xEE240103, + 0x81C, 0xED260103, + 0x81C, 0xEC280103, + 0x81C, 0xEB2A0103, + 0x81C, 0xEA2C0103, + 0x81C, 0xE92E0103, + 0x81C, 0xE8300103, + 0x81C, 0xE7320103, + 0x81C, 0xE6340103, + 0x81C, 0xE5360103, + 0x81C, 0xE4380103, + 0x81C, 0xE33A0103, + 0x81C, 0xA53C0103, + 0x81C, 0xA43E0103, + 0x81C, 0xA3400103, + 0x81C, 0xA2420103, + 0x81C, 0xA1440103, + 0x81C, 0x85460103, + 0x81C, 0x84480103, + 0x81C, 0x834A0103, + 0x81C, 0x824C0103, + 0x81C, 0x814E0103, + 0x81C, 0x64500103, + 0x81C, 0x63520103, + 0x81C, 0x62540103, + 0x81C, 0x44560103, + 0x81C, 0x43580103, + 0x81C, 0x425A0103, + 0x81C, 0x265C0103, + 0x81C, 0x255E0103, + 0x81C, 0x24600103, + 0x81C, 0x07620103, + 0x81C, 0x06640103, + 0x81C, 0x05660103, + 0x81C, 0x04680103, + 0x81C, 0x036A0103, + 0x81C, 0x026C0103, + 0x81C, 0x016E0103, + 0x81C, 0x01700103, + 0x81C, 0x01720103, + 0x81C, 0x01740103, + 0x81C, 0x01760103, + 0x81C, 0x01780103, + 0x81C, 0x017A0103, + 0x81C, 0x017C0103, + 0x81C, 0x017E0103, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x81C, 0xF9000103, + 0x81C, 0xF8020103, + 0x81C, 0xF7040103, + 0x81C, 0xF6060103, + 0x81C, 
0xF5080103, + 0x81C, 0xF40A0103, + 0x81C, 0xF30C0103, + 0x81C, 0xF20E0103, + 0x81C, 0xF1100103, + 0x81C, 0xF0120103, + 0x81C, 0xEF140103, + 0x81C, 0xEE160103, + 0x81C, 0xED180103, + 0x81C, 0xEC1A0103, + 0x81C, 0xEB1C0103, + 0x81C, 0xEA1E0103, + 0x81C, 0xE9200103, + 0x81C, 0xE8220103, + 0x81C, 0xE7240103, + 0x81C, 0xE6260103, + 0x81C, 0xE5280103, + 0x81C, 0xE42A0103, + 0x81C, 0xE32C0103, + 0x81C, 0xE22E0103, + 0x81C, 0xA6300103, + 0x81C, 0xA5320103, + 0x81C, 0xA4340103, + 0x81C, 0xA3360103, + 0x81C, 0xA2380103, + 0x81C, 0xA13A0103, + 0x81C, 0x843C0103, + 0x81C, 0x833E0103, + 0x81C, 0x82400103, + 0x81C, 0x81420103, + 0x81C, 0x64440103, + 0x81C, 0x63460103, + 0x81C, 0x62480103, + 0x81C, 0x614A0103, + 0x81C, 0x444C0103, + 0x81C, 0x434E0103, + 0x81C, 0x42500103, + 0x81C, 0x41520103, + 0x81C, 0x25540103, + 0x81C, 0x24560103, + 0x81C, 0x23580103, + 0x81C, 0x225A0103, + 0x81C, 0x055C0103, + 0x81C, 0x045E0103, + 0x81C, 0x03600103, + 0x81C, 0x02620103, + 0x81C, 0x01640103, + 0x81C, 0x01660103, + 0x81C, 0x01680103, + 0x81C, 0x016A0103, + 0x81C, 0x016C0103, + 0x81C, 0x016E0103, + 0x81C, 0x01700103, + 0x81C, 0x01720103, + 0x81C, 0x01740103, + 0x81C, 0x01760103, + 0x81C, 0x01780103, + 0x81C, 0x017A0103, + 0x81C, 0x017C0103, + 0x81C, 0x017E0103, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0x81C, 0xFA000103, + 0x81C, 0xF9020103, + 0x81C, 0xF8040103, + 0x81C, 0xF7060103, + 0x81C, 0xF6080103, + 0x81C, 0xF50A0103, + 0x81C, 0xF40C0103, + 0x81C, 0xF30E0103, + 0x81C, 0xF2100103, + 0x81C, 0xF1120103, + 0x81C, 0xF0140103, + 0x81C, 0xEF160103, + 0x81C, 0xEE180103, + 0x81C, 0xED1A0103, + 0x81C, 0xCC1C0103, + 0x81C, 0xCB1E0103, + 0x81C, 0xCA200103, + 0x81C, 0xE9220103, + 0x81C, 0xE8240103, + 0x81C, 0xE7260103, + 0x81C, 0xE6280103, + 0x81C, 0xE42A0103, + 0x81C, 0xE32C0103, + 0x81C, 0xE22E0103, + 0x81C, 0xA7300103, + 0x81C, 0xA6320103, + 0x81C, 0xA5340103, + 0x81C, 0xA4360103, + 0x81C, 0xA3380103, + 0x81C, 0xA23A0103, + 0x81C, 0xA13C0103, + 0x81C, 0x843E0103, + 0x81C, 0x83400103, + 0x81C, 0x82420103, + 0x81C, 0x65440103, + 0x81C, 0x64460103, + 0x81C, 0x63480103, + 0x81C, 0x624A0103, + 0x81C, 0x614C0103, + 0x81C, 0x444E0103, + 0x81C, 0x43500103, + 0x81C, 0x42520103, + 0x81C, 0x41540103, + 0x81C, 0x24560103, + 0x81C, 0x23580103, + 0x81C, 0x055A0103, + 0x81C, 0x045C0103, + 0x81C, 0x035E0103, + 0x81C, 0x02600103, + 0x81C, 0x01620103, + 0x81C, 0x01640103, + 0x81C, 0x01660103, + 0x81C, 0x01680103, + 0x81C, 0x016A0103, + 0x81C, 0x016C0103, + 0x81C, 0x016E0103, + 0x81C, 0x01700103, + 0x81C, 0x01720103, + 0x81C, 0x01740103, + 0x81C, 0x01760103, + 0x81C, 0x01780103, + 0x81C, 0x017A0103, + 0x81C, 0x017C0103, + 0x81C, 0x017E0103, + 0xA0000000, 0x00000000, + 0x81C, 0xFF000103, + 0x81C, 0xFE020103, + 0x81C, 0xFD040103, + 0x81C, 0xFC060103, + 0x81C, 0xFB080103, + 0x81C, 0xFA0A0103, + 0x81C, 0xF90C0103, + 0x81C, 0xF80E0103, + 0x81C, 0xF7100103, + 0x81C, 0xF6120103, + 0x81C, 0xF5140103, + 0x81C, 0xF4160103, + 0x81C, 0xF3180103, + 0x81C, 0xF21A0103, + 0x81C, 0xF11C0103, + 0x81C, 0xF01E0103, + 0x81C, 0xEF200103, + 0x81C, 0xEE220103, + 0x81C, 0xED240103, + 0x81C, 0xEC260103, + 0x81C, 0xEB280103, + 0x81C, 0xEA2A0103, + 0x81C, 0xE92C0103, + 0x81C, 0xE82E0103, + 0x81C, 0xE7300103, + 0x81C, 0xE6320103, + 0x81C, 0xE5340103, + 0x81C, 0xE4360103, + 0x81C, 0xE3380103, + 0x81C, 0xE23A0103, + 0x81C, 0xE13C0103, + 0x81C, 0xA43E0103, + 0x81C, 0xA3400103, + 0x81C, 0xA2420103, + 0x81C, 0xA1440103, + 0x81C, 0x86460103, + 0x81C, 0x85480103, + 0x81C, 0x844A0103, + 0x81C, 0x834C0103, + 0x81C, 0x824E0103, + 0x81C, 0x81500103, + 0x81C, 0x64520103, + 
0x81C, 0x63540103, + 0x81C, 0x62560103, + 0x81C, 0x61580103, + 0x81C, 0x435A0103, + 0x81C, 0x425C0103, + 0x81C, 0x415E0103, + 0x81C, 0x25600103, + 0x81C, 0x24620103, + 0x81C, 0x06640103, + 0x81C, 0x05660103, + 0x81C, 0x04680103, + 0x81C, 0x036A0103, + 0x81C, 0x026C0103, + 0x81C, 0x016E0103, + 0x81C, 0x01700103, + 0x81C, 0x01720103, + 0x81C, 0x01740103, + 0x81C, 0x01760103, + 0x81C, 0x01780103, + 0x81C, 0x017A0103, + 0x81C, 0x017C0103, + 0x81C, 0x017E0103, + 0xB0000000, 0x00000000, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x81C, 0xFA000203, + 0x81C, 0xF9020203, + 0x81C, 0xF8040203, + 0x81C, 0xF7060203, + 0x81C, 0xF6080203, + 0x81C, 0xF50A0203, + 0x81C, 0xF40C0203, + 0x81C, 0xF30E0203, + 0x81C, 0xF2100203, + 0x81C, 0xF1120203, + 0x81C, 0xF0140203, + 0x81C, 0xEF160203, + 0x81C, 0xEE180203, + 0x81C, 0xED1A0203, + 0x81C, 0xEC1C0203, + 0x81C, 0xEB1E0203, + 0x81C, 0xEA200203, + 0x81C, 0xE9220203, + 0x81C, 0xE8240203, + 0x81C, 0xE7260203, + 0x81C, 0xE6280203, + 0x81C, 0xE52A0203, + 0x81C, 0xE42C0203, + 0x81C, 0xE32E0203, + 0x81C, 0xE2300203, + 0x81C, 0xE1320203, + 0x81C, 0xA5340203, + 0x81C, 0xA4360203, + 0x81C, 0xA3380203, + 0x81C, 0xA23A0203, + 0x81C, 0xA13C0203, + 0x81C, 0x843E0203, + 0x81C, 0x83400203, + 0x81C, 0x82420203, + 0x81C, 0x81440203, + 0x81C, 0x63460203, + 0x81C, 0x62480203, + 0x81C, 0x614A0203, + 0x81C, 0x464C0203, + 0x81C, 0x454E0203, + 0x81C, 0x44500203, + 0x81C, 0x43520203, + 0x81C, 0x42540203, + 0x81C, 0x41560203, + 0x81C, 0x24580203, + 0x81C, 0x235A0203, + 0x81C, 0x065C0203, + 0x81C, 0x055E0203, + 0x81C, 0x04600203, + 0x81C, 0x03620203, + 0x81C, 0x02640203, + 0x81C, 0x01660203, + 0x81C, 0x01680203, + 0x81C, 0x016A0203, + 0x81C, 0x016C0203, + 0x81C, 0x016E0203, + 0x81C, 0x01700203, + 0x81C, 0x01720203, + 0x81C, 0x01740203, + 0x81C, 0x01760203, + 0x81C, 0x01780203, + 0x81C, 0x017A0203, + 0x81C, 0x017C0203, + 0x81C, 0x017E0203, + 0x90000002, 0x00000000, 0x40000000, 0x00000000, + 0x81C, 0xF8000203, + 0x81C, 0xF7020203, + 0x81C, 0xF6040203, + 0x81C, 0xF5060203, + 0x81C, 0xF4080203, + 0x81C, 0xF30A0203, + 0x81C, 0xF20C0203, + 0x81C, 0xF10E0203, + 0x81C, 0xF0100203, + 0x81C, 0xEF120203, + 0x81C, 0xEE140203, + 0x81C, 0xED160203, + 0x81C, 0xEC180203, + 0x81C, 0xEB1A0203, + 0x81C, 0xEA1C0203, + 0x81C, 0xE91E0203, + 0x81C, 0xE8200203, + 0x81C, 0xE7220203, + 0x81C, 0xE6240203, + 0x81C, 0xE5260203, + 0x81C, 0xE4280203, + 0x81C, 0xE32A0203, + 0x81C, 0xE22C0203, + 0x81C, 0xE12E0203, + 0x81C, 0xA6300203, + 0x81C, 0xA5320203, + 0x81C, 0xA4340203, + 0x81C, 0xA3360203, + 0x81C, 0xA2380203, + 0x81C, 0x853A0203, + 0x81C, 0x843C0203, + 0x81C, 0x833E0203, + 0x81C, 0x82400203, + 0x81C, 0x81420203, + 0x81C, 0x64440203, + 0x81C, 0x63460203, + 0x81C, 0x62480203, + 0x81C, 0x614A0203, + 0x81C, 0x444C0203, + 0x81C, 0x434E0203, + 0x81C, 0x42500203, + 0x81C, 0x25520203, + 0x81C, 0x24540203, + 0x81C, 0x23560203, + 0x81C, 0x06580203, + 0x81C, 0x055A0203, + 0x81C, 0x045C0203, + 0x81C, 0x035E0203, + 0x81C, 0x02600203, + 0x81C, 0x01620203, + 0x81C, 0x01640203, + 0x81C, 0x01660203, + 0x81C, 0x01680203, + 0x81C, 0x016A0203, + 0x81C, 0x016C0203, + 0x81C, 0x016E0203, + 0x81C, 0x01700203, + 0x81C, 0x01720203, + 0x81C, 0x01740203, + 0x81C, 0x01760203, + 0x81C, 0x01780203, + 0x81C, 0x017A0203, + 0x81C, 0x017C0203, + 0x81C, 0x017E0203, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x81C, 0xFC000203, + 0x81C, 0xFB020203, + 0x81C, 0xFA040203, + 0x81C, 0xF9060203, + 0x81C, 0xF8080203, + 0x81C, 0xF70A0203, + 0x81C, 0xF60C0203, + 0x81C, 0xF50E0203, + 0x81C, 0xF4100203, + 0x81C, 0xF3120203, + 0x81C, 0xF2140203, 
+ 0x81C, 0xF1160203, + 0x81C, 0xF0180203, + 0x81C, 0xEF1A0203, + 0x81C, 0xEE1C0203, + 0x81C, 0xED1E0203, + 0x81C, 0xEC200203, + 0x81C, 0xEB220203, + 0x81C, 0xEA240203, + 0x81C, 0xE9260203, + 0x81C, 0xE8280203, + 0x81C, 0xE72A0203, + 0x81C, 0xE62C0203, + 0x81C, 0xE52E0203, + 0x81C, 0xE4300203, + 0x81C, 0xE3320203, + 0x81C, 0xE2340203, + 0x81C, 0xE1360203, + 0x81C, 0x87380203, + 0x81C, 0x863A0203, + 0x81C, 0x853C0203, + 0x81C, 0x843E0203, + 0x81C, 0x83400203, + 0x81C, 0x82420203, + 0x81C, 0x81440203, + 0x81C, 0x64460203, + 0x81C, 0x63480203, + 0x81C, 0x624A0203, + 0x81C, 0x474C0203, + 0x81C, 0x464E0203, + 0x81C, 0x45500203, + 0x81C, 0x44520203, + 0x81C, 0x43540203, + 0x81C, 0x42560203, + 0x81C, 0x24580203, + 0x81C, 0x235A0203, + 0x81C, 0x075C0203, + 0x81C, 0x065E0203, + 0x81C, 0x05600203, + 0x81C, 0x04620203, + 0x81C, 0x03640203, + 0x81C, 0x02660203, + 0x81C, 0x01680203, + 0x81C, 0x016A0203, + 0x81C, 0x016C0203, + 0x81C, 0x016E0203, + 0x81C, 0x01700203, + 0x81C, 0x01720203, + 0x81C, 0x01740203, + 0x81C, 0x01760203, + 0x81C, 0x01780203, + 0x81C, 0x017A0203, + 0x81C, 0x017C0203, + 0x81C, 0x017E0203, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x81C, 0xF8000203, + 0x81C, 0xF7020203, + 0x81C, 0xF6040203, + 0x81C, 0xF5060203, + 0x81C, 0xF4080203, + 0x81C, 0xF30A0203, + 0x81C, 0xF20C0203, + 0x81C, 0xF10E0203, + 0x81C, 0xF0100203, + 0x81C, 0xEF120203, + 0x81C, 0xEE140203, + 0x81C, 0xED160203, + 0x81C, 0xEC180203, + 0x81C, 0xEB1A0203, + 0x81C, 0xEA1C0203, + 0x81C, 0xE91E0203, + 0x81C, 0xE8200203, + 0x81C, 0xE7220203, + 0x81C, 0xE6240203, + 0x81C, 0xE5260203, + 0x81C, 0xE4280203, + 0x81C, 0xE32A0203, + 0x81C, 0xE22C0203, + 0x81C, 0xE12E0203, + 0x81C, 0xA6300203, + 0x81C, 0xA5320203, + 0x81C, 0xA4340203, + 0x81C, 0xA3360203, + 0x81C, 0xA2380203, + 0x81C, 0xA13A0203, + 0x81C, 0x843C0203, + 0x81C, 0x833E0203, + 0x81C, 0x82400203, + 0x81C, 0x81420203, + 0x81C, 0x64440203, + 0x81C, 0x63460203, + 0x81C, 0x62480203, + 0x81C, 0x614A0203, + 0x81C, 0x444C0203, + 0x81C, 0x434E0203, + 0x81C, 0x42500203, + 0x81C, 0x41520203, + 0x81C, 0x25540203, + 0x81C, 0x24560203, + 0x81C, 0x23580203, + 0x81C, 0x065A0203, + 0x81C, 0x055C0203, + 0x81C, 0x045E0203, + 0x81C, 0x03600203, + 0x81C, 0x02620203, + 0x81C, 0x01640203, + 0x81C, 0x01660203, + 0x81C, 0x01680203, + 0x81C, 0x016A0203, + 0x81C, 0x016C0203, + 0x81C, 0x016E0203, + 0x81C, 0x01700203, + 0x81C, 0x01720203, + 0x81C, 0x01740203, + 0x81C, 0x01760203, + 0x81C, 0x01780203, + 0x81C, 0x017A0203, + 0x81C, 0x017C0203, + 0x81C, 0x017E0203, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x81C, 0xFB000203, + 0x81C, 0xFA020203, + 0x81C, 0xF9040203, + 0x81C, 0xF8060203, + 0x81C, 0xF7080203, + 0x81C, 0xF60A0203, + 0x81C, 0xF50C0203, + 0x81C, 0xF40E0203, + 0x81C, 0xF3100203, + 0x81C, 0xF2120203, + 0x81C, 0xF1140203, + 0x81C, 0xF0160203, + 0x81C, 0xEF180203, + 0x81C, 0xEE1A0203, + 0x81C, 0xED1C0203, + 0x81C, 0xEC1E0203, + 0x81C, 0xEB200203, + 0x81C, 0xEA220203, + 0x81C, 0xE9240203, + 0x81C, 0xE8260203, + 0x81C, 0xE7280203, + 0x81C, 0xE62A0203, + 0x81C, 0xE52C0203, + 0x81C, 0xE42E0203, + 0x81C, 0xE3300203, + 0x81C, 0xE2320203, + 0x81C, 0xE1340203, + 0x81C, 0xA5360203, + 0x81C, 0xA4380203, + 0x81C, 0xA33A0203, + 0x81C, 0xA23C0203, + 0x81C, 0x843E0203, + 0x81C, 0x83400203, + 0x81C, 0x82420203, + 0x81C, 0x81440203, + 0x81C, 0x64460203, + 0x81C, 0x63480203, + 0x81C, 0x624A0203, + 0x81C, 0x614C0203, + 0x81C, 0x474E0203, + 0x81C, 0x46500203, + 0x81C, 0x45520203, + 0x81C, 0x44540203, + 0x81C, 0x43560203, + 0x81C, 0x25580203, + 0x81C, 0x245A0203, + 0x81C, 0x235C0203, + 0x81C, 
0x075E0203, + 0x81C, 0x06600203, + 0x81C, 0x05620203, + 0x81C, 0x04640203, + 0x81C, 0x03660203, + 0x81C, 0x02680203, + 0x81C, 0x016A0203, + 0x81C, 0x016C0203, + 0x81C, 0x016E0203, + 0x81C, 0x01700203, + 0x81C, 0x01720203, + 0x81C, 0x01740203, + 0x81C, 0x01760203, + 0x81C, 0x01780203, + 0x81C, 0x017A0203, + 0x81C, 0x017C0203, + 0x81C, 0x017E0203, + 0x90000006, 0x00000000, 0x40000000, 0x00000000, + 0x81C, 0xFC000203, + 0x81C, 0xFB020203, + 0x81C, 0xFA040203, + 0x81C, 0xF9060203, + 0x81C, 0xF8080203, + 0x81C, 0xF70A0203, + 0x81C, 0xF60C0203, + 0x81C, 0xF50E0203, + 0x81C, 0xF4100203, + 0x81C, 0xF3120203, + 0x81C, 0xF2140203, + 0x81C, 0xF1160203, + 0x81C, 0xF0180203, + 0x81C, 0xEF1A0203, + 0x81C, 0xEE1C0203, + 0x81C, 0xED1E0203, + 0x81C, 0xEC200203, + 0x81C, 0xEB220203, + 0x81C, 0xEA240203, + 0x81C, 0xE9260203, + 0x81C, 0xE8280203, + 0x81C, 0xE72A0203, + 0x81C, 0xE62C0203, + 0x81C, 0xE52E0203, + 0x81C, 0xE4300203, + 0x81C, 0xE3320203, + 0x81C, 0xE2340203, + 0x81C, 0xE1360203, + 0x81C, 0xA5380203, + 0x81C, 0xA43A0203, + 0x81C, 0xA33C0203, + 0x81C, 0x853E0203, + 0x81C, 0x84400203, + 0x81C, 0x83420203, + 0x81C, 0x82440203, + 0x81C, 0x81460203, + 0x81C, 0x64480203, + 0x81C, 0x634A0203, + 0x81C, 0x624C0203, + 0x81C, 0x614E0203, + 0x81C, 0x46500203, + 0x81C, 0x45520203, + 0x81C, 0x44540203, + 0x81C, 0x43560203, + 0x81C, 0x25580203, + 0x81C, 0x245A0203, + 0x81C, 0x235C0203, + 0x81C, 0x075E0203, + 0x81C, 0x06600203, + 0x81C, 0x05620203, + 0x81C, 0x04640203, + 0x81C, 0x03660203, + 0x81C, 0x02680203, + 0x81C, 0x016A0203, + 0x81C, 0x016C0203, + 0x81C, 0x016E0203, + 0x81C, 0x01700203, + 0x81C, 0x01720203, + 0x81C, 0x01740203, + 0x81C, 0x01760203, + 0x81C, 0x01780203, + 0x81C, 0x017A0203, + 0x81C, 0x017C0203, + 0x81C, 0x017E0203, + 0x90000007, 0x00000000, 0x40000000, 0x00000000, + 0x81C, 0xFF000203, + 0x81C, 0xFF020203, + 0x81C, 0xFE040203, + 0x81C, 0xFD060203, + 0x81C, 0xFC080203, + 0x81C, 0xFB0A0203, + 0x81C, 0xFA0C0203, + 0x81C, 0xF90E0203, + 0x81C, 0xF8100203, + 0x81C, 0xF7120203, + 0x81C, 0xF6140203, + 0x81C, 0xF5160203, + 0x81C, 0xF4180203, + 0x81C, 0xF31A0203, + 0x81C, 0xF21C0203, + 0x81C, 0xF11E0203, + 0x81C, 0xF0200203, + 0x81C, 0xEF220203, + 0x81C, 0xEE240203, + 0x81C, 0xED260203, + 0x81C, 0xEC280203, + 0x81C, 0xEB2A0203, + 0x81C, 0xEA2C0203, + 0x81C, 0xE92E0203, + 0x81C, 0xE8300203, + 0x81C, 0xE7320203, + 0x81C, 0xE6340203, + 0x81C, 0xE5360203, + 0x81C, 0xE4380203, + 0x81C, 0xE33A0203, + 0x81C, 0xE23C0203, + 0x81C, 0xE13E0203, + 0x81C, 0xA4400203, + 0x81C, 0xA3420203, + 0x81C, 0xA2440203, + 0x81C, 0xA1460203, + 0x81C, 0x84480203, + 0x81C, 0x834A0203, + 0x81C, 0x824C0203, + 0x81C, 0x814E0203, + 0x81C, 0x64500203, + 0x81C, 0x63520203, + 0x81C, 0x62540203, + 0x81C, 0x61560203, + 0x81C, 0x45580203, + 0x81C, 0x445A0203, + 0x81C, 0x435C0203, + 0x81C, 0x425E0203, + 0x81C, 0x24600203, + 0x81C, 0x23620203, + 0x81C, 0x07640203, + 0x81C, 0x06660203, + 0x81C, 0x05680203, + 0x81C, 0x046A0203, + 0x81C, 0x036C0203, + 0x81C, 0x026E0203, + 0x81C, 0x01700203, + 0x81C, 0x01720203, + 0x81C, 0x01740203, + 0x81C, 0x01760203, + 0x81C, 0x01780203, + 0x81C, 0x017A0203, + 0x81C, 0x017C0203, + 0x81C, 0x017E0203, + 0x90000008, 0x00000000, 0x40000000, 0x00000000, + 0x81C, 0xF7000203, + 0x81C, 0xF6020203, + 0x81C, 0xF5040203, + 0x81C, 0xF4060203, + 0x81C, 0xF3080203, + 0x81C, 0xF20A0203, + 0x81C, 0xF10C0203, + 0x81C, 0xF00E0203, + 0x81C, 0xEF100203, + 0x81C, 0xEE120203, + 0x81C, 0xED140203, + 0x81C, 0xEC160203, + 0x81C, 0xEB180203, + 0x81C, 0xEA1A0203, + 0x81C, 0xE91C0203, + 0x81C, 0xE81E0203, + 0x81C, 0xE7200203, + 0x81C, 
0xE6220203, + 0x81C, 0xE5240203, + 0x81C, 0xE4260203, + 0x81C, 0xE3280203, + 0x81C, 0xE22A0203, + 0x81C, 0xA62C0203, + 0x81C, 0xA52E0203, + 0x81C, 0xA4300203, + 0x81C, 0xA3320203, + 0x81C, 0xA2340203, + 0x81C, 0xA1360203, + 0x81C, 0x86380203, + 0x81C, 0x853A0203, + 0x81C, 0x843C0203, + 0x81C, 0x833E0203, + 0x81C, 0x65400203, + 0x81C, 0x64420203, + 0x81C, 0x63440203, + 0x81C, 0x46460203, + 0x81C, 0x45480203, + 0x81C, 0x444A0203, + 0x81C, 0x434C0203, + 0x81C, 0x264E0203, + 0x81C, 0x25500203, + 0x81C, 0x24520203, + 0x81C, 0x08540203, + 0x81C, 0x07560203, + 0x81C, 0x06580203, + 0x81C, 0x055A0203, + 0x81C, 0x045C0203, + 0x81C, 0x035E0203, + 0x81C, 0x02600203, + 0x81C, 0x01620203, + 0x81C, 0x01640203, + 0x81C, 0x01660203, + 0x81C, 0x01680203, + 0x81C, 0x016A0203, + 0x81C, 0x016C0203, + 0x81C, 0x016E0203, + 0x81C, 0x01700203, + 0x81C, 0x01720203, + 0x81C, 0x01740203, + 0x81C, 0x01760203, + 0x81C, 0x01780203, + 0x81C, 0x017A0203, + 0x81C, 0x017C0203, + 0x81C, 0x017E0203, + 0x90000009, 0x00000000, 0x40000000, 0x00000000, + 0x81C, 0xFF000203, + 0x81C, 0xFF020203, + 0x81C, 0xFE040203, + 0x81C, 0xFD060203, + 0x81C, 0xFC080203, + 0x81C, 0xFB0A0203, + 0x81C, 0xFA0C0203, + 0x81C, 0xF90E0203, + 0x81C, 0xF8100203, + 0x81C, 0xF7120203, + 0x81C, 0xF6140203, + 0x81C, 0xF5160203, + 0x81C, 0xF4180203, + 0x81C, 0xF31A0203, + 0x81C, 0xF21C0203, + 0x81C, 0xF11E0203, + 0x81C, 0xF0200203, + 0x81C, 0xEF220203, + 0x81C, 0xEE240203, + 0x81C, 0xED260203, + 0x81C, 0xEC280203, + 0x81C, 0xEB2A0203, + 0x81C, 0xEA2C0203, + 0x81C, 0xE92E0203, + 0x81C, 0xE8300203, + 0x81C, 0xE7320203, + 0x81C, 0xE6340203, + 0x81C, 0xE5360203, + 0x81C, 0xE4380203, + 0x81C, 0xE33A0203, + 0x81C, 0xE23C0203, + 0x81C, 0xE13E0203, + 0x81C, 0xA4400203, + 0x81C, 0xA3420203, + 0x81C, 0xA2440203, + 0x81C, 0xA1460203, + 0x81C, 0x84480203, + 0x81C, 0x834A0203, + 0x81C, 0x824C0203, + 0x81C, 0x814E0203, + 0x81C, 0x64500203, + 0x81C, 0x63520203, + 0x81C, 0x62540203, + 0x81C, 0x61560203, + 0x81C, 0x45580203, + 0x81C, 0x445A0203, + 0x81C, 0x435C0203, + 0x81C, 0x425E0203, + 0x81C, 0x24600203, + 0x81C, 0x23620203, + 0x81C, 0x07640203, + 0x81C, 0x06660203, + 0x81C, 0x05680203, + 0x81C, 0x046A0203, + 0x81C, 0x036C0203, + 0x81C, 0x026E0203, + 0x81C, 0x01700203, + 0x81C, 0x01720203, + 0x81C, 0x01740203, + 0x81C, 0x01760203, + 0x81C, 0x01780203, + 0x81C, 0x017A0203, + 0x81C, 0x017C0203, + 0x81C, 0x017E0203, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x81C, 0xF8000203, + 0x81C, 0xF7020203, + 0x81C, 0xF6040203, + 0x81C, 0xF5060203, + 0x81C, 0xF4080203, + 0x81C, 0xF30A0203, + 0x81C, 0xF20C0203, + 0x81C, 0xF10E0203, + 0x81C, 0xF0100203, + 0x81C, 0xEF120203, + 0x81C, 0xEE140203, + 0x81C, 0xED160203, + 0x81C, 0xEC180203, + 0x81C, 0xEB1A0203, + 0x81C, 0xEA1C0203, + 0x81C, 0xE91E0203, + 0x81C, 0xE8200203, + 0x81C, 0xE7220203, + 0x81C, 0xE6240203, + 0x81C, 0xE5260203, + 0x81C, 0xE4280203, + 0x81C, 0xE32A0203, + 0x81C, 0xE22C0203, + 0x81C, 0xE12E0203, + 0x81C, 0xA6300203, + 0x81C, 0xA5320203, + 0x81C, 0xA4340203, + 0x81C, 0xA3360203, + 0x81C, 0xA2380203, + 0x81C, 0xA13A0203, + 0x81C, 0x843C0203, + 0x81C, 0x833E0203, + 0x81C, 0x82400203, + 0x81C, 0x81420203, + 0x81C, 0x64440203, + 0x81C, 0x63460203, + 0x81C, 0x62480203, + 0x81C, 0x614A0203, + 0x81C, 0x444C0203, + 0x81C, 0x434E0203, + 0x81C, 0x42500203, + 0x81C, 0x41520203, + 0x81C, 0x25540203, + 0x81C, 0x24560203, + 0x81C, 0x23580203, + 0x81C, 0x065A0203, + 0x81C, 0x055C0203, + 0x81C, 0x045E0203, + 0x81C, 0x03600203, + 0x81C, 0x02620203, + 0x81C, 0x01640203, + 0x81C, 0x01660203, + 0x81C, 0x01680203, + 0x81C, 
0x016A0203, + 0x81C, 0x016C0203, + 0x81C, 0x016E0203, + 0x81C, 0x01700203, + 0x81C, 0x01720203, + 0x81C, 0x01740203, + 0x81C, 0x01760203, + 0x81C, 0x01780203, + 0x81C, 0x017A0203, + 0x81C, 0x017C0203, + 0x81C, 0x017E0203, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0x81C, 0xF9000203, + 0x81C, 0xF8020203, + 0x81C, 0xF7040203, + 0x81C, 0xF6060203, + 0x81C, 0xF5080203, + 0x81C, 0xF40A0203, + 0x81C, 0xF30C0203, + 0x81C, 0xF20E0203, + 0x81C, 0xF1100203, + 0x81C, 0xF0120203, + 0x81C, 0xEF140203, + 0x81C, 0xCE160203, + 0x81C, 0xCD180203, + 0x81C, 0xCC1A0203, + 0x81C, 0xCB1C0203, + 0x81C, 0xCA1E0203, + 0x81C, 0xC9200203, + 0x81C, 0xC8220203, + 0x81C, 0xC7240203, + 0x81C, 0xC6260203, + 0x81C, 0xC5280203, + 0x81C, 0xC42A0203, + 0x81C, 0xC32C0203, + 0x81C, 0xC22E0203, + 0x81C, 0xC1300203, + 0x81C, 0xA5320203, + 0x81C, 0xA4340203, + 0x81C, 0xA3360203, + 0x81C, 0xA2380203, + 0x81C, 0xA13A0203, + 0x81C, 0x853C0203, + 0x81C, 0x843E0203, + 0x81C, 0x83400203, + 0x81C, 0x82420203, + 0x81C, 0x81440203, + 0x81C, 0x64460203, + 0x81C, 0x63480203, + 0x81C, 0x624A0203, + 0x81C, 0x614C0203, + 0x81C, 0x444E0203, + 0x81C, 0x43500203, + 0x81C, 0x42520203, + 0x81C, 0x41540203, + 0x81C, 0x24560203, + 0x81C, 0x23580203, + 0x81C, 0x075A0203, + 0x81C, 0x065C0203, + 0x81C, 0x055E0203, + 0x81C, 0x04600203, + 0x81C, 0x03620203, + 0x81C, 0x02640203, + 0x81C, 0x01660203, + 0x81C, 0x01680203, + 0x81C, 0x016A0203, + 0x81C, 0x016C0203, + 0x81C, 0x016E0203, + 0x81C, 0x01700203, + 0x81C, 0x01720203, + 0x81C, 0x01740203, + 0x81C, 0x01760203, + 0x81C, 0x01780203, + 0x81C, 0x017A0203, + 0x81C, 0x017C0203, + 0x81C, 0x017E0203, + 0xA0000000, 0x00000000, + 0x81C, 0xFF000203, + 0x81C, 0xFF020203, + 0x81C, 0xFE040203, + 0x81C, 0xFD060203, + 0x81C, 0xFC080203, + 0x81C, 0xFB0A0203, + 0x81C, 0xFA0C0203, + 0x81C, 0xF90E0203, + 0x81C, 0xF8100203, + 0x81C, 0xF7120203, + 0x81C, 0xF6140203, + 0x81C, 0xF5160203, + 0x81C, 0xF4180203, + 0x81C, 0xF31A0203, + 0x81C, 0xF21C0203, + 0x81C, 0xF11E0203, + 0x81C, 0xF0200203, + 0x81C, 0xEF220203, + 0x81C, 0xEE240203, + 0x81C, 0xED260203, + 0x81C, 0xEC280203, + 0x81C, 0xEB2A0203, + 0x81C, 0xEA2C0203, + 0x81C, 0xE92E0203, + 0x81C, 0xE8300203, + 0x81C, 0xE7320203, + 0x81C, 0xE6340203, + 0x81C, 0xE5360203, + 0x81C, 0xE4380203, + 0x81C, 0xE33A0203, + 0x81C, 0xE23C0203, + 0x81C, 0xE13E0203, + 0x81C, 0xA4400203, + 0x81C, 0xA3420203, + 0x81C, 0xA2440203, + 0x81C, 0xA1460203, + 0x81C, 0x85480203, + 0x81C, 0x844A0203, + 0x81C, 0x834C0203, + 0x81C, 0x824E0203, + 0x81C, 0x81500203, + 0x81C, 0x64520203, + 0x81C, 0x63540203, + 0x81C, 0x62560203, + 0x81C, 0x61580203, + 0x81C, 0x445A0203, + 0x81C, 0x435C0203, + 0x81C, 0x425E0203, + 0x81C, 0x25600203, + 0x81C, 0x24620203, + 0x81C, 0x06640203, + 0x81C, 0x05660203, + 0x81C, 0x04680203, + 0x81C, 0x036A0203, + 0x81C, 0x026C0203, + 0x81C, 0x016E0203, + 0x81C, 0x01700203, + 0x81C, 0x01720203, + 0x81C, 0x01740203, + 0x81C, 0x01760203, + 0x81C, 0x01780203, + 0x81C, 0x017A0203, + 0x81C, 0x017C0203, + 0x81C, 0x017E0203, + 0xB0000000, 0x00000000, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x81C, 0xF8000303, + 0x81C, 0xF7020303, + 0x81C, 0xF6040303, + 0x81C, 0xF5060303, + 0x81C, 0xF4080303, + 0x81C, 0xF30A0303, + 0x81C, 0xF20C0303, + 0x81C, 0xF10E0303, + 0x81C, 0xF0100303, + 0x81C, 0xEF120303, + 0x81C, 0xEE140303, + 0x81C, 0xED160303, + 0x81C, 0xEC180303, + 0x81C, 0xEB1A0303, + 0x81C, 0xEA1C0303, + 0x81C, 0xE91E0303, + 0x81C, 0xE8200303, + 0x81C, 0xE7220303, + 0x81C, 0xE6240303, + 0x81C, 0xE5260303, + 0x81C, 0xE4280303, + 0x81C, 0xE32A0303, + 0x81C, 0xE22C0303, + 0x81C, 
0xE12E0303, + 0x81C, 0xA6300303, + 0x81C, 0xA5320303, + 0x81C, 0xA4340303, + 0x81C, 0xA3360303, + 0x81C, 0xA2380303, + 0x81C, 0xA13A0303, + 0x81C, 0x843C0303, + 0x81C, 0x833E0303, + 0x81C, 0x82400303, + 0x81C, 0x81420303, + 0x81C, 0x64440303, + 0x81C, 0x63460303, + 0x81C, 0x62480303, + 0x81C, 0x614A0303, + 0x81C, 0x454C0303, + 0x81C, 0x444E0303, + 0x81C, 0x43500303, + 0x81C, 0x42520303, + 0x81C, 0x41540303, + 0x81C, 0x24560303, + 0x81C, 0x23580303, + 0x81C, 0x065A0303, + 0x81C, 0x055C0303, + 0x81C, 0x045E0303, + 0x81C, 0x03600303, + 0x81C, 0x02620303, + 0x81C, 0x01640303, + 0x81C, 0x01660303, + 0x81C, 0x01680303, + 0x81C, 0x016A0303, + 0x81C, 0x016C0303, + 0x81C, 0x016E0303, + 0x81C, 0x01700303, + 0x81C, 0x01720303, + 0x81C, 0x01740303, + 0x81C, 0x01760303, + 0x81C, 0x01780303, + 0x81C, 0x017A0303, + 0x81C, 0x017C0303, + 0x81C, 0x017E0303, + 0x90000002, 0x00000000, 0x40000000, 0x00000000, + 0x81C, 0xF7000303, + 0x81C, 0xF6020303, + 0x81C, 0xF5040303, + 0x81C, 0xF4060303, + 0x81C, 0xF3080303, + 0x81C, 0xF20A0303, + 0x81C, 0xF10C0303, + 0x81C, 0xF00E0303, + 0x81C, 0xEF100303, + 0x81C, 0xEE120303, + 0x81C, 0xED140303, + 0x81C, 0xEC160303, + 0x81C, 0xEB180303, + 0x81C, 0xEA1A0303, + 0x81C, 0xE91C0303, + 0x81C, 0xE81E0303, + 0x81C, 0xE7200303, + 0x81C, 0xE6220303, + 0x81C, 0xE5240303, + 0x81C, 0xE4260303, + 0x81C, 0xE3280303, + 0x81C, 0xC32A0303, + 0x81C, 0xC22C0303, + 0x81C, 0xC12E0303, + 0x81C, 0xA5300303, + 0x81C, 0xA4320303, + 0x81C, 0xA3340303, + 0x81C, 0xA2360303, + 0x81C, 0xA1380303, + 0x81C, 0x853A0303, + 0x81C, 0x843C0303, + 0x81C, 0x833E0303, + 0x81C, 0x82400303, + 0x81C, 0x81420303, + 0x81C, 0x64440303, + 0x81C, 0x63460303, + 0x81C, 0x62480303, + 0x81C, 0x614A0303, + 0x81C, 0x454C0303, + 0x81C, 0x444E0303, + 0x81C, 0x43500303, + 0x81C, 0x25520303, + 0x81C, 0x24540303, + 0x81C, 0x23560303, + 0x81C, 0x06580303, + 0x81C, 0x055A0303, + 0x81C, 0x045C0303, + 0x81C, 0x035E0303, + 0x81C, 0x02600303, + 0x81C, 0x01620303, + 0x81C, 0x01640303, + 0x81C, 0x01660303, + 0x81C, 0x01680303, + 0x81C, 0x016A0303, + 0x81C, 0x016C0303, + 0x81C, 0x016E0303, + 0x81C, 0x01700303, + 0x81C, 0x01720303, + 0x81C, 0x01740303, + 0x81C, 0x01760303, + 0x81C, 0x01780303, + 0x81C, 0x017A0303, + 0x81C, 0x017C0303, + 0x81C, 0x017E0303, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x81C, 0xF9000303, + 0x81C, 0xF8020303, + 0x81C, 0xF7040303, + 0x81C, 0xF6060303, + 0x81C, 0xF5080303, + 0x81C, 0xF40A0303, + 0x81C, 0xF30C0303, + 0x81C, 0xF20E0303, + 0x81C, 0xF1100303, + 0x81C, 0xF0120303, + 0x81C, 0xEF140303, + 0x81C, 0xEE160303, + 0x81C, 0xED180303, + 0x81C, 0xEC1A0303, + 0x81C, 0xEB1C0303, + 0x81C, 0xEA1E0303, + 0x81C, 0xE9200303, + 0x81C, 0xE8220303, + 0x81C, 0xE7240303, + 0x81C, 0xE6260303, + 0x81C, 0xE5280303, + 0x81C, 0xE42A0303, + 0x81C, 0xE32C0303, + 0x81C, 0xE22E0303, + 0x81C, 0xE1300303, + 0x81C, 0xA4320303, + 0x81C, 0xA3340303, + 0x81C, 0xA2360303, + 0x81C, 0xA1380303, + 0x81C, 0x853A0303, + 0x81C, 0x843C0303, + 0x81C, 0x833E0303, + 0x81C, 0x82400303, + 0x81C, 0x81420303, + 0x81C, 0x64440303, + 0x81C, 0x63460303, + 0x81C, 0x62480303, + 0x81C, 0x614A0303, + 0x81C, 0x444C0303, + 0x81C, 0x434E0303, + 0x81C, 0x42500303, + 0x81C, 0x25520303, + 0x81C, 0x24540303, + 0x81C, 0x23560303, + 0x81C, 0x07580303, + 0x81C, 0x065A0303, + 0x81C, 0x055C0303, + 0x81C, 0x045E0303, + 0x81C, 0x03600303, + 0x81C, 0x02620303, + 0x81C, 0x01640303, + 0x81C, 0x01660303, + 0x81C, 0x01680303, + 0x81C, 0x016A0303, + 0x81C, 0x016C0303, + 0x81C, 0x016E0303, + 0x81C, 0x01700303, + 0x81C, 0x01720303, + 0x81C, 0x01740303, + 0x81C, 
0x01760303, + 0x81C, 0x01780303, + 0x81C, 0x017A0303, + 0x81C, 0x017C0303, + 0x81C, 0x017E0303, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x81C, 0xF7000303, + 0x81C, 0xF6020303, + 0x81C, 0xF5040303, + 0x81C, 0xF4060303, + 0x81C, 0xF3080303, + 0x81C, 0xF20A0303, + 0x81C, 0xF10C0303, + 0x81C, 0xF00E0303, + 0x81C, 0xEF100303, + 0x81C, 0xEE120303, + 0x81C, 0xED140303, + 0x81C, 0xEC160303, + 0x81C, 0xEB180303, + 0x81C, 0xEA1A0303, + 0x81C, 0xE91C0303, + 0x81C, 0xE81E0303, + 0x81C, 0xE7200303, + 0x81C, 0xE6220303, + 0x81C, 0xE5240303, + 0x81C, 0xE4260303, + 0x81C, 0xE3280303, + 0x81C, 0xE22A0303, + 0x81C, 0xE12C0303, + 0x81C, 0xA72E0303, + 0x81C, 0xA6300303, + 0x81C, 0xA5320303, + 0x81C, 0xA4340303, + 0x81C, 0xA3360303, + 0x81C, 0xA2380303, + 0x81C, 0xA13A0303, + 0x81C, 0x843C0303, + 0x81C, 0x833E0303, + 0x81C, 0x82400303, + 0x81C, 0x81420303, + 0x81C, 0x64440303, + 0x81C, 0x63460303, + 0x81C, 0x62480303, + 0x81C, 0x614A0303, + 0x81C, 0x454C0303, + 0x81C, 0x444E0303, + 0x81C, 0x43500303, + 0x81C, 0x42520303, + 0x81C, 0x41540303, + 0x81C, 0x24560303, + 0x81C, 0x23580303, + 0x81C, 0x065A0303, + 0x81C, 0x055C0303, + 0x81C, 0x045E0303, + 0x81C, 0x03600303, + 0x81C, 0x02620303, + 0x81C, 0x01640303, + 0x81C, 0x01660303, + 0x81C, 0x01680303, + 0x81C, 0x016A0303, + 0x81C, 0x016C0303, + 0x81C, 0x016E0303, + 0x81C, 0x01700303, + 0x81C, 0x01720303, + 0x81C, 0x01740303, + 0x81C, 0x01760303, + 0x81C, 0x01780303, + 0x81C, 0x017A0303, + 0x81C, 0x017C0303, + 0x81C, 0x017E0303, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x81C, 0xFB000303, + 0x81C, 0xFA020303, + 0x81C, 0xF9040303, + 0x81C, 0xF8060303, + 0x81C, 0xF7080303, + 0x81C, 0xF60A0303, + 0x81C, 0xF50C0303, + 0x81C, 0xF40E0303, + 0x81C, 0xF3100303, + 0x81C, 0xF2120303, + 0x81C, 0xF1140303, + 0x81C, 0xF0160303, + 0x81C, 0xEF180303, + 0x81C, 0xEE1A0303, + 0x81C, 0xED1C0303, + 0x81C, 0xEC1E0303, + 0x81C, 0xEB200303, + 0x81C, 0xEA220303, + 0x81C, 0xE9240303, + 0x81C, 0xE8260303, + 0x81C, 0xE7280303, + 0x81C, 0xE62A0303, + 0x81C, 0xE52C0303, + 0x81C, 0xE42E0303, + 0x81C, 0xE3300303, + 0x81C, 0xE2320303, + 0x81C, 0xE1340303, + 0x81C, 0xC2360303, + 0x81C, 0xC1380303, + 0x81C, 0xA33A0303, + 0x81C, 0xA23C0303, + 0x81C, 0x853E0303, + 0x81C, 0x84400303, + 0x81C, 0x83420303, + 0x81C, 0x66440303, + 0x81C, 0x65460303, + 0x81C, 0x64480303, + 0x81C, 0x634A0303, + 0x81C, 0x624C0303, + 0x81C, 0x614E0303, + 0x81C, 0x45500303, + 0x81C, 0x44520303, + 0x81C, 0x43540303, + 0x81C, 0x42560303, + 0x81C, 0x25580303, + 0x81C, 0x245A0303, + 0x81C, 0x235C0303, + 0x81C, 0x065E0303, + 0x81C, 0x05600303, + 0x81C, 0x04620303, + 0x81C, 0x03640303, + 0x81C, 0x02660303, + 0x81C, 0x01680303, + 0x81C, 0x016A0303, + 0x81C, 0x016C0303, + 0x81C, 0x016E0303, + 0x81C, 0x01700303, + 0x81C, 0x01720303, + 0x81C, 0x01740303, + 0x81C, 0x01760303, + 0x81C, 0x01780303, + 0x81C, 0x017A0303, + 0x81C, 0x017C0303, + 0x81C, 0x017E0303, + 0x90000006, 0x00000000, 0x40000000, 0x00000000, + 0x81C, 0xF9000303, + 0x81C, 0xF8020303, + 0x81C, 0xF7040303, + 0x81C, 0xF6060303, + 0x81C, 0xF5080303, + 0x81C, 0xF40A0303, + 0x81C, 0xF30C0303, + 0x81C, 0xF20E0303, + 0x81C, 0xF1100303, + 0x81C, 0xF0120303, + 0x81C, 0xEF140303, + 0x81C, 0xEE160303, + 0x81C, 0xED180303, + 0x81C, 0xEC1A0303, + 0x81C, 0xEB1C0303, + 0x81C, 0xEA1E0303, + 0x81C, 0xE9200303, + 0x81C, 0xE8220303, + 0x81C, 0xE7240303, + 0x81C, 0xE6260303, + 0x81C, 0xE5280303, + 0x81C, 0xE42A0303, + 0x81C, 0xE32C0303, + 0x81C, 0xE22E0303, + 0x81C, 0xE1300303, + 0x81C, 0xA6320303, + 0x81C, 0xA5340303, + 0x81C, 0xA4360303, + 0x81C, 0xA3380303, + 0x81C, 
0xA23A0303, + 0x81C, 0xA13C0303, + 0x81C, 0x853E0303, + 0x81C, 0x84400303, + 0x81C, 0x83420303, + 0x81C, 0x82440303, + 0x81C, 0x81460303, + 0x81C, 0x64480303, + 0x81C, 0x634A0303, + 0x81C, 0x624C0303, + 0x81C, 0x614E0303, + 0x81C, 0x44500303, + 0x81C, 0x43520303, + 0x81C, 0x42540303, + 0x81C, 0x41560303, + 0x81C, 0x25580303, + 0x81C, 0x245A0303, + 0x81C, 0x235C0303, + 0x81C, 0x055E0303, + 0x81C, 0x04600303, + 0x81C, 0x03620303, + 0x81C, 0x02640303, + 0x81C, 0x01660303, + 0x81C, 0x01680303, + 0x81C, 0x016A0303, + 0x81C, 0x016C0303, + 0x81C, 0x016E0303, + 0x81C, 0x01700303, + 0x81C, 0x01720303, + 0x81C, 0x01740303, + 0x81C, 0x01760303, + 0x81C, 0x01780303, + 0x81C, 0x017A0303, + 0x81C, 0x017C0303, + 0x81C, 0x017E0303, + 0x90000007, 0x00000000, 0x40000000, 0x00000000, + 0x81C, 0xFE000303, + 0x81C, 0xFD020303, + 0x81C, 0xFC040303, + 0x81C, 0xFB060303, + 0x81C, 0xFA080303, + 0x81C, 0xF90A0303, + 0x81C, 0xF80C0303, + 0x81C, 0xF70E0303, + 0x81C, 0xF6100303, + 0x81C, 0xF5120303, + 0x81C, 0xF4140303, + 0x81C, 0xF3160303, + 0x81C, 0xF2180303, + 0x81C, 0xF11A0303, + 0x81C, 0xF01C0303, + 0x81C, 0xEF1E0303, + 0x81C, 0xEE200303, + 0x81C, 0xED220303, + 0x81C, 0xEC240303, + 0x81C, 0xEB260303, + 0x81C, 0xEA280303, + 0x81C, 0xE92A0303, + 0x81C, 0xE82C0303, + 0x81C, 0xE72E0303, + 0x81C, 0xE6300303, + 0x81C, 0xE5320303, + 0x81C, 0xE4340303, + 0x81C, 0xE3360303, + 0x81C, 0xC3380303, + 0x81C, 0xC23A0303, + 0x81C, 0xC13C0303, + 0x81C, 0xA43E0303, + 0x81C, 0xA3400303, + 0x81C, 0xA2420303, + 0x81C, 0xA1440303, + 0x81C, 0x85460303, + 0x81C, 0x84480303, + 0x81C, 0x834A0303, + 0x81C, 0x824C0303, + 0x81C, 0x814E0303, + 0x81C, 0x64500303, + 0x81C, 0x63520303, + 0x81C, 0x62540303, + 0x81C, 0x61560303, + 0x81C, 0x44580303, + 0x81C, 0x435A0303, + 0x81C, 0x425C0303, + 0x81C, 0x265E0303, + 0x81C, 0x25600303, + 0x81C, 0x24620303, + 0x81C, 0x06640303, + 0x81C, 0x05660303, + 0x81C, 0x04680303, + 0x81C, 0x036A0303, + 0x81C, 0x026C0303, + 0x81C, 0x016E0303, + 0x81C, 0x01700303, + 0x81C, 0x01720303, + 0x81C, 0x01740303, + 0x81C, 0x01760303, + 0x81C, 0x01780303, + 0x81C, 0x017A0303, + 0x81C, 0x017C0303, + 0x81C, 0x017E0303, + 0x90000008, 0x00000000, 0x40000000, 0x00000000, + 0x81C, 0xF7000303, + 0x81C, 0xF6020303, + 0x81C, 0xF5040303, + 0x81C, 0xF4060303, + 0x81C, 0xF3080303, + 0x81C, 0xF20A0303, + 0x81C, 0xF10C0303, + 0x81C, 0xF00E0303, + 0x81C, 0xEF100303, + 0x81C, 0xEE120303, + 0x81C, 0xED140303, + 0x81C, 0xEC160303, + 0x81C, 0xEB180303, + 0x81C, 0xEA1A0303, + 0x81C, 0xE91C0303, + 0x81C, 0xE81E0303, + 0x81C, 0xE7200303, + 0x81C, 0xE6220303, + 0x81C, 0xE5240303, + 0x81C, 0xE4260303, + 0x81C, 0xE3280303, + 0x81C, 0xE22A0303, + 0x81C, 0xA62C0303, + 0x81C, 0xA52E0303, + 0x81C, 0xA4300303, + 0x81C, 0xA3320303, + 0x81C, 0xA2340303, + 0x81C, 0x87360303, + 0x81C, 0x86380303, + 0x81C, 0x853A0303, + 0x81C, 0x843C0303, + 0x81C, 0x833E0303, + 0x81C, 0x66400303, + 0x81C, 0x65420303, + 0x81C, 0x64440303, + 0x81C, 0x45460303, + 0x81C, 0x44480303, + 0x81C, 0x434A0303, + 0x81C, 0x274C0303, + 0x81C, 0x264E0303, + 0x81C, 0x25500303, + 0x81C, 0x24520303, + 0x81C, 0x23540303, + 0x81C, 0x08560303, + 0x81C, 0x07580303, + 0x81C, 0x065A0303, + 0x81C, 0x055C0303, + 0x81C, 0x045E0303, + 0x81C, 0x03600303, + 0x81C, 0x02620303, + 0x81C, 0x01640303, + 0x81C, 0x01660303, + 0x81C, 0x01680303, + 0x81C, 0x016A0303, + 0x81C, 0x016C0303, + 0x81C, 0x016E0303, + 0x81C, 0x01700303, + 0x81C, 0x01720303, + 0x81C, 0x01740303, + 0x81C, 0x01760303, + 0x81C, 0x01780303, + 0x81C, 0x017A0303, + 0x81C, 0x017C0303, + 0x81C, 0x017E0303, + 0x90000009, 0x00000000, 0x40000000, 
0x00000000, + 0x81C, 0xFE000303, + 0x81C, 0xFD020303, + 0x81C, 0xFC040303, + 0x81C, 0xFB060303, + 0x81C, 0xFA080303, + 0x81C, 0xF90A0303, + 0x81C, 0xF80C0303, + 0x81C, 0xF70E0303, + 0x81C, 0xF6100303, + 0x81C, 0xF5120303, + 0x81C, 0xF4140303, + 0x81C, 0xF3160303, + 0x81C, 0xF2180303, + 0x81C, 0xF11A0303, + 0x81C, 0xF01C0303, + 0x81C, 0xEF1E0303, + 0x81C, 0xEE200303, + 0x81C, 0xED220303, + 0x81C, 0xEC240303, + 0x81C, 0xEB260303, + 0x81C, 0xEA280303, + 0x81C, 0xE92A0303, + 0x81C, 0xE82C0303, + 0x81C, 0xE72E0303, + 0x81C, 0xE6300303, + 0x81C, 0xE5320303, + 0x81C, 0xE4340303, + 0x81C, 0xE3360303, + 0x81C, 0xC3380303, + 0x81C, 0xC23A0303, + 0x81C, 0xC13C0303, + 0x81C, 0xA43E0303, + 0x81C, 0xA3400303, + 0x81C, 0xA2420303, + 0x81C, 0xA1440303, + 0x81C, 0x85460303, + 0x81C, 0x84480303, + 0x81C, 0x834A0303, + 0x81C, 0x824C0303, + 0x81C, 0x814E0303, + 0x81C, 0x64500303, + 0x81C, 0x63520303, + 0x81C, 0x62540303, + 0x81C, 0x61560303, + 0x81C, 0x44580303, + 0x81C, 0x435A0303, + 0x81C, 0x425C0303, + 0x81C, 0x265E0303, + 0x81C, 0x25600303, + 0x81C, 0x24620303, + 0x81C, 0x06640303, + 0x81C, 0x05660303, + 0x81C, 0x04680303, + 0x81C, 0x036A0303, + 0x81C, 0x026C0303, + 0x81C, 0x016E0303, + 0x81C, 0x01700303, + 0x81C, 0x01720303, + 0x81C, 0x01740303, + 0x81C, 0x01760303, + 0x81C, 0x01780303, + 0x81C, 0x017A0303, + 0x81C, 0x017C0303, + 0x81C, 0x017E0303, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x81C, 0xF7000303, + 0x81C, 0xF6020303, + 0x81C, 0xF5040303, + 0x81C, 0xF4060303, + 0x81C, 0xF3080303, + 0x81C, 0xF20A0303, + 0x81C, 0xF10C0303, + 0x81C, 0xF00E0303, + 0x81C, 0xEF100303, + 0x81C, 0xEE120303, + 0x81C, 0xED140303, + 0x81C, 0xEC160303, + 0x81C, 0xEB180303, + 0x81C, 0xEA1A0303, + 0x81C, 0xE91C0303, + 0x81C, 0xE81E0303, + 0x81C, 0xE7200303, + 0x81C, 0xE6220303, + 0x81C, 0xE5240303, + 0x81C, 0xE4260303, + 0x81C, 0xE3280303, + 0x81C, 0xE22A0303, + 0x81C, 0xE12C0303, + 0x81C, 0xA72E0303, + 0x81C, 0xA6300303, + 0x81C, 0xA5320303, + 0x81C, 0xA4340303, + 0x81C, 0xA3360303, + 0x81C, 0xA2380303, + 0x81C, 0xA13A0303, + 0x81C, 0x843C0303, + 0x81C, 0x833E0303, + 0x81C, 0x82400303, + 0x81C, 0x81420303, + 0x81C, 0x64440303, + 0x81C, 0x63460303, + 0x81C, 0x62480303, + 0x81C, 0x614A0303, + 0x81C, 0x454C0303, + 0x81C, 0x444E0303, + 0x81C, 0x43500303, + 0x81C, 0x42520303, + 0x81C, 0x41540303, + 0x81C, 0x24560303, + 0x81C, 0x23580303, + 0x81C, 0x065A0303, + 0x81C, 0x055C0303, + 0x81C, 0x045E0303, + 0x81C, 0x03600303, + 0x81C, 0x02620303, + 0x81C, 0x01640303, + 0x81C, 0x01660303, + 0x81C, 0x01680303, + 0x81C, 0x016A0303, + 0x81C, 0x016C0303, + 0x81C, 0x016E0303, + 0x81C, 0x01700303, + 0x81C, 0x01720303, + 0x81C, 0x01740303, + 0x81C, 0x01760303, + 0x81C, 0x01780303, + 0x81C, 0x017A0303, + 0x81C, 0x017C0303, + 0x81C, 0x017E0303, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0x81C, 0xF7000303, + 0x81C, 0xF6020303, + 0x81C, 0xF5040303, + 0x81C, 0xF4060303, + 0x81C, 0xF3080303, + 0x81C, 0xF20A0303, + 0x81C, 0xF10C0303, + 0x81C, 0xF00E0303, + 0x81C, 0xEF100303, + 0x81C, 0xEE120303, + 0x81C, 0xED140303, + 0x81C, 0xEC160303, + 0x81C, 0xEB180303, + 0x81C, 0xEA1A0303, + 0x81C, 0xAF1C0303, + 0x81C, 0xAE1E0303, + 0x81C, 0xAD200303, + 0x81C, 0xAC220303, + 0x81C, 0xAB240303, + 0x81C, 0xAA260303, + 0x81C, 0xC5280303, + 0x81C, 0xC42A0303, + 0x81C, 0xC32C0303, + 0x81C, 0xC22E0303, + 0x81C, 0xA5300303, + 0x81C, 0xA4320303, + 0x81C, 0xA3340303, + 0x81C, 0xA2360303, + 0x81C, 0xA1380303, + 0x81C, 0x853A0303, + 0x81C, 0x843C0303, + 0x81C, 0x833E0303, + 0x81C, 0x82400303, + 0x81C, 0x81420303, + 0x81C, 0x64440303, + 0x81C, 
0x63460303, + 0x81C, 0x62480303, + 0x81C, 0x614A0303, + 0x81C, 0x444C0303, + 0x81C, 0x434E0303, + 0x81C, 0x42500303, + 0x81C, 0x41520303, + 0x81C, 0x25540303, + 0x81C, 0x24560303, + 0x81C, 0x06580303, + 0x81C, 0x055A0303, + 0x81C, 0x045C0303, + 0x81C, 0x035E0303, + 0x81C, 0x02600303, + 0x81C, 0x01620303, + 0x81C, 0x01640303, + 0x81C, 0x01660303, + 0x81C, 0x01680303, + 0x81C, 0x016A0303, + 0x81C, 0x016C0303, + 0x81C, 0x016E0303, + 0x81C, 0x01700303, + 0x81C, 0x01720303, + 0x81C, 0x01740303, + 0x81C, 0x01760303, + 0x81C, 0x01780303, + 0x81C, 0x017A0303, + 0x81C, 0x017C0303, + 0x81C, 0x017E0303, + 0xA0000000, 0x00000000, + 0x81C, 0xFD000303, + 0x81C, 0xFC020303, + 0x81C, 0xFB040303, + 0x81C, 0xFA060303, + 0x81C, 0xF9080303, + 0x81C, 0xF80A0303, + 0x81C, 0xF70C0303, + 0x81C, 0xF60E0303, + 0x81C, 0xF5100303, + 0x81C, 0xF4120303, + 0x81C, 0xF3140303, + 0x81C, 0xF2160303, + 0x81C, 0xF1180303, + 0x81C, 0xF01A0303, + 0x81C, 0xEF1C0303, + 0x81C, 0xEE1E0303, + 0x81C, 0xED200303, + 0x81C, 0xEC220303, + 0x81C, 0xEB240303, + 0x81C, 0xEA260303, + 0x81C, 0xE9280303, + 0x81C, 0xE82A0303, + 0x81C, 0xE72C0303, + 0x81C, 0xE62E0303, + 0x81C, 0xE5300303, + 0x81C, 0xE4320303, + 0x81C, 0xE3340303, + 0x81C, 0xE2360303, + 0x81C, 0xE1380303, + 0x81C, 0xA53A0303, + 0x81C, 0xA43C0303, + 0x81C, 0xA33E0303, + 0x81C, 0xA2400303, + 0x81C, 0xA1420303, + 0x81C, 0x87440303, + 0x81C, 0x86460303, + 0x81C, 0x85480303, + 0x81C, 0x844A0303, + 0x81C, 0x834C0303, + 0x81C, 0x824E0303, + 0x81C, 0x81500303, + 0x81C, 0x64520303, + 0x81C, 0x63540303, + 0x81C, 0x62560303, + 0x81C, 0x61580303, + 0x81C, 0x435A0303, + 0x81C, 0x425C0303, + 0x81C, 0x415E0303, + 0x81C, 0x07600303, + 0x81C, 0x06620303, + 0x81C, 0x05640303, + 0x81C, 0x04660303, + 0x81C, 0x03680303, + 0x81C, 0x026A0303, + 0x81C, 0x016C0303, + 0x81C, 0x016E0303, + 0x81C, 0x01700303, + 0x81C, 0x01720303, + 0x81C, 0x01740303, + 0x81C, 0x01760303, + 0x81C, 0x01780303, + 0x81C, 0x017A0303, + 0x81C, 0x017C0303, + 0x81C, 0x017E0303, + 0xB0000000, 0x00000000, + 0xC50, 0x00000022, + 0xC50, 0x00000020, + 0xE50, 0x00000022, + 0xE50, 0x00000020, + 0x1850, 0x00000022, + 0x1850, 0x00000020, + 0x1A50, 0x00000022, + 0x1A50, 0x00000020, +}; + +RTW_DECL_TABLE_PHY_COND(rtw8814a_agc, rtw_phy_cfg_agc); + +static const u32 rtw8814a_bb[] = { + 0x800, 0x9020D010, + 0x804, 0x08011280, + 0x808, 0x0E0282FF, + 0x80C, 0x1000002F, + 0x80000003, 0x00000000, 0x40000000, 0x00000000, + 0x810, 0x33303265, + 0xA0000000, 0x00000000, + 0x810, 0x33303265, + 0xB0000000, 0x00000000, + 0x814, 0x020C3D10, + 0x818, 0x04A10385, + 0x820, 0x00000000, + 0x824, 0x00033E40, + 0x828, 0x00000000, + 0x82C, 0x73985170, + 0x830, 0x79A0EA08, + 0x834, 0x042E708A, + 0x80000002, 0x00000000, 0x40000000, 0x00000000, + 0x838, 0x86667640, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x838, 0x86667641, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x838, 0x86667641, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0x838, 0x86667641, + 0xA0000000, 0x00000000, + 0x838, 0x86667640, + 0xB0000000, 0x00000000, + 0x83C, 0x9798B9B9, + 0x840, 0x17578F60, + 0x844, 0x4BBDFCDE, + 0x848, 0x5CD07F8B, + 0x84C, 0x6CFBF7B5, + 0x850, 0x28834706, + 0x854, 0x0001520C, + 0x858, 0x4060C000, + 0x85C, 0x74210368, + 0x860, 0x6929C321, + 0x864, 0x79727432, + 0x868, 0x8CA7A314, + 0x86C, 0x438C2878, + 0x870, 0x44444444, + 0x874, 0x21612C2E, + 0x878, 0x00003152, + 0x87C, 0x000FC000, + 0x8A0, 0x00000013, + 0x8A4, 0x7F7F7F7F, + 0x8A8, 0xA202033E, + 0x8AC, 0xF40F550A, + 0x8B0, 0x00000600, + 0x8B4, 0x000FC080, + 0x8B8, 0xEC0057FF, + 0x8BC, 0x8CA520C3, + 
0x8C0, 0x3FF00020, + 0x8C4, 0x44C00000, + 0x80000009, 0x00000000, 0x40000000, 0x00000000, + 0x8C8, 0x80025969, + 0xA0000000, 0x00000000, + 0x8C8, 0x80025167, + 0xB0000000, 0x00000000, + 0x8CC, 0x08250492, + 0x8D0, 0x0000B800, + 0x8D4, 0x940008A0, + 0x8D8, 0x290B5612, + 0x8DC, 0x00000000, + 0x8E0, 0x32316407, + 0x8E4, 0x4A092925, + 0x8E8, 0xFFFFC42C, + 0x8EC, 0x99999999, + 0x8F0, 0x00009999, + 0x8F4, 0x00F80FA1, + 0x8F8, 0x400082C0, + 0x8FC, 0x00000000, + 0x900, 0x00400700, + 0x90C, 0x09004000, + 0x910, 0x0000FC00, + 0x914, 0xD6400404, + 0x918, 0x1C1028C0, + 0x91C, 0x64B11A1C, + 0x920, 0xE0767233, + 0x924, 0x055AA500, + 0x928, 0x4AB0E4E4, + 0x92C, 0xFFFE0000, + 0x930, 0xFFFFFFFE, + 0x934, 0x001FFFFF, + 0x938, 0x00008400, + 0x93C, 0x932C0642, + 0x940, 0x093E9360, + 0x944, 0x08000000, + 0x948, 0x04000000, + 0x950, 0x02010080, + 0x954, 0x86510080, + 0x960, 0x00000000, + 0x964, 0x00000000, + 0x968, 0x00000000, + 0x96C, 0x00000000, + 0x970, 0x801FFFFF, + 0x978, 0x00000000, + 0x97C, 0x00000000, + 0x980, 0x00000000, + 0x984, 0x00000000, + 0x988, 0x00000000, + 0x98C, 0x03440000, + 0x990, 0x27100000, + 0x994, 0xFFFF0100, + 0x998, 0xFFFFFF5C, + 0x99C, 0xFFFFFFFF, + 0x9A0, 0x000000FF, + 0x9A4, 0x00080080, + 0x9A8, 0x0C2F0000, + 0x9AC, 0x00560000, + 0x9B0, 0x81081008, + 0x9B4, 0x00000000, + 0x9B8, 0x01081008, + 0x9BC, 0x01081008, + 0x9D0, 0x00000000, + 0x9D4, 0x00000000, + 0x9D8, 0x00000000, + 0x9DC, 0x00000000, + 0x9E4, 0x00000002, + 0x9E8, 0x000022D5, + 0x9FC, 0xEFFFF7FF, + 0xB00, 0xE3100000, + 0xB04, 0x0000B000, + 0xB0C, 0x31EAA006, + 0xB5C, 0x41CFFFFF, + 0xC00, 0x00000007, + 0xC04, 0x00042020, + 0xC08, 0x80410231, + 0xC0C, 0x00000000, + 0xC10, 0x00000100, + 0xC14, 0x01000000, + 0xC1C, 0x40000053, + 0xC50, 0x00000020, + 0xC54, 0x00000000, + 0x80000002, 0x00000000, 0x40000000, 0x00000000, + 0xC58, 0x3C0A0C14, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0xC58, 0x3C0A0C14, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0xC58, 0x3C0A0C14, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0xC58, 0x3C0A0C14, + 0xA0000000, 0x00000000, + 0xC58, 0x3C020C14, + 0xB0000000, 0x00000000, + 0xC5C, 0x0D000058, + 0xC60, 0x1B800000, + 0xC60, 0x0B800001, + 0xC60, 0x05800002, + 0xC60, 0x07800003, + 0xC60, 0x1A800004, + 0xC60, 0x0B800005, + 0xC60, 0x05800006, + 0xC60, 0x0E800007, + 0xC60, 0x1A800008, + 0xC60, 0x0B800009, + 0xC60, 0x1580000A, + 0xC60, 0x0880000B, + 0xC60, 0x1A80000C, + 0xC60, 0x0B80000D, + 0xC60, 0x0580000E, + 0xC60, 0x0E80000F, + 0xC60, 0x1A800010, + 0xC60, 0x0B800011, + 0xC60, 0x15800012, + 0xC60, 0x08800013, + 0xC60, 0x1A800014, + 0xC60, 0x0B800015, + 0xC60, 0x05800016, + 0xC60, 0x07800017, + 0xC60, 0x1A800018, + 0xC60, 0x0B800019, + 0xC60, 0x1580001A, + 0xC60, 0x0880001B, + 0xC60, 0x1B80001C, + 0xC60, 0x0B80001D, + 0xC60, 0x0580001E, + 0xC60, 0x0780001F, + 0xC60, 0x1B800020, + 0xC60, 0x0B800021, + 0xC60, 0x05800022, + 0xC60, 0x07800023, + 0xC60, 0x1B800024, + 0xC60, 0x0B800025, + 0xC60, 0x05800026, + 0xC60, 0x07800027, + 0xC60, 0x1B800028, + 0xC60, 0x0B800029, + 0xC60, 0x0580002A, + 0xC60, 0x0780002B, + 0xC60, 0x1B800030, + 0xC60, 0x0B800031, + 0xC60, 0x05800032, + 0xC60, 0x00800033, + 0xC60, 0x1B800034, + 0xC60, 0x0B800035, + 0xC60, 0x05800036, + 0xC60, 0x00800037, + 0xC60, 0x1B800038, + 0xC60, 0x0B800039, + 0xC60, 0x0580003A, + 0xC60, 0x0E80803B, + 0xC94, 0x01000401, + 0xC98, 0x00188000, + 0xCA0, 0x00002929, + 0xCA4, 0x08040201, + 0xCA8, 0x80402010, + 0xCAC, 0x77777000, + 0xCB0, 0x54775477, + 0xCB4, 0x54775477, + 0xCB8, 0x00500000, + 0xCBC, 0x77700000, + 0xCC0, 0x00000010, + 0xCC8, 
0x00000010, + 0xE00, 0x00000007, + 0xE04, 0x00042020, + 0xE08, 0x80410231, + 0xE0C, 0x00000000, + 0xE10, 0x00000100, + 0xE14, 0x01000000, + 0xE1C, 0x40000053, + 0xE50, 0x00000020, + 0xE54, 0x00000000, + 0x80000002, 0x00000000, 0x40000000, 0x00000000, + 0xE58, 0x3C0A0C14, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0xE58, 0x3C0A0C14, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0xE58, 0x3C0A0C14, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0xE58, 0x3C0A0C14, + 0xA0000000, 0x00000000, + 0xE58, 0x3C020C14, + 0xB0000000, 0x00000000, + 0xE5C, 0x0D000058, + 0xE60, 0x1B800000, + 0xE60, 0x0B800001, + 0xE60, 0x05800002, + 0xE60, 0x07800003, + 0xE60, 0x1A800004, + 0xE60, 0x0B800005, + 0xE60, 0x05800006, + 0xE60, 0x0E800007, + 0xE60, 0x1A800008, + 0xE60, 0x0B800009, + 0xE60, 0x1580000A, + 0xE60, 0x0880000B, + 0xE60, 0x1A80000C, + 0xE60, 0x0B80000D, + 0xE60, 0x0580000E, + 0xE60, 0x0E80000F, + 0xE60, 0x1A800010, + 0xE60, 0x0B800011, + 0xE60, 0x15800012, + 0xE60, 0x08800013, + 0xE60, 0x1A800014, + 0xE60, 0x0B800015, + 0xE60, 0x05800016, + 0xE60, 0x07800017, + 0xE60, 0x1A800018, + 0xE60, 0x0B800019, + 0xE60, 0x1580001A, + 0xE60, 0x0880001B, + 0xE60, 0x1B80001C, + 0xE60, 0x0B80001D, + 0xE60, 0x0580001E, + 0xE60, 0x0780001F, + 0xE60, 0x1B800020, + 0xE60, 0x0B800021, + 0xE60, 0x05800022, + 0xE60, 0x07800023, + 0xE60, 0x1B800024, + 0xE60, 0x0B800025, + 0xE60, 0x05800026, + 0xE60, 0x07800027, + 0xE60, 0x1B800028, + 0xE60, 0x0B800029, + 0xE60, 0x0580002A, + 0xE60, 0x0780002B, + 0xE60, 0x1B800030, + 0xE60, 0x0B800031, + 0xE60, 0x05800032, + 0xE60, 0x00800033, + 0xE60, 0x1B800034, + 0xE60, 0x0B800035, + 0xE60, 0x05800036, + 0xE60, 0x00800037, + 0xE60, 0x1B800038, + 0xE60, 0x0B800039, + 0xE60, 0x0580003A, + 0xE60, 0x0E80803B, + 0xE94, 0x01000401, + 0xE98, 0x00188000, + 0xEA0, 0x00002929, + 0xEA4, 0x08040201, + 0xEA8, 0x80402010, + 0xEAC, 0x77777000, + 0xEB0, 0x54775477, + 0xEB4, 0x54775477, + 0xEB8, 0x00500000, + 0xEBC, 0x77700000, + 0x1800, 0x00000007, + 0x1804, 0x00042020, + 0x1808, 0x80410231, + 0x180C, 0x00000000, + 0x1810, 0x00000100, + 0x1814, 0x01000000, + 0x181C, 0x40000053, + 0x1850, 0x00000020, + 0x1854, 0x00000000, + 0x80000002, 0x00000000, 0x40000000, 0x00000000, + 0x1858, 0x3C0A0C14, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x1858, 0x3C0A0C14, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x1858, 0x3C0A0C14, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0x1858, 0x3C0A0C14, + 0xA0000000, 0x00000000, + 0x1858, 0x3C020C14, + 0xB0000000, 0x00000000, + 0x185C, 0x0D000058, + 0x1860, 0x1B800000, + 0x1860, 0x0B800001, + 0x1860, 0x05800002, + 0x1860, 0x07800003, + 0x1860, 0x1A800004, + 0x1860, 0x0B800005, + 0x1860, 0x05800006, + 0x1860, 0x0E800007, + 0x1860, 0x1A800008, + 0x1860, 0x0B800009, + 0x1860, 0x1580000A, + 0x1860, 0x0880000B, + 0x1860, 0x1A80000C, + 0x1860, 0x0B80000D, + 0x1860, 0x0580000E, + 0x1860, 0x0E80000F, + 0x1860, 0x1A800010, + 0x1860, 0x0B800011, + 0x1860, 0x15800012, + 0x1860, 0x08800013, + 0x1860, 0x1A800014, + 0x1860, 0x0B800015, + 0x1860, 0x05800016, + 0x1860, 0x07800017, + 0x1860, 0x1A800018, + 0x1860, 0x0B800019, + 0x1860, 0x1580001A, + 0x1860, 0x0880001B, + 0x1860, 0x1B80001C, + 0x1860, 0x0B80001D, + 0x1860, 0x0580001E, + 0x1860, 0x0780001F, + 0x1860, 0x1B800020, + 0x1860, 0x0B800021, + 0x1860, 0x05800022, + 0x1860, 0x07800023, + 0x1860, 0x1B800024, + 0x1860, 0x0B800025, + 0x1860, 0x05800026, + 0x1860, 0x07800027, + 0x1860, 0x1B800028, + 0x1860, 0x0B800029, + 0x1860, 0x0580002A, + 0x1860, 0x0780002B, + 0x1860, 0x1B800030, + 0x1860, 
0x0B800031, + 0x1860, 0x05800032, + 0x1860, 0x00800033, + 0x1860, 0x1B800034, + 0x1860, 0x0B800035, + 0x1860, 0x05800036, + 0x1860, 0x00800037, + 0x1860, 0x1B800038, + 0x1860, 0x0B800039, + 0x1860, 0x0580003A, + 0x1860, 0x0E80803B, + 0x1894, 0x01000401, + 0x1898, 0x00188000, + 0x18A0, 0x00002929, + 0x18A4, 0x08040201, + 0x18A8, 0x80402010, + 0x18AC, 0x77777000, + 0x18B0, 0x54775477, + 0x18B4, 0x54775477, + 0x18B8, 0x00500000, + 0x18BC, 0x77700000, + 0x1A00, 0x00000007, + 0x1A04, 0x00042020, + 0x1A08, 0x80410231, + 0x1A0C, 0x00000000, + 0x1A10, 0x00000100, + 0x1A14, 0x01000000, + 0x1A1C, 0x40000053, + 0x1A50, 0x00000020, + 0x1A54, 0x00000000, + 0x80000002, 0x00000000, 0x40000000, 0x00000000, + 0x1A58, 0x3C0A0C14, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x1A58, 0x3C0A0C14, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x1A58, 0x3C0A0C14, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0x1A58, 0x3C0A0C14, + 0xA0000000, 0x00000000, + 0x1A58, 0x3C020C14, + 0xB0000000, 0x00000000, + 0x1A5C, 0x0D000058, + 0x1A60, 0x1B800000, + 0x1A60, 0x0B800001, + 0x1A60, 0x05800002, + 0x1A60, 0x07800003, + 0x1A60, 0x1A800004, + 0x1A60, 0x0B800005, + 0x1A60, 0x05800006, + 0x1A60, 0x0E800007, + 0x1A60, 0x1A800008, + 0x1A60, 0x0B800009, + 0x1A60, 0x1580000A, + 0x1A60, 0x0880000B, + 0x1A60, 0x1A80000C, + 0x1A60, 0x0B80000D, + 0x1A60, 0x0580000E, + 0x1A60, 0x0E80000F, + 0x1A60, 0x1A800010, + 0x1A60, 0x0B800011, + 0x1A60, 0x15800012, + 0x1A60, 0x08800013, + 0x1A60, 0x1A800014, + 0x1A60, 0x0B800015, + 0x1A60, 0x05800016, + 0x1A60, 0x07800017, + 0x1A60, 0x1A800018, + 0x1A60, 0x0B800019, + 0x1A60, 0x1580001A, + 0x1A60, 0x0880001B, + 0x1A60, 0x1B80001C, + 0x1A60, 0x0B80001D, + 0x1A60, 0x0580001E, + 0x1A60, 0x0780001F, + 0x1A60, 0x1B800020, + 0x1A60, 0x0B800021, + 0x1A60, 0x05800022, + 0x1A60, 0x07800023, + 0x1A60, 0x1B800024, + 0x1A60, 0x0B800025, + 0x1A60, 0x05800026, + 0x1A60, 0x07800027, + 0x1A60, 0x1B800028, + 0x1A60, 0x0B800029, + 0x1A60, 0x0580002A, + 0x1A60, 0x0780002B, + 0x1A60, 0x1B800030, + 0x1A60, 0x0B800031, + 0x1A60, 0x05800032, + 0x1A60, 0x00800033, + 0x1A60, 0x1B800034, + 0x1A60, 0x0B800035, + 0x1A60, 0x05800036, + 0x1A60, 0x00800037, + 0x1A60, 0x1B800038, + 0x1A60, 0x0B800039, + 0x1A60, 0x0580003A, + 0x1A60, 0x0E80803B, + 0x1A94, 0x01000401, + 0x1A98, 0x00188000, + 0x1AA0, 0x00002929, + 0x1AA4, 0x08040201, + 0x1AA8, 0x80402010, + 0x1AAC, 0x77777000, + 0x1AB0, 0x54775477, + 0x1AB4, 0x54775477, + 0x1AB8, 0x00500000, + 0x1ABC, 0x77700000, + 0x1904, 0x00030000, + 0x1914, 0x00030000, + 0x1984, 0x03000000, + 0x1988, 0x00000087, + 0x198C, 0x00000007, + 0x1990, 0xFFAA5500, + 0x1994, 0x00000077, + 0x1998, 0x12801000, + 0x1998, 0x12801000, + 0x1998, 0x12801001, + 0x1998, 0x12801002, + 0x1998, 0x12801003, + 0x1998, 0x12801004, + 0x1998, 0x12801005, + 0x1998, 0x12801006, + 0x1998, 0x12801007, + 0x1998, 0x12801008, + 0x1998, 0x12801009, + 0x1998, 0x1280100A, + 0x1998, 0x1280100B, + 0x1998, 0x1280100C, + 0x1998, 0x1280100D, + 0x1998, 0x1280100E, + 0x1998, 0x1280100F, + 0x1998, 0x12801010, + 0x1998, 0x12801011, + 0x1998, 0x12801012, + 0x1998, 0x12801013, + 0x1998, 0x12801014, + 0x1998, 0x12801015, + 0x1998, 0x12801016, + 0x1998, 0x12801017, + 0x1998, 0x12801018, + 0x1998, 0x12801019, + 0x1998, 0x1280101A, + 0x1998, 0x1280101B, + 0x1998, 0x1280101C, + 0x1998, 0x1280101D, + 0x1998, 0x1280101E, + 0x1998, 0x1280101F, + 0x1998, 0x12801020, + 0x1998, 0x12801021, + 0x1998, 0x12801022, + 0x1998, 0x12801023, + 0x1998, 0x1280102C, + 0x1998, 0x1280102D, + 0x1998, 0x1280102E, + 0x1998, 0x1280102F, + 0x1998, 
0x12801030, + 0x1998, 0x12801031, + 0x1998, 0x12801032, + 0x1998, 0x12801033, + 0x1998, 0x12801034, + 0x1998, 0x12801035, + 0x1998, 0x12801036, + 0x1998, 0x12801037, + 0x1998, 0x12801038, + 0x1998, 0x12801039, + 0x1998, 0x1280103A, + 0x1998, 0x1280103B, + 0x1998, 0x1280103C, + 0x1998, 0x1280103D, + 0x1998, 0x1280103E, + 0x1998, 0x1280103F, + 0x1998, 0x12801040, + 0x1998, 0x12801041, + 0x1998, 0x12801042, + 0x1998, 0x12801043, + 0x1998, 0x12801044, + 0x1998, 0x12801045, + 0x1998, 0x12801046, + 0x1998, 0x12801047, + 0x1998, 0x12801048, + 0x1998, 0x12801049, + 0x1998, 0x12801100, + 0x1998, 0x12801101, + 0x1998, 0x12801102, + 0x1998, 0x12801103, + 0x1998, 0x12801104, + 0x1998, 0x12801105, + 0x1998, 0x12801106, + 0x1998, 0x12801107, + 0x1998, 0x12801108, + 0x1998, 0x12801109, + 0x1998, 0x1280110A, + 0x1998, 0x1280110B, + 0x1998, 0x1280110C, + 0x1998, 0x1280110D, + 0x1998, 0x1280110E, + 0x1998, 0x1280110F, + 0x1998, 0x12801110, + 0x1998, 0x12801111, + 0x1998, 0x12801112, + 0x1998, 0x12801113, + 0x1998, 0x12801114, + 0x1998, 0x12801115, + 0x1998, 0x12801116, + 0x1998, 0x12801117, + 0x1998, 0x12801118, + 0x1998, 0x12801119, + 0x1998, 0x1280111A, + 0x1998, 0x1280111B, + 0x1998, 0x1280111C, + 0x1998, 0x1280111D, + 0x1998, 0x1280111E, + 0x1998, 0x1280111F, + 0x1998, 0x12801120, + 0x1998, 0x12801121, + 0x1998, 0x12801122, + 0x1998, 0x12801123, + 0x1998, 0x1280112C, + 0x1998, 0x1280112D, + 0x1998, 0x1280112E, + 0x1998, 0x1280112F, + 0x1998, 0x12801130, + 0x1998, 0x12801131, + 0x1998, 0x12801132, + 0x1998, 0x12801133, + 0x1998, 0x12801134, + 0x1998, 0x12801135, + 0x1998, 0x12801136, + 0x1998, 0x12801137, + 0x1998, 0x12801138, + 0x1998, 0x12801139, + 0x1998, 0x1280113A, + 0x1998, 0x1280113B, + 0x1998, 0x1280113C, + 0x1998, 0x1280113D, + 0x1998, 0x1280113E, + 0x1998, 0x1280113F, + 0x1998, 0x12801140, + 0x1998, 0x12801141, + 0x1998, 0x12801142, + 0x1998, 0x12801143, + 0x1998, 0x12801144, + 0x1998, 0x12801145, + 0x1998, 0x12801146, + 0x1998, 0x12801147, + 0x1998, 0x12801148, + 0x1998, 0x12801149, + 0x1998, 0x12801200, + 0x1998, 0x12801201, + 0x1998, 0x12801202, + 0x1998, 0x12801203, + 0x1998, 0x12801204, + 0x1998, 0x12801205, + 0x1998, 0x12801206, + 0x1998, 0x12801207, + 0x1998, 0x12801208, + 0x1998, 0x12801209, + 0x1998, 0x1280120A, + 0x1998, 0x1280120B, + 0x1998, 0x1280120C, + 0x1998, 0x1280120D, + 0x1998, 0x1280120E, + 0x1998, 0x1280120F, + 0x1998, 0x12801210, + 0x1998, 0x12801211, + 0x1998, 0x12801212, + 0x1998, 0x12801213, + 0x1998, 0x12801214, + 0x1998, 0x12801215, + 0x1998, 0x12801216, + 0x1998, 0x12801217, + 0x1998, 0x12801218, + 0x1998, 0x12801219, + 0x1998, 0x1280121A, + 0x1998, 0x1280121B, + 0x1998, 0x1280121C, + 0x1998, 0x1280121D, + 0x1998, 0x1280121E, + 0x1998, 0x1280121F, + 0x1998, 0x12801220, + 0x1998, 0x12801221, + 0x1998, 0x12801222, + 0x1998, 0x12801223, + 0x1998, 0x1280122C, + 0x1998, 0x1280122D, + 0x1998, 0x1280122E, + 0x1998, 0x1280122F, + 0x1998, 0x12801230, + 0x1998, 0x12801231, + 0x1998, 0x12801232, + 0x1998, 0x12801233, + 0x1998, 0x12801234, + 0x1998, 0x12801235, + 0x1998, 0x12801236, + 0x1998, 0x12801237, + 0x1998, 0x12801238, + 0x1998, 0x12801239, + 0x1998, 0x1280123A, + 0x1998, 0x1280123B, + 0x1998, 0x1280123C, + 0x1998, 0x1280123D, + 0x1998, 0x1280123E, + 0x1998, 0x1280123F, + 0x1998, 0x12801240, + 0x1998, 0x12801241, + 0x1998, 0x12801242, + 0x1998, 0x12801243, + 0x1998, 0x12801244, + 0x1998, 0x12801245, + 0x1998, 0x12801246, + 0x1998, 0x12801247, + 0x1998, 0x12801248, + 0x1998, 0x12801249, + 0x1998, 0x12801300, + 0x1998, 0x12801301, + 0x1998, 0x12801302, + 0x1998, 0x12801303, 
+ 0x1998, 0x12801304, + 0x1998, 0x12801305, + 0x1998, 0x12801306, + 0x1998, 0x12801307, + 0x1998, 0x12801308, + 0x1998, 0x12801309, + 0x1998, 0x1280130A, + 0x1998, 0x1280130B, + 0x1998, 0x1280130C, + 0x1998, 0x1280130D, + 0x1998, 0x1280130E, + 0x1998, 0x1280130F, + 0x1998, 0x12801310, + 0x1998, 0x12801311, + 0x1998, 0x12801312, + 0x1998, 0x12801313, + 0x1998, 0x12801314, + 0x1998, 0x12801315, + 0x1998, 0x12801316, + 0x1998, 0x12801317, + 0x1998, 0x12801318, + 0x1998, 0x12801319, + 0x1998, 0x1280131A, + 0x1998, 0x1280131B, + 0x1998, 0x1280131C, + 0x1998, 0x1280131D, + 0x1998, 0x1280131E, + 0x1998, 0x1280131F, + 0x1998, 0x12801320, + 0x1998, 0x12801321, + 0x1998, 0x12801322, + 0x1998, 0x12801323, + 0x1998, 0x1280132C, + 0x1998, 0x1280132D, + 0x1998, 0x1280132E, + 0x1998, 0x1280132F, + 0x1998, 0x12801330, + 0x1998, 0x12801331, + 0x1998, 0x12801332, + 0x1998, 0x12801333, + 0x1998, 0x12801334, + 0x1998, 0x12801335, + 0x1998, 0x12801336, + 0x1998, 0x12801337, + 0x1998, 0x12801338, + 0x1998, 0x12801339, + 0x1998, 0x1280133A, + 0x1998, 0x1280133B, + 0x1998, 0x1280133C, + 0x1998, 0x1280133D, + 0x1998, 0x1280133E, + 0x1998, 0x1280133F, + 0x1998, 0x12801340, + 0x1998, 0x12801341, + 0x1998, 0x12801342, + 0x1998, 0x12801343, + 0x1998, 0x12801344, + 0x1998, 0x12801345, + 0x1998, 0x12801346, + 0x1998, 0x12801347, + 0x1998, 0x12801348, + 0x1998, 0x12801349, + 0x19D4, 0x88888888, + 0x19D8, 0x00000888, + 0xB00, 0xE3100100, + 0xB00, 0xE7100100, + 0xC60, 0x15808002, + 0xC60, 0x01808003, + 0xE60, 0x15808002, + 0xE60, 0x01808003, + 0x1860, 0x15808002, + 0x1860, 0x01808003, + 0x1A60, 0x15808002, + 0x1A60, 0x01808003, + 0xB00, 0xE3100100, + 0xC5C, 0x0D080058, + 0xE5C, 0x0D080058, + 0x185C, 0x0D080058, + 0x1A5C, 0x0D080058, + 0xC5C, 0x0D000058, + 0xE5C, 0x0D000058, + 0x185C, 0x0D000058, + 0x1A5C, 0x0D000058, + 0xC60, 0x05808002, + 0xC60, 0x0E808003, + 0xE60, 0x05808002, + 0xE60, 0x0E808003, + 0x1860, 0x05808002, + 0x1860, 0x0E808003, + 0x1A60, 0x05808002, + 0x1A60, 0x0E808003, + 0xB00, 0xE7100100, + 0xB00, 0xE3100100, + 0xB00, 0xE3100000, + 0x1C38, 0x00000002, + 0xA00, 0x00D047C8, + 0xA04, 0x46FF800C, + 0xA08, 0x8C838300, + 0xA0C, 0x2E7E000F, + 0xA10, 0x9500BB78, + 0xA14, 0x11144028, + 0xA18, 0x00881117, + 0xA1C, 0x89140F00, + 0xA20, 0x1A1B0030, + 0xA24, 0x090E1317, + 0xA28, 0x00000204, + 0xA2C, 0x00900000, + 0xA70, 0x101FFF00, + 0xA74, 0x00000128, + 0xA78, 0x00000900, + 0xA7C, 0x225B0606, + 0xA80, 0x218075B2, + 0xA84, 0x9C1F8C00, + 0x1B04, 0xE24628D2, + 0x1B10, 0x88010D46, + 0x1B14, 0x00000000, + 0x1B18, 0x00292903, + 0x1B00, 0xF8000000, + 0x1B00, 0xF800D000, + 0x1B00, 0xF801F000, + 0x1B1C, 0xA2123DB2, + 0x1B20, 0x07040001, + 0x1B24, 0x07060807, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x1B28, 0xC0060324, + 0x90000002, 0x00000000, 0x40000000, 0x00000000, + 0x1B28, 0xC0060324, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x1B28, 0xC0060324, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x1B28, 0xC0060324, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x1B28, 0xC0060324, + 0x90000006, 0x00000000, 0x40000000, 0x00000000, + 0x1B28, 0xC0060324, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x1B28, 0xC0060324, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0x1B28, 0xC0060324, + 0xA0000000, 0x00000000, + 0x1B28, 0xC0060348, + 0xB0000000, 0x00000000, + 0x1B2C, 0x20000003, + 0x1B30, 0x20000000, + 0x1B38, 0x20000000, + 0x1B3C, 0x20000000, + 0x1BD4, 0x00000001, + 0x1B94, 0x80000000, + 0x1B34, 0x00000000, + 0x1B34, 0x00000002, + 0x1B34, 0x00000000, + 0x1B00, 0xF8000002, + 0x1B00, 
0xF800D002, + 0x1B00, 0xF801F002, + 0x1B1C, 0xA2123DB2, + 0x1B20, 0x07040001, + 0x1B24, 0x07060807, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x1B28, 0xC0060324, + 0x90000002, 0x00000000, 0x40000000, 0x00000000, + 0x1B28, 0xC0060324, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x1B28, 0xC0060324, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x1B28, 0xC0060324, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x1B28, 0xC0060324, + 0x90000006, 0x00000000, 0x40000000, 0x00000000, + 0x1B28, 0xC0060324, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x1B28, 0xC0060324, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0x1B28, 0xC0060324, + 0xA0000000, 0x00000000, + 0x1B28, 0xC0060348, + 0xB0000000, 0x00000000, + 0x1B2C, 0x20000003, + 0x1B30, 0x20000000, + 0x1B38, 0x20000000, + 0x1B3C, 0x20000000, + 0x1BD4, 0x00000001, + 0x1B94, 0x80000000, + 0x1B34, 0x00000000, + 0x1B34, 0x00000002, + 0x1B34, 0x00000000, + 0x1B00, 0xF8000004, + 0x1B00, 0xF800D004, + 0x1B00, 0xF801F004, + 0x1B1C, 0xA2123DB2, + 0x1B20, 0x07040001, + 0x1B24, 0x07060807, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x1B28, 0xC0060324, + 0x90000002, 0x00000000, 0x40000000, 0x00000000, + 0x1B28, 0xC0060324, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x1B28, 0xC0060324, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x1B28, 0xC0060324, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x1B28, 0xC0060324, + 0x90000006, 0x00000000, 0x40000000, 0x00000000, + 0x1B28, 0xC0060324, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x1B28, 0xC0060324, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0x1B28, 0xC0060324, + 0xA0000000, 0x00000000, + 0x1B28, 0xC0060348, + 0xB0000000, 0x00000000, + 0x1B2C, 0x20000003, + 0x1B30, 0x20000000, + 0x1B38, 0x20000000, + 0x1B3C, 0x20000000, + 0x1BD4, 0x00000001, + 0x1B94, 0x80000000, + 0x1B34, 0x00000000, + 0x1B34, 0x00000002, + 0x1B34, 0x00000000, + 0x1B00, 0xF8000006, + 0x1B00, 0xF800D006, + 0x1B00, 0xF801F006, + 0x1B1C, 0xA2123DB2, + 0x1B20, 0x07040001, + 0x1B24, 0x07060807, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x1B28, 0xC0060324, + 0x90000002, 0x00000000, 0x40000000, 0x00000000, + 0x1B28, 0xC0060324, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x1B28, 0xC0060324, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x1B28, 0xC0060324, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x1B28, 0xC0060324, + 0x90000006, 0x00000000, 0x40000000, 0x00000000, + 0x1B28, 0xC0060324, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x1B28, 0xC0060324, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0x1B28, 0xC0060324, + 0xA0000000, 0x00000000, + 0x1B28, 0xC0060348, + 0xB0000000, 0x00000000, + 0x1B2C, 0x20000003, + 0x1B30, 0x20000000, + 0x1B38, 0x20000000, + 0x1B3C, 0x20000000, + 0x1BD4, 0x00000001, + 0x1B94, 0x80000000, + 0x1B34, 0x00000000, + 0x1B34, 0x00000002, + 0x1B34, 0x00000000, + 0x1B00, 0xF8000000, + 0x1B80, 0x00000007, + 0x1B80, 0x09060005, + 0x1B80, 0x09060007, + 0x1B80, 0x0FFE0015, + 0x1B80, 0x0FFE0017, + 0x1B80, 0x00240025, + 0x1B80, 0x00240027, + 0x1B80, 0x00040035, + 0x1B80, 0x00040037, + 0x1B80, 0x05C00045, + 0x1B80, 0x05C00047, + 0x1B80, 0x00070055, + 0x1B80, 0x00070057, + 0x1B80, 0x64000065, + 0x1B80, 0x64000067, + 0x1B80, 0x00020075, + 0x1B80, 0x00020077, + 0x1B80, 0x00080085, + 0x1B80, 0x00080087, + 0x1B80, 0x80000095, + 0x1B80, 0x80000097, + 0x1B80, 0x090100A5, + 0x1B80, 0x090100A7, + 0x1B80, 0x0F0200B5, + 0x1B80, 0x0F0200B7, + 0x1B80, 0x002400C5, + 0x1B80, 0x002400C7, + 0x1B80, 0x000400D5, + 
0x1B80, 0x000400D7, + 0x1B80, 0x05C000E5, + 0x1B80, 0x05C000E7, + 0x1B80, 0x000700F5, + 0x1B80, 0x000700F7, + 0x1B80, 0x64020105, + 0x1B80, 0x64020107, + 0x1B80, 0x00020115, + 0x1B80, 0x00020117, + 0x1B80, 0x00040125, + 0x1B80, 0x00040127, + 0x1B80, 0x4A000135, + 0x1B80, 0x4A000137, + 0x1B80, 0x4B040145, + 0x1B80, 0x4B040147, + 0x1B80, 0x85030155, + 0x1B80, 0x85030157, + 0x1B80, 0x40010165, + 0x1B80, 0x40010167, + 0x1B80, 0xE0290175, + 0x1B80, 0xE0290177, + 0x1B80, 0x00040185, + 0x1B80, 0x00040187, + 0x1B80, 0x4B050195, + 0x1B80, 0x4B050197, + 0x1B80, 0x860301A5, + 0x1B80, 0x860301A7, + 0x1B80, 0x400301B5, + 0x1B80, 0x400301B7, + 0x1B80, 0xE02901C5, + 0x1B80, 0xE02901C7, + 0x1B80, 0x000401D5, + 0x1B80, 0x000401D7, + 0x1B80, 0x4B0601E5, + 0x1B80, 0x4B0601E7, + 0x1B80, 0x870301F5, + 0x1B80, 0x870301F7, + 0x1B80, 0x40050205, + 0x1B80, 0x40050207, + 0x1B80, 0xE0290215, + 0x1B80, 0xE0290217, + 0x1B80, 0x00040225, + 0x1B80, 0x00040227, + 0x1B80, 0x4B070235, + 0x1B80, 0x4B070237, + 0x1B80, 0x88030245, + 0x1B80, 0x88030247, + 0x1B80, 0x40070255, + 0x1B80, 0x40070257, + 0x1B80, 0xE0290265, + 0x1B80, 0xE0290267, + 0x1B80, 0x4B000275, + 0x1B80, 0x4B000277, + 0x1B80, 0x30000285, + 0x1B80, 0x30000287, + 0x1B80, 0xFE100295, + 0x1B80, 0xFE100297, + 0x1B80, 0xFF1002A5, + 0x1B80, 0xFF1002A7, + 0x1B80, 0xE18602B5, + 0x1B80, 0xE18602B7, + 0x1B80, 0xF00A02C5, + 0x1B80, 0xF00A02C7, + 0x1B80, 0xF10A02D5, + 0x1B80, 0xF10A02D7, + 0x1B80, 0xF20A02E5, + 0x1B80, 0xF20A02E7, + 0x1B80, 0xF30802F5, + 0x1B80, 0xF30802F7, + 0x1B80, 0xF4070305, + 0x1B80, 0xF4070307, + 0x1B80, 0xF5060315, + 0x1B80, 0xF5060317, + 0x1B80, 0xF7060325, + 0x1B80, 0xF7060327, + 0x1B80, 0xF8050335, + 0x1B80, 0xF8050337, + 0x1B80, 0xF9040345, + 0x1B80, 0xF9040347, + 0x1B80, 0x00010355, + 0x1B80, 0x00010357, + 0x1B80, 0x303B0365, + 0x1B80, 0x303B0367, + 0x1B80, 0x30500375, + 0x1B80, 0x30500377, + 0x1B80, 0x305C0385, + 0x1B80, 0x305C0387, + 0x1B80, 0x31D50395, + 0x1B80, 0x31D50397, + 0x1B80, 0x31C503A5, + 0x1B80, 0x31C503A7, + 0x1B80, 0x4D0403B5, + 0x1B80, 0x4D0403B7, + 0x1B80, 0x2EF003C5, + 0x1B80, 0x2EF003C7, + 0x1B80, 0x000203D5, + 0x1B80, 0x000203D7, + 0x1B80, 0x208003E5, + 0x1B80, 0x208003E7, + 0x1B80, 0x000003F5, + 0x1B80, 0x000003F7, + 0x1B80, 0x4D000405, + 0x1B80, 0x4D000407, + 0x1B80, 0x55070415, + 0x1B80, 0x55070417, + 0x1B80, 0xE1230425, + 0x1B80, 0xE1230427, + 0x1B80, 0xE1230435, + 0x1B80, 0xE1230437, + 0x1B80, 0x4D040445, + 0x1B80, 0x4D040447, + 0x1B80, 0x20800455, + 0x1B80, 0x20800457, + 0x1B80, 0x84000465, + 0x1B80, 0x84000467, + 0x1B80, 0x4D000475, + 0x1B80, 0x4D000477, + 0x1B80, 0x550F0485, + 0x1B80, 0x550F0487, + 0x1B80, 0xE1230495, + 0x1B80, 0xE1230497, + 0x1B80, 0x4F0204A5, + 0x1B80, 0x4F0204A7, + 0x1B80, 0x4E0004B5, + 0x1B80, 0x4E0004B7, + 0x1B80, 0x530204C5, + 0x1B80, 0x530204C7, + 0x1B80, 0x520104D5, + 0x1B80, 0x520104D7, + 0x1B80, 0xE12704E5, + 0x1B80, 0xE12704E7, + 0x1B80, 0x000104F5, + 0x1B80, 0x000104F7, + 0x1B80, 0x5C720505, + 0x1B80, 0x5C720507, + 0x1B80, 0xE1320515, + 0x1B80, 0xE1320517, + 0x1B80, 0x54E50525, + 0x1B80, 0x54E50527, + 0x1B80, 0x54BF0535, + 0x1B80, 0x54BF0537, + 0x1B80, 0x54C50545, + 0x1B80, 0x54C50547, + 0x1B80, 0x54BE0555, + 0x1B80, 0x54BE0557, + 0x1B80, 0x54DF0565, + 0x1B80, 0x54DF0567, + 0x1B80, 0x0BA60575, + 0x1B80, 0x0BA60577, + 0x1B80, 0xF3130585, + 0x1B80, 0xF3130587, + 0x1B80, 0xF41E0595, + 0x1B80, 0xF41E0597, + 0x1B80, 0xF53C05A5, + 0x1B80, 0xF53C05A7, + 0x1B80, 0x000105B5, + 0x1B80, 0x000105B7, + 0x1B80, 0x620605C5, + 0x1B80, 0x620605C7, + 0x1B80, 0x600605D5, + 0x1B80, 0x600605D7, + 0x1B80, 
0xE1A905E5, + 0x1B80, 0xE1A905E7, + 0x1B80, 0x0C0005F5, + 0x1B80, 0x0C0005F7, + 0x1B80, 0x5C720605, + 0x1B80, 0x5C720607, + 0x1B80, 0xE1320615, + 0x1B80, 0xE1320617, + 0x1B80, 0x5CF10625, + 0x1B80, 0x5CF10627, + 0x1B80, 0x0C010635, + 0x1B80, 0x0C010637, + 0x1B80, 0xF2020645, + 0x1B80, 0xF2020647, + 0x1B80, 0x30D60655, + 0x1B80, 0x30D60657, + 0x1B80, 0x0AC60665, + 0x1B80, 0x0AC60667, + 0x1B80, 0xE1B60675, + 0x1B80, 0xE1B60677, + 0x1B80, 0xE1580685, + 0x1B80, 0xE1580687, + 0x1B80, 0x54E50695, + 0x1B80, 0x54E50697, + 0x1B80, 0x000106A5, + 0x1B80, 0x000106A7, + 0x1B80, 0x560106B5, + 0x1B80, 0x560106B7, + 0x1B80, 0x5CE206C5, + 0x1B80, 0x5CE206C7, + 0x1B80, 0x0AE106D5, + 0x1B80, 0x0AE106D7, + 0x1B80, 0x630C06E5, + 0x1B80, 0x630C06E7, + 0x1B80, 0xE13F06F5, + 0x1B80, 0xE13F06F7, + 0x1B80, 0x00270705, + 0x1B80, 0x00270707, + 0x1B80, 0xE16C0715, + 0x1B80, 0xE16C0717, + 0x1B80, 0x00020725, + 0x1B80, 0x00020727, + 0x1B80, 0x002A0735, + 0x1B80, 0x002A0737, + 0x1B80, 0x07140745, + 0x1B80, 0x07140747, + 0x1B80, 0x00020755, + 0x1B80, 0x00020757, + 0x1B80, 0x30C30765, + 0x1B80, 0x30C30767, + 0x1B80, 0x56010775, + 0x1B80, 0x56010777, + 0x1B80, 0x5CE20785, + 0x1B80, 0x5CE20787, + 0x1B80, 0x0AE10795, + 0x1B80, 0x0AE10797, + 0x1B80, 0x631707A5, + 0x1B80, 0x631707A7, + 0x1B80, 0xE13F07B5, + 0x1B80, 0xE13F07B7, + 0x1B80, 0x002507C5, + 0x1B80, 0x002507C7, + 0x1B80, 0xE16C07D5, + 0x1B80, 0xE16C07D7, + 0x1B80, 0x000207E5, + 0x1B80, 0x000207E7, + 0x1B80, 0x630F07F5, + 0x1B80, 0x630F07F7, + 0x1B80, 0xE13F0805, + 0x1B80, 0xE13F0807, + 0x1B80, 0x63070815, + 0x1B80, 0x63070817, + 0x1B80, 0xE13F0825, + 0x1B80, 0xE13F0827, + 0x1B80, 0x07140835, + 0x1B80, 0x07140837, + 0x1B80, 0x56000845, + 0x1B80, 0x56000847, + 0x1B80, 0x5CF20855, + 0x1B80, 0x5CF20857, + 0x1B80, 0x0AF10865, + 0x1B80, 0x0AF10867, + 0x1B80, 0x07140875, + 0x1B80, 0x07140877, + 0x1B80, 0x07140885, + 0x1B80, 0x07140887, + 0x1B80, 0x630F0895, + 0x1B80, 0x630F0897, + 0x1B80, 0xE13F08A5, + 0x1B80, 0xE13F08A7, + 0x1B80, 0x631708B5, + 0x1B80, 0x631708B7, + 0x1B80, 0xE13F08C5, + 0x1B80, 0xE13F08C7, + 0x1B80, 0x002508D5, + 0x1B80, 0x002508D7, + 0x1B80, 0xE16C08E5, + 0x1B80, 0xE16C08E7, + 0x1B80, 0x000208F5, + 0x1B80, 0x000208F7, + 0x1B80, 0x30C30905, + 0x1B80, 0x30C30907, + 0x1B80, 0xE1A90915, + 0x1B80, 0xE1A90917, + 0x1B80, 0x62060925, + 0x1B80, 0x62060927, + 0x1B80, 0x60060935, + 0x1B80, 0x60060937, + 0x1B80, 0xE1160945, + 0x1B80, 0xE1160947, + 0x1B80, 0x54BE0955, + 0x1B80, 0x54BE0957, + 0x1B80, 0x56010965, + 0x1B80, 0x56010967, + 0x1B80, 0x5CE20975, + 0x1B80, 0x5CE20977, + 0x1B80, 0x0AE10985, + 0x1B80, 0x0AE10987, + 0x1B80, 0x633A0995, + 0x1B80, 0x633A0997, + 0x1B80, 0xE13F09A5, + 0x1B80, 0xE13F09A7, + 0x1B80, 0x633709B5, + 0x1B80, 0x633709B7, + 0x1B80, 0xE13F09C5, + 0x1B80, 0xE13F09C7, + 0x1B80, 0x632F09D5, + 0x1B80, 0x632F09D7, + 0x1B80, 0xE13F09E5, + 0x1B80, 0xE13F09E7, + 0x1B80, 0x632709F5, + 0x1B80, 0x632709F7, + 0x1B80, 0xE13F0A05, + 0x1B80, 0xE13F0A07, + 0x1B80, 0x631F0A15, + 0x1B80, 0x631F0A17, + 0x1B80, 0xE13F0A25, + 0x1B80, 0xE13F0A27, + 0x1B80, 0x63170A35, + 0x1B80, 0x63170A37, + 0x1B80, 0xE13F0A45, + 0x1B80, 0xE13F0A47, + 0x1B80, 0x630F0A55, + 0x1B80, 0x630F0A57, + 0x1B80, 0xE13F0A65, + 0x1B80, 0xE13F0A67, + 0x1B80, 0x63070A75, + 0x1B80, 0x63070A77, + 0x1B80, 0xE13F0A85, + 0x1B80, 0xE13F0A87, + 0x1B80, 0xE16C0A95, + 0x1B80, 0xE16C0A97, + 0x1B80, 0x56000AA5, + 0x1B80, 0x56000AA7, + 0x1B80, 0x5CF20AB5, + 0x1B80, 0x5CF20AB7, + 0x1B80, 0x0AF10AC5, + 0x1B80, 0x0AF10AC7, + 0x1B80, 0xF5040AD5, + 0x1B80, 0xF5040AD7, + 0x1B80, 0xE13F0AE5, + 0x1B80, 0xE13F0AE7, 
+ 0x1B80, 0xE16C0AF5, + 0x1B80, 0xE16C0AF7, + 0x1B80, 0x30B30B05, + 0x1B80, 0x30B30B07, + 0x1B80, 0x07140B15, + 0x1B80, 0x07140B17, + 0x1B80, 0x07140B25, + 0x1B80, 0x07140B27, + 0x1B80, 0x630F0B35, + 0x1B80, 0x630F0B37, + 0x1B80, 0xE13F0B45, + 0x1B80, 0xE13F0B47, + 0x1B80, 0x63170B55, + 0x1B80, 0x63170B57, + 0x1B80, 0xE13F0B65, + 0x1B80, 0xE13F0B67, + 0x1B80, 0x631F0B75, + 0x1B80, 0x631F0B77, + 0x1B80, 0xE13F0B85, + 0x1B80, 0xE13F0B87, + 0x1B80, 0x63270B95, + 0x1B80, 0x63270B97, + 0x1B80, 0xE13F0BA5, + 0x1B80, 0xE13F0BA7, + 0x1B80, 0x632F0BB5, + 0x1B80, 0x632F0BB7, + 0x1B80, 0xE13F0BC5, + 0x1B80, 0xE13F0BC7, + 0x1B80, 0x63370BD5, + 0x1B80, 0x63370BD7, + 0x1B80, 0xE13F0BE5, + 0x1B80, 0xE13F0BE7, + 0x1B80, 0x633A0BF5, + 0x1B80, 0x633A0BF7, + 0x1B80, 0xE13F0C05, + 0x1B80, 0xE13F0C07, + 0x1B80, 0xF60B0C15, + 0x1B80, 0xF60B0C17, + 0x1B80, 0xF7170C25, + 0x1B80, 0xF7170C27, + 0x1B80, 0x4D300C35, + 0x1B80, 0x4D300C37, + 0x1B80, 0x57040C45, + 0x1B80, 0x57040C47, + 0x1B80, 0x57000C55, + 0x1B80, 0x57000C57, + 0x1B80, 0x96000C65, + 0x1B80, 0x96000C67, + 0x1B80, 0x57080C75, + 0x1B80, 0x57080C77, + 0x1B80, 0x57000C85, + 0x1B80, 0x57000C87, + 0x1B80, 0x95000C95, + 0x1B80, 0x95000C97, + 0x1B80, 0x4D000CA5, + 0x1B80, 0x4D000CA7, + 0x1B80, 0x6C070CB5, + 0x1B80, 0x6C070CB7, + 0x1B80, 0x00010CC5, + 0x1B80, 0x00010CC7, + 0x1B80, 0x00220CD5, + 0x1B80, 0x00220CD7, + 0x1B80, 0x06140CE5, + 0x1B80, 0x06140CE7, + 0x1B80, 0xE16C0CF5, + 0x1B80, 0xE16C0CF7, + 0x1B80, 0x00020D05, + 0x1B80, 0x00020D07, + 0x1B80, 0x00250D15, + 0x1B80, 0x00250D17, + 0x1B80, 0x06140D25, + 0x1B80, 0x06140D27, + 0x1B80, 0xE16C0D35, + 0x1B80, 0xE16C0D37, + 0x1B80, 0x00020D45, + 0x1B80, 0x00020D47, + 0x1B80, 0x00010D55, + 0x1B80, 0x00010D57, + 0x1B80, 0x00320D65, + 0x1B80, 0x00320D67, + 0x1B80, 0xE16C0D75, + 0x1B80, 0xE16C0D77, + 0x1B80, 0x00020D85, + 0x1B80, 0x00020D87, + 0x1B80, 0xE1860D95, + 0x1B80, 0xE1860D97, + 0x1B80, 0xE1B60DA5, + 0x1B80, 0xE1B60DA7, + 0x1B80, 0x5CD10DB5, + 0x1B80, 0x5CD10DB7, + 0x1B80, 0x673A0DC5, + 0x1B80, 0x673A0DC7, + 0x1B80, 0xE1230DD5, + 0x1B80, 0xE1230DD7, + 0x1B80, 0xF80B0DE5, + 0x1B80, 0xF80B0DE7, + 0x1B80, 0xF9110DF5, + 0x1B80, 0xF9110DF7, + 0x1B80, 0xE1580E05, + 0x1B80, 0xE1580E07, + 0x1B80, 0x67370E15, + 0x1B80, 0x67370E17, + 0x1B80, 0xE1580E25, + 0x1B80, 0xE1580E27, + 0x1B80, 0x672F0E35, + 0x1B80, 0x672F0E37, + 0x1B80, 0xE1580E45, + 0x1B80, 0xE1580E47, + 0x1B80, 0x67270E55, + 0x1B80, 0x67270E57, + 0x1B80, 0xE1580E65, + 0x1B80, 0xE1580E67, + 0x1B80, 0x671F0E75, + 0x1B80, 0x671F0E77, + 0x1B80, 0xE1580E85, + 0x1B80, 0xE1580E87, + 0x1B80, 0x67170E95, + 0x1B80, 0x67170E97, + 0x1B80, 0xE1580EA5, + 0x1B80, 0xE1580EA7, + 0x1B80, 0xF8020EB5, + 0x1B80, 0xF8020EB7, + 0x1B80, 0x30EE0EC5, + 0x1B80, 0x30EE0EC7, + 0x1B80, 0xE0D10ED5, + 0x1B80, 0xE0D10ED7, + 0x1B80, 0x670F0EE5, + 0x1B80, 0x670F0EE7, + 0x1B80, 0xE1580EF5, + 0x1B80, 0xE1580EF7, + 0x1B80, 0x67070F05, + 0x1B80, 0x67070F07, + 0x1B80, 0xE1580F15, + 0x1B80, 0xE1580F17, + 0x1B80, 0xF9020F25, + 0x1B80, 0xF9020F27, + 0x1B80, 0x30F50F35, + 0x1B80, 0x30F50F37, + 0x1B80, 0xE0CD0F45, + 0x1B80, 0xE0CD0F47, + 0x1B80, 0x06140F55, + 0x1B80, 0x06140F57, + 0x1B80, 0xE16C0F65, + 0x1B80, 0xE16C0F67, + 0x1B80, 0x5CF10F75, + 0x1B80, 0x5CF10F77, + 0x1B80, 0xE1580F85, + 0x1B80, 0xE1580F87, + 0x1B80, 0x06140F95, + 0x1B80, 0x06140F97, + 0x1B80, 0xE16C0FA5, + 0x1B80, 0xE16C0FA7, + 0x1B80, 0xF9020FB5, + 0x1B80, 0xF9020FB7, + 0x1B80, 0x30FF0FC5, + 0x1B80, 0x30FF0FC7, + 0x1B80, 0xE0CD0FD5, + 0x1B80, 0xE0CD0FD7, + 0x1B80, 0x31130FE5, + 0x1B80, 0x31130FE7, + 0x1B80, 0x670F0FF5, + 0x1B80, 
0x670F0FF7, + 0x1B80, 0xE1581005, + 0x1B80, 0xE1581007, + 0x1B80, 0x67171015, + 0x1B80, 0x67171017, + 0x1B80, 0xE1581025, + 0x1B80, 0xE1581027, + 0x1B80, 0xF8021035, + 0x1B80, 0xF8021037, + 0x1B80, 0x31071045, + 0x1B80, 0x31071047, + 0x1B80, 0xE0D11055, + 0x1B80, 0xE0D11057, + 0x1B80, 0x31131065, + 0x1B80, 0x31131067, + 0x1B80, 0x670F1075, + 0x1B80, 0x670F1077, + 0x1B80, 0xE1581085, + 0x1B80, 0xE1581087, + 0x1B80, 0x671F1095, + 0x1B80, 0x671F1097, + 0x1B80, 0xE15810A5, + 0x1B80, 0xE15810A7, + 0x1B80, 0x672710B5, + 0x1B80, 0x672710B7, + 0x1B80, 0xE15810C5, + 0x1B80, 0xE15810C7, + 0x1B80, 0x672F10D5, + 0x1B80, 0x672F10D7, + 0x1B80, 0xE15810E5, + 0x1B80, 0xE15810E7, + 0x1B80, 0x673710F5, + 0x1B80, 0x673710F7, + 0x1B80, 0xE1581105, + 0x1B80, 0xE1581107, + 0x1B80, 0x673A1115, + 0x1B80, 0x673A1117, + 0x1B80, 0xE1581125, + 0x1B80, 0xE1581127, + 0x1B80, 0x4D101135, + 0x1B80, 0x4D101137, + 0x1B80, 0x30C41145, + 0x1B80, 0x30C41147, + 0x1B80, 0x00011155, + 0x1B80, 0x00011157, + 0x1B80, 0x6F241165, + 0x1B80, 0x6F241167, + 0x1B80, 0x6E401175, + 0x1B80, 0x6E401177, + 0x1B80, 0x6D001185, + 0x1B80, 0x6D001187, + 0x1B80, 0x55031195, + 0x1B80, 0x55031197, + 0x1B80, 0x312311A5, + 0x1B80, 0x312311A7, + 0x1B80, 0x6F1C11B5, + 0x1B80, 0x6F1C11B7, + 0x1B80, 0x6E4011C5, + 0x1B80, 0x6E4011C7, + 0x1B80, 0x550B11D5, + 0x1B80, 0x550B11D7, + 0x1B80, 0x312311E5, + 0x1B80, 0x312311E7, + 0x1B80, 0x061C11F5, + 0x1B80, 0x061C11F7, + 0x1B80, 0x54DE1205, + 0x1B80, 0x54DE1207, + 0x1B80, 0x06DC1215, + 0x1B80, 0x06DC1217, + 0x1B80, 0x55131225, + 0x1B80, 0x55131227, + 0x1B80, 0x74011235, + 0x1B80, 0x74011237, + 0x1B80, 0x74001245, + 0x1B80, 0x74001247, + 0x1B80, 0x8E001255, + 0x1B80, 0x8E001257, + 0x1B80, 0x00011265, + 0x1B80, 0x00011267, + 0x1B80, 0x57021275, + 0x1B80, 0x57021277, + 0x1B80, 0x57001285, + 0x1B80, 0x57001287, + 0x1B80, 0x97001295, + 0x1B80, 0x97001297, + 0x1B80, 0x000112A5, + 0x1B80, 0x000112A7, + 0x1B80, 0x54BF12B5, + 0x1B80, 0x54BF12B7, + 0x1B80, 0x54C112C5, + 0x1B80, 0x54C112C7, + 0x1B80, 0x54A212D5, + 0x1B80, 0x54A212D7, + 0x1B80, 0x54C012E5, + 0x1B80, 0x54C012E7, + 0x1B80, 0x54A112F5, + 0x1B80, 0x54A112F7, + 0x1B80, 0x54DF1305, + 0x1B80, 0x54DF1307, + 0x1B80, 0x00011315, + 0x1B80, 0x00011317, + 0x1B80, 0x55001325, + 0x1B80, 0x55001327, + 0x1B80, 0xE1231335, + 0x1B80, 0xE1231337, + 0x1B80, 0x54811345, + 0x1B80, 0x54811347, + 0x1B80, 0xE1231355, + 0x1B80, 0xE1231357, + 0x1B80, 0x54801365, + 0x1B80, 0x54801367, + 0x1B80, 0x002A1375, + 0x1B80, 0x002A1377, + 0x1B80, 0xE12B1385, + 0x1B80, 0xE12B1387, + 0x1B80, 0xE1231395, + 0x1B80, 0xE1231397, + 0x1B80, 0x548013A5, + 0x1B80, 0x548013A7, + 0x1B80, 0xE17213B5, + 0x1B80, 0xE17213B7, + 0x1B80, 0xBF3013C5, + 0x1B80, 0xBF3013C7, + 0x1B80, 0x000213D5, + 0x1B80, 0x000213D7, + 0x1B80, 0x302813E5, + 0x1B80, 0x302813E7, + 0x1B80, 0x4F7813F5, + 0x1B80, 0x4F7813F7, + 0x1B80, 0x4E001405, + 0x1B80, 0x4E001407, + 0x1B80, 0x53871415, + 0x1B80, 0x53871417, + 0x1B80, 0x52F11425, + 0x1B80, 0x52F11427, + 0x1B80, 0xE1161435, + 0x1B80, 0xE1161437, + 0x1B80, 0xE11B1445, + 0x1B80, 0xE11B1447, + 0x1B80, 0xE11F1455, + 0x1B80, 0xE11F1457, + 0x1B80, 0xE1271465, + 0x1B80, 0xE1271467, + 0x1B80, 0x54811475, + 0x1B80, 0x54811477, + 0x1B80, 0xE1161485, + 0x1B80, 0xE1161487, + 0x1B80, 0xE11B1495, + 0x1B80, 0xE11B1497, + 0x1B80, 0xE11F14A5, + 0x1B80, 0xE11F14A7, + 0x1B80, 0xE12714B5, + 0x1B80, 0xE12714B7, + 0x1B80, 0x548014C5, + 0x1B80, 0x548014C7, + 0x1B80, 0x002A14D5, + 0x1B80, 0x002A14D7, + 0x1B80, 0xE12B14E5, + 0x1B80, 0xE12B14E7, + 0x1B80, 0xE11614F5, + 0x1B80, 0xE11614F7, + 0x1B80, 0xE11B1505, 
+ 0x1B80, 0xE11B1507, + 0x1B80, 0xE11F1515, + 0x1B80, 0xE11F1517, + 0x1B80, 0xE1271525, + 0x1B80, 0xE1271527, + 0x1B80, 0x54801535, + 0x1B80, 0x54801537, + 0x1B80, 0xE1721545, + 0x1B80, 0xE1721547, + 0x1B80, 0xBF171555, + 0x1B80, 0xBF171557, + 0x1B80, 0x00021565, + 0x1B80, 0x00021567, + 0x1B80, 0x30281575, + 0x1B80, 0x30281577, + 0x1B80, 0x06141585, + 0x1B80, 0x06141587, + 0x1B80, 0x73201595, + 0x1B80, 0x73201597, + 0x1B80, 0x720015A5, + 0x1B80, 0x720015A7, + 0x1B80, 0x710015B5, + 0x1B80, 0x710015B7, + 0x1B80, 0x550115C5, + 0x1B80, 0x550115C7, + 0x1B80, 0xE12315D5, + 0x1B80, 0xE12315D7, + 0x1B80, 0xE12715E5, + 0x1B80, 0xE12715E7, + 0x1B80, 0x548115F5, + 0x1B80, 0x548115F7, + 0x1B80, 0xE1231605, + 0x1B80, 0xE1231607, + 0x1B80, 0xE1271615, + 0x1B80, 0xE1271617, + 0x1B80, 0x54801625, + 0x1B80, 0x54801627, + 0x1B80, 0x002A1635, + 0x1B80, 0x002A1637, + 0x1B80, 0xE12B1645, + 0x1B80, 0xE12B1647, + 0x1B80, 0xE1231655, + 0x1B80, 0xE1231657, + 0x1B80, 0xE1271665, + 0x1B80, 0xE1271667, + 0x1B80, 0x54801675, + 0x1B80, 0x54801677, + 0x1B80, 0xE1721685, + 0x1B80, 0xE1721687, + 0x1B80, 0xBF031695, + 0x1B80, 0xBF031697, + 0x1B80, 0x000216A5, + 0x1B80, 0x000216A7, + 0x1B80, 0x302816B5, + 0x1B80, 0x302816B7, + 0x1B80, 0x54BF16C5, + 0x1B80, 0x54BF16C7, + 0x1B80, 0x54C516D5, + 0x1B80, 0x54C516D7, + 0x1B80, 0x050A16E5, + 0x1B80, 0x050A16E7, + 0x1B80, 0x071416F5, + 0x1B80, 0x071416F7, + 0x1B80, 0x54DF1705, + 0x1B80, 0x54DF1707, + 0x1B80, 0x00011715, + 0x1B80, 0x00011717, + 0x1B80, 0x54BF1725, + 0x1B80, 0x54BF1727, + 0x1B80, 0x54C01735, + 0x1B80, 0x54C01737, + 0x1B80, 0x54A31745, + 0x1B80, 0x54A31747, + 0x1B80, 0x54C11755, + 0x1B80, 0x54C11757, + 0x1B80, 0x54A41765, + 0x1B80, 0x54A41767, + 0x1B80, 0x4C831775, + 0x1B80, 0x4C831777, + 0x1B80, 0x4C031785, + 0x1B80, 0x4C031787, + 0x1B80, 0xBF0B1795, + 0x1B80, 0xBF0B1797, + 0x1B80, 0x54C217A5, + 0x1B80, 0x54C217A7, + 0x1B80, 0x54A417B5, + 0x1B80, 0x54A417B7, + 0x1B80, 0x4C8517C5, + 0x1B80, 0x4C8517C7, + 0x1B80, 0x4C0517D5, + 0x1B80, 0x4C0517D7, + 0x1B80, 0xBF0617E5, + 0x1B80, 0xBF0617E7, + 0x1B80, 0x54C117F5, + 0x1B80, 0x54C117F7, + 0x1B80, 0x54A31805, + 0x1B80, 0x54A31807, + 0x1B80, 0x4C861815, + 0x1B80, 0x4C861817, + 0x1B80, 0x4C061825, + 0x1B80, 0x4C061827, + 0x1B80, 0xBF011835, + 0x1B80, 0xBF011837, + 0x1B80, 0x54DF1845, + 0x1B80, 0x54DF1847, + 0x1B80, 0x00011855, + 0x1B80, 0x00011857, + 0x1B80, 0x00071865, + 0x1B80, 0x00071867, + 0x1B80, 0x54011875, + 0x1B80, 0x54011877, + 0x1B80, 0x00041885, + 0x1B80, 0x00041887, + 0x1B80, 0x56001895, + 0x1B80, 0x56001897, + 0x1B80, 0x5CF218A5, + 0x1B80, 0x5CF218A7, + 0x1B80, 0x630718B5, + 0x1B80, 0x630718B7, + 0x1B80, 0x620418C5, + 0x1B80, 0x620418C7, + 0x1B80, 0x610018D5, + 0x1B80, 0x610018D7, + 0x1B80, 0x670718E5, + 0x1B80, 0x670718E7, + 0x1B80, 0x660618F5, + 0x1B80, 0x660618F7, + 0x1B80, 0x6F201905, + 0x1B80, 0x6F201907, + 0x1B80, 0x6E001915, + 0x1B80, 0x6E001917, + 0x1B80, 0x6D001925, + 0x1B80, 0x6D001927, + 0x1B80, 0x6C031935, + 0x1B80, 0x6C031937, + 0x1B80, 0x73201945, + 0x1B80, 0x73201947, + 0x1B80, 0x72001955, + 0x1B80, 0x72001957, + 0x1B80, 0x71001965, + 0x1B80, 0x71001967, + 0x1B80, 0x7B201975, + 0x1B80, 0x7B201977, + 0x1B80, 0x7A001985, + 0x1B80, 0x7A001987, + 0x1B80, 0x79001995, + 0x1B80, 0x79001997, + 0x1B80, 0x7F2019A5, + 0x1B80, 0x7F2019A7, + 0x1B80, 0x7E0019B5, + 0x1B80, 0x7E0019B7, + 0x1B80, 0x7D0019C5, + 0x1B80, 0x7D0019C7, + 0x1B80, 0x090119D5, + 0x1B80, 0x090119D7, + 0x1B80, 0x0AC619E5, + 0x1B80, 0x0AC619E7, + 0x1B80, 0x0BA619F5, + 0x1B80, 0x0BA619F7, + 0x1B80, 0x0C011A05, + 0x1B80, 0x0C011A07, + 0x1B80, 
0x0D021A15, + 0x1B80, 0x0D021A17, + 0x1B80, 0x0E041A25, + 0x1B80, 0x0E041A27, + 0x1B80, 0x0FFF1A35, + 0x1B80, 0x0FFF1A37, + 0x1B80, 0x4D041A45, + 0x1B80, 0x4D041A47, + 0x1B80, 0x28F81A55, + 0x1B80, 0x28F81A57, + 0x1B80, 0xE0001A65, + 0x1B80, 0xE0001A67, + 0x1B80, 0x4D001A75, + 0x1B80, 0x4D001A77, + 0x1B80, 0x00011A85, + 0x1B80, 0x00011A87, + 0x1B80, 0x4D041A95, + 0x1B80, 0x4D041A97, + 0x1B80, 0x2EF81AA5, + 0x1B80, 0x2EF81AA7, + 0x1B80, 0x00021AB5, + 0x1B80, 0x00021AB7, + 0x1B80, 0x23031AC5, + 0x1B80, 0x23031AC7, + 0x1B80, 0x00001AD5, + 0x1B80, 0x00001AD7, + 0x1B80, 0x23131AE5, + 0x1B80, 0x23131AE7, + 0x1B80, 0xE77F1AF5, + 0x1B80, 0xE77F1AF7, + 0x1B80, 0x232F1B05, + 0x1B80, 0x232F1B07, + 0x1B80, 0xEFBF1B15, + 0x1B80, 0xEFBF1B17, + 0x1B80, 0x2EF01B25, + 0x1B80, 0x2EF01B27, + 0x1B80, 0x00021B35, + 0x1B80, 0x00021B37, + 0x1B80, 0x4D001B45, + 0x1B80, 0x4D001B47, + 0x1B80, 0x00011B55, + 0x1B80, 0x00011B57, + 0x1B80, 0x4D041B65, + 0x1B80, 0x4D041B67, + 0x1B80, 0x2EF81B75, + 0x1B80, 0x2EF81B77, + 0x1B80, 0x00021B85, + 0x1B80, 0x00021B87, + 0x1B80, 0x23031B95, + 0x1B80, 0x23031B97, + 0x1B80, 0x00001BA5, + 0x1B80, 0x00001BA7, + 0x1B80, 0x23131BB5, + 0x1B80, 0x23131BB7, + 0x1B80, 0xE77F1BC5, + 0x1B80, 0xE77F1BC7, + 0x1B80, 0x232F1BD5, + 0x1B80, 0x232F1BD7, + 0x1B80, 0xE79F1BE5, + 0x1B80, 0xE79F1BE7, + 0x1B80, 0x2EF01BF5, + 0x1B80, 0x2EF01BF7, + 0x1B80, 0x00021C05, + 0x1B80, 0x00021C07, + 0x1B80, 0x28F81C15, + 0x1B80, 0x28F81C17, + 0x1B80, 0x80001C25, + 0x1B80, 0x80001C27, + 0x1B80, 0x4D001C35, + 0x1B80, 0x4D001C37, + 0x1B80, 0x00011C45, + 0x1B80, 0x00011C47, + 0x1B80, 0x00041C55, + 0x1B80, 0x00041C57, + 0x1B80, 0x6BC01C65, + 0x1B80, 0x6BC01C67, + 0x1B80, 0x4D041C75, + 0x1B80, 0x4D041C77, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x1B80, 0x68241C85, + 0x1B80, 0x68241C87, + 0x90000002, 0x00000000, 0x40000000, 0x00000000, + 0x1B80, 0x68241C85, + 0x1B80, 0x68241C87, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x1B80, 0x68241C85, + 0x1B80, 0x68241C87, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x1B80, 0x68241C85, + 0x1B80, 0x68241C87, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x1B80, 0x68241C85, + 0x1B80, 0x68241C87, + 0x90000006, 0x00000000, 0x40000000, 0x00000000, + 0x1B80, 0x68241C85, + 0x1B80, 0x68241C87, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x1B80, 0x68241C85, + 0x1B80, 0x68241C87, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0x1B80, 0x68241C85, + 0x1B80, 0x68241C87, + 0xA0000000, 0x00000000, + 0x1B80, 0x68481C85, + 0x1B80, 0x68481C87, + 0xB0000000, 0x00000000, + 0x1B80, 0x66061C95, + 0x1B80, 0x66061C97, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x1B80, 0x650C1CA5, + 0x1B80, 0x650C1CA7, + 0x90000002, 0x00000000, 0x40000000, 0x00000000, + 0x1B80, 0x650C1CA5, + 0x1B80, 0x650C1CA7, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x1B80, 0x650C1CA5, + 0x1B80, 0x650C1CA7, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x1B80, 0x650C1CA5, + 0x1B80, 0x650C1CA7, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x1B80, 0x650C1CA5, + 0x1B80, 0x650C1CA7, + 0x90000006, 0x00000000, 0x40000000, 0x00000000, + 0x1B80, 0x650C1CA5, + 0x1B80, 0x650C1CA7, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x1B80, 0x650C1CA5, + 0x1B80, 0x650C1CA7, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0x1B80, 0x650C1CA5, + 0x1B80, 0x650C1CA7, + 0xA0000000, 0x00000000, + 0x1B80, 0x65041CA5, + 0x1B80, 0x65041CA7, + 0xB0000000, 0x00000000, + 0x1B80, 0x64471CB5, + 0x1B80, 0x64471CB7, + 0x1B80, 0x23411CC5, + 0x1B80, 0x23411CC7, + 0x1B80, 
0x100E1CD5, + 0x1B80, 0x100E1CD7, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x1B80, 0x60101CE5, + 0x1B80, 0x60101CE7, + 0x90000002, 0x00000000, 0x40000000, 0x00000000, + 0x1B80, 0x60101CE5, + 0x1B80, 0x60101CE7, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x1B80, 0x60101CE5, + 0x1B80, 0x60101CE7, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x1B80, 0x60101CE5, + 0x1B80, 0x60101CE7, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x1B80, 0x60101CE5, + 0x1B80, 0x60101CE7, + 0x90000006, 0x00000000, 0x40000000, 0x00000000, + 0x1B80, 0x60101CE5, + 0x1B80, 0x60101CE7, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x1B80, 0x60101CE5, + 0x1B80, 0x60101CE7, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0x1B80, 0x60101CE5, + 0x1B80, 0x60101CE7, + 0xA0000000, 0x00000000, + 0x1B80, 0x60011CE5, + 0x1B80, 0x60011CE7, + 0xB0000000, 0x00000000, + 0x1B80, 0x23411CF5, + 0x1B80, 0x23411CF7, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x1B80, 0x60811D05, + 0x1B80, 0x60811D07, + 0x90000002, 0x00000000, 0x40000000, 0x00000000, + 0x1B80, 0x60811D05, + 0x1B80, 0x60811D07, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x1B80, 0x60811D05, + 0x1B80, 0x60811D07, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x1B80, 0x60811D05, + 0x1B80, 0x60811D07, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x1B80, 0x60811D05, + 0x1B80, 0x60811D07, + 0x90000006, 0x00000000, 0x40000000, 0x00000000, + 0x1B80, 0x60811D05, + 0x1B80, 0x60811D07, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x1B80, 0x60811D05, + 0x1B80, 0x60811D07, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0x1B80, 0x60811D05, + 0x1B80, 0x60811D07, + 0xA0000000, 0x00000000, + 0x1B80, 0x60611D05, + 0x1B80, 0x60611D07, + 0xB0000000, 0x00000000, + 0x1B80, 0x23411D15, + 0x1B80, 0x23411D17, + 0x1B80, 0x70E11D25, + 0x1B80, 0x70E11D27, + 0x1B80, 0x4D001D35, + 0x1B80, 0x4D001D37, + 0x1B80, 0x00011D45, + 0x1B80, 0x00011D47, + 0x1B80, 0x00041D55, + 0x1B80, 0x00041D57, + 0x1B80, 0x6B401D65, + 0x1B80, 0x6B401D67, + 0x1B80, 0x4D041D75, + 0x1B80, 0x4D041D77, + 0x80000002, 0x00000000, 0x40000000, 0x00000000, + 0x1B80, 0x68241D85, + 0x1B80, 0x68241D87, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x1B80, 0x68241D85, + 0x1B80, 0x68241D87, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x1B80, 0x68241D85, + 0x1B80, 0x68241D87, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x1B80, 0x68241D85, + 0x1B80, 0x68241D87, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x1B80, 0x68241D85, + 0x1B80, 0x68241D87, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0x1B80, 0x68241D85, + 0x1B80, 0x68241D87, + 0xA0000000, 0x00000000, + 0x1B80, 0x68481D85, + 0x1B80, 0x68481D87, + 0xB0000000, 0x00000000, + 0x1B80, 0x66061D95, + 0x1B80, 0x66061D97, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x1B80, 0x65081DA5, + 0x1B80, 0x65081DA7, + 0x90000002, 0x00000000, 0x40000000, 0x00000000, + 0x1B80, 0x65181DA5, + 0x1B80, 0x65181DA7, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x1B80, 0x65181DA5, + 0x1B80, 0x65181DA7, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x1B80, 0x65181DA5, + 0x1B80, 0x65181DA7, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x1B80, 0x65181DA5, + 0x1B80, 0x65181DA7, + 0x90000006, 0x00000000, 0x40000000, 0x00000000, + 0x1B80, 0x65081DA5, + 0x1B80, 0x65081DA7, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x1B80, 0x65181DA5, + 0x1B80, 0x65181DA7, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0x1B80, 0x65181DA5, + 0x1B80, 0x65181DA7, + 0xA0000000, 
0x00000000, + 0x1B80, 0x65081DA5, + 0x1B80, 0x65081DA7, + 0xB0000000, 0x00000000, + 0x80000002, 0x00000000, 0x40000000, 0x00000000, + 0x1B80, 0x64481DB5, + 0x1B80, 0x64481DB7, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x1B80, 0x64481DB5, + 0x1B80, 0x64481DB7, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x1B80, 0x64481DB5, + 0x1B80, 0x64481DB7, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x1B80, 0x64481DB5, + 0x1B80, 0x64481DB7, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x1B80, 0x64481DB5, + 0x1B80, 0x64481DB7, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0x1B80, 0x64481DB5, + 0x1B80, 0x64481DB7, + 0xA0000000, 0x00000000, + 0x1B80, 0x64471DB5, + 0x1B80, 0x64471DB7, + 0xB0000000, 0x00000000, + 0x1B80, 0x23411DC5, + 0x1B80, 0x23411DC7, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x1B80, 0x11E41DD5, + 0x1B80, 0x11E41DD7, + 0x90000002, 0x00000000, 0x40000000, 0x00000000, + 0x1B80, 0x11E81DD5, + 0x1B80, 0x11E81DD7, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x1B80, 0x11E81DD5, + 0x1B80, 0x11E81DD7, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x1B80, 0x11E81DD5, + 0x1B80, 0x11E81DD7, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x1B80, 0x11E81DD5, + 0x1B80, 0x11E81DD7, + 0x90000006, 0x00000000, 0x40000000, 0x00000000, + 0x1B80, 0x11E41DD5, + 0x1B80, 0x11E41DD7, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x1B80, 0x11E81DD5, + 0x1B80, 0x11E81DD7, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0x1B80, 0x11E81DD5, + 0x1B80, 0x11E81DD7, + 0xA0000000, 0x00000000, + 0x1B80, 0x11E41DD5, + 0x1B80, 0x11E41DD7, + 0xB0000000, 0x00000000, + 0x1B80, 0x60011DE5, + 0x1B80, 0x60011DE7, + 0x1B80, 0x23411DF5, + 0x1B80, 0x23411DF7, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x1B80, 0x60E11E05, + 0x1B80, 0x60E11E07, + 0x90000002, 0x00000000, 0x40000000, 0x00000000, + 0x1B80, 0x61E11E05, + 0x1B80, 0x61E11E07, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x1B80, 0x61E11E05, + 0x1B80, 0x61E11E07, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x1B80, 0x61E11E05, + 0x1B80, 0x61E11E07, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x1B80, 0x61E11E05, + 0x1B80, 0x61E11E07, + 0x90000006, 0x00000000, 0x40000000, 0x00000000, + 0x1B80, 0x60E11E05, + 0x1B80, 0x60E11E07, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x1B80, 0x61E11E05, + 0x1B80, 0x61E11E07, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0x1B80, 0x61E11E05, + 0x1B80, 0x61E11E07, + 0xA0000000, 0x00000000, + 0x1B80, 0x60E11E05, + 0x1B80, 0x60E11E07, + 0xB0000000, 0x00000000, + 0x1B80, 0x23411E15, + 0x1B80, 0x23411E17, + 0x1B80, 0x70611E25, + 0x1B80, 0x70611E27, + 0x1B80, 0x4D001E35, + 0x1B80, 0x4D001E37, + 0x1B80, 0x00011E45, + 0x1B80, 0x00011E47, + 0x1B80, 0x00001E55, + 0x1B80, 0x00001E57, + 0x1B80, 0x00001E65, + 0x1B80, 0x00001E67, + 0x1B80, 0x00001E75, + 0x1B80, 0x00001E77, + 0x1B80, 0x00001E85, + 0x1B80, 0x00001E87, + 0x1B80, 0x00001E95, + 0x1B80, 0x00001E97, + 0x1B80, 0x00001EA5, + 0x1B80, 0x00001EA7, + 0x1B80, 0x00001EB5, + 0x1B80, 0x00001EB7, + 0x1B80, 0x00001EC5, + 0x1B80, 0x00001EC7, + 0x1B80, 0x00001ED5, + 0x1B80, 0x00001ED7, + 0x1B80, 0x00001EE5, + 0x1B80, 0x00001EE7, + 0x1B80, 0x00001EF5, + 0x1B80, 0x00001EF7, + 0x1B80, 0x00001F05, + 0x1B80, 0x00001F07, + 0x1B80, 0x00001F15, + 0x1B80, 0x00001F17, + 0x1B80, 0x00001F25, + 0x1B80, 0x00001F27, + 0x1B80, 0x00001F35, + 0x1B80, 0x00001F37, + 0x1B80, 0x00001F45, + 0x1B80, 0x00001F47, + 0x1B80, 0x00001F55, + 0x1B80, 0x00001F57, + 0x1B80, 0x00001F65, + 0x1B80, 0x00001F67, + 0x1B80, 
0x00001F75, + 0x1B80, 0x00001F77, + 0x1B80, 0x00001F85, + 0x1B80, 0x00001F87, + 0x1B80, 0x00001F95, + 0x1B80, 0x00001F97, + 0x1B80, 0x00001FA5, + 0x1B80, 0x00001FA7, + 0x1B80, 0x00001FB5, + 0x1B80, 0x00001FB7, + 0x1B80, 0x00001FC5, + 0x1B80, 0x00001FC7, + 0x1B80, 0x00001FD5, + 0x1B80, 0x00001FD7, + 0x1B80, 0x00001FE5, + 0x1B80, 0x00001FE7, + 0x1B80, 0x00001FF5, + 0x1B80, 0x00001FF7, + 0x1B80, 0x00000006, + 0x1B80, 0x00000002, +}; + +RTW_DECL_TABLE_PHY_COND(rtw8814a_bb, rtw_phy_cfg_bb); + +static const struct rtw_phy_pg_cfg_pair rtw8814a_bb_pg[] = { + { 0, 0, 0, 0x00000c20, 0xffffffff, 0x34343434, }, + { 0, 0, 0, 0x00000c24, 0xffffffff, 0x34343434, }, + { 0, 0, 0, 0x00000c28, 0xffffffff, 0x30323434, }, + { 0, 0, 0, 0x00000c2c, 0xffffffff, 0x34343434, }, + { 0, 0, 0, 0x00000c30, 0xffffffff, 0x28303234, }, + { 0, 0, 1, 0x00000c34, 0xffffffff, 0x32323232, }, + { 0, 0, 1, 0x00000c38, 0xffffffff, 0x26283032, }, + { 0, 0, 2, 0x00000cd8, 0xffffffff, 0x30303030, }, + { 0, 0, 2, 0x00000cdc, 0xffffffff, 0x24262830, }, + { 0, 0, 0, 0x00000c3c, 0xffffffff, 0x34343434, }, + { 0, 0, 0, 0x00000c40, 0xffffffff, 0x28303234, }, + { 0, 0, 0, 0x00000c44, 0xffffffff, 0x32322426, }, + { 0, 0, 1, 0x00000c48, 0xffffffff, 0x30323232, }, + { 0, 0, 1, 0x00000c4c, 0xffffffff, 0x22242628, }, + { 0, 0, 2, 0x00000ce0, 0xffffffff, 0x30303030, }, + { 0, 0, 2, 0x00000ce4, 0xffffffff, 0x24262830, }, + { 0, 0, 2, 0x00000ce8, 0x0000ffff, 0x20222222, }, + { 0, 1, 0, 0x00000e20, 0xffffffff, 0x34343434, }, + { 0, 1, 0, 0x00000e24, 0xffffffff, 0x34343434, }, + { 0, 1, 0, 0x00000e28, 0xffffffff, 0x30323434, }, + { 0, 1, 0, 0x00000e2c, 0xffffffff, 0x34343434, }, + { 0, 1, 0, 0x00000e30, 0xffffffff, 0x28303234, }, + { 0, 1, 1, 0x00000e34, 0xffffffff, 0x32323232, }, + { 0, 1, 1, 0x00000e38, 0xffffffff, 0x26283032, }, + { 0, 1, 2, 0x00000ed8, 0xffffffff, 0x30303030, }, + { 0, 1, 2, 0x00000edc, 0xffffffff, 0x24262830, }, + { 0, 1, 0, 0x00000e3c, 0xffffffff, 0x34343434, }, + { 0, 1, 0, 0x00000e40, 0xffffffff, 0x28303234, }, + { 0, 1, 0, 0x00000e44, 0xffffffff, 0x32322426, }, + { 0, 1, 1, 0x00000e48, 0xffffffff, 0x30323232, }, + { 0, 1, 1, 0x00000e4c, 0xffffffff, 0x22242628, }, + { 0, 1, 2, 0x00000ee0, 0xffffffff, 0x30303030, }, + { 0, 1, 2, 0x00000ee4, 0xffffffff, 0x24262830, }, + { 0, 1, 2, 0x00000ee8, 0x0000ffff, 0x20222222, }, + { 0, 2, 0, 0x00001820, 0xffffffff, 0x34343434, }, + { 0, 2, 0, 0x00001824, 0xffffffff, 0x34343434, }, + { 0, 2, 0, 0x00001828, 0xffffffff, 0x30323434, }, + { 0, 2, 0, 0x0000182c, 0xffffffff, 0x34343434, }, + { 0, 2, 0, 0x00001830, 0xffffffff, 0x28303234, }, + { 0, 2, 1, 0x00001834, 0xffffffff, 0x32323232, }, + { 0, 2, 1, 0x00001838, 0xffffffff, 0x26283032, }, + { 0, 2, 2, 0x000018d8, 0xffffffff, 0x30303030, }, + { 0, 2, 2, 0x000018dc, 0xffffffff, 0x24262830, }, + { 0, 2, 0, 0x0000183c, 0xffffffff, 0x34343434, }, + { 0, 2, 0, 0x00001840, 0xffffffff, 0x28303234, }, + { 0, 2, 0, 0x00001844, 0xffffffff, 0x32322426, }, + { 0, 2, 1, 0x00001848, 0xffffffff, 0x30323232, }, + { 0, 2, 1, 0x0000184c, 0xffffffff, 0x22242628, }, + { 0, 2, 2, 0x000018e0, 0xffffffff, 0x30303030, }, + { 0, 2, 2, 0x000018e4, 0xffffffff, 0x24262830, }, + { 0, 2, 2, 0x000018e8, 0x0000ffff, 0x20222222, }, + { 0, 3, 0, 0x00001a20, 0xffffffff, 0x34343434, }, + { 0, 3, 0, 0x00001a24, 0xffffffff, 0x34343434, }, + { 0, 3, 0, 0x00001a28, 0xffffffff, 0x30323434, }, + { 0, 3, 0, 0x00001a2c, 0xffffffff, 0x34343434, }, + { 0, 3, 0, 0x00001a30, 0xffffffff, 0x28303234, }, + { 0, 3, 1, 0x00001a34, 0xffffffff, 0x32323232, }, + { 0, 3, 1, 0x00001a38, 
0xffffffff, 0x26283032, }, + { 0, 3, 2, 0x00001ad8, 0xffffffff, 0x30303030, }, + { 0, 3, 2, 0x00001adc, 0xffffffff, 0x24262830, }, + { 0, 3, 0, 0x00001a3c, 0xffffffff, 0x34343434, }, + { 0, 3, 0, 0x00001a40, 0xffffffff, 0x28303234, }, + { 0, 3, 0, 0x00001a44, 0xffffffff, 0x32322426, }, + { 0, 3, 1, 0x00001a48, 0xffffffff, 0x30323232, }, + { 0, 3, 1, 0x00001a4c, 0xffffffff, 0x22242628, }, + { 0, 3, 2, 0x00001ae0, 0xffffffff, 0x30303030, }, + { 0, 3, 2, 0x00001ae4, 0xffffffff, 0x24262830, }, + { 0, 3, 2, 0x00001ae8, 0x0000ffff, 0x20222222, }, + { 1, 0, 0, 0x00000c24, 0xffffffff, 0x34343434, }, + { 1, 0, 0, 0x00000c28, 0xffffffff, 0x30323434, }, + { 1, 0, 0, 0x00000c2c, 0xffffffff, 0x34343434, }, + { 1, 0, 0, 0x00000c30, 0xffffffff, 0x28303234, }, + { 1, 0, 1, 0x00000c34, 0xffffffff, 0x32323232, }, + { 1, 0, 1, 0x00000c38, 0xffffffff, 0x26283032, }, + { 1, 0, 2, 0x00000cd8, 0xffffffff, 0x30303030, }, + { 1, 0, 2, 0x00000cdc, 0xffffffff, 0x24262830, }, + { 1, 0, 0, 0x00000c3c, 0xffffffff, 0x34343434, }, + { 1, 0, 0, 0x00000c40, 0xffffffff, 0x28303234, }, + { 1, 0, 0, 0x00000c44, 0xffffffff, 0x32322426, }, + { 1, 0, 1, 0x00000c48, 0xffffffff, 0x30323232, }, + { 1, 0, 1, 0x00000c4c, 0xffffffff, 0x22242628, }, + { 1, 0, 2, 0x00000ce0, 0xffffffff, 0x30303030, }, + { 1, 0, 2, 0x00000ce4, 0xffffffff, 0x24262830, }, + { 1, 0, 2, 0x00000ce8, 0x0000ffff, 0x20222222, }, + { 1, 1, 0, 0x00000e24, 0xffffffff, 0x34343434, }, + { 1, 1, 0, 0x00000e28, 0xffffffff, 0x30323434, }, + { 1, 1, 0, 0x00000e2c, 0xffffffff, 0x34343434, }, + { 1, 1, 0, 0x00000e30, 0xffffffff, 0x28303234, }, + { 1, 1, 1, 0x00000e34, 0xffffffff, 0x32323232, }, + { 1, 1, 1, 0x00000e38, 0xffffffff, 0x26283032, }, + { 1, 1, 2, 0x00000ed8, 0xffffffff, 0x30303030, }, + { 1, 1, 2, 0x00000edc, 0xffffffff, 0x24262830, }, + { 1, 1, 0, 0x00000e3c, 0xffffffff, 0x34343434, }, + { 1, 1, 0, 0x00000e40, 0xffffffff, 0x28303234, }, + { 1, 1, 0, 0x00000e44, 0xffffffff, 0x32322426, }, + { 1, 1, 1, 0x00000e48, 0xffffffff, 0x30323232, }, + { 1, 1, 1, 0x00000e4c, 0xffffffff, 0x22242628, }, + { 1, 1, 2, 0x00000ee0, 0xffffffff, 0x30303030, }, + { 1, 1, 2, 0x00000ee4, 0xffffffff, 0x24262830, }, + { 1, 1, 2, 0x00000ee8, 0x0000ffff, 0x20222222, }, + { 1, 2, 0, 0x00001824, 0xffffffff, 0x34343434, }, + { 1, 2, 0, 0x00001828, 0xffffffff, 0x30323434, }, + { 1, 2, 0, 0x0000182c, 0xffffffff, 0x34343434, }, + { 1, 2, 0, 0x00001830, 0xffffffff, 0x28303234, }, + { 1, 2, 1, 0x00001834, 0xffffffff, 0x32323232, }, + { 1, 2, 1, 0x00001838, 0xffffffff, 0x26283032, }, + { 1, 2, 2, 0x000018d8, 0xffffffff, 0x30303030, }, + { 1, 2, 2, 0x000018dc, 0xffffffff, 0x24262830, }, + { 1, 2, 0, 0x0000183c, 0xffffffff, 0x34343434, }, + { 1, 2, 0, 0x00001840, 0xffffffff, 0x28303234, }, + { 1, 2, 0, 0x00001844, 0xffffffff, 0x32322426, }, + { 1, 2, 1, 0x00001848, 0xffffffff, 0x30323232, }, + { 1, 2, 1, 0x0000184c, 0xffffffff, 0x22242628, }, + { 1, 2, 2, 0x000018e0, 0xffffffff, 0x30303030, }, + { 1, 2, 2, 0x000018e4, 0xffffffff, 0x24262830, }, + { 1, 2, 2, 0x000018e8, 0x0000ffff, 0x20222222, }, + { 1, 3, 0, 0x00001a24, 0xffffffff, 0x34343434, }, + { 1, 3, 0, 0x00001a28, 0xffffffff, 0x30323434, }, + { 1, 3, 0, 0x00001a2c, 0xffffffff, 0x34343434, }, + { 1, 3, 0, 0x00001a30, 0xffffffff, 0x28303234, }, + { 1, 3, 1, 0x00001a34, 0xffffffff, 0x32323232, }, + { 1, 3, 1, 0x00001a38, 0xffffffff, 0x26283032, }, + { 1, 3, 2, 0x00001ad8, 0xffffffff, 0x30303030, }, + { 1, 3, 2, 0x00001adc, 0xffffffff, 0x24262830, }, + { 1, 3, 0, 0x00001a3c, 0xffffffff, 0x34343434, }, + { 1, 3, 0, 0x00001a40, 0xffffffff, 
0x28303234, }, + { 1, 3, 0, 0x00001a44, 0xffffffff, 0x32322426, }, + { 1, 3, 1, 0x00001a48, 0xffffffff, 0x30323232, }, + { 1, 3, 1, 0x00001a4c, 0xffffffff, 0x22242628, }, + { 1, 3, 2, 0x00001ae0, 0xffffffff, 0x30303030, }, + { 1, 3, 2, 0x00001ae4, 0xffffffff, 0x24262830, }, + { 1, 3, 2, 0x00001ae8, 0x0000ffff, 0x20222222, }, +}; + +RTW_DECL_TABLE_BB_PG(rtw8814a_bb_pg); + +static const struct rtw_phy_pg_cfg_pair rtw8814a_bb_pg_type0[] = { + { 0, 0, 0, 0x00000c20, 0xffffffff, 0x32323232, }, + { 0, 0, 0, 0x00000c24, 0xffffffff, 0x32323232, }, + { 0, 0, 0, 0x00000c28, 0xffffffff, 0x28303232, }, + { 0, 0, 0, 0x00000c2c, 0xffffffff, 0x32323232, }, + { 0, 0, 0, 0x00000c30, 0xffffffff, 0x26283032, }, + { 0, 0, 1, 0x00000c34, 0xffffffff, 0x30303030, }, + { 0, 0, 1, 0x00000c38, 0xffffffff, 0x24262830, }, + { 0, 0, 2, 0x00000cd8, 0xffffffff, 0x28282828, }, + { 0, 0, 2, 0x00000cdc, 0xffffffff, 0x22242628, }, + { 0, 0, 0, 0x00000c3c, 0xffffffff, 0x32323232, }, + { 0, 0, 0, 0x00000c40, 0xffffffff, 0x26283032, }, + { 0, 0, 0, 0x00000c44, 0xffffffff, 0x30302224, }, + { 0, 0, 1, 0x00000c48, 0xffffffff, 0x28303030, }, + { 0, 0, 1, 0x00000c4c, 0xffffffff, 0x20222426, }, + { 0, 0, 2, 0x00000ce0, 0xffffffff, 0x28282828, }, + { 0, 0, 2, 0x00000ce4, 0xffffffff, 0x22242628, }, + { 0, 0, 2, 0x00000ce8, 0x0000ffff, 0x18202020, }, + { 0, 1, 0, 0x00000e20, 0xffffffff, 0x32323232, }, + { 0, 1, 0, 0x00000e24, 0xffffffff, 0x32323232, }, + { 0, 1, 0, 0x00000e28, 0xffffffff, 0x28303232, }, + { 0, 1, 0, 0x00000e2c, 0xffffffff, 0x32323232, }, + { 0, 1, 0, 0x00000e30, 0xffffffff, 0x26283032, }, + { 0, 1, 1, 0x00000e34, 0xffffffff, 0x30303030, }, + { 0, 1, 1, 0x00000e38, 0xffffffff, 0x24262830, }, + { 0, 1, 2, 0x00000ed8, 0xffffffff, 0x28282828, }, + { 0, 1, 2, 0x00000edc, 0xffffffff, 0x22242628, }, + { 0, 1, 0, 0x00000e3c, 0xffffffff, 0x32323232, }, + { 0, 1, 0, 0x00000e40, 0xffffffff, 0x26283032, }, + { 0, 1, 0, 0x00000e44, 0xffffffff, 0x30302224, }, + { 0, 1, 1, 0x00000e48, 0xffffffff, 0x28303030, }, + { 0, 1, 1, 0x00000e4c, 0xffffffff, 0x20222426, }, + { 0, 1, 2, 0x00000ee0, 0xffffffff, 0x28282828, }, + { 0, 1, 2, 0x00000ee4, 0xffffffff, 0x22242628, }, + { 0, 1, 2, 0x00000ee8, 0x0000ffff, 0x18202020, }, + { 0, 2, 0, 0x00001820, 0xffffffff, 0x32323232, }, + { 0, 2, 0, 0x00001824, 0xffffffff, 0x32323232, }, + { 0, 2, 0, 0x00001828, 0xffffffff, 0x28303232, }, + { 0, 2, 0, 0x0000182c, 0xffffffff, 0x32323232, }, + { 0, 2, 0, 0x00001830, 0xffffffff, 0x26283032, }, + { 0, 2, 1, 0x00001834, 0xffffffff, 0x30303030, }, + { 0, 2, 1, 0x00001838, 0xffffffff, 0x24262830, }, + { 0, 2, 2, 0x000018d8, 0xffffffff, 0x28282828, }, + { 0, 2, 2, 0x000018dc, 0xffffffff, 0x22242628, }, + { 0, 2, 0, 0x0000183c, 0xffffffff, 0x32323232, }, + { 0, 2, 0, 0x00001840, 0xffffffff, 0x26283032, }, + { 0, 2, 0, 0x00001844, 0xffffffff, 0x30302224, }, + { 0, 2, 1, 0x00001848, 0xffffffff, 0x28303030, }, + { 0, 2, 1, 0x0000184c, 0xffffffff, 0x20222426, }, + { 0, 2, 2, 0x000018e0, 0xffffffff, 0x28282828, }, + { 0, 2, 2, 0x000018e4, 0xffffffff, 0x22242628, }, + { 0, 2, 2, 0x000018e8, 0x0000ffff, 0x18202020, }, + { 0, 3, 0, 0x00001a20, 0xffffffff, 0x32323232, }, + { 0, 3, 0, 0x00001a24, 0xffffffff, 0x32323232, }, + { 0, 3, 0, 0x00001a28, 0xffffffff, 0x28303232, }, + { 0, 3, 0, 0x00001a2c, 0xffffffff, 0x32323232, }, + { 0, 3, 0, 0x00001a30, 0xffffffff, 0x26283032, }, + { 0, 3, 1, 0x00001a34, 0xffffffff, 0x30303030, }, + { 0, 3, 1, 0x00001a38, 0xffffffff, 0x24262830, }, + { 0, 3, 2, 0x00001ad8, 0xffffffff, 0x28282828, }, + { 0, 3, 2, 0x00001adc, 0xffffffff, 
0x22242628, }, + { 0, 3, 0, 0x00001a3c, 0xffffffff, 0x32323232, }, + { 0, 3, 0, 0x00001a40, 0xffffffff, 0x26283032, }, + { 0, 3, 0, 0x00001a44, 0xffffffff, 0x30302224, }, + { 0, 3, 1, 0x00001a48, 0xffffffff, 0x28303030, }, + { 0, 3, 1, 0x00001a4c, 0xffffffff, 0x20222426, }, + { 0, 3, 2, 0x00001ae0, 0xffffffff, 0x28282828, }, + { 0, 3, 2, 0x00001ae4, 0xffffffff, 0x22242628, }, + { 0, 3, 2, 0x00001ae8, 0x0000ffff, 0x18202020, }, + { 1, 0, 0, 0x00000c24, 0xffffffff, 0x32323232, }, + { 1, 0, 0, 0x00000c28, 0xffffffff, 0x28303232, }, + { 1, 0, 0, 0x00000c2c, 0xffffffff, 0x32323232, }, + { 1, 0, 0, 0x00000c30, 0xffffffff, 0x26283032, }, + { 1, 0, 1, 0x00000c34, 0xffffffff, 0x30303030, }, + { 1, 0, 1, 0x00000c38, 0xffffffff, 0x24262830, }, + { 1, 0, 2, 0x00000cd8, 0xffffffff, 0x28282828, }, + { 1, 0, 2, 0x00000cdc, 0xffffffff, 0x22242628, }, + { 1, 0, 0, 0x00000c3c, 0xffffffff, 0x32323232, }, + { 1, 0, 0, 0x00000c40, 0xffffffff, 0x26283032, }, + { 1, 0, 0, 0x00000c44, 0xffffffff, 0x30302224, }, + { 1, 0, 1, 0x00000c48, 0xffffffff, 0x28303030, }, + { 1, 0, 1, 0x00000c4c, 0xffffffff, 0x20222426, }, + { 1, 0, 2, 0x00000ce0, 0xffffffff, 0x28282828, }, + { 1, 0, 2, 0x00000ce4, 0xffffffff, 0x22242628, }, + { 1, 0, 2, 0x00000ce8, 0x0000ffff, 0x18202020, }, + { 1, 1, 0, 0x00000e24, 0xffffffff, 0x32323232, }, + { 1, 1, 0, 0x00000e28, 0xffffffff, 0x28303232, }, + { 1, 1, 0, 0x00000e2c, 0xffffffff, 0x32323232, }, + { 1, 1, 0, 0x00000e30, 0xffffffff, 0x26283032, }, + { 1, 1, 1, 0x00000e34, 0xffffffff, 0x30303030, }, + { 1, 1, 1, 0x00000e38, 0xffffffff, 0x24262830, }, + { 1, 1, 2, 0x00000ed8, 0xffffffff, 0x28282828, }, + { 1, 1, 2, 0x00000edc, 0xffffffff, 0x22242628, }, + { 1, 1, 0, 0x00000e3c, 0xffffffff, 0x32323232, }, + { 1, 1, 0, 0x00000e40, 0xffffffff, 0x26283032, }, + { 1, 1, 0, 0x00000e44, 0xffffffff, 0x30302224, }, + { 1, 1, 1, 0x00000e48, 0xffffffff, 0x28303030, }, + { 1, 1, 1, 0x00000e4c, 0xffffffff, 0x20222426, }, + { 1, 1, 2, 0x00000ee0, 0xffffffff, 0x28282828, }, + { 1, 1, 2, 0x00000ee4, 0xffffffff, 0x22242628, }, + { 1, 1, 2, 0x00000ee8, 0x0000ffff, 0x18202020, }, + { 1, 2, 0, 0x00001824, 0xffffffff, 0x32323232, }, + { 1, 2, 0, 0x00001828, 0xffffffff, 0x28303232, }, + { 1, 2, 0, 0x0000182c, 0xffffffff, 0x32323232, }, + { 1, 2, 0, 0x00001830, 0xffffffff, 0x26283032, }, + { 1, 2, 1, 0x00001834, 0xffffffff, 0x30303030, }, + { 1, 2, 1, 0x00001838, 0xffffffff, 0x24262830, }, + { 1, 2, 2, 0x000018d8, 0xffffffff, 0x28282828, }, + { 1, 2, 2, 0x000018dc, 0xffffffff, 0x22242628, }, + { 1, 2, 0, 0x0000183c, 0xffffffff, 0x32323232, }, + { 1, 2, 0, 0x00001840, 0xffffffff, 0x26283032, }, + { 1, 2, 0, 0x00001844, 0xffffffff, 0x30302224, }, + { 1, 2, 1, 0x00001848, 0xffffffff, 0x28303030, }, + { 1, 2, 1, 0x0000184c, 0xffffffff, 0x20222426, }, + { 1, 2, 2, 0x000018e0, 0xffffffff, 0x28282828, }, + { 1, 2, 2, 0x000018e4, 0xffffffff, 0x22242628, }, + { 1, 2, 2, 0x000018e8, 0x0000ffff, 0x18202020, }, + { 1, 3, 0, 0x00001a24, 0xffffffff, 0x32323232, }, + { 1, 3, 0, 0x00001a28, 0xffffffff, 0x28303232, }, + { 1, 3, 0, 0x00001a2c, 0xffffffff, 0x32323232, }, + { 1, 3, 0, 0x00001a30, 0xffffffff, 0x26283032, }, + { 1, 3, 1, 0x00001a34, 0xffffffff, 0x30303030, }, + { 1, 3, 1, 0x00001a38, 0xffffffff, 0x24262830, }, + { 1, 3, 2, 0x00001ad8, 0xffffffff, 0x28282828, }, + { 1, 3, 2, 0x00001adc, 0xffffffff, 0x22242628, }, + { 1, 3, 0, 0x00001a3c, 0xffffffff, 0x32323232, }, + { 1, 3, 0, 0x00001a40, 0xffffffff, 0x26283032, }, + { 1, 3, 0, 0x00001a44, 0xffffffff, 0x30302224, }, + { 1, 3, 1, 0x00001a48, 0xffffffff, 0x28303030, }, + { 
1, 3, 1, 0x00001a4c, 0xffffffff, 0x20222426, }, + { 1, 3, 2, 0x00001ae0, 0xffffffff, 0x28282828, }, + { 1, 3, 2, 0x00001ae4, 0xffffffff, 0x22242628, }, + { 1, 3, 2, 0x00001ae8, 0x0000ffff, 0x18202020, }, +}; + +RTW_DECL_TABLE_BB_PG(rtw8814a_bb_pg_type0); + +static const struct rtw_phy_pg_cfg_pair rtw8814a_bb_pg_type2[] = { + { 0, 0, 0, 0x00000c20, 0xffffffff, 0x34343434, }, + { 0, 0, 0, 0x00000c24, 0xffffffff, 0x34343434, }, + { 0, 0, 0, 0x00000c28, 0xffffffff, 0x30323434, }, + { 0, 0, 0, 0x00000c2c, 0xffffffff, 0x34343434, }, + { 0, 0, 0, 0x00000c30, 0xffffffff, 0x28303234, }, + { 0, 0, 1, 0x00000c34, 0xffffffff, 0x32323232, }, + { 0, 0, 1, 0x00000c38, 0xffffffff, 0x26283032, }, + { 0, 0, 2, 0x00000cd8, 0xffffffff, 0x30303030, }, + { 0, 0, 2, 0x00000cdc, 0xffffffff, 0x24262830, }, + { 0, 0, 0, 0x00000c3c, 0xffffffff, 0x34343434, }, + { 0, 0, 0, 0x00000c40, 0xffffffff, 0x28303234, }, + { 0, 0, 0, 0x00000c44, 0xffffffff, 0x32322426, }, + { 0, 0, 1, 0x00000c48, 0xffffffff, 0x30323232, }, + { 0, 0, 1, 0x00000c4c, 0xffffffff, 0x22242628, }, + { 0, 0, 2, 0x00000ce0, 0xffffffff, 0x30303030, }, + { 0, 0, 2, 0x00000ce4, 0xffffffff, 0x24262830, }, + { 0, 0, 2, 0x00000ce8, 0x0000ffff, 0x20222222, }, + { 0, 1, 0, 0x00000e20, 0xffffffff, 0x34343434, }, + { 0, 1, 0, 0x00000e24, 0xffffffff, 0x34343434, }, + { 0, 1, 0, 0x00000e28, 0xffffffff, 0x30323434, }, + { 0, 1, 0, 0x00000e2c, 0xffffffff, 0x34343434, }, + { 0, 1, 0, 0x00000e30, 0xffffffff, 0x28303234, }, + { 0, 1, 1, 0x00000e34, 0xffffffff, 0x32323232, }, + { 0, 1, 1, 0x00000e38, 0xffffffff, 0x26283032, }, + { 0, 1, 2, 0x00000ed8, 0xffffffff, 0x30303030, }, + { 0, 1, 2, 0x00000edc, 0xffffffff, 0x24262830, }, + { 0, 1, 0, 0x00000e3c, 0xffffffff, 0x34343434, }, + { 0, 1, 0, 0x00000e40, 0xffffffff, 0x28303234, }, + { 0, 1, 0, 0x00000e44, 0xffffffff, 0x32322426, }, + { 0, 1, 1, 0x00000e48, 0xffffffff, 0x30323232, }, + { 0, 1, 1, 0x00000e4c, 0xffffffff, 0x22242628, }, + { 0, 1, 2, 0x00000ee0, 0xffffffff, 0x30303030, }, + { 0, 1, 2, 0x00000ee4, 0xffffffff, 0x24262830, }, + { 0, 1, 2, 0x00000ee8, 0x0000ffff, 0x20222222, }, + { 0, 2, 0, 0x00001820, 0xffffffff, 0x34343434, }, + { 0, 2, 0, 0x00001824, 0xffffffff, 0x34343434, }, + { 0, 2, 0, 0x00001828, 0xffffffff, 0x30323434, }, + { 0, 2, 0, 0x0000182c, 0xffffffff, 0x34343434, }, + { 0, 2, 0, 0x00001830, 0xffffffff, 0x28303234, }, + { 0, 2, 1, 0x00001834, 0xffffffff, 0x32323232, }, + { 0, 2, 1, 0x00001838, 0xffffffff, 0x26283032, }, + { 0, 2, 2, 0x000018d8, 0xffffffff, 0x30303030, }, + { 0, 2, 2, 0x000018dc, 0xffffffff, 0x24262830, }, + { 0, 2, 0, 0x0000183c, 0xffffffff, 0x34343434, }, + { 0, 2, 0, 0x00001840, 0xffffffff, 0x28303234, }, + { 0, 2, 0, 0x00001844, 0xffffffff, 0x32322426, }, + { 0, 2, 1, 0x00001848, 0xffffffff, 0x30323232, }, + { 0, 2, 1, 0x0000184c, 0xffffffff, 0x22242628, }, + { 0, 2, 2, 0x000018e0, 0xffffffff, 0x30303030, }, + { 0, 2, 2, 0x000018e4, 0xffffffff, 0x24262830, }, + { 0, 2, 2, 0x000018e8, 0x0000ffff, 0x20222222, }, + { 0, 3, 0, 0x00001a20, 0xffffffff, 0x34343434, }, + { 0, 3, 0, 0x00001a24, 0xffffffff, 0x34343434, }, + { 0, 3, 0, 0x00001a28, 0xffffffff, 0x30323434, }, + { 0, 3, 0, 0x00001a2c, 0xffffffff, 0x34343434, }, + { 0, 3, 0, 0x00001a30, 0xffffffff, 0x28303234, }, + { 0, 3, 1, 0x00001a34, 0xffffffff, 0x32323232, }, + { 0, 3, 1, 0x00001a38, 0xffffffff, 0x26283032, }, + { 0, 3, 2, 0x00001ad8, 0xffffffff, 0x30303030, }, + { 0, 3, 2, 0x00001adc, 0xffffffff, 0x24262830, }, + { 0, 3, 0, 0x00001a3c, 0xffffffff, 0x34343434, }, + { 0, 3, 0, 0x00001a40, 0xffffffff, 0x28303234, }, + { 
0, 3, 0, 0x00001a44, 0xffffffff, 0x32322426, }, + { 0, 3, 1, 0x00001a48, 0xffffffff, 0x30323232, }, + { 0, 3, 1, 0x00001a4c, 0xffffffff, 0x22242628, }, + { 0, 3, 2, 0x00001ae0, 0xffffffff, 0x30303030, }, + { 0, 3, 2, 0x00001ae4, 0xffffffff, 0x24262830, }, + { 0, 3, 2, 0x00001ae8, 0x0000ffff, 0x20222222, }, + { 1, 0, 0, 0x00000c24, 0xffffffff, 0x34343434, }, + { 1, 0, 0, 0x00000c28, 0xffffffff, 0x30323434, }, + { 1, 0, 0, 0x00000c2c, 0xffffffff, 0x34343434, }, + { 1, 0, 0, 0x00000c30, 0xffffffff, 0x28303234, }, + { 1, 0, 1, 0x00000c34, 0xffffffff, 0x32323232, }, + { 1, 0, 1, 0x00000c38, 0xffffffff, 0x26283032, }, + { 1, 0, 2, 0x00000cd8, 0xffffffff, 0x30303030, }, + { 1, 0, 2, 0x00000cdc, 0xffffffff, 0x24262830, }, + { 1, 0, 0, 0x00000c3c, 0xffffffff, 0x34343434, }, + { 1, 0, 0, 0x00000c40, 0xffffffff, 0x28303234, }, + { 1, 0, 0, 0x00000c44, 0xffffffff, 0x32322426, }, + { 1, 0, 1, 0x00000c48, 0xffffffff, 0x30323232, }, + { 1, 0, 1, 0x00000c4c, 0xffffffff, 0x22242628, }, + { 1, 0, 2, 0x00000ce0, 0xffffffff, 0x30303030, }, + { 1, 0, 2, 0x00000ce4, 0xffffffff, 0x24262830, }, + { 1, 0, 2, 0x00000ce8, 0x0000ffff, 0x20222222, }, + { 1, 1, 0, 0x00000e24, 0xffffffff, 0x34343434, }, + { 1, 1, 0, 0x00000e28, 0xffffffff, 0x30323434, }, + { 1, 1, 0, 0x00000e2c, 0xffffffff, 0x34343434, }, + { 1, 1, 0, 0x00000e30, 0xffffffff, 0x28303234, }, + { 1, 1, 1, 0x00000e34, 0xffffffff, 0x32323232, }, + { 1, 1, 1, 0x00000e38, 0xffffffff, 0x26283032, }, + { 1, 1, 2, 0x00000ed8, 0xffffffff, 0x30303030, }, + { 1, 1, 2, 0x00000edc, 0xffffffff, 0x24262830, }, + { 1, 1, 0, 0x00000e3c, 0xffffffff, 0x34343434, }, + { 1, 1, 0, 0x00000e40, 0xffffffff, 0x28303234, }, + { 1, 1, 0, 0x00000e44, 0xffffffff, 0x32322426, }, + { 1, 1, 1, 0x00000e48, 0xffffffff, 0x30323232, }, + { 1, 1, 1, 0x00000e4c, 0xffffffff, 0x22242628, }, + { 1, 1, 2, 0x00000ee0, 0xffffffff, 0x30303030, }, + { 1, 1, 2, 0x00000ee4, 0xffffffff, 0x24262830, }, + { 1, 1, 2, 0x00000ee8, 0x0000ffff, 0x20222222, }, + { 1, 2, 0, 0x00001824, 0xffffffff, 0x34343434, }, + { 1, 2, 0, 0x00001828, 0xffffffff, 0x30323434, }, + { 1, 2, 0, 0x0000182c, 0xffffffff, 0x34343434, }, + { 1, 2, 0, 0x00001830, 0xffffffff, 0x28303234, }, + { 1, 2, 1, 0x00001834, 0xffffffff, 0x32323232, }, + { 1, 2, 1, 0x00001838, 0xffffffff, 0x26283032, }, + { 1, 2, 2, 0x000018d8, 0xffffffff, 0x30303030, }, + { 1, 2, 2, 0x000018dc, 0xffffffff, 0x24262830, }, + { 1, 2, 0, 0x0000183c, 0xffffffff, 0x34343434, }, + { 1, 2, 0, 0x00001840, 0xffffffff, 0x28303234, }, + { 1, 2, 0, 0x00001844, 0xffffffff, 0x32322426, }, + { 1, 2, 1, 0x00001848, 0xffffffff, 0x30323232, }, + { 1, 2, 1, 0x0000184c, 0xffffffff, 0x22242628, }, + { 1, 2, 2, 0x000018e0, 0xffffffff, 0x30303030, }, + { 1, 2, 2, 0x000018e4, 0xffffffff, 0x24262830, }, + { 1, 2, 2, 0x000018e8, 0x0000ffff, 0x20222222, }, + { 1, 3, 0, 0x00001a24, 0xffffffff, 0x34343434, }, + { 1, 3, 0, 0x00001a28, 0xffffffff, 0x30323434, }, + { 1, 3, 0, 0x00001a2c, 0xffffffff, 0x34343434, }, + { 1, 3, 0, 0x00001a30, 0xffffffff, 0x28303234, }, + { 1, 3, 1, 0x00001a34, 0xffffffff, 0x32323232, }, + { 1, 3, 1, 0x00001a38, 0xffffffff, 0x26283032, }, + { 1, 3, 2, 0x00001ad8, 0xffffffff, 0x30303030, }, + { 1, 3, 2, 0x00001adc, 0xffffffff, 0x24262830, }, + { 1, 3, 0, 0x00001a3c, 0xffffffff, 0x34343434, }, + { 1, 3, 0, 0x00001a40, 0xffffffff, 0x28303234, }, + { 1, 3, 0, 0x00001a44, 0xffffffff, 0x32322426, }, + { 1, 3, 1, 0x00001a48, 0xffffffff, 0x30323232, }, + { 1, 3, 1, 0x00001a4c, 0xffffffff, 0x22242628, }, + { 1, 3, 2, 0x00001ae0, 0xffffffff, 0x30303030, }, + { 1, 3, 2, 
0x00001ae4, 0xffffffff, 0x24262830, }, + { 1, 3, 2, 0x00001ae8, 0x0000ffff, 0x20222222, }, +}; + +RTW_DECL_TABLE_BB_PG(rtw8814a_bb_pg_type2); + +static const struct rtw_phy_pg_cfg_pair rtw8814a_bb_pg_type3[] = { + { 0, 0, 0, 0x00000c20, 0xffffffff, 0x48484848, }, + { 0, 0, 0, 0x00000c24, 0xffffffff, 0x46464646, }, + { 0, 0, 0, 0x00000c28, 0xffffffff, 0x44464646, }, + { 0, 0, 0, 0x00000c2c, 0xffffffff, 0x46464646, }, + { 0, 0, 0, 0x00000c30, 0xffffffff, 0x42444646, }, + { 0, 0, 1, 0x00000c34, 0xffffffff, 0x46464646, }, + { 0, 0, 1, 0x00000c38, 0xffffffff, 0x42444646, }, + { 0, 0, 2, 0x00000cd8, 0xffffffff, 0x46464646, }, + { 0, 0, 2, 0x00000cdc, 0xffffffff, 0x42444646, }, + { 0, 0, 0, 0x00000c3c, 0xffffffff, 0x46464646, }, + { 0, 0, 0, 0x00000c40, 0xffffffff, 0x42444646, }, + { 0, 0, 0, 0x00000c44, 0xffffffff, 0x46463840, }, + { 0, 0, 1, 0x00000c48, 0xffffffff, 0x46464646, }, + { 0, 0, 1, 0x00000c4c, 0xffffffff, 0x38404244, }, + { 0, 0, 2, 0x00000ce0, 0xffffffff, 0x46464646, }, + { 0, 0, 2, 0x00000ce4, 0xffffffff, 0x42444646, }, + { 0, 0, 2, 0x00000ce8, 0x0000ffff, 0x38383840, }, + { 0, 1, 0, 0x00000e20, 0xffffffff, 0x48484848, }, + { 0, 1, 0, 0x00000e24, 0xffffffff, 0x46464646, }, + { 0, 1, 0, 0x00000e28, 0xffffffff, 0x44464646, }, + { 0, 1, 0, 0x00000e2c, 0xffffffff, 0x46464646, }, + { 0, 1, 0, 0x00000e30, 0xffffffff, 0x42444646, }, + { 0, 1, 1, 0x00000e34, 0xffffffff, 0x46464646, }, + { 0, 1, 1, 0x00000e38, 0xffffffff, 0x42444646, }, + { 0, 1, 2, 0x00000ed8, 0xffffffff, 0x46464646, }, + { 0, 1, 2, 0x00000edc, 0xffffffff, 0x42444646, }, + { 0, 1, 0, 0x00000e3c, 0xffffffff, 0x46464646, }, + { 0, 1, 0, 0x00000e40, 0xffffffff, 0x42444646, }, + { 0, 1, 0, 0x00000e44, 0xffffffff, 0x46463840, }, + { 0, 1, 1, 0x00000e48, 0xffffffff, 0x46464646, }, + { 0, 1, 1, 0x00000e4c, 0xffffffff, 0x38404244, }, + { 0, 1, 2, 0x00000ee0, 0xffffffff, 0x46464646, }, + { 0, 1, 2, 0x00000ee4, 0xffffffff, 0x42444646, }, + { 0, 1, 2, 0x00000ee8, 0x0000ffff, 0x38383840, }, + { 0, 2, 0, 0x00001820, 0xffffffff, 0x48484848, }, + { 0, 2, 0, 0x00001824, 0xffffffff, 0x46464646, }, + { 0, 2, 0, 0x00001828, 0xffffffff, 0x44464646, }, + { 0, 2, 0, 0x0000182c, 0xffffffff, 0x46464646, }, + { 0, 2, 0, 0x00001830, 0xffffffff, 0x42444646, }, + { 0, 2, 1, 0x00001834, 0xffffffff, 0x46464646, }, + { 0, 2, 1, 0x00001838, 0xffffffff, 0x42444646, }, + { 0, 2, 2, 0x000018d8, 0xffffffff, 0x46464646, }, + { 0, 2, 2, 0x000018dc, 0xffffffff, 0x42444646, }, + { 0, 2, 0, 0x0000183c, 0xffffffff, 0x46464646, }, + { 0, 2, 0, 0x00001840, 0xffffffff, 0x42444646, }, + { 0, 2, 0, 0x00001844, 0xffffffff, 0x46463840, }, + { 0, 2, 1, 0x00001848, 0xffffffff, 0x46464646, }, + { 0, 2, 1, 0x0000184c, 0xffffffff, 0x38404244, }, + { 0, 2, 2, 0x000018e0, 0xffffffff, 0x46464646, }, + { 0, 2, 2, 0x000018e4, 0xffffffff, 0x42444646, }, + { 0, 2, 2, 0x000018e8, 0x0000ffff, 0x38383840, }, + { 0, 3, 0, 0x00001a20, 0xffffffff, 0x48484848, }, + { 0, 3, 0, 0x00001a24, 0xffffffff, 0x46464646, }, + { 0, 3, 0, 0x00001a28, 0xffffffff, 0x44464646, }, + { 0, 3, 0, 0x00001a2c, 0xffffffff, 0x46464646, }, + { 0, 3, 0, 0x00001a30, 0xffffffff, 0x42444646, }, + { 0, 3, 1, 0x00001a34, 0xffffffff, 0x46464646, }, + { 0, 3, 1, 0x00001a38, 0xffffffff, 0x42444646, }, + { 0, 3, 2, 0x00001ad8, 0xffffffff, 0x46464646, }, + { 0, 3, 2, 0x00001adc, 0xffffffff, 0x42444646, }, + { 0, 3, 0, 0x00001a3c, 0xffffffff, 0x46464646, }, + { 0, 3, 0, 0x00001a40, 0xffffffff, 0x42444646, }, + { 0, 3, 0, 0x00001a44, 0xffffffff, 0x46463840, }, + { 0, 3, 1, 0x00001a48, 0xffffffff, 0x46464646, }, + { 0, 3, 1, 
0x00001a4c, 0xffffffff, 0x38404244, }, + { 0, 3, 2, 0x00001ae0, 0xffffffff, 0x46464646, }, + { 0, 3, 2, 0x00001ae4, 0xffffffff, 0x42444646, }, + { 0, 3, 2, 0x00001ae8, 0x0000ffff, 0x38383840, }, + { 1, 0, 0, 0x00000c24, 0xffffffff, 0x46464646, }, + { 1, 0, 0, 0x00000c28, 0xffffffff, 0x44464646, }, + { 1, 0, 0, 0x00000c2c, 0xffffffff, 0x46464646, }, + { 1, 0, 0, 0x00000c30, 0xffffffff, 0x42444646, }, + { 1, 0, 1, 0x00000c34, 0xffffffff, 0x46464646, }, + { 1, 0, 1, 0x00000c38, 0xffffffff, 0x42444646, }, + { 1, 0, 2, 0x00000cd8, 0xffffffff, 0x46464646, }, + { 1, 0, 2, 0x00000cdc, 0xffffffff, 0x42444646, }, + { 1, 0, 0, 0x00000c3c, 0xffffffff, 0x46464646, }, + { 1, 0, 0, 0x00000c40, 0xffffffff, 0x42444646, }, + { 1, 0, 0, 0x00000c44, 0xffffffff, 0x46463840, }, + { 1, 0, 1, 0x00000c48, 0xffffffff, 0x46464646, }, + { 1, 0, 1, 0x00000c4c, 0xffffffff, 0x38404244, }, + { 1, 0, 2, 0x00000ce0, 0xffffffff, 0x46464646, }, + { 1, 0, 2, 0x00000ce4, 0xffffffff, 0x42444646, }, + { 1, 0, 2, 0x00000ce8, 0x0000ffff, 0x38383840, }, + { 1, 1, 0, 0x00000e24, 0xffffffff, 0x46464646, }, + { 1, 1, 0, 0x00000e28, 0xffffffff, 0x44464646, }, + { 1, 1, 0, 0x00000e2c, 0xffffffff, 0x46464646, }, + { 1, 1, 0, 0x00000e30, 0xffffffff, 0x42444646, }, + { 1, 1, 1, 0x00000e34, 0xffffffff, 0x46464646, }, + { 1, 1, 1, 0x00000e38, 0xffffffff, 0x42444646, }, + { 1, 1, 2, 0x00000ed8, 0xffffffff, 0x46464646, }, + { 1, 1, 2, 0x00000edc, 0xffffffff, 0x42444646, }, + { 1, 1, 0, 0x00000e3c, 0xffffffff, 0x46464646, }, + { 1, 1, 0, 0x00000e40, 0xffffffff, 0x42444646, }, + { 1, 1, 0, 0x00000e44, 0xffffffff, 0x46463840, }, + { 1, 1, 1, 0x00000e48, 0xffffffff, 0x46464646, }, + { 1, 1, 1, 0x00000e4c, 0xffffffff, 0x38404244, }, + { 1, 1, 2, 0x00000ee0, 0xffffffff, 0x46464646, }, + { 1, 1, 2, 0x00000ee4, 0xffffffff, 0x42444646, }, + { 1, 1, 2, 0x00000ee8, 0x0000ffff, 0x38383840, }, + { 1, 2, 0, 0x00001824, 0xffffffff, 0x46464646, }, + { 1, 2, 0, 0x00001828, 0xffffffff, 0x44464646, }, + { 1, 2, 0, 0x0000182c, 0xffffffff, 0x46464646, }, + { 1, 2, 0, 0x00001830, 0xffffffff, 0x42444646, }, + { 1, 2, 1, 0x00001834, 0xffffffff, 0x46464646, }, + { 1, 2, 1, 0x00001838, 0xffffffff, 0x42444646, }, + { 1, 2, 2, 0x000018d8, 0xffffffff, 0x46464646, }, + { 1, 2, 2, 0x000018dc, 0xffffffff, 0x42444646, }, + { 1, 2, 0, 0x0000183c, 0xffffffff, 0x46464646, }, + { 1, 2, 0, 0x00001840, 0xffffffff, 0x42444646, }, + { 1, 2, 0, 0x00001844, 0xffffffff, 0x46463840, }, + { 1, 2, 1, 0x00001848, 0xffffffff, 0x46464646, }, + { 1, 2, 1, 0x0000184c, 0xffffffff, 0x38404244, }, + { 1, 2, 2, 0x000018e0, 0xffffffff, 0x46464646, }, + { 1, 2, 2, 0x000018e4, 0xffffffff, 0x42444646, }, + { 1, 2, 2, 0x000018e8, 0x0000ffff, 0x38383840, }, + { 1, 3, 0, 0x00001a24, 0xffffffff, 0x46464646, }, + { 1, 3, 0, 0x00001a28, 0xffffffff, 0x44464646, }, + { 1, 3, 0, 0x00001a2c, 0xffffffff, 0x46464646, }, + { 1, 3, 0, 0x00001a30, 0xffffffff, 0x42444646, }, + { 1, 3, 1, 0x00001a34, 0xffffffff, 0x46464646, }, + { 1, 3, 1, 0x00001a38, 0xffffffff, 0x42444646, }, + { 1, 3, 2, 0x00001ad8, 0xffffffff, 0x46464646, }, + { 1, 3, 2, 0x00001adc, 0xffffffff, 0x42444646, }, + { 1, 3, 0, 0x00001a3c, 0xffffffff, 0x46464646, }, + { 1, 3, 0, 0x00001a40, 0xffffffff, 0x42444646, }, + { 1, 3, 0, 0x00001a44, 0xffffffff, 0x46463840, }, + { 1, 3, 1, 0x00001a48, 0xffffffff, 0x46464646, }, + { 1, 3, 1, 0x00001a4c, 0xffffffff, 0x38404244, }, + { 1, 3, 2, 0x00001ae0, 0xffffffff, 0x46464646, }, + { 1, 3, 2, 0x00001ae4, 0xffffffff, 0x42444646, }, + { 1, 3, 2, 0x00001ae8, 0x0000ffff, 0x38383840, }, +}; + 
+RTW_DECL_TABLE_BB_PG(rtw8814a_bb_pg_type3); + +static const struct rtw_phy_pg_cfg_pair rtw8814a_bb_pg_type4[] = { + { 0, 0, 0, 0x00000c20, 0xffffffff, 0x42424242, }, + { 0, 0, 0, 0x00000c24, 0xffffffff, 0x42424242, }, + { 0, 0, 0, 0x00000c28, 0xffffffff, 0x36384042, }, + { 0, 0, 0, 0x00000c2c, 0xffffffff, 0x42424242, }, + { 0, 0, 0, 0x00000c30, 0xffffffff, 0x34363840, }, + { 0, 0, 1, 0x00000c34, 0xffffffff, 0x42424242, }, + { 0, 0, 1, 0x00000c38, 0xffffffff, 0x34363840, }, + { 0, 0, 2, 0x00000cd8, 0xffffffff, 0x42424242, }, + { 0, 0, 2, 0x00000cdc, 0xffffffff, 0x34363840, }, + { 0, 0, 0, 0x00000c3c, 0xffffffff, 0x42424242, }, + { 0, 0, 0, 0x00000c40, 0xffffffff, 0x34363840, }, + { 0, 0, 0, 0x00000c44, 0xffffffff, 0x42423032, }, + { 0, 0, 1, 0x00000c48, 0xffffffff, 0x38404242, }, + { 0, 0, 1, 0x00000c4c, 0xffffffff, 0x30323436, }, + { 0, 0, 2, 0x00000ce0, 0xffffffff, 0x42424242, }, + { 0, 0, 2, 0x00000ce4, 0xffffffff, 0x34363840, }, + { 0, 0, 2, 0x00000ce8, 0x0000ffff, 0x30303032, }, + { 0, 1, 0, 0x00000e20, 0xffffffff, 0x42424242, }, + { 0, 1, 0, 0x00000e24, 0xffffffff, 0x42424242, }, + { 0, 1, 0, 0x00000e28, 0xffffffff, 0x36384042, }, + { 0, 1, 0, 0x00000e2c, 0xffffffff, 0x42424242, }, + { 0, 1, 0, 0x00000e30, 0xffffffff, 0x34363840, }, + { 0, 1, 1, 0x00000e34, 0xffffffff, 0x42424242, }, + { 0, 1, 1, 0x00000e38, 0xffffffff, 0x34363840, }, + { 0, 1, 2, 0x00000ed8, 0xffffffff, 0x42424242, }, + { 0, 1, 2, 0x00000edc, 0xffffffff, 0x34363840, }, + { 0, 1, 0, 0x00000e3c, 0xffffffff, 0x42424242, }, + { 0, 1, 0, 0x00000e40, 0xffffffff, 0x34363840, }, + { 0, 1, 0, 0x00000e44, 0xffffffff, 0x42423032, }, + { 0, 1, 1, 0x00000e48, 0xffffffff, 0x38404242, }, + { 0, 1, 1, 0x00000e4c, 0xffffffff, 0x30323436, }, + { 0, 1, 2, 0x00000ee0, 0xffffffff, 0x42424242, }, + { 0, 1, 2, 0x00000ee4, 0xffffffff, 0x34363840, }, + { 0, 1, 2, 0x00000ee8, 0x0000ffff, 0x30303032, }, + { 0, 2, 0, 0x00001820, 0xffffffff, 0x42424242, }, + { 0, 2, 0, 0x00001824, 0xffffffff, 0x42424242, }, + { 0, 2, 0, 0x00001828, 0xffffffff, 0x36384042, }, + { 0, 2, 0, 0x0000182c, 0xffffffff, 0x42424242, }, + { 0, 2, 0, 0x00001830, 0xffffffff, 0x34363840, }, + { 0, 2, 1, 0x00001834, 0xffffffff, 0x42424242, }, + { 0, 2, 1, 0x00001838, 0xffffffff, 0x34363840, }, + { 0, 2, 2, 0x000018d8, 0xffffffff, 0x42424242, }, + { 0, 2, 2, 0x000018dc, 0xffffffff, 0x34363840, }, + { 0, 2, 0, 0x0000183c, 0xffffffff, 0x42424242, }, + { 0, 2, 0, 0x00001840, 0xffffffff, 0x34363840, }, + { 0, 2, 0, 0x00001844, 0xffffffff, 0x42423032, }, + { 0, 2, 1, 0x00001848, 0xffffffff, 0x38404242, }, + { 0, 2, 1, 0x0000184c, 0xffffffff, 0x30323436, }, + { 0, 2, 2, 0x000018e0, 0xffffffff, 0x42424242, }, + { 0, 2, 2, 0x000018e4, 0xffffffff, 0x34363840, }, + { 0, 2, 2, 0x000018e8, 0x0000ffff, 0x30303032, }, + { 0, 3, 0, 0x00001a20, 0xffffffff, 0x42424242, }, + { 0, 3, 0, 0x00001a24, 0xffffffff, 0x42424242, }, + { 0, 3, 0, 0x00001a28, 0xffffffff, 0x36384042, }, + { 0, 3, 0, 0x00001a2c, 0xffffffff, 0x42424242, }, + { 0, 3, 0, 0x00001a30, 0xffffffff, 0x34363840, }, + { 0, 3, 1, 0x00001a34, 0xffffffff, 0x42424242, }, + { 0, 3, 1, 0x00001a38, 0xffffffff, 0x34363840, }, + { 0, 3, 2, 0x00001ad8, 0xffffffff, 0x42424242, }, + { 0, 3, 2, 0x00001adc, 0xffffffff, 0x34363840, }, + { 0, 3, 0, 0x00001a3c, 0xffffffff, 0x42424242, }, + { 0, 3, 0, 0x00001a40, 0xffffffff, 0x34363840, }, + { 0, 3, 0, 0x00001a44, 0xffffffff, 0x42423032, }, + { 0, 3, 1, 0x00001a48, 0xffffffff, 0x38404242, }, + { 0, 3, 1, 0x00001a4c, 0xffffffff, 0x30323436, }, + { 0, 3, 2, 0x00001ae0, 0xffffffff, 0x42424242, }, + { 0, 
3, 2, 0x00001ae4, 0xffffffff, 0x34363840, }, + { 0, 3, 2, 0x00001ae8, 0x0000ffff, 0x30303032, }, + { 1, 0, 0, 0x00000c24, 0xffffffff, 0x42424242, }, + { 1, 0, 0, 0x00000c28, 0xffffffff, 0x36384042, }, + { 1, 0, 0, 0x00000c2c, 0xffffffff, 0x42424242, }, + { 1, 0, 0, 0x00000c30, 0xffffffff, 0x34363840, }, + { 1, 0, 1, 0x00000c34, 0xffffffff, 0x42424242, }, + { 1, 0, 1, 0x00000c38, 0xffffffff, 0x34363840, }, + { 1, 0, 2, 0x00000cd8, 0xffffffff, 0x42424242, }, + { 1, 0, 2, 0x00000cdc, 0xffffffff, 0x34363840, }, + { 1, 0, 0, 0x00000c3c, 0xffffffff, 0x42424242, }, + { 1, 0, 0, 0x00000c40, 0xffffffff, 0x34363840, }, + { 1, 0, 0, 0x00000c44, 0xffffffff, 0x42423032, }, + { 1, 0, 1, 0x00000c48, 0xffffffff, 0x38404242, }, + { 1, 0, 1, 0x00000c4c, 0xffffffff, 0x30323436, }, + { 1, 0, 2, 0x00000ce0, 0xffffffff, 0x42424242, }, + { 1, 0, 2, 0x00000ce4, 0xffffffff, 0x34363840, }, + { 1, 0, 2, 0x00000ce8, 0x0000ffff, 0x30303032, }, + { 1, 1, 0, 0x00000e24, 0xffffffff, 0x42424242, }, + { 1, 1, 0, 0x00000e28, 0xffffffff, 0x36384042, }, + { 1, 1, 0, 0x00000e2c, 0xffffffff, 0x42424242, }, + { 1, 1, 0, 0x00000e30, 0xffffffff, 0x34363840, }, + { 1, 1, 1, 0x00000e34, 0xffffffff, 0x42424242, }, + { 1, 1, 1, 0x00000e38, 0xffffffff, 0x34363840, }, + { 1, 1, 2, 0x00000ed8, 0xffffffff, 0x42424242, }, + { 1, 1, 2, 0x00000edc, 0xffffffff, 0x34363840, }, + { 1, 1, 0, 0x00000e3c, 0xffffffff, 0x42424242, }, + { 1, 1, 0, 0x00000e40, 0xffffffff, 0x34363840, }, + { 1, 1, 0, 0x00000e44, 0xffffffff, 0x42423032, }, + { 1, 1, 1, 0x00000e48, 0xffffffff, 0x38404242, }, + { 1, 1, 1, 0x00000e4c, 0xffffffff, 0x30323436, }, + { 1, 1, 2, 0x00000ee0, 0xffffffff, 0x42424242, }, + { 1, 1, 2, 0x00000ee4, 0xffffffff, 0x34363840, }, + { 1, 1, 2, 0x00000ee8, 0x0000ffff, 0x30303032, }, + { 1, 2, 0, 0x00001824, 0xffffffff, 0x42424242, }, + { 1, 2, 0, 0x00001828, 0xffffffff, 0x36384042, }, + { 1, 2, 0, 0x0000182c, 0xffffffff, 0x42424242, }, + { 1, 2, 0, 0x00001830, 0xffffffff, 0x34363840, }, + { 1, 2, 1, 0x00001834, 0xffffffff, 0x42424242, }, + { 1, 2, 1, 0x00001838, 0xffffffff, 0x34363840, }, + { 1, 2, 2, 0x000018d8, 0xffffffff, 0x42424242, }, + { 1, 2, 2, 0x000018dc, 0xffffffff, 0x34363840, }, + { 1, 2, 0, 0x0000183c, 0xffffffff, 0x42424242, }, + { 1, 2, 0, 0x00001840, 0xffffffff, 0x34363840, }, + { 1, 2, 0, 0x00001844, 0xffffffff, 0x42423032, }, + { 1, 2, 1, 0x00001848, 0xffffffff, 0x38404242, }, + { 1, 2, 1, 0x0000184c, 0xffffffff, 0x30323436, }, + { 1, 2, 2, 0x000018e0, 0xffffffff, 0x42424242, }, + { 1, 2, 2, 0x000018e4, 0xffffffff, 0x34363840, }, + { 1, 2, 2, 0x000018e8, 0x0000ffff, 0x30303032, }, + { 1, 3, 0, 0x00001a24, 0xffffffff, 0x42424242, }, + { 1, 3, 0, 0x00001a28, 0xffffffff, 0x36384042, }, + { 1, 3, 0, 0x00001a2c, 0xffffffff, 0x42424242, }, + { 1, 3, 0, 0x00001a30, 0xffffffff, 0x34363840, }, + { 1, 3, 1, 0x00001a34, 0xffffffff, 0x42424242, }, + { 1, 3, 1, 0x00001a38, 0xffffffff, 0x34363840, }, + { 1, 3, 2, 0x00001ad8, 0xffffffff, 0x42424242, }, + { 1, 3, 2, 0x00001adc, 0xffffffff, 0x34363840, }, + { 1, 3, 0, 0x00001a3c, 0xffffffff, 0x42424242, }, + { 1, 3, 0, 0x00001a40, 0xffffffff, 0x34363840, }, + { 1, 3, 0, 0x00001a44, 0xffffffff, 0x42423032, }, + { 1, 3, 1, 0x00001a48, 0xffffffff, 0x38404242, }, + { 1, 3, 1, 0x00001a4c, 0xffffffff, 0x30323436, }, + { 1, 3, 2, 0x00001ae0, 0xffffffff, 0x42424242, }, + { 1, 3, 2, 0x00001ae4, 0xffffffff, 0x34363840, }, + { 1, 3, 2, 0x00001ae8, 0x0000ffff, 0x30303032, }, +}; + +RTW_DECL_TABLE_BB_PG(rtw8814a_bb_pg_type4); + +static const struct rtw_phy_pg_cfg_pair rtw8814a_bb_pg_type5[] = { + { 0, 
0, 0, 0x00000c20, 0xffffffff, 0x48484848, }, + { 0, 0, 0, 0x00000c24, 0xffffffff, 0x46464646, }, + { 0, 0, 0, 0x00000c28, 0xffffffff, 0x44464646, }, + { 0, 0, 0, 0x00000c2c, 0xffffffff, 0x46464646, }, + { 0, 0, 0, 0x00000c30, 0xffffffff, 0x42444646, }, + { 0, 0, 1, 0x00000c34, 0xffffffff, 0x44444444, }, + { 0, 0, 1, 0x00000c38, 0xffffffff, 0x40424444, }, + { 0, 0, 2, 0x00000cd8, 0xffffffff, 0x42424242, }, + { 0, 0, 2, 0x00000cdc, 0xffffffff, 0x38404242, }, + { 0, 0, 0, 0x00000c3c, 0xffffffff, 0x46464646, }, + { 0, 0, 0, 0x00000c40, 0xffffffff, 0x42444646, }, + { 0, 0, 0, 0x00000c44, 0xffffffff, 0x44444040, }, + { 0, 0, 1, 0x00000c48, 0xffffffff, 0x44444444, }, + { 0, 0, 1, 0x00000c4c, 0xffffffff, 0x38384042, }, + { 0, 0, 2, 0x00000ce0, 0xffffffff, 0x42424242, }, + { 0, 0, 2, 0x00000ce4, 0xffffffff, 0x38404242, }, + { 0, 0, 2, 0x00000ce8, 0x0000ffff, 0x20203636, }, + { 0, 1, 0, 0x00000e20, 0xffffffff, 0x48484848, }, + { 0, 1, 0, 0x00000e24, 0xffffffff, 0x46464646, }, + { 0, 1, 0, 0x00000e28, 0xffffffff, 0x44464646, }, + { 0, 1, 0, 0x00000e2c, 0xffffffff, 0x46464646, }, + { 0, 1, 0, 0x00000e30, 0xffffffff, 0x42444646, }, + { 0, 1, 1, 0x00000e34, 0xffffffff, 0x44444444, }, + { 0, 1, 1, 0x00000e38, 0xffffffff, 0x40424444, }, + { 0, 1, 2, 0x00000ed8, 0xffffffff, 0x42424242, }, + { 0, 1, 2, 0x00000edc, 0xffffffff, 0x38404242, }, + { 0, 1, 0, 0x00000e3c, 0xffffffff, 0x46464646, }, + { 0, 1, 0, 0x00000e40, 0xffffffff, 0x42444646, }, + { 0, 1, 0, 0x00000e44, 0xffffffff, 0x44444040, }, + { 0, 1, 1, 0x00000e48, 0xffffffff, 0x44444444, }, + { 0, 1, 1, 0x00000e4c, 0xffffffff, 0x38384042, }, + { 0, 1, 2, 0x00000ee0, 0xffffffff, 0x42424242, }, + { 0, 1, 2, 0x00000ee4, 0xffffffff, 0x38404242, }, + { 0, 1, 2, 0x00000ee8, 0x0000ffff, 0x20203636, }, + { 0, 2, 0, 0x00001820, 0xffffffff, 0x48484848, }, + { 0, 2, 0, 0x00001824, 0xffffffff, 0x46464646, }, + { 0, 2, 0, 0x00001828, 0xffffffff, 0x44464646, }, + { 0, 2, 0, 0x0000182c, 0xffffffff, 0x46464646, }, + { 0, 2, 0, 0x00001830, 0xffffffff, 0x42444646, }, + { 0, 2, 1, 0x00001834, 0xffffffff, 0x44444444, }, + { 0, 2, 1, 0x00001838, 0xffffffff, 0x40424444, }, + { 0, 2, 2, 0x000018d8, 0xffffffff, 0x42424242, }, + { 0, 2, 2, 0x000018dc, 0xffffffff, 0x38404242, }, + { 0, 2, 0, 0x0000183c, 0xffffffff, 0x46464646, }, + { 0, 2, 0, 0x00001840, 0xffffffff, 0x42444646, }, + { 0, 2, 0, 0x00001844, 0xffffffff, 0x44444040, }, + { 0, 2, 1, 0x00001848, 0xffffffff, 0x44444444, }, + { 0, 2, 1, 0x0000184c, 0xffffffff, 0x38384042, }, + { 0, 2, 2, 0x000018e0, 0xffffffff, 0x42424242, }, + { 0, 2, 2, 0x000018e4, 0xffffffff, 0x38404242, }, + { 0, 2, 2, 0x000018e8, 0x0000ffff, 0x20203636, }, + { 0, 3, 0, 0x00001a20, 0xffffffff, 0x48484848, }, + { 0, 3, 0, 0x00001a24, 0xffffffff, 0x46464646, }, + { 0, 3, 0, 0x00001a28, 0xffffffff, 0x44464646, }, + { 0, 3, 0, 0x00001a2c, 0xffffffff, 0x46464646, }, + { 0, 3, 0, 0x00001a30, 0xffffffff, 0x42444646, }, + { 0, 3, 1, 0x00001a34, 0xffffffff, 0x44444444, }, + { 0, 3, 1, 0x00001a38, 0xffffffff, 0x40424444, }, + { 0, 3, 2, 0x00001ad8, 0xffffffff, 0x42424242, }, + { 0, 3, 2, 0x00001adc, 0xffffffff, 0x38404242, }, + { 0, 3, 0, 0x00001a3c, 0xffffffff, 0x46464646, }, + { 0, 3, 0, 0x00001a40, 0xffffffff, 0x42444646, }, + { 0, 3, 0, 0x00001a44, 0xffffffff, 0x44444040, }, + { 0, 3, 1, 0x00001a48, 0xffffffff, 0x44444444, }, + { 0, 3, 1, 0x00001a4c, 0xffffffff, 0x38384042, }, + { 0, 3, 2, 0x00001ae0, 0xffffffff, 0x42424242, }, + { 0, 3, 2, 0x00001ae4, 0xffffffff, 0x38404242, }, + { 0, 3, 2, 0x00001ae8, 0x0000ffff, 0x20203636, }, + { 1, 0, 0, 0x00000c24, 
0xffffffff, 0x46464646, }, + { 1, 0, 0, 0x00000c28, 0xffffffff, 0x44464646, }, + { 1, 0, 0, 0x00000c2c, 0xffffffff, 0x46464646, }, + { 1, 0, 0, 0x00000c30, 0xffffffff, 0x42444646, }, + { 1, 0, 1, 0x00000c34, 0xffffffff, 0x44444444, }, + { 1, 0, 1, 0x00000c38, 0xffffffff, 0x40424444, }, + { 1, 0, 2, 0x00000cd8, 0xffffffff, 0x42424242, }, + { 1, 0, 2, 0x00000cdc, 0xffffffff, 0x38404242, }, + { 1, 0, 0, 0x00000c3c, 0xffffffff, 0x46464646, }, + { 1, 0, 0, 0x00000c40, 0xffffffff, 0x42444646, }, + { 1, 0, 0, 0x00000c44, 0xffffffff, 0x44443840, }, + { 1, 0, 1, 0x00000c48, 0xffffffff, 0x44444444, }, + { 1, 0, 1, 0x00000c4c, 0xffffffff, 0x36384042, }, + { 1, 0, 2, 0x00000ce0, 0xffffffff, 0x42424242, }, + { 1, 0, 2, 0x00000ce4, 0xffffffff, 0x38404242, }, + { 1, 0, 2, 0x00000ce8, 0x0000ffff, 0x20203436, }, + { 1, 1, 0, 0x00000e24, 0xffffffff, 0x46464646, }, + { 1, 1, 0, 0x00000e28, 0xffffffff, 0x44464646, }, + { 1, 1, 0, 0x00000e2c, 0xffffffff, 0x46464646, }, + { 1, 1, 0, 0x00000e30, 0xffffffff, 0x42444646, }, + { 1, 1, 1, 0x00000e34, 0xffffffff, 0x44444444, }, + { 1, 1, 1, 0x00000e38, 0xffffffff, 0x40424444, }, + { 1, 1, 2, 0x00000ed8, 0xffffffff, 0x42424242, }, + { 1, 1, 2, 0x00000edc, 0xffffffff, 0x38404242, }, + { 1, 1, 0, 0x00000e3c, 0xffffffff, 0x46464646, }, + { 1, 1, 0, 0x00000e40, 0xffffffff, 0x42444646, }, + { 1, 1, 0, 0x00000e44, 0xffffffff, 0x44443840, }, + { 1, 1, 1, 0x00000e48, 0xffffffff, 0x44444444, }, + { 1, 1, 1, 0x00000e4c, 0xffffffff, 0x36384042, }, + { 1, 1, 2, 0x00000ee0, 0xffffffff, 0x42424242, }, + { 1, 1, 2, 0x00000ee4, 0xffffffff, 0x38404242, }, + { 1, 1, 2, 0x00000ee8, 0x0000ffff, 0x20203436, }, + { 1, 2, 0, 0x00001824, 0xffffffff, 0x46464646, }, + { 1, 2, 0, 0x00001828, 0xffffffff, 0x44464646, }, + { 1, 2, 0, 0x0000182c, 0xffffffff, 0x46464646, }, + { 1, 2, 0, 0x00001830, 0xffffffff, 0x42444646, }, + { 1, 2, 1, 0x00001834, 0xffffffff, 0x44444444, }, + { 1, 2, 1, 0x00001838, 0xffffffff, 0x40424444, }, + { 1, 2, 2, 0x000018d8, 0xffffffff, 0x42424242, }, + { 1, 2, 2, 0x000018dc, 0xffffffff, 0x38404242, }, + { 1, 2, 0, 0x0000183c, 0xffffffff, 0x46464646, }, + { 1, 2, 0, 0x00001840, 0xffffffff, 0x42444646, }, + { 1, 2, 0, 0x00001844, 0xffffffff, 0x44443840, }, + { 1, 2, 1, 0x00001848, 0xffffffff, 0x44444444, }, + { 1, 2, 1, 0x0000184c, 0xffffffff, 0x36384042, }, + { 1, 2, 2, 0x000018e0, 0xffffffff, 0x42424242, }, + { 1, 2, 2, 0x000018e4, 0xffffffff, 0x38404242, }, + { 1, 2, 2, 0x000018e8, 0x0000ffff, 0x20203436, }, + { 1, 3, 0, 0x00001a24, 0xffffffff, 0x46464646, }, + { 1, 3, 0, 0x00001a28, 0xffffffff, 0x44464646, }, + { 1, 3, 0, 0x00001a2c, 0xffffffff, 0x46464646, }, + { 1, 3, 0, 0x00001a30, 0xffffffff, 0x42444646, }, + { 1, 3, 1, 0x00001a34, 0xffffffff, 0x44444444, }, + { 1, 3, 1, 0x00001a38, 0xffffffff, 0x40424444, }, + { 1, 3, 2, 0x00001ad8, 0xffffffff, 0x42424242, }, + { 1, 3, 2, 0x00001adc, 0xffffffff, 0x38404242, }, + { 1, 3, 0, 0x00001a3c, 0xffffffff, 0x46464646, }, + { 1, 3, 0, 0x00001a40, 0xffffffff, 0x42444646, }, + { 1, 3, 0, 0x00001a44, 0xffffffff, 0x44443840, }, + { 1, 3, 1, 0x00001a48, 0xffffffff, 0x44444444, }, + { 1, 3, 1, 0x00001a4c, 0xffffffff, 0x36384042, }, + { 1, 3, 2, 0x00001ae0, 0xffffffff, 0x42424242, }, + { 1, 3, 2, 0x00001ae4, 0xffffffff, 0x38404242, }, + { 1, 3, 2, 0x00001ae8, 0x0000ffff, 0x20203436, }, +}; + +RTW_DECL_TABLE_BB_PG(rtw8814a_bb_pg_type5); + +static const struct rtw_phy_pg_cfg_pair rtw8814a_bb_pg_type7[] = { + { 0, 0, 0, 0x00000c20, 0xffffffff, 0x34343434, }, + { 0, 0, 0, 0x00000c24, 0xffffffff, 0x34343434, }, + { 0, 0, 0, 0x00000c28, 
0xffffffff, 0x30323434, }, + { 0, 0, 0, 0x00000c2c, 0xffffffff, 0x34343434, }, + { 0, 0, 0, 0x00000c30, 0xffffffff, 0x28303234, }, + { 0, 0, 1, 0x00000c34, 0xffffffff, 0x34343434, }, + { 0, 0, 1, 0x00000c38, 0xffffffff, 0x28303234, }, + { 0, 0, 2, 0x00000cd8, 0xffffffff, 0x34343434, }, + { 0, 0, 2, 0x00000cdc, 0xffffffff, 0x28303234, }, + { 0, 0, 0, 0x00000c3c, 0xffffffff, 0x34343434, }, + { 0, 0, 0, 0x00000c40, 0xffffffff, 0x28303234, }, + { 0, 0, 0, 0x00000c44, 0xffffffff, 0x34342426, }, + { 0, 0, 1, 0x00000c48, 0xffffffff, 0x32343434, }, + { 0, 0, 1, 0x00000c4c, 0xffffffff, 0x24262830, }, + { 0, 0, 2, 0x00000ce0, 0xffffffff, 0x34343434, }, + { 0, 0, 2, 0x00000ce4, 0xffffffff, 0x28303234, }, + { 0, 0, 2, 0x00000ce8, 0x0000ffff, 0x24263434, }, + { 0, 1, 0, 0x00000e20, 0xffffffff, 0x34343434, }, + { 0, 1, 0, 0x00000e24, 0xffffffff, 0x34343434, }, + { 0, 1, 0, 0x00000e28, 0xffffffff, 0x30323434, }, + { 0, 1, 0, 0x00000e2c, 0xffffffff, 0x34343434, }, + { 0, 1, 0, 0x00000e30, 0xffffffff, 0x28303234, }, + { 0, 1, 1, 0x00000e34, 0xffffffff, 0x34343434, }, + { 0, 1, 1, 0x00000e38, 0xffffffff, 0x28303234, }, + { 0, 1, 2, 0x00000ed8, 0xffffffff, 0x34343434, }, + { 0, 1, 2, 0x00000edc, 0xffffffff, 0x28303234, }, + { 0, 1, 0, 0x00000e3c, 0xffffffff, 0x34343434, }, + { 0, 1, 0, 0x00000e40, 0xffffffff, 0x28303234, }, + { 0, 1, 0, 0x00000e44, 0xffffffff, 0x34342426, }, + { 0, 1, 1, 0x00000e48, 0xffffffff, 0x32343434, }, + { 0, 1, 1, 0x00000e4c, 0xffffffff, 0x24262830, }, + { 0, 1, 2, 0x00000ee0, 0xffffffff, 0x34343434, }, + { 0, 1, 2, 0x00000ee4, 0xffffffff, 0x28303234, }, + { 0, 1, 2, 0x00000ee8, 0x0000ffff, 0x24263434, }, + { 0, 2, 0, 0x00001820, 0xffffffff, 0x34343434, }, + { 0, 2, 0, 0x00001824, 0xffffffff, 0x34343434, }, + { 0, 2, 0, 0x00001828, 0xffffffff, 0x30323434, }, + { 0, 2, 0, 0x0000182c, 0xffffffff, 0x34343434, }, + { 0, 2, 0, 0x00001830, 0xffffffff, 0x28303234, }, + { 0, 2, 1, 0x00001834, 0xffffffff, 0x34343434, }, + { 0, 2, 1, 0x00001838, 0xffffffff, 0x28303234, }, + { 0, 2, 2, 0x000018d8, 0xffffffff, 0x34343434, }, + { 0, 2, 2, 0x000018dc, 0xffffffff, 0x28303234, }, + { 0, 2, 0, 0x0000183c, 0xffffffff, 0x34343434, }, + { 0, 2, 0, 0x00001840, 0xffffffff, 0x28303234, }, + { 0, 2, 0, 0x00001844, 0xffffffff, 0x34342426, }, + { 0, 2, 1, 0x00001848, 0xffffffff, 0x32343434, }, + { 0, 2, 1, 0x0000184c, 0xffffffff, 0x24262830, }, + { 0, 2, 2, 0x000018e0, 0xffffffff, 0x34343434, }, + { 0, 2, 2, 0x000018e4, 0xffffffff, 0x28303234, }, + { 0, 2, 2, 0x000018e8, 0x0000ffff, 0x24263434, }, + { 0, 3, 0, 0x00001a20, 0xffffffff, 0x34343434, }, + { 0, 3, 0, 0x00001a24, 0xffffffff, 0x34343434, }, + { 0, 3, 0, 0x00001a28, 0xffffffff, 0x30323434, }, + { 0, 3, 0, 0x00001a2c, 0xffffffff, 0x34343434, }, + { 0, 3, 0, 0x00001a30, 0xffffffff, 0x28303234, }, + { 0, 3, 1, 0x00001a34, 0xffffffff, 0x34343434, }, + { 0, 3, 1, 0x00001a38, 0xffffffff, 0x28303234, }, + { 0, 3, 2, 0x00001ad8, 0xffffffff, 0x34343434, }, + { 0, 3, 2, 0x00001adc, 0xffffffff, 0x28303234, }, + { 0, 3, 0, 0x00001a3c, 0xffffffff, 0x34343434, }, + { 0, 3, 0, 0x00001a40, 0xffffffff, 0x28303234, }, + { 0, 3, 0, 0x00001a44, 0xffffffff, 0x34342426, }, + { 0, 3, 1, 0x00001a48, 0xffffffff, 0x32343434, }, + { 0, 3, 1, 0x00001a4c, 0xffffffff, 0x24262830, }, + { 0, 3, 2, 0x00001ae0, 0xffffffff, 0x34343434, }, + { 0, 3, 2, 0x00001ae4, 0xffffffff, 0x28303234, }, + { 0, 3, 2, 0x00001ae8, 0x0000ffff, 0x24263434, }, + { 1, 0, 0, 0x00000c24, 0xffffffff, 0x34343434, }, + { 1, 0, 0, 0x00000c28, 0xffffffff, 0x30323434, }, + { 1, 0, 0, 0x00000c2c, 0xffffffff, 
0x34343434, }, + { 1, 0, 0, 0x00000c30, 0xffffffff, 0x28303234, }, + { 1, 0, 1, 0x00000c34, 0xffffffff, 0x34343434, }, + { 1, 0, 1, 0x00000c38, 0xffffffff, 0x28303234, }, + { 1, 0, 2, 0x00000cd8, 0xffffffff, 0x34343434, }, + { 1, 0, 2, 0x00000cdc, 0xffffffff, 0x28303234, }, + { 1, 0, 0, 0x00000c3c, 0xffffffff, 0x34343434, }, + { 1, 0, 0, 0x00000c40, 0xffffffff, 0x28303234, }, + { 1, 0, 0, 0x00000c44, 0xffffffff, 0x34342426, }, + { 1, 0, 1, 0x00000c48, 0xffffffff, 0x32343434, }, + { 1, 0, 1, 0x00000c4c, 0xffffffff, 0x24262830, }, + { 1, 0, 2, 0x00000ce0, 0xffffffff, 0x34343434, }, + { 1, 0, 2, 0x00000ce4, 0xffffffff, 0x28303234, }, + { 1, 0, 2, 0x00000ce8, 0x0000ffff, 0x24263434, }, + { 1, 1, 0, 0x00000e24, 0xffffffff, 0x34343434, }, + { 1, 1, 0, 0x00000e28, 0xffffffff, 0x30323434, }, + { 1, 1, 0, 0x00000e2c, 0xffffffff, 0x34343434, }, + { 1, 1, 0, 0x00000e30, 0xffffffff, 0x28303234, }, + { 1, 1, 1, 0x00000e34, 0xffffffff, 0x34343434, }, + { 1, 1, 1, 0x00000e38, 0xffffffff, 0x28303234, }, + { 1, 1, 2, 0x00000ed8, 0xffffffff, 0x34343434, }, + { 1, 1, 2, 0x00000edc, 0xffffffff, 0x28303234, }, + { 1, 1, 0, 0x00000e3c, 0xffffffff, 0x34343434, }, + { 1, 1, 0, 0x00000e40, 0xffffffff, 0x28303234, }, + { 1, 1, 0, 0x00000e44, 0xffffffff, 0x34342426, }, + { 1, 1, 1, 0x00000e48, 0xffffffff, 0x32343434, }, + { 1, 1, 1, 0x00000e4c, 0xffffffff, 0x24262830, }, + { 1, 1, 2, 0x00000ee0, 0xffffffff, 0x34343434, }, + { 1, 1, 2, 0x00000ee4, 0xffffffff, 0x28303234, }, + { 1, 1, 2, 0x00000ee8, 0x0000ffff, 0x24263434, }, + { 1, 2, 0, 0x00001824, 0xffffffff, 0x34343434, }, + { 1, 2, 0, 0x00001828, 0xffffffff, 0x30323434, }, + { 1, 2, 0, 0x0000182c, 0xffffffff, 0x34343434, }, + { 1, 2, 0, 0x00001830, 0xffffffff, 0x28303234, }, + { 1, 2, 1, 0x00001834, 0xffffffff, 0x34343434, }, + { 1, 2, 1, 0x00001838, 0xffffffff, 0x28303234, }, + { 1, 2, 2, 0x000018d8, 0xffffffff, 0x34343434, }, + { 1, 2, 2, 0x000018dc, 0xffffffff, 0x28303234, }, + { 1, 2, 0, 0x0000183c, 0xffffffff, 0x34343434, }, + { 1, 2, 0, 0x00001840, 0xffffffff, 0x28303234, }, + { 1, 2, 0, 0x00001844, 0xffffffff, 0x34342426, }, + { 1, 2, 1, 0x00001848, 0xffffffff, 0x32343434, }, + { 1, 2, 1, 0x0000184c, 0xffffffff, 0x24262830, }, + { 1, 2, 2, 0x000018e0, 0xffffffff, 0x34343434, }, + { 1, 2, 2, 0x000018e4, 0xffffffff, 0x28303234, }, + { 1, 2, 2, 0x000018e8, 0x0000ffff, 0x24263434, }, + { 1, 3, 0, 0x00001a24, 0xffffffff, 0x34343434, }, + { 1, 3, 0, 0x00001a28, 0xffffffff, 0x30323434, }, + { 1, 3, 0, 0x00001a2c, 0xffffffff, 0x34343434, }, + { 1, 3, 0, 0x00001a30, 0xffffffff, 0x28303234, }, + { 1, 3, 1, 0x00001a34, 0xffffffff, 0x34343434, }, + { 1, 3, 1, 0x00001a38, 0xffffffff, 0x28303234, }, + { 1, 3, 2, 0x00001ad8, 0xffffffff, 0x34343434, }, + { 1, 3, 2, 0x00001adc, 0xffffffff, 0x28303234, }, + { 1, 3, 0, 0x00001a3c, 0xffffffff, 0x34343434, }, + { 1, 3, 0, 0x00001a40, 0xffffffff, 0x28303234, }, + { 1, 3, 0, 0x00001a44, 0xffffffff, 0x34342426, }, + { 1, 3, 1, 0x00001a48, 0xffffffff, 0x32343434, }, + { 1, 3, 1, 0x00001a4c, 0xffffffff, 0x24262830, }, + { 1, 3, 2, 0x00001ae0, 0xffffffff, 0x34343434, }, + { 1, 3, 2, 0x00001ae4, 0xffffffff, 0x28303234, }, + { 1, 3, 2, 0x00001ae8, 0x0000ffff, 0x24263434, }, +}; + +RTW_DECL_TABLE_BB_PG(rtw8814a_bb_pg_type7); + +static const struct rtw_phy_pg_cfg_pair rtw8814a_bb_pg_type8[] = { + { 0, 0, 0, 0x00000c20, 0xffffffff, 0x43434343, }, + { 0, 0, 0, 0x00000c24, 0xffffffff, 0x43434343, }, + { 0, 0, 0, 0x00000c28, 0xffffffff, 0x35373941, }, + { 0, 0, 0, 0x00000c2c, 0xffffffff, 0x43434343, }, + { 0, 0, 0, 0x00000c30, 0xffffffff, 
0x33353739, }, + { 0, 0, 1, 0x00000c34, 0xffffffff, 0x43434343, }, + { 0, 0, 1, 0x00000c38, 0xffffffff, 0x31333537, }, + { 0, 0, 2, 0x00000cd8, 0xffffffff, 0x43434343, }, + { 0, 0, 2, 0x00000cdc, 0xffffffff, 0x29313335, }, + { 0, 0, 0, 0x00000c3c, 0xffffffff, 0x34343434, }, + { 0, 0, 0, 0x00000c40, 0xffffffff, 0x28303234, }, + { 0, 0, 0, 0x00000c44, 0xffffffff, 0x32322426, }, + { 0, 0, 1, 0x00000c48, 0xffffffff, 0x30323232, }, + { 0, 0, 1, 0x00000c4c, 0xffffffff, 0x22242628, }, + { 0, 0, 2, 0x00000ce0, 0xffffffff, 0x30303030, }, + { 0, 0, 2, 0x00000ce4, 0xffffffff, 0x24262830, }, + { 0, 0, 2, 0x00000ce8, 0x0000ffff, 0x20222222, }, + { 0, 1, 0, 0x00000e20, 0xffffffff, 0x43434343, }, + { 0, 1, 0, 0x00000e24, 0xffffffff, 0x43434343, }, + { 0, 1, 0, 0x00000e28, 0xffffffff, 0x35373941, }, + { 0, 1, 0, 0x00000e2c, 0xffffffff, 0x41434343, }, + { 0, 1, 0, 0x00000e30, 0xffffffff, 0x33353739, }, + { 0, 1, 1, 0x00000e34, 0xffffffff, 0x39414141, }, + { 0, 1, 1, 0x00000e38, 0xffffffff, 0x31333537, }, + { 0, 1, 2, 0x00000ed8, 0xffffffff, 0x37393939, }, + { 0, 1, 2, 0x00000edc, 0xffffffff, 0x29313335, }, + { 0, 1, 0, 0x00000e3c, 0xffffffff, 0x34343434, }, + { 0, 1, 0, 0x00000e40, 0xffffffff, 0x28303234, }, + { 0, 1, 0, 0x00000e44, 0xffffffff, 0x32322426, }, + { 0, 1, 1, 0x00000e48, 0xffffffff, 0x30323232, }, + { 0, 1, 1, 0x00000e4c, 0xffffffff, 0x22242628, }, + { 0, 1, 2, 0x00000ee0, 0xffffffff, 0x30303030, }, + { 0, 1, 2, 0x00000ee4, 0xffffffff, 0x24262830, }, + { 0, 1, 2, 0x00000ee8, 0x0000ffff, 0x20222222, }, + { 0, 2, 0, 0x00001820, 0xffffffff, 0x43434343, }, + { 0, 2, 0, 0x00001824, 0xffffffff, 0x43434343, }, + { 0, 2, 0, 0x00001828, 0xffffffff, 0x35373941, }, + { 0, 2, 0, 0x0000182c, 0xffffffff, 0x41434343, }, + { 0, 2, 0, 0x00001830, 0xffffffff, 0x33353739, }, + { 0, 2, 1, 0x00001834, 0xffffffff, 0x39414141, }, + { 0, 2, 1, 0x00001838, 0xffffffff, 0x31333537, }, + { 0, 2, 2, 0x000018d8, 0xffffffff, 0x37393939, }, + { 0, 2, 2, 0x000018dc, 0xffffffff, 0x29313335, }, + { 0, 2, 0, 0x0000183c, 0xffffffff, 0x34343434, }, + { 0, 2, 0, 0x00001840, 0xffffffff, 0x28303234, }, + { 0, 2, 0, 0x00001844, 0xffffffff, 0x32322426, }, + { 0, 2, 1, 0x00001848, 0xffffffff, 0x30323232, }, + { 0, 2, 1, 0x0000184c, 0xffffffff, 0x22242628, }, + { 0, 2, 2, 0x000018e0, 0xffffffff, 0x30303030, }, + { 0, 2, 2, 0x000018e4, 0xffffffff, 0x24262830, }, + { 0, 2, 2, 0x000018e8, 0x0000ffff, 0x20222222, }, + { 0, 3, 0, 0x00001a20, 0xffffffff, 0x43434343, }, + { 0, 3, 0, 0x00001a24, 0xffffffff, 0x43434343, }, + { 0, 3, 0, 0x00001a28, 0xffffffff, 0x35373941, }, + { 0, 3, 0, 0x00001a2c, 0xffffffff, 0x41434343, }, + { 0, 3, 0, 0x00001a30, 0xffffffff, 0x33353739, }, + { 0, 3, 1, 0x00001a34, 0xffffffff, 0x39414141, }, + { 0, 3, 1, 0x00001a38, 0xffffffff, 0x31333537, }, + { 0, 3, 2, 0x00001ad8, 0xffffffff, 0x37393939, }, + { 0, 3, 2, 0x00001adc, 0xffffffff, 0x29313335, }, + { 0, 3, 0, 0x00001a3c, 0xffffffff, 0x34343434, }, + { 0, 3, 0, 0x00001a40, 0xffffffff, 0x28303234, }, + { 0, 3, 0, 0x00001a44, 0xffffffff, 0x32322426, }, + { 0, 3, 1, 0x00001a48, 0xffffffff, 0x30323232, }, + { 0, 3, 1, 0x00001a4c, 0xffffffff, 0x22242628, }, + { 0, 3, 2, 0x00001ae0, 0xffffffff, 0x30303030, }, + { 0, 3, 2, 0x00001ae4, 0xffffffff, 0x24262830, }, + { 0, 3, 2, 0x00001ae8, 0x0000ffff, 0x20222222, }, + { 1, 0, 0, 0x00000c24, 0xffffffff, 0x46464646, }, + { 1, 0, 0, 0x00000c28, 0xffffffff, 0x39414345, }, + { 1, 0, 0, 0x00000c2c, 0xffffffff, 0x46464646, }, + { 1, 0, 0, 0x00000c30, 0xffffffff, 0x38404244, }, + { 1, 0, 1, 0x00000c34, 0xffffffff, 0x46464646, }, + { 
1, 0, 1, 0x00000c38, 0xffffffff, 0x36384042, }, + { 1, 0, 2, 0x00000cd8, 0xffffffff, 0x46464646, }, + { 1, 0, 2, 0x00000cdc, 0xffffffff, 0x34363840, }, + { 1, 0, 0, 0x00000c3c, 0xffffffff, 0x46464646, }, + { 1, 0, 0, 0x00000c40, 0xffffffff, 0x38404244, }, + { 1, 0, 0, 0x00000c44, 0xffffffff, 0x46463738, }, + { 1, 0, 1, 0x00000c48, 0xffffffff, 0x42444646, }, + { 1, 0, 1, 0x00000c4c, 0xffffffff, 0x35373840, }, + { 1, 0, 2, 0x00000ce0, 0xffffffff, 0x46464646, }, + { 1, 0, 2, 0x00000ce4, 0xffffffff, 0x37394143, }, + { 1, 0, 2, 0x00000ce8, 0x0000ffff, 0x33333335, }, + { 1, 1, 0, 0x00000e24, 0xffffffff, 0x46464646, }, + { 1, 1, 0, 0x00000e28, 0xffffffff, 0x39414345, }, + { 1, 1, 0, 0x00000e2c, 0xffffffff, 0x46464646, }, + { 1, 1, 0, 0x00000e30, 0xffffffff, 0x38404244, }, + { 1, 1, 1, 0x00000e34, 0xffffffff, 0x46464646, }, + { 1, 1, 1, 0x00000e38, 0xffffffff, 0x36384042, }, + { 1, 1, 2, 0x00000ed8, 0xffffffff, 0x46464646, }, + { 1, 1, 2, 0x00000edc, 0xffffffff, 0x34363840, }, + { 1, 1, 0, 0x00000e3c, 0xffffffff, 0x46464646, }, + { 1, 1, 0, 0x00000e40, 0xffffffff, 0x38404244, }, + { 1, 1, 0, 0x00000e44, 0xffffffff, 0x46463738, }, + { 1, 1, 1, 0x00000e48, 0xffffffff, 0x42444646, }, + { 1, 1, 1, 0x00000e4c, 0xffffffff, 0x35373840, }, + { 1, 1, 2, 0x00000ee0, 0xffffffff, 0x46464646, }, + { 1, 1, 2, 0x00000ee4, 0xffffffff, 0x37394143, }, + { 1, 1, 2, 0x00000ee8, 0x0000ffff, 0x33333335, }, + { 1, 2, 0, 0x00001824, 0xffffffff, 0x46464646, }, + { 1, 2, 0, 0x00001828, 0xffffffff, 0x39414345, }, + { 1, 2, 0, 0x0000182c, 0xffffffff, 0x46464646, }, + { 1, 2, 0, 0x00001830, 0xffffffff, 0x38404244, }, + { 1, 2, 1, 0x00001834, 0xffffffff, 0x46464646, }, + { 1, 2, 1, 0x00001838, 0xffffffff, 0x36384042, }, + { 1, 2, 2, 0x000018d8, 0xffffffff, 0x46464646, }, + { 1, 2, 2, 0x000018dc, 0xffffffff, 0x34363840, }, + { 1, 2, 0, 0x0000183c, 0xffffffff, 0x46464646, }, + { 1, 2, 0, 0x00001840, 0xffffffff, 0x38404244, }, + { 1, 2, 0, 0x00001844, 0xffffffff, 0x46463738, }, + { 1, 2, 1, 0x00001848, 0xffffffff, 0x42444646, }, + { 1, 2, 1, 0x0000184c, 0xffffffff, 0x35373840, }, + { 1, 2, 2, 0x000018e0, 0xffffffff, 0x46464646, }, + { 1, 2, 2, 0x000018e4, 0xffffffff, 0x37394143, }, + { 1, 2, 2, 0x000018e8, 0x0000ffff, 0x33333335, }, + { 1, 3, 0, 0x00001a24, 0xffffffff, 0x46464646, }, + { 1, 3, 0, 0x00001a28, 0xffffffff, 0x39414345, }, + { 1, 3, 0, 0x00001a2c, 0xffffffff, 0x46464646, }, + { 1, 3, 0, 0x00001a30, 0xffffffff, 0x38404244, }, + { 1, 3, 1, 0x00001a34, 0xffffffff, 0x46464646, }, + { 1, 3, 1, 0x00001a38, 0xffffffff, 0x36384042, }, + { 1, 3, 2, 0x00001ad8, 0xffffffff, 0x46464646, }, + { 1, 3, 2, 0x00001adc, 0xffffffff, 0x34363840, }, + { 1, 3, 0, 0x00001a3c, 0xffffffff, 0x46464646, }, + { 1, 3, 0, 0x00001a40, 0xffffffff, 0x38404244, }, + { 1, 3, 0, 0x00001a44, 0xffffffff, 0x46463738, }, + { 1, 3, 1, 0x00001a48, 0xffffffff, 0x42444646, }, + { 1, 3, 1, 0x00001a4c, 0xffffffff, 0x35373840, }, + { 1, 3, 2, 0x00001ae0, 0xffffffff, 0x46464646, }, + { 1, 3, 2, 0x00001ae4, 0xffffffff, 0x37394143, }, + { 1, 3, 2, 0x00001ae8, 0x0000ffff, 0x33333335, }, +}; + +RTW_DECL_TABLE_BB_PG(rtw8814a_bb_pg_type8); + +static const u32 rtw8814a_rf_a[] = { + 0x018, 0x00013124, + 0x040, 0x00000C00, + 0x058, 0x00000F98, + 0x07F, 0x00068004, + 0x0B0, 0x000FFFFE, + 0x0B1, 0x0003FF48, + 0x0B2, 0x0006AA3F, + 0x0B3, 0x000FFC9A, + 0x0B4, 0x0000A78F, + 0x0B5, 0x00000A3F, + 0x0B6, 0x0000C09C, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x0B7, 0x00030008, + 0x90000002, 0x00000000, 0x40000000, 0x00000000, + 0x0B7, 0x00030008, + 0x90000003, 0x00000000, 
0x40000000, 0x00000000, + 0x0B7, 0x00030008, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x0B7, 0x00030008, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x0B7, 0x00030008, + 0x90000006, 0x00000000, 0x40000000, 0x00000000, + 0x0B7, 0x00030008, + 0x90000008, 0x00000000, 0x40000000, 0x00000000, + 0x0B7, 0x00030008, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x0B7, 0x00030008, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0x0B7, 0x00030008, + 0xA0000000, 0x00000000, + 0x0B7, 0x0003000C, + 0xB0000000, 0x00000000, + 0x0B8, 0x0007400E, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x0B9, 0x000FBF50, + 0x90000002, 0x00000000, 0x40000000, 0x00000000, + 0x0B9, 0x000FBF50, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x0B9, 0x000FBF50, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x0B9, 0x000FBF50, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x0B9, 0x000FBF50, + 0x90000006, 0x00000000, 0x40000000, 0x00000000, + 0x0B9, 0x000FBF50, + 0x90000008, 0x00000000, 0x40000000, 0x00000000, + 0x0B9, 0x000FBF50, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x0B9, 0x000FBF50, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0x0B9, 0x000FBF50, + 0xA0000000, 0x00000000, + 0x0B9, 0x000FBF50, + 0xB0000000, 0x00000000, + 0x0BA, 0x00050780, + 0x0BB, 0x00000000, + 0x0BC, 0x00040009, + 0x0BD, 0x00000000, + 0x0BE, 0x00000000, + 0x0BF, 0x00000000, + 0x0EF, 0x00020000, + 0x03E, 0x00000000, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x00030000, + 0x90000006, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x00030000, + 0xA0000000, 0x00000000, + 0x03F, 0x00030000, + 0xB0000000, 0x00000000, + 0x03E, 0x00020000, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x00040000, + 0x90000006, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x00040000, + 0xA0000000, 0x00000000, + 0x03F, 0x00040000, + 0xB0000000, 0x00000000, + 0x03E, 0x00040000, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x00030000, + 0x90000006, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x00030000, + 0xA0000000, 0x00000000, + 0x03F, 0x00030000, + 0xB0000000, 0x00000000, + 0x03E, 0x00060000, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x00030000, + 0x90000006, 0x00000000, 0x40000000, 0x00000000, + 0x03F, 0x00030000, + 0xA0000000, 0x00000000, + 0x03F, 0x00030000, + 0xB0000000, 0x00000000, + 0x0EF, 0x00000000, + 0x0EF, 0x00010000, + 0x03E, 0x00000000, + 0x03F, 0x00006800, + 0x03E, 0x00000080, + 0x03F, 0x00006000, + 0x03E, 0x00000100, + 0x03F, 0x00004800, + 0x03E, 0x00000180, + 0x03F, 0x00004000, + 0x03E, 0x00000200, + 0x03F, 0x00004000, + 0x03E, 0x00000280, + 0x03F, 0x00002800, + 0x03E, 0x00000300, + 0x03F, 0x00002800, + 0x03E, 0x00000380, + 0x03F, 0x00002000, + 0x0EF, 0x00000000, + 0x0EF, 0x00040000, + 0x03E, 0x00000000, + 0x03F, 0x000000BC, + 0x03E, 0x00000040, + 0x03F, 0x00000053, + 0x03E, 0x00000050, + 0x03F, 0x00000050, + 0x03E, 0x00000060, + 0x03F, 0x00000050, + 0x0EF, 0x00000000, + 0x0EF, 0x00000400, + 0x03E, 0x00000006, + 0x041, 0x000EE080, + 0x03E, 0x00000008, + 0x041, 0x000EE0C0, + 0x03E, 0x0000000A, + 0x041, 0x000EE100, + 0x03E, 0x0000000C, + 0x041, 0x000EE100, + 0x0EF, 0x00000000, + 0x018, 0x00000006, + 0x80000002, 0x00000000, 0x40000000, 0x00000000, + 0x086, 0x000E335A, + 0x087, 0x00079F80, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x086, 0x000E335A, + 0x087, 0x00079F80, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x086, 0x000E335A, + 0x087, 0x00079F80, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x086, 0x000E335A, 
+ 0x087, 0x00079F80, + 0x90000008, 0x00000000, 0x40000000, 0x00000000, + 0x086, 0x000E335A, + 0x087, 0x00079F80, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x086, 0x000E335A, + 0x087, 0x00079F80, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0x086, 0x000E335A, + 0x087, 0x00079F80, + 0xA0000000, 0x00000000, + 0x086, 0x000E4B58, + 0x087, 0x00049F80, + 0xB0000000, 0x00000000, + 0x0DF, 0x00000008, + 0x0EF, 0x00002000, + 0x80000002, 0x00000000, 0x40000000, 0x00000000, + 0x03B, 0x0003F19B, + 0x03B, 0x00037A5B, + 0x03B, 0x0002A433, + 0x03B, 0x00027BD3, + 0x03B, 0x0001F80B, + 0x03B, 0x000179C3, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x03B, 0x0003F19B, + 0x03B, 0x00037A5B, + 0x03B, 0x0002A433, + 0x03B, 0x00027BD3, + 0x03B, 0x0001F80B, + 0x03B, 0x000179C3, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x03B, 0x0003F19B, + 0x03B, 0x00037A5B, + 0x03B, 0x0002A433, + 0x03B, 0x00027BD3, + 0x03B, 0x0001F80B, + 0x03B, 0x000179C3, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x03B, 0x0003F19B, + 0x03B, 0x00037A5B, + 0x03B, 0x0002A433, + 0x03B, 0x00027BD3, + 0x03B, 0x0001F80B, + 0x03B, 0x000179C3, + 0x90000008, 0x00000000, 0x40000000, 0x00000000, + 0x03B, 0x0003F19B, + 0x03B, 0x00037A5B, + 0x03B, 0x0002A433, + 0x03B, 0x00027BD3, + 0x03B, 0x0001F80B, + 0x03B, 0x000179C3, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x03B, 0x0003F19B, + 0x03B, 0x00037A5B, + 0x03B, 0x0002A433, + 0x03B, 0x00027BD3, + 0x03B, 0x0001F80B, + 0x03B, 0x000179C3, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0x03B, 0x0003F19B, + 0x03B, 0x00037A5B, + 0x03B, 0x0002A433, + 0x03B, 0x00027BD3, + 0x03B, 0x0001F80B, + 0x03B, 0x000179C3, + 0xA0000000, 0x00000000, + 0x03B, 0x0003F258, + 0x03B, 0x00030A58, + 0x03B, 0x0002FA58, + 0x03B, 0x00022590, + 0x03B, 0x0001FA50, + 0x03B, 0x00010248, + 0x03B, 0x00008240, + 0xB0000000, 0x00000000, + 0x0EF, 0x00000100, + 0x80000002, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0000A0D0, + 0x034, 0x000090CD, + 0x034, 0x000080CA, + 0x034, 0x0000704D, + 0x034, 0x0000604A, + 0x034, 0x00005047, + 0x034, 0x0000400A, + 0x034, 0x00003007, + 0x034, 0x00002004, + 0x034, 0x00001001, + 0x034, 0x00000001, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0000A0D0, + 0x034, 0x000090CD, + 0x034, 0x000080CA, + 0x034, 0x0000704D, + 0x034, 0x0000604A, + 0x034, 0x00005047, + 0x034, 0x0000400A, + 0x034, 0x00003007, + 0x034, 0x00002004, + 0x034, 0x00001001, + 0x034, 0x00000001, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0000A0D0, + 0x034, 0x000090CD, + 0x034, 0x000080CA, + 0x034, 0x0000704D, + 0x034, 0x0000604A, + 0x034, 0x00005047, + 0x034, 0x0000400A, + 0x034, 0x00003007, + 0x034, 0x00002004, + 0x034, 0x00001001, + 0x034, 0x00000001, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0000A0D0, + 0x034, 0x000090CD, + 0x034, 0x000080CA, + 0x034, 0x0000704D, + 0x034, 0x0000604A, + 0x034, 0x00005047, + 0x034, 0x0000400A, + 0x034, 0x00003007, + 0x034, 0x00002004, + 0x034, 0x00001001, + 0x034, 0x00000001, + 0x90000008, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0000A0D0, + 0x034, 0x000090CD, + 0x034, 0x000080CA, + 0x034, 0x0000704D, + 0x034, 0x0000604A, + 0x034, 0x00005047, + 0x034, 0x0000400A, + 0x034, 0x00003007, + 0x034, 0x00002004, + 0x034, 0x00001001, + 0x034, 0x00000001, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0000A0D0, + 0x034, 0x000090CD, + 0x034, 0x000080CA, + 0x034, 0x0000704D, + 0x034, 0x0000604A, + 0x034, 0x00005047, + 0x034, 0x0000400A, + 0x034, 0x00003007, + 0x034, 0x00002004, + 0x034, 0x00001001, + 
0x034, 0x00000001, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0000A0D0, + 0x034, 0x000090CD, + 0x034, 0x000080CA, + 0x034, 0x0000704D, + 0x034, 0x0000604A, + 0x034, 0x00005047, + 0x034, 0x0000400A, + 0x034, 0x00003007, + 0x034, 0x00002004, + 0x034, 0x00001001, + 0x034, 0x00000001, + 0xA0000000, 0x00000000, + 0x034, 0x0000ADF6, + 0x034, 0x00009DF3, + 0x034, 0x00008DF0, + 0x034, 0x00007DED, + 0x034, 0x00006DEA, + 0x034, 0x00005CED, + 0x034, 0x00004CEA, + 0x034, 0x000034EA, + 0x034, 0x000024E7, + 0x034, 0x0000146A, + 0x034, 0x0000006B, + 0xB0000000, 0x00000000, + 0x80000002, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0000A0D0, + 0x034, 0x000090CD, + 0x034, 0x000080CA, + 0x034, 0x0000704D, + 0x034, 0x0000604A, + 0x034, 0x00005047, + 0x034, 0x0000400A, + 0x034, 0x00003007, + 0x034, 0x00002004, + 0x034, 0x00001001, + 0x034, 0x00000001, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0000A0D0, + 0x034, 0x000090CD, + 0x034, 0x000080CA, + 0x034, 0x0000704D, + 0x034, 0x0000604A, + 0x034, 0x00005047, + 0x034, 0x0000400A, + 0x034, 0x00003007, + 0x034, 0x00002004, + 0x034, 0x00001001, + 0x034, 0x00000001, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0000A0D0, + 0x034, 0x000090CD, + 0x034, 0x000080CA, + 0x034, 0x0000704D, + 0x034, 0x0000604A, + 0x034, 0x00005047, + 0x034, 0x0000400A, + 0x034, 0x00003007, + 0x034, 0x00002004, + 0x034, 0x00001001, + 0x034, 0x00000001, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0000A0D0, + 0x034, 0x000090CD, + 0x034, 0x000080CA, + 0x034, 0x0000704D, + 0x034, 0x0000604A, + 0x034, 0x00005047, + 0x034, 0x0000400A, + 0x034, 0x00003007, + 0x034, 0x00002004, + 0x034, 0x00001001, + 0x034, 0x00000001, + 0x90000008, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0000A0D0, + 0x034, 0x000090CD, + 0x034, 0x000080CA, + 0x034, 0x0000704D, + 0x034, 0x0000604A, + 0x034, 0x00005047, + 0x034, 0x0000400A, + 0x034, 0x00003007, + 0x034, 0x00002004, + 0x034, 0x00001001, + 0x034, 0x00000001, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0000A0D0, + 0x034, 0x000090CD, + 0x034, 0x000080CA, + 0x034, 0x0000704D, + 0x034, 0x0000604A, + 0x034, 0x00005047, + 0x034, 0x0000400A, + 0x034, 0x00003007, + 0x034, 0x00002004, + 0x034, 0x00001001, + 0x034, 0x00000001, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0000A0D0, + 0x034, 0x000090CD, + 0x034, 0x000080CA, + 0x034, 0x0000704D, + 0x034, 0x0000604A, + 0x034, 0x00005047, + 0x034, 0x0000400A, + 0x034, 0x00003007, + 0x034, 0x00002004, + 0x034, 0x00001001, + 0x034, 0x00000001, + 0xA0000000, 0x00000000, + 0x034, 0x0008ADF6, + 0x034, 0x00089DF3, + 0x034, 0x00088DF0, + 0x034, 0x00087DED, + 0x034, 0x00086DEA, + 0x034, 0x00085CED, + 0x034, 0x00084CEA, + 0x034, 0x000834EA, + 0x034, 0x000824E7, + 0x034, 0x0008146A, + 0x034, 0x0008006B, + 0xB0000000, 0x00000000, + 0x0EF, 0x00000000, + 0x0EF, 0x000020A2, + 0x0DF, 0x00000080, + 0x035, 0x00000192, + 0x035, 0x00008192, + 0x035, 0x00010192, + 0x036, 0x00000024, + 0x036, 0x00008024, + 0x036, 0x00010024, + 0x036, 0x00018024, + 0x0EF, 0x00000000, + 0x051, 0x00000C21, + 0x052, 0x000006D9, + 0x053, 0x000FC649, + 0x054, 0x0000017E, + 0x018, 0x0001012A, + 0x081, 0x0007FC00, + 0x089, 0x00050110, + 0x08A, 0x00043E50, + 0x08B, 0x0002E180, + 0x08C, 0x00093C3C, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x085, 0x000F8000, + 0x90000006, 0x00000000, 0x40000000, 0x00000000, + 0x085, 0x000F8000, + 0xA0000000, 0x00000000, + 0x085, 0x000F8000, + 0xB0000000, 0x00000000, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x08D, 
0x000FFFF0, + 0x90000002, 0x00000000, 0x40000000, 0x00000000, + 0x08D, 0x000FFFF0, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x08D, 0x000FFFF0, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x08D, 0x000FFFF0, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x08D, 0x000FFFF0, + 0x90000006, 0x00000000, 0x40000000, 0x00000000, + 0x08D, 0x000FFFF0, + 0x90000008, 0x00000000, 0x40000000, 0x00000000, + 0x08D, 0x000FFFF0, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x08D, 0x000FFFF0, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0x08D, 0x000FFFF0, + 0xA0000000, 0x00000000, + 0x08D, 0x000FFFF0, + 0xB0000000, 0x00000000, + 0x0EF, 0x00001000, + 0x03A, 0x0000013C, + 0x03B, 0x00038023, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00024000, + 0x90000002, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00024000, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00000000, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00088000, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00000000, + 0x90000006, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00024000, + 0x90000007, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00000000, + 0x90000008, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00084000, + 0x90000009, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00000000, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00088000, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00088000, + 0xA0000000, 0x00000000, + 0x03C, 0x00028000, + 0xB0000000, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x00030023, + 0x03C, 0x00048000, + 0x03A, 0x0000013C, + 0x03B, 0x00028623, + 0x03C, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x00021633, + 0x03C, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x0001C633, + 0x03C, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x00010293, + 0x03C, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x00009593, + 0x03C, 0x00000000, + 0x03A, 0x00000148, + 0x80000008, 0x00000000, 0x40000000, 0x00000000, + 0x03B, 0x0000118B, + 0xA0000000, 0x00000000, + 0x03B, 0x0000078B, + 0xB0000000, 0x00000000, + 0x03C, 0x00000000, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x00078023, + 0x90000002, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x00078023, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x00078023, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x00078023, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x00078023, + 0x90000006, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x00078023, + 0x90000008, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x00078023, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x00078023, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x00078023, + 0xA0000000, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x00078023, + 0xB0000000, 0x00000000, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00024000, + 0x90000002, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x000AC000, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00024000, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00088000, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00024000, + 0x90000006, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00024000, + 0x90000007, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00000000, + 
0x90000008, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00040000, + 0x90000009, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00000000, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00088000, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00088000, + 0xA0000000, 0x00000000, + 0x03C, 0x0004C000, + 0xB0000000, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x00070023, + 0x03C, 0x00048000, + 0x03A, 0x0000013C, + 0x03B, 0x00068623, + 0x03C, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x00061633, + 0x03C, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x0005C633, + 0x03C, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x00050293, + 0x03C, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x00049593, + 0x03C, 0x00000000, + 0x03A, 0x00000148, + 0x80000008, 0x00000000, 0x40000000, 0x00000000, + 0x03B, 0x0004138B, + 0xA0000000, 0x00000000, + 0x03B, 0x0004078B, + 0xB0000000, 0x00000000, + 0x03C, 0x00000000, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x000B8023, + 0x90000002, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x000B8023, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x000B8023, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x000B8023, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x000B8023, + 0x90000006, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x000B8023, + 0x90000008, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x000B8023, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x000B8023, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x000B8023, + 0xA0000000, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x000B8023, + 0xB0000000, 0x00000000, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00084000, + 0x90000002, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x0008C000, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00000000, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00084000, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00000000, + 0x90000006, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00084000, + 0x90000007, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00020000, + 0x90000008, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00060000, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00084000, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00084000, + 0xA0000000, 0x00000000, + 0x03C, 0x00004000, + 0xB0000000, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x000B0023, + 0x80000004, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00020000, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00020000, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00020000, + 0xA0000000, 0x00000000, + 0x03C, 0x00020000, + 0xB0000000, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x000A8623, + 0x03C, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x000A1633, + 0x03C, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x0009C633, + 0x03C, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x00090293, + 0x03C, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x00089593, + 0x03C, 0x00000000, + 0x03A, 0x00000148, + 0x80000008, 0x00000000, 0x40000000, 0x00000000, + 0x03B, 0x0008118B, + 0xA0000000, 0x00000000, + 0x03B, 0x0008078B, + 0xB0000000, 0x00000000, + 0x03C, 0x00000000, + 0x0EF, 0x00000000, + 0x0EF, 0x00000800, + 0x03B, 0x00000000, + 0x80000001, 0x00000000, 0x40000000, 
0x00000000, + 0x03A, 0x00000803, + 0x90000002, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x00000801, + 0x90000006, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x00000803, + 0x90000007, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x00001003, + 0x90000008, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x00001003, + 0x90000009, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x00001003, + 0xA0000000, 0x00000000, + 0x03A, 0x00000803, + 0xB0000000, 0x00000000, + 0x03B, 0x00040000, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x00001000, + 0x90000002, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x00001801, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x00000003, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x00000003, + 0x90000006, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x00001000, + 0x90000007, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x00000000, + 0x90000008, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x00001001, + 0x90000009, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x00000000, + 0xA0000000, 0x00000000, + 0x03A, 0x00001000, + 0xB0000000, 0x00000000, + 0x03B, 0x00080000, + 0x80000007, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x00001802, + 0x90000009, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x00000800, + 0xA0000000, 0x00000000, + 0x03A, 0x00001002, + 0xB0000000, 0x00000000, + 0x0EF, 0x00000000, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x90000002, 0x00000000, 0x40000000, 0x00000000, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x90000006, 0x00000000, 0x40000000, 0x00000000, + 0x90000008, 0x00000000, 0x40000000, 0x00000000, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0xA0000000, 0x00000000, + 0xB0000000, 0x00000000, + 0x018, 0x00013124, + 0x0EF, 0x00000100, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0004A1AD, + 0x034, 0x000491AA, + 0x034, 0x000481A7, + 0x034, 0x000470AA, + 0x034, 0x000460A7, + 0x034, 0x00045049, + 0x034, 0x00044046, + 0x034, 0x00043026, + 0x034, 0x00042009, + 0x034, 0x00041006, + 0x034, 0x00040003, + 0x90000002, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0004A3EF, + 0x034, 0x000493AF, + 0x034, 0x000483AB, + 0x034, 0x0004718C, + 0x034, 0x00046189, + 0x034, 0x0004506D, + 0x034, 0x0004406A, + 0x034, 0x0004302C, + 0x034, 0x00042029, + 0x034, 0x00041026, + 0x034, 0x00040023, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0004A3EF, + 0x034, 0x000493AF, + 0x034, 0x000483AB, + 0x034, 0x0004718C, + 0x034, 0x00046189, + 0x034, 0x0004506D, + 0x034, 0x0004406A, + 0x034, 0x0004302C, + 0x034, 0x00042029, + 0x034, 0x00041026, + 0x034, 0x00040023, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0004A3EF, + 0x034, 0x000493AF, + 0x034, 0x000483AB, + 0x034, 0x0004718C, + 0x034, 0x00046189, + 0x034, 0x0004506D, + 0x034, 0x0004406A, + 0x034, 0x0004302C, + 0x034, 0x00042029, + 0x034, 0x00041026, + 0x034, 0x00040023, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0004A3EF, + 0x034, 0x000493AF, + 0x034, 0x000483AB, + 0x034, 0x0004718C, + 0x034, 0x00046189, + 0x034, 0x0004506D, + 0x034, 0x0004406A, + 0x034, 0x0004302C, + 0x034, 0x00042029, + 0x034, 0x00041026, + 0x034, 0x00040023, + 0x90000008, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0004A3F5, + 0x034, 0x000493F2, + 0x034, 0x000483B0, + 0x034, 0x00047370, + 0x034, 0x0004636D, + 0x034, 0x0004536A, + 0x034, 0x00044349, + 0x034, 0x0004316A, + 
0x034, 0x00042167, + 0x034, 0x00041129, + 0x034, 0x00040049, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0004A3EF, + 0x034, 0x000493AF, + 0x034, 0x000483AB, + 0x034, 0x0004718C, + 0x034, 0x00046189, + 0x034, 0x0004506D, + 0x034, 0x0004406A, + 0x034, 0x0004302C, + 0x034, 0x00042029, + 0x034, 0x00041026, + 0x034, 0x00040023, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0004A3EF, + 0x034, 0x000493AF, + 0x034, 0x000483AB, + 0x034, 0x0004718C, + 0x034, 0x00046189, + 0x034, 0x0004506D, + 0x034, 0x0004406A, + 0x034, 0x0004302C, + 0x034, 0x00042029, + 0x034, 0x00041026, + 0x034, 0x00040023, + 0xA0000000, 0x00000000, + 0x034, 0x0004AFF1, + 0x034, 0x00049FEE, + 0x034, 0x00048FEB, + 0x034, 0x00047FE8, + 0x034, 0x00046DEA, + 0x034, 0x00045DE7, + 0x034, 0x00044CEA, + 0x034, 0x00043CE7, + 0x034, 0x00042C69, + 0x034, 0x00041C66, + 0x034, 0x00040C28, + 0xB0000000, 0x00000000, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0002A1AD, + 0x034, 0x000291AA, + 0x034, 0x000281A7, + 0x034, 0x000270AA, + 0x034, 0x000260A7, + 0x034, 0x00025049, + 0x034, 0x00024046, + 0x034, 0x00023026, + 0x034, 0x00022009, + 0x034, 0x00021006, + 0x034, 0x00020003, + 0x90000002, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0002A3EF, + 0x034, 0x000293AC, + 0x034, 0x0002838A, + 0x034, 0x0002718C, + 0x034, 0x00026189, + 0x034, 0x0002506D, + 0x034, 0x0002406A, + 0x034, 0x0002302C, + 0x034, 0x00022029, + 0x034, 0x00021026, + 0x034, 0x00020023, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0002A3EF, + 0x034, 0x000293AC, + 0x034, 0x0002838A, + 0x034, 0x0002718C, + 0x034, 0x00026189, + 0x034, 0x0002506D, + 0x034, 0x0002406A, + 0x034, 0x0002302C, + 0x034, 0x00022029, + 0x034, 0x00021026, + 0x034, 0x00020023, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0002A3EF, + 0x034, 0x000293AC, + 0x034, 0x0002838A, + 0x034, 0x0002718C, + 0x034, 0x00026189, + 0x034, 0x0002506D, + 0x034, 0x0002406A, + 0x034, 0x0002302C, + 0x034, 0x00022029, + 0x034, 0x00021026, + 0x034, 0x00020023, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0002A3EF, + 0x034, 0x000293AC, + 0x034, 0x0002838A, + 0x034, 0x0002718C, + 0x034, 0x00026189, + 0x034, 0x0002506D, + 0x034, 0x0002406A, + 0x034, 0x0002302C, + 0x034, 0x00022029, + 0x034, 0x00021026, + 0x034, 0x00020023, + 0x90000008, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0002A3F5, + 0x034, 0x000293F2, + 0x034, 0x000282F1, + 0x034, 0x000272B0, + 0x034, 0x000262AD, + 0x034, 0x000252AA, + 0x034, 0x000242A7, + 0x034, 0x000230EC, + 0x034, 0x000220E9, + 0x034, 0x0002106A, + 0x034, 0x00020067, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0002A3EF, + 0x034, 0x000293AC, + 0x034, 0x0002838A, + 0x034, 0x0002718C, + 0x034, 0x00026189, + 0x034, 0x0002506D, + 0x034, 0x0002406A, + 0x034, 0x0002302C, + 0x034, 0x00022029, + 0x034, 0x00021026, + 0x034, 0x00020023, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0002A3EF, + 0x034, 0x000293AC, + 0x034, 0x0002838A, + 0x034, 0x0002718C, + 0x034, 0x00026189, + 0x034, 0x0002506D, + 0x034, 0x0002406A, + 0x034, 0x0002302C, + 0x034, 0x00022029, + 0x034, 0x00021026, + 0x034, 0x00020023, + 0xA0000000, 0x00000000, + 0x034, 0x0002AFF1, + 0x034, 0x00029FEE, + 0x034, 0x00028FEB, + 0x034, 0x00027FE8, + 0x034, 0x00026DEA, + 0x034, 0x00025DE7, + 0x034, 0x00024CEA, + 0x034, 0x00023CE7, + 0x034, 0x00022C69, + 0x034, 0x00021C66, + 0x034, 0x00020C28, + 0xB0000000, 0x00000000, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0000A3EC, + 0x034, 0x0000938C, + 0x034, 0x000081AD, + 
0x034, 0x000071AA, + 0x034, 0x000061A7, + 0x034, 0x000050AA, + 0x034, 0x000040A7, + 0x034, 0x0000302C, + 0x034, 0x00002029, + 0x034, 0x0000100C, + 0x034, 0x00000009, + 0x90000002, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0000A3EE, + 0x034, 0x000093AC, + 0x034, 0x0000838A, + 0x034, 0x0000718C, + 0x034, 0x00006189, + 0x034, 0x0000506D, + 0x034, 0x0000406A, + 0x034, 0x0000302C, + 0x034, 0x00002029, + 0x034, 0x00001026, + 0x034, 0x00000023, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0000A3EE, + 0x034, 0x000093AC, + 0x034, 0x0000838A, + 0x034, 0x0000718C, + 0x034, 0x00006189, + 0x034, 0x0000506D, + 0x034, 0x0000406A, + 0x034, 0x0000302C, + 0x034, 0x00002029, + 0x034, 0x00001026, + 0x034, 0x00000023, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0000A3EE, + 0x034, 0x000093AC, + 0x034, 0x0000838A, + 0x034, 0x0000718C, + 0x034, 0x00006189, + 0x034, 0x0000506D, + 0x034, 0x0000406A, + 0x034, 0x0000302C, + 0x034, 0x00002029, + 0x034, 0x00001026, + 0x034, 0x00000023, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0000A3EE, + 0x034, 0x000093AC, + 0x034, 0x0000838A, + 0x034, 0x0000718C, + 0x034, 0x00006189, + 0x034, 0x0000506D, + 0x034, 0x0000406A, + 0x034, 0x0000302C, + 0x034, 0x00002029, + 0x034, 0x00001026, + 0x034, 0x00000023, + 0x90000008, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0000A3F4, + 0x034, 0x000093F1, + 0x034, 0x000082B1, + 0x034, 0x000071D1, + 0x034, 0x000061CE, + 0x034, 0x000051CB, + 0x034, 0x000041C8, + 0x034, 0x000030CB, + 0x034, 0x000020C8, + 0x034, 0x00001087, + 0x034, 0x00000084, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0000A3EE, + 0x034, 0x000093AC, + 0x034, 0x0000838A, + 0x034, 0x0000718C, + 0x034, 0x00006189, + 0x034, 0x0000506D, + 0x034, 0x0000406A, + 0x034, 0x0000302C, + 0x034, 0x00002029, + 0x034, 0x00001026, + 0x034, 0x00000023, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0000A3EE, + 0x034, 0x000093AC, + 0x034, 0x0000838A, + 0x034, 0x0000718C, + 0x034, 0x00006189, + 0x034, 0x0000506D, + 0x034, 0x0000406A, + 0x034, 0x0000302C, + 0x034, 0x00002029, + 0x034, 0x00001026, + 0x034, 0x00000023, + 0xA0000000, 0x00000000, + 0x034, 0x0000AFF1, + 0x034, 0x00009FEE, + 0x034, 0x00008FEB, + 0x034, 0x00007FE8, + 0x034, 0x00006DEA, + 0x034, 0x00005DE7, + 0x034, 0x00004CEA, + 0x034, 0x00003CE7, + 0x034, 0x00002C69, + 0x034, 0x00001C66, + 0x034, 0x00000C28, + 0xB0000000, 0x00000000, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x000CA1AD, + 0x034, 0x000C91AA, + 0x034, 0x000C81A7, + 0x034, 0x000C70AA, + 0x034, 0x000C60A7, + 0x034, 0x000C5049, + 0x034, 0x000C4046, + 0x034, 0x000C3026, + 0x034, 0x000C2009, + 0x034, 0x000C1006, + 0x034, 0x000C0003, + 0x90000002, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x000CA3EF, + 0x034, 0x000C93AF, + 0x034, 0x000C83AB, + 0x034, 0x000C718C, + 0x034, 0x000C6189, + 0x034, 0x000C506D, + 0x034, 0x000C406A, + 0x034, 0x000C302C, + 0x034, 0x000C2029, + 0x034, 0x000C1026, + 0x034, 0x000C0023, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x000CA3EF, + 0x034, 0x000C93AF, + 0x034, 0x000C83AB, + 0x034, 0x000C718C, + 0x034, 0x000C6189, + 0x034, 0x000C506D, + 0x034, 0x000C406A, + 0x034, 0x000C302C, + 0x034, 0x000C2029, + 0x034, 0x000C1026, + 0x034, 0x000C0023, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x000CA3EF, + 0x034, 0x000C93AF, + 0x034, 0x000C83AB, + 0x034, 0x000C718C, + 0x034, 0x000C6189, + 0x034, 0x000C506D, + 0x034, 0x000C406A, + 0x034, 0x000C302C, + 0x034, 0x000C2029, + 0x034, 0x000C1026, + 0x034, 0x000C0023, + 0x90000005, 
0x00000000, 0x40000000, 0x00000000, + 0x034, 0x000CA3EF, + 0x034, 0x000C93AF, + 0x034, 0x000C83AB, + 0x034, 0x000C718C, + 0x034, 0x000C6189, + 0x034, 0x000C506D, + 0x034, 0x000C406A, + 0x034, 0x000C302C, + 0x034, 0x000C2029, + 0x034, 0x000C1026, + 0x034, 0x000C0023, + 0x90000008, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x000CA3F5, + 0x034, 0x000C93F2, + 0x034, 0x000C83B0, + 0x034, 0x000C7370, + 0x034, 0x000C636D, + 0x034, 0x000C536A, + 0x034, 0x000C4349, + 0x034, 0x000C316A, + 0x034, 0x000C2167, + 0x034, 0x000C1129, + 0x034, 0x000C0049, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x000CA3EF, + 0x034, 0x000C93AF, + 0x034, 0x000C83AB, + 0x034, 0x000C718C, + 0x034, 0x000C6189, + 0x034, 0x000C506D, + 0x034, 0x000C406A, + 0x034, 0x000C302C, + 0x034, 0x000C2029, + 0x034, 0x000C1026, + 0x034, 0x000C0023, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x000CA3EF, + 0x034, 0x000C93AF, + 0x034, 0x000C83AB, + 0x034, 0x000C718C, + 0x034, 0x000C6189, + 0x034, 0x000C506D, + 0x034, 0x000C406A, + 0x034, 0x000C302C, + 0x034, 0x000C2029, + 0x034, 0x000C1026, + 0x034, 0x000C0023, + 0xA0000000, 0x00000000, + 0x034, 0x000CA794, + 0x034, 0x000C9791, + 0x034, 0x000C878E, + 0x034, 0x000C778B, + 0x034, 0x000C658D, + 0x034, 0x000C558A, + 0x034, 0x000C448D, + 0x034, 0x000C348A, + 0x034, 0x000C244C, + 0x034, 0x000C1449, + 0x034, 0x000C042B, + 0xB0000000, 0x00000000, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x000AA1AD, + 0x034, 0x000A91AA, + 0x034, 0x000A81A7, + 0x034, 0x000A70AA, + 0x034, 0x000A60A7, + 0x034, 0x000A5049, + 0x034, 0x000A4046, + 0x034, 0x000A3026, + 0x034, 0x000A2009, + 0x034, 0x000A1006, + 0x034, 0x000A0003, + 0x90000002, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x000AA3EF, + 0x034, 0x000A93AC, + 0x034, 0x000A838A, + 0x034, 0x000A718C, + 0x034, 0x000A6189, + 0x034, 0x000A506D, + 0x034, 0x000A406A, + 0x034, 0x000A302C, + 0x034, 0x000A2029, + 0x034, 0x000A1026, + 0x034, 0x000A0023, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x000AA3EF, + 0x034, 0x000A93AC, + 0x034, 0x000A838A, + 0x034, 0x000A718C, + 0x034, 0x000A6189, + 0x034, 0x000A506D, + 0x034, 0x000A406A, + 0x034, 0x000A302C, + 0x034, 0x000A2029, + 0x034, 0x000A1026, + 0x034, 0x000A0023, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x000AA3EF, + 0x034, 0x000A93AC, + 0x034, 0x000A838A, + 0x034, 0x000A718C, + 0x034, 0x000A6189, + 0x034, 0x000A506D, + 0x034, 0x000A406A, + 0x034, 0x000A302C, + 0x034, 0x000A2029, + 0x034, 0x000A1026, + 0x034, 0x000A0023, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x000AA3EF, + 0x034, 0x000A93AC, + 0x034, 0x000A838A, + 0x034, 0x000A718C, + 0x034, 0x000A6189, + 0x034, 0x000A506D, + 0x034, 0x000A406A, + 0x034, 0x000A302C, + 0x034, 0x000A2029, + 0x034, 0x000A1026, + 0x034, 0x000A0023, + 0x90000008, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x000AA3F5, + 0x034, 0x000A93F2, + 0x034, 0x000A82F1, + 0x034, 0x000A72B0, + 0x034, 0x000A62AD, + 0x034, 0x000A52AA, + 0x034, 0x000A42A7, + 0x034, 0x000A30EC, + 0x034, 0x000A20E9, + 0x034, 0x000A106A, + 0x034, 0x000A0067, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x000AA3EF, + 0x034, 0x000A93AC, + 0x034, 0x000A838A, + 0x034, 0x000A718C, + 0x034, 0x000A6189, + 0x034, 0x000A506D, + 0x034, 0x000A406A, + 0x034, 0x000A302C, + 0x034, 0x000A2029, + 0x034, 0x000A1026, + 0x034, 0x000A0023, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x000AA3EF, + 0x034, 0x000A93AC, + 0x034, 0x000A838A, + 0x034, 0x000A718C, + 0x034, 0x000A6189, + 0x034, 0x000A506D, + 0x034, 
0x000A406A, + 0x034, 0x000A302C, + 0x034, 0x000A2029, + 0x034, 0x000A1026, + 0x034, 0x000A0023, + 0xA0000000, 0x00000000, + 0x034, 0x000AA794, + 0x034, 0x000A9791, + 0x034, 0x000A878E, + 0x034, 0x000A778B, + 0x034, 0x000A658D, + 0x034, 0x000A558A, + 0x034, 0x000A448D, + 0x034, 0x000A348A, + 0x034, 0x000A244C, + 0x034, 0x000A1449, + 0x034, 0x000A042B, + 0xB0000000, 0x00000000, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0008A3EC, + 0x034, 0x0008938C, + 0x034, 0x000881AD, + 0x034, 0x000871AA, + 0x034, 0x000861A7, + 0x034, 0x000850AA, + 0x034, 0x000840A7, + 0x034, 0x0008302C, + 0x034, 0x00082029, + 0x034, 0x0008100C, + 0x034, 0x00080009, + 0x90000002, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0008A3EE, + 0x034, 0x000893AC, + 0x034, 0x0008838A, + 0x034, 0x0008718C, + 0x034, 0x00086189, + 0x034, 0x0008506D, + 0x034, 0x0008406A, + 0x034, 0x0008302C, + 0x034, 0x00082029, + 0x034, 0x00081026, + 0x034, 0x00080023, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0008A3EE, + 0x034, 0x000893AC, + 0x034, 0x0008838A, + 0x034, 0x0008718C, + 0x034, 0x00086189, + 0x034, 0x0008506D, + 0x034, 0x0008406A, + 0x034, 0x0008302C, + 0x034, 0x00082029, + 0x034, 0x00081026, + 0x034, 0x00080023, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0008A3EE, + 0x034, 0x000893AC, + 0x034, 0x0008838A, + 0x034, 0x0008718C, + 0x034, 0x00086189, + 0x034, 0x0008506D, + 0x034, 0x0008406A, + 0x034, 0x0008302C, + 0x034, 0x00082029, + 0x034, 0x00081026, + 0x034, 0x00080023, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0008A3EE, + 0x034, 0x000893AC, + 0x034, 0x0008838A, + 0x034, 0x0008718C, + 0x034, 0x00086189, + 0x034, 0x0008506D, + 0x034, 0x0008406A, + 0x034, 0x0008302C, + 0x034, 0x00082029, + 0x034, 0x00081026, + 0x034, 0x00080023, + 0x90000008, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0008A3F4, + 0x034, 0x000893F1, + 0x034, 0x000882B1, + 0x034, 0x000871D1, + 0x034, 0x000861CE, + 0x034, 0x000851CB, + 0x034, 0x000841C8, + 0x034, 0x000830CB, + 0x034, 0x000820C8, + 0x034, 0x00081087, + 0x034, 0x00080084, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0008A3EE, + 0x034, 0x000893AC, + 0x034, 0x0008838A, + 0x034, 0x0008718C, + 0x034, 0x00086189, + 0x034, 0x0008506D, + 0x034, 0x0008406A, + 0x034, 0x0008302C, + 0x034, 0x00082029, + 0x034, 0x00081026, + 0x034, 0x00080023, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0008A3EE, + 0x034, 0x000893AC, + 0x034, 0x0008838A, + 0x034, 0x0008718C, + 0x034, 0x00086189, + 0x034, 0x0008506D, + 0x034, 0x0008406A, + 0x034, 0x0008302C, + 0x034, 0x00082029, + 0x034, 0x00081026, + 0x034, 0x00080023, + 0xA0000000, 0x00000000, + 0x034, 0x0008A794, + 0x034, 0x00089791, + 0x034, 0x0008878E, + 0x034, 0x0008778B, + 0x034, 0x0008658D, + 0x034, 0x0008558A, + 0x034, 0x0008448D, + 0x034, 0x0008348A, + 0x034, 0x0008244C, + 0x034, 0x00081449, + 0x034, 0x0008042B, + 0xB0000000, 0x00000000, + 0x0EF, 0x00000000, + 0x0DF, 0x00000001, + 0x018, 0x0001712A, + 0x0EF, 0x00000040, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x035, 0x000006CC, + 0x035, 0x000086CC, + 0x035, 0x000106CC, + 0x035, 0x000206CC, + 0x035, 0x000286CC, + 0x035, 0x000306CC, + 0x035, 0x000406CC, + 0x035, 0x000486CC, + 0x035, 0x000506CC, + 0x035, 0x000806CC, + 0x035, 0x000886CC, + 0x035, 0x000906CC, + 0x035, 0x000A06CC, + 0x035, 0x000A86CC, + 0x035, 0x000B06CC, + 0x035, 0x000C06CC, + 0x035, 0x000C86CC, + 0x035, 0x000D06CC, + 0x90000002, 0x00000000, 0x40000000, 0x00000000, + 0x035, 0x000006CC, + 0x035, 0x000086CC, + 0x035, 0x000106CC, + 0x035, 
0x000206CC, + 0x035, 0x000286CC, + 0x035, 0x000306CC, + 0x035, 0x000406CC, + 0x035, 0x000486CC, + 0x035, 0x000506CC, + 0x035, 0x000806CC, + 0x035, 0x000886CC, + 0x035, 0x000906CC, + 0x035, 0x000A06CC, + 0x035, 0x000A86CC, + 0x035, 0x000B06CC, + 0x035, 0x000C06CC, + 0x035, 0x000C86CC, + 0x035, 0x000D06CC, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x035, 0x000006CC, + 0x035, 0x000086CC, + 0x035, 0x000106CC, + 0x035, 0x000206CC, + 0x035, 0x000286CC, + 0x035, 0x000306CC, + 0x035, 0x000406CC, + 0x035, 0x000486CC, + 0x035, 0x000506CC, + 0x035, 0x000806CC, + 0x035, 0x000886CC, + 0x035, 0x000906CC, + 0x035, 0x000A06CC, + 0x035, 0x000A86CC, + 0x035, 0x000B06CC, + 0x035, 0x000C06CC, + 0x035, 0x000C86CC, + 0x035, 0x000D06CC, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x035, 0x000006CC, + 0x035, 0x000086CC, + 0x035, 0x000106CC, + 0x035, 0x000206CC, + 0x035, 0x000286CC, + 0x035, 0x000306CC, + 0x035, 0x000406CC, + 0x035, 0x000486CC, + 0x035, 0x000506CC, + 0x035, 0x000806CC, + 0x035, 0x000886CC, + 0x035, 0x000906CC, + 0x035, 0x000A06CC, + 0x035, 0x000A86CC, + 0x035, 0x000B06CC, + 0x035, 0x000C06CC, + 0x035, 0x000C86CC, + 0x035, 0x000D06CC, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x035, 0x000006CC, + 0x035, 0x000086CC, + 0x035, 0x000106CC, + 0x035, 0x000206CC, + 0x035, 0x000286CC, + 0x035, 0x000306CC, + 0x035, 0x000406CC, + 0x035, 0x000486CC, + 0x035, 0x000506CC, + 0x035, 0x000806CC, + 0x035, 0x000886CC, + 0x035, 0x000906CC, + 0x035, 0x000A06CC, + 0x035, 0x000A86CC, + 0x035, 0x000B06CC, + 0x035, 0x000C06CC, + 0x035, 0x000C86CC, + 0x035, 0x000D06CC, + 0x90000008, 0x00000000, 0x40000000, 0x00000000, + 0x035, 0x000006CC, + 0x035, 0x000086CC, + 0x035, 0x000106CC, + 0x035, 0x000206CC, + 0x035, 0x000286CC, + 0x035, 0x000306CC, + 0x035, 0x000406CC, + 0x035, 0x000486CC, + 0x035, 0x000506CC, + 0x035, 0x000806CC, + 0x035, 0x000886CC, + 0x035, 0x000906CC, + 0x035, 0x000A06CC, + 0x035, 0x000A86CC, + 0x035, 0x000B06CC, + 0x035, 0x000C06CC, + 0x035, 0x000C86CC, + 0x035, 0x000D06CC, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x035, 0x000006CC, + 0x035, 0x000086CC, + 0x035, 0x000106CC, + 0x035, 0x000206CC, + 0x035, 0x000286CC, + 0x035, 0x000306CC, + 0x035, 0x000406CC, + 0x035, 0x000486CC, + 0x035, 0x000506CC, + 0x035, 0x000806CC, + 0x035, 0x000886CC, + 0x035, 0x000906CC, + 0x035, 0x000A06CC, + 0x035, 0x000A86CC, + 0x035, 0x000B06CC, + 0x035, 0x000C06CC, + 0x035, 0x000C86CC, + 0x035, 0x000D06CC, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0x035, 0x000006CC, + 0x035, 0x000086CC, + 0x035, 0x000106CC, + 0x035, 0x000206CC, + 0x035, 0x000286CC, + 0x035, 0x000306CC, + 0x035, 0x000406CC, + 0x035, 0x000486CC, + 0x035, 0x000506CC, + 0x035, 0x000806CC, + 0x035, 0x000886CC, + 0x035, 0x000906CC, + 0x035, 0x000A06CC, + 0x035, 0x000A86CC, + 0x035, 0x000B06CC, + 0x035, 0x000C06CC, + 0x035, 0x000C86CC, + 0x035, 0x000D06CC, + 0xA0000000, 0x00000000, + 0x035, 0x00000747, + 0x035, 0x00008747, + 0x035, 0x00010747, + 0x035, 0x00020747, + 0x035, 0x00028747, + 0x035, 0x00030747, + 0x035, 0x00040747, + 0x035, 0x00048747, + 0x035, 0x00050747, + 0x035, 0x000805FB, + 0x035, 0x000885FB, + 0x035, 0x000905FB, + 0x035, 0x000A05FB, + 0x035, 0x000A85FB, + 0x035, 0x000B05FB, + 0x035, 0x000C05FB, + 0x035, 0x000C85FB, + 0x035, 0x000D05FB, + 0xB0000000, 0x00000000, + 0x0EF, 0x00000000, + 0x0DF, 0x00000001, + 0x018, 0x0001712A, + 0x0EF, 0x00000010, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x036, 0x00000473, + 0x036, 0x00008473, + 0x036, 0x00010473, + 0x036, 0x00020473, + 0x036, 0x00028473, + 0x036, 
0x00030473, + 0x036, 0x00040473, + 0x036, 0x00048473, + 0x036, 0x00050473, + 0x036, 0x00080473, + 0x036, 0x00088473, + 0x036, 0x00090473, + 0x036, 0x000A0473, + 0x036, 0x000A8473, + 0x036, 0x000B0473, + 0x036, 0x000C0473, + 0x036, 0x000C8473, + 0x036, 0x000D0473, + 0x90000002, 0x00000000, 0x40000000, 0x00000000, + 0x036, 0x00000475, + 0x036, 0x00008475, + 0x036, 0x00010475, + 0x036, 0x00020475, + 0x036, 0x00028475, + 0x036, 0x00030475, + 0x036, 0x00040475, + 0x036, 0x00048475, + 0x036, 0x00050475, + 0x036, 0x00080475, + 0x036, 0x00088475, + 0x036, 0x00090475, + 0x036, 0x000A0475, + 0x036, 0x000A8475, + 0x036, 0x000B0475, + 0x036, 0x000C0475, + 0x036, 0x000C8475, + 0x036, 0x000D0475, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x036, 0x00000475, + 0x036, 0x00008475, + 0x036, 0x00010475, + 0x036, 0x00020475, + 0x036, 0x00028475, + 0x036, 0x00030475, + 0x036, 0x00040475, + 0x036, 0x00048475, + 0x036, 0x00050475, + 0x036, 0x00080475, + 0x036, 0x00088475, + 0x036, 0x00090475, + 0x036, 0x000A0475, + 0x036, 0x000A8475, + 0x036, 0x000B0475, + 0x036, 0x000C0475, + 0x036, 0x000C8475, + 0x036, 0x000D0475, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x036, 0x00000475, + 0x036, 0x00008475, + 0x036, 0x00010475, + 0x036, 0x00020475, + 0x036, 0x00028475, + 0x036, 0x00030475, + 0x036, 0x00040475, + 0x036, 0x00048475, + 0x036, 0x00050475, + 0x036, 0x00080475, + 0x036, 0x00088475, + 0x036, 0x00090475, + 0x036, 0x000A0475, + 0x036, 0x000A8475, + 0x036, 0x000B0475, + 0x036, 0x000C0475, + 0x036, 0x000C8475, + 0x036, 0x000D0475, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x036, 0x00000475, + 0x036, 0x00008475, + 0x036, 0x00010475, + 0x036, 0x00020475, + 0x036, 0x00028475, + 0x036, 0x00030475, + 0x036, 0x00040475, + 0x036, 0x00048475, + 0x036, 0x00050475, + 0x036, 0x00080475, + 0x036, 0x00088475, + 0x036, 0x00090475, + 0x036, 0x000A0475, + 0x036, 0x000A8475, + 0x036, 0x000B0475, + 0x036, 0x000C0475, + 0x036, 0x000C8475, + 0x036, 0x000D0475, + 0x90000008, 0x00000000, 0x40000000, 0x00000000, + 0x036, 0x00000475, + 0x036, 0x00008475, + 0x036, 0x00010475, + 0x036, 0x00020475, + 0x036, 0x00028475, + 0x036, 0x00030475, + 0x036, 0x00040475, + 0x036, 0x00048475, + 0x036, 0x00050475, + 0x036, 0x00080475, + 0x036, 0x00088475, + 0x036, 0x00090475, + 0x036, 0x000A0475, + 0x036, 0x000A8475, + 0x036, 0x000B0475, + 0x036, 0x000C0475, + 0x036, 0x000C8475, + 0x036, 0x000D0475, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x036, 0x00000475, + 0x036, 0x00008475, + 0x036, 0x00010475, + 0x036, 0x00020475, + 0x036, 0x00028475, + 0x036, 0x00030475, + 0x036, 0x00040475, + 0x036, 0x00048475, + 0x036, 0x00050475, + 0x036, 0x00080475, + 0x036, 0x00088475, + 0x036, 0x00090475, + 0x036, 0x000A0475, + 0x036, 0x000A8475, + 0x036, 0x000B0475, + 0x036, 0x000C0475, + 0x036, 0x000C8475, + 0x036, 0x000D0475, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0x036, 0x00000475, + 0x036, 0x00008475, + 0x036, 0x00010475, + 0x036, 0x00020475, + 0x036, 0x00028475, + 0x036, 0x00030475, + 0x036, 0x00040475, + 0x036, 0x00048475, + 0x036, 0x00050475, + 0x036, 0x00080475, + 0x036, 0x00088475, + 0x036, 0x00090475, + 0x036, 0x000A0475, + 0x036, 0x000A8475, + 0x036, 0x000B0475, + 0x036, 0x000C0475, + 0x036, 0x000C8475, + 0x036, 0x000D0475, + 0xA0000000, 0x00000000, + 0x036, 0x00000473, + 0x036, 0x00008473, + 0x036, 0x00010473, + 0x036, 0x00020473, + 0x036, 0x00028473, + 0x036, 0x00030473, + 0x036, 0x00040473, + 0x036, 0x00048473, + 0x036, 0x00050473, + 0x036, 0x00080473, + 0x036, 0x00088473, + 0x036, 0x00090473, + 0x036, 
0x000A0473, + 0x036, 0x000A8473, + 0x036, 0x000B0473, + 0x036, 0x000C0473, + 0x036, 0x000C8473, + 0x036, 0x000D0473, + 0xB0000000, 0x00000000, + 0x0EF, 0x00000000, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x90000002, 0x00000000, 0x40000000, 0x00000000, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x90000008, 0x00000000, 0x40000000, 0x00000000, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0xA0000000, 0x00000000, + 0x0EF, 0x00000004, + 0x037, 0x00000000, + 0x038, 0x00005146, + 0x037, 0x00004000, + 0x038, 0x00005146, + 0x037, 0x00008000, + 0x038, 0x00005146, + 0x037, 0x00010000, + 0x038, 0x00005146, + 0x037, 0x00014000, + 0x038, 0x00005146, + 0x037, 0x00018000, + 0x038, 0x00004D4E, + 0x037, 0x0001C000, + 0x038, 0x00004D4E, + 0x037, 0x00020000, + 0x038, 0x00004D4E, + 0x037, 0x00024000, + 0x038, 0x000071C6, + 0x037, 0x00028000, + 0x038, 0x000071C6, + 0x037, 0x0002C000, + 0x038, 0x000071C6, + 0x037, 0x00030000, + 0x038, 0x000071CE, + 0x037, 0x00034000, + 0x038, 0x000071CE, + 0x037, 0x00038000, + 0x038, 0x00005126, + 0x037, 0x0003C000, + 0x038, 0x00005126, + 0x037, 0x00040000, + 0x038, 0x00005126, + 0x037, 0x00044000, + 0x038, 0x00005126, + 0x037, 0x00048000, + 0x038, 0x00005126, + 0x037, 0x00080000, + 0x038, 0x00005ECE, + 0x037, 0x00084000, + 0x038, 0x00005ECE, + 0x037, 0x00088000, + 0x038, 0x00005ECE, + 0x037, 0x00090000, + 0x038, 0x00005ECE, + 0x037, 0x00094000, + 0x038, 0x00005ECE, + 0x037, 0x00098000, + 0x038, 0x00005ECE, + 0x037, 0x0009C000, + 0x038, 0x00005ECE, + 0x037, 0x000A0000, + 0x038, 0x00005ECE, + 0x037, 0x000A4000, + 0x038, 0x00005ECE, + 0x037, 0x000A8000, + 0x038, 0x00005ECE, + 0x037, 0x000AC000, + 0x038, 0x00005ECE, + 0x037, 0x000B0000, + 0x038, 0x00005ECE, + 0x037, 0x000B4000, + 0x038, 0x00005ECE, + 0x037, 0x000B8000, + 0x038, 0x00005ECE, + 0x037, 0x000BC000, + 0x038, 0x00005ECE, + 0x037, 0x000C0000, + 0x038, 0x00005ECE, + 0x037, 0x000C4000, + 0x038, 0x00005ECE, + 0x037, 0x000C8000, + 0x038, 0x00005ECE, + 0x0EF, 0x00000000, + 0xB0000000, 0x00000000, + 0x0EF, 0x00000008, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x90000002, 0x00000000, 0x40000000, 0x00000000, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x90000008, 0x00000000, 0x40000000, 0x00000000, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0xA0000000, 0x00000000, + 0x03C, 0x0000007D, + 0x03C, 0x0000047D, + 0x03C, 0x0000087D, + 0x03C, 0x0000107D, + 0x03C, 0x0000147D, + 0x03C, 0x0000187D, + 0xB0000000, 0x00000000, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x0000027D, + 0x03C, 0x0000054A, + 0x03C, 0x00000821, + 0x03C, 0x0000127D, + 0x03C, 0x0000154A, + 0x03C, 0x00001821, + 0x03C, 0x0000227D, + 0x03C, 0x0000254A, + 0x03C, 0x00002821, + 0x90000002, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x0000027D, + 0x03C, 0x0000054A, + 0x03C, 0x00000821, + 0x03C, 0x0000127D, + 0x03C, 0x0000154A, + 0x03C, 0x00001821, + 0x03C, 0x0000227D, + 0x03C, 0x0000254A, + 0x03C, 0x00002821, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x0000027D, + 0x03C, 0x0000054A, + 0x03C, 0x00000821, + 0x03C, 0x0000127D, + 0x03C, 0x0000154A, + 0x03C, 0x00001821, + 0x03C, 0x0000227D, + 0x03C, 0x0000254A, + 0x03C, 0x00002821, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 
0x0000027D, + 0x03C, 0x0000054A, + 0x03C, 0x00000821, + 0x03C, 0x0000127D, + 0x03C, 0x0000154A, + 0x03C, 0x00001821, + 0x03C, 0x0000227D, + 0x03C, 0x0000254A, + 0x03C, 0x00002821, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x0000027D, + 0x03C, 0x0000054A, + 0x03C, 0x00000821, + 0x03C, 0x0000127D, + 0x03C, 0x0000154A, + 0x03C, 0x00001821, + 0x03C, 0x0000227D, + 0x03C, 0x0000254A, + 0x03C, 0x00002821, + 0x90000008, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x0000027D, + 0x03C, 0x0000054A, + 0x03C, 0x00000821, + 0x03C, 0x0000127D, + 0x03C, 0x0000154A, + 0x03C, 0x00001821, + 0x03C, 0x0000227D, + 0x03C, 0x0000254A, + 0x03C, 0x00002821, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x0000027D, + 0x03C, 0x0000054A, + 0x03C, 0x00000821, + 0x03C, 0x0000127D, + 0x03C, 0x0000154A, + 0x03C, 0x00001821, + 0x03C, 0x0000227D, + 0x03C, 0x0000254A, + 0x03C, 0x00002821, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x0000027D, + 0x03C, 0x0000054A, + 0x03C, 0x00000821, + 0x03C, 0x0000127D, + 0x03C, 0x0000154A, + 0x03C, 0x00001821, + 0x03C, 0x0000227D, + 0x03C, 0x0000254A, + 0x03C, 0x00002821, + 0xA0000000, 0x00000000, + 0x03C, 0x0000037E, + 0x03C, 0x00000575, + 0x03C, 0x00000971, + 0x03C, 0x0000127E, + 0x03C, 0x00001575, + 0x03C, 0x00001871, + 0x03C, 0x0000217E, + 0x03C, 0x00002575, + 0x03C, 0x00002871, + 0xB0000000, 0x00000000, + 0x0EF, 0x00000000, + 0x061, 0x000C0D47, + 0x062, 0x0000133C, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x063, 0x000750E7, + 0x90000002, 0x00000000, 0x40000000, 0x00000000, + 0x063, 0x000750E7, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x063, 0x000750E7, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x063, 0x000750E7, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x063, 0x000750E7, + 0x90000008, 0x00000000, 0x40000000, 0x00000000, + 0x063, 0x000750E7, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x063, 0x000750E7, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0x063, 0x000750E7, + 0xA0000000, 0x00000000, + 0x063, 0x0007D0E7, + 0xB0000000, 0x00000000, + 0x064, 0x00014FEC, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x065, 0x000920D0, + 0x90000002, 0x00000000, 0x40000000, 0x00000000, + 0x065, 0x000920D0, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x065, 0x000920D0, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x065, 0x000920D0, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x065, 0x000920D0, + 0x90000008, 0x00000000, 0x40000000, 0x00000000, + 0x065, 0x000920D0, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x065, 0x000920D0, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0x065, 0x000920D0, + 0xA0000000, 0x00000000, + 0x065, 0x000933FF, + 0xB0000000, 0x00000000, + 0x066, 0x00000040, + 0x057, 0x00050000, + 0x056, 0x00051DF0, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x055, 0x00082061, + 0x90000002, 0x00000000, 0x40000000, 0x00000000, + 0x055, 0x00082061, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x055, 0x00082061, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x055, 0x00082061, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x055, 0x00082061, + 0x90000008, 0x00000000, 0x40000000, 0x00000000, + 0x055, 0x00082061, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x055, 0x00082061, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0x055, 0x00082061, + 0xA0000000, 0x00000000, + 0x055, 0x00082060, + 0xB0000000, 0x00000000, + 0x01C, 0x000739D2, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x01F, 0x0002255C, + 0x90000004, 
0x00000000, 0x40000000, 0x00000000, + 0x01F, 0x0002255C, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x01F, 0x0002255C, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0x01F, 0x0002255C, + 0xA0000000, 0x00000000, + 0x01F, 0x0002255C, + 0xB0000000, 0x00000000, + 0x0B1, 0x0007FF48, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x0C4, 0x00081700, + 0x90000002, 0x00000000, 0x40000000, 0x00000000, + 0x0C4, 0x00081700, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x0C4, 0x00081700, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x0C4, 0x00081700, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x0C4, 0x00081700, + 0x90000006, 0x00000000, 0x40000000, 0x00000000, + 0x0C4, 0x00081700, + 0x90000008, 0x00000000, 0x40000000, 0x00000000, + 0x0C4, 0x00081700, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x0C4, 0x00081700, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0x0C4, 0x00081700, + 0xA0000000, 0x00000000, + 0x0C4, 0x00083F00, + 0xB0000000, 0x00000000, + 0x018, 0x0001B126, + 0xFFE, 0x00000000, + 0xFFE, 0x00000000, + 0xFFE, 0x00000000, + 0x018, 0x00013126, + 0x018, 0x00013124, +}; + +RTW_DECL_TABLE_RF_RADIO(rtw8814a_rf_a, A); + +static const u32 rtw8814a_rf_b[] = { + 0x018, 0x00013124, + 0x040, 0x00000C00, + 0x058, 0x00000F98, + 0x07F, 0x00068004, + 0x018, 0x00000006, + 0x80000002, 0x00000000, 0x40000000, 0x00000000, + 0x086, 0x000E335A, + 0x087, 0x00079F80, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x086, 0x000E335A, + 0x087, 0x00079F80, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x086, 0x000E335A, + 0x087, 0x00079F80, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x086, 0x000E335A, + 0x087, 0x00079F80, + 0x90000008, 0x00000000, 0x40000000, 0x00000000, + 0x086, 0x000E335A, + 0x087, 0x00079F80, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x086, 0x000E335A, + 0x087, 0x00079F80, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0x086, 0x000E335A, + 0x087, 0x00079F80, + 0xA0000000, 0x00000000, + 0x086, 0x000E4B58, + 0x087, 0x00049F80, + 0xB0000000, 0x00000000, + 0x0DF, 0x00000008, + 0x0EF, 0x00002000, + 0x80000002, 0x00000000, 0x40000000, 0x00000000, + 0x03B, 0x0003F19B, + 0x03B, 0x00037A5B, + 0x03B, 0x0002A433, + 0x03B, 0x00027BD3, + 0x03B, 0x0001F80B, + 0x03B, 0x00017BC3, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x03B, 0x0003F39B, + 0x03B, 0x00037A5B, + 0x03B, 0x0002A433, + 0x03B, 0x00027BD3, + 0x03B, 0x0001F80B, + 0x03B, 0x00017BC3, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x03B, 0x0003F19B, + 0x03B, 0x00037A5B, + 0x03B, 0x0002A433, + 0x03B, 0x00027BD3, + 0x03B, 0x0001F80B, + 0x03B, 0x00017BC3, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x03B, 0x0003F39B, + 0x03B, 0x00037A5B, + 0x03B, 0x0002A433, + 0x03B, 0x00027BD3, + 0x03B, 0x0001F80B, + 0x03B, 0x00017BC3, + 0x90000008, 0x00000000, 0x40000000, 0x00000000, + 0x03B, 0x0003F19B, + 0x03B, 0x00037A5B, + 0x03B, 0x0002A433, + 0x03B, 0x00027BD3, + 0x03B, 0x0001F80B, + 0x03B, 0x00017BC3, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x03B, 0x0003F19B, + 0x03B, 0x00037A5B, + 0x03B, 0x0002A433, + 0x03B, 0x00027BD3, + 0x03B, 0x0001F80B, + 0x03B, 0x00017BC3, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0x03B, 0x0003F19B, + 0x03B, 0x00037A5B, + 0x03B, 0x0002A433, + 0x03B, 0x00027BD3, + 0x03B, 0x0001F80B, + 0x03B, 0x00017BC3, + 0xA0000000, 0x00000000, + 0x03B, 0x0003F258, + 0x03B, 0x00030A58, + 0x03B, 0x0002FA58, + 0x03B, 0x00022590, + 0x03B, 0x0001FA50, + 0x03B, 0x00010248, + 0x03B, 0x00008240, + 0xB0000000, 0x00000000, + 0x0EF, 
0x00000100, + 0x80000002, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0000A0D0, + 0x034, 0x000090CD, + 0x034, 0x000080CA, + 0x034, 0x0000704D, + 0x034, 0x0000604A, + 0x034, 0x00005047, + 0x034, 0x0000400A, + 0x034, 0x00003007, + 0x034, 0x00002004, + 0x034, 0x00001001, + 0x034, 0x00000001, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0000A0D0, + 0x034, 0x000090CD, + 0x034, 0x000080CA, + 0x034, 0x0000704D, + 0x034, 0x0000604A, + 0x034, 0x00005047, + 0x034, 0x0000400A, + 0x034, 0x00003007, + 0x034, 0x00002004, + 0x034, 0x00001001, + 0x034, 0x00000001, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0000A0D0, + 0x034, 0x000090CD, + 0x034, 0x000080CA, + 0x034, 0x0000704D, + 0x034, 0x0000604A, + 0x034, 0x00005047, + 0x034, 0x0000400A, + 0x034, 0x00003007, + 0x034, 0x00002004, + 0x034, 0x00001001, + 0x034, 0x00000001, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0000A0D0, + 0x034, 0x000090CD, + 0x034, 0x000080CA, + 0x034, 0x0000704D, + 0x034, 0x0000604A, + 0x034, 0x00005047, + 0x034, 0x0000400A, + 0x034, 0x00003007, + 0x034, 0x00002004, + 0x034, 0x00001001, + 0x034, 0x00000001, + 0x90000008, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0000A0D0, + 0x034, 0x000090CD, + 0x034, 0x000080CA, + 0x034, 0x0000704D, + 0x034, 0x0000604A, + 0x034, 0x00005047, + 0x034, 0x0000400A, + 0x034, 0x00003007, + 0x034, 0x00002004, + 0x034, 0x00001001, + 0x034, 0x00000001, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0000A0D0, + 0x034, 0x000090CD, + 0x034, 0x000080CA, + 0x034, 0x0000704D, + 0x034, 0x0000604A, + 0x034, 0x00005047, + 0x034, 0x0000400A, + 0x034, 0x00003007, + 0x034, 0x00002004, + 0x034, 0x00001001, + 0x034, 0x00000001, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0000A0D0, + 0x034, 0x000090CD, + 0x034, 0x000080CA, + 0x034, 0x0000704D, + 0x034, 0x0000604A, + 0x034, 0x00005047, + 0x034, 0x0000400A, + 0x034, 0x00003007, + 0x034, 0x00002004, + 0x034, 0x00001001, + 0x034, 0x00000001, + 0xA0000000, 0x00000000, + 0x034, 0x0000ADF6, + 0x034, 0x00009DF3, + 0x034, 0x00008DF0, + 0x034, 0x00007DED, + 0x034, 0x00006DEA, + 0x034, 0x00005CED, + 0x034, 0x00004CEA, + 0x034, 0x000034EA, + 0x034, 0x000024E7, + 0x034, 0x0000146A, + 0x034, 0x0000006B, + 0xB0000000, 0x00000000, + 0x80000002, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0000A0D0, + 0x034, 0x000090CD, + 0x034, 0x000080CA, + 0x034, 0x0000704D, + 0x034, 0x0000604A, + 0x034, 0x00005047, + 0x034, 0x0000400A, + 0x034, 0x00003007, + 0x034, 0x00002004, + 0x034, 0x00001001, + 0x034, 0x00000001, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0000A0D0, + 0x034, 0x000090CD, + 0x034, 0x000080CA, + 0x034, 0x0000704D, + 0x034, 0x0000604A, + 0x034, 0x00005047, + 0x034, 0x0000400A, + 0x034, 0x00003007, + 0x034, 0x00002004, + 0x034, 0x00001001, + 0x034, 0x00000001, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0000A0D0, + 0x034, 0x000090CD, + 0x034, 0x000080CA, + 0x034, 0x0000704D, + 0x034, 0x0000604A, + 0x034, 0x00005047, + 0x034, 0x0000400A, + 0x034, 0x00003007, + 0x034, 0x00002004, + 0x034, 0x00001001, + 0x034, 0x00000001, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0000A0D0, + 0x034, 0x000090CD, + 0x034, 0x000080CA, + 0x034, 0x0000704D, + 0x034, 0x0000604A, + 0x034, 0x00005047, + 0x034, 0x0000400A, + 0x034, 0x00003007, + 0x034, 0x00002004, + 0x034, 0x00001001, + 0x034, 0x00000001, + 0x90000008, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0000A0D0, + 0x034, 0x000090CD, + 0x034, 0x000080CA, + 0x034, 0x0000704D, + 0x034, 0x0000604A, + 0x034, 
0x00005047, + 0x034, 0x0000400A, + 0x034, 0x00003007, + 0x034, 0x00002004, + 0x034, 0x00001001, + 0x034, 0x00000001, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0000A0D0, + 0x034, 0x000090CD, + 0x034, 0x000080CA, + 0x034, 0x0000704D, + 0x034, 0x0000604A, + 0x034, 0x00005047, + 0x034, 0x0000400A, + 0x034, 0x00003007, + 0x034, 0x00002004, + 0x034, 0x00001001, + 0x034, 0x00000001, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0000A0D0, + 0x034, 0x000090CD, + 0x034, 0x000080CA, + 0x034, 0x0000704D, + 0x034, 0x0000604A, + 0x034, 0x00005047, + 0x034, 0x0000400A, + 0x034, 0x00003007, + 0x034, 0x00002004, + 0x034, 0x00001001, + 0x034, 0x00000001, + 0xA0000000, 0x00000000, + 0x034, 0x0008ADF6, + 0x034, 0x00089DF3, + 0x034, 0x00088DF0, + 0x034, 0x00087DED, + 0x034, 0x00086DEA, + 0x034, 0x00085CED, + 0x034, 0x00084CEA, + 0x034, 0x000834EA, + 0x034, 0x000824E7, + 0x034, 0x0008146A, + 0x034, 0x0008006B, + 0xB0000000, 0x00000000, + 0x0EF, 0x00000000, + 0x0EF, 0x000020A2, + 0x0DF, 0x00000080, + 0x035, 0x00000192, + 0x035, 0x00008192, + 0x035, 0x00010192, + 0x036, 0x00000024, + 0x036, 0x00008024, + 0x036, 0x00010024, + 0x036, 0x00018024, + 0x0EF, 0x00000000, + 0x051, 0x00000C21, + 0x052, 0x000006D9, + 0x053, 0x000FC649, + 0x054, 0x0000017E, + 0x018, 0x0001012A, + 0x081, 0x0007FC00, + 0x089, 0x00050110, + 0x08A, 0x00043E50, + 0x08B, 0x0002E180, + 0x08C, 0x00093C3C, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x085, 0x000F8000, + 0x90000006, 0x00000000, 0x40000000, 0x00000000, + 0x085, 0x000F8000, + 0xA0000000, 0x00000000, + 0x085, 0x000F8000, + 0xB0000000, 0x00000000, + 0x08D, 0x000FFFF0, + 0x0EF, 0x00001000, + 0x03A, 0x0000013C, + 0x03B, 0x00038023, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00088000, + 0x90000002, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00084000, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00000000, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00088000, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00000000, + 0x90000006, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00088000, + 0x90000008, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00088000, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00088000, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00088000, + 0xA0000000, 0x00000000, + 0x03C, 0x00040000, + 0xB0000000, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x00030023, + 0x03C, 0x00048000, + 0x03A, 0x0000013C, + 0x03B, 0x00028623, + 0x03C, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x00021633, + 0x03C, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x0001C633, + 0x03C, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x00010293, + 0x03C, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x00009593, + 0x03C, 0x00000000, + 0x03A, 0x00000148, + 0x80000008, 0x00000000, 0x40000000, 0x00000000, + 0x03B, 0x00000F8B, + 0xA0000000, 0x00000000, + 0x03B, 0x0000078B, + 0xB0000000, 0x00000000, + 0x03C, 0x00000000, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x00078023, + 0x90000002, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x00078023, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x00078023, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x00078023, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x00078023, + 0x90000006, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x00078023, + 0x90000008, 
0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x00078023, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x00078023, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x00078023, + 0xA0000000, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x00078023, + 0xB0000000, 0x00000000, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00020000, + 0x90000002, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00060000, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00000000, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00048000, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00048000, + 0x90000006, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00020000, + 0x90000007, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00000000, + 0x90000008, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00088000, + 0x90000009, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00000000, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00048000, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00048000, + 0xA0000000, 0x00000000, + 0x03C, 0x00020000, + 0xB0000000, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x00070023, + 0x03C, 0x00048000, + 0x03A, 0x0000013C, + 0x03B, 0x00068623, + 0x03C, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x00061633, + 0x03C, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x0005C633, + 0x03C, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x00050293, + 0x03C, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x00049593, + 0x03C, 0x00000000, + 0x03A, 0x00000148, + 0x03B, 0x0004078B, + 0x03C, 0x00000000, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x000B8023, + 0x90000002, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x000B8023, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x000B8023, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x000B8023, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x000B8023, + 0x90000006, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x000B8023, + 0x90000008, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x000B8023, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x000B8023, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x000B8023, + 0xA0000000, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x000B8023, + 0xB0000000, 0x00000000, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00048000, + 0x90000002, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00060000, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x0004C000, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00044000, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x0004C000, + 0x90000006, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00048000, + 0x90000007, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00000000, + 0x90000008, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00020000, + 0x90000009, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00004000, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00044000, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00044000, + 0xA0000000, 0x00000000, + 0x03C, 0x00020000, + 0xB0000000, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x000B0023, + 0x80000004, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00020000, + 0x9000000a, 
0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00020000, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00020000, + 0xA0000000, 0x00000000, + 0x03C, 0x00020000, + 0xB0000000, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x000A8623, + 0x03C, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x000A1633, + 0x03C, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x0009C633, + 0x03C, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x00090293, + 0x03C, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x00089593, + 0x03C, 0x00000000, + 0x03A, 0x00000148, + 0x80000008, 0x00000000, 0x40000000, 0x00000000, + 0x03B, 0x0008138B, + 0xA0000000, 0x00000000, + 0x03B, 0x0008078B, + 0xB0000000, 0x00000000, + 0x03C, 0x00000000, + 0x0EF, 0x00000000, + 0x0EF, 0x00000800, + 0x03B, 0x00000000, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x00000803, + 0x90000006, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x00000803, + 0x90000007, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x00001003, + 0x90000008, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x00001003, + 0x90000009, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x00001003, + 0xA0000000, 0x00000000, + 0x03A, 0x00000803, + 0xB0000000, 0x00000000, + 0x03B, 0x00040000, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x00001000, + 0x90000002, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x00001001, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x00000803, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x00001003, + 0x90000006, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x00001000, + 0x90000007, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x00000000, + 0x90000008, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x00000800, + 0x90000009, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x00000800, + 0xA0000000, 0x00000000, + 0x03A, 0x00001000, + 0xB0000000, 0x00000000, + 0x03B, 0x00080000, + 0x80000003, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x00000000, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x00000000, + 0x90000007, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x00001802, + 0x90000009, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x00001802, + 0xA0000000, 0x00000000, + 0x03A, 0x00001002, + 0xB0000000, 0x00000000, + 0x0EF, 0x00000000, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x90000002, 0x00000000, 0x40000000, 0x00000000, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x90000006, 0x00000000, 0x40000000, 0x00000000, + 0x90000008, 0x00000000, 0x40000000, 0x00000000, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0xA0000000, 0x00000000, + 0xB0000000, 0x00000000, + 0x018, 0x00013124, + 0x0EF, 0x00000100, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0004A38C, + 0x034, 0x000491AD, + 0x034, 0x000481AA, + 0x034, 0x000471A7, + 0x034, 0x000460AA, + 0x034, 0x000450A7, + 0x034, 0x0004402C, + 0x034, 0x00043029, + 0x034, 0x0004200C, + 0x034, 0x00041009, + 0x034, 0x00040006, + 0x90000002, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0004A38C, + 0x034, 0x00049389, + 0x034, 0x0004816D, + 0x034, 0x0004716A, + 0x034, 0x0004606D, + 0x034, 0x0004506A, + 0x034, 0x0004402C, + 0x034, 0x00043029, + 0x034, 0x00042026, + 0x034, 0x00041009, + 0x034, 0x00040006, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0004A38B, + 0x034, 0x00049388, + 0x034, 0x0004818B, + 0x034, 0x00047188, + 0x034, 0x0004606D, + 0x034, 0x0004506A, + 0x034, 
0x0004402C, + 0x034, 0x00043029, + 0x034, 0x00042026, + 0x034, 0x00041009, + 0x034, 0x00040006, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0004A38C, + 0x034, 0x00049389, + 0x034, 0x0004816D, + 0x034, 0x0004716A, + 0x034, 0x0004606D, + 0x034, 0x0004506A, + 0x034, 0x0004402C, + 0x034, 0x00043029, + 0x034, 0x00042026, + 0x034, 0x00041009, + 0x034, 0x00040006, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0004A38B, + 0x034, 0x00049388, + 0x034, 0x0004818B, + 0x034, 0x00047188, + 0x034, 0x0004606D, + 0x034, 0x0004506A, + 0x034, 0x0004402C, + 0x034, 0x00043029, + 0x034, 0x00042026, + 0x034, 0x00041009, + 0x034, 0x00040006, + 0x90000008, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0004A3F5, + 0x034, 0x000493F3, + 0x034, 0x000483B2, + 0x034, 0x00047390, + 0x034, 0x0004638D, + 0x034, 0x0004538A, + 0x034, 0x00044387, + 0x034, 0x0004324A, + 0x034, 0x00042247, + 0x034, 0x0004104D, + 0x034, 0x0004004A, + 0x90000009, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0004AFF7, + 0x034, 0x00049FF6, + 0x034, 0x00048FF3, + 0x034, 0x00047FF0, + 0x034, 0x00046FED, + 0x034, 0x00045FEA, + 0x034, 0x00044FE7, + 0x034, 0x00043DEA, + 0x034, 0x00042DE7, + 0x034, 0x00041DE4, + 0x034, 0x00040CE7, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0004A38C, + 0x034, 0x00049389, + 0x034, 0x0004816D, + 0x034, 0x0004716A, + 0x034, 0x0004606D, + 0x034, 0x0004506A, + 0x034, 0x0004402C, + 0x034, 0x00043029, + 0x034, 0x00042026, + 0x034, 0x00041009, + 0x034, 0x00040006, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0004A38C, + 0x034, 0x00049389, + 0x034, 0x0004816D, + 0x034, 0x0004716A, + 0x034, 0x0004606D, + 0x034, 0x0004506A, + 0x034, 0x0004402C, + 0x034, 0x00043029, + 0x034, 0x00042026, + 0x034, 0x00041009, + 0x034, 0x00040006, + 0xA0000000, 0x00000000, + 0x034, 0x0004AFF4, + 0x034, 0x00049FF1, + 0x034, 0x00048FEE, + 0x034, 0x00047FEB, + 0x034, 0x00046FE8, + 0x034, 0x00045DEA, + 0x034, 0x00044CED, + 0x034, 0x00043CEA, + 0x034, 0x00042C6C, + 0x034, 0x00041C69, + 0x034, 0x00040C2B, + 0xB0000000, 0x00000000, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0002A38C, + 0x034, 0x000291AD, + 0x034, 0x000281AA, + 0x034, 0x000271A7, + 0x034, 0x000260AA, + 0x034, 0x000250A7, + 0x034, 0x0002402C, + 0x034, 0x00023029, + 0x034, 0x0002200C, + 0x034, 0x00021009, + 0x034, 0x00020006, + 0x90000002, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0002A3EE, + 0x034, 0x000293AC, + 0x034, 0x00028389, + 0x034, 0x0002716D, + 0x034, 0x0002616A, + 0x034, 0x0002506D, + 0x034, 0x0002406A, + 0x034, 0x0002302C, + 0x034, 0x00022029, + 0x034, 0x00021026, + 0x034, 0x00020023, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0002A3EF, + 0x034, 0x000293AD, + 0x034, 0x0002838A, + 0x034, 0x0002718C, + 0x034, 0x00026189, + 0x034, 0x0002506D, + 0x034, 0x0002406A, + 0x034, 0x0002302C, + 0x034, 0x00022029, + 0x034, 0x00021026, + 0x034, 0x00020023, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0002A3EE, + 0x034, 0x000293AC, + 0x034, 0x00028389, + 0x034, 0x0002716D, + 0x034, 0x0002616A, + 0x034, 0x0002506D, + 0x034, 0x0002406A, + 0x034, 0x0002302C, + 0x034, 0x00022029, + 0x034, 0x00021026, + 0x034, 0x00020023, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0002A3EF, + 0x034, 0x000293AD, + 0x034, 0x0002838A, + 0x034, 0x0002718C, + 0x034, 0x00026189, + 0x034, 0x0002506D, + 0x034, 0x0002406A, + 0x034, 0x0002302C, + 0x034, 0x00022029, + 0x034, 0x00021026, + 0x034, 0x00020023, + 0x90000008, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0002A3F5, + 0x034, 
0x000293F3, + 0x034, 0x000283D0, + 0x034, 0x00027371, + 0x034, 0x0002636E, + 0x034, 0x0002536B, + 0x034, 0x00024368, + 0x034, 0x0002332A, + 0x034, 0x00022327, + 0x034, 0x0002104C, + 0x034, 0x00020049, + 0x90000009, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0002AFF7, + 0x034, 0x00029FF6, + 0x034, 0x00028FF3, + 0x034, 0x00027FF0, + 0x034, 0x00026FED, + 0x034, 0x00025FEA, + 0x034, 0x00024FE7, + 0x034, 0x00023DEA, + 0x034, 0x00022DE7, + 0x034, 0x00021DE4, + 0x034, 0x00020F25, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0002A3EE, + 0x034, 0x000293AC, + 0x034, 0x00028389, + 0x034, 0x0002716D, + 0x034, 0x0002616A, + 0x034, 0x0002506D, + 0x034, 0x0002406A, + 0x034, 0x0002302C, + 0x034, 0x00022029, + 0x034, 0x00021026, + 0x034, 0x00020023, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0002A3EE, + 0x034, 0x000293AC, + 0x034, 0x00028389, + 0x034, 0x0002716D, + 0x034, 0x0002616A, + 0x034, 0x0002506D, + 0x034, 0x0002406A, + 0x034, 0x0002302C, + 0x034, 0x00022029, + 0x034, 0x00021026, + 0x034, 0x00020023, + 0xA0000000, 0x00000000, + 0x034, 0x0002AFF4, + 0x034, 0x00029FF1, + 0x034, 0x00028FEE, + 0x034, 0x00027FEB, + 0x034, 0x00026FE8, + 0x034, 0x00025DEA, + 0x034, 0x00024CED, + 0x034, 0x00023CEA, + 0x034, 0x00022C6C, + 0x034, 0x00021C69, + 0x034, 0x00020C2B, + 0xB0000000, 0x00000000, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0000A38C, + 0x034, 0x000091AD, + 0x034, 0x000081AA, + 0x034, 0x000071A7, + 0x034, 0x000060AA, + 0x034, 0x000050A7, + 0x034, 0x0000402C, + 0x034, 0x00003029, + 0x034, 0x00002026, + 0x034, 0x00001009, + 0x034, 0x00000006, + 0x90000002, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0000A3EC, + 0x034, 0x000093AC, + 0x034, 0x000081EC, + 0x034, 0x0000716D, + 0x034, 0x0000616A, + 0x034, 0x0000506D, + 0x034, 0x0000404C, + 0x034, 0x0000302C, + 0x034, 0x00002029, + 0x034, 0x00001026, + 0x034, 0x00000023, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0000A3EF, + 0x034, 0x000093AD, + 0x034, 0x0000838A, + 0x034, 0x0000718C, + 0x034, 0x00006189, + 0x034, 0x0000506D, + 0x034, 0x0000406A, + 0x034, 0x0000302C, + 0x034, 0x00002029, + 0x034, 0x00001026, + 0x034, 0x00000023, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0000A3EC, + 0x034, 0x000093AC, + 0x034, 0x000081EC, + 0x034, 0x0000716D, + 0x034, 0x0000616A, + 0x034, 0x0000506D, + 0x034, 0x0000404C, + 0x034, 0x0000302C, + 0x034, 0x00002029, + 0x034, 0x00001026, + 0x034, 0x00000023, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0000A3EF, + 0x034, 0x000093AD, + 0x034, 0x0000838A, + 0x034, 0x0000718C, + 0x034, 0x00006189, + 0x034, 0x0000506D, + 0x034, 0x0000406A, + 0x034, 0x0000302C, + 0x034, 0x00002029, + 0x034, 0x00001026, + 0x034, 0x00000023, + 0x90000008, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0000A3F4, + 0x034, 0x000093F0, + 0x034, 0x000083AE, + 0x034, 0x00007350, + 0x034, 0x0000634D, + 0x034, 0x0000534A, + 0x034, 0x00004347, + 0x034, 0x0000312D, + 0x034, 0x0000212A, + 0x034, 0x00001127, + 0x034, 0x0000002A, + 0x90000009, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0000AFF7, + 0x034, 0x00009FF4, + 0x034, 0x00008FF1, + 0x034, 0x00007FEE, + 0x034, 0x00006FEB, + 0x034, 0x00005FE8, + 0x034, 0x00004DEB, + 0x034, 0x00003DE8, + 0x034, 0x00002DE5, + 0x034, 0x00001C8B, + 0x034, 0x00000C88, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0000A3EC, + 0x034, 0x000093AC, + 0x034, 0x000081EC, + 0x034, 0x0000716D, + 0x034, 0x0000616A, + 0x034, 0x0000506D, + 0x034, 0x0000404C, + 0x034, 0x0000302C, + 0x034, 0x00002029, + 0x034, 0x00001026, 
+ 0x034, 0x00000023, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0000A3EC, + 0x034, 0x000093AC, + 0x034, 0x000081EC, + 0x034, 0x0000716D, + 0x034, 0x0000616A, + 0x034, 0x0000506D, + 0x034, 0x0000404C, + 0x034, 0x0000302C, + 0x034, 0x00002029, + 0x034, 0x00001026, + 0x034, 0x00000023, + 0xA0000000, 0x00000000, + 0x034, 0x0000AFF4, + 0x034, 0x00009FF1, + 0x034, 0x00008FEE, + 0x034, 0x00007FEB, + 0x034, 0x00006FE8, + 0x034, 0x00005DEA, + 0x034, 0x00004CED, + 0x034, 0x00003CEA, + 0x034, 0x00002C6C, + 0x034, 0x00001C69, + 0x034, 0x00000C2B, + 0xB0000000, 0x00000000, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x000CA38C, + 0x034, 0x000C91AD, + 0x034, 0x000C81AA, + 0x034, 0x000C71A7, + 0x034, 0x000C60AA, + 0x034, 0x000C50A7, + 0x034, 0x000C402C, + 0x034, 0x000C3029, + 0x034, 0x000C200C, + 0x034, 0x000C1009, + 0x034, 0x000C0006, + 0x90000002, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x000CA38C, + 0x034, 0x000C9389, + 0x034, 0x000C816D, + 0x034, 0x000C716A, + 0x034, 0x000C606D, + 0x034, 0x000C506A, + 0x034, 0x000C402C, + 0x034, 0x000C3029, + 0x034, 0x000C2026, + 0x034, 0x000C1009, + 0x034, 0x000C0006, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x000CA38B, + 0x034, 0x000C9388, + 0x034, 0x000C818B, + 0x034, 0x000C7188, + 0x034, 0x000C606D, + 0x034, 0x000C506A, + 0x034, 0x000C402C, + 0x034, 0x000C3029, + 0x034, 0x000C2026, + 0x034, 0x000C1009, + 0x034, 0x000C0006, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x000CA38C, + 0x034, 0x000C9389, + 0x034, 0x000C816D, + 0x034, 0x000C716A, + 0x034, 0x000C606D, + 0x034, 0x000C506A, + 0x034, 0x000C402C, + 0x034, 0x000C3029, + 0x034, 0x000C2026, + 0x034, 0x000C1009, + 0x034, 0x000C0006, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x000CA38B, + 0x034, 0x000C9388, + 0x034, 0x000C818B, + 0x034, 0x000C7188, + 0x034, 0x000C606D, + 0x034, 0x000C506A, + 0x034, 0x000C402C, + 0x034, 0x000C3029, + 0x034, 0x000C2026, + 0x034, 0x000C1009, + 0x034, 0x000C0006, + 0x90000008, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x000CA3F5, + 0x034, 0x000C93F3, + 0x034, 0x000C83B2, + 0x034, 0x000C7390, + 0x034, 0x000C638D, + 0x034, 0x000C538A, + 0x034, 0x000C4387, + 0x034, 0x000C324A, + 0x034, 0x000C2247, + 0x034, 0x000C104D, + 0x034, 0x000C004A, + 0x90000009, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x000CAFF7, + 0x034, 0x000C9FF6, + 0x034, 0x000C8FF3, + 0x034, 0x000C7FF0, + 0x034, 0x000C6FED, + 0x034, 0x000C5FEA, + 0x034, 0x000C4FE7, + 0x034, 0x000C3DEA, + 0x034, 0x000C2DE7, + 0x034, 0x000C1DE4, + 0x034, 0x000C0CE7, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x000CA38C, + 0x034, 0x000C9389, + 0x034, 0x000C816D, + 0x034, 0x000C716A, + 0x034, 0x000C606D, + 0x034, 0x000C506A, + 0x034, 0x000C402C, + 0x034, 0x000C3029, + 0x034, 0x000C2026, + 0x034, 0x000C1009, + 0x034, 0x000C0006, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x000CA38C, + 0x034, 0x000C9389, + 0x034, 0x000C816D, + 0x034, 0x000C716A, + 0x034, 0x000C606D, + 0x034, 0x000C506A, + 0x034, 0x000C402C, + 0x034, 0x000C3029, + 0x034, 0x000C2026, + 0x034, 0x000C1009, + 0x034, 0x000C0006, + 0xA0000000, 0x00000000, + 0x034, 0x000CA794, + 0x034, 0x000C9791, + 0x034, 0x000C878E, + 0x034, 0x000C778B, + 0x034, 0x000C658D, + 0x034, 0x000C558A, + 0x034, 0x000C448D, + 0x034, 0x000C348A, + 0x034, 0x000C244C, + 0x034, 0x000C1449, + 0x034, 0x000C042B, + 0xB0000000, 0x00000000, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x000AA38C, + 0x034, 0x000A91AD, + 0x034, 0x000A81AA, + 0x034, 0x000A71A7, + 0x034, 0x000A60AA, + 
0x034, 0x000A50A7, + 0x034, 0x000A402C, + 0x034, 0x000A3029, + 0x034, 0x000A200C, + 0x034, 0x000A1009, + 0x034, 0x000A0006, + 0x90000002, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x000AA3EE, + 0x034, 0x000A93AC, + 0x034, 0x000A8389, + 0x034, 0x000A716D, + 0x034, 0x000A616A, + 0x034, 0x000A506D, + 0x034, 0x000A406A, + 0x034, 0x000A302C, + 0x034, 0x000A2029, + 0x034, 0x000A1026, + 0x034, 0x000A0023, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x000AA3EF, + 0x034, 0x000A93AD, + 0x034, 0x000A838A, + 0x034, 0x000A718C, + 0x034, 0x000A6189, + 0x034, 0x000A506D, + 0x034, 0x000A406A, + 0x034, 0x000A302C, + 0x034, 0x000A2029, + 0x034, 0x000A1026, + 0x034, 0x000A0023, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x000AA3EE, + 0x034, 0x000A93AC, + 0x034, 0x000A8389, + 0x034, 0x000A716D, + 0x034, 0x000A616A, + 0x034, 0x000A506D, + 0x034, 0x000A406A, + 0x034, 0x000A302C, + 0x034, 0x000A2029, + 0x034, 0x000A1026, + 0x034, 0x000A0023, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x000AA3EF, + 0x034, 0x000A93AD, + 0x034, 0x000A838A, + 0x034, 0x000A718C, + 0x034, 0x000A6189, + 0x034, 0x000A506D, + 0x034, 0x000A406A, + 0x034, 0x000A302C, + 0x034, 0x000A2029, + 0x034, 0x000A1026, + 0x034, 0x000A0023, + 0x90000008, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x000AA3F5, + 0x034, 0x000A93F3, + 0x034, 0x000A83D0, + 0x034, 0x000A7371, + 0x034, 0x000A636E, + 0x034, 0x000A536B, + 0x034, 0x000A4368, + 0x034, 0x000A332A, + 0x034, 0x000A2327, + 0x034, 0x000A104C, + 0x034, 0x000A0049, + 0x90000009, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x000AAFF7, + 0x034, 0x000A9FF6, + 0x034, 0x000A8FF3, + 0x034, 0x000A7FF0, + 0x034, 0x000A6FED, + 0x034, 0x000A5FEA, + 0x034, 0x000A4FE7, + 0x034, 0x000A3DEA, + 0x034, 0x000A2DE7, + 0x034, 0x000A1DE4, + 0x034, 0x000A0F25, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x000AA3EE, + 0x034, 0x000A93AC, + 0x034, 0x000A8389, + 0x034, 0x000A716D, + 0x034, 0x000A616A, + 0x034, 0x000A506D, + 0x034, 0x000A406A, + 0x034, 0x000A302C, + 0x034, 0x000A2029, + 0x034, 0x000A1026, + 0x034, 0x000A0023, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x000AA3EE, + 0x034, 0x000A93AC, + 0x034, 0x000A8389, + 0x034, 0x000A716D, + 0x034, 0x000A616A, + 0x034, 0x000A506D, + 0x034, 0x000A406A, + 0x034, 0x000A302C, + 0x034, 0x000A2029, + 0x034, 0x000A1026, + 0x034, 0x000A0023, + 0xA0000000, 0x00000000, + 0x034, 0x000AA794, + 0x034, 0x000A9791, + 0x034, 0x000A878E, + 0x034, 0x000A778B, + 0x034, 0x000A658D, + 0x034, 0x000A558A, + 0x034, 0x000A448D, + 0x034, 0x000A348A, + 0x034, 0x000A244C, + 0x034, 0x000A1449, + 0x034, 0x000A042B, + 0xB0000000, 0x00000000, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0008A38C, + 0x034, 0x000891AD, + 0x034, 0x000881AA, + 0x034, 0x000871A7, + 0x034, 0x000860AA, + 0x034, 0x000850A7, + 0x034, 0x0008402C, + 0x034, 0x00083029, + 0x034, 0x00082026, + 0x034, 0x00081009, + 0x034, 0x00080006, + 0x90000002, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0008A3EC, + 0x034, 0x000893AC, + 0x034, 0x000881EC, + 0x034, 0x0008716D, + 0x034, 0x0008616A, + 0x034, 0x0008506D, + 0x034, 0x0008404C, + 0x034, 0x0008302C, + 0x034, 0x00082029, + 0x034, 0x00081026, + 0x034, 0x00080023, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0008A3EF, + 0x034, 0x000893AD, + 0x034, 0x0008838A, + 0x034, 0x0008718C, + 0x034, 0x00086189, + 0x034, 0x0008506D, + 0x034, 0x0008406A, + 0x034, 0x0008302C, + 0x034, 0x00082029, + 0x034, 0x00081026, + 0x034, 0x00080023, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 
0x034, 0x0008A3EC, + 0x034, 0x000893AC, + 0x034, 0x000881EC, + 0x034, 0x0008716D, + 0x034, 0x0008616A, + 0x034, 0x0008506D, + 0x034, 0x0008404C, + 0x034, 0x0008302C, + 0x034, 0x00082029, + 0x034, 0x00081026, + 0x034, 0x00080023, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0008A3EF, + 0x034, 0x000893AD, + 0x034, 0x0008838A, + 0x034, 0x0008718C, + 0x034, 0x00086189, + 0x034, 0x0008506D, + 0x034, 0x0008406A, + 0x034, 0x0008302C, + 0x034, 0x00082029, + 0x034, 0x00081026, + 0x034, 0x00080023, + 0x90000008, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0008A3F4, + 0x034, 0x000893F0, + 0x034, 0x000883AE, + 0x034, 0x00087350, + 0x034, 0x0008634D, + 0x034, 0x0008534A, + 0x034, 0x00084347, + 0x034, 0x0008312D, + 0x034, 0x0008212A, + 0x034, 0x00081127, + 0x034, 0x0008002A, + 0x90000009, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0008AFF7, + 0x034, 0x00089FF4, + 0x034, 0x00088FF1, + 0x034, 0x00087FEE, + 0x034, 0x00086FEB, + 0x034, 0x00085FE8, + 0x034, 0x00084DEB, + 0x034, 0x00083DE8, + 0x034, 0x00082DE5, + 0x034, 0x00081C8B, + 0x034, 0x00080C88, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0008A3EC, + 0x034, 0x000893AC, + 0x034, 0x000881EC, + 0x034, 0x0008716D, + 0x034, 0x0008616A, + 0x034, 0x0008506D, + 0x034, 0x0008404C, + 0x034, 0x0008302C, + 0x034, 0x00082029, + 0x034, 0x00081026, + 0x034, 0x00080023, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0008A3EC, + 0x034, 0x000893AC, + 0x034, 0x000881EC, + 0x034, 0x0008716D, + 0x034, 0x0008616A, + 0x034, 0x0008506D, + 0x034, 0x0008404C, + 0x034, 0x0008302C, + 0x034, 0x00082029, + 0x034, 0x00081026, + 0x034, 0x00080023, + 0xA0000000, 0x00000000, + 0x034, 0x0008A794, + 0x034, 0x00089791, + 0x034, 0x0008878E, + 0x034, 0x0008778B, + 0x034, 0x0008658D, + 0x034, 0x0008558A, + 0x034, 0x0008448D, + 0x034, 0x0008348A, + 0x034, 0x0008244C, + 0x034, 0x00081449, + 0x034, 0x0008042B, + 0xB0000000, 0x00000000, + 0x0EF, 0x00000000, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x0DF, 0x00000001, + 0x90000002, 0x00000000, 0x40000000, 0x00000000, + 0x0DF, 0x00000001, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x0DF, 0x00000001, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x0DF, 0x00000001, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x0DF, 0x00000001, + 0x90000008, 0x00000000, 0x40000000, 0x00000000, + 0x0DF, 0x00000001, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x0DF, 0x00000001, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0x0DF, 0x00000001, + 0xA0000000, 0x00000000, + 0x0DF, 0x00000000, + 0xB0000000, 0x00000000, + 0x018, 0x0001712A, + 0x0EF, 0x00000040, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x035, 0x000006CC, + 0x035, 0x000086CC, + 0x035, 0x000106CC, + 0x035, 0x000206CC, + 0x035, 0x000286CC, + 0x035, 0x000306CC, + 0x035, 0x000406CC, + 0x035, 0x000486CC, + 0x035, 0x000506CC, + 0x035, 0x000806CC, + 0x035, 0x000886CC, + 0x035, 0x000906CC, + 0x035, 0x000A06CC, + 0x035, 0x000A86CC, + 0x035, 0x000B06CC, + 0x035, 0x000C06CC, + 0x035, 0x000C86CC, + 0x035, 0x000D06CC, + 0x90000002, 0x00000000, 0x40000000, 0x00000000, + 0x035, 0x000006CC, + 0x035, 0x000086CC, + 0x035, 0x000106CC, + 0x035, 0x000206CC, + 0x035, 0x000286CC, + 0x035, 0x000306CC, + 0x035, 0x000406CC, + 0x035, 0x000486CC, + 0x035, 0x000506CC, + 0x035, 0x000806CC, + 0x035, 0x000886CC, + 0x035, 0x000906CC, + 0x035, 0x000A06CC, + 0x035, 0x000A86CC, + 0x035, 0x000B06CC, + 0x035, 0x000C06CC, + 0x035, 0x000C86CC, + 0x035, 0x000D06CC, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x035, 0x000006CC, + 
0x035, 0x000086CC, + 0x035, 0x000106CC, + 0x035, 0x000206CC, + 0x035, 0x000286CC, + 0x035, 0x000306CC, + 0x035, 0x000406CC, + 0x035, 0x000486CC, + 0x035, 0x000506CC, + 0x035, 0x000806CC, + 0x035, 0x000886CC, + 0x035, 0x000906CC, + 0x035, 0x000A06CC, + 0x035, 0x000A86CC, + 0x035, 0x000B06CC, + 0x035, 0x000C06CC, + 0x035, 0x000C86CC, + 0x035, 0x000D06CC, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x035, 0x000006CC, + 0x035, 0x000086CC, + 0x035, 0x000106CC, + 0x035, 0x000206CC, + 0x035, 0x000286CC, + 0x035, 0x000306CC, + 0x035, 0x000406CC, + 0x035, 0x000486CC, + 0x035, 0x000506CC, + 0x035, 0x000806CC, + 0x035, 0x000886CC, + 0x035, 0x000906CC, + 0x035, 0x000A06CC, + 0x035, 0x000A86CC, + 0x035, 0x000B06CC, + 0x035, 0x000C06CC, + 0x035, 0x000C86CC, + 0x035, 0x000D06CC, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x035, 0x000006CC, + 0x035, 0x000086CC, + 0x035, 0x000106CC, + 0x035, 0x000206CC, + 0x035, 0x000286CC, + 0x035, 0x000306CC, + 0x035, 0x000406CC, + 0x035, 0x000486CC, + 0x035, 0x000506CC, + 0x035, 0x000806CC, + 0x035, 0x000886CC, + 0x035, 0x000906CC, + 0x035, 0x000A06CC, + 0x035, 0x000A86CC, + 0x035, 0x000B06CC, + 0x035, 0x000C06CC, + 0x035, 0x000C86CC, + 0x035, 0x000D06CC, + 0x90000008, 0x00000000, 0x40000000, 0x00000000, + 0x035, 0x000006CC, + 0x035, 0x000086CC, + 0x035, 0x000106CC, + 0x035, 0x000206CC, + 0x035, 0x000286CC, + 0x035, 0x000306CC, + 0x035, 0x000406CC, + 0x035, 0x000486CC, + 0x035, 0x000506CC, + 0x035, 0x000806CC, + 0x035, 0x000886CC, + 0x035, 0x000906CC, + 0x035, 0x000A06CC, + 0x035, 0x000A86CC, + 0x035, 0x000B06CC, + 0x035, 0x000C06CC, + 0x035, 0x000C86CC, + 0x035, 0x000D06CC, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x035, 0x000006CC, + 0x035, 0x000086CC, + 0x035, 0x000106CC, + 0x035, 0x000206CC, + 0x035, 0x000286CC, + 0x035, 0x000306CC, + 0x035, 0x000406CC, + 0x035, 0x000486CC, + 0x035, 0x000506CC, + 0x035, 0x000806CC, + 0x035, 0x000886CC, + 0x035, 0x000906CC, + 0x035, 0x000A06CC, + 0x035, 0x000A86CC, + 0x035, 0x000B06CC, + 0x035, 0x000C06CC, + 0x035, 0x000C86CC, + 0x035, 0x000D06CC, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0x035, 0x000006CC, + 0x035, 0x000086CC, + 0x035, 0x000106CC, + 0x035, 0x000206CC, + 0x035, 0x000286CC, + 0x035, 0x000306CC, + 0x035, 0x000406CC, + 0x035, 0x000486CC, + 0x035, 0x000506CC, + 0x035, 0x000806CC, + 0x035, 0x000886CC, + 0x035, 0x000906CC, + 0x035, 0x000A06CC, + 0x035, 0x000A86CC, + 0x035, 0x000B06CC, + 0x035, 0x000C06CC, + 0x035, 0x000C86CC, + 0x035, 0x000D06CC, + 0xA0000000, 0x00000000, + 0x035, 0x00000484, + 0x035, 0x00008484, + 0x035, 0x00010484, + 0x035, 0x00020584, + 0x035, 0x00028584, + 0x035, 0x00030584, + 0x035, 0x00040584, + 0x035, 0x00048584, + 0x035, 0x00050584, + 0x035, 0x000805FB, + 0x035, 0x000885FB, + 0x035, 0x000905FB, + 0x035, 0x000A05FB, + 0x035, 0x000A85FB, + 0x035, 0x000B05FB, + 0x035, 0x000C05FB, + 0x035, 0x000C85FB, + 0x035, 0x000D05FB, + 0xB0000000, 0x00000000, + 0x0EF, 0x00000000, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x0DF, 0x00000001, + 0x90000002, 0x00000000, 0x40000000, 0x00000000, + 0x0DF, 0x00000001, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x0DF, 0x00000001, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x0DF, 0x00000001, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x0DF, 0x00000001, + 0x90000008, 0x00000000, 0x40000000, 0x00000000, + 0x0DF, 0x00000001, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x0DF, 0x00000001, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0x0DF, 0x00000001, + 0xA0000000, 0x00000000, + 0x0DF, 
0x00000000, + 0xB0000000, 0x00000000, + 0x018, 0x0001712A, + 0x0EF, 0x00000010, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x036, 0x00000473, + 0x036, 0x00008473, + 0x036, 0x00010473, + 0x036, 0x00020473, + 0x036, 0x00028473, + 0x036, 0x00030473, + 0x036, 0x00040473, + 0x036, 0x00048473, + 0x036, 0x00050473, + 0x036, 0x00080473, + 0x036, 0x00088473, + 0x036, 0x00090473, + 0x036, 0x000A0473, + 0x036, 0x000A8473, + 0x036, 0x000B0473, + 0x036, 0x000C0473, + 0x036, 0x000C8473, + 0x036, 0x000D0473, + 0x90000002, 0x00000000, 0x40000000, 0x00000000, + 0x036, 0x00000475, + 0x036, 0x00008475, + 0x036, 0x00010475, + 0x036, 0x00020475, + 0x036, 0x00028475, + 0x036, 0x00030475, + 0x036, 0x00040475, + 0x036, 0x00048475, + 0x036, 0x00050475, + 0x036, 0x00080475, + 0x036, 0x00088475, + 0x036, 0x00090475, + 0x036, 0x000A0475, + 0x036, 0x000A8475, + 0x036, 0x000B0475, + 0x036, 0x000C0475, + 0x036, 0x000C8475, + 0x036, 0x000D0475, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x036, 0x00000475, + 0x036, 0x00008475, + 0x036, 0x00010475, + 0x036, 0x00020475, + 0x036, 0x00028475, + 0x036, 0x00030475, + 0x036, 0x00040475, + 0x036, 0x00048475, + 0x036, 0x00050475, + 0x036, 0x00080475, + 0x036, 0x00088475, + 0x036, 0x00090475, + 0x036, 0x000A0475, + 0x036, 0x000A8475, + 0x036, 0x000B0475, + 0x036, 0x000C0475, + 0x036, 0x000C8475, + 0x036, 0x000D0475, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x036, 0x00000475, + 0x036, 0x00008475, + 0x036, 0x00010475, + 0x036, 0x00020475, + 0x036, 0x00028475, + 0x036, 0x00030475, + 0x036, 0x00040475, + 0x036, 0x00048475, + 0x036, 0x00050475, + 0x036, 0x00080475, + 0x036, 0x00088475, + 0x036, 0x00090475, + 0x036, 0x000A0475, + 0x036, 0x000A8475, + 0x036, 0x000B0475, + 0x036, 0x000C0475, + 0x036, 0x000C8475, + 0x036, 0x000D0475, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x036, 0x00000475, + 0x036, 0x00008475, + 0x036, 0x00010475, + 0x036, 0x00020475, + 0x036, 0x00028475, + 0x036, 0x00030475, + 0x036, 0x00040475, + 0x036, 0x00048475, + 0x036, 0x00050475, + 0x036, 0x00080475, + 0x036, 0x00088475, + 0x036, 0x00090475, + 0x036, 0x000A0475, + 0x036, 0x000A8475, + 0x036, 0x000B0475, + 0x036, 0x000C0475, + 0x036, 0x000C8475, + 0x036, 0x000D0475, + 0x90000008, 0x00000000, 0x40000000, 0x00000000, + 0x036, 0x00000475, + 0x036, 0x00008475, + 0x036, 0x00010475, + 0x036, 0x00020475, + 0x036, 0x00028475, + 0x036, 0x00030475, + 0x036, 0x00040475, + 0x036, 0x00048475, + 0x036, 0x00050475, + 0x036, 0x00080475, + 0x036, 0x00088475, + 0x036, 0x00090475, + 0x036, 0x000A0475, + 0x036, 0x000A8475, + 0x036, 0x000B0475, + 0x036, 0x000C0475, + 0x036, 0x000C8475, + 0x036, 0x000D0475, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x036, 0x00000475, + 0x036, 0x00008475, + 0x036, 0x00010475, + 0x036, 0x00020475, + 0x036, 0x00028475, + 0x036, 0x00030475, + 0x036, 0x00040475, + 0x036, 0x00048475, + 0x036, 0x00050475, + 0x036, 0x00080475, + 0x036, 0x00088475, + 0x036, 0x00090475, + 0x036, 0x000A0475, + 0x036, 0x000A8475, + 0x036, 0x000B0475, + 0x036, 0x000C0475, + 0x036, 0x000C8475, + 0x036, 0x000D0475, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0x036, 0x00000475, + 0x036, 0x00008475, + 0x036, 0x00010475, + 0x036, 0x00020475, + 0x036, 0x00028475, + 0x036, 0x00030475, + 0x036, 0x00040475, + 0x036, 0x00048475, + 0x036, 0x00050475, + 0x036, 0x00080475, + 0x036, 0x00088475, + 0x036, 0x00090475, + 0x036, 0x000A0475, + 0x036, 0x000A8475, + 0x036, 0x000B0475, + 0x036, 0x000C0475, + 0x036, 0x000C8475, + 0x036, 0x000D0475, + 0xA0000000, 0x00000000, + 0x036, 0x00000474, + 
0x036, 0x00008474, + 0x036, 0x00010474, + 0x036, 0x00020474, + 0x036, 0x00028474, + 0x036, 0x00030474, + 0x036, 0x00040474, + 0x036, 0x00048474, + 0x036, 0x00050474, + 0x036, 0x00080474, + 0x036, 0x00088474, + 0x036, 0x00090474, + 0x036, 0x000A0474, + 0x036, 0x000A8474, + 0x036, 0x000B0474, + 0x036, 0x000C0474, + 0x036, 0x000C8474, + 0x036, 0x000D0474, + 0xB0000000, 0x00000000, + 0x0EF, 0x00000000, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x90000002, 0x00000000, 0x40000000, 0x00000000, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x90000008, 0x00000000, 0x40000000, 0x00000000, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0xA0000000, 0x00000000, + 0x0EF, 0x00000004, + 0x037, 0x00000000, + 0x038, 0x0000514E, + 0x037, 0x00004000, + 0x038, 0x0000514E, + 0x037, 0x00008000, + 0x038, 0x0000514E, + 0x037, 0x00010000, + 0x038, 0x0000514E, + 0x037, 0x00014000, + 0x038, 0x0000514E, + 0x037, 0x00018000, + 0x038, 0x0000514E, + 0x037, 0x0001C000, + 0x038, 0x0000514E, + 0x037, 0x00020000, + 0x038, 0x0000514E, + 0x037, 0x00024000, + 0x038, 0x0000514E, + 0x037, 0x00028000, + 0x038, 0x0000514E, + 0x037, 0x0002C000, + 0x038, 0x0000714E, + 0x037, 0x00030000, + 0x038, 0x0000514E, + 0x037, 0x00034000, + 0x038, 0x0000514E, + 0x037, 0x00038000, + 0x038, 0x0000514E, + 0x037, 0x0003C000, + 0x038, 0x0000514E, + 0x037, 0x00040000, + 0x038, 0x0000514E, + 0x037, 0x00044000, + 0x038, 0x0000514E, + 0x037, 0x00048000, + 0x038, 0x0000514E, + 0x037, 0x00080000, + 0x038, 0x00005ECE, + 0x037, 0x00084000, + 0x038, 0x00005ECE, + 0x037, 0x00088000, + 0x038, 0x00005ECE, + 0x037, 0x00090000, + 0x038, 0x00005ECE, + 0x037, 0x00094000, + 0x038, 0x00005ECE, + 0x037, 0x00098000, + 0x038, 0x00005ECE, + 0x037, 0x0009C000, + 0x038, 0x00005ECE, + 0x037, 0x000A0000, + 0x038, 0x00005ECE, + 0x037, 0x000A4000, + 0x038, 0x00005ECE, + 0x037, 0x000A8000, + 0x038, 0x00005ECE, + 0x037, 0x000AC000, + 0x038, 0x00005ECE, + 0x037, 0x000B0000, + 0x038, 0x00005ECE, + 0x037, 0x000B4000, + 0x038, 0x00005ECE, + 0x037, 0x000B8000, + 0x038, 0x00005ECE, + 0x037, 0x000BC000, + 0x038, 0x00005ECE, + 0x037, 0x000C0000, + 0x038, 0x00005ECE, + 0x037, 0x000C4000, + 0x038, 0x00005ECE, + 0x037, 0x000C8000, + 0x038, 0x00005ECE, + 0x0EF, 0x00000000, + 0xB0000000, 0x00000000, + 0x0EF, 0x00000008, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x90000002, 0x00000000, 0x40000000, 0x00000000, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x90000008, 0x00000000, 0x40000000, 0x00000000, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0xA0000000, 0x00000000, + 0x03C, 0x0000007D, + 0x03C, 0x0000047D, + 0x03C, 0x0000087D, + 0x03C, 0x0000107D, + 0x03C, 0x0000147D, + 0x03C, 0x0000187D, + 0xB0000000, 0x00000000, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x0000027E, + 0x03C, 0x00000546, + 0x03C, 0x00000821, + 0x03C, 0x0000127E, + 0x03C, 0x00001546, + 0x03C, 0x00001821, + 0x03C, 0x0000227E, + 0x03C, 0x00002546, + 0x03C, 0x00002821, + 0x90000002, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x0000027E, + 0x03C, 0x00000546, + 0x03C, 0x00000821, + 0x03C, 0x0000127E, + 0x03C, 0x00001546, + 0x03C, 0x00001821, + 0x03C, 0x0000227E, + 0x03C, 0x00002546, + 0x03C, 0x00002821, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 
0x0000027E, + 0x03C, 0x00000546, + 0x03C, 0x00000821, + 0x03C, 0x0000127E, + 0x03C, 0x00001546, + 0x03C, 0x00001821, + 0x03C, 0x0000227E, + 0x03C, 0x00002546, + 0x03C, 0x00002821, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x0000027E, + 0x03C, 0x00000546, + 0x03C, 0x00000821, + 0x03C, 0x0000127E, + 0x03C, 0x00001546, + 0x03C, 0x00001821, + 0x03C, 0x0000227E, + 0x03C, 0x00002546, + 0x03C, 0x00002821, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x0000027E, + 0x03C, 0x00000546, + 0x03C, 0x00000821, + 0x03C, 0x0000127E, + 0x03C, 0x00001546, + 0x03C, 0x00001821, + 0x03C, 0x0000227E, + 0x03C, 0x00002546, + 0x03C, 0x00002821, + 0x90000008, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x0000027E, + 0x03C, 0x00000546, + 0x03C, 0x00000821, + 0x03C, 0x0000127E, + 0x03C, 0x00001546, + 0x03C, 0x00001821, + 0x03C, 0x0000227E, + 0x03C, 0x00002546, + 0x03C, 0x00002821, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x0000027E, + 0x03C, 0x00000546, + 0x03C, 0x00000821, + 0x03C, 0x0000127E, + 0x03C, 0x00001546, + 0x03C, 0x00001821, + 0x03C, 0x0000227E, + 0x03C, 0x00002546, + 0x03C, 0x00002821, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x0000027E, + 0x03C, 0x00000546, + 0x03C, 0x00000821, + 0x03C, 0x0000127E, + 0x03C, 0x00001546, + 0x03C, 0x00001821, + 0x03C, 0x0000227E, + 0x03C, 0x00002546, + 0x03C, 0x00002821, + 0xA0000000, 0x00000000, + 0x03C, 0x0000037E, + 0x03C, 0x00000575, + 0x03C, 0x00000971, + 0x03C, 0x0000127E, + 0x03C, 0x00001575, + 0x03C, 0x00001871, + 0x03C, 0x0000217E, + 0x03C, 0x00002575, + 0x03C, 0x00002871, + 0xB0000000, 0x00000000, + 0x0EF, 0x00000000, + 0x061, 0x000C0D47, + 0x062, 0x0000133C, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x063, 0x000750E7, + 0x90000002, 0x00000000, 0x40000000, 0x00000000, + 0x063, 0x000750E7, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x063, 0x000750E7, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x063, 0x000750E7, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x063, 0x000750E7, + 0x90000008, 0x00000000, 0x40000000, 0x00000000, + 0x063, 0x000750E7, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x063, 0x000750E7, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0x063, 0x000750E7, + 0xA0000000, 0x00000000, + 0x063, 0x0007D0E7, + 0xB0000000, 0x00000000, + 0x064, 0x00014FEC, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x065, 0x000920D0, + 0x90000002, 0x00000000, 0x40000000, 0x00000000, + 0x065, 0x000920D0, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x065, 0x000920D0, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x065, 0x000920D0, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x065, 0x000920D0, + 0x90000008, 0x00000000, 0x40000000, 0x00000000, + 0x065, 0x000920D0, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x065, 0x000920D0, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0x065, 0x000920D0, + 0xA0000000, 0x00000000, + 0x065, 0x000923FF, + 0xB0000000, 0x00000000, + 0x066, 0x00000040, + 0x057, 0x00050000, + 0x056, 0x00051DF0, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x90000002, 0x00000000, 0x40000000, 0x00000000, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x90000008, 0x00000000, 0x40000000, 0x00000000, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0xA0000000, 0x00000000, + 0x055, 0x00082060, + 0xB0000000, 0x00000000, +}; + 
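(Note on the table format: these arrays are flat {address, data} word pairs with inline conditionals. An entry whose first word is of the form 0x8xxxxxxx opens an IF branch, 0x9xxxxxxx an ELSE-IF branch, 0xA0000000 the default/ELSE branch, and 0xB0000000 closes the block; the low bits of the 0x8/0x9 words select which board configuration the branch applies to, and IF/ELSE-IF entries carry two extra condition words, e.g. "0x80000001, 0x00000000, 0x40000000, 0x00000000". The sketch below is a minimal, hypothetical walker for tables of this shape, written under those assumptions; it is not the rtw88 driver's actual table loader, and match_cond()/write_rf() are illustrative names only.)

/*
 * Minimal sketch of a walker for condition-marked RF tables of the
 * shape used in this file.  The 0x8/0x9/0xA/0xB marker semantics and
 * the match_cond() helper are assumptions for illustration; the real
 * parsing is done by the rtw88 table loader, not by this code.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Hypothetical: decide whether a branch condition matches this board. */
static bool match_cond(uint32_t cond, uint32_t board_id)
{
	return (cond & 0x0000000f) == board_id;	/* assumed encoding */
}

static void walk_rf_table(const uint32_t *tbl, size_t len, uint32_t board_id,
			  void (*write_rf)(uint32_t addr, uint32_t data))
{
	bool taken = false;	/* a branch in this IF/ELSE block matched */
	bool skip = false;	/* currently inside a non-matching branch */

	for (size_t i = 0; i + 1 < len; i += 2) {
		uint32_t addr = tbl[i], data = tbl[i + 1];

		switch (addr & 0xf0000000) {
		case 0x80000000:	/* IF: start a conditional block */
			taken = match_cond(addr, board_id);
			skip = !taken;
			i += 2;		/* skip the extra condition words */
			break;
		case 0x90000000:	/* ELSE IF: test only if nothing matched yet */
			skip = taken || !match_cond(addr, board_id);
			taken |= !skip;
			i += 2;
			break;
		case 0xa0000000:	/* ELSE: default values */
			skip = taken;
			break;
		case 0xb0000000:	/* ENDIF: leave the conditional block */
			taken = false;
			skip = false;
			break;
		default:		/* plain register write */
			if (!skip)
				write_rf(addr, data);
			break;
		}
	}

	(void)data;
}

Read this way, the repeated per-board branches seen above (0x80000001 through 0x9000000b) each shadow the same registers, and the 0xA0000000 section supplies the fallback values applied when no board-specific branch matches.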
+RTW_DECL_TABLE_RF_RADIO(rtw8814a_rf_b, B); + +static const u32 rtw8814a_rf_c[] = { + 0x018, 0x00013124, + 0x040, 0x00000C00, + 0x058, 0x00000F98, + 0x07F, 0x00068004, + 0x018, 0x00000006, + 0x80000002, 0x00000000, 0x40000000, 0x00000000, + 0x086, 0x000E335A, + 0x087, 0x00079F80, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x086, 0x000E335A, + 0x087, 0x00079F80, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x086, 0x000E335A, + 0x087, 0x00079F80, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x086, 0x000E335A, + 0x087, 0x00079F80, + 0x90000008, 0x00000000, 0x40000000, 0x00000000, + 0x086, 0x000E335A, + 0x087, 0x00079F80, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x086, 0x000E335A, + 0x087, 0x00079F80, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0x086, 0x000E335A, + 0x087, 0x00079F80, + 0xA0000000, 0x00000000, + 0x086, 0x000E4B58, + 0x087, 0x00049F80, + 0xB0000000, 0x00000000, + 0x0DF, 0x00000008, + 0x0EF, 0x00002000, + 0x80000002, 0x00000000, 0x40000000, 0x00000000, + 0x03B, 0x0003F19B, + 0x03B, 0x00037A5B, + 0x03B, 0x0002A433, + 0x03B, 0x00027BD3, + 0x03B, 0x0001F80B, + 0x03B, 0x00017823, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x03B, 0x0003F19B, + 0x03B, 0x00037A5B, + 0x03B, 0x0002A433, + 0x03B, 0x00027BD3, + 0x03B, 0x0001F80B, + 0x03B, 0x00017823, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x03B, 0x0003F19B, + 0x03B, 0x00037A5B, + 0x03B, 0x0002A433, + 0x03B, 0x00027BD3, + 0x03B, 0x0001F80B, + 0x03B, 0x00017823, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x03B, 0x0003F19B, + 0x03B, 0x00037A5B, + 0x03B, 0x0002A433, + 0x03B, 0x00027BD3, + 0x03B, 0x0001F80B, + 0x03B, 0x00017823, + 0x90000008, 0x00000000, 0x40000000, 0x00000000, + 0x03B, 0x0003F19B, + 0x03B, 0x00037A5B, + 0x03B, 0x0002A433, + 0x03B, 0x00027BD3, + 0x03B, 0x0001F80B, + 0x03B, 0x00017823, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x03B, 0x0003F19B, + 0x03B, 0x00037A5B, + 0x03B, 0x0002A433, + 0x03B, 0x00027BD3, + 0x03B, 0x0001F80B, + 0x03B, 0x00017823, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0x03B, 0x0003F19B, + 0x03B, 0x00037A5B, + 0x03B, 0x0002A433, + 0x03B, 0x00027BD3, + 0x03B, 0x0001F80B, + 0x03B, 0x00017823, + 0xA0000000, 0x00000000, + 0x03B, 0x0003F258, + 0x03B, 0x00030A58, + 0x03B, 0x0002FA58, + 0x03B, 0x00022590, + 0x03B, 0x0001FA50, + 0x03B, 0x00010248, + 0x03B, 0x00008240, + 0xB0000000, 0x00000000, + 0x0EF, 0x00000100, + 0x80000002, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0000A0D0, + 0x034, 0x000090CD, + 0x034, 0x000080CA, + 0x034, 0x0000704D, + 0x034, 0x0000604A, + 0x034, 0x00005047, + 0x034, 0x0000400A, + 0x034, 0x00003007, + 0x034, 0x00002004, + 0x034, 0x00001001, + 0x034, 0x00000001, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0000A0D0, + 0x034, 0x000090CD, + 0x034, 0x000080CA, + 0x034, 0x0000704D, + 0x034, 0x0000604A, + 0x034, 0x00005047, + 0x034, 0x0000400A, + 0x034, 0x00003007, + 0x034, 0x00002004, + 0x034, 0x00001001, + 0x034, 0x00000001, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0000A0D0, + 0x034, 0x000090CD, + 0x034, 0x000080CA, + 0x034, 0x0000704D, + 0x034, 0x0000604A, + 0x034, 0x00005047, + 0x034, 0x0000400A, + 0x034, 0x00003007, + 0x034, 0x00002004, + 0x034, 0x00001001, + 0x034, 0x00000001, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0000A0D0, + 0x034, 0x000090CD, + 0x034, 0x000080CA, + 0x034, 0x0000704D, + 0x034, 0x0000604A, + 0x034, 0x00005047, + 0x034, 0x0000400A, + 0x034, 0x00003007, + 0x034, 0x00002004, + 0x034, 0x00001001, + 0x034, 0x00000001, + 
0x90000008, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0000A0D0, + 0x034, 0x000090CD, + 0x034, 0x000080CA, + 0x034, 0x0000704D, + 0x034, 0x0000604A, + 0x034, 0x00005047, + 0x034, 0x0000400A, + 0x034, 0x00003007, + 0x034, 0x00002004, + 0x034, 0x00001001, + 0x034, 0x00000001, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0000A0D0, + 0x034, 0x000090CD, + 0x034, 0x000080CA, + 0x034, 0x0000704D, + 0x034, 0x0000604A, + 0x034, 0x00005047, + 0x034, 0x0000400A, + 0x034, 0x00003007, + 0x034, 0x00002004, + 0x034, 0x00001001, + 0x034, 0x00000001, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0000A0D0, + 0x034, 0x000090CD, + 0x034, 0x000080CA, + 0x034, 0x0000704D, + 0x034, 0x0000604A, + 0x034, 0x00005047, + 0x034, 0x0000400A, + 0x034, 0x00003007, + 0x034, 0x00002004, + 0x034, 0x00001001, + 0x034, 0x00000001, + 0xA0000000, 0x00000000, + 0x034, 0x0000ADF6, + 0x034, 0x00009DF3, + 0x034, 0x00008DF0, + 0x034, 0x00007DED, + 0x034, 0x00006DEA, + 0x034, 0x00005CED, + 0x034, 0x00004CEA, + 0x034, 0x000034EA, + 0x034, 0x000024E7, + 0x034, 0x0000146A, + 0x034, 0x0000006B, + 0xB0000000, 0x00000000, + 0x80000002, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0000A0D0, + 0x034, 0x000090CD, + 0x034, 0x000080CA, + 0x034, 0x0000704D, + 0x034, 0x0000604A, + 0x034, 0x00005047, + 0x034, 0x0000400A, + 0x034, 0x00003007, + 0x034, 0x00002004, + 0x034, 0x00001001, + 0x034, 0x00000001, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0000A0D0, + 0x034, 0x000090CD, + 0x034, 0x000080CA, + 0x034, 0x0000704D, + 0x034, 0x0000604A, + 0x034, 0x00005047, + 0x034, 0x0000400A, + 0x034, 0x00003007, + 0x034, 0x00002004, + 0x034, 0x00001001, + 0x034, 0x00000001, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0000A0D0, + 0x034, 0x000090CD, + 0x034, 0x000080CA, + 0x034, 0x0000704D, + 0x034, 0x0000604A, + 0x034, 0x00005047, + 0x034, 0x0000400A, + 0x034, 0x00003007, + 0x034, 0x00002004, + 0x034, 0x00001001, + 0x034, 0x00000001, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0000A0D0, + 0x034, 0x000090CD, + 0x034, 0x000080CA, + 0x034, 0x0000704D, + 0x034, 0x0000604A, + 0x034, 0x00005047, + 0x034, 0x0000400A, + 0x034, 0x00003007, + 0x034, 0x00002004, + 0x034, 0x00001001, + 0x034, 0x00000001, + 0x90000008, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0000A0D0, + 0x034, 0x000090CD, + 0x034, 0x000080CA, + 0x034, 0x0000704D, + 0x034, 0x0000604A, + 0x034, 0x00005047, + 0x034, 0x0000400A, + 0x034, 0x00003007, + 0x034, 0x00002004, + 0x034, 0x00001001, + 0x034, 0x00000001, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0000A0D0, + 0x034, 0x000090CD, + 0x034, 0x000080CA, + 0x034, 0x0000704D, + 0x034, 0x0000604A, + 0x034, 0x00005047, + 0x034, 0x0000400A, + 0x034, 0x00003007, + 0x034, 0x00002004, + 0x034, 0x00001001, + 0x034, 0x00000001, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0000A0D0, + 0x034, 0x000090CD, + 0x034, 0x000080CA, + 0x034, 0x0000704D, + 0x034, 0x0000604A, + 0x034, 0x00005047, + 0x034, 0x0000400A, + 0x034, 0x00003007, + 0x034, 0x00002004, + 0x034, 0x00001001, + 0x034, 0x00000001, + 0xA0000000, 0x00000000, + 0x034, 0x0008ADF6, + 0x034, 0x00089DF3, + 0x034, 0x00088DF0, + 0x034, 0x00087DED, + 0x034, 0x00086DEA, + 0x034, 0x00085CED, + 0x034, 0x00084CEA, + 0x034, 0x000834EA, + 0x034, 0x000824E7, + 0x034, 0x0008146A, + 0x034, 0x0008006B, + 0xB0000000, 0x00000000, + 0x0EF, 0x00000000, + 0x0EF, 0x000020A2, + 0x0DF, 0x00000080, + 0x035, 0x00000192, + 0x035, 0x00008192, + 0x035, 0x00010192, + 0x036, 0x00000024, + 0x036, 0x00008024, + 0x036, 
0x00010024, + 0x036, 0x00018024, + 0x0EF, 0x00000000, + 0x051, 0x00000C21, + 0x052, 0x000006D9, + 0x053, 0x000FC649, + 0x054, 0x0000017E, + 0x018, 0x0001012A, + 0x081, 0x0007FC00, + 0x089, 0x00050110, + 0x08A, 0x00043E50, + 0x08B, 0x0002E180, + 0x08C, 0x00093C3C, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x085, 0x000F8000, + 0x90000006, 0x00000000, 0x40000000, 0x00000000, + 0x085, 0x000F8000, + 0xA0000000, 0x00000000, + 0x085, 0x000F8000, + 0xB0000000, 0x00000000, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x08D, 0x000FFFF0, + 0x90000002, 0x00000000, 0x40000000, 0x00000000, + 0x08D, 0x000FFFF0, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x08D, 0x000FFFF0, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x08D, 0x000FFFF0, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x08D, 0x000FFFF0, + 0x90000006, 0x00000000, 0x40000000, 0x00000000, + 0x08D, 0x000FFFF0, + 0x90000008, 0x00000000, 0x40000000, 0x00000000, + 0x08D, 0x000FFFF0, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x08D, 0x000FFFF0, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0x08D, 0x000FFFF0, + 0xA0000000, 0x00000000, + 0x08D, 0x000FFFF0, + 0xB0000000, 0x00000000, + 0x0EF, 0x00001000, + 0x03A, 0x0000013C, + 0x03B, 0x00038023, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x0006C000, + 0x90000002, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x000D4000, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00080000, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00088000, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00000000, + 0x90000006, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x0006C000, + 0x90000007, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00000000, + 0x90000008, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x0008C000, + 0x90000009, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00004000, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00088000, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00088000, + 0xA0000000, 0x00000000, + 0x03C, 0x000A0000, + 0xB0000000, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x00030023, + 0x03C, 0x00048000, + 0x03A, 0x0000013C, + 0x03B, 0x00028623, + 0x03C, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x00021633, + 0x03C, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x0001C633, + 0x03C, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x00010293, + 0x03C, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x00009593, + 0x03C, 0x00000000, + 0x03A, 0x00000148, + 0x80000008, 0x00000000, 0x40000000, 0x00000000, + 0x03B, 0x0000118B, + 0xA0000000, 0x00000000, + 0x03B, 0x0000078B, + 0xB0000000, 0x00000000, + 0x03C, 0x00000000, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x00078023, + 0x90000002, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x00078023, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x00078023, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x00078023, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x00078023, + 0x90000006, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x00078023, + 0x90000008, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x00078023, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x00078023, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x00078023, + 0xA0000000, 0x00000000, + 0x03A, 0x0000013C, + 
0x03B, 0x00078023, + 0xB0000000, 0x00000000, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x0004C000, + 0x90000002, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00084000, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00000000, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00080000, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00000000, + 0x90000006, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x0004C000, + 0x90000007, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00000000, + 0x90000008, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x000D0000, + 0x90000009, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00000000, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00080000, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00080000, + 0xA0000000, 0x00000000, + 0x03C, 0x00028000, + 0xB0000000, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x00070023, + 0x03C, 0x00048000, + 0x03A, 0x0000013C, + 0x03B, 0x00068623, + 0x03C, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x00061633, + 0x03C, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x0005C633, + 0x03C, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x00050293, + 0x03C, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x00049593, + 0x03C, 0x00000000, + 0x03A, 0x00000148, + 0x03B, 0x0004078B, + 0x03C, 0x00000000, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x000B8023, + 0x90000002, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x000B8023, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x000B8023, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x000B8023, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x000B8023, + 0x90000006, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x000B8023, + 0x90000008, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x000B8023, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x000B8023, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x000B8023, + 0xA0000000, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x000B8023, + 0xB0000000, 0x00000000, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00024000, + 0x90000002, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00060000, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00080000, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00024000, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00000000, + 0x90000006, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00024000, + 0x90000008, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00020000, + 0x90000009, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00000000, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00024000, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00024000, + 0xA0000000, 0x00000000, + 0x03C, 0x00020000, + 0xB0000000, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x000B0023, + 0x80000004, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00020000, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00020000, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00020000, + 0xA0000000, 0x00000000, + 0x03C, 0x00020000, + 0xB0000000, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x000A8623, + 0x03C, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x000A1633, + 0x03C, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x0009C633, + 0x03C, 
0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x00090293, + 0x03C, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x00089593, + 0x03C, 0x00000000, + 0x03A, 0x00000148, + 0x80000008, 0x00000000, 0x40000000, 0x00000000, + 0x03B, 0x0008128B, + 0xA0000000, 0x00000000, + 0x03B, 0x0008078B, + 0xB0000000, 0x00000000, + 0x03C, 0x00000000, + 0x0EF, 0x00000000, + 0x0EF, 0x00000800, + 0x03B, 0x00000000, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x00000803, + 0x90000002, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x00000000, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x00001803, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x00001803, + 0x90000006, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x00000803, + 0x90000007, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x00001003, + 0x90000008, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x00001003, + 0x90000009, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x00001003, + 0xA0000000, 0x00000000, + 0x03A, 0x00000803, + 0xB0000000, 0x00000000, + 0x03B, 0x00040000, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x00001000, + 0x90000002, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x00000800, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x00000803, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x00000803, + 0x90000006, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x00001000, + 0x90000007, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x00000000, + 0x90000008, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x00000000, + 0x90000009, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x00000000, + 0xA0000000, 0x00000000, + 0x03A, 0x00001000, + 0xB0000000, 0x00000000, + 0x03B, 0x00080000, + 0x80000003, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x00000000, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x00000000, + 0x90000007, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x00001802, + 0x90000009, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x00001802, + 0xA0000000, 0x00000000, + 0x03A, 0x00001002, + 0xB0000000, 0x00000000, + 0x0EF, 0x00000000, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x90000002, 0x00000000, 0x40000000, 0x00000000, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x90000006, 0x00000000, 0x40000000, 0x00000000, + 0x90000008, 0x00000000, 0x40000000, 0x00000000, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0xA0000000, 0x00000000, + 0xB0000000, 0x00000000, + 0x80000002, 0x00000000, 0x40000000, 0x00000000, + 0x018, 0x00013124, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x018, 0x00013124, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x018, 0x00013124, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x018, 0x00013124, + 0x90000008, 0x00000000, 0x40000000, 0x00000000, + 0x018, 0x00013124, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x018, 0x00013124, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0x018, 0x00013124, + 0xA0000000, 0x00000000, + 0x018, 0x00013124, + 0xB0000000, 0x00000000, + 0x0EF, 0x00000100, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0004A38C, + 0x034, 0x000491AD, + 0x034, 0x000481AA, + 0x034, 0x000471A7, + 0x034, 0x000460AA, + 0x034, 0x000450A7, + 0x034, 0x0004402C, + 0x034, 0x00043029, + 0x034, 0x0004200C, + 0x034, 0x00041009, + 0x034, 0x00040006, + 0x90000002, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0004A3EF, + 0x034, 0x000493AD, 
+ 0x034, 0x0004838A, + 0x034, 0x0004718C, + 0x034, 0x00046189, + 0x034, 0x0004506D, + 0x034, 0x0004404C, + 0x034, 0x0004302C, + 0x034, 0x00042029, + 0x034, 0x00041026, + 0x034, 0x00040023, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0004A3EF, + 0x034, 0x000493AD, + 0x034, 0x0004838A, + 0x034, 0x0004718C, + 0x034, 0x00046189, + 0x034, 0x0004506D, + 0x034, 0x0004404C, + 0x034, 0x0004302C, + 0x034, 0x00042029, + 0x034, 0x00041026, + 0x034, 0x00040023, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0004A3EF, + 0x034, 0x000493AD, + 0x034, 0x0004838A, + 0x034, 0x0004718C, + 0x034, 0x00046189, + 0x034, 0x0004506D, + 0x034, 0x0004404C, + 0x034, 0x0004302C, + 0x034, 0x00042029, + 0x034, 0x00041026, + 0x034, 0x00040023, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0004A3EF, + 0x034, 0x000493AD, + 0x034, 0x0004838A, + 0x034, 0x0004718C, + 0x034, 0x00046189, + 0x034, 0x0004506D, + 0x034, 0x0004404C, + 0x034, 0x0004302C, + 0x034, 0x00042029, + 0x034, 0x00041026, + 0x034, 0x00040023, + 0x90000008, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0004A3F5, + 0x034, 0x000493F3, + 0x034, 0x00048393, + 0x034, 0x00047390, + 0x034, 0x0004638D, + 0x034, 0x0004538A, + 0x034, 0x00044387, + 0x034, 0x000430ED, + 0x034, 0x000420EA, + 0x034, 0x000410E7, + 0x034, 0x0004002D, + 0x90000009, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0004AFF7, + 0x034, 0x00049FF6, + 0x034, 0x00048FF3, + 0x034, 0x00047FF0, + 0x034, 0x00046FED, + 0x034, 0x00045FEA, + 0x034, 0x00044FE7, + 0x034, 0x00043CD0, + 0x034, 0x00042CCD, + 0x034, 0x00041CCA, + 0x034, 0x00040CC7, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0004A3EF, + 0x034, 0x000493AD, + 0x034, 0x0004838A, + 0x034, 0x0004718C, + 0x034, 0x00046189, + 0x034, 0x0004506D, + 0x034, 0x0004404C, + 0x034, 0x0004302C, + 0x034, 0x00042029, + 0x034, 0x00041026, + 0x034, 0x00040023, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0004A3EF, + 0x034, 0x000493AD, + 0x034, 0x0004838A, + 0x034, 0x0004718C, + 0x034, 0x00046189, + 0x034, 0x0004506D, + 0x034, 0x0004404C, + 0x034, 0x0004302C, + 0x034, 0x00042029, + 0x034, 0x00041026, + 0x034, 0x00040023, + 0xA0000000, 0x00000000, + 0x034, 0x0004AFF4, + 0x034, 0x00049FF1, + 0x034, 0x00048FEE, + 0x034, 0x00047FEB, + 0x034, 0x00046FE8, + 0x034, 0x00045DEA, + 0x034, 0x00044CED, + 0x034, 0x00043CEA, + 0x034, 0x00042C6C, + 0x034, 0x00041C69, + 0x034, 0x00040C2B, + 0xB0000000, 0x00000000, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0002A3EC, + 0x034, 0x0002938C, + 0x034, 0x000281AD, + 0x034, 0x000271AA, + 0x034, 0x000261A7, + 0x034, 0x000250AA, + 0x034, 0x000240A7, + 0x034, 0x0002302C, + 0x034, 0x00022029, + 0x034, 0x0002100C, + 0x034, 0x00020009, + 0x90000002, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0002A3EC, + 0x034, 0x0002936D, + 0x034, 0x0002836A, + 0x034, 0x0002716D, + 0x034, 0x0002616A, + 0x034, 0x0002506D, + 0x034, 0x0002406A, + 0x034, 0x0002302C, + 0x034, 0x00022029, + 0x034, 0x00021026, + 0x034, 0x00020023, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0002A3EC, + 0x034, 0x000293AC, + 0x034, 0x0002838A, + 0x034, 0x0002718C, + 0x034, 0x00026189, + 0x034, 0x0002506D, + 0x034, 0x0002406A, + 0x034, 0x0002302C, + 0x034, 0x00022029, + 0x034, 0x00021026, + 0x034, 0x00020023, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0002A3EC, + 0x034, 0x0002936D, + 0x034, 0x0002836A, + 0x034, 0x0002716D, + 0x034, 0x0002616A, + 0x034, 0x0002506D, + 0x034, 0x0002406A, + 0x034, 0x0002302C, + 0x034, 0x00022029, + 0x034, 0x00021026, + 0x034, 
0x00020023, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0002A3EC, + 0x034, 0x000293AC, + 0x034, 0x0002838A, + 0x034, 0x0002718C, + 0x034, 0x00026189, + 0x034, 0x0002506D, + 0x034, 0x0002406A, + 0x034, 0x0002302C, + 0x034, 0x00022029, + 0x034, 0x00021026, + 0x034, 0x00020023, + 0x90000008, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0002A3F5, + 0x034, 0x000293F3, + 0x034, 0x000282F2, + 0x034, 0x000272D0, + 0x034, 0x000262CD, + 0x034, 0x000252CA, + 0x034, 0x000242C7, + 0x034, 0x000230CD, + 0x034, 0x000220CA, + 0x034, 0x000210C7, + 0x034, 0x00020086, + 0x90000009, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0002AFF7, + 0x034, 0x00029FF6, + 0x034, 0x00028FF3, + 0x034, 0x00027FF0, + 0x034, 0x00026FED, + 0x034, 0x00025FEA, + 0x034, 0x00024FE7, + 0x034, 0x00023DEA, + 0x034, 0x00022DE7, + 0x034, 0x00021DE4, + 0x034, 0x00020E44, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0002A3EC, + 0x034, 0x0002936D, + 0x034, 0x0002836A, + 0x034, 0x0002716D, + 0x034, 0x0002616A, + 0x034, 0x0002506D, + 0x034, 0x0002406A, + 0x034, 0x0002302C, + 0x034, 0x00022029, + 0x034, 0x00021026, + 0x034, 0x00020023, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0002A3EC, + 0x034, 0x0002936D, + 0x034, 0x0002836A, + 0x034, 0x0002716D, + 0x034, 0x0002616A, + 0x034, 0x0002506D, + 0x034, 0x0002406A, + 0x034, 0x0002302C, + 0x034, 0x00022029, + 0x034, 0x00021026, + 0x034, 0x00020023, + 0xA0000000, 0x00000000, + 0x034, 0x0002AFF4, + 0x034, 0x00029FF1, + 0x034, 0x00028FEE, + 0x034, 0x00027FEB, + 0x034, 0x00026FE8, + 0x034, 0x00025DEA, + 0x034, 0x00024CED, + 0x034, 0x00023CEA, + 0x034, 0x00022C6C, + 0x034, 0x00021C69, + 0x034, 0x00020C2B, + 0xB0000000, 0x00000000, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0000A38C, + 0x034, 0x000091AD, + 0x034, 0x000081AA, + 0x034, 0x000071A7, + 0x034, 0x000060AA, + 0x034, 0x000050A7, + 0x034, 0x0000402C, + 0x034, 0x00003029, + 0x034, 0x0000200C, + 0x034, 0x00001009, + 0x034, 0x00000006, + 0x90000002, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0000A3EE, + 0x034, 0x000093AB, + 0x034, 0x00008389, + 0x034, 0x0000718C, + 0x034, 0x00006189, + 0x034, 0x0000506D, + 0x034, 0x0000406A, + 0x034, 0x0000302C, + 0x034, 0x00002029, + 0x034, 0x00001026, + 0x034, 0x00000023, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0000A3EE, + 0x034, 0x000093AB, + 0x034, 0x00008389, + 0x034, 0x0000718C, + 0x034, 0x00006189, + 0x034, 0x0000506D, + 0x034, 0x0000406A, + 0x034, 0x0000302C, + 0x034, 0x00002029, + 0x034, 0x00001026, + 0x034, 0x00000023, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0000A3EE, + 0x034, 0x000093AB, + 0x034, 0x00008389, + 0x034, 0x0000718C, + 0x034, 0x00006189, + 0x034, 0x0000506D, + 0x034, 0x0000406A, + 0x034, 0x0000302C, + 0x034, 0x00002029, + 0x034, 0x00001026, + 0x034, 0x00000023, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0000A3EE, + 0x034, 0x000093AB, + 0x034, 0x00008389, + 0x034, 0x0000718C, + 0x034, 0x00006189, + 0x034, 0x0000506D, + 0x034, 0x0000406A, + 0x034, 0x0000302C, + 0x034, 0x00002029, + 0x034, 0x00001026, + 0x034, 0x00000023, + 0x90000008, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0000A3F5, + 0x034, 0x000093F1, + 0x034, 0x000083B0, + 0x034, 0x00007370, + 0x034, 0x0000636D, + 0x034, 0x0000536A, + 0x034, 0x00004367, + 0x034, 0x0000308E, + 0x034, 0x0000208B, + 0x034, 0x00001088, + 0x034, 0x00000085, + 0x90000009, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0000AFF7, + 0x034, 0x00009FF5, + 0x034, 0x00008FF2, + 0x034, 0x00007FEF, + 0x034, 0x00006FEC, + 0x034, 
0x00005FE9, + 0x034, 0x00004EAA, + 0x034, 0x00003EA7, + 0x034, 0x00002C70, + 0x034, 0x00001C6D, + 0x034, 0x00000C6A, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0000A3EE, + 0x034, 0x000093AB, + 0x034, 0x00008389, + 0x034, 0x0000718C, + 0x034, 0x00006189, + 0x034, 0x0000506D, + 0x034, 0x0000406A, + 0x034, 0x0000302C, + 0x034, 0x00002029, + 0x034, 0x00001026, + 0x034, 0x00000023, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0000A3EE, + 0x034, 0x000093AB, + 0x034, 0x00008389, + 0x034, 0x0000718C, + 0x034, 0x00006189, + 0x034, 0x0000506D, + 0x034, 0x0000406A, + 0x034, 0x0000302C, + 0x034, 0x00002029, + 0x034, 0x00001026, + 0x034, 0x00000023, + 0xA0000000, 0x00000000, + 0x034, 0x0000AFF4, + 0x034, 0x00009FF1, + 0x034, 0x00008FEE, + 0x034, 0x00007FEB, + 0x034, 0x00006FE8, + 0x034, 0x00005DEA, + 0x034, 0x00004CED, + 0x034, 0x00003CEA, + 0x034, 0x00002C6C, + 0x034, 0x00001C69, + 0x034, 0x00000C2B, + 0xB0000000, 0x00000000, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x000CA38C, + 0x034, 0x000C91AD, + 0x034, 0x000C81AA, + 0x034, 0x000C71A7, + 0x034, 0x000C60AA, + 0x034, 0x000C50A7, + 0x034, 0x000C402C, + 0x034, 0x000C3029, + 0x034, 0x000C200C, + 0x034, 0x000C1009, + 0x034, 0x000C0006, + 0x90000002, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x000CA3EF, + 0x034, 0x000C93AD, + 0x034, 0x000C838A, + 0x034, 0x000C718C, + 0x034, 0x000C6189, + 0x034, 0x000C506D, + 0x034, 0x000C404C, + 0x034, 0x000C302C, + 0x034, 0x000C2029, + 0x034, 0x000C1026, + 0x034, 0x000C0023, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x000CA3EF, + 0x034, 0x000C93AD, + 0x034, 0x000C838A, + 0x034, 0x000C718C, + 0x034, 0x000C6189, + 0x034, 0x000C506D, + 0x034, 0x000C404C, + 0x034, 0x000C302C, + 0x034, 0x000C2029, + 0x034, 0x000C1026, + 0x034, 0x000C0023, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x000CA3EF, + 0x034, 0x000C93AD, + 0x034, 0x000C838A, + 0x034, 0x000C718C, + 0x034, 0x000C6189, + 0x034, 0x000C506D, + 0x034, 0x000C404C, + 0x034, 0x000C302C, + 0x034, 0x000C2029, + 0x034, 0x000C1026, + 0x034, 0x000C0023, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x000CA3EF, + 0x034, 0x000C93AD, + 0x034, 0x000C838A, + 0x034, 0x000C718C, + 0x034, 0x000C6189, + 0x034, 0x000C506D, + 0x034, 0x000C404C, + 0x034, 0x000C302C, + 0x034, 0x000C2029, + 0x034, 0x000C1026, + 0x034, 0x000C0023, + 0x90000008, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x000CA3F5, + 0x034, 0x000C93F3, + 0x034, 0x000C8393, + 0x034, 0x000C7390, + 0x034, 0x000C638D, + 0x034, 0x000C538A, + 0x034, 0x000C4387, + 0x034, 0x000C30ED, + 0x034, 0x000C20EA, + 0x034, 0x000C10E7, + 0x034, 0x000C002D, + 0x90000009, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x000CAFF7, + 0x034, 0x000C9FF6, + 0x034, 0x000C8FF3, + 0x034, 0x000C7FF0, + 0x034, 0x000C6FED, + 0x034, 0x000C5FEA, + 0x034, 0x000C4FE7, + 0x034, 0x000C3CD0, + 0x034, 0x000C2CCD, + 0x034, 0x000C1CCA, + 0x034, 0x000C0CC7, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x000CA3EF, + 0x034, 0x000C93AD, + 0x034, 0x000C838A, + 0x034, 0x000C718C, + 0x034, 0x000C6189, + 0x034, 0x000C506D, + 0x034, 0x000C404C, + 0x034, 0x000C302C, + 0x034, 0x000C2029, + 0x034, 0x000C1026, + 0x034, 0x000C0023, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x000CA3EF, + 0x034, 0x000C93AD, + 0x034, 0x000C838A, + 0x034, 0x000C718C, + 0x034, 0x000C6189, + 0x034, 0x000C506D, + 0x034, 0x000C404C, + 0x034, 0x000C302C, + 0x034, 0x000C2029, + 0x034, 0x000C1026, + 0x034, 0x000C0023, + 0xA0000000, 0x00000000, + 0x034, 0x000CA794, + 0x034, 
0x000C9791, + 0x034, 0x000C878E, + 0x034, 0x000C778B, + 0x034, 0x000C658D, + 0x034, 0x000C558A, + 0x034, 0x000C448D, + 0x034, 0x000C348A, + 0x034, 0x000C244C, + 0x034, 0x000C1449, + 0x034, 0x000C042B, + 0xB0000000, 0x00000000, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x000AA3EC, + 0x034, 0x000A938C, + 0x034, 0x000A81AD, + 0x034, 0x000A71AA, + 0x034, 0x000A61A7, + 0x034, 0x000A50AA, + 0x034, 0x000A40A7, + 0x034, 0x000A302C, + 0x034, 0x000A2029, + 0x034, 0x000A100C, + 0x034, 0x000A0009, + 0x90000002, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x000AA3EC, + 0x034, 0x000A936D, + 0x034, 0x000A836A, + 0x034, 0x000A716D, + 0x034, 0x000A616A, + 0x034, 0x000A506D, + 0x034, 0x000A406A, + 0x034, 0x000A302C, + 0x034, 0x000A2029, + 0x034, 0x000A1026, + 0x034, 0x000A0023, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x000AA3EC, + 0x034, 0x000A93AC, + 0x034, 0x000A838A, + 0x034, 0x000A718C, + 0x034, 0x000A6189, + 0x034, 0x000A506D, + 0x034, 0x000A406A, + 0x034, 0x000A302C, + 0x034, 0x000A2029, + 0x034, 0x000A1026, + 0x034, 0x000A0023, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x000AA3EC, + 0x034, 0x000A936D, + 0x034, 0x000A836A, + 0x034, 0x000A716D, + 0x034, 0x000A616A, + 0x034, 0x000A506D, + 0x034, 0x000A406A, + 0x034, 0x000A302C, + 0x034, 0x000A2029, + 0x034, 0x000A1026, + 0x034, 0x000A0023, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x000AA3EC, + 0x034, 0x000A93AC, + 0x034, 0x000A838A, + 0x034, 0x000A718C, + 0x034, 0x000A6189, + 0x034, 0x000A506D, + 0x034, 0x000A406A, + 0x034, 0x000A302C, + 0x034, 0x000A2029, + 0x034, 0x000A1026, + 0x034, 0x000A0023, + 0x90000008, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x000AA3F5, + 0x034, 0x000A93F3, + 0x034, 0x000A82F2, + 0x034, 0x000A72D0, + 0x034, 0x000A62CD, + 0x034, 0x000A52CA, + 0x034, 0x000A42C7, + 0x034, 0x000A30CD, + 0x034, 0x000A20CA, + 0x034, 0x000A10C7, + 0x034, 0x000A0086, + 0x90000009, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x000AAFF7, + 0x034, 0x000A9FF6, + 0x034, 0x000A8FF3, + 0x034, 0x000A7FF0, + 0x034, 0x000A6FED, + 0x034, 0x000A5FEA, + 0x034, 0x000A4FE7, + 0x034, 0x000A3DEA, + 0x034, 0x000A2DE7, + 0x034, 0x000A1DE4, + 0x034, 0x000A0E44, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x000AA3EC, + 0x034, 0x000A936D, + 0x034, 0x000A836A, + 0x034, 0x000A716D, + 0x034, 0x000A616A, + 0x034, 0x000A506D, + 0x034, 0x000A406A, + 0x034, 0x000A302C, + 0x034, 0x000A2029, + 0x034, 0x000A1026, + 0x034, 0x000A0023, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x000AA3EC, + 0x034, 0x000A936D, + 0x034, 0x000A836A, + 0x034, 0x000A716D, + 0x034, 0x000A616A, + 0x034, 0x000A506D, + 0x034, 0x000A406A, + 0x034, 0x000A302C, + 0x034, 0x000A2029, + 0x034, 0x000A1026, + 0x034, 0x000A0023, + 0xA0000000, 0x00000000, + 0x034, 0x000AA794, + 0x034, 0x000A9791, + 0x034, 0x000A878E, + 0x034, 0x000A778B, + 0x034, 0x000A658D, + 0x034, 0x000A558A, + 0x034, 0x000A448D, + 0x034, 0x000A348A, + 0x034, 0x000A244C, + 0x034, 0x000A1449, + 0x034, 0x000A042B, + 0xB0000000, 0x00000000, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0008A38C, + 0x034, 0x000891AD, + 0x034, 0x000881AA, + 0x034, 0x000871A7, + 0x034, 0x000860AA, + 0x034, 0x000850A7, + 0x034, 0x0008402C, + 0x034, 0x00083029, + 0x034, 0x0008200C, + 0x034, 0x00081009, + 0x034, 0x00000006, + 0x90000002, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0008A3EE, + 0x034, 0x000893AB, + 0x034, 0x00088389, + 0x034, 0x0008718C, + 0x034, 0x00086189, + 0x034, 0x0008506D, + 0x034, 0x0008406A, + 0x034, 0x0008302C, + 0x034, 
0x00082029, + 0x034, 0x00081026, + 0x034, 0x00080023, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0008A3EE, + 0x034, 0x000893AB, + 0x034, 0x00088389, + 0x034, 0x0008718C, + 0x034, 0x00086189, + 0x034, 0x0008506D, + 0x034, 0x0008406A, + 0x034, 0x0008302C, + 0x034, 0x00082029, + 0x034, 0x00081026, + 0x034, 0x00080023, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0008A3EE, + 0x034, 0x000893AB, + 0x034, 0x00088389, + 0x034, 0x0008718C, + 0x034, 0x00086189, + 0x034, 0x0008506D, + 0x034, 0x0008406A, + 0x034, 0x0008302C, + 0x034, 0x00082029, + 0x034, 0x00081026, + 0x034, 0x00080023, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0008A3EE, + 0x034, 0x000893AB, + 0x034, 0x00088389, + 0x034, 0x0008718C, + 0x034, 0x00086189, + 0x034, 0x0008506D, + 0x034, 0x0008406A, + 0x034, 0x0008302C, + 0x034, 0x00082029, + 0x034, 0x00081026, + 0x034, 0x00080023, + 0x90000008, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0008A3F5, + 0x034, 0x000893F1, + 0x034, 0x000883B0, + 0x034, 0x00087370, + 0x034, 0x0008636D, + 0x034, 0x0008536A, + 0x034, 0x00084367, + 0x034, 0x0008308E, + 0x034, 0x0008208B, + 0x034, 0x00081088, + 0x034, 0x00080085, + 0x90000009, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0008AFF7, + 0x034, 0x00089FF5, + 0x034, 0x00088FF2, + 0x034, 0x00087FEF, + 0x034, 0x00086FEC, + 0x034, 0x00085FE9, + 0x034, 0x00084EAA, + 0x034, 0x00083EA7, + 0x034, 0x00082C70, + 0x034, 0x00081C6D, + 0x034, 0x00080C6A, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0008A3EE, + 0x034, 0x000893AB, + 0x034, 0x00088389, + 0x034, 0x0008718C, + 0x034, 0x00086189, + 0x034, 0x0008506D, + 0x034, 0x0008406A, + 0x034, 0x0008302C, + 0x034, 0x00082029, + 0x034, 0x00081026, + 0x034, 0x00080023, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0008A3EE, + 0x034, 0x000893AB, + 0x034, 0x00088389, + 0x034, 0x0008718C, + 0x034, 0x00086189, + 0x034, 0x0008506D, + 0x034, 0x0008406A, + 0x034, 0x0008302C, + 0x034, 0x00082029, + 0x034, 0x00081026, + 0x034, 0x00080023, + 0xA0000000, 0x00000000, + 0x034, 0x0008A794, + 0x034, 0x00089791, + 0x034, 0x0008878E, + 0x034, 0x0008778B, + 0x034, 0x0008658D, + 0x034, 0x0008558A, + 0x034, 0x0008448D, + 0x034, 0x0008348A, + 0x034, 0x0008244C, + 0x034, 0x00081449, + 0x034, 0x0008042B, + 0xB0000000, 0x00000000, + 0x0EF, 0x00000000, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x0DF, 0x00000001, + 0x90000002, 0x00000000, 0x40000000, 0x00000000, + 0x0DF, 0x00000001, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x0DF, 0x00000001, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x0DF, 0x00000001, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x0DF, 0x00000001, + 0x90000008, 0x00000000, 0x40000000, 0x00000000, + 0x0DF, 0x00000001, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x0DF, 0x00000001, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0x0DF, 0x00000001, + 0xA0000000, 0x00000000, + 0x0DF, 0x00000000, + 0xB0000000, 0x00000000, + 0x018, 0x0001712A, + 0x0EF, 0x00000040, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x035, 0x000006CC, + 0x035, 0x000086CC, + 0x035, 0x000106CC, + 0x035, 0x000206CC, + 0x035, 0x000286CC, + 0x035, 0x000306CC, + 0x035, 0x000406CC, + 0x035, 0x000486CC, + 0x035, 0x000506CC, + 0x035, 0x000806CC, + 0x035, 0x000886CC, + 0x035, 0x000906CC, + 0x035, 0x000A06CC, + 0x035, 0x000A86CC, + 0x035, 0x000B06CC, + 0x035, 0x000C06CC, + 0x035, 0x000C86CC, + 0x035, 0x000D06CC, + 0x90000002, 0x00000000, 0x40000000, 0x00000000, + 0x035, 0x000006CC, + 0x035, 0x000086CC, + 0x035, 0x000106CC, + 
0x035, 0x000206CC, + 0x035, 0x000286CC, + 0x035, 0x000306CC, + 0x035, 0x000406CC, + 0x035, 0x000486CC, + 0x035, 0x000506CC, + 0x035, 0x000806CC, + 0x035, 0x000886CC, + 0x035, 0x000906CC, + 0x035, 0x000A06CC, + 0x035, 0x000A86CC, + 0x035, 0x000B06CC, + 0x035, 0x000C06CC, + 0x035, 0x000C86CC, + 0x035, 0x000D06CC, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x035, 0x000006CC, + 0x035, 0x000086CC, + 0x035, 0x000106CC, + 0x035, 0x000206CC, + 0x035, 0x000286CC, + 0x035, 0x000306CC, + 0x035, 0x000406CC, + 0x035, 0x000486CC, + 0x035, 0x000506CC, + 0x035, 0x000806CC, + 0x035, 0x000886CC, + 0x035, 0x000906CC, + 0x035, 0x000A06CC, + 0x035, 0x000A86CC, + 0x035, 0x000B06CC, + 0x035, 0x000C06CC, + 0x035, 0x000C86CC, + 0x035, 0x000D06CC, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x035, 0x000006CC, + 0x035, 0x000086CC, + 0x035, 0x000106CC, + 0x035, 0x000206CC, + 0x035, 0x000286CC, + 0x035, 0x000306CC, + 0x035, 0x000406CC, + 0x035, 0x000486CC, + 0x035, 0x000506CC, + 0x035, 0x000806CC, + 0x035, 0x000886CC, + 0x035, 0x000906CC, + 0x035, 0x000A06CC, + 0x035, 0x000A86CC, + 0x035, 0x000B06CC, + 0x035, 0x000C06CC, + 0x035, 0x000C86CC, + 0x035, 0x000D06CC, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x035, 0x000006CC, + 0x035, 0x000086CC, + 0x035, 0x000106CC, + 0x035, 0x000206CC, + 0x035, 0x000286CC, + 0x035, 0x000306CC, + 0x035, 0x000406CC, + 0x035, 0x000486CC, + 0x035, 0x000506CC, + 0x035, 0x000806CC, + 0x035, 0x000886CC, + 0x035, 0x000906CC, + 0x035, 0x000A06CC, + 0x035, 0x000A86CC, + 0x035, 0x000B06CC, + 0x035, 0x000C06CC, + 0x035, 0x000C86CC, + 0x035, 0x000D06CC, + 0x90000008, 0x00000000, 0x40000000, 0x00000000, + 0x035, 0x000006CC, + 0x035, 0x000086CC, + 0x035, 0x000106CC, + 0x035, 0x000206CC, + 0x035, 0x000286CC, + 0x035, 0x000306CC, + 0x035, 0x000406CC, + 0x035, 0x000486CC, + 0x035, 0x000506CC, + 0x035, 0x000806CC, + 0x035, 0x000886CC, + 0x035, 0x000906CC, + 0x035, 0x000A06CC, + 0x035, 0x000A86CC, + 0x035, 0x000B06CC, + 0x035, 0x000C06CC, + 0x035, 0x000C86CC, + 0x035, 0x000D06CC, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x035, 0x000006CC, + 0x035, 0x000086CC, + 0x035, 0x000106CC, + 0x035, 0x000206CC, + 0x035, 0x000286CC, + 0x035, 0x000306CC, + 0x035, 0x000406CC, + 0x035, 0x000486CC, + 0x035, 0x000506CC, + 0x035, 0x000806CC, + 0x035, 0x000886CC, + 0x035, 0x000906CC, + 0x035, 0x000A06CC, + 0x035, 0x000A86CC, + 0x035, 0x000B06CC, + 0x035, 0x000C06CC, + 0x035, 0x000C86CC, + 0x035, 0x000D06CC, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0x035, 0x000006CC, + 0x035, 0x000086CC, + 0x035, 0x000106CC, + 0x035, 0x000206CC, + 0x035, 0x000286CC, + 0x035, 0x000306CC, + 0x035, 0x000406CC, + 0x035, 0x000486CC, + 0x035, 0x000506CC, + 0x035, 0x000806CC, + 0x035, 0x000886CC, + 0x035, 0x000906CC, + 0x035, 0x000A06CC, + 0x035, 0x000A86CC, + 0x035, 0x000B06CC, + 0x035, 0x000C06CC, + 0x035, 0x000C86CC, + 0x035, 0x000D06CC, + 0xA0000000, 0x00000000, + 0x035, 0x00000484, + 0x035, 0x00008484, + 0x035, 0x00010484, + 0x035, 0x00020584, + 0x035, 0x00028584, + 0x035, 0x00030584, + 0x035, 0x00040584, + 0x035, 0x00048584, + 0x035, 0x00050584, + 0x035, 0x000805FB, + 0x035, 0x000885FB, + 0x035, 0x000905FB, + 0x035, 0x000A05FB, + 0x035, 0x000A85FB, + 0x035, 0x000B05FB, + 0x035, 0x000C05FB, + 0x035, 0x000C85FB, + 0x035, 0x000D05FB, + 0xB0000000, 0x00000000, + 0x0EF, 0x00000000, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x0DF, 0x00000001, + 0x90000002, 0x00000000, 0x40000000, 0x00000000, + 0x0DF, 0x00000001, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x0DF, 0x00000001, + 
0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x0DF, 0x00000001, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x0DF, 0x00000001, + 0x90000008, 0x00000000, 0x40000000, 0x00000000, + 0x0DF, 0x00000001, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x0DF, 0x00000001, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0x0DF, 0x00000001, + 0xA0000000, 0x00000000, + 0x0DF, 0x00000000, + 0xB0000000, 0x00000000, + 0x018, 0x0001712A, + 0x0EF, 0x00000010, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x036, 0x00000473, + 0x036, 0x00008473, + 0x036, 0x00010473, + 0x036, 0x00020473, + 0x036, 0x00028473, + 0x036, 0x00030473, + 0x036, 0x00040473, + 0x036, 0x00048473, + 0x036, 0x00050473, + 0x036, 0x00080473, + 0x036, 0x00088473, + 0x036, 0x00090473, + 0x036, 0x000A0473, + 0x036, 0x000A8473, + 0x036, 0x000B0473, + 0x036, 0x000C0473, + 0x036, 0x000C8473, + 0x036, 0x000D0473, + 0x90000002, 0x00000000, 0x40000000, 0x00000000, + 0x036, 0x00000475, + 0x036, 0x00008475, + 0x036, 0x00010475, + 0x036, 0x00020475, + 0x036, 0x00028475, + 0x036, 0x00030475, + 0x036, 0x00040475, + 0x036, 0x00048475, + 0x036, 0x00050475, + 0x036, 0x00080475, + 0x036, 0x00088475, + 0x036, 0x00090475, + 0x036, 0x000A0475, + 0x036, 0x000A8475, + 0x036, 0x000B0475, + 0x036, 0x000C0475, + 0x036, 0x000C8475, + 0x036, 0x000D0475, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x036, 0x00000475, + 0x036, 0x00008475, + 0x036, 0x00010475, + 0x036, 0x00020475, + 0x036, 0x00028475, + 0x036, 0x00030475, + 0x036, 0x00040475, + 0x036, 0x00048475, + 0x036, 0x00050475, + 0x036, 0x00080475, + 0x036, 0x00088475, + 0x036, 0x00090475, + 0x036, 0x000A0475, + 0x036, 0x000A8475, + 0x036, 0x000B0475, + 0x036, 0x000C0475, + 0x036, 0x000C8475, + 0x036, 0x000D0475, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x036, 0x00000475, + 0x036, 0x00008475, + 0x036, 0x00010475, + 0x036, 0x00020475, + 0x036, 0x00028475, + 0x036, 0x00030475, + 0x036, 0x00040475, + 0x036, 0x00048475, + 0x036, 0x00050475, + 0x036, 0x00080475, + 0x036, 0x00088475, + 0x036, 0x00090475, + 0x036, 0x000A0475, + 0x036, 0x000A8475, + 0x036, 0x000B0475, + 0x036, 0x000C0475, + 0x036, 0x000C8475, + 0x036, 0x000D0475, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x036, 0x00000475, + 0x036, 0x00008475, + 0x036, 0x00010475, + 0x036, 0x00020475, + 0x036, 0x00028475, + 0x036, 0x00030475, + 0x036, 0x00040475, + 0x036, 0x00048475, + 0x036, 0x00050475, + 0x036, 0x00080475, + 0x036, 0x00088475, + 0x036, 0x00090475, + 0x036, 0x000A0475, + 0x036, 0x000A8475, + 0x036, 0x000B0475, + 0x036, 0x000C0475, + 0x036, 0x000C8475, + 0x036, 0x000D0475, + 0x90000008, 0x00000000, 0x40000000, 0x00000000, + 0x036, 0x00000475, + 0x036, 0x00008475, + 0x036, 0x00010475, + 0x036, 0x00020475, + 0x036, 0x00028475, + 0x036, 0x00030475, + 0x036, 0x00040475, + 0x036, 0x00048475, + 0x036, 0x00050475, + 0x036, 0x00080475, + 0x036, 0x00088475, + 0x036, 0x00090475, + 0x036, 0x000A0475, + 0x036, 0x000A8475, + 0x036, 0x000B0475, + 0x036, 0x000C0475, + 0x036, 0x000C8475, + 0x036, 0x000D0475, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x036, 0x00000475, + 0x036, 0x00008475, + 0x036, 0x00010475, + 0x036, 0x00020475, + 0x036, 0x00028475, + 0x036, 0x00030475, + 0x036, 0x00040475, + 0x036, 0x00048475, + 0x036, 0x00050475, + 0x036, 0x00080475, + 0x036, 0x00088475, + 0x036, 0x00090475, + 0x036, 0x000A0475, + 0x036, 0x000A8475, + 0x036, 0x000B0475, + 0x036, 0x000C0475, + 0x036, 0x000C8475, + 0x036, 0x000D0475, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0x036, 0x00000475, + 0x036, 
0x00008475, + 0x036, 0x00010475, + 0x036, 0x00020475, + 0x036, 0x00028475, + 0x036, 0x00030475, + 0x036, 0x00040475, + 0x036, 0x00048475, + 0x036, 0x00050475, + 0x036, 0x00080475, + 0x036, 0x00088475, + 0x036, 0x00090475, + 0x036, 0x000A0475, + 0x036, 0x000A8475, + 0x036, 0x000B0475, + 0x036, 0x000C0475, + 0x036, 0x000C8475, + 0x036, 0x000D0475, + 0xA0000000, 0x00000000, + 0x036, 0x00000474, + 0x036, 0x00008474, + 0x036, 0x00010474, + 0x036, 0x00020474, + 0x036, 0x00028474, + 0x036, 0x00030474, + 0x036, 0x00040474, + 0x036, 0x00048474, + 0x036, 0x00050474, + 0x036, 0x00080474, + 0x036, 0x00088474, + 0x036, 0x00090474, + 0x036, 0x000A0474, + 0x036, 0x000A8474, + 0x036, 0x000B0474, + 0x036, 0x000C0474, + 0x036, 0x000C8474, + 0x036, 0x000D0474, + 0xB0000000, 0x00000000, + 0x0EF, 0x00000000, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x90000002, 0x00000000, 0x40000000, 0x00000000, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x90000008, 0x00000000, 0x40000000, 0x00000000, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0xA0000000, 0x00000000, + 0x0EF, 0x00000004, + 0x037, 0x00000000, + 0x038, 0x0000514E, + 0x037, 0x00004000, + 0x038, 0x0000514E, + 0x037, 0x00008000, + 0x038, 0x0000514E, + 0x037, 0x00010000, + 0x038, 0x0000514E, + 0x037, 0x00014000, + 0x038, 0x0000514E, + 0x037, 0x00018000, + 0x038, 0x0000514E, + 0x037, 0x0001C000, + 0x038, 0x0000514E, + 0x037, 0x00020000, + 0x038, 0x0000514E, + 0x037, 0x00024000, + 0x038, 0x0000514E, + 0x037, 0x00028000, + 0x038, 0x0000514E, + 0x037, 0x0002C000, + 0x038, 0x0000714E, + 0x037, 0x00030000, + 0x038, 0x0000514E, + 0x037, 0x00034000, + 0x038, 0x0000514E, + 0x037, 0x00038000, + 0x038, 0x0000514E, + 0x037, 0x0003C000, + 0x038, 0x0000514E, + 0x037, 0x00040000, + 0x038, 0x0000514E, + 0x037, 0x00044000, + 0x038, 0x0000514E, + 0x037, 0x00048000, + 0x038, 0x0000514E, + 0x037, 0x00080000, + 0x038, 0x00005ECE, + 0x037, 0x00084000, + 0x038, 0x00005ECE, + 0x037, 0x00088000, + 0x038, 0x00005ECE, + 0x037, 0x00090000, + 0x038, 0x00005ECE, + 0x037, 0x00094000, + 0x038, 0x00005ECE, + 0x037, 0x00098000, + 0x038, 0x00005ECE, + 0x037, 0x0009C000, + 0x038, 0x00005ECE, + 0x037, 0x000A0000, + 0x038, 0x00005ECE, + 0x037, 0x000A4000, + 0x038, 0x00005ECE, + 0x037, 0x000A8000, + 0x038, 0x00005ECE, + 0x037, 0x000AC000, + 0x038, 0x00005ECE, + 0x037, 0x000B0000, + 0x038, 0x00005ECE, + 0x037, 0x000B4000, + 0x038, 0x00005ECE, + 0x037, 0x000B8000, + 0x038, 0x00005ECE, + 0x037, 0x000BC000, + 0x038, 0x00005ECE, + 0x037, 0x000C0000, + 0x038, 0x00005ECE, + 0x037, 0x000C4000, + 0x038, 0x00005ECE, + 0x037, 0x000C8000, + 0x038, 0x00005ECE, + 0x0EF, 0x00000000, + 0xB0000000, 0x00000000, + 0x0EF, 0x00000008, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x90000002, 0x00000000, 0x40000000, 0x00000000, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x90000008, 0x00000000, 0x40000000, 0x00000000, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0xA0000000, 0x00000000, + 0x03C, 0x0000007D, + 0x03C, 0x0000047D, + 0x03C, 0x0000087D, + 0x03C, 0x0000107D, + 0x03C, 0x0000147D, + 0x03C, 0x0000187D, + 0xB0000000, 0x00000000, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x0000027D, + 0x03C, 0x00000541, + 0x03C, 0x00000821, + 0x03C, 0x0000127D, + 0x03C, 
0x00001541, + 0x03C, 0x00001821, + 0x03C, 0x0000227D, + 0x03C, 0x00002541, + 0x03C, 0x00002821, + 0x90000002, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x0000027D, + 0x03C, 0x00000546, + 0x03C, 0x00000821, + 0x03C, 0x0000127D, + 0x03C, 0x00001546, + 0x03C, 0x00001821, + 0x03C, 0x0000227D, + 0x03C, 0x00002546, + 0x03C, 0x00002821, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x0000027D, + 0x03C, 0x00000546, + 0x03C, 0x00000821, + 0x03C, 0x0000127D, + 0x03C, 0x00001546, + 0x03C, 0x00001821, + 0x03C, 0x0000227D, + 0x03C, 0x00002546, + 0x03C, 0x00002821, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x0000027D, + 0x03C, 0x00000546, + 0x03C, 0x00000821, + 0x03C, 0x0000127D, + 0x03C, 0x00001546, + 0x03C, 0x00001821, + 0x03C, 0x0000227D, + 0x03C, 0x00002546, + 0x03C, 0x00002821, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x0000027D, + 0x03C, 0x00000546, + 0x03C, 0x00000821, + 0x03C, 0x0000127D, + 0x03C, 0x00001546, + 0x03C, 0x00001821, + 0x03C, 0x0000227D, + 0x03C, 0x00002546, + 0x03C, 0x00002821, + 0x90000008, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x0000027D, + 0x03C, 0x00000546, + 0x03C, 0x00000821, + 0x03C, 0x0000127D, + 0x03C, 0x00001546, + 0x03C, 0x00001821, + 0x03C, 0x0000227D, + 0x03C, 0x00002546, + 0x03C, 0x00002821, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x0000027D, + 0x03C, 0x00000546, + 0x03C, 0x00000821, + 0x03C, 0x0000127D, + 0x03C, 0x00001546, + 0x03C, 0x00001821, + 0x03C, 0x0000227D, + 0x03C, 0x00002546, + 0x03C, 0x00002821, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x0000027D, + 0x03C, 0x00000546, + 0x03C, 0x00000821, + 0x03C, 0x0000127D, + 0x03C, 0x00001546, + 0x03C, 0x00001821, + 0x03C, 0x0000227D, + 0x03C, 0x00002546, + 0x03C, 0x00002821, + 0xA0000000, 0x00000000, + 0x03C, 0x0000037E, + 0x03C, 0x00000575, + 0x03C, 0x00000971, + 0x03C, 0x0000127E, + 0x03C, 0x00001575, + 0x03C, 0x00001871, + 0x03C, 0x0000217E, + 0x03C, 0x00002575, + 0x03C, 0x00002871, + 0xB0000000, 0x00000000, + 0x0EF, 0x00000000, + 0x061, 0x000C0D47, + 0x062, 0x0000133C, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x063, 0x000750E7, + 0x90000002, 0x00000000, 0x40000000, 0x00000000, + 0x063, 0x000750E7, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x063, 0x000750E7, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x063, 0x000750E7, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x063, 0x000750E7, + 0x90000008, 0x00000000, 0x40000000, 0x00000000, + 0x063, 0x000750E7, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x063, 0x000750E7, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0x063, 0x000750E7, + 0xA0000000, 0x00000000, + 0x063, 0x0007D0E7, + 0xB0000000, 0x00000000, + 0x064, 0x00014FEC, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x065, 0x000920D0, + 0x90000002, 0x00000000, 0x40000000, 0x00000000, + 0x065, 0x000920D0, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x065, 0x000920D0, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x065, 0x000920D0, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x065, 0x000920D0, + 0x90000008, 0x00000000, 0x40000000, 0x00000000, + 0x065, 0x000920D0, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x065, 0x000920D0, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0x065, 0x000920D0, + 0xA0000000, 0x00000000, + 0x065, 0x000923FF, + 0xB0000000, 0x00000000, + 0x066, 0x00000040, + 0x057, 0x00050000, + 0x056, 0x00051DF0, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x90000002, 0x00000000, 0x40000000, 0x00000000, + 0x90000003, 
0x00000000, 0x40000000, 0x00000000, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x90000008, 0x00000000, 0x40000000, 0x00000000, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0xA0000000, 0x00000000, + 0x055, 0x00082060, + 0xB0000000, 0x00000000, +}; + +RTW_DECL_TABLE_RF_RADIO(rtw8814a_rf_c, C); + +static const u32 rtw8814a_rf_d[] = { + 0x018, 0x00013124, + 0x040, 0x00000C00, + 0x058, 0x00000F98, + 0x07F, 0x00068004, + 0x018, 0x00000006, + 0x80000002, 0x00000000, 0x40000000, 0x00000000, + 0x086, 0x000E335A, + 0x087, 0x00079F80, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x086, 0x000E335A, + 0x087, 0x00079F80, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x086, 0x000E335A, + 0x087, 0x00079F80, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x086, 0x000E335A, + 0x087, 0x00079F80, + 0x90000008, 0x00000000, 0x40000000, 0x00000000, + 0x086, 0x000E335A, + 0x087, 0x00079F80, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x086, 0x000E335A, + 0x087, 0x00079F80, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0x086, 0x000E335A, + 0x087, 0x00079F80, + 0xA0000000, 0x00000000, + 0x086, 0x000E4B58, + 0x087, 0x00049F80, + 0xB0000000, 0x00000000, + 0x0DF, 0x00000008, + 0x0EF, 0x00002000, + 0x80000002, 0x00000000, 0x40000000, 0x00000000, + 0x03B, 0x0003F19B, + 0x03B, 0x00037A5B, + 0x03B, 0x0002A433, + 0x03B, 0x00027BD3, + 0x03B, 0x0001F80B, + 0x03B, 0x00017803, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x03B, 0x0003F09B, + 0x03B, 0x00037A5B, + 0x03B, 0x0002A433, + 0x03B, 0x00027BD3, + 0x03B, 0x0001F80B, + 0x03B, 0x00017803, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x03B, 0x0003F19B, + 0x03B, 0x00037A5B, + 0x03B, 0x0002A433, + 0x03B, 0x00027BD3, + 0x03B, 0x0001F80B, + 0x03B, 0x00017803, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x03B, 0x0003F09B, + 0x03B, 0x00037A5B, + 0x03B, 0x0002A433, + 0x03B, 0x00027BD3, + 0x03B, 0x0001F80B, + 0x03B, 0x00017803, + 0x90000008, 0x00000000, 0x40000000, 0x00000000, + 0x03B, 0x0003F19B, + 0x03B, 0x00037A5B, + 0x03B, 0x0002A433, + 0x03B, 0x00027BD3, + 0x03B, 0x0001F80B, + 0x03B, 0x00017803, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x03B, 0x0003F19B, + 0x03B, 0x00037A5B, + 0x03B, 0x0002A433, + 0x03B, 0x00027BD3, + 0x03B, 0x0001F80B, + 0x03B, 0x00017803, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0x03B, 0x0003F19B, + 0x03B, 0x00037A5B, + 0x03B, 0x0002A433, + 0x03B, 0x00027BD3, + 0x03B, 0x0001F80B, + 0x03B, 0x00017803, + 0xA0000000, 0x00000000, + 0x03B, 0x0003F258, + 0x03B, 0x00030A58, + 0x03B, 0x0002FA58, + 0x03B, 0x00022590, + 0x03B, 0x0001FA50, + 0x03B, 0x00010248, + 0x03B, 0x00008240, + 0xB0000000, 0x00000000, + 0x0EF, 0x00000100, + 0x80000002, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0000A0D0, + 0x034, 0x000090CD, + 0x034, 0x000080CA, + 0x034, 0x0000704D, + 0x034, 0x0000604A, + 0x034, 0x00005047, + 0x034, 0x0000400A, + 0x034, 0x00003007, + 0x034, 0x00002004, + 0x034, 0x00001001, + 0x034, 0x00000001, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0000A0D0, + 0x034, 0x000090CD, + 0x034, 0x000080CA, + 0x034, 0x0000704D, + 0x034, 0x0000604A, + 0x034, 0x00005047, + 0x034, 0x0000400A, + 0x034, 0x00003007, + 0x034, 0x00002004, + 0x034, 0x00001001, + 0x034, 0x00000001, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0000A0D0, + 0x034, 0x000090CD, + 0x034, 0x000080CA, + 0x034, 0x0000704D, + 0x034, 0x0000604A, + 0x034, 0x00005047, + 0x034, 0x0000400A, + 0x034, 
0x00003007, + 0x034, 0x00002004, + 0x034, 0x00001001, + 0x034, 0x00000001, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0000A0D0, + 0x034, 0x000090CD, + 0x034, 0x000080CA, + 0x034, 0x0000704D, + 0x034, 0x0000604A, + 0x034, 0x00005047, + 0x034, 0x0000400A, + 0x034, 0x00003007, + 0x034, 0x00002004, + 0x034, 0x00001001, + 0x034, 0x00000001, + 0x90000008, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0000A0D0, + 0x034, 0x000090CD, + 0x034, 0x000080CA, + 0x034, 0x0000704D, + 0x034, 0x0000604A, + 0x034, 0x00005047, + 0x034, 0x0000400A, + 0x034, 0x00003007, + 0x034, 0x00002004, + 0x034, 0x00001001, + 0x034, 0x00000001, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0000A0D0, + 0x034, 0x000090CD, + 0x034, 0x000080CA, + 0x034, 0x0000704D, + 0x034, 0x0000604A, + 0x034, 0x00005047, + 0x034, 0x0000400A, + 0x034, 0x00003007, + 0x034, 0x00002004, + 0x034, 0x00001001, + 0x034, 0x00000001, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0000A0D0, + 0x034, 0x000090CD, + 0x034, 0x000080CA, + 0x034, 0x0000704D, + 0x034, 0x0000604A, + 0x034, 0x00005047, + 0x034, 0x0000400A, + 0x034, 0x00003007, + 0x034, 0x00002004, + 0x034, 0x00001001, + 0x034, 0x00000001, + 0xA0000000, 0x00000000, + 0x034, 0x0000ADF6, + 0x034, 0x00009DF3, + 0x034, 0x00008DF0, + 0x034, 0x00007DED, + 0x034, 0x00006DEA, + 0x034, 0x00005CED, + 0x034, 0x00004CEA, + 0x034, 0x000034EA, + 0x034, 0x000024E7, + 0x034, 0x0000146A, + 0x034, 0x0000006B, + 0xB0000000, 0x00000000, + 0x80000002, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0000A0D0, + 0x034, 0x000090CD, + 0x034, 0x000080CA, + 0x034, 0x0000704D, + 0x034, 0x0000604A, + 0x034, 0x00005047, + 0x034, 0x0000400A, + 0x034, 0x00003007, + 0x034, 0x00002004, + 0x034, 0x00001001, + 0x034, 0x00000001, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0000A0D0, + 0x034, 0x000090CD, + 0x034, 0x000080CA, + 0x034, 0x0000704D, + 0x034, 0x0000604A, + 0x034, 0x00005047, + 0x034, 0x0000400A, + 0x034, 0x00003007, + 0x034, 0x00002004, + 0x034, 0x00001001, + 0x034, 0x00000001, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0000A0D0, + 0x034, 0x000090CD, + 0x034, 0x000080CA, + 0x034, 0x0000704D, + 0x034, 0x0000604A, + 0x034, 0x00005047, + 0x034, 0x0000400A, + 0x034, 0x00003007, + 0x034, 0x00002004, + 0x034, 0x00001001, + 0x034, 0x00000001, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0000A0D0, + 0x034, 0x000090CD, + 0x034, 0x000080CA, + 0x034, 0x0000704D, + 0x034, 0x0000604A, + 0x034, 0x00005047, + 0x034, 0x0000400A, + 0x034, 0x00003007, + 0x034, 0x00002004, + 0x034, 0x00001001, + 0x034, 0x00000001, + 0x90000008, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0000A0D0, + 0x034, 0x000090CD, + 0x034, 0x000080CA, + 0x034, 0x0000704D, + 0x034, 0x0000604A, + 0x034, 0x00005047, + 0x034, 0x0000400A, + 0x034, 0x00003007, + 0x034, 0x00002004, + 0x034, 0x00001001, + 0x034, 0x00000001, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0000A0D0, + 0x034, 0x000090CD, + 0x034, 0x000080CA, + 0x034, 0x0000704D, + 0x034, 0x0000604A, + 0x034, 0x00005047, + 0x034, 0x0000400A, + 0x034, 0x00003007, + 0x034, 0x00002004, + 0x034, 0x00001001, + 0x034, 0x00000001, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0000A0D0, + 0x034, 0x000090CD, + 0x034, 0x000080CA, + 0x034, 0x0000704D, + 0x034, 0x0000604A, + 0x034, 0x00005047, + 0x034, 0x0000400A, + 0x034, 0x00003007, + 0x034, 0x00002004, + 0x034, 0x00001001, + 0x034, 0x00000001, + 0xA0000000, 0x00000000, + 0x034, 0x0008ADF6, + 0x034, 0x00089DF3, + 0x034, 0x00088DF0, + 0x034, 
0x00087DED, + 0x034, 0x00086DEA, + 0x034, 0x00085CED, + 0x034, 0x00084CEA, + 0x034, 0x000834EA, + 0x034, 0x000824E7, + 0x034, 0x0008146A, + 0x034, 0x0008006B, + 0xB0000000, 0x00000000, + 0x0EF, 0x00000000, + 0x0EF, 0x000020A2, + 0x0DF, 0x00000080, + 0x035, 0x00000192, + 0x035, 0x00008192, + 0x035, 0x00010192, + 0x036, 0x00000024, + 0x036, 0x00008024, + 0x036, 0x00010024, + 0x036, 0x00018024, + 0x0EF, 0x00000000, + 0x051, 0x00000C21, + 0x052, 0x000006D9, + 0x053, 0x000FC649, + 0x054, 0x0000017E, + 0x018, 0x0001012A, + 0x081, 0x0007FC00, + 0x089, 0x00050110, + 0x08A, 0x00043E50, + 0x08B, 0x0002E180, + 0x08C, 0x00093C3C, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x085, 0x000F8000, + 0x90000006, 0x00000000, 0x40000000, 0x00000000, + 0x085, 0x000F8000, + 0xA0000000, 0x00000000, + 0x085, 0x000F8000, + 0xB0000000, 0x00000000, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x08D, 0x000FFFF0, + 0x90000002, 0x00000000, 0x40000000, 0x00000000, + 0x08D, 0x000FFFF0, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x08D, 0x000FFFF0, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x08D, 0x000FFFF0, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x08D, 0x000FFFF0, + 0x90000006, 0x00000000, 0x40000000, 0x00000000, + 0x08D, 0x000FFFF0, + 0x90000008, 0x00000000, 0x40000000, 0x00000000, + 0x08D, 0x000FFFF0, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x08D, 0x000FFFF0, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0x08D, 0x000FFFF0, + 0xA0000000, 0x00000000, + 0x08D, 0x000FFFF0, + 0xB0000000, 0x00000000, + 0x0EF, 0x00001000, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x00038023, + 0x90000002, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x00038023, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x00038023, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x00038023, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x00038023, + 0x90000006, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x00038023, + 0x90000008, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x00038023, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x00038023, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x00038023, + 0xA0000000, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x00038023, + 0xB0000000, 0x00000000, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00044000, + 0x90000002, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00048000, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00000000, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00088000, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00000000, + 0x90000006, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00044000, + 0x90000007, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00000000, + 0x90000008, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00088000, + 0x90000009, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00040000, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00088000, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00088000, + 0xA0000000, 0x00000000, + 0x03C, 0x00048000, + 0xB0000000, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x00030023, + 0x03C, 0x00048000, + 0x03A, 0x0000013C, + 0x03B, 0x00028623, + 0x03C, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x00021633, + 0x03C, 0x00000000, + 0x03A, 
0x0000013C, + 0x03B, 0x0001C633, + 0x03C, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x00010293, + 0x03C, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x00009593, + 0x03C, 0x00000000, + 0x03A, 0x00000148, + 0x80000008, 0x00000000, 0x40000000, 0x00000000, + 0x03B, 0x00000F8B, + 0xA0000000, 0x00000000, + 0x03B, 0x0000078B, + 0xB0000000, 0x00000000, + 0x03C, 0x00000000, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x00078023, + 0x90000002, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x00078023, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x00078023, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x00078023, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x00078023, + 0x90000006, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x00078023, + 0x90000008, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x00078023, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x00078023, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x00078023, + 0xA0000000, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x00078023, + 0xB0000000, 0x00000000, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00020000, + 0x90000002, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00020000, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00000000, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00044000, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00000000, + 0x90000006, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00020000, + 0x90000007, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00000000, + 0x90000008, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00020000, + 0x90000009, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00000000, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00044000, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00044000, + 0xA0000000, 0x00000000, + 0x03C, 0x00024000, + 0xB0000000, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x00070023, + 0x03C, 0x00048000, + 0x03A, 0x0000013C, + 0x03B, 0x00068623, + 0x03C, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x00061633, + 0x03C, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x0005C633, + 0x03C, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x00050293, + 0x03C, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x00049593, + 0x03C, 0x00000000, + 0x03A, 0x00000148, + 0x80000008, 0x00000000, 0x40000000, 0x00000000, + 0x03B, 0x00040F8B, + 0xA0000000, 0x00000000, + 0x03B, 0x0004078B, + 0xB0000000, 0x00000000, + 0x03C, 0x00000000, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x000B8023, + 0x90000002, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x000B8023, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x000B8023, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x000B8023, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x000B8023, + 0x90000006, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x000B8023, + 0x90000008, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x000B8023, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x000B8023, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x000B8023, + 0xA0000000, 
0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x000B8023, + 0xB0000000, 0x00000000, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00004000, + 0x90000002, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00060000, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00000000, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00024000, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00000000, + 0x90000006, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00004000, + 0x90000007, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00000000, + 0x90000008, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00060000, + 0x90000009, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00020000, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00024000, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00024000, + 0xA0000000, 0x00000000, + 0x03C, 0x00004000, + 0xB0000000, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x000B0023, + 0x80000004, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00020000, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00020000, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00020000, + 0xA0000000, 0x00000000, + 0x03C, 0x00020000, + 0xB0000000, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x000A8623, + 0x03C, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x000A1633, + 0x03C, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x0009C633, + 0x03C, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x00090293, + 0x03C, 0x00000000, + 0x03A, 0x0000013C, + 0x03B, 0x00089593, + 0x03C, 0x00000000, + 0x03A, 0x00000148, + 0x80000008, 0x00000000, 0x40000000, 0x00000000, + 0x03B, 0x0008138B, + 0xA0000000, 0x00000000, + 0x03B, 0x0008078B, + 0xB0000000, 0x00000000, + 0x03C, 0x00000000, + 0x0EF, 0x00000000, + 0x0EF, 0x00000800, + 0x03B, 0x00000000, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x00000803, + 0x90000002, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x00000803, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x00000803, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x00000803, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x00000803, + 0x90000006, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x00000803, + 0x90000007, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x00001003, + 0x90000008, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x00001003, + 0x90000009, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x00001803, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x00000803, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x00000803, + 0xA0000000, 0x00000000, + 0x03A, 0x00000803, + 0xB0000000, 0x00000000, + 0x03B, 0x00040000, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x00001000, + 0x90000002, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x00001002, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x00000000, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x00001000, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x00000001, + 0x90000006, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x00001000, + 0x90000007, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x00000000, + 0x90000008, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x00000802, + 0x90000009, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x00001803, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x00001000, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x00001000, + 0xA0000000, 0x00000000, + 0x03A, 0x00001000, + 0xB0000000, 0x00000000, 
+ 0x03B, 0x00080000, + 0x80000007, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x00001802, + 0x90000008, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x00001000, + 0x90000009, 0x00000000, 0x40000000, 0x00000000, + 0x03A, 0x00000802, + 0xA0000000, 0x00000000, + 0x03A, 0x00001002, + 0xB0000000, 0x00000000, + 0x0EF, 0x00000000, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x90000002, 0x00000000, 0x40000000, 0x00000000, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x90000006, 0x00000000, 0x40000000, 0x00000000, + 0x90000008, 0x00000000, 0x40000000, 0x00000000, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0xA0000000, 0x00000000, + 0xB0000000, 0x00000000, + 0x018, 0x00013124, + 0x0EF, 0x00000100, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0004A3EB, + 0x034, 0x0004938B, + 0x034, 0x000481AC, + 0x034, 0x000471A9, + 0x034, 0x000460AC, + 0x034, 0x000450A9, + 0x034, 0x0004402E, + 0x034, 0x0004302B, + 0x034, 0x00042028, + 0x034, 0x0004100B, + 0x034, 0x00040008, + 0x90000002, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0004A3AD, + 0x034, 0x0004938A, + 0x034, 0x0004818C, + 0x034, 0x00047189, + 0x034, 0x0004606D, + 0x034, 0x0004506A, + 0x034, 0x0004402C, + 0x034, 0x00043029, + 0x034, 0x00042026, + 0x034, 0x00041009, + 0x034, 0x00040006, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0004A3AD, + 0x034, 0x0004938A, + 0x034, 0x0004818C, + 0x034, 0x00047189, + 0x034, 0x0004606D, + 0x034, 0x0004506A, + 0x034, 0x0004402C, + 0x034, 0x00043029, + 0x034, 0x00042026, + 0x034, 0x00041009, + 0x034, 0x00040006, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0004A3AD, + 0x034, 0x0004938A, + 0x034, 0x0004818C, + 0x034, 0x00047189, + 0x034, 0x0004606D, + 0x034, 0x0004506A, + 0x034, 0x0004402C, + 0x034, 0x00043029, + 0x034, 0x00042026, + 0x034, 0x00041009, + 0x034, 0x00040006, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0004A3AD, + 0x034, 0x0004938A, + 0x034, 0x0004818C, + 0x034, 0x00047189, + 0x034, 0x0004606D, + 0x034, 0x0004506A, + 0x034, 0x0004402C, + 0x034, 0x00043029, + 0x034, 0x00042026, + 0x034, 0x00041009, + 0x034, 0x00040006, + 0x90000008, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0004A3F4, + 0x034, 0x000493D2, + 0x034, 0x000482D1, + 0x034, 0x000471F1, + 0x034, 0x000461EE, + 0x034, 0x000451EB, + 0x034, 0x000441E8, + 0x034, 0x0004314B, + 0x034, 0x00042148, + 0x034, 0x0004104B, + 0x034, 0x00040048, + 0x90000009, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0004AFF7, + 0x034, 0x00049FF6, + 0x034, 0x00048FF3, + 0x034, 0x00047FF0, + 0x034, 0x00046FED, + 0x034, 0x00045FEA, + 0x034, 0x00044FE7, + 0x034, 0x00043CB1, + 0x034, 0x00042CAE, + 0x034, 0x00041CAB, + 0x034, 0x00040CA8, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0004A3AD, + 0x034, 0x0004938A, + 0x034, 0x0004818C, + 0x034, 0x00047189, + 0x034, 0x0004606D, + 0x034, 0x0004506A, + 0x034, 0x0004402C, + 0x034, 0x00043029, + 0x034, 0x00042026, + 0x034, 0x00041009, + 0x034, 0x00040006, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0004A3AD, + 0x034, 0x0004938A, + 0x034, 0x0004818C, + 0x034, 0x00047189, + 0x034, 0x0004606D, + 0x034, 0x0004506A, + 0x034, 0x0004402C, + 0x034, 0x00043029, + 0x034, 0x00042026, + 0x034, 0x00041009, + 0x034, 0x00040006, + 0xA0000000, 0x00000000, + 0x034, 0x0004AFF4, + 0x034, 0x00049FF1, + 0x034, 0x00048FEE, + 0x034, 0x00047FEB, + 0x034, 0x00046FE8, + 0x034, 0x00045DEA, + 
0x034, 0x00044CED, + 0x034, 0x00043CEA, + 0x034, 0x00042C6C, + 0x034, 0x00041C69, + 0x034, 0x00040C2B, + 0xB0000000, 0x00000000, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0002A3EE, + 0x034, 0x000293EB, + 0x034, 0x0002838B, + 0x034, 0x000271AC, + 0x034, 0x000261A9, + 0x034, 0x000250AC, + 0x034, 0x000240A9, + 0x034, 0x000230A6, + 0x034, 0x0002202C, + 0x034, 0x00021029, + 0x034, 0x00020026, + 0x90000002, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0002A3AD, + 0x034, 0x0002938A, + 0x034, 0x0002818C, + 0x034, 0x00027189, + 0x034, 0x0002606D, + 0x034, 0x0002504C, + 0x034, 0x0002402C, + 0x034, 0x00023029, + 0x034, 0x00022026, + 0x034, 0x00021023, + 0x034, 0x00020006, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0002A3AD, + 0x034, 0x0002938A, + 0x034, 0x0002818C, + 0x034, 0x00027189, + 0x034, 0x0002606D, + 0x034, 0x0002504C, + 0x034, 0x0002402C, + 0x034, 0x00023029, + 0x034, 0x00022026, + 0x034, 0x00021023, + 0x034, 0x00020006, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0002A3AD, + 0x034, 0x0002938A, + 0x034, 0x0002818C, + 0x034, 0x00027189, + 0x034, 0x0002606D, + 0x034, 0x0002504C, + 0x034, 0x0002402C, + 0x034, 0x00023029, + 0x034, 0x00022026, + 0x034, 0x00021023, + 0x034, 0x00020006, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0002A3AD, + 0x034, 0x0002938A, + 0x034, 0x0002818C, + 0x034, 0x00027189, + 0x034, 0x0002606D, + 0x034, 0x0002504C, + 0x034, 0x0002402C, + 0x034, 0x00023029, + 0x034, 0x00022026, + 0x034, 0x00021023, + 0x034, 0x00020006, + 0x90000008, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0002A3F5, + 0x034, 0x000293D2, + 0x034, 0x000283CE, + 0x034, 0x00027290, + 0x034, 0x0002628D, + 0x034, 0x0002528A, + 0x034, 0x00024287, + 0x034, 0x0002308D, + 0x034, 0x0002208A, + 0x034, 0x00021087, + 0x034, 0x00020048, + 0x90000009, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0002AFF7, + 0x034, 0x00029FF6, + 0x034, 0x00028FF3, + 0x034, 0x00027FF0, + 0x034, 0x00026FED, + 0x034, 0x00025FEA, + 0x034, 0x00024FE7, + 0x034, 0x00023DEA, + 0x034, 0x00022DE7, + 0x034, 0x00021DE4, + 0x034, 0x00020D48, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0002A3AD, + 0x034, 0x0002938A, + 0x034, 0x0002818C, + 0x034, 0x00027189, + 0x034, 0x0002606D, + 0x034, 0x0002504C, + 0x034, 0x0002402C, + 0x034, 0x00023029, + 0x034, 0x00022026, + 0x034, 0x00021023, + 0x034, 0x00020006, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0002A3AD, + 0x034, 0x0002938A, + 0x034, 0x0002818C, + 0x034, 0x00027189, + 0x034, 0x0002606D, + 0x034, 0x0002504C, + 0x034, 0x0002402C, + 0x034, 0x00023029, + 0x034, 0x00022026, + 0x034, 0x00021023, + 0x034, 0x00020006, + 0xA0000000, 0x00000000, + 0x034, 0x0002AFF4, + 0x034, 0x00029FF1, + 0x034, 0x00028FEE, + 0x034, 0x00027FEB, + 0x034, 0x00026FE8, + 0x034, 0x00025DEA, + 0x034, 0x00024CED, + 0x034, 0x00023CEA, + 0x034, 0x00022C6C, + 0x034, 0x00021C69, + 0x034, 0x00020C2B, + 0xB0000000, 0x00000000, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0000A3EF, + 0x034, 0x000093EC, + 0x034, 0x0000838C, + 0x034, 0x000071AD, + 0x034, 0x000061AA, + 0x034, 0x000050AD, + 0x034, 0x000040AA, + 0x034, 0x0000306A, + 0x034, 0x0000202D, + 0x034, 0x0000102A, + 0x034, 0x00000027, + 0x90000002, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0000A3EE, + 0x034, 0x000093AC, + 0x034, 0x0000838A, + 0x034, 0x0000718C, + 0x034, 0x00006189, + 0x034, 0x0000506D, + 0x034, 0x0000406A, + 0x034, 0x0000302C, + 0x034, 0x00002029, + 0x034, 0x00001026, + 0x034, 0x00000023, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 
0x034, 0x0000A3EE, + 0x034, 0x000093AC, + 0x034, 0x0000838A, + 0x034, 0x0000718C, + 0x034, 0x00006189, + 0x034, 0x0000506D, + 0x034, 0x0000406A, + 0x034, 0x0000302C, + 0x034, 0x00002029, + 0x034, 0x00001026, + 0x034, 0x00000023, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0000A3EE, + 0x034, 0x000093AC, + 0x034, 0x0000838A, + 0x034, 0x0000718C, + 0x034, 0x00006189, + 0x034, 0x0000506D, + 0x034, 0x0000406A, + 0x034, 0x0000302C, + 0x034, 0x00002029, + 0x034, 0x00001026, + 0x034, 0x00000023, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0000A3EE, + 0x034, 0x000093AC, + 0x034, 0x0000838A, + 0x034, 0x0000718C, + 0x034, 0x00006189, + 0x034, 0x0000506D, + 0x034, 0x0000406A, + 0x034, 0x0000302C, + 0x034, 0x00002029, + 0x034, 0x00001026, + 0x034, 0x00000023, + 0x90000008, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0000A3F1, + 0x034, 0x000092B1, + 0x034, 0x000081CF, + 0x034, 0x00007170, + 0x034, 0x0000616D, + 0x034, 0x0000516A, + 0x034, 0x00004167, + 0x034, 0x0000302F, + 0x034, 0x0000202C, + 0x034, 0x00001029, + 0x034, 0x00000026, + 0x90000009, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0000AFF7, + 0x034, 0x00009FF6, + 0x034, 0x00008FF3, + 0x034, 0x00007FF0, + 0x034, 0x00006FED, + 0x034, 0x00005FEA, + 0x034, 0x00004FE7, + 0x034, 0x00003EC7, + 0x034, 0x00002EC4, + 0x034, 0x00001D4B, + 0x034, 0x00000D48, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0000A3EE, + 0x034, 0x000093AC, + 0x034, 0x0000838A, + 0x034, 0x0000718C, + 0x034, 0x00006189, + 0x034, 0x0000506D, + 0x034, 0x0000406A, + 0x034, 0x0000302C, + 0x034, 0x00002029, + 0x034, 0x00001026, + 0x034, 0x00000023, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0000A3EE, + 0x034, 0x000093AC, + 0x034, 0x0000838A, + 0x034, 0x0000718C, + 0x034, 0x00006189, + 0x034, 0x0000506D, + 0x034, 0x0000406A, + 0x034, 0x0000302C, + 0x034, 0x00002029, + 0x034, 0x00001026, + 0x034, 0x00000023, + 0xA0000000, 0x00000000, + 0x034, 0x0000AFF4, + 0x034, 0x00009FF1, + 0x034, 0x00008FEE, + 0x034, 0x00007FEB, + 0x034, 0x00006FE8, + 0x034, 0x00005DEA, + 0x034, 0x00004CED, + 0x034, 0x00003CEA, + 0x034, 0x00002C6C, + 0x034, 0x00001C69, + 0x034, 0x00000C2B, + 0xB0000000, 0x00000000, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x000CA3EB, + 0x034, 0x000C938B, + 0x034, 0x000C81AC, + 0x034, 0x000C71A9, + 0x034, 0x000C60AC, + 0x034, 0x000C50A9, + 0x034, 0x000C402E, + 0x034, 0x000C302B, + 0x034, 0x000C2028, + 0x034, 0x000C100B, + 0x034, 0x000C0008, + 0x90000002, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x000CA3AD, + 0x034, 0x000C938A, + 0x034, 0x000C818C, + 0x034, 0x000C7189, + 0x034, 0x000C606D, + 0x034, 0x000C506A, + 0x034, 0x000C402C, + 0x034, 0x000C3029, + 0x034, 0x000C2026, + 0x034, 0x000C1009, + 0x034, 0x000C0006, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x000CA3AD, + 0x034, 0x000C938A, + 0x034, 0x000C818C, + 0x034, 0x000C7189, + 0x034, 0x000C606D, + 0x034, 0x000C506A, + 0x034, 0x000C402C, + 0x034, 0x000C3029, + 0x034, 0x000C2026, + 0x034, 0x000C1009, + 0x034, 0x000C0006, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x000CA3AD, + 0x034, 0x000C938A, + 0x034, 0x000C818C, + 0x034, 0x000C7189, + 0x034, 0x000C606D, + 0x034, 0x000C506A, + 0x034, 0x000C402C, + 0x034, 0x000C3029, + 0x034, 0x000C2026, + 0x034, 0x000C1009, + 0x034, 0x000C0006, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x000CA3AD, + 0x034, 0x000C938A, + 0x034, 0x000C818C, + 0x034, 0x000C7189, + 0x034, 0x000C606D, + 0x034, 0x000C506A, + 0x034, 0x000C402C, + 0x034, 0x000C3029, + 0x034, 
0x000C2026, + 0x034, 0x000C1009, + 0x034, 0x000C0006, + 0x90000008, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x000CA3F4, + 0x034, 0x000C93D2, + 0x034, 0x000C82D1, + 0x034, 0x000C71F1, + 0x034, 0x000C61EE, + 0x034, 0x000C51EB, + 0x034, 0x000C41E8, + 0x034, 0x000C314B, + 0x034, 0x000C2148, + 0x034, 0x000C104B, + 0x034, 0x000C0048, + 0x90000009, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x000CAFF7, + 0x034, 0x000C9FF6, + 0x034, 0x000C8FF3, + 0x034, 0x000C7FF0, + 0x034, 0x000C6FED, + 0x034, 0x000C5FEA, + 0x034, 0x000C4FE7, + 0x034, 0x000C3CB1, + 0x034, 0x000C2CAE, + 0x034, 0x000C1CAB, + 0x034, 0x000C0CA8, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x000CA3AD, + 0x034, 0x000C938A, + 0x034, 0x000C818C, + 0x034, 0x000C7189, + 0x034, 0x000C606D, + 0x034, 0x000C506A, + 0x034, 0x000C402C, + 0x034, 0x000C3029, + 0x034, 0x000C2026, + 0x034, 0x000C1009, + 0x034, 0x000C0006, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x000CA3AD, + 0x034, 0x000C938A, + 0x034, 0x000C818C, + 0x034, 0x000C7189, + 0x034, 0x000C606D, + 0x034, 0x000C506A, + 0x034, 0x000C402C, + 0x034, 0x000C3029, + 0x034, 0x000C2026, + 0x034, 0x000C1009, + 0x034, 0x000C0006, + 0xA0000000, 0x00000000, + 0x034, 0x000CA794, + 0x034, 0x000C9791, + 0x034, 0x000C878E, + 0x034, 0x000C778B, + 0x034, 0x000C658D, + 0x034, 0x000C558A, + 0x034, 0x000C448D, + 0x034, 0x000C348A, + 0x034, 0x000C244C, + 0x034, 0x000C1449, + 0x034, 0x000C042B, + 0xB0000000, 0x00000000, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x000AA3EE, + 0x034, 0x000A93EB, + 0x034, 0x000A838B, + 0x034, 0x000A71AC, + 0x034, 0x000A61A9, + 0x034, 0x000A50AC, + 0x034, 0x000A40A9, + 0x034, 0x000A30A6, + 0x034, 0x000A202C, + 0x034, 0x000A1029, + 0x034, 0x000A0026, + 0x90000002, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x000AA3AD, + 0x034, 0x000A938A, + 0x034, 0x000A818C, + 0x034, 0x000A7189, + 0x034, 0x000A606D, + 0x034, 0x000A504C, + 0x034, 0x000A402C, + 0x034, 0x000A3029, + 0x034, 0x000A2026, + 0x034, 0x000A1023, + 0x034, 0x000A0006, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x000AA3AD, + 0x034, 0x000A938A, + 0x034, 0x000A818C, + 0x034, 0x000A7189, + 0x034, 0x000A606D, + 0x034, 0x000A504C, + 0x034, 0x000A402C, + 0x034, 0x000A3029, + 0x034, 0x000A2026, + 0x034, 0x000A1023, + 0x034, 0x000A0006, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x000AA3AD, + 0x034, 0x000A938A, + 0x034, 0x000A818C, + 0x034, 0x000A7189, + 0x034, 0x000A606D, + 0x034, 0x000A504C, + 0x034, 0x000A402C, + 0x034, 0x000A3029, + 0x034, 0x000A2026, + 0x034, 0x000A1023, + 0x034, 0x000A0006, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x000AA3AD, + 0x034, 0x000A938A, + 0x034, 0x000A818C, + 0x034, 0x000A7189, + 0x034, 0x000A606D, + 0x034, 0x000A504C, + 0x034, 0x000A402C, + 0x034, 0x000A3029, + 0x034, 0x000A2026, + 0x034, 0x000A1023, + 0x034, 0x000A0006, + 0x90000008, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x000AA3F5, + 0x034, 0x000A93D2, + 0x034, 0x000A83CE, + 0x034, 0x000A7290, + 0x034, 0x000A628D, + 0x034, 0x000A528A, + 0x034, 0x000A4287, + 0x034, 0x000A308D, + 0x034, 0x000A208A, + 0x034, 0x000A1087, + 0x034, 0x000A0048, + 0x90000009, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x000AAFF7, + 0x034, 0x000A9FF6, + 0x034, 0x000A8FF3, + 0x034, 0x000A7FF0, + 0x034, 0x000A6FED, + 0x034, 0x000A5FEA, + 0x034, 0x000A4FE7, + 0x034, 0x000A3DEA, + 0x034, 0x000A2DE7, + 0x034, 0x000A1DE4, + 0x034, 0x000A0D48, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x000AA3AD, + 0x034, 0x000A938A, + 0x034, 0x000A818C, + 0x034, 
0x000A7189, + 0x034, 0x000A606D, + 0x034, 0x000A504C, + 0x034, 0x000A402C, + 0x034, 0x000A3029, + 0x034, 0x000A2026, + 0x034, 0x000A1023, + 0x034, 0x000A0006, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x000AA3AD, + 0x034, 0x000A938A, + 0x034, 0x000A818C, + 0x034, 0x000A7189, + 0x034, 0x000A606D, + 0x034, 0x000A504C, + 0x034, 0x000A402C, + 0x034, 0x000A3029, + 0x034, 0x000A2026, + 0x034, 0x000A1023, + 0x034, 0x000A0006, + 0xA0000000, 0x00000000, + 0x034, 0x000AA794, + 0x034, 0x000A9791, + 0x034, 0x000A878E, + 0x034, 0x000A778B, + 0x034, 0x000A658D, + 0x034, 0x000A558A, + 0x034, 0x000A448D, + 0x034, 0x000A348A, + 0x034, 0x000A244C, + 0x034, 0x000A1449, + 0x034, 0x000A042B, + 0xB0000000, 0x00000000, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0008A3EF, + 0x034, 0x000893EC, + 0x034, 0x0008838C, + 0x034, 0x000871AD, + 0x034, 0x000861AA, + 0x034, 0x000850AD, + 0x034, 0x000840AA, + 0x034, 0x0008306A, + 0x034, 0x0008202D, + 0x034, 0x0008102A, + 0x034, 0x00080027, + 0x90000002, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0008A3EE, + 0x034, 0x000893AC, + 0x034, 0x0008838A, + 0x034, 0x0008718C, + 0x034, 0x00086189, + 0x034, 0x0008506D, + 0x034, 0x0008406A, + 0x034, 0x0008302C, + 0x034, 0x00082029, + 0x034, 0x00081026, + 0x034, 0x00080023, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0008A3EE, + 0x034, 0x000893AC, + 0x034, 0x0008838A, + 0x034, 0x0008718C, + 0x034, 0x00086189, + 0x034, 0x0008506D, + 0x034, 0x0008406A, + 0x034, 0x0008302C, + 0x034, 0x00082029, + 0x034, 0x00081026, + 0x034, 0x00080023, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0008A3EE, + 0x034, 0x000893AC, + 0x034, 0x0008838A, + 0x034, 0x0008718C, + 0x034, 0x00086189, + 0x034, 0x0008506D, + 0x034, 0x0008406A, + 0x034, 0x0008302C, + 0x034, 0x00082029, + 0x034, 0x00081026, + 0x034, 0x00080023, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0008A3EE, + 0x034, 0x000893AC, + 0x034, 0x0008838A, + 0x034, 0x0008718C, + 0x034, 0x00086189, + 0x034, 0x0008506D, + 0x034, 0x0008406A, + 0x034, 0x0008302C, + 0x034, 0x00082029, + 0x034, 0x00081026, + 0x034, 0x00080023, + 0x90000008, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0008A3F1, + 0x034, 0x000892B1, + 0x034, 0x000881CF, + 0x034, 0x00087170, + 0x034, 0x0008616D, + 0x034, 0x0008516A, + 0x034, 0x00084167, + 0x034, 0x0008302F, + 0x034, 0x0008202C, + 0x034, 0x00081029, + 0x034, 0x00080026, + 0x90000009, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0008AFF7, + 0x034, 0x00089FF6, + 0x034, 0x00088FF3, + 0x034, 0x00087FF0, + 0x034, 0x00086FED, + 0x034, 0x00085FEA, + 0x034, 0x00084FE7, + 0x034, 0x00083EC7, + 0x034, 0x00082EC4, + 0x034, 0x00081D4B, + 0x034, 0x00080D48, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0008A3EE, + 0x034, 0x000893AC, + 0x034, 0x0008838A, + 0x034, 0x0008718C, + 0x034, 0x00086189, + 0x034, 0x0008506D, + 0x034, 0x0008406A, + 0x034, 0x0008302C, + 0x034, 0x00082029, + 0x034, 0x00081026, + 0x034, 0x00080023, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0x034, 0x0008A3EE, + 0x034, 0x000893AC, + 0x034, 0x0008838A, + 0x034, 0x0008718C, + 0x034, 0x00086189, + 0x034, 0x0008506D, + 0x034, 0x0008406A, + 0x034, 0x0008302C, + 0x034, 0x00082029, + 0x034, 0x00081026, + 0x034, 0x00080023, + 0xA0000000, 0x00000000, + 0x034, 0x0008A794, + 0x034, 0x00089791, + 0x034, 0x0008878E, + 0x034, 0x0008778B, + 0x034, 0x0008658D, + 0x034, 0x0008558A, + 0x034, 0x0008448D, + 0x034, 0x0008348A, + 0x034, 0x0008244C, + 0x034, 0x00081449, + 0x034, 0x0008042B, + 0xB0000000, 0x00000000, + 0x0EF, 
0x00000000, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x0DF, 0x00000001, + 0x90000002, 0x00000000, 0x40000000, 0x00000000, + 0x0DF, 0x00000001, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x0DF, 0x00000001, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x0DF, 0x00000001, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x0DF, 0x00000001, + 0x90000008, 0x00000000, 0x40000000, 0x00000000, + 0x0DF, 0x00000001, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x0DF, 0x00000001, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0x0DF, 0x00000001, + 0xA0000000, 0x00000000, + 0x0DF, 0x00000000, + 0xB0000000, 0x00000000, + 0x018, 0x0001712A, + 0x0EF, 0x00000040, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x035, 0x000006CC, + 0x035, 0x000086CC, + 0x035, 0x000106CC, + 0x035, 0x000206CC, + 0x035, 0x000286CC, + 0x035, 0x000306CC, + 0x035, 0x000406CC, + 0x035, 0x000486CC, + 0x035, 0x000506CC, + 0x035, 0x000806CC, + 0x035, 0x000886CC, + 0x035, 0x000906CC, + 0x035, 0x000A06CC, + 0x035, 0x000A86CC, + 0x035, 0x000B06CC, + 0x035, 0x000C06CC, + 0x035, 0x000C86CC, + 0x035, 0x000D06CC, + 0x90000002, 0x00000000, 0x40000000, 0x00000000, + 0x035, 0x000006CC, + 0x035, 0x000086CC, + 0x035, 0x000106CC, + 0x035, 0x000206CC, + 0x035, 0x000286CC, + 0x035, 0x000306CC, + 0x035, 0x000406CC, + 0x035, 0x000486CC, + 0x035, 0x000506CC, + 0x035, 0x000806CC, + 0x035, 0x000886CC, + 0x035, 0x000906CC, + 0x035, 0x000A06CC, + 0x035, 0x000A86CC, + 0x035, 0x000B06CC, + 0x035, 0x000C06CC, + 0x035, 0x000C86CC, + 0x035, 0x000D06CC, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x035, 0x000006CC, + 0x035, 0x000086CC, + 0x035, 0x000106CC, + 0x035, 0x000206CC, + 0x035, 0x000286CC, + 0x035, 0x000306CC, + 0x035, 0x000406CC, + 0x035, 0x000486CC, + 0x035, 0x000506CC, + 0x035, 0x000806CC, + 0x035, 0x000886CC, + 0x035, 0x000906CC, + 0x035, 0x000A06CC, + 0x035, 0x000A86CC, + 0x035, 0x000B06CC, + 0x035, 0x000C06CC, + 0x035, 0x000C86CC, + 0x035, 0x000D06CC, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x035, 0x000006CC, + 0x035, 0x000086CC, + 0x035, 0x000106CC, + 0x035, 0x000206CC, + 0x035, 0x000286CC, + 0x035, 0x000306CC, + 0x035, 0x000406CC, + 0x035, 0x000486CC, + 0x035, 0x000506CC, + 0x035, 0x000806CC, + 0x035, 0x000886CC, + 0x035, 0x000906CC, + 0x035, 0x000A06CC, + 0x035, 0x000A86CC, + 0x035, 0x000B06CC, + 0x035, 0x000C06CC, + 0x035, 0x000C86CC, + 0x035, 0x000D06CC, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x035, 0x000006CC, + 0x035, 0x000086CC, + 0x035, 0x000106CC, + 0x035, 0x000206CC, + 0x035, 0x000286CC, + 0x035, 0x000306CC, + 0x035, 0x000406CC, + 0x035, 0x000486CC, + 0x035, 0x000506CC, + 0x035, 0x000806CC, + 0x035, 0x000886CC, + 0x035, 0x000906CC, + 0x035, 0x000A06CC, + 0x035, 0x000A86CC, + 0x035, 0x000B06CC, + 0x035, 0x000C06CC, + 0x035, 0x000C86CC, + 0x035, 0x000D06CC, + 0x90000008, 0x00000000, 0x40000000, 0x00000000, + 0x035, 0x000006CC, + 0x035, 0x000086CC, + 0x035, 0x000106CC, + 0x035, 0x000206CC, + 0x035, 0x000286CC, + 0x035, 0x000306CC, + 0x035, 0x000406CC, + 0x035, 0x000486CC, + 0x035, 0x000506CC, + 0x035, 0x000806CC, + 0x035, 0x000886CC, + 0x035, 0x000906CC, + 0x035, 0x000A06CC, + 0x035, 0x000A86CC, + 0x035, 0x000B06CC, + 0x035, 0x000C06CC, + 0x035, 0x000C86CC, + 0x035, 0x000D06CC, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x035, 0x000006CC, + 0x035, 0x000086CC, + 0x035, 0x000106CC, + 0x035, 0x000206CC, + 0x035, 0x000286CC, + 0x035, 0x000306CC, + 0x035, 0x000406CC, + 0x035, 0x000486CC, + 0x035, 0x000506CC, + 0x035, 0x000806CC, + 0x035, 0x000886CC, + 0x035, 
0x000906CC, + 0x035, 0x000A06CC, + 0x035, 0x000A86CC, + 0x035, 0x000B06CC, + 0x035, 0x000C06CC, + 0x035, 0x000C86CC, + 0x035, 0x000D06CC, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0x035, 0x000006CC, + 0x035, 0x000086CC, + 0x035, 0x000106CC, + 0x035, 0x000206CC, + 0x035, 0x000286CC, + 0x035, 0x000306CC, + 0x035, 0x000406CC, + 0x035, 0x000486CC, + 0x035, 0x000506CC, + 0x035, 0x000806CC, + 0x035, 0x000886CC, + 0x035, 0x000906CC, + 0x035, 0x000A06CC, + 0x035, 0x000A86CC, + 0x035, 0x000B06CC, + 0x035, 0x000C06CC, + 0x035, 0x000C86CC, + 0x035, 0x000D06CC, + 0xA0000000, 0x00000000, + 0x035, 0x00000484, + 0x035, 0x00008484, + 0x035, 0x00010484, + 0x035, 0x00020584, + 0x035, 0x00028584, + 0x035, 0x00030584, + 0x035, 0x00040584, + 0x035, 0x00048584, + 0x035, 0x00050584, + 0x035, 0x000805FB, + 0x035, 0x000885FB, + 0x035, 0x000905FB, + 0x035, 0x000A05FB, + 0x035, 0x000A85FB, + 0x035, 0x000B05FB, + 0x035, 0x000C05FB, + 0x035, 0x000C85FB, + 0x035, 0x000D05FB, + 0xB0000000, 0x00000000, + 0x0EF, 0x00000000, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x0DF, 0x00000001, + 0x90000002, 0x00000000, 0x40000000, 0x00000000, + 0x0DF, 0x00000001, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x0DF, 0x00000001, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x0DF, 0x00000001, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x0DF, 0x00000001, + 0x90000008, 0x00000000, 0x40000000, 0x00000000, + 0x0DF, 0x00000001, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x0DF, 0x00000001, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0x0DF, 0x00000001, + 0xA0000000, 0x00000000, + 0x0DF, 0x00000000, + 0xB0000000, 0x00000000, + 0x018, 0x0001712A, + 0x0EF, 0x00000010, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x036, 0x00000473, + 0x036, 0x00008473, + 0x036, 0x00010473, + 0x036, 0x00020473, + 0x036, 0x00028473, + 0x036, 0x00030473, + 0x036, 0x00040473, + 0x036, 0x00048473, + 0x036, 0x00050473, + 0x036, 0x00080473, + 0x036, 0x00088473, + 0x036, 0x00090473, + 0x036, 0x000A0473, + 0x036, 0x000A8473, + 0x036, 0x000B0473, + 0x036, 0x000C0473, + 0x036, 0x000C8473, + 0x036, 0x000D0473, + 0x90000002, 0x00000000, 0x40000000, 0x00000000, + 0x036, 0x00000475, + 0x036, 0x00008475, + 0x036, 0x00010475, + 0x036, 0x00020475, + 0x036, 0x00028475, + 0x036, 0x00030475, + 0x036, 0x00040475, + 0x036, 0x00048475, + 0x036, 0x00050475, + 0x036, 0x00080475, + 0x036, 0x00088475, + 0x036, 0x00090475, + 0x036, 0x000A0475, + 0x036, 0x000A8475, + 0x036, 0x000B0475, + 0x036, 0x000C0475, + 0x036, 0x000C8475, + 0x036, 0x000D0475, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x036, 0x00000475, + 0x036, 0x00008475, + 0x036, 0x00010475, + 0x036, 0x00020475, + 0x036, 0x00028475, + 0x036, 0x00030475, + 0x036, 0x00040475, + 0x036, 0x00048475, + 0x036, 0x00050475, + 0x036, 0x00080475, + 0x036, 0x00088475, + 0x036, 0x00090475, + 0x036, 0x000A0475, + 0x036, 0x000A8475, + 0x036, 0x000B0475, + 0x036, 0x000C0475, + 0x036, 0x000C8475, + 0x036, 0x000D0475, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x036, 0x00000475, + 0x036, 0x00008475, + 0x036, 0x00010475, + 0x036, 0x00020475, + 0x036, 0x00028475, + 0x036, 0x00030475, + 0x036, 0x00040475, + 0x036, 0x00048475, + 0x036, 0x00050475, + 0x036, 0x00080475, + 0x036, 0x00088475, + 0x036, 0x00090475, + 0x036, 0x000A0475, + 0x036, 0x000A8475, + 0x036, 0x000B0475, + 0x036, 0x000C0475, + 0x036, 0x000C8475, + 0x036, 0x000D0475, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x036, 0x00000475, + 0x036, 0x00008475, + 0x036, 0x00010475, + 0x036, 0x00020475, + 0x036, 
0x00028475, + 0x036, 0x00030475, + 0x036, 0x00040475, + 0x036, 0x00048475, + 0x036, 0x00050475, + 0x036, 0x00080475, + 0x036, 0x00088475, + 0x036, 0x00090475, + 0x036, 0x000A0475, + 0x036, 0x000A8475, + 0x036, 0x000B0475, + 0x036, 0x000C0475, + 0x036, 0x000C8475, + 0x036, 0x000D0475, + 0x90000008, 0x00000000, 0x40000000, 0x00000000, + 0x036, 0x00000475, + 0x036, 0x00008475, + 0x036, 0x00010475, + 0x036, 0x00020475, + 0x036, 0x00028475, + 0x036, 0x00030475, + 0x036, 0x00040475, + 0x036, 0x00048475, + 0x036, 0x00050475, + 0x036, 0x00080475, + 0x036, 0x00088475, + 0x036, 0x00090475, + 0x036, 0x000A0475, + 0x036, 0x000A8475, + 0x036, 0x000B0475, + 0x036, 0x000C0475, + 0x036, 0x000C8475, + 0x036, 0x000D0475, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x036, 0x00000475, + 0x036, 0x00008475, + 0x036, 0x00010475, + 0x036, 0x00020475, + 0x036, 0x00028475, + 0x036, 0x00030475, + 0x036, 0x00040475, + 0x036, 0x00048475, + 0x036, 0x00050475, + 0x036, 0x00080475, + 0x036, 0x00088475, + 0x036, 0x00090475, + 0x036, 0x000A0475, + 0x036, 0x000A8475, + 0x036, 0x000B0475, + 0x036, 0x000C0475, + 0x036, 0x000C8475, + 0x036, 0x000D0475, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0x036, 0x00000475, + 0x036, 0x00008475, + 0x036, 0x00010475, + 0x036, 0x00020475, + 0x036, 0x00028475, + 0x036, 0x00030475, + 0x036, 0x00040475, + 0x036, 0x00048475, + 0x036, 0x00050475, + 0x036, 0x00080475, + 0x036, 0x00088475, + 0x036, 0x00090475, + 0x036, 0x000A0475, + 0x036, 0x000A8475, + 0x036, 0x000B0475, + 0x036, 0x000C0475, + 0x036, 0x000C8475, + 0x036, 0x000D0475, + 0xA0000000, 0x00000000, + 0x036, 0x00000474, + 0x036, 0x00008474, + 0x036, 0x00010474, + 0x036, 0x00020474, + 0x036, 0x00028474, + 0x036, 0x00030474, + 0x036, 0x00040474, + 0x036, 0x00048474, + 0x036, 0x00050474, + 0x036, 0x00080474, + 0x036, 0x00088474, + 0x036, 0x00090474, + 0x036, 0x000A0474, + 0x036, 0x000A8474, + 0x036, 0x000B0474, + 0x036, 0x000C0474, + 0x036, 0x000C8474, + 0x036, 0x000D0474, + 0xB0000000, 0x00000000, + 0x0EF, 0x00000000, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x90000002, 0x00000000, 0x40000000, 0x00000000, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x90000008, 0x00000000, 0x40000000, 0x00000000, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0xA0000000, 0x00000000, + 0x0EF, 0x00000004, + 0x037, 0x00000000, + 0x038, 0x0000514E, + 0x037, 0x00004000, + 0x038, 0x0000514E, + 0x037, 0x00008000, + 0x038, 0x0000514E, + 0x037, 0x00010000, + 0x038, 0x0000514E, + 0x037, 0x00014000, + 0x038, 0x0000514E, + 0x037, 0x00018000, + 0x038, 0x0000514E, + 0x037, 0x0001C000, + 0x038, 0x0000514E, + 0x037, 0x00020000, + 0x038, 0x0000514E, + 0x037, 0x00024000, + 0x038, 0x0000514E, + 0x037, 0x00028000, + 0x038, 0x0000514E, + 0x037, 0x0002C000, + 0x038, 0x0000714E, + 0x037, 0x00030000, + 0x038, 0x0000514E, + 0x037, 0x00034000, + 0x038, 0x0000514E, + 0x037, 0x00038000, + 0x038, 0x0000514E, + 0x037, 0x0003C000, + 0x038, 0x0000514E, + 0x037, 0x00040000, + 0x038, 0x0000514E, + 0x037, 0x00044000, + 0x038, 0x0000514E, + 0x037, 0x00048000, + 0x038, 0x0000514E, + 0x037, 0x00080000, + 0x038, 0x00005ECE, + 0x037, 0x00084000, + 0x038, 0x00005ECE, + 0x037, 0x00088000, + 0x038, 0x00005ECE, + 0x037, 0x00090000, + 0x038, 0x00005ECE, + 0x037, 0x00094000, + 0x038, 0x00005ECE, + 0x037, 0x00098000, + 0x038, 0x00005ECE, + 0x037, 0x0009C000, + 0x038, 0x00005ECE, + 0x037, 0x000A0000, + 0x038, 
0x00005ECE, + 0x037, 0x000A4000, + 0x038, 0x00005ECE, + 0x037, 0x000A8000, + 0x038, 0x00005ECE, + 0x037, 0x000AC000, + 0x038, 0x00005ECE, + 0x037, 0x000B0000, + 0x038, 0x00005ECE, + 0x037, 0x000B4000, + 0x038, 0x00005ECE, + 0x037, 0x000B8000, + 0x038, 0x00005ECE, + 0x037, 0x000BC000, + 0x038, 0x00005ECE, + 0x037, 0x000C0000, + 0x038, 0x00005ECE, + 0x037, 0x000C4000, + 0x038, 0x00005ECE, + 0x037, 0x000C8000, + 0x038, 0x00005ECE, + 0x0EF, 0x00000000, + 0xB0000000, 0x00000000, + 0x0EF, 0x00000008, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x90000002, 0x00000000, 0x40000000, 0x00000000, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x90000008, 0x00000000, 0x40000000, 0x00000000, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0xA0000000, 0x00000000, + 0x03C, 0x0000007D, + 0x03C, 0x0000047D, + 0x03C, 0x0000087D, + 0x03C, 0x0000107D, + 0x03C, 0x0000147D, + 0x03C, 0x0000187D, + 0xB0000000, 0x00000000, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x00000275, + 0x03C, 0x00000542, + 0x03C, 0x00000821, + 0x03C, 0x00001275, + 0x03C, 0x00001542, + 0x03C, 0x00001821, + 0x03C, 0x00002275, + 0x03C, 0x00002542, + 0x03C, 0x00002821, + 0x90000002, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x0000027F, + 0x03C, 0x00000542, + 0x03C, 0x00000821, + 0x03C, 0x0000127F, + 0x03C, 0x00001542, + 0x03C, 0x00001821, + 0x03C, 0x0000227F, + 0x03C, 0x00002542, + 0x03C, 0x00002821, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x0000027F, + 0x03C, 0x00000542, + 0x03C, 0x00000821, + 0x03C, 0x0000127F, + 0x03C, 0x00001542, + 0x03C, 0x00001821, + 0x03C, 0x0000227F, + 0x03C, 0x00002542, + 0x03C, 0x00002821, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x0000027F, + 0x03C, 0x00000542, + 0x03C, 0x00000821, + 0x03C, 0x0000127F, + 0x03C, 0x00001542, + 0x03C, 0x00001821, + 0x03C, 0x0000227F, + 0x03C, 0x00002542, + 0x03C, 0x00002821, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x0000027F, + 0x03C, 0x00000542, + 0x03C, 0x00000821, + 0x03C, 0x0000127F, + 0x03C, 0x00001542, + 0x03C, 0x00001821, + 0x03C, 0x0000227F, + 0x03C, 0x00002542, + 0x03C, 0x00002821, + 0x90000008, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x0000027F, + 0x03C, 0x00000542, + 0x03C, 0x00000821, + 0x03C, 0x0000127F, + 0x03C, 0x00001542, + 0x03C, 0x00001821, + 0x03C, 0x0000227F, + 0x03C, 0x00002542, + 0x03C, 0x00002821, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x0000027F, + 0x03C, 0x00000542, + 0x03C, 0x00000821, + 0x03C, 0x0000127F, + 0x03C, 0x00001542, + 0x03C, 0x00001821, + 0x03C, 0x0000227F, + 0x03C, 0x00002542, + 0x03C, 0x00002821, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0x03C, 0x0000027F, + 0x03C, 0x00000542, + 0x03C, 0x00000821, + 0x03C, 0x0000127F, + 0x03C, 0x00001542, + 0x03C, 0x00001821, + 0x03C, 0x0000227F, + 0x03C, 0x00002542, + 0x03C, 0x00002821, + 0xA0000000, 0x00000000, + 0x03C, 0x0000037E, + 0x03C, 0x00000575, + 0x03C, 0x00000971, + 0x03C, 0x0000127E, + 0x03C, 0x00001575, + 0x03C, 0x00001871, + 0x03C, 0x0000217E, + 0x03C, 0x00002575, + 0x03C, 0x00002871, + 0xB0000000, 0x00000000, + 0x0EF, 0x00000000, + 0x061, 0x000C0D47, + 0x062, 0x0000133C, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x063, 0x000750E7, + 0x90000002, 0x00000000, 0x40000000, 0x00000000, + 0x063, 0x000750E7, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x063, 0x000750E7, + 0x90000004, 0x00000000, 0x40000000, 
0x00000000, + 0x063, 0x000750E7, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x063, 0x000750E7, + 0x90000008, 0x00000000, 0x40000000, 0x00000000, + 0x063, 0x000750E7, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x063, 0x000750E7, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0x063, 0x000750E7, + 0xA0000000, 0x00000000, + 0x063, 0x0007D0E7, + 0xB0000000, 0x00000000, + 0x064, 0x00014FEC, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x065, 0x000920D0, + 0x90000002, 0x00000000, 0x40000000, 0x00000000, + 0x065, 0x000920D0, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x065, 0x000920D0, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x065, 0x000920D0, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x065, 0x000920D0, + 0x90000008, 0x00000000, 0x40000000, 0x00000000, + 0x065, 0x000920D0, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x065, 0x000920D0, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0x065, 0x000920D0, + 0xA0000000, 0x00000000, + 0x065, 0x000923FF, + 0xB0000000, 0x00000000, + 0x066, 0x00000040, + 0x057, 0x00050000, + 0x056, 0x00051DF0, + 0x80000001, 0x00000000, 0x40000000, 0x00000000, + 0x90000002, 0x00000000, 0x40000000, 0x00000000, + 0x90000003, 0x00000000, 0x40000000, 0x00000000, + 0x90000004, 0x00000000, 0x40000000, 0x00000000, + 0x90000005, 0x00000000, 0x40000000, 0x00000000, + 0x90000008, 0x00000000, 0x40000000, 0x00000000, + 0x9000000a, 0x00000000, 0x40000000, 0x00000000, + 0x9000000b, 0x00000000, 0x40000000, 0x00000000, + 0xA0000000, 0x00000000, + 0x055, 0x00082060, + 0xB0000000, 0x00000000, +}; + +RTW_DECL_TABLE_RF_RADIO(rtw8814a_rf_d, D); + +static const struct rtw_txpwr_lmt_cfg_pair rtw8814a_txpwr_lmt[] = { + { 0, 0, 0, 0, 1, 36, }, + { 2, 0, 0, 0, 1, 32, }, + { 1, 0, 0, 0, 1, 32, }, + { 0, 0, 0, 0, 2, 36, }, + { 2, 0, 0, 0, 2, 32, }, + { 1, 0, 0, 0, 2, 32, }, + { 0, 0, 0, 0, 3, 36, }, + { 2, 0, 0, 0, 3, 32, }, + { 1, 0, 0, 0, 3, 32, }, + { 0, 0, 0, 0, 4, 36, }, + { 2, 0, 0, 0, 4, 32, }, + { 1, 0, 0, 0, 4, 32, }, + { 0, 0, 0, 0, 5, 36, }, + { 2, 0, 0, 0, 5, 32, }, + { 1, 0, 0, 0, 5, 32, }, + { 0, 0, 0, 0, 6, 36, }, + { 2, 0, 0, 0, 6, 32, }, + { 1, 0, 0, 0, 6, 32, }, + { 0, 0, 0, 0, 7, 36, }, + { 2, 0, 0, 0, 7, 32, }, + { 1, 0, 0, 0, 7, 32, }, + { 0, 0, 0, 0, 8, 36, }, + { 2, 0, 0, 0, 8, 32, }, + { 1, 0, 0, 0, 8, 32, }, + { 0, 0, 0, 0, 9, 36, }, + { 2, 0, 0, 0, 9, 32, }, + { 1, 0, 0, 0, 9, 32, }, + { 0, 0, 0, 0, 10, 36, }, + { 2, 0, 0, 0, 10, 32, }, + { 1, 0, 0, 0, 10, 32, }, + { 0, 0, 0, 0, 11, 36, }, + { 2, 0, 0, 0, 11, 32, }, + { 1, 0, 0, 0, 11, 32, }, + { 0, 0, 0, 0, 12, 63, }, + { 2, 0, 0, 0, 12, 32, }, + { 1, 0, 0, 0, 12, 32, }, + { 0, 0, 0, 0, 13, 63, }, + { 2, 0, 0, 0, 13, 32, }, + { 1, 0, 0, 0, 13, 32, }, + { 0, 0, 0, 0, 14, 63, }, + { 2, 0, 0, 0, 14, 63, }, + { 1, 0, 0, 0, 14, 32, }, + { 0, 0, 0, 1, 1, 34, }, + { 2, 0, 0, 1, 1, 32, }, + { 1, 0, 0, 1, 1, 32, }, + { 0, 0, 0, 1, 2, 36, }, + { 2, 0, 0, 1, 2, 32, }, + { 1, 0, 0, 1, 2, 32, }, + { 0, 0, 0, 1, 3, 36, }, + { 2, 0, 0, 1, 3, 32, }, + { 1, 0, 0, 1, 3, 32, }, + { 0, 0, 0, 1, 4, 36, }, + { 2, 0, 0, 1, 4, 32, }, + { 1, 0, 0, 1, 4, 32, }, + { 0, 0, 0, 1, 5, 36, }, + { 2, 0, 0, 1, 5, 32, }, + { 1, 0, 0, 1, 5, 32, }, + { 0, 0, 0, 1, 6, 36, }, + { 2, 0, 0, 1, 6, 32, }, + { 1, 0, 0, 1, 6, 32, }, + { 0, 0, 0, 1, 7, 36, }, + { 2, 0, 0, 1, 7, 32, }, + { 1, 0, 0, 1, 7, 32, }, + { 0, 0, 0, 1, 8, 36, }, + { 2, 0, 0, 1, 8, 32, }, + { 1, 0, 0, 1, 8, 32, }, + { 0, 0, 0, 1, 9, 36, }, + { 2, 0, 0, 1, 9, 32, }, + { 1, 0, 0, 1, 9, 32, }, + { 0, 0, 0, 1, 10, 36, }, + { 2, 0, 
0, 1, 10, 32, }, + { 1, 0, 0, 1, 10, 32, }, + { 0, 0, 0, 1, 11, 32, }, + { 2, 0, 0, 1, 11, 32, }, + { 1, 0, 0, 1, 11, 32, }, + { 0, 0, 0, 1, 12, 63, }, + { 2, 0, 0, 1, 12, 32, }, + { 1, 0, 0, 1, 12, 32, }, + { 0, 0, 0, 1, 13, 63, }, + { 2, 0, 0, 1, 13, 32, }, + { 1, 0, 0, 1, 13, 32, }, + { 0, 0, 0, 1, 14, 63, }, + { 2, 0, 0, 1, 14, 63, }, + { 1, 0, 0, 1, 14, 63, }, + { 0, 0, 0, 2, 1, 34, }, + { 2, 0, 0, 2, 1, 32, }, + { 1, 0, 0, 2, 1, 32, }, + { 0, 0, 0, 2, 2, 36, }, + { 2, 0, 0, 2, 2, 32, }, + { 1, 0, 0, 2, 2, 32, }, + { 0, 0, 0, 2, 3, 36, }, + { 2, 0, 0, 2, 3, 32, }, + { 1, 0, 0, 2, 3, 32, }, + { 0, 0, 0, 2, 4, 36, }, + { 2, 0, 0, 2, 4, 32, }, + { 1, 0, 0, 2, 4, 32, }, + { 0, 0, 0, 2, 5, 36, }, + { 2, 0, 0, 2, 5, 32, }, + { 1, 0, 0, 2, 5, 32, }, + { 0, 0, 0, 2, 6, 36, }, + { 2, 0, 0, 2, 6, 32, }, + { 1, 0, 0, 2, 6, 32, }, + { 0, 0, 0, 2, 7, 36, }, + { 2, 0, 0, 2, 7, 32, }, + { 1, 0, 0, 2, 7, 32, }, + { 0, 0, 0, 2, 8, 36, }, + { 2, 0, 0, 2, 8, 32, }, + { 1, 0, 0, 2, 8, 32, }, + { 0, 0, 0, 2, 9, 36, }, + { 2, 0, 0, 2, 9, 32, }, + { 1, 0, 0, 2, 9, 32, }, + { 0, 0, 0, 2, 10, 36, }, + { 2, 0, 0, 2, 10, 32, }, + { 1, 0, 0, 2, 10, 32, }, + { 0, 0, 0, 2, 11, 32, }, + { 2, 0, 0, 2, 11, 32, }, + { 1, 0, 0, 2, 11, 32, }, + { 0, 0, 0, 2, 12, 63, }, + { 2, 0, 0, 2, 12, 32, }, + { 1, 0, 0, 2, 12, 32, }, + { 0, 0, 0, 2, 13, 63, }, + { 2, 0, 0, 2, 13, 32, }, + { 1, 0, 0, 2, 13, 32, }, + { 0, 0, 0, 2, 14, 63, }, + { 2, 0, 0, 2, 14, 63, }, + { 1, 0, 0, 2, 14, 63, }, + { 0, 0, 0, 3, 1, 32, }, + { 2, 0, 0, 3, 1, 30, }, + { 1, 0, 0, 3, 1, 30, }, + { 0, 0, 0, 3, 2, 34, }, + { 2, 0, 0, 3, 2, 30, }, + { 1, 0, 0, 3, 2, 30, }, + { 0, 0, 0, 3, 3, 34, }, + { 2, 0, 0, 3, 3, 30, }, + { 1, 0, 0, 3, 3, 30, }, + { 0, 0, 0, 3, 4, 34, }, + { 2, 0, 0, 3, 4, 30, }, + { 1, 0, 0, 3, 4, 30, }, + { 0, 0, 0, 3, 5, 34, }, + { 2, 0, 0, 3, 5, 30, }, + { 1, 0, 0, 3, 5, 30, }, + { 0, 0, 0, 3, 6, 34, }, + { 2, 0, 0, 3, 6, 30, }, + { 1, 0, 0, 3, 6, 30, }, + { 0, 0, 0, 3, 7, 34, }, + { 2, 0, 0, 3, 7, 30, }, + { 1, 0, 0, 3, 7, 30, }, + { 0, 0, 0, 3, 8, 34, }, + { 2, 0, 0, 3, 8, 30, }, + { 1, 0, 0, 3, 8, 30, }, + { 0, 0, 0, 3, 9, 34, }, + { 2, 0, 0, 3, 9, 30, }, + { 1, 0, 0, 3, 9, 30, }, + { 0, 0, 0, 3, 10, 34, }, + { 2, 0, 0, 3, 10, 30, }, + { 1, 0, 0, 3, 10, 30, }, + { 0, 0, 0, 3, 11, 30, }, + { 2, 0, 0, 3, 11, 30, }, + { 1, 0, 0, 3, 11, 30, }, + { 0, 0, 0, 3, 12, 63, }, + { 2, 0, 0, 3, 12, 30, }, + { 1, 0, 0, 3, 12, 30, }, + { 0, 0, 0, 3, 13, 63, }, + { 2, 0, 0, 3, 13, 30, }, + { 1, 0, 0, 3, 13, 30, }, + { 0, 0, 0, 3, 14, 63, }, + { 2, 0, 0, 3, 14, 63, }, + { 1, 0, 0, 3, 14, 63, }, + { 0, 0, 0, 6, 1, 30, }, + { 2, 0, 0, 6, 1, 28, }, + { 1, 0, 0, 6, 1, 28, }, + { 0, 0, 0, 6, 2, 32, }, + { 2, 0, 0, 6, 2, 28, }, + { 1, 0, 0, 6, 2, 28, }, + { 0, 0, 0, 6, 3, 32, }, + { 2, 0, 0, 6, 3, 28, }, + { 1, 0, 0, 6, 3, 28, }, + { 0, 0, 0, 6, 4, 32, }, + { 2, 0, 0, 6, 4, 28, }, + { 1, 0, 0, 6, 4, 28, }, + { 0, 0, 0, 6, 5, 32, }, + { 2, 0, 0, 6, 5, 28, }, + { 1, 0, 0, 6, 5, 28, }, + { 0, 0, 0, 6, 6, 32, }, + { 2, 0, 0, 6, 6, 28, }, + { 1, 0, 0, 6, 6, 28, }, + { 0, 0, 0, 6, 7, 32, }, + { 2, 0, 0, 6, 7, 28, }, + { 1, 0, 0, 6, 7, 28, }, + { 0, 0, 0, 6, 8, 32, }, + { 2, 0, 0, 6, 8, 28, }, + { 1, 0, 0, 6, 8, 28, }, + { 0, 0, 0, 6, 9, 32, }, + { 2, 0, 0, 6, 9, 28, }, + { 1, 0, 0, 6, 9, 28, }, + { 0, 0, 0, 6, 10, 32, }, + { 2, 0, 0, 6, 10, 28, }, + { 1, 0, 0, 6, 10, 28, }, + { 0, 0, 0, 6, 11, 28, }, + { 2, 0, 0, 6, 11, 28, }, + { 1, 0, 0, 6, 11, 28, }, + { 0, 0, 0, 6, 12, 63, }, + { 2, 0, 0, 6, 12, 28, }, + { 1, 0, 0, 6, 12, 28, }, + { 0, 0, 0, 6, 13, 63, }, 
+ { 2, 0, 0, 6, 13, 28, }, + { 1, 0, 0, 6, 13, 28, }, + { 0, 0, 0, 6, 14, 63, }, + { 2, 0, 0, 6, 14, 63, }, + { 1, 0, 0, 6, 14, 63, }, + { 0, 0, 0, 7, 1, 28, }, + { 2, 0, 0, 7, 1, 26, }, + { 1, 0, 0, 7, 1, 26, }, + { 0, 0, 0, 7, 2, 30, }, + { 2, 0, 0, 7, 2, 26, }, + { 1, 0, 0, 7, 2, 26, }, + { 0, 0, 0, 7, 3, 30, }, + { 2, 0, 0, 7, 3, 26, }, + { 1, 0, 0, 7, 3, 26, }, + { 0, 0, 0, 7, 4, 30, }, + { 2, 0, 0, 7, 4, 26, }, + { 1, 0, 0, 7, 4, 26, }, + { 0, 0, 0, 7, 5, 30, }, + { 2, 0, 0, 7, 5, 26, }, + { 1, 0, 0, 7, 5, 26, }, + { 0, 0, 0, 7, 6, 30, }, + { 2, 0, 0, 7, 6, 26, }, + { 1, 0, 0, 7, 6, 26, }, + { 0, 0, 0, 7, 7, 30, }, + { 2, 0, 0, 7, 7, 26, }, + { 1, 0, 0, 7, 7, 26, }, + { 0, 0, 0, 7, 8, 30, }, + { 2, 0, 0, 7, 8, 26, }, + { 1, 0, 0, 7, 8, 26, }, + { 0, 0, 0, 7, 9, 30, }, + { 2, 0, 0, 7, 9, 26, }, + { 1, 0, 0, 7, 9, 26, }, + { 0, 0, 0, 7, 10, 30, }, + { 2, 0, 0, 7, 10, 26, }, + { 1, 0, 0, 7, 10, 26, }, + { 0, 0, 0, 7, 11, 26, }, + { 2, 0, 0, 7, 11, 26, }, + { 1, 0, 0, 7, 11, 26, }, + { 0, 0, 0, 7, 12, 63, }, + { 2, 0, 0, 7, 12, 26, }, + { 1, 0, 0, 7, 12, 26, }, + { 0, 0, 0, 7, 13, 63, }, + { 2, 0, 0, 7, 13, 26, }, + { 1, 0, 0, 7, 13, 26, }, + { 0, 0, 0, 7, 14, 63, }, + { 2, 0, 0, 7, 14, 63, }, + { 1, 0, 0, 7, 14, 63, }, + { 0, 0, 1, 2, 1, 63, }, + { 2, 0, 1, 2, 1, 63, }, + { 1, 0, 1, 2, 1, 63, }, + { 0, 0, 1, 2, 2, 63, }, + { 2, 0, 1, 2, 2, 63, }, + { 1, 0, 1, 2, 2, 63, }, + { 0, 0, 1, 2, 3, 32, }, + { 2, 0, 1, 2, 3, 32, }, + { 1, 0, 1, 2, 3, 32, }, + { 0, 0, 1, 2, 4, 36, }, + { 2, 0, 1, 2, 4, 32, }, + { 1, 0, 1, 2, 4, 32, }, + { 0, 0, 1, 2, 5, 36, }, + { 2, 0, 1, 2, 5, 32, }, + { 1, 0, 1, 2, 5, 32, }, + { 0, 0, 1, 2, 6, 36, }, + { 2, 0, 1, 2, 6, 32, }, + { 1, 0, 1, 2, 6, 32, }, + { 0, 0, 1, 2, 7, 36, }, + { 2, 0, 1, 2, 7, 32, }, + { 1, 0, 1, 2, 7, 32, }, + { 0, 0, 1, 2, 8, 36, }, + { 2, 0, 1, 2, 8, 32, }, + { 1, 0, 1, 2, 8, 32, }, + { 0, 0, 1, 2, 9, 36, }, + { 2, 0, 1, 2, 9, 32, }, + { 1, 0, 1, 2, 9, 32, }, + { 0, 0, 1, 2, 10, 36, }, + { 2, 0, 1, 2, 10, 32, }, + { 1, 0, 1, 2, 10, 32, }, + { 0, 0, 1, 2, 11, 32, }, + { 2, 0, 1, 2, 11, 32, }, + { 1, 0, 1, 2, 11, 32, }, + { 0, 0, 1, 2, 12, 63, }, + { 2, 0, 1, 2, 12, 32, }, + { 1, 0, 1, 2, 12, 32, }, + { 0, 0, 1, 2, 13, 63, }, + { 2, 0, 1, 2, 13, 32, }, + { 1, 0, 1, 2, 13, 32, }, + { 0, 0, 1, 2, 14, 63, }, + { 2, 0, 1, 2, 14, 63, }, + { 1, 0, 1, 2, 14, 63, }, + { 0, 0, 1, 3, 1, 63, }, + { 2, 0, 1, 3, 1, 63, }, + { 1, 0, 1, 3, 1, 63, }, + { 0, 0, 1, 3, 2, 63, }, + { 2, 0, 1, 3, 2, 63, }, + { 1, 0, 1, 3, 2, 63, }, + { 0, 0, 1, 3, 3, 30, }, + { 2, 0, 1, 3, 3, 30, }, + { 1, 0, 1, 3, 3, 30, }, + { 0, 0, 1, 3, 4, 34, }, + { 2, 0, 1, 3, 4, 30, }, + { 1, 0, 1, 3, 4, 30, }, + { 0, 0, 1, 3, 5, 34, }, + { 2, 0, 1, 3, 5, 30, }, + { 1, 0, 1, 3, 5, 30, }, + { 0, 0, 1, 3, 6, 34, }, + { 2, 0, 1, 3, 6, 30, }, + { 1, 0, 1, 3, 6, 30, }, + { 0, 0, 1, 3, 7, 34, }, + { 2, 0, 1, 3, 7, 30, }, + { 1, 0, 1, 3, 7, 30, }, + { 0, 0, 1, 3, 8, 34, }, + { 2, 0, 1, 3, 8, 30, }, + { 1, 0, 1, 3, 8, 30, }, + { 0, 0, 1, 3, 9, 34, }, + { 2, 0, 1, 3, 9, 30, }, + { 1, 0, 1, 3, 9, 30, }, + { 0, 0, 1, 3, 10, 34, }, + { 2, 0, 1, 3, 10, 30, }, + { 1, 0, 1, 3, 10, 30, }, + { 0, 0, 1, 3, 11, 30, }, + { 2, 0, 1, 3, 11, 30, }, + { 1, 0, 1, 3, 11, 30, }, + { 0, 0, 1, 3, 12, 63, }, + { 2, 0, 1, 3, 12, 30, }, + { 1, 0, 1, 3, 12, 30, }, + { 0, 0, 1, 3, 13, 63, }, + { 2, 0, 1, 3, 13, 30, }, + { 1, 0, 1, 3, 13, 30, }, + { 0, 0, 1, 3, 14, 63, }, + { 2, 0, 1, 3, 14, 63, }, + { 1, 0, 1, 3, 14, 63, }, + { 0, 0, 1, 6, 1, 63, }, + { 2, 0, 1, 6, 1, 63, }, + { 1, 0, 1, 6, 1, 63, }, + { 0, 0, 1, 6, 2, 
63, }, + { 2, 0, 1, 6, 2, 63, }, + { 1, 0, 1, 6, 2, 63, }, + { 0, 0, 1, 6, 3, 28, }, + { 2, 0, 1, 6, 3, 28, }, + { 1, 0, 1, 6, 3, 28, }, + { 0, 0, 1, 6, 4, 32, }, + { 2, 0, 1, 6, 4, 28, }, + { 1, 0, 1, 6, 4, 28, }, + { 0, 0, 1, 6, 5, 32, }, + { 2, 0, 1, 6, 5, 28, }, + { 1, 0, 1, 6, 5, 28, }, + { 0, 0, 1, 6, 6, 32, }, + { 2, 0, 1, 6, 6, 28, }, + { 1, 0, 1, 6, 6, 28, }, + { 0, 0, 1, 6, 7, 32, }, + { 2, 0, 1, 6, 7, 28, }, + { 1, 0, 1, 6, 7, 28, }, + { 0, 0, 1, 6, 8, 32, }, + { 2, 0, 1, 6, 8, 28, }, + { 1, 0, 1, 6, 8, 28, }, + { 0, 0, 1, 6, 9, 32, }, + { 2, 0, 1, 6, 9, 28, }, + { 1, 0, 1, 6, 9, 28, }, + { 0, 0, 1, 6, 10, 32, }, + { 2, 0, 1, 6, 10, 28, }, + { 1, 0, 1, 6, 10, 28, }, + { 0, 0, 1, 6, 11, 28, }, + { 2, 0, 1, 6, 11, 28, }, + { 1, 0, 1, 6, 11, 28, }, + { 0, 0, 1, 6, 12, 63, }, + { 2, 0, 1, 6, 12, 28, }, + { 1, 0, 1, 6, 12, 28, }, + { 0, 0, 1, 6, 13, 63, }, + { 2, 0, 1, 6, 13, 28, }, + { 1, 0, 1, 6, 13, 28, }, + { 0, 0, 1, 6, 14, 63, }, + { 2, 0, 1, 6, 14, 63, }, + { 1, 0, 1, 6, 14, 63, }, + { 0, 0, 1, 7, 1, 63, }, + { 2, 0, 1, 7, 1, 63, }, + { 1, 0, 1, 7, 1, 63, }, + { 0, 0, 1, 7, 2, 63, }, + { 2, 0, 1, 7, 2, 63, }, + { 1, 0, 1, 7, 2, 63, }, + { 0, 0, 1, 7, 3, 26, }, + { 2, 0, 1, 7, 3, 26, }, + { 1, 0, 1, 7, 3, 26, }, + { 0, 0, 1, 7, 4, 30, }, + { 2, 0, 1, 7, 4, 26, }, + { 1, 0, 1, 7, 4, 26, }, + { 0, 0, 1, 7, 5, 30, }, + { 2, 0, 1, 7, 5, 26, }, + { 1, 0, 1, 7, 5, 26, }, + { 0, 0, 1, 7, 6, 30, }, + { 2, 0, 1, 7, 6, 26, }, + { 1, 0, 1, 7, 6, 26, }, + { 0, 0, 1, 7, 7, 30, }, + { 2, 0, 1, 7, 7, 26, }, + { 1, 0, 1, 7, 7, 26, }, + { 0, 0, 1, 7, 8, 30, }, + { 2, 0, 1, 7, 8, 26, }, + { 1, 0, 1, 7, 8, 26, }, + { 0, 0, 1, 7, 9, 30, }, + { 2, 0, 1, 7, 9, 26, }, + { 1, 0, 1, 7, 9, 26, }, + { 0, 0, 1, 7, 10, 30, }, + { 2, 0, 1, 7, 10, 26, }, + { 1, 0, 1, 7, 10, 26, }, + { 0, 0, 1, 7, 11, 26, }, + { 2, 0, 1, 7, 11, 26, }, + { 1, 0, 1, 7, 11, 26, }, + { 0, 0, 1, 7, 12, 63, }, + { 2, 0, 1, 7, 12, 26, }, + { 1, 0, 1, 7, 12, 26, }, + { 0, 0, 1, 7, 13, 63, }, + { 2, 0, 1, 7, 13, 26, }, + { 1, 0, 1, 7, 13, 26, }, + { 0, 0, 1, 7, 14, 63, }, + { 2, 0, 1, 7, 14, 63, }, + { 1, 0, 1, 7, 14, 63, }, + { 0, 1, 0, 1, 36, 30, }, + { 2, 1, 0, 1, 36, 32, }, + { 1, 1, 0, 1, 36, 32, }, + { 0, 1, 0, 1, 40, 30, }, + { 2, 1, 0, 1, 40, 32, }, + { 1, 1, 0, 1, 40, 32, }, + { 0, 1, 0, 1, 44, 30, }, + { 2, 1, 0, 1, 44, 32, }, + { 1, 1, 0, 1, 44, 32, }, + { 0, 1, 0, 1, 48, 30, }, + { 2, 1, 0, 1, 48, 32, }, + { 1, 1, 0, 1, 48, 32, }, + { 0, 1, 0, 1, 52, 36, }, + { 2, 1, 0, 1, 52, 32, }, + { 1, 1, 0, 1, 52, 32, }, + { 0, 1, 0, 1, 56, 34, }, + { 2, 1, 0, 1, 56, 32, }, + { 1, 1, 0, 1, 56, 32, }, + { 0, 1, 0, 1, 60, 32, }, + { 2, 1, 0, 1, 60, 32, }, + { 1, 1, 0, 1, 60, 32, }, + { 0, 1, 0, 1, 64, 28, }, + { 2, 1, 0, 1, 64, 32, }, + { 1, 1, 0, 1, 64, 32, }, + { 0, 1, 0, 1, 100, 30, }, + { 2, 1, 0, 1, 100, 32, }, + { 1, 1, 0, 1, 100, 32, }, + { 0, 1, 0, 1, 104, 30, }, + { 2, 1, 0, 1, 104, 32, }, + { 1, 1, 0, 1, 104, 32, }, + { 0, 1, 0, 1, 108, 32, }, + { 2, 1, 0, 1, 108, 32, }, + { 1, 1, 0, 1, 108, 32, }, + { 0, 1, 0, 1, 112, 34, }, + { 2, 1, 0, 1, 112, 32, }, + { 1, 1, 0, 1, 112, 32, }, + { 0, 1, 0, 1, 116, 34, }, + { 2, 1, 0, 1, 116, 32, }, + { 1, 1, 0, 1, 116, 32, }, + { 0, 1, 0, 1, 120, 36, }, + { 2, 1, 0, 1, 120, 32, }, + { 1, 1, 0, 1, 120, 32, }, + { 0, 1, 0, 1, 124, 34, }, + { 2, 1, 0, 1, 124, 32, }, + { 1, 1, 0, 1, 124, 32, }, + { 0, 1, 0, 1, 128, 32, }, + { 2, 1, 0, 1, 128, 32, }, + { 1, 1, 0, 1, 128, 32, }, + { 0, 1, 0, 1, 132, 30, }, + { 2, 1, 0, 1, 132, 32, }, + { 1, 1, 0, 1, 132, 32, }, + { 0, 1, 0, 1, 136, 30, }, + { 
2, 1, 0, 1, 136, 32, }, + { 1, 1, 0, 1, 136, 32, }, + { 0, 1, 0, 1, 140, 28, }, + { 2, 1, 0, 1, 140, 32, }, + { 1, 1, 0, 1, 140, 32, }, + { 0, 1, 0, 1, 149, 36, }, + { 2, 1, 0, 1, 149, 32, }, + { 1, 1, 0, 1, 149, 63, }, + { 0, 1, 0, 1, 153, 36, }, + { 2, 1, 0, 1, 153, 32, }, + { 1, 1, 0, 1, 153, 63, }, + { 0, 1, 0, 1, 157, 36, }, + { 2, 1, 0, 1, 157, 32, }, + { 1, 1, 0, 1, 157, 63, }, + { 0, 1, 0, 1, 161, 36, }, + { 2, 1, 0, 1, 161, 32, }, + { 1, 1, 0, 1, 161, 63, }, + { 0, 1, 0, 1, 165, 36, }, + { 2, 1, 0, 1, 165, 32, }, + { 1, 1, 0, 1, 165, 63, }, + { 0, 1, 0, 2, 36, 30, }, + { 2, 1, 0, 2, 36, 32, }, + { 1, 1, 0, 2, 36, 32, }, + { 0, 1, 0, 2, 40, 30, }, + { 2, 1, 0, 2, 40, 32, }, + { 1, 1, 0, 2, 40, 32, }, + { 0, 1, 0, 2, 44, 30, }, + { 2, 1, 0, 2, 44, 32, }, + { 1, 1, 0, 2, 44, 32, }, + { 0, 1, 0, 2, 48, 30, }, + { 2, 1, 0, 2, 48, 32, }, + { 1, 1, 0, 2, 48, 32, }, + { 0, 1, 0, 2, 52, 36, }, + { 2, 1, 0, 2, 52, 32, }, + { 1, 1, 0, 2, 52, 32, }, + { 0, 1, 0, 2, 56, 34, }, + { 2, 1, 0, 2, 56, 32, }, + { 1, 1, 0, 2, 56, 32, }, + { 0, 1, 0, 2, 60, 32, }, + { 2, 1, 0, 2, 60, 32, }, + { 1, 1, 0, 2, 60, 32, }, + { 0, 1, 0, 2, 64, 28, }, + { 2, 1, 0, 2, 64, 32, }, + { 1, 1, 0, 2, 64, 32, }, + { 0, 1, 0, 2, 100, 30, }, + { 2, 1, 0, 2, 100, 32, }, + { 1, 1, 0, 2, 100, 32, }, + { 0, 1, 0, 2, 104, 30, }, + { 2, 1, 0, 2, 104, 32, }, + { 1, 1, 0, 2, 104, 32, }, + { 0, 1, 0, 2, 108, 32, }, + { 2, 1, 0, 2, 108, 32, }, + { 1, 1, 0, 2, 108, 32, }, + { 0, 1, 0, 2, 112, 34, }, + { 2, 1, 0, 2, 112, 32, }, + { 1, 1, 0, 2, 112, 32, }, + { 0, 1, 0, 2, 116, 34, }, + { 2, 1, 0, 2, 116, 32, }, + { 1, 1, 0, 2, 116, 32, }, + { 0, 1, 0, 2, 120, 36, }, + { 2, 1, 0, 2, 120, 32, }, + { 1, 1, 0, 2, 120, 32, }, + { 0, 1, 0, 2, 124, 34, }, + { 2, 1, 0, 2, 124, 32, }, + { 1, 1, 0, 2, 124, 32, }, + { 0, 1, 0, 2, 128, 32, }, + { 2, 1, 0, 2, 128, 32, }, + { 1, 1, 0, 2, 128, 32, }, + { 0, 1, 0, 2, 132, 30, }, + { 2, 1, 0, 2, 132, 32, }, + { 1, 1, 0, 2, 132, 32, }, + { 0, 1, 0, 2, 136, 30, }, + { 2, 1, 0, 2, 136, 32, }, + { 1, 1, 0, 2, 136, 32, }, + { 0, 1, 0, 2, 140, 28, }, + { 2, 1, 0, 2, 140, 32, }, + { 1, 1, 0, 2, 140, 32, }, + { 0, 1, 0, 2, 149, 36, }, + { 2, 1, 0, 2, 149, 32, }, + { 1, 1, 0, 2, 149, 63, }, + { 0, 1, 0, 2, 153, 36, }, + { 2, 1, 0, 2, 153, 32, }, + { 1, 1, 0, 2, 153, 63, }, + { 0, 1, 0, 2, 157, 36, }, + { 2, 1, 0, 2, 157, 32, }, + { 1, 1, 0, 2, 157, 63, }, + { 0, 1, 0, 2, 161, 36, }, + { 2, 1, 0, 2, 161, 32, }, + { 1, 1, 0, 2, 161, 63, }, + { 0, 1, 0, 2, 165, 36, }, + { 2, 1, 0, 2, 165, 32, }, + { 1, 1, 0, 2, 165, 63, }, + { 0, 1, 0, 3, 36, 28, }, + { 2, 1, 0, 3, 36, 30, }, + { 1, 1, 0, 3, 36, 30, }, + { 0, 1, 0, 3, 40, 28, }, + { 2, 1, 0, 3, 40, 30, }, + { 1, 1, 0, 3, 40, 30, }, + { 0, 1, 0, 3, 44, 28, }, + { 2, 1, 0, 3, 44, 30, }, + { 1, 1, 0, 3, 44, 30, }, + { 0, 1, 0, 3, 48, 28, }, + { 2, 1, 0, 3, 48, 30, }, + { 1, 1, 0, 3, 48, 30, }, + { 0, 1, 0, 3, 52, 34, }, + { 2, 1, 0, 3, 52, 30, }, + { 1, 1, 0, 3, 52, 30, }, + { 0, 1, 0, 3, 56, 32, }, + { 2, 1, 0, 3, 56, 30, }, + { 1, 1, 0, 3, 56, 30, }, + { 0, 1, 0, 3, 60, 30, }, + { 2, 1, 0, 3, 60, 30, }, + { 1, 1, 0, 3, 60, 30, }, + { 0, 1, 0, 3, 64, 26, }, + { 2, 1, 0, 3, 64, 30, }, + { 1, 1, 0, 3, 64, 30, }, + { 0, 1, 0, 3, 100, 28, }, + { 2, 1, 0, 3, 100, 30, }, + { 1, 1, 0, 3, 100, 30, }, + { 0, 1, 0, 3, 104, 28, }, + { 2, 1, 0, 3, 104, 30, }, + { 1, 1, 0, 3, 104, 30, }, + { 0, 1, 0, 3, 108, 30, }, + { 2, 1, 0, 3, 108, 30, }, + { 1, 1, 0, 3, 108, 30, }, + { 0, 1, 0, 3, 112, 32, }, + { 2, 1, 0, 3, 112, 30, }, + { 1, 1, 0, 3, 112, 30, }, + { 0, 1, 0, 3, 116, 
32, }, + { 2, 1, 0, 3, 116, 30, }, + { 1, 1, 0, 3, 116, 30, }, + { 0, 1, 0, 3, 120, 34, }, + { 2, 1, 0, 3, 120, 30, }, + { 1, 1, 0, 3, 120, 30, }, + { 0, 1, 0, 3, 124, 32, }, + { 2, 1, 0, 3, 124, 30, }, + { 1, 1, 0, 3, 124, 30, }, + { 0, 1, 0, 3, 128, 30, }, + { 2, 1, 0, 3, 128, 30, }, + { 1, 1, 0, 3, 128, 30, }, + { 0, 1, 0, 3, 132, 28, }, + { 2, 1, 0, 3, 132, 30, }, + { 1, 1, 0, 3, 132, 30, }, + { 0, 1, 0, 3, 136, 28, }, + { 2, 1, 0, 3, 136, 30, }, + { 1, 1, 0, 3, 136, 30, }, + { 0, 1, 0, 3, 140, 26, }, + { 2, 1, 0, 3, 140, 30, }, + { 1, 1, 0, 3, 140, 30, }, + { 0, 1, 0, 3, 149, 34, }, + { 2, 1, 0, 3, 149, 30, }, + { 1, 1, 0, 3, 149, 63, }, + { 0, 1, 0, 3, 153, 34, }, + { 2, 1, 0, 3, 153, 30, }, + { 1, 1, 0, 3, 153, 63, }, + { 0, 1, 0, 3, 157, 34, }, + { 2, 1, 0, 3, 157, 30, }, + { 1, 1, 0, 3, 157, 63, }, + { 0, 1, 0, 3, 161, 34, }, + { 2, 1, 0, 3, 161, 30, }, + { 1, 1, 0, 3, 161, 63, }, + { 0, 1, 0, 3, 165, 34, }, + { 2, 1, 0, 3, 165, 30, }, + { 1, 1, 0, 3, 165, 63, }, + { 0, 1, 0, 6, 36, 26, }, + { 2, 1, 0, 6, 36, 28, }, + { 1, 1, 0, 6, 36, 28, }, + { 0, 1, 0, 6, 40, 26, }, + { 2, 1, 0, 6, 40, 28, }, + { 1, 1, 0, 6, 40, 28, }, + { 0, 1, 0, 6, 44, 26, }, + { 2, 1, 0, 6, 44, 28, }, + { 1, 1, 0, 6, 44, 28, }, + { 0, 1, 0, 6, 48, 26, }, + { 2, 1, 0, 6, 48, 28, }, + { 1, 1, 0, 6, 48, 28, }, + { 0, 1, 0, 6, 52, 32, }, + { 2, 1, 0, 6, 52, 28, }, + { 1, 1, 0, 6, 52, 28, }, + { 0, 1, 0, 6, 56, 30, }, + { 2, 1, 0, 6, 56, 28, }, + { 1, 1, 0, 6, 56, 28, }, + { 0, 1, 0, 6, 60, 28, }, + { 2, 1, 0, 6, 60, 28, }, + { 1, 1, 0, 6, 60, 28, }, + { 0, 1, 0, 6, 64, 24, }, + { 2, 1, 0, 6, 64, 28, }, + { 1, 1, 0, 6, 64, 28, }, + { 0, 1, 0, 6, 100, 26, }, + { 2, 1, 0, 6, 100, 28, }, + { 1, 1, 0, 6, 100, 28, }, + { 0, 1, 0, 6, 104, 26, }, + { 2, 1, 0, 6, 104, 28, }, + { 1, 1, 0, 6, 104, 28, }, + { 0, 1, 0, 6, 108, 28, }, + { 2, 1, 0, 6, 108, 28, }, + { 1, 1, 0, 6, 108, 28, }, + { 0, 1, 0, 6, 112, 30, }, + { 2, 1, 0, 6, 112, 28, }, + { 1, 1, 0, 6, 112, 28, }, + { 0, 1, 0, 6, 116, 30, }, + { 2, 1, 0, 6, 116, 28, }, + { 1, 1, 0, 6, 116, 28, }, + { 0, 1, 0, 6, 120, 32, }, + { 2, 1, 0, 6, 120, 28, }, + { 1, 1, 0, 6, 120, 28, }, + { 0, 1, 0, 6, 124, 30, }, + { 2, 1, 0, 6, 124, 28, }, + { 1, 1, 0, 6, 124, 28, }, + { 0, 1, 0, 6, 128, 28, }, + { 2, 1, 0, 6, 128, 28, }, + { 1, 1, 0, 6, 128, 28, }, + { 0, 1, 0, 6, 132, 26, }, + { 2, 1, 0, 6, 132, 28, }, + { 1, 1, 0, 6, 132, 28, }, + { 0, 1, 0, 6, 136, 26, }, + { 2, 1, 0, 6, 136, 28, }, + { 1, 1, 0, 6, 136, 28, }, + { 0, 1, 0, 6, 140, 24, }, + { 2, 1, 0, 6, 140, 28, }, + { 1, 1, 0, 6, 140, 28, }, + { 0, 1, 0, 6, 149, 32, }, + { 2, 1, 0, 6, 149, 28, }, + { 1, 1, 0, 6, 149, 63, }, + { 0, 1, 0, 6, 153, 32, }, + { 2, 1, 0, 6, 153, 28, }, + { 1, 1, 0, 6, 153, 63, }, + { 0, 1, 0, 6, 157, 32, }, + { 2, 1, 0, 6, 157, 28, }, + { 1, 1, 0, 6, 157, 63, }, + { 0, 1, 0, 6, 161, 32, }, + { 2, 1, 0, 6, 161, 28, }, + { 1, 1, 0, 6, 161, 63, }, + { 0, 1, 0, 6, 165, 32, }, + { 2, 1, 0, 6, 165, 28, }, + { 1, 1, 0, 6, 165, 63, }, + { 0, 1, 0, 7, 36, 24, }, + { 2, 1, 0, 7, 36, 26, }, + { 1, 1, 0, 7, 36, 26, }, + { 0, 1, 0, 7, 40, 24, }, + { 2, 1, 0, 7, 40, 26, }, + { 1, 1, 0, 7, 40, 26, }, + { 0, 1, 0, 7, 44, 24, }, + { 2, 1, 0, 7, 44, 26, }, + { 1, 1, 0, 7, 44, 26, }, + { 0, 1, 0, 7, 48, 24, }, + { 2, 1, 0, 7, 48, 26, }, + { 1, 1, 0, 7, 48, 26, }, + { 0, 1, 0, 7, 52, 30, }, + { 2, 1, 0, 7, 52, 26, }, + { 1, 1, 0, 7, 52, 26, }, + { 0, 1, 0, 7, 56, 28, }, + { 2, 1, 0, 7, 56, 26, }, + { 1, 1, 0, 7, 56, 26, }, + { 0, 1, 0, 7, 60, 26, }, + { 2, 1, 0, 7, 60, 26, }, + { 1, 1, 0, 7, 60, 26, }, + { 0, 
1, 0, 7, 64, 22, }, + { 2, 1, 0, 7, 64, 26, }, + { 1, 1, 0, 7, 64, 26, }, + { 0, 1, 0, 7, 100, 24, }, + { 2, 1, 0, 7, 100, 26, }, + { 1, 1, 0, 7, 100, 26, }, + { 0, 1, 0, 7, 104, 24, }, + { 2, 1, 0, 7, 104, 26, }, + { 1, 1, 0, 7, 104, 26, }, + { 0, 1, 0, 7, 108, 26, }, + { 2, 1, 0, 7, 108, 26, }, + { 1, 1, 0, 7, 108, 26, }, + { 0, 1, 0, 7, 112, 28, }, + { 2, 1, 0, 7, 112, 26, }, + { 1, 1, 0, 7, 112, 26, }, + { 0, 1, 0, 7, 116, 28, }, + { 2, 1, 0, 7, 116, 26, }, + { 1, 1, 0, 7, 116, 26, }, + { 0, 1, 0, 7, 120, 30, }, + { 2, 1, 0, 7, 120, 26, }, + { 1, 1, 0, 7, 120, 26, }, + { 0, 1, 0, 7, 124, 28, }, + { 2, 1, 0, 7, 124, 26, }, + { 1, 1, 0, 7, 124, 26, }, + { 0, 1, 0, 7, 128, 26, }, + { 2, 1, 0, 7, 128, 26, }, + { 1, 1, 0, 7, 128, 26, }, + { 0, 1, 0, 7, 132, 24, }, + { 2, 1, 0, 7, 132, 26, }, + { 1, 1, 0, 7, 132, 26, }, + { 0, 1, 0, 7, 136, 24, }, + { 2, 1, 0, 7, 136, 26, }, + { 1, 1, 0, 7, 136, 26, }, + { 0, 1, 0, 7, 140, 22, }, + { 2, 1, 0, 7, 140, 26, }, + { 1, 1, 0, 7, 140, 26, }, + { 0, 1, 0, 7, 149, 30, }, + { 2, 1, 0, 7, 149, 26, }, + { 1, 1, 0, 7, 149, 63, }, + { 0, 1, 0, 7, 153, 30, }, + { 2, 1, 0, 7, 153, 26, }, + { 1, 1, 0, 7, 153, 63, }, + { 0, 1, 0, 7, 157, 30, }, + { 2, 1, 0, 7, 157, 26, }, + { 1, 1, 0, 7, 157, 63, }, + { 0, 1, 0, 7, 161, 30, }, + { 2, 1, 0, 7, 161, 26, }, + { 1, 1, 0, 7, 161, 63, }, + { 0, 1, 0, 7, 165, 30, }, + { 2, 1, 0, 7, 165, 26, }, + { 1, 1, 0, 7, 165, 63, }, + { 0, 1, 1, 2, 38, 30, }, + { 2, 1, 1, 2, 38, 32, }, + { 1, 1, 1, 2, 38, 32, }, + { 0, 1, 1, 2, 46, 30, }, + { 2, 1, 1, 2, 46, 32, }, + { 1, 1, 1, 2, 46, 32, }, + { 0, 1, 1, 2, 54, 32, }, + { 2, 1, 1, 2, 54, 32, }, + { 1, 1, 1, 2, 54, 32, }, + { 0, 1, 1, 2, 62, 32, }, + { 2, 1, 1, 2, 62, 32, }, + { 1, 1, 1, 2, 62, 32, }, + { 0, 1, 1, 2, 102, 28, }, + { 2, 1, 1, 2, 102, 32, }, + { 1, 1, 1, 2, 102, 32, }, + { 0, 1, 1, 2, 110, 32, }, + { 2, 1, 1, 2, 110, 32, }, + { 1, 1, 1, 2, 110, 32, }, + { 0, 1, 1, 2, 118, 36, }, + { 2, 1, 1, 2, 118, 32, }, + { 1, 1, 1, 2, 118, 32, }, + { 0, 1, 1, 2, 126, 34, }, + { 2, 1, 1, 2, 126, 32, }, + { 1, 1, 1, 2, 126, 32, }, + { 0, 1, 1, 2, 134, 32, }, + { 2, 1, 1, 2, 134, 32, }, + { 1, 1, 1, 2, 134, 32, }, + { 0, 1, 1, 2, 151, 36, }, + { 2, 1, 1, 2, 151, 32, }, + { 1, 1, 1, 2, 151, 63, }, + { 0, 1, 1, 2, 159, 36, }, + { 2, 1, 1, 2, 159, 32, }, + { 1, 1, 1, 2, 159, 63, }, + { 0, 1, 1, 3, 38, 28, }, + { 2, 1, 1, 3, 38, 30, }, + { 1, 1, 1, 3, 38, 30, }, + { 0, 1, 1, 3, 46, 28, }, + { 2, 1, 1, 3, 46, 30, }, + { 1, 1, 1, 3, 46, 30, }, + { 0, 1, 1, 3, 54, 30, }, + { 2, 1, 1, 3, 54, 30, }, + { 1, 1, 1, 3, 54, 30, }, + { 0, 1, 1, 3, 62, 30, }, + { 2, 1, 1, 3, 62, 30, }, + { 1, 1, 1, 3, 62, 30, }, + { 0, 1, 1, 3, 102, 26, }, + { 2, 1, 1, 3, 102, 30, }, + { 1, 1, 1, 3, 102, 30, }, + { 0, 1, 1, 3, 110, 30, }, + { 2, 1, 1, 3, 110, 30, }, + { 1, 1, 1, 3, 110, 30, }, + { 0, 1, 1, 3, 118, 34, }, + { 2, 1, 1, 3, 118, 30, }, + { 1, 1, 1, 3, 118, 30, }, + { 0, 1, 1, 3, 126, 32, }, + { 2, 1, 1, 3, 126, 30, }, + { 1, 1, 1, 3, 126, 30, }, + { 0, 1, 1, 3, 134, 30, }, + { 2, 1, 1, 3, 134, 30, }, + { 1, 1, 1, 3, 134, 30, }, + { 0, 1, 1, 3, 151, 34, }, + { 2, 1, 1, 3, 151, 30, }, + { 1, 1, 1, 3, 151, 63, }, + { 0, 1, 1, 3, 159, 34, }, + { 2, 1, 1, 3, 159, 30, }, + { 1, 1, 1, 3, 159, 63, }, + { 0, 1, 1, 6, 38, 26, }, + { 2, 1, 1, 6, 38, 28, }, + { 1, 1, 1, 6, 38, 28, }, + { 0, 1, 1, 6, 46, 26, }, + { 2, 1, 1, 6, 46, 28, }, + { 1, 1, 1, 6, 46, 28, }, + { 0, 1, 1, 6, 54, 28, }, + { 2, 1, 1, 6, 54, 28, }, + { 1, 1, 1, 6, 54, 28, }, + { 0, 1, 1, 6, 62, 28, }, + { 2, 1, 1, 6, 62, 28, }, + { 1, 1, 1, 6, 
62, 28, }, + { 0, 1, 1, 6, 102, 24, }, + { 2, 1, 1, 6, 102, 28, }, + { 1, 1, 1, 6, 102, 28, }, + { 0, 1, 1, 6, 110, 28, }, + { 2, 1, 1, 6, 110, 28, }, + { 1, 1, 1, 6, 110, 28, }, + { 0, 1, 1, 6, 118, 32, }, + { 2, 1, 1, 6, 118, 28, }, + { 1, 1, 1, 6, 118, 28, }, + { 0, 1, 1, 6, 126, 30, }, + { 2, 1, 1, 6, 126, 28, }, + { 1, 1, 1, 6, 126, 28, }, + { 0, 1, 1, 6, 134, 28, }, + { 2, 1, 1, 6, 134, 28, }, + { 1, 1, 1, 6, 134, 28, }, + { 0, 1, 1, 6, 151, 32, }, + { 2, 1, 1, 6, 151, 28, }, + { 1, 1, 1, 6, 151, 63, }, + { 0, 1, 1, 6, 159, 32, }, + { 2, 1, 1, 6, 159, 28, }, + { 1, 1, 1, 6, 159, 63, }, + { 0, 1, 1, 7, 38, 24, }, + { 2, 1, 1, 7, 38, 26, }, + { 1, 1, 1, 7, 38, 26, }, + { 0, 1, 1, 7, 46, 24, }, + { 2, 1, 1, 7, 46, 26, }, + { 1, 1, 1, 7, 46, 26, }, + { 0, 1, 1, 7, 54, 26, }, + { 2, 1, 1, 7, 54, 26, }, + { 1, 1, 1, 7, 54, 26, }, + { 0, 1, 1, 7, 62, 26, }, + { 2, 1, 1, 7, 62, 26, }, + { 1, 1, 1, 7, 62, 26, }, + { 0, 1, 1, 7, 102, 22, }, + { 2, 1, 1, 7, 102, 26, }, + { 1, 1, 1, 7, 102, 26, }, + { 0, 1, 1, 7, 110, 26, }, + { 2, 1, 1, 7, 110, 26, }, + { 1, 1, 1, 7, 110, 26, }, + { 0, 1, 1, 7, 118, 30, }, + { 2, 1, 1, 7, 118, 26, }, + { 1, 1, 1, 7, 118, 26, }, + { 0, 1, 1, 7, 126, 28, }, + { 2, 1, 1, 7, 126, 26, }, + { 1, 1, 1, 7, 126, 26, }, + { 0, 1, 1, 7, 134, 26, }, + { 2, 1, 1, 7, 134, 26, }, + { 1, 1, 1, 7, 134, 26, }, + { 0, 1, 1, 7, 151, 30, }, + { 2, 1, 1, 7, 151, 26, }, + { 1, 1, 1, 7, 151, 63, }, + { 0, 1, 1, 7, 159, 30, }, + { 2, 1, 1, 7, 159, 26, }, + { 1, 1, 1, 7, 159, 63, }, + { 0, 1, 2, 4, 42, 30, }, + { 2, 1, 2, 4, 42, 32, }, + { 1, 1, 2, 4, 42, 32, }, + { 0, 1, 2, 4, 58, 28, }, + { 2, 1, 2, 4, 58, 32, }, + { 1, 1, 2, 4, 58, 32, }, + { 0, 1, 2, 4, 106, 30, }, + { 2, 1, 2, 4, 106, 32, }, + { 1, 1, 2, 4, 106, 32, }, + { 0, 1, 2, 4, 122, 34, }, + { 2, 1, 2, 4, 122, 32, }, + { 1, 1, 2, 4, 122, 32, }, + { 0, 1, 2, 4, 155, 36, }, + { 2, 1, 2, 4, 155, 32, }, + { 1, 1, 2, 4, 155, 63, }, + { 0, 1, 2, 5, 42, 28, }, + { 2, 1, 2, 5, 42, 30, }, + { 1, 1, 2, 5, 42, 30, }, + { 0, 1, 2, 5, 58, 26, }, + { 2, 1, 2, 5, 58, 30, }, + { 1, 1, 2, 5, 58, 30, }, + { 0, 1, 2, 5, 106, 28, }, + { 2, 1, 2, 5, 106, 30, }, + { 1, 1, 2, 5, 106, 30, }, + { 0, 1, 2, 5, 122, 32, }, + { 2, 1, 2, 5, 122, 30, }, + { 1, 1, 2, 5, 122, 30, }, + { 0, 1, 2, 5, 155, 34, }, + { 2, 1, 2, 5, 155, 30, }, + { 1, 1, 2, 5, 155, 63, }, + { 0, 1, 2, 8, 42, 26, }, + { 2, 1, 2, 8, 42, 28, }, + { 1, 1, 2, 8, 42, 28, }, + { 0, 1, 2, 8, 58, 24, }, + { 2, 1, 2, 8, 58, 28, }, + { 1, 1, 2, 8, 58, 28, }, + { 0, 1, 2, 8, 106, 26, }, + { 2, 1, 2, 8, 106, 28, }, + { 1, 1, 2, 8, 106, 28, }, + { 0, 1, 2, 8, 122, 30, }, + { 2, 1, 2, 8, 122, 28, }, + { 1, 1, 2, 8, 122, 28, }, + { 0, 1, 2, 8, 155, 32, }, + { 2, 1, 2, 8, 155, 28, }, + { 1, 1, 2, 8, 155, 63, }, + { 0, 1, 2, 9, 42, 24, }, + { 2, 1, 2, 9, 42, 26, }, + { 1, 1, 2, 9, 42, 26, }, + { 0, 1, 2, 9, 58, 22, }, + { 2, 1, 2, 9, 58, 26, }, + { 1, 1, 2, 9, 58, 26, }, + { 0, 1, 2, 9, 106, 24, }, + { 2, 1, 2, 9, 106, 26, }, + { 1, 1, 2, 9, 106, 26, }, + { 0, 1, 2, 9, 122, 28, }, + { 2, 1, 2, 9, 122, 26, }, + { 1, 1, 2, 9, 122, 26, }, + { 0, 1, 2, 9, 155, 30, }, + { 2, 1, 2, 9, 155, 26, }, + { 1, 1, 2, 9, 155, 63, }, +}; + +RTW_DECL_TABLE_TXPWR_LMT(rtw8814a_txpwr_lmt); + +static const struct rtw_txpwr_lmt_cfg_pair rtw8814a_txpwr_lmt_type0[] = { + { 0, 0, 0, 0, 1, 32, }, + { 2, 0, 0, 0, 1, 32, }, + { 1, 0, 0, 0, 1, 32, }, + { 0, 0, 0, 0, 2, 32, }, + { 2, 0, 0, 0, 2, 32, }, + { 1, 0, 0, 0, 2, 32, }, + { 0, 0, 0, 0, 3, 32, }, + { 2, 0, 0, 0, 3, 32, }, + { 1, 0, 0, 0, 3, 32, }, + { 0, 0, 0, 0, 4, 32, 
}, + { 2, 0, 0, 0, 4, 32, }, + { 1, 0, 0, 0, 4, 32, }, + { 0, 0, 0, 0, 5, 32, }, + { 2, 0, 0, 0, 5, 32, }, + { 1, 0, 0, 0, 5, 32, }, + { 0, 0, 0, 0, 6, 32, }, + { 2, 0, 0, 0, 6, 32, }, + { 1, 0, 0, 0, 6, 32, }, + { 0, 0, 0, 0, 7, 32, }, + { 2, 0, 0, 0, 7, 32, }, + { 1, 0, 0, 0, 7, 32, }, + { 0, 0, 0, 0, 8, 32, }, + { 2, 0, 0, 0, 8, 32, }, + { 1, 0, 0, 0, 8, 32, }, + { 0, 0, 0, 0, 9, 32, }, + { 2, 0, 0, 0, 9, 32, }, + { 1, 0, 0, 0, 9, 32, }, + { 0, 0, 0, 0, 10, 32, }, + { 2, 0, 0, 0, 10, 32, }, + { 1, 0, 0, 0, 10, 32, }, + { 0, 0, 0, 0, 11, 32, }, + { 2, 0, 0, 0, 11, 32, }, + { 1, 0, 0, 0, 11, 32, }, + { 0, 0, 0, 0, 12, 24, }, + { 2, 0, 0, 0, 12, 32, }, + { 1, 0, 0, 0, 12, 32, }, + { 0, 0, 0, 0, 13, 16, }, + { 2, 0, 0, 0, 13, 32, }, + { 1, 0, 0, 0, 13, 32, }, + { 0, 0, 0, 0, 14, 63, }, + { 2, 0, 0, 0, 14, 63, }, + { 1, 0, 0, 0, 14, 32, }, + { 0, 0, 0, 1, 1, 28, }, + { 2, 0, 0, 1, 1, 32, }, + { 1, 0, 0, 1, 1, 32, }, + { 0, 0, 0, 1, 2, 32, }, + { 2, 0, 0, 1, 2, 32, }, + { 1, 0, 0, 1, 2, 32, }, + { 0, 0, 0, 1, 3, 32, }, + { 2, 0, 0, 1, 3, 32, }, + { 1, 0, 0, 1, 3, 32, }, + { 0, 0, 0, 1, 4, 32, }, + { 2, 0, 0, 1, 4, 32, }, + { 1, 0, 0, 1, 4, 32, }, + { 0, 0, 0, 1, 5, 32, }, + { 2, 0, 0, 1, 5, 32, }, + { 1, 0, 0, 1, 5, 32, }, + { 0, 0, 0, 1, 6, 32, }, + { 2, 0, 0, 1, 6, 32, }, + { 1, 0, 0, 1, 6, 32, }, + { 0, 0, 0, 1, 7, 32, }, + { 2, 0, 0, 1, 7, 32, }, + { 1, 0, 0, 1, 7, 32, }, + { 0, 0, 0, 1, 8, 32, }, + { 2, 0, 0, 1, 8, 32, }, + { 1, 0, 0, 1, 8, 32, }, + { 0, 0, 0, 1, 9, 32, }, + { 2, 0, 0, 1, 9, 32, }, + { 1, 0, 0, 1, 9, 32, }, + { 0, 0, 0, 1, 10, 32, }, + { 2, 0, 0, 1, 10, 32, }, + { 1, 0, 0, 1, 10, 32, }, + { 0, 0, 0, 1, 11, 28, }, + { 2, 0, 0, 1, 11, 32, }, + { 1, 0, 0, 1, 11, 32, }, + { 0, 0, 0, 1, 12, 18, }, + { 2, 0, 0, 1, 12, 32, }, + { 1, 0, 0, 1, 12, 32, }, + { 0, 0, 0, 1, 13, 8, }, + { 2, 0, 0, 1, 13, 32, }, + { 1, 0, 0, 1, 13, 32, }, + { 0, 0, 0, 1, 14, 63, }, + { 2, 0, 0, 1, 14, 63, }, + { 1, 0, 0, 1, 14, 63, }, + { 0, 0, 0, 2, 1, 26, }, + { 2, 0, 0, 2, 1, 32, }, + { 1, 0, 0, 2, 1, 32, }, + { 0, 0, 0, 2, 2, 32, }, + { 2, 0, 0, 2, 2, 32, }, + { 1, 0, 0, 2, 2, 32, }, + { 0, 0, 0, 2, 3, 32, }, + { 2, 0, 0, 2, 3, 32, }, + { 1, 0, 0, 2, 3, 32, }, + { 0, 0, 0, 2, 4, 32, }, + { 2, 0, 0, 2, 4, 32, }, + { 1, 0, 0, 2, 4, 32, }, + { 0, 0, 0, 2, 5, 32, }, + { 2, 0, 0, 2, 5, 32, }, + { 1, 0, 0, 2, 5, 32, }, + { 0, 0, 0, 2, 6, 32, }, + { 2, 0, 0, 2, 6, 32, }, + { 1, 0, 0, 2, 6, 32, }, + { 0, 0, 0, 2, 7, 32, }, + { 2, 0, 0, 2, 7, 32, }, + { 1, 0, 0, 2, 7, 32, }, + { 0, 0, 0, 2, 8, 32, }, + { 2, 0, 0, 2, 8, 32, }, + { 1, 0, 0, 2, 8, 32, }, + { 0, 0, 0, 2, 9, 32, }, + { 2, 0, 0, 2, 9, 32, }, + { 1, 0, 0, 2, 9, 32, }, + { 0, 0, 0, 2, 10, 32, }, + { 2, 0, 0, 2, 10, 32, }, + { 1, 0, 0, 2, 10, 32, }, + { 0, 0, 0, 2, 11, 26, }, + { 2, 0, 0, 2, 11, 32, }, + { 1, 0, 0, 2, 11, 32, }, + { 0, 0, 0, 2, 12, 16, }, + { 2, 0, 0, 2, 12, 32, }, + { 1, 0, 0, 2, 12, 32, }, + { 0, 0, 0, 2, 13, 6, }, + { 2, 0, 0, 2, 13, 32, }, + { 1, 0, 0, 2, 13, 32, }, + { 0, 0, 0, 2, 14, 63, }, + { 2, 0, 0, 2, 14, 63, }, + { 1, 0, 0, 2, 14, 63, }, + { 0, 0, 0, 3, 1, 24, }, + { 2, 0, 0, 3, 1, 30, }, + { 1, 0, 0, 3, 1, 30, }, + { 0, 0, 0, 3, 2, 30, }, + { 2, 0, 0, 3, 2, 30, }, + { 1, 0, 0, 3, 2, 30, }, + { 0, 0, 0, 3, 3, 30, }, + { 2, 0, 0, 3, 3, 30, }, + { 1, 0, 0, 3, 3, 30, }, + { 0, 0, 0, 3, 4, 30, }, + { 2, 0, 0, 3, 4, 30, }, + { 1, 0, 0, 3, 4, 30, }, + { 0, 0, 0, 3, 5, 30, }, + { 2, 0, 0, 3, 5, 30, }, + { 1, 0, 0, 3, 5, 30, }, + { 0, 0, 0, 3, 6, 30, }, + { 2, 0, 0, 3, 6, 30, }, + { 1, 0, 0, 3, 6, 30, }, + { 0, 0, 0, 3, 7, 30, 
}, + { 2, 0, 0, 3, 7, 30, }, + { 1, 0, 0, 3, 7, 30, }, + { 0, 0, 0, 3, 8, 30, }, + { 2, 0, 0, 3, 8, 30, }, + { 1, 0, 0, 3, 8, 30, }, + { 0, 0, 0, 3, 9, 30, }, + { 2, 0, 0, 3, 9, 30, }, + { 1, 0, 0, 3, 9, 30, }, + { 0, 0, 0, 3, 10, 30, }, + { 2, 0, 0, 3, 10, 30, }, + { 1, 0, 0, 3, 10, 30, }, + { 0, 0, 0, 3, 11, 24, }, + { 2, 0, 0, 3, 11, 30, }, + { 1, 0, 0, 3, 11, 30, }, + { 0, 0, 0, 3, 12, 14, }, + { 2, 0, 0, 3, 12, 30, }, + { 1, 0, 0, 3, 12, 30, }, + { 0, 0, 0, 3, 13, 4, }, + { 2, 0, 0, 3, 13, 30, }, + { 1, 0, 0, 3, 13, 30, }, + { 0, 0, 0, 3, 14, 63, }, + { 2, 0, 0, 3, 14, 63, }, + { 1, 0, 0, 3, 14, 63, }, + { 0, 0, 0, 6, 1, 22, }, + { 2, 0, 0, 6, 1, 28, }, + { 1, 0, 0, 6, 1, 28, }, + { 0, 0, 0, 6, 2, 28, }, + { 2, 0, 0, 6, 2, 28, }, + { 1, 0, 0, 6, 2, 28, }, + { 0, 0, 0, 6, 3, 28, }, + { 2, 0, 0, 6, 3, 28, }, + { 1, 0, 0, 6, 3, 28, }, + { 0, 0, 0, 6, 4, 28, }, + { 2, 0, 0, 6, 4, 28, }, + { 1, 0, 0, 6, 4, 28, }, + { 0, 0, 0, 6, 5, 28, }, + { 2, 0, 0, 6, 5, 28, }, + { 1, 0, 0, 6, 5, 28, }, + { 0, 0, 0, 6, 6, 28, }, + { 2, 0, 0, 6, 6, 28, }, + { 1, 0, 0, 6, 6, 28, }, + { 0, 0, 0, 6, 7, 28, }, + { 2, 0, 0, 6, 7, 28, }, + { 1, 0, 0, 6, 7, 28, }, + { 0, 0, 0, 6, 8, 28, }, + { 2, 0, 0, 6, 8, 28, }, + { 1, 0, 0, 6, 8, 28, }, + { 0, 0, 0, 6, 9, 28, }, + { 2, 0, 0, 6, 9, 28, }, + { 1, 0, 0, 6, 9, 28, }, + { 0, 0, 0, 6, 10, 28, }, + { 2, 0, 0, 6, 10, 28, }, + { 1, 0, 0, 6, 10, 28, }, + { 0, 0, 0, 6, 11, 22, }, + { 2, 0, 0, 6, 11, 28, }, + { 1, 0, 0, 6, 11, 28, }, + { 0, 0, 0, 6, 12, 14, }, + { 2, 0, 0, 6, 12, 28, }, + { 1, 0, 0, 6, 12, 28, }, + { 0, 0, 0, 6, 13, 4, }, + { 2, 0, 0, 6, 13, 28, }, + { 1, 0, 0, 6, 13, 28, }, + { 0, 0, 0, 6, 14, 63, }, + { 2, 0, 0, 6, 14, 63, }, + { 1, 0, 0, 6, 14, 63, }, + { 0, 0, 0, 7, 1, 20, }, + { 2, 0, 0, 7, 1, 26, }, + { 1, 0, 0, 7, 1, 26, }, + { 0, 0, 0, 7, 2, 26, }, + { 2, 0, 0, 7, 2, 26, }, + { 1, 0, 0, 7, 2, 26, }, + { 0, 0, 0, 7, 3, 26, }, + { 2, 0, 0, 7, 3, 26, }, + { 1, 0, 0, 7, 3, 26, }, + { 0, 0, 0, 7, 4, 26, }, + { 2, 0, 0, 7, 4, 26, }, + { 1, 0, 0, 7, 4, 26, }, + { 0, 0, 0, 7, 5, 26, }, + { 2, 0, 0, 7, 5, 26, }, + { 1, 0, 0, 7, 5, 26, }, + { 0, 0, 0, 7, 6, 26, }, + { 2, 0, 0, 7, 6, 26, }, + { 1, 0, 0, 7, 6, 26, }, + { 0, 0, 0, 7, 7, 26, }, + { 2, 0, 0, 7, 7, 26, }, + { 1, 0, 0, 7, 7, 26, }, + { 0, 0, 0, 7, 8, 26, }, + { 2, 0, 0, 7, 8, 26, }, + { 1, 0, 0, 7, 8, 26, }, + { 0, 0, 0, 7, 9, 26, }, + { 2, 0, 0, 7, 9, 26, }, + { 1, 0, 0, 7, 9, 26, }, + { 0, 0, 0, 7, 10, 26, }, + { 2, 0, 0, 7, 10, 26, }, + { 1, 0, 0, 7, 10, 26, }, + { 0, 0, 0, 7, 11, 20, }, + { 2, 0, 0, 7, 11, 26, }, + { 1, 0, 0, 7, 11, 26, }, + { 0, 0, 0, 7, 12, 14, }, + { 2, 0, 0, 7, 12, 26, }, + { 1, 0, 0, 7, 12, 26, }, + { 0, 0, 0, 7, 13, 4, }, + { 2, 0, 0, 7, 13, 26, }, + { 1, 0, 0, 7, 13, 26, }, + { 0, 0, 0, 7, 14, 63, }, + { 2, 0, 0, 7, 14, 63, }, + { 1, 0, 0, 7, 14, 63, }, + { 0, 0, 1, 2, 1, 63, }, + { 2, 0, 1, 2, 1, 63, }, + { 1, 0, 1, 2, 1, 63, }, + { 0, 0, 1, 2, 2, 63, }, + { 2, 0, 1, 2, 2, 63, }, + { 1, 0, 1, 2, 2, 63, }, + { 0, 0, 1, 2, 3, 26, }, + { 2, 0, 1, 2, 3, 32, }, + { 1, 0, 1, 2, 3, 32, }, + { 0, 0, 1, 2, 4, 32, }, + { 2, 0, 1, 2, 4, 32, }, + { 1, 0, 1, 2, 4, 32, }, + { 0, 0, 1, 2, 5, 32, }, + { 2, 0, 1, 2, 5, 32, }, + { 1, 0, 1, 2, 5, 32, }, + { 0, 0, 1, 2, 6, 32, }, + { 2, 0, 1, 2, 6, 32, }, + { 1, 0, 1, 2, 6, 32, }, + { 0, 0, 1, 2, 7, 32, }, + { 2, 0, 1, 2, 7, 32, }, + { 1, 0, 1, 2, 7, 32, }, + { 0, 0, 1, 2, 8, 32, }, + { 2, 0, 1, 2, 8, 32, }, + { 1, 0, 1, 2, 8, 32, }, + { 0, 0, 1, 2, 9, 32, }, + { 2, 0, 1, 2, 9, 32, }, + { 1, 0, 1, 2, 9, 32, }, + { 0, 0, 1, 2, 10, 32, 
}, + { 2, 0, 1, 2, 10, 32, }, + { 1, 0, 1, 2, 10, 32, }, + { 0, 0, 1, 2, 11, 26, }, + { 2, 0, 1, 2, 11, 32, }, + { 1, 0, 1, 2, 11, 32, }, + { 0, 0, 1, 2, 12, 16, }, + { 2, 0, 1, 2, 12, 32, }, + { 1, 0, 1, 2, 12, 32, }, + { 0, 0, 1, 2, 13, 10, }, + { 2, 0, 1, 2, 13, 32, }, + { 1, 0, 1, 2, 13, 32, }, + { 0, 0, 1, 2, 14, 63, }, + { 2, 0, 1, 2, 14, 63, }, + { 1, 0, 1, 2, 14, 63, }, + { 0, 0, 1, 3, 1, 63, }, + { 2, 0, 1, 3, 1, 63, }, + { 1, 0, 1, 3, 1, 63, }, + { 0, 0, 1, 3, 2, 63, }, + { 2, 0, 1, 3, 2, 63, }, + { 1, 0, 1, 3, 2, 63, }, + { 0, 0, 1, 3, 3, 24, }, + { 2, 0, 1, 3, 3, 30, }, + { 1, 0, 1, 3, 3, 30, }, + { 0, 0, 1, 3, 4, 30, }, + { 2, 0, 1, 3, 4, 30, }, + { 1, 0, 1, 3, 4, 30, }, + { 0, 0, 1, 3, 5, 30, }, + { 2, 0, 1, 3, 5, 30, }, + { 1, 0, 1, 3, 5, 30, }, + { 0, 0, 1, 3, 6, 30, }, + { 2, 0, 1, 3, 6, 30, }, + { 1, 0, 1, 3, 6, 30, }, + { 0, 0, 1, 3, 7, 30, }, + { 2, 0, 1, 3, 7, 30, }, + { 1, 0, 1, 3, 7, 30, }, + { 0, 0, 1, 3, 8, 30, }, + { 2, 0, 1, 3, 8, 30, }, + { 1, 0, 1, 3, 8, 30, }, + { 0, 0, 1, 3, 9, 30, }, + { 2, 0, 1, 3, 9, 30, }, + { 1, 0, 1, 3, 9, 30, }, + { 0, 0, 1, 3, 10, 30, }, + { 2, 0, 1, 3, 10, 30, }, + { 1, 0, 1, 3, 10, 30, }, + { 0, 0, 1, 3, 11, 24, }, + { 2, 0, 1, 3, 11, 30, }, + { 1, 0, 1, 3, 11, 30, }, + { 0, 0, 1, 3, 12, 14, }, + { 2, 0, 1, 3, 12, 30, }, + { 1, 0, 1, 3, 12, 30, }, + { 0, 0, 1, 3, 13, 8, }, + { 2, 0, 1, 3, 13, 30, }, + { 1, 0, 1, 3, 13, 30, }, + { 0, 0, 1, 3, 14, 63, }, + { 2, 0, 1, 3, 14, 63, }, + { 1, 0, 1, 3, 14, 63, }, + { 0, 0, 1, 6, 1, 63, }, + { 2, 0, 1, 6, 1, 63, }, + { 1, 0, 1, 6, 1, 63, }, + { 0, 0, 1, 6, 2, 63, }, + { 2, 0, 1, 6, 2, 63, }, + { 1, 0, 1, 6, 2, 63, }, + { 0, 0, 1, 6, 3, 22, }, + { 2, 0, 1, 6, 3, 28, }, + { 1, 0, 1, 6, 3, 28, }, + { 0, 0, 1, 6, 4, 28, }, + { 2, 0, 1, 6, 4, 28, }, + { 1, 0, 1, 6, 4, 28, }, + { 0, 0, 1, 6, 5, 28, }, + { 2, 0, 1, 6, 5, 28, }, + { 1, 0, 1, 6, 5, 28, }, + { 0, 0, 1, 6, 6, 28, }, + { 2, 0, 1, 6, 6, 28, }, + { 1, 0, 1, 6, 6, 28, }, + { 0, 0, 1, 6, 7, 28, }, + { 2, 0, 1, 6, 7, 28, }, + { 1, 0, 1, 6, 7, 28, }, + { 0, 0, 1, 6, 8, 28, }, + { 2, 0, 1, 6, 8, 28, }, + { 1, 0, 1, 6, 8, 28, }, + { 0, 0, 1, 6, 9, 28, }, + { 2, 0, 1, 6, 9, 28, }, + { 1, 0, 1, 6, 9, 28, }, + { 0, 0, 1, 6, 10, 28, }, + { 2, 0, 1, 6, 10, 28, }, + { 1, 0, 1, 6, 10, 28, }, + { 0, 0, 1, 6, 11, 22, }, + { 2, 0, 1, 6, 11, 28, }, + { 1, 0, 1, 6, 11, 28, }, + { 0, 0, 1, 6, 12, 14, }, + { 2, 0, 1, 6, 12, 28, }, + { 1, 0, 1, 6, 12, 28, }, + { 0, 0, 1, 6, 13, 8, }, + { 2, 0, 1, 6, 13, 28, }, + { 1, 0, 1, 6, 13, 28, }, + { 0, 0, 1, 6, 14, 63, }, + { 2, 0, 1, 6, 14, 63, }, + { 1, 0, 1, 6, 14, 63, }, + { 0, 0, 1, 7, 1, 63, }, + { 2, 0, 1, 7, 1, 63, }, + { 1, 0, 1, 7, 1, 63, }, + { 0, 0, 1, 7, 2, 63, }, + { 2, 0, 1, 7, 2, 63, }, + { 1, 0, 1, 7, 2, 63, }, + { 0, 0, 1, 7, 3, 20, }, + { 2, 0, 1, 7, 3, 26, }, + { 1, 0, 1, 7, 3, 26, }, + { 0, 0, 1, 7, 4, 26, }, + { 2, 0, 1, 7, 4, 26, }, + { 1, 0, 1, 7, 4, 26, }, + { 0, 0, 1, 7, 5, 26, }, + { 2, 0, 1, 7, 5, 26, }, + { 1, 0, 1, 7, 5, 26, }, + { 0, 0, 1, 7, 6, 26, }, + { 2, 0, 1, 7, 6, 26, }, + { 1, 0, 1, 7, 6, 26, }, + { 0, 0, 1, 7, 7, 26, }, + { 2, 0, 1, 7, 7, 26, }, + { 1, 0, 1, 7, 7, 26, }, + { 0, 0, 1, 7, 8, 26, }, + { 2, 0, 1, 7, 8, 26, }, + { 1, 0, 1, 7, 8, 26, }, + { 0, 0, 1, 7, 9, 26, }, + { 2, 0, 1, 7, 9, 26, }, + { 1, 0, 1, 7, 9, 26, }, + { 0, 0, 1, 7, 10, 26, }, + { 2, 0, 1, 7, 10, 26, }, + { 1, 0, 1, 7, 10, 26, }, + { 0, 0, 1, 7, 11, 20, }, + { 2, 0, 1, 7, 11, 26, }, + { 1, 0, 1, 7, 11, 26, }, + { 0, 0, 1, 7, 12, 14, }, + { 2, 0, 1, 7, 12, 26, }, + { 1, 0, 1, 7, 12, 26, }, + { 0, 0, 1, 7, 
13, 8, }, + { 2, 0, 1, 7, 13, 26, }, + { 1, 0, 1, 7, 13, 26, }, + { 0, 0, 1, 7, 14, 63, }, + { 2, 0, 1, 7, 14, 63, }, + { 1, 0, 1, 7, 14, 63, }, + { 0, 1, 0, 1, 36, 28, }, + { 2, 1, 0, 1, 36, 32, }, + { 1, 1, 0, 1, 36, 32, }, + { 0, 1, 0, 1, 40, 32, }, + { 2, 1, 0, 1, 40, 32, }, + { 1, 1, 0, 1, 40, 32, }, + { 0, 1, 0, 1, 44, 32, }, + { 2, 1, 0, 1, 44, 32, }, + { 1, 1, 0, 1, 44, 32, }, + { 0, 1, 0, 1, 48, 32, }, + { 2, 1, 0, 1, 48, 32, }, + { 1, 1, 0, 1, 48, 32, }, + { 0, 1, 0, 1, 52, 32, }, + { 2, 1, 0, 1, 52, 32, }, + { 1, 1, 0, 1, 52, 32, }, + { 0, 1, 0, 1, 56, 32, }, + { 2, 1, 0, 1, 56, 32, }, + { 1, 1, 0, 1, 56, 32, }, + { 0, 1, 0, 1, 60, 32, }, + { 2, 1, 0, 1, 60, 32, }, + { 1, 1, 0, 1, 60, 32, }, + { 0, 1, 0, 1, 64, 28, }, + { 2, 1, 0, 1, 64, 32, }, + { 1, 1, 0, 1, 64, 32, }, + { 0, 1, 0, 1, 100, 28, }, + { 2, 1, 0, 1, 100, 32, }, + { 1, 1, 0, 1, 100, 32, }, + { 0, 1, 0, 1, 104, 32, }, + { 2, 1, 0, 1, 104, 32, }, + { 1, 1, 0, 1, 104, 32, }, + { 0, 1, 0, 1, 108, 32, }, + { 2, 1, 0, 1, 108, 32, }, + { 1, 1, 0, 1, 108, 32, }, + { 0, 1, 0, 1, 112, 32, }, + { 2, 1, 0, 1, 112, 32, }, + { 1, 1, 0, 1, 112, 32, }, + { 0, 1, 0, 1, 116, 32, }, + { 2, 1, 0, 1, 116, 32, }, + { 1, 1, 0, 1, 116, 32, }, + { 0, 1, 0, 1, 120, 32, }, + { 2, 1, 0, 1, 120, 32, }, + { 1, 1, 0, 1, 120, 32, }, + { 0, 1, 0, 1, 124, 32, }, + { 2, 1, 0, 1, 124, 32, }, + { 1, 1, 0, 1, 124, 32, }, + { 0, 1, 0, 1, 128, 32, }, + { 2, 1, 0, 1, 128, 32, }, + { 1, 1, 0, 1, 128, 32, }, + { 0, 1, 0, 1, 132, 32, }, + { 2, 1, 0, 1, 132, 32, }, + { 1, 1, 0, 1, 132, 32, }, + { 0, 1, 0, 1, 136, 32, }, + { 2, 1, 0, 1, 136, 32, }, + { 1, 1, 0, 1, 136, 32, }, + { 0, 1, 0, 1, 140, 28, }, + { 2, 1, 0, 1, 140, 32, }, + { 1, 1, 0, 1, 140, 32, }, + { 0, 1, 0, 1, 149, 28, }, + { 2, 1, 0, 1, 149, 32, }, + { 1, 1, 0, 1, 149, 63, }, + { 0, 1, 0, 1, 153, 32, }, + { 2, 1, 0, 1, 153, 32, }, + { 1, 1, 0, 1, 153, 63, }, + { 0, 1, 0, 1, 157, 32, }, + { 2, 1, 0, 1, 157, 32, }, + { 1, 1, 0, 1, 157, 63, }, + { 0, 1, 0, 1, 161, 32, }, + { 2, 1, 0, 1, 161, 32, }, + { 1, 1, 0, 1, 161, 63, }, + { 0, 1, 0, 1, 165, 32, }, + { 2, 1, 0, 1, 165, 32, }, + { 1, 1, 0, 1, 165, 63, }, + { 0, 1, 0, 2, 36, 26, }, + { 2, 1, 0, 2, 36, 32, }, + { 1, 1, 0, 2, 36, 32, }, + { 0, 1, 0, 2, 40, 32, }, + { 2, 1, 0, 2, 40, 32, }, + { 1, 1, 0, 2, 40, 32, }, + { 0, 1, 0, 2, 44, 32, }, + { 2, 1, 0, 2, 44, 32, }, + { 1, 1, 0, 2, 44, 32, }, + { 0, 1, 0, 2, 48, 32, }, + { 2, 1, 0, 2, 48, 32, }, + { 1, 1, 0, 2, 48, 32, }, + { 0, 1, 0, 2, 52, 32, }, + { 2, 1, 0, 2, 52, 32, }, + { 1, 1, 0, 2, 52, 32, }, + { 0, 1, 0, 2, 56, 32, }, + { 2, 1, 0, 2, 56, 32, }, + { 1, 1, 0, 2, 56, 32, }, + { 0, 1, 0, 2, 60, 32, }, + { 2, 1, 0, 2, 60, 32, }, + { 1, 1, 0, 2, 60, 32, }, + { 0, 1, 0, 2, 64, 26, }, + { 2, 1, 0, 2, 64, 32, }, + { 1, 1, 0, 2, 64, 32, }, + { 0, 1, 0, 2, 100, 26, }, + { 2, 1, 0, 2, 100, 32, }, + { 1, 1, 0, 2, 100, 32, }, + { 0, 1, 0, 2, 104, 32, }, + { 2, 1, 0, 2, 104, 32, }, + { 1, 1, 0, 2, 104, 32, }, + { 0, 1, 0, 2, 108, 32, }, + { 2, 1, 0, 2, 108, 32, }, + { 1, 1, 0, 2, 108, 32, }, + { 0, 1, 0, 2, 112, 32, }, + { 2, 1, 0, 2, 112, 32, }, + { 1, 1, 0, 2, 112, 32, }, + { 0, 1, 0, 2, 116, 32, }, + { 2, 1, 0, 2, 116, 32, }, + { 1, 1, 0, 2, 116, 32, }, + { 0, 1, 0, 2, 120, 32, }, + { 2, 1, 0, 2, 120, 32, }, + { 1, 1, 0, 2, 120, 32, }, + { 0, 1, 0, 2, 124, 32, }, + { 2, 1, 0, 2, 124, 32, }, + { 1, 1, 0, 2, 124, 32, }, + { 0, 1, 0, 2, 128, 32, }, + { 2, 1, 0, 2, 128, 32, }, + { 1, 1, 0, 2, 128, 32, }, + { 0, 1, 0, 2, 132, 32, }, + { 2, 1, 0, 2, 132, 32, }, + { 1, 1, 0, 2, 132, 32, }, + { 0, 1, 0, 
2, 136, 32, }, + { 2, 1, 0, 2, 136, 32, }, + { 1, 1, 0, 2, 136, 32, }, + { 0, 1, 0, 2, 140, 26, }, + { 2, 1, 0, 2, 140, 32, }, + { 1, 1, 0, 2, 140, 32, }, + { 0, 1, 0, 2, 149, 26, }, + { 2, 1, 0, 2, 149, 32, }, + { 1, 1, 0, 2, 149, 63, }, + { 0, 1, 0, 2, 153, 32, }, + { 2, 1, 0, 2, 153, 32, }, + { 1, 1, 0, 2, 153, 63, }, + { 0, 1, 0, 2, 157, 32, }, + { 2, 1, 0, 2, 157, 32, }, + { 1, 1, 0, 2, 157, 63, }, + { 0, 1, 0, 2, 161, 32, }, + { 2, 1, 0, 2, 161, 32, }, + { 1, 1, 0, 2, 161, 63, }, + { 0, 1, 0, 2, 165, 32, }, + { 2, 1, 0, 2, 165, 32, }, + { 1, 1, 0, 2, 165, 63, }, + { 0, 1, 0, 3, 36, 24, }, + { 2, 1, 0, 3, 36, 28, }, + { 1, 1, 0, 3, 36, 28, }, + { 0, 1, 0, 3, 40, 28, }, + { 2, 1, 0, 3, 40, 28, }, + { 1, 1, 0, 3, 40, 28, }, + { 0, 1, 0, 3, 44, 28, }, + { 2, 1, 0, 3, 44, 28, }, + { 1, 1, 0, 3, 44, 28, }, + { 0, 1, 0, 3, 48, 28, }, + { 2, 1, 0, 3, 48, 28, }, + { 1, 1, 0, 3, 48, 28, }, + { 0, 1, 0, 3, 52, 28, }, + { 2, 1, 0, 3, 52, 28, }, + { 1, 1, 0, 3, 52, 28, }, + { 0, 1, 0, 3, 56, 28, }, + { 2, 1, 0, 3, 56, 28, }, + { 1, 1, 0, 3, 56, 28, }, + { 0, 1, 0, 3, 60, 28, }, + { 2, 1, 0, 3, 60, 28, }, + { 1, 1, 0, 3, 60, 28, }, + { 0, 1, 0, 3, 64, 24, }, + { 2, 1, 0, 3, 64, 28, }, + { 1, 1, 0, 3, 64, 28, }, + { 0, 1, 0, 3, 100, 24, }, + { 2, 1, 0, 3, 100, 28, }, + { 1, 1, 0, 3, 100, 28, }, + { 0, 1, 0, 3, 104, 28, }, + { 2, 1, 0, 3, 104, 28, }, + { 1, 1, 0, 3, 104, 28, }, + { 0, 1, 0, 3, 108, 28, }, + { 2, 1, 0, 3, 108, 28, }, + { 1, 1, 0, 3, 108, 28, }, + { 0, 1, 0, 3, 112, 28, }, + { 2, 1, 0, 3, 112, 28, }, + { 1, 1, 0, 3, 112, 28, }, + { 0, 1, 0, 3, 116, 28, }, + { 2, 1, 0, 3, 116, 28, }, + { 1, 1, 0, 3, 116, 28, }, + { 0, 1, 0, 3, 120, 28, }, + { 2, 1, 0, 3, 120, 28, }, + { 1, 1, 0, 3, 120, 28, }, + { 0, 1, 0, 3, 124, 28, }, + { 2, 1, 0, 3, 124, 28, }, + { 1, 1, 0, 3, 124, 28, }, + { 0, 1, 0, 3, 128, 28, }, + { 2, 1, 0, 3, 128, 28, }, + { 1, 1, 0, 3, 128, 28, }, + { 0, 1, 0, 3, 132, 28, }, + { 2, 1, 0, 3, 132, 28, }, + { 1, 1, 0, 3, 132, 28, }, + { 0, 1, 0, 3, 136, 28, }, + { 2, 1, 0, 3, 136, 28, }, + { 1, 1, 0, 3, 136, 28, }, + { 0, 1, 0, 3, 140, 24, }, + { 2, 1, 0, 3, 140, 28, }, + { 1, 1, 0, 3, 140, 28, }, + { 0, 1, 0, 3, 149, 24, }, + { 2, 1, 0, 3, 149, 28, }, + { 1, 1, 0, 3, 149, 63, }, + { 0, 1, 0, 3, 153, 28, }, + { 2, 1, 0, 3, 153, 28, }, + { 1, 1, 0, 3, 153, 63, }, + { 0, 1, 0, 3, 157, 28, }, + { 2, 1, 0, 3, 157, 28, }, + { 1, 1, 0, 3, 157, 63, }, + { 0, 1, 0, 3, 161, 28, }, + { 2, 1, 0, 3, 161, 28, }, + { 1, 1, 0, 3, 161, 63, }, + { 0, 1, 0, 3, 165, 28, }, + { 2, 1, 0, 3, 165, 28, }, + { 1, 1, 0, 3, 165, 63, }, + { 0, 1, 0, 6, 36, 22, }, + { 2, 1, 0, 6, 36, 26, }, + { 1, 1, 0, 6, 36, 26, }, + { 0, 1, 0, 6, 40, 26, }, + { 2, 1, 0, 6, 40, 26, }, + { 1, 1, 0, 6, 40, 26, }, + { 0, 1, 0, 6, 44, 26, }, + { 2, 1, 0, 6, 44, 26, }, + { 1, 1, 0, 6, 44, 26, }, + { 0, 1, 0, 6, 48, 26, }, + { 2, 1, 0, 6, 48, 26, }, + { 1, 1, 0, 6, 48, 26, }, + { 0, 1, 0, 6, 52, 26, }, + { 2, 1, 0, 6, 52, 26, }, + { 1, 1, 0, 6, 52, 26, }, + { 0, 1, 0, 6, 56, 26, }, + { 2, 1, 0, 6, 56, 26, }, + { 1, 1, 0, 6, 56, 26, }, + { 0, 1, 0, 6, 60, 26, }, + { 2, 1, 0, 6, 60, 26, }, + { 1, 1, 0, 6, 60, 26, }, + { 0, 1, 0, 6, 64, 22, }, + { 2, 1, 0, 6, 64, 26, }, + { 1, 1, 0, 6, 64, 26, }, + { 0, 1, 0, 6, 100, 22, }, + { 2, 1, 0, 6, 100, 26, }, + { 1, 1, 0, 6, 100, 26, }, + { 0, 1, 0, 6, 104, 26, }, + { 2, 1, 0, 6, 104, 26, }, + { 1, 1, 0, 6, 104, 26, }, + { 0, 1, 0, 6, 108, 26, }, + { 2, 1, 0, 6, 108, 26, }, + { 1, 1, 0, 6, 108, 26, }, + { 0, 1, 0, 6, 112, 26, }, + { 2, 1, 0, 6, 112, 26, }, + { 1, 1, 0, 6, 112, 26, }, + { 
0, 1, 0, 6, 116, 26, }, + { 2, 1, 0, 6, 116, 26, }, + { 1, 1, 0, 6, 116, 26, }, + { 0, 1, 0, 6, 120, 26, }, + { 2, 1, 0, 6, 120, 26, }, + { 1, 1, 0, 6, 120, 26, }, + { 0, 1, 0, 6, 124, 26, }, + { 2, 1, 0, 6, 124, 26, }, + { 1, 1, 0, 6, 124, 26, }, + { 0, 1, 0, 6, 128, 26, }, + { 2, 1, 0, 6, 128, 26, }, + { 1, 1, 0, 6, 128, 26, }, + { 0, 1, 0, 6, 132, 26, }, + { 2, 1, 0, 6, 132, 26, }, + { 1, 1, 0, 6, 132, 26, }, + { 0, 1, 0, 6, 136, 26, }, + { 2, 1, 0, 6, 136, 26, }, + { 1, 1, 0, 6, 136, 26, }, + { 0, 1, 0, 6, 140, 22, }, + { 2, 1, 0, 6, 140, 26, }, + { 1, 1, 0, 6, 140, 26, }, + { 0, 1, 0, 6, 149, 22, }, + { 2, 1, 0, 6, 149, 26, }, + { 1, 1, 0, 6, 149, 63, }, + { 0, 1, 0, 6, 153, 26, }, + { 2, 1, 0, 6, 153, 26, }, + { 1, 1, 0, 6, 153, 63, }, + { 0, 1, 0, 6, 157, 26, }, + { 2, 1, 0, 6, 157, 26, }, + { 1, 1, 0, 6, 157, 63, }, + { 0, 1, 0, 6, 161, 26, }, + { 2, 1, 0, 6, 161, 26, }, + { 1, 1, 0, 6, 161, 63, }, + { 0, 1, 0, 6, 165, 26, }, + { 2, 1, 0, 6, 165, 26, }, + { 1, 1, 0, 6, 165, 63, }, + { 0, 1, 0, 7, 36, 20, }, + { 2, 1, 0, 7, 36, 24, }, + { 1, 1, 0, 7, 36, 24, }, + { 0, 1, 0, 7, 40, 24, }, + { 2, 1, 0, 7, 40, 24, }, + { 1, 1, 0, 7, 40, 24, }, + { 0, 1, 0, 7, 44, 24, }, + { 2, 1, 0, 7, 44, 24, }, + { 1, 1, 0, 7, 44, 24, }, + { 0, 1, 0, 7, 48, 24, }, + { 2, 1, 0, 7, 48, 24, }, + { 1, 1, 0, 7, 48, 24, }, + { 0, 1, 0, 7, 52, 24, }, + { 2, 1, 0, 7, 52, 24, }, + { 1, 1, 0, 7, 52, 24, }, + { 0, 1, 0, 7, 56, 24, }, + { 2, 1, 0, 7, 56, 24, }, + { 1, 1, 0, 7, 56, 24, }, + { 0, 1, 0, 7, 60, 24, }, + { 2, 1, 0, 7, 60, 24, }, + { 1, 1, 0, 7, 60, 24, }, + { 0, 1, 0, 7, 64, 20, }, + { 2, 1, 0, 7, 64, 24, }, + { 1, 1, 0, 7, 64, 24, }, + { 0, 1, 0, 7, 100, 20, }, + { 2, 1, 0, 7, 100, 24, }, + { 1, 1, 0, 7, 100, 24, }, + { 0, 1, 0, 7, 104, 24, }, + { 2, 1, 0, 7, 104, 24, }, + { 1, 1, 0, 7, 104, 24, }, + { 0, 1, 0, 7, 108, 24, }, + { 2, 1, 0, 7, 108, 24, }, + { 1, 1, 0, 7, 108, 24, }, + { 0, 1, 0, 7, 112, 24, }, + { 2, 1, 0, 7, 112, 24, }, + { 1, 1, 0, 7, 112, 24, }, + { 0, 1, 0, 7, 116, 24, }, + { 2, 1, 0, 7, 116, 24, }, + { 1, 1, 0, 7, 116, 24, }, + { 0, 1, 0, 7, 120, 24, }, + { 2, 1, 0, 7, 120, 24, }, + { 1, 1, 0, 7, 120, 24, }, + { 0, 1, 0, 7, 124, 24, }, + { 2, 1, 0, 7, 124, 24, }, + { 1, 1, 0, 7, 124, 24, }, + { 0, 1, 0, 7, 128, 24, }, + { 2, 1, 0, 7, 128, 24, }, + { 1, 1, 0, 7, 128, 24, }, + { 0, 1, 0, 7, 132, 24, }, + { 2, 1, 0, 7, 132, 24, }, + { 1, 1, 0, 7, 132, 24, }, + { 0, 1, 0, 7, 136, 24, }, + { 2, 1, 0, 7, 136, 24, }, + { 1, 1, 0, 7, 136, 24, }, + { 0, 1, 0, 7, 140, 20, }, + { 2, 1, 0, 7, 140, 24, }, + { 1, 1, 0, 7, 140, 24, }, + { 0, 1, 0, 7, 149, 20, }, + { 2, 1, 0, 7, 149, 24, }, + { 1, 1, 0, 7, 149, 63, }, + { 0, 1, 0, 7, 153, 24, }, + { 2, 1, 0, 7, 153, 24, }, + { 1, 1, 0, 7, 153, 63, }, + { 0, 1, 0, 7, 157, 24, }, + { 2, 1, 0, 7, 157, 24, }, + { 1, 1, 0, 7, 157, 63, }, + { 0, 1, 0, 7, 161, 24, }, + { 2, 1, 0, 7, 161, 24, }, + { 1, 1, 0, 7, 161, 63, }, + { 0, 1, 0, 7, 165, 24, }, + { 2, 1, 0, 7, 165, 24, }, + { 1, 1, 0, 7, 165, 63, }, + { 0, 1, 1, 2, 38, 26, }, + { 2, 1, 1, 2, 38, 32, }, + { 1, 1, 1, 2, 38, 32, }, + { 0, 1, 1, 2, 46, 32, }, + { 2, 1, 1, 2, 46, 32, }, + { 1, 1, 1, 2, 46, 32, }, + { 0, 1, 1, 2, 54, 32, }, + { 2, 1, 1, 2, 54, 32, }, + { 1, 1, 1, 2, 54, 32, }, + { 0, 1, 1, 2, 62, 26, }, + { 2, 1, 1, 2, 62, 32, }, + { 1, 1, 1, 2, 62, 32, }, + { 0, 1, 1, 2, 102, 26, }, + { 2, 1, 1, 2, 102, 32, }, + { 1, 1, 1, 2, 102, 32, }, + { 0, 1, 1, 2, 110, 32, }, + { 2, 1, 1, 2, 110, 32, }, + { 1, 1, 1, 2, 110, 32, }, + { 0, 1, 1, 2, 118, 32, }, + { 2, 1, 1, 2, 118, 32, }, + { 1, 1, 
1, 2, 118, 32, }, + { 0, 1, 1, 2, 126, 32, }, + { 2, 1, 1, 2, 126, 32, }, + { 1, 1, 1, 2, 126, 32, }, + { 0, 1, 1, 2, 134, 32, }, + { 2, 1, 1, 2, 134, 32, }, + { 1, 1, 1, 2, 134, 32, }, + { 0, 1, 1, 2, 151, 26, }, + { 2, 1, 1, 2, 151, 32, }, + { 1, 1, 1, 2, 151, 63, }, + { 0, 1, 1, 2, 159, 32, }, + { 2, 1, 1, 2, 159, 32, }, + { 1, 1, 1, 2, 159, 63, }, + { 0, 1, 1, 3, 38, 24, }, + { 2, 1, 1, 3, 38, 28, }, + { 1, 1, 1, 3, 38, 28, }, + { 0, 1, 1, 3, 46, 28, }, + { 2, 1, 1, 3, 46, 28, }, + { 1, 1, 1, 3, 46, 28, }, + { 0, 1, 1, 3, 54, 28, }, + { 2, 1, 1, 3, 54, 28, }, + { 1, 1, 1, 3, 54, 28, }, + { 0, 1, 1, 3, 62, 24, }, + { 2, 1, 1, 3, 62, 28, }, + { 1, 1, 1, 3, 62, 28, }, + { 0, 1, 1, 3, 102, 24, }, + { 2, 1, 1, 3, 102, 28, }, + { 1, 1, 1, 3, 102, 28, }, + { 0, 1, 1, 3, 110, 28, }, + { 2, 1, 1, 3, 110, 28, }, + { 1, 1, 1, 3, 110, 28, }, + { 0, 1, 1, 3, 118, 28, }, + { 2, 1, 1, 3, 118, 28, }, + { 1, 1, 1, 3, 118, 28, }, + { 0, 1, 1, 3, 126, 28, }, + { 2, 1, 1, 3, 126, 28, }, + { 1, 1, 1, 3, 126, 28, }, + { 0, 1, 1, 3, 134, 28, }, + { 2, 1, 1, 3, 134, 28, }, + { 1, 1, 1, 3, 134, 28, }, + { 0, 1, 1, 3, 151, 24, }, + { 2, 1, 1, 3, 151, 28, }, + { 1, 1, 1, 3, 151, 63, }, + { 0, 1, 1, 3, 159, 28, }, + { 2, 1, 1, 3, 159, 28, }, + { 1, 1, 1, 3, 159, 63, }, + { 0, 1, 1, 6, 38, 20, }, + { 2, 1, 1, 6, 38, 26, }, + { 1, 1, 1, 6, 38, 26, }, + { 0, 1, 1, 6, 46, 26, }, + { 2, 1, 1, 6, 46, 26, }, + { 1, 1, 1, 6, 46, 26, }, + { 0, 1, 1, 6, 54, 26, }, + { 2, 1, 1, 6, 54, 26, }, + { 1, 1, 1, 6, 54, 26, }, + { 0, 1, 1, 6, 62, 20, }, + { 2, 1, 1, 6, 62, 26, }, + { 1, 1, 1, 6, 62, 26, }, + { 0, 1, 1, 6, 102, 20, }, + { 2, 1, 1, 6, 102, 26, }, + { 1, 1, 1, 6, 102, 26, }, + { 0, 1, 1, 6, 110, 26, }, + { 2, 1, 1, 6, 110, 26, }, + { 1, 1, 1, 6, 110, 26, }, + { 0, 1, 1, 6, 118, 26, }, + { 2, 1, 1, 6, 118, 26, }, + { 1, 1, 1, 6, 118, 26, }, + { 0, 1, 1, 6, 126, 26, }, + { 2, 1, 1, 6, 126, 26, }, + { 1, 1, 1, 6, 126, 26, }, + { 0, 1, 1, 6, 134, 26, }, + { 2, 1, 1, 6, 134, 26, }, + { 1, 1, 1, 6, 134, 26, }, + { 0, 1, 1, 6, 151, 20, }, + { 2, 1, 1, 6, 151, 26, }, + { 1, 1, 1, 6, 151, 63, }, + { 0, 1, 1, 6, 159, 26, }, + { 2, 1, 1, 6, 159, 26, }, + { 1, 1, 1, 6, 159, 63, }, + { 0, 1, 1, 7, 38, 18, }, + { 2, 1, 1, 7, 38, 24, }, + { 1, 1, 1, 7, 38, 24, }, + { 0, 1, 1, 7, 46, 24, }, + { 2, 1, 1, 7, 46, 24, }, + { 1, 1, 1, 7, 46, 24, }, + { 0, 1, 1, 7, 54, 24, }, + { 2, 1, 1, 7, 54, 24, }, + { 1, 1, 1, 7, 54, 24, }, + { 0, 1, 1, 7, 62, 18, }, + { 2, 1, 1, 7, 62, 24, }, + { 1, 1, 1, 7, 62, 24, }, + { 0, 1, 1, 7, 102, 18, }, + { 2, 1, 1, 7, 102, 24, }, + { 1, 1, 1, 7, 102, 24, }, + { 0, 1, 1, 7, 110, 24, }, + { 2, 1, 1, 7, 110, 24, }, + { 1, 1, 1, 7, 110, 24, }, + { 0, 1, 1, 7, 118, 24, }, + { 2, 1, 1, 7, 118, 24, }, + { 1, 1, 1, 7, 118, 24, }, + { 0, 1, 1, 7, 126, 24, }, + { 2, 1, 1, 7, 126, 24, }, + { 1, 1, 1, 7, 126, 24, }, + { 0, 1, 1, 7, 134, 24, }, + { 2, 1, 1, 7, 134, 24, }, + { 1, 1, 1, 7, 134, 24, }, + { 0, 1, 1, 7, 151, 18, }, + { 2, 1, 1, 7, 151, 24, }, + { 1, 1, 1, 7, 151, 63, }, + { 0, 1, 1, 7, 159, 24, }, + { 2, 1, 1, 7, 159, 24, }, + { 1, 1, 1, 7, 159, 63, }, + { 0, 1, 2, 4, 42, 22, }, + { 2, 1, 2, 4, 42, 30, }, + { 1, 1, 2, 4, 42, 30, }, + { 0, 1, 2, 4, 58, 22, }, + { 2, 1, 2, 4, 58, 30, }, + { 1, 1, 2, 4, 58, 30, }, + { 0, 1, 2, 4, 106, 22, }, + { 2, 1, 2, 4, 106, 30, }, + { 1, 1, 2, 4, 106, 30, }, + { 0, 1, 2, 4, 122, 30, }, + { 2, 1, 2, 4, 122, 30, }, + { 1, 1, 2, 4, 122, 30, }, + { 0, 1, 2, 4, 155, 22, }, + { 2, 1, 2, 4, 155, 30, }, + { 1, 1, 2, 4, 155, 63, }, + { 0, 1, 2, 5, 42, 20, }, + { 2, 1, 2, 5, 42, 28, 
}, + { 1, 1, 2, 5, 42, 28, }, + { 0, 1, 2, 5, 58, 20, }, + { 2, 1, 2, 5, 58, 28, }, + { 1, 1, 2, 5, 58, 28, }, + { 0, 1, 2, 5, 106, 20, }, + { 2, 1, 2, 5, 106, 28, }, + { 1, 1, 2, 5, 106, 28, }, + { 0, 1, 2, 5, 122, 28, }, + { 2, 1, 2, 5, 122, 28, }, + { 1, 1, 2, 5, 122, 28, }, + { 0, 1, 2, 5, 155, 20, }, + { 2, 1, 2, 5, 155, 28, }, + { 1, 1, 2, 5, 155, 63, }, + { 0, 1, 2, 8, 42, 18, }, + { 2, 1, 2, 8, 42, 26, }, + { 1, 1, 2, 8, 42, 26, }, + { 0, 1, 2, 8, 58, 18, }, + { 2, 1, 2, 8, 58, 26, }, + { 1, 1, 2, 8, 58, 26, }, + { 0, 1, 2, 8, 106, 18, }, + { 2, 1, 2, 8, 106, 26, }, + { 1, 1, 2, 8, 106, 26, }, + { 0, 1, 2, 8, 122, 26, }, + { 2, 1, 2, 8, 122, 26, }, + { 1, 1, 2, 8, 122, 26, }, + { 0, 1, 2, 8, 155, 18, }, + { 2, 1, 2, 8, 155, 26, }, + { 1, 1, 2, 8, 155, 63, }, + { 0, 1, 2, 9, 42, 16, }, + { 2, 1, 2, 9, 42, 24, }, + { 1, 1, 2, 9, 42, 24, }, + { 0, 1, 2, 9, 58, 16, }, + { 2, 1, 2, 9, 58, 24, }, + { 1, 1, 2, 9, 58, 24, }, + { 0, 1, 2, 9, 106, 16, }, + { 2, 1, 2, 9, 106, 24, }, + { 1, 1, 2, 9, 106, 24, }, + { 0, 1, 2, 9, 122, 24, }, + { 2, 1, 2, 9, 122, 24, }, + { 1, 1, 2, 9, 122, 24, }, + { 0, 1, 2, 9, 155, 16, }, + { 2, 1, 2, 9, 155, 24, }, + { 1, 1, 2, 9, 155, 63, }, +}; + +RTW_DECL_TABLE_TXPWR_LMT(rtw8814a_txpwr_lmt_type0); + +static const struct rtw_txpwr_lmt_cfg_pair rtw8814a_txpwr_lmt_type1[] = { + { 0, 0, 0, 0, 1, 34, }, + { 2, 0, 0, 0, 1, 32, }, + { 1, 0, 0, 0, 1, 32, }, + { 0, 0, 0, 0, 2, 34, }, + { 2, 0, 0, 0, 2, 32, }, + { 1, 0, 0, 0, 2, 32, }, + { 0, 0, 0, 0, 3, 34, }, + { 2, 0, 0, 0, 3, 32, }, + { 1, 0, 0, 0, 3, 32, }, + { 0, 0, 0, 0, 4, 34, }, + { 2, 0, 0, 0, 4, 32, }, + { 1, 0, 0, 0, 4, 32, }, + { 0, 0, 0, 0, 5, 34, }, + { 2, 0, 0, 0, 5, 32, }, + { 1, 0, 0, 0, 5, 32, }, + { 0, 0, 0, 0, 6, 34, }, + { 2, 0, 0, 0, 6, 32, }, + { 1, 0, 0, 0, 6, 32, }, + { 0, 0, 0, 0, 7, 34, }, + { 2, 0, 0, 0, 7, 32, }, + { 1, 0, 0, 0, 7, 32, }, + { 0, 0, 0, 0, 8, 34, }, + { 2, 0, 0, 0, 8, 32, }, + { 1, 0, 0, 0, 8, 32, }, + { 0, 0, 0, 0, 9, 34, }, + { 2, 0, 0, 0, 9, 32, }, + { 1, 0, 0, 0, 9, 32, }, + { 0, 0, 0, 0, 10, 34, }, + { 2, 0, 0, 0, 10, 32, }, + { 1, 0, 0, 0, 10, 32, }, + { 0, 0, 0, 0, 11, 34, }, + { 2, 0, 0, 0, 11, 32, }, + { 1, 0, 0, 0, 11, 32, }, + { 0, 0, 0, 0, 12, 24, }, + { 2, 0, 0, 0, 12, 32, }, + { 1, 0, 0, 0, 12, 32, }, + { 0, 0, 0, 0, 13, 16, }, + { 2, 0, 0, 0, 13, 32, }, + { 1, 0, 0, 0, 13, 32, }, + { 0, 0, 0, 0, 14, 63, }, + { 2, 0, 0, 0, 14, 63, }, + { 1, 0, 0, 0, 14, 32, }, + { 0, 0, 0, 1, 1, 30, }, + { 2, 0, 0, 1, 1, 32, }, + { 1, 0, 0, 1, 1, 32, }, + { 0, 0, 0, 1, 2, 32, }, + { 2, 0, 0, 1, 2, 32, }, + { 1, 0, 0, 1, 2, 32, }, + { 0, 0, 0, 1, 3, 32, }, + { 2, 0, 0, 1, 3, 32, }, + { 1, 0, 0, 1, 3, 32, }, + { 0, 0, 0, 1, 4, 32, }, + { 2, 0, 0, 1, 4, 32, }, + { 1, 0, 0, 1, 4, 32, }, + { 0, 0, 0, 1, 5, 32, }, + { 2, 0, 0, 1, 5, 32, }, + { 1, 0, 0, 1, 5, 32, }, + { 0, 0, 0, 1, 6, 32, }, + { 2, 0, 0, 1, 6, 32, }, + { 1, 0, 0, 1, 6, 32, }, + { 0, 0, 0, 1, 7, 32, }, + { 2, 0, 0, 1, 7, 32, }, + { 1, 0, 0, 1, 7, 32, }, + { 0, 0, 0, 1, 8, 32, }, + { 2, 0, 0, 1, 8, 32, }, + { 1, 0, 0, 1, 8, 32, }, + { 0, 0, 0, 1, 9, 32, }, + { 2, 0, 0, 1, 9, 32, }, + { 1, 0, 0, 1, 9, 32, }, + { 0, 0, 0, 1, 10, 32, }, + { 2, 0, 0, 1, 10, 32, }, + { 1, 0, 0, 1, 10, 32, }, + { 0, 0, 0, 1, 11, 30, }, + { 2, 0, 0, 1, 11, 32, }, + { 1, 0, 0, 1, 11, 32, }, + { 0, 0, 0, 1, 12, 18, }, + { 2, 0, 0, 1, 12, 32, }, + { 1, 0, 0, 1, 12, 32, }, + { 0, 0, 0, 1, 13, 8, }, + { 2, 0, 0, 1, 13, 32, }, + { 1, 0, 0, 1, 13, 32, }, + { 0, 0, 0, 1, 14, 63, }, + { 2, 0, 0, 1, 14, 63, }, + { 1, 0, 0, 1, 14, 63, }, + { 0, 0, 0, 
2, 1, 28, }, + { 2, 0, 0, 2, 1, 32, }, + { 1, 0, 0, 2, 1, 32, }, + { 0, 0, 0, 2, 2, 32, }, + { 2, 0, 0, 2, 2, 32, }, + { 1, 0, 0, 2, 2, 32, }, + { 0, 0, 0, 2, 3, 32, }, + { 2, 0, 0, 2, 3, 32, }, + { 1, 0, 0, 2, 3, 32, }, + { 0, 0, 0, 2, 4, 32, }, + { 2, 0, 0, 2, 4, 32, }, + { 1, 0, 0, 2, 4, 32, }, + { 0, 0, 0, 2, 5, 32, }, + { 2, 0, 0, 2, 5, 32, }, + { 1, 0, 0, 2, 5, 32, }, + { 0, 0, 0, 2, 6, 32, }, + { 2, 0, 0, 2, 6, 32, }, + { 1, 0, 0, 2, 6, 32, }, + { 0, 0, 0, 2, 7, 32, }, + { 2, 0, 0, 2, 7, 32, }, + { 1, 0, 0, 2, 7, 32, }, + { 0, 0, 0, 2, 8, 32, }, + { 2, 0, 0, 2, 8, 32, }, + { 1, 0, 0, 2, 8, 32, }, + { 0, 0, 0, 2, 9, 32, }, + { 2, 0, 0, 2, 9, 32, }, + { 1, 0, 0, 2, 9, 32, }, + { 0, 0, 0, 2, 10, 32, }, + { 2, 0, 0, 2, 10, 32, }, + { 1, 0, 0, 2, 10, 32, }, + { 0, 0, 0, 2, 11, 28, }, + { 2, 0, 0, 2, 11, 32, }, + { 1, 0, 0, 2, 11, 32, }, + { 0, 0, 0, 2, 12, 16, }, + { 2, 0, 0, 2, 12, 32, }, + { 1, 0, 0, 2, 12, 32, }, + { 0, 0, 0, 2, 13, 6, }, + { 2, 0, 0, 2, 13, 32, }, + { 1, 0, 0, 2, 13, 32, }, + { 0, 0, 0, 2, 14, 63, }, + { 2, 0, 0, 2, 14, 63, }, + { 1, 0, 0, 2, 14, 63, }, + { 0, 0, 0, 3, 1, 26, }, + { 2, 0, 0, 3, 1, 30, }, + { 1, 0, 0, 3, 1, 30, }, + { 0, 0, 0, 3, 2, 30, }, + { 2, 0, 0, 3, 2, 30, }, + { 1, 0, 0, 3, 2, 30, }, + { 0, 0, 0, 3, 3, 30, }, + { 2, 0, 0, 3, 3, 30, }, + { 1, 0, 0, 3, 3, 30, }, + { 0, 0, 0, 3, 4, 30, }, + { 2, 0, 0, 3, 4, 30, }, + { 1, 0, 0, 3, 4, 30, }, + { 0, 0, 0, 3, 5, 30, }, + { 2, 0, 0, 3, 5, 30, }, + { 1, 0, 0, 3, 5, 30, }, + { 0, 0, 0, 3, 6, 30, }, + { 2, 0, 0, 3, 6, 30, }, + { 1, 0, 0, 3, 6, 30, }, + { 0, 0, 0, 3, 7, 30, }, + { 2, 0, 0, 3, 7, 30, }, + { 1, 0, 0, 3, 7, 30, }, + { 0, 0, 0, 3, 8, 30, }, + { 2, 0, 0, 3, 8, 30, }, + { 1, 0, 0, 3, 8, 30, }, + { 0, 0, 0, 3, 9, 30, }, + { 2, 0, 0, 3, 9, 30, }, + { 1, 0, 0, 3, 9, 30, }, + { 0, 0, 0, 3, 10, 30, }, + { 2, 0, 0, 3, 10, 30, }, + { 1, 0, 0, 3, 10, 30, }, + { 0, 0, 0, 3, 11, 26, }, + { 2, 0, 0, 3, 11, 30, }, + { 1, 0, 0, 3, 11, 30, }, + { 0, 0, 0, 3, 12, 14, }, + { 2, 0, 0, 3, 12, 30, }, + { 1, 0, 0, 3, 12, 30, }, + { 0, 0, 0, 3, 13, 4, }, + { 2, 0, 0, 3, 13, 30, }, + { 1, 0, 0, 3, 13, 30, }, + { 0, 0, 0, 3, 14, 63, }, + { 2, 0, 0, 3, 14, 63, }, + { 1, 0, 0, 3, 14, 63, }, + { 0, 0, 0, 6, 1, 24, }, + { 2, 0, 0, 6, 1, 28, }, + { 1, 0, 0, 6, 1, 28, }, + { 0, 0, 0, 6, 2, 28, }, + { 2, 0, 0, 6, 2, 28, }, + { 1, 0, 0, 6, 2, 28, }, + { 0, 0, 0, 6, 3, 28, }, + { 2, 0, 0, 6, 3, 28, }, + { 1, 0, 0, 6, 3, 28, }, + { 0, 0, 0, 6, 4, 28, }, + { 2, 0, 0, 6, 4, 28, }, + { 1, 0, 0, 6, 4, 28, }, + { 0, 0, 0, 6, 5, 28, }, + { 2, 0, 0, 6, 5, 28, }, + { 1, 0, 0, 6, 5, 28, }, + { 0, 0, 0, 6, 6, 28, }, + { 2, 0, 0, 6, 6, 28, }, + { 1, 0, 0, 6, 6, 28, }, + { 0, 0, 0, 6, 7, 28, }, + { 2, 0, 0, 6, 7, 28, }, + { 1, 0, 0, 6, 7, 28, }, + { 0, 0, 0, 6, 8, 28, }, + { 2, 0, 0, 6, 8, 28, }, + { 1, 0, 0, 6, 8, 28, }, + { 0, 0, 0, 6, 9, 28, }, + { 2, 0, 0, 6, 9, 28, }, + { 1, 0, 0, 6, 9, 28, }, + { 0, 0, 0, 6, 10, 28, }, + { 2, 0, 0, 6, 10, 28, }, + { 1, 0, 0, 6, 10, 28, }, + { 0, 0, 0, 6, 11, 24, }, + { 2, 0, 0, 6, 11, 28, }, + { 1, 0, 0, 6, 11, 28, }, + { 0, 0, 0, 6, 12, 14, }, + { 2, 0, 0, 6, 12, 28, }, + { 1, 0, 0, 6, 12, 28, }, + { 0, 0, 0, 6, 13, 4, }, + { 2, 0, 0, 6, 13, 28, }, + { 1, 0, 0, 6, 13, 28, }, + { 0, 0, 0, 6, 14, 63, }, + { 2, 0, 0, 6, 14, 63, }, + { 1, 0, 0, 6, 14, 63, }, + { 0, 0, 0, 7, 1, 22, }, + { 2, 0, 0, 7, 1, 26, }, + { 1, 0, 0, 7, 1, 26, }, + { 0, 0, 0, 7, 2, 26, }, + { 2, 0, 0, 7, 2, 26, }, + { 1, 0, 0, 7, 2, 26, }, + { 0, 0, 0, 7, 3, 26, }, + { 2, 0, 0, 7, 3, 26, }, + { 1, 0, 0, 7, 3, 26, }, + { 0, 0, 0, 7, 
4, 26, }, + { 2, 0, 0, 7, 4, 26, }, + { 1, 0, 0, 7, 4, 26, }, + { 0, 0, 0, 7, 5, 26, }, + { 2, 0, 0, 7, 5, 26, }, + { 1, 0, 0, 7, 5, 26, }, + { 0, 0, 0, 7, 6, 26, }, + { 2, 0, 0, 7, 6, 26, }, + { 1, 0, 0, 7, 6, 26, }, + { 0, 0, 0, 7, 7, 26, }, + { 2, 0, 0, 7, 7, 26, }, + { 1, 0, 0, 7, 7, 26, }, + { 0, 0, 0, 7, 8, 26, }, + { 2, 0, 0, 7, 8, 26, }, + { 1, 0, 0, 7, 8, 26, }, + { 0, 0, 0, 7, 9, 26, }, + { 2, 0, 0, 7, 9, 26, }, + { 1, 0, 0, 7, 9, 26, }, + { 0, 0, 0, 7, 10, 26, }, + { 2, 0, 0, 7, 10, 26, }, + { 1, 0, 0, 7, 10, 26, }, + { 0, 0, 0, 7, 11, 22, }, + { 2, 0, 0, 7, 11, 26, }, + { 1, 0, 0, 7, 11, 26, }, + { 0, 0, 0, 7, 12, 14, }, + { 2, 0, 0, 7, 12, 26, }, + { 1, 0, 0, 7, 12, 26, }, + { 0, 0, 0, 7, 13, 4, }, + { 2, 0, 0, 7, 13, 26, }, + { 1, 0, 0, 7, 13, 26, }, + { 0, 0, 0, 7, 14, 63, }, + { 2, 0, 0, 7, 14, 63, }, + { 1, 0, 0, 7, 14, 63, }, + { 0, 0, 1, 2, 1, 63, }, + { 2, 0, 1, 2, 1, 63, }, + { 1, 0, 1, 2, 1, 63, }, + { 0, 0, 1, 2, 2, 63, }, + { 2, 0, 1, 2, 2, 63, }, + { 1, 0, 1, 2, 2, 63, }, + { 0, 0, 1, 2, 3, 28, }, + { 2, 0, 1, 2, 3, 32, }, + { 1, 0, 1, 2, 3, 32, }, + { 0, 0, 1, 2, 4, 32, }, + { 2, 0, 1, 2, 4, 32, }, + { 1, 0, 1, 2, 4, 32, }, + { 0, 0, 1, 2, 5, 32, }, + { 2, 0, 1, 2, 5, 32, }, + { 1, 0, 1, 2, 5, 32, }, + { 0, 0, 1, 2, 6, 32, }, + { 2, 0, 1, 2, 6, 32, }, + { 1, 0, 1, 2, 6, 32, }, + { 0, 0, 1, 2, 7, 32, }, + { 2, 0, 1, 2, 7, 32, }, + { 1, 0, 1, 2, 7, 32, }, + { 0, 0, 1, 2, 8, 32, }, + { 2, 0, 1, 2, 8, 32, }, + { 1, 0, 1, 2, 8, 32, }, + { 0, 0, 1, 2, 9, 32, }, + { 2, 0, 1, 2, 9, 32, }, + { 1, 0, 1, 2, 9, 32, }, + { 0, 0, 1, 2, 10, 32, }, + { 2, 0, 1, 2, 10, 32, }, + { 1, 0, 1, 2, 10, 32, }, + { 0, 0, 1, 2, 11, 28, }, + { 2, 0, 1, 2, 11, 32, }, + { 1, 0, 1, 2, 11, 32, }, + { 0, 0, 1, 2, 12, 16, }, + { 2, 0, 1, 2, 12, 32, }, + { 1, 0, 1, 2, 12, 32, }, + { 0, 0, 1, 2, 13, 10, }, + { 2, 0, 1, 2, 13, 32, }, + { 1, 0, 1, 2, 13, 32, }, + { 0, 0, 1, 2, 14, 63, }, + { 2, 0, 1, 2, 14, 63, }, + { 1, 0, 1, 2, 14, 63, }, + { 0, 0, 1, 3, 1, 63, }, + { 2, 0, 1, 3, 1, 63, }, + { 1, 0, 1, 3, 1, 63, }, + { 0, 0, 1, 3, 2, 63, }, + { 2, 0, 1, 3, 2, 63, }, + { 1, 0, 1, 3, 2, 63, }, + { 0, 0, 1, 3, 3, 26, }, + { 2, 0, 1, 3, 3, 30, }, + { 1, 0, 1, 3, 3, 30, }, + { 0, 0, 1, 3, 4, 30, }, + { 2, 0, 1, 3, 4, 30, }, + { 1, 0, 1, 3, 4, 30, }, + { 0, 0, 1, 3, 5, 30, }, + { 2, 0, 1, 3, 5, 30, }, + { 1, 0, 1, 3, 5, 30, }, + { 0, 0, 1, 3, 6, 30, }, + { 2, 0, 1, 3, 6, 30, }, + { 1, 0, 1, 3, 6, 30, }, + { 0, 0, 1, 3, 7, 30, }, + { 2, 0, 1, 3, 7, 30, }, + { 1, 0, 1, 3, 7, 30, }, + { 0, 0, 1, 3, 8, 30, }, + { 2, 0, 1, 3, 8, 30, }, + { 1, 0, 1, 3, 8, 30, }, + { 0, 0, 1, 3, 9, 30, }, + { 2, 0, 1, 3, 9, 30, }, + { 1, 0, 1, 3, 9, 30, }, + { 0, 0, 1, 3, 10, 30, }, + { 2, 0, 1, 3, 10, 30, }, + { 1, 0, 1, 3, 10, 30, }, + { 0, 0, 1, 3, 11, 26, }, + { 2, 0, 1, 3, 11, 30, }, + { 1, 0, 1, 3, 11, 30, }, + { 0, 0, 1, 3, 12, 14, }, + { 2, 0, 1, 3, 12, 30, }, + { 1, 0, 1, 3, 12, 30, }, + { 0, 0, 1, 3, 13, 8, }, + { 2, 0, 1, 3, 13, 30, }, + { 1, 0, 1, 3, 13, 30, }, + { 0, 0, 1, 3, 14, 63, }, + { 2, 0, 1, 3, 14, 63, }, + { 1, 0, 1, 3, 14, 63, }, + { 0, 0, 1, 6, 1, 63, }, + { 2, 0, 1, 6, 1, 63, }, + { 1, 0, 1, 6, 1, 63, }, + { 0, 0, 1, 6, 2, 63, }, + { 2, 0, 1, 6, 2, 63, }, + { 1, 0, 1, 6, 2, 63, }, + { 0, 0, 1, 6, 3, 24, }, + { 2, 0, 1, 6, 3, 28, }, + { 1, 0, 1, 6, 3, 28, }, + { 0, 0, 1, 6, 4, 28, }, + { 2, 0, 1, 6, 4, 28, }, + { 1, 0, 1, 6, 4, 28, }, + { 0, 0, 1, 6, 5, 28, }, + { 2, 0, 1, 6, 5, 28, }, + { 1, 0, 1, 6, 5, 28, }, + { 0, 0, 1, 6, 6, 28, }, + { 2, 0, 1, 6, 6, 28, }, + { 1, 0, 1, 6, 6, 28, }, + { 0, 0, 1, 6, 
7, 28, }, + { 2, 0, 1, 6, 7, 28, }, + { 1, 0, 1, 6, 7, 28, }, + { 0, 0, 1, 6, 8, 28, }, + { 2, 0, 1, 6, 8, 28, }, + { 1, 0, 1, 6, 8, 28, }, + { 0, 0, 1, 6, 9, 28, }, + { 2, 0, 1, 6, 9, 28, }, + { 1, 0, 1, 6, 9, 28, }, + { 0, 0, 1, 6, 10, 28, }, + { 2, 0, 1, 6, 10, 28, }, + { 1, 0, 1, 6, 10, 28, }, + { 0, 0, 1, 6, 11, 24, }, + { 2, 0, 1, 6, 11, 28, }, + { 1, 0, 1, 6, 11, 28, }, + { 0, 0, 1, 6, 12, 14, }, + { 2, 0, 1, 6, 12, 28, }, + { 1, 0, 1, 6, 12, 28, }, + { 0, 0, 1, 6, 13, 8, }, + { 2, 0, 1, 6, 13, 28, }, + { 1, 0, 1, 6, 13, 28, }, + { 0, 0, 1, 6, 14, 63, }, + { 2, 0, 1, 6, 14, 63, }, + { 1, 0, 1, 6, 14, 63, }, + { 0, 0, 1, 7, 1, 63, }, + { 2, 0, 1, 7, 1, 63, }, + { 1, 0, 1, 7, 1, 63, }, + { 0, 0, 1, 7, 2, 63, }, + { 2, 0, 1, 7, 2, 63, }, + { 1, 0, 1, 7, 2, 63, }, + { 0, 0, 1, 7, 3, 22, }, + { 2, 0, 1, 7, 3, 26, }, + { 1, 0, 1, 7, 3, 26, }, + { 0, 0, 1, 7, 4, 26, }, + { 2, 0, 1, 7, 4, 26, }, + { 1, 0, 1, 7, 4, 26, }, + { 0, 0, 1, 7, 5, 26, }, + { 2, 0, 1, 7, 5, 26, }, + { 1, 0, 1, 7, 5, 26, }, + { 0, 0, 1, 7, 6, 26, }, + { 2, 0, 1, 7, 6, 26, }, + { 1, 0, 1, 7, 6, 26, }, + { 0, 0, 1, 7, 7, 26, }, + { 2, 0, 1, 7, 7, 26, }, + { 1, 0, 1, 7, 7, 26, }, + { 0, 0, 1, 7, 8, 26, }, + { 2, 0, 1, 7, 8, 26, }, + { 1, 0, 1, 7, 8, 26, }, + { 0, 0, 1, 7, 9, 26, }, + { 2, 0, 1, 7, 9, 26, }, + { 1, 0, 1, 7, 9, 26, }, + { 0, 0, 1, 7, 10, 26, }, + { 2, 0, 1, 7, 10, 26, }, + { 1, 0, 1, 7, 10, 26, }, + { 0, 0, 1, 7, 11, 22, }, + { 2, 0, 1, 7, 11, 26, }, + { 1, 0, 1, 7, 11, 26, }, + { 0, 0, 1, 7, 12, 14, }, + { 2, 0, 1, 7, 12, 26, }, + { 1, 0, 1, 7, 12, 26, }, + { 0, 0, 1, 7, 13, 8, }, + { 2, 0, 1, 7, 13, 26, }, + { 1, 0, 1, 7, 13, 26, }, + { 0, 0, 1, 7, 14, 63, }, + { 2, 0, 1, 7, 14, 63, }, + { 1, 0, 1, 7, 14, 63, }, + { 0, 1, 0, 1, 36, 28, }, + { 2, 1, 0, 1, 36, 32, }, + { 1, 1, 0, 1, 36, 32, }, + { 0, 1, 0, 1, 40, 32, }, + { 2, 1, 0, 1, 40, 32, }, + { 1, 1, 0, 1, 40, 32, }, + { 0, 1, 0, 1, 44, 32, }, + { 2, 1, 0, 1, 44, 32, }, + { 1, 1, 0, 1, 44, 32, }, + { 0, 1, 0, 1, 48, 32, }, + { 2, 1, 0, 1, 48, 32, }, + { 1, 1, 0, 1, 48, 32, }, + { 0, 1, 0, 1, 52, 32, }, + { 2, 1, 0, 1, 52, 32, }, + { 1, 1, 0, 1, 52, 32, }, + { 0, 1, 0, 1, 56, 32, }, + { 2, 1, 0, 1, 56, 32, }, + { 1, 1, 0, 1, 56, 32, }, + { 0, 1, 0, 1, 60, 32, }, + { 2, 1, 0, 1, 60, 32, }, + { 1, 1, 0, 1, 60, 32, }, + { 0, 1, 0, 1, 64, 30, }, + { 2, 1, 0, 1, 64, 32, }, + { 1, 1, 0, 1, 64, 32, }, + { 0, 1, 0, 1, 100, 28, }, + { 2, 1, 0, 1, 100, 32, }, + { 1, 1, 0, 1, 100, 32, }, + { 0, 1, 0, 1, 104, 32, }, + { 2, 1, 0, 1, 104, 32, }, + { 1, 1, 0, 1, 104, 32, }, + { 0, 1, 0, 1, 108, 32, }, + { 2, 1, 0, 1, 108, 32, }, + { 1, 1, 0, 1, 108, 32, }, + { 0, 1, 0, 1, 112, 32, }, + { 2, 1, 0, 1, 112, 32, }, + { 1, 1, 0, 1, 112, 32, }, + { 0, 1, 0, 1, 116, 32, }, + { 2, 1, 0, 1, 116, 32, }, + { 1, 1, 0, 1, 116, 32, }, + { 0, 1, 0, 1, 120, 32, }, + { 2, 1, 0, 1, 120, 32, }, + { 1, 1, 0, 1, 120, 32, }, + { 0, 1, 0, 1, 124, 32, }, + { 2, 1, 0, 1, 124, 32, }, + { 1, 1, 0, 1, 124, 32, }, + { 0, 1, 0, 1, 128, 32, }, + { 2, 1, 0, 1, 128, 32, }, + { 1, 1, 0, 1, 128, 32, }, + { 0, 1, 0, 1, 132, 32, }, + { 2, 1, 0, 1, 132, 32, }, + { 1, 1, 0, 1, 132, 32, }, + { 0, 1, 0, 1, 136, 32, }, + { 2, 1, 0, 1, 136, 32, }, + { 1, 1, 0, 1, 136, 32, }, + { 0, 1, 0, 1, 140, 28, }, + { 2, 1, 0, 1, 140, 32, }, + { 1, 1, 0, 1, 140, 32, }, + { 0, 1, 0, 1, 149, 28, }, + { 2, 1, 0, 1, 149, 32, }, + { 1, 1, 0, 1, 149, 63, }, + { 0, 1, 0, 1, 153, 32, }, + { 2, 1, 0, 1, 153, 32, }, + { 1, 1, 0, 1, 153, 63, }, + { 0, 1, 0, 1, 157, 32, }, + { 2, 1, 0, 1, 157, 32, }, + { 1, 1, 0, 1, 157, 63, }, + 
{ 0, 1, 0, 1, 161, 32, }, + { 2, 1, 0, 1, 161, 32, }, + { 1, 1, 0, 1, 161, 63, }, + { 0, 1, 0, 1, 165, 32, }, + { 2, 1, 0, 1, 165, 32, }, + { 1, 1, 0, 1, 165, 63, }, + { 0, 1, 0, 2, 36, 28, }, + { 2, 1, 0, 2, 36, 32, }, + { 1, 1, 0, 2, 36, 32, }, + { 0, 1, 0, 2, 40, 32, }, + { 2, 1, 0, 2, 40, 32, }, + { 1, 1, 0, 2, 40, 32, }, + { 0, 1, 0, 2, 44, 32, }, + { 2, 1, 0, 2, 44, 32, }, + { 1, 1, 0, 2, 44, 32, }, + { 0, 1, 0, 2, 48, 32, }, + { 2, 1, 0, 2, 48, 32, }, + { 1, 1, 0, 2, 48, 32, }, + { 0, 1, 0, 2, 52, 32, }, + { 2, 1, 0, 2, 52, 32, }, + { 1, 1, 0, 2, 52, 32, }, + { 0, 1, 0, 2, 56, 32, }, + { 2, 1, 0, 2, 56, 32, }, + { 1, 1, 0, 2, 56, 32, }, + { 0, 1, 0, 2, 60, 32, }, + { 2, 1, 0, 2, 60, 32, }, + { 1, 1, 0, 2, 60, 32, }, + { 0, 1, 0, 2, 64, 30, }, + { 2, 1, 0, 2, 64, 32, }, + { 1, 1, 0, 2, 64, 32, }, + { 0, 1, 0, 2, 100, 28, }, + { 2, 1, 0, 2, 100, 32, }, + { 1, 1, 0, 2, 100, 32, }, + { 0, 1, 0, 2, 104, 32, }, + { 2, 1, 0, 2, 104, 32, }, + { 1, 1, 0, 2, 104, 32, }, + { 0, 1, 0, 2, 108, 32, }, + { 2, 1, 0, 2, 108, 32, }, + { 1, 1, 0, 2, 108, 32, }, + { 0, 1, 0, 2, 112, 32, }, + { 2, 1, 0, 2, 112, 32, }, + { 1, 1, 0, 2, 112, 32, }, + { 0, 1, 0, 2, 116, 32, }, + { 2, 1, 0, 2, 116, 32, }, + { 1, 1, 0, 2, 116, 32, }, + { 0, 1, 0, 2, 120, 32, }, + { 2, 1, 0, 2, 120, 32, }, + { 1, 1, 0, 2, 120, 32, }, + { 0, 1, 0, 2, 124, 32, }, + { 2, 1, 0, 2, 124, 32, }, + { 1, 1, 0, 2, 124, 32, }, + { 0, 1, 0, 2, 128, 32, }, + { 2, 1, 0, 2, 128, 32, }, + { 1, 1, 0, 2, 128, 32, }, + { 0, 1, 0, 2, 132, 32, }, + { 2, 1, 0, 2, 132, 32, }, + { 1, 1, 0, 2, 132, 32, }, + { 0, 1, 0, 2, 136, 32, }, + { 2, 1, 0, 2, 136, 32, }, + { 1, 1, 0, 2, 136, 32, }, + { 0, 1, 0, 2, 140, 28, }, + { 2, 1, 0, 2, 140, 32, }, + { 1, 1, 0, 2, 140, 32, }, + { 0, 1, 0, 2, 149, 28, }, + { 2, 1, 0, 2, 149, 32, }, + { 1, 1, 0, 2, 149, 63, }, + { 0, 1, 0, 2, 153, 32, }, + { 2, 1, 0, 2, 153, 32, }, + { 1, 1, 0, 2, 153, 63, }, + { 0, 1, 0, 2, 157, 32, }, + { 2, 1, 0, 2, 157, 32, }, + { 1, 1, 0, 2, 157, 63, }, + { 0, 1, 0, 2, 161, 32, }, + { 2, 1, 0, 2, 161, 32, }, + { 1, 1, 0, 2, 161, 63, }, + { 0, 1, 0, 2, 165, 32, }, + { 2, 1, 0, 2, 165, 32, }, + { 1, 1, 0, 2, 165, 63, }, + { 0, 1, 0, 3, 36, 26, }, + { 2, 1, 0, 3, 36, 30, }, + { 1, 1, 0, 3, 36, 30, }, + { 0, 1, 0, 3, 40, 30, }, + { 2, 1, 0, 3, 40, 30, }, + { 1, 1, 0, 3, 40, 30, }, + { 0, 1, 0, 3, 44, 30, }, + { 2, 1, 0, 3, 44, 30, }, + { 1, 1, 0, 3, 44, 30, }, + { 0, 1, 0, 3, 48, 30, }, + { 2, 1, 0, 3, 48, 30, }, + { 1, 1, 0, 3, 48, 30, }, + { 0, 1, 0, 3, 52, 30, }, + { 2, 1, 0, 3, 52, 30, }, + { 1, 1, 0, 3, 52, 30, }, + { 0, 1, 0, 3, 56, 30, }, + { 2, 1, 0, 3, 56, 30, }, + { 1, 1, 0, 3, 56, 30, }, + { 0, 1, 0, 3, 60, 30, }, + { 2, 1, 0, 3, 60, 30, }, + { 1, 1, 0, 3, 60, 30, }, + { 0, 1, 0, 3, 64, 28, }, + { 2, 1, 0, 3, 64, 30, }, + { 1, 1, 0, 3, 64, 30, }, + { 0, 1, 0, 3, 100, 28, }, + { 2, 1, 0, 3, 100, 30, }, + { 1, 1, 0, 3, 100, 30, }, + { 0, 1, 0, 3, 104, 30, }, + { 2, 1, 0, 3, 104, 30, }, + { 1, 1, 0, 3, 104, 30, }, + { 0, 1, 0, 3, 108, 30, }, + { 2, 1, 0, 3, 108, 30, }, + { 1, 1, 0, 3, 108, 30, }, + { 0, 1, 0, 3, 112, 30, }, + { 2, 1, 0, 3, 112, 30, }, + { 1, 1, 0, 3, 112, 30, }, + { 0, 1, 0, 3, 116, 30, }, + { 2, 1, 0, 3, 116, 30, }, + { 1, 1, 0, 3, 116, 30, }, + { 0, 1, 0, 3, 120, 30, }, + { 2, 1, 0, 3, 120, 30, }, + { 1, 1, 0, 3, 120, 30, }, + { 0, 1, 0, 3, 124, 30, }, + { 2, 1, 0, 3, 124, 30, }, + { 1, 1, 0, 3, 124, 30, }, + { 0, 1, 0, 3, 128, 30, }, + { 2, 1, 0, 3, 128, 30, }, + { 1, 1, 0, 3, 128, 30, }, + { 0, 1, 0, 3, 132, 30, }, + { 2, 1, 0, 3, 132, 30, }, + { 1, 1, 0, 3, 132, 
30, }, + { 0, 1, 0, 3, 136, 30, }, + { 2, 1, 0, 3, 136, 30, }, + { 1, 1, 0, 3, 136, 30, }, + { 0, 1, 0, 3, 140, 26, }, + { 2, 1, 0, 3, 140, 30, }, + { 1, 1, 0, 3, 140, 30, }, + { 0, 1, 0, 3, 149, 26, }, + { 2, 1, 0, 3, 149, 30, }, + { 1, 1, 0, 3, 149, 63, }, + { 0, 1, 0, 3, 153, 30, }, + { 2, 1, 0, 3, 153, 30, }, + { 1, 1, 0, 3, 153, 63, }, + { 0, 1, 0, 3, 157, 30, }, + { 2, 1, 0, 3, 157, 30, }, + { 1, 1, 0, 3, 157, 63, }, + { 0, 1, 0, 3, 161, 30, }, + { 2, 1, 0, 3, 161, 30, }, + { 1, 1, 0, 3, 161, 63, }, + { 0, 1, 0, 3, 165, 30, }, + { 2, 1, 0, 3, 165, 30, }, + { 1, 1, 0, 3, 165, 63, }, + { 0, 1, 0, 6, 36, 24, }, + { 2, 1, 0, 6, 36, 28, }, + { 1, 1, 0, 6, 36, 28, }, + { 0, 1, 0, 6, 40, 28, }, + { 2, 1, 0, 6, 40, 28, }, + { 1, 1, 0, 6, 40, 28, }, + { 0, 1, 0, 6, 44, 28, }, + { 2, 1, 0, 6, 44, 28, }, + { 1, 1, 0, 6, 44, 28, }, + { 0, 1, 0, 6, 48, 28, }, + { 2, 1, 0, 6, 48, 28, }, + { 1, 1, 0, 6, 48, 28, }, + { 0, 1, 0, 6, 52, 28, }, + { 2, 1, 0, 6, 52, 28, }, + { 1, 1, 0, 6, 52, 28, }, + { 0, 1, 0, 6, 56, 28, }, + { 2, 1, 0, 6, 56, 28, }, + { 1, 1, 0, 6, 56, 28, }, + { 0, 1, 0, 6, 60, 28, }, + { 2, 1, 0, 6, 60, 28, }, + { 1, 1, 0, 6, 60, 28, }, + { 0, 1, 0, 6, 64, 26, }, + { 2, 1, 0, 6, 64, 28, }, + { 1, 1, 0, 6, 64, 28, }, + { 0, 1, 0, 6, 100, 24, }, + { 2, 1, 0, 6, 100, 28, }, + { 1, 1, 0, 6, 100, 28, }, + { 0, 1, 0, 6, 104, 28, }, + { 2, 1, 0, 6, 104, 28, }, + { 1, 1, 0, 6, 104, 28, }, + { 0, 1, 0, 6, 108, 28, }, + { 2, 1, 0, 6, 108, 28, }, + { 1, 1, 0, 6, 108, 28, }, + { 0, 1, 0, 6, 112, 28, }, + { 2, 1, 0, 6, 112, 28, }, + { 1, 1, 0, 6, 112, 28, }, + { 0, 1, 0, 6, 116, 28, }, + { 2, 1, 0, 6, 116, 28, }, + { 1, 1, 0, 6, 116, 28, }, + { 0, 1, 0, 6, 120, 28, }, + { 2, 1, 0, 6, 120, 28, }, + { 1, 1, 0, 6, 120, 28, }, + { 0, 1, 0, 6, 124, 28, }, + { 2, 1, 0, 6, 124, 28, }, + { 1, 1, 0, 6, 124, 28, }, + { 0, 1, 0, 6, 128, 28, }, + { 2, 1, 0, 6, 128, 28, }, + { 1, 1, 0, 6, 128, 28, }, + { 0, 1, 0, 6, 132, 28, }, + { 2, 1, 0, 6, 132, 28, }, + { 1, 1, 0, 6, 132, 28, }, + { 0, 1, 0, 6, 136, 28, }, + { 2, 1, 0, 6, 136, 28, }, + { 1, 1, 0, 6, 136, 28, }, + { 0, 1, 0, 6, 140, 24, }, + { 2, 1, 0, 6, 140, 28, }, + { 1, 1, 0, 6, 140, 28, }, + { 0, 1, 0, 6, 149, 24, }, + { 2, 1, 0, 6, 149, 28, }, + { 1, 1, 0, 6, 149, 63, }, + { 0, 1, 0, 6, 153, 28, }, + { 2, 1, 0, 6, 153, 28, }, + { 1, 1, 0, 6, 153, 63, }, + { 0, 1, 0, 6, 157, 28, }, + { 2, 1, 0, 6, 157, 28, }, + { 1, 1, 0, 6, 157, 63, }, + { 0, 1, 0, 6, 161, 28, }, + { 2, 1, 0, 6, 161, 28, }, + { 1, 1, 0, 6, 161, 63, }, + { 0, 1, 0, 6, 165, 28, }, + { 2, 1, 0, 6, 165, 28, }, + { 1, 1, 0, 6, 165, 63, }, + { 0, 1, 0, 7, 36, 22, }, + { 2, 1, 0, 7, 36, 26, }, + { 1, 1, 0, 7, 36, 26, }, + { 0, 1, 0, 7, 40, 26, }, + { 2, 1, 0, 7, 40, 26, }, + { 1, 1, 0, 7, 40, 26, }, + { 0, 1, 0, 7, 44, 26, }, + { 2, 1, 0, 7, 44, 26, }, + { 1, 1, 0, 7, 44, 26, }, + { 0, 1, 0, 7, 48, 26, }, + { 2, 1, 0, 7, 48, 26, }, + { 1, 1, 0, 7, 48, 26, }, + { 0, 1, 0, 7, 52, 26, }, + { 2, 1, 0, 7, 52, 26, }, + { 1, 1, 0, 7, 52, 26, }, + { 0, 1, 0, 7, 56, 26, }, + { 2, 1, 0, 7, 56, 26, }, + { 1, 1, 0, 7, 56, 26, }, + { 0, 1, 0, 7, 60, 26, }, + { 2, 1, 0, 7, 60, 26, }, + { 1, 1, 0, 7, 60, 26, }, + { 0, 1, 0, 7, 64, 24, }, + { 2, 1, 0, 7, 64, 26, }, + { 1, 1, 0, 7, 64, 26, }, + { 0, 1, 0, 7, 100, 22, }, + { 2, 1, 0, 7, 100, 26, }, + { 1, 1, 0, 7, 100, 26, }, + { 0, 1, 0, 7, 104, 26, }, + { 2, 1, 0, 7, 104, 26, }, + { 1, 1, 0, 7, 104, 26, }, + { 0, 1, 0, 7, 108, 26, }, + { 2, 1, 0, 7, 108, 26, }, + { 1, 1, 0, 7, 108, 26, }, + { 0, 1, 0, 7, 112, 26, }, + { 2, 1, 0, 7, 112, 26, }, + { 1, 1, 
0, 7, 112, 26, }, + { 0, 1, 0, 7, 116, 26, }, + { 2, 1, 0, 7, 116, 26, }, + { 1, 1, 0, 7, 116, 26, }, + { 0, 1, 0, 7, 120, 26, }, + { 2, 1, 0, 7, 120, 26, }, + { 1, 1, 0, 7, 120, 26, }, + { 0, 1, 0, 7, 124, 26, }, + { 2, 1, 0, 7, 124, 26, }, + { 1, 1, 0, 7, 124, 26, }, + { 0, 1, 0, 7, 128, 26, }, + { 2, 1, 0, 7, 128, 26, }, + { 1, 1, 0, 7, 128, 26, }, + { 0, 1, 0, 7, 132, 26, }, + { 2, 1, 0, 7, 132, 26, }, + { 1, 1, 0, 7, 132, 26, }, + { 0, 1, 0, 7, 136, 26, }, + { 2, 1, 0, 7, 136, 26, }, + { 1, 1, 0, 7, 136, 26, }, + { 0, 1, 0, 7, 140, 22, }, + { 2, 1, 0, 7, 140, 26, }, + { 1, 1, 0, 7, 140, 26, }, + { 0, 1, 0, 7, 149, 22, }, + { 2, 1, 0, 7, 149, 26, }, + { 1, 1, 0, 7, 149, 63, }, + { 0, 1, 0, 7, 153, 26, }, + { 2, 1, 0, 7, 153, 26, }, + { 1, 1, 0, 7, 153, 63, }, + { 0, 1, 0, 7, 157, 26, }, + { 2, 1, 0, 7, 157, 26, }, + { 1, 1, 0, 7, 157, 63, }, + { 0, 1, 0, 7, 161, 26, }, + { 2, 1, 0, 7, 161, 26, }, + { 1, 1, 0, 7, 161, 63, }, + { 0, 1, 0, 7, 165, 26, }, + { 2, 1, 0, 7, 165, 26, }, + { 1, 1, 0, 7, 165, 63, }, + { 0, 1, 1, 2, 38, 28, }, + { 2, 1, 1, 2, 38, 32, }, + { 1, 1, 1, 2, 38, 32, }, + { 0, 1, 1, 2, 46, 32, }, + { 2, 1, 1, 2, 46, 32, }, + { 1, 1, 1, 2, 46, 32, }, + { 0, 1, 1, 2, 54, 32, }, + { 2, 1, 1, 2, 54, 32, }, + { 1, 1, 1, 2, 54, 32, }, + { 0, 1, 1, 2, 62, 28, }, + { 2, 1, 1, 2, 62, 32, }, + { 1, 1, 1, 2, 62, 32, }, + { 0, 1, 1, 2, 102, 28, }, + { 2, 1, 1, 2, 102, 32, }, + { 1, 1, 1, 2, 102, 32, }, + { 0, 1, 1, 2, 110, 32, }, + { 2, 1, 1, 2, 110, 32, }, + { 1, 1, 1, 2, 110, 32, }, + { 0, 1, 1, 2, 118, 32, }, + { 2, 1, 1, 2, 118, 32, }, + { 1, 1, 1, 2, 118, 32, }, + { 0, 1, 1, 2, 126, 32, }, + { 2, 1, 1, 2, 126, 32, }, + { 1, 1, 1, 2, 126, 32, }, + { 0, 1, 1, 2, 134, 30, }, + { 2, 1, 1, 2, 134, 32, }, + { 1, 1, 1, 2, 134, 32, }, + { 0, 1, 1, 2, 151, 28, }, + { 2, 1, 1, 2, 151, 32, }, + { 1, 1, 1, 2, 151, 63, }, + { 0, 1, 1, 2, 159, 32, }, + { 2, 1, 1, 2, 159, 32, }, + { 1, 1, 1, 2, 159, 63, }, + { 0, 1, 1, 3, 38, 26, }, + { 2, 1, 1, 3, 38, 30, }, + { 1, 1, 1, 3, 38, 30, }, + { 0, 1, 1, 3, 46, 30, }, + { 2, 1, 1, 3, 46, 30, }, + { 1, 1, 1, 3, 46, 30, }, + { 0, 1, 1, 3, 54, 30, }, + { 2, 1, 1, 3, 54, 30, }, + { 1, 1, 1, 3, 54, 30, }, + { 0, 1, 1, 3, 62, 26, }, + { 2, 1, 1, 3, 62, 30, }, + { 1, 1, 1, 3, 62, 30, }, + { 0, 1, 1, 3, 102, 26, }, + { 2, 1, 1, 3, 102, 30, }, + { 1, 1, 1, 3, 102, 30, }, + { 0, 1, 1, 3, 110, 30, }, + { 2, 1, 1, 3, 110, 30, }, + { 1, 1, 1, 3, 110, 30, }, + { 0, 1, 1, 3, 118, 30, }, + { 2, 1, 1, 3, 118, 30, }, + { 1, 1, 1, 3, 118, 30, }, + { 0, 1, 1, 3, 126, 30, }, + { 2, 1, 1, 3, 126, 30, }, + { 1, 1, 1, 3, 126, 30, }, + { 0, 1, 1, 3, 134, 28, }, + { 2, 1, 1, 3, 134, 30, }, + { 1, 1, 1, 3, 134, 30, }, + { 0, 1, 1, 3, 151, 26, }, + { 2, 1, 1, 3, 151, 30, }, + { 1, 1, 1, 3, 151, 63, }, + { 0, 1, 1, 3, 159, 30, }, + { 2, 1, 1, 3, 159, 30, }, + { 1, 1, 1, 3, 159, 63, }, + { 0, 1, 1, 6, 38, 20, }, + { 2, 1, 1, 6, 38, 28, }, + { 1, 1, 1, 6, 38, 28, }, + { 0, 1, 1, 6, 46, 28, }, + { 2, 1, 1, 6, 46, 28, }, + { 1, 1, 1, 6, 46, 28, }, + { 0, 1, 1, 6, 54, 28, }, + { 2, 1, 1, 6, 54, 28, }, + { 1, 1, 1, 6, 54, 28, }, + { 0, 1, 1, 6, 62, 20, }, + { 2, 1, 1, 6, 62, 28, }, + { 1, 1, 1, 6, 62, 28, }, + { 0, 1, 1, 6, 102, 22, }, + { 2, 1, 1, 6, 102, 28, }, + { 1, 1, 1, 6, 102, 28, }, + { 0, 1, 1, 6, 110, 28, }, + { 2, 1, 1, 6, 110, 28, }, + { 1, 1, 1, 6, 110, 28, }, + { 0, 1, 1, 6, 118, 28, }, + { 2, 1, 1, 6, 118, 28, }, + { 1, 1, 1, 6, 118, 28, }, + { 0, 1, 1, 6, 126, 28, }, + { 2, 1, 1, 6, 126, 28, }, + { 1, 1, 1, 6, 126, 28, }, + { 0, 1, 1, 6, 134, 26, }, + { 2, 1, 1, 6, 
134, 28, }, + { 1, 1, 1, 6, 134, 28, }, + { 0, 1, 1, 6, 151, 22, }, + { 2, 1, 1, 6, 151, 28, }, + { 1, 1, 1, 6, 151, 63, }, + { 0, 1, 1, 6, 159, 28, }, + { 2, 1, 1, 6, 159, 28, }, + { 1, 1, 1, 6, 159, 63, }, + { 0, 1, 1, 7, 38, 18, }, + { 2, 1, 1, 7, 38, 26, }, + { 1, 1, 1, 7, 38, 26, }, + { 0, 1, 1, 7, 46, 26, }, + { 2, 1, 1, 7, 46, 26, }, + { 1, 1, 1, 7, 46, 26, }, + { 0, 1, 1, 7, 54, 26, }, + { 2, 1, 1, 7, 54, 26, }, + { 1, 1, 1, 7, 54, 26, }, + { 0, 1, 1, 7, 62, 18, }, + { 2, 1, 1, 7, 62, 26, }, + { 1, 1, 1, 7, 62, 26, }, + { 0, 1, 1, 7, 102, 20, }, + { 2, 1, 1, 7, 102, 26, }, + { 1, 1, 1, 7, 102, 26, }, + { 0, 1, 1, 7, 110, 26, }, + { 2, 1, 1, 7, 110, 26, }, + { 1, 1, 1, 7, 110, 26, }, + { 0, 1, 1, 7, 118, 26, }, + { 2, 1, 1, 7, 118, 26, }, + { 1, 1, 1, 7, 118, 26, }, + { 0, 1, 1, 7, 126, 26, }, + { 2, 1, 1, 7, 126, 26, }, + { 1, 1, 1, 7, 126, 26, }, + { 0, 1, 1, 7, 134, 24, }, + { 2, 1, 1, 7, 134, 26, }, + { 1, 1, 1, 7, 134, 26, }, + { 0, 1, 1, 7, 151, 20, }, + { 2, 1, 1, 7, 151, 26, }, + { 1, 1, 1, 7, 151, 63, }, + { 0, 1, 1, 7, 159, 26, }, + { 2, 1, 1, 7, 159, 26, }, + { 1, 1, 1, 7, 159, 63, }, + { 0, 1, 2, 4, 42, 24, }, + { 2, 1, 2, 4, 42, 32, }, + { 1, 1, 2, 4, 42, 32, }, + { 0, 1, 2, 4, 58, 24, }, + { 2, 1, 2, 4, 58, 32, }, + { 1, 1, 2, 4, 58, 32, }, + { 0, 1, 2, 4, 106, 24, }, + { 2, 1, 2, 4, 106, 32, }, + { 1, 1, 2, 4, 106, 32, }, + { 0, 1, 2, 4, 122, 32, }, + { 2, 1, 2, 4, 122, 32, }, + { 1, 1, 2, 4, 122, 32, }, + { 0, 1, 2, 4, 155, 26, }, + { 2, 1, 2, 4, 155, 32, }, + { 1, 1, 2, 4, 155, 63, }, + { 0, 1, 2, 5, 42, 22, }, + { 2, 1, 2, 5, 42, 30, }, + { 1, 1, 2, 5, 42, 30, }, + { 0, 1, 2, 5, 58, 22, }, + { 2, 1, 2, 5, 58, 30, }, + { 1, 1, 2, 5, 58, 30, }, + { 0, 1, 2, 5, 106, 22, }, + { 2, 1, 2, 5, 106, 30, }, + { 1, 1, 2, 5, 106, 30, }, + { 0, 1, 2, 5, 122, 30, }, + { 2, 1, 2, 5, 122, 30, }, + { 1, 1, 2, 5, 122, 30, }, + { 0, 1, 2, 5, 155, 24, }, + { 2, 1, 2, 5, 155, 30, }, + { 1, 1, 2, 5, 155, 63, }, + { 0, 1, 2, 8, 42, 20, }, + { 2, 1, 2, 8, 42, 28, }, + { 1, 1, 2, 8, 42, 28, }, + { 0, 1, 2, 8, 58, 20, }, + { 2, 1, 2, 8, 58, 28, }, + { 1, 1, 2, 8, 58, 28, }, + { 0, 1, 2, 8, 106, 20, }, + { 2, 1, 2, 8, 106, 28, }, + { 1, 1, 2, 8, 106, 28, }, + { 0, 1, 2, 8, 122, 28, }, + { 2, 1, 2, 8, 122, 28, }, + { 1, 1, 2, 8, 122, 28, }, + { 0, 1, 2, 8, 155, 20, }, + { 2, 1, 2, 8, 155, 28, }, + { 1, 1, 2, 8, 155, 63, }, + { 0, 1, 2, 9, 42, 18, }, + { 2, 1, 2, 9, 42, 26, }, + { 1, 1, 2, 9, 42, 26, }, + { 0, 1, 2, 9, 58, 18, }, + { 2, 1, 2, 9, 58, 26, }, + { 1, 1, 2, 9, 58, 26, }, + { 0, 1, 2, 9, 106, 18, }, + { 2, 1, 2, 9, 106, 26, }, + { 1, 1, 2, 9, 106, 26, }, + { 0, 1, 2, 9, 122, 26, }, + { 2, 1, 2, 9, 122, 26, }, + { 1, 1, 2, 9, 122, 26, }, + { 0, 1, 2, 9, 155, 18, }, + { 2, 1, 2, 9, 155, 26, }, + { 1, 1, 2, 9, 155, 63, },
+};
+
+RTW_DECL_TABLE_TXPWR_LMT(rtw8814a_txpwr_lmt_type1);
+
+static const struct rtw_txpwr_lmt_cfg_pair rtw8814a_txpwr_lmt_type2[] = {
+ { 0, 0, 0, 0, 1, 42, }, + { 2, 0, 0, 0, 1, 42, }, + { 1, 0, 0, 0, 1, 42, }, + { 0, 0, 0, 0, 2, 50, }, + { 2, 0, 0, 0, 2, 42, }, + { 1, 0, 0, 0, 2, 42, }, + { 0, 0, 0, 0, 3, 50, }, + { 2, 0, 0, 0, 3, 42, }, + { 1, 0, 0, 0, 3, 42, }, + { 0, 0, 0, 0, 4, 50, }, + { 2, 0, 0, 0, 4, 42, }, + { 1, 0, 0, 0, 4, 42, }, + { 0, 0, 0, 0, 5, 50, }, + { 2, 0, 0, 0, 5, 42, }, + { 1, 0, 0, 0, 5, 42, }, + { 0, 0, 0, 0, 6, 50, }, + { 2, 0, 0, 0, 6, 42, }, + { 1, 0, 0, 0, 6, 42, }, + { 0, 0, 0, 0, 7, 50, }, + { 2, 0, 0, 0, 7, 42, }, + { 1, 0, 0, 0, 7, 42, }, + { 0, 0, 0, 0, 8, 50, }, + { 2, 0, 0, 0, 8, 42, }, + { 1, 0, 0, 0, 8, 42, }, + { 0, 0, 0, 0, 9,
50, }, + { 2, 0, 0, 0, 9, 42, }, + { 1, 0, 0, 0, 9, 42, }, + { 0, 0, 0, 0, 10, 50, }, + { 2, 0, 0, 0, 10, 42, }, + { 1, 0, 0, 0, 10, 42, }, + { 0, 0, 0, 0, 11, 44, }, + { 2, 0, 0, 0, 11, 42, }, + { 1, 0, 0, 0, 11, 42, }, + { 0, 0, 0, 0, 12, 63, }, + { 2, 0, 0, 0, 12, 42, }, + { 1, 0, 0, 0, 12, 42, }, + { 0, 0, 0, 0, 13, 63, }, + { 2, 0, 0, 0, 13, 42, }, + { 1, 0, 0, 0, 13, 42, }, + { 0, 0, 0, 0, 14, 63, }, + { 2, 0, 0, 0, 14, 63, }, + { 1, 0, 0, 0, 14, 42, }, + { 0, 0, 0, 1, 1, 32, }, + { 2, 0, 0, 1, 1, 42, }, + { 1, 0, 0, 1, 1, 42, }, + { 0, 0, 0, 1, 2, 42, }, + { 2, 0, 0, 1, 2, 42, }, + { 1, 0, 0, 1, 2, 42, }, + { 0, 0, 0, 1, 3, 42, }, + { 2, 0, 0, 1, 3, 42, }, + { 1, 0, 0, 1, 3, 42, }, + { 0, 0, 0, 1, 4, 42, }, + { 2, 0, 0, 1, 4, 42, }, + { 1, 0, 0, 1, 4, 42, }, + { 0, 0, 0, 1, 5, 42, }, + { 2, 0, 0, 1, 5, 42, }, + { 1, 0, 0, 1, 5, 42, }, + { 0, 0, 0, 1, 6, 42, }, + { 2, 0, 0, 1, 6, 42, }, + { 1, 0, 0, 1, 6, 42, }, + { 0, 0, 0, 1, 7, 42, }, + { 2, 0, 0, 1, 7, 42, }, + { 1, 0, 0, 1, 7, 42, }, + { 0, 0, 0, 1, 8, 42, }, + { 2, 0, 0, 1, 8, 42, }, + { 1, 0, 0, 1, 8, 42, }, + { 0, 0, 0, 1, 9, 42, }, + { 2, 0, 0, 1, 9, 42, }, + { 1, 0, 0, 1, 9, 42, }, + { 0, 0, 0, 1, 10, 42, }, + { 2, 0, 0, 1, 10, 42, }, + { 1, 0, 0, 1, 10, 42, }, + { 0, 0, 0, 1, 11, 32, }, + { 2, 0, 0, 1, 11, 42, }, + { 1, 0, 0, 1, 11, 42, }, + { 0, 0, 0, 1, 12, 63, }, + { 2, 0, 0, 1, 12, 42, }, + { 1, 0, 0, 1, 12, 42, }, + { 0, 0, 0, 1, 13, 63, }, + { 2, 0, 0, 1, 13, 42, }, + { 1, 0, 0, 1, 13, 42, }, + { 0, 0, 0, 1, 14, 63, }, + { 2, 0, 0, 1, 14, 63, }, + { 1, 0, 0, 1, 14, 63, }, + { 0, 0, 0, 2, 1, 32, }, + { 2, 0, 0, 2, 1, 42, }, + { 1, 0, 0, 2, 1, 42, }, + { 0, 0, 0, 2, 2, 40, }, + { 2, 0, 0, 2, 2, 42, }, + { 1, 0, 0, 2, 2, 42, }, + { 0, 0, 0, 2, 3, 40, }, + { 2, 0, 0, 2, 3, 42, }, + { 1, 0, 0, 2, 3, 42, }, + { 0, 0, 0, 2, 4, 40, }, + { 2, 0, 0, 2, 4, 42, }, + { 1, 0, 0, 2, 4, 42, }, + { 0, 0, 0, 2, 5, 40, }, + { 2, 0, 0, 2, 5, 42, }, + { 1, 0, 0, 2, 5, 42, }, + { 0, 0, 0, 2, 6, 40, }, + { 2, 0, 0, 2, 6, 42, }, + { 1, 0, 0, 2, 6, 42, }, + { 0, 0, 0, 2, 7, 40, }, + { 2, 0, 0, 2, 7, 42, }, + { 1, 0, 0, 2, 7, 42, }, + { 0, 0, 0, 2, 8, 40, }, + { 2, 0, 0, 2, 8, 42, }, + { 1, 0, 0, 2, 8, 42, }, + { 0, 0, 0, 2, 9, 40, }, + { 2, 0, 0, 2, 9, 42, }, + { 1, 0, 0, 2, 9, 42, }, + { 0, 0, 0, 2, 10, 40, }, + { 2, 0, 0, 2, 10, 42, }, + { 1, 0, 0, 2, 10, 42, }, + { 0, 0, 0, 2, 11, 28, }, + { 2, 0, 0, 2, 11, 42, }, + { 1, 0, 0, 2, 11, 42, }, + { 0, 0, 0, 2, 12, 63, }, + { 2, 0, 0, 2, 12, 42, }, + { 1, 0, 0, 2, 12, 42, }, + { 0, 0, 0, 2, 13, 63, }, + { 2, 0, 0, 2, 13, 42, }, + { 1, 0, 0, 2, 13, 42, }, + { 0, 0, 0, 2, 14, 63, }, + { 2, 0, 0, 2, 14, 63, }, + { 1, 0, 0, 2, 14, 63, }, + { 0, 0, 0, 3, 1, 32, }, + { 2, 0, 0, 3, 1, 40, }, + { 1, 0, 0, 3, 1, 40, }, + { 0, 0, 0, 3, 2, 40, }, + { 2, 0, 0, 3, 2, 40, }, + { 1, 0, 0, 3, 2, 40, }, + { 0, 0, 0, 3, 3, 40, }, + { 2, 0, 0, 3, 3, 40, }, + { 1, 0, 0, 3, 3, 40, }, + { 0, 0, 0, 3, 4, 40, }, + { 2, 0, 0, 3, 4, 40, }, + { 1, 0, 0, 3, 4, 40, }, + { 0, 0, 0, 3, 5, 40, }, + { 2, 0, 0, 3, 5, 40, }, + { 1, 0, 0, 3, 5, 40, }, + { 0, 0, 0, 3, 6, 40, }, + { 2, 0, 0, 3, 6, 40, }, + { 1, 0, 0, 3, 6, 40, }, + { 0, 0, 0, 3, 7, 40, }, + { 2, 0, 0, 3, 7, 40, }, + { 1, 0, 0, 3, 7, 40, }, + { 0, 0, 0, 3, 8, 40, }, + { 2, 0, 0, 3, 8, 40, }, + { 1, 0, 0, 3, 8, 40, }, + { 0, 0, 0, 3, 9, 40, }, + { 2, 0, 0, 3, 9, 40, }, + { 1, 0, 0, 3, 9, 40, }, + { 0, 0, 0, 3, 10, 40, }, + { 2, 0, 0, 3, 10, 40, }, + { 1, 0, 0, 3, 10, 40, }, + { 0, 0, 0, 3, 11, 28, }, + { 2, 0, 0, 3, 11, 40, }, + { 1, 0, 0, 3, 11, 40, }, + { 0, 0, 0, 
3, 12, 63, }, + { 2, 0, 0, 3, 12, 40, }, + { 1, 0, 0, 3, 12, 40, }, + { 0, 0, 0, 3, 13, 63, }, + { 2, 0, 0, 3, 13, 40, }, + { 1, 0, 0, 3, 13, 40, }, + { 0, 0, 0, 3, 14, 63, }, + { 2, 0, 0, 3, 14, 63, }, + { 1, 0, 0, 3, 14, 63, }, + { 0, 0, 0, 6, 1, 32, }, + { 2, 0, 0, 6, 1, 40, }, + { 1, 0, 0, 6, 1, 40, }, + { 0, 0, 0, 6, 2, 40, }, + { 2, 0, 0, 6, 2, 40, }, + { 1, 0, 0, 6, 2, 40, }, + { 0, 0, 0, 6, 3, 40, }, + { 2, 0, 0, 6, 3, 40, }, + { 1, 0, 0, 6, 3, 40, }, + { 0, 0, 0, 6, 4, 40, }, + { 2, 0, 0, 6, 4, 40, }, + { 1, 0, 0, 6, 4, 40, }, + { 0, 0, 0, 6, 5, 40, }, + { 2, 0, 0, 6, 5, 40, }, + { 1, 0, 0, 6, 5, 40, }, + { 0, 0, 0, 6, 6, 40, }, + { 2, 0, 0, 6, 6, 40, }, + { 1, 0, 0, 6, 6, 40, }, + { 0, 0, 0, 6, 7, 40, }, + { 2, 0, 0, 6, 7, 40, }, + { 1, 0, 0, 6, 7, 40, }, + { 0, 0, 0, 6, 8, 40, }, + { 2, 0, 0, 6, 8, 40, }, + { 1, 0, 0, 6, 8, 40, }, + { 0, 0, 0, 6, 9, 40, }, + { 2, 0, 0, 6, 9, 40, }, + { 1, 0, 0, 6, 9, 40, }, + { 0, 0, 0, 6, 10, 40, }, + { 2, 0, 0, 6, 10, 40, }, + { 1, 0, 0, 6, 10, 40, }, + { 0, 0, 0, 6, 11, 28, }, + { 2, 0, 0, 6, 11, 40, }, + { 1, 0, 0, 6, 11, 40, }, + { 0, 0, 0, 6, 12, 63, }, + { 2, 0, 0, 6, 12, 40, }, + { 1, 0, 0, 6, 12, 40, }, + { 0, 0, 0, 6, 13, 63, }, + { 2, 0, 0, 6, 13, 40, }, + { 1, 0, 0, 6, 13, 40, }, + { 0, 0, 0, 6, 14, 63, }, + { 2, 0, 0, 6, 14, 63, }, + { 1, 0, 0, 6, 14, 63, }, + { 0, 0, 0, 7, 1, 32, }, + { 2, 0, 0, 7, 1, 40, }, + { 1, 0, 0, 7, 1, 40, }, + { 0, 0, 0, 7, 2, 40, }, + { 2, 0, 0, 7, 2, 40, }, + { 1, 0, 0, 7, 2, 40, }, + { 0, 0, 0, 7, 3, 40, }, + { 2, 0, 0, 7, 3, 40, }, + { 1, 0, 0, 7, 3, 40, }, + { 0, 0, 0, 7, 4, 40, }, + { 2, 0, 0, 7, 4, 40, }, + { 1, 0, 0, 7, 4, 40, }, + { 0, 0, 0, 7, 5, 40, }, + { 2, 0, 0, 7, 5, 40, }, + { 1, 0, 0, 7, 5, 40, }, + { 0, 0, 0, 7, 6, 40, }, + { 2, 0, 0, 7, 6, 40, }, + { 1, 0, 0, 7, 6, 40, }, + { 0, 0, 0, 7, 7, 40, }, + { 2, 0, 0, 7, 7, 40, }, + { 1, 0, 0, 7, 7, 40, }, + { 0, 0, 0, 7, 8, 40, }, + { 2, 0, 0, 7, 8, 40, }, + { 1, 0, 0, 7, 8, 40, }, + { 0, 0, 0, 7, 9, 40, }, + { 2, 0, 0, 7, 9, 40, }, + { 1, 0, 0, 7, 9, 40, }, + { 0, 0, 0, 7, 10, 40, }, + { 2, 0, 0, 7, 10, 40, }, + { 1, 0, 0, 7, 10, 40, }, + { 0, 0, 0, 7, 11, 28, }, + { 2, 0, 0, 7, 11, 40, }, + { 1, 0, 0, 7, 11, 40, }, + { 0, 0, 0, 7, 12, 63, }, + { 2, 0, 0, 7, 12, 40, }, + { 1, 0, 0, 7, 12, 40, }, + { 0, 0, 0, 7, 13, 63, }, + { 2, 0, 0, 7, 13, 40, }, + { 1, 0, 0, 7, 13, 40, }, + { 0, 0, 0, 7, 14, 63, }, + { 2, 0, 0, 7, 14, 63, }, + { 1, 0, 0, 7, 14, 63, }, + { 0, 0, 1, 2, 1, 63, }, + { 2, 0, 1, 2, 1, 63, }, + { 1, 0, 1, 2, 1, 63, }, + { 0, 0, 1, 2, 2, 63, }, + { 2, 0, 1, 2, 2, 63, }, + { 1, 0, 1, 2, 2, 63, }, + { 0, 0, 1, 2, 3, 30, }, + { 2, 0, 1, 2, 3, 34, }, + { 1, 0, 1, 2, 3, 34, }, + { 0, 0, 1, 2, 4, 34, }, + { 2, 0, 1, 2, 4, 34, }, + { 1, 0, 1, 2, 4, 34, }, + { 0, 0, 1, 2, 5, 34, }, + { 2, 0, 1, 2, 5, 34, }, + { 1, 0, 1, 2, 5, 34, }, + { 0, 0, 1, 2, 6, 34, }, + { 2, 0, 1, 2, 6, 34, }, + { 1, 0, 1, 2, 6, 34, }, + { 0, 0, 1, 2, 7, 34, }, + { 2, 0, 1, 2, 7, 34, }, + { 1, 0, 1, 2, 7, 34, }, + { 0, 0, 1, 2, 8, 34, }, + { 2, 0, 1, 2, 8, 34, }, + { 1, 0, 1, 2, 8, 34, }, + { 0, 0, 1, 2, 9, 34, }, + { 2, 0, 1, 2, 9, 34, }, + { 1, 0, 1, 2, 9, 34, }, + { 0, 0, 1, 2, 10, 34, }, + { 2, 0, 1, 2, 10, 34, }, + { 1, 0, 1, 2, 10, 34, }, + { 0, 0, 1, 2, 11, 28, }, + { 2, 0, 1, 2, 11, 34, }, + { 1, 0, 1, 2, 11, 34, }, + { 0, 0, 1, 2, 12, 63, }, + { 2, 0, 1, 2, 12, 34, }, + { 1, 0, 1, 2, 12, 34, }, + { 0, 0, 1, 2, 13, 63, }, + { 2, 0, 1, 2, 13, 34, }, + { 1, 0, 1, 2, 13, 34, }, + { 0, 0, 1, 2, 14, 63, }, + { 2, 0, 1, 2, 14, 63, }, + { 1, 0, 1, 2, 14, 63, }, + { 
0, 0, 1, 3, 1, 63, }, + { 2, 0, 1, 3, 1, 63, }, + { 1, 0, 1, 3, 1, 63, }, + { 0, 0, 1, 3, 2, 63, }, + { 2, 0, 1, 3, 2, 63, }, + { 1, 0, 1, 3, 2, 63, }, + { 0, 0, 1, 3, 3, 30, }, + { 2, 0, 1, 3, 3, 34, }, + { 1, 0, 1, 3, 3, 34, }, + { 0, 0, 1, 3, 4, 34, }, + { 2, 0, 1, 3, 4, 34, }, + { 1, 0, 1, 3, 4, 34, }, + { 0, 0, 1, 3, 5, 34, }, + { 2, 0, 1, 3, 5, 34, }, + { 1, 0, 1, 3, 5, 34, }, + { 0, 0, 1, 3, 6, 34, }, + { 2, 0, 1, 3, 6, 34, }, + { 1, 0, 1, 3, 6, 34, }, + { 0, 0, 1, 3, 7, 34, }, + { 2, 0, 1, 3, 7, 34, }, + { 1, 0, 1, 3, 7, 34, }, + { 0, 0, 1, 3, 8, 34, }, + { 2, 0, 1, 3, 8, 34, }, + { 1, 0, 1, 3, 8, 34, }, + { 0, 0, 1, 3, 9, 34, }, + { 2, 0, 1, 3, 9, 34, }, + { 1, 0, 1, 3, 9, 34, }, + { 0, 0, 1, 3, 10, 34, }, + { 2, 0, 1, 3, 10, 34, }, + { 1, 0, 1, 3, 10, 34, }, + { 0, 0, 1, 3, 11, 28, }, + { 2, 0, 1, 3, 11, 34, }, + { 1, 0, 1, 3, 11, 34, }, + { 0, 0, 1, 3, 12, 63, }, + { 2, 0, 1, 3, 12, 34, }, + { 1, 0, 1, 3, 12, 34, }, + { 0, 0, 1, 3, 13, 63, }, + { 2, 0, 1, 3, 13, 34, }, + { 1, 0, 1, 3, 13, 34, }, + { 0, 0, 1, 3, 14, 63, }, + { 2, 0, 1, 3, 14, 63, }, + { 1, 0, 1, 3, 14, 63, }, + { 0, 0, 1, 6, 1, 63, }, + { 2, 0, 1, 6, 1, 63, }, + { 1, 0, 1, 6, 1, 63, }, + { 0, 0, 1, 6, 2, 63, }, + { 2, 0, 1, 6, 2, 63, }, + { 1, 0, 1, 6, 2, 63, }, + { 0, 0, 1, 6, 3, 30, }, + { 2, 0, 1, 6, 3, 34, }, + { 1, 0, 1, 6, 3, 34, }, + { 0, 0, 1, 6, 4, 34, }, + { 2, 0, 1, 6, 4, 34, }, + { 1, 0, 1, 6, 4, 34, }, + { 0, 0, 1, 6, 5, 34, }, + { 2, 0, 1, 6, 5, 34, }, + { 1, 0, 1, 6, 5, 34, }, + { 0, 0, 1, 6, 6, 34, }, + { 2, 0, 1, 6, 6, 34, }, + { 1, 0, 1, 6, 6, 34, }, + { 0, 0, 1, 6, 7, 34, }, + { 2, 0, 1, 6, 7, 34, }, + { 1, 0, 1, 6, 7, 34, }, + { 0, 0, 1, 6, 8, 34, }, + { 2, 0, 1, 6, 8, 34, }, + { 1, 0, 1, 6, 8, 34, }, + { 0, 0, 1, 6, 9, 34, }, + { 2, 0, 1, 6, 9, 34, }, + { 1, 0, 1, 6, 9, 34, }, + { 0, 0, 1, 6, 10, 34, }, + { 2, 0, 1, 6, 10, 34, }, + { 1, 0, 1, 6, 10, 34, }, + { 0, 0, 1, 6, 11, 28, }, + { 2, 0, 1, 6, 11, 34, }, + { 1, 0, 1, 6, 11, 34, }, + { 0, 0, 1, 6, 12, 63, }, + { 2, 0, 1, 6, 12, 34, }, + { 1, 0, 1, 6, 12, 34, }, + { 0, 0, 1, 6, 13, 63, }, + { 2, 0, 1, 6, 13, 34, }, + { 1, 0, 1, 6, 13, 34, }, + { 0, 0, 1, 6, 14, 63, }, + { 2, 0, 1, 6, 14, 63, }, + { 1, 0, 1, 6, 14, 63, }, + { 0, 0, 1, 7, 1, 63, }, + { 2, 0, 1, 7, 1, 63, }, + { 1, 0, 1, 7, 1, 63, }, + { 0, 0, 1, 7, 2, 63, }, + { 2, 0, 1, 7, 2, 63, }, + { 1, 0, 1, 7, 2, 63, }, + { 0, 0, 1, 7, 3, 30, }, + { 2, 0, 1, 7, 3, 34, }, + { 1, 0, 1, 7, 3, 34, }, + { 0, 0, 1, 7, 4, 34, }, + { 2, 0, 1, 7, 4, 34, }, + { 1, 0, 1, 7, 4, 34, }, + { 0, 0, 1, 7, 5, 34, }, + { 2, 0, 1, 7, 5, 34, }, + { 1, 0, 1, 7, 5, 34, }, + { 0, 0, 1, 7, 6, 34, }, + { 2, 0, 1, 7, 6, 34, }, + { 1, 0, 1, 7, 6, 34, }, + { 0, 0, 1, 7, 7, 34, }, + { 2, 0, 1, 7, 7, 34, }, + { 1, 0, 1, 7, 7, 34, }, + { 0, 0, 1, 7, 8, 34, }, + { 2, 0, 1, 7, 8, 34, }, + { 1, 0, 1, 7, 8, 34, }, + { 0, 0, 1, 7, 9, 34, }, + { 2, 0, 1, 7, 9, 34, }, + { 1, 0, 1, 7, 9, 34, }, + { 0, 0, 1, 7, 10, 34, }, + { 2, 0, 1, 7, 10, 34, }, + { 1, 0, 1, 7, 10, 34, }, + { 0, 0, 1, 7, 11, 28, }, + { 2, 0, 1, 7, 11, 34, }, + { 1, 0, 1, 7, 11, 34, }, + { 0, 0, 1, 7, 12, 63, }, + { 2, 0, 1, 7, 12, 34, }, + { 1, 0, 1, 7, 12, 34, }, + { 0, 0, 1, 7, 13, 63, }, + { 2, 0, 1, 7, 13, 34, }, + { 1, 0, 1, 7, 13, 34, }, + { 0, 0, 1, 7, 14, 63, }, + { 2, 0, 1, 7, 14, 63, }, + { 1, 0, 1, 7, 14, 63, }, + { 0, 1, 0, 1, 36, 42, }, + { 2, 1, 0, 1, 36, 42, }, + { 1, 1, 0, 1, 36, 42, }, + { 0, 1, 0, 1, 40, 42, }, + { 2, 1, 0, 1, 40, 42, }, + { 1, 1, 0, 1, 40, 42, }, + { 0, 1, 0, 1, 44, 42, }, + { 2, 1, 0, 1, 44, 42, }, + { 1, 1, 0, 1, 44, 
42, }, + { 0, 1, 0, 1, 48, 42, }, + { 2, 1, 0, 1, 48, 42, }, + { 1, 1, 0, 1, 48, 42, }, + { 0, 1, 0, 1, 52, 42, }, + { 2, 1, 0, 1, 52, 42, }, + { 1, 1, 0, 1, 52, 42, }, + { 0, 1, 0, 1, 56, 42, }, + { 2, 1, 0, 1, 56, 42, }, + { 1, 1, 0, 1, 56, 42, }, + { 0, 1, 0, 1, 60, 42, }, + { 2, 1, 0, 1, 60, 42, }, + { 1, 1, 0, 1, 60, 42, }, + { 0, 1, 0, 1, 64, 42, }, + { 2, 1, 0, 1, 64, 42, }, + { 1, 1, 0, 1, 64, 42, }, + { 0, 1, 0, 1, 100, 42, }, + { 2, 1, 0, 1, 100, 42, }, + { 1, 1, 0, 1, 100, 42, }, + { 0, 1, 0, 1, 104, 42, }, + { 2, 1, 0, 1, 104, 42, }, + { 1, 1, 0, 1, 104, 42, }, + { 0, 1, 0, 1, 108, 42, }, + { 2, 1, 0, 1, 108, 42, }, + { 1, 1, 0, 1, 108, 42, }, + { 0, 1, 0, 1, 112, 42, }, + { 2, 1, 0, 1, 112, 42, }, + { 1, 1, 0, 1, 112, 42, }, + { 0, 1, 0, 1, 116, 42, }, + { 2, 1, 0, 1, 116, 42, }, + { 1, 1, 0, 1, 116, 42, }, + { 0, 1, 0, 1, 120, 42, }, + { 2, 1, 0, 1, 120, 42, }, + { 1, 1, 0, 1, 120, 42, }, + { 0, 1, 0, 1, 124, 42, }, + { 2, 1, 0, 1, 124, 42, }, + { 1, 1, 0, 1, 124, 42, }, + { 0, 1, 0, 1, 128, 42, }, + { 2, 1, 0, 1, 128, 42, }, + { 1, 1, 0, 1, 128, 42, }, + { 0, 1, 0, 1, 132, 42, }, + { 2, 1, 0, 1, 132, 42, }, + { 1, 1, 0, 1, 132, 42, }, + { 0, 1, 0, 1, 136, 42, }, + { 2, 1, 0, 1, 136, 42, }, + { 1, 1, 0, 1, 136, 42, }, + { 0, 1, 0, 1, 140, 40, }, + { 2, 1, 0, 1, 140, 40, }, + { 1, 1, 0, 1, 140, 40, }, + { 0, 1, 0, 1, 149, 44, }, + { 2, 1, 0, 1, 149, 44, }, + { 1, 1, 0, 1, 149, 63, }, + { 0, 1, 0, 1, 153, 44, }, + { 2, 1, 0, 1, 153, 44, }, + { 1, 1, 0, 1, 153, 63, }, + { 0, 1, 0, 1, 157, 44, }, + { 2, 1, 0, 1, 157, 44, }, + { 1, 1, 0, 1, 157, 63, }, + { 0, 1, 0, 1, 161, 44, }, + { 2, 1, 0, 1, 161, 44, }, + { 1, 1, 0, 1, 161, 63, }, + { 0, 1, 0, 1, 165, 44, }, + { 2, 1, 0, 1, 165, 44, }, + { 1, 1, 0, 1, 165, 63, }, + { 0, 1, 0, 2, 36, 32, }, + { 2, 1, 0, 2, 36, 32, }, + { 1, 1, 0, 2, 36, 32, }, + { 0, 1, 0, 2, 40, 32, }, + { 2, 1, 0, 2, 40, 32, }, + { 1, 1, 0, 2, 40, 32, }, + { 0, 1, 0, 2, 44, 32, }, + { 2, 1, 0, 2, 44, 32, }, + { 1, 1, 0, 2, 44, 32, }, + { 0, 1, 0, 2, 48, 36, }, + { 2, 1, 0, 2, 48, 36, }, + { 1, 1, 0, 2, 48, 36, }, + { 0, 1, 0, 2, 52, 36, }, + { 2, 1, 0, 2, 52, 36, }, + { 1, 1, 0, 2, 52, 36, }, + { 0, 1, 0, 2, 56, 32, }, + { 2, 1, 0, 2, 56, 32, }, + { 1, 1, 0, 2, 56, 32, }, + { 0, 1, 0, 2, 60, 32, }, + { 2, 1, 0, 2, 60, 32, }, + { 1, 1, 0, 2, 60, 32, }, + { 0, 1, 0, 2, 64, 32, }, + { 2, 1, 0, 2, 64, 32, }, + { 1, 1, 0, 2, 64, 32, }, + { 0, 1, 0, 2, 100, 32, }, + { 2, 1, 0, 2, 100, 32, }, + { 1, 1, 0, 2, 100, 32, }, + { 0, 1, 0, 2, 104, 32, }, + { 2, 1, 0, 2, 104, 32, }, + { 1, 1, 0, 2, 104, 32, }, + { 0, 1, 0, 2, 108, 32, }, + { 2, 1, 0, 2, 108, 32, }, + { 1, 1, 0, 2, 108, 32, }, + { 0, 1, 0, 2, 112, 32, }, + { 2, 1, 0, 2, 112, 32, }, + { 1, 1, 0, 2, 112, 32, }, + { 0, 1, 0, 2, 116, 32, }, + { 2, 1, 0, 2, 116, 32, }, + { 1, 1, 0, 2, 116, 32, }, + { 0, 1, 0, 2, 120, 32, }, + { 2, 1, 0, 2, 120, 32, }, + { 1, 1, 0, 2, 120, 32, }, + { 0, 1, 0, 2, 124, 32, }, + { 2, 1, 0, 2, 124, 32, }, + { 1, 1, 0, 2, 124, 32, }, + { 0, 1, 0, 2, 128, 32, }, + { 2, 1, 0, 2, 128, 32, }, + { 1, 1, 0, 2, 128, 32, }, + { 0, 1, 0, 2, 132, 32, }, + { 2, 1, 0, 2, 132, 32, }, + { 1, 1, 0, 2, 132, 32, }, + { 0, 1, 0, 2, 136, 32, }, + { 2, 1, 0, 2, 136, 32, }, + { 1, 1, 0, 2, 136, 32, }, + { 0, 1, 0, 2, 140, 30, }, + { 2, 1, 0, 2, 140, 30, }, + { 1, 1, 0, 2, 140, 30, }, + { 0, 1, 0, 2, 149, 40, }, + { 2, 1, 0, 2, 149, 40, }, + { 1, 1, 0, 2, 149, 63, }, + { 0, 1, 0, 2, 153, 40, }, + { 2, 1, 0, 2, 153, 40, }, + { 1, 1, 0, 2, 153, 63, }, + { 0, 1, 0, 2, 157, 40, }, + { 2, 1, 0, 2, 157, 40, }, + 
{ 1, 1, 0, 2, 157, 63, }, + { 0, 1, 0, 2, 161, 40, }, + { 2, 1, 0, 2, 161, 40, }, + { 1, 1, 0, 2, 161, 63, }, + { 0, 1, 0, 2, 165, 42, }, + { 2, 1, 0, 2, 165, 42, }, + { 1, 1, 0, 2, 165, 63, }, + { 0, 1, 0, 3, 36, 32, }, + { 2, 1, 0, 3, 36, 32, }, + { 1, 1, 0, 3, 36, 32, }, + { 0, 1, 0, 3, 40, 32, }, + { 2, 1, 0, 3, 40, 32, }, + { 1, 1, 0, 3, 40, 32, }, + { 0, 1, 0, 3, 44, 32, }, + { 2, 1, 0, 3, 44, 32, }, + { 1, 1, 0, 3, 44, 32, }, + { 0, 1, 0, 3, 48, 36, }, + { 2, 1, 0, 3, 48, 36, }, + { 1, 1, 0, 3, 48, 36, }, + { 0, 1, 0, 3, 52, 36, }, + { 2, 1, 0, 3, 52, 36, }, + { 1, 1, 0, 3, 52, 36, }, + { 0, 1, 0, 3, 56, 32, }, + { 2, 1, 0, 3, 56, 32, }, + { 1, 1, 0, 3, 56, 32, }, + { 0, 1, 0, 3, 60, 32, }, + { 2, 1, 0, 3, 60, 32, }, + { 1, 1, 0, 3, 60, 32, }, + { 0, 1, 0, 3, 64, 32, }, + { 2, 1, 0, 3, 64, 32, }, + { 1, 1, 0, 3, 64, 32, }, + { 0, 1, 0, 3, 100, 32, }, + { 2, 1, 0, 3, 100, 32, }, + { 1, 1, 0, 3, 100, 32, }, + { 0, 1, 0, 3, 104, 32, }, + { 2, 1, 0, 3, 104, 32, }, + { 1, 1, 0, 3, 104, 32, }, + { 0, 1, 0, 3, 108, 32, }, + { 2, 1, 0, 3, 108, 32, }, + { 1, 1, 0, 3, 108, 32, }, + { 0, 1, 0, 3, 112, 32, }, + { 2, 1, 0, 3, 112, 32, }, + { 1, 1, 0, 3, 112, 32, }, + { 0, 1, 0, 3, 116, 32, }, + { 2, 1, 0, 3, 116, 32, }, + { 1, 1, 0, 3, 116, 32, }, + { 0, 1, 0, 3, 120, 32, }, + { 2, 1, 0, 3, 120, 32, }, + { 1, 1, 0, 3, 120, 32, }, + { 0, 1, 0, 3, 124, 32, }, + { 2, 1, 0, 3, 124, 32, }, + { 1, 1, 0, 3, 124, 32, }, + { 0, 1, 0, 3, 128, 32, }, + { 2, 1, 0, 3, 128, 32, }, + { 1, 1, 0, 3, 128, 32, }, + { 0, 1, 0, 3, 132, 32, }, + { 2, 1, 0, 3, 132, 32, }, + { 1, 1, 0, 3, 132, 32, }, + { 0, 1, 0, 3, 136, 32, }, + { 2, 1, 0, 3, 136, 32, }, + { 1, 1, 0, 3, 136, 32, }, + { 0, 1, 0, 3, 140, 30, }, + { 2, 1, 0, 3, 140, 30, }, + { 1, 1, 0, 3, 140, 30, }, + { 0, 1, 0, 3, 149, 40, }, + { 2, 1, 0, 3, 149, 40, }, + { 1, 1, 0, 3, 149, 63, }, + { 0, 1, 0, 3, 153, 40, }, + { 2, 1, 0, 3, 153, 40, }, + { 1, 1, 0, 3, 153, 63, }, + { 0, 1, 0, 3, 157, 40, }, + { 2, 1, 0, 3, 157, 40, }, + { 1, 1, 0, 3, 157, 63, }, + { 0, 1, 0, 3, 161, 40, }, + { 2, 1, 0, 3, 161, 40, }, + { 1, 1, 0, 3, 161, 63, }, + { 0, 1, 0, 3, 165, 42, }, + { 2, 1, 0, 3, 165, 42, }, + { 1, 1, 0, 3, 165, 63, }, + { 0, 1, 0, 6, 36, 32, }, + { 2, 1, 0, 6, 36, 32, }, + { 1, 1, 0, 6, 36, 32, }, + { 0, 1, 0, 6, 40, 32, }, + { 2, 1, 0, 6, 40, 32, }, + { 1, 1, 0, 6, 40, 32, }, + { 0, 1, 0, 6, 44, 32, }, + { 2, 1, 0, 6, 44, 32, }, + { 1, 1, 0, 6, 44, 32, }, + { 0, 1, 0, 6, 48, 36, }, + { 2, 1, 0, 6, 48, 36, }, + { 1, 1, 0, 6, 48, 36, }, + { 0, 1, 0, 6, 52, 36, }, + { 2, 1, 0, 6, 52, 36, }, + { 1, 1, 0, 6, 52, 36, }, + { 0, 1, 0, 6, 56, 32, }, + { 2, 1, 0, 6, 56, 32, }, + { 1, 1, 0, 6, 56, 32, }, + { 0, 1, 0, 6, 60, 32, }, + { 2, 1, 0, 6, 60, 32, }, + { 1, 1, 0, 6, 60, 32, }, + { 0, 1, 0, 6, 64, 32, }, + { 2, 1, 0, 6, 64, 32, }, + { 1, 1, 0, 6, 64, 32, }, + { 0, 1, 0, 6, 100, 32, }, + { 2, 1, 0, 6, 100, 32, }, + { 1, 1, 0, 6, 100, 32, }, + { 0, 1, 0, 6, 104, 32, }, + { 2, 1, 0, 6, 104, 32, }, + { 1, 1, 0, 6, 104, 32, }, + { 0, 1, 0, 6, 108, 32, }, + { 2, 1, 0, 6, 108, 32, }, + { 1, 1, 0, 6, 108, 32, }, + { 0, 1, 0, 6, 112, 32, }, + { 2, 1, 0, 6, 112, 32, }, + { 1, 1, 0, 6, 112, 32, }, + { 0, 1, 0, 6, 116, 32, }, + { 2, 1, 0, 6, 116, 32, }, + { 1, 1, 0, 6, 116, 32, }, + { 0, 1, 0, 6, 120, 32, }, + { 2, 1, 0, 6, 120, 32, }, + { 1, 1, 0, 6, 120, 32, }, + { 0, 1, 0, 6, 124, 32, }, + { 2, 1, 0, 6, 124, 32, }, + { 1, 1, 0, 6, 124, 32, }, + { 0, 1, 0, 6, 128, 32, }, + { 2, 1, 0, 6, 128, 32, }, + { 1, 1, 0, 6, 128, 32, }, + { 0, 1, 0, 6, 132, 32, }, + { 2, 1, 0, 6, 132, 
32, }, + { 1, 1, 0, 6, 132, 32, }, + { 0, 1, 0, 6, 136, 32, }, + { 2, 1, 0, 6, 136, 32, }, + { 1, 1, 0, 6, 136, 32, }, + { 0, 1, 0, 6, 140, 30, }, + { 2, 1, 0, 6, 140, 30, }, + { 1, 1, 0, 6, 140, 30, }, + { 0, 1, 0, 6, 149, 40, }, + { 2, 1, 0, 6, 149, 40, }, + { 1, 1, 0, 6, 149, 63, }, + { 0, 1, 0, 6, 153, 40, }, + { 2, 1, 0, 6, 153, 40, }, + { 1, 1, 0, 6, 153, 63, }, + { 0, 1, 0, 6, 157, 40, }, + { 2, 1, 0, 6, 157, 40, }, + { 1, 1, 0, 6, 157, 63, }, + { 0, 1, 0, 6, 161, 40, }, + { 2, 1, 0, 6, 161, 40, }, + { 1, 1, 0, 6, 161, 63, }, + { 0, 1, 0, 6, 165, 42, }, + { 2, 1, 0, 6, 165, 42, }, + { 1, 1, 0, 6, 165, 63, }, + { 0, 1, 0, 7, 36, 32, }, + { 2, 1, 0, 7, 36, 32, }, + { 1, 1, 0, 7, 36, 32, }, + { 0, 1, 0, 7, 40, 32, }, + { 2, 1, 0, 7, 40, 32, }, + { 1, 1, 0, 7, 40, 32, }, + { 0, 1, 0, 7, 44, 32, }, + { 2, 1, 0, 7, 44, 32, }, + { 1, 1, 0, 7, 44, 32, }, + { 0, 1, 0, 7, 48, 36, }, + { 2, 1, 0, 7, 48, 36, }, + { 1, 1, 0, 7, 48, 36, }, + { 0, 1, 0, 7, 52, 36, }, + { 2, 1, 0, 7, 52, 36, }, + { 1, 1, 0, 7, 52, 36, }, + { 0, 1, 0, 7, 56, 32, }, + { 2, 1, 0, 7, 56, 32, }, + { 1, 1, 0, 7, 56, 32, }, + { 0, 1, 0, 7, 60, 32, }, + { 2, 1, 0, 7, 60, 32, }, + { 1, 1, 0, 7, 60, 32, }, + { 0, 1, 0, 7, 64, 32, }, + { 2, 1, 0, 7, 64, 32, }, + { 1, 1, 0, 7, 64, 32, }, + { 0, 1, 0, 7, 100, 32, }, + { 2, 1, 0, 7, 100, 32, }, + { 1, 1, 0, 7, 100, 32, }, + { 0, 1, 0, 7, 104, 32, }, + { 2, 1, 0, 7, 104, 32, }, + { 1, 1, 0, 7, 104, 32, }, + { 0, 1, 0, 7, 108, 32, }, + { 2, 1, 0, 7, 108, 32, }, + { 1, 1, 0, 7, 108, 32, }, + { 0, 1, 0, 7, 112, 32, }, + { 2, 1, 0, 7, 112, 32, }, + { 1, 1, 0, 7, 112, 32, }, + { 0, 1, 0, 7, 116, 32, }, + { 2, 1, 0, 7, 116, 32, }, + { 1, 1, 0, 7, 116, 32, }, + { 0, 1, 0, 7, 120, 32, }, + { 2, 1, 0, 7, 120, 32, }, + { 1, 1, 0, 7, 120, 32, }, + { 0, 1, 0, 7, 124, 32, }, + { 2, 1, 0, 7, 124, 32, }, + { 1, 1, 0, 7, 124, 32, }, + { 0, 1, 0, 7, 128, 32, }, + { 2, 1, 0, 7, 128, 32, }, + { 1, 1, 0, 7, 128, 32, }, + { 0, 1, 0, 7, 132, 32, }, + { 2, 1, 0, 7, 132, 32, }, + { 1, 1, 0, 7, 132, 32, }, + { 0, 1, 0, 7, 136, 32, }, + { 2, 1, 0, 7, 136, 32, }, + { 1, 1, 0, 7, 136, 32, }, + { 0, 1, 0, 7, 140, 30, }, + { 2, 1, 0, 7, 140, 30, }, + { 1, 1, 0, 7, 140, 30, }, + { 0, 1, 0, 7, 149, 40, }, + { 2, 1, 0, 7, 149, 40, }, + { 1, 1, 0, 7, 149, 63, }, + { 0, 1, 0, 7, 153, 40, }, + { 2, 1, 0, 7, 153, 40, }, + { 1, 1, 0, 7, 153, 63, }, + { 0, 1, 0, 7, 157, 40, }, + { 2, 1, 0, 7, 157, 40, }, + { 1, 1, 0, 7, 157, 63, }, + { 0, 1, 0, 7, 161, 40, }, + { 2, 1, 0, 7, 161, 40, }, + { 1, 1, 0, 7, 161, 63, }, + { 0, 1, 0, 7, 165, 42, }, + { 2, 1, 0, 7, 165, 42, }, + { 1, 1, 0, 7, 165, 63, }, + { 0, 1, 1, 2, 38, 32, }, + { 2, 1, 1, 2, 38, 32, }, + { 1, 1, 1, 2, 38, 32, }, + { 0, 1, 1, 2, 46, 36, }, + { 2, 1, 1, 2, 46, 36, }, + { 1, 1, 1, 2, 46, 36, }, + { 0, 1, 1, 2, 54, 36, }, + { 2, 1, 1, 2, 54, 36, }, + { 1, 1, 1, 2, 54, 36, }, + { 0, 1, 1, 2, 62, 32, }, + { 2, 1, 1, 2, 62, 32, }, + { 1, 1, 1, 2, 62, 32, }, + { 0, 1, 1, 2, 102, 30, }, + { 2, 1, 1, 2, 102, 30, }, + { 1, 1, 1, 2, 102, 30, }, + { 0, 1, 1, 2, 110, 32, }, + { 2, 1, 1, 2, 110, 32, }, + { 1, 1, 1, 2, 110, 32, }, + { 0, 1, 1, 2, 118, 32, }, + { 2, 1, 1, 2, 118, 32, }, + { 1, 1, 1, 2, 118, 32, }, + { 0, 1, 1, 2, 126, 32, }, + { 2, 1, 1, 2, 126, 32, }, + { 1, 1, 1, 2, 126, 32, }, + { 0, 1, 1, 2, 134, 36, }, + { 2, 1, 1, 2, 134, 36, }, + { 1, 1, 1, 2, 134, 36, }, + { 0, 1, 1, 2, 151, 36, }, + { 2, 1, 1, 2, 151, 36, }, + { 1, 1, 1, 2, 151, 63, }, + { 0, 1, 1, 2, 159, 40, }, + { 2, 1, 1, 2, 159, 40, }, + { 1, 1, 1, 2, 159, 63, }, + { 0, 1, 1, 3, 38, 32, }, 
+ { 2, 1, 1, 3, 38, 32, }, + { 1, 1, 1, 3, 38, 32, }, + { 0, 1, 1, 3, 46, 36, }, + { 2, 1, 1, 3, 46, 36, }, + { 1, 1, 1, 3, 46, 36, }, + { 0, 1, 1, 3, 54, 36, }, + { 2, 1, 1, 3, 54, 36, }, + { 1, 1, 1, 3, 54, 36, }, + { 0, 1, 1, 3, 62, 32, }, + { 2, 1, 1, 3, 62, 32, }, + { 1, 1, 1, 3, 62, 32, }, + { 0, 1, 1, 3, 102, 30, }, + { 2, 1, 1, 3, 102, 30, }, + { 1, 1, 1, 3, 102, 30, }, + { 0, 1, 1, 3, 110, 32, }, + { 2, 1, 1, 3, 110, 32, }, + { 1, 1, 1, 3, 110, 32, }, + { 0, 1, 1, 3, 118, 32, }, + { 2, 1, 1, 3, 118, 32, }, + { 1, 1, 1, 3, 118, 32, }, + { 0, 1, 1, 3, 126, 32, }, + { 2, 1, 1, 3, 126, 32, }, + { 1, 1, 1, 3, 126, 32, }, + { 0, 1, 1, 3, 134, 36, }, + { 2, 1, 1, 3, 134, 36, }, + { 1, 1, 1, 3, 134, 36, }, + { 0, 1, 1, 3, 151, 36, }, + { 2, 1, 1, 3, 151, 36, }, + { 1, 1, 1, 3, 151, 63, }, + { 0, 1, 1, 3, 159, 40, }, + { 2, 1, 1, 3, 159, 40, }, + { 1, 1, 1, 3, 159, 63, }, + { 0, 1, 1, 6, 38, 32, }, + { 2, 1, 1, 6, 38, 32, }, + { 1, 1, 1, 6, 38, 32, }, + { 0, 1, 1, 6, 46, 36, }, + { 2, 1, 1, 6, 46, 36, }, + { 1, 1, 1, 6, 46, 36, }, + { 0, 1, 1, 6, 54, 36, }, + { 2, 1, 1, 6, 54, 36, }, + { 1, 1, 1, 6, 54, 36, }, + { 0, 1, 1, 6, 62, 32, }, + { 2, 1, 1, 6, 62, 32, }, + { 1, 1, 1, 6, 62, 32, }, + { 0, 1, 1, 6, 102, 30, }, + { 2, 1, 1, 6, 102, 30, }, + { 1, 1, 1, 6, 102, 30, }, + { 0, 1, 1, 6, 110, 32, }, + { 2, 1, 1, 6, 110, 32, }, + { 1, 1, 1, 6, 110, 32, }, + { 0, 1, 1, 6, 118, 32, }, + { 2, 1, 1, 6, 118, 32, }, + { 1, 1, 1, 6, 118, 32, }, + { 0, 1, 1, 6, 126, 32, }, + { 2, 1, 1, 6, 126, 32, }, + { 1, 1, 1, 6, 126, 32, }, + { 0, 1, 1, 6, 134, 36, }, + { 2, 1, 1, 6, 134, 36, }, + { 1, 1, 1, 6, 134, 36, }, + { 0, 1, 1, 6, 151, 36, }, + { 2, 1, 1, 6, 151, 36, }, + { 1, 1, 1, 6, 151, 63, }, + { 0, 1, 1, 6, 159, 40, }, + { 2, 1, 1, 6, 159, 40, }, + { 1, 1, 1, 6, 159, 63, }, + { 0, 1, 1, 7, 38, 32, }, + { 2, 1, 1, 7, 38, 32, }, + { 1, 1, 1, 7, 38, 32, }, + { 0, 1, 1, 7, 46, 36, }, + { 2, 1, 1, 7, 46, 36, }, + { 1, 1, 1, 7, 46, 36, }, + { 0, 1, 1, 7, 54, 36, }, + { 2, 1, 1, 7, 54, 36, }, + { 1, 1, 1, 7, 54, 36, }, + { 0, 1, 1, 7, 62, 32, }, + { 2, 1, 1, 7, 62, 32, }, + { 1, 1, 1, 7, 62, 32, }, + { 0, 1, 1, 7, 102, 30, }, + { 2, 1, 1, 7, 102, 30, }, + { 1, 1, 1, 7, 102, 30, }, + { 0, 1, 1, 7, 110, 32, }, + { 2, 1, 1, 7, 110, 32, }, + { 1, 1, 1, 7, 110, 32, }, + { 0, 1, 1, 7, 118, 32, }, + { 2, 1, 1, 7, 118, 32, }, + { 1, 1, 1, 7, 118, 32, }, + { 0, 1, 1, 7, 126, 32, }, + { 2, 1, 1, 7, 126, 32, }, + { 1, 1, 1, 7, 126, 32, }, + { 0, 1, 1, 7, 134, 36, }, + { 2, 1, 1, 7, 134, 36, }, + { 1, 1, 1, 7, 134, 36, }, + { 0, 1, 1, 7, 151, 36, }, + { 2, 1, 1, 7, 151, 36, }, + { 1, 1, 1, 7, 151, 63, }, + { 0, 1, 1, 7, 159, 40, }, + { 2, 1, 1, 7, 159, 40, }, + { 1, 1, 1, 7, 159, 63, }, + { 0, 1, 2, 4, 42, 34, }, + { 2, 1, 2, 4, 42, 34, }, + { 1, 1, 2, 4, 42, 34, }, + { 0, 1, 2, 4, 58, 34, }, + { 2, 1, 2, 4, 58, 34, }, + { 1, 1, 2, 4, 58, 34, }, + { 0, 1, 2, 4, 106, 32, }, + { 2, 1, 2, 4, 106, 32, }, + { 1, 1, 2, 4, 106, 32, }, + { 0, 1, 2, 4, 122, 34, }, + { 2, 1, 2, 4, 122, 34, }, + { 1, 1, 2, 4, 122, 34, }, + { 0, 1, 2, 4, 155, 34, }, + { 2, 1, 2, 4, 155, 34, }, + { 1, 1, 2, 4, 155, 63, }, + { 0, 1, 2, 5, 42, 34, }, + { 2, 1, 2, 5, 42, 34, }, + { 1, 1, 2, 5, 42, 34, }, + { 0, 1, 2, 5, 58, 34, }, + { 2, 1, 2, 5, 58, 34, }, + { 1, 1, 2, 5, 58, 34, }, + { 0, 1, 2, 5, 106, 32, }, + { 2, 1, 2, 5, 106, 32, }, + { 1, 1, 2, 5, 106, 32, }, + { 0, 1, 2, 5, 122, 34, }, + { 2, 1, 2, 5, 122, 34, }, + { 1, 1, 2, 5, 122, 34, }, + { 0, 1, 2, 5, 155, 34, }, + { 2, 1, 2, 5, 155, 34, }, + { 1, 1, 2, 5, 155, 63, }, + { 0, 1, 2, 8, 
42, 34, }, + { 2, 1, 2, 8, 42, 34, }, + { 1, 1, 2, 8, 42, 34, }, + { 0, 1, 2, 8, 58, 34, }, + { 2, 1, 2, 8, 58, 34, }, + { 1, 1, 2, 8, 58, 34, }, + { 0, 1, 2, 8, 106, 32, }, + { 2, 1, 2, 8, 106, 32, }, + { 1, 1, 2, 8, 106, 32, }, + { 0, 1, 2, 8, 122, 34, }, + { 2, 1, 2, 8, 122, 34, }, + { 1, 1, 2, 8, 122, 34, }, + { 0, 1, 2, 8, 155, 34, }, + { 2, 1, 2, 8, 155, 34, }, + { 1, 1, 2, 8, 155, 63, }, + { 0, 1, 2, 9, 42, 34, }, + { 2, 1, 2, 9, 42, 34, }, + { 1, 1, 2, 9, 42, 34, }, + { 0, 1, 2, 9, 58, 34, }, + { 2, 1, 2, 9, 58, 34, }, + { 1, 1, 2, 9, 58, 34, }, + { 0, 1, 2, 9, 106, 32, }, + { 2, 1, 2, 9, 106, 32, }, + { 1, 1, 2, 9, 106, 32, }, + { 0, 1, 2, 9, 122, 34, }, + { 2, 1, 2, 9, 122, 34, }, + { 1, 1, 2, 9, 122, 34, }, + { 0, 1, 2, 9, 155, 34, }, + { 2, 1, 2, 9, 155, 34, }, + { 1, 1, 2, 9, 155, 63, },
+};
+
+RTW_DECL_TABLE_TXPWR_LMT(rtw8814a_txpwr_lmt_type2);
+
+static const struct rtw_txpwr_lmt_cfg_pair rtw8814a_txpwr_lmt_type3[] = {
+ { 0, 0, 0, 0, 1, 46, }, + { 2, 0, 0, 0, 1, 40, }, + { 1, 0, 0, 0, 1, 40, }, + { 0, 0, 0, 0, 2, 46, }, + { 2, 0, 0, 0, 2, 40, }, + { 1, 0, 0, 0, 2, 40, }, + { 0, 0, 0, 0, 3, 46, }, + { 2, 0, 0, 0, 3, 40, }, + { 1, 0, 0, 0, 3, 40, }, + { 0, 0, 0, 0, 4, 46, }, + { 2, 0, 0, 0, 4, 40, }, + { 1, 0, 0, 0, 4, 40, }, + { 0, 0, 0, 0, 5, 46, }, + { 2, 0, 0, 0, 5, 40, }, + { 1, 0, 0, 0, 5, 40, }, + { 0, 0, 0, 0, 6, 46, }, + { 2, 0, 0, 0, 6, 40, }, + { 1, 0, 0, 0, 6, 40, }, + { 0, 0, 0, 0, 7, 46, }, + { 2, 0, 0, 0, 7, 40, }, + { 1, 0, 0, 0, 7, 40, }, + { 0, 0, 0, 0, 8, 46, }, + { 2, 0, 0, 0, 8, 40, }, + { 1, 0, 0, 0, 8, 40, }, + { 0, 0, 0, 0, 9, 46, }, + { 2, 0, 0, 0, 9, 40, }, + { 1, 0, 0, 0, 9, 40, }, + { 0, 0, 0, 0, 10, 46, }, + { 2, 0, 0, 0, 10, 40, }, + { 1, 0, 0, 0, 10, 40, }, + { 0, 0, 0, 0, 11, 46, }, + { 2, 0, 0, 0, 11, 40, }, + { 1, 0, 0, 0, 11, 40, }, + { 0, 0, 0, 0, 12, 63, }, + { 2, 0, 0, 0, 12, 40, }, + { 1, 0, 0, 0, 12, 40, }, + { 0, 0, 0, 0, 13, 63, }, + { 2, 0, 0, 0, 13, 40, }, + { 1, 0, 0, 0, 13, 40, }, + { 0, 0, 0, 0, 14, 63, }, + { 2, 0, 0, 0, 14, 63, }, + { 1, 0, 0, 0, 14, 40, }, + { 0, 0, 0, 1, 1, 46, }, + { 2, 0, 0, 1, 1, 40, }, + { 1, 0, 0, 1, 1, 40, }, + { 0, 0, 0, 1, 2, 46, }, + { 2, 0, 0, 1, 2, 40, }, + { 1, 0, 0, 1, 2, 40, }, + { 0, 0, 0, 1, 3, 46, }, + { 2, 0, 0, 1, 3, 40, }, + { 1, 0, 0, 1, 3, 40, }, + { 0, 0, 0, 1, 4, 46, }, + { 2, 0, 0, 1, 4, 40, }, + { 1, 0, 0, 1, 4, 40, }, + { 0, 0, 0, 1, 5, 46, }, + { 2, 0, 0, 1, 5, 40, }, + { 1, 0, 0, 1, 5, 40, }, + { 0, 0, 0, 1, 6, 46, }, + { 2, 0, 0, 1, 6, 40, }, + { 1, 0, 0, 1, 6, 40, }, + { 0, 0, 0, 1, 7, 46, }, + { 2, 0, 0, 1, 7, 40, }, + { 1, 0, 0, 1, 7, 40, }, + { 0, 0, 0, 1, 8, 46, }, + { 2, 0, 0, 1, 8, 40, }, + { 1, 0, 0, 1, 8, 40, }, + { 0, 0, 0, 1, 9, 46, }, + { 2, 0, 0, 1, 9, 40, }, + { 1, 0, 0, 1, 9, 40, }, + { 0, 0, 0, 1, 10, 46, }, + { 2, 0, 0, 1, 10, 40, }, + { 1, 0, 0, 1, 10, 40, }, + { 0, 0, 0, 1, 11, 46, }, + { 2, 0, 0, 1, 11, 40, }, + { 1, 0, 0, 1, 11, 40, }, + { 0, 0, 0, 1, 12, 63, }, + { 2, 0, 0, 1, 12, 40, }, + { 1, 0, 0, 1, 12, 40, }, + { 0, 0, 0, 1, 13, 63, }, + { 2, 0, 0, 1, 13, 40, }, + { 1, 0, 0, 1, 13, 40, }, + { 0, 0, 0, 1, 14, 63, }, + { 2, 0, 0, 1, 14, 63, }, + { 1, 0, 0, 1, 14, 63, }, + { 0, 0, 0, 2, 1, 46, }, + { 2, 0, 0, 2, 1, 40, }, + { 1, 0, 0, 2, 1, 40, }, + { 0, 0, 0, 2, 2, 46, }, + { 2, 0, 0, 2, 2, 40, }, + { 1, 0, 0, 2, 2, 40, }, + { 0, 0, 0, 2, 3, 46, }, + { 2, 0, 0, 2, 3, 40, }, + { 1, 0, 0, 2, 3, 40, }, + { 0, 0, 0, 2, 4, 46, }, + { 2, 0, 0, 2, 4, 40, }, + { 1, 0, 0, 2, 4, 40, }, + { 0, 0, 0, 2, 5, 46, }, + { 2, 0, 0, 2, 5, 40, }, + { 1, 0, 0, 2, 5, 40, }, +
{ 0, 0, 0, 2, 6, 46, }, + { 2, 0, 0, 2, 6, 40, }, + { 1, 0, 0, 2, 6, 40, }, + { 0, 0, 0, 2, 7, 46, }, + { 2, 0, 0, 2, 7, 40, }, + { 1, 0, 0, 2, 7, 40, }, + { 0, 0, 0, 2, 8, 46, }, + { 2, 0, 0, 2, 8, 40, }, + { 1, 0, 0, 2, 8, 40, }, + { 0, 0, 0, 2, 9, 46, }, + { 2, 0, 0, 2, 9, 40, }, + { 1, 0, 0, 2, 9, 40, }, + { 0, 0, 0, 2, 10, 46, }, + { 2, 0, 0, 2, 10, 40, }, + { 1, 0, 0, 2, 10, 40, }, + { 0, 0, 0, 2, 11, 46, }, + { 2, 0, 0, 2, 11, 40, }, + { 1, 0, 0, 2, 11, 40, }, + { 0, 0, 0, 2, 12, 63, }, + { 2, 0, 0, 2, 12, 40, }, + { 1, 0, 0, 2, 12, 40, }, + { 0, 0, 0, 2, 13, 63, }, + { 2, 0, 0, 2, 13, 40, }, + { 1, 0, 0, 2, 13, 40, }, + { 0, 0, 0, 2, 14, 63, }, + { 2, 0, 0, 2, 14, 63, }, + { 1, 0, 0, 2, 14, 63, }, + { 0, 0, 0, 3, 1, 46, }, + { 2, 0, 0, 3, 1, 40, }, + { 1, 0, 0, 3, 1, 40, }, + { 0, 0, 0, 3, 2, 46, }, + { 2, 0, 0, 3, 2, 40, }, + { 1, 0, 0, 3, 2, 40, }, + { 0, 0, 0, 3, 3, 46, }, + { 2, 0, 0, 3, 3, 40, }, + { 1, 0, 0, 3, 3, 40, }, + { 0, 0, 0, 3, 4, 46, }, + { 2, 0, 0, 3, 4, 40, }, + { 1, 0, 0, 3, 4, 40, }, + { 0, 0, 0, 3, 5, 46, }, + { 2, 0, 0, 3, 5, 40, }, + { 1, 0, 0, 3, 5, 40, }, + { 0, 0, 0, 3, 6, 46, }, + { 2, 0, 0, 3, 6, 40, }, + { 1, 0, 0, 3, 6, 40, }, + { 0, 0, 0, 3, 7, 46, }, + { 2, 0, 0, 3, 7, 40, }, + { 1, 0, 0, 3, 7, 40, }, + { 0, 0, 0, 3, 8, 46, }, + { 2, 0, 0, 3, 8, 40, }, + { 1, 0, 0, 3, 8, 40, }, + { 0, 0, 0, 3, 9, 46, }, + { 2, 0, 0, 3, 9, 40, }, + { 1, 0, 0, 3, 9, 40, }, + { 0, 0, 0, 3, 10, 46, }, + { 2, 0, 0, 3, 10, 40, }, + { 1, 0, 0, 3, 10, 40, }, + { 0, 0, 0, 3, 11, 46, }, + { 2, 0, 0, 3, 11, 40, }, + { 1, 0, 0, 3, 11, 40, }, + { 0, 0, 0, 3, 12, 63, }, + { 2, 0, 0, 3, 12, 40, }, + { 1, 0, 0, 3, 12, 40, }, + { 0, 0, 0, 3, 13, 63, }, + { 2, 0, 0, 3, 13, 40, }, + { 1, 0, 0, 3, 13, 40, }, + { 0, 0, 0, 3, 14, 63, }, + { 2, 0, 0, 3, 14, 63, }, + { 1, 0, 0, 3, 14, 63, }, + { 0, 0, 0, 6, 1, 46, }, + { 2, 0, 0, 6, 1, 40, }, + { 1, 0, 0, 6, 1, 40, }, + { 0, 0, 0, 6, 2, 46, }, + { 2, 0, 0, 6, 2, 40, }, + { 1, 0, 0, 6, 2, 40, }, + { 0, 0, 0, 6, 3, 46, }, + { 2, 0, 0, 6, 3, 40, }, + { 1, 0, 0, 6, 3, 40, }, + { 0, 0, 0, 6, 4, 46, }, + { 2, 0, 0, 6, 4, 40, }, + { 1, 0, 0, 6, 4, 40, }, + { 0, 0, 0, 6, 5, 46, }, + { 2, 0, 0, 6, 5, 40, }, + { 1, 0, 0, 6, 5, 40, }, + { 0, 0, 0, 6, 6, 46, }, + { 2, 0, 0, 6, 6, 40, }, + { 1, 0, 0, 6, 6, 40, }, + { 0, 0, 0, 6, 7, 46, }, + { 2, 0, 0, 6, 7, 40, }, + { 1, 0, 0, 6, 7, 40, }, + { 0, 0, 0, 6, 8, 46, }, + { 2, 0, 0, 6, 8, 40, }, + { 1, 0, 0, 6, 8, 40, }, + { 0, 0, 0, 6, 9, 46, }, + { 2, 0, 0, 6, 9, 40, }, + { 1, 0, 0, 6, 9, 40, }, + { 0, 0, 0, 6, 10, 46, }, + { 2, 0, 0, 6, 10, 40, }, + { 1, 0, 0, 6, 10, 40, }, + { 0, 0, 0, 6, 11, 46, }, + { 2, 0, 0, 6, 11, 40, }, + { 1, 0, 0, 6, 11, 40, }, + { 0, 0, 0, 6, 12, 63, }, + { 2, 0, 0, 6, 12, 40, }, + { 1, 0, 0, 6, 12, 40, }, + { 0, 0, 0, 6, 13, 63, }, + { 2, 0, 0, 6, 13, 40, }, + { 1, 0, 0, 6, 13, 40, }, + { 0, 0, 0, 6, 14, 63, }, + { 2, 0, 0, 6, 14, 63, }, + { 1, 0, 0, 6, 14, 63, }, + { 0, 0, 0, 7, 1, 46, }, + { 2, 0, 0, 7, 1, 40, }, + { 1, 0, 0, 7, 1, 40, }, + { 0, 0, 0, 7, 2, 46, }, + { 2, 0, 0, 7, 2, 40, }, + { 1, 0, 0, 7, 2, 40, }, + { 0, 0, 0, 7, 3, 46, }, + { 2, 0, 0, 7, 3, 40, }, + { 1, 0, 0, 7, 3, 40, }, + { 0, 0, 0, 7, 4, 46, }, + { 2, 0, 0, 7, 4, 40, }, + { 1, 0, 0, 7, 4, 40, }, + { 0, 0, 0, 7, 5, 46, }, + { 2, 0, 0, 7, 5, 40, }, + { 1, 0, 0, 7, 5, 40, }, + { 0, 0, 0, 7, 6, 46, }, + { 2, 0, 0, 7, 6, 40, }, + { 1, 0, 0, 7, 6, 40, }, + { 0, 0, 0, 7, 7, 46, }, + { 2, 0, 0, 7, 7, 40, }, + { 1, 0, 0, 7, 7, 40, }, + { 0, 0, 0, 7, 8, 46, }, + { 2, 0, 0, 7, 8, 40, }, + { 1, 0, 0, 7, 8, 40, }, + 
{ 0, 0, 0, 7, 9, 46, }, + { 2, 0, 0, 7, 9, 40, }, + { 1, 0, 0, 7, 9, 40, }, + { 0, 0, 0, 7, 10, 46, }, + { 2, 0, 0, 7, 10, 40, }, + { 1, 0, 0, 7, 10, 40, }, + { 0, 0, 0, 7, 11, 46, }, + { 2, 0, 0, 7, 11, 40, }, + { 1, 0, 0, 7, 11, 40, }, + { 0, 0, 0, 7, 12, 63, }, + { 2, 0, 0, 7, 12, 40, }, + { 1, 0, 0, 7, 12, 40, }, + { 0, 0, 0, 7, 13, 63, }, + { 2, 0, 0, 7, 13, 40, }, + { 1, 0, 0, 7, 13, 40, }, + { 0, 0, 0, 7, 14, 63, }, + { 2, 0, 0, 7, 14, 63, }, + { 1, 0, 0, 7, 14, 63, }, + { 0, 0, 1, 2, 1, 63, }, + { 2, 0, 1, 2, 1, 63, }, + { 1, 0, 1, 2, 1, 63, }, + { 0, 0, 1, 2, 2, 63, }, + { 2, 0, 1, 2, 2, 63, }, + { 1, 0, 1, 2, 2, 63, }, + { 0, 0, 1, 2, 3, 46, }, + { 2, 0, 1, 2, 3, 40, }, + { 1, 0, 1, 2, 3, 40, }, + { 0, 0, 1, 2, 4, 46, }, + { 2, 0, 1, 2, 4, 40, }, + { 1, 0, 1, 2, 4, 40, }, + { 0, 0, 1, 2, 5, 46, }, + { 2, 0, 1, 2, 5, 40, }, + { 1, 0, 1, 2, 5, 40, }, + { 0, 0, 1, 2, 6, 46, }, + { 2, 0, 1, 2, 6, 40, }, + { 1, 0, 1, 2, 6, 40, }, + { 0, 0, 1, 2, 7, 46, }, + { 2, 0, 1, 2, 7, 40, }, + { 1, 0, 1, 2, 7, 40, }, + { 0, 0, 1, 2, 8, 46, }, + { 2, 0, 1, 2, 8, 40, }, + { 1, 0, 1, 2, 8, 40, }, + { 0, 0, 1, 2, 9, 46, }, + { 2, 0, 1, 2, 9, 40, }, + { 1, 0, 1, 2, 9, 40, }, + { 0, 0, 1, 2, 10, 46, }, + { 2, 0, 1, 2, 10, 40, }, + { 1, 0, 1, 2, 10, 40, }, + { 0, 0, 1, 2, 11, 46, }, + { 2, 0, 1, 2, 11, 40, }, + { 1, 0, 1, 2, 11, 40, }, + { 0, 0, 1, 2, 12, 63, }, + { 2, 0, 1, 2, 12, 40, }, + { 1, 0, 1, 2, 12, 40, }, + { 0, 0, 1, 2, 13, 63, }, + { 2, 0, 1, 2, 13, 40, }, + { 1, 0, 1, 2, 13, 40, }, + { 0, 0, 1, 2, 14, 63, }, + { 2, 0, 1, 2, 14, 63, }, + { 1, 0, 1, 2, 14, 63, }, + { 0, 0, 1, 3, 1, 63, }, + { 2, 0, 1, 3, 1, 63, }, + { 1, 0, 1, 3, 1, 63, }, + { 0, 0, 1, 3, 2, 63, }, + { 2, 0, 1, 3, 2, 63, }, + { 1, 0, 1, 3, 2, 63, }, + { 0, 0, 1, 3, 3, 46, }, + { 2, 0, 1, 3, 3, 40, }, + { 1, 0, 1, 3, 3, 40, }, + { 0, 0, 1, 3, 4, 46, }, + { 2, 0, 1, 3, 4, 40, }, + { 1, 0, 1, 3, 4, 40, }, + { 0, 0, 1, 3, 5, 46, }, + { 2, 0, 1, 3, 5, 40, }, + { 1, 0, 1, 3, 5, 40, }, + { 0, 0, 1, 3, 6, 46, }, + { 2, 0, 1, 3, 6, 40, }, + { 1, 0, 1, 3, 6, 40, }, + { 0, 0, 1, 3, 7, 46, }, + { 2, 0, 1, 3, 7, 40, }, + { 1, 0, 1, 3, 7, 40, }, + { 0, 0, 1, 3, 8, 46, }, + { 2, 0, 1, 3, 8, 40, }, + { 1, 0, 1, 3, 8, 40, }, + { 0, 0, 1, 3, 9, 46, }, + { 2, 0, 1, 3, 9, 40, }, + { 1, 0, 1, 3, 9, 40, }, + { 0, 0, 1, 3, 10, 46, }, + { 2, 0, 1, 3, 10, 40, }, + { 1, 0, 1, 3, 10, 40, }, + { 0, 0, 1, 3, 11, 46, }, + { 2, 0, 1, 3, 11, 40, }, + { 1, 0, 1, 3, 11, 40, }, + { 0, 0, 1, 3, 12, 63, }, + { 2, 0, 1, 3, 12, 40, }, + { 1, 0, 1, 3, 12, 40, }, + { 0, 0, 1, 3, 13, 63, }, + { 2, 0, 1, 3, 13, 40, }, + { 1, 0, 1, 3, 13, 40, }, + { 0, 0, 1, 3, 14, 63, }, + { 2, 0, 1, 3, 14, 63, }, + { 1, 0, 1, 3, 14, 63, }, + { 0, 0, 1, 6, 1, 63, }, + { 2, 0, 1, 6, 1, 63, }, + { 1, 0, 1, 6, 1, 63, }, + { 0, 0, 1, 6, 2, 63, }, + { 2, 0, 1, 6, 2, 63, }, + { 1, 0, 1, 6, 2, 63, }, + { 0, 0, 1, 6, 3, 46, }, + { 2, 0, 1, 6, 3, 40, }, + { 1, 0, 1, 6, 3, 40, }, + { 0, 0, 1, 6, 4, 46, }, + { 2, 0, 1, 6, 4, 40, }, + { 1, 0, 1, 6, 4, 40, }, + { 0, 0, 1, 6, 5, 46, }, + { 2, 0, 1, 6, 5, 40, }, + { 1, 0, 1, 6, 5, 40, }, + { 0, 0, 1, 6, 6, 46, }, + { 2, 0, 1, 6, 6, 40, }, + { 1, 0, 1, 6, 6, 40, }, + { 0, 0, 1, 6, 7, 46, }, + { 2, 0, 1, 6, 7, 40, }, + { 1, 0, 1, 6, 7, 40, }, + { 0, 0, 1, 6, 8, 46, }, + { 2, 0, 1, 6, 8, 40, }, + { 1, 0, 1, 6, 8, 40, }, + { 0, 0, 1, 6, 9, 46, }, + { 2, 0, 1, 6, 9, 40, }, + { 1, 0, 1, 6, 9, 40, }, + { 0, 0, 1, 6, 10, 46, }, + { 2, 0, 1, 6, 10, 40, }, + { 1, 0, 1, 6, 10, 40, }, + { 0, 0, 1, 6, 11, 46, }, + { 2, 0, 1, 6, 11, 40, }, + { 1, 0, 1, 6, 11, 
40, }, + { 0, 0, 1, 6, 12, 63, }, + { 2, 0, 1, 6, 12, 40, }, + { 1, 0, 1, 6, 12, 40, }, + { 0, 0, 1, 6, 13, 63, }, + { 2, 0, 1, 6, 13, 40, }, + { 1, 0, 1, 6, 13, 40, }, + { 0, 0, 1, 6, 14, 63, }, + { 2, 0, 1, 6, 14, 63, }, + { 1, 0, 1, 6, 14, 63, }, + { 0, 0, 1, 7, 1, 63, }, + { 2, 0, 1, 7, 1, 63, }, + { 1, 0, 1, 7, 1, 63, }, + { 0, 0, 1, 7, 2, 63, }, + { 2, 0, 1, 7, 2, 63, }, + { 1, 0, 1, 7, 2, 63, }, + { 0, 0, 1, 7, 3, 46, }, + { 2, 0, 1, 7, 3, 40, }, + { 1, 0, 1, 7, 3, 40, }, + { 0, 0, 1, 7, 4, 46, }, + { 2, 0, 1, 7, 4, 40, }, + { 1, 0, 1, 7, 4, 40, }, + { 0, 0, 1, 7, 5, 46, }, + { 2, 0, 1, 7, 5, 40, }, + { 1, 0, 1, 7, 5, 40, }, + { 0, 0, 1, 7, 6, 46, }, + { 2, 0, 1, 7, 6, 40, }, + { 1, 0, 1, 7, 6, 40, }, + { 0, 0, 1, 7, 7, 46, }, + { 2, 0, 1, 7, 7, 40, }, + { 1, 0, 1, 7, 7, 40, }, + { 0, 0, 1, 7, 8, 46, }, + { 2, 0, 1, 7, 8, 40, }, + { 1, 0, 1, 7, 8, 40, }, + { 0, 0, 1, 7, 9, 46, }, + { 2, 0, 1, 7, 9, 40, }, + { 1, 0, 1, 7, 9, 40, }, + { 0, 0, 1, 7, 10, 46, }, + { 2, 0, 1, 7, 10, 40, }, + { 1, 0, 1, 7, 10, 40, }, + { 0, 0, 1, 7, 11, 46, }, + { 2, 0, 1, 7, 11, 40, }, + { 1, 0, 1, 7, 11, 40, }, + { 0, 0, 1, 7, 12, 63, }, + { 2, 0, 1, 7, 12, 40, }, + { 1, 0, 1, 7, 12, 40, }, + { 0, 0, 1, 7, 13, 63, }, + { 2, 0, 1, 7, 13, 40, }, + { 1, 0, 1, 7, 13, 40, }, + { 0, 0, 1, 7, 14, 63, }, + { 2, 0, 1, 7, 14, 63, }, + { 1, 0, 1, 7, 14, 63, }, + { 0, 1, 0, 1, 36, 46, }, + { 2, 1, 0, 1, 36, 40, }, + { 1, 1, 0, 1, 36, 40, }, + { 0, 1, 0, 1, 40, 46, }, + { 2, 1, 0, 1, 40, 40, }, + { 1, 1, 0, 1, 40, 40, }, + { 0, 1, 0, 1, 44, 46, }, + { 2, 1, 0, 1, 44, 40, }, + { 1, 1, 0, 1, 44, 40, }, + { 0, 1, 0, 1, 48, 46, }, + { 2, 1, 0, 1, 48, 40, }, + { 1, 1, 0, 1, 48, 40, }, + { 0, 1, 0, 1, 52, 46, }, + { 2, 1, 0, 1, 52, 40, }, + { 1, 1, 0, 1, 52, 40, }, + { 0, 1, 0, 1, 56, 46, }, + { 2, 1, 0, 1, 56, 40, }, + { 1, 1, 0, 1, 56, 40, }, + { 0, 1, 0, 1, 60, 46, }, + { 2, 1, 0, 1, 60, 40, }, + { 1, 1, 0, 1, 60, 40, }, + { 0, 1, 0, 1, 64, 46, }, + { 2, 1, 0, 1, 64, 40, }, + { 1, 1, 0, 1, 64, 40, }, + { 0, 1, 0, 1, 100, 46, }, + { 2, 1, 0, 1, 100, 40, }, + { 1, 1, 0, 1, 100, 40, }, + { 0, 1, 0, 1, 104, 46, }, + { 2, 1, 0, 1, 104, 40, }, + { 1, 1, 0, 1, 104, 40, }, + { 0, 1, 0, 1, 108, 46, }, + { 2, 1, 0, 1, 108, 40, }, + { 1, 1, 0, 1, 108, 40, }, + { 0, 1, 0, 1, 112, 46, }, + { 2, 1, 0, 1, 112, 40, }, + { 1, 1, 0, 1, 112, 40, }, + { 0, 1, 0, 1, 116, 46, }, + { 2, 1, 0, 1, 116, 40, }, + { 1, 1, 0, 1, 116, 40, }, + { 0, 1, 0, 1, 120, 46, }, + { 2, 1, 0, 1, 120, 40, }, + { 1, 1, 0, 1, 120, 40, }, + { 0, 1, 0, 1, 124, 46, }, + { 2, 1, 0, 1, 124, 40, }, + { 1, 1, 0, 1, 124, 40, }, + { 0, 1, 0, 1, 128, 46, }, + { 2, 1, 0, 1, 128, 40, }, + { 1, 1, 0, 1, 128, 40, }, + { 0, 1, 0, 1, 132, 46, }, + { 2, 1, 0, 1, 132, 40, }, + { 1, 1, 0, 1, 132, 40, }, + { 0, 1, 0, 1, 136, 46, }, + { 2, 1, 0, 1, 136, 40, }, + { 1, 1, 0, 1, 136, 40, }, + { 0, 1, 0, 1, 140, 46, }, + { 2, 1, 0, 1, 140, 40, }, + { 1, 1, 0, 1, 140, 40, }, + { 0, 1, 0, 1, 149, 46, }, + { 2, 1, 0, 1, 149, 40, }, + { 1, 1, 0, 1, 149, 63, }, + { 0, 1, 0, 1, 153, 46, }, + { 2, 1, 0, 1, 153, 40, }, + { 1, 1, 0, 1, 153, 63, }, + { 0, 1, 0, 1, 157, 46, }, + { 2, 1, 0, 1, 157, 40, }, + { 1, 1, 0, 1, 157, 63, }, + { 0, 1, 0, 1, 161, 46, }, + { 2, 1, 0, 1, 161, 40, }, + { 1, 1, 0, 1, 161, 63, }, + { 0, 1, 0, 1, 165, 46, }, + { 2, 1, 0, 1, 165, 40, }, + { 1, 1, 0, 1, 165, 63, }, + { 0, 1, 0, 2, 36, 46, }, + { 2, 1, 0, 2, 36, 40, }, + { 1, 1, 0, 2, 36, 40, }, + { 0, 1, 0, 2, 40, 46, }, + { 2, 1, 0, 2, 40, 40, }, + { 1, 1, 0, 2, 40, 40, }, + { 0, 1, 0, 2, 44, 46, }, + { 2, 1, 0, 2, 
44, 40, }, + { 1, 1, 0, 2, 44, 40, }, + { 0, 1, 0, 2, 48, 46, }, + { 2, 1, 0, 2, 48, 40, }, + { 1, 1, 0, 2, 48, 40, }, + { 0, 1, 0, 2, 52, 46, }, + { 2, 1, 0, 2, 52, 40, }, + { 1, 1, 0, 2, 52, 40, }, + { 0, 1, 0, 2, 56, 46, }, + { 2, 1, 0, 2, 56, 40, }, + { 1, 1, 0, 2, 56, 40, }, + { 0, 1, 0, 2, 60, 46, }, + { 2, 1, 0, 2, 60, 40, }, + { 1, 1, 0, 2, 60, 40, }, + { 0, 1, 0, 2, 64, 46, }, + { 2, 1, 0, 2, 64, 40, }, + { 1, 1, 0, 2, 64, 40, }, + { 0, 1, 0, 2, 100, 46, }, + { 2, 1, 0, 2, 100, 40, }, + { 1, 1, 0, 2, 100, 40, }, + { 0, 1, 0, 2, 104, 46, }, + { 2, 1, 0, 2, 104, 40, }, + { 1, 1, 0, 2, 104, 40, }, + { 0, 1, 0, 2, 108, 46, }, + { 2, 1, 0, 2, 108, 40, }, + { 1, 1, 0, 2, 108, 40, }, + { 0, 1, 0, 2, 112, 46, }, + { 2, 1, 0, 2, 112, 40, }, + { 1, 1, 0, 2, 112, 40, }, + { 0, 1, 0, 2, 116, 46, }, + { 2, 1, 0, 2, 116, 40, }, + { 1, 1, 0, 2, 116, 40, }, + { 0, 1, 0, 2, 120, 46, }, + { 2, 1, 0, 2, 120, 40, }, + { 1, 1, 0, 2, 120, 40, }, + { 0, 1, 0, 2, 124, 46, }, + { 2, 1, 0, 2, 124, 40, }, + { 1, 1, 0, 2, 124, 40, }, + { 0, 1, 0, 2, 128, 46, }, + { 2, 1, 0, 2, 128, 40, }, + { 1, 1, 0, 2, 128, 40, }, + { 0, 1, 0, 2, 132, 46, }, + { 2, 1, 0, 2, 132, 40, }, + { 1, 1, 0, 2, 132, 40, }, + { 0, 1, 0, 2, 136, 46, }, + { 2, 1, 0, 2, 136, 40, }, + { 1, 1, 0, 2, 136, 40, }, + { 0, 1, 0, 2, 140, 46, }, + { 2, 1, 0, 2, 140, 40, }, + { 1, 1, 0, 2, 140, 40, }, + { 0, 1, 0, 2, 149, 46, }, + { 2, 1, 0, 2, 149, 40, }, + { 1, 1, 0, 2, 149, 63, }, + { 0, 1, 0, 2, 153, 46, }, + { 2, 1, 0, 2, 153, 40, }, + { 1, 1, 0, 2, 153, 63, }, + { 0, 1, 0, 2, 157, 46, }, + { 2, 1, 0, 2, 157, 40, }, + { 1, 1, 0, 2, 157, 63, }, + { 0, 1, 0, 2, 161, 46, }, + { 2, 1, 0, 2, 161, 40, }, + { 1, 1, 0, 2, 161, 63, }, + { 0, 1, 0, 2, 165, 46, }, + { 2, 1, 0, 2, 165, 40, }, + { 1, 1, 0, 2, 165, 63, }, + { 0, 1, 0, 3, 36, 46, }, + { 2, 1, 0, 3, 36, 40, }, + { 1, 1, 0, 3, 36, 40, }, + { 0, 1, 0, 3, 40, 46, }, + { 2, 1, 0, 3, 40, 40, }, + { 1, 1, 0, 3, 40, 40, }, + { 0, 1, 0, 3, 44, 46, }, + { 2, 1, 0, 3, 44, 40, }, + { 1, 1, 0, 3, 44, 40, }, + { 0, 1, 0, 3, 48, 46, }, + { 2, 1, 0, 3, 48, 40, }, + { 1, 1, 0, 3, 48, 40, }, + { 0, 1, 0, 3, 52, 46, }, + { 2, 1, 0, 3, 52, 40, }, + { 1, 1, 0, 3, 52, 40, }, + { 0, 1, 0, 3, 56, 46, }, + { 2, 1, 0, 3, 56, 40, }, + { 1, 1, 0, 3, 56, 40, }, + { 0, 1, 0, 3, 60, 46, }, + { 2, 1, 0, 3, 60, 40, }, + { 1, 1, 0, 3, 60, 40, }, + { 0, 1, 0, 3, 64, 46, }, + { 2, 1, 0, 3, 64, 40, }, + { 1, 1, 0, 3, 64, 40, }, + { 0, 1, 0, 3, 100, 46, }, + { 2, 1, 0, 3, 100, 40, }, + { 1, 1, 0, 3, 100, 40, }, + { 0, 1, 0, 3, 104, 46, }, + { 2, 1, 0, 3, 104, 40, }, + { 1, 1, 0, 3, 104, 40, }, + { 0, 1, 0, 3, 108, 46, }, + { 2, 1, 0, 3, 108, 40, }, + { 1, 1, 0, 3, 108, 40, }, + { 0, 1, 0, 3, 112, 46, }, + { 2, 1, 0, 3, 112, 40, }, + { 1, 1, 0, 3, 112, 40, }, + { 0, 1, 0, 3, 116, 46, }, + { 2, 1, 0, 3, 116, 40, }, + { 1, 1, 0, 3, 116, 40, }, + { 0, 1, 0, 3, 120, 46, }, + { 2, 1, 0, 3, 120, 40, }, + { 1, 1, 0, 3, 120, 40, }, + { 0, 1, 0, 3, 124, 46, }, + { 2, 1, 0, 3, 124, 40, }, + { 1, 1, 0, 3, 124, 40, }, + { 0, 1, 0, 3, 128, 46, }, + { 2, 1, 0, 3, 128, 40, }, + { 1, 1, 0, 3, 128, 40, }, + { 0, 1, 0, 3, 132, 46, }, + { 2, 1, 0, 3, 132, 40, }, + { 1, 1, 0, 3, 132, 40, }, + { 0, 1, 0, 3, 136, 46, }, + { 2, 1, 0, 3, 136, 40, }, + { 1, 1, 0, 3, 136, 40, }, + { 0, 1, 0, 3, 140, 46, }, + { 2, 1, 0, 3, 140, 40, }, + { 1, 1, 0, 3, 140, 40, }, + { 0, 1, 0, 3, 149, 46, }, + { 2, 1, 0, 3, 149, 40, }, + { 1, 1, 0, 3, 149, 63, }, + { 0, 1, 0, 3, 153, 46, }, + { 2, 1, 0, 3, 153, 40, }, + { 1, 1, 0, 3, 153, 63, }, + { 0, 1, 0, 3, 157, 46, }, 
+ { 2, 1, 0, 3, 157, 40, }, + { 1, 1, 0, 3, 157, 63, }, + { 0, 1, 0, 3, 161, 46, }, + { 2, 1, 0, 3, 161, 40, }, + { 1, 1, 0, 3, 161, 63, }, + { 0, 1, 0, 3, 165, 46, }, + { 2, 1, 0, 3, 165, 40, }, + { 1, 1, 0, 3, 165, 63, }, + { 0, 1, 0, 6, 36, 46, }, + { 2, 1, 0, 6, 36, 40, }, + { 1, 1, 0, 6, 36, 40, }, + { 0, 1, 0, 6, 40, 46, }, + { 2, 1, 0, 6, 40, 40, }, + { 1, 1, 0, 6, 40, 40, }, + { 0, 1, 0, 6, 44, 46, }, + { 2, 1, 0, 6, 44, 40, }, + { 1, 1, 0, 6, 44, 40, }, + { 0, 1, 0, 6, 48, 46, }, + { 2, 1, 0, 6, 48, 40, }, + { 1, 1, 0, 6, 48, 40, }, + { 0, 1, 0, 6, 52, 46, }, + { 2, 1, 0, 6, 52, 40, }, + { 1, 1, 0, 6, 52, 40, }, + { 0, 1, 0, 6, 56, 46, }, + { 2, 1, 0, 6, 56, 40, }, + { 1, 1, 0, 6, 56, 40, }, + { 0, 1, 0, 6, 60, 46, }, + { 2, 1, 0, 6, 60, 40, }, + { 1, 1, 0, 6, 60, 40, }, + { 0, 1, 0, 6, 64, 46, }, + { 2, 1, 0, 6, 64, 40, }, + { 1, 1, 0, 6, 64, 40, }, + { 0, 1, 0, 6, 100, 46, }, + { 2, 1, 0, 6, 100, 40, }, + { 1, 1, 0, 6, 100, 40, }, + { 0, 1, 0, 6, 104, 46, }, + { 2, 1, 0, 6, 104, 40, }, + { 1, 1, 0, 6, 104, 40, }, + { 0, 1, 0, 6, 108, 46, }, + { 2, 1, 0, 6, 108, 40, }, + { 1, 1, 0, 6, 108, 40, }, + { 0, 1, 0, 6, 112, 46, }, + { 2, 1, 0, 6, 112, 40, }, + { 1, 1, 0, 6, 112, 40, }, + { 0, 1, 0, 6, 116, 46, }, + { 2, 1, 0, 6, 116, 40, }, + { 1, 1, 0, 6, 116, 40, }, + { 0, 1, 0, 6, 120, 46, }, + { 2, 1, 0, 6, 120, 40, }, + { 1, 1, 0, 6, 120, 40, }, + { 0, 1, 0, 6, 124, 46, }, + { 2, 1, 0, 6, 124, 40, }, + { 1, 1, 0, 6, 124, 40, }, + { 0, 1, 0, 6, 128, 46, }, + { 2, 1, 0, 6, 128, 40, }, + { 1, 1, 0, 6, 128, 40, }, + { 0, 1, 0, 6, 132, 46, }, + { 2, 1, 0, 6, 132, 40, }, + { 1, 1, 0, 6, 132, 40, }, + { 0, 1, 0, 6, 136, 46, }, + { 2, 1, 0, 6, 136, 40, }, + { 1, 1, 0, 6, 136, 40, }, + { 0, 1, 0, 6, 140, 46, }, + { 2, 1, 0, 6, 140, 40, }, + { 1, 1, 0, 6, 140, 40, }, + { 0, 1, 0, 6, 149, 46, }, + { 2, 1, 0, 6, 149, 40, }, + { 1, 1, 0, 6, 149, 63, }, + { 0, 1, 0, 6, 153, 46, }, + { 2, 1, 0, 6, 153, 40, }, + { 1, 1, 0, 6, 153, 63, }, + { 0, 1, 0, 6, 157, 46, }, + { 2, 1, 0, 6, 157, 40, }, + { 1, 1, 0, 6, 157, 63, }, + { 0, 1, 0, 6, 161, 46, }, + { 2, 1, 0, 6, 161, 40, }, + { 1, 1, 0, 6, 161, 63, }, + { 0, 1, 0, 6, 165, 46, }, + { 2, 1, 0, 6, 165, 40, }, + { 1, 1, 0, 6, 165, 63, }, + { 0, 1, 0, 7, 36, 46, }, + { 2, 1, 0, 7, 36, 40, }, + { 1, 1, 0, 7, 36, 40, }, + { 0, 1, 0, 7, 40, 46, }, + { 2, 1, 0, 7, 40, 40, }, + { 1, 1, 0, 7, 40, 40, }, + { 0, 1, 0, 7, 44, 46, }, + { 2, 1, 0, 7, 44, 40, }, + { 1, 1, 0, 7, 44, 40, }, + { 0, 1, 0, 7, 48, 46, }, + { 2, 1, 0, 7, 48, 40, }, + { 1, 1, 0, 7, 48, 40, }, + { 0, 1, 0, 7, 52, 46, }, + { 2, 1, 0, 7, 52, 40, }, + { 1, 1, 0, 7, 52, 40, }, + { 0, 1, 0, 7, 56, 46, }, + { 2, 1, 0, 7, 56, 40, }, + { 1, 1, 0, 7, 56, 40, }, + { 0, 1, 0, 7, 60, 46, }, + { 2, 1, 0, 7, 60, 40, }, + { 1, 1, 0, 7, 60, 40, }, + { 0, 1, 0, 7, 64, 46, }, + { 2, 1, 0, 7, 64, 40, }, + { 1, 1, 0, 7, 64, 40, }, + { 0, 1, 0, 7, 100, 46, }, + { 2, 1, 0, 7, 100, 40, }, + { 1, 1, 0, 7, 100, 40, }, + { 0, 1, 0, 7, 104, 46, }, + { 2, 1, 0, 7, 104, 40, }, + { 1, 1, 0, 7, 104, 40, }, + { 0, 1, 0, 7, 108, 46, }, + { 2, 1, 0, 7, 108, 40, }, + { 1, 1, 0, 7, 108, 40, }, + { 0, 1, 0, 7, 112, 46, }, + { 2, 1, 0, 7, 112, 40, }, + { 1, 1, 0, 7, 112, 40, }, + { 0, 1, 0, 7, 116, 46, }, + { 2, 1, 0, 7, 116, 40, }, + { 1, 1, 0, 7, 116, 40, }, + { 0, 1, 0, 7, 120, 46, }, + { 2, 1, 0, 7, 120, 40, }, + { 1, 1, 0, 7, 120, 40, }, + { 0, 1, 0, 7, 124, 46, }, + { 2, 1, 0, 7, 124, 40, }, + { 1, 1, 0, 7, 124, 40, }, + { 0, 1, 0, 7, 128, 46, }, + { 2, 1, 0, 7, 128, 40, }, + { 1, 1, 0, 7, 128, 40, }, + { 0, 1, 0, 7, 
132, 46, }, + { 2, 1, 0, 7, 132, 40, }, + { 1, 1, 0, 7, 132, 40, }, + { 0, 1, 0, 7, 136, 46, }, + { 2, 1, 0, 7, 136, 40, }, + { 1, 1, 0, 7, 136, 40, }, + { 0, 1, 0, 7, 140, 46, }, + { 2, 1, 0, 7, 140, 40, }, + { 1, 1, 0, 7, 140, 40, }, + { 0, 1, 0, 7, 149, 46, }, + { 2, 1, 0, 7, 149, 40, }, + { 1, 1, 0, 7, 149, 63, }, + { 0, 1, 0, 7, 153, 46, }, + { 2, 1, 0, 7, 153, 40, }, + { 1, 1, 0, 7, 153, 63, }, + { 0, 1, 0, 7, 157, 46, }, + { 2, 1, 0, 7, 157, 40, }, + { 1, 1, 0, 7, 157, 63, }, + { 0, 1, 0, 7, 161, 46, }, + { 2, 1, 0, 7, 161, 40, }, + { 1, 1, 0, 7, 161, 63, }, + { 0, 1, 0, 7, 165, 46, }, + { 2, 1, 0, 7, 165, 40, }, + { 1, 1, 0, 7, 165, 63, }, + { 0, 1, 1, 2, 38, 46, }, + { 2, 1, 1, 2, 38, 40, }, + { 1, 1, 1, 2, 38, 40, }, + { 0, 1, 1, 2, 46, 46, }, + { 2, 1, 1, 2, 46, 40, }, + { 1, 1, 1, 2, 46, 40, }, + { 0, 1, 1, 2, 54, 46, }, + { 2, 1, 1, 2, 54, 40, }, + { 1, 1, 1, 2, 54, 40, }, + { 0, 1, 1, 2, 62, 46, }, + { 2, 1, 1, 2, 62, 40, }, + { 1, 1, 1, 2, 62, 40, }, + { 0, 1, 1, 2, 102, 46, }, + { 2, 1, 1, 2, 102, 40, }, + { 1, 1, 1, 2, 102, 40, }, + { 0, 1, 1, 2, 110, 46, }, + { 2, 1, 1, 2, 110, 40, }, + { 1, 1, 1, 2, 110, 40, }, + { 0, 1, 1, 2, 118, 46, }, + { 2, 1, 1, 2, 118, 40, }, + { 1, 1, 1, 2, 118, 40, }, + { 0, 1, 1, 2, 126, 46, }, + { 2, 1, 1, 2, 126, 40, }, + { 1, 1, 1, 2, 126, 40, }, + { 0, 1, 1, 2, 134, 46, }, + { 2, 1, 1, 2, 134, 40, }, + { 1, 1, 1, 2, 134, 40, }, + { 0, 1, 1, 2, 151, 46, }, + { 2, 1, 1, 2, 151, 40, }, + { 1, 1, 1, 2, 151, 63, }, + { 0, 1, 1, 2, 159, 46, }, + { 2, 1, 1, 2, 159, 40, }, + { 1, 1, 1, 2, 159, 63, }, + { 0, 1, 1, 3, 38, 46, }, + { 2, 1, 1, 3, 38, 40, }, + { 1, 1, 1, 3, 38, 40, }, + { 0, 1, 1, 3, 46, 46, }, + { 2, 1, 1, 3, 46, 40, }, + { 1, 1, 1, 3, 46, 40, }, + { 0, 1, 1, 3, 54, 46, }, + { 2, 1, 1, 3, 54, 40, }, + { 1, 1, 1, 3, 54, 40, }, + { 0, 1, 1, 3, 62, 46, }, + { 2, 1, 1, 3, 62, 40, }, + { 1, 1, 1, 3, 62, 40, }, + { 0, 1, 1, 3, 102, 46, }, + { 2, 1, 1, 3, 102, 40, }, + { 1, 1, 1, 3, 102, 40, }, + { 0, 1, 1, 3, 110, 46, }, + { 2, 1, 1, 3, 110, 40, }, + { 1, 1, 1, 3, 110, 40, }, + { 0, 1, 1, 3, 118, 46, }, + { 2, 1, 1, 3, 118, 40, }, + { 1, 1, 1, 3, 118, 40, }, + { 0, 1, 1, 3, 126, 46, }, + { 2, 1, 1, 3, 126, 40, }, + { 1, 1, 1, 3, 126, 40, }, + { 0, 1, 1, 3, 134, 46, }, + { 2, 1, 1, 3, 134, 40, }, + { 1, 1, 1, 3, 134, 40, }, + { 0, 1, 1, 3, 151, 46, }, + { 2, 1, 1, 3, 151, 40, }, + { 1, 1, 1, 3, 151, 63, }, + { 0, 1, 1, 3, 159, 46, }, + { 2, 1, 1, 3, 159, 40, }, + { 1, 1, 1, 3, 159, 63, }, + { 0, 1, 1, 6, 38, 46, }, + { 2, 1, 1, 6, 38, 40, }, + { 1, 1, 1, 6, 38, 40, }, + { 0, 1, 1, 6, 46, 46, }, + { 2, 1, 1, 6, 46, 40, }, + { 1, 1, 1, 6, 46, 40, }, + { 0, 1, 1, 6, 54, 46, }, + { 2, 1, 1, 6, 54, 40, }, + { 1, 1, 1, 6, 54, 40, }, + { 0, 1, 1, 6, 62, 46, }, + { 2, 1, 1, 6, 62, 40, }, + { 1, 1, 1, 6, 62, 40, }, + { 0, 1, 1, 6, 102, 46, }, + { 2, 1, 1, 6, 102, 40, }, + { 1, 1, 1, 6, 102, 40, }, + { 0, 1, 1, 6, 110, 46, }, + { 2, 1, 1, 6, 110, 40, }, + { 1, 1, 1, 6, 110, 40, }, + { 0, 1, 1, 6, 118, 46, }, + { 2, 1, 1, 6, 118, 40, }, + { 1, 1, 1, 6, 118, 40, }, + { 0, 1, 1, 6, 126, 46, }, + { 2, 1, 1, 6, 126, 40, }, + { 1, 1, 1, 6, 126, 40, }, + { 0, 1, 1, 6, 134, 46, }, + { 2, 1, 1, 6, 134, 40, }, + { 1, 1, 1, 6, 134, 40, }, + { 0, 1, 1, 6, 151, 46, }, + { 2, 1, 1, 6, 151, 40, }, + { 1, 1, 1, 6, 151, 63, }, + { 0, 1, 1, 6, 159, 46, }, + { 2, 1, 1, 6, 159, 40, }, + { 1, 1, 1, 6, 159, 63, }, + { 0, 1, 1, 7, 38, 46, }, + { 2, 1, 1, 7, 38, 40, }, + { 1, 1, 1, 7, 38, 40, }, + { 0, 1, 1, 7, 46, 46, }, + { 2, 1, 1, 7, 46, 40, }, + { 1, 1, 1, 7, 46, 40, }, 
+ { 0, 1, 1, 7, 54, 46, }, + { 2, 1, 1, 7, 54, 40, }, + { 1, 1, 1, 7, 54, 40, }, + { 0, 1, 1, 7, 62, 46, }, + { 2, 1, 1, 7, 62, 40, }, + { 1, 1, 1, 7, 62, 40, }, + { 0, 1, 1, 7, 102, 46, }, + { 2, 1, 1, 7, 102, 40, }, + { 1, 1, 1, 7, 102, 40, }, + { 0, 1, 1, 7, 110, 46, }, + { 2, 1, 1, 7, 110, 40, }, + { 1, 1, 1, 7, 110, 40, }, + { 0, 1, 1, 7, 118, 46, }, + { 2, 1, 1, 7, 118, 40, }, + { 1, 1, 1, 7, 118, 40, }, + { 0, 1, 1, 7, 126, 46, }, + { 2, 1, 1, 7, 126, 40, }, + { 1, 1, 1, 7, 126, 40, }, + { 0, 1, 1, 7, 134, 46, }, + { 2, 1, 1, 7, 134, 40, }, + { 1, 1, 1, 7, 134, 40, }, + { 0, 1, 1, 7, 151, 46, }, + { 2, 1, 1, 7, 151, 40, }, + { 1, 1, 1, 7, 151, 63, }, + { 0, 1, 1, 7, 159, 46, }, + { 2, 1, 1, 7, 159, 40, }, + { 1, 1, 1, 7, 159, 63, }, + { 0, 1, 2, 4, 42, 46, }, + { 2, 1, 2, 4, 42, 40, }, + { 1, 1, 2, 4, 42, 40, }, + { 0, 1, 2, 4, 58, 46, }, + { 2, 1, 2, 4, 58, 40, }, + { 1, 1, 2, 4, 58, 40, }, + { 0, 1, 2, 4, 106, 46, }, + { 2, 1, 2, 4, 106, 40, }, + { 1, 1, 2, 4, 106, 40, }, + { 0, 1, 2, 4, 122, 46, }, + { 2, 1, 2, 4, 122, 40, }, + { 1, 1, 2, 4, 122, 40, }, + { 0, 1, 2, 4, 155, 46, }, + { 2, 1, 2, 4, 155, 40, }, + { 1, 1, 2, 4, 155, 63, }, + { 0, 1, 2, 5, 42, 46, }, + { 2, 1, 2, 5, 42, 40, }, + { 1, 1, 2, 5, 42, 40, }, + { 0, 1, 2, 5, 58, 46, }, + { 2, 1, 2, 5, 58, 40, }, + { 1, 1, 2, 5, 58, 40, }, + { 0, 1, 2, 5, 106, 46, }, + { 2, 1, 2, 5, 106, 40, }, + { 1, 1, 2, 5, 106, 40, }, + { 0, 1, 2, 5, 122, 46, }, + { 2, 1, 2, 5, 122, 40, }, + { 1, 1, 2, 5, 122, 40, }, + { 0, 1, 2, 5, 155, 46, }, + { 2, 1, 2, 5, 155, 40, }, + { 1, 1, 2, 5, 155, 63, }, + { 0, 1, 2, 8, 42, 46, }, + { 2, 1, 2, 8, 42, 40, }, + { 1, 1, 2, 8, 42, 40, }, + { 0, 1, 2, 8, 58, 46, }, + { 2, 1, 2, 8, 58, 40, }, + { 1, 1, 2, 8, 58, 40, }, + { 0, 1, 2, 8, 106, 46, }, + { 2, 1, 2, 8, 106, 40, }, + { 1, 1, 2, 8, 106, 40, }, + { 0, 1, 2, 8, 122, 46, }, + { 2, 1, 2, 8, 122, 40, }, + { 1, 1, 2, 8, 122, 40, }, + { 0, 1, 2, 8, 155, 46, }, + { 2, 1, 2, 8, 155, 40, }, + { 1, 1, 2, 8, 155, 63, }, + { 0, 1, 2, 9, 42, 46, }, + { 2, 1, 2, 9, 42, 40, }, + { 1, 1, 2, 9, 42, 40, }, + { 0, 1, 2, 9, 58, 46, }, + { 2, 1, 2, 9, 58, 40, }, + { 1, 1, 2, 9, 58, 40, }, + { 0, 1, 2, 9, 106, 46, }, + { 2, 1, 2, 9, 106, 40, }, + { 1, 1, 2, 9, 106, 40, }, + { 0, 1, 2, 9, 122, 46, }, + { 2, 1, 2, 9, 122, 40, }, + { 1, 1, 2, 9, 122, 40, }, + { 0, 1, 2, 9, 155, 46, }, + { 2, 1, 2, 9, 155, 40, }, + { 1, 1, 2, 9, 155, 63, },
+};
+
+RTW_DECL_TABLE_TXPWR_LMT(rtw8814a_txpwr_lmt_type3);
+
+static const struct rtw_txpwr_lmt_cfg_pair rtw8814a_txpwr_lmt_type5[] = {
+ { 0, 0, 0, 0, 1, 46, }, + { 2, 0, 0, 0, 1, 40, }, + { 1, 0, 0, 0, 1, 40, }, + { 0, 0, 0, 0, 2, 46, }, + { 2, 0, 0, 0, 2, 40, }, + { 1, 0, 0, 0, 2, 40, }, + { 0, 0, 0, 0, 3, 46, }, + { 2, 0, 0, 0, 3, 40, }, + { 1, 0, 0, 0, 3, 40, }, + { 0, 0, 0, 0, 4, 46, }, + { 2, 0, 0, 0, 4, 40, }, + { 1, 0, 0, 0, 4, 40, }, + { 0, 0, 0, 0, 5, 46, }, + { 2, 0, 0, 0, 5, 40, }, + { 1, 0, 0, 0, 5, 40, }, + { 0, 0, 0, 0, 6, 46, }, + { 2, 0, 0, 0, 6, 40, }, + { 1, 0, 0, 0, 6, 40, }, + { 0, 0, 0, 0, 7, 46, }, + { 2, 0, 0, 0, 7, 40, }, + { 1, 0, 0, 0, 7, 40, }, + { 0, 0, 0, 0, 8, 46, }, + { 2, 0, 0, 0, 8, 40, }, + { 1, 0, 0, 0, 8, 40, }, + { 0, 0, 0, 0, 9, 46, }, + { 2, 0, 0, 0, 9, 40, }, + { 1, 0, 0, 0, 9, 40, }, + { 0, 0, 0, 0, 10, 46, }, + { 2, 0, 0, 0, 10, 40, }, + { 1, 0, 0, 0, 10, 40, }, + { 0, 0, 0, 0, 11, 46, }, + { 2, 0, 0, 0, 11, 40, }, + { 1, 0, 0, 0, 11, 40, }, + { 0, 0, 0, 0, 12, 63, }, + { 2, 0, 0, 0, 12, 40, }, + { 1, 0, 0, 0, 12, 40, }, + { 0, 0, 0, 0, 13, 63, }, + { 2, 0, 0, 0, 13, 40, }, + { 1, 0, 0,
0, 13, 40, }, + { 0, 0, 0, 0, 14, 63, }, + { 2, 0, 0, 0, 14, 63, }, + { 1, 0, 0, 0, 14, 40, }, + { 0, 0, 0, 1, 1, 46, }, + { 2, 0, 0, 1, 1, 40, }, + { 1, 0, 0, 1, 1, 40, }, + { 0, 0, 0, 1, 2, 46, }, + { 2, 0, 0, 1, 2, 40, }, + { 1, 0, 0, 1, 2, 40, }, + { 0, 0, 0, 1, 3, 46, }, + { 2, 0, 0, 1, 3, 40, }, + { 1, 0, 0, 1, 3, 40, }, + { 0, 0, 0, 1, 4, 46, }, + { 2, 0, 0, 1, 4, 40, }, + { 1, 0, 0, 1, 4, 40, }, + { 0, 0, 0, 1, 5, 46, }, + { 2, 0, 0, 1, 5, 40, }, + { 1, 0, 0, 1, 5, 40, }, + { 0, 0, 0, 1, 6, 46, }, + { 2, 0, 0, 1, 6, 40, }, + { 1, 0, 0, 1, 6, 40, }, + { 0, 0, 0, 1, 7, 46, }, + { 2, 0, 0, 1, 7, 40, }, + { 1, 0, 0, 1, 7, 40, }, + { 0, 0, 0, 1, 8, 46, }, + { 2, 0, 0, 1, 8, 40, }, + { 1, 0, 0, 1, 8, 40, }, + { 0, 0, 0, 1, 9, 46, }, + { 2, 0, 0, 1, 9, 40, }, + { 1, 0, 0, 1, 9, 40, }, + { 0, 0, 0, 1, 10, 46, }, + { 2, 0, 0, 1, 10, 40, }, + { 1, 0, 0, 1, 10, 40, }, + { 0, 0, 0, 1, 11, 46, }, + { 2, 0, 0, 1, 11, 40, }, + { 1, 0, 0, 1, 11, 40, }, + { 0, 0, 0, 1, 12, 63, }, + { 2, 0, 0, 1, 12, 40, }, + { 1, 0, 0, 1, 12, 40, }, + { 0, 0, 0, 1, 13, 63, }, + { 2, 0, 0, 1, 13, 40, }, + { 1, 0, 0, 1, 13, 40, }, + { 0, 0, 0, 1, 14, 63, }, + { 2, 0, 0, 1, 14, 63, }, + { 1, 0, 0, 1, 14, 63, }, + { 0, 0, 0, 2, 1, 46, }, + { 2, 0, 0, 2, 1, 40, }, + { 1, 0, 0, 2, 1, 40, }, + { 0, 0, 0, 2, 2, 46, }, + { 2, 0, 0, 2, 2, 40, }, + { 1, 0, 0, 2, 2, 40, }, + { 0, 0, 0, 2, 3, 46, }, + { 2, 0, 0, 2, 3, 40, }, + { 1, 0, 0, 2, 3, 40, }, + { 0, 0, 0, 2, 4, 46, }, + { 2, 0, 0, 2, 4, 40, }, + { 1, 0, 0, 2, 4, 40, }, + { 0, 0, 0, 2, 5, 46, }, + { 2, 0, 0, 2, 5, 40, }, + { 1, 0, 0, 2, 5, 40, }, + { 0, 0, 0, 2, 6, 46, }, + { 2, 0, 0, 2, 6, 40, }, + { 1, 0, 0, 2, 6, 40, }, + { 0, 0, 0, 2, 7, 46, }, + { 2, 0, 0, 2, 7, 40, }, + { 1, 0, 0, 2, 7, 40, }, + { 0, 0, 0, 2, 8, 46, }, + { 2, 0, 0, 2, 8, 40, }, + { 1, 0, 0, 2, 8, 40, }, + { 0, 0, 0, 2, 9, 46, }, + { 2, 0, 0, 2, 9, 40, }, + { 1, 0, 0, 2, 9, 40, }, + { 0, 0, 0, 2, 10, 46, }, + { 2, 0, 0, 2, 10, 40, }, + { 1, 0, 0, 2, 10, 40, }, + { 0, 0, 0, 2, 11, 46, }, + { 2, 0, 0, 2, 11, 40, }, + { 1, 0, 0, 2, 11, 40, }, + { 0, 0, 0, 2, 12, 63, }, + { 2, 0, 0, 2, 12, 40, }, + { 1, 0, 0, 2, 12, 40, }, + { 0, 0, 0, 2, 13, 63, }, + { 2, 0, 0, 2, 13, 40, }, + { 1, 0, 0, 2, 13, 40, }, + { 0, 0, 0, 2, 14, 63, }, + { 2, 0, 0, 2, 14, 63, }, + { 1, 0, 0, 2, 14, 63, }, + { 0, 0, 0, 3, 1, 46, }, + { 2, 0, 0, 3, 1, 40, }, + { 1, 0, 0, 3, 1, 40, }, + { 0, 0, 0, 3, 2, 46, }, + { 2, 0, 0, 3, 2, 40, }, + { 1, 0, 0, 3, 2, 40, }, + { 0, 0, 0, 3, 3, 46, }, + { 2, 0, 0, 3, 3, 40, }, + { 1, 0, 0, 3, 3, 40, }, + { 0, 0, 0, 3, 4, 46, }, + { 2, 0, 0, 3, 4, 40, }, + { 1, 0, 0, 3, 4, 40, }, + { 0, 0, 0, 3, 5, 46, }, + { 2, 0, 0, 3, 5, 40, }, + { 1, 0, 0, 3, 5, 40, }, + { 0, 0, 0, 3, 6, 46, }, + { 2, 0, 0, 3, 6, 40, }, + { 1, 0, 0, 3, 6, 40, }, + { 0, 0, 0, 3, 7, 46, }, + { 2, 0, 0, 3, 7, 40, }, + { 1, 0, 0, 3, 7, 40, }, + { 0, 0, 0, 3, 8, 46, }, + { 2, 0, 0, 3, 8, 40, }, + { 1, 0, 0, 3, 8, 40, }, + { 0, 0, 0, 3, 9, 46, }, + { 2, 0, 0, 3, 9, 40, }, + { 1, 0, 0, 3, 9, 40, }, + { 0, 0, 0, 3, 10, 46, }, + { 2, 0, 0, 3, 10, 40, }, + { 1, 0, 0, 3, 10, 40, }, + { 0, 0, 0, 3, 11, 46, }, + { 2, 0, 0, 3, 11, 40, }, + { 1, 0, 0, 3, 11, 40, }, + { 0, 0, 0, 3, 12, 63, }, + { 2, 0, 0, 3, 12, 40, }, + { 1, 0, 0, 3, 12, 40, }, + { 0, 0, 0, 3, 13, 63, }, + { 2, 0, 0, 3, 13, 40, }, + { 1, 0, 0, 3, 13, 40, }, + { 0, 0, 0, 3, 14, 63, }, + { 2, 0, 0, 3, 14, 63, }, + { 1, 0, 0, 3, 14, 63, }, + { 0, 0, 0, 6, 1, 46, }, + { 2, 0, 0, 6, 1, 40, }, + { 1, 0, 0, 6, 1, 40, }, + { 0, 0, 0, 6, 2, 46, }, + { 2, 0, 0, 6, 2, 40, }, + { 1, 
0, 0, 6, 2, 40, }, + { 0, 0, 0, 6, 3, 46, }, + { 2, 0, 0, 6, 3, 40, }, + { 1, 0, 0, 6, 3, 40, }, + { 0, 0, 0, 6, 4, 46, }, + { 2, 0, 0, 6, 4, 40, }, + { 1, 0, 0, 6, 4, 40, }, + { 0, 0, 0, 6, 5, 46, }, + { 2, 0, 0, 6, 5, 40, }, + { 1, 0, 0, 6, 5, 40, }, + { 0, 0, 0, 6, 6, 46, }, + { 2, 0, 0, 6, 6, 40, }, + { 1, 0, 0, 6, 6, 40, }, + { 0, 0, 0, 6, 7, 46, }, + { 2, 0, 0, 6, 7, 40, }, + { 1, 0, 0, 6, 7, 40, }, + { 0, 0, 0, 6, 8, 46, }, + { 2, 0, 0, 6, 8, 40, }, + { 1, 0, 0, 6, 8, 40, }, + { 0, 0, 0, 6, 9, 46, }, + { 2, 0, 0, 6, 9, 40, }, + { 1, 0, 0, 6, 9, 40, }, + { 0, 0, 0, 6, 10, 46, }, + { 2, 0, 0, 6, 10, 40, }, + { 1, 0, 0, 6, 10, 40, }, + { 0, 0, 0, 6, 11, 46, }, + { 2, 0, 0, 6, 11, 40, }, + { 1, 0, 0, 6, 11, 40, }, + { 0, 0, 0, 6, 12, 63, }, + { 2, 0, 0, 6, 12, 40, }, + { 1, 0, 0, 6, 12, 40, }, + { 0, 0, 0, 6, 13, 63, }, + { 2, 0, 0, 6, 13, 40, }, + { 1, 0, 0, 6, 13, 40, }, + { 0, 0, 0, 6, 14, 63, }, + { 2, 0, 0, 6, 14, 63, }, + { 1, 0, 0, 6, 14, 63, }, + { 0, 0, 0, 7, 1, 46, }, + { 2, 0, 0, 7, 1, 40, }, + { 1, 0, 0, 7, 1, 40, }, + { 0, 0, 0, 7, 2, 46, }, + { 2, 0, 0, 7, 2, 40, }, + { 1, 0, 0, 7, 2, 40, }, + { 0, 0, 0, 7, 3, 46, }, + { 2, 0, 0, 7, 3, 40, }, + { 1, 0, 0, 7, 3, 40, }, + { 0, 0, 0, 7, 4, 46, }, + { 2, 0, 0, 7, 4, 40, }, + { 1, 0, 0, 7, 4, 40, }, + { 0, 0, 0, 7, 5, 46, }, + { 2, 0, 0, 7, 5, 40, }, + { 1, 0, 0, 7, 5, 40, }, + { 0, 0, 0, 7, 6, 46, }, + { 2, 0, 0, 7, 6, 40, }, + { 1, 0, 0, 7, 6, 40, }, + { 0, 0, 0, 7, 7, 46, }, + { 2, 0, 0, 7, 7, 40, }, + { 1, 0, 0, 7, 7, 40, }, + { 0, 0, 0, 7, 8, 46, }, + { 2, 0, 0, 7, 8, 40, }, + { 1, 0, 0, 7, 8, 40, }, + { 0, 0, 0, 7, 9, 46, }, + { 2, 0, 0, 7, 9, 40, }, + { 1, 0, 0, 7, 9, 40, }, + { 0, 0, 0, 7, 10, 46, }, + { 2, 0, 0, 7, 10, 40, }, + { 1, 0, 0, 7, 10, 40, }, + { 0, 0, 0, 7, 11, 46, }, + { 2, 0, 0, 7, 11, 40, }, + { 1, 0, 0, 7, 11, 40, }, + { 0, 0, 0, 7, 12, 63, }, + { 2, 0, 0, 7, 12, 40, }, + { 1, 0, 0, 7, 12, 40, }, + { 0, 0, 0, 7, 13, 63, }, + { 2, 0, 0, 7, 13, 40, }, + { 1, 0, 0, 7, 13, 40, }, + { 0, 0, 0, 7, 14, 63, }, + { 2, 0, 0, 7, 14, 63, }, + { 1, 0, 0, 7, 14, 63, }, + { 0, 0, 1, 2, 1, 63, }, + { 2, 0, 1, 2, 1, 63, }, + { 1, 0, 1, 2, 1, 63, }, + { 0, 0, 1, 2, 2, 63, }, + { 2, 0, 1, 2, 2, 63, }, + { 1, 0, 1, 2, 2, 63, }, + { 0, 0, 1, 2, 3, 46, }, + { 2, 0, 1, 2, 3, 40, }, + { 1, 0, 1, 2, 3, 40, }, + { 0, 0, 1, 2, 4, 46, }, + { 2, 0, 1, 2, 4, 40, }, + { 1, 0, 1, 2, 4, 40, }, + { 0, 0, 1, 2, 5, 46, }, + { 2, 0, 1, 2, 5, 40, }, + { 1, 0, 1, 2, 5, 40, }, + { 0, 0, 1, 2, 6, 46, }, + { 2, 0, 1, 2, 6, 40, }, + { 1, 0, 1, 2, 6, 40, }, + { 0, 0, 1, 2, 7, 46, }, + { 2, 0, 1, 2, 7, 40, }, + { 1, 0, 1, 2, 7, 40, }, + { 0, 0, 1, 2, 8, 46, }, + { 2, 0, 1, 2, 8, 40, }, + { 1, 0, 1, 2, 8, 40, }, + { 0, 0, 1, 2, 9, 46, }, + { 2, 0, 1, 2, 9, 40, }, + { 1, 0, 1, 2, 9, 40, }, + { 0, 0, 1, 2, 10, 46, }, + { 2, 0, 1, 2, 10, 40, }, + { 1, 0, 1, 2, 10, 40, }, + { 0, 0, 1, 2, 11, 46, }, + { 2, 0, 1, 2, 11, 40, }, + { 1, 0, 1, 2, 11, 40, }, + { 0, 0, 1, 2, 12, 63, }, + { 2, 0, 1, 2, 12, 40, }, + { 1, 0, 1, 2, 12, 40, }, + { 0, 0, 1, 2, 13, 63, }, + { 2, 0, 1, 2, 13, 40, }, + { 1, 0, 1, 2, 13, 40, }, + { 0, 0, 1, 2, 14, 63, }, + { 2, 0, 1, 2, 14, 63, }, + { 1, 0, 1, 2, 14, 63, }, + { 0, 0, 1, 3, 1, 63, }, + { 2, 0, 1, 3, 1, 63, }, + { 1, 0, 1, 3, 1, 63, }, + { 0, 0, 1, 3, 2, 63, }, + { 2, 0, 1, 3, 2, 63, }, + { 1, 0, 1, 3, 2, 63, }, + { 0, 0, 1, 3, 3, 46, }, + { 2, 0, 1, 3, 3, 40, }, + { 1, 0, 1, 3, 3, 40, }, + { 0, 0, 1, 3, 4, 46, }, + { 2, 0, 1, 3, 4, 40, }, + { 1, 0, 1, 3, 4, 40, }, + { 0, 0, 1, 3, 5, 46, }, + { 2, 0, 1, 3, 5, 40, }, + { 1, 
0, 1, 3, 5, 40, }, + { 0, 0, 1, 3, 6, 46, }, + { 2, 0, 1, 3, 6, 40, }, + { 1, 0, 1, 3, 6, 40, }, + { 0, 0, 1, 3, 7, 46, }, + { 2, 0, 1, 3, 7, 40, }, + { 1, 0, 1, 3, 7, 40, }, + { 0, 0, 1, 3, 8, 46, }, + { 2, 0, 1, 3, 8, 40, }, + { 1, 0, 1, 3, 8, 40, }, + { 0, 0, 1, 3, 9, 46, }, + { 2, 0, 1, 3, 9, 40, }, + { 1, 0, 1, 3, 9, 40, }, + { 0, 0, 1, 3, 10, 46, }, + { 2, 0, 1, 3, 10, 40, }, + { 1, 0, 1, 3, 10, 40, }, + { 0, 0, 1, 3, 11, 46, }, + { 2, 0, 1, 3, 11, 40, }, + { 1, 0, 1, 3, 11, 40, }, + { 0, 0, 1, 3, 12, 63, }, + { 2, 0, 1, 3, 12, 40, }, + { 1, 0, 1, 3, 12, 40, }, + { 0, 0, 1, 3, 13, 63, }, + { 2, 0, 1, 3, 13, 40, }, + { 1, 0, 1, 3, 13, 40, }, + { 0, 0, 1, 3, 14, 63, }, + { 2, 0, 1, 3, 14, 63, }, + { 1, 0, 1, 3, 14, 63, }, + { 0, 0, 1, 6, 1, 63, }, + { 2, 0, 1, 6, 1, 63, }, + { 1, 0, 1, 6, 1, 63, }, + { 0, 0, 1, 6, 2, 63, }, + { 2, 0, 1, 6, 2, 63, }, + { 1, 0, 1, 6, 2, 63, }, + { 0, 0, 1, 6, 3, 46, }, + { 2, 0, 1, 6, 3, 40, }, + { 1, 0, 1, 6, 3, 40, }, + { 0, 0, 1, 6, 4, 46, }, + { 2, 0, 1, 6, 4, 40, }, + { 1, 0, 1, 6, 4, 40, }, + { 0, 0, 1, 6, 5, 46, }, + { 2, 0, 1, 6, 5, 40, }, + { 1, 0, 1, 6, 5, 40, }, + { 0, 0, 1, 6, 6, 46, }, + { 2, 0, 1, 6, 6, 40, }, + { 1, 0, 1, 6, 6, 40, }, + { 0, 0, 1, 6, 7, 46, }, + { 2, 0, 1, 6, 7, 40, }, + { 1, 0, 1, 6, 7, 40, }, + { 0, 0, 1, 6, 8, 46, }, + { 2, 0, 1, 6, 8, 40, }, + { 1, 0, 1, 6, 8, 40, }, + { 0, 0, 1, 6, 9, 46, }, + { 2, 0, 1, 6, 9, 40, }, + { 1, 0, 1, 6, 9, 40, }, + { 0, 0, 1, 6, 10, 46, }, + { 2, 0, 1, 6, 10, 40, }, + { 1, 0, 1, 6, 10, 40, }, + { 0, 0, 1, 6, 11, 46, }, + { 2, 0, 1, 6, 11, 40, }, + { 1, 0, 1, 6, 11, 40, }, + { 0, 0, 1, 6, 12, 63, }, + { 2, 0, 1, 6, 12, 40, }, + { 1, 0, 1, 6, 12, 40, }, + { 0, 0, 1, 6, 13, 63, }, + { 2, 0, 1, 6, 13, 40, }, + { 1, 0, 1, 6, 13, 40, }, + { 0, 0, 1, 6, 14, 63, }, + { 2, 0, 1, 6, 14, 63, }, + { 1, 0, 1, 6, 14, 63, }, + { 0, 0, 1, 7, 1, 63, }, + { 2, 0, 1, 7, 1, 63, }, + { 1, 0, 1, 7, 1, 63, }, + { 0, 0, 1, 7, 2, 63, }, + { 2, 0, 1, 7, 2, 63, }, + { 1, 0, 1, 7, 2, 63, }, + { 0, 0, 1, 7, 3, 46, }, + { 2, 0, 1, 7, 3, 40, }, + { 1, 0, 1, 7, 3, 40, }, + { 0, 0, 1, 7, 4, 46, }, + { 2, 0, 1, 7, 4, 40, }, + { 1, 0, 1, 7, 4, 40, }, + { 0, 0, 1, 7, 5, 46, }, + { 2, 0, 1, 7, 5, 40, }, + { 1, 0, 1, 7, 5, 40, }, + { 0, 0, 1, 7, 6, 46, }, + { 2, 0, 1, 7, 6, 40, }, + { 1, 0, 1, 7, 6, 40, }, + { 0, 0, 1, 7, 7, 46, }, + { 2, 0, 1, 7, 7, 40, }, + { 1, 0, 1, 7, 7, 40, }, + { 0, 0, 1, 7, 8, 46, }, + { 2, 0, 1, 7, 8, 40, }, + { 1, 0, 1, 7, 8, 40, }, + { 0, 0, 1, 7, 9, 46, }, + { 2, 0, 1, 7, 9, 40, }, + { 1, 0, 1, 7, 9, 40, }, + { 0, 0, 1, 7, 10, 46, }, + { 2, 0, 1, 7, 10, 40, }, + { 1, 0, 1, 7, 10, 40, }, + { 0, 0, 1, 7, 11, 46, }, + { 2, 0, 1, 7, 11, 40, }, + { 1, 0, 1, 7, 11, 40, }, + { 0, 0, 1, 7, 12, 63, }, + { 2, 0, 1, 7, 12, 40, }, + { 1, 0, 1, 7, 12, 40, }, + { 0, 0, 1, 7, 13, 63, }, + { 2, 0, 1, 7, 13, 40, }, + { 1, 0, 1, 7, 13, 40, }, + { 0, 0, 1, 7, 14, 63, }, + { 2, 0, 1, 7, 14, 63, }, + { 1, 0, 1, 7, 14, 63, }, + { 0, 1, 0, 1, 36, 46, }, + { 2, 1, 0, 1, 36, 40, }, + { 1, 1, 0, 1, 36, 40, }, + { 0, 1, 0, 1, 40, 46, }, + { 2, 1, 0, 1, 40, 40, }, + { 1, 1, 0, 1, 40, 40, }, + { 0, 1, 0, 1, 44, 46, }, + { 2, 1, 0, 1, 44, 40, }, + { 1, 1, 0, 1, 44, 40, }, + { 0, 1, 0, 1, 48, 46, }, + { 2, 1, 0, 1, 48, 40, }, + { 1, 1, 0, 1, 48, 40, }, + { 0, 1, 0, 1, 52, 46, }, + { 2, 1, 0, 1, 52, 40, }, + { 1, 1, 0, 1, 52, 40, }, + { 0, 1, 0, 1, 56, 46, }, + { 2, 1, 0, 1, 56, 40, }, + { 1, 1, 0, 1, 56, 40, }, + { 0, 1, 0, 1, 60, 46, }, + { 2, 1, 0, 1, 60, 40, }, + { 1, 1, 0, 1, 60, 40, }, + { 0, 1, 0, 1, 64, 46, }, + { 2, 1, 
0, 1, 64, 40, }, + { 1, 1, 0, 1, 64, 40, }, + { 0, 1, 0, 1, 100, 46, }, + { 2, 1, 0, 1, 100, 40, }, + { 1, 1, 0, 1, 100, 40, }, + { 0, 1, 0, 1, 104, 46, }, + { 2, 1, 0, 1, 104, 40, }, + { 1, 1, 0, 1, 104, 40, }, + { 0, 1, 0, 1, 108, 46, }, + { 2, 1, 0, 1, 108, 40, }, + { 1, 1, 0, 1, 108, 40, }, + { 0, 1, 0, 1, 112, 46, }, + { 2, 1, 0, 1, 112, 40, }, + { 1, 1, 0, 1, 112, 40, }, + { 0, 1, 0, 1, 116, 46, }, + { 2, 1, 0, 1, 116, 40, }, + { 1, 1, 0, 1, 116, 40, }, + { 0, 1, 0, 1, 120, 46, }, + { 2, 1, 0, 1, 120, 40, }, + { 1, 1, 0, 1, 120, 40, }, + { 0, 1, 0, 1, 124, 46, }, + { 2, 1, 0, 1, 124, 40, }, + { 1, 1, 0, 1, 124, 40, }, + { 0, 1, 0, 1, 128, 46, }, + { 2, 1, 0, 1, 128, 40, }, + { 1, 1, 0, 1, 128, 40, }, + { 0, 1, 0, 1, 132, 46, }, + { 2, 1, 0, 1, 132, 40, }, + { 1, 1, 0, 1, 132, 40, }, + { 0, 1, 0, 1, 136, 46, }, + { 2, 1, 0, 1, 136, 40, }, + { 1, 1, 0, 1, 136, 40, }, + { 0, 1, 0, 1, 140, 46, }, + { 2, 1, 0, 1, 140, 40, }, + { 1, 1, 0, 1, 140, 40, }, + { 0, 1, 0, 1, 149, 46, }, + { 2, 1, 0, 1, 149, 40, }, + { 1, 1, 0, 1, 149, 63, }, + { 0, 1, 0, 1, 153, 46, }, + { 2, 1, 0, 1, 153, 40, }, + { 1, 1, 0, 1, 153, 63, }, + { 0, 1, 0, 1, 157, 46, }, + { 2, 1, 0, 1, 157, 40, }, + { 1, 1, 0, 1, 157, 63, }, + { 0, 1, 0, 1, 161, 46, }, + { 2, 1, 0, 1, 161, 40, }, + { 1, 1, 0, 1, 161, 63, }, + { 0, 1, 0, 1, 165, 46, }, + { 2, 1, 0, 1, 165, 40, }, + { 1, 1, 0, 1, 165, 63, }, + { 0, 1, 0, 2, 36, 46, }, + { 2, 1, 0, 2, 36, 40, }, + { 1, 1, 0, 2, 36, 40, }, + { 0, 1, 0, 2, 40, 46, }, + { 2, 1, 0, 2, 40, 40, }, + { 1, 1, 0, 2, 40, 40, }, + { 0, 1, 0, 2, 44, 46, }, + { 2, 1, 0, 2, 44, 40, }, + { 1, 1, 0, 2, 44, 40, }, + { 0, 1, 0, 2, 48, 46, }, + { 2, 1, 0, 2, 48, 40, }, + { 1, 1, 0, 2, 48, 40, }, + { 0, 1, 0, 2, 52, 46, }, + { 2, 1, 0, 2, 52, 40, }, + { 1, 1, 0, 2, 52, 40, }, + { 0, 1, 0, 2, 56, 46, }, + { 2, 1, 0, 2, 56, 40, }, + { 1, 1, 0, 2, 56, 40, }, + { 0, 1, 0, 2, 60, 46, }, + { 2, 1, 0, 2, 60, 40, }, + { 1, 1, 0, 2, 60, 40, }, + { 0, 1, 0, 2, 64, 46, }, + { 2, 1, 0, 2, 64, 40, }, + { 1, 1, 0, 2, 64, 40, }, + { 0, 1, 0, 2, 100, 46, }, + { 2, 1, 0, 2, 100, 40, }, + { 1, 1, 0, 2, 100, 40, }, + { 0, 1, 0, 2, 104, 46, }, + { 2, 1, 0, 2, 104, 40, }, + { 1, 1, 0, 2, 104, 40, }, + { 0, 1, 0, 2, 108, 46, }, + { 2, 1, 0, 2, 108, 40, }, + { 1, 1, 0, 2, 108, 40, }, + { 0, 1, 0, 2, 112, 46, }, + { 2, 1, 0, 2, 112, 40, }, + { 1, 1, 0, 2, 112, 40, }, + { 0, 1, 0, 2, 116, 46, }, + { 2, 1, 0, 2, 116, 40, }, + { 1, 1, 0, 2, 116, 40, }, + { 0, 1, 0, 2, 120, 46, }, + { 2, 1, 0, 2, 120, 40, }, + { 1, 1, 0, 2, 120, 40, }, + { 0, 1, 0, 2, 124, 46, }, + { 2, 1, 0, 2, 124, 40, }, + { 1, 1, 0, 2, 124, 40, }, + { 0, 1, 0, 2, 128, 46, }, + { 2, 1, 0, 2, 128, 40, }, + { 1, 1, 0, 2, 128, 40, }, + { 0, 1, 0, 2, 132, 46, }, + { 2, 1, 0, 2, 132, 40, }, + { 1, 1, 0, 2, 132, 40, }, + { 0, 1, 0, 2, 136, 46, }, + { 2, 1, 0, 2, 136, 40, }, + { 1, 1, 0, 2, 136, 40, }, + { 0, 1, 0, 2, 140, 46, }, + { 2, 1, 0, 2, 140, 40, }, + { 1, 1, 0, 2, 140, 40, }, + { 0, 1, 0, 2, 149, 46, }, + { 2, 1, 0, 2, 149, 40, }, + { 1, 1, 0, 2, 149, 63, }, + { 0, 1, 0, 2, 153, 46, }, + { 2, 1, 0, 2, 153, 40, }, + { 1, 1, 0, 2, 153, 63, }, + { 0, 1, 0, 2, 157, 46, }, + { 2, 1, 0, 2, 157, 40, }, + { 1, 1, 0, 2, 157, 63, }, + { 0, 1, 0, 2, 161, 46, }, + { 2, 1, 0, 2, 161, 40, }, + { 1, 1, 0, 2, 161, 63, }, + { 0, 1, 0, 2, 165, 46, }, + { 2, 1, 0, 2, 165, 40, }, + { 1, 1, 0, 2, 165, 63, }, + { 0, 1, 0, 3, 36, 46, }, + { 2, 1, 0, 3, 36, 40, }, + { 1, 1, 0, 3, 36, 40, }, + { 0, 1, 0, 3, 40, 46, }, + { 2, 1, 0, 3, 40, 40, }, + { 1, 1, 0, 3, 40, 40, }, + { 0, 1, 0, 
3, 44, 46, }, + { 2, 1, 0, 3, 44, 40, }, + { 1, 1, 0, 3, 44, 40, }, + { 0, 1, 0, 3, 48, 46, }, + { 2, 1, 0, 3, 48, 40, }, + { 1, 1, 0, 3, 48, 40, }, + { 0, 1, 0, 3, 52, 46, }, + { 2, 1, 0, 3, 52, 40, }, + { 1, 1, 0, 3, 52, 40, }, + { 0, 1, 0, 3, 56, 46, }, + { 2, 1, 0, 3, 56, 40, }, + { 1, 1, 0, 3, 56, 40, }, + { 0, 1, 0, 3, 60, 46, }, + { 2, 1, 0, 3, 60, 40, }, + { 1, 1, 0, 3, 60, 40, }, + { 0, 1, 0, 3, 64, 46, }, + { 2, 1, 0, 3, 64, 40, }, + { 1, 1, 0, 3, 64, 40, }, + { 0, 1, 0, 3, 100, 46, }, + { 2, 1, 0, 3, 100, 40, }, + { 1, 1, 0, 3, 100, 40, }, + { 0, 1, 0, 3, 104, 46, }, + { 2, 1, 0, 3, 104, 40, }, + { 1, 1, 0, 3, 104, 40, }, + { 0, 1, 0, 3, 108, 46, }, + { 2, 1, 0, 3, 108, 40, }, + { 1, 1, 0, 3, 108, 40, }, + { 0, 1, 0, 3, 112, 46, }, + { 2, 1, 0, 3, 112, 40, }, + { 1, 1, 0, 3, 112, 40, }, + { 0, 1, 0, 3, 116, 46, }, + { 2, 1, 0, 3, 116, 40, }, + { 1, 1, 0, 3, 116, 40, }, + { 0, 1, 0, 3, 120, 46, }, + { 2, 1, 0, 3, 120, 40, }, + { 1, 1, 0, 3, 120, 40, }, + { 0, 1, 0, 3, 124, 46, }, + { 2, 1, 0, 3, 124, 40, }, + { 1, 1, 0, 3, 124, 40, }, + { 0, 1, 0, 3, 128, 46, }, + { 2, 1, 0, 3, 128, 40, }, + { 1, 1, 0, 3, 128, 40, }, + { 0, 1, 0, 3, 132, 46, }, + { 2, 1, 0, 3, 132, 40, }, + { 1, 1, 0, 3, 132, 40, }, + { 0, 1, 0, 3, 136, 46, }, + { 2, 1, 0, 3, 136, 40, }, + { 1, 1, 0, 3, 136, 40, }, + { 0, 1, 0, 3, 140, 46, }, + { 2, 1, 0, 3, 140, 40, }, + { 1, 1, 0, 3, 140, 40, }, + { 0, 1, 0, 3, 149, 46, }, + { 2, 1, 0, 3, 149, 40, }, + { 1, 1, 0, 3, 149, 63, }, + { 0, 1, 0, 3, 153, 46, }, + { 2, 1, 0, 3, 153, 40, }, + { 1, 1, 0, 3, 153, 63, }, + { 0, 1, 0, 3, 157, 46, }, + { 2, 1, 0, 3, 157, 40, }, + { 1, 1, 0, 3, 157, 63, }, + { 0, 1, 0, 3, 161, 46, }, + { 2, 1, 0, 3, 161, 40, }, + { 1, 1, 0, 3, 161, 63, }, + { 0, 1, 0, 3, 165, 46, }, + { 2, 1, 0, 3, 165, 40, }, + { 1, 1, 0, 3, 165, 63, }, + { 0, 1, 0, 6, 36, 46, }, + { 2, 1, 0, 6, 36, 40, }, + { 1, 1, 0, 6, 36, 40, }, + { 0, 1, 0, 6, 40, 46, }, + { 2, 1, 0, 6, 40, 40, }, + { 1, 1, 0, 6, 40, 40, }, + { 0, 1, 0, 6, 44, 46, }, + { 2, 1, 0, 6, 44, 40, }, + { 1, 1, 0, 6, 44, 40, }, + { 0, 1, 0, 6, 48, 46, }, + { 2, 1, 0, 6, 48, 40, }, + { 1, 1, 0, 6, 48, 40, }, + { 0, 1, 0, 6, 52, 46, }, + { 2, 1, 0, 6, 52, 40, }, + { 1, 1, 0, 6, 52, 40, }, + { 0, 1, 0, 6, 56, 46, }, + { 2, 1, 0, 6, 56, 40, }, + { 1, 1, 0, 6, 56, 40, }, + { 0, 1, 0, 6, 60, 46, }, + { 2, 1, 0, 6, 60, 40, }, + { 1, 1, 0, 6, 60, 40, }, + { 0, 1, 0, 6, 64, 46, }, + { 2, 1, 0, 6, 64, 40, }, + { 1, 1, 0, 6, 64, 40, }, + { 0, 1, 0, 6, 100, 46, }, + { 2, 1, 0, 6, 100, 40, }, + { 1, 1, 0, 6, 100, 40, }, + { 0, 1, 0, 6, 104, 46, }, + { 2, 1, 0, 6, 104, 40, }, + { 1, 1, 0, 6, 104, 40, }, + { 0, 1, 0, 6, 108, 46, }, + { 2, 1, 0, 6, 108, 40, }, + { 1, 1, 0, 6, 108, 40, }, + { 0, 1, 0, 6, 112, 46, }, + { 2, 1, 0, 6, 112, 40, }, + { 1, 1, 0, 6, 112, 40, }, + { 0, 1, 0, 6, 116, 46, }, + { 2, 1, 0, 6, 116, 40, }, + { 1, 1, 0, 6, 116, 40, }, + { 0, 1, 0, 6, 120, 46, }, + { 2, 1, 0, 6, 120, 40, }, + { 1, 1, 0, 6, 120, 40, }, + { 0, 1, 0, 6, 124, 46, }, + { 2, 1, 0, 6, 124, 40, }, + { 1, 1, 0, 6, 124, 40, }, + { 0, 1, 0, 6, 128, 46, }, + { 2, 1, 0, 6, 128, 40, }, + { 1, 1, 0, 6, 128, 40, }, + { 0, 1, 0, 6, 132, 46, }, + { 2, 1, 0, 6, 132, 40, }, + { 1, 1, 0, 6, 132, 40, }, + { 0, 1, 0, 6, 136, 46, }, + { 2, 1, 0, 6, 136, 40, }, + { 1, 1, 0, 6, 136, 40, }, + { 0, 1, 0, 6, 140, 46, }, + { 2, 1, 0, 6, 140, 40, }, + { 1, 1, 0, 6, 140, 40, }, + { 0, 1, 0, 6, 149, 46, }, + { 2, 1, 0, 6, 149, 40, }, + { 1, 1, 0, 6, 149, 63, }, + { 0, 1, 0, 6, 153, 46, }, + { 2, 1, 0, 6, 153, 40, }, + { 1, 1, 0, 6, 153, 63, 
}, + { 0, 1, 0, 6, 157, 46, }, + { 2, 1, 0, 6, 157, 40, }, + { 1, 1, 0, 6, 157, 63, }, + { 0, 1, 0, 6, 161, 46, }, + { 2, 1, 0, 6, 161, 40, }, + { 1, 1, 0, 6, 161, 63, }, + { 0, 1, 0, 6, 165, 46, }, + { 2, 1, 0, 6, 165, 40, }, + { 1, 1, 0, 6, 165, 63, }, + { 0, 1, 0, 7, 36, 46, }, + { 2, 1, 0, 7, 36, 40, }, + { 1, 1, 0, 7, 36, 40, }, + { 0, 1, 0, 7, 40, 46, }, + { 2, 1, 0, 7, 40, 40, }, + { 1, 1, 0, 7, 40, 40, }, + { 0, 1, 0, 7, 44, 46, }, + { 2, 1, 0, 7, 44, 40, }, + { 1, 1, 0, 7, 44, 40, }, + { 0, 1, 0, 7, 48, 46, }, + { 2, 1, 0, 7, 48, 40, }, + { 1, 1, 0, 7, 48, 40, }, + { 0, 1, 0, 7, 52, 46, }, + { 2, 1, 0, 7, 52, 40, }, + { 1, 1, 0, 7, 52, 40, }, + { 0, 1, 0, 7, 56, 46, }, + { 2, 1, 0, 7, 56, 40, }, + { 1, 1, 0, 7, 56, 40, }, + { 0, 1, 0, 7, 60, 46, }, + { 2, 1, 0, 7, 60, 40, }, + { 1, 1, 0, 7, 60, 40, }, + { 0, 1, 0, 7, 64, 46, }, + { 2, 1, 0, 7, 64, 40, }, + { 1, 1, 0, 7, 64, 40, }, + { 0, 1, 0, 7, 100, 46, }, + { 2, 1, 0, 7, 100, 40, }, + { 1, 1, 0, 7, 100, 40, }, + { 0, 1, 0, 7, 104, 46, }, + { 2, 1, 0, 7, 104, 40, }, + { 1, 1, 0, 7, 104, 40, }, + { 0, 1, 0, 7, 108, 46, }, + { 2, 1, 0, 7, 108, 40, }, + { 1, 1, 0, 7, 108, 40, }, + { 0, 1, 0, 7, 112, 46, }, + { 2, 1, 0, 7, 112, 40, }, + { 1, 1, 0, 7, 112, 40, }, + { 0, 1, 0, 7, 116, 46, }, + { 2, 1, 0, 7, 116, 40, }, + { 1, 1, 0, 7, 116, 40, }, + { 0, 1, 0, 7, 120, 46, }, + { 2, 1, 0, 7, 120, 40, }, + { 1, 1, 0, 7, 120, 40, }, + { 0, 1, 0, 7, 124, 46, }, + { 2, 1, 0, 7, 124, 40, }, + { 1, 1, 0, 7, 124, 40, }, + { 0, 1, 0, 7, 128, 46, }, + { 2, 1, 0, 7, 128, 40, }, + { 1, 1, 0, 7, 128, 40, }, + { 0, 1, 0, 7, 132, 46, }, + { 2, 1, 0, 7, 132, 40, }, + { 1, 1, 0, 7, 132, 40, }, + { 0, 1, 0, 7, 136, 46, }, + { 2, 1, 0, 7, 136, 40, }, + { 1, 1, 0, 7, 136, 40, }, + { 0, 1, 0, 7, 140, 46, }, + { 2, 1, 0, 7, 140, 40, }, + { 1, 1, 0, 7, 140, 40, }, + { 0, 1, 0, 7, 149, 46, }, + { 2, 1, 0, 7, 149, 40, }, + { 1, 1, 0, 7, 149, 63, }, + { 0, 1, 0, 7, 153, 46, }, + { 2, 1, 0, 7, 153, 40, }, + { 1, 1, 0, 7, 153, 63, }, + { 0, 1, 0, 7, 157, 46, }, + { 2, 1, 0, 7, 157, 40, }, + { 1, 1, 0, 7, 157, 63, }, + { 0, 1, 0, 7, 161, 46, }, + { 2, 1, 0, 7, 161, 40, }, + { 1, 1, 0, 7, 161, 63, }, + { 0, 1, 0, 7, 165, 46, }, + { 2, 1, 0, 7, 165, 40, }, + { 1, 1, 0, 7, 165, 63, }, + { 0, 1, 1, 2, 38, 46, }, + { 2, 1, 1, 2, 38, 40, }, + { 1, 1, 1, 2, 38, 40, }, + { 0, 1, 1, 2, 46, 46, }, + { 2, 1, 1, 2, 46, 40, }, + { 1, 1, 1, 2, 46, 40, }, + { 0, 1, 1, 2, 54, 46, }, + { 2, 1, 1, 2, 54, 40, }, + { 1, 1, 1, 2, 54, 40, }, + { 0, 1, 1, 2, 62, 46, }, + { 2, 1, 1, 2, 62, 40, }, + { 1, 1, 1, 2, 62, 40, }, + { 0, 1, 1, 2, 102, 46, }, + { 2, 1, 1, 2, 102, 40, }, + { 1, 1, 1, 2, 102, 40, }, + { 0, 1, 1, 2, 110, 46, }, + { 2, 1, 1, 2, 110, 40, }, + { 1, 1, 1, 2, 110, 40, }, + { 0, 1, 1, 2, 118, 46, }, + { 2, 1, 1, 2, 118, 40, }, + { 1, 1, 1, 2, 118, 40, }, + { 0, 1, 1, 2, 126, 46, }, + { 2, 1, 1, 2, 126, 40, }, + { 1, 1, 1, 2, 126, 40, }, + { 0, 1, 1, 2, 134, 46, }, + { 2, 1, 1, 2, 134, 40, }, + { 1, 1, 1, 2, 134, 40, }, + { 0, 1, 1, 2, 151, 46, }, + { 2, 1, 1, 2, 151, 40, }, + { 1, 1, 1, 2, 151, 63, }, + { 0, 1, 1, 2, 159, 46, }, + { 2, 1, 1, 2, 159, 40, }, + { 1, 1, 1, 2, 159, 63, }, + { 0, 1, 1, 3, 38, 46, }, + { 2, 1, 1, 3, 38, 40, }, + { 1, 1, 1, 3, 38, 40, }, + { 0, 1, 1, 3, 46, 46, }, + { 2, 1, 1, 3, 46, 40, }, + { 1, 1, 1, 3, 46, 40, }, + { 0, 1, 1, 3, 54, 46, }, + { 2, 1, 1, 3, 54, 40, }, + { 1, 1, 1, 3, 54, 40, }, + { 0, 1, 1, 3, 62, 46, }, + { 2, 1, 1, 3, 62, 40, }, + { 1, 1, 1, 3, 62, 40, }, + { 0, 1, 1, 3, 102, 46, }, + { 2, 1, 1, 3, 102, 40, }, + { 1, 1, 1, 3, 
102, 40, }, + { 0, 1, 1, 3, 110, 46, }, + { 2, 1, 1, 3, 110, 40, }, + { 1, 1, 1, 3, 110, 40, }, + { 0, 1, 1, 3, 118, 46, }, + { 2, 1, 1, 3, 118, 40, }, + { 1, 1, 1, 3, 118, 40, }, + { 0, 1, 1, 3, 126, 46, }, + { 2, 1, 1, 3, 126, 40, }, + { 1, 1, 1, 3, 126, 40, }, + { 0, 1, 1, 3, 134, 46, }, + { 2, 1, 1, 3, 134, 40, }, + { 1, 1, 1, 3, 134, 40, }, + { 0, 1, 1, 3, 151, 46, }, + { 2, 1, 1, 3, 151, 40, }, + { 1, 1, 1, 3, 151, 63, }, + { 0, 1, 1, 3, 159, 46, }, + { 2, 1, 1, 3, 159, 40, }, + { 1, 1, 1, 3, 159, 63, }, + { 0, 1, 1, 6, 38, 46, }, + { 2, 1, 1, 6, 38, 40, }, + { 1, 1, 1, 6, 38, 40, }, + { 0, 1, 1, 6, 46, 46, }, + { 2, 1, 1, 6, 46, 40, }, + { 1, 1, 1, 6, 46, 40, }, + { 0, 1, 1, 6, 54, 46, }, + { 2, 1, 1, 6, 54, 40, }, + { 1, 1, 1, 6, 54, 40, }, + { 0, 1, 1, 6, 62, 46, }, + { 2, 1, 1, 6, 62, 40, }, + { 1, 1, 1, 6, 62, 40, }, + { 0, 1, 1, 6, 102, 46, }, + { 2, 1, 1, 6, 102, 40, }, + { 1, 1, 1, 6, 102, 40, }, + { 0, 1, 1, 6, 110, 46, }, + { 2, 1, 1, 6, 110, 40, }, + { 1, 1, 1, 6, 110, 40, }, + { 0, 1, 1, 6, 118, 46, }, + { 2, 1, 1, 6, 118, 40, }, + { 1, 1, 1, 6, 118, 40, }, + { 0, 1, 1, 6, 126, 46, }, + { 2, 1, 1, 6, 126, 40, }, + { 1, 1, 1, 6, 126, 40, }, + { 0, 1, 1, 6, 134, 46, }, + { 2, 1, 1, 6, 134, 40, }, + { 1, 1, 1, 6, 134, 40, }, + { 0, 1, 1, 6, 151, 46, }, + { 2, 1, 1, 6, 151, 40, }, + { 1, 1, 1, 6, 151, 63, }, + { 0, 1, 1, 6, 159, 46, }, + { 2, 1, 1, 6, 159, 40, }, + { 1, 1, 1, 6, 159, 63, }, + { 0, 1, 1, 7, 38, 46, }, + { 2, 1, 1, 7, 38, 40, }, + { 1, 1, 1, 7, 38, 40, }, + { 0, 1, 1, 7, 46, 46, }, + { 2, 1, 1, 7, 46, 40, }, + { 1, 1, 1, 7, 46, 40, }, + { 0, 1, 1, 7, 54, 46, }, + { 2, 1, 1, 7, 54, 40, }, + { 1, 1, 1, 7, 54, 40, }, + { 0, 1, 1, 7, 62, 46, }, + { 2, 1, 1, 7, 62, 40, }, + { 1, 1, 1, 7, 62, 40, }, + { 0, 1, 1, 7, 102, 46, }, + { 2, 1, 1, 7, 102, 40, }, + { 1, 1, 1, 7, 102, 40, }, + { 0, 1, 1, 7, 110, 46, }, + { 2, 1, 1, 7, 110, 40, }, + { 1, 1, 1, 7, 110, 40, }, + { 0, 1, 1, 7, 118, 46, }, + { 2, 1, 1, 7, 118, 40, }, + { 1, 1, 1, 7, 118, 40, }, + { 0, 1, 1, 7, 126, 46, }, + { 2, 1, 1, 7, 126, 40, }, + { 1, 1, 1, 7, 126, 40, }, + { 0, 1, 1, 7, 134, 46, }, + { 2, 1, 1, 7, 134, 40, }, + { 1, 1, 1, 7, 134, 40, }, + { 0, 1, 1, 7, 151, 46, }, + { 2, 1, 1, 7, 151, 40, }, + { 1, 1, 1, 7, 151, 63, }, + { 0, 1, 1, 7, 159, 46, }, + { 2, 1, 1, 7, 159, 40, }, + { 1, 1, 1, 7, 159, 63, }, + { 0, 1, 2, 4, 42, 46, }, + { 2, 1, 2, 4, 42, 40, }, + { 1, 1, 2, 4, 42, 40, }, + { 0, 1, 2, 4, 58, 46, }, + { 2, 1, 2, 4, 58, 40, }, + { 1, 1, 2, 4, 58, 40, }, + { 0, 1, 2, 4, 106, 46, }, + { 2, 1, 2, 4, 106, 40, }, + { 1, 1, 2, 4, 106, 40, }, + { 0, 1, 2, 4, 122, 46, }, + { 2, 1, 2, 4, 122, 40, }, + { 1, 1, 2, 4, 122, 40, }, + { 0, 1, 2, 4, 155, 46, }, + { 2, 1, 2, 4, 155, 40, }, + { 1, 1, 2, 4, 155, 63, }, + { 0, 1, 2, 5, 42, 46, }, + { 2, 1, 2, 5, 42, 40, }, + { 1, 1, 2, 5, 42, 40, }, + { 0, 1, 2, 5, 58, 46, }, + { 2, 1, 2, 5, 58, 40, }, + { 1, 1, 2, 5, 58, 40, }, + { 0, 1, 2, 5, 106, 46, }, + { 2, 1, 2, 5, 106, 40, }, + { 1, 1, 2, 5, 106, 40, }, + { 0, 1, 2, 5, 122, 46, }, + { 2, 1, 2, 5, 122, 40, }, + { 1, 1, 2, 5, 122, 40, }, + { 0, 1, 2, 5, 155, 46, }, + { 2, 1, 2, 5, 155, 40, }, + { 1, 1, 2, 5, 155, 63, }, + { 0, 1, 2, 8, 42, 46, }, + { 2, 1, 2, 8, 42, 40, }, + { 1, 1, 2, 8, 42, 40, }, + { 0, 1, 2, 8, 58, 46, }, + { 2, 1, 2, 8, 58, 40, }, + { 1, 1, 2, 8, 58, 40, }, + { 0, 1, 2, 8, 106, 46, }, + { 2, 1, 2, 8, 106, 40, }, + { 1, 1, 2, 8, 106, 40, }, + { 0, 1, 2, 8, 122, 46, }, + { 2, 1, 2, 8, 122, 40, }, + { 1, 1, 2, 8, 122, 40, }, + { 0, 1, 2, 8, 155, 46, }, + { 2, 1, 2, 8, 155, 40, }, 
+ { 1, 1, 2, 8, 155, 63, }, + { 0, 1, 2, 9, 42, 46, }, + { 2, 1, 2, 9, 42, 40, }, + { 1, 1, 2, 9, 42, 40, }, + { 0, 1, 2, 9, 58, 46, }, + { 2, 1, 2, 9, 58, 40, }, + { 1, 1, 2, 9, 58, 40, }, + { 0, 1, 2, 9, 106, 46, }, + { 2, 1, 2, 9, 106, 40, }, + { 1, 1, 2, 9, 106, 40, }, + { 0, 1, 2, 9, 122, 46, }, + { 2, 1, 2, 9, 122, 40, }, + { 1, 1, 2, 9, 122, 40, }, + { 0, 1, 2, 9, 155, 46, }, + { 2, 1, 2, 9, 155, 40, }, + { 1, 1, 2, 9, 155, 63, }, +}; + +RTW_DECL_TABLE_TXPWR_LMT(rtw8814a_txpwr_lmt_type5); + +static const struct rtw_txpwr_lmt_cfg_pair rtw8814a_txpwr_lmt_type7[] = { + { 0, 0, 0, 0, 1, 44, }, + { 2, 0, 0, 0, 1, 32, }, + { 1, 0, 0, 0, 1, 32, }, + { 0, 0, 0, 0, 2, 52, }, + { 2, 0, 0, 0, 2, 32, }, + { 1, 0, 0, 0, 2, 32, }, + { 0, 0, 0, 0, 3, 52, }, + { 2, 0, 0, 0, 3, 32, }, + { 1, 0, 0, 0, 3, 32, }, + { 0, 0, 0, 0, 4, 52, }, + { 2, 0, 0, 0, 4, 32, }, + { 1, 0, 0, 0, 4, 32, }, + { 0, 0, 0, 0, 5, 52, }, + { 2, 0, 0, 0, 5, 32, }, + { 1, 0, 0, 0, 5, 32, }, + { 0, 0, 0, 0, 6, 52, }, + { 2, 0, 0, 0, 6, 32, }, + { 1, 0, 0, 0, 6, 32, }, + { 0, 0, 0, 0, 7, 52, }, + { 2, 0, 0, 0, 7, 32, }, + { 1, 0, 0, 0, 7, 32, }, + { 0, 0, 0, 0, 8, 52, }, + { 2, 0, 0, 0, 8, 32, }, + { 1, 0, 0, 0, 8, 32, }, + { 0, 0, 0, 0, 9, 52, }, + { 2, 0, 0, 0, 9, 32, }, + { 1, 0, 0, 0, 9, 32, }, + { 0, 0, 0, 0, 10, 52, }, + { 2, 0, 0, 0, 10, 32, }, + { 1, 0, 0, 0, 10, 32, }, + { 0, 0, 0, 0, 11, 44, }, + { 2, 0, 0, 0, 11, 32, }, + { 1, 0, 0, 0, 11, 32, }, + { 0, 0, 0, 0, 12, 63, }, + { 2, 0, 0, 0, 12, 32, }, + { 1, 0, 0, 0, 12, 32, }, + { 0, 0, 0, 0, 13, 63, }, + { 2, 0, 0, 0, 13, 32, }, + { 1, 0, 0, 0, 13, 32, }, + { 0, 0, 0, 0, 14, 63, }, + { 2, 0, 0, 0, 14, 63, }, + { 1, 0, 0, 0, 14, 32, }, + { 0, 0, 0, 1, 1, 38, }, + { 2, 0, 0, 1, 1, 32, }, + { 1, 0, 0, 1, 1, 32, }, + { 0, 0, 0, 1, 2, 46, }, + { 2, 0, 0, 1, 2, 32, }, + { 1, 0, 0, 1, 2, 32, }, + { 0, 0, 0, 1, 3, 46, }, + { 2, 0, 0, 1, 3, 32, }, + { 1, 0, 0, 1, 3, 32, }, + { 0, 0, 0, 1, 4, 46, }, + { 2, 0, 0, 1, 4, 32, }, + { 1, 0, 0, 1, 4, 32, }, + { 0, 0, 0, 1, 5, 46, }, + { 2, 0, 0, 1, 5, 32, }, + { 1, 0, 0, 1, 5, 32, }, + { 0, 0, 0, 1, 6, 46, }, + { 2, 0, 0, 1, 6, 32, }, + { 1, 0, 0, 1, 6, 32, }, + { 0, 0, 0, 1, 7, 46, }, + { 2, 0, 0, 1, 7, 32, }, + { 1, 0, 0, 1, 7, 32, }, + { 0, 0, 0, 1, 8, 46, }, + { 2, 0, 0, 1, 8, 32, }, + { 1, 0, 0, 1, 8, 32, }, + { 0, 0, 0, 1, 9, 46, }, + { 2, 0, 0, 1, 9, 32, }, + { 1, 0, 0, 1, 9, 32, }, + { 0, 0, 0, 1, 10, 46, }, + { 2, 0, 0, 1, 10, 32, }, + { 1, 0, 0, 1, 10, 32, }, + { 0, 0, 0, 1, 11, 38, }, + { 2, 0, 0, 1, 11, 32, }, + { 1, 0, 0, 1, 11, 32, }, + { 0, 0, 0, 1, 12, 63, }, + { 2, 0, 0, 1, 12, 32, }, + { 1, 0, 0, 1, 12, 32, }, + { 0, 0, 0, 1, 13, 63, }, + { 2, 0, 0, 1, 13, 32, }, + { 1, 0, 0, 1, 13, 32, }, + { 0, 0, 0, 1, 14, 63, }, + { 2, 0, 0, 1, 14, 63, }, + { 1, 0, 0, 1, 14, 63, }, + { 0, 0, 0, 2, 1, 34, }, + { 2, 0, 0, 2, 1, 32, }, + { 1, 0, 0, 2, 1, 32, }, + { 0, 0, 0, 2, 2, 46, }, + { 2, 0, 0, 2, 2, 32, }, + { 1, 0, 0, 2, 2, 32, }, + { 0, 0, 0, 2, 3, 46, }, + { 2, 0, 0, 2, 3, 32, }, + { 1, 0, 0, 2, 3, 32, }, + { 0, 0, 0, 2, 4, 46, }, + { 2, 0, 0, 2, 4, 32, }, + { 1, 0, 0, 2, 4, 32, }, + { 0, 0, 0, 2, 5, 46, }, + { 2, 0, 0, 2, 5, 32, }, + { 1, 0, 0, 2, 5, 32, }, + { 0, 0, 0, 2, 6, 46, }, + { 2, 0, 0, 2, 6, 32, }, + { 1, 0, 0, 2, 6, 32, }, + { 0, 0, 0, 2, 7, 46, }, + { 2, 0, 0, 2, 7, 32, }, + { 1, 0, 0, 2, 7, 32, }, + { 0, 0, 0, 2, 8, 46, }, + { 2, 0, 0, 2, 8, 32, }, + { 1, 0, 0, 2, 8, 32, }, + { 0, 0, 0, 2, 9, 46, }, + { 2, 0, 0, 2, 9, 32, }, + { 1, 0, 0, 2, 9, 32, }, + { 0, 0, 0, 2, 10, 46, }, + { 2, 0, 0, 2, 10, 32, }, + { 1, 
0, 0, 2, 10, 32, }, + { 0, 0, 0, 2, 11, 34, }, + { 2, 0, 0, 2, 11, 32, }, + { 1, 0, 0, 2, 11, 32, }, + { 0, 0, 0, 2, 12, 63, }, + { 2, 0, 0, 2, 12, 32, }, + { 1, 0, 0, 2, 12, 32, }, + { 0, 0, 0, 2, 13, 63, }, + { 2, 0, 0, 2, 13, 32, }, + { 1, 0, 0, 2, 13, 32, }, + { 0, 0, 0, 2, 14, 63, }, + { 2, 0, 0, 2, 14, 63, }, + { 1, 0, 0, 2, 14, 63, }, + { 0, 0, 0, 3, 1, 32, }, + { 2, 0, 0, 3, 1, 30, }, + { 1, 0, 0, 3, 1, 30, }, + { 0, 0, 0, 3, 2, 44, }, + { 2, 0, 0, 3, 2, 30, }, + { 1, 0, 0, 3, 2, 30, }, + { 0, 0, 0, 3, 3, 44, }, + { 2, 0, 0, 3, 3, 30, }, + { 1, 0, 0, 3, 3, 30, }, + { 0, 0, 0, 3, 4, 44, }, + { 2, 0, 0, 3, 4, 30, }, + { 1, 0, 0, 3, 4, 30, }, + { 0, 0, 0, 3, 5, 44, }, + { 2, 0, 0, 3, 5, 30, }, + { 1, 0, 0, 3, 5, 30, }, + { 0, 0, 0, 3, 6, 44, }, + { 2, 0, 0, 3, 6, 30, }, + { 1, 0, 0, 3, 6, 30, }, + { 0, 0, 0, 3, 7, 44, }, + { 2, 0, 0, 3, 7, 30, }, + { 1, 0, 0, 3, 7, 30, }, + { 0, 0, 0, 3, 8, 44, }, + { 2, 0, 0, 3, 8, 30, }, + { 1, 0, 0, 3, 8, 30, }, + { 0, 0, 0, 3, 9, 44, }, + { 2, 0, 0, 3, 9, 30, }, + { 1, 0, 0, 3, 9, 30, }, + { 0, 0, 0, 3, 10, 44, }, + { 2, 0, 0, 3, 10, 30, }, + { 1, 0, 0, 3, 10, 30, }, + { 0, 0, 0, 3, 11, 32, }, + { 2, 0, 0, 3, 11, 30, }, + { 1, 0, 0, 3, 11, 30, }, + { 0, 0, 0, 3, 12, 63, }, + { 2, 0, 0, 3, 12, 30, }, + { 1, 0, 0, 3, 12, 30, }, + { 0, 0, 0, 3, 13, 63, }, + { 2, 0, 0, 3, 13, 30, }, + { 1, 0, 0, 3, 13, 30, }, + { 0, 0, 0, 3, 14, 63, }, + { 2, 0, 0, 3, 14, 63, }, + { 1, 0, 0, 3, 14, 63, }, + { 0, 0, 0, 6, 1, 30, }, + { 2, 0, 0, 6, 1, 28, }, + { 1, 0, 0, 6, 1, 28, }, + { 0, 0, 0, 6, 2, 42, }, + { 2, 0, 0, 6, 2, 28, }, + { 1, 0, 0, 6, 2, 28, }, + { 0, 0, 0, 6, 3, 42, }, + { 2, 0, 0, 6, 3, 28, }, + { 1, 0, 0, 6, 3, 28, }, + { 0, 0, 0, 6, 4, 42, }, + { 2, 0, 0, 6, 4, 28, }, + { 1, 0, 0, 6, 4, 28, }, + { 0, 0, 0, 6, 5, 42, }, + { 2, 0, 0, 6, 5, 28, }, + { 1, 0, 0, 6, 5, 28, }, + { 0, 0, 0, 6, 6, 42, }, + { 2, 0, 0, 6, 6, 28, }, + { 1, 0, 0, 6, 6, 28, }, + { 0, 0, 0, 6, 7, 42, }, + { 2, 0, 0, 6, 7, 28, }, + { 1, 0, 0, 6, 7, 28, }, + { 0, 0, 0, 6, 8, 42, }, + { 2, 0, 0, 6, 8, 28, }, + { 1, 0, 0, 6, 8, 28, }, + { 0, 0, 0, 6, 9, 42, }, + { 2, 0, 0, 6, 9, 28, }, + { 1, 0, 0, 6, 9, 28, }, + { 0, 0, 0, 6, 10, 42, }, + { 2, 0, 0, 6, 10, 28, }, + { 1, 0, 0, 6, 10, 28, }, + { 0, 0, 0, 6, 11, 30, }, + { 2, 0, 0, 6, 11, 28, }, + { 1, 0, 0, 6, 11, 28, }, + { 0, 0, 0, 6, 12, 63, }, + { 2, 0, 0, 6, 12, 28, }, + { 1, 0, 0, 6, 12, 28, }, + { 0, 0, 0, 6, 13, 63, }, + { 2, 0, 0, 6, 13, 28, }, + { 1, 0, 0, 6, 13, 28, }, + { 0, 0, 0, 6, 14, 63, }, + { 2, 0, 0, 6, 14, 63, }, + { 1, 0, 0, 6, 14, 63, }, + { 0, 0, 0, 7, 1, 28, }, + { 2, 0, 0, 7, 1, 26, }, + { 1, 0, 0, 7, 1, 26, }, + { 0, 0, 0, 7, 2, 40, }, + { 2, 0, 0, 7, 2, 26, }, + { 1, 0, 0, 7, 2, 26, }, + { 0, 0, 0, 7, 3, 40, }, + { 2, 0, 0, 7, 3, 26, }, + { 1, 0, 0, 7, 3, 26, }, + { 0, 0, 0, 7, 4, 40, }, + { 2, 0, 0, 7, 4, 26, }, + { 1, 0, 0, 7, 4, 26, }, + { 0, 0, 0, 7, 5, 40, }, + { 2, 0, 0, 7, 5, 26, }, + { 1, 0, 0, 7, 5, 26, }, + { 0, 0, 0, 7, 6, 40, }, + { 2, 0, 0, 7, 6, 26, }, + { 1, 0, 0, 7, 6, 26, }, + { 0, 0, 0, 7, 7, 40, }, + { 2, 0, 0, 7, 7, 26, }, + { 1, 0, 0, 7, 7, 26, }, + { 0, 0, 0, 7, 8, 40, }, + { 2, 0, 0, 7, 8, 26, }, + { 1, 0, 0, 7, 8, 26, }, + { 0, 0, 0, 7, 9, 40, }, + { 2, 0, 0, 7, 9, 26, }, + { 1, 0, 0, 7, 9, 26, }, + { 0, 0, 0, 7, 10, 40, }, + { 2, 0, 0, 7, 10, 26, }, + { 1, 0, 0, 7, 10, 26, }, + { 0, 0, 0, 7, 11, 28, }, + { 2, 0, 0, 7, 11, 26, }, + { 1, 0, 0, 7, 11, 26, }, + { 0, 0, 0, 7, 12, 63, }, + { 2, 0, 0, 7, 12, 26, }, + { 1, 0, 0, 7, 12, 26, }, + { 0, 0, 0, 7, 13, 63, }, + { 2, 0, 0, 7, 13, 26, 
}, + { 1, 0, 0, 7, 13, 26, }, + { 0, 0, 0, 7, 14, 63, }, + { 2, 0, 0, 7, 14, 63, }, + { 1, 0, 0, 7, 14, 63, }, + { 0, 0, 1, 2, 1, 63, }, + { 2, 0, 1, 2, 1, 63, }, + { 1, 0, 1, 2, 1, 63, }, + { 0, 0, 1, 2, 2, 63, }, + { 2, 0, 1, 2, 2, 63, }, + { 1, 0, 1, 2, 2, 63, }, + { 0, 0, 1, 2, 3, 36, }, + { 2, 0, 1, 2, 3, 32, }, + { 1, 0, 1, 2, 3, 32, }, + { 0, 0, 1, 2, 4, 40, }, + { 2, 0, 1, 2, 4, 32, }, + { 1, 0, 1, 2, 4, 32, }, + { 0, 0, 1, 2, 5, 40, }, + { 2, 0, 1, 2, 5, 32, }, + { 1, 0, 1, 2, 5, 32, }, + { 0, 0, 1, 2, 6, 40, }, + { 2, 0, 1, 2, 6, 32, }, + { 1, 0, 1, 2, 6, 32, }, + { 0, 0, 1, 2, 7, 40, }, + { 2, 0, 1, 2, 7, 32, }, + { 1, 0, 1, 2, 7, 32, }, + { 0, 0, 1, 2, 8, 40, }, + { 2, 0, 1, 2, 8, 32, }, + { 1, 0, 1, 2, 8, 32, }, + { 0, 0, 1, 2, 9, 40, }, + { 2, 0, 1, 2, 9, 32, }, + { 1, 0, 1, 2, 9, 32, }, + { 0, 0, 1, 2, 10, 40, }, + { 2, 0, 1, 2, 10, 32, }, + { 1, 0, 1, 2, 10, 32, }, + { 0, 0, 1, 2, 11, 34, }, + { 2, 0, 1, 2, 11, 32, }, + { 1, 0, 1, 2, 11, 32, }, + { 0, 0, 1, 2, 12, 63, }, + { 2, 0, 1, 2, 12, 32, }, + { 1, 0, 1, 2, 12, 32, }, + { 0, 0, 1, 2, 13, 63, }, + { 2, 0, 1, 2, 13, 32, }, + { 1, 0, 1, 2, 13, 32, }, + { 0, 0, 1, 2, 14, 63, }, + { 2, 0, 1, 2, 14, 63, }, + { 1, 0, 1, 2, 14, 63, }, + { 0, 0, 1, 3, 1, 63, }, + { 2, 0, 1, 3, 1, 63, }, + { 1, 0, 1, 3, 1, 63, }, + { 0, 0, 1, 3, 2, 63, }, + { 2, 0, 1, 3, 2, 63, }, + { 1, 0, 1, 3, 2, 63, }, + { 0, 0, 1, 3, 3, 34, }, + { 2, 0, 1, 3, 3, 30, }, + { 1, 0, 1, 3, 3, 30, }, + { 0, 0, 1, 3, 4, 38, }, + { 2, 0, 1, 3, 4, 30, }, + { 1, 0, 1, 3, 4, 30, }, + { 0, 0, 1, 3, 5, 38, }, + { 2, 0, 1, 3, 5, 30, }, + { 1, 0, 1, 3, 5, 30, }, + { 0, 0, 1, 3, 6, 38, }, + { 2, 0, 1, 3, 6, 30, }, + { 1, 0, 1, 3, 6, 30, }, + { 0, 0, 1, 3, 7, 38, }, + { 2, 0, 1, 3, 7, 30, }, + { 1, 0, 1, 3, 7, 30, }, + { 0, 0, 1, 3, 8, 38, }, + { 2, 0, 1, 3, 8, 30, }, + { 1, 0, 1, 3, 8, 30, }, + { 0, 0, 1, 3, 9, 38, }, + { 2, 0, 1, 3, 9, 30, }, + { 1, 0, 1, 3, 9, 30, }, + { 0, 0, 1, 3, 10, 38, }, + { 2, 0, 1, 3, 10, 30, }, + { 1, 0, 1, 3, 10, 30, }, + { 0, 0, 1, 3, 11, 32, }, + { 2, 0, 1, 3, 11, 30, }, + { 1, 0, 1, 3, 11, 30, }, + { 0, 0, 1, 3, 12, 63, }, + { 2, 0, 1, 3, 12, 30, }, + { 1, 0, 1, 3, 12, 30, }, + { 0, 0, 1, 3, 13, 63, }, + { 2, 0, 1, 3, 13, 30, }, + { 1, 0, 1, 3, 13, 30, }, + { 0, 0, 1, 3, 14, 63, }, + { 2, 0, 1, 3, 14, 63, }, + { 1, 0, 1, 3, 14, 63, }, + { 0, 0, 1, 6, 1, 63, }, + { 2, 0, 1, 6, 1, 63, }, + { 1, 0, 1, 6, 1, 63, }, + { 0, 0, 1, 6, 2, 63, }, + { 2, 0, 1, 6, 2, 63, }, + { 1, 0, 1, 6, 2, 63, }, + { 0, 0, 1, 6, 3, 32, }, + { 2, 0, 1, 6, 3, 28, }, + { 1, 0, 1, 6, 3, 28, }, + { 0, 0, 1, 6, 4, 36, }, + { 2, 0, 1, 6, 4, 28, }, + { 1, 0, 1, 6, 4, 28, }, + { 0, 0, 1, 6, 5, 36, }, + { 2, 0, 1, 6, 5, 28, }, + { 1, 0, 1, 6, 5, 28, }, + { 0, 0, 1, 6, 6, 36, }, + { 2, 0, 1, 6, 6, 28, }, + { 1, 0, 1, 6, 6, 28, }, + { 0, 0, 1, 6, 7, 36, }, + { 2, 0, 1, 6, 7, 28, }, + { 1, 0, 1, 6, 7, 28, }, + { 0, 0, 1, 6, 8, 36, }, + { 2, 0, 1, 6, 8, 28, }, + { 1, 0, 1, 6, 8, 28, }, + { 0, 0, 1, 6, 9, 36, }, + { 2, 0, 1, 6, 9, 28, }, + { 1, 0, 1, 6, 9, 28, }, + { 0, 0, 1, 6, 10, 36, }, + { 2, 0, 1, 6, 10, 28, }, + { 1, 0, 1, 6, 10, 28, }, + { 0, 0, 1, 6, 11, 30, }, + { 2, 0, 1, 6, 11, 28, }, + { 1, 0, 1, 6, 11, 28, }, + { 0, 0, 1, 6, 12, 63, }, + { 2, 0, 1, 6, 12, 28, }, + { 1, 0, 1, 6, 12, 28, }, + { 0, 0, 1, 6, 13, 63, }, + { 2, 0, 1, 6, 13, 28, }, + { 1, 0, 1, 6, 13, 28, }, + { 0, 0, 1, 6, 14, 63, }, + { 2, 0, 1, 6, 14, 63, }, + { 1, 0, 1, 6, 14, 63, }, + { 0, 0, 1, 7, 1, 63, }, + { 2, 0, 1, 7, 1, 63, }, + { 1, 0, 1, 7, 1, 63, }, + { 0, 0, 1, 7, 2, 63, }, + { 2, 0, 1, 7, 2, 
63, }, + { 1, 0, 1, 7, 2, 63, }, + { 0, 0, 1, 7, 3, 32, }, + { 2, 0, 1, 7, 3, 26, }, + { 1, 0, 1, 7, 3, 26, }, + { 0, 0, 1, 7, 4, 36, }, + { 2, 0, 1, 7, 4, 26, }, + { 1, 0, 1, 7, 4, 26, }, + { 0, 0, 1, 7, 5, 36, }, + { 2, 0, 1, 7, 5, 26, }, + { 1, 0, 1, 7, 5, 26, }, + { 0, 0, 1, 7, 6, 36, }, + { 2, 0, 1, 7, 6, 26, }, + { 1, 0, 1, 7, 6, 26, }, + { 0, 0, 1, 7, 7, 36, }, + { 2, 0, 1, 7, 7, 26, }, + { 1, 0, 1, 7, 7, 26, }, + { 0, 0, 1, 7, 8, 36, }, + { 2, 0, 1, 7, 8, 26, }, + { 1, 0, 1, 7, 8, 26, }, + { 0, 0, 1, 7, 9, 36, }, + { 2, 0, 1, 7, 9, 26, }, + { 1, 0, 1, 7, 9, 26, }, + { 0, 0, 1, 7, 10, 36, }, + { 2, 0, 1, 7, 10, 26, }, + { 1, 0, 1, 7, 10, 26, }, + { 0, 0, 1, 7, 11, 30, }, + { 2, 0, 1, 7, 11, 26, }, + { 1, 0, 1, 7, 11, 26, }, + { 0, 0, 1, 7, 12, 63, }, + { 2, 0, 1, 7, 12, 26, }, + { 1, 0, 1, 7, 12, 26, }, + { 0, 0, 1, 7, 13, 63, }, + { 2, 0, 1, 7, 13, 26, }, + { 1, 0, 1, 7, 13, 26, }, + { 0, 0, 1, 7, 14, 63, }, + { 2, 0, 1, 7, 14, 63, }, + { 1, 0, 1, 7, 14, 63, }, + { 0, 1, 0, 1, 36, 38, }, + { 2, 1, 0, 1, 36, 32, }, + { 1, 1, 0, 1, 36, 32, }, + { 0, 1, 0, 1, 40, 38, }, + { 2, 1, 0, 1, 40, 32, }, + { 1, 1, 0, 1, 40, 32, }, + { 0, 1, 0, 1, 44, 38, }, + { 2, 1, 0, 1, 44, 32, }, + { 1, 1, 0, 1, 44, 32, }, + { 0, 1, 0, 1, 48, 38, }, + { 2, 1, 0, 1, 48, 32, }, + { 1, 1, 0, 1, 48, 32, }, + { 0, 1, 0, 1, 52, 38, }, + { 2, 1, 0, 1, 52, 32, }, + { 1, 1, 0, 1, 52, 32, }, + { 0, 1, 0, 1, 56, 38, }, + { 2, 1, 0, 1, 56, 32, }, + { 1, 1, 0, 1, 56, 32, }, + { 0, 1, 0, 1, 60, 38, }, + { 2, 1, 0, 1, 60, 32, }, + { 1, 1, 0, 1, 60, 32, }, + { 0, 1, 0, 1, 64, 38, }, + { 2, 1, 0, 1, 64, 32, }, + { 1, 1, 0, 1, 64, 32, }, + { 0, 1, 0, 1, 100, 36, }, + { 2, 1, 0, 1, 100, 32, }, + { 1, 1, 0, 1, 100, 32, }, + { 0, 1, 0, 1, 104, 36, }, + { 2, 1, 0, 1, 104, 32, }, + { 1, 1, 0, 1, 104, 32, }, + { 0, 1, 0, 1, 108, 36, }, + { 2, 1, 0, 1, 108, 32, }, + { 1, 1, 0, 1, 108, 32, }, + { 0, 1, 0, 1, 112, 36, }, + { 2, 1, 0, 1, 112, 32, }, + { 1, 1, 0, 1, 112, 32, }, + { 0, 1, 0, 1, 116, 36, }, + { 2, 1, 0, 1, 116, 32, }, + { 1, 1, 0, 1, 116, 32, }, + { 0, 1, 0, 1, 120, 36, }, + { 2, 1, 0, 1, 120, 32, }, + { 1, 1, 0, 1, 120, 32, }, + { 0, 1, 0, 1, 124, 36, }, + { 2, 1, 0, 1, 124, 32, }, + { 1, 1, 0, 1, 124, 32, }, + { 0, 1, 0, 1, 128, 36, }, + { 2, 1, 0, 1, 128, 32, }, + { 1, 1, 0, 1, 128, 32, }, + { 0, 1, 0, 1, 132, 36, }, + { 2, 1, 0, 1, 132, 32, }, + { 1, 1, 0, 1, 132, 32, }, + { 0, 1, 0, 1, 136, 36, }, + { 2, 1, 0, 1, 136, 32, }, + { 1, 1, 0, 1, 136, 32, }, + { 0, 1, 0, 1, 140, 36, }, + { 2, 1, 0, 1, 140, 32, }, + { 1, 1, 0, 1, 140, 32, }, + { 0, 1, 0, 1, 149, 36, }, + { 2, 1, 0, 1, 149, 32, }, + { 1, 1, 0, 1, 149, 63, }, + { 0, 1, 0, 1, 153, 36, }, + { 2, 1, 0, 1, 153, 32, }, + { 1, 1, 0, 1, 153, 63, }, + { 0, 1, 0, 1, 157, 36, }, + { 2, 1, 0, 1, 157, 32, }, + { 1, 1, 0, 1, 157, 63, }, + { 0, 1, 0, 1, 161, 36, }, + { 2, 1, 0, 1, 161, 32, }, + { 1, 1, 0, 1, 161, 63, }, + { 0, 1, 0, 1, 165, 36, }, + { 2, 1, 0, 1, 165, 32, }, + { 1, 1, 0, 1, 165, 63, }, + { 0, 1, 0, 2, 36, 36, }, + { 2, 1, 0, 2, 36, 32, }, + { 1, 1, 0, 2, 36, 32, }, + { 0, 1, 0, 2, 40, 36, }, + { 2, 1, 0, 2, 40, 32, }, + { 1, 1, 0, 2, 40, 32, }, + { 0, 1, 0, 2, 44, 36, }, + { 2, 1, 0, 2, 44, 32, }, + { 1, 1, 0, 2, 44, 32, }, + { 0, 1, 0, 2, 48, 36, }, + { 2, 1, 0, 2, 48, 32, }, + { 1, 1, 0, 2, 48, 32, }, + { 0, 1, 0, 2, 52, 36, }, + { 2, 1, 0, 2, 52, 32, }, + { 1, 1, 0, 2, 52, 32, }, + { 0, 1, 0, 2, 56, 36, }, + { 2, 1, 0, 2, 56, 32, }, + { 1, 1, 0, 2, 56, 32, }, + { 0, 1, 0, 2, 60, 36, }, + { 2, 1, 0, 2, 60, 32, }, + { 1, 1, 0, 2, 60, 32, }, + { 0, 1, 
0, 2, 64, 36, }, + { 2, 1, 0, 2, 64, 32, }, + { 1, 1, 0, 2, 64, 32, }, + { 0, 1, 0, 2, 100, 36, }, + { 2, 1, 0, 2, 100, 32, }, + { 1, 1, 0, 2, 100, 32, }, + { 0, 1, 0, 2, 104, 36, }, + { 2, 1, 0, 2, 104, 32, }, + { 1, 1, 0, 2, 104, 32, }, + { 0, 1, 0, 2, 108, 36, }, + { 2, 1, 0, 2, 108, 32, }, + { 1, 1, 0, 2, 108, 32, }, + { 0, 1, 0, 2, 112, 36, }, + { 2, 1, 0, 2, 112, 32, }, + { 1, 1, 0, 2, 112, 32, }, + { 0, 1, 0, 2, 116, 36, }, + { 2, 1, 0, 2, 116, 32, }, + { 1, 1, 0, 2, 116, 32, }, + { 0, 1, 0, 2, 120, 36, }, + { 2, 1, 0, 2, 120, 32, }, + { 1, 1, 0, 2, 120, 32, }, + { 0, 1, 0, 2, 124, 36, }, + { 2, 1, 0, 2, 124, 32, }, + { 1, 1, 0, 2, 124, 32, }, + { 0, 1, 0, 2, 128, 36, }, + { 2, 1, 0, 2, 128, 32, }, + { 1, 1, 0, 2, 128, 32, }, + { 0, 1, 0, 2, 132, 36, }, + { 2, 1, 0, 2, 132, 32, }, + { 1, 1, 0, 2, 132, 32, }, + { 0, 1, 0, 2, 136, 36, }, + { 2, 1, 0, 2, 136, 32, }, + { 1, 1, 0, 2, 136, 32, }, + { 0, 1, 0, 2, 140, 34, }, + { 2, 1, 0, 2, 140, 32, }, + { 1, 1, 0, 2, 140, 32, }, + { 0, 1, 0, 2, 149, 32, }, + { 2, 1, 0, 2, 149, 32, }, + { 1, 1, 0, 2, 149, 63, }, + { 0, 1, 0, 2, 153, 38, }, + { 2, 1, 0, 2, 153, 32, }, + { 1, 1, 0, 2, 153, 63, }, + { 0, 1, 0, 2, 157, 38, }, + { 2, 1, 0, 2, 157, 32, }, + { 1, 1, 0, 2, 157, 63, }, + { 0, 1, 0, 2, 161, 38, }, + { 2, 1, 0, 2, 161, 32, }, + { 1, 1, 0, 2, 161, 63, }, + { 0, 1, 0, 2, 165, 38, }, + { 2, 1, 0, 2, 165, 32, }, + { 1, 1, 0, 2, 165, 63, }, + { 0, 1, 0, 3, 36, 34, }, + { 2, 1, 0, 3, 36, 30, }, + { 1, 1, 0, 3, 36, 30, }, + { 0, 1, 0, 3, 40, 34, }, + { 2, 1, 0, 3, 40, 30, }, + { 1, 1, 0, 3, 40, 30, }, + { 0, 1, 0, 3, 44, 34, }, + { 2, 1, 0, 3, 44, 30, }, + { 1, 1, 0, 3, 44, 30, }, + { 0, 1, 0, 3, 48, 34, }, + { 2, 1, 0, 3, 48, 30, }, + { 1, 1, 0, 3, 48, 30, }, + { 0, 1, 0, 3, 52, 34, }, + { 2, 1, 0, 3, 52, 30, }, + { 1, 1, 0, 3, 52, 30, }, + { 0, 1, 0, 3, 56, 34, }, + { 2, 1, 0, 3, 56, 30, }, + { 1, 1, 0, 3, 56, 30, }, + { 0, 1, 0, 3, 60, 34, }, + { 2, 1, 0, 3, 60, 30, }, + { 1, 1, 0, 3, 60, 30, }, + { 0, 1, 0, 3, 64, 34, }, + { 2, 1, 0, 3, 64, 30, }, + { 1, 1, 0, 3, 64, 30, }, + { 0, 1, 0, 3, 100, 34, }, + { 2, 1, 0, 3, 100, 30, }, + { 1, 1, 0, 3, 100, 30, }, + { 0, 1, 0, 3, 104, 34, }, + { 2, 1, 0, 3, 104, 30, }, + { 1, 1, 0, 3, 104, 30, }, + { 0, 1, 0, 3, 108, 34, }, + { 2, 1, 0, 3, 108, 30, }, + { 1, 1, 0, 3, 108, 30, }, + { 0, 1, 0, 3, 112, 34, }, + { 2, 1, 0, 3, 112, 30, }, + { 1, 1, 0, 3, 112, 30, }, + { 0, 1, 0, 3, 116, 34, }, + { 2, 1, 0, 3, 116, 30, }, + { 1, 1, 0, 3, 116, 30, }, + { 0, 1, 0, 3, 120, 34, }, + { 2, 1, 0, 3, 120, 30, }, + { 1, 1, 0, 3, 120, 30, }, + { 0, 1, 0, 3, 124, 34, }, + { 2, 1, 0, 3, 124, 30, }, + { 1, 1, 0, 3, 124, 30, }, + { 0, 1, 0, 3, 128, 34, }, + { 2, 1, 0, 3, 128, 30, }, + { 1, 1, 0, 3, 128, 30, }, + { 0, 1, 0, 3, 132, 34, }, + { 2, 1, 0, 3, 132, 30, }, + { 1, 1, 0, 3, 132, 30, }, + { 0, 1, 0, 3, 136, 34, }, + { 2, 1, 0, 3, 136, 30, }, + { 1, 1, 0, 3, 136, 30, }, + { 0, 1, 0, 3, 140, 32, }, + { 2, 1, 0, 3, 140, 30, }, + { 1, 1, 0, 3, 140, 30, }, + { 0, 1, 0, 3, 149, 30, }, + { 2, 1, 0, 3, 149, 30, }, + { 1, 1, 0, 3, 149, 63, }, + { 0, 1, 0, 3, 153, 36, }, + { 2, 1, 0, 3, 153, 30, }, + { 1, 1, 0, 3, 153, 63, }, + { 0, 1, 0, 3, 157, 36, }, + { 2, 1, 0, 3, 157, 30, }, + { 1, 1, 0, 3, 157, 63, }, + { 0, 1, 0, 3, 161, 36, }, + { 2, 1, 0, 3, 161, 30, }, + { 1, 1, 0, 3, 161, 63, }, + { 0, 1, 0, 3, 165, 36, }, + { 2, 1, 0, 3, 165, 30, }, + { 1, 1, 0, 3, 165, 63, }, + { 0, 1, 0, 6, 36, 32, }, + { 2, 1, 0, 6, 36, 28, }, + { 1, 1, 0, 6, 36, 28, }, + { 0, 1, 0, 6, 40, 32, }, + { 2, 1, 0, 6, 40, 28, }, + { 1, 1, 0, 
6, 40, 28, }, + { 0, 1, 0, 6, 44, 32, }, + { 2, 1, 0, 6, 44, 28, }, + { 1, 1, 0, 6, 44, 28, }, + { 0, 1, 0, 6, 48, 32, }, + { 2, 1, 0, 6, 48, 28, }, + { 1, 1, 0, 6, 48, 28, }, + { 0, 1, 0, 6, 52, 32, }, + { 2, 1, 0, 6, 52, 28, }, + { 1, 1, 0, 6, 52, 28, }, + { 0, 1, 0, 6, 56, 32, }, + { 2, 1, 0, 6, 56, 28, }, + { 1, 1, 0, 6, 56, 28, }, + { 0, 1, 0, 6, 60, 32, }, + { 2, 1, 0, 6, 60, 28, }, + { 1, 1, 0, 6, 60, 28, }, + { 0, 1, 0, 6, 64, 32, }, + { 2, 1, 0, 6, 64, 28, }, + { 1, 1, 0, 6, 64, 28, }, + { 0, 1, 0, 6, 100, 32, }, + { 2, 1, 0, 6, 100, 28, }, + { 1, 1, 0, 6, 100, 28, }, + { 0, 1, 0, 6, 104, 32, }, + { 2, 1, 0, 6, 104, 28, }, + { 1, 1, 0, 6, 104, 28, }, + { 0, 1, 0, 6, 108, 32, }, + { 2, 1, 0, 6, 108, 28, }, + { 1, 1, 0, 6, 108, 28, }, + { 0, 1, 0, 6, 112, 32, }, + { 2, 1, 0, 6, 112, 28, }, + { 1, 1, 0, 6, 112, 28, }, + { 0, 1, 0, 6, 116, 32, }, + { 2, 1, 0, 6, 116, 28, }, + { 1, 1, 0, 6, 116, 28, }, + { 0, 1, 0, 6, 120, 32, }, + { 2, 1, 0, 6, 120, 28, }, + { 1, 1, 0, 6, 120, 28, }, + { 0, 1, 0, 6, 124, 32, }, + { 2, 1, 0, 6, 124, 28, }, + { 1, 1, 0, 6, 124, 28, }, + { 0, 1, 0, 6, 128, 32, }, + { 2, 1, 0, 6, 128, 28, }, + { 1, 1, 0, 6, 128, 28, }, + { 0, 1, 0, 6, 132, 32, }, + { 2, 1, 0, 6, 132, 28, }, + { 1, 1, 0, 6, 132, 28, }, + { 0, 1, 0, 6, 136, 32, }, + { 2, 1, 0, 6, 136, 28, }, + { 1, 1, 0, 6, 136, 28, }, + { 0, 1, 0, 6, 140, 30, }, + { 2, 1, 0, 6, 140, 28, }, + { 1, 1, 0, 6, 140, 28, }, + { 0, 1, 0, 6, 149, 28, }, + { 2, 1, 0, 6, 149, 28, }, + { 1, 1, 0, 6, 149, 63, }, + { 0, 1, 0, 6, 153, 34, }, + { 2, 1, 0, 6, 153, 28, }, + { 1, 1, 0, 6, 153, 63, }, + { 0, 1, 0, 6, 157, 34, }, + { 2, 1, 0, 6, 157, 28, }, + { 1, 1, 0, 6, 157, 63, }, + { 0, 1, 0, 6, 161, 34, }, + { 2, 1, 0, 6, 161, 28, }, + { 1, 1, 0, 6, 161, 63, }, + { 0, 1, 0, 6, 165, 34, }, + { 2, 1, 0, 6, 165, 28, }, + { 1, 1, 0, 6, 165, 63, }, + { 0, 1, 0, 7, 36, 30, }, + { 2, 1, 0, 7, 36, 26, }, + { 1, 1, 0, 7, 36, 26, }, + { 0, 1, 0, 7, 40, 30, }, + { 2, 1, 0, 7, 40, 26, }, + { 1, 1, 0, 7, 40, 26, }, + { 0, 1, 0, 7, 44, 30, }, + { 2, 1, 0, 7, 44, 26, }, + { 1, 1, 0, 7, 44, 26, }, + { 0, 1, 0, 7, 48, 30, }, + { 2, 1, 0, 7, 48, 26, }, + { 1, 1, 0, 7, 48, 26, }, + { 0, 1, 0, 7, 52, 30, }, + { 2, 1, 0, 7, 52, 26, }, + { 1, 1, 0, 7, 52, 26, }, + { 0, 1, 0, 7, 56, 30, }, + { 2, 1, 0, 7, 56, 26, }, + { 1, 1, 0, 7, 56, 26, }, + { 0, 1, 0, 7, 60, 30, }, + { 2, 1, 0, 7, 60, 26, }, + { 1, 1, 0, 7, 60, 26, }, + { 0, 1, 0, 7, 64, 30, }, + { 2, 1, 0, 7, 64, 26, }, + { 1, 1, 0, 7, 64, 26, }, + { 0, 1, 0, 7, 100, 30, }, + { 2, 1, 0, 7, 100, 26, }, + { 1, 1, 0, 7, 100, 26, }, + { 0, 1, 0, 7, 104, 30, }, + { 2, 1, 0, 7, 104, 26, }, + { 1, 1, 0, 7, 104, 26, }, + { 0, 1, 0, 7, 108, 30, }, + { 2, 1, 0, 7, 108, 26, }, + { 1, 1, 0, 7, 108, 26, }, + { 0, 1, 0, 7, 112, 30, }, + { 2, 1, 0, 7, 112, 26, }, + { 1, 1, 0, 7, 112, 26, }, + { 0, 1, 0, 7, 116, 30, }, + { 2, 1, 0, 7, 116, 26, }, + { 1, 1, 0, 7, 116, 26, }, + { 0, 1, 0, 7, 120, 30, }, + { 2, 1, 0, 7, 120, 26, }, + { 1, 1, 0, 7, 120, 26, }, + { 0, 1, 0, 7, 124, 30, }, + { 2, 1, 0, 7, 124, 26, }, + { 1, 1, 0, 7, 124, 26, }, + { 0, 1, 0, 7, 128, 30, }, + { 2, 1, 0, 7, 128, 26, }, + { 1, 1, 0, 7, 128, 26, }, + { 0, 1, 0, 7, 132, 30, }, + { 2, 1, 0, 7, 132, 26, }, + { 1, 1, 0, 7, 132, 26, }, + { 0, 1, 0, 7, 136, 30, }, + { 2, 1, 0, 7, 136, 26, }, + { 1, 1, 0, 7, 136, 26, }, + { 0, 1, 0, 7, 140, 28, }, + { 2, 1, 0, 7, 140, 26, }, + { 1, 1, 0, 7, 140, 26, }, + { 0, 1, 0, 7, 149, 26, }, + { 2, 1, 0, 7, 149, 26, }, + { 1, 1, 0, 7, 149, 63, }, + { 0, 1, 0, 7, 153, 32, }, + { 2, 1, 0, 7, 153, 26, 
}, + { 1, 1, 0, 7, 153, 63, }, + { 0, 1, 0, 7, 157, 32, }, + { 2, 1, 0, 7, 157, 26, }, + { 1, 1, 0, 7, 157, 63, }, + { 0, 1, 0, 7, 161, 32, }, + { 2, 1, 0, 7, 161, 26, }, + { 1, 1, 0, 7, 161, 63, }, + { 0, 1, 0, 7, 165, 32, }, + { 2, 1, 0, 7, 165, 26, }, + { 1, 1, 0, 7, 165, 63, }, + { 0, 1, 1, 2, 38, 32, }, + { 2, 1, 1, 2, 38, 32, }, + { 1, 1, 1, 2, 38, 32, }, + { 0, 1, 1, 2, 46, 32, }, + { 2, 1, 1, 2, 46, 32, }, + { 1, 1, 1, 2, 46, 32, }, + { 0, 1, 1, 2, 54, 32, }, + { 2, 1, 1, 2, 54, 32, }, + { 1, 1, 1, 2, 54, 32, }, + { 0, 1, 1, 2, 62, 30, }, + { 2, 1, 1, 2, 62, 32, }, + { 1, 1, 1, 2, 62, 32, }, + { 0, 1, 1, 2, 102, 30, }, + { 2, 1, 1, 2, 102, 32, }, + { 1, 1, 1, 2, 102, 32, }, + { 0, 1, 1, 2, 110, 38, }, + { 2, 1, 1, 2, 110, 32, }, + { 1, 1, 1, 2, 110, 32, }, + { 0, 1, 1, 2, 118, 38, }, + { 2, 1, 1, 2, 118, 32, }, + { 1, 1, 1, 2, 118, 32, }, + { 0, 1, 1, 2, 126, 38, }, + { 2, 1, 1, 2, 126, 32, }, + { 1, 1, 1, 2, 126, 32, }, + { 0, 1, 1, 2, 134, 38, }, + { 2, 1, 1, 2, 134, 32, }, + { 1, 1, 1, 2, 134, 32, }, + { 0, 1, 1, 2, 151, 32, }, + { 2, 1, 1, 2, 151, 32, }, + { 1, 1, 1, 2, 151, 63, }, + { 0, 1, 1, 2, 159, 32, }, + { 2, 1, 1, 2, 159, 32, }, + { 1, 1, 1, 2, 159, 63, }, + { 0, 1, 1, 3, 38, 30, }, + { 2, 1, 1, 3, 38, 30, }, + { 1, 1, 1, 3, 38, 30, }, + { 0, 1, 1, 3, 46, 30, }, + { 2, 1, 1, 3, 46, 30, }, + { 1, 1, 1, 3, 46, 30, }, + { 0, 1, 1, 3, 54, 30, }, + { 2, 1, 1, 3, 54, 30, }, + { 1, 1, 1, 3, 54, 30, }, + { 0, 1, 1, 3, 62, 28, }, + { 2, 1, 1, 3, 62, 30, }, + { 1, 1, 1, 3, 62, 30, }, + { 0, 1, 1, 3, 102, 28, }, + { 2, 1, 1, 3, 102, 30, }, + { 1, 1, 1, 3, 102, 30, }, + { 0, 1, 1, 3, 110, 36, }, + { 2, 1, 1, 3, 110, 30, }, + { 1, 1, 1, 3, 110, 30, }, + { 0, 1, 1, 3, 118, 36, }, + { 2, 1, 1, 3, 118, 30, }, + { 1, 1, 1, 3, 118, 30, }, + { 0, 1, 1, 3, 126, 36, }, + { 2, 1, 1, 3, 126, 30, }, + { 1, 1, 1, 3, 126, 30, }, + { 0, 1, 1, 3, 134, 36, }, + { 2, 1, 1, 3, 134, 30, }, + { 1, 1, 1, 3, 134, 30, }, + { 0, 1, 1, 3, 151, 30, }, + { 2, 1, 1, 3, 151, 30, }, + { 1, 1, 1, 3, 151, 63, }, + { 0, 1, 1, 3, 159, 30, }, + { 2, 1, 1, 3, 159, 30, }, + { 1, 1, 1, 3, 159, 63, }, + { 0, 1, 1, 6, 38, 28, }, + { 2, 1, 1, 6, 38, 28, }, + { 1, 1, 1, 6, 38, 28, }, + { 0, 1, 1, 6, 46, 28, }, + { 2, 1, 1, 6, 46, 28, }, + { 1, 1, 1, 6, 46, 28, }, + { 0, 1, 1, 6, 54, 28, }, + { 2, 1, 1, 6, 54, 28, }, + { 1, 1, 1, 6, 54, 28, }, + { 0, 1, 1, 6, 62, 26, }, + { 2, 1, 1, 6, 62, 28, }, + { 1, 1, 1, 6, 62, 28, }, + { 0, 1, 1, 6, 102, 26, }, + { 2, 1, 1, 6, 102, 28, }, + { 1, 1, 1, 6, 102, 28, }, + { 0, 1, 1, 6, 110, 34, }, + { 2, 1, 1, 6, 110, 28, }, + { 1, 1, 1, 6, 110, 28, }, + { 0, 1, 1, 6, 118, 34, }, + { 2, 1, 1, 6, 118, 28, }, + { 1, 1, 1, 6, 118, 28, }, + { 0, 1, 1, 6, 126, 34, }, + { 2, 1, 1, 6, 126, 28, }, + { 1, 1, 1, 6, 126, 28, }, + { 0, 1, 1, 6, 134, 34, }, + { 2, 1, 1, 6, 134, 28, }, + { 1, 1, 1, 6, 134, 28, }, + { 0, 1, 1, 6, 151, 28, }, + { 2, 1, 1, 6, 151, 28, }, + { 1, 1, 1, 6, 151, 63, }, + { 0, 1, 1, 6, 159, 28, }, + { 2, 1, 1, 6, 159, 28, }, + { 1, 1, 1, 6, 159, 63, }, + { 0, 1, 1, 7, 38, 26, }, + { 2, 1, 1, 7, 38, 26, }, + { 1, 1, 1, 7, 38, 26, }, + { 0, 1, 1, 7, 46, 26, }, + { 2, 1, 1, 7, 46, 26, }, + { 1, 1, 1, 7, 46, 26, }, + { 0, 1, 1, 7, 54, 26, }, + { 2, 1, 1, 7, 54, 26, }, + { 1, 1, 1, 7, 54, 26, }, + { 0, 1, 1, 7, 62, 24, }, + { 2, 1, 1, 7, 62, 26, }, + { 1, 1, 1, 7, 62, 26, }, + { 0, 1, 1, 7, 102, 24, }, + { 2, 1, 1, 7, 102, 26, }, + { 1, 1, 1, 7, 102, 26, }, + { 0, 1, 1, 7, 110, 32, }, + { 2, 1, 1, 7, 110, 26, }, + { 1, 1, 1, 7, 110, 26, }, + { 0, 1, 1, 7, 118, 32, }, + { 2, 1, 1, 7, 
118, 26, }, + { 1, 1, 1, 7, 118, 26, }, + { 0, 1, 1, 7, 126, 32, }, + { 2, 1, 1, 7, 126, 26, }, + { 1, 1, 1, 7, 126, 26, }, + { 0, 1, 1, 7, 134, 32, }, + { 2, 1, 1, 7, 134, 26, }, + { 1, 1, 1, 7, 134, 26, }, + { 0, 1, 1, 7, 151, 26, }, + { 2, 1, 1, 7, 151, 26, }, + { 1, 1, 1, 7, 151, 63, }, + { 0, 1, 1, 7, 159, 26, }, + { 2, 1, 1, 7, 159, 26, }, + { 1, 1, 1, 7, 159, 63, }, + { 0, 1, 2, 4, 42, 26, }, + { 2, 1, 2, 4, 42, 32, }, + { 1, 1, 2, 4, 42, 32, }, + { 0, 1, 2, 4, 58, 26, }, + { 2, 1, 2, 4, 58, 32, }, + { 1, 1, 2, 4, 58, 32, }, + { 0, 1, 2, 4, 106, 28, }, + { 2, 1, 2, 4, 106, 32, }, + { 1, 1, 2, 4, 106, 32, }, + { 0, 1, 2, 4, 122, 28, }, + { 2, 1, 2, 4, 122, 32, }, + { 1, 1, 2, 4, 122, 32, }, + { 0, 1, 2, 4, 155, 28, }, + { 2, 1, 2, 4, 155, 32, }, + { 1, 1, 2, 4, 155, 63, }, + { 0, 1, 2, 5, 42, 24, }, + { 2, 1, 2, 5, 42, 30, }, + { 1, 1, 2, 5, 42, 30, }, + { 0, 1, 2, 5, 58, 24, }, + { 2, 1, 2, 5, 58, 30, }, + { 1, 1, 2, 5, 58, 30, }, + { 0, 1, 2, 5, 106, 26, }, + { 2, 1, 2, 5, 106, 30, }, + { 1, 1, 2, 5, 106, 30, }, + { 0, 1, 2, 5, 122, 26, }, + { 2, 1, 2, 5, 122, 30, }, + { 1, 1, 2, 5, 122, 30, }, + { 0, 1, 2, 5, 155, 26, }, + { 2, 1, 2, 5, 155, 30, }, + { 1, 1, 2, 5, 155, 63, }, + { 0, 1, 2, 8, 42, 22, }, + { 2, 1, 2, 8, 42, 28, }, + { 1, 1, 2, 8, 42, 28, }, + { 0, 1, 2, 8, 58, 22, }, + { 2, 1, 2, 8, 58, 28, }, + { 1, 1, 2, 8, 58, 28, }, + { 0, 1, 2, 8, 106, 24, }, + { 2, 1, 2, 8, 106, 28, }, + { 1, 1, 2, 8, 106, 28, }, + { 0, 1, 2, 8, 122, 24, }, + { 2, 1, 2, 8, 122, 28, }, + { 1, 1, 2, 8, 122, 28, }, + { 0, 1, 2, 8, 155, 24, }, + { 2, 1, 2, 8, 155, 28, }, + { 1, 1, 2, 8, 155, 63, }, + { 0, 1, 2, 9, 42, 20, }, + { 2, 1, 2, 9, 42, 26, }, + { 1, 1, 2, 9, 42, 26, }, + { 0, 1, 2, 9, 58, 20, }, + { 2, 1, 2, 9, 58, 26, }, + { 1, 1, 2, 9, 58, 26, }, + { 0, 1, 2, 9, 106, 22, }, + { 2, 1, 2, 9, 106, 26, }, + { 1, 1, 2, 9, 106, 26, }, + { 0, 1, 2, 9, 122, 22, }, + { 2, 1, 2, 9, 122, 26, }, + { 1, 1, 2, 9, 122, 26, }, + { 0, 1, 2, 9, 155, 22, }, + { 2, 1, 2, 9, 155, 26, }, + { 1, 1, 2, 9, 155, 63, }, +}; + +RTW_DECL_TABLE_TXPWR_LMT(rtw8814a_txpwr_lmt_type7); + +static const struct rtw_txpwr_lmt_cfg_pair rtw8814a_txpwr_lmt_type8[] = { + { 0, 0, 0, 0, 1, 46, }, + { 2, 0, 0, 0, 1, 46, }, + { 1, 0, 0, 0, 1, 46, }, + { 0, 0, 0, 0, 2, 46, }, + { 2, 0, 0, 0, 2, 46, }, + { 1, 0, 0, 0, 2, 46, }, + { 0, 0, 0, 0, 3, 46, }, + { 2, 0, 0, 0, 3, 46, }, + { 1, 0, 0, 0, 3, 46, }, + { 0, 0, 0, 0, 4, 46, }, + { 2, 0, 0, 0, 4, 46, }, + { 1, 0, 0, 0, 4, 46, }, + { 0, 0, 0, 0, 5, 46, }, + { 2, 0, 0, 0, 5, 46, }, + { 1, 0, 0, 0, 5, 46, }, + { 0, 0, 0, 0, 6, 46, }, + { 2, 0, 0, 0, 6, 46, }, + { 1, 0, 0, 0, 6, 46, }, + { 0, 0, 0, 0, 7, 46, }, + { 2, 0, 0, 0, 7, 46, }, + { 1, 0, 0, 0, 7, 46, }, + { 0, 0, 0, 0, 8, 46, }, + { 2, 0, 0, 0, 8, 46, }, + { 1, 0, 0, 0, 8, 46, }, + { 0, 0, 0, 0, 9, 46, }, + { 2, 0, 0, 0, 9, 46, }, + { 1, 0, 0, 0, 9, 46, }, + { 0, 0, 0, 0, 10, 46, }, + { 2, 0, 0, 0, 10, 46, }, + { 1, 0, 0, 0, 10, 46, }, + { 0, 0, 0, 0, 11, 46, }, + { 2, 0, 0, 0, 11, 46, }, + { 1, 0, 0, 0, 11, 46, }, + { 0, 0, 0, 0, 12, 63, }, + { 2, 0, 0, 0, 12, 46, }, + { 1, 0, 0, 0, 12, 46, }, + { 0, 0, 0, 0, 13, 63, }, + { 2, 0, 0, 0, 13, 46, }, + { 1, 0, 0, 0, 13, 46, }, + { 0, 0, 0, 0, 14, 63, }, + { 2, 0, 0, 0, 14, 63, }, + { 1, 0, 0, 0, 14, 46, }, + { 0, 0, 0, 1, 1, 46, }, + { 2, 0, 0, 1, 1, 46, }, + { 1, 0, 0, 1, 1, 46, }, + { 0, 0, 0, 1, 2, 46, }, + { 2, 0, 0, 1, 2, 46, }, + { 1, 0, 0, 1, 2, 46, }, + { 0, 0, 0, 1, 3, 46, }, + { 2, 0, 0, 1, 3, 46, }, + { 1, 0, 0, 1, 3, 46, }, + { 0, 0, 0, 1, 4, 46, }, + { 2, 0, 0, 1, 4, 
46, }, + { 1, 0, 0, 1, 4, 46, }, + { 0, 0, 0, 1, 5, 46, }, + { 2, 0, 0, 1, 5, 46, }, + { 1, 0, 0, 1, 5, 46, }, + { 0, 0, 0, 1, 6, 46, }, + { 2, 0, 0, 1, 6, 46, }, + { 1, 0, 0, 1, 6, 46, }, + { 0, 0, 0, 1, 7, 46, }, + { 2, 0, 0, 1, 7, 46, }, + { 1, 0, 0, 1, 7, 46, }, + { 0, 0, 0, 1, 8, 46, }, + { 2, 0, 0, 1, 8, 46, }, + { 1, 0, 0, 1, 8, 46, }, + { 0, 0, 0, 1, 9, 46, }, + { 2, 0, 0, 1, 9, 46, }, + { 1, 0, 0, 1, 9, 46, }, + { 0, 0, 0, 1, 10, 46, }, + { 2, 0, 0, 1, 10, 46, }, + { 1, 0, 0, 1, 10, 46, }, + { 0, 0, 0, 1, 11, 46, }, + { 2, 0, 0, 1, 11, 46, }, + { 1, 0, 0, 1, 11, 46, }, + { 0, 0, 0, 1, 12, 63, }, + { 2, 0, 0, 1, 12, 46, }, + { 1, 0, 0, 1, 12, 46, }, + { 0, 0, 0, 1, 13, 63, }, + { 2, 0, 0, 1, 13, 46, }, + { 1, 0, 0, 1, 13, 46, }, + { 0, 0, 0, 1, 14, 63, }, + { 2, 0, 0, 1, 14, 63, }, + { 1, 0, 0, 1, 14, 46, }, + { 0, 0, 0, 2, 1, 46, }, + { 2, 0, 0, 2, 1, 46, }, + { 1, 0, 0, 2, 1, 46, }, + { 0, 0, 0, 2, 2, 46, }, + { 2, 0, 0, 2, 2, 46, }, + { 1, 0, 0, 2, 2, 46, }, + { 0, 0, 0, 2, 3, 46, }, + { 2, 0, 0, 2, 3, 46, }, + { 1, 0, 0, 2, 3, 46, }, + { 0, 0, 0, 2, 4, 46, }, + { 2, 0, 0, 2, 4, 46, }, + { 1, 0, 0, 2, 4, 46, }, + { 0, 0, 0, 2, 5, 46, }, + { 2, 0, 0, 2, 5, 46, }, + { 1, 0, 0, 2, 5, 46, }, + { 0, 0, 0, 2, 6, 46, }, + { 2, 0, 0, 2, 6, 46, }, + { 1, 0, 0, 2, 6, 46, }, + { 0, 0, 0, 2, 7, 46, }, + { 2, 0, 0, 2, 7, 46, }, + { 1, 0, 0, 2, 7, 46, }, + { 0, 0, 0, 2, 8, 46, }, + { 2, 0, 0, 2, 8, 46, }, + { 1, 0, 0, 2, 8, 46, }, + { 0, 0, 0, 2, 9, 46, }, + { 2, 0, 0, 2, 9, 46, }, + { 1, 0, 0, 2, 9, 46, }, + { 0, 0, 0, 2, 10, 46, }, + { 2, 0, 0, 2, 10, 46, }, + { 1, 0, 0, 2, 10, 46, }, + { 0, 0, 0, 2, 11, 46, }, + { 2, 0, 0, 2, 11, 46, }, + { 1, 0, 0, 2, 11, 46, }, + { 0, 0, 0, 2, 12, 63, }, + { 2, 0, 0, 2, 12, 46, }, + { 1, 0, 0, 2, 12, 46, }, + { 0, 0, 0, 2, 13, 63, }, + { 2, 0, 0, 2, 13, 46, }, + { 1, 0, 0, 2, 13, 46, }, + { 0, 0, 0, 2, 14, 63, }, + { 2, 0, 0, 2, 14, 63, }, + { 1, 0, 0, 2, 14, 46, }, + { 0, 0, 0, 3, 1, 46, }, + { 2, 0, 0, 3, 1, 46, }, + { 1, 0, 0, 3, 1, 46, }, + { 0, 0, 0, 3, 2, 46, }, + { 2, 0, 0, 3, 2, 46, }, + { 1, 0, 0, 3, 2, 46, }, + { 0, 0, 0, 3, 3, 46, }, + { 2, 0, 0, 3, 3, 46, }, + { 1, 0, 0, 3, 3, 46, }, + { 0, 0, 0, 3, 4, 46, }, + { 2, 0, 0, 3, 4, 46, }, + { 1, 0, 0, 3, 4, 46, }, + { 0, 0, 0, 3, 5, 46, }, + { 2, 0, 0, 3, 5, 46, }, + { 1, 0, 0, 3, 5, 46, }, + { 0, 0, 0, 3, 6, 46, }, + { 2, 0, 0, 3, 6, 46, }, + { 1, 0, 0, 3, 6, 46, }, + { 0, 0, 0, 3, 7, 46, }, + { 2, 0, 0, 3, 7, 46, }, + { 1, 0, 0, 3, 7, 46, }, + { 0, 0, 0, 3, 8, 46, }, + { 2, 0, 0, 3, 8, 46, }, + { 1, 0, 0, 3, 8, 46, }, + { 0, 0, 0, 3, 9, 46, }, + { 2, 0, 0, 3, 9, 46, }, + { 1, 0, 0, 3, 9, 46, }, + { 0, 0, 0, 3, 10, 46, }, + { 2, 0, 0, 3, 10, 46, }, + { 1, 0, 0, 3, 10, 46, }, + { 0, 0, 0, 3, 11, 46, }, + { 2, 0, 0, 3, 11, 46, }, + { 1, 0, 0, 3, 11, 46, }, + { 0, 0, 0, 3, 12, 63, }, + { 2, 0, 0, 3, 12, 46, }, + { 1, 0, 0, 3, 12, 46, }, + { 0, 0, 0, 3, 13, 63, }, + { 2, 0, 0, 3, 13, 46, }, + { 1, 0, 0, 3, 13, 46, }, + { 0, 0, 0, 3, 14, 63, }, + { 2, 0, 0, 3, 14, 63, }, + { 1, 0, 0, 3, 14, 46, }, + { 0, 0, 0, 6, 1, 46, }, + { 2, 0, 0, 6, 1, 46, }, + { 1, 0, 0, 6, 1, 46, }, + { 0, 0, 0, 6, 2, 46, }, + { 2, 0, 0, 6, 2, 46, }, + { 1, 0, 0, 6, 2, 46, }, + { 0, 0, 0, 6, 3, 46, }, + { 2, 0, 0, 6, 3, 46, }, + { 1, 0, 0, 6, 3, 46, }, + { 0, 0, 0, 6, 4, 46, }, + { 2, 0, 0, 6, 4, 46, }, + { 1, 0, 0, 6, 4, 46, }, + { 0, 0, 0, 6, 5, 46, }, + { 2, 0, 0, 6, 5, 46, }, + { 1, 0, 0, 6, 5, 46, }, + { 0, 0, 0, 6, 6, 46, }, + { 2, 0, 0, 6, 6, 46, }, + { 1, 0, 0, 6, 6, 46, }, + { 0, 0, 0, 6, 7, 46, }, + { 2, 0, 0, 6, 7, 
46, }, + { 1, 0, 0, 6, 7, 46, }, + { 0, 0, 0, 6, 8, 46, }, + { 2, 0, 0, 6, 8, 46, }, + { 1, 0, 0, 6, 8, 46, }, + { 0, 0, 0, 6, 9, 46, }, + { 2, 0, 0, 6, 9, 46, }, + { 1, 0, 0, 6, 9, 46, }, + { 0, 0, 0, 6, 10, 46, }, + { 2, 0, 0, 6, 10, 46, }, + { 1, 0, 0, 6, 10, 46, }, + { 0, 0, 0, 6, 11, 46, }, + { 2, 0, 0, 6, 11, 46, }, + { 1, 0, 0, 6, 11, 46, }, + { 0, 0, 0, 6, 12, 63, }, + { 2, 0, 0, 6, 12, 46, }, + { 1, 0, 0, 6, 12, 46, }, + { 0, 0, 0, 6, 13, 63, }, + { 2, 0, 0, 6, 13, 46, }, + { 1, 0, 0, 6, 13, 46, }, + { 0, 0, 0, 6, 14, 63, }, + { 2, 0, 0, 6, 14, 63, }, + { 1, 0, 0, 6, 14, 46, }, + { 0, 0, 0, 7, 1, 46, }, + { 2, 0, 0, 7, 1, 46, }, + { 1, 0, 0, 7, 1, 46, }, + { 0, 0, 0, 7, 2, 46, }, + { 2, 0, 0, 7, 2, 46, }, + { 1, 0, 0, 7, 2, 46, }, + { 0, 0, 0, 7, 3, 46, }, + { 2, 0, 0, 7, 3, 46, }, + { 1, 0, 0, 7, 3, 46, }, + { 0, 0, 0, 7, 4, 46, }, + { 2, 0, 0, 7, 4, 46, }, + { 1, 0, 0, 7, 4, 46, }, + { 0, 0, 0, 7, 5, 46, }, + { 2, 0, 0, 7, 5, 46, }, + { 1, 0, 0, 7, 5, 46, }, + { 0, 0, 0, 7, 6, 46, }, + { 2, 0, 0, 7, 6, 46, }, + { 1, 0, 0, 7, 6, 46, }, + { 0, 0, 0, 7, 7, 46, }, + { 2, 0, 0, 7, 7, 46, }, + { 1, 0, 0, 7, 7, 46, }, + { 0, 0, 0, 7, 8, 46, }, + { 2, 0, 0, 7, 8, 46, }, + { 1, 0, 0, 7, 8, 46, }, + { 0, 0, 0, 7, 9, 46, }, + { 2, 0, 0, 7, 9, 46, }, + { 1, 0, 0, 7, 9, 46, }, + { 0, 0, 0, 7, 10, 46, }, + { 2, 0, 0, 7, 10, 46, }, + { 1, 0, 0, 7, 10, 46, }, + { 0, 0, 0, 7, 11, 46, }, + { 2, 0, 0, 7, 11, 46, }, + { 1, 0, 0, 7, 11, 46, }, + { 0, 0, 0, 7, 12, 63, }, + { 2, 0, 0, 7, 12, 46, }, + { 1, 0, 0, 7, 12, 46, }, + { 0, 0, 0, 7, 13, 63, }, + { 2, 0, 0, 7, 13, 46, }, + { 1, 0, 0, 7, 13, 46, }, + { 0, 0, 0, 7, 14, 63, }, + { 2, 0, 0, 7, 14, 63, }, + { 1, 0, 0, 7, 14, 46, }, + { 0, 0, 1, 2, 1, 63, }, + { 2, 0, 1, 2, 1, 63, }, + { 1, 0, 1, 2, 1, 63, }, + { 0, 0, 1, 2, 2, 63, }, + { 2, 0, 1, 2, 2, 63, }, + { 1, 0, 1, 2, 2, 63, }, + { 0, 0, 1, 2, 3, 30, }, + { 2, 0, 1, 2, 3, 34, }, + { 1, 0, 1, 2, 3, 34, }, + { 0, 0, 1, 2, 4, 34, }, + { 2, 0, 1, 2, 4, 34, }, + { 1, 0, 1, 2, 4, 34, }, + { 0, 0, 1, 2, 5, 34, }, + { 2, 0, 1, 2, 5, 34, }, + { 1, 0, 1, 2, 5, 34, }, + { 0, 0, 1, 2, 6, 34, }, + { 2, 0, 1, 2, 6, 34, }, + { 1, 0, 1, 2, 6, 34, }, + { 0, 0, 1, 2, 7, 34, }, + { 2, 0, 1, 2, 7, 34, }, + { 1, 0, 1, 2, 7, 34, }, + { 0, 0, 1, 2, 8, 34, }, + { 2, 0, 1, 2, 8, 34, }, + { 1, 0, 1, 2, 8, 34, }, + { 0, 0, 1, 2, 9, 34, }, + { 2, 0, 1, 2, 9, 34, }, + { 1, 0, 1, 2, 9, 34, }, + { 0, 0, 1, 2, 10, 34, }, + { 2, 0, 1, 2, 10, 34, }, + { 1, 0, 1, 2, 10, 34, }, + { 0, 0, 1, 2, 11, 28, }, + { 2, 0, 1, 2, 11, 34, }, + { 1, 0, 1, 2, 11, 34, }, + { 0, 0, 1, 2, 12, 63, }, + { 2, 0, 1, 2, 12, 34, }, + { 1, 0, 1, 2, 12, 34, }, + { 0, 0, 1, 2, 13, 63, }, + { 2, 0, 1, 2, 13, 34, }, + { 1, 0, 1, 2, 13, 34, }, + { 0, 0, 1, 2, 14, 63, }, + { 2, 0, 1, 2, 14, 63, }, + { 1, 0, 1, 2, 14, 63, }, + { 0, 0, 1, 3, 1, 63, }, + { 2, 0, 1, 3, 1, 63, }, + { 1, 0, 1, 3, 1, 63, }, + { 0, 0, 1, 3, 2, 63, }, + { 2, 0, 1, 3, 2, 63, }, + { 1, 0, 1, 3, 2, 63, }, + { 0, 0, 1, 3, 3, 30, }, + { 2, 0, 1, 3, 3, 34, }, + { 1, 0, 1, 3, 3, 34, }, + { 0, 0, 1, 3, 4, 34, }, + { 2, 0, 1, 3, 4, 34, }, + { 1, 0, 1, 3, 4, 34, }, + { 0, 0, 1, 3, 5, 34, }, + { 2, 0, 1, 3, 5, 34, }, + { 1, 0, 1, 3, 5, 34, }, + { 0, 0, 1, 3, 6, 34, }, + { 2, 0, 1, 3, 6, 34, }, + { 1, 0, 1, 3, 6, 34, }, + { 0, 0, 1, 3, 7, 34, }, + { 2, 0, 1, 3, 7, 34, }, + { 1, 0, 1, 3, 7, 34, }, + { 0, 0, 1, 3, 8, 34, }, + { 2, 0, 1, 3, 8, 34, }, + { 1, 0, 1, 3, 8, 34, }, + { 0, 0, 1, 3, 9, 34, }, + { 2, 0, 1, 3, 9, 34, }, + { 1, 0, 1, 3, 9, 34, }, + { 0, 0, 1, 3, 10, 34, }, + { 2, 0, 1, 3, 
10, 34, }, + { 1, 0, 1, 3, 10, 34, }, + { 0, 0, 1, 3, 11, 28, }, + { 2, 0, 1, 3, 11, 34, }, + { 1, 0, 1, 3, 11, 34, }, + { 0, 0, 1, 3, 12, 63, }, + { 2, 0, 1, 3, 12, 34, }, + { 1, 0, 1, 3, 12, 34, }, + { 0, 0, 1, 3, 13, 63, }, + { 2, 0, 1, 3, 13, 34, }, + { 1, 0, 1, 3, 13, 34, }, + { 0, 0, 1, 3, 14, 63, }, + { 2, 0, 1, 3, 14, 63, }, + { 1, 0, 1, 3, 14, 63, }, + { 0, 0, 1, 6, 1, 63, }, + { 2, 0, 1, 6, 1, 63, }, + { 1, 0, 1, 6, 1, 63, }, + { 0, 0, 1, 6, 2, 63, }, + { 2, 0, 1, 6, 2, 63, }, + { 1, 0, 1, 6, 2, 63, }, + { 0, 0, 1, 6, 3, 30, }, + { 2, 0, 1, 6, 3, 34, }, + { 1, 0, 1, 6, 3, 34, }, + { 0, 0, 1, 6, 4, 34, }, + { 2, 0, 1, 6, 4, 34, }, + { 1, 0, 1, 6, 4, 34, }, + { 0, 0, 1, 6, 5, 34, }, + { 2, 0, 1, 6, 5, 34, }, + { 1, 0, 1, 6, 5, 34, }, + { 0, 0, 1, 6, 6, 34, }, + { 2, 0, 1, 6, 6, 34, }, + { 1, 0, 1, 6, 6, 34, }, + { 0, 0, 1, 6, 7, 34, }, + { 2, 0, 1, 6, 7, 34, }, + { 1, 0, 1, 6, 7, 34, }, + { 0, 0, 1, 6, 8, 34, }, + { 2, 0, 1, 6, 8, 34, }, + { 1, 0, 1, 6, 8, 34, }, + { 0, 0, 1, 6, 9, 34, }, + { 2, 0, 1, 6, 9, 34, }, + { 1, 0, 1, 6, 9, 34, }, + { 0, 0, 1, 6, 10, 34, }, + { 2, 0, 1, 6, 10, 34, }, + { 1, 0, 1, 6, 10, 34, }, + { 0, 0, 1, 6, 11, 28, }, + { 2, 0, 1, 6, 11, 34, }, + { 1, 0, 1, 6, 11, 34, }, + { 0, 0, 1, 6, 12, 63, }, + { 2, 0, 1, 6, 12, 34, }, + { 1, 0, 1, 6, 12, 34, }, + { 0, 0, 1, 6, 13, 63, }, + { 2, 0, 1, 6, 13, 34, }, + { 1, 0, 1, 6, 13, 34, }, + { 0, 0, 1, 6, 14, 63, }, + { 2, 0, 1, 6, 14, 63, }, + { 1, 0, 1, 6, 14, 63, }, + { 0, 0, 1, 7, 1, 63, }, + { 2, 0, 1, 7, 1, 63, }, + { 1, 0, 1, 7, 1, 63, }, + { 0, 0, 1, 7, 2, 63, }, + { 2, 0, 1, 7, 2, 63, }, + { 1, 0, 1, 7, 2, 63, }, + { 0, 0, 1, 7, 3, 30, }, + { 2, 0, 1, 7, 3, 34, }, + { 1, 0, 1, 7, 3, 34, }, + { 0, 0, 1, 7, 4, 34, }, + { 2, 0, 1, 7, 4, 34, }, + { 1, 0, 1, 7, 4, 34, }, + { 0, 0, 1, 7, 5, 34, }, + { 2, 0, 1, 7, 5, 34, }, + { 1, 0, 1, 7, 5, 34, }, + { 0, 0, 1, 7, 6, 34, }, + { 2, 0, 1, 7, 6, 34, }, + { 1, 0, 1, 7, 6, 34, }, + { 0, 0, 1, 7, 7, 34, }, + { 2, 0, 1, 7, 7, 34, }, + { 1, 0, 1, 7, 7, 34, }, + { 0, 0, 1, 7, 8, 34, }, + { 2, 0, 1, 7, 8, 34, }, + { 1, 0, 1, 7, 8, 34, }, + { 0, 0, 1, 7, 9, 34, }, + { 2, 0, 1, 7, 9, 34, }, + { 1, 0, 1, 7, 9, 34, }, + { 0, 0, 1, 7, 10, 34, }, + { 2, 0, 1, 7, 10, 34, }, + { 1, 0, 1, 7, 10, 34, }, + { 0, 0, 1, 7, 11, 28, }, + { 2, 0, 1, 7, 11, 34, }, + { 1, 0, 1, 7, 11, 34, }, + { 0, 0, 1, 7, 12, 63, }, + { 2, 0, 1, 7, 12, 34, }, + { 1, 0, 1, 7, 12, 34, }, + { 0, 0, 1, 7, 13, 63, }, + { 2, 0, 1, 7, 13, 34, }, + { 1, 0, 1, 7, 13, 34, }, + { 0, 0, 1, 7, 14, 63, }, + { 2, 0, 1, 7, 14, 63, }, + { 1, 0, 1, 7, 14, 63, }, + { 0, 1, 0, 1, 36, 46, }, + { 2, 1, 0, 1, 36, 46, }, + { 1, 1, 0, 1, 36, 46, }, + { 0, 1, 0, 1, 40, 46, }, + { 2, 1, 0, 1, 40, 46, }, + { 1, 1, 0, 1, 40, 46, }, + { 0, 1, 0, 1, 44, 46, }, + { 2, 1, 0, 1, 44, 46, }, + { 1, 1, 0, 1, 44, 46, }, + { 0, 1, 0, 1, 48, 46, }, + { 2, 1, 0, 1, 48, 46, }, + { 1, 1, 0, 1, 48, 46, }, + { 0, 1, 0, 1, 52, 46, }, + { 2, 1, 0, 1, 52, 46, }, + { 1, 1, 0, 1, 52, 46, }, + { 0, 1, 0, 1, 56, 46, }, + { 2, 1, 0, 1, 56, 46, }, + { 1, 1, 0, 1, 56, 46, }, + { 0, 1, 0, 1, 60, 46, }, + { 2, 1, 0, 1, 60, 46, }, + { 1, 1, 0, 1, 60, 46, }, + { 0, 1, 0, 1, 64, 46, }, + { 2, 1, 0, 1, 64, 46, }, + { 1, 1, 0, 1, 64, 46, }, + { 0, 1, 0, 1, 100, 46, }, + { 2, 1, 0, 1, 100, 46, }, + { 1, 1, 0, 1, 100, 46, }, + { 0, 1, 0, 1, 104, 46, }, + { 2, 1, 0, 1, 104, 46, }, + { 1, 1, 0, 1, 104, 46, }, + { 0, 1, 0, 1, 108, 46, }, + { 2, 1, 0, 1, 108, 46, }, + { 1, 1, 0, 1, 108, 46, }, + { 0, 1, 0, 1, 112, 46, }, + { 2, 1, 0, 1, 112, 46, }, + { 1, 1, 0, 1, 112, 
46, }, + { 0, 1, 0, 1, 116, 46, }, + { 2, 1, 0, 1, 116, 46, }, + { 1, 1, 0, 1, 116, 46, }, + { 0, 1, 0, 1, 120, 46, }, + { 2, 1, 0, 1, 120, 46, }, + { 1, 1, 0, 1, 120, 46, }, + { 0, 1, 0, 1, 124, 46, }, + { 2, 1, 0, 1, 124, 46, }, + { 1, 1, 0, 1, 124, 46, }, + { 0, 1, 0, 1, 128, 46, }, + { 2, 1, 0, 1, 128, 46, }, + { 1, 1, 0, 1, 128, 46, }, + { 0, 1, 0, 1, 132, 46, }, + { 2, 1, 0, 1, 132, 46, }, + { 1, 1, 0, 1, 132, 46, }, + { 0, 1, 0, 1, 136, 46, }, + { 2, 1, 0, 1, 136, 46, }, + { 1, 1, 0, 1, 136, 46, }, + { 0, 1, 0, 1, 140, 46, }, + { 2, 1, 0, 1, 140, 46, }, + { 1, 1, 0, 1, 140, 46, }, + { 0, 1, 0, 1, 149, 46, }, + { 2, 1, 0, 1, 149, 46, }, + { 1, 1, 0, 1, 149, 63, }, + { 0, 1, 0, 1, 153, 46, }, + { 2, 1, 0, 1, 153, 46, }, + { 1, 1, 0, 1, 153, 63, }, + { 0, 1, 0, 1, 157, 46, }, + { 2, 1, 0, 1, 157, 46, }, + { 1, 1, 0, 1, 157, 63, }, + { 0, 1, 0, 1, 161, 46, }, + { 2, 1, 0, 1, 161, 46, }, + { 1, 1, 0, 1, 161, 63, }, + { 0, 1, 0, 1, 165, 46, }, + { 2, 1, 0, 1, 165, 46, }, + { 1, 1, 0, 1, 165, 63, }, + { 0, 1, 0, 2, 36, 46, }, + { 2, 1, 0, 2, 36, 46, }, + { 1, 1, 0, 2, 36, 46, }, + { 0, 1, 0, 2, 40, 46, }, + { 2, 1, 0, 2, 40, 46, }, + { 1, 1, 0, 2, 40, 46, }, + { 0, 1, 0, 2, 44, 46, }, + { 2, 1, 0, 2, 44, 46, }, + { 1, 1, 0, 2, 44, 46, }, + { 0, 1, 0, 2, 48, 46, }, + { 2, 1, 0, 2, 48, 46, }, + { 1, 1, 0, 2, 48, 46, }, + { 0, 1, 0, 2, 52, 46, }, + { 2, 1, 0, 2, 52, 46, }, + { 1, 1, 0, 2, 52, 46, }, + { 0, 1, 0, 2, 56, 46, }, + { 2, 1, 0, 2, 56, 46, }, + { 1, 1, 0, 2, 56, 46, }, + { 0, 1, 0, 2, 60, 46, }, + { 2, 1, 0, 2, 60, 46, }, + { 1, 1, 0, 2, 60, 46, }, + { 0, 1, 0, 2, 64, 46, }, + { 2, 1, 0, 2, 64, 46, }, + { 1, 1, 0, 2, 64, 46, }, + { 0, 1, 0, 2, 100, 46, }, + { 2, 1, 0, 2, 100, 46, }, + { 1, 1, 0, 2, 100, 46, }, + { 0, 1, 0, 2, 104, 46, }, + { 2, 1, 0, 2, 104, 46, }, + { 1, 1, 0, 2, 104, 46, }, + { 0, 1, 0, 2, 108, 46, }, + { 2, 1, 0, 2, 108, 46, }, + { 1, 1, 0, 2, 108, 46, }, + { 0, 1, 0, 2, 112, 46, }, + { 2, 1, 0, 2, 112, 46, }, + { 1, 1, 0, 2, 112, 46, }, + { 0, 1, 0, 2, 116, 46, }, + { 2, 1, 0, 2, 116, 46, }, + { 1, 1, 0, 2, 116, 46, }, + { 0, 1, 0, 2, 120, 46, }, + { 2, 1, 0, 2, 120, 46, }, + { 1, 1, 0, 2, 120, 46, }, + { 0, 1, 0, 2, 124, 46, }, + { 2, 1, 0, 2, 124, 46, }, + { 1, 1, 0, 2, 124, 46, }, + { 0, 1, 0, 2, 128, 46, }, + { 2, 1, 0, 2, 128, 46, }, + { 1, 1, 0, 2, 128, 46, }, + { 0, 1, 0, 2, 132, 46, }, + { 2, 1, 0, 2, 132, 46, }, + { 1, 1, 0, 2, 132, 46, }, + { 0, 1, 0, 2, 136, 46, }, + { 2, 1, 0, 2, 136, 46, }, + { 1, 1, 0, 2, 136, 46, }, + { 0, 1, 0, 2, 140, 46, }, + { 2, 1, 0, 2, 140, 46, }, + { 1, 1, 0, 2, 140, 46, }, + { 0, 1, 0, 2, 149, 46, }, + { 2, 1, 0, 2, 149, 46, }, + { 1, 1, 0, 2, 149, 63, }, + { 0, 1, 0, 2, 153, 46, }, + { 2, 1, 0, 2, 153, 46, }, + { 1, 1, 0, 2, 153, 63, }, + { 0, 1, 0, 2, 157, 46, }, + { 2, 1, 0, 2, 157, 46, }, + { 1, 1, 0, 2, 157, 63, }, + { 0, 1, 0, 2, 161, 46, }, + { 2, 1, 0, 2, 161, 46, }, + { 1, 1, 0, 2, 161, 63, }, + { 0, 1, 0, 2, 165, 46, }, + { 2, 1, 0, 2, 165, 46, }, + { 1, 1, 0, 2, 165, 63, }, + { 0, 1, 0, 3, 36, 46, }, + { 2, 1, 0, 3, 36, 46, }, + { 1, 1, 0, 3, 36, 46, }, + { 0, 1, 0, 3, 40, 46, }, + { 2, 1, 0, 3, 40, 46, }, + { 1, 1, 0, 3, 40, 46, }, + { 0, 1, 0, 3, 44, 46, }, + { 2, 1, 0, 3, 44, 46, }, + { 1, 1, 0, 3, 44, 46, }, + { 0, 1, 0, 3, 48, 46, }, + { 2, 1, 0, 3, 48, 46, }, + { 1, 1, 0, 3, 48, 46, }, + { 0, 1, 0, 3, 52, 46, }, + { 2, 1, 0, 3, 52, 46, }, + { 1, 1, 0, 3, 52, 46, }, + { 0, 1, 0, 3, 56, 46, }, + { 2, 1, 0, 3, 56, 46, }, + { 1, 1, 0, 3, 56, 46, }, + { 0, 1, 0, 3, 60, 46, }, + { 2, 1, 0, 3, 60, 46, }, + { 1, 
1, 0, 3, 60, 46, }, + { 0, 1, 0, 3, 64, 46, }, + { 2, 1, 0, 3, 64, 46, }, + { 1, 1, 0, 3, 64, 46, }, + { 0, 1, 0, 3, 100, 46, }, + { 2, 1, 0, 3, 100, 46, }, + { 1, 1, 0, 3, 100, 46, }, + { 0, 1, 0, 3, 104, 46, }, + { 2, 1, 0, 3, 104, 46, }, + { 1, 1, 0, 3, 104, 46, }, + { 0, 1, 0, 3, 108, 46, }, + { 2, 1, 0, 3, 108, 46, }, + { 1, 1, 0, 3, 108, 46, }, + { 0, 1, 0, 3, 112, 46, }, + { 2, 1, 0, 3, 112, 46, }, + { 1, 1, 0, 3, 112, 46, }, + { 0, 1, 0, 3, 116, 46, }, + { 2, 1, 0, 3, 116, 46, }, + { 1, 1, 0, 3, 116, 46, }, + { 0, 1, 0, 3, 120, 46, }, + { 2, 1, 0, 3, 120, 46, }, + { 1, 1, 0, 3, 120, 46, }, + { 0, 1, 0, 3, 124, 46, }, + { 2, 1, 0, 3, 124, 46, }, + { 1, 1, 0, 3, 124, 46, }, + { 0, 1, 0, 3, 128, 46, }, + { 2, 1, 0, 3, 128, 46, }, + { 1, 1, 0, 3, 128, 46, }, + { 0, 1, 0, 3, 132, 46, }, + { 2, 1, 0, 3, 132, 46, }, + { 1, 1, 0, 3, 132, 46, }, + { 0, 1, 0, 3, 136, 46, }, + { 2, 1, 0, 3, 136, 46, }, + { 1, 1, 0, 3, 136, 46, }, + { 0, 1, 0, 3, 140, 46, }, + { 2, 1, 0, 3, 140, 46, }, + { 1, 1, 0, 3, 140, 46, }, + { 0, 1, 0, 3, 149, 46, }, + { 2, 1, 0, 3, 149, 46, }, + { 1, 1, 0, 3, 149, 63, }, + { 0, 1, 0, 3, 153, 46, }, + { 2, 1, 0, 3, 153, 46, }, + { 1, 1, 0, 3, 153, 63, }, + { 0, 1, 0, 3, 157, 46, }, + { 2, 1, 0, 3, 157, 46, }, + { 1, 1, 0, 3, 157, 63, }, + { 0, 1, 0, 3, 161, 46, }, + { 2, 1, 0, 3, 161, 46, }, + { 1, 1, 0, 3, 161, 63, }, + { 0, 1, 0, 3, 165, 46, }, + { 2, 1, 0, 3, 165, 46, }, + { 1, 1, 0, 3, 165, 63, }, + { 0, 1, 0, 6, 36, 46, }, + { 2, 1, 0, 6, 36, 46, }, + { 1, 1, 0, 6, 36, 46, }, + { 0, 1, 0, 6, 40, 46, }, + { 2, 1, 0, 6, 40, 46, }, + { 1, 1, 0, 6, 40, 46, }, + { 0, 1, 0, 6, 44, 46, }, + { 2, 1, 0, 6, 44, 46, }, + { 1, 1, 0, 6, 44, 46, }, + { 0, 1, 0, 6, 48, 46, }, + { 2, 1, 0, 6, 48, 46, }, + { 1, 1, 0, 6, 48, 46, }, + { 0, 1, 0, 6, 52, 46, }, + { 2, 1, 0, 6, 52, 46, }, + { 1, 1, 0, 6, 52, 46, }, + { 0, 1, 0, 6, 56, 46, }, + { 2, 1, 0, 6, 56, 46, }, + { 1, 1, 0, 6, 56, 46, }, + { 0, 1, 0, 6, 60, 46, }, + { 2, 1, 0, 6, 60, 46, }, + { 1, 1, 0, 6, 60, 46, }, + { 0, 1, 0, 6, 64, 46, }, + { 2, 1, 0, 6, 64, 46, }, + { 1, 1, 0, 6, 64, 46, }, + { 0, 1, 0, 6, 100, 46, }, + { 2, 1, 0, 6, 100, 46, }, + { 1, 1, 0, 6, 100, 46, }, + { 0, 1, 0, 6, 104, 46, }, + { 2, 1, 0, 6, 104, 46, }, + { 1, 1, 0, 6, 104, 46, }, + { 0, 1, 0, 6, 108, 46, }, + { 2, 1, 0, 6, 108, 46, }, + { 1, 1, 0, 6, 108, 46, }, + { 0, 1, 0, 6, 112, 46, }, + { 2, 1, 0, 6, 112, 46, }, + { 1, 1, 0, 6, 112, 46, }, + { 0, 1, 0, 6, 116, 46, }, + { 2, 1, 0, 6, 116, 46, }, + { 1, 1, 0, 6, 116, 46, }, + { 0, 1, 0, 6, 120, 46, }, + { 2, 1, 0, 6, 120, 46, }, + { 1, 1, 0, 6, 120, 46, }, + { 0, 1, 0, 6, 124, 46, }, + { 2, 1, 0, 6, 124, 46, }, + { 1, 1, 0, 6, 124, 46, }, + { 0, 1, 0, 6, 128, 46, }, + { 2, 1, 0, 6, 128, 46, }, + { 1, 1, 0, 6, 128, 46, }, + { 0, 1, 0, 6, 132, 46, }, + { 2, 1, 0, 6, 132, 46, }, + { 1, 1, 0, 6, 132, 46, }, + { 0, 1, 0, 6, 136, 46, }, + { 2, 1, 0, 6, 136, 46, }, + { 1, 1, 0, 6, 136, 46, }, + { 0, 1, 0, 6, 140, 46, }, + { 2, 1, 0, 6, 140, 46, }, + { 1, 1, 0, 6, 140, 46, }, + { 0, 1, 0, 6, 149, 46, }, + { 2, 1, 0, 6, 149, 46, }, + { 1, 1, 0, 6, 149, 63, }, + { 0, 1, 0, 6, 153, 46, }, + { 2, 1, 0, 6, 153, 46, }, + { 1, 1, 0, 6, 153, 63, }, + { 0, 1, 0, 6, 157, 46, }, + { 2, 1, 0, 6, 157, 46, }, + { 1, 1, 0, 6, 157, 63, }, + { 0, 1, 0, 6, 161, 46, }, + { 2, 1, 0, 6, 161, 46, }, + { 1, 1, 0, 6, 161, 63, }, + { 0, 1, 0, 6, 165, 46, }, + { 2, 1, 0, 6, 165, 46, }, + { 1, 1, 0, 6, 165, 63, }, + { 0, 1, 0, 7, 36, 46, }, + { 2, 1, 0, 7, 36, 46, }, + { 1, 1, 0, 7, 36, 46, }, + { 0, 1, 0, 7, 40, 46, }, + { 2, 1, 
0, 7, 40, 46, }, + { 1, 1, 0, 7, 40, 46, }, + { 0, 1, 0, 7, 44, 46, }, + { 2, 1, 0, 7, 44, 46, }, + { 1, 1, 0, 7, 44, 46, }, + { 0, 1, 0, 7, 48, 46, }, + { 2, 1, 0, 7, 48, 46, }, + { 1, 1, 0, 7, 48, 46, }, + { 0, 1, 0, 7, 52, 46, }, + { 2, 1, 0, 7, 52, 46, }, + { 1, 1, 0, 7, 52, 46, }, + { 0, 1, 0, 7, 56, 46, }, + { 2, 1, 0, 7, 56, 46, }, + { 1, 1, 0, 7, 56, 46, }, + { 0, 1, 0, 7, 60, 46, }, + { 2, 1, 0, 7, 60, 46, }, + { 1, 1, 0, 7, 60, 46, }, + { 0, 1, 0, 7, 64, 46, }, + { 2, 1, 0, 7, 64, 46, }, + { 1, 1, 0, 7, 64, 46, }, + { 0, 1, 0, 7, 100, 46, }, + { 2, 1, 0, 7, 100, 46, }, + { 1, 1, 0, 7, 100, 46, }, + { 0, 1, 0, 7, 104, 46, }, + { 2, 1, 0, 7, 104, 46, }, + { 1, 1, 0, 7, 104, 46, }, + { 0, 1, 0, 7, 108, 46, }, + { 2, 1, 0, 7, 108, 46, }, + { 1, 1, 0, 7, 108, 46, }, + { 0, 1, 0, 7, 112, 46, }, + { 2, 1, 0, 7, 112, 46, }, + { 1, 1, 0, 7, 112, 46, }, + { 0, 1, 0, 7, 116, 46, }, + { 2, 1, 0, 7, 116, 46, }, + { 1, 1, 0, 7, 116, 46, }, + { 0, 1, 0, 7, 120, 46, }, + { 2, 1, 0, 7, 120, 46, }, + { 1, 1, 0, 7, 120, 46, }, + { 0, 1, 0, 7, 124, 46, }, + { 2, 1, 0, 7, 124, 46, }, + { 1, 1, 0, 7, 124, 46, }, + { 0, 1, 0, 7, 128, 46, }, + { 2, 1, 0, 7, 128, 46, }, + { 1, 1, 0, 7, 128, 46, }, + { 0, 1, 0, 7, 132, 46, }, + { 2, 1, 0, 7, 132, 46, }, + { 1, 1, 0, 7, 132, 46, }, + { 0, 1, 0, 7, 136, 46, }, + { 2, 1, 0, 7, 136, 46, }, + { 1, 1, 0, 7, 136, 46, }, + { 0, 1, 0, 7, 140, 46, }, + { 2, 1, 0, 7, 140, 46, }, + { 1, 1, 0, 7, 140, 46, }, + { 0, 1, 0, 7, 149, 46, }, + { 2, 1, 0, 7, 149, 46, }, + { 1, 1, 0, 7, 149, 63, }, + { 0, 1, 0, 7, 153, 46, }, + { 2, 1, 0, 7, 153, 46, }, + { 1, 1, 0, 7, 153, 63, }, + { 0, 1, 0, 7, 157, 46, }, + { 2, 1, 0, 7, 157, 46, }, + { 1, 1, 0, 7, 157, 63, }, + { 0, 1, 0, 7, 161, 46, }, + { 2, 1, 0, 7, 161, 46, }, + { 1, 1, 0, 7, 161, 63, }, + { 0, 1, 0, 7, 165, 46, }, + { 2, 1, 0, 7, 165, 46, }, + { 1, 1, 0, 7, 165, 63, }, + { 0, 1, 1, 2, 38, 46, }, + { 2, 1, 1, 2, 38, 46, }, + { 1, 1, 1, 2, 38, 46, }, + { 0, 1, 1, 2, 46, 46, }, + { 2, 1, 1, 2, 46, 46, }, + { 1, 1, 1, 2, 46, 46, }, + { 0, 1, 1, 2, 54, 46, }, + { 2, 1, 1, 2, 54, 46, }, + { 1, 1, 1, 2, 54, 46, }, + { 0, 1, 1, 2, 62, 46, }, + { 2, 1, 1, 2, 62, 46, }, + { 1, 1, 1, 2, 62, 46, }, + { 0, 1, 1, 2, 102, 46, }, + { 2, 1, 1, 2, 102, 46, }, + { 1, 1, 1, 2, 102, 46, }, + { 0, 1, 1, 2, 110, 46, }, + { 2, 1, 1, 2, 110, 46, }, + { 1, 1, 1, 2, 110, 46, }, + { 0, 1, 1, 2, 118, 46, }, + { 2, 1, 1, 2, 118, 46, }, + { 1, 1, 1, 2, 118, 46, }, + { 0, 1, 1, 2, 126, 46, }, + { 2, 1, 1, 2, 126, 46, }, + { 1, 1, 1, 2, 126, 46, }, + { 0, 1, 1, 2, 134, 46, }, + { 2, 1, 1, 2, 134, 46, }, + { 1, 1, 1, 2, 134, 46, }, + { 0, 1, 1, 2, 151, 46, }, + { 2, 1, 1, 2, 151, 46, }, + { 1, 1, 1, 2, 151, 63, }, + { 0, 1, 1, 2, 159, 46, }, + { 2, 1, 1, 2, 159, 46, }, + { 1, 1, 1, 2, 159, 63, }, + { 0, 1, 1, 3, 38, 46, }, + { 2, 1, 1, 3, 38, 46, }, + { 1, 1, 1, 3, 38, 46, }, + { 0, 1, 1, 3, 46, 46, }, + { 2, 1, 1, 3, 46, 46, }, + { 1, 1, 1, 3, 46, 46, }, + { 0, 1, 1, 3, 54, 46, }, + { 2, 1, 1, 3, 54, 46, }, + { 1, 1, 1, 3, 54, 46, }, + { 0, 1, 1, 3, 62, 46, }, + { 2, 1, 1, 3, 62, 46, }, + { 1, 1, 1, 3, 62, 46, }, + { 0, 1, 1, 3, 102, 46, }, + { 2, 1, 1, 3, 102, 46, }, + { 1, 1, 1, 3, 102, 46, }, + { 0, 1, 1, 3, 110, 46, }, + { 2, 1, 1, 3, 110, 46, }, + { 1, 1, 1, 3, 110, 46, }, + { 0, 1, 1, 3, 118, 46, }, + { 2, 1, 1, 3, 118, 46, }, + { 1, 1, 1, 3, 118, 46, }, + { 0, 1, 1, 3, 126, 46, }, + { 2, 1, 1, 3, 126, 46, }, + { 1, 1, 1, 3, 126, 46, }, + { 0, 1, 1, 3, 134, 46, }, + { 2, 1, 1, 3, 134, 46, }, + { 1, 1, 1, 3, 134, 46, }, + { 0, 1, 1, 3, 151, 46, 
}, + { 2, 1, 1, 3, 151, 46, }, + { 1, 1, 1, 3, 151, 63, }, + { 0, 1, 1, 3, 159, 46, }, + { 2, 1, 1, 3, 159, 46, }, + { 1, 1, 1, 3, 159, 63, }, + { 0, 1, 1, 6, 38, 46, }, + { 2, 1, 1, 6, 38, 46, }, + { 1, 1, 1, 6, 38, 46, }, + { 0, 1, 1, 6, 46, 46, }, + { 2, 1, 1, 6, 46, 46, }, + { 1, 1, 1, 6, 46, 46, }, + { 0, 1, 1, 6, 54, 46, }, + { 2, 1, 1, 6, 54, 46, }, + { 1, 1, 1, 6, 54, 46, }, + { 0, 1, 1, 6, 62, 46, }, + { 2, 1, 1, 6, 62, 46, }, + { 1, 1, 1, 6, 62, 46, }, + { 0, 1, 1, 6, 102, 46, }, + { 2, 1, 1, 6, 102, 46, }, + { 1, 1, 1, 6, 102, 46, }, + { 0, 1, 1, 6, 110, 46, }, + { 2, 1, 1, 6, 110, 46, }, + { 1, 1, 1, 6, 110, 46, }, + { 0, 1, 1, 6, 118, 46, }, + { 2, 1, 1, 6, 118, 46, }, + { 1, 1, 1, 6, 118, 46, }, + { 0, 1, 1, 6, 126, 46, }, + { 2, 1, 1, 6, 126, 46, }, + { 1, 1, 1, 6, 126, 46, }, + { 0, 1, 1, 6, 134, 46, }, + { 2, 1, 1, 6, 134, 46, }, + { 1, 1, 1, 6, 134, 46, }, + { 0, 1, 1, 6, 151, 46, }, + { 2, 1, 1, 6, 151, 46, }, + { 1, 1, 1, 6, 151, 63, }, + { 0, 1, 1, 6, 159, 46, }, + { 2, 1, 1, 6, 159, 46, }, + { 1, 1, 1, 6, 159, 63, }, + { 0, 1, 1, 7, 38, 46, }, + { 2, 1, 1, 7, 38, 46, }, + { 1, 1, 1, 7, 38, 46, }, + { 0, 1, 1, 7, 46, 46, }, + { 2, 1, 1, 7, 46, 46, }, + { 1, 1, 1, 7, 46, 46, }, + { 0, 1, 1, 7, 54, 46, }, + { 2, 1, 1, 7, 54, 46, }, + { 1, 1, 1, 7, 54, 46, }, + { 0, 1, 1, 7, 62, 46, }, + { 2, 1, 1, 7, 62, 46, }, + { 1, 1, 1, 7, 62, 46, }, + { 0, 1, 1, 7, 102, 46, }, + { 2, 1, 1, 7, 102, 46, }, + { 1, 1, 1, 7, 102, 46, }, + { 0, 1, 1, 7, 110, 46, }, + { 2, 1, 1, 7, 110, 46, }, + { 1, 1, 1, 7, 110, 46, }, + { 0, 1, 1, 7, 118, 46, }, + { 2, 1, 1, 7, 118, 46, }, + { 1, 1, 1, 7, 118, 46, }, + { 0, 1, 1, 7, 126, 46, }, + { 2, 1, 1, 7, 126, 46, }, + { 1, 1, 1, 7, 126, 46, }, + { 0, 1, 1, 7, 134, 46, }, + { 2, 1, 1, 7, 134, 46, }, + { 1, 1, 1, 7, 134, 46, }, + { 0, 1, 1, 7, 151, 46, }, + { 2, 1, 1, 7, 151, 46, }, + { 1, 1, 1, 7, 151, 63, }, + { 0, 1, 1, 7, 159, 46, }, + { 2, 1, 1, 7, 159, 46, }, + { 1, 1, 1, 7, 159, 63, }, + { 0, 1, 2, 4, 42, 46, }, + { 2, 1, 2, 4, 42, 46, }, + { 1, 1, 2, 4, 42, 46, }, + { 0, 1, 2, 4, 58, 46, }, + { 2, 1, 2, 4, 58, 46, }, + { 1, 1, 2, 4, 58, 46, }, + { 0, 1, 2, 4, 106, 46, }, + { 2, 1, 2, 4, 106, 46, }, + { 1, 1, 2, 4, 106, 46, }, + { 0, 1, 2, 4, 122, 46, }, + { 2, 1, 2, 4, 122, 46, }, + { 1, 1, 2, 4, 122, 46, }, + { 0, 1, 2, 4, 155, 46, }, + { 2, 1, 2, 4, 155, 46, }, + { 1, 1, 2, 4, 155, 63, }, + { 0, 1, 2, 5, 42, 46, }, + { 2, 1, 2, 5, 42, 46, }, + { 1, 1, 2, 5, 42, 46, }, + { 0, 1, 2, 5, 58, 46, }, + { 2, 1, 2, 5, 58, 46, }, + { 1, 1, 2, 5, 58, 46, }, + { 0, 1, 2, 5, 106, 46, }, + { 2, 1, 2, 5, 106, 46, }, + { 1, 1, 2, 5, 106, 46, }, + { 0, 1, 2, 5, 122, 46, }, + { 2, 1, 2, 5, 122, 46, }, + { 1, 1, 2, 5, 122, 46, }, + { 0, 1, 2, 5, 155, 46, }, + { 2, 1, 2, 5, 155, 46, }, + { 1, 1, 2, 5, 155, 63, }, + { 0, 1, 2, 8, 42, 46, }, + { 2, 1, 2, 8, 42, 46, }, + { 1, 1, 2, 8, 42, 46, }, + { 0, 1, 2, 8, 58, 46, }, + { 2, 1, 2, 8, 58, 46, }, + { 1, 1, 2, 8, 58, 46, }, + { 0, 1, 2, 8, 106, 46, }, + { 2, 1, 2, 8, 106, 46, }, + { 1, 1, 2, 8, 106, 46, }, + { 0, 1, 2, 8, 122, 46, }, + { 2, 1, 2, 8, 122, 46, }, + { 1, 1, 2, 8, 122, 46, }, + { 0, 1, 2, 8, 155, 46, }, + { 2, 1, 2, 8, 155, 46, }, + { 1, 1, 2, 8, 155, 63, }, + { 0, 1, 2, 9, 42, 46, }, + { 2, 1, 2, 9, 42, 46, }, + { 1, 1, 2, 9, 42, 46, }, + { 0, 1, 2, 9, 58, 46, }, + { 2, 1, 2, 9, 58, 46, }, + { 1, 1, 2, 9, 58, 46, }, + { 0, 1, 2, 9, 106, 46, }, + { 2, 1, 2, 9, 106, 46, }, + { 1, 1, 2, 9, 106, 46, }, + { 0, 1, 2, 9, 122, 46, }, + { 2, 1, 2, 9, 122, 46, }, + { 1, 1, 2, 9, 122, 46, }, + { 0, 1, 2, 9, 
155, 46, }, + { 2, 1, 2, 9, 155, 46, }, + { 1, 1, 2, 9, 155, 63, }, +}; + +RTW_DECL_TABLE_TXPWR_LMT(rtw8814a_txpwr_lmt_type8); + +static const u8 +rtw8814a_pwrtrk_5gd_n[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = { + {0, 1, 1, 2, 3, 3, 4, 5, 5, 6, 7, 7, 8, 9, 9, 10, 11, + 11, 12, 13, 13, 14, 15, 15, 16, 17, 17, 18, 19, 19}, + {0, 1, 1, 2, 2, 3, 4, 4, 5, 6, 6, 7, 7, 8, 9, 9, 10, 10, + 11, 12, 12, 13, 13, 14, 15, 15, 16, 17, 17, 18}, + {0, 1, 1, 2, 3, 3, 4, 5, 5, 6, 6, 7, 8, 8, 9, 10, 10, + 11, 12, 12, 13, 14, 14, 15, 16, 16, 17, 17, 18, 19}, +}; + +static const u8 +rtw8814a_pwrtrk_5gd_p[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = { + {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, + 17, 18, 19, 20, 21, 22, 23, 24, 25, 25, 25, 25, 25}, + {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, + 17, 17, 18, 19, 20, 21, 22, 23, 24, 25, 25, 25, 25}, + {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, + 17, 17, 18, 19, 20, 21, 22, 23, 24, 25, 25, 25, 25}, +}; + +static const u8 +rtw8814a_pwrtrk_5gc_n[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = { + {0, 1, 1, 2, 2, 3, 4, 4, 5, 5, 6, 7, 7, 8, 8, 9, 10, 10, + 11, 12, 13, 14, 15, 15, 15, 15, 16, 16, 17, 18}, + {0, 1, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 8, 9, 10, 10, 11, + 12, 12, 13, 14, 15, 15, 16, 17, 17, 18, 19, 19, 20}, + {0, 1, 1, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 10, 11, 11, + 12, 13, 13, 14, 14, 15, 16, 17, 18, 18, 19, 20, 20}, +}; + +static const u8 +rtw8814a_pwrtrk_5gc_p[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = { + {0, 1, 2, 3, 4, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, + 16, 16, 17, 18, 19, 20, 21, 21, 22, 23, 24, 25, 25}, + {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, + 17, 17, 18, 19, 20, 21, 22, 23, 24, 24, 25, 25, 25}, + {0, 1, 2, 3, 4, 5, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, + 16, 16, 17, 18, 19, 20, 21, 22, 23, 23, 24, 25, 25}, +}; + +static const u8 +rtw8814a_pwrtrk_5gb_n[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = { + {0, 1, 1, 2, 2, 3, 4, 4, 5, 5, 6, 7, 7, 8, 8, 9, 10, 10, + 11, 11, 12, 13, 13, 14, 14, 15, 15, 16, 17, 17}, + {0, 1, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 8, 9, 10, 10, 11, + 12, 12, 13, 14, 15, 15, 16, 17, 17, 18, 19, 19, 20}, + {0, 1, 1, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 10, 11, 11, + 12, 13, 13, 14, 14, 15, 16, 17, 18, 18, 19, 20, 20}, +}; + +static const u8 +rtw8814a_pwrtrk_5gb_p[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = { + {0, 1, 2, 3, 3, 4, 5, 6, 7, 8, 8, 9, 10, 11, 12, 13, 14, + 15, 15, 16, 17, 18, 18, 19, 20, 21, 22, 23, 23, 24}, + {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, + 17, 18, 18, 19, 20, 21, 22, 23, 24, 25, 25, 25, 25}, + {0, 1, 2, 3, 4, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, + 16, 16, 17, 18, 19, 20, 20, 21, 22, 23, 24, 25, 25}, +}; + +static const u8 +rtw8814a_pwrtrk_5ga_n[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = { + {0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, + 8, 9, 9, 10, 11, 11, 11, 11, 12, 12, 13, 13, 14}, + {0, 1, 1, 2, 3, 4, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11, 11, + 12, 13, 14, 14, 15, 16, 16, 17, 18, 19, 19, 20, 21}, + {0, 1, 1, 2, 3, 3, 4, 5, 5, 6, 6, 7, 8, 8, 9, 10, 10, + 11, 12, 12, 13, 14, 14, 15, 16, 16, 17, 17, 18, 19}, +}; + +static const u8 +rtw8814a_pwrtrk_5ga_p[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = { + {0, 1, 2, 2, 3, 4, 5, 6, 7, 7, 8, 9, 10, 11, 12, 12, 13, + 14, 15, 16, 16, 17, 18, 19, 20, 21, 21, 22, 23, 24}, + {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, + 17, 18, 19, 20, 21, 22, 23, 23, 24, 25, 25, 25, 25}, + {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, + 17, 17, 18, 19, 20, 21, 22, 23, 24, 25, 25, 25, 
25}, +}; + +static const u8 rtw8814a_pwrtrk_2gd_n[RTW_PWR_TRK_TBL_SZ] = { + 0, 0, 1, 1, 2, 2, 3, 3, 3, 4, 4, 5, 5, 6, 6, 6, 7, + 7, 8, 8, 9, 9, 9, 10, 10, 11, 11, 12, 12, 12 +}; + +static const u8 rtw8814a_pwrtrk_2gd_p[RTW_PWR_TRK_TBL_SZ] = { + 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 4, 5, 5, 6, 6, 7, 7, + 8, 8, 9, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13 +}; + +static const u8 rtw8814a_pwrtrk_2gc_n[RTW_PWR_TRK_TBL_SZ] = { + 0, 0, 1, 1, 2, 2, 2, 3, 3, 4, 4, 5, 5, 5, 6, 6, 7, + 7, 7, 8, 8, 9, 9, 10, 10, 10, 11, 11, 12, 12 +}; + +static const u8 rtw8814a_pwrtrk_2gc_p[RTW_PWR_TRK_TBL_SZ] = { + 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 4, 5, 5, 6, 6, 7, 7, + 8, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 12, 13 +}; + +static const u8 rtw8814a_pwrtrk_2gb_n[RTW_PWR_TRK_TBL_SZ] = { + 0, 0, 1, 1, 2, 2, 3, 3, 3, 4, 4, 5, 5, 5, 6, 6, 7, + 7, 8, 8, 8, 9, 9, 10, 10, 11, 11, 11, 12, 12 +}; + +static const u8 rtw8814a_pwrtrk_2gb_p[RTW_PWR_TRK_TBL_SZ] = { + 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 4, 5, 5, 6, 6, 7, 7, + 8, 8, 9, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13 +}; + +static const u8 rtw8814a_pwrtrk_2ga_n[RTW_PWR_TRK_TBL_SZ] = { + 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 4, 5, 5, 6, 6, 7, 7, + 7, 8, 8, 9, 9, 10, 10, 11, 11, 11, 12, 12, 13 +}; + +static const u8 rtw8814a_pwrtrk_2ga_p[RTW_PWR_TRK_TBL_SZ] = { + 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, + 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14 +}; + +static const u8 rtw8814a_pwrtrk_2g_cck_d_n[RTW_PWR_TRK_TBL_SZ] = { + 0, 0, 1, 1, 2, 2, 3, 3, 3, 4, 4, 5, 5, 5, 6, 6, 7, + 7, 8, 8, 8, 9, 9, 10, 10, 10, 11, 11, 12, 12 +}; + +static const u8 rtw8814a_pwrtrk_2g_cck_d_p[RTW_PWR_TRK_TBL_SZ] = { + 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 4, 5, 5, 6, 6, 7, 7, + 8, 8, 9, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13 +}; + +static const u8 rtw8814a_pwrtrk_2g_cck_c_n[RTW_PWR_TRK_TBL_SZ] = { + 0, 0, 1, 1, 2, 2, 2, 3, 3, 4, 4, 4, 5, 5, 6, 6, 6, + 7, 7, 8, 8, 8, 9, 9, 10, 10, 10, 11, 11, 12 +}; + +static const u8 rtw8814a_pwrtrk_2g_cck_c_p[RTW_PWR_TRK_TBL_SZ] = { + 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 4, 5, 5, 6, 6, 7, 7, + 7, 8, 8, 9, 9, 10, 10, 11, 11, 11, 12, 12, 13 +}; + +static const u8 rtw8814a_pwrtrk_2g_cck_b_n[RTW_PWR_TRK_TBL_SZ] = { + 0, 0, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 5, 5, 5, 6, + 6, 6, 7, 7, 8, 8, 8, 9, 9, 10, 10, 10, 11, 11 +}; + +static const u8 rtw8814a_pwrtrk_2g_cck_b_p[RTW_PWR_TRK_TBL_SZ] = { + 0, 0, 1, 1, 2, 2, 3, 3, 3, 4, 4, 5, 5, 6, 6, 6, 7, + 7, 8, 8, 9, 9, 9, 10, 10, 11, 11, 12, 12, 12 +}; + +static const u8 rtw8814a_pwrtrk_2g_cck_a_n[RTW_PWR_TRK_TBL_SZ] = { + 0, 0, 1, 1, 2, 2, 3, 3, 3, 4, 4, 5, 5, 5, 6, 6, 7, + 7, 8, 8, 8, 9, 9, 10, 10, 11, 11, 11, 12, 12 +}; + +static const u8 rtw8814a_pwrtrk_2g_cck_a_p[RTW_PWR_TRK_TBL_SZ] = { + 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 7, + 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14 +}; + +const struct rtw_pwr_track_tbl rtw8814a_rtw_pwrtrk_tbl = { + .pwrtrk_5gd_n[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_5gd_n[RTW_PWR_TRK_5G_1], + .pwrtrk_5gd_n[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_5gd_n[RTW_PWR_TRK_5G_2], + .pwrtrk_5gd_n[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_5gd_n[RTW_PWR_TRK_5G_3], + .pwrtrk_5gd_p[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_5gd_p[RTW_PWR_TRK_5G_1], + .pwrtrk_5gd_p[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_5gd_p[RTW_PWR_TRK_5G_2], + .pwrtrk_5gd_p[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_5gd_p[RTW_PWR_TRK_5G_3], + .pwrtrk_5gc_n[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_5gc_n[RTW_PWR_TRK_5G_1], + .pwrtrk_5gc_n[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_5gc_n[RTW_PWR_TRK_5G_2], + .pwrtrk_5gc_n[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_5gc_n[RTW_PWR_TRK_5G_3], + .pwrtrk_5gc_p[RTW_PWR_TRK_5G_1] = 
rtw8814a_pwrtrk_5gc_p[RTW_PWR_TRK_5G_1], + .pwrtrk_5gc_p[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_5gc_p[RTW_PWR_TRK_5G_2], + .pwrtrk_5gc_p[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_5gc_p[RTW_PWR_TRK_5G_3], + .pwrtrk_5gb_n[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_5gb_n[RTW_PWR_TRK_5G_1], + .pwrtrk_5gb_n[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_5gb_n[RTW_PWR_TRK_5G_2], + .pwrtrk_5gb_n[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_5gb_n[RTW_PWR_TRK_5G_3], + .pwrtrk_5gb_p[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_5gb_p[RTW_PWR_TRK_5G_1], + .pwrtrk_5gb_p[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_5gb_p[RTW_PWR_TRK_5G_2], + .pwrtrk_5gb_p[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_5gb_p[RTW_PWR_TRK_5G_3], + .pwrtrk_5ga_n[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_5ga_n[RTW_PWR_TRK_5G_1], + .pwrtrk_5ga_n[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_5ga_n[RTW_PWR_TRK_5G_2], + .pwrtrk_5ga_n[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_5ga_n[RTW_PWR_TRK_5G_3], + .pwrtrk_5ga_p[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_5ga_p[RTW_PWR_TRK_5G_1], + .pwrtrk_5ga_p[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_5ga_p[RTW_PWR_TRK_5G_2], + .pwrtrk_5ga_p[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_5ga_p[RTW_PWR_TRK_5G_3], + .pwrtrk_2gd_n = rtw8814a_pwrtrk_2gd_n, + .pwrtrk_2gd_p = rtw8814a_pwrtrk_2gd_p, + .pwrtrk_2gc_n = rtw8814a_pwrtrk_2gc_n, + .pwrtrk_2gc_p = rtw8814a_pwrtrk_2gc_p, + .pwrtrk_2gb_n = rtw8814a_pwrtrk_2gb_n, + .pwrtrk_2gb_p = rtw8814a_pwrtrk_2gb_p, + .pwrtrk_2ga_n = rtw8814a_pwrtrk_2ga_n, + .pwrtrk_2ga_p = rtw8814a_pwrtrk_2ga_p, + .pwrtrk_2g_cckd_n = rtw8814a_pwrtrk_2g_cck_d_n, + .pwrtrk_2g_cckd_p = rtw8814a_pwrtrk_2g_cck_d_p, + .pwrtrk_2g_cckc_n = rtw8814a_pwrtrk_2g_cck_c_n, + .pwrtrk_2g_cckc_p = rtw8814a_pwrtrk_2g_cck_c_p, + .pwrtrk_2g_cckb_n = rtw8814a_pwrtrk_2g_cck_b_n, + .pwrtrk_2g_cckb_p = rtw8814a_pwrtrk_2g_cck_b_p, + .pwrtrk_2g_ccka_n = rtw8814a_pwrtrk_2g_cck_a_n, + .pwrtrk_2g_ccka_p = rtw8814a_pwrtrk_2g_cck_a_p, +}; + +static const u8 +rtw8814a_pwrtrk_type0_5gd_n[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = { + {0, 0, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, + 8, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10}, + {0, 0, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, + 8, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10}, + {0, 0, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, + 8, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10}, +}; + +static const u8 +rtw8814a_pwrtrk_type0_5gd_p[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = { + {0, 0, 1, 1, 2, 3, 3, 4, 5, 5, 6, 6, 7, 8, 8, 9, 10, 10, + 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12}, + {0, 0, 1, 1, 2, 3, 3, 4, 5, 5, 6, 6, 7, 8, 8, 9, 10, 10, + 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12}, + {0, 0, 1, 1, 2, 3, 3, 4, 5, 5, 6, 6, 7, 8, 8, 9, 10, 10, + 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12}, +}; + +static const u8 +rtw8814a_pwrtrk_type0_5gc_n[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = { + {0, 0, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, + 8, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10}, + {0, 0, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, + 8, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10}, + {0, 0, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, + 8, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10}, +}; + +static const u8 +rtw8814a_pwrtrk_type0_5gc_p[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = { + {0, 0, 1, 1, 2, 3, 3, 4, 5, 5, 6, 6, 7, 8, 8, 9, 10, 10, + 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12}, + {0, 0, 1, 1, 2, 3, 3, 4, 5, 5, 6, 6, 7, 8, 8, 9, 10, 10, + 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12}, + {0, 0, 1, 1, 2, 3, 3, 4, 5, 5, 6, 6, 7, 8, 8, 9, 10, 10, + 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12}, +}; + +static const u8 
+rtw8814a_pwrtrk_type0_5gb_n[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = { + {0, 0, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, + 8, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10}, + {0, 0, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, + 8, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10}, + {0, 0, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, + 8, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10}, +}; + +static const u8 +rtw8814a_pwrtrk_type0_5gb_p[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = { + {0, 0, 1, 1, 2, 3, 3, 4, 5, 5, 6, 6, 7, 8, 8, 9, 10, 10, + 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12}, + {0, 0, 1, 1, 2, 3, 3, 4, 5, 5, 6, 6, 7, 8, 8, 9, 10, 10, + 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12}, + {0, 0, 1, 1, 2, 3, 3, 4, 5, 5, 6, 6, 7, 8, 8, 9, 10, 10, + 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12}, +}; + +static const u8 +rtw8814a_pwrtrk_type0_5ga_n[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = { + {0, 0, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, + 8, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10}, + {0, 0, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, + 8, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10}, + {0, 0, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, + 8, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10}, +}; + +static const u8 +rtw8814a_pwrtrk_type0_5ga_p[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = { + {0, 0, 1, 1, 2, 3, 3, 4, 5, 5, 6, 6, 7, 8, 8, 9, 10, 10, + 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12}, + {0, 0, 1, 1, 2, 3, 3, 4, 5, 5, 6, 6, 7, 8, 8, 9, 10, 10, + 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12}, + {0, 0, 1, 1, 2, 3, 3, 4, 5, 5, 6, 6, 7, 8, 8, 9, 10, 10, + 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12}, +}; + +static const u8 rtw8814a_pwrtrk_type0_2gd_n[RTW_PWR_TRK_TBL_SZ] = { + 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 5, 6, 7, 7, + 7, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9 +}; + +static const u8 rtw8814a_pwrtrk_type0_2gd_p[RTW_PWR_TRK_TBL_SZ] = { + 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, + 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9 +}; + +static const u8 rtw8814a_pwrtrk_type0_2gc_n[RTW_PWR_TRK_TBL_SZ] = { + 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 5, 6, 7, 7, + 7, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9 +}; + +static const u8 rtw8814a_pwrtrk_type0_2gc_p[RTW_PWR_TRK_TBL_SZ] = { + 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, + 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9 +}; + +static const u8 rtw8814a_pwrtrk_type0_2gb_n[RTW_PWR_TRK_TBL_SZ] = { + 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 5, 6, 7, 7, + 7, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9 +}; + +static const u8 rtw8814a_pwrtrk_type0_2gb_p[RTW_PWR_TRK_TBL_SZ] = { + 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, + 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9 +}; + +static const u8 rtw8814a_pwrtrk_type0_2ga_n[RTW_PWR_TRK_TBL_SZ] = { + 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 5, 6, 7, 7, + 7, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9 +}; + +static const u8 rtw8814a_pwrtrk_type0_2ga_p[RTW_PWR_TRK_TBL_SZ] = { + 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, + 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9 +}; + +static const u8 rtw8814a_pwrtrk_type0_2g_cck_d_n[RTW_PWR_TRK_TBL_SZ] = { + 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 5, 6, 7, 7, + 7, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9 +}; + +static const u8 rtw8814a_pwrtrk_type0_2g_cck_d_p[RTW_PWR_TRK_TBL_SZ] = { + 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, + 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9 +}; + +static const u8 rtw8814a_pwrtrk_type0_2g_cck_c_n[RTW_PWR_TRK_TBL_SZ] = { + 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 5, 6, 7, 7, + 7, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9 +}; + +static const u8 
rtw8814a_pwrtrk_type0_2g_cck_c_p[RTW_PWR_TRK_TBL_SZ] = { + 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, + 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9 +}; + +static const u8 rtw8814a_pwrtrk_type0_2g_cck_b_n[RTW_PWR_TRK_TBL_SZ] = { + 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 5, 6, 7, 7, + 7, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9 +}; + +static const u8 rtw8814a_pwrtrk_type0_2g_cck_b_p[RTW_PWR_TRK_TBL_SZ] = { + 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, + 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9 +}; + +static const u8 rtw8814a_pwrtrk_type0_2g_cck_a_n[RTW_PWR_TRK_TBL_SZ] = { + 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 5, 6, 7, 7, + 7, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9 +}; + +static const u8 rtw8814a_pwrtrk_type0_2g_cck_a_p[RTW_PWR_TRK_TBL_SZ] = { + 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, + 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9 +}; + +const struct rtw_pwr_track_tbl rtw8814a_rtw_pwrtrk_type0_tbl = { + .pwrtrk_5gd_n[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_type0_5gd_n[RTW_PWR_TRK_5G_1], + .pwrtrk_5gd_n[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_type0_5gd_n[RTW_PWR_TRK_5G_2], + .pwrtrk_5gd_n[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_type0_5gd_n[RTW_PWR_TRK_5G_3], + .pwrtrk_5gd_p[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_type0_5gd_p[RTW_PWR_TRK_5G_1], + .pwrtrk_5gd_p[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_type0_5gd_p[RTW_PWR_TRK_5G_2], + .pwrtrk_5gd_p[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_type0_5gd_p[RTW_PWR_TRK_5G_3], + .pwrtrk_5gc_n[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_type0_5gc_n[RTW_PWR_TRK_5G_1], + .pwrtrk_5gc_n[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_type0_5gc_n[RTW_PWR_TRK_5G_2], + .pwrtrk_5gc_n[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_type0_5gc_n[RTW_PWR_TRK_5G_3], + .pwrtrk_5gc_p[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_type0_5gc_p[RTW_PWR_TRK_5G_1], + .pwrtrk_5gc_p[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_type0_5gc_p[RTW_PWR_TRK_5G_2], + .pwrtrk_5gc_p[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_type0_5gc_p[RTW_PWR_TRK_5G_3], + .pwrtrk_5gb_n[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_type0_5gb_n[RTW_PWR_TRK_5G_1], + .pwrtrk_5gb_n[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_type0_5gb_n[RTW_PWR_TRK_5G_2], + .pwrtrk_5gb_n[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_type0_5gb_n[RTW_PWR_TRK_5G_3], + .pwrtrk_5gb_p[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_type0_5gb_p[RTW_PWR_TRK_5G_1], + .pwrtrk_5gb_p[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_type0_5gb_p[RTW_PWR_TRK_5G_2], + .pwrtrk_5gb_p[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_type0_5gb_p[RTW_PWR_TRK_5G_3], + .pwrtrk_5ga_n[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_type0_5ga_n[RTW_PWR_TRK_5G_1], + .pwrtrk_5ga_n[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_type0_5ga_n[RTW_PWR_TRK_5G_2], + .pwrtrk_5ga_n[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_type0_5ga_n[RTW_PWR_TRK_5G_3], + .pwrtrk_5ga_p[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_type0_5ga_p[RTW_PWR_TRK_5G_1], + .pwrtrk_5ga_p[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_type0_5ga_p[RTW_PWR_TRK_5G_2], + .pwrtrk_5ga_p[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_type0_5ga_p[RTW_PWR_TRK_5G_3], + .pwrtrk_2gd_n = rtw8814a_pwrtrk_type0_2gd_n, + .pwrtrk_2gd_p = rtw8814a_pwrtrk_type0_2gd_p, + .pwrtrk_2gc_n = rtw8814a_pwrtrk_type0_2gc_n, + .pwrtrk_2gc_p = rtw8814a_pwrtrk_type0_2gc_p, + .pwrtrk_2gb_n = rtw8814a_pwrtrk_type0_2gb_n, + .pwrtrk_2gb_p = rtw8814a_pwrtrk_type0_2gb_p, + .pwrtrk_2ga_n = rtw8814a_pwrtrk_type0_2ga_n, + .pwrtrk_2ga_p = rtw8814a_pwrtrk_type0_2ga_p, + .pwrtrk_2g_cckd_n = rtw8814a_pwrtrk_type0_2g_cck_d_n, + .pwrtrk_2g_cckd_p = rtw8814a_pwrtrk_type0_2g_cck_d_p, + .pwrtrk_2g_cckc_n = rtw8814a_pwrtrk_type0_2g_cck_c_n, + .pwrtrk_2g_cckc_p = rtw8814a_pwrtrk_type0_2g_cck_c_p, + .pwrtrk_2g_cckb_n = 
rtw8814a_pwrtrk_type0_2g_cck_b_n, + .pwrtrk_2g_cckb_p = rtw8814a_pwrtrk_type0_2g_cck_b_p, + .pwrtrk_2g_ccka_n = rtw8814a_pwrtrk_type0_2g_cck_a_n, + .pwrtrk_2g_ccka_p = rtw8814a_pwrtrk_type0_2g_cck_a_p, +}; + +static const u8 +rtw8814a_pwrtrk_type2_5gd_n[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = { + {0, 1, 2, 3, 4, 5, 6, 7, 7, 8, 9, 10, 10, 11, 11, 12, 12, + 12, 13, 14, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16}, + {0, 1, 2, 3, 4, 5, 5, 6, 6, 7, 8, 9, 9, 10, 10, 10, 10, + 11, 11, 12, 12, 13, 14, 15, 16, 16, 16, 16, 16, 16}, + {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 9, 10, 10, 11, 11, 11, 12, + 12, 13, 14, 14, 15, 15, 15, 15, 15, 15, 15, 15, 15}, +}; + +static const u8 +rtw8814a_pwrtrk_type2_5gd_p[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = { + {0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 8, 9, 10, 11, 12, 13, 14, + 15, 16, 17, 18, 19, 20, 21, 22, 22, 22, 22, 22, 22}, + {0, 1, 2, 3, 4, 4, 5, 6, 7, 7, 8, 9, 10, 11, 12, 13, 14, + 15, 16, 17, 18, 19, 20, 21, 22, 22, 22, 22, 22, 22}, + {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, + 17, 18, 19, 20, 21, 22, 23, 23, 23, 23, 23, 23, 23}, +}; + +static const u8 +rtw8814a_pwrtrk_type2_5gc_n[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = { + {0, 1, 2, 3, 3, 4, 5, 6, 7, 7, 8, 9, 10, 10, 11, 11, 12, + 12, 13, 14, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15}, + {0, 1, 2, 3, 4, 5, 5, 6, 7, 8, 9, 9, 10, 10, 11, 11, 12, + 13, 13, 14, 14, 15, 15, 15, 15, 15, 15, 15, 15, 15}, + {0, 1, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 9, 10, 11, 11, + 12, 12, 13, 13, 13, 14, 14, 14, 14, 14, 14, 14, 14}, +}; + +static const u8 +rtw8814a_pwrtrk_type2_5gc_p[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = { + {0, 1, 2, 3, 4, 5, 6, 6, 7, 8, 9, 10, 11, 11, 12, 13, 14, + 15, 16, 17, 18, 19, 20, 21, 21, 21, 21, 21, 21, 21}, + {0, 1, 2, 3, 4, 5, 6, 6, 7, 8, 8, 9, 10, 11, 12, 13, 14, + 15, 16, 17, 18, 19, 20, 20, 20, 21, 21, 21, 21, 21}, + {0, 1, 2, 3, 3, 4, 5, 6, 7, 7, 8, 9, 10, 11, 12, 13, 14, + 15, 16, 17, 18, 19, 20, 21, 21, 21, 21, 21, 21, 21}, +}; + +static const u8 +rtw8814a_pwrtrk_type2_5gb_n[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = { + {0, 1, 2, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 11, 12, 13, + 13, 13, 14, 14, 14, 15, 15, 15, 15, 15, 15, 15, 15}, + {0, 1, 2, 3, 4, 5, 5, 6, 7, 8, 9, 9, 10, 10, 11, 11, 12, + 13, 13, 14, 14, 15, 15, 15, 15, 15, 15, 15, 15, 15}, + {0, 1, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 9, 10, 11, 11, + 12, 12, 13, 13, 13, 14, 14, 14, 14, 14, 14, 14, 14}, +}; + +static const u8 +rtw8814a_pwrtrk_type2_5gb_p[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = { + {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 11, 12, 13, 14, 15, + 16, 17, 18, 19, 20, 21, 21, 21, 21, 21, 21, 21, 21}, + {0, 0, 1, 2, 3, 4, 5, 6, 6, 7, 8, 9, 10, 10, 11, 12, 13, + 14, 15, 16, 17, 18, 19, 20, 20, 20, 20, 20, 20, 20}, + {0, 1, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 11, 12, 13, 14, + 15, 16, 17, 18, 19, 20, 21, 21, 21, 21, 21, 21, 21}, +}; + +static const u8 +rtw8814a_pwrtrk_type2_5ga_n[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = { + {0, 1, 2, 3, 4, 5, 5, 6, 7, 7, 8, 9, 10, 10, 11, 11, 11, + 12, 13, 13, 13, 13, 14, 15, 15, 15, 15, 15, 15, 15}, + {0, 1, 2, 3, 3, 4, 5, 6, 7, 8, 8, 9, 10, 10, 11, 12, 12, + 12, 13, 13, 13, 14, 14, 14, 14, 14, 14, 14, 14, 14}, + {0, 1, 2, 3, 4, 4, 5, 6, 7, 7, 8, 9, 10, 11, 11, 11, 12, + 12, 12, 12, 12, 13, 13, 13, 13, 13, 13, 13, 13, 13}, +}; + +static const u8 +rtw8814a_pwrtrk_type2_5ga_p[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = { + {0, 1, 2, 3, 4, 5, 5, 6, 7, 8, 9, 10, 10, 11, 12, 13, 14, + 15, 15, 16, 17, 18, 18, 19, 19, 20, 20, 20, 20, 20}, + {0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 11, 12, 13, 14, + 15, 16, 
16, 17, 18, 19, 20, 20, 20, 20, 20, 20, 20}, + {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 11, 12, 13, 14, 15, + 15, 16, 17, 18, 19, 19, 20, 20, 20, 20, 20, 20, 20}, +}; + +static const u8 rtw8814a_pwrtrk_type2_2gd_n[RTW_PWR_TRK_TBL_SZ] = { + 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, + 6, 6, 7, 8, 9, 10, 11, 11, 11, 11, 11, 11, 11 +}; + +static const u8 rtw8814a_pwrtrk_type2_2gd_p[RTW_PWR_TRK_TBL_SZ] = { + 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 6, 6, 7, 7, 8, 9, 9, 10, + 10, 11, 12, 12, 13, 14, 14, 14, 14, 14, 14, 14 +}; + +static const u8 rtw8814a_pwrtrk_type2_2gc_n[RTW_PWR_TRK_TBL_SZ] = { + 0, 1, 1, 2, 2, 3, 3, 4, 5, 6, 6, 6, 7, 7, 8, 8, 9, 10, + 10, 11, 12, 12, 13, 13, 13, 13, 14, 14, 14, 14 +}; + +static const u8 rtw8814a_pwrtrk_type2_2gc_p[RTW_PWR_TRK_TBL_SZ] = { + 0, 1, 1, 2, 3, 3, 4, 4, 4, 5, 5, 6, 7, 8, 8, 9, 10, 10, + 11, 12, 13, 13, 14, 14, 14, 14, 14, 14, 14, 14 +}; + +static const u8 rtw8814a_pwrtrk_type2_2gb_n[RTW_PWR_TRK_TBL_SZ] = { + 0, 1, 1, 2, 2, 3, 3, 4, 5, 5, 6, 6, 7, 7, 8, 8, 8, + 9, 9, 10, 10, 11, 11, 12, 12, 12, 13, 14, 14, 14 +}; + +static const u8 rtw8814a_pwrtrk_type2_2gb_p[RTW_PWR_TRK_TBL_SZ] = { + 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 9, 10, + 10, 11, 12, 12, 13, 13, 13, 13, 13, 14, 14, 14 +}; + +static const u8 rtw8814a_pwrtrk_type2_2ga_n[RTW_PWR_TRK_TBL_SZ] = { + 0, 0, 1, 2, 2, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 9, 9, 9, + 10, 11, 11, 12, 12, 13, 13, 13, 13, 13, 13, 13 +}; + +static const u8 rtw8814a_pwrtrk_type2_2ga_p[RTW_PWR_TRK_TBL_SZ] = { + 0, 1, 1, 2, 3, 3, 4, 4, 4, 5, 6, 6, 7, 7, 8, 8, 9, 10, + 11, 12, 12, 13, 13, 14, 14, 14, 14, 14, 14, 14 +}; + +static const u8 rtw8814a_pwrtrk_type2_2g_cck_d_n[RTW_PWR_TRK_TBL_SZ] = { + 0, 1, 1, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 7, 8, + 9, 9, 9, 9, 9, 9, 10, 10, 11, 11, 12, 12, 12 +}; + +static const u8 rtw8814a_pwrtrk_type2_2g_cck_d_p[RTW_PWR_TRK_TBL_SZ] = { + 0, 1, 1, 1, 2, 3, 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 10, 10, + 10, 11, 12, 12, 13, 14, 14, 14, 14, 14, 14, 14 +}; + +static const u8 rtw8814a_pwrtrk_type2_2g_cck_c_n[RTW_PWR_TRK_TBL_SZ] = { + 0, 0, 1, 2, 2, 3, 4, 5, 5, 6, 6, 6, 6, 7, 8, 9, 9, 10, + 10, 11, 11, 11, 12, 13, 13, 13, 13, 13, 13, 13 +}; + +static const u8 rtw8814a_pwrtrk_type2_2g_cck_c_p[RTW_PWR_TRK_TBL_SZ] = { + 0, 0, 1, 1, 2, 2, 3, 3, 4, 5, 5, 6, 7, 7, 8, 9, 9, 10, + 10, 11, 12, 12, 13, 13, 13, 13, 13, 13, 13, 13 +}; + +static const u8 rtw8814a_pwrtrk_type2_2g_cck_b_n[RTW_PWR_TRK_TBL_SZ] = { + 0, 1, 1, 2, 2, 2, 3, 3, 4, 5, 6, 6, 7, 7, 8, 8, 9, 10, + 10, 10, 11, 11, 12, 12, 12, 12, 12, 12, 12, 12 +}; + +static const u8 rtw8814a_pwrtrk_type2_2g_cck_b_p[RTW_PWR_TRK_TBL_SZ] = { + 0, 1, 1, 1, 2, 3, 3, 4, 4, 5, 5, 6, 7, 8, 8, 9, 9, 10, + 10, 11, 12, 12, 13, 13, 13, 13, 13, 13, 13, 13 +}; + +static const u8 rtw8814a_pwrtrk_type2_2g_cck_a_n[RTW_PWR_TRK_TBL_SZ] = { + 0, 1, 2, 2, 3, 4, 4, 4, 5, 5, 6, 6, 7, 8, 9, 9, 9, 9, + 10, 10, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12 +}; + +static const u8 rtw8814a_pwrtrk_type2_2g_cck_a_p[RTW_PWR_TRK_TBL_SZ] = { + 0, 0, 1, 2, 2, 3, 3, 4, 5, 5, 6, 7, 8, 9, 9, 10, 10, + 11, 11, 12, 12, 12, 13, 13, 13, 13, 13, 13, 13, 13 +}; + +const struct rtw_pwr_track_tbl rtw8814a_rtw_pwrtrk_type2_tbl = { + .pwrtrk_5gd_n[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_type2_5gd_n[RTW_PWR_TRK_5G_1], + .pwrtrk_5gd_n[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_type2_5gd_n[RTW_PWR_TRK_5G_2], + .pwrtrk_5gd_n[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_type2_5gd_n[RTW_PWR_TRK_5G_3], + .pwrtrk_5gd_p[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_type2_5gd_p[RTW_PWR_TRK_5G_1], + .pwrtrk_5gd_p[RTW_PWR_TRK_5G_2] = 
rtw8814a_pwrtrk_type2_5gd_p[RTW_PWR_TRK_5G_2], + .pwrtrk_5gd_p[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_type2_5gd_p[RTW_PWR_TRK_5G_3], + .pwrtrk_5gc_n[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_type2_5gc_n[RTW_PWR_TRK_5G_1], + .pwrtrk_5gc_n[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_type2_5gc_n[RTW_PWR_TRK_5G_2], + .pwrtrk_5gc_n[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_type2_5gc_n[RTW_PWR_TRK_5G_3], + .pwrtrk_5gc_p[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_type2_5gc_p[RTW_PWR_TRK_5G_1], + .pwrtrk_5gc_p[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_type2_5gc_p[RTW_PWR_TRK_5G_2], + .pwrtrk_5gc_p[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_type2_5gc_p[RTW_PWR_TRK_5G_3], + .pwrtrk_5gb_n[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_type2_5gb_n[RTW_PWR_TRK_5G_1], + .pwrtrk_5gb_n[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_type2_5gb_n[RTW_PWR_TRK_5G_2], + .pwrtrk_5gb_n[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_type2_5gb_n[RTW_PWR_TRK_5G_3], + .pwrtrk_5gb_p[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_type2_5gb_p[RTW_PWR_TRK_5G_1], + .pwrtrk_5gb_p[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_type2_5gb_p[RTW_PWR_TRK_5G_2], + .pwrtrk_5gb_p[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_type2_5gb_p[RTW_PWR_TRK_5G_3], + .pwrtrk_5ga_n[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_type2_5ga_n[RTW_PWR_TRK_5G_1], + .pwrtrk_5ga_n[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_type2_5ga_n[RTW_PWR_TRK_5G_2], + .pwrtrk_5ga_n[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_type2_5ga_n[RTW_PWR_TRK_5G_3], + .pwrtrk_5ga_p[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_type2_5ga_p[RTW_PWR_TRK_5G_1], + .pwrtrk_5ga_p[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_type2_5ga_p[RTW_PWR_TRK_5G_2], + .pwrtrk_5ga_p[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_type2_5ga_p[RTW_PWR_TRK_5G_3], + .pwrtrk_2gd_n = rtw8814a_pwrtrk_type2_2gd_n, + .pwrtrk_2gd_p = rtw8814a_pwrtrk_type2_2gd_p, + .pwrtrk_2gc_n = rtw8814a_pwrtrk_type2_2gc_n, + .pwrtrk_2gc_p = rtw8814a_pwrtrk_type2_2gc_p, + .pwrtrk_2gb_n = rtw8814a_pwrtrk_type2_2gb_n, + .pwrtrk_2gb_p = rtw8814a_pwrtrk_type2_2gb_p, + .pwrtrk_2ga_n = rtw8814a_pwrtrk_type2_2ga_n, + .pwrtrk_2ga_p = rtw8814a_pwrtrk_type2_2ga_p, + .pwrtrk_2g_cckd_n = rtw8814a_pwrtrk_type2_2g_cck_d_n, + .pwrtrk_2g_cckd_p = rtw8814a_pwrtrk_type2_2g_cck_d_p, + .pwrtrk_2g_cckc_n = rtw8814a_pwrtrk_type2_2g_cck_c_n, + .pwrtrk_2g_cckc_p = rtw8814a_pwrtrk_type2_2g_cck_c_p, + .pwrtrk_2g_cckb_n = rtw8814a_pwrtrk_type2_2g_cck_b_n, + .pwrtrk_2g_cckb_p = rtw8814a_pwrtrk_type2_2g_cck_b_p, + .pwrtrk_2g_ccka_n = rtw8814a_pwrtrk_type2_2g_cck_a_n, + .pwrtrk_2g_ccka_p = rtw8814a_pwrtrk_type2_2g_cck_a_p, +}; + +static const u8 +rtw8814a_pwrtrk_type5_5gd_n[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = { + {0, 3, 3, 3, 4, 6, 6, 7, 7, 8, 9, 10, 11, 12, 13, 13, 14, + 14, 14, 14, 14, 15, 15, 15, 15, 15, 15, 15, 15, 15}, + {0, 4, 5, 6, 7, 7, 8, 7, 8, 10, 11, 12, 12, 13, 13, 14, 14, + 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15}, + {0, 5, 5, 6, 7, 8, 9, 9, 10, 11, 12, 13, 13, 14, 15, 15, 16, + 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16}, +}; + +static const u8 +rtw8814a_pwrtrk_type5_5gd_p[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = { + {0, 0, 0, 1, 2, 2, 3, 4, 5, 5, 6, 7, 9, 10, 10, 11, 12, + 12, 12, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13}, + {0, 0, 0, 1, 1, 1, 2, 3, 4, 5, 6, 7, 8, 8, 9, 11, 12, + 12, 13, 13, 13, 13, 13, 14, 14, 14, 14, 14, 14, 14}, + {0, 0, 0, 1, 1, 2, 2, 3, 5, 7, 8, 9, 10, 11, 12, 13, 13, + 13, 13, 13, 13, 14, 14, 14, 14, 14, 14, 14, 14, 14}, +}; + +static const u8 +rtw8814a_pwrtrk_type5_5gc_n[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = { + {0, 1, 1, 2, 3, 3, 4, 6, 7, 7, 8, 9, 9, 9, 10, 10, 10, + 10, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12}, + {0, 1, 2, 3, 3, 
7, 7, 8, 8, 9, 11, 12, 12, 13, 14, 14, 15, + 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15}, + {0, 0, 1, 2, 3, 4, 5, 7, 8, 8, 10, 11, 12, 12, 13, 13, 14, + 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14}, +}; + +static const u8 +rtw8814a_pwrtrk_type5_5gc_p[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = { + {0, 0, 0, 1, 1, 2, 2, 3, 4, 5, 6, 6, 7, 8, 9, 11, 11, + 11, 11, 11, 11, 12, 12, 12, 12, 12, 13, 13, 13, 13}, + {0, 0, 1, 2, 3, 3, 5, 5, 6, 8, 8, 9, 10, 11, 13, 13, 13, + 13, 13, 13, 13, 14, 14, 14, 14, 14, 14, 14, 14, 14}, + {0, 0, 1, 2, 3, 4, 4, 5, 7, 8, 9, 9, 10, 11, 12, 12, 12, + 12, 13, 13, 13, 13, 13, 13, 14, 14, 14, 14, 14, 14}, +}; + +static const u8 +rtw8814a_pwrtrk_type5_5gb_n[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = { + {0, 1, 1, 2, 2, 2, 3, 4, 5, 6, 7, 9, 10, 10, 10, 10, 10, + 10, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12}, + {0, 1, 2, 3, 3, 7, 7, 8, 8, 9, 11, 12, 12, 13, 14, 14, 15, + 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15}, + {0, 0, 1, 2, 3, 4, 5, 7, 8, 8, 10, 11, 12, 12, 13, 13, 14, + 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14}, +}; + +static const u8 +rtw8814a_pwrtrk_type5_5gb_p[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = { + {0, 1, 1, 1, 2, 2, 4, 5, 6, 6, 7, 8, 9, 10, 11, 11, 11, + 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12}, + {0, 0, 0, 2, 3, 4, 5, 6, 8, 8, 9, 9, 11, 12, 13, 13, 13, + 13, 13, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14}, + {0, 0, 0, 1, 2, 3, 3, 4, 6, 7, 8, 9, 10, 11, 12, 12, 12, + 12, 13, 13, 13, 13, 13, 13, 14, 14, 14, 14, 14, 14}, +}; + +static const u8 +rtw8814a_pwrtrk_type5_5ga_n[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = { + {0, 0, 0, 1, 2, 3, 3, 3, 4, 5, 6, 6, 7, 8, 8, 9, 10, 10, + 10, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12}, + {0, 2, 3, 4, 5, 7, 7, 8, 9, 10, 12, 13, 14, 15, 16, 17, 17, + 17, 17, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18}, + {0, 1, 2, 3, 3, 4, 6, 7, 8, 8, 10, 11, 11, 12, 13, 13, 13, + 13, 13, 13, 13, 14, 14, 14, 14, 14, 14, 14, 14, 14}, +}; + +static const u8 +rtw8814a_pwrtrk_type5_5ga_p[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = { + {0, 1, 1, 3, 3, 3, 5, 5, 6, 6, 8, 8, 9, 10, 11, 11, 11, + 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12}, + {0, 1, 2, 3, 4, 4, 5, 6, 7, 8, 8, 9, 11, 12, 13, 14, 15, + 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15}, + {0, 0, 1, 3, 3, 4, 5, 5, 6, 7, 7, 8, 10, 10, 11, 11, 11, + 11, 12, 12, 12, 12, 12, 13, 13, 13, 13, 13, 13, 13}, +}; + +static const u8 rtw8814a_pwrtrk_type5_2gd_n[RTW_PWR_TRK_TBL_SZ] = { + 0, 1, 2, 2, 3, 4, 4, 5, 6, 6, 7, 7, 7, 8, 9, 9, 9, + 9, 9, 10, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11 +}; + +static const u8 rtw8814a_pwrtrk_type5_2gd_p[RTW_PWR_TRK_TBL_SZ] = { + 0, 1, 1, 2, 2, 2, 3, 4, 4, 5, 6, 6, 7, 7, 8, 8, + 8, 8, 8, 9, 9, 9, 9, 9, 9, 10, 10, 10, 10, 10 +}; + +static const u8 rtw8814a_pwrtrk_type5_2gc_n[RTW_PWR_TRK_TBL_SZ] = { + 0, 1, 2, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 10, 10, 10, + 10, 10, 10, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12 +}; + +static const u8 rtw8814a_pwrtrk_type5_2gc_p[RTW_PWR_TRK_TBL_SZ] = { + 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 8, 8, 9, 9, 9, + 9, 9, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 11 +}; + +static const u8 rtw8814a_pwrtrk_type5_2gb_n[RTW_PWR_TRK_TBL_SZ] = { + 0, 1, 2, 2, 3, 4, 4, 5, 6, 6, 7, 7, 8, 9, 10, 10, 10, + 10, 10, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12 +}; + +static const u8 rtw8814a_pwrtrk_type5_2gb_p[RTW_PWR_TRK_TBL_SZ] = { + 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 8, 9, 9, 9, 9, + 9, 9, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 11 +}; + +static const u8 rtw8814a_pwrtrk_type5_2ga_n[RTW_PWR_TRK_TBL_SZ] = 
{ + 0, 1, 2, 2, 3, 4, 5, 5, 6, 7, 7, 8, 8, 9, 10, 10, 10, + 10, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12 +}; + +static const u8 rtw8814a_pwrtrk_type5_2ga_p[RTW_PWR_TRK_TBL_SZ] = { + 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 6, 7, 8, 8, 9, 9, 9, + 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 11, 11, 11 +}; + +static const u8 rtw8814a_pwrtrk_type5_2g_cck_d_n[RTW_PWR_TRK_TBL_SZ] = { + 0, 1, 2, 2, 3, 4, 5, 5, 6, 7, 7, 8, 8, 8, 9, 9, 9, 9, + 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 11, 11 +}; + +static const u8 rtw8814a_pwrtrk_type5_2g_cck_d_p[RTW_PWR_TRK_TBL_SZ] = { + 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 7, 7, 8, 8, 8, + 9, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10 +}; + +static const u8 rtw8814a_pwrtrk_type5_2g_cck_c_n[RTW_PWR_TRK_TBL_SZ] = { + 0, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8, 8, 8, 9, 10, 10, 10, + 10, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12 +}; + +static const u8 rtw8814a_pwrtrk_type5_2g_cck_c_p[RTW_PWR_TRK_TBL_SZ] = { + 0, 0, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 7, 7, 8, 8, 8, + 9, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10 +}; + +static const u8 rtw8814a_pwrtrk_type5_2g_cck_b_n[RTW_PWR_TRK_TBL_SZ] = { + 0, 2, 3, 4, 4, 5, 6, 6, 6, 7, 7, 8, 8, 9, 10, 10, 10, + 10, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11 +}; + +static const u8 rtw8814a_pwrtrk_type5_2g_cck_b_p[RTW_PWR_TRK_TBL_SZ] = { + 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 7, 7, 8, 8, 8, + 9, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10 +}; + +static const u8 rtw8814a_pwrtrk_type5_2g_cck_a_n[RTW_PWR_TRK_TBL_SZ] = { + 0, 1, 2, 2, 3, 4, 5, 5, 6, 7, 7, 8, 8, 9, 9, 9, 9, 10, + 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10 +}; + +static const u8 rtw8814a_pwrtrk_type5_2g_cck_a_p[RTW_PWR_TRK_TBL_SZ] = { + 0, 1, 1, 2, 2, 2, 3, 4, 4, 5, 6, 7, 8, 8, 9, 9, 9, + 9, 9, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10 +}; + +const struct rtw_pwr_track_tbl rtw8814a_rtw_pwrtrk_type5_tbl = { + .pwrtrk_5gd_n[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_type5_5gd_n[RTW_PWR_TRK_5G_1], + .pwrtrk_5gd_n[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_type5_5gd_n[RTW_PWR_TRK_5G_2], + .pwrtrk_5gd_n[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_type5_5gd_n[RTW_PWR_TRK_5G_3], + .pwrtrk_5gd_p[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_type5_5gd_p[RTW_PWR_TRK_5G_1], + .pwrtrk_5gd_p[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_type5_5gd_p[RTW_PWR_TRK_5G_2], + .pwrtrk_5gd_p[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_type5_5gd_p[RTW_PWR_TRK_5G_3], + .pwrtrk_5gc_n[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_type5_5gc_n[RTW_PWR_TRK_5G_1], + .pwrtrk_5gc_n[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_type5_5gc_n[RTW_PWR_TRK_5G_2], + .pwrtrk_5gc_n[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_type5_5gc_n[RTW_PWR_TRK_5G_3], + .pwrtrk_5gc_p[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_type5_5gc_p[RTW_PWR_TRK_5G_1], + .pwrtrk_5gc_p[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_type5_5gc_p[RTW_PWR_TRK_5G_2], + .pwrtrk_5gc_p[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_type5_5gc_p[RTW_PWR_TRK_5G_3], + .pwrtrk_5gb_n[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_type5_5gb_n[RTW_PWR_TRK_5G_1], + .pwrtrk_5gb_n[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_type5_5gb_n[RTW_PWR_TRK_5G_2], + .pwrtrk_5gb_n[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_type5_5gb_n[RTW_PWR_TRK_5G_3], + .pwrtrk_5gb_p[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_type5_5gb_p[RTW_PWR_TRK_5G_1], + .pwrtrk_5gb_p[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_type5_5gb_p[RTW_PWR_TRK_5G_2], + .pwrtrk_5gb_p[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_type5_5gb_p[RTW_PWR_TRK_5G_3], + .pwrtrk_5ga_n[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_type5_5ga_n[RTW_PWR_TRK_5G_1], + .pwrtrk_5ga_n[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_type5_5ga_n[RTW_PWR_TRK_5G_2], + .pwrtrk_5ga_n[RTW_PWR_TRK_5G_3] =
rtw8814a_pwrtrk_type5_5ga_n[RTW_PWR_TRK_5G_3], + .pwrtrk_5ga_p[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_type5_5ga_p[RTW_PWR_TRK_5G_1], + .pwrtrk_5ga_p[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_type5_5ga_p[RTW_PWR_TRK_5G_2], + .pwrtrk_5ga_p[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_type5_5ga_p[RTW_PWR_TRK_5G_3], + .pwrtrk_2gd_n = rtw8814a_pwrtrk_type5_2gd_n, + .pwrtrk_2gd_p = rtw8814a_pwrtrk_type5_2gd_p, + .pwrtrk_2gc_n = rtw8814a_pwrtrk_type5_2gc_n, + .pwrtrk_2gc_p = rtw8814a_pwrtrk_type5_2gc_p, + .pwrtrk_2gb_n = rtw8814a_pwrtrk_type5_2gb_n, + .pwrtrk_2gb_p = rtw8814a_pwrtrk_type5_2gb_p, + .pwrtrk_2ga_n = rtw8814a_pwrtrk_type5_2ga_n, + .pwrtrk_2ga_p = rtw8814a_pwrtrk_type5_2ga_p, + .pwrtrk_2g_cckd_n = rtw8814a_pwrtrk_type5_2g_cck_d_n, + .pwrtrk_2g_cckd_p = rtw8814a_pwrtrk_type5_2g_cck_d_p, + .pwrtrk_2g_cckc_n = rtw8814a_pwrtrk_type5_2g_cck_c_n, + .pwrtrk_2g_cckc_p = rtw8814a_pwrtrk_type5_2g_cck_c_p, + .pwrtrk_2g_cckb_n = rtw8814a_pwrtrk_type5_2g_cck_b_n, + .pwrtrk_2g_cckb_p = rtw8814a_pwrtrk_type5_2g_cck_b_p, + .pwrtrk_2g_ccka_n = rtw8814a_pwrtrk_type5_2g_cck_a_n, + .pwrtrk_2g_ccka_p = rtw8814a_pwrtrk_type5_2g_cck_a_p, +}; + +static const u8 +rtw8814a_pwrtrk_type7_5gd_n[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = { + {0, 0, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, + 8, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10}, + {0, 0, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, + 8, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10}, + {0, 0, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, + 8, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10}, +}; + +static const u8 +rtw8814a_pwrtrk_type7_5gd_p[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = { + {0, 0, 1, 1, 2, 3, 3, 4, 5, 5, 6, 6, 7, 8, 8, 9, 10, 10, + 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12}, + {0, 0, 1, 1, 2, 3, 3, 4, 5, 5, 6, 6, 7, 8, 8, 9, 10, 10, + 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12}, + {0, 0, 1, 1, 2, 3, 3, 4, 5, 5, 6, 6, 7, 8, 8, 9, 10, 10, + 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12}, +}; + +static const u8 +rtw8814a_pwrtrk_type7_5gc_n[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = { + {0, 0, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, + 8, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10}, + {0, 0, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, + 8, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10}, + {0, 0, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, + 8, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10}, +}; + +static const u8 +rtw8814a_pwrtrk_type7_5gc_p[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = { + {0, 0, 1, 1, 2, 3, 3, 4, 5, 5, 6, 6, 7, 8, 8, 9, 10, 10, + 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12}, + {0, 0, 1, 1, 2, 3, 3, 4, 5, 5, 6, 6, 7, 8, 8, 9, 10, 10, + 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12}, + {0, 0, 1, 1, 2, 3, 3, 4, 5, 5, 6, 6, 7, 8, 8, 9, 10, 10, + 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12}, +}; + +static const u8 +rtw8814a_pwrtrk_type7_5gb_n[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = { + {0, 0, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, + 8, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10}, + {0, 0, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, + 8, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10}, + {0, 0, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, + 8, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10}, +}; + +static const u8 +rtw8814a_pwrtrk_type7_5gb_p[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = { + {0, 0, 1, 1, 2, 3, 3, 4, 5, 5, 6, 6, 7, 8, 8, 9, 10, 10, + 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12}, + {0, 0, 1, 1, 2, 3, 3, 4, 5, 5, 6, 6, 7, 8, 8, 9, 10, 10, + 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12}, + {0, 0, 1, 1, 2, 3, 3, 
4, 5, 5, 6, 6, 7, 8, 8, 9, 10, 10, + 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12}, +}; + +static const u8 +rtw8814a_pwrtrk_type7_5ga_n[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = { + {0, 0, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, + 8, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10}, + {0, 0, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, + 8, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10}, + {0, 0, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, + 8, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10}, +}; + +static const u8 +rtw8814a_pwrtrk_type7_5ga_p[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = { + {0, 0, 1, 1, 2, 3, 3, 4, 5, 5, 6, 6, 7, 8, 8, 9, 10, 10, + 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12}, + {0, 0, 1, 1, 2, 3, 3, 4, 5, 5, 6, 6, 7, 8, 8, 9, 10, 10, + 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12}, + {0, 0, 1, 1, 2, 3, 3, 4, 5, 5, 6, 6, 7, 8, 8, 9, 10, 10, + 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12}, +}; + +static const u8 rtw8814a_pwrtrk_type7_2gd_n[RTW_PWR_TRK_TBL_SZ] = { + 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 5, 6, 7, 7, + 7, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9 +}; + +static const u8 rtw8814a_pwrtrk_type7_2gd_p[RTW_PWR_TRK_TBL_SZ] = { + 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, + 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9 +}; + +static const u8 rtw8814a_pwrtrk_type7_2gc_n[RTW_PWR_TRK_TBL_SZ] = { + 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 5, 6, 7, 7, + 7, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9 +}; + +static const u8 rtw8814a_pwrtrk_type7_2gc_p[RTW_PWR_TRK_TBL_SZ] = { + 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, + 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9 +}; + +static const u8 rtw8814a_pwrtrk_type7_2gb_n[RTW_PWR_TRK_TBL_SZ] = { + 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 5, 6, 7, 7, + 7, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9 +}; + +static const u8 rtw8814a_pwrtrk_type7_2gb_p[RTW_PWR_TRK_TBL_SZ] = { + 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, + 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9 +}; + +static const u8 rtw8814a_pwrtrk_type7_2ga_n[RTW_PWR_TRK_TBL_SZ] = { + 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 5, 6, 7, 7, + 7, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9 +}; + +static const u8 rtw8814a_pwrtrk_type7_2ga_p[RTW_PWR_TRK_TBL_SZ] = { + 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, + 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9 +}; + +static const u8 rtw8814a_pwrtrk_type7_2g_cck_d_n[RTW_PWR_TRK_TBL_SZ] = { + 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 5, 6, 7, 7, + 7, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9 +}; + +static const u8 rtw8814a_pwrtrk_type7_2g_cck_d_p[RTW_PWR_TRK_TBL_SZ] = { + 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, + 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9 +}; + +static const u8 rtw8814a_pwrtrk_type7_2g_cck_c_n[RTW_PWR_TRK_TBL_SZ] = { + 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 5, 6, 7, 7, + 7, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9 +}; + +static const u8 rtw8814a_pwrtrk_type7_2g_cck_c_p[RTW_PWR_TRK_TBL_SZ] = { + 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, + 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9 +}; + +static const u8 rtw8814a_pwrtrk_type7_2g_cck_b_n[RTW_PWR_TRK_TBL_SZ] = { + 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 5, 6, 7, 7, + 7, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9 +}; + +static const u8 rtw8814a_pwrtrk_type7_2g_cck_b_p[RTW_PWR_TRK_TBL_SZ] = { + 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, + 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9 +}; + +static const u8 rtw8814a_pwrtrk_type7_2g_cck_a_n[RTW_PWR_TRK_TBL_SZ] = { + 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 5, 6, 7, 7, + 7, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9 +}; + +static const u8 
rtw8814a_pwrtrk_type7_2g_cck_a_p[RTW_PWR_TRK_TBL_SZ] = { + 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, + 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9 +}; + +const struct rtw_pwr_track_tbl rtw8814a_rtw_pwrtrk_type7_tbl = { + .pwrtrk_5gd_n[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_type7_5gd_n[RTW_PWR_TRK_5G_1], + .pwrtrk_5gd_n[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_type7_5gd_n[RTW_PWR_TRK_5G_2], + .pwrtrk_5gd_n[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_type7_5gd_n[RTW_PWR_TRK_5G_3], + .pwrtrk_5gd_p[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_type7_5gd_p[RTW_PWR_TRK_5G_1], + .pwrtrk_5gd_p[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_type7_5gd_p[RTW_PWR_TRK_5G_2], + .pwrtrk_5gd_p[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_type7_5gd_p[RTW_PWR_TRK_5G_3], + .pwrtrk_5gc_n[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_type7_5gc_n[RTW_PWR_TRK_5G_1], + .pwrtrk_5gc_n[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_type7_5gc_n[RTW_PWR_TRK_5G_2], + .pwrtrk_5gc_n[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_type7_5gc_n[RTW_PWR_TRK_5G_3], + .pwrtrk_5gc_p[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_type7_5gc_p[RTW_PWR_TRK_5G_1], + .pwrtrk_5gc_p[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_type7_5gc_p[RTW_PWR_TRK_5G_2], + .pwrtrk_5gc_p[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_type7_5gc_p[RTW_PWR_TRK_5G_3], + .pwrtrk_5gb_n[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_type7_5gb_n[RTW_PWR_TRK_5G_1], + .pwrtrk_5gb_n[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_type7_5gb_n[RTW_PWR_TRK_5G_2], + .pwrtrk_5gb_n[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_type7_5gb_n[RTW_PWR_TRK_5G_3], + .pwrtrk_5gb_p[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_type7_5gb_p[RTW_PWR_TRK_5G_1], + .pwrtrk_5gb_p[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_type7_5gb_p[RTW_PWR_TRK_5G_2], + .pwrtrk_5gb_p[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_type7_5gb_p[RTW_PWR_TRK_5G_3], + .pwrtrk_5ga_n[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_type7_5ga_n[RTW_PWR_TRK_5G_1], + .pwrtrk_5ga_n[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_type7_5ga_n[RTW_PWR_TRK_5G_2], + .pwrtrk_5ga_n[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_type7_5ga_n[RTW_PWR_TRK_5G_3], + .pwrtrk_5ga_p[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_type7_5ga_p[RTW_PWR_TRK_5G_1], + .pwrtrk_5ga_p[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_type7_5ga_p[RTW_PWR_TRK_5G_2], + .pwrtrk_5ga_p[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_type7_5ga_p[RTW_PWR_TRK_5G_3], + .pwrtrk_2gd_n = rtw8814a_pwrtrk_type7_2gd_n, + .pwrtrk_2gd_p = rtw8814a_pwrtrk_type7_2gd_p, + .pwrtrk_2gc_n = rtw8814a_pwrtrk_type7_2gc_n, + .pwrtrk_2gc_p = rtw8814a_pwrtrk_type7_2gc_p, + .pwrtrk_2gb_n = rtw8814a_pwrtrk_type7_2gb_n, + .pwrtrk_2gb_p = rtw8814a_pwrtrk_type7_2gb_p, + .pwrtrk_2ga_n = rtw8814a_pwrtrk_type7_2ga_n, + .pwrtrk_2ga_p = rtw8814a_pwrtrk_type7_2ga_p, + .pwrtrk_2g_cckd_n = rtw8814a_pwrtrk_type7_2g_cck_d_n, + .pwrtrk_2g_cckd_p = rtw8814a_pwrtrk_type7_2g_cck_d_p, + .pwrtrk_2g_cckc_n = rtw8814a_pwrtrk_type7_2g_cck_c_n, + .pwrtrk_2g_cckc_p = rtw8814a_pwrtrk_type7_2g_cck_c_p, + .pwrtrk_2g_cckb_n = rtw8814a_pwrtrk_type7_2g_cck_b_n, + .pwrtrk_2g_cckb_p = rtw8814a_pwrtrk_type7_2g_cck_b_p, + .pwrtrk_2g_ccka_n = rtw8814a_pwrtrk_type7_2g_cck_a_n, + .pwrtrk_2g_ccka_p = rtw8814a_pwrtrk_type7_2g_cck_a_p, +}; + +static const u8 +rtw8814a_pwrtrk_type8_5gd_n[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = { + {0, 1, 2, 3, 4, 4, 6, 7, 7, 8, 8, 9, 10, 10, 11, 11, 12, + 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13}, + {0, 1, 2, 3, 4, 4, 6, 7, 7, 8, 9, 9, 10, 10, 11, 11, 12, + 12, 12, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13}, + {0, 1, 2, 3, 4, 4, 5, 6, 6, 7, 7, 8, 8, 8, 9, 10, 10, + 10, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11}, +}; + +static const u8 +rtw8814a_pwrtrk_type8_5gd_p[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = 
{ + {0, 0, 1, 2, 4, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 14, + 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15}, + {0, 0, 1, 2, 3, 5, 5, 6, 6, 7, 8, 9, 10, 11, 12, 13, 14, + 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14}, + {0, 0, 1, 2, 3, 4, 5, 6, 6, 7, 8, 9, 10, 11, 12, 13, 14, + 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14}, +}; + +static const u8 +rtw8814a_pwrtrk_type8_5gc_n[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = { + {0, 1, 2, 4, 4, 5, 6, 7, 7, 8, 8, 9, 9, 10, 11, 11, 12, + 12, 12, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13}, + {0, 1, 2, 3, 4, 5, 6, 7, 7, 8, 9, 9, 10, 10, 10, 11, 11, + 12, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13}, + {0, 1, 2, 3, 4, 4, 5, 6, 7, 7, 8, 8, 9, 10, 10, 11, 11, + 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12}, +}; + +static const u8 +rtw8814a_pwrtrk_type8_5gc_p[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = { + {0, 0, 1, 2, 4, 4, 5, 6, 7, 7, 8, 9, 10, 11, 12, 13, 14, + 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14}, + {0, 0, 1, 3, 4, 4, 5, 6, 7, 7, 8, 10, 11, 12, 13, 14, + 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15}, + {0, 0, 1, 2, 4, 5, 5, 6, 6, 7, 7, 9, 10, 11, 12, 13, 14, + 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14}, +}; + +static const u8 +rtw8814a_pwrtrk_type8_5gb_n[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = { + {0, 1, 2, 3, 4, 4, 5, 6, 7, 7, 8, 8, 9, 9, 10, 11, 11, + 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12}, + {0, 1, 2, 3, 4, 5, 6, 7, 7, 8, 9, 9, 10, 10, 10, 11, 11, + 12, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13}, + {0, 1, 2, 3, 4, 4, 5, 6, 7, 7, 8, 8, 9, 10, 10, 11, 11, + 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12}, +}; + +static const u8 +rtw8814a_pwrtrk_type8_5gb_p[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = { + {0, 0, 1, 2, 3, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, + 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14}, + {0, 0, 1, 2, 3, 4, 5, 5, 6, 7, 8, 10, 11, 11, 13, 13, + 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14}, + {0, 0, 1, 2, 3, 4, 4, 5, 6, 7, 8, 10, 10, 11, 13, 13, + 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14}, +}; + +static const u8 +rtw8814a_pwrtrk_type8_5ga_n[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = { + {0, 1, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 9, 10, 11, 11, + 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12}, + {0, 1, 2, 3, 4, 5, 5, 6, 7, 8, 9, 9, 10, 10, 10, 11, 11, + 12, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13}, + {0, 1, 2, 3, 4, 4, 5, 6, 7, 7, 8, 9, 10, 10, 10, 11, 12, + 12, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13}, +}; + +static const u8 +rtw8814a_pwrtrk_type8_5ga_p[RTW_PWR_TRK_5G_NUM][RTW_PWR_TRK_TBL_SZ] = { + {0, 0, 1, 2, 3, 3, 4, 5, 6, 7, 7, 9, 10, 11, 12, 13, 14, + 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14}, + {0, 0, 1, 2, 3, 4, 5, 5, 6, 7, 8, 9, 10, 11, 12, 13, 13, + 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14}, + {0, 0, 1, 2, 3, 3, 4, 4, 6, 7, 7, 9, 10, 11, 12, 13, 14, + 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14}, +}; + +static const u8 rtw8814a_pwrtrk_type8_2gd_n[RTW_PWR_TRK_TBL_SZ] = { + 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, + 6, 6, 7, 8, 9, 10, 11, 11, 11, 11, 11, 11, 11 +}; + +static const u8 rtw8814a_pwrtrk_type8_2gd_p[RTW_PWR_TRK_TBL_SZ] = { + 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 6, 6, 7, 7, 8, 9, 9, 10, + 10, 11, 12, 12, 13, 14, 14, 14, 14, 14, 14, 14 +}; + +static const u8 rtw8814a_pwrtrk_type8_2gc_n[RTW_PWR_TRK_TBL_SZ] = { + 0, 1, 1, 2, 2, 3, 3, 4, 5, 6, 6, 6, 7, 7, 8, 8, 9, 10, + 10, 11, 12, 12, 13, 13, 13, 13, 14, 14, 14, 14 +}; + +static const u8 rtw8814a_pwrtrk_type8_2gc_p[RTW_PWR_TRK_TBL_SZ] = { + 0, 1, 1, 2, 3, 3, 4, 4, 4, 5, 5, 6, 7, 8, 8, 9, 10, 
10, + 11, 12, 13, 13, 14, 14, 14, 14, 14, 14, 14, 14 +}; + +static const u8 rtw8814a_pwrtrk_type8_2gb_n[RTW_PWR_TRK_TBL_SZ] = { + 0, 1, 1, 2, 2, 3, 3, 4, 5, 5, 6, 6, 7, 7, 8, 8, 8, + 9, 9, 10, 10, 11, 11, 12, 12, 12, 13, 14, 14, 14 +}; + +static const u8 rtw8814a_pwrtrk_type8_2gb_p[RTW_PWR_TRK_TBL_SZ] = { + 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 9, 10, + 10, 11, 12, 12, 13, 13, 13, 13, 13, 14, 14, 14 +}; + +static const u8 rtw8814a_pwrtrk_type8_2ga_n[RTW_PWR_TRK_TBL_SZ] = { + 0, 0, 1, 2, 2, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 9, 9, 9, + 10, 11, 11, 12, 12, 13, 13, 13, 13, 13, 13, 13 +}; + +static const u8 rtw8814a_pwrtrk_type8_2ga_p[RTW_PWR_TRK_TBL_SZ] = { + 0, 1, 1, 2, 3, 3, 4, 4, 4, 5, 6, 6, 7, 7, 8, 8, 9, 10, + 11, 12, 12, 13, 13, 14, 14, 14, 14, 14, 14, 14 +}; + +static const u8 rtw8814a_pwrtrk_type8_2g_cck_d_n[RTW_PWR_TRK_TBL_SZ] = { + 0, 1, 1, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 7, 8, + 9, 9, 9, 9, 9, 9, 10, 10, 11, 11, 12, 12, 12 +}; + +static const u8 rtw8814a_pwrtrk_type8_2g_cck_d_p[RTW_PWR_TRK_TBL_SZ] = { + 0, 1, 1, 1, 2, 3, 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 10, 10, + 10, 11, 12, 12, 13, 14, 14, 14, 14, 14, 14, 14 +}; + +static const u8 rtw8814a_pwrtrk_type8_2g_cck_c_n[RTW_PWR_TRK_TBL_SZ] = { + 0, 0, 1, 2, 2, 3, 4, 5, 5, 6, 6, 6, 6, 7, 8, 9, 9, 10, + 10, 11, 11, 11, 12, 13, 13, 13, 13, 13, 13, 13 +}; + +static const u8 rtw8814a_pwrtrk_type8_2g_cck_c_p[RTW_PWR_TRK_TBL_SZ] = { + 0, 0, 1, 1, 2, 2, 3, 3, 4, 5, 5, 6, 7, 7, 8, 9, 9, 10, + 10, 11, 12, 12, 13, 13, 13, 13, 13, 13, 13, 13 +}; + +static const u8 rtw8814a_pwrtrk_type8_2g_cck_b_n[RTW_PWR_TRK_TBL_SZ] = { + 0, 1, 1, 2, 2, 2, 3, 3, 4, 5, 6, 6, 7, 7, 8, 8, 9, 10, + 10, 10, 11, 11, 12, 12, 12, 12, 12, 12, 12, 12 +}; + +static const u8 rtw8814a_pwrtrk_type8_2g_cck_b_p[RTW_PWR_TRK_TBL_SZ] = { + 0, 1, 1, 1, 2, 3, 3, 4, 4, 5, 5, 6, 7, 8, 8, 9, 9, 10, + 10, 11, 12, 12, 13, 13, 13, 13, 13, 13, 13, 13 +}; + +static const u8 rtw8814a_pwrtrk_type8_2g_cck_a_n[RTW_PWR_TRK_TBL_SZ] = { + 0, 1, 2, 2, 3, 4, 4, 4, 5, 5, 6, 6, 7, 8, 9, 9, 9, 9, + 10, 10, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12 +}; + +static const u8 rtw8814a_pwrtrk_type8_2g_cck_a_p[RTW_PWR_TRK_TBL_SZ] = { + 0, 0, 1, 2, 2, 3, 3, 4, 5, 5, 6, 7, 8, 9, 9, 10, 10, + 11, 11, 12, 12, 12, 13, 13, 13, 13, 13, 13, 13, 13 +}; + +const struct rtw_pwr_track_tbl rtw8814a_rtw_pwrtrk_type8_tbl = { + .pwrtrk_5gd_n[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_type8_5gd_n[RTW_PWR_TRK_5G_1], + .pwrtrk_5gd_n[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_type8_5gd_n[RTW_PWR_TRK_5G_2], + .pwrtrk_5gd_n[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_type8_5gd_n[RTW_PWR_TRK_5G_3], + .pwrtrk_5gd_p[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_type8_5gd_p[RTW_PWR_TRK_5G_1], + .pwrtrk_5gd_p[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_type8_5gd_p[RTW_PWR_TRK_5G_2], + .pwrtrk_5gd_p[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_type8_5gd_p[RTW_PWR_TRK_5G_3], + .pwrtrk_5gc_n[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_type8_5gc_n[RTW_PWR_TRK_5G_1], + .pwrtrk_5gc_n[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_type8_5gc_n[RTW_PWR_TRK_5G_2], + .pwrtrk_5gc_n[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_type8_5gc_n[RTW_PWR_TRK_5G_3], + .pwrtrk_5gc_p[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_type8_5gc_p[RTW_PWR_TRK_5G_1], + .pwrtrk_5gc_p[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_type8_5gc_p[RTW_PWR_TRK_5G_2], + .pwrtrk_5gc_p[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_type8_5gc_p[RTW_PWR_TRK_5G_3], + .pwrtrk_5gb_n[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_type8_5gb_n[RTW_PWR_TRK_5G_1], + .pwrtrk_5gb_n[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_type8_5gb_n[RTW_PWR_TRK_5G_2], + .pwrtrk_5gb_n[RTW_PWR_TRK_5G_3] = 
rtw8814a_pwrtrk_type8_5gb_n[RTW_PWR_TRK_5G_3], + .pwrtrk_5gb_p[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_type8_5gb_p[RTW_PWR_TRK_5G_1], + .pwrtrk_5gb_p[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_type8_5gb_p[RTW_PWR_TRK_5G_2], + .pwrtrk_5gb_p[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_type8_5gb_p[RTW_PWR_TRK_5G_3], + .pwrtrk_5ga_n[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_type8_5ga_n[RTW_PWR_TRK_5G_1], + .pwrtrk_5ga_n[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_type8_5ga_n[RTW_PWR_TRK_5G_2], + .pwrtrk_5ga_n[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_type8_5ga_n[RTW_PWR_TRK_5G_3], + .pwrtrk_5ga_p[RTW_PWR_TRK_5G_1] = rtw8814a_pwrtrk_type8_5ga_p[RTW_PWR_TRK_5G_1], + .pwrtrk_5ga_p[RTW_PWR_TRK_5G_2] = rtw8814a_pwrtrk_type8_5ga_p[RTW_PWR_TRK_5G_2], + .pwrtrk_5ga_p[RTW_PWR_TRK_5G_3] = rtw8814a_pwrtrk_type8_5ga_p[RTW_PWR_TRK_5G_3], + .pwrtrk_2gd_n = rtw8814a_pwrtrk_type8_2gd_n, + .pwrtrk_2gd_p = rtw8814a_pwrtrk_type8_2gd_p, + .pwrtrk_2gc_n = rtw8814a_pwrtrk_type8_2gc_n, + .pwrtrk_2gc_p = rtw8814a_pwrtrk_type8_2gc_p, + .pwrtrk_2gb_n = rtw8814a_pwrtrk_type8_2gb_n, + .pwrtrk_2gb_p = rtw8814a_pwrtrk_type8_2gb_p, + .pwrtrk_2ga_n = rtw8814a_pwrtrk_type8_2ga_n, + .pwrtrk_2ga_p = rtw8814a_pwrtrk_type8_2ga_p, + .pwrtrk_2g_cckd_n = rtw8814a_pwrtrk_type8_2g_cck_d_n, + .pwrtrk_2g_cckd_p = rtw8814a_pwrtrk_type8_2g_cck_d_p, + .pwrtrk_2g_cckc_n = rtw8814a_pwrtrk_type8_2g_cck_c_n, + .pwrtrk_2g_cckc_p = rtw8814a_pwrtrk_type8_2g_cck_c_p, + .pwrtrk_2g_cckb_n = rtw8814a_pwrtrk_type8_2g_cck_b_n, + .pwrtrk_2g_cckb_p = rtw8814a_pwrtrk_type8_2g_cck_b_p, + .pwrtrk_2g_ccka_n = rtw8814a_pwrtrk_type8_2g_cck_a_n, + .pwrtrk_2g_ccka_p = rtw8814a_pwrtrk_type8_2g_cck_a_p, +}; + +static const struct rtw_pwr_seq_cmd init_power_on_8814a[] = { + {0x10c2, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_USB_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(1), BIT(1)}, + {0xFFFF, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + 0, + RTW_PWR_CMD_END, 0, 0}, +}; + +static const struct rtw_pwr_seq_cmd trans_carddis_to_cardemu_8814a[] = { + {0x0012, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(6), BIT(6)}, + {0x0015, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(5), 0}, + {0x0015, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(6), 0}, + {0x0023, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(4), 0}, + {0x0046, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, 0xFF, 0x00}, + {0x0062, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, 0xFF, 0x00}, + {0x0005, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_PCI_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(2), 0}, + {0x0005, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(3), 0}, + {0x0301, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_PCI_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, 0xFF, 0}, + {0x0071, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_PCI_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(2), 0}, + {0xFFFF, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + 0, + RTW_PWR_CMD_END, 0, 0}, +}; + +static const struct rtw_pwr_seq_cmd trans_cardemu_to_act_8814a[] = { + {0x0005, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(2), 0}, + {0x0006, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_POLLING, BIT(1), BIT(1)}, + {0x0005, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + 
RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(3), 0}, + {0x00F0, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(7), 0}, + {0x0081, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, 0x30, 0x20}, + {0x0005, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(0), BIT(0)}, + {0x0005, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_POLLING, BIT(0), 0}, + {0xFFFF, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + 0, + RTW_PWR_CMD_END, 0, 0}, +}; + +static const struct rtw_pwr_seq_cmd trans_act_to_cardemu_8814a[] = { + {0x0c00, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, 0xFF, 0x04}, + {0x0e00, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, 0xFF, 0x04}, + {0x1002, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(0), 0}, + {0x0002, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_DELAY, 1, RTW_PWR_DELAY_US}, + {0x1002, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_PCI_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(1), 0}, + {0x001F, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, 0xFF, 0}, + {0x0007, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, 0xFF, 0x28}, + {0x0008, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_USB_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, 0x02, 0}, + {0x0066, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_USB_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(7), 0}, + {0x0041, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_USB_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(4), 0}, + {0x0042, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_USB_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(1), 0}, + {0x004e, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_USB_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(5), BIT(5)}, + {0x0041, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_USB_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(0), 0}, + {0x0005, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(1), BIT(1)}, + {0x0005, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_POLLING, BIT(1), 0}, + {0xFFFF, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + 0, + RTW_PWR_CMD_END, 0, 0}, +}; + +static const struct rtw_pwr_seq_cmd trans_cardemu_to_carddis_8814a[] = { + {0x0003, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(2), 0}, + {0x0080, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, 0xFF, 0x01}, + {0x0081, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, 0xFF, 0x30}, + {0x0045, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, 0xFF, 0x00}, + {0x0046, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, 0xFF, 0xff}, + {0x0047, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, 0xFF, 0}, + {0x0015, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(6), BIT(6)}, + {0x0015, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(5), BIT(5)}, + {0x0012, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(6), 0}, + {0x0023, + 
RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(4), BIT(4)}, + {0x0008, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(1), 0}, + {0x0007, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, 0xFF, 0x20}, + {0x001f, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(1), 0}, + {0x0020, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(1), 0}, + {0x0021, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(1), 0}, + {0x0076, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(1), 0}, + {0x0091, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, 0xA0, 0xA0}, + {0x0070, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(3), BIT(3)}, + {0x0005, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(3), BIT(3)}, + {0xFFFF, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + 0, + RTW_PWR_CMD_END, 0, 0}, +}; + +const struct rtw_pwr_seq_cmd * const card_enable_flow_8814a[] = { + init_power_on_8814a, + trans_carddis_to_cardemu_8814a, + trans_cardemu_to_act_8814a, + NULL +}; + +const struct rtw_pwr_seq_cmd * const card_disable_flow_8814a[] = { + trans_act_to_cardemu_8814a, + trans_cardemu_to_carddis_8814a, + NULL +}; diff --git a/drivers/net/wireless/realtek/rtw88/rtw8814a_table.h b/drivers/net/wireless/realtek/rtw88/rtw8814a_table.h new file mode 100644 index 0000000000000..69fb87e36c481 --- /dev/null +++ b/drivers/net/wireless/realtek/rtw88/rtw8814a_table.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* Copyright(c) 2025 Realtek Corporation + */ + +#ifndef __RTW8814A_TABLE_H__ +#define __RTW8814A_TABLE_H__ + +extern const struct rtw_table rtw8814a_mac_tbl; +extern const struct rtw_table rtw8814a_agc_tbl; +extern const struct rtw_table rtw8814a_bb_tbl; +extern const struct rtw_table rtw8814a_bb_pg_tbl; +extern const struct rtw_table rtw8814a_bb_pg_type0_tbl; +extern const struct rtw_table rtw8814a_bb_pg_type2_tbl; +extern const struct rtw_table rtw8814a_bb_pg_type3_tbl; +extern const struct rtw_table rtw8814a_bb_pg_type4_tbl; +extern const struct rtw_table rtw8814a_bb_pg_type5_tbl; +extern const struct rtw_table rtw8814a_bb_pg_type7_tbl; +extern const struct rtw_table rtw8814a_bb_pg_type8_tbl; +extern const struct rtw_table rtw8814a_rf_a_tbl; +extern const struct rtw_table rtw8814a_rf_b_tbl; +extern const struct rtw_table rtw8814a_rf_c_tbl; +extern const struct rtw_table rtw8814a_rf_d_tbl; +extern const struct rtw_table rtw8814a_txpwr_lmt_tbl; +extern const struct rtw_table rtw8814a_txpwr_lmt_type0_tbl; +extern const struct rtw_table rtw8814a_txpwr_lmt_type1_tbl; +extern const struct rtw_table rtw8814a_txpwr_lmt_type2_tbl; +extern const struct rtw_table rtw8814a_txpwr_lmt_type3_tbl; +extern const struct rtw_table rtw8814a_txpwr_lmt_type5_tbl; +extern const struct rtw_table rtw8814a_txpwr_lmt_type7_tbl; +extern const struct rtw_table rtw8814a_txpwr_lmt_type8_tbl; +extern const struct rtw_pwr_track_tbl rtw8814a_rtw_pwrtrk_tbl; +extern const struct rtw_pwr_track_tbl rtw8814a_rtw_pwrtrk_type0_tbl; +extern const struct rtw_pwr_track_tbl rtw8814a_rtw_pwrtrk_type2_tbl; +extern const struct rtw_pwr_track_tbl rtw8814a_rtw_pwrtrk_type5_tbl; +extern const struct rtw_pwr_track_tbl 
rtw8814a_rtw_pwrtrk_type7_tbl; +extern const struct rtw_pwr_track_tbl rtw8814a_rtw_pwrtrk_type8_tbl; +extern const struct rtw_pwr_seq_cmd * const card_disable_flow_8814a[]; +extern const struct rtw_pwr_seq_cmd * const card_enable_flow_8814a[]; + +#endif diff --git a/drivers/net/wireless/realtek/rtw88/rtw8814ae.c b/drivers/net/wireless/realtek/rtw88/rtw8814ae.c new file mode 100644 index 0000000000000..54d2e20a7764e --- /dev/null +++ b/drivers/net/wireless/realtek/rtw88/rtw8814ae.c @@ -0,0 +1,31 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* Copyright(c) 2025 Realtek Corporation + */ + +#include <linux/module.h> +#include <linux/pci.h> +#include "pci.h" +#include "rtw8814a.h" + +static const struct pci_device_id rtw_8814ae_id_table[] = { + { + PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8813), + .driver_data = (kernel_ulong_t)&rtw8814a_hw_spec + }, + {} +}; +MODULE_DEVICE_TABLE(pci, rtw_8814ae_id_table); + +static struct pci_driver rtw_8814ae_driver = { + .name = "rtw_8814ae", + .id_table = rtw_8814ae_id_table, + .probe = rtw_pci_probe, + .remove = rtw_pci_remove, + .driver.pm = &rtw_pm_ops, + .shutdown = rtw_pci_shutdown, +}; +module_pci_driver(rtw_8814ae_driver); + +MODULE_AUTHOR("Bitterblue Smith "); +MODULE_DESCRIPTION("Realtek 802.11ac wireless 8814ae driver"); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/realtek/rtw88/rtw8814au.c b/drivers/net/wireless/realtek/rtw88/rtw8814au.c new file mode 100644 index 0000000000000..afe045fb84de1 --- /dev/null +++ b/drivers/net/wireless/realtek/rtw88/rtw8814au.c @@ -0,0 +1,54 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* Copyright(c) 2025 Realtek Corporation + */ + +#include <linux/module.h> +#include <linux/usb.h> +#include "main.h" +#include "rtw8814a.h" +#include "usb.h" + +static const struct usb_device_id rtw_8814au_id_table[] = { + { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0x8813, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&(rtw8814a_hw_spec) }, + { USB_DEVICE_AND_INTERFACE_INFO(0x056e, 0x400b, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&(rtw8814a_hw_spec) }, + { USB_DEVICE_AND_INTERFACE_INFO(0x056e, 0x400d, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&(rtw8814a_hw_spec) }, + { USB_DEVICE_AND_INTERFACE_INFO(0x0846, 0x9054, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&(rtw8814a_hw_spec) }, + { USB_DEVICE_AND_INTERFACE_INFO(0x0b05, 0x1817, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&(rtw8814a_hw_spec) }, + { USB_DEVICE_AND_INTERFACE_INFO(0x0b05, 0x1852, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&(rtw8814a_hw_spec) }, + { USB_DEVICE_AND_INTERFACE_INFO(0x0b05, 0x1853, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&(rtw8814a_hw_spec) }, + { USB_DEVICE_AND_INTERFACE_INFO(0x0e66, 0x0026, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&(rtw8814a_hw_spec) }, + { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x331a, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&(rtw8814a_hw_spec) }, + { USB_DEVICE_AND_INTERFACE_INFO(0x20f4, 0x809a, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&(rtw8814a_hw_spec) }, + { USB_DEVICE_AND_INTERFACE_INFO(0x20f4, 0x809b, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&(rtw8814a_hw_spec) }, + { USB_DEVICE_AND_INTERFACE_INFO(0x2357, 0x0106, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&(rtw8814a_hw_spec) }, + { USB_DEVICE_AND_INTERFACE_INFO(0x7392, 0xa834, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&(rtw8814a_hw_spec) }, + { USB_DEVICE_AND_INTERFACE_INFO(0x7392, 0xa833, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&(rtw8814a_hw_spec) 
}, + {}, +}; +MODULE_DEVICE_TABLE(usb, rtw_8814au_id_table); + +static struct usb_driver rtw_8814au_driver = { + .name = "rtw_8814au", + .id_table = rtw_8814au_id_table, + .probe = rtw_usb_probe, + .disconnect = rtw_usb_disconnect, +}; +module_usb_driver(rtw_8814au_driver); + +MODULE_AUTHOR("Bitterblue Smith "); +MODULE_DESCRIPTION("Realtek 802.11ac wireless 8814au driver"); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821c.c b/drivers/net/wireless/realtek/rtw88/rtw8821c.c index eb7e34c545d0c..0ade7f11cbd2e 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8821c.c +++ b/drivers/net/wireless/realtek/rtw88/rtw8821c.c @@ -680,11 +680,11 @@ static void query_phy_status(struct rtw_dev *rtwdev, u8 *phy_status, } static void -rtw8821c_set_tx_power_index_by_rate(struct rtw_dev *rtwdev, u8 path, u8 rs) +rtw8821c_set_tx_power_index_by_rate(struct rtw_dev *rtwdev, u8 path, + u8 rs, u32 *phy_pwr_idx) { struct rtw_hal *hal = &rtwdev->hal; static const u32 offset_txagc[2] = {0x1d00, 0x1d80}; - static u32 phy_pwr_idx; u8 rate, rate_idx, pwr_index, shift; int j; @@ -692,12 +692,12 @@ rtw8821c_set_tx_power_index_by_rate(struct rtw_dev *rtwdev, u8 path, u8 rs) rate = rtw_rate_section[rs][j]; pwr_index = hal->tx_pwr_tbl[path][rate]; shift = rate & 0x3; - phy_pwr_idx |= ((u32)pwr_index << (shift * 8)); + *phy_pwr_idx |= ((u32)pwr_index << (shift * 8)); if (shift == 0x3 || rate == DESC_RATEVHT1SS_MCS9) { rate_idx = rate & 0xfc; rtw_write32(rtwdev, offset_txagc[path] + rate_idx, - phy_pwr_idx); - phy_pwr_idx = 0; + *phy_pwr_idx); + *phy_pwr_idx = 0; } } } @@ -705,14 +705,16 @@ rtw8821c_set_tx_power_index_by_rate(struct rtw_dev *rtwdev, u8 path, u8 rs) static void rtw8821c_set_tx_power_index(struct rtw_dev *rtwdev) { struct rtw_hal *hal = &rtwdev->hal; + u32 phy_pwr_idx = 0; int rs, path; for (path = 0; path < hal->rf_path_num; path++) { - for (rs = 0; rs < RTW_RATE_SECTION_MAX; rs++) { + for (rs = 0; rs <= __RTW_RATE_SECTION_2SS_MAX; rs++) { if (rs == RTW_RATE_SECTION_HT_2S || rs == RTW_RATE_SECTION_VHT_2S) continue; - rtw8821c_set_tx_power_index_by_rate(rtwdev, path, rs); + rtw8821c_set_tx_power_index_by_rate(rtwdev, path, rs, + &phy_pwr_idx); } } } diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822b.c b/drivers/net/wireless/realtek/rtw88/rtw8822b.c index 7f03903ddf4bb..b4934da88e33a 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8822b.c +++ b/drivers/net/wireless/realtek/rtw88/rtw8822b.c @@ -935,11 +935,11 @@ static void query_phy_status(struct rtw_dev *rtwdev, u8 *phy_status, } static void -rtw8822b_set_tx_power_index_by_rate(struct rtw_dev *rtwdev, u8 path, u8 rs) +rtw8822b_set_tx_power_index_by_rate(struct rtw_dev *rtwdev, u8 path, + u8 rs, u32 *phy_pwr_idx) { struct rtw_hal *hal = &rtwdev->hal; static const u32 offset_txagc[2] = {0x1d00, 0x1d80}; - static u32 phy_pwr_idx; u8 rate, rate_idx, pwr_index, shift; int j; @@ -947,12 +947,12 @@ rtw8822b_set_tx_power_index_by_rate(struct rtw_dev *rtwdev, u8 path, u8 rs) rate = rtw_rate_section[rs][j]; pwr_index = hal->tx_pwr_tbl[path][rate]; shift = rate & 0x3; - phy_pwr_idx |= ((u32)pwr_index << (shift * 8)); + *phy_pwr_idx |= ((u32)pwr_index << (shift * 8)); if (shift == 0x3) { rate_idx = rate & 0xfc; rtw_write32(rtwdev, offset_txagc[path] + rate_idx, - phy_pwr_idx); - phy_pwr_idx = 0; + *phy_pwr_idx); + *phy_pwr_idx = 0; } } } @@ -960,11 +960,13 @@ rtw8822b_set_tx_power_index_by_rate(struct rtw_dev *rtwdev, u8 path, u8 rs) static void rtw8822b_set_tx_power_index(struct rtw_dev *rtwdev) { struct rtw_hal *hal = 
&rtwdev->hal; + u32 phy_pwr_idx = 0; int rs, path; for (path = 0; path < hal->rf_path_num; path++) { - for (rs = 0; rs < RTW_RATE_SECTION_MAX; rs++) - rtw8822b_set_tx_power_index_by_rate(rtwdev, path, rs); + for (rs = 0; rs <= __RTW_RATE_SECTION_2SS_MAX; rs++) + rtw8822b_set_tx_power_index_by_rate(rtwdev, path, rs, + &phy_pwr_idx); } } diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822bu.c b/drivers/net/wireless/realtek/rtw88/rtw8822bu.c index 8883300fc6adb..572d1f31832ee 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8822bu.c +++ b/drivers/net/wireless/realtek/rtw88/rtw8822bu.c @@ -73,6 +73,10 @@ static const struct usb_device_id rtw_8822bu_id_table[] = { .driver_info = (kernel_ulong_t)&(rtw8822b_hw_spec) }, /* ELECOM WDB-867DU3S */ { USB_DEVICE_AND_INTERFACE_INFO(0x2c4e, 0x0107, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t)&(rtw8822b_hw_spec) }, /* Mercusys MA30H */ + { USB_DEVICE_AND_INTERFACE_INFO(0x2c4e, 0x010a, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&(rtw8822b_hw_spec) }, /* Mercusys MA30N */ + { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x3322, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&(rtw8822b_hw_spec) }, /* D-Link DWA-T185 rev. A1 */ {}, }; MODULE_DEVICE_TABLE(usb, rtw_8822bu_id_table); diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c.c b/drivers/net/wireless/realtek/rtw88/rtw8822c.c index ec362a817f5f5..5e53e0db177ef 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8822c.c +++ b/drivers/net/wireless/realtek/rtw88/rtw8822c.c @@ -2746,7 +2746,7 @@ static void rtw8822c_set_tx_power_index(struct rtw_dev *rtwdev) s8 diff_idx[4]; rtw8822c_set_write_tx_power_ref(rtwdev, pwr_ref_cck, pwr_ref_ofdm); - for (rs = 0; rs < RTW_RATE_SECTION_MAX; rs++) { + for (rs = 0; rs <= __RTW_RATE_SECTION_2SS_MAX; rs++) { for (j = 0; j < rtw_rate_size[rs]; j++) { rate = rtw_rate_section[rs][j]; pwr_a = hal->tx_pwr_tbl[RF_PATH_A][rate]; diff --git a/drivers/net/wireless/realtek/rtw88/rtw88xxa.c b/drivers/net/wireless/realtek/rtw88/rtw88xxa.c index 71e61b9c0becf..0fa943271fb64 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw88xxa.c +++ b/drivers/net/wireless/realtek/rtw88/rtw88xxa.c @@ -1637,7 +1637,7 @@ void rtw88xxa_set_tx_power_index(struct rtw_dev *rtwdev) int rs, path; for (path = 0; path < hal->rf_path_num; path++) { - for (rs = 0; rs < RTW_RATE_SECTION_MAX; rs++) { + for (rs = 0; rs <= __RTW_RATE_SECTION_2SS_MAX; rs++) { if (hal->rf_path_num == 1 && (rs == RTW_RATE_SECTION_HT_2S || rs == RTW_RATE_SECTION_VHT_2S)) diff --git a/drivers/net/wireless/realtek/rtw88/rx.c b/drivers/net/wireless/realtek/rtw88/rx.c index 90fc8a5fa89e7..8b0afaaffaa0e 100644 --- a/drivers/net/wireless/realtek/rtw88/rx.c +++ b/drivers/net/wireless/realtek/rtw88/rx.c @@ -73,6 +73,12 @@ static void rtw_rx_phy_stat(struct rtw_dev *rtwdev, rate_ss_evm = 2; evm_id = RTW_EVM_2SS_A; break; + case DESC_RATEMCS16...DESC_RATEMCS23: + case DESC_RATEVHT3SS_MCS0...DESC_RATEVHT3SS_MCS9: + rate_ss = 3; + rate_ss_evm = 3; + evm_id = RTW_EVM_3SS_A; + break; default: rtw_warn(rtwdev, "unknown pkt rate = %d\n", pkt_stat->rate); return; diff --git a/drivers/net/wireless/realtek/rtw88/sar.c b/drivers/net/wireless/realtek/rtw88/sar.c index c472f1502b82a..50b9c2412bb1c 100644 --- a/drivers/net/wireless/realtek/rtw88/sar.c +++ b/drivers/net/wireless/realtek/rtw88/sar.c @@ -97,7 +97,7 @@ int rtw_set_sar_specs(struct rtw_dev *rtwdev, power, BIT(RTW_COMMON_SAR_FCT)); for (j = 0; j < RTW_RF_PATH_MAX; j++) { - for (k = 0; k < RTW_RATE_SECTION_MAX; k++) { + for (k = 0; k < RTW_RATE_SECTION_NUM; k++) { arg = 
(struct rtw_sar_arg){ .sar_band = idx, .path = j, diff --git a/drivers/net/wireless/realtek/rtw88/sdio.c b/drivers/net/wireless/realtek/rtw88/sdio.c index e024061bdbf70..6209a49312f17 100644 --- a/drivers/net/wireless/realtek/rtw88/sdio.c +++ b/drivers/net/wireless/realtek/rtw88/sdio.c @@ -1147,7 +1147,7 @@ static void rtw_sdio_declaim(struct rtw_dev *rtwdev, sdio_release_host(sdio_func); } -static struct rtw_hci_ops rtw_sdio_ops = { +static const struct rtw_hci_ops rtw_sdio_ops = { .tx_write = rtw_sdio_tx_write, .tx_kick_off = rtw_sdio_tx_kick_off, .setup = rtw_sdio_setup, diff --git a/drivers/net/wireless/realtek/rtw88/usb.c b/drivers/net/wireless/realtek/rtw88/usb.c index c4908db4ff0e8..c8092fa0d9f13 100644 --- a/drivers/net/wireless/realtek/rtw88/usb.c +++ b/drivers/net/wireless/realtek/rtw88/usb.c @@ -881,7 +881,7 @@ static void rtw_usb_dynamic_rx_agg(struct rtw_dev *rtwdev, bool enable) } } -static struct rtw_hci_ops rtw_usb_ops = { +static const struct rtw_hci_ops rtw_usb_ops = { .tx_write = rtw_usb_tx_write, .tx_kick_off = rtw_usb_tx_kick_off, .setup = rtw_usb_setup, diff --git a/drivers/net/wireless/realtek/rtw88/util.c b/drivers/net/wireless/realtek/rtw88/util.c index e222d3c01a77e..66819f6944055 100644 --- a/drivers/net/wireless/realtek/rtw88/util.c +++ b/drivers/net/wireless/realtek/rtw88/util.c @@ -101,7 +101,8 @@ void rtw_desc_to_mcsrate(u16 rate, u8 *mcs, u8 *nss) *nss = 4; *mcs = rate - DESC_RATEVHT4SS_MCS0; } else if (rate >= DESC_RATEMCS0 && - rate <= DESC_RATEMCS15) { + rate <= DESC_RATEMCS31) { + *nss = 0; *mcs = rate - DESC_RATEMCS0; } } diff --git a/drivers/net/wireless/realtek/rtw89/Kconfig b/drivers/net/wireless/realtek/rtw89/Kconfig index b1c86cdd9c0e8..205d7ecca7d78 100644 --- a/drivers/net/wireless/realtek/rtw89/Kconfig +++ b/drivers/net/wireless/realtek/rtw89/Kconfig @@ -123,7 +123,7 @@ config RTW89_DEBUGMSG config RTW89_DEBUGFS bool "Realtek rtw89 debugfs support" - depends on RTW89_CORE + depends on RTW89_CORE && CFG80211_DEBUGFS select RTW89_DEBUG help Enable debugfs support diff --git a/drivers/net/wireless/realtek/rtw89/cam.c b/drivers/net/wireless/realtek/rtw89/cam.c index 8fa1e6c1ce139..eca3d767ff603 100644 --- a/drivers/net/wireless/realtek/rtw89/cam.c +++ b/drivers/net/wireless/realtek/rtw89/cam.c @@ -476,6 +476,12 @@ int rtw89_cam_sec_key_add(struct rtw89_dev *rtwdev, case WLAN_CIPHER_SUITE_WEP104: hw_key_type = RTW89_SEC_KEY_TYPE_WEP104; break; + case WLAN_CIPHER_SUITE_TKIP: + if (!chip->hw_tkip_crypto) + return -EOPNOTSUPP; + hw_key_type = RTW89_SEC_KEY_TYPE_TKIP; + key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC; + break; case WLAN_CIPHER_SUITE_CCMP: hw_key_type = RTW89_SEC_KEY_TYPE_CCMP128; if (!chip->hw_mgmt_tx_encrypt) diff --git a/drivers/net/wireless/realtek/rtw89/chan.c b/drivers/net/wireless/realtek/rtw89/chan.c index 4df4e04c3e67d..f60e93870b093 100644 --- a/drivers/net/wireless/realtek/rtw89/chan.c +++ b/drivers/net/wireless/realtek/rtw89/chan.c @@ -8,6 +8,7 @@ #include "fw.h" #include "mac.h" #include "ps.h" +#include "sar.h" #include "util.h" static void rtw89_swap_chanctx(struct rtw89_dev *rtwdev, @@ -155,7 +156,7 @@ int rtw89_iterate_entity_chan(struct rtw89_dev *rtwdev, int ret; u8 idx; - lockdep_assert_held(&rtwdev->mutex); + lockdep_assert_wiphy(rtwdev->hw->wiphy); for_each_set_bit(idx, hal->entity_map, NUM_OF_RTW89_CHANCTX) { chan = rtw89_chan_get(rtwdev, idx); @@ -310,7 +311,7 @@ const struct rtw89_chan *__rtw89_mgnt_chan_get(struct rtw89_dev *rtwdev, enum rtw89_entity_mode mode; u8 role_index; - lockdep_assert_held(&rtwdev->mutex); 
+ lockdep_assert_wiphy(rtwdev->hw->wiphy); if (unlikely(link_index >= __RTW89_MLD_MAX_LINK_NUM)) { WARN(1, "link index %u is invalid (max link inst num: %d)\n", @@ -366,7 +367,7 @@ static void rtw89_entity_recalc_mgnt_roles(struct rtw89_dev *rtwdev) u8 pos = 0; int i, j; - lockdep_assert_held(&rtwdev->mutex); + lockdep_assert_wiphy(rtwdev->hw->wiphy); for (i = 0; i < RTW89_MAX_INTERFACE_NUM; i++) mgnt->active_roles[i] = NULL; @@ -427,7 +428,7 @@ enum rtw89_entity_mode rtw89_entity_recalc(struct rtw89_dev *rtwdev) struct rtw89_chan chan; u8 idx; - lockdep_assert_held(&rtwdev->mutex); + lockdep_assert_wiphy(rtwdev->hw->wiphy); bitmap_copy(recalc_map, hal->entity_map, NUM_OF_RTW89_CHANCTX); @@ -2390,7 +2391,7 @@ static void rtw89_mcc_update_limit(struct rtw89_dev *rtwdev) rtw89_iterate_mcc_roles(rtwdev, rtw89_mcc_upd_lmt_iterator, NULL); } -void rtw89_chanctx_work(struct work_struct *work) +void rtw89_chanctx_work(struct wiphy *wiphy, struct wiphy_work *work) { struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev, chanctx_work.work); @@ -2401,12 +2402,10 @@ void rtw89_chanctx_work(struct work_struct *work) int ret; int i; - mutex_lock(&rtwdev->mutex); + lockdep_assert_wiphy(wiphy); - if (hal->entity_pause) { - mutex_unlock(&rtwdev->mutex); + if (hal->entity_pause) return; - } for (i = 0; i < NUM_OF_RTW89_CHANCTX_CHANGES; i++) { if (test_and_clear_bit(i, hal->changes)) @@ -2445,8 +2444,6 @@ void rtw89_chanctx_work(struct work_struct *work) default: break; } - - mutex_unlock(&rtwdev->mutex); } void rtw89_queue_chanctx_change(struct rtw89_dev *rtwdev, @@ -2477,8 +2474,8 @@ void rtw89_queue_chanctx_change(struct rtw89_dev *rtwdev, rtw89_debug(rtwdev, RTW89_DBG_CHAN, "queue chanctx work for mode %d with delay %d us\n", mode, delay); - ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->chanctx_work, - usecs_to_jiffies(delay)); + wiphy_delayed_work_queue(rtwdev->hw->wiphy, &rtwdev->chanctx_work, + usecs_to_jiffies(delay)); } void rtw89_queue_chanctx_work(struct rtw89_dev *rtwdev) @@ -2491,7 +2488,7 @@ void rtw89_chanctx_track(struct rtw89_dev *rtwdev) struct rtw89_hal *hal = &rtwdev->hal; enum rtw89_entity_mode mode; - lockdep_assert_held(&rtwdev->mutex); + lockdep_assert_wiphy(rtwdev->hw->wiphy); if (hal->entity_pause) return; @@ -2512,7 +2509,7 @@ void rtw89_chanctx_pause(struct rtw89_dev *rtwdev, struct rtw89_hal *hal = &rtwdev->hal; enum rtw89_entity_mode mode; - lockdep_assert_held(&rtwdev->mutex); + lockdep_assert_wiphy(rtwdev->hw->wiphy); if (hal->entity_pause) return; @@ -2555,7 +2552,7 @@ void rtw89_chanctx_proceed(struct rtw89_dev *rtwdev, enum rtw89_entity_mode mode; int ret; - lockdep_assert_held(&rtwdev->mutex); + lockdep_assert_wiphy(rtwdev->hw->wiphy); if (unlikely(!hal->entity_pause)) { rtw89_chanctx_proceed_cb(rtwdev, cb_parm); @@ -2677,6 +2674,7 @@ int rtw89_chanctx_ops_assign_vif(struct rtw89_dev *rtwdev, struct rtw89_hal *hal = &rtwdev->hal; struct rtw89_entity_mgnt *mgnt = &hal->entity_mgnt; struct rtw89_entity_weight w = {}; + int ret; rtwvif_link->chanctx_idx = cfg->idx; rtwvif_link->chanctx_assigned = true; @@ -2696,7 +2694,13 @@ int rtw89_chanctx_ops_assign_vif(struct rtw89_dev *rtwdev, rtw89_swap_chanctx(rtwdev, cfg->idx, RTW89_CHANCTX_0); out: - return rtw89_set_channel(rtwdev); + ret = rtw89_set_channel(rtwdev); + if (ret) + return ret; + + rtw89_tas_reset(rtwdev, true); + + return 0; } void rtw89_chanctx_ops_unassign_vif(struct rtw89_dev *rtwdev, diff --git a/drivers/net/wireless/realtek/rtw89/chan.h b/drivers/net/wireless/realtek/rtw89/chan.h index 
092a6f676894f..e6391f6f2aa78 100644 --- a/drivers/net/wireless/realtek/rtw89/chan.h +++ b/drivers/net/wireless/realtek/rtw89/chan.h @@ -99,7 +99,7 @@ void rtw89_config_roc_chandef(struct rtw89_dev *rtwdev, const struct cfg80211_chan_def *chandef); void rtw89_entity_init(struct rtw89_dev *rtwdev); enum rtw89_entity_mode rtw89_entity_recalc(struct rtw89_dev *rtwdev); -void rtw89_chanctx_work(struct work_struct *work); +void rtw89_chanctx_work(struct wiphy *wiphy, struct wiphy_work *work); void rtw89_queue_chanctx_work(struct rtw89_dev *rtwdev); void rtw89_queue_chanctx_change(struct rtw89_dev *rtwdev, enum rtw89_chanctx_changes change); diff --git a/drivers/net/wireless/realtek/rtw89/coex.c b/drivers/net/wireless/realtek/rtw89/coex.c index 68316d44b2043..5ccf0cbaed2fa 100644 --- a/drivers/net/wireless/realtek/rtw89/coex.c +++ b/drivers/net/wireless/realtek/rtw89/coex.c @@ -10,7 +10,7 @@ #include "ps.h" #include "reg.h" -#define RTW89_COEX_VERSION 0x07000113 +#define RTW89_COEX_VERSION 0x07000413 #define FCXDEF_STEP 50 /* MUST <= FCXMAX_STEP and match with wl fw*/ #define BTC_E2G_LIMIT_DEF 80 @@ -89,10 +89,10 @@ static const struct rtw89_btc_fbtc_slot s_def[] = { [CXST_B4] = __DEF_FBTC_SLOT(50, 0xe5555555, SLOT_MIX), [CXST_LK] = __DEF_FBTC_SLOT(20, 0xea5a5a5a, SLOT_ISO), [CXST_BLK] = __DEF_FBTC_SLOT(500, 0x55555555, SLOT_MIX), - [CXST_E2G] = __DEF_FBTC_SLOT(0, 0xea5a5a5a, SLOT_MIX), - [CXST_E5G] = __DEF_FBTC_SLOT(0, 0xffffffff, SLOT_ISO), + [CXST_E2G] = __DEF_FBTC_SLOT(5, 0xea5a5a5a, SLOT_MIX), + [CXST_E5G] = __DEF_FBTC_SLOT(5, 0xffffffff, SLOT_ISO), [CXST_EBT] = __DEF_FBTC_SLOT(5, 0xe5555555, SLOT_MIX), - [CXST_ENULL] = __DEF_FBTC_SLOT(0, 0xaaaaaaaa, SLOT_ISO), + [CXST_ENULL] = __DEF_FBTC_SLOT(5, 0xaaaaaaaa, SLOT_ISO), [CXST_WLK] = __DEF_FBTC_SLOT(250, 0xea5a5a5a, SLOT_MIX), [CXST_W1FDD] = __DEF_FBTC_SLOT(50, 0xffffffff, SLOT_ISO), [CXST_B1FDD] = __DEF_FBTC_SLOT(50, 0xffffdfff, SLOT_ISO), @@ -132,6 +132,14 @@ static const u32 cxtbl[] = { static const struct rtw89_btc_ver rtw89_btc_ver_defs[] = { /* firmware version must be in decreasing order for each chip */ + {RTL8852BT, RTW89_FW_VER_CODE(0, 29, 122, 0), + .fcxbtcrpt = 8, .fcxtdma = 7, .fcxslots = 7, .fcxcysta = 7, + .fcxstep = 7, .fcxnullsta = 7, .fcxmreg = 7, .fcxgpiodbg = 7, + .fcxbtver = 7, .fcxbtscan = 7, .fcxbtafh = 7, .fcxbtdevinfo = 7, + .fwlrole = 7, .frptmap = 3, .fcxctrl = 7, .fcxinit = 7, + .fwevntrptl = 1, .fwc2hfunc = 2, .drvinfo_type = 1, .info_buf = 1800, + .max_role_num = 6, + }, {RTL8852BT, RTW89_FW_VER_CODE(0, 29, 90, 0), .fcxbtcrpt = 7, .fcxtdma = 7, .fcxslots = 7, .fcxcysta = 7, .fcxstep = 7, .fcxnullsta = 7, .fcxmreg = 7, .fcxgpiodbg = 7, @@ -1372,11 +1380,9 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev, } else if (ver->fcxbtcrpt == 8) { pfinfo = &pfwinfo->rpt_ctrl.finfo.v8; pcinfo->req_len = sizeof(pfwinfo->rpt_ctrl.finfo.v8); - break; } else if (ver->fcxbtcrpt == 7) { pfinfo = &pfwinfo->rpt_ctrl.finfo.v7; pcinfo->req_len = sizeof(pfwinfo->rpt_ctrl.finfo.v7); - break; } else { goto err; } @@ -1534,6 +1540,9 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev, } else if (ver->fcxbtafh == 2) { pfinfo = &pfwinfo->rpt_fbtc_btafh.finfo.v2; pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_btafh.finfo.v2); + } else if (ver->fcxbtafh == 7) { + pfinfo = &pfwinfo->rpt_fbtc_btafh.finfo.v7; + pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_btafh.finfo.v7); } else { goto err; } @@ -1602,7 +1611,7 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev, wl->ver_info.fw = le32_to_cpu(prpt->v4.wl_fw_info.fw_ver); 
dm->wl_fw_cx_offload = !!le32_to_cpu(prpt->v4.wl_fw_info.cx_offload); - for (i = RTW89_PHY_0; i < RTW89_PHY_MAX; i++) + for (i = RTW89_PHY_0; i < RTW89_PHY_NUM; i++) memcpy(&dm->gnt.band[i], &prpt->v4.gnt_val[i], sizeof(dm->gnt.band[i])); @@ -1634,7 +1643,7 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev, wl->ver_info.fw = le32_to_cpu(prpt->v5.rpt_info.fw_ver); dm->wl_fw_cx_offload = 0; - for (i = RTW89_PHY_0; i < RTW89_PHY_MAX; i++) + for (i = RTW89_PHY_0; i < RTW89_PHY_NUM; i++) memcpy(&dm->gnt.band[i], &prpt->v5.gnt_val[i][0], sizeof(dm->gnt.band[i])); @@ -1661,7 +1670,7 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev, wl->ver_info.fw = le32_to_cpu(prpt->v105.rpt_info.fw_ver); dm->wl_fw_cx_offload = 0; - for (i = RTW89_PHY_0; i < RTW89_PHY_MAX; i++) + for (i = RTW89_PHY_0; i < RTW89_PHY_NUM; i++) memcpy(&dm->gnt.band[i], &prpt->v105.gnt_val[i][0], sizeof(dm->gnt.band[i])); @@ -1687,7 +1696,7 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev, wl->ver_info.fw_coex = le32_to_cpu(prpt->v7.rpt_info.cx_ver); wl->ver_info.fw = le32_to_cpu(prpt->v7.rpt_info.fw_ver); - for (i = RTW89_PHY_0; i < RTW89_PHY_MAX; i++) + for (i = RTW89_PHY_0; i < RTW89_PHY_NUM; i++) memcpy(&dm->gnt.band[i], &prpt->v7.gnt_val[i][0], sizeof(dm->gnt.band[i])); @@ -1719,7 +1728,7 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev, wl->ver_info.fw_coex = le32_to_cpu(prpt->v8.rpt_info.cx_ver); wl->ver_info.fw = le32_to_cpu(prpt->v8.rpt_info.fw_ver); - for (i = RTW89_PHY_0; i < RTW89_PHY_MAX; i++) + for (i = RTW89_PHY_0; i < RTW89_PHY_NUM; i++) memcpy(&dm->gnt.band[i], &prpt->v8.gnt_val[i][0], sizeof(dm->gnt.band[i])); @@ -2706,7 +2715,7 @@ static void _set_gnt(struct rtw89_dev *rtwdev, u8 phy_map, u8 wl_state, u8 bt_st if (phy_map > BTC_PHY_ALL) return; - for (i = 0; i < RTW89_PHY_MAX; i++) { + for (i = 0; i < RTW89_PHY_NUM; i++) { if (!(phy_map & BIT(i))) continue; @@ -2755,7 +2764,7 @@ static void _set_gnt_v1(struct rtw89_dev *rtwdev, u8 phy_map, if (phy_map > BTC_PHY_ALL) return; - for (i = 0; i < RTW89_PHY_MAX; i++) { + for (i = 0; i < RTW89_PHY_NUM; i++) { if (!(phy_map & BIT(i))) continue; @@ -2955,7 +2964,7 @@ static void _set_rf_trx_para(struct rtw89_dev *rtwdev) if (ver->fwlrole == 0) { link_mode = wl->role_info.link_mode; - for (i = 0; i < RTW89_PHY_MAX; i++) { + for (i = 0; i < RTW89_PHY_NUM; i++) { if (wl->dbcc_info.real_band[i] == RTW89_BAND_2G) dbcc_2g_phy = i; } @@ -4240,7 +4249,7 @@ static void _set_ant_v0(struct rtw89_dev *rtwdev, bool force_exec, case BTC_ANT_W2G: rtw89_chip_cfg_ctrl_path(rtwdev, BTC_CTRL_BY_WL); if (rtwdev->dbcc_en) { - for (i = 0; i < RTW89_PHY_MAX; i++) { + for (i = 0; i < RTW89_PHY_NUM; i++) { b2g = (wl_dinfo->real_band[i] == RTW89_BAND_2G); gnt_wl_ctrl = b2g ? 
BTC_GNT_HW : BTC_GNT_SW_HI; @@ -4583,23 +4592,16 @@ static void _action_bt_hid(struct rtw89_dev *rtwdev) static void _action_bt_a2dp(struct rtw89_dev *rtwdev) { struct rtw89_btc *btc = &rtwdev->btc; - struct rtw89_btc_bt_link_info *bt_linfo = &btc->cx.bt.link_info; - struct rtw89_btc_bt_a2dp_desc a2dp = bt_linfo->a2dp_desc; struct rtw89_btc_dm *dm = &btc->dm; _set_ant(rtwdev, NM_EXEC, BTC_PHY_ALL, BTC_ANT_W2G); + dm->slot_dur[CXST_W1] = 20; + dm->slot_dur[CXST_B1] = BTC_B1_MAX; + switch (btc->cx.state_map) { case BTC_WBUSY_BNOSCAN: /* wl-busy + bt-A2DP */ - if (a2dp.vendor_id == 0x4c || dm->leak_ap) { - dm->slot_dur[CXST_W1] = 40; - dm->slot_dur[CXST_B1] = 200; - _set_policy(rtwdev, - BTC_CXP_PAUTO_TDW1B1, BTC_ACT_BT_A2DP); - } else { - _set_policy(rtwdev, - BTC_CXP_PAUTO_TD50B1, BTC_ACT_BT_A2DP); - } + _set_policy(rtwdev, BTC_CXP_PAUTO_TDW1B1, BTC_ACT_BT_A2DP); break; case BTC_WBUSY_BSCAN: /* wl-busy + bt-inq + bt-A2DP */ _set_policy(rtwdev, BTC_CXP_PAUTO2_TD3050, BTC_ACT_BT_A2DP); @@ -4609,15 +4611,10 @@ static void _action_bt_a2dp(struct rtw89_dev *rtwdev) break; case BTC_WSCAN_BNOSCAN: /* wl-scan + bt-A2DP */ case BTC_WLINKING: /* wl-connecting + bt-A2DP */ - if (a2dp.vendor_id == 0x4c || dm->leak_ap) { - dm->slot_dur[CXST_W1] = 40; - dm->slot_dur[CXST_B1] = 200; - _set_policy(rtwdev, BTC_CXP_AUTO_TDW1B1, - BTC_ACT_BT_A2DP); - } else { - _set_policy(rtwdev, BTC_CXP_AUTO_TD50B1, - BTC_ACT_BT_A2DP); - } + if (btc->cx.wl.rfk_info.con_rfk) + _set_policy(rtwdev, BTC_CXP_OFF_BT, BTC_ACT_BT_A2DP); + else + _set_policy(rtwdev, BTC_CXP_AUTO_TDW1B1, BTC_ACT_BT_A2DP); break; case BTC_WIDLE: /* wl-idle + bt-A2DP */ _set_policy(rtwdev, BTC_CXP_AUTO_TD20B1, BTC_ACT_BT_A2DP); @@ -4645,7 +4642,10 @@ static void _action_bt_a2dpsink(struct rtw89_dev *rtwdev) _set_policy(rtwdev, BTC_CXP_FIX_TD2060, BTC_ACT_BT_A2DPSINK); break; case BTC_WLINKING: /* wl-connecting + bt-A2dp_Sink */ - _set_policy(rtwdev, BTC_CXP_FIX_TD3030, BTC_ACT_BT_A2DPSINK); + if (btc->cx.wl.rfk_info.con_rfk) + _set_policy(rtwdev, BTC_CXP_OFF_BT, BTC_ACT_BT_A2DPSINK); + else + _set_policy(rtwdev, BTC_CXP_FIX_TD3030, BTC_ACT_BT_A2DPSINK); break; case BTC_WIDLE: /* wl-idle + bt-A2dp_Sink */ _set_policy(rtwdev, BTC_CXP_FIX_TD2080, BTC_ACT_BT_A2DPSINK); @@ -4693,27 +4693,20 @@ static void _action_bt_pan(struct rtw89_dev *rtwdev) static void _action_bt_a2dp_hid(struct rtw89_dev *rtwdev) { struct rtw89_btc *btc = &rtwdev->btc; - struct rtw89_btc_bt_link_info *bt_linfo = &btc->cx.bt.link_info; - struct rtw89_btc_bt_a2dp_desc a2dp = bt_linfo->a2dp_desc; struct rtw89_btc_dm *dm = &btc->dm; _set_ant(rtwdev, NM_EXEC, BTC_PHY_ALL, BTC_ANT_W2G); + dm->slot_dur[CXST_W1] = 20; + dm->slot_dur[CXST_B1] = BTC_B1_MAX; + switch (btc->cx.state_map) { case BTC_WBUSY_BNOSCAN: /* wl-busy + bt-A2DP+HID */ case BTC_WIDLE: /* wl-idle + bt-A2DP */ - if (a2dp.vendor_id == 0x4c || dm->leak_ap) { - dm->slot_dur[CXST_W1] = 40; - dm->slot_dur[CXST_B1] = 200; - _set_policy(rtwdev, - BTC_CXP_PAUTO_TDW1B1, BTC_ACT_BT_A2DP_HID); - } else { - _set_policy(rtwdev, - BTC_CXP_PAUTO_TD50B1, BTC_ACT_BT_A2DP_HID); - } + _set_policy(rtwdev, BTC_CXP_PAUTO_TDW1B1, BTC_ACT_BT_A2DP_HID); break; case BTC_WBUSY_BSCAN: /* wl-busy + bt-inq + bt-A2DP+HID */ - _set_policy(rtwdev, BTC_CXP_PAUTO2_TD3050, BTC_ACT_BT_A2DP_HID); + _set_policy(rtwdev, BTC_CXP_PAUTO2_TD3070, BTC_ACT_BT_A2DP_HID); break; case BTC_WSCAN_BSCAN: /* wl-scan + bt-inq + bt-A2DP+HID */ @@ -4721,15 +4714,10 @@ static void _action_bt_a2dp_hid(struct rtw89_dev *rtwdev) break; case BTC_WSCAN_BNOSCAN: /* wl-scan + bt-A2DP+HID 
*/ case BTC_WLINKING: /* wl-connecting + bt-A2DP+HID */ - if (a2dp.vendor_id == 0x4c || dm->leak_ap) { - dm->slot_dur[CXST_W1] = 40; - dm->slot_dur[CXST_B1] = 200; - _set_policy(rtwdev, BTC_CXP_AUTO_TDW1B1, - BTC_ACT_BT_A2DP_HID); - } else { - _set_policy(rtwdev, BTC_CXP_AUTO_TD50B1, - BTC_ACT_BT_A2DP_HID); - } + if (btc->cx.wl.rfk_info.con_rfk) + _set_policy(rtwdev, BTC_CXP_OFF_BT, BTC_ACT_BT_A2DP_HID); + else + _set_policy(rtwdev, BTC_CXP_AUTO_TDW1B1, BTC_ACT_BT_A2DP_HID); break; } } @@ -4905,9 +4893,9 @@ static void _set_btg_ctrl(struct rtw89_dev *rtwdev) if (rtwdev->dbcc_en) { if (ver->fwlrole == 0) { - wl_rinfo.dbcc_2g_phy = RTW89_PHY_MAX; + wl_rinfo.dbcc_2g_phy = RTW89_PHY_NUM; - for (i = 0; i < RTW89_PHY_MAX; i++) { + for (i = 0; i < RTW89_PHY_NUM; i++) { if (wl_dinfo->real_band[i] == RTW89_BAND_2G) wl_rinfo.dbcc_2g_phy = i; } @@ -5408,7 +5396,8 @@ static void _action_wl_scan(struct rtw89_dev *rtwdev) struct rtw89_btc_wl_info *wl = &btc->cx.wl; struct rtw89_btc_wl_dbcc_info *wl_dinfo = &wl->dbcc_info; - if (RTW89_CHK_FW_FEATURE(SCAN_OFFLOAD, &rtwdev->fw)) { + if (btc->cx.state_map != BTC_WLINKING && + RTW89_CHK_FW_FEATURE(SCAN_OFFLOAD, &rtwdev->fw)) { _action_wl_25g_mcc(rtwdev); rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], Scan offload!\n"); } else if (rtwdev->dbcc_en) { @@ -5798,7 +5787,7 @@ static void _update_wl_info(struct rtw89_dev *rtwdev) phy = wl_linfo[i].phy; /* check dbcc role */ - if (rtwdev->dbcc_en && phy < RTW89_PHY_MAX) { + if (rtwdev->dbcc_en && phy < RTW89_PHY_NUM) { wl_dinfo->role[phy] = wl_linfo[i].role; wl_dinfo->op_band[phy] = wl_linfo[i].band; _update_dbcc_band(rtwdev, phy); @@ -5948,7 +5937,7 @@ static void _update_wl_info_v1(struct rtw89_dev *rtwdev) phy = wl_linfo[i].phy; - if (rtwdev->dbcc_en && phy < RTW89_PHY_MAX) { + if (rtwdev->dbcc_en && phy < RTW89_PHY_NUM) { wl_dinfo->role[phy] = wl_linfo[i].role; wl_dinfo->op_band[phy] = wl_linfo[i].band; _update_dbcc_band(rtwdev, phy); @@ -6098,7 +6087,7 @@ static void _update_wl_info_v2(struct rtw89_dev *rtwdev) phy = wl_linfo[i].phy; - if (rtwdev->dbcc_en && phy < RTW89_PHY_MAX) { + if (rtwdev->dbcc_en && phy < RTW89_PHY_NUM) { wl_dinfo->role[phy] = wl_linfo[i].role; wl_dinfo->op_band[phy] = wl_linfo[i].band; _update_dbcc_band(rtwdev, phy); @@ -6389,7 +6378,7 @@ static void _update_wl_info_v7(struct rtw89_dev *rtwdev, u8 rid) struct rtw89_btc_wl_dbcc_info *wl_dinfo = &wl->dbcc_info; struct rtw89_btc_wl_link_info *wl_linfo = wl->link_info; struct rtw89_btc_wl_active_role_v7 *act_role = NULL; - u8 i, mode, cnt = 0, cnt_2g = 0, cnt_5g = 0, phy_now = RTW89_PHY_MAX, phy_dbcc; + u8 i, mode, cnt = 0, cnt_2g = 0, cnt_5g = 0, phy_now = RTW89_PHY_NUM, phy_dbcc; bool b2g = false, b5g = false, client_joined = false, client_inc_2g = false; u8 client_cnt_last[RTW89_BE_BTC_WL_MAX_ROLE_NUMBER] = {}; u8 cid_role[RTW89_BE_BTC_WL_MAX_ROLE_NUMBER] = {}; @@ -6400,7 +6389,7 @@ static void _update_wl_info_v7(struct rtw89_dev *rtwdev, u8 rid) memset(wl_rinfo, 0, sizeof(*wl_rinfo)); for (i = 0; i < RTW89_PORT_NUM; i++) { - if (!wl_linfo[i].active || wl_linfo[i].phy >= RTW89_PHY_MAX) + if (!wl_linfo[i].active || wl_linfo[i].phy >= RTW89_PHY_NUM) continue; act_role = &wl_rinfo->active_role[i]; @@ -6494,7 +6483,7 @@ static void _update_wl_info_v7(struct rtw89_dev *rtwdev, u8 rid) mode = _chk_dbcc(rtwdev, cid_ch, cid_phy, cid_role, &dbcc_2g_phy); /* correct 2G-located PHY band for gnt ctrl */ - if (dbcc_2g_phy < RTW89_PHY_MAX) + if (dbcc_2g_phy < RTW89_PHY_NUM) wl_dinfo->op_band[dbcc_2g_phy] = RTW89_BAND_2G; } else if (b2g && b5g && cnt == 2) 
{ mode = BTC_WLINK_25G_MCC; @@ -6720,7 +6709,7 @@ static void _update_wl_info_v8(struct rtw89_dev *rtwdev, u8 role_id, u8 rlink_id _fw_set_drv_info(rtwdev, CXDRVINFO_ROLE); } -void rtw89_coex_act1_work(struct work_struct *work) +void rtw89_coex_act1_work(struct wiphy *wiphy, struct wiphy_work *work) { struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev, coex_act1_work.work); @@ -6729,7 +6718,8 @@ void rtw89_coex_act1_work(struct work_struct *work) struct rtw89_btc_cx *cx = &btc->cx; struct rtw89_btc_wl_info *wl = &cx->wl; - mutex_lock(&rtwdev->mutex); + lockdep_assert_wiphy(wiphy); + rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], %s(): enter\n", __func__); dm->cnt_notify[BTC_NCNT_TIMER]++; if (wl->status.map._4way) @@ -6738,10 +6728,9 @@ void rtw89_coex_act1_work(struct work_struct *work) wl->status.map.connecting = false; _run_coex(rtwdev, BTC_RSN_ACT1_WORK); - mutex_unlock(&rtwdev->mutex); } -void rtw89_coex_bt_devinfo_work(struct work_struct *work) +void rtw89_coex_bt_devinfo_work(struct wiphy *wiphy, struct wiphy_work *work) { struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev, coex_bt_devinfo_work.work); @@ -6749,15 +6738,15 @@ void rtw89_coex_bt_devinfo_work(struct work_struct *work) struct rtw89_btc_dm *dm = &rtwdev->btc.dm; struct rtw89_btc_bt_a2dp_desc *a2dp = &btc->cx.bt.link_info.a2dp_desc; - mutex_lock(&rtwdev->mutex); + lockdep_assert_wiphy(wiphy); + rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], %s(): enter\n", __func__); dm->cnt_notify[BTC_NCNT_TIMER]++; a2dp->play_latency = 0; _run_coex(rtwdev, BTC_RSN_BT_DEVINFO_WORK); - mutex_unlock(&rtwdev->mutex); } -void rtw89_coex_rfk_chk_work(struct work_struct *work) +void rtw89_coex_rfk_chk_work(struct wiphy *wiphy, struct wiphy_work *work) { struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev, coex_rfk_chk_work.work); @@ -6766,7 +6755,8 @@ void rtw89_coex_rfk_chk_work(struct work_struct *work) struct rtw89_btc_cx *cx = &btc->cx; struct rtw89_btc_wl_info *wl = &cx->wl; - mutex_lock(&rtwdev->mutex); + lockdep_assert_wiphy(wiphy); + rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], %s(): enter\n", __func__); dm->cnt_notify[BTC_NCNT_TIMER]++; if (wl->rfk_info.state != BTC_WRFK_STOP) { @@ -6778,7 +6768,6 @@ void rtw89_coex_rfk_chk_work(struct work_struct *work) _write_scbd(rtwdev, BTC_WSCB_WLRFK, false); _run_coex(rtwdev, BTC_RSN_RFK_CHK_WORK); } - mutex_unlock(&rtwdev->mutex); } static void _update_bt_scbd(struct rtw89_dev *rtwdev, bool only_update) @@ -6882,7 +6871,7 @@ void _run_coex(struct rtw89_dev *rtwdev, enum btc_reason_and_action reason) struct rtw89_btc_wl_role_info_v8 *wl_rinfo_v8 = &wl->role_info_v8; u8 mode, igno_bt, always_freerun; - lockdep_assert_held(&rtwdev->mutex); + lockdep_assert_wiphy(rtwdev->hw->wiphy); dm->run_reason = reason; _update_dm_step(rtwdev, reason); @@ -7002,7 +6991,7 @@ void _run_coex(struct rtw89_dev *rtwdev, enum btc_reason_and_action reason) goto exit; } - if (wl->status.val & btc_scanning_map.val) { + if (wl->status.val & btc_scanning_map.val && !wl->rfk_info.con_rfk) { _action_wl_scan(rtwdev); bt->scan_rx_low_pri = true; goto exit; @@ -7187,7 +7176,7 @@ void rtw89_btc_ntfy_scan_start(struct rtw89_dev *rtwdev, u8 phy_idx, u8 band) "[BTC], %s(): phy_idx=%d, band=%d\n", __func__, phy_idx, band); - if (phy_idx >= RTW89_PHY_MAX) + if (phy_idx >= RTW89_PHY_NUM) return; btc->dm.cnt_notify[BTC_NCNT_SCAN_START]++; @@ -7223,6 +7212,8 @@ void rtw89_btc_ntfy_scan_finish(struct rtw89_dev *rtwdev, u8 phy_idx) _fw_set_drv_info(rtwdev, CXDRVINFO_DBCC); } + btc->dm.tdma_instant_excute = 1; + 
_run_coex(rtwdev, BTC_RSN_NTFY_SCAN_FINISH); } @@ -7235,7 +7226,7 @@ void rtw89_btc_ntfy_switch_band(struct rtw89_dev *rtwdev, u8 phy_idx, u8 band) "[BTC], %s(): phy_idx=%d, band=%d\n", __func__, phy_idx, band); - if (phy_idx >= RTW89_PHY_MAX) + if (phy_idx >= RTW89_PHY_NUM) return; btc->dm.cnt_notify[BTC_NCNT_SWITCH_BAND]++; @@ -7284,7 +7275,7 @@ void rtw89_btc_ntfy_specific_packet(struct rtw89_dev *rtwdev, "[BTC], %s(): EAPOL_End cnt=%d\n", __func__, cnt); wl->status.map._4way = false; - cancel_delayed_work(&rtwdev->coex_act1_work); + wiphy_delayed_work_cancel(rtwdev->hw->wiphy, &rtwdev->coex_act1_work); break; case PACKET_ARP: cnt = ++cx->cnt_wl[BTC_WCNT_ARP]; @@ -7303,56 +7294,56 @@ void rtw89_btc_ntfy_specific_packet(struct rtw89_dev *rtwdev, } if (delay_work) { - cancel_delayed_work(&rtwdev->coex_act1_work); - ieee80211_queue_delayed_work(rtwdev->hw, - &rtwdev->coex_act1_work, delay); + wiphy_delayed_work_cancel(rtwdev->hw->wiphy, &rtwdev->coex_act1_work); + wiphy_delayed_work_queue(rtwdev->hw->wiphy, + &rtwdev->coex_act1_work, delay); } btc->dm.cnt_notify[BTC_NCNT_SPECIAL_PACKET]++; _run_coex(rtwdev, BTC_RSN_NTFY_SPECIFIC_PACKET); } -void rtw89_btc_ntfy_eapol_packet_work(struct work_struct *work) +void rtw89_btc_ntfy_eapol_packet_work(struct wiphy *wiphy, struct wiphy_work *work) { struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev, btc.eapol_notify_work); - mutex_lock(&rtwdev->mutex); + lockdep_assert_wiphy(wiphy); + rtw89_leave_ps_mode(rtwdev); rtw89_btc_ntfy_specific_packet(rtwdev, PACKET_EAPOL); - mutex_unlock(&rtwdev->mutex); } -void rtw89_btc_ntfy_arp_packet_work(struct work_struct *work) +void rtw89_btc_ntfy_arp_packet_work(struct wiphy *wiphy, struct wiphy_work *work) { struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev, btc.arp_notify_work); - mutex_lock(&rtwdev->mutex); + lockdep_assert_wiphy(wiphy); + rtw89_btc_ntfy_specific_packet(rtwdev, PACKET_ARP); - mutex_unlock(&rtwdev->mutex); } -void rtw89_btc_ntfy_dhcp_packet_work(struct work_struct *work) +void rtw89_btc_ntfy_dhcp_packet_work(struct wiphy *wiphy, struct wiphy_work *work) { struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev, btc.dhcp_notify_work); - mutex_lock(&rtwdev->mutex); + lockdep_assert_wiphy(wiphy); + rtw89_leave_ps_mode(rtwdev); rtw89_btc_ntfy_specific_packet(rtwdev, PACKET_DHCP); - mutex_unlock(&rtwdev->mutex); } -void rtw89_btc_ntfy_icmp_packet_work(struct work_struct *work) +void rtw89_btc_ntfy_icmp_packet_work(struct wiphy *wiphy, struct wiphy_work *work) { struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev, btc.icmp_notify_work); - mutex_lock(&rtwdev->mutex); + lockdep_assert_wiphy(wiphy); + rtw89_leave_ps_mode(rtwdev); rtw89_btc_ntfy_specific_packet(rtwdev, PACKET_ICMP); - mutex_unlock(&rtwdev->mutex); } static u8 _update_bt_rssi_level(struct rtw89_dev *rtwdev, u8 rssi) @@ -7531,9 +7522,9 @@ static void _update_bt_info(struct rtw89_dev *rtwdev, u8 *buf, u32 len) a2dp->vendor_id = 0; a2dp->flush_time = 0; a2dp->play_latency = 1; - ieee80211_queue_delayed_work(rtwdev->hw, - &rtwdev->coex_bt_devinfo_work, - RTW89_COEX_BT_DEVINFO_WORK_PERIOD); + wiphy_delayed_work_queue(rtwdev->hw->wiphy, + &rtwdev->coex_bt_devinfo_work, + RTW89_COEX_BT_DEVINFO_WORK_PERIOD); } _run_coex(rtwdev, BTC_RSN_UPDATE_BT_INFO); @@ -7671,7 +7662,8 @@ void rtw89_btc_ntfy_role_info(struct rtw89_dev *rtwdev, else wl->status.map.connecting = 0; - if (state == BTC_ROLE_MSTS_STA_DIS_CONN) + if (state == BTC_ROLE_MSTS_STA_DIS_CONN || + state == BTC_ROLE_MSTS_STA_CONN_END) 
wl->status.map._4way = false; _run_coex(rtwdev, BTC_RSN_NTFY_ROLE_INFO); @@ -7781,7 +7773,7 @@ static bool _ntfy_wl_rfk(struct rtw89_dev *rtwdev, u8 phy_path, wl->rfk_info.state = BTC_WRFK_STOP; _write_scbd(rtwdev, BTC_WSCB_WLRFK, false); - cancel_delayed_work(&rtwdev->coex_rfk_chk_work); + wiphy_delayed_work_cancel(rtwdev->hw->wiphy, &rtwdev->coex_rfk_chk_work); break; default: rtw89_debug(rtwdev, RTW89_DBG_BTC, @@ -7795,9 +7787,9 @@ static bool _ntfy_wl_rfk(struct rtw89_dev *rtwdev, u8 phy_path, _run_coex(rtwdev, BTC_RSN_NTFY_WL_RFK); if (wl->rfk_info.state == BTC_WRFK_START) - ieee80211_queue_delayed_work(rtwdev->hw, - &rtwdev->coex_rfk_chk_work, - RTW89_COEX_RFK_CHK_WORK_PERIOD); + wiphy_delayed_work_queue(rtwdev->hw->wiphy, + &rtwdev->coex_rfk_chk_work, + RTW89_COEX_RFK_CHK_WORK_PERIOD); } rtw89_debug(rtwdev, RTW89_DBG_BTC, @@ -7815,6 +7807,8 @@ void rtw89_btc_ntfy_wl_rfk(struct rtw89_dev *rtwdev, u8 phy_map, bool allow; int ret; + lockdep_assert_wiphy(rtwdev->hw->wiphy); + band = FIELD_GET(BTC_RFK_BAND_MAP, phy_map); rtw89_debug(rtwdev, RTW89_DBG_RFK, @@ -8115,6 +8109,7 @@ void rtw89_btc_c2h_handle(struct rtw89_dev *rtwdev, struct sk_buff *skb, return; func = rtw89_btc_c2h_get_index_by_ver(rtwdev, func); + pfwinfo->cnt_c2h++; switch (func) { case BTF_EVNT_BUF_OVERFLOW: @@ -8156,7 +8151,7 @@ void rtw89_btc_c2h_handle(struct rtw89_dev *rtwdev, struct sk_buff *skb, #define BTC_CX_FW_OFFLOAD 0 -static void _show_cx_info(struct rtw89_dev *rtwdev, struct seq_file *m) +static int _show_cx_info(struct rtw89_dev *rtwdev, char *buf, size_t bufsz) { union rtw89_btc_module_info *md = &rtwdev->btc.mdinfo; const struct rtw89_chip_info *chip = rtwdev->chip; @@ -8168,40 +8163,43 @@ static void _show_cx_info(struct rtw89_dev *rtwdev, struct seq_file *m) struct rtw89_btc_wl_info *wl = &btc->cx.wl; u32 ver_main = 0, ver_sub = 0, ver_hotfix = 0, id_branch = 0; u8 cv, rfe, iso, ant_num, ant_single_pos; + char *p = buf, *end = buf + bufsz; if (!(dm->coex_info_map & BTC_COEX_INFO_CX)) - return; + return 0; dm->cnt_notify[BTC_NCNT_SHOW_COEX_INFO]++; - seq_printf(m, "========== [BTC COEX INFO (%d)] ==========\n", - chip->chip_id); + p += scnprintf(p, end - p, + "========== [BTC COEX INFO (%d)] ==========\n", + chip->chip_id); ver_main = FIELD_GET(GENMASK(31, 24), RTW89_COEX_VERSION); ver_sub = FIELD_GET(GENMASK(23, 16), RTW89_COEX_VERSION); ver_hotfix = FIELD_GET(GENMASK(15, 8), RTW89_COEX_VERSION); id_branch = FIELD_GET(GENMASK(7, 0), RTW89_COEX_VERSION); - seq_printf(m, " %-15s : Coex:%d.%d.%d(branch:%d), ", - "[coex_version]", ver_main, ver_sub, ver_hotfix, id_branch); + p += scnprintf(p, end - p, " %-15s : Coex:%d.%d.%d(branch:%d), ", + "[coex_version]", ver_main, ver_sub, ver_hotfix, + id_branch); ver_main = FIELD_GET(GENMASK(31, 24), wl->ver_info.fw_coex); ver_sub = FIELD_GET(GENMASK(23, 16), wl->ver_info.fw_coex); ver_hotfix = FIELD_GET(GENMASK(15, 8), wl->ver_info.fw_coex); id_branch = FIELD_GET(GENMASK(7, 0), wl->ver_info.fw_coex); - seq_printf(m, "WL_FW_coex:%d.%d.%d(branch:%d)", - ver_main, ver_sub, ver_hotfix, id_branch); + p += scnprintf(p, end - p, "WL_FW_coex:%d.%d.%d(branch:%d)", + ver_main, ver_sub, ver_hotfix, id_branch); ver_main = FIELD_GET(GENMASK(31, 24), chip->wlcx_desired); ver_sub = FIELD_GET(GENMASK(23, 16), chip->wlcx_desired); ver_hotfix = FIELD_GET(GENMASK(15, 8), chip->wlcx_desired); - seq_printf(m, "(%s, desired:%d.%d.%d), ", - (wl->ver_info.fw_coex >= chip->wlcx_desired ? 
- "Match" : "Mismatch"), ver_main, ver_sub, ver_hotfix); + p += scnprintf(p, end - p, "(%s, desired:%d.%d.%d), ", + (wl->ver_info.fw_coex >= chip->wlcx_desired ? + "Match" : "Mismatch"), ver_main, ver_sub, ver_hotfix); - seq_printf(m, "BT_FW_coex:%d(%s, desired:%d)\n", - bt->ver_info.fw_coex, - (bt->ver_info.fw_coex >= chip->btcx_desired ? - "Match" : "Mismatch"), chip->btcx_desired); + p += scnprintf(p, end - p, "BT_FW_coex:%d(%s, desired:%d)\n", + bt->ver_info.fw_coex, + (bt->ver_info.fw_coex >= chip->btcx_desired ? + "Match" : "Mismatch"), chip->btcx_desired); if (bt->enable.now && bt->ver_info.fw == 0) rtw89_btc_fw_en_rpt(rtwdev, RPT_EN_BT_VER_INFO, true); @@ -8212,10 +8210,11 @@ static void _show_cx_info(struct rtw89_dev *rtwdev, struct seq_file *m) ver_sub = FIELD_GET(GENMASK(23, 16), wl->ver_info.fw); ver_hotfix = FIELD_GET(GENMASK(15, 8), wl->ver_info.fw); id_branch = FIELD_GET(GENMASK(7, 0), wl->ver_info.fw); - seq_printf(m, " %-15s : WL_FW:%d.%d.%d.%d, BT_FW:0x%x(%s)\n", - "[sub_module]", - ver_main, ver_sub, ver_hotfix, id_branch, - bt->ver_info.fw, bt->run_patch_code ? "patch" : "ROM"); + p += scnprintf(p, end - p, + " %-15s : WL_FW:%d.%d.%d.%d, BT_FW:0x%x(%s)\n", + "[sub_module]", + ver_main, ver_sub, ver_hotfix, id_branch, + bt->ver_info.fw, bt->run_patch_code ? "patch" : "ROM"); if (ver->fcxinit == 7) { cv = md->md_v7.kt_ver; @@ -8231,36 +8230,41 @@ static void _show_cx_info(struct rtw89_dev *rtwdev, struct seq_file *m) ant_single_pos = md->md.ant.single_pos; } - seq_printf(m, " %-15s : cv:%x, rfe_type:0x%x, ant_iso:%d, ant_pg:%d, %s", - "[hw_info]", cv, rfe, iso, ant_num, - ant_num > 1 ? "" : - ant_single_pos ? "1Ant_Pos:S1, " : "1Ant_Pos:S0, "); + p += scnprintf(p, end - p, + " %-15s : cv:%x, rfe_type:0x%x, ant_iso:%d, ant_pg:%d, %s", + "[hw_info]", cv, rfe, iso, ant_num, + ant_num > 1 ? "" : + ant_single_pos ? 
"1Ant_Pos:S1, " : "1Ant_Pos:S0, "); - seq_printf(m, "3rd_coex:%d, dbcc:%d, tx_num:%d, rx_num:%d\n", - btc->cx.other.type, rtwdev->dbcc_en, hal->tx_nss, - hal->rx_nss); + p += scnprintf(p, end - p, + "3rd_coex:%d, dbcc:%d, tx_num:%d, rx_num:%d\n", + btc->cx.other.type, rtwdev->dbcc_en, hal->tx_nss, + hal->rx_nss); + + return p - buf; } -static void _show_wl_role_info(struct rtw89_dev *rtwdev, struct seq_file *m) +static int _show_wl_role_info(struct rtw89_dev *rtwdev, char *buf, size_t bufsz) { struct rtw89_btc *btc = &rtwdev->btc; struct rtw89_btc_wl_link_info *plink = NULL; struct rtw89_btc_wl_info *wl = &btc->cx.wl; struct rtw89_btc_wl_dbcc_info *wl_dinfo = &wl->dbcc_info; struct rtw89_traffic_stats *t; + char *p = buf, *end = buf + bufsz; u8 i; if (rtwdev->dbcc_en) { - seq_printf(m, - " %-15s : PHY0_band(op:%d/scan:%d/real:%d), ", - "[dbcc_info]", wl_dinfo->op_band[RTW89_PHY_0], - wl_dinfo->scan_band[RTW89_PHY_0], - wl_dinfo->real_band[RTW89_PHY_0]); - seq_printf(m, - "PHY1_band(op:%d/scan:%d/real:%d)\n", - wl_dinfo->op_band[RTW89_PHY_1], - wl_dinfo->scan_band[RTW89_PHY_1], - wl_dinfo->real_band[RTW89_PHY_1]); + p += scnprintf(p, end - p, + " %-15s : PHY0_band(op:%d/scan:%d/real:%d), ", + "[dbcc_info]", wl_dinfo->op_band[RTW89_PHY_0], + wl_dinfo->scan_band[RTW89_PHY_0], + wl_dinfo->real_band[RTW89_PHY_0]); + p += scnprintf(p, end - p, + "PHY1_band(op:%d/scan:%d/real:%d)\n", + wl_dinfo->op_band[RTW89_PHY_1], + wl_dinfo->scan_band[RTW89_PHY_1], + wl_dinfo->real_band[RTW89_PHY_1]); } for (i = 0; i < RTW89_PORT_NUM; i++) { @@ -8272,38 +8276,41 @@ static void _show_wl_role_info(struct rtw89_dev *rtwdev, struct seq_file *m) if (!plink->active) continue; - seq_printf(m, - " [port_%d] : role=%d(phy-%d), connect=%d(client_cnt=%d), mode=%d, center_ch=%d, bw=%d", - plink->pid, (u32)plink->role, plink->phy, - (u32)plink->connected, plink->client_cnt - 1, - (u32)plink->mode, plink->ch, (u32)plink->bw); + p += scnprintf(p, end - p, + " [port_%d] : role=%d(phy-%d), connect=%d(client_cnt=%d), mode=%d, center_ch=%d, bw=%d", + plink->pid, (u32)plink->role, plink->phy, + (u32)plink->connected, plink->client_cnt - 1, + (u32)plink->mode, plink->ch, (u32)plink->bw); if (plink->connected == MLME_NO_LINK) continue; - seq_printf(m, - ", mac_id=%d, max_tx_time=%dus, max_tx_retry=%d\n", - plink->mac_id, plink->tx_time, plink->tx_retry); + p += scnprintf(p, end - p, + ", mac_id=%d, max_tx_time=%dus, max_tx_retry=%d\n", + plink->mac_id, plink->tx_time, plink->tx_retry); - seq_printf(m, - " [port_%d] : rssi=-%ddBm(%d), busy=%d, dir=%s, ", - plink->pid, 110 - plink->stat.rssi, - plink->stat.rssi, plink->busy, - plink->dir == RTW89_TFC_UL ? "UL" : "DL"); + p += scnprintf(p, end - p, + " [port_%d] : rssi=-%ddBm(%d), busy=%d, dir=%s, ", + plink->pid, 110 - plink->stat.rssi, + plink->stat.rssi, plink->busy, + plink->dir == RTW89_TFC_UL ? 
"UL" : "DL"); t = &plink->stat.traffic; - seq_printf(m, - "tx[rate:%d/busy_level:%d], ", - (u32)t->tx_rate, t->tx_tfc_lv); + p += scnprintf(p, end - p, + "tx[rate:%d/busy_level:%d], ", + (u32)t->tx_rate, t->tx_tfc_lv); - seq_printf(m, "rx[rate:%d/busy_level:%d/drop:%d]\n", - (u32)t->rx_rate, - t->rx_tfc_lv, plink->rx_rate_drop_cnt); + p += scnprintf(p, end - p, + "rx[rate:%d/busy_level:%d/drop:%d]\n", + (u32)t->rx_rate, + t->rx_tfc_lv, plink->rx_rate_drop_cnt); } + + return p - buf; } -static void _show_wl_info(struct rtw89_dev *rtwdev, struct seq_file *m) +static int _show_wl_info(struct rtw89_dev *rtwdev, char *buf, size_t bufsz) { struct rtw89_btc *btc = &rtwdev->btc; const struct rtw89_btc_ver *ver = btc->ver; @@ -8314,12 +8321,13 @@ static void _show_wl_info(struct rtw89_dev *rtwdev, struct seq_file *m) struct rtw89_btc_wl_role_info_v2 *wl_rinfo_v2 = &wl->role_info_v2; struct rtw89_btc_wl_role_info_v7 *wl_rinfo_v7 = &wl->role_info_v7; struct rtw89_btc_wl_role_info_v8 *wl_rinfo_v8 = &wl->role_info_v8; + char *p = buf, *end = buf + bufsz; u8 mode; if (!(btc->dm.coex_info_map & BTC_COEX_INFO_WL)) - return; + return 0; - seq_puts(m, "========== [WL Status] ==========\n"); + p += scnprintf(p, end - p, "========== [WL Status] ==========\n"); if (ver->fwlrole == 0) mode = wl_rinfo->link_mode; @@ -8332,24 +8340,28 @@ static void _show_wl_info(struct rtw89_dev *rtwdev, struct seq_file *m) else if (ver->fwlrole == 8) mode = wl_rinfo_v8->link_mode; else - return; + goto out; - seq_printf(m, " %-15s : link_mode:%d, ", "[status]", mode); + p += scnprintf(p, end - p, " %-15s : link_mode:%d, ", "[status]", + mode); - seq_printf(m, - "rf_off:%d, power_save:%d, scan:%s(band:%d/phy_map:0x%x), ", - wl->status.map.rf_off, wl->status.map.lps, - wl->status.map.scan ? "Y" : "N", - wl->scan_info.band[RTW89_PHY_0], wl->scan_info.phy_map); + p += scnprintf(p, end - p, + "rf_off:%d, power_save:%d, scan:%s(band:%d/phy_map:0x%x), ", + wl->status.map.rf_off, wl->status.map.lps, + wl->status.map.scan ? "Y" : "N", + wl->scan_info.band[RTW89_PHY_0], wl->scan_info.phy_map); - seq_printf(m, - "connecting:%s, roam:%s, 4way:%s, init_ok:%s\n", - wl->status.map.connecting ? "Y" : "N", - wl->status.map.roaming ? "Y" : "N", - wl->status.map._4way ? "Y" : "N", - wl->status.map.init_ok ? "Y" : "N"); + p += scnprintf(p, end - p, + "connecting:%s, roam:%s, 4way:%s, init_ok:%s\n", + wl->status.map.connecting ? "Y" : "N", + wl->status.map.roaming ? "Y" : "N", + wl->status.map._4way ? "Y" : "N", + wl->status.map.init_ok ? "Y" : "N"); - _show_wl_role_info(rtwdev, m); + p += _show_wl_role_info(rtwdev, p, end - p); + +out: + return p - buf; } enum btc_bt_a2dp_type { @@ -8358,7 +8370,7 @@ enum btc_bt_a2dp_type { BTC_A2DP_TWS_RELAY = 2, }; -static void _show_bt_profile_info(struct rtw89_dev *rtwdev, struct seq_file *m) +static int _show_bt_profile_info(struct rtw89_dev *rtwdev, char *buf, size_t bufsz) { struct rtw89_btc *btc = &rtwdev->btc; struct rtw89_btc_bt_link_info *bt_linfo = &btc->cx.bt.link_info; @@ -8366,50 +8378,55 @@ static void _show_bt_profile_info(struct rtw89_dev *rtwdev, struct seq_file *m) struct rtw89_btc_bt_hid_desc hid = bt_linfo->hid_desc; struct rtw89_btc_bt_a2dp_desc a2dp = bt_linfo->a2dp_desc; struct rtw89_btc_bt_pan_desc pan = bt_linfo->pan_desc; + char *p = buf, *end = buf + bufsz; if (hfp.exist) { - seq_printf(m, " %-15s : type:%s, sut_pwr:%d, golden-rx:%d", - "[HFP]", (hfp.type == 0 ? 
"SCO" : "eSCO"), - bt_linfo->sut_pwr_level[0], - bt_linfo->golden_rx_shift[0]); + p += scnprintf(p, end - p, + " %-15s : type:%s, sut_pwr:%d, golden-rx:%d", + "[HFP]", (hfp.type == 0 ? "SCO" : "eSCO"), + bt_linfo->sut_pwr_level[0], + bt_linfo->golden_rx_shift[0]); } if (hid.exist) { - seq_printf(m, - "\n\r %-15s : type:%s%s%s%s%s pair-cnt:%d, sut_pwr:%d, golden-rx:%d\n", - "[HID]", - hid.type & BTC_HID_218 ? "2/18," : "", - hid.type & BTC_HID_418 ? "4/18," : "", - hid.type & BTC_HID_BLE ? "BLE," : "", - hid.type & BTC_HID_RCU ? "RCU," : "", - hid.type & BTC_HID_RCU_VOICE ? "RCU-Voice," : "", - hid.pair_cnt, bt_linfo->sut_pwr_level[1], - bt_linfo->golden_rx_shift[1]); + p += scnprintf(p, end - p, + "\n\r %-15s : type:%s%s%s%s%s pair-cnt:%d, sut_pwr:%d, golden-rx:%d\n", + "[HID]", + hid.type & BTC_HID_218 ? "2/18," : "", + hid.type & BTC_HID_418 ? "4/18," : "", + hid.type & BTC_HID_BLE ? "BLE," : "", + hid.type & BTC_HID_RCU ? "RCU," : "", + hid.type & BTC_HID_RCU_VOICE ? "RCU-Voice," : "", + hid.pair_cnt, bt_linfo->sut_pwr_level[1], + bt_linfo->golden_rx_shift[1]); } if (a2dp.exist) { - seq_printf(m, - " %-15s : type:%s, bit-pool:%d, flush-time:%d, ", - "[A2DP]", - a2dp.type == BTC_A2DP_LEGACY ? "Legacy" : "TWS", - a2dp.bitpool, a2dp.flush_time); + p += scnprintf(p, end - p, + " %-15s : type:%s, bit-pool:%d, flush-time:%d, ", + "[A2DP]", + a2dp.type == BTC_A2DP_LEGACY ? "Legacy" : "TWS", + a2dp.bitpool, a2dp.flush_time); - seq_printf(m, - "vid:0x%x, Dev-name:0x%x, sut_pwr:%d, golden-rx:%d\n", - a2dp.vendor_id, a2dp.device_name, - bt_linfo->sut_pwr_level[2], - bt_linfo->golden_rx_shift[2]); + p += scnprintf(p, end - p, + "vid:0x%x, Dev-name:0x%x, sut_pwr:%d, golden-rx:%d\n", + a2dp.vendor_id, a2dp.device_name, + bt_linfo->sut_pwr_level[2], + bt_linfo->golden_rx_shift[2]); } if (pan.exist) { - seq_printf(m, " %-15s : sut_pwr:%d, golden-rx:%d\n", - "[PAN]", - bt_linfo->sut_pwr_level[3], - bt_linfo->golden_rx_shift[3]); + p += scnprintf(p, end - p, + " %-15s : sut_pwr:%d, golden-rx:%d\n", + "[PAN]", + bt_linfo->sut_pwr_level[3], + bt_linfo->golden_rx_shift[3]); } + + return p - buf; } -static void _show_bt_info(struct rtw89_dev *rtwdev, struct seq_file *m) +static int _show_bt_info(struct rtw89_dev *rtwdev, char *buf, size_t bufsz) { struct rtw89_btc *btc = &rtwdev->btc; const struct rtw89_btc_ver *ver = btc->ver; @@ -8418,129 +8435,136 @@ static void _show_bt_info(struct rtw89_dev *rtwdev, struct seq_file *m) struct rtw89_btc_wl_info *wl = &cx->wl; struct rtw89_btc_bt_link_info *bt_linfo = &bt->link_info; union rtw89_btc_module_info *md = &btc->mdinfo; + char *p = buf, *end = buf + bufsz; u8 *afh = bt_linfo->afh_map; u8 *afh_le = bt_linfo->afh_map_le; u8 bt_pos; if (!(btc->dm.coex_info_map & BTC_COEX_INFO_BT)) - return; + return 0; if (ver->fcxinit == 7) bt_pos = md->md_v7.bt_pos; else bt_pos = md->md.bt_pos; - seq_puts(m, "========== [BT Status] ==========\n"); - - seq_printf(m, " %-15s : enable:%s, btg:%s%s, connect:%s, ", - "[status]", bt->enable.now ? "Y" : "N", - bt->btg_type ? "Y" : "N", - (bt->enable.now && (bt->btg_type != bt_pos) ? - "(efuse-mismatch!!)" : ""), - (bt_linfo->status.map.connect ? "Y" : "N")); - - seq_printf(m, "igno_wl:%s, mailbox_avl:%s, rfk_state:0x%x\n", - bt->igno_wl ? "Y" : "N", - bt->mbx_avl ? "Y" : "N", bt->rfk_info.val); - - seq_printf(m, " %-15s : profile:%s%s%s%s%s ", - "[profile]", - (bt_linfo->profile_cnt.now == 0) ? "None," : "", - bt_linfo->hfp_desc.exist ? "HFP," : "", - bt_linfo->hid_desc.exist ? "HID," : "", - bt_linfo->a2dp_desc.exist ? 
- (bt_linfo->a2dp_desc.sink ? "A2DP_sink," : "A2DP,") : "", - bt_linfo->pan_desc.exist ? "PAN," : ""); - - seq_printf(m, - "multi-link:%s, role:%s, ble-connect:%s, CQDDR:%s, A2DP_active:%s, PAN_active:%s\n", - bt_linfo->multi_link.now ? "Y" : "N", - bt_linfo->slave_role ? "Slave" : "Master", - bt_linfo->status.map.ble_connect ? "Y" : "N", - bt_linfo->cqddr ? "Y" : "N", - bt_linfo->a2dp_desc.active ? "Y" : "N", - bt_linfo->pan_desc.active ? "Y" : "N"); - - seq_printf(m, - " %-15s : rssi:%ddBm(lvl:%d), tx_rate:%dM, %s%s%s", - "[link]", bt_linfo->rssi - 100, - bt->rssi_level, - bt_linfo->tx_3m ? 3 : 2, - bt_linfo->status.map.inq_pag ? " inq-page!!" : "", - bt_linfo->status.map.acl_busy ? " acl_busy!!" : "", - bt_linfo->status.map.mesh_busy ? " mesh_busy!!" : ""); - - seq_printf(m, - "%s afh_map[%02x%02x_%02x%02x_%02x%02x_%02x%02x_%02x%02x], ", - bt_linfo->relink.now ? " ReLink!!" : "", - afh[0], afh[1], afh[2], afh[3], afh[4], - afh[5], afh[6], afh[7], afh[8], afh[9]); + p += scnprintf(p, end - p, "========== [BT Status] ==========\n"); + + p += scnprintf(p, end - p, + " %-15s : enable:%s, btg:%s%s, connect:%s, ", + "[status]", bt->enable.now ? "Y" : "N", + bt->btg_type ? "Y" : "N", + (bt->enable.now && (bt->btg_type != bt_pos) ? + "(efuse-mismatch!!)" : ""), + (bt_linfo->status.map.connect ? "Y" : "N")); + + p += scnprintf(p, end - p, + "igno_wl:%s, mailbox_avl:%s, rfk_state:0x%x\n", + bt->igno_wl ? "Y" : "N", + bt->mbx_avl ? "Y" : "N", bt->rfk_info.val); + + p += scnprintf(p, end - p, " %-15s : profile:%s%s%s%s%s ", + "[profile]", + (bt_linfo->profile_cnt.now == 0) ? "None," : "", + bt_linfo->hfp_desc.exist ? "HFP," : "", + bt_linfo->hid_desc.exist ? "HID," : "", + bt_linfo->a2dp_desc.exist ? + (bt_linfo->a2dp_desc.sink ? "A2DP_sink," : "A2DP,") : "", + bt_linfo->pan_desc.exist ? "PAN," : ""); + + p += scnprintf(p, end - p, + "multi-link:%s, role:%s, ble-connect:%s, CQDDR:%s, A2DP_active:%s, PAN_active:%s\n", + bt_linfo->multi_link.now ? "Y" : "N", + bt_linfo->slave_role ? "Slave" : "Master", + bt_linfo->status.map.ble_connect ? "Y" : "N", + bt_linfo->cqddr ? "Y" : "N", + bt_linfo->a2dp_desc.active ? "Y" : "N", + bt_linfo->pan_desc.active ? "Y" : "N"); + + p += scnprintf(p, end - p, + " %-15s : rssi:%ddBm(lvl:%d), tx_rate:%dM, %s%s%s", + "[link]", bt_linfo->rssi - 100, + bt->rssi_level, + bt_linfo->tx_3m ? 3 : 2, + bt_linfo->status.map.inq_pag ? " inq-page!!" : "", + bt_linfo->status.map.acl_busy ? " acl_busy!!" : "", + bt_linfo->status.map.mesh_busy ? " mesh_busy!!" : ""); + + p += scnprintf(p, end - p, + "%s afh_map[%02x%02x_%02x%02x_%02x%02x_%02x%02x_%02x%02x], ", + bt_linfo->relink.now ? " ReLink!!" 
: "", + afh[0], afh[1], afh[2], afh[3], afh[4], + afh[5], afh[6], afh[7], afh[8], afh[9]); if (ver->fcxbtafh == 2 && bt_linfo->status.map.ble_connect) - seq_printf(m, - "LE[%02x%02x_%02x_%02x%02x]", - afh_le[0], afh_le[1], afh_le[2], - afh_le[3], afh_le[4]); - - seq_printf(m, "wl_ch_map[en:%d/ch:%d/bw:%d]\n", - wl->afh_info.en, wl->afh_info.ch, wl->afh_info.bw); - - seq_printf(m, - " %-15s : retry:%d, relink:%d, rate_chg:%d, reinit:%d, reenable:%d, ", - "[stat_cnt]", cx->cnt_bt[BTC_BCNT_RETRY], - cx->cnt_bt[BTC_BCNT_RELINK], cx->cnt_bt[BTC_BCNT_RATECHG], - cx->cnt_bt[BTC_BCNT_REINIT], cx->cnt_bt[BTC_BCNT_REENABLE]); - - seq_printf(m, - "role-switch:%d, afh:%d, inq_page:%d(inq:%d/page:%d), igno_wl:%d\n", - cx->cnt_bt[BTC_BCNT_ROLESW], cx->cnt_bt[BTC_BCNT_AFH], - cx->cnt_bt[BTC_BCNT_INQPAG], cx->cnt_bt[BTC_BCNT_INQ], - cx->cnt_bt[BTC_BCNT_PAGE], cx->cnt_bt[BTC_BCNT_IGNOWL]); - - _show_bt_profile_info(rtwdev, m); - - seq_printf(m, - " %-15s : raw_data[%02x %02x %02x %02x %02x %02x] (type:%s/cnt:%d/same:%d)\n", - "[bt_info]", bt->raw_info[2], bt->raw_info[3], - bt->raw_info[4], bt->raw_info[5], bt->raw_info[6], - bt->raw_info[7], - bt->raw_info[0] == BTC_BTINFO_AUTO ? "auto" : "reply", - cx->cnt_bt[BTC_BCNT_INFOUPDATE], - cx->cnt_bt[BTC_BCNT_INFOSAME]); - - seq_printf(m, - " %-15s : Hi-rx = %d, Hi-tx = %d, Lo-rx = %d, Lo-tx = %d (bt_polut_wl_tx = %d)", - "[trx_req_cnt]", cx->cnt_bt[BTC_BCNT_HIPRI_RX], - cx->cnt_bt[BTC_BCNT_HIPRI_TX], cx->cnt_bt[BTC_BCNT_LOPRI_RX], - cx->cnt_bt[BTC_BCNT_LOPRI_TX], cx->cnt_bt[BTC_BCNT_POLUT]); + p += scnprintf(p, end - p, + "LE[%02x%02x_%02x_%02x%02x]", + afh_le[0], afh_le[1], afh_le[2], + afh_le[3], afh_le[4]); + + p += scnprintf(p, end - p, "wl_ch_map[en:%d/ch:%d/bw:%d]\n", + wl->afh_info.en, wl->afh_info.ch, wl->afh_info.bw); + + p += scnprintf(p, end - p, + " %-15s : retry:%d, relink:%d, rate_chg:%d, reinit:%d, reenable:%d, ", + "[stat_cnt]", cx->cnt_bt[BTC_BCNT_RETRY], + cx->cnt_bt[BTC_BCNT_RELINK], + cx->cnt_bt[BTC_BCNT_RATECHG], + cx->cnt_bt[BTC_BCNT_REINIT], + cx->cnt_bt[BTC_BCNT_REENABLE]); + + p += scnprintf(p, end - p, + "role-switch:%d, afh:%d, inq_page:%d(inq:%d/page:%d), igno_wl:%d\n", + cx->cnt_bt[BTC_BCNT_ROLESW], cx->cnt_bt[BTC_BCNT_AFH], + cx->cnt_bt[BTC_BCNT_INQPAG], cx->cnt_bt[BTC_BCNT_INQ], + cx->cnt_bt[BTC_BCNT_PAGE], cx->cnt_bt[BTC_BCNT_IGNOWL]); + + p += _show_bt_profile_info(rtwdev, p, end - p); + + p += scnprintf(p, end - p, + " %-15s : raw_data[%02x %02x %02x %02x %02x %02x] (type:%s/cnt:%d/same:%d)\n", + "[bt_info]", bt->raw_info[2], bt->raw_info[3], + bt->raw_info[4], bt->raw_info[5], bt->raw_info[6], + bt->raw_info[7], + bt->raw_info[0] == BTC_BTINFO_AUTO ? 
"auto" : "reply", + cx->cnt_bt[BTC_BCNT_INFOUPDATE], + cx->cnt_bt[BTC_BCNT_INFOSAME]); + + p += scnprintf(p, end - p, + " %-15s : Hi-rx = %d, Hi-tx = %d, Lo-rx = %d, Lo-tx = %d (bt_polut_wl_tx = %d)", + "[trx_req_cnt]", cx->cnt_bt[BTC_BCNT_HIPRI_RX], + cx->cnt_bt[BTC_BCNT_HIPRI_TX], + cx->cnt_bt[BTC_BCNT_LOPRI_RX], + cx->cnt_bt[BTC_BCNT_LOPRI_TX], + cx->cnt_bt[BTC_BCNT_POLUT]); if (!bt->scan_info_update) { rtw89_btc_fw_en_rpt(rtwdev, RPT_EN_BT_SCAN_INFO, true); - seq_puts(m, "\n"); + p += scnprintf(p, end - p, "\n"); } else { rtw89_btc_fw_en_rpt(rtwdev, RPT_EN_BT_SCAN_INFO, false); if (ver->fcxbtscan == 1) { - seq_printf(m, - "(INQ:%d-%d/PAGE:%d-%d/LE:%d-%d/INIT:%d-%d)", - le16_to_cpu(bt->scan_info_v1[BTC_SCAN_INQ].win), - le16_to_cpu(bt->scan_info_v1[BTC_SCAN_INQ].intvl), - le16_to_cpu(bt->scan_info_v1[BTC_SCAN_PAGE].win), - le16_to_cpu(bt->scan_info_v1[BTC_SCAN_PAGE].intvl), - le16_to_cpu(bt->scan_info_v1[BTC_SCAN_BLE].win), - le16_to_cpu(bt->scan_info_v1[BTC_SCAN_BLE].intvl), - le16_to_cpu(bt->scan_info_v1[BTC_SCAN_INIT].win), - le16_to_cpu(bt->scan_info_v1[BTC_SCAN_INIT].intvl)); + p += scnprintf(p, end - p, + "(INQ:%d-%d/PAGE:%d-%d/LE:%d-%d/INIT:%d-%d)", + le16_to_cpu(bt->scan_info_v1[BTC_SCAN_INQ].win), + le16_to_cpu(bt->scan_info_v1[BTC_SCAN_INQ].intvl), + le16_to_cpu(bt->scan_info_v1[BTC_SCAN_PAGE].win), + le16_to_cpu(bt->scan_info_v1[BTC_SCAN_PAGE].intvl), + le16_to_cpu(bt->scan_info_v1[BTC_SCAN_BLE].win), + le16_to_cpu(bt->scan_info_v1[BTC_SCAN_BLE].intvl), + le16_to_cpu(bt->scan_info_v1[BTC_SCAN_INIT].win), + le16_to_cpu(bt->scan_info_v1[BTC_SCAN_INIT].intvl)); } else if (ver->fcxbtscan == 2) { - seq_printf(m, - "(BG:%d-%d/INIT:%d-%d/LE:%d-%d)", - le16_to_cpu(bt->scan_info_v2[CXSCAN_BG].win), - le16_to_cpu(bt->scan_info_v2[CXSCAN_BG].intvl), - le16_to_cpu(bt->scan_info_v2[CXSCAN_INIT].win), - le16_to_cpu(bt->scan_info_v2[CXSCAN_INIT].intvl), - le16_to_cpu(bt->scan_info_v2[CXSCAN_LE].win), - le16_to_cpu(bt->scan_info_v2[CXSCAN_LE].intvl)); + p += scnprintf(p, end - p, + "(BG:%d-%d/INIT:%d-%d/LE:%d-%d)", + le16_to_cpu(bt->scan_info_v2[CXSCAN_BG].win), + le16_to_cpu(bt->scan_info_v2[CXSCAN_BG].intvl), + le16_to_cpu(bt->scan_info_v2[CXSCAN_INIT].win), + le16_to_cpu(bt->scan_info_v2[CXSCAN_INIT].intvl), + le16_to_cpu(bt->scan_info_v2[CXSCAN_LE].win), + le16_to_cpu(bt->scan_info_v2[CXSCAN_LE].intvl)); } - seq_puts(m, "\n"); + p += scnprintf(p, end - p, "\n"); } if (bt_linfo->profile_cnt.now || bt_linfo->status.map.ble_connect) @@ -8560,6 +8584,8 @@ static void _show_bt_info(struct rtw89_dev *rtwdev, struct seq_file *m) rtw89_btc_fw_en_rpt(rtwdev, RPT_EN_BT_DEVICE_INFO, true); else rtw89_btc_fw_en_rpt(rtwdev, RPT_EN_BT_DEVICE_INFO, false); + + return p - buf; } #define CASE_BTC_RSN_STR(e) case BTC_RSN_ ## e: return #e @@ -8853,114 +8879,132 @@ static const char *id_to_ant(u32 id) } static -void seq_print_segment(struct seq_file *m, const char *prefix, u16 *data, - u8 len, u8 seg_len, u8 start_idx, u8 ring_len) +int scnprintf_segment(char *buf, size_t bufsz, const char *prefix, const u16 *data, + u8 len, u8 seg_len, u8 start_idx, u8 ring_len) { - u8 i; + char *p = buf, *end = buf + bufsz; u8 cur_index; + u8 i; for (i = 0; i < len ; i++) { if ((i % seg_len) == 0) - seq_printf(m, " %-15s : ", prefix); + p += scnprintf(p, end - p, " %-15s : ", prefix); cur_index = (start_idx + i) % ring_len; if (i % 3 == 0) - seq_printf(m, "-> %-20s", - steps_to_str(*(data + cur_index))); + p += scnprintf(p, end - p, "-> %-20s", + steps_to_str(*(data + cur_index))); else if (i % 3 == 1) - seq_printf(m, "-> 
%-15s", - steps_to_str(*(data + cur_index))); + p += scnprintf(p, end - p, "-> %-15s", + steps_to_str(*(data + cur_index))); else - seq_printf(m, "-> %-13s", - steps_to_str(*(data + cur_index))); + p += scnprintf(p, end - p, "-> %-13s", + steps_to_str(*(data + cur_index))); if (i == (len - 1) || (i % seg_len) == (seg_len - 1)) - seq_puts(m, "\n"); + p += scnprintf(p, end - p, "\n"); } + + return p - buf; } -static void _show_dm_step(struct rtw89_dev *rtwdev, struct seq_file *m) +static int _show_dm_step(struct rtw89_dev *rtwdev, char *buf, size_t bufsz) { struct rtw89_btc *btc = &rtwdev->btc; struct rtw89_btc_dm *dm = &btc->dm; + char *p = buf, *end = buf + bufsz; u8 start_idx; u8 len; len = dm->dm_step.step_ov ? RTW89_BTC_DM_MAXSTEP : dm->dm_step.step_pos; start_idx = dm->dm_step.step_ov ? dm->dm_step.step_pos : 0; - seq_print_segment(m, "[dm_steps]", dm->dm_step.step, len, 6, start_idx, - ARRAY_SIZE(dm->dm_step.step)); + p += scnprintf_segment(p, end - p, "[dm_steps]", dm->dm_step.step, len, + 6, start_idx, ARRAY_SIZE(dm->dm_step.step)); + + return p - buf; } -static void _show_dm_info(struct rtw89_dev *rtwdev, struct seq_file *m) +static int _show_dm_info(struct rtw89_dev *rtwdev, char *buf, size_t bufsz) { struct rtw89_btc *btc = &rtwdev->btc; const struct rtw89_btc_ver *ver = btc->ver; struct rtw89_btc_dm *dm = &btc->dm; struct rtw89_btc_wl_info *wl = &btc->cx.wl; struct rtw89_btc_bt_info *bt = &btc->cx.bt; + char *p = buf, *end = buf + bufsz; u8 igno_bt; if (!(dm->coex_info_map & BTC_COEX_INFO_DM)) - return; + return 0; - seq_printf(m, "========== [Mechanism Status %s] ==========\n", - (btc->manual_ctrl ? "(Manual)" : "(Auto)")); + p += scnprintf(p, end - p, + "========== [Mechanism Status %s] ==========\n", + (btc->manual_ctrl ? "(Manual)" : "(Auto)")); - seq_printf(m, - " %-15s : type:%s, reason:%s(), action:%s(), ant_path:%s, init_mode:%s, run_cnt:%d\n", - "[status]", - btc->ant_type == BTC_ANT_SHARED ? "shared" : "dedicated", - steps_to_str(dm->run_reason), - steps_to_str(dm->run_action | BTC_ACT_EXT_BIT), - id_to_ant(FIELD_GET(GENMASK(7, 0), dm->set_ant_path)), - id_to_mode(wl->coex_mode), - dm->cnt_dm[BTC_DCNT_RUN]); + p += scnprintf(p, end - p, + " %-15s : type:%s, reason:%s(), action:%s(), ant_path:%s, init_mode:%s, run_cnt:%d\n", + "[status]", + btc->ant_type == BTC_ANT_SHARED ? "shared" : "dedicated", + steps_to_str(dm->run_reason), + steps_to_str(dm->run_action | BTC_ACT_EXT_BIT), + id_to_ant(FIELD_GET(GENMASK(7, 0), dm->set_ant_path)), + id_to_mode(wl->coex_mode), + dm->cnt_dm[BTC_DCNT_RUN]); - _show_dm_step(rtwdev, m); + p += _show_dm_step(rtwdev, p, end - p); if (ver->fcxctrl == 7) igno_bt = btc->ctrl.ctrl_v7.igno_bt; else igno_bt = btc->ctrl.ctrl.igno_bt; - seq_printf(m, " %-15s : wl_only:%d, bt_only:%d, igno_bt:%d, free_run:%d, wl_ps_ctrl:%d, wl_mimo_ps:%d, ", - "[dm_flag]", dm->wl_only, dm->bt_only, igno_bt, - dm->freerun, btc->lps, dm->wl_mimo_ps); + p += scnprintf(p, end - p, + " %-15s : wl_only:%d, bt_only:%d, igno_bt:%d, free_run:%d, wl_ps_ctrl:%d, wl_mimo_ps:%d, ", + "[dm_flag]", dm->wl_only, dm->bt_only, igno_bt, + dm->freerun, btc->lps, dm->wl_mimo_ps); - seq_printf(m, "leak_ap:%d, fw_offload:%s%s\n", dm->leak_ap, - (BTC_CX_FW_OFFLOAD ? "Y" : "N"), - (dm->wl_fw_cx_offload == BTC_CX_FW_OFFLOAD ? - "" : "(Mismatch!!)")); + p += scnprintf(p, end - p, "leak_ap:%d, fw_offload:%s%s\n", + dm->leak_ap, + (BTC_CX_FW_OFFLOAD ? "Y" : "N"), + (dm->wl_fw_cx_offload == BTC_CX_FW_OFFLOAD ? 
+ "" : "(Mismatch!!)")); if (dm->rf_trx_para.wl_tx_power == 0xff) - seq_printf(m, - " %-15s : wl_rssi_lvl:%d, para_lvl:%d, wl_tx_pwr:orig, ", - "[trx_ctrl]", wl->rssi_level, dm->trx_para_level); + p += scnprintf(p, end - p, + " %-15s : wl_rssi_lvl:%d, para_lvl:%d, wl_tx_pwr:orig, ", + "[trx_ctrl]", wl->rssi_level, + dm->trx_para_level); else - seq_printf(m, - " %-15s : wl_rssi_lvl:%d, para_lvl:%d, wl_tx_pwr:%d, ", - "[trx_ctrl]", wl->rssi_level, dm->trx_para_level, - dm->rf_trx_para.wl_tx_power); + p += scnprintf(p, end - p, + " %-15s : wl_rssi_lvl:%d, para_lvl:%d, wl_tx_pwr:%d, ", + "[trx_ctrl]", wl->rssi_level, + dm->trx_para_level, + dm->rf_trx_para.wl_tx_power); - seq_printf(m, - "wl_rx_lvl:%d, bt_tx_pwr_dec:%d, bt_rx_lna:%d(%s-tbl), wl_btg_rx:%d\n", - dm->rf_trx_para.wl_rx_gain, dm->rf_trx_para.bt_tx_power, - dm->rf_trx_para.bt_rx_gain, - (bt->hi_lna_rx ? "Hi" : "Ori"), dm->wl_btg_rx); + p += scnprintf(p, end - p, + "wl_rx_lvl:%d, bt_tx_pwr_dec:%d, bt_rx_lna:%d(%s-tbl), wl_btg_rx:%d\n", + dm->rf_trx_para.wl_rx_gain, + dm->rf_trx_para.bt_tx_power, + dm->rf_trx_para.bt_rx_gain, + (bt->hi_lna_rx ? "Hi" : "Ori"), dm->wl_btg_rx); - seq_printf(m, - " %-15s : wl_tx_limit[en:%d/max_t:%dus/max_retry:%d], bt_slot_reg:%d-TU, bt_scan_rx_low_pri:%d\n", - "[dm_ctrl]", dm->wl_tx_limit.enable, dm->wl_tx_limit.tx_time, - dm->wl_tx_limit.tx_retry, btc->bt_req_len, bt->scan_rx_low_pri); + p += scnprintf(p, end - p, + " %-15s : wl_tx_limit[en:%d/max_t:%dus/max_retry:%d], bt_slot_reg:%d-TU, bt_scan_rx_low_pri:%d\n", + "[dm_ctrl]", dm->wl_tx_limit.enable, + dm->wl_tx_limit.tx_time, + dm->wl_tx_limit.tx_retry, btc->bt_req_len, + bt->scan_rx_low_pri); + + return p - buf; } -static void _show_error(struct rtw89_dev *rtwdev, struct seq_file *m) +static int _show_error(struct rtw89_dev *rtwdev, char *buf, size_t bufsz) { struct rtw89_btc *btc = &rtwdev->btc; const struct rtw89_btc_ver *ver = btc->ver; struct rtw89_btc_btf_fwinfo *pfwinfo = &btc->fwinfo; union rtw89_btc_fbtc_cysta_info *pcysta; + char *p = buf, *end = buf + bufsz; u32 except_cnt, exception_map; pcysta = &pfwinfo->rpt_fbtc_cysta.finfo; @@ -8985,81 +9029,87 @@ static void _show_error(struct rtw89_dev *rtwdev, struct seq_file *m) except_cnt = pcysta->v7.except_cnt; exception_map = le32_to_cpu(pcysta->v7.except_map); } else { - return; + return 0; } if (pfwinfo->event[BTF_EVNT_BUF_OVERFLOW] == 0 && except_cnt == 0 && !pfwinfo->len_mismch && !pfwinfo->fver_mismch) - return; + return 0; - seq_printf(m, " %-15s : ", "[error]"); + p += scnprintf(p, end - p, " %-15s : ", "[error]"); if (pfwinfo->event[BTF_EVNT_BUF_OVERFLOW]) { - seq_printf(m, - "overflow-cnt: %d, ", - pfwinfo->event[BTF_EVNT_BUF_OVERFLOW]); + p += scnprintf(p, end - p, + "overflow-cnt: %d, ", + pfwinfo->event[BTF_EVNT_BUF_OVERFLOW]); } if (pfwinfo->len_mismch) { - seq_printf(m, - "len-mismatch: 0x%x, ", - pfwinfo->len_mismch); + p += scnprintf(p, end - p, + "len-mismatch: 0x%x, ", + pfwinfo->len_mismch); } if (pfwinfo->fver_mismch) { - seq_printf(m, - "fver-mismatch: 0x%x, ", - pfwinfo->fver_mismch); + p += scnprintf(p, end - p, + "fver-mismatch: 0x%x, ", + pfwinfo->fver_mismch); } /* cycle statistics exceptions */ if (exception_map || except_cnt) { - seq_printf(m, - "exception-type: 0x%x, exception-cnt = %d", - exception_map, except_cnt); + p += scnprintf(p, end - p, + "exception-type: 0x%x, exception-cnt = %d", + exception_map, except_cnt); } - seq_puts(m, "\n"); + p += scnprintf(p, end - p, "\n"); + + return p - buf; } -static void _show_fbtc_tdma(struct rtw89_dev *rtwdev, struct 
seq_file *m) +static int _show_fbtc_tdma(struct rtw89_dev *rtwdev, char *buf, size_t bufsz) { struct rtw89_btc *btc = &rtwdev->btc; const struct rtw89_btc_ver *ver = btc->ver; struct rtw89_btc_btf_fwinfo *pfwinfo = &btc->fwinfo; struct rtw89_btc_rpt_cmn_info *pcinfo = NULL; struct rtw89_btc_fbtc_tdma *t = NULL; + char *p = buf, *end = buf + bufsz; pcinfo = &pfwinfo->rpt_fbtc_tdma.cinfo; if (!pcinfo->valid) - return; + return 0; if (ver->fcxtdma == 1) t = &pfwinfo->rpt_fbtc_tdma.finfo.v1; else t = &pfwinfo->rpt_fbtc_tdma.finfo.v3.tdma; - seq_printf(m, - " %-15s : ", "[tdma_policy]"); - seq_printf(m, - "type:%d, rx_flow_ctrl:%d, tx_pause:%d, ", - (u32)t->type, - t->rxflctrl, t->txpause); + p += scnprintf(p, end - p, + " %-15s : ", "[tdma_policy]"); + p += scnprintf(p, end - p, + "type:%d, rx_flow_ctrl:%d, tx_pause:%d, ", + (u32)t->type, + t->rxflctrl, t->txpause); - seq_printf(m, - "wl_toggle_n:%d, leak_n:%d, ext_ctrl:%d, ", - t->wtgle_n, t->leak_n, t->ext_ctrl); + p += scnprintf(p, end - p, + "wl_toggle_n:%d, leak_n:%d, ext_ctrl:%d, ", + t->wtgle_n, t->leak_n, t->ext_ctrl); - seq_printf(m, - "policy_type:%d", - (u32)btc->policy_type); + p += scnprintf(p, end - p, + "policy_type:%d", + (u32)btc->policy_type); - seq_puts(m, "\n"); + p += scnprintf(p, end - p, "\n"); + + return p - buf; } -static void _show_fbtc_slots(struct rtw89_dev *rtwdev, struct seq_file *m) +static int _show_fbtc_slots(struct rtw89_dev *rtwdev, char *buf, size_t bufsz) { struct rtw89_btc *btc = &rtwdev->btc; struct rtw89_btc_dm *dm = &btc->dm; + char *p = buf, *end = buf + bufsz; u16 dur, cxtype; u32 tbl; u8 i = 0; @@ -9074,28 +9124,30 @@ static void _show_fbtc_slots(struct rtw89_dev *rtwdev, struct seq_file *m) tbl = le32_to_cpu(dm->slot_now.v7[i].cxtbl); cxtype = le16_to_cpu(dm->slot_now.v7[i].cxtype); } else { - return; + return 0; } if (i % 5 == 0) - seq_printf(m, - " %-15s : %5s[%03d/0x%x/%d]", - "[slot_list]", - id_to_slot((u32)i), - dur, tbl, cxtype); + p += scnprintf(p, end - p, + " %-15s : %5s[%03d/0x%x/%d]", + "[slot_list]", + id_to_slot((u32)i), + dur, tbl, cxtype); else - seq_printf(m, - ", %5s[%03d/0x%x/%d]", - id_to_slot((u32)i), - dur, tbl, cxtype); + p += scnprintf(p, end - p, + ", %5s[%03d/0x%x/%d]", + id_to_slot((u32)i), + dur, tbl, cxtype); if (i % 5 == 4) - seq_puts(m, "\n"); + p += scnprintf(p, end - p, "\n"); } - seq_puts(m, "\n"); + p += scnprintf(p, end - p, "\n"); + + return p - buf; } -static void _show_fbtc_cysta_v2(struct rtw89_dev *rtwdev, struct seq_file *m) +static int _show_fbtc_cysta_v2(struct rtw89_dev *rtwdev, char *buf, size_t bufsz) { struct rtw89_btc *btc = &rtwdev->btc; struct rtw89_btc_btf_fwinfo *pfwinfo = &btc->fwinfo; @@ -9104,63 +9156,64 @@ static void _show_fbtc_cysta_v2(struct rtw89_dev *rtwdev, struct seq_file *m) struct rtw89_btc_rpt_cmn_info *pcinfo = NULL; struct rtw89_btc_fbtc_cysta_v2 *pcysta_le32 = NULL; union rtw89_btc_fbtc_rxflct r; - u8 i, cnt = 0, slot_pair; u16 cycle, c_begin, c_end, store_index; + char *p = buf, *end = buf + bufsz; + u8 i, cnt = 0, slot_pair; pcinfo = &pfwinfo->rpt_fbtc_cysta.cinfo; if (!pcinfo->valid) - return; + return 0; pcysta_le32 = &pfwinfo->rpt_fbtc_cysta.finfo.v2; - seq_printf(m, - " %-15s : cycle:%d, bcn[all:%d/all_ok:%d/bt:%d/bt_ok:%d]", - "[cycle_cnt]", - le16_to_cpu(pcysta_le32->cycles), - le32_to_cpu(pcysta_le32->bcn_cnt[CXBCN_ALL]), - le32_to_cpu(pcysta_le32->bcn_cnt[CXBCN_ALL_OK]), - le32_to_cpu(pcysta_le32->bcn_cnt[CXBCN_BT_SLOT]), - le32_to_cpu(pcysta_le32->bcn_cnt[CXBCN_BT_OK])); + p += scnprintf(p, end - p, + " %-15s : 
cycle:%d, bcn[all:%d/all_ok:%d/bt:%d/bt_ok:%d]", + "[cycle_cnt]", + le16_to_cpu(pcysta_le32->cycles), + le32_to_cpu(pcysta_le32->bcn_cnt[CXBCN_ALL]), + le32_to_cpu(pcysta_le32->bcn_cnt[CXBCN_ALL_OK]), + le32_to_cpu(pcysta_le32->bcn_cnt[CXBCN_BT_SLOT]), + le32_to_cpu(pcysta_le32->bcn_cnt[CXBCN_BT_OK])); for (i = 0; i < CXST_MAX; i++) { if (!le32_to_cpu(pcysta_le32->slot_cnt[i])) continue; - seq_printf(m, ", %s:%d", id_to_slot((u32)i), - le32_to_cpu(pcysta_le32->slot_cnt[i])); + p += scnprintf(p, end - p, ", %s:%d", id_to_slot((u32)i), + le32_to_cpu(pcysta_le32->slot_cnt[i])); } if (dm->tdma_now.rxflctrl) { - seq_printf(m, ", leak_rx:%d", - le32_to_cpu(pcysta_le32->leakrx_cnt)); + p += scnprintf(p, end - p, ", leak_rx:%d", + le32_to_cpu(pcysta_le32->leakrx_cnt)); } if (le32_to_cpu(pcysta_le32->collision_cnt)) { - seq_printf(m, ", collision:%d", - le32_to_cpu(pcysta_le32->collision_cnt)); + p += scnprintf(p, end - p, ", collision:%d", + le32_to_cpu(pcysta_le32->collision_cnt)); } if (le32_to_cpu(pcysta_le32->skip_cnt)) { - seq_printf(m, ", skip:%d", - le32_to_cpu(pcysta_le32->skip_cnt)); - } - seq_puts(m, "\n"); - - seq_printf(m, " %-15s : avg_t[wl:%d/bt:%d/lk:%d.%03d]", - "[cycle_time]", - le16_to_cpu(pcysta_le32->tavg_cycle[CXT_WL]), - le16_to_cpu(pcysta_le32->tavg_cycle[CXT_BT]), - le16_to_cpu(pcysta_le32->tavg_lk) / 1000, - le16_to_cpu(pcysta_le32->tavg_lk) % 1000); - seq_printf(m, ", max_t[wl:%d/bt:%d/lk:%d.%03d]", - le16_to_cpu(pcysta_le32->tmax_cycle[CXT_WL]), - le16_to_cpu(pcysta_le32->tmax_cycle[CXT_BT]), - le16_to_cpu(pcysta_le32->tmax_lk) / 1000, - le16_to_cpu(pcysta_le32->tmax_lk) % 1000); - seq_printf(m, ", maxdiff_t[wl:%d/bt:%d]\n", - le16_to_cpu(pcysta_le32->tmaxdiff_cycle[CXT_WL]), - le16_to_cpu(pcysta_le32->tmaxdiff_cycle[CXT_BT])); + p += scnprintf(p, end - p, ", skip:%d", + le32_to_cpu(pcysta_le32->skip_cnt)); + } + p += scnprintf(p, end - p, "\n"); + + p += scnprintf(p, end - p, " %-15s : avg_t[wl:%d/bt:%d/lk:%d.%03d]", + "[cycle_time]", + le16_to_cpu(pcysta_le32->tavg_cycle[CXT_WL]), + le16_to_cpu(pcysta_le32->tavg_cycle[CXT_BT]), + le16_to_cpu(pcysta_le32->tavg_lk) / 1000, + le16_to_cpu(pcysta_le32->tavg_lk) % 1000); + p += scnprintf(p, end - p, ", max_t[wl:%d/bt:%d/lk:%d.%03d]", + le16_to_cpu(pcysta_le32->tmax_cycle[CXT_WL]), + le16_to_cpu(pcysta_le32->tmax_cycle[CXT_BT]), + le16_to_cpu(pcysta_le32->tmax_lk) / 1000, + le16_to_cpu(pcysta_le32->tmax_lk) % 1000); + p += scnprintf(p, end - p, ", maxdiff_t[wl:%d/bt:%d]\n", + le16_to_cpu(pcysta_le32->tmaxdiff_cycle[CXT_WL]), + le16_to_cpu(pcysta_le32->tmaxdiff_cycle[CXT_BT])); if (le16_to_cpu(pcysta_le32->cycles) <= 1) - return; + goto out; /* 1 cycle record 1 wl-slot and 1 bt-slot */ slot_pair = BTC_CYCLE_SLOT_MAX / 2; @@ -9177,53 +9230,57 @@ static void _show_fbtc_cysta_v2(struct rtw89_dev *rtwdev, struct seq_file *m) store_index = ((cycle - 1) % slot_pair) * 2; if (cnt % (BTC_CYCLE_SLOT_MAX / 4) == 1) - seq_printf(m, - " %-15s : ->b%02d->w%02d", "[cycle_step]", - le16_to_cpu(pcysta_le32->tslot_cycle[store_index]), - le16_to_cpu(pcysta_le32->tslot_cycle[store_index + 1])); + p += scnprintf(p, end - p, + " %-15s : ->b%02d->w%02d", + "[cycle_step]", + le16_to_cpu(pcysta_le32->tslot_cycle[store_index]), + le16_to_cpu(pcysta_le32->tslot_cycle[store_index + 1])); else - seq_printf(m, - "->b%02d->w%02d", - le16_to_cpu(pcysta_le32->tslot_cycle[store_index]), - le16_to_cpu(pcysta_le32->tslot_cycle[store_index + 1])); + p += scnprintf(p, end - p, + "->b%02d->w%02d", + le16_to_cpu(pcysta_le32->tslot_cycle[store_index]), + 
le16_to_cpu(pcysta_le32->tslot_cycle[store_index + 1])); if (cnt % (BTC_CYCLE_SLOT_MAX / 4) == 0 || cnt == c_end) - seq_puts(m, "\n"); + p += scnprintf(p, end - p, "\n"); } if (a2dp->exist) { - seq_printf(m, - " %-15s : a2dp_ept:%d, a2dp_late:%d", - "[a2dp_t_sta]", - le16_to_cpu(pcysta_le32->a2dpept), - le16_to_cpu(pcysta_le32->a2dpeptto)); - - seq_printf(m, - ", avg_t:%d, max_t:%d", - le16_to_cpu(pcysta_le32->tavg_a2dpept), - le16_to_cpu(pcysta_le32->tmax_a2dpept)); + p += scnprintf(p, end - p, + " %-15s : a2dp_ept:%d, a2dp_late:%d", + "[a2dp_t_sta]", + le16_to_cpu(pcysta_le32->a2dpept), + le16_to_cpu(pcysta_le32->a2dpeptto)); + + p += scnprintf(p, end - p, + ", avg_t:%d, max_t:%d", + le16_to_cpu(pcysta_le32->tavg_a2dpept), + le16_to_cpu(pcysta_le32->tmax_a2dpept)); r.val = dm->tdma_now.rxflctrl; if (r.type && r.tgln_n) { - seq_printf(m, - ", cycle[PSTDMA:%d/TDMA:%d], ", - le16_to_cpu(pcysta_le32->cycles_a2dp[CXT_FLCTRL_ON]), - le16_to_cpu(pcysta_le32->cycles_a2dp[CXT_FLCTRL_OFF])); - - seq_printf(m, - "avg_t[PSTDMA:%d/TDMA:%d], ", - le16_to_cpu(pcysta_le32->tavg_a2dp[CXT_FLCTRL_ON]), - le16_to_cpu(pcysta_le32->tavg_a2dp[CXT_FLCTRL_OFF])); - - seq_printf(m, - "max_t[PSTDMA:%d/TDMA:%d]", - le16_to_cpu(pcysta_le32->tmax_a2dp[CXT_FLCTRL_ON]), - le16_to_cpu(pcysta_le32->tmax_a2dp[CXT_FLCTRL_OFF])); + p += scnprintf(p, end - p, + ", cycle[PSTDMA:%d/TDMA:%d], ", + le16_to_cpu(pcysta_le32->cycles_a2dp[CXT_FLCTRL_ON]), + le16_to_cpu(pcysta_le32->cycles_a2dp[CXT_FLCTRL_OFF])); + + p += scnprintf(p, end - p, + "avg_t[PSTDMA:%d/TDMA:%d], ", + le16_to_cpu(pcysta_le32->tavg_a2dp[CXT_FLCTRL_ON]), + le16_to_cpu(pcysta_le32->tavg_a2dp[CXT_FLCTRL_OFF])); + + p += scnprintf(p, end - p, + "max_t[PSTDMA:%d/TDMA:%d]", + le16_to_cpu(pcysta_le32->tmax_a2dp[CXT_FLCTRL_ON]), + le16_to_cpu(pcysta_le32->tmax_a2dp[CXT_FLCTRL_OFF])); } - seq_puts(m, "\n"); + p += scnprintf(p, end - p, "\n"); } + +out: + return p - buf; } -static void _show_fbtc_cysta_v3(struct rtw89_dev *rtwdev, struct seq_file *m) +static int _show_fbtc_cysta_v3(struct rtw89_dev *rtwdev, char *buf, size_t bufsz) { struct rtw89_btc *btc = &rtwdev->btc; struct rtw89_btc_bt_a2dp_desc *a2dp = &btc->cx.bt.link_info.a2dp_desc; @@ -9234,60 +9291,64 @@ static void _show_fbtc_cysta_v3(struct rtw89_dev *rtwdev, struct seq_file *m) struct rtw89_btc_rpt_cmn_info *pcinfo; u8 i, cnt = 0, slot_pair, divide_cnt; u16 cycle, c_begin, c_end, store_index; + char *p = buf, *end = buf + bufsz; pcinfo = &pfwinfo->rpt_fbtc_cysta.cinfo; if (!pcinfo->valid) - return; + return 0; pcysta = &pfwinfo->rpt_fbtc_cysta.finfo.v3; - seq_printf(m, - " %-15s : cycle:%d, bcn[all:%d/all_ok:%d/bt:%d/bt_ok:%d]", - "[cycle_cnt]", - le16_to_cpu(pcysta->cycles), - le32_to_cpu(pcysta->bcn_cnt[CXBCN_ALL]), - le32_to_cpu(pcysta->bcn_cnt[CXBCN_ALL_OK]), - le32_to_cpu(pcysta->bcn_cnt[CXBCN_BT_SLOT]), - le32_to_cpu(pcysta->bcn_cnt[CXBCN_BT_OK])); + p += scnprintf(p, end - p, + " %-15s : cycle:%d, bcn[all:%d/all_ok:%d/bt:%d/bt_ok:%d]", + "[cycle_cnt]", + le16_to_cpu(pcysta->cycles), + le32_to_cpu(pcysta->bcn_cnt[CXBCN_ALL]), + le32_to_cpu(pcysta->bcn_cnt[CXBCN_ALL_OK]), + le32_to_cpu(pcysta->bcn_cnt[CXBCN_BT_SLOT]), + le32_to_cpu(pcysta->bcn_cnt[CXBCN_BT_OK])); for (i = 0; i < CXST_MAX; i++) { if (!le32_to_cpu(pcysta->slot_cnt[i])) continue; - seq_printf(m, ", %s:%d", id_to_slot(i), - le32_to_cpu(pcysta->slot_cnt[i])); + p += scnprintf(p, end - p, ", %s:%d", id_to_slot(i), + le32_to_cpu(pcysta->slot_cnt[i])); } if (dm->tdma_now.rxflctrl) - seq_printf(m, ", leak_rx:%d", 
le32_to_cpu(pcysta->leak_slot.cnt_rximr)); + p += scnprintf(p, end - p, ", leak_rx:%d", + le32_to_cpu(pcysta->leak_slot.cnt_rximr)); if (le32_to_cpu(pcysta->collision_cnt)) - seq_printf(m, ", collision:%d", le32_to_cpu(pcysta->collision_cnt)); + p += scnprintf(p, end - p, ", collision:%d", + le32_to_cpu(pcysta->collision_cnt)); if (le32_to_cpu(pcysta->skip_cnt)) - seq_printf(m, ", skip:%d", le32_to_cpu(pcysta->skip_cnt)); - - seq_puts(m, "\n"); - - seq_printf(m, " %-15s : avg_t[wl:%d/bt:%d/lk:%d.%03d]", - "[cycle_time]", - le16_to_cpu(pcysta->cycle_time.tavg[CXT_WL]), - le16_to_cpu(pcysta->cycle_time.tavg[CXT_BT]), - le16_to_cpu(pcysta->leak_slot.tavg) / 1000, - le16_to_cpu(pcysta->leak_slot.tavg) % 1000); - seq_printf(m, - ", max_t[wl:%d/bt:%d/lk:%d.%03d]", - le16_to_cpu(pcysta->cycle_time.tmax[CXT_WL]), - le16_to_cpu(pcysta->cycle_time.tmax[CXT_BT]), - le16_to_cpu(pcysta->leak_slot.tmax) / 1000, - le16_to_cpu(pcysta->leak_slot.tmax) % 1000); - seq_printf(m, - ", maxdiff_t[wl:%d/bt:%d]\n", - le16_to_cpu(pcysta->cycle_time.tmaxdiff[CXT_WL]), - le16_to_cpu(pcysta->cycle_time.tmaxdiff[CXT_BT])); + p += scnprintf(p, end - p, ", skip:%d", + le32_to_cpu(pcysta->skip_cnt)); + + p += scnprintf(p, end - p, "\n"); + + p += scnprintf(p, end - p, " %-15s : avg_t[wl:%d/bt:%d/lk:%d.%03d]", + "[cycle_time]", + le16_to_cpu(pcysta->cycle_time.tavg[CXT_WL]), + le16_to_cpu(pcysta->cycle_time.tavg[CXT_BT]), + le16_to_cpu(pcysta->leak_slot.tavg) / 1000, + le16_to_cpu(pcysta->leak_slot.tavg) % 1000); + p += scnprintf(p, end - p, + ", max_t[wl:%d/bt:%d/lk:%d.%03d]", + le16_to_cpu(pcysta->cycle_time.tmax[CXT_WL]), + le16_to_cpu(pcysta->cycle_time.tmax[CXT_BT]), + le16_to_cpu(pcysta->leak_slot.tmax) / 1000, + le16_to_cpu(pcysta->leak_slot.tmax) % 1000); + p += scnprintf(p, end - p, + ", maxdiff_t[wl:%d/bt:%d]\n", + le16_to_cpu(pcysta->cycle_time.tmaxdiff[CXT_WL]), + le16_to_cpu(pcysta->cycle_time.tmaxdiff[CXT_BT])); cycle = le16_to_cpu(pcysta->cycles); if (cycle <= 1) - return; + goto out; /* 1 cycle record 1 wl-slot and 1 bt-slot */ slot_pair = BTC_CYCLE_SLOT_MAX / 2; @@ -9309,51 +9370,56 @@ static void _show_fbtc_cysta_v3(struct rtw89_dev *rtwdev, struct seq_file *m) store_index = ((cycle - 1) % slot_pair) * 2; if (cnt % divide_cnt == 1) - seq_printf(m, " %-15s : ", "[cycle_step]"); + p += scnprintf(p, end - p, " %-15s : ", + "[cycle_step]"); - seq_printf(m, "->b%02d", - le16_to_cpu(pcysta->slot_step_time[store_index])); + p += scnprintf(p, end - p, "->b%02d", + le16_to_cpu(pcysta->slot_step_time[store_index])); if (a2dp->exist) { a2dp_trx = &pcysta->a2dp_trx[store_index]; - seq_printf(m, "(%d/%d/%dM/%d/%d/%d)", - a2dp_trx->empty_cnt, - a2dp_trx->retry_cnt, - a2dp_trx->tx_rate ? 3 : 2, - a2dp_trx->tx_cnt, - a2dp_trx->ack_cnt, - a2dp_trx->nack_cnt); + p += scnprintf(p, end - p, "(%d/%d/%dM/%d/%d/%d)", + a2dp_trx->empty_cnt, + a2dp_trx->retry_cnt, + a2dp_trx->tx_rate ? 3 : 2, + a2dp_trx->tx_cnt, + a2dp_trx->ack_cnt, + a2dp_trx->nack_cnt); } - seq_printf(m, "->w%02d", - le16_to_cpu(pcysta->slot_step_time[store_index + 1])); + p += scnprintf(p, end - p, "->w%02d", + le16_to_cpu(pcysta->slot_step_time[store_index + 1])); if (a2dp->exist) { a2dp_trx = &pcysta->a2dp_trx[store_index + 1]; - seq_printf(m, "(%d/%d/%dM/%d/%d/%d)", - a2dp_trx->empty_cnt, - a2dp_trx->retry_cnt, - a2dp_trx->tx_rate ? 3 : 2, - a2dp_trx->tx_cnt, - a2dp_trx->ack_cnt, - a2dp_trx->nack_cnt); + p += scnprintf(p, end - p, "(%d/%d/%dM/%d/%d/%d)", + a2dp_trx->empty_cnt, + a2dp_trx->retry_cnt, + a2dp_trx->tx_rate ? 
3 : 2, + a2dp_trx->tx_cnt, + a2dp_trx->ack_cnt, + a2dp_trx->nack_cnt); } if (cnt % divide_cnt == 0 || cnt == c_end) - seq_puts(m, "\n"); + p += scnprintf(p, end - p, "\n"); } if (a2dp->exist) { - seq_printf(m, " %-15s : a2dp_ept:%d, a2dp_late:%d", - "[a2dp_t_sta]", - le16_to_cpu(pcysta->a2dp_ept.cnt), - le16_to_cpu(pcysta->a2dp_ept.cnt_timeout)); + p += scnprintf(p, end - p, + " %-15s : a2dp_ept:%d, a2dp_late:%d", + "[a2dp_t_sta]", + le16_to_cpu(pcysta->a2dp_ept.cnt), + le16_to_cpu(pcysta->a2dp_ept.cnt_timeout)); - seq_printf(m, ", avg_t:%d, max_t:%d", - le16_to_cpu(pcysta->a2dp_ept.tavg), - le16_to_cpu(pcysta->a2dp_ept.tmax)); + p += scnprintf(p, end - p, ", avg_t:%d, max_t:%d", + le16_to_cpu(pcysta->a2dp_ept.tavg), + le16_to_cpu(pcysta->a2dp_ept.tmax)); - seq_puts(m, "\n"); + p += scnprintf(p, end - p, "\n"); } + +out: + return p - buf; } -static void _show_fbtc_cysta_v4(struct rtw89_dev *rtwdev, struct seq_file *m) +static int _show_fbtc_cysta_v4(struct rtw89_dev *rtwdev, char *buf, size_t bufsz) { struct rtw89_btc *btc = &rtwdev->btc; struct rtw89_btc_bt_a2dp_desc *a2dp = &btc->cx.bt.link_info.a2dp_desc; @@ -9364,62 +9430,64 @@ static void _show_fbtc_cysta_v4(struct rtw89_dev *rtwdev, struct seq_file *m) struct rtw89_btc_rpt_cmn_info *pcinfo; u8 i, cnt = 0, slot_pair, divide_cnt; u16 cycle, c_begin, c_end, store_index; + char *p = buf, *end = buf + bufsz; pcinfo = &pfwinfo->rpt_fbtc_cysta.cinfo; if (!pcinfo->valid) - return; + return 0; pcysta = &pfwinfo->rpt_fbtc_cysta.finfo.v4; - seq_printf(m, - " %-15s : cycle:%d, bcn[all:%d/all_ok:%d/bt:%d/bt_ok:%d]", - "[cycle_cnt]", - le16_to_cpu(pcysta->cycles), - le16_to_cpu(pcysta->bcn_cnt[CXBCN_ALL]), - le16_to_cpu(pcysta->bcn_cnt[CXBCN_ALL_OK]), - le16_to_cpu(pcysta->bcn_cnt[CXBCN_BT_SLOT]), - le16_to_cpu(pcysta->bcn_cnt[CXBCN_BT_OK])); + p += scnprintf(p, end - p, + " %-15s : cycle:%d, bcn[all:%d/all_ok:%d/bt:%d/bt_ok:%d]", + "[cycle_cnt]", + le16_to_cpu(pcysta->cycles), + le16_to_cpu(pcysta->bcn_cnt[CXBCN_ALL]), + le16_to_cpu(pcysta->bcn_cnt[CXBCN_ALL_OK]), + le16_to_cpu(pcysta->bcn_cnt[CXBCN_BT_SLOT]), + le16_to_cpu(pcysta->bcn_cnt[CXBCN_BT_OK])); for (i = 0; i < CXST_MAX; i++) { if (!le16_to_cpu(pcysta->slot_cnt[i])) continue; - seq_printf(m, ", %s:%d", id_to_slot(i), - le16_to_cpu(pcysta->slot_cnt[i])); + p += scnprintf(p, end - p, ", %s:%d", id_to_slot(i), + le16_to_cpu(pcysta->slot_cnt[i])); } if (dm->tdma_now.rxflctrl) - seq_printf(m, ", leak_rx:%d", - le32_to_cpu(pcysta->leak_slot.cnt_rximr)); + p += scnprintf(p, end - p, ", leak_rx:%d", + le32_to_cpu(pcysta->leak_slot.cnt_rximr)); if (pcysta->collision_cnt) - seq_printf(m, ", collision:%d", pcysta->collision_cnt); + p += scnprintf(p, end - p, ", collision:%d", + pcysta->collision_cnt); if (le16_to_cpu(pcysta->skip_cnt)) - seq_printf(m, ", skip:%d", - le16_to_cpu(pcysta->skip_cnt)); - - seq_puts(m, "\n"); - - seq_printf(m, " %-15s : avg_t[wl:%d/bt:%d/lk:%d.%03d]", - "[cycle_time]", - le16_to_cpu(pcysta->cycle_time.tavg[CXT_WL]), - le16_to_cpu(pcysta->cycle_time.tavg[CXT_BT]), - le16_to_cpu(pcysta->leak_slot.tavg) / 1000, - le16_to_cpu(pcysta->leak_slot.tavg) % 1000); - seq_printf(m, - ", max_t[wl:%d/bt:%d/lk:%d.%03d]", - le16_to_cpu(pcysta->cycle_time.tmax[CXT_WL]), - le16_to_cpu(pcysta->cycle_time.tmax[CXT_BT]), - le16_to_cpu(pcysta->leak_slot.tmax) / 1000, - le16_to_cpu(pcysta->leak_slot.tmax) % 1000); - seq_printf(m, - ", maxdiff_t[wl:%d/bt:%d]\n", - le16_to_cpu(pcysta->cycle_time.tmaxdiff[CXT_WL]), - le16_to_cpu(pcysta->cycle_time.tmaxdiff[CXT_BT])); + p += scnprintf(p, end - p, 
", skip:%d", + le16_to_cpu(pcysta->skip_cnt)); + + p += scnprintf(p, end - p, "\n"); + + p += scnprintf(p, end - p, " %-15s : avg_t[wl:%d/bt:%d/lk:%d.%03d]", + "[cycle_time]", + le16_to_cpu(pcysta->cycle_time.tavg[CXT_WL]), + le16_to_cpu(pcysta->cycle_time.tavg[CXT_BT]), + le16_to_cpu(pcysta->leak_slot.tavg) / 1000, + le16_to_cpu(pcysta->leak_slot.tavg) % 1000); + p += scnprintf(p, end - p, + ", max_t[wl:%d/bt:%d/lk:%d.%03d]", + le16_to_cpu(pcysta->cycle_time.tmax[CXT_WL]), + le16_to_cpu(pcysta->cycle_time.tmax[CXT_BT]), + le16_to_cpu(pcysta->leak_slot.tmax) / 1000, + le16_to_cpu(pcysta->leak_slot.tmax) % 1000); + p += scnprintf(p, end - p, + ", maxdiff_t[wl:%d/bt:%d]\n", + le16_to_cpu(pcysta->cycle_time.tmaxdiff[CXT_WL]), + le16_to_cpu(pcysta->cycle_time.tmaxdiff[CXT_BT])); cycle = le16_to_cpu(pcysta->cycles); if (cycle <= 1) - return; + goto out; /* 1 cycle record 1 wl-slot and 1 bt-slot */ slot_pair = BTC_CYCLE_SLOT_MAX / 2; @@ -9441,51 +9509,56 @@ static void _show_fbtc_cysta_v4(struct rtw89_dev *rtwdev, struct seq_file *m) store_index = ((cycle - 1) % slot_pair) * 2; if (cnt % divide_cnt == 1) - seq_printf(m, " %-15s : ", "[cycle_step]"); + p += scnprintf(p, end - p, " %-15s : ", + "[cycle_step]"); - seq_printf(m, "->b%02d", - le16_to_cpu(pcysta->slot_step_time[store_index])); + p += scnprintf(p, end - p, "->b%02d", + le16_to_cpu(pcysta->slot_step_time[store_index])); if (a2dp->exist) { a2dp_trx = &pcysta->a2dp_trx[store_index]; - seq_printf(m, "(%d/%d/%dM/%d/%d/%d)", - a2dp_trx->empty_cnt, - a2dp_trx->retry_cnt, - a2dp_trx->tx_rate ? 3 : 2, - a2dp_trx->tx_cnt, - a2dp_trx->ack_cnt, - a2dp_trx->nack_cnt); + p += scnprintf(p, end - p, "(%d/%d/%dM/%d/%d/%d)", + a2dp_trx->empty_cnt, + a2dp_trx->retry_cnt, + a2dp_trx->tx_rate ? 3 : 2, + a2dp_trx->tx_cnt, + a2dp_trx->ack_cnt, + a2dp_trx->nack_cnt); } - seq_printf(m, "->w%02d", - le16_to_cpu(pcysta->slot_step_time[store_index + 1])); + p += scnprintf(p, end - p, "->w%02d", + le16_to_cpu(pcysta->slot_step_time[store_index + 1])); if (a2dp->exist) { a2dp_trx = &pcysta->a2dp_trx[store_index + 1]; - seq_printf(m, "(%d/%d/%dM/%d/%d/%d)", - a2dp_trx->empty_cnt, - a2dp_trx->retry_cnt, - a2dp_trx->tx_rate ? 3 : 2, - a2dp_trx->tx_cnt, - a2dp_trx->ack_cnt, - a2dp_trx->nack_cnt); + p += scnprintf(p, end - p, "(%d/%d/%dM/%d/%d/%d)", + a2dp_trx->empty_cnt, + a2dp_trx->retry_cnt, + a2dp_trx->tx_rate ? 
3 : 2, + a2dp_trx->tx_cnt, + a2dp_trx->ack_cnt, + a2dp_trx->nack_cnt); } if (cnt % divide_cnt == 0 || cnt == c_end) - seq_puts(m, "\n"); + p += scnprintf(p, end - p, "\n"); } if (a2dp->exist) { - seq_printf(m, " %-15s : a2dp_ept:%d, a2dp_late:%d", - "[a2dp_t_sta]", - le16_to_cpu(pcysta->a2dp_ept.cnt), - le16_to_cpu(pcysta->a2dp_ept.cnt_timeout)); + p += scnprintf(p, end - p, + " %-15s : a2dp_ept:%d, a2dp_late:%d", + "[a2dp_t_sta]", + le16_to_cpu(pcysta->a2dp_ept.cnt), + le16_to_cpu(pcysta->a2dp_ept.cnt_timeout)); - seq_printf(m, ", avg_t:%d, max_t:%d", - le16_to_cpu(pcysta->a2dp_ept.tavg), - le16_to_cpu(pcysta->a2dp_ept.tmax)); + p += scnprintf(p, end - p, ", avg_t:%d, max_t:%d", + le16_to_cpu(pcysta->a2dp_ept.tavg), + le16_to_cpu(pcysta->a2dp_ept.tmax)); - seq_puts(m, "\n"); + p += scnprintf(p, end - p, "\n"); } + +out: + return p - buf; } -static void _show_fbtc_cysta_v5(struct rtw89_dev *rtwdev, struct seq_file *m) +static int _show_fbtc_cysta_v5(struct rtw89_dev *rtwdev, char *buf, size_t bufsz) { struct rtw89_btc *btc = &rtwdev->btc; struct rtw89_btc_bt_a2dp_desc *a2dp = &btc->cx.bt.link_info.a2dp_desc; @@ -9496,58 +9569,60 @@ static void _show_fbtc_cysta_v5(struct rtw89_dev *rtwdev, struct seq_file *m) struct rtw89_btc_rpt_cmn_info *pcinfo; u8 i, cnt = 0, slot_pair, divide_cnt; u16 cycle, c_begin, c_end, store_index; + char *p = buf, *end = buf + bufsz; pcinfo = &pfwinfo->rpt_fbtc_cysta.cinfo; if (!pcinfo->valid) - return; + return 0; pcysta = &pfwinfo->rpt_fbtc_cysta.finfo.v5; - seq_printf(m, - " %-15s : cycle:%d, bcn[all:%d/all_ok:%d/bt:%d/bt_ok:%d]", - "[cycle_cnt]", - le16_to_cpu(pcysta->cycles), - le16_to_cpu(pcysta->bcn_cnt[CXBCN_ALL]), - le16_to_cpu(pcysta->bcn_cnt[CXBCN_ALL_OK]), - le16_to_cpu(pcysta->bcn_cnt[CXBCN_BT_SLOT]), - le16_to_cpu(pcysta->bcn_cnt[CXBCN_BT_OK])); + p += scnprintf(p, end - p, + " %-15s : cycle:%d, bcn[all:%d/all_ok:%d/bt:%d/bt_ok:%d]", + "[cycle_cnt]", + le16_to_cpu(pcysta->cycles), + le16_to_cpu(pcysta->bcn_cnt[CXBCN_ALL]), + le16_to_cpu(pcysta->bcn_cnt[CXBCN_ALL_OK]), + le16_to_cpu(pcysta->bcn_cnt[CXBCN_BT_SLOT]), + le16_to_cpu(pcysta->bcn_cnt[CXBCN_BT_OK])); for (i = 0; i < CXST_MAX; i++) { if (!le16_to_cpu(pcysta->slot_cnt[i])) continue; - seq_printf(m, ", %s:%d", id_to_slot(i), - le16_to_cpu(pcysta->slot_cnt[i])); + p += scnprintf(p, end - p, ", %s:%d", id_to_slot(i), + le16_to_cpu(pcysta->slot_cnt[i])); } if (dm->tdma_now.rxflctrl) - seq_printf(m, ", leak_rx:%d", - le32_to_cpu(pcysta->leak_slot.cnt_rximr)); + p += scnprintf(p, end - p, ", leak_rx:%d", + le32_to_cpu(pcysta->leak_slot.cnt_rximr)); if (pcysta->collision_cnt) - seq_printf(m, ", collision:%d", pcysta->collision_cnt); + p += scnprintf(p, end - p, ", collision:%d", + pcysta->collision_cnt); if (le16_to_cpu(pcysta->skip_cnt)) - seq_printf(m, ", skip:%d", - le16_to_cpu(pcysta->skip_cnt)); - - seq_puts(m, "\n"); - - seq_printf(m, " %-15s : avg_t[wl:%d/bt:%d/lk:%d.%03d]", - "[cycle_time]", - le16_to_cpu(pcysta->cycle_time.tavg[CXT_WL]), - le16_to_cpu(pcysta->cycle_time.tavg[CXT_BT]), - le16_to_cpu(pcysta->leak_slot.tavg) / 1000, - le16_to_cpu(pcysta->leak_slot.tavg) % 1000); - seq_printf(m, - ", max_t[wl:%d/bt:%d/lk:%d.%03d]\n", - le16_to_cpu(pcysta->cycle_time.tmax[CXT_WL]), - le16_to_cpu(pcysta->cycle_time.tmax[CXT_BT]), - le16_to_cpu(pcysta->leak_slot.tmax) / 1000, - le16_to_cpu(pcysta->leak_slot.tmax) % 1000); + p += scnprintf(p, end - p, ", skip:%d", + le16_to_cpu(pcysta->skip_cnt)); + + p += scnprintf(p, end - p, "\n"); + + p += scnprintf(p, end - p, " %-15s : 
avg_t[wl:%d/bt:%d/lk:%d.%03d]", + "[cycle_time]", + le16_to_cpu(pcysta->cycle_time.tavg[CXT_WL]), + le16_to_cpu(pcysta->cycle_time.tavg[CXT_BT]), + le16_to_cpu(pcysta->leak_slot.tavg) / 1000, + le16_to_cpu(pcysta->leak_slot.tavg) % 1000); + p += scnprintf(p, end - p, + ", max_t[wl:%d/bt:%d/lk:%d.%03d]\n", + le16_to_cpu(pcysta->cycle_time.tmax[CXT_WL]), + le16_to_cpu(pcysta->cycle_time.tmax[CXT_BT]), + le16_to_cpu(pcysta->leak_slot.tmax) / 1000, + le16_to_cpu(pcysta->leak_slot.tmax) % 1000); cycle = le16_to_cpu(pcysta->cycles); if (cycle <= 1) - return; + goto out; /* 1 cycle record 1 wl-slot and 1 bt-slot */ slot_pair = BTC_CYCLE_SLOT_MAX / 2; @@ -9565,58 +9640,63 @@ static void _show_fbtc_cysta_v5(struct rtw89_dev *rtwdev, struct seq_file *m) divide_cnt = BTC_CYCLE_SLOT_MAX / 4; if (c_begin > c_end) - return; + goto out; for (cycle = c_begin; cycle <= c_end; cycle++) { cnt++; store_index = ((cycle - 1) % slot_pair) * 2; if (cnt % divide_cnt == 1) - seq_printf(m, " %-15s : ", "[cycle_step]"); + p += scnprintf(p, end - p, " %-15s : ", + "[cycle_step]"); - seq_printf(m, "->b%02d", - le16_to_cpu(pcysta->slot_step_time[store_index])); + p += scnprintf(p, end - p, "->b%02d", + le16_to_cpu(pcysta->slot_step_time[store_index])); if (a2dp->exist) { a2dp_trx = &pcysta->a2dp_trx[store_index]; - seq_printf(m, "(%d/%d/%dM/%d/%d/%d)", - a2dp_trx->empty_cnt, - a2dp_trx->retry_cnt, - a2dp_trx->tx_rate ? 3 : 2, - a2dp_trx->tx_cnt, - a2dp_trx->ack_cnt, - a2dp_trx->nack_cnt); + p += scnprintf(p, end - p, "(%d/%d/%dM/%d/%d/%d)", + a2dp_trx->empty_cnt, + a2dp_trx->retry_cnt, + a2dp_trx->tx_rate ? 3 : 2, + a2dp_trx->tx_cnt, + a2dp_trx->ack_cnt, + a2dp_trx->nack_cnt); } - seq_printf(m, "->w%02d", - le16_to_cpu(pcysta->slot_step_time[store_index + 1])); + p += scnprintf(p, end - p, "->w%02d", + le16_to_cpu(pcysta->slot_step_time[store_index + 1])); if (a2dp->exist) { a2dp_trx = &pcysta->a2dp_trx[store_index + 1]; - seq_printf(m, "(%d/%d/%dM/%d/%d/%d)", - a2dp_trx->empty_cnt, - a2dp_trx->retry_cnt, - a2dp_trx->tx_rate ? 3 : 2, - a2dp_trx->tx_cnt, - a2dp_trx->ack_cnt, - a2dp_trx->nack_cnt); + p += scnprintf(p, end - p, "(%d/%d/%dM/%d/%d/%d)", + a2dp_trx->empty_cnt, + a2dp_trx->retry_cnt, + a2dp_trx->tx_rate ? 
3 : 2, + a2dp_trx->tx_cnt, + a2dp_trx->ack_cnt, + a2dp_trx->nack_cnt); } if (cnt % divide_cnt == 0 || cnt == c_end) - seq_puts(m, "\n"); + p += scnprintf(p, end - p, "\n"); } if (a2dp->exist) { - seq_printf(m, " %-15s : a2dp_ept:%d, a2dp_late:%d", - "[a2dp_t_sta]", - le16_to_cpu(pcysta->a2dp_ept.cnt), - le16_to_cpu(pcysta->a2dp_ept.cnt_timeout)); + p += scnprintf(p, end - p, + " %-15s : a2dp_ept:%d, a2dp_late:%d", + "[a2dp_t_sta]", + le16_to_cpu(pcysta->a2dp_ept.cnt), + le16_to_cpu(pcysta->a2dp_ept.cnt_timeout)); - seq_printf(m, ", avg_t:%d, max_t:%d", - le16_to_cpu(pcysta->a2dp_ept.tavg), - le16_to_cpu(pcysta->a2dp_ept.tmax)); + p += scnprintf(p, end - p, ", avg_t:%d, max_t:%d", + le16_to_cpu(pcysta->a2dp_ept.tavg), + le16_to_cpu(pcysta->a2dp_ept.tmax)); - seq_puts(m, "\n"); + p += scnprintf(p, end - p, "\n"); } + +out: + return p - buf; } -static void _show_fbtc_cysta_v7(struct rtw89_dev *rtwdev, struct seq_file *m) +static int _show_fbtc_cysta_v7(struct rtw89_dev *rtwdev, char *buf, size_t bufsz) { struct rtw89_btc_bt_info *bt = &rtwdev->btc.cx.bt; struct rtw89_btc_bt_a2dp_desc *a2dp = &bt->link_info.a2dp_desc; @@ -9624,68 +9704,75 @@ static void _show_fbtc_cysta_v7(struct rtw89_dev *rtwdev, struct seq_file *m) struct rtw89_btc_fbtc_cysta_v7 *pcysta = NULL; struct rtw89_btc_dm *dm = &rtwdev->btc.dm; struct rtw89_btc_rpt_cmn_info *pcinfo; + char *p = buf, *end = buf + bufsz; u16 cycle, c_begin, c_end, s_id; u8 i, cnt = 0, divide_cnt; u8 slot_pair; pcinfo = &pfwinfo->rpt_fbtc_cysta.cinfo; if (!pcinfo->valid) - return; + return 0; pcysta = &pfwinfo->rpt_fbtc_cysta.finfo.v7; - seq_printf(m, "\n\r %-15s : cycle:%d", "[slot_stat]", - le16_to_cpu(pcysta->cycles)); + p += scnprintf(p, end - p, "\n\r %-15s : cycle:%d", "[slot_stat]", + le16_to_cpu(pcysta->cycles)); for (i = 0; i < CXST_MAX; i++) { if (!le16_to_cpu(pcysta->slot_cnt[i])) continue; - seq_printf(m, ", %s:%d", - id_to_slot(i), le16_to_cpu(pcysta->slot_cnt[i])); + p += scnprintf(p, end - p, ", %s:%d", + id_to_slot(i), + le16_to_cpu(pcysta->slot_cnt[i])); } if (dm->tdma_now.rxflctrl) - seq_printf(m, ", leak_rx:%d", - le32_to_cpu(pcysta->leak_slot.cnt_rximr)); + p += scnprintf(p, end - p, ", leak_rx:%d", + le32_to_cpu(pcysta->leak_slot.cnt_rximr)); if (pcysta->collision_cnt) - seq_printf(m, ", collision:%d", pcysta->collision_cnt); + p += scnprintf(p, end - p, ", collision:%d", + pcysta->collision_cnt); if (pcysta->skip_cnt) - seq_printf(m, ", skip:%d", le16_to_cpu(pcysta->skip_cnt)); - - seq_printf(m, "\n\r %-15s : avg_t[wl:%d/bt:%d/lk:%d.%03d]", - "[cycle_stat]", - le16_to_cpu(pcysta->cycle_time.tavg[CXT_WL]), - le16_to_cpu(pcysta->cycle_time.tavg[CXT_BT]), - le16_to_cpu(pcysta->leak_slot.tavg) / 1000, - le16_to_cpu(pcysta->leak_slot.tavg) % 1000); - seq_printf(m, ", max_t[wl:%d/bt:%d(>%dms:%d)/lk:%d.%03d]", - le16_to_cpu(pcysta->cycle_time.tmax[CXT_WL]), - le16_to_cpu(pcysta->cycle_time.tmax[CXT_BT]), - dm->bt_slot_flood, dm->cnt_dm[BTC_DCNT_BT_SLOT_FLOOD], - le16_to_cpu(pcysta->leak_slot.tamx) / 1000, - le16_to_cpu(pcysta->leak_slot.tamx) % 1000); - seq_printf(m, ", bcn[all:%d/ok:%d/in_bt:%d/in_bt_ok:%d]", - le16_to_cpu(pcysta->bcn_cnt[CXBCN_ALL]), - le16_to_cpu(pcysta->bcn_cnt[CXBCN_ALL_OK]), - le16_to_cpu(pcysta->bcn_cnt[CXBCN_BT_SLOT]), - le16_to_cpu(pcysta->bcn_cnt[CXBCN_BT_OK])); + p += scnprintf(p, end - p, ", skip:%d", + le16_to_cpu(pcysta->skip_cnt)); + + p += scnprintf(p, end - p, + "\n\r %-15s : avg_t[wl:%d/bt:%d/lk:%d.%03d]", + "[cycle_stat]", + le16_to_cpu(pcysta->cycle_time.tavg[CXT_WL]), + 
le16_to_cpu(pcysta->cycle_time.tavg[CXT_BT]), + le16_to_cpu(pcysta->leak_slot.tavg) / 1000, + le16_to_cpu(pcysta->leak_slot.tavg) % 1000); + p += scnprintf(p, end - p, + ", max_t[wl:%d/bt:%d(>%dms:%d)/lk:%d.%03d]", + le16_to_cpu(pcysta->cycle_time.tmax[CXT_WL]), + le16_to_cpu(pcysta->cycle_time.tmax[CXT_BT]), + dm->bt_slot_flood, dm->cnt_dm[BTC_DCNT_BT_SLOT_FLOOD], + le16_to_cpu(pcysta->leak_slot.tamx) / 1000, + le16_to_cpu(pcysta->leak_slot.tamx) % 1000); + p += scnprintf(p, end - p, ", bcn[all:%d/ok:%d/in_bt:%d/in_bt_ok:%d]", + le16_to_cpu(pcysta->bcn_cnt[CXBCN_ALL]), + le16_to_cpu(pcysta->bcn_cnt[CXBCN_ALL_OK]), + le16_to_cpu(pcysta->bcn_cnt[CXBCN_BT_SLOT]), + le16_to_cpu(pcysta->bcn_cnt[CXBCN_BT_OK])); if (a2dp->exist) { - seq_printf(m, - "\n\r %-15s : a2dp_ept:%d, a2dp_late:%d(streak 2S:%d/max:%d)", - "[a2dp_stat]", - le16_to_cpu(pcysta->a2dp_ept.cnt), - le16_to_cpu(pcysta->a2dp_ept.cnt_timeout), - a2dp->no_empty_streak_2s, a2dp->no_empty_streak_max); + p += scnprintf(p, end - p, + "\n\r %-15s : a2dp_ept:%d, a2dp_late:%d(streak 2S:%d/max:%d)", + "[a2dp_stat]", + le16_to_cpu(pcysta->a2dp_ept.cnt), + le16_to_cpu(pcysta->a2dp_ept.cnt_timeout), + a2dp->no_empty_streak_2s, + a2dp->no_empty_streak_max); - seq_printf(m, ", avg_t:%d, max_t:%d", - le16_to_cpu(pcysta->a2dp_ept.tavg), - le16_to_cpu(pcysta->a2dp_ept.tmax)); + p += scnprintf(p, end - p, ", avg_t:%d, max_t:%d", + le16_to_cpu(pcysta->a2dp_ept.tavg), + le16_to_cpu(pcysta->a2dp_ept.tmax)); } if (le16_to_cpu(pcysta->cycles) <= 1) - return; + goto out; /* 1 cycle = 1 wl-slot + 1 bt-slot */ slot_pair = BTC_CYCLE_SLOT_MAX / 2; @@ -9703,7 +9790,7 @@ static void _show_fbtc_cysta_v7(struct rtw89_dev *rtwdev, struct seq_file *m) divide_cnt = 6; if (c_begin > c_end) - return; + goto out; for (cycle = c_begin; cycle <= c_end; cycle++) { cnt++; @@ -9711,129 +9798,142 @@ static void _show_fbtc_cysta_v7(struct rtw89_dev *rtwdev, struct seq_file *m) if (cnt % divide_cnt == 1) { if (a2dp->exist) - seq_printf(m, "\n\r %-15s : ", "[slotT_wermtan]"); + p += scnprintf(p, end - p, "\n\r %-15s : ", + "[slotT_wermtan]"); else - seq_printf(m, "\n\r %-15s : ", "[slotT_rxerr]"); + p += scnprintf(p, end - p, "\n\r %-15s : ", + "[slotT_rxerr]"); } - seq_printf(m, "->b%d", le16_to_cpu(pcysta->slot_step_time[s_id])); + p += scnprintf(p, end - p, "->b%d", + le16_to_cpu(pcysta->slot_step_time[s_id])); if (a2dp->exist) - seq_printf(m, "(%d/%d/%d/%dM/%d/%d/%d)", - pcysta->wl_rx_err_ratio[s_id], - pcysta->a2dp_trx[s_id].empty_cnt, - pcysta->a2dp_trx[s_id].retry_cnt, - (pcysta->a2dp_trx[s_id].tx_rate ? 3 : 2), - pcysta->a2dp_trx[s_id].tx_cnt, - pcysta->a2dp_trx[s_id].ack_cnt, - pcysta->a2dp_trx[s_id].nack_cnt); + p += scnprintf(p, end - p, "(%d/%d/%d/%dM/%d/%d/%d)", + pcysta->wl_rx_err_ratio[s_id], + pcysta->a2dp_trx[s_id].empty_cnt, + pcysta->a2dp_trx[s_id].retry_cnt, + (pcysta->a2dp_trx[s_id].tx_rate ? 3 : 2), + pcysta->a2dp_trx[s_id].tx_cnt, + pcysta->a2dp_trx[s_id].ack_cnt, + pcysta->a2dp_trx[s_id].nack_cnt); else - seq_printf(m, "(%d)", pcysta->wl_rx_err_ratio[s_id]); + p += scnprintf(p, end - p, "(%d)", + pcysta->wl_rx_err_ratio[s_id]); - seq_printf(m, "->w%d", le16_to_cpu(pcysta->slot_step_time[s_id + 1])); + p += scnprintf(p, end - p, "->w%d", + le16_to_cpu(pcysta->slot_step_time[s_id + 1])); if (a2dp->exist) - seq_printf(m, "(%d/%d/%d/%dM/%d/%d/%d)", - pcysta->wl_rx_err_ratio[s_id + 1], - pcysta->a2dp_trx[s_id + 1].empty_cnt, - pcysta->a2dp_trx[s_id + 1].retry_cnt, - (pcysta->a2dp_trx[s_id + 1].tx_rate ? 
3 : 2), - pcysta->a2dp_trx[s_id + 1].tx_cnt, - pcysta->a2dp_trx[s_id + 1].ack_cnt, - pcysta->a2dp_trx[s_id + 1].nack_cnt); + p += scnprintf(p, end - p, "(%d/%d/%d/%dM/%d/%d/%d)", + pcysta->wl_rx_err_ratio[s_id + 1], + pcysta->a2dp_trx[s_id + 1].empty_cnt, + pcysta->a2dp_trx[s_id + 1].retry_cnt, + (pcysta->a2dp_trx[s_id + 1].tx_rate ? 3 : 2), + pcysta->a2dp_trx[s_id + 1].tx_cnt, + pcysta->a2dp_trx[s_id + 1].ack_cnt, + pcysta->a2dp_trx[s_id + 1].nack_cnt); else - seq_printf(m, "(%d)", pcysta->wl_rx_err_ratio[s_id + 1]); + p += scnprintf(p, end - p, "(%d)", + pcysta->wl_rx_err_ratio[s_id + 1]); } + +out: + return p - buf; } -static void _show_fbtc_nullsta(struct rtw89_dev *rtwdev, struct seq_file *m) +static int _show_fbtc_nullsta(struct rtw89_dev *rtwdev, char *buf, size_t bufsz) { struct rtw89_btc *btc = &rtwdev->btc; const struct rtw89_btc_ver *ver = btc->ver; struct rtw89_btc_btf_fwinfo *pfwinfo = &btc->fwinfo; struct rtw89_btc_rpt_cmn_info *pcinfo; union rtw89_btc_fbtc_cynullsta_info *ns; + char *p = buf, *end = buf + bufsz; u8 i = 0; if (!btc->dm.tdma_now.rxflctrl) - return; + return 0; pcinfo = &pfwinfo->rpt_fbtc_nullsta.cinfo; if (!pcinfo->valid) - return; + return 0; ns = &pfwinfo->rpt_fbtc_nullsta.finfo; if (ver->fcxnullsta == 1) { for (i = 0; i < 2; i++) { - seq_printf(m, " %-15s : ", "[NULL-STA]"); - seq_printf(m, "null-%d", i); - seq_printf(m, "[ok:%d/", - le32_to_cpu(ns->v1.result[i][1])); - seq_printf(m, "fail:%d/", - le32_to_cpu(ns->v1.result[i][0])); - seq_printf(m, "on_time:%d/", - le32_to_cpu(ns->v1.result[i][2])); - seq_printf(m, "retry:%d/", - le32_to_cpu(ns->v1.result[i][3])); - seq_printf(m, "avg_t:%d.%03d/", - le32_to_cpu(ns->v1.avg_t[i]) / 1000, - le32_to_cpu(ns->v1.avg_t[i]) % 1000); - seq_printf(m, "max_t:%d.%03d]\n", - le32_to_cpu(ns->v1.max_t[i]) / 1000, - le32_to_cpu(ns->v1.max_t[i]) % 1000); + p += scnprintf(p, end - p, " %-15s : ", "[NULL-STA]"); + p += scnprintf(p, end - p, "null-%d", i); + p += scnprintf(p, end - p, "[ok:%d/", + le32_to_cpu(ns->v1.result[i][1])); + p += scnprintf(p, end - p, "fail:%d/", + le32_to_cpu(ns->v1.result[i][0])); + p += scnprintf(p, end - p, "on_time:%d/", + le32_to_cpu(ns->v1.result[i][2])); + p += scnprintf(p, end - p, "retry:%d/", + le32_to_cpu(ns->v1.result[i][3])); + p += scnprintf(p, end - p, "avg_t:%d.%03d/", + le32_to_cpu(ns->v1.avg_t[i]) / 1000, + le32_to_cpu(ns->v1.avg_t[i]) % 1000); + p += scnprintf(p, end - p, "max_t:%d.%03d]\n", + le32_to_cpu(ns->v1.max_t[i]) / 1000, + le32_to_cpu(ns->v1.max_t[i]) % 1000); } } else if (ver->fcxnullsta == 7) { for (i = 0; i < 2; i++) { - seq_printf(m, " %-15s : ", "[NULL-STA]"); - seq_printf(m, "null-%d", i); - seq_printf(m, "[Tx:%d/", - le32_to_cpu(ns->v7.result[i][4])); - seq_printf(m, "[ok:%d/", - le32_to_cpu(ns->v7.result[i][1])); - seq_printf(m, "fail:%d/", - le32_to_cpu(ns->v7.result[i][0])); - seq_printf(m, "on_time:%d/", - le32_to_cpu(ns->v7.result[i][2])); - seq_printf(m, "retry:%d/", - le32_to_cpu(ns->v7.result[i][3])); - seq_printf(m, "avg_t:%d.%03d/", - le32_to_cpu(ns->v7.tavg[i]) / 1000, - le32_to_cpu(ns->v7.tavg[i]) % 1000); - seq_printf(m, "max_t:%d.%03d]\n", - le32_to_cpu(ns->v7.tmax[i]) / 1000, - le32_to_cpu(ns->v7.tmax[i]) % 1000); + p += scnprintf(p, end - p, " %-15s : ", "[NULL-STA]"); + p += scnprintf(p, end - p, "null-%d", i); + p += scnprintf(p, end - p, "[Tx:%d/", + le32_to_cpu(ns->v7.result[i][4])); + p += scnprintf(p, end - p, "[ok:%d/", + le32_to_cpu(ns->v7.result[i][1])); + p += scnprintf(p, end - p, "fail:%d/", + le32_to_cpu(ns->v7.result[i][0])); + p += 
scnprintf(p, end - p, "on_time:%d/", + le32_to_cpu(ns->v7.result[i][2])); + p += scnprintf(p, end - p, "retry:%d/", + le32_to_cpu(ns->v7.result[i][3])); + p += scnprintf(p, end - p, "avg_t:%d.%03d/", + le32_to_cpu(ns->v7.tavg[i]) / 1000, + le32_to_cpu(ns->v7.tavg[i]) % 1000); + p += scnprintf(p, end - p, "max_t:%d.%03d]\n", + le32_to_cpu(ns->v7.tmax[i]) / 1000, + le32_to_cpu(ns->v7.tmax[i]) % 1000); } } else { for (i = 0; i < 2; i++) { - seq_printf(m, " %-15s : ", "[NULL-STA]"); - seq_printf(m, "null-%d", i); - seq_printf(m, "[Tx:%d/", - le32_to_cpu(ns->v2.result[i][4])); - seq_printf(m, "[ok:%d/", - le32_to_cpu(ns->v2.result[i][1])); - seq_printf(m, "fail:%d/", - le32_to_cpu(ns->v2.result[i][0])); - seq_printf(m, "on_time:%d/", - le32_to_cpu(ns->v2.result[i][2])); - seq_printf(m, "retry:%d/", - le32_to_cpu(ns->v2.result[i][3])); - seq_printf(m, "avg_t:%d.%03d/", - le32_to_cpu(ns->v2.avg_t[i]) / 1000, - le32_to_cpu(ns->v2.avg_t[i]) % 1000); - seq_printf(m, "max_t:%d.%03d]\n", - le32_to_cpu(ns->v2.max_t[i]) / 1000, - le32_to_cpu(ns->v2.max_t[i]) % 1000); + p += scnprintf(p, end - p, " %-15s : ", "[NULL-STA]"); + p += scnprintf(p, end - p, "null-%d", i); + p += scnprintf(p, end - p, "[Tx:%d/", + le32_to_cpu(ns->v2.result[i][4])); + p += scnprintf(p, end - p, "[ok:%d/", + le32_to_cpu(ns->v2.result[i][1])); + p += scnprintf(p, end - p, "fail:%d/", + le32_to_cpu(ns->v2.result[i][0])); + p += scnprintf(p, end - p, "on_time:%d/", + le32_to_cpu(ns->v2.result[i][2])); + p += scnprintf(p, end - p, "retry:%d/", + le32_to_cpu(ns->v2.result[i][3])); + p += scnprintf(p, end - p, "avg_t:%d.%03d/", + le32_to_cpu(ns->v2.avg_t[i]) / 1000, + le32_to_cpu(ns->v2.avg_t[i]) % 1000); + p += scnprintf(p, end - p, "max_t:%d.%03d]\n", + le32_to_cpu(ns->v2.max_t[i]) / 1000, + le32_to_cpu(ns->v2.max_t[i]) % 1000); } } + + return p - buf; } -static void _show_fbtc_step_v2(struct rtw89_dev *rtwdev, struct seq_file *m) +static int _show_fbtc_step_v2(struct rtw89_dev *rtwdev, char *buf, size_t bufsz) { struct rtw89_btc *btc = &rtwdev->btc; struct rtw89_btc_btf_fwinfo *pfwinfo = &btc->fwinfo; struct rtw89_btc_rpt_cmn_info *pcinfo = NULL; struct rtw89_btc_fbtc_steps_v2 *pstep = NULL; const struct rtw89_btc_ver *ver = btc->ver; + char *p = buf, *end = buf + bufsz; u8 type, val, cnt = 0, state = 0; bool outloop = false; u16 i, diff_t, n_start = 0, n_stop = 0; @@ -9841,14 +9941,14 @@ static void _show_fbtc_step_v2(struct rtw89_dev *rtwdev, struct seq_file *m) pcinfo = &pfwinfo->rpt_fbtc_step.cinfo; if (!pcinfo->valid) - return; + return 0; pstep = &pfwinfo->rpt_fbtc_step.finfo.v2; pos_old = le16_to_cpu(pstep->pos_old); pos_new = le16_to_cpu(pstep->pos_new); if (pcinfo->req_fver != pstep->fver) - return; + return 0; /* store step info by using ring instead of FIFO*/ do { @@ -9877,13 +9977,15 @@ static void _show_fbtc_step_v2(struct rtw89_dev *rtwdev, struct seq_file *m) continue; if (cnt % 10 == 0) - seq_printf(m, " %-15s : ", "[steps]"); + p += scnprintf(p, end - p, + " %-15s : ", "[steps]"); - seq_printf(m, "-> %s(%02d)(%02d)", - (type == CXSTEP_SLOT ? "SLT" : - "EVT"), (u32)val, diff_t); + p += scnprintf(p, end - p, + "-> %s(%02d)(%02d)", + (type == CXSTEP_SLOT ? 
"SLT" : + "EVT"), (u32)val, diff_t); if (cnt % 10 == 9) - seq_puts(m, "\n"); + p += scnprintf(p, end - p, "\n"); cnt++; } @@ -9900,29 +10002,32 @@ static void _show_fbtc_step_v2(struct rtw89_dev *rtwdev, struct seq_file *m) break; } } while (!outloop); + + return p - buf; } -static void _show_fbtc_step_v3(struct rtw89_dev *rtwdev, struct seq_file *m) +static int _show_fbtc_step_v3(struct rtw89_dev *rtwdev, char *buf, size_t bufsz) { struct rtw89_btc *btc = &rtwdev->btc; struct rtw89_btc_btf_fwinfo *pfwinfo = &btc->fwinfo; struct rtw89_btc_rpt_cmn_info *pcinfo; struct rtw89_btc_fbtc_steps_v3 *pstep; u32 i, n_begin, n_end, array_idx, cnt = 0; + char *p = buf, *end = buf + bufsz; u8 type, val; u16 diff_t; if ((pfwinfo->rpt_en_map & rtw89_btc_fw_rpt_ver(rtwdev, RPT_EN_FW_STEP_INFO)) == 0) - return; + return 0; pcinfo = &pfwinfo->rpt_fbtc_step.cinfo; if (!pcinfo->valid) - return; + return 0; pstep = &pfwinfo->rpt_fbtc_step.finfo.v3; if (pcinfo->req_fver != pstep->fver) - return; + return 0; if (le32_to_cpu(pstep->cnt) <= FCXDEF_STEP) n_begin = 1; @@ -9932,7 +10037,7 @@ static void _show_fbtc_step_v3(struct rtw89_dev *rtwdev, struct seq_file *m) n_end = le32_to_cpu(pstep->cnt); if (n_begin > n_end) - return; + return 0; /* restore step info by using ring instead of FIFO */ for (i = n_begin; i <= n_end; i++) { @@ -9945,50 +10050,55 @@ static void _show_fbtc_step_v3(struct rtw89_dev *rtwdev, struct seq_file *m) continue; if (cnt % 10 == 0) - seq_printf(m, " %-15s : ", "[steps]"); + p += scnprintf(p, end - p, " %-15s : ", "[steps]"); - seq_printf(m, "-> %s(%02d)", - (type == CXSTEP_SLOT ? - id_to_slot((u32)val) : - id_to_evt((u32)val)), diff_t); + p += scnprintf(p, end - p, "-> %s(%02d)", + (type == CXSTEP_SLOT ? + id_to_slot((u32)val) : + id_to_evt((u32)val)), diff_t); if (cnt % 10 == 9) - seq_puts(m, "\n"); + p += scnprintf(p, end - p, "\n"); cnt++; } + + return p - buf; } -static void _show_fw_dm_msg(struct rtw89_dev *rtwdev, struct seq_file *m) +static int _show_fw_dm_msg(struct rtw89_dev *rtwdev, char *buf, size_t bufsz) { struct rtw89_btc *btc = &rtwdev->btc; const struct rtw89_btc_ver *ver = btc->ver; + char *p = buf, *end = buf + bufsz; if (!(btc->dm.coex_info_map & BTC_COEX_INFO_DM)) - return; + goto out; - _show_error(rtwdev, m); - _show_fbtc_tdma(rtwdev, m); - _show_fbtc_slots(rtwdev, m); + p += _show_error(rtwdev, p, end - p); + p += _show_fbtc_tdma(rtwdev, p, end - p); + p += _show_fbtc_slots(rtwdev, p, end - p); if (ver->fcxcysta == 2) - _show_fbtc_cysta_v2(rtwdev, m); + p += _show_fbtc_cysta_v2(rtwdev, p, end - p); else if (ver->fcxcysta == 3) - _show_fbtc_cysta_v3(rtwdev, m); + p += _show_fbtc_cysta_v3(rtwdev, p, end - p); else if (ver->fcxcysta == 4) - _show_fbtc_cysta_v4(rtwdev, m); + p += _show_fbtc_cysta_v4(rtwdev, p, end - p); else if (ver->fcxcysta == 5) - _show_fbtc_cysta_v5(rtwdev, m); + p += _show_fbtc_cysta_v5(rtwdev, p, end - p); else if (ver->fcxcysta == 7) - _show_fbtc_cysta_v7(rtwdev, m); + p += _show_fbtc_cysta_v7(rtwdev, p, end - p); - _show_fbtc_nullsta(rtwdev, m); + p += _show_fbtc_nullsta(rtwdev, p, end - p); if (ver->fcxstep == 2) - _show_fbtc_step_v2(rtwdev, m); + p += _show_fbtc_step_v2(rtwdev, p, end - p); else if (ver->fcxstep == 3) - _show_fbtc_step_v3(rtwdev, m); + p += _show_fbtc_step_v3(rtwdev, p, end - p); +out: + return p - buf; } static void _get_gnt(struct rtw89_dev *rtwdev, struct rtw89_mac_ax_coex_gnt *gnt_cfg) @@ -10033,12 +10143,13 @@ static void _get_gnt(struct rtw89_dev *rtwdev, struct rtw89_mac_ax_coex_gnt *gnt } } -static void 
_show_gpio_dbg(struct rtw89_dev *rtwdev, struct seq_file *m) +static int _show_gpio_dbg(struct rtw89_dev *rtwdev, char *buf, size_t bufsz) { struct rtw89_btc_btf_fwinfo *pfwinfo = &rtwdev->btc.fwinfo; const struct rtw89_btc_ver *ver = rtwdev->btc.ver; struct rtw89_btc_rpt_cmn_info *pcinfo = NULL; union rtw89_btc_fbtc_gpio_dbg *gdbg = NULL; + char *p = buf, *end = buf + bufsz; u8 *gpio_map, i; u32 en_map; @@ -10048,8 +10159,8 @@ static void _show_gpio_dbg(struct rtw89_dev *rtwdev, struct seq_file *m) rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], %s(): stop due rpt_fbtc_gpio_dbg.cinfo\n", __func__); - seq_puts(m, "\n"); - return; + p += scnprintf(p, end - p, "\n"); + goto out; } if (ver->fcxgpiodbg == 7) { @@ -10061,20 +10172,24 @@ static void _show_gpio_dbg(struct rtw89_dev *rtwdev, struct seq_file *m) } if (!en_map) - return; + goto out; - seq_printf(m, " %-15s : enable_map:0x%08x", - "[gpio_dbg]", en_map); + p += scnprintf(p, end - p, " %-15s : enable_map:0x%08x", + "[gpio_dbg]", en_map); for (i = 0; i < BTC_DBG_MAX1; i++) { if (!(en_map & BIT(i))) continue; - seq_printf(m, ", %s->GPIO%d", id_to_gdbg(i), gpio_map[i]); + p += scnprintf(p, end - p, ", %s->GPIO%d", id_to_gdbg(i), + gpio_map[i]); } - seq_puts(m, "\n"); + p += scnprintf(p, end - p, "\n"); + +out: + return p - buf; } -static void _show_mreg_v1(struct rtw89_dev *rtwdev, struct seq_file *m) +static int _show_mreg_v1(struct rtw89_dev *rtwdev, char *buf, size_t bufsz) { const struct rtw89_chip_info *chip = rtwdev->chip; struct rtw89_btc *btc = &rtwdev->btc; @@ -10086,45 +10201,47 @@ static void _show_mreg_v1(struct rtw89_dev *rtwdev, struct seq_file *m) struct rtw89_btc_bt_info *bt = &btc->cx.bt; struct rtw89_mac_ax_coex_gnt gnt_cfg = {}; struct rtw89_mac_ax_gnt gnt; + char *p = buf, *end = buf + bufsz; u8 i = 0, type = 0, cnt = 0; u32 val, offset; if (!(btc->dm.coex_info_map & BTC_COEX_INFO_MREG)) - return; + return 0; - seq_puts(m, "========== [HW Status] ==========\n"); + p += scnprintf(p, end - p, "========== [HW Status] ==========\n"); - seq_printf(m, - " %-15s : WL->BT:0x%08x(cnt:%d), BT->WL:0x%08x(total:%d, bt_update:%d)\n", - "[scoreboard]", wl->scbd, cx->cnt_wl[BTC_WCNT_SCBDUPDATE], - bt->scbd, cx->cnt_bt[BTC_BCNT_SCBDREAD], - cx->cnt_bt[BTC_BCNT_SCBDUPDATE]); + p += scnprintf(p, end - p, + " %-15s : WL->BT:0x%08x(cnt:%d), BT->WL:0x%08x(total:%d, bt_update:%d)\n", + "[scoreboard]", wl->scbd, + cx->cnt_wl[BTC_WCNT_SCBDUPDATE], + bt->scbd, cx->cnt_bt[BTC_BCNT_SCBDREAD], + cx->cnt_bt[BTC_BCNT_SCBDUPDATE]); btc->dm.pta_owner = rtw89_mac_get_ctrl_path(rtwdev); _get_gnt(rtwdev, &gnt_cfg); gnt = gnt_cfg.band[0]; - seq_printf(m, - " %-15s : pta_owner:%s, phy-0[gnt_wl:%s-%d/gnt_bt:%s-%d], ", - "[gnt_status]", - chip->chip_id == RTL8852C ? "HW" : - btc->dm.pta_owner == BTC_CTRL_BY_WL ? "WL" : "BT", - gnt.gnt_wl_sw_en ? "SW" : "HW", gnt.gnt_wl, - gnt.gnt_bt_sw_en ? "SW" : "HW", gnt.gnt_bt); + p += scnprintf(p, end - p, + " %-15s : pta_owner:%s, phy-0[gnt_wl:%s-%d/gnt_bt:%s-%d], ", + "[gnt_status]", + chip->chip_id == RTL8852C ? "HW" : + btc->dm.pta_owner == BTC_CTRL_BY_WL ? "WL" : "BT", + gnt.gnt_wl_sw_en ? "SW" : "HW", gnt.gnt_wl, + gnt.gnt_bt_sw_en ? "SW" : "HW", gnt.gnt_bt); gnt = gnt_cfg.band[1]; - seq_printf(m, "phy-1[gnt_wl:%s-%d/gnt_bt:%s-%d]\n", - gnt.gnt_wl_sw_en ? "SW" : "HW", - gnt.gnt_wl, - gnt.gnt_bt_sw_en ? "SW" : "HW", - gnt.gnt_bt); + p += scnprintf(p, end - p, "phy-1[gnt_wl:%s-%d/gnt_bt:%s-%d]\n", + gnt.gnt_wl_sw_en ? "SW" : "HW", + gnt.gnt_wl, + gnt.gnt_bt_sw_en ? 
"SW" : "HW", + gnt.gnt_bt); pcinfo = &pfwinfo->rpt_fbtc_mregval.cinfo; if (!pcinfo->valid) { rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], %s(): stop due rpt_fbtc_mregval.cinfo\n", __func__); - return; + goto out; } pmreg = &pfwinfo->rpt_fbtc_mregval.finfo.v1; @@ -10138,21 +10255,26 @@ static void _show_mreg_v1(struct rtw89_dev *rtwdev, struct seq_file *m) val = le32_to_cpu(pmreg->mreg_val[i]); if (cnt % 6 == 0) - seq_printf(m, " %-15s : %d_0x%04x=0x%08x", - "[reg]", (u32)type, offset, val); + p += scnprintf(p, end - p, + " %-15s : %d_0x%04x=0x%08x", + "[reg]", (u32)type, offset, val); else - seq_printf(m, ", %d_0x%04x=0x%08x", (u32)type, - offset, val); + p += scnprintf(p, end - p, ", %d_0x%04x=0x%08x", + (u32)type, + offset, val); if (cnt % 6 == 5) - seq_puts(m, "\n"); + p += scnprintf(p, end - p, "\n"); cnt++; if (i >= pmreg->reg_num) - seq_puts(m, "\n"); + p += scnprintf(p, end - p, "\n"); } + +out: + return p - buf; } -static void _show_mreg_v2(struct rtw89_dev *rtwdev, struct seq_file *m) +static int _show_mreg_v2(struct rtw89_dev *rtwdev, char *buf, size_t bufsz) { const struct rtw89_chip_info *chip = rtwdev->chip; struct rtw89_btc *btc = &rtwdev->btc; @@ -10164,46 +10286,48 @@ static void _show_mreg_v2(struct rtw89_dev *rtwdev, struct seq_file *m) struct rtw89_btc_bt_info *bt = &btc->cx.bt; struct rtw89_mac_ax_coex_gnt gnt_cfg = {}; struct rtw89_mac_ax_gnt gnt; + char *p = buf, *end = buf + bufsz; u8 i = 0, type = 0, cnt = 0; u32 val, offset; if (!(btc->dm.coex_info_map & BTC_COEX_INFO_MREG)) - return; + return 0; - seq_puts(m, "========== [HW Status] ==========\n"); + p += scnprintf(p, end - p, "========== [HW Status] ==========\n"); - seq_printf(m, - " %-15s : WL->BT:0x%08x(cnt:%d), BT->WL:0x%08x(total:%d, bt_update:%d)\n", - "[scoreboard]", wl->scbd, cx->cnt_wl[BTC_WCNT_SCBDUPDATE], - bt->scbd, cx->cnt_bt[BTC_BCNT_SCBDREAD], - cx->cnt_bt[BTC_BCNT_SCBDUPDATE]); + p += scnprintf(p, end - p, + " %-15s : WL->BT:0x%08x(cnt:%d), BT->WL:0x%08x(total:%d, bt_update:%d)\n", + "[scoreboard]", wl->scbd, + cx->cnt_wl[BTC_WCNT_SCBDUPDATE], + bt->scbd, cx->cnt_bt[BTC_BCNT_SCBDREAD], + cx->cnt_bt[BTC_BCNT_SCBDUPDATE]); btc->dm.pta_owner = rtw89_mac_get_ctrl_path(rtwdev); _get_gnt(rtwdev, &gnt_cfg); gnt = gnt_cfg.band[0]; - seq_printf(m, - " %-15s : pta_owner:%s, phy-0[gnt_wl:%s-%d/gnt_bt:%s-%d], polut_type:%s", - "[gnt_status]", - chip->chip_id == RTL8852C ? "HW" : - btc->dm.pta_owner == BTC_CTRL_BY_WL ? "WL" : "BT", - gnt.gnt_wl_sw_en ? "SW" : "HW", gnt.gnt_wl, - gnt.gnt_bt_sw_en ? "SW" : "HW", gnt.gnt_bt, - id_to_polut(wl->bt_polut_type[wl->pta_req_mac])); + p += scnprintf(p, end - p, + " %-15s : pta_owner:%s, phy-0[gnt_wl:%s-%d/gnt_bt:%s-%d], polut_type:%s", + "[gnt_status]", + chip->chip_id == RTL8852C ? "HW" : + btc->dm.pta_owner == BTC_CTRL_BY_WL ? "WL" : "BT", + gnt.gnt_wl_sw_en ? "SW" : "HW", gnt.gnt_wl, + gnt.gnt_bt_sw_en ? "SW" : "HW", gnt.gnt_bt, + id_to_polut(wl->bt_polut_type[wl->pta_req_mac])); gnt = gnt_cfg.band[1]; - seq_printf(m, "phy-1[gnt_wl:%s-%d/gnt_bt:%s-%d]\n", - gnt.gnt_wl_sw_en ? "SW" : "HW", - gnt.gnt_wl, - gnt.gnt_bt_sw_en ? "SW" : "HW", - gnt.gnt_bt); + p += scnprintf(p, end - p, "phy-1[gnt_wl:%s-%d/gnt_bt:%s-%d]\n", + gnt.gnt_wl_sw_en ? "SW" : "HW", + gnt.gnt_wl, + gnt.gnt_bt_sw_en ? 
"SW" : "HW", + gnt.gnt_bt); pcinfo = &pfwinfo->rpt_fbtc_mregval.cinfo; if (!pcinfo->valid) { rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], %s(): stop due rpt_fbtc_mregval.cinfo\n", __func__); - return; + goto out; } pmreg = &pfwinfo->rpt_fbtc_mregval.finfo.v2; @@ -10217,21 +10341,26 @@ static void _show_mreg_v2(struct rtw89_dev *rtwdev, struct seq_file *m) val = le32_to_cpu(pmreg->mreg_val[i]); if (cnt % 6 == 0) - seq_printf(m, " %-15s : %d_0x%04x=0x%08x", - "[reg]", (u32)type, offset, val); + p += scnprintf(p, end - p, + " %-15s : %d_0x%04x=0x%08x", + "[reg]", (u32)type, offset, val); else - seq_printf(m, ", %d_0x%04x=0x%08x", (u32)type, - offset, val); + p += scnprintf(p, end - p, ", %d_0x%04x=0x%08x", + (u32)type, + offset, val); if (cnt % 6 == 5) - seq_puts(m, "\n"); + p += scnprintf(p, end - p, "\n"); cnt++; if (i >= pmreg->reg_num) - seq_puts(m, "\n"); + p += scnprintf(p, end - p, "\n"); } + +out: + return p - buf; } -static void _show_mreg_v7(struct rtw89_dev *rtwdev, struct seq_file *m) +static int _show_mreg_v7(struct rtw89_dev *rtwdev, char *buf, size_t bufsz) { struct rtw89_btc *btc = &rtwdev->btc; struct rtw89_btc_btf_fwinfo *pfwinfo = &btc->fwinfo; @@ -10242,46 +10371,50 @@ static void _show_mreg_v7(struct rtw89_dev *rtwdev, struct seq_file *m) struct rtw89_btc_bt_info *bt = &cx->bt; struct rtw89_mac_ax_gnt *gnt = NULL; struct rtw89_btc_dm *dm = &btc->dm; + char *p = buf, *end = buf + bufsz; u8 i, type, cnt = 0; u32 val, offset; if (!(dm->coex_info_map & BTC_COEX_INFO_MREG)) - return; + return 0; - seq_puts(m, "\n\r========== [HW Status] =========="); + p += scnprintf(p, end - p, "\n\r========== [HW Status] =========="); - seq_printf(m, - "\n\r %-15s : WL->BT:0x%08x(cnt:%d), BT->WL:0x%08x(total:%d, bt_update:%d)", - "[scoreboard]", wl->scbd, cx->cnt_wl[BTC_WCNT_SCBDUPDATE], - bt->scbd, cx->cnt_bt[BTC_BCNT_SCBDREAD], - cx->cnt_bt[BTC_BCNT_SCBDUPDATE]); + p += scnprintf(p, end - p, + "\n\r %-15s : WL->BT:0x%08x(cnt:%d), BT->WL:0x%08x(total:%d, bt_update:%d)", + "[scoreboard]", wl->scbd, + cx->cnt_wl[BTC_WCNT_SCBDUPDATE], + bt->scbd, cx->cnt_bt[BTC_BCNT_SCBDREAD], + cx->cnt_bt[BTC_BCNT_SCBDUPDATE]); /* To avoid I/O if WL LPS or power-off */ dm->pta_owner = rtw89_mac_get_ctrl_path(rtwdev); - seq_printf(m, - "\n\r %-15s : pta_owner:%s, pta_req_mac:MAC%d, rf_gnt_source: polut_type:%s", - "[gnt_status]", - rtwdev->chip->para_ver & BTC_FEAT_PTA_ONOFF_CTRL ? "HW" : - dm->pta_owner == BTC_CTRL_BY_WL ? "WL" : "BT", - wl->pta_req_mac, id_to_polut(wl->bt_polut_type[wl->pta_req_mac])); + p += scnprintf(p, end - p, + "\n\r %-15s : pta_owner:%s, pta_req_mac:MAC%d, rf_gnt_source: polut_type:%s", + "[gnt_status]", + rtwdev->chip->para_ver & BTC_FEAT_PTA_ONOFF_CTRL ? "HW" : + dm->pta_owner == BTC_CTRL_BY_WL ? "WL" : "BT", + wl->pta_req_mac, + id_to_polut(wl->bt_polut_type[wl->pta_req_mac])); gnt = &dm->gnt.band[RTW89_PHY_0]; - seq_printf(m, ", phy-0[gnt_wl:%s-%d/gnt_bt:%s-%d]", - gnt->gnt_wl_sw_en ? "SW" : "HW", gnt->gnt_wl, - gnt->gnt_bt_sw_en ? "SW" : "HW", gnt->gnt_bt); + p += scnprintf(p, end - p, ", phy-0[gnt_wl:%s-%d/gnt_bt:%s-%d]", + gnt->gnt_wl_sw_en ? "SW" : "HW", gnt->gnt_wl, + gnt->gnt_bt_sw_en ? "SW" : "HW", gnt->gnt_bt); if (rtwdev->dbcc_en) { gnt = &dm->gnt.band[RTW89_PHY_1]; - seq_printf(m, ", phy-1[gnt_wl:%s-%d/gnt_bt:%s-%d]", - gnt->gnt_wl_sw_en ? "SW" : "HW", gnt->gnt_wl, - gnt->gnt_bt_sw_en ? "SW" : "HW", gnt->gnt_bt); + p += scnprintf(p, end - p, + ", phy-1[gnt_wl:%s-%d/gnt_bt:%s-%d]", + gnt->gnt_wl_sw_en ? "SW" : "HW", gnt->gnt_wl, + gnt->gnt_bt_sw_en ? 
"SW" : "HW", gnt->gnt_bt); } pcinfo = &pfwinfo->rpt_fbtc_mregval.cinfo; if (!pcinfo->valid) - return; + goto out; pmreg = &pfwinfo->rpt_fbtc_mregval.finfo.v7; @@ -10291,17 +10424,21 @@ static void _show_mreg_v7(struct rtw89_dev *rtwdev, struct seq_file *m) val = le32_to_cpu(pmreg->mreg_val[i]); if (cnt % 6 == 0) - seq_printf(m, "\n\r %-15s : %s_0x%x=0x%x", "[reg]", - id_to_regtype(type), offset, val); + p += scnprintf(p, end - p, + "\n\r %-15s : %s_0x%x=0x%x", "[reg]", + id_to_regtype(type), offset, val); else - seq_printf(m, ", %s_0x%x=0x%x", - id_to_regtype(type), offset, val); + p += scnprintf(p, end - p, ", %s_0x%x=0x%x", + id_to_regtype(type), offset, val); cnt++; } - seq_puts(m, "\n"); + p += scnprintf(p, end - p, "\n"); + +out: + return p - buf; } -static void _show_summary_v1(struct rtw89_dev *rtwdev, struct seq_file *m) +static int _show_summary_v1(struct rtw89_dev *rtwdev, char *buf, size_t bufsz) { struct rtw89_btc *btc = &rtwdev->btc; struct rtw89_btc_btf_fwinfo *pfwinfo = &btc->fwinfo; @@ -10312,56 +10449,59 @@ static void _show_summary_v1(struct rtw89_dev *rtwdev, struct seq_file *m) struct rtw89_btc_wl_info *wl = &cx->wl; struct rtw89_btc_bt_info *bt = &cx->bt; u32 cnt_sum = 0, *cnt = btc->dm.cnt_notify; + char *p = buf, *end = buf + bufsz; u8 i; if (!(dm->coex_info_map & BTC_COEX_INFO_SUMMARY)) - return; + return 0; - seq_puts(m, "========== [Statistics] ==========\n"); + p += scnprintf(p, end - p, "========== [Statistics] ==========\n"); pcinfo = &pfwinfo->rpt_ctrl.cinfo; if (pcinfo->valid && !wl->status.map.lps && !wl->status.map.rf_off) { prptctrl = &pfwinfo->rpt_ctrl.finfo.v1; - seq_printf(m, - " %-15s : h2c_cnt=%d(fail:%d, fw_recv:%d), c2h_cnt=%d(fw_send:%d), ", - "[summary]", pfwinfo->cnt_h2c, - pfwinfo->cnt_h2c_fail, prptctrl->h2c_cnt, - pfwinfo->cnt_c2h, prptctrl->c2h_cnt); + p += scnprintf(p, end - p, + " %-15s : h2c_cnt=%d(fail:%d, fw_recv:%d), c2h_cnt=%d(fw_send:%d), ", + "[summary]", pfwinfo->cnt_h2c, + pfwinfo->cnt_h2c_fail, prptctrl->h2c_cnt, + pfwinfo->cnt_c2h, prptctrl->c2h_cnt); - seq_printf(m, - "rpt_cnt=%d(fw_send:%d), rpt_map=0x%x, dm_error_map:0x%x", - pfwinfo->event[BTF_EVNT_RPT], prptctrl->rpt_cnt, - prptctrl->rpt_enable, dm->error.val); + p += scnprintf(p, end - p, + "rpt_cnt=%d(fw_send:%d), rpt_map=0x%x, dm_error_map:0x%x", + pfwinfo->event[BTF_EVNT_RPT], + prptctrl->rpt_cnt, + prptctrl->rpt_enable, dm->error.val); if (dm->error.map.wl_fw_hang) - seq_puts(m, " (WL FW Hang!!)"); - seq_puts(m, "\n"); - seq_printf(m, - " %-15s : send_ok:%d, send_fail:%d, recv:%d", - "[mailbox]", prptctrl->mb_send_ok_cnt, - prptctrl->mb_send_fail_cnt, prptctrl->mb_recv_cnt); - - seq_printf(m, - "(A2DP_empty:%d, A2DP_flowstop:%d, A2DP_full:%d)\n", - prptctrl->mb_a2dp_empty_cnt, - prptctrl->mb_a2dp_flct_cnt, - prptctrl->mb_a2dp_full_cnt); - - seq_printf(m, - " %-15s : wl_rfk[req:%d/go:%d/reject:%d/timeout:%d]", - "[RFK]", cx->cnt_wl[BTC_WCNT_RFK_REQ], - cx->cnt_wl[BTC_WCNT_RFK_GO], - cx->cnt_wl[BTC_WCNT_RFK_REJECT], - cx->cnt_wl[BTC_WCNT_RFK_TIMEOUT]); - - seq_printf(m, - ", bt_rfk[req:%d/go:%d/reject:%d/timeout:%d/fail:%d]\n", - prptctrl->bt_rfk_cnt[BTC_BCNT_RFK_REQ], - prptctrl->bt_rfk_cnt[BTC_BCNT_RFK_GO], - prptctrl->bt_rfk_cnt[BTC_BCNT_RFK_REJECT], - prptctrl->bt_rfk_cnt[BTC_BCNT_RFK_TIMEOUT], - prptctrl->bt_rfk_cnt[BTC_BCNT_RFK_FAIL]); + p += scnprintf(p, end - p, " (WL FW Hang!!)"); + p += scnprintf(p, end - p, "\n"); + p += scnprintf(p, end - p, + " %-15s : send_ok:%d, send_fail:%d, recv:%d", + "[mailbox]", prptctrl->mb_send_ok_cnt, + prptctrl->mb_send_fail_cnt, + 
prptctrl->mb_recv_cnt); + + p += scnprintf(p, end - p, + "(A2DP_empty:%d, A2DP_flowstop:%d, A2DP_full:%d)\n", + prptctrl->mb_a2dp_empty_cnt, + prptctrl->mb_a2dp_flct_cnt, + prptctrl->mb_a2dp_full_cnt); + + p += scnprintf(p, end - p, + " %-15s : wl_rfk[req:%d/go:%d/reject:%d/timeout:%d]", + "[RFK]", cx->cnt_wl[BTC_WCNT_RFK_REQ], + cx->cnt_wl[BTC_WCNT_RFK_GO], + cx->cnt_wl[BTC_WCNT_RFK_REJECT], + cx->cnt_wl[BTC_WCNT_RFK_TIMEOUT]); + + p += scnprintf(p, end - p, + ", bt_rfk[req:%d/go:%d/reject:%d/timeout:%d/fail:%d]\n", + prptctrl->bt_rfk_cnt[BTC_BCNT_RFK_REQ], + prptctrl->bt_rfk_cnt[BTC_BCNT_RFK_GO], + prptctrl->bt_rfk_cnt[BTC_BCNT_RFK_REJECT], + prptctrl->bt_rfk_cnt[BTC_BCNT_RFK_TIMEOUT], + prptctrl->bt_rfk_cnt[BTC_BCNT_RFK_FAIL]); if (prptctrl->bt_rfk_cnt[BTC_BCNT_RFK_TIMEOUT] > 0) bt->rfk_info.map.timeout = 1; @@ -10370,42 +10510,44 @@ static void _show_summary_v1(struct rtw89_dev *rtwdev, struct seq_file *m) dm->error.map.wl_rfk_timeout = bt->rfk_info.map.timeout; } else { - seq_printf(m, - " %-15s : h2c_cnt=%d(fail:%d), c2h_cnt=%d, rpt_cnt=%d, rpt_map=0x%x", - "[summary]", pfwinfo->cnt_h2c, - pfwinfo->cnt_h2c_fail, pfwinfo->cnt_c2h, - pfwinfo->event[BTF_EVNT_RPT], - btc->fwinfo.rpt_en_map); - seq_puts(m, " (WL FW report invalid!!)\n"); + p += scnprintf(p, end - p, + " %-15s : h2c_cnt=%d(fail:%d), c2h_cnt=%d, rpt_cnt=%d, rpt_map=0x%x", + "[summary]", pfwinfo->cnt_h2c, + pfwinfo->cnt_h2c_fail, pfwinfo->cnt_c2h, + pfwinfo->event[BTF_EVNT_RPT], + btc->fwinfo.rpt_en_map); + p += scnprintf(p, end - p, " (WL FW report invalid!!)\n"); } for (i = 0; i < BTC_NCNT_NUM; i++) cnt_sum += dm->cnt_notify[i]; - seq_printf(m, - " %-15s : total=%d, show_coex_info=%d, power_on=%d, init_coex=%d, ", - "[notify_cnt]", cnt_sum, cnt[BTC_NCNT_SHOW_COEX_INFO], - cnt[BTC_NCNT_POWER_ON], cnt[BTC_NCNT_INIT_COEX]); + p += scnprintf(p, end - p, + " %-15s : total=%d, show_coex_info=%d, power_on=%d, init_coex=%d, ", + "[notify_cnt]", cnt_sum, cnt[BTC_NCNT_SHOW_COEX_INFO], + cnt[BTC_NCNT_POWER_ON], cnt[BTC_NCNT_INIT_COEX]); + + p += scnprintf(p, end - p, + "power_off=%d, radio_state=%d, role_info=%d, wl_rfk=%d, wl_sta=%d\n", + cnt[BTC_NCNT_POWER_OFF], cnt[BTC_NCNT_RADIO_STATE], + cnt[BTC_NCNT_ROLE_INFO], cnt[BTC_NCNT_WL_RFK], + cnt[BTC_NCNT_WL_STA]); - seq_printf(m, - "power_off=%d, radio_state=%d, role_info=%d, wl_rfk=%d, wl_sta=%d\n", - cnt[BTC_NCNT_POWER_OFF], cnt[BTC_NCNT_RADIO_STATE], - cnt[BTC_NCNT_ROLE_INFO], cnt[BTC_NCNT_WL_RFK], - cnt[BTC_NCNT_WL_STA]); + p += scnprintf(p, end - p, + " %-15s : scan_start=%d, scan_finish=%d, switch_band=%d, special_pkt=%d, ", + "[notify_cnt]", cnt[BTC_NCNT_SCAN_START], + cnt[BTC_NCNT_SCAN_FINISH], cnt[BTC_NCNT_SWITCH_BAND], + cnt[BTC_NCNT_SPECIAL_PACKET]); - seq_printf(m, - " %-15s : scan_start=%d, scan_finish=%d, switch_band=%d, special_pkt=%d, ", - "[notify_cnt]", cnt[BTC_NCNT_SCAN_START], - cnt[BTC_NCNT_SCAN_FINISH], cnt[BTC_NCNT_SWITCH_BAND], - cnt[BTC_NCNT_SPECIAL_PACKET]); + p += scnprintf(p, end - p, + "timer=%d, control=%d, customerize=%d\n", + cnt[BTC_NCNT_TIMER], cnt[BTC_NCNT_CONTROL], + cnt[BTC_NCNT_CUSTOMERIZE]); - seq_printf(m, - "timer=%d, control=%d, customerize=%d\n", - cnt[BTC_NCNT_TIMER], cnt[BTC_NCNT_CONTROL], - cnt[BTC_NCNT_CUSTOMERIZE]); + return p - buf; } -static void _show_summary_v4(struct rtw89_dev *rtwdev, struct seq_file *m) +static int _show_summary_v4(struct rtw89_dev *rtwdev, char *buf, size_t bufsz) { struct rtw89_btc *btc = &rtwdev->btc; struct rtw89_btc_btf_fwinfo *pfwinfo = &btc->fwinfo; @@ -10416,64 +10558,65 @@ static void 
_show_summary_v4(struct rtw89_dev *rtwdev, struct seq_file *m) struct rtw89_btc_wl_info *wl = &cx->wl; struct rtw89_btc_bt_info *bt = &cx->bt; u32 cnt_sum = 0, *cnt = btc->dm.cnt_notify; + char *p = buf, *end = buf + bufsz; u8 i; if (!(dm->coex_info_map & BTC_COEX_INFO_SUMMARY)) - return; + return 0; - seq_puts(m, "========== [Statistics] ==========\n"); + p += scnprintf(p, end - p, "========== [Statistics] ==========\n"); pcinfo = &pfwinfo->rpt_ctrl.cinfo; if (pcinfo->valid && !wl->status.map.lps && !wl->status.map.rf_off) { prptctrl = &pfwinfo->rpt_ctrl.finfo.v4; - seq_printf(m, - " %-15s : h2c_cnt=%d(fail:%d, fw_recv:%d), c2h_cnt=%d(fw_send:%d), ", - "[summary]", pfwinfo->cnt_h2c, - pfwinfo->cnt_h2c_fail, - le32_to_cpu(prptctrl->rpt_info.cnt_h2c), - pfwinfo->cnt_c2h, - le32_to_cpu(prptctrl->rpt_info.cnt_c2h)); - - seq_printf(m, - "rpt_cnt=%d(fw_send:%d), rpt_map=0x%x, dm_error_map:0x%x", - pfwinfo->event[BTF_EVNT_RPT], - le32_to_cpu(prptctrl->rpt_info.cnt), - le32_to_cpu(prptctrl->rpt_info.en), - dm->error.val); + p += scnprintf(p, end - p, + " %-15s : h2c_cnt=%d(fail:%d, fw_recv:%d), c2h_cnt=%d(fw_send:%d), ", + "[summary]", pfwinfo->cnt_h2c, + pfwinfo->cnt_h2c_fail, + le32_to_cpu(prptctrl->rpt_info.cnt_h2c), + pfwinfo->cnt_c2h, + le32_to_cpu(prptctrl->rpt_info.cnt_c2h)); + + p += scnprintf(p, end - p, + "rpt_cnt=%d(fw_send:%d), rpt_map=0x%x, dm_error_map:0x%x", + pfwinfo->event[BTF_EVNT_RPT], + le32_to_cpu(prptctrl->rpt_info.cnt), + le32_to_cpu(prptctrl->rpt_info.en), + dm->error.val); if (dm->error.map.wl_fw_hang) - seq_puts(m, " (WL FW Hang!!)"); - seq_puts(m, "\n"); - seq_printf(m, - " %-15s : send_ok:%d, send_fail:%d, recv:%d, ", - "[mailbox]", - le32_to_cpu(prptctrl->bt_mbx_info.cnt_send_ok), - le32_to_cpu(prptctrl->bt_mbx_info.cnt_send_fail), - le32_to_cpu(prptctrl->bt_mbx_info.cnt_recv)); - - seq_printf(m, - "A2DP_empty:%d(stop:%d, tx:%d, ack:%d, nack:%d)\n", - le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_empty), - le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_flowctrl), - le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_tx), - le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_ack), - le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_nack)); - - seq_printf(m, - " %-15s : wl_rfk[req:%d/go:%d/reject:%d/timeout:%d]", - "[RFK]", cx->cnt_wl[BTC_WCNT_RFK_REQ], - cx->cnt_wl[BTC_WCNT_RFK_GO], - cx->cnt_wl[BTC_WCNT_RFK_REJECT], - cx->cnt_wl[BTC_WCNT_RFK_TIMEOUT]); - - seq_printf(m, - ", bt_rfk[req:%d/go:%d/reject:%d/timeout:%d/fail:%d]\n", - le32_to_cpu(prptctrl->bt_cnt[BTC_BCNT_RFK_REQ]), - le32_to_cpu(prptctrl->bt_cnt[BTC_BCNT_RFK_GO]), - le32_to_cpu(prptctrl->bt_cnt[BTC_BCNT_RFK_REJECT]), - le32_to_cpu(prptctrl->bt_cnt[BTC_BCNT_RFK_TIMEOUT]), - le32_to_cpu(prptctrl->bt_cnt[BTC_BCNT_RFK_FAIL])); + p += scnprintf(p, end - p, " (WL FW Hang!!)"); + p += scnprintf(p, end - p, "\n"); + p += scnprintf(p, end - p, + " %-15s : send_ok:%d, send_fail:%d, recv:%d, ", + "[mailbox]", + le32_to_cpu(prptctrl->bt_mbx_info.cnt_send_ok), + le32_to_cpu(prptctrl->bt_mbx_info.cnt_send_fail), + le32_to_cpu(prptctrl->bt_mbx_info.cnt_recv)); + + p += scnprintf(p, end - p, + "A2DP_empty:%d(stop:%d, tx:%d, ack:%d, nack:%d)\n", + le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_empty), + le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_flowctrl), + le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_tx), + le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_ack), + le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_nack)); + + p += scnprintf(p, end - p, + " %-15s : wl_rfk[req:%d/go:%d/reject:%d/timeout:%d]", + "[RFK]", cx->cnt_wl[BTC_WCNT_RFK_REQ], + 
cx->cnt_wl[BTC_WCNT_RFK_GO], + cx->cnt_wl[BTC_WCNT_RFK_REJECT], + cx->cnt_wl[BTC_WCNT_RFK_TIMEOUT]); + + p += scnprintf(p, end - p, + ", bt_rfk[req:%d/go:%d/reject:%d/timeout:%d/fail:%d]\n", + le32_to_cpu(prptctrl->bt_cnt[BTC_BCNT_RFK_REQ]), + le32_to_cpu(prptctrl->bt_cnt[BTC_BCNT_RFK_GO]), + le32_to_cpu(prptctrl->bt_cnt[BTC_BCNT_RFK_REJECT]), + le32_to_cpu(prptctrl->bt_cnt[BTC_BCNT_RFK_TIMEOUT]), + le32_to_cpu(prptctrl->bt_cnt[BTC_BCNT_RFK_FAIL])); if (le32_to_cpu(prptctrl->bt_cnt[BTC_BCNT_RFK_TIMEOUT]) > 0) bt->rfk_info.map.timeout = 1; @@ -10482,42 +10625,44 @@ static void _show_summary_v4(struct rtw89_dev *rtwdev, struct seq_file *m) dm->error.map.wl_rfk_timeout = bt->rfk_info.map.timeout; } else { - seq_printf(m, - " %-15s : h2c_cnt=%d(fail:%d), c2h_cnt=%d, rpt_cnt=%d, rpt_map=0x%x", - "[summary]", pfwinfo->cnt_h2c, - pfwinfo->cnt_h2c_fail, pfwinfo->cnt_c2h, - pfwinfo->event[BTF_EVNT_RPT], - btc->fwinfo.rpt_en_map); - seq_puts(m, " (WL FW report invalid!!)\n"); + p += scnprintf(p, end - p, + " %-15s : h2c_cnt=%d(fail:%d), c2h_cnt=%d, rpt_cnt=%d, rpt_map=0x%x", + "[summary]", pfwinfo->cnt_h2c, + pfwinfo->cnt_h2c_fail, pfwinfo->cnt_c2h, + pfwinfo->event[BTF_EVNT_RPT], + btc->fwinfo.rpt_en_map); + p += scnprintf(p, end - p, " (WL FW report invalid!!)\n"); } for (i = 0; i < BTC_NCNT_NUM; i++) cnt_sum += dm->cnt_notify[i]; - seq_printf(m, - " %-15s : total=%d, show_coex_info=%d, power_on=%d, init_coex=%d, ", - "[notify_cnt]", cnt_sum, cnt[BTC_NCNT_SHOW_COEX_INFO], - cnt[BTC_NCNT_POWER_ON], cnt[BTC_NCNT_INIT_COEX]); + p += scnprintf(p, end - p, + " %-15s : total=%d, show_coex_info=%d, power_on=%d, init_coex=%d, ", + "[notify_cnt]", cnt_sum, cnt[BTC_NCNT_SHOW_COEX_INFO], + cnt[BTC_NCNT_POWER_ON], cnt[BTC_NCNT_INIT_COEX]); + + p += scnprintf(p, end - p, + "power_off=%d, radio_state=%d, role_info=%d, wl_rfk=%d, wl_sta=%d\n", + cnt[BTC_NCNT_POWER_OFF], cnt[BTC_NCNT_RADIO_STATE], + cnt[BTC_NCNT_ROLE_INFO], cnt[BTC_NCNT_WL_RFK], + cnt[BTC_NCNT_WL_STA]); - seq_printf(m, - "power_off=%d, radio_state=%d, role_info=%d, wl_rfk=%d, wl_sta=%d\n", - cnt[BTC_NCNT_POWER_OFF], cnt[BTC_NCNT_RADIO_STATE], - cnt[BTC_NCNT_ROLE_INFO], cnt[BTC_NCNT_WL_RFK], - cnt[BTC_NCNT_WL_STA]); + p += scnprintf(p, end - p, + " %-15s : scan_start=%d, scan_finish=%d, switch_band=%d, special_pkt=%d, ", + "[notify_cnt]", cnt[BTC_NCNT_SCAN_START], + cnt[BTC_NCNT_SCAN_FINISH], cnt[BTC_NCNT_SWITCH_BAND], + cnt[BTC_NCNT_SPECIAL_PACKET]); - seq_printf(m, - " %-15s : scan_start=%d, scan_finish=%d, switch_band=%d, special_pkt=%d, ", - "[notify_cnt]", cnt[BTC_NCNT_SCAN_START], - cnt[BTC_NCNT_SCAN_FINISH], cnt[BTC_NCNT_SWITCH_BAND], - cnt[BTC_NCNT_SPECIAL_PACKET]); + p += scnprintf(p, end - p, + "timer=%d, control=%d, customerize=%d\n", + cnt[BTC_NCNT_TIMER], cnt[BTC_NCNT_CONTROL], + cnt[BTC_NCNT_CUSTOMERIZE]); - seq_printf(m, - "timer=%d, control=%d, customerize=%d\n", - cnt[BTC_NCNT_TIMER], cnt[BTC_NCNT_CONTROL], - cnt[BTC_NCNT_CUSTOMERIZE]); + return p - buf; } -static void _show_summary_v5(struct rtw89_dev *rtwdev, struct seq_file *m) +static int _show_summary_v5(struct rtw89_dev *rtwdev, char *buf, size_t bufsz) { struct rtw89_btc *btc = &rtwdev->btc; struct rtw89_btc_btf_fwinfo *pfwinfo = &btc->fwinfo; @@ -10527,112 +10672,118 @@ static void _show_summary_v5(struct rtw89_dev *rtwdev, struct seq_file *m) struct rtw89_btc_dm *dm = &btc->dm; struct rtw89_btc_wl_info *wl = &cx->wl; u32 cnt_sum = 0, *cnt = btc->dm.cnt_notify; + char *p = buf, *end = buf + bufsz; u8 i; if (!(dm->coex_info_map & BTC_COEX_INFO_SUMMARY)) - return; + return 
0; - seq_puts(m, "========== [Statistics] ==========\n"); + p += scnprintf(p, end - p, "========== [Statistics] ==========\n"); pcinfo = &pfwinfo->rpt_ctrl.cinfo; if (pcinfo->valid && !wl->status.map.lps && !wl->status.map.rf_off) { prptctrl = &pfwinfo->rpt_ctrl.finfo.v5; - seq_printf(m, - " %-15s : h2c_cnt=%d(fail:%d, fw_recv:%d), c2h_cnt=%d(fw_send:%d, len:%d), ", - "[summary]", pfwinfo->cnt_h2c, pfwinfo->cnt_h2c_fail, - le16_to_cpu(prptctrl->rpt_info.cnt_h2c), - pfwinfo->cnt_c2h, - le16_to_cpu(prptctrl->rpt_info.cnt_c2h), - le16_to_cpu(prptctrl->rpt_info.len_c2h)); - - seq_printf(m, - "rpt_cnt=%d(fw_send:%d), rpt_map=0x%x", - pfwinfo->event[BTF_EVNT_RPT], - le16_to_cpu(prptctrl->rpt_info.cnt), - le32_to_cpu(prptctrl->rpt_info.en)); + p += scnprintf(p, end - p, + " %-15s : h2c_cnt=%d(fail:%d, fw_recv:%d), c2h_cnt=%d(fw_send:%d, len:%d), ", + "[summary]", pfwinfo->cnt_h2c, + pfwinfo->cnt_h2c_fail, + le16_to_cpu(prptctrl->rpt_info.cnt_h2c), + pfwinfo->cnt_c2h, + le16_to_cpu(prptctrl->rpt_info.cnt_c2h), + le16_to_cpu(prptctrl->rpt_info.len_c2h)); + + p += scnprintf(p, end - p, + "rpt_cnt=%d(fw_send:%d), rpt_map=0x%x", + pfwinfo->event[BTF_EVNT_RPT], + le16_to_cpu(prptctrl->rpt_info.cnt), + le32_to_cpu(prptctrl->rpt_info.en)); if (dm->error.map.wl_fw_hang) - seq_puts(m, " (WL FW Hang!!)"); - seq_puts(m, "\n"); - seq_printf(m, - " %-15s : send_ok:%d, send_fail:%d, recv:%d, ", - "[mailbox]", - le32_to_cpu(prptctrl->bt_mbx_info.cnt_send_ok), - le32_to_cpu(prptctrl->bt_mbx_info.cnt_send_fail), - le32_to_cpu(prptctrl->bt_mbx_info.cnt_recv)); - - seq_printf(m, - "A2DP_empty:%d(stop:%d, tx:%d, ack:%d, nack:%d)\n", - le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_empty), - le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_flowctrl), - le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_tx), - le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_ack), - le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_nack)); - - seq_printf(m, - " %-15s : wl_rfk[req:%d/go:%d/reject:%d/tout:%d]", - "[RFK/LPS]", cx->cnt_wl[BTC_WCNT_RFK_REQ], - cx->cnt_wl[BTC_WCNT_RFK_GO], - cx->cnt_wl[BTC_WCNT_RFK_REJECT], - cx->cnt_wl[BTC_WCNT_RFK_TIMEOUT]); - - seq_printf(m, - ", bt_rfk[req:%d]", - le16_to_cpu(prptctrl->bt_cnt[BTC_BCNT_RFK_REQ])); - - seq_printf(m, - ", AOAC[RF_on:%d/RF_off:%d]", - le16_to_cpu(prptctrl->rpt_info.cnt_aoac_rf_on), - le16_to_cpu(prptctrl->rpt_info.cnt_aoac_rf_off)); + p += scnprintf(p, end - p, " (WL FW Hang!!)"); + p += scnprintf(p, end - p, "\n"); + p += scnprintf(p, end - p, + " %-15s : send_ok:%d, send_fail:%d, recv:%d, ", + "[mailbox]", + le32_to_cpu(prptctrl->bt_mbx_info.cnt_send_ok), + le32_to_cpu(prptctrl->bt_mbx_info.cnt_send_fail), + le32_to_cpu(prptctrl->bt_mbx_info.cnt_recv)); + + p += scnprintf(p, end - p, + "A2DP_empty:%d(stop:%d, tx:%d, ack:%d, nack:%d)\n", + le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_empty), + le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_flowctrl), + le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_tx), + le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_ack), + le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_nack)); + + p += scnprintf(p, end - p, + " %-15s : wl_rfk[req:%d/go:%d/reject:%d/tout:%d]", + "[RFK/LPS]", cx->cnt_wl[BTC_WCNT_RFK_REQ], + cx->cnt_wl[BTC_WCNT_RFK_GO], + cx->cnt_wl[BTC_WCNT_RFK_REJECT], + cx->cnt_wl[BTC_WCNT_RFK_TIMEOUT]); + + p += scnprintf(p, end - p, + ", bt_rfk[req:%d]", + le16_to_cpu(prptctrl->bt_cnt[BTC_BCNT_RFK_REQ])); + + p += scnprintf(p, end - p, + ", AOAC[RF_on:%d/RF_off:%d]", + le16_to_cpu(prptctrl->rpt_info.cnt_aoac_rf_on), + le16_to_cpu(prptctrl->rpt_info.cnt_aoac_rf_off)); } else { - 
seq_printf(m, - " %-15s : h2c_cnt=%d(fail:%d), c2h_cnt=%d", - "[summary]", pfwinfo->cnt_h2c, - pfwinfo->cnt_h2c_fail, pfwinfo->cnt_c2h); + p += scnprintf(p, end - p, + " %-15s : h2c_cnt=%d(fail:%d), c2h_cnt=%d", + "[summary]", pfwinfo->cnt_h2c, + pfwinfo->cnt_h2c_fail, pfwinfo->cnt_c2h); } if (!pcinfo->valid || pfwinfo->len_mismch || pfwinfo->fver_mismch || pfwinfo->err[BTFRE_EXCEPTION]) { - seq_puts(m, "\n"); - seq_printf(m, - " %-15s : WL FW rpt error!![rpt_ctrl_valid:%d/len:" - "0x%x/ver:0x%x/ex:%d/lps=%d/rf_off=%d]", - "[ERROR]", pcinfo->valid, pfwinfo->len_mismch, - pfwinfo->fver_mismch, pfwinfo->err[BTFRE_EXCEPTION], - wl->status.map.lps, wl->status.map.rf_off); + p += scnprintf(p, end - p, "\n"); + p += scnprintf(p, end - p, + " %-15s : WL FW rpt error!![rpt_ctrl_valid:%d/len:" + "0x%x/ver:0x%x/ex:%d/lps=%d/rf_off=%d]", + "[ERROR]", pcinfo->valid, pfwinfo->len_mismch, + pfwinfo->fver_mismch, + pfwinfo->err[BTFRE_EXCEPTION], + wl->status.map.lps, wl->status.map.rf_off); } for (i = 0; i < BTC_NCNT_NUM; i++) cnt_sum += dm->cnt_notify[i]; - seq_puts(m, "\n"); - seq_printf(m, - " %-15s : total=%d, show_coex_info=%d, power_on=%d, init_coex=%d, ", - "[notify_cnt]", - cnt_sum, cnt[BTC_NCNT_SHOW_COEX_INFO], - cnt[BTC_NCNT_POWER_ON], cnt[BTC_NCNT_INIT_COEX]); + p += scnprintf(p, end - p, "\n"); + p += scnprintf(p, end - p, + " %-15s : total=%d, show_coex_info=%d, power_on=%d, init_coex=%d, ", + "[notify_cnt]", + cnt_sum, cnt[BTC_NCNT_SHOW_COEX_INFO], + cnt[BTC_NCNT_POWER_ON], cnt[BTC_NCNT_INIT_COEX]); + + p += scnprintf(p, end - p, + "power_off=%d, radio_state=%d, role_info=%d, wl_rfk=%d, wl_sta=%d", + cnt[BTC_NCNT_POWER_OFF], cnt[BTC_NCNT_RADIO_STATE], + cnt[BTC_NCNT_ROLE_INFO], cnt[BTC_NCNT_WL_RFK], + cnt[BTC_NCNT_WL_STA]); - seq_printf(m, - "power_off=%d, radio_state=%d, role_info=%d, wl_rfk=%d, wl_sta=%d", - cnt[BTC_NCNT_POWER_OFF], cnt[BTC_NCNT_RADIO_STATE], - cnt[BTC_NCNT_ROLE_INFO], cnt[BTC_NCNT_WL_RFK], - cnt[BTC_NCNT_WL_STA]); + p += scnprintf(p, end - p, "\n"); + p += scnprintf(p, end - p, + " %-15s : scan_start=%d, scan_finish=%d, switch_band=%d, special_pkt=%d, ", + "[notify_cnt]", + cnt[BTC_NCNT_SCAN_START], cnt[BTC_NCNT_SCAN_FINISH], + cnt[BTC_NCNT_SWITCH_BAND], + cnt[BTC_NCNT_SPECIAL_PACKET]); - seq_puts(m, "\n"); - seq_printf(m, - " %-15s : scan_start=%d, scan_finish=%d, switch_band=%d, special_pkt=%d, ", - "[notify_cnt]", - cnt[BTC_NCNT_SCAN_START], cnt[BTC_NCNT_SCAN_FINISH], - cnt[BTC_NCNT_SWITCH_BAND], cnt[BTC_NCNT_SPECIAL_PACKET]); + p += scnprintf(p, end - p, + "timer=%d, control=%d, customerize=%d", + cnt[BTC_NCNT_TIMER], cnt[BTC_NCNT_CONTROL], + cnt[BTC_NCNT_CUSTOMERIZE]); - seq_printf(m, - "timer=%d, control=%d, customerize=%d", - cnt[BTC_NCNT_TIMER], cnt[BTC_NCNT_CONTROL], - cnt[BTC_NCNT_CUSTOMERIZE]); + return p - buf; } -static void _show_summary_v105(struct rtw89_dev *rtwdev, struct seq_file *m) +static int _show_summary_v105(struct rtw89_dev *rtwdev, char *buf, size_t bufsz) { struct rtw89_btc *btc = &rtwdev->btc; struct rtw89_btc_btf_fwinfo *pfwinfo = &btc->fwinfo; @@ -10642,112 +10793,118 @@ static void _show_summary_v105(struct rtw89_dev *rtwdev, struct seq_file *m) struct rtw89_btc_dm *dm = &btc->dm; struct rtw89_btc_wl_info *wl = &cx->wl; u32 cnt_sum = 0, *cnt = btc->dm.cnt_notify; + char *p = buf, *end = buf + bufsz; u8 i; if (!(dm->coex_info_map & BTC_COEX_INFO_SUMMARY)) - return; + return 0; - seq_puts(m, "========== [Statistics] ==========\n"); + p += scnprintf(p, end - p, "========== [Statistics] ==========\n"); pcinfo = &pfwinfo->rpt_ctrl.cinfo; if 
(pcinfo->valid && !wl->status.map.lps && !wl->status.map.rf_off) { prptctrl = &pfwinfo->rpt_ctrl.finfo.v105; - seq_printf(m, - " %-15s : h2c_cnt=%d(fail:%d, fw_recv:%d), c2h_cnt=%d(fw_send:%d, len:%d), ", - "[summary]", pfwinfo->cnt_h2c, pfwinfo->cnt_h2c_fail, - le16_to_cpu(prptctrl->rpt_info.cnt_h2c), - pfwinfo->cnt_c2h, - le16_to_cpu(prptctrl->rpt_info.cnt_c2h), - le16_to_cpu(prptctrl->rpt_info.len_c2h)); - - seq_printf(m, - "rpt_cnt=%d(fw_send:%d), rpt_map=0x%x", - pfwinfo->event[BTF_EVNT_RPT], - le16_to_cpu(prptctrl->rpt_info.cnt), - le32_to_cpu(prptctrl->rpt_info.en)); + p += scnprintf(p, end - p, + " %-15s : h2c_cnt=%d(fail:%d, fw_recv:%d), c2h_cnt=%d(fw_send:%d, len:%d), ", + "[summary]", pfwinfo->cnt_h2c, + pfwinfo->cnt_h2c_fail, + le16_to_cpu(prptctrl->rpt_info.cnt_h2c), + pfwinfo->cnt_c2h, + le16_to_cpu(prptctrl->rpt_info.cnt_c2h), + le16_to_cpu(prptctrl->rpt_info.len_c2h)); + + p += scnprintf(p, end - p, + "rpt_cnt=%d(fw_send:%d), rpt_map=0x%x", + pfwinfo->event[BTF_EVNT_RPT], + le16_to_cpu(prptctrl->rpt_info.cnt), + le32_to_cpu(prptctrl->rpt_info.en)); if (dm->error.map.wl_fw_hang) - seq_puts(m, " (WL FW Hang!!)"); - seq_puts(m, "\n"); - seq_printf(m, - " %-15s : send_ok:%d, send_fail:%d, recv:%d, ", - "[mailbox]", - le32_to_cpu(prptctrl->bt_mbx_info.cnt_send_ok), - le32_to_cpu(prptctrl->bt_mbx_info.cnt_send_fail), - le32_to_cpu(prptctrl->bt_mbx_info.cnt_recv)); - - seq_printf(m, - "A2DP_empty:%d(stop:%d, tx:%d, ack:%d, nack:%d)\n", - le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_empty), - le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_flowctrl), - le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_tx), - le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_ack), - le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_nack)); - - seq_printf(m, - " %-15s : wl_rfk[req:%d/go:%d/reject:%d/tout:%d]", - "[RFK/LPS]", cx->cnt_wl[BTC_WCNT_RFK_REQ], - cx->cnt_wl[BTC_WCNT_RFK_GO], - cx->cnt_wl[BTC_WCNT_RFK_REJECT], - cx->cnt_wl[BTC_WCNT_RFK_TIMEOUT]); - - seq_printf(m, - ", bt_rfk[req:%d]", - le16_to_cpu(prptctrl->bt_cnt[BTC_BCNT_RFK_REQ])); - - seq_printf(m, - ", AOAC[RF_on:%d/RF_off:%d]", - le16_to_cpu(prptctrl->rpt_info.cnt_aoac_rf_on), - le16_to_cpu(prptctrl->rpt_info.cnt_aoac_rf_off)); + p += scnprintf(p, end - p, " (WL FW Hang!!)"); + p += scnprintf(p, end - p, "\n"); + p += scnprintf(p, end - p, + " %-15s : send_ok:%d, send_fail:%d, recv:%d, ", + "[mailbox]", + le32_to_cpu(prptctrl->bt_mbx_info.cnt_send_ok), + le32_to_cpu(prptctrl->bt_mbx_info.cnt_send_fail), + le32_to_cpu(prptctrl->bt_mbx_info.cnt_recv)); + + p += scnprintf(p, end - p, + "A2DP_empty:%d(stop:%d, tx:%d, ack:%d, nack:%d)\n", + le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_empty), + le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_flowctrl), + le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_tx), + le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_ack), + le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_nack)); + + p += scnprintf(p, end - p, + " %-15s : wl_rfk[req:%d/go:%d/reject:%d/tout:%d]", + "[RFK/LPS]", cx->cnt_wl[BTC_WCNT_RFK_REQ], + cx->cnt_wl[BTC_WCNT_RFK_GO], + cx->cnt_wl[BTC_WCNT_RFK_REJECT], + cx->cnt_wl[BTC_WCNT_RFK_TIMEOUT]); + + p += scnprintf(p, end - p, + ", bt_rfk[req:%d]", + le16_to_cpu(prptctrl->bt_cnt[BTC_BCNT_RFK_REQ])); + + p += scnprintf(p, end - p, + ", AOAC[RF_on:%d/RF_off:%d]", + le16_to_cpu(prptctrl->rpt_info.cnt_aoac_rf_on), + le16_to_cpu(prptctrl->rpt_info.cnt_aoac_rf_off)); } else { - seq_printf(m, - " %-15s : h2c_cnt=%d(fail:%d), c2h_cnt=%d", - "[summary]", pfwinfo->cnt_h2c, - pfwinfo->cnt_h2c_fail, pfwinfo->cnt_c2h); + p += scnprintf(p, end - p, + " 
%-15s : h2c_cnt=%d(fail:%d), c2h_cnt=%d", + "[summary]", pfwinfo->cnt_h2c, + pfwinfo->cnt_h2c_fail, pfwinfo->cnt_c2h); } if (!pcinfo->valid || pfwinfo->len_mismch || pfwinfo->fver_mismch || pfwinfo->err[BTFRE_EXCEPTION]) { - seq_puts(m, "\n"); - seq_printf(m, - " %-15s : WL FW rpt error!![rpt_ctrl_valid:%d/len:" - "0x%x/ver:0x%x/ex:%d/lps=%d/rf_off=%d]", - "[ERROR]", pcinfo->valid, pfwinfo->len_mismch, - pfwinfo->fver_mismch, pfwinfo->err[BTFRE_EXCEPTION], - wl->status.map.lps, wl->status.map.rf_off); + p += scnprintf(p, end - p, "\n"); + p += scnprintf(p, end - p, + " %-15s : WL FW rpt error!![rpt_ctrl_valid:%d/len:" + "0x%x/ver:0x%x/ex:%d/lps=%d/rf_off=%d]", + "[ERROR]", pcinfo->valid, pfwinfo->len_mismch, + pfwinfo->fver_mismch, + pfwinfo->err[BTFRE_EXCEPTION], + wl->status.map.lps, wl->status.map.rf_off); } for (i = 0; i < BTC_NCNT_NUM; i++) cnt_sum += dm->cnt_notify[i]; - seq_puts(m, "\n"); - seq_printf(m, - " %-15s : total=%d, show_coex_info=%d, power_on=%d, init_coex=%d, ", - "[notify_cnt]", - cnt_sum, cnt[BTC_NCNT_SHOW_COEX_INFO], - cnt[BTC_NCNT_POWER_ON], cnt[BTC_NCNT_INIT_COEX]); + p += scnprintf(p, end - p, "\n"); + p += scnprintf(p, end - p, + " %-15s : total=%d, show_coex_info=%d, power_on=%d, init_coex=%d, ", + "[notify_cnt]", + cnt_sum, cnt[BTC_NCNT_SHOW_COEX_INFO], + cnt[BTC_NCNT_POWER_ON], cnt[BTC_NCNT_INIT_COEX]); - seq_printf(m, - "power_off=%d, radio_state=%d, role_info=%d, wl_rfk=%d, wl_sta=%d", - cnt[BTC_NCNT_POWER_OFF], cnt[BTC_NCNT_RADIO_STATE], - cnt[BTC_NCNT_ROLE_INFO], cnt[BTC_NCNT_WL_RFK], - cnt[BTC_NCNT_WL_STA]); + p += scnprintf(p, end - p, + "power_off=%d, radio_state=%d, role_info=%d, wl_rfk=%d, wl_sta=%d", + cnt[BTC_NCNT_POWER_OFF], cnt[BTC_NCNT_RADIO_STATE], + cnt[BTC_NCNT_ROLE_INFO], cnt[BTC_NCNT_WL_RFK], + cnt[BTC_NCNT_WL_STA]); - seq_puts(m, "\n"); - seq_printf(m, - " %-15s : scan_start=%d, scan_finish=%d, switch_band=%d, special_pkt=%d, ", - "[notify_cnt]", - cnt[BTC_NCNT_SCAN_START], cnt[BTC_NCNT_SCAN_FINISH], - cnt[BTC_NCNT_SWITCH_BAND], cnt[BTC_NCNT_SPECIAL_PACKET]); + p += scnprintf(p, end - p, "\n"); + p += scnprintf(p, end - p, + " %-15s : scan_start=%d, scan_finish=%d, switch_band=%d, special_pkt=%d, ", + "[notify_cnt]", + cnt[BTC_NCNT_SCAN_START], cnt[BTC_NCNT_SCAN_FINISH], + cnt[BTC_NCNT_SWITCH_BAND], + cnt[BTC_NCNT_SPECIAL_PACKET]); - seq_printf(m, - "timer=%d, control=%d, customerize=%d", - cnt[BTC_NCNT_TIMER], cnt[BTC_NCNT_CONTROL], - cnt[BTC_NCNT_CUSTOMERIZE]); + p += scnprintf(p, end - p, + "timer=%d, control=%d, customerize=%d", + cnt[BTC_NCNT_TIMER], cnt[BTC_NCNT_CONTROL], + cnt[BTC_NCNT_CUSTOMERIZE]); + + return p - buf; } -static void _show_summary_v7(struct rtw89_dev *rtwdev, struct seq_file *m) +static int _show_summary_v7(struct rtw89_dev *rtwdev, char *buf, size_t bufsz) { struct rtw89_btc_btf_fwinfo *pfwinfo = &rtwdev->btc.fwinfo; struct rtw89_btc_fbtc_rpt_ctrl_v7 *prptctrl = NULL; @@ -10756,100 +10913,111 @@ static void _show_summary_v7(struct rtw89_dev *rtwdev, struct seq_file *m) struct rtw89_btc_dm *dm = &rtwdev->btc.dm; struct rtw89_btc_wl_info *wl = &cx->wl; u32 *cnt = rtwdev->btc.dm.cnt_notify; + char *p = buf, *end = buf + bufsz; u32 cnt_sum = 0; u8 i; if (!(dm->coex_info_map & BTC_COEX_INFO_SUMMARY)) - return; + return 0; - seq_printf(m, "%s", "\n\r========== [Statistics] =========="); + p += scnprintf(p, end - p, "%s", + "\n\r========== [Statistics] =========="); pcinfo = &pfwinfo->rpt_ctrl.cinfo; if (pcinfo->valid && wl->status.map.lps != BTC_LPS_RF_OFF && !wl->status.map.rf_off) { prptctrl = 
&pfwinfo->rpt_ctrl.finfo.v7; - seq_printf(m, - "\n\r %-15s : h2c_cnt=%d(fail:%d, fw_recv:%d)," - "c2h_cnt=%d(fw_send:%d, len:%d, max:%d), ", - "[summary]", pfwinfo->cnt_h2c, pfwinfo->cnt_h2c_fail, - le16_to_cpu(prptctrl->rpt_info.cnt_h2c), pfwinfo->cnt_c2h, - le16_to_cpu(prptctrl->rpt_info.cnt_c2h), - le16_to_cpu(prptctrl->rpt_info.len_c2h), - rtwdev->btc.ver->info_buf); - - seq_printf(m, "rpt_cnt=%d(fw_send:%d), rpt_map=0x%x", - pfwinfo->event[BTF_EVNT_RPT], - le16_to_cpu(prptctrl->rpt_info.cnt), - le32_to_cpu(prptctrl->rpt_info.en)); + p += scnprintf(p, end - p, + "\n\r %-15s : h2c_cnt=%d(fail:%d, fw_recv:%d)," + "c2h_cnt=%d(fw_send:%d, len:%d, max:%d), ", + "[summary]", pfwinfo->cnt_h2c, + pfwinfo->cnt_h2c_fail, + le16_to_cpu(prptctrl->rpt_info.cnt_h2c), + pfwinfo->cnt_c2h, + le16_to_cpu(prptctrl->rpt_info.cnt_c2h), + le16_to_cpu(prptctrl->rpt_info.len_c2h), + rtwdev->btc.ver->info_buf); + + p += scnprintf(p, end - p, + "rpt_cnt=%d(fw_send:%d), rpt_map=0x%x", + pfwinfo->event[BTF_EVNT_RPT], + le16_to_cpu(prptctrl->rpt_info.cnt), + le32_to_cpu(prptctrl->rpt_info.en)); if (dm->error.map.wl_fw_hang) - seq_puts(m, " (WL FW Hang!!)"); - - seq_printf(m, "\n\r %-15s : send_ok:%d, send_fail:%d, recv:%d, ", - "[mailbox]", le32_to_cpu(prptctrl->bt_mbx_info.cnt_send_ok), - le32_to_cpu(prptctrl->bt_mbx_info.cnt_send_fail), - le32_to_cpu(prptctrl->bt_mbx_info.cnt_recv)); - - seq_printf(m, "A2DP_empty:%d(stop:%d/tx:%d/ack:%d/nack:%d)", - le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_empty), - le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_flowctrl), - le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_tx), - le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_ack), - le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_nack)); - - seq_printf(m, - "\n\r %-15s : wl_rfk[req:%d/go:%d/reject:%d/tout:%d/time:%dms]", - "[RFK/LPS]", cx->cnt_wl[BTC_WCNT_RFK_REQ], - cx->cnt_wl[BTC_WCNT_RFK_GO], - cx->cnt_wl[BTC_WCNT_RFK_REJECT], - cx->cnt_wl[BTC_WCNT_RFK_TIMEOUT], - wl->rfk_info.proc_time); - - seq_printf(m, ", bt_rfk[req:%d]", - le16_to_cpu(prptctrl->bt_cnt[BTC_BCNT_RFK_REQ])); - - seq_printf(m, ", AOAC[RF_on:%d/RF_off:%d]", - le16_to_cpu(prptctrl->rpt_info.cnt_aoac_rf_on), - le16_to_cpu(prptctrl->rpt_info.cnt_aoac_rf_off)); + p += scnprintf(p, end - p, " (WL FW Hang!!)"); + + p += scnprintf(p, end - p, + "\n\r %-15s : send_ok:%d, send_fail:%d, recv:%d, ", + "[mailbox]", + le32_to_cpu(prptctrl->bt_mbx_info.cnt_send_ok), + le32_to_cpu(prptctrl->bt_mbx_info.cnt_send_fail), + le32_to_cpu(prptctrl->bt_mbx_info.cnt_recv)); + + p += scnprintf(p, end - p, + "A2DP_empty:%d(stop:%d/tx:%d/ack:%d/nack:%d)", + le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_empty), + le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_flowctrl), + le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_tx), + le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_ack), + le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_nack)); + + p += scnprintf(p, end - p, + "\n\r %-15s : wl_rfk[req:%d/go:%d/reject:%d/tout:%d/time:%dms]", + "[RFK/LPS]", cx->cnt_wl[BTC_WCNT_RFK_REQ], + cx->cnt_wl[BTC_WCNT_RFK_GO], + cx->cnt_wl[BTC_WCNT_RFK_REJECT], + cx->cnt_wl[BTC_WCNT_RFK_TIMEOUT], + wl->rfk_info.proc_time); + + p += scnprintf(p, end - p, ", bt_rfk[req:%d]", + le16_to_cpu(prptctrl->bt_cnt[BTC_BCNT_RFK_REQ])); + + p += scnprintf(p, end - p, ", AOAC[RF_on:%d/RF_off:%d]", + le16_to_cpu(prptctrl->rpt_info.cnt_aoac_rf_on), + le16_to_cpu(prptctrl->rpt_info.cnt_aoac_rf_off)); } else { - seq_printf(m, - "\n\r %-15s : h2c_cnt=%d(fail:%d), c2h_cnt=%d (lps=%d/rf_off=%d)", - "[summary]", - pfwinfo->cnt_h2c, pfwinfo->cnt_h2c_fail, - 
pfwinfo->cnt_c2h, - wl->status.map.lps, wl->status.map.rf_off); + p += scnprintf(p, end - p, + "\n\r %-15s : h2c_cnt=%d(fail:%d), c2h_cnt=%d (lps=%d/rf_off=%d)", + "[summary]", + pfwinfo->cnt_h2c, pfwinfo->cnt_h2c_fail, + pfwinfo->cnt_c2h, + wl->status.map.lps, wl->status.map.rf_off); } for (i = 0; i < BTC_NCNT_NUM; i++) cnt_sum += dm->cnt_notify[i]; - seq_printf(m, - "\n\r %-15s : total=%d, show_coex_info=%d, power_on=%d, init_coex=%d, ", - "[notify_cnt]", - cnt_sum, cnt[BTC_NCNT_SHOW_COEX_INFO], - cnt[BTC_NCNT_POWER_ON], cnt[BTC_NCNT_INIT_COEX]); + p += scnprintf(p, end - p, + "\n\r %-15s : total=%d, show_coex_info=%d, power_on=%d, init_coex=%d, ", + "[notify_cnt]", + cnt_sum, cnt[BTC_NCNT_SHOW_COEX_INFO], + cnt[BTC_NCNT_POWER_ON], cnt[BTC_NCNT_INIT_COEX]); + + p += scnprintf(p, end - p, + "power_off=%d, radio_state=%d, role_info=%d, wl_rfk=%d, wl_sta=%d", + cnt[BTC_NCNT_POWER_OFF], cnt[BTC_NCNT_RADIO_STATE], + cnt[BTC_NCNT_ROLE_INFO], cnt[BTC_NCNT_WL_RFK], + cnt[BTC_NCNT_WL_STA]); - seq_printf(m, - "power_off=%d, radio_state=%d, role_info=%d, wl_rfk=%d, wl_sta=%d", - cnt[BTC_NCNT_POWER_OFF], cnt[BTC_NCNT_RADIO_STATE], - cnt[BTC_NCNT_ROLE_INFO], cnt[BTC_NCNT_WL_RFK], - cnt[BTC_NCNT_WL_STA]); + p += scnprintf(p, end - p, + "\n\r %-15s : scan_start=%d, scan_finish=%d, switch_band=%d, switch_chbw=%d, special_pkt=%d, ", + "[notify_cnt]", + cnt[BTC_NCNT_SCAN_START], cnt[BTC_NCNT_SCAN_FINISH], + cnt[BTC_NCNT_SWITCH_BAND], cnt[BTC_NCNT_SWITCH_CHBW], + cnt[BTC_NCNT_SPECIAL_PACKET]); - seq_printf(m, - "\n\r %-15s : scan_start=%d, scan_finish=%d, switch_band=%d, switch_chbw=%d, special_pkt=%d, ", - "[notify_cnt]", - cnt[BTC_NCNT_SCAN_START], cnt[BTC_NCNT_SCAN_FINISH], - cnt[BTC_NCNT_SWITCH_BAND], cnt[BTC_NCNT_SWITCH_CHBW], - cnt[BTC_NCNT_SPECIAL_PACKET]); + p += scnprintf(p, end - p, + "timer=%d, customerize=%d, hub_msg=%d, chg_fw=%d, send_cc=%d", + cnt[BTC_NCNT_TIMER], cnt[BTC_NCNT_CUSTOMERIZE], + rtwdev->btc.hubmsg_cnt, cnt[BTC_NCNT_RESUME_DL_FW], + cnt[BTC_NCNT_COUNTRYCODE]); - seq_printf(m, "timer=%d, customerize=%d, hub_msg=%d, chg_fw=%d, send_cc=%d", - cnt[BTC_NCNT_TIMER], cnt[BTC_NCNT_CUSTOMERIZE], - rtwdev->btc.hubmsg_cnt, cnt[BTC_NCNT_RESUME_DL_FW], - cnt[BTC_NCNT_COUNTRYCODE]); + return p - buf; } -static void _show_summary_v8(struct rtw89_dev *rtwdev, struct seq_file *m) +static int _show_summary_v8(struct rtw89_dev *rtwdev, char *buf, size_t bufsz) { struct rtw89_btc_btf_fwinfo *pfwinfo = &rtwdev->btc.fwinfo; struct rtw89_btc_rpt_cmn_info *pcinfo = NULL; @@ -10858,153 +11026,173 @@ static void _show_summary_v8(struct rtw89_dev *rtwdev, struct seq_file *m) struct rtw89_btc_dm *dm = &rtwdev->btc.dm; struct rtw89_btc_wl_info *wl = &cx->wl; u32 *cnt = rtwdev->btc.dm.cnt_notify; + char *p = buf, *end = buf + bufsz; u32 cnt_sum = 0; u8 i; if (!(dm->coex_info_map & BTC_COEX_INFO_SUMMARY)) - return; + return 0; - seq_printf(m, "%s", "\n\r========== [Statistics] =========="); + p += scnprintf(p, end - p, "%s", + "\n\r========== [Statistics] =========="); pcinfo = &pfwinfo->rpt_ctrl.cinfo; if (pcinfo->valid && wl->status.map.lps != BTC_LPS_RF_OFF && !wl->status.map.rf_off) { prptctrl = &pfwinfo->rpt_ctrl.finfo.v8; - seq_printf(m, - "\n\r %-15s : h2c_cnt=%d(fail:%d, fw_recv:%d), c2h_cnt=%d(fw_send:%d, len:%d, max:fw-%d/drv-%d), ", - "[summary]", pfwinfo->cnt_h2c, pfwinfo->cnt_h2c_fail, - le16_to_cpu(prptctrl->rpt_info.cnt_h2c), pfwinfo->cnt_c2h, - le16_to_cpu(prptctrl->rpt_info.cnt_c2h), - le16_to_cpu(prptctrl->rpt_info.len_c2h), - (prptctrl->rpt_len_max_h << 8) + prptctrl->rpt_len_max_l, - 
rtwdev->btc.ver->info_buf); - - seq_printf(m, "rpt_cnt=%d(fw_send:%d), rpt_map=0x%x", - pfwinfo->event[BTF_EVNT_RPT], - le16_to_cpu(prptctrl->rpt_info.cnt), - le32_to_cpu(prptctrl->rpt_info.en)); + p += scnprintf(p, end - p, + "\n\r %-15s : h2c_cnt=%d(fail:%d, fw_recv:%d), c2h_cnt=%d(fw_send:%d, len:%d, max:fw-%d/drv-%d), ", + "[summary]", pfwinfo->cnt_h2c, + pfwinfo->cnt_h2c_fail, + le16_to_cpu(prptctrl->rpt_info.cnt_h2c), + pfwinfo->cnt_c2h, + le16_to_cpu(prptctrl->rpt_info.cnt_c2h), + le16_to_cpu(prptctrl->rpt_info.len_c2h), + (prptctrl->rpt_len_max_h << 8) + prptctrl->rpt_len_max_l, + rtwdev->btc.ver->info_buf); + + p += scnprintf(p, end - p, + "rpt_cnt=%d(fw_send:%d), rpt_map=0x%x", + pfwinfo->event[BTF_EVNT_RPT], + le16_to_cpu(prptctrl->rpt_info.cnt), + le32_to_cpu(prptctrl->rpt_info.en)); if (dm->error.map.wl_fw_hang) - seq_puts(m, " (WL FW Hang!!)"); - - seq_printf(m, "\n\r %-15s : send_ok:%d, send_fail:%d, recv:%d, ", - "[mailbox]", le32_to_cpu(prptctrl->bt_mbx_info.cnt_send_ok), - le32_to_cpu(prptctrl->bt_mbx_info.cnt_send_fail), - le32_to_cpu(prptctrl->bt_mbx_info.cnt_recv)); - - seq_printf(m, "A2DP_empty:%d(stop:%d/tx:%d/ack:%d/nack:%d)", - le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_empty), - le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_flowctrl), - le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_tx), - le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_ack), - le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_nack)); - - seq_printf(m, - "\n\r %-15s : wl_rfk[req:%d/go:%d/reject:%d/tout:%d/time:%dms]", - "[RFK/LPS]", cx->cnt_wl[BTC_WCNT_RFK_REQ], - cx->cnt_wl[BTC_WCNT_RFK_GO], - cx->cnt_wl[BTC_WCNT_RFK_REJECT], - cx->cnt_wl[BTC_WCNT_RFK_TIMEOUT], - wl->rfk_info.proc_time); - - seq_printf(m, ", bt_rfk[req:%d]", - le16_to_cpu(prptctrl->bt_cnt[BTC_BCNT_RFK_REQ])); - - seq_printf(m, ", AOAC[RF_on:%d/RF_off:%d]", - le16_to_cpu(prptctrl->rpt_info.cnt_aoac_rf_on), - le16_to_cpu(prptctrl->rpt_info.cnt_aoac_rf_off)); + p += scnprintf(p, end - p, " (WL FW Hang!!)"); + + p += scnprintf(p, end - p, + "\n\r %-15s : send_ok:%d, send_fail:%d, recv:%d, ", + "[mailbox]", + le32_to_cpu(prptctrl->bt_mbx_info.cnt_send_ok), + le32_to_cpu(prptctrl->bt_mbx_info.cnt_send_fail), + le32_to_cpu(prptctrl->bt_mbx_info.cnt_recv)); + + p += scnprintf(p, end - p, + "A2DP_empty:%d(stop:%d/tx:%d/ack:%d/nack:%d)", + le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_empty), + le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_flowctrl), + le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_tx), + le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_ack), + le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_nack)); + + p += scnprintf(p, end - p, + "\n\r %-15s : wl_rfk[req:%d/go:%d/reject:%d/tout:%d/time:%dms]", + "[RFK/LPS]", cx->cnt_wl[BTC_WCNT_RFK_REQ], + cx->cnt_wl[BTC_WCNT_RFK_GO], + cx->cnt_wl[BTC_WCNT_RFK_REJECT], + cx->cnt_wl[BTC_WCNT_RFK_TIMEOUT], + wl->rfk_info.proc_time); + + p += scnprintf(p, end - p, ", bt_rfk[req:%d]", + le16_to_cpu(prptctrl->bt_cnt[BTC_BCNT_RFK_REQ])); + + p += scnprintf(p, end - p, ", AOAC[RF_on:%d/RF_off:%d]", + le16_to_cpu(prptctrl->rpt_info.cnt_aoac_rf_on), + le16_to_cpu(prptctrl->rpt_info.cnt_aoac_rf_off)); } else { - seq_printf(m, - "\n\r %-15s : h2c_cnt=%d(fail:%d), c2h_cnt=%d (lps=%d/rf_off=%d)", - "[summary]", - pfwinfo->cnt_h2c, pfwinfo->cnt_h2c_fail, - pfwinfo->cnt_c2h, - wl->status.map.lps, wl->status.map.rf_off); + p += scnprintf(p, end - p, + "\n\r %-15s : h2c_cnt=%d(fail:%d), c2h_cnt=%d (lps=%d/rf_off=%d)", + "[summary]", + pfwinfo->cnt_h2c, pfwinfo->cnt_h2c_fail, + pfwinfo->cnt_c2h, + wl->status.map.lps, wl->status.map.rf_off); } 
for (i = 0; i < BTC_NCNT_NUM; i++) cnt_sum += dm->cnt_notify[i]; - seq_printf(m, - "\n\r %-15s : total=%d, show_coex_info=%d, power_on=%d, init_coex=%d, ", - "[notify_cnt]", - cnt_sum, cnt[BTC_NCNT_SHOW_COEX_INFO], - cnt[BTC_NCNT_POWER_ON], cnt[BTC_NCNT_INIT_COEX]); + p += scnprintf(p, end - p, + "\n\r %-15s : total=%d, show_coex_info=%d, power_on=%d, init_coex=%d, ", + "[notify_cnt]", + cnt_sum, cnt[BTC_NCNT_SHOW_COEX_INFO], + cnt[BTC_NCNT_POWER_ON], cnt[BTC_NCNT_INIT_COEX]); - seq_printf(m, - "power_off=%d, radio_state=%d, role_info=%d, wl_rfk=%d, wl_sta=%d", - cnt[BTC_NCNT_POWER_OFF], cnt[BTC_NCNT_RADIO_STATE], - cnt[BTC_NCNT_ROLE_INFO], cnt[BTC_NCNT_WL_RFK], - cnt[BTC_NCNT_WL_STA]); + p += scnprintf(p, end - p, + "power_off=%d, radio_state=%d, role_info=%d, wl_rfk=%d, wl_sta=%d", + cnt[BTC_NCNT_POWER_OFF], cnt[BTC_NCNT_RADIO_STATE], + cnt[BTC_NCNT_ROLE_INFO], cnt[BTC_NCNT_WL_RFK], + cnt[BTC_NCNT_WL_STA]); - seq_printf(m, - "\n\r %-15s : scan_start=%d, scan_finish=%d, switch_band=%d, switch_chbw=%d, special_pkt=%d, ", - "[notify_cnt]", - cnt[BTC_NCNT_SCAN_START], cnt[BTC_NCNT_SCAN_FINISH], - cnt[BTC_NCNT_SWITCH_BAND], cnt[BTC_NCNT_SWITCH_CHBW], - cnt[BTC_NCNT_SPECIAL_PACKET]); + p += scnprintf(p, end - p, + "\n\r %-15s : scan_start=%d, scan_finish=%d, switch_band=%d, switch_chbw=%d, special_pkt=%d, ", + "[notify_cnt]", + cnt[BTC_NCNT_SCAN_START], cnt[BTC_NCNT_SCAN_FINISH], + cnt[BTC_NCNT_SWITCH_BAND], cnt[BTC_NCNT_SWITCH_CHBW], + cnt[BTC_NCNT_SPECIAL_PACKET]); - seq_printf(m, "timer=%d, customerize=%d, hub_msg=%d, chg_fw=%d, send_cc=%d", - cnt[BTC_NCNT_TIMER], cnt[BTC_NCNT_CUSTOMERIZE], - rtwdev->btc.hubmsg_cnt, cnt[BTC_NCNT_RESUME_DL_FW], - cnt[BTC_NCNT_COUNTRYCODE]); + p += scnprintf(p, end - p, + "timer=%d, customerize=%d, hub_msg=%d, chg_fw=%d, send_cc=%d", + cnt[BTC_NCNT_TIMER], cnt[BTC_NCNT_CUSTOMERIZE], + rtwdev->btc.hubmsg_cnt, cnt[BTC_NCNT_RESUME_DL_FW], + cnt[BTC_NCNT_COUNTRYCODE]); + + return p - buf; } -void rtw89_btc_dump_info(struct rtw89_dev *rtwdev, struct seq_file *m) +ssize_t rtw89_btc_dump_info(struct rtw89_dev *rtwdev, char *buf, size_t bufsz) { struct rtw89_fw_suit *fw_suit = &rtwdev->fw.normal; struct rtw89_btc *btc = &rtwdev->btc; const struct rtw89_btc_ver *ver = btc->ver; struct rtw89_btc_cx *cx = &btc->cx; struct rtw89_btc_bt_info *bt = &cx->bt; - - seq_puts(m, "=========================================\n"); - seq_printf(m, "WL FW / BT FW %d.%d.%d.%d / NA\n", - fw_suit->major_ver, fw_suit->minor_ver, - fw_suit->sub_ver, fw_suit->sub_idex); - seq_printf(m, "manual %d\n", btc->manual_ctrl); - - seq_puts(m, "=========================================\n"); - - seq_printf(m, "\n\r %-15s : raw_data[%02x %02x %02x %02x %02x %02x] (type:%s/cnt:%d/same:%d)", - "[bt_info]", - bt->raw_info[2], bt->raw_info[3], - bt->raw_info[4], bt->raw_info[5], - bt->raw_info[6], bt->raw_info[7], - bt->raw_info[0] == BTC_BTINFO_AUTO ? 
"auto" : "reply", - cx->cnt_bt[BTC_BCNT_INFOUPDATE], - cx->cnt_bt[BTC_BCNT_INFOSAME]); - - seq_puts(m, "\n=========================================\n"); - - _show_cx_info(rtwdev, m); - _show_wl_info(rtwdev, m); - _show_bt_info(rtwdev, m); - _show_dm_info(rtwdev, m); - _show_fw_dm_msg(rtwdev, m); + char *p = buf, *end = buf + bufsz; + + p += scnprintf(p, end - p, + "=========================================\n"); + p += scnprintf(p, end - p, + "WL FW / BT FW %d.%d.%d.%d / NA\n", + fw_suit->major_ver, fw_suit->minor_ver, + fw_suit->sub_ver, fw_suit->sub_idex); + p += scnprintf(p, end - p, "manual %d\n", + btc->manual_ctrl); + + p += scnprintf(p, end - p, + "=========================================\n"); + + p += scnprintf(p, end - p, + "\n\r %-15s : raw_data[%02x %02x %02x %02x %02x %02x] (type:%s/cnt:%d/same:%d)", + "[bt_info]", + bt->raw_info[2], bt->raw_info[3], + bt->raw_info[4], bt->raw_info[5], + bt->raw_info[6], bt->raw_info[7], + bt->raw_info[0] == BTC_BTINFO_AUTO ? "auto" : "reply", + cx->cnt_bt[BTC_BCNT_INFOUPDATE], + cx->cnt_bt[BTC_BCNT_INFOSAME]); + + p += scnprintf(p, end - p, + "\n=========================================\n"); + + p += _show_cx_info(rtwdev, p, end - p); + p += _show_wl_info(rtwdev, p, end - p); + p += _show_bt_info(rtwdev, p, end - p); + p += _show_dm_info(rtwdev, p, end - p); + p += _show_fw_dm_msg(rtwdev, p, end - p); if (ver->fcxmreg == 1) - _show_mreg_v1(rtwdev, m); + p += _show_mreg_v1(rtwdev, p, end - p); else if (ver->fcxmreg == 2) - _show_mreg_v2(rtwdev, m); + p += _show_mreg_v2(rtwdev, p, end - p); else if (ver->fcxmreg == 7) - _show_mreg_v7(rtwdev, m); + p += _show_mreg_v7(rtwdev, p, end - p); - _show_gpio_dbg(rtwdev, m); + p += _show_gpio_dbg(rtwdev, p, end - p); if (ver->fcxbtcrpt == 1) - _show_summary_v1(rtwdev, m); + p += _show_summary_v1(rtwdev, p, end - p); else if (ver->fcxbtcrpt == 4) - _show_summary_v4(rtwdev, m); + p += _show_summary_v4(rtwdev, p, end - p); else if (ver->fcxbtcrpt == 5) - _show_summary_v5(rtwdev, m); + p += _show_summary_v5(rtwdev, p, end - p); else if (ver->fcxbtcrpt == 105) - _show_summary_v105(rtwdev, m); + p += _show_summary_v105(rtwdev, p, end - p); else if (ver->fcxbtcrpt == 7) - _show_summary_v7(rtwdev, m); + p += _show_summary_v7(rtwdev, p, end - p); else if (ver->fcxbtcrpt == 8) - _show_summary_v8(rtwdev, m); + p += _show_summary_v8(rtwdev, p, end - p); + + return p - buf; } void rtw89_coex_recognize_ver(struct rtw89_dev *rtwdev) @@ -11037,3 +11225,24 @@ void rtw89_coex_recognize_ver(struct rtw89_dev *rtwdev) rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC] use version def[%d] = 0x%08x\n", (int)(btc->ver - rtw89_btc_ver_defs), btc->ver->fw_ver_code); } + +void rtw89_btc_ntfy_preserve_bt_time(struct rtw89_dev *rtwdev, u32 ms) +{ + struct rtw89_btc_bt_link_info *bt_linfo = &rtwdev->btc.cx.bt.link_info; + struct rtw89_btc_bt_a2dp_desc a2dp = bt_linfo->a2dp_desc; + + if (test_bit(RTW89_FLAG_SER_HANDLING, rtwdev->flags)) + return; + + if (!a2dp.exist) + return; + + fsleep(ms * 1000); +} +EXPORT_SYMBOL(rtw89_btc_ntfy_preserve_bt_time); + +void rtw89_btc_ntfy_conn_rfk(struct rtw89_dev *rtwdev, bool state) +{ + rtwdev->btc.cx.wl.rfk_info.con_rfk = state; +} +EXPORT_SYMBOL(rtw89_btc_ntfy_conn_rfk); diff --git a/drivers/net/wireless/realtek/rtw89/coex.h b/drivers/net/wireless/realtek/rtw89/coex.h index dbdb56e063ef0..e3a1fcd796201 100644 --- a/drivers/net/wireless/realtek/rtw89/coex.h +++ b/drivers/net/wireless/realtek/rtw89/coex.h @@ -267,10 +267,10 @@ void rtw89_btc_ntfy_scan_finish(struct rtw89_dev *rtwdev, u8 phy_idx); void 
rtw89_btc_ntfy_switch_band(struct rtw89_dev *rtwdev, u8 phy_idx, u8 band); void rtw89_btc_ntfy_specific_packet(struct rtw89_dev *rtwdev, enum btc_pkt_type pkt_type); -void rtw89_btc_ntfy_eapol_packet_work(struct work_struct *work); -void rtw89_btc_ntfy_arp_packet_work(struct work_struct *work); -void rtw89_btc_ntfy_dhcp_packet_work(struct work_struct *work); -void rtw89_btc_ntfy_icmp_packet_work(struct work_struct *work); +void rtw89_btc_ntfy_eapol_packet_work(struct wiphy *wiphy, struct wiphy_work *work); +void rtw89_btc_ntfy_arp_packet_work(struct wiphy *wiphy, struct wiphy_work *work); +void rtw89_btc_ntfy_dhcp_packet_work(struct wiphy *wiphy, struct wiphy_work *work); +void rtw89_btc_ntfy_icmp_packet_work(struct wiphy *wiphy, struct wiphy_work *work); void rtw89_btc_ntfy_role_info(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link, struct rtw89_sta_link *rtwsta_link, @@ -282,14 +282,16 @@ void rtw89_btc_ntfy_wl_rfk(struct rtw89_dev *rtwdev, u8 phy_map, void rtw89_btc_ntfy_wl_sta(struct rtw89_dev *rtwdev); void rtw89_btc_c2h_handle(struct rtw89_dev *rtwdev, struct sk_buff *skb, u32 len, u8 class, u8 func); -void rtw89_btc_dump_info(struct rtw89_dev *rtwdev, struct seq_file *m); -void rtw89_coex_act1_work(struct work_struct *work); -void rtw89_coex_bt_devinfo_work(struct work_struct *work); -void rtw89_coex_rfk_chk_work(struct work_struct *work); +ssize_t rtw89_btc_dump_info(struct rtw89_dev *rtwdev, char *buf, size_t bufsz); +void rtw89_coex_act1_work(struct wiphy *wiphy, struct wiphy_work *work); +void rtw89_coex_bt_devinfo_work(struct wiphy *wiphy, struct wiphy_work *work); +void rtw89_coex_rfk_chk_work(struct wiphy *wiphy, struct wiphy_work *work); void rtw89_coex_power_on(struct rtw89_dev *rtwdev); void rtw89_btc_set_policy(struct rtw89_dev *rtwdev, u16 policy_type); void rtw89_btc_set_policy_v1(struct rtw89_dev *rtwdev, u16 policy_type); void rtw89_coex_recognize_ver(struct rtw89_dev *rtwdev); +void rtw89_btc_ntfy_preserve_bt_time(struct rtw89_dev *rtwdev, u32 ms); +void rtw89_btc_ntfy_conn_rfk(struct rtw89_dev *rtwdev, bool state); static inline u8 rtw89_btc_phymap(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, diff --git a/drivers/net/wireless/realtek/rtw89/core.c b/drivers/net/wireless/realtek/rtw89/core.c index 85f739f1173d8..cc9b014457ace 100644 --- a/drivers/net/wireless/realtek/rtw89/core.c +++ b/drivers/net/wireless/realtek/rtw89/core.c @@ -913,16 +913,17 @@ static enum btc_pkt_type rtw89_core_tx_btc_spec_pkt_notify(struct rtw89_dev *rtwdev, struct rtw89_core_tx_request *tx_req) { + struct wiphy *wiphy = rtwdev->hw->wiphy; struct sk_buff *skb = tx_req->skb; struct udphdr *udphdr; if (IEEE80211_SKB_CB(skb)->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO) { - ieee80211_queue_work(rtwdev->hw, &rtwdev->btc.eapol_notify_work); + wiphy_work_queue(wiphy, &rtwdev->btc.eapol_notify_work); return PACKET_EAPOL; } if (skb->protocol == htons(ETH_P_ARP)) { - ieee80211_queue_work(rtwdev->hw, &rtwdev->btc.arp_notify_work); + wiphy_work_queue(wiphy, &rtwdev->btc.arp_notify_work); return PACKET_ARP; } @@ -932,14 +933,14 @@ rtw89_core_tx_btc_spec_pkt_notify(struct rtw89_dev *rtwdev, if (((udphdr->source == htons(67) && udphdr->dest == htons(68)) || (udphdr->source == htons(68) && udphdr->dest == htons(67))) && skb->len > 282) { - ieee80211_queue_work(rtwdev->hw, &rtwdev->btc.dhcp_notify_work); + wiphy_work_queue(wiphy, &rtwdev->btc.dhcp_notify_work); return PACKET_DHCP; } } if (skb->protocol == htons(ETH_P_IP) && ip_hdr(skb)->protocol == IPPROTO_ICMP) { - 
ieee80211_queue_work(rtwdev->hw, &rtwdev->btc.icmp_notify_work); + wiphy_work_queue(wiphy, &rtwdev->btc.icmp_notify_work); return PACKET_ICMP; } @@ -2071,17 +2072,17 @@ static void rtw89_stats_trigger_frame(struct rtw89_dev *rtwdev, } } -static void rtw89_cancel_6ghz_probe_work(struct work_struct *work) +static void rtw89_cancel_6ghz_probe_work(struct wiphy *wiphy, struct wiphy_work *work) { struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev, cancel_6ghz_probe_work); struct list_head *pkt_list = rtwdev->scan_info.pkt_list; struct rtw89_pktofld_info *info; - mutex_lock(&rtwdev->mutex); + lockdep_assert_wiphy(wiphy); if (!rtwdev->scanning) - goto out; + return; list_for_each_entry(info, &pkt_list[NL80211_BAND_6GHZ], list) { if (!info->cancel || !test_bit(info->id, rtwdev->pkt_offload)) @@ -2094,9 +2095,6 @@ static void rtw89_cancel_6ghz_probe_work(struct work_struct *work) * since if during scanning, pkt_list is accessed in bottom half. */ } - -out: - mutex_unlock(&rtwdev->mutex); } static void rtw89_core_cancel_6ghz_probe_tx(struct rtw89_dev *rtwdev, @@ -2131,7 +2129,7 @@ static void rtw89_core_cancel_6ghz_probe_tx(struct rtw89_dev *rtwdev, } if (queue_work) - ieee80211_queue_work(rtwdev->hw, &rtwdev->cancel_6ghz_probe_work); + wiphy_work_queue(rtwdev->hw->wiphy, &rtwdev->cancel_6ghz_probe_work); } static void rtw89_vif_sync_bcn_tsf(struct rtw89_vif_link *rtwvif_link, @@ -2194,8 +2192,11 @@ static void rtw89_vif_rx_stats_iter(void *data, u8 *mac, } pkt_stat->beacon_nr++; - if (phy_ppdu) + if (phy_ppdu) { ewma_rssi_add(&rtwdev->phystat.bcn_rssi, phy_ppdu->rssi_avg); + if (!test_bit(RTW89_FLAG_LOW_POWER_MODE, rtwdev->flags)) + rtwvif_link->bcn_bw_idx = phy_ppdu->bw_idx; + } pkt_stat->beacon_rate = desc_info->data_rate; } @@ -2381,6 +2382,49 @@ static void rtw89_core_validate_rx_signal(struct ieee80211_rx_status *rx_status) rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL; } +static void rtw89_core_update_rx_freq_from_ie(struct rtw89_dev *rtwdev, + struct sk_buff *skb, + struct ieee80211_rx_status *rx_status) +{ + struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data; + size_t hdr_len, ielen; + u8 *variable; + int chan; + + if (!rtwdev->chip->rx_freq_frome_ie) + return; + + if (!rtwdev->scanning) + return; + + if (ieee80211_is_beacon(mgmt->frame_control)) { + variable = mgmt->u.beacon.variable; + hdr_len = offsetof(struct ieee80211_mgmt, + u.beacon.variable); + } else if (ieee80211_is_probe_resp(mgmt->frame_control)) { + variable = mgmt->u.probe_resp.variable; + hdr_len = offsetof(struct ieee80211_mgmt, + u.probe_resp.variable); + } else { + return; + } + + if (skb->len > hdr_len) + ielen = skb->len - hdr_len; + else + return; + + /* The parsing code for both 2GHz and 5GHz bands is the same in this + * function. + */ + chan = cfg80211_get_ies_channel_number(variable, ielen, NL80211_BAND_2GHZ); + if (chan == -1) + return; + + rx_status->band = chan > 14 ? RTW89_BAND_5G : RTW89_BAND_2G; + rx_status->freq = ieee80211_channel_to_frequency(chan, rx_status->band); +} + static void rtw89_core_rx_to_mac80211(struct rtw89_dev *rtwdev, struct rtw89_rx_phy_ppdu *phy_ppdu, struct rtw89_rx_desc_info *desc_info, @@ -2398,6 +2442,7 @@ static void rtw89_core_rx_to_mac80211(struct rtw89_dev *rtwdev, rtw89_core_update_rx_status_by_ppdu(rtwdev, rx_status, phy_ppdu); rtw89_core_update_radiotap(rtwdev, skb_ppdu, rx_status); rtw89_core_validate_rx_signal(rx_status); + rtw89_core_update_rx_freq_from_ie(rtwdev, skb_ppdu, rx_status); /* In low power mode, it does RX in thread context. 
*/ local_bh_disable(); @@ -3143,13 +3188,14 @@ static void rtw89_core_txq_schedule(struct rtw89_dev *rtwdev, u8 ac, bool *reinv ieee80211_txq_schedule_end(hw, ac); } -static void rtw89_ips_work(struct work_struct *work) +static void rtw89_ips_work(struct wiphy *wiphy, struct wiphy_work *work) { struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev, ips_work); - mutex_lock(&rtwdev->mutex); + + lockdep_assert_wiphy(wiphy); + rtw89_enter_ips_by_hwflags(rtwdev); - mutex_unlock(&rtwdev->mutex); } static void rtw89_core_txq_work(struct work_struct *w) @@ -3291,7 +3337,7 @@ void rtw89_roc_start(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) u32 reg; int ret; - lockdep_assert_held(&rtwdev->mutex); + lockdep_assert_wiphy(hw->wiphy); rtw89_leave_ips_by_hwflags(rtwdev); rtw89_leave_lps(rtwdev); @@ -3330,9 +3376,9 @@ void rtw89_roc_start(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) rtw89_write32_clr(rtwdev, reg, B_AX_A_UC_CAM_MATCH | B_AX_A_BC_CAM_MATCH); ieee80211_ready_on_channel(hw); - cancel_delayed_work(&rtwvif->roc.roc_work); - ieee80211_queue_delayed_work(hw, &rtwvif->roc.roc_work, - msecs_to_jiffies(rtwvif->roc.duration)); + wiphy_delayed_work_cancel(hw->wiphy, &rtwvif->roc.roc_work); + wiphy_delayed_work_queue(hw->wiphy, &rtwvif->roc.roc_work, + msecs_to_jiffies(rtwvif->roc.duration)); } void rtw89_roc_end(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) @@ -3345,7 +3391,7 @@ void rtw89_roc_end(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) u32 reg; int ret; - lockdep_assert_held(&rtwdev->mutex); + lockdep_assert_wiphy(hw->wiphy); ieee80211_remain_on_channel_expired(hw); @@ -3377,18 +3423,18 @@ void rtw89_roc_end(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) queue_work(rtwdev->txq_wq, &rtwdev->txq_work); if (hw->conf.flags & IEEE80211_CONF_IDLE) - ieee80211_queue_delayed_work(hw, &roc->roc_work, - msecs_to_jiffies(RTW89_ROC_IDLE_TIMEOUT)); + wiphy_delayed_work_queue(hw->wiphy, &roc->roc_work, + msecs_to_jiffies(RTW89_ROC_IDLE_TIMEOUT)); } -void rtw89_roc_work(struct work_struct *work) +void rtw89_roc_work(struct wiphy *wiphy, struct wiphy_work *work) { struct rtw89_vif *rtwvif = container_of(work, struct rtw89_vif, roc.roc_work.work); struct rtw89_dev *rtwdev = rtwvif->rtwdev; struct rtw89_roc *roc = &rtwvif->roc; - mutex_lock(&rtwdev->mutex); + lockdep_assert_wiphy(wiphy); switch (roc->state) { case RTW89_ROC_IDLE: @@ -3401,8 +3447,6 @@ void rtw89_roc_work(struct work_struct *work) default: break; } - - mutex_unlock(&rtwdev->mutex); } static enum rtw89_tfc_lv rtw89_get_traffic_level(struct rtw89_dev *rtwdev, @@ -3533,26 +3577,26 @@ void rtw89_traffic_stats_init(struct rtw89_dev *rtwdev, ewma_tp_init(&stats->rx_ewma_tp); } -static void rtw89_track_work(struct work_struct *work) +static void rtw89_track_work(struct wiphy *wiphy, struct wiphy_work *work) { struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev, track_work.work); bool tfc_changed; + lockdep_assert_wiphy(wiphy); + if (test_bit(RTW89_FLAG_FORBIDDEN_TRACK_WROK, rtwdev->flags)) return; - mutex_lock(&rtwdev->mutex); - if (!test_bit(RTW89_FLAG_RUNNING, rtwdev->flags)) - goto out; + return; - ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->track_work, - RTW89_TRACK_WORK_PERIOD); + wiphy_delayed_work_queue(wiphy, &rtwdev->track_work, + RTW89_TRACK_WORK_PERIOD); tfc_changed = rtw89_traffic_stats_track(rtwdev); if (rtwdev->scanning) - goto out; + return; rtw89_leave_lps(rtwdev); @@ -3577,9 +3621,6 @@ static void rtw89_track_work(struct work_struct *work) if (rtwdev->lps_enabled && 
!rtwdev->btc.lps) rtw89_enter_lps_track(rtwdev); - -out: - mutex_unlock(&rtwdev->mutex); } u8 rtw89_core_acquire_bit_map(unsigned long *addr, unsigned long size) @@ -3613,7 +3654,7 @@ int rtw89_core_acquire_sta_ba_entry(struct rtw89_dev *rtwdev, u8 idx; int i; - lockdep_assert_held(&rtwdev->mutex); + lockdep_assert_wiphy(rtwdev->hw->wiphy); idx = rtw89_core_acquire_bit_map(cam_info->ba_cam_map, chip->bacam_num); if (idx == chip->bacam_num) { @@ -3657,7 +3698,7 @@ int rtw89_core_release_sta_ba_entry(struct rtw89_dev *rtwdev, struct rtw89_ba_cam_entry *entry = NULL, *tmp; u8 idx; - lockdep_assert_held(&rtwdev->mutex); + lockdep_assert_wiphy(rtwdev->hw->wiphy); list_for_each_entry_safe(entry, tmp, &rtwsta_link->ba_cam_list, list) { if (entry->tid != tid) @@ -4432,26 +4473,26 @@ static void rtw89_core_ppdu_sts_init(struct rtw89_dev *rtwdev) { int i; - for (i = 0; i < RTW89_PHY_MAX; i++) + for (i = 0; i < RTW89_PHY_NUM; i++) skb_queue_head_init(&rtwdev->ppdu_sts.rx_queue[i]); - for (i = 0; i < RTW89_PHY_MAX; i++) + for (i = 0; i < RTW89_PHY_NUM; i++) rtwdev->ppdu_sts.curr_rx_ppdu_cnt[i] = U8_MAX; } -void rtw89_core_update_beacon_work(struct work_struct *work) +void rtw89_core_update_beacon_work(struct wiphy *wiphy, struct wiphy_work *work) { struct rtw89_dev *rtwdev; struct rtw89_vif_link *rtwvif_link = container_of(work, struct rtw89_vif_link, update_beacon_work); + lockdep_assert_wiphy(wiphy); + if (rtwvif_link->net_type != RTW89_NET_TYPE_AP_MODE) return; rtwdev = rtwvif_link->rtwvif->rtwdev; - mutex_lock(&rtwdev->mutex); rtw89_chip_h2c_update_beacon(rtwdev, rtwvif_link); - mutex_unlock(&rtwdev->mutex); } int rtw89_wait_for_cond(struct rtw89_wait_info *wait, unsigned int cond) @@ -4560,16 +4601,14 @@ int rtw89_core_start(struct rtw89_dev *rtwdev) rtw89_mac_cfg_phy_rpt_bands(rtwdev, true); rtw89_mac_update_rts_threshold(rtwdev); - rtw89_tas_reset(rtwdev); - ret = rtw89_hci_start(rtwdev); if (ret) { rtw89_err(rtwdev, "failed to start hci\n"); return ret; } - ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->track_work, - RTW89_TRACK_WORK_PERIOD); + wiphy_delayed_work_queue(rtwdev->hw->wiphy, &rtwdev->track_work, + RTW89_TRACK_WORK_PERIOD); set_bit(RTW89_FLAG_RUNNING, rtwdev->flags); @@ -4583,8 +4622,11 @@ int rtw89_core_start(struct rtw89_dev *rtwdev) void rtw89_core_stop(struct rtw89_dev *rtwdev) { + struct wiphy *wiphy = rtwdev->hw->wiphy; struct rtw89_btc *btc = &rtwdev->btc; + lockdep_assert_wiphy(wiphy); + /* Prvent to stop twice; enter_ips and ops_stop */ if (!test_bit(RTW89_FLAG_RUNNING, rtwdev->flags)) return; @@ -4593,25 +4635,21 @@ void rtw89_core_stop(struct rtw89_dev *rtwdev) clear_bit(RTW89_FLAG_RUNNING, rtwdev->flags); - mutex_unlock(&rtwdev->mutex); - - cancel_work_sync(&rtwdev->c2h_work); - cancel_work_sync(&rtwdev->cancel_6ghz_probe_work); - cancel_work_sync(&btc->eapol_notify_work); - cancel_work_sync(&btc->arp_notify_work); - cancel_work_sync(&btc->dhcp_notify_work); - cancel_work_sync(&btc->icmp_notify_work); + wiphy_work_cancel(wiphy, &rtwdev->c2h_work); + wiphy_work_cancel(wiphy, &rtwdev->cancel_6ghz_probe_work); + wiphy_work_cancel(wiphy, &btc->eapol_notify_work); + wiphy_work_cancel(wiphy, &btc->arp_notify_work); + wiphy_work_cancel(wiphy, &btc->dhcp_notify_work); + wiphy_work_cancel(wiphy, &btc->icmp_notify_work); cancel_delayed_work_sync(&rtwdev->txq_reinvoke_work); - cancel_delayed_work_sync(&rtwdev->track_work); - cancel_delayed_work_sync(&rtwdev->chanctx_work); - cancel_delayed_work_sync(&rtwdev->coex_act1_work); - 
cancel_delayed_work_sync(&rtwdev->coex_bt_devinfo_work); - cancel_delayed_work_sync(&rtwdev->coex_rfk_chk_work); - cancel_delayed_work_sync(&rtwdev->cfo_track_work); + wiphy_delayed_work_cancel(wiphy, &rtwdev->track_work); + wiphy_delayed_work_cancel(wiphy, &rtwdev->chanctx_work); + wiphy_delayed_work_cancel(wiphy, &rtwdev->coex_act1_work); + wiphy_delayed_work_cancel(wiphy, &rtwdev->coex_bt_devinfo_work); + wiphy_delayed_work_cancel(wiphy, &rtwdev->coex_rfk_chk_work); + wiphy_delayed_work_cancel(wiphy, &rtwdev->cfo_track_work); cancel_delayed_work_sync(&rtwdev->forbid_ba_work); - cancel_delayed_work_sync(&rtwdev->antdiv_work); - - mutex_lock(&rtwdev->mutex); + wiphy_delayed_work_cancel(wiphy, &rtwdev->antdiv_work); rtw89_btc_ntfy_poweroff(rtwdev); rtw89_hci_flush_queues(rtwdev, BIT(rtwdev->hw->queues) - 1, true); @@ -4825,20 +4863,19 @@ int rtw89_core_init(struct rtw89_dev *rtwdev) INIT_WORK(&rtwdev->ba_work, rtw89_core_ba_work); INIT_WORK(&rtwdev->txq_work, rtw89_core_txq_work); INIT_DELAYED_WORK(&rtwdev->txq_reinvoke_work, rtw89_core_txq_reinvoke_work); - INIT_DELAYED_WORK(&rtwdev->track_work, rtw89_track_work); - INIT_DELAYED_WORK(&rtwdev->chanctx_work, rtw89_chanctx_work); - INIT_DELAYED_WORK(&rtwdev->coex_act1_work, rtw89_coex_act1_work); - INIT_DELAYED_WORK(&rtwdev->coex_bt_devinfo_work, rtw89_coex_bt_devinfo_work); - INIT_DELAYED_WORK(&rtwdev->coex_rfk_chk_work, rtw89_coex_rfk_chk_work); - INIT_DELAYED_WORK(&rtwdev->cfo_track_work, rtw89_phy_cfo_track_work); + wiphy_delayed_work_init(&rtwdev->track_work, rtw89_track_work); + wiphy_delayed_work_init(&rtwdev->chanctx_work, rtw89_chanctx_work); + wiphy_delayed_work_init(&rtwdev->coex_act1_work, rtw89_coex_act1_work); + wiphy_delayed_work_init(&rtwdev->coex_bt_devinfo_work, rtw89_coex_bt_devinfo_work); + wiphy_delayed_work_init(&rtwdev->coex_rfk_chk_work, rtw89_coex_rfk_chk_work); + wiphy_delayed_work_init(&rtwdev->cfo_track_work, rtw89_phy_cfo_track_work); INIT_DELAYED_WORK(&rtwdev->forbid_ba_work, rtw89_forbid_ba_work); - INIT_DELAYED_WORK(&rtwdev->antdiv_work, rtw89_phy_antdiv_work); + wiphy_delayed_work_init(&rtwdev->antdiv_work, rtw89_phy_antdiv_work); rtwdev->txq_wq = alloc_workqueue("rtw89_tx_wq", WQ_UNBOUND | WQ_HIGHPRI, 0); if (!rtwdev->txq_wq) return -ENOMEM; spin_lock_init(&rtwdev->ba_lock); spin_lock_init(&rtwdev->rpwm_lock); - mutex_init(&rtwdev->mutex); mutex_init(&rtwdev->rf_mutex); rtwdev->total_sta_assoc = 0; @@ -4847,10 +4884,10 @@ int rtw89_core_init(struct rtw89_dev *rtwdev) rtw89_init_wait(&rtwdev->wow.wait); rtw89_init_wait(&rtwdev->mac.ps_wait); - INIT_WORK(&rtwdev->c2h_work, rtw89_fw_c2h_work); - INIT_WORK(&rtwdev->ips_work, rtw89_ips_work); + wiphy_work_init(&rtwdev->c2h_work, rtw89_fw_c2h_work); + wiphy_work_init(&rtwdev->ips_work, rtw89_ips_work); + wiphy_work_init(&rtwdev->cancel_6ghz_probe_work, rtw89_cancel_6ghz_probe_work); INIT_WORK(&rtwdev->load_firmware_work, rtw89_load_firmware_work); - INIT_WORK(&rtwdev->cancel_6ghz_probe_work, rtw89_cancel_6ghz_probe_work); skb_queue_head_init(&rtwdev->c2h_queue); rtw89_core_ppdu_sts_init(rtwdev); @@ -4867,10 +4904,13 @@ int rtw89_core_init(struct rtw89_dev *rtwdev) rtwdev->mlo_dbcc_mode = MLO_2_PLUS_0_1RF; } - INIT_WORK(&btc->eapol_notify_work, rtw89_btc_ntfy_eapol_packet_work); - INIT_WORK(&btc->arp_notify_work, rtw89_btc_ntfy_arp_packet_work); - INIT_WORK(&btc->dhcp_notify_work, rtw89_btc_ntfy_dhcp_packet_work); - INIT_WORK(&btc->icmp_notify_work, rtw89_btc_ntfy_icmp_packet_work); + rtwdev->bbs[RTW89_PHY_0].phy_idx = RTW89_PHY_0; + 
rtwdev->bbs[RTW89_PHY_1].phy_idx = RTW89_PHY_1; + + wiphy_work_init(&btc->eapol_notify_work, rtw89_btc_ntfy_eapol_packet_work); + wiphy_work_init(&btc->arp_notify_work, rtw89_btc_ntfy_arp_packet_work); + wiphy_work_init(&btc->dhcp_notify_work, rtw89_btc_ntfy_dhcp_packet_work); + wiphy_work_init(&btc->icmp_notify_work, rtw89_btc_ntfy_icmp_packet_work); init_completion(&rtwdev->fw.req.completion); init_completion(&rtwdev->rfk_wait.completion); @@ -4890,11 +4930,10 @@ void rtw89_core_deinit(struct rtw89_dev *rtwdev) { rtw89_ser_deinit(rtwdev); rtw89_unload_firmware(rtwdev); - rtw89_fw_free_all_early_h2c(rtwdev); + __rtw89_fw_free_all_early_h2c(rtwdev); destroy_workqueue(rtwdev->txq_wq); mutex_destroy(&rtwdev->rf_mutex); - mutex_destroy(&rtwdev->mutex); } EXPORT_SYMBOL(rtw89_core_deinit); @@ -4903,6 +4942,7 @@ void rtw89_core_scan_start(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwv { const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, rtwvif_link->chanctx_idx); + struct rtw89_bb_ctx *bb = rtw89_get_bb_ctx(rtwdev, rtwvif_link->phy_idx); rtwdev->scanning = true; rtw89_leave_lps(rtwdev); @@ -4913,7 +4953,8 @@ void rtw89_core_scan_start(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwv rtw89_btc_ntfy_scan_start(rtwdev, rtwvif_link->phy_idx, chan->band_type); rtw89_chip_rfk_scan(rtwdev, rtwvif_link, true); rtw89_hci_recalc_int_mit(rtwdev); - rtw89_phy_config_edcca(rtwdev, true); + rtw89_phy_config_edcca(rtwdev, bb, true); + rtw89_tas_scan(rtwdev, true); rtw89_fw_h2c_cam(rtwdev, rtwvif_link, NULL, mac_addr); } @@ -4922,6 +4963,7 @@ void rtw89_core_scan_complete(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link, bool hw_scan) { struct ieee80211_bss_conf *bss_conf; + struct rtw89_bb_ctx *bb; if (!rtwvif_link) return; @@ -4937,12 +4979,15 @@ void rtw89_core_scan_complete(struct rtw89_dev *rtwdev, rtw89_chip_rfk_scan(rtwdev, rtwvif_link, false); rtw89_btc_ntfy_scan_finish(rtwdev, rtwvif_link->phy_idx); - rtw89_phy_config_edcca(rtwdev, false); + bb = rtw89_get_bb_ctx(rtwdev, rtwvif_link->phy_idx); + rtw89_phy_config_edcca(rtwdev, bb, false); + rtw89_tas_scan(rtwdev, false); rtwdev->scanning = false; - rtwdev->dig.bypass_dig = true; + rtw89_for_each_active_bb(rtwdev, bb) + bb->dig.bypass_dig = true; if (hw_scan && (rtwdev->hw->conf.flags & IEEE80211_CONF_IDLE)) - ieee80211_queue_work(rtwdev->hw, &rtwdev->ips_work); + wiphy_work_queue(rtwdev->hw->wiphy, &rtwdev->ips_work); } static void rtw89_read_chip_ver(struct rtw89_dev *rtwdev) @@ -5042,8 +5087,6 @@ static int rtw89_chip_efuse_info_setup(struct rtw89_dev *rtwdev) rtw89_hci_mac_pre_deinit(rtwdev); - rtw89_mac_pwr_off(rtwdev); - return 0; } @@ -5124,36 +5167,45 @@ int rtw89_chip_info_setup(struct rtw89_dev *rtwdev) rtw89_read_chip_ver(rtwdev); + ret = rtw89_mac_pwr_on(rtwdev); + if (ret) { + rtw89_err(rtwdev, "failed to power on\n"); + return ret; + } + ret = rtw89_wait_firmware_completion(rtwdev); if (ret) { rtw89_err(rtwdev, "failed to wait firmware completion\n"); - return ret; + goto out; } ret = rtw89_fw_recognize(rtwdev); if (ret) { rtw89_err(rtwdev, "failed to recognize firmware\n"); - return ret; + goto out; } ret = rtw89_chip_efuse_info_setup(rtwdev); if (ret) - return ret; + goto out; ret = rtw89_fw_recognize_elements(rtwdev); if (ret) { rtw89_err(rtwdev, "failed to recognize firmware elements\n"); - return ret; + goto out; } ret = rtw89_chip_board_info_setup(rtwdev); if (ret) - return ret; + goto out; rtw89_core_setup_rfe_parms(rtwdev); rtwdev->ps_mode = rtw89_update_ps_mode(rtwdev); - return 0; +out: + 
rtw89_mac_pwr_off(rtwdev); + + return ret; } EXPORT_SYMBOL(rtw89_chip_info_setup); @@ -5296,7 +5348,7 @@ static int rtw89_core_register_hw(struct rtw89_dev *rtwdev) goto err_free_supported_band; } - ret = rtw89_regd_init(rtwdev, rtw89_regd_notifier); + ret = rtw89_regd_init_hint(rtwdev); if (ret) { rtw89_err(rtwdev, "failed to init regd\n"); goto err_unregister_hw; diff --git a/drivers/net/wireless/realtek/rtw89/core.h b/drivers/net/wireless/realtek/rtw89/core.h index ff4894c7fa8a5..4be05d6cad187 100644 --- a/drivers/net/wireless/realtek/rtw89/core.h +++ b/drivers/net/wireless/realtek/rtw89/core.h @@ -17,11 +17,13 @@ struct rtw89_dev; struct rtw89_pci_info; struct rtw89_mac_gen_def; struct rtw89_phy_gen_def; +struct rtw89_fw_blacklist; struct rtw89_efuse_block_cfg; struct rtw89_h2c_rf_tssi; struct rtw89_fw_txpwr_track_cfg; struct rtw89_phy_rfk_log_fmt; struct rtw89_debugfs; +struct rtw89_regd_data; extern const struct ieee80211_ops rtw89_ops; @@ -718,6 +720,7 @@ enum rtw89_ofdma_type { RTW89_OFDMA_NUM, }; +/* neither insert new in the middle, nor change any given definition */ enum rtw89_regulation_type { RTW89_WW = 0, RTW89_ETSI = 1, @@ -826,7 +829,7 @@ enum rtw89_mac_idx { enum rtw89_phy_idx { RTW89_PHY_0 = 0, RTW89_PHY_1 = 1, - RTW89_PHY_MAX + RTW89_PHY_NUM, }; #define __RTW89_MLD_MAX_LINK_NUM 2 @@ -1540,16 +1543,16 @@ struct rtw89_btc_u8_sta_chg { }; struct rtw89_btc_wl_scan_info { - u8 band[RTW89_PHY_MAX]; + u8 band[RTW89_PHY_NUM]; u8 phy_map; u8 rsvd; }; struct rtw89_btc_wl_dbcc_info { - u8 op_band[RTW89_PHY_MAX]; /* op band in each phy */ - u8 scan_band[RTW89_PHY_MAX]; /* scan band in each phy */ - u8 real_band[RTW89_PHY_MAX]; - u8 role[RTW89_PHY_MAX]; /* role in each phy */ + u8 op_band[RTW89_PHY_NUM]; /* op band in each phy */ + u8 scan_band[RTW89_PHY_NUM]; /* scan band in each phy */ + u8 real_band[RTW89_PHY_NUM]; + u8 role[RTW89_PHY_NUM]; /* role in each phy */ }; struct rtw89_btc_wl_active_role { @@ -1761,7 +1764,8 @@ struct rtw89_btc_wl_rfk_info { u32 phy_map: 2; u32 band: 2; u32 type: 8; - u32 rsvd: 14; + u32 con_rfk: 1; + u32 rsvd: 13; u32 start_time; u32 proc_time; @@ -1898,7 +1902,7 @@ struct rtw89_btc_wl_info { u8 cn_report; u8 coex_mode; u8 pta_req_mac; - u8 bt_polut_type[RTW89_PHY_MAX]; /* BT polluted WL-Tx type for phy0/1 */ + u8 bt_polut_type[RTW89_PHY_NUM]; /* BT polluted WL-Tx type for phy0/1 */ bool is_5g_hi_channel; bool pta_reg_mac_chg; @@ -2230,7 +2234,7 @@ struct rtw89_btc_fbtc_rpt_ctrl_v4 { struct rtw89_btc_fbtc_rpt_ctrl_wl_fw_info wl_fw_info; struct rtw89_btc_fbtc_rpt_ctrl_bt_mailbox bt_mbx_info; __le32 bt_cnt[BTC_BCNT_STA_MAX]; - struct rtw89_mac_ax_gnt gnt_val[RTW89_PHY_MAX]; + struct rtw89_mac_ax_gnt gnt_val[RTW89_PHY_NUM]; } __packed; struct rtw89_btc_fbtc_rpt_ctrl_v5 { @@ -2238,7 +2242,7 @@ struct rtw89_btc_fbtc_rpt_ctrl_v5 { u8 rsvd; __le16 rsvd1; - u8 gnt_val[RTW89_PHY_MAX][4]; + u8 gnt_val[RTW89_PHY_NUM][4]; __le16 bt_cnt[BTC_BCNT_STA_MAX]; struct rtw89_btc_fbtc_rpt_ctrl_info_v5 rpt_info; @@ -2250,7 +2254,7 @@ struct rtw89_btc_fbtc_rpt_ctrl_v105 { u8 rsvd; __le16 rsvd1; - u8 gnt_val[RTW89_PHY_MAX][4]; + u8 gnt_val[RTW89_PHY_NUM][4]; __le16 bt_cnt[BTC_BCNT_STA_MAX_V105]; struct rtw89_btc_fbtc_rpt_ctrl_info_v5 rpt_info; @@ -2263,7 +2267,7 @@ struct rtw89_btc_fbtc_rpt_ctrl_v7 { u8 rsvd1; u8 rsvd2; - u8 gnt_val[RTW89_PHY_MAX][4]; + u8 gnt_val[RTW89_PHY_NUM][4]; __le16 bt_cnt[BTC_BCNT_STA_MAX_V105]; struct rtw89_btc_fbtc_rpt_ctrl_info_v8 rpt_info; @@ -2276,7 +2280,7 @@ struct rtw89_btc_fbtc_rpt_ctrl_v8 { u8 rpt_len_max_l; /* BTC_RPT_MAX bit0~7 */ u8 
rpt_len_max_h; /* BTC_RPT_MAX bit8~15 */ - u8 gnt_val[RTW89_PHY_MAX][4]; + u8 gnt_val[RTW89_PHY_NUM][4]; __le16 bt_cnt[BTC_BCNT_STA_MAX_V105]; struct rtw89_btc_fbtc_rpt_ctrl_info_v8 rpt_info; @@ -3040,6 +3044,7 @@ struct rtw89_btc_rpt_cmn_info { union rtw89_btc_fbtc_btafh_info { struct rtw89_btc_fbtc_btafh v1; struct rtw89_btc_fbtc_btafh_v2 v2; + struct rtw89_btc_fbtc_btafh_v7 v7; }; struct rtw89_btc_report_ctrl_state { @@ -3174,10 +3179,10 @@ struct rtw89_btc { struct rtw89_btc_btf_fwinfo fwinfo; struct rtw89_btc_dbg dbg; - struct work_struct eapol_notify_work; - struct work_struct arp_notify_work; - struct work_struct dhcp_notify_work; - struct work_struct icmp_notify_work; + struct wiphy_work eapol_notify_work; + struct wiphy_work arp_notify_work; + struct wiphy_work dhcp_notify_work; + struct wiphy_work icmp_notify_work; u32 bt_req_len; @@ -3444,7 +3449,7 @@ enum rtw89_roc_state { struct rtw89_roc { struct ieee80211_channel chan; - struct delayed_work roc_work; + struct wiphy_delayed_work roc_work; enum ieee80211_roc_type type; enum rtw89_roc_state state; int duration; @@ -3498,6 +3503,7 @@ struct rtw89_vif_link { u8 self_role; u8 wmm; u8 bcn_hit_cond; + u8 bcn_bw_idx; u8 hit_rule; u8 last_noa_nr; u64 sync_bcn_tsf; @@ -3514,7 +3520,7 @@ struct rtw89_vif_link { bool pre_pwr_diff_en; bool pwr_diff_en; u8 def_tri_idx; - struct work_struct update_beacon_work; + struct wiphy_work update_beacon_work; struct rtw89_addr_cam_entry addr_cam; struct rtw89_bssid_cam_entry bssid_cam; struct ieee80211_tx_queue_params tx_params[IEEE80211_NUM_ACS]; @@ -3672,6 +3678,8 @@ struct rtw89_chip_ops { int (*h2c_ampdu_cmac_tbl)(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link, struct rtw89_sta_link *rtwsta_link); + int (*h2c_txtime_cmac_tbl)(struct rtw89_dev *rtwdev, + struct rtw89_sta_link *rtwsta_link); int (*h2c_default_dmac_tbl)(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link, struct rtw89_sta_link *rtwsta_link); @@ -4189,10 +4197,12 @@ struct rtw89_edcca_regs { u32 edcca_p_mask; u32 ppdu_level; u32 ppdu_mask; - u32 rpt_a; - u32 rpt_b; - u32 rpt_sel; - u32 rpt_sel_mask; + struct rtw89_edcca_p_regs { + u32 rpt_a; + u32 rpt_b; + u32 rpt_sel; + u32 rpt_sel_mask; + } p[RTW89_PHY_NUM]; u32 rpt_sel_be; u32 rpt_sel_be_mask; u32 tx_collision_t2r_st; @@ -4231,6 +4241,7 @@ enum rtw89_chanctx_state { enum rtw89_chanctx_callbacks { RTW89_CHANCTX_CALLBACK_PLACEHOLDER, RTW89_CHANCTX_CALLBACK_RFK, + RTW89_CHANCTX_CALLBACK_TAS, NUM_OF_RTW89_CHANCTX_CALLBACKS, }; @@ -4251,6 +4262,7 @@ struct rtw89_chip_info { bool try_ce_fw; u8 bbmcu_nr; u32 needed_fw_elms; + const struct rtw89_fw_blacklist *fw_blacklist; u32 fifo_size; bool small_fifo_size; u32 dle_scc_rsvd_size; @@ -4271,10 +4283,13 @@ struct rtw89_chip_info { bool support_unii4; bool support_rnr; bool support_ant_gain; + bool support_tas; bool ul_tb_waveform_ctrl; bool ul_tb_pwr_diff; + bool rx_freq_frome_ie; bool hw_sec_hdr; bool hw_mgmt_tx_encrypt; + bool hw_tkip_crypto; u8 rf_path_num; u8 tx_nss; u8 rx_nss; @@ -4478,6 +4493,7 @@ enum rtw89_fw_feature { RTW89_FW_FEATURE_CH_INFO_BE_V0, RTW89_FW_FEATURE_LPS_CH_INFO, RTW89_FW_FEATURE_NO_PHYCAP_P1, + RTW89_FW_FEATURE_NO_POWER_DIFFERENCE, }; struct rtw89_fw_suit { @@ -4536,6 +4552,7 @@ struct rtw89_fw_elm_info { struct rtw89_phy_table *rf_nctl; struct rtw89_fw_txpwr_track_cfg *txpwr_trk; struct rtw89_phy_rfk_log_fmt *rfk_log_fmt; + const struct rtw89_regd_data *regd; }; enum rtw89_fw_mss_dev_type { @@ -4651,6 +4668,7 @@ enum rtw89_ant_gain_domain_type { struct rtw89_ant_gain_info { s8 
offset[RTW89_ANT_GAIN_CHAIN_NUM][RTW89_ANT_GAIN_SUBBAND_NR]; u32 regd_enabled; + bool block_country; }; struct rtw89_6ghz_span { @@ -4666,18 +4684,29 @@ struct rtw89_6ghz_span { enum rtw89_tas_state { RTW89_TAS_STATE_DPR_OFF, RTW89_TAS_STATE_DPR_ON, - RTW89_TAS_STATE_DPR_FORBID, + RTW89_TAS_STATE_STATIC_SAR, }; -#define RTW89_TAS_MAX_WINDOW 50 +#define RTW89_TAS_TX_RATIO_WINDOW 6 +#define RTW89_TAS_TXPWR_WINDOW 180 struct rtw89_tas_info { - s16 txpwr_history[RTW89_TAS_MAX_WINDOW]; - s32 total_txpwr; - u8 cur_idx; - s8 dpr_gap; - s8 delta; + u16 tx_ratio_history[RTW89_TAS_TX_RATIO_WINDOW]; + u64 txpwr_history[RTW89_TAS_TXPWR_WINDOW]; + u8 txpwr_head_idx; + u8 txpwr_tail_idx; + u8 tx_ratio_idx; + u16 total_tx_ratio; + u64 total_txpwr; + u64 instant_txpwr; + u32 window_size; + s8 dpr_on_threshold; + s8 dpr_off_threshold; + enum rtw89_tas_state backup_state; enum rtw89_tas_state state; + bool keep_history; + bool block_regd; bool enable; + bool pause; }; struct rtw89_chanctx_cfg { @@ -4735,6 +4764,7 @@ struct rtw89_edcca_bak { enum rtw89_dm_type { RTW89_DM_DYNAMIC_EDCCA, RTW89_DM_THERMAL_PROTECT, + RTW89_DM_TAS, }; #define RTW89_THERMAL_PROT_LV_MAX 5 @@ -4762,12 +4792,11 @@ struct rtw89_hal { struct rtw89_chanctx chanctx[NUM_OF_RTW89_CHANCTX]; struct cfg80211_chan_def roc_chandef; - bool entity_active[RTW89_PHY_MAX]; + bool entity_active[RTW89_PHY_NUM]; bool entity_pause; enum rtw89_entity_mode entity_mode; struct rtw89_entity_mgnt entity_mgnt; - struct rtw89_edcca_bak edcca_bak; u32 disabled_dm_bitmap; /* bitmap of enum rtw89_dm_type */ u8 thermal_prot_th; @@ -4973,7 +5002,7 @@ struct rtw89_dpk_bkup_para { struct rtw89_dpk_info { bool is_dpk_enable; bool is_dpk_reload_en; - u8 dpk_gs[RTW89_PHY_MAX]; + u8 dpk_gs[RTW89_PHY_NUM]; u16 dc_i[RTW89_DPK_RF_PATH][RTW89_DPK_BKUP_NUM]; u16 dc_q[RTW89_DPK_RF_PATH][RTW89_DPK_BKUP_NUM]; u8 corr_val[RTW89_DPK_RF_PATH][RTW89_DPK_BKUP_NUM]; @@ -5135,7 +5164,7 @@ struct rtw89_tssi_info { u32 alignment_backup_by_ch[RF_PATH_MAX][TSSI_MAX_CH_NUM][TSSI_ALIMK_VALUE_NUM]; u32 alignment_value[RF_PATH_MAX][TSSI_ALIMK_MAX][TSSI_ALIMK_VALUE_NUM]; bool alignment_done[RF_PATH_MAX][TSSI_ALIMK_MAX]; - u32 tssi_alimk_time; + u64 tssi_alimk_time; }; struct rtw89_power_trim_info { @@ -5146,9 +5175,27 @@ struct rtw89_power_trim_info { u8 pad_bias_trim[RF_PATH_MAX]; }; +enum rtw89_regd_func { + RTW89_REGD_FUNC_TAS = 0, /* TAS (Time Average SAR) */ + RTW89_REGD_FUNC_DAG = 1, /* DAG (Dynamic Antenna Gain) */ + + NUM_OF_RTW89_REGD_FUNC, +}; + struct rtw89_regd { char alpha2[3]; u8 txpwr_regd[RTW89_BAND_NUM]; + DECLARE_BITMAP(func_bitmap, NUM_OF_RTW89_REGD_FUNC); +}; + +struct rtw89_regd_data { + unsigned int nr; + struct rtw89_regd map[] __counted_by(nr); +}; + +struct rtw89_regd_ctrl { + unsigned int nr; + const struct rtw89_regd *map; }; #define RTW89_REGD_MAX_COUNTRY_NUM U8_MAX @@ -5156,6 +5203,7 @@ struct rtw89_regd { #define RTW89_5GHZ_UNII4_START_INDEX 25 struct rtw89_regulatory_info { + struct rtw89_regd_ctrl ctrl; const struct rtw89_regd *regd; enum rtw89_reg_6ghz_power reg_6ghz_power; struct rtw89_reg_6ghz_tpe reg_6ghz_tpe; @@ -5299,8 +5347,8 @@ struct rtw89_lps_parm { }; struct rtw89_ppdu_sts_info { - struct sk_buff_head rx_queue[RTW89_PHY_MAX]; - u8 curr_rx_ppdu_cnt[RTW89_PHY_MAX]; + struct sk_buff_head rx_queue[RTW89_PHY_NUM]; + u8 curr_rx_ppdu_cnt[RTW89_PHY_NUM]; }; struct rtw89_early_h2c { @@ -5419,8 +5467,8 @@ struct rtw89_phy_efuse_gain { bool offset_valid; bool comp_valid; s8 offset[RF_PATH_MAX][RTW89_GAIN_OFFSET_NR]; /* S(8, 0) */ - s8 offset_base[RTW89_PHY_MAX]; 
/* S(8, 4) */ - s8 rssi_base[RTW89_PHY_MAX]; /* S(8, 4) */ + s8 offset_base[RTW89_PHY_NUM]; /* S(8, 4) */ + s8 rssi_base[RTW89_PHY_NUM]; /* S(8, 4) */ s8 comp[RF_PATH_MAX][RTW89_SUBBAND_NR]; /* S(8, 0) */ }; @@ -5629,8 +5677,6 @@ struct rtw89_dev { struct rtw89_sta_link __rcu *assoc_link_on_macid[RTW89_MAX_MAC_ID_NUM]; refcount_t refcount_ap_info; - /* ensures exclusive access from mac80211 callbacks */ - struct mutex mutex; struct list_head rtwvifs_list; /* used to protect rf read write */ struct mutex rf_mutex; @@ -5650,10 +5696,10 @@ struct rtw89_dev { struct rtw89_cam_info cam_info; struct sk_buff_head c2h_queue; - struct work_struct c2h_work; - struct work_struct ips_work; + struct wiphy_work c2h_work; + struct wiphy_work ips_work; + struct wiphy_work cancel_6ghz_probe_work; struct work_struct load_firmware_work; - struct work_struct cancel_6ghz_probe_work; struct list_head early_h2c_list; @@ -5682,9 +5728,6 @@ struct rtw89_dev { struct rtw89_power_trim_info pwr_trim; struct rtw89_cfo_tracking_info cfo_tracking; - struct rtw89_env_monitor_info env_monitor; - struct rtw89_dig_info dig; - struct rtw89_phy_ch_info ch_info; union { struct rtw89_phy_bb_gain_info ax; struct rtw89_phy_bb_gain_info_be be; @@ -5693,15 +5736,22 @@ struct rtw89_dev { struct rtw89_phy_ul_tb_info ul_tb_info; struct rtw89_antdiv_info antdiv; - struct delayed_work track_work; - struct delayed_work chanctx_work; - struct delayed_work coex_act1_work; - struct delayed_work coex_bt_devinfo_work; - struct delayed_work coex_rfk_chk_work; - struct delayed_work cfo_track_work; + struct rtw89_bb_ctx { + enum rtw89_phy_idx phy_idx; + struct rtw89_env_monitor_info env_monitor; + struct rtw89_dig_info dig; + struct rtw89_phy_ch_info ch_info; + struct rtw89_edcca_bak edcca_bak; + } bbs[RTW89_PHY_NUM]; + + struct wiphy_delayed_work track_work; + struct wiphy_delayed_work chanctx_work; + struct wiphy_delayed_work coex_act1_work; + struct wiphy_delayed_work coex_bt_devinfo_work; + struct wiphy_delayed_work coex_rfk_chk_work; + struct wiphy_delayed_work cfo_track_work; struct delayed_work forbid_ba_work; - struct delayed_work roc_work; - struct delayed_work antdiv_work; + struct wiphy_delayed_work antdiv_work; struct rtw89_ppdu_sts_info ppdu_sts; u8 total_sta_assoc; bool scanning; @@ -6978,6 +7028,48 @@ static inline bool rtw89_is_mlo_1_1(struct rtw89_dev *rtwdev) } } +static inline u8 rtw89_get_active_phy_bitmap(struct rtw89_dev *rtwdev) +{ + if (!rtwdev->dbcc_en) + return BIT(RTW89_PHY_0); + + switch (rtwdev->mlo_dbcc_mode) { + case MLO_0_PLUS_2_1RF: + case MLO_0_PLUS_2_2RF: + return BIT(RTW89_PHY_1); + case MLO_1_PLUS_1_1RF: + case MLO_1_PLUS_1_2RF: + case MLO_2_PLUS_2_2RF: + case DBCC_LEGACY: + return BIT(RTW89_PHY_0) | BIT(RTW89_PHY_1); + case MLO_2_PLUS_0_1RF: + case MLO_2_PLUS_0_2RF: + default: + return BIT(RTW89_PHY_0); + } +} + +#define rtw89_for_each_active_bb(rtwdev, bb) \ + for (u8 __active_bb_bitmap = rtw89_get_active_phy_bitmap(rtwdev), \ + __phy_idx = 0; __phy_idx < RTW89_PHY_NUM; __phy_idx++) \ + if (__active_bb_bitmap & BIT(__phy_idx) && \ + (bb = &rtwdev->bbs[__phy_idx])) + +#define rtw89_for_each_capab_bb(rtwdev, bb) \ + for (u8 __phy_idx_max = rtwdev->dbcc_en ? 
RTW89_PHY_1 : RTW89_PHY_0, \ + __phy_idx = 0; __phy_idx <= __phy_idx_max; __phy_idx++) \ + if ((bb = &rtwdev->bbs[__phy_idx])) + +static inline +struct rtw89_bb_ctx *rtw89_get_bb_ctx(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy_idx) +{ + if (phy_idx >= RTW89_PHY_NUM) + return &rtwdev->bbs[RTW89_PHY_0]; + + return &rtwdev->bbs[phy_idx]; +} + static inline bool rtw89_is_rtl885xb(struct rtw89_dev *rtwdev) { enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; @@ -7092,9 +7184,7 @@ void rtw89_chip_cfg_txpwr_ul_tb_offset(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link); bool rtw89_ra_report_to_bitrate(struct rtw89_dev *rtwdev, u8 rpt_rate, u16 *bitrate); int rtw89_regd_setup(struct rtw89_dev *rtwdev); -int rtw89_regd_init(struct rtw89_dev *rtwdev, - void (*reg_notifier)(struct wiphy *wiphy, struct regulatory_request *request)); -void rtw89_regd_notifier(struct wiphy *wiphy, struct regulatory_request *request); +int rtw89_regd_init_hint(struct rtw89_dev *rtwdev); void rtw89_traffic_stats_init(struct rtw89_dev *rtwdev, struct rtw89_traffic_stats *stats); int rtw89_wait_for_cond(struct rtw89_wait_info *wait, unsigned int cond); @@ -7102,8 +7192,8 @@ void rtw89_complete_cond(struct rtw89_wait_info *wait, unsigned int cond, const struct rtw89_completion_data *data); int rtw89_core_start(struct rtw89_dev *rtwdev); void rtw89_core_stop(struct rtw89_dev *rtwdev); -void rtw89_core_update_beacon_work(struct work_struct *work); -void rtw89_roc_work(struct work_struct *work); +void rtw89_core_update_beacon_work(struct wiphy *wiphy, struct wiphy_work *work); +void rtw89_roc_work(struct wiphy *wiphy, struct wiphy_work *work); void rtw89_roc_start(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif); void rtw89_roc_end(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif); void rtw89_core_scan_start(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link, diff --git a/drivers/net/wireless/realtek/rtw89/debug.c b/drivers/net/wireless/realtek/rtw89/debug.c index 09fa977a6e6d2..f2c5753fd3864 100644 --- a/drivers/net/wireless/realtek/rtw89/debug.c +++ b/drivers/net/wireless/realtek/rtw89/debug.c @@ -13,6 +13,7 @@ #include "ps.h" #include "reg.h" #include "sar.h" +#include "util.h" #ifdef CONFIG_RTW89_DEBUGMSG unsigned int rtw89_debug_mask; @@ -22,11 +23,21 @@ MODULE_PARM_DESC(debug_mask, "Debugging mask"); #endif #ifdef CONFIG_RTW89_DEBUGFS +struct rtw89_debugfs_priv_opt { + bool rlock:1; + bool wlock:1; + size_t rsize; +}; + struct rtw89_debugfs_priv { struct rtw89_dev *rtwdev; - int (*cb_read)(struct seq_file *m, void *v); - ssize_t (*cb_write)(struct file *filp, const char __user *buffer, - size_t count, loff_t *loff); + ssize_t (*cb_read)(struct rtw89_dev *rtwdev, + struct rtw89_debugfs_priv *debugfs_priv, + char *buf, size_t bufsz); + ssize_t (*cb_write)(struct rtw89_dev *rtwdev, + struct rtw89_debugfs_priv *debugfs_priv, + const char *buf, size_t count); + struct rtw89_debugfs_priv_opt opt; union { u32 cb_data; struct { @@ -51,6 +62,8 @@ struct rtw89_debugfs_priv { u8 sel; } mac_mem; }; + ssize_t rused; + char *rbuf; }; struct rtw89_debugfs { @@ -74,6 +87,28 @@ struct rtw89_debugfs { struct rtw89_debugfs_priv disable_dm; }; +struct rtw89_debugfs_iter_data { + char *buf; + size_t bufsz; + int written_sz; +}; + +static void rtw89_debugfs_iter_data_setup(struct rtw89_debugfs_iter_data *iter_data, + char *buf, size_t bufsz) +{ + iter_data->buf = buf; + iter_data->bufsz = bufsz; + iter_data->written_sz = 0; +} + +static void rtw89_debugfs_iter_data_next(struct rtw89_debugfs_iter_data 
*iter_data, + char *buf, size_t bufsz, int written_sz) +{ + iter_data->buf = buf; + iter_data->bufsz = bufsz; + iter_data->written_sz += written_sz; +} + static const u16 rtw89_rate_info_bw_to_mhz_map[] = { [RATE_INFO_BW_20] = 20, [RATE_INFO_BW_40] = 40, @@ -90,84 +125,122 @@ static u16 rtw89_rate_info_bw_to_mhz(enum rate_info_bw bw) return 0; } -static int rtw89_debugfs_single_show(struct seq_file *m, void *v) +static ssize_t rtw89_debugfs_file_read_helper(struct wiphy *wiphy, struct file *file, + char *buf, size_t bufsz, void *data) { - struct rtw89_debugfs_priv *debugfs_priv = m->private; + struct rtw89_debugfs_priv *debugfs_priv = data; + struct rtw89_dev *rtwdev = debugfs_priv->rtwdev; + ssize_t n; - return debugfs_priv->cb_read(m, v); + n = debugfs_priv->cb_read(rtwdev, debugfs_priv, buf, bufsz); + rtw89_might_trailing_ellipsis(buf, bufsz, n); + + return n; } -static ssize_t rtw89_debugfs_single_write(struct file *filp, - const char __user *buffer, - size_t count, loff_t *loff) +static ssize_t rtw89_debugfs_file_read(struct file *file, char __user *userbuf, + size_t count, loff_t *ppos) { - struct rtw89_debugfs_priv *debugfs_priv = filp->private_data; + struct rtw89_debugfs_priv *debugfs_priv = file->private_data; + struct rtw89_debugfs_priv_opt *opt = &debugfs_priv->opt; + struct rtw89_dev *rtwdev = debugfs_priv->rtwdev; + size_t bufsz = opt->rsize ? opt->rsize : PAGE_SIZE; + char *buf; + ssize_t n; - return debugfs_priv->cb_write(filp, buffer, count, loff); -} + if (!debugfs_priv->rbuf) + debugfs_priv->rbuf = devm_kzalloc(rtwdev->dev, bufsz, GFP_KERNEL); -static ssize_t rtw89_debugfs_seq_file_write(struct file *filp, - const char __user *buffer, - size_t count, loff_t *loff) -{ - struct seq_file *seqpriv = (struct seq_file *)filp->private_data; - struct rtw89_debugfs_priv *debugfs_priv = seqpriv->private; + buf = debugfs_priv->rbuf; + if (!buf) + return -ENOMEM; + + if (*ppos) { + n = debugfs_priv->rused; + goto out; + } + + if (opt->rlock) { + n = wiphy_locked_debugfs_read(rtwdev->hw->wiphy, file, buf, bufsz, + userbuf, count, ppos, + rtw89_debugfs_file_read_helper, + debugfs_priv); + debugfs_priv->rused = n; - return debugfs_priv->cb_write(filp, buffer, count, loff); + return n; + } + + n = rtw89_debugfs_file_read_helper(rtwdev->hw->wiphy, file, buf, bufsz, + debugfs_priv); + debugfs_priv->rused = n; + +out: + return simple_read_from_buffer(userbuf, count, ppos, buf, n); } -static int rtw89_debugfs_single_open(struct inode *inode, struct file *filp) +static ssize_t rtw89_debugfs_file_write_helper(struct wiphy *wiphy, struct file *file, + char *buf, size_t count, void *data) { - return single_open(filp, rtw89_debugfs_single_show, inode->i_private); + struct rtw89_debugfs_priv *debugfs_priv = data; + struct rtw89_dev *rtwdev = debugfs_priv->rtwdev; + + return debugfs_priv->cb_write(rtwdev, debugfs_priv, buf, count); } -static int rtw89_debugfs_close(struct inode *inode, struct file *filp) +static ssize_t rtw89_debugfs_file_write(struct file *file, + const char __user *userbuf, + size_t count, loff_t *loff) { - return 0; + struct rtw89_debugfs_priv *debugfs_priv = file->private_data; + struct rtw89_debugfs_priv_opt *opt = &debugfs_priv->opt; + struct rtw89_dev *rtwdev = debugfs_priv->rtwdev; + char *buf __free(kfree) = kmalloc(count + 1, GFP_KERNEL); + ssize_t n; + + if (!buf) + return -ENOMEM; + + if (opt->wlock) { + n = wiphy_locked_debugfs_write(rtwdev->hw->wiphy, + file, buf, count + 1, + userbuf, count, + rtw89_debugfs_file_write_helper, + debugfs_priv); + return n; + } + + if 
(copy_from_user(buf, userbuf, count)) + return -EFAULT; + + buf[count] = '\0'; + + return debugfs_priv->cb_write(rtwdev, debugfs_priv, buf, count); } -static const struct file_operations file_ops_single_r = { - .owner = THIS_MODULE, - .open = rtw89_debugfs_single_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, +static const struct debugfs_short_fops file_ops_single_r = { + .read = rtw89_debugfs_file_read, + .llseek = generic_file_llseek, }; -static const struct file_operations file_ops_common_rw = { - .owner = THIS_MODULE, - .open = rtw89_debugfs_single_open, - .release = single_release, - .read = seq_read, - .llseek = seq_lseek, - .write = rtw89_debugfs_seq_file_write, +static const struct debugfs_short_fops file_ops_common_rw = { + .read = rtw89_debugfs_file_read, + .write = rtw89_debugfs_file_write, + .llseek = generic_file_llseek, }; -static const struct file_operations file_ops_single_w = { - .owner = THIS_MODULE, - .write = rtw89_debugfs_single_write, - .open = simple_open, - .release = rtw89_debugfs_close, +static const struct debugfs_short_fops file_ops_single_w = { + .write = rtw89_debugfs_file_write, + .llseek = generic_file_llseek, }; static ssize_t -rtw89_debug_priv_read_reg_select(struct file *filp, - const char __user *user_buf, - size_t count, loff_t *loff) +rtw89_debug_priv_read_reg_select(struct rtw89_dev *rtwdev, + struct rtw89_debugfs_priv *debugfs_priv, + const char *buf, size_t count) { - struct seq_file *m = (struct seq_file *)filp->private_data; - struct rtw89_debugfs_priv *debugfs_priv = m->private; - struct rtw89_dev *rtwdev = debugfs_priv->rtwdev; - char buf[32]; - size_t buf_size; u32 addr, len; int num; - buf_size = min(count, sizeof(buf) - 1); - if (copy_from_user(buf, user_buf, buf_size)) - return -EFAULT; - - buf[buf_size] = '\0'; num = sscanf(buf, "%x %x", &addr, &len); if (num != 2) { rtw89_info(rtwdev, "invalid format: <addr> <len>\n"); @@ -182,11 +255,13 @@ rtw89_debug_priv_read_reg_select(struct file *filp, return count; } -static int rtw89_debug_priv_read_reg_get(struct seq_file *m, void *v) +static +ssize_t rtw89_debug_priv_read_reg_get(struct rtw89_dev *rtwdev, + struct rtw89_debugfs_priv *debugfs_priv, + char *buf, size_t bufsz) { - struct rtw89_debugfs_priv *debugfs_priv = m->private; - struct rtw89_dev *rtwdev = debugfs_priv->rtwdev; - u32 addr, end, data, k; + char *p = buf, *end = buf + bufsz; + u32 addr, addr_end, data, k; u32 len; len = debugfs_priv->read_reg.len; @@ -210,41 +285,34 @@ static int rtw89_debug_priv_read_reg_get(struct seq_file *m, void *v) return -EINVAL; } - seq_printf(m, "get %d bytes at 0x%08x=0x%08x\n", len, addr, data); + p += scnprintf(p, end - p, "get %d bytes at 0x%08x=0x%08x\n", len, + addr, data); - return 0; + return p - buf; ndata: - end = addr + len; + addr_end = addr + len; - for (; addr < end; addr += 16) { - seq_printf(m, "%08xh : ", 0x18600000 + addr); + for (; addr < addr_end; addr += 16) { + p += scnprintf(p, end - p, "%08xh : ", 0x18600000 + addr); for (k = 0; k < 16; k += 4) { data = rtw89_read32(rtwdev, addr + k); - seq_printf(m, "%08x ", data); + p += scnprintf(p, end - p, "%08x ", data); } - seq_puts(m, "\n"); + p += scnprintf(p, end - p, "\n"); } - return 0; + return p - buf; } -static ssize_t rtw89_debug_priv_write_reg_set(struct file *filp, - const char __user *user_buf, - size_t count, loff_t *loff) +static +ssize_t rtw89_debug_priv_write_reg_set(struct rtw89_dev *rtwdev, + struct rtw89_debugfs_priv *debugfs_priv, + const char *buf, size_t count) { - struct rtw89_debugfs_priv 
*debugfs_priv = filp->private_data; - struct rtw89_dev *rtwdev = debugfs_priv->rtwdev; - char buf[32]; - size_t buf_size; u32 addr, val, len; int num; - buf_size = min(count, sizeof(buf) - 1); - if (copy_from_user(buf, user_buf, buf_size)) - return -EFAULT; - - buf[buf_size] = '\0'; num = sscanf(buf, "%x %x %x", &addr, &val, &len); if (num != 3) { rtw89_info(rtwdev, "invalid format: <addr> <val> <len>\n"); @@ -273,24 +341,14 @@ static ssize_t rtw89_debug_priv_write_reg_set(struct file *filp, } static ssize_t -rtw89_debug_priv_read_rf_select(struct file *filp, - const char __user *user_buf, - size_t count, loff_t *loff) +rtw89_debug_priv_read_rf_select(struct rtw89_dev *rtwdev, + struct rtw89_debugfs_priv *debugfs_priv, + const char *buf, size_t count) { - struct seq_file *m = (struct seq_file *)filp->private_data; - struct rtw89_debugfs_priv *debugfs_priv = m->private; - struct rtw89_dev *rtwdev = debugfs_priv->rtwdev; - char buf[32]; - size_t buf_size; u32 addr, mask; u8 path; int num; - buf_size = min(count, sizeof(buf) - 1); - if (copy_from_user(buf, user_buf, buf_size)) - return -EFAULT; - - buf[buf_size] = '\0'; num = sscanf(buf, "%hhd %x %x", &path, &addr, &mask); if (num != 3) { rtw89_info(rtwdev, "invalid format: <path> <addr> <mask>\n"); @@ -310,10 +368,12 @@ rtw89_debug_priv_read_rf_select(struct file *filp, return count; } -static int rtw89_debug_priv_read_rf_get(struct seq_file *m, void *v) +static +ssize_t rtw89_debug_priv_read_rf_get(struct rtw89_dev *rtwdev, + struct rtw89_debugfs_priv *debugfs_priv, + char *buf, size_t bufsz) { - struct rtw89_debugfs_priv *debugfs_priv = m->private; - struct rtw89_dev *rtwdev = debugfs_priv->rtwdev; + char *p = buf, *end = buf + bufsz; u32 addr, data, mask; u8 path; @@ -323,28 +383,21 @@ static int rtw89_debug_priv_read_rf_get(struct seq_file *m, void *v) data = rtw89_read_rf(rtwdev, path, addr, mask); - seq_printf(m, "path %d, rf register 0x%08x=0x%08x\n", path, addr, data); + p += scnprintf(p, end - p, "path %d, rf register 0x%08x=0x%08x\n", + path, addr, data); - return 0; + return p - buf; } -static ssize_t rtw89_debug_priv_write_rf_set(struct file *filp, - const char __user *user_buf, - size_t count, loff_t *loff) +static +ssize_t rtw89_debug_priv_write_rf_set(struct rtw89_dev *rtwdev, + struct rtw89_debugfs_priv *debugfs_priv, + const char *buf, size_t count) { - struct rtw89_debugfs_priv *debugfs_priv = filp->private_data; - struct rtw89_dev *rtwdev = debugfs_priv->rtwdev; - char buf[32]; - size_t buf_size; u32 addr, val, mask; u8 path; int num; - buf_size = min(count, sizeof(buf) - 1); - if (copy_from_user(buf, user_buf, buf_size)) - return -EFAULT; - - buf[buf_size] = '\0'; num = sscanf(buf, "%hhd %x %x %x", &path, &addr, &mask, &val); if (num != 4) { rtw89_info(rtwdev, "invalid format: <path> <addr> <mask> <val>\n"); @@ -363,29 +416,31 @@ static ssize_t rtw89_debug_priv_write_rf_set(struct file *filp, return count; } -static int rtw89_debug_priv_rf_reg_dump_get(struct seq_file *m, void *v) +static +ssize_t rtw89_debug_priv_rf_reg_dump_get(struct rtw89_dev *rtwdev, + struct rtw89_debugfs_priv *debugfs_priv, + char *buf, size_t bufsz) { - struct rtw89_debugfs_priv *debugfs_priv = m->private; - struct rtw89_dev *rtwdev = debugfs_priv->rtwdev; const struct rtw89_chip_info *chip = rtwdev->chip; + char *p = buf, *end = buf + bufsz; u32 addr, offset, data; u8 path; for (path = 0; path < chip->rf_path_num; path++) { - seq_printf(m, "RF path %d:\n\n", path); + p += scnprintf(p, end - p, "RF path %d:\n\n", path); for (addr = 0; addr < 0x100; addr += 4) { - seq_printf(m, "0x%08x: ", addr); + p += 
scnprintf(p, end - p, "0x%08x: ", addr); for (offset = 0; offset < 4; offset++) { data = rtw89_read_rf(rtwdev, path, addr + offset, RFREG_MASK); - seq_printf(m, "0x%05x ", data); + p += scnprintf(p, end - p, "0x%05x ", data); } - seq_puts(m, "\n"); + p += scnprintf(p, end - p, "\n"); } - seq_puts(m, "\n"); + p += scnprintf(p, end - p, "\n"); } - return 0; + return p - buf; } struct txpwr_ent { @@ -704,56 +759,71 @@ static const struct txpwr_map __txpwr_map_lmt_ru_be = { }; static unsigned int -__print_txpwr_ent(struct seq_file *m, const struct txpwr_ent *ent, - const s8 *buf, const unsigned int cur) +__print_txpwr_ent(char *buf, size_t bufsz, const struct txpwr_ent *ent, + const s8 *bufp, const unsigned int cur, unsigned int *ate) { + char *p = buf, *end = buf + bufsz; unsigned int cnt, i; + unsigned int eaten; char *fmt; if (ent->nested) { - for (cnt = 0, i = 0; i < ent->len; i++) - cnt += __print_txpwr_ent(m, ent->ptr + i, buf, - cur + cnt); - return cnt; + for (cnt = 0, i = 0; i < ent->len; i++, cnt += eaten) + p += __print_txpwr_ent(p, end - p, ent->ptr + i, bufp, + cur + cnt, &eaten); + *ate = cnt; + goto out; } switch (ent->len) { case 0: - seq_printf(m, "\t<< %s >>\n", ent->txt); - return 0; + p += scnprintf(p, end - p, "\t<< %s >>\n", ent->txt); + *ate = 0; + goto out; case 2: fmt = "%s\t| %3d, %3d,\t\tdBm\n"; - seq_printf(m, fmt, ent->txt, buf[cur], buf[cur + 1]); - return 2; + p += scnprintf(p, end - p, fmt, ent->txt, bufp[cur], + bufp[cur + 1]); + *ate = 2; + goto out; case 4: fmt = "%s\t| %3d, %3d, %3d, %3d,\tdBm\n"; - seq_printf(m, fmt, ent->txt, buf[cur], buf[cur + 1], - buf[cur + 2], buf[cur + 3]); - return 4; + p += scnprintf(p, end - p, fmt, ent->txt, bufp[cur], + bufp[cur + 1], + bufp[cur + 2], bufp[cur + 3]); + *ate = 4; + goto out; case 8: fmt = "%s\t| %3d, %3d, %3d, %3d, %3d, %3d, %3d, %3d,\tdBm\n"; - seq_printf(m, fmt, ent->txt, buf[cur], buf[cur + 1], - buf[cur + 2], buf[cur + 3], buf[cur + 4], - buf[cur + 5], buf[cur + 6], buf[cur + 7]); - return 8; + p += scnprintf(p, end - p, fmt, ent->txt, bufp[cur], + bufp[cur + 1], + bufp[cur + 2], bufp[cur + 3], bufp[cur + 4], + bufp[cur + 5], bufp[cur + 6], bufp[cur + 7]); + *ate = 8; + goto out; default: return 0; } + +out: + return p - buf; } -static int __print_txpwr_map(struct seq_file *m, struct rtw89_dev *rtwdev, - const struct txpwr_map *map) +static ssize_t __print_txpwr_map(struct rtw89_dev *rtwdev, char *buf, size_t bufsz, + const struct txpwr_map *map) { u8 fct = rtwdev->chip->txpwr_factor_mac; u8 path_num = rtwdev->chip->rf_path_num; + char *p = buf, *end = buf + bufsz; unsigned int cur, i; + unsigned int eaten; u32 max_valid_addr; u32 val, addr; - s8 *buf, tmp; + s8 *bufp, tmp; int ret; - buf = vzalloc(map->addr_to - map->addr_from + 4); - if (!buf) + bufp = vzalloc(map->addr_to - map->addr_from + 4); + if (!bufp) return -ENOMEM; if (path_num == 1) @@ -773,31 +843,32 @@ static int __print_txpwr_map(struct seq_file *m, struct rtw89_dev *rtwdev, for (i = 0; i < 4; i++, val >>= 8) { /* signed 7 bits, and reserved BIT(7) */ tmp = sign_extend32(val, 6); - buf[cur + i] = tmp >> fct; + bufp[cur + i] = tmp >> fct; } } - for (cur = 0, i = 0; i < map->size; i++) - cur += __print_txpwr_ent(m, &map->ent[i], buf, cur); + for (cur = 0, i = 0; i < map->size; i++, cur += eaten) + p += __print_txpwr_ent(p, end - p, &map->ent[i], bufp, cur, &eaten); - vfree(buf); - return 0; + vfree(bufp); + return p - buf; } #define case_REGD(_regd) \ case RTW89_ ## _regd: \ - seq_puts(m, #_regd "\n"); \ + p += scnprintf(p, end - p, #_regd "\n"); 
\ break -static void __print_regd(struct seq_file *m, struct rtw89_dev *rtwdev, - const struct rtw89_chan *chan) +static int __print_regd(struct rtw89_dev *rtwdev, char *buf, size_t bufsz, + const struct rtw89_chan *chan) { + char *p = buf, *end = buf + bufsz; u8 band = chan->band_type; u8 regd = rtw89_regd_get(rtwdev, band); switch (regd) { default: - seq_printf(m, "UNKNOWN: %d\n", regd); + p += scnprintf(p, end - p, "UNKNOWN: %d\n", regd); break; case_REGD(WW); case_REGD(ETSI); @@ -816,6 +887,8 @@ static void __print_regd(struct seq_file *m, struct rtw89_dev *rtwdev, case_REGD(UK); case_REGD(THAILAND); } + + return p - buf; } #undef case_REGD @@ -844,96 +917,93 @@ static const struct dbgfs_txpwr_table *dbgfs_txpwr_tables[RTW89_CHIP_GEN_NUM] = }; static -void rtw89_debug_priv_txpwr_table_get_regd(struct seq_file *m, - struct rtw89_dev *rtwdev, - const struct rtw89_chan *chan) +int rtw89_debug_priv_txpwr_table_get_regd(struct rtw89_dev *rtwdev, + char *buf, size_t bufsz, + const struct rtw89_chan *chan) { const struct rtw89_regulatory_info *regulatory = &rtwdev->regulatory; const struct rtw89_reg_6ghz_tpe *tpe6 = ®ulatory->reg_6ghz_tpe; + char *p = buf, *end = buf + bufsz; - seq_printf(m, "[Chanctx] band %u, ch %u, bw %u\n", - chan->band_type, chan->channel, chan->band_width); + p += scnprintf(p, end - p, "[Chanctx] band %u, ch %u, bw %u\n", + chan->band_type, chan->channel, chan->band_width); - seq_puts(m, "[Regulatory] "); - __print_regd(m, rtwdev, chan); + p += scnprintf(p, end - p, "[Regulatory] "); + p += __print_regd(rtwdev, p, end - p, chan); if (chan->band_type == RTW89_BAND_6G) { - seq_printf(m, "[reg6_pwr_type] %u\n", regulatory->reg_6ghz_power); + p += scnprintf(p, end - p, "[reg6_pwr_type] %u\n", + regulatory->reg_6ghz_power); if (tpe6->valid) - seq_printf(m, "[TPE] %d dBm\n", tpe6->constraint); + p += scnprintf(p, end - p, "[TPE] %d dBm\n", + tpe6->constraint); } + + return p - buf; } -static int rtw89_debug_priv_txpwr_table_get(struct seq_file *m, void *v) +static +ssize_t rtw89_debug_priv_txpwr_table_get(struct rtw89_dev *rtwdev, + struct rtw89_debugfs_priv *debugfs_priv, + char *buf, size_t bufsz) { - struct rtw89_debugfs_priv *debugfs_priv = m->private; - struct rtw89_dev *rtwdev = debugfs_priv->rtwdev; enum rtw89_chip_gen chip_gen = rtwdev->chip->chip_gen; const struct dbgfs_txpwr_table *tbl; const struct rtw89_chan *chan; - int ret = 0; + char *p = buf, *end = buf + bufsz; + ssize_t n; + + lockdep_assert_wiphy(rtwdev->hw->wiphy); - mutex_lock(&rtwdev->mutex); rtw89_leave_ps_mode(rtwdev); chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0); - rtw89_debug_priv_txpwr_table_get_regd(m, rtwdev, chan); + p += rtw89_debug_priv_txpwr_table_get_regd(rtwdev, p, end - p, chan); - seq_puts(m, "[SAR]\n"); - rtw89_print_sar(m, rtwdev, chan->freq); + p += scnprintf(p, end - p, "[SAR]\n"); + p += rtw89_print_sar(rtwdev, p, end - p, chan->freq); - seq_puts(m, "[TAS]\n"); - rtw89_print_tas(m, rtwdev); + p += scnprintf(p, end - p, "[TAS]\n"); + p += rtw89_print_tas(rtwdev, p, end - p); - seq_puts(m, "[DAG]\n"); - rtw89_print_ant_gain(m, rtwdev, chan); + p += scnprintf(p, end - p, "[DAG]\n"); + p += rtw89_print_ant_gain(rtwdev, p, end - p, chan); tbl = dbgfs_txpwr_tables[chip_gen]; - if (!tbl) { - ret = -EOPNOTSUPP; - goto err; - } - - seq_puts(m, "\n[TX power byrate]\n"); - ret = __print_txpwr_map(m, rtwdev, tbl->byr); - if (ret) - goto err; - - seq_puts(m, "\n[TX power limit]\n"); - ret = __print_txpwr_map(m, rtwdev, tbl->lmt); - if (ret) - goto err; - - seq_puts(m, "\n[TX power limit_ru]\n"); 
- ret = __print_txpwr_map(m, rtwdev, tbl->lmt_ru); - if (ret) - goto err; + if (!tbl) + return -EOPNOTSUPP; -err: - mutex_unlock(&rtwdev->mutex); - return ret; + p += scnprintf(p, end - p, "\n[TX power byrate]\n"); + n = __print_txpwr_map(rtwdev, p, end - p, tbl->byr); + if (n < 0) + return n; + p += n; + + p += scnprintf(p, end - p, "\n[TX power limit]\n"); + n = __print_txpwr_map(rtwdev, p, end - p, tbl->lmt); + if (n < 0) + return n; + p += n; + + p += scnprintf(p, end - p, "\n[TX power limit_ru]\n"); + n = __print_txpwr_map(rtwdev, p, end - p, tbl->lmt_ru); + if (n < 0) + return n; + p += n; + + return p - buf; } static ssize_t -rtw89_debug_priv_mac_reg_dump_select(struct file *filp, - const char __user *user_buf, - size_t count, loff_t *loff) +rtw89_debug_priv_mac_reg_dump_select(struct rtw89_dev *rtwdev, + struct rtw89_debugfs_priv *debugfs_priv, + const char *buf, size_t count) { - struct seq_file *m = (struct seq_file *)filp->private_data; - struct rtw89_debugfs_priv *debugfs_priv = m->private; - struct rtw89_dev *rtwdev = debugfs_priv->rtwdev; const struct rtw89_chip_info *chip = rtwdev->chip; - char buf[32]; - size_t buf_size; int sel; int ret; - buf_size = min(count, sizeof(buf) - 1); - if (copy_from_user(buf, user_buf, buf_size)) - return -EFAULT; - - buf[buf_size] = '\0'; ret = kstrtoint(buf, 0, &sel); if (ret) return ret; @@ -957,99 +1027,91 @@ rtw89_debug_priv_mac_reg_dump_select(struct file *filp, #define RTW89_MAC_PAGE_SIZE 0x100 -static int rtw89_debug_priv_mac_reg_dump_get(struct seq_file *m, void *v) +static +ssize_t rtw89_debug_priv_mac_reg_dump_get(struct rtw89_dev *rtwdev, + struct rtw89_debugfs_priv *debugfs_priv, + char *buf, size_t bufsz) { - struct rtw89_debugfs_priv *debugfs_priv = m->private; - struct rtw89_dev *rtwdev = debugfs_priv->rtwdev; enum rtw89_debug_mac_reg_sel reg_sel = debugfs_priv->cb_data; - u32 start, end; + char *p = buf, *end = buf + bufsz; + u32 start, end_addr; u32 i, j, k, page; u32 val; switch (reg_sel) { case RTW89_DBG_SEL_MAC_00: - seq_puts(m, "Debug selected MAC page 0x00\n"); + p += scnprintf(p, end - p, "Debug selected MAC page 0x00\n"); start = 0x000; - end = 0x014; + end_addr = 0x014; break; case RTW89_DBG_SEL_MAC_30: - seq_puts(m, "Debug selected MAC page 0x30\n"); + p += scnprintf(p, end - p, "Debug selected MAC page 0x30\n"); start = 0x030; - end = 0x033; + end_addr = 0x033; break; case RTW89_DBG_SEL_MAC_40: - seq_puts(m, "Debug selected MAC page 0x40\n"); + p += scnprintf(p, end - p, "Debug selected MAC page 0x40\n"); start = 0x040; - end = 0x07f; + end_addr = 0x07f; break; case RTW89_DBG_SEL_MAC_80: - seq_puts(m, "Debug selected MAC page 0x80\n"); + p += scnprintf(p, end - p, "Debug selected MAC page 0x80\n"); start = 0x080; - end = 0x09f; + end_addr = 0x09f; break; case RTW89_DBG_SEL_MAC_C0: - seq_puts(m, "Debug selected MAC page 0xc0\n"); + p += scnprintf(p, end - p, "Debug selected MAC page 0xc0\n"); start = 0x0c0; - end = 0x0df; + end_addr = 0x0df; break; case RTW89_DBG_SEL_MAC_E0: - seq_puts(m, "Debug selected MAC page 0xe0\n"); + p += scnprintf(p, end - p, "Debug selected MAC page 0xe0\n"); start = 0x0e0; - end = 0x0ff; + end_addr = 0x0ff; break; case RTW89_DBG_SEL_BB: - seq_puts(m, "Debug selected BB register\n"); + p += scnprintf(p, end - p, "Debug selected BB register\n"); start = 0x100; - end = 0x17f; + end_addr = 0x17f; break; case RTW89_DBG_SEL_IQK: - seq_puts(m, "Debug selected IQK register\n"); + p += scnprintf(p, end - p, "Debug selected IQK register\n"); start = 0x180; - end = 0x1bf; + end_addr = 0x1bf; break; 
case RTW89_DBG_SEL_RFC: - seq_puts(m, "Debug selected RFC register\n"); + p += scnprintf(p, end - p, "Debug selected RFC register\n"); start = 0x1c0; - end = 0x1ff; + end_addr = 0x1ff; break; default: - seq_puts(m, "Selected invalid register page\n"); + p += scnprintf(p, end - p, "Selected invalid register page\n"); return -EINVAL; } - for (i = start; i <= end; i++) { + for (i = start; i <= end_addr; i++) { page = i << 8; for (j = page; j < page + RTW89_MAC_PAGE_SIZE; j += 16) { - seq_printf(m, "%08xh : ", 0x18600000 + j); + p += scnprintf(p, end - p, "%08xh : ", 0x18600000 + j); for (k = 0; k < 4; k++) { val = rtw89_read32(rtwdev, j + (k << 2)); - seq_printf(m, "%08x ", val); + p += scnprintf(p, end - p, "%08x ", val); } - seq_puts(m, "\n"); + p += scnprintf(p, end - p, "\n"); } } - return 0; + return p - buf; } static ssize_t -rtw89_debug_priv_mac_mem_dump_select(struct file *filp, - const char __user *user_buf, - size_t count, loff_t *loff) +rtw89_debug_priv_mac_mem_dump_select(struct rtw89_dev *rtwdev, + struct rtw89_debugfs_priv *debugfs_priv, + const char *buf, size_t count) { - struct seq_file *m = (struct seq_file *)filp->private_data; - struct rtw89_debugfs_priv *debugfs_priv = m->private; - struct rtw89_dev *rtwdev = debugfs_priv->rtwdev; - char buf[32]; - size_t buf_size; u32 sel, start_addr, len; int num; - buf_size = min(count, sizeof(buf) - 1); - if (copy_from_user(buf, user_buf, buf_size)) - return -EFAULT; - - buf[buf_size] = '\0'; num = sscanf(buf, "%x %x %x", &sel, &start_addr, &len); if (num != 3) { rtw89_info(rtwdev, "invalid format: <sel> <start> <len>\n"); @@ -1066,15 +1128,16 @@ rtw89_debug_priv_mac_mem_dump_select(struct file *filp, return count; } -static void rtw89_debug_dump_mac_mem(struct seq_file *m, - struct rtw89_dev *rtwdev, - u8 sel, u32 start_addr, u32 len) +static int rtw89_debug_dump_mac_mem(struct rtw89_dev *rtwdev, + char *buf, size_t bufsz, + u8 sel, u32 start_addr, u32 len) { const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; u32 filter_model_addr = mac->filter_model_addr; u32 indir_access_addr = mac->indir_access_addr; u32 base_addr, start_page, residue; - u32 i, j, p, pages; + char *p = buf, *end = buf + bufsz; + u32 i, j, pp, pages; u32 dump_len, remain; u32 val; @@ -1085,32 +1148,37 @@ static void rtw89_debug_dump_mac_mem(struct seq_file *m, base_addr = mac->mem_base_addrs[sel]; base_addr += start_page * MAC_MEM_DUMP_PAGE_SIZE; - for (p = 0; p < pages; p++) { + for (pp = 0; pp < pages; pp++) { dump_len = min_t(u32, remain, MAC_MEM_DUMP_PAGE_SIZE); rtw89_write32(rtwdev, filter_model_addr, base_addr); for (i = indir_access_addr + residue; i < indir_access_addr + dump_len;) { - seq_printf(m, "%08xh:", i); + p += scnprintf(p, end - p, "%08xh:", i); for (j = 0; j < 4 && i < indir_access_addr + dump_len; j++, i += 4) { val = rtw89_read32(rtwdev, i); - seq_printf(m, " %08x", val); + p += scnprintf(p, end - p, " %08x", val); remain -= 4; } - seq_puts(m, "\n"); + p += scnprintf(p, end - p, "\n"); } base_addr += MAC_MEM_DUMP_PAGE_SIZE; } + + return p - buf; } -static int -rtw89_debug_priv_mac_mem_dump_get(struct seq_file *m, void *v) +static ssize_t +rtw89_debug_priv_mac_mem_dump_get(struct rtw89_dev *rtwdev, + struct rtw89_debugfs_priv *debugfs_priv, + char *buf, size_t bufsz) { - struct rtw89_debugfs_priv *debugfs_priv = m->private; - struct rtw89_dev *rtwdev = debugfs_priv->rtwdev; + char *p = buf, *end = buf + bufsz; bool grant_read = false; + lockdep_assert_wiphy(rtwdev->hw->wiphy); + if (debugfs_priv->mac_mem.sel >= RTW89_MAC_MEM_NUM) return -ENOENT; @@ 
-1127,40 +1195,28 @@ rtw89_debug_priv_mac_mem_dump_get(struct seq_file *m, void *v) } } - mutex_lock(&rtwdev->mutex); rtw89_leave_ps_mode(rtwdev); if (grant_read) rtw89_write32_set(rtwdev, R_AX_TCR1, B_AX_TCR_FORCE_READ_TXDFIFO); - rtw89_debug_dump_mac_mem(m, rtwdev, - debugfs_priv->mac_mem.sel, - debugfs_priv->mac_mem.start, - debugfs_priv->mac_mem.len); + p += rtw89_debug_dump_mac_mem(rtwdev, p, end - p, + debugfs_priv->mac_mem.sel, + debugfs_priv->mac_mem.start, + debugfs_priv->mac_mem.len); if (grant_read) rtw89_write32_clr(rtwdev, R_AX_TCR1, B_AX_TCR_FORCE_READ_TXDFIFO); - mutex_unlock(&rtwdev->mutex); - return 0; + return p - buf; } static ssize_t -rtw89_debug_priv_mac_dbg_port_dump_select(struct file *filp, - const char __user *user_buf, - size_t count, loff_t *loff) +rtw89_debug_priv_mac_dbg_port_dump_select(struct rtw89_dev *rtwdev, + struct rtw89_debugfs_priv *debugfs_priv, + const char *buf, size_t count) { - struct seq_file *m = (struct seq_file *)filp->private_data; - struct rtw89_debugfs_priv *debugfs_priv = m->private; - struct rtw89_dev *rtwdev = debugfs_priv->rtwdev; - char buf[32]; - size_t buf_size; int sel, set; int num; bool enable; - buf_size = min(count, sizeof(buf) - 1); - if (copy_from_user(buf, user_buf, buf_size)) - return -EFAULT; - - buf[buf_size] = '\0'; num = sscanf(buf, "%d %d", &sel, &set); if (num != 2) { rtw89_info(rtwdev, "invalid format: <sel> <set>\n"); @@ -1196,13 +1252,13 @@ rtw89_debug_priv_mac_dbg_port_dump_select(struct file *filp, } static int rtw89_debug_mac_dump_ss_dbg(struct rtw89_dev *rtwdev, - struct seq_file *m) + char *buf, size_t bufsz) { return 0; } static int rtw89_debug_mac_dump_dle_dbg(struct rtw89_dev *rtwdev, - struct seq_file *m) + char *buf, size_t bufsz) { #define DLE_DFI_DUMP(__type, __target, __sel) \ ({ \ @@ -1231,7 +1287,7 @@ static int rtw89_debug_mac_dump_dle_dbg(struct rtw89_dev *rtwdev, __data; \ }) -#define DLE_DFI_FREE_PAGE_DUMP(__m, __type) \ +#define DLE_DFI_FREE_PAGE_DUMP(__p, __end, __type) \ ({ \ u32 __freepg, __pubpg; \ u32 __freepg_head, __freepg_tail, __pubpg_num; \ @@ -1241,24 +1297,25 @@ static int rtw89_debug_mac_dump_dle_dbg(struct rtw89_dev *rtwdev, __freepg_head = FIELD_GET(B_AX_DLE_FREE_HEADPG, __freepg); \ __freepg_tail = FIELD_GET(B_AX_DLE_FREE_TAILPG, __freepg); \ __pubpg_num = FIELD_GET(B_AX_DLE_PUB_PGNUM, __pubpg); \ - seq_printf(__m, "[%s] freepg head: %d\n", \ - #__type, __freepg_head); \ - seq_printf(__m, "[%s] freepg tail: %d\n", \ - #__type, __freepg_tail); \ - seq_printf(__m, "[%s] pubpg num : %d\n", \ - #__type, __pubpg_num); \ + __p += scnprintf(__p, __end - __p, "[%s] freepg head: %d\n", \ + #__type, __freepg_head); \ + __p += scnprintf(__p, __end - __p, "[%s] freepg tail: %d\n", \ + #__type, __freepg_tail); \ + __p += scnprintf(__p, __end - __p, "[%s] pubpg num : %d\n", \ + #__type, __pubpg_num); \ }) -#define case_QUOTA(__m, __type, __id) \ +#define case_QUOTA(__p, __end, __type, __id) \ case __type##_QTAID_##__id: \ - val32 = DLE_DFI_DUMP(__type, QUOTA, __type##_QTAID_##__id); \ + val32 = DLE_DFI_DUMP(__type, QUOTA, __type##_QTAID_##__id); \ rsv_pgnum = FIELD_GET(B_AX_DLE_RSV_PGNUM, val32); \ use_pgnum = FIELD_GET(B_AX_DLE_USE_PGNUM, val32); \ - seq_printf(__m, "[%s][%s] rsv_pgnum: %d\n", \ - #__type, #__id, rsv_pgnum); \ - seq_printf(__m, "[%s][%s] use_pgnum: %d\n", \ - #__type, #__id, use_pgnum); \ + __p += scnprintf(__p, __end - __p, "[%s][%s] rsv_pgnum: %d\n", \ + #__type, #__id, rsv_pgnum); \ + __p += scnprintf(__p, __end - __p, "[%s][%s] use_pgnum: %d\n", \ + #__type, #__id, use_pgnum); \ 
break + char *p = buf, *end = buf + bufsz; u32 quota_id; u32 val32; u16 rsv_pgnum, use_pgnum; @@ -1266,38 +1323,39 @@ static int rtw89_debug_mac_dump_dle_dbg(struct rtw89_dev *rtwdev, ret = rtw89_mac_check_mac_en(rtwdev, 0, RTW89_DMAC_SEL); if (ret) { - seq_puts(m, "[DLE] : DMAC not enabled\n"); - return ret; + p += scnprintf(p, end - p, "[DLE] : DMAC not enabled\n"); + goto out; } - DLE_DFI_FREE_PAGE_DUMP(m, WDE); - DLE_DFI_FREE_PAGE_DUMP(m, PLE); + DLE_DFI_FREE_PAGE_DUMP(p, end, WDE); + DLE_DFI_FREE_PAGE_DUMP(p, end, PLE); for (quota_id = 0; quota_id <= WDE_QTAID_CPUIO; quota_id++) { switch (quota_id) { - case_QUOTA(m, WDE, HOST_IF); - case_QUOTA(m, WDE, WLAN_CPU); - case_QUOTA(m, WDE, DATA_CPU); - case_QUOTA(m, WDE, PKTIN); - case_QUOTA(m, WDE, CPUIO); + case_QUOTA(p, end, WDE, HOST_IF); + case_QUOTA(p, end, WDE, WLAN_CPU); + case_QUOTA(p, end, WDE, DATA_CPU); + case_QUOTA(p, end, WDE, PKTIN); + case_QUOTA(p, end, WDE, CPUIO); } } for (quota_id = 0; quota_id <= PLE_QTAID_CPUIO; quota_id++) { switch (quota_id) { - case_QUOTA(m, PLE, B0_TXPL); - case_QUOTA(m, PLE, B1_TXPL); - case_QUOTA(m, PLE, C2H); - case_QUOTA(m, PLE, H2C); - case_QUOTA(m, PLE, WLAN_CPU); - case_QUOTA(m, PLE, MPDU); - case_QUOTA(m, PLE, CMAC0_RX); - case_QUOTA(m, PLE, CMAC1_RX); - case_QUOTA(m, PLE, CMAC1_BBRPT); - case_QUOTA(m, PLE, WDRLS); - case_QUOTA(m, PLE, CPUIO); + case_QUOTA(p, end, PLE, B0_TXPL); + case_QUOTA(p, end, PLE, B1_TXPL); + case_QUOTA(p, end, PLE, C2H); + case_QUOTA(p, end, PLE, H2C); + case_QUOTA(p, end, PLE, WLAN_CPU); + case_QUOTA(p, end, PLE, MPDU); + case_QUOTA(p, end, PLE, CMAC0_RX); + case_QUOTA(p, end, PLE, CMAC1_RX); + case_QUOTA(p, end, PLE, CMAC1_BBRPT); + case_QUOTA(p, end, PLE, WDRLS); + case_QUOTA(p, end, PLE, CPUIO); } } - return 0; +out: + return p - buf; #undef case_QUOTA #undef DLE_DFI_DUMP @@ -1305,73 +1363,88 @@ static int rtw89_debug_mac_dump_dle_dbg(struct rtw89_dev *rtwdev, } static int rtw89_debug_mac_dump_dmac_dbg(struct rtw89_dev *rtwdev, - struct seq_file *m) + char *buf, size_t bufsz) { const struct rtw89_chip_info *chip = rtwdev->chip; + char *p = buf, *end = buf + bufsz; u32 dmac_err; int i, ret; ret = rtw89_mac_check_mac_en(rtwdev, 0, RTW89_DMAC_SEL); if (ret) { - seq_puts(m, "[DMAC] : DMAC not enabled\n"); - return ret; + p += scnprintf(p, end - p, "[DMAC] : DMAC not enabled\n"); + goto out; } dmac_err = rtw89_read32(rtwdev, R_AX_DMAC_ERR_ISR); - seq_printf(m, "R_AX_DMAC_ERR_ISR=0x%08x\n", dmac_err); - seq_printf(m, "R_AX_DMAC_ERR_IMR=0x%08x\n", - rtw89_read32(rtwdev, R_AX_DMAC_ERR_IMR)); + p += scnprintf(p, end - p, "R_AX_DMAC_ERR_ISR=0x%08x\n", dmac_err); + p += scnprintf(p, end - p, "R_AX_DMAC_ERR_IMR=0x%08x\n", + rtw89_read32(rtwdev, R_AX_DMAC_ERR_IMR)); if (dmac_err) { - seq_printf(m, "R_AX_WDE_ERR_FLAG_CFG=0x%08x\n", - rtw89_read32(rtwdev, R_AX_WDE_ERR_FLAG_CFG_NUM1)); - seq_printf(m, "R_AX_PLE_ERR_FLAG_CFG=0x%08x\n", - rtw89_read32(rtwdev, R_AX_PLE_ERR_FLAG_CFG_NUM1)); + p += scnprintf(p, end - p, "R_AX_WDE_ERR_FLAG_CFG=0x%08x\n", + rtw89_read32(rtwdev, R_AX_WDE_ERR_FLAG_CFG_NUM1)); + p += scnprintf(p, end - p, "R_AX_PLE_ERR_FLAG_CFG=0x%08x\n", + rtw89_read32(rtwdev, R_AX_PLE_ERR_FLAG_CFG_NUM1)); if (chip->chip_id == RTL8852C) { - seq_printf(m, "R_AX_PLE_ERRFLAG_MSG=0x%08x\n", - rtw89_read32(rtwdev, R_AX_PLE_ERRFLAG_MSG)); - seq_printf(m, "R_AX_WDE_ERRFLAG_MSG=0x%08x\n", - rtw89_read32(rtwdev, R_AX_WDE_ERRFLAG_MSG)); - seq_printf(m, "R_AX_PLE_DBGERR_LOCKEN=0x%08x\n", - rtw89_read32(rtwdev, R_AX_PLE_DBGERR_LOCKEN)); - seq_printf(m, "R_AX_PLE_DBGERR_STS=0x%08x\n", 
- rtw89_read32(rtwdev, R_AX_PLE_DBGERR_STS)); + p += scnprintf(p, end - p, + "R_AX_PLE_ERRFLAG_MSG=0x%08x\n", + rtw89_read32(rtwdev, R_AX_PLE_ERRFLAG_MSG)); + p += scnprintf(p, end - p, + "R_AX_WDE_ERRFLAG_MSG=0x%08x\n", + rtw89_read32(rtwdev, R_AX_WDE_ERRFLAG_MSG)); + p += scnprintf(p, end - p, + "R_AX_PLE_DBGERR_LOCKEN=0x%08x\n", + rtw89_read32(rtwdev, R_AX_PLE_DBGERR_LOCKEN)); + p += scnprintf(p, end - p, + "R_AX_PLE_DBGERR_STS=0x%08x\n", + rtw89_read32(rtwdev, R_AX_PLE_DBGERR_STS)); } } if (dmac_err & B_AX_WDRLS_ERR_FLAG) { - seq_printf(m, "R_AX_WDRLS_ERR_IMR=0x%08x\n", - rtw89_read32(rtwdev, R_AX_WDRLS_ERR_IMR)); - seq_printf(m, "R_AX_WDRLS_ERR_ISR=0x%08x\n", - rtw89_read32(rtwdev, R_AX_WDRLS_ERR_ISR)); + p += scnprintf(p, end - p, "R_AX_WDRLS_ERR_IMR=0x%08x\n", + rtw89_read32(rtwdev, R_AX_WDRLS_ERR_IMR)); + p += scnprintf(p, end - p, "R_AX_WDRLS_ERR_ISR=0x%08x\n", + rtw89_read32(rtwdev, R_AX_WDRLS_ERR_ISR)); if (chip->chip_id == RTL8852C) - seq_printf(m, "R_AX_RPQ_RXBD_IDX=0x%08x\n", - rtw89_read32(rtwdev, R_AX_RPQ_RXBD_IDX_V1)); + p += scnprintf(p, end - p, + "R_AX_RPQ_RXBD_IDX=0x%08x\n", + rtw89_read32(rtwdev, R_AX_RPQ_RXBD_IDX_V1)); else - seq_printf(m, "R_AX_RPQ_RXBD_IDX=0x%08x\n", - rtw89_read32(rtwdev, R_AX_RPQ_RXBD_IDX)); + p += scnprintf(p, end - p, + "R_AX_RPQ_RXBD_IDX=0x%08x\n", + rtw89_read32(rtwdev, R_AX_RPQ_RXBD_IDX)); } if (dmac_err & B_AX_WSEC_ERR_FLAG) { if (chip->chip_id == RTL8852C) { - seq_printf(m, "R_AX_SEC_ERR_IMR=0x%08x\n", - rtw89_read32(rtwdev, R_AX_SEC_ERROR_FLAG_IMR)); - seq_printf(m, "R_AX_SEC_ERR_ISR=0x%08x\n", - rtw89_read32(rtwdev, R_AX_SEC_ERROR_FLAG)); - seq_printf(m, "R_AX_SEC_ENG_CTRL=0x%08x\n", - rtw89_read32(rtwdev, R_AX_SEC_ENG_CTRL)); - seq_printf(m, "R_AX_SEC_MPDU_PROC=0x%08x\n", - rtw89_read32(rtwdev, R_AX_SEC_MPDU_PROC)); - seq_printf(m, "R_AX_SEC_CAM_ACCESS=0x%08x\n", - rtw89_read32(rtwdev, R_AX_SEC_CAM_ACCESS)); - seq_printf(m, "R_AX_SEC_CAM_RDATA=0x%08x\n", - rtw89_read32(rtwdev, R_AX_SEC_CAM_RDATA)); - seq_printf(m, "R_AX_SEC_DEBUG1=0x%08x\n", - rtw89_read32(rtwdev, R_AX_SEC_DEBUG1)); - seq_printf(m, "R_AX_SEC_TX_DEBUG=0x%08x\n", - rtw89_read32(rtwdev, R_AX_SEC_TX_DEBUG)); - seq_printf(m, "R_AX_SEC_RX_DEBUG=0x%08x\n", - rtw89_read32(rtwdev, R_AX_SEC_RX_DEBUG)); + p += scnprintf(p, end - p, + "R_AX_SEC_ERR_IMR=0x%08x\n", + rtw89_read32(rtwdev, R_AX_SEC_ERROR_FLAG_IMR)); + p += scnprintf(p, end - p, + "R_AX_SEC_ERR_ISR=0x%08x\n", + rtw89_read32(rtwdev, R_AX_SEC_ERROR_FLAG)); + p += scnprintf(p, end - p, + "R_AX_SEC_ENG_CTRL=0x%08x\n", + rtw89_read32(rtwdev, R_AX_SEC_ENG_CTRL)); + p += scnprintf(p, end - p, + "R_AX_SEC_MPDU_PROC=0x%08x\n", + rtw89_read32(rtwdev, R_AX_SEC_MPDU_PROC)); + p += scnprintf(p, end - p, + "R_AX_SEC_CAM_ACCESS=0x%08x\n", + rtw89_read32(rtwdev, R_AX_SEC_CAM_ACCESS)); + p += scnprintf(p, end - p, + "R_AX_SEC_CAM_RDATA=0x%08x\n", + rtw89_read32(rtwdev, R_AX_SEC_CAM_RDATA)); + p += scnprintf(p, end - p, "R_AX_SEC_DEBUG1=0x%08x\n", + rtw89_read32(rtwdev, R_AX_SEC_DEBUG1)); + p += scnprintf(p, end - p, + "R_AX_SEC_TX_DEBUG=0x%08x\n", + rtw89_read32(rtwdev, R_AX_SEC_TX_DEBUG)); + p += scnprintf(p, end - p, + "R_AX_SEC_RX_DEBUG=0x%08x\n", + rtw89_read32(rtwdev, R_AX_SEC_RX_DEBUG)); rtw89_write32_mask(rtwdev, R_AX_DBG_CTRL, B_AX_DBG_SEL0, 0x8B); @@ -1382,187 +1455,229 @@ static int rtw89_debug_mac_dump_dmac_dbg(struct rtw89_dev *rtwdev, for (i = 0; i < 0x10; i++) { rtw89_write32_mask(rtwdev, R_AX_SEC_ENG_CTRL, B_AX_SEC_DBG_PORT_FIELD_MASK, i); - seq_printf(m, "sel=%x,R_AX_SEC_DEBUG2=0x%08x\n", - i, rtw89_read32(rtwdev, 
R_AX_SEC_DEBUG2)); + p += scnprintf(p, end - p, + "sel=%x,R_AX_SEC_DEBUG2=0x%08x\n", + i, + rtw89_read32(rtwdev, R_AX_SEC_DEBUG2)); } } else { - seq_printf(m, "R_AX_SEC_ERR_IMR_ISR=0x%08x\n", - rtw89_read32(rtwdev, R_AX_SEC_DEBUG)); - seq_printf(m, "R_AX_SEC_ENG_CTRL=0x%08x\n", - rtw89_read32(rtwdev, R_AX_SEC_ENG_CTRL)); - seq_printf(m, "R_AX_SEC_MPDU_PROC=0x%08x\n", - rtw89_read32(rtwdev, R_AX_SEC_MPDU_PROC)); - seq_printf(m, "R_AX_SEC_CAM_ACCESS=0x%08x\n", - rtw89_read32(rtwdev, R_AX_SEC_CAM_ACCESS)); - seq_printf(m, "R_AX_SEC_CAM_RDATA=0x%08x\n", - rtw89_read32(rtwdev, R_AX_SEC_CAM_RDATA)); - seq_printf(m, "R_AX_SEC_CAM_WDATA=0x%08x\n", - rtw89_read32(rtwdev, R_AX_SEC_CAM_WDATA)); - seq_printf(m, "R_AX_SEC_TX_DEBUG=0x%08x\n", - rtw89_read32(rtwdev, R_AX_SEC_TX_DEBUG)); - seq_printf(m, "R_AX_SEC_RX_DEBUG=0x%08x\n", - rtw89_read32(rtwdev, R_AX_SEC_RX_DEBUG)); - seq_printf(m, "R_AX_SEC_TRX_PKT_CNT=0x%08x\n", - rtw89_read32(rtwdev, R_AX_SEC_TRX_PKT_CNT)); - seq_printf(m, "R_AX_SEC_TRX_BLK_CNT=0x%08x\n", - rtw89_read32(rtwdev, R_AX_SEC_TRX_BLK_CNT)); + p += scnprintf(p, end - p, + "R_AX_SEC_ERR_IMR_ISR=0x%08x\n", + rtw89_read32(rtwdev, R_AX_SEC_DEBUG)); + p += scnprintf(p, end - p, + "R_AX_SEC_ENG_CTRL=0x%08x\n", + rtw89_read32(rtwdev, R_AX_SEC_ENG_CTRL)); + p += scnprintf(p, end - p, + "R_AX_SEC_MPDU_PROC=0x%08x\n", + rtw89_read32(rtwdev, R_AX_SEC_MPDU_PROC)); + p += scnprintf(p, end - p, + "R_AX_SEC_CAM_ACCESS=0x%08x\n", + rtw89_read32(rtwdev, R_AX_SEC_CAM_ACCESS)); + p += scnprintf(p, end - p, + "R_AX_SEC_CAM_RDATA=0x%08x\n", + rtw89_read32(rtwdev, R_AX_SEC_CAM_RDATA)); + p += scnprintf(p, end - p, + "R_AX_SEC_CAM_WDATA=0x%08x\n", + rtw89_read32(rtwdev, R_AX_SEC_CAM_WDATA)); + p += scnprintf(p, end - p, + "R_AX_SEC_TX_DEBUG=0x%08x\n", + rtw89_read32(rtwdev, R_AX_SEC_TX_DEBUG)); + p += scnprintf(p, end - p, + "R_AX_SEC_RX_DEBUG=0x%08x\n", + rtw89_read32(rtwdev, R_AX_SEC_RX_DEBUG)); + p += scnprintf(p, end - p, + "R_AX_SEC_TRX_PKT_CNT=0x%08x\n", + rtw89_read32(rtwdev, R_AX_SEC_TRX_PKT_CNT)); + p += scnprintf(p, end - p, + "R_AX_SEC_TRX_BLK_CNT=0x%08x\n", + rtw89_read32(rtwdev, R_AX_SEC_TRX_BLK_CNT)); } } if (dmac_err & B_AX_MPDU_ERR_FLAG) { - seq_printf(m, "R_AX_MPDU_TX_ERR_IMR=0x%08x\n", - rtw89_read32(rtwdev, R_AX_MPDU_TX_ERR_IMR)); - seq_printf(m, "R_AX_MPDU_TX_ERR_ISR=0x%08x\n", - rtw89_read32(rtwdev, R_AX_MPDU_TX_ERR_ISR)); - seq_printf(m, "R_AX_MPDU_RX_ERR_IMR=0x%08x\n", - rtw89_read32(rtwdev, R_AX_MPDU_RX_ERR_IMR)); - seq_printf(m, "R_AX_MPDU_RX_ERR_ISR=0x%08x\n", - rtw89_read32(rtwdev, R_AX_MPDU_RX_ERR_ISR)); + p += scnprintf(p, end - p, "R_AX_MPDU_TX_ERR_IMR=0x%08x\n", + rtw89_read32(rtwdev, R_AX_MPDU_TX_ERR_IMR)); + p += scnprintf(p, end - p, "R_AX_MPDU_TX_ERR_ISR=0x%08x\n", + rtw89_read32(rtwdev, R_AX_MPDU_TX_ERR_ISR)); + p += scnprintf(p, end - p, "R_AX_MPDU_RX_ERR_IMR=0x%08x\n", + rtw89_read32(rtwdev, R_AX_MPDU_RX_ERR_IMR)); + p += scnprintf(p, end - p, "R_AX_MPDU_RX_ERR_ISR=0x%08x\n", + rtw89_read32(rtwdev, R_AX_MPDU_RX_ERR_ISR)); } if (dmac_err & B_AX_STA_SCHEDULER_ERR_FLAG) { - seq_printf(m, "R_AX_STA_SCHEDULER_ERR_IMR=0x%08x\n", - rtw89_read32(rtwdev, R_AX_STA_SCHEDULER_ERR_IMR)); - seq_printf(m, "R_AX_STA_SCHEDULER_ERR_ISR=0x%08x\n", - rtw89_read32(rtwdev, R_AX_STA_SCHEDULER_ERR_ISR)); + p += scnprintf(p, end - p, + "R_AX_STA_SCHEDULER_ERR_IMR=0x%08x\n", + rtw89_read32(rtwdev, R_AX_STA_SCHEDULER_ERR_IMR)); + p += scnprintf(p, end - p, + "R_AX_STA_SCHEDULER_ERR_ISR=0x%08x\n", + rtw89_read32(rtwdev, R_AX_STA_SCHEDULER_ERR_ISR)); } if (dmac_err & B_AX_WDE_DLE_ERR_FLAG) { - 
seq_printf(m, "R_AX_WDE_ERR_IMR=0x%08x\n", - rtw89_read32(rtwdev, R_AX_WDE_ERR_IMR)); - seq_printf(m, "R_AX_WDE_ERR_ISR=0x%08x\n", - rtw89_read32(rtwdev, R_AX_WDE_ERR_ISR)); - seq_printf(m, "R_AX_PLE_ERR_IMR=0x%08x\n", - rtw89_read32(rtwdev, R_AX_PLE_ERR_IMR)); - seq_printf(m, "R_AX_PLE_ERR_FLAG_ISR=0x%08x\n", - rtw89_read32(rtwdev, R_AX_PLE_ERR_FLAG_ISR)); + p += scnprintf(p, end - p, "R_AX_WDE_ERR_IMR=0x%08x\n", + rtw89_read32(rtwdev, R_AX_WDE_ERR_IMR)); + p += scnprintf(p, end - p, "R_AX_WDE_ERR_ISR=0x%08x\n", + rtw89_read32(rtwdev, R_AX_WDE_ERR_ISR)); + p += scnprintf(p, end - p, "R_AX_PLE_ERR_IMR=0x%08x\n", + rtw89_read32(rtwdev, R_AX_PLE_ERR_IMR)); + p += scnprintf(p, end - p, "R_AX_PLE_ERR_FLAG_ISR=0x%08x\n", + rtw89_read32(rtwdev, R_AX_PLE_ERR_FLAG_ISR)); } if (dmac_err & B_AX_TXPKTCTRL_ERR_FLAG) { if (chip->chip_id == RTL8852C) { - seq_printf(m, "R_AX_TXPKTCTL_B0_ERRFLAG_IMR=0x%08x\n", - rtw89_read32(rtwdev, R_AX_TXPKTCTL_B0_ERRFLAG_IMR)); - seq_printf(m, "R_AX_TXPKTCTL_B0_ERRFLAG_ISR=0x%08x\n", - rtw89_read32(rtwdev, R_AX_TXPKTCTL_B0_ERRFLAG_ISR)); - seq_printf(m, "R_AX_TXPKTCTL_B1_ERRFLAG_IMR=0x%08x\n", - rtw89_read32(rtwdev, R_AX_TXPKTCTL_B1_ERRFLAG_IMR)); - seq_printf(m, "R_AX_TXPKTCTL_B1_ERRFLAG_ISR=0x%08x\n", - rtw89_read32(rtwdev, R_AX_TXPKTCTL_B1_ERRFLAG_ISR)); + p += scnprintf(p, end - p, + "R_AX_TXPKTCTL_B0_ERRFLAG_IMR=0x%08x\n", + rtw89_read32(rtwdev, R_AX_TXPKTCTL_B0_ERRFLAG_IMR)); + p += scnprintf(p, end - p, + "R_AX_TXPKTCTL_B0_ERRFLAG_ISR=0x%08x\n", + rtw89_read32(rtwdev, R_AX_TXPKTCTL_B0_ERRFLAG_ISR)); + p += scnprintf(p, end - p, + "R_AX_TXPKTCTL_B1_ERRFLAG_IMR=0x%08x\n", + rtw89_read32(rtwdev, R_AX_TXPKTCTL_B1_ERRFLAG_IMR)); + p += scnprintf(p, end - p, + "R_AX_TXPKTCTL_B1_ERRFLAG_ISR=0x%08x\n", + rtw89_read32(rtwdev, R_AX_TXPKTCTL_B1_ERRFLAG_ISR)); } else { - seq_printf(m, "R_AX_TXPKTCTL_ERR_IMR_ISR=0x%08x\n", - rtw89_read32(rtwdev, R_AX_TXPKTCTL_ERR_IMR_ISR)); - seq_printf(m, "R_AX_TXPKTCTL_ERR_IMR_ISR_B1=0x%08x\n", - rtw89_read32(rtwdev, R_AX_TXPKTCTL_ERR_IMR_ISR_B1)); + p += scnprintf(p, end - p, + "R_AX_TXPKTCTL_ERR_IMR_ISR=0x%08x\n", + rtw89_read32(rtwdev, R_AX_TXPKTCTL_ERR_IMR_ISR)); + p += scnprintf(p, end - p, + "R_AX_TXPKTCTL_ERR_IMR_ISR_B1=0x%08x\n", + rtw89_read32(rtwdev, R_AX_TXPKTCTL_ERR_IMR_ISR_B1)); } } if (dmac_err & B_AX_PLE_DLE_ERR_FLAG) { - seq_printf(m, "R_AX_WDE_ERR_IMR=0x%08x\n", - rtw89_read32(rtwdev, R_AX_WDE_ERR_IMR)); - seq_printf(m, "R_AX_WDE_ERR_ISR=0x%08x\n", - rtw89_read32(rtwdev, R_AX_WDE_ERR_ISR)); - seq_printf(m, "R_AX_PLE_ERR_IMR=0x%08x\n", - rtw89_read32(rtwdev, R_AX_PLE_ERR_IMR)); - seq_printf(m, "R_AX_PLE_ERR_FLAG_ISR=0x%08x\n", - rtw89_read32(rtwdev, R_AX_PLE_ERR_FLAG_ISR)); - seq_printf(m, "R_AX_WD_CPUQ_OP_0=0x%08x\n", - rtw89_read32(rtwdev, R_AX_WD_CPUQ_OP_0)); - seq_printf(m, "R_AX_WD_CPUQ_OP_1=0x%08x\n", - rtw89_read32(rtwdev, R_AX_WD_CPUQ_OP_1)); - seq_printf(m, "R_AX_WD_CPUQ_OP_2=0x%08x\n", - rtw89_read32(rtwdev, R_AX_WD_CPUQ_OP_2)); - seq_printf(m, "R_AX_WD_CPUQ_OP_STATUS=0x%08x\n", - rtw89_read32(rtwdev, R_AX_WD_CPUQ_OP_STATUS)); - seq_printf(m, "R_AX_PL_CPUQ_OP_0=0x%08x\n", - rtw89_read32(rtwdev, R_AX_PL_CPUQ_OP_0)); - seq_printf(m, "R_AX_PL_CPUQ_OP_1=0x%08x\n", - rtw89_read32(rtwdev, R_AX_PL_CPUQ_OP_1)); - seq_printf(m, "R_AX_PL_CPUQ_OP_2=0x%08x\n", - rtw89_read32(rtwdev, R_AX_PL_CPUQ_OP_2)); - seq_printf(m, "R_AX_PL_CPUQ_OP_STATUS=0x%08x\n", - rtw89_read32(rtwdev, R_AX_PL_CPUQ_OP_STATUS)); + p += scnprintf(p, end - p, "R_AX_WDE_ERR_IMR=0x%08x\n", + rtw89_read32(rtwdev, R_AX_WDE_ERR_IMR)); + p += scnprintf(p, end - 
p, "R_AX_WDE_ERR_ISR=0x%08x\n", + rtw89_read32(rtwdev, R_AX_WDE_ERR_ISR)); + p += scnprintf(p, end - p, "R_AX_PLE_ERR_IMR=0x%08x\n", + rtw89_read32(rtwdev, R_AX_PLE_ERR_IMR)); + p += scnprintf(p, end - p, "R_AX_PLE_ERR_FLAG_ISR=0x%08x\n", + rtw89_read32(rtwdev, R_AX_PLE_ERR_FLAG_ISR)); + p += scnprintf(p, end - p, "R_AX_WD_CPUQ_OP_0=0x%08x\n", + rtw89_read32(rtwdev, R_AX_WD_CPUQ_OP_0)); + p += scnprintf(p, end - p, "R_AX_WD_CPUQ_OP_1=0x%08x\n", + rtw89_read32(rtwdev, R_AX_WD_CPUQ_OP_1)); + p += scnprintf(p, end - p, "R_AX_WD_CPUQ_OP_2=0x%08x\n", + rtw89_read32(rtwdev, R_AX_WD_CPUQ_OP_2)); + p += scnprintf(p, end - p, "R_AX_WD_CPUQ_OP_STATUS=0x%08x\n", + rtw89_read32(rtwdev, R_AX_WD_CPUQ_OP_STATUS)); + p += scnprintf(p, end - p, "R_AX_PL_CPUQ_OP_0=0x%08x\n", + rtw89_read32(rtwdev, R_AX_PL_CPUQ_OP_0)); + p += scnprintf(p, end - p, "R_AX_PL_CPUQ_OP_1=0x%08x\n", + rtw89_read32(rtwdev, R_AX_PL_CPUQ_OP_1)); + p += scnprintf(p, end - p, "R_AX_PL_CPUQ_OP_2=0x%08x\n", + rtw89_read32(rtwdev, R_AX_PL_CPUQ_OP_2)); + p += scnprintf(p, end - p, "R_AX_PL_CPUQ_OP_STATUS=0x%08x\n", + rtw89_read32(rtwdev, R_AX_PL_CPUQ_OP_STATUS)); if (chip->chip_id == RTL8852C) { - seq_printf(m, "R_AX_RX_CTRL0=0x%08x\n", - rtw89_read32(rtwdev, R_AX_RX_CTRL0)); - seq_printf(m, "R_AX_RX_CTRL1=0x%08x\n", - rtw89_read32(rtwdev, R_AX_RX_CTRL1)); - seq_printf(m, "R_AX_RX_CTRL2=0x%08x\n", - rtw89_read32(rtwdev, R_AX_RX_CTRL2)); + p += scnprintf(p, end - p, "R_AX_RX_CTRL0=0x%08x\n", + rtw89_read32(rtwdev, R_AX_RX_CTRL0)); + p += scnprintf(p, end - p, "R_AX_RX_CTRL1=0x%08x\n", + rtw89_read32(rtwdev, R_AX_RX_CTRL1)); + p += scnprintf(p, end - p, "R_AX_RX_CTRL2=0x%08x\n", + rtw89_read32(rtwdev, R_AX_RX_CTRL2)); } else { - seq_printf(m, "R_AX_RXDMA_PKT_INFO_0=0x%08x\n", - rtw89_read32(rtwdev, R_AX_RXDMA_PKT_INFO_0)); - seq_printf(m, "R_AX_RXDMA_PKT_INFO_1=0x%08x\n", - rtw89_read32(rtwdev, R_AX_RXDMA_PKT_INFO_1)); - seq_printf(m, "R_AX_RXDMA_PKT_INFO_2=0x%08x\n", - rtw89_read32(rtwdev, R_AX_RXDMA_PKT_INFO_2)); + p += scnprintf(p, end - p, + "R_AX_RXDMA_PKT_INFO_0=0x%08x\n", + rtw89_read32(rtwdev, R_AX_RXDMA_PKT_INFO_0)); + p += scnprintf(p, end - p, + "R_AX_RXDMA_PKT_INFO_1=0x%08x\n", + rtw89_read32(rtwdev, R_AX_RXDMA_PKT_INFO_1)); + p += scnprintf(p, end - p, + "R_AX_RXDMA_PKT_INFO_2=0x%08x\n", + rtw89_read32(rtwdev, R_AX_RXDMA_PKT_INFO_2)); } } if (dmac_err & B_AX_PKTIN_ERR_FLAG) { - seq_printf(m, "R_AX_PKTIN_ERR_IMR=0x%08x\n", - rtw89_read32(rtwdev, R_AX_PKTIN_ERR_IMR)); - seq_printf(m, "R_AX_PKTIN_ERR_ISR=0x%08x\n", - rtw89_read32(rtwdev, R_AX_PKTIN_ERR_ISR)); + p += scnprintf(p, end - p, "R_AX_PKTIN_ERR_IMR=0x%08x\n", + rtw89_read32(rtwdev, R_AX_PKTIN_ERR_IMR)); + p += scnprintf(p, end - p, "R_AX_PKTIN_ERR_ISR=0x%08x\n", + rtw89_read32(rtwdev, R_AX_PKTIN_ERR_ISR)); } if (dmac_err & B_AX_DISPATCH_ERR_FLAG) { - seq_printf(m, "R_AX_HOST_DISPATCHER_ERR_IMR=0x%08x\n", - rtw89_read32(rtwdev, R_AX_HOST_DISPATCHER_ERR_IMR)); - seq_printf(m, "R_AX_HOST_DISPATCHER_ERR_ISR=0x%08x\n", - rtw89_read32(rtwdev, R_AX_HOST_DISPATCHER_ERR_ISR)); - seq_printf(m, "R_AX_CPU_DISPATCHER_ERR_IMR=0x%08x\n", - rtw89_read32(rtwdev, R_AX_CPU_DISPATCHER_ERR_IMR)); - seq_printf(m, "R_AX_CPU_DISPATCHER_ERR_ISR=0x%08x\n", - rtw89_read32(rtwdev, R_AX_CPU_DISPATCHER_ERR_ISR)); - seq_printf(m, "R_AX_OTHER_DISPATCHER_ERR_IMR=0x%08x\n", - rtw89_read32(rtwdev, R_AX_OTHER_DISPATCHER_ERR_IMR)); - seq_printf(m, "R_AX_OTHER_DISPATCHER_ERR_ISR=0x%08x\n", - rtw89_read32(rtwdev, R_AX_OTHER_DISPATCHER_ERR_ISR)); + p += scnprintf(p, end - p, + 
"R_AX_HOST_DISPATCHER_ERR_IMR=0x%08x\n", + rtw89_read32(rtwdev, R_AX_HOST_DISPATCHER_ERR_IMR)); + p += scnprintf(p, end - p, + "R_AX_HOST_DISPATCHER_ERR_ISR=0x%08x\n", + rtw89_read32(rtwdev, R_AX_HOST_DISPATCHER_ERR_ISR)); + p += scnprintf(p, end - p, + "R_AX_CPU_DISPATCHER_ERR_IMR=0x%08x\n", + rtw89_read32(rtwdev, R_AX_CPU_DISPATCHER_ERR_IMR)); + p += scnprintf(p, end - p, + "R_AX_CPU_DISPATCHER_ERR_ISR=0x%08x\n", + rtw89_read32(rtwdev, R_AX_CPU_DISPATCHER_ERR_ISR)); + p += scnprintf(p, end - p, + "R_AX_OTHER_DISPATCHER_ERR_IMR=0x%08x\n", + rtw89_read32(rtwdev, R_AX_OTHER_DISPATCHER_ERR_IMR)); + p += scnprintf(p, end - p, + "R_AX_OTHER_DISPATCHER_ERR_ISR=0x%08x\n", + rtw89_read32(rtwdev, R_AX_OTHER_DISPATCHER_ERR_ISR)); } if (dmac_err & B_AX_BBRPT_ERR_FLAG) { if (chip->chip_id == RTL8852C) { - seq_printf(m, "R_AX_BBRPT_COM_ERR_IMR=0x%08x\n", - rtw89_read32(rtwdev, R_AX_BBRPT_COM_ERR_IMR)); - seq_printf(m, "R_AX_BBRPT_COM_ERR_ISR=0x%08x\n", - rtw89_read32(rtwdev, R_AX_BBRPT_COM_ERR_ISR)); - seq_printf(m, "R_AX_BBRPT_CHINFO_ERR_ISR=0x%08x\n", - rtw89_read32(rtwdev, R_AX_BBRPT_CHINFO_ERR_ISR)); - seq_printf(m, "R_AX_BBRPT_CHINFO_ERR_IMR=0x%08x\n", - rtw89_read32(rtwdev, R_AX_BBRPT_CHINFO_ERR_IMR)); - seq_printf(m, "R_AX_BBRPT_DFS_ERR_IMR=0x%08x\n", - rtw89_read32(rtwdev, R_AX_BBRPT_DFS_ERR_IMR)); - seq_printf(m, "R_AX_BBRPT_DFS_ERR_ISR=0x%08x\n", - rtw89_read32(rtwdev, R_AX_BBRPT_DFS_ERR_ISR)); + p += scnprintf(p, end - p, + "R_AX_BBRPT_COM_ERR_IMR=0x%08x\n", + rtw89_read32(rtwdev, R_AX_BBRPT_COM_ERR_IMR)); + p += scnprintf(p, end - p, + "R_AX_BBRPT_COM_ERR_ISR=0x%08x\n", + rtw89_read32(rtwdev, R_AX_BBRPT_COM_ERR_ISR)); + p += scnprintf(p, end - p, + "R_AX_BBRPT_CHINFO_ERR_ISR=0x%08x\n", + rtw89_read32(rtwdev, R_AX_BBRPT_CHINFO_ERR_ISR)); + p += scnprintf(p, end - p, + "R_AX_BBRPT_CHINFO_ERR_IMR=0x%08x\n", + rtw89_read32(rtwdev, R_AX_BBRPT_CHINFO_ERR_IMR)); + p += scnprintf(p, end - p, + "R_AX_BBRPT_DFS_ERR_IMR=0x%08x\n", + rtw89_read32(rtwdev, R_AX_BBRPT_DFS_ERR_IMR)); + p += scnprintf(p, end - p, + "R_AX_BBRPT_DFS_ERR_ISR=0x%08x\n", + rtw89_read32(rtwdev, R_AX_BBRPT_DFS_ERR_ISR)); } else { - seq_printf(m, "R_AX_BBRPT_COM_ERR_IMR_ISR=0x%08x\n", - rtw89_read32(rtwdev, R_AX_BBRPT_COM_ERR_IMR_ISR)); - seq_printf(m, "R_AX_BBRPT_CHINFO_ERR_ISR=0x%08x\n", - rtw89_read32(rtwdev, R_AX_BBRPT_CHINFO_ERR_ISR)); - seq_printf(m, "R_AX_BBRPT_CHINFO_ERR_IMR=0x%08x\n", - rtw89_read32(rtwdev, R_AX_BBRPT_CHINFO_ERR_IMR)); - seq_printf(m, "R_AX_BBRPT_DFS_ERR_IMR=0x%08x\n", - rtw89_read32(rtwdev, R_AX_BBRPT_DFS_ERR_IMR)); - seq_printf(m, "R_AX_BBRPT_DFS_ERR_ISR=0x%08x\n", - rtw89_read32(rtwdev, R_AX_BBRPT_DFS_ERR_ISR)); + p += scnprintf(p, end - p, + "R_AX_BBRPT_COM_ERR_IMR_ISR=0x%08x\n", + rtw89_read32(rtwdev, R_AX_BBRPT_COM_ERR_IMR_ISR)); + p += scnprintf(p, end - p, + "R_AX_BBRPT_CHINFO_ERR_ISR=0x%08x\n", + rtw89_read32(rtwdev, R_AX_BBRPT_CHINFO_ERR_ISR)); + p += scnprintf(p, end - p, + "R_AX_BBRPT_CHINFO_ERR_IMR=0x%08x\n", + rtw89_read32(rtwdev, R_AX_BBRPT_CHINFO_ERR_IMR)); + p += scnprintf(p, end - p, + "R_AX_BBRPT_DFS_ERR_IMR=0x%08x\n", + rtw89_read32(rtwdev, R_AX_BBRPT_DFS_ERR_IMR)); + p += scnprintf(p, end - p, + "R_AX_BBRPT_DFS_ERR_ISR=0x%08x\n", + rtw89_read32(rtwdev, R_AX_BBRPT_DFS_ERR_ISR)); } } if (dmac_err & B_AX_HAXIDMA_ERR_FLAG && chip->chip_id == RTL8852C) { - seq_printf(m, "R_AX_HAXIDMA_ERR_IMR=0x%08x\n", - rtw89_read32(rtwdev, R_AX_HAXI_IDCT_MSK)); - seq_printf(m, "R_AX_HAXIDMA_ERR_ISR=0x%08x\n", - rtw89_read32(rtwdev, R_AX_HAXI_IDCT)); + p += scnprintf(p, end - p, 
"R_AX_HAXIDMA_ERR_IMR=0x%08x\n", + rtw89_read32(rtwdev, R_AX_HAXI_IDCT_MSK)); + p += scnprintf(p, end - p, "R_AX_HAXIDMA_ERR_ISR=0x%08x\n", + rtw89_read32(rtwdev, R_AX_HAXI_IDCT)); } - return 0; +out: + return p - buf; } static int rtw89_debug_mac_dump_cmac_err(struct rtw89_dev *rtwdev, - struct seq_file *m, + char *buf, size_t bufsz, enum rtw89_mac_idx band) { const struct rtw89_chip_info *chip = rtwdev->chip; + char *p = buf, *end = buf + bufsz; u32 offset = 0; u32 cmac_err; int ret; @@ -1570,96 +1685,127 @@ static int rtw89_debug_mac_dump_cmac_err(struct rtw89_dev *rtwdev, ret = rtw89_mac_check_mac_en(rtwdev, band, RTW89_CMAC_SEL); if (ret) { if (band) - seq_puts(m, "[CMAC] : CMAC1 not enabled\n"); + p += scnprintf(p, end - p, + "[CMAC] : CMAC1 not enabled\n"); else - seq_puts(m, "[CMAC] : CMAC0 not enabled\n"); - return ret; + p += scnprintf(p, end - p, + "[CMAC] : CMAC0 not enabled\n"); + goto out; } if (band) offset = RTW89_MAC_AX_BAND_REG_OFFSET; cmac_err = rtw89_read32(rtwdev, R_AX_CMAC_ERR_ISR + offset); - seq_printf(m, "R_AX_CMAC_ERR_ISR [%d]=0x%08x\n", band, - rtw89_read32(rtwdev, R_AX_CMAC_ERR_ISR + offset)); - seq_printf(m, "R_AX_CMAC_FUNC_EN [%d]=0x%08x\n", band, - rtw89_read32(rtwdev, R_AX_CMAC_FUNC_EN + offset)); - seq_printf(m, "R_AX_CK_EN [%d]=0x%08x\n", band, - rtw89_read32(rtwdev, R_AX_CK_EN + offset)); + p += scnprintf(p, end - p, "R_AX_CMAC_ERR_ISR [%d]=0x%08x\n", band, + rtw89_read32(rtwdev, R_AX_CMAC_ERR_ISR + offset)); + p += scnprintf(p, end - p, "R_AX_CMAC_FUNC_EN [%d]=0x%08x\n", band, + rtw89_read32(rtwdev, R_AX_CMAC_FUNC_EN + offset)); + p += scnprintf(p, end - p, "R_AX_CK_EN [%d]=0x%08x\n", band, + rtw89_read32(rtwdev, R_AX_CK_EN + offset)); if (cmac_err & B_AX_SCHEDULE_TOP_ERR_IND) { - seq_printf(m, "R_AX_SCHEDULE_ERR_IMR [%d]=0x%08x\n", band, - rtw89_read32(rtwdev, R_AX_SCHEDULE_ERR_IMR + offset)); - seq_printf(m, "R_AX_SCHEDULE_ERR_ISR [%d]=0x%08x\n", band, - rtw89_read32(rtwdev, R_AX_SCHEDULE_ERR_ISR + offset)); + p += scnprintf(p, end - p, + "R_AX_SCHEDULE_ERR_IMR [%d]=0x%08x\n", band, + rtw89_read32(rtwdev, R_AX_SCHEDULE_ERR_IMR + offset)); + p += scnprintf(p, end - p, + "R_AX_SCHEDULE_ERR_ISR [%d]=0x%08x\n", band, + rtw89_read32(rtwdev, R_AX_SCHEDULE_ERR_ISR + offset)); } if (cmac_err & B_AX_PTCL_TOP_ERR_IND) { - seq_printf(m, "R_AX_PTCL_IMR0 [%d]=0x%08x\n", band, - rtw89_read32(rtwdev, R_AX_PTCL_IMR0 + offset)); - seq_printf(m, "R_AX_PTCL_ISR0 [%d]=0x%08x\n", band, - rtw89_read32(rtwdev, R_AX_PTCL_ISR0 + offset)); + p += scnprintf(p, end - p, "R_AX_PTCL_IMR0 [%d]=0x%08x\n", + band, + rtw89_read32(rtwdev, R_AX_PTCL_IMR0 + offset)); + p += scnprintf(p, end - p, "R_AX_PTCL_ISR0 [%d]=0x%08x\n", + band, + rtw89_read32(rtwdev, R_AX_PTCL_ISR0 + offset)); } if (cmac_err & B_AX_DMA_TOP_ERR_IND) { if (chip->chip_id == RTL8852C) { - seq_printf(m, "R_AX_RX_ERR_FLAG [%d]=0x%08x\n", band, - rtw89_read32(rtwdev, R_AX_RX_ERR_FLAG + offset)); - seq_printf(m, "R_AX_RX_ERR_FLAG_IMR [%d]=0x%08x\n", band, - rtw89_read32(rtwdev, R_AX_RX_ERR_FLAG_IMR + offset)); + p += scnprintf(p, end - p, + "R_AX_RX_ERR_FLAG [%d]=0x%08x\n", band, + rtw89_read32(rtwdev, R_AX_RX_ERR_FLAG + offset)); + p += scnprintf(p, end - p, + "R_AX_RX_ERR_FLAG_IMR [%d]=0x%08x\n", + band, + rtw89_read32(rtwdev, R_AX_RX_ERR_FLAG_IMR + offset)); } else { - seq_printf(m, "R_AX_DLE_CTRL [%d]=0x%08x\n", band, - rtw89_read32(rtwdev, R_AX_DLE_CTRL + offset)); + p += scnprintf(p, end - p, + "R_AX_DLE_CTRL [%d]=0x%08x\n", band, + rtw89_read32(rtwdev, R_AX_DLE_CTRL + offset)); } } if (cmac_err & 
B_AX_DMA_TOP_ERR_IND || cmac_err & B_AX_WMAC_RX_ERR_IND) { if (chip->chip_id == RTL8852C) { - seq_printf(m, "R_AX_PHYINFO_ERR_ISR [%d]=0x%08x\n", band, - rtw89_read32(rtwdev, R_AX_PHYINFO_ERR_ISR + offset)); - seq_printf(m, "R_AX_PHYINFO_ERR_IMR [%d]=0x%08x\n", band, - rtw89_read32(rtwdev, R_AX_PHYINFO_ERR_IMR + offset)); + p += scnprintf(p, end - p, + "R_AX_PHYINFO_ERR_ISR [%d]=0x%08x\n", + band, + rtw89_read32(rtwdev, R_AX_PHYINFO_ERR_ISR + offset)); + p += scnprintf(p, end - p, + "R_AX_PHYINFO_ERR_IMR [%d]=0x%08x\n", + band, + rtw89_read32(rtwdev, R_AX_PHYINFO_ERR_IMR + offset)); } else { - seq_printf(m, "R_AX_PHYINFO_ERR_IMR [%d]=0x%08x\n", band, - rtw89_read32(rtwdev, R_AX_PHYINFO_ERR_IMR + offset)); + p += scnprintf(p, end - p, + "R_AX_PHYINFO_ERR_IMR [%d]=0x%08x\n", + band, + rtw89_read32(rtwdev, R_AX_PHYINFO_ERR_IMR + offset)); } } if (cmac_err & B_AX_TXPWR_CTRL_ERR_IND) { - seq_printf(m, "R_AX_TXPWR_IMR [%d]=0x%08x\n", band, - rtw89_read32(rtwdev, R_AX_TXPWR_IMR + offset)); - seq_printf(m, "R_AX_TXPWR_ISR [%d]=0x%08x\n", band, - rtw89_read32(rtwdev, R_AX_TXPWR_ISR + offset)); + p += scnprintf(p, end - p, "R_AX_TXPWR_IMR [%d]=0x%08x\n", + band, + rtw89_read32(rtwdev, R_AX_TXPWR_IMR + offset)); + p += scnprintf(p, end - p, "R_AX_TXPWR_ISR [%d]=0x%08x\n", + band, + rtw89_read32(rtwdev, R_AX_TXPWR_ISR + offset)); } if (cmac_err & B_AX_WMAC_TX_ERR_IND) { if (chip->chip_id == RTL8852C) { - seq_printf(m, "R_AX_TRXPTCL_ERROR_INDICA [%d]=0x%08x\n", band, - rtw89_read32(rtwdev, R_AX_TRXPTCL_ERROR_INDICA + offset)); - seq_printf(m, "R_AX_TRXPTCL_ERROR_INDICA_MASK [%d]=0x%08x\n", band, - rtw89_read32(rtwdev, R_AX_TRXPTCL_ERROR_INDICA_MASK + offset)); + p += scnprintf(p, end - p, + "R_AX_TRXPTCL_ERROR_INDICA [%d]=0x%08x\n", + band, + rtw89_read32(rtwdev, + R_AX_TRXPTCL_ERROR_INDICA + offset)); + p += scnprintf(p, end - p, + "R_AX_TRXPTCL_ERROR_INDICA_MASK [%d]=0x%08x\n", + band, + rtw89_read32(rtwdev, + R_AX_TRXPTCL_ERROR_INDICA_MASK + offset)); } else { - seq_printf(m, "R_AX_TMAC_ERR_IMR_ISR [%d]=0x%08x\n", band, - rtw89_read32(rtwdev, R_AX_TMAC_ERR_IMR_ISR + offset)); + p += scnprintf(p, end - p, + "R_AX_TMAC_ERR_IMR_ISR [%d]=0x%08x\n", + band, + rtw89_read32(rtwdev, + R_AX_TMAC_ERR_IMR_ISR + offset)); } - seq_printf(m, "R_AX_DBGSEL_TRXPTCL [%d]=0x%08x\n", band, - rtw89_read32(rtwdev, R_AX_DBGSEL_TRXPTCL + offset)); + p += scnprintf(p, end - p, + "R_AX_DBGSEL_TRXPTCL [%d]=0x%08x\n", band, + rtw89_read32(rtwdev, R_AX_DBGSEL_TRXPTCL + offset)); } - seq_printf(m, "R_AX_CMAC_ERR_IMR [%d]=0x%08x\n", band, - rtw89_read32(rtwdev, R_AX_CMAC_ERR_IMR + offset)); + p += scnprintf(p, end - p, "R_AX_CMAC_ERR_IMR [%d]=0x%08x\n", band, + rtw89_read32(rtwdev, R_AX_CMAC_ERR_IMR + offset)); - return 0; +out: + return p - buf; } static int rtw89_debug_mac_dump_cmac_dbg(struct rtw89_dev *rtwdev, - struct seq_file *m) + char *buf, size_t bufsz) { - rtw89_debug_mac_dump_cmac_err(rtwdev, m, RTW89_MAC_0); + char *p = buf, *end = buf + bufsz; + + p += rtw89_debug_mac_dump_cmac_err(rtwdev, p, end - p, RTW89_MAC_0); if (rtwdev->dbcc_en) - rtw89_debug_mac_dump_cmac_err(rtwdev, m, RTW89_MAC_1); + p += rtw89_debug_mac_dump_cmac_err(rtwdev, p, end - p, RTW89_MAC_1); - return 0; + return p - buf; } static const struct rtw89_mac_dbg_port_info dbg_port_ptcl_c0 = { @@ -2465,11 +2611,12 @@ static const struct rtw89_mac_dbg_port_info dbg_port_pcie_misc2 = { .rd_msk = B_AX_DEBUG_ST_MASK }; -static const struct rtw89_mac_dbg_port_info * -rtw89_debug_mac_dbg_port_sel(struct seq_file *m, - struct rtw89_dev *rtwdev, u32 sel) +static 
int +rtw89_debug_mac_dbg_port_sel(struct rtw89_dev *rtwdev, char *buf, size_t bufsz, + u32 sel, const struct rtw89_mac_dbg_port_info **ppinfo) { - const struct rtw89_mac_dbg_port_info *info; + const struct rtw89_mac_dbg_port_info *info = NULL; + char *p = buf, *end = buf + bufsz; u32 index; u32 val32; u16 val16; @@ -2481,28 +2628,28 @@ rtw89_debug_mac_dbg_port_sel(struct seq_file *m, val16 = rtw89_read16(rtwdev, R_AX_PTCL_DBG); val16 |= B_AX_PTCL_DBG_EN; rtw89_write16(rtwdev, R_AX_PTCL_DBG, val16); - seq_puts(m, "Enable PTCL C0 dbgport.\n"); + p += scnprintf(p, end - p, "Enable PTCL C0 dbgport.\n"); break; case RTW89_DBG_PORT_SEL_PTCL_C1: info = &dbg_port_ptcl_c1; val16 = rtw89_read16(rtwdev, R_AX_PTCL_DBG_C1); val16 |= B_AX_PTCL_DBG_EN; rtw89_write16(rtwdev, R_AX_PTCL_DBG_C1, val16); - seq_puts(m, "Enable PTCL C1 dbgport.\n"); + p += scnprintf(p, end - p, "Enable PTCL C1 dbgport.\n"); break; case RTW89_DBG_PORT_SEL_SCH_C0: info = &dbg_port_sch_c0; val32 = rtw89_read32(rtwdev, R_AX_SCH_DBG_SEL); val32 |= B_AX_SCH_DBG_EN; rtw89_write32(rtwdev, R_AX_SCH_DBG_SEL, val32); - seq_puts(m, "Enable SCH C0 dbgport.\n"); + p += scnprintf(p, end - p, "Enable SCH C0 dbgport.\n"); break; case RTW89_DBG_PORT_SEL_SCH_C1: info = &dbg_port_sch_c1; val32 = rtw89_read32(rtwdev, R_AX_SCH_DBG_SEL_C1); val32 |= B_AX_SCH_DBG_EN; rtw89_write32(rtwdev, R_AX_SCH_DBG_SEL_C1, val32); - seq_puts(m, "Enable SCH C1 dbgport.\n"); + p += scnprintf(p, end - p, "Enable SCH C1 dbgport.\n"); break; case RTW89_DBG_PORT_SEL_TMAC_C0: info = &dbg_port_tmac_c0; @@ -2519,7 +2666,7 @@ rtw89_debug_mac_dbg_port_sel(struct seq_file *m, val32 = rtw89_read32(rtwdev, R_AX_SYS_STATUS1); val32 = u32_replace_bits(val32, MAC_DBG_SEL, B_AX_SEL_0XC0_MASK); rtw89_write32(rtwdev, R_AX_SYS_STATUS1, val32); - seq_puts(m, "Enable TMAC C0 dbgport.\n"); + p += scnprintf(p, end - p, "Enable TMAC C0 dbgport.\n"); break; case RTW89_DBG_PORT_SEL_TMAC_C1: info = &dbg_port_tmac_c1; @@ -2536,7 +2683,7 @@ rtw89_debug_mac_dbg_port_sel(struct seq_file *m, val32 = rtw89_read32(rtwdev, R_AX_SYS_STATUS1); val32 = u32_replace_bits(val32, MAC_DBG_SEL, B_AX_SEL_0XC0_MASK); rtw89_write32(rtwdev, R_AX_SYS_STATUS1, val32); - seq_puts(m, "Enable TMAC C1 dbgport.\n"); + p += scnprintf(p, end - p, "Enable TMAC C1 dbgport.\n"); break; case RTW89_DBG_PORT_SEL_RMAC_C0: info = &dbg_port_rmac_c0; @@ -2558,7 +2705,7 @@ rtw89_debug_mac_dbg_port_sel(struct seq_file *m, val8 = u8_replace_bits(val8, RMAC_CMAC_DBG_SEL, B_AX_DBGSEL_TRXPTCL_MASK); rtw89_write8(rtwdev, R_AX_DBGSEL_TRXPTCL, val8); - seq_puts(m, "Enable RMAC C0 dbgport.\n"); + p += scnprintf(p, end - p, "Enable RMAC C0 dbgport.\n"); break; case RTW89_DBG_PORT_SEL_RMAC_C1: info = &dbg_port_rmac_c1; @@ -2580,23 +2727,23 @@ rtw89_debug_mac_dbg_port_sel(struct seq_file *m, val8 = u8_replace_bits(val8, RMAC_CMAC_DBG_SEL, B_AX_DBGSEL_TRXPTCL_MASK); rtw89_write8(rtwdev, R_AX_DBGSEL_TRXPTCL_C1, val8); - seq_puts(m, "Enable RMAC C1 dbgport.\n"); + p += scnprintf(p, end - p, "Enable RMAC C1 dbgport.\n"); break; case RTW89_DBG_PORT_SEL_RMACST_C0: info = &dbg_port_rmacst_c0; - seq_puts(m, "Enable RMAC state C0 dbgport.\n"); + p += scnprintf(p, end - p, "Enable RMAC state C0 dbgport.\n"); break; case RTW89_DBG_PORT_SEL_RMACST_C1: info = &dbg_port_rmacst_c1; - seq_puts(m, "Enable RMAC state C1 dbgport.\n"); + p += scnprintf(p, end - p, "Enable RMAC state C1 dbgport.\n"); break; case RTW89_DBG_PORT_SEL_RMAC_PLCP_C0: info = &dbg_port_rmac_plcp_c0; - seq_puts(m, "Enable RMAC PLCP C0 dbgport.\n"); + p += scnprintf(p, end - p, "Enable RMAC 
PLCP C0 dbgport.\n"); break; case RTW89_DBG_PORT_SEL_RMAC_PLCP_C1: info = &dbg_port_rmac_plcp_c1; - seq_puts(m, "Enable RMAC PLCP C1 dbgport.\n"); + p += scnprintf(p, end - p, "Enable RMAC PLCP C1 dbgport.\n"); break; case RTW89_DBG_PORT_SEL_TRXPTCL_C0: info = &dbg_port_trxptcl_c0; @@ -2608,7 +2755,7 @@ rtw89_debug_mac_dbg_port_sel(struct seq_file *m, val32 = rtw89_read32(rtwdev, R_AX_SYS_STATUS1); val32 = u32_replace_bits(val32, MAC_DBG_SEL, B_AX_SEL_0XC0_MASK); rtw89_write32(rtwdev, R_AX_SYS_STATUS1, val32); - seq_puts(m, "Enable TRXPTCL C0 dbgport.\n"); + p += scnprintf(p, end - p, "Enable TRXPTCL C0 dbgport.\n"); break; case RTW89_DBG_PORT_SEL_TRXPTCL_C1: info = &dbg_port_trxptcl_c1; @@ -2620,131 +2767,137 @@ rtw89_debug_mac_dbg_port_sel(struct seq_file *m, val32 = rtw89_read32(rtwdev, R_AX_SYS_STATUS1); val32 = u32_replace_bits(val32, MAC_DBG_SEL, B_AX_SEL_0XC0_MASK); rtw89_write32(rtwdev, R_AX_SYS_STATUS1, val32); - seq_puts(m, "Enable TRXPTCL C1 dbgport.\n"); + p += scnprintf(p, end - p, "Enable TRXPTCL C1 dbgport.\n"); break; case RTW89_DBG_PORT_SEL_TX_INFOL_C0: info = &dbg_port_tx_infol_c0; val32 = rtw89_read32(rtwdev, R_AX_TCR1); val32 |= B_AX_TCR_FORCE_READ_TXDFIFO; rtw89_write32(rtwdev, R_AX_TCR1, val32); - seq_puts(m, "Enable tx infol dump.\n"); + p += scnprintf(p, end - p, "Enable tx infol dump.\n"); break; case RTW89_DBG_PORT_SEL_TX_INFOH_C0: info = &dbg_port_tx_infoh_c0; val32 = rtw89_read32(rtwdev, R_AX_TCR1); val32 |= B_AX_TCR_FORCE_READ_TXDFIFO; rtw89_write32(rtwdev, R_AX_TCR1, val32); - seq_puts(m, "Enable tx infoh dump.\n"); + p += scnprintf(p, end - p, "Enable tx infoh dump.\n"); break; case RTW89_DBG_PORT_SEL_TX_INFOL_C1: info = &dbg_port_tx_infol_c1; val32 = rtw89_read32(rtwdev, R_AX_TCR1_C1); val32 |= B_AX_TCR_FORCE_READ_TXDFIFO; rtw89_write32(rtwdev, R_AX_TCR1_C1, val32); - seq_puts(m, "Enable tx infol dump.\n"); + p += scnprintf(p, end - p, "Enable tx infol dump.\n"); break; case RTW89_DBG_PORT_SEL_TX_INFOH_C1: info = &dbg_port_tx_infoh_c1; val32 = rtw89_read32(rtwdev, R_AX_TCR1_C1); val32 |= B_AX_TCR_FORCE_READ_TXDFIFO; rtw89_write32(rtwdev, R_AX_TCR1_C1, val32); - seq_puts(m, "Enable tx infoh dump.\n"); + p += scnprintf(p, end - p, "Enable tx infoh dump.\n"); break; case RTW89_DBG_PORT_SEL_TXTF_INFOL_C0: info = &dbg_port_txtf_infol_c0; val32 = rtw89_read32(rtwdev, R_AX_TCR1); val32 |= B_AX_TCR_FORCE_READ_TXDFIFO; rtw89_write32(rtwdev, R_AX_TCR1, val32); - seq_puts(m, "Enable tx tf infol dump.\n"); + p += scnprintf(p, end - p, "Enable tx tf infol dump.\n"); break; case RTW89_DBG_PORT_SEL_TXTF_INFOH_C0: info = &dbg_port_txtf_infoh_c0; val32 = rtw89_read32(rtwdev, R_AX_TCR1); val32 |= B_AX_TCR_FORCE_READ_TXDFIFO; rtw89_write32(rtwdev, R_AX_TCR1, val32); - seq_puts(m, "Enable tx tf infoh dump.\n"); + p += scnprintf(p, end - p, "Enable tx tf infoh dump.\n"); break; case RTW89_DBG_PORT_SEL_TXTF_INFOL_C1: info = &dbg_port_txtf_infol_c1; val32 = rtw89_read32(rtwdev, R_AX_TCR1_C1); val32 |= B_AX_TCR_FORCE_READ_TXDFIFO; rtw89_write32(rtwdev, R_AX_TCR1_C1, val32); - seq_puts(m, "Enable tx tf infol dump.\n"); + p += scnprintf(p, end - p, "Enable tx tf infol dump.\n"); break; case RTW89_DBG_PORT_SEL_TXTF_INFOH_C1: info = &dbg_port_txtf_infoh_c1; val32 = rtw89_read32(rtwdev, R_AX_TCR1_C1); val32 |= B_AX_TCR_FORCE_READ_TXDFIFO; rtw89_write32(rtwdev, R_AX_TCR1_C1, val32); - seq_puts(m, "Enable tx tf infoh dump.\n"); + p += scnprintf(p, end - p, "Enable tx tf infoh dump.\n"); break; case RTW89_DBG_PORT_SEL_WDE_BUFMGN_FREEPG: info = &dbg_port_wde_bufmgn_freepg; - seq_puts(m, 
"Enable wde bufmgn freepg dump.\n"); + p += scnprintf(p, end - p, "Enable wde bufmgn freepg dump.\n"); break; case RTW89_DBG_PORT_SEL_WDE_BUFMGN_QUOTA: info = &dbg_port_wde_bufmgn_quota; - seq_puts(m, "Enable wde bufmgn quota dump.\n"); + p += scnprintf(p, end - p, "Enable wde bufmgn quota dump.\n"); break; case RTW89_DBG_PORT_SEL_WDE_BUFMGN_PAGELLT: info = &dbg_port_wde_bufmgn_pagellt; - seq_puts(m, "Enable wde bufmgn pagellt dump.\n"); + p += scnprintf(p, end - p, + "Enable wde bufmgn pagellt dump.\n"); break; case RTW89_DBG_PORT_SEL_WDE_BUFMGN_PKTINFO: info = &dbg_port_wde_bufmgn_pktinfo; - seq_puts(m, "Enable wde bufmgn pktinfo dump.\n"); + p += scnprintf(p, end - p, + "Enable wde bufmgn pktinfo dump.\n"); break; case RTW89_DBG_PORT_SEL_WDE_QUEMGN_PREPKT: info = &dbg_port_wde_quemgn_prepkt; - seq_puts(m, "Enable wde quemgn prepkt dump.\n"); + p += scnprintf(p, end - p, "Enable wde quemgn prepkt dump.\n"); break; case RTW89_DBG_PORT_SEL_WDE_QUEMGN_NXTPKT: info = &dbg_port_wde_quemgn_nxtpkt; - seq_puts(m, "Enable wde quemgn nxtpkt dump.\n"); + p += scnprintf(p, end - p, "Enable wde quemgn nxtpkt dump.\n"); break; case RTW89_DBG_PORT_SEL_WDE_QUEMGN_QLNKTBL: info = &dbg_port_wde_quemgn_qlnktbl; - seq_puts(m, "Enable wde quemgn qlnktbl dump.\n"); + p += scnprintf(p, end - p, + "Enable wde quemgn qlnktbl dump.\n"); break; case RTW89_DBG_PORT_SEL_WDE_QUEMGN_QEMPTY: info = &dbg_port_wde_quemgn_qempty; - seq_puts(m, "Enable wde quemgn qempty dump.\n"); + p += scnprintf(p, end - p, "Enable wde quemgn qempty dump.\n"); break; case RTW89_DBG_PORT_SEL_PLE_BUFMGN_FREEPG: info = &dbg_port_ple_bufmgn_freepg; - seq_puts(m, "Enable ple bufmgn freepg dump.\n"); + p += scnprintf(p, end - p, "Enable ple bufmgn freepg dump.\n"); break; case RTW89_DBG_PORT_SEL_PLE_BUFMGN_QUOTA: info = &dbg_port_ple_bufmgn_quota; - seq_puts(m, "Enable ple bufmgn quota dump.\n"); + p += scnprintf(p, end - p, "Enable ple bufmgn quota dump.\n"); break; case RTW89_DBG_PORT_SEL_PLE_BUFMGN_PAGELLT: info = &dbg_port_ple_bufmgn_pagellt; - seq_puts(m, "Enable ple bufmgn pagellt dump.\n"); + p += scnprintf(p, end - p, + "Enable ple bufmgn pagellt dump.\n"); break; case RTW89_DBG_PORT_SEL_PLE_BUFMGN_PKTINFO: info = &dbg_port_ple_bufmgn_pktinfo; - seq_puts(m, "Enable ple bufmgn pktinfo dump.\n"); + p += scnprintf(p, end - p, + "Enable ple bufmgn pktinfo dump.\n"); break; case RTW89_DBG_PORT_SEL_PLE_QUEMGN_PREPKT: info = &dbg_port_ple_quemgn_prepkt; - seq_puts(m, "Enable ple quemgn prepkt dump.\n"); + p += scnprintf(p, end - p, "Enable ple quemgn prepkt dump.\n"); break; case RTW89_DBG_PORT_SEL_PLE_QUEMGN_NXTPKT: info = &dbg_port_ple_quemgn_nxtpkt; - seq_puts(m, "Enable ple quemgn nxtpkt dump.\n"); + p += scnprintf(p, end - p, "Enable ple quemgn nxtpkt dump.\n"); break; case RTW89_DBG_PORT_SEL_PLE_QUEMGN_QLNKTBL: info = &dbg_port_ple_quemgn_qlnktbl; - seq_puts(m, "Enable ple quemgn qlnktbl dump.\n"); + p += scnprintf(p, end - p, + "Enable ple quemgn qlnktbl dump.\n"); break; case RTW89_DBG_PORT_SEL_PLE_QUEMGN_QEMPTY: info = &dbg_port_ple_quemgn_qempty; - seq_puts(m, "Enable ple quemgn qempty dump.\n"); + p += scnprintf(p, end - p, "Enable ple quemgn qempty dump.\n"); break; case RTW89_DBG_PORT_SEL_PKTINFO: info = &dbg_port_pktinfo; - seq_puts(m, "Enable pktinfo dump.\n"); + p += scnprintf(p, end - p, "Enable pktinfo dump.\n"); break; case RTW89_DBG_PORT_SEL_DSPT_HDT_TX0: rtw89_write32_mask(rtwdev, R_AX_DBG_CTRL, @@ -2763,7 +2916,8 @@ rtw89_debug_mac_dbg_port_sel(struct seq_file *m, B_AX_DISPATCHER_INTN_SEL_MASK, 0); 
rtw89_write16_mask(rtwdev, info->sel_addr, B_AX_DISPATCHER_CH_SEL_MASK, index); - seq_printf(m, "Enable Dispatcher hdt tx%x dump.\n", index); + p += scnprintf(p, end - p, + "Enable Dispatcher hdt tx%x dump.\n", index); break; case RTW89_DBG_PORT_SEL_DSPT_HDT_TX6: info = &dbg_port_dspt_hdt_tx6; @@ -2771,7 +2925,8 @@ rtw89_debug_mac_dbg_port_sel(struct seq_file *m, B_AX_DISPATCHER_INTN_SEL_MASK, 0); rtw89_write16_mask(rtwdev, info->sel_addr, B_AX_DISPATCHER_CH_SEL_MASK, 6); - seq_puts(m, "Enable Dispatcher hdt tx6 dump.\n"); + p += scnprintf(p, end - p, + "Enable Dispatcher hdt tx6 dump.\n"); break; case RTW89_DBG_PORT_SEL_DSPT_HDT_TX7: info = &dbg_port_dspt_hdt_tx7; @@ -2779,7 +2934,8 @@ rtw89_debug_mac_dbg_port_sel(struct seq_file *m, B_AX_DISPATCHER_INTN_SEL_MASK, 0); rtw89_write16_mask(rtwdev, info->sel_addr, B_AX_DISPATCHER_CH_SEL_MASK, 7); - seq_puts(m, "Enable Dispatcher hdt tx7 dump.\n"); + p += scnprintf(p, end - p, + "Enable Dispatcher hdt tx7 dump.\n"); break; case RTW89_DBG_PORT_SEL_DSPT_HDT_TX8: info = &dbg_port_dspt_hdt_tx8; @@ -2787,7 +2943,8 @@ rtw89_debug_mac_dbg_port_sel(struct seq_file *m, B_AX_DISPATCHER_INTN_SEL_MASK, 0); rtw89_write16_mask(rtwdev, info->sel_addr, B_AX_DISPATCHER_CH_SEL_MASK, 8); - seq_puts(m, "Enable Dispatcher hdt tx8 dump.\n"); + p += scnprintf(p, end - p, + "Enable Dispatcher hdt tx8 dump.\n"); break; case RTW89_DBG_PORT_SEL_DSPT_HDT_TX9: case RTW89_DBG_PORT_SEL_DSPT_HDT_TXA: @@ -2799,7 +2956,8 @@ rtw89_debug_mac_dbg_port_sel(struct seq_file *m, B_AX_DISPATCHER_INTN_SEL_MASK, 0); rtw89_write16_mask(rtwdev, info->sel_addr, B_AX_DISPATCHER_CH_SEL_MASK, index); - seq_printf(m, "Enable Dispatcher hdt tx%x dump.\n", index); + p += scnprintf(p, end - p, + "Enable Dispatcher hdt tx%x dump.\n", index); break; case RTW89_DBG_PORT_SEL_DSPT_HDT_TXD: info = &dbg_port_dspt_hdt_txD; @@ -2807,7 +2965,8 @@ rtw89_debug_mac_dbg_port_sel(struct seq_file *m, B_AX_DISPATCHER_INTN_SEL_MASK, 0); rtw89_write16_mask(rtwdev, info->sel_addr, B_AX_DISPATCHER_CH_SEL_MASK, 0xD); - seq_puts(m, "Enable Dispatcher hdt txD dump.\n"); + p += scnprintf(p, end - p, + "Enable Dispatcher hdt txD dump.\n"); break; case RTW89_DBG_PORT_SEL_DSPT_CDT_TX0: info = &dbg_port_dspt_cdt_tx0; @@ -2815,7 +2974,8 @@ rtw89_debug_mac_dbg_port_sel(struct seq_file *m, B_AX_DISPATCHER_INTN_SEL_MASK, 1); rtw89_write16_mask(rtwdev, info->sel_addr, B_AX_DISPATCHER_CH_SEL_MASK, 0); - seq_puts(m, "Enable Dispatcher cdt tx0 dump.\n"); + p += scnprintf(p, end - p, + "Enable Dispatcher cdt tx0 dump.\n"); break; case RTW89_DBG_PORT_SEL_DSPT_CDT_TX1: info = &dbg_port_dspt_cdt_tx1; @@ -2823,7 +2983,8 @@ rtw89_debug_mac_dbg_port_sel(struct seq_file *m, B_AX_DISPATCHER_INTN_SEL_MASK, 1); rtw89_write16_mask(rtwdev, info->sel_addr, B_AX_DISPATCHER_CH_SEL_MASK, 1); - seq_puts(m, "Enable Dispatcher cdt tx1 dump.\n"); + p += scnprintf(p, end - p, + "Enable Dispatcher cdt tx1 dump.\n"); break; case RTW89_DBG_PORT_SEL_DSPT_CDT_TX3: info = &dbg_port_dspt_cdt_tx3; @@ -2831,7 +2992,8 @@ rtw89_debug_mac_dbg_port_sel(struct seq_file *m, B_AX_DISPATCHER_INTN_SEL_MASK, 1); rtw89_write16_mask(rtwdev, info->sel_addr, B_AX_DISPATCHER_CH_SEL_MASK, 3); - seq_puts(m, "Enable Dispatcher cdt tx3 dump.\n"); + p += scnprintf(p, end - p, + "Enable Dispatcher cdt tx3 dump.\n"); break; case RTW89_DBG_PORT_SEL_DSPT_CDT_TX4: info = &dbg_port_dspt_cdt_tx4; @@ -2839,7 +3001,8 @@ rtw89_debug_mac_dbg_port_sel(struct seq_file *m, B_AX_DISPATCHER_INTN_SEL_MASK, 1); rtw89_write16_mask(rtwdev, info->sel_addr, B_AX_DISPATCHER_CH_SEL_MASK, 4); - seq_puts(m, 
"Enable Dispatcher cdt tx4 dump.\n"); + p += scnprintf(p, end - p, + "Enable Dispatcher cdt tx4 dump.\n"); break; case RTW89_DBG_PORT_SEL_DSPT_CDT_TX5: case RTW89_DBG_PORT_SEL_DSPT_CDT_TX6: @@ -2851,7 +3014,8 @@ rtw89_debug_mac_dbg_port_sel(struct seq_file *m, B_AX_DISPATCHER_INTN_SEL_MASK, 1); rtw89_write16_mask(rtwdev, info->sel_addr, B_AX_DISPATCHER_CH_SEL_MASK, index); - seq_printf(m, "Enable Dispatcher cdt tx%x dump.\n", index); + p += scnprintf(p, end - p, + "Enable Dispatcher cdt tx%x dump.\n", index); break; case RTW89_DBG_PORT_SEL_DSPT_CDT_TX9: info = &dbg_port_dspt_cdt_tx9; @@ -2859,7 +3023,8 @@ rtw89_debug_mac_dbg_port_sel(struct seq_file *m, B_AX_DISPATCHER_INTN_SEL_MASK, 1); rtw89_write16_mask(rtwdev, info->sel_addr, B_AX_DISPATCHER_CH_SEL_MASK, 9); - seq_puts(m, "Enable Dispatcher cdt tx9 dump.\n"); + p += scnprintf(p, end - p, + "Enable Dispatcher cdt tx9 dump.\n"); break; case RTW89_DBG_PORT_SEL_DSPT_CDT_TXA: case RTW89_DBG_PORT_SEL_DSPT_CDT_TXB: @@ -2870,7 +3035,8 @@ rtw89_debug_mac_dbg_port_sel(struct seq_file *m, B_AX_DISPATCHER_INTN_SEL_MASK, 1); rtw89_write16_mask(rtwdev, info->sel_addr, B_AX_DISPATCHER_CH_SEL_MASK, index); - seq_printf(m, "Enable Dispatcher cdt tx%x dump.\n", index); + p += scnprintf(p, end - p, + "Enable Dispatcher cdt tx%x dump.\n", index); break; case RTW89_DBG_PORT_SEL_DSPT_HDT_RX0: info = &dbg_port_dspt_hdt_rx0; @@ -2878,7 +3044,8 @@ rtw89_debug_mac_dbg_port_sel(struct seq_file *m, B_AX_DISPATCHER_INTN_SEL_MASK, 2); rtw89_write16_mask(rtwdev, info->sel_addr, B_AX_DISPATCHER_CH_SEL_MASK, 0); - seq_puts(m, "Enable Dispatcher hdt rx0 dump.\n"); + p += scnprintf(p, end - p, + "Enable Dispatcher hdt rx0 dump.\n"); break; case RTW89_DBG_PORT_SEL_DSPT_HDT_RX1: case RTW89_DBG_PORT_SEL_DSPT_HDT_RX2: @@ -2888,7 +3055,8 @@ rtw89_debug_mac_dbg_port_sel(struct seq_file *m, B_AX_DISPATCHER_INTN_SEL_MASK, 2); rtw89_write16_mask(rtwdev, info->sel_addr, B_AX_DISPATCHER_CH_SEL_MASK, index); - seq_printf(m, "Enable Dispatcher hdt rx%x dump.\n", index); + p += scnprintf(p, end - p, + "Enable Dispatcher hdt rx%x dump.\n", index); break; case RTW89_DBG_PORT_SEL_DSPT_HDT_RX3: info = &dbg_port_dspt_hdt_rx3; @@ -2896,7 +3064,8 @@ rtw89_debug_mac_dbg_port_sel(struct seq_file *m, B_AX_DISPATCHER_INTN_SEL_MASK, 2); rtw89_write16_mask(rtwdev, info->sel_addr, B_AX_DISPATCHER_CH_SEL_MASK, 3); - seq_puts(m, "Enable Dispatcher hdt rx3 dump.\n"); + p += scnprintf(p, end - p, + "Enable Dispatcher hdt rx3 dump.\n"); break; case RTW89_DBG_PORT_SEL_DSPT_HDT_RX4: info = &dbg_port_dspt_hdt_rx4; @@ -2904,7 +3073,8 @@ rtw89_debug_mac_dbg_port_sel(struct seq_file *m, B_AX_DISPATCHER_INTN_SEL_MASK, 2); rtw89_write16_mask(rtwdev, info->sel_addr, B_AX_DISPATCHER_CH_SEL_MASK, 4); - seq_puts(m, "Enable Dispatcher hdt rx4 dump.\n"); + p += scnprintf(p, end - p, + "Enable Dispatcher hdt rx4 dump.\n"); break; case RTW89_DBG_PORT_SEL_DSPT_HDT_RX5: info = &dbg_port_dspt_hdt_rx5; @@ -2912,7 +3082,8 @@ rtw89_debug_mac_dbg_port_sel(struct seq_file *m, B_AX_DISPATCHER_INTN_SEL_MASK, 2); rtw89_write16_mask(rtwdev, info->sel_addr, B_AX_DISPATCHER_CH_SEL_MASK, 5); - seq_puts(m, "Enable Dispatcher hdt rx5 dump.\n"); + p += scnprintf(p, end - p, + "Enable Dispatcher hdt rx5 dump.\n"); break; case RTW89_DBG_PORT_SEL_DSPT_CDT_RX_P0_0: info = &dbg_port_dspt_cdt_rx_p0_0; @@ -2920,7 +3091,8 @@ rtw89_debug_mac_dbg_port_sel(struct seq_file *m, B_AX_DISPATCHER_INTN_SEL_MASK, 3); rtw89_write16_mask(rtwdev, info->sel_addr, B_AX_DISPATCHER_CH_SEL_MASK, 0); - seq_puts(m, "Enable Dispatcher cdt rx part0 0 dump.\n"); + p 
+= scnprintf(p, end - p, + "Enable Dispatcher cdt rx part0 0 dump.\n"); break; case RTW89_DBG_PORT_SEL_DSPT_CDT_RX_P0: case RTW89_DBG_PORT_SEL_DSPT_CDT_RX_P0_1: @@ -2929,7 +3101,8 @@ rtw89_debug_mac_dbg_port_sel(struct seq_file *m, B_AX_DISPATCHER_INTN_SEL_MASK, 3); rtw89_write16_mask(rtwdev, info->sel_addr, B_AX_DISPATCHER_CH_SEL_MASK, 1); - seq_puts(m, "Enable Dispatcher cdt rx part0 1 dump.\n"); + p += scnprintf(p, end - p, + "Enable Dispatcher cdt rx part0 1 dump.\n"); break; case RTW89_DBG_PORT_SEL_DSPT_CDT_RX_P0_2: info = &dbg_port_dspt_cdt_rx_p0_2; @@ -2937,43 +3110,50 @@ rtw89_debug_mac_dbg_port_sel(struct seq_file *m, B_AX_DISPATCHER_INTN_SEL_MASK, 3); rtw89_write16_mask(rtwdev, info->sel_addr, B_AX_DISPATCHER_CH_SEL_MASK, 2); - seq_puts(m, "Enable Dispatcher cdt rx part0 2 dump.\n"); + p += scnprintf(p, end - p, + "Enable Dispatcher cdt rx part0 2 dump.\n"); break; case RTW89_DBG_PORT_SEL_DSPT_CDT_RX_P1: info = &dbg_port_dspt_cdt_rx_p1; rtw89_write8_mask(rtwdev, info->sel_addr, B_AX_DISPATCHER_INTN_SEL_MASK, 3); - seq_puts(m, "Enable Dispatcher cdt rx part1 dump.\n"); + p += scnprintf(p, end - p, + "Enable Dispatcher cdt rx part1 dump.\n"); break; case RTW89_DBG_PORT_SEL_DSPT_STF_CTRL: info = &dbg_port_dspt_stf_ctrl; rtw89_write8_mask(rtwdev, info->sel_addr, B_AX_DISPATCHER_INTN_SEL_MASK, 4); - seq_puts(m, "Enable Dispatcher stf control dump.\n"); + p += scnprintf(p, end - p, + "Enable Dispatcher stf control dump.\n"); break; case RTW89_DBG_PORT_SEL_DSPT_ADDR_CTRL: info = &dbg_port_dspt_addr_ctrl; rtw89_write8_mask(rtwdev, info->sel_addr, B_AX_DISPATCHER_INTN_SEL_MASK, 5); - seq_puts(m, "Enable Dispatcher addr control dump.\n"); + p += scnprintf(p, end - p, + "Enable Dispatcher addr control dump.\n"); break; case RTW89_DBG_PORT_SEL_DSPT_WDE_INTF: info = &dbg_port_dspt_wde_intf; rtw89_write8_mask(rtwdev, info->sel_addr, B_AX_DISPATCHER_INTN_SEL_MASK, 6); - seq_puts(m, "Enable Dispatcher wde interface dump.\n"); + p += scnprintf(p, end - p, + "Enable Dispatcher wde interface dump.\n"); break; case RTW89_DBG_PORT_SEL_DSPT_PLE_INTF: info = &dbg_port_dspt_ple_intf; rtw89_write8_mask(rtwdev, info->sel_addr, B_AX_DISPATCHER_INTN_SEL_MASK, 7); - seq_puts(m, "Enable Dispatcher ple interface dump.\n"); + p += scnprintf(p, end - p, + "Enable Dispatcher ple interface dump.\n"); break; case RTW89_DBG_PORT_SEL_DSPT_FLOW_CTRL: info = &dbg_port_dspt_flow_ctrl; rtw89_write8_mask(rtwdev, info->sel_addr, B_AX_DISPATCHER_INTN_SEL_MASK, 8); - seq_puts(m, "Enable Dispatcher flow control dump.\n"); + p += scnprintf(p, end - p, + "Enable Dispatcher flow control dump.\n"); break; case RTW89_DBG_PORT_SEL_PCIE_TXDMA: info = &dbg_port_pcie_txdma; @@ -2981,7 +3161,7 @@ rtw89_debug_mac_dbg_port_sel(struct seq_file *m, val32 = u32_replace_bits(val32, PCIE_TXDMA_DBG_SEL, B_AX_DBG_SEL0); val32 = u32_replace_bits(val32, PCIE_TXDMA_DBG_SEL, B_AX_DBG_SEL1); rtw89_write32(rtwdev, R_AX_DBG_CTRL, val32); - seq_puts(m, "Enable pcie txdma dump.\n"); + p += scnprintf(p, end - p, "Enable pcie txdma dump.\n"); break; case RTW89_DBG_PORT_SEL_PCIE_RXDMA: info = &dbg_port_pcie_rxdma; @@ -2989,7 +3169,7 @@ rtw89_debug_mac_dbg_port_sel(struct seq_file *m, val32 = u32_replace_bits(val32, PCIE_RXDMA_DBG_SEL, B_AX_DBG_SEL0); val32 = u32_replace_bits(val32, PCIE_RXDMA_DBG_SEL, B_AX_DBG_SEL1); rtw89_write32(rtwdev, R_AX_DBG_CTRL, val32); - seq_puts(m, "Enable pcie rxdma dump.\n"); + p += scnprintf(p, end - p, "Enable pcie rxdma dump.\n"); break; case RTW89_DBG_PORT_SEL_PCIE_CVT: info = &dbg_port_pcie_cvt; @@ -2997,7 +3177,7 @@ 
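[Editor's note] The PCIE cases around this point update two debug-select fields of one register through u32_replace_bits(). A self-contained sketch of that <linux/bitfield.h> pattern; the mask names below are invented for the example, not the driver's B_AX_* constants:

#include <linux/bitfield.h>
#include <linux/types.h>

#define DBG_SEL0	GENMASK(7, 0)	/* invented field masks */
#define DBG_SEL1	GENMASK(15, 8)

static u32 set_dbg_sels(u32 val32, u8 sel)
{
	/* clear each field, then insert the new selector value */
	val32 = u32_replace_bits(val32, sel, DBG_SEL0);
	val32 = u32_replace_bits(val32, sel, DBG_SEL1);
	return val32;	/* caller writes this back to the register */
}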
rtw89_debug_mac_dbg_port_sel(struct seq_file *m, val32 = u32_replace_bits(val32, PCIE_CVT_DBG_SEL, B_AX_DBG_SEL0); val32 = u32_replace_bits(val32, PCIE_CVT_DBG_SEL, B_AX_DBG_SEL1); rtw89_write32(rtwdev, R_AX_DBG_CTRL, val32); - seq_puts(m, "Enable pcie cvt dump.\n"); + p += scnprintf(p, end - p, "Enable pcie cvt dump.\n"); break; case RTW89_DBG_PORT_SEL_PCIE_CXPL: info = &dbg_port_pcie_cxpl; @@ -3005,7 +3185,7 @@ rtw89_debug_mac_dbg_port_sel(struct seq_file *m, val32 = u32_replace_bits(val32, PCIE_CXPL_DBG_SEL, B_AX_DBG_SEL0); val32 = u32_replace_bits(val32, PCIE_CXPL_DBG_SEL, B_AX_DBG_SEL1); rtw89_write32(rtwdev, R_AX_DBG_CTRL, val32); - seq_puts(m, "Enable pcie cxpl dump.\n"); + p += scnprintf(p, end - p, "Enable pcie cxpl dump.\n"); break; case RTW89_DBG_PORT_SEL_PCIE_IO: info = &dbg_port_pcie_io; @@ -3013,7 +3193,7 @@ rtw89_debug_mac_dbg_port_sel(struct seq_file *m, val32 = u32_replace_bits(val32, PCIE_IO_DBG_SEL, B_AX_DBG_SEL0); val32 = u32_replace_bits(val32, PCIE_IO_DBG_SEL, B_AX_DBG_SEL1); rtw89_write32(rtwdev, R_AX_DBG_CTRL, val32); - seq_puts(m, "Enable pcie io dump.\n"); + p += scnprintf(p, end - p, "Enable pcie io dump.\n"); break; case RTW89_DBG_PORT_SEL_PCIE_MISC: info = &dbg_port_pcie_misc; @@ -3021,7 +3201,7 @@ rtw89_debug_mac_dbg_port_sel(struct seq_file *m, val32 = u32_replace_bits(val32, PCIE_MISC_DBG_SEL, B_AX_DBG_SEL0); val32 = u32_replace_bits(val32, PCIE_MISC_DBG_SEL, B_AX_DBG_SEL1); rtw89_write32(rtwdev, R_AX_DBG_CTRL, val32); - seq_puts(m, "Enable pcie misc dump.\n"); + p += scnprintf(p, end - p, "Enable pcie misc dump.\n"); break; case RTW89_DBG_PORT_SEL_PCIE_MISC2: info = &dbg_port_pcie_misc2; @@ -3029,14 +3209,16 @@ rtw89_debug_mac_dbg_port_sel(struct seq_file *m, val16 = u16_replace_bits(val16, PCIE_MISC2_DBG_SEL, B_AX_PCIE_DBG_SEL_MASK); rtw89_write16(rtwdev, R_AX_PCIE_DBG_CTRL, val16); - seq_puts(m, "Enable pcie misc2 dump.\n"); + p += scnprintf(p, end - p, "Enable pcie misc2 dump.\n"); break; default: - seq_puts(m, "Dbg port select err\n"); - return NULL; + p += scnprintf(p, end - p, "Dbg port select err\n"); + break; } - return info; + *ppinfo = info; + + return p - buf; } static bool is_dbg_port_valid(struct rtw89_dev *rtwdev, u32 sel) @@ -3070,23 +3252,25 @@ static bool is_dbg_port_valid(struct rtw89_dev *rtwdev, u32 sel) } static int rtw89_debug_mac_dbg_port_dump(struct rtw89_dev *rtwdev, - struct seq_file *m, u32 sel) + char *buf, size_t bufsz, u32 sel) { - const struct rtw89_mac_dbg_port_info *info; - u8 val8; - u16 val16; + const struct rtw89_mac_dbg_port_info *info = NULL; + char *p = buf, *end = buf + bufsz; u32 val32; + u16 val16; + u8 val8; u32 i; - info = rtw89_debug_mac_dbg_port_sel(m, rtwdev, sel); + p += rtw89_debug_mac_dbg_port_sel(rtwdev, p, end - p, sel, &info); + if (!info) { rtw89_err(rtwdev, "failed to select debug port %d\n", sel); - return -EINVAL; + goto out; } #define case_DBG_SEL(__sel) \ case RTW89_DBG_PORT_SEL_##__sel: \ - seq_puts(m, "Dump debug port " #__sel ":\n"); \ + p += scnprintf(p, end - p, "Dump debug port " #__sel ":\n"); \ break switch (sel) { @@ -3182,8 +3366,8 @@ static int rtw89_debug_mac_dbg_port_dump(struct rtw89_dev *rtwdev, #undef case_DBG_SEL - seq_printf(m, "Sel addr = 0x%X\n", info->sel_addr); - seq_printf(m, "Read addr = 0x%X\n", info->rd_addr); + p += scnprintf(p, end - p, "Sel addr = 0x%X\n", info->sel_addr); + p += scnprintf(p, end - p, "Read addr = 0x%X\n", info->rd_addr); for (i = info->srt; i <= info->end; i++) { switch (info->sel_byte) { @@ -3191,17 +3375,17 @@ static int 
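[Editor's note] The tail of rtw89_debug_mac_dbg_port_sel() shows the other half of the conversion: instead of returning the info pointer (and NULL on error), the function now returns the number of characters written and reports the selection through *ppinfo. A reduced model of that calling convention, with stand-in types:

#include <linux/kernel.h>

struct port_info { u32 sel_addr; };	/* stand-in for the driver's info */

static ssize_t port_sel(char *buf, size_t bufsz, u32 sel,
			const struct port_info **ppinfo)
{
	static const struct port_info known = { .sel_addr = 0x1000 };
	char *p = buf, *end = buf + bufsz;
	const struct port_info *info = NULL;

	if (sel == 0) {
		info = &known;
		p += scnprintf(p, end - p, "Enable dump for port %u.\n", sel);
	} else {
		p += scnprintf(p, end - p, "Dbg port select err\n");
	}

	*ppinfo = info;		/* NULL tells the caller selection failed */
	return p - buf;		/* diagnostic text is returned either way */
}

This lets the error path still contribute text to the dump, which the old NULL-returning variant could only do through the seq_file side channel.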
rtw89_debug_mac_dbg_port_dump(struct rtw89_dev *rtwdev, default: rtw89_write8_mask(rtwdev, info->sel_addr, info->sel_msk, i); - seq_printf(m, "0x%02X: ", i); + p += scnprintf(p, end - p, "0x%02X: ", i); break; case 2: rtw89_write16_mask(rtwdev, info->sel_addr, info->sel_msk, i); - seq_printf(m, "0x%04X: ", i); + p += scnprintf(p, end - p, "0x%04X: ", i); break; case 4: rtw89_write32_mask(rtwdev, info->sel_addr, info->sel_msk, i); - seq_printf(m, "0x%04X: ", i); + p += scnprintf(p, end - p, "0x%04X: ", i); break; } @@ -3212,77 +3396,75 @@ static int rtw89_debug_mac_dbg_port_dump(struct rtw89_dev *rtwdev, default: val8 = rtw89_read8_mask(rtwdev, info->rd_addr, info->rd_msk); - seq_printf(m, "0x%02X\n", val8); + p += scnprintf(p, end - p, "0x%02X\n", val8); break; case 2: val16 = rtw89_read16_mask(rtwdev, info->rd_addr, info->rd_msk); - seq_printf(m, "0x%04X\n", val16); + p += scnprintf(p, end - p, "0x%04X\n", val16); break; case 4: val32 = rtw89_read32_mask(rtwdev, info->rd_addr, info->rd_msk); - seq_printf(m, "0x%08X\n", val32); + p += scnprintf(p, end - p, "0x%08X\n", val32); break; } } - return 0; +out: + return p - buf; } static int rtw89_debug_mac_dump_dbg_port(struct rtw89_dev *rtwdev, - struct seq_file *m) + char *buf, size_t bufsz) { + char *p = buf, *end = buf + bufsz; + ssize_t n; u32 sel; - int ret = 0; for (sel = RTW89_DBG_PORT_SEL_PTCL_C0; sel < RTW89_DBG_PORT_SEL_LAST; sel++) { if (!is_dbg_port_valid(rtwdev, sel)) continue; - ret = rtw89_debug_mac_dbg_port_dump(rtwdev, m, sel); - if (ret) { + n = rtw89_debug_mac_dbg_port_dump(rtwdev, p, end - p, sel); + if (n < 0) { rtw89_err(rtwdev, "failed to dump debug port %d\n", sel); break; } + p += n; } - return ret; + return p - buf; } -static int -rtw89_debug_priv_mac_dbg_port_dump_get(struct seq_file *m, void *v) +static ssize_t +rtw89_debug_priv_mac_dbg_port_dump_get(struct rtw89_dev *rtwdev, + struct rtw89_debugfs_priv *debugfs_priv, + char *buf, size_t bufsz) { - struct rtw89_debugfs_priv *debugfs_priv = m->private; - struct rtw89_dev *rtwdev = debugfs_priv->rtwdev; + char *p = buf, *end = buf + bufsz; if (debugfs_priv->dbgpkg_en.ss_dbg) - rtw89_debug_mac_dump_ss_dbg(rtwdev, m); + p += rtw89_debug_mac_dump_ss_dbg(rtwdev, p, end - p); if (debugfs_priv->dbgpkg_en.dle_dbg) - rtw89_debug_mac_dump_dle_dbg(rtwdev, m); + p += rtw89_debug_mac_dump_dle_dbg(rtwdev, p, end - p); if (debugfs_priv->dbgpkg_en.dmac_dbg) - rtw89_debug_mac_dump_dmac_dbg(rtwdev, m); + p += rtw89_debug_mac_dump_dmac_dbg(rtwdev, p, end - p); if (debugfs_priv->dbgpkg_en.cmac_dbg) - rtw89_debug_mac_dump_cmac_dbg(rtwdev, m); + p += rtw89_debug_mac_dump_cmac_dbg(rtwdev, p, end - p); if (debugfs_priv->dbgpkg_en.dbg_port) - rtw89_debug_mac_dump_dbg_port(rtwdev, m); + p += rtw89_debug_mac_dump_dbg_port(rtwdev, p, end - p); - return 0; + return p - buf; }; -static u8 *rtw89_hex2bin_user(struct rtw89_dev *rtwdev, - const char __user *user_buf, size_t count) +static u8 *rtw89_hex2bin(struct rtw89_dev *rtwdev, const char *buf, size_t count) { - char *buf; u8 *bin; int num; int err = 0; - buf = memdup_user(user_buf, count); - if (IS_ERR(buf)) - return buf; - num = count / 2; bin = kmalloc(num, GFP_KERNEL); if (!bin) { @@ -3297,22 +3479,18 @@ static u8 *rtw89_hex2bin_user(struct rtw89_dev *rtwdev, } out: - kfree(buf); - return err ? 
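[Editor's note] The dump loop in rtw89_debug_mac_dbg_port_dump() is a classic indirect register window: write an index into the select register, read the latched value from the read register, print, repeat. A toy, self-contained version; reg_write32()/reg_read32() and the backing array are inventions for the sketch, not the rtw89 accessors:

#include <linux/kernel.h>

static u32 toy_regs[16];	/* toy register file for the sketch */
static void reg_write32(u32 addr, u32 val) { toy_regs[addr & 0xf] = val; }
static u32 reg_read32(u32 addr) { return toy_regs[addr & 0xf]; }

static ssize_t dump_window(char *buf, size_t bufsz, u32 sel_addr,
			   u32 rd_addr, u32 first, u32 last)
{
	char *p = buf, *end = buf + bufsz;
	u32 i;

	for (i = first; i <= last; i++) {
		reg_write32(sel_addr, i);	/* select entry i */
		p += scnprintf(p, end - p, "0x%04X: 0x%08X\n",
			       i, reg_read32(rd_addr)); /* read it back */
	}

	return p - buf;
}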
ERR_PTR(err) : bin; } -static ssize_t rtw89_debug_priv_send_h2c_set(struct file *filp, - const char __user *user_buf, - size_t count, loff_t *loff) +static ssize_t rtw89_debug_priv_send_h2c_set(struct rtw89_dev *rtwdev, + struct rtw89_debugfs_priv *debugfs_priv, + const char *buf, size_t count) { - struct rtw89_debugfs_priv *debugfs_priv = filp->private_data; - struct rtw89_dev *rtwdev = debugfs_priv->rtwdev; u8 *h2c; int ret; u16 h2c_len = count / 2; - h2c = rtw89_hex2bin_user(rtwdev, user_buf, count); + h2c = rtw89_hex2bin(rtwdev, buf, count); if (IS_ERR(h2c)) return -EFAULT; @@ -3323,34 +3501,36 @@ static ssize_t rtw89_debug_priv_send_h2c_set(struct file *filp, return ret ? ret : count; } -static int -rtw89_debug_priv_early_h2c_get(struct seq_file *m, void *v) +static ssize_t +rtw89_debug_priv_early_h2c_get(struct rtw89_dev *rtwdev, + struct rtw89_debugfs_priv *debugfs_priv, + char *buf, size_t bufsz) { - struct rtw89_debugfs_priv *debugfs_priv = m->private; - struct rtw89_dev *rtwdev = debugfs_priv->rtwdev; struct rtw89_early_h2c *early_h2c; + char *p = buf, *end = buf + bufsz; int seq = 0; - mutex_lock(&rtwdev->mutex); + lockdep_assert_wiphy(rtwdev->hw->wiphy); + list_for_each_entry(early_h2c, &rtwdev->early_h2c_list, list) - seq_printf(m, "%d: %*ph\n", ++seq, early_h2c->h2c_len, early_h2c->h2c); - mutex_unlock(&rtwdev->mutex); + p += scnprintf(p, end - p, "%d: %*ph\n", ++seq, + early_h2c->h2c_len, early_h2c->h2c); - return 0; + return p - buf; } static ssize_t -rtw89_debug_priv_early_h2c_set(struct file *filp, const char __user *user_buf, - size_t count, loff_t *loff) +rtw89_debug_priv_early_h2c_set(struct rtw89_dev *rtwdev, + struct rtw89_debugfs_priv *debugfs_priv, + const char *buf, size_t count) { - struct seq_file *m = (struct seq_file *)filp->private_data; - struct rtw89_debugfs_priv *debugfs_priv = m->private; - struct rtw89_dev *rtwdev = debugfs_priv->rtwdev; struct rtw89_early_h2c *early_h2c; u8 *h2c; u16 h2c_len = count / 2; - h2c = rtw89_hex2bin_user(rtwdev, user_buf, count); + lockdep_assert_wiphy(rtwdev->hw->wiphy); + + h2c = rtw89_hex2bin(rtwdev, buf, count); if (IS_ERR(h2c)) return -EFAULT; @@ -3369,9 +3549,7 @@ rtw89_debug_priv_early_h2c_set(struct file *filp, const char __user *user_buf, early_h2c->h2c = h2c; early_h2c->h2c_len = h2c_len; - mutex_lock(&rtwdev->mutex); list_add_tail(&early_h2c->list, &rtwdev->early_h2c_list); - mutex_unlock(&rtwdev->mutex); out: return count; @@ -3404,15 +3582,16 @@ static int rtw89_dbg_trigger_ctrl_error(struct rtw89_dev *rtwdev) return 0; } -static int -rtw89_debug_priv_fw_crash_get(struct seq_file *m, void *v) +static ssize_t +rtw89_debug_priv_fw_crash_get(struct rtw89_dev *rtwdev, + struct rtw89_debugfs_priv *debugfs_priv, + char *buf, size_t bufsz) { - struct rtw89_debugfs_priv *debugfs_priv = m->private; - struct rtw89_dev *rtwdev = debugfs_priv->rtwdev; + char *p = buf, *end = buf + bufsz; - seq_printf(m, "%d\n", - test_bit(RTW89_FLAG_CRASH_SIMULATING, rtwdev->flags)); - return 0; + p += scnprintf(p, end - p, "%d\n", + test_bit(RTW89_FLAG_CRASH_SIMULATING, rtwdev->flags)); + return p - buf; } enum rtw89_dbg_crash_simulation_type { @@ -3421,17 +3600,17 @@ enum rtw89_dbg_crash_simulation_type { }; static ssize_t -rtw89_debug_priv_fw_crash_set(struct file *filp, const char __user *user_buf, - size_t count, loff_t *loff) +rtw89_debug_priv_fw_crash_set(struct rtw89_dev *rtwdev, + struct rtw89_debugfs_priv *debugfs_priv, + const char *buf, size_t count) { - struct seq_file *m = (struct seq_file *)filp->private_data; - struct 
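[Editor's note] With memdup_user() hoisted out into the common write plumbing, rtw89_hex2bin() now parses an in-kernel hex string. A plausible reduction of that step using the stock hex2bin() helper; the function name is illustrative and the error handling simplified:

#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/slab.h>

static u8 *hexstr_to_bin(const char *buf, size_t count)
{
	size_t num = count / 2;		/* two ASCII digits per byte */
	u8 *bin;

	bin = kmalloc(num, GFP_KERNEL);
	if (!bin)
		return ERR_PTR(-ENOMEM);

	if (hex2bin(bin, buf, num)) {	/* returns 0 on success */
		kfree(bin);
		return ERR_PTR(-EINVAL);
	}

	return bin;
}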
rtw89_debugfs_priv *debugfs_priv = m->private; - struct rtw89_dev *rtwdev = debugfs_priv->rtwdev; int (*sim)(struct rtw89_dev *rtwdev); u8 crash_type; int ret; - ret = kstrtou8_from_user(user_buf, count, 0, &crash_type); + lockdep_assert_wiphy(rtwdev->hw->wiphy); + + ret = kstrtou8(buf, 0, &crash_type); if (ret) return -EINVAL; @@ -3448,10 +3627,8 @@ rtw89_debug_priv_fw_crash_set(struct file *filp, const char __user *user_buf, return -EINVAL; } - mutex_lock(&rtwdev->mutex); set_bit(RTW89_FLAG_CRASH_SIMULATING, rtwdev->flags); ret = sim(rtwdev); - mutex_unlock(&rtwdev->mutex); if (ret) return ret; @@ -3459,27 +3636,22 @@ rtw89_debug_priv_fw_crash_set(struct file *filp, const char __user *user_buf, return count; } -static int rtw89_debug_priv_btc_info_get(struct seq_file *m, void *v) +static ssize_t rtw89_debug_priv_btc_info_get(struct rtw89_dev *rtwdev, + struct rtw89_debugfs_priv *debugfs_priv, + char *buf, size_t bufsz) { - struct rtw89_debugfs_priv *debugfs_priv = m->private; - struct rtw89_dev *rtwdev = debugfs_priv->rtwdev; - - rtw89_btc_dump_info(rtwdev, m); - - return 0; + return rtw89_btc_dump_info(rtwdev, buf, bufsz); } -static ssize_t rtw89_debug_priv_btc_manual_set(struct file *filp, - const char __user *user_buf, - size_t count, loff_t *loff) +static ssize_t rtw89_debug_priv_btc_manual_set(struct rtw89_dev *rtwdev, + struct rtw89_debugfs_priv *debugfs_priv, + const char *buf, size_t count) { - struct rtw89_debugfs_priv *debugfs_priv = filp->private_data; - struct rtw89_dev *rtwdev = debugfs_priv->rtwdev; struct rtw89_btc *btc = &rtwdev->btc; const struct rtw89_btc_ver *ver = btc->ver; int ret; - ret = kstrtobool_from_user(user_buf, count, &btc->manual_ctrl); + ret = kstrtobool(buf, &btc->manual_ctrl); if (ret) return ret; @@ -3491,31 +3663,29 @@ static ssize_t rtw89_debug_priv_btc_manual_set(struct file *filp, return count; } -static ssize_t rtw89_debug_priv_fw_log_manual_set(struct file *filp, - const char __user *user_buf, - size_t count, loff_t *loff) +static ssize_t rtw89_debug_priv_fw_log_manual_set(struct rtw89_dev *rtwdev, + struct rtw89_debugfs_priv *debugfs_priv, + const char *buf, size_t count) { - struct rtw89_debugfs_priv *debugfs_priv = filp->private_data; - struct rtw89_dev *rtwdev = debugfs_priv->rtwdev; struct rtw89_fw_log *log = &rtwdev->fw.log; bool fw_log_manual; - if (kstrtobool_from_user(user_buf, count, &fw_log_manual)) + lockdep_assert_wiphy(rtwdev->hw->wiphy); + + if (kstrtobool(buf, &fw_log_manual)) goto out; - mutex_lock(&rtwdev->mutex); log->enable = fw_log_manual; if (log->enable) rtw89_fw_log_prepare(rtwdev); rtw89_fw_h2c_fw_log(rtwdev, fw_log_manual); - mutex_unlock(&rtwdev->mutex); out: return count; } -static void rtw89_sta_link_info_get_iter(struct seq_file *m, - struct rtw89_dev *rtwdev, - struct rtw89_sta_link *rtwsta_link) +static int rtw89_sta_link_info_get_iter(struct rtw89_dev *rtwdev, + char *buf, size_t bufsz, + struct rtw89_sta_link *rtwsta_link) { static const char * const he_gi_str[] = { [NL80211_RATE_INFO_HE_GI_0_8] = "0.8", @@ -3533,6 +3703,7 @@ static void rtw89_sta_link_info_get_iter(struct seq_file *m, u8 ant_num = hal->ant_diversity ? 
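[Editor's note] A simplification recurring in these write handlers: the *_from_user parsing variants (kstrtou8_from_user(), kstrtobool_from_user()) collapse to their plain forms because the framework now hands over a kernel-space copy of the input. Minimal sketch, assuming the copy is NUL-terminated as debugfs write plumbing normally guarantees:

#include <linux/kernel.h>

/* buf is a NUL-terminated kernel copy of the user's write */
static ssize_t parse_bool_write(const char *buf, size_t count, bool *out)
{
	int ret = kstrtobool(buf, out);	/* was kstrtobool_from_user() */

	return ret ? ret : count;	/* consume the whole write on success */
}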
2 : rtwdev->chip->rf_path_num; bool ant_asterisk = hal->tx_path_diversity || hal->ant_diversity; struct ieee80211_link_sta *link_sta; + char *p = buf, *end = buf + bufsz; u8 evm_min, evm_max, evm_1ss; u16 max_rc_amsdu_len; u8 rssi; @@ -3546,107 +3717,136 @@ static void rtw89_sta_link_info_get_iter(struct seq_file *m, rcu_read_unlock(); - seq_printf(m, "TX rate [%u, %u]: ", rtwsta_link->mac_id, rtwsta_link->link_id); + p += scnprintf(p, end - p, "TX rate [%u, %u]: ", rtwsta_link->mac_id, + rtwsta_link->link_id); if (rate->flags & RATE_INFO_FLAGS_MCS) - seq_printf(m, "HT MCS-%d%s", rate->mcs, - rate->flags & RATE_INFO_FLAGS_SHORT_GI ? " SGI" : ""); + p += scnprintf(p, end - p, "HT MCS-%d%s", rate->mcs, + rate->flags & RATE_INFO_FLAGS_SHORT_GI ? " SGI" : ""); else if (rate->flags & RATE_INFO_FLAGS_VHT_MCS) - seq_printf(m, "VHT %dSS MCS-%d%s", rate->nss, rate->mcs, - rate->flags & RATE_INFO_FLAGS_SHORT_GI ? " SGI" : ""); + p += scnprintf(p, end - p, "VHT %dSS MCS-%d%s", rate->nss, + rate->mcs, + rate->flags & RATE_INFO_FLAGS_SHORT_GI ? " SGI" : ""); else if (rate->flags & RATE_INFO_FLAGS_HE_MCS) - seq_printf(m, "HE %dSS MCS-%d GI:%s", rate->nss, rate->mcs, - rate->he_gi <= NL80211_RATE_INFO_HE_GI_3_2 ? - he_gi_str[rate->he_gi] : "N/A"); + p += scnprintf(p, end - p, "HE %dSS MCS-%d GI:%s", rate->nss, + rate->mcs, + rate->he_gi <= NL80211_RATE_INFO_HE_GI_3_2 ? + he_gi_str[rate->he_gi] : "N/A"); else if (rate->flags & RATE_INFO_FLAGS_EHT_MCS) - seq_printf(m, "EHT %dSS MCS-%d GI:%s", rate->nss, rate->mcs, - rate->eht_gi < ARRAY_SIZE(eht_gi_str) ? - eht_gi_str[rate->eht_gi] : "N/A"); + p += scnprintf(p, end - p, "EHT %dSS MCS-%d GI:%s", rate->nss, + rate->mcs, + rate->eht_gi < ARRAY_SIZE(eht_gi_str) ? + eht_gi_str[rate->eht_gi] : "N/A"); else - seq_printf(m, "Legacy %d", rate->legacy); - seq_printf(m, "%s", rtwsta_link->ra_report.might_fallback_legacy ? " FB_G" : ""); - seq_printf(m, " BW:%u", rtw89_rate_info_bw_to_mhz(rate->bw)); - seq_printf(m, " (hw_rate=0x%x)", rtwsta_link->ra_report.hw_rate); - seq_printf(m, " ==> agg_wait=%d (%d)\n", rtwsta_link->max_agg_wait, - max_rc_amsdu_len); - - seq_printf(m, "RX rate [%u, %u]: ", rtwsta_link->mac_id, rtwsta_link->link_id); + p += scnprintf(p, end - p, "Legacy %d", rate->legacy); + p += scnprintf(p, end - p, "%s", + rtwsta_link->ra_report.might_fallback_legacy ? " FB_G" : ""); + p += scnprintf(p, end - p, " BW:%u", + rtw89_rate_info_bw_to_mhz(rate->bw)); + p += scnprintf(p, end - p, " (hw_rate=0x%x)", + rtwsta_link->ra_report.hw_rate); + p += scnprintf(p, end - p, " ==> agg_wait=%d (%d)\n", + rtwsta_link->max_agg_wait, + max_rc_amsdu_len); + + p += scnprintf(p, end - p, "RX rate [%u, %u]: ", rtwsta_link->mac_id, + rtwsta_link->link_id); switch (status->encoding) { case RX_ENC_LEGACY: - seq_printf(m, "Legacy %d", status->rate_idx + - (status->band != NL80211_BAND_2GHZ ? 4 : 0)); + p += scnprintf(p, end - p, "Legacy %d", status->rate_idx + + (status->band != NL80211_BAND_2GHZ ? 4 : 0)); break; case RX_ENC_HT: - seq_printf(m, "HT MCS-%d%s", status->rate_idx, - status->enc_flags & RX_ENC_FLAG_SHORT_GI ? " SGI" : ""); + p += scnprintf(p, end - p, "HT MCS-%d%s", status->rate_idx, + status->enc_flags & RX_ENC_FLAG_SHORT_GI ? " SGI" : ""); break; case RX_ENC_VHT: - seq_printf(m, "VHT %dSS MCS-%d%s", status->nss, status->rate_idx, - status->enc_flags & RX_ENC_FLAG_SHORT_GI ? " SGI" : ""); + p += scnprintf(p, end - p, "VHT %dSS MCS-%d%s", status->nss, + status->rate_idx, + status->enc_flags & RX_ENC_FLAG_SHORT_GI ? 
" SGI" : ""); break; case RX_ENC_HE: - seq_printf(m, "HE %dSS MCS-%d GI:%s", status->nss, status->rate_idx, - status->he_gi <= NL80211_RATE_INFO_HE_GI_3_2 ? - he_gi_str[status->he_gi] : "N/A"); + p += scnprintf(p, end - p, "HE %dSS MCS-%d GI:%s", + status->nss, status->rate_idx, + status->he_gi <= NL80211_RATE_INFO_HE_GI_3_2 ? + he_gi_str[status->he_gi] : "N/A"); break; case RX_ENC_EHT: - seq_printf(m, "EHT %dSS MCS-%d GI:%s", status->nss, status->rate_idx, - status->eht.gi < ARRAY_SIZE(eht_gi_str) ? - eht_gi_str[status->eht.gi] : "N/A"); + p += scnprintf(p, end - p, "EHT %dSS MCS-%d GI:%s", + status->nss, status->rate_idx, + status->eht.gi < ARRAY_SIZE(eht_gi_str) ? + eht_gi_str[status->eht.gi] : "N/A"); break; } - seq_printf(m, " BW:%u", rtw89_rate_info_bw_to_mhz(status->bw)); - seq_printf(m, " (hw_rate=0x%x)\n", rtwsta_link->rx_hw_rate); + p += scnprintf(p, end - p, " BW:%u", + rtw89_rate_info_bw_to_mhz(status->bw)); + p += scnprintf(p, end - p, " (hw_rate=0x%x)\n", + rtwsta_link->rx_hw_rate); rssi = ewma_rssi_read(&rtwsta_link->avg_rssi); - seq_printf(m, "RSSI: %d dBm (raw=%d, prev=%d) [", - RTW89_RSSI_RAW_TO_DBM(rssi), rssi, rtwsta_link->prev_rssi); + p += scnprintf(p, end - p, "RSSI: %d dBm (raw=%d, prev=%d) [", + RTW89_RSSI_RAW_TO_DBM(rssi), rssi, + rtwsta_link->prev_rssi); for (i = 0; i < ant_num; i++) { rssi = ewma_rssi_read(&rtwsta_link->rssi[i]); - seq_printf(m, "%d%s%s", RTW89_RSSI_RAW_TO_DBM(rssi), - ant_asterisk && (hal->antenna_tx & BIT(i)) ? "*" : "", - i + 1 == ant_num ? "" : ", "); + p += scnprintf(p, end - p, "%d%s%s", + RTW89_RSSI_RAW_TO_DBM(rssi), + ant_asterisk && (hal->antenna_tx & BIT(i)) ? "*" : "", + i + 1 == ant_num ? "" : ", "); } - seq_puts(m, "]\n"); + p += scnprintf(p, end - p, "]\n"); evm_1ss = ewma_evm_read(&rtwsta_link->evm_1ss); - seq_printf(m, "EVM: [%2u.%02u, ", evm_1ss >> 2, (evm_1ss & 0x3) * 25); + p += scnprintf(p, end - p, "EVM: [%2u.%02u, ", evm_1ss >> 2, + (evm_1ss & 0x3) * 25); for (i = 0; i < (hal->ant_diversity ? 2 : 1); i++) { evm_min = ewma_evm_read(&rtwsta_link->evm_min[i]); evm_max = ewma_evm_read(&rtwsta_link->evm_max[i]); - seq_printf(m, "%s(%2u.%02u, %2u.%02u)", i == 0 ? "" : " ", - evm_min >> 2, (evm_min & 0x3) * 25, - evm_max >> 2, (evm_max & 0x3) * 25); + p += scnprintf(p, end - p, "%s(%2u.%02u, %2u.%02u)", + i == 0 ? 
"" : " ", + evm_min >> 2, (evm_min & 0x3) * 25, + evm_max >> 2, (evm_max & 0x3) * 25); } - seq_puts(m, "]\t"); + p += scnprintf(p, end - p, "]\t"); snr = ewma_snr_read(&rtwsta_link->avg_snr); - seq_printf(m, "SNR: %u\n", snr); + p += scnprintf(p, end - p, "SNR: %u\n", snr); + + return p - buf; } static void rtw89_sta_info_get_iter(void *data, struct ieee80211_sta *sta) { - struct seq_file *m = (struct seq_file *)data; + struct rtw89_debugfs_iter_data *iter_data = + (struct rtw89_debugfs_iter_data *)data; struct rtw89_sta *rtwsta = sta_to_rtwsta(sta); struct rtw89_dev *rtwdev = rtwsta->rtwdev; struct rtw89_sta_link *rtwsta_link; + size_t bufsz = iter_data->bufsz; + char *buf = iter_data->buf; + char *p = buf, *end = buf + bufsz; unsigned int link_id; rtw89_sta_for_each_link(rtwsta, rtwsta_link, link_id) - rtw89_sta_link_info_get_iter(m, rtwdev, rtwsta_link); + p += rtw89_sta_link_info_get_iter(rtwdev, p, end - p, rtwsta_link); + + rtw89_debugfs_iter_data_next(iter_data, p, end - p, p - buf); } -static void -rtw89_debug_append_rx_rate(struct seq_file *m, struct rtw89_pkt_stat *pkt_stat, +static int +rtw89_debug_append_rx_rate(char *buf, size_t bufsz, struct rtw89_pkt_stat *pkt_stat, enum rtw89_hw_rate first_rate, int len) { + char *p = buf, *end = buf + bufsz; int i; for (i = 0; i < len; i++) - seq_printf(m, "%s%u", i == 0 ? "" : ", ", - pkt_stat->rx_rate_cnt[first_rate + i]); + p += scnprintf(p, end - p, "%s%u", i == 0 ? "" : ", ", + pkt_stat->rx_rate_cnt[first_rate + i]); + + return p - buf; } #define FIRST_RATE_SAME(rate) {RTW89_HW_RATE_ ## rate, RTW89_HW_RATE_ ## rate} @@ -3671,34 +3871,40 @@ static const struct rtw89_rx_rate_cnt_info { {FIRST_RATE_GEV1(EHT_NSS2_MCS0), 14, 0, "EHT 2SS:"}, }; -static int rtw89_debug_priv_phy_info_get(struct seq_file *m, void *v) +static ssize_t rtw89_debug_priv_phy_info_get(struct rtw89_dev *rtwdev, + struct rtw89_debugfs_priv *debugfs_priv, + char *buf, size_t bufsz) { - struct rtw89_debugfs_priv *debugfs_priv = m->private; - struct rtw89_dev *rtwdev = debugfs_priv->rtwdev; struct rtw89_traffic_stats *stats = &rtwdev->stats; struct rtw89_pkt_stat *pkt_stat = &rtwdev->phystat.last_pkt_stat; const struct rtw89_chip_info *chip = rtwdev->chip; + struct rtw89_debugfs_iter_data iter_data; const struct rtw89_rx_rate_cnt_info *info; struct rtw89_hal *hal = &rtwdev->hal; + char *p = buf, *end = buf + bufsz; enum rtw89_hw_rate first_rate; u8 rssi; int i; rssi = ewma_rssi_read(&rtwdev->phystat.bcn_rssi); - seq_printf(m, "TP TX: %u [%u] Mbps (lv: %d", - stats->tx_throughput, stats->tx_throughput_raw, stats->tx_tfc_lv); + p += scnprintf(p, end - p, "TP TX: %u [%u] Mbps (lv: %d", + stats->tx_throughput, stats->tx_throughput_raw, + stats->tx_tfc_lv); if (hal->thermal_prot_lv) - seq_printf(m, ", duty: %d%%", - 100 - hal->thermal_prot_lv * RTW89_THERMAL_PROT_STEP); - seq_printf(m, "), RX: %u [%u] Mbps (lv: %d)\n", - stats->rx_throughput, stats->rx_throughput_raw, stats->rx_tfc_lv); - seq_printf(m, "Beacon: %u (%d dBm), TF: %u\n", pkt_stat->beacon_nr, - RTW89_RSSI_RAW_TO_DBM(rssi), stats->rx_tf_periodic); - seq_printf(m, "Avg packet length: TX=%u, RX=%u\n", stats->tx_avg_len, - stats->rx_avg_len); - - seq_puts(m, "RX count:\n"); + p += scnprintf(p, end - p, ", duty: %d%%", + 100 - hal->thermal_prot_lv * RTW89_THERMAL_PROT_STEP); + p += scnprintf(p, end - p, "), RX: %u [%u] Mbps (lv: %d)\n", + stats->rx_throughput, stats->rx_throughput_raw, + stats->rx_tfc_lv); + p += scnprintf(p, end - p, "Beacon: %u (%d dBm), TF: %u\n", + pkt_stat->beacon_nr, + 
RTW89_RSSI_RAW_TO_DBM(rssi), stats->rx_tf_periodic); + p += scnprintf(p, end - p, "Avg packet length: TX=%u, RX=%u\n", + stats->tx_avg_len, + stats->rx_avg_len); + + p += scnprintf(p, end - p, "RX count:\n"); for (i = 0; i < ARRAY_SIZE(rtw89_rx_rate_cnt_infos); i++) { info = &rtw89_rx_rate_cnt_infos[i]; @@ -3706,189 +3912,238 @@ static int rtw89_debug_priv_phy_info_get(struct seq_file *m, void *v) if (first_rate >= RTW89_HW_RATE_NR) continue; - seq_printf(m, "%10s [", info->rate_mode); - rtw89_debug_append_rx_rate(m, pkt_stat, - first_rate, info->len); + p += scnprintf(p, end - p, "%10s [", info->rate_mode); + p += rtw89_debug_append_rx_rate(p, end - p, pkt_stat, + first_rate, info->len); if (info->ext) { - seq_puts(m, "]["); - rtw89_debug_append_rx_rate(m, pkt_stat, - first_rate + info->len, info->ext); + p += scnprintf(p, end - p, "]["); + p += rtw89_debug_append_rx_rate(p, end - p, pkt_stat, + first_rate + info->len, info->ext); } - seq_puts(m, "]\n"); + p += scnprintf(p, end - p, "]\n"); } - ieee80211_iterate_stations_atomic(rtwdev->hw, rtw89_sta_info_get_iter, m); + rtw89_debugfs_iter_data_setup(&iter_data, p, end - p); + ieee80211_iterate_stations_atomic(rtwdev->hw, rtw89_sta_info_get_iter, &iter_data); + p += iter_data.written_sz; - return 0; + return p - buf; } -static void rtw89_dump_addr_cam(struct seq_file *m, - struct rtw89_dev *rtwdev, - struct rtw89_addr_cam_entry *addr_cam) +static int rtw89_dump_addr_cam(struct rtw89_dev *rtwdev, + char *buf, size_t bufsz, + struct rtw89_addr_cam_entry *addr_cam) { struct rtw89_cam_info *cam_info = &rtwdev->cam_info; const struct rtw89_sec_cam_entry *sec_entry; + char *p = buf, *end = buf + bufsz; u8 sec_cam_idx; int i; - seq_printf(m, "\taddr_cam_idx=%u\n", addr_cam->addr_cam_idx); - seq_printf(m, "\t-> bssid_cam_idx=%u\n", addr_cam->bssid_cam_idx); - seq_printf(m, "\tsec_cam_bitmap=%*ph\n", (int)sizeof(addr_cam->sec_cam_map), - addr_cam->sec_cam_map); + p += scnprintf(p, end - p, "\taddr_cam_idx=%u\n", + addr_cam->addr_cam_idx); + p += scnprintf(p, end - p, "\t-> bssid_cam_idx=%u\n", + addr_cam->bssid_cam_idx); + p += scnprintf(p, end - p, "\tsec_cam_bitmap=%*ph\n", + (int)sizeof(addr_cam->sec_cam_map), + addr_cam->sec_cam_map); for_each_set_bit(i, addr_cam->sec_cam_map, RTW89_SEC_CAM_IN_ADDR_CAM) { sec_cam_idx = addr_cam->sec_ent[i]; sec_entry = cam_info->sec_entries[sec_cam_idx]; if (!sec_entry) continue; - seq_printf(m, "\tsec[%d]: sec_cam_idx %u", i, sec_entry->sec_cam_idx); + p += scnprintf(p, end - p, "\tsec[%d]: sec_cam_idx %u", i, + sec_entry->sec_cam_idx); if (sec_entry->ext_key) - seq_printf(m, ", %u", sec_entry->sec_cam_idx + 1); - seq_puts(m, "\n"); + p += scnprintf(p, end - p, ", %u", + sec_entry->sec_cam_idx + 1); + p += scnprintf(p, end - p, "\n"); } + + return p - buf; } -__printf(3, 4) -static void rtw89_dump_pkt_offload(struct seq_file *m, struct list_head *pkt_list, - const char *fmt, ...) +__printf(4, 5) +static int rtw89_dump_pkt_offload(char *buf, size_t bufsz, struct list_head *pkt_list, + const char *fmt, ...) 
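[Editor's note] Several of the map dumps in this region lean on the kernel's %*ph format extension, which hex-dumps a small byte array whose length comes from the field-width argument. A tiny self-contained demonstration:

#include <linux/kernel.h>

static ssize_t dump_map(char *buf, size_t bufsz)
{
	static const u8 map[4] = { 0x01, 0x00, 0x80, 0x0f };
	char *p = buf, *end = buf + bufsz;

	/* emits "map=01 00 80 0f" (space-separated, up to 64 bytes) */
	p += scnprintf(p, end - p, "map=%*ph\n", (int)sizeof(map), map);

	return p - buf;
}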
{ + char *p = buf, *end = buf + bufsz; struct rtw89_pktofld_info *info; struct va_format vaf; va_list args; if (list_empty(pkt_list)) - return; + return 0; va_start(args, fmt); vaf.va = &args; vaf.fmt = fmt; - seq_printf(m, "%pV", &vaf); + p += scnprintf(p, end - p, "%pV", &vaf); va_end(args); list_for_each_entry(info, pkt_list, list) - seq_printf(m, "%d ", info->id); + p += scnprintf(p, end - p, "%d ", info->id); + + p += scnprintf(p, end - p, "\n"); - seq_puts(m, "\n"); + return p - buf; } -static void rtw89_vif_link_ids_get(struct seq_file *m, u8 *mac, - struct rtw89_dev *rtwdev, - struct rtw89_vif_link *rtwvif_link) +static int rtw89_vif_link_ids_get(struct rtw89_dev *rtwdev, + char *buf, size_t bufsz, u8 *mac, + struct rtw89_vif_link *rtwvif_link) { struct rtw89_bssid_cam_entry *bssid_cam = &rtwvif_link->bssid_cam; - - seq_printf(m, " [%u] %pM\n", rtwvif_link->mac_id, rtwvif_link->mac_addr); - seq_printf(m, "\tlink_id=%u\n", rtwvif_link->link_id); - seq_printf(m, "\tbssid_cam_idx=%u\n", bssid_cam->bssid_cam_idx); - rtw89_dump_addr_cam(m, rtwdev, &rtwvif_link->addr_cam); - rtw89_dump_pkt_offload(m, &rtwvif_link->general_pkt_list, - "\tpkt_ofld[GENERAL]: "); + char *p = buf, *end = buf + bufsz; + + p += scnprintf(p, end - p, " [%u] %pM\n", rtwvif_link->mac_id, + rtwvif_link->mac_addr); + p += scnprintf(p, end - p, "\tlink_id=%u\n", rtwvif_link->link_id); + p += scnprintf(p, end - p, "\tbssid_cam_idx=%u\n", + bssid_cam->bssid_cam_idx); + p += rtw89_dump_addr_cam(rtwdev, p, end - p, &rtwvif_link->addr_cam); + p += rtw89_dump_pkt_offload(p, end - p, &rtwvif_link->general_pkt_list, + "\tpkt_ofld[GENERAL]: "); + + return p - buf; } static void rtw89_vif_ids_get_iter(void *data, u8 *mac, struct ieee80211_vif *vif) { - struct seq_file *m = (struct seq_file *)data; + struct rtw89_debugfs_iter_data *iter_data = + (struct rtw89_debugfs_iter_data *)data; struct rtw89_vif *rtwvif = vif_to_rtwvif(vif); struct rtw89_dev *rtwdev = rtwvif->rtwdev; struct rtw89_vif_link *rtwvif_link; + size_t bufsz = iter_data->bufsz; + char *buf = iter_data->buf; + char *p = buf, *end = buf + bufsz; unsigned int link_id; - seq_printf(m, "VIF %pM\n", rtwvif->mac_addr); + p += scnprintf(p, end - p, "VIF %pM\n", rtwvif->mac_addr); rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id) - rtw89_vif_link_ids_get(m, mac, rtwdev, rtwvif_link); + p += rtw89_vif_link_ids_get(rtwdev, p, end - p, mac, rtwvif_link); + + rtw89_debugfs_iter_data_next(iter_data, p, end - p, p - buf); } -static void rtw89_dump_ba_cam(struct seq_file *m, struct rtw89_dev *rtwdev, - struct rtw89_sta_link *rtwsta_link) +static int rtw89_dump_ba_cam(struct rtw89_dev *rtwdev, + char *buf, size_t bufsz, + struct rtw89_sta_link *rtwsta_link) { struct rtw89_ba_cam_entry *entry; + char *p = buf, *end = buf + bufsz; bool first = true; list_for_each_entry(entry, &rtwsta_link->ba_cam_list, list) { if (first) { - seq_puts(m, "\tba_cam "); + p += scnprintf(p, end - p, "\tba_cam "); first = false; } else { - seq_puts(m, ", "); + p += scnprintf(p, end - p, ", "); } - seq_printf(m, "tid[%u]=%d", entry->tid, - (int)(entry - rtwdev->cam_info.ba_cam_entry)); + p += scnprintf(p, end - p, "tid[%u]=%d", entry->tid, + (int)(entry - rtwdev->cam_info.ba_cam_entry)); } - seq_puts(m, "\n"); + p += scnprintf(p, end - p, "\n"); + + return p - buf; } -static void rtw89_sta_link_ids_get(struct seq_file *m, - struct rtw89_dev *rtwdev, - struct rtw89_sta_link *rtwsta_link) +static int rtw89_sta_link_ids_get(struct rtw89_dev *rtwdev, + char *buf, size_t bufsz, + struct rtw89_sta_link 
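[Editor's note] rtw89_dump_pkt_offload() keeps its printf-style interface by wrapping the caller's va_list in struct va_format and printing it with the kernel-only %pV specifier, avoiding an intermediate format buffer. A stripped-down equivalent:

#include <linux/kernel.h>

__printf(3, 4)
static ssize_t emit_fmt(char *buf, size_t bufsz, const char *fmt, ...)
{
	char *p = buf, *end = buf + bufsz;
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	p += scnprintf(p, end - p, "%pV", &vaf); /* render caller's format */
	va_end(args);

	return p - buf;
}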
*rtwsta_link) { struct ieee80211_link_sta *link_sta; + char *p = buf, *end = buf + bufsz; rcu_read_lock(); link_sta = rtw89_sta_rcu_dereference_link(rtwsta_link, true); - seq_printf(m, " [%u] %pM\n", rtwsta_link->mac_id, link_sta->addr); + p += scnprintf(p, end - p, " [%u] %pM\n", rtwsta_link->mac_id, + link_sta->addr); rcu_read_unlock(); - seq_printf(m, "\tlink_id=%u\n", rtwsta_link->link_id); - rtw89_dump_addr_cam(m, rtwdev, &rtwsta_link->addr_cam); - rtw89_dump_ba_cam(m, rtwdev, rtwsta_link); + p += scnprintf(p, end - p, "\tlink_id=%u\n", rtwsta_link->link_id); + p += rtw89_dump_addr_cam(rtwdev, p, end - p, &rtwsta_link->addr_cam); + p += rtw89_dump_ba_cam(rtwdev, p, end - p, rtwsta_link); + + return p - buf; } static void rtw89_sta_ids_get_iter(void *data, struct ieee80211_sta *sta) { - struct seq_file *m = (struct seq_file *)data; + struct rtw89_debugfs_iter_data *iter_data = + (struct rtw89_debugfs_iter_data *)data; struct rtw89_sta *rtwsta = sta_to_rtwsta(sta); struct rtw89_dev *rtwdev = rtwsta->rtwdev; struct rtw89_sta_link *rtwsta_link; + size_t bufsz = iter_data->bufsz; + char *buf = iter_data->buf; + char *p = buf, *end = buf + bufsz; unsigned int link_id; - seq_printf(m, "STA %pM %s\n", sta->addr, sta->tdls ? "(TDLS)" : ""); + p += scnprintf(p, end - p, "STA %pM %s\n", sta->addr, + sta->tdls ? "(TDLS)" : ""); rtw89_sta_for_each_link(rtwsta, rtwsta_link, link_id) - rtw89_sta_link_ids_get(m, rtwdev, rtwsta_link); + p += rtw89_sta_link_ids_get(rtwdev, p, end - p, rtwsta_link); + + rtw89_debugfs_iter_data_next(iter_data, p, end - p, p - buf); } -static int rtw89_debug_priv_stations_get(struct seq_file *m, void *v) +static ssize_t rtw89_debug_priv_stations_get(struct rtw89_dev *rtwdev, + struct rtw89_debugfs_priv *debugfs_priv, + char *buf, size_t bufsz) { - struct rtw89_debugfs_priv *debugfs_priv = m->private; - struct rtw89_dev *rtwdev = debugfs_priv->rtwdev; struct rtw89_cam_info *cam_info = &rtwdev->cam_info; + struct rtw89_debugfs_iter_data iter_data; + char *p = buf, *end = buf + bufsz; u8 idx; - mutex_lock(&rtwdev->mutex); - - seq_puts(m, "map:\n"); - seq_printf(m, "\tmac_id: %*ph\n", (int)sizeof(rtwdev->mac_id_map), - rtwdev->mac_id_map); - seq_printf(m, "\taddr_cam: %*ph\n", (int)sizeof(cam_info->addr_cam_map), - cam_info->addr_cam_map); - seq_printf(m, "\tbssid_cam: %*ph\n", (int)sizeof(cam_info->bssid_cam_map), - cam_info->bssid_cam_map); - seq_printf(m, "\tsec_cam: %*ph\n", (int)sizeof(cam_info->sec_cam_map), - cam_info->sec_cam_map); - seq_printf(m, "\tba_cam: %*ph\n", (int)sizeof(cam_info->ba_cam_map), - cam_info->ba_cam_map); - seq_printf(m, "\tpkt_ofld: %*ph\n", (int)sizeof(rtwdev->pkt_offload), - rtwdev->pkt_offload); + lockdep_assert_wiphy(rtwdev->hw->wiphy); + + p += scnprintf(p, end - p, "map:\n"); + p += scnprintf(p, end - p, "\tmac_id: %*ph\n", + (int)sizeof(rtwdev->mac_id_map), + rtwdev->mac_id_map); + p += scnprintf(p, end - p, "\taddr_cam: %*ph\n", + (int)sizeof(cam_info->addr_cam_map), + cam_info->addr_cam_map); + p += scnprintf(p, end - p, "\tbssid_cam: %*ph\n", + (int)sizeof(cam_info->bssid_cam_map), + cam_info->bssid_cam_map); + p += scnprintf(p, end - p, "\tsec_cam: %*ph\n", + (int)sizeof(cam_info->sec_cam_map), + cam_info->sec_cam_map); + p += scnprintf(p, end - p, "\tba_cam: %*ph\n", + (int)sizeof(cam_info->ba_cam_map), + cam_info->ba_cam_map); + p += scnprintf(p, end - p, "\tpkt_ofld: %*ph\n", + (int)sizeof(rtwdev->pkt_offload), + rtwdev->pkt_offload); for (idx = NL80211_BAND_2GHZ; idx < NUM_NL80211_BANDS; idx++) { if (!(rtwdev->chip->support_bands & 
BIT(idx))) continue; - rtw89_dump_pkt_offload(m, &rtwdev->scan_info.pkt_list[idx], - "\t\t[SCAN %u]: ", idx); + p += rtw89_dump_pkt_offload(p, end - p, &rtwdev->scan_info.pkt_list[idx], + "\t\t[SCAN %u]: ", idx); } + rtw89_debugfs_iter_data_setup(&iter_data, p, end - p); ieee80211_iterate_active_interfaces_atomic(rtwdev->hw, - IEEE80211_IFACE_ITER_NORMAL, rtw89_vif_ids_get_iter, m); + IEEE80211_IFACE_ITER_NORMAL, rtw89_vif_ids_get_iter, &iter_data); + p += iter_data.written_sz; - ieee80211_iterate_stations_atomic(rtwdev->hw, rtw89_sta_ids_get_iter, m); + rtw89_debugfs_iter_data_setup(&iter_data, p, end - p); + ieee80211_iterate_stations_atomic(rtwdev->hw, rtw89_sta_ids_get_iter, &iter_data); + p += iter_data.written_sz; - mutex_unlock(&rtwdev->mutex); - - return 0; + return p - buf; } #define DM_INFO(type) {RTW89_DM_ ## type, #type} @@ -3899,43 +4154,45 @@ static const struct rtw89_disabled_dm_info { } rtw89_disabled_dm_infos[] = { DM_INFO(DYNAMIC_EDCCA), DM_INFO(THERMAL_PROTECT), + DM_INFO(TAS), }; -static int -rtw89_debug_priv_disable_dm_get(struct seq_file *m, void *v) +static ssize_t +rtw89_debug_priv_disable_dm_get(struct rtw89_dev *rtwdev, + struct rtw89_debugfs_priv *debugfs_priv, + char *buf, size_t bufsz) { - struct rtw89_debugfs_priv *debugfs_priv = m->private; - struct rtw89_dev *rtwdev = debugfs_priv->rtwdev; const struct rtw89_disabled_dm_info *info; struct rtw89_hal *hal = &rtwdev->hal; + char *p = buf, *end = buf + bufsz; u32 disabled; int i; - seq_printf(m, "Disabled DM: 0x%x\n", hal->disabled_dm_bitmap); + p += scnprintf(p, end - p, "Disabled DM: 0x%x\n", + hal->disabled_dm_bitmap); for (i = 0; i < ARRAY_SIZE(rtw89_disabled_dm_infos); i++) { info = &rtw89_disabled_dm_infos[i]; disabled = BIT(info->type) & hal->disabled_dm_bitmap; - seq_printf(m, "[%d] %s: %c\n", info->type, info->name, - disabled ? 'X' : 'O'); + p += scnprintf(p, end - p, "[%d] %s: %c\n", info->type, + info->name, + disabled ? 'X' : 'O'); } - return 0; + return p - buf; } static ssize_t -rtw89_debug_priv_disable_dm_set(struct file *filp, const char __user *user_buf, - size_t count, loff_t *loff) +rtw89_debug_priv_disable_dm_set(struct rtw89_dev *rtwdev, + struct rtw89_debugfs_priv *debugfs_priv, + const char *buf, size_t count) { - struct seq_file *m = (struct seq_file *)filp->private_data; - struct rtw89_debugfs_priv *debugfs_priv = m->private; - struct rtw89_dev *rtwdev = debugfs_priv->rtwdev; struct rtw89_hal *hal = &rtwdev->hal; u32 conf; int ret; - ret = kstrtou32_from_user(user_buf, count, 0, &conf); + ret = kstrtou32(buf, 0, &conf); if (ret) return -EINVAL; @@ -3944,46 +4201,62 @@ rtw89_debug_priv_disable_dm_set(struct file *filp, const char __user *user_buf, return count; } -#define rtw89_debug_priv_get(name) \ +#define rtw89_debug_priv_get(name, opts...) \ { \ .cb_read = rtw89_debug_priv_ ##name## _get, \ + .opt = { opts }, \ } -#define rtw89_debug_priv_set(name) \ +#define rtw89_debug_priv_set(name, opts...) \ { \ .cb_write = rtw89_debug_priv_ ##name## _set, \ + .opt = { opts }, \ } -#define rtw89_debug_priv_select_and_get(name) \ +#define rtw89_debug_priv_select_and_get(name, opts...) \ { \ .cb_write = rtw89_debug_priv_ ##name## _select, \ .cb_read = rtw89_debug_priv_ ##name## _get, \ + .opt = { opts }, \ } -#define rtw89_debug_priv_set_and_get(name) \ +#define rtw89_debug_priv_set_and_get(name, opts...) 
\ { \ .cb_write = rtw89_debug_priv_ ##name## _set, \ .cb_read = rtw89_debug_priv_ ##name## _get, \ + .opt = { opts }, \ } +#define RSIZE_8K .rsize = 0x2000 +#define RSIZE_12K .rsize = 0x3000 +#define RSIZE_16K .rsize = 0x4000 +#define RSIZE_20K .rsize = 0x5000 +#define RSIZE_32K .rsize = 0x8000 +#define RSIZE_64K .rsize = 0x10000 +#define RSIZE_128K .rsize = 0x20000 +#define RSIZE_1M .rsize = 0x100000 +#define RLOCK .rlock = 1 +#define WLOCK .wlock = 1 +#define RWLOCK RLOCK, WLOCK + static const struct rtw89_debugfs rtw89_debugfs_templ = { .read_reg = rtw89_debug_priv_select_and_get(read_reg), .write_reg = rtw89_debug_priv_set(write_reg), .read_rf = rtw89_debug_priv_select_and_get(read_rf), .write_rf = rtw89_debug_priv_set(write_rf), - .rf_reg_dump = rtw89_debug_priv_get(rf_reg_dump), - .txpwr_table = rtw89_debug_priv_get(txpwr_table), - .mac_reg_dump = rtw89_debug_priv_select_and_get(mac_reg_dump), - .mac_mem_dump = rtw89_debug_priv_select_and_get(mac_mem_dump), - .mac_dbg_port_dump = rtw89_debug_priv_select_and_get(mac_dbg_port_dump), + .rf_reg_dump = rtw89_debug_priv_get(rf_reg_dump, RSIZE_8K), + .txpwr_table = rtw89_debug_priv_get(txpwr_table, RSIZE_20K, RLOCK), + .mac_reg_dump = rtw89_debug_priv_select_and_get(mac_reg_dump, RSIZE_128K), + .mac_mem_dump = rtw89_debug_priv_select_and_get(mac_mem_dump, RSIZE_16K, RLOCK), + .mac_dbg_port_dump = rtw89_debug_priv_select_and_get(mac_dbg_port_dump, RSIZE_1M), .send_h2c = rtw89_debug_priv_set(send_h2c), - .early_h2c = rtw89_debug_priv_set_and_get(early_h2c), - .fw_crash = rtw89_debug_priv_set_and_get(fw_crash), - .btc_info = rtw89_debug_priv_get(btc_info), + .early_h2c = rtw89_debug_priv_set_and_get(early_h2c, RWLOCK), + .fw_crash = rtw89_debug_priv_set_and_get(fw_crash, WLOCK), + .btc_info = rtw89_debug_priv_get(btc_info, RSIZE_12K), .btc_manual = rtw89_debug_priv_set(btc_manual), - .fw_log_manual = rtw89_debug_priv_set(fw_log_manual), + .fw_log_manual = rtw89_debug_priv_set(fw_log_manual, WLOCK), .phy_info = rtw89_debug_priv_get(phy_info), - .stations = rtw89_debug_priv_get(stations), + .stations = rtw89_debug_priv_get(stations, RLOCK), .disable_dm = rtw89_debug_priv_set_and_get(disable_dm), }; diff --git a/drivers/net/wireless/realtek/rtw89/fw.c b/drivers/net/wireless/realtek/rtw89/fw.c index 5d4ad23cc3bd4..8643b17866f89 100644 --- a/drivers/net/wireless/realtek/rtw89/fw.c +++ b/drivers/net/wireless/realtek/rtw89/fw.c @@ -38,6 +38,16 @@ struct rtw89_arp_rsp { static const u8 mss_signature[] = {0x4D, 0x53, 0x53, 0x4B, 0x50, 0x4F, 0x4F, 0x4C}; +const struct rtw89_fw_blacklist rtw89_fw_blacklist_default = { + .ver = 0x00, + .list = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + }, +}; +EXPORT_SYMBOL(rtw89_fw_blacklist_default); + union rtw89_fw_element_arg { size_t offset; enum rtw89_rf_path rf_path; @@ -314,7 +324,7 @@ static int __parse_formatted_mssc(struct rtw89_dev *rtwdev, if (!sec->secure_boot) goto out; - sb_sel_ver = le32_to_cpu(section_content->sb_sel_ver.v); + sb_sel_ver = get_unaligned_le32(§ion_content->sb_sel_ver.v); if (sb_sel_ver && sb_sel_ver != sec->sb_sel_mgn) goto ignore; @@ -344,6 +354,46 @@ static int __parse_formatted_mssc(struct rtw89_dev *rtwdev, return 0; } +static int __check_secure_blacklist(struct rtw89_dev *rtwdev, + struct rtw89_fw_bin_info *info, + struct rtw89_fw_hdr_section_info *section_info, + const void *content) +{ + const struct rtw89_fw_blacklist *chip_blacklist = 
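[Editor's note] The reworked template table lets each debugfs entry declare options (read-buffer size, read/write locking) as extra macro arguments splatted into a designated initializer. A compact model of the mechanism, with simplified struct and field names standing in for the driver's real ones:

#include <linux/types.h>

/* simplified entry; the driver's real struct carries more state */
struct dbgfs_entry {
	ssize_t (*cb_read)(char *buf, size_t bufsz);
	struct {
		u32 rsize;	/* read buffer size, 0 = default */
		u8 rlock : 1;	/* hold wiphy lock around reads  */
		u8 wlock : 1;	/* hold wiphy lock around writes */
	} opt;
};

#define dbgfs_get(cb, opts...)	{ .cb_read = cb, .opt = { opts } }
#define RSIZE_8K		.rsize = 0x2000
#define RLOCK			.rlock = 1

static ssize_t dummy_read(char *buf, size_t bufsz) { return 0; }

static const struct dbgfs_entry entries[] = {
	dbgfs_get(dummy_read),			/* all defaults        */
	dbgfs_get(dummy_read, RSIZE_8K, RLOCK),	/* 8 KiB, locked reads */
};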
rtwdev->chip->fw_blacklist; + const union rtw89_fw_section_mssc_content *section_content = content; + struct rtw89_fw_secure *sec = &rtwdev->fw.sec; + u8 byte_idx; + u8 bit_mask; + + if (!sec->secure_boot) + return 0; + + if (!info->secure_section_exist || section_info->ignore) + return 0; + + if (!chip_blacklist) { + rtw89_warn(rtwdev, "chip has no blacklist for secure firmware\n"); + return -ENOENT; + } + + byte_idx = section_content->blacklist.bit_in_chip_list >> 3; + bit_mask = BIT(section_content->blacklist.bit_in_chip_list & 0x7); + + if (section_content->blacklist.ver > chip_blacklist->ver) { + rtw89_warn(rtwdev, "chip blacklist out of date (%u, %u)\n", + section_content->blacklist.ver, chip_blacklist->ver); + return -EINVAL; + } + + if (chip_blacklist->list[byte_idx] & bit_mask) { + rtw89_warn(rtwdev, "firmware %u in chip blacklist\n", + section_content->blacklist.ver); + return -EPERM; + } + + return 0; +} + static int __parse_security_section(struct rtw89_dev *rtwdev, struct rtw89_fw_bin_info *info, struct rtw89_fw_hdr_section_info *section_info, @@ -364,8 +414,11 @@ static int __parse_security_section(struct rtw89_dev *rtwdev, *mssc_len += section_info->mssc * FWDL_SECURITY_CHKSUM_LEN; if (sec->secure_boot) { - if (sec->mss_idx >= section_info->mssc) + if (sec->mss_idx >= section_info->mssc) { + rtw89_err(rtwdev, "unexpected MSS %d >= %d\n", + sec->mss_idx, section_info->mssc); return -EFAULT; + } section_info->key_addr = content + section_info->len + sec->mss_idx * FWDL_SECURITY_SIGLEN; section_info->key_len = FWDL_SECURITY_SIGLEN; @@ -374,6 +427,9 @@ static int __parse_security_section(struct rtw89_dev *rtwdev, info->secure_section_exist = true; } + ret = __check_secure_blacklist(rtwdev, info, section_info, content); + WARN_ONCE(ret, "Current firmware in blacklist. 
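[Editor's note] The blacklist lookup in __check_secure_blacklist() addresses a flat byte array as a bitmap: bit N lives in byte N >> 3 under mask BIT(N & 0x7). A worked example:

#include <linux/bits.h>
#include <linux/types.h>

static bool fw_bit_is_listed(const u8 *list, u8 bit_in_chip_list)
{
	u8 byte_idx = bit_in_chip_list >> 3;	   /* bit 13 -> byte 1 */
	u8 bit_mask = BIT(bit_in_chip_list & 0x7); /* bit 13 -> BIT(5) */

	return list[byte_idx] & bit_mask;
}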
Please update firmware.\n"); + return 0; } @@ -489,19 +545,62 @@ static int rtw89_fw_hdr_parser(struct rtw89_dev *rtwdev, } } +static +const struct rtw89_mfw_hdr *rtw89_mfw_get_hdr_ptr(struct rtw89_dev *rtwdev, + const struct firmware *firmware) +{ + const struct rtw89_mfw_hdr *mfw_hdr; + + if (sizeof(*mfw_hdr) > firmware->size) + return NULL; + + mfw_hdr = (const struct rtw89_mfw_hdr *)firmware->data; + + if (mfw_hdr->sig != RTW89_MFW_SIG) + return NULL; + + return mfw_hdr; +} + +static int rtw89_mfw_validate_hdr(struct rtw89_dev *rtwdev, + const struct firmware *firmware, + const struct rtw89_mfw_hdr *mfw_hdr) +{ + const void *mfw = firmware->data; + u32 mfw_len = firmware->size; + u8 fw_nr = mfw_hdr->fw_nr; + const void *ptr; + + if (fw_nr == 0) { + rtw89_err(rtwdev, "mfw header has no fw entry\n"); + return -ENOENT; + } + + ptr = &mfw_hdr->info[fw_nr]; + + if (ptr > mfw + mfw_len) { + rtw89_err(rtwdev, "mfw header out of address\n"); + return -EFAULT; + } + + return 0; +} + static int rtw89_mfw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type, struct rtw89_fw_suit *fw_suit, bool nowarn) { struct rtw89_fw_info *fw_info = &rtwdev->fw; const struct firmware *firmware = fw_info->req.firmware; + const struct rtw89_mfw_info *mfw_info = NULL, *tmp; + const struct rtw89_mfw_hdr *mfw_hdr; const u8 *mfw = firmware->data; u32 mfw_len = firmware->size; - const struct rtw89_mfw_hdr *mfw_hdr = (const struct rtw89_mfw_hdr *)mfw; - const struct rtw89_mfw_info *mfw_info = NULL, *tmp; + int ret; int i; - if (mfw_hdr->sig != RTW89_MFW_SIG) { + mfw_hdr = rtw89_mfw_get_hdr_ptr(rtwdev, firmware); + if (!mfw_hdr) { rtw89_debug(rtwdev, RTW89_DBG_FW, "use legacy firmware\n"); /* legacy firmware support normal type only */ if (type != RTW89_FW_NORMAL) @@ -511,6 +610,10 @@ int rtw89_mfw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type, return 0; } + ret = rtw89_mfw_validate_hdr(rtwdev, firmware, mfw_hdr); + if (ret) + return ret; + for (i = 0; i < mfw_hdr->fw_nr; i++) { tmp = &mfw_hdr->info[i]; if (tmp->type != type) @@ -540,6 +643,12 @@ int rtw89_mfw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type, found: fw_suit->data = mfw + le32_to_cpu(mfw_info->shift); fw_suit->size = le32_to_cpu(mfw_info->size); + + if (fw_suit->data + fw_suit->size > mfw + mfw_len) { + rtw89_err(rtwdev, "fw_suit %d out of address\n", type); + return -EFAULT; + } + return 0; } @@ -547,16 +656,21 @@ static u32 rtw89_mfw_get_size(struct rtw89_dev *rtwdev) { struct rtw89_fw_info *fw_info = &rtwdev->fw; const struct firmware *firmware = fw_info->req.firmware; - const struct rtw89_mfw_hdr *mfw_hdr = - (const struct rtw89_mfw_hdr *)firmware->data; const struct rtw89_mfw_info *mfw_info; + const struct rtw89_mfw_hdr *mfw_hdr; u32 size; + int ret; - if (mfw_hdr->sig != RTW89_MFW_SIG) { + mfw_hdr = rtw89_mfw_get_hdr_ptr(rtwdev, firmware); + if (!mfw_hdr) { rtw89_warn(rtwdev, "not mfw format\n"); return 0; } + ret = rtw89_mfw_validate_hdr(rtwdev, firmware, mfw_hdr); + if (ret) + return ret; + mfw_info = &mfw_hdr->info[mfw_hdr->fw_nr - 1]; size = le32_to_cpu(mfw_info->shift) + le32_to_cpu(mfw_info->size); @@ -735,6 +849,7 @@ static const struct __fw_feat_cfg fw_feat_tbl[] = { __CFG_FW_FEAT(RTL8922A, lt, 0, 35, 47, 0, CH_INFO_BE_V0), __CFG_FW_FEAT(RTL8922A, lt, 0, 35, 49, 0, RFK_PRE_NOTIFY_V1), __CFG_FW_FEAT(RTL8922A, lt, 0, 35, 51, 0, NO_PHYCAP_P1), + __CFG_FW_FEAT(RTL8922A, lt, 0, 35, 64, 0, NO_POWER_DIFFERENCE), }; static void rtw89_fw_iterate_feature_cfg(struct rtw89_fw_info *fw, @@ -988,7 +1103,7 @@ int 
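[Editor's note] rtw89_mfw_validate_hdr() guards a header that ends in a flexible array: the fixed part must fit in the blob before it is read, and the declared entry table must not run past the blob's end. A condensed, self-contained restatement; the struct layouts are abbreviated stand-ins for rtw89_mfw_hdr/rtw89_mfw_info:

#include <linux/compiler.h>
#include <linux/types.h>

struct mfw_info { u32 shift, size; } __packed;	/* abbreviated layout */
struct mfw_hdr {
	u8 sig;
	u8 fw_nr;
	struct mfw_info info[];
} __packed;

static const struct mfw_hdr *mfw_validate(const u8 *data, size_t len)
{
	const struct mfw_hdr *hdr = (const struct mfw_hdr *)data;

	if (sizeof(*hdr) > len)			/* fixed part must fit */
		return NULL;
	if (hdr->fw_nr == 0)			/* need >= 1 entry     */
		return NULL;
	if ((const void *)&hdr->info[hdr->fw_nr] > (const void *)(data + len))
		return NULL;			/* table overruns blob */

	return hdr;
}

The same end-pointer comparison is then repeated per firmware suit, since each entry's shift + size must also land inside the blob.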
rtw89_build_txpwr_trk_tbl_from_elm(struct rtw89_dev *rtwdev, bitmap = le32_to_cpu(elm->u.txpwr_trk.bitmap); if ((bitmap & needed_bitmap) != needed_bitmap) { - rtw89_warn(rtwdev, "needed txpwr trk bitmap %08x but %0x8x\n", + rtw89_warn(rtwdev, "needed txpwr trk bitmap %08x but %08x\n", needed_bitmap, bitmap); return -ENOENT; } @@ -1056,6 +1171,101 @@ int rtw89_build_rfk_log_fmt_from_elm(struct rtw89_dev *rtwdev, return 0; } +static bool rtw89_regd_entcpy(struct rtw89_regd *regd, const void *cursor, + u8 cursor_size) +{ + /* fill default values if needed for backward compatibility */ + struct rtw89_fw_regd_entry entry = { + .rule_2ghz = RTW89_NA, + .rule_5ghz = RTW89_NA, + .rule_6ghz = RTW89_NA, + .fmap = cpu_to_le32(0x0), + }; + u8 valid_size = min_t(u8, sizeof(entry), cursor_size); + unsigned int i; + u32 fmap; + + memcpy(&entry, cursor, valid_size); + memset(regd, 0, sizeof(*regd)); + + regd->alpha2[0] = entry.alpha2_0; + regd->alpha2[1] = entry.alpha2_1; + regd->alpha2[2] = '\0'; + + /* also need to consider forward compatibility */ + regd->txpwr_regd[RTW89_BAND_2G] = entry.rule_2ghz < RTW89_REGD_NUM ? + entry.rule_2ghz : RTW89_NA; + regd->txpwr_regd[RTW89_BAND_5G] = entry.rule_5ghz < RTW89_REGD_NUM ? + entry.rule_5ghz : RTW89_NA; + regd->txpwr_regd[RTW89_BAND_6G] = entry.rule_6ghz < RTW89_REGD_NUM ? + entry.rule_6ghz : RTW89_NA; + + BUILD_BUG_ON(sizeof(fmap) != sizeof(entry.fmap)); + BUILD_BUG_ON(sizeof(fmap) * 8 < NUM_OF_RTW89_REGD_FUNC); + + fmap = le32_to_cpu(entry.fmap); + for (i = 0; i < NUM_OF_RTW89_REGD_FUNC; i++) { + if (fmap & BIT(i)) + set_bit(i, regd->func_bitmap); + } + + return true; +} + +#define rtw89_for_each_in_regd_element(regd, element) \ + for (const void *cursor = (element)->content, \ + *end = (element)->content + \ + le32_to_cpu((element)->num_ents) * (element)->ent_sz; \ + cursor < end; cursor += (element)->ent_sz) \ + if (rtw89_regd_entcpy(regd, cursor, (element)->ent_sz)) + +static +int rtw89_recognize_regd_from_elm(struct rtw89_dev *rtwdev, + const struct rtw89_fw_element_hdr *elm, + const union rtw89_fw_element_arg arg) +{ + const struct __rtw89_fw_regd_element *regd_elm = &elm->u.regd; + struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info; + u32 num_ents = le32_to_cpu(regd_elm->num_ents); + struct rtw89_regd_data *p; + struct rtw89_regd regd; + u32 i = 0; + + if (num_ents > RTW89_REGD_MAX_COUNTRY_NUM) { + rtw89_warn(rtwdev, + "regd element ents (%d) are over max num (%d)\n", + num_ents, RTW89_REGD_MAX_COUNTRY_NUM); + rtw89_warn(rtwdev, + "regd element ignore and take another/common\n"); + return 1; + } + + if (elm_info->regd) { + rtw89_debug(rtwdev, RTW89_DBG_REGD, + "regd element take the latter\n"); + devm_kfree(rtwdev->dev, elm_info->regd); + elm_info->regd = NULL; + } + + p = devm_kzalloc(rtwdev->dev, struct_size(p, map, num_ents), GFP_KERNEL); + if (!p) + return -ENOMEM; + + p->nr = num_ents; + rtw89_for_each_in_regd_element(®d, regd_elm) + p->map[i++] = regd; + + if (i != num_ents) { + rtw89_err(rtwdev, "regd element has %d invalid ents\n", + num_ents - i); + devm_kfree(rtwdev->dev, p); + return -EINVAL; + } + + elm_info->regd = p; + return 0; +} + static const struct rtw89_fw_element_handler __fw_element_handlers[] = { [RTW89_FW_ELEMENT_ID_BBMCU0] = {__rtw89_fw_recognize_from_elm, { .fw_type = RTW89_FW_BBMCU0 }, NULL}, @@ -1114,6 +1324,9 @@ static const struct rtw89_fw_element_handler __fw_element_handlers[] = { [RTW89_FW_ELEMENT_ID_RFKLOG_FMT] = { rtw89_build_rfk_log_fmt_from_elm, {}, NULL, }, + [RTW89_FW_ELEMENT_ID_REGD] = { + 
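[Editor's note] rtw89_regd_entcpy() accepts both older (shorter) and newer (longer) firmware entry layouts by pre-filling a struct with defaults and overlaying only the bytes the element actually carries. A simplified model of that compatibility copy; the entry layout and default values here are stand-ins:

#include <linux/minmax.h>
#include <linux/string.h>
#include <linux/types.h>

struct regd_ent { u8 alpha2_0, alpha2_1, rule_2g, rule_5g, rule_6g; };

static void ent_compat_copy(struct regd_ent *out, const void *cursor,
			    u8 cursor_size)
{
	struct regd_ent entry = {
		.rule_2g = 0xff,	/* stand-ins for the RTW89_NA default */
		.rule_5g = 0xff,
		.rule_6g = 0xff,
	};
	u8 valid = min_t(u8, sizeof(entry), cursor_size);

	/* shorter (older) entries keep the defaults in their missing tail */
	memcpy(&entry, cursor, valid);
	*out = entry;
}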
rtw89_recognize_regd_from_elm, {}, "REGD", + }, }; int rtw89_fw_recognize_elements(struct rtw89_dev *rtwdev) @@ -1322,7 +1535,6 @@ static int __rtw89_fw_download_hdr(struct rtw89_dev *rtwdev, ret = rtw89_h2c_tx(rtwdev, skb, false); if (ret) { rtw89_err(rtwdev, "failed to send h2c\n"); - ret = -1; goto fail; } @@ -1409,7 +1621,6 @@ static int __rtw89_fw_download_main(struct rtw89_dev *rtwdev, ret = rtw89_h2c_tx(rtwdev, skb, true); if (ret) { rtw89_err(rtwdev, "failed to send h2c\n"); - ret = -1; goto fail; } @@ -2697,7 +2908,9 @@ int rtw89_fw_h2c_lps_ml_cmn_info(struct rtw89_dev *rtwdev, { const struct rtw89_phy_bb_gain_info_be *gain = &rtwdev->bb_gain.be; struct rtw89_pkt_stat *pkt_stat = &rtwdev->phystat.cur_pkt_stat; + static const u8 bcn_bw_ofst[] = {0, 0, 0, 3, 6, 9, 0, 12}; const struct rtw89_chip_info *chip = rtwdev->chip; + struct rtw89_efuse *efuse = &rtwdev->efuse; struct rtw89_h2c_lps_ml_cmn_info *h2c; struct rtw89_vif_link *rtwvif_link; const struct rtw89_chan *chan; @@ -2705,6 +2918,7 @@ int rtw89_fw_h2c_lps_ml_cmn_info(struct rtw89_dev *rtwdev, u32 len = sizeof(*h2c); unsigned int link_id; struct sk_buff *skb; + u8 beacon_bw_ofst; u8 gain_band; u32 done; u8 path; @@ -2722,9 +2936,10 @@ int rtw89_fw_h2c_lps_ml_cmn_info(struct rtw89_dev *rtwdev, skb_put(skb, len); h2c = (struct rtw89_h2c_lps_ml_cmn_info *)skb->data; - h2c->fmt_id = 0x1; + h2c->fmt_id = 0x3; h2c->mlo_dbcc_mode = cpu_to_le32(rtwdev->mlo_dbcc_mode); + h2c->rfe_type = efuse->rfe_type; rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id) { path = rtwvif_link->phy_idx == RTW89_PHY_1 ? RF_PATH_B : RF_PATH_A; @@ -2745,9 +2960,21 @@ int rtw89_fw_h2c_lps_ml_cmn_info(struct rtw89_dev *rtwdev, h2c->tia_gain[rtwvif_link->phy_idx][i] = cpu_to_le16(gain->tia_gain[gain_band][bw_idx][path][i]); } + + if (rtwvif_link->bcn_bw_idx < ARRAY_SIZE(bcn_bw_ofst)) { + beacon_bw_ofst = bcn_bw_ofst[rtwvif_link->bcn_bw_idx]; + h2c->dup_bcn_ofst[rtwvif_link->phy_idx] = beacon_bw_ofst; + } + memcpy(h2c->lna_gain[rtwvif_link->phy_idx], gain->lna_gain[gain_band][bw_idx][path], LNA_GAIN_NUM); + memcpy(h2c->tia_lna_op1db[rtwvif_link->phy_idx], + gain->tia_lna_op1db[gain_band][bw_idx][path], + LNA_GAIN_NUM + 1); + memcpy(h2c->lna_op1db[rtwvif_link->phy_idx], + gain->lna_op1db[gain_band][bw_idx][path], + LNA_GAIN_NUM); } rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, @@ -3281,9 +3508,10 @@ int rtw89_fw_h2c_assoc_cmac_tbl_g7(struct rtw89_dev *rtwdev, CCTLINFO_G7_W5_NOMINAL_PKT_PADDING3 | CCTLINFO_G7_W5_NOMINAL_PKT_PADDING4); - h2c->w6 = le32_encode_bits(vif->type == NL80211_IFTYPE_STATION ? 1 : 0, + h2c->w6 = le32_encode_bits(vif->cfg.aid, CCTLINFO_G7_W6_AID12_PAID) | + le32_encode_bits(vif->type == NL80211_IFTYPE_STATION ? 
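[Editor's note] The LPS hunk below introduces the bcn_bw_ofst[] table and indexes it only after an ARRAY_SIZE bound check, leaving the H2C field untouched for unknown indexes. A minimal model of the guarded lookup; returning 0 as the neutral fallback is a choice of this sketch, not taken from the patch:

#include <linux/kernel.h>

static u8 bcn_bw_offset(u8 bw_idx)
{
	static const u8 ofst[] = {0, 0, 0, 3, 6, 9, 0, 12};

	if (bw_idx < ARRAY_SIZE(ofst))
		return ofst[bw_idx];

	return 0;	/* sketch's neutral fallback for unknown indexes */
}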
1 : 0, CCTLINFO_G7_W6_ULDL); - h2c->m6 = cpu_to_le32(CCTLINFO_G7_W6_ULDL); + h2c->m6 = cpu_to_le32(CCTLINFO_G7_W6_AID12_PAID | CCTLINFO_G7_W6_ULDL); if (rtwsta_link) { h2c->w8 = le32_encode_bits(link_sta->he_cap.has_he, @@ -3419,6 +3647,61 @@ int rtw89_fw_h2c_txtime_cmac_tbl(struct rtw89_dev *rtwdev, return ret; } +EXPORT_SYMBOL(rtw89_fw_h2c_txtime_cmac_tbl); + +int rtw89_fw_h2c_txtime_cmac_tbl_g7(struct rtw89_dev *rtwdev, + struct rtw89_sta_link *rtwsta_link) +{ + struct rtw89_h2c_cctlinfo_ud_g7 *h2c; + u32 len = sizeof(*h2c); + struct sk_buff *skb; + int ret; + + skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); + if (!skb) { + rtw89_err(rtwdev, "failed to alloc skb for txtime_cmac_g7\n"); + return -ENOMEM; + } + skb_put(skb, len); + h2c = (struct rtw89_h2c_cctlinfo_ud_g7 *)skb->data; + + h2c->c0 = le32_encode_bits(rtwsta_link->mac_id, CCTLINFO_G7_C0_MACID) | + le32_encode_bits(1, CCTLINFO_G7_C0_OP); + + if (rtwsta_link->cctl_tx_time) { + h2c->w3 |= le32_encode_bits(1, CCTLINFO_G7_W3_AMPDU_TIME_SEL); + h2c->m3 |= cpu_to_le32(CCTLINFO_G7_W3_AMPDU_TIME_SEL); + + h2c->w2 |= le32_encode_bits(rtwsta_link->ampdu_max_time, + CCTLINFO_G7_W2_AMPDU_MAX_TIME); + h2c->m2 |= cpu_to_le32(CCTLINFO_G7_W2_AMPDU_MAX_TIME); + } + if (rtwsta_link->cctl_tx_retry_limit) { + h2c->w2 |= le32_encode_bits(1, CCTLINFO_G7_W2_DATA_TXCNT_LMT_SEL) | + le32_encode_bits(rtwsta_link->data_tx_cnt_lmt, + CCTLINFO_G7_W2_DATA_TX_CNT_LMT); + h2c->m2 |= cpu_to_le32(CCTLINFO_G7_W2_DATA_TXCNT_LMT_SEL | + CCTLINFO_G7_W2_DATA_TX_CNT_LMT); + } + + rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, + H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, + H2C_FUNC_MAC_CCTLINFO_UD_G7, 0, 1, + len); + + ret = rtw89_h2c_tx(rtwdev, skb, false); + if (ret) { + rtw89_err(rtwdev, "failed to send h2c\n"); + goto fail; + } + + return 0; +fail: + dev_kfree_skb_any(skb); + + return ret; +} +EXPORT_SYMBOL(rtw89_fw_h2c_txtime_cmac_tbl_g7); int rtw89_fw_h2c_txpath_cmac_tbl(struct rtw89_dev *rtwdev, struct rtw89_sta_link *rtwsta_link) @@ -3622,14 +3905,15 @@ int rtw89_fw_h2c_update_beacon_be(struct rtw89_dev *rtwdev, } EXPORT_SYMBOL(rtw89_fw_h2c_update_beacon_be); -#define H2C_ROLE_MAINTAIN_LEN 4 int rtw89_fw_h2c_role_maintain(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link, struct rtw89_sta_link *rtwsta_link, enum rtw89_upd_mode upd_mode) { - struct sk_buff *skb; u8 mac_id = rtwsta_link ? 
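/* prefer the station link's mac_id; otherwise use the vif link's own */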
rtwsta_link->mac_id : rtwvif_link->mac_id; + struct rtw89_h2c_role_maintain *h2c; + u32 len = sizeof(*h2c); + struct sk_buff *skb; u8 self_role; int ret; @@ -3642,21 +3926,27 @@ int rtw89_fw_h2c_role_maintain(struct rtw89_dev *rtwdev, self_role = rtwvif_link->self_role; } - skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_ROLE_MAINTAIN_LEN); + skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); if (!skb) { rtw89_err(rtwdev, "failed to alloc skb for h2c join\n"); return -ENOMEM; } - skb_put(skb, H2C_ROLE_MAINTAIN_LEN); - SET_FWROLE_MAINTAIN_MACID(skb->data, mac_id); - SET_FWROLE_MAINTAIN_SELF_ROLE(skb->data, self_role); - SET_FWROLE_MAINTAIN_UPD_MODE(skb->data, upd_mode); - SET_FWROLE_MAINTAIN_WIFI_ROLE(skb->data, rtwvif_link->wifi_role); + skb_put(skb, len); + h2c = (struct rtw89_h2c_role_maintain *)skb->data; + + h2c->w0 = le32_encode_bits(mac_id, RTW89_H2C_ROLE_MAINTAIN_W0_MACID) | + le32_encode_bits(self_role, RTW89_H2C_ROLE_MAINTAIN_W0_SELF_ROLE) | + le32_encode_bits(upd_mode, RTW89_H2C_ROLE_MAINTAIN_W0_UPD_MODE) | + le32_encode_bits(rtwvif_link->wifi_role, + RTW89_H2C_ROLE_MAINTAIN_W0_WIFI_ROLE) | + le32_encode_bits(rtwvif_link->mac_idx, + RTW89_H2C_ROLE_MAINTAIN_W0_BAND) | + le32_encode_bits(rtwvif_link->port, RTW89_H2C_ROLE_MAINTAIN_W0_PORT); rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, H2C_CAT_MAC, H2C_CL_MAC_MEDIA_RPT, H2C_FUNC_MAC_FWROLE_MAINTAIN, 0, 1, - H2C_ROLE_MAINTAIN_LEN); + len); ret = rtw89_h2c_tx(rtwdev, skb, false); if (ret) { @@ -5311,6 +5601,7 @@ int rtw89_fw_h2c_scan_offload_be(struct rtw89_dev *rtwdev, u8 macc_role_size = sizeof(*macc_role) * option->num_macc_role; u8 opch_size = sizeof(*opch) * option->num_opch; u8 probe_id[NUM_NL80211_BANDS]; + u8 scan_offload_ver = U8_MAX; u8 cfg_len = sizeof(*h2c); unsigned int cond; u8 ver = U8_MAX; @@ -5321,6 +5612,11 @@ int rtw89_fw_h2c_scan_offload_be(struct rtw89_dev *rtwdev, rtw89_scan_get_6g_disabled_chan(rtwdev, option); + if (RTW89_CHK_FW_FEATURE(SCAN_OFFLOAD_BE_V0, &rtwdev->fw)) { + cfg_len = offsetofend(typeof(*h2c), w8); + scan_offload_ver = 0; + } + len = cfg_len + macc_role_size + opch_size; skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); if (!skb) { @@ -5392,10 +5688,8 @@ int rtw89_fw_h2c_scan_offload_be(struct rtw89_dev *rtwdev, RTW89_H2C_SCANOFLD_BE_W8_PROBE_RATE_6GHZ); } - if (RTW89_CHK_FW_FEATURE(SCAN_OFFLOAD_BE_V0, &rtwdev->fw)) { - cfg_len = offsetofend(typeof(*h2c), w8); + if (scan_offload_ver == 0) goto flex_member; - } h2c->w9 = le32_encode_bits(sizeof(*h2c) / sizeof(h2c->w0), RTW89_H2C_SCANOFLD_BE_W9_SIZE_CFG) | @@ -5994,24 +6288,29 @@ void rtw89_fw_send_all_early_h2c(struct rtw89_dev *rtwdev) { struct rtw89_early_h2c *early_h2c; - lockdep_assert_held(&rtwdev->mutex); + lockdep_assert_wiphy(rtwdev->hw->wiphy); list_for_each_entry(early_h2c, &rtwdev->early_h2c_list, list) { rtw89_fw_h2c_raw(rtwdev, early_h2c->h2c, early_h2c->h2c_len); } } -void rtw89_fw_free_all_early_h2c(struct rtw89_dev *rtwdev) +void __rtw89_fw_free_all_early_h2c(struct rtw89_dev *rtwdev) { struct rtw89_early_h2c *early_h2c, *tmp; - mutex_lock(&rtwdev->mutex); list_for_each_entry_safe(early_h2c, tmp, &rtwdev->early_h2c_list, list) { list_del(&early_h2c->list); kfree(early_h2c->h2c); kfree(early_h2c); } - mutex_unlock(&rtwdev->mutex); +} + +void rtw89_fw_free_all_early_h2c(struct rtw89_dev *rtwdev) +{ + lockdep_assert_wiphy(rtwdev->hw->wiphy); + + __rtw89_fw_free_all_early_h2c(rtwdev); } static void rtw89_fw_c2h_parse_attr(struct sk_buff *c2h) @@ -6055,7 +6354,7 @@ void rtw89_fw_c2h_irqsafe(struct rtw89_dev *rtwdev, struct 
sk_buff *c2h) enqueue: skb_queue_tail(&rtwdev->c2h_queue, c2h); - ieee80211_queue_work(rtwdev->hw, &rtwdev->c2h_work); + wiphy_work_queue(rtwdev->hw->wiphy, &rtwdev->c2h_work); } static void rtw89_fw_c2h_cmd_handle(struct rtw89_dev *rtwdev, @@ -6093,17 +6392,17 @@ static void rtw89_fw_c2h_cmd_handle(struct rtw89_dev *rtwdev, rtw89_hex_dump(rtwdev, RTW89_DBG_FW, "C2H: ", skb->data, skb->len); } -void rtw89_fw_c2h_work(struct work_struct *work) +void rtw89_fw_c2h_work(struct wiphy *wiphy, struct wiphy_work *work) { struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev, c2h_work); struct sk_buff *skb, *tmp; + lockdep_assert_wiphy(rtwdev->hw->wiphy); + skb_queue_walk_safe(&rtwdev->c2h_queue, skb, tmp) { skb_unlink(skb, &rtwdev->c2h_queue); - mutex_lock(&rtwdev->mutex); rtw89_fw_c2h_cmd_handle(rtwdev, skb); - mutex_unlock(&rtwdev->mutex); dev_kfree_skb_any(skb); } } @@ -6184,7 +6483,7 @@ int rtw89_fw_msg_reg(struct rtw89_dev *rtwdev, u32 ret; if (h2c_info && h2c_info->id != RTW89_FWCMD_H2CREG_FUNC_GET_FEATURE) - lockdep_assert_held(&rtwdev->mutex); + lockdep_assert_wiphy(rtwdev->hw->wiphy); if (!h2c_info && !c2h_info) return -EINVAL; diff --git a/drivers/net/wireless/realtek/rtw89/fw.h b/drivers/net/wireless/realtek/rtw89/fw.h index 2026bc2fd2acd..55255b48bdb71 100644 --- a/drivers/net/wireless/realtek/rtw89/fw.h +++ b/drivers/net/wireless/realtek/rtw89/fw.h @@ -663,6 +663,11 @@ struct rtw89_fw_mss_pool_hdr { } __packed; union rtw89_fw_section_mssc_content { + struct { + u8 pad[0x20]; + u8 bit_in_chip_list; + u8 ver; + } __packed blacklist; struct { u8 pad[58]; __le32 v; @@ -673,6 +678,13 @@ union rtw89_fw_section_mssc_content { } __packed key_sign_len; } __packed; +struct rtw89_fw_blacklist { + u8 ver; + u8 list[32]; +}; + +extern const struct rtw89_fw_blacklist rtw89_fw_blacklist_default; + static inline void SET_CTRL_INFO_MACID(void *table, u32 val) { le32p_replace_bits((__le32 *)(table) + 0, val, GENMASK(6, 0)); @@ -1579,25 +1591,17 @@ struct rtw89_h2c_bcn_upd_be { #define RTW89_H2C_BCN_UPD_BE_W7_ECSA_OFST GENMASK(30, 16) #define RTW89_H2C_BCN_UPD_BE_W7_PROTECTION_KEY_ID BIT(31) -static inline void SET_FWROLE_MAINTAIN_MACID(void *h2c, u32 val) -{ - le32p_replace_bits((__le32 *)h2c, val, GENMASK(7, 0)); -} - -static inline void SET_FWROLE_MAINTAIN_SELF_ROLE(void *h2c, u32 val) -{ - le32p_replace_bits((__le32 *)h2c, val, GENMASK(9, 8)); -} - -static inline void SET_FWROLE_MAINTAIN_UPD_MODE(void *h2c, u32 val) -{ - le32p_replace_bits((__le32 *)h2c, val, GENMASK(12, 10)); -} +struct rtw89_h2c_role_maintain { + __le32 w0; +}; -static inline void SET_FWROLE_MAINTAIN_WIFI_ROLE(void *h2c, u32 val) -{ - le32p_replace_bits((__le32 *)h2c, val, GENMASK(16, 13)); -} +#define RTW89_H2C_ROLE_MAINTAIN_W0_MACID GENMASK(7, 0) +#define RTW89_H2C_ROLE_MAINTAIN_W0_SELF_ROLE GENMASK(9, 8) +#define RTW89_H2C_ROLE_MAINTAIN_W0_UPD_MODE GENMASK(12, 10) +#define RTW89_H2C_ROLE_MAINTAIN_W0_WIFI_ROLE GENMASK(16, 13) +#define RTW89_H2C_ROLE_MAINTAIN_W0_BAND GENMASK(18, 17) +#define RTW89_H2C_ROLE_MAINTAIN_W0_PORT GENMASK(21, 19) +#define RTW89_H2C_ROLE_MAINTAIN_W0_MACID_EXT GENMASK(31, 24) enum rtw89_fw_sta_type { /* value of RTW89_H2C_JOININFO_W1_STA_TYPE */ RTW89_FW_N_AC_STA = 0, @@ -1801,17 +1805,21 @@ struct rtw89_h2c_lps_ch_info { struct rtw89_h2c_lps_ml_cmn_info { u8 fmt_id; - u8 rsvd0[3]; + u8 rfe_type; + u8 rsvd0[2]; __le32 mlo_dbcc_mode; - u8 central_ch[RTW89_PHY_MAX]; - u8 pri_ch[RTW89_PHY_MAX]; - u8 bw[RTW89_PHY_MAX]; - u8 band[RTW89_PHY_MAX]; - u8 bcn_rate_type[RTW89_PHY_MAX]; + u8 
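/* per-PHY arrays, sized by RTW89_PHY_NUM (renamed from RTW89_PHY_MAX) */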
central_ch[RTW89_PHY_NUM]; + u8 pri_ch[RTW89_PHY_NUM]; + u8 bw[RTW89_PHY_NUM]; + u8 band[RTW89_PHY_NUM]; + u8 bcn_rate_type[RTW89_PHY_NUM]; u8 rsvd1[2]; - __le16 tia_gain[RTW89_PHY_MAX][TIA_GAIN_NUM]; - u8 lna_gain[RTW89_PHY_MAX][LNA_GAIN_NUM]; + __le16 tia_gain[RTW89_PHY_NUM][TIA_GAIN_NUM]; + u8 lna_gain[RTW89_PHY_NUM][LNA_GAIN_NUM]; u8 rsvd2[2]; + u8 tia_lna_op1db[RTW89_PHY_NUM][LNA_GAIN_NUM + 1]; + u8 lna_op1db[RTW89_PHY_NUM][LNA_GAIN_NUM]; + u8 dup_bcn_ofst[RTW89_PHY_NUM]; } __packed; static inline void RTW89_SET_FWCMD_CPU_EXCEPTION_TYPE(void *cmd, u32 val) @@ -3882,6 +3890,7 @@ enum rtw89_fw_element_id { RTW89_FW_ELEMENT_ID_TX_SHAPE_LMT_RU = 17, RTW89_FW_ELEMENT_ID_TXPWR_TRK = 18, RTW89_FW_ELEMENT_ID_RFKLOG_FMT = 19, + RTW89_FW_ELEMENT_ID_REGD = 20, RTW89_FW_ELEMENT_ID_NUM, }; @@ -3925,6 +3934,15 @@ struct __rtw89_fw_txpwr_element { u8 content[]; } __packed; +struct __rtw89_fw_regd_element { + u8 rsvd0; + u8 rsvd1; + u8 rsvd2; + u8 ent_sz; + __le32 num_ents; + u8 content[]; +} __packed; + enum rtw89_fw_txpwr_trk_type { __RTW89_FW_TXPWR_TRK_TYPE_6GHZ_START = 0, RTW89_FW_TXPWR_TRK_TYPE_6GB_N = 0, @@ -4016,6 +4034,7 @@ struct rtw89_fw_element_hdr { __le16 offset[]; } __packed rfk_log_fmt; struct __rtw89_fw_txpwr_element txpwr; + struct __rtw89_fw_regd_element regd; } __packed u; } __packed; @@ -4524,6 +4543,12 @@ struct rtw89_c2h_rfk_report { u8 version; } __packed; +struct rtw89_c2h_rf_tas_info { + struct rtw89_c2h_hdr hdr; + __le32 cur_idx; + __le16 txpwr_history[20]; +} __packed; + #define RTW89_FW_RSVD_PLE_SIZE 0x800 #define RTW89_FW_BACKTRACE_INFO_SIZE 8 @@ -4573,6 +4598,8 @@ int rtw89_fw_h2c_ampdu_cmac_tbl_g7(struct rtw89_dev *rtwdev, struct rtw89_sta_link *rtwsta_link); int rtw89_fw_h2c_txtime_cmac_tbl(struct rtw89_dev *rtwdev, struct rtw89_sta_link *rtwsta_link); +int rtw89_fw_h2c_txtime_cmac_tbl_g7(struct rtw89_dev *rtwdev, + struct rtw89_sta_link *rtwsta_link); int rtw89_fw_h2c_txpath_cmac_tbl(struct rtw89_dev *rtwdev, struct rtw89_sta_link *rtwsta_link); int rtw89_fw_h2c_update_beacon(struct rtw89_dev *rtwdev, @@ -4588,7 +4615,7 @@ int rtw89_fw_h2c_dctl_sec_cam_v2(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link, struct rtw89_sta_link *rtwsta_link); void rtw89_fw_c2h_irqsafe(struct rtw89_dev *rtwdev, struct sk_buff *c2h); -void rtw89_fw_c2h_work(struct work_struct *work); +void rtw89_fw_c2h_work(struct wiphy *wiphy, struct wiphy_work *work); int rtw89_fw_h2c_role_maintain(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link, struct rtw89_sta_link *rtwsta_link, @@ -4654,6 +4681,7 @@ int rtw89_fw_h2c_raw_with_hdr(struct rtw89_dev *rtwdev, bool rack, bool dack); int rtw89_fw_h2c_raw(struct rtw89_dev *rtwdev, const u8 *buf, u16 len); void rtw89_fw_send_all_early_h2c(struct rtw89_dev *rtwdev); +void __rtw89_fw_free_all_early_h2c(struct rtw89_dev *rtwdev); void rtw89_fw_free_all_early_h2c(struct rtw89_dev *rtwdev); int rtw89_fw_h2c_general_pkt(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link, u8 macid); @@ -4853,6 +4881,15 @@ static inline int rtw89_chip_h2c_ampdu_cmac_tbl(struct rtw89_dev *rtwdev, return 0; } +static inline +int rtw89_chip_h2c_txtime_cmac_tbl(struct rtw89_dev *rtwdev, + struct rtw89_sta_link *rtwsta_link) +{ + const struct rtw89_chip_info *chip = rtwdev->chip; + + return chip->ops->h2c_txtime_cmac_tbl(rtwdev, rtwsta_link); +} + static inline int rtw89_chip_h2c_ba_cam(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta, bool valid, struct ieee80211_ampdu_params *params) @@ -4874,6 +4911,18 @@ int rtw89_chip_h2c_ba_cam(struct rtw89_dev 
*rtwdev, struct rtw89_sta *rtwsta, return 0; } +/* Must consider compatibility; don't insert new in the mid. + * Fill each field's default value in rtw89_regd_entcpy(). + */ +struct rtw89_fw_regd_entry { + u8 alpha2_0; + u8 alpha2_1; + u8 rule_2ghz; + u8 rule_5ghz; + u8 rule_6ghz; + __le32 fmap; +} __packed; + /* must consider compatibility; don't insert new in the mid */ struct rtw89_fw_txpwr_byrate_entry { u8 band; diff --git a/drivers/net/wireless/realtek/rtw89/mac.c b/drivers/net/wireless/realtek/rtw89/mac.c index a37c6d525d6f0..b4841f948ec1c 100644 --- a/drivers/net/wireless/realtek/rtw89/mac.c +++ b/drivers/net/wireless/realtek/rtw89/mac.c @@ -1495,6 +1495,21 @@ static int rtw89_mac_power_switch(struct rtw89_dev *rtwdev, bool on) #undef PWR_ACT } +int rtw89_mac_pwr_on(struct rtw89_dev *rtwdev) +{ + int ret; + + ret = rtw89_mac_power_switch(rtwdev, true); + if (ret) { + rtw89_mac_power_switch(rtwdev, false); + ret = rtw89_mac_power_switch(rtwdev, true); + if (ret) + return ret; + } + + return 0; +} + void rtw89_mac_pwr_off(struct rtw89_dev *rtwdev) { rtw89_mac_power_switch(rtwdev, false); @@ -3996,14 +4011,6 @@ int rtw89_mac_partial_init(struct rtw89_dev *rtwdev, bool include_bb) { int ret; - ret = rtw89_mac_power_switch(rtwdev, true); - if (ret) { - rtw89_mac_power_switch(rtwdev, false); - ret = rtw89_mac_power_switch(rtwdev, true); - if (ret) - return ret; - } - rtw89_mac_ctrl_hci_dma_trx(rtwdev, true); if (include_bb) { @@ -4036,6 +4043,10 @@ int rtw89_mac_init(struct rtw89_dev *rtwdev) bool include_bb = !!chip->bbmcu_nr; int ret; + ret = rtw89_mac_pwr_on(rtwdev); + if (ret) + return ret; + ret = rtw89_mac_partial_init(rtwdev, include_bb); if (ret) goto fail; @@ -4067,7 +4078,7 @@ int rtw89_mac_init(struct rtw89_dev *rtwdev) return ret; fail: - rtw89_mac_power_switch(rtwdev, false); + rtw89_mac_pwr_off(rtwdev); return ret; } @@ -4826,6 +4837,32 @@ void rtw89_mac_set_he_obss_narrow_bw_ru(struct rtw89_dev *rtwdev, rtw89_write32_set(rtwdev, reg, mac->narrow_bw_ru_dis.mask); } +void rtw89_mac_set_he_tb(struct rtw89_dev *rtwdev, + struct rtw89_vif_link *rtwvif_link) +{ + struct ieee80211_bss_conf *bss_conf; + bool set; + u32 reg; + + if (rtwdev->chip->chip_gen != RTW89_CHIP_BE) + return; + + rcu_read_lock(); + + bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true); + set = bss_conf->he_support && !bss_conf->eht_support; + + rcu_read_unlock(); + + reg = rtw89_mac_reg_by_idx(rtwdev, R_BE_CLIENT_OM_CTRL, + rtwvif_link->mac_idx); + + if (set) + rtw89_write32_set(rtwdev, reg, B_BE_TRIG_DIS_EHTTB); + else + rtw89_write32_clr(rtwdev, reg, B_BE_TRIG_DIS_EHTTB); +} + void rtw89_mac_stop_ap(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link) { rtw89_mac_port_cfg_func_sw(rtwdev, rtwvif_link); @@ -6031,7 +6068,7 @@ int rtw89_mac_cfg_ctrl_path_v1(struct rtw89_dev *rtwdev, bool wl) if (wl) return 0; - for (i = 0; i < RTW89_PHY_MAX; i++) { + for (i = 0; i < RTW89_PHY_NUM; i++) { g[i].gnt_bt_sw_en = 1; g[i].gnt_bt = 1; g[i].gnt_wl_sw_en = 1; @@ -6422,6 +6459,7 @@ __rtw89_mac_set_tx_time(struct rtw89_dev *rtwdev, struct rtw89_sta_link *rtwsta_ u32 tx_time) { #define MAC_AX_DFLT_TX_TIME 5280 + const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; u8 mac_idx = rtwsta_link->rtwvif_link->mac_idx; u32 max_tx_time = tx_time == 0 ? 
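/* tx_time == 0 means unspecified; fall back to MAC_AX_DFLT_TX_TIME */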
MAC_AX_DFLT_TX_TIME : tx_time; u32 reg; @@ -6429,7 +6467,7 @@ __rtw89_mac_set_tx_time(struct rtw89_dev *rtwdev, struct rtw89_sta_link *rtwsta_ if (rtwsta_link->cctl_tx_time) { rtwsta_link->ampdu_max_time = (max_tx_time - 512) >> 9; - ret = rtw89_fw_h2c_txtime_cmac_tbl(rtwdev, rtwsta_link); + ret = rtw89_chip_h2c_txtime_cmac_tbl(rtwdev, rtwsta_link); } else { ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); if (ret) { @@ -6437,8 +6475,8 @@ __rtw89_mac_set_tx_time(struct rtw89_dev *rtwdev, struct rtw89_sta_link *rtwsta_ return ret; } - reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_AMPDU_AGG_LIMIT, mac_idx); - rtw89_write32_mask(rtwdev, reg, B_AX_AMPDU_MAX_TIME_MASK, + reg = rtw89_mac_reg_by_idx(rtwdev, mac->agg_limit.addr, mac_idx); + rtw89_write32_mask(rtwdev, reg, mac->agg_limit.mask, max_tx_time >> 5); } @@ -6464,6 +6502,7 @@ int rtw89_mac_set_tx_time(struct rtw89_dev *rtwdev, struct rtw89_sta_link *rtwst int rtw89_mac_get_tx_time(struct rtw89_dev *rtwdev, struct rtw89_sta_link *rtwsta_link, u32 *tx_time) { + const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; u8 mac_idx = rtwsta_link->rtwvif_link->mac_idx; u32 reg; int ret = 0; @@ -6477,8 +6516,8 @@ int rtw89_mac_get_tx_time(struct rtw89_dev *rtwdev, struct rtw89_sta_link *rtwst return ret; } - reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_AMPDU_AGG_LIMIT, mac_idx); - *tx_time = rtw89_read32_mask(rtwdev, reg, B_AX_AMPDU_MAX_TIME_MASK) << 5; + reg = rtw89_mac_reg_by_idx(rtwdev, mac->agg_limit.addr, mac_idx); + *tx_time = rtw89_read32_mask(rtwdev, reg, mac->agg_limit.mask) << 5; } return ret; @@ -6494,9 +6533,9 @@ int rtw89_mac_set_tx_retry_limit(struct rtw89_dev *rtwdev, if (!resume) { rtwsta_link->cctl_tx_retry_limit = true; - ret = rtw89_fw_h2c_txtime_cmac_tbl(rtwdev, rtwsta_link); + ret = rtw89_chip_h2c_txtime_cmac_tbl(rtwdev, rtwsta_link); } else { - ret = rtw89_fw_h2c_txtime_cmac_tbl(rtwdev, rtwsta_link); + ret = rtw89_chip_h2c_txtime_cmac_tbl(rtwdev, rtwsta_link); rtwsta_link->cctl_tx_retry_limit = false; } @@ -6506,6 +6545,7 @@ int rtw89_mac_set_tx_retry_limit(struct rtw89_dev *rtwdev, int rtw89_mac_get_tx_retry_limit(struct rtw89_dev *rtwdev, struct rtw89_sta_link *rtwsta_link, u8 *tx_retry) { + const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; u8 mac_idx = rtwsta_link->rtwvif_link->mac_idx; u32 reg; int ret = 0; @@ -6519,8 +6559,8 @@ int rtw89_mac_get_tx_retry_limit(struct rtw89_dev *rtwdev, return ret; } - reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_TXCNT, mac_idx); - *tx_retry = rtw89_read32_mask(rtwdev, reg, B_AX_L_TXCNT_LMT_MASK); + reg = rtw89_mac_reg_by_idx(rtwdev, mac->txcnt_limit.addr, mac_idx); + *tx_retry = rtw89_read32_mask(rtwdev, reg, mac->txcnt_limit.mask); } return ret; @@ -6798,6 +6838,8 @@ const struct rtw89_mac_gen_def rtw89_mac_gen_ax = { .mask = B_AX_RXTRIG_RU26_DIS, }, .wow_ctrl = {.addr = R_AX_WOW_CTRL, .mask = B_AX_WOW_WOWEN,}, + .agg_limit = {.addr = R_AX_AMPDU_AGG_LIMIT, .mask = B_AX_AMPDU_MAX_TIME_MASK,}, + .txcnt_limit = {.addr = R_AX_TXCNT, .mask = B_AX_L_TXCNT_LMT_MASK,}, .check_mac_en = rtw89_mac_check_mac_en_ax, .sys_init = sys_init_ax, diff --git a/drivers/net/wireless/realtek/rtw89/mac.h b/drivers/net/wireless/realtek/rtw89/mac.h index 8edea96d037f6..fd7935d245019 100644 --- a/drivers/net/wireless/realtek/rtw89/mac.h +++ b/drivers/net/wireless/realtek/rtw89/mac.h @@ -964,6 +964,8 @@ struct rtw89_mac_gen_def { struct rtw89_reg_def bfee_ctrl; struct rtw89_reg_def narrow_bw_ru_dis; struct rtw89_reg_def wow_ctrl; + struct rtw89_reg_def agg_limit; + struct rtw89_reg_def txcnt_limit; 
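/* Generation-abstracted register pairs: callers resolve addr/mask at run
 * time instead of hard-coding the AX registers. Illustrative use,
 * mirroring __rtw89_mac_set_tx_time() in mac.c above:
 *
 *	reg = rtw89_mac_reg_by_idx(rtwdev, mac->agg_limit.addr, mac_idx);
 *	rtw89_write32_mask(rtwdev, reg, mac->agg_limit.mask,
 *			   max_tx_time >> 5);
 */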
int (*check_mac_en)(struct rtw89_dev *rtwdev, u8 band, enum rtw89_mac_hwmod_sel sel); @@ -1145,6 +1147,7 @@ rtw89_write32_port_set(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_l rtw89_write32_set(rtwdev, reg, bit); } +int rtw89_mac_pwr_on(struct rtw89_dev *rtwdev); void rtw89_mac_pwr_off(struct rtw89_dev *rtwdev); int rtw89_mac_partial_init(struct rtw89_dev *rtwdev, bool include_bb); int rtw89_mac_init(struct rtw89_dev *rtwdev); @@ -1185,6 +1188,8 @@ void rtw89_mac_port_cfg_rx_sync(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link, bool en); void rtw89_mac_set_he_obss_narrow_bw_ru(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link); +void rtw89_mac_set_he_tb(struct rtw89_dev *rtwdev, + struct rtw89_vif_link *rtwvif_link); void rtw89_mac_stop_ap(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link); void rtw89_mac_enable_beacon_for_ap_vifs(struct rtw89_dev *rtwdev, bool en); int rtw89_mac_remove_vif(struct rtw89_dev *rtwdev, struct rtw89_vif_link *vif); diff --git a/drivers/net/wireless/realtek/rtw89/mac80211.c b/drivers/net/wireless/realtek/rtw89/mac80211.c index b3669e0074df9..4fded07d0beeb 100644 --- a/drivers/net/wireless/realtek/rtw89/mac80211.c +++ b/drivers/net/wireless/realtek/rtw89/mac80211.c @@ -57,32 +57,30 @@ static void rtw89_ops_wake_tx_queue(struct ieee80211_hw *hw, static int rtw89_ops_start(struct ieee80211_hw *hw) { struct rtw89_dev *rtwdev = hw->priv; - int ret; - mutex_lock(&rtwdev->mutex); - ret = rtw89_core_start(rtwdev); - mutex_unlock(&rtwdev->mutex); + lockdep_assert_wiphy(hw->wiphy); - return ret; + return rtw89_core_start(rtwdev); } static void rtw89_ops_stop(struct ieee80211_hw *hw, bool suspend) { struct rtw89_dev *rtwdev = hw->priv; - mutex_lock(&rtwdev->mutex); + lockdep_assert_wiphy(hw->wiphy); + rtw89_core_stop(rtwdev); - mutex_unlock(&rtwdev->mutex); } static int rtw89_ops_config(struct ieee80211_hw *hw, u32 changed) { struct rtw89_dev *rtwdev = hw->priv; + lockdep_assert_wiphy(hw->wiphy); + /* let previous ips work finish to ensure we don't leave ips twice */ - cancel_work_sync(&rtwdev->ips_work); + wiphy_work_cancel(hw->wiphy, &rtwdev->ips_work); - mutex_lock(&rtwdev->mutex); rtw89_leave_ps_mode(rtwdev); if ((changed & IEEE80211_CONF_CHANGE_IDLE) && @@ -100,8 +98,6 @@ static int rtw89_ops_config(struct ieee80211_hw *hw, u32 changed) !rtwdev->scanning) rtw89_enter_ips(rtwdev); - mutex_unlock(&rtwdev->mutex); - return 0; } @@ -115,7 +111,7 @@ static int __rtw89_ops_add_iface_link(struct rtw89_dev *rtwdev, rtw89_vif_type_mapping(rtwvif_link, false); - INIT_WORK(&rtwvif_link->update_beacon_work, rtw89_core_update_beacon_work); + wiphy_work_init(&rtwvif_link->update_beacon_work, rtw89_core_update_beacon_work); INIT_LIST_HEAD(&rtwvif_link->general_pkt_list); rtwvif_link->hit_rule = 0; @@ -142,9 +138,9 @@ static int __rtw89_ops_add_iface_link(struct rtw89_dev *rtwdev, static void __rtw89_ops_remove_iface_link(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link) { - mutex_unlock(&rtwdev->mutex); - cancel_work_sync(&rtwvif_link->update_beacon_work); - mutex_lock(&rtwdev->mutex); + lockdep_assert_wiphy(rtwdev->hw->wiphy); + + wiphy_work_cancel(rtwdev->hw->wiphy, &rtwvif_link->update_beacon_work); rtw89_leave_ps_mode(rtwdev); @@ -162,11 +158,11 @@ static int rtw89_ops_add_interface(struct ieee80211_hw *hw, u8 mac_id, port; int ret = 0; + lockdep_assert_wiphy(hw->wiphy); + rtw89_debug(rtwdev, RTW89_DBG_STATE, "add vif %pM type %d, p2p %d\n", vif->addr, vif->type, vif->p2p); - mutex_lock(&rtwdev->mutex); - 
rtw89_leave_ips_by_hwflags(rtwdev); if (RTW89_CHK_FW_FEATURE(BEACON_FILTER, &rtwdev->fw)) @@ -174,10 +170,8 @@ static int rtw89_ops_add_interface(struct ieee80211_hw *hw, IEEE80211_VIF_SUPPORTS_CQM_RSSI; mac_id = rtw89_acquire_mac_id(rtwdev); - if (mac_id == RTW89_MAX_MAC_ID_NUM) { - ret = -ENOSPC; - goto err; - } + if (mac_id == RTW89_MAX_MAC_ID_NUM) + return -ENOSPC; port = rtw89_core_acquire_bit_map(rtwdev->hw_port, RTW89_PORT_NUM); if (port == RTW89_PORT_NUM) { @@ -198,7 +192,7 @@ static int rtw89_ops_add_interface(struct ieee80211_hw *hw, rtwvif->offchan = false; rtwvif->roc.state = RTW89_ROC_IDLE; - INIT_DELAYED_WORK(&rtwvif->roc.roc_work, rtw89_roc_work); + wiphy_delayed_work_init(&rtwvif->roc.roc_work, rtw89_roc_work); rtw89_traffic_stats_init(rtwdev, &rtwvif->stats); @@ -213,8 +207,6 @@ static int rtw89_ops_add_interface(struct ieee80211_hw *hw, goto unset_link; rtw89_recalc_lps(rtwdev); - - mutex_unlock(&rtwdev->mutex); return 0; unset_link: @@ -224,8 +216,6 @@ static int rtw89_ops_add_interface(struct ieee80211_hw *hw, rtw89_core_release_bit_map(rtwdev->hw_port, port); release_macid: rtw89_release_mac_id(rtwdev, mac_id); -err: - mutex_unlock(&rtwdev->mutex); return ret; } @@ -239,12 +229,12 @@ static void rtw89_ops_remove_interface(struct ieee80211_hw *hw, u8 port = rtw89_vif_get_main_port(rtwvif); struct rtw89_vif_link *rtwvif_link; + lockdep_assert_wiphy(hw->wiphy); + rtw89_debug(rtwdev, RTW89_DBG_STATE, "remove vif %pM type %d p2p %d\n", vif->addr, vif->type, vif->p2p); - cancel_delayed_work_sync(&rtwvif->roc.roc_work); - - mutex_lock(&rtwdev->mutex); + wiphy_delayed_work_cancel(hw->wiphy, &rtwvif->roc.roc_work); rtwvif_link = rtwvif->links[RTW89_VIF_IDLE_LINK_ID]; if (unlikely(!rtwvif_link)) { @@ -265,8 +255,6 @@ static void rtw89_ops_remove_interface(struct ieee80211_hw *hw, rtw89_recalc_lps(rtwdev); rtw89_enter_ips_by_hwflags(rtwdev); - - mutex_unlock(&rtwdev->mutex); } static int rtw89_ops_change_interface(struct ieee80211_hw *hw, @@ -304,7 +292,8 @@ static void rtw89_ops_configure_filter(struct ieee80211_hw *hw, const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; u32 rx_fltr; - mutex_lock(&rtwdev->mutex); + lockdep_assert_wiphy(hw->wiphy); + rtw89_leave_ps_mode(rtwdev); *new_flags &= FIF_ALLMULTI | FIF_OTHER_BSS | FIF_FCSFAIL | @@ -367,14 +356,11 @@ static void rtw89_ops_configure_filter(struct ieee80211_hw *hw, B_AX_RX_FLTR_CFG_MASK, rx_fltr); if (!rtwdev->dbcc_en) - goto out; + return; rtw89_write32_mask(rtwdev, rtw89_mac_reg_by_idx(rtwdev, mac->rx_fltr, RTW89_MAC_1), B_AX_RX_FLTR_CFG_MASK, rx_fltr); - -out: - mutex_unlock(&rtwdev->mutex); } static const u8 ac_to_fw_idx[IEEE80211_NUM_ACS] = { @@ -670,6 +656,7 @@ static void __rtw89_ops_bss_link_assoc(struct rtw89_dev *rtwdev, rtw89_chip_cfg_txpwr_ul_tb_offset(rtwdev, rtwvif_link); rtw89_mac_port_update(rtwdev, rtwvif_link); rtw89_mac_set_he_obss_narrow_bw_ru(rtwdev, rtwvif_link); + rtw89_mac_set_he_tb(rtwdev, rtwvif_link); } static void __rtw89_ops_bss_assoc(struct rtw89_dev *rtwdev, @@ -689,7 +676,8 @@ static void rtw89_ops_vif_cfg_changed(struct ieee80211_hw *hw, struct rtw89_dev *rtwdev = hw->priv; struct rtw89_vif *rtwvif = vif_to_rtwvif(vif); - mutex_lock(&rtwdev->mutex); + lockdep_assert_wiphy(hw->wiphy); + rtw89_leave_ps_mode(rtwdev); if (changed & BSS_CHANGED_ASSOC) { @@ -712,8 +700,6 @@ static void rtw89_ops_vif_cfg_changed(struct ieee80211_hw *hw, if (changed & BSS_CHANGED_ARP_FILTER) rtwvif->ip_addr = vif->cfg.arp_addr_list[0]; - - mutex_unlock(&rtwdev->mutex); } static void 
rtw89_ops_link_info_changed(struct ieee80211_hw *hw, @@ -725,7 +711,8 @@ static void rtw89_ops_link_info_changed(struct ieee80211_hw *hw, struct rtw89_vif *rtwvif = vif_to_rtwvif(vif); struct rtw89_vif_link *rtwvif_link; - mutex_lock(&rtwdev->mutex); + lockdep_assert_wiphy(hw->wiphy); + rtw89_leave_ps_mode(rtwdev); rtwvif_link = rtwvif->links[conf->link_id]; @@ -733,7 +720,7 @@ static void rtw89_ops_link_info_changed(struct ieee80211_hw *hw, rtw89_err(rtwdev, "%s: rtwvif link (link_id %u) is not active\n", __func__, conf->link_id); - goto out; + return; } if (changed & BSS_CHANGED_BSSID) { @@ -763,9 +750,6 @@ static void rtw89_ops_link_info_changed(struct ieee80211_hw *hw, if (changed & BSS_CHANGED_TPE) rtw89_reg_6ghz_recalc(rtwdev, rtwvif_link, true); - -out: - mutex_unlock(&rtwdev->mutex); } static int rtw89_ops_start_ap(struct ieee80211_hw *hw, @@ -778,22 +762,19 @@ static int rtw89_ops_start_ap(struct ieee80211_hw *hw, const struct rtw89_chan *chan; int ret = 0; - mutex_lock(&rtwdev->mutex); + lockdep_assert_wiphy(hw->wiphy); rtwvif_link = rtwvif->links[link_conf->link_id]; if (unlikely(!rtwvif_link)) { rtw89_err(rtwdev, "%s: rtwvif link (link_id %u) is not active\n", __func__, link_conf->link_id); - ret = -ENOLINK; - goto out; + return -ENOLINK; } chan = rtw89_chan_get(rtwdev, rtwvif_link->chanctx_idx); - if (chan->band_type == RTW89_BAND_6G) { - mutex_unlock(&rtwdev->mutex); + if (chan->band_type == RTW89_BAND_6G) return -EOPNOTSUPP; - } if (rtwdev->scanning) rtw89_hw_scan_abort(rtwdev, rtwdev->scan_info.scanning_vif); @@ -810,15 +791,12 @@ static int rtw89_ops_start_ap(struct ieee80211_hw *hw, if (RTW89_CHK_FW_FEATURE(NOTIFY_AP_INFO, &rtwdev->fw)) { ret = rtw89_fw_h2c_ap_info_refcount(rtwdev, true); if (ret) - goto out; + return ret; } rtw89_queue_chanctx_work(rtwdev); -out: - mutex_unlock(&rtwdev->mutex); - - return ret; + return 0; } static @@ -829,14 +807,14 @@ void rtw89_ops_stop_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct rtw89_vif *rtwvif = vif_to_rtwvif(vif); struct rtw89_vif_link *rtwvif_link; - mutex_lock(&rtwdev->mutex); + lockdep_assert_wiphy(hw->wiphy); rtwvif_link = rtwvif->links[link_conf->link_id]; if (unlikely(!rtwvif_link)) { rtw89_err(rtwdev, "%s: rtwvif link (link_id %u) is not active\n", __func__, link_conf->link_id); - goto out; + return; } if (RTW89_CHK_FW_FEATURE(NOTIFY_AP_INFO, &rtwdev->fw)) @@ -845,22 +823,18 @@ void rtw89_ops_stop_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif, rtw89_mac_stop_ap(rtwdev, rtwvif_link); rtw89_chip_h2c_assoc_cmac_tbl(rtwdev, rtwvif_link, NULL); rtw89_fw_h2c_join_info(rtwdev, rtwvif_link, NULL, true); - -out: - mutex_unlock(&rtwdev->mutex); } static int rtw89_ops_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set) { - struct rtw89_dev *rtwdev = hw->priv; struct rtw89_sta *rtwsta = sta_to_rtwsta(sta); struct rtw89_vif *rtwvif = rtwsta->rtwvif; struct rtw89_vif_link *rtwvif_link; unsigned int link_id; rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id) - ieee80211_queue_work(rtwdev->hw, &rtwvif_link->update_beacon_work); + wiphy_work_queue(hw->wiphy, &rtwvif_link->update_beacon_work); return 0; } @@ -873,9 +847,9 @@ static int rtw89_ops_conf_tx(struct ieee80211_hw *hw, struct rtw89_dev *rtwdev = hw->priv; struct rtw89_vif *rtwvif = vif_to_rtwvif(vif); struct rtw89_vif_link *rtwvif_link; - int ret = 0; - mutex_lock(&rtwdev->mutex); + lockdep_assert_wiphy(hw->wiphy); + rtw89_leave_ps_mode(rtwdev); rtwvif_link = rtwvif->links[link_id]; @@ -883,17 +857,13 @@ static int 
rtw89_ops_conf_tx(struct ieee80211_hw *hw, rtw89_err(rtwdev, "%s: rtwvif link (link_id %u) is not active\n", __func__, link_id); - ret = -ENOLINK; - goto out; + return -ENOLINK; } rtwvif_link->tx_params[ac] = *params; __rtw89_conf_tx(rtwdev, rtwvif_link, ac); -out: - mutex_unlock(&rtwdev->mutex); - - return ret; + return 0; } static int __rtw89_ops_sta_state(struct ieee80211_hw *hw, @@ -937,14 +907,11 @@ static int rtw89_ops_sta_state(struct ieee80211_hw *hw, enum ieee80211_sta_state new_state) { struct rtw89_dev *rtwdev = hw->priv; - int ret; - mutex_lock(&rtwdev->mutex); - rtw89_leave_ps_mode(rtwdev); - ret = __rtw89_ops_sta_state(hw, vif, sta, old_state, new_state); - mutex_unlock(&rtwdev->mutex); + lockdep_assert_wiphy(hw->wiphy); - return ret; + rtw89_leave_ps_mode(rtwdev); + return __rtw89_ops_sta_state(hw, vif, sta, old_state, new_state); } static int rtw89_ops_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, @@ -953,9 +920,10 @@ static int rtw89_ops_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, struct ieee80211_key_conf *key) { struct rtw89_dev *rtwdev = hw->priv; - int ret = 0; + int ret; + + lockdep_assert_wiphy(hw->wiphy); - mutex_lock(&rtwdev->mutex); rtw89_leave_ps_mode(rtwdev); switch (cmd) { @@ -964,7 +932,7 @@ static int rtw89_ops_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, ret = rtw89_cam_sec_key_add(rtwdev, vif, sta, key); if (ret && ret != -EOPNOTSUPP) { rtw89_err(rtwdev, "failed to add key to sec cam\n"); - goto out; + return ret; } break; case DISABLE_KEY: @@ -974,14 +942,11 @@ static int rtw89_ops_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, ret = rtw89_cam_sec_key_del(rtwdev, vif, sta, key, true); if (ret) { rtw89_err(rtwdev, "failed to remove key from sec cam\n"); - goto out; + return ret; } break; } -out: - mutex_unlock(&rtwdev->mutex); - return ret; } @@ -997,38 +962,32 @@ static int rtw89_ops_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_txq *txq = sta->txq[tid]; struct rtw89_txq *rtwtxq = (struct rtw89_txq *)txq->drv_priv; + lockdep_assert_wiphy(rtwdev->hw->wiphy); + switch (params->action) { case IEEE80211_AMPDU_TX_START: return IEEE80211_AMPDU_TX_START_IMMEDIATE; case IEEE80211_AMPDU_TX_STOP_CONT: case IEEE80211_AMPDU_TX_STOP_FLUSH: case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT: - mutex_lock(&rtwdev->mutex); clear_bit(RTW89_TXQ_F_AMPDU, &rtwtxq->flags); clear_bit(tid, rtwsta->ampdu_map); rtw89_chip_h2c_ampdu_cmac_tbl(rtwdev, rtwvif, rtwsta); - mutex_unlock(&rtwdev->mutex); ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); break; case IEEE80211_AMPDU_TX_OPERATIONAL: - mutex_lock(&rtwdev->mutex); set_bit(RTW89_TXQ_F_AMPDU, &rtwtxq->flags); rtwsta->ampdu_params[tid].agg_num = params->buf_size; rtwsta->ampdu_params[tid].amsdu = params->amsdu; set_bit(tid, rtwsta->ampdu_map); rtw89_leave_ps_mode(rtwdev); rtw89_chip_h2c_ampdu_cmac_tbl(rtwdev, rtwvif, rtwsta); - mutex_unlock(&rtwdev->mutex); break; case IEEE80211_AMPDU_RX_START: - mutex_lock(&rtwdev->mutex); rtw89_chip_h2c_ba_cam(rtwdev, rtwsta, true, params); - mutex_unlock(&rtwdev->mutex); break; case IEEE80211_AMPDU_RX_STOP: - mutex_lock(&rtwdev->mutex); rtw89_chip_h2c_ba_cam(rtwdev, rtwsta, false, params); - mutex_unlock(&rtwdev->mutex); break; default: WARN_ON(1); @@ -1042,11 +1001,11 @@ static int rtw89_ops_set_rts_threshold(struct ieee80211_hw *hw, u32 value) { struct rtw89_dev *rtwdev = hw->priv; - mutex_lock(&rtwdev->mutex); + lockdep_assert_wiphy(hw->wiphy); + rtw89_leave_ps_mode(rtwdev); if (test_bit(RTW89_FLAG_POWERON, rtwdev->flags)) 
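/* skip the register update while the MAC is powered off */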
rtw89_mac_update_rts_threshold(rtwdev); - mutex_unlock(&rtwdev->mutex); return 0; } @@ -1086,7 +1045,8 @@ static void rtw89_ops_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, { struct rtw89_dev *rtwdev = hw->priv; - mutex_lock(&rtwdev->mutex); + lockdep_assert_wiphy(hw->wiphy); + rtw89_leave_lps(rtwdev); rtw89_hci_flush_queues(rtwdev, queues, drop); @@ -1094,8 +1054,6 @@ static void rtw89_ops_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, __rtw89_drop_packets(rtwdev, vif); else rtw89_mac_flush_txq(rtwdev, queues, drop); - - mutex_unlock(&rtwdev->mutex); } struct rtw89_iter_bitrate_mask_data { @@ -1142,10 +1100,10 @@ static int rtw89_ops_set_bitrate_mask(struct ieee80211_hw *hw, { struct rtw89_dev *rtwdev = hw->priv; - mutex_lock(&rtwdev->mutex); + lockdep_assert_wiphy(hw->wiphy); + rtw89_phy_rate_pattern_vif(rtwdev, vif, mask); rtw89_ra_mask_info_update(rtwdev, vif, mask); - mutex_unlock(&rtwdev->mutex); return 0; } @@ -1156,6 +1114,8 @@ int rtw89_ops_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant) struct rtw89_dev *rtwdev = hw->priv; struct rtw89_hal *hal = &rtwdev->hal; + lockdep_assert_wiphy(hw->wiphy); + if (hal->ant_diversity) { if (tx_ant != rx_ant || hweight32(tx_ant) != 1) return -EINVAL; @@ -1163,12 +1123,10 @@ int rtw89_ops_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant) return -EINVAL; } - mutex_lock(&rtwdev->mutex); hal->antenna_tx = tx_ant; hal->antenna_rx = rx_ant; hal->tx_path_diversity = false; hal->ant_diversity_fixed = true; - mutex_unlock(&rtwdev->mutex); return 0; } @@ -1193,18 +1151,15 @@ static void rtw89_ops_sw_scan_start(struct ieee80211_hw *hw, struct rtw89_vif *rtwvif = vif_to_rtwvif(vif); struct rtw89_vif_link *rtwvif_link; - mutex_lock(&rtwdev->mutex); + lockdep_assert_wiphy(hw->wiphy); rtwvif_link = rtw89_vif_get_link_inst(rtwvif, 0); if (unlikely(!rtwvif_link)) { rtw89_err(rtwdev, "sw scan start: find no link on HW-0\n"); - goto out; + return; } rtw89_core_scan_start(rtwdev, rtwvif_link, mac_addr, false); - -out: - mutex_unlock(&rtwdev->mutex); } static void rtw89_ops_sw_scan_complete(struct ieee80211_hw *hw, @@ -1214,18 +1169,15 @@ static void rtw89_ops_sw_scan_complete(struct ieee80211_hw *hw, struct rtw89_vif *rtwvif = vif_to_rtwvif(vif); struct rtw89_vif_link *rtwvif_link; - mutex_lock(&rtwdev->mutex); + lockdep_assert_wiphy(hw->wiphy); rtwvif_link = rtw89_vif_get_link_inst(rtwvif, 0); if (unlikely(!rtwvif_link)) { rtw89_err(rtwdev, "sw scan complete: find no link on HW-0\n"); - goto out; + return; } rtw89_core_scan_complete(rtwdev, rtwvif_link, false); - -out: - mutex_unlock(&rtwdev->mutex); } static void rtw89_ops_reconfig_complete(struct ieee80211_hw *hw, @@ -1245,21 +1197,18 @@ static int rtw89_ops_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct rtw89_vif_link *rtwvif_link; int ret; + lockdep_assert_wiphy(hw->wiphy); + if (!RTW89_CHK_FW_FEATURE(SCAN_OFFLOAD, &rtwdev->fw)) return 1; - mutex_lock(&rtwdev->mutex); - - if (rtwdev->scanning || rtwvif->offchan) { - ret = -EBUSY; - goto out; - } + if (rtwdev->scanning || rtwvif->offchan) + return -EBUSY; rtwvif_link = rtw89_vif_get_link_inst(rtwvif, 0); if (unlikely(!rtwvif_link)) { rtw89_err(rtwdev, "hw scan: find no link on HW-0\n"); - ret = -ENOLINK; - goto out; + return -ENOLINK; } rtw89_hw_scan_start(rtwdev, rtwvif_link, req); @@ -1269,9 +1218,6 @@ static int rtw89_ops_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif, rtw89_err(rtwdev, "HW scan failed with status: %d\n", ret); } -out: - mutex_unlock(&rtwdev->mutex); - return 
ret; } @@ -1282,24 +1228,21 @@ static void rtw89_ops_cancel_hw_scan(struct ieee80211_hw *hw, struct rtw89_vif *rtwvif = vif_to_rtwvif(vif); struct rtw89_vif_link *rtwvif_link; + lockdep_assert_wiphy(hw->wiphy); + if (!RTW89_CHK_FW_FEATURE(SCAN_OFFLOAD, &rtwdev->fw)) return; - mutex_lock(&rtwdev->mutex); - if (!rtwdev->scanning) - goto out; + return; rtwvif_link = rtw89_vif_get_link_inst(rtwvif, 0); if (unlikely(!rtwvif_link)) { rtw89_err(rtwdev, "cancel hw scan: find no link on HW-0\n"); - goto out; + return; } rtw89_hw_scan_abort(rtwdev, rtwvif_link); - -out: - mutex_unlock(&rtwdev->mutex); } static void rtw89_ops_sta_rc_update(struct ieee80211_hw *hw, @@ -1322,13 +1265,10 @@ static int rtw89_ops_add_chanctx(struct ieee80211_hw *hw, struct ieee80211_chanctx_conf *ctx) { struct rtw89_dev *rtwdev = hw->priv; - int ret; - mutex_lock(&rtwdev->mutex); - ret = rtw89_chanctx_ops_add(rtwdev, ctx); - mutex_unlock(&rtwdev->mutex); + lockdep_assert_wiphy(hw->wiphy); - return ret; + return rtw89_chanctx_ops_add(rtwdev, ctx); } static void rtw89_ops_remove_chanctx(struct ieee80211_hw *hw, @@ -1336,9 +1276,9 @@ static void rtw89_ops_remove_chanctx(struct ieee80211_hw *hw, { struct rtw89_dev *rtwdev = hw->priv; - mutex_lock(&rtwdev->mutex); + lockdep_assert_wiphy(hw->wiphy); + rtw89_chanctx_ops_remove(rtwdev, ctx); - mutex_unlock(&rtwdev->mutex); } static void rtw89_ops_change_chanctx(struct ieee80211_hw *hw, @@ -1347,9 +1287,9 @@ static void rtw89_ops_change_chanctx(struct ieee80211_hw *hw, { struct rtw89_dev *rtwdev = hw->priv; - mutex_lock(&rtwdev->mutex); + lockdep_assert_wiphy(hw->wiphy); + rtw89_chanctx_ops_change(rtwdev, ctx, changed); - mutex_unlock(&rtwdev->mutex); } static int rtw89_ops_assign_vif_chanctx(struct ieee80211_hw *hw, @@ -1360,25 +1300,18 @@ static int rtw89_ops_assign_vif_chanctx(struct ieee80211_hw *hw, struct rtw89_dev *rtwdev = hw->priv; struct rtw89_vif *rtwvif = vif_to_rtwvif(vif); struct rtw89_vif_link *rtwvif_link; - int ret; - mutex_lock(&rtwdev->mutex); + lockdep_assert_wiphy(hw->wiphy); rtwvif_link = rtwvif->links[link_conf->link_id]; if (unlikely(!rtwvif_link)) { rtw89_err(rtwdev, "%s: rtwvif link (link_id %u) is not active\n", __func__, link_conf->link_id); - ret = -ENOLINK; - goto out; + return -ENOLINK; } - ret = rtw89_chanctx_ops_assign_vif(rtwdev, rtwvif_link, ctx); - -out: - mutex_unlock(&rtwdev->mutex); - - return ret; + return rtw89_chanctx_ops_assign_vif(rtwdev, rtwvif_link, ctx); } static void rtw89_ops_unassign_vif_chanctx(struct ieee80211_hw *hw, @@ -1390,11 +1323,10 @@ static void rtw89_ops_unassign_vif_chanctx(struct ieee80211_hw *hw, struct rtw89_vif *rtwvif = vif_to_rtwvif(vif); struct rtw89_vif_link *rtwvif_link; - mutex_lock(&rtwdev->mutex); + lockdep_assert_wiphy(hw->wiphy); rtwvif_link = rtwvif->links[link_conf->link_id]; if (unlikely(!rtwvif_link)) { - mutex_unlock(&rtwdev->mutex); rtw89_err(rtwdev, "%s: rtwvif link (link_id %u) is not active\n", __func__, link_conf->link_id); @@ -1402,7 +1334,6 @@ static void rtw89_ops_unassign_vif_chanctx(struct ieee80211_hw *hw, } rtw89_chanctx_ops_unassign_vif(rtwdev, rtwvif_link, ctx); - mutex_unlock(&rtwdev->mutex); } static int rtw89_ops_remain_on_channel(struct ieee80211_hw *hw, @@ -1415,13 +1346,12 @@ static int rtw89_ops_remain_on_channel(struct ieee80211_hw *hw, struct rtw89_vif *rtwvif = vif_to_rtwvif_safe(vif); struct rtw89_roc *roc = &rtwvif->roc; + lockdep_assert_wiphy(hw->wiphy); + if (!rtwvif) return -EINVAL; - mutex_lock(&rtwdev->mutex); - if (roc->state != RTW89_ROC_IDLE) { - 
mutex_unlock(&rtwdev->mutex); return -EBUSY; } @@ -1439,8 +1369,6 @@ static int rtw89_ops_remain_on_channel(struct ieee80211_hw *hw, rtw89_roc_start(rtwdev, rtwvif); - mutex_unlock(&rtwdev->mutex); - return 0; } @@ -1450,14 +1378,14 @@ static int rtw89_ops_cancel_remain_on_channel(struct ieee80211_hw *hw, struct rtw89_dev *rtwdev = hw->priv; struct rtw89_vif *rtwvif = vif_to_rtwvif_safe(vif); + lockdep_assert_wiphy(hw->wiphy); + if (!rtwvif) return -EINVAL; - cancel_delayed_work_sync(&rtwvif->roc.roc_work); + wiphy_delayed_work_cancel(hw->wiphy, &rtwvif->roc.roc_work); - mutex_lock(&rtwdev->mutex); rtw89_roc_end(rtwdev, rtwvif); - mutex_unlock(&rtwdev->mutex); return 0; } @@ -1478,14 +1406,14 @@ static int rtw89_ops_set_tid_config(struct ieee80211_hw *hw, { struct rtw89_dev *rtwdev = hw->priv; - mutex_lock(&rtwdev->mutex); + lockdep_assert_wiphy(hw->wiphy); + if (sta) rtw89_core_set_tid_config(rtwdev, sta, tid_config); else ieee80211_iterate_stations_atomic(rtwdev->hw, rtw89_set_tid_config_iter, tid_config); - mutex_unlock(&rtwdev->mutex); return 0; } @@ -1509,7 +1437,7 @@ static bool rtw89_ops_can_activate_links(struct ieee80211_hw *hw, { struct rtw89_dev *rtwdev = hw->priv; - guard(mutex)(&rtwdev->mutex); + lockdep_assert_wiphy(hw->wiphy); return rtw89_can_work_on_links(rtwdev, vif, active_links); } @@ -1571,7 +1499,7 @@ int rtw89_ops_change_vif_links(struct ieee80211_hw *hw, int ret = 0; int i; - guard(mutex)(&rtwdev->mutex); + lockdep_assert_wiphy(hw->wiphy); rtw89_debug(rtwdev, RTW89_DBG_STATE, "%s: old_links (0x%08x) -> new_links (0x%08x)\n", @@ -1720,7 +1648,7 @@ int rtw89_ops_change_sta_links(struct ieee80211_hw *hw, unsigned long set_links = new_links & ~old_links; int ret = 0; - guard(mutex)(&rtwdev->mutex); + lockdep_assert_wiphy(hw->wiphy); rtw89_debug(rtwdev, RTW89_DBG_STATE, "%s: old_links (0x%08x) -> new_links (0x%08x)\n", @@ -1750,13 +1678,12 @@ static int rtw89_ops_suspend(struct ieee80211_hw *hw, struct rtw89_dev *rtwdev = hw->priv; int ret; + lockdep_assert_wiphy(hw->wiphy); + set_bit(RTW89_FLAG_FORBIDDEN_TRACK_WROK, rtwdev->flags); - cancel_delayed_work_sync(&rtwdev->track_work); + wiphy_delayed_work_cancel(hw->wiphy, &rtwdev->track_work); - mutex_lock(&rtwdev->mutex); ret = rtw89_wow_suspend(rtwdev, wowlan); - mutex_unlock(&rtwdev->mutex); - if (ret) { rtw89_warn(rtwdev, "failed to suspend for wow %d\n", ret); clear_bit(RTW89_FLAG_FORBIDDEN_TRACK_WROK, rtwdev->flags); @@ -1771,15 +1698,15 @@ static int rtw89_ops_resume(struct ieee80211_hw *hw) struct rtw89_dev *rtwdev = hw->priv; int ret; - mutex_lock(&rtwdev->mutex); + lockdep_assert_wiphy(hw->wiphy); + ret = rtw89_wow_resume(rtwdev); if (ret) rtw89_warn(rtwdev, "failed to resume for wow %d\n", ret); - mutex_unlock(&rtwdev->mutex); clear_bit(RTW89_FLAG_FORBIDDEN_TRACK_WROK, rtwdev->flags); - ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->track_work, - RTW89_TRACK_WORK_PERIOD); + wiphy_delayed_work_queue(hw->wiphy, &rtwdev->track_work, + RTW89_TRACK_WORK_PERIOD); return ret ? 
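/* returning 1 asks mac80211 to restart (reconfigure) the hardware, the
 * recovery path when the WoWLAN resume failed */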
1 : 0; } @@ -1799,18 +1726,16 @@ static void rtw89_set_rekey_data(struct ieee80211_hw *hw, struct rtw89_wow_param *rtw_wow = &rtwdev->wow; struct rtw89_wow_gtk_info *gtk_info = &rtw_wow->gtk_info; + lockdep_assert_wiphy(hw->wiphy); + if (data->kek_len > sizeof(gtk_info->kek) || data->kck_len > sizeof(gtk_info->kck)) { rtw89_warn(rtwdev, "kek or kck length over fw limit\n"); return; } - mutex_lock(&rtwdev->mutex); - memcpy(gtk_info->kek, data->kek, data->kek_len); memcpy(gtk_info->kck, data->kck, data->kck_len); - - mutex_unlock(&rtwdev->mutex); } #endif @@ -1818,16 +1743,13 @@ static void rtw89_ops_rfkill_poll(struct ieee80211_hw *hw) { struct rtw89_dev *rtwdev = hw->priv; - mutex_lock(&rtwdev->mutex); + lockdep_assert_wiphy(hw->wiphy); /* wl_disable GPIO get floating when entering LPS */ if (test_bit(RTW89_FLAG_RUNNING, rtwdev->flags)) - goto out; + return; rtw89_core_rfkill_poll(rtwdev, false); - -out: - mutex_unlock(&rtwdev->mutex); } const struct ieee80211_ops rtw89_ops = { diff --git a/drivers/net/wireless/realtek/rtw89/mac_be.c b/drivers/net/wireless/realtek/rtw89/mac_be.c index 2dbdeae904ade..99b82dc85ea38 100644 --- a/drivers/net/wireless/realtek/rtw89/mac_be.c +++ b/drivers/net/wireless/realtek/rtw89/mac_be.c @@ -708,8 +708,8 @@ static int sec_eng_init_be(struct rtw89_dev *rtwdev) val32 |= B_BE_CLK_EN_CGCMP | B_BE_CLK_EN_WAPI | B_BE_CLK_EN_WEP_TKIP | B_BE_SEC_TX_ENC | B_BE_SEC_RX_DEC | B_BE_MC_DEC | B_BE_BC_DEC | - B_BE_BMC_MGNT_DEC | B_BE_UC_MGNT_DEC; - val32 &= ~B_BE_SEC_PRE_ENQUE_TX; + B_BE_BMC_MGNT_DEC | B_BE_UC_MGNT_DEC | + B_BE_SEC_PRE_ENQUE_TX; rtw89_write32(rtwdev, R_BE_SEC_ENG_CTRL, val32); rtw89_write32_set(rtwdev, R_BE_SEC_MPDU_PROC, B_BE_APPEND_ICV | B_BE_APPEND_MIC); @@ -1865,7 +1865,7 @@ int rtw89_mac_cfg_ctrl_path_v2(struct rtw89_dev *rtwdev, bool wl) if (wl) return 0; - for (i = 0; i < RTW89_PHY_MAX; i++) { + for (i = 0; i < RTW89_PHY_NUM; i++) { g[i].gnt_bt_sw_en = 1; g[i].gnt_bt = 1; g[i].gnt_wl_sw_en = 1; @@ -2585,6 +2585,8 @@ const struct rtw89_mac_gen_def rtw89_mac_gen_be = { .mask = B_BE_RXTRIG_RU26_DIS, }, .wow_ctrl = {.addr = R_BE_WOW_CTRL, .mask = B_BE_WOW_WOWEN,}, + .agg_limit = {.addr = R_BE_AMPDU_AGG_LIMIT, .mask = B_BE_AMPDU_MAX_TIME_MASK,}, + .txcnt_limit = {.addr = R_BE_TXCNT, .mask = B_BE_L_TXCNT_LMT_MASK,}, .check_mac_en = rtw89_mac_check_mac_en_be, .sys_init = sys_init_be, diff --git a/drivers/net/wireless/realtek/rtw89/pci.h b/drivers/net/wireless/realtek/rtw89/pci.h index 4d11c3dd60a5d..79fef5f901408 100644 --- a/drivers/net/wireless/realtek/rtw89/pci.h +++ b/drivers/net/wireless/realtek/rtw89/pci.h @@ -455,34 +455,36 @@ #define B_BE_RX0DMA_INT_EN BIT(0) #define R_BE_HAXI_HISR00 0xB0B4 -#define B_BE_RDU_CH6_INT BIT(28) -#define B_BE_RDU_CH5_INT BIT(27) -#define B_BE_RDU_CH4_INT BIT(26) -#define B_BE_RDU_CH2_INT BIT(25) -#define B_BE_RDU_CH1_INT BIT(24) -#define B_BE_RDU_CH0_INT BIT(23) -#define B_BE_RXDMA_STUCK_INT BIT(22) -#define B_BE_TXDMA_STUCK_INT BIT(21) -#define B_BE_TXDMA_CH14_INT BIT(20) -#define B_BE_TXDMA_CH13_INT BIT(19) -#define B_BE_TXDMA_CH12_INT BIT(18) -#define B_BE_TXDMA_CH11_INT BIT(17) -#define B_BE_TXDMA_CH10_INT BIT(16) -#define B_BE_TXDMA_CH9_INT BIT(15) -#define B_BE_TXDMA_CH8_INT BIT(14) -#define B_BE_TXDMA_CH7_INT BIT(13) -#define B_BE_TXDMA_CH6_INT BIT(12) -#define B_BE_TXDMA_CH5_INT BIT(11) -#define B_BE_TXDMA_CH4_INT BIT(10) -#define B_BE_TXDMA_CH3_INT BIT(9) -#define B_BE_TXDMA_CH2_INT BIT(8) -#define B_BE_TXDMA_CH1_INT BIT(7) -#define B_BE_TXDMA_CH0_INT BIT(6) -#define B_BE_RPQ1DMA_INT BIT(5) -#define 
B_BE_RX1P1DMA_INT BIT(4) +#define B_BE_RDU_CH5_INT_V1 BIT(30) +#define B_BE_RDU_CH4_INT_V1 BIT(29) +#define B_BE_RDU_CH3_INT_V1 BIT(28) +#define B_BE_RDU_CH2_INT_V1 BIT(27) +#define B_BE_RDU_CH1_INT_V1 BIT(26) +#define B_BE_RDU_CH0_INT_V1 BIT(25) +#define B_BE_RXDMA_STUCK_INT_V1 BIT(24) +#define B_BE_TXDMA_STUCK_INT_V1 BIT(23) +#define B_BE_TXDMA_CH14_INT_V1 BIT(22) +#define B_BE_TXDMA_CH13_INT_V1 BIT(21) +#define B_BE_TXDMA_CH12_INT_V1 BIT(20) +#define B_BE_TXDMA_CH11_INT_V1 BIT(19) +#define B_BE_TXDMA_CH10_INT_V1 BIT(18) +#define B_BE_TXDMA_CH9_INT_V1 BIT(17) +#define B_BE_TXDMA_CH8_INT_V1 BIT(16) +#define B_BE_TXDMA_CH7_INT_V1 BIT(15) +#define B_BE_TXDMA_CH6_INT_V1 BIT(14) +#define B_BE_TXDMA_CH5_INT_V1 BIT(13) +#define B_BE_TXDMA_CH4_INT_V1 BIT(12) +#define B_BE_TXDMA_CH3_INT_V1 BIT(11) +#define B_BE_TXDMA_CH2_INT_V1 BIT(10) +#define B_BE_TXDMA_CH1_INT_V1 BIT(9) +#define B_BE_TXDMA_CH0_INT_V1 BIT(8) +#define B_BE_RX1P1DMA_INT_V1 BIT(7) +#define B_BE_RX0P1DMA_INT_V1 BIT(6) +#define B_BE_RO1DMA_INT BIT(5) +#define B_BE_RP1DMA_INT BIT(4) #define B_BE_RX1DMA_INT BIT(3) -#define B_BE_RPQ0DMA_INT BIT(2) -#define B_BE_RX0P1DMA_INT BIT(1) +#define B_BE_RO0DMA_INT BIT(2) +#define B_BE_RP0DMA_INT BIT(1) #define B_BE_RX0DMA_INT BIT(0) /* TX/RX */ diff --git a/drivers/net/wireless/realtek/rtw89/pci_be.c b/drivers/net/wireless/realtek/rtw89/pci_be.c index cd39eebe81861..12e6a0cbb889b 100644 --- a/drivers/net/wireless/realtek/rtw89/pci_be.c +++ b/drivers/net/wireless/realtek/rtw89/pci_be.c @@ -666,7 +666,7 @@ SIMPLE_DEV_PM_OPS(rtw89_pm_ops_be, rtw89_pci_suspend_be, rtw89_pci_resume_be); EXPORT_SYMBOL(rtw89_pm_ops_be); const struct rtw89_pci_gen_def rtw89_pci_gen_be = { - .isr_rdu = B_BE_RDU_CH1_INT | B_BE_RDU_CH0_INT, + .isr_rdu = B_BE_RDU_CH1_INT_V1 | B_BE_RDU_CH0_INT_V1, .isr_halt_c2h = B_BE_HALT_C2H_INT, .isr_wdt_timeout = B_BE_WDT_TIMEOUT_INT, .isr_clear_rpq = {R_BE_PCIE_DMA_ISR, B_BE_PCIE_RX_RPQ0_ISR_V1}, diff --git a/drivers/net/wireless/realtek/rtw89/phy.c b/drivers/net/wireless/realtek/rtw89/phy.c index c7c05f7fda1d5..f4eee642e5ced 100644 --- a/drivers/net/wireless/realtek/rtw89/phy.c +++ b/drivers/net/wireless/realtek/rtw89/phy.c @@ -2044,12 +2044,15 @@ static s8 rtw89_phy_ant_gain_offset(struct rtw89_dev *rtwdev, u8 band, u32 cente if (!chip->support_ant_gain) return 0; - if (!(ant_gain->regd_enabled & BIT(regd))) + if (ant_gain->block_country || !(ant_gain->regd_enabled & BIT(regd))) return 0; offset_patha = rtw89_phy_ant_gain_query(rtwdev, RF_PATH_A, center_freq); offset_pathb = rtw89_phy_ant_gain_query(rtwdev, RF_PATH_B, center_freq); + if (RTW89_CHK_FW_FEATURE(NO_POWER_DIFFERENCE, &rtwdev->fw)) + return min(offset_patha, offset_pathb); + return max(offset_patha, offset_pathb); } @@ -2057,10 +2060,17 @@ s16 rtw89_phy_ant_gain_pwr_offset(struct rtw89_dev *rtwdev, const struct rtw89_chan *chan) { struct rtw89_ant_gain_info *ant_gain = &rtwdev->ant_gain; + const struct rtw89_chip_info *chip = rtwdev->chip; u8 regd = rtw89_regd_get(rtwdev, chan->band_type); s8 offset_patha, offset_pathb; - if (!(ant_gain->regd_enabled & BIT(regd))) + if (!chip->support_ant_gain) + return 0; + + if (ant_gain->block_country || !(ant_gain->regd_enabled & BIT(regd))) + return 0; + + if (RTW89_CHK_FW_FEATURE(NO_POWER_DIFFERENCE, &rtwdev->fw)) return 0; offset_patha = rtw89_phy_ant_gain_query(rtwdev, RF_PATH_A, chan->freq); @@ -2070,24 +2080,29 @@ s16 rtw89_phy_ant_gain_pwr_offset(struct rtw89_dev *rtwdev, } EXPORT_SYMBOL(rtw89_phy_ant_gain_pwr_offset); -void rtw89_print_ant_gain(struct seq_file *m, struct 
rtw89_dev *rtwdev, - const struct rtw89_chan *chan) +int rtw89_print_ant_gain(struct rtw89_dev *rtwdev, char *buf, size_t bufsz, + const struct rtw89_chan *chan) { struct rtw89_ant_gain_info *ant_gain = &rtwdev->ant_gain; const struct rtw89_chip_info *chip = rtwdev->chip; u8 regd = rtw89_regd_get(rtwdev, chan->band_type); + char *p = buf, *end = buf + bufsz; s8 offset_patha, offset_pathb; - if (!chip->support_ant_gain || !(ant_gain->regd_enabled & BIT(regd))) { - seq_puts(m, "no DAG is applied\n"); - return; + if (!(chip->support_ant_gain && (ant_gain->regd_enabled & BIT(regd))) || + ant_gain->block_country) { + p += scnprintf(p, end - p, "no DAG is applied\n"); + goto out; } offset_patha = rtw89_phy_ant_gain_query(rtwdev, RF_PATH_A, chan->freq); offset_pathb = rtw89_phy_ant_gain_query(rtwdev, RF_PATH_B, chan->freq); - seq_printf(m, "ChainA offset: %d dBm\n", offset_patha); - seq_printf(m, "ChainB offset: %d dBm\n", offset_pathb); + p += scnprintf(p, end - p, "ChainA offset: %d dBm\n", offset_patha); + p += scnprintf(p, end - p, "ChainB offset: %d dBm\n", offset_pathb); + +out: + return p - buf; } static const u8 rtw89_rs_idx_num_ax[] = { @@ -3455,6 +3470,30 @@ rtw89_phy_c2h_rfk_report_state(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u3 static void rtw89_phy_c2h_rfk_log_tas_pwr(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len) { + const struct rtw89_c2h_rf_tas_info *rf_tas = + (const struct rtw89_c2h_rf_tas_info *)c2h->data; + const enum rtw89_sar_sources src = rtwdev->sar.src; + struct rtw89_tas_info *tas = &rtwdev->tas; + u64 linear = 0; + u32 i, cur_idx; + s16 txpwr; + + if (!tas->enable || src == RTW89_SAR_SOURCE_NONE) + return; + + cur_idx = le32_to_cpu(rf_tas->cur_idx); + for (i = 0; i < cur_idx; i++) { + txpwr = (s16)le16_to_cpu(rf_tas->txpwr_history[i]); + linear += rtw89_db_quarter_to_linear(txpwr); + + rtw89_debug(rtwdev, RTW89_DBG_SAR, + "tas: index: %u, txpwr: %d\n", i, txpwr); + } + + if (cur_idx == 0) + tas->instant_txpwr = rtw89_db_to_linear(0); + else + tas->instant_txpwr = DIV_ROUND_DOWN_ULL(linear, cur_idx); } static @@ -4600,7 +4639,7 @@ static void rtw89_phy_cfo_dm(struct rtw89_dev *rtwdev) cfo->dcfo_avg = 0; rtw89_debug(rtwdev, RTW89_DBG_CFO, "CFO:total_sta_assoc=%d\n", rtwdev->total_sta_assoc); - if (rtwdev->total_sta_assoc == 0) { + if (rtwdev->total_sta_assoc == 0 || rtw89_is_mlo_1_1(rtwdev)) { rtw89_phy_cfo_reset(rtwdev); return; } @@ -4651,29 +4690,28 @@ static void rtw89_phy_cfo_dm(struct rtw89_dev *rtwdev) rtw89_phy_cfo_statistics_reset(rtwdev); } -void rtw89_phy_cfo_track_work(struct work_struct *work) +void rtw89_phy_cfo_track_work(struct wiphy *wiphy, struct wiphy_work *work) { struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev, cfo_track_work.work); struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking; - mutex_lock(&rtwdev->mutex); + lockdep_assert_wiphy(wiphy); + if (!cfo->cfo_trig_by_timer_en) - goto out; + return; rtw89_leave_ps_mode(rtwdev); rtw89_phy_cfo_dm(rtwdev); - ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->cfo_track_work, - msecs_to_jiffies(cfo->cfo_timer_ms)); -out: - mutex_unlock(&rtwdev->mutex); + wiphy_delayed_work_queue(wiphy, &rtwdev->cfo_track_work, + msecs_to_jiffies(cfo->cfo_timer_ms)); } static void rtw89_phy_cfo_start_work(struct rtw89_dev *rtwdev) { struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking; - ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->cfo_track_work, - msecs_to_jiffies(cfo->cfo_timer_ms)); + wiphy_delayed_work_queue(rtwdev->hw->wiphy, &rtwdev->cfo_track_work, + 
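/* the work re-arms itself at cfo_timer_ms for as long as
 * cfo_trig_by_timer_en stays set */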
msecs_to_jiffies(cfo->cfo_timer_ms)); } void rtw89_phy_cfo_track(struct rtw89_dev *rtwdev) @@ -5115,7 +5153,6 @@ static void rtw89_phy_stat_thermal_update(struct rtw89_dev *rtwdev) struct rtw89_phy_iter_rssi_data { struct rtw89_dev *rtwdev; - struct rtw89_phy_ch_info *ch_info; bool rssi_changed; }; @@ -5123,10 +5160,15 @@ static void __rtw89_phy_stat_rssi_update_iter(struct rtw89_sta_link *rtwsta_link, struct rtw89_phy_iter_rssi_data *rssi_data) { - struct rtw89_phy_ch_info *ch_info = rssi_data->ch_info; + struct rtw89_vif_link *rtwvif_link = rtwsta_link->rtwvif_link; + struct rtw89_dev *rtwdev = rssi_data->rtwdev; + struct rtw89_phy_ch_info *ch_info; + struct rtw89_bb_ctx *bb; unsigned long rssi_curr; rssi_curr = ewma_rssi_read(&rtwsta_link->avg_rssi); + bb = rtw89_get_bb_ctx(rtwdev, rtwvif_link->phy_idx); + ch_info = &bb->ch_info; if (rssi_curr < ch_info->rssi_min) { ch_info->rssi_min = rssi_curr; @@ -5157,11 +5199,13 @@ static void rtw89_phy_stat_rssi_update_iter(void *data, static void rtw89_phy_stat_rssi_update(struct rtw89_dev *rtwdev) { - struct rtw89_phy_iter_rssi_data rssi_data = {0}; + struct rtw89_phy_iter_rssi_data rssi_data = {}; + struct rtw89_bb_ctx *bb; rssi_data.rtwdev = rtwdev; - rssi_data.ch_info = &rtwdev->ch_info; - rssi_data.ch_info->rssi_min = U8_MAX; + rtw89_for_each_active_bb(rtwdev, bb) + bb->ch_info.rssi_min = U8_MAX; + ieee80211_iterate_stations_atomic(rtwdev->hw, rtw89_phy_stat_rssi_update_iter, &rssi_data); @@ -5199,24 +5243,27 @@ void rtw89_phy_stat_track(struct rtw89_dev *rtwdev) memset(&phystat->cur_pkt_stat, 0, sizeof(phystat->cur_pkt_stat)); } -static u16 rtw89_phy_ccx_us_to_idx(struct rtw89_dev *rtwdev, u32 time_us) +static u16 rtw89_phy_ccx_us_to_idx(struct rtw89_dev *rtwdev, + struct rtw89_bb_ctx *bb, u32 time_us) { - struct rtw89_env_monitor_info *env = &rtwdev->env_monitor; + struct rtw89_env_monitor_info *env = &bb->env_monitor; return time_us >> (ilog2(CCX_US_BASE_RATIO) + env->ccx_unit_idx); } -static u32 rtw89_phy_ccx_idx_to_us(struct rtw89_dev *rtwdev, u16 idx) +static u32 rtw89_phy_ccx_idx_to_us(struct rtw89_dev *rtwdev, + struct rtw89_bb_ctx *bb, u16 idx) { - struct rtw89_env_monitor_info *env = &rtwdev->env_monitor; + struct rtw89_env_monitor_info *env = &bb->env_monitor; return idx << (ilog2(CCX_US_BASE_RATIO) + env->ccx_unit_idx); } -static void rtw89_phy_ccx_top_setting_init(struct rtw89_dev *rtwdev) +static void rtw89_phy_ccx_top_setting_init(struct rtw89_dev *rtwdev, + struct rtw89_bb_ctx *bb) { const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def; - struct rtw89_env_monitor_info *env = &rtwdev->env_monitor; + struct rtw89_env_monitor_info *env = &bb->env_monitor; const struct rtw89_ccx_regs *ccx = phy->ccx; env->ccx_manual_ctrl = false; @@ -5225,17 +5272,20 @@ static void rtw89_phy_ccx_top_setting_init(struct rtw89_dev *rtwdev) env->ccx_period = 0; env->ccx_unit_idx = RTW89_CCX_32_US; - rtw89_phy_set_phy_regs(rtwdev, ccx->setting_addr, ccx->en_mask, 1); - rtw89_phy_set_phy_regs(rtwdev, ccx->setting_addr, ccx->trig_opt_mask, 1); - rtw89_phy_set_phy_regs(rtwdev, ccx->setting_addr, ccx->measurement_trig_mask, 1); - rtw89_phy_set_phy_regs(rtwdev, ccx->setting_addr, ccx->edcca_opt_mask, - RTW89_CCX_EDCCA_BW20_0); + rtw89_phy_write32_idx(rtwdev, ccx->setting_addr, ccx->en_mask, 1, bb->phy_idx); + rtw89_phy_write32_idx(rtwdev, ccx->setting_addr, ccx->trig_opt_mask, 1, + bb->phy_idx); + rtw89_phy_write32_idx(rtwdev, ccx->setting_addr, ccx->measurement_trig_mask, 1, + bb->phy_idx); + rtw89_phy_write32_idx(rtwdev, ccx->setting_addr, 
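/* CCX programming now goes through rtw89_phy_write32_idx() so each bb
 * context targets its own PHY */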
ccx->edcca_opt_mask, + RTW89_CCX_EDCCA_BW20_0, bb->phy_idx); } -static u16 rtw89_phy_ccx_get_report(struct rtw89_dev *rtwdev, u16 report, - u16 score) +static u16 rtw89_phy_ccx_get_report(struct rtw89_dev *rtwdev, + struct rtw89_bb_ctx *bb, + u16 report, u16 score) { - struct rtw89_env_monitor_info *env = &rtwdev->env_monitor; + struct rtw89_env_monitor_info *env = &bb->env_monitor; u32 numer = 0; u16 ret = 0; @@ -5275,9 +5325,10 @@ static void rtw89_phy_ccx_ms_to_period_unit(struct rtw89_dev *rtwdev, *period, *unit_idx); } -static void rtw89_phy_ccx_racing_release(struct rtw89_dev *rtwdev) +static void rtw89_phy_ccx_racing_release(struct rtw89_dev *rtwdev, + struct rtw89_bb_ctx *bb) { - struct rtw89_env_monitor_info *env = &rtwdev->env_monitor; + struct rtw89_env_monitor_info *env = &bb->env_monitor; rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, "lv:(%d)->(0)\n", env->ccx_rac_lv); @@ -5288,9 +5339,10 @@ static void rtw89_phy_ccx_racing_release(struct rtw89_dev *rtwdev) } static bool rtw89_phy_ifs_clm_th_update_check(struct rtw89_dev *rtwdev, + struct rtw89_bb_ctx *bb, struct rtw89_ccx_para_info *para) { - struct rtw89_env_monitor_info *env = &rtwdev->env_monitor; + struct rtw89_env_monitor_info *env = &bb->env_monitor; bool is_update = env->ifs_clm_app != para->ifs_clm_app; u8 i = 0; u16 *ifs_th_l = env->ifs_clm_th_l; @@ -5325,12 +5377,12 @@ static bool rtw89_phy_ifs_clm_th_update_check(struct rtw89_dev *rtwdev, */ ifs_th_l[IFS_CLM_TH_START_IDX] = 0; ifs_th_h_us[IFS_CLM_TH_START_IDX] = ifs_th0_us; - ifs_th_h[IFS_CLM_TH_START_IDX] = rtw89_phy_ccx_us_to_idx(rtwdev, + ifs_th_h[IFS_CLM_TH_START_IDX] = rtw89_phy_ccx_us_to_idx(rtwdev, bb, ifs_th0_us); for (i = 1; i < RTW89_IFS_CLM_NUM; i++) { ifs_th_l[i] = ifs_th_h[i - 1] + 1; ifs_th_h_us[i] = ifs_th_h_us[i - 1] * ifs_th_times; - ifs_th_h[i] = rtw89_phy_ccx_us_to_idx(rtwdev, ifs_th_h_us[i]); + ifs_th_h[i] = rtw89_phy_ccx_us_to_idx(rtwdev, bb, ifs_th_h_us[i]); } ifs_update_finished: @@ -5341,30 +5393,31 @@ static bool rtw89_phy_ifs_clm_th_update_check(struct rtw89_dev *rtwdev, return is_update; } -static void rtw89_phy_ifs_clm_set_th_reg(struct rtw89_dev *rtwdev) +static void rtw89_phy_ifs_clm_set_th_reg(struct rtw89_dev *rtwdev, + struct rtw89_bb_ctx *bb) { const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def; - struct rtw89_env_monitor_info *env = &rtwdev->env_monitor; + struct rtw89_env_monitor_info *env = &bb->env_monitor; const struct rtw89_ccx_regs *ccx = phy->ccx; u8 i = 0; - rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t1_addr, ccx->ifs_t1_th_l_mask, - env->ifs_clm_th_l[0]); - rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t2_addr, ccx->ifs_t2_th_l_mask, - env->ifs_clm_th_l[1]); - rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t3_addr, ccx->ifs_t3_th_l_mask, - env->ifs_clm_th_l[2]); - rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t4_addr, ccx->ifs_t4_th_l_mask, - env->ifs_clm_th_l[3]); - - rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t1_addr, ccx->ifs_t1_th_h_mask, - env->ifs_clm_th_h[0]); - rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t2_addr, ccx->ifs_t2_th_h_mask, - env->ifs_clm_th_h[1]); - rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t3_addr, ccx->ifs_t3_th_h_mask, - env->ifs_clm_th_h[2]); - rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t4_addr, ccx->ifs_t4_th_h_mask, - env->ifs_clm_th_h[3]); + rtw89_phy_write32_idx(rtwdev, ccx->ifs_t1_addr, ccx->ifs_t1_th_l_mask, + env->ifs_clm_th_l[0], bb->phy_idx); + rtw89_phy_write32_idx(rtwdev, ccx->ifs_t2_addr, ccx->ifs_t2_th_l_mask, + env->ifs_clm_th_l[1], bb->phy_idx); + rtw89_phy_write32_idx(rtwdev, ccx->ifs_t3_addr, 
ccx->ifs_t3_th_l_mask, + env->ifs_clm_th_l[2], bb->phy_idx); + rtw89_phy_write32_idx(rtwdev, ccx->ifs_t4_addr, ccx->ifs_t4_th_l_mask, + env->ifs_clm_th_l[3], bb->phy_idx); + + rtw89_phy_write32_idx(rtwdev, ccx->ifs_t1_addr, ccx->ifs_t1_th_h_mask, + env->ifs_clm_th_h[0], bb->phy_idx); + rtw89_phy_write32_idx(rtwdev, ccx->ifs_t2_addr, ccx->ifs_t2_th_h_mask, + env->ifs_clm_th_h[1], bb->phy_idx); + rtw89_phy_write32_idx(rtwdev, ccx->ifs_t3_addr, ccx->ifs_t3_th_h_mask, + env->ifs_clm_th_h[2], bb->phy_idx); + rtw89_phy_write32_idx(rtwdev, ccx->ifs_t4_addr, ccx->ifs_t4_th_h_mask, + env->ifs_clm_th_h[3], bb->phy_idx); for (i = 0; i < RTW89_IFS_CLM_NUM; i++) rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, @@ -5372,31 +5425,38 @@ static void rtw89_phy_ifs_clm_set_th_reg(struct rtw89_dev *rtwdev) i + 1, env->ifs_clm_th_l[i], env->ifs_clm_th_h[i]); } -static void rtw89_phy_ifs_clm_setting_init(struct rtw89_dev *rtwdev) +static void rtw89_phy_ifs_clm_setting_init(struct rtw89_dev *rtwdev, + struct rtw89_bb_ctx *bb) { const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def; - struct rtw89_env_monitor_info *env = &rtwdev->env_monitor; + struct rtw89_env_monitor_info *env = &bb->env_monitor; const struct rtw89_ccx_regs *ccx = phy->ccx; - struct rtw89_ccx_para_info para = {0}; + struct rtw89_ccx_para_info para = {}; env->ifs_clm_app = RTW89_IFS_CLM_BACKGROUND; env->ifs_clm_mntr_time = 0; para.ifs_clm_app = RTW89_IFS_CLM_INIT; - if (rtw89_phy_ifs_clm_th_update_check(rtwdev, ¶)) - rtw89_phy_ifs_clm_set_th_reg(rtwdev); + if (rtw89_phy_ifs_clm_th_update_check(rtwdev, bb, ¶)) + rtw89_phy_ifs_clm_set_th_reg(rtwdev, bb); - rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_cnt_addr, ccx->ifs_collect_en_mask, true); - rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t1_addr, ccx->ifs_t1_en_mask, true); - rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t2_addr, ccx->ifs_t2_en_mask, true); - rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t3_addr, ccx->ifs_t3_en_mask, true); - rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_t4_addr, ccx->ifs_t4_en_mask, true); + rtw89_phy_write32_idx(rtwdev, ccx->ifs_cnt_addr, ccx->ifs_collect_en_mask, true, + bb->phy_idx); + rtw89_phy_write32_idx(rtwdev, ccx->ifs_t1_addr, ccx->ifs_t1_en_mask, true, + bb->phy_idx); + rtw89_phy_write32_idx(rtwdev, ccx->ifs_t2_addr, ccx->ifs_t2_en_mask, true, + bb->phy_idx); + rtw89_phy_write32_idx(rtwdev, ccx->ifs_t3_addr, ccx->ifs_t3_en_mask, true, + bb->phy_idx); + rtw89_phy_write32_idx(rtwdev, ccx->ifs_t4_addr, ccx->ifs_t4_en_mask, true, + bb->phy_idx); } static int rtw89_phy_ccx_racing_ctrl(struct rtw89_dev *rtwdev, + struct rtw89_bb_ctx *bb, enum rtw89_env_racing_lv level) { - struct rtw89_env_monitor_info *env = &rtwdev->env_monitor; + struct rtw89_env_monitor_info *env = &bb->env_monitor; int ret = 0; if (level >= RTW89_RAC_MAX_NUM) { @@ -5425,56 +5485,62 @@ static int rtw89_phy_ccx_racing_ctrl(struct rtw89_dev *rtwdev, return ret; } -static void rtw89_phy_ccx_trigger(struct rtw89_dev *rtwdev) +static void rtw89_phy_ccx_trigger(struct rtw89_dev *rtwdev, + struct rtw89_bb_ctx *bb) { const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def; - struct rtw89_env_monitor_info *env = &rtwdev->env_monitor; + struct rtw89_env_monitor_info *env = &bb->env_monitor; const struct rtw89_ccx_regs *ccx = phy->ccx; - rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_cnt_addr, ccx->ifs_clm_cnt_clear_mask, 0); - rtw89_phy_set_phy_regs(rtwdev, ccx->setting_addr, ccx->measurement_trig_mask, 0); - rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_cnt_addr, ccx->ifs_clm_cnt_clear_mask, 1); - rtw89_phy_set_phy_regs(rtwdev, 
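
[Editorial sketch] rtw89_phy_ifs_clm_th_update_check() above builds the T1..T4 IFS upper bounds as a geometric progression and converts microseconds to report indices with a single right shift (rtw89_phy_ccx_us_to_idx()). A standalone sketch; th0_us, times and shift are illustrative values, not the chip's real configuration.

#include <stdio.h>

#define IFS_CLM_NUM 4

/* us -> report-index conversion mirrors rtw89_phy_ccx_us_to_idx():
 * a right shift by log2(base ratio) + unit index. */
static unsigned int us_to_idx(unsigned int us, unsigned int shift)
{
	return us >> shift;
}

int main(void)
{
	unsigned int th_us[IFS_CLM_NUM], th_idx[IFS_CLM_NUM];
	unsigned int th0_us = 64, times = 4, shift = 5;
	int i;

	/* T1..T4 thresholds grow geometrically, as in the patch:
	 * ifs_th_h_us[i] = ifs_th_h_us[i - 1] * ifs_th_times. */
	th_us[0] = th0_us;
	th_idx[0] = us_to_idx(th_us[0], shift);
	for (i = 1; i < IFS_CLM_NUM; i++) {
		th_us[i] = th_us[i - 1] * times;
		th_idx[i] = us_to_idx(th_us[i], shift);
	}

	for (i = 0; i < IFS_CLM_NUM; i++)
		printf("T%d: %u us -> idx %u\n", i + 1, th_us[i], th_idx[i]);
	return 0;
}
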
ccx->setting_addr, ccx->measurement_trig_mask, 1); + rtw89_phy_write32_idx(rtwdev, ccx->ifs_cnt_addr, ccx->ifs_clm_cnt_clear_mask, 0, + bb->phy_idx); + rtw89_phy_write32_idx(rtwdev, ccx->setting_addr, ccx->measurement_trig_mask, 0, + bb->phy_idx); + rtw89_phy_write32_idx(rtwdev, ccx->ifs_cnt_addr, ccx->ifs_clm_cnt_clear_mask, 1, + bb->phy_idx); + rtw89_phy_write32_idx(rtwdev, ccx->setting_addr, ccx->measurement_trig_mask, 1, + bb->phy_idx); env->ccx_ongoing = true; } -static void rtw89_phy_ifs_clm_get_utility(struct rtw89_dev *rtwdev) +static void rtw89_phy_ifs_clm_get_utility(struct rtw89_dev *rtwdev, + struct rtw89_bb_ctx *bb) { - struct rtw89_env_monitor_info *env = &rtwdev->env_monitor; + struct rtw89_env_monitor_info *env = &bb->env_monitor; u8 i = 0; u32 res = 0; env->ifs_clm_tx_ratio = - rtw89_phy_ccx_get_report(rtwdev, env->ifs_clm_tx, PERCENT); + rtw89_phy_ccx_get_report(rtwdev, bb, env->ifs_clm_tx, PERCENT); env->ifs_clm_edcca_excl_cca_ratio = - rtw89_phy_ccx_get_report(rtwdev, env->ifs_clm_edcca_excl_cca, + rtw89_phy_ccx_get_report(rtwdev, bb, env->ifs_clm_edcca_excl_cca, PERCENT); env->ifs_clm_cck_fa_ratio = - rtw89_phy_ccx_get_report(rtwdev, env->ifs_clm_cckfa, PERCENT); + rtw89_phy_ccx_get_report(rtwdev, bb, env->ifs_clm_cckfa, PERCENT); env->ifs_clm_ofdm_fa_ratio = - rtw89_phy_ccx_get_report(rtwdev, env->ifs_clm_ofdmfa, PERCENT); + rtw89_phy_ccx_get_report(rtwdev, bb, env->ifs_clm_ofdmfa, PERCENT); env->ifs_clm_cck_cca_excl_fa_ratio = - rtw89_phy_ccx_get_report(rtwdev, env->ifs_clm_cckcca_excl_fa, + rtw89_phy_ccx_get_report(rtwdev, bb, env->ifs_clm_cckcca_excl_fa, PERCENT); env->ifs_clm_ofdm_cca_excl_fa_ratio = - rtw89_phy_ccx_get_report(rtwdev, env->ifs_clm_ofdmcca_excl_fa, + rtw89_phy_ccx_get_report(rtwdev, bb, env->ifs_clm_ofdmcca_excl_fa, PERCENT); env->ifs_clm_cck_fa_permil = - rtw89_phy_ccx_get_report(rtwdev, env->ifs_clm_cckfa, PERMIL); + rtw89_phy_ccx_get_report(rtwdev, bb, env->ifs_clm_cckfa, PERMIL); env->ifs_clm_ofdm_fa_permil = - rtw89_phy_ccx_get_report(rtwdev, env->ifs_clm_ofdmfa, PERMIL); + rtw89_phy_ccx_get_report(rtwdev, bb, env->ifs_clm_ofdmfa, PERMIL); for (i = 0; i < RTW89_IFS_CLM_NUM; i++) { if (env->ifs_clm_his[i] > ENV_MNTR_IFSCLM_HIS_MAX) { env->ifs_clm_ifs_avg[i] = ENV_MNTR_FAIL_DWORD; } else { env->ifs_clm_ifs_avg[i] = - rtw89_phy_ccx_idx_to_us(rtwdev, + rtw89_phy_ccx_idx_to_us(rtwdev, bb, env->ifs_clm_avg[i]); } - res = rtw89_phy_ccx_idx_to_us(rtwdev, env->ifs_clm_cca[i]); + res = rtw89_phy_ccx_idx_to_us(rtwdev, bb, env->ifs_clm_cca[i]); res += env->ifs_clm_his[i] >> 1; if (env->ifs_clm_his[i]) res /= env->ifs_clm_his[i]; @@ -5504,81 +5570,82 @@ static void rtw89_phy_ifs_clm_get_utility(struct rtw89_dev *rtwdev) env->ifs_clm_cca_avg[i]); } -static bool rtw89_phy_ifs_clm_get_result(struct rtw89_dev *rtwdev) +static bool rtw89_phy_ifs_clm_get_result(struct rtw89_dev *rtwdev, + struct rtw89_bb_ctx *bb) { const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def; - struct rtw89_env_monitor_info *env = &rtwdev->env_monitor; + struct rtw89_env_monitor_info *env = &bb->env_monitor; const struct rtw89_ccx_regs *ccx = phy->ccx; u8 i = 0; - if (rtw89_phy_read32_mask(rtwdev, ccx->ifs_total_addr, - ccx->ifs_cnt_done_mask) == 0) { + if (rtw89_phy_read32_idx(rtwdev, ccx->ifs_total_addr, + ccx->ifs_cnt_done_mask, bb->phy_idx) == 0) { rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, "Get IFS_CLM report Fail\n"); return false; } env->ifs_clm_tx = - rtw89_phy_read32_mask(rtwdev, ccx->ifs_clm_tx_cnt_addr, - ccx->ifs_clm_tx_cnt_msk); + rtw89_phy_read32_idx(rtwdev, 
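
[Editorial sketch] rtw89_phy_ifs_clm_get_utility() adds half the divisor before dividing (res += his >> 1; res /= his) so the per-bin CCA average rounds to nearest instead of truncating. The same idiom in isolation:

#include <stdio.h>

/* Round-to-nearest integer division, as used for ifs_clm_cca_avg[]. */
static unsigned int avg_round(unsigned int total, unsigned int cnt)
{
	if (!cnt)
		return total;
	return (total + (cnt >> 1)) / cnt;
}

int main(void)
{
	/* 10/4 truncates to 2; the rounded average yields 3. */
	printf("%u\n", avg_round(10, 4));
	return 0;
}
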
ccx->ifs_clm_tx_cnt_addr, + ccx->ifs_clm_tx_cnt_msk, bb->phy_idx); env->ifs_clm_edcca_excl_cca = - rtw89_phy_read32_mask(rtwdev, ccx->ifs_clm_tx_cnt_addr, - ccx->ifs_clm_edcca_excl_cca_fa_mask); + rtw89_phy_read32_idx(rtwdev, ccx->ifs_clm_tx_cnt_addr, + ccx->ifs_clm_edcca_excl_cca_fa_mask, bb->phy_idx); env->ifs_clm_cckcca_excl_fa = - rtw89_phy_read32_mask(rtwdev, ccx->ifs_clm_cca_addr, - ccx->ifs_clm_cckcca_excl_fa_mask); + rtw89_phy_read32_idx(rtwdev, ccx->ifs_clm_cca_addr, + ccx->ifs_clm_cckcca_excl_fa_mask, bb->phy_idx); env->ifs_clm_ofdmcca_excl_fa = - rtw89_phy_read32_mask(rtwdev, ccx->ifs_clm_cca_addr, - ccx->ifs_clm_ofdmcca_excl_fa_mask); + rtw89_phy_read32_idx(rtwdev, ccx->ifs_clm_cca_addr, + ccx->ifs_clm_ofdmcca_excl_fa_mask, bb->phy_idx); env->ifs_clm_cckfa = - rtw89_phy_read32_mask(rtwdev, ccx->ifs_clm_fa_addr, - ccx->ifs_clm_cck_fa_mask); + rtw89_phy_read32_idx(rtwdev, ccx->ifs_clm_fa_addr, + ccx->ifs_clm_cck_fa_mask, bb->phy_idx); env->ifs_clm_ofdmfa = - rtw89_phy_read32_mask(rtwdev, ccx->ifs_clm_fa_addr, - ccx->ifs_clm_ofdm_fa_mask); + rtw89_phy_read32_idx(rtwdev, ccx->ifs_clm_fa_addr, + ccx->ifs_clm_ofdm_fa_mask, bb->phy_idx); env->ifs_clm_his[0] = - rtw89_phy_read32_mask(rtwdev, ccx->ifs_his_addr, - ccx->ifs_t1_his_mask); + rtw89_phy_read32_idx(rtwdev, ccx->ifs_his_addr, + ccx->ifs_t1_his_mask, bb->phy_idx); env->ifs_clm_his[1] = - rtw89_phy_read32_mask(rtwdev, ccx->ifs_his_addr, - ccx->ifs_t2_his_mask); + rtw89_phy_read32_idx(rtwdev, ccx->ifs_his_addr, + ccx->ifs_t2_his_mask, bb->phy_idx); env->ifs_clm_his[2] = - rtw89_phy_read32_mask(rtwdev, ccx->ifs_his_addr, - ccx->ifs_t3_his_mask); + rtw89_phy_read32_idx(rtwdev, ccx->ifs_his_addr, + ccx->ifs_t3_his_mask, bb->phy_idx); env->ifs_clm_his[3] = - rtw89_phy_read32_mask(rtwdev, ccx->ifs_his_addr, - ccx->ifs_t4_his_mask); + rtw89_phy_read32_idx(rtwdev, ccx->ifs_his_addr, + ccx->ifs_t4_his_mask, bb->phy_idx); env->ifs_clm_avg[0] = - rtw89_phy_read32_mask(rtwdev, ccx->ifs_avg_l_addr, - ccx->ifs_t1_avg_mask); + rtw89_phy_read32_idx(rtwdev, ccx->ifs_avg_l_addr, + ccx->ifs_t1_avg_mask, bb->phy_idx); env->ifs_clm_avg[1] = - rtw89_phy_read32_mask(rtwdev, ccx->ifs_avg_l_addr, - ccx->ifs_t2_avg_mask); + rtw89_phy_read32_idx(rtwdev, ccx->ifs_avg_l_addr, + ccx->ifs_t2_avg_mask, bb->phy_idx); env->ifs_clm_avg[2] = - rtw89_phy_read32_mask(rtwdev, ccx->ifs_avg_h_addr, - ccx->ifs_t3_avg_mask); + rtw89_phy_read32_idx(rtwdev, ccx->ifs_avg_h_addr, + ccx->ifs_t3_avg_mask, bb->phy_idx); env->ifs_clm_avg[3] = - rtw89_phy_read32_mask(rtwdev, ccx->ifs_avg_h_addr, - ccx->ifs_t4_avg_mask); + rtw89_phy_read32_idx(rtwdev, ccx->ifs_avg_h_addr, + ccx->ifs_t4_avg_mask, bb->phy_idx); env->ifs_clm_cca[0] = - rtw89_phy_read32_mask(rtwdev, ccx->ifs_cca_l_addr, - ccx->ifs_t1_cca_mask); + rtw89_phy_read32_idx(rtwdev, ccx->ifs_cca_l_addr, + ccx->ifs_t1_cca_mask, bb->phy_idx); env->ifs_clm_cca[1] = - rtw89_phy_read32_mask(rtwdev, ccx->ifs_cca_l_addr, - ccx->ifs_t2_cca_mask); + rtw89_phy_read32_idx(rtwdev, ccx->ifs_cca_l_addr, + ccx->ifs_t2_cca_mask, bb->phy_idx); env->ifs_clm_cca[2] = - rtw89_phy_read32_mask(rtwdev, ccx->ifs_cca_h_addr, - ccx->ifs_t3_cca_mask); + rtw89_phy_read32_idx(rtwdev, ccx->ifs_cca_h_addr, + ccx->ifs_t3_cca_mask, bb->phy_idx); env->ifs_clm_cca[3] = - rtw89_phy_read32_mask(rtwdev, ccx->ifs_cca_h_addr, - ccx->ifs_t4_cca_mask); + rtw89_phy_read32_idx(rtwdev, ccx->ifs_cca_h_addr, + ccx->ifs_t4_cca_mask, bb->phy_idx); env->ifs_clm_total_ifs = - rtw89_phy_read32_mask(rtwdev, ccx->ifs_total_addr, - ccx->ifs_total_mask); + 
rtw89_phy_read32_idx(rtwdev, ccx->ifs_total_addr, + ccx->ifs_total_mask, bb->phy_idx); rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, "IFS-CLM total_ifs = %d\n", env->ifs_clm_total_ifs); @@ -5598,16 +5665,17 @@ static bool rtw89_phy_ifs_clm_get_result(struct rtw89_dev *rtwdev) "T%d:[%d, %d, %d]\n", i + 1, env->ifs_clm_his[i], env->ifs_clm_avg[i], env->ifs_clm_cca[i]); - rtw89_phy_ifs_clm_get_utility(rtwdev); + rtw89_phy_ifs_clm_get_utility(rtwdev, bb); return true; } static int rtw89_phy_ifs_clm_set(struct rtw89_dev *rtwdev, + struct rtw89_bb_ctx *bb, struct rtw89_ccx_para_info *para) { const struct rtw89_phy_gen_def *phy = rtwdev->chip->phy_def; - struct rtw89_env_monitor_info *env = &rtwdev->env_monitor; + struct rtw89_env_monitor_info *env = &bb->env_monitor; const struct rtw89_ccx_regs *ccx = phy->ccx; u32 period = 0; u32 unit_idx = 0; @@ -5618,17 +5686,17 @@ static int rtw89_phy_ifs_clm_set(struct rtw89_dev *rtwdev, return -EINVAL; } - if (rtw89_phy_ccx_racing_ctrl(rtwdev, para->rac_lv)) + if (rtw89_phy_ccx_racing_ctrl(rtwdev, bb, para->rac_lv)) return -EINVAL; if (para->mntr_time != env->ifs_clm_mntr_time) { rtw89_phy_ccx_ms_to_period_unit(rtwdev, para->mntr_time, &period, &unit_idx); - rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_cnt_addr, - ccx->ifs_clm_period_mask, period); - rtw89_phy_set_phy_regs(rtwdev, ccx->ifs_cnt_addr, - ccx->ifs_clm_cnt_unit_mask, - unit_idx); + rtw89_phy_write32_idx(rtwdev, ccx->ifs_cnt_addr, + ccx->ifs_clm_period_mask, period, bb->phy_idx); + rtw89_phy_write32_idx(rtwdev, ccx->ifs_cnt_addr, + ccx->ifs_clm_cnt_unit_mask, + unit_idx, bb->phy_idx); rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, "Update IFS-CLM time ((%d)) -> ((%d))\n", @@ -5639,18 +5707,19 @@ static int rtw89_phy_ifs_clm_set(struct rtw89_dev *rtwdev, env->ccx_unit_idx = (u8)unit_idx; } - if (rtw89_phy_ifs_clm_th_update_check(rtwdev, para)) { + if (rtw89_phy_ifs_clm_th_update_check(rtwdev, bb, para)) { env->ifs_clm_app = para->ifs_clm_app; - rtw89_phy_ifs_clm_set_th_reg(rtwdev); + rtw89_phy_ifs_clm_set_th_reg(rtwdev, bb); } return 0; } -void rtw89_phy_env_monitor_track(struct rtw89_dev *rtwdev) +static void __rtw89_phy_env_monitor_track(struct rtw89_dev *rtwdev, + struct rtw89_bb_ctx *bb) { - struct rtw89_env_monitor_info *env = &rtwdev->env_monitor; - struct rtw89_ccx_para_info para = {0}; + struct rtw89_env_monitor_info *env = &bb->env_monitor; + struct rtw89_ccx_para_info para = {}; u8 chk_result = RTW89_PHY_ENV_MON_CCX_FAIL; env->ccx_watchdog_result = RTW89_PHY_ENV_MON_CCX_FAIL; @@ -5660,25 +5729,36 @@ void rtw89_phy_env_monitor_track(struct rtw89_dev *rtwdev) return; } + rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, + "BB-%d env_monitor track\n", bb->phy_idx); + /* only ifs_clm for now */ - if (rtw89_phy_ifs_clm_get_result(rtwdev)) + if (rtw89_phy_ifs_clm_get_result(rtwdev, bb)) env->ccx_watchdog_result |= RTW89_PHY_ENV_MON_IFS_CLM; - rtw89_phy_ccx_racing_release(rtwdev); + rtw89_phy_ccx_racing_release(rtwdev, bb); para.mntr_time = 1900; para.rac_lv = RTW89_RAC_LV_1; para.ifs_clm_app = RTW89_IFS_CLM_BACKGROUND; - if (rtw89_phy_ifs_clm_set(rtwdev, ¶) == 0) + if (rtw89_phy_ifs_clm_set(rtwdev, bb, ¶) == 0) chk_result |= RTW89_PHY_ENV_MON_IFS_CLM; if (chk_result) - rtw89_phy_ccx_trigger(rtwdev); + rtw89_phy_ccx_trigger(rtwdev, bb); rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, "get_result=0x%x, chk_result:0x%x\n", env->ccx_watchdog_result, chk_result); } +void rtw89_phy_env_monitor_track(struct rtw89_dev *rtwdev) +{ + struct rtw89_bb_ctx *bb; + + rtw89_for_each_active_bb(rtwdev, bb) + 
__rtw89_phy_env_monitor_track(rtwdev, bb); +} + static bool rtw89_physts_ie_page_valid(enum rtw89_phy_status_bitmap *ie_page) { if (*ie_page >= RTW89_PHYSTS_BITMAP_NUM || @@ -5799,11 +5879,12 @@ static void rtw89_physts_parsing_init(struct rtw89_dev *rtwdev) __rtw89_physts_parsing_init(rtwdev, RTW89_PHY_1); } -static void rtw89_phy_dig_read_gain_table(struct rtw89_dev *rtwdev, int type) +static void rtw89_phy_dig_read_gain_table(struct rtw89_dev *rtwdev, + struct rtw89_bb_ctx *bb, int type) { const struct rtw89_chip_info *chip = rtwdev->chip; - struct rtw89_dig_info *dig = &rtwdev->dig; const struct rtw89_phy_dig_gain_cfg *cfg; + struct rtw89_dig_info *dig = &bb->dig; const char *msg; u8 i; s8 gain_base; @@ -5840,8 +5921,8 @@ static void rtw89_phy_dig_read_gain_table(struct rtw89_dev *rtwdev, int type) } for (i = 0; i < cfg->size; i++) { - tmp = rtw89_phy_read32_mask(rtwdev, cfg->table[i].addr, - cfg->table[i].mask); + tmp = rtw89_phy_read32_idx(rtwdev, cfg->table[i].addr, + cfg->table[i].mask, bb->phy_idx); tmp >>= DIG_GAIN_SHIFT; gain_arr[i] = sign_extend32(tmp, U4_MAX_BIT) + gain_base; gain_base += DIG_GAIN; @@ -5851,25 +5932,26 @@ static void rtw89_phy_dig_read_gain_table(struct rtw89_dev *rtwdev, int type) } } -static void rtw89_phy_dig_update_gain_para(struct rtw89_dev *rtwdev) +static void rtw89_phy_dig_update_gain_para(struct rtw89_dev *rtwdev, + struct rtw89_bb_ctx *bb) { - struct rtw89_dig_info *dig = &rtwdev->dig; + struct rtw89_dig_info *dig = &bb->dig; u32 tmp; u8 i; if (!rtwdev->hal.support_igi) return; - tmp = rtw89_phy_read32_mask(rtwdev, R_PATH0_IB_PKPW, - B_PATH0_IB_PKPW_MSK); + tmp = rtw89_phy_read32_idx(rtwdev, R_PATH0_IB_PKPW, + B_PATH0_IB_PKPW_MSK, bb->phy_idx); dig->ib_pkpwr = sign_extend32(tmp >> DIG_GAIN_SHIFT, U8_MAX_BIT); - dig->ib_pbk = rtw89_phy_read32_mask(rtwdev, R_PATH0_IB_PBK, - B_PATH0_IB_PBK_MSK); + dig->ib_pbk = rtw89_phy_read32_idx(rtwdev, R_PATH0_IB_PBK, + B_PATH0_IB_PBK_MSK, bb->phy_idx); rtw89_debug(rtwdev, RTW89_DBG_DIG, "ib_pkpwr=%d, ib_pbk=%d\n", dig->ib_pkpwr, dig->ib_pbk); for (i = RTW89_DIG_GAIN_LNA_G; i < RTW89_DIG_GAIN_MAX; i++) - rtw89_phy_dig_read_gain_table(rtwdev, i); + rtw89_phy_dig_read_gain_table(rtwdev, bb, i); } static const u8 rssi_nolink = 22; @@ -5878,10 +5960,11 @@ static const u16 fa_th_2g[FA_TH_NUM] = {22, 44, 66, 88}; static const u16 fa_th_5g[FA_TH_NUM] = {4, 8, 12, 16}; static const u16 fa_th_nolink[FA_TH_NUM] = {196, 352, 440, 528}; -static void rtw89_phy_dig_update_rssi_info(struct rtw89_dev *rtwdev) +static void rtw89_phy_dig_update_rssi_info(struct rtw89_dev *rtwdev, + struct rtw89_bb_ctx *bb) { - struct rtw89_phy_ch_info *ch_info = &rtwdev->ch_info; - struct rtw89_dig_info *dig = &rtwdev->dig; + struct rtw89_phy_ch_info *ch_info = &bb->ch_info; + struct rtw89_dig_info *dig = &bb->dig; bool is_linked = rtwdev->total_sta_assoc > 0; if (is_linked) { @@ -5892,10 +5975,11 @@ static void rtw89_phy_dig_update_rssi_info(struct rtw89_dev *rtwdev) } } -static void rtw89_phy_dig_update_para(struct rtw89_dev *rtwdev) +static void rtw89_phy_dig_update_para(struct rtw89_dev *rtwdev, + struct rtw89_bb_ctx *bb) { - struct rtw89_dig_info *dig = &rtwdev->dig; - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0); + const struct rtw89_chan *chan = rtw89_mgnt_chan_get(rtwdev, bb->phy_idx); + struct rtw89_dig_info *dig = &bb->dig; bool is_linked = rtwdev->total_sta_assoc > 0; const u16 *fa_th_src = NULL; @@ -5924,9 +6008,10 @@ static const u8 pd_low_th_offset = 16, dynamic_igi_min = 0x20; static const u8 
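
[Editorial sketch] The recurring shape of this refactor: a public entry point keeps its old signature and iterates every active baseband context, while the per-PHY body moves into a __-prefixed helper taking the context. A compile-able model with stub types; bb_ctx and for_each_active_bb are stand-ins for rtw89_bb_ctx and rtw89_for_each_active_bb, not the driver's definitions.

#include <stdio.h>

#define NUM_BB 2

/* State that used to live directly in rtw89_dev now lives in one
 * context per baseband (dig, ch_info, env_monitor, edcca_bak...). */
struct bb_ctx {
	int phy_idx;
	int active;
};

struct dev {
	struct bb_ctx bb[NUM_BB];
};

/* Hypothetical stand-in for the driver's iterator macro. */
#define for_each_active_bb(d, bb) \
	for ((bb) = (d)->bb; (bb) < (d)->bb + NUM_BB; (bb)++) \
		if ((bb)->active)

static void __track_one(struct dev *d, struct bb_ctx *bb)
{
	printf("BB-%d track\n", bb->phy_idx);
}

/* Public entry point: old signature, new per-context fan-out. */
static void track(struct dev *d)
{
	struct bb_ctx *bb;

	for_each_active_bb(d, bb)
		__track_one(d, bb);
}

int main(void)
{
	struct dev d = { .bb = { { 0, 1 }, { 1, 0 } } };

	track(&d);	/* only BB-0 is active here */
	return 0;
}
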
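[Editorial sketch] rtw89_phy_dig_read_gain_table() sign-extends the packed 4-bit gain deltas with sign_extend32(tmp, U4_MAX_BIT). Assuming U4_MAX_BIT names bit 3 (the sign bit of a 4-bit field), a userspace equivalent:

#include <stdio.h>
#include <stdint.h>

/* Userspace equivalent of the kernel's sign_extend32(): treat bit
 * `index` of `value` as the sign bit. */
static int32_t sign_extend32(uint32_t value, int index)
{
	uint8_t shift = 31 - index;

	return (int32_t)(value << shift) >> shift;
}

int main(void)
{
	uint32_t raw = 0xA;			/* 4-bit field: 0b1010 */
	int32_t gain = sign_extend32(raw, 3);	/* -> -6 */

	printf("raw 0x%X -> %d\n", raw, gain);
	return 0;
}
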
igi_max_performance_mode = 0x5a; static const u8 dynamic_pd_threshold_max; -static void rtw89_phy_dig_para_reset(struct rtw89_dev *rtwdev) +static void rtw89_phy_dig_para_reset(struct rtw89_dev *rtwdev, + struct rtw89_bb_ctx *bb) { - struct rtw89_dig_info *dig = &rtwdev->dig; + struct rtw89_dig_info *dig = &bb->dig; dig->cur_gaincode.lna_idx = LNA_IDX_MAX; dig->cur_gaincode.tia_idx = TIA_IDX_MAX; @@ -5942,15 +6027,27 @@ static void rtw89_phy_dig_para_reset(struct rtw89_dev *rtwdev) dig->is_linked_pre = false; } +static void __rtw89_phy_dig_init(struct rtw89_dev *rtwdev, + struct rtw89_bb_ctx *bb) +{ + rtw89_debug(rtwdev, RTW89_DBG_DIG, "BB-%d dig_init\n", bb->phy_idx); + + rtw89_phy_dig_update_gain_para(rtwdev, bb); + rtw89_phy_dig_reset(rtwdev, bb); +} + static void rtw89_phy_dig_init(struct rtw89_dev *rtwdev) { - rtw89_phy_dig_update_gain_para(rtwdev); - rtw89_phy_dig_reset(rtwdev); + struct rtw89_bb_ctx *bb; + + rtw89_for_each_capab_bb(rtwdev, bb) + __rtw89_phy_dig_init(rtwdev, bb); } -static u8 rtw89_phy_dig_lna_idx_by_rssi(struct rtw89_dev *rtwdev, u8 rssi) +static u8 rtw89_phy_dig_lna_idx_by_rssi(struct rtw89_dev *rtwdev, + struct rtw89_bb_ctx *bb, u8 rssi) { - struct rtw89_dig_info *dig = &rtwdev->dig; + struct rtw89_dig_info *dig = &bb->dig; u8 lna_idx; if (rssi < dig->igi_rssi_th[0]) @@ -5969,9 +6066,10 @@ static u8 rtw89_phy_dig_lna_idx_by_rssi(struct rtw89_dev *rtwdev, u8 rssi) return lna_idx; } -static u8 rtw89_phy_dig_tia_idx_by_rssi(struct rtw89_dev *rtwdev, u8 rssi) +static u8 rtw89_phy_dig_tia_idx_by_rssi(struct rtw89_dev *rtwdev, + struct rtw89_bb_ctx *bb, u8 rssi) { - struct rtw89_dig_info *dig = &rtwdev->dig; + struct rtw89_dig_info *dig = &bb->dig; u8 tia_idx; if (rssi < dig->igi_rssi_th[0]) @@ -5984,10 +6082,11 @@ static u8 rtw89_phy_dig_tia_idx_by_rssi(struct rtw89_dev *rtwdev, u8 rssi) #define IB_PBK_BASE 110 #define WB_RSSI_BASE 10 -static u8 rtw89_phy_dig_rxb_idx_by_rssi(struct rtw89_dev *rtwdev, u8 rssi, +static u8 rtw89_phy_dig_rxb_idx_by_rssi(struct rtw89_dev *rtwdev, + struct rtw89_bb_ctx *bb, u8 rssi, struct rtw89_agc_gaincode_set *set) { - struct rtw89_dig_info *dig = &rtwdev->dig; + struct rtw89_dig_info *dig = &bb->dig; s8 lna_gain = dig->lna_gain[set->lna_idx]; s8 tia_gain = dig->tia_gain[set->tia_idx]; s32 wb_rssi = rssi + lna_gain + tia_gain; @@ -6003,12 +6102,13 @@ static u8 rtw89_phy_dig_rxb_idx_by_rssi(struct rtw89_dev *rtwdev, u8 rssi, return rxb_idx; } -static void rtw89_phy_dig_gaincode_by_rssi(struct rtw89_dev *rtwdev, u8 rssi, +static void rtw89_phy_dig_gaincode_by_rssi(struct rtw89_dev *rtwdev, + struct rtw89_bb_ctx *bb, u8 rssi, struct rtw89_agc_gaincode_set *set) { - set->lna_idx = rtw89_phy_dig_lna_idx_by_rssi(rtwdev, rssi); - set->tia_idx = rtw89_phy_dig_tia_idx_by_rssi(rtwdev, rssi); - set->rxb_idx = rtw89_phy_dig_rxb_idx_by_rssi(rtwdev, rssi, set); + set->lna_idx = rtw89_phy_dig_lna_idx_by_rssi(rtwdev, bb, rssi); + set->tia_idx = rtw89_phy_dig_tia_idx_by_rssi(rtwdev, bb, rssi); + set->rxb_idx = rtw89_phy_dig_rxb_idx_by_rssi(rtwdev, bb, rssi, set); rtw89_debug(rtwdev, RTW89_DBG_DIG, "final_rssi=%03d, (lna,tia,rab)=(%d,%d,%02d)\n", @@ -6017,10 +6117,11 @@ static void rtw89_phy_dig_gaincode_by_rssi(struct rtw89_dev *rtwdev, u8 rssi, #define IGI_OFFSET_MAX 25 #define IGI_OFFSET_MUL 2 -static void rtw89_phy_dig_igi_offset_by_env(struct rtw89_dev *rtwdev) +static void rtw89_phy_dig_igi_offset_by_env(struct rtw89_dev *rtwdev, + struct rtw89_bb_ctx *bb) { - struct rtw89_dig_info *dig = &rtwdev->dig; - struct rtw89_env_monitor_info *env = 
&rtwdev->env_monitor; + struct rtw89_dig_info *dig = &bb->dig; + struct rtw89_env_monitor_info *env = &bb->env_monitor; enum rtw89_dig_noisy_level noisy_lv; u8 igi_offset = dig->fa_rssi_ofst; u16 fa_ratio = 0; @@ -6057,92 +6158,99 @@ static void rtw89_phy_dig_igi_offset_by_env(struct rtw89_dev *rtwdev) noisy_lv, igi_offset); } -static void rtw89_phy_dig_set_lna_idx(struct rtw89_dev *rtwdev, u8 lna_idx) +static void rtw89_phy_dig_set_lna_idx(struct rtw89_dev *rtwdev, + struct rtw89_bb_ctx *bb, u8 lna_idx) { const struct rtw89_dig_regs *dig_regs = rtwdev->chip->dig_regs; - rtw89_phy_write32_mask(rtwdev, dig_regs->p0_lna_init.addr, - dig_regs->p0_lna_init.mask, lna_idx); - rtw89_phy_write32_mask(rtwdev, dig_regs->p1_lna_init.addr, - dig_regs->p1_lna_init.mask, lna_idx); + rtw89_phy_write32_idx(rtwdev, dig_regs->p0_lna_init.addr, + dig_regs->p0_lna_init.mask, lna_idx, bb->phy_idx); + rtw89_phy_write32_idx(rtwdev, dig_regs->p1_lna_init.addr, + dig_regs->p1_lna_init.mask, lna_idx, bb->phy_idx); } -static void rtw89_phy_dig_set_tia_idx(struct rtw89_dev *rtwdev, u8 tia_idx) +static void rtw89_phy_dig_set_tia_idx(struct rtw89_dev *rtwdev, + struct rtw89_bb_ctx *bb, u8 tia_idx) { const struct rtw89_dig_regs *dig_regs = rtwdev->chip->dig_regs; - rtw89_phy_write32_mask(rtwdev, dig_regs->p0_tia_init.addr, - dig_regs->p0_tia_init.mask, tia_idx); - rtw89_phy_write32_mask(rtwdev, dig_regs->p1_tia_init.addr, - dig_regs->p1_tia_init.mask, tia_idx); + rtw89_phy_write32_idx(rtwdev, dig_regs->p0_tia_init.addr, + dig_regs->p0_tia_init.mask, tia_idx, bb->phy_idx); + rtw89_phy_write32_idx(rtwdev, dig_regs->p1_tia_init.addr, + dig_regs->p1_tia_init.mask, tia_idx, bb->phy_idx); } -static void rtw89_phy_dig_set_rxb_idx(struct rtw89_dev *rtwdev, u8 rxb_idx) +static void rtw89_phy_dig_set_rxb_idx(struct rtw89_dev *rtwdev, + struct rtw89_bb_ctx *bb, u8 rxb_idx) { const struct rtw89_dig_regs *dig_regs = rtwdev->chip->dig_regs; - rtw89_phy_write32_mask(rtwdev, dig_regs->p0_rxb_init.addr, - dig_regs->p0_rxb_init.mask, rxb_idx); - rtw89_phy_write32_mask(rtwdev, dig_regs->p1_rxb_init.addr, - dig_regs->p1_rxb_init.mask, rxb_idx); + rtw89_phy_write32_idx(rtwdev, dig_regs->p0_rxb_init.addr, + dig_regs->p0_rxb_init.mask, rxb_idx, bb->phy_idx); + rtw89_phy_write32_idx(rtwdev, dig_regs->p1_rxb_init.addr, + dig_regs->p1_rxb_init.mask, rxb_idx, bb->phy_idx); } static void rtw89_phy_dig_set_igi_cr(struct rtw89_dev *rtwdev, + struct rtw89_bb_ctx *bb, const struct rtw89_agc_gaincode_set set) { if (!rtwdev->hal.support_igi) return; - rtw89_phy_dig_set_lna_idx(rtwdev, set.lna_idx); - rtw89_phy_dig_set_tia_idx(rtwdev, set.tia_idx); - rtw89_phy_dig_set_rxb_idx(rtwdev, set.rxb_idx); + rtw89_phy_dig_set_lna_idx(rtwdev, bb, set.lna_idx); + rtw89_phy_dig_set_tia_idx(rtwdev, bb, set.tia_idx); + rtw89_phy_dig_set_rxb_idx(rtwdev, bb, set.rxb_idx); rtw89_debug(rtwdev, RTW89_DBG_DIG, "Set (lna,tia,rxb)=((%d,%d,%02d))\n", set.lna_idx, set.tia_idx, set.rxb_idx); } static void rtw89_phy_dig_sdagc_follow_pagc_config(struct rtw89_dev *rtwdev, + struct rtw89_bb_ctx *bb, bool enable) { const struct rtw89_dig_regs *dig_regs = rtwdev->chip->dig_regs; - rtw89_phy_write32_mask(rtwdev, dig_regs->p0_p20_pagcugc_en.addr, - dig_regs->p0_p20_pagcugc_en.mask, enable); - rtw89_phy_write32_mask(rtwdev, dig_regs->p0_s20_pagcugc_en.addr, - dig_regs->p0_s20_pagcugc_en.mask, enable); - rtw89_phy_write32_mask(rtwdev, dig_regs->p1_p20_pagcugc_en.addr, - dig_regs->p1_p20_pagcugc_en.mask, enable); - rtw89_phy_write32_mask(rtwdev, dig_regs->p1_s20_pagcugc_en.addr, - 
dig_regs->p1_s20_pagcugc_en.mask, enable); + rtw89_phy_write32_idx(rtwdev, dig_regs->p0_p20_pagcugc_en.addr, + dig_regs->p0_p20_pagcugc_en.mask, enable, bb->phy_idx); + rtw89_phy_write32_idx(rtwdev, dig_regs->p0_s20_pagcugc_en.addr, + dig_regs->p0_s20_pagcugc_en.mask, enable, bb->phy_idx); + rtw89_phy_write32_idx(rtwdev, dig_regs->p1_p20_pagcugc_en.addr, + dig_regs->p1_p20_pagcugc_en.mask, enable, bb->phy_idx); + rtw89_phy_write32_idx(rtwdev, dig_regs->p1_s20_pagcugc_en.addr, + dig_regs->p1_s20_pagcugc_en.mask, enable, bb->phy_idx); rtw89_debug(rtwdev, RTW89_DBG_DIG, "sdagc_follow_pagc=%d\n", enable); } -static void rtw89_phy_dig_config_igi(struct rtw89_dev *rtwdev) +static void rtw89_phy_dig_config_igi(struct rtw89_dev *rtwdev, + struct rtw89_bb_ctx *bb) { - struct rtw89_dig_info *dig = &rtwdev->dig; + struct rtw89_dig_info *dig = &bb->dig; if (!rtwdev->hal.support_igi) return; if (dig->force_gaincode_idx_en) { - rtw89_phy_dig_set_igi_cr(rtwdev, dig->force_gaincode); + rtw89_phy_dig_set_igi_cr(rtwdev, bb, dig->force_gaincode); rtw89_debug(rtwdev, RTW89_DBG_DIG, "Force gaincode index enabled.\n"); } else { - rtw89_phy_dig_gaincode_by_rssi(rtwdev, dig->igi_fa_rssi, + rtw89_phy_dig_gaincode_by_rssi(rtwdev, bb, dig->igi_fa_rssi, &dig->cur_gaincode); - rtw89_phy_dig_set_igi_cr(rtwdev, dig->cur_gaincode); + rtw89_phy_dig_set_igi_cr(rtwdev, bb, dig->cur_gaincode); } } -static void rtw89_phy_dig_dyn_pd_th(struct rtw89_dev *rtwdev, u8 rssi, - bool enable) +static void rtw89_phy_dig_dyn_pd_th(struct rtw89_dev *rtwdev, + struct rtw89_bb_ctx *bb, + u8 rssi, bool enable) { - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0); + const struct rtw89_chan *chan = rtw89_mgnt_chan_get(rtwdev, bb->phy_idx); const struct rtw89_dig_regs *dig_regs = rtwdev->chip->dig_regs; enum rtw89_bandwidth cbw = chan->band_width; - struct rtw89_dig_info *dig = &rtwdev->dig; + struct rtw89_dig_info *dig = &bb->dig; u8 final_rssi = 0, under_region = dig->pd_low_th_ofst; u8 ofdm_cca_th; s8 cck_cca_th; @@ -6184,10 +6292,10 @@ static void rtw89_phy_dig_dyn_pd_th(struct rtw89_dev *rtwdev, u8 rssi, "Dynamic PD th disabled, Set PD_low_bd=0\n"); } - rtw89_phy_write32_mask(rtwdev, dig_regs->seg0_pd_reg, - dig_regs->pd_lower_bound_mask, pd_val); - rtw89_phy_write32_mask(rtwdev, dig_regs->seg0_pd_reg, - dig_regs->pd_spatial_reuse_en, enable); + rtw89_phy_write32_idx(rtwdev, dig_regs->seg0_pd_reg, + dig_regs->pd_lower_bound_mask, pd_val, bb->phy_idx); + rtw89_phy_write32_idx(rtwdev, dig_regs->seg0_pd_reg, + dig_regs->pd_spatial_reuse_en, enable, bb->phy_idx); if (!rtwdev->hal.support_cckpd) return; @@ -6199,29 +6307,29 @@ static void rtw89_phy_dig_dyn_pd_th(struct rtw89_dev *rtwdev, u8 rssi, "igi=%d, cck_ccaTH=%d, backoff=%d, cck_PD_low=((%d))dB\n", final_rssi, cck_cca_th, under_region, pd_val); - rtw89_phy_write32_mask(rtwdev, dig_regs->bmode_pd_reg, - dig_regs->bmode_cca_rssi_limit_en, enable); - rtw89_phy_write32_mask(rtwdev, dig_regs->bmode_pd_lower_bound_reg, - dig_regs->bmode_rssi_nocca_low_th_mask, pd_val); + rtw89_phy_write32_idx(rtwdev, dig_regs->bmode_pd_reg, + dig_regs->bmode_cca_rssi_limit_en, enable, bb->phy_idx); + rtw89_phy_write32_idx(rtwdev, dig_regs->bmode_pd_lower_bound_reg, + dig_regs->bmode_rssi_nocca_low_th_mask, pd_val, bb->phy_idx); } -void rtw89_phy_dig_reset(struct rtw89_dev *rtwdev) +void rtw89_phy_dig_reset(struct rtw89_dev *rtwdev, struct rtw89_bb_ctx *bb) { - struct rtw89_dig_info *dig = &rtwdev->dig; + struct rtw89_dig_info *dig = &bb->dig; dig->bypass_dig = false; - 
rtw89_phy_dig_para_reset(rtwdev); - rtw89_phy_dig_set_igi_cr(rtwdev, dig->force_gaincode); - rtw89_phy_dig_dyn_pd_th(rtwdev, rssi_nolink, false); - rtw89_phy_dig_sdagc_follow_pagc_config(rtwdev, false); - rtw89_phy_dig_update_para(rtwdev); + rtw89_phy_dig_para_reset(rtwdev, bb); + rtw89_phy_dig_set_igi_cr(rtwdev, bb, dig->force_gaincode); + rtw89_phy_dig_dyn_pd_th(rtwdev, bb, rssi_nolink, false); + rtw89_phy_dig_sdagc_follow_pagc_config(rtwdev, bb, false); + rtw89_phy_dig_update_para(rtwdev, bb); } #define IGI_RSSI_MIN 10 #define ABS_IGI_MIN 0xc -void rtw89_phy_dig(struct rtw89_dev *rtwdev) +static void __rtw89_phy_dig(struct rtw89_dev *rtwdev, struct rtw89_bb_ctx *bb) { - struct rtw89_dig_info *dig = &rtwdev->dig; + struct rtw89_dig_info *dig = &bb->dig; bool is_linked = rtwdev->total_sta_assoc > 0; u8 igi_min; @@ -6230,20 +6338,22 @@ void rtw89_phy_dig(struct rtw89_dev *rtwdev) return; } - rtw89_phy_dig_update_rssi_info(rtwdev); + rtw89_debug(rtwdev, RTW89_DBG_DIG, "BB-%d dig track\n", bb->phy_idx); + + rtw89_phy_dig_update_rssi_info(rtwdev, bb); if (!dig->is_linked_pre && is_linked) { rtw89_debug(rtwdev, RTW89_DBG_DIG, "First connected\n"); - rtw89_phy_dig_update_para(rtwdev); + rtw89_phy_dig_update_para(rtwdev, bb); dig->igi_fa_rssi = dig->igi_rssi; } else if (dig->is_linked_pre && !is_linked) { rtw89_debug(rtwdev, RTW89_DBG_DIG, "First disconnected\n"); - rtw89_phy_dig_update_para(rtwdev); + rtw89_phy_dig_update_para(rtwdev, bb); dig->igi_fa_rssi = dig->igi_rssi; } dig->is_linked_pre = is_linked; - rtw89_phy_dig_igi_offset_by_env(rtwdev); + rtw89_phy_dig_igi_offset_by_env(rtwdev, bb); igi_min = max_t(int, dig->igi_rssi - IGI_RSSI_MIN, 0); dig->dyn_igi_max = min(igi_min + IGI_OFFSET_MAX, igi_max_performance_mode); @@ -6262,14 +6372,22 @@ void rtw89_phy_dig(struct rtw89_dev *rtwdev) dig->igi_rssi, dig->dyn_igi_max, dig->dyn_igi_min, dig->igi_fa_rssi); - rtw89_phy_dig_config_igi(rtwdev); + rtw89_phy_dig_config_igi(rtwdev, bb); - rtw89_phy_dig_dyn_pd_th(rtwdev, dig->igi_fa_rssi, dig->dyn_pd_th_en); + rtw89_phy_dig_dyn_pd_th(rtwdev, bb, dig->igi_fa_rssi, dig->dyn_pd_th_en); if (dig->dyn_pd_th_en && dig->igi_fa_rssi > dig->dyn_pd_th_max) - rtw89_phy_dig_sdagc_follow_pagc_config(rtwdev, true); + rtw89_phy_dig_sdagc_follow_pagc_config(rtwdev, bb, true); else - rtw89_phy_dig_sdagc_follow_pagc_config(rtwdev, false); + rtw89_phy_dig_sdagc_follow_pagc_config(rtwdev, bb, false); +} + +void rtw89_phy_dig(struct rtw89_dev *rtwdev) +{ + struct rtw89_bb_ctx *bb; + + rtw89_for_each_active_bb(rtwdev, bb) + __rtw89_phy_dig(rtwdev, bb); } static void __rtw89_phy_tx_path_div_sta_iter(struct rtw89_dev *rtwdev, @@ -6443,17 +6561,17 @@ static void rtw89_phy_antdiv_training_state(struct rtw89_dev *rtwdev) } antdiv->training_count++; - ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->antdiv_work, - state_period); + wiphy_delayed_work_queue(rtwdev->hw->wiphy, &rtwdev->antdiv_work, + state_period); } -void rtw89_phy_antdiv_work(struct work_struct *work) +void rtw89_phy_antdiv_work(struct wiphy *wiphy, struct wiphy_work *work) { struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev, antdiv_work.work); struct rtw89_antdiv_info *antdiv = &rtwdev->antdiv; - mutex_lock(&rtwdev->mutex); + lockdep_assert_wiphy(wiphy); if (antdiv->training_count <= ANTDIV_TRAINNING_CNT) { rtw89_phy_antdiv_training_state(rtwdev); @@ -6461,8 +6579,6 @@ void rtw89_phy_antdiv_work(struct work_struct *work) rtw89_phy_antdiv_decision_state(rtwdev); rtw89_phy_antdiv_set_ant(rtwdev); } - - mutex_unlock(&rtwdev->mutex); } void 
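
[Editorial sketch] The worker conversions in this patch (CFO tracking, antenna diversity) replace struct work_struct plus an explicit driver mutex with wiphy_work that runs under the wiphy lock, so the goto-unlock tails become plain returns. The stub below shows only the calling convention the patch moves to; it is not the real cfg80211 API surface.

/* Stubbed types; in the kernel these come from cfg80211. */
struct wiphy;
struct wiphy_work;

/* In the kernel this is a lockdep annotation; here it is a no-op. */
static void lockdep_assert_wiphy(struct wiphy *wiphy) { (void)wiphy; }

/* Old style: void work_fn(struct work_struct *w) { mutex_lock(...);
 * ...; out: mutex_unlock(...); }
 * New style: the caller already holds the wiphy lock, the worker
 * only asserts it and may return early without unlock bookkeeping. */
static void cfo_track_work(struct wiphy *wiphy, struct wiphy_work *work)
{
	lockdep_assert_wiphy(wiphy);
	(void)work;
	/* ... body runs under the wiphy lock ... */
}

int main(void)
{
	cfo_track_work(0, 0);
	return 0;
}
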
rtw89_phy_antdiv_track(struct rtw89_dev *rtwdev) @@ -6483,19 +6599,34 @@ void rtw89_phy_antdiv_track(struct rtw89_dev *rtwdev) return; antdiv->training_count = 0; - ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->antdiv_work, 0); + wiphy_delayed_work_queue(rtwdev->hw->wiphy, &rtwdev->antdiv_work, 0); +} + +static void __rtw89_phy_env_monitor_init(struct rtw89_dev *rtwdev, + struct rtw89_bb_ctx *bb) +{ + rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, + "BB-%d env_monitor init\n", bb->phy_idx); + + rtw89_phy_ccx_top_setting_init(rtwdev, bb); + rtw89_phy_ifs_clm_setting_init(rtwdev, bb); } static void rtw89_phy_env_monitor_init(struct rtw89_dev *rtwdev) { - rtw89_phy_ccx_top_setting_init(rtwdev); - rtw89_phy_ifs_clm_setting_init(rtwdev); + struct rtw89_bb_ctx *bb; + + rtw89_for_each_capab_bb(rtwdev, bb) + __rtw89_phy_env_monitor_init(rtwdev, bb); } -static void rtw89_phy_edcca_init(struct rtw89_dev *rtwdev) +static void __rtw89_phy_edcca_init(struct rtw89_dev *rtwdev, + struct rtw89_bb_ctx *bb) { const struct rtw89_edcca_regs *edcca_regs = rtwdev->chip->edcca_regs; - struct rtw89_edcca_bak *edcca_bak = &rtwdev->hal.edcca_bak; + struct rtw89_edcca_bak *edcca_bak = &bb->edcca_bak; + + rtw89_debug(rtwdev, RTW89_DBG_EDCCA, "BB-%d edcca init\n", bb->phy_idx); memset(edcca_bak, 0, sizeof(*edcca_bak)); @@ -6511,8 +6642,16 @@ static void rtw89_phy_edcca_init(struct rtw89_dev *rtwdev) rtw89_phy_set_phy_regs(rtwdev, R_DFS_FFT_CG, B_DFS_FFT_EN, 1); } - rtw89_phy_write32_mask(rtwdev, edcca_regs->tx_collision_t2r_st, - edcca_regs->tx_collision_t2r_st_mask, 0x29); + rtw89_phy_write32_idx(rtwdev, edcca_regs->tx_collision_t2r_st, + edcca_regs->tx_collision_t2r_st_mask, 0x29, bb->phy_idx); +} + +static void rtw89_phy_edcca_init(struct rtw89_dev *rtwdev) +{ + struct rtw89_bb_ctx *bb; + + rtw89_for_each_capab_bb(rtwdev, bb) + __rtw89_phy_edcca_init(rtwdev, bb); } void rtw89_phy_dm_init(struct rtw89_dev *rtwdev) @@ -6875,44 +7014,46 @@ void rtw89_decode_chan_idx(struct rtw89_dev *rtwdev, u8 chan_idx, } EXPORT_SYMBOL(rtw89_decode_chan_idx); -void rtw89_phy_config_edcca(struct rtw89_dev *rtwdev, bool scan) +void rtw89_phy_config_edcca(struct rtw89_dev *rtwdev, + struct rtw89_bb_ctx *bb, bool scan) { const struct rtw89_edcca_regs *edcca_regs = rtwdev->chip->edcca_regs; - struct rtw89_edcca_bak *edcca_bak = &rtwdev->hal.edcca_bak; + struct rtw89_edcca_bak *edcca_bak = &bb->edcca_bak; if (scan) { edcca_bak->a = - rtw89_phy_read32_mask(rtwdev, edcca_regs->edcca_level, - edcca_regs->edcca_mask); + rtw89_phy_read32_idx(rtwdev, edcca_regs->edcca_level, + edcca_regs->edcca_mask, bb->phy_idx); edcca_bak->p = - rtw89_phy_read32_mask(rtwdev, edcca_regs->edcca_level, - edcca_regs->edcca_p_mask); + rtw89_phy_read32_idx(rtwdev, edcca_regs->edcca_level, + edcca_regs->edcca_p_mask, bb->phy_idx); edcca_bak->ppdu = - rtw89_phy_read32_mask(rtwdev, edcca_regs->ppdu_level, - edcca_regs->ppdu_mask); - - rtw89_phy_write32_mask(rtwdev, edcca_regs->edcca_level, - edcca_regs->edcca_mask, EDCCA_MAX); - rtw89_phy_write32_mask(rtwdev, edcca_regs->edcca_level, - edcca_regs->edcca_p_mask, EDCCA_MAX); - rtw89_phy_write32_mask(rtwdev, edcca_regs->ppdu_level, - edcca_regs->ppdu_mask, EDCCA_MAX); + rtw89_phy_read32_idx(rtwdev, edcca_regs->ppdu_level, + edcca_regs->ppdu_mask, bb->phy_idx); + + rtw89_phy_write32_idx(rtwdev, edcca_regs->edcca_level, + edcca_regs->edcca_mask, EDCCA_MAX, bb->phy_idx); + rtw89_phy_write32_idx(rtwdev, edcca_regs->edcca_level, + edcca_regs->edcca_p_mask, EDCCA_MAX, bb->phy_idx); + rtw89_phy_write32_idx(rtwdev, 
edcca_regs->ppdu_level, + edcca_regs->ppdu_mask, EDCCA_MAX, bb->phy_idx); } else { - rtw89_phy_write32_mask(rtwdev, edcca_regs->edcca_level, - edcca_regs->edcca_mask, - edcca_bak->a); - rtw89_phy_write32_mask(rtwdev, edcca_regs->edcca_level, - edcca_regs->edcca_p_mask, - edcca_bak->p); - rtw89_phy_write32_mask(rtwdev, edcca_regs->ppdu_level, - edcca_regs->ppdu_mask, - edcca_bak->ppdu); + rtw89_phy_write32_idx(rtwdev, edcca_regs->edcca_level, + edcca_regs->edcca_mask, + edcca_bak->a, bb->phy_idx); + rtw89_phy_write32_idx(rtwdev, edcca_regs->edcca_level, + edcca_regs->edcca_p_mask, + edcca_bak->p, bb->phy_idx); + rtw89_phy_write32_idx(rtwdev, edcca_regs->ppdu_level, + edcca_regs->ppdu_mask, + edcca_bak->ppdu, bb->phy_idx); } } -static void rtw89_phy_edcca_log(struct rtw89_dev *rtwdev) +static void rtw89_phy_edcca_log(struct rtw89_dev *rtwdev, struct rtw89_bb_ctx *bb) { const struct rtw89_edcca_regs *edcca_regs = rtwdev->chip->edcca_regs; + const struct rtw89_edcca_p_regs *edcca_p_regs; bool flag_fb, flag_p20, flag_s20, flag_s40, flag_s80; s8 pwdb_fb, pwdb_p20, pwdb_s20, pwdb_s40, pwdb_s80; u8 path, per20_bitmap; @@ -6922,13 +7063,18 @@ static void rtw89_phy_edcca_log(struct rtw89_dev *rtwdev) if (!rtw89_debug_is_enabled(rtwdev, RTW89_DBG_EDCCA)) return; + if (bb->phy_idx == RTW89_PHY_1) + edcca_p_regs = &edcca_regs->p[RTW89_PHY_1]; + else + edcca_p_regs = &edcca_regs->p[RTW89_PHY_0]; + if (rtwdev->chip->chip_id == RTL8922A) rtw89_phy_write32_mask(rtwdev, edcca_regs->rpt_sel_be, edcca_regs->rpt_sel_be_mask, 0); - rtw89_phy_write32_mask(rtwdev, edcca_regs->rpt_sel, - edcca_regs->rpt_sel_mask, 0); - tmp = rtw89_phy_read32(rtwdev, edcca_regs->rpt_b); + rtw89_phy_write32_mask(rtwdev, edcca_p_regs->rpt_sel, + edcca_p_regs->rpt_sel_mask, 0); + tmp = rtw89_phy_read32(rtwdev, edcca_p_regs->rpt_b); path = u32_get_bits(tmp, B_EDCCA_RPT_B_PATH_MASK); flag_s80 = u32_get_bits(tmp, B_EDCCA_RPT_B_S80); flag_s40 = u32_get_bits(tmp, B_EDCCA_RPT_B_S40); @@ -6939,19 +7085,19 @@ static void rtw89_phy_edcca_log(struct rtw89_dev *rtwdev) pwdb_p20 = u32_get_bits(tmp, MASKBYTE2); pwdb_fb = u32_get_bits(tmp, MASKBYTE3); - rtw89_phy_write32_mask(rtwdev, edcca_regs->rpt_sel, - edcca_regs->rpt_sel_mask, 4); - tmp = rtw89_phy_read32(rtwdev, edcca_regs->rpt_b); + rtw89_phy_write32_mask(rtwdev, edcca_p_regs->rpt_sel, + edcca_p_regs->rpt_sel_mask, 4); + tmp = rtw89_phy_read32(rtwdev, edcca_p_regs->rpt_b); pwdb_s80 = u32_get_bits(tmp, MASKBYTE1); pwdb_s40 = u32_get_bits(tmp, MASKBYTE2); - per20_bitmap = rtw89_phy_read32_mask(rtwdev, edcca_regs->rpt_a, + per20_bitmap = rtw89_phy_read32_mask(rtwdev, edcca_p_regs->rpt_a, MASKBYTE0); if (rtwdev->chip->chip_id == RTL8922A) { rtw89_phy_write32_mask(rtwdev, edcca_regs->rpt_sel_be, edcca_regs->rpt_sel_be_mask, 4); - tmp = rtw89_phy_read32(rtwdev, edcca_regs->rpt_b); + tmp = rtw89_phy_read32(rtwdev, edcca_p_regs->rpt_b); pwdb[0] = u32_get_bits(tmp, MASKBYTE3); pwdb[1] = u32_get_bits(tmp, MASKBYTE2); pwdb[2] = u32_get_bits(tmp, MASKBYTE1); @@ -6959,33 +7105,33 @@ static void rtw89_phy_edcca_log(struct rtw89_dev *rtwdev) rtw89_phy_write32_mask(rtwdev, edcca_regs->rpt_sel_be, edcca_regs->rpt_sel_be_mask, 5); - tmp = rtw89_phy_read32(rtwdev, edcca_regs->rpt_b); + tmp = rtw89_phy_read32(rtwdev, edcca_p_regs->rpt_b); pwdb[4] = u32_get_bits(tmp, MASKBYTE3); pwdb[5] = u32_get_bits(tmp, MASKBYTE2); pwdb[6] = u32_get_bits(tmp, MASKBYTE1); pwdb[7] = u32_get_bits(tmp, MASKBYTE0); } else { - rtw89_phy_write32_mask(rtwdev, edcca_regs->rpt_sel, - edcca_regs->rpt_sel_mask, 0); - tmp = 
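
[Editorial sketch] rtw89_phy_config_edcca() backs up the current EDCCA thresholds on scan entry, forces them to EDCCA_MAX (effectively the most permissive setting while scanning), and restores the backup on scan exit, now per baseband context. With the register I/O reduced to a plain struct:

#include <stdio.h>

#define EDCCA_MAX 0x7f	/* illustrative; matches the idea, not verified */

struct edcca_regs { int a, p, ppdu; };
struct edcca_bak { int a, p, ppdu; };

static void config_edcca(struct edcca_regs *regs, struct edcca_bak *bak,
			 int scan)
{
	if (scan) {
		/* Save current levels, then open the thresholds wide. */
		bak->a = regs->a;
		bak->p = regs->p;
		bak->ppdu = regs->ppdu;
		regs->a = regs->p = regs->ppdu = EDCCA_MAX;
	} else {
		/* Scan done: put the saved levels back. */
		regs->a = bak->a;
		regs->p = bak->p;
		regs->ppdu = bak->ppdu;
	}
}

int main(void)
{
	struct edcca_regs regs = { 10, 20, 30 };
	struct edcca_bak bak = { 0 };

	config_edcca(&regs, &bak, 1);
	printf("scan:    a=%d\n", regs.a);	/* forced to EDCCA_MAX */
	config_edcca(&regs, &bak, 0);
	printf("restore: a=%d\n", regs.a);	/* back to 10 */
	return 0;
}
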
rtw89_phy_read32(rtwdev, edcca_regs->rpt_a); + rtw89_phy_write32_mask(rtwdev, edcca_p_regs->rpt_sel, + edcca_p_regs->rpt_sel_mask, 0); + tmp = rtw89_phy_read32(rtwdev, edcca_p_regs->rpt_a); pwdb[0] = u32_get_bits(tmp, MASKBYTE3); pwdb[1] = u32_get_bits(tmp, MASKBYTE2); - rtw89_phy_write32_mask(rtwdev, edcca_regs->rpt_sel, - edcca_regs->rpt_sel_mask, 1); - tmp = rtw89_phy_read32(rtwdev, edcca_regs->rpt_a); + rtw89_phy_write32_mask(rtwdev, edcca_p_regs->rpt_sel, + edcca_p_regs->rpt_sel_mask, 1); + tmp = rtw89_phy_read32(rtwdev, edcca_p_regs->rpt_a); pwdb[2] = u32_get_bits(tmp, MASKBYTE3); pwdb[3] = u32_get_bits(tmp, MASKBYTE2); - rtw89_phy_write32_mask(rtwdev, edcca_regs->rpt_sel, - edcca_regs->rpt_sel_mask, 2); - tmp = rtw89_phy_read32(rtwdev, edcca_regs->rpt_a); + rtw89_phy_write32_mask(rtwdev, edcca_p_regs->rpt_sel, + edcca_p_regs->rpt_sel_mask, 2); + tmp = rtw89_phy_read32(rtwdev, edcca_p_regs->rpt_a); pwdb[4] = u32_get_bits(tmp, MASKBYTE3); pwdb[5] = u32_get_bits(tmp, MASKBYTE2); - rtw89_phy_write32_mask(rtwdev, edcca_regs->rpt_sel, - edcca_regs->rpt_sel_mask, 3); - tmp = rtw89_phy_read32(rtwdev, edcca_regs->rpt_a); + rtw89_phy_write32_mask(rtwdev, edcca_p_regs->rpt_sel, + edcca_p_regs->rpt_sel_mask, 3); + tmp = rtw89_phy_read32(rtwdev, edcca_p_regs->rpt_a); pwdb[6] = u32_get_bits(tmp, MASKBYTE3); pwdb[7] = u32_get_bits(tmp, MASKBYTE2); } @@ -7007,9 +7153,10 @@ static void rtw89_phy_edcca_log(struct rtw89_dev *rtwdev) pwdb_fb, pwdb_p20, pwdb_s20, pwdb_s40, pwdb_s80); } -static u8 rtw89_phy_edcca_get_thre_by_rssi(struct rtw89_dev *rtwdev) +static u8 rtw89_phy_edcca_get_thre_by_rssi(struct rtw89_dev *rtwdev, + struct rtw89_bb_ctx *bb) { - struct rtw89_phy_ch_info *ch_info = &rtwdev->ch_info; + struct rtw89_phy_ch_info *ch_info = &bb->ch_info; bool is_linked = rtwdev->total_sta_assoc > 0; u8 rssi_min = ch_info->rssi_min >> 1; u8 edcca_thre; @@ -7025,13 +7172,13 @@ static u8 rtw89_phy_edcca_get_thre_by_rssi(struct rtw89_dev *rtwdev) return edcca_thre; } -void rtw89_phy_edcca_thre_calc(struct rtw89_dev *rtwdev) +void rtw89_phy_edcca_thre_calc(struct rtw89_dev *rtwdev, struct rtw89_bb_ctx *bb) { const struct rtw89_edcca_regs *edcca_regs = rtwdev->chip->edcca_regs; - struct rtw89_edcca_bak *edcca_bak = &rtwdev->hal.edcca_bak; + struct rtw89_edcca_bak *edcca_bak = &bb->edcca_bak; u8 th; - th = rtw89_phy_edcca_get_thre_by_rssi(rtwdev); + th = rtw89_phy_edcca_get_thre_by_rssi(rtwdev, bb); if (th == edcca_bak->th_old) return; @@ -7040,23 +7187,33 @@ void rtw89_phy_edcca_thre_calc(struct rtw89_dev *rtwdev) rtw89_debug(rtwdev, RTW89_DBG_EDCCA, "[EDCCA]: Normal Mode, EDCCA_th = %d\n", th); - rtw89_phy_write32_mask(rtwdev, edcca_regs->edcca_level, - edcca_regs->edcca_mask, th); - rtw89_phy_write32_mask(rtwdev, edcca_regs->edcca_level, - edcca_regs->edcca_p_mask, th); - rtw89_phy_write32_mask(rtwdev, edcca_regs->ppdu_level, - edcca_regs->ppdu_mask, th); + rtw89_phy_write32_idx(rtwdev, edcca_regs->edcca_level, + edcca_regs->edcca_mask, th, bb->phy_idx); + rtw89_phy_write32_idx(rtwdev, edcca_regs->edcca_level, + edcca_regs->edcca_p_mask, th, bb->phy_idx); + rtw89_phy_write32_idx(rtwdev, edcca_regs->ppdu_level, + edcca_regs->ppdu_mask, th, bb->phy_idx); +} + +static +void __rtw89_phy_edcca_track(struct rtw89_dev *rtwdev, struct rtw89_bb_ctx *bb) +{ + rtw89_debug(rtwdev, RTW89_DBG_EDCCA, "BB-%d edcca track\n", bb->phy_idx); + + rtw89_phy_edcca_thre_calc(rtwdev, bb); + rtw89_phy_edcca_log(rtwdev, bb); } void rtw89_phy_edcca_track(struct rtw89_dev *rtwdev) { struct rtw89_hal *hal = &rtwdev->hal; + struct 
rtw89_bb_ctx *bb; if (hal->disabled_dm_bitmap & BIT(RTW89_DM_DYNAMIC_EDCCA)) return; - rtw89_phy_edcca_thre_calc(rtwdev); - rtw89_phy_edcca_log(rtwdev); + rtw89_for_each_active_bb(rtwdev, bb) + __rtw89_phy_edcca_track(rtwdev, bb); } enum rtw89_rf_path_bit rtw89_phy_get_kpath(struct rtw89_dev *rtwdev, diff --git a/drivers/net/wireless/realtek/rtw89/phy.h b/drivers/net/wireless/realtek/rtw89/phy.h index 08b635c93ac3e..518a100375fb3 100644 --- a/drivers/net/wireless/realtek/rtw89/phy.h +++ b/drivers/net/wireless/realtek/rtw89/phy.h @@ -835,8 +835,8 @@ s8 rtw89_phy_read_txpwr_byrate(struct rtw89_dev *rtwdev, u8 band, u8 bw, void rtw89_phy_ant_gain_init(struct rtw89_dev *rtwdev); s16 rtw89_phy_ant_gain_pwr_offset(struct rtw89_dev *rtwdev, const struct rtw89_chan *chan); -void rtw89_print_ant_gain(struct seq_file *m, struct rtw89_dev *rtwdev, - const struct rtw89_chan *chan); +int rtw89_print_ant_gain(struct rtw89_dev *rtwdev, char *buf, size_t bufsz, + const struct rtw89_chan *chan); void rtw89_phy_load_txpwr_byrate(struct rtw89_dev *rtwdev, const struct rtw89_txpwr_table *tbl); s8 rtw89_phy_read_txpwr_limit(struct rtw89_dev *rtwdev, u8 band, @@ -914,6 +914,13 @@ static inline s8 rtw89_phy_txpwr_rf_to_bb(struct rtw89_dev *rtwdev, s8 txpwr_rf) return txpwr_rf << (chip->txpwr_factor_bb - chip->txpwr_factor_rf); } +static inline s8 rtw89_phy_txpwr_bb_to_rf(struct rtw89_dev *rtwdev, s8 txpwr_bb) +{ + const struct rtw89_chip_info *chip = rtwdev->chip; + + return txpwr_bb >> (chip->txpwr_factor_bb - chip->txpwr_factor_rf); +} + static inline s8 rtw89_phy_txpwr_rf_to_mac(struct rtw89_dev *rtwdev, s8 txpwr_rf) { const struct rtw89_chip_info *chip = rtwdev->chip; @@ -978,20 +985,20 @@ void rtw89_phy_rfk_tssi_fill_fwcmd_tmeter_tbl(struct rtw89_dev *rtwdev, const struct rtw89_chan *chan, struct rtw89_h2c_rf_tssi *h2c); void rtw89_phy_cfo_track(struct rtw89_dev *rtwdev); -void rtw89_phy_cfo_track_work(struct work_struct *work); +void rtw89_phy_cfo_track_work(struct wiphy *wiphy, struct wiphy_work *work); void rtw89_phy_cfo_parse(struct rtw89_dev *rtwdev, s16 cfo_val, struct rtw89_rx_phy_ppdu *phy_ppdu); void rtw89_phy_stat_track(struct rtw89_dev *rtwdev); void rtw89_phy_env_monitor_track(struct rtw89_dev *rtwdev); void rtw89_phy_set_phy_regs(struct rtw89_dev *rtwdev, u32 addr, u32 mask, u32 val); -void rtw89_phy_dig_reset(struct rtw89_dev *rtwdev); +void rtw89_phy_dig_reset(struct rtw89_dev *rtwdev, struct rtw89_bb_ctx *bb); void rtw89_phy_dig(struct rtw89_dev *rtwdev); void rtw89_phy_tx_path_div_track(struct rtw89_dev *rtwdev); void rtw89_phy_antdiv_parse(struct rtw89_dev *rtwdev, struct rtw89_rx_phy_ppdu *phy_ppdu); void rtw89_phy_antdiv_track(struct rtw89_dev *rtwdev); -void rtw89_phy_antdiv_work(struct work_struct *work); +void rtw89_phy_antdiv_work(struct wiphy *wiphy, struct wiphy_work *work); void rtw89_phy_set_bss_color(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link); void rtw89_phy_tssi_ctrl_set_bandedge_cfg(struct rtw89_dev *rtwdev, @@ -1002,9 +1009,10 @@ void rtw89_phy_ul_tb_ctrl_track(struct rtw89_dev *rtwdev); u8 rtw89_encode_chan_idx(struct rtw89_dev *rtwdev, u8 central_ch, u8 band); void rtw89_decode_chan_idx(struct rtw89_dev *rtwdev, u8 chan_idx, u8 *ch, enum nl80211_band *band); -void rtw89_phy_config_edcca(struct rtw89_dev *rtwdev, bool scan); +void rtw89_phy_config_edcca(struct rtw89_dev *rtwdev, + struct rtw89_bb_ctx *bb, bool scan); void rtw89_phy_edcca_track(struct rtw89_dev *rtwdev); -void rtw89_phy_edcca_thre_calc(struct rtw89_dev *rtwdev); +void 
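
[Editorial sketch] The new rtw89_phy_txpwr_bb_to_rf() inline in phy.h is the inverse of the existing rtw89_phy_txpwr_rf_to_bb(): both convert between fixed-point TX-power scales by shifting with the difference of the two factor exponents. The factor values below are illustrative, not taken from any chip.

#include <stdio.h>

/* factor_bb/factor_rf are exponents: a value in 2^-factor dB units. */
static signed char rf_to_bb(signed char txpwr_rf, int fb, int fr)
{
	return txpwr_rf << (fb - fr);
}

static signed char bb_to_rf(signed char txpwr_bb, int fb, int fr)
{
	return txpwr_bb >> (fb - fr);
}

int main(void)
{
	int factor_bb = 2, factor_rf = 1;	/* quarter-dB vs half-dB */
	signed char rf = 10;			/* 5 dB in half-dB units */
	signed char bb = rf_to_bb(rf, factor_bb, factor_rf);

	/* Round-trips exactly when no low bits are lost: 10 -> 20 -> 10 */
	printf("rf=%d -> bb=%d -> rf=%d\n", rf, bb,
	       bb_to_rf(bb, factor_bb, factor_rf));
	return 0;
}
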
rtw89_phy_edcca_thre_calc(struct rtw89_dev *rtwdev, struct rtw89_bb_ctx *bb); enum rtw89_rf_path_bit rtw89_phy_get_kpath(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx); enum rtw89_rf_path rtw89_phy_get_syn_sel(struct rtw89_dev *rtwdev, diff --git a/drivers/net/wireless/realtek/rtw89/ps.c b/drivers/net/wireless/realtek/rtw89/ps.c index 96ea04d90cd36..ac46a7baa00db 100644 --- a/drivers/net/wireless/realtek/rtw89/ps.c +++ b/drivers/net/wireless/realtek/rtw89/ps.c @@ -113,7 +113,7 @@ static void __rtw89_leave_lps(struct rtw89_dev *rtwdev, void rtw89_leave_ps_mode(struct rtw89_dev *rtwdev) { - lockdep_assert_held(&rtwdev->mutex); + lockdep_assert_wiphy(rtwdev->hw->wiphy); __rtw89_leave_ps_mode(rtwdev); } @@ -125,7 +125,7 @@ void rtw89_enter_lps(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, bool can_ps_mode = true; unsigned int link_id; - lockdep_assert_held(&rtwdev->mutex); + lockdep_assert_wiphy(rtwdev->hw->wiphy); if (test_and_set_bit(RTW89_FLAG_LEISURE_PS, rtwdev->flags)) return; @@ -162,7 +162,7 @@ void rtw89_leave_lps(struct rtw89_dev *rtwdev) struct rtw89_vif *rtwvif; unsigned int link_id; - lockdep_assert_held(&rtwdev->mutex); + lockdep_assert_wiphy(rtwdev->hw->wiphy); if (!test_and_clear_bit(RTW89_FLAG_LEISURE_PS, rtwdev->flags)) return; diff --git a/drivers/net/wireless/realtek/rtw89/reg.h b/drivers/net/wireless/realtek/rtw89/reg.h index 10d0efa7a58ef..c776954ad360d 100644 --- a/drivers/net/wireless/realtek/rtw89/reg.h +++ b/drivers/net/wireless/realtek/rtw89/reg.h @@ -6618,6 +6618,13 @@ #define B_BE_RTS_LIMIT_IN_OFDM6 BIT(1) #define B_BE_CHECK_CCK_EN BIT(0) +#define R_BE_TXCNT 0x1082C +#define R_BE_TXCNT_C1 0x1482C +#define B_BE_ADD_TXCNT_BY BIT(31) +#define B_BE_TOTAL_TC_OPT BIT(30) +#define B_BE_S_TXCNT_LMT_MASK GENMASK(29, 24) +#define B_BE_L_TXCNT_LMT_MASK GENMASK(21, 16) + #define R_BE_MBSSID_DROP_0 0x1083C #define R_BE_MBSSID_DROP_0_C1 0x1483C #define B_BE_GI_LTF_FB_SEL BIT(30) @@ -7095,6 +7102,10 @@ #define B_BE_MACLBK_RDY_NUM_MASK GENMASK(7, 3) #define B_BE_MACLBK_EN BIT(0) +#define R_BE_CLIENT_OM_CTRL 0x11040 +#define R_BE_CLIENT_OM_CTRL_C1 0x15040 +#define B_BE_TRIG_DIS_EHTTB BIT(24) + #define R_BE_WMAC_NAV_CTL 0x11080 #define R_BE_WMAC_NAV_CTL_C1 0x15080 #define B_BE_WMAC_NAV_UPPER_EN BIT(26) @@ -8157,6 +8168,8 @@ #define B_EDCCA_RPT_B_S40 BIT(4) #define B_EDCCA_RPT_B_S80 BIT(3) #define B_EDCCA_RPT_B_PATH_MASK GENMASK(2, 1) +#define R_EDCCA_RPT_P1_A 0x1740 +#define R_EDCCA_RPT_P1_B 0x1744 #define R_SWSI_V1 0x174C #define B_SWSI_W_BUSY_V1 BIT(24) #define B_SWSI_R_BUSY_V1 BIT(25) @@ -8222,6 +8235,7 @@ #define B_TXCKEN_FORCE_ALL GENMASK(24, 0) #define R_EDCCA_RPT_SEL 0x20CC #define B_EDCCA_RPT_SEL_MSK GENMASK(2, 0) +#define B_EDCCA_RPT_SEL_P1_MSK GENMASK(5, 3) #define R_ADC_FIFO 0x20fc #define B_ADC_FIFO_RST GENMASK(31, 24) #define B_ADC_FIFO_RXK GENMASK(31, 16) @@ -8291,6 +8305,8 @@ #define B_P1_EN_SOUND_WO_NDP BIT(1) #define R_EDCCA_RPT_A_BE 0x2E38 #define R_EDCCA_RPT_B_BE 0x2E3C +#define R_EDCCA_RPT_P1_A_BE 0x2E40 +#define R_EDCCA_RPT_P1_B_BE 0x2E44 #define R_S1_HW_SI_DIS 0x3200 #define B_S1_HW_SI_DIS_W_R_TRIG GENMASK(30, 28) #define R_P1_RXCK 0x32A0 @@ -9169,6 +9185,16 @@ #define B_IQKINF2_FCNT GENMASK(23, 16) #define B_IQKINF2_KCNT GENMASK(15, 8) #define B_IQKINF2_NCTLV GENMASK(7, 0) +#define R_TXAGC_REF_DBM_RF1_P0 0xBC04 +#define B_TXAGC_OFDM_REF_DBM_RF1_P0 GENMASK(10, 2) +#define B_TXAGC_CCK_REF_DBM_RF1_P0 GENMASK(19, 11) +#define R_TSSI_K_RF1_P0 0xBC28 +#define B_TSSI_K_OFDM_RF1_P0 GENMASK(9, 0) +#define R_TXAGC_REF_DBM_RF1_P1 0xBD04 +#define 
B_TXAGC_OFDM_REF_DBM_RF1_P1 GENMASK(10, 2) +#define B_TXAGC_CCK_REF_DBM_RF1_P1 GENMASK(19, 11) +#define R_TSSI_K_RF1_P1 0xBD28 +#define B_TSSI_K_OFDM_RF1_P1 GENMASK(9, 0) #define R_RFK_ST 0xBFF8 #define R_DCOF0 0xC000 #define B_DCOF0_RST BIT(17) @@ -9338,16 +9364,18 @@ #define R_TSSI_MAP_OFST_P1 0xE720 #define B_TSSI_MAP_OFST_OFDM GENMASK(17, 9) #define B_TSSI_MAP_OFST_CCK GENMASK(26, 18) -#define R_TXAGC_REF0_P0 0xE628 -#define R_TXAGC_REF0_P1 0xE728 -#define B_TXAGC_REF0_OFDM_DBM GENMASK(8, 0) -#define B_TXAGC_REF0_CCK_DBM GENMASK(17, 9) -#define B_TXAGC_REF0_OFDM_CW GENMASK(26, 18) -#define R_TXAGC_REF1_P0 0xE62C -#define R_TXAGC_REF1_P1 0xE72C -#define B_TXAGC_REF1_CCK_CW GENMASK(8, 0) +#define R_TXAGC_REF_DBM_P0 0xE628 +#define B_TXAGC_OFDM_REF_DBM_P0 GENMASK(8, 0) +#define B_TXAGC_CCK_REF_DBM_P0 GENMASK(17, 9) +#define R_TSSI_K_P0 0xE6A0 +#define B_TSSI_K_OFDM_P0 GENMASK(29, 20) #define R_TXPWR_RSTB 0xE70C #define B_TXPWR_RSTB BIT(16) +#define R_TXAGC_REF_DBM_P1 0xE728 +#define B_TXAGC_OFDM_REF_DBM_P1 GENMASK(8, 0) +#define B_TXAGC_CCK_REF_DBM_P1 GENMASK(17, 9) +#define R_TSSI_K_P1 0xE7A0 +#define B_TSSI_K_OFDM_P1 GENMASK(29, 20) /* WiFi CPU local domain */ #define R_AX_WDT_CTRL 0x0040 diff --git a/drivers/net/wireless/realtek/rtw89/regd.c b/drivers/net/wireless/realtek/rtw89/regd.c index 80b2f74589eb9..655323a796082 100644 --- a/drivers/net/wireless/realtek/rtw89/regd.c +++ b/drivers/net/wireless/realtek/rtw89/regd.c @@ -7,257 +7,266 @@ #include "ps.h" #include "util.h" -#define COUNTRY_REGD(_alpha2, _txpwr_regd...) \ - {.alpha2 = (_alpha2), \ - .txpwr_regd = {_txpwr_regd}, \ +static +void rtw89_regd_notifier(struct wiphy *wiphy, struct regulatory_request *request); + +#define COUNTRY_REGD(_alpha2, _rule_2ghz, _rule_5ghz, _rule_6ghz, _fmap) \ + { \ + .alpha2 = _alpha2, \ + .txpwr_regd[RTW89_BAND_2G] = _rule_2ghz, \ + .txpwr_regd[RTW89_BAND_5G] = _rule_5ghz, \ + .txpwr_regd[RTW89_BAND_6G] = _rule_6ghz, \ + .func_bitmap = { _fmap, }, \ } +static_assert(BITS_PER_TYPE(unsigned long) >= NUM_OF_RTW89_REGD_FUNC); + static const struct rtw89_regd rtw89_ww_regd = - COUNTRY_REGD("00", RTW89_WW, RTW89_WW, RTW89_WW); + COUNTRY_REGD("00", RTW89_WW, RTW89_WW, RTW89_WW, 0x0); static const struct rtw89_regd rtw89_regd_map[] = { - COUNTRY_REGD("AR", RTW89_MEXICO, RTW89_MEXICO, RTW89_FCC), - COUNTRY_REGD("BO", RTW89_FCC, RTW89_FCC, RTW89_NA), - COUNTRY_REGD("BR", RTW89_FCC, RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("CL", RTW89_CHILE, RTW89_CHILE, RTW89_CHILE), - COUNTRY_REGD("CO", RTW89_FCC, RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("CR", RTW89_FCC, RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("EC", RTW89_FCC, RTW89_FCC, RTW89_NA), - COUNTRY_REGD("SV", RTW89_FCC, RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("GT", RTW89_FCC, RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("HN", RTW89_FCC, RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("MX", RTW89_MEXICO, RTW89_MEXICO, RTW89_FCC), - COUNTRY_REGD("NI", RTW89_FCC, RTW89_FCC, RTW89_NA), - COUNTRY_REGD("PA", RTW89_FCC, RTW89_FCC, RTW89_NA), - COUNTRY_REGD("PY", RTW89_FCC, RTW89_FCC, RTW89_NA), - COUNTRY_REGD("PE", RTW89_FCC, RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("US", RTW89_FCC, RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("UY", RTW89_FCC, RTW89_FCC, RTW89_NA), - COUNTRY_REGD("VE", RTW89_FCC, RTW89_FCC, RTW89_NA), - COUNTRY_REGD("PR", RTW89_FCC, RTW89_FCC, RTW89_NA), - COUNTRY_REGD("DO", RTW89_FCC, RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("AT", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("BE", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("CY", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), - 
COUNTRY_REGD("CZ", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("DK", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("EE", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("FI", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("FR", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("DE", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("GR", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("HU", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("IS", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("IE", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("IT", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("LV", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("LI", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("LT", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("LU", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("MT", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("MC", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("NL", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("NO", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("PL", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("PT", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("SK", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("SI", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("ES", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("SE", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("CH", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("GB", RTW89_UK, RTW89_UK, RTW89_UK), - COUNTRY_REGD("AL", RTW89_ETSI, RTW89_ETSI, RTW89_NA), - COUNTRY_REGD("AZ", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("BH", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("BA", RTW89_ETSI, RTW89_ETSI, RTW89_NA), - COUNTRY_REGD("BG", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("HR", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("EG", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("GH", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("IQ", RTW89_ETSI, RTW89_ETSI, RTW89_NA), - COUNTRY_REGD("IL", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("JO", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("KZ", RTW89_ETSI, RTW89_ETSI, RTW89_NA), - COUNTRY_REGD("KE", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("KW", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("KG", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("LB", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("LS", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("MK", RTW89_ETSI, RTW89_ETSI, RTW89_NA), - COUNTRY_REGD("MA", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("MZ", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("NA", RTW89_ETSI, RTW89_ETSI, RTW89_NA), - COUNTRY_REGD("NG", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("OM", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("QA", RTW89_QATAR, RTW89_QATAR, RTW89_QATAR), - COUNTRY_REGD("RO", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("RU", RTW89_ETSI, RTW89_ETSI, RTW89_NA), - COUNTRY_REGD("SA", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("SN", RTW89_ETSI, RTW89_ETSI, RTW89_NA), - COUNTRY_REGD("RS", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("ME", RTW89_ETSI, RTW89_ETSI, RTW89_NA), - COUNTRY_REGD("ZA", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("TR", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("UA", RTW89_UKRAINE, RTW89_UKRAINE, RTW89_UKRAINE), - COUNTRY_REGD("AE", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("YE", 
RTW89_ETSI, RTW89_ETSI, RTW89_NA), - COUNTRY_REGD("ZW", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("BD", RTW89_ETSI, RTW89_ETSI, RTW89_NA), - COUNTRY_REGD("KH", RTW89_ETSI, RTW89_ETSI, RTW89_NA), - COUNTRY_REGD("CN", RTW89_CN, RTW89_CN, RTW89_CN), - COUNTRY_REGD("HK", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("IN", RTW89_ETSI, RTW89_ETSI, RTW89_NA), - COUNTRY_REGD("ID", RTW89_ETSI, RTW89_ETSI, RTW89_NA), - COUNTRY_REGD("KR", RTW89_KCC, RTW89_KCC, RTW89_KCC), - COUNTRY_REGD("MY", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("PK", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("PH", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("SG", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("LK", RTW89_ETSI, RTW89_ETSI, RTW89_NA), - COUNTRY_REGD("TW", RTW89_FCC, RTW89_FCC, RTW89_ETSI), - COUNTRY_REGD("TH", RTW89_THAILAND, RTW89_THAILAND, RTW89_THAILAND), - COUNTRY_REGD("VN", RTW89_ETSI, RTW89_ETSI, RTW89_NA), - COUNTRY_REGD("AU", RTW89_ACMA, RTW89_ACMA, RTW89_ACMA), - COUNTRY_REGD("NZ", RTW89_ACMA, RTW89_ACMA, RTW89_ACMA), - COUNTRY_REGD("PG", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("CA", RTW89_IC, RTW89_IC, RTW89_IC), - COUNTRY_REGD("JP", RTW89_MKK, RTW89_MKK, RTW89_MKK), - COUNTRY_REGD("JM", RTW89_FCC, RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("AN", RTW89_FCC, RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("TT", RTW89_FCC, RTW89_FCC, RTW89_NA), - COUNTRY_REGD("TN", RTW89_ETSI, RTW89_ETSI, RTW89_NA), - COUNTRY_REGD("AF", RTW89_ETSI, RTW89_ETSI, RTW89_NA), - COUNTRY_REGD("DZ", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("AS", RTW89_FCC, RTW89_FCC, RTW89_NA), - COUNTRY_REGD("AD", RTW89_ETSI, RTW89_ETSI, RTW89_NA), - COUNTRY_REGD("AO", RTW89_ETSI, RTW89_ETSI, RTW89_NA), - COUNTRY_REGD("AI", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("AQ", RTW89_ETSI, RTW89_ETSI, RTW89_NA), - COUNTRY_REGD("AG", RTW89_FCC, RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("AM", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("AW", RTW89_FCC, RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("BS", RTW89_FCC, RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("BB", RTW89_FCC, RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("BY", RTW89_ETSI, RTW89_ETSI, RTW89_NA), - COUNTRY_REGD("BZ", RTW89_FCC, RTW89_FCC, RTW89_NA), - COUNTRY_REGD("BJ", RTW89_ETSI, RTW89_ETSI, RTW89_NA), - COUNTRY_REGD("BM", RTW89_FCC, RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("BT", RTW89_ETSI, RTW89_ETSI, RTW89_NA), - COUNTRY_REGD("BW", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("BV", RTW89_ETSI, RTW89_ETSI, RTW89_NA), - COUNTRY_REGD("IO", RTW89_ETSI, RTW89_ETSI, RTW89_NA), - COUNTRY_REGD("VG", RTW89_FCC, RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("BN", RTW89_ETSI, RTW89_ETSI, RTW89_NA), - COUNTRY_REGD("BF", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("MM", RTW89_ETSI, RTW89_ETSI, RTW89_NA), - COUNTRY_REGD("BI", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("CM", RTW89_ETSI, RTW89_ETSI, RTW89_NA), - COUNTRY_REGD("CV", RTW89_ETSI, RTW89_ETSI, RTW89_NA), - COUNTRY_REGD("KY", RTW89_FCC, RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("CF", RTW89_ETSI, RTW89_ETSI, RTW89_NA), - COUNTRY_REGD("TD", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("CX", RTW89_ACMA, RTW89_ACMA, RTW89_NA), - COUNTRY_REGD("CC", RTW89_ACMA, RTW89_ACMA, RTW89_NA), - COUNTRY_REGD("KM", RTW89_ETSI, RTW89_ETSI, RTW89_NA), - COUNTRY_REGD("CG", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("CD", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("CK", RTW89_ETSI, RTW89_ETSI, RTW89_NA), - COUNTRY_REGD("CI", RTW89_ETSI, RTW89_ETSI, RTW89_NA), - COUNTRY_REGD("DJ", 
RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("DM", RTW89_FCC, RTW89_FCC, RTW89_NA), - COUNTRY_REGD("GQ", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("ER", RTW89_ETSI, RTW89_ETSI, RTW89_NA), - COUNTRY_REGD("ET", RTW89_ETSI, RTW89_ETSI, RTW89_NA), - COUNTRY_REGD("FK", RTW89_ETSI, RTW89_ETSI, RTW89_NA), - COUNTRY_REGD("FO", RTW89_ETSI, RTW89_ETSI, RTW89_NA), - COUNTRY_REGD("FJ", RTW89_FCC, RTW89_FCC, RTW89_NA), - COUNTRY_REGD("GF", RTW89_ETSI, RTW89_ETSI, RTW89_NA), - COUNTRY_REGD("PF", RTW89_ETSI, RTW89_ETSI, RTW89_NA), - COUNTRY_REGD("TF", RTW89_ETSI, RTW89_ETSI, RTW89_NA), - COUNTRY_REGD("GA", RTW89_ETSI, RTW89_ETSI, RTW89_NA), - COUNTRY_REGD("GM", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("GE", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("GI", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("GL", RTW89_ETSI, RTW89_ETSI, RTW89_NA), - COUNTRY_REGD("GD", RTW89_FCC, RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("GP", RTW89_ETSI, RTW89_ETSI, RTW89_NA), - COUNTRY_REGD("GU", RTW89_FCC, RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("GG", RTW89_ETSI, RTW89_ETSI, RTW89_NA), - COUNTRY_REGD("GN", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("GW", RTW89_ETSI, RTW89_ETSI, RTW89_NA), - COUNTRY_REGD("GY", RTW89_FCC, RTW89_FCC, RTW89_NA), - COUNTRY_REGD("HT", RTW89_FCC, RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("HM", RTW89_ACMA, RTW89_ACMA, RTW89_NA), - COUNTRY_REGD("VA", RTW89_ETSI, RTW89_ETSI, RTW89_NA), - COUNTRY_REGD("IM", RTW89_ETSI, RTW89_ETSI, RTW89_NA), - COUNTRY_REGD("JE", RTW89_ETSI, RTW89_ETSI, RTW89_NA), - COUNTRY_REGD("KI", RTW89_ETSI, RTW89_ETSI, RTW89_NA), - COUNTRY_REGD("XK", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("LA", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("LR", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("LY", RTW89_ETSI, RTW89_ETSI, RTW89_NA), - COUNTRY_REGD("MO", RTW89_ETSI, RTW89_ETSI, RTW89_NA), - COUNTRY_REGD("MG", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("MW", RTW89_ETSI, RTW89_ETSI, RTW89_NA), - COUNTRY_REGD("MV", RTW89_ETSI, RTW89_ETSI, RTW89_NA), - COUNTRY_REGD("ML", RTW89_ETSI, RTW89_ETSI, RTW89_NA), - COUNTRY_REGD("MH", RTW89_FCC, RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("MQ", RTW89_ETSI, RTW89_ETSI, RTW89_NA), - COUNTRY_REGD("MR", RTW89_ETSI, RTW89_ETSI, RTW89_NA), - COUNTRY_REGD("MU", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("YT", RTW89_ETSI, RTW89_ETSI, RTW89_NA), - COUNTRY_REGD("FM", RTW89_FCC, RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("MD", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("MN", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("MS", RTW89_ETSI, RTW89_ETSI, RTW89_NA), - COUNTRY_REGD("NR", RTW89_ETSI, RTW89_ETSI, RTW89_NA), - COUNTRY_REGD("NP", RTW89_ETSI, RTW89_ETSI, RTW89_NA), - COUNTRY_REGD("NC", RTW89_ETSI, RTW89_ETSI, RTW89_NA), - COUNTRY_REGD("NE", RTW89_ETSI, RTW89_ETSI, RTW89_NA), - COUNTRY_REGD("NU", RTW89_ACMA, RTW89_ACMA, RTW89_NA), - COUNTRY_REGD("NF", RTW89_ACMA, RTW89_ACMA, RTW89_NA), - COUNTRY_REGD("MP", RTW89_FCC, RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("PW", RTW89_FCC, RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("RE", RTW89_ETSI, RTW89_ETSI, RTW89_NA), - COUNTRY_REGD("RW", RTW89_ETSI, RTW89_ETSI, RTW89_NA), - COUNTRY_REGD("SH", RTW89_ETSI, RTW89_ETSI, RTW89_NA), - COUNTRY_REGD("KN", RTW89_FCC, RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("LC", RTW89_FCC, RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("MF", RTW89_FCC, RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("SX", RTW89_FCC, RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("PM", RTW89_ETSI, RTW89_ETSI, RTW89_NA), - COUNTRY_REGD("VC", RTW89_FCC, 
RTW89_FCC, RTW89_NA), - COUNTRY_REGD("WS", RTW89_FCC, RTW89_FCC, RTW89_NA), - COUNTRY_REGD("SM", RTW89_ETSI, RTW89_ETSI, RTW89_NA), - COUNTRY_REGD("ST", RTW89_ETSI, RTW89_ETSI, RTW89_NA), - COUNTRY_REGD("SC", RTW89_FCC, RTW89_FCC, RTW89_NA), - COUNTRY_REGD("SL", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("SB", RTW89_ETSI, RTW89_ETSI, RTW89_NA), - COUNTRY_REGD("SO", RTW89_ETSI, RTW89_ETSI, RTW89_NA), - COUNTRY_REGD("GS", RTW89_ETSI, RTW89_ETSI, RTW89_NA), - COUNTRY_REGD("SR", RTW89_FCC, RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("SJ", RTW89_ETSI, RTW89_ETSI, RTW89_NA), - COUNTRY_REGD("SZ", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("TJ", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("TZ", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("TG", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("TK", RTW89_ACMA, RTW89_ACMA, RTW89_NA), - COUNTRY_REGD("TO", RTW89_ETSI, RTW89_ETSI, RTW89_NA), - COUNTRY_REGD("TM", RTW89_ETSI, RTW89_ETSI, RTW89_NA), - COUNTRY_REGD("TC", RTW89_ETSI, RTW89_ETSI, RTW89_NA), - COUNTRY_REGD("TV", RTW89_ETSI, RTW89_NA, RTW89_NA), - COUNTRY_REGD("UG", RTW89_ETSI, RTW89_ETSI, RTW89_NA), - COUNTRY_REGD("VI", RTW89_FCC, RTW89_FCC, RTW89_FCC), - COUNTRY_REGD("UZ", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI), - COUNTRY_REGD("VU", RTW89_ETSI, RTW89_ETSI, RTW89_NA), - COUNTRY_REGD("WF", RTW89_ETSI, RTW89_ETSI, RTW89_NA), - COUNTRY_REGD("EH", RTW89_ETSI, RTW89_ETSI, RTW89_NA), - COUNTRY_REGD("ZM", RTW89_ETSI, RTW89_ETSI, RTW89_NA), - COUNTRY_REGD("CU", RTW89_ETSI, RTW89_ETSI, RTW89_NA), - COUNTRY_REGD("IR", RTW89_ETSI, RTW89_ETSI, RTW89_NA), - COUNTRY_REGD("SY", RTW89_ETSI, RTW89_NA, RTW89_NA), - COUNTRY_REGD("SD", RTW89_ETSI, RTW89_ETSI, RTW89_NA), - COUNTRY_REGD("PS", RTW89_ETSI, RTW89_ETSI, RTW89_NA), + COUNTRY_REGD("AR", RTW89_MEXICO, RTW89_MEXICO, RTW89_FCC, 0x0), + COUNTRY_REGD("BO", RTW89_FCC, RTW89_FCC, RTW89_NA, 0x0), + COUNTRY_REGD("BR", RTW89_FCC, RTW89_FCC, RTW89_FCC, 0x0), + COUNTRY_REGD("CL", RTW89_CHILE, RTW89_CHILE, RTW89_CHILE, 0x0), + COUNTRY_REGD("CO", RTW89_FCC, RTW89_FCC, RTW89_FCC, 0x0), + COUNTRY_REGD("CR", RTW89_FCC, RTW89_FCC, RTW89_FCC, 0x0), + COUNTRY_REGD("EC", RTW89_FCC, RTW89_FCC, RTW89_NA, 0x0), + COUNTRY_REGD("SV", RTW89_FCC, RTW89_FCC, RTW89_FCC, 0x0), + COUNTRY_REGD("GT", RTW89_FCC, RTW89_FCC, RTW89_FCC, 0x0), + COUNTRY_REGD("HN", RTW89_FCC, RTW89_FCC, RTW89_FCC, 0x0), + COUNTRY_REGD("MX", RTW89_MEXICO, RTW89_MEXICO, RTW89_FCC, 0x0), + COUNTRY_REGD("NI", RTW89_FCC, RTW89_FCC, RTW89_NA, 0x0), + COUNTRY_REGD("PA", RTW89_FCC, RTW89_FCC, RTW89_NA, 0x0), + COUNTRY_REGD("PY", RTW89_FCC, RTW89_FCC, RTW89_NA, 0x0), + COUNTRY_REGD("PE", RTW89_FCC, RTW89_FCC, RTW89_FCC, 0x0), + COUNTRY_REGD("US", RTW89_FCC, RTW89_FCC, RTW89_FCC, 0x1), + COUNTRY_REGD("UY", RTW89_FCC, RTW89_FCC, RTW89_NA, 0x0), + COUNTRY_REGD("VE", RTW89_FCC, RTW89_FCC, RTW89_NA, 0x0), + COUNTRY_REGD("PR", RTW89_FCC, RTW89_FCC, RTW89_NA, 0x0), + COUNTRY_REGD("DO", RTW89_FCC, RTW89_FCC, RTW89_FCC, 0x0), + COUNTRY_REGD("AT", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x2), + COUNTRY_REGD("BE", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x2), + COUNTRY_REGD("CY", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x2), + COUNTRY_REGD("CZ", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x2), + COUNTRY_REGD("DK", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x2), + COUNTRY_REGD("EE", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x2), + COUNTRY_REGD("FI", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x2), + COUNTRY_REGD("FR", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x2), + COUNTRY_REGD("DE", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x2), + 
COUNTRY_REGD("GR", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x2), + COUNTRY_REGD("HU", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x2), + COUNTRY_REGD("IS", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x2), + COUNTRY_REGD("IE", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x2), + COUNTRY_REGD("IT", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x2), + COUNTRY_REGD("LV", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x2), + COUNTRY_REGD("LI", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x2), + COUNTRY_REGD("LT", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x2), + COUNTRY_REGD("LU", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x2), + COUNTRY_REGD("MT", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x2), + COUNTRY_REGD("MC", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x2), + COUNTRY_REGD("NL", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x2), + COUNTRY_REGD("NO", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x2), + COUNTRY_REGD("PL", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x2), + COUNTRY_REGD("PT", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x2), + COUNTRY_REGD("SK", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x2), + COUNTRY_REGD("SI", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x2), + COUNTRY_REGD("ES", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x2), + COUNTRY_REGD("SE", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x2), + COUNTRY_REGD("CH", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x2), + COUNTRY_REGD("GB", RTW89_UK, RTW89_UK, RTW89_UK, 0x0), + COUNTRY_REGD("AL", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0), + COUNTRY_REGD("AZ", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0), + COUNTRY_REGD("BH", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0), + COUNTRY_REGD("BA", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0), + COUNTRY_REGD("BG", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x2), + COUNTRY_REGD("HR", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x2), + COUNTRY_REGD("EG", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0), + COUNTRY_REGD("GH", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0), + COUNTRY_REGD("IQ", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0), + COUNTRY_REGD("IL", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0), + COUNTRY_REGD("JO", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0), + COUNTRY_REGD("KZ", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0), + COUNTRY_REGD("KE", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0), + COUNTRY_REGD("KW", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0), + COUNTRY_REGD("KG", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0), + COUNTRY_REGD("LB", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0), + COUNTRY_REGD("LS", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0), + COUNTRY_REGD("MK", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0), + COUNTRY_REGD("MA", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0), + COUNTRY_REGD("MZ", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0), + COUNTRY_REGD("NA", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0), + COUNTRY_REGD("NG", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0), + COUNTRY_REGD("OM", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0), + COUNTRY_REGD("QA", RTW89_QATAR, RTW89_QATAR, RTW89_QATAR, 0x0), + COUNTRY_REGD("RO", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x2), + COUNTRY_REGD("RU", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0), + COUNTRY_REGD("SA", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0), + COUNTRY_REGD("SN", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0), + COUNTRY_REGD("RS", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0), + COUNTRY_REGD("ME", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0), + COUNTRY_REGD("ZA", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0), + COUNTRY_REGD("TR", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0), + COUNTRY_REGD("UA", RTW89_UKRAINE, RTW89_UKRAINE, RTW89_UKRAINE, 0x0), + COUNTRY_REGD("AE", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0), + COUNTRY_REGD("YE", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0), + COUNTRY_REGD("ZW", RTW89_ETSI, 
RTW89_ETSI, RTW89_ETSI, 0x0), + COUNTRY_REGD("BD", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0), + COUNTRY_REGD("KH", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0), + COUNTRY_REGD("CN", RTW89_CN, RTW89_CN, RTW89_CN, 0x0), + COUNTRY_REGD("HK", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0), + COUNTRY_REGD("IN", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0), + COUNTRY_REGD("ID", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0), + COUNTRY_REGD("KR", RTW89_KCC, RTW89_KCC, RTW89_KCC, 0x1), + COUNTRY_REGD("MY", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0), + COUNTRY_REGD("PK", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0), + COUNTRY_REGD("PH", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0), + COUNTRY_REGD("SG", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0), + COUNTRY_REGD("LK", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0), + COUNTRY_REGD("TW", RTW89_FCC, RTW89_FCC, RTW89_ETSI, 0x0), + COUNTRY_REGD("TH", RTW89_THAILAND, RTW89_THAILAND, RTW89_THAILAND, 0x0), + COUNTRY_REGD("VN", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0), + COUNTRY_REGD("AU", RTW89_ACMA, RTW89_ACMA, RTW89_ACMA, 0x0), + COUNTRY_REGD("NZ", RTW89_ACMA, RTW89_ACMA, RTW89_ACMA, 0x0), + COUNTRY_REGD("PG", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0), + COUNTRY_REGD("CA", RTW89_IC, RTW89_IC, RTW89_IC, 0x1), + COUNTRY_REGD("JP", RTW89_MKK, RTW89_MKK, RTW89_MKK, 0x0), + COUNTRY_REGD("JM", RTW89_FCC, RTW89_FCC, RTW89_FCC, 0x0), + COUNTRY_REGD("AN", RTW89_FCC, RTW89_FCC, RTW89_FCC, 0x0), + COUNTRY_REGD("TT", RTW89_FCC, RTW89_FCC, RTW89_NA, 0x0), + COUNTRY_REGD("TN", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0), + COUNTRY_REGD("AF", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0), + COUNTRY_REGD("DZ", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0), + COUNTRY_REGD("AS", RTW89_FCC, RTW89_FCC, RTW89_NA, 0x0), + COUNTRY_REGD("AD", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0), + COUNTRY_REGD("AO", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0), + COUNTRY_REGD("AI", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0), + COUNTRY_REGD("AQ", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0), + COUNTRY_REGD("AG", RTW89_FCC, RTW89_FCC, RTW89_FCC, 0x0), + COUNTRY_REGD("AM", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0), + COUNTRY_REGD("AW", RTW89_FCC, RTW89_FCC, RTW89_FCC, 0x0), + COUNTRY_REGD("BS", RTW89_FCC, RTW89_FCC, RTW89_FCC, 0x0), + COUNTRY_REGD("BB", RTW89_FCC, RTW89_FCC, RTW89_FCC, 0x0), + COUNTRY_REGD("BY", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0), + COUNTRY_REGD("BZ", RTW89_FCC, RTW89_FCC, RTW89_NA, 0x0), + COUNTRY_REGD("BJ", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0), + COUNTRY_REGD("BM", RTW89_FCC, RTW89_FCC, RTW89_FCC, 0x0), + COUNTRY_REGD("BT", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0), + COUNTRY_REGD("BW", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0), + COUNTRY_REGD("BV", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0), + COUNTRY_REGD("IO", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0), + COUNTRY_REGD("VG", RTW89_FCC, RTW89_FCC, RTW89_FCC, 0x0), + COUNTRY_REGD("BN", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0), + COUNTRY_REGD("BF", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0), + COUNTRY_REGD("MM", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0), + COUNTRY_REGD("BI", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0), + COUNTRY_REGD("CM", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0), + COUNTRY_REGD("CV", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0), + COUNTRY_REGD("KY", RTW89_FCC, RTW89_FCC, RTW89_FCC, 0x0), + COUNTRY_REGD("CF", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0), + COUNTRY_REGD("TD", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0), + COUNTRY_REGD("CX", RTW89_ACMA, RTW89_ACMA, RTW89_NA, 0x0), + COUNTRY_REGD("CC", RTW89_ACMA, RTW89_ACMA, RTW89_NA, 0x0), + COUNTRY_REGD("KM", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0), + COUNTRY_REGD("CG", 
RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0), + COUNTRY_REGD("CD", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0), + COUNTRY_REGD("CK", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0), + COUNTRY_REGD("CI", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0), + COUNTRY_REGD("DJ", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0), + COUNTRY_REGD("DM", RTW89_FCC, RTW89_FCC, RTW89_NA, 0x0), + COUNTRY_REGD("GQ", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0), + COUNTRY_REGD("ER", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0), + COUNTRY_REGD("ET", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0), + COUNTRY_REGD("FK", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0), + COUNTRY_REGD("FO", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0), + COUNTRY_REGD("FJ", RTW89_FCC, RTW89_FCC, RTW89_NA, 0x0), + COUNTRY_REGD("GF", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0), + COUNTRY_REGD("PF", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0), + COUNTRY_REGD("TF", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0), + COUNTRY_REGD("GA", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0), + COUNTRY_REGD("GM", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0), + COUNTRY_REGD("GE", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0), + COUNTRY_REGD("GI", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0), + COUNTRY_REGD("GL", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0), + COUNTRY_REGD("GD", RTW89_FCC, RTW89_FCC, RTW89_FCC, 0x0), + COUNTRY_REGD("GP", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0), + COUNTRY_REGD("GU", RTW89_FCC, RTW89_FCC, RTW89_FCC, 0x0), + COUNTRY_REGD("GG", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0), + COUNTRY_REGD("GN", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0), + COUNTRY_REGD("GW", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0), + COUNTRY_REGD("GY", RTW89_FCC, RTW89_FCC, RTW89_NA, 0x0), + COUNTRY_REGD("HT", RTW89_FCC, RTW89_FCC, RTW89_FCC, 0x0), + COUNTRY_REGD("HM", RTW89_ACMA, RTW89_ACMA, RTW89_NA, 0x0), + COUNTRY_REGD("VA", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0), + COUNTRY_REGD("IM", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0), + COUNTRY_REGD("JE", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0), + COUNTRY_REGD("KI", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0), + COUNTRY_REGD("XK", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0), + COUNTRY_REGD("LA", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0), + COUNTRY_REGD("LR", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0), + COUNTRY_REGD("LY", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0), + COUNTRY_REGD("MO", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0), + COUNTRY_REGD("MG", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0), + COUNTRY_REGD("MW", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0), + COUNTRY_REGD("MV", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0), + COUNTRY_REGD("ML", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0), + COUNTRY_REGD("MH", RTW89_FCC, RTW89_FCC, RTW89_FCC, 0x0), + COUNTRY_REGD("MQ", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0), + COUNTRY_REGD("MR", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0), + COUNTRY_REGD("MU", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0), + COUNTRY_REGD("YT", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0), + COUNTRY_REGD("FM", RTW89_FCC, RTW89_FCC, RTW89_FCC, 0x0), + COUNTRY_REGD("MD", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0), + COUNTRY_REGD("MN", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0), + COUNTRY_REGD("MS", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0), + COUNTRY_REGD("NR", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0), + COUNTRY_REGD("NP", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0), + COUNTRY_REGD("NC", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0), + COUNTRY_REGD("NE", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0), + COUNTRY_REGD("NU", RTW89_ACMA, RTW89_ACMA, RTW89_NA, 0x0), + COUNTRY_REGD("NF", RTW89_ACMA, RTW89_ACMA, RTW89_NA, 0x0), + COUNTRY_REGD("MP", RTW89_FCC, RTW89_FCC, RTW89_FCC, 0x0), + 
COUNTRY_REGD("PW", RTW89_FCC, RTW89_FCC, RTW89_FCC, 0x0), + COUNTRY_REGD("RE", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0), + COUNTRY_REGD("RW", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0), + COUNTRY_REGD("SH", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0), + COUNTRY_REGD("KN", RTW89_FCC, RTW89_FCC, RTW89_FCC, 0x0), + COUNTRY_REGD("LC", RTW89_FCC, RTW89_FCC, RTW89_FCC, 0x0), + COUNTRY_REGD("MF", RTW89_FCC, RTW89_FCC, RTW89_FCC, 0x0), + COUNTRY_REGD("SX", RTW89_FCC, RTW89_FCC, RTW89_FCC, 0x0), + COUNTRY_REGD("PM", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0), + COUNTRY_REGD("VC", RTW89_FCC, RTW89_FCC, RTW89_NA, 0x0), + COUNTRY_REGD("WS", RTW89_FCC, RTW89_FCC, RTW89_NA, 0x0), + COUNTRY_REGD("SM", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0), + COUNTRY_REGD("ST", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0), + COUNTRY_REGD("SC", RTW89_FCC, RTW89_FCC, RTW89_NA, 0x0), + COUNTRY_REGD("SL", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0), + COUNTRY_REGD("SB", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0), + COUNTRY_REGD("SO", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0), + COUNTRY_REGD("GS", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0), + COUNTRY_REGD("SR", RTW89_FCC, RTW89_FCC, RTW89_FCC, 0x0), + COUNTRY_REGD("SJ", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0), + COUNTRY_REGD("SZ", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0), + COUNTRY_REGD("TJ", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0), + COUNTRY_REGD("TZ", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0), + COUNTRY_REGD("TG", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0), + COUNTRY_REGD("TK", RTW89_ACMA, RTW89_ACMA, RTW89_NA, 0x0), + COUNTRY_REGD("TO", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0), + COUNTRY_REGD("TM", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0), + COUNTRY_REGD("TC", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0), + COUNTRY_REGD("TV", RTW89_ETSI, RTW89_NA, RTW89_NA, 0x0), + COUNTRY_REGD("UG", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0), + COUNTRY_REGD("VI", RTW89_FCC, RTW89_FCC, RTW89_FCC, 0x0), + COUNTRY_REGD("UZ", RTW89_ETSI, RTW89_ETSI, RTW89_ETSI, 0x0), + COUNTRY_REGD("VU", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0), + COUNTRY_REGD("WF", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0), + COUNTRY_REGD("EH", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0), + COUNTRY_REGD("ZM", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0), + COUNTRY_REGD("CU", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0), + COUNTRY_REGD("IR", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0), + COUNTRY_REGD("SY", RTW89_ETSI, RTW89_NA, RTW89_NA, 0x0), + COUNTRY_REGD("SD", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0), + COUNTRY_REGD("PS", RTW89_ETSI, RTW89_ETSI, RTW89_NA, 0x0), }; static const char rtw89_alpha2_list_eu[][3] = { @@ -295,13 +304,16 @@ static const char rtw89_alpha2_list_eu[][3] = { "RO", }; -static const struct rtw89_regd *rtw89_regd_find_reg_by_name(const char *alpha2) +static const struct rtw89_regd *rtw89_regd_find_reg_by_name(struct rtw89_dev *rtwdev, + const char *alpha2) { + struct rtw89_regulatory_info *regulatory = &rtwdev->regulatory; + const struct rtw89_regd_ctrl *regd_ctrl = ®ulatory->ctrl; u32 i; - for (i = 0; i < ARRAY_SIZE(rtw89_regd_map); i++) { - if (!memcmp(rtw89_regd_map[i].alpha2, alpha2, 2)) - return &rtw89_regd_map[i]; + for (i = 0; i < regd_ctrl->nr; i++) { + if (!memcmp(regd_ctrl->map[i].alpha2, alpha2, 2)) + return ®d_ctrl->map[i]; } return &rtw89_ww_regd; @@ -312,22 +324,25 @@ static bool rtw89_regd_is_ww(const struct rtw89_regd *regd) return regd == &rtw89_ww_regd; } -static u8 rtw89_regd_get_index(const struct rtw89_regd *regd) +static u8 rtw89_regd_get_index(struct rtw89_dev *rtwdev, const struct rtw89_regd *regd) { + struct rtw89_regulatory_info *regulatory = 
&rtwdev->regulatory; + const struct rtw89_regd_ctrl *regd_ctrl = &regulatory->ctrl; + BUILD_BUG_ON(ARRAY_SIZE(rtw89_regd_map) > RTW89_REGD_MAX_COUNTRY_NUM); if (rtw89_regd_is_ww(regd)) return RTW89_REGD_MAX_COUNTRY_NUM; - return regd - rtw89_regd_map; + return regd - regd_ctrl->map; } -static u8 rtw89_regd_get_index_by_name(const char *alpha2) +static u8 rtw89_regd_get_index_by_name(struct rtw89_dev *rtwdev, const char *alpha2) { const struct rtw89_regd *regd; - regd = rtw89_regd_find_reg_by_name(alpha2); - return rtw89_regd_get_index(regd); + regd = rtw89_regd_find_reg_by_name(rtwdev, alpha2); + return rtw89_regd_get_index(rtwdev, regd); } #define rtw89_debug_regd(_dev, _regd, _desc, _argv...) \ @@ -345,6 +360,7 @@ static void rtw89_regd_setup_unii4(struct rtw89_dev *rtwdev, struct wiphy *wiphy) { struct rtw89_regulatory_info *regulatory = &rtwdev->regulatory; + const struct rtw89_regd_ctrl *regd_ctrl = &regulatory->ctrl; const struct rtw89_chip_info *chip = rtwdev->chip; struct ieee80211_supported_band *sband; struct rtw89_acpi_dsm_result res = {}; @@ -382,8 +398,8 @@ static void rtw89_regd_setup_unii4(struct rtw89_dev *rtwdev, "acpi: eval if allow unii-4: 0x%x\n", val); bottom: - for (i = 0; i < ARRAY_SIZE(rtw89_regd_map); i++) { - const struct rtw89_regd *regd = &rtw89_regd_map[i]; + for (i = 0; i < regd_ctrl->nr; i++) { + const struct rtw89_regd *regd = &regd_ctrl->map[i]; switch (regd->txpwr_regd[RTW89_BAND_5G]) { case RTW89_FCC: @@ -406,7 +422,7 @@ static void __rtw89_regd_setup_policy_6ghz(struct rtw89_dev *rtwdev, bool block, struct rtw89_regulatory_info *regulatory = &rtwdev->regulatory; u8 index; - index = rtw89_regd_get_index_by_name(alpha2); + index = rtw89_regd_get_index_by_name(rtwdev, alpha2); if (index == RTW89_REGD_MAX_COUNTRY_NUM) { rtw89_debug(rtwdev, RTW89_DBG_REGD, "%s: unknown alpha2 %c%c\n", __func__, alpha2[0], alpha2[1]); @@ -474,6 +490,7 @@ static void rtw89_regd_setup_policy_6ghz(struct rtw89_dev *rtwdev) static void rtw89_regd_setup_policy_6ghz_sp(struct rtw89_dev *rtwdev) { struct rtw89_regulatory_info *regulatory = &rtwdev->regulatory; + const struct rtw89_regd_ctrl *regd_ctrl = &regulatory->ctrl; const struct rtw89_acpi_policy_6ghz_sp *ptr; struct rtw89_acpi_dsm_result res = {}; bool enable_by_us; @@ -505,8 +522,8 @@ static void rtw89_regd_setup_policy_6ghz_sp(struct rtw89_dev *rtwdev) enable_by_us = u8_get_bits(ptr->conf, RTW89_ACPI_CONF_6GHZ_SP_US); - for (i = 0; i < ARRAY_SIZE(rtw89_regd_map); i++) { - const struct rtw89_regd *tmp = &rtw89_regd_map[i]; + for (i = 0; i < regd_ctrl->nr; i++) { + const struct rtw89_regd *tmp = &regd_ctrl->map[i]; if (enable_by_us && memcmp(tmp->alpha2, "US", 2) == 0) clear_bit(i, regulatory->block_6ghz_sp); @@ -573,8 +590,21 @@ static void rtw89_regd_setup_6ghz(struct rtw89_dev *rtwdev, struct wiphy *wiphy) int rtw89_regd_setup(struct rtw89_dev *rtwdev) { + struct rtw89_regulatory_info *regulatory = &rtwdev->regulatory; + struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info; + const struct rtw89_regd_data *regd_data = elm_info->regd; struct wiphy *wiphy = rtwdev->hw->wiphy; + if (regd_data) { + regulatory->ctrl.nr = regd_data->nr; + regulatory->ctrl.map = regd_data->map; + } else { + regulatory->ctrl.nr = ARRAY_SIZE(rtw89_regd_map); + regulatory->ctrl.map = rtw89_regd_map; + } + + regulatory->reg_6ghz_power = RTW89_REG_6GHZ_POWER_DFLT; + if (!wiphy) return -EINVAL; @@ -585,21 +615,16 @@ int rtw89_regd_setup(struct rtw89_dev *rtwdev) return 0; } -int rtw89_regd_init(struct rtw89_dev *rtwdev, - void (*reg_notifier)(struct wiphy *wiphy, 
- struct regulatory_request *request)) +int rtw89_regd_init_hint(struct rtw89_dev *rtwdev) { - struct rtw89_regulatory_info *regulatory = &rtwdev->regulatory; const struct rtw89_regd *chip_regd; struct wiphy *wiphy = rtwdev->hw->wiphy; int ret; - regulatory->reg_6ghz_power = RTW89_REG_6GHZ_POWER_DFLT; - if (!wiphy) return -EINVAL; - chip_regd = rtw89_regd_find_reg_by_name(rtwdev->efuse.country_code); + chip_regd = rtw89_regd_find_reg_by_name(rtwdev, rtwdev->efuse.country_code); if (!rtw89_regd_is_ww(chip_regd)) { rtwdev->regulatory.regd = chip_regd; /* Ignore country ie if there is a country domain programmed in chip */ @@ -637,7 +662,7 @@ static void rtw89_regd_apply_policy_unii4(struct rtw89_dev *rtwdev, if (!chip->support_unii4) return; - index = rtw89_regd_get_index(regd); + index = rtw89_regd_get_index(rtwdev, regd); if (index != RTW89_REGD_MAX_COUNTRY_NUM && !test_bit(index, regulatory->block_unii4)) return; @@ -655,7 +680,7 @@ static bool regd_is_6ghz_blocked(struct rtw89_dev *rtwdev) const struct rtw89_regd *regd = regulatory->regd; u8 index; - index = rtw89_regd_get_index(regd); + index = rtw89_regd_get_index(rtwdev, regd); if (index != RTW89_REGD_MAX_COUNTRY_NUM && !test_bit(index, regulatory->block_6ghz)) return false; @@ -696,11 +721,36 @@ static void rtw89_regd_apply_policy_6ghz(struct rtw89_dev *rtwdev, sband->channels[i].flags |= IEEE80211_CHAN_DISABLED; } +static void rtw89_regd_apply_policy_tas(struct rtw89_dev *rtwdev) +{ + struct rtw89_regulatory_info *regulatory = &rtwdev->regulatory; + const struct rtw89_regd *regd = regulatory->regd; + struct rtw89_tas_info *tas = &rtwdev->tas; + + if (!tas->enable) + return; + + tas->block_regd = !test_bit(RTW89_REGD_FUNC_TAS, regd->func_bitmap); +} + +static void rtw89_regd_apply_policy_ant_gain(struct rtw89_dev *rtwdev) +{ + struct rtw89_regulatory_info *regulatory = &rtwdev->regulatory; + struct rtw89_ant_gain_info *ant_gain = &rtwdev->ant_gain; + const struct rtw89_chip_info *chip = rtwdev->chip; + const struct rtw89_regd *regd = regulatory->regd; + + if (!chip->support_ant_gain) + return; + + ant_gain->block_country = !test_bit(RTW89_REGD_FUNC_DAG, regd->func_bitmap); +} + static void rtw89_regd_notifier_apply(struct rtw89_dev *rtwdev, struct wiphy *wiphy, struct regulatory_request *request) { - rtwdev->regulatory.regd = rtw89_regd_find_reg_by_name(request->alpha2); + rtwdev->regulatory.regd = rtw89_regd_find_reg_by_name(rtwdev, request->alpha2); /* This notification might be set from the system of distros, * and it does not expect the regulatory will be modified by * connecting to an AP (i.e. country ie). @@ -713,14 +763,17 @@ static void rtw89_regd_notifier_apply(struct rtw89_dev *rtwdev, rtw89_regd_apply_policy_unii4(rtwdev, wiphy); rtw89_regd_apply_policy_6ghz(rtwdev, wiphy); + rtw89_regd_apply_policy_tas(rtwdev); + rtw89_regd_apply_policy_ant_gain(rtwdev); } +static void rtw89_regd_notifier(struct wiphy *wiphy, struct regulatory_request *request) { struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); struct rtw89_dev *rtwdev = hw->priv; - mutex_lock(&rtwdev->mutex); + wiphy_lock(wiphy); rtw89_leave_ps_mode(rtwdev); if (wiphy->regd) { @@ -736,7 +789,7 @@ void rtw89_regd_notifier(struct wiphy *wiphy, struct regulatory_request *request rtw89_core_set_chip_txpwr(rtwdev); exit: - mutex_unlock(&rtwdev->mutex); + wiphy_unlock(wiphy); } /* Maximum Transmit Power field (@raw) can be EIRP or PSD. 
@@ -925,7 +978,7 @@ static bool __rtw89_reg_6ghz_power_recalc(struct rtw89_dev *rtwdev) sel = RTW89_REG_6GHZ_POWER_DFLT; if (sel == RTW89_REG_6GHZ_POWER_STD) { - index = rtw89_regd_get_index(regd); + index = rtw89_regd_get_index(rtwdev, regd); if (index == RTW89_REGD_MAX_COUNTRY_NUM || test_bit(index, regulatory->block_6ghz_sp)) { rtw89_debug(rtwdev, RTW89_DBG_REGD, @@ -986,7 +1039,7 @@ int rtw89_reg_6ghz_recalc(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvi unsigned int changed = 0; int ret; - lockdep_assert_held(&rtwdev->mutex); + lockdep_assert_wiphy(rtwdev->hw->wiphy); /* The result of reg_6ghz_tpe may depend on reg_6ghz_power type, * so must do reg_6ghz_tpe_recalc() after reg_6ghz_power_recalc(). diff --git a/drivers/net/wireless/realtek/rtw89/rtw8851b.c b/drivers/net/wireless/realtek/rtw89/rtw8851b.c index c56f70267882a..0d482cd57f6eb 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8851b.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8851b.c @@ -224,10 +224,17 @@ static const struct rtw89_edcca_regs rtw8851b_edcca_regs = { .edcca_p_mask = B_EDCCA_LVL_MSK1, .ppdu_level = R_SEG0R_EDCCA_LVL_V1, .ppdu_mask = B_EDCCA_LVL_MSK3, - .rpt_a = R_EDCCA_RPT_A, - .rpt_b = R_EDCCA_RPT_B, - .rpt_sel = R_EDCCA_RPT_SEL, - .rpt_sel_mask = B_EDCCA_RPT_SEL_MSK, + .p = {{ + .rpt_a = R_EDCCA_RPT_A, + .rpt_b = R_EDCCA_RPT_B, + .rpt_sel = R_EDCCA_RPT_SEL, + .rpt_sel_mask = B_EDCCA_RPT_SEL_MSK, + }, { + .rpt_a = R_EDCCA_RPT_P1_A, + .rpt_b = R_EDCCA_RPT_P1_B, + .rpt_sel = R_EDCCA_RPT_SEL, + .rpt_sel_mask = B_EDCCA_RPT_SEL_P1_MSK, + }}, .tx_collision_t2r_st = R_TX_COLLISION_T2R_ST, .tx_collision_t2r_st_mask = B_TX_COLLISION_T2R_ST_M, }; @@ -1596,10 +1603,16 @@ static void rtw8851b_rfk_channel(struct rtw89_dev *rtwdev, enum rtw89_chanctx_idx chanctx_idx = rtwvif_link->chanctx_idx; enum rtw89_phy_idx phy_idx = rtwvif_link->phy_idx; + rtw89_btc_ntfy_conn_rfk(rtwdev, true); + rtw8851b_rx_dck(rtwdev, phy_idx, chanctx_idx); rtw8851b_iqk(rtwdev, phy_idx, chanctx_idx); + rtw89_btc_ntfy_preserve_bt_time(rtwdev, 30); rtw8851b_tssi(rtwdev, phy_idx, true, chanctx_idx); + rtw89_btc_ntfy_preserve_bt_time(rtwdev, 30); rtw8851b_dpk(rtwdev, phy_idx, chanctx_idx); + + rtw89_btc_ntfy_conn_rfk(rtwdev, false); } static void rtw8851b_rfk_band_changed(struct rtw89_dev *rtwdev, @@ -2410,6 +2423,7 @@ static const struct rtw89_chip_ops rtw8851b_chip_ops = { .h2c_default_cmac_tbl = rtw89_fw_h2c_default_cmac_tbl, .h2c_assoc_cmac_tbl = rtw89_fw_h2c_assoc_cmac_tbl, .h2c_ampdu_cmac_tbl = NULL, + .h2c_txtime_cmac_tbl = rtw89_fw_h2c_txtime_cmac_tbl, .h2c_default_dmac_tbl = NULL, .h2c_update_beacon = rtw89_fw_h2c_update_beacon, .h2c_ba_cam = rtw89_fw_h2c_ba_cam, @@ -2445,6 +2459,7 @@ const struct rtw89_chip_info rtw8851b_chip_info = { .try_ce_fw = true, .bbmcu_nr = 0, .needed_fw_elms = 0, + .fw_blacklist = NULL, .fifo_size = 196608, .small_fifo_size = true, .dle_scc_rsvd_size = 98304, @@ -2483,10 +2498,13 @@ const struct rtw89_chip_info rtw8851b_chip_info = { BIT(NL80211_CHAN_WIDTH_80), .support_unii4 = true, .support_ant_gain = false, + .support_tas = false, .ul_tb_waveform_ctrl = true, .ul_tb_pwr_diff = false, + .rx_freq_frome_ie = true, .hw_sec_hdr = false, .hw_mgmt_tx_encrypt = false, + .hw_tkip_crypto = false, .rf_path_num = 1, .tx_nss = 1, .rx_nss = 1, diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a.c b/drivers/net/wireless/realtek/rtw89/rtw8852a.c index 9bd2842c27d50..286334e26c842 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852a.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852a.c @@ -522,10 +522,17 @@ static 
const struct rtw89_edcca_regs rtw8852a_edcca_regs = { .edcca_p_mask = B_EDCCA_LVL_MSK1, .ppdu_level = R_SEG0R_EDCCA_LVL, .ppdu_mask = B_EDCCA_LVL_MSK3, - .rpt_a = R_EDCCA_RPT_A, - .rpt_b = R_EDCCA_RPT_B, - .rpt_sel = R_EDCCA_RPT_SEL, - .rpt_sel_mask = B_EDCCA_RPT_SEL_MSK, + .p = {{ + .rpt_a = R_EDCCA_RPT_A, + .rpt_b = R_EDCCA_RPT_B, + .rpt_sel = R_EDCCA_RPT_SEL, + .rpt_sel_mask = B_EDCCA_RPT_SEL_MSK, + }, { + .rpt_a = R_EDCCA_RPT_P1_A, + .rpt_b = R_EDCCA_RPT_P1_B, + .rpt_sel = R_EDCCA_RPT_SEL, + .rpt_sel_mask = B_EDCCA_RPT_SEL_P1_MSK, + }}, .tx_collision_t2r_st = R_TX_COLLISION_T2R_ST, .tx_collision_t2r_st_mask = B_TX_COLLISION_T2R_ST_M, }; @@ -1356,10 +1363,16 @@ static void rtw8852a_rfk_channel(struct rtw89_dev *rtwdev, enum rtw89_chanctx_idx chanctx_idx = rtwvif_link->chanctx_idx; enum rtw89_phy_idx phy_idx = rtwvif_link->phy_idx; + rtw89_btc_ntfy_conn_rfk(rtwdev, true); + rtw8852a_rx_dck(rtwdev, phy_idx, true, chanctx_idx); rtw8852a_iqk(rtwdev, phy_idx, chanctx_idx); + rtw89_btc_ntfy_preserve_bt_time(rtwdev, 30); rtw8852a_tssi(rtwdev, phy_idx, chanctx_idx); + rtw89_btc_ntfy_preserve_bt_time(rtwdev, 30); rtw8852a_dpk(rtwdev, phy_idx, chanctx_idx); + + rtw89_btc_ntfy_conn_rfk(rtwdev, false); } static void rtw8852a_rfk_band_changed(struct rtw89_dev *rtwdev, @@ -2136,6 +2149,7 @@ static const struct rtw89_chip_ops rtw8852a_chip_ops = { .h2c_default_cmac_tbl = rtw89_fw_h2c_default_cmac_tbl, .h2c_assoc_cmac_tbl = rtw89_fw_h2c_assoc_cmac_tbl, .h2c_ampdu_cmac_tbl = NULL, + .h2c_txtime_cmac_tbl = rtw89_fw_h2c_txtime_cmac_tbl, .h2c_default_dmac_tbl = NULL, .h2c_update_beacon = rtw89_fw_h2c_update_beacon, .h2c_ba_cam = rtw89_fw_h2c_ba_cam, @@ -2162,6 +2176,7 @@ const struct rtw89_chip_info rtw8852a_chip_info = { .try_ce_fw = false, .bbmcu_nr = 0, .needed_fw_elms = 0, + .fw_blacklist = NULL, .fifo_size = 458752, .small_fifo_size = false, .dle_scc_rsvd_size = 0, @@ -2201,10 +2216,13 @@ const struct rtw89_chip_info rtw8852a_chip_info = { BIT(NL80211_CHAN_WIDTH_80), .support_unii4 = false, .support_ant_gain = false, + .support_tas = false, .ul_tb_waveform_ctrl = false, .ul_tb_pwr_diff = false, + .rx_freq_frome_ie = true, .hw_sec_hdr = false, .hw_mgmt_tx_encrypt = false, + .hw_tkip_crypto = false, .rf_path_num = 2, .tx_nss = 2, .rx_nss = 2, diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b.c b/drivers/net/wireless/realtek/rtw89/rtw8852b.c index dfb2bf61b0b83..eceb4fb9880d0 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852b.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852b.c @@ -189,10 +189,17 @@ static const struct rtw89_edcca_regs rtw8852b_edcca_regs = { .edcca_p_mask = B_EDCCA_LVL_MSK1, .ppdu_level = R_SEG0R_EDCCA_LVL_V1, .ppdu_mask = B_EDCCA_LVL_MSK3, - .rpt_a = R_EDCCA_RPT_A, - .rpt_b = R_EDCCA_RPT_B, - .rpt_sel = R_EDCCA_RPT_SEL, - .rpt_sel_mask = B_EDCCA_RPT_SEL_MSK, + .p = {{ + .rpt_a = R_EDCCA_RPT_A, + .rpt_b = R_EDCCA_RPT_B, + .rpt_sel = R_EDCCA_RPT_SEL, + .rpt_sel_mask = B_EDCCA_RPT_SEL_MSK, + }, { + .rpt_a = R_EDCCA_RPT_P1_A, + .rpt_b = R_EDCCA_RPT_P1_B, + .rpt_sel = R_EDCCA_RPT_SEL, + .rpt_sel_mask = B_EDCCA_RPT_SEL_P1_MSK, + }}, .tx_collision_t2r_st = R_TX_COLLISION_T2R_ST, .tx_collision_t2r_st_mask = B_TX_COLLISION_T2R_ST_M, }; @@ -568,10 +575,16 @@ static void rtw8852b_rfk_channel(struct rtw89_dev *rtwdev, enum rtw89_chanctx_idx chanctx_idx = rtwvif_link->chanctx_idx; enum rtw89_phy_idx phy_idx = rtwvif_link->phy_idx; + rtw89_btc_ntfy_conn_rfk(rtwdev, true); + rtw8852b_rx_dck(rtwdev, phy_idx, chanctx_idx); rtw8852b_iqk(rtwdev, phy_idx, chanctx_idx); + 
rtw89_btc_ntfy_preserve_bt_time(rtwdev, 30); rtw8852b_tssi(rtwdev, phy_idx, true, chanctx_idx); + rtw89_btc_ntfy_preserve_bt_time(rtwdev, 30); rtw8852b_dpk(rtwdev, phy_idx, chanctx_idx); + + rtw89_btc_ntfy_conn_rfk(rtwdev, false); } static void rtw8852b_rfk_band_changed(struct rtw89_dev *rtwdev, @@ -763,6 +776,7 @@ static const struct rtw89_chip_ops rtw8852b_chip_ops = { .h2c_default_cmac_tbl = rtw89_fw_h2c_default_cmac_tbl, .h2c_assoc_cmac_tbl = rtw89_fw_h2c_assoc_cmac_tbl, .h2c_ampdu_cmac_tbl = NULL, + .h2c_txtime_cmac_tbl = rtw89_fw_h2c_txtime_cmac_tbl, .h2c_default_dmac_tbl = NULL, .h2c_update_beacon = rtw89_fw_h2c_update_beacon, .h2c_ba_cam = rtw89_fw_h2c_ba_cam, @@ -798,6 +812,7 @@ const struct rtw89_chip_info rtw8852b_chip_info = { .try_ce_fw = true, .bbmcu_nr = 0, .needed_fw_elms = 0, + .fw_blacklist = &rtw89_fw_blacklist_default, .fifo_size = 196608, .small_fifo_size = true, .dle_scc_rsvd_size = 98304, @@ -837,10 +852,13 @@ const struct rtw89_chip_info rtw8852b_chip_info = { BIT(NL80211_CHAN_WIDTH_80), .support_unii4 = true, .support_ant_gain = true, + .support_tas = false, .ul_tb_waveform_ctrl = true, .ul_tb_pwr_diff = false, + .rx_freq_frome_ie = true, .hw_sec_hdr = false, .hw_mgmt_tx_encrypt = false, + .hw_tkip_crypto = false, .rf_path_num = 2, .tx_nss = 2, .rx_nss = 2, diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b_common.c b/drivers/net/wireless/realtek/rtw89/rtw8852b_common.c index 0e094ce9c9b08..99c9505b3cbd3 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852b_common.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852b_common.c @@ -621,9 +621,9 @@ static void rtw8852bt_ext_loss_avg_update(struct rtw89_dev *rtwdev, if (ext_loss_a == ext_loss_b) { ext_loss_avg = ext_loss_a; } else { - linear = rtw89_db_2_linear(abs(ext_loss_a - ext_loss_b)) + 1; - linear = DIV_ROUND_CLOSEST_ULL(linear / 2, 1 << RTW89_LINEAR_FRAC_BITS); - ext_loss_avg = rtw89_linear_2_db(linear); + linear = rtw89_db_to_linear(abs(ext_loss_a - ext_loss_b)) + 1; + linear /= 2; + ext_loss_avg = rtw89_linear_to_db(linear); ext_loss_avg += min(ext_loss_a, ext_loss_b); } diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.c b/drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.c index ef47a5facc836..fbf82d42687ba 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.c @@ -3585,9 +3585,10 @@ static void _tssi_alimentk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, u8 ch_idx = _tssi_ch_to_idx(rtwdev, channel); struct rtw8852bx_bb_tssi_bak tssi_bak; s32 aliment_diff, tssi_cw_default; - u32 start_time, finish_time; u32 bb_reg_backup[8] = {0}; + ktime_t start_time; const s16 *power; + s64 this_time; u8 band; bool ok; u32 tmp; @@ -3613,7 +3614,7 @@ static void _tssi_alimentk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, return; } - start_time = ktime_get_ns(); + start_time = ktime_get(); if (chan->band_type == RTW89_BAND_2G) power = power_2g; @@ -3738,12 +3739,12 @@ static void _tssi_alimentk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, rtw8852bx_bb_restore_tssi(rtwdev, phy, &tssi_bak); rtw8852bx_bb_tx_mode_switch(rtwdev, phy, 0); - finish_time = ktime_get_ns(); - tssi_info->tssi_alimk_time += finish_time - start_time; + this_time = ktime_us_delta(ktime_get(), start_time); + tssi_info->tssi_alimk_time += this_time; rtw89_debug(rtwdev, RTW89_DBG_RFK, - "[TSSI PA K] %s processing time = %d ms\n", __func__, - tssi_info->tssi_alimk_time); + "[TSSI PA K] %s processing time = %lld us (acc = %llu us)\n", + __func__, this_time, 
tssi_info->tssi_alimk_time); } void rtw8852b_dpk_init(struct rtw89_dev *rtwdev) diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852bt.c b/drivers/net/wireless/realtek/rtw89/rtw8852bt.c index bde3e1fb7ca62..bbf37442c4921 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852bt.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852bt.c @@ -187,10 +187,17 @@ static const struct rtw89_edcca_regs rtw8852bt_edcca_regs = { .edcca_p_mask = B_EDCCA_LVL_MSK1, .ppdu_level = R_SEG0R_EDCCA_LVL_V1, .ppdu_mask = B_EDCCA_LVL_MSK3, - .rpt_a = R_EDCCA_RPT_A, - .rpt_b = R_EDCCA_RPT_B, - .rpt_sel = R_EDCCA_RPT_SEL, - .rpt_sel_mask = B_EDCCA_RPT_SEL_MSK, + .p = {{ + .rpt_a = R_EDCCA_RPT_A, + .rpt_b = R_EDCCA_RPT_B, + .rpt_sel = R_EDCCA_RPT_SEL, + .rpt_sel_mask = B_EDCCA_RPT_SEL_MSK, + }, { + .rpt_a = R_EDCCA_RPT_P1_A, + .rpt_b = R_EDCCA_RPT_P1_B, + .rpt_sel = R_EDCCA_RPT_SEL, + .rpt_sel_mask = B_EDCCA_RPT_SEL_P1_MSK, + }}, .tx_collision_t2r_st = R_TX_COLLISION_T2R_ST, .tx_collision_t2r_st_mask = B_TX_COLLISION_T2R_ST_M, }; @@ -541,10 +548,16 @@ static void rtw8852bt_rfk_channel(struct rtw89_dev *rtwdev, enum rtw89_chanctx_idx chanctx_idx = rtwvif_link->chanctx_idx; enum rtw89_phy_idx phy_idx = rtwvif_link->phy_idx; + rtw89_btc_ntfy_conn_rfk(rtwdev, true); + rtw8852bt_rx_dck(rtwdev, phy_idx, chanctx_idx); rtw8852bt_iqk(rtwdev, phy_idx, chanctx_idx); + rtw89_btc_ntfy_preserve_bt_time(rtwdev, 30); rtw8852bt_tssi(rtwdev, phy_idx, true, chanctx_idx); + rtw89_btc_ntfy_preserve_bt_time(rtwdev, 30); rtw8852bt_dpk(rtwdev, phy_idx, chanctx_idx); + + rtw89_btc_ntfy_conn_rfk(rtwdev, false); } static void rtw8852bt_rfk_band_changed(struct rtw89_dev *rtwdev, @@ -697,6 +710,7 @@ static const struct rtw89_chip_ops rtw8852bt_chip_ops = { .h2c_default_cmac_tbl = rtw89_fw_h2c_default_cmac_tbl, .h2c_assoc_cmac_tbl = rtw89_fw_h2c_assoc_cmac_tbl, .h2c_ampdu_cmac_tbl = NULL, + .h2c_txtime_cmac_tbl = rtw89_fw_h2c_txtime_cmac_tbl, .h2c_default_dmac_tbl = NULL, .h2c_update_beacon = rtw89_fw_h2c_update_beacon, .h2c_ba_cam = rtw89_fw_h2c_ba_cam, @@ -732,6 +746,7 @@ const struct rtw89_chip_info rtw8852bt_chip_info = { .try_ce_fw = true, .bbmcu_nr = 0, .needed_fw_elms = RTW89_AX_GEN_DEF_NEEDED_FW_ELEMENTS_NO_6GHZ, + .fw_blacklist = &rtw89_fw_blacklist_default, .fifo_size = 458752, .small_fifo_size = true, .dle_scc_rsvd_size = 98304, @@ -770,10 +785,13 @@ const struct rtw89_chip_info rtw8852bt_chip_info = { BIT(NL80211_CHAN_WIDTH_80), .support_unii4 = true, .support_ant_gain = true, + .support_tas = false, .ul_tb_waveform_ctrl = true, .ul_tb_pwr_diff = false, + .rx_freq_frome_ie = true, .hw_sec_hdr = false, .hw_mgmt_tx_encrypt = false, + .hw_tkip_crypto = true, .rf_path_num = 2, .tx_nss = 2, .rx_nss = 2, diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.c b/drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.c index 336a83e1d46be..6e6889eea9a0d 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.c @@ -3663,9 +3663,10 @@ static void _tssi_alimentk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, u8 ch_idx = _tssi_ch_to_idx(rtwdev, channel); struct rtw8852bx_bb_tssi_bak tssi_bak; s32 aliment_diff, tssi_cw_default; - u32 start_time, finish_time; u32 bb_reg_backup[8] = {}; + ktime_t start_time; const s16 *power; + s64 this_time; u8 band; bool ok; u32 tmp; @@ -3675,7 +3676,7 @@ static void _tssi_alimentk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, "======> %s channel=%d path=%d\n", __func__, channel, path); - start_time = ktime_get_ns(); + start_time = 
ktime_get(); if (chan->band_type == RTW89_BAND_2G) power = power_2g; @@ -3802,12 +3803,12 @@ static void _tssi_alimentk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, rtw8852bx_bb_restore_tssi(rtwdev, phy, &tssi_bak); rtw8852bx_bb_tx_mode_switch(rtwdev, phy, 0); - finish_time = ktime_get_ns(); - tssi_info->tssi_alimk_time += finish_time - start_time; + this_time = ktime_us_delta(ktime_get(), start_time); + tssi_info->tssi_alimk_time += this_time; rtw89_debug(rtwdev, RTW89_DBG_RFK, - "[TSSI PA K] %s processing time = %d ms\n", __func__, - tssi_info->tssi_alimk_time); + "[TSSI PA K] %s processing time = %lld us (acc = %llu us)\n", + __func__, this_time, tssi_info->tssi_alimk_time); } void rtw8852bt_dpk_init(struct rtw89_dev *rtwdev) diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c.c b/drivers/net/wireless/realtek/rtw89/rtw8852c.c index bc84b15e7826d..08bcdf2463827 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852c.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852c.c @@ -12,6 +12,7 @@ #include "rtw8852c.h" #include "rtw8852c_rfk.h" #include "rtw8852c_table.h" +#include "sar.h" #include "util.h" #define RTW8852C_FW_FORMAT_MAX 1 @@ -186,10 +187,17 @@ static const struct rtw89_edcca_regs rtw8852c_edcca_regs = { .edcca_p_mask = B_EDCCA_LVL_MSK1, .ppdu_level = R_SEG0R_EDCCA_LVL, .ppdu_mask = B_EDCCA_LVL_MSK3, - .rpt_a = R_EDCCA_RPT_A, - .rpt_b = R_EDCCA_RPT_B, - .rpt_sel = R_EDCCA_RPT_SEL, - .rpt_sel_mask = B_EDCCA_RPT_SEL_MSK, + .p = {{ + .rpt_a = R_EDCCA_RPT_A, + .rpt_b = R_EDCCA_RPT_B, + .rpt_sel = R_EDCCA_RPT_SEL, + .rpt_sel_mask = B_EDCCA_RPT_SEL_MSK, + }, { + .rpt_a = R_EDCCA_RPT_P1_A, + .rpt_b = R_EDCCA_RPT_P1_B, + .rpt_sel = R_EDCCA_RPT_SEL, + .rpt_sel_mask = B_EDCCA_RPT_SEL_P1_MSK, + }}, .tx_collision_t2r_st = R_TX_COLLISION_T2R_ST, .tx_collision_t2r_st_mask = B_TX_COLLISION_T2R_ST_M, }; @@ -1853,10 +1861,16 @@ static void rtw8852c_rfk_channel(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx = rtwvif_link->phy_idx; rtw8852c_mcc_get_ch_info(rtwdev, phy_idx); + rtw89_btc_ntfy_conn_rfk(rtwdev, true); + rtw8852c_rx_dck(rtwdev, phy_idx, false); rtw8852c_iqk(rtwdev, phy_idx, chanctx_idx); + rtw89_btc_ntfy_preserve_bt_time(rtwdev, 30); rtw8852c_tssi(rtwdev, phy_idx, chanctx_idx); + rtw89_btc_ntfy_preserve_bt_time(rtwdev, 30); rtw8852c_dpk(rtwdev, phy_idx, chanctx_idx); + + rtw89_btc_ntfy_conn_rfk(rtwdev, false); rtw89_fw_h2c_rf_ntfy_mcc(rtwdev); } @@ -2867,6 +2881,7 @@ static int rtw8852c_mac_disable_bb_rf(struct rtw89_dev *rtwdev) static const struct rtw89_chanctx_listener rtw8852c_chanctx_listener = { .callbacks[RTW89_CHANCTX_CALLBACK_RFK] = rtw8852c_rfk_chanctx_cb, + .callbacks[RTW89_CHANCTX_CALLBACK_TAS] = rtw89_tas_chanctx_cb, }; #ifdef CONFIG_PM @@ -2928,6 +2943,7 @@ static const struct rtw89_chip_ops rtw8852c_chip_ops = { .h2c_default_cmac_tbl = rtw89_fw_h2c_default_cmac_tbl, .h2c_assoc_cmac_tbl = rtw89_fw_h2c_assoc_cmac_tbl, .h2c_ampdu_cmac_tbl = NULL, + .h2c_txtime_cmac_tbl = rtw89_fw_h2c_txtime_cmac_tbl, .h2c_default_dmac_tbl = NULL, .h2c_update_beacon = rtw89_fw_h2c_update_beacon, .h2c_ba_cam = rtw89_fw_h2c_ba_cam, @@ -2954,6 +2970,7 @@ const struct rtw89_chip_info rtw8852c_chip_info = { .try_ce_fw = false, .bbmcu_nr = 0, .needed_fw_elms = 0, + .fw_blacklist = &rtw89_fw_blacklist_default, .fifo_size = 458752, .small_fifo_size = false, .dle_scc_rsvd_size = 0, @@ -2996,10 +3013,13 @@ const struct rtw89_chip_info rtw8852c_chip_info = { BIT(NL80211_CHAN_WIDTH_160), .support_unii4 = true, .support_ant_gain = true, + .support_tas = true, .ul_tb_waveform_ctrl = false, 
.ul_tb_pwr_diff = true, + .rx_freq_frome_ie = false, .hw_sec_hdr = true, .hw_mgmt_tx_encrypt = true, + .hw_tkip_crypto = true, .rf_path_num = 2, .tx_nss = 2, .rx_nss = 2, diff --git a/drivers/net/wireless/realtek/rtw89/rtw8922a.c b/drivers/net/wireless/realtek/rtw89/rtw8922a.c index 11d66bfceb15f..8082592db84a1 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8922a.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8922a.c @@ -205,10 +205,17 @@ static const struct rtw89_edcca_regs rtw8922a_edcca_regs = { .edcca_p_mask = B_EDCCA_LVL_MSK1, .ppdu_level = R_SEG0R_PPDU_LVL_BE, .ppdu_mask = B_EDCCA_LVL_MSK1, - .rpt_a = R_EDCCA_RPT_A_BE, - .rpt_b = R_EDCCA_RPT_B_BE, - .rpt_sel = R_EDCCA_RPT_SEL_BE, - .rpt_sel_mask = B_EDCCA_RPT_SEL_MSK, + .p = {{ + .rpt_a = R_EDCCA_RPT_A_BE, + .rpt_b = R_EDCCA_RPT_B_BE, + .rpt_sel = R_EDCCA_RPT_SEL_BE, + .rpt_sel_mask = B_EDCCA_RPT_SEL_MSK, + }, { + .rpt_a = R_EDCCA_RPT_P1_A_BE, + .rpt_b = R_EDCCA_RPT_P1_B_BE, + .rpt_sel = R_EDCCA_RPT_SEL_BE, + .rpt_sel_mask = B_EDCCA_RPT_SEL_P1_MSK, + }}, .rpt_sel_be = R_EDCCA_RPTREG_SEL_BE, .rpt_sel_be_mask = B_EDCCA_RPTREG_SEL_BE_MSK, .tx_collision_t2r_st = R_TX_COLLISION_T2R_ST_BE, @@ -2149,6 +2156,56 @@ static void rtw8922a_set_txpwr_ref(struct rtw89_dev *rtwdev, B_BE_PWR_REF_CTRL_CCK, ref_cck); } +static const struct rtw89_reg_def rtw8922a_txpwr_ref[][3] = { + {{ .addr = R_TXAGC_REF_DBM_P0, .mask = B_TXAGC_OFDM_REF_DBM_P0}, + { .addr = R_TXAGC_REF_DBM_P0, .mask = B_TXAGC_CCK_REF_DBM_P0}, + { .addr = R_TSSI_K_P0, .mask = B_TSSI_K_OFDM_P0} + }, + {{ .addr = R_TXAGC_REF_DBM_RF1_P0, .mask = B_TXAGC_OFDM_REF_DBM_RF1_P0}, + { .addr = R_TXAGC_REF_DBM_RF1_P0, .mask = B_TXAGC_CCK_REF_DBM_RF1_P0}, + { .addr = R_TSSI_K_RF1_P0, .mask = B_TSSI_K_OFDM_RF1_P0} + }, +}; + +static void rtw8922a_set_txpwr_diff(struct rtw89_dev *rtwdev, + const struct rtw89_chan *chan, + enum rtw89_phy_idx phy_idx) +{ + s16 pwr_ofst = rtw89_phy_ant_gain_pwr_offset(rtwdev, chan); + const struct rtw89_chip_info *chip = rtwdev->chip; + static const u32 path_ofst[] = {0x0, 0x100}; + const struct rtw89_reg_def *txpwr_ref; + static const s16 tssi_k_base = 0x12; + s16 tssi_k_ofst = abs(pwr_ofst) + tssi_k_base; + s16 ofst_dec[RF_PATH_NUM_8922A]; + s16 tssi_k[RF_PATH_NUM_8922A]; + s16 pwr_ref_ofst; + s16 pwr_ref = 0; + u8 i; + + if (rtwdev->hal.cv == CHIP_CAV) + pwr_ref = 16; + + pwr_ref <<= chip->txpwr_factor_rf; + pwr_ref_ofst = pwr_ref - rtw89_phy_txpwr_bb_to_rf(rtwdev, abs(pwr_ofst)); + + ofst_dec[RF_PATH_A] = pwr_ofst > 0 ? pwr_ref : pwr_ref_ofst; + ofst_dec[RF_PATH_B] = pwr_ofst > 0 ? pwr_ref_ofst : pwr_ref; + tssi_k[RF_PATH_A] = pwr_ofst > 0 ? tssi_k_base : tssi_k_ofst; + tssi_k[RF_PATH_B] = pwr_ofst > 0 ? 
tssi_k_ofst : tssi_k_base; + + for (i = 0; i < RF_PATH_NUM_8922A; i++) { + txpwr_ref = rtw8922a_txpwr_ref[phy_idx]; + + rtw89_phy_write32_mask(rtwdev, txpwr_ref[0].addr + path_ofst[i], + txpwr_ref[0].mask, ofst_dec[i]); + rtw89_phy_write32_mask(rtwdev, txpwr_ref[1].addr + path_ofst[i], + txpwr_ref[1].mask, ofst_dec[i]); + rtw89_phy_write32_mask(rtwdev, txpwr_ref[2].addr + path_ofst[i], + txpwr_ref[2].mask, tssi_k[i]); + } +} + static void rtw8922a_bb_tx_triangular(struct rtw89_dev *rtwdev, bool en, enum rtw89_phy_idx phy_idx) { @@ -2185,6 +2242,8 @@ static void rtw8922a_set_txpwr(struct rtw89_dev *rtwdev, rtw8922a_set_tx_shape(rtwdev, chan, phy_idx); rtw89_phy_set_txpwr_limit(rtwdev, chan, phy_idx); rtw89_phy_set_txpwr_limit_ru(rtwdev, chan, phy_idx); + rtw8922a_set_txpwr_diff(rtwdev, chan, phy_idx); + rtw8922a_set_txpwr_ref(rtwdev, phy_idx); } static void rtw8922a_set_txpwr_ctrl(struct rtw89_dev *rtwdev, @@ -2695,6 +2754,7 @@ static const struct rtw89_chip_ops rtw8922a_chip_ops = { .h2c_default_cmac_tbl = rtw89_fw_h2c_default_cmac_tbl_g7, .h2c_assoc_cmac_tbl = rtw89_fw_h2c_assoc_cmac_tbl_g7, .h2c_ampdu_cmac_tbl = rtw89_fw_h2c_ampdu_cmac_tbl_g7, + .h2c_txtime_cmac_tbl = rtw89_fw_h2c_txtime_cmac_tbl_g7, .h2c_default_dmac_tbl = rtw89_fw_h2c_default_dmac_tbl_v2, .h2c_update_beacon = rtw89_fw_h2c_update_beacon_be, .h2c_ba_cam = rtw89_fw_h2c_ba_cam_v1, @@ -2721,6 +2781,7 @@ const struct rtw89_chip_info rtw8922a_chip_info = { .try_ce_fw = false, .bbmcu_nr = 1, .needed_fw_elms = RTW89_BE_GEN_DEF_NEEDED_FW_ELEMENTS, + .fw_blacklist = &rtw89_fw_blacklist_default, .fifo_size = 589824, .small_fifo_size = false, .dle_scc_rsvd_size = 0, @@ -2760,11 +2821,14 @@ const struct rtw89_chip_info rtw8922a_chip_info = { BIT(NL80211_CHAN_WIDTH_80) | BIT(NL80211_CHAN_WIDTH_160), .support_unii4 = true, - .support_ant_gain = false, + .support_ant_gain = true, + .support_tas = false, .ul_tb_waveform_ctrl = false, .ul_tb_pwr_diff = false, + .rx_freq_frome_ie = false, .hw_sec_hdr = true, .hw_mgmt_tx_encrypt = true, + .hw_tkip_crypto = true, .rf_path_num = 2, .tx_nss = 2, .rx_nss = 2, diff --git a/drivers/net/wireless/realtek/rtw89/sar.c b/drivers/net/wireless/realtek/rtw89/sar.c index 871f45a6508ca..0b5af95287022 100644 --- a/drivers/net/wireless/realtek/rtw89/sar.c +++ b/drivers/net/wireless/realtek/rtw89/sar.c @@ -7,10 +7,16 @@ #include "phy.h" #include "reg.h" #include "sar.h" +#include "util.h" #define RTW89_TAS_FACTOR 2 /* unit: 0.25 dBm */ +#define RTW89_TAS_SAR_GAP (1 << RTW89_TAS_FACTOR) #define RTW89_TAS_DPR_GAP (1 << RTW89_TAS_FACTOR) #define RTW89_TAS_DELTA (2 << RTW89_TAS_FACTOR) +#define RTW89_TAS_TX_RATIO_THRESHOLD 70 +#define RTW89_TAS_DFLT_TX_RATIO 80 +#define RTW89_TAS_DPR_ON_OFFSET (RTW89_TAS_DELTA + RTW89_TAS_SAR_GAP) +#define RTW89_TAS_DPR_OFF_OFFSET (4 << RTW89_TAS_FACTOR) static enum rtw89_sar_subband rtw89_sar_get_subband(struct rtw89_dev *rtwdev, u32 center_freq) @@ -99,7 +105,7 @@ struct rtw89_sar_handler rtw89_sar_handlers[RTW89_SAR_SOURCE_NR] = { typeof(_dev) _d = (_dev); \ BUILD_BUG_ON(!rtw89_sar_handlers[_s].descr_sar_source); \ BUILD_BUG_ON(!rtw89_sar_handlers[_s].query_sar_config); \ - lockdep_assert_held(&_d->mutex); \ + lockdep_assert_wiphy(_d->hw->wiphy); \ _d->sar._cfg_name = *(_cfg_data); \ _d->sar.src = _s; \ } while (0) @@ -117,8 +123,8 @@ static s8 rtw89_txpwr_sar_to_mac(struct rtw89_dev *rtwdev, u8 fct, s32 cfg) RTW89_SAR_TXPWR_MAC_MAX); } -static s8 rtw89_txpwr_tas_to_sar(const struct rtw89_sar_handler *sar_hdl, - s8 cfg) +static s32 rtw89_txpwr_tas_to_sar(const struct 
rtw89_sar_handler *sar_hdl, + s32 cfg) { const u8 fct = sar_hdl->txpwr_factor_sar; @@ -128,8 +134,8 @@ static s8 rtw89_txpwr_tas_to_sar(const struct rtw89_sar_handler *sar_hdl, return cfg >> (RTW89_TAS_FACTOR - fct); } -static s8 rtw89_txpwr_sar_to_tas(const struct rtw89_sar_handler *sar_hdl, - s8 cfg) +static s32 rtw89_txpwr_sar_to_tas(const struct rtw89_sar_handler *sar_hdl, + s32 cfg) { const u8 fct = sar_hdl->txpwr_factor_sar; @@ -139,18 +145,48 @@ static s8 rtw89_txpwr_sar_to_tas(const struct rtw89_sar_handler *sar_hdl, return cfg << (RTW89_TAS_FACTOR - fct); } +static bool rtw89_tas_is_active(struct rtw89_dev *rtwdev) +{ + struct rtw89_tas_info *tas = &rtwdev->tas; + struct rtw89_vif *rtwvif; + + if (!tas->enable) + return false; + + rtw89_for_each_rtwvif(rtwdev, rtwvif) { + if (ieee80211_vif_is_mld(rtwvif_to_vif(rtwvif))) + return false; + } + + return true; +} + +static const char *rtw89_tas_state_str(enum rtw89_tas_state state) +{ + switch (state) { + case RTW89_TAS_STATE_DPR_OFF: + return "DPR OFF"; + case RTW89_TAS_STATE_DPR_ON: + return "DPR ON"; + case RTW89_TAS_STATE_STATIC_SAR: + return "STATIC SAR"; + default: + return NULL; + } +} + s8 rtw89_query_sar(struct rtw89_dev *rtwdev, u32 center_freq) { const enum rtw89_sar_sources src = rtwdev->sar.src; /* its members are protected by rtw89_sar_set_src() */ const struct rtw89_sar_handler *sar_hdl = &rtw89_sar_handlers[src]; struct rtw89_tas_info *tas = &rtwdev->tas; - s8 delta; + s32 offset; int ret; s32 cfg; u8 fct; - lockdep_assert_held(&rtwdev->mutex); + lockdep_assert_wiphy(rtwdev->hw->wiphy); if (src == RTW89_SAR_SOURCE_NONE) return RTW89_SAR_TXPWR_MAC_MAX; @@ -159,15 +195,17 @@ s8 rtw89_query_sar(struct rtw89_dev *rtwdev, u32 center_freq) if (ret) return RTW89_SAR_TXPWR_MAC_MAX; - if (tas->enable) { + if (rtw89_tas_is_active(rtwdev)) { switch (tas->state) { case RTW89_TAS_STATE_DPR_OFF: - return RTW89_SAR_TXPWR_MAC_MAX; + offset = rtw89_txpwr_tas_to_sar(sar_hdl, RTW89_TAS_DPR_OFF_OFFSET); + cfg += offset; + break; case RTW89_TAS_STATE_DPR_ON: - delta = rtw89_txpwr_tas_to_sar(sar_hdl, tas->delta); - cfg -= delta; + offset = rtw89_txpwr_tas_to_sar(sar_hdl, RTW89_TAS_DPR_ON_OFFSET); + cfg -= offset; break; - case RTW89_TAS_STATE_DPR_FORBID: + case RTW89_TAS_STATE_STATIC_SAR: default: break; } @@ -178,72 +216,91 @@ s8 rtw89_query_sar(struct rtw89_dev *rtwdev, u32 center_freq) return rtw89_txpwr_sar_to_mac(rtwdev, fct, cfg); } -void rtw89_print_sar(struct seq_file *m, struct rtw89_dev *rtwdev, u32 center_freq) +int rtw89_print_sar(struct rtw89_dev *rtwdev, char *buf, size_t bufsz, + u32 center_freq) { const enum rtw89_sar_sources src = rtwdev->sar.src; /* its members are protected by rtw89_sar_set_src() */ const struct rtw89_sar_handler *sar_hdl = &rtw89_sar_handlers[src]; const u8 fct_mac = rtwdev->chip->txpwr_factor_mac; + char *p = buf, *end = buf + bufsz; int ret; s32 cfg; u8 fct; - lockdep_assert_held(&rtwdev->mutex); + lockdep_assert_wiphy(rtwdev->hw->wiphy); if (src == RTW89_SAR_SOURCE_NONE) { - seq_puts(m, "no SAR is applied\n"); - return; + p += scnprintf(p, end - p, "no SAR is applied\n"); + goto out; } - seq_printf(m, "source: %d (%s)\n", src, sar_hdl->descr_sar_source); + p += scnprintf(p, end - p, "source: %d (%s)\n", src, + sar_hdl->descr_sar_source); ret = sar_hdl->query_sar_config(rtwdev, center_freq, &cfg); if (ret) { - seq_printf(m, "config: return code: %d\n", ret); - seq_printf(m, "assign: max setting: %d (unit: 1/%lu dBm)\n", - RTW89_SAR_TXPWR_MAC_MAX, BIT(fct_mac)); - return; + p += scnprintf(p, end - p, 
"config: return code: %d\n", ret); + p += scnprintf(p, end - p, + "assign: max setting: %d (unit: 1/%lu dBm)\n", + RTW89_SAR_TXPWR_MAC_MAX, BIT(fct_mac)); + goto out; } fct = sar_hdl->txpwr_factor_sar; - seq_printf(m, "config: %d (unit: 1/%lu dBm)\n", cfg, BIT(fct)); + p += scnprintf(p, end - p, "config: %d (unit: 1/%lu dBm)\n", cfg, + BIT(fct)); + +out: + return p - buf; } -void rtw89_print_tas(struct seq_file *m, struct rtw89_dev *rtwdev) +int rtw89_print_tas(struct rtw89_dev *rtwdev, char *buf, size_t bufsz) { struct rtw89_tas_info *tas = &rtwdev->tas; + char *p = buf, *end = buf + bufsz; - if (!tas->enable) { - seq_puts(m, "no TAS is applied\n"); - return; + if (!rtw89_tas_is_active(rtwdev)) { + p += scnprintf(p, end - p, "no TAS is applied\n"); + goto out; } - seq_printf(m, "DPR gap: %d\n", tas->dpr_gap); - seq_printf(m, "TAS delta: %d\n", tas->delta); + p += scnprintf(p, end - p, "State: %s\n", + rtw89_tas_state_str(tas->state)); + p += scnprintf(p, end - p, "Average time: %d\n", + tas->window_size * 2); + p += scnprintf(p, end - p, "SAR gap: %d dBm\n", + RTW89_TAS_SAR_GAP >> RTW89_TAS_FACTOR); + p += scnprintf(p, end - p, "DPR gap: %d dBm\n", + RTW89_TAS_DPR_GAP >> RTW89_TAS_FACTOR); + p += scnprintf(p, end - p, "DPR ON offset: %d dBm\n", + RTW89_TAS_DPR_ON_OFFSET >> RTW89_TAS_FACTOR); + p += scnprintf(p, end - p, "DPR OFF offset: %d dBm\n", + RTW89_TAS_DPR_OFF_OFFSET >> RTW89_TAS_FACTOR); + +out: + return p - buf; } static int rtw89_apply_sar_common(struct rtw89_dev *rtwdev, const struct rtw89_sar_cfg_common *sar) { enum rtw89_sar_sources src; - int ret = 0; - mutex_lock(&rtwdev->mutex); + lockdep_assert_wiphy(rtwdev->hw->wiphy); src = rtwdev->sar.src; if (src != RTW89_SAR_SOURCE_NONE && src != RTW89_SAR_SOURCE_COMMON) { rtw89_warn(rtwdev, "SAR source: %d is in use", src); - ret = -EBUSY; - goto exit; + return -EBUSY; } rtw89_sar_set_src(rtwdev, RTW89_SAR_SOURCE_COMMON, cfg_common, sar); rtw89_core_set_chip_txpwr(rtwdev); + rtw89_tas_reset(rtwdev, false); -exit: - mutex_unlock(&rtwdev->mutex); - return ret; + return 0; } static const struct cfg80211_sar_freq_ranges rtw89_common_sar_freq_ranges[] = { @@ -279,6 +336,8 @@ int rtw89_ops_set_sar_specs(struct ieee80211_hw *hw, s32 power; u32 i, idx; + lockdep_assert_wiphy(rtwdev->hw->wiphy); + if (sar->type != NL80211_SAR_TYPE_POWER) return -EINVAL; @@ -304,65 +363,174 @@ int rtw89_ops_set_sar_specs(struct ieee80211_hw *hw, return rtw89_apply_sar_common(rtwdev, &sar_common); } -static void rtw89_tas_state_update(struct rtw89_dev *rtwdev) +static bool rtw89_tas_query_sar_config(struct rtw89_dev *rtwdev, s32 *cfg) { + const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0); const enum rtw89_sar_sources src = rtwdev->sar.src; /* its members are protected by rtw89_sar_set_src() */ const struct rtw89_sar_handler *sar_hdl = &rtw89_sar_handlers[src]; - struct rtw89_tas_info *tas = &rtwdev->tas; - s32 txpwr_avg = tas->total_txpwr / RTW89_TAS_MAX_WINDOW / PERCENT; - s32 dpr_on_threshold, dpr_off_threshold, cfg; - enum rtw89_tas_state state = tas->state; - const struct rtw89_chan *chan; int ret; - lockdep_assert_held(&rtwdev->mutex); - if (src == RTW89_SAR_SOURCE_NONE) - return; + return false; - chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0); - ret = sar_hdl->query_sar_config(rtwdev, chan->freq, &cfg); + ret = sar_hdl->query_sar_config(rtwdev, chan->freq, cfg); if (ret) + return false; + + *cfg = rtw89_txpwr_sar_to_tas(sar_hdl, *cfg); + + return true; +} + +static void rtw89_tas_state_update(struct rtw89_dev *rtwdev, + enum 
rtw89_tas_state state) +{ + struct rtw89_tas_info *tas = &rtwdev->tas; + + if (tas->state == state) return; - cfg = rtw89_txpwr_sar_to_tas(sar_hdl, cfg); + rtw89_debug(rtwdev, RTW89_DBG_SAR, "tas: switch state: %s -> %s\n", + rtw89_tas_state_str(tas->state), rtw89_tas_state_str(state)); - if (tas->delta >= cfg) { + tas->state = state; + rtw89_core_set_chip_txpwr(rtwdev); +} + +static u32 rtw89_tas_get_window_size(struct rtw89_dev *rtwdev) +{ + const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0); + u8 band = chan->band_type; + u8 regd = rtw89_regd_get(rtwdev, band); + + switch (regd) { + default: rtw89_debug(rtwdev, RTW89_DBG_SAR, - "TAS delta exceed SAR limit\n"); - state = RTW89_TAS_STATE_DPR_FORBID; - goto out; + "tas: regd: %u is unhandled\n", regd); + fallthrough; + case RTW89_IC: + case RTW89_KCC: + return 180; + case RTW89_FCC: + switch (band) { + case RTW89_BAND_2G: + return 50; + case RTW89_BAND_5G: + return 30; + case RTW89_BAND_6G: + default: + return 15; + } + break; + } +} + +static void rtw89_tas_window_update(struct rtw89_dev *rtwdev) +{ + u32 window_size = rtw89_tas_get_window_size(rtwdev); + struct rtw89_tas_info *tas = &rtwdev->tas; + u64 total_txpwr = 0; + u8 head_idx; + u32 i, j; + + WARN_ON_ONCE(tas->window_size > RTW89_TAS_TXPWR_WINDOW); + + if (tas->window_size == window_size) + return; + + rtw89_debug(rtwdev, RTW89_DBG_SAR, "tas: window update: %u -> %u\n", + tas->window_size, window_size); + + head_idx = (tas->txpwr_tail_idx - window_size + 1 + RTW89_TAS_TXPWR_WINDOW) % + RTW89_TAS_TXPWR_WINDOW; + for (i = 0; i < window_size; i++) { + j = (head_idx + i) % RTW89_TAS_TXPWR_WINDOW; + total_txpwr += tas->txpwr_history[j]; + } + + tas->window_size = window_size; + tas->total_txpwr = total_txpwr; + tas->txpwr_head_idx = head_idx; +} + +static void rtw89_tas_history_update(struct rtw89_dev *rtwdev) +{ + struct rtw89_bb_ctx *bb = rtw89_get_bb_ctx(rtwdev, RTW89_PHY_0); + struct rtw89_env_monitor_info *env = &bb->env_monitor; + struct rtw89_tas_info *tas = &rtwdev->tas; + u8 tx_ratio = env->ifs_clm_tx_ratio; + u64 instant_txpwr, txpwr; + + /* txpwr in units of linear (mW) multiplied by percentage */ + if (tx_ratio == 0) { + /* special case: idle tx power + * use -40 dBm * 100 tx ratio + */ + instant_txpwr = rtw89_db_to_linear(-40); + txpwr = instant_txpwr * 100; + } else { + instant_txpwr = tas->instant_txpwr; + txpwr = instant_txpwr * tx_ratio; } - dpr_on_threshold = cfg; - dpr_off_threshold = cfg - tas->dpr_gap; + tas->total_txpwr += txpwr - tas->txpwr_history[tas->txpwr_head_idx]; + tas->total_tx_ratio += tx_ratio - tas->tx_ratio_history[tas->tx_ratio_idx]; + tas->tx_ratio_history[tas->tx_ratio_idx] = tx_ratio; + + tas->txpwr_head_idx = (tas->txpwr_head_idx + 1) % RTW89_TAS_TXPWR_WINDOW; + tas->txpwr_tail_idx = (tas->txpwr_tail_idx + 1) % RTW89_TAS_TXPWR_WINDOW; + tas->tx_ratio_idx = (tas->tx_ratio_idx + 1) % RTW89_TAS_TX_RATIO_WINDOW; + tas->txpwr_history[tas->txpwr_tail_idx] = txpwr; + rtw89_debug(rtwdev, RTW89_DBG_SAR, - "DPR_ON thold: %d, DPR_OFF thold: %d, txpwr_avg: %d\n", - dpr_on_threshold, dpr_off_threshold, txpwr_avg); + "tas: instant_txpwr: %d, tx_ratio: %u, txpwr: %d\n", + rtw89_linear_to_db_quarter(instant_txpwr), tx_ratio, + rtw89_linear_to_db_quarter(div_u64(txpwr, PERCENT))); +} - if (txpwr_avg >= dpr_on_threshold) +static void rtw89_tas_rolling_average(struct rtw89_dev *rtwdev) +{ + struct rtw89_tas_info *tas = &rtwdev->tas; + s32 dpr_on_threshold, dpr_off_threshold; + enum rtw89_tas_state state; + u16 tx_ratio_avg; + s32 txpwr_avg; + u64 
linear; + + linear = DIV_ROUND_DOWN_ULL(tas->total_txpwr, tas->window_size * PERCENT); + txpwr_avg = rtw89_linear_to_db_quarter(linear); + tx_ratio_avg = tas->total_tx_ratio / RTW89_TAS_TX_RATIO_WINDOW; + dpr_on_threshold = tas->dpr_on_threshold; + dpr_off_threshold = tas->dpr_off_threshold; + + rtw89_debug(rtwdev, RTW89_DBG_SAR, + "tas: DPR_ON: %d, DPR_OFF: %d, txpwr_avg: %d, tx_ratio_avg: %u\n", + dpr_on_threshold, dpr_off_threshold, txpwr_avg, tx_ratio_avg); + + if (tx_ratio_avg >= RTW89_TAS_TX_RATIO_THRESHOLD) + state = RTW89_TAS_STATE_STATIC_SAR; + else if (txpwr_avg >= dpr_on_threshold) state = RTW89_TAS_STATE_DPR_ON; else if (txpwr_avg < dpr_off_threshold) state = RTW89_TAS_STATE_DPR_OFF; - -out: - if (tas->state == state) + else return; - rtw89_debug(rtwdev, RTW89_DBG_SAR, - "TAS old state: %d, new state: %d\n", tas->state, state); - tas->state = state; - rtw89_core_set_chip_txpwr(rtwdev); + rtw89_tas_state_update(rtwdev, state); } void rtw89_tas_init(struct rtw89_dev *rtwdev) { + const struct rtw89_chip_info *chip = rtwdev->chip; struct rtw89_tas_info *tas = &rtwdev->tas; struct rtw89_acpi_dsm_result res = {}; int ret; u8 val; + if (!chip->support_tas) + return; + ret = rtw89_acpi_evaluate_dsm(rtwdev, RTW89_ACPI_DSM_FUNC_TAS_EN, &res); if (ret) { rtw89_debug(rtwdev, RTW89_DBG_SAR, @@ -386,64 +554,116 @@ void rtw89_tas_init(struct rtw89_dev *rtwdev) rtw89_debug(rtwdev, RTW89_DBG_SAR, "TAS not enable\n"); return; } - - tas->dpr_gap = RTW89_TAS_DPR_GAP; - tas->delta = RTW89_TAS_DELTA; } -void rtw89_tas_reset(struct rtw89_dev *rtwdev) +void rtw89_tas_reset(struct rtw89_dev *rtwdev, bool force) { + const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_CHANCTX_0); struct rtw89_tas_info *tas = &rtwdev->tas; + u64 linear; + s32 cfg; + int i; - if (!tas->enable) + if (!rtw89_tas_is_active(rtwdev)) + return; + + if (!rtw89_tas_query_sar_config(rtwdev, &cfg)) return; - memset(&tas->txpwr_history, 0, sizeof(tas->txpwr_history)); - tas->total_txpwr = 0; - tas->cur_idx = 0; + tas->dpr_on_threshold = cfg - RTW89_TAS_SAR_GAP; + tas->dpr_off_threshold = cfg - RTW89_TAS_SAR_GAP - RTW89_TAS_DPR_GAP; + + /* avoid history reset after new SAR apply */ + if (!force && tas->keep_history) + return; + + linear = rtw89_db_quarter_to_linear(cfg) * RTW89_TAS_DFLT_TX_RATIO; + for (i = 0; i < RTW89_TAS_TXPWR_WINDOW; i++) + tas->txpwr_history[i] = linear; + + for (i = 0; i < RTW89_TAS_TX_RATIO_WINDOW; i++) + tas->tx_ratio_history[i] = RTW89_TAS_DFLT_TX_RATIO; + + tas->total_tx_ratio = RTW89_TAS_DFLT_TX_RATIO * RTW89_TAS_TX_RATIO_WINDOW; + tas->total_txpwr = linear * RTW89_TAS_TXPWR_WINDOW; + tas->window_size = RTW89_TAS_TXPWR_WINDOW; + tas->txpwr_head_idx = 0; + tas->txpwr_tail_idx = RTW89_TAS_TXPWR_WINDOW - 1; + tas->tx_ratio_idx = 0; tas->state = RTW89_TAS_STATE_DPR_OFF; -} + tas->backup_state = RTW89_TAS_STATE_DPR_OFF; + tas->keep_history = true; -static const struct rtw89_reg_def txpwr_regs[] = { - {R_PATH0_TXPWR, B_PATH0_TXPWR}, - {R_PATH1_TXPWR, B_PATH1_TXPWR}, -}; + rtw89_debug(rtwdev, RTW89_DBG_SAR, + "tas: band: %u, freq: %u\n", chan->band_type, chan->freq); +} void rtw89_tas_track(struct rtw89_dev *rtwdev) { - struct rtw89_env_monitor_info *env = &rtwdev->env_monitor; - const enum rtw89_sar_sources src = rtwdev->sar.src; - u8 max_nss_num = rtwdev->chip->rf_path_num; struct rtw89_tas_info *tas = &rtwdev->tas; - s16 tmp, txpwr, instant_txpwr = 0; - u32 val; - int i; + struct rtw89_hal *hal = &rtwdev->hal; + s32 cfg; - if (!tas->enable || src == RTW89_SAR_SOURCE_NONE) + if (hal->disabled_dm_bitmap 
& BIT(RTW89_DM_TAS)) return; - if (env->ccx_watchdog_result != RTW89_PHY_ENV_MON_IFS_CLM) + if (!rtw89_tas_is_active(rtwdev)) return; - for (i = 0; i < max_nss_num; i++) { - val = rtw89_phy_read32_mask(rtwdev, txpwr_regs[i].addr, - txpwr_regs[i].mask); - tmp = sign_extend32(val, 8); - if (tmp <= 0) - return; - instant_txpwr += tmp; + if (!rtw89_tas_query_sar_config(rtwdev, &cfg) || tas->block_regd) { + rtw89_tas_state_update(rtwdev, RTW89_TAS_STATE_STATIC_SAR); + return; } - instant_txpwr /= max_nss_num; - /* in unit of 0.25 dBm multiply by percentage */ - txpwr = instant_txpwr * env->ifs_clm_tx_ratio; - tas->total_txpwr += txpwr - tas->txpwr_history[tas->cur_idx]; - tas->txpwr_history[tas->cur_idx] = txpwr; - rtw89_debug(rtwdev, RTW89_DBG_SAR, - "instant_txpwr: %d, tx_ratio: %d, txpwr: %d\n", - instant_txpwr, env->ifs_clm_tx_ratio, txpwr); + if (tas->pause) + return; - tas->cur_idx = (tas->cur_idx + 1) % RTW89_TAS_MAX_WINDOW; + rtw89_tas_window_update(rtwdev); + rtw89_tas_history_update(rtwdev); + rtw89_tas_rolling_average(rtwdev); +} - rtw89_tas_state_update(rtwdev); +void rtw89_tas_scan(struct rtw89_dev *rtwdev, bool start) +{ + struct rtw89_tas_info *tas = &rtwdev->tas; + s32 cfg; + + if (!rtw89_tas_is_active(rtwdev)) + return; + + if (!rtw89_tas_query_sar_config(rtwdev, &cfg)) + return; + + if (start) { + tas->backup_state = tas->state; + rtw89_tas_state_update(rtwdev, RTW89_TAS_STATE_STATIC_SAR); + } else { + rtw89_tas_state_update(rtwdev, tas->backup_state); + } +} + +void rtw89_tas_chanctx_cb(struct rtw89_dev *rtwdev, + enum rtw89_chanctx_state state) +{ + struct rtw89_tas_info *tas = &rtwdev->tas; + s32 cfg; + + if (!rtw89_tas_is_active(rtwdev)) + return; + + if (!rtw89_tas_query_sar_config(rtwdev, &cfg)) + return; + + switch (state) { + case RTW89_CHANCTX_STATE_MCC_START: + tas->pause = true; + rtw89_tas_state_update(rtwdev, RTW89_TAS_STATE_STATIC_SAR); + break; + case RTW89_CHANCTX_STATE_MCC_STOP: + tas->pause = false; + break; + default: + break; + } } +EXPORT_SYMBOL(rtw89_tas_chanctx_cb); diff --git a/drivers/net/wireless/realtek/rtw89/sar.h b/drivers/net/wireless/realtek/rtw89/sar.h index 4ae081d2d3b46..0df1661db9a80 100644 --- a/drivers/net/wireless/realtek/rtw89/sar.h +++ b/drivers/net/wireless/realtek/rtw89/sar.h @@ -19,12 +19,16 @@ struct rtw89_sar_handler { extern const struct cfg80211_sar_capa rtw89_sar_capa; s8 rtw89_query_sar(struct rtw89_dev *rtwdev, u32 center_freq); -void rtw89_print_sar(struct seq_file *m, struct rtw89_dev *rtwdev, u32 center_freq); -void rtw89_print_tas(struct seq_file *m, struct rtw89_dev *rtwdev); +int rtw89_print_sar(struct rtw89_dev *rtwdev, char *buf, size_t bufsz, + u32 center_freq); +int rtw89_print_tas(struct rtw89_dev *rtwdev, char *buf, size_t bufsz); int rtw89_ops_set_sar_specs(struct ieee80211_hw *hw, const struct cfg80211_sar_specs *sar); void rtw89_tas_init(struct rtw89_dev *rtwdev); -void rtw89_tas_reset(struct rtw89_dev *rtwdev); +void rtw89_tas_reset(struct rtw89_dev *rtwdev, bool force); void rtw89_tas_track(struct rtw89_dev *rtwdev); +void rtw89_tas_scan(struct rtw89_dev *rtwdev, bool start); +void rtw89_tas_chanctx_cb(struct rtw89_dev *rtwdev, + enum rtw89_chanctx_state state); #endif diff --git a/drivers/net/wireless/realtek/rtw89/ser.c b/drivers/net/wireless/realtek/rtw89/ser.c index 26a944d3b6727..0740e303680cf 100644 --- a/drivers/net/wireless/realtek/rtw89/ser.c +++ b/drivers/net/wireless/realtek/rtw89/ser.c @@ -156,9 +156,9 @@ static void ser_state_run(struct rtw89_ser *ser, u8 evt) rtw89_debug(rtwdev, RTW89_DBG_SER, 
"ser: %s receive %s\n", ser_st_name(ser), ser_ev_name(ser, evt)); - mutex_lock(&rtwdev->mutex); + wiphy_lock(rtwdev->hw->wiphy); rtw89_leave_lps(rtwdev); - mutex_unlock(&rtwdev->mutex); + wiphy_unlock(rtwdev->hw->wiphy); ser->st_tbl[ser->state].st_func(ser, evt); } @@ -483,10 +483,13 @@ static void ser_l1_reset_pre_st_hdl(struct rtw89_ser *ser, u8 evt) static void ser_reset_trx_st_hdl(struct rtw89_ser *ser, u8 evt) { struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser); + struct wiphy *wiphy = rtwdev->hw->wiphy; switch (evt) { case SER_EV_STATE_IN: - cancel_delayed_work_sync(&rtwdev->track_work); + wiphy_lock(wiphy); + wiphy_delayed_work_cancel(wiphy, &rtwdev->track_work); + wiphy_unlock(wiphy); drv_stop_tx(ser); if (hal_stop_dma(ser)) { @@ -517,8 +520,8 @@ static void ser_reset_trx_st_hdl(struct rtw89_ser *ser, u8 evt) hal_enable_dma(ser); drv_resume_rx(ser); drv_resume_tx(ser); - ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->track_work, - RTW89_TRACK_WORK_PERIOD); + wiphy_delayed_work_queue(wiphy, &rtwdev->track_work, + RTW89_TRACK_WORK_PERIOD); break; default: @@ -708,9 +711,9 @@ static void ser_l2_reset_st_hdl(struct rtw89_ser *ser, u8 evt) switch (evt) { case SER_EV_STATE_IN: - mutex_lock(&rtwdev->mutex); + wiphy_lock(rtwdev->hw->wiphy); ser_l2_reset_st_pre_hdl(ser); - mutex_unlock(&rtwdev->mutex); + wiphy_unlock(rtwdev->hw->wiphy); ieee80211_restart_hw(rtwdev->hw); ser_set_alarm(ser, SER_RECFG_TIMEOUT, SER_EV_L2_RECFG_TIMEOUT); diff --git a/drivers/net/wireless/realtek/rtw89/util.c b/drivers/net/wireless/realtek/rtw89/util.c index e71956ce98537..073714db26f24 100644 --- a/drivers/net/wireless/realtek/rtw89/util.c +++ b/drivers/net/wireless/realtek/rtw89/util.c @@ -4,103 +4,159 @@ #include "util.h" -#define FRAC_ROWS 3 -#define FRAC_ROW_MAX (FRAC_ROWS - 1) -#define NORM_ROW_MIN FRAC_ROWS - -static const u32 db_invert_table[12][8] = { - /* rows 0~2 in unit of U(32,3) */ - {10, 13, 16, 20, 25, 32, 40, 50}, - {64, 80, 101, 128, 160, 201, 256, 318}, - {401, 505, 635, 800, 1007, 1268, 1596, 2010}, - /* rows 3~11 in unit of U(32,0) */ - {316, 398, 501, 631, 794, 1000, 1259, 1585}, - {1995, 2512, 3162, 3981, 5012, 6310, 7943, 10000}, - {12589, 15849, 19953, 25119, 31623, 39811, 50119, 63098}, - {79433, 100000, 125893, 158489, 199526, 251189, 316228, 398107}, - {501187, 630957, 794328, 1000000, 1258925, 1584893, 1995262, 2511886}, - {3162278, 3981072, 5011872, 6309573, 7943282, 1000000, 12589254, - 15848932}, - {19952623, 25118864, 31622777, 39810717, 50118723, 63095734, 79432823, - 100000000}, - {125892541, 158489319, 199526232, 251188643, 316227766, 398107171, - 501187234, 630957345}, - {794328235, 1000000000, 1258925412, 1584893192, 1995262315, 2511886432U, - 3162277660U, 3981071706U}, +#define RTW89_DBM_QUARTER_FACTOR 2 +#define RTW89_MIN_DBM (-41.25 * (1 << RTW89_DBM_QUARTER_FACTOR)) +#define RTW89_MAX_DBM (96 * (1 << RTW89_DBM_QUARTER_FACTOR)) +#define RTW89_DB_INVERT_TABLE_OFFSET (-RTW89_MIN_DBM) + +static const u64 db_invert_table[] = { + /* in unit of 0.000001 */ + 75, 79, 84, 89, 94, 100, 106, 112, 119, 126, 133, 141, 150, 158, 168, 178, 188, + 200, 211, 224, 237, 251, 266, 282, 299, 316, 335, 355, 376, 398, 422, 447, 473, + 501, 531, 562, 596, 631, 668, 708, 750, 794, 841, 891, 944, 1000, 1059, 1122, 1189, + 1259, 1334, 1413, 1496, 1585, 1679, 1778, 1884, 1995, 2113, 2239, 2371, 2512, 2661, + 2818, 2985, 3162, 3350, 3548, 3758, 3981, 4217, 4467, 4732, 5012, 5309, 5623, 5957, + 6310, 6683, 7079, 7499, 7943, 8414, 8913, 9441, 10000, 10593, 11220, 11885, 12589, + 
13335, 14125, 14962, 15849, 16788, 17783, 18836, 19953, 21135, 22387, 23714, 25119, + 26607, 28184, 29854, 31623, 33497, 35481, 37584, 39811, 42170, 44668, 47315, 50119, + 53088, 56234, 59566, 63096, 66834, 70795, 74989, 79433, 84140, 89125, 94406, 100000, + 105925, 112202, 118850, 125893, 133352, 141254, 149624, 158489, 167880, 177828, + 188365, 199526, 211349, 223872, 237137, 251189, 266073, 281838, 298538, 316228, + 334965, 354813, 375837, 398107, 421697, 446684, 473151, 501187, 530884, 562341, + 595662, 630957, 668344, 707946, 749894, 794328, 841395, 891251, 944061, 1000000, + 1059254, 1122018, 1188502, 1258925, 1333521, 1412538, 1496236, 1584893, 1678804, + 1778279, 1883649, 1995262, 2113489, 2238721, 2371374, 2511886, 2660725, 2818383, + 2985383, 3162278, 3349654, 3548134, 3758374, 3981072, 4216965, 4466836, 4731513, + 5011872, 5308844, 5623413, 5956621, 6309573, 6683439, 7079458, 7498942, 7943282, + 8413951, 8912509, 9440609, 10000000, 10592537, 11220185, 11885022, 12589254, + 13335214, 14125375, 14962357, 15848932, 16788040, 17782794, 18836491, 19952623, + 21134890, 22387211, 23713737, 25118864, 26607251, 28183829, 29853826, 31622777, + 33496544, 35481339, 37583740, 39810717, 42169650, 44668359, 47315126, 50118723, + 53088444, 56234133, 59566214, 63095734, 66834392, 70794578, 74989421, 79432823, + 84139514, 89125094, 94406088, 100000000, 105925373, 112201845, 118850223, 125892541, + 133352143, 141253754, 149623566, 158489319, 167880402, 177827941, 188364909, 199526231, + 211348904, 223872114, 237137371, 251188643, 266072506, 281838293, 298538262, 316227766, + 334965439, 354813389, 375837404, 398107171, 421696503, 446683592, 473151259, 501187234, + 530884444, 562341325, 595662144, 630957344, 668343918, 707945784, 749894209, 794328235, + 841395142, 891250938, 944060876, 1000000000, 1059253725, 1122018454, 1188502227, + 1258925412, 1333521432, 1412537545, 1496235656, 1584893192, 1678804018, 1778279410, + 1883649089, 1995262315, 2113489040, 2238721139, 2371373706, 2511886432, 2660725060, + 2818382931, 2985382619, 3162277660, 3349654392, 3548133892, 3758374043, 3981071706, + 4216965034, 4466835922ULL, 4731512590ULL, 5011872336ULL, 5308844442ULL, 5623413252ULL, + 5956621435ULL, 6309573445ULL, 6683439176ULL, 7079457844ULL, 7498942093ULL, + 7943282347ULL, 8413951416ULL, 8912509381ULL, 9440608763ULL, 10000000000ULL, + 10592537252ULL, 11220184543ULL, 11885022274ULL, 12589254118ULL, 13335214322ULL, + 14125375446ULL, 14962356561ULL, 15848931925ULL, 16788040181ULL, 17782794100ULL, + 18836490895ULL, 19952623150ULL, 21134890398ULL, 22387211386ULL, 23713737057ULL, + 25118864315ULL, 26607250598ULL, 28183829313ULL, 29853826189ULL, 31622776602ULL, + 33496543916ULL, 35481338923ULL, 37583740429ULL, 39810717055ULL, 42169650343ULL, + 44668359215ULL, 47315125896ULL, 50118723363ULL, 53088444423ULL, 56234132519ULL, + 59566214353ULL, 63095734448ULL, 66834391757ULL, 70794578438ULL, 74989420933ULL, + 79432823472ULL, 84139514165ULL, 89125093813ULL, 94406087629ULL, 100000000000ULL, + 105925372518ULL, 112201845430ULL, 118850222744ULL, 125892541179ULL, 133352143216ULL, + 141253754462ULL, 149623565609ULL, 158489319246ULL, 167880401812ULL, 177827941004ULL, + 188364908949ULL, 199526231497ULL, 211348903984ULL, 223872113857ULL, 237137370566ULL, + 251188643151ULL, 266072505980ULL, 281838293126ULL, 298538261892ULL, 316227766017ULL, + 334965439158ULL, 354813389234ULL, 375837404288ULL, 398107170553ULL, 421696503429ULL, + 446683592151ULL, 473151258961ULL, 501187233627ULL, 530884444231ULL, 562341325190ULL, + 595662143529ULL, 
630957344480ULL, 668343917569ULL, 707945784384ULL, 749894209332ULL, + 794328234724ULL, 841395141645ULL, 891250938134ULL, 944060876286ULL, 1000000000000ULL, + 1059253725177ULL, 1122018454302ULL, 1188502227437ULL, 1258925411794ULL, + 1333521432163ULL, 1412537544623ULL, 1496235656094ULL, 1584893192461ULL, + 1678804018123ULL, 1778279410039ULL, 1883649089490ULL, 1995262314969ULL, + 2113489039837ULL, 2238721138568ULL, 2371373705662ULL, 2511886431510ULL, + 2660725059799ULL, 2818382931264ULL, 2985382618918ULL, 3162277660168ULL, + 3349654391578ULL, 3548133892336ULL, 3758374042884ULL, 3981071705535ULL, + 4216965034286ULL, 4466835921510ULL, 4731512589615ULL, 5011872336273ULL, + 5308844442310ULL, 5623413251904ULL, 5956621435290ULL, 6309573444802ULL, + 6683439175686ULL, 7079457843841ULL, 7498942093325ULL, 7943282347243ULL, + 8413951416452ULL, 8912509381337ULL, 9440608762859ULL, 10000000000000ULL, + 10592537251773ULL, 11220184543020ULL, 11885022274370ULL, 12589254117942ULL, + 13335214321633ULL, 14125375446228ULL, 14962356560944ULL, 15848931924611ULL, + 16788040181226ULL, 17782794100389ULL, 18836490894898ULL, 19952623149689ULL, + 21134890398367ULL, 22387211385683ULL, 23713737056617ULL, 25118864315096ULL, + 26607250597988ULL, 28183829312645ULL, 29853826189180ULL, 31622776601684ULL, + 33496543915783ULL, 35481338923358ULL, 37583740428845ULL, 39810717055350ULL, + 42169650342858ULL, 44668359215096ULL, 47315125896148ULL, 50118723362727ULL, + 53088444423099ULL, 56234132519035ULL, 59566214352901ULL, 63095734448019ULL, + 66834391756862ULL, 70794578438414ULL, 74989420933246ULL, 79432823472428ULL, + 84139514164520ULL, 89125093813375ULL, 94406087628593ULL, 100000000000000ULL, + 105925372517729ULL, 112201845430197ULL, 118850222743702ULL, 125892541179417ULL, + 133352143216332ULL, 141253754462276ULL, 149623565609444ULL, 158489319246111ULL, + 167880401812256ULL, 177827941003893ULL, 188364908948981ULL, 199526231496888ULL, + 211348903983664ULL, 223872113856834ULL, 237137370566166ULL, 251188643150958ULL, + 266072505979882ULL, 281838293126446ULL, 298538261891796ULL, 316227766016838ULL, + 334965439157829ULL, 354813389233577ULL, 375837404288444ULL, 398107170553497ULL, + 421696503428583ULL, 446683592150964ULL, 473151258961482ULL, 501187233627272ULL, + 530884444230989ULL, 562341325190350ULL, 595662143529011ULL, 630957344480196ULL, + 668343917568615ULL, 707945784384138ULL, 749894209332456ULL, 794328234724284ULL, + 841395141645198ULL, 891250938133745ULL, 944060876285923ULL, 1000000000000000ULL, + 1059253725177290ULL, 1122018454301970ULL, 1188502227437020ULL, 1258925411794170ULL, + 1333521432163330ULL, 1412537544622760ULL, 1496235656094440ULL, 1584893192461110ULL, + 1678804018122560ULL, 1778279410038920ULL, 1883649089489810ULL, 1995262314968890ULL, + 2113489039836650ULL, 2238721138568340ULL, 2371373705661660ULL, 2511886431509590ULL, + 2660725059798820ULL, 2818382931264460ULL, 2985382618917960ULL, 3162277660168380ULL, + 3349654391578280ULL, 3548133892335770ULL, 3758374042884440ULL, 3981071705534970ULL }; -u32 rtw89_linear_2_db(u64 val) +s32 rtw89_linear_to_db_quarter(u64 val) { - u8 i, j; - u32 dB; - - for (i = 0; i < 12; i++) { - for (j = 0; j < 8; j++) { - if (i <= FRAC_ROW_MAX && - (val << RTW89_LINEAR_FRAC_BITS) <= db_invert_table[i][j]) - goto cnt; - else if (i > FRAC_ROW_MAX && val <= db_invert_table[i][j]) - goto cnt; - } - } + int r = ARRAY_SIZE(db_invert_table) - 1; + int l = 0; + int m; - return 96; /* maximum 96 dB */ + while (l <= r) { + m = l + (r - l) / 2; -cnt: - /* special cases */ - if (j == 0 && i == 0) - goto 
end; + if (db_invert_table[m] == val) + return m - (s32)RTW89_DB_INVERT_TABLE_OFFSET; - if (i == NORM_ROW_MIN && j == 0) { - if (db_invert_table[NORM_ROW_MIN][0] - val > - val - (db_invert_table[FRAC_ROW_MAX][7] >> RTW89_LINEAR_FRAC_BITS)) { - i = FRAC_ROW_MAX; - j = 7; - } - goto end; + if (db_invert_table[m] > val) + r = m - 1; + else + l = m + 1; } - if (i <= FRAC_ROW_MAX) - val <<= RTW89_LINEAR_FRAC_BITS; - - /* compare difference to get precise dB */ - if (j == 0) { - if (db_invert_table[i][j] - val > - val - db_invert_table[i - 1][7]) { - i--; - j = 7; - } - } else { - if (db_invert_table[i][j] - val > - val - db_invert_table[i][j - 1]) { - j--; - } - } -end: - dB = (i << 3) + j + 1; + if (l >= ARRAY_SIZE(db_invert_table)) + return RTW89_MAX_DBM; + else if (r < 0) + return RTW89_MIN_DBM; + else if (val - db_invert_table[r] <= db_invert_table[l] - val) + return r - (s32)RTW89_DB_INVERT_TABLE_OFFSET; + else + return l - (s32)RTW89_DB_INVERT_TABLE_OFFSET; +} +EXPORT_SYMBOL(rtw89_linear_to_db_quarter); - return dB; +s32 rtw89_linear_to_db(u64 val) +{ + return rtw89_linear_to_db_quarter(val) >> RTW89_DBM_QUARTER_FACTOR; } -EXPORT_SYMBOL(rtw89_linear_2_db); +EXPORT_SYMBOL(rtw89_linear_to_db); -u64 rtw89_db_2_linear(u32 db) +u64 rtw89_db_quarter_to_linear(s32 db) { - u64 linear; - u8 i, j; + /* supported range -41.25 to 96 dBm, in unit of 0.25 dBm */ + db = clamp_t(s32, db, RTW89_MIN_DBM, RTW89_MAX_DBM); + db += (s32)RTW89_DB_INVERT_TABLE_OFFSET; - if (db > 96) - db = 96; - else if (db < 1) - return 1; + return db_invert_table[db]; +} +EXPORT_SYMBOL(rtw89_db_quarter_to_linear); - i = (db - 1) >> 3; - j = (db - 1) & 0x7; +u64 rtw89_db_to_linear(s32 db) +{ + return rtw89_db_quarter_to_linear(db << RTW89_DBM_QUARTER_FACTOR); +} +EXPORT_SYMBOL(rtw89_db_to_linear); - linear = db_invert_table[i][j]; +void rtw89_might_trailing_ellipsis(char *buf, size_t size, ssize_t used) +{ + static const char ellipsis[] = "..."; - if (i >= NORM_ROW_MIN) - linear = linear << RTW89_LINEAR_FRAC_BITS; + /* length of null terminator isn't included in 'used' */ + if (used + 1 < size || size < sizeof(ellipsis)) + return; - return linear; + memcpy(buf + size - sizeof(ellipsis), ellipsis, sizeof(ellipsis)); } -EXPORT_SYMBOL(rtw89_db_2_linear); diff --git a/drivers/net/wireless/realtek/rtw89/util.h b/drivers/net/wireless/realtek/rtw89/util.h index e669544cafd3f..bd08495301e45 100644 --- a/drivers/net/wireless/realtek/rtw89/util.h +++ b/drivers/net/wireless/realtek/rtw89/util.h @@ -6,13 +6,11 @@ #include "core.h" -#define RTW89_LINEAR_FRAC_BITS 3 - #define rtw89_iterate_vifs_bh(rtwdev, iterator, data) \ ieee80211_iterate_active_interfaces_atomic((rtwdev)->hw, \ IEEE80211_IFACE_ITER_NORMAL, iterator, data) -/* call this function with rtwdev->mutex is held */ +/* call this function with the wiphy mutex held */ #define rtw89_for_each_rtwvif(rtwdev, rtwvif) \ list_for_each_entry(rtwvif, &(rtwdev)->rtwvifs_list, list) @@ -25,7 +23,7 @@ static inline bool rtw89_rtwvif_in_list(struct rtw89_dev *rtwdev, { struct rtw89_vif *rtwvif; - lockdep_assert_held(&rtwdev->mutex); + lockdep_assert_wiphy(rtwdev->hw->wiphy); rtw89_for_each_rtwvif(rtwdev, rtwvif) if (rtwvif == new) @@ -75,7 +73,10 @@ static inline void ether_addr_copy_mask(u8 *dst, const u8 *src, u8 mask) } } -u32 rtw89_linear_2_db(u64 linear); -u64 rtw89_db_2_linear(u32 db); +s32 rtw89_linear_to_db_quarter(u64 val); +s32 rtw89_linear_to_db(u64 val); +u64 rtw89_db_quarter_to_linear(s32 db); +u64 rtw89_db_to_linear(s32 db); +void rtw89_might_trailing_ellipsis(char *buf, size_t 
size, ssize_t used); #endif diff --git a/drivers/net/wireless/realtek/rtw89/wow.c b/drivers/net/wireless/realtek/rtw89/wow.c index 01754d031bb43..17eee58503cb3 100644 --- a/drivers/net/wireless/realtek/rtw89/wow.c +++ b/drivers/net/wireless/realtek/rtw89/wow.c @@ -604,6 +604,8 @@ static struct ieee80211_key_conf *rtw89_wow_gtk_rekey(struct rtw89_dev *rtwdev, struct ieee80211_key_conf *key; u8 sz; + lockdep_assert_wiphy(rtwdev->hw->wiphy); + cipher_info = rtw89_cipher_alg_recognize(cipher); sz = struct_size(rekey_conf, key, cipher_info->len); rekey_conf = kmalloc(sz, GFP_KERNEL); @@ -616,15 +618,10 @@ static struct ieee80211_key_conf *rtw89_wow_gtk_rekey(struct rtw89_dev *rtwdev, memcpy(rekey_conf->key, gtk, flex_array_size(rekey_conf, key, cipher_info->len)); - /* ieee80211_gtk_rekey_add() will call set_key(), therefore we - * need to unlock mutex - */ - mutex_unlock(&rtwdev->mutex); if (ieee80211_vif_is_mld(wow_vif)) key = ieee80211_gtk_rekey_add(wow_vif, rekey_conf, rtwvif_link->link_id); else key = ieee80211_gtk_rekey_add(wow_vif, rekey_conf, -1); - mutex_lock(&rtwdev->mutex); kfree(rekey_conf); if (IS_ERR(key)) { diff --git a/drivers/net/wireless/silabs/wfx/bus.h b/drivers/net/wireless/silabs/wfx/bus.h index ccadfdd6873ce..79edaef20881a 100644 --- a/drivers/net/wireless/silabs/wfx/bus.h +++ b/drivers/net/wireless/silabs/wfx/bus.h @@ -28,6 +28,7 @@ struct wfx_hwbus_ops { void (*lock)(void *bus_priv); void (*unlock)(void *bus_priv); size_t (*align_size)(void *bus_priv, size_t size); + void (*set_wakeup)(void *priv, bool enabled); }; extern struct sdio_driver wfx_sdio_driver; diff --git a/drivers/net/wireless/silabs/wfx/bus_sdio.c b/drivers/net/wireless/silabs/wfx/bus_sdio.c index f290eecde773f..ab0793b9908f4 100644 --- a/drivers/net/wireless/silabs/wfx/bus_sdio.c +++ b/drivers/net/wireless/silabs/wfx/bus_sdio.c @@ -14,6 +14,7 @@ #include #include #include +#include #include "bus.h" #include "wfx.h" @@ -172,6 +173,13 @@ static size_t wfx_sdio_align_size(void *priv, size_t size) return sdio_align_size(bus->func, size); } +static void wfx_sdio_set_wakeup(void *priv, bool enabled) +{ + struct wfx_sdio_priv *bus = priv; + + device_set_wakeup_enable(&bus->func->dev, enabled); +} + static const struct wfx_hwbus_ops wfx_sdio_hwbus_ops = { .copy_from_io = wfx_sdio_copy_from_io, .copy_to_io = wfx_sdio_copy_to_io, @@ -180,6 +188,7 @@ static const struct wfx_hwbus_ops wfx_sdio_hwbus_ops = { .lock = wfx_sdio_lock, .unlock = wfx_sdio_unlock, .align_size = wfx_sdio_align_size, + .set_wakeup = wfx_sdio_set_wakeup, }; static const struct of_device_id wfx_sdio_of_match[] = { @@ -191,9 +200,48 @@ static const struct of_device_id wfx_sdio_of_match[] = { }; MODULE_DEVICE_TABLE(of, wfx_sdio_of_match); +static int wfx_sdio_suspend(struct device *dev) +{ + struct sdio_func *func = dev_to_sdio_func(dev); + struct wfx_sdio_priv *bus = sdio_get_drvdata(func); + int ret; + + if (!device_may_wakeup(dev)) + return 0; + + flush_work(&bus->core->hif.bh); + /* Either "wakeup-source" attribute or out-of-band IRQ is required for + * WoWLAN + */ + if (bus->of_irq) { + ret = enable_irq_wake(bus->of_irq); + if (ret) + return ret; + } else { + ret = sdio_set_host_pm_flags(func, MMC_PM_WAKE_SDIO_IRQ); + if (ret) + return ret; + } + return sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER); +} + +static int wfx_sdio_resume(struct device *dev) +{ + struct sdio_func *func = dev_to_sdio_func(dev); + struct wfx_sdio_priv *bus = sdio_get_drvdata(func); + + if (!device_may_wakeup(dev)) + return 0; + if (bus->of_irq) + return 
disable_irq_wake(bus->of_irq); + else + return 0; +} + static int wfx_sdio_probe(struct sdio_func *func, const struct sdio_device_id *id) { const struct wfx_platform_data *pdata = of_device_get_match_data(&func->dev); + mmc_pm_flag_t pm_flag = sdio_get_host_pm_caps(func); struct device_node *np = func->dev.of_node; struct wfx_sdio_priv *bus; int ret; @@ -235,6 +283,9 @@ static int wfx_sdio_probe(struct sdio_func *func, const struct sdio_device_id *i if (ret) goto sdio_release; + if (pm_flag & MMC_PM_KEEP_POWER) + device_set_wakeup_capable(&func->dev, true); + return 0; sdio_release: @@ -261,6 +312,8 @@ static const struct sdio_device_id wfx_sdio_ids[] = { }; MODULE_DEVICE_TABLE(sdio, wfx_sdio_ids); +static DEFINE_SIMPLE_DEV_PM_OPS(wfx_sdio_pm_ops, wfx_sdio_suspend, wfx_sdio_resume); + struct sdio_driver wfx_sdio_driver = { .name = "wfx-sdio", .id_table = wfx_sdio_ids, @@ -268,5 +321,6 @@ struct sdio_driver wfx_sdio_driver = { .remove = wfx_sdio_remove, .drv = { .of_match_table = wfx_sdio_of_match, + .pm = &wfx_sdio_pm_ops, } }; diff --git a/drivers/net/wireless/silabs/wfx/bus_spi.c b/drivers/net/wireless/silabs/wfx/bus_spi.c index 160b90114aad7..45ee19e1ecbf2 100644 --- a/drivers/net/wireless/silabs/wfx/bus_spi.c +++ b/drivers/net/wireless/silabs/wfx/bus_spi.c @@ -13,6 +13,7 @@ #include #include #include +#include #include "bus.h" #include "wfx.h" @@ -179,6 +180,13 @@ static size_t wfx_spi_align_size(void *priv, size_t size) return ALIGN(size, 4); } +static void wfx_spi_set_wakeup(void *priv, bool enabled) +{ + struct wfx_spi_priv *bus = priv; + + device_set_wakeup_enable(&bus->func->dev, enabled); +} + static const struct wfx_hwbus_ops wfx_spi_hwbus_ops = { .copy_from_io = wfx_spi_copy_from_io, .copy_to_io = wfx_spi_copy_to_io, @@ -187,8 +195,29 @@ static const struct wfx_hwbus_ops wfx_spi_hwbus_ops = { .lock = wfx_spi_lock, .unlock = wfx_spi_unlock, .align_size = wfx_spi_align_size, + .set_wakeup = wfx_spi_set_wakeup, }; +static int wfx_spi_suspend(struct device *dev) +{ + struct spi_device *func = to_spi_device(dev); + struct wfx_spi_priv *bus = spi_get_drvdata(func); + + if (!device_may_wakeup(dev)) + return 0; + flush_work(&bus->core->hif.bh); + return enable_irq_wake(func->irq); +} + +static int wfx_spi_resume(struct device *dev) +{ + struct spi_device *func = to_spi_device(dev); + + if (!device_may_wakeup(dev)) + return 0; + return disable_irq_wake(func->irq); +} + static int wfx_spi_probe(struct spi_device *func) { struct wfx_platform_data *pdata; @@ -239,7 +268,12 @@ static int wfx_spi_probe(struct spi_device *func) if (!bus->core) return -EIO; - return wfx_probe(bus->core); + ret = wfx_probe(bus->core); + if (ret) + return ret; + + device_set_wakeup_capable(&func->dev, true); + return 0; } static void wfx_spi_remove(struct spi_device *func) @@ -273,12 +307,15 @@ static const struct of_device_id wfx_spi_of_match[] = { MODULE_DEVICE_TABLE(of, wfx_spi_of_match); #endif +static DEFINE_SIMPLE_DEV_PM_OPS(wfx_spi_pm_ops, wfx_spi_suspend, wfx_spi_resume); + struct spi_driver wfx_spi_driver = { + .id_table = wfx_spi_id, + .probe = wfx_spi_probe, + .remove = wfx_spi_remove, .driver = { .name = "wfx-spi", .of_match_table = of_match_ptr(wfx_spi_of_match), + .pm = &wfx_spi_pm_ops, }, - .id_table = wfx_spi_id, - .probe = wfx_spi_probe, - .remove = wfx_spi_remove, }; diff --git a/drivers/net/wireless/silabs/wfx/main.c b/drivers/net/wireless/silabs/wfx/main.c index 64441c8bc4606..a61128debbadc 100644 --- a/drivers/net/wireless/silabs/wfx/main.c +++ b/drivers/net/wireless/silabs/wfx/main.c @@ 
-121,6 +121,12 @@ static const struct ieee80211_iface_combination wfx_iface_combinations[] = { } }; +#ifdef CONFIG_PM +static const struct wiphy_wowlan_support wfx_wowlan_support = { + .flags = WIPHY_WOWLAN_ANY | WIPHY_WOWLAN_DISCONNECT, +}; +#endif + static const struct ieee80211_ops wfx_ops = { .start = wfx_start, .stop = wfx_stop, @@ -153,6 +159,11 @@ static const struct ieee80211_ops wfx_ops = { .unassign_vif_chanctx = wfx_unassign_vif_chanctx, .remain_on_channel = wfx_remain_on_channel, .cancel_remain_on_channel = wfx_cancel_remain_on_channel, +#ifdef CONFIG_PM + .suspend = wfx_suspend, + .resume = wfx_resume, + .set_wakeup = wfx_set_wakeup, +#endif }; bool wfx_api_older_than(struct wfx_dev *wdev, int major, int minor) @@ -289,6 +300,9 @@ struct wfx_dev *wfx_init_common(struct device *dev, const struct wfx_platform_da NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P | NL80211_PROBE_RESP_OFFLOAD_SUPPORT_80211U; hw->wiphy->features |= NL80211_FEATURE_AP_SCAN; +#ifdef CONFIG_PM + hw->wiphy->wowlan = &wfx_wowlan_support; +#endif hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD; hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD; hw->wiphy->max_remain_on_channel_duration = 5000; diff --git a/drivers/net/wireless/silabs/wfx/sta.c b/drivers/net/wireless/silabs/wfx/sta.c index 7c04810dbf3dc..e95b9ded17d9d 100644 --- a/drivers/net/wireless/silabs/wfx/sta.c +++ b/drivers/net/wireless/silabs/wfx/sta.c @@ -10,6 +10,7 @@ #include "sta.h" #include "wfx.h" +#include "bus.h" #include "fwio.h" #include "bh.h" #include "key.h" @@ -803,6 +804,30 @@ void wfx_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) } } +#ifdef CONFIG_PM +int wfx_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan) +{ + /* FIXME: hardware also supports WIPHY_WOWLAN_MAGIC_PKT and other filters */ + if (!wowlan->any || !wowlan->disconnect) + return -EINVAL; + return 0; +} + +int wfx_resume(struct ieee80211_hw *hw) +{ + return 0; +} + +void wfx_set_wakeup(struct ieee80211_hw *hw, bool enabled) +{ + struct wfx_dev *wdev = hw->priv; + + if (enabled) + dev_info(wdev->dev, "support for WoWLAN is experimental\n"); + wdev->hwbus_ops->set_wakeup(wdev->hwbus_priv, enabled); +} +#endif + int wfx_start(struct ieee80211_hw *hw) { return 0; diff --git a/drivers/net/wireless/silabs/wfx/sta.h b/drivers/net/wireless/silabs/wfx/sta.h index 7817c7c6f3dde..8702eed5267f8 100644 --- a/drivers/net/wireless/silabs/wfx/sta.h +++ b/drivers/net/wireless/silabs/wfx/sta.h @@ -56,6 +56,9 @@ int wfx_assign_vif_chanctx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, void wfx_unassign_vif_chanctx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_bss_conf *link_conf, struct ieee80211_chanctx_conf *conf); +int wfx_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan); +int wfx_resume(struct ieee80211_hw *hw); +void wfx_set_wakeup(struct ieee80211_hw *hw, bool enabled); /* Hardware API Callbacks */ void wfx_cooling_timeout_work(struct work_struct *work); diff --git a/drivers/net/wireless/virtual/mac80211_hwsim.c b/drivers/net/wireless/virtual/mac80211_hwsim.c index cf6a331d40427..b426f3bfab281 100644 --- a/drivers/net/wireless/virtual/mac80211_hwsim.c +++ b/drivers/net/wireless/virtual/mac80211_hwsim.c @@ -4,7 +4,7 @@ * Copyright (c) 2008, Jouni Malinen * Copyright (c) 2011, Javier Lopez * Copyright (c) 2016 - 2017 Intel Deutschland GmbH - * Copyright (C) 2018 - 2024 Intel Corporation + * Copyright (C) 2018 - 2025 Intel Corporation */ /* @@ -1983,11 +1983,13 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw, 
return; } - if (sta && sta->mlo) { - if (WARN_ON(!link_sta)) { - ieee80211_free_txskb(hw, skb); - return; - } + /* Do address translations only between shared links. It is + * possible that while a non-AP MLD station and an AP MLD + * station have shared links, the frame is intended to be sent + * on a link which is not shared (for example when sending a + * probe response). + */ + if (sta && sta->mlo && link_sta) { /* address translation to link addresses on TX */ ether_addr_copy(hdr->addr1, link_sta->addr); ether_addr_copy(hdr->addr2, bss_conf->addr); @@ -5345,6 +5347,7 @@ static int mac80211_hwsim_new_radio(struct genl_info *info, ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS); ieee80211_hw_set(hw, TDLS_WIDER_BW); ieee80211_hw_set(hw, SUPPORTS_MULTI_BSSID); + ieee80211_hw_set(hw, STRICT); if (param->mlo) { hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_MLO; diff --git a/drivers/net/wireless/virtual/virt_wifi.c b/drivers/net/wireless/virtual/virt_wifi.c index 4ee3740804667..1fffeff2190ca 100644 --- a/drivers/net/wireless/virtual/virt_wifi.c +++ b/drivers/net/wireless/virtual/virt_wifi.c @@ -146,7 +146,7 @@ static void virt_wifi_inform_bss(struct wiphy *wiphy) static const struct { u8 tag; u8 len; - u8 ssid[8]; + u8 ssid[8] __nonstring; } __packed ssid = { .tag = WLAN_EID_SSID, .len = VIRT_WIFI_SSID_LEN, @@ -519,11 +519,13 @@ static rx_handler_result_t virt_wifi_rx_handler(struct sk_buff **pskb) } /* Called with rtnl lock held. */ -static int virt_wifi_newlink(struct net *src_net, struct net_device *dev, - struct nlattr *tb[], struct nlattr *data[], +static int virt_wifi_newlink(struct net_device *dev, + struct rtnl_newlink_params *params, struct netlink_ext_ack *extack) { struct virt_wifi_netdev_priv *priv = netdev_priv(dev); + struct net *link_net = rtnl_newlink_link_net(params); + struct nlattr **tb = params->tb; int err; if (!tb[IFLA_LINK]) @@ -532,7 +534,7 @@ static int virt_wifi_newlink(struct net *src_net, struct net_device *dev, netif_carrier_off(dev); priv->upperdev = dev; - priv->lowerdev = __dev_get_by_index(src_net, + priv->lowerdev = __dev_get_by_index(link_net, nla_get_u32(tb[IFLA_LINK])); if (!priv->lowerdev) diff --git a/drivers/net/wireless/zydas/zd1211rw/zd_mac.c b/drivers/net/wireless/zydas/zd1211rw/zd_mac.c index f90c33d19b399..9653dbaac3c05 100644 --- a/drivers/net/wireless/zydas/zd1211rw/zd_mac.c +++ b/drivers/net/wireless/zydas/zd1211rw/zd_mac.c @@ -21,7 +21,7 @@ struct zd_reg_alpha2_map { u32 reg; - char alpha2[2]; + char alpha2[2] __nonstring; }; static struct zd_reg_alpha2_map reg_alpha2_map[] = { diff --git a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c index 7a9c09cd4fdcf..6a7a26085fc70 100644 --- a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c +++ b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c @@ -41,6 +41,7 @@ #include #include #include +#include #include "t7xx_dpmaif.h" #include "t7xx_hif_dpmaif.h" diff --git a/drivers/net/wwan/t7xx/t7xx_pci.c b/drivers/net/wwan/t7xx/t7xx_pci.c index 02f2ec7cf4cee..8bf63f2dcbbfd 100644 --- a/drivers/net/wwan/t7xx/t7xx_pci.c +++ b/drivers/net/wwan/t7xx/t7xx_pci.c @@ -32,7 +32,6 @@ #include #include #include -#include #include #include "t7xx_mhccif.h" diff --git a/drivers/net/wwan/wwan_core.c b/drivers/net/wwan/wwan_core.c index a51e2755991af..63a47d420bc59 100644 --- a/drivers/net/wwan/wwan_core.c +++ b/drivers/net/wwan/wwan_core.c @@ -967,15 +967,18 @@ static struct net_device *wwan_rtnl_alloc(struct nlattr *tb[], return dev; } -static int wwan_rtnl_newlink(struct net *src_net, struct net_device 
*dev, - struct nlattr *tb[], struct nlattr *data[], +static int wwan_rtnl_newlink(struct net_device *dev, + struct rtnl_newlink_params *params, struct netlink_ext_ack *extack) { struct wwan_device *wwandev = wwan_dev_get_by_parent(dev->dev.parent); - u32 link_id = nla_get_u32(data[IFLA_WWAN_LINK_ID]); struct wwan_netdev_priv *priv = netdev_priv(dev); + struct nlattr **data = params->data; + u32 link_id; int ret; + link_id = nla_get_u32(data[IFLA_WWAN_LINK_ID]); + if (IS_ERR(wwandev)) return PTR_ERR(wwandev); @@ -1061,6 +1064,11 @@ static void wwan_create_default_link(struct wwan_device *wwandev, { struct nlattr *tb[IFLA_MAX + 1], *linkinfo[IFLA_INFO_MAX + 1]; struct nlattr *data[IFLA_WWAN_MAX + 1]; + struct rtnl_newlink_params params = { + .src_net = &init_net, + .tb = tb, + .data = data, + }; struct net_device *dev; struct nlmsghdr *nlh; struct sk_buff *msg; @@ -1105,7 +1113,7 @@ static void wwan_create_default_link(struct wwan_device *wwandev, if (WARN_ON(IS_ERR(dev))) goto unlock; - if (WARN_ON(wwan_rtnl_newlink(&init_net, dev, tb, data, NULL))) { + if (WARN_ON(wwan_rtnl_newlink(dev, ¶ms, NULL))) { free_netdev(dev); goto unlock; } diff --git a/drivers/nvme/host/apple.c b/drivers/nvme/host/apple.c index 1de11b722f049..8971aca41e63d 100644 --- a/drivers/nvme/host/apple.c +++ b/drivers/nvme/host/apple.c @@ -599,7 +599,8 @@ static inline void apple_nvme_handle_cqe(struct apple_nvme_queue *q, } if (!nvme_try_complete_req(req, cqe->status, cqe->result) && - !blk_mq_add_to_batch(req, iob, nvme_req(req)->status, + !blk_mq_add_to_batch(req, iob, + nvme_req(req)->status != NVME_SC_SUCCESS, apple_nvme_complete_batch)) apple_nvme_complete_rq(req); } @@ -1011,25 +1012,37 @@ static void apple_nvme_reset_work(struct work_struct *work) ret = apple_rtkit_shutdown(anv->rtk); if (ret) goto out; + + writel(0, anv->mmio_coproc + APPLE_ANS_COPROC_CPU_CONTROL); } - writel(0, anv->mmio_coproc + APPLE_ANS_COPROC_CPU_CONTROL); + /* + * Only do the soft-reset if the CPU is not running, which means either we + * or the previous stage shut it down cleanly. 
+ */ + if (!(readl(anv->mmio_coproc + APPLE_ANS_COPROC_CPU_CONTROL) & + APPLE_ANS_COPROC_CPU_CONTROL_RUN)) { - ret = reset_control_assert(anv->reset); - if (ret) - goto out; + ret = reset_control_assert(anv->reset); + if (ret) + goto out; - ret = apple_rtkit_reinit(anv->rtk); - if (ret) - goto out; + ret = apple_rtkit_reinit(anv->rtk); + if (ret) + goto out; - ret = reset_control_deassert(anv->reset); - if (ret) - goto out; + ret = reset_control_deassert(anv->reset); + if (ret) + goto out; + + writel(APPLE_ANS_COPROC_CPU_CONTROL_RUN, + anv->mmio_coproc + APPLE_ANS_COPROC_CPU_CONTROL); + + ret = apple_rtkit_boot(anv->rtk); + } else { + ret = apple_rtkit_wake(anv->rtk); + } - writel(APPLE_ANS_COPROC_CPU_CONTROL_RUN, - anv->mmio_coproc + APPLE_ANS_COPROC_CPU_CONTROL); - ret = apple_rtkit_boot(anv->rtk); if (ret) { dev_err(anv->dev, "ANS did not boot"); goto out; @@ -1516,6 +1529,7 @@ static struct apple_nvme *apple_nvme_alloc(struct platform_device *pdev) return anv; put_dev: + apple_nvme_detach_genpd(anv); put_device(anv->dev); return ERR_PTR(ret); } @@ -1549,6 +1563,7 @@ static int apple_nvme_probe(struct platform_device *pdev) nvme_uninit_ctrl(&anv->ctrl); out_put_ctrl: nvme_put_ctrl(&anv->ctrl); + apple_nvme_detach_genpd(anv); return ret; } @@ -1563,9 +1578,12 @@ static void apple_nvme_remove(struct platform_device *pdev) apple_nvme_disable(anv, true); nvme_uninit_ctrl(&anv->ctrl); - if (apple_rtkit_is_running(anv->rtk)) + if (apple_rtkit_is_running(anv->rtk)) { apple_rtkit_shutdown(anv->rtk); + writel(0, anv->mmio_coproc + APPLE_ANS_COPROC_CPU_CONTROL); + } + apple_nvme_detach_genpd(anv); } @@ -1574,8 +1592,11 @@ static void apple_nvme_shutdown(struct platform_device *pdev) struct apple_nvme *anv = platform_get_drvdata(pdev); apple_nvme_disable(anv, true); - if (apple_rtkit_is_running(anv->rtk)) + if (apple_rtkit_is_running(anv->rtk)) { apple_rtkit_shutdown(anv->rtk); + + writel(0, anv->mmio_coproc + APPLE_ANS_COPROC_CPU_CONTROL); + } } static int apple_nvme_resume(struct device *dev) @@ -1592,10 +1613,11 @@ static int apple_nvme_suspend(struct device *dev) apple_nvme_disable(anv, true); - if (apple_rtkit_is_running(anv->rtk)) + if (apple_rtkit_is_running(anv->rtk)) { ret = apple_rtkit_shutdown(anv->rtk); - writel(0, anv->mmio_coproc + APPLE_ANS_COPROC_CPU_CONTROL); + writel(0, anv->mmio_coproc + APPLE_ANS_COPROC_CPU_CONTROL); + } return ret; } diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 818d4e49aab51..8359d0aa0e44b 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -431,6 +431,12 @@ static inline void nvme_end_req_zoned(struct request *req) static inline void __nvme_end_req(struct request *req) { + if (unlikely(nvme_req(req)->status && !(req->rq_flags & RQF_QUIET))) { + if (blk_rq_is_passthrough(req)) + nvme_log_err_passthru(req); + else + nvme_log_error(req); + } nvme_end_req_zoned(req); nvme_trace_bio_complete(req); if (req->cmd_flags & REQ_NVME_MPATH) @@ -441,12 +447,6 @@ void nvme_end_req(struct request *req) { blk_status_t status = nvme_error_status(nvme_req(req)->status); - if (unlikely(nvme_req(req)->status && !(req->rq_flags & RQF_QUIET))) { - if (blk_rq_is_passthrough(req)) - nvme_log_err_passthru(req); - else - nvme_log_error(req); - } __nvme_end_req(req); blk_mq_end_request(req, status); } @@ -564,8 +564,6 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl, switch (new_state) { case NVME_CTRL_LIVE: switch (old_state) { - case NVME_CTRL_NEW: - case NVME_CTRL_RESETTING: case NVME_CTRL_CONNECTING: changed = true; fallthrough; 
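
The fc.c hunk below leans on the same controller state machine that the core.c hunk above just tightened: on connectivity loss the driver now simply calls nvme_reset_ctrl() and relies on the state machine to reject the RESETTING transition in every state where the old switch statement deliberately did nothing. A minimal user-space sketch of that gating pattern follows; the state set, change_state() and reset_ctrl() here are illustrative stand-ins, not the kernel's actual nvme_change_ctrl_state() tables:

#include <stdbool.h>
#include <stdio.h>

enum ctrl_state { ST_NEW, ST_LIVE, ST_RESETTING, ST_CONNECTING, ST_DELETING };

/* Illustrative transition gate: only NEW and LIVE may enter RESETTING. */
static bool change_state(enum ctrl_state *cur, enum ctrl_state next)
{
	bool changed = false;

	if (next == ST_RESETTING)
		changed = (*cur == ST_NEW || *cur == ST_LIVE);
	if (changed)
		*cur = next;
	return changed;
}

/* Callers may invoke this from any state; when a reset or reconnect is
 * already in flight the gate turns it into an -EBUSY-style no-op. */
static int reset_ctrl(enum ctrl_state *cur)
{
	if (!change_state(cur, ST_RESETTING))
		return -1;
	/* ...queue the actual reset work here... */
	return 0;
}

int main(void)
{
	enum ctrl_state st = ST_CONNECTING;

	printf("reset from CONNECTING: %d\n", reset_ctrl(&st)); /* refused */
	st = ST_LIVE;
	printf("reset from LIVE: %d\n", reset_ctrl(&st));       /* accepted */
	return 0;
}

Under that reading, the long switch on nvme_ctrl_state() in nvme_fc_ctrl_connectivity_loss() collapses to a single call: the cases that previously did nothing correspond to states from which the RESETTING transition is refused anyway.
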
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c index f4f1866fbd5b8..b9929a5a7f4e3 100644 --- a/drivers/nvme/host/fc.c +++ b/drivers/nvme/host/fc.c @@ -781,61 +781,12 @@ nvme_fc_abort_lsops(struct nvme_fc_rport *rport) static void nvme_fc_ctrl_connectivity_loss(struct nvme_fc_ctrl *ctrl) { - enum nvme_ctrl_state state; - unsigned long flags; - dev_info(ctrl->ctrl.device, "NVME-FC{%d}: controller connectivity lost. Awaiting " "Reconnect", ctrl->cnum); - spin_lock_irqsave(&ctrl->lock, flags); set_bit(ASSOC_FAILED, &ctrl->flags); - state = nvme_ctrl_state(&ctrl->ctrl); - spin_unlock_irqrestore(&ctrl->lock, flags); - - switch (state) { - case NVME_CTRL_NEW: - case NVME_CTRL_LIVE: - /* - * Schedule a controller reset. The reset will terminate the - * association and schedule the reconnect timer. Reconnects - * will be attempted until either the ctlr_loss_tmo - * (max_retries * connect_delay) expires or the remoteport's - * dev_loss_tmo expires. - */ - if (nvme_reset_ctrl(&ctrl->ctrl)) { - dev_warn(ctrl->ctrl.device, - "NVME-FC{%d}: Couldn't schedule reset.\n", - ctrl->cnum); - nvme_delete_ctrl(&ctrl->ctrl); - } - break; - - case NVME_CTRL_CONNECTING: - /* - * The association has already been terminated and the - * controller is attempting reconnects. No need to do anything - * futher. Reconnects will be attempted until either the - * ctlr_loss_tmo (max_retries * connect_delay) expires or the - * remoteport's dev_loss_tmo expires. - */ - break; - - case NVME_CTRL_RESETTING: - /* - * Controller is already in the process of terminating the - * association. No need to do anything further. The reconnect - * step will kick in naturally after the association is - * terminated. - */ - break; - - case NVME_CTRL_DELETING: - case NVME_CTRL_DELETING_NOIO: - default: - /* no action to take - let it delete */ - break; - } + nvme_reset_ctrl(&ctrl->ctrl); } /** @@ -3071,7 +3022,6 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl) struct nvmefc_ls_rcv_op *disls = NULL; unsigned long flags; int ret; - bool changed; ++ctrl->ctrl.nr_reconnects; @@ -3177,23 +3127,18 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl) else ret = nvme_fc_recreate_io_queues(ctrl); } + if (!ret && test_bit(ASSOC_FAILED, &ctrl->flags)) + ret = -EIO; if (ret) goto out_term_aen_ops; - spin_lock_irqsave(&ctrl->lock, flags); - if (!test_bit(ASSOC_FAILED, &ctrl->flags)) - changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE); - else + if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE)) { ret = -EIO; - spin_unlock_irqrestore(&ctrl->lock, flags); - - if (ret) goto out_term_aen_ops; + } ctrl->ctrl.nr_reconnects = 0; - - if (changed) - nvme_start_ctrl(&ctrl->ctrl); + nvme_start_ctrl(&ctrl->ctrl); return 0; /* Success */ diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c index e8930146847af..24e2c702da7a2 100644 --- a/drivers/nvme/host/ioctl.c +++ b/drivers/nvme/host/ioctl.c @@ -128,8 +128,10 @@ static int nvme_map_user_request(struct request *req, u64 ubuffer, if (!nvme_ctrl_sgl_supported(ctrl)) dev_warn_once(ctrl->device, "using unchecked data buffer\n"); if (has_metadata) { - if (!supports_metadata) - return -EINVAL; + if (!supports_metadata) { + ret = -EINVAL; + goto out; + } if (!nvme_ctrl_meta_sgl_supported(ctrl)) dev_warn_once(ctrl->device, "using unchecked metadata buffer\n"); @@ -139,8 +141,10 @@ static int nvme_map_user_request(struct request *req, u64 ubuffer, struct iov_iter iter; /* fixedbufs is only for non-vectored io */ - if (WARN_ON_ONCE(flags & NVME_IOCTL_VEC)) - return 
-EINVAL; + if (WARN_ON_ONCE(flags & NVME_IOCTL_VEC)) { + ret = -EINVAL; + goto out; + } ret = io_uring_cmd_import_fixed(ubuffer, bufflen, rq_data_dir(req), &iter, ioucmd); if (ret < 0) @@ -283,8 +287,7 @@ static bool nvme_validate_passthru_nsid(struct nvme_ctrl *ctrl, { if (ns && nsid != ns->head->ns_id) { dev_err(ctrl->device, - "%s: nsid (%u) in cmd does not match nsid (%u)" - "of namespace\n", + "%s: nsid (%u) in cmd does not match nsid (%u) of namespace\n", current->comm, nsid, ns->head->ns_id); return false; } diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 9197a5b173fdf..3ad7f197c8087 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -1130,8 +1130,9 @@ static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, trace_nvme_sq(req, cqe->sq_head, nvmeq->sq_tail); if (!nvme_try_complete_req(req, cqe->status, cqe->result) && - !blk_mq_add_to_batch(req, iob, nvme_req(req)->status, - nvme_pci_complete_batch)) + !blk_mq_add_to_batch(req, iob, + nvme_req(req)->status != NVME_SC_SUCCESS, + nvme_pci_complete_batch)) nvme_pci_complete_rq(req); } @@ -1411,9 +1412,20 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req) struct nvme_dev *dev = nvmeq->dev; struct request *abort_req; struct nvme_command cmd = { }; + struct pci_dev *pdev = to_pci_dev(dev->dev); u32 csts = readl(dev->bar + NVME_REG_CSTS); u8 opcode; + /* + * Shutdown the device immediately if we see it is disconnected. This + * unblocks PCIe error handling if the nvme driver is waiting in + * error_resume for a device that has been removed. We can't unbind the + * driver while the driver's error callback is waiting to complete, so + * we're relying on a timeout to break that deadlock if a removal + * occurs while reset work is running. + */ + if (pci_dev_is_disconnected(pdev)) + nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING); if (nvme_state_terminal(&dev->ctrl)) goto disable; @@ -1421,7 +1433,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req) * the recovery mechanism will surely fail. */ mb(); - if (pci_channel_offline(to_pci_dev(dev->dev))) + if (pci_channel_offline(pdev)) return BLK_EH_RESET_TIMER; /* @@ -1982,6 +1994,18 @@ static void nvme_map_cmb(struct nvme_dev *dev) if (offset > bar_size) return; + /* + * Controllers may support a CMB size larger than their BAR, for + * example, due to being behind a bridge. Reduce the CMB to the + * reported size of the BAR + */ + size = min(size, bar_size - offset); + + if (!IS_ALIGNED(size, memremap_compat_align()) || + !IS_ALIGNED(pci_resource_start(pdev, bar), + memremap_compat_align())) + return; + /* * Tell the controller about the host side address mapping the CMB, * and enable CMB decoding for the NVMe 1.4+ scheme: @@ -1992,17 +2016,10 @@ static void nvme_map_cmb(struct nvme_dev *dev) dev->bar + NVME_REG_CMBMSC); } - /* - * Controllers may support a CMB size larger than their BAR, - * for example, due to being behind a bridge. 
Reduce the CMB to - * the reported size of the BAR - */ - if (size > bar_size - offset) - size = bar_size - offset; - if (pci_p2pdma_add_resource(pdev, bar, size, offset)) { dev_warn(dev->ctrl.device, "failed to register the CMB\n"); + hi_lo_writeq(0, dev->bar + NVME_REG_CMBMSC); return; } @@ -3706,6 +3723,8 @@ static const struct pci_device_id nvme_id_table[] = { .driver_data = NVME_QUIRK_BOGUS_NID, }, { PCI_DEVICE(0x1cc1, 0x5350), /* ADATA XPG GAMMIX S50 */ .driver_data = NVME_QUIRK_BOGUS_NID, }, + { PCI_DEVICE(0x1dbe, 0x5216), /* Acer/INNOGRIT FA100/5216 NVMe SSD */ + .driver_data = NVME_QUIRK_BOGUS_NID, }, { PCI_DEVICE(0x1dbe, 0x5236), /* ADATA XPG GAMMIX S70 */ .driver_data = NVME_QUIRK_BOGUS_NID, }, { PCI_DEVICE(0x1e49, 0x0021), /* ZHITAI TiPro5000 NVMe SSD */ diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c index 841238f38fdda..327f3f2f5399c 100644 --- a/drivers/nvme/host/tcp.c +++ b/drivers/nvme/host/tcp.c @@ -217,6 +217,19 @@ static inline int nvme_tcp_queue_id(struct nvme_tcp_queue *queue) return queue - queue->ctrl->queues; } +static inline bool nvme_tcp_recv_pdu_supported(enum nvme_tcp_pdu_type type) +{ + switch (type) { + case nvme_tcp_c2h_term: + case nvme_tcp_c2h_data: + case nvme_tcp_r2t: + case nvme_tcp_rsp: + return true; + default: + return false; + } +} + /* * Check if the queue is TLS encrypted */ @@ -763,6 +776,40 @@ static int nvme_tcp_handle_r2t(struct nvme_tcp_queue *queue, return 0; } +static void nvme_tcp_handle_c2h_term(struct nvme_tcp_queue *queue, + struct nvme_tcp_term_pdu *pdu) +{ + u16 fes; + const char *msg; + u32 plen = le32_to_cpu(pdu->hdr.plen); + + static const char * const msg_table[] = { + [NVME_TCP_FES_INVALID_PDU_HDR] = "Invalid PDU Header Field", + [NVME_TCP_FES_PDU_SEQ_ERR] = "PDU Sequence Error", + [NVME_TCP_FES_HDR_DIGEST_ERR] = "Header Digest Error", + [NVME_TCP_FES_DATA_OUT_OF_RANGE] = "Data Transfer Out Of Range", + [NVME_TCP_FES_DATA_LIMIT_EXCEEDED] = "Data Transfer Limit Exceeded", + [NVME_TCP_FES_UNSUPPORTED_PARAM] = "Unsupported Parameter", + }; + + if (plen < NVME_TCP_MIN_C2HTERM_PLEN || + plen > NVME_TCP_MAX_C2HTERM_PLEN) { + dev_err(queue->ctrl->ctrl.device, + "Received a malformed C2HTermReq PDU (plen = %u)\n", + plen); + return; + } + + fes = le16_to_cpu(pdu->fes); + if (fes && fes < ARRAY_SIZE(msg_table)) + msg = msg_table[fes]; + else + msg = "Unknown"; + + dev_err(queue->ctrl->ctrl.device, + "Received C2HTermReq (FES = %s)\n", msg); +} + static int nvme_tcp_recv_pdu(struct nvme_tcp_queue *queue, struct sk_buff *skb, unsigned int *offset, size_t *len) { @@ -784,6 +831,25 @@ static int nvme_tcp_recv_pdu(struct nvme_tcp_queue *queue, struct sk_buff *skb, return 0; hdr = queue->pdu; + if (unlikely(hdr->hlen != sizeof(struct nvme_tcp_rsp_pdu))) { + if (!nvme_tcp_recv_pdu_supported(hdr->type)) + goto unsupported_pdu; + + dev_err(queue->ctrl->ctrl.device, + "pdu type %d has unexpected header length (%d)\n", + hdr->type, hdr->hlen); + return -EPROTO; + } + + if (unlikely(hdr->type == nvme_tcp_c2h_term)) { + /* + * C2HTermReq never includes Header or Data digests. + * Skip the checks. 
+ */ + nvme_tcp_handle_c2h_term(queue, (void *)queue->pdu); + return -EINVAL; + } + if (queue->hdr_digest) { ret = nvme_tcp_verify_hdgst(queue, queue->pdu, hdr->hlen); if (unlikely(ret)) @@ -807,10 +873,13 @@ static int nvme_tcp_recv_pdu(struct nvme_tcp_queue *queue, struct sk_buff *skb, nvme_tcp_init_recv_ctx(queue); return nvme_tcp_handle_r2t(queue, (void *)queue->pdu); default: - dev_err(queue->ctrl->ctrl.device, - "unsupported pdu type (%d)\n", hdr->type); - return -EINVAL; + goto unsupported_pdu; } + +unsupported_pdu: + dev_err(queue->ctrl->ctrl.device, + "unsupported pdu type (%d)\n", hdr->type); + return -EINVAL; } static inline void nvme_tcp_end_request(struct request *rq, u16 status) @@ -1449,8 +1518,11 @@ static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue) msg.msg_control = cbuf; msg.msg_controllen = sizeof(cbuf); } + msg.msg_flags = MSG_WAITALL; ret = kernel_recvmsg(queue->sock, &msg, &iov, 1, iov.iov_len, msg.msg_flags); + if (ret >= 0 && ret < sizeof(*icresp)) + ret = -ECONNRESET; if (ret < 0) { pr_warn("queue %d: failed to receive icresp, error %d\n", nvme_tcp_queue_id(queue), ret); @@ -1565,7 +1637,7 @@ static bool nvme_tcp_poll_queue(struct nvme_tcp_queue *queue) ctrl->io_queues[HCTX_TYPE_POLL]; } -/** +/* * Track the number of queues assigned to each cpu using a global per-cpu * counter and select the least used cpu from the mq_map. Our goal is to spread * different controllers I/O threads across different cpu cores. @@ -2653,6 +2725,7 @@ static int nvme_tcp_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob) { struct nvme_tcp_queue *queue = hctx->driver_data; struct sock *sk = queue->sock->sk; + int ret; if (!test_bit(NVME_TCP_Q_LIVE, &queue->flags)) return 0; @@ -2660,9 +2733,9 @@ static int nvme_tcp_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob) set_bit(NVME_TCP_Q_POLLING, &queue->flags); if (sk_can_busy_loop(sk) && skb_queue_empty_lockless(&sk->sk_receive_queue)) sk_busy_loop(sk, true); - nvme_tcp_try_recv(queue); + ret = nvme_tcp_try_recv(queue); clear_bit(NVME_TCP_Q_POLLING, &queue->flags); - return queue->nr_cqe; + return ret < 0 ? ret : queue->nr_cqe; } static int nvme_tcp_get_address(struct nvme_ctrl *ctrl, char *buf, int size) diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c index cdc4a09a6e8a4..2e741696f3712 100644 --- a/drivers/nvme/target/core.c +++ b/drivers/nvme/target/core.c @@ -606,6 +606,9 @@ int nvmet_ns_enable(struct nvmet_ns *ns) goto out_dev_put; } + if (percpu_ref_init(&ns->ref, nvmet_destroy_namespace, 0, GFP_KERNEL)) + goto out_pr_exit; + nvmet_ns_changed(subsys, ns->nsid); ns->enabled = true; xa_set_mark(&subsys->namespaces, ns->nsid, NVMET_NS_ENABLED); @@ -613,6 +616,9 @@ int nvmet_ns_enable(struct nvmet_ns *ns) out_unlock: mutex_unlock(&subsys->lock); return ret; +out_pr_exit: + if (ns->pr.enable) + nvmet_pr_exit_ns(ns); out_dev_put: list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid)); @@ -638,6 +644,19 @@ void nvmet_ns_disable(struct nvmet_ns *ns) mutex_unlock(&subsys->lock); + /* + * Now that we removed the namespaces from the lookup list, we + * can kill the per_cpu ref and wait for any remaining references + * to be dropped, as well as an RCU grace period for anyone only + * using the namespace under rcu_read_lock(). Note that we can't + * use call_rcu here as we need to ensure the namespaces have + * been fully destroyed before unloading the module. 
+ */ + percpu_ref_kill(&ns->ref); + synchronize_rcu(); + wait_for_completion(&ns->disable_done); + percpu_ref_exit(&ns->ref); + if (ns->pr.enable) nvmet_pr_exit_ns(ns); @@ -660,22 +679,6 @@ void nvmet_ns_free(struct nvmet_ns *ns) if (ns->nsid == subsys->max_nsid) subsys->max_nsid = nvmet_max_nsid(subsys); - mutex_unlock(&subsys->lock); - - /* - * Now that we removed the namespaces from the lookup list, we - * can kill the per_cpu ref and wait for any remaining references - * to be dropped, as well as a RCU grace period for anyone only - * using the namepace under rcu_read_lock(). Note that we can't - * use call_rcu here as we need to ensure the namespaces have - * been fully destroyed before unloading the module. - */ - percpu_ref_kill(&ns->ref); - synchronize_rcu(); - wait_for_completion(&ns->disable_done); - percpu_ref_exit(&ns->ref); - - mutex_lock(&subsys->lock); subsys->nr_namespaces--; mutex_unlock(&subsys->lock); @@ -705,9 +708,6 @@ struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid) ns->nsid = nsid; ns->subsys = subsys; - if (percpu_ref_init(&ns->ref, nvmet_destroy_namespace, 0, GFP_KERNEL)) - goto out_free; - if (ns->nsid > subsys->max_nsid) subsys->max_nsid = nsid; @@ -730,8 +730,6 @@ struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid) return ns; out_exit: subsys->max_nsid = nvmet_max_nsid(subsys); - percpu_ref_exit(&ns->ref); -out_free: kfree(ns); out_unlock: mutex_unlock(&subsys->lock); diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h index 4be8d22d2d8d4..fcf4f460dc9a4 100644 --- a/drivers/nvme/target/nvmet.h +++ b/drivers/nvme/target/nvmet.h @@ -647,7 +647,6 @@ void nvmet_subsys_disc_changed(struct nvmet_subsys *subsys, struct nvmet_host *host); void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type, u8 event_info, u8 log_page); -bool nvmet_subsys_nsid_exists(struct nvmet_subsys *subsys, u32 nsid); #define NVMET_MIN_QUEUE_SIZE 16 #define NVMET_MAX_QUEUE_SIZE 1024 @@ -784,37 +783,37 @@ u16 nvmet_report_invalid_opcode(struct nvmet_req *req); static inline bool nvmet_cc_en(u32 cc) { - return (cc >> NVME_CC_EN_SHIFT) & 0x1; + return (cc & NVME_CC_ENABLE) >> NVME_CC_EN_SHIFT; } static inline u8 nvmet_cc_css(u32 cc) { - return (cc >> NVME_CC_CSS_SHIFT) & 0x7; + return (cc & NVME_CC_CSS_MASK) >> NVME_CC_CSS_SHIFT; } static inline u8 nvmet_cc_mps(u32 cc) { - return (cc >> NVME_CC_MPS_SHIFT) & 0xf; + return (cc & NVME_CC_MPS_MASK) >> NVME_CC_MPS_SHIFT; } static inline u8 nvmet_cc_ams(u32 cc) { - return (cc >> NVME_CC_AMS_SHIFT) & 0x7; + return (cc & NVME_CC_AMS_MASK) >> NVME_CC_AMS_SHIFT; } static inline u8 nvmet_cc_shn(u32 cc) { - return (cc >> NVME_CC_SHN_SHIFT) & 0x3; + return (cc & NVME_CC_SHN_MASK) >> NVME_CC_SHN_SHIFT; } static inline u8 nvmet_cc_iosqes(u32 cc) { - return (cc >> NVME_CC_IOSQES_SHIFT) & 0xf; + return (cc & NVME_CC_IOSQES_MASK) >> NVME_CC_IOSQES_SHIFT; } static inline u8 nvmet_cc_iocqes(u32 cc) { - return (cc >> NVME_CC_IOCQES_SHIFT) & 0xf; + return (cc & NVME_CC_IOCQES_MASK) >> NVME_CC_IOCQES_SHIFT; } /* Convert a 32-bit number to a 16-bit 0's based number */ diff --git a/drivers/nvme/target/pci-epf.c b/drivers/nvme/target/pci-epf.c index ac30b42cc6221..b1e31483f1574 100644 --- a/drivers/nvme/target/pci-epf.c +++ b/drivers/nvme/target/pci-epf.c @@ -46,7 +46,7 @@ static DEFINE_MUTEX(nvmet_pci_epf_ports_mutex); /* * BAR CC register and SQ polling intervals. 
*/ -#define NVMET_PCI_EPF_CC_POLL_INTERVAL msecs_to_jiffies(5) +#define NVMET_PCI_EPF_CC_POLL_INTERVAL msecs_to_jiffies(10) #define NVMET_PCI_EPF_SQ_POLL_INTERVAL msecs_to_jiffies(5) #define NVMET_PCI_EPF_SQ_POLL_IDLE msecs_to_jiffies(5000) @@ -1265,15 +1265,12 @@ static u16 nvmet_pci_epf_create_cq(struct nvmet_ctrl *tctrl, struct nvmet_pci_epf_queue *cq = &ctrl->cq[cqid]; u16 status; - if (test_and_set_bit(NVMET_PCI_EPF_Q_LIVE, &cq->flags)) + if (test_bit(NVMET_PCI_EPF_Q_LIVE, &cq->flags)) return NVME_SC_QID_INVALID | NVME_STATUS_DNR; if (!(flags & NVME_QUEUE_PHYS_CONTIG)) return NVME_SC_INVALID_QUEUE | NVME_STATUS_DNR; - if (flags & NVME_CQ_IRQ_ENABLED) - set_bit(NVMET_PCI_EPF_Q_IRQ_ENABLED, &cq->flags); - cq->pci_addr = pci_addr; cq->qid = cqid; cq->depth = qsize + 1; @@ -1290,24 +1287,27 @@ static u16 nvmet_pci_epf_create_cq(struct nvmet_ctrl *tctrl, cq->qes = ctrl->io_cqes; cq->pci_size = cq->qes * cq->depth; - cq->iv = nvmet_pci_epf_add_irq_vector(ctrl, vector); - if (!cq->iv) { - status = NVME_SC_INTERNAL | NVME_STATUS_DNR; - goto err; + if (flags & NVME_CQ_IRQ_ENABLED) { + cq->iv = nvmet_pci_epf_add_irq_vector(ctrl, vector); + if (!cq->iv) + return NVME_SC_INTERNAL | NVME_STATUS_DNR; + set_bit(NVMET_PCI_EPF_Q_IRQ_ENABLED, &cq->flags); } status = nvmet_cq_create(tctrl, &cq->nvme_cq, cqid, cq->depth); if (status != NVME_SC_SUCCESS) goto err; + set_bit(NVMET_PCI_EPF_Q_LIVE, &cq->flags); + dev_dbg(ctrl->dev, "CQ[%u]: %u entries of %zu B, IRQ vector %u\n", cqid, qsize, cq->qes, cq->vector); return NVME_SC_SUCCESS; err: - clear_bit(NVMET_PCI_EPF_Q_IRQ_ENABLED, &cq->flags); - clear_bit(NVMET_PCI_EPF_Q_LIVE, &cq->flags); + if (test_and_clear_bit(NVMET_PCI_EPF_Q_IRQ_ENABLED, &cq->flags)) + nvmet_pci_epf_remove_irq_vector(ctrl, cq->vector); return status; } @@ -1333,7 +1333,7 @@ static u16 nvmet_pci_epf_create_sq(struct nvmet_ctrl *tctrl, struct nvmet_pci_epf_queue *sq = &ctrl->sq[sqid]; u16 status; - if (test_and_set_bit(NVMET_PCI_EPF_Q_LIVE, &sq->flags)) + if (test_bit(NVMET_PCI_EPF_Q_LIVE, &sq->flags)) return NVME_SC_QID_INVALID | NVME_STATUS_DNR; if (!(flags & NVME_QUEUE_PHYS_CONTIG)) @@ -1355,7 +1355,7 @@ static u16 nvmet_pci_epf_create_sq(struct nvmet_ctrl *tctrl, status = nvmet_sq_create(tctrl, &sq->nvme_sq, sqid, sq->depth); if (status != NVME_SC_SUCCESS) - goto out_clear_bit; + return status; sq->iod_wq = alloc_workqueue("sq%d_wq", WQ_UNBOUND, min_t(int, sq->depth, WQ_MAX_ACTIVE), sqid); @@ -1365,6 +1365,8 @@ static u16 nvmet_pci_epf_create_sq(struct nvmet_ctrl *tctrl, goto out_destroy_sq; } + set_bit(NVMET_PCI_EPF_Q_LIVE, &sq->flags); + dev_dbg(ctrl->dev, "SQ[%u]: %u entries of %zu B\n", sqid, qsize, sq->qes); @@ -1372,8 +1374,6 @@ static u16 nvmet_pci_epf_create_sq(struct nvmet_ctrl *tctrl, out_destroy_sq: nvmet_sq_destroy(&sq->nvme_sq); -out_clear_bit: - clear_bit(NVMET_PCI_EPF_Q_LIVE, &sq->flags); return status; } @@ -1694,6 +1694,7 @@ static void nvmet_pci_epf_poll_sqs_work(struct work_struct *work) struct nvmet_pci_epf_ctrl *ctrl = container_of(work, struct nvmet_pci_epf_ctrl, poll_sqs.work); struct nvmet_pci_epf_queue *sq; + unsigned long limit = jiffies; unsigned long last = 0; int i, nr_sqs; @@ -1708,6 +1709,16 @@ static void nvmet_pci_epf_poll_sqs_work(struct work_struct *work) nr_sqs++; } + /* + * If we have been running for a while, reschedule to let other + * tasks run and to avoid RCU stalls. 
+ */ + if (time_is_before_jiffies(limit + secs_to_jiffies(1))) { + cond_resched(); + limit = jiffies; + continue; + } + if (nr_sqs) { last = jiffies; continue; @@ -1822,14 +1833,14 @@ static int nvmet_pci_epf_enable_ctrl(struct nvmet_pci_epf_ctrl *ctrl) if (ctrl->io_sqes < sizeof(struct nvme_command)) { dev_err(ctrl->dev, "Unsupported I/O SQES %zu (need %zu)\n", ctrl->io_sqes, sizeof(struct nvme_command)); - return -EINVAL; + goto err; } ctrl->io_cqes = 1UL << nvmet_cc_iocqes(ctrl->cc); if (ctrl->io_cqes < sizeof(struct nvme_completion)) { dev_err(ctrl->dev, "Unsupported I/O CQES %zu (need %zu)\n", ctrl->io_sqes, sizeof(struct nvme_completion)); - return -EINVAL; + goto err; } /* Create the admin queue. */ @@ -1844,7 +1855,7 @@ static int nvmet_pci_epf_enable_ctrl(struct nvmet_pci_epf_ctrl *ctrl) qsize, pci_addr, 0); if (status != NVME_SC_SUCCESS) { dev_err(ctrl->dev, "Failed to create admin completion queue\n"); - return -EINVAL; + goto err; } qsize = aqa & 0x00000fff; @@ -1854,17 +1865,22 @@ static int nvmet_pci_epf_enable_ctrl(struct nvmet_pci_epf_ctrl *ctrl) if (status != NVME_SC_SUCCESS) { dev_err(ctrl->dev, "Failed to create admin submission queue\n"); nvmet_pci_epf_delete_cq(ctrl->tctrl, 0); - return -EINVAL; + goto err; } ctrl->sq_ab = NVMET_PCI_EPF_SQ_AB; ctrl->irq_vector_threshold = NVMET_PCI_EPF_IV_THRESHOLD; ctrl->enabled = true; + ctrl->csts = NVME_CSTS_RDY; /* Start polling the controller SQs. */ schedule_delayed_work(&ctrl->poll_sqs, 0); return 0; + +err: + ctrl->csts = 0; + return -EINVAL; } static void nvmet_pci_epf_disable_ctrl(struct nvmet_pci_epf_ctrl *ctrl) @@ -1889,6 +1905,8 @@ static void nvmet_pci_epf_disable_ctrl(struct nvmet_pci_epf_ctrl *ctrl) /* Delete the admin queue last. */ nvmet_pci_epf_delete_sq(ctrl->tctrl, 0); nvmet_pci_epf_delete_cq(ctrl->tctrl, 0); + + ctrl->csts &= ~NVME_CSTS_RDY; } static void nvmet_pci_epf_poll_cc_work(struct work_struct *work) @@ -1903,19 +1921,19 @@ static void nvmet_pci_epf_poll_cc_work(struct work_struct *work) old_cc = ctrl->cc; new_cc = nvmet_pci_epf_bar_read32(ctrl, NVME_REG_CC); + if (new_cc == old_cc) + goto reschedule_work; + ctrl->cc = new_cc; if (nvmet_cc_en(new_cc) && !nvmet_cc_en(old_cc)) { ret = nvmet_pci_epf_enable_ctrl(ctrl); if (ret) - return; - ctrl->csts |= NVME_CSTS_RDY; + goto reschedule_work; } - if (!nvmet_cc_en(new_cc) && nvmet_cc_en(old_cc)) { + if (!nvmet_cc_en(new_cc) && nvmet_cc_en(old_cc)) nvmet_pci_epf_disable_ctrl(ctrl); - ctrl->csts &= ~NVME_CSTS_RDY; - } if (nvmet_cc_shn(new_cc) && !nvmet_cc_shn(old_cc)) { nvmet_pci_epf_disable_ctrl(ctrl); @@ -1928,6 +1946,7 @@ static void nvmet_pci_epf_poll_cc_work(struct work_struct *work) nvmet_update_cc(ctrl->tctrl, ctrl->cc); nvmet_pci_epf_bar_write32(ctrl, NVME_REG_CSTS, ctrl->csts); +reschedule_work: schedule_delayed_work(&ctrl->poll_cc, NVMET_PCI_EPF_CC_POLL_INTERVAL); } diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c index 1afd93026f9bf..2a4536ef61848 100644 --- a/drivers/nvme/target/rdma.c +++ b/drivers/nvme/target/rdma.c @@ -996,6 +996,27 @@ static void nvmet_rdma_handle_command(struct nvmet_rdma_queue *queue, nvmet_req_complete(&cmd->req, status); } +static bool nvmet_rdma_recv_not_live(struct nvmet_rdma_queue *queue, + struct nvmet_rdma_rsp *rsp) +{ + unsigned long flags; + bool ret = true; + + spin_lock_irqsave(&queue->state_lock, flags); + /* + * recheck queue state is not live to prevent a race condition + * with RDMA_CM_EVENT_ESTABLISHED handler. 
+ */ + if (queue->state == NVMET_RDMA_Q_LIVE) + ret = false; + else if (queue->state == NVMET_RDMA_Q_CONNECTING) + list_add_tail(&rsp->wait_list, &queue->rsp_wait_list); + else + nvmet_rdma_put_rsp(rsp); + spin_unlock_irqrestore(&queue->state_lock, flags); + return ret; +} + static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc) { struct nvmet_rdma_cmd *cmd = @@ -1038,17 +1059,9 @@ static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc) rsp->n_rdma = 0; rsp->invalidate_rkey = 0; - if (unlikely(queue->state != NVMET_RDMA_Q_LIVE)) { - unsigned long flags; - - spin_lock_irqsave(&queue->state_lock, flags); - if (queue->state == NVMET_RDMA_Q_CONNECTING) - list_add_tail(&rsp->wait_list, &queue->rsp_wait_list); - else - nvmet_rdma_put_rsp(rsp); - spin_unlock_irqrestore(&queue->state_lock, flags); + if (unlikely(queue->state != NVMET_RDMA_Q_LIVE) && + nvmet_rdma_recv_not_live(queue, rsp)) return; - } nvmet_rdma_handle_command(queue, rsp); } diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c index 7c51c2a8c109a..4f9cac8a5abe0 100644 --- a/drivers/nvme/target/tcp.c +++ b/drivers/nvme/target/tcp.c @@ -571,10 +571,16 @@ static void nvmet_tcp_queue_response(struct nvmet_req *req) struct nvmet_tcp_cmd *cmd = container_of(req, struct nvmet_tcp_cmd, req); struct nvmet_tcp_queue *queue = cmd->queue; + enum nvmet_tcp_recv_state queue_state; + struct nvmet_tcp_cmd *queue_cmd; struct nvme_sgl_desc *sgl; u32 len; - if (unlikely(cmd == queue->cmd)) { + /* Pairs with store_release in nvmet_prepare_receive_pdu() */ + queue_state = smp_load_acquire(&queue->rcv_state); + queue_cmd = READ_ONCE(queue->cmd); + + if (unlikely(cmd == queue_cmd)) { sgl = &cmd->req.cmd->common.dptr.sgl; len = le32_to_cpu(sgl->length); @@ -583,7 +589,7 @@ static void nvmet_tcp_queue_response(struct nvmet_req *req) * Avoid using helpers, this might happen before * nvmet_req_init is completed. 
*/ - if (queue->rcv_state == NVMET_TCP_RECV_PDU && + if (queue_state == NVMET_TCP_RECV_PDU && len && len <= cmd->req.port->inline_data_size && nvme_is_write(cmd->req.cmd)) return; @@ -847,8 +853,9 @@ static void nvmet_prepare_receive_pdu(struct nvmet_tcp_queue *queue) { queue->offset = 0; queue->left = sizeof(struct nvme_tcp_hdr); - queue->cmd = NULL; - queue->rcv_state = NVMET_TCP_RECV_PDU; + WRITE_ONCE(queue->cmd, NULL); + /* Ensure rcv_state is visible only after queue->cmd is set */ + smp_store_release(&queue->rcv_state, NVMET_TCP_RECV_PDU); } static void nvmet_tcp_free_crypto(struct nvmet_tcp_queue *queue) diff --git a/drivers/nvmem/brcm_nvram.c b/drivers/nvmem/brcm_nvram.c index b810df727b446..b4cf245fb2467 100644 --- a/drivers/nvmem/brcm_nvram.c +++ b/drivers/nvmem/brcm_nvram.c @@ -100,7 +100,7 @@ static int brcm_nvram_read_post_process_macaddr(void *context, const char *id, i { u8 mac[ETH_ALEN]; - if (bytes != 3 * ETH_ALEN - 1) + if (bytes != MAC_ADDR_STR_LEN) return -EINVAL; if (!mac_pton(buf, mac)) diff --git a/drivers/nvmem/layouts/u-boot-env.c b/drivers/nvmem/layouts/u-boot-env.c index 731e6f4f12b2b..436426d4e8f91 100644 --- a/drivers/nvmem/layouts/u-boot-env.c +++ b/drivers/nvmem/layouts/u-boot-env.c @@ -37,7 +37,7 @@ static int u_boot_env_read_post_process_ethaddr(void *context, const char *id, i { u8 mac[ETH_ALEN]; - if (bytes != 3 * ETH_ALEN - 1) + if (bytes != MAC_ADDR_STR_LEN) return -EINVAL; if (!mac_pton(buf, mac)) diff --git a/drivers/of/base.c b/drivers/of/base.c index af6c68bbb4277..e37b088f1fade 100644 --- a/drivers/of/base.c +++ b/drivers/of/base.c @@ -824,6 +824,33 @@ struct device_node *of_get_child_by_name(const struct device_node *node, } EXPORT_SYMBOL(of_get_child_by_name); +/** + * of_get_available_child_by_name - Find the available child node by name for a given parent + * @node: parent node + * @name: child name to look for. + * + * This function looks for child node for given matching name and checks the + * device's availability for use. + * + * Return: A node pointer if found, with refcount incremented, use + * of_node_put() on it when done. + * Returns NULL if node is not found. 
+ */ +struct device_node *of_get_available_child_by_name(const struct device_node *node, + const char *name) +{ + struct device_node *child; + + child = of_get_child_by_name(node, name); + if (child && !of_device_is_available(child)) { + of_node_put(child); + return NULL; + } + + return child; +} +EXPORT_SYMBOL(of_get_available_child_by_name); + struct device_node *__of_find_node_by_path(const struct device_node *parent, const char *path) { diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c index 75e819f66a561..ee2e31522d7ef 100644 --- a/drivers/of/of_reserved_mem.c +++ b/drivers/of/of_reserved_mem.c @@ -415,12 +415,12 @@ static int __init __reserved_mem_alloc_size(unsigned long node, const char *unam prop = of_get_flat_dt_prop(node, "alignment", &len); if (prop) { - if (len != dt_root_size_cells * sizeof(__be32)) { + if (len != dt_root_addr_cells * sizeof(__be32)) { pr_err("invalid alignment property in '%s' node.\n", uname); return -EINVAL; } - align = dt_mem_next_cell(dt_root_size_cells, &prop); + align = dt_mem_next_cell(dt_root_addr_cells, &prop); } nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL; diff --git a/drivers/phy/freescale/phy-fsl-samsung-hdmi.c b/drivers/phy/freescale/phy-fsl-samsung-hdmi.c index 45004f598e4dc..e4c0a82d16d9e 100644 --- a/drivers/phy/freescale/phy-fsl-samsung-hdmi.c +++ b/drivers/phy/freescale/phy-fsl-samsung-hdmi.c @@ -325,7 +325,7 @@ to_fsl_samsung_hdmi_phy(struct clk_hw *hw) return container_of(hw, struct fsl_samsung_hdmi_phy, hw); } -static void +static int fsl_samsung_hdmi_phy_configure_pll_lock_det(struct fsl_samsung_hdmi_phy *phy, const struct phy_config *cfg) { @@ -341,6 +341,9 @@ fsl_samsung_hdmi_phy_configure_pll_lock_det(struct fsl_samsung_hdmi_phy *phy, break; } + if (unlikely(div == 4)) + return -EINVAL; + writeb(FIELD_PREP(REG12_CK_DIV_MASK, div), phy->regs + PHY_REG(12)); /* @@ -364,6 +367,8 @@ fsl_samsung_hdmi_phy_configure_pll_lock_det(struct fsl_samsung_hdmi_phy *phy, FIELD_PREP(REG14_RP_CODE_MASK, 2) | FIELD_PREP(REG14_TG_CODE_HIGH_MASK, fld_tg_code >> 8), phy->regs + PHY_REG(14)); + + return 0; } static unsigned long fsl_samsung_hdmi_phy_find_pms(unsigned long fout, u8 *p, u16 *m, u8 *s) @@ -466,7 +471,11 @@ static int fsl_samsung_hdmi_phy_configure(struct fsl_samsung_hdmi_phy *phy, writeb(REG21_SEL_TX_CK_INV | FIELD_PREP(REG21_PMS_S_MASK, cfg->pll_div_regs[2] >> 4), phy->regs + PHY_REG(21)); - fsl_samsung_hdmi_phy_configure_pll_lock_det(phy, cfg); + ret = fsl_samsung_hdmi_phy_configure_pll_lock_det(phy, cfg); + if (ret) { + dev_err(phy->dev, "pixclock too large\n"); + return ret; + } writeb(REG33_FIX_DA | REG33_MODE_SET_DONE, phy->regs + PHY_REG(33)); diff --git a/drivers/phy/rockchip/Kconfig b/drivers/phy/rockchip/Kconfig index 2f7a05f21dc59..dcb8e1628632e 100644 --- a/drivers/phy/rockchip/Kconfig +++ b/drivers/phy/rockchip/Kconfig @@ -125,6 +125,7 @@ config PHY_ROCKCHIP_USBDP depends on ARCH_ROCKCHIP && OF depends on TYPEC select GENERIC_PHY + select USB_COMMON help Enable this to support the Rockchip USB3.0/DP combo PHY with Samsung IP block. This is required for USB3 support on RK3588. 
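The of_get_available_child_by_name() helper added in drivers/of/base.c above folds the of_device_is_available() check into the child lookup. A minimal sketch of a caller, assuming a hypothetical driver and an invented child node name "codec" (neither appears in this series):

#include <linux/errno.h>
#include <linux/of.h>

static int example_attach_codec(struct device_node *parent)
{
	struct device_node *child;

	/* Lookup and availability check in one call; takes a reference on success. */
	child = of_get_available_child_by_name(parent, "codec");
	if (!child)
		return -ENODEV;	/* child missing, or its status disables it */

	/* ... configure the device from 'child' properties ... */

	of_node_put(child);	/* balance the reference taken by the lookup */
	return 0;
}

Note that the helper returns NULL both for a missing child and for one whose status is not "okay"; a caller that must distinguish the two cases still needs of_get_child_by_name() plus an explicit of_device_is_available() check.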
diff --git a/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c b/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c index a1532ef8bbe9d..8c3ce57f89151 100644 --- a/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c +++ b/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c @@ -324,7 +324,10 @@ static int rockchip_combphy_parse_dt(struct device *dev, struct rockchip_combphy priv->ext_refclk = device_property_present(dev, "rockchip,ext-refclk"); - priv->phy_rst = devm_reset_control_get(dev, "phy"); + priv->phy_rst = devm_reset_control_get_exclusive(dev, "phy"); + /* fallback to old behaviour */ + if (PTR_ERR(priv->phy_rst) == -ENOENT) + priv->phy_rst = devm_reset_control_array_get_exclusive(dev); if (IS_ERR(priv->phy_rst)) return dev_err_probe(dev, PTR_ERR(priv->phy_rst), "failed to get phy reset\n"); diff --git a/drivers/phy/samsung/phy-exynos5-usbdrd.c b/drivers/phy/samsung/phy-exynos5-usbdrd.c index c421b495eb0fe..46b8f6987c62c 100644 --- a/drivers/phy/samsung/phy-exynos5-usbdrd.c +++ b/drivers/phy/samsung/phy-exynos5-usbdrd.c @@ -488,9 +488,9 @@ exynos5_usbdrd_pipe3_set_refclk(struct phy_usb_instance *inst) reg |= PHYCLKRST_REFCLKSEL_EXT_REFCLK; /* FSEL settings corresponding to reference clock */ - reg &= ~PHYCLKRST_FSEL_PIPE_MASK | - PHYCLKRST_MPLL_MULTIPLIER_MASK | - PHYCLKRST_SSC_REFCLKSEL_MASK; + reg &= ~(PHYCLKRST_FSEL_PIPE_MASK | + PHYCLKRST_MPLL_MULTIPLIER_MASK | + PHYCLKRST_SSC_REFCLKSEL_MASK); switch (phy_drd->extrefclk) { case EXYNOS5_FSEL_50MHZ: reg |= (PHYCLKRST_MPLL_MULTIPLIER_50M_REF | @@ -532,9 +532,9 @@ exynos5_usbdrd_utmi_set_refclk(struct phy_usb_instance *inst) reg &= ~PHYCLKRST_REFCLKSEL_MASK; reg |= PHYCLKRST_REFCLKSEL_EXT_REFCLK; - reg &= ~PHYCLKRST_FSEL_UTMI_MASK | - PHYCLKRST_MPLL_MULTIPLIER_MASK | - PHYCLKRST_SSC_REFCLKSEL_MASK; + reg &= ~(PHYCLKRST_FSEL_UTMI_MASK | + PHYCLKRST_MPLL_MULTIPLIER_MASK | + PHYCLKRST_SSC_REFCLKSEL_MASK); reg |= PHYCLKRST_FSEL(phy_drd->extrefclk); return reg; @@ -1296,14 +1296,17 @@ static int exynos5_usbdrd_gs101_phy_exit(struct phy *phy) struct exynos5_usbdrd_phy *phy_drd = to_usbdrd_phy(inst); int ret; + if (inst->phy_cfg->id == EXYNOS5_DRDPHY_UTMI) { + ret = exynos850_usbdrd_phy_exit(phy); + if (ret) + return ret; + } + + exynos5_usbdrd_phy_isol(inst, true); + if (inst->phy_cfg->id != EXYNOS5_DRDPHY_UTMI) return 0; - ret = exynos850_usbdrd_phy_exit(phy); - if (ret) - return ret; - - exynos5_usbdrd_phy_isol(inst, true); return regulator_bulk_disable(phy_drd->drv_data->n_regulators, phy_drd->regulators); } diff --git a/drivers/phy/st/phy-stm32-combophy.c b/drivers/phy/st/phy-stm32-combophy.c index 49e9fa90a6819..607b4d607eb5e 100644 --- a/drivers/phy/st/phy-stm32-combophy.c +++ b/drivers/phy/st/phy-stm32-combophy.c @@ -111,6 +111,7 @@ static const struct clk_impedance imp_lookup[] = { { 4204000, { 511000, 609000, 706000, 802000 } }, { 3999000, { 571000, 648000, 726000, 803000 } } }; +#define DEFAULT_IMP_INDEX 3 /* Default impedance is 50 Ohm */ static int stm32_impedance_tune(struct stm32_combophy *combophy) { @@ -119,10 +120,9 @@ static int stm32_impedance_tune(struct stm32_combophy *combophy) u8 imp_of, vswing_of; u32 max_imp = imp_lookup[0].microohm; u32 min_imp = imp_lookup[imp_size - 1].microohm; - u32 max_vswing = imp_lookup[imp_size - 1].vswing[vswing_size - 1]; + u32 max_vswing; u32 min_vswing = imp_lookup[0].vswing[0]; u32 val; - u32 regval; if (!of_property_read_u32(combophy->dev->of_node, "st,output-micro-ohms", &val)) { if (val < min_imp || val > max_imp) { @@ -130,45 +130,43 @@ static int stm32_impedance_tune(struct 
stm32_combophy *combophy) return -EINVAL; } - regval = 0; - for (imp_of = 0; imp_of < ARRAY_SIZE(imp_lookup); imp_of++) { - if (imp_lookup[imp_of].microohm <= val) { - regval = FIELD_PREP(STM32MP25_PCIEPRG_IMPCTRL_OHM, imp_of); + for (imp_of = 0; imp_of < ARRAY_SIZE(imp_lookup); imp_of++) + if (imp_lookup[imp_of].microohm <= val) break; - } - } + + if (WARN_ON(imp_of == ARRAY_SIZE(imp_lookup))) + return -EINVAL; dev_dbg(combophy->dev, "Set %u micro-ohms output impedance\n", imp_lookup[imp_of].microohm); regmap_update_bits(combophy->regmap, SYSCFG_PCIEPRGCR, STM32MP25_PCIEPRG_IMPCTRL_OHM, - regval); - } else { - regmap_read(combophy->regmap, SYSCFG_PCIEPRGCR, &val); - imp_of = FIELD_GET(STM32MP25_PCIEPRG_IMPCTRL_OHM, val); - } + FIELD_PREP(STM32MP25_PCIEPRG_IMPCTRL_OHM, imp_of)); + } else + imp_of = DEFAULT_IMP_INDEX; if (!of_property_read_u32(combophy->dev->of_node, "st,output-vswing-microvolt", &val)) { + max_vswing = imp_lookup[imp_of].vswing[vswing_size - 1]; + if (val < min_vswing || val > max_vswing) { dev_err(combophy->dev, "Invalid value %u for output vswing\n", val); return -EINVAL; } - regval = 0; - for (vswing_of = 0; vswing_of < ARRAY_SIZE(imp_lookup[imp_of].vswing); vswing_of++) { - if (imp_lookup[imp_of].vswing[vswing_of] >= val) { - regval = FIELD_PREP(STM32MP25_PCIEPRG_IMPCTRL_VSWING, vswing_of); + for (vswing_of = 0; vswing_of < ARRAY_SIZE(imp_lookup[imp_of].vswing); vswing_of++) + if (imp_lookup[imp_of].vswing[vswing_of] >= val) break; - } - } + + if (WARN_ON(vswing_of == ARRAY_SIZE(imp_lookup[imp_of].vswing))) + return -EINVAL; dev_dbg(combophy->dev, "Set %u microvolt swing\n", imp_lookup[imp_of].vswing[vswing_of]); regmap_update_bits(combophy->regmap, SYSCFG_PCIEPRGCR, STM32MP25_PCIEPRG_IMPCTRL_VSWING, - regval); + FIELD_PREP(STM32MP25_PCIEPRG_IMPCTRL_VSWING, vswing_of)); } return 0; diff --git a/drivers/phy/tegra/xusb-tegra186.c b/drivers/phy/tegra/xusb-tegra186.c index 0f60d5d1c1678..fae6242aa730e 100644 --- a/drivers/phy/tegra/xusb-tegra186.c +++ b/drivers/phy/tegra/xusb-tegra186.c @@ -928,6 +928,7 @@ static int tegra186_utmi_phy_init(struct phy *phy) unsigned int index = lane->index; struct device *dev = padctl->dev; int err; + u32 reg; port = tegra_xusb_find_usb2_port(padctl, index); if (!port) { @@ -935,6 +936,16 @@ static int tegra186_utmi_phy_init(struct phy *phy) return -ENODEV; } + if (port->mode == USB_DR_MODE_OTG || + port->mode == USB_DR_MODE_PERIPHERAL) { + /* reset VBUS&ID OVERRIDE */ + reg = padctl_readl(padctl, USB2_VBUS_ID); + reg &= ~VBUS_OVERRIDE; + reg &= ~ID_OVERRIDE(~0); + reg |= ID_OVERRIDE_FLOATING; + padctl_writel(padctl, reg, USB2_VBUS_ID); + } + if (port->supply && port->mode == USB_DR_MODE_HOST) { err = regulator_enable(port->supply); if (err) { diff --git a/drivers/phy/ti/phy-gmii-sel.c b/drivers/phy/ti/phy-gmii-sel.c index e0ca59ae31531..ff5d5e29629fa 100644 --- a/drivers/phy/ti/phy-gmii-sel.c +++ b/drivers/phy/ti/phy-gmii-sel.c @@ -424,6 +424,12 @@ static int phy_gmii_sel_init_ports(struct phy_gmii_sel_priv *priv) return 0; } +static const struct regmap_config phy_gmii_sel_regmap_cfg = { + .reg_bits = 32, + .val_bits = 32, + .reg_stride = 4, +}; + static int phy_gmii_sel_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; @@ -468,7 +474,14 @@ static int phy_gmii_sel_probe(struct platform_device *pdev) priv->regmap = syscon_node_to_regmap(node->parent); if (IS_ERR(priv->regmap)) { - priv->regmap = device_node_to_regmap(node); + void __iomem *base; + + base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(base)) + 
return dev_err_probe(dev, PTR_ERR(base), + "failed to get base memory resource\n"); + + priv->regmap = regmap_init_mmio(dev, base, &phy_gmii_sel_regmap_cfg); if (IS_ERR(priv->regmap)) return dev_err_probe(dev, PTR_ERR(priv->regmap), "Failed to get syscon\n"); diff --git a/drivers/pinctrl/bcm/pinctrl-bcm281xx.c b/drivers/pinctrl/bcm/pinctrl-bcm281xx.c index 73dbf29c002f3..cf6efa9c0364a 100644 --- a/drivers/pinctrl/bcm/pinctrl-bcm281xx.c +++ b/drivers/pinctrl/bcm/pinctrl-bcm281xx.c @@ -974,7 +974,7 @@ static const struct regmap_config bcm281xx_pinctrl_regmap_config = { .reg_bits = 32, .reg_stride = 4, .val_bits = 32, - .max_register = BCM281XX_PIN_VC_CAM3_SDA, + .max_register = BCM281XX_PIN_VC_CAM3_SDA * 4, }; static int bcm281xx_pinctrl_get_groups_count(struct pinctrl_dev *pctldev) diff --git a/drivers/pinctrl/nuvoton/pinctrl-npcm8xx.c b/drivers/pinctrl/nuvoton/pinctrl-npcm8xx.c index 471f644c5eef2..d09a5e9b2eca5 100644 --- a/drivers/pinctrl/nuvoton/pinctrl-npcm8xx.c +++ b/drivers/pinctrl/nuvoton/pinctrl-npcm8xx.c @@ -2374,6 +2374,9 @@ static int npcm8xx_gpio_fw(struct npcm8xx_pinctrl *pctrl) pctrl->gpio_bank[id].gc.parent = dev; pctrl->gpio_bank[id].gc.fwnode = child; pctrl->gpio_bank[id].gc.label = devm_kasprintf(dev, GFP_KERNEL, "%pfw", child); + if (pctrl->gpio_bank[id].gc.label == NULL) + return -ENOMEM; + pctrl->gpio_bank[id].gc.dbg_show = npcmgpio_dbg_show; pctrl->gpio_bank[id].direction_input = pctrl->gpio_bank[id].gc.direction_input; pctrl->gpio_bank[id].gc.direction_input = npcmgpio_direction_input; diff --git a/drivers/pinctrl/spacemit/Kconfig b/drivers/pinctrl/spacemit/Kconfig index 168f8a5ffbb95..a2f98b3f8a755 100644 --- a/drivers/pinctrl/spacemit/Kconfig +++ b/drivers/pinctrl/spacemit/Kconfig @@ -4,9 +4,10 @@ # config PINCTRL_SPACEMIT_K1 - tristate "SpacemiT K1 SoC Pinctrl driver" + bool "SpacemiT K1 SoC Pinctrl driver" depends on ARCH_SPACEMIT || COMPILE_TEST depends on OF + default y select GENERIC_PINCTRL_GROUPS select GENERIC_PINMUX_FUNCTIONS select GENERIC_PINCONF diff --git a/drivers/pinctrl/spacemit/pinctrl-k1.c b/drivers/pinctrl/spacemit/pinctrl-k1.c index a32579d736130..59fd555ff38d4 100644 --- a/drivers/pinctrl/spacemit/pinctrl-k1.c +++ b/drivers/pinctrl/spacemit/pinctrl-k1.c @@ -1044,7 +1044,7 @@ static struct platform_driver k1_pinctrl_driver = { .of_match_table = k1_pinctrl_ids, }, }; -module_platform_driver(k1_pinctrl_driver); +builtin_platform_driver(k1_pinctrl_driver); MODULE_AUTHOR("Yixun Lan "); MODULE_DESCRIPTION("Pinctrl driver for the SpacemiT K1 SoC"); diff --git a/drivers/platform/cznic/Kconfig b/drivers/platform/cznic/Kconfig index 49c383eb67854..13e37b49d9d01 100644 --- a/drivers/platform/cznic/Kconfig +++ b/drivers/platform/cznic/Kconfig @@ -6,6 +6,7 @@ menuconfig CZNIC_PLATFORMS bool "Platform support for CZ.NIC's Turris hardware" + depends on ARCH_MVEBU || COMPILE_TEST help Say Y here to be able to choose driver support for CZ.NIC's Turris devices. This option alone does not add any kernel code. 
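The stm32-combophy rework above replaces table lookups that silently fell back to a zero register value with bounds-checked walks of the descending impedance table. A standalone sketch of that pattern with invented table values (the real imp_lookup entries and the IMPCTRL field layout differ):

#include <linux/bug.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/types.h>

/* Descending micro-ohm values, largest first (made-up numbers). */
static const u32 example_imp_table[] = { 6090000, 4610000, 4204000, 3999000 };

static int example_find_impedance(u32 microohms)
{
	unsigned int i;

	/* Stop at the first entry that does not exceed the request. */
	for (i = 0; i < ARRAY_SIZE(example_imp_table); i++)
		if (example_imp_table[i] <= microohms)
			break;

	/* The caller already range-checked the request, so no match is a bug. */
	if (WARN_ON(i == ARRAY_SIZE(example_imp_table)))
		return -EINVAL;

	return i;	/* table index to program into the register field */
}

Treating "walked off the end of the table" as an error, rather than quietly programming index 0, is what distinguishes the new loops from the ones they replace.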
diff --git a/drivers/platform/surface/surface_aggregator_registry.c b/drivers/platform/surface/surface_aggregator_registry.c index d4f32ad665305..a594d5fcfcfd1 100644 --- a/drivers/platform/surface/surface_aggregator_registry.c +++ b/drivers/platform/surface/surface_aggregator_registry.c @@ -371,7 +371,7 @@ static const struct software_node *ssam_node_group_sp8[] = { NULL, }; -/* Devices for Surface Pro 9 (Intel/x86) and 10 */ +/* Devices for Surface Pro 9, 10 and 11 (Intel/x86) */ static const struct software_node *ssam_node_group_sp9[] = { &ssam_node_root, &ssam_node_hub_kip, @@ -430,6 +430,9 @@ static const struct acpi_device_id ssam_platform_hub_acpi_match[] = { /* Surface Pro 10 */ { "MSHW0510", (unsigned long)ssam_node_group_sp9 }, + /* Surface Pro 11 */ + { "MSHW0583", (unsigned long)ssam_node_group_sp9 }, + /* Surface Book 2 */ { "MSHW0107", (unsigned long)ssam_node_group_gen5 }, diff --git a/drivers/platform/x86/amd/pmf/core.c b/drivers/platform/x86/amd/pmf/core.c index 764cc1fe90ae4..a2cb2d5544f5b 100644 --- a/drivers/platform/x86/amd/pmf/core.c +++ b/drivers/platform/x86/amd/pmf/core.c @@ -452,6 +452,7 @@ static int amd_pmf_probe(struct platform_device *pdev) mutex_init(&dev->lock); mutex_init(&dev->update_mutex); + mutex_init(&dev->cb_mutex); apmf_acpi_init(dev); platform_set_drvdata(pdev, dev); @@ -477,6 +478,7 @@ static void amd_pmf_remove(struct platform_device *pdev) amd_pmf_dbgfs_unregister(dev); mutex_destroy(&dev->lock); mutex_destroy(&dev->update_mutex); + mutex_destroy(&dev->cb_mutex); kfree(dev->buf); } diff --git a/drivers/platform/x86/amd/pmf/pmf.h b/drivers/platform/x86/amd/pmf/pmf.h index 41b2b91b8fdc6..e6bdee68ccf34 100644 --- a/drivers/platform/x86/amd/pmf/pmf.h +++ b/drivers/platform/x86/amd/pmf/pmf.h @@ -106,9 +106,12 @@ struct cookie_header { #define PMF_TA_IF_VERSION_MAJOR 1 #define TA_PMF_ACTION_MAX 32 #define TA_PMF_UNDO_MAX 8 -#define TA_OUTPUT_RESERVED_MEM 906 +#define TA_OUTPUT_RESERVED_MEM 922 #define MAX_OPERATION_PARAMS 4 +#define TA_ERROR_CRYPTO_INVALID_PARAM 0x20002 +#define TA_ERROR_CRYPTO_BIN_TOO_LARGE 0x2000d + #define PMF_IF_V1 1 #define PMF_IF_V2 2 diff --git a/drivers/platform/x86/amd/pmf/spc.c b/drivers/platform/x86/amd/pmf/spc.c index f34f3130c3307..1d90f9382024b 100644 --- a/drivers/platform/x86/amd/pmf/spc.c +++ b/drivers/platform/x86/amd/pmf/spc.c @@ -219,12 +219,14 @@ static int amd_pmf_get_slider_info(struct amd_pmf_dev *dev, struct ta_pmf_enact_ switch (dev->current_profile) { case PLATFORM_PROFILE_PERFORMANCE: + case PLATFORM_PROFILE_BALANCED_PERFORMANCE: val = TA_BEST_PERFORMANCE; break; case PLATFORM_PROFILE_BALANCED: val = TA_BETTER_PERFORMANCE; break; case PLATFORM_PROFILE_LOW_POWER: + case PLATFORM_PROFILE_QUIET: val = TA_BEST_BATTERY; break; default: diff --git a/drivers/platform/x86/amd/pmf/sps.c b/drivers/platform/x86/amd/pmf/sps.c index e6cf0b22dac33..d3083383f11fb 100644 --- a/drivers/platform/x86/amd/pmf/sps.c +++ b/drivers/platform/x86/amd/pmf/sps.c @@ -297,12 +297,14 @@ int amd_pmf_get_pprof_modes(struct amd_pmf_dev *pmf) switch (pmf->current_profile) { case PLATFORM_PROFILE_PERFORMANCE: + case PLATFORM_PROFILE_BALANCED_PERFORMANCE: mode = POWER_MODE_PERFORMANCE; break; case PLATFORM_PROFILE_BALANCED: mode = POWER_MODE_BALANCED_POWER; break; case PLATFORM_PROFILE_LOW_POWER: + case PLATFORM_PROFILE_QUIET: mode = POWER_MODE_POWER_SAVER; break; default: @@ -387,6 +389,14 @@ static int amd_pmf_profile_set(struct device *dev, return 0; } +static int amd_pmf_hidden_choices(void *drvdata, unsigned long *choices) +{ + 
set_bit(PLATFORM_PROFILE_QUIET, choices); + set_bit(PLATFORM_PROFILE_BALANCED_PERFORMANCE, choices); + + return 0; +} + static int amd_pmf_profile_probe(void *drvdata, unsigned long *choices) { set_bit(PLATFORM_PROFILE_LOW_POWER, choices); @@ -398,6 +408,7 @@ static int amd_pmf_profile_probe(void *drvdata, unsigned long *choices) static const struct platform_profile_ops amd_pmf_profile_ops = { .probe = amd_pmf_profile_probe, + .hidden_choices = amd_pmf_hidden_choices, .profile_get = amd_pmf_profile_get, .profile_set = amd_pmf_profile_set, }; diff --git a/drivers/platform/x86/amd/pmf/tee-if.c b/drivers/platform/x86/amd/pmf/tee-if.c index 8c88769ea1d87..a1e43873a07b0 100644 --- a/drivers/platform/x86/amd/pmf/tee-if.c +++ b/drivers/platform/x86/amd/pmf/tee-if.c @@ -27,8 +27,11 @@ module_param(pb_side_load, bool, 0444); MODULE_PARM_DESC(pb_side_load, "Sideload policy binaries debug policy failures"); #endif -static const uuid_t amd_pmf_ta_uuid = UUID_INIT(0x6fd93b77, 0x3fb8, 0x524d, - 0xb1, 0x2d, 0xc5, 0x29, 0xb1, 0x3d, 0x85, 0x43); +static const uuid_t amd_pmf_ta_uuid[] = { UUID_INIT(0xd9b39bf2, 0x66bd, 0x4154, 0xaf, 0xb8, 0x8a, + 0xcc, 0x2b, 0x2b, 0x60, 0xd6), + UUID_INIT(0x6fd93b77, 0x3fb8, 0x524d, 0xb1, 0x2d, 0xc5, + 0x29, 0xb1, 0x3d, 0x85, 0x43), + }; static const char *amd_pmf_uevent_as_str(unsigned int state) { @@ -321,9 +324,9 @@ static int amd_pmf_start_policy_engine(struct amd_pmf_dev *dev) */ schedule_delayed_work(&dev->pb_work, msecs_to_jiffies(pb_actions_ms * 3)); } else { - dev_err(dev->dev, "ta invoke cmd init failed err: %x\n", res); + dev_dbg(dev->dev, "ta invoke cmd init failed err: %x\n", res); dev->smart_pc_enabled = false; - return -EIO; + return res; } return 0; @@ -390,12 +393,12 @@ static int amd_pmf_amdtee_ta_match(struct tee_ioctl_version_data *ver, const voi return ver->impl_id == TEE_IMPL_ID_AMDTEE; } -static int amd_pmf_ta_open_session(struct tee_context *ctx, u32 *id) +static int amd_pmf_ta_open_session(struct tee_context *ctx, u32 *id, const uuid_t *uuid) { struct tee_ioctl_open_session_arg sess_arg = {}; int rc; - export_uuid(sess_arg.uuid, &amd_pmf_ta_uuid); + export_uuid(sess_arg.uuid, uuid); sess_arg.clnt_login = TEE_IOCTL_LOGIN_PUBLIC; sess_arg.num_params = 0; @@ -434,7 +437,7 @@ static int amd_pmf_register_input_device(struct amd_pmf_dev *dev) return 0; } -static int amd_pmf_tee_init(struct amd_pmf_dev *dev) +static int amd_pmf_tee_init(struct amd_pmf_dev *dev, const uuid_t *uuid) { u32 size; int ret; @@ -445,7 +448,7 @@ static int amd_pmf_tee_init(struct amd_pmf_dev *dev) return PTR_ERR(dev->tee_ctx); } - ret = amd_pmf_ta_open_session(dev->tee_ctx, &dev->session_id); + ret = amd_pmf_ta_open_session(dev->tee_ctx, &dev->session_id, uuid); if (ret) { dev_err(dev->dev, "Failed to open TA session (%d)\n", ret); ret = -EINVAL; @@ -489,7 +492,8 @@ static void amd_pmf_tee_deinit(struct amd_pmf_dev *dev) int amd_pmf_init_smart_pc(struct amd_pmf_dev *dev) { - int ret; + bool status; + int ret, i; ret = apmf_check_smart_pc(dev); if (ret) { @@ -502,26 +506,22 @@ int amd_pmf_init_smart_pc(struct amd_pmf_dev *dev) return -ENODEV; } - ret = amd_pmf_tee_init(dev); - if (ret) - return ret; - INIT_DELAYED_WORK(&dev->pb_work, amd_pmf_invoke_cmd); ret = amd_pmf_set_dram_addr(dev, true); if (ret) - goto error; + goto err_cancel_work; dev->policy_base = devm_ioremap_resource(dev->dev, dev->res); if (IS_ERR(dev->policy_base)) { ret = PTR_ERR(dev->policy_base); - goto error; + goto err_free_dram_buf; } dev->policy_buf = kzalloc(dev->policy_sz, GFP_KERNEL); if (!dev->policy_buf) { 
ret = -ENOMEM; - goto error; + goto err_free_dram_buf; } memcpy_fromio(dev->policy_buf, dev->policy_base, dev->policy_sz); @@ -531,24 +531,60 @@ int amd_pmf_init_smart_pc(struct amd_pmf_dev *dev) dev->prev_data = kzalloc(sizeof(*dev->prev_data), GFP_KERNEL); if (!dev->prev_data) { ret = -ENOMEM; - goto error; + goto err_free_policy; } - ret = amd_pmf_start_policy_engine(dev); - if (ret) - goto error; + for (i = 0; i < ARRAY_SIZE(amd_pmf_ta_uuid); i++) { + ret = amd_pmf_tee_init(dev, &amd_pmf_ta_uuid[i]); + if (ret) + goto err_free_prev_data; + + ret = amd_pmf_start_policy_engine(dev); + switch (ret) { + case TA_PMF_TYPE_SUCCESS: + status = true; + break; + case TA_ERROR_CRYPTO_INVALID_PARAM: + case TA_ERROR_CRYPTO_BIN_TOO_LARGE: + amd_pmf_tee_deinit(dev); + status = false; + break; + default: + ret = -EINVAL; + amd_pmf_tee_deinit(dev); + goto err_free_prev_data; + } + + if (status) + break; + } + + if (!status && !pb_side_load) { + ret = -EINVAL; + goto err_free_prev_data; + } if (pb_side_load) amd_pmf_open_pb(dev, dev->dbgfs_dir); ret = amd_pmf_register_input_device(dev); if (ret) - goto error; + goto err_pmf_remove_pb; return 0; -error: - amd_pmf_deinit_smart_pc(dev); +err_pmf_remove_pb: + if (pb_side_load && dev->esbin) + amd_pmf_remove_pb(dev); + amd_pmf_tee_deinit(dev); +err_free_prev_data: + kfree(dev->prev_data); +err_free_policy: + kfree(dev->policy_buf); +err_free_dram_buf: + kfree(dev->buf); +err_cancel_work: + cancel_delayed_work_sync(&dev->pb_work); return ret; } diff --git a/drivers/platform/x86/intel/hid.c b/drivers/platform/x86/intel/hid.c index 927a2993f6160..88a1a9ff2f344 100644 --- a/drivers/platform/x86/intel/hid.c +++ b/drivers/platform/x86/intel/hid.c @@ -139,6 +139,13 @@ static const struct dmi_system_id button_array_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "Surface Go 3"), }, }, + { + .ident = "Microsoft Surface Go 4", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"), + DMI_MATCH(DMI_PRODUCT_NAME, "Surface Go 4"), + }, + }, { } }; diff --git a/drivers/platform/x86/intel/vsec.c b/drivers/platform/x86/intel/vsec.c index 8272f1dd0fbc0..db3c031d17572 100644 --- a/drivers/platform/x86/intel/vsec.c +++ b/drivers/platform/x86/intel/vsec.c @@ -404,6 +404,11 @@ static const struct intel_vsec_platform_info oobmsm_info = { .caps = VSEC_CAP_TELEMETRY | VSEC_CAP_SDSI | VSEC_CAP_TPMI, }; +/* DMR OOBMSM info */ +static const struct intel_vsec_platform_info dmr_oobmsm_info = { + .caps = VSEC_CAP_TELEMETRY | VSEC_CAP_TPMI, +}; + /* TGL info */ static const struct intel_vsec_platform_info tgl_info = { .caps = VSEC_CAP_TELEMETRY, @@ -420,6 +425,7 @@ static const struct intel_vsec_platform_info lnl_info = { #define PCI_DEVICE_ID_INTEL_VSEC_MTL_M 0x7d0d #define PCI_DEVICE_ID_INTEL_VSEC_MTL_S 0xad0d #define PCI_DEVICE_ID_INTEL_VSEC_OOBMSM 0x09a7 +#define PCI_DEVICE_ID_INTEL_VSEC_OOBMSM_DMR 0x09a1 #define PCI_DEVICE_ID_INTEL_VSEC_RPL 0xa77d #define PCI_DEVICE_ID_INTEL_VSEC_TGL 0x9a0d #define PCI_DEVICE_ID_INTEL_VSEC_LNL_M 0x647d @@ -430,6 +436,7 @@ static const struct pci_device_id intel_vsec_pci_ids[] = { { PCI_DEVICE_DATA(INTEL, VSEC_MTL_M, &mtl_info) }, { PCI_DEVICE_DATA(INTEL, VSEC_MTL_S, &mtl_info) }, { PCI_DEVICE_DATA(INTEL, VSEC_OOBMSM, &oobmsm_info) }, + { PCI_DEVICE_DATA(INTEL, VSEC_OOBMSM_DMR, &dmr_oobmsm_info) }, { PCI_DEVICE_DATA(INTEL, VSEC_RPL, &tgl_info) }, { PCI_DEVICE_DATA(INTEL, VSEC_TGL, &tgl_info) }, { PCI_DEVICE_DATA(INTEL, VSEC_LNL_M, &lnl_info) }, diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c index 
72a10ed2017ce..1cc91173e0127 100644 --- a/drivers/platform/x86/thinkpad_acpi.c +++ b/drivers/platform/x86/thinkpad_acpi.c @@ -9972,6 +9972,7 @@ static const struct tpacpi_quirk battery_quirk_table[] __initconst = { * Individual addressing is broken on models that expose the * primary battery as BAT1. */ + TPACPI_Q_LNV('G', '8', true), /* ThinkPad X131e */ TPACPI_Q_LNV('8', 'F', true), /* Thinkpad X120e */ TPACPI_Q_LNV('J', '7', true), /* B5400 */ TPACPI_Q_LNV('J', 'I', true), /* Thinkpad 11e */ diff --git a/drivers/pmdomain/amlogic/meson-secure-pwrc.c b/drivers/pmdomain/amlogic/meson-secure-pwrc.c index 42ce41a2fe3a0..ff76ea36835e5 100644 --- a/drivers/pmdomain/amlogic/meson-secure-pwrc.c +++ b/drivers/pmdomain/amlogic/meson-secure-pwrc.c @@ -221,7 +221,7 @@ static const struct meson_secure_pwrc_domain_desc t7_pwrc_domains[] = { SEC_PD(T7_VI_CLK2, 0), /* ETH is for ethernet online wakeup, and should be always on */ SEC_PD(T7_ETH, GENPD_FLAG_ALWAYS_ON), - SEC_PD(T7_ISP, 0), + TOP_PD(T7_ISP, 0, PWRC_T7_MIPI_ISP_ID), SEC_PD(T7_MIPI_ISP, 0), TOP_PD(T7_GDC, 0, PWRC_T7_NIC3_ID), TOP_PD(T7_DEWARP, 0, PWRC_T7_NIC3_ID), diff --git a/drivers/power/supply/axp20x_battery.c b/drivers/power/supply/axp20x_battery.c index fa27195f074e7..3c3158f31a484 100644 --- a/drivers/power/supply/axp20x_battery.c +++ b/drivers/power/supply/axp20x_battery.c @@ -466,10 +466,9 @@ static int axp717_battery_get_prop(struct power_supply *psy, /* * If a fault is detected it must also be cleared; if the - * condition persists it should reappear (This is an - * assumption, it's actually not documented). A restart was - * not sufficient to clear the bit in testing despite the - * register listed as POR. + * condition persists, it should reappear. A restart was not + * sufficient to clear the bit in testing despite the register + * being listed as POR. 
*/ case POWER_SUPPLY_PROP_HEALTH: ret = regmap_read(axp20x_batt->regmap, AXP717_PMU_FAULT, @@ -480,26 +479,26 @@ static int axp717_battery_get_prop(struct power_supply *psy, switch (reg & AXP717_BATT_PMU_FAULT_MASK) { case AXP717_BATT_UVLO_2_5V: val->intval = POWER_SUPPLY_HEALTH_DEAD; - regmap_update_bits(axp20x_batt->regmap, - AXP717_PMU_FAULT, - AXP717_BATT_UVLO_2_5V, - AXP717_BATT_UVLO_2_5V); + regmap_write_bits(axp20x_batt->regmap, + AXP717_PMU_FAULT, + AXP717_BATT_UVLO_2_5V, + AXP717_BATT_UVLO_2_5V); return 0; case AXP717_BATT_OVER_TEMP: val->intval = POWER_SUPPLY_HEALTH_HOT; - regmap_update_bits(axp20x_batt->regmap, - AXP717_PMU_FAULT, - AXP717_BATT_OVER_TEMP, - AXP717_BATT_OVER_TEMP); + regmap_write_bits(axp20x_batt->regmap, + AXP717_PMU_FAULT, + AXP717_BATT_OVER_TEMP, + AXP717_BATT_OVER_TEMP); return 0; case AXP717_BATT_UNDER_TEMP: val->intval = POWER_SUPPLY_HEALTH_COLD; - regmap_update_bits(axp20x_batt->regmap, - AXP717_PMU_FAULT, - AXP717_BATT_UNDER_TEMP, - AXP717_BATT_UNDER_TEMP); + regmap_write_bits(axp20x_batt->regmap, + AXP717_PMU_FAULT, + AXP717_BATT_UNDER_TEMP, + AXP717_BATT_UNDER_TEMP); return 0; default: diff --git a/drivers/power/supply/da9150-fg.c b/drivers/power/supply/da9150-fg.c index 652c1f213af1c..4f28ef1bba1a3 100644 --- a/drivers/power/supply/da9150-fg.c +++ b/drivers/power/supply/da9150-fg.c @@ -247,9 +247,9 @@ static int da9150_fg_current_avg(struct da9150_fg *fg, DA9150_QIF_SD_GAIN_SIZE); da9150_fg_read_sync_end(fg); - div = (u64) (sd_gain * shunt_val * 65536ULL); + div = 65536ULL * sd_gain * shunt_val; do_div(div, 1000000); - res = (u64) (iavg * 1000000ULL); + res = 1000000ULL * iavg; do_div(res, div); val->intval = (int) res; diff --git a/drivers/power/supply/power_supply_core.c b/drivers/power/supply/power_supply_core.c index d0bb52a7a0367..76c340b38015a 100644 --- a/drivers/power/supply/power_supply_core.c +++ b/drivers/power/supply/power_supply_core.c @@ -1592,11 +1592,11 @@ __power_supply_register(struct device *parent, if (rc) goto register_thermal_failed; - scoped_guard(rwsem_read, &psy->extensions_sem) { - rc = power_supply_create_triggers(psy); - if (rc) - goto create_triggers_failed; + rc = power_supply_create_triggers(psy); + if (rc) + goto create_triggers_failed; + scoped_guard(rwsem_read, &psy->extensions_sem) { rc = power_supply_add_hwmon_sysfs(psy); if (rc) goto add_hwmon_sysfs_failed; diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c index bf6468c56419c..4380e6ddb8495 100644 --- a/drivers/ptp/ptp_chardev.c +++ b/drivers/ptp/ptp_chardev.c @@ -205,6 +205,10 @@ long ptp_ioctl(struct posix_clock_context *pccontext, unsigned int cmd, case PTP_EXTTS_REQUEST: case PTP_EXTTS_REQUEST2: + if ((pccontext->fp->f_mode & FMODE_WRITE) == 0) { + err = -EACCES; + break; + } memset(&req, 0, sizeof(req)); if (copy_from_user(&req.extts, (void __user *)arg, @@ -246,6 +250,10 @@ long ptp_ioctl(struct posix_clock_context *pccontext, unsigned int cmd, case PTP_PEROUT_REQUEST: case PTP_PEROUT_REQUEST2: + if ((pccontext->fp->f_mode & FMODE_WRITE) == 0) { + err = -EACCES; + break; + } memset(&req, 0, sizeof(req)); if (copy_from_user(&req.perout, (void __user *)arg, @@ -314,6 +322,10 @@ long ptp_ioctl(struct posix_clock_context *pccontext, unsigned int cmd, case PTP_ENABLE_PPS: case PTP_ENABLE_PPS2: + if ((pccontext->fp->f_mode & FMODE_WRITE) == 0) { + err = -EACCES; + break; + } memset(&req, 0, sizeof(req)); if (!capable(CAP_SYS_TIME)) @@ -456,6 +468,10 @@ long ptp_ioctl(struct posix_clock_context *pccontext, unsigned int cmd, case PTP_PIN_SETFUNC: case 
PTP_PIN_SETFUNC2: + if ((pccontext->fp->f_mode & FMODE_WRITE) == 0) { + err = -EACCES; + break; + } if (copy_from_user(&pd, (void __user *)arg, sizeof(pd))) { err = -EFAULT; break; diff --git a/drivers/ptp/ptp_ocp.c b/drivers/ptp/ptp_ocp.c index b651087f426f5..b25635c5c7453 100644 --- a/drivers/ptp/ptp_ocp.c +++ b/drivers/ptp/ptp_ocp.c @@ -2090,6 +2090,10 @@ ptp_ocp_signal_from_perout(struct ptp_ocp *bp, int gen, { struct ptp_ocp_signal s = { }; + if (req->flags & ~(PTP_PEROUT_DUTY_CYCLE | + PTP_PEROUT_PHASE)) + return -EOPNOTSUPP; + s.polarity = bp->signal[gen].polarity; s.period = ktime_set(req->period.sec, req->period.nsec); if (!s.period) @@ -3959,9 +3963,6 @@ _signal_summary_show(struct seq_file *s, struct ptp_ocp *bp, int nr) bool on; u32 val; - if (!signal) - return; - on = signal->running; sprintf(label, "GEN%d", nr + 1); seq_printf(s, "%7s: %s, period:%llu duty:%d%% phase:%llu pol:%d", diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c index 27afbb9d544b7..cbf531d0ba688 100644 --- a/drivers/rapidio/devices/rio_mport_cdev.c +++ b/drivers/rapidio/devices/rio_mport_cdev.c @@ -1742,7 +1742,8 @@ static int rio_mport_add_riodev(struct mport_cdev_priv *priv, err = rio_add_net(net); if (err) { rmcd_debug(RDEV, "failed to register net, err=%d", err); - kfree(net); + put_device(&net->dev); + mport->net = NULL; goto cleanup; } } diff --git a/drivers/rapidio/rio-scan.c b/drivers/rapidio/rio-scan.c index fdcf742b2adbc..c12941f71e2cb 100644 --- a/drivers/rapidio/rio-scan.c +++ b/drivers/rapidio/rio-scan.c @@ -871,7 +871,10 @@ static struct rio_net *rio_scan_alloc_net(struct rio_mport *mport, dev_set_name(&net->dev, "rnet_%d", net->id); net->dev.parent = &mport->dev; net->dev.release = rio_scan_release_dev; - rio_add_net(net); + if (rio_add_net(net)) { + put_device(&net->dev); + net = NULL; + } } return net; diff --git a/drivers/reset/reset-microchip-sparx5.c b/drivers/reset/reset-microchip-sparx5.c index aa5464be7053b..6d3e75b33260e 100644 --- a/drivers/reset/reset-microchip-sparx5.c +++ b/drivers/reset/reset-microchip-sparx5.c @@ -8,6 +8,7 @@ */ #include #include +#include #include #include #include @@ -72,14 +73,22 @@ static struct regmap *mchp_lan966x_syscon_to_regmap(struct device *dev, struct device_node *syscon_np) { struct regmap_config regmap_config = mchp_lan966x_syscon_regmap_config; - resource_size_t size; + struct resource res; void __iomem *base; + int err; + + err = of_address_to_resource(syscon_np, 0, &res); + if (err) + return ERR_PTR(err); - base = devm_of_iomap(dev, syscon_np, 0, &size); - if (IS_ERR(base)) - return ERR_CAST(base); + /* It is not possible to use devm_of_iomap because this resource is + * shared with other drivers. + */ + base = devm_ioremap(dev, res.start, resource_size(&res)); + if (!base) + return ERR_PTR(-ENOMEM); - regmap_config.max_register = size - 4; + regmap_config.max_register = resource_size(&res) - 4; return devm_regmap_init_mmio(dev, base, ®map_config); } diff --git a/drivers/s390/net/Kconfig b/drivers/s390/net/Kconfig index c61e6427384c3..9eb9e3c49f815 100644 --- a/drivers/s390/net/Kconfig +++ b/drivers/s390/net/Kconfig @@ -2,15 +2,6 @@ menu "S/390 network device drivers" depends on NETDEVICES && S390 -config LCS - def_tristate m - prompt "Lan Channel Station Interface" - depends on CCW && NETDEVICES && ETHERNET - help - Select this option if you want to use LCS networking on IBM System z. - To compile as a module, choose M. The module name is lcs. - If you do not use LCS, choose N. 
- config CTCM def_tristate m prompt "CTC and MPC SNA device support" @@ -98,7 +89,7 @@ config QETH_OSX config CCWGROUP tristate - default (LCS || CTCM || QETH || SMC) + default (CTCM || QETH || SMC) config ISM tristate "Support for ISM vPCI Adapter" diff --git a/drivers/s390/net/Makefile b/drivers/s390/net/Makefile index bc55ec316adb1..b5aaba2901274 100644 --- a/drivers/s390/net/Makefile +++ b/drivers/s390/net/Makefile @@ -8,7 +8,6 @@ obj-$(CONFIG_CTCM) += ctcm.o fsm.o obj-$(CONFIG_NETIUCV) += netiucv.o fsm.o obj-$(CONFIG_SMSGIUCV) += smsgiucv.o obj-$(CONFIG_SMSGIUCV_EVENT) += smsgiucv_app.o -obj-$(CONFIG_LCS) += lcs.o qeth-y += qeth_core_sys.o qeth_core_main.o qeth_core_mpc.o qeth_ethtool.o obj-$(CONFIG_QETH) += qeth.o qeth_l2-y += qeth_l2_main.o qeth_l2_sys.o diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c deleted file mode 100644 index 88db8378325a3..0000000000000 --- a/drivers/s390/net/lcs.c +++ /dev/null @@ -1,2385 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0+ -/* - * Linux for S/390 LAN channel station device driver - * - * Copyright IBM Corp. 1999, 2009 - * Author(s): Original Code written by - * DJ Barrow - * Rewritten by - * Frank Pavlic and - * Martin Schwidefsky - */ - -#define KMSG_COMPONENT "lcs" -#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include - -#include "lcs.h" - - -/* - * initialization string for output - */ - -static char version[] __initdata = "LCS driver"; - -/* - * the root device for lcs group devices - */ -static struct device *lcs_root_dev; - -/* - * Some prototypes. - */ -static void lcs_tasklet(unsigned long); -static void lcs_start_kernel_thread(struct work_struct *); -static void lcs_get_frames_cb(struct lcs_channel *, struct lcs_buffer *); -#ifdef CONFIG_IP_MULTICAST -static int lcs_send_delipm(struct lcs_card *, struct lcs_ipm_list *); -#endif /* CONFIG_IP_MULTICAST */ -static int lcs_recovery(void *ptr); - -/* - * Debug Facility Stuff - */ -static char debug_buffer[255]; -static debug_info_t *lcs_dbf_setup; -static debug_info_t *lcs_dbf_trace; - -/* - * LCS Debug Facility functions - */ -static void -lcs_unregister_debug_facility(void) -{ - debug_unregister(lcs_dbf_setup); - debug_unregister(lcs_dbf_trace); -} - -static int -lcs_register_debug_facility(void) -{ - lcs_dbf_setup = debug_register("lcs_setup", 2, 1, 8); - lcs_dbf_trace = debug_register("lcs_trace", 4, 1, 8); - if (lcs_dbf_setup == NULL || lcs_dbf_trace == NULL) { - pr_err("Not enough memory for debug facility.\n"); - lcs_unregister_debug_facility(); - return -ENOMEM; - } - debug_register_view(lcs_dbf_setup, &debug_hex_ascii_view); - debug_set_level(lcs_dbf_setup, 2); - debug_register_view(lcs_dbf_trace, &debug_hex_ascii_view); - debug_set_level(lcs_dbf_trace, 2); - return 0; -} - -/* - * Allocate io buffers. - */ -static int -lcs_alloc_channel(struct lcs_channel *channel) -{ - int cnt; - - LCS_DBF_TEXT(2, setup, "ichalloc"); - for (cnt = 0; cnt < LCS_NUM_BUFFS; cnt++) { - /* alloc memory fo iobuffer */ - channel->iob[cnt].data = - kzalloc(LCS_IOBUFFERSIZE, GFP_DMA | GFP_KERNEL); - if (channel->iob[cnt].data == NULL) - break; - channel->iob[cnt].state = LCS_BUF_STATE_EMPTY; - } - if (cnt < LCS_NUM_BUFFS) { - /* Not all io buffers could be allocated. */ - LCS_DBF_TEXT(2, setup, "echalloc"); - while (cnt-- > 0) - kfree(channel->iob[cnt].data); - return -ENOMEM; - } - return 0; -} - -/* - * Free io buffers. 
- */ -static void -lcs_free_channel(struct lcs_channel *channel) -{ - int cnt; - - LCS_DBF_TEXT(2, setup, "ichfree"); - for (cnt = 0; cnt < LCS_NUM_BUFFS; cnt++) { - kfree(channel->iob[cnt].data); - channel->iob[cnt].data = NULL; - } -} - -/* - * Cleanup channel. - */ -static void -lcs_cleanup_channel(struct lcs_channel *channel) -{ - LCS_DBF_TEXT(3, setup, "cleanch"); - /* Kill write channel tasklets. */ - tasklet_kill(&channel->irq_tasklet); - /* Free channel buffers. */ - lcs_free_channel(channel); -} - -/* - * LCS free memory for card and channels. - */ -static void -lcs_free_card(struct lcs_card *card) -{ - LCS_DBF_TEXT(2, setup, "remcard"); - LCS_DBF_HEX(2, setup, &card, sizeof(void*)); - kfree(card); -} - -/* - * LCS alloc memory for card and channels - */ -static struct lcs_card * -lcs_alloc_card(void) -{ - struct lcs_card *card; - int rc; - - LCS_DBF_TEXT(2, setup, "alloclcs"); - - card = kzalloc(sizeof(struct lcs_card), GFP_KERNEL | GFP_DMA); - if (card == NULL) - return NULL; - card->lan_type = LCS_FRAME_TYPE_AUTO; - card->pkt_seq = 0; - card->lancmd_timeout = LCS_LANCMD_TIMEOUT_DEFAULT; - /* Allocate io buffers for the read channel. */ - rc = lcs_alloc_channel(&card->read); - if (rc){ - LCS_DBF_TEXT(2, setup, "iccwerr"); - lcs_free_card(card); - return NULL; - } - /* Allocate io buffers for the write channel. */ - rc = lcs_alloc_channel(&card->write); - if (rc) { - LCS_DBF_TEXT(2, setup, "iccwerr"); - lcs_cleanup_channel(&card->read); - lcs_free_card(card); - return NULL; - } - -#ifdef CONFIG_IP_MULTICAST - INIT_LIST_HEAD(&card->ipm_list); -#endif - LCS_DBF_HEX(2, setup, &card, sizeof(void*)); - return card; -} - -/* - * Setup read channel. - */ -static void -lcs_setup_read_ccws(struct lcs_card *card) -{ - int cnt; - - LCS_DBF_TEXT(2, setup, "ireadccw"); - /* Setup read ccws. */ - memset(card->read.ccws, 0, sizeof (struct ccw1) * (LCS_NUM_BUFFS + 1)); - for (cnt = 0; cnt < LCS_NUM_BUFFS; cnt++) { - card->read.ccws[cnt].cmd_code = LCS_CCW_READ; - card->read.ccws[cnt].count = LCS_IOBUFFERSIZE; - card->read.ccws[cnt].flags = - CCW_FLAG_CC | CCW_FLAG_SLI | CCW_FLAG_PCI; - /* - * Note: we have allocated the buffer with GFP_DMA, so - * we do not need to do set_normalized_cda. - */ - card->read.ccws[cnt].cda = - virt_to_dma32(card->read.iob[cnt].data); - ((struct lcs_header *) - card->read.iob[cnt].data)->offset = LCS_ILLEGAL_OFFSET; - card->read.iob[cnt].callback = lcs_get_frames_cb; - card->read.iob[cnt].state = LCS_BUF_STATE_READY; - card->read.iob[cnt].count = LCS_IOBUFFERSIZE; - } - card->read.ccws[0].flags &= ~CCW_FLAG_PCI; - card->read.ccws[LCS_NUM_BUFFS - 1].flags &= ~CCW_FLAG_PCI; - card->read.ccws[LCS_NUM_BUFFS - 1].flags |= CCW_FLAG_SUSPEND; - /* Last ccw is a tic (transfer in channel). */ - card->read.ccws[LCS_NUM_BUFFS].cmd_code = LCS_CCW_TRANSFER; - card->read.ccws[LCS_NUM_BUFFS].cda = virt_to_dma32(card->read.ccws); - /* Setg initial state of the read channel. */ - card->read.state = LCS_CH_STATE_INIT; - - card->read.io_idx = 0; - card->read.buf_idx = 0; -} - -static void -lcs_setup_read(struct lcs_card *card) -{ - LCS_DBF_TEXT(3, setup, "initread"); - - lcs_setup_read_ccws(card); - /* Initialize read channel tasklet. */ - card->read.irq_tasklet.data = (unsigned long) &card->read; - card->read.irq_tasklet.func = lcs_tasklet; - /* Initialize waitqueue. */ - init_waitqueue_head(&card->read.wait_q); -} - -/* - * Setup write channel. - */ -static void -lcs_setup_write_ccws(struct lcs_card *card) -{ - int cnt; - - LCS_DBF_TEXT(3, setup, "iwritccw"); - /* Setup write ccws. 
*/ - memset(card->write.ccws, 0, sizeof(struct ccw1) * (LCS_NUM_BUFFS + 1)); - for (cnt = 0; cnt < LCS_NUM_BUFFS; cnt++) { - card->write.ccws[cnt].cmd_code = LCS_CCW_WRITE; - card->write.ccws[cnt].count = 0; - card->write.ccws[cnt].flags = - CCW_FLAG_SUSPEND | CCW_FLAG_CC | CCW_FLAG_SLI; - /* - * Note: we have allocated the buffer with GFP_DMA, so - * we do not need to do set_normalized_cda. - */ - card->write.ccws[cnt].cda = - virt_to_dma32(card->write.iob[cnt].data); - } - /* Last ccw is a tic (transfer in channel). */ - card->write.ccws[LCS_NUM_BUFFS].cmd_code = LCS_CCW_TRANSFER; - card->write.ccws[LCS_NUM_BUFFS].cda = virt_to_dma32(card->write.ccws); - /* Set initial state of the write channel. */ - card->read.state = LCS_CH_STATE_INIT; - - card->write.io_idx = 0; - card->write.buf_idx = 0; -} - -static void -lcs_setup_write(struct lcs_card *card) -{ - LCS_DBF_TEXT(3, setup, "initwrit"); - - lcs_setup_write_ccws(card); - /* Initialize write channel tasklet. */ - card->write.irq_tasklet.data = (unsigned long) &card->write; - card->write.irq_tasklet.func = lcs_tasklet; - /* Initialize waitqueue. */ - init_waitqueue_head(&card->write.wait_q); -} - -static void -lcs_set_allowed_threads(struct lcs_card *card, unsigned long threads) -{ - unsigned long flags; - - spin_lock_irqsave(&card->mask_lock, flags); - card->thread_allowed_mask = threads; - spin_unlock_irqrestore(&card->mask_lock, flags); - wake_up(&card->wait_q); -} -static int lcs_threads_running(struct lcs_card *card, unsigned long threads) -{ - unsigned long flags; - int rc = 0; - - spin_lock_irqsave(&card->mask_lock, flags); - rc = (card->thread_running_mask & threads); - spin_unlock_irqrestore(&card->mask_lock, flags); - return rc; -} - -static int -lcs_wait_for_threads(struct lcs_card *card, unsigned long threads) -{ - return wait_event_interruptible(card->wait_q, - lcs_threads_running(card, threads) == 0); -} - -static int lcs_set_thread_start_bit(struct lcs_card *card, unsigned long thread) -{ - unsigned long flags; - - spin_lock_irqsave(&card->mask_lock, flags); - if ( !(card->thread_allowed_mask & thread) || - (card->thread_start_mask & thread) ) { - spin_unlock_irqrestore(&card->mask_lock, flags); - return -EPERM; - } - card->thread_start_mask |= thread; - spin_unlock_irqrestore(&card->mask_lock, flags); - return 0; -} - -static void -lcs_clear_thread_running_bit(struct lcs_card *card, unsigned long thread) -{ - unsigned long flags; - - spin_lock_irqsave(&card->mask_lock, flags); - card->thread_running_mask &= ~thread; - spin_unlock_irqrestore(&card->mask_lock, flags); - wake_up(&card->wait_q); -} - -static int __lcs_do_run_thread(struct lcs_card *card, unsigned long thread) -{ - unsigned long flags; - int rc = 0; - - spin_lock_irqsave(&card->mask_lock, flags); - if (card->thread_start_mask & thread){ - if ((card->thread_allowed_mask & thread) && - !(card->thread_running_mask & thread)){ - rc = 1; - card->thread_start_mask &= ~thread; - card->thread_running_mask |= thread; - } else - rc = -EPERM; - } - spin_unlock_irqrestore(&card->mask_lock, flags); - return rc; -} - -static int -lcs_do_run_thread(struct lcs_card *card, unsigned long thread) -{ - int rc = 0; - wait_event(card->wait_q, - (rc = __lcs_do_run_thread(card, thread)) >= 0); - return rc; -} - -static int -lcs_do_start_thread(struct lcs_card *card, unsigned long thread) -{ - unsigned long flags; - int rc = 0; - - spin_lock_irqsave(&card->mask_lock, flags); - LCS_DBF_TEXT_(4, trace, " %02x%02x%02x", - (u8) card->thread_start_mask, - (u8) card->thread_allowed_mask, - 
(u8) card->thread_running_mask); - rc = (card->thread_start_mask & thread); - spin_unlock_irqrestore(&card->mask_lock, flags); - return rc; -} - -/* - * Initialize channels, card and state machines. - */ -static void -lcs_setup_card(struct lcs_card *card) -{ - LCS_DBF_TEXT(2, setup, "initcard"); - LCS_DBF_HEX(2, setup, &card, sizeof(void*)); - - lcs_setup_read(card); - lcs_setup_write(card); - /* Set card's initial state. */ - card->state = DEV_STATE_DOWN; - card->tx_buffer = NULL; - card->tx_emitted = 0; - - init_waitqueue_head(&card->wait_q); - spin_lock_init(&card->lock); - spin_lock_init(&card->ipm_lock); - spin_lock_init(&card->mask_lock); -#ifdef CONFIG_IP_MULTICAST - INIT_LIST_HEAD(&card->ipm_list); -#endif - INIT_LIST_HEAD(&card->lancmd_waiters); -} - -static void lcs_clear_multicast_list(struct lcs_card *card) -{ -#ifdef CONFIG_IP_MULTICAST - struct lcs_ipm_list *ipm; - unsigned long flags; - - /* Free multicast list. */ - LCS_DBF_TEXT(3, setup, "clmclist"); - spin_lock_irqsave(&card->ipm_lock, flags); - while (!list_empty(&card->ipm_list)){ - ipm = list_entry(card->ipm_list.next, - struct lcs_ipm_list, list); - list_del(&ipm->list); - if (ipm->ipm_state != LCS_IPM_STATE_SET_REQUIRED){ - spin_unlock_irqrestore(&card->ipm_lock, flags); - lcs_send_delipm(card, ipm); - spin_lock_irqsave(&card->ipm_lock, flags); - } - kfree(ipm); - } - spin_unlock_irqrestore(&card->ipm_lock, flags); -#endif -} - -/* - * Cleanup channels, card and state machines. - */ -static void -lcs_cleanup_card(struct lcs_card *card) -{ - - LCS_DBF_TEXT(3, setup, "cleancrd"); - LCS_DBF_HEX(2,setup,&card,sizeof(void*)); - - if (card->dev != NULL) - free_netdev(card->dev); - /* Cleanup channels. */ - lcs_cleanup_channel(&card->write); - lcs_cleanup_channel(&card->read); -} - -/* - * Start channel. - */ -static int -lcs_start_channel(struct lcs_channel *channel) -{ - unsigned long flags; - int rc; - - LCS_DBF_TEXT_(4, trace,"ssch%s", dev_name(&channel->ccwdev->dev)); - spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); - rc = ccw_device_start(channel->ccwdev, - channel->ccws + channel->io_idx, 0, 0, - DOIO_DENY_PREFETCH | DOIO_ALLOW_SUSPEND); - if (rc == 0) - channel->state = LCS_CH_STATE_RUNNING; - spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags); - if (rc) { - LCS_DBF_TEXT_(4,trace,"essh%s", - dev_name(&channel->ccwdev->dev)); - dev_err(&channel->ccwdev->dev, - "Starting an LCS device resulted in an error," - " rc=%d!\n", rc); - } - return rc; -} - -static int -lcs_clear_channel(struct lcs_channel *channel) -{ - unsigned long flags; - int rc; - - LCS_DBF_TEXT(4,trace,"clearch"); - LCS_DBF_TEXT_(4, trace, "%s", dev_name(&channel->ccwdev->dev)); - spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); - rc = ccw_device_clear(channel->ccwdev, 0); - spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags); - if (rc) { - LCS_DBF_TEXT_(4, trace, "ecsc%s", - dev_name(&channel->ccwdev->dev)); - return rc; - } - wait_event(channel->wait_q, (channel->state == LCS_CH_STATE_CLEARED)); - channel->state = LCS_CH_STATE_STOPPED; - return rc; -} - - -/* - * Stop channel.
- */ -static int -lcs_stop_channel(struct lcs_channel *channel) -{ - unsigned long flags; - int rc; - - if (channel->state == LCS_CH_STATE_STOPPED) - return 0; - LCS_DBF_TEXT(4,trace,"haltsch"); - LCS_DBF_TEXT_(4, trace, "%s", dev_name(&channel->ccwdev->dev)); - channel->state = LCS_CH_STATE_INIT; - spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); - rc = ccw_device_halt(channel->ccwdev, 0); - spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags); - if (rc) { - LCS_DBF_TEXT_(4, trace, "ehsc%s", - dev_name(&channel->ccwdev->dev)); - return rc; - } - /* Asynchronous halt initiated. Wait for its completion. */ - wait_event(channel->wait_q, (channel->state == LCS_CH_STATE_HALTED)); - lcs_clear_channel(channel); - return 0; -} - -/* - * start read and write channel - */ -static int -lcs_start_channels(struct lcs_card *card) -{ - int rc; - - LCS_DBF_TEXT(2, trace, "chstart"); - /* start read channel */ - rc = lcs_start_channel(&card->read); - if (rc) - return rc; - /* start write channel */ - rc = lcs_start_channel(&card->write); - if (rc) - lcs_stop_channel(&card->read); - return rc; -} - -/* - * stop read and write channel - */ -static int -lcs_stop_channels(struct lcs_card *card) -{ - LCS_DBF_TEXT(2, trace, "chhalt"); - lcs_stop_channel(&card->read); - lcs_stop_channel(&card->write); - return 0; -} - -/* - * Get empty buffer. - */ -static struct lcs_buffer * -__lcs_get_buffer(struct lcs_channel *channel) -{ - int index; - - LCS_DBF_TEXT(5, trace, "_getbuff"); - index = channel->io_idx; - do { - if (channel->iob[index].state == LCS_BUF_STATE_EMPTY) { - channel->iob[index].state = LCS_BUF_STATE_LOCKED; - return channel->iob + index; - } - index = (index + 1) & (LCS_NUM_BUFFS - 1); - } while (index != channel->io_idx); - return NULL; -} - -static struct lcs_buffer * -lcs_get_buffer(struct lcs_channel *channel) -{ - struct lcs_buffer *buffer; - unsigned long flags; - - LCS_DBF_TEXT(5, trace, "getbuff"); - spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); - buffer = __lcs_get_buffer(channel); - spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags); - return buffer; -} - -/* - * Resume channel program if the channel is suspended. - */ -static int -__lcs_resume_channel(struct lcs_channel *channel) -{ - int rc; - - if (channel->state != LCS_CH_STATE_SUSPENDED) - return 0; - if (channel->ccws[channel->io_idx].flags & CCW_FLAG_SUSPEND) - return 0; - LCS_DBF_TEXT_(5, trace, "rsch%s", dev_name(&channel->ccwdev->dev)); - rc = ccw_device_resume(channel->ccwdev); - if (rc) { - LCS_DBF_TEXT_(4, trace, "ersc%s", - dev_name(&channel->ccwdev->dev)); - dev_err(&channel->ccwdev->dev, - "Sending data from the LCS device to the LAN failed" - " with rc=%d\n",rc); - } else - channel->state = LCS_CH_STATE_RUNNING; - return rc; - -} - -/* - * Make a buffer ready for processing. - */ -static void __lcs_ready_buffer_bits(struct lcs_channel *channel, int index) -{ - int prev, next; - - LCS_DBF_TEXT(5, trace, "rdybits"); - prev = (index - 1) & (LCS_NUM_BUFFS - 1); - next = (index + 1) & (LCS_NUM_BUFFS - 1); - /* Check if we may clear the suspend bit of this buffer. */ - if (channel->ccws[next].flags & CCW_FLAG_SUSPEND) { - /* Check if we have to set the PCI bit. */ - if (!(channel->ccws[prev].flags & CCW_FLAG_SUSPEND)) - /* Suspend bit of the previous buffer is not set. */ - channel->ccws[index].flags |= CCW_FLAG_PCI; - /* Suspend bit of the next buffer is set.
*/ - channel->ccws[index].flags &= ~CCW_FLAG_SUSPEND; - } -} - -static int -lcs_ready_buffer(struct lcs_channel *channel, struct lcs_buffer *buffer) -{ - unsigned long flags; - int index, rc; - - LCS_DBF_TEXT(5, trace, "rdybuff"); - BUG_ON(buffer->state != LCS_BUF_STATE_LOCKED && - buffer->state != LCS_BUF_STATE_PROCESSED); - spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); - buffer->state = LCS_BUF_STATE_READY; - index = buffer - channel->iob; - /* Set length. */ - channel->ccws[index].count = buffer->count; - /* Check relevant PCI/suspend bits. */ - __lcs_ready_buffer_bits(channel, index); - rc = __lcs_resume_channel(channel); - spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags); - return rc; -} - -/* - * Mark the buffer as processed. Take care of the suspend bit - * of the previous buffer. This function is called from - * interrupt context, so the lock must not be taken. - */ -static int -__lcs_processed_buffer(struct lcs_channel *channel, struct lcs_buffer *buffer) -{ - int index, prev, next; - - LCS_DBF_TEXT(5, trace, "prcsbuff"); - BUG_ON(buffer->state != LCS_BUF_STATE_READY); - buffer->state = LCS_BUF_STATE_PROCESSED; - index = buffer - channel->iob; - prev = (index - 1) & (LCS_NUM_BUFFS - 1); - next = (index + 1) & (LCS_NUM_BUFFS - 1); - /* Set the suspend bit and clear the PCI bit of this buffer. */ - channel->ccws[index].flags |= CCW_FLAG_SUSPEND; - channel->ccws[index].flags &= ~CCW_FLAG_PCI; - /* Check the suspend bit of the previous buffer. */ - if (channel->iob[prev].state == LCS_BUF_STATE_READY) { - /* - * Previous buffer is in state ready. It might have - * happened in lcs_ready_buffer that the suspend bit - * has not been cleared to avoid an endless loop. - * Do it now. - */ - __lcs_ready_buffer_bits(channel, prev); - } - /* Clear PCI bit of next buffer. */ - channel->ccws[next].flags &= ~CCW_FLAG_PCI; - return __lcs_resume_channel(channel); -} - -/* - * Put a processed buffer back to state empty. - */ -static void -lcs_release_buffer(struct lcs_channel *channel, struct lcs_buffer *buffer) -{ - unsigned long flags; - - LCS_DBF_TEXT(5, trace, "relbuff"); - BUG_ON(buffer->state != LCS_BUF_STATE_LOCKED && - buffer->state != LCS_BUF_STATE_PROCESSED); - spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); - buffer->state = LCS_BUF_STATE_EMPTY; - spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags); -} - -/* - * Get buffer for a lan command. - */ -static struct lcs_buffer * -lcs_get_lancmd(struct lcs_card *card, int count) -{ - struct lcs_buffer *buffer; - struct lcs_cmd *cmd; - - LCS_DBF_TEXT(4, trace, "getlncmd"); - /* Get buffer and wait if none is available. 
*/ - wait_event(card->write.wait_q, - ((buffer = lcs_get_buffer(&card->write)) != NULL)); - count += sizeof(struct lcs_header); - *(__u16 *)(buffer->data + count) = 0; - buffer->count = count + sizeof(__u16); - buffer->callback = lcs_release_buffer; - cmd = (struct lcs_cmd *) buffer->data; - cmd->offset = count; - cmd->type = LCS_FRAME_TYPE_CONTROL; - cmd->slot = 0; - return buffer; -} - - -static void -lcs_get_reply(struct lcs_reply *reply) -{ - refcount_inc(&reply->refcnt); -} - -static void -lcs_put_reply(struct lcs_reply *reply) -{ - if (refcount_dec_and_test(&reply->refcnt)) - kfree(reply); -} - -static struct lcs_reply * -lcs_alloc_reply(struct lcs_cmd *cmd) -{ - struct lcs_reply *reply; - - LCS_DBF_TEXT(4, trace, "getreply"); - - reply = kzalloc(sizeof(struct lcs_reply), GFP_ATOMIC); - if (!reply) - return NULL; - refcount_set(&reply->refcnt, 1); - reply->sequence_no = cmd->sequence_no; - reply->received = 0; - reply->rc = 0; - init_waitqueue_head(&reply->wait_q); - - return reply; -} - -/* - * Notifier function for lancmd replies. Called from read irq. - */ -static void -lcs_notify_lancmd_waiters(struct lcs_card *card, struct lcs_cmd *cmd) -{ - struct list_head *l, *n; - struct lcs_reply *reply; - - LCS_DBF_TEXT(4, trace, "notiwait"); - spin_lock(&card->lock); - list_for_each_safe(l, n, &card->lancmd_waiters) { - reply = list_entry(l, struct lcs_reply, list); - if (reply->sequence_no == cmd->sequence_no) { - lcs_get_reply(reply); - list_del_init(&reply->list); - if (reply->callback != NULL) - reply->callback(card, cmd); - reply->received = 1; - reply->rc = cmd->return_code; - wake_up(&reply->wait_q); - lcs_put_reply(reply); - break; - } - } - spin_unlock(&card->lock); -} - -/* - * Emit buffer of a lan command. - */ -static void -lcs_lancmd_timeout(struct timer_list *t) -{ - struct lcs_reply *reply = from_timer(reply, t, timer); - struct lcs_reply *list_reply, *r; - unsigned long flags; - - LCS_DBF_TEXT(4, trace, "timeout"); - spin_lock_irqsave(&reply->card->lock, flags); - list_for_each_entry_safe(list_reply, r, - &reply->card->lancmd_waiters,list) { - if (reply == list_reply) { - lcs_get_reply(reply); - list_del_init(&reply->list); - spin_unlock_irqrestore(&reply->card->lock, flags); - reply->received = 1; - reply->rc = -ETIME; - wake_up(&reply->wait_q); - lcs_put_reply(reply); - return; - } - } - spin_unlock_irqrestore(&reply->card->lock, flags); -} - -static int -lcs_send_lancmd(struct lcs_card *card, struct lcs_buffer *buffer, - void (*reply_callback)(struct lcs_card *, struct lcs_cmd *)) -{ - struct lcs_reply *reply; - struct lcs_cmd *cmd; - unsigned long flags; - int rc; - - LCS_DBF_TEXT(4, trace, "sendcmd"); - cmd = (struct lcs_cmd *) buffer->data; - cmd->return_code = 0; - cmd->sequence_no = card->sequence_no++; - reply = lcs_alloc_reply(cmd); - if (!reply) - return -ENOMEM; - reply->callback = reply_callback; - reply->card = card; - spin_lock_irqsave(&card->lock, flags); - list_add_tail(&reply->list, &card->lancmd_waiters); - spin_unlock_irqrestore(&card->lock, flags); - - buffer->callback = lcs_release_buffer; - rc = lcs_ready_buffer(&card->write, buffer); - if (rc) - return rc; - timer_setup(&reply->timer, lcs_lancmd_timeout, 0); - mod_timer(&reply->timer, jiffies + HZ * card->lancmd_timeout); - wait_event(reply->wait_q, reply->received); - del_timer_sync(&reply->timer); - LCS_DBF_TEXT_(4, trace, "rc:%d",reply->rc); - rc = reply->rc; - lcs_put_reply(reply); - return rc ? 
-EIO : 0; -} - -/* - * LCS startup command - */ -static int -lcs_send_startup(struct lcs_card *card, __u8 initiator) -{ - struct lcs_buffer *buffer; - struct lcs_cmd *cmd; - - LCS_DBF_TEXT(2, trace, "startup"); - buffer = lcs_get_lancmd(card, LCS_STD_CMD_SIZE); - cmd = (struct lcs_cmd *) buffer->data; - cmd->cmd_code = LCS_CMD_STARTUP; - cmd->initiator = initiator; - cmd->cmd.lcs_startup.buff_size = LCS_IOBUFFERSIZE; - return lcs_send_lancmd(card, buffer, NULL); -} - -/* - * LCS shutdown command - */ -static int -lcs_send_shutdown(struct lcs_card *card) -{ - struct lcs_buffer *buffer; - struct lcs_cmd *cmd; - - LCS_DBF_TEXT(2, trace, "shutdown"); - buffer = lcs_get_lancmd(card, LCS_STD_CMD_SIZE); - cmd = (struct lcs_cmd *) buffer->data; - cmd->cmd_code = LCS_CMD_SHUTDOWN; - cmd->initiator = LCS_INITIATOR_TCPIP; - return lcs_send_lancmd(card, buffer, NULL); -} - -/* - * LCS lanstat command - */ -static void -__lcs_lanstat_cb(struct lcs_card *card, struct lcs_cmd *cmd) -{ - LCS_DBF_TEXT(2, trace, "statcb"); - memcpy(card->mac, cmd->cmd.lcs_lanstat_cmd.mac_addr, LCS_MAC_LENGTH); -} - -static int -lcs_send_lanstat(struct lcs_card *card) -{ - struct lcs_buffer *buffer; - struct lcs_cmd *cmd; - - LCS_DBF_TEXT(2,trace, "cmdstat"); - buffer = lcs_get_lancmd(card, LCS_STD_CMD_SIZE); - cmd = (struct lcs_cmd *) buffer->data; - /* Setup lanstat command. */ - cmd->cmd_code = LCS_CMD_LANSTAT; - cmd->initiator = LCS_INITIATOR_TCPIP; - cmd->cmd.lcs_std_cmd.lan_type = card->lan_type; - cmd->cmd.lcs_std_cmd.portno = card->portno; - return lcs_send_lancmd(card, buffer, __lcs_lanstat_cb); -} - -/* - * send stoplan command - */ -static int -lcs_send_stoplan(struct lcs_card *card, __u8 initiator) -{ - struct lcs_buffer *buffer; - struct lcs_cmd *cmd; - - LCS_DBF_TEXT(2, trace, "cmdstpln"); - buffer = lcs_get_lancmd(card, LCS_STD_CMD_SIZE); - cmd = (struct lcs_cmd *) buffer->data; - cmd->cmd_code = LCS_CMD_STOPLAN; - cmd->initiator = initiator; - cmd->cmd.lcs_std_cmd.lan_type = card->lan_type; - cmd->cmd.lcs_std_cmd.portno = card->portno; - return lcs_send_lancmd(card, buffer, NULL); -} - -/* - * send startlan command - */ -static void -__lcs_send_startlan_cb(struct lcs_card *card, struct lcs_cmd *cmd) -{ - LCS_DBF_TEXT(2, trace, "srtlancb"); - card->lan_type = cmd->cmd.lcs_std_cmd.lan_type; - card->portno = cmd->cmd.lcs_std_cmd.portno; -} - -static int -lcs_send_startlan(struct lcs_card *card, __u8 initiator) -{ - struct lcs_buffer *buffer; - struct lcs_cmd *cmd; - - LCS_DBF_TEXT(2, trace, "cmdstaln"); - buffer = lcs_get_lancmd(card, LCS_STD_CMD_SIZE); - cmd = (struct lcs_cmd *) buffer->data; - cmd->cmd_code = LCS_CMD_STARTLAN; - cmd->initiator = initiator; - cmd->cmd.lcs_std_cmd.lan_type = card->lan_type; - cmd->cmd.lcs_std_cmd.portno = card->portno; - return lcs_send_lancmd(card, buffer, __lcs_send_startlan_cb); -} - -#ifdef CONFIG_IP_MULTICAST -/* - * send setipm command (Multicast) - */ -static int -lcs_send_setipm(struct lcs_card *card,struct lcs_ipm_list *ipm_list) -{ - struct lcs_buffer *buffer; - struct lcs_cmd *cmd; - - LCS_DBF_TEXT(2, trace, "cmdsetim"); - buffer = lcs_get_lancmd(card, LCS_MULTICAST_CMD_SIZE); - cmd = (struct lcs_cmd *) buffer->data; - cmd->cmd_code = LCS_CMD_SETIPM; - cmd->initiator = LCS_INITIATOR_TCPIP; - cmd->cmd.lcs_qipassist.lan_type = card->lan_type; - cmd->cmd.lcs_qipassist.portno = card->portno; - cmd->cmd.lcs_qipassist.version = 4; - cmd->cmd.lcs_qipassist.num_ip_pairs = 1; - memcpy(cmd->cmd.lcs_qipassist.lcs_ipass_ctlmsg.ip_mac_pair, - &ipm_list->ipm, sizeof (struct 
lcs_ip_mac_pair)); - LCS_DBF_TEXT_(2, trace, "%x",ipm_list->ipm.ip_addr); - return lcs_send_lancmd(card, buffer, NULL); -} - -/* - * send delipm command (Multicast) - */ -static int -lcs_send_delipm(struct lcs_card *card,struct lcs_ipm_list *ipm_list) -{ - struct lcs_buffer *buffer; - struct lcs_cmd *cmd; - - LCS_DBF_TEXT(2, trace, "cmddelim"); - buffer = lcs_get_lancmd(card, LCS_MULTICAST_CMD_SIZE); - cmd = (struct lcs_cmd *) buffer->data; - cmd->cmd_code = LCS_CMD_DELIPM; - cmd->initiator = LCS_INITIATOR_TCPIP; - cmd->cmd.lcs_qipassist.lan_type = card->lan_type; - cmd->cmd.lcs_qipassist.portno = card->portno; - cmd->cmd.lcs_qipassist.version = 4; - cmd->cmd.lcs_qipassist.num_ip_pairs = 1; - memcpy(cmd->cmd.lcs_qipassist.lcs_ipass_ctlmsg.ip_mac_pair, - &ipm_list->ipm, sizeof (struct lcs_ip_mac_pair)); - LCS_DBF_TEXT_(2, trace, "%x",ipm_list->ipm.ip_addr); - return lcs_send_lancmd(card, buffer, NULL); -} - -/* - * check if multicast is supported by LCS - */ -static void -__lcs_check_multicast_cb(struct lcs_card *card, struct lcs_cmd *cmd) -{ - LCS_DBF_TEXT(2, trace, "chkmccb"); - card->ip_assists_supported = - cmd->cmd.lcs_qipassist.ip_assists_supported; - card->ip_assists_enabled = - cmd->cmd.lcs_qipassist.ip_assists_enabled; -} - -static int -lcs_check_multicast_support(struct lcs_card *card) -{ - struct lcs_buffer *buffer; - struct lcs_cmd *cmd; - int rc; - - LCS_DBF_TEXT(2, trace, "cmdqipa"); - /* Send query ipassist. */ - buffer = lcs_get_lancmd(card, LCS_STD_CMD_SIZE); - cmd = (struct lcs_cmd *) buffer->data; - cmd->cmd_code = LCS_CMD_QIPASSIST; - cmd->initiator = LCS_INITIATOR_TCPIP; - cmd->cmd.lcs_qipassist.lan_type = card->lan_type; - cmd->cmd.lcs_qipassist.portno = card->portno; - cmd->cmd.lcs_qipassist.version = 4; - cmd->cmd.lcs_qipassist.num_ip_pairs = 1; - rc = lcs_send_lancmd(card, buffer, __lcs_check_multicast_cb); - if (rc != 0) { - pr_err("Query IPAssist failed. Assuming unsupported!\n"); - return -EOPNOTSUPP; - } - if (card->ip_assists_supported & LCS_IPASS_MULTICAST_SUPPORT) - return 0; - return -EOPNOTSUPP; -} - -/* - * set or del multicast address on LCS card - */ -static void -lcs_fix_multicast_list(struct lcs_card *card) -{ - struct list_head failed_list; - struct lcs_ipm_list *ipm, *tmp; - unsigned long flags; - int rc; - - LCS_DBF_TEXT(4,trace, "fixipm"); - INIT_LIST_HEAD(&failed_list); - spin_lock_irqsave(&card->ipm_lock, flags); -list_modified: - list_for_each_entry_safe(ipm, tmp, &card->ipm_list, list){ - switch (ipm->ipm_state) { - case LCS_IPM_STATE_SET_REQUIRED: - /* del from ipm_list so no one else can tamper with - * this entry */ - list_del_init(&ipm->list); - spin_unlock_irqrestore(&card->ipm_lock, flags); - rc = lcs_send_setipm(card, ipm); - spin_lock_irqsave(&card->ipm_lock, flags); - if (rc) { - pr_info("Adding multicast address failed." 
- " Table possibly full!\n"); - /* store ipm in failed list -> will be added - * to ipm_list again, so a retry will be done - * during the next call of this function */ - list_add_tail(&ipm->list, &failed_list); - } else { - ipm->ipm_state = LCS_IPM_STATE_ON_CARD; - /* re-insert into ipm_list */ - list_add_tail(&ipm->list, &card->ipm_list); - } - goto list_modified; - case LCS_IPM_STATE_DEL_REQUIRED: - list_del(&ipm->list); - spin_unlock_irqrestore(&card->ipm_lock, flags); - lcs_send_delipm(card, ipm); - spin_lock_irqsave(&card->ipm_lock, flags); - kfree(ipm); - goto list_modified; - case LCS_IPM_STATE_ON_CARD: - break; - } - } - /* re-insert all entries from the failed_list into ipm_list */ - list_for_each_entry_safe(ipm, tmp, &failed_list, list) - list_move_tail(&ipm->list, &card->ipm_list); - - spin_unlock_irqrestore(&card->ipm_lock, flags); -} - -/* - * get mac address for the relevant Multicast address - */ -static void -lcs_get_mac_for_ipm(__be32 ipm, char *mac, struct net_device *dev) -{ - LCS_DBF_TEXT(4,trace, "getmac"); - ip_eth_mc_map(ipm, mac); -} - -/* - * function called by net device to handle multicast address relevant things - */ -static void lcs_remove_mc_addresses(struct lcs_card *card, - struct in_device *in4_dev) -{ - struct ip_mc_list *im4; - struct list_head *l; - struct lcs_ipm_list *ipm; - unsigned long flags; - char buf[MAX_ADDR_LEN]; - - LCS_DBF_TEXT(4, trace, "remmclst"); - spin_lock_irqsave(&card->ipm_lock, flags); - list_for_each(l, &card->ipm_list) { - ipm = list_entry(l, struct lcs_ipm_list, list); - for (im4 = rcu_dereference(in4_dev->mc_list); - im4 != NULL; im4 = rcu_dereference(im4->next_rcu)) { - lcs_get_mac_for_ipm(im4->multiaddr, buf, card->dev); - if ( (ipm->ipm.ip_addr == im4->multiaddr) && - (memcmp(buf, &ipm->ipm.mac_addr, - LCS_MAC_LENGTH) == 0) ) - break; - } - if (im4 == NULL) - ipm->ipm_state = LCS_IPM_STATE_DEL_REQUIRED; - } - spin_unlock_irqrestore(&card->ipm_lock, flags); -} - -static struct lcs_ipm_list *lcs_check_addr_entry(struct lcs_card *card, - struct ip_mc_list *im4, - char *buf) -{ - struct lcs_ipm_list *tmp, *ipm = NULL; - struct list_head *l; - unsigned long flags; - - LCS_DBF_TEXT(4, trace, "chkmcent"); - spin_lock_irqsave(&card->ipm_lock, flags); - list_for_each(l, &card->ipm_list) { - tmp = list_entry(l, struct lcs_ipm_list, list); - if ( (tmp->ipm.ip_addr == im4->multiaddr) && - (memcmp(buf, &tmp->ipm.mac_addr, - LCS_MAC_LENGTH) == 0) ) { - ipm = tmp; - break; - } - } - spin_unlock_irqrestore(&card->ipm_lock, flags); - return ipm; -} - -static void lcs_set_mc_addresses(struct lcs_card *card, - struct in_device *in4_dev) -{ - - struct ip_mc_list *im4; - struct lcs_ipm_list *ipm; - char buf[MAX_ADDR_LEN]; - unsigned long flags; - - LCS_DBF_TEXT(4, trace, "setmclst"); - for (im4 = rcu_dereference(in4_dev->mc_list); im4 != NULL; - im4 = rcu_dereference(im4->next_rcu)) { - lcs_get_mac_for_ipm(im4->multiaddr, buf, card->dev); - ipm = lcs_check_addr_entry(card, im4, buf); - if (ipm != NULL) - continue; /* Address already in list. 
*/ - ipm = kzalloc(sizeof(struct lcs_ipm_list), GFP_ATOMIC); - if (ipm == NULL) { - pr_info("Not enough memory to add" - " new multicast entry!\n"); - break; - } - memcpy(&ipm->ipm.mac_addr, buf, LCS_MAC_LENGTH); - ipm->ipm.ip_addr = im4->multiaddr; - ipm->ipm_state = LCS_IPM_STATE_SET_REQUIRED; - spin_lock_irqsave(&card->ipm_lock, flags); - LCS_DBF_HEX(2,trace,&ipm->ipm.ip_addr,4); - list_add(&ipm->list, &card->ipm_list); - spin_unlock_irqrestore(&card->ipm_lock, flags); - } -} - -static int -lcs_register_mc_addresses(void *data) -{ - struct lcs_card *card; - struct in_device *in4_dev; - - card = (struct lcs_card *) data; - - if (!lcs_do_run_thread(card, LCS_SET_MC_THREAD)) - return 0; - LCS_DBF_TEXT(4, trace, "regmulti"); - - in4_dev = in_dev_get(card->dev); - if (in4_dev == NULL) - goto out; - rcu_read_lock(); - lcs_remove_mc_addresses(card,in4_dev); - lcs_set_mc_addresses(card, in4_dev); - rcu_read_unlock(); - in_dev_put(in4_dev); - - netif_carrier_off(card->dev); - netif_tx_disable(card->dev); - wait_event(card->write.wait_q, - (card->write.state != LCS_CH_STATE_RUNNING)); - lcs_fix_multicast_list(card); - if (card->state == DEV_STATE_UP) { - netif_carrier_on(card->dev); - netif_wake_queue(card->dev); - } -out: - lcs_clear_thread_running_bit(card, LCS_SET_MC_THREAD); - return 0; -} -#endif /* CONFIG_IP_MULTICAST */ - -/* - * function called by net device to - * handle multicast address relevant things - */ -static void -lcs_set_multicast_list(struct net_device *dev) -{ -#ifdef CONFIG_IP_MULTICAST - struct lcs_card *card; - - LCS_DBF_TEXT(4, trace, "setmulti"); - card = (struct lcs_card *) dev->ml_priv; - - if (!lcs_set_thread_start_bit(card, LCS_SET_MC_THREAD)) - schedule_work(&card->kernel_thread_starter); -#endif /* CONFIG_IP_MULTICAST */ -} - -static long -lcs_check_irb_error(struct ccw_device *cdev, struct irb *irb) -{ - if (!IS_ERR(irb)) - return 0; - - switch (PTR_ERR(irb)) { - case -EIO: - dev_warn(&cdev->dev, - "An I/O-error occurred on the LCS device\n"); - LCS_DBF_TEXT(2, trace, "ckirberr"); - LCS_DBF_TEXT_(2, trace, " rc%d", -EIO); - break; - case -ETIMEDOUT: - dev_warn(&cdev->dev, - "A command timed out on the LCS device\n"); - LCS_DBF_TEXT(2, trace, "ckirberr"); - LCS_DBF_TEXT_(2, trace, " rc%d", -ETIMEDOUT); - break; - default: - dev_warn(&cdev->dev, - "An error occurred on the LCS device, rc=%ld\n", - PTR_ERR(irb)); - LCS_DBF_TEXT(2, trace, "ckirberr"); - LCS_DBF_TEXT(2, trace, " rc???"); - } - return PTR_ERR(irb); -} - -static int -lcs_get_problem(struct ccw_device *cdev, struct irb *irb) -{ - int dstat, cstat; - char *sense; - - sense = (char *) irb->ecw; - cstat = irb->scsw.cmd.cstat; - dstat = irb->scsw.cmd.dstat; - - if (cstat & (SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK | - SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK | - SCHN_STAT_PROT_CHECK | SCHN_STAT_PROG_CHECK)) { - LCS_DBF_TEXT(2, trace, "CGENCHK"); - return 1; - } - if (dstat & DEV_STAT_UNIT_CHECK) { - if (sense[LCS_SENSE_BYTE_1] & - LCS_SENSE_RESETTING_EVENT) { - LCS_DBF_TEXT(2, trace, "REVIND"); - return 1; - } - if (sense[LCS_SENSE_BYTE_0] & - LCS_SENSE_CMD_REJECT) { - LCS_DBF_TEXT(2, trace, "CMDREJ"); - return 0; - } - if ((!sense[LCS_SENSE_BYTE_0]) && - (!sense[LCS_SENSE_BYTE_1]) && - (!sense[LCS_SENSE_BYTE_2]) && - (!sense[LCS_SENSE_BYTE_3])) { - LCS_DBF_TEXT(2, trace, "ZEROSEN"); - return 0; - } - LCS_DBF_TEXT(2, trace, "DGENCHK"); - return 1; - } - return 0; -} - -static void -lcs_schedule_recovery(struct lcs_card *card) -{ - LCS_DBF_TEXT(2, trace, "startrec"); - if 
(!lcs_set_thread_start_bit(card, LCS_RECOVERY_THREAD)) - schedule_work(&card->kernel_thread_starter); -} - -/* - * IRQ Handler for LCS channels - */ -static void -lcs_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb) -{ - struct lcs_card *card; - struct lcs_channel *channel; - int rc, index; - int cstat, dstat; - - if (lcs_check_irb_error(cdev, irb)) - return; - - card = CARD_FROM_DEV(cdev); - if (card->read.ccwdev == cdev) - channel = &card->read; - else - channel = &card->write; - - cstat = irb->scsw.cmd.cstat; - dstat = irb->scsw.cmd.dstat; - LCS_DBF_TEXT_(5, trace, "Rint%s", dev_name(&cdev->dev)); - LCS_DBF_TEXT_(5, trace, "%4x%4x", irb->scsw.cmd.cstat, - irb->scsw.cmd.dstat); - LCS_DBF_TEXT_(5, trace, "%4x%4x", irb->scsw.cmd.fctl, - irb->scsw.cmd.actl); - - /* Check for channel and device errors presented */ - rc = lcs_get_problem(cdev, irb); - if (rc || (dstat & DEV_STAT_UNIT_EXCEP)) { - dev_warn(&cdev->dev, - "The LCS device stopped because of an error," - " dstat=0x%X, cstat=0x%X \n", - dstat, cstat); - if (rc) { - channel->state = LCS_CH_STATE_ERROR; - } - } - if (channel->state == LCS_CH_STATE_ERROR) { - lcs_schedule_recovery(card); - wake_up(&card->wait_q); - return; - } - /* How far in the ccw chain have we processed? */ - if ((channel->state != LCS_CH_STATE_INIT) && - (irb->scsw.cmd.fctl & SCSW_FCTL_START_FUNC) && - (irb->scsw.cmd.cpa != 0)) { - index = (struct ccw1 *)dma32_to_virt(irb->scsw.cmd.cpa) - - channel->ccws; - if ((irb->scsw.cmd.actl & SCSW_ACTL_SUSPENDED) || - (irb->scsw.cmd.cstat & SCHN_STAT_PCI)) - /* Bloody io subsystem tells us lies about cpa... */ - index = (index - 1) & (LCS_NUM_BUFFS - 1); - while (channel->io_idx != index) { - __lcs_processed_buffer(channel, - channel->iob + channel->io_idx); - channel->io_idx = - (channel->io_idx + 1) & (LCS_NUM_BUFFS - 1); - } - } - - if ((irb->scsw.cmd.dstat & DEV_STAT_DEV_END) || - (irb->scsw.cmd.dstat & DEV_STAT_CHN_END) || - (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK)) - /* Mark channel as stopped. */ - channel->state = LCS_CH_STATE_STOPPED; - else if (irb->scsw.cmd.actl & SCSW_ACTL_SUSPENDED) - /* CCW execution stopped on a suspend bit. */ - channel->state = LCS_CH_STATE_SUSPENDED; - if (irb->scsw.cmd.fctl & SCSW_FCTL_HALT_FUNC) { - if (irb->scsw.cmd.cc != 0) { - ccw_device_halt(channel->ccwdev, 0); - return; - } - /* The channel has been stopped by halt_IO. */ - channel->state = LCS_CH_STATE_HALTED; - } - if (irb->scsw.cmd.fctl & SCSW_FCTL_CLEAR_FUNC) - channel->state = LCS_CH_STATE_CLEARED; - /* Do the rest in the tasklet. */ - tasklet_schedule(&channel->irq_tasklet); -} - -/* - * Tasklet for IRQ handler - */ -static void -lcs_tasklet(unsigned long data) -{ - unsigned long flags; - struct lcs_channel *channel; - struct lcs_buffer *iob; - int buf_idx; - - channel = (struct lcs_channel *) data; - LCS_DBF_TEXT_(5, trace, "tlet%s", dev_name(&channel->ccwdev->dev)); - - /* Check for processed buffers. */ - iob = channel->iob; - buf_idx = channel->buf_idx; - while (iob[buf_idx].state == LCS_BUF_STATE_PROCESSED) { - /* Do the callback thing. 
*/ - if (iob[buf_idx].callback != NULL) - iob[buf_idx].callback(channel, iob + buf_idx); - buf_idx = (buf_idx + 1) & (LCS_NUM_BUFFS - 1); - } - channel->buf_idx = buf_idx; - - if (channel->state == LCS_CH_STATE_STOPPED) - lcs_start_channel(channel); - spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); - if (channel->state == LCS_CH_STATE_SUSPENDED && - channel->iob[channel->io_idx].state == LCS_BUF_STATE_READY) - __lcs_resume_channel(channel); - spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags); - - /* Something happened on the channel. Wake up waiters. */ - wake_up(&channel->wait_q); -} - -/* - * Finish current tx buffer and make it ready for transmit. - */ -static void -__lcs_emit_txbuffer(struct lcs_card *card) -{ - LCS_DBF_TEXT(5, trace, "emittx"); - *(__u16 *)(card->tx_buffer->data + card->tx_buffer->count) = 0; - card->tx_buffer->count += 2; - lcs_ready_buffer(&card->write, card->tx_buffer); - card->tx_buffer = NULL; - card->tx_emitted++; -} - -/* - * Callback for finished tx buffers. - */ -static void -lcs_txbuffer_cb(struct lcs_channel *channel, struct lcs_buffer *buffer) -{ - struct lcs_card *card; - - LCS_DBF_TEXT(5, trace, "txbuffcb"); - /* Put buffer back to pool. */ - lcs_release_buffer(channel, buffer); - card = container_of(channel, struct lcs_card, write); - if (netif_queue_stopped(card->dev) && netif_carrier_ok(card->dev)) - netif_wake_queue(card->dev); - spin_lock(&card->lock); - card->tx_emitted--; - if (card->tx_emitted <= 0 && card->tx_buffer != NULL) - /* - * Last running tx buffer has finished. Submit partially - * filled current buffer. - */ - __lcs_emit_txbuffer(card); - spin_unlock(&card->lock); -} - -/* - * Packet transmit function called by network stack - */ -static netdev_tx_t __lcs_start_xmit(struct lcs_card *card, struct sk_buff *skb, - struct net_device *dev) -{ - struct lcs_header *header; - int rc = NETDEV_TX_OK; - - LCS_DBF_TEXT(5, trace, "hardxmit"); - if (skb == NULL) { - card->stats.tx_dropped++; - card->stats.tx_errors++; - return NETDEV_TX_OK; - } - if (card->state != DEV_STATE_UP) { - dev_kfree_skb(skb); - card->stats.tx_dropped++; - card->stats.tx_errors++; - card->stats.tx_carrier_errors++; - return NETDEV_TX_OK; - } - if (skb->protocol == htons(ETH_P_IPV6)) { - dev_kfree_skb(skb); - return NETDEV_TX_OK; - } - netif_stop_queue(card->dev); - spin_lock(&card->lock); - if (card->tx_buffer != NULL && - card->tx_buffer->count + sizeof(struct lcs_header) + - skb->len + sizeof(u16) > LCS_IOBUFFERSIZE) - /* skb too big for current tx buffer. */ - __lcs_emit_txbuffer(card); - if (card->tx_buffer == NULL) { - /* Get new tx buffer */ - card->tx_buffer = lcs_get_buffer(&card->write); - if (card->tx_buffer == NULL) { - card->stats.tx_dropped++; - rc = NETDEV_TX_BUSY; - goto out; - } - card->tx_buffer->callback = lcs_txbuffer_cb; - card->tx_buffer->count = 0; - } - header = (struct lcs_header *) - (card->tx_buffer->data + card->tx_buffer->count); - card->tx_buffer->count += skb->len + sizeof(struct lcs_header); - header->offset = card->tx_buffer->count; - header->type = card->lan_type; - header->slot = card->portno; - skb_copy_from_linear_data(skb, header + 1, skb->len); - spin_unlock(&card->lock); - card->stats.tx_bytes += skb->len; - card->stats.tx_packets++; - dev_kfree_skb(skb); - netif_wake_queue(card->dev); - spin_lock(&card->lock); - if (card->tx_emitted <= 0 && card->tx_buffer != NULL) - /* If this is the first tx buffer emit it immediately. 
*/ - __lcs_emit_txbuffer(card); -out: - spin_unlock(&card->lock); - return rc; -} - -static netdev_tx_t lcs_start_xmit(struct sk_buff *skb, struct net_device *dev) -{ - struct lcs_card *card; - int rc; - - LCS_DBF_TEXT(5, trace, "pktxmit"); - card = (struct lcs_card *) dev->ml_priv; - rc = __lcs_start_xmit(card, skb, dev); - return rc; -} - -/* - * send startlan and lanstat command to make LCS device ready - */ -static int -lcs_startlan_auto(struct lcs_card *card) -{ - int rc; - - LCS_DBF_TEXT(2, trace, "strtauto"); - card->lan_type = LCS_FRAME_TYPE_ENET; - rc = lcs_send_startlan(card, LCS_INITIATOR_TCPIP); - if (rc == 0) - return 0; - - return -EIO; -} - -static int -lcs_startlan(struct lcs_card *card) -{ - int rc, i; - - LCS_DBF_TEXT(2, trace, "startlan"); - rc = 0; - if (card->portno != LCS_INVALID_PORT_NO) { - if (card->lan_type == LCS_FRAME_TYPE_AUTO) - rc = lcs_startlan_auto(card); - else - rc = lcs_send_startlan(card, LCS_INITIATOR_TCPIP); - } else { - for (i = 0; i <= 16; i++) { - card->portno = i; - if (card->lan_type != LCS_FRAME_TYPE_AUTO) - rc = lcs_send_startlan(card, - LCS_INITIATOR_TCPIP); - else - /* autodetecting lan type */ - rc = lcs_startlan_auto(card); - if (rc == 0) - break; - } - } - if (rc == 0) - return lcs_send_lanstat(card); - return rc; -} - -/* - * LCS detect function - * setup channels and make them I/O ready - */ -static int -lcs_detect(struct lcs_card *card) -{ - int rc = 0; - - LCS_DBF_TEXT(2, setup, "lcsdetct"); - /* start/reset card */ - if (card->dev) - netif_stop_queue(card->dev); - rc = lcs_stop_channels(card); - if (rc == 0) { - rc = lcs_start_channels(card); - if (rc == 0) { - rc = lcs_send_startup(card, LCS_INITIATOR_TCPIP); - if (rc == 0) - rc = lcs_startlan(card); - } - } - if (rc == 0) { - card->state = DEV_STATE_UP; - } else { - card->state = DEV_STATE_DOWN; - card->write.state = LCS_CH_STATE_INIT; - card->read.state = LCS_CH_STATE_INIT; - } - return rc; -} - -/* - * LCS Stop card - */ -static int -lcs_stopcard(struct lcs_card *card) -{ - int rc; - - LCS_DBF_TEXT(3, setup, "stopcard"); - - if (card->read.state != LCS_CH_STATE_STOPPED && - card->write.state != LCS_CH_STATE_STOPPED && - card->read.state != LCS_CH_STATE_ERROR && - card->write.state != LCS_CH_STATE_ERROR && - card->state == DEV_STATE_UP) { - lcs_clear_multicast_list(card); - rc = lcs_send_stoplan(card,LCS_INITIATOR_TCPIP); - rc = lcs_send_shutdown(card); - } - rc = lcs_stop_channels(card); - card->state = DEV_STATE_DOWN; - - return rc; -} - -/* - * Kernel Thread helper functions for LGW initiated commands - */ -static void -lcs_start_kernel_thread(struct work_struct *work) -{ - struct lcs_card *card = container_of(work, struct lcs_card, kernel_thread_starter); - LCS_DBF_TEXT(5, trace, "krnthrd"); - if (lcs_do_start_thread(card, LCS_RECOVERY_THREAD)) - kthread_run(lcs_recovery, card, "lcs_recover"); -#ifdef CONFIG_IP_MULTICAST - if (lcs_do_start_thread(card, LCS_SET_MC_THREAD)) - kthread_run(lcs_register_mc_addresses, card, "regipm"); -#endif -} - -/* - * Process control frames. 
- */ -static void -lcs_get_control(struct lcs_card *card, struct lcs_cmd *cmd) -{ - LCS_DBF_TEXT(5, trace, "getctrl"); - if (cmd->initiator == LCS_INITIATOR_LGW) { - switch(cmd->cmd_code) { - case LCS_CMD_STARTUP: - case LCS_CMD_STARTLAN: - lcs_schedule_recovery(card); - break; - case LCS_CMD_STOPLAN: - if (card->dev) { - pr_warn("Stoplan for %s initiated by LGW\n", - card->dev->name); - netif_carrier_off(card->dev); - } - break; - default: - LCS_DBF_TEXT(5, trace, "noLGWcmd"); - break; - } - } else - lcs_notify_lancmd_waiters(card, cmd); -} - -/* - * Unpack network packet. - */ -static void -lcs_get_skb(struct lcs_card *card, char *skb_data, unsigned int skb_len) -{ - struct sk_buff *skb; - - LCS_DBF_TEXT(5, trace, "getskb"); - if (card->dev == NULL || - card->state != DEV_STATE_UP) - /* The card isn't up. Ignore the packet. */ - return; - - skb = dev_alloc_skb(skb_len); - if (skb == NULL) { - dev_err(&card->dev->dev, - " Allocating a socket buffer to interface %s failed\n", - card->dev->name); - card->stats.rx_dropped++; - return; - } - skb_put_data(skb, skb_data, skb_len); - skb->protocol = card->lan_type_trans(skb, card->dev); - card->stats.rx_bytes += skb_len; - card->stats.rx_packets++; - if (skb->protocol == htons(ETH_P_802_2)) - *((__u32 *)skb->cb) = ++card->pkt_seq; - netif_rx(skb); -} - -/* - * LCS main routine to get packets and lancmd replies from the buffers - */ -static void -lcs_get_frames_cb(struct lcs_channel *channel, struct lcs_buffer *buffer) -{ - struct lcs_card *card; - struct lcs_header *lcs_hdr; - __u16 offset; - - LCS_DBF_TEXT(5, trace, "lcsgtpkt"); - lcs_hdr = (struct lcs_header *) buffer->data; - if (lcs_hdr->offset == LCS_ILLEGAL_OFFSET) { - LCS_DBF_TEXT(4, trace, "-eiogpkt"); - return; - } - card = container_of(channel, struct lcs_card, read); - offset = 0; - while (lcs_hdr->offset != 0) { - if (lcs_hdr->offset <= 0 || - lcs_hdr->offset > LCS_IOBUFFERSIZE || - lcs_hdr->offset < offset) { - /* Offset invalid. */ - card->stats.rx_length_errors++; - card->stats.rx_errors++; - return; - } - if (lcs_hdr->type == LCS_FRAME_TYPE_CONTROL) - lcs_get_control(card, (struct lcs_cmd *) lcs_hdr); - else if (lcs_hdr->type == LCS_FRAME_TYPE_ENET) - lcs_get_skb(card, (char *)(lcs_hdr + 1), - lcs_hdr->offset - offset - - sizeof(struct lcs_header)); - else - dev_info_once(&card->dev->dev, - "Unknown frame type %d\n", - lcs_hdr->type); - offset = lcs_hdr->offset; - lcs_hdr->offset = LCS_ILLEGAL_OFFSET; - lcs_hdr = (struct lcs_header *) (buffer->data + offset); - } - /* The buffer is now empty. Make it ready again. 
*/ - lcs_ready_buffer(&card->read, buffer); -} - -/* - * get network statistics for ifconfig and other user programs - */ -static struct net_device_stats * -lcs_getstats(struct net_device *dev) -{ - struct lcs_card *card; - - LCS_DBF_TEXT(4, trace, "netstats"); - card = (struct lcs_card *) dev->ml_priv; - return &card->stats; -} - -/* - * stop lcs device - * This function will be called by user doing ifconfig xxx down - */ -static int -lcs_stop_device(struct net_device *dev) -{ - struct lcs_card *card; - int rc; - - LCS_DBF_TEXT(2, trace, "stopdev"); - card = (struct lcs_card *) dev->ml_priv; - netif_carrier_off(dev); - netif_tx_disable(dev); - dev->flags &= ~IFF_UP; - wait_event(card->write.wait_q, - (card->write.state != LCS_CH_STATE_RUNNING)); - rc = lcs_stopcard(card); - if (rc) - dev_err(&card->dev->dev, - " Shutting down the LCS device failed\n"); - return rc; -} - -/* - * start lcs device and make it runnable - * This function will be called by user doing ifconfig xxx up - */ -static int -lcs_open_device(struct net_device *dev) -{ - struct lcs_card *card; - int rc; - - LCS_DBF_TEXT(2, trace, "opendev"); - card = (struct lcs_card *) dev->ml_priv; - /* initialize statistics */ - rc = lcs_detect(card); - if (rc) { - pr_err("Error in opening device!\n"); - - } else { - dev->flags |= IFF_UP; - netif_carrier_on(dev); - netif_wake_queue(dev); - card->state = DEV_STATE_UP; - } - return rc; -} - -/* - * show function for portno called by cat or similar things - */ -static ssize_t -lcs_portno_show (struct device *dev, struct device_attribute *attr, char *buf) -{ - struct lcs_card *card; - - card = dev_get_drvdata(dev); - - if (!card) - return 0; - - return sysfs_emit(buf, "%d\n", card->portno); -} - -/* - * store the value which is piped to file portno - */ -static ssize_t -lcs_portno_store (struct device *dev, struct device_attribute *attr, const char *buf, size_t count) -{ - struct lcs_card *card; - int rc; - s16 value; - - card = dev_get_drvdata(dev); - - if (!card) - return 0; - - rc = kstrtos16(buf, 0, &value); - if (rc) - return -EINVAL; - /* TODO: sanity checks */ - card->portno = value; - if (card->dev) - card->dev->dev_port = card->portno; - - return count; - -} - -static DEVICE_ATTR(portno, 0644, lcs_portno_show, lcs_portno_store); - -static const char *lcs_type[] = { - "not a channel", - "2216 parallel", - "2216 channel", - "OSA LCS card", - "unknown channel type", - "unsupported channel type", -}; - -static ssize_t -lcs_type_show(struct device *dev, struct device_attribute *attr, char *buf) -{ - struct ccwgroup_device *cgdev; - - cgdev = to_ccwgroupdev(dev); - if (!cgdev) - return -ENODEV; - - return sysfs_emit(buf, "%s\n", - lcs_type[cgdev->cdev[0]->id.driver_info]); -} - -static DEVICE_ATTR(type, 0444, lcs_type_show, NULL); - -static ssize_t -lcs_timeout_show(struct device *dev, struct device_attribute *attr, char *buf) -{ - struct lcs_card *card; - - card = dev_get_drvdata(dev); - - return card ? 
sysfs_emit(buf, "%u\n", card->lancmd_timeout) : 0; -} - -static ssize_t -lcs_timeout_store (struct device *dev, struct device_attribute *attr, const char *buf, size_t count) -{ - struct lcs_card *card; - unsigned int value; - int rc; - - card = dev_get_drvdata(dev); - - if (!card) - return 0; - - rc = kstrtouint(buf, 0, &value); - if (rc) - return -EINVAL; - /* TODO: sanity checks */ - card->lancmd_timeout = value; - - return count; - -} - -static DEVICE_ATTR(lancmd_timeout, 0644, lcs_timeout_show, lcs_timeout_store); - -static ssize_t -lcs_dev_recover_store(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) -{ - struct lcs_card *card = dev_get_drvdata(dev); - char *tmp; - int i; - - if (!card) - return -EINVAL; - if (card->state != DEV_STATE_UP) - return -EPERM; - i = simple_strtoul(buf, &tmp, 16); - if (i == 1) - lcs_schedule_recovery(card); - return count; -} - -static DEVICE_ATTR(recover, 0200, NULL, lcs_dev_recover_store); - -static struct attribute * lcs_attrs[] = { - &dev_attr_portno.attr, - &dev_attr_type.attr, - &dev_attr_lancmd_timeout.attr, - &dev_attr_recover.attr, - NULL, -}; -static struct attribute_group lcs_attr_group = { - .attrs = lcs_attrs, -}; -static const struct attribute_group *lcs_attr_groups[] = { - &lcs_attr_group, - NULL, -}; -static const struct device_type lcs_devtype = { - .name = "lcs", - .groups = lcs_attr_groups, -}; - -/* - * lcs_probe_device is called on establishing a new ccwgroup_device. - */ -static int -lcs_probe_device(struct ccwgroup_device *ccwgdev) -{ - struct lcs_card *card; - - if (!get_device(&ccwgdev->dev)) - return -ENODEV; - - LCS_DBF_TEXT(2, setup, "add_dev"); - card = lcs_alloc_card(); - if (!card) { - LCS_DBF_TEXT_(2, setup, " rc%d", -ENOMEM); - put_device(&ccwgdev->dev); - return -ENOMEM; - } - dev_set_drvdata(&ccwgdev->dev, card); - ccwgdev->cdev[0]->handler = lcs_irq; - ccwgdev->cdev[1]->handler = lcs_irq; - card->gdev = ccwgdev; - INIT_WORK(&card->kernel_thread_starter, lcs_start_kernel_thread); - card->thread_start_mask = 0; - card->thread_allowed_mask = 0; - card->thread_running_mask = 0; - ccwgdev->dev.type = &lcs_devtype; - - return 0; -} - -static int -lcs_register_netdev(struct ccwgroup_device *ccwgdev) -{ - struct lcs_card *card; - - LCS_DBF_TEXT(2, setup, "regnetdv"); - card = dev_get_drvdata(&ccwgdev->dev); - if (card->dev->reg_state != NETREG_UNINITIALIZED) - return 0; - SET_NETDEV_DEV(card->dev, &ccwgdev->dev); - return register_netdev(card->dev); -} - -/* - * lcs_new_device will be called by setting the group device online. 
- */ -static const struct net_device_ops lcs_netdev_ops = { - .ndo_open = lcs_open_device, - .ndo_stop = lcs_stop_device, - .ndo_get_stats = lcs_getstats, - .ndo_start_xmit = lcs_start_xmit, -}; - -static const struct net_device_ops lcs_mc_netdev_ops = { - .ndo_open = lcs_open_device, - .ndo_stop = lcs_stop_device, - .ndo_get_stats = lcs_getstats, - .ndo_start_xmit = lcs_start_xmit, - .ndo_set_rx_mode = lcs_set_multicast_list, -}; - -static int -lcs_new_device(struct ccwgroup_device *ccwgdev) -{ - struct lcs_card *card; - struct net_device *dev=NULL; - enum lcs_dev_states recover_state; - int rc; - - card = dev_get_drvdata(&ccwgdev->dev); - if (!card) - return -ENODEV; - - LCS_DBF_TEXT(2, setup, "newdev"); - LCS_DBF_HEX(3, setup, &card, sizeof(void*)); - card->read.ccwdev = ccwgdev->cdev[0]; - card->write.ccwdev = ccwgdev->cdev[1]; - - recover_state = card->state; - rc = ccw_device_set_online(card->read.ccwdev); - if (rc) - goto out_err; - rc = ccw_device_set_online(card->write.ccwdev); - if (rc) - goto out_werr; - - LCS_DBF_TEXT(3, setup, "lcsnewdv"); - - lcs_setup_card(card); - rc = lcs_detect(card); - if (rc) { - LCS_DBF_TEXT(2, setup, "dtctfail"); - dev_err(&ccwgdev->dev, - "Detecting a network adapter for LCS devices" - " failed with rc=%d (0x%x)\n", rc, rc); - lcs_stopcard(card); - goto out; - } - if (card->dev) { - LCS_DBF_TEXT(2, setup, "samedev"); - LCS_DBF_HEX(3, setup, &card, sizeof(void*)); - goto netdev_out; - } - switch (card->lan_type) { - case LCS_FRAME_TYPE_ENET: - card->lan_type_trans = eth_type_trans; - dev = alloc_etherdev(0); - break; - default: - LCS_DBF_TEXT(3, setup, "errinit"); - pr_err(" Initialization failed\n"); - goto out; - } - if (!dev) - goto out; - card->dev = dev; - card->dev->ml_priv = card; - card->dev->netdev_ops = &lcs_netdev_ops; - card->dev->dev_port = card->portno; - eth_hw_addr_set(card->dev, card->mac); -#ifdef CONFIG_IP_MULTICAST - if (!lcs_check_multicast_support(card)) - card->dev->netdev_ops = &lcs_mc_netdev_ops; -#endif -netdev_out: - lcs_set_allowed_threads(card,0xffffffff); - if (recover_state == DEV_STATE_RECOVER) { - lcs_set_multicast_list(card->dev); - card->dev->flags |= IFF_UP; - netif_carrier_on(card->dev); - netif_wake_queue(card->dev); - card->state = DEV_STATE_UP; - } else { - lcs_stopcard(card); - } - - if (lcs_register_netdev(ccwgdev) != 0) - goto out; - - /* Print out supported assists: IPv6 */ - pr_info("LCS device %s %s IPv6 support\n", card->dev->name, - (card->ip_assists_supported & LCS_IPASS_IPV6_SUPPORT) ? - "with" : "without"); - /* Print out supported assist: Multicast */ - pr_info("LCS device %s %s Multicast support\n", card->dev->name, - (card->ip_assists_supported & LCS_IPASS_MULTICAST_SUPPORT) ? - "with" : "without"); - return 0; -out: - - ccw_device_set_offline(card->write.ccwdev); -out_werr: - ccw_device_set_offline(card->read.ccwdev); -out_err: - return -ENODEV; -} - -/* - * lcs_shutdown_device, called when setting the group device offline. 
- */ -static int -__lcs_shutdown_device(struct ccwgroup_device *ccwgdev, int recovery_mode) -{ - struct lcs_card *card; - enum lcs_dev_states recover_state; - int ret = 0, ret2 = 0, ret3 = 0; - - LCS_DBF_TEXT(3, setup, "shtdndev"); - card = dev_get_drvdata(&ccwgdev->dev); - if (!card) - return -ENODEV; - if (recovery_mode == 0) { - lcs_set_allowed_threads(card, 0); - if (lcs_wait_for_threads(card, LCS_SET_MC_THREAD)) - return -ERESTARTSYS; - } - LCS_DBF_HEX(3, setup, &card, sizeof(void*)); - recover_state = card->state; - - ret = lcs_stop_device(card->dev); - ret2 = ccw_device_set_offline(card->read.ccwdev); - ret3 = ccw_device_set_offline(card->write.ccwdev); - if (!ret) - ret = (ret2) ? ret2 : ret3; - if (ret) - LCS_DBF_TEXT_(3, setup, "1err:%d", ret); - if (recover_state == DEV_STATE_UP) { - card->state = DEV_STATE_RECOVER; - } - return 0; -} - -static int -lcs_shutdown_device(struct ccwgroup_device *ccwgdev) -{ - return __lcs_shutdown_device(ccwgdev, 0); -} - -/* - * drive lcs recovery after startup and startlan initiated by Lan Gateway - */ -static int -lcs_recovery(void *ptr) -{ - struct lcs_card *card; - struct ccwgroup_device *gdev; - int rc; - - card = (struct lcs_card *) ptr; - - LCS_DBF_TEXT(4, trace, "recover1"); - if (!lcs_do_run_thread(card, LCS_RECOVERY_THREAD)) - return 0; - LCS_DBF_TEXT(4, trace, "recover2"); - gdev = card->gdev; - dev_warn(&gdev->dev, - "A recovery process has been started for the LCS device\n"); - rc = __lcs_shutdown_device(gdev, 1); - rc = lcs_new_device(gdev); - if (!rc) - pr_info("Device %s successfully recovered!\n", - card->dev->name); - else - pr_info("Device %s could not be recovered!\n", - card->dev->name); - lcs_clear_thread_running_bit(card, LCS_RECOVERY_THREAD); - return 0; -} - -/* - * lcs_remove_device, free buffers and card - */ -static void -lcs_remove_device(struct ccwgroup_device *ccwgdev) -{ - struct lcs_card *card; - - card = dev_get_drvdata(&ccwgdev->dev); - if (!card) - return; - - LCS_DBF_TEXT(3, setup, "remdev"); - LCS_DBF_HEX(3, setup, &card, sizeof(void*)); - if (ccwgdev->state == CCWGROUP_ONLINE) { - lcs_shutdown_device(ccwgdev); - } - if (card->dev) - unregister_netdev(card->dev); - lcs_cleanup_card(card); - lcs_free_card(card); - dev_set_drvdata(&ccwgdev->dev, NULL); - put_device(&ccwgdev->dev); -} - -static struct ccw_device_id lcs_ids[] = { - {CCW_DEVICE(0x3088, 0x08), .driver_info = lcs_channel_type_parallel}, - {CCW_DEVICE(0x3088, 0x1f), .driver_info = lcs_channel_type_2216}, - {CCW_DEVICE(0x3088, 0x60), .driver_info = lcs_channel_type_osa2}, - {}, -}; -MODULE_DEVICE_TABLE(ccw, lcs_ids); - -static struct ccw_driver lcs_ccw_driver = { - .driver = { - .owner = THIS_MODULE, - .name = "lcs", - }, - .ids = lcs_ids, - .probe = ccwgroup_probe_ccwdev, - .remove = ccwgroup_remove_ccwdev, - .int_class = IRQIO_LCS, -}; - -/* - * LCS ccwgroup driver registration - */ -static struct ccwgroup_driver lcs_group_driver = { - .driver = { - .owner = THIS_MODULE, - .name = "lcs", - }, - .ccw_driver = &lcs_ccw_driver, - .setup = lcs_probe_device, - .remove = lcs_remove_device, - .set_online = lcs_new_device, - .set_offline = lcs_shutdown_device, -}; - -static ssize_t group_store(struct device_driver *ddrv, const char *buf, - size_t count) -{ - int err; - err = ccwgroup_create_dev(lcs_root_dev, &lcs_group_driver, 2, buf); - return err ? 
err : count; -} -static DRIVER_ATTR_WO(group); - -static struct attribute *lcs_drv_attrs[] = { - &driver_attr_group.attr, - NULL, -}; -static struct attribute_group lcs_drv_attr_group = { - .attrs = lcs_drv_attrs, -}; -static const struct attribute_group *lcs_drv_attr_groups[] = { - &lcs_drv_attr_group, - NULL, -}; - -/* - * LCS Module/Kernel initialization function - */ -static int -__init lcs_init_module(void) -{ - int rc; - - pr_info("Loading %s\n", version); - rc = lcs_register_debug_facility(); - LCS_DBF_TEXT(0, setup, "lcsinit"); - if (rc) - goto out_err; - lcs_root_dev = root_device_register("lcs"); - rc = PTR_ERR_OR_ZERO(lcs_root_dev); - if (rc) - goto register_err; - rc = ccw_driver_register(&lcs_ccw_driver); - if (rc) - goto ccw_err; - lcs_group_driver.driver.groups = lcs_drv_attr_groups; - rc = ccwgroup_driver_register(&lcs_group_driver); - if (rc) - goto ccwgroup_err; - return 0; - -ccwgroup_err: - ccw_driver_unregister(&lcs_ccw_driver); -ccw_err: - root_device_unregister(lcs_root_dev); -register_err: - lcs_unregister_debug_facility(); -out_err: - pr_err("Initializing the lcs device driver failed\n"); - return rc; -} - - -/* - * LCS module cleanup function - */ -static void -__exit lcs_cleanup_module(void) -{ - pr_info("Terminating lcs module.\n"); - LCS_DBF_TEXT(0, trace, "cleanup"); - ccwgroup_driver_unregister(&lcs_group_driver); - ccw_driver_unregister(&lcs_ccw_driver); - root_device_unregister(lcs_root_dev); - lcs_unregister_debug_facility(); -} - -module_init(lcs_init_module); -module_exit(lcs_cleanup_module); - -MODULE_AUTHOR("Frank Pavlic "); -MODULE_DESCRIPTION("S/390 LAN channel station device driver"); -MODULE_LICENSE("GPL"); - diff --git a/drivers/s390/net/lcs.h b/drivers/s390/net/lcs.h deleted file mode 100644 index a2699b70b0502..0000000000000 --- a/drivers/s390/net/lcs.h +++ /dev/null @@ -1,342 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/*lcs.h*/ - -#include -#include -#include -#include -#include -#include - -#define LCS_DBF_TEXT(level, name, text) \ - do { \ - debug_text_event(lcs_dbf_##name, level, text); \ - } while (0) - -#define LCS_DBF_HEX(level,name,addr,len) \ -do { \ - debug_event(lcs_dbf_##name,level,(void*)(addr),len); \ -} while (0) - -#define LCS_DBF_TEXT_(level,name,text...) \ - do { \ - if (debug_level_enabled(lcs_dbf_##name, level)) { \ - scnprintf(debug_buffer, sizeof(debug_buffer), text); \ - debug_text_event(lcs_dbf_##name, level, debug_buffer); \ - } \ - } while (0) - -/** - * sysfs related stuff - */ -#define CARD_FROM_DEV(cdev) \ - (struct lcs_card *) dev_get_drvdata( \ - &((struct ccwgroup_device *)dev_get_drvdata(&cdev->dev))->dev); - -/** - * Enum for classifying detected devices. 
- */ -enum lcs_channel_types { - /* Device is not a channel */ - lcs_channel_type_none, - - /* Device is a 2216 channel */ - lcs_channel_type_parallel, - - /* Device is a 2216 channel */ - lcs_channel_type_2216, - - /* Device is a OSA2 card */ - lcs_channel_type_osa2 -}; - -/** - * CCW commands used in this driver - */ -#define LCS_CCW_WRITE 0x01 -#define LCS_CCW_READ 0x02 -#define LCS_CCW_TRANSFER 0x08 - -/** - * LCS device status primitives - */ -#define LCS_CMD_STARTLAN 0x01 -#define LCS_CMD_STOPLAN 0x02 -#define LCS_CMD_LANSTAT 0x04 -#define LCS_CMD_STARTUP 0x07 -#define LCS_CMD_SHUTDOWN 0x08 -#define LCS_CMD_QIPASSIST 0xb2 -#define LCS_CMD_SETIPM 0xb4 -#define LCS_CMD_DELIPM 0xb5 - -#define LCS_INITIATOR_TCPIP 0x00 -#define LCS_INITIATOR_LGW 0x01 -#define LCS_STD_CMD_SIZE 16 -#define LCS_MULTICAST_CMD_SIZE 404 - -/** - * LCS IPASSIST MASKS, only used when multicast is switched on - */ -/* Not supported by LCS */ -#define LCS_IPASS_ARP_PROCESSING 0x0001 -#define LCS_IPASS_IN_CHECKSUM_SUPPORT 0x0002 -#define LCS_IPASS_OUT_CHECKSUM_SUPPORT 0x0004 -#define LCS_IPASS_IP_FRAG_REASSEMBLY 0x0008 -#define LCS_IPASS_IP_FILTERING 0x0010 -/* Supported by lcs 3172 */ -#define LCS_IPASS_IPV6_SUPPORT 0x0020 -#define LCS_IPASS_MULTICAST_SUPPORT 0x0040 - -/** - * LCS sense byte definitions - */ -#define LCS_SENSE_BYTE_0 0 -#define LCS_SENSE_BYTE_1 1 -#define LCS_SENSE_BYTE_2 2 -#define LCS_SENSE_BYTE_3 3 -#define LCS_SENSE_INTERFACE_DISCONNECT 0x01 -#define LCS_SENSE_EQUIPMENT_CHECK 0x10 -#define LCS_SENSE_BUS_OUT_CHECK 0x20 -#define LCS_SENSE_INTERVENTION_REQUIRED 0x40 -#define LCS_SENSE_CMD_REJECT 0x80 -#define LCS_SENSE_RESETTING_EVENT 0x80 -#define LCS_SENSE_DEVICE_ONLINE 0x20 - -/** - * LCS packet type definitions - */ -#define LCS_FRAME_TYPE_CONTROL 0 -#define LCS_FRAME_TYPE_ENET 1 -#define LCS_FRAME_TYPE_TR 2 -#define LCS_FRAME_TYPE_FDDI 7 -#define LCS_FRAME_TYPE_AUTO -1 - -/** - * some more definitions, we will sort them later - */ -#define LCS_ILLEGAL_OFFSET 0xffff -#define LCS_IOBUFFERSIZE 0x5000 -#define LCS_NUM_BUFFS 32 /* needs to be power of 2 */ -#define LCS_MAC_LENGTH 6 -#define LCS_INVALID_PORT_NO -1 -#define LCS_LANCMD_TIMEOUT_DEFAULT 5 - -/** - * Multicast state - */ -#define LCS_IPM_STATE_SET_REQUIRED 0 -#define LCS_IPM_STATE_DEL_REQUIRED 1 -#define LCS_IPM_STATE_ON_CARD 2 - -/** - * LCS IP Assist declarations - * seems to be only used for multicast - */ -#define LCS_IPASS_ARP_PROCESSING 0x0001 -#define LCS_IPASS_INBOUND_CSUM_SUPP 0x0002 -#define LCS_IPASS_OUTBOUND_CSUM_SUPP 0x0004 -#define LCS_IPASS_IP_FRAG_REASSEMBLY 0x0008 -#define LCS_IPASS_IP_FILTERING 0x0010 -#define LCS_IPASS_IPV6_SUPPORT 0x0020 -#define LCS_IPASS_MULTICAST_SUPPORT 0x0040 - -/** - * LCS Buffer states - */ -enum lcs_buffer_states { - LCS_BUF_STATE_EMPTY, /* buffer is empty */ - LCS_BUF_STATE_LOCKED, /* buffer is locked, don't touch */ - LCS_BUF_STATE_READY, /* buffer is ready for read/write */ - LCS_BUF_STATE_PROCESSED, -}; - -/** - * LCS Channel State Machine declarations - */ -enum lcs_channel_states { - LCS_CH_STATE_INIT, - LCS_CH_STATE_HALTED, - LCS_CH_STATE_STOPPED, - LCS_CH_STATE_RUNNING, - LCS_CH_STATE_SUSPENDED, - LCS_CH_STATE_CLEARED, - LCS_CH_STATE_ERROR, -}; - -/** - * LCS device state machine - */ -enum lcs_dev_states { - DEV_STATE_DOWN, - DEV_STATE_UP, - DEV_STATE_RECOVER, -}; - -enum lcs_threads { - LCS_SET_MC_THREAD = 1, - LCS_RECOVERY_THREAD = 2, -}; - -/** - * LCS struct declarations - */ -struct lcs_header { - __u16 offset; - __u8 type; - __u8 slot; -} __attribute__ ((packed)); - -struct
lcs_ip_mac_pair { - __be32 ip_addr; - __u8 mac_addr[LCS_MAC_LENGTH]; - __u8 reserved[2]; -} __attribute__ ((packed)); - -struct lcs_ipm_list { - struct list_head list; - struct lcs_ip_mac_pair ipm; - __u8 ipm_state; -}; - -struct lcs_cmd { - __u16 offset; - __u8 type; - __u8 slot; - __u8 cmd_code; - __u8 initiator; - __u16 sequence_no; - __u16 return_code; - union { - struct { - __u8 lan_type; - __u8 portno; - __u16 parameter_count; - __u8 operator_flags[3]; - __u8 reserved[3]; - } lcs_std_cmd; - struct { - __u16 unused1; - __u16 buff_size; - __u8 unused2[6]; - } lcs_startup; - struct { - __u8 lan_type; - __u8 portno; - __u8 unused[10]; - __u8 mac_addr[LCS_MAC_LENGTH]; - __u32 num_packets_deblocked; - __u32 num_packets_blocked; - __u32 num_packets_tx_on_lan; - __u32 num_tx_errors_detected; - __u32 num_tx_packets_disgarded; - __u32 num_packets_rx_from_lan; - __u32 num_rx_errors_detected; - __u32 num_rx_discarded_nobuffs_avail; - __u32 num_rx_packets_too_large; - } lcs_lanstat_cmd; -#ifdef CONFIG_IP_MULTICAST - struct { - __u8 lan_type; - __u8 portno; - __u16 num_ip_pairs; - __u16 ip_assists_supported; - __u16 ip_assists_enabled; - __u16 version; - struct { - struct lcs_ip_mac_pair - ip_mac_pair[32]; - __u32 response_data; - } lcs_ipass_ctlmsg __attribute ((packed)); - } lcs_qipassist __attribute__ ((packed)); -#endif /*CONFIG_IP_MULTICAST */ - } cmd __attribute__ ((packed)); -} __attribute__ ((packed)); - -/** - * Forward declarations. - */ -struct lcs_card; -struct lcs_channel; - -/** - * Definition of an lcs buffer. - */ -struct lcs_buffer { - enum lcs_buffer_states state; - void *data; - int count; - /* Callback for completion notification. */ - void (*callback)(struct lcs_channel *, struct lcs_buffer *); -}; - -struct lcs_reply { - struct list_head list; - __u16 sequence_no; - refcount_t refcnt; - /* Callback for completion notification. 
*/ - void (*callback)(struct lcs_card *, struct lcs_cmd *); - wait_queue_head_t wait_q; - struct lcs_card *card; - struct timer_list timer; - int received; - int rc; -}; - -/** - * Definition of an lcs channel - */ -struct lcs_channel { - enum lcs_channel_states state; - struct ccw_device *ccwdev; - struct ccw1 ccws[LCS_NUM_BUFFS + 1]; - wait_queue_head_t wait_q; - struct tasklet_struct irq_tasklet; - struct lcs_buffer iob[LCS_NUM_BUFFS]; - int io_idx; - int buf_idx; -}; - - -/** - * definition of the lcs card - */ -struct lcs_card { - spinlock_t lock; - spinlock_t ipm_lock; - enum lcs_dev_states state; - struct net_device *dev; - struct net_device_stats stats; - __be16 (*lan_type_trans)(struct sk_buff *skb, - struct net_device *dev); - struct ccwgroup_device *gdev; - struct lcs_channel read; - struct lcs_channel write; - struct lcs_buffer *tx_buffer; - int tx_emitted; - struct list_head lancmd_waiters; - int lancmd_timeout; - - struct work_struct kernel_thread_starter; - spinlock_t mask_lock; - unsigned long thread_start_mask; - unsigned long thread_running_mask; - unsigned long thread_allowed_mask; - wait_queue_head_t wait_q; - -#ifdef CONFIG_IP_MULTICAST - struct list_head ipm_list; -#endif - __u8 mac[LCS_MAC_LENGTH]; - __u16 ip_assists_supported; - __u16 ip_assists_enabled; - __s8 lan_type; - __u32 pkt_seq; - __u16 sequence_no; - __s16 portno; - /* Some info copied from probeinfo */ - u8 device_forced; - u8 max_port_no; - u8 hint_port_no; - s16 port_protocol_no; -} __attribute__ ((aligned(8))); - diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index be0890e4e7062..f1cfe0bb89b20 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -1669,13 +1669,6 @@ static blk_status_t scsi_prepare_cmd(struct request *req) if (in_flight) __set_bit(SCMD_STATE_INFLIGHT, &cmd->state); - /* - * Only clear the driver-private command data if the LLD does not supply - * a function to initialize that data. - */ - if (!shost->hostt->init_cmd_priv) - memset(cmd + 1, 0, shost->hostt->cmd_size); - cmd->prot_op = SCSI_PROT_NORMAL; if (blk_rq_bytes(req)) cmd->sc_data_direction = rq_dma_dir(req); @@ -1842,6 +1835,13 @@ static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx, if (!scsi_host_queue_ready(q, shost, sdev, cmd)) goto out_dec_target_busy; + /* + * Only clear the driver-private command data if the LLD does not supply + * a function to initialize that data. 
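Context for the scsi_lib.c hunk above: the driver-private command data is the hostt->cmd_size bytes that the midlayer allocates immediately after each struct scsi_cmnd, which is why the memset target is cmd + 1. A minimal sketch of how an LLD typically declares and reaches that area (the my_* names are hypothetical, not from this patch):

        struct my_cmd_priv {            /* per-command LLD scratch state */
                int retries;
                bool aborted;
        };

        static struct my_cmd_priv *my_priv(struct scsi_cmnd *cmd)
        {
                return scsi_cmd_priv(cmd);      /* returns cmd + 1 */
        }

        static const struct scsi_host_template my_sht = {
                /* ... the usual mandatory ops ... */
                .cmd_size = sizeof(struct my_cmd_priv),
                /* no .init_cmd_priv, so after this patch the midlayer
                 * zeroes the area in scsi_queue_rq() on every dispatch */
        };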
+ */ + if (shost->hostt->cmd_size && !shost->hostt->init_cmd_priv) + memset(cmd + 1, 0, shost->hostt->cmd_size); + if (!(req->rq_flags & RQF_DONTPREP)) { ret = scsi_prepare_cmd(req); if (ret != BLK_STS_OK) diff --git a/drivers/slimbus/messaging.c b/drivers/slimbus/messaging.c index e7aa9bd4b44b8..6f01d944f9c65 100644 --- a/drivers/slimbus/messaging.c +++ b/drivers/slimbus/messaging.c @@ -148,8 +148,9 @@ int slim_do_transfer(struct slim_controller *ctrl, struct slim_msg_txn *txn) } ret = ctrl->xfer_msg(ctrl, txn); - - if (!ret && need_tid && !txn->msg->comp) { + if (ret == -ETIMEDOUT) { + slim_free_txn_tid(ctrl, txn); + } else if (!ret && need_tid && !txn->msg->comp) { unsigned long ms = txn->rl + HZ; time_left = wait_for_completion_timeout(txn->comp, diff --git a/drivers/soc/hisilicon/kunpeng_hccs.c b/drivers/soc/hisilicon/kunpeng_hccs.c index 8aa8dec14911c..444a8f59b7da7 100644 --- a/drivers/soc/hisilicon/kunpeng_hccs.c +++ b/drivers/soc/hisilicon/kunpeng_hccs.c @@ -1539,8 +1539,8 @@ static ssize_t used_types_show(struct kobject *kobj, u16 i; for (i = 0; i < hdev->used_type_num - 1; i++) - len += sysfs_emit(&buf[len], "%s ", hdev->type_name_maps[i].name); - len += sysfs_emit(&buf[len], "%s\n", hdev->type_name_maps[i].name); + len += sysfs_emit_at(buf, len, "%s ", hdev->type_name_maps[i].name); + len += sysfs_emit_at(buf, len, "%s\n", hdev->type_name_maps[i].name); return len; } diff --git a/drivers/soc/imx/soc-imx8m.c b/drivers/soc/imx/soc-imx8m.c index 8ac7658e3d525..3ed8161d7d28b 100644 --- a/drivers/soc/imx/soc-imx8m.c +++ b/drivers/soc/imx/soc-imx8m.c @@ -192,9 +192,20 @@ static __maybe_unused const struct of_device_id imx8_soc_match[] = { devm_kasprintf((dev), GFP_KERNEL, "%d.%d", ((soc_rev) >> 4) & 0xf, (soc_rev) & 0xf) : \ "unknown" +static void imx8m_unregister_soc(void *data) +{ + soc_device_unregister(data); +} + +static void imx8m_unregister_cpufreq(void *data) +{ + platform_device_unregister(data); +} + static int imx8m_soc_probe(struct platform_device *pdev) { struct soc_device_attribute *soc_dev_attr; + struct platform_device *cpufreq_dev; const struct imx8_soc_data *data; struct device *dev = &pdev->dev; const struct of_device_id *id; @@ -239,11 +250,22 @@ static int imx8m_soc_probe(struct platform_device *pdev) if (IS_ERR(soc_dev)) return PTR_ERR(soc_dev); + ret = devm_add_action(dev, imx8m_unregister_soc, soc_dev); + if (ret) + return ret; + pr_info("SoC: %s revision %s\n", soc_dev_attr->soc_id, soc_dev_attr->revision); - if (IS_ENABLED(CONFIG_ARM_IMX_CPUFREQ_DT)) - platform_device_register_simple("imx-cpufreq-dt", -1, NULL, 0); + if (IS_ENABLED(CONFIG_ARM_IMX_CPUFREQ_DT)) { + cpufreq_dev = platform_device_register_simple("imx-cpufreq-dt", -1, NULL, 0); + if (IS_ERR(cpufreq_dev)) + return dev_err_probe(dev, PTR_ERR(cpufreq_dev), + "Failed to register imx-cpufreq-dev device\n"); + ret = devm_add_action(dev, imx8m_unregister_cpufreq, cpufreq_dev); + if (ret) + return ret; + } return 0; } diff --git a/drivers/soc/loongson/loongson2_guts.c b/drivers/soc/loongson/loongson2_guts.c index ae42e3a9127fc..16913c3ef65ca 100644 --- a/drivers/soc/loongson/loongson2_guts.c +++ b/drivers/soc/loongson/loongson2_guts.c @@ -114,8 +114,11 @@ static int loongson2_guts_probe(struct platform_device *pdev) if (of_property_read_string(root, "model", &machine)) of_property_read_string_index(root, "compatible", 0, &machine); of_node_put(root); - if (machine) + if (machine) { soc_dev_attr.machine = devm_kstrdup(dev, machine, GFP_KERNEL); + if (!soc_dev_attr.machine) + return -ENOMEM; + } svr = 
loongson2_guts_get_svr(); soc_die = loongson2_soc_die_match(svr, loongson2_soc_die); diff --git a/drivers/soc/qcom/pdr_interface.c b/drivers/soc/qcom/pdr_interface.c index 328b6153b2be6..71be378d2e43a 100644 --- a/drivers/soc/qcom/pdr_interface.c +++ b/drivers/soc/qcom/pdr_interface.c @@ -75,7 +75,6 @@ static int pdr_locator_new_server(struct qmi_handle *qmi, { struct pdr_handle *pdr = container_of(qmi, struct pdr_handle, locator_hdl); - struct pdr_service *pds; mutex_lock(&pdr->lock); /* Create a local client port for QMI communication */ @@ -87,12 +86,7 @@ static int pdr_locator_new_server(struct qmi_handle *qmi, mutex_unlock(&pdr->lock); /* Service pending lookup requests */ - mutex_lock(&pdr->list_lock); - list_for_each_entry(pds, &pdr->lookups, node) { - if (pds->need_locator_lookup) - schedule_work(&pdr->locator_work); - } - mutex_unlock(&pdr->list_lock); + schedule_work(&pdr->locator_work); return 0; } diff --git a/drivers/soc/qcom/pmic_glink.c b/drivers/soc/qcom/pmic_glink.c index 052c292eeda61..cde19cdfd3c7f 100644 --- a/drivers/soc/qcom/pmic_glink.c +++ b/drivers/soc/qcom/pmic_glink.c @@ -233,7 +233,7 @@ static void pmic_glink_pdr_callback(int state, char *svc_path, void *priv) static int pmic_glink_rpmsg_probe(struct rpmsg_device *rpdev) { - struct pmic_glink *pg = __pmic_glink; + struct pmic_glink *pg; guard(mutex)(&__pmic_glink_lock); pg = __pmic_glink; diff --git a/drivers/spi/atmel-quadspi.c b/drivers/spi/atmel-quadspi.c index d8c9be64d006a..244ac01068629 100644 --- a/drivers/spi/atmel-quadspi.c +++ b/drivers/spi/atmel-quadspi.c @@ -930,11 +930,8 @@ static int atmel_qspi_sama7g5_transfer(struct spi_mem *mem, /* Release the chip-select. */ ret = atmel_qspi_reg_sync(aq); - if (ret) { - pm_runtime_mark_last_busy(&aq->pdev->dev); - pm_runtime_put_autosuspend(&aq->pdev->dev); + if (ret) return ret; - } atmel_qspi_write(QSPI_CR_LASTXFER, aq, QSPI_CR); return atmel_qspi_wait_for_completion(aq, QSPI_SR_CSRA); diff --git a/drivers/spi/spi-microchip-core.c b/drivers/spi/spi-microchip-core.c index 5b6af55855efc..62ba0bd9cbb7e 100644 --- a/drivers/spi/spi-microchip-core.c +++ b/drivers/spi/spi-microchip-core.c @@ -70,8 +70,7 @@ #define INT_RX_CHANNEL_OVERFLOW BIT(2) #define INT_TX_CHANNEL_UNDERRUN BIT(3) -#define INT_ENABLE_MASK (CONTROL_RX_DATA_INT | CONTROL_TX_DATA_INT | \ - CONTROL_RX_OVER_INT | CONTROL_TX_UNDER_INT) +#define INT_ENABLE_MASK (CONTROL_RX_OVER_INT | CONTROL_TX_UNDER_INT) #define REG_CONTROL (0x00) #define REG_FRAME_SIZE (0x04) @@ -133,10 +132,15 @@ static inline void mchp_corespi_disable(struct mchp_corespi *spi) mchp_corespi_write(spi, REG_CONTROL, control); } -static inline void mchp_corespi_read_fifo(struct mchp_corespi *spi) +static inline void mchp_corespi_read_fifo(struct mchp_corespi *spi, int fifo_max) { - while (spi->rx_len >= spi->n_bytes && !(mchp_corespi_read(spi, REG_STATUS) & STATUS_RXFIFO_EMPTY)) { - u32 data = mchp_corespi_read(spi, REG_RX_DATA); + for (int i = 0; i < fifo_max; i++) { + u32 data; + + while (mchp_corespi_read(spi, REG_STATUS) & STATUS_RXFIFO_EMPTY) + ; + + data = mchp_corespi_read(spi, REG_RX_DATA); spi->rx_len -= spi->n_bytes; @@ -211,11 +215,10 @@ static inline void mchp_corespi_set_xfer_size(struct mchp_corespi *spi, int len) mchp_corespi_write(spi, REG_FRAMESUP, len); } -static inline void mchp_corespi_write_fifo(struct mchp_corespi *spi) +static inline void mchp_corespi_write_fifo(struct mchp_corespi *spi, int fifo_max) { - int fifo_max, i = 0; + int i = 0; - fifo_max = DIV_ROUND_UP(min(spi->tx_len, FIFO_DEPTH), spi->n_bytes); 
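A general note on the kunpeng_hccs fix above: sysfs_emit() insists that its buffer argument is the page-aligned start of the sysfs buffer, so passing &buf[len] returns 0 (with a WARN) for any nonzero offset; appending must go through sysfs_emit_at(). A minimal sketch of the idiom, with a made-up attribute and string list:

        static ssize_t names_show(struct device *dev,
                                  struct device_attribute *attr, char *buf)
        {
                static const char * const names[] = { "alpha", "beta", "gamma" };
                ssize_t len = 0;
                int i;

                /* sysfs_emit_at() takes the page start plus an offset,
                 * so it is safe for building the string incrementally. */
                for (i = 0; i < ARRAY_SIZE(names) - 1; i++)
                        len += sysfs_emit_at(buf, len, "%s ", names[i]);
                len += sysfs_emit_at(buf, len, "%s\n", names[i]);

                return len;
        }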
mchp_corespi_set_xfer_size(spi, fifo_max); while ((i < fifo_max) && !(mchp_corespi_read(spi, REG_STATUS) & STATUS_TXFIFO_FULL)) { @@ -413,19 +416,6 @@ static irqreturn_t mchp_corespi_interrupt(int irq, void *dev_id) if (intfield == 0) return IRQ_NONE; - if (intfield & INT_TXDONE) - mchp_corespi_write(spi, REG_INT_CLEAR, INT_TXDONE); - - if (intfield & INT_RXRDY) { - mchp_corespi_write(spi, REG_INT_CLEAR, INT_RXRDY); - - if (spi->rx_len) - mchp_corespi_read_fifo(spi); - } - - if (!spi->rx_len && !spi->tx_len) - finalise = true; - if (intfield & INT_RX_CHANNEL_OVERFLOW) { mchp_corespi_write(spi, REG_INT_CLEAR, INT_RX_CHANNEL_OVERFLOW); finalise = true; @@ -512,9 +502,14 @@ static int mchp_corespi_transfer_one(struct spi_controller *host, mchp_corespi_write(spi, REG_SLAVE_SELECT, spi->pending_slave_select); - while (spi->tx_len) - mchp_corespi_write_fifo(spi); + while (spi->tx_len) { + int fifo_max = DIV_ROUND_UP(min(spi->tx_len, FIFO_DEPTH), spi->n_bytes); + + mchp_corespi_write_fifo(spi, fifo_max); + mchp_corespi_read_fifo(spi, fifo_max); + } + spi_finalize_current_transfer(host); return 1; } diff --git a/drivers/tee/optee/supp.c b/drivers/tee/optee/supp.c index 322a543b8c278..d0f397c902420 100644 --- a/drivers/tee/optee/supp.c +++ b/drivers/tee/optee/supp.c @@ -80,7 +80,6 @@ u32 optee_supp_thrd_req(struct tee_context *ctx, u32 func, size_t num_params, struct optee *optee = tee_get_drvdata(ctx->teedev); struct optee_supp *supp = &optee->supp; struct optee_supp_req *req; - bool interruptable; u32 ret; /* @@ -111,36 +110,18 @@ u32 optee_supp_thrd_req(struct tee_context *ctx, u32 func, size_t num_params, /* * Wait for supplicant to process and return result, once we've * returned from wait_for_completion(&req->c) successfully we have - * exclusive access again. + * exclusive access again. Allow the wait to be killable such that + * the wait doesn't turn into an indefinite state if the supplicant + * gets hung for some reason. */ - while (wait_for_completion_interruptible(&req->c)) { + if (wait_for_completion_killable(&req->c)) { mutex_lock(&supp->mutex); - interruptable = !supp->ctx; - if (interruptable) { - /* - * There's no supplicant available and since the - * supp->mutex currently is held none can - * become available until the mutex released - * again. - * - * Interrupting an RPC to supplicant is only - * allowed as a way of slightly improving the user - * experience in case the supplicant hasn't been - * started yet. During normal operation the supplicant - * will serve all requests in a timely manner and - * interrupting then wouldn't make sense. 
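Taken together, the spi-microchip-core hunks above turn the transfer into a synchronous chunked loop: write at most one FIFO's worth of frames, then immediately read the same number back, so the RX FIFO can never overflow and the RXRDY/TXDONE interrupts become unnecessary. The resulting loop, reassembled from the hunks for readability:

        while (spi->tx_len) {
                /* frames that fit in the hardware FIFO for this chunk */
                int fifo_max = DIV_ROUND_UP(min(spi->tx_len, FIFO_DEPTH),
                                            spi->n_bytes);

                mchp_corespi_write_fifo(spi, fifo_max); /* fill TX FIFO */
                mchp_corespi_read_fifo(spi, fifo_max);  /* drain same count */
        }
        spi_finalize_current_transfer(host);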
- */ - if (req->in_queue) { - list_del(&req->link); - req->in_queue = false; - } + if (req->in_queue) { + list_del(&req->link); + req->in_queue = false; } mutex_unlock(&supp->mutex); - - if (interruptable) { - req->ret = TEEC_ERROR_COMMUNICATION; - break; - } + req->ret = TEEC_ERROR_COMMUNICATION; } ret = req->ret; diff --git a/drivers/thermal/gov_power_allocator.c b/drivers/thermal/gov_power_allocator.c index 3b644de3292e2..0d9f636c80f4d 100644 --- a/drivers/thermal/gov_power_allocator.c +++ b/drivers/thermal/gov_power_allocator.c @@ -370,7 +370,7 @@ static void divvy_up_power(struct power_actor *power, int num_actors, for (i = 0; i < num_actors; i++) { struct power_actor *pa = &power[i]; - u64 req_range = (u64)pa->req_power * power_range; + u64 req_range = (u64)pa->weighted_req_power * power_range; pa->granted_power = DIV_ROUND_CLOSEST_ULL(req_range, total_req_power); @@ -641,6 +641,22 @@ static int allocate_actors_buffer(struct power_allocator_params *params, return ret; } +static void power_allocator_update_weight(struct power_allocator_params *params) +{ + const struct thermal_trip_desc *td; + struct thermal_instance *instance; + + if (!params->trip_max) + return; + + td = trip_to_trip_desc(params->trip_max); + + params->total_weight = 0; + list_for_each_entry(instance, &td->thermal_instances, trip_node) + if (power_actor_is_valid(instance)) + params->total_weight += instance->weight; +} + static void power_allocator_update_tz(struct thermal_zone_device *tz, enum thermal_notify_event reason) { @@ -656,16 +672,12 @@ static void power_allocator_update_tz(struct thermal_zone_device *tz, if (power_actor_is_valid(instance)) num_actors++; - if (num_actors == params->num_actors) - return; + if (num_actors != params->num_actors) + allocate_actors_buffer(params, num_actors); - allocate_actors_buffer(params, num_actors); - break; + fallthrough; case THERMAL_INSTANCE_WEIGHT_CHANGED: - params->total_weight = 0; - list_for_each_entry(instance, &td->thermal_instances, trip_node) - if (power_actor_is_valid(instance)) - params->total_weight += instance->weight; + power_allocator_update_weight(params); break; default: break; @@ -731,6 +743,8 @@ static int power_allocator_bind(struct thermal_zone_device *tz) tz->governor_data = params; + power_allocator_update_weight(params); + return 0; free_params: diff --git a/drivers/thermal/thermal_of.c b/drivers/thermal/thermal_of.c index 5ab4ce4daaebd..5401f03d6b6c1 100644 --- a/drivers/thermal/thermal_of.c +++ b/drivers/thermal/thermal_of.c @@ -274,6 +274,34 @@ static bool thermal_of_get_cooling_spec(struct device_node *map_np, int index, return true; } +static bool thermal_of_cm_lookup(struct device_node *cm_np, + const struct thermal_trip *trip, + struct thermal_cooling_device *cdev, + struct cooling_spec *c) +{ + for_each_child_of_node_scoped(cm_np, child) { + struct device_node *tr_np; + int count, i; + + tr_np = of_parse_phandle(child, "trip", 0); + if (tr_np != trip->priv) + continue; + + /* The trip has been found, look up the cdev. 
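The optee/supp.c rework above swaps an interruptible wait (plus bookkeeping that honoured the interruption only when no supplicant was registered) for a killable one: ordinary signals can no longer abort an in-flight RPC, while SIGKILL still can, so a hung tee-supplicant no longer leaves the caller unkillable. Condensed from the hunk, the wait now reads:

        if (wait_for_completion_killable(&req->c)) {
                /* fatal signal: withdraw the request and fail the RPC */
                mutex_lock(&supp->mutex);
                if (req->in_queue) {
                        list_del(&req->link);
                        req->in_queue = false;
                }
                mutex_unlock(&supp->mutex);
                req->ret = TEEC_ERROR_COMMUNICATION;
        }
        ret = req->ret;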
*/ + count = of_count_phandle_with_args(child, "cooling-device", + "#cooling-cells"); + if (count <= 0) + pr_err("Add a cooling_device property with at least one device\n"); + + for (i = 0; i < count; i++) { + if (thermal_of_get_cooling_spec(child, i, cdev, c)) + return true; + } + } + + return false; +} + static bool thermal_of_should_bind(struct thermal_zone_device *tz, const struct thermal_trip *trip, struct thermal_cooling_device *cdev, @@ -293,27 +321,7 @@ static bool thermal_of_should_bind(struct thermal_zone_device *tz, goto out; /* Look up the trip and the cdev in the cooling maps. */ - for_each_child_of_node_scoped(cm_np, child) { - struct device_node *tr_np; - int count, i; - - tr_np = of_parse_phandle(child, "trip", 0); - if (tr_np != trip->priv) - continue; - - /* The trip has been found, look up the cdev. */ - count = of_count_phandle_with_args(child, "cooling-device", "#cooling-cells"); - if (count <= 0) - pr_err("Add a cooling_device property with at least one device\n"); - - for (i = 0; i < count; i++) { - result = thermal_of_get_cooling_spec(child, i, cdev, c); - if (result) - break; - } - - break; - } + result = thermal_of_cm_lookup(cm_np, trip, cdev, c); of_node_put(cm_np); out: diff --git a/drivers/thunderbolt/tunnel.c b/drivers/thunderbolt/tunnel.c index 8229a6fbda5ab..717b31d787289 100644 --- a/drivers/thunderbolt/tunnel.c +++ b/drivers/thunderbolt/tunnel.c @@ -1009,6 +1009,8 @@ static int tb_dp_dprx_start(struct tb_tunnel *tunnel) */ tb_tunnel_get(tunnel); + tunnel->dprx_started = true; + if (tunnel->callback) { tunnel->dprx_timeout = dprx_timeout_to_ktime(dprx_timeout); queue_delayed_work(tunnel->tb->wq, &tunnel->dprx_work, 0); @@ -1021,9 +1023,12 @@ static int tb_dp_dprx_start(struct tb_tunnel *tunnel) static void tb_dp_dprx_stop(struct tb_tunnel *tunnel) { - tunnel->dprx_canceled = true; - cancel_delayed_work(&tunnel->dprx_work); - tb_tunnel_put(tunnel); + if (tunnel->dprx_started) { + tunnel->dprx_started = false; + tunnel->dprx_canceled = true; + cancel_delayed_work(&tunnel->dprx_work); + tb_tunnel_put(tunnel); + } } static int tb_dp_activate(struct tb_tunnel *tunnel, bool active) diff --git a/drivers/thunderbolt/tunnel.h b/drivers/thunderbolt/tunnel.h index 7f6d3a18a41e8..8a0a0cb21a895 100644 --- a/drivers/thunderbolt/tunnel.h +++ b/drivers/thunderbolt/tunnel.h @@ -63,6 +63,7 @@ enum tb_tunnel_state { * @allocated_down: Allocated downstream bandwidth (only for USB3) * @bw_mode: DP bandwidth allocation mode registers can be used to * determine consumed and allocated bandwidth + * @dprx_started: DPRX negotiation was started (tb_dp_dprx_start() was called for it) * @dprx_canceled: Was DPRX capabilities read poll canceled * @dprx_timeout: If set DPRX capabilities read poll work will timeout after this passes * @dprx_work: Worker that is scheduled to poll completion of DPRX capabilities read @@ -100,6 +101,7 @@ struct tb_tunnel { int allocated_up; int allocated_down; bool bw_mode; + bool dprx_started; bool dprx_canceled; ktime_t dprx_timeout; struct delayed_work dprx_work; diff --git a/drivers/ufs/core/ufs_bsg.c b/drivers/ufs/core/ufs_bsg.c index 8d4ad0a3f2cf0..252186124669a 100644 --- a/drivers/ufs/core/ufs_bsg.c +++ b/drivers/ufs/core/ufs_bsg.c @@ -194,10 +194,12 @@ static int ufs_bsg_request(struct bsg_job *job) ufshcd_rpm_put_sync(hba); kfree(buff); bsg_reply->result = ret; - job->reply_len = !rpmb ? 
sizeof(struct ufs_bsg_reply) : sizeof(struct ufs_rpmb_reply); /* complete the job here only if no error */ - if (ret == 0) + if (ret == 0) { + job->reply_len = rpmb ? sizeof(struct ufs_rpmb_reply) : + sizeof(struct ufs_bsg_reply); bsg_job_done(job, ret, bsg_reply->reply_payload_rcv_len); + } return ret; } diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c index 1893a7ad95316..464f13da259aa 100644 --- a/drivers/ufs/core/ufshcd.c +++ b/drivers/ufs/core/ufshcd.c @@ -266,7 +266,7 @@ static bool ufshcd_has_pending_tasks(struct ufs_hba *hba) static bool ufshcd_is_ufs_dev_busy(struct ufs_hba *hba) { - return hba->outstanding_reqs || ufshcd_has_pending_tasks(hba); + return scsi_host_busy(hba->host) || ufshcd_has_pending_tasks(hba); } static const struct ufs_dev_quirk ufs_fixups[] = { @@ -628,8 +628,8 @@ static void ufshcd_print_host_state(struct ufs_hba *hba) const struct scsi_device *sdev_ufs = hba->ufs_device_wlun; dev_err(hba->dev, "UFS Host state=%d\n", hba->ufshcd_state); - dev_err(hba->dev, "outstanding reqs=0x%lx tasks=0x%lx\n", - hba->outstanding_reqs, hba->outstanding_tasks); + dev_err(hba->dev, "%d outstanding reqs, tasks=0x%lx\n", + scsi_host_busy(hba->host), hba->outstanding_tasks); dev_err(hba->dev, "saved_err=0x%x, saved_uic_err=0x%x\n", hba->saved_err, hba->saved_uic_err); dev_err(hba->dev, "Device power mode=%d, UIC link state=%d\n", @@ -8882,7 +8882,7 @@ static enum scsi_timeout_action ufshcd_eh_timed_out(struct scsi_cmnd *scmd) dev_info(hba->dev, "%s() finished; outstanding_tasks = %#lx.\n", __func__, hba->outstanding_tasks); - return hba->outstanding_reqs ? SCSI_EH_RESET_TIMER : SCSI_EH_DONE; + return scsi_host_busy(hba->host) ? SCSI_EH_RESET_TIMER : SCSI_EH_DONE; } static const struct attribute_group *ufshcd_driver_groups[] = { @@ -10431,6 +10431,21 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) */ spin_lock_init(&hba->clk_gating.lock); + /* + * Set the default power management level for runtime and system PM. + * Host controller drivers can override them in their + * 'ufs_hba_variant_ops::init' callback. + * + * Default power saving mode is to keep UFS link in Hibern8 state + * and UFS device in sleep state. + */ + hba->rpm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state( + UFS_SLEEP_PWR_MODE, + UIC_LINK_HIBERN8_STATE); + hba->spm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state( + UFS_SLEEP_PWR_MODE, + UIC_LINK_HIBERN8_STATE); + err = ufshcd_hba_init(hba); if (err) goto out_error; @@ -10544,21 +10559,6 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) goto out_disable; } - /* - * Set the default power management level for runtime and system PM if - * not set by the host controller drivers. - * Default power saving mode is to keep UFS link in Hibern8 state - * and UFS device in sleep state. 
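In the ufshcd.c hunks above, scsi_host_busy() replaces the hba->outstanding_reqs test. A plausible reading, not spelled out in the diff itself, is that the legacy bitmap only reflects doorbell-based submissions, whereas scsi_host_busy() counts every command the SCSI midlayer has in flight, so the check also holds in MCQ mode. The resulting helper:

        static bool ufshcd_is_ufs_dev_busy(struct ufs_hba *hba)
        {
                /* scsi_host_busy() walks the block-layer tags, covering
                 * all hardware queues, not just the doorbell register */
                return scsi_host_busy(hba->host) ||
                       ufshcd_has_pending_tasks(hba);
        }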
- */ - if (!hba->rpm_lvl) - hba->rpm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state( - UFS_SLEEP_PWR_MODE, - UIC_LINK_HIBERN8_STATE); - if (!hba->spm_lvl) - hba->spm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state( - UFS_SLEEP_PWR_MODE, - UIC_LINK_HIBERN8_STATE); - INIT_DELAYED_WORK(&hba->rpm_dev_flush_recheck_work, ufshcd_rpm_dev_flush_recheck_work); INIT_DELAYED_WORK(&hba->ufs_rtc_update_work, ufshcd_rtc_work); diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c index 0dd85d2635b99..47d06af33747d 100644 --- a/drivers/usb/atm/cxacru.c +++ b/drivers/usb/atm/cxacru.c @@ -1131,7 +1131,10 @@ static int cxacru_bind(struct usbatm_data *usbatm_instance, struct cxacru_data *instance; struct usb_device *usb_dev = interface_to_usbdev(intf); struct usb_host_endpoint *cmd_ep = usb_dev->ep_in[CXACRU_EP_CMD]; - struct usb_endpoint_descriptor *in, *out; + static const u8 ep_addrs[] = { + CXACRU_EP_CMD + USB_DIR_IN, + CXACRU_EP_CMD + USB_DIR_OUT, + 0}; int ret; /* instance init */ @@ -1179,13 +1182,11 @@ static int cxacru_bind(struct usbatm_data *usbatm_instance, } if (usb_endpoint_xfer_int(&cmd_ep->desc)) - ret = usb_find_common_endpoints(intf->cur_altsetting, - NULL, NULL, &in, &out); + ret = usb_check_int_endpoints(intf, ep_addrs); else - ret = usb_find_common_endpoints(intf->cur_altsetting, - &in, &out, NULL, NULL); + ret = usb_check_bulk_endpoints(intf, ep_addrs); - if (ret) { + if (!ret) { usb_err(usbatm_instance, "cxacru_bind: interface has incorrect endpoints\n"); ret = -ENODEV; goto fail; } diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index a76bb50b62026..dcba4281ea486 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -6065,6 +6065,36 @@ void usb_hub_cleanup(void) usb_deregister(&hub_driver); } /* usb_hub_cleanup() */ +/** + * hub_hc_release_resources - clear resources used by host controller + * @udev: pointer to device being released + * + * Context: task context, might sleep + * + * This function releases the host controller resources in the correct order + * before any operation is performed on the resuming usb device. The host + * controller resources allocated for the devices in a tree should be released + * starting from the last usb device in the tree and moving toward the root + * hub. This function is used only when resuming a usb device that requires + * reinitialization, that is, when the flag udev->reset_resume is set. + * + * This call is synchronous, and may not be used in an interrupt context. 
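On the cxacru change above: usb_check_int_endpoints() and usb_check_bulk_endpoints() take a zero-terminated array of endpoint addresses and return true only when every listed endpoint exists on the interface with the expected transfer type, which is why the error test flips from if (ret) to if (!ret). A minimal sketch of the calling convention, with example addresses rather than the driver's:

        static const u8 ep_addrs[] = {
                0x01 | USB_DIR_IN,      /* example IN endpoint */
                0x01,                   /* matching OUT endpoint (USB_DIR_OUT == 0) */
                0                       /* array terminator */
        };

        if (!usb_check_bulk_endpoints(intf, ep_addrs))
                return -ENODEV;         /* missing or wrong-type endpoint */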
+ */ +static void hub_hc_release_resources(struct usb_device *udev) +{ + struct usb_hub *hub = usb_hub_to_struct_hub(udev); + struct usb_hcd *hcd = bus_to_hcd(udev->bus); + int i; + + /* Release the resources of all children before this device */ + for (i = 0; i < udev->maxchild; i++) + if (hub->ports[i]->child) + hub_hc_release_resources(hub->ports[i]->child); + + if (hcd->driver->reset_device) + hcd->driver->reset_device(hcd, udev); +} + /** * usb_reset_and_verify_device - perform a USB port reset to reinitialize a device * @udev: device to reset (not in SUSPENDED or NOTATTACHED state) @@ -6129,6 +6159,9 @@ static int usb_reset_and_verify_device(struct usb_device *udev) bos = udev->bos; udev->bos = NULL; + if (udev->reset_resume) + hub_hc_release_resources(udev); + mutex_lock(hcd->address0_mutex); for (i = 0; i < PORT_INIT_TRIES; ++i) { diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index dfcfc142bd5e1..8efbacc5bc341 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c @@ -341,6 +341,10 @@ static const struct usb_device_id usb_quirk_list[] = { { USB_DEVICE(0x0638, 0x0a13), .driver_info = USB_QUIRK_STRING_FETCH_255 }, + /* Prolific Single-LUN Mass Storage Card Reader */ + { USB_DEVICE(0x067b, 0x2731), .driver_info = USB_QUIRK_DELAY_INIT | + USB_QUIRK_NO_LPM }, + /* Saitek Cyborg Gold Joystick */ { USB_DEVICE(0x06a3, 0x0006), .driver_info = USB_QUIRK_CONFIG_INTF_STRINGS }, diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c index dfa1b5fe48dc4..66a08b5271653 100644 --- a/drivers/usb/dwc3/core.c +++ b/drivers/usb/dwc3/core.c @@ -131,11 +131,24 @@ void dwc3_enable_susphy(struct dwc3 *dwc, bool enable) } } -void dwc3_set_prtcap(struct dwc3 *dwc, u32 mode) +void dwc3_set_prtcap(struct dwc3 *dwc, u32 mode, bool ignore_susphy) { + unsigned int hw_mode; u32 reg; reg = dwc3_readl(dwc->regs, DWC3_GCTL); + + /* + * For DRD controllers, GUSB3PIPECTL.SUSPENDENABLE and + * GUSB2PHYCFG.SUSPHY should be cleared during mode switching, + * and they can be set after core initialization. + */ + hw_mode = DWC3_GHWPARAMS0_MODE(dwc->hwparams.hwparams0); + if (hw_mode == DWC3_GHWPARAMS0_MODE_DRD && !ignore_susphy) { + if (DWC3_GCTL_PRTCAP(reg) != mode) + dwc3_enable_susphy(dwc, false); + } + reg &= ~(DWC3_GCTL_PRTCAPDIR(DWC3_GCTL_PRTCAP_OTG)); reg |= DWC3_GCTL_PRTCAPDIR(mode); dwc3_writel(dwc->regs, DWC3_GCTL, reg); @@ -216,7 +229,7 @@ static void __dwc3_set_mode(struct work_struct *work) spin_lock_irqsave(&dwc->lock, flags); - dwc3_set_prtcap(dwc, desired_dr_role); + dwc3_set_prtcap(dwc, desired_dr_role, false); spin_unlock_irqrestore(&dwc->lock, flags); @@ -658,16 +671,7 @@ static int dwc3_ss_phy_setup(struct dwc3 *dwc, int index) */ reg &= ~DWC3_GUSB3PIPECTL_UX_EXIT_PX; - /* - * Above DWC_usb3.0 1.94a, it is recommended to set - * DWC3_GUSB3PIPECTL_SUSPHY to '0' during coreConsultant configuration. - * So default value will be '0' when the core is reset. Application - * needs to set it to '1' after the core initialization is completed. - * - * Similarly for DRD controllers, GUSB3PIPECTL.SUSPENDENABLE must be - * cleared after power-on reset, and it can be set after core - * initialization. - */ + /* Ensure the GUSB3PIPECTL.SUSPENDENABLE is cleared prior to phy init. */ reg &= ~DWC3_GUSB3PIPECTL_SUSPHY; if (dwc->u2ss_inp3_quirk) @@ -747,15 +751,7 @@ static int dwc3_hs_phy_setup(struct dwc3 *dwc, int index) break; } - /* - * Above DWC_usb3.0 1.94a, it is recommended to set - * DWC3_GUSB2PHYCFG_SUSPHY to '0' during coreConsultant configuration. 
- * So default value will be '0' when the core is reset. Application - * needs to set it to '1' after the core initialization is completed. - * - * Similarly for DRD controllers, GUSB2PHYCFG.SUSPHY must be cleared - * after power-on reset, and it can be set after core initialization. - */ + /* Ensure the GUSB2PHYCFG.SUSPHY is cleared prior to phy init. */ reg &= ~DWC3_GUSB2PHYCFG_SUSPHY; if (dwc->dis_enblslpm_quirk) @@ -830,6 +826,25 @@ static int dwc3_phy_init(struct dwc3 *dwc) goto err_exit_usb3_phy; } + /* + * Above DWC_usb3.0 1.94a, it is recommended to set + * DWC3_GUSB3PIPECTL_SUSPHY and DWC3_GUSB2PHYCFG_SUSPHY to '0' during + * coreConsultant configuration. So default value will be '0' when the + * core is reset. Application needs to set it to '1' after the core + * initialization is completed. + * + * Certain phy requires to be in P0 power state during initialization. + * Make sure GUSB3PIPECTL.SUSPENDENABLE and GUSB2PHYCFG.SUSPHY are clear + * prior to phy init to maintain in the P0 state. + * + * After phy initialization, some phy operations can only be executed + * while in lower P states. Ensure GUSB3PIPECTL.SUSPENDENABLE and + * GUSB2PHYCFG.SUSPHY are set soon after initialization to avoid + * blocking phy ops. + */ + if (!DWC3_VER_IS_WITHIN(DWC3, ANY, 194A)) + dwc3_enable_susphy(dwc, true); + return 0; err_exit_usb3_phy: @@ -1588,7 +1603,7 @@ static int dwc3_core_init_mode(struct dwc3 *dwc) switch (dwc->dr_mode) { case USB_DR_MODE_PERIPHERAL: - dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE); + dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE, false); if (dwc->usb2_phy) otg_set_vbus(dwc->usb2_phy->otg, false); @@ -1600,7 +1615,7 @@ static int dwc3_core_init_mode(struct dwc3 *dwc) return dev_err_probe(dev, ret, "failed to initialize gadget\n"); break; case USB_DR_MODE_HOST: - dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_HOST); + dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_HOST, false); if (dwc->usb2_phy) otg_set_vbus(dwc->usb2_phy->otg, true); @@ -1645,7 +1660,7 @@ static void dwc3_core_exit_mode(struct dwc3 *dwc) } /* de-assert DRVVBUS for HOST and OTG mode */ - dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE); + dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE, true); } static void dwc3_get_software_properties(struct dwc3 *dwc) @@ -1835,8 +1850,6 @@ static void dwc3_get_properties(struct dwc3 *dwc) dwc->tx_thr_num_pkt_prd = tx_thr_num_pkt_prd; dwc->tx_max_burst_prd = tx_max_burst_prd; - dwc->imod_interval = 0; - dwc->tx_fifo_resize_max_num = tx_fifo_resize_max_num; } @@ -1854,21 +1867,19 @@ static void dwc3_check_params(struct dwc3 *dwc) unsigned int hwparam_gen = DWC3_GHWPARAMS3_SSPHY_IFC(dwc->hwparams.hwparams3); - /* Check for proper value of imod_interval */ - if (dwc->imod_interval && !dwc3_has_imod(dwc)) { - dev_warn(dwc->dev, "Interrupt moderation not supported\n"); - dwc->imod_interval = 0; - } - /* + * Enable IMOD for all supporting controllers. + * + * Particularly, DWC_usb3 v3.00a must enable this feature for + * the following reason: + * * Workaround for STAR 9000961433 which affects only version * 3.00a of the DWC_usb3 core. This prevents the controller * interrupt from being masked while handling events. IMOD * allows us to work around this issue. Enable it for the * affected version. 
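Looking back at the gov_power_allocator fix earlier in this section: total_req_power, the divisor in divvy_up_power(), is the sum of the actors' weighted requests, so the numerator must be weighted as well or the granted shares no longer sum to power_range. A worked example, assuming two actors whose weights happen to halve actor A's request and leave B's unchanged:

        /* A: req_power = 4000 mW, weighted_req_power = 2000
         * B: req_power = 2000 mW, weighted_req_power = 2000
         * total_req_power = 4000 (sum of the weighted values)
         * power_range = 100
         *
         * fixed: granted = 100 * weighted / 4000 -> A = 50,  B = 50
         * buggy: granted = 100 * raw      / 4000 -> A = 100, B = 50
         *        (150% of the range handed out)
         */
        u64 req_range = (u64)pa->weighted_req_power * power_range;
        pa->granted_power = DIV_ROUND_CLOSEST_ULL(req_range, total_req_power);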
*/ - if (!dwc->imod_interval && - DWC3_VER_IS(DWC3, 300A)) + if (dwc3_has_imod((dwc))) dwc->imod_interval = 1; /* Check the maximum_speed parameter */ @@ -2457,7 +2468,7 @@ static int dwc3_resume_common(struct dwc3 *dwc, pm_message_t msg) if (ret) return ret; - dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE); + dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE, true); dwc3_gadget_resume(dwc); break; case DWC3_GCTL_PRTCAP_HOST: @@ -2465,7 +2476,7 @@ static int dwc3_resume_common(struct dwc3 *dwc, pm_message_t msg) ret = dwc3_core_init_for_resume(dwc); if (ret) return ret; - dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_HOST); + dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_HOST, true); break; } /* Restore GUSB2PHYCFG bits that were modified in suspend */ @@ -2494,7 +2505,7 @@ static int dwc3_resume_common(struct dwc3 *dwc, pm_message_t msg) if (ret) return ret; - dwc3_set_prtcap(dwc, dwc->current_dr_role); + dwc3_set_prtcap(dwc, dwc->current_dr_role, true); dwc3_otg_init(dwc); if (dwc->current_otg_role == DWC3_OTG_ROLE_HOST) { diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h index c955039bb4f62..aaa39e663f60a 100644 --- a/drivers/usb/dwc3/core.h +++ b/drivers/usb/dwc3/core.h @@ -1558,7 +1558,7 @@ struct dwc3_gadget_ep_cmd_params { #define DWC3_HAS_OTG BIT(3) /* prototypes */ -void dwc3_set_prtcap(struct dwc3 *dwc, u32 mode); +void dwc3_set_prtcap(struct dwc3 *dwc, u32 mode, bool ignore_susphy); void dwc3_set_mode(struct dwc3 *dwc, u32 mode); u32 dwc3_core_fifo_space(struct dwc3_ep *dep, u8 type); diff --git a/drivers/usb/dwc3/drd.c b/drivers/usb/dwc3/drd.c index d76ae676783cf..7977860932b14 100644 --- a/drivers/usb/dwc3/drd.c +++ b/drivers/usb/dwc3/drd.c @@ -173,7 +173,7 @@ void dwc3_otg_init(struct dwc3 *dwc) * block "Initialize GCTL for OTG operation". */ /* GCTL.PrtCapDir=2'b11 */ - dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_OTG); + dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_OTG, true); /* GUSB2PHYCFG0.SusPHY=0 */ reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0)); reg &= ~DWC3_GUSB2PHYCFG_SUSPHY; @@ -556,7 +556,7 @@ int dwc3_drd_init(struct dwc3 *dwc) dwc3_drd_update(dwc); } else { - dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_OTG); + dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_OTG, true); /* use OTG block to get ID event */ irq = dwc3_otg_get_irq(dwc); diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index ddd6b2ce57107..89a4dc8ebf948 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -4501,14 +4501,18 @@ static irqreturn_t dwc3_process_event_buf(struct dwc3_event_buffer *evt) dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0), DWC3_GEVNTSIZ_SIZE(evt->length)); + evt->flags &= ~DWC3_EVENT_PENDING; + /* + * Add an explicit write memory barrier to make sure that the update of + * clearing DWC3_EVENT_PENDING is observed in dwc3_check_event_buf() + */ + wmb(); + if (dwc->imod_interval) { dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), DWC3_GEVNTCOUNT_EHB); dwc3_writel(dwc->regs, DWC3_DEV_IMOD(0), dwc->imod_interval); } - /* Keep the clearing of DWC3_EVENT_PENDING at the end */ - evt->flags &= ~DWC3_EVENT_PENDING; - return ret; } diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c index bdda8c74602de..869ad99afb48b 100644 --- a/drivers/usb/gadget/composite.c +++ b/drivers/usb/gadget/composite.c @@ -1050,10 +1050,11 @@ static int set_config(struct usb_composite_dev *cdev, else usb_gadget_set_remote_wakeup(gadget, 0); done: - if (power <= USB_SELF_POWER_VBUS_MAX_DRAW) - usb_gadget_set_selfpowered(gadget); - else + if (power > USB_SELF_POWER_VBUS_MAX_DRAW || + (c && 
!(c->bmAttributes & USB_CONFIG_ATT_SELFPOWER))) usb_gadget_clear_selfpowered(gadget); + else + usb_gadget_set_selfpowered(gadget); usb_gadget_vbus_draw(gadget, power); if (result >= 0 && cdev->delayed_status) @@ -2615,7 +2616,10 @@ void composite_suspend(struct usb_gadget *gadget) cdev->suspended = 1; - usb_gadget_set_selfpowered(gadget); + if (cdev->config && + cdev->config->bmAttributes & USB_CONFIG_ATT_SELFPOWER) + usb_gadget_set_selfpowered(gadget); + usb_gadget_vbus_draw(gadget, 2); } @@ -2649,8 +2653,11 @@ void composite_resume(struct usb_gadget *gadget) else maxpower = min(maxpower, 900U); - if (maxpower > USB_SELF_POWER_VBUS_MAX_DRAW) + if (maxpower > USB_SELF_POWER_VBUS_MAX_DRAW || + !(cdev->config->bmAttributes & USB_CONFIG_ATT_SELFPOWER)) usb_gadget_clear_selfpowered(gadget); + else + usb_gadget_set_selfpowered(gadget); usb_gadget_vbus_draw(gadget, maxpower); } else { diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c index 09e2838917e29..f58590bf5e02f 100644 --- a/drivers/usb/gadget/function/u_ether.c +++ b/drivers/usb/gadget/function/u_ether.c @@ -1052,8 +1052,8 @@ void gether_suspend(struct gether *link) * There is a transfer in progress. So we trigger a remote * wakeup to inform the host. */ - ether_wakeup_host(dev->port_usb); - return; + if (!ether_wakeup_host(dev->port_usb)) + return; } spin_lock_irqsave(&dev->lock, flags); link->is_suspend = true; diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c index 9693464c05204..69c278b64084b 100644 --- a/drivers/usb/host/xhci-hub.c +++ b/drivers/usb/host/xhci-hub.c @@ -12,6 +12,7 @@ #include #include #include +#include #include "xhci.h" #include "xhci-trace.h" @@ -770,9 +771,16 @@ static int xhci_exit_test_mode(struct xhci_hcd *xhci) enum usb_link_tunnel_mode xhci_port_is_tunneled(struct xhci_hcd *xhci, struct xhci_port *port) { + struct usb_hcd *hcd; void __iomem *base; u32 offset; + /* Don't try and probe this capability for non-Intel hosts */ + hcd = xhci_to_hcd(xhci); + if (!dev_is_pci(hcd->self.controller) || + to_pci_dev(hcd->self.controller)->vendor != PCI_VENDOR_ID_INTEL) + return USB_LINK_UNKNOWN; + base = &xhci->cap_regs->hc_capbase; offset = xhci_find_next_ext_cap(base, 0, XHCI_EXT_CAPS_INTEL_SPR_SHADOW); diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index 92703efda1f7b..fdf0c1008225a 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c @@ -2437,7 +2437,8 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) * and our use of dma addresses in the trb_address_map radix tree needs * TRB_SEGMENT_SIZE alignment, so we pick the greater alignment need. 
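The dwc3/gadget.c hunk above is an ordering fix: the clearing of DWC3_EVENT_PENDING must be visible before the GEVNTCOUNT/DEV_IMOD writes that can raise the next interrupt, or the top half in dwc3_check_event_buf() may still observe the stale flag and drop the event. The resulting tail of the handler:

        evt->flags &= ~DWC3_EVENT_PENDING;
        wmb();  /* pairs with the flag test in dwc3_check_event_buf() */

        if (dwc->imod_interval) {
                dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), DWC3_GEVNTCOUNT_EHB);
                dwc3_writel(dwc->regs, DWC3_DEV_IMOD(0), dwc->imod_interval);
        }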
*/ - if (xhci->quirks & XHCI_ZHAOXIN_TRB_FETCH) + if (xhci->quirks & XHCI_TRB_OVERFETCH) + /* Buggy HC prefetches beyond segment bounds - allocate dummy space at the end */ xhci->segment_pool = dma_pool_create("xHCI ring segments", dev, TRB_SEGMENT_SIZE * 2, TRB_SEGMENT_SIZE * 2, xhci->page_size * 2); else diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index ad0ff356f6fa0..54460d11f7ee8 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c @@ -38,6 +38,8 @@ #define PCI_DEVICE_ID_ETRON_EJ168 0x7023 #define PCI_DEVICE_ID_ETRON_EJ188 0x7052 +#define PCI_DEVICE_ID_VIA_VL805 0x3483 + #define PCI_DEVICE_ID_INTEL_LYNXPOINT_XHCI 0x8c31 #define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI 0x9c31 #define PCI_DEVICE_ID_INTEL_WILDCATPOINT_LP_XHCI 0x9cb1 @@ -418,8 +420,10 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) pdev->device == 0x3432) xhci->quirks |= XHCI_BROKEN_STREAMS; - if (pdev->vendor == PCI_VENDOR_ID_VIA && pdev->device == 0x3483) + if (pdev->vendor == PCI_VENDOR_ID_VIA && pdev->device == PCI_DEVICE_ID_VIA_VL805) { xhci->quirks |= XHCI_LPM_SUPPORT; + xhci->quirks |= XHCI_TRB_OVERFETCH; + } if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA && pdev->device == PCI_DEVICE_ID_ASMEDIA_1042_XHCI) { @@ -467,11 +471,11 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) if (pdev->device == 0x9202) { xhci->quirks |= XHCI_RESET_ON_RESUME; - xhci->quirks |= XHCI_ZHAOXIN_TRB_FETCH; + xhci->quirks |= XHCI_TRB_OVERFETCH; } if (pdev->device == 0x9203) - xhci->quirks |= XHCI_ZHAOXIN_TRB_FETCH; + xhci->quirks |= XHCI_TRB_OVERFETCH; } if (pdev->vendor == PCI_VENDOR_ID_CDNS && diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 45653114ccd7f..1a90ebc8a30ea 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -780,8 +780,12 @@ static void xhci_clear_command_ring(struct xhci_hcd *xhci) struct xhci_segment *seg; ring = xhci->cmd_ring; - xhci_for_each_ring_seg(ring->first_seg, seg) + xhci_for_each_ring_seg(ring->first_seg, seg) { + /* erase all TRBs before the link */ memset(seg->trbs, 0, sizeof(union xhci_trb) * (TRBS_PER_SEGMENT - 1)); + /* clear link cycle bit */ + seg->trbs[TRBS_PER_SEGMENT - 1].link.control &= cpu_to_le32(~TRB_CYCLE); + } xhci_initialize_ring_info(ring); /* diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index 8c164340a2c35..779b01dee068f 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -1632,7 +1632,7 @@ struct xhci_hcd { #define XHCI_EP_CTX_BROKEN_DCS BIT_ULL(42) #define XHCI_SUSPEND_RESUME_CLKS BIT_ULL(43) #define XHCI_RESET_TO_DEFAULT BIT_ULL(44) -#define XHCI_ZHAOXIN_TRB_FETCH BIT_ULL(45) +#define XHCI_TRB_OVERFETCH BIT_ULL(45) #define XHCI_ZHAOXIN_HOST BIT_ULL(46) #define XHCI_WRITE_64_HI_LO BIT_ULL(47) #define XHCI_CDNS_SCTX_QUIRK BIT_ULL(48) diff --git a/drivers/usb/renesas_usbhs/common.c b/drivers/usb/renesas_usbhs/common.c index 935fc496fe94b..4b35ef216125c 100644 --- a/drivers/usb/renesas_usbhs/common.c +++ b/drivers/usb/renesas_usbhs/common.c @@ -312,8 +312,10 @@ static int usbhsc_clk_get(struct device *dev, struct usbhs_priv *priv) priv->clks[1] = of_clk_get(dev_of_node(dev), 1); if (PTR_ERR(priv->clks[1]) == -ENOENT) priv->clks[1] = NULL; - else if (IS_ERR(priv->clks[1])) + else if (IS_ERR(priv->clks[1])) { + clk_put(priv->clks[0]); return PTR_ERR(priv->clks[1]); + } return 0; } @@ -779,6 +781,8 @@ static void usbhs_remove(struct platform_device *pdev) dev_dbg(&pdev->dev, "usb remove\n"); + 
flush_delayed_work(&priv->notify_hotplug_work); + /* power off */ if (!usbhs_get_dparam(priv, runtime_pwctrl)) usbhsc_power_ctrl(priv, 0); diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c index 105132ae87acb..e8e5723f54122 100644 --- a/drivers/usb/renesas_usbhs/mod_gadget.c +++ b/drivers/usb/renesas_usbhs/mod_gadget.c @@ -1094,7 +1094,7 @@ int usbhs_mod_gadget_probe(struct usbhs_priv *priv) goto usbhs_mod_gadget_probe_err_gpriv; } - gpriv->transceiver = usb_get_phy(USB_PHY_TYPE_UNDEFINED); + gpriv->transceiver = devm_usb_get_phy(dev, USB_PHY_TYPE_UNDEFINED); dev_info(dev, "%stransceiver found\n", !IS_ERR(gpriv->transceiver) ? "" : "no "); diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index e07c5e3eb18c0..9b34e23b70919 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c @@ -1079,6 +1079,20 @@ static const struct usb_device_id id_table_combined[] = { .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, /* GMC devices */ { USB_DEVICE(GMC_VID, GMC_Z216C_PID) }, + /* Altera USB Blaster 3 */ + { USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_6022_PID, 1) }, + { USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_6025_PID, 2) }, + { USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_6026_PID, 2) }, + { USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_6026_PID, 3) }, + { USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_6029_PID, 2) }, + { USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_602A_PID, 2) }, + { USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_602A_PID, 3) }, + { USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_602C_PID, 1) }, + { USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_602D_PID, 1) }, + { USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_602D_PID, 2) }, + { USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_602E_PID, 1) }, + { USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_602E_PID, 2) }, + { USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_602E_PID, 3) }, { } /* Terminating entry */ }; diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h index 5ee60ba2a73cd..52be47d684ea6 100644 --- a/drivers/usb/serial/ftdi_sio_ids.h +++ b/drivers/usb/serial/ftdi_sio_ids.h @@ -1612,3 +1612,16 @@ */ #define GMC_VID 0x1cd7 #define GMC_Z216C_PID 0x0217 /* GMC Z216C Adapter IR-USB */ + +/* + * Altera USB Blaster 3 (http://www.altera.com). 
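Among the xhci hunks above, the XHCI_ZHAOXIN_TRB_FETCH quirk becomes the vendor-neutral XHCI_TRB_OVERFETCH because VIA's VL805 shows the same defect: the controller prefetches TRBs past the end of a ring segment. The workaround, condensed from xhci_mem_init(), doubles the segment allocation so the overfetch stays inside driver-owned memory:

        if (xhci->quirks & XHCI_TRB_OVERFETCH)
                /* the second half of each allocation never holds TRBs */
                xhci->segment_pool = dma_pool_create("xHCI ring segments",
                                dev, TRB_SEGMENT_SIZE * 2,
                                TRB_SEGMENT_SIZE * 2, xhci->page_size * 2);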
+ */ +#define ALTERA_VID 0x09fb +#define ALTERA_UB3_6022_PID 0x6022 +#define ALTERA_UB3_6025_PID 0x6025 +#define ALTERA_UB3_6026_PID 0x6026 +#define ALTERA_UB3_6029_PID 0x6029 +#define ALTERA_UB3_602A_PID 0x602a +#define ALTERA_UB3_602C_PID 0x602c +#define ALTERA_UB3_602D_PID 0x602d +#define ALTERA_UB3_602E_PID 0x602e diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 58bd54e8c483a..5cd26dac2069f 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -1368,13 +1368,13 @@ static const struct usb_device_id option_ids[] = { .driver_info = NCTRL(0) | RSVD(1) }, { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1075, 0xff), /* Telit FN990A (PCIe) */ .driver_info = RSVD(0) }, - { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1080, 0xff), /* Telit FE990 (rmnet) */ + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1080, 0xff), /* Telit FE990A (rmnet) */ .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) }, - { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1081, 0xff), /* Telit FE990 (MBIM) */ + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1081, 0xff), /* Telit FE990A (MBIM) */ .driver_info = NCTRL(0) | RSVD(1) }, - { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1082, 0xff), /* Telit FE990 (RNDIS) */ + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1082, 0xff), /* Telit FE990A (RNDIS) */ .driver_info = NCTRL(2) | RSVD(3) }, - { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1083, 0xff), /* Telit FE990 (ECM) */ + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1083, 0xff), /* Telit FE990A (ECM) */ .driver_info = NCTRL(0) | RSVD(1) }, { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10a0, 0xff), /* Telit FN20C04 (rmnet) */ .driver_info = RSVD(0) | NCTRL(3) }, @@ -1388,28 +1388,44 @@ static const struct usb_device_id option_ids[] = { .driver_info = RSVD(0) | NCTRL(2) | RSVD(3) | RSVD(4) }, { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10aa, 0xff), /* Telit FN920C04 (MBIM) */ .driver_info = NCTRL(3) | RSVD(4) | RSVD(5) }, + { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10b0, 0xff, 0xff, 0x30), /* Telit FE990B (rmnet) */ + .driver_info = NCTRL(5) }, + { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10b0, 0xff, 0xff, 0x40) }, + { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10b0, 0xff, 0xff, 0x60) }, + { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10b1, 0xff, 0xff, 0x30), /* Telit FE990B (MBIM) */ + .driver_info = NCTRL(6) }, + { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10b1, 0xff, 0xff, 0x40) }, + { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10b1, 0xff, 0xff, 0x60) }, + { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10b2, 0xff, 0xff, 0x30), /* Telit FE990B (RNDIS) */ + .driver_info = NCTRL(6) }, + { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10b2, 0xff, 0xff, 0x40) }, + { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10b2, 0xff, 0xff, 0x60) }, + { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10b3, 0xff, 0xff, 0x30), /* Telit FE990B (ECM) */ + .driver_info = NCTRL(6) }, + { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10b3, 0xff, 0xff, 0x40) }, + { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10b3, 0xff, 0xff, 0x60) }, { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10c0, 0xff), /* Telit FE910C04 (rmnet) */ .driver_info = RSVD(0) | NCTRL(3) }, { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10c4, 0xff), /* Telit FE910C04 (rmnet) */ .driver_info = RSVD(0) | NCTRL(3) }, { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10c8, 0xff), /* Telit FE910C04 (rmnet) */ .driver_info = RSVD(0) | NCTRL(2) | RSVD(3) | 
RSVD(4) }, - { USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d0, 0x60) }, /* Telit FN990B (rmnet) */ - { USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d0, 0x40) }, - { USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d0, 0x30), + { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10d0, 0xff, 0xff, 0x30), /* Telit FN990B (rmnet) */ .driver_info = NCTRL(5) }, - { USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d1, 0x60) }, /* Telit FN990B (MBIM) */ - { USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d1, 0x40) }, - { USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d1, 0x30), + { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10d0, 0xff, 0xff, 0x40) }, + { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10d0, 0xff, 0xff, 0x60) }, + { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10d1, 0xff, 0xff, 0x30), /* Telit FN990B (MBIM) */ .driver_info = NCTRL(6) }, - { USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d2, 0x60) }, /* Telit FN990B (RNDIS) */ - { USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d2, 0x40) }, - { USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d2, 0x30), + { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10d1, 0xff, 0xff, 0x40) }, + { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10d1, 0xff, 0xff, 0x60) }, + { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10d2, 0xff, 0xff, 0x30), /* Telit FN990B (RNDIS) */ .driver_info = NCTRL(6) }, - { USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d3, 0x60) }, /* Telit FN990B (ECM) */ - { USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d3, 0x40) }, - { USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d3, 0x30), + { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10d2, 0xff, 0xff, 0x40) }, + { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10d2, 0xff, 0xff, 0x60) }, + { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10d3, 0xff, 0xff, 0x30), /* Telit FN990B (ECM) */ .driver_info = NCTRL(6) }, + { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10d3, 0xff, 0xff, 0x40) }, + { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10d3, 0xff, 0xff, 0x60) }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910), .driver_info = NCTRL(0) | RSVD(1) | RSVD(3) }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM), diff --git a/drivers/usb/typec/tcpm/tcpci_rt1711h.c b/drivers/usb/typec/tcpm/tcpci_rt1711h.c index 64f6dd0dc6609..88c50b984e8a3 100644 --- a/drivers/usb/typec/tcpm/tcpci_rt1711h.c +++ b/drivers/usb/typec/tcpm/tcpci_rt1711h.c @@ -334,6 +334,11 @@ static int rt1711h_probe(struct i2c_client *client) { int ret; struct rt1711h_chip *chip; + const u16 alert_mask = TCPC_ALERT_TX_SUCCESS | TCPC_ALERT_TX_DISCARDED | + TCPC_ALERT_TX_FAILED | TCPC_ALERT_RX_HARD_RST | + TCPC_ALERT_RX_STATUS | TCPC_ALERT_POWER_STATUS | + TCPC_ALERT_CC_STATUS | TCPC_ALERT_RX_BUF_OVF | + TCPC_ALERT_FAULT; chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL); if (!chip) @@ -382,6 +387,12 @@ static int rt1711h_probe(struct i2c_client *client) dev_name(chip->dev), chip); if (ret < 0) return ret; + + /* Enable alert interrupts */ + ret = rt1711h_write16(chip, TCPC_ALERT_MASK, alert_mask); + if (ret < 0) + return ret; + enable_irq_wake(client->irq); return 0; diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c index 6bf1a22c785af..62ca4a0ec55bb 100644 --- a/drivers/usb/typec/tcpm/tcpm.c +++ b/drivers/usb/typec/tcpm/tcpm.c @@ -5117,16 +5117,16 @@ static void run_state_machine(struct tcpm_port *port) */ if (port->vbus_never_low) { port->vbus_never_low = false; - tcpm_set_state(port, 
SNK_SOFT_RESET, - port->timings.sink_wait_cap_time); + upcoming_state = SNK_SOFT_RESET; } else { if (!port->self_powered) upcoming_state = SNK_WAIT_CAPABILITIES_TIMEOUT; else upcoming_state = hard_reset_state(port); - tcpm_set_state(port, SNK_WAIT_CAPABILITIES_TIMEOUT, - port->timings.sink_wait_cap_time); } + + tcpm_set_state(port, upcoming_state, + port->timings.sink_wait_cap_time); break; case SNK_WAIT_CAPABILITIES_TIMEOUT: /* diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c index fcf499cc9458c..2a2915b0a645f 100644 --- a/drivers/usb/typec/ucsi/ucsi.c +++ b/drivers/usb/typec/ucsi/ucsi.c @@ -25,7 +25,7 @@ * difficult to estimate the time it takes for the system to process the command * before it is actually passed to the PPM. */ -#define UCSI_TIMEOUT_MS 5000 +#define UCSI_TIMEOUT_MS 10000 /* * UCSI_SWAP_TIMEOUT_MS - Timeout for role swap requests @@ -1346,7 +1346,7 @@ static int ucsi_reset_ppm(struct ucsi *ucsi) mutex_lock(&ucsi->ppm_lock); - ret = ucsi->ops->read_cci(ucsi, &cci); + ret = ucsi->ops->poll_cci(ucsi, &cci); if (ret < 0) goto out; @@ -1364,7 +1364,7 @@ static int ucsi_reset_ppm(struct ucsi *ucsi) tmo = jiffies + msecs_to_jiffies(UCSI_TIMEOUT_MS); do { - ret = ucsi->ops->read_cci(ucsi, &cci); + ret = ucsi->ops->poll_cci(ucsi, &cci); if (ret < 0) goto out; if (cci & UCSI_CCI_COMMAND_COMPLETE) @@ -1393,7 +1393,7 @@ static int ucsi_reset_ppm(struct ucsi *ucsi) /* Give the PPM time to process a reset before reading CCI */ msleep(20); - ret = ucsi->ops->read_cci(ucsi, &cci); + ret = ucsi->ops->poll_cci(ucsi, &cci); if (ret) goto out; @@ -1825,11 +1825,11 @@ static int ucsi_init(struct ucsi *ucsi) err_unregister: for (con = connector; con->port; con++) { + if (con->wq) + destroy_workqueue(con->wq); ucsi_unregister_partner(con); ucsi_unregister_altmodes(con, UCSI_RECIPIENT_CON); ucsi_unregister_port_psy(con); - if (con->wq) - destroy_workqueue(con->wq); usb_power_delivery_unregister_capabilities(con->port_sink_caps); con->port_sink_caps = NULL; @@ -1929,8 +1929,8 @@ struct ucsi *ucsi_create(struct device *dev, const struct ucsi_operations *ops) struct ucsi *ucsi; if (!ops || - !ops->read_version || !ops->read_cci || !ops->read_message_in || - !ops->sync_control || !ops->async_control) + !ops->read_version || !ops->read_cci || !ops->poll_cci || + !ops->read_message_in || !ops->sync_control || !ops->async_control) return ERR_PTR(-EINVAL); ucsi = kzalloc(sizeof(*ucsi), GFP_KERNEL); @@ -2013,10 +2013,6 @@ void ucsi_unregister(struct ucsi *ucsi) for (i = 0; i < ucsi->cap.num_connectors; i++) { cancel_work_sync(&ucsi->connector[i].work); - ucsi_unregister_partner(&ucsi->connector[i]); - ucsi_unregister_altmodes(&ucsi->connector[i], - UCSI_RECIPIENT_CON); - ucsi_unregister_port_psy(&ucsi->connector[i]); if (ucsi->connector[i].wq) { struct ucsi_work *uwork; @@ -2032,6 +2028,11 @@ void ucsi_unregister(struct ucsi *ucsi) destroy_workqueue(ucsi->connector[i].wq); } + ucsi_unregister_partner(&ucsi->connector[i]); + ucsi_unregister_altmodes(&ucsi->connector[i], + UCSI_RECIPIENT_CON); + ucsi_unregister_port_psy(&ucsi->connector[i]); + usb_power_delivery_unregister_capabilities(ucsi->connector[i].port_sink_caps); ucsi->connector[i].port_sink_caps = NULL; usb_power_delivery_unregister_capabilities(ucsi->connector[i].port_source_caps); diff --git a/drivers/usb/typec/ucsi/ucsi.h b/drivers/usb/typec/ucsi/ucsi.h index 82735eb34f0e3..28780acc4af2e 100644 --- a/drivers/usb/typec/ucsi/ucsi.h +++ b/drivers/usb/typec/ucsi/ucsi.h @@ -62,6 +62,7 @@ struct dentry; * struct 
ucsi_operations - UCSI I/O operations * @read_version: Read implemented UCSI version * @read_cci: Read CCI register + * @poll_cci: Read CCI register while polling with notifications disabled * @read_message_in: Read message data from UCSI * @sync_control: Blocking control operation * @async_control: Non-blocking control operation @@ -76,6 +77,7 @@ struct dentry; struct ucsi_operations { int (*read_version)(struct ucsi *ucsi, u16 *version); int (*read_cci)(struct ucsi *ucsi, u32 *cci); + int (*poll_cci)(struct ucsi *ucsi, u32 *cci); int (*read_message_in)(struct ucsi *ucsi, void *val, size_t val_len); int (*sync_control)(struct ucsi *ucsi, u64 command); int (*async_control)(struct ucsi *ucsi, u64 command); diff --git a/drivers/usb/typec/ucsi/ucsi_acpi.c b/drivers/usb/typec/ucsi/ucsi_acpi.c index 5c55155519634..ac1ebb5d95272 100644 --- a/drivers/usb/typec/ucsi/ucsi_acpi.c +++ b/drivers/usb/typec/ucsi/ucsi_acpi.c @@ -59,19 +59,24 @@ static int ucsi_acpi_read_version(struct ucsi *ucsi, u16 *version) static int ucsi_acpi_read_cci(struct ucsi *ucsi, u32 *cci) { struct ucsi_acpi *ua = ucsi_get_drvdata(ucsi); - int ret; - - if (UCSI_COMMAND(ua->cmd) == UCSI_PPM_RESET) { - ret = ucsi_acpi_dsm(ua, UCSI_DSM_FUNC_READ); - if (ret) - return ret; - } memcpy(cci, ua->base + UCSI_CCI, sizeof(*cci)); return 0; } +static int ucsi_acpi_poll_cci(struct ucsi *ucsi, u32 *cci) +{ + struct ucsi_acpi *ua = ucsi_get_drvdata(ucsi); + int ret; + + ret = ucsi_acpi_dsm(ua, UCSI_DSM_FUNC_READ); + if (ret) + return ret; + + return ucsi_acpi_read_cci(ucsi, cci); +} + static int ucsi_acpi_read_message_in(struct ucsi *ucsi, void *val, size_t val_len) { struct ucsi_acpi *ua = ucsi_get_drvdata(ucsi); @@ -94,6 +99,7 @@ static int ucsi_acpi_async_control(struct ucsi *ucsi, u64 command) static const struct ucsi_operations ucsi_acpi_ops = { .read_version = ucsi_acpi_read_version, .read_cci = ucsi_acpi_read_cci, + .poll_cci = ucsi_acpi_poll_cci, .read_message_in = ucsi_acpi_read_message_in, .sync_control = ucsi_sync_control_common, .async_control = ucsi_acpi_async_control @@ -142,6 +148,7 @@ static int ucsi_gram_sync_control(struct ucsi *ucsi, u64 command) static const struct ucsi_operations ucsi_gram_ops = { .read_version = ucsi_acpi_read_version, .read_cci = ucsi_acpi_read_cci, + .poll_cci = ucsi_acpi_poll_cci, .read_message_in = ucsi_gram_read_message_in, .sync_control = ucsi_gram_sync_control, .async_control = ucsi_acpi_async_control diff --git a/drivers/usb/typec/ucsi/ucsi_ccg.c b/drivers/usb/typec/ucsi/ucsi_ccg.c index 740171f24ef9f..4b1668733a4be 100644 --- a/drivers/usb/typec/ucsi/ucsi_ccg.c +++ b/drivers/usb/typec/ucsi/ucsi_ccg.c @@ -664,6 +664,7 @@ static int ucsi_ccg_sync_control(struct ucsi *ucsi, u64 command) static const struct ucsi_operations ucsi_ccg_ops = { .read_version = ucsi_ccg_read_version, .read_cci = ucsi_ccg_read_cci, + .poll_cci = ucsi_ccg_read_cci, .read_message_in = ucsi_ccg_read_message_in, .sync_control = ucsi_ccg_sync_control, .async_control = ucsi_ccg_async_control, diff --git a/drivers/usb/typec/ucsi/ucsi_glink.c b/drivers/usb/typec/ucsi/ucsi_glink.c index fed39d4580905..8af79101a2fc7 100644 --- a/drivers/usb/typec/ucsi/ucsi_glink.c +++ b/drivers/usb/typec/ucsi/ucsi_glink.c @@ -206,6 +206,7 @@ static void pmic_glink_ucsi_connector_status(struct ucsi_connector *con) static const struct ucsi_operations pmic_glink_ucsi_ops = { .read_version = pmic_glink_ucsi_read_version, .read_cci = pmic_glink_ucsi_read_cci, + .poll_cci = pmic_glink_ucsi_read_cci, .read_message_in = pmic_glink_ucsi_read_message_in, 
.sync_control = ucsi_sync_control_common, .async_control = pmic_glink_ucsi_async_control, diff --git a/drivers/usb/typec/ucsi/ucsi_stm32g0.c b/drivers/usb/typec/ucsi/ucsi_stm32g0.c index 6923fad31d795..57ef7d83a4121 100644 --- a/drivers/usb/typec/ucsi/ucsi_stm32g0.c +++ b/drivers/usb/typec/ucsi/ucsi_stm32g0.c @@ -424,6 +424,7 @@ static irqreturn_t ucsi_stm32g0_irq_handler(int irq, void *data) static const struct ucsi_operations ucsi_stm32g0_ops = { .read_version = ucsi_stm32g0_read_version, .read_cci = ucsi_stm32g0_read_cci, + .poll_cci = ucsi_stm32g0_read_cci, .read_message_in = ucsi_stm32g0_read_message_in, .sync_control = ucsi_sync_control_common, .async_control = ucsi_stm32g0_async_control, diff --git a/drivers/usb/typec/ucsi/ucsi_yoga_c630.c b/drivers/usb/typec/ucsi/ucsi_yoga_c630.c index 4cae85c0dc12a..d33e3f2dd1d80 100644 --- a/drivers/usb/typec/ucsi/ucsi_yoga_c630.c +++ b/drivers/usb/typec/ucsi/ucsi_yoga_c630.c @@ -74,6 +74,7 @@ static int yoga_c630_ucsi_async_control(struct ucsi *ucsi, u64 command) static const struct ucsi_operations yoga_c630_ucsi_ops = { .read_version = yoga_c630_ucsi_read_version, .read_cci = yoga_c630_ucsi_read_cci, + .poll_cci = yoga_c630_ucsi_read_cci, .read_message_in = yoga_c630_ucsi_read_message_in, .sync_control = ucsi_sync_control_common, .async_control = yoga_c630_ucsi_async_control, diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index 9ac25d08f473e..63612faeab727 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c @@ -666,7 +666,7 @@ static struct vhost_worker *vhost_worker_create(struct vhost_dev *dev) vtsk = vhost_task_create(vhost_run_work_list, vhost_worker_killed, worker, name); - if (!vtsk) + if (IS_ERR(vtsk)) goto free_worker; mutex_init(&worker->mutex); diff --git a/drivers/video/fbdev/hyperv_fb.c b/drivers/video/fbdev/hyperv_fb.c index 7fdb5edd7e2e8..75338ffc703fb 100644 --- a/drivers/video/fbdev/hyperv_fb.c +++ b/drivers/video/fbdev/hyperv_fb.c @@ -282,6 +282,8 @@ static uint screen_depth; static uint screen_fb_size; static uint dio_fb_size; /* FB size for deferred IO */ +static void hvfb_putmem(struct fb_info *info); + /* Send message to Hyper-V host */ static inline int synthvid_send(struct hv_device *hdev, struct synthvid_msg *msg) @@ -862,6 +864,17 @@ static void hvfb_ops_damage_area(struct fb_info *info, u32 x, u32 y, u32 width, hvfb_ondemand_refresh_throttle(par, x, y, width, height); } +/* + * fb_ops.fb_destroy is called by the last put_fb_info() call at the end + * of unregister_framebuffer() or fb_release(). Do any cleanup related to + * framebuffer here. + */ +static void hvfb_destroy(struct fb_info *info) +{ + hvfb_putmem(info); + framebuffer_release(info); +} + /* * TODO: GEN1 codepaths allocate from system or DMA-able memory. Fix the * driver to use the _SYSMEM_ or _DMAMEM_ helpers in these cases. 
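The hunk below wires hvfb_destroy() into hvfb_ops and, together with the later switch to devm_register_framebuffer(), moves all teardown behind the final put_fb_info() so the remove path no longer frees the framebuffer by hand. A minimal sketch of that pattern for a hypothetical "foofb" driver (the foofb_* names are illustrative, not part of this patch):

	#include <linux/fb.h>
	#include <linux/io.h>

	/* Hypothetical helper: release whatever the probe path set up. */
	static void foofb_putmem(struct fb_info *info)
	{
		iounmap(info->screen_base);	/* assumes an ioremap()ed buffer */
	}

	/*
	 * Called by the last put_fb_info(), i.e. at the end of
	 * unregister_framebuffer() or fb_release(), whichever runs last.
	 */
	static void foofb_destroy(struct fb_info *info)
	{
		foofb_putmem(info);
		framebuffer_release(info);	/* frees info itself, so this must be last */
	}

	static const struct fb_ops foofb_ops = {
		.owner		= THIS_MODULE,
		.fb_destroy	= foofb_destroy,
		/* remaining callbacks omitted from this sketch */
	};

	static int foofb_register(struct device *dev, struct fb_info *info)
	{
		/* devm unregisters on unbind; the final ref drop then calls .fb_destroy */
		return devm_register_framebuffer(dev, info);
	}

With this shape the driver's remove callback only has to quiesce the device; the explicit unregister_framebuffer()/framebuffer_release() pair disappears, which is exactly what the hvfb_remove() hunk further down deletes.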
@@ -877,6 +890,7 @@ static const struct fb_ops hvfb_ops = { .fb_set_par = hvfb_set_par, .fb_setcolreg = hvfb_setcolreg, .fb_blank = hvfb_blank, + .fb_destroy = hvfb_destroy, }; /* Get options from kernel parameter "video=" */ @@ -952,7 +966,7 @@ static phys_addr_t hvfb_get_phymem(struct hv_device *hdev, } /* Release contiguous physical memory */ -static void hvfb_release_phymem(struct hv_device *hdev, +static void hvfb_release_phymem(struct device *device, phys_addr_t paddr, unsigned int size) { unsigned int order = get_order(size); @@ -960,7 +974,7 @@ static void hvfb_release_phymem(struct hv_device *hdev, if (order <= MAX_PAGE_ORDER) __free_pages(pfn_to_page(paddr >> PAGE_SHIFT), order); else - dma_free_coherent(&hdev->device, + dma_free_coherent(device, round_up(size, PAGE_SIZE), phys_to_virt(paddr), paddr); @@ -989,6 +1003,7 @@ static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info) base = pci_resource_start(pdev, 0); size = pci_resource_len(pdev, 0); + aperture_remove_conflicting_devices(base, size, KBUILD_MODNAME); /* * For Gen 1 VM, we can directly use the contiguous memory @@ -1010,11 +1025,21 @@ static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info) goto getmem_done; } pr_info("Unable to allocate enough contiguous physical memory on Gen 1 VM. Using MMIO instead.\n"); + } else { + aperture_remove_all_conflicting_devices(KBUILD_MODNAME); } /* - * Cannot use the contiguous physical memory. - * Allocate mmio space for framebuffer. + * Cannot use contiguous physical memory, so allocate MMIO space for + * the framebuffer. At this point in the function, conflicting devices + * that might have claimed the framebuffer MMIO space based on + * screen_info.lfb_base must have already been removed so that + * vmbus_allocate_mmio() does not allocate different MMIO space. If the + * kdump image were to be loaded using kexec_file_load(), the + * framebuffer location in the kdump image would be set from + * screen_info.lfb_base at the time that kdump is enabled. If the + * framebuffer has moved elsewhere, this could be the wrong location, + * causing kdump to hang when efifb (for example) loads. 
*/ dio_fb_size = screen_width * screen_height * screen_depth / 8; @@ -1051,11 +1076,6 @@ static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info) info->screen_size = dio_fb_size; getmem_done: - if (base && size) - aperture_remove_conflicting_devices(base, size, KBUILD_MODNAME); - else - aperture_remove_all_conflicting_devices(KBUILD_MODNAME); - if (!gen2vm) pci_dev_put(pdev); @@ -1074,16 +1094,16 @@ static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info) } /* Release the framebuffer */ -static void hvfb_putmem(struct hv_device *hdev, struct fb_info *info) +static void hvfb_putmem(struct fb_info *info) { struct hvfb_par *par = info->par; if (par->need_docopy) { vfree(par->dio_vp); - iounmap(info->screen_base); + iounmap(par->mmio_vp); vmbus_free_mmio(par->mem->start, screen_fb_size); } else { - hvfb_release_phymem(hdev, info->fix.smem_start, + hvfb_release_phymem(info->device, info->fix.smem_start, screen_fb_size); } @@ -1172,7 +1192,7 @@ static int hvfb_probe(struct hv_device *hdev, if (ret) goto error; - ret = register_framebuffer(info); + ret = devm_register_framebuffer(&hdev->device, info); if (ret) { pr_err("Unable to register framebuffer\n"); goto error; @@ -1197,7 +1217,7 @@ static int hvfb_probe(struct hv_device *hdev, error: fb_deferred_io_cleanup(info); - hvfb_putmem(hdev, info); + hvfb_putmem(info); error2: vmbus_close(hdev->channel); error1: @@ -1220,14 +1240,10 @@ static void hvfb_remove(struct hv_device *hdev) fb_deferred_io_cleanup(info); - unregister_framebuffer(info); cancel_delayed_work_sync(&par->dwork); vmbus_close(hdev->channel); hv_set_drvdata(hdev, NULL); - - hvfb_putmem(hdev, info); - framebuffer_release(info); } static int hvfb_suspend(struct hv_device *hdev) diff --git a/drivers/virt/acrn/hsm.c b/drivers/virt/acrn/hsm.c index c24036c4e51ec..e4e196abdaac9 100644 --- a/drivers/virt/acrn/hsm.c +++ b/drivers/virt/acrn/hsm.c @@ -49,7 +49,7 @@ static int pmcmd_ioctl(u64 cmd, void __user *uptr) switch (cmd & PMCMD_TYPE_MASK) { case ACRN_PMCMD_GET_PX_CNT: case ACRN_PMCMD_GET_CX_CNT: - pm_info = kmalloc(sizeof(u64), GFP_KERNEL); + pm_info = kzalloc(sizeof(u64), GFP_KERNEL); if (!pm_info) return -ENOMEM; @@ -64,7 +64,7 @@ static int pmcmd_ioctl(u64 cmd, void __user *uptr) kfree(pm_info); break; case ACRN_PMCMD_GET_PX_DATA: - px_data = kmalloc(sizeof(*px_data), GFP_KERNEL); + px_data = kzalloc(sizeof(*px_data), GFP_KERNEL); if (!px_data) return -ENOMEM; @@ -79,7 +79,7 @@ static int pmcmd_ioctl(u64 cmd, void __user *uptr) kfree(px_data); break; case ACRN_PMCMD_GET_CX_DATA: - cx_data = kmalloc(sizeof(*cx_data), GFP_KERNEL); + cx_data = kzalloc(sizeof(*cx_data), GFP_KERNEL); if (!cx_data) return -ENOMEM; diff --git a/drivers/virt/coco/sev-guest/sev-guest.c b/drivers/virt/coco/sev-guest/sev-guest.c index 264b6523fe52f..70fbc9a3e703d 100644 --- a/drivers/virt/coco/sev-guest/sev-guest.c +++ b/drivers/virt/coco/sev-guest/sev-guest.c @@ -38,12 +38,6 @@ struct snp_guest_dev { struct miscdevice misc; struct snp_msg_desc *msg_desc; - - union { - struct snp_report_req report; - struct snp_derived_key_req derived_key; - struct snp_ext_report_req ext_report; - } req; }; /* @@ -71,7 +65,7 @@ struct snp_req_resp { static int get_report(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg) { - struct snp_report_req *report_req = &snp_dev->req.report; + struct snp_report_req *report_req __free(kfree) = NULL; struct snp_msg_desc *mdesc = snp_dev->msg_desc; struct snp_report_resp *report_resp; struct snp_guest_req req = {}; @@ -80,6 +74,10 @@ static int 
get_report(struct snp_guest_dev *snp_dev, struct snp_guest_request_io if (!arg->req_data || !arg->resp_data) return -EINVAL; + report_req = kzalloc(sizeof(*report_req), GFP_KERNEL_ACCOUNT); + if (!report_req) + return -ENOMEM; + if (copy_from_user(report_req, (void __user *)arg->req_data, sizeof(*report_req))) return -EFAULT; @@ -116,7 +114,7 @@ static int get_report(struct snp_guest_dev *snp_dev, struct snp_guest_request_io static int get_derived_key(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg) { - struct snp_derived_key_req *derived_key_req = &snp_dev->req.derived_key; + struct snp_derived_key_req *derived_key_req __free(kfree) = NULL; struct snp_derived_key_resp derived_key_resp = {0}; struct snp_msg_desc *mdesc = snp_dev->msg_desc; struct snp_guest_req req = {}; @@ -136,6 +134,10 @@ static int get_derived_key(struct snp_guest_dev *snp_dev, struct snp_guest_reque if (sizeof(buf) < resp_len) return -ENOMEM; + derived_key_req = kzalloc(sizeof(*derived_key_req), GFP_KERNEL_ACCOUNT); + if (!derived_key_req) + return -ENOMEM; + if (copy_from_user(derived_key_req, (void __user *)arg->req_data, sizeof(*derived_key_req))) return -EFAULT; @@ -168,16 +170,21 @@ static int get_ext_report(struct snp_guest_dev *snp_dev, struct snp_guest_reques struct snp_req_resp *io) { - struct snp_ext_report_req *report_req = &snp_dev->req.ext_report; + struct snp_ext_report_req *report_req __free(kfree) = NULL; struct snp_msg_desc *mdesc = snp_dev->msg_desc; struct snp_report_resp *report_resp; struct snp_guest_req req = {}; int ret, npages = 0, resp_len; sockptr_t certs_address; + struct page *page; if (sockptr_is_null(io->req_data) || sockptr_is_null(io->resp_data)) return -EINVAL; + report_req = kzalloc(sizeof(*report_req), GFP_KERNEL_ACCOUNT); + if (!report_req) + return -ENOMEM; + if (copy_from_sockptr(report_req, io->req_data, sizeof(*report_req))) return -EFAULT; @@ -203,8 +210,20 @@ static int get_ext_report(struct snp_guest_dev *snp_dev, struct snp_guest_reques * the host. If host does not supply any certs in it, then copy * zeros to indicate that certificate data was not provided. 
*/ - memset(mdesc->certs_data, 0, report_req->certs_len); npages = report_req->certs_len >> PAGE_SHIFT; + page = alloc_pages(GFP_KERNEL_ACCOUNT | __GFP_ZERO, + get_order(report_req->certs_len)); + if (!page) + return -ENOMEM; + + req.certs_data = page_address(page); + ret = set_memory_decrypted((unsigned long)req.certs_data, npages); + if (ret) { + pr_err("failed to mark page shared, ret=%d\n", ret); + __free_pages(page, get_order(report_req->certs_len)); + return -EFAULT; + } + cmd: /* * The intermediate response buffer is used while decrypting the @@ -213,10 +232,12 @@ static int get_ext_report(struct snp_guest_dev *snp_dev, struct snp_guest_reques */ resp_len = sizeof(report_resp->data) + mdesc->ctx->authsize; report_resp = kzalloc(resp_len, GFP_KERNEL_ACCOUNT); - if (!report_resp) - return -ENOMEM; + if (!report_resp) { + ret = -ENOMEM; + goto e_free_data; + } - mdesc->input.data_npages = npages; + req.input.data_npages = npages; req.msg_version = arg->msg_version; req.msg_type = SNP_MSG_REPORT_REQ; @@ -231,7 +252,7 @@ static int get_ext_report(struct snp_guest_dev *snp_dev, struct snp_guest_reques /* If certs length is invalid then copy the returned length */ if (arg->vmm_error == SNP_GUEST_VMM_ERR_INVALID_LEN) { - report_req->certs_len = mdesc->input.data_npages << PAGE_SHIFT; + report_req->certs_len = req.input.data_npages << PAGE_SHIFT; if (copy_to_sockptr(io->req_data, report_req, sizeof(*report_req))) ret = -EFAULT; @@ -240,7 +261,7 @@ static int get_ext_report(struct snp_guest_dev *snp_dev, struct snp_guest_reques if (ret) goto e_free; - if (npages && copy_to_sockptr(certs_address, mdesc->certs_data, report_req->certs_len)) { + if (npages && copy_to_sockptr(certs_address, req.certs_data, report_req->certs_len)) { ret = -EFAULT; goto e_free; } @@ -250,6 +271,13 @@ static int get_ext_report(struct snp_guest_dev *snp_dev, struct snp_guest_reques e_free: kfree(report_resp); +e_free_data: + if (npages) { + if (set_memory_encrypted((unsigned long)req.certs_data, npages)) + WARN_ONCE(ret, "failed to restore encryption mask (leak it)\n"); + else + __free_pages(page, get_order(report_req->certs_len)); + } return ret; } diff --git a/drivers/virt/vboxguest/Kconfig b/drivers/virt/vboxguest/Kconfig index 11b153e7454e4..eaba28c95e733 100644 --- a/drivers/virt/vboxguest/Kconfig +++ b/drivers/virt/vboxguest/Kconfig @@ -1,7 +1,8 @@ # SPDX-License-Identifier: GPL-2.0-only config VBOXGUEST tristate "Virtual Box Guest integration support" - depends on (ARM64 || X86) && PCI && INPUT + depends on (ARM64 || X86 || COMPILE_TEST) && PCI && INPUT + depends on HAS_IOPORT help This is a driver for the Virtual Box Guest PCI device used in Virtual Box virtual machines. 
Enabling this driver will add diff --git a/fs/affs/file.c b/fs/affs/file.c index a5a861dd52230..7a71018e3f675 100644 --- a/fs/affs/file.c +++ b/fs/affs/file.c @@ -596,7 +596,7 @@ affs_extent_file_ofs(struct inode *inode, u32 newsize) BUG_ON(tmp > bsize); AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA); AFFS_DATA_HEAD(bh)->key = cpu_to_be32(inode->i_ino); - AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx); + AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx + 1); AFFS_DATA_HEAD(bh)->size = cpu_to_be32(tmp); affs_fix_checksum(sb, bh); bh->b_state &= ~(1UL << BH_New); @@ -724,7 +724,8 @@ static int affs_write_end_ofs(struct file *file, struct address_space *mapping, tmp = min(bsize - boff, to - from); BUG_ON(boff + tmp > bsize || tmp > bsize); memcpy(AFFS_DATA(bh) + boff, data + from, tmp); - be32_add_cpu(&AFFS_DATA_HEAD(bh)->size, tmp); + AFFS_DATA_HEAD(bh)->size = cpu_to_be32( + max(boff + tmp, be32_to_cpu(AFFS_DATA_HEAD(bh)->size))); affs_fix_checksum(sb, bh); mark_buffer_dirty_inode(bh, inode); written += tmp; @@ -746,7 +747,7 @@ static int affs_write_end_ofs(struct file *file, struct address_space *mapping, if (buffer_new(bh)) { AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA); AFFS_DATA_HEAD(bh)->key = cpu_to_be32(inode->i_ino); - AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx); + AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx + 1); AFFS_DATA_HEAD(bh)->size = cpu_to_be32(bsize); AFFS_DATA_HEAD(bh)->next = 0; bh->b_state &= ~(1UL << BH_New); @@ -780,7 +781,7 @@ static int affs_write_end_ofs(struct file *file, struct address_space *mapping, if (buffer_new(bh)) { AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA); AFFS_DATA_HEAD(bh)->key = cpu_to_be32(inode->i_ino); - AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx); + AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx + 1); AFFS_DATA_HEAD(bh)->size = cpu_to_be32(tmp); AFFS_DATA_HEAD(bh)->next = 0; bh->b_state &= ~(1UL << BH_New); diff --git a/fs/afs/cell.c b/fs/afs/cell.c index cee42646736c8..96a6781f36530 100644 --- a/fs/afs/cell.c +++ b/fs/afs/cell.c @@ -64,7 +64,8 @@ static struct afs_cell *afs_find_cell_locked(struct afs_net *net, return ERR_PTR(-ENAMETOOLONG); if (!name) { - cell = net->ws_cell; + cell = rcu_dereference_protected(net->ws_cell, + lockdep_is_held(&net->cells_lock)); if (!cell) return ERR_PTR(-EDESTADDRREQ); goto found; @@ -388,8 +389,8 @@ int afs_cell_init(struct afs_net *net, const char *rootcell) /* install the new cell */ down_write(&net->cells_lock); afs_see_cell(new_root, afs_cell_trace_see_ws); - old_root = net->ws_cell; - net->ws_cell = new_root; + old_root = rcu_replace_pointer(net->ws_cell, new_root, + lockdep_is_held(&net->cells_lock)); up_write(&net->cells_lock); afs_unuse_cell(net, old_root, afs_cell_trace_unuse_ws); @@ -945,8 +946,8 @@ void afs_cell_purge(struct afs_net *net) _enter(""); down_write(&net->cells_lock); - ws = net->ws_cell; - net->ws_cell = NULL; + ws = rcu_replace_pointer(net->ws_cell, NULL, + lockdep_is_held(&net->cells_lock)); up_write(&net->cells_lock); afs_unuse_cell(net, ws, afs_cell_trace_unuse_ws); diff --git a/fs/afs/dynroot.c b/fs/afs/dynroot.c index d8bf52f77d930..008698d706caa 100644 --- a/fs/afs/dynroot.c +++ b/fs/afs/dynroot.c @@ -314,12 +314,23 @@ static const char *afs_atcell_get_link(struct dentry *dentry, struct inode *inod const char *name; bool dotted = vnode->fid.vnode == 3; - if (!net->ws_cell) + if (!dentry) { + /* We're in RCU-pathwalk. 
*/ + cell = rcu_dereference(net->ws_cell); + if (dotted) + name = cell->name - 1; + else + name = cell->name; + /* Shouldn't need to set a delayed call. */ + return name; + } + + if (!rcu_access_pointer(net->ws_cell)) return ERR_PTR(-ENOENT); down_read(&net->cells_lock); - cell = net->ws_cell; + cell = rcu_dereference_protected(net->ws_cell, lockdep_is_held(&net->cells_lock)); if (dotted) name = cell->name - 1; else diff --git a/fs/afs/internal.h b/fs/afs/internal.h index 90f407774a9a1..df30bd62da79e 100644 --- a/fs/afs/internal.h +++ b/fs/afs/internal.h @@ -287,7 +287,7 @@ struct afs_net { /* Cell database */ struct rb_root cells; - struct afs_cell *ws_cell; + struct afs_cell __rcu *ws_cell; struct work_struct cells_manager; struct timer_list cells_timer; atomic_t cells_outstanding; diff --git a/fs/afs/proc.c b/fs/afs/proc.c index e7614f4f30c21..12c88d8be3fe8 100644 --- a/fs/afs/proc.c +++ b/fs/afs/proc.c @@ -206,7 +206,7 @@ static int afs_proc_rootcell_show(struct seq_file *m, void *v) net = afs_seq2net_single(m); down_read(&net->cells_lock); - cell = net->ws_cell; + cell = rcu_dereference_protected(net->ws_cell, lockdep_is_held(&net->cells_lock)); if (cell) seq_printf(m, "%s\n", cell->name); up_read(&net->cells_lock); @@ -242,7 +242,7 @@ static int afs_proc_rootcell_write(struct file *file, char *buf, size_t size) ret = -EEXIST; inode_lock(file_inode(file)); - if (!net->ws_cell) + if (!rcu_access_pointer(net->ws_cell)) ret = afs_cell_init(net, buf); else printk("busy\n"); diff --git a/fs/afs/server.c b/fs/afs/server.c index 038f9d0ae3af8..4504e16b458cc 100644 --- a/fs/afs/server.c +++ b/fs/afs/server.c @@ -163,6 +163,8 @@ static struct afs_server *afs_install_server(struct afs_cell *cell, rb_insert_color(&server->uuid_rb, &net->fs_servers); hlist_add_head_rcu(&server->proc_link, &net->fs_proc); + afs_get_cell(cell, afs_cell_trace_get_server); + added_dup: write_seqlock(&net->fs_addr_lock); estate = rcu_dereference_protected(server->endpoint_state, @@ -442,6 +444,7 @@ static void afs_server_rcu(struct rcu_head *rcu) atomic_read(&server->active), afs_server_trace_free); afs_put_endpoint_state(rcu_access_pointer(server->endpoint_state), afs_estate_trace_put_server); + afs_put_cell(server->cell, afs_cell_trace_put_server); kfree(server); } diff --git a/fs/afs/server_list.c b/fs/afs/server_list.c index 7e7e567a7f8a2..d20cd902ef949 100644 --- a/fs/afs/server_list.c +++ b/fs/afs/server_list.c @@ -97,8 +97,8 @@ struct afs_server_list *afs_alloc_server_list(struct afs_volume *volume, break; if (j < slist->nr_servers) { if (slist->servers[j].server == server) { - afs_put_server(volume->cell->net, server, - afs_server_trace_put_slist_isort); + afs_unuse_server(volume->cell->net, server, + afs_server_trace_put_slist_isort); continue; } diff --git a/fs/bcachefs/btree_cache.c b/fs/bcachefs/btree_cache.c index ca755e8d1a372..1ec1f90e0eb38 100644 --- a/fs/bcachefs/btree_cache.c +++ b/fs/bcachefs/btree_cache.c @@ -203,7 +203,7 @@ struct btree *__bch2_btree_node_mem_alloc(struct bch_fs *c) return NULL; } - bch2_btree_lock_init(&b->c, 0); + bch2_btree_lock_init(&b->c, 0, GFP_KERNEL); __bch2_btree_node_to_freelist(bc, b); return b; @@ -795,17 +795,18 @@ struct btree *bch2_btree_node_mem_alloc(struct btree_trans *trans, bool pcpu_rea } b = __btree_node_mem_alloc(c, GFP_NOWAIT|__GFP_NOWARN); - if (!b) { + if (b) { + bch2_btree_lock_init(&b->c, pcpu_read_locks ? 
SIX_LOCK_INIT_PCPU : 0, GFP_NOWAIT); + } else { mutex_unlock(&bc->lock); bch2_trans_unlock(trans); b = __btree_node_mem_alloc(c, GFP_KERNEL); if (!b) goto err; + bch2_btree_lock_init(&b->c, pcpu_read_locks ? SIX_LOCK_INIT_PCPU : 0, GFP_KERNEL); mutex_lock(&bc->lock); } - bch2_btree_lock_init(&b->c, pcpu_read_locks ? SIX_LOCK_INIT_PCPU : 0); - BUG_ON(!six_trylock_intent(&b->c.lock)); BUG_ON(!six_trylock_write(&b->c.lock)); diff --git a/fs/bcachefs/btree_io.c b/fs/bcachefs/btree_io.c index e371e60e3133e..756736f9243d7 100644 --- a/fs/bcachefs/btree_io.c +++ b/fs/bcachefs/btree_io.c @@ -996,7 +996,7 @@ static int validate_bset_keys(struct bch_fs *c, struct btree *b, } got_good_key: le16_add_cpu(&i->u64s, -next_good_key); - memmove_u64s_down(k, bkey_p_next(k), (u64 *) vstruct_end(i) - (u64 *) k); + memmove_u64s_down(k, (u64 *) k + next_good_key, (u64 *) vstruct_end(i) - (u64 *) k); set_btree_node_need_rewrite(b); } fsck_err: @@ -1186,7 +1186,7 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca, le64_to_cpu(i->journal_seq), b->written, b->written + sectors, ptr_written); - b->written += sectors; + b->written = min(b->written + sectors, btree_sectors(c)); if (blacklisted && !first) continue; diff --git a/fs/bcachefs/btree_key_cache.c b/fs/bcachefs/btree_key_cache.c index 1821f40c161a1..edce594333756 100644 --- a/fs/bcachefs/btree_key_cache.c +++ b/fs/bcachefs/btree_key_cache.c @@ -156,7 +156,7 @@ bkey_cached_alloc(struct btree_trans *trans, struct btree_path *path, unsigned k } if (ck) { - bch2_btree_lock_init(&ck->c, pcpu_readers ? SIX_LOCK_INIT_PCPU : 0); + bch2_btree_lock_init(&ck->c, pcpu_readers ? SIX_LOCK_INIT_PCPU : 0, GFP_KERNEL); ck->c.cached = true; goto lock; } diff --git a/fs/bcachefs/btree_locking.c b/fs/bcachefs/btree_locking.c index 10b805a60f526..caef65adeae49 100644 --- a/fs/bcachefs/btree_locking.c +++ b/fs/bcachefs/btree_locking.c @@ -7,9 +7,10 @@ static struct lock_class_key bch2_btree_node_lock_key; void bch2_btree_lock_init(struct btree_bkey_cached_common *b, - enum six_lock_init_flags flags) + enum six_lock_init_flags flags, + gfp_t gfp) { - __six_lock_init(&b->lock, "b->c.lock", &bch2_btree_node_lock_key, flags); + __six_lock_init(&b->lock, "b->c.lock", &bch2_btree_node_lock_key, flags, gfp); lockdep_set_notrack_class(&b->lock); } diff --git a/fs/bcachefs/btree_locking.h b/fs/bcachefs/btree_locking.h index b54ef48eb8cc2..b33ab7af84402 100644 --- a/fs/bcachefs/btree_locking.h +++ b/fs/bcachefs/btree_locking.h @@ -13,7 +13,7 @@ #include "btree_iter.h" #include "six.h" -void bch2_btree_lock_init(struct btree_bkey_cached_common *, enum six_lock_init_flags); +void bch2_btree_lock_init(struct btree_bkey_cached_common *, enum six_lock_init_flags, gfp_t gfp); void bch2_trans_unlock_noassert(struct btree_trans *); void bch2_trans_unlock_write(struct btree_trans *); diff --git a/fs/bcachefs/btree_update.h b/fs/bcachefs/btree_update.h index 8f22ef9a7651a..47d8690f01bfc 100644 --- a/fs/bcachefs/btree_update.h +++ b/fs/bcachefs/btree_update.h @@ -126,10 +126,18 @@ bch2_trans_jset_entry_alloc(struct btree_trans *trans, unsigned u64s) int bch2_btree_insert_clone_trans(struct btree_trans *, enum btree_id, struct bkey_i *); +int bch2_btree_write_buffer_insert_err(struct btree_trans *, + enum btree_id, struct bkey_i *); + static inline int __must_check bch2_trans_update_buffered(struct btree_trans *trans, enum btree_id btree, struct bkey_i *k) { + if (unlikely(!btree_type_uses_write_buffer(btree))) { + int ret = bch2_btree_write_buffer_insert_err(trans, btree, k); + 
dump_stack(); + return ret; + } /* * Most updates skip the btree write buffer until journal replay is * finished because synchronization with journal replay relies on having diff --git a/fs/bcachefs/btree_write_buffer.c b/fs/bcachefs/btree_write_buffer.c index b56c4987b8c97..2c09d19dd6210 100644 --- a/fs/bcachefs/btree_write_buffer.c +++ b/fs/bcachefs/btree_write_buffer.c @@ -264,6 +264,22 @@ static void move_keys_from_inc_to_flushing(struct btree_write_buffer *wb) BUG_ON(wb->sorted.size < wb->flushing.keys.nr); } +int bch2_btree_write_buffer_insert_err(struct btree_trans *trans, + enum btree_id btree, struct bkey_i *k) +{ + struct bch_fs *c = trans->c; + struct printbuf buf = PRINTBUF; + + prt_printf(&buf, "attempting to do write buffer update on non wb btree="); + bch2_btree_id_to_text(&buf, btree); + prt_str(&buf, "\n"); + bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(k)); + + bch2_fs_inconsistent(c, "%s", buf.buf); + printbuf_exit(&buf); + return -EROFS; +} + static int bch2_btree_write_buffer_flush_locked(struct btree_trans *trans) { struct bch_fs *c = trans->c; @@ -312,7 +328,10 @@ static int bch2_btree_write_buffer_flush_locked(struct btree_trans *trans) darray_for_each(wb->sorted, i) { struct btree_write_buffered_key *k = &wb->flushing.keys.data[i->idx]; - BUG_ON(!btree_type_uses_write_buffer(k->btree)); + if (unlikely(!btree_type_uses_write_buffer(k->btree))) { + ret = bch2_btree_write_buffer_insert_err(trans, k->btree, &k->k); + goto err; + } for (struct wb_key_ref *n = i + 1; n < min(i + 4, &darray_top(wb->sorted)); n++) prefetch(&wb->flushing.keys.data[n->idx]); diff --git a/fs/bcachefs/data_update.c b/fs/bcachefs/data_update.c index 337494facac64..642fbc60ecab1 100644 --- a/fs/bcachefs/data_update.c +++ b/fs/bcachefs/data_update.c @@ -340,6 +340,7 @@ static int __bch2_data_update_index_update(struct btree_trans *trans, struct printbuf buf = PRINTBUF; prt_str(&buf, "about to insert invalid key in data update path"); + prt_printf(&buf, "\nop.nonce: %u", m->op.nonce); prt_str(&buf, "\nold: "); bch2_bkey_val_to_text(&buf, c, old); prt_str(&buf, "\nk: "); diff --git a/fs/bcachefs/dirent.h b/fs/bcachefs/dirent.h index a633f83c1ac78..362b3b2f2f2e3 100644 --- a/fs/bcachefs/dirent.h +++ b/fs/bcachefs/dirent.h @@ -31,11 +31,6 @@ static inline unsigned dirent_val_u64s(unsigned len) sizeof(u64)); } -static inline unsigned int dirent_occupied_size(const struct qstr *name) -{ - return (BKEY_U64s + dirent_val_u64s(name->len)) * sizeof(u64); -} - int bch2_dirent_read_target(struct btree_trans *, subvol_inum, struct bkey_s_c_dirent, subvol_inum *); diff --git a/fs/bcachefs/extents.c b/fs/bcachefs/extents.c index 05d5f71a7ca9f..2d8042f853dcd 100644 --- a/fs/bcachefs/extents.c +++ b/fs/bcachefs/extents.c @@ -99,7 +99,7 @@ static inline bool ptr_better(struct bch_fs *c, /* Pick at random, biased in favor of the faster device: */ - return bch2_rand_range(l1 + l2) > l1; + return bch2_get_random_u64_below(l1 + l2) > l1; } if (bch2_force_reconstruct_read) diff --git a/fs/bcachefs/extents.h b/fs/bcachefs/extents.h index 620b284aa34f0..204d765dd74c8 100644 --- a/fs/bcachefs/extents.h +++ b/fs/bcachefs/extents.h @@ -704,7 +704,7 @@ static inline bool bch2_extent_ptr_eq(struct bch_extent_ptr ptr1, ptr1.unwritten == ptr2.unwritten && ptr1.offset == ptr2.offset && ptr1.dev == ptr2.dev && - ptr1.dev == ptr2.dev); + ptr1.gen == ptr2.gen); } void bch2_ptr_swab(struct bkey_s); diff --git a/fs/bcachefs/fs-common.c b/fs/bcachefs/fs-common.c index d70d9f634cea9..2c3d46ac70c61 100644 --- a/fs/bcachefs/fs-common.c 
+++ b/fs/bcachefs/fs-common.c @@ -152,7 +152,6 @@ int bch2_create_trans(struct btree_trans *trans, if (is_subdir_for_nlink(new_inode)) dir_u->bi_nlink++; dir_u->bi_mtime = dir_u->bi_ctime = now; - dir_u->bi_size += dirent_occupied_size(name); ret = bch2_inode_write(trans, &dir_iter, dir_u); if (ret) @@ -221,7 +220,6 @@ int bch2_link_trans(struct btree_trans *trans, } dir_u->bi_mtime = dir_u->bi_ctime = now; - dir_u->bi_size += dirent_occupied_size(name); dir_hash = bch2_hash_info_init(c, dir_u); @@ -324,7 +322,6 @@ int bch2_unlink_trans(struct btree_trans *trans, dir_u->bi_mtime = dir_u->bi_ctime = inode_u->bi_ctime = now; dir_u->bi_nlink -= is_subdir_for_nlink(inode_u); - dir_u->bi_size -= dirent_occupied_size(name); ret = bch2_hash_delete_at(trans, bch2_dirent_hash_desc, &dir_hash, &dirent_iter, @@ -463,14 +460,6 @@ int bch2_rename_trans(struct btree_trans *trans, goto err; } - if (mode == BCH_RENAME) { - src_dir_u->bi_size -= dirent_occupied_size(src_name); - dst_dir_u->bi_size += dirent_occupied_size(dst_name); - } - - if (mode == BCH_RENAME_OVERWRITE) - src_dir_u->bi_size -= dirent_occupied_size(src_name); - if (src_inode_u->bi_parent_subvol) src_inode_u->bi_parent_subvol = dst_dir.subvol; diff --git a/fs/bcachefs/fs-io.c b/fs/bcachefs/fs-io.c index 94bf34b9b65f0..717e7b94c66f8 100644 --- a/fs/bcachefs/fs-io.c +++ b/fs/bcachefs/fs-io.c @@ -466,6 +466,7 @@ int bchfs_truncate(struct mnt_idmap *idmap, ret = bch2_truncate_folio(inode, iattr->ia_size); if (unlikely(ret < 0)) goto err; + ret = 0; truncate_setsize(&inode->v, iattr->ia_size); diff --git a/fs/bcachefs/fsck.c b/fs/bcachefs/fsck.c index 9bf316e7b845d..0e85131d0af88 100644 --- a/fs/bcachefs/fsck.c +++ b/fs/bcachefs/fsck.c @@ -1978,31 +1978,10 @@ static int check_subdir_count_notnested(struct btree_trans *trans, struct inode_ return ret; } -static int check_dir_i_size_notnested(struct btree_trans *trans, struct inode_walker *w) -{ - struct bch_fs *c = trans->c; - int ret = 0; - - darray_for_each(w->inodes, i) - if (fsck_err_on(i->inode.bi_size != i->i_size, - trans, inode_dir_wrong_nlink, - "directory %llu:%u with wrong i_size: got %llu, should be %llu", - w->last_pos.inode, i->snapshot, i->inode.bi_size, i->i_size)) { - i->inode.bi_size = i->i_size; - ret = bch2_fsck_write_inode(trans, &i->inode); - if (ret) - break; - } -fsck_err: - bch_err_fn(c, ret); - return ret; -} - static int check_subdir_dirents_count(struct btree_trans *trans, struct inode_walker *w) { u32 restart_count = trans->restart_count; return check_subdir_count_notnested(trans, w) ?: - check_dir_i_size_notnested(trans, w) ?: trans_was_restarted(trans, restart_count); } diff --git a/fs/bcachefs/inode.c b/fs/bcachefs/inode.c index 04ec05206f8cf..339b80770f1dd 100644 --- a/fs/bcachefs/inode.c +++ b/fs/bcachefs/inode.c @@ -1198,6 +1198,7 @@ void bch2_inode_opts_get(struct bch_io_opts *opts, struct bch_fs *c, opts->_name##_from_inode = true; \ } else { \ opts->_name = c->opts._name; \ + opts->_name##_from_inode = false; \ } BCH_INODE_OPTS() #undef x diff --git a/fs/bcachefs/io_read.c b/fs/bcachefs/io_read.c index 8c7b2d3d779df..aa91fcf51eecc 100644 --- a/fs/bcachefs/io_read.c +++ b/fs/bcachefs/io_read.c @@ -59,7 +59,7 @@ static bool bch2_target_congested(struct bch_fs *c, u16 target) } rcu_read_unlock(); - return bch2_rand_range(nr * CONGESTED_MAX) < total; + return get_random_u32_below(nr * CONGESTED_MAX) < total; } #else @@ -951,12 +951,6 @@ int __bch2_read_extent(struct btree_trans *trans, struct bch_read_bio *orig, goto retry_pick; } - /* - * Unlock the iterator 
while the btree node's lock is still in - * cache, before doing the IO: - */ - bch2_trans_unlock(trans); - if (flags & BCH_READ_NODECODE) { /* * can happen if we retry, and the extent we were going to read @@ -1113,6 +1107,15 @@ int __bch2_read_extent(struct btree_trans *trans, struct bch_read_bio *orig, trace_and_count(c, read_split, &orig->bio); } + /* + * Unlock the iterator while the btree node's lock is still in + * cache, before doing the IO: + */ + if (!(flags & BCH_READ_IN_RETRY)) + bch2_trans_unlock(trans); + else + bch2_trans_unlock_long(trans); + if (!rbio->pick.idx) { if (unlikely(!rbio->have_ioref)) { struct printbuf buf = PRINTBUF; @@ -1160,6 +1163,8 @@ int __bch2_read_extent(struct btree_trans *trans, struct bch_read_bio *orig, if (likely(!(flags & BCH_READ_IN_RETRY))) { return 0; } else { + bch2_trans_unlock(trans); + int ret; rbio->context = RBIO_CONTEXT_UNBOUND; diff --git a/fs/bcachefs/journal.c b/fs/bcachefs/journal.c index 24c294d4634e0..05b1250619ecc 100644 --- a/fs/bcachefs/journal.c +++ b/fs/bcachefs/journal.c @@ -1021,8 +1021,8 @@ struct journal_buf *bch2_next_write_buffer_flush_journal_buf(struct journal *j, /* allocate journal on a device: */ -static int __bch2_set_nr_journal_buckets(struct bch_dev *ca, unsigned nr, - bool new_fs, struct closure *cl) +static int bch2_set_nr_journal_buckets_iter(struct bch_dev *ca, unsigned nr, + bool new_fs, struct closure *cl) { struct bch_fs *c = ca->fs; struct journal_device *ja = &ca->journal; @@ -1150,26 +1150,20 @@ static int __bch2_set_nr_journal_buckets(struct bch_dev *ca, unsigned nr, return ret; } -/* - * Allocate more journal space at runtime - not currently making use if it, but - * the code works: - */ -int bch2_set_nr_journal_buckets(struct bch_fs *c, struct bch_dev *ca, - unsigned nr) +static int bch2_set_nr_journal_buckets_loop(struct bch_fs *c, struct bch_dev *ca, + unsigned nr, bool new_fs) { struct journal_device *ja = &ca->journal; - struct closure cl; int ret = 0; + struct closure cl; closure_init_stack(&cl); - down_write(&c->state_lock); - /* don't handle reducing nr of buckets yet: */ if (nr < ja->nr) - goto unlock; + return 0; - while (ja->nr < nr) { + while (!ret && ja->nr < nr) { struct disk_reservation disk_res = { 0, 0, 0 }; /* @@ -1182,25 +1176,38 @@ int bch2_set_nr_journal_buckets(struct bch_fs *c, struct bch_dev *ca, * filesystem-wide allocation will succeed, this is a device * specific allocation - we can hang here: */ + if (!new_fs) { + ret = bch2_disk_reservation_get(c, &disk_res, + bucket_to_sector(ca, nr - ja->nr), 1, 0); + if (ret) + break; + } - ret = bch2_disk_reservation_get(c, &disk_res, - bucket_to_sector(ca, nr - ja->nr), 1, 0); - if (ret) - break; + ret = bch2_set_nr_journal_buckets_iter(ca, nr, new_fs, &cl); - ret = __bch2_set_nr_journal_buckets(ca, nr, false, &cl); + if (ret == -BCH_ERR_bucket_alloc_blocked || + ret == -BCH_ERR_open_buckets_empty) + ret = 0; /* wait and retry */ bch2_disk_reservation_put(c, &disk_res); - closure_sync(&cl); - - if (ret && ret != -BCH_ERR_bucket_alloc_blocked) - break; } - bch_err_fn(c, ret); -unlock: + return ret; +} + +/* + * Allocate more journal space at runtime - not currently making use if it, but + * the code works: + */ +int bch2_set_nr_journal_buckets(struct bch_fs *c, struct bch_dev *ca, + unsigned nr) +{ + down_write(&c->state_lock); + int ret = bch2_set_nr_journal_buckets_loop(c, ca, nr, false); up_write(&c->state_lock); + + bch_err_fn(c, ret); return ret; } @@ -1226,7 +1233,7 @@ int bch2_dev_journal_alloc(struct bch_dev *ca, bool new_fs) 
min(1 << 13, (1 << 24) / ca->mi.bucket_size)); - ret = __bch2_set_nr_journal_buckets(ca, nr, new_fs, NULL); + ret = bch2_set_nr_journal_buckets_loop(ca->fs, ca, nr, new_fs); err: bch_err_fn(ca, ret); return ret; diff --git a/fs/bcachefs/movinggc.c b/fs/bcachefs/movinggc.c index 21805509ab9e6..6718dc37c5a35 100644 --- a/fs/bcachefs/movinggc.c +++ b/fs/bcachefs/movinggc.c @@ -74,20 +74,14 @@ static int bch2_bucket_is_movable(struct btree_trans *trans, struct move_bucket *b, u64 time) { struct bch_fs *c = trans->c; - struct btree_iter iter; - struct bkey_s_c k; - struct bch_alloc_v4 _a; - const struct bch_alloc_v4 *a; - int ret; - if (bch2_bucket_is_open(trans->c, - b->k.bucket.inode, - b->k.bucket.offset)) + if (bch2_bucket_is_open(c, b->k.bucket.inode, b->k.bucket.offset)) return 0; - k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_alloc, - b->k.bucket, BTREE_ITER_cached); - ret = bkey_err(k); + struct btree_iter iter; + struct bkey_s_c k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_alloc, + b->k.bucket, BTREE_ITER_cached); + int ret = bkey_err(k); if (ret) return ret; @@ -95,13 +89,18 @@ static int bch2_bucket_is_movable(struct btree_trans *trans, if (!ca) goto out; - a = bch2_alloc_to_v4(k, &_a); + if (ca->mi.state != BCH_MEMBER_STATE_rw || + !bch2_dev_is_online(ca)) + goto out_put; + + struct bch_alloc_v4 _a; + const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k, &_a); b->k.gen = a->gen; b->sectors = bch2_bucket_sectors_dirty(*a); u64 lru_idx = alloc_lru_idx_fragmentation(*a, ca); ret = lru_idx && lru_idx <= time; - +out_put: bch2_dev_put(ca); out: bch2_trans_iter_exit(trans, &iter); diff --git a/fs/bcachefs/sb-downgrade.c b/fs/bcachefs/sb-downgrade.c index 35e07bc8fbd34..051214fdc7352 100644 --- a/fs/bcachefs/sb-downgrade.c +++ b/fs/bcachefs/sb-downgrade.c @@ -90,10 +90,7 @@ BIT_ULL(BCH_RECOVERY_PASS_check_allocations), \ BCH_FSCK_ERR_accounting_mismatch, \ BCH_FSCK_ERR_accounting_key_replicas_nr_devs_0, \ - BCH_FSCK_ERR_accounting_key_junk_at_end) \ - x(directory_size, \ - BIT_ULL(BCH_RECOVERY_PASS_check_dirents), \ - BCH_FSCK_ERR_directory_size_mismatch) \ + BCH_FSCK_ERR_accounting_key_junk_at_end) #define DOWNGRADE_TABLE() \ x(bucket_stripe_sectors, \ diff --git a/fs/bcachefs/six.c b/fs/bcachefs/six.c index 7e7c66a1e1a6b..7c403427fbdb8 100644 --- a/fs/bcachefs/six.c +++ b/fs/bcachefs/six.c @@ -850,7 +850,8 @@ void six_lock_exit(struct six_lock *lock) EXPORT_SYMBOL_GPL(six_lock_exit); void __six_lock_init(struct six_lock *lock, const char *name, - struct lock_class_key *key, enum six_lock_init_flags flags) + struct lock_class_key *key, enum six_lock_init_flags flags, + gfp_t gfp) { atomic_set(&lock->state, 0); raw_spin_lock_init(&lock->wait_lock); @@ -873,7 +874,7 @@ void __six_lock_init(struct six_lock *lock, const char *name, * failure if they wish by checking lock->readers, but generally * will not want to treat it as an error. */ - lock->readers = alloc_percpu(unsigned); + lock->readers = alloc_percpu_gfp(unsigned, gfp); } #endif } diff --git a/fs/bcachefs/six.h b/fs/bcachefs/six.h index c142e06b7a3a7..59b851cf8bacc 100644 --- a/fs/bcachefs/six.h +++ b/fs/bcachefs/six.h @@ -164,18 +164,19 @@ enum six_lock_init_flags { }; void __six_lock_init(struct six_lock *lock, const char *name, - struct lock_class_key *key, enum six_lock_init_flags flags); + struct lock_class_key *key, enum six_lock_init_flags flags, + gfp_t gfp); /** * six_lock_init - initialize a six lock * @lock: lock to initialize * @flags: optional flags, i.e. 
SIX_LOCK_INIT_PCPU */ -#define six_lock_init(lock, flags) \ +#define six_lock_init(lock, flags, gfp) \ do { \ static struct lock_class_key __key; \ \ - __six_lock_init((lock), #lock, &__key, flags); \ + __six_lock_init((lock), #lock, &__key, flags, gfp); \ } while (0) /** diff --git a/fs/bcachefs/super-io.c b/fs/bcachefs/super-io.c index 8037ccbacf6af..a81a7b6c09897 100644 --- a/fs/bcachefs/super-io.c +++ b/fs/bcachefs/super-io.c @@ -69,14 +69,20 @@ enum bcachefs_metadata_version bch2_latest_compatible_version(enum bcachefs_meta return v; } -void bch2_set_version_incompat(struct bch_fs *c, enum bcachefs_metadata_version version) +bool bch2_set_version_incompat(struct bch_fs *c, enum bcachefs_metadata_version version) { - mutex_lock(&c->sb_lock); - SET_BCH_SB_VERSION_INCOMPAT(c->disk_sb.sb, - max(BCH_SB_VERSION_INCOMPAT(c->disk_sb.sb), version)); - c->disk_sb.sb->features[0] |= cpu_to_le64(BCH_FEATURE_incompat_version_field); - bch2_write_super(c); - mutex_unlock(&c->sb_lock); + bool ret = (c->sb.features & BIT_ULL(BCH_FEATURE_incompat_version_field)) && + version <= c->sb.version_incompat_allowed; + + if (ret) { + mutex_lock(&c->sb_lock); + SET_BCH_SB_VERSION_INCOMPAT(c->disk_sb.sb, + max(BCH_SB_VERSION_INCOMPAT(c->disk_sb.sb), version)); + bch2_write_super(c); + mutex_unlock(&c->sb_lock); + } + + return ret; } const char * const bch2_sb_fields[] = { @@ -1219,9 +1225,11 @@ void bch2_sb_upgrade(struct bch_fs *c, unsigned new_version, bool incompat) c->disk_sb.sb->version = cpu_to_le16(new_version); c->disk_sb.sb->features[0] |= cpu_to_le64(BCH_SB_FEATURES_ALL); - if (incompat) + if (incompat) { SET_BCH_SB_VERSION_INCOMPAT_ALLOWED(c->disk_sb.sb, max(BCH_SB_VERSION_INCOMPAT_ALLOWED(c->disk_sb.sb), new_version)); + c->disk_sb.sb->features[0] |= cpu_to_le64(BCH_FEATURE_incompat_version_field); + } } static int bch2_sb_ext_validate(struct bch_sb *sb, struct bch_sb_field *f, diff --git a/fs/bcachefs/super-io.h b/fs/bcachefs/super-io.h index f1ab4f9437203..b4cff9ebdebbf 100644 --- a/fs/bcachefs/super-io.h +++ b/fs/bcachefs/super-io.h @@ -21,17 +21,14 @@ static inline bool bch2_version_compatible(u16 version) void bch2_version_to_text(struct printbuf *, enum bcachefs_metadata_version); enum bcachefs_metadata_version bch2_latest_compatible_version(enum bcachefs_metadata_version); -void bch2_set_version_incompat(struct bch_fs *, enum bcachefs_metadata_version); +bool bch2_set_version_incompat(struct bch_fs *, enum bcachefs_metadata_version); static inline bool bch2_request_incompat_feature(struct bch_fs *c, enum bcachefs_metadata_version version) { - if (unlikely(version > c->sb.version_incompat)) { - if (version > c->sb.version_incompat_allowed) - return false; - bch2_set_version_incompat(c, version); - } - return true; + return likely(version <= c->sb.version_incompat) + ? 
true + : bch2_set_version_incompat(c, version); } static inline size_t bch2_sb_field_bytes(struct bch_sb_field *f) diff --git a/fs/bcachefs/super.c b/fs/bcachefs/super.c index 6d97d412fed98..0459c875e189b 100644 --- a/fs/bcachefs/super.c +++ b/fs/bcachefs/super.c @@ -1811,7 +1811,11 @@ int bch2_dev_add(struct bch_fs *c, const char *path) goto err_late; up_write(&c->state_lock); - return 0; +out: + printbuf_exit(&label); + printbuf_exit(&errbuf); + bch_err_fn(c, ret); + return ret; err_unlock: mutex_unlock(&c->sb_lock); @@ -1820,10 +1824,7 @@ int bch2_dev_add(struct bch_fs *c, const char *path) if (ca) bch2_dev_free(ca); bch2_free_super(&sb); - printbuf_exit(&label); - printbuf_exit(&errbuf); - bch_err_fn(c, ret); - return ret; + goto out; err_late: up_write(&c->state_lock); ca = NULL; diff --git a/fs/bcachefs/util.c b/fs/bcachefs/util.c index e0a876cbaa6b7..da2cd11b3025d 100644 --- a/fs/bcachefs/util.c +++ b/fs/bcachefs/util.c @@ -653,19 +653,25 @@ int bch2_bio_alloc_pages(struct bio *bio, size_t size, gfp_t gfp_mask) return 0; } -size_t bch2_rand_range(size_t max) +u64 bch2_get_random_u64_below(u64 ceil) { - size_t rand; + if (ceil <= U32_MAX) + return __get_random_u32_below(ceil); - if (!max) - return 0; + /* this is the same (clever) algorithm as in __get_random_u32_below() */ + u64 rand = get_random_u64(); + u64 mult = ceil * rand; - do { - rand = get_random_long(); - rand &= roundup_pow_of_two(max) - 1; - } while (rand >= max); + if (unlikely(mult < ceil)) { + u64 bound; + div64_u64_rem(-ceil, ceil, &bound); + while (unlikely(mult < bound)) { + rand = get_random_u64(); + mult = ceil * rand; + } + } - return rand; + return mul_u64_u64_shr(ceil, rand, 64); } void memcpy_to_bio(struct bio *dst, struct bvec_iter dst_iter, const void *src) diff --git a/fs/bcachefs/util.h b/fs/bcachefs/util.h index e7c3541b38f3f..f4a4783219d9d 100644 --- a/fs/bcachefs/util.h +++ b/fs/bcachefs/util.h @@ -401,7 +401,7 @@ do { \ _ret; \ }) -size_t bch2_rand_range(size_t); +u64 bch2_get_random_u64_below(u64); void memcpy_to_bio(struct bio *, struct bvec_iter, const void *); void memcpy_from_bio(void *, struct bio *, struct bvec_iter); diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c index 67ce85ff0ae25..7f46abbd6311b 100644 --- a/fs/btrfs/extent_map.c +++ b/fs/btrfs/extent_map.c @@ -1128,6 +1128,8 @@ static long btrfs_scan_inode(struct btrfs_inode *inode, struct btrfs_em_shrink_c long nr_dropped = 0; struct rb_node *node; + lockdep_assert_held_write(&tree->lock); + /* * Take the mmap lock so that we serialize with the inode logging phase * of fsync because we may need to set the full sync flag on the inode, @@ -1139,28 +1141,12 @@ static long btrfs_scan_inode(struct btrfs_inode *inode, struct btrfs_em_shrink_c * to find new extents, which may not be there yet because ordered * extents haven't completed yet. * - * We also do a try lock because otherwise we could deadlock. This is - * because the shrinker for this filesystem may be invoked while we are - * in a path that is holding the mmap lock in write mode. For example in - * a reflink operation while COWing an extent buffer, when allocating - * pages for a new extent buffer and under memory pressure, the shrinker - * may be invoked, and therefore we would deadlock by attempting to read - * lock the mmap lock while we are holding already a write lock on it. + * We also do a try lock because we don't want to block for too long and + * we are holding the extent map tree's lock in write mode. 
*/ if (!down_read_trylock(&inode->i_mmap_lock)) return 0; - /* - * We want to be fast so if the lock is busy we don't want to spend time - * waiting for it - either some task is about to do IO for the inode or - * we may have another task shrinking extent maps, here in this code, so - * skip this inode. - */ - if (!write_trylock(&tree->lock)) { - up_read(&inode->i_mmap_lock); - return 0; - } - node = rb_first(&tree->root); while (node) { struct rb_node *next = rb_next(node); @@ -1201,12 +1187,61 @@ static long btrfs_scan_inode(struct btrfs_inode *inode, struct btrfs_em_shrink_c break; node = next; } - write_unlock(&tree->lock); up_read(&inode->i_mmap_lock); return nr_dropped; } +static struct btrfs_inode *find_first_inode_to_shrink(struct btrfs_root *root, + u64 min_ino) +{ + struct btrfs_inode *inode; + unsigned long from = min_ino; + + xa_lock(&root->inodes); + while (true) { + struct extent_map_tree *tree; + + inode = xa_find(&root->inodes, &from, ULONG_MAX, XA_PRESENT); + if (!inode) + break; + + tree = &inode->extent_tree; + + /* + * We want to be fast so if the lock is busy we don't want to + * spend time waiting for it (some task is about to do IO for + * the inode). + */ + if (!write_trylock(&tree->lock)) + goto next; + + /* + * Skip inode if it doesn't have loaded extent maps, so we avoid + * getting a reference and doing an iput later. This includes + * cases like files that were opened for things like stat(2), or + * files with all extent maps previously released through the + * release folio callback (btrfs_release_folio()) or released in + * a previous run, or directories which never have extent maps. + */ + if (RB_EMPTY_ROOT(&tree->root)) { + write_unlock(&tree->lock); + goto next; + } + + if (igrab(&inode->vfs_inode)) + break; + + write_unlock(&tree->lock); +next: + from = btrfs_ino(inode) + 1; + cond_resched_lock(&root->inodes.xa_lock); + } + xa_unlock(&root->inodes); + + return inode; +} + static long btrfs_scan_root(struct btrfs_root *root, struct btrfs_em_shrink_ctx *ctx) { struct btrfs_fs_info *fs_info = root->fs_info; @@ -1214,21 +1249,21 @@ static long btrfs_scan_root(struct btrfs_root *root, struct btrfs_em_shrink_ctx long nr_dropped = 0; u64 min_ino = fs_info->em_shrinker_last_ino + 1; - inode = btrfs_find_first_inode(root, min_ino); + inode = find_first_inode_to_shrink(root, min_ino); while (inode) { nr_dropped += btrfs_scan_inode(inode, ctx); + write_unlock(&inode->extent_tree.lock); min_ino = btrfs_ino(inode) + 1; fs_info->em_shrinker_last_ino = btrfs_ino(inode); - btrfs_add_delayed_iput(inode); + iput(&inode->vfs_inode); - if (ctx->scanned >= ctx->nr_to_scan || - btrfs_fs_closing(inode->root->fs_info)) + if (ctx->scanned >= ctx->nr_to_scan || btrfs_fs_closing(fs_info)) break; cond_resched(); - inode = btrfs_find_first_inode(root, min_ino); + inode = find_first_inode_to_shrink(root, min_ino); } if (inode) { diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index ed3c0d6546c5d..0b568c8d24cbc 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -1090,7 +1090,7 @@ ssize_t btrfs_buffered_write(struct kiocb *iocb, struct iov_iter *i) u64 lockend; size_t num_written = 0; ssize_t ret; - loff_t old_isize = i_size_read(inode); + loff_t old_isize; unsigned int ilock_flags = 0; const bool nowait = (iocb->ki_flags & IOCB_NOWAIT); unsigned int bdp_flags = (nowait ? 
BDP_ASYNC : 0); @@ -1103,6 +1103,13 @@ ssize_t btrfs_buffered_write(struct kiocb *iocb, struct iov_iter *i) if (ret < 0) return ret; + /* + * We can only trust the isize with inode lock held, or it can race with + * other buffered writes and cause incorrect call of + * pagecache_isize_extended() to overwrite existing data. + */ + old_isize = i_size_read(inode); + ret = generic_write_checks(iocb, i); if (ret <= 0) goto out; diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index a9322601ab5c9..38756f8cef463 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -1382,8 +1382,13 @@ static noinline int cow_file_range(struct btrfs_inode *inode, continue; } if (done_offset) { - *done_offset = start - 1; - return 0; + /* + * Move @end to the end of the processed range, + * and exit the loop to unlock the processed extents. + */ + end = start - 1; + ret = 0; + break; } ret = -ENOSPC; } diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c index 53b846d99ecea..14f53f7575553 100644 --- a/fs/btrfs/sysfs.c +++ b/fs/btrfs/sysfs.c @@ -1330,13 +1330,13 @@ MODULE_PARM_DESC(read_policy, int btrfs_read_policy_to_enum(const char *str, s64 *value_ret) { - char param[32] = { 0 }; + char param[32]; char __maybe_unused *value_str; if (!str || strlen(str) == 0) return 0; - strncpy(param, str, sizeof(param) - 1); + strscpy(param, str); #ifdef CONFIG_BTRFS_EXPERIMENTAL /* Separate value from input in policy:value format. */ diff --git a/fs/btrfs/tests/delayed-refs-tests.c b/fs/btrfs/tests/delayed-refs-tests.c index 6558508c2ddf5..265370e79a546 100644 --- a/fs/btrfs/tests/delayed-refs-tests.c +++ b/fs/btrfs/tests/delayed-refs-tests.c @@ -1009,6 +1009,7 @@ int btrfs_test_delayed_refs(u32 sectorsize, u32 nodesize) if (!ret) ret = select_delayed_refs_test(&trans); + kfree(transaction); out_free_fs_info: btrfs_free_dummy_fs_info(fs_info); return ret; diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 0a07764890550..3f8afbd1ebb55 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -7155,6 +7155,7 @@ static int read_one_chunk(struct btrfs_key *key, struct extent_buffer *leaf, btrfs_err(fs_info, "failed to add chunk map, start=%llu len=%llu: %d", map->start, map->chunk_len, ret); + btrfs_free_chunk_map(map); } return ret; @@ -7200,8 +7201,12 @@ static struct btrfs_fs_devices *open_seed_devices(struct btrfs_fs_info *fs_info, fs_devices = find_fsid(fsid, NULL); if (!fs_devices) { - if (!btrfs_test_opt(fs_info, DEGRADED)) + if (!btrfs_test_opt(fs_info, DEGRADED)) { + btrfs_err(fs_info, + "failed to find fsid %pU when attempting to open seed devices", + fsid); return ERR_PTR(-ENOENT); + } fs_devices = alloc_fs_devices(fsid); if (IS_ERR(fs_devices)) diff --git a/fs/coredump.c b/fs/coredump.c index 591700e1b2ce6..4375c70144d0a 100644 --- a/fs/coredump.c +++ b/fs/coredump.c @@ -63,6 +63,7 @@ static void free_vma_snapshot(struct coredump_params *cprm); static int core_uses_pid; static unsigned int core_pipe_limit; +static unsigned int core_sort_vma; static char core_pattern[CORENAME_MAX_SIZE] = "core"; static int core_name_size = CORENAME_MAX_SIZE; unsigned int core_file_note_size_limit = CORE_FILE_NOTE_SIZE_DEFAULT; @@ -1026,6 +1027,15 @@ static const struct ctl_table coredump_sysctls[] = { .extra1 = (unsigned int *)&core_file_note_size_min, .extra2 = (unsigned int *)&core_file_note_size_max, }, + { + .procname = "core_sort_vma", + .data = &core_sort_vma, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_douintvec_minmax, + .extra1 = SYSCTL_ZERO, + .extra2 = SYSCTL_ONE, + }, }; static int __init 
init_fs_coredump_sysctls(void) @@ -1256,8 +1266,9 @@ static bool dump_vma_snapshot(struct coredump_params *cprm) cprm->vma_data_size += m->dump_size; } - sort(cprm->vma_meta, cprm->vma_count, sizeof(*cprm->vma_meta), - cmp_vma_size, NULL); + if (core_sort_vma) + sort(cprm->vma_meta, cprm->vma_count, sizeof(*cprm->vma_meta), + cmp_vma_size, NULL); return true; } diff --git a/fs/efivarfs/file.c b/fs/efivarfs/file.c index cb1b6d0c34545..c294a8fc566da 100644 --- a/fs/efivarfs/file.c +++ b/fs/efivarfs/file.c @@ -57,10 +57,11 @@ static ssize_t efivarfs_file_write(struct file *file, if (bytes == -ENOENT) { /* - * zero size signals to release that the write deleted - * the variable + * FIXME: temporary workaround for fwupdate, signal + * failed write with a 1 to keep created but not + * written files */ - i_size_write(inode, 0); + i_size_write(inode, 1); } else { i_size_write(inode, datasize + sizeof(attributes)); inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode)); @@ -124,7 +125,8 @@ static int efivarfs_file_release(struct inode *inode, struct file *file) struct efivar_entry *var = inode->i_private; inode_lock(inode); - var->removed = (--var->open_count == 0 && i_size_read(inode) == 0); + /* FIXME: temporary work around for fwupdate */ + var->removed = (--var->open_count == 0 && i_size_read(inode) == 1); inode_unlock(inode); if (var->removed) diff --git a/fs/efivarfs/super.c b/fs/efivarfs/super.c index 09fcf731e65d6..0486e9b68bc6e 100644 --- a/fs/efivarfs/super.c +++ b/fs/efivarfs/super.c @@ -367,6 +367,8 @@ static int efivarfs_fill_super(struct super_block *sb, struct fs_context *fc) if (err) return err; + register_pm_notifier(&sfi->pm_nb); + return efivar_init(efivarfs_callback, sb, true); } @@ -419,7 +421,7 @@ static bool efivarfs_actor(struct dir_context *ctx, const char *name, int len, if (err) size = 0; - inode_lock(inode); + inode_lock_nested(inode, I_MUTEX_CHILD); i_size_write(inode, size); inode_unlock(inode); @@ -472,12 +474,25 @@ static int efivarfs_check_missing(efi_char16_t *name16, efi_guid_t vendor, return err; } +static void efivarfs_deactivate_super_work(struct work_struct *work) +{ + struct super_block *s = container_of(work, struct super_block, + destroy_work); + /* + * note: here s->destroy_work is free for reuse (which + * will happen in deactivate_super) + */ + deactivate_super(s); +} + +static struct file_system_type efivarfs_type; + static int efivarfs_pm_notify(struct notifier_block *nb, unsigned long action, void *ptr) { struct efivarfs_fs_info *sfi = container_of(nb, struct efivarfs_fs_info, pm_nb); - struct path path = { .mnt = NULL, .dentry = sfi->sb->s_root, }; + struct path path; struct efivarfs_ctx ectx = { .ctx = { .actor = efivarfs_actor, @@ -485,6 +500,7 @@ static int efivarfs_pm_notify(struct notifier_block *nb, unsigned long action, .sb = sfi->sb, }; struct file *file; + struct super_block *s = sfi->sb; static bool rescan_done = true; if (action == PM_HIBERNATION_PREPARE) { @@ -497,11 +513,43 @@ static int efivarfs_pm_notify(struct notifier_block *nb, unsigned long action, if (rescan_done) return NOTIFY_DONE; + /* ensure single superblock is alive and pin it */ + if (!atomic_inc_not_zero(&s->s_active)) + return NOTIFY_DONE; + pr_info("efivarfs: resyncing variable state\n"); - /* O_NOATIME is required to prevent oops on NULL mnt */ + path.dentry = sfi->sb->s_root; + + /* + * do not add SB_KERNMOUNT which a single superblock could + * expose to userspace and which also causes MNT_INTERNAL, see + * below + */ + path.mnt = vfs_kern_mount(&efivarfs_type, 
0, + efivarfs_type.name, NULL); + if (IS_ERR(path.mnt)) { + pr_err("efivarfs: internal mount failed\n"); + /* + * We may be the last pinner of the superblock but + * calling efivarfs_kill_sb from within the notifier + * here would deadlock trying to unregister it + */ + INIT_WORK(&s->destroy_work, efivarfs_deactivate_super_work); + schedule_work(&s->destroy_work); + return PTR_ERR(path.mnt); + } + + /* path.mnt now has pin on superblock, so this must be above one */ + atomic_dec(&s->s_active); + file = kernel_file_open(&path, O_RDONLY | O_DIRECTORY | O_NOATIME, current_cred()); + /* + * safe even if last put because no MNT_INTERNAL means this + * will do delayed deactivate_super and not deadlock + */ + mntput(path.mnt); if (IS_ERR(file)) return NOTIFY_DONE; @@ -552,7 +600,6 @@ static int efivarfs_init_fs_context(struct fs_context *fc) sfi->pm_nb.notifier_call = efivarfs_pm_notify; sfi->pm_nb.priority = 0; - register_pm_notifier(&sfi->pm_nb); return 0; } diff --git a/fs/eventpoll.c b/fs/eventpoll.c index 7c0980db77b31..2fecf66661e9a 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c @@ -447,7 +447,7 @@ static bool ep_busy_loop(struct eventpoll *ep, int nonblock) if (!budget) budget = BUSY_POLL_BUDGET; - if (napi_id >= MIN_NAPI_ID && ep_busy_loop_on(ep)) { + if (napi_id_valid(napi_id) && ep_busy_loop_on(ep)) { napi_busy_loop(napi_id, nonblock ? NULL : ep_busy_loop_end, ep, prefer_busy_poll, budget); if (ep_events_available(ep)) @@ -492,7 +492,7 @@ static inline void ep_set_busy_poll_napi_id(struct epitem *epi) * or * Nothing to do if we already have this ID */ - if (napi_id < MIN_NAPI_ID || napi_id == ep->napi_id) + if (!napi_id_valid(napi_id) || napi_id == ep->napi_id) return; /* record NAPI ID for use in next busy poll */ @@ -546,7 +546,7 @@ static void ep_suspend_napi_irqs(struct eventpoll *ep) { unsigned int napi_id = READ_ONCE(ep->napi_id); - if (napi_id >= MIN_NAPI_ID && READ_ONCE(ep->prefer_busy_poll)) + if (napi_id_valid(napi_id) && READ_ONCE(ep->prefer_busy_poll)) napi_suspend_irqs(napi_id); } @@ -554,7 +554,7 @@ static void ep_resume_napi_irqs(struct eventpoll *ep) { unsigned int napi_id = READ_ONCE(ep->napi_id); - if (napi_id >= MIN_NAPI_ID && READ_ONCE(ep->prefer_busy_poll)) + if (napi_id_valid(napi_id) && READ_ONCE(ep->prefer_busy_poll)) napi_resume_irqs(napi_id); } diff --git a/fs/exfat/balloc.c b/fs/exfat/balloc.c index ce9be95c9172f..9ff825f1502d5 100644 --- a/fs/exfat/balloc.c +++ b/fs/exfat/balloc.c @@ -141,7 +141,7 @@ int exfat_set_bitmap(struct inode *inode, unsigned int clu, bool sync) return 0; } -void exfat_clear_bitmap(struct inode *inode, unsigned int clu, bool sync) +int exfat_clear_bitmap(struct inode *inode, unsigned int clu, bool sync) { int i, b; unsigned int ent_idx; @@ -150,13 +150,17 @@ void exfat_clear_bitmap(struct inode *inode, unsigned int clu, bool sync) struct exfat_mount_options *opts = &sbi->options; if (!is_valid_cluster(sbi, clu)) - return; + return -EIO; ent_idx = CLUSTER_TO_BITMAP_ENT(clu); i = BITMAP_OFFSET_SECTOR_INDEX(sb, ent_idx); b = BITMAP_OFFSET_BIT_IN_SECTOR(sb, ent_idx); + if (!test_bit_le(b, sbi->vol_amap[i]->b_data)) + return -EIO; + clear_bit_le(b, sbi->vol_amap[i]->b_data); + exfat_update_bh(sbi->vol_amap[i], sync); if (opts->discard) { @@ -171,6 +175,8 @@ void exfat_clear_bitmap(struct inode *inode, unsigned int clu, bool sync) opts->discard = 0; } } + + return 0; } /* diff --git a/fs/exfat/exfat_fs.h b/fs/exfat/exfat_fs.h index 78be6964a8a08..d30ce18a88b7a 100644 --- a/fs/exfat/exfat_fs.h +++ b/fs/exfat/exfat_fs.h @@ -456,7 +456,7 @@ 
int exfat_count_num_clusters(struct super_block *sb, int exfat_load_bitmap(struct super_block *sb); void exfat_free_bitmap(struct exfat_sb_info *sbi); int exfat_set_bitmap(struct inode *inode, unsigned int clu, bool sync); -void exfat_clear_bitmap(struct inode *inode, unsigned int clu, bool sync); +int exfat_clear_bitmap(struct inode *inode, unsigned int clu, bool sync); unsigned int exfat_find_free_bitmap(struct super_block *sb, unsigned int clu); int exfat_count_used_clusters(struct super_block *sb, unsigned int *ret_count); int exfat_trim_fs(struct inode *inode, struct fstrim_range *range); diff --git a/fs/exfat/fatent.c b/fs/exfat/fatent.c index 9e5492ac409b0..6f3651c6ca91e 100644 --- a/fs/exfat/fatent.c +++ b/fs/exfat/fatent.c @@ -175,6 +175,7 @@ static int __exfat_free_cluster(struct inode *inode, struct exfat_chain *p_chain BITMAP_OFFSET_SECTOR_INDEX(sb, CLUSTER_TO_BITMAP_ENT(clu)); if (p_chain->flags == ALLOC_NO_FAT_CHAIN) { + int err; unsigned int last_cluster = p_chain->dir + p_chain->size - 1; do { bool sync = false; @@ -189,7 +190,9 @@ static int __exfat_free_cluster(struct inode *inode, struct exfat_chain *p_chain cur_cmap_i = next_cmap_i; } - exfat_clear_bitmap(inode, clu, (sync && IS_DIRSYNC(inode))); + err = exfat_clear_bitmap(inode, clu, (sync && IS_DIRSYNC(inode))); + if (err) + break; clu++; num_clusters++; } while (num_clusters < p_chain->size); @@ -210,12 +213,13 @@ static int __exfat_free_cluster(struct inode *inode, struct exfat_chain *p_chain cur_cmap_i = next_cmap_i; } - exfat_clear_bitmap(inode, clu, (sync && IS_DIRSYNC(inode))); + if (exfat_clear_bitmap(inode, clu, (sync && IS_DIRSYNC(inode)))) + break; clu = n_clu; num_clusters++; if (err) - goto dec_used_clus; + break; if (num_clusters >= sbi->num_clusters - EXFAT_FIRST_CLUSTER) { /* @@ -229,7 +233,6 @@ static int __exfat_free_cluster(struct inode *inode, struct exfat_chain *p_chain } while (clu != EXFAT_EOF_CLUSTER); } -dec_used_clus: sbi->used_clusters -= num_clusters; return 0; } diff --git a/fs/exfat/file.c b/fs/exfat/file.c index 05b51e7217838..807349d8ea050 100644 --- a/fs/exfat/file.c +++ b/fs/exfat/file.c @@ -587,7 +587,7 @@ static ssize_t exfat_file_write_iter(struct kiocb *iocb, struct iov_iter *iter) valid_size = ei->valid_size; ret = generic_write_checks(iocb, iter); - if (ret < 0) + if (ret <= 0) goto unlock; if (iocb->ki_flags & IOCB_DIRECT) { diff --git a/fs/exfat/namei.c b/fs/exfat/namei.c index 691dd77b6ab5f..8b30027d82512 100644 --- a/fs/exfat/namei.c +++ b/fs/exfat/namei.c @@ -232,7 +232,7 @@ static int exfat_search_empty_slot(struct super_block *sb, dentry = 0; } - while (dentry + num_entries < total_entries && + while (dentry + num_entries <= total_entries && clu.dir != EXFAT_EOF_CLUSTER) { i = dentry & (dentries_per_clu - 1); @@ -646,6 +646,11 @@ static int exfat_find(struct inode *dir, struct qstr *qname, info->valid_size = le64_to_cpu(ep2->dentry.stream.valid_size); info->size = le64_to_cpu(ep2->dentry.stream.size); + if (unlikely(EXFAT_B_TO_CLU_ROUND_UP(info->size, sbi) > sbi->used_clusters)) { + exfat_fs_error(sb, "data size is invalid(%lld)", info->size); + return -EIO; + } + info->start_clu = le32_to_cpu(ep2->dentry.stream.start_clu); if (!is_valid_cluster(sbi, info->start_clu) && info->size) { exfat_warn(sb, "start_clu is invalid cluster(0x%x)", diff --git a/fs/ext4/file.c b/fs/ext4/file.c index a5205149adba3..3bd96c3d4cd0c 100644 --- a/fs/ext4/file.c +++ b/fs/ext4/file.c @@ -756,9 +756,6 @@ static vm_fault_t ext4_dax_huge_fault(struct vm_fault *vmf, unsigned int order) return 
VM_FAULT_SIGBUS; } } else { - result = filemap_fsnotify_fault(vmf); - if (unlikely(result)) - return result; filemap_invalidate_lock_shared(mapping); } result = dax_iomap_fault(vmf, order, &pfn, &error, &ext4_iomap_ops); diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c index 5b5f789b37eb6..2c3a4d09e500f 100644 --- a/fs/fuse/dev.c +++ b/fs/fuse/dev.c @@ -838,6 +838,12 @@ static int fuse_check_folio(struct folio *folio) return 0; } +/* + * Attempt to steal a page from the splice() pipe and move it into the + * pagecache. If successful, the pointer in @pagep will be updated. The + * folio that was originally in @pagep will lose a reference and the new + * folio returned in @pagep will carry a reference. + */ static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep) { int err; @@ -1451,7 +1457,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos, if (ret < 0) goto out; - if (pipe_occupancy(pipe->head, pipe->tail) + cs.nr_segs > pipe->max_usage) { + if (pipe_buf_usage(pipe) + cs.nr_segs > pipe->max_usage) { ret = -EIO; goto out; } @@ -2101,7 +2107,7 @@ static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe, struct file *out, loff_t *ppos, size_t len, unsigned int flags) { - unsigned int head, tail, mask, count; + unsigned int head, tail, count; unsigned nbuf; unsigned idx; struct pipe_buffer *bufs; @@ -2118,8 +2124,7 @@ static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe, head = pipe->head; tail = pipe->tail; - mask = pipe->ring_size - 1; - count = head - tail; + count = pipe_occupancy(head, tail); bufs = kvmalloc_array(count, sizeof(struct pipe_buffer), GFP_KERNEL); if (!bufs) { @@ -2129,8 +2134,8 @@ static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe, nbuf = 0; rem = 0; - for (idx = tail; idx != head && rem < len; idx++) - rem += pipe->bufs[idx & mask].len; + for (idx = tail; !pipe_empty(head, idx) && rem < len; idx++) + rem += pipe_buf(pipe, idx)->len; ret = -EINVAL; if (rem < len) @@ -2141,10 +2146,10 @@ static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe, struct pipe_buffer *ibuf; struct pipe_buffer *obuf; - if (WARN_ON(nbuf >= count || tail == head)) + if (WARN_ON(nbuf >= count || pipe_empty(head, tail))) goto out_free; - ibuf = &pipe->bufs[tail & mask]; + ibuf = pipe_buf(pipe, tail); obuf = &bufs[nbuf]; if (rem >= ibuf->len) { diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c index 198862b086ff7..3805f9b06c9d2 100644 --- a/fs/fuse/dir.c +++ b/fs/fuse/dir.c @@ -1636,7 +1636,7 @@ static const char *fuse_get_link(struct dentry *dentry, struct inode *inode, goto out_err; if (fc->cache_symlinks) - return page_get_link(dentry, inode, callback); + return page_get_link_raw(dentry, inode, callback); err = -ECHILD; if (!dentry) diff --git a/fs/fuse/file.c b/fs/fuse/file.c index 7d92a54799985..d63e56fd3dd20 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -955,8 +955,10 @@ static void fuse_readpages_end(struct fuse_mount *fm, struct fuse_args *args, fuse_invalidate_atime(inode); } - for (i = 0; i < ap->num_folios; i++) + for (i = 0; i < ap->num_folios; i++) { folio_end_read(ap->folios[i], !err); + folio_put(ap->folios[i]); + } if (ia->ff) fuse_file_put(ia->ff, false); @@ -1048,7 +1050,14 @@ static void fuse_readahead(struct readahead_control *rac) ap = &ia->ap; while (ap->num_folios < cur_pages) { - folio = readahead_folio(rac); + /* + * This returns a folio with a ref held on it. 
+ * The ref needs to be held until the request is + * completed, since the splice case (see + * fuse_try_move_page()) drops the ref after it's + * replaced in the page cache. + */ + folio = __readahead_folio(rac); ap->folios[ap->num_folios] = folio; ap->descs[ap->num_folios].length = folio_size(folio); ap->num_folios++; diff --git a/fs/iomap/direct-io.c b/fs/iomap/direct-io.c index b521eb15759e8..0e47da82b0c24 100644 --- a/fs/iomap/direct-io.c +++ b/fs/iomap/direct-io.c @@ -427,12 +427,10 @@ static loff_t iomap_dio_bio_iter(const struct iomap_iter *iter, bio_put(bio); goto zero_tail; } - if (dio->flags & IOMAP_DIO_WRITE) { + if (dio->flags & IOMAP_DIO_WRITE) task_io_account_write(n); - } else { - if (dio->flags & IOMAP_DIO_DIRTY) - bio_set_pages_dirty(bio); - } + else if (dio->flags & IOMAP_DIO_DIRTY) + bio_set_pages_dirty(bio); dio->size += n; copied += n; diff --git a/fs/namei.c b/fs/namei.c index 3ab9440c5b931..ecb7b95c2ca33 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -5356,10 +5356,9 @@ const char *vfs_get_link(struct dentry *dentry, struct delayed_call *done) EXPORT_SYMBOL(vfs_get_link); /* get the link contents into pagecache */ -const char *page_get_link(struct dentry *dentry, struct inode *inode, - struct delayed_call *callback) +static char *__page_get_link(struct dentry *dentry, struct inode *inode, + struct delayed_call *callback) { - char *kaddr; struct page *page; struct address_space *mapping = inode->i_mapping; @@ -5378,8 +5377,23 @@ const char *page_get_link(struct dentry *dentry, struct inode *inode, } set_delayed_call(callback, page_put_link, page); BUG_ON(mapping_gfp_mask(mapping) & __GFP_HIGHMEM); - kaddr = page_address(page); - nd_terminate_link(kaddr, inode->i_size, PAGE_SIZE - 1); + return page_address(page); +} + +const char *page_get_link_raw(struct dentry *dentry, struct inode *inode, + struct delayed_call *callback) +{ + return __page_get_link(dentry, inode, callback); +} +EXPORT_SYMBOL_GPL(page_get_link_raw); + +const char *page_get_link(struct dentry *dentry, struct inode *inode, + struct delayed_call *callback) +{ + char *kaddr = __page_get_link(dentry, inode, callback); + + if (!IS_ERR(kaddr)) + nd_terminate_link(kaddr, inode->i_size, PAGE_SIZE - 1); return kaddr; } diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c index 035ba52742a50..4db912f562305 100644 --- a/fs/nfs/delegation.c +++ b/fs/nfs/delegation.c @@ -780,6 +780,43 @@ int nfs4_inode_return_delegation(struct inode *inode) return 0; } +/** + * nfs4_inode_set_return_delegation_on_close - asynchronously return a delegation + * @inode: inode to process + * + * This routine is called to request that the delegation be returned as soon + * as the file is closed. If the file is already closed, the delegation is + * immediately returned. 
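The logic documented in this kerneldoc either starts the delegation return immediately (no remaining opens, guarded by test_and_set_bit on NFS_DELEGATION_RETURNING) or records NFS_DELEGATION_RETURN_IF_CLOSED for the final close to act on. A user-space sketch of that return-now-or-defer pattern, with hypothetical names and plain C11 atomics standing in for the delegation spinlock:

#include <stdatomic.h>
#include <stdio.h>

enum { DELEG_RETURNING = 1 << 0, DELEG_RETURN_IF_CLOSED = 1 << 1 };

struct delegation {
	atomic_int flags;
	int open_count;	/* stands in for the open_files list */
};

static void set_return_on_close(struct delegation *d)
{
	if (d->open_count == 0) {
		/* test-and-set guard: only one caller starts the return */
		if (!(atomic_fetch_or(&d->flags, DELEG_RETURNING) & DELEG_RETURNING))
			printf("returning delegation now\n");
	} else {
		atomic_fetch_or(&d->flags, DELEG_RETURN_IF_CLOSED);
		printf("deferring return to last close\n");
	}
}

int main(void)
{
	struct delegation d = { .open_count = 1 };
	set_return_on_close(&d);	/* defers */
	d.open_count = 0;
	set_return_on_close(&d);	/* returns now */
	return 0;
}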
+ */ +void nfs4_inode_set_return_delegation_on_close(struct inode *inode) +{ + struct nfs_delegation *delegation; + struct nfs_delegation *ret = NULL; + + if (!inode) + return; + rcu_read_lock(); + delegation = nfs4_get_valid_delegation(inode); + if (!delegation) + goto out; + spin_lock(&delegation->lock); + if (!delegation->inode) + goto out_unlock; + if (list_empty(&NFS_I(inode)->open_files) && + !test_and_set_bit(NFS_DELEGATION_RETURNING, &delegation->flags)) { + /* Refcount matched in nfs_end_delegation_return() */ + ret = nfs_get_delegation(delegation); + } else + set_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags); +out_unlock: + spin_unlock(&delegation->lock); + if (ret) + nfs_clear_verifier_delegated(inode); +out: + rcu_read_unlock(); + nfs_end_delegation_return(inode, ret, 0); +} + /** * nfs4_inode_return_delegation_on_close - asynchronously return a delegation * @inode: inode to process diff --git a/fs/nfs/delegation.h b/fs/nfs/delegation.h index 71524d34ed207..8ff5ab9c5c256 100644 --- a/fs/nfs/delegation.h +++ b/fs/nfs/delegation.h @@ -49,6 +49,7 @@ void nfs_inode_reclaim_delegation(struct inode *inode, const struct cred *cred, unsigned long pagemod_limit, u32 deleg_type); int nfs4_inode_return_delegation(struct inode *inode); void nfs4_inode_return_delegation_on_close(struct inode *inode); +void nfs4_inode_set_return_delegation_on_close(struct inode *inode); int nfs_async_inode_return_delegation(struct inode *inode, const nfs4_stateid *stateid); void nfs_inode_evict_delegation(struct inode *inode); diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c index f45beea92d034..f32f8d7c9122b 100644 --- a/fs/nfs/direct.c +++ b/fs/nfs/direct.c @@ -56,6 +56,7 @@ #include #include +#include "delegation.h" #include "internal.h" #include "iostat.h" #include "pnfs.h" @@ -130,6 +131,20 @@ static void nfs_direct_truncate_request(struct nfs_direct_req *dreq, dreq->count = req_start; } +static void nfs_direct_file_adjust_size_locked(struct inode *inode, + loff_t offset, size_t count) +{ + loff_t newsize = offset + (loff_t)count; + loff_t oldsize = i_size_read(inode); + + if (newsize > oldsize) { + i_size_write(inode, newsize); + NFS_I(inode)->cache_validity &= ~NFS_INO_INVALID_SIZE; + trace_nfs_size_grow(inode, newsize); + nfs_inc_stats(inode, NFSIOS_EXTENDWRITE); + } +} + /** * nfs_swap_rw - NFS address space operation for swap I/O * @iocb: target I/O control block @@ -272,6 +287,8 @@ static void nfs_direct_read_completion(struct nfs_pgio_header *hdr) nfs_direct_count_bytes(dreq, hdr); spin_unlock(&dreq->lock); + nfs_update_delegated_atime(dreq->inode); + while (!list_empty(&hdr->pages)) { struct nfs_page *req = nfs_list_entry(hdr->pages.next); struct page *page = req->wb_page; @@ -741,6 +758,7 @@ static void nfs_direct_write_completion(struct nfs_pgio_header *hdr) struct nfs_direct_req *dreq = hdr->dreq; struct nfs_commit_info cinfo; struct nfs_page *req = nfs_list_entry(hdr->pages.next); + struct inode *inode = dreq->inode; int flags = NFS_ODIRECT_DONE; trace_nfs_direct_write_completion(dreq); @@ -762,6 +780,11 @@ static void nfs_direct_write_completion(struct nfs_pgio_header *hdr) } spin_unlock(&dreq->lock); + spin_lock(&inode->i_lock); + nfs_direct_file_adjust_size_locked(inode, dreq->io_start, dreq->count); + nfs_update_delegated_mtime_locked(dreq->inode); + spin_unlock(&inode->i_lock); + while (!list_empty(&hdr->pages)) { req = nfs_list_entry(hdr->pages.next); diff --git a/fs/nfs/file.c b/fs/nfs/file.c index 1bb646752e466..033feeab8c346 100644 --- a/fs/nfs/file.c +++ b/fs/nfs/file.c @@ 
-29,6 +29,7 @@ #include #include #include +#include #include #include @@ -457,7 +458,7 @@ static bool nfs_release_folio(struct folio *folio, gfp_t gfp) /* If the private flag is set, then the folio is not freeable */ if (folio_test_private(folio)) { if ((current_gfp_context(gfp) & GFP_KERNEL) != GFP_KERNEL || - current_is_kswapd()) + current_is_kswapd() || current_is_kcompactd()) return false; if (nfs_wb_folio(folio->mapping->host, folio) < 0) return false; diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index df9669d4ded7f..6e95db6c17e92 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -133,6 +133,7 @@ nfs4_label_init_security(struct inode *dir, struct dentry *dentry, if (err) return NULL; + label->lsmid = shim.id; label->label = shim.context; label->len = shim.len; return label; @@ -145,7 +146,7 @@ nfs4_label_release_security(struct nfs4_label *label) if (label) { shim.context = label->label; shim.len = label->len; - shim.id = LSM_ID_UNDEF; + shim.id = label->lsmid; security_release_secctx(&shim); } } @@ -3906,8 +3907,11 @@ nfs4_atomic_open(struct inode *dir, struct nfs_open_context *ctx, static void nfs4_close_context(struct nfs_open_context *ctx, int is_sync) { + struct dentry *dentry = ctx->dentry; if (ctx->state == NULL) return; + if (dentry->d_flags & DCACHE_NFSFS_RENAMED) + nfs4_inode_set_return_delegation_on_close(d_inode(dentry)); if (is_sync) nfs4_close_sync(ctx->state, _nfs4_ctx_to_openmode(ctx)); else @@ -6269,7 +6273,7 @@ static int _nfs4_get_security_label(struct inode *inode, void *buf, size_t buflen) { struct nfs_server *server = NFS_SERVER(inode); - struct nfs4_label label = {0, 0, buflen, buf}; + struct nfs4_label label = {0, 0, 0, buflen, buf}; u32 bitmask[3] = { 0, 0, FATTR4_WORD2_SECURITY_LABEL }; struct nfs_fattr fattr = { @@ -6374,7 +6378,7 @@ static int nfs4_do_set_security_label(struct inode *inode, static int nfs4_set_security_label(struct inode *inode, const void *buf, size_t buflen) { - struct nfs4_label ilabel = {0, 0, buflen, (char *)buf }; + struct nfs4_label ilabel = {0, 0, 0, buflen, (char *)buf }; struct nfs_fattr *fattr; int status; diff --git a/fs/nsfs.c b/fs/nsfs.c index 663f8656158d5..f7fddf8ecf73f 100644 --- a/fs/nsfs.c +++ b/fs/nsfs.c @@ -37,7 +37,6 @@ static char *ns_dname(struct dentry *dentry, char *buffer, int buflen) } const struct dentry_operations ns_dentry_operations = { - .d_delete = always_delete_dentry, .d_dname = ns_dname, .d_prune = stashed_dentry_prune, }; diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c index 0c28e5fa34077..d7310fcf38881 100644 --- a/fs/overlayfs/copy_up.c +++ b/fs/overlayfs/copy_up.c @@ -618,7 +618,6 @@ static int ovl_link_up(struct ovl_copy_up_ctx *c) err = PTR_ERR(upper); if (!IS_ERR(upper)) { err = ovl_do_link(ofs, ovl_dentry_upper(c->dentry), udir, upper); - dput(upper); if (!err) { /* Restore timestamps on parent (best effort) */ @@ -626,6 +625,7 @@ static int ovl_link_up(struct ovl_copy_up_ctx *c) ovl_dentry_set_upper_alias(c->dentry); ovl_dentry_update_reval(c->dentry, upper); } + dput(upper); } inode_unlock(udir); if (err) diff --git a/fs/pidfs.c b/fs/pidfs.c index 63f9699ebac36..c0478b3c55d9f 100644 --- a/fs/pidfs.c +++ b/fs/pidfs.c @@ -521,7 +521,6 @@ static char *pidfs_dname(struct dentry *dentry, char *buffer, int buflen) } const struct dentry_operations pidfs_dentry_operations = { - .d_delete = always_delete_dentry, .d_dname = pidfs_dname, .d_prune = stashed_dentry_prune, }; diff --git a/fs/pipe.c b/fs/pipe.c index ce1af7592780d..4d0799e4e7196 100644 --- a/fs/pipe.c +++ 
b/fs/pipe.c @@ -210,11 +210,10 @@ static const struct pipe_buf_operations anon_pipe_buf_ops = { /* Done while waiting without holding the pipe lock - thus the READ_ONCE() */ static inline bool pipe_readable(const struct pipe_inode_info *pipe) { - unsigned int head = READ_ONCE(pipe->head); - unsigned int tail = READ_ONCE(pipe->tail); + union pipe_index idx = { .head_tail = READ_ONCE(pipe->head_tail) }; unsigned int writers = READ_ONCE(pipe->writers); - return !pipe_empty(head, tail) || !writers; + return !pipe_empty(idx.head, idx.tail) || !writers; } static inline unsigned int pipe_update_tail(struct pipe_inode_info *pipe, @@ -395,7 +394,7 @@ pipe_read(struct kiocb *iocb, struct iov_iter *to) wake_next_reader = true; mutex_lock(&pipe->mutex); } - if (pipe_empty(pipe->head, pipe->tail)) + if (pipe_is_empty(pipe)) wake_next_reader = false; mutex_unlock(&pipe->mutex); @@ -417,11 +416,10 @@ static inline int is_packetized(struct file *file) /* Done while waiting without holding the pipe lock - thus the READ_ONCE() */ static inline bool pipe_writable(const struct pipe_inode_info *pipe) { - unsigned int head = READ_ONCE(pipe->head); - unsigned int tail = READ_ONCE(pipe->tail); + union pipe_index idx = { .head_tail = READ_ONCE(pipe->head_tail) }; unsigned int max_usage = READ_ONCE(pipe->max_usage); - return !pipe_full(head, tail, max_usage) || + return !pipe_full(idx.head, idx.tail, max_usage) || !READ_ONCE(pipe->readers); } @@ -579,11 +577,11 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from) kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN); wait_event_interruptible_exclusive(pipe->wr_wait, pipe_writable(pipe)); mutex_lock(&pipe->mutex); - was_empty = pipe_empty(pipe->head, pipe->tail); + was_empty = pipe_is_empty(pipe); wake_next_writer = true; } out: - if (pipe_full(pipe->head, pipe->tail, pipe->max_usage)) + if (pipe_is_full(pipe)) wake_next_writer = false; mutex_unlock(&pipe->mutex); @@ -616,7 +614,7 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from) static long pipe_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { struct pipe_inode_info *pipe = filp->private_data; - unsigned int count, head, tail, mask; + unsigned int count, head, tail; switch (cmd) { case FIONREAD: @@ -624,10 +622,9 @@ static long pipe_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) count = 0; head = pipe->head; tail = pipe->tail; - mask = pipe->ring_size - 1; - while (tail != head) { - count += pipe->bufs[tail & mask].len; + while (!pipe_empty(head, tail)) { + count += pipe_buf(pipe, tail)->len; tail++; } mutex_unlock(&pipe->mutex); @@ -659,7 +656,7 @@ pipe_poll(struct file *filp, poll_table *wait) { __poll_t mask; struct pipe_inode_info *pipe = filp->private_data; - unsigned int head, tail; + union pipe_index idx; /* Epoll has some historical nasty semantics, this enables them */ WRITE_ONCE(pipe->poll_usage, true); @@ -680,19 +677,18 @@ pipe_poll(struct file *filp, poll_table *wait) * if something changes and you got it wrong, the poll * table entry will wake you up and fix it. 
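Throughout this pipe.c rework, paired READ_ONCE loads of pipe->head and pipe->tail give way to a single READ_ONCE of pipe->head_tail decoded through union pipe_index, so a lockless reader always sees a head/tail pair taken at the same instant. A stand-alone sketch of the snapshot trick (the union layout below is an assumption for illustration; which 32-bit half holds head is layout-dependent):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

union pipe_index {
	uint64_t head_tail;		/* loaded/stored as one word */
	struct { uint32_t tail, head; };
};

static _Atomic uint64_t head_tail;

static int ring_empty_snapshot(void)
{
	/* one load: head and tail cannot be torn against each other */
	union pipe_index idx = { .head_tail = atomic_load(&head_tail) };
	return idx.head == idx.tail;
}

int main(void)
{
	union pipe_index idx;
	idx.tail = 3;
	idx.head = 3;
	atomic_store(&head_tail, idx.head_tail);
	printf("empty=%d\n", ring_empty_snapshot());	/* 1 */
	return 0;
}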
*/ - head = READ_ONCE(pipe->head); - tail = READ_ONCE(pipe->tail); + idx.head_tail = READ_ONCE(pipe->head_tail); mask = 0; if (filp->f_mode & FMODE_READ) { - if (!pipe_empty(head, tail)) + if (!pipe_empty(idx.head, idx.tail)) mask |= EPOLLIN | EPOLLRDNORM; if (!pipe->writers && filp->f_pipe != pipe->w_counter) mask |= EPOLLHUP; } if (filp->f_mode & FMODE_WRITE) { - if (!pipe_full(head, tail, pipe->max_usage)) + if (!pipe_full(idx.head, idx.tail, pipe->max_usage)) mask |= EPOLLOUT | EPOLLWRNORM; /* * Most Unices do not set EPOLLERR for FIFOs but on Linux they diff --git a/fs/proc/generic.c b/fs/proc/generic.c index 8ec90826a49e9..a3e22803cddf2 100644 --- a/fs/proc/generic.c +++ b/fs/proc/generic.c @@ -559,10 +559,16 @@ struct proc_dir_entry *proc_create_reg(const char *name, umode_t mode, return p; } -static inline void pde_set_flags(struct proc_dir_entry *pde) +static void pde_set_flags(struct proc_dir_entry *pde) { if (pde->proc_ops->proc_flags & PROC_ENTRY_PERMANENT) pde->flags |= PROC_ENTRY_PERMANENT; + if (pde->proc_ops->proc_read_iter) + pde->flags |= PROC_ENTRY_proc_read_iter; +#ifdef CONFIG_COMPAT + if (pde->proc_ops->proc_compat_ioctl) + pde->flags |= PROC_ENTRY_proc_compat_ioctl; +#endif } struct proc_dir_entry *proc_create_data(const char *name, umode_t mode, @@ -626,6 +632,7 @@ struct proc_dir_entry *proc_create_seq_private(const char *name, umode_t mode, p->proc_ops = &proc_seq_ops; p->seq_ops = ops; p->state_size = state_size; + pde_set_flags(p); return proc_register(parent, p); } EXPORT_SYMBOL(proc_create_seq_private); @@ -656,6 +663,7 @@ struct proc_dir_entry *proc_create_single_data(const char *name, umode_t mode, return NULL; p->proc_ops = &proc_single_ops; p->single_show = show; + pde_set_flags(p); return proc_register(parent, p); } EXPORT_SYMBOL(proc_create_single_data); diff --git a/fs/proc/inode.c b/fs/proc/inode.c index 626ad7bd94f24..a3eb3b740f766 100644 --- a/fs/proc/inode.c +++ b/fs/proc/inode.c @@ -656,13 +656,13 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de) if (S_ISREG(inode->i_mode)) { inode->i_op = de->proc_iops; - if (de->proc_ops->proc_read_iter) + if (pde_has_proc_read_iter(de)) inode->i_fop = &proc_iter_file_ops; else inode->i_fop = &proc_reg_file_ops; #ifdef CONFIG_COMPAT - if (de->proc_ops->proc_compat_ioctl) { - if (de->proc_ops->proc_read_iter) + if (pde_has_proc_compat_ioctl(de)) { + if (pde_has_proc_read_iter(de)) inode->i_fop = &proc_iter_file_ops_compat; else inode->i_fop = &proc_reg_file_ops_compat; diff --git a/fs/proc/internal.h b/fs/proc/internal.h index 1695509370b88..77a517f91821a 100644 --- a/fs/proc/internal.h +++ b/fs/proc/internal.h @@ -85,6 +85,20 @@ static inline void pde_make_permanent(struct proc_dir_entry *pde) pde->flags |= PROC_ENTRY_PERMANENT; } +static inline bool pde_has_proc_read_iter(const struct proc_dir_entry *pde) +{ + return pde->flags & PROC_ENTRY_proc_read_iter; +} + +static inline bool pde_has_proc_compat_ioctl(const struct proc_dir_entry *pde) +{ +#ifdef CONFIG_COMPAT + return pde->flags & PROC_ENTRY_proc_compat_ioctl; +#else + return false; +#endif +} + extern struct kmem_cache *proc_dir_entry_cache; void pde_free(struct proc_dir_entry *pde); diff --git a/fs/smb/client/cifsacl.c b/fs/smb/client/cifsacl.c index 699a3f76d0834..64bd68f750f84 100644 --- a/fs/smb/client/cifsacl.c +++ b/fs/smb/client/cifsacl.c @@ -763,7 +763,7 @@ static void parse_dacl(struct smb_acl *pdacl, char *end_of_acl, struct cifs_fattr *fattr, bool mode_from_special_sid) { int i; - int num_aces = 0; + u16 num_aces 
= 0; int acl_size; char *acl_base; struct smb_ace **ppace; @@ -778,14 +778,15 @@ static void parse_dacl(struct smb_acl *pdacl, char *end_of_acl, } /* validate that we do not go past end of acl */ - if (end_of_acl < (char *)pdacl + le16_to_cpu(pdacl->size)) { + if (end_of_acl < (char *)pdacl + sizeof(struct smb_acl) || + end_of_acl < (char *)pdacl + le16_to_cpu(pdacl->size)) { cifs_dbg(VFS, "ACL too small to parse DACL\n"); return; } cifs_dbg(NOISY, "DACL revision %d size %d num aces %d\n", le16_to_cpu(pdacl->revision), le16_to_cpu(pdacl->size), - le32_to_cpu(pdacl->num_aces)); + le16_to_cpu(pdacl->num_aces)); /* reset rwx permissions for user/group/other. Also, if num_aces is 0 i.e. DACL has no ACEs, @@ -795,12 +796,15 @@ static void parse_dacl(struct smb_acl *pdacl, char *end_of_acl, acl_base = (char *)pdacl; acl_size = sizeof(struct smb_acl); - num_aces = le32_to_cpu(pdacl->num_aces); + num_aces = le16_to_cpu(pdacl->num_aces); if (num_aces > 0) { umode_t denied_mode = 0; - if (num_aces > ULONG_MAX / sizeof(struct smb_ace *)) + if (num_aces > (le16_to_cpu(pdacl->size) - sizeof(struct smb_acl)) / + (offsetof(struct smb_ace, sid) + + offsetof(struct smb_sid, sub_auth) + sizeof(__le16))) return; + ppace = kmalloc_array(num_aces, sizeof(struct smb_ace *), GFP_KERNEL); if (!ppace) @@ -937,12 +941,12 @@ unsigned int setup_special_user_owner_ACE(struct smb_ace *pntace) static void populate_new_aces(char *nacl_base, struct smb_sid *pownersid, struct smb_sid *pgrpsid, - __u64 *pnmode, u32 *pnum_aces, u16 *pnsize, + __u64 *pnmode, u16 *pnum_aces, u16 *pnsize, bool modefromsid, bool posix) { __u64 nmode; - u32 num_aces = 0; + u16 num_aces = 0; u16 nsize = 0; __u64 user_mode; __u64 group_mode; @@ -1050,7 +1054,7 @@ static __u16 replace_sids_and_copy_aces(struct smb_acl *pdacl, struct smb_acl *p u16 size = 0; struct smb_ace *pntace = NULL; char *acl_base = NULL; - u32 src_num_aces = 0; + u16 src_num_aces = 0; u16 nsize = 0; struct smb_ace *pnntace = NULL; char *nacl_base = NULL; @@ -1058,7 +1062,7 @@ static __u16 replace_sids_and_copy_aces(struct smb_acl *pdacl, struct smb_acl *p acl_base = (char *)pdacl; size = sizeof(struct smb_acl); - src_num_aces = le32_to_cpu(pdacl->num_aces); + src_num_aces = le16_to_cpu(pdacl->num_aces); nacl_base = (char *)pndacl; nsize = sizeof(struct smb_acl); @@ -1090,11 +1094,11 @@ static int set_chmod_dacl(struct smb_acl *pdacl, struct smb_acl *pndacl, u16 size = 0; struct smb_ace *pntace = NULL; char *acl_base = NULL; - u32 src_num_aces = 0; + u16 src_num_aces = 0; u16 nsize = 0; struct smb_ace *pnntace = NULL; char *nacl_base = NULL; - u32 num_aces = 0; + u16 num_aces = 0; bool new_aces_set = false; /* Assuming that pndacl and pnmode are never NULL */ @@ -1112,7 +1116,7 @@ static int set_chmod_dacl(struct smb_acl *pdacl, struct smb_acl *pndacl, acl_base = (char *)pdacl; size = sizeof(struct smb_acl); - src_num_aces = le32_to_cpu(pdacl->num_aces); + src_num_aces = le16_to_cpu(pdacl->num_aces); /* Retain old ACEs which we can retain */ for (i = 0; i < src_num_aces; ++i) { @@ -1158,7 +1162,7 @@ static int set_chmod_dacl(struct smb_acl *pdacl, struct smb_acl *pndacl, } finalize_dacl: - pndacl->num_aces = cpu_to_le32(num_aces); + pndacl->num_aces = cpu_to_le16(num_aces); pndacl->size = cpu_to_le16(nsize); return 0; @@ -1293,7 +1297,7 @@ static int build_sec_desc(struct smb_ntsd *pntsd, struct smb_ntsd *pnntsd, dacloffset ? 
dacl_ptr->revision : cpu_to_le16(ACL_REVISION); ndacl_ptr->size = cpu_to_le16(0); - ndacl_ptr->num_aces = cpu_to_le32(0); + ndacl_ptr->num_aces = cpu_to_le16(0); rc = set_chmod_dacl(dacl_ptr, ndacl_ptr, owner_sid_ptr, group_sid_ptr, pnmode, mode_from_sid, posix); @@ -1653,7 +1657,7 @@ id_mode_to_cifs_acl(struct inode *inode, const char *path, __u64 *pnmode, dacl_ptr = (struct smb_acl *)((char *)pntsd + dacloffset); if (mode_from_sid) nsecdesclen += - le32_to_cpu(dacl_ptr->num_aces) * sizeof(struct smb_ace); + le16_to_cpu(dacl_ptr->num_aces) * sizeof(struct smb_ace); else /* cifsacl */ nsecdesclen += le16_to_cpu(dacl_ptr->size); } diff --git a/fs/smb/client/cifssmb.c b/fs/smb/client/cifssmb.c index 3feaa0f681699..d07682020c645 100644 --- a/fs/smb/client/cifssmb.c +++ b/fs/smb/client/cifssmb.c @@ -1338,7 +1338,8 @@ cifs_readv_callback(struct mid_q_entry *mid) rdata->credits.value = 0; rdata->subreq.error = rdata->result; rdata->subreq.transferred += rdata->got_bytes; - queue_work(cifsiod_wq, &rdata->subreq.work); + trace_netfs_sreq(&rdata->subreq, netfs_sreq_trace_io_progress); + netfs_read_subreq_terminated(&rdata->subreq); release_mid(mid); add_credits(server, &credits, 0); } diff --git a/fs/smb/client/connect.c b/fs/smb/client/connect.c index f917de020dd5d..73f93a35eeddb 100644 --- a/fs/smb/client/connect.c +++ b/fs/smb/client/connect.c @@ -1825,9 +1825,8 @@ static int match_session(struct cifs_ses *ses, struct smb3_fs_context *ctx, bool match_super) { - if (ctx->sectype != Unspecified && - ctx->sectype != ses->sectype) - return 0; + struct TCP_Server_Info *server = ses->server; + enum securityEnum ctx_sec, ses_sec; if (!match_super && ctx->dfs_root_ses != ses->dfs_root_ses) return 0; @@ -1839,11 +1838,20 @@ static int match_session(struct cifs_ses *ses, if (ses->chan_max < ctx->max_channels) return 0; - switch (ses->sectype) { + ctx_sec = server->ops->select_sectype(server, ctx->sectype); + ses_sec = server->ops->select_sectype(server, ses->sectype); + + if (ctx_sec != ses_sec) + return 0; + + switch (ctx_sec) { + case IAKerb: case Kerberos: if (!uid_eq(ctx->cred_uid, ses->cred_uid)) return 0; break; + case NTLMv2: + case RawNTLMSSP: default: /* NULL username means anonymous session */ if (ses->user_name == NULL) { diff --git a/fs/smb/client/fs_context.c b/fs/smb/client/fs_context.c index e9b286d9a7ba3..8c73d4d60d1a7 100644 --- a/fs/smb/client/fs_context.c +++ b/fs/smb/client/fs_context.c @@ -171,6 +171,7 @@ const struct fs_parameter_spec smb3_fs_parameters[] = { fsparam_string("username", Opt_user), fsparam_string("pass", Opt_pass), fsparam_string("password", Opt_pass), + fsparam_string("pass2", Opt_pass2), fsparam_string("password2", Opt_pass2), fsparam_string("ip", Opt_ip), fsparam_string("addr", Opt_ip), @@ -1131,6 +1132,9 @@ static int smb3_fs_context_parse_param(struct fs_context *fc, } else if (!strcmp("user", param->key) || !strcmp("username", param->key)) { skip_parsing = true; opt = Opt_user; + } else if (!strcmp("pass2", param->key) || !strcmp("password2", param->key)) { + skip_parsing = true; + opt = Opt_pass2; } } @@ -1340,21 +1344,21 @@ static int smb3_fs_context_parse_param(struct fs_context *fc, } break; case Opt_acregmax: - ctx->acregmax = HZ * result.uint_32; - if (ctx->acregmax > CIFS_MAX_ACTIMEO) { + if (result.uint_32 > CIFS_MAX_ACTIMEO / HZ) { cifs_errorf(fc, "acregmax too large\n"); goto cifs_parse_mount_err; } + ctx->acregmax = HZ * result.uint_32; break; case Opt_acdirmax: - ctx->acdirmax = HZ * result.uint_32; - if (ctx->acdirmax > CIFS_MAX_ACTIMEO) { + if 
(result.uint_32 > CIFS_MAX_ACTIMEO / HZ) { cifs_errorf(fc, "acdirmax too large\n"); goto cifs_parse_mount_err; } + ctx->acdirmax = HZ * result.uint_32; break; case Opt_actimeo: - if (HZ * result.uint_32 > CIFS_MAX_ACTIMEO) { + if (result.uint_32 > CIFS_MAX_ACTIMEO / HZ) { cifs_errorf(fc, "timeout too large\n"); goto cifs_parse_mount_err; } @@ -1366,11 +1370,11 @@ static int smb3_fs_context_parse_param(struct fs_context *fc, ctx->acdirmax = ctx->acregmax = HZ * result.uint_32; break; case Opt_closetimeo: - ctx->closetimeo = HZ * result.uint_32; - if (ctx->closetimeo > SMB3_MAX_DCLOSETIMEO) { + if (result.uint_32 > SMB3_MAX_DCLOSETIMEO / HZ) { cifs_errorf(fc, "closetimeo too large\n"); goto cifs_parse_mount_err; } + ctx->closetimeo = HZ * result.uint_32; break; case Opt_echo_interval: ctx->echo_interval = result.uint_32; diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c index 23e0c8be7fb52..4dd11eafb69d9 100644 --- a/fs/smb/client/smb2ops.c +++ b/fs/smb/client/smb2ops.c @@ -4965,6 +4965,10 @@ receive_encrypted_standard(struct TCP_Server_Info *server, next_buffer = (char *)cifs_buf_get(); else next_buffer = (char *)cifs_small_buf_get(); + if (!next_buffer) { + cifs_server_dbg(VFS, "No memory for (large) SMB response\n"); + return -1; + } memcpy(next_buffer, buf + next_cmd, pdu_length - next_cmd); } diff --git a/fs/smb/common/smbacl.h b/fs/smb/common/smbacl.h index 6a60698fc6f0f..a624ec9e4a144 100644 --- a/fs/smb/common/smbacl.h +++ b/fs/smb/common/smbacl.h @@ -107,7 +107,8 @@ struct smb_sid { struct smb_acl { __le16 revision; /* revision level */ __le16 size; - __le32 num_aces; + __le16 num_aces; + __le16 reserved; } __attribute__((packed)); struct smb_ace { diff --git a/fs/smb/server/connection.c b/fs/smb/server/connection.c index f8a40f65db6ae..c1f22c1291117 100644 --- a/fs/smb/server/connection.c +++ b/fs/smb/server/connection.c @@ -433,6 +433,26 @@ void ksmbd_conn_init_server_callbacks(struct ksmbd_conn_ops *ops) default_conn_ops.terminate_fn = ops->terminate_fn; } +void ksmbd_conn_r_count_inc(struct ksmbd_conn *conn) +{ + atomic_inc(&conn->r_count); +} + +void ksmbd_conn_r_count_dec(struct ksmbd_conn *conn) +{ + /* + * Check the waitqueue to drop pending requests on + * disconnection. waitqueue_active is safe because it + * uses an atomic operation for the condition.
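The helper pairs r_count (in-flight requests) with a temporary refcnt pin, visible in the function body below, so that waking the drain waiter cannot race with the connection being freed. A compressed user-space sketch of that dance (illustrative names, plain C11 atomics, free() in place of kfree()):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct conn {
	atomic_int r_count;	/* requests in flight */
	atomic_int refcnt;	/* object lifetime */
};

static void conn_put(struct conn *c)
{
	if (atomic_fetch_sub(&c->refcnt, 1) == 1) {
		printf("freeing conn\n");
		free(c);
	}
}

static void conn_r_count_dec(struct conn *c)
{
	atomic_fetch_add(&c->refcnt, 1);	/* pin across the wake */
	if (atomic_fetch_sub(&c->r_count, 1) == 1)
		printf("last request done, wake drain waiter\n");
	conn_put(c);	/* drop the pin; may be the final free */
}

int main(void)
{
	struct conn *c = malloc(sizeof(*c));
	if (!c)
		return 1;
	atomic_init(&c->r_count, 1);
	atomic_init(&c->refcnt, 1);
	conn_r_count_dec(c);	/* wakes the (imaginary) waiter */
	conn_put(c);		/* owner drops its reference */
	return 0;
}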
+ */ + atomic_inc(&conn->refcnt); + if (!atomic_dec_return(&conn->r_count) && waitqueue_active(&conn->r_count_q)) + wake_up(&conn->r_count_q); + + if (atomic_dec_and_test(&conn->refcnt)) + kfree(conn); +} + int ksmbd_conn_transport_init(void) { int ret; diff --git a/fs/smb/server/connection.h b/fs/smb/server/connection.h index b379ae4fdcdff..91c2318639e76 100644 --- a/fs/smb/server/connection.h +++ b/fs/smb/server/connection.h @@ -168,6 +168,8 @@ int ksmbd_conn_transport_init(void); void ksmbd_conn_transport_destroy(void); void ksmbd_conn_lock(struct ksmbd_conn *conn); void ksmbd_conn_unlock(struct ksmbd_conn *conn); +void ksmbd_conn_r_count_inc(struct ksmbd_conn *conn); +void ksmbd_conn_r_count_dec(struct ksmbd_conn *conn); /* * WARNING diff --git a/fs/smb/server/ksmbd_work.c b/fs/smb/server/ksmbd_work.c index 4af2e6007c29d..72b00ca6e4551 100644 --- a/fs/smb/server/ksmbd_work.c +++ b/fs/smb/server/ksmbd_work.c @@ -26,7 +26,6 @@ struct ksmbd_work *ksmbd_alloc_work_struct(void) INIT_LIST_HEAD(&work->request_entry); INIT_LIST_HEAD(&work->async_request_entry); INIT_LIST_HEAD(&work->fp_entry); - INIT_LIST_HEAD(&work->interim_entry); INIT_LIST_HEAD(&work->aux_read_list); work->iov_alloc_cnt = 4; work->iov = kcalloc(work->iov_alloc_cnt, sizeof(struct kvec), @@ -56,8 +55,6 @@ void ksmbd_free_work_struct(struct ksmbd_work *work) kfree(work->tr_buf); kvfree(work->request_buf); kfree(work->iov); - if (!list_empty(&work->interim_entry)) - list_del(&work->interim_entry); if (work->async_id) ksmbd_release_id(&work->conn->async_ida, work->async_id); diff --git a/fs/smb/server/ksmbd_work.h b/fs/smb/server/ksmbd_work.h index 8ca2c813246e6..d36393ff8310c 100644 --- a/fs/smb/server/ksmbd_work.h +++ b/fs/smb/server/ksmbd_work.h @@ -89,7 +89,6 @@ struct ksmbd_work { /* List head at conn->async_requests */ struct list_head async_request_entry; struct list_head fp_entry; - struct list_head interim_entry; }; /** diff --git a/fs/smb/server/oplock.c b/fs/smb/server/oplock.c index 3a3fe4afbdf0d..28886ff1ee577 100644 --- a/fs/smb/server/oplock.c +++ b/fs/smb/server/oplock.c @@ -46,7 +46,6 @@ static struct oplock_info *alloc_opinfo(struct ksmbd_work *work, opinfo->fid = id; opinfo->Tid = Tid; INIT_LIST_HEAD(&opinfo->op_entry); - INIT_LIST_HEAD(&opinfo->interim_list); init_waitqueue_head(&opinfo->oplock_q); init_waitqueue_head(&opinfo->oplock_brk); atomic_set(&opinfo->refcount, 1); @@ -635,6 +634,7 @@ static void __smb2_oplock_break_noti(struct work_struct *wk) { struct smb2_oplock_break *rsp = NULL; struct ksmbd_work *work = container_of(wk, struct ksmbd_work, work); + struct ksmbd_conn *conn = work->conn; struct oplock_break_info *br_info = work->request_buf; struct smb2_hdr *rsp_hdr; struct ksmbd_file *fp; @@ -690,6 +690,7 @@ static void __smb2_oplock_break_noti(struct work_struct *wk) out: ksmbd_free_work_struct(work); + ksmbd_conn_r_count_dec(conn); } /** @@ -724,6 +725,7 @@ static int smb2_oplock_break_noti(struct oplock_info *opinfo) work->sess = opinfo->sess; if (opinfo->op_state == OPLOCK_ACK_WAIT) { + ksmbd_conn_r_count_inc(conn); INIT_WORK(&work->work, __smb2_oplock_break_noti); ksmbd_queue_work(work); @@ -745,6 +747,7 @@ static void __smb2_lease_break_noti(struct work_struct *wk) { struct smb2_lease_break *rsp = NULL; struct ksmbd_work *work = container_of(wk, struct ksmbd_work, work); + struct ksmbd_conn *conn = work->conn; struct lease_break_info *br_info = work->request_buf; struct smb2_hdr *rsp_hdr; @@ -791,6 +794,7 @@ static void __smb2_lease_break_noti(struct work_struct *wk) out: 
ksmbd_free_work_struct(work); + ksmbd_conn_r_count_dec(conn); } /** @@ -803,7 +807,6 @@ static void __smb2_lease_break_noti(struct work_struct *wk) static int smb2_lease_break_noti(struct oplock_info *opinfo) { struct ksmbd_conn *conn = opinfo->conn; - struct list_head *tmp, *t; struct ksmbd_work *work; struct lease_break_info *br_info; struct lease *lease = opinfo->o_lease; @@ -831,16 +834,7 @@ static int smb2_lease_break_noti(struct oplock_info *opinfo) work->sess = opinfo->sess; if (opinfo->op_state == OPLOCK_ACK_WAIT) { - list_for_each_safe(tmp, t, &opinfo->interim_list) { - struct ksmbd_work *in_work; - - in_work = list_entry(tmp, struct ksmbd_work, - interim_entry); - setup_async_work(in_work, NULL, NULL); - smb2_send_interim_resp(in_work, STATUS_PENDING); - list_del_init(&in_work->interim_entry); - release_async_work(in_work); - } + ksmbd_conn_r_count_inc(conn); INIT_WORK(&work->work, __smb2_lease_break_noti); ksmbd_queue_work(work); wait_for_break_ack(opinfo); @@ -871,7 +865,8 @@ static void wait_lease_breaking(struct oplock_info *opinfo) } } -static int oplock_break(struct oplock_info *brk_opinfo, int req_op_level) +static int oplock_break(struct oplock_info *brk_opinfo, int req_op_level, + struct ksmbd_work *in_work) { int err = 0; @@ -914,9 +909,15 @@ static int oplock_break(struct oplock_info *brk_opinfo, int req_op_level) } if (lease->state & (SMB2_LEASE_WRITE_CACHING_LE | - SMB2_LEASE_HANDLE_CACHING_LE)) + SMB2_LEASE_HANDLE_CACHING_LE)) { + if (in_work) { + setup_async_work(in_work, NULL, NULL); + smb2_send_interim_resp(in_work, STATUS_PENDING); + release_async_work(in_work); + } + brk_opinfo->op_state = OPLOCK_ACK_WAIT; - else + } else atomic_dec(&brk_opinfo->breaking_cnt); } else { err = oplock_break_pending(brk_opinfo, req_op_level); @@ -1116,7 +1117,7 @@ void smb_send_parent_lease_break_noti(struct ksmbd_file *fp, if (ksmbd_conn_releasing(opinfo->conn)) continue; - oplock_break(opinfo, SMB2_OPLOCK_LEVEL_NONE); + oplock_break(opinfo, SMB2_OPLOCK_LEVEL_NONE, NULL); opinfo_put(opinfo); } } @@ -1152,7 +1153,7 @@ void smb_lazy_parent_lease_break_close(struct ksmbd_file *fp) if (ksmbd_conn_releasing(opinfo->conn)) continue; - oplock_break(opinfo, SMB2_OPLOCK_LEVEL_NONE); + oplock_break(opinfo, SMB2_OPLOCK_LEVEL_NONE, NULL); opinfo_put(opinfo); } } @@ -1252,8 +1253,7 @@ int smb_grant_oplock(struct ksmbd_work *work, int req_op_level, u64 pid, goto op_break_not_needed; } - list_add(&work->interim_entry, &prev_opinfo->interim_list); - err = oplock_break(prev_opinfo, SMB2_OPLOCK_LEVEL_II); + err = oplock_break(prev_opinfo, SMB2_OPLOCK_LEVEL_II, work); opinfo_put(prev_opinfo); if (err == -ENOENT) goto set_lev; @@ -1322,8 +1322,7 @@ static void smb_break_all_write_oplock(struct ksmbd_work *work, } brk_opinfo->open_trunc = is_trunc; - list_add(&work->interim_entry, &brk_opinfo->interim_list); - oplock_break(brk_opinfo, SMB2_OPLOCK_LEVEL_II); + oplock_break(brk_opinfo, SMB2_OPLOCK_LEVEL_II, work); opinfo_put(brk_opinfo); } @@ -1386,7 +1385,7 @@ void smb_break_all_levII_oplock(struct ksmbd_work *work, struct ksmbd_file *fp, SMB2_LEASE_KEY_SIZE)) goto next; brk_op->open_trunc = is_trunc; - oplock_break(brk_op, SMB2_OPLOCK_LEVEL_NONE); + oplock_break(brk_op, SMB2_OPLOCK_LEVEL_NONE, NULL); next: opinfo_put(brk_op); rcu_read_lock(); diff --git a/fs/smb/server/oplock.h b/fs/smb/server/oplock.h index 72bc88a63a408..3f64f07872638 100644 --- a/fs/smb/server/oplock.h +++ b/fs/smb/server/oplock.h @@ -67,7 +67,6 @@ struct oplock_info { bool is_lease; bool open_trunc; /* truncate on open */ struct 
lease *o_lease; - struct list_head interim_list; struct list_head op_entry; struct list_head lease_entry; wait_queue_head_t oplock_q; /* Other server threads */ diff --git a/fs/smb/server/server.c b/fs/smb/server/server.c index 601e7fcbcf1e6..ab533c6029879 100644 --- a/fs/smb/server/server.c +++ b/fs/smb/server/server.c @@ -270,17 +270,7 @@ static void handle_ksmbd_work(struct work_struct *wk) ksmbd_conn_try_dequeue_request(work); ksmbd_free_work_struct(work); - /* - * Checking waitqueue to dropping pending requests on - * disconnection. waitqueue_active is safe because it - * uses atomic operation for condition. - */ - atomic_inc(&conn->refcnt); - if (!atomic_dec_return(&conn->r_count) && waitqueue_active(&conn->r_count_q)) - wake_up(&conn->r_count_q); - - if (atomic_dec_and_test(&conn->refcnt)) - kfree(conn); + ksmbd_conn_r_count_dec(conn); } /** @@ -310,7 +300,7 @@ static int queue_ksmbd_work(struct ksmbd_conn *conn) conn->request_buf = NULL; ksmbd_conn_enqueue_request(work); - atomic_inc(&conn->r_count); + ksmbd_conn_r_count_inc(conn); /* update activity on connection */ conn->last_active = jiffies; INIT_WORK(&work->work, handle_ksmbd_work); diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c index f1efcd0274750..c53121538990e 100644 --- a/fs/smb/server/smb2pdu.c +++ b/fs/smb/server/smb2pdu.c @@ -7458,17 +7458,17 @@ int smb2_lock(struct ksmbd_work *work) } no_check_cl: + flock = smb_lock->fl; + list_del(&smb_lock->llist); + if (smb_lock->zero_len) { err = 0; goto skip; } - - flock = smb_lock->fl; - list_del(&smb_lock->llist); retry: rc = vfs_lock_file(filp, smb_lock->cmd, flock, NULL); skip: - if (flags & SMB2_LOCKFLAG_UNLOCK) { + if (smb_lock->flags & SMB2_LOCKFLAG_UNLOCK) { if (!rc) { ksmbd_debug(SMB, "File unlocked\n"); } else if (rc == -ENOENT) { diff --git a/fs/smb/server/smbacl.c b/fs/smb/server/smbacl.c index d39d3e553366d..49b128698670a 100644 --- a/fs/smb/server/smbacl.c +++ b/fs/smb/server/smbacl.c @@ -333,7 +333,7 @@ void posix_state_to_acl(struct posix_acl_state *state, pace->e_perm = state->other.allow; } -int init_acl_state(struct posix_acl_state *state, int cnt) +int init_acl_state(struct posix_acl_state *state, u16 cnt) { int alloc; @@ -368,7 +368,7 @@ static void parse_dacl(struct mnt_idmap *idmap, struct smb_fattr *fattr) { int i, ret; - int num_aces = 0; + u16 num_aces = 0; unsigned int acl_size; char *acl_base; struct smb_ace **ppace; @@ -389,16 +389,18 @@ static void parse_dacl(struct mnt_idmap *idmap, ksmbd_debug(SMB, "DACL revision %d size %d num aces %d\n", le16_to_cpu(pdacl->revision), le16_to_cpu(pdacl->size), - le32_to_cpu(pdacl->num_aces)); + le16_to_cpu(pdacl->num_aces)); acl_base = (char *)pdacl; acl_size = sizeof(struct smb_acl); - num_aces = le32_to_cpu(pdacl->num_aces); + num_aces = le16_to_cpu(pdacl->num_aces); if (num_aces <= 0) return; - if (num_aces > ULONG_MAX / sizeof(struct smb_ace *)) + if (num_aces > (le16_to_cpu(pdacl->size) - sizeof(struct smb_acl)) / + (offsetof(struct smb_ace, sid) + + offsetof(struct smb_sid, sub_auth) + sizeof(__le16))) return; ret = init_acl_state(&acl_state, num_aces); @@ -432,6 +434,7 @@ static void parse_dacl(struct mnt_idmap *idmap, offsetof(struct smb_sid, sub_auth); if (end_of_acl - acl_base < acl_size || + ppace[i]->sid.num_subauth == 0 || ppace[i]->sid.num_subauth > SID_MAX_SUB_AUTHORITIES || (end_of_acl - acl_base < acl_size + sizeof(__le32) * ppace[i]->sid.num_subauth) || @@ -580,7 +583,7 @@ static void parse_dacl(struct mnt_idmap *idmap, static void set_posix_acl_entries_dacl(struct mnt_idmap 
*idmap, struct smb_ace *pndace, - struct smb_fattr *fattr, u32 *num_aces, + struct smb_fattr *fattr, u16 *num_aces, u16 *size, u32 nt_aces_num) { struct posix_acl_entry *pace; @@ -701,7 +704,7 @@ static void set_ntacl_dacl(struct mnt_idmap *idmap, struct smb_fattr *fattr) { struct smb_ace *ntace, *pndace; - int nt_num_aces = le32_to_cpu(nt_dacl->num_aces), num_aces = 0; + u16 nt_num_aces = le16_to_cpu(nt_dacl->num_aces), num_aces = 0; unsigned short size = 0; int i; @@ -728,7 +731,7 @@ static void set_ntacl_dacl(struct mnt_idmap *idmap, set_posix_acl_entries_dacl(idmap, pndace, fattr, &num_aces, &size, nt_num_aces); - pndacl->num_aces = cpu_to_le32(num_aces); + pndacl->num_aces = cpu_to_le16(num_aces); pndacl->size = cpu_to_le16(le16_to_cpu(pndacl->size) + size); } @@ -736,7 +739,7 @@ static void set_mode_dacl(struct mnt_idmap *idmap, struct smb_acl *pndacl, struct smb_fattr *fattr) { struct smb_ace *pace, *pndace; - u32 num_aces = 0; + u16 num_aces = 0; u16 size = 0, ace_size = 0; uid_t uid; const struct smb_sid *sid; @@ -792,7 +795,7 @@ static void set_mode_dacl(struct mnt_idmap *idmap, fattr->cf_mode, 0007); out: - pndacl->num_aces = cpu_to_le32(num_aces); + pndacl->num_aces = cpu_to_le16(num_aces); pndacl->size = cpu_to_le16(le16_to_cpu(pndacl->size) + size); } @@ -807,6 +810,13 @@ static int parse_sid(struct smb_sid *psid, char *end_of_acl) return -EINVAL; } + if (!psid->num_subauth) + return 0; + + if (psid->num_subauth > SID_MAX_SUB_AUTHORITIES || + end_of_acl < (char *)psid + 8 + sizeof(__le32) * psid->num_subauth) + return -EINVAL; + return 0; } @@ -848,6 +858,9 @@ int parse_sec_desc(struct mnt_idmap *idmap, struct smb_ntsd *pntsd, pntsd->type = cpu_to_le16(DACL_PRESENT); if (pntsd->osidoffset) { + if (le32_to_cpu(pntsd->osidoffset) < sizeof(struct smb_ntsd)) + return -EINVAL; + rc = parse_sid(owner_sid_ptr, end_of_acl); if (rc) { pr_err("%s: Error %d parsing Owner SID\n", __func__, rc); @@ -863,6 +876,9 @@ int parse_sec_desc(struct mnt_idmap *idmap, struct smb_ntsd *pntsd, } if (pntsd->gsidoffset) { + if (le32_to_cpu(pntsd->gsidoffset) < sizeof(struct smb_ntsd)) + return -EINVAL; + rc = parse_sid(group_sid_ptr, end_of_acl); if (rc) { pr_err("%s: Error %d mapping Owner SID to gid\n", @@ -884,6 +900,9 @@ int parse_sec_desc(struct mnt_idmap *idmap, struct smb_ntsd *pntsd, pntsd->type |= cpu_to_le16(DACL_PROTECTED); if (dacloffset) { + if (dacloffset < sizeof(struct smb_ntsd)) + return -EINVAL; + parse_dacl(idmap, dacl_ptr, end_of_acl, owner_sid_ptr, group_sid_ptr, fattr); } @@ -1006,8 +1025,9 @@ int smb_inherit_dacl(struct ksmbd_conn *conn, struct smb_sid owner_sid, group_sid; struct dentry *parent = path->dentry->d_parent; struct mnt_idmap *idmap = mnt_idmap(path->mnt); - int inherited_flags = 0, flags = 0, i, ace_cnt = 0, nt_size = 0, pdacl_size; - int rc = 0, num_aces, dacloffset, pntsd_type, pntsd_size, acl_len, aces_size; + int inherited_flags = 0, flags = 0, i, nt_size = 0, pdacl_size; + int rc = 0, dacloffset, pntsd_type, pntsd_size, acl_len, aces_size; + u16 num_aces, ace_cnt = 0; char *aces_base; bool is_dir = S_ISDIR(d_inode(path->dentry)->i_mode); @@ -1023,7 +1043,7 @@ int smb_inherit_dacl(struct ksmbd_conn *conn, parent_pdacl = (struct smb_acl *)((char *)parent_pntsd + dacloffset); acl_len = pntsd_size - dacloffset; - num_aces = le32_to_cpu(parent_pdacl->num_aces); + num_aces = le16_to_cpu(parent_pdacl->num_aces); pntsd_type = le16_to_cpu(parent_pntsd->type); pdacl_size = le16_to_cpu(parent_pdacl->size); @@ -1183,7 +1203,7 @@ int smb_inherit_dacl(struct ksmbd_conn *conn, 
pdacl = (struct smb_acl *)((char *)pntsd + le32_to_cpu(pntsd->dacloffset)); pdacl->revision = cpu_to_le16(2); pdacl->size = cpu_to_le16(sizeof(struct smb_acl) + nt_size); - pdacl->num_aces = cpu_to_le32(ace_cnt); + pdacl->num_aces = cpu_to_le16(ace_cnt); pace = (struct smb_ace *)((char *)pdacl + sizeof(struct smb_acl)); memcpy(pace, aces_base, nt_size); pntsd_size += sizeof(struct smb_acl) + nt_size; @@ -1264,7 +1284,7 @@ int smb_check_perm_dacl(struct ksmbd_conn *conn, const struct path *path, ace = (struct smb_ace *)((char *)pdacl + sizeof(struct smb_acl)); aces_size = acl_size - sizeof(struct smb_acl); - for (i = 0; i < le32_to_cpu(pdacl->num_aces); i++) { + for (i = 0; i < le16_to_cpu(pdacl->num_aces); i++) { if (offsetof(struct smb_ace, access_req) > aces_size) break; ace_size = le16_to_cpu(ace->size); @@ -1285,7 +1305,7 @@ int smb_check_perm_dacl(struct ksmbd_conn *conn, const struct path *path, ace = (struct smb_ace *)((char *)pdacl + sizeof(struct smb_acl)); aces_size = acl_size - sizeof(struct smb_acl); - for (i = 0; i < le32_to_cpu(pdacl->num_aces); i++) { + for (i = 0; i < le16_to_cpu(pdacl->num_aces); i++) { if (offsetof(struct smb_ace, access_req) > aces_size) break; ace_size = le16_to_cpu(ace->size); diff --git a/fs/smb/server/smbacl.h b/fs/smb/server/smbacl.h index 24ce576fc2924..355adaee39b87 100644 --- a/fs/smb/server/smbacl.h +++ b/fs/smb/server/smbacl.h @@ -86,7 +86,7 @@ int parse_sec_desc(struct mnt_idmap *idmap, struct smb_ntsd *pntsd, int build_sec_desc(struct mnt_idmap *idmap, struct smb_ntsd *pntsd, struct smb_ntsd *ppntsd, int ppntsd_size, int addition_info, __u32 *secdesclen, struct smb_fattr *fattr); -int init_acl_state(struct posix_acl_state *state, int cnt); +int init_acl_state(struct posix_acl_state *state, u16 cnt); void free_acl_state(struct posix_acl_state *state); void posix_state_to_acl(struct posix_acl_state *state, struct posix_acl_entry *pace); diff --git a/fs/smb/server/transport_ipc.c b/fs/smb/server/transport_ipc.c index 0460ebea6ff02..3f185ae60dc51 100644 --- a/fs/smb/server/transport_ipc.c +++ b/fs/smb/server/transport_ipc.c @@ -281,6 +281,7 @@ static int handle_response(int type, void *payload, size_t sz) if (entry->type + 1 != type) { pr_err("Waiting for IPC type %d, got %d. Ignore.\n", entry->type + 1, type); + continue; } entry->response = kvzalloc(sz, KSMBD_DEFAULT_GFP); diff --git a/fs/splice.c b/fs/splice.c index 28cfa63aa2364..23fa5561b9441 100644 --- a/fs/splice.c +++ b/fs/splice.c @@ -331,7 +331,7 @@ ssize_t copy_splice_read(struct file *in, loff_t *ppos, int i; /* Work out how much data we can actually add into the pipe */ - used = pipe_occupancy(pipe->head, pipe->tail); + used = pipe_buf_usage(pipe); npages = max_t(ssize_t, pipe->max_usage - used, 0); len = min_t(size_t, len, npages * PAGE_SIZE); npages = DIV_ROUND_UP(len, PAGE_SIZE); @@ -527,7 +527,7 @@ static int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_des return -ERESTARTSYS; repeat: - while (pipe_empty(pipe->head, pipe->tail)) { + while (pipe_is_empty(pipe)) { if (!pipe->writers) return 0; @@ -820,7 +820,7 @@ ssize_t splice_to_socket(struct pipe_inode_info *pipe, struct file *out, if (signal_pending(current)) break; - while (pipe_empty(pipe->head, pipe->tail)) { + while (pipe_is_empty(pipe)) { ret = 0; if (!pipe->writers) goto out; @@ -968,7 +968,7 @@ static ssize_t do_splice_read(struct file *in, loff_t *ppos, return 0; /* Don't try to read more than the pipe has space for.
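Concretely, the clamp completed just below caps a splice read at the pipe's free slots times the page size. A worked stand-alone example (page size and occupancy values invented; assumes buf_usage <= max_usage):

#include <stddef.h>
#include <stdio.h>

#define PAGE_SHIFT 12	/* assume 4 KiB pages */

static size_t clamp_to_pipe_space(size_t len, unsigned int max_usage,
				  unsigned int buf_usage)
{
	size_t p_space = max_usage - buf_usage;	/* free slots */
	size_t space_bytes = p_space << PAGE_SHIFT;
	return len < space_bytes ? len : space_bytes;
}

int main(void)
{
	/* 16-slot pipe, 13 slots occupied: at most 3 pages may be read */
	printf("%zu\n", clamp_to_pipe_space(1 << 20, 16, 13));	/* 12288 */
	return 0;
}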
*/ - p_space = pipe->max_usage - pipe_occupancy(pipe->head, pipe->tail); + p_space = pipe->max_usage - pipe_buf_usage(pipe); len = min_t(size_t, len, p_space << PAGE_SHIFT); if (unlikely(len > MAX_RW_COUNT)) @@ -1080,7 +1080,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd, more = sd->flags & SPLICE_F_MORE; sd->flags |= SPLICE_F_MORE; - WARN_ON_ONCE(!pipe_empty(pipe->head, pipe->tail)); + WARN_ON_ONCE(!pipe_is_empty(pipe)); while (len) { size_t read_len; @@ -1268,7 +1268,7 @@ static int wait_for_space(struct pipe_inode_info *pipe, unsigned flags) send_sig(SIGPIPE, current, 0); return -EPIPE; } - if (!pipe_full(pipe->head, pipe->tail, pipe->max_usage)) + if (!pipe_is_full(pipe)) return 0; if (flags & SPLICE_F_NONBLOCK) return -EAGAIN; @@ -1652,13 +1652,13 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags) * Check the pipe occupancy without the inode lock first. This function * is speculative anyways, so missing one is ok. */ - if (!pipe_empty(pipe->head, pipe->tail)) + if (!pipe_is_empty(pipe)) return 0; ret = 0; pipe_lock(pipe); - while (pipe_empty(pipe->head, pipe->tail)) { + while (pipe_is_empty(pipe)) { if (signal_pending(current)) { ret = -ERESTARTSYS; break; @@ -1688,13 +1688,13 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags) * Check pipe occupancy without the inode lock first. This function * is speculative anyways, so missing one is ok. */ - if (!pipe_full(pipe->head, pipe->tail, pipe->max_usage)) + if (!pipe_is_full(pipe)) return 0; ret = 0; pipe_lock(pipe); - while (pipe_full(pipe->head, pipe->tail, pipe->max_usage)) { + while (pipe_is_full(pipe)) { if (!pipe->readers) { send_sig(SIGPIPE, current, 0); ret = -EPIPE; diff --git a/fs/squashfs/cache.c b/fs/squashfs/cache.c index 4db0d2b0aab8f..181260e72680c 100644 --- a/fs/squashfs/cache.c +++ b/fs/squashfs/cache.c @@ -198,7 +198,7 @@ void squashfs_cache_delete(struct squashfs_cache *cache) { int i, j; - if (cache == NULL) + if (IS_ERR(cache) || cache == NULL) return; for (i = 0; i < cache->entries; i++) { diff --git a/fs/vboxsf/super.c b/fs/vboxsf/super.c index 1d94bb7841081..0bc96ab6580b3 100644 --- a/fs/vboxsf/super.c +++ b/fs/vboxsf/super.c @@ -21,8 +21,7 @@ #define VBOXSF_SUPER_MAGIC 0x786f4256 /* 'VBox' little endian */ -static const unsigned char VBSF_MOUNT_SIGNATURE[4] = { '\000', '\377', '\376', - '\375' }; +static const unsigned char VBSF_MOUNT_SIGNATURE[4] __nonstring = "\000\377\376\375"; static int follow_symlinks; module_param(follow_symlinks, int, 0444); diff --git a/fs/xfs/libxfs/xfs_alloc.c b/fs/xfs/libxfs/xfs_alloc.c index 3d33e17f2e5ce..7839efe050bfa 100644 --- a/fs/xfs/libxfs/xfs_alloc.c +++ b/fs/xfs/libxfs/xfs_alloc.c @@ -33,8 +33,6 @@ struct kmem_cache *xfs_extfree_item_cache; struct workqueue_struct *xfs_alloc_wq; -#define XFS_ABSDIFF(a,b) (((a) <= (b)) ? ((b) - (a)) : ((a) - (b))) - #define XFSA_FIXUP_BNO_OK 1 #define XFSA_FIXUP_CNT_OK 2 @@ -410,8 +408,8 @@ xfs_alloc_compute_diff( if (newbno1 != NULLAGBLOCK && newbno2 != NULLAGBLOCK) { if (newlen1 < newlen2 || (newlen1 == newlen2 && - XFS_ABSDIFF(newbno1, wantbno) > - XFS_ABSDIFF(newbno2, wantbno))) + abs_diff(newbno1, wantbno) > + abs_diff(newbno2, wantbno))) newbno1 = newbno2; } else if (newbno2 != NULLAGBLOCK) newbno1 = newbno2; @@ -427,7 +425,7 @@ xfs_alloc_compute_diff( } else newbno1 = freeend - wantlen; *newbnop = newbno1; - return newbno1 == NULLAGBLOCK ? 0 : XFS_ABSDIFF(newbno1, wantbno); + return newbno1 == NULLAGBLOCK ? 
0 : abs_diff(newbno1, wantbno); } /* diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c index 15bb790359f81..5d560e9073f42 100644 --- a/fs/xfs/xfs_buf.c +++ b/fs/xfs/xfs_buf.c @@ -29,11 +29,6 @@ struct kmem_cache *xfs_buf_cache; /* * Locking orders * - * xfs_buf_ioacct_inc: - * xfs_buf_ioacct_dec: - * b_sema (caller holds) - * b_lock - * * xfs_buf_stale: * b_sema (caller holds) * b_lock @@ -81,51 +76,6 @@ xfs_buf_vmap_len( return (bp->b_page_count * PAGE_SIZE); } -/* - * Bump the I/O in flight count on the buftarg if we haven't yet done so for - * this buffer. The count is incremented once per buffer (per hold cycle) - * because the corresponding decrement is deferred to buffer release. Buffers - * can undergo I/O multiple times in a hold-release cycle and per buffer I/O - * tracking adds unnecessary overhead. This is used for sychronization purposes - * with unmount (see xfs_buftarg_drain()), so all we really need is a count of - * in-flight buffers. - * - * Buffers that are never released (e.g., superblock, iclog buffers) must set - * the XBF_NO_IOACCT flag before I/O submission. Otherwise, the buftarg count - * never reaches zero and unmount hangs indefinitely. - */ -static inline void -xfs_buf_ioacct_inc( - struct xfs_buf *bp) -{ - if (bp->b_flags & XBF_NO_IOACCT) - return; - - ASSERT(bp->b_flags & XBF_ASYNC); - spin_lock(&bp->b_lock); - if (!(bp->b_state & XFS_BSTATE_IN_FLIGHT)) { - bp->b_state |= XFS_BSTATE_IN_FLIGHT; - percpu_counter_inc(&bp->b_target->bt_io_count); - } - spin_unlock(&bp->b_lock); -} - -/* - * Clear the in-flight state on a buffer about to be released to the LRU or - * freed and unaccount from the buftarg. - */ -static inline void -__xfs_buf_ioacct_dec( - struct xfs_buf *bp) -{ - lockdep_assert_held(&bp->b_lock); - - if (bp->b_state & XFS_BSTATE_IN_FLIGHT) { - bp->b_state &= ~XFS_BSTATE_IN_FLIGHT; - percpu_counter_dec(&bp->b_target->bt_io_count); - } -} - /* * When we mark a buffer stale, we remove the buffer from the LRU and clear the * b_lru_ref count so that the buffer is freed immediately when the buffer @@ -149,15 +99,7 @@ xfs_buf_stale( */ bp->b_flags &= ~_XBF_DELWRI_Q; - /* - * Once the buffer is marked stale and unlocked, a subsequent lookup - * could reset b_flags. There is no guarantee that the buffer is - * unaccounted (released to LRU) before that occurs. Drop in-flight - * status now to preserve accounting consistency. - */ spin_lock(&bp->b_lock); - __xfs_buf_ioacct_dec(bp); - atomic_set(&bp->b_lru_ref, 0); if (!(bp->b_state & XFS_BSTATE_DISPOSE) && (list_lru_del_obj(&bp->b_target->bt_lru, &bp->b_lru))) @@ -794,18 +736,13 @@ xfs_buf_get_map( int _xfs_buf_read( - struct xfs_buf *bp, - xfs_buf_flags_t flags) + struct xfs_buf *bp) { - ASSERT(!(flags & XBF_WRITE)); ASSERT(bp->b_maps[0].bm_bn != XFS_BUF_DADDR_NULL); bp->b_flags &= ~(XBF_WRITE | XBF_ASYNC | XBF_READ_AHEAD | XBF_DONE); - bp->b_flags |= flags & (XBF_READ | XBF_ASYNC | XBF_READ_AHEAD); - + bp->b_flags |= XBF_READ; xfs_buf_submit(bp); - if (flags & XBF_ASYNC) - return 0; return xfs_buf_iowait(bp); } @@ -857,6 +794,8 @@ xfs_buf_read_map( struct xfs_buf *bp; int error; + ASSERT(!(flags & (XBF_WRITE | XBF_ASYNC | XBF_READ_AHEAD))); + flags |= XBF_READ; *bpp = NULL; @@ -870,21 +809,11 @@ xfs_buf_read_map( /* Initiate the buffer read and wait. */ XFS_STATS_INC(target->bt_mount, xb_get_read); bp->b_ops = ops; - error = _xfs_buf_read(bp, flags); - - /* Readahead iodone already dropped the buffer, so exit. 
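With the async readahead branch removed here, _xfs_buf_read() is purely synchronous: set XBF_READ, submit, wait. The wait itself becomes a loop further down in xfs_buf_iowait(), because completion may resubmit the I/O on a retryable write error after reinit_completion(&bp->b_iowait). A toy model of that wait-until-truly-done loop (names and the retry policy are illustrative, not xfs's):

#include <stdbool.h>
#include <stdio.h>

struct buf { int error; int retries; };

static void submit(struct buf *bp) { (void)bp; /* kick off the I/O */ }

/* returns false if the I/O was resubmitted and must be waited for again */
static bool ioend(struct buf *bp)
{
	if (bp->error && bp->retries-- > 0) {
		bp->error = 0;
		submit(bp);	/* like xfs_buf_ioend_handle_error() */
		return false;
	}
	return true;
}

static int iowait(struct buf *bp)
{
	do {
		/* wait_for_completion(&bp->b_iowait) in the real code */
	} while (!ioend(bp));
	return bp->error;
}

int main(void)
{
	struct buf bp = { .error = 1, .retries = 1 };
	printf("error=%d\n", iowait(&bp));	/* retried once, then 0 */
	return 0;
}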
*/ - if (flags & XBF_ASYNC) - return 0; + error = _xfs_buf_read(bp); } else { /* Buffer already read; all we need to do is check it. */ error = xfs_buf_reverify(bp, ops); - /* Readahead already finished; drop the buffer and exit. */ - if (flags & XBF_ASYNC) { - xfs_buf_relse(bp); - return 0; - } - /* We do not want read in the flags */ bp->b_flags &= ~XBF_READ; ASSERT(bp->b_ops != NULL || ops == NULL); @@ -936,6 +865,7 @@ xfs_buf_readahead_map( int nmaps, const struct xfs_buf_ops *ops) { + const xfs_buf_flags_t flags = XBF_READ | XBF_ASYNC | XBF_READ_AHEAD; struct xfs_buf *bp; /* @@ -945,9 +875,21 @@ xfs_buf_readahead_map( if (xfs_buftarg_is_mem(target)) return; - xfs_buf_read_map(target, map, nmaps, - XBF_TRYLOCK | XBF_ASYNC | XBF_READ_AHEAD, &bp, ops, - __this_address); + if (xfs_buf_get_map(target, map, nmaps, flags | XBF_TRYLOCK, &bp)) + return; + trace_xfs_buf_readahead(bp, 0, _RET_IP_); + + if (bp->b_flags & XBF_DONE) { + xfs_buf_reverify(bp, ops); + xfs_buf_relse(bp); + return; + } + XFS_STATS_INC(target->bt_mount, xb_get_read); + bp->b_ops = ops; + bp->b_flags &= ~(XBF_WRITE | XBF_DONE); + bp->b_flags |= flags; + percpu_counter_inc(&target->bt_readahead_count); + xfs_buf_submit(bp); } /* @@ -1003,10 +945,12 @@ xfs_buf_get_uncached( struct xfs_buf *bp; DEFINE_SINGLE_BUF_MAP(map, XFS_BUF_DADDR_NULL, numblks); + /* there are currently no valid flags for xfs_buf_get_uncached */ + ASSERT(flags == 0); + *bpp = NULL; - /* flags might contain irrelevant bits, pass only what we care about */ - error = _xfs_buf_alloc(target, &map, 1, flags & XBF_NO_IOACCT, &bp); + error = _xfs_buf_alloc(target, &map, 1, flags, &bp); if (error) return error; @@ -1060,7 +1004,6 @@ xfs_buf_rele_uncached( spin_unlock(&bp->b_lock); return; } - __xfs_buf_ioacct_dec(bp); spin_unlock(&bp->b_lock); xfs_buf_free(bp); } @@ -1079,20 +1022,12 @@ xfs_buf_rele_cached( spin_lock(&bp->b_lock); ASSERT(bp->b_hold >= 1); if (bp->b_hold > 1) { - /* - * Drop the in-flight state if the buffer is already on the LRU - * and it holds the only reference. This is racy because we - * haven't acquired the pag lock, but the use of _XBF_IN_FLIGHT - * ensures the decrement occurs only once per-buf. 
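The bt_readahead_count introduced here narrows the old per-buffer I/O accounting to the one case that still needs draining at unmount: fire-and-forget readahead. A userspace model of the scheme, with a C11 atomic standing in for the kernel percpu_counter (illustrative names only):

#include <stdatomic.h>

static atomic_long in_flight_readahead;

/* Taken once per submission, as in xfs_buf_readahead_map(). */
static void readahead_submit(void)
{
        atomic_fetch_add(&in_flight_readahead, 1);
}

/* Dropped when the readahead I/O completes, as in __xfs_buf_ioend(). */
static void readahead_complete(void)
{
        atomic_fetch_sub(&in_flight_readahead, 1);
}

/* Models xfs_buftarg_wait() below: poll until all readahead has
 * drained; the kernel sleeps via delay(100) between checks. */
static void buftarg_wait(void)
{
        while (atomic_load(&in_flight_readahead) != 0)
                ;
}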
- */ - if (--bp->b_hold == 1 && !list_empty(&bp->b_lru)) - __xfs_buf_ioacct_dec(bp); + bp->b_hold--; goto out_unlock; } /* we are asked to drop the last reference */ - __xfs_buf_ioacct_dec(bp); - if (!(bp->b_flags & XBF_STALE) && atomic_read(&bp->b_lru_ref)) { + if (atomic_read(&bp->b_lru_ref)) { /* * If the buffer is added to the LRU, keep the reference to the * buffer for the LRU and clear the (now stale) dispose list @@ -1345,6 +1280,7 @@ xfs_buf_ioend_handle_error( resubmit: xfs_buf_ioerror(bp, 0); bp->b_flags |= (XBF_DONE | XBF_WRITE_FAIL); + reinit_completion(&bp->b_iowait); xfs_buf_submit(bp); return true; out_stale: @@ -1355,8 +1291,9 @@ xfs_buf_ioend_handle_error( return false; } -static void -xfs_buf_ioend( +/* returns false if the caller needs to resubmit the I/O, else true */ +static bool +__xfs_buf_ioend( struct xfs_buf *bp) { trace_xfs_buf_iodone(bp, _RET_IP_); @@ -1369,6 +1306,8 @@ xfs_buf_ioend( bp->b_ops->verify_read(bp); if (!bp->b_error) bp->b_flags |= XBF_DONE; + if (bp->b_flags & XBF_READ_AHEAD) + percpu_counter_dec(&bp->b_target->bt_readahead_count); } else { if (!bp->b_error) { bp->b_flags &= ~XBF_WRITE_FAIL; @@ -1376,7 +1315,7 @@ xfs_buf_ioend( } if (unlikely(bp->b_error) && xfs_buf_ioend_handle_error(bp)) - return; + return false; /* clear the retry state */ bp->b_last_error = 0; @@ -1397,7 +1336,15 @@ xfs_buf_ioend( bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD | _XBF_LOGRECOVERY); + return true; +} +static void +xfs_buf_ioend( + struct xfs_buf *bp) +{ + if (!__xfs_buf_ioend(bp)) + return; if (bp->b_flags & XBF_ASYNC) xfs_buf_relse(bp); else @@ -1411,15 +1358,8 @@ xfs_buf_ioend_work( struct xfs_buf *bp = container_of(work, struct xfs_buf, b_ioend_work); - xfs_buf_ioend(bp); -} - -static void -xfs_buf_ioend_async( - struct xfs_buf *bp) -{ - INIT_WORK(&bp->b_ioend_work, xfs_buf_ioend_work); - queue_work(bp->b_mount->m_buf_workqueue, &bp->b_ioend_work); + if (__xfs_buf_ioend(bp)) + xfs_buf_relse(bp); } void @@ -1491,7 +1431,13 @@ xfs_buf_bio_end_io( XFS_TEST_ERROR(false, bp->b_mount, XFS_ERRTAG_BUF_IOERROR)) xfs_buf_ioerror(bp, -EIO); - xfs_buf_ioend_async(bp); + if (bp->b_flags & XBF_ASYNC) { + INIT_WORK(&bp->b_ioend_work, xfs_buf_ioend_work); + queue_work(bp->b_mount->m_buf_workqueue, &bp->b_ioend_work); + } else { + complete(&bp->b_iowait); + } + bio_put(bio); } @@ -1568,9 +1514,11 @@ xfs_buf_iowait( { ASSERT(!(bp->b_flags & XBF_ASYNC)); - trace_xfs_buf_iowait(bp, _RET_IP_); - wait_for_completion(&bp->b_iowait); - trace_xfs_buf_iowait_done(bp, _RET_IP_); + do { + trace_xfs_buf_iowait(bp, _RET_IP_); + wait_for_completion(&bp->b_iowait); + trace_xfs_buf_iowait_done(bp, _RET_IP_); + } while (!__xfs_buf_ioend(bp)); return bp->b_error; } @@ -1648,9 +1596,6 @@ xfs_buf_submit( */ bp->b_error = 0; - if (bp->b_flags & XBF_ASYNC) - xfs_buf_ioacct_inc(bp); - if ((bp->b_flags & XBF_WRITE) && !xfs_buf_verify_write(bp)) { xfs_force_shutdown(bp->b_mount, SHUTDOWN_CORRUPT_INCORE); xfs_buf_ioend(bp); @@ -1776,9 +1721,8 @@ xfs_buftarg_wait( struct xfs_buftarg *btp) { /* - * First wait on the buftarg I/O count for all in-flight buffers to be - * released. This is critical as new buffers do not make the LRU until - * they are released. + * First wait for all in-flight readahead buffers to be released. This is + * critical as new buffers do not make the LRU until they are released. * * Next, flush the buffer workqueue to ensure all completion processing * has finished. 
Just waiting on buffer locks is not sufficient for @@ -1787,7 +1731,7 @@ xfs_buftarg_wait( * all reference counts have been dropped before we start walking the * LRU list. */ - while (percpu_counter_sum(&btp->bt_io_count)) + while (percpu_counter_sum(&btp->bt_readahead_count)) delay(100); flush_workqueue(btp->bt_mount->m_buf_workqueue); } @@ -1904,8 +1848,8 @@ xfs_destroy_buftarg( struct xfs_buftarg *btp) { shrinker_free(btp->bt_shrinker); - ASSERT(percpu_counter_sum(&btp->bt_io_count) == 0); - percpu_counter_destroy(&btp->bt_io_count); + ASSERT(percpu_counter_sum(&btp->bt_readahead_count) == 0); + percpu_counter_destroy(&btp->bt_readahead_count); list_lru_destroy(&btp->bt_lru); } @@ -1959,7 +1903,7 @@ xfs_init_buftarg( if (list_lru_init(&btp->bt_lru)) return -ENOMEM; - if (percpu_counter_init(&btp->bt_io_count, 0, GFP_KERNEL)) + if (percpu_counter_init(&btp->bt_readahead_count, 0, GFP_KERNEL)) goto out_destroy_lru; btp->bt_shrinker = @@ -1973,7 +1917,7 @@ xfs_init_buftarg( return 0; out_destroy_io_count: - percpu_counter_destroy(&btp->bt_io_count); + percpu_counter_destroy(&btp->bt_readahead_count); out_destroy_lru: list_lru_destroy(&btp->bt_lru); return -ENOMEM; diff --git a/fs/xfs/xfs_buf.h b/fs/xfs/xfs_buf.h index 3b4ed42e11c01..80e06eecaf56e 100644 --- a/fs/xfs/xfs_buf.h +++ b/fs/xfs/xfs_buf.h @@ -27,7 +27,6 @@ struct xfs_buf; #define XBF_READ (1u << 0) /* buffer intended for reading from device */ #define XBF_WRITE (1u << 1) /* buffer intended for writing to device */ #define XBF_READ_AHEAD (1u << 2) /* asynchronous read-ahead */ -#define XBF_NO_IOACCT (1u << 3) /* bypass I/O accounting (non-LRU bufs) */ #define XBF_ASYNC (1u << 4) /* initiator will not wait for completion */ #define XBF_DONE (1u << 5) /* all pages in the buffer uptodate */ #define XBF_STALE (1u << 6) /* buffer has been staled, do not find it */ @@ -58,7 +57,6 @@ typedef unsigned int xfs_buf_flags_t; { XBF_READ, "READ" }, \ { XBF_WRITE, "WRITE" }, \ { XBF_READ_AHEAD, "READ_AHEAD" }, \ - { XBF_NO_IOACCT, "NO_IOACCT" }, \ { XBF_ASYNC, "ASYNC" }, \ { XBF_DONE, "DONE" }, \ { XBF_STALE, "STALE" }, \ @@ -77,7 +75,6 @@ typedef unsigned int xfs_buf_flags_t; * Internal state flags. 
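Since a failed buffer can now be resubmitted from completion context (reinit_completion() above re-arms b_iowait), a synchronous waiter has to loop until completion processing reports the I/O as truly finished. A condensed, self-contained model of that handshake (simplified types; the stub only marks where the kernel blocks on the completion):

#include <stdbool.h>

struct buf {
        int error;
        int retries_left;
};

static void wait_for_completion_stub(struct buf *bp)
{
        /* the kernel blocks on bp->b_iowait here */
}

/* Mirrors __xfs_buf_ioend(): false means the error handler resubmitted
 * the I/O and the waiter must go back to sleep. */
static bool buf_ioend(struct buf *bp)
{
        if (bp->error && bp->retries_left-- > 0) {
                bp->error = 0;  /* resubmit path clears the error */
                return false;
        }
        return true;
}

/* Models the reworked xfs_buf_iowait() retry loop. */
static int buf_iowait(struct buf *bp)
{
        do {
                wait_for_completion_stub(bp);
        } while (!buf_ioend(bp));
        return bp->error;
}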
*/ #define XFS_BSTATE_DISPOSE (1 << 0) /* buffer being discarded */ -#define XFS_BSTATE_IN_FLIGHT (1 << 1) /* I/O in flight */ struct xfs_buf_cache { struct rhashtable bc_hash; @@ -116,7 +113,7 @@ struct xfs_buftarg { struct shrinker *bt_shrinker; struct list_lru bt_lru; - struct percpu_counter bt_io_count; + struct percpu_counter bt_readahead_count; struct ratelimit_state bt_ioerror_rl; /* Atomic write unit values */ @@ -291,7 +288,7 @@ int xfs_buf_get_uncached(struct xfs_buftarg *target, size_t numblks, int xfs_buf_read_uncached(struct xfs_buftarg *target, xfs_daddr_t daddr, size_t numblks, xfs_buf_flags_t flags, struct xfs_buf **bpp, const struct xfs_buf_ops *ops); -int _xfs_buf_read(struct xfs_buf *bp, xfs_buf_flags_t flags); +int _xfs_buf_read(struct xfs_buf *bp); void xfs_buf_hold(struct xfs_buf *bp); /* Releasing Buffers */ diff --git a/fs/xfs/xfs_buf_mem.c b/fs/xfs/xfs_buf_mem.c index 07bebbfb16ee1..5b64a2b3b113f 100644 --- a/fs/xfs/xfs_buf_mem.c +++ b/fs/xfs/xfs_buf_mem.c @@ -117,7 +117,7 @@ xmbuf_free( struct xfs_buftarg *btp) { ASSERT(xfs_buftarg_is_mem(btp)); - ASSERT(percpu_counter_sum(&btp->bt_io_count) == 0); + ASSERT(percpu_counter_sum(&btp->bt_readahead_count) == 0); trace_xmbuf_free(btp); diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c index f7a7d89c345ec..9a435b1ff2647 100644 --- a/fs/xfs/xfs_file.c +++ b/fs/xfs/xfs_file.c @@ -1451,9 +1451,6 @@ xfs_dax_read_fault( trace_xfs_read_fault(ip, order); - ret = filemap_fsnotify_fault(vmf); - if (unlikely(ret)) - return ret; xfs_ilock(ip, XFS_MMAPLOCK_SHARED); ret = xfs_dax_fault_locked(vmf, order, false); xfs_iunlock(ip, XFS_MMAPLOCK_SHARED); @@ -1482,16 +1479,6 @@ xfs_write_fault( vm_fault_t ret; trace_xfs_write_fault(ip, order); - /* - * Usually we get here from ->page_mkwrite callback but in case of DAX - * we will get here also for ordinary write fault. Handle HSM - * notifications for that case. - */ - if (IS_DAX(inode)) { - ret = filemap_fsnotify_fault(vmf); - if (unlikely(ret)) - return ret; - } sb_start_pagefault(inode->i_sb); file_update_time(vmf->vma->vm_file); diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c index b3c27dbccce86..2f76531842f83 100644 --- a/fs/xfs/xfs_log_recover.c +++ b/fs/xfs/xfs_log_recover.c @@ -3380,7 +3380,7 @@ xlog_do_recover( */ xfs_buf_lock(bp); xfs_buf_hold(bp); - error = _xfs_buf_read(bp, XBF_READ); + error = _xfs_buf_read(bp); if (error) { if (!xlog_is_shutdown(log)) { xfs_buf_ioerror_alert(bp, __this_address); diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c index 477c5262cf912..b69356582b86f 100644 --- a/fs/xfs/xfs_mount.c +++ b/fs/xfs/xfs_mount.c @@ -181,14 +181,11 @@ xfs_readsb( /* * Allocate a (locked) buffer to hold the superblock. This will be kept - * around at all times to optimize access to the superblock. Therefore, - * set XBF_NO_IOACCT to make sure it doesn't hold the buftarg count - * elevated. + * around at all times to optimize access to the superblock. 
*/ reread: error = xfs_buf_read_uncached(mp->m_ddev_targp, XFS_SB_DADDR, - BTOBB(sector_size), XBF_NO_IOACCT, &bp, - buf_ops); + BTOBB(sector_size), 0, &bp, buf_ops); if (error) { if (loud) xfs_warn(mp, "SB validate failed with error %d.", error); diff --git a/fs/xfs/xfs_rtalloc.c b/fs/xfs/xfs_rtalloc.c index d8e6d073d64dc..57bef567e0116 100644 --- a/fs/xfs/xfs_rtalloc.c +++ b/fs/xfs/xfs_rtalloc.c @@ -1407,7 +1407,7 @@ xfs_rtmount_readsb( /* m_blkbb_log is not set up yet */ error = xfs_buf_read_uncached(mp->m_rtdev_targp, XFS_RTSB_DADDR, - mp->m_sb.sb_blocksize >> BBSHIFT, XBF_NO_IOACCT, &bp, + mp->m_sb.sb_blocksize >> BBSHIFT, 0, &bp, &xfs_rtsb_buf_ops); if (error) { xfs_warn(mp, "rt sb validate failed with error %d.", error); diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h index b29462363b815..bfc2f12490224 100644 --- a/fs/xfs/xfs_trace.h +++ b/fs/xfs/xfs_trace.h @@ -593,6 +593,7 @@ DEFINE_EVENT(xfs_buf_flags_class, name, \ DEFINE_BUF_FLAGS_EVENT(xfs_buf_find); DEFINE_BUF_FLAGS_EVENT(xfs_buf_get); DEFINE_BUF_FLAGS_EVENT(xfs_buf_read); +DEFINE_BUF_FLAGS_EVENT(xfs_buf_readahead); TRACE_EVENT(xfs_buf_ioerror, TP_PROTO(struct xfs_buf *bp, int error, xfs_failaddr_t caller_ip), diff --git a/include/asm-generic/hugetlb.h b/include/asm-generic/hugetlb.h index f42133dae68e5..2afc95bf1655f 100644 --- a/include/asm-generic/hugetlb.h +++ b/include/asm-generic/hugetlb.h @@ -90,7 +90,7 @@ static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, #ifndef __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm, - unsigned long addr, pte_t *ptep) + unsigned long addr, pte_t *ptep, unsigned long sz) { return ptep_get_and_clear(mm, addr, ptep); } diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index 02a4adb4a9999..0d5b186abee86 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -457,7 +457,7 @@ defined(CONFIG_AUTOFDO_CLANG) || defined(CONFIG_PROPELLER_CLANG) . = ALIGN((align)); \ .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \ __start_rodata = .; \ - *(.rodata) *(.rodata.*) \ + *(.rodata) *(.rodata.*) *(.data.rel.ro*) \ SCHED_DATA \ RO_AFTER_INIT_DATA /* Read only after init */ \ . 
= ALIGN(8); \ diff --git a/include/linux/avf/virtchnl.h b/include/linux/avf/virtchnl.h index 13a11f3c09b87..cf0afa60e4a7b 100644 --- a/include/linux/avf/virtchnl.h +++ b/include/linux/avf/virtchnl.h @@ -154,7 +154,10 @@ enum virtchnl_ops { VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2 = 55, VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2 = 56, VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2 = 57, - /* opcode 57 - 65 are reserved */ + /* opcode 58 and 59 are reserved */ + VIRTCHNL_OP_1588_PTP_GET_CAPS = 60, + VIRTCHNL_OP_1588_PTP_GET_TIME = 61, + /* opcode 62 - 65 are reserved */ VIRTCHNL_OP_GET_QOS_CAPS = 66, /* opcode 68 through 111 are reserved */ VIRTCHNL_OP_CONFIG_QUEUE_BW = 112, @@ -270,6 +273,7 @@ VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource); #define VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF BIT(27) #define VIRTCHNL_VF_OFFLOAD_FDIR_PF BIT(28) #define VIRTCHNL_VF_OFFLOAD_QOS BIT(29) +#define VIRTCHNL_VF_CAP_PTP BIT(31) #define VF_BASE_MODE_OFFLOADS (VIRTCHNL_VF_OFFLOAD_L2 | \ VIRTCHNL_VF_OFFLOAD_VLAN | \ @@ -309,6 +313,60 @@ struct virtchnl_txq_info { VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_txq_info); +/* RX descriptor IDs (range from 0 to 63) */ +enum virtchnl_rx_desc_ids { + VIRTCHNL_RXDID_0_16B_BASE = 0, + VIRTCHNL_RXDID_1_32B_BASE = 1, + VIRTCHNL_RXDID_2_FLEX_SQ_NIC = 2, + VIRTCHNL_RXDID_3_FLEX_SQ_SW = 3, + VIRTCHNL_RXDID_4_FLEX_SQ_NIC_VEB = 4, + VIRTCHNL_RXDID_5_FLEX_SQ_NIC_ACL = 5, + VIRTCHNL_RXDID_6_FLEX_SQ_NIC_2 = 6, + VIRTCHNL_RXDID_7_HW_RSVD = 7, + /* 8 through 15 are reserved */ + VIRTCHNL_RXDID_16_COMMS_GENERIC = 16, + VIRTCHNL_RXDID_17_COMMS_AUX_VLAN = 17, + VIRTCHNL_RXDID_18_COMMS_AUX_IPV4 = 18, + VIRTCHNL_RXDID_19_COMMS_AUX_IPV6 = 19, + VIRTCHNL_RXDID_20_COMMS_AUX_FLOW = 20, + VIRTCHNL_RXDID_21_COMMS_AUX_TCP = 21, + /* 22 through 63 are reserved */ +}; + +#define VIRTCHNL_RXDID_BIT(x) BIT_ULL(VIRTCHNL_RXDID_##x) + +/* RX descriptor ID bitmasks */ +enum virtchnl_rx_desc_id_bitmasks { + VIRTCHNL_RXDID_0_16B_BASE_M = VIRTCHNL_RXDID_BIT(0_16B_BASE), + VIRTCHNL_RXDID_1_32B_BASE_M = VIRTCHNL_RXDID_BIT(1_32B_BASE), + VIRTCHNL_RXDID_2_FLEX_SQ_NIC_M = VIRTCHNL_RXDID_BIT(2_FLEX_SQ_NIC), + VIRTCHNL_RXDID_3_FLEX_SQ_SW_M = VIRTCHNL_RXDID_BIT(3_FLEX_SQ_SW), + VIRTCHNL_RXDID_4_FLEX_SQ_NIC_VEB_M = VIRTCHNL_RXDID_BIT(4_FLEX_SQ_NIC_VEB), + VIRTCHNL_RXDID_5_FLEX_SQ_NIC_ACL_M = VIRTCHNL_RXDID_BIT(5_FLEX_SQ_NIC_ACL), + VIRTCHNL_RXDID_6_FLEX_SQ_NIC_2_M = VIRTCHNL_RXDID_BIT(6_FLEX_SQ_NIC_2), + VIRTCHNL_RXDID_7_HW_RSVD_M = VIRTCHNL_RXDID_BIT(7_HW_RSVD), + /* 8 through 15 are reserved */ + VIRTCHNL_RXDID_16_COMMS_GENERIC_M = VIRTCHNL_RXDID_BIT(16_COMMS_GENERIC), + VIRTCHNL_RXDID_17_COMMS_AUX_VLAN_M = VIRTCHNL_RXDID_BIT(17_COMMS_AUX_VLAN), + VIRTCHNL_RXDID_18_COMMS_AUX_IPV4_M = VIRTCHNL_RXDID_BIT(18_COMMS_AUX_IPV4), + VIRTCHNL_RXDID_19_COMMS_AUX_IPV6_M = VIRTCHNL_RXDID_BIT(19_COMMS_AUX_IPV6), + VIRTCHNL_RXDID_20_COMMS_AUX_FLOW_M = VIRTCHNL_RXDID_BIT(20_COMMS_AUX_FLOW), + VIRTCHNL_RXDID_21_COMMS_AUX_TCP_M = VIRTCHNL_RXDID_BIT(21_COMMS_AUX_TCP), + /* 22 through 63 are reserved */ +}; + +/* virtchnl_rxq_info_flags - definition of bits in the flags field of the + * virtchnl_rxq_info structure. + * + * @VIRTCHNL_PTP_RX_TSTAMP: request to enable Rx timestamping + * + * Other flag bits are currently reserved and they may be extended in the + * future. + */ +enum virtchnl_rxq_info_flags { + VIRTCHNL_PTP_RX_TSTAMP = BIT(0), +}; + /* VIRTCHNL_OP_CONFIG_RX_QUEUE * VF sends this message to set up parameters for one RX queue. * External data buffer contains one instance of virtchnl_rxq_info. 
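To make the new fields concrete, here is a hedged sketch of how a VF driver might fill the reworked queue info shown in the next hunk, using the descriptor IDs and flags defined above (standalone simplified layout; the real struct carries many more fields):

#include <stdint.h>

enum rx_desc_id { RXDID_2_FLEX_SQ_NIC = 2 };  /* VIRTCHNL_RXDID_2_FLEX_SQ_NIC */
enum rxq_flags  { PTP_RX_TSTAMP = 1u << 0 };  /* VIRTCHNL_PTP_RX_TSTAMP */

struct rxq_info_model {
        uint8_t rxdid;   /* replaces the old u8 rxdid + two pad bytes */
        uint8_t flags;
        uint8_t pad1;
};

int main(void)
{
        /* Assumes VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC and
         * VIRTCHNL_1588_PTP_CAP_RX_TSTAMP were negotiated, so a flexible
         * descriptor format is chosen and Rx timestamps requested. */
        struct rxq_info_model rxq = {
                .rxdid = RXDID_2_FLEX_SQ_NIC,
                .flags = PTP_RX_TSTAMP,
        };
        return (rxq.flags & PTP_RX_TSTAMP) ? 0 : 1;
}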
@@ -331,8 +389,14 @@ struct virtchnl_rxq_info { u32 databuffer_size; u32 max_pkt_size; u8 crc_disable; - u8 rxdid; - u8 pad1[2]; + /* see enum virtchnl_rx_desc_ids; + * only used when VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC is supported. Note + * that when the offload is not supported, the descriptor format aligns + * with VIRTCHNL_RXDID_1_32B_BASE. + */ + enum virtchnl_rx_desc_ids rxdid:8; + enum virtchnl_rxq_info_flags flags:8; /* see virtchnl_rxq_info_flags */ + u8 pad1; u64 dma_ring_addr; /* see enum virtchnl_rx_hsplit; deprecated with AVF 1.0 */ @@ -1032,10 +1096,6 @@ struct virtchnl_filter { VIRTCHNL_CHECK_STRUCT_LEN(272, virtchnl_filter); -struct virtchnl_supported_rxdids { - u64 supported_rxdids; -}; - /* VIRTCHNL_OP_EVENT * PF sends this message to inform the VF driver of events that may affect it. * No direct response is expected from the VF, though it may generate other @@ -1283,7 +1343,7 @@ struct virtchnl_proto_hdrs { * 2 - from the second inner layer * .... **/ - int count; /* the proto layers must < VIRTCHNL_MAX_NUM_PROTO_HDRS */ + u32 count; /* the proto layers must be < VIRTCHNL_MAX_NUM_PROTO_HDRS */ union { struct virtchnl_proto_hdr proto_hdr[VIRTCHNL_MAX_NUM_PROTO_HDRS]; @@ -1335,7 +1395,7 @@ VIRTCHNL_CHECK_STRUCT_LEN(36, virtchnl_filter_action); struct virtchnl_filter_action_set { /* action number must be less than VIRTCHNL_MAX_NUM_ACTIONS */ - int count; + u32 count; struct virtchnl_filter_action actions[VIRTCHNL_MAX_NUM_ACTIONS]; }; @@ -1425,6 +1485,61 @@ struct virtchnl_fdir_del { VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_fdir_del); +#define VIRTCHNL_1588_PTP_CAP_RX_TSTAMP BIT(1) +#define VIRTCHNL_1588_PTP_CAP_READ_PHC BIT(2) + +/** + * struct virtchnl_ptp_caps - Defines the PTP caps available to the VF. + * @caps: On send, VF sets what capabilities it requests. On reply, PF + * indicates what has been enabled for this VF. The PF shall not set + * bits which were not requested by the VF. + * @rsvd: Reserved bits for future extension. + * + * Structure that defines the PTP capabilities available to the VF. The VF + * sends VIRTCHNL_OP_1588_PTP_GET_CAPS, and must fill in the ptp_caps field + * indicating what capabilities it is requesting. The PF will respond with the + * same message with the virtchnl_ptp_caps structure indicating what is + * enabled for the VF. + * + * VIRTCHNL_1588_PTP_CAP_RX_TSTAMP indicates that the VF receive queues have + * receive timestamps enabled in the flexible descriptors. Note that this + * requires a VF to also negotiate to enable advanced flexible descriptors in + * the receive path instead of the default legacy descriptor format. + * + * VIRTCHNL_1588_PTP_CAP_READ_PHC indicates that the VF may read the PHC time + * via the VIRTCHNL_OP_1588_PTP_GET_TIME command. + * + * Note that in the future, additional capability flags may be added which + * indicate additional extended support. All fields marked as reserved by this + * header will be set to zero. VF implementations should verify this to ensure + * that future extensions do not break compatibility. + */ +struct virtchnl_ptp_caps { + u32 caps; + u8 rsvd[44]; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(48, virtchnl_ptp_caps); + +/** + * struct virtchnl_phc_time - Contains the 64 bits of PHC clock time in ns. + * @time: PHC time in nanoseconds + * @rsvd: Reserved for future extension + * + * Structure received with VIRTCHNL_OP_1588_PTP_GET_TIME. Contains the 64 bits + * of PHC clock time in nanoseconds. + * + * VIRTCHNL_OP_1588_PTP_GET_TIME may be sent to request the current time of 
This op is available in case direct access via the PHC registers + * is not available. + */ +struct virtchnl_phc_time { + u64 time; + u8 rsvd[8]; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_phc_time); + struct virtchnl_shaper_bw { /* Unit is Kbps */ u32 committed; @@ -1757,6 +1872,12 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode, } } break; + case VIRTCHNL_OP_1588_PTP_GET_CAPS: + valid_len = sizeof(struct virtchnl_ptp_caps); + break; + case VIRTCHNL_OP_1588_PTP_GET_TIME: + valid_len = sizeof(struct virtchnl_phc_time); + break; /* These are always errors coming from the VF. */ case VIRTCHNL_OP_EVENT: case VIRTCHNL_OP_UNKNOWN: diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index fa2a76cc2f73d..aba9c24486aad 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -28,7 +28,7 @@ typedef enum rq_end_io_ret (rq_end_io_fn)(struct request *, blk_status_t); typedef __u32 __bitwise req_flags_t; /* Keep rqf_name[] in sync with the definitions below */ -enum { +enum rqf_flags { /* drive already may have started this one */ __RQF_STARTED, /* request for flush sequence */ @@ -852,12 +852,20 @@ static inline bool blk_mq_is_reserved_rq(struct request *rq) return rq->rq_flags & RQF_RESV; } -/* +/** + * blk_mq_add_to_batch() - add a request to the completion batch + * @req: The request to add to batch + * @iob: The batch to add the request to + * @is_error: Specify true if the request failed with an error + * @complete: The completion handler for the request + * * Batched completions only work when there is no I/O error and no special * ->end_io handler. + * + * Return: true when the request was added to the batch, otherwise false */ static inline bool blk_mq_add_to_batch(struct request *req, - struct io_comp_batch *iob, int ioerror, + struct io_comp_batch *iob, bool is_error, void (*complete)(struct io_comp_batch *)) { /* @@ -865,7 +873,7 @@ static inline bool blk_mq_add_to_batch(struct request *req, * 1) No batch container * 2) Has scheduler data attached * 3) Not a passthrough request and end_io set - * 4) Not a passthrough request and an ioerror + * 4) Not a passthrough request and failed with an error */ if (!iob) return false; @@ -874,7 +882,7 @@ static inline bool blk_mq_add_to_batch(struct request *req, if (!blk_rq_is_passthrough(req)) { if (req->end_io) return false; - if (ioerror < 0) + if (is_error) return false; } diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 248416ecd01c9..d37751789bf58 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -196,10 +196,11 @@ struct gendisk { unsigned int zone_capacity; unsigned int last_zone_capacity; unsigned long __rcu *conv_zones_bitmap; - unsigned int zone_wplugs_hash_bits; - spinlock_t zone_wplugs_lock; + unsigned int zone_wplugs_hash_bits; + atomic_t nr_zone_wplugs; + spinlock_t zone_wplugs_lock; struct mempool_s *zone_wplugs_pool; - struct hlist_head *zone_wplugs_hash; + struct hlist_head *zone_wplugs_hash; struct workqueue_struct *zone_wplugs_wq; #endif /* CONFIG_BLK_DEV_ZONED */ @@ -367,6 +368,7 @@ struct queue_limits { unsigned int max_sectors; unsigned int max_user_sectors; unsigned int max_segment_size; + unsigned int min_segment_size; unsigned int physical_block_size; unsigned int logical_block_size; unsigned int alignment_offset; diff --git a/include/linux/call_once.h b/include/linux/call_once.h index 6261aa0b3fb00..13cd6469e7e56 100644 --- a/include/linux/call_once.h +++ b/include/linux/call_once.h @@ -26,20 +26,41 @@ do { \ __once_init((once), #once, 
&__key); \ } while (0) -static inline void call_once(struct once *once, void (*cb)(struct once *)) +/* + * call_once - Ensure a function has been called exactly once + * + * @once: Tracking struct + * @cb: Function to be called + * + * If @once has never completed successfully before, call @cb and, if + * it returns a zero or positive value, mark @once as completed. Return + * the value returned by @cb. + * + * If @once has completed successfully before, return 0. + * + * The call to @cb is implicitly surrounded by a mutex, though for + * efficiency the function avoids taking it after the first call. + */ +static inline int call_once(struct once *once, int (*cb)(struct once *)) { - /* Pairs with atomic_set_release() below. */ - if (atomic_read_acquire(&once->state) == ONCE_COMPLETED) - return; - - guard(mutex)(&once->lock); - WARN_ON(atomic_read(&once->state) == ONCE_RUNNING); - if (atomic_read(&once->state) != ONCE_NOT_STARTED) - return; - - atomic_set(&once->state, ONCE_RUNNING); - cb(once); - atomic_set_release(&once->state, ONCE_COMPLETED); + int r, state; + + /* Pairs with atomic_set_release() below. */ + if (atomic_read_acquire(&once->state) == ONCE_COMPLETED) + return 0; + + guard(mutex)(&once->lock); + state = atomic_read(&once->state); + if (unlikely(state != ONCE_NOT_STARTED)) + return WARN_ON_ONCE(state != ONCE_COMPLETED) ? -EINVAL : 0; + + atomic_set(&once->state, ONCE_RUNNING); + r = cb(once); + if (r < 0) + atomic_set(&once->state, ONCE_NOT_STARTED); + else + atomic_set_release(&once->state, ONCE_COMPLETED); + return r; } #endif /* _LINUX_CALL_ONCE_H */ diff --git a/include/linux/cleanup.h b/include/linux/cleanup.h index ec00e3f7af2b3..ee2614adb7858 100644 --- a/include/linux/cleanup.h +++ b/include/linux/cleanup.h @@ -212,7 +212,7 @@ const volatile void * __must_check_fn(const volatile void *val) { return val; } #define no_free_ptr(p) \ - ((typeof(p)) __must_check_fn(__get_and_null(p, NULL))) + ((typeof(p)) __must_check_fn((__force const volatile void *)__get_and_null(p, NULL))) #define return_ptr(p) return no_free_ptr(p) diff --git a/include/linux/compaction.h b/include/linux/compaction.h index e947764960496..7bf0c521db634 100644 --- a/include/linux/compaction.h +++ b/include/linux/compaction.h @@ -80,6 +80,11 @@ static inline unsigned long compact_gap(unsigned int order) return 2UL << order; } +static inline int current_is_kcompactd(void) +{ + return current->flags & PF_KCOMPACTD; +} + #ifdef CONFIG_COMPACTION extern unsigned int extfrag_for_order(struct zone *zone, unsigned int order); diff --git a/include/linux/compiler.h b/include/linux/compiler.h index 200fd3c5bc70d..1553857548241 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -110,7 +110,7 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val, /* Unreachable code */ #ifdef CONFIG_OBJTOOL /* Annotate a C jump table to allow objtool to follow the code flow */ -#define __annotate_jump_table __section(".rodata..c_jump_table,\"a\",@progbits #") +#define __annotate_jump_table __section(".data.rel.ro.c_jump_table") #else /* !CONFIG_OBJTOOL */ #define __annotate_jump_table #endif /* CONFIG_OBJTOOL */ diff --git a/include/linux/cpu_rmap.h b/include/linux/cpu_rmap.h index 20b5729903d72..2fd7ba75362aa 100644 --- a/include/linux/cpu_rmap.h +++ b/include/linux/cpu_rmap.h @@ -32,6 +32,7 @@ struct cpu_rmap { #define CPU_RMAP_DIST_INF 0xffff extern struct cpu_rmap *alloc_cpu_rmap(unsigned int size, gfp_t flags); +extern void cpu_rmap_get(struct cpu_rmap *rmap); extern int cpu_rmap_put(struct 
cpu_rmap *rmap); extern int cpu_rmap_add(struct cpu_rmap *rmap, void *obj); diff --git a/include/linux/cred.h b/include/linux/cred.h index 0c3c4b16b469c..5658a3bfe803c 100644 --- a/include/linux/cred.h +++ b/include/linux/cred.h @@ -172,18 +172,12 @@ static inline bool cap_ambient_invariant_ok(const struct cred *cred) static inline const struct cred *override_creds(const struct cred *override_cred) { - const struct cred *old = current->cred; - - rcu_assign_pointer(current->cred, override_cred); - return old; + return rcu_replace_pointer(current->cred, override_cred, 1); } static inline const struct cred *revert_creds(const struct cred *revert_cred) { - const struct cred *override_cred = current->cred; - - rcu_assign_pointer(current->cred, revert_cred); - return override_cred; + return rcu_replace_pointer(current->cred, revert_cred, 1); } /** diff --git a/include/linux/damon.h b/include/linux/damon.h index af525252b853a..c9074d569596a 100644 --- a/include/linux/damon.h +++ b/include/linux/damon.h @@ -470,6 +470,11 @@ struct damos { unsigned long next_apply_sis; /* informs if ongoing DAMOS walk for this scheme is finished */ bool walk_completed; + /* + * If the current region in the filtering stage is allowed by core + * layer-handled filters. If true, operations layer allows it, too. + */ + bool core_filters_allowed; /* public: */ struct damos_quota quota; struct damos_watermarks wmarks; diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h index 870994cc3ef7e..8210ece94fa6b 100644 --- a/include/linux/ethtool.h +++ b/include/linux/ethtool.h @@ -210,6 +210,14 @@ static inline u8 *ethtool_rxfh_context_key(struct ethtool_rxfh_context *ctx) void ethtool_rxfh_context_lost(struct net_device *dev, u32 context_id); +struct link_mode_info { + int speed; + u8 lanes; + u8 duplex; +}; + +extern const struct link_mode_info link_mode_params[]; + /* declare a link mode bitmap */ #define __ETHTOOL_DECLARE_LINK_MODE_MASK(name) \ DECLARE_BITMAP(name, __ETHTOOL_LINK_MODE_MASK_NBITS) @@ -763,13 +771,12 @@ struct kernel_ethtool_ts_info { /** * struct ethtool_ops - optional netdev operations + * @supported_input_xfrm: supported types of input xfrm from %RXH_XFRM_*. * @cap_link_lanes_supported: indicates if the driver supports lanes * parameter. * @cap_rss_ctx_supported: indicates if the driver supports RSS * contexts via legacy API, drivers implementing @create_rxfh_context * do not have to set this bit. - * @cap_rss_sym_xor_supported: indicates if the driver supports symmetric-xor - * RSS. * @rxfh_per_ctx_key: device supports setting different RSS key for each * additional context. Netlink API should report hfunc, key, and input_xfrm * for every context, not just context 0. @@ -995,9 +1002,9 @@ struct kernel_ethtool_ts_info { * of the generic netdev features interface. 
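The link_mode_params[] table exported above pairs every link mode with its speed, lane count, and duplex; in the ethtool core it is indexed by ETHTOOL_LINK_MODE_* bit number. A self-contained sketch of the lookup idea (the entries and indices here are invented for the demo):

#include <stdio.h>

struct link_mode_info {
        int           speed;   /* Mb/s */
        unsigned char lanes;
        unsigned char duplex;  /* 1 == full */
};

static const struct link_mode_info demo_link_modes[] = {
        [0] = { .speed = 1000,  .lanes = 1, .duplex = 1 },
        [1] = { .speed = 10000, .lanes = 4, .duplex = 1 },
};

int main(void)
{
        const struct link_mode_info *info = &demo_link_modes[1];

        printf("%d Mb/s over %u lane(s), %s duplex\n", info->speed,
               (unsigned)info->lanes, info->duplex ? "full" : "half");
        return 0;
}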
*/ struct ethtool_ops { + u32 supported_input_xfrm:8; u32 cap_link_lanes_supported:1; u32 cap_rss_ctx_supported:1; - u32 cap_rss_sym_xor_supported:1; u32 rxfh_per_ctx_key:1; u32 cap_rss_rxnfc_adds:1; u32 rxfh_indir_space; diff --git a/include/linux/filter.h b/include/linux/filter.h index 590476743f7a3..f5cf4d35d83e9 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -1528,6 +1528,7 @@ struct bpf_sock_ops_kern { void *skb_data_end; u8 op; u8 is_fullsock; + u8 is_locked_tcp_sock; u8 remaining_opt_len; u64 temp; /* temp and everything after is not * initialized to 0 before calling diff --git a/include/linux/fs.h b/include/linux/fs.h index 2c3b2f8a621f7..2788df98080f8 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -2975,8 +2975,8 @@ static inline ssize_t generic_write_sync(struct kiocb *iocb, ssize_t count) } else if (iocb->ki_flags & IOCB_DONTCACHE) { struct address_space *mapping = iocb->ki_filp->f_mapping; - filemap_fdatawrite_range_kick(mapping, iocb->ki_pos, - iocb->ki_pos + count); + filemap_fdatawrite_range_kick(mapping, iocb->ki_pos - count, + iocb->ki_pos - 1); } return count; @@ -3452,6 +3452,8 @@ extern const struct file_operations generic_ro_fops; extern int readlink_copy(char __user *, int, const char *, int); extern int page_readlink(struct dentry *, char __user *, int); +extern const char *page_get_link_raw(struct dentry *, struct inode *, + struct delayed_call *); extern const char *page_get_link(struct dentry *, struct inode *, struct delayed_call *); extern void page_put_link(void *); diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h index 6a33288bd6a1f..83d3ac97f8262 100644 --- a/include/linux/fsnotify.h +++ b/include/linux/fsnotify.h @@ -170,6 +170,21 @@ static inline int fsnotify_file_area_perm(struct file *file, int perm_mask, return fsnotify_path(&file->f_path, FS_ACCESS_PERM); } +/* + * fsnotify_mmap_perm - permission hook before mmap of file range + */ +static inline int fsnotify_mmap_perm(struct file *file, int prot, + const loff_t off, size_t len) +{ + /* + * mmap() generates only pre-content events. 
+ */ + if (!file || likely(!FMODE_FSNOTIFY_HSM(file->f_mode))) + return 0; + + return fsnotify_pre_content(&file->f_path, &off, len); +} + /* * fsnotify_truncate_perm - permission hook before file truncate */ @@ -223,6 +238,12 @@ static inline int fsnotify_file_area_perm(struct file *file, int perm_mask, return 0; } +static inline int fsnotify_mmap_perm(struct file *file, int prot, + const loff_t off, size_t len) +{ + return 0; +} + static inline int fsnotify_truncate_perm(const struct path *path, loff_t length) { return 0; diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index ec8c0ccc8f959..76a75ec03dd6b 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -682,6 +682,7 @@ struct huge_bootmem_page { int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list); int replace_free_hugepage_folios(unsigned long start_pfn, unsigned long end_pfn); +void wait_for_freed_hugetlb_folios(void); struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma, unsigned long addr, bool cow_from_owner); struct folio *alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid, @@ -1004,7 +1005,9 @@ static inline void hugetlb_count_sub(long l, struct mm_struct *mm) static inline pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) { - return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep); + unsigned long psize = huge_page_size(hstate_vma(vma)); + + return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep, psize); } #endif @@ -1066,6 +1069,10 @@ static inline int replace_free_hugepage_folios(unsigned long start_pfn, return 0; } +static inline void wait_for_freed_hugetlb_folios(void) +{ +} + static inline struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma, unsigned long addr, bool cow_from_owner) diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index 16741e542e81c..508d466de1cc2 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -1543,6 +1543,10 @@ struct ieee80211_mgmt { u8 count; u8 variable[]; } __packed ml_reconf_resp; + struct { + u8 action_code; + u8 variable[]; + } __packed epcs; } u; } __packed action; DECLARE_FLEX_ARRAY(u8, body); /* Generic frame body */ @@ -3109,6 +3113,11 @@ ieee80211_he_spr_size(const u8 *he_spr_ie) #define IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_11454 2 #define IEEE80211_EHT_MAC_CAP1_MAX_AMPDU_LEN_MASK 0x01 +#define IEEE80211_EHT_MAC_CAP1_EHT_TRS 0x02 +#define IEEE80211_EHT_MAC_CAP1_TXOP_RET 0x04 +#define IEEE80211_EHT_MAC_CAP1_TWO_BQRS 0x08 +#define IEEE80211_EHT_MAC_CAP1_EHT_LINK_ADAPT_MASK 0x30 +#define IEEE80211_EHT_MAC_CAP1_UNSOL_EPCS_PRIO_ACCESS 0x40 /* EHT PHY capabilities as defined in P802.11be_D2.0 section 9.4.2.313.3 */ #define IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ 0x02 @@ -5570,6 +5579,9 @@ static inline bool ieee80211_mle_reconf_sta_prof_size_ok(const u8 *data, fixed + prof->sta_info_len - 1 <= len; } +#define IEEE80211_MLE_STA_EPCS_CONTROL_LINK_ID 0x000f +#define IEEE80211_EPCS_ENA_RESP_BODY_LEN 3 + static inline bool ieee80211_tid_to_link_map_size_ok(const u8 *data, size_t len) { const struct ieee80211_ttlm_elem *t2l = (const void *)data; diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h index 3ff96ae31bf6d..c5fe3b2a53e82 100644 --- a/include/linux/if_bridge.h +++ b/include/linux/if_bridge.h @@ -65,11 +65,9 @@ struct br_ip_list { #define BR_DEFAULT_AGEING_TIME (300 * HZ) struct net_bridge; -void brioctl_set(int (*hook)(struct net *net, struct net_bridge *br, - unsigned int cmd, struct ifreq *ifr, +void 
brioctl_set(int (*hook)(struct net *net, unsigned int cmd, void __user *uarg)); -int br_ioctl_call(struct net *net, struct net_bridge *br, unsigned int cmd, - struct ifreq *ifr, void __user *uarg); +int br_ioctl_call(struct net *net, unsigned int cmd, void __user *uarg); #if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_BRIDGE_IGMP_SNOOPING) int br_multicast_list_adjacent(struct net_device *dev, diff --git a/include/linux/if_ether.h b/include/linux/if_ether.h index 8a9792a6427ad..61b7335aa037c 100644 --- a/include/linux/if_ether.h +++ b/include/linux/if_ether.h @@ -19,6 +19,9 @@ #include #include +/* XX:XX:XX:XX:XX:XX */ +#define MAC_ADDR_STR_LEN (3 * ETH_ALEN - 1) + static inline struct ethhdr *eth_hdr(const struct sk_buff *skb) { return (struct ethhdr *)skb_mac_header(skb); diff --git a/include/linux/if_macvlan.h b/include/linux/if_macvlan.h index 523025106a643..0f7281e3e4488 100644 --- a/include/linux/if_macvlan.h +++ b/include/linux/if_macvlan.h @@ -59,8 +59,10 @@ static inline void macvlan_count_rx(const struct macvlan_dev *vlan, extern void macvlan_common_setup(struct net_device *dev); -extern int macvlan_common_newlink(struct net *src_net, struct net_device *dev, - struct nlattr *tb[], struct nlattr *data[], +struct rtnl_newlink_params; + +extern int macvlan_common_newlink(struct net_device *dev, + struct rtnl_newlink_params *params, struct netlink_ext_ack *extack); extern void macvlan_dellink(struct net_device *dev, struct list_head *head); diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h index a6e2aadbb91bd..5aeeed22f35bf 100644 --- a/include/linux/ipv6.h +++ b/include/linux/ipv6.h @@ -207,6 +207,7 @@ struct inet6_cork { struct ipv6_txoptions *opt; u8 hop_limit; u8 tclass; + u8 dontfrag:1; }; /* struct ipv6_pinfo - ipv6 private area */ diff --git a/include/linux/libata.h b/include/linux/libata.h index c1c57f814b98d..e5695998acb02 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -88,6 +88,7 @@ enum ata_quirks { __ATA_QUIRK_MAX_SEC_1024, /* Limit max sects to 1024 */ __ATA_QUIRK_MAX_TRIM_128M, /* Limit max trim size to 128M */ __ATA_QUIRK_NO_NCQ_ON_ATI, /* Disable NCQ on ATI chipset */ + __ATA_QUIRK_NO_LPM_ON_ATI, /* Disable LPM on ATI chipset */ __ATA_QUIRK_NO_ID_DEV_LOG, /* Identify device log missing */ __ATA_QUIRK_NO_LOG_DIR, /* Do not read log directory */ __ATA_QUIRK_NO_FUA, /* Do not use FUA */ @@ -432,6 +433,7 @@ enum { ATA_QUIRK_MAX_SEC_1024 = (1U << __ATA_QUIRK_MAX_SEC_1024), ATA_QUIRK_MAX_TRIM_128M = (1U << __ATA_QUIRK_MAX_TRIM_128M), ATA_QUIRK_NO_NCQ_ON_ATI = (1U << __ATA_QUIRK_NO_NCQ_ON_ATI), + ATA_QUIRK_NO_LPM_ON_ATI = (1U << __ATA_QUIRK_NO_LPM_ON_ATI), ATA_QUIRK_NO_ID_DEV_LOG = (1U << __ATA_QUIRK_NO_ID_DEV_LOG), ATA_QUIRK_NO_LOG_DIR = (1U << __ATA_QUIRK_NO_LOG_DIR), ATA_QUIRK_NO_FUA = (1U << __ATA_QUIRK_NO_FUA), diff --git a/include/linux/log2.h b/include/linux/log2.h index 9f30d087a1281..1366cb688a6d9 100644 --- a/include/linux/log2.h +++ b/include/linux/log2.h @@ -41,7 +41,7 @@ int __ilog2_u64(u64 n) * *not* considered a power of two. * Return: true if @n is a power of 2, otherwise false. 
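The is_power_of_2() helper documented above (its definition follows) relies on a classic bit trick, and forcing it inline presumably lets the test fold to a compile-time constant in its callers. A standalone model for reference:

#include <assert.h>
#include <stdbool.h>

/* n & (n - 1) clears the lowest set bit, so the AND is zero exactly
 * when at most one bit is set; n != 0 then rules out zero itself. */
static inline bool is_pow2(unsigned long n)
{
        return n != 0 && (n & (n - 1)) == 0;
}

int main(void)
{
        assert(is_pow2(1) && is_pow2(4096));
        assert(!is_pow2(0) && !is_pow2(36));
        return 0;
}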
*/ -static inline __attribute__((const)) +static __always_inline __attribute__((const)) bool is_power_of_2(unsigned long n) { return (n != 0 && ((n & (n - 1)) == 0)); diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index 27f42f713c891..f016263e1fcf0 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h @@ -1135,7 +1135,7 @@ int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt, int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt, struct mlx4_buf *buf); -int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order); +int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, unsigned int order); void mlx4_db_free(struct mlx4_dev *dev, struct mlx4_db *db); int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres, @@ -1415,7 +1415,6 @@ int mlx4_get_is_vlan_offload_disabled(struct mlx4_dev *dev, u8 port, bool *vlan_offload_disabled); void mlx4_handle_eth_header_mcast_prio(struct mlx4_net_trans_rule_hw_ctrl *ctrl, struct _rule_hw *eth_header); -int mlx4_find_cached_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *idx); int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx); int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index); void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan); diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index 0c48b20f818a9..904804e995aa8 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -538,6 +538,7 @@ struct mlx5_cmd_layout { }; enum mlx5_rfr_severity_bit_offsets { + MLX5_CRR_BIT_OFFSET = 0x6, MLX5_RFR_BIT_OFFSET = 0x7, }; @@ -1250,6 +1251,7 @@ enum mlx5_cap_type { MLX5_CAP_GENERAL_2 = 0x20, MLX5_CAP_PORT_SELECTION = 0x25, MLX5_CAP_ADV_VIRTUALIZATION = 0x26, + MLX5_CAP_ADV_RDMA = 0x28, /* NUM OF CAP Types */ MLX5_CAP_NUM }; @@ -1344,6 +1346,12 @@ enum mlx5_qcam_feature_groups { #define MLX5_CAP_FLOWTABLE_RDMA_TX(mdev, cap) \ MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_transmit_rdma.cap) +#define MLX5_CAP_FLOWTABLE_RDMA_TRANSPORT_RX(mdev, cap) \ + MLX5_CAP_ADV_RDMA(mdev, rdma_transport_rx_flow_table_properties.cap) + +#define MLX5_CAP_FLOWTABLE_RDMA_TRANSPORT_TX(mdev, cap) \ + MLX5_CAP_ADV_RDMA(mdev, rdma_transport_tx_flow_table_properties.cap) + #define MLX5_CAP_ESW_FLOWTABLE(mdev, cap) \ MLX5_GET(flow_table_eswitch_cap, \ mdev->caps.hca[MLX5_CAP_ESWITCH_FLOW_TABLE]->cur, cap) @@ -1383,6 +1391,10 @@ enum mlx5_qcam_feature_groups { MLX5_GET(adv_virtualization_cap, \ mdev->caps.hca[MLX5_CAP_ADV_VIRTUALIZATION]->cur, cap) +#define MLX5_CAP_ADV_RDMA(mdev, cap) \ + MLX5_GET(adv_rdma_cap, \ + mdev->caps.hca[MLX5_CAP_ADV_RDMA]->cur, cap) + #define MLX5_CAP_FLOWTABLE_PORT_SELECTION(mdev, cap) \ MLX5_CAP_PORT_SELECTION(mdev, flow_table_properties_port_selection.cap) @@ -1505,6 +1517,7 @@ enum { MLX5_PHYSICAL_LAYER_COUNTERS_GROUP = 0x12, MLX5_PER_TRAFFIC_CLASS_CONGESTION_GROUP = 0x13, MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP = 0x16, + MLX5_PHYSICAL_LAYER_RECOVERY_GROUP = 0x1a, MLX5_INFINIBAND_PORT_COUNTERS_GROUP = 0x20, MLX5_INFINIBAND_EXTENDED_PORT_COUNTERS_GROUP = 0x21, }; diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index af86097641b0b..d1dfbad9a4473 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -54,7 +54,6 @@ #include #include #include -#include #include #define MLX5_ADEV_NAME "mlx5_core" @@ -305,6 +304,8 @@ struct mlx5_cmd { struct semaphore sem; struct semaphore pages_sem; struct semaphore throttle_sem; + 
struct semaphore unprivileged_sem; + struct xarray privileged_uids; } vars; enum mlx5_cmdif_state state; void *cmd_alloc_buf; @@ -679,33 +680,8 @@ struct mlx5_rsvd_gids { struct ida ida; }; -#define MAX_PIN_NUM 8 -struct mlx5_pps { - u8 pin_caps[MAX_PIN_NUM]; - struct work_struct out_work; - u64 start[MAX_PIN_NUM]; - u8 enabled; - u64 min_npps_period; - u64 min_out_pulse_duration_ns; -}; - -struct mlx5_timer { - struct cyclecounter cycles; - struct timecounter tc; - u32 nominal_c_mult; - unsigned long overflow_period; -}; - -struct mlx5_clock { - struct mlx5_nb pps_nb; - seqlock_t lock; - struct hwtstamp_config hwtstamp_config; - struct ptp_clock *ptp; - struct ptp_clock_info ptp_info; - struct mlx5_pps pps_info; - struct mlx5_timer timer; -}; - +struct mlx5_clock; +struct mlx5_clock_dev_state; struct mlx5_dm; struct mlx5_fw_tracer; struct mlx5_vxlan; @@ -789,7 +765,8 @@ struct mlx5_core_dev { #ifdef CONFIG_MLX5_FPGA struct mlx5_fpga_device *fpga; #endif - struct mlx5_clock clock; + struct mlx5_clock *clock; + struct mlx5_clock_dev_state *clock_state; struct mlx5_ib_clock_info *clock_info; struct mlx5_fw_tracer *tracer; struct mlx5_rsc_dump *rsc_dump; @@ -989,6 +966,8 @@ struct mlx5_async_work { mlx5_async_cbk_t user_callback; u16 opcode; /* cmd opcode */ u16 op_mod; /* cmd op_mod */ + u8 throttle_locked:1; + u8 unpriv_locked:1; void *out; /* pointer to the cmd output buffer */ }; @@ -1019,6 +998,8 @@ int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out, int mlx5_cmd_exec_polling(struct mlx5_core_dev *dev, void *in, int in_size, void *out, int out_size); bool mlx5_cmd_is_down(struct mlx5_core_dev *dev); +int mlx5_cmd_add_privileged_uid(struct mlx5_core_dev *dev, u16 uid); +void mlx5_cmd_remove_privileged_uid(struct mlx5_core_dev *dev, u16 uid); void mlx5_core_uplink_netdev_set(struct mlx5_core_dev *mdev, struct net_device *netdev); void mlx5_core_uplink_netdev_event_replay(struct mlx5_core_dev *mdev); diff --git a/include/linux/mlx5/eswitch.h b/include/linux/mlx5/eswitch.h index df73a2ccc9af3..67256e776566c 100644 --- a/include/linux/mlx5/eswitch.h +++ b/include/linux/mlx5/eswitch.h @@ -147,6 +147,8 @@ u32 mlx5_eswitch_get_vport_metadata_for_set(struct mlx5_eswitch *esw, /* reuse tun_opts for the mapped ipsec obj id when tun_id is 0 (invalid) */ #define ESW_IPSEC_RX_MAPPED_ID_MASK GENMASK(ESW_TUN_OPTS_BITS - 1, 0) +#define ESW_IPSEC_RX_MAPPED_ID_MATCH_MASK \ + GENMASK(31 - ESW_RESERVED_BITS, ESW_ZONE_ID_BITS) u8 mlx5_eswitch_mode(const struct mlx5_core_dev *dev); u16 mlx5_eswitch_get_total_vports(const struct mlx5_core_dev *dev); diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h index 2a69d9d71276d..939e58c2f3865 100644 --- a/include/linux/mlx5/fs.h +++ b/include/linux/mlx5/fs.h @@ -40,6 +40,9 @@ #define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v) +#define MLX5_RDMA_TRANSPORT_BYPASS_PRIO 0 +#define MLX5_FS_MAX_POOL_SIZE BIT(30) + enum mlx5_flow_destination_type { MLX5_FLOW_DESTINATION_TYPE_NONE, MLX5_FLOW_DESTINATION_TYPE_VPORT, @@ -108,6 +111,8 @@ enum mlx5_flow_namespace_type { MLX5_FLOW_NAMESPACE_RDMA_TX_IPSEC, MLX5_FLOW_NAMESPACE_RDMA_RX_MACSEC, MLX5_FLOW_NAMESPACE_RDMA_TX_MACSEC, + MLX5_FLOW_NAMESPACE_RDMA_TRANSPORT_RX, + MLX5_FLOW_NAMESPACE_RDMA_TRANSPORT_TX, }; enum { @@ -192,9 +197,9 @@ struct mlx5_flow_namespace * mlx5_get_flow_namespace(struct mlx5_core_dev *dev, enum mlx5_flow_namespace_type type); struct mlx5_flow_namespace * -mlx5_get_flow_vport_acl_namespace(struct mlx5_core_dev *dev, - enum mlx5_flow_namespace_type 
type, - int vport); +mlx5_get_flow_vport_namespace(struct mlx5_core_dev *dev, + enum mlx5_flow_namespace_type type, + int vport_idx); struct mlx5_flow_table_attr { int prio; @@ -202,6 +207,7 @@ struct mlx5_flow_table_attr { u32 level; u32 flags; u16 uid; + u16 vport; struct mlx5_flow_table *next_ft; struct { @@ -238,6 +244,7 @@ void mlx5_destroy_flow_group(struct mlx5_flow_group *fg); struct mlx5_exe_aso { u32 object_id; + int base_id; u8 type; u8 return_reg_id; union { diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 4f3716e124c9c..2c09df4ee5742 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -1570,6 +1570,8 @@ enum { enum { MLX5_UCTX_CAP_RAW_TX = 1UL << 0, MLX5_UCTX_CAP_INTERNAL_DEV_RES = 1UL << 1, + MLX5_UCTX_CAP_RDMA_CTRL = 1UL << 3, + MLX5_UCTX_CAP_RDMA_CTRL_OTHER_VHCA = 1UL << 4, }; #define MLX5_FC_BULK_SIZE_FACTOR 128 @@ -1991,7 +1993,9 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 max_geneve_tlv_options[0x8]; u8 reserved_at_568[0x3]; u8 max_geneve_tlv_option_data_len[0x5]; - u8 reserved_at_570[0x9]; + u8 reserved_at_570[0x1]; + u8 adv_rdma[0x1]; + u8 reserved_at_572[0x7]; u8 adv_virtualization[0x1]; u8 reserved_at_57a[0x6]; @@ -2140,7 +2144,8 @@ struct mlx5_ifc_cmd_hca_cap_2_bits { u8 log_min_mkey_entity_size[0x5]; u8 reserved_at_1b0[0x10]; - u8 reserved_at_1c0[0x60]; + u8 general_obj_types_127_64[0x40]; + u8 reserved_at_200[0x20]; u8 reserved_at_220[0x1]; u8 sw_vhca_id_valid[0x1]; @@ -2640,6 +2645,12 @@ struct mlx5_ifc_field_select_802_1qau_rp_bits { u8 field_select_8021qaurp[0x20]; }; +struct mlx5_ifc_phys_layer_recovery_cntrs_bits { + u8 total_successful_recovery_events[0x20]; + + u8 reserved_at_20[0x7a0]; +}; + struct mlx5_ifc_phys_layer_cntrs_bits { u8 time_since_last_clear_high[0x20]; @@ -4841,6 +4852,7 @@ union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits { struct mlx5_ifc_ib_ext_port_cntrs_grp_data_layout_bits ib_ext_port_cntrs_grp_data_layout; struct mlx5_ifc_phys_layer_cntrs_bits phys_layer_cntrs; struct mlx5_ifc_phys_layer_statistical_cntrs_bits phys_layer_statistical_cntrs; + struct mlx5_ifc_phys_layer_recovery_cntrs_bits phys_layer_recovery_cntrs; u8 reserved_at_0[0x7c0]; }; @@ -10579,7 +10591,9 @@ struct mlx5_ifc_mtutc_reg_bits { }; struct mlx5_ifc_pcam_enhanced_features_bits { - u8 reserved_at_0[0x1d]; + u8 reserved_at_0[0x10]; + u8 ppcnt_recovery_counters[0x1]; + u8 reserved_at_11[0xc]; u8 fec_200G_per_lane_in_pplm[0x1]; u8 reserved_at_1e[0x2a]; u8 fec_100G_per_lane_in_pplm[0x1]; @@ -11119,6 +11133,7 @@ enum { MLX5_INITIAL_SEG_HEALTH_SYNDROME_FFSER_ERR = 0xf, MLX5_INITIAL_SEG_HEALTH_SYNDROME_HIGH_TEMP_ERR = 0x10, MLX5_INITIAL_SEG_HEALTH_SYNDROME_ICM_PCI_POISONED_ERR = 0x12, + MLX5_INITIAL_SEG_HEALTH_SYNDROME_TRUST_LOCKDOWN_ERR = 0x13, }; struct mlx5_ifc_initial_seg_bits { @@ -12493,6 +12508,10 @@ enum { MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_FLOW_METER_ASO = BIT_ULL(0x24), }; +enum { + MLX5_HCA_CAP_2_GENERAL_OBJECT_TYPES_RDMA_CTRL = BIT_ULL(0x13), +}; + enum { MLX5_GENERAL_OBJECT_TYPES_ENCRYPTION_KEY = 0xc, MLX5_GENERAL_OBJECT_TYPES_IPSEC = 0x13, @@ -12500,6 +12519,7 @@ enum { MLX5_GENERAL_OBJECT_TYPES_FLOW_METER_ASO = 0x24, MLX5_GENERAL_OBJECT_TYPES_MACSEC = 0x27, MLX5_GENERAL_OBJECT_TYPES_INT_KEK = 0x47, + MLX5_GENERAL_OBJECT_TYPES_RDMA_CTRL = 0x53, MLX5_GENERAL_OBJECT_TYPES_FLOW_TABLE_ALIAS = 0xff15, }; @@ -13067,6 +13087,44 @@ struct mlx5_ifc_load_vhca_state_out_bits { u8 reserved_at_40[0x40]; }; +struct mlx5_ifc_adv_rdma_cap_bits { + u8 rdma_transport_manager[0x1]; + u8 
rdma_transport_manager_other_eswitch[0x1]; + u8 reserved_at_2[0x1e]; + + u8 rcx_type[0x8]; + u8 reserved_at_28[0x2]; + u8 ps_entry_log_max_value[0x6]; + u8 reserved_at_30[0x6]; + u8 qp_max_ps_num_entry[0xa]; + + u8 mp_max_num_queues[0x8]; + u8 ps_user_context_max_log_size[0x8]; + u8 message_based_qp_and_striding_wq[0x8]; + u8 reserved_at_58[0x8]; + + u8 max_receive_send_message_size_stride[0x10]; + u8 reserved_at_70[0x10]; + + u8 max_receive_send_message_size_byte[0x20]; + + u8 reserved_at_a0[0x160]; + + struct mlx5_ifc_flow_table_prop_layout_bits rdma_transport_rx_flow_table_properties; + + struct mlx5_ifc_flow_table_prop_layout_bits rdma_transport_tx_flow_table_properties; + + struct mlx5_ifc_flow_table_fields_supported_2_bits rdma_transport_rx_ft_field_support_2; + + struct mlx5_ifc_flow_table_fields_supported_2_bits rdma_transport_tx_ft_field_support_2; + + struct mlx5_ifc_flow_table_fields_supported_2_bits rdma_transport_rx_ft_field_bitmask_support_2; + + struct mlx5_ifc_flow_table_fields_supported_2_bits rdma_transport_tx_ft_field_bitmask_support_2; + + u8 reserved_at_800[0x3800]; +}; + struct mlx5_ifc_adv_virtualization_cap_bits { u8 reserved_at_0[0x3]; u8 pg_track_log_max_num[0x5]; diff --git a/include/linux/mlx5/port.h b/include/linux/mlx5/port.h index e68d42b8ce652..58770b86f7939 100644 --- a/include/linux/mlx5/port.h +++ b/include/linux/mlx5/port.h @@ -61,15 +61,6 @@ enum mlx5_an_status { #define MLX5_EEPROM_PAGE_LENGTH 256 #define MLX5_EEPROM_HIGH_PAGE_LENGTH 128 -struct mlx5_module_eeprom_query_params { - u16 size; - u16 offset; - u16 i2c_address; - u32 page; - u32 bank; - u32 module_number; -}; - enum mlx5e_link_mode { MLX5E_1000BASE_CX_SGMII = 0, MLX5E_1000BASE_KX = 1, @@ -115,9 +106,12 @@ enum mlx5e_ext_link_mode { MLX5E_100GAUI_1_100GBASE_CR_KR = 11, MLX5E_200GAUI_4_200GBASE_CR4_KR4 = 12, MLX5E_200GAUI_2_200GBASE_CR2_KR2 = 13, + MLX5E_200GAUI_1_200GBASE_CR1_KR1 = 14, MLX5E_400GAUI_8_400GBASE_CR8 = 15, MLX5E_400GAUI_4_400GBASE_CR4_KR4 = 16, + MLX5E_400GAUI_2_400GBASE_CR2_KR2 = 17, MLX5E_800GAUI_8_800GBASE_CR8_KR8 = 19, + MLX5E_800GAUI_4_800GBASE_CR4_KR4 = 20, MLX5E_EXT_LINK_MODES_NUMBER, }; @@ -142,12 +136,6 @@ enum mlx5_ptys_width { MLX5_PTYS_WIDTH_12X = 1 << 4, }; -struct mlx5_port_eth_proto { - u32 cap; - u32 admin; - u32 oper; -}; - #define MLX5E_PROT_MASK(link_mode) (1U << link_mode) #define MLX5_GET_ETH_PROTO(reg, out, ext, field) \ (ext ? 
MLX5_GET(reg, out, ext_##field) : \ @@ -160,14 +148,7 @@ int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys, int mlx5_query_ib_port_oper(struct mlx5_core_dev *dev, u16 *link_width_oper, u16 *proto_oper, u8 local_port, u8 plane_index); -void mlx5_toggle_port_link(struct mlx5_core_dev *dev); -int mlx5_set_port_admin_status(struct mlx5_core_dev *dev, - enum mlx5_port_status status); -int mlx5_query_port_admin_status(struct mlx5_core_dev *dev, - enum mlx5_port_status *status); -int mlx5_set_port_beacon(struct mlx5_core_dev *dev, u16 beacon_duration); - -int mlx5_set_port_mtu(struct mlx5_core_dev *dev, u16 mtu, u8 port); + void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, u16 *max_mtu, u8 port); void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, u16 *oper_mtu, u8 port); @@ -175,65 +156,4 @@ void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, u16 *oper_mtu, int mlx5_query_port_vl_hw_cap(struct mlx5_core_dev *dev, u8 *vl_hw_cap, u8 local_port); -int mlx5_set_port_pause(struct mlx5_core_dev *dev, u32 rx_pause, u32 tx_pause); -int mlx5_query_port_pause(struct mlx5_core_dev *dev, - u32 *rx_pause, u32 *tx_pause); - -int mlx5_set_port_pfc(struct mlx5_core_dev *dev, u8 pfc_en_tx, u8 pfc_en_rx); -int mlx5_query_port_pfc(struct mlx5_core_dev *dev, u8 *pfc_en_tx, - u8 *pfc_en_rx); - -int mlx5_set_port_stall_watermark(struct mlx5_core_dev *dev, - u16 stall_critical_watermark, - u16 stall_minor_watermark); -int mlx5_query_port_stall_watermark(struct mlx5_core_dev *dev, - u16 *stall_critical_watermark, u16 *stall_minor_watermark); - -int mlx5_max_tc(struct mlx5_core_dev *mdev); - -int mlx5_set_port_prio_tc(struct mlx5_core_dev *mdev, u8 *prio_tc); -int mlx5_query_port_prio_tc(struct mlx5_core_dev *mdev, - u8 prio, u8 *tc); -int mlx5_set_port_tc_group(struct mlx5_core_dev *mdev, u8 *tc_group); -int mlx5_query_port_tc_group(struct mlx5_core_dev *mdev, - u8 tc, u8 *tc_group); -int mlx5_set_port_tc_bw_alloc(struct mlx5_core_dev *mdev, u8 *tc_bw); -int mlx5_query_port_tc_bw_alloc(struct mlx5_core_dev *mdev, - u8 tc, u8 *bw_pct); -int mlx5_modify_port_ets_rate_limit(struct mlx5_core_dev *mdev, - u8 *max_bw_value, - u8 *max_bw_unit); -int mlx5_query_port_ets_rate_limit(struct mlx5_core_dev *mdev, - u8 *max_bw_value, - u8 *max_bw_unit); -int mlx5_set_port_wol(struct mlx5_core_dev *mdev, u8 wol_mode); -int mlx5_query_port_wol(struct mlx5_core_dev *mdev, u8 *wol_mode); - -int mlx5_query_ports_check(struct mlx5_core_dev *mdev, u32 *out, int outlen); -int mlx5_set_ports_check(struct mlx5_core_dev *mdev, u32 *in, int inlen); -int mlx5_set_port_fcs(struct mlx5_core_dev *mdev, u8 enable); -void mlx5_query_port_fcs(struct mlx5_core_dev *mdev, bool *supported, - bool *enabled); -int mlx5_query_module_eeprom(struct mlx5_core_dev *dev, - u16 offset, u16 size, u8 *data); -int mlx5_query_module_eeprom_by_page(struct mlx5_core_dev *dev, - struct mlx5_module_eeprom_query_params *params, u8 *data); - -int mlx5_query_port_dcbx_param(struct mlx5_core_dev *mdev, u32 *out); -int mlx5_set_port_dcbx_param(struct mlx5_core_dev *mdev, u32 *in); - -int mlx5_set_trust_state(struct mlx5_core_dev *mdev, u8 trust_state); -int mlx5_query_trust_state(struct mlx5_core_dev *mdev, u8 *trust_state); -int mlx5_set_dscp2prio(struct mlx5_core_dev *mdev, u8 dscp, u8 prio); -int mlx5_query_dscp2prio(struct mlx5_core_dev *mdev, u8 *dscp2prio); - -int mlx5_port_query_eth_proto(struct mlx5_core_dev *dev, u8 port, bool ext, - struct mlx5_port_eth_proto *eproto); -bool mlx5_ptys_ext_supported(struct mlx5_core_dev *mdev); -u32 
mlx5_port_ptys2speed(struct mlx5_core_dev *mdev, u32 eth_proto_oper, - bool force_legacy); -u32 mlx5_port_speed2linkmodes(struct mlx5_core_dev *mdev, u32 speed, - bool force_legacy); -int mlx5_port_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed); - #endif /* __MLX5_PORT_H__ */ diff --git a/include/linux/mm.h b/include/linux/mm.h index 5409c7b505094..21d4f43b99833 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1458,7 +1458,10 @@ static inline void folio_get(struct folio *folio) static inline void get_page(struct page *page) { - folio_get(page_folio(page)); + struct folio *folio = page_folio(page); + if (WARN_ON_ONCE(folio_test_slab(folio))) + return; + folio_get(folio); } static inline __must_check bool try_get_page(struct page *page) @@ -1552,6 +1555,9 @@ static inline void put_page(struct page *page) { struct folio *folio = page_folio(page); + if (folio_test_slab(folio)) + return; + /* * For some devmap managed pages we need to catch refcount transition * from 2 to 1: @@ -3425,7 +3431,6 @@ extern vm_fault_t filemap_fault(struct vm_fault *vmf); extern vm_fault_t filemap_map_pages(struct vm_fault *vmf, pgoff_t start_pgoff, pgoff_t end_pgoff); extern vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf); -extern vm_fault_t filemap_fsnotify_fault(struct vm_fault *vmf); extern unsigned long stack_guard_gap; /* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */ diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 6b27db7f94963..0234f14f2aa6b 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -875,10 +875,11 @@ struct mm_struct { */ unsigned int nr_cpus_allowed; /** - * @max_nr_cid: Maximum number of concurrency IDs allocated. + * @max_nr_cid: Maximum number of allowed concurrency + * IDs allocated. * - * Track the highest number of concurrency IDs allocated for the - * mm. + * Track the highest number of allowed concurrency IDs + * allocated for the mm. */ atomic_t max_nr_cid; /** diff --git a/include/linux/net/intel/iidc.h b/include/linux/net/intel/iidc.h index 1c1332e4df260..13274c3def660 100644 --- a/include/linux/net/intel/iidc.h +++ b/include/linux/net/intel/iidc.h @@ -78,6 +78,8 @@ int ice_del_rdma_qset(struct ice_pf *pf, struct iidc_rdma_qset_params *qset); int ice_rdma_request_reset(struct ice_pf *pf, enum iidc_reset_type reset_type); int ice_rdma_update_vsi_filter(struct ice_pf *pf, u16 vsi_id, bool enable); void ice_get_qos_params(struct ice_pf *pf, struct iidc_qos_params *qos); +int ice_alloc_rdma_qvector(struct ice_pf *pf, struct msix_entry *entry); +void ice_free_rdma_qvector(struct ice_pf *pf, struct msix_entry *entry); /* Structure representing auxiliary driver tailored information about the core * PCI dev, each auxiliary driver using the IIDC interface will have an diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h index 11be70a7929f2..7a01c518e5730 100644 --- a/include/linux/netdev_features.h +++ b/include/linux/netdev_features.h @@ -53,12 +53,12 @@ enum { NETIF_F_GSO_UDP_BIT, /* ... UFO, deprecated except tuntap */ NETIF_F_GSO_UDP_L4_BIT, /* ... UDP payload GSO (not UFO) */ NETIF_F_GSO_FRAGLIST_BIT, /* ... 
Fraglist GSO */ + NETIF_F_GSO_ACCECN_BIT, /* TCP AccECN w/ TSO (no clear CWR) */ /**/NETIF_F_GSO_LAST = /* last bit, see GSO_MASK */ - NETIF_F_GSO_FRAGLIST_BIT, + NETIF_F_GSO_ACCECN_BIT, NETIF_F_FCOE_CRC_BIT, /* FCoE CRC32 */ NETIF_F_SCTP_CRC_BIT, /* SCTP checksum offload */ - __UNUSED_NETIF_F_37, NETIF_F_NTUPLE_BIT, /* N-tuple filters supported */ NETIF_F_RXHASH_BIT, /* Receive hashing offload */ NETIF_F_RXCSUM_BIT, /* Receive checksumming offload */ @@ -128,6 +128,7 @@ enum { #define NETIF_F_SG __NETIF_F(SG) #define NETIF_F_TSO6 __NETIF_F(TSO6) #define NETIF_F_TSO_ECN __NETIF_F(TSO_ECN) +#define NETIF_F_GSO_ACCECN __NETIF_F(GSO_ACCECN) #define NETIF_F_TSO __NETIF_F(TSO) #define NETIF_F_VLAN_CHALLENGED __NETIF_F(VLAN_CHALLENGED) #define NETIF_F_RXFCS __NETIF_F(RXFCS) @@ -210,7 +211,8 @@ static inline int find_next_netdev_feature(u64 feature, unsigned long start) NETIF_F_TSO_ECN | NETIF_F_TSO_MANGLEID) /* List of features with software fallbacks. */ -#define NETIF_F_GSO_SOFTWARE (NETIF_F_ALL_TSO | NETIF_F_GSO_SCTP | \ +#define NETIF_F_GSO_SOFTWARE (NETIF_F_ALL_TSO | \ + NETIF_F_GSO_ACCECN | NETIF_F_GSO_SCTP | \ NETIF_F_GSO_UDP_L4 | NETIF_F_GSO_FRAGLIST) /* diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index ab550a89b9bfa..ec3369b498221 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -340,11 +340,27 @@ struct gro_list { }; /* - * size of gro hash buckets, must less than bit number of - * napi_struct::gro_bitmask + * size of gro hash buckets, must be <= the number of bits in + * gro_node::bitmask */ #define GRO_HASH_BUCKETS 8 +/** + * struct gro_node - structure to support Generic Receive Offload + * @bitmask: bitmask to indicate used buckets in @hash + * @hash: hashtable of pending aggregated skbs, separated by flows + * @rx_list: list of pending ``GRO_NORMAL`` skbs + * @rx_count: cached current length of @rx_list + * @cached_napi_id: napi_struct::napi_id cached for hotpath, 0 for standalone + */ +struct gro_node { + unsigned long bitmask; + struct gro_list hash[GRO_HASH_BUCKETS]; + struct list_head rx_list; + u32 rx_count; + u32 cached_napi_id; +}; + /* * Structure for per-NAPI config */ @@ -352,6 +368,8 @@ struct napi_config { u64 gro_flush_timeout; u64 irq_suspend_timeout; u32 defer_hard_irqs; + cpumask_t affinity_mask; + u8 threaded; unsigned int napi_id; }; @@ -370,7 +388,6 @@ struct napi_struct { unsigned long state; int weight; u32 defer_hard_irqs_count; - unsigned long gro_bitmask; int (*poll)(struct napi_struct *, int); #ifdef CONFIG_NETPOLL /* CPU actively polling if netpoll is configured */ @@ -379,11 +396,8 @@ struct napi_struct { /* CPU on which NAPI has been scheduled for processing */ int list_owner; struct net_device *dev; - struct gro_list gro_hash[GRO_HASH_BUCKETS]; struct sk_buff *skb; - struct list_head rx_list; /* Pending GRO_NORMAL skbs */ - int rx_count; /* length of rx_list */ - unsigned int napi_id; /* protected by netdev_lock */ + struct gro_node gro; struct hrtimer timer; /* all fields past this point are write-protected by netdev_lock */ struct task_struct *thread; @@ -391,9 +405,12 @@ struct napi_struct { unsigned long irq_suspend_timeout; u32 defer_hard_irqs; /* control-path-only fields follow */ + u32 napi_id; struct list_head dev_list; struct hlist_node napi_hash_node; int irq; + struct irq_affinity_notify notify; + int napi_rmap_idx; int index; struct napi_config *config; }; @@ -409,6 +426,9 @@ enum { NAPI_STATE_PREFER_BUSY_POLL, /* prefer busy-polling over softirq processing*/ NAPI_STATE_THREADED, /* The poll 
is performed inside its own thread*/ NAPI_STATE_SCHED_THREADED, /* Napi is currently scheduled in threaded mode */ + NAPI_STATE_HAS_NOTIFIER, /* Napi has an IRQ notifier */ + NAPI_STATE_THREADED_BUSY_POLL, /* The threaded napi poller will busy poll */ + NAPI_STATE_SCHED_THREADED_BUSY_POLL, /* The threaded napi poller is busy polling */ }; enum { @@ -422,8 +442,15 @@ enum { NAPIF_STATE_PREFER_BUSY_POLL = BIT(NAPI_STATE_PREFER_BUSY_POLL), NAPIF_STATE_THREADED = BIT(NAPI_STATE_THREADED), NAPIF_STATE_SCHED_THREADED = BIT(NAPI_STATE_SCHED_THREADED), + NAPIF_STATE_HAS_NOTIFIER = BIT(NAPI_STATE_HAS_NOTIFIER), + NAPIF_STATE_THREADED_BUSY_POLL = BIT(NAPI_STATE_THREADED_BUSY_POLL), + NAPIF_STATE_SCHED_THREADED_BUSY_POLL = + BIT(NAPI_STATE_SCHED_THREADED_BUSY_POLL), }; +#define NAPIF_STATE_THREADED_BUSY_POLL_MASK \ + (NAPIF_STATE_THREADED | NAPIF_STATE_THREADED_BUSY_POLL) + enum gro_result { GRO_MERGED, GRO_MERGED_FREE, @@ -570,7 +597,18 @@ static inline bool napi_complete(struct napi_struct *n) return napi_complete_done(n, 0); } -int dev_set_threaded(struct net_device *dev, bool threaded); +int dev_set_threaded(struct net_device *dev, + enum netdev_napi_threaded threaded); + +/** + * napi_set_threaded - set napi threaded state + * @napi: NAPI context + * @threaded: threading mode + * + * Return: 0 on success and negative errno on failure. + */ +int napi_set_threaded(struct napi_struct *napi, + enum netdev_napi_threaded threaded); void napi_disable(struct napi_struct *n); void napi_disable_locked(struct napi_struct *n); @@ -658,6 +696,7 @@ struct netdev_queue { struct Qdisc __rcu *qdisc_sleeping; #ifdef CONFIG_SYSFS struct kobject kobj; + const struct attribute_group **groups; #endif unsigned long tx_maxrate; /* @@ -669,6 +708,7 @@ struct netdev_queue { /* Subordinate device that the queue has been assigned to */ struct net_device *sb_dev; #ifdef CONFIG_XDP_SOCKETS + /* "ops protected", see comment about net_device::lock */ struct xsk_buff_pool *pool; #endif @@ -691,7 +731,7 @@ struct netdev_queue { * slow- / control-path part */ /* NAPI instance for the queue - * Readers and writers must hold RTNL + * "ops protected", see comment about net_device::lock */ struct napi_struct *napi; @@ -1988,12 +2028,21 @@ enum netdev_reg_state { * * @threaded: napi threaded mode is enabled * + * @irq_affinity_auto: driver wants the core to store and re-assign the IRQ + * affinity. Set by netif_set_affinity_auto(), then + * the driver must create a persistent napi by + * netif_napi_add_config() and finally bind the napi to + * IRQ (via netif_napi_set_irq()). + * + * @rx_cpu_rmap_auto: driver wants the core to manage the ARFS rmap. + * Set by calling netif_enable_cpu_rmap(). 
+ * * @see_all_hwtstamp_requests: device wants to see calls to * ndo_hwtstamp_set() for all timestamp requests * regardless of source, even if those aren't * HWTSTAMP_SOURCE_NETDEV * @change_proto_down: device supports setting carrier via IFLA_PROTO_DOWN - * @netns_local: interface can't change network namespaces + * @netns_immutable: interface can't change network namespaces * @fcoe_mtu: device supports maximum FCoE MTU, 2158 bytes * * @net_notifier_list: List of per-net netdev notifier block @@ -2394,12 +2443,14 @@ struct net_device { struct sfp_bus *sfp_bus; struct lock_class_key *qdisc_tx_busylock; bool proto_down; - bool threaded; + u8 threaded; + bool irq_affinity_auto; + bool rx_cpu_rmap_auto; /* priv_flags_slow, ungrouped to save space */ unsigned long see_all_hwtstamp_requests:1; unsigned long change_proto_down:1; - unsigned long netns_local:1; + unsigned long netns_immutable:1; unsigned long fcoe_mtu:1; struct list_head net_notifier_list; @@ -2455,19 +2506,45 @@ struct net_device { */ bool up; + /** + * @request_ops_lock: request the core to run all @netdev_ops and + * @ethtool_ops under the @lock. + */ + bool request_ops_lock; + /** * @lock: netdev-scope lock, protects a small selection of fields. * Should always be taken using netdev_lock() / netdev_unlock() helpers. * Drivers are free to use it for other protection. * - * Protects: + * For the drivers that implement shaper or queue API, the scope + * of this lock is expanded to cover most ndo/queue/ethtool/sysfs + * operations. Drivers may opt-in to this behavior by setting + * @request_ops_lock. + * + * @lock protection mixes with rtnl_lock in multiple ways, fields are + * either: + * - simply protected by the instance @lock; + * - double protected - writers hold both locks, readers hold either; + * - ops protected - protected by the lock held around the NDOs + * and other callbacks, that is the instance lock on devices for + * which netdev_need_ops_lock() returns true, otherwise by rtnl_lock; + * - double ops protected - always protected by rtnl_lock but for + * devices for which netdev_need_ops_lock() returns true - also + * the instance lock. + * + * Simply protects: * @gro_flush_timeout, @napi_defer_hard_irqs, @napi_list, * @net_shaper_hierarchy, @reg_state, @threaded * - * Partially protects (writers must hold both @lock and rtnl_lock): + * Double protects: * @up * - * Also protects some fields in struct napi_struct. + * Double ops protects: + * @real_num_rx_queues, @real_num_tx_queues + * + * Also protects some fields in: + * struct napi_struct, struct netdev_queue, struct netdev_rx_queue * * Ordering: take after rtnl_lock. 
*/ @@ -2590,21 +2667,6 @@ static inline void netdev_for_each_tx_queue(struct net_device *dev, f(dev, &dev->_tx[i], arg); } -#define netdev_lockdep_set_classes(dev) \ -{ \ - static struct lock_class_key qdisc_tx_busylock_key; \ - static struct lock_class_key qdisc_xmit_lock_key; \ - static struct lock_class_key dev_addr_list_lock_key; \ - unsigned int i; \ - \ - (dev)->qdisc_tx_busylock = &qdisc_tx_busylock_key; \ - lockdep_set_class(&(dev)->addr_list_lock, \ - &dev_addr_list_lock_key); \ - for (i = 0; i < (dev)->num_tx_queues; i++) \ - lockdep_set_class(&(dev)->_tx[i]._xmit_lock, \ - &qdisc_xmit_lock_key); \ -} - u16 netdev_pick_tx(struct net_device *dev, struct sk_buff *skb, struct net_device *sb_dev); struct netdev_queue *netdev_core_pick_tx(struct net_device *dev, @@ -2710,23 +2772,9 @@ static inline void netdev_unlock(struct net_device *dev) { mutex_unlock(&dev->lock); } +/* Additional netdev_lock()-related helpers are in net/netdev_lock.h */ -static inline void netdev_assert_locked(struct net_device *dev) -{ - lockdep_assert_held(&dev->lock); -} - -static inline void netdev_assert_locked_or_invisible(struct net_device *dev) -{ - if (dev->reg_state == NETREG_REGISTERED || - dev->reg_state == NETREG_UNREGISTERING) - netdev_assert_locked(dev); -} - -static inline void netif_napi_set_irq_locked(struct napi_struct *napi, int irq) -{ - napi->irq = irq; -} +void netif_napi_set_irq_locked(struct napi_struct *napi, int irq); static inline void netif_napi_set_irq(struct napi_struct *napi, int irq) { @@ -2864,6 +2912,9 @@ static inline void netif_napi_del(struct napi_struct *napi) synchronize_net(); } +int netif_enable_cpu_rmap(struct net_device *dev, unsigned int num_irqs); +void netif_set_affinity_auto(struct net_device *dev); + struct packet_type { __be16 type; /* This is really htons(ether_type). 
*/ bool ignore_outgoing; @@ -3297,9 +3348,12 @@ struct net_device *dev_get_by_name_rcu(struct net *net, const char *name); struct net_device *__dev_get_by_name(struct net *net, const char *name); bool netdev_name_in_use(struct net *net, const char *name); int dev_alloc_name(struct net_device *dev, const char *name); +int netif_open(struct net_device *dev, struct netlink_ext_ack *extack); int dev_open(struct net_device *dev, struct netlink_ext_ack *extack); +void netif_close(struct net_device *dev); void dev_close(struct net_device *dev); void dev_close_many(struct list_head *head, bool unlink); +void netif_disable_lro(struct net_device *dev); void dev_disable_lro(struct net_device *dev); int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *newskb); u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb, @@ -4045,17 +4099,7 @@ static inline bool netif_is_multiqueue(const struct net_device *dev) } int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq); - -#ifdef CONFIG_SYSFS int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq); -#else -static inline int netif_set_real_num_rx_queues(struct net_device *dev, - unsigned int rxqs) -{ - dev->real_num_rx_queues = rxqs; - return 0; -} -#endif int netif_set_real_num_queues(struct net_device *dev, unsigned int txq, unsigned int rxq); @@ -4114,8 +4158,14 @@ int netif_receive_skb(struct sk_buff *skb); int netif_receive_skb_core(struct sk_buff *skb); void netif_receive_skb_list_internal(struct list_head *head); void netif_receive_skb_list(struct list_head *head); -gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb); -void napi_gro_flush(struct napi_struct *napi, bool flush_old); +gro_result_t gro_receive_skb(struct gro_node *gro, struct sk_buff *skb); + +static inline gro_result_t napi_gro_receive(struct napi_struct *napi, + struct sk_buff *skb) +{ + return gro_receive_skb(&napi->gro, skb); +} + struct sk_buff *napi_get_frags(struct napi_struct *napi); gro_result_t napi_gro_frags(struct napi_struct *napi); @@ -4141,6 +4191,8 @@ int put_user_ifreq(struct ifreq *ifr, void __user *arg); int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr, void __user *data, bool *need_copyout); int dev_ifconf(struct net *net, struct ifconf __user *ifc); +int dev_eth_ioctl(struct net_device *dev, + struct ifreq *ifr, unsigned int cmd); int generic_hwtstamp_get_lower(struct net_device *dev, struct kernel_hwtstamp_config *kernel_cfg); int generic_hwtstamp_set_lower(struct net_device *dev, @@ -4150,22 +4202,25 @@ int dev_ethtool(struct net *net, struct ifreq *ifr, void __user *userdata); unsigned int dev_get_flags(const struct net_device *); int __dev_change_flags(struct net_device *dev, unsigned int flags, struct netlink_ext_ack *extack); +int netif_change_flags(struct net_device *dev, unsigned int flags, + struct netlink_ext_ack *extack); int dev_change_flags(struct net_device *dev, unsigned int flags, struct netlink_ext_ack *extack); +int netif_set_alias(struct net_device *dev, const char *alias, size_t len); int dev_set_alias(struct net_device *, const char *, size_t); int dev_get_alias(const struct net_device *, char *, size_t); -int __dev_change_net_namespace(struct net_device *dev, struct net *net, - const char *pat, int new_ifindex); -static inline +int netif_change_net_namespace(struct net_device *dev, struct net *net, + const char *pat, int new_ifindex, + struct netlink_ext_ack *extack); int dev_change_net_namespace(struct net_device *dev, struct net *net, - 
const char *pat) -{ - return __dev_change_net_namespace(dev, net, pat, 0); -} + const char *pat); int __dev_set_mtu(struct net_device *, int); +int netif_set_mtu(struct net_device *dev, int new_mtu); int dev_set_mtu(struct net_device *, int); int dev_pre_changeaddr_notify(struct net_device *dev, const char *addr, struct netlink_ext_ack *extack); +int netif_set_mac_address(struct net_device *dev, struct sockaddr *sa, + struct netlink_ext_ack *extack); int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa, struct netlink_ext_ack *extack); int dev_set_mac_address_user(struct net_device *dev, struct sockaddr *sa, @@ -4181,6 +4236,7 @@ struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, int bpf_xdp_link_attach(const union bpf_attr *attr, struct bpf_prog *prog); u8 dev_xdp_prog_count(struct net_device *dev); +int netif_xdp_propagate(struct net_device *dev, struct netdev_bpf *bpf); int dev_xdp_propagate(struct net_device *dev, struct netdev_bpf *bpf); u8 dev_xdp_sb_prog_count(struct net_device *dev); u32 dev_xdp_prog_id(struct net_device *dev, enum bpf_xdp_mode mode); @@ -4249,7 +4305,17 @@ static __always_inline int ____dev_forward_skb(struct net_device *dev, return 0; } -bool dev_nit_active(struct net_device *dev); +bool dev_nit_active_rcu(const struct net_device *dev); +static inline bool dev_nit_active(const struct net_device *dev) +{ + bool ret; + + rcu_read_lock(); + ret = dev_nit_active_rcu(dev); + rcu_read_unlock(); + return ret; +} + void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev); static inline void __dev_put(struct net_device *dev) @@ -4923,6 +4989,7 @@ static inline void __dev_mc_unsync(struct net_device *dev, /* Functions used for secondary unicast and multicast support */ void dev_set_rx_mode(struct net_device *dev); int dev_set_promiscuity(struct net_device *dev, int inc); +int netif_set_allmulti(struct net_device *dev, int inc, bool notify); int dev_set_allmulti(struct net_device *dev, int inc); void netdev_state_change(struct net_device *dev); void __netdev_notify_peers(struct net_device *dev); @@ -5241,6 +5308,8 @@ static inline bool net_gso_ok(netdev_features_t features, int gso_type) BUILD_BUG_ON(SKB_GSO_UDP != (NETIF_F_GSO_UDP >> NETIF_F_GSO_SHIFT)); BUILD_BUG_ON(SKB_GSO_UDP_L4 != (NETIF_F_GSO_UDP_L4 >> NETIF_F_GSO_SHIFT)); BUILD_BUG_ON(SKB_GSO_FRAGLIST != (NETIF_F_GSO_FRAGLIST >> NETIF_F_GSO_SHIFT)); + BUILD_BUG_ON(SKB_GSO_TCP_ACCECN != + (NETIF_F_GSO_ACCECN >> NETIF_F_GSO_SHIFT)); return (features & feature) == feature; } diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h index f91e50a76efd4..a8de41d84be52 100644 --- a/include/linux/netpoll.h +++ b/include/linux/netpoll.h @@ -24,15 +24,24 @@ union inet_addr { struct netpoll { struct net_device *dev; + u16 local_port, remote_port; netdevice_tracker dev_tracker; - char dev_name[IFNAMSIZ]; - const char *name; - union inet_addr local_ip, remote_ip; bool ipv6; - u16 local_port, remote_port; - u8 remote_mac[ETH_ALEN]; + + /* Hot fields above */ + + const char *name; struct sk_buff_head skb_pool; + struct work_struct refill_wq; + /* + * Either dev_name or dev_mac can be used to specify the local + * interface - dev_name is used if it is a nonempty string, else + * dev_mac is used. 
+ */ + char dev_name[IFNAMSIZ]; + u8 dev_mac[ETH_ALEN]; + u8 remote_mac[ETH_ALEN]; }; struct netpoll_info { diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h index 71fbebfa43c7e..9ac83ca883266 100644 --- a/include/linux/nfs4.h +++ b/include/linux/nfs4.h @@ -47,6 +47,7 @@ struct nfs4_acl { struct nfs4_label { uint32_t lfs; uint32_t pi; + u32 lsmid; u32 len; char *label; }; diff --git a/include/linux/nvme-tcp.h b/include/linux/nvme-tcp.h index e07e8978d691b..e435250fcb4d0 100644 --- a/include/linux/nvme-tcp.h +++ b/include/linux/nvme-tcp.h @@ -13,6 +13,8 @@ #define NVME_TCP_ADMIN_CCSZ SZ_8K #define NVME_TCP_DIGEST_LENGTH 4 #define NVME_TCP_MIN_MAXH2CDATA 4096 +#define NVME_TCP_MIN_C2HTERM_PLEN 24 +#define NVME_TCP_MAX_C2HTERM_PLEN 152 enum nvme_tcp_pfv { NVME_TCP_PFV_1_0 = 0x0, diff --git a/include/linux/nvme.h b/include/linux/nvme.h index fe3b60818fdcf..2dc05b1c3283d 100644 --- a/include/linux/nvme.h +++ b/include/linux/nvme.h @@ -199,28 +199,54 @@ enum { #define NVME_NVM_IOSQES 6 #define NVME_NVM_IOCQES 4 +/* + * Controller Configuration (CC) register (Offset 14h) + */ enum { + /* Enable (EN): bit 0 */ NVME_CC_ENABLE = 1 << 0, NVME_CC_EN_SHIFT = 0, + + /* Bits 03:01 are reserved (NVMe Base Specification rev 2.1) */ + + /* I/O Command Set Selected (CSS): bits 06:04 */ NVME_CC_CSS_SHIFT = 4, - NVME_CC_MPS_SHIFT = 7, - NVME_CC_AMS_SHIFT = 11, - NVME_CC_SHN_SHIFT = 14, - NVME_CC_IOSQES_SHIFT = 16, - NVME_CC_IOCQES_SHIFT = 20, + NVME_CC_CSS_MASK = 7 << NVME_CC_CSS_SHIFT, NVME_CC_CSS_NVM = 0 << NVME_CC_CSS_SHIFT, NVME_CC_CSS_CSI = 6 << NVME_CC_CSS_SHIFT, - NVME_CC_CSS_MASK = 7 << NVME_CC_CSS_SHIFT, + + /* Memory Page Size (MPS): bits 10:07 */ + NVME_CC_MPS_SHIFT = 7, + NVME_CC_MPS_MASK = 0xf << NVME_CC_MPS_SHIFT, + + /* Arbitration Mechanism Selected (AMS): bits 13:11 */ + NVME_CC_AMS_SHIFT = 11, + NVME_CC_AMS_MASK = 7 << NVME_CC_AMS_SHIFT, NVME_CC_AMS_RR = 0 << NVME_CC_AMS_SHIFT, NVME_CC_AMS_WRRU = 1 << NVME_CC_AMS_SHIFT, NVME_CC_AMS_VS = 7 << NVME_CC_AMS_SHIFT, + + /* Shutdown Notification (SHN): bits 15:14 */ + NVME_CC_SHN_SHIFT = 14, + NVME_CC_SHN_MASK = 3 << NVME_CC_SHN_SHIFT, NVME_CC_SHN_NONE = 0 << NVME_CC_SHN_SHIFT, NVME_CC_SHN_NORMAL = 1 << NVME_CC_SHN_SHIFT, NVME_CC_SHN_ABRUPT = 2 << NVME_CC_SHN_SHIFT, - NVME_CC_SHN_MASK = 3 << NVME_CC_SHN_SHIFT, + + /* I/O Submission Queue Entry Size (IOSQES): bits 19:16 */ + NVME_CC_IOSQES_SHIFT = 16, + NVME_CC_IOSQES_MASK = 0xf << NVME_CC_IOSQES_SHIFT, NVME_CC_IOSQES = NVME_NVM_IOSQES << NVME_CC_IOSQES_SHIFT, + + /* I/O Completion Queue Entry Size (IOCQES): bits 23:20 */ + NVME_CC_IOCQES_SHIFT = 20, + NVME_CC_IOCQES_MASK = 0xf << NVME_CC_IOCQES_SHIFT, NVME_CC_IOCQES = NVME_NVM_IOCQES << NVME_CC_IOCQES_SHIFT, + + /* Controller Ready Independent of Media Enable (CRIME): bit 24 */ NVME_CC_CRIME = 1 << 24, + + /* Bits 25:31 are reserved (NVMe Base Specification rev 2.1) */ }; enum { diff --git a/include/linux/of.h b/include/linux/of.h index eaf0e2a2b75cb..9d6b8a61607f1 100644 --- a/include/linux/of.h +++ b/include/linux/of.h @@ -301,6 +301,8 @@ extern struct device_node *of_get_compatible_child(const struct device_node *par const char *compatible); extern struct device_node *of_get_child_by_name(const struct device_node *node, const char *name); +extern struct device_node *of_get_available_child_by_name(const struct device_node *node, + const char *name); /* cache lookup */ extern struct device_node *of_find_next_cache_node(const struct device_node *); @@ -578,6 +580,13 @@ static inline struct device_node *of_get_child_by_name( return NULL; } 
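/*
 * Editor's note (illustration, not part of the patch): a minimal sketch of
 * how a caller might use the of_get_available_child_by_name() helper whose
 * declaration is added above. It looks up a named child node and returns it
 * only when the node's status is "okay", with a reference taken that the
 * caller must drop with of_node_put(). The "codec" child name and the probe
 * helper are hypothetical.
 */
static int example_parse_codec(struct device_node *np)
{
	struct device_node *child;

	child = of_get_available_child_by_name(np, "codec");
	if (!child)
		return -ENODEV;		/* missing, or status is not "okay" */

	/* ... read properties of @child here ... */

	of_node_put(child);		/* drop the reference taken by the lookup */
	return 0;
}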
+static inline struct device_node *of_get_available_child_by_name( + const struct device_node *node, + const char *name) +{ + return NULL; +} + static inline int of_device_is_compatible(const struct device_node *device, const char *name) { diff --git a/include/linux/pcs/pcs-xpcs.h b/include/linux/pcs/pcs-xpcs.h index 733f4ddd2ef15..e40f554ff717a 100644 --- a/include/linux/pcs/pcs-xpcs.h +++ b/include/linux/pcs/pcs-xpcs.h @@ -50,8 +50,7 @@ struct dw_xpcs; struct phylink_pcs *xpcs_to_phylink_pcs(struct dw_xpcs *xpcs); int xpcs_get_an_mode(struct dw_xpcs *xpcs, phy_interface_t interface); -int xpcs_config_eee(struct dw_xpcs *xpcs, int mult_fact_100ns, - int enable); +void xpcs_config_eee_mult_fact(struct dw_xpcs *xpcs, u8 mult_fact); struct dw_xpcs *xpcs_create_mdiodev(struct mii_bus *bus, int addr); struct dw_xpcs *xpcs_create_fwnode(struct fwnode_handle *fwnode); void xpcs_destroy(struct dw_xpcs *xpcs); diff --git a/include/linux/phy.h b/include/linux/phy.h index 19f076a71f946..a2bfae80c4497 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -37,10 +37,7 @@ extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_basic_t1_features) __ro_after_init; extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_basic_t1s_p2mp_features) __ro_after_init; extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_gbit_features) __ro_after_init; extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_gbit_fibre_features) __ro_after_init; -extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_gbit_all_ports_features) __ro_after_init; extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_features) __ro_after_init; -extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_fec_features) __ro_after_init; -extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_full_features) __ro_after_init; extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_eee_cap1_features) __ro_after_init; extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_eee_cap2_features) __ro_after_init; @@ -54,11 +51,6 @@ extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_eee_cap2_features) __ro_after_init; #define PHY_EEE_CAP2_FEATURES ((unsigned long *)&phy_eee_cap2_features) extern const int phy_basic_ports_array[3]; -extern const int phy_10_100_features_array[4]; -extern const int phy_basic_t1_features_array[3]; -extern const int phy_basic_t1s_p2mp_features_array[2]; -extern const int phy_gbit_features_array[2]; -extern const int phy_10gbit_features_array[1]; /* * Set phydev->irq to PHY_POLL if interrupts are not supported, @@ -89,7 +81,7 @@ extern const int phy_10gbit_features_array[1]; * @PHY_INTERFACE_MODE_RGMII: Reduced gigabit media-independent interface * @PHY_INTERFACE_MODE_RGMII_ID: RGMII with Internal RX+TX delay * @PHY_INTERFACE_MODE_RGMII_RXID: RGMII with Internal RX delay - * @PHY_INTERFACE_MODE_RGMII_TXID: RGMII with Internal RX delay + * @PHY_INTERFACE_MODE_RGMII_TXID: RGMII with Internal TX delay * @PHY_INTERFACE_MODE_RTBI: Reduced TBI * @PHY_INTERFACE_MODE_SMII: Serial MII * @PHY_INTERFACE_MODE_XGMII: 10 gigabit media-independent interface @@ -189,13 +181,6 @@ static inline void phy_interface_set_rgmii(unsigned long *intf) __set_bit(PHY_INTERFACE_MODE_RGMII_TXID, intf); } -/* - * phy_supported_speeds - return all speeds currently supported by a PHY device - */ -unsigned int phy_supported_speeds(struct phy_device *phy, - unsigned int *speeds, - unsigned int size); - /** * phy_modes - map phy_interface_t enum to device tree binding of phy-mode * @interface: enum phy_interface_t value @@ -303,13 +288,11 @@ static inline long rgmii_clock(int speed) } } -#define PHY_INIT_TIMEOUT 100000 -#define PHY_FORCE_TIMEOUT 10 - 
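/*
 * Editor's note (illustration, not part of the patch): the rgmii_clock()
 * helper visible in the hunk context above maps an RGMII link speed to the
 * transmit clock rate the MAC must run at (2.5 MHz for 10 Mbps, 25 MHz for
 * 100 Mbps, 125 MHz for 1000 Mbps), or a negative errno otherwise. A MAC
 * driver would typically call it from its link-up path, roughly as below;
 * the priv structure and its tx_clk member are hypothetical.
 */
struct example_mac_priv {
	struct clk *tx_clk;
};

static void example_mac_link_up(struct example_mac_priv *priv, int speed)
{
	long rate = rgmii_clock(speed);

	if (rate < 0)
		return;		/* speed not representable on RGMII */

	clk_set_rate(priv->tx_clk, rate);
}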
#define PHY_MAX_ADDR 32 /* Used when trying to connect to a specific phy (mii bus id:phy device id) */ #define PHY_ID_FMT "%s:%02x" +#define PHY_ID_SIZE (MII_BUS_ID_SIZE + 3) #define MII_BUS_ID_SIZE 61 @@ -337,41 +320,6 @@ struct mdio_bus_stats { struct u64_stats_sync syncp; }; -/** - * struct phy_package_shared - Shared information in PHY packages - * @base_addr: Base PHY address of PHY package used to combine PHYs - * in one package and for offset calculation of phy_package_read/write - * @np: Pointer to the Device Node if PHY package defined in DT - * @refcnt: Number of PHYs connected to this shared data - * @flags: Initialization of PHY package - * @priv_size: Size of the shared private data @priv - * @priv: Driver private data shared across a PHY package - * - * Represents a shared structure between different phydev's in the same - * package, for example a quad PHY. See phy_package_join() and - * phy_package_leave(). - */ -struct phy_package_shared { - u8 base_addr; - /* With PHY package defined in DT this points to the PHY package node */ - struct device_node *np; - refcount_t refcnt; - unsigned long flags; - size_t priv_size; - - /* private data pointer */ - /* note that this pointer is shared between different phydevs and - * the user has to take care of appropriate locking. It is allocated - * and freed automatically by phy_package_join() and - * phy_package_leave(). - */ - void *priv; -}; - -/* used as bit number in atomic bitops */ -#define PHY_SHARED_F_INIT_DONE 0 -#define PHY_SHARED_F_PROBE_DONE 1 - /** * struct mii_bus - Represents an MDIO bus * @@ -611,7 +559,7 @@ struct macsec_ops; * @eee_cfg: User configuration of EEE * @lp_advertising: Current link partner advertised linkmodes * @host_interfaces: PHY interface modes supported by host - * @eee_broken_modes: Energy efficient ethernet modes which should be prohibited + * @eee_disabled_modes: Energy efficient ethernet modes not to be advertised * @autoneg: Flag autoneg being used * @rate_matching: Current rate matching mode * @link: Current link state @@ -727,7 +675,7 @@ struct phy_device { __ETHTOOL_DECLARE_LINK_MODE_MASK(supported_eee); __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising_eee); /* Energy efficient ethernet modes which should be prohibited */ - __ETHTOOL_DECLARE_LINK_MODE_MASK(eee_broken_modes); + __ETHTOOL_DECLARE_LINK_MODE_MASK(eee_disabled_modes); bool enable_tx_lpi; bool eee_active; struct eee_config eee_cfg; @@ -1189,8 +1137,16 @@ struct phy_driver { int (*set_tunable)(struct phy_device *dev, struct ethtool_tunable *tuna, const void *data); - /** @set_loopback: Set the loopback mood of the PHY */ - int (*set_loopback)(struct phy_device *dev, bool enable); + /** + * @set_loopback: Set the loopback mode of the PHY + * enable selects if the loopback mode is enabled or disabled. If the + * loopback mode is enabled, then the speed of the loopback mode can be + * requested with the speed argument. If the speed argument is zero, + * then any speed can be selected. If the speed argument is > 0, then + * this speed shall be selected for the loopback mode or EOPNOTSUPP + * shall be returned if speed selection is not supported. 
+ */ + int (*set_loopback)(struct phy_device *dev, bool enable, int speed); /** @get_sqi: Get the signal quality indication */ int (*get_sqi)(struct phy_device *dev); /** @get_sqi_max: Get the maximum signal quality indication */ @@ -1273,13 +1229,23 @@ struct phy_driver { */ int (*led_polarity_set)(struct phy_device *dev, int index, unsigned long modes); + + /** + * @get_next_update_time: Get the time until the next update event + * @dev: PHY device + * + * Callback to determine the time (in jiffies) until the next + * update event for the PHY state machine. Allows PHY drivers to + * dynamically adjust polling intervals based on link state or other + * conditions. + * + * Returns the time in jiffies until the next update event. + */ + unsigned int (*get_next_update_time)(struct phy_device *dev); }; #define to_phy_driver(d) container_of_const(to_mdio_common_driver(d), \ struct phy_driver, mdiodrv) -#define PHY_ANY_ID "MATCH ANY PHY" -#define PHY_ANY_UID 0xffffffff - #define PHY_ID_MATCH_EXACT(id) .phy_id = (id), .phy_id_mask = GENMASK(31, 0) #define PHY_ID_MATCH_MODEL(id) .phy_id = (id), .phy_id_mask = GENMASK(31, 4) #define PHY_ID_MATCH_VENDOR(id) .phy_id = (id), .phy_id_mask = GENMASK(31, 10) @@ -1312,62 +1278,36 @@ static inline bool phydev_id_compare(struct phy_device *phydev, u32 id) return phy_id_compare(id, phydev->phy_id, phydev->drv->phy_id_mask); } -/* A Structure for boards to register fixups with the PHY Lib */ -struct phy_fixup { - struct list_head list; - char bus_id[MII_BUS_ID_SIZE + 3]; - u32 phy_uid; - u32 phy_uid_mask; - int (*run)(struct phy_device *phydev); -}; - const char *phy_speed_to_str(int speed); const char *phy_duplex_to_str(unsigned int duplex); const char *phy_rate_matching_to_str(int rate_matching); int phy_interface_num_ports(phy_interface_t interface); -/* A structure for mapping a particular speed and duplex - * combination to a particular SUPPORTED and ADVERTISED value - */ -struct phy_setting { - u32 speed; - u8 duplex; - u8 bit; -}; - -const struct phy_setting * -phy_lookup_setting(int speed, int duplex, const unsigned long *mask, - bool exact); -size_t phy_speeds(unsigned int *speeds, size_t size, - unsigned long *mask); -void of_set_phy_supported(struct phy_device *phydev); -void of_set_phy_eee_broken(struct phy_device *phydev); -void of_set_phy_timing_role(struct phy_device *phydev); -int phy_speed_down_core(struct phy_device *phydev); - /** - * phy_set_eee_broken - Mark an EEE mode as broken so that it isn't advertised. + * phy_is_started - Convenience function to check whether PHY is started * @phydev: The phy_device struct - * @link_mode: The broken EEE mode */ -static inline void phy_set_eee_broken(struct phy_device *phydev, u32 link_mode) +static inline bool phy_is_started(struct phy_device *phydev) { - linkmode_set_bit(link_mode, phydev->eee_broken_modes); + return phydev->state >= PHY_UP; } /** - * phy_is_started - Convenience function to check whether PHY is started + * phy_disable_eee_mode - Don't advertise an EEE mode. 
* @phydev: The phy_device struct + * @link_mode: The EEE mode to be disabled */ -static inline bool phy_is_started(struct phy_device *phydev) +static inline void phy_disable_eee_mode(struct phy_device *phydev, u32 link_mode) { - return phydev->state >= PHY_UP; + WARN_ON(phy_is_started(phydev)); + + linkmode_set_bit(link_mode, phydev->eee_disabled_modes); + linkmode_clear_bit(link_mode, phydev->advertising_eee); } void phy_resolve_aneg_pause(struct phy_device *phydev); void phy_resolve_aneg_linkmode(struct phy_device *phydev); -void phy_check_downshift(struct phy_device *phydev); /** * phy_read - Convenience function for reading a given PHY register @@ -1746,15 +1686,6 @@ static inline bool phy_is_default_hwtstamp(struct phy_device *phydev) return phy_has_hwtstamp(phydev) && phydev->default_timestamp; } -/** - * phy_is_internal - Convenience function for testing if a PHY is internal - * @phydev: the phy_device struct - */ -static inline bool phy_is_internal(struct phy_device *phydev) -{ - return phydev->is_internal; -} - /** * phy_on_sfp - Convenience function for testing if a PHY is on an SFP module * @phydev: the phy_device struct @@ -1878,7 +1809,7 @@ int phy_init_hw(struct phy_device *phydev); int phy_suspend(struct phy_device *phydev); int phy_resume(struct phy_device *phydev); int __phy_resume(struct phy_device *phydev); -int phy_loopback(struct phy_device *phydev, bool enable); +int phy_loopback(struct phy_device *phydev, bool enable, int speed); int phy_sfp_connect_phy(void *upstream, struct phy_device *phy); void phy_sfp_disconnect_phy(void *upstream, struct phy_device *phy); void phy_sfp_attach(void *upstream, struct sfp_bus *bus); @@ -1993,7 +1924,7 @@ int genphy_read_status(struct phy_device *phydev); int genphy_read_master_slave(struct phy_device *phydev); int genphy_suspend(struct phy_device *phydev); int genphy_resume(struct phy_device *phydev); -int genphy_loopback(struct phy_device *phydev, bool enable); +int genphy_loopback(struct phy_device *phydev, bool enable, int speed); int genphy_soft_reset(struct phy_device *phydev); irqreturn_t genphy_handle_interrupt_no_ack(struct phy_device *phydev); @@ -2035,7 +1966,7 @@ int genphy_c45_pma_baset1_read_master_slave(struct phy_device *phydev); int genphy_c45_read_status(struct phy_device *phydev); int genphy_c45_baset1_read_status(struct phy_device *phydev); int genphy_c45_config_aneg(struct phy_device *phydev); -int genphy_c45_loopback(struct phy_device *phydev, bool enable); +int genphy_c45_loopback(struct phy_device *phydev, bool enable, int speed); int genphy_c45_pma_resume(struct phy_device *phydev); int genphy_c45_pma_suspend(struct phy_device *phydev); int genphy_c45_fast_retrain(struct phy_device *phydev, bool enable); @@ -2045,14 +1976,12 @@ int genphy_c45_plca_set_cfg(struct phy_device *phydev, const struct phy_plca_cfg *plca_cfg); int genphy_c45_plca_get_status(struct phy_device *phydev, struct phy_plca_status *plca_st); -int genphy_c45_eee_is_active(struct phy_device *phydev, unsigned long *adv, - unsigned long *lp); +int genphy_c45_eee_is_active(struct phy_device *phydev, unsigned long *lp); int genphy_c45_ethtool_get_eee(struct phy_device *phydev, struct ethtool_keee *data); int genphy_c45_ethtool_set_eee(struct phy_device *phydev, struct ethtool_keee *data); int genphy_c45_an_config_eee_aneg(struct phy_device *phydev); -int genphy_c45_read_eee_adv(struct phy_device *phydev, unsigned long *adv); /* Generic C45 PHY driver */ extern struct phy_driver genphy_c45_driver; @@ -2078,7 +2007,6 @@ int 
phy_drivers_register(struct phy_driver *new_driver, int n, struct module *owner); void phy_error(struct phy_device *phydev); void phy_state_machine(struct work_struct *work); -void phy_queue_state_machine(struct phy_device *phydev, unsigned long jiffies); void phy_trigger_machine(struct phy_device *phydev); void phy_mac_interrupt(struct phy_device *phydev); void phy_start_machine(struct phy_device *phydev); @@ -2114,11 +2042,13 @@ void phy_get_pause(struct phy_device *phydev, bool *tx_pause, bool *rx_pause); s32 phy_get_internal_delay(struct phy_device *phydev, struct device *dev, const int *delay_values, int size, bool is_rx); +int phy_get_tx_amplitude_gain(struct phy_device *phydev, struct device *dev, + enum ethtool_link_mode_bit_indices linkmode, + u32 *val); + void phy_resolve_pause(unsigned long *local_adv, unsigned long *partner_adv, bool *tx_pause, bool *rx_pause); -int phy_register_fixup(const char *bus_id, u32 phy_uid, u32 phy_uid_mask, - int (*run)(struct phy_device *)); int phy_register_fixup_for_id(const char *bus_id, int (*run)(struct phy_device *)); int phy_register_fixup_for_uid(u32 phy_uid, u32 phy_uid_mask, @@ -2142,13 +2072,6 @@ int phy_ethtool_get_link_ksettings(struct net_device *ndev, int phy_ethtool_set_link_ksettings(struct net_device *ndev, const struct ethtool_link_ksettings *cmd); int phy_ethtool_nway_reset(struct net_device *ndev); -int phy_package_join(struct phy_device *phydev, int base_addr, size_t priv_size); -int of_phy_package_join(struct phy_device *phydev, size_t priv_size); -void phy_package_leave(struct phy_device *phydev); -int devm_phy_package_join(struct device *dev, struct phy_device *phydev, - int base_addr, size_t priv_size); -int devm_of_phy_package_join(struct device *dev, struct phy_device *phydev, - size_t priv_size); int __init mdio_bus_init(void); void mdio_bus_exit(void); @@ -2178,104 +2101,6 @@ int __phy_hwtstamp_set(struct phy_device *phydev, struct kernel_hwtstamp_config *config, struct netlink_ext_ack *extack); -static inline int phy_package_address(struct phy_device *phydev, - unsigned int addr_offset) -{ - struct phy_package_shared *shared = phydev->shared; - u8 base_addr = shared->base_addr; - - if (addr_offset >= PHY_MAX_ADDR - base_addr) - return -EIO; - - /* we know that addr will be in the range 0..31 and thus the - * implicit cast to a signed int is not a problem. 
- */ - return base_addr + addr_offset; -} - -static inline int phy_package_read(struct phy_device *phydev, - unsigned int addr_offset, u32 regnum) -{ - int addr = phy_package_address(phydev, addr_offset); - - if (addr < 0) - return addr; - - return mdiobus_read(phydev->mdio.bus, addr, regnum); -} - -static inline int __phy_package_read(struct phy_device *phydev, - unsigned int addr_offset, u32 regnum) -{ - int addr = phy_package_address(phydev, addr_offset); - - if (addr < 0) - return addr; - - return __mdiobus_read(phydev->mdio.bus, addr, regnum); -} - -static inline int phy_package_write(struct phy_device *phydev, - unsigned int addr_offset, u32 regnum, - u16 val) -{ - int addr = phy_package_address(phydev, addr_offset); - - if (addr < 0) - return addr; - - return mdiobus_write(phydev->mdio.bus, addr, regnum, val); -} - -static inline int __phy_package_write(struct phy_device *phydev, - unsigned int addr_offset, u32 regnum, - u16 val) -{ - int addr = phy_package_address(phydev, addr_offset); - - if (addr < 0) - return addr; - - return __mdiobus_write(phydev->mdio.bus, addr, regnum, val); -} - -int __phy_package_read_mmd(struct phy_device *phydev, - unsigned int addr_offset, int devad, - u32 regnum); - -int phy_package_read_mmd(struct phy_device *phydev, - unsigned int addr_offset, int devad, - u32 regnum); - -int __phy_package_write_mmd(struct phy_device *phydev, - unsigned int addr_offset, int devad, - u32 regnum, u16 val); - -int phy_package_write_mmd(struct phy_device *phydev, - unsigned int addr_offset, int devad, - u32 regnum, u16 val); - -static inline bool __phy_package_set_once(struct phy_device *phydev, - unsigned int b) -{ - struct phy_package_shared *shared = phydev->shared; - - if (!shared) - return false; - - return !test_and_set_bit(b, &shared->flags); -} - -static inline bool phy_package_init_once(struct phy_device *phydev) -{ - return __phy_package_set_once(phydev, PHY_SHARED_F_INIT_DONE); -} - -static inline bool phy_package_probe_once(struct phy_device *phydev) -{ - return __phy_package_set_once(phydev, PHY_SHARED_F_PROBE_DONE); -} - extern const struct bus_type mdio_bus_type; struct mdio_board_info { diff --git a/include/linux/phylink.h b/include/linux/phylink.h index 898b00451bbfd..1f5773ab56601 100644 --- a/include/linux/phylink.h +++ b/include/linux/phylink.h @@ -442,7 +442,6 @@ struct phylink_pcs_ops; * are supported by this PCS. * @ops: a pointer to the &struct phylink_pcs_ops structure * @phylink: pointer to &struct phylink_config - * @neg_mode: provide PCS neg mode via "mode" argument * @poll: poll the PCS for link changes * @rxc_always_on: The MAC driver requires the reference clock * to always be on. Standalone PCS drivers which @@ -459,7 +458,6 @@ struct phylink_pcs { DECLARE_PHY_INTERFACE_MASK(supported_interfaces); const struct phylink_pcs_ops *ops; struct phylink *phylink; - bool neg_mode; bool poll; bool rxc_always_on; }; @@ -477,6 +475,10 @@ struct phylink_pcs { * @pcs_an_restart: restart 802.3z BaseX autonegotiation. * @pcs_link_up: program the PCS for the resolved link configuration * (where necessary). + * @pcs_disable_eee: optional notification to PCS that EEE has been disabled + * at the MAC. + * @pcs_enable_eee: optional notification to PCS that EEE will be enabled at + * the MAC. * @pcs_pre_init: configure PCS components necessary for MAC hardware * initialization e.g. RX clock for stmmac. 
*/ @@ -500,6 +502,8 @@ struct phylink_pcs_ops { void (*pcs_an_restart)(struct phylink_pcs *pcs); void (*pcs_link_up)(struct phylink_pcs *pcs, unsigned int neg_mode, phy_interface_t interface, int speed, int duplex); + void (*pcs_disable_eee)(struct phylink_pcs *pcs); + void (*pcs_enable_eee)(struct phylink_pcs *pcs); int (*pcs_pre_init)(struct phylink_pcs *pcs); }; @@ -591,6 +595,14 @@ void pcs_get_state(struct phylink_pcs *pcs, unsigned int neg_mode, * The %neg_mode argument should be tested via the phylink_mode_*() family of * functions, or for PCS that set pcs->neg_mode true, should be tested * against the PHYLINK_PCS_NEG_* definitions. + * + * pcs_config() will be called when configuration of the PCS is required + * or when the advertisement is possibly updated. It must not unnecessarily + * disrupt an established link. + * + * When an autonegotiation restart is required for 802.3z modes, .pcs_config() + * should return a positive non-zero integer (e.g. 1) to indicate to phylink + * to call the pcs_an_restart() method. */ int pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode, phy_interface_t interface, const unsigned long *advertising, @@ -625,6 +637,22 @@ void pcs_an_restart(struct phylink_pcs *pcs); void pcs_link_up(struct phylink_pcs *pcs, unsigned int neg_mode, phy_interface_t interface, int speed, int duplex); +/** + * pcs_disable_eee() - Disable EEE at the PCS + * @pcs: a pointer to a &struct phylink_pcs + * + * Optional method informing the PCS that EEE has been disabled at the MAC. + */ +void pcs_disable_eee(struct phylink_pcs *pcs); + +/** + * pcs_enable_eee() - Enable EEE at the PCS + * @pcs: a pointer to a &struct phylink_pcs + * + * Optional method informing the PCS that EEE is about to be enabled at the MAC. + */ +void pcs_enable_eee(struct phylink_pcs *pcs); + /** * pcs_pre_init() - Configure PCS components necessary for MAC initialization * @pcs: a pointer to a &struct phylink_pcs. @@ -678,7 +706,11 @@ int phylink_pcs_pre_init(struct phylink *pl, struct phylink_pcs *pcs); void phylink_start(struct phylink *); void phylink_stop(struct phylink *); +void phylink_rx_clk_stop_block(struct phylink *); +void phylink_rx_clk_stop_unblock(struct phylink *); + void phylink_suspend(struct phylink *pl, bool mac_wol); +void phylink_prepare_resume(struct phylink *pl); void phylink_resume(struct phylink *pl); void phylink_ethtool_get_wol(struct phylink *, struct ethtool_wolinfo *); @@ -694,7 +726,6 @@ void phylink_ethtool_get_pauseparam(struct phylink *, int phylink_ethtool_set_pauseparam(struct phylink *, struct ethtool_pauseparam *); int phylink_get_eee_err(struct phylink *); -int phylink_init_eee(struct phylink *, bool); int phylink_ethtool_get_eee(struct phylink *link, struct ethtool_keee *eee); int phylink_ethtool_set_eee(struct phylink *link, struct ethtool_keee *eee); int phylink_mii_ioctl(struct phylink *, struct ifreq *, int); @@ -737,6 +768,18 @@ static inline int phylink_get_link_timer_ns(phy_interface_t interface) } } +/** + * phylink_mac_implements_lpi() - determine if MAC implements LPI ops + * @ops: phylink_mac_ops structure + * + * Returns true if the phylink MAC operations structure indicates that the + * LPI operations have been implemented, false otherwise. 
+ */ +static inline bool phylink_mac_implements_lpi(const struct phylink_mac_ops *ops) +{ + return ops && ops->mac_disable_tx_lpi && ops->mac_enable_tx_lpi; +} + void phylink_mii_c22_pcs_decode_state(struct phylink_link_state *state, unsigned int neg_mode, u16 bmsr, u16 lpa); void phylink_mii_c22_pcs_get_state(struct mdio_device *pcs, diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h index 8ff23bf5a8197..b698758000f8b 100644 --- a/include/linux/pipe_fs_i.h +++ b/include/linux/pipe_fs_i.h @@ -31,6 +31,33 @@ struct pipe_buffer { unsigned long private; }; +/* + * Really only alpha needs 32-bit fields, but + * might as well do it for 64-bit architectures + * since that's what we've historically done, + * and it makes 'head_tail' always be a simple + * 'unsigned long'. + */ +#ifdef CONFIG_64BIT +typedef unsigned int pipe_index_t; +#else +typedef unsigned short pipe_index_t; +#endif + +/* + * We have to declare this outside 'struct pipe_inode_info', + * but then we can't use 'union pipe_index' for an anonymous + * union, so we end up having to duplicate this declaration + * below. Annoying. + */ +union pipe_index { + unsigned long head_tail; + struct { + pipe_index_t head; + pipe_index_t tail; + }; +}; + /** * struct pipe_inode_info - a linux kernel pipe * @mutex: mutex protecting the whole thing @@ -38,6 +65,7 @@ struct pipe_buffer { * @wr_wait: writer wait point in case of full pipe * @head: The point of buffer production * @tail: The point of buffer consumption + * @head_tail: unsigned long union of @head and @tail * @note_loss: The next read() should insert a data-lost message * @max_usage: The maximum number of slots that may be used in the ring * @ring_size: total number of buffers (should be a power of 2) @@ -58,8 +86,16 @@ struct pipe_buffer { struct pipe_inode_info { struct mutex mutex; wait_queue_head_t rd_wait, wr_wait; - unsigned int head; - unsigned int tail; + + /* This has to match the 'union pipe_index' above */ + union { + unsigned long head_tail; + struct { + pipe_index_t head; + pipe_index_t tail; + }; + }; + unsigned int max_usage; unsigned int ring_size; unsigned int nr_accounted; @@ -141,23 +177,23 @@ static inline bool pipe_has_watch_queue(const struct pipe_inode_info *pipe) } /** - * pipe_empty - Return true if the pipe is empty + * pipe_occupancy - Return number of slots used in the pipe * @head: The pipe ring head pointer * @tail: The pipe ring tail pointer */ -static inline bool pipe_empty(unsigned int head, unsigned int tail) +static inline unsigned int pipe_occupancy(unsigned int head, unsigned int tail) { - return head == tail; + return (pipe_index_t)(head - tail); } /** - * pipe_occupancy - Return number of slots used in the pipe + * pipe_empty - Return true if the pipe is empty * @head: The pipe ring head pointer * @tail: The pipe ring tail pointer */ -static inline unsigned int pipe_occupancy(unsigned int head, unsigned int tail) +static inline bool pipe_empty(unsigned int head, unsigned int tail) { - return head - tail; + return !pipe_occupancy(head, tail); } /** @@ -172,6 +208,33 @@ static inline bool pipe_full(unsigned int head, unsigned int tail, return pipe_occupancy(head, tail) >= limit; } +/** + * pipe_is_full - Return true if the pipe is full + * @pipe: the pipe + */ +static inline bool pipe_is_full(const struct pipe_inode_info *pipe) +{ + return pipe_full(pipe->head, pipe->tail, pipe->max_usage); +} + +/** + * pipe_is_empty - Return true if the pipe is empty + * @pipe: the pipe + */ +static inline bool pipe_is_empty(const struct 
pipe_inode_info *pipe) +{ + return pipe_empty(pipe->head, pipe->tail); +} + +/** + * pipe_buf_usage - Return how many pipe buffers are in use + * @pipe: the pipe + */ +static inline unsigned int pipe_buf_usage(const struct pipe_inode_info *pipe) +{ + return pipe_occupancy(pipe->head, pipe->tail); +} + /** * pipe_buf - Return the pipe buffer for the specified slot in the pipe ring * @pipe: The pipe to access @@ -245,15 +308,6 @@ static inline bool pipe_buf_try_steal(struct pipe_inode_info *pipe, return buf->ops->try_steal(pipe, buf); } -static inline void pipe_discard_from(struct pipe_inode_info *pipe, - unsigned int old_head) -{ - unsigned int mask = pipe->ring_size - 1; - - while (pipe->head > old_head) - pipe_buf_release(pipe, &pipe->bufs[--pipe->head & mask]); -} - /* Differs from PIPE_BUF in that PIPE_SIZE is the length of the actual memory allocation, whereas PIPE_BUF makes atomicity guarantees. */ #define PIPE_SIZE PAGE_SIZE diff --git a/include/linux/platform_data/x86/intel_pmc_ipc.h b/include/linux/platform_data/x86/intel_pmc_ipc.h new file mode 100644 index 0000000000000..6e603a8c075f9 --- /dev/null +++ b/include/linux/platform_data/x86/intel_pmc_ipc.h @@ -0,0 +1,94 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Intel Core SoC Power Management Controller Header File + * + * Copyright (c) 2025, Intel Corporation. + * All Rights Reserved. + * + */ +#ifndef INTEL_PMC_IPC_H +#define INTEL_PMC_IPC_H +#include <linux/acpi.h> + +#define IPC_SOC_REGISTER_ACCESS 0xAA +#define IPC_SOC_SUB_CMD_READ 0x00 +#define IPC_SOC_SUB_CMD_WRITE 0x01 +#define PMC_IPCS_PARAM_COUNT 7 +#define VALID_IPC_RESPONSE 5 + +struct pmc_ipc_cmd { + u32 cmd; + u32 sub_cmd; + u32 size; + u32 wbuf[4]; +}; + +struct pmc_ipc_rbuf { + u32 buf[4]; +}; + +/** + * intel_pmc_ipc() - PMC IPC Mailbox accessor + * @ipc_cmd: Prepared input command to send + * @rbuf: Allocated array for returned IPC data + * + * Return: 0 on success. 
Non-zero on mailbox error + */ +static inline int intel_pmc_ipc(struct pmc_ipc_cmd *ipc_cmd, struct pmc_ipc_rbuf *rbuf) +{ + struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; + union acpi_object params[PMC_IPCS_PARAM_COUNT] = { + {.type = ACPI_TYPE_INTEGER,}, + {.type = ACPI_TYPE_INTEGER,}, + {.type = ACPI_TYPE_INTEGER,}, + {.type = ACPI_TYPE_INTEGER,}, + {.type = ACPI_TYPE_INTEGER,}, + {.type = ACPI_TYPE_INTEGER,}, + {.type = ACPI_TYPE_INTEGER,}, + }; + struct acpi_object_list arg_list = { PMC_IPCS_PARAM_COUNT, params }; + union acpi_object *obj; + int status; + + if (!ipc_cmd || !rbuf) + return -EINVAL; + + /* + * 0: IPC Command + * 1: IPC Sub Command + * 2: Size + * 3-6: Write Buffer for offset + */ + params[0].integer.value = ipc_cmd->cmd; + params[1].integer.value = ipc_cmd->sub_cmd; + params[2].integer.value = ipc_cmd->size; + params[3].integer.value = ipc_cmd->wbuf[0]; + params[4].integer.value = ipc_cmd->wbuf[1]; + params[5].integer.value = ipc_cmd->wbuf[2]; + params[6].integer.value = ipc_cmd->wbuf[3]; + + status = acpi_evaluate_object(NULL, "\\IPCS", &arg_list, &buffer); + if (ACPI_FAILURE(status)) + return -ENODEV; + + obj = buffer.pointer; + + if (obj && obj->type == ACPI_TYPE_PACKAGE && + obj->package.count == VALID_IPC_RESPONSE) { + const union acpi_object *objs = obj->package.elements; + + if ((u8)objs[0].integer.value != 0) + return -EINVAL; + + rbuf->buf[0] = objs[1].integer.value; + rbuf->buf[1] = objs[2].integer.value; + rbuf->buf[2] = objs[3].integer.value; + rbuf->buf[3] = objs[4].integer.value; + } else { + return -EINVAL; + } + + return 0; +} + +#endif /* INTEL_PMC_IPC_H */ diff --git a/include/linux/platform_profile.h b/include/linux/platform_profile.h index 8ab5b0e8eb2c1..8c9df7dadd5d3 100644 --- a/include/linux/platform_profile.h +++ b/include/linux/platform_profile.h @@ -33,6 +33,8 @@ enum platform_profile_option { * @probe: Callback to setup choices available to the new class device. These * choices will only be enforced when setting a new profile, not when * getting the current one. + * @hidden_choices: Callback to setup choices that are not visible to the user + * but can be set by the driver. * @profile_get: Callback that will be called when showing the current platform * profile in sysfs. * @profile_set: Callback that will be called when storing a new platform @@ -40,6 +42,7 @@ enum platform_profile_option { */ struct platform_profile_ops { int (*probe)(void *drvdata, unsigned long *choices); + int (*hidden_choices)(void *drvdata, unsigned long *choices); int (*profile_get)(struct device *dev, enum platform_profile_option *profile); int (*profile_set)(struct device *dev, enum platform_profile_option profile); }; diff --git a/include/linux/posix-clock.h b/include/linux/posix-clock.h index ef8619f489203..a500d3160fe8c 100644 --- a/include/linux/posix-clock.h +++ b/include/linux/posix-clock.h @@ -95,10 +95,13 @@ struct posix_clock { * struct posix_clock_context - represents clock file operations context * * @clk: Pointer to the clock + * @fp: Pointer to the file used to open the clock * @private_clkdata: Pointer to user data * * Drivers should use struct posix_clock_context during specific character - * device file operation methods to access the posix clock. + * device file operation methods to access the posix clock. In particular, + * the file pointer can be used to verify correct access mode for ioctl() + * calls. 
* * Drivers can store a private data structure during the open operation * if they have specific information that is required in other file @@ -106,6 +109,7 @@ struct posix_clock { */ struct posix_clock_context { struct posix_clock *clk; + struct file *fp; void *private_clkdata; }; diff --git a/include/linux/ppp_channel.h b/include/linux/ppp_channel.h index 45e6e427ceb8a..f73fbea0dbc23 100644 --- a/include/linux/ppp_channel.h +++ b/include/linux/ppp_channel.h @@ -42,8 +42,7 @@ struct ppp_channel { int hdrlen; /* amount of headroom channel needs */ void *ppp; /* opaque to channel */ int speed; /* transfer rate (bytes/second) */ - /* the following is not used at present */ - int latency; /* overhead time in milliseconds */ + bool direct_xmit; /* no qdisc, xmit directly */ }; #ifdef __KERNEL__ diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h index 0b2a898544409..ea62201c74c40 100644 --- a/include/linux/proc_fs.h +++ b/include/linux/proc_fs.h @@ -20,10 +20,13 @@ enum { * If in doubt, ignore this flag. */ #ifdef MODULE - PROC_ENTRY_PERMANENT = 0U, + PROC_ENTRY_PERMANENT = 0U, #else - PROC_ENTRY_PERMANENT = 1U << 0, + PROC_ENTRY_PERMANENT = 1U << 0, #endif + + PROC_ENTRY_proc_read_iter = 1U << 1, + PROC_ENTRY_proc_compat_ioctl = 1U << 2, }; struct proc_ops { diff --git a/include/linux/qed/qed_ll2_if.h b/include/linux/qed/qed_ll2_if.h index 5b67cd03276eb..aa29ac53b8330 100644 --- a/include/linux/qed/qed_ll2_if.h +++ b/include/linux/qed/qed_ll2_if.h @@ -267,7 +267,7 @@ struct qed_ll2_ops { int qed_ll2_alloc_if(struct qed_dev *); void qed_ll2_dealloc_if(struct qed_dev *); #else -static const struct qed_ll2_ops qed_ll2_ops_pass = { +static __maybe_unused const struct qed_ll2_ops qed_ll2_ops_pass = { .start = NULL, .stop = NULL, .start_xmit = NULL, diff --git a/include/linux/rcuref.h b/include/linux/rcuref.h index 2c8bfd0f1b6b3..6322d8c1c6b42 100644 --- a/include/linux/rcuref.h +++ b/include/linux/rcuref.h @@ -71,27 +71,30 @@ static inline __must_check bool rcuref_get(rcuref_t *ref) return rcuref_get_slowpath(ref); } -extern __must_check bool rcuref_put_slowpath(rcuref_t *ref); +extern __must_check bool rcuref_put_slowpath(rcuref_t *ref, unsigned int cnt); /* * Internal helper. Do not invoke directly. */ static __always_inline __must_check bool __rcuref_put(rcuref_t *ref) { + int cnt; + RCU_LOCKDEP_WARN(!rcu_read_lock_held() && preemptible(), "suspicious rcuref_put_rcusafe() usage"); /* * Unconditionally decrease the reference count. The saturation and * dead zones provide enough tolerance for this. */ - if (likely(!atomic_add_negative_release(-1, &ref->refcnt))) + cnt = atomic_sub_return_release(1, &ref->refcnt); + if (likely(cnt >= 0)) return false; /* * Handle the last reference drop and cases inside the saturation * and dead zones. 
*/ - return rcuref_put_slowpath(ref); + return rcuref_put_slowpath(ref, cnt); } /** diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h index 4bc2ee0b10b05..ccaaf4c7d5f6a 100644 --- a/include/linux/rtnetlink.h +++ b/include/linux/rtnetlink.h @@ -43,6 +43,7 @@ extern void rtnl_lock(void); extern void rtnl_unlock(void); extern int rtnl_trylock(void); extern int rtnl_is_locked(void); +extern int rtnl_lock_interruptible(void); extern int rtnl_lock_killable(void); extern bool refcount_dec_and_rtnl_lock(refcount_t *r); diff --git a/include/linux/sched.h b/include/linux/sched.h index 9632e3318e0d6..9c15365a30c08 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1701,7 +1701,7 @@ extern struct pid *cad_pid; #define PF_USED_MATH 0x00002000 /* If unset the fpu must be initialized before use */ #define PF_USER_WORKER 0x00004000 /* Kernel thread cloned from userspace thread */ #define PF_NOFREEZE 0x00008000 /* This thread should not be frozen */ -#define PF__HOLE__00010000 0x00010000 +#define PF_KCOMPACTD 0x00010000 /* I am kcompactd */ #define PF_KSWAPD 0x00020000 /* I am kswapd */ #define PF_MEMALLOC_NOFS 0x00040000 /* All allocations inherit GFP_NOFS. See memalloc_nfs_save() */ #define PF_MEMALLOC_NOIO 0x00080000 /* All allocations inherit GFP_NOIO. See memalloc_noio_save() */ diff --git a/include/linux/sctp.h b/include/linux/sctp.h index 836a7e200f395..6719949135c96 100644 --- a/include/linux/sctp.h +++ b/include/linux/sctp.h @@ -222,7 +222,6 @@ struct sctp_datahdr { __be16 stream; __be16 ssn; __u32 ppid; - /* __u8 payload[]; */ }; struct sctp_data_chunk { @@ -239,7 +238,6 @@ struct sctp_idatahdr { __u32 ppid; __be32 fsn; }; - __u8 payload[0]; }; struct sctp_idata_chunk { diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index bb2b751d274ac..31f09f1d187df 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -470,7 +470,7 @@ struct skb_shared_hwtstamps { /* Definitions for tx_flags in struct skb_shared_info */ enum { /* generate hardware time stamp */ - SKBTX_HW_TSTAMP = 1 << 0, + SKBTX_HW_TSTAMP_NOBPF = 1 << 0, /* generate software time stamp when queueing packet to NIC */ SKBTX_SW_TSTAMP = 1 << 1, @@ -478,8 +478,8 @@ enum { /* device driver is going to provide hardware time stamp */ SKBTX_IN_PROGRESS = 1 << 2, - /* generate hardware time stamp based on cycles if supported */ - SKBTX_HW_TSTAMP_USE_CYCLES = 1 << 3, + /* generate software time stamp on packet tx completion */ + SKBTX_COMPLETION_TSTAMP = 1 << 3, /* generate wifi status information (where possible) */ SKBTX_WIFI_STATUS = 1 << 4, @@ -489,12 +489,18 @@ enum { /* generate software time stamp when entering packet scheduling */ SKBTX_SCHED_TSTAMP = 1 << 6, + + /* used for bpf extension when a bpf program is loaded */ + SKBTX_BPF = 1 << 7, }; +#define SKBTX_HW_TSTAMP (SKBTX_HW_TSTAMP_NOBPF | SKBTX_BPF) + #define SKBTX_ANY_SW_TSTAMP (SKBTX_SW_TSTAMP | \ - SKBTX_SCHED_TSTAMP) + SKBTX_SCHED_TSTAMP | \ + SKBTX_BPF | \ + SKBTX_COMPLETION_TSTAMP) #define SKBTX_ANY_TSTAMP (SKBTX_HW_TSTAMP | \ - SKBTX_HW_TSTAMP_USE_CYCLES | \ SKBTX_ANY_SW_TSTAMP) /* Definitions for flags in struct skb_shared_info */ @@ -703,6 +709,8 @@ enum { SKB_GSO_UDP_L4 = 1 << 17, SKB_GSO_FRAGLIST = 1 << 18, + + SKB_GSO_TCP_ACCECN = 1 << 19, }; #if BITS_PER_LONG > 32 @@ -1315,6 +1323,7 @@ struct sk_buff *build_skb_around(struct sk_buff *skb, void *data, unsigned int frag_size); void skb_attempt_defer_free(struct sk_buff *skb); +u32 napi_skb_cache_get_bulk(void **skbs, u32 n); struct sk_buff *napi_build_skb(void *data, 
unsigned int frag_size); struct sk_buff *slab_build_skb(void *data); @@ -1527,14 +1536,8 @@ void __skb_get_hash_net(const struct net *net, struct sk_buff *skb); u32 skb_get_poff(const struct sk_buff *skb); u32 __skb_get_poff(const struct sk_buff *skb, const void *data, const struct flow_keys_basic *keys, int hlen); -__be32 __skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto, - const void *data, int hlen_proto); - -static inline __be32 skb_flow_get_ports(const struct sk_buff *skb, - int thoff, u8 ip_proto) -{ - return __skb_flow_get_ports(skb, thoff, ip_proto, NULL, 0); -} +__be32 skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto, + const void *data, int hlen_proto); void skb_flow_dissector_init(struct flow_dissector *flow_dissector, const struct flow_dissector_key *key, @@ -3865,25 +3868,6 @@ static inline int __must_check skb_put_padto(struct sk_buff *skb, unsigned int l bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i) __must_check; -static inline int skb_add_data(struct sk_buff *skb, - struct iov_iter *from, int copy) -{ - const int off = skb->len; - - if (skb->ip_summed == CHECKSUM_NONE) { - __wsum csum = 0; - if (csum_and_copy_from_iter_full(skb_put(skb, copy), copy, - &csum, from)) { - skb->csum = csum_block_add(skb->csum, csum, off); - return 0; - } - } else if (copy_from_iter_full(skb_put(skb, copy), copy, from)) - return 0; - - __skb_trim(skb, off); - return -EFAULT; -} - static inline bool skb_can_coalesce(struct sk_buff *skb, int i, const struct page *page, int off) { @@ -4162,6 +4146,8 @@ int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset, unsigned int flags); int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset, int len); +int skb_send_sock_locked_with_flags(struct sock *sk, struct sk_buff *skb, + int offset, int len, int flags); int skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset, int len); void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to); unsigned int skb_zerocopy_headlen(const struct sk_buff *from); @@ -4564,7 +4550,7 @@ void skb_tstamp_tx(struct sk_buff *orig_skb, static inline void skb_tx_timestamp(struct sk_buff *skb) { skb_clone_tx_timestamp(skb); - if (skb_shinfo(skb)->tx_flags & SKBTX_SW_TSTAMP) + if (skb_shinfo(skb)->tx_flags & (SKBTX_SW_TSTAMP | SKBTX_BPF)) skb_tstamp_tx(skb, NULL); } diff --git a/include/linux/socket.h b/include/linux/socket.h index d18cc47e89bd0..c3322eb3d6865 100644 --- a/include/linux/socket.h +++ b/include/linux/socket.h @@ -392,6 +392,8 @@ struct ucred { extern int move_addr_to_kernel(void __user *uaddr, int ulen, struct sockaddr_storage *kaddr); extern int put_cmsg(struct msghdr*, int level, int type, int len, void *data); +extern int put_cmsg_notrunc(struct msghdr *msg, int level, int type, int len, + void *data); struct timespec64; struct __kernel_timespec; diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h index c9878a612e532..c4ec8bb8144e7 100644 --- a/include/linux/stmmac.h +++ b/include/linux/stmmac.h @@ -78,6 +78,7 @@ | DMA_AXI_BLEN_32 | DMA_AXI_BLEN_64 \ | DMA_AXI_BLEN_128 | DMA_AXI_BLEN_256) +struct clk; struct stmmac_priv; /* Platfrom data for platform device structure's platform_data field */ @@ -182,7 +183,8 @@ struct dwmac4_addrs { #define STMMAC_FLAG_INT_SNAPSHOT_EN BIT(9) #define STMMAC_FLAG_RX_CLK_RUNS_IN_LPI BIT(10) #define STMMAC_FLAG_EN_TX_LPI_CLOCKGATING BIT(11) -#define STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY BIT(12) +#define STMMAC_FLAG_EN_TX_LPI_CLK_PHY_CAP 
BIT(12) +#define STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY BIT(13) struct plat_stmmacenet_data { int bus_id; @@ -231,11 +233,17 @@ struct plat_stmmacenet_data { u8 tx_sched_algorithm; struct stmmac_rxq_cfg rx_queues_cfg[MTL_MAX_RX_QUEUES]; struct stmmac_txq_cfg tx_queues_cfg[MTL_MAX_TX_QUEUES]; - void (*fix_mac_speed)(void *priv, unsigned int speed, unsigned int mode); + int (*set_clk_tx_rate)(void *priv, struct clk *clk_tx_i, + phy_interface_t interface, int speed); + void (*fix_mac_speed)(void *priv, int speed, unsigned int mode); int (*fix_soc_reset)(void *priv, void __iomem *ioaddr); int (*serdes_powerup)(struct net_device *ndev, void *priv); void (*serdes_powerdown)(struct net_device *ndev, void *priv); void (*speed_mode_2500)(struct net_device *ndev, void *priv); + int (*mac_finish)(struct net_device *ndev, + void *priv, + unsigned int mode, + phy_interface_t interface); void (*ptp_clk_freq_config)(struct stmmac_priv *priv); int (*init)(struct platform_device *pdev, void *priv); void (*exit)(struct platform_device *pdev, void *priv); @@ -252,8 +260,11 @@ struct plat_stmmacenet_data { struct clk *stmmac_clk; struct clk *pclk; struct clk *clk_ptp_ref; + struct clk *clk_tx_i; /* clk_tx_i to MAC core */ unsigned long clk_ptp_rate; unsigned long clk_ref_rate; + struct clk_bulk_data *clks; + int num_clks; unsigned int mult_fact_100ns; s32 ptp_max_adj; u32 cdc_error_adj; diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h index fec1e8a1570c3..eac57914dcf32 100644 --- a/include/linux/sunrpc/sched.h +++ b/include/linux/sunrpc/sched.h @@ -158,7 +158,6 @@ enum { RPC_TASK_NEED_XMIT, RPC_TASK_NEED_RECV, RPC_TASK_MSG_PIN_WAIT, - RPC_TASK_SIGNALLED, }; #define rpc_test_and_set_running(t) \ @@ -171,7 +170,7 @@ enum { #define RPC_IS_ACTIVATED(t) test_bit(RPC_TASK_ACTIVE, &(t)->tk_runstate) -#define RPC_SIGNALLED(t) test_bit(RPC_TASK_SIGNALLED, &(t)->tk_runstate) +#define RPC_SIGNALLED(t) (READ_ONCE((t)->tk_rpc_status) == -ERESTARTSYS) /* * Task priorities. diff --git a/include/linux/swap_cgroup.h b/include/linux/swap_cgroup.h index b5ec038069dab..91cdf12190a03 100644 --- a/include/linux/swap_cgroup.h +++ b/include/linux/swap_cgroup.h @@ -6,7 +6,7 @@ #if defined(CONFIG_MEMCG) && defined(CONFIG_SWAP) -extern void swap_cgroup_record(struct folio *folio, swp_entry_t ent); +extern void swap_cgroup_record(struct folio *folio, unsigned short id, swp_entry_t ent); extern unsigned short swap_cgroup_clear(swp_entry_t ent, unsigned int nr_ents); extern unsigned short lookup_swap_cgroup_id(swp_entry_t ent); extern int swap_cgroup_swapon(int type, unsigned long max_pages); @@ -15,7 +15,7 @@ extern void swap_cgroup_swapoff(int type); #else static inline -void swap_cgroup_record(struct folio *folio, swp_entry_t ent) +void swap_cgroup_record(struct folio *folio, unsigned short id, swp_entry_t ent) { } diff --git a/include/linux/tcp.h b/include/linux/tcp.h index f88daaa76d836..1669d95bb0f9a 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -160,6 +160,8 @@ struct tcp_request_sock { u32 rcv_isn; u32 snt_isn; u32 ts_off; + u32 snt_tsval_first; + u32 snt_tsval_last; u32 last_oow_ack_time; /* last SYNACK */ u32 rcv_nxt; /* the ack # by SYNACK. For * FastOpen it's the seq# @@ -242,6 +244,9 @@ struct tcp_sock { struct minmax rtt_min; /* OOO segments go in this rbtree. Socket lock must be held.
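The new snt_tsval_first/snt_tsval_last fields record the range of timestamp values sent on SYN-ACK (re)transmissions; they back the TCP_RFC7323_TSECR drop reason added further down. A hedged sketch of the kind of validation they enable — not the patch's actual code:

#include <net/tcp.h>

/* Accept a TSEcr only if it echoes a TSval this request really sent;
 * before()/after() keep the comparison wraparound-safe. */
static bool example_tsecr_is_echo(const struct tcp_request_sock *treq,
				  u32 tsecr)
{
	return !before(tsecr, treq->snt_tsval_first) &&
	       !after(tsecr, treq->snt_tsval_last);
}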
*/ struct rb_root out_of_order_queue; +#if defined(CONFIG_TLS_DEVICE) + void (*tcp_clean_acked)(struct sock *sk, u32 acked_seq); +#endif u32 snd_ssthresh; /* Slow start size threshold */ u8 recvmsg_inq : 1;/* Indicate # of bytes in queue upon recvmsg */ __cacheline_group_end(tcp_sock_read_rx); diff --git a/include/linux/udp.h b/include/linux/udp.h index 0807e21cfec95..895240177f4f4 100644 --- a/include/linux/udp.h +++ b/include/linux/udp.h @@ -101,6 +101,13 @@ struct udp_sock { /* Cache friendly copy of sk->sk_peek_off >= 0 */ bool peeking_with_offset; + + /* + * Accounting for the tunnel GRO fastpath. + * Unprotected by compilers guard, as it uses space available in + * the last UDP socket cacheline. + */ + struct hlist_node tunnel_list; }; #define udp_test_bit(nr, sk) \ @@ -219,4 +226,13 @@ static inline void udp_allow_gso(struct sock *sk) #define IS_UDPLITE(__sk) (__sk->sk_protocol == IPPROTO_UDPLITE) +static inline struct sock *udp_tunnel_sk(const struct net *net, bool is_ipv6) +{ +#if IS_ENABLED(CONFIG_NET_UDP_TUNNEL) + return rcu_dereference(net->ipv4.udp_tunnel_gro[is_ipv6].sk); +#else + return NULL; +#endif +} + #endif /* _LINUX_UDP_H */ diff --git a/include/linux/unroll.h b/include/linux/unroll.h index d42fd6366373e..863fb69f6a7e6 100644 --- a/include/linux/unroll.h +++ b/include/linux/unroll.h @@ -9,6 +9,50 @@ #include +#ifdef CONFIG_CC_IS_CLANG +#define __pick_unrolled(x, y) _Pragma(#x) +#elif CONFIG_GCC_VERSION >= 80000 +#define __pick_unrolled(x, y) _Pragma(#y) +#else +#define __pick_unrolled(x, y) /* not supported */ +#endif + +/** + * unrolled - loop attributes to ask the compiler to unroll it + * + * Usage: + * + * #define BATCH 8 + * + * unrolled_count(BATCH) + * for (u32 i = 0; i < BATCH; i++) + * // loop body without cross-iteration dependencies + * + * This is only a hint and the compiler is free to disable unrolling if it + * thinks the count is suboptimal and may hurt performance and/or hugely + * increase object code size. + * Not having any cross-iteration dependencies (i.e. when iter x + 1 depends + * on what iter x will do with variables) is not a strict requirement, but + * provides best performance and object code size. + * Available only on Clang and GCC 8.x onwards. + */ + +/* Ask the compiler to pick an optimal unroll count, Clang only */ +#define unrolled \ + __pick_unrolled(clang loop unroll(enable), /* nothing */) + +/* Unroll each @n iterations of the loop */ +#define unrolled_count(n) \ + __pick_unrolled(clang loop unroll_count(n), GCC unroll n) + +/* Unroll the whole loop */ +#define unrolled_full \ + __pick_unrolled(clang loop unroll(full), GCC unroll 65534) + +/* Never unroll the loop */ +#define unrolled_none \ + __pick_unrolled(clang loop unroll(disable), GCC unroll 1) + #define UNROLL(N, MACRO, args...) CONCATENATE(__UNROLL_, N)(MACRO, args) #define __UNROLL_0(MACRO, args...) diff --git a/include/linux/usb/mctp-usb.h b/include/linux/usb/mctp-usb.h new file mode 100644 index 0000000000000..a2f6f1e04efbd --- /dev/null +++ b/include/linux/usb/mctp-usb.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * mctp-usb.h - MCTP USB transport binding: common definitions, + * based on DMTF0283 specification: + * https://www.dmtf.org/sites/default/files/standards/documents/DSP0283_1.0.1.pdf + * + * These are protocol-level definitions, that may be shared between host + * and gadget drivers. 
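The unrolled_* helpers above map to "#pragma clang loop unroll ..." on Clang and "#pragma GCC unroll ..." on GCC 8+, and compile away elsewhere. A self-contained sketch mirroring the kernel-doc usage example:

#include <linux/types.h>
#include <linux/unroll.h>

#define EX_BATCH 8

static void example_copy_batch(u32 *dst, const u32 *src)
{
	/* hint the compiler to unroll by the batch size */
	unrolled_count(EX_BATCH)
	for (u32 i = 0; i < EX_BATCH; i++)
		dst[i] = src[i];	/* iterations are independent */
}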
+ * + * Copyright (C) 2024-2025 Code Construct Pty Ltd + */ + +#ifndef __LINUX_USB_MCTP_USB_H +#define __LINUX_USB_MCTP_USB_H + +#include + +struct mctp_usb_hdr { + __be16 id; + u8 rsvd; + u8 len; +} __packed; + +#define MCTP_USB_XFER_SIZE 512 +#define MCTP_USB_BTU 68 +#define MCTP_USB_MTU_MIN MCTP_USB_BTU +#define MCTP_USB_MTU_MAX (U8_MAX - sizeof(struct mctp_usb_hdr)) +#define MCTP_USB_DMTF_ID 0x1ab4 + +#endif /* __LINUX_USB_MCTP_USB_H */ diff --git a/include/linux/usb/r8152.h b/include/linux/usb/r8152.h index 33a4c146dc19c..2ca60828f28bb 100644 --- a/include/linux/usb/r8152.h +++ b/include/linux/usb/r8152.h @@ -30,6 +30,7 @@ #define VENDOR_ID_NVIDIA 0x0955 #define VENDOR_ID_TPLINK 0x2357 #define VENDOR_ID_DLINK 0x2001 +#define VENDOR_ID_DELL 0x413c #define VENDOR_ID_ASUS 0x0b05 #if IS_REACHABLE(CONFIG_USB_RTL8152) diff --git a/include/net/af_unix.h b/include/net/af_unix.h index 63129c79b8cbc..b588069ece7eb 100644 --- a/include/net/af_unix.h +++ b/include/net/af_unix.h @@ -2,11 +2,15 @@ #ifndef __LINUX_NET_AFUNIX_H #define __LINUX_NET_AFUNIX_H -#include -#include +#include #include +#include +#include #include +#include +#include #include +#include #if IS_ENABLED(CONFIG_UNIX) struct unix_sock *unix_get_socket(struct file *filp); @@ -17,61 +21,17 @@ static inline struct unix_sock *unix_get_socket(struct file *filp) } #endif -extern unsigned int unix_tot_inflight; -void unix_add_edges(struct scm_fp_list *fpl, struct unix_sock *receiver); -void unix_del_edges(struct scm_fp_list *fpl); -void unix_update_edges(struct unix_sock *receiver); -int unix_prepare_fpl(struct scm_fp_list *fpl); -void unix_destroy_fpl(struct scm_fp_list *fpl); -void unix_gc(void); -void wait_for_unix_gc(struct scm_fp_list *fpl); - -struct unix_vertex { - struct list_head edges; - struct list_head entry; - struct list_head scc_entry; - unsigned long out_degree; - unsigned long index; - unsigned long scc_index; -}; - -struct unix_edge { - struct unix_sock *predecessor; - struct unix_sock *successor; - struct list_head vertex_entry; - struct list_head stack_entry; -}; - -struct sock *unix_peer_get(struct sock *sk); - -#define UNIX_HASH_MOD (256 - 1) -#define UNIX_HASH_SIZE (256 * 2) -#define UNIX_HASH_BITS 8 - struct unix_address { refcount_t refcnt; int len; struct sockaddr_un name[]; }; -struct unix_skb_parms { - struct pid *pid; /* Skb credentials */ - kuid_t uid; - kgid_t gid; - struct scm_fp_list *fp; /* Passed files */ -#ifdef CONFIG_SECURITY_NETWORK - u32 secid; /* Security ID */ -#endif - u32 consumed; -} __randomize_layout; - struct scm_stat { atomic_t nr_fds; unsigned long nr_unix_fds; }; -#define UNIXCB(skb) (*(struct unix_skb_parms *)&((skb)->cb)) - /* The AF_UNIX socket */ struct unix_sock { /* WARNING: sk has to be the first member */ @@ -84,6 +44,7 @@ struct unix_sock { struct unix_vertex *vertex; spinlock_t lock; struct socket_wq peer_wq; +#define peer_wait peer_wq.wait wait_queue_entry_t peer_wake; struct scm_stat scm_stat; #if IS_ENABLED(CONFIG_AF_UNIX_OOB) @@ -94,35 +55,4 @@ struct unix_sock { #define unix_sk(ptr) container_of_const(ptr, struct unix_sock, sk) #define unix_peer(sk) (unix_sk(sk)->peer) -#define unix_state_lock(s) spin_lock(&unix_sk(s)->lock) -#define unix_state_unlock(s) spin_unlock(&unix_sk(s)->lock) - -#define peer_wait peer_wq.wait - -long unix_inq_len(struct sock *sk); -long unix_outq_len(struct sock *sk); - -int __unix_dgram_recvmsg(struct sock *sk, struct msghdr *msg, size_t size, - int flags); -int __unix_stream_recvmsg(struct sock *sk, struct msghdr *msg, size_t size, - int 
flags); -#ifdef CONFIG_SYSCTL -int unix_sysctl_register(struct net *net); -void unix_sysctl_unregister(struct net *net); -#else -static inline int unix_sysctl_register(struct net *net) { return 0; } -static inline void unix_sysctl_unregister(struct net *net) {} -#endif - -#ifdef CONFIG_BPF_SYSCALL -extern struct proto unix_dgram_proto; -extern struct proto unix_stream_proto; - -int unix_dgram_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore); -int unix_stream_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore); -void __init unix_bpf_build_proto(void); -#else -static inline void __init unix_bpf_build_proto(void) -{} -#endif #endif diff --git a/include/net/ax25.h b/include/net/ax25.h index 4ee141aae0a29..a7bba42dde153 100644 --- a/include/net/ax25.h +++ b/include/net/ax25.h @@ -418,7 +418,6 @@ void ax25_rt_device_down(struct net_device *); int ax25_rt_ioctl(unsigned int, void __user *); extern const struct seq_operations ax25_rt_seqops; ax25_route *ax25_get_route(ax25_address *addr, struct net_device *dev); -int ax25_rt_autobind(ax25_cb *, ax25_address *); struct sk_buff *ax25_rt_build_path(struct sk_buff *, ax25_address *, ax25_address *, ax25_digi *); void ax25_rt_free(void); diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h index 435250c72d568..bbefde319f95e 100644 --- a/include/net/bluetooth/bluetooth.h +++ b/include/net/bluetooth/bluetooth.h @@ -156,6 +156,7 @@ struct bt_voice { #define BT_PKT_STATUS 16 #define BT_SCM_PKT_STATUS 0x03 +#define BT_SCM_ERROR 0x04 #define BT_ISO_QOS 17 diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h index 0d51970d809fd..a8586c3058c7c 100644 --- a/include/net/bluetooth/hci.h +++ b/include/net/bluetooth/hci.h @@ -208,6 +208,13 @@ enum { */ HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, + /* When this quirk is set consider Sync Flow Control as supported by + * the driver. + * + * This quirk must be set before hci_register_dev is called. + */ + HCI_QUIRK_SYNC_FLOWCTL_SUPPORTED, + /* When this quirk is set, the LE states reported through the * HCI_LE_READ_SUPPORTED_STATES are invalid/broken. * @@ -354,6 +361,22 @@ enum { * during the hdev->setup vendor callback. */ HCI_QUIRK_FIXUP_LE_EXT_ADV_REPORT_PHY, + + /* When this quirk is set, the HCI_OP_READ_VOICE_SETTING command is + * skipped. This is required for a subset of the CSR controller clones + * which erroneously claim to support it. + * + * This quirk must be set before hci_register_dev is called. + */ + HCI_QUIRK_BROKEN_READ_VOICE_SETTING, + + /* When this quirk is set, the HCI_OP_READ_PAGE_SCAN_TYPE command is + * skipped. This is required for a subset of the CSR controller clones + * which erroneously claim to support it. + * + * This quirk must be set before hci_register_dev is called. 
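Both quirks follow the pattern their comments document: they must be set before hci_register_dev(). A sketch of how a hypothetical driver's setup path would flag a misbehaving CSR clone, covering this quirk and HCI_QUIRK_BROKEN_READ_PAGE_SCAN_TYPE declared just below:

static int example_csr_clone_setup(struct hci_dev *hdev)
{
	/* must run before hci_register_dev(hdev) */
	set_bit(HCI_QUIRK_BROKEN_READ_VOICE_SETTING, &hdev->quirks);
	set_bit(HCI_QUIRK_BROKEN_READ_PAGE_SCAN_TYPE, &hdev->quirks);
	return 0;
}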
+ */ + HCI_QUIRK_BROKEN_READ_PAGE_SCAN_TYPE, }; /* HCI device flags */ @@ -432,6 +455,7 @@ enum { HCI_WIDEBAND_SPEECH_ENABLED, HCI_EVENT_FILTER_CONFIGURED, HCI_PA_SYNC, + HCI_SCO_FLOWCTL, HCI_DUT_MODE, HCI_VENDOR_DIAG, @@ -683,7 +707,7 @@ enum { #define HCI_ERROR_REMOTE_POWER_OFF 0x15 #define HCI_ERROR_LOCAL_HOST_TERM 0x16 #define HCI_ERROR_PAIRING_NOT_ALLOWED 0x18 -#define HCI_ERROR_UNSUPPORTED_REMOTE_FEATURE 0x1e +#define HCI_ERROR_UNSUPPORTED_REMOTE_FEATURE 0x1a #define HCI_ERROR_INVALID_LL_PARAMS 0x1e #define HCI_ERROR_UNSPECIFIED 0x1f #define HCI_ERROR_ADVERTISING_TIMEOUT 0x3c @@ -855,6 +879,11 @@ struct hci_cp_remote_name_req_cancel { bdaddr_t bdaddr; } __packed; +struct hci_rp_remote_name_req_cancel { + __u8 status; + bdaddr_t bdaddr; +} __packed; + #define HCI_OP_READ_REMOTE_FEATURES 0x041b struct hci_cp_read_remote_features { __le16 handle; @@ -1528,6 +1557,11 @@ struct hci_rp_read_tx_power { __s8 tx_power; } __packed; +#define HCI_OP_WRITE_SYNC_FLOWCTL 0x0c2f +struct hci_cp_write_sync_flowctl { + __u8 enable; +} __packed; + #define HCI_OP_READ_PAGE_SCAN_TYPE 0x0c46 struct hci_rp_read_page_scan_type { __u8 status; diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index f756fac95488a..5115da34f8812 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -261,6 +261,12 @@ struct adv_info { struct delayed_work rpa_expired_cb; }; +struct tx_queue { + struct sk_buff_head queue; + unsigned int extra; + unsigned int tracked; +}; + #define HCI_MAX_ADV_INSTANCES 5 #define HCI_DEFAULT_ADV_DURATION 2 @@ -733,6 +739,8 @@ struct hci_conn { struct sk_buff_head data_q; struct list_head chan_list; + struct tx_queue tx_q; + struct delayed_work disc_work; struct delayed_work auto_accept_work; struct delayed_work idle_work; @@ -804,6 +812,7 @@ struct hci_conn_params { extern struct list_head hci_dev_list; extern struct list_head hci_cb_list; extern rwlock_t hci_dev_list_lock; +extern struct mutex hci_cb_list_lock; #define hci_dev_set_flag(hdev, nr) set_bit((nr), (hdev)->dev_flags) #define hci_dev_clear_flag(hdev, nr) clear_bit((nr), (hdev)->dev_flags) @@ -1571,6 +1580,18 @@ void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active); void hci_conn_failed(struct hci_conn *conn, u8 status); u8 hci_conn_set_handle(struct hci_conn *conn, u16 handle); +void hci_conn_tx_queue(struct hci_conn *conn, struct sk_buff *skb); +void hci_conn_tx_dequeue(struct hci_conn *conn); +void hci_setup_tx_timestamp(struct sk_buff *skb, size_t key_offset, + const struct sockcm_cookie *sockc); + +static inline void hci_sockcm_init(struct sockcm_cookie *sockc, struct sock *sk) +{ + *sockc = (struct sockcm_cookie) { + .tsflags = READ_ONCE(sk->sk_tsflags), + }; +} + /* * hci_conn_get() and hci_conn_put() are used to control the life-time of an * "hci_conn" object. 
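struct tx_queue and the hci_conn_tx_queue()/hci_conn_tx_dequeue() pair added above support per-connection TX-completion timestamping. The pairing sketched below is an assumption from the names — one dequeue per completed frame — not code from the patch:

static void example_conn_tx(struct hci_conn *conn, struct sk_buff *skb)
{
	hci_conn_tx_queue(conn, skb);	/* start tracking this frame */
	/* ... hand the skb to the transport driver ... */
}

static void example_conn_tx_complete(struct hci_conn *conn)
{
	hci_conn_tx_dequeue(conn);	/* one frame left the controller */
}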
They do not guarantee that the hci_conn object is running, @@ -1857,6 +1878,7 @@ void hci_conn_del_sysfs(struct hci_conn *conn); #define lmp_hold_capable(dev) ((dev)->features[0][0] & LMP_HOLD) #define lmp_sniff_capable(dev) ((dev)->features[0][0] & LMP_SNIFF) #define lmp_park_capable(dev) ((dev)->features[0][1] & LMP_PARK) +#define lmp_sco_capable(dev) ((dev)->features[0][1] & LMP_SCO) #define lmp_inq_rssi_capable(dev) ((dev)->features[0][3] & LMP_RSSI_INQ) #define lmp_esco_capable(dev) ((dev)->features[0][3] & LMP_ESCO) #define lmp_bredr_capable(dev) (!((dev)->features[0][4] & LMP_NO_BREDR)) @@ -1924,6 +1946,10 @@ void hci_conn_del_sysfs(struct hci_conn *conn); ((dev)->commands[20] & 0x10 && \ !test_bit(HCI_QUIRK_BROKEN_READ_ENC_KEY_SIZE, &hdev->quirks)) +#define read_voice_setting_capable(dev) \ + ((dev)->commands[9] & 0x04 && \ + !test_bit(HCI_QUIRK_BROKEN_READ_VOICE_SETTING, &(dev)->quirks)) + /* Use enhanced synchronous connection if command is supported and its quirk * has not been set. */ @@ -2010,47 +2036,24 @@ struct hci_cb { char *name; - bool (*match) (struct hci_conn *conn); void (*connect_cfm) (struct hci_conn *conn, __u8 status); void (*disconn_cfm) (struct hci_conn *conn, __u8 status); void (*security_cfm) (struct hci_conn *conn, __u8 status, - __u8 encrypt); + __u8 encrypt); void (*key_change_cfm) (struct hci_conn *conn, __u8 status); void (*role_switch_cfm) (struct hci_conn *conn, __u8 status, __u8 role); }; -static inline void hci_cb_lookup(struct hci_conn *conn, struct list_head *list) -{ - struct hci_cb *cb, *cpy; - - rcu_read_lock(); - list_for_each_entry_rcu(cb, &hci_cb_list, list) { - if (cb->match && cb->match(conn)) { - cpy = kmalloc(sizeof(*cpy), GFP_ATOMIC); - if (!cpy) - break; - - *cpy = *cb; - INIT_LIST_HEAD(&cpy->list); - list_add_rcu(&cpy->list, list); - } - } - rcu_read_unlock(); -} - static inline void hci_connect_cfm(struct hci_conn *conn, __u8 status) { - struct list_head list; - struct hci_cb *cb, *tmp; - - INIT_LIST_HEAD(&list); - hci_cb_lookup(conn, &list); + struct hci_cb *cb; - list_for_each_entry_safe(cb, tmp, &list, list) { + mutex_lock(&hci_cb_list_lock); + list_for_each_entry(cb, &hci_cb_list, list) { if (cb->connect_cfm) cb->connect_cfm(conn, status); - kfree(cb); } + mutex_unlock(&hci_cb_list_lock); if (conn->connect_cfm_cb) conn->connect_cfm_cb(conn, status); @@ -2058,43 +2061,22 @@ static inline void hci_connect_cfm(struct hci_conn *conn, __u8 status) static inline void hci_disconn_cfm(struct hci_conn *conn, __u8 reason) { - struct list_head list; - struct hci_cb *cb, *tmp; - - INIT_LIST_HEAD(&list); - hci_cb_lookup(conn, &list); + struct hci_cb *cb; - list_for_each_entry_safe(cb, tmp, &list, list) { + mutex_lock(&hci_cb_list_lock); + list_for_each_entry(cb, &hci_cb_list, list) { if (cb->disconn_cfm) cb->disconn_cfm(conn, reason); - kfree(cb); } + mutex_unlock(&hci_cb_list_lock); if (conn->disconn_cfm_cb) conn->disconn_cfm_cb(conn, reason); } -static inline void hci_security_cfm(struct hci_conn *conn, __u8 status, - __u8 encrypt) -{ - struct list_head list; - struct hci_cb *cb, *tmp; - - INIT_LIST_HEAD(&list); - hci_cb_lookup(conn, &list); - - list_for_each_entry_safe(cb, tmp, &list, list) { - if (cb->security_cfm) - cb->security_cfm(conn, status, encrypt); - kfree(cb); - } - - if (conn->security_cfm_cb) - conn->security_cfm_cb(conn, status); -} - static inline void hci_auth_cfm(struct hci_conn *conn, __u8 status) { + struct hci_cb *cb; __u8 encrypt; if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) @@ -2102,11 +2084,20 @@ static inline 
void hci_auth_cfm(struct hci_conn *conn, __u8 status) encrypt = test_bit(HCI_CONN_ENCRYPT, &conn->flags) ? 0x01 : 0x00; - hci_security_cfm(conn, status, encrypt); + mutex_lock(&hci_cb_list_lock); + list_for_each_entry(cb, &hci_cb_list, list) { + if (cb->security_cfm) + cb->security_cfm(conn, status, encrypt); + } + mutex_unlock(&hci_cb_list_lock); + + if (conn->security_cfm_cb) + conn->security_cfm_cb(conn, status); } static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status) { + struct hci_cb *cb; __u8 encrypt; if (conn->state == BT_CONFIG) { @@ -2133,38 +2124,40 @@ static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status) conn->sec_level = conn->pending_sec_level; } - hci_security_cfm(conn, status, encrypt); + mutex_lock(&hci_cb_list_lock); + list_for_each_entry(cb, &hci_cb_list, list) { + if (cb->security_cfm) + cb->security_cfm(conn, status, encrypt); + } + mutex_unlock(&hci_cb_list_lock); + + if (conn->security_cfm_cb) + conn->security_cfm_cb(conn, status); } static inline void hci_key_change_cfm(struct hci_conn *conn, __u8 status) { - struct list_head list; - struct hci_cb *cb, *tmp; + struct hci_cb *cb; - INIT_LIST_HEAD(&list); - hci_cb_lookup(conn, &list); - - list_for_each_entry_safe(cb, tmp, &list, list) { + mutex_lock(&hci_cb_list_lock); + list_for_each_entry(cb, &hci_cb_list, list) { if (cb->key_change_cfm) cb->key_change_cfm(conn, status); - kfree(cb); } + mutex_unlock(&hci_cb_list_lock); } static inline void hci_role_switch_cfm(struct hci_conn *conn, __u8 status, __u8 role) { - struct list_head list; - struct hci_cb *cb, *tmp; - - INIT_LIST_HEAD(&list); - hci_cb_lookup(conn, &list); + struct hci_cb *cb; - list_for_each_entry_safe(cb, tmp, &list, list) { + mutex_lock(&hci_cb_list_lock); + list_for_each_entry(cb, &hci_cb_list, list) { if (cb->role_switch_cfm) cb->role_switch_cfm(conn, status, role); - kfree(cb); } + mutex_unlock(&hci_cb_list_lock); } static inline bool hci_bdaddr_is_rpa(bdaddr_t *bdaddr, u8 addr_type) @@ -2386,8 +2379,6 @@ void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status); void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class, u8 status); void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status); -void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status); -void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status); void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, u8 addr_type, u8 *dev_class, s8 rssi, u32 flags, u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len, diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h index 9189354c568f4..4bb0eaedda180 100644 --- a/include/net/bluetooth/l2cap.h +++ b/include/net/bluetooth/l2cap.h @@ -38,8 +38,8 @@ #define L2CAP_DEFAULT_TX_WINDOW 63 #define L2CAP_DEFAULT_EXT_WINDOW 0x3FFF #define L2CAP_DEFAULT_MAX_TX 3 -#define L2CAP_DEFAULT_RETRANS_TO 2000 /* 2 seconds */ -#define L2CAP_DEFAULT_MONITOR_TO 12000 /* 12 seconds */ +#define L2CAP_DEFAULT_RETRANS_TO 2 /* seconds */ +#define L2CAP_DEFAULT_MONITOR_TO 12 /* seconds */ #define L2CAP_DEFAULT_MAX_PDU_SIZE 1492 /* Sized for AMP packet */ #define L2CAP_DEFAULT_ACK_TO 200 #define L2CAP_DEFAULT_MAX_SDU_SIZE 0xFFFF @@ -955,7 +955,8 @@ void l2cap_chan_close(struct l2cap_chan *chan, int reason); int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, bdaddr_t *dst, u8 dst_type, u16 timeout); int l2cap_chan_reconfigure(struct l2cap_chan *chan, __u16 mtu); -int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t 
len); +int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len, + const struct sockcm_cookie *sockc); void l2cap_chan_busy(struct l2cap_chan *chan, int busy); void l2cap_chan_rx_avail(struct l2cap_chan *chan, ssize_t rx_avail); int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator); diff --git a/include/net/bonding.h b/include/net/bonding.h index 8bb5f016969f1..d018a299bb88c 100644 --- a/include/net/bonding.h +++ b/include/net/bonding.h @@ -463,6 +463,14 @@ static inline void bond_hw_addr_copy(u8 *dst, const u8 *src, unsigned int len) memcpy(dst, src, len); } +static inline bool bond_hw_addr_equal(const u8 *dst, const u8 *src, unsigned int len) +{ + if (len == ETH_ALEN) + return ether_addr_equal(dst, src); + else + return (memcmp(dst, src, len) == 0); +} + #define BOND_PRI_RESELECT_ALWAYS 0 #define BOND_PRI_RESELECT_BETTER 1 #define BOND_PRI_RESELECT_FAILURE 2 @@ -695,6 +703,7 @@ void bond_debug_register(struct bonding *bond); void bond_debug_unregister(struct bonding *bond); void bond_debug_reregister(struct bonding *bond); const char *bond_mode_name(int mode); +bool bond_xdp_check(struct bonding *bond, int mode); void bond_setup(struct net_device *bond_dev); unsigned int bond_get_num_tx_queues(void); int bond_netlink_init(void); diff --git a/include/net/busy_poll.h b/include/net/busy_poll.h index c39a426ebf520..6e172d0f6ef5f 100644 --- a/include/net/busy_poll.h +++ b/include/net/busy_poll.h @@ -24,6 +24,11 @@ */ #define MIN_NAPI_ID ((unsigned int)(NR_CPUS + 1)) +static inline bool napi_id_valid(unsigned int napi_id) +{ + return napi_id >= MIN_NAPI_ID; +} + #define BUSY_POLL_BUDGET 8 #ifdef CONFIG_NET_RX_BUSY_POLL @@ -114,7 +119,7 @@ static inline void sk_busy_loop(struct sock *sk, int nonblock) #ifdef CONFIG_NET_RX_BUSY_POLL unsigned int napi_id = READ_ONCE(sk->sk_napi_id); - if (napi_id >= MIN_NAPI_ID) + if (napi_id_valid(napi_id)) napi_busy_loop(napi_id, nonblock ? NULL : sk_busy_loop_end, sk, READ_ONCE(sk->sk_prefer_busy_poll), READ_ONCE(sk->sk_busy_poll_budget) ?: BUSY_POLL_BUDGET); @@ -122,18 +127,24 @@ static inline void sk_busy_loop(struct sock *sk, int nonblock) } /* used in the NIC receive handler to mark the skb */ -static inline void skb_mark_napi_id(struct sk_buff *skb, - struct napi_struct *napi) +static inline void __skb_mark_napi_id(struct sk_buff *skb, + const struct gro_node *gro) { #ifdef CONFIG_NET_RX_BUSY_POLL /* If the skb was already marked with a valid NAPI ID, avoid overwriting * it. */ - if (skb->napi_id < MIN_NAPI_ID) - skb->napi_id = napi->napi_id; + if (!napi_id_valid(skb->napi_id)) + skb->napi_id = gro->cached_napi_id; #endif } +static inline void skb_mark_napi_id(struct sk_buff *skb, + const struct napi_struct *napi) +{ + __skb_mark_napi_id(skb, &napi->gro); +} + /* used in the protocol handler to propagate the napi_id to the socket */ static inline void sk_mark_napi_id(struct sock *sk, const struct sk_buff *skb) { diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 363d7dd2255aa..efbd79c67be21 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -7,7 +7,7 @@ * Copyright 2006-2010 Johannes Berg * Copyright 2013-2014 Intel Mobile Communications GmbH * Copyright 2015-2017 Intel Deutschland GmbH - * Copyright (C) 2018-2024 Intel Corporation + * Copyright (C) 2018-2025 Intel Corporation */ #include @@ -127,6 +127,8 @@ struct wiphy; * even if it is otherwise disabled. 
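l2cap_chan_send() now takes the sockcm cookie so per-send control data (notably timestamp flags) reaches the channel layer. A minimal sketch of a caller, assuming the generic sockcm_init() helper from net/sock.h:

static int example_chan_send(struct l2cap_chan *chan, struct msghdr *msg,
			     size_t len, struct sock *sk)
{
	struct sockcm_cookie sockc;

	sockcm_init(&sockc, sk);	/* seeds sockc.tsflags from the socket */
	return l2cap_chan_send(chan, msg, len, &sockc);
}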
* @IEEE80211_CHAN_ALLOW_6GHZ_VLP_AP: Allow using this channel for AP operation * with very low power (VLP), even if otherwise set to NO_IR. + * @IEEE80211_CHAN_ALLOW_20MHZ_ACTIVITY: Allow activity on a 20 MHz channel, + * even if otherwise set to NO_IR. */ enum ieee80211_channel_flags { IEEE80211_CHAN_DISABLED = BIT(0), @@ -155,6 +157,7 @@ enum ieee80211_channel_flags { IEEE80211_CHAN_NO_6GHZ_AFC_CLIENT = BIT(23), IEEE80211_CHAN_CAN_MONITOR = BIT(24), IEEE80211_CHAN_ALLOW_6GHZ_VLP_AP = BIT(25), + IEEE80211_CHAN_ALLOW_20MHZ_ACTIVITY = BIT(26), }; #define IEEE80211_CHAN_NO_HT40 \ @@ -1005,6 +1008,17 @@ cfg80211_chandef_compatible(const struct cfg80211_chan_def *chandef1, */ int nl80211_chan_width_to_mhz(enum nl80211_chan_width chan_width); +/** + * cfg80211_chandef_get_width - return chandef width in MHz + * @c: chandef to return bandwidth for + * Return: channel width in MHz for the given chandef; note that it returns + * 80 for 80+80 configurations + */ +static inline int cfg80211_chandef_get_width(const struct cfg80211_chan_def *c) +{ + return nl80211_chan_width_to_mhz(c->width); +} + /** * cfg80211_chandef_valid - check if a channel definition is valid * @chandef: the channel definition to check @@ -2045,9 +2059,6 @@ struct cfg80211_tid_stats { * @assoc_at: bootime (ns) of the last association * @rx_bytes: bytes (size of MPDUs) received from this station * @tx_bytes: bytes (size of MPDUs) transmitted to this station - * @llid: mesh local link id - * @plid: mesh peer link id - * @plink_state: mesh peer link state * @signal: The signal strength, type depends on the wiphy's signal_type. * For CFG80211_SIGNAL_TYPE_MBM, value is expressed in _dBm_. * @signal_avg: Average signal strength, type depends on the wiphy's signal_type. @@ -2067,14 +2078,20 @@ struct cfg80211_tid_stats { * This number should increase every time the list of stations * changes, i.e. when a station is added or removed, so that * userspace can tell whether it got a consistent snapshot. + * @beacon_loss_count: Number of times beacon loss event has triggered. * @assoc_req_ies: IEs from (Re)Association Request. * This is used only when in AP mode with drivers that do not use * user space MLME/SME implementation. The information is provided for * the cfg80211_new_sta() calls to notify user space of the IEs. * @assoc_req_ies_len: Length of assoc_req_ies buffer in octets. * @sta_flags: station flags mask & values - * @beacon_loss_count: Number of times beacon loss event has triggered. * @t_offset: Time offset of the station relative to this host. + * @llid: mesh local link id + * @plid: mesh peer link id + * @plink_state: mesh peer link state + * @connected_to_gate: true if mesh STA has a path to mesh gate + * @connected_to_as: true if mesh STA has a path to authentication server + * @airtime_link_metric: mesh airtime link metric. 
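cfg80211_chandef_get_width() above is shorthand for nl80211_chan_width_to_mhz(c->width); a trivial illustration of the 80+80 caveat from its kernel-doc:

static bool example_chandef_at_least_80(const struct cfg80211_chan_def *c)
{
	/* an 80+80 chandef also reports 80 here */
	return cfg80211_chandef_get_width(c) >= 80;
}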
* @local_pm: local mesh STA power save mode * @peer_pm: peer mesh STA power save mode * @nonpeer_pm: non-peer mesh STA power save mode @@ -2083,7 +2100,6 @@ struct cfg80211_tid_stats { * @rx_beacon: number of beacons received from this peer * @rx_beacon_signal_avg: signal strength average (in dBm) for beacons received * from this peer - * @connected_to_gate: true if mesh STA has a path to mesh gate * @rx_duration: aggregate PPDU duration(usecs) for all the frames from a peer * @tx_duration: aggregate PPDU duration(usecs) for all the frames to a peer * @airtime_weight: current airtime scheduling weight @@ -2097,8 +2113,6 @@ struct cfg80211_tid_stats { * @fcs_err_count: number of packets (MPDUs) received from this station with * an FCS error. This counter should be incremented only when TA of the * received packet with an FCS error matches the peer MAC address. - * @airtime_link_metric: mesh airtime link metric. - * @connected_to_as: true if mesh STA has a path to authentication server * @mlo_params_valid: Indicates @assoc_link_id and @mld_addr fields are filled * by driver. Drivers use this only in cfg80211_new_sta() calls when AP * MLD's MLME/SME is offload to driver. Drivers won't fill this @@ -2125,9 +2139,6 @@ struct station_info { u64 assoc_at; u64 rx_bytes; u64 tx_bytes; - u16 llid; - u16 plid; - u8 plink_state; s8 signal; s8 signal_avg; @@ -2147,36 +2158,38 @@ struct station_info { int generation; + u32 beacon_loss_count; + const u8 *assoc_req_ies; size_t assoc_req_ies_len; - u32 beacon_loss_count; s64 t_offset; + u16 llid; + u16 plid; + u8 plink_state; + u8 connected_to_gate; + u8 connected_to_as; + u32 airtime_link_metric; enum nl80211_mesh_power_mode local_pm; enum nl80211_mesh_power_mode peer_pm; enum nl80211_mesh_power_mode nonpeer_pm; u32 expected_throughput; - u64 tx_duration; - u64 rx_duration; - u64 rx_beacon; - u8 rx_beacon_signal_avg; - u8 connected_to_gate; + u16 airtime_weight; - struct cfg80211_tid_stats *pertid; s8 ack_signal; s8 avg_ack_signal; + struct cfg80211_tid_stats *pertid; - u16 airtime_weight; + u64 tx_duration; + u64 rx_duration; + u64 rx_beacon; + u8 rx_beacon_signal_avg; u32 rx_mpdu_count; u32 fcs_err_count; - u32 airtime_link_metric; - - u8 connected_to_as; - bool mlo_params_valid; u8 assoc_link_id; u8 mld_addr[ETH_ALEN] __aligned(2); @@ -2265,7 +2278,7 @@ static inline int cfg80211_get_station(struct net_device *dev, * @MONITOR_FLAG_PLCPFAIL: pass frames with bad PLCP * @MONITOR_FLAG_CONTROL: pass control frames * @MONITOR_FLAG_OTHER_BSS: disable BSSID filtering - * @MONITOR_FLAG_COOK_FRAMES: report frames after processing + * @MONITOR_FLAG_COOK_FRAMES: deprecated, will unconditionally be refused * @MONITOR_FLAG_ACTIVE: active monitor, ACKs frames on its MAC address * @MONITOR_FLAG_SKIP_TX: do not pass locally transmitted frames */ @@ -2947,6 +2960,7 @@ struct cfg80211_bss_ies { * @nontrans_list: list of non-transmitted BSS, if this is a transmitted one * (multi-BSSID support) * @signal: signal strength value (type depends on the wiphy's signal_type) + * @ts_boottime: timestamp of the last BSS update in nanoseconds since boot * @chains: bitmask for filled values in @chain_signal. * @chain_signal: per-chain signal strength of last received BSS in dBm. 
* @bssid_index: index in the multiple BSS set @@ -2971,6 +2985,8 @@ struct cfg80211_bss { s32 signal; + u64 ts_boottime; + u16 beacon_interval; u16 capability; @@ -3080,6 +3096,19 @@ struct cfg80211_assoc_link { int error; }; +/** + * struct cfg80211_ml_reconf_req - MLO link reconfiguration request + * @add_links: data for links to add, see &struct cfg80211_assoc_link + * @rem_links: bitmap of links to remove + * @ext_mld_capa_ops: extended MLD capabilities and operations set by + * userspace for the ML reconfiguration action frame + */ +struct cfg80211_ml_reconf_req { + struct cfg80211_assoc_link add_links[IEEE80211_MLD_MAX_NUM_LINKS]; + u16 rem_links; + u16 ext_mld_capa_ops; +}; + /** * enum cfg80211_assoc_req_flags - Over-ride default behaviour in association. * @@ -3130,10 +3159,10 @@ enum cfg80211_assoc_req_flags { * included in the Current AP address field of the Reassociation Request * frame. * @flags: See &enum cfg80211_assoc_req_flags - * @supported_selectors: supported selectors in IEEE 802.11 format + * @supported_selectors: supported BSS selectors in IEEE 802.11 format * (or %NULL for no change). * If %NULL, then support for SAE_H2E should be assumed. - * @supported_selectors_len: Length of supported_selectors in octets. + * @supported_selectors_len: number of supported BSS selectors * @ht_capa: HT Capabilities over-rides. Values set in ht_capa_mask * will be used in ht_capa. Un-supported values will be ignored. * @ht_capa_mask: The bits of ht_capa which are to be used. @@ -3152,6 +3181,8 @@ enum cfg80211_assoc_req_flags { * the link on which the association request should be sent * @ap_mld_addr: AP MLD address in case of MLO association request, * valid iff @link_id >= 0 + * @ext_mld_capa_ops: extended MLD capabilities and operations set by + * userspace for the association */ struct cfg80211_assoc_request { struct cfg80211_bss *bss; @@ -3172,6 +3203,7 @@ struct cfg80211_assoc_request { struct cfg80211_assoc_link links[IEEE80211_MLD_MAX_NUM_LINKS]; const u8 *ap_mld_addr; s8 link_id; + u16 ext_mld_capa_ops; }; /** @@ -4970,8 +5002,7 @@ struct cfg80211_ops { struct cfg80211_ttlm_params *params); u32 (*get_radio_mask)(struct wiphy *wiphy, struct net_device *dev); int (*assoc_ml_reconf)(struct wiphy *wiphy, struct net_device *dev, - struct cfg80211_assoc_link *add_links, - u16 rem_links); + struct cfg80211_ml_reconf_req *req); int (*set_epcs)(struct wiphy *wiphy, struct net_device *dev, bool val); }; @@ -9750,6 +9781,7 @@ struct cfg80211_mlo_reconf_done_data { u16 added_links; struct { struct cfg80211_bss *bss; + u8 *addr; } links[IEEE80211_MLD_MAX_NUM_LINKS]; }; diff --git a/include/net/devlink.h b/include/net/devlink.h index b8783126c1ed2..a0b84efd4740a 100644 --- a/include/net/devlink.h +++ b/include/net/devlink.h @@ -1846,6 +1846,8 @@ int devlink_info_serial_number_put(struct devlink_info_req *req, const char *sn); int devlink_info_board_serial_number_put(struct devlink_info_req *req, const char *bsn); +int devlink_info_function_uid_put(struct devlink_info_req *req, + const char *fuid); enum devlink_info_version_type { DEVLINK_INFO_VERSION_TYPE_NONE, diff --git a/include/net/dropreason-core.h b/include/net/dropreason-core.h index 32a34dfe8cc58..e4fdc6b54ceff 100644 --- a/include/net/dropreason-core.h +++ b/include/net/dropreason-core.h @@ -40,6 +40,8 @@ FN(TCP_OFOMERGE) \ FN(TCP_RFC7323_PAWS) \ FN(TCP_RFC7323_PAWS_ACK) \ + FN(TCP_RFC7323_TSECR) \ + FN(TCP_LISTEN_OVERFLOW) \ FN(TCP_OLD_SEQUENCE) \ FN(TCP_INVALID_SEQUENCE) \ FN(TCP_INVALID_ACK_SEQUENCE) \ @@ -281,6 +283,13 @@ 
enum skb_drop_reason { * Corresponds to LINUX_MIB_PAWS_OLD_ACK. */ SKB_DROP_REASON_TCP_RFC7323_PAWS_ACK, + /** + * @SKB_DROP_REASON_TCP_RFC7323_TSECR: PAWS check, invalid TSEcr. + * Corresponds to LINUX_MIB_TSECRREJECTED. + */ + SKB_DROP_REASON_TCP_RFC7323_TSECR, + /** @SKB_DROP_REASON_TCP_LISTEN_OVERFLOW: listener queue full. */ + SKB_DROP_REASON_TCP_LISTEN_OVERFLOW, /** @SKB_DROP_REASON_TCP_OLD_SEQUENCE: Old SEQ field (duplicate packet) */ SKB_DROP_REASON_TCP_OLD_SEQUENCE, /** @SKB_DROP_REASON_TCP_INVALID_SEQUENCE: Not acceptable SEQ field */ diff --git a/include/net/dropreason.h b/include/net/dropreason.h index 56cb7be92244c..7d3b1a2a6feca 100644 --- a/include/net/dropreason.h +++ b/include/net/dropreason.h @@ -17,12 +17,6 @@ enum skb_drop_reason_subsys { */ SKB_DROP_REASON_SUBSYS_MAC80211_UNUSABLE, - /** - * @SKB_DROP_REASON_SUBSYS_MAC80211_MONITOR: mac80211 drop reasons - * for frames still going to monitor, see net/mac80211/drop.h - */ - SKB_DROP_REASON_SUBSYS_MAC80211_MONITOR, - /** * @SKB_DROP_REASON_SUBSYS_OPENVSWITCH: openvswitch drop reasons, * see net/openvswitch/drop.h diff --git a/include/net/dst_metadata.h b/include/net/dst_metadata.h index 84c15402931cd..4160731dcb6e3 100644 --- a/include/net/dst_metadata.h +++ b/include/net/dst_metadata.h @@ -163,11 +163,8 @@ static inline struct metadata_dst *tun_dst_unclone(struct sk_buff *skb) if (!new_md) return ERR_PTR(-ENOMEM); - unsafe_memcpy(&new_md->u.tun_info, &md_dst->u.tun_info, - sizeof(struct ip_tunnel_info) + md_size, - /* metadata_dst_alloc() reserves room (md_size bytes) for - * options right after the ip_tunnel_info struct. - */); + memcpy(&new_md->u.tun_info, &md_dst->u.tun_info, + sizeof(struct ip_tunnel_info) + md_size); #ifdef CONFIG_DST_CACHE /* Unclone the dst cache if there is one */ if (new_md->u.tun_info.dst_cache.cache) { diff --git a/include/net/fib_rules.h b/include/net/fib_rules.h index 04383d90a1e38..5927910ec06e5 100644 --- a/include/net/fib_rules.h +++ b/include/net/fib_rules.h @@ -43,6 +43,8 @@ struct fib_rule { struct fib_kuid_range uid_range; struct fib_rule_port_range sport_range; struct fib_rule_port_range dport_range; + u16 sport_mask; + u16 dport_mask; struct rcu_head rcu; }; @@ -146,6 +148,17 @@ static inline bool fib_rule_port_inrange(const struct fib_rule_port_range *a, ntohs(port) <= a->end; } +static inline bool fib_rule_port_match(const struct fib_rule_port_range *range, + u16 port_mask, __be16 port) +{ + if ((range->start ^ ntohs(port)) & port_mask) + return false; + if (!port_mask && fib_rule_port_range_set(range) && + !fib_rule_port_inrange(range, port)) + return false; + return true; +} + static inline bool fib_rule_port_range_valid(const struct fib_rule_port_range *a) { return a->start != 0 && a->end != 0 && a->end < 0xffff && @@ -159,6 +172,12 @@ static inline bool fib_rule_port_range_compare(struct fib_rule_port_range *a, a->end == b->end; } +static inline bool +fib_rule_port_is_range(const struct fib_rule_port_range *range) +{ + return range->start != range->end; +} + static inline bool fib_rule_requires_fldissect(struct fib_rule *rule) { return rule->iifindex != LOOPBACK_IFINDEX && (rule->ip_proto || @@ -178,10 +197,10 @@ int fib_rules_dump(struct net *net, struct notifier_block *nb, int family, struct netlink_ext_ack *extack); unsigned int fib_rules_seq_read(const struct net *net, int family); -int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh, - struct netlink_ext_ack *extack); -int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr *nlh, - struct netlink_ext_ack 
*extack); +int fib_newrule(struct net *net, struct sk_buff *skb, struct nlmsghdr *nlh, + struct netlink_ext_ack *extack, bool rtnl_held); +int fib_delrule(struct net *net, struct sk_buff *skb, struct nlmsghdr *nlh, + struct netlink_ext_ack *extack, bool rtnl_held); INDIRECT_CALLABLE_DECLARE(int fib6_rule_match(struct fib_rule *rule, struct flowi *fl, int flags)); diff --git a/include/net/gro.h b/include/net/gro.h index 7b548f91754bf..22d3a69e4404c 100644 --- a/include/net/gro.h +++ b/include/net/gro.h @@ -509,28 +509,46 @@ static inline int gro_receive_network_flush(const void *th, const void *th2, int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb); int skb_gro_receive_list(struct sk_buff *p, struct sk_buff *skb); +void __gro_flush(struct gro_node *gro, bool flush_old); + +static inline void gro_flush(struct gro_node *gro, bool flush_old) +{ + if (!gro->bitmask) + return; + + __gro_flush(gro, flush_old); +} + +static inline void napi_gro_flush(struct napi_struct *napi, bool flush_old) +{ + gro_flush(&napi->gro, flush_old); +} /* Pass the currently batched GRO_NORMAL SKBs up to the stack. */ -static inline void gro_normal_list(struct napi_struct *napi) +static inline void gro_normal_list(struct gro_node *gro) { - if (!napi->rx_count) + if (!gro->rx_count) return; - netif_receive_skb_list_internal(&napi->rx_list); - INIT_LIST_HEAD(&napi->rx_list); - napi->rx_count = 0; + netif_receive_skb_list_internal(&gro->rx_list); + INIT_LIST_HEAD(&gro->rx_list); + gro->rx_count = 0; } /* Queue one GRO_NORMAL SKB up for list processing. If batch size exceeded, * pass the whole batch up to the stack. */ -static inline void gro_normal_one(struct napi_struct *napi, struct sk_buff *skb, int segs) +static inline void gro_normal_one(struct gro_node *gro, struct sk_buff *skb, + int segs) { - list_add_tail(&skb->list, &napi->rx_list); - napi->rx_count += segs; - if (napi->rx_count >= READ_ONCE(net_hotdata.gro_normal_batch)) - gro_normal_list(napi); + list_add_tail(&skb->list, &gro->rx_list); + gro->rx_count += segs; + if (gro->rx_count >= READ_ONCE(net_hotdata.gro_normal_batch)) + gro_normal_list(gro); } +void gro_init(struct gro_node *gro); +void gro_cleanup(struct gro_node *gro); + /* This function is the alternative of 'inet_iif' and 'inet_sdif' * functions in case we can not rely on fields of IPCB. 
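With GRO state carved out into struct gro_node, the flush/deliver helpers now operate on the node and the napi_* names stay behind as thin wrappers. A sketch of the canonical end-of-poll ordering using the new API:

static void example_poll_done(struct gro_node *gro)
{
	gro_flush(gro, false);	/* flush all held GRO packets, old or not */
	gro_normal_list(gro);	/* hand the batched skbs to the stack */
}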
* diff --git a/include/net/hotdata.h b/include/net/hotdata.h index 30e9570beb2af..fda94b2647ffa 100644 --- a/include/net/hotdata.h +++ b/include/net/hotdata.h @@ -23,7 +23,6 @@ struct net_hotdata { struct net_offload udpv6_offload; #endif struct list_head offload_base; - struct list_head ptype_all; struct kmem_cache *skbuff_cache; struct kmem_cache *skbuff_fclone_cache; struct kmem_cache *skb_small_head_cache; diff --git a/include/net/inet6_connection_sock.h b/include/net/inet6_connection_sock.h index 025bd8d3c769a..745891d2e1137 100644 --- a/include/net/inet6_connection_sock.h +++ b/include/net/inet6_connection_sock.h @@ -21,8 +21,6 @@ struct sockaddr; struct dst_entry *inet6_csk_route_req(const struct sock *sk, struct flowi6 *fl6, const struct request_sock *req, u8 proto); -void inet6_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr); - int inet6_csk_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl); struct dst_entry *inet6_csk_update_pmtu(struct sock *sk, u32 mtu); diff --git a/include/net/inet6_hashtables.h b/include/net/inet6_hashtables.h index 74dd90ff5f129..c32878c69179d 100644 --- a/include/net/inet6_hashtables.h +++ b/include/net/inet6_hashtables.h @@ -150,7 +150,7 @@ static inline struct sock *__inet6_lookup_skb(struct inet_hashinfo *hashinfo, int iif, int sdif, bool *refcounted) { - struct net *net = dev_net(skb_dst(skb)->dev); + struct net *net = dev_net_rcu(skb_dst(skb)->dev); const struct ipv6hdr *ip6h = ipv6_hdr(skb); struct sock *sk; diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h index c7f42844c79a9..1735db332aab5 100644 --- a/include/net/inet_connection_sock.h +++ b/include/net/inet_connection_sock.h @@ -44,12 +44,10 @@ struct inet_connection_sock_af_ops { struct request_sock *req_unhash, bool *own_req); u16 net_header_len; - u16 sockaddr_len; int (*setsockopt)(struct sock *sk, int level, int optname, sockptr_t optval, unsigned int optlen); int (*getsockopt)(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen); - void (*addr2sockaddr)(struct sock *sk, struct sockaddr *); void (*mtu_reduced)(struct sock *sk); }; @@ -58,7 +56,6 @@ struct inet_connection_sock_af_ops { * @icsk_accept_queue: FIFO of established children * @icsk_bind_hash: Bind node * @icsk_bind2_hash: Bind node in the bhash2 table - * @icsk_timeout: Timeout * @icsk_retransmit_timer: Resend (no ack) * @icsk_rto: Retransmit timeout * @icsk_pmtu_cookie Last pmtu seen by socket @@ -66,7 +63,6 @@ struct inet_connection_sock_af_ops { * @icsk_af_ops Operations which are AF_INET{4,6} specific * @icsk_ulp_ops Pluggable ULP control hook * @icsk_ulp_data ULP private data - * @icsk_clean_acked Clean acked data hook * @icsk_ca_state: Congestion control state * @icsk_retransmits: Number of unrecovered [RTO] timeouts * @icsk_pending: Scheduled timer event @@ -85,18 +81,17 @@ struct inet_connection_sock { struct request_sock_queue icsk_accept_queue; struct inet_bind_bucket *icsk_bind_hash; struct inet_bind2_bucket *icsk_bind2_hash; - unsigned long icsk_timeout; struct timer_list icsk_retransmit_timer; struct timer_list icsk_delack_timer; __u32 icsk_rto; __u32 icsk_rto_min; + u32 icsk_rto_max; __u32 icsk_delack_max; __u32 icsk_pmtu_cookie; const struct tcp_congestion_ops *icsk_ca_ops; const struct inet_connection_sock_af_ops *icsk_af_ops; const struct tcp_ulp_ops *icsk_ulp_ops; void __rcu *icsk_ulp_data; - void (*icsk_clean_acked)(struct sock *sk, u32 acked_seq); unsigned int (*icsk_sync_mss)(struct sock *sk, u32 pmtu); __u8 icsk_ca_state:5, 
icsk_ca_initialized:1, @@ -116,8 +111,8 @@ struct inet_connection_sock { #define ATO_BITS 8 __u32 ato:ATO_BITS, /* Predicted tick of soft clock */ lrcv_flowlabel:20, /* last received ipv6 flowlabel */ - unused:4; - unsigned long timeout; /* Currently scheduled timeout */ + dst_quick_ack:1, /* cache dst RTAX_QUICKACK */ + unused:3; __u32 lrcvtime; /* timestamp of last received data packet */ __u16 last_seg_size; /* Size of last incoming segment */ __u16 rcv_mss; /* MSS used for delayed ACK decisions */ @@ -189,8 +184,17 @@ static inline void inet_csk_delack_init(struct sock *sk) memset(&inet_csk(sk)->icsk_ack, 0, sizeof(inet_csk(sk)->icsk_ack)); } -void inet_csk_delete_keepalive_timer(struct sock *sk); -void inet_csk_reset_keepalive_timer(struct sock *sk, unsigned long timeout); +static inline unsigned long +icsk_timeout(const struct inet_connection_sock *icsk) +{ + return READ_ONCE(icsk->icsk_retransmit_timer.expires); +} + +static inline unsigned long +icsk_delack_timeout(const struct inet_connection_sock *icsk) +{ + return READ_ONCE(icsk->icsk_delack_timer.expires); +} static inline void inet_csk_clear_xmit_timer(struct sock *sk, const int what) { @@ -227,16 +231,15 @@ static inline void inet_csk_reset_xmit_timer(struct sock *sk, const int what, when = max_when; } + when += jiffies; if (what == ICSK_TIME_RETRANS || what == ICSK_TIME_PROBE0 || what == ICSK_TIME_LOSS_PROBE || what == ICSK_TIME_REO_TIMEOUT) { smp_store_release(&icsk->icsk_pending, what); - icsk->icsk_timeout = jiffies + when; - sk_reset_timer(sk, &icsk->icsk_retransmit_timer, icsk->icsk_timeout); + sk_reset_timer(sk, &icsk->icsk_retransmit_timer, when); } else if (what == ICSK_TIME_DACK) { smp_store_release(&icsk->icsk_ack.pending, icsk->icsk_ack.pending | ICSK_ACK_TIMER); - icsk->icsk_ack.timeout = jiffies + when; - sk_reset_timer(sk, &icsk->icsk_delack_timer, icsk->icsk_ack.timeout); + sk_reset_timer(sk, &icsk->icsk_delack_timer, when); } else { pr_debug("inet_csk BUG: unknown timer value\n"); } @@ -318,8 +321,6 @@ static inline __poll_t inet_csk_listen_poll(const struct sock *sk) int inet_csk_listen_start(struct sock *sk); void inet_csk_listen_stop(struct sock *sk); -void inet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr); - /* update the fast reuse flag when adding a socket */ void inet_csk_update_fastreuse(struct inet_bind_bucket *tb, struct sock *sk); diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h index 5af6eb14c5db1..0eccd9c3a883f 100644 --- a/include/net/inet_frag.h +++ b/include/net/inet_frag.h @@ -137,7 +137,7 @@ static inline void fqdir_pre_exit(struct fqdir *fqdir) } void fqdir_exit(struct fqdir *fqdir); -void inet_frag_kill(struct inet_frag_queue *q); +void inet_frag_kill(struct inet_frag_queue *q, int *refs); void inet_frag_destroy(struct inet_frag_queue *q); struct inet_frag_queue *inet_frag_find(struct fqdir *fqdir, void *key); @@ -145,9 +145,9 @@ struct inet_frag_queue *inet_frag_find(struct fqdir *fqdir, void *key); unsigned int inet_frag_rbtree_purge(struct rb_root *root, enum skb_drop_reason reason); -static inline void inet_frag_put(struct inet_frag_queue *q) +static inline void inet_frag_putn(struct inet_frag_queue *q, int refs) { - if (refcount_dec_and_test(&q->refcnt)) + if (refs && refcount_sub_and_test(refs, &q->refcnt)) inet_frag_destroy(q); } diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h index 5eea47f135a42..949641e925398 100644 --- a/include/net/inet_hashtables.h +++ b/include/net/inet_hashtables.h @@ -89,6 +89,7 @@ struct 
inet_bind_bucket { bool fast_ipv6_only; struct hlist_node node; struct hlist_head bhash2; + struct rcu_head rcu; }; struct inet_bind2_bucket { @@ -226,8 +227,7 @@ struct inet_bind_bucket * inet_bind_bucket_create(struct kmem_cache *cachep, struct net *net, struct inet_bind_hashbucket *head, const unsigned short snum, int l3mdev); -void inet_bind_bucket_destroy(struct kmem_cache *cachep, - struct inet_bind_bucket *tb); +void inet_bind_bucket_destroy(struct inet_bind_bucket *tb); bool inet_bind_bucket_match(const struct inet_bind_bucket *tb, const struct net *net, unsigned short port, @@ -492,7 +492,7 @@ static inline struct sock *__inet_lookup_skb(struct inet_hashinfo *hashinfo, const int sdif, bool *refcounted) { - struct net *net = dev_net(skb_dst(skb)->dev); + struct net *net = dev_net_rcu(skb_dst(skb)->dev); const struct iphdr *iph = ip_hdr(skb); struct sock *sk; @@ -527,9 +527,12 @@ static inline void sk_rcv_saddr_set(struct sock *sk, __be32 addr) int __inet_hash_connect(struct inet_timewait_death_row *death_row, struct sock *sk, u64 port_offset, + u32 hash_port0, int (*check_established)(struct inet_timewait_death_row *, struct sock *, __u16, - struct inet_timewait_sock **)); + struct inet_timewait_sock **, + bool rcu_lookup, + u32 hash)); int inet_hash_connect(struct inet_timewait_death_row *death_row, struct sock *sk); diff --git a/include/net/ip.h b/include/net/ip.h index ba7b43447775e..8a48ade24620b 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -92,11 +92,12 @@ static inline void ipcm_init(struct ipcm_cookie *ipcm) static inline void ipcm_init_sk(struct ipcm_cookie *ipcm, const struct inet_sock *inet) { - ipcm_init(ipcm); + *ipcm = (struct ipcm_cookie) { + .tos = READ_ONCE(inet->tos), + }; + + sockcm_init(&ipcm->sockc, &inet->sk); - ipcm->sockc.mark = READ_ONCE(inet->sk.sk_mark); - ipcm->sockc.priority = READ_ONCE(inet->sk.sk_priority); - ipcm->sockc.tsflags = READ_ONCE(inet->sk.sk_tsflags); ipcm->oif = READ_ONCE(inet->sk.sk_bound_dev_if); ipcm->addr = inet->inet_saddr; ipcm->protocol = inet->inet_num; @@ -257,13 +258,6 @@ static inline u8 ip_sendmsg_scope(const struct inet_sock *inet, return RT_SCOPE_UNIVERSE; } -static inline __u8 get_rttos(struct ipcm_cookie* ipc, struct inet_sock *inet) -{ - u8 dsfield = ipc->tos != -1 ? 
ipc->tos : READ_ONCE(inet->tos); - - return dsfield & INET_DSCP_MASK; -} - /* datagram.c */ int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len); int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len); @@ -363,7 +357,7 @@ static inline void inet_get_local_port_range(const struct net *net, int *low, in bool inet_sk_get_local_port_range(const struct sock *sk, int *low, int *high); #ifdef CONFIG_SYSCTL -static inline bool inet_is_local_reserved_port(struct net *net, unsigned short port) +static inline bool inet_is_local_reserved_port(const struct net *net, unsigned short port) { if (!net->ipv4.sysctl_local_reserved_ports) return false; @@ -673,6 +667,14 @@ static inline void ip_ipgre_mc_map(__be32 naddr, const unsigned char *broadcast, memcpy(buf, &naddr, sizeof(naddr)); } +#if IS_MODULE(CONFIG_IPV6) +#define EXPORT_IPV6_MOD(X) EXPORT_SYMBOL(X) +#define EXPORT_IPV6_MOD_GPL(X) EXPORT_SYMBOL_GPL(X) +#else +#define EXPORT_IPV6_MOD(X) +#define EXPORT_IPV6_MOD_GPL(X) +#endif + #if IS_ENABLED(CONFIG_IPV6) #include #endif diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h index a113c11ab56b5..e3864b74e92a6 100644 --- a/include/net/ip_fib.h +++ b/include/net/ip_fib.h @@ -162,6 +162,8 @@ struct fib_info { struct fib_nh fib_nh[] __counted_by(fib_nhs); }; +int __net_init fib4_semantics_init(struct net *net); +void __net_exit fib4_semantics_exit(struct net *net); #ifdef CONFIG_IP_MULTIPLE_TABLES struct fib_rule; diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h index 1aa31bdb2b31e..a36a335cef9fb 100644 --- a/include/net/ip_tunnels.h +++ b/include/net/ip_tunnels.h @@ -95,8 +95,8 @@ struct ip_tunnel_encap { #define ip_tunnel_info_opts(info) \ _Generic(info, \ - const struct ip_tunnel_info * : ((const void *)((info) + 1)),\ - struct ip_tunnel_info * : ((void *)((info) + 1))\ + const struct ip_tunnel_info * : ((const void *)(info)->options),\ + struct ip_tunnel_info * : ((void *)(info)->options)\ ) struct ip_tunnel_info { @@ -107,6 +107,7 @@ struct ip_tunnel_info { #endif u8 options_len; u8 mode; + u8 options[] __aligned_largest __counted_by(options_len); }; /* 6rd prefix/relay information */ @@ -406,8 +407,9 @@ int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb, bool log_ecn_error); int ip_tunnel_changelink(struct net_device *dev, struct nlattr *tb[], struct ip_tunnel_parm_kern *p, __u32 fwmark); -int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[], - struct ip_tunnel_parm_kern *p, __u32 fwmark); +int ip_tunnel_newlink(struct net *net, struct net_device *dev, + struct nlattr *tb[], struct ip_tunnel_parm_kern *p, + __u32 fwmark); void ip_tunnel_setup(struct net_device *dev, unsigned int net_id); bool ip_tunnel_netlink_encap_parms(struct nlattr *data[], @@ -650,7 +652,7 @@ static inline void iptunnel_xmit_stats(struct net_device *dev, int pkt_len) static inline void ip_tunnel_info_opts_get(void *to, const struct ip_tunnel_info *info) { - memcpy(to, info + 1, info->options_len); + memcpy(to, ip_tunnel_info_opts(info), info->options_len); } static inline void ip_tunnel_info_opts_set(struct ip_tunnel_info *info, diff --git a/include/net/ipv6.h b/include/net/ipv6.h index f5c43ad1565e2..2ccdf85f34f16 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h @@ -246,17 +246,20 @@ extern int sysctl_mld_qrv; #define _DEVADD(net, statname, mod, idev, field, val) \ ({ \ struct inet6_dev *_idev = (idev); \ + unsigned long _field = (field); \ + unsigned long _val = (val); \ if (likely(_idev != NULL)) \ - 
mod##SNMP_ADD_STATS((_idev)->stats.statname, (field), (val)); \ - mod##SNMP_ADD_STATS((net)->mib.statname##_statistics, (field), (val));\ + mod##SNMP_ADD_STATS((_idev)->stats.statname, _field, _val); \ + mod##SNMP_ADD_STATS((net)->mib.statname##_statistics, _field, _val);\ }) #define _DEVUPD(net, statname, mod, idev, field, val) \ ({ \ struct inet6_dev *_idev = (idev); \ + unsigned long _val = (val); \ if (likely(_idev != NULL)) \ - mod##SNMP_UPD_PO_STATS((_idev)->stats.statname, field, (val)); \ - mod##SNMP_UPD_PO_STATS((net)->mib.statname##_statistics, field, (val));\ + mod##SNMP_UPD_PO_STATS((_idev)->stats.statname, field, _val); \ + mod##SNMP_UPD_PO_STATS((net)->mib.statname##_statistics, field, _val);\ }) /* MIBs */ @@ -363,15 +366,6 @@ struct ipcm6_cookie { struct ipv6_txoptions *opt; }; -static inline void ipcm6_init(struct ipcm6_cookie *ipc6) -{ - *ipc6 = (struct ipcm6_cookie) { - .hlimit = -1, - .tclass = -1, - .dontfrag = -1, - }; -} - static inline void ipcm6_init_sk(struct ipcm6_cookie *ipc6, const struct sock *sk) { @@ -380,6 +374,8 @@ static inline void ipcm6_init_sk(struct ipcm6_cookie *ipc6, .tclass = inet6_sk(sk)->tclass, .dontfrag = inet6_test_bit(DONTFRAG, sk), }; + + sockcm_init(&ipc6->sockc, sk); } static inline struct ipv6_txoptions *txopt_get(const struct ipv6_pinfo *np) diff --git a/include/net/ipv6_frag.h b/include/net/ipv6_frag.h index 7321ffe3a108c..38ef66826939e 100644 --- a/include/net/ipv6_frag.h +++ b/include/net/ipv6_frag.h @@ -66,6 +66,7 @@ ip6frag_expire_frag_queue(struct net *net, struct frag_queue *fq) { struct net_device *dev = NULL; struct sk_buff *head; + int refs = 1; rcu_read_lock(); /* Paired with the WRITE_ONCE() in fqdir_pre_exit(). */ @@ -77,7 +78,7 @@ ip6frag_expire_frag_queue(struct net *net, struct frag_queue *fq) goto out; fq->q.flags |= INET_FRAG_DROP; - inet_frag_kill(&fq->q); + inet_frag_kill(&fq->q, &refs); dev = dev_get_by_index_rcu(net, fq->iif); if (!dev) @@ -109,7 +110,7 @@ ip6frag_expire_frag_queue(struct net *net, struct frag_queue *fq) spin_unlock(&fq->q.lock); out_rcu_unlock: rcu_read_unlock(); - inet_frag_put(&fq->q); + inet_frag_putn(&fq->q, refs); } /* Check if the upper layer header is truncated in the first fragment. 
*/ diff --git a/include/net/libeth/rx.h b/include/net/libeth/rx.h index 43574bd6612f0..ab05024be5186 100644 --- a/include/net/libeth/rx.h +++ b/include/net/libeth/rx.h @@ -198,6 +198,53 @@ struct libeth_rx_pt { enum xdp_rss_hash_type hash_type:16; }; +/** + * struct libeth_rx_csum - checksum offload bits decoded from the Rx descriptor + * @l3l4p: detectable L3 and L4 integrity check is processed by the hardware + * @ipe: IP checksum error + * @eipe: external (outermost) IP header checksum error (only for tunnels) + * @eudpe: external (outermost) UDP checksum error (only for tunnels) + * @ipv6exadd: IPv6 header with extension headers + * @l4e: L4 integrity error + * @pprs: set for packets that skip checksum calculation in the HW pre-parser + * @nat: the packet is a UDP tunneled packet + * @raw_csum_valid: set if raw checksum is valid + * @pad: padding to naturally align raw_csum field + * @raw_csum: raw checksum + */ +struct libeth_rx_csum { + u32 l3l4p:1; + u32 ipe:1; + u32 eipe:1; + u32 eudpe:1; + u32 ipv6exadd:1; + u32 l4e:1; + u32 pprs:1; + u32 nat:1; + + u32 raw_csum_valid:1; + u32 pad:7; + u32 raw_csum:16; +}; + +/** + * struct libeth_rqe_info - receive queue element info + * @len: packet length + * @ptype: packet type based on types programmed into the device + * @eop: whether it's the last fragment of the packet + * @rxe: MAC errors: CRC, Alignment, Oversize, Undersize, Length error + * @vlan: C-VLAN or S-VLAN tag depending on the VLAN offload configuration + */ +struct libeth_rqe_info { + u32 len; + + u32 ptype:14; + u32 eop:1; + u32 rxe:1; + + u32 vlan:16; +}; + void libeth_rx_pt_gen_hash_type(struct libeth_rx_pt *pt); /** diff --git a/include/net/lwtunnel.h b/include/net/lwtunnel.h index 53bd2d02a4f0d..39cd50300a189 100644 --- a/include/net/lwtunnel.h +++ b/include/net/lwtunnel.h @@ -116,9 +116,11 @@ int lwtunnel_encap_add_ops(const struct lwtunnel_encap_ops *op, int lwtunnel_encap_del_ops(const struct lwtunnel_encap_ops *op, unsigned int num); int lwtunnel_valid_encap_type(u16 encap_type, - struct netlink_ext_ack *extack); + struct netlink_ext_ack *extack, + bool rtnl_is_held); int lwtunnel_valid_encap_type_attr(struct nlattr *attr, int len, - struct netlink_ext_ack *extack); + struct netlink_ext_ack *extack, + bool rtnl_is_held); int lwtunnel_build_state(struct net *net, u16 encap_type, struct nlattr *encap, unsigned int family, const void *cfg, @@ -201,13 +203,15 @@ static inline int lwtunnel_encap_del_ops(const struct lwtunnel_encap_ops *op, } static inline int lwtunnel_valid_encap_type(u16 encap_type, - struct netlink_ext_ack *extack) + struct netlink_ext_ack *extack, + bool rtnl_is_held) { NL_SET_ERR_MSG(extack, "CONFIG_LWTUNNEL is not enabled in this kernel"); return -EOPNOTSUPP; } static inline int lwtunnel_valid_encap_type_attr(struct nlattr *attr, int len, - struct netlink_ext_ack *extack) + struct netlink_ext_ack *extack, + bool rtnl_is_held) { /* return 0 since we are not walking attr looking for * RTA_ENCAP_TYPE attribute on nexthops.
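For illustration, a minimal consumer of the new struct libeth_rx_csum might fold the decoded descriptor bits into an skb as sketched below. The policy shown, and the example_ helper name, are assumptions for illustration only, not the actual libeth logic:

static void example_rx_csum_to_skb(struct sk_buff *skb,
				   const struct libeth_rx_csum *csum)
{
	/* HW did not run (or skipped) the L3/L4 integrity checks */
	if (!csum->l3l4p || csum->pprs) {
		skb->ip_summed = CHECKSUM_NONE;
		return;
	}

	/* Inner or outer (tunnel) checksum error: let the stack recheck */
	if (csum->ipe || csum->eipe || csum->eudpe || csum->l4e) {
		skb->ip_summed = CHECKSUM_NONE;
		return;
	}

	if (csum->raw_csum_valid) {
		/* Full raw checksum supplied by the descriptor */
		skb->ip_summed = CHECKSUM_COMPLETE;
		skb->csum = csum_unfold((__force __sum16)csum->raw_csum);
	} else {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}
}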
diff --git a/include/net/mac80211.h b/include/net/mac80211.h index c3ed2fcff8b79..c498f685d01f3 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -7,7 +7,7 @@ * Copyright 2007-2010 Johannes Berg * Copyright 2013-2014 Intel Mobile Communications GmbH * Copyright (C) 2015 - 2017 Intel Deutschland GmbH - * Copyright (C) 2018 - 2024 Intel Corporation + * Copyright (C) 2018 - 2025 Intel Corporation */ #ifndef MAC80211_H @@ -702,6 +702,7 @@ struct ieee80211_parsed_tpe { * @tpe: transmit power envelope information * @pwr_reduction: power constraint of BSS. * @eht_support: does this BSS support EHT + * @epcs_support: does this BSS support EPCS * @csa_active: marks whether a channel switch is going on. * @mu_mimo_owner: indicates interface owns MU-MIMO capability * @chanctx_conf: The channel context this interface is assigned to, or %NULL @@ -823,7 +824,7 @@ struct ieee80211_bss_conf { u8 pwr_reduction; bool eht_support; - + bool epcs_support; bool csa_active; bool mu_mimo_owner; @@ -2851,6 +2852,11 @@ struct ieee80211_txq { * implements MLO, so operation can continue on other links when one * link is switching. * + * @IEEE80211_HW_STRICT: strictly enforce certain things mandated by the spec + * but otherwise ignored/worked around for interoperability. This is a + * HW flag so drivers can opt in according to their own control, e.g. in + * testing. + * * @NUM_IEEE80211_HW_FLAGS: number of hardware flags, used for sizing arrays */ enum ieee80211_hw_flags { @@ -2911,6 +2917,7 @@ enum ieee80211_hw_flags { IEEE80211_HW_DISALLOW_PUNCTURING, IEEE80211_HW_DISALLOW_PUNCTURING_5GHZ, IEEE80211_HW_HANDLES_QUIET_CSA, + IEEE80211_HW_STRICT, /* keep last, obviously */ NUM_IEEE80211_HW_FLAGS @@ -3823,7 +3830,7 @@ enum ieee80211_reconfig_type { * @was_assoc: set if this call is due to deauth/disassoc * while just having been associated * @link_id: the link id on which the frame will be TX'ed. - * Only used with the mgd_prepare_tx() method. + * 0 for a non-MLO connection. */ struct ieee80211_prep_tx_info { u16 duration; @@ -6664,6 +6671,31 @@ void ieee80211_iter_chan_contexts_atomic( void *data), void *iter_data); +/** + * ieee80211_iter_chan_contexts_mtx - iterate channel contexts + * @hw: pointer obtained from ieee80211_alloc_hw(). + * @iter: iterator function + * @iter_data: data passed to iterator function + * + * Iterate all active channel contexts. This function can only be used while + * holding the wiphy mutex. + * + * The iterator will not find a context that's being added (during + * the driver callback to add it) but will find it while it's being + * removed. + * + * Note that during hardware restart, all contexts that existed + * before the restart are considered already present so will be + * found while iterating, whether they've been re-added already + * or not. + */ +void ieee80211_iter_chan_contexts_mtx( + struct ieee80211_hw *hw, + void (*iter)(struct ieee80211_hw *hw, + struct ieee80211_chanctx_conf *chanctx_conf, + void *data), + void *iter_data); + /** * ieee80211_ap_probereq_get - retrieve a Probe Request template * @hw: pointer obtained from ieee80211_alloc_hw(). 
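As a usage note for the wiphy-locked iterator declared above, a driver could count its active channel contexts as sketched here; the drv_* helpers are hypothetical and only illustrate the calling convention:

static void drv_count_chanctx_iter(struct ieee80211_hw *hw,
				   struct ieee80211_chanctx_conf *conf,
				   void *data)
{
	unsigned int *n = data;

	(*n)++;
}

static unsigned int drv_count_chanctx(struct ieee80211_hw *hw)
{
	unsigned int n = 0;

	/* Must hold the wiphy mutex, unlike the _atomic variant */
	lockdep_assert_wiphy(hw->wiphy);
	ieee80211_iter_chan_contexts_mtx(hw, drv_count_chanctx_iter, &n);

	return n;
}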
diff --git a/include/net/mana/gdma.h b/include/net/mana/gdma.h index 90f56656b5724..62e9d76738628 100644 --- a/include/net/mana/gdma.h +++ b/include/net/mana/gdma.h @@ -408,8 +408,6 @@ struct gdma_context { struct gdma_dev mana_ib; }; -#define MAX_NUM_GDMA_DEVICES 4 - static inline bool mana_gd_is_mana(struct gdma_dev *gd) { return gd->dev_id.type == GDMA_DEVICE_MANA; @@ -556,11 +554,15 @@ enum { #define GDMA_DRV_CAP_FLAG_1_HWC_TIMEOUT_RECONFIG BIT(3) #define GDMA_DRV_CAP_FLAG_1_VARIABLE_INDIRECTION_TABLE_SUPPORT BIT(5) +/* Driver can handle holes (zeros) in the device list */ +#define GDMA_DRV_CAP_FLAG_1_DEV_LIST_HOLES_SUP BIT(11) + #define GDMA_DRV_CAP_FLAGS1 \ (GDMA_DRV_CAP_FLAG_1_EQ_SHARING_MULTI_VPORT | \ GDMA_DRV_CAP_FLAG_1_NAPI_WKDONE_FIX | \ GDMA_DRV_CAP_FLAG_1_HWC_TIMEOUT_RECONFIG | \ - GDMA_DRV_CAP_FLAG_1_VARIABLE_INDIRECTION_TABLE_SUPPORT) + GDMA_DRV_CAP_FLAG_1_VARIABLE_INDIRECTION_TABLE_SUPPORT | \ + GDMA_DRV_CAP_FLAG_1_DEV_LIST_HOLES_SUP) #define GDMA_DRV_CAP_FLAGS2 0 @@ -621,11 +623,12 @@ struct gdma_query_max_resources_resp { }; /* HW DATA */ /* GDMA_LIST_DEVICES */ +#define GDMA_DEV_LIST_SIZE 64 struct gdma_list_devices_resp { struct gdma_resp_hdr hdr; u32 num_of_devs; u32 reserved; - struct gdma_dev_id devs[64]; + struct gdma_dev_id devs[GDMA_DEV_LIST_SIZE]; }; /* HW DATA */ /* GDMA_REGISTER_DEVICE */ diff --git a/include/net/mana/mana.h b/include/net/mana/mana.h index 0d00b24eacafd..b4c66ce9ee3aa 100644 --- a/include/net/mana/mana.h +++ b/include/net/mana/mana.h @@ -468,6 +468,8 @@ struct mana_port_context { u16 port_idx; + u32 speed; + bool port_is_up; bool port_st_save; /* Saved port state */ @@ -497,6 +499,8 @@ struct bpf_prog *mana_xdp_get(struct mana_port_context *apc); void mana_chn_setxdp(struct mana_port_context *apc, struct bpf_prog *prog); int mana_bpf(struct net_device *ndev, struct netdev_bpf *bpf); void mana_query_gf_stats(struct mana_port_context *apc); +int mana_query_link_cfg(struct mana_port_context *apc); +int mana_set_bw_clamp(struct mana_port_context *apc, u32 speed); int mana_pre_alloc_rxbufs(struct mana_port_context *apc, int mtu, int num_queues); void mana_pre_dealloc_rxbufs(struct mana_port_context *apc); @@ -523,6 +527,8 @@ enum mana_command_code { MANA_FENCE_RQ = 0x20006, MANA_CONFIG_VPORT_RX = 0x20007, MANA_QUERY_VPORT_CONFIG = 0x20008, + MANA_QUERY_LINK_CONFIG = 0x2000A, + MANA_SET_BW_CLAMP = 0x2000B, /* Privileged commands for the PF mode */ MANA_REGISTER_FILTER = 0x28000, @@ -531,6 +537,33 @@ enum mana_command_code { MANA_DEREGISTER_HW_PORT = 0x28004, }; +/* Query Link Configuration*/ +struct mana_query_link_config_req { + struct gdma_req_hdr hdr; + mana_handle_t vport; +}; /* HW DATA */ + +struct mana_query_link_config_resp { + struct gdma_resp_hdr hdr; + u32 link_speed; + bool qos_unconfigured; + u8 reserved[3]; +}; /* HW DATA */ + +/* Set Bandwidth Clamp*/ +struct mana_set_bw_clamp_req { + struct gdma_req_hdr hdr; + mana_handle_t vport; + enum TRI_STATE enable_clamping; + u32 link_speed; +}; /* HW DATA */ + +struct mana_set_bw_clamp_resp { + struct gdma_resp_hdr hdr; + bool qos_unconfigured; + u8 reserved[7]; +}; /* HW DATA */ + /* Query Device Configuration */ struct mana_query_device_cfg_req { struct gdma_req_hdr hdr; diff --git a/include/net/mctp.h b/include/net/mctp.h index 1ecbff7116f62..07d458990113d 100644 --- a/include/net/mctp.h +++ b/include/net/mctp.h @@ -212,7 +212,7 @@ static inline struct mctp_skb_cb *mctp_cb(struct sk_buff *skb) BUILD_BUG_ON(sizeof(struct mctp_skb_cb) > sizeof(skb->cb)); WARN_ON(cb->magic != 0x4d435450); - 
return (void *)(skb->cb); + return cb; } /* If CONFIG_MCTP_FLOWS, we may add one of these as a SKB extension, diff --git a/include/net/mptcp.h b/include/net/mptcp.h index 814b5f2e3ed5e..bfbad695951cf 100644 --- a/include/net/mptcp.h +++ b/include/net/mptcp.h @@ -14,6 +14,7 @@ struct mptcp_info; struct mptcp_sock; +struct mptcp_pm_addr_entry; struct seq_file; /* MPTCP sk_buff extension data */ @@ -103,13 +104,14 @@ struct mptcp_out_options { #define MPTCP_SUBFLOWS_MAX 8 struct mptcp_sched_data { - bool reinject; u8 subflows; struct mptcp_subflow_context *contexts[MPTCP_SUBFLOWS_MAX]; }; struct mptcp_sched_ops { - int (*get_subflow)(struct mptcp_sock *msk, + int (*get_send)(struct mptcp_sock *msk, + struct mptcp_sched_data *data); + int (*get_retrans)(struct mptcp_sock *msk, struct mptcp_sched_data *data); char name[MPTCP_SCHED_NAME_MAX]; @@ -120,6 +122,19 @@ struct mptcp_sched_ops { void (*release)(struct mptcp_sock *msk); } ____cacheline_aligned_in_smp; +#define MPTCP_PM_NAME_MAX 16 +#define MPTCP_PM_MAX 128 +#define MPTCP_PM_BUF_MAX (MPTCP_PM_NAME_MAX * MPTCP_PM_MAX) + +struct mptcp_pm_ops { + char name[MPTCP_PM_NAME_MAX]; + struct module *owner; + struct list_head list; + + void (*init)(struct mptcp_sock *msk); + void (*release)(struct mptcp_sock *msk); +} ____cacheline_aligned_in_smp; + #ifdef CONFIG_MPTCP void mptcp_init(void); diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h index f467a66abc6b1..bd57d8fb54f14 100644 --- a/include/net/net_namespace.h +++ b/include/net/net_namespace.h @@ -83,6 +83,9 @@ struct net { struct llist_node defer_free_list; struct llist_node cleanup_list; /* namespaces on death row */ + struct list_head ptype_all; + struct list_head ptype_specific; + #ifdef CONFIG_KEYS struct key_tag *key_domain; /* Key domain of operation tag */ #endif diff --git a/include/net/netdev_lock.h b/include/net/netdev_lock.h new file mode 100644 index 0000000000000..76cbf5a449b6d --- /dev/null +++ b/include/net/netdev_lock.h @@ -0,0 +1,117 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#ifndef _NET_NETDEV_LOCK_H +#define _NET_NETDEV_LOCK_H + +#include +#include +#include + +static inline bool netdev_trylock(struct net_device *dev) +{ + return mutex_trylock(&dev->lock); +} + +static inline void netdev_assert_locked(const struct net_device *dev) +{ + lockdep_assert_held(&dev->lock); +} + +static inline void +netdev_assert_locked_or_invisible(const struct net_device *dev) +{ + if (dev->reg_state == NETREG_REGISTERED || + dev->reg_state == NETREG_UNREGISTERING) + netdev_assert_locked(dev); +} + +static inline bool netdev_need_ops_lock(const struct net_device *dev) +{ + bool ret = dev->request_ops_lock || !!dev->queue_mgmt_ops; + +#if IS_ENABLED(CONFIG_NET_SHAPER) + ret |= !!dev->netdev_ops->net_shaper_ops; +#endif + + return ret; +} + +static inline void netdev_lock_ops(struct net_device *dev) +{ + if (netdev_need_ops_lock(dev)) + netdev_lock(dev); +} + +static inline void netdev_unlock_ops(struct net_device *dev) +{ + if (netdev_need_ops_lock(dev)) + netdev_unlock(dev); +} + +static inline void netdev_ops_assert_locked(const struct net_device *dev) +{ + if (netdev_need_ops_lock(dev)) + lockdep_assert_held(&dev->lock); + else + ASSERT_RTNL(); +} + +static inline void +netdev_ops_assert_locked_or_invisible(const struct net_device *dev) +{ + if (dev->reg_state == NETREG_REGISTERED || + dev->reg_state == NETREG_UNREGISTERING) + netdev_ops_assert_locked(dev); +} + +static inline void netdev_lock_ops_compat(struct net_device *dev) +{ + if 
(netdev_need_ops_lock(dev)) + netdev_lock(dev); + else + rtnl_lock(); +} + +static inline void netdev_unlock_ops_compat(struct net_device *dev) +{ + if (netdev_need_ops_lock(dev)) + netdev_unlock(dev); + else + rtnl_unlock(); +} + +static inline int netdev_lock_cmp_fn(const struct lockdep_map *a, + const struct lockdep_map *b) +{ + /* Only lower devices currently grab the instance lock, so no + * real ordering issues can occur. In the near future, only + * hardware devices will grab instance lock which also does not + * involve any ordering. Suppress lockdep ordering warnings + * until (if) we start grabbing instance lock on pure SW + * devices (bond/team/veth/etc). + */ + if (a == b) + return 0; + return -1; +} + +#define netdev_lockdep_set_classes(dev) \ +{ \ + static struct lock_class_key qdisc_tx_busylock_key; \ + static struct lock_class_key qdisc_xmit_lock_key; \ + static struct lock_class_key dev_addr_list_lock_key; \ + static struct lock_class_key dev_instance_lock_key; \ + unsigned int i; \ + \ + (dev)->qdisc_tx_busylock = &qdisc_tx_busylock_key; \ + lockdep_set_class(&(dev)->addr_list_lock, \ + &dev_addr_list_lock_key); \ + lockdep_set_class(&(dev)->lock, \ + &dev_instance_lock_key); \ + lock_set_cmp_fn(&dev->lock, netdev_lock_cmp_fn, NULL); \ + for (i = 0; i < (dev)->num_tx_queues; i++) \ + lockdep_set_class(&(dev)->_tx[i]._xmit_lock, \ + &qdisc_xmit_lock_key); \ +} + +#endif diff --git a/include/net/netdev_netlink.h b/include/net/netdev_netlink.h new file mode 100644 index 0000000000000..075962dbe743b --- /dev/null +++ b/include/net/netdev_netlink.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __NET_NETDEV_NETLINK_H +#define __NET_NETDEV_NETLINK_H + +#include + +struct netdev_nl_sock { + struct mutex lock; + struct list_head bindings; +}; + +#endif /* __NET_NETDEV_NETLINK_H */ diff --git a/include/net/netdev_queues.h b/include/net/netdev_queues.h index b02bb9f109d5e..825141d675e58 100644 --- a/include/net/netdev_queues.h +++ b/include/net/netdev_queues.h @@ -23,6 +23,7 @@ struct netdev_queue_stats_rx { u64 hw_drops; u64 hw_drop_overruns; + u64 csum_complete; u64 csum_unnecessary; u64 csum_none; u64 csum_bad; @@ -117,6 +118,10 @@ struct netdev_stat_ops { * * @ndo_queue_stop: Stop the RX queue at the specified index. The stopped * queue's memory is written at the specified address. + * + * Note that @ndo_queue_mem_alloc and @ndo_queue_mem_free may be called while + * the interface is closed. @ndo_queue_start and @ndo_queue_stop will only + * be called for an interface which is open. 
*/ struct netdev_queue_mgmt_ops { size_t ndo_queue_mem_size; diff --git a/include/net/netdev_rx_queue.h b/include/net/netdev_rx_queue.h index 596836abf7bf4..8cdcd138b33f2 100644 --- a/include/net/netdev_rx_queue.h +++ b/include/net/netdev_rx_queue.h @@ -16,15 +16,16 @@ struct netdev_rx_queue { struct rps_dev_flow_table __rcu *rps_flow_table; #endif struct kobject kobj; + const struct attribute_group **groups; struct net_device *dev; netdevice_tracker dev_tracker; + /* All fields below are "ops protected", + * see comment about net_device::lock + */ #ifdef CONFIG_XDP_SOCKETS struct xsk_buff_pool *pool; #endif - /* NAPI instance for the queue - * Readers and writers must hold RTNL - */ struct napi_struct *napi; struct pp_memory_provider_params mp_params; } ____cacheline_aligned_in_smp; diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index 60d5dcdb289c9..803d5f1601f9d 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h @@ -1891,7 +1891,7 @@ void nft_chain_filter_fini(void); void __init nft_chain_route_init(void); void nft_chain_route_fini(void); -void nf_tables_trans_destroy_flush_work(void); +void nf_tables_trans_destroy_flush_work(struct net *net); int nf_msecs_to_jiffies64(const struct nlattr *nla, u64 *result); __be64 nf_jiffies64_to_msecs(u64 input); @@ -1905,6 +1905,7 @@ static inline int nft_request_module(struct net *net, const char *fmt, ...) { re struct nftables_pernet { struct list_head tables; struct list_head commit_list; + struct list_head destroy_list; struct list_head commit_set_list; struct list_head binding_list; struct list_head module_list; @@ -1915,6 +1916,7 @@ struct nftables_pernet { unsigned int base_seq; unsigned int gc_seq; u8 validate_state; + struct work_struct destroy_work; }; extern unsigned int nf_tables_net_id; diff --git a/include/net/netfilter/nft_fib.h b/include/net/netfilter/nft_fib.h index 38cae7113de46..6e202ed5e63f3 100644 --- a/include/net/netfilter/nft_fib.h +++ b/include/net/netfilter/nft_fib.h @@ -18,6 +18,27 @@ nft_fib_is_loopback(const struct sk_buff *skb, const struct net_device *in) return skb->pkt_type == PACKET_LOOPBACK || in->flags & IFF_LOOPBACK; } +static inline bool nft_fib_can_skip(const struct nft_pktinfo *pkt) +{ + const struct net_device *indev = nft_in(pkt); + const struct sock *sk; + + switch (nft_hook(pkt)) { + case NF_INET_PRE_ROUTING: + case NF_INET_INGRESS: + case NF_INET_LOCAL_IN: + break; + default: + return false; + } + + sk = pkt->skb->sk; + if (sk && sk_fullsock(sk)) + return sk->sk_rx_dst_ifindex == indev->ifindex; + + return nft_fib_is_loopback(pkt->skb, indev); +} + int nft_fib_dump(struct sk_buff *skb, const struct nft_expr *expr, bool reset); int nft_fib_init(const struct nft_ctx *ctx, const struct nft_expr *expr, const struct nlattr * const tb[]); diff --git a/include/net/netlink.h b/include/net/netlink.h index e015ffbed819c..29e0db9403820 100644 --- a/include/net/netlink.h +++ b/include/net/netlink.h @@ -118,6 +118,7 @@ * nla_nest_start(skb, type) start a nested attribute * nla_nest_end(skb, nla) finalize a nested attribute * nla_nest_cancel(skb, nla) cancel nested attribute construction + * nla_put_empty_nest(skb, type) create an empty nest * * Attribute Length Calculations: * nla_attr_size(payload) length of attribute w/o padding @@ -2240,6 +2241,20 @@ static inline void nla_nest_cancel(struct sk_buff *skb, struct nlattr *start) nlmsg_trim(skb, start); } +/** + * nla_put_empty_nest - Create an empty nest + * @skb: socket buffer the message is 
stored in + * @attrtype: attribute type of the container + * + * This function is a helper for creating empty nests. + * + * Returns: 0 when successful or -EMSGSIZE on failure. + */ +static inline int nla_put_empty_nest(struct sk_buff *skb, int attrtype) +{ + return nla_nest_start(skb, attrtype) ? 0 : -EMSGSIZE; +} + /** * __nla_validate_nested - Validate a stream of nested attributes * @start: container attribute diff --git a/include/net/netmem.h b/include/net/netmem.h index 1b58faa4f20f9..c61d5b21e7b42 100644 --- a/include/net/netmem.h +++ b/include/net/netmem.h @@ -24,11 +24,20 @@ struct net_iov { unsigned long __unused_padding; unsigned long pp_magic; struct page_pool *pp; - struct dmabuf_genpool_chunk_owner *owner; + struct net_iov_area *owner; unsigned long dma_addr; atomic_long_t pp_ref_count; }; +struct net_iov_area { + /* Array of net_iovs for this area. */ + struct net_iov *niovs; + size_t num_niovs; + + /* Offset into the dma-buf where this chunk starts. */ + unsigned long base_virtual; +}; + /* These fields in struct page are used by the page_pool and net stack: * * struct { @@ -54,6 +63,16 @@ NET_IOV_ASSERT_OFFSET(dma_addr, dma_addr); NET_IOV_ASSERT_OFFSET(pp_ref_count, pp_ref_count); #undef NET_IOV_ASSERT_OFFSET +static inline struct net_iov_area *net_iov_owner(const struct net_iov *niov) +{ + return niov->owner; +} + +static inline unsigned int net_iov_idx(const struct net_iov *niov) +{ + return niov - net_iov_owner(niov)->niovs; +} + /* netmem */ /** diff --git a/include/net/netns/core.h b/include/net/netns/core.h index 9b36f0ff0c200..0459523602cbd 100644 --- a/include/net/netns/core.h +++ b/include/net/netns/core.h @@ -17,6 +17,11 @@ struct netns_core { u8 sysctl_txrehash; u8 sysctl_tstamp_allow_data; + u32 sysctl_wmem_max; + u32 sysctl_rmem_max; + u32 sysctl_wmem_default; + u32 sysctl_rmem_default; + #ifdef CONFIG_PROC_FS struct prot_inuse __percpu *prot_inuse; #endif diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h index 46452da352061..6373e3f17da84 100644 --- a/include/net/netns/ipv4.h +++ b/include/net/netns/ipv4.h @@ -47,6 +47,11 @@ struct sysctl_fib_multipath_hash_seed { }; #endif +struct udp_tunnel_gro { + struct sock __rcu *sk; + struct hlist_head list; +}; + struct netns_ipv4 { /* Cacheline organization can be found documented in * Documentation/networking/net_cachelines/netns_ipv4_sysctl.rst. 
@@ -85,6 +90,11 @@ struct netns_ipv4 { struct inet_timewait_death_row tcp_death_row; struct udp_table *udp_table; +#if IS_ENABLED(CONFIG_NET_UDP_TUNNEL) + /* Not in a pernet subsys because it needs to be available at the GRO stage */ + struct udp_tunnel_gro udp_tunnel_gro[2]; +#endif + #ifdef CONFIG_SYSCTL struct ctl_table_header *forw_hdr; struct ctl_table_header *frags_hdr; @@ -111,6 +121,9 @@ struct netns_ipv4 { #endif struct hlist_head *fib_table_hash; struct sock *fibnl; + struct hlist_head *fib_info_hash; + unsigned int fib_info_hash_bits; + unsigned int fib_info_cnt; struct sock *mc_autojoin_sk; @@ -181,6 +194,7 @@ struct netns_ipv4 { u8 sysctl_tcp_window_scaling; u8 sysctl_tcp_timestamps; int sysctl_tcp_rto_min_us; + int sysctl_tcp_rto_max_ms; u8 sysctl_tcp_recovery; u8 sysctl_tcp_thin_linear_timeouts; u8 sysctl_tcp_slow_start_after_idle; @@ -273,4 +287,5 @@ struct netns_ipv4 { struct hlist_head *inet_addr_lst; struct delayed_work addr_chk_work; }; + #endif diff --git a/include/net/page_pool/memory_provider.h b/include/net/page_pool/memory_provider.h new file mode 100644 index 0000000000000..b3e6658977675 --- /dev/null +++ b/include/net/page_pool/memory_provider.h @@ -0,0 +1,45 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _NET_PAGE_POOL_MEMORY_PROVIDER_H +#define _NET_PAGE_POOL_MEMORY_PROVIDER_H + +#include +#include + +struct netdev_rx_queue; +struct sk_buff; + +struct memory_provider_ops { + netmem_ref (*alloc_netmems)(struct page_pool *pool, gfp_t gfp); + bool (*release_netmem)(struct page_pool *pool, netmem_ref netmem); + int (*init)(struct page_pool *pool); + void (*destroy)(struct page_pool *pool); + int (*nl_fill)(void *mp_priv, struct sk_buff *rsp, + struct netdev_rx_queue *rxq); + void (*uninstall)(void *mp_priv, struct netdev_rx_queue *rxq); +}; + +bool net_mp_niov_set_dma_addr(struct net_iov *niov, dma_addr_t addr); +void net_mp_niov_set_page_pool(struct page_pool *pool, struct net_iov *niov); +void net_mp_niov_clear_page_pool(struct net_iov *niov); + +int net_mp_open_rxq(struct net_device *dev, unsigned ifq_idx, + struct pp_memory_provider_params *p); +void net_mp_close_rxq(struct net_device *dev, unsigned ifq_idx, + struct pp_memory_provider_params *old_p); + +/** + * net_mp_netmem_place_in_cache() - give a netmem to a page pool + * @pool: the page pool to place the netmem into + * @netmem: netmem to give + * + * Push an accounted netmem into the page pool's allocation cache. The caller + * must ensure that there is space in the cache. It should only be called off + * the mp_ops->alloc_netmems() path.
+ */ +static inline void net_mp_netmem_place_in_cache(struct page_pool *pool, + netmem_ref netmem) +{ + pool->alloc.cache[pool->alloc.count++] = netmem; +} + +#endif diff --git a/include/net/page_pool/types.h b/include/net/page_pool/types.h index 7f405672b089d..36eb57d73abc6 100644 --- a/include/net/page_pool/types.h +++ b/include/net/page_pool/types.h @@ -152,8 +152,11 @@ struct page_pool_stats { */ #define PAGE_POOL_FRAG_GROUP_ALIGN (4 * sizeof(long)) +struct memory_provider_ops; + struct pp_memory_provider_params { void *mp_priv; + const struct memory_provider_ops *mp_ops; }; struct page_pool { @@ -216,6 +219,7 @@ struct page_pool { struct ptr_ring ring; void *mp_priv; + const struct memory_provider_ops *mp_ops; #ifdef CONFIG_PAGE_POOL_STATS /* recycle stats are per-cpu to avoid locking */ diff --git a/include/net/rps.h b/include/net/rps.h index a93401d23d66e..e358e9711f275 100644 --- a/include/net/rps.h +++ b/include/net/rps.h @@ -39,7 +39,7 @@ struct rps_dev_flow { * The rps_dev_flow_table structure contains a table of flow mappings. */ struct rps_dev_flow_table { - unsigned int mask; + u8 log; struct rcu_head rcu; struct rps_dev_flow flows[]; }; diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h index bc0069a8b6ea1..ec65a8cebb992 100644 --- a/include/net/rtnetlink.h +++ b/include/net/rtnetlink.h @@ -69,6 +69,40 @@ static inline int rtnl_msg_family(const struct nlmsghdr *nlh) return AF_UNSPEC; } +/** + * struct rtnl_newlink_params - parameters of rtnl_link_ops::newlink() + * + * @src_net: Source netns of rtnetlink socket + * @link_net: Link netns by IFLA_LINK_NETNSID, NULL if not specified + * @peer_net: Peer netns + * @tb: IFLA_* attributes + * @data: IFLA_INFO_DATA attributes + */ +struct rtnl_newlink_params { + struct net *src_net; + struct net *link_net; + struct net *peer_net; + struct nlattr **tb; + struct nlattr **data; +}; + +/* Get effective link netns from newlink params. Generally, this is link_net + * and falls back to src_net. But for compatibility, a driver may + * choose to + * use dev_net(dev) instead. + */ +static inline struct net *rtnl_newlink_link_net(struct rtnl_newlink_params *p) +{ + return p->link_net ? : p->src_net; +} + +/* Get peer netns from newlink params. Fallback to link netns if peer netns is + * not specified explicitly. + */ +static inline struct net *rtnl_newlink_peer_net(struct rtnl_newlink_params *p) +{ + return p->peer_net ? : rtnl_newlink_link_net(p); +} + /** * struct rtnl_link_ops - rtnetlink link operations * @@ -125,10 +159,8 @@ struct rtnl_link_ops { struct nlattr *data[], struct netlink_ext_ack *extack); - int (*newlink)(struct net *src_net, - struct net_device *dev, - struct nlattr *tb[], - struct nlattr *data[], + int (*newlink)(struct net_device *dev, + struct rtnl_newlink_params *params, struct netlink_ext_ack *extack); int (*changelink)(struct net_device *dev, struct nlattr *tb[], diff --git a/include/net/sock.h b/include/net/sock.h index 8036b3b79cd8b..7aeddba449197 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -303,6 +303,7 @@ struct sk_filter; * @sk_stamp: time stamp of last packet received * @sk_stamp_seq: lock for accessing sk_stamp on 32 bit architectures only * @sk_tsflags: SO_TIMESTAMPING flags + * @sk_bpf_cb_flags: used in bpf_setsockopt() * @sk_use_task_frag: allow sk_page_frag() to use current->task_frag. * Sockets that can be used under memory reclaim should * set this to false.
@@ -525,6 +526,8 @@ struct sock { u8 sk_txtime_deadline_mode : 1, sk_txtime_report_errors : 1, sk_txtime_unused : 6; +#define SK_BPF_CB_FLAG_TEST(SK, FLAG) ((SK)->sk_bpf_cb_flags & (FLAG)) + u8 sk_bpf_cb_flags; void *sk_user_data; #ifdef CONFIG_SECURITY @@ -954,6 +957,7 @@ enum sock_flags { SOCK_TSTAMP_NEW, /* Indicates 64 bit timestamps always */ SOCK_RCVMARK, /* Receive SO_MARK ancillary data with packet */ SOCK_RCVPRIORITY, /* Receive SO_PRIORITY ancillary data with packet */ + SOCK_TIMESTAMPING_ANY, /* Copy of sk_tsflags & TSFLAGS_ANY */ }; #define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE)) @@ -1284,10 +1288,6 @@ struct proto { unsigned int inuse_idx; #endif -#if IS_ENABLED(CONFIG_MPTCP) - int (*forward_alloc_get)(const struct sock *sk); -#endif - bool (*stream_memory_free)(const struct sock *sk, int wake); bool (*sock_is_readable)(struct sock *sk); /* Memory pressure */ @@ -1348,15 +1348,6 @@ int sock_load_diag_module(int family, int protocol); INDIRECT_CALLABLE_DECLARE(bool tcp_stream_memory_free(const struct sock *sk, int wake)); -static inline int sk_forward_alloc_get(const struct sock *sk) -{ -#if IS_ENABLED(CONFIG_MPTCP) - if (sk->sk_prot->forward_alloc_get) - return sk->sk_prot->forward_alloc_get(sk); -#endif - return READ_ONCE(sk->sk_forward_alloc); -} - static inline bool __sk_stream_memory_free(const struct sock *sk, int wake) { if (READ_ONCE(sk->sk_wmem_queued) >= READ_ONCE(sk->sk_sndbuf)) @@ -1751,6 +1742,7 @@ static inline bool sock_allow_reclassification(const struct sock *csk) struct sock *sk_alloc(struct net *net, int family, gfp_t priority, struct proto *prot, int kern); void sk_free(struct sock *sk); +void sk_net_refcnt_upgrade(struct sock *sk); void sk_destruct(struct sock *sk); struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority); void sk_free_unlock_clone(struct sock *sk); @@ -1805,6 +1797,8 @@ static inline struct sk_buff *sock_alloc_send_skb(struct sock *sk, } void *sock_kmalloc(struct sock *sk, int size, gfp_t priority); +void *sock_kmemdup(struct sock *sk, const void *src, + int size, gfp_t priority); void sock_kfree_s(struct sock *sk, void *mem, int size); void sock_kzfree_s(struct sock *sk, void *mem, int size); void sk_send_sigurg(struct sock *sk); @@ -1828,6 +1822,7 @@ static inline void sockcm_init(struct sockcm_cookie *sockc, const struct sock *sk) { *sockc = (struct sockcm_cookie) { + .mark = READ_ONCE(sk->sk_mark), .tsflags = READ_ONCE(sk->sk_tsflags), .priority = READ_ONCE(sk->sk_priority), }; @@ -2664,13 +2659,13 @@ static inline void sock_recv_cmsgs(struct msghdr *msg, struct sock *sk, { #define FLAGS_RECV_CMSGS ((1UL << SOCK_RXQ_OVFL) | \ (1UL << SOCK_RCVTSTAMP) | \ - (1UL << SOCK_RCVMARK) |\ - (1UL << SOCK_RCVPRIORITY)) + (1UL << SOCK_RCVMARK) | \ + (1UL << SOCK_RCVPRIORITY) | \ + (1UL << SOCK_TIMESTAMPING_ANY)) #define TSFLAGS_ANY (SOF_TIMESTAMPING_SOFTWARE | \ SOF_TIMESTAMPING_RAW_HARDWARE) - if (sk->sk_flags & FLAGS_RECV_CMSGS || - READ_ONCE(sk->sk_tsflags) & TSFLAGS_ANY) + if (READ_ONCE(sk->sk_flags) & FLAGS_RECV_CMSGS) __sock_recv_cmsgs(msg, sk, skb); else if (unlikely(sock_flag(sk, SOCK_TIMESTAMP))) sock_write_timestamp(sk, skb->tstamp); @@ -2854,12 +2849,6 @@ void sk_get_meminfo(const struct sock *sk, u32 *meminfo); #define SK_WMEM_MAX (_SK_MEM_OVERHEAD * _SK_MEM_PACKETS) #define SK_RMEM_MAX (_SK_MEM_OVERHEAD * _SK_MEM_PACKETS) -extern __u32 sysctl_wmem_max; -extern __u32 sysctl_rmem_max; - -extern __u32 sysctl_wmem_default; -extern __u32 sysctl_rmem_default; - #define 
SKB_FRAG_PAGE_ORDER get_order(32768) DECLARE_STATIC_KEY_FALSE(net_high_order_alloc_disable_key); @@ -2920,6 +2909,13 @@ int sock_set_timestamping(struct sock *sk, int optname, struct so_timestamping timestamping); void sock_enable_timestamps(struct sock *sk); +#if defined(CONFIG_CGROUP_BPF) +void bpf_skops_tx_timestamping(struct sock *sk, struct sk_buff *skb, int op); +#else +static inline void bpf_skops_tx_timestamping(struct sock *sk, struct sk_buff *skb, int op) +{ +} +#endif void sock_no_linger(struct sock *sk); void sock_set_keepalive(struct sock *sk); void sock_set_priority(struct sock *sk, u32 priority); diff --git a/include/net/tcp.h b/include/net/tcp.h index 2d08473a6dc00..df04dc09c519d 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -26,6 +26,7 @@ #include #include #include +#include #include #include @@ -144,8 +145,9 @@ static_assert((1 << ATO_BITS) > TCP_DELACK_MAX); #define TCP_DELACK_MIN 4U #define TCP_ATO_MIN 4U #endif -#define TCP_RTO_MAX ((unsigned)(120*HZ)) -#define TCP_RTO_MIN ((unsigned)(HZ/5)) +#define TCP_RTO_MAX_SEC 120 +#define TCP_RTO_MAX ((unsigned)(TCP_RTO_MAX_SEC * HZ)) +#define TCP_RTO_MIN ((unsigned)(HZ / 5)) #define TCP_TIMEOUT_MIN (2U) /* Min timeout for TCP timers in jiffies */ #define TCP_TIMEOUT_MIN_US (2*USEC_PER_MSEC) /* Min TCP timeout in microsecs */ @@ -372,16 +374,53 @@ static inline void tcp_dec_quickack_mode(struct sock *sk) } } -#define TCP_ECN_OK 1 -#define TCP_ECN_QUEUE_CWR 2 -#define TCP_ECN_DEMAND_CWR 4 -#define TCP_ECN_SEEN 8 +#define TCP_ECN_MODE_RFC3168 BIT(0) +#define TCP_ECN_QUEUE_CWR BIT(1) +#define TCP_ECN_DEMAND_CWR BIT(2) +#define TCP_ECN_SEEN BIT(3) +#define TCP_ECN_MODE_ACCECN BIT(4) + +#define TCP_ECN_DISABLED 0 +#define TCP_ECN_MODE_PENDING (TCP_ECN_MODE_RFC3168 | TCP_ECN_MODE_ACCECN) +#define TCP_ECN_MODE_ANY (TCP_ECN_MODE_RFC3168 | TCP_ECN_MODE_ACCECN) + +static inline bool tcp_ecn_mode_any(const struct tcp_sock *tp) +{ + return tp->ecn_flags & TCP_ECN_MODE_ANY; +} + +static inline bool tcp_ecn_mode_rfc3168(const struct tcp_sock *tp) +{ + return (tp->ecn_flags & TCP_ECN_MODE_ANY) == TCP_ECN_MODE_RFC3168; +} + +static inline bool tcp_ecn_mode_accecn(const struct tcp_sock *tp) +{ + return (tp->ecn_flags & TCP_ECN_MODE_ANY) == TCP_ECN_MODE_ACCECN; +} + +static inline bool tcp_ecn_disabled(const struct tcp_sock *tp) +{ + return !tcp_ecn_mode_any(tp); +} + +static inline bool tcp_ecn_mode_pending(const struct tcp_sock *tp) +{ + return (tp->ecn_flags & TCP_ECN_MODE_PENDING) == TCP_ECN_MODE_PENDING; +} + +static inline void tcp_ecn_mode_set(struct tcp_sock *tp, u8 mode) +{ + tp->ecn_flags &= ~TCP_ECN_MODE_ANY; + tp->ecn_flags |= mode; +} enum tcp_tw_status { TCP_TW_SUCCESS = 0, TCP_TW_RST = 1, TCP_TW_ACK = 2, - TCP_TW_SYN = 3 + TCP_TW_SYN = 3, + TCP_TW_ACK_OOW = 4 }; @@ -391,7 +430,7 @@ enum tcp_tw_status tcp_timewait_state_process(struct inet_timewait_sock *tw, u32 *tw_isn); struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb, struct request_sock *req, bool fastopen, - bool *lost_race); + bool *lost_race, enum skb_drop_reason *drop_reason); enum skb_drop_reason tcp_child_process(struct sock *parent, struct sock *child, struct sk_buff *skb); void tcp_enter_loss(struct sock *sk); @@ -416,6 +455,7 @@ int do_tcp_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval, unsigned int optlen); int tcp_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval, unsigned int optlen); +void tcp_reset_keepalive_timer(struct sock *sk, unsigned long timeout); void tcp_set_keepalive(struct sock *sk, int 
val); void tcp_syn_ack_timeout(const struct request_sock *req); int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, @@ -667,7 +707,7 @@ void tcp_send_active_reset(struct sock *sk, gfp_t priority, enum sk_rst_reason reason); int tcp_send_synack(struct sock *); void tcp_push_one(struct sock *, unsigned int mss_now); -void __tcp_send_ack(struct sock *sk, u32 rcv_nxt); +void __tcp_send_ack(struct sock *sk, u32 rcv_nxt, u16 flags); void tcp_send_ack(struct sock *sk); void tcp_send_delayed_ack(struct sock *sk); void tcp_send_loss_probe(struct sock *sk); @@ -739,6 +779,7 @@ static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize) /* tcp.c */ void tcp_get_info(struct sock *, struct tcp_info *); +void tcp_sock_rfree(struct sk_buff *skb); /* Read 'sendfile()'-style from a TCP socket */ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc, @@ -756,10 +797,14 @@ int tcp_mtu_to_mss(struct sock *sk, int pmtu); int tcp_mss_to_mtu(struct sock *sk, int mss); void tcp_mtup_init(struct sock *sk); +static inline unsigned int tcp_rto_max(const struct sock *sk) +{ + return READ_ONCE(inet_csk(sk)->icsk_rto_max); +} + static inline void tcp_bound_rto(struct sock *sk) { - if (inet_csk(sk)->icsk_rto > TCP_RTO_MAX) - inet_csk(sk)->icsk_rto = TCP_RTO_MAX; + inet_csk(sk)->icsk_rto = min(inet_csk(sk)->icsk_rto, tcp_rto_max(sk)); } static inline u32 __tcp_set_rto(const struct tcp_sock *tp) @@ -800,7 +845,7 @@ u32 tcp_delack_max(const struct sock *sk); static inline u32 tcp_rto_min(const struct sock *sk) { const struct dst_entry *dst = __sk_dst_get(sk); - u32 rto_min = inet_csk(sk)->icsk_rto_min; + u32 rto_min = READ_ONCE(inet_csk(sk)->icsk_rto_min); if (dst && dst_metric_locked(dst, RTAX_RTO_MIN)) rto_min = dst_metric_rtt(dst, RTAX_RTO_MIN); @@ -928,15 +973,22 @@ static inline u32 tcp_rsk_tsval(const struct tcp_request_sock *treq) #define tcp_flag_byte(th) (((u_int8_t *)th)[13]) -#define TCPHDR_FIN 0x01 -#define TCPHDR_SYN 0x02 -#define TCPHDR_RST 0x04 -#define TCPHDR_PSH 0x08 -#define TCPHDR_ACK 0x10 -#define TCPHDR_URG 0x20 -#define TCPHDR_ECE 0x40 -#define TCPHDR_CWR 0x80 - +#define TCPHDR_FIN BIT(0) +#define TCPHDR_SYN BIT(1) +#define TCPHDR_RST BIT(2) +#define TCPHDR_PSH BIT(3) +#define TCPHDR_ACK BIT(4) +#define TCPHDR_URG BIT(5) +#define TCPHDR_ECE BIT(6) +#define TCPHDR_CWR BIT(7) +#define TCPHDR_AE BIT(8) +#define TCPHDR_FLAGS_MASK (TCPHDR_FIN | TCPHDR_SYN | TCPHDR_RST | \ + TCPHDR_PSH | TCPHDR_ACK | TCPHDR_URG | \ + TCPHDR_ECE | TCPHDR_CWR | TCPHDR_AE) +#define tcp_flags_ntohs(th) (ntohs(*(__be16 *)&tcp_flag_word(th)) & \ + TCPHDR_FLAGS_MASK) + +#define TCPHDR_ACE (TCPHDR_ECE | TCPHDR_CWR | TCPHDR_AE) #define TCPHDR_SYN_ECN (TCPHDR_SYN | TCPHDR_ECE | TCPHDR_CWR) /* State flags for sacked in struct tcp_skb_cb */ @@ -971,14 +1023,16 @@ struct tcp_skb_cb { u16 tcp_gso_size; }; }; - __u8 tcp_flags; /* TCP header flags. (tcp[13]) */ + __u16 tcp_flags; /* TCP header flags (tcp[12-13])*/ __u8 sacked; /* State flags for SACK. */ __u8 ip_dsfield; /* IPv4 tos or IPv6 dsfield */ - __u8 txstamp_ack:1, /* Record TX timestamp for ack? */ +#define TSTAMP_ACK_SK 0x1 +#define TSTAMP_ACK_BPF 0x2 + __u8 txstamp_ack:2, /* Record TX timestamp for ack? */ eor:1, /* Is skb MSG_EOR marked? 
*/ has_rxtstamp:1, /* SKB has a RX timestamp */ - unused:5; + unused:4; __u32 ack_seq; /* Sequence number ACK'd */ union { struct { @@ -1124,9 +1178,9 @@ enum tcp_ca_ack_event_flags { #define TCP_CA_UNSPEC 0 /* Algorithm can be set on socket without CAP_NET_ADMIN privileges */ -#define TCP_CONG_NON_RESTRICTED 0x1 +#define TCP_CONG_NON_RESTRICTED BIT(0) /* Requires ECN/ECT set on all packets */ -#define TCP_CONG_NEEDS_ECN 0x2 +#define TCP_CONG_NEEDS_ECN BIT(1) #define TCP_CONG_MASK (TCP_CONG_NON_RESTRICTED | TCP_CONG_NEEDS_ECN) union tcp_cc_info; @@ -1440,10 +1494,12 @@ static inline unsigned long tcp_pacing_delay(const struct sock *sk) static inline void tcp_reset_xmit_timer(struct sock *sk, const int what, unsigned long when, - const unsigned long max_when) + bool pace_delay) { - inet_csk_reset_xmit_timer(sk, what, when + tcp_pacing_delay(sk), - max_when); + if (pace_delay) + when += tcp_pacing_delay(sk); + inet_csk_reset_xmit_timer(sk, what, when, + tcp_rto_max(sk)); } /* Something is really bad, we could not queue an additional packet, @@ -1472,7 +1528,7 @@ static inline void tcp_check_probe_timer(struct sock *sk) { if (!tcp_sk(sk)->packets_out && !inet_csk(sk)->icsk_pending) tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0, - tcp_probe0_base(sk), TCP_RTO_MAX); + tcp_probe0_base(sk), true); } static inline void tcp_init_wl(struct tcp_sock *tp, u32 seq) @@ -2588,8 +2644,8 @@ struct tcp_ulp_ops { /* cleanup ulp */ void (*release)(struct sock *sk); /* diagnostic */ - int (*get_info)(struct sock *sk, struct sk_buff *skb); - size_t (*get_info_size)(const struct sock *sk); + int (*get_info)(struct sock *sk, struct sk_buff *skb, bool net_admin); + size_t (*get_info_size)(const struct sock *sk, bool net_admin); /* clone ulp */ void (*clone)(const struct request_sock *req, struct sock *newsk, const gfp_t priority); @@ -2671,6 +2727,7 @@ static inline int tcp_call_bpf(struct sock *sk, int op, u32 nargs, u32 *args) memset(&sock_ops, 0, offsetof(struct bpf_sock_ops_kern, temp)); if (sk_fullsock(sk)) { sock_ops.is_fullsock = 1; + sock_ops.is_locked_tcp_sock = 1; sock_owned_by_me(sk); } @@ -2759,9 +2816,9 @@ extern struct static_key_false tcp_have_smc; #endif #if IS_ENABLED(CONFIG_TLS_DEVICE) -void clean_acked_data_enable(struct inet_connection_sock *icsk, +void clean_acked_data_enable(struct tcp_sock *tp, void (*cad)(struct sock *sk, u32 ack_seq)); -void clean_acked_data_disable(struct inet_connection_sock *icsk); +void clean_acked_data_disable(struct tcp_sock *tp); void clean_acked_data_flush(void); #endif @@ -2842,4 +2899,18 @@ enum skb_drop_reason tcp_inbound_hash(struct sock *sk, const void *saddr, const void *daddr, int family, int dif, int sdif); +/* version of skb_set_owner_r() avoiding one atomic_add() */ +static inline void tcp_skb_set_owner_r(struct sk_buff *skb, struct sock *sk) +{ + skb_orphan(skb); + skb->sk = sk; + skb->destructor = tcp_sock_rfree; + + sock_owned_by_me(sk); + atomic_set(&sk->sk_rmem_alloc, + atomic_read(&sk->sk_rmem_alloc) + skb->truesize); + + sk_forward_alloc_add(sk, -skb->truesize); +} + #endif /* _TCP_H */ diff --git a/include/net/udp.h b/include/net/udp.h index 6e89520e100dc..a772510b2aa58 100644 --- a/include/net/udp.h +++ b/include/net/udp.h @@ -290,6 +290,7 @@ static inline void udp_lib_init_sock(struct sock *sk) struct udp_sock *up = udp_sk(sk); skb_queue_head_init(&up->reader_queue); + INIT_HLIST_NODE(&up->tunnel_list); up->forward_threshold = sk->sk_rcvbuf >> 2; set_bit(SOCK_CUSTOM_SOCKOPT, &sk->sk_socket->flags); } diff --git a/include/net/udp_tunnel.h 
b/include/net/udp_tunnel.h index a93dc51f6323e..13b54e6856414 100644 --- a/include/net/udp_tunnel.h +++ b/include/net/udp_tunnel.h @@ -203,6 +203,24 @@ static inline void udp_tunnel_encap_enable(struct sock *sk) udp_encap_enable(); } +#if IS_ENABLED(CONFIG_NET_UDP_TUNNEL) +void udp_tunnel_update_gro_lookup(struct net *net, struct sock *sk, bool add); +void udp_tunnel_update_gro_rcv(struct sock *sk, bool add); +#else +static inline void udp_tunnel_update_gro_lookup(struct net *net, + struct sock *sk, bool add) {} +static inline void udp_tunnel_update_gro_rcv(struct sock *sk, bool add) {} +#endif + +static inline void udp_tunnel_cleanup_gro(struct sock *sk) +{ + struct net *net = sock_net(sk); + + udp_tunnel_update_gro_rcv(sk, false); + + udp_tunnel_update_gro_lookup(net, sk, false); +} + #define UDP_TUNNEL_NIC_MAX_TABLES 4 enum udp_tunnel_nic_info_flags { diff --git a/include/net/xdp.h b/include/net/xdp.h index 4dafc5e021f13..48efacbaa35da 100644 --- a/include/net/xdp.h +++ b/include/net/xdp.h @@ -343,7 +343,6 @@ struct sk_buff *__xdp_build_skb_from_frame(struct xdp_frame *xdpf, struct net_device *dev); struct sk_buff *xdp_build_skb_from_frame(struct xdp_frame *xdpf, struct net_device *dev); -int xdp_alloc_skb_bulk(void **skbs, int n_skb, gfp_t gfp); struct xdp_frame *xdpf_clone(struct xdp_frame *xdpf); static inline diff --git a/include/net/xdp_sock.h b/include/net/xdp_sock.h index bfe625b55d55d..a58ae7589d121 100644 --- a/include/net/xdp_sock.h +++ b/include/net/xdp_sock.h @@ -110,11 +110,16 @@ struct xdp_sock { * indicates position where checksumming should start. * csum_offset indicates position where checksum should be stored. * + * void (*tmo_request_launch_time)(u64 launch_time, void *priv) + * Called when AF_XDP frame requested launch time HW offload support. + * launch_time indicates the PTP time at which the device can schedule the + * packet for transmission. */ struct xsk_tx_metadata_ops { void (*tmo_request_timestamp)(void *priv); u64 (*tmo_fill_timestamp)(void *priv); void (*tmo_request_checksum)(u16 csum_start, u16 csum_offset, void *priv); + void (*tmo_request_launch_time)(u64 launch_time, void *priv); }; #ifdef CONFIG_XDP_SOCKETS @@ -162,6 +167,11 @@ static inline void xsk_tx_metadata_request(const struct xsk_tx_metadata *meta, if (!meta) return; + if (ops->tmo_request_launch_time) + if (meta->flags & XDP_TXMD_FLAGS_LAUNCH_TIME) + ops->tmo_request_launch_time(meta->request.launch_time, + priv); + if (ops->tmo_request_timestamp) if (meta->flags & XDP_TXMD_FLAGS_TIMESTAMP) ops->tmo_request_timestamp(priv); diff --git a/include/net/xdp_sock_drv.h b/include/net/xdp_sock_drv.h index 784cd34f5bba5..513c8e9704f65 100644 --- a/include/net/xdp_sock_drv.h +++ b/include/net/xdp_sock_drv.h @@ -196,9 +196,27 @@ static inline void *xsk_buff_raw_get_data(struct xsk_buff_pool *pool, u64 addr) return xp_raw_get_data(pool, addr); } +/** + * xsk_buff_raw_get_ctx - get &xdp_desc context + * @pool: XSk buff pool desc address belongs to + * @addr: desc address (from userspace) + * + * Wrapper for xp_raw_get_ctx() to be used in drivers, see its kdoc for + * details. + * + * Return: new &xdp_desc_ctx struct containing desc's DMA address and metadata + * pointer, if it is present and valid (initialized to %NULL otherwise). 
+ */ +static inline struct xdp_desc_ctx +xsk_buff_raw_get_ctx(const struct xsk_buff_pool *pool, u64 addr) +{ + return xp_raw_get_ctx(pool, addr); +} + #define XDP_TXMD_FLAGS_VALID ( \ XDP_TXMD_FLAGS_TIMESTAMP | \ XDP_TXMD_FLAGS_CHECKSUM | \ + XDP_TXMD_FLAGS_LAUNCH_TIME | \ 0) static inline bool @@ -207,20 +225,27 @@ xsk_buff_valid_tx_metadata(const struct xsk_tx_metadata *meta) return !(meta->flags & ~XDP_TXMD_FLAGS_VALID); } -static inline struct xsk_tx_metadata *xsk_buff_get_metadata(struct xsk_buff_pool *pool, u64 addr) +static inline struct xsk_tx_metadata * +__xsk_buff_get_metadata(const struct xsk_buff_pool *pool, void *data) { struct xsk_tx_metadata *meta; if (!pool->tx_metadata_len) return NULL; - meta = xp_raw_get_data(pool, addr) - pool->tx_metadata_len; + meta = data - pool->tx_metadata_len; if (unlikely(!xsk_buff_valid_tx_metadata(meta))) return NULL; /* no way to signal the error to the user */ return meta; } +static inline struct xsk_tx_metadata * +xsk_buff_get_metadata(struct xsk_buff_pool *pool, u64 addr) +{ + return __xsk_buff_get_metadata(pool, xp_raw_get_data(pool, addr)); +} + static inline void xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp) { struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp); @@ -388,12 +413,25 @@ static inline void *xsk_buff_raw_get_data(struct xsk_buff_pool *pool, u64 addr) return NULL; } +static inline struct xdp_desc_ctx +xsk_buff_raw_get_ctx(const struct xsk_buff_pool *pool, u64 addr) +{ + return (struct xdp_desc_ctx){ }; +} + static inline bool xsk_buff_valid_tx_metadata(struct xsk_tx_metadata *meta) { return false; } -static inline struct xsk_tx_metadata *xsk_buff_get_metadata(struct xsk_buff_pool *pool, u64 addr) +static inline struct xsk_tx_metadata * +__xsk_buff_get_metadata(const struct xsk_buff_pool *pool, void *data) +{ + return NULL; +} + +static inline struct xsk_tx_metadata * +xsk_buff_get_metadata(struct xsk_buff_pool *pool, u64 addr) { return NULL; } diff --git a/include/net/xfrm.h b/include/net/xfrm.h index ed4b83696c77f..39365fd2ea175 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -464,6 +464,15 @@ struct xfrm_type_offload { int xfrm_register_type_offload(const struct xfrm_type_offload *type, unsigned short family); void xfrm_unregister_type_offload(const struct xfrm_type_offload *type, unsigned short family); +void xfrm_set_type_offload(struct xfrm_state *x); +static inline void xfrm_unset_type_offload(struct xfrm_state *x) +{ + if (!x->type_offload) + return; + + module_put(x->type_offload->owner); + x->type_offload = NULL; +} /** * struct xfrm_mode_cbs - XFRM mode callbacks @@ -1760,8 +1769,7 @@ void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si); u32 xfrm_replay_seqhi(struct xfrm_state *x, __be32 net_seq); int xfrm_init_replay(struct xfrm_state *x, struct netlink_ext_ack *extack); u32 xfrm_state_mtu(struct xfrm_state *x, int mtu); -int __xfrm_init_state(struct xfrm_state *x, bool init_replay, bool offload, - struct netlink_ext_ack *extack); +int __xfrm_init_state(struct xfrm_state *x, struct netlink_ext_ack *extack); int xfrm_init_state(struct xfrm_state *x); int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type); int xfrm_input_resume(struct sk_buff *skb, int nexthdr); @@ -1773,6 +1781,15 @@ int xfrm_trans_queue(struct sk_buff *skb, struct sk_buff *)); int xfrm_output_resume(struct sock *sk, struct sk_buff *skb, int err); int xfrm_output(struct sock *sk, struct sk_buff *skb); +int xfrm4_tunnel_check_size(struct sk_buff *skb); +#if IS_ENABLED(CONFIG_IPV6) 
+int xfrm6_tunnel_check_size(struct sk_buff *skb); +#else +static inline int xfrm6_tunnel_check_size(struct sk_buff *skb) +{ + return -EMSGSIZE; +} +#endif #if IS_ENABLED(CONFIG_NET_PKTGEN) int pktgen_xfrm_outer_mode_output(struct xfrm_state *x, struct sk_buff *skb); diff --git a/include/net/xsk_buff_pool.h b/include/net/xsk_buff_pool.h index 50779406bc2d9..1dcd4d71468a5 100644 --- a/include/net/xsk_buff_pool.h +++ b/include/net/xsk_buff_pool.h @@ -141,6 +141,14 @@ u32 xp_alloc_batch(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max); bool xp_can_alloc(struct xsk_buff_pool *pool, u32 count); void *xp_raw_get_data(struct xsk_buff_pool *pool, u64 addr); dma_addr_t xp_raw_get_dma(struct xsk_buff_pool *pool, u64 addr); + +struct xdp_desc_ctx { + dma_addr_t dma; + struct xsk_tx_metadata *meta; +}; + +struct xdp_desc_ctx xp_raw_get_ctx(const struct xsk_buff_pool *pool, u64 addr); + static inline dma_addr_t xp_get_dma(struct xdp_buff_xsk *xskb) { return xskb->dma; diff --git a/include/sound/cs35l56.h b/include/sound/cs35l56.h index 3dc7a1551ac35..5d653a3491d07 100644 --- a/include/sound/cs35l56.h +++ b/include/sound/cs35l56.h @@ -12,6 +12,7 @@ #include #include #include +#include #include #define CS35L56_DEVID 0x0000000 @@ -61,6 +62,7 @@ #define CS35L56_IRQ1_MASK_8 0x000E0AC #define CS35L56_IRQ1_MASK_18 0x000E0D4 #define CS35L56_IRQ1_MASK_20 0x000E0DC +#define CS35L56_DSP_MBOX_1_RAW 0x0011000 #define CS35L56_DSP_VIRTUAL1_MBOX_1 0x0011020 #define CS35L56_DSP_VIRTUAL1_MBOX_2 0x0011024 #define CS35L56_DSP_VIRTUAL1_MBOX_3 0x0011028 @@ -224,6 +226,7 @@ #define CS35L56_HALO_STATE_SHUTDOWN 1 #define CS35L56_HALO_STATE_BOOT_DONE 2 +#define CS35L56_MBOX_CMD_PING 0x0A000000 #define CS35L56_MBOX_CMD_AUDIO_PLAY 0x0B000001 #define CS35L56_MBOX_CMD_AUDIO_PAUSE 0x0B000002 #define CS35L56_MBOX_CMD_AUDIO_REINIT 0x0B000003 @@ -254,6 +257,16 @@ #define CS35L56_NUM_BULK_SUPPLIES 3 #define CS35L56_NUM_DSP_REGIONS 5 +/* Additional margin for SYSTEM_RESET to control port ready on SPI */ +#define CS35L56_SPI_RESET_TO_PORT_READY_US (CS35L56_CONTROL_PORT_READY_US + 2500) + +struct cs35l56_spi_payload { + __be32 addr; + __be16 pad; + __be32 value; +} __packed; +static_assert(sizeof(struct cs35l56_spi_payload) == 10); + struct cs35l56_base { struct device *dev; struct regmap *regmap; @@ -269,6 +282,7 @@ struct cs35l56_base { s8 cal_index; struct cirrus_amp_cal_data cal_data; struct gpio_desc *reset_gpio; + struct cs35l56_spi_payload *spi_payload_buf; }; static inline bool cs35l56_is_otp_register(unsigned int reg) @@ -276,6 +290,23 @@ static inline bool cs35l56_is_otp_register(unsigned int reg) return (reg >> 16) == 3; } +static inline int cs35l56_init_config_for_spi(struct cs35l56_base *cs35l56, + struct spi_device *spi) +{ + cs35l56->spi_payload_buf = devm_kzalloc(&spi->dev, + sizeof(*cs35l56->spi_payload_buf), + GFP_KERNEL | GFP_DMA); + if (!cs35l56->spi_payload_buf) + return -ENOMEM; + + return 0; +} + +static inline bool cs35l56_is_spi(struct cs35l56_base *cs35l56) +{ + return IS_ENABLED(CONFIG_SPI_MASTER) && !!cs35l56->spi_payload_buf; +} + extern const struct regmap_config cs35l56_regmap_i2c; extern const struct regmap_config cs35l56_regmap_spi; extern const struct regmap_config cs35l56_regmap_sdw; diff --git a/include/sound/soc.h b/include/sound/soc.h index fcdb5adfcd5ec..b3e84bc47c6fd 100644 --- a/include/sound/soc.h +++ b/include/sound/soc.h @@ -1261,7 +1261,10 @@ void snd_soc_close_delayed_work(struct snd_soc_pcm_runtime *rtd); /* mixer control */ struct soc_mixer_control { - int min, max, platform_max; + 
/* Minimum and maximum specified as written to the hardware */ + int min, max; + /* Limited maximum value specified as presented through the control */ + int platform_max; int reg, rreg; unsigned int shift, rshift; unsigned int sign_bit; diff --git a/include/trace/events/afs.h b/include/trace/events/afs.h index b0db89058c911..958a2460330c0 100644 --- a/include/trace/events/afs.h +++ b/include/trace/events/afs.h @@ -174,6 +174,7 @@ enum yfs_cm_operation { EM(afs_cell_trace_get_queue_dns, "GET q-dns ") \ EM(afs_cell_trace_get_queue_manage, "GET q-mng ") \ EM(afs_cell_trace_get_queue_new, "GET q-new ") \ + EM(afs_cell_trace_get_server, "GET server") \ EM(afs_cell_trace_get_vol, "GET vol ") \ EM(afs_cell_trace_insert, "INSERT ") \ EM(afs_cell_trace_manage, "MANAGE ") \ @@ -182,6 +183,7 @@ enum yfs_cm_operation { EM(afs_cell_trace_put_destroy, "PUT destry") \ EM(afs_cell_trace_put_queue_work, "PUT q-work") \ EM(afs_cell_trace_put_queue_fail, "PUT q-fail") \ + EM(afs_cell_trace_put_server, "PUT server") \ EM(afs_cell_trace_put_vol, "PUT vol ") \ EM(afs_cell_trace_see_source, "SEE source") \ EM(afs_cell_trace_see_ws, "SEE ws ") \ diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h index b13dc275ef4a7..851841336ee65 100644 --- a/include/trace/events/sunrpc.h +++ b/include/trace/events/sunrpc.h @@ -360,8 +360,7 @@ TRACE_EVENT(rpc_request, { (1UL << RPC_TASK_ACTIVE), "ACTIVE" }, \ { (1UL << RPC_TASK_NEED_XMIT), "NEED_XMIT" }, \ { (1UL << RPC_TASK_NEED_RECV), "NEED_RECV" }, \ - { (1UL << RPC_TASK_MSG_PIN_WAIT), "MSG_PIN_WAIT" }, \ - { (1UL << RPC_TASK_SIGNALLED), "SIGNALLED" }) + { (1UL << RPC_TASK_MSG_PIN_WAIT), "MSG_PIN_WAIT" }) DECLARE_EVENT_CLASS(rpc_task_running, diff --git a/include/trace/events/tcp.h b/include/trace/events/tcp.h index a27c4b619dffd..1a40c41ff8c30 100644 --- a/include/trace/events/tcp.h +++ b/include/trace/events/tcp.h @@ -259,6 +259,12 @@ TRACE_EVENT(tcp_retransmit_synack, __entry->saddr_v6, __entry->daddr_v6) ); +DECLARE_TRACE(tcp_cwnd_reduction_tp, + TP_PROTO(const struct sock *sk, int newly_acked_sacked, + int newly_lost, int flag), + TP_ARGS(sk, newly_acked_sacked, newly_lost, flag) +); + #include TRACE_EVENT(tcp_probe, diff --git a/include/uapi/linux/batman_adv.h b/include/uapi/linux/batman_adv.h index 35dc016c9bb41..936bcac270b5e 100644 --- a/include/uapi/linux/batman_adv.h +++ b/include/uapi/linux/batman_adv.h @@ -342,7 +342,7 @@ enum batadv_nl_attrs { BATADV_ATTR_MCAST_FLAGS_PRIV, /** - * @BATADV_ATTR_VLANID: VLAN id on top of soft interface + * @BATADV_ATTR_VLANID: VLAN id on top of mesh interface */ BATADV_ATTR_VLANID, @@ -380,7 +380,7 @@ enum batadv_nl_attrs { /** * @BATADV_ATTR_BRIDGE_LOOP_AVOIDANCE_ENABLED: whether the bridge loop * avoidance feature is enabled. 
This feature detects and avoids loops - * between the mesh and devices bridged with the soft interface + * between the mesh and devices bridged with the mesh interface */ BATADV_ATTR_BRIDGE_LOOP_AVOIDANCE_ENABLED, @@ -509,7 +509,7 @@ enum batadv_nl_commands { BATADV_CMD_UNSPEC, /** - * @BATADV_CMD_GET_MESH: Get attributes from softif/mesh + * @BATADV_CMD_GET_MESH: Get attributes from mesh(if) */ BATADV_CMD_GET_MESH, @@ -535,7 +535,7 @@ enum batadv_nl_commands { /** * @BATADV_CMD_GET_HARDIF: Get attributes from a hardif of the - * current softif + * current mesh(if) */ BATADV_CMD_GET_HARDIF, @@ -591,25 +591,25 @@ enum batadv_nl_commands { BATADV_CMD_GET_MCAST_FLAGS, /** - * @BATADV_CMD_SET_MESH: Set attributes for softif/mesh + * @BATADV_CMD_SET_MESH: Set attributes for mesh(if) */ BATADV_CMD_SET_MESH, /** * @BATADV_CMD_SET_HARDIF: Set attributes for hardif of the - * current softif + * current mesh(if) */ BATADV_CMD_SET_HARDIF, /** * @BATADV_CMD_GET_VLAN: Get attributes from a VLAN of the - * current softif + * current mesh(if) */ BATADV_CMD_GET_VLAN, /** * @BATADV_CMD_SET_VLAN: Set attributes for VLAN of the - * current softif + * current mesh(if) */ BATADV_CMD_SET_VLAN, @@ -691,7 +691,7 @@ enum batadv_ifla_attrs { */ IFLA_BATADV_ALGO_NAME, - /* add attributes above here, update the policy in soft-interface.c */ + /* add attributes above here, update the policy in mesh-interface.c */ /** * @__IFLA_BATADV_MAX: internal use diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 661de24449654..28705ae677849 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -6921,6 +6921,12 @@ enum { BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7F, }; +enum { + SK_BPF_CB_TX_TIMESTAMPING = 1<<0, + SK_BPF_CB_MASK = (SK_BPF_CB_TX_TIMESTAMPING - 1) | + SK_BPF_CB_TX_TIMESTAMPING +}; + /* List of known BPF sock_ops operators. * New entries can only be added at the end */ @@ -7033,6 +7039,29 @@ enum { * by the kernel or the * earlier bpf-progs. */ + BPF_SOCK_OPS_TSTAMP_SCHED_CB, /* Called when skb is passing + * through dev layer when + * SK_BPF_CB_TX_TIMESTAMPING + * feature is on. + */ + BPF_SOCK_OPS_TSTAMP_SND_SW_CB, /* Called when skb is about to send + * to the nic when SK_BPF_CB_TX_TIMESTAMPING + * feature is on. + */ + BPF_SOCK_OPS_TSTAMP_SND_HW_CB, /* Called in hardware phase when + * SK_BPF_CB_TX_TIMESTAMPING feature + * is on. + */ + BPF_SOCK_OPS_TSTAMP_ACK_CB, /* Called when all the skbs in the + * same sendmsg call are acked + * when SK_BPF_CB_TX_TIMESTAMPING + * feature is on. + */ + BPF_SOCK_OPS_TSTAMP_SENDMSG_CB, /* Called when every sendmsg syscall + * is triggered. It's used to correlate + * sendmsg timestamp with corresponding + * tskey. + */ }; /* List of TCP states. 
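(An aside on the tx-timestamping hooks added above: a minimal,
hypothetical sock_ops sketch. Only SK_BPF_CB_FLAGS,
SK_BPF_CB_TX_TIMESTAMPING and the BPF_SOCK_OPS_TSTAMP_* op names come
from this header; the program itself is illustrative and assumes
<bpf/bpf_helpers.h> for SEC() and bpf_setsockopt():

	SEC("sockops")
	int tstamp_ops(struct bpf_sock_ops *skops)
	{
		int flags = SK_BPF_CB_TX_TIMESTAMPING;

		switch (skops->op) {
		case BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB:
			// opt this socket in to the tstamp callbacks
			bpf_setsockopt(skops, SOL_SOCKET, SK_BPF_CB_FLAGS,
				       &flags, sizeof(flags));
			break;
		case BPF_SOCK_OPS_TSTAMP_SND_SW_CB:
			// skb is about to be handed to the NIC driver
			break;
		case BPF_SOCK_OPS_TSTAMP_ACK_CB:
			// all skbs of the sendmsg call were acked
			break;
		}
		return 1;
	})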
There is a build check in net/ipv4/tcp.c to detect @@ -7099,6 +7128,7 @@ enum { TCP_BPF_SYN_IP = 1006, /* Copy the IP[46] and TCP header */ TCP_BPF_SYN_MAC = 1007, /* Copy the MAC, IP[46], and TCP header */ TCP_BPF_SOCK_OPS_CB_FLAGS = 1008, /* Get or Set TCP sock ops flags */ + SK_BPF_CB_FLAGS = 1009, /* Get or set sock ops flags in socket */ }; enum { diff --git a/include/uapi/linux/can.h b/include/uapi/linux/can.h index e78cbd85ce7c7..42abf0679fb48 100644 --- a/include/uapi/linux/can.h +++ b/include/uapi/linux/can.h @@ -182,7 +182,7 @@ struct canfd_frame { /* * defined bits for canxl_frame.flags * - * The canxl_frame.flags element contains two bits CANXL_XLF and CANXL_SEC + * The canxl_frame.flags element contains three bits CANXL_[XLF|SEC|RRS] * and shares the relative position of the struct can[fd]_frame.len element. * The CANXL_XLF bit ALWAYS needs to be set to indicate a valid CAN XL frame. * As a side effect setting this bit intentionally breaks the length checks @@ -192,6 +192,7 @@ struct canfd_frame { */ #define CANXL_XLF 0x80 /* mandatory CAN XL frame flag (must always be set!) */ #define CANXL_SEC 0x01 /* Simple Extended Content (security/segmentation) */ +#define CANXL_RRS 0x02 /* Remote Request Substitution */ /* the 8-bit VCID is optionally placed in the canxl_frame.prio element */ #define CANXL_VCID_OFFSET 16 /* bit offset of VCID in prio element */ diff --git a/include/uapi/linux/devlink.h b/include/uapi/linux/devlink.h index 9401aa3436733..816339790409c 100644 --- a/include/uapi/linux/devlink.h +++ b/include/uapi/linux/devlink.h @@ -614,6 +614,8 @@ enum devlink_attr { DEVLINK_ATTR_REGION_DIRECT, /* flag */ + DEVLINK_ATTR_INFO_FUNCTION_UID, /* string */ + /* Add new attributes above here, update the spec in * Documentation/netlink/specs/devlink.yaml and re-generate * net/devlink/netlink_gen.c. 
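A user-space sketch of the tx-completion reporting wired up by the
errqueue.h and net_tstamp.h hunks below; the SOF_/SCM_ names come from
those hunks, while the socket setup itself is only illustrative:

	unsigned int val = SOF_TIMESTAMPING_TX_COMPLETION |
			   SOF_TIMESTAMPING_SOFTWARE |
			   SOF_TIMESTAMPING_OPT_ID;

	setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &val, sizeof(val));
	/* completion timestamps are then read back from the socket error
	 * queue (recvmsg() with MSG_ERRQUEUE), tagged SCM_TSTAMP_COMPLETION
	 */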
diff --git a/include/uapi/linux/errqueue.h b/include/uapi/linux/errqueue.h index 3c70e8ac14b8d..1ea47309d7726 100644 --- a/include/uapi/linux/errqueue.h +++ b/include/uapi/linux/errqueue.h @@ -73,6 +73,7 @@ enum { SCM_TSTAMP_SND, /* driver passed skb to NIC, or HW */ SCM_TSTAMP_SCHED, /* data entered the packet scheduler */ SCM_TSTAMP_ACK, /* data acknowledged by peer */ + SCM_TSTAMP_COMPLETION, /* packet tx completion */ }; #endif /* _UAPI_LINUX_ERRQUEUE_H */ diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h index 9b18c4cfe56f8..84833cca29fec 100644 --- a/include/uapi/linux/ethtool.h +++ b/include/uapi/linux/ethtool.h @@ -2059,6 +2059,24 @@ enum ethtool_link_mode_bit_indices { ETHTOOL_LINK_MODE_10baseT1S_Half_BIT = 100, ETHTOOL_LINK_MODE_10baseT1S_P2MP_Half_BIT = 101, ETHTOOL_LINK_MODE_10baseT1BRR_Full_BIT = 102, + ETHTOOL_LINK_MODE_200000baseCR_Full_BIT = 103, + ETHTOOL_LINK_MODE_200000baseKR_Full_BIT = 104, + ETHTOOL_LINK_MODE_200000baseDR_Full_BIT = 105, + ETHTOOL_LINK_MODE_200000baseDR_2_Full_BIT = 106, + ETHTOOL_LINK_MODE_200000baseSR_Full_BIT = 107, + ETHTOOL_LINK_MODE_200000baseVR_Full_BIT = 108, + ETHTOOL_LINK_MODE_400000baseCR2_Full_BIT = 109, + ETHTOOL_LINK_MODE_400000baseKR2_Full_BIT = 110, + ETHTOOL_LINK_MODE_400000baseDR2_Full_BIT = 111, + ETHTOOL_LINK_MODE_400000baseDR2_2_Full_BIT = 112, + ETHTOOL_LINK_MODE_400000baseSR2_Full_BIT = 113, + ETHTOOL_LINK_MODE_400000baseVR2_Full_BIT = 114, + ETHTOOL_LINK_MODE_800000baseCR4_Full_BIT = 115, + ETHTOOL_LINK_MODE_800000baseKR4_Full_BIT = 116, + ETHTOOL_LINK_MODE_800000baseDR4_Full_BIT = 117, + ETHTOOL_LINK_MODE_800000baseDR4_2_Full_BIT = 118, + ETHTOOL_LINK_MODE_800000baseSR4_Full_BIT = 119, + ETHTOOL_LINK_MODE_800000baseVR4_Full_BIT = 120, /* must be last entry */ __ETHTOOL_LINK_MODE_MASK_NBITS @@ -2271,6 +2289,10 @@ static inline int ethtool_validate_duplex(__u8 duplex) * be exploited to reduce the RSS queue spread. */ #define RXH_XFRM_SYM_XOR (1 << 0) +/* Similar to SYM_XOR, except that one copy of the XOR'ed fields is replaced by + * an OR of the same fields + */ +#define RXH_XFRM_SYM_OR_XOR (1 << 1) #define RXH_XFRM_NO_CHANGE 0xff /* L2-L4 network traffic flow types */ diff --git a/include/uapi/linux/fib_rules.h b/include/uapi/linux/fib_rules.h index 00e9890ca3c05..2df6e4035d50b 100644 --- a/include/uapi/linux/fib_rules.h +++ b/include/uapi/linux/fib_rules.h @@ -70,6 +70,9 @@ enum { FRA_DSCP, /* dscp */ FRA_FLOWLABEL, /* flowlabel */ FRA_FLOWLABEL_MASK, /* flowlabel mask */ + FRA_SPORT_MASK, /* sport mask */ + FRA_DPORT_MASK, /* dport mask */ + FRA_DSCP_MASK, /* dscp mask */ __FRA_MAX }; diff --git a/include/uapi/linux/if_cablemodem.h b/include/uapi/linux/if_cablemodem.h deleted file mode 100644 index 1f65130bf2a68..0000000000000 --- a/include/uapi/linux/if_cablemodem.h +++ /dev/null @@ -1,23 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */ -#ifndef _LINUX_CABLEMODEM_H_ -#define _LINUX_CABLEMODEM_H_ -/* - * Author: Franco Venturi - * Copyright 1998 Franco Venturi - * - * This program is free software; you can redistribute it - * and/or modify it under the terms of the GNU General - * Public License as published by the Free Software - * Foundation; either version 2 of the License, or (at - * your option) any later version. 
- */ - -/* some useful defines for sb1000.c e cmconfig.c - fv */ -#define SIOCGCMSTATS (SIOCDEVPRIVATE+0) /* get cable modem stats */ -#define SIOCGCMFIRMWARE (SIOCDEVPRIVATE+1) /* get cm firmware version */ -#define SIOCGCMFREQUENCY (SIOCDEVPRIVATE+2) /* get cable modem frequency */ -#define SIOCSCMFREQUENCY (SIOCDEVPRIVATE+3) /* set cable modem frequency */ -#define SIOCGCMPIDS (SIOCDEVPRIVATE+4) /* get cable modem PIDs */ -#define SIOCSCMPIDS (SIOCDEVPRIVATE+5) /* set cable modem PIDs */ - -#endif diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index bfe880fbbb24b..3ad2d5d980347 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -378,6 +378,7 @@ enum { IFLA_GRO_IPV4_MAX_SIZE, IFLA_DPLL_PIN, IFLA_MAX_PACING_OFFLOAD_HORIZON, + IFLA_NETNS_IMMUTABLE, __IFLA_MAX }; @@ -1438,6 +1439,7 @@ enum { IFLA_GENEVE_TTL_INHERIT, IFLA_GENEVE_DF, IFLA_GENEVE_INNER_PROTO_INHERIT, + IFLA_GENEVE_PORT_RANGE, __IFLA_GENEVE_MAX }; #define IFLA_GENEVE_MAX (__IFLA_GENEVE_MAX - 1) @@ -1450,6 +1452,11 @@ enum ifla_geneve_df { GENEVE_DF_MAX = __GENEVE_DF_END - 1, }; +struct ifla_geneve_port_range { + __be16 low; + __be16 high; +}; + /* Bareudp section */ enum { IFLA_BAREUDP_UNSPEC, @@ -1979,4 +1986,19 @@ enum { #define IFLA_DSA_MAX (__IFLA_DSA_MAX - 1) +/* OVPN section */ + +enum ovpn_mode { + OVPN_MODE_P2P, + OVPN_MODE_MP, +}; + +enum { + IFLA_OVPN_UNSPEC, + IFLA_OVPN_MODE, + __IFLA_OVPN_MAX, +}; + +#define IFLA_OVPN_MAX (__IFLA_OVPN_MAX - 1) + #endif /* _UAPI_LINUX_IF_LINK_H */ diff --git a/include/uapi/linux/if_xdp.h b/include/uapi/linux/if_xdp.h index 42ec5ddaab8dc..42869770776ec 100644 --- a/include/uapi/linux/if_xdp.h +++ b/include/uapi/linux/if_xdp.h @@ -127,6 +127,12 @@ struct xdp_options { */ #define XDP_TXMD_FLAGS_CHECKSUM (1 << 1) +/* Request launch time hardware offload. The device will schedule the packet for + * transmission at a pre-determined time called launch time. The value of + * launch time is communicated via launch_time field of struct xsk_tx_metadata. + */ +#define XDP_TXMD_FLAGS_LAUNCH_TIME (1 << 2) + /* AF_XDP offloads request. 'request' union member is consumed by the driver * when the packet is being transmitted. 'completion' union member is * filled by the driver when the transmit completion arrives. @@ -142,6 +148,10 @@ struct xsk_tx_metadata { __u16 csum_start; /* Offset from csum_start where checksum should be stored. */ __u16 csum_offset; + + /* XDP_TXMD_FLAGS_LAUNCH_TIME */ + /* Launch time in nanosecond against the PTP HW Clock */ + __u64 launch_time; } request; struct { diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h index e11c826385277..050fa8eb2e8f8 100644 --- a/include/uapi/linux/io_uring.h +++ b/include/uapi/linux/io_uring.h @@ -380,7 +380,7 @@ enum io_uring_op { * result will be the number of buffers send, with * the starting buffer ID in cqe->flags as per * usual for provided buffer usage. The buffers - * will be contigious from the starting buffer ID. + * will be contiguous from the starting buffer ID. */ #define IORING_RECVSEND_POLL_FIRST (1U << 0) #define IORING_RECV_MULTISHOT (1U << 1) diff --git a/include/uapi/linux/landlock.h b/include/uapi/linux/landlock.h index 33745642f7875..e1d2c27533b49 100644 --- a/include/uapi/linux/landlock.h +++ b/include/uapi/linux/landlock.h @@ -268,7 +268,9 @@ struct landlock_net_port_attr { * ~~~~~~~~~~~~~~~~ * * These flags enable to restrict a sandboxed process to a set of network - * actions. This is supported since the Landlock ABI version 4. + * actions. 
+ * + * This is supported since Landlock ABI version 4. * * The following access rights apply to TCP port numbers: * @@ -291,11 +293,13 @@ struct landlock_net_port_attr { * Setting a flag for a ruleset will isolate the Landlock domain to forbid * connections to resources outside the domain. * + * This is supported since Landlock ABI version 6. + * * Scopes: * * - %LANDLOCK_SCOPE_ABSTRACT_UNIX_SOCKET: Restrict a sandboxed process from * connecting to an abstract UNIX socket created by a process outside the - * related Landlock domain (e.g. a parent domain or a non-sandboxed process). + * related Landlock domain (e.g., a parent domain or a non-sandboxed process). * - %LANDLOCK_SCOPE_SIGNAL: Restrict a sandboxed process from sending a signal * to another process outside the domain. */ diff --git a/include/uapi/linux/net_tstamp.h b/include/uapi/linux/net_tstamp.h index 55b0ab51096c1..383213de612a8 100644 --- a/include/uapi/linux/net_tstamp.h +++ b/include/uapi/linux/net_tstamp.h @@ -44,8 +44,9 @@ enum { SOF_TIMESTAMPING_BIND_PHC = (1 << 15), SOF_TIMESTAMPING_OPT_ID_TCP = (1 << 16), SOF_TIMESTAMPING_OPT_RX_FILTER = (1 << 17), + SOF_TIMESTAMPING_TX_COMPLETION = (1 << 18), - SOF_TIMESTAMPING_LAST = SOF_TIMESTAMPING_OPT_RX_FILTER, + SOF_TIMESTAMPING_LAST = SOF_TIMESTAMPING_TX_COMPLETION, SOF_TIMESTAMPING_MASK = (SOF_TIMESTAMPING_LAST - 1) | SOF_TIMESTAMPING_LAST }; @@ -58,7 +59,8 @@ enum { #define SOF_TIMESTAMPING_TX_RECORD_MASK (SOF_TIMESTAMPING_TX_HARDWARE | \ SOF_TIMESTAMPING_TX_SOFTWARE | \ SOF_TIMESTAMPING_TX_SCHED | \ - SOF_TIMESTAMPING_TX_ACK) + SOF_TIMESTAMPING_TX_ACK | \ + SOF_TIMESTAMPING_TX_COMPLETION) /** * struct so_timestamping - SO_TIMESTAMPING parameter diff --git a/include/uapi/linux/netdev.h b/include/uapi/linux/netdev.h index e4be227d3ad64..b9b59d60957ff 100644 --- a/include/uapi/linux/netdev.h +++ b/include/uapi/linux/netdev.h @@ -59,10 +59,13 @@ enum netdev_xdp_rx_metadata { * by the driver. * @NETDEV_XSK_FLAGS_TX_CHECKSUM: L3 checksum HW offload is supported by the * driver. + * @NETDEV_XSK_FLAGS_TX_LAUNCH_TIME_FIFO: Launch time HW offload is supported + * by the driver. 
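+ *
+ * A hypothetical user-space sketch of the launch-time metadata this
+ * flag advertises. XDP_TXMD_FLAGS_LAUNCH_TIME and xsk_tx_metadata are
+ * from the if_xdp.h hunk above; frame_data, tx_metadata_len and the
+ * PHC-derived phc_now_ns are assumed to come from the usual AF_XDP
+ * ring setup:
+ *
+ *	struct xsk_tx_metadata *meta = frame_data - tx_metadata_len;
+ *
+ *	meta->flags |= XDP_TXMD_FLAGS_LAUNCH_TIME;
+ *	meta->request.launch_time = phc_now_ns + 100 * 1000;	// +100 us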
*/ enum netdev_xsk_flags { NETDEV_XSK_FLAGS_TX_TIMESTAMP = 1, NETDEV_XSK_FLAGS_TX_CHECKSUM = 2, + NETDEV_XSK_FLAGS_TX_LAUNCH_TIME_FIFO = 4, }; enum netdev_queue_type { @@ -74,6 +77,12 @@ enum netdev_qstats_scope { NETDEV_QSTATS_SCOPE_QUEUE = 1, }; +enum netdev_napi_threaded { + NETDEV_NAPI_THREADED_DISABLE, + NETDEV_NAPI_THREADED_ENABLE, + NETDEV_NAPI_THREADED_BUSY_POLL_ENABLE, +}; + enum { NETDEV_A_DEV_IFINDEX = 1, NETDEV_A_DEV_PAD, @@ -86,6 +95,11 @@ enum { NETDEV_A_DEV_MAX = (__NETDEV_A_DEV_MAX - 1) }; +enum { + __NETDEV_A_IO_URING_PROVIDER_INFO_MAX, + NETDEV_A_IO_URING_PROVIDER_INFO_MAX = (__NETDEV_A_IO_URING_PROVIDER_INFO_MAX - 1) +}; + enum { NETDEV_A_PAGE_POOL_ID = 1, NETDEV_A_PAGE_POOL_IFINDEX, @@ -94,6 +108,7 @@ enum { NETDEV_A_PAGE_POOL_INFLIGHT_MEM, NETDEV_A_PAGE_POOL_DETACH_TIME, NETDEV_A_PAGE_POOL_DMABUF, + NETDEV_A_PAGE_POOL_IO_URING, __NETDEV_A_PAGE_POOL_MAX, NETDEV_A_PAGE_POOL_MAX = (__NETDEV_A_PAGE_POOL_MAX - 1) @@ -125,17 +140,25 @@ enum { NETDEV_A_NAPI_DEFER_HARD_IRQS, NETDEV_A_NAPI_GRO_FLUSH_TIMEOUT, NETDEV_A_NAPI_IRQ_SUSPEND_TIMEOUT, + NETDEV_A_NAPI_THREADED, __NETDEV_A_NAPI_MAX, NETDEV_A_NAPI_MAX = (__NETDEV_A_NAPI_MAX - 1) }; +enum { + __NETDEV_A_XSK_INFO_MAX, + NETDEV_A_XSK_INFO_MAX = (__NETDEV_A_XSK_INFO_MAX - 1) +}; + enum { NETDEV_A_QUEUE_ID = 1, NETDEV_A_QUEUE_IFINDEX, NETDEV_A_QUEUE_TYPE, NETDEV_A_QUEUE_NAPI_ID, NETDEV_A_QUEUE_DMABUF, + NETDEV_A_QUEUE_IO_URING, + NETDEV_A_QUEUE_XSK, __NETDEV_A_QUEUE_MAX, NETDEV_A_QUEUE_MAX = (__NETDEV_A_QUEUE_MAX - 1) diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index f6c1b181c886d..ddcc4cda74af5 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -11,7 +11,7 @@ * Copyright 2008 Jouni Malinen * Copyright 2008 Colin McCabe * Copyright 2015-2017 Intel Deutschland GmbH - * Copyright (C) 2018-2024 Intel Corporation + * Copyright (C) 2018-2025 Intel Corporation * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -2881,9 +2881,9 @@ enum nl80211_commands { * @NL80211_ATTR_VIF_RADIO_MASK: Bitmask of allowed radios (u32). * A value of 0 means all radios. * - * @NL80211_ATTR_SUPPORTED_SELECTORS: supported selectors, array of - * supported selectors as defined by IEEE 802.11 7.3.2.2 but without the - * length restriction (at most %NL80211_MAX_SUPP_SELECTORS). + * @NL80211_ATTR_SUPPORTED_SELECTORS: supported BSS Membership Selectors, array + * of supported selectors as defined by IEEE Std 802.11-2020 9.4.2.3 but + * without the length restriction (at most %NL80211_MAX_SUPP_SELECTORS). * This can be used to provide a list of selectors that are implemented * by the supplicant. If not given, support for SAE_H2E is assumed. * @@ -2893,6 +2893,12 @@ enum nl80211_commands { * @NL80211_ATTR_EPCS: Flag attribute indicating that EPCS is enabled for a * station interface. * + * @NL80211_ATTR_ASSOC_MLD_EXT_CAPA_OPS: Extended MLD capabilities and + * operations that userspace implements to use during association/ML + * link reconfig, currently only "BTM MLD Recommendation For Multiple + * APs Support". Drivers may set additional flags that they support + * in the kernel or device. 
+ * * @NUM_NL80211_ATTR: total number of nl80211_attrs available * @NL80211_ATTR_MAX: highest attribute number currently defined * @__NL80211_ATTR_AFTER_LAST: internal use @@ -3448,6 +3454,8 @@ enum nl80211_attrs { NL80211_ATTR_MLO_RECONF_REM_LINKS, NL80211_ATTR_EPCS, + NL80211_ATTR_ASSOC_MLD_EXT_CAPA_OPS, + /* add attributes here, update the policy in nl80211.c */ __NL80211_ATTR_AFTER_LAST, @@ -4327,6 +4335,8 @@ enum nl80211_wmm_rule { * otherwise completely disabled. * @NL80211_FREQUENCY_ATTR_ALLOW_6GHZ_VLP_AP: This channel can be used for a * very low power (VLP) AP, despite being NO_IR. + * @NL80211_FREQUENCY_ATTR_ALLOW_20MHZ_ACTIVITY: This channel can be active in + * 20 MHz bandwidth, despite being NO_IR. * @NL80211_FREQUENCY_ATTR_MAX: highest frequency attribute number * currently defined * @__NL80211_FREQUENCY_ATTR_AFTER_LAST: internal use @@ -4371,6 +4381,7 @@ enum nl80211_frequency_attr { NL80211_FREQUENCY_ATTR_NO_6GHZ_AFC_CLIENT, NL80211_FREQUENCY_ATTR_CAN_MONITOR, NL80211_FREQUENCY_ATTR_ALLOW_6GHZ_VLP_AP, + NL80211_FREQUENCY_ATTR_ALLOW_20MHZ_ACTIVITY, /* keep last */ __NL80211_FREQUENCY_ATTR_AFTER_LAST, @@ -4582,31 +4593,34 @@ enum nl80211_sched_scan_match_attr { * @NL80211_RRF_NO_6GHZ_AFC_CLIENT: Client connection to AFC AP not allowed * @NL80211_RRF_ALLOW_6GHZ_VLP_AP: Very low power (VLP) AP can be permitted * despite NO_IR configuration. + * @NL80211_RRF_ALLOW_20MHZ_ACTIVITY: Allow activity in 20 MHz bandwidth, + * despite NO_IR configuration. */ enum nl80211_reg_rule_flags { - NL80211_RRF_NO_OFDM = 1<<0, - NL80211_RRF_NO_CCK = 1<<1, - NL80211_RRF_NO_INDOOR = 1<<2, - NL80211_RRF_NO_OUTDOOR = 1<<3, - NL80211_RRF_DFS = 1<<4, - NL80211_RRF_PTP_ONLY = 1<<5, - NL80211_RRF_PTMP_ONLY = 1<<6, - NL80211_RRF_NO_IR = 1<<7, - __NL80211_RRF_NO_IBSS = 1<<8, - NL80211_RRF_AUTO_BW = 1<<11, - NL80211_RRF_IR_CONCURRENT = 1<<12, - NL80211_RRF_NO_HT40MINUS = 1<<13, - NL80211_RRF_NO_HT40PLUS = 1<<14, - NL80211_RRF_NO_80MHZ = 1<<15, - NL80211_RRF_NO_160MHZ = 1<<16, - NL80211_RRF_NO_HE = 1<<17, - NL80211_RRF_NO_320MHZ = 1<<18, - NL80211_RRF_NO_EHT = 1<<19, - NL80211_RRF_PSD = 1<<20, - NL80211_RRF_DFS_CONCURRENT = 1<<21, - NL80211_RRF_NO_6GHZ_VLP_CLIENT = 1<<22, - NL80211_RRF_NO_6GHZ_AFC_CLIENT = 1<<23, - NL80211_RRF_ALLOW_6GHZ_VLP_AP = 1<<24, + NL80211_RRF_NO_OFDM = 1 << 0, + NL80211_RRF_NO_CCK = 1 << 1, + NL80211_RRF_NO_INDOOR = 1 << 2, + NL80211_RRF_NO_OUTDOOR = 1 << 3, + NL80211_RRF_DFS = 1 << 4, + NL80211_RRF_PTP_ONLY = 1 << 5, + NL80211_RRF_PTMP_ONLY = 1 << 6, + NL80211_RRF_NO_IR = 1 << 7, + __NL80211_RRF_NO_IBSS = 1 << 8, + NL80211_RRF_AUTO_BW = 1 << 11, + NL80211_RRF_IR_CONCURRENT = 1 << 12, + NL80211_RRF_NO_HT40MINUS = 1 << 13, + NL80211_RRF_NO_HT40PLUS = 1 << 14, + NL80211_RRF_NO_80MHZ = 1 << 15, + NL80211_RRF_NO_160MHZ = 1 << 16, + NL80211_RRF_NO_HE = 1 << 17, + NL80211_RRF_NO_320MHZ = 1 << 18, + NL80211_RRF_NO_EHT = 1 << 19, + NL80211_RRF_PSD = 1 << 20, + NL80211_RRF_DFS_CONCURRENT = 1 << 21, + NL80211_RRF_NO_6GHZ_VLP_CLIENT = 1 << 22, + NL80211_RRF_NO_6GHZ_AFC_CLIENT = 1 << 23, + NL80211_RRF_ALLOW_6GHZ_VLP_AP = 1 << 24, + NL80211_RRF_ALLOW_20MHZ_ACTIVITY = 1 << 25, }; #define NL80211_RRF_PASSIVE_SCAN NL80211_RRF_NO_IR @@ -4727,8 +4741,8 @@ enum nl80211_survey_info { * @NL80211_MNTR_FLAG_PLCPFAIL: pass frames with bad PLCP * @NL80211_MNTR_FLAG_CONTROL: pass control frames * @NL80211_MNTR_FLAG_OTHER_BSS: disable BSSID filtering - * @NL80211_MNTR_FLAG_COOK_FRAMES: report frames after processing. - * overrides all other flags. 
+ * @NL80211_MNTR_FLAG_COOK_FRAMES: deprecated + * will unconditionally be refused * @NL80211_MNTR_FLAG_ACTIVE: use the configured MAC address * and ACK incoming unicast packets. * @NL80211_MNTR_FLAG_SKIP_TX: do not pass local tx packets diff --git a/include/uapi/linux/ovpn.h b/include/uapi/linux/ovpn.h new file mode 100644 index 0000000000000..680d1522dc87c --- /dev/null +++ b/include/uapi/linux/ovpn.h @@ -0,0 +1,109 @@ +/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */ +/* Do not edit directly, auto-generated from: */ +/* Documentation/netlink/specs/ovpn.yaml */ +/* YNL-GEN uapi header */ + +#ifndef _UAPI_LINUX_OVPN_H +#define _UAPI_LINUX_OVPN_H + +#define OVPN_FAMILY_NAME "ovpn" +#define OVPN_FAMILY_VERSION 1 + +#define OVPN_NONCE_TAIL_SIZE 8 + +enum ovpn_cipher_alg { + OVPN_CIPHER_ALG_NONE, + OVPN_CIPHER_ALG_AES_GCM, + OVPN_CIPHER_ALG_CHACHA20_POLY1305, +}; + +enum ovpn_del_peer_reason { + OVPN_DEL_PEER_REASON_TEARDOWN, + OVPN_DEL_PEER_REASON_USERSPACE, + OVPN_DEL_PEER_REASON_EXPIRED, + OVPN_DEL_PEER_REASON_TRANSPORT_ERROR, + OVPN_DEL_PEER_REASON_TRANSPORT_DISCONNECT, +}; + +enum ovpn_key_slot { + OVPN_KEY_SLOT_PRIMARY, + OVPN_KEY_SLOT_SECONDARY, +}; + +enum { + OVPN_A_PEER_ID = 1, + OVPN_A_PEER_REMOTE_IPV4, + OVPN_A_PEER_REMOTE_IPV6, + OVPN_A_PEER_REMOTE_IPV6_SCOPE_ID, + OVPN_A_PEER_REMOTE_PORT, + OVPN_A_PEER_SOCKET, + OVPN_A_PEER_SOCKET_NETNSID, + OVPN_A_PEER_VPN_IPV4, + OVPN_A_PEER_VPN_IPV6, + OVPN_A_PEER_LOCAL_IPV4, + OVPN_A_PEER_LOCAL_IPV6, + OVPN_A_PEER_LOCAL_PORT, + OVPN_A_PEER_KEEPALIVE_INTERVAL, + OVPN_A_PEER_KEEPALIVE_TIMEOUT, + OVPN_A_PEER_DEL_REASON, + OVPN_A_PEER_VPN_RX_BYTES, + OVPN_A_PEER_VPN_TX_BYTES, + OVPN_A_PEER_VPN_RX_PACKETS, + OVPN_A_PEER_VPN_TX_PACKETS, + OVPN_A_PEER_LINK_RX_BYTES, + OVPN_A_PEER_LINK_TX_BYTES, + OVPN_A_PEER_LINK_RX_PACKETS, + OVPN_A_PEER_LINK_TX_PACKETS, + + __OVPN_A_PEER_MAX, + OVPN_A_PEER_MAX = (__OVPN_A_PEER_MAX - 1) +}; + +enum { + OVPN_A_KEYCONF_PEER_ID = 1, + OVPN_A_KEYCONF_SLOT, + OVPN_A_KEYCONF_KEY_ID, + OVPN_A_KEYCONF_CIPHER_ALG, + OVPN_A_KEYCONF_ENCRYPT_DIR, + OVPN_A_KEYCONF_DECRYPT_DIR, + + __OVPN_A_KEYCONF_MAX, + OVPN_A_KEYCONF_MAX = (__OVPN_A_KEYCONF_MAX - 1) +}; + +enum { + OVPN_A_KEYDIR_CIPHER_KEY = 1, + OVPN_A_KEYDIR_NONCE_TAIL, + + __OVPN_A_KEYDIR_MAX, + OVPN_A_KEYDIR_MAX = (__OVPN_A_KEYDIR_MAX - 1) +}; + +enum { + OVPN_A_IFINDEX = 1, + OVPN_A_PEER, + OVPN_A_KEYCONF, + + __OVPN_A_MAX, + OVPN_A_MAX = (__OVPN_A_MAX - 1) +}; + +enum { + OVPN_CMD_PEER_NEW = 1, + OVPN_CMD_PEER_SET, + OVPN_CMD_PEER_GET, + OVPN_CMD_PEER_DEL, + OVPN_CMD_PEER_DEL_NTF, + OVPN_CMD_KEY_NEW, + OVPN_CMD_KEY_GET, + OVPN_CMD_KEY_SWAP, + OVPN_CMD_KEY_SWAP_NTF, + OVPN_CMD_KEY_DEL, + + __OVPN_CMD_MAX, + OVPN_CMD_MAX = (__OVPN_CMD_MAX - 1) +}; + +#define OVPN_MCGRP_PEERS "peers" + +#endif /* _UAPI_LINUX_OVPN_H */ diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h index 66c3903d29cf6..dab9493c791b8 100644 --- a/include/uapi/linux/rtnetlink.h +++ b/include/uapi/linux/rtnetlink.h @@ -307,6 +307,7 @@ enum { #define RTPROT_MROUTED 17 /* Multicast daemon */ #define RTPROT_KEEPALIVED 18 /* Keepalived daemon */ #define RTPROT_BABEL 42 /* Babel daemon */ +#define RTPROT_OVN 84 /* OVN daemon */ #define RTPROT_OPENR 99 /* Open Routing (Open/R) Routes */ #define RTPROT_BGP 186 /* BGP Routes */ #define RTPROT_ISIS 187 /* ISIS Routes */ diff --git a/include/uapi/linux/snmp.h b/include/uapi/linux/snmp.h index 848c7784e684c..ec47f9b68a1bf 100644 --- a/include/uapi/linux/snmp.h +++ b/include/uapi/linux/snmp.h @@ -23,9 +23,14 
@@ enum IPSTATS_MIB_INPKTS, /* InReceives */ IPSTATS_MIB_INOCTETS, /* InOctets */ IPSTATS_MIB_INDELIVERS, /* InDelivers */ - IPSTATS_MIB_OUTFORWDATAGRAMS, /* OutForwDatagrams */ + IPSTATS_MIB_NOECTPKTS, /* InNoECTPkts */ + IPSTATS_MIB_ECT1PKTS, /* InECT1Pkts */ + IPSTATS_MIB_ECT0PKTS, /* InECT0Pkts */ + IPSTATS_MIB_CEPKTS, /* InCEPkts */ IPSTATS_MIB_OUTREQUESTS, /* OutRequests */ + IPSTATS_MIB_OUTPKTS, /* OutTransmits */ IPSTATS_MIB_OUTOCTETS, /* OutOctets */ + IPSTATS_MIB_OUTFORWDATAGRAMS, /* OutForwDatagrams */ /* other fields */ IPSTATS_MIB_INHDRERRORS, /* InHdrErrors */ IPSTATS_MIB_INTOOBIGERRORS, /* InTooBigErrors */ @@ -52,12 +57,7 @@ enum IPSTATS_MIB_INBCASTOCTETS, /* InBcastOctets */ IPSTATS_MIB_OUTBCASTOCTETS, /* OutBcastOctets */ IPSTATS_MIB_CSUMERRORS, /* InCsumErrors */ - IPSTATS_MIB_NOECTPKTS, /* InNoECTPkts */ - IPSTATS_MIB_ECT1PKTS, /* InECT1Pkts */ - IPSTATS_MIB_ECT0PKTS, /* InECT0Pkts */ - IPSTATS_MIB_CEPKTS, /* InCEPkts */ IPSTATS_MIB_REASM_OVERLAPS, /* ReasmOverlaps */ - IPSTATS_MIB_OUTPKTS, /* OutTransmits */ __IPSTATS_MIB_MAX }; @@ -186,6 +186,7 @@ enum LINUX_MIB_TIMEWAITKILLED, /* TimeWaitKilled */ LINUX_MIB_PAWSACTIVEREJECTED, /* PAWSActiveRejected */ LINUX_MIB_PAWSESTABREJECTED, /* PAWSEstabRejected */ + LINUX_MIB_TSECRREJECTED, /* TSEcrRejected */ LINUX_MIB_PAWS_OLD_ACK, /* PAWSOldAck */ LINUX_MIB_DELAYEDACKS, /* DelayedACKs */ LINUX_MIB_DELAYEDACKLOCKED, /* DelayedACKLocked */ diff --git a/include/uapi/linux/tcp.h b/include/uapi/linux/tcp.h index dbf896f3146c5..dc8fdc80e16b0 100644 --- a/include/uapi/linux/tcp.h +++ b/include/uapi/linux/tcp.h @@ -28,7 +28,8 @@ struct tcphdr { __be32 seq; __be32 ack_seq; #if defined(__LITTLE_ENDIAN_BITFIELD) - __u16 res1:4, + __u16 ae:1, + res1:3, doff:4, fin:1, syn:1, @@ -40,7 +41,8 @@ struct tcphdr { cwr:1; #elif defined(__BIG_ENDIAN_BITFIELD) __u16 doff:4, - res1:4, + res1:3, + ae:1, cwr:1, ece:1, urg:1, @@ -70,6 +72,7 @@ union tcp_word_hdr { #define tcp_flag_word(tp) (((union tcp_word_hdr *)(tp))->words[3]) enum { + TCP_FLAG_AE = __constant_cpu_to_be32(0x01000000), TCP_FLAG_CWR = __constant_cpu_to_be32(0x00800000), TCP_FLAG_ECE = __constant_cpu_to_be32(0x00400000), TCP_FLAG_URG = __constant_cpu_to_be32(0x00200000), @@ -78,7 +81,7 @@ enum { TCP_FLAG_RST = __constant_cpu_to_be32(0x00040000), TCP_FLAG_SYN = __constant_cpu_to_be32(0x00020000), TCP_FLAG_FIN = __constant_cpu_to_be32(0x00010000), - TCP_RESERVED_BITS = __constant_cpu_to_be32(0x0F000000), + TCP_RESERVED_BITS = __constant_cpu_to_be32(0x0E000000), TCP_DATA_OFFSET = __constant_cpu_to_be32(0xF0000000) }; @@ -136,6 +139,9 @@ enum { #define TCP_AO_REPAIR 42 /* Get/Set SNEs and ISNs */ #define TCP_IS_MPTCP 43 /* Is MPTCP being used? */ +#define TCP_RTO_MAX_MS 44 /* max rto time in ms */ +#define TCP_RTO_MIN_US 45 /* min rto time in us */ +#define TCP_DELACK_MAX_US 46 /* max delayed ack time in us */ #define TCP_REPAIR_ON 1 #define TCP_REPAIR_OFF 0 diff --git a/include/uapi/linux/udp.h b/include/uapi/linux/udp.h index d85d671deed3c..edca3e430305a 100644 --- a/include/uapi/linux/udp.h +++ b/include/uapi/linux/udp.h @@ -43,5 +43,6 @@ struct udphdr { #define UDP_ENCAP_GTP1U 5 /* 3GPP TS 29.060 */ #define UDP_ENCAP_RXRPC 6 #define TCP_ENCAP_ESPINTCP 7 /* Yikes, this is really xfrm encap types. 
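(An aside on the TCP_RTO_MAX_MS / TCP_RTO_MIN_US / TCP_DELACK_MAX_US
options added in the tcp.h hunk above: a minimal, hypothetical usage
sketch. Only the option names come from that hunk, the value is
illustrative:

	unsigned int rto_max_ms = 30000;	// cap retransmit backoff at 30 s

	setsockopt(fd, IPPROTO_TCP, TCP_RTO_MAX_MS,
		   &rto_max_ms, sizeof(rto_max_ms));
)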
*/ +#define UDP_ENCAP_OVPNINUDP 8 /* OpenVPN traffic */ #endif /* _UAPI_LINUX_UDP_H */ diff --git a/include/uapi/linux/usb/ch9.h b/include/uapi/linux/usb/ch9.h index 91f0f7e214a5a..0522906520465 100644 --- a/include/uapi/linux/usb/ch9.h +++ b/include/uapi/linux/usb/ch9.h @@ -330,6 +330,7 @@ struct usb_device_descriptor { #define USB_CLASS_AUDIO_VIDEO 0x10 #define USB_CLASS_BILLBOARD 0x11 #define USB_CLASS_USB_TYPE_C_BRIDGE 0x12 +#define USB_CLASS_MCTP 0x14 #define USB_CLASS_MISC 0xef #define USB_CLASS_APP_SPEC 0xfe #define USB_SUBCLASS_DFU 0x01 diff --git a/include/uapi/linux/virtio_net.h b/include/uapi/linux/virtio_net.h index ac9174717ef13..963540deae66a 100644 --- a/include/uapi/linux/virtio_net.h +++ b/include/uapi/linux/virtio_net.h @@ -327,6 +327,19 @@ struct virtio_net_rss_config { __u8 hash_key_data[/* hash_key_length */]; }; +struct virtio_net_rss_config_hdr { + __le32 hash_types; + __le16 indirection_table_mask; + __le16 unclassified_queue; + __le16 indirection_table[/* 1 + indirection_table_mask */]; +}; + +struct virtio_net_rss_config_trailer { + __le16 max_tx_vq; + __u8 hash_key_length; + __u8 hash_key_data[/* hash_key_length */]; +}; + #define VIRTIO_NET_CTRL_MQ_RSS_CONFIG 1 /* diff --git a/init/Kconfig b/init/Kconfig index d0d021b3fa3b3..324c2886b2ea3 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -1973,7 +1973,7 @@ config RUST depends on !MODVERSIONS || GENDWARFKSYMS depends on !GCC_PLUGIN_RANDSTRUCT depends on !RANDSTRUCT - depends on !DEBUG_INFO_BTF || PAHOLE_HAS_LANG_EXCLUDE + depends on !DEBUG_INFO_BTF || (PAHOLE_HAS_LANG_EXCLUDE && !LTO) depends on !CFI_CLANG || HAVE_CFI_ICALL_NORMALIZE_INTEGERS_RUSTC select CFI_ICALL_NORMALIZE_INTEGERS if CFI_CLANG depends on !CALL_PADDING || RUSTC_VERSION >= 108100 diff --git a/io_uring/io-wq.c b/io_uring/io-wq.c index 5d0928f37471e..91019b4d03088 100644 --- a/io_uring/io-wq.c +++ b/io_uring/io-wq.c @@ -64,7 +64,7 @@ struct io_worker { union { struct rcu_head rcu; - struct work_struct work; + struct delayed_work work; }; }; @@ -770,6 +770,18 @@ static inline bool io_should_retry_thread(struct io_worker *worker, long err) } } +static void queue_create_worker_retry(struct io_worker *worker) +{ + /* + * We only bother retrying because there's a chance that the + * failure to create a worker is due to some temporary condition + * in the forking task (e.g. outstanding signal); give the task + * some time to clear that condition. 
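+	 * As a purely illustrative worked example: with init_retries == 3,
+	 * the delayed work below is queued 3 * 5 == 15 msec out.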
+ */ + schedule_delayed_work(&worker->work, + msecs_to_jiffies(worker->init_retries * 5)); +} + static void create_worker_cont(struct callback_head *cb) { struct io_worker *worker; @@ -809,12 +821,13 @@ static void create_worker_cont(struct callback_head *cb) /* re-create attempts grab a new worker ref, drop the existing one */ io_worker_release(worker); - schedule_work(&worker->work); + queue_create_worker_retry(worker); } static void io_workqueue_create(struct work_struct *work) { - struct io_worker *worker = container_of(work, struct io_worker, work); + struct io_worker *worker = container_of(work, struct io_worker, + work.work); struct io_wq_acct *acct = io_wq_get_acct(worker); if (!io_queue_worker_create(worker, acct, create_worker_cont)) @@ -855,8 +868,8 @@ static bool create_io_worker(struct io_wq *wq, int index) kfree(worker); goto fail; } else { - INIT_WORK(&worker->work, io_workqueue_create); - schedule_work(&worker->work); + INIT_DELAYED_WORK(&worker->work, io_workqueue_create); + queue_create_worker_retry(worker); } return true; diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c index ceacf6230e342..f7acae5f7e1d0 100644 --- a/io_uring/io_uring.c +++ b/io_uring/io_uring.c @@ -2045,6 +2045,8 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req, req->opcode = 0; return io_init_fail_req(req, -EINVAL); } + opcode = array_index_nospec(opcode, IORING_OP_LAST); + def = &io_issue_defs[opcode]; if (unlikely(sqe_flags & ~SQE_COMMON_FLAGS)) { /* enforce forwards compatibility on users */ diff --git a/io_uring/napi.c b/io_uring/napi.c index b1ade3fda30f3..4a10de03e4269 100644 --- a/io_uring/napi.c +++ b/io_uring/napi.c @@ -44,7 +44,7 @@ int __io_napi_add_id(struct io_ring_ctx *ctx, unsigned int napi_id) struct io_napi_entry *e; /* Non-NAPI IDs can be rejected. */ - if (napi_id < MIN_NAPI_ID) + if (!napi_id_valid(napi_id)) return -EINVAL; hash_list = &ctx->napi_ht[hash_min(napi_id, HASH_BITS(ctx->napi_ht))]; @@ -87,7 +87,7 @@ static int __io_napi_del_id(struct io_ring_ctx *ctx, unsigned int napi_id) struct io_napi_entry *e; /* Non-NAPI IDs can be rejected. 
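(Illustrative gloss: napi_id_valid(id) is expected to reduce to
id >= MIN_NAPI_ID, so both call sites keep rejecting the reserved
non-NAPI id range, just via the shared helper.)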
*/ - if (napi_id < MIN_NAPI_ID) + if (!napi_id_valid(napi_id)) return -EINVAL; hash_list = &ctx->napi_ht[hash_min(napi_id, HASH_BITS(ctx->napi_ht))]; diff --git a/io_uring/net.c b/io_uring/net.c index 17852a6616ffe..5d0b56ff50eed 100644 --- a/io_uring/net.c +++ b/io_uring/net.c @@ -322,7 +322,9 @@ static int io_sendmsg_copy_hdr(struct io_kiocb *req, if (unlikely(ret)) return ret; - return __get_compat_msghdr(&iomsg->msg, &cmsg, NULL); + ret = __get_compat_msghdr(&iomsg->msg, &cmsg, NULL); + sr->msg_control = iomsg->msg.msg_control_user; + return ret; } #endif diff --git a/io_uring/rsrc.h b/io_uring/rsrc.h index 190f7ee45de93..89ea0135a1a0d 100644 --- a/io_uring/rsrc.h +++ b/io_uring/rsrc.h @@ -4,12 +4,6 @@ #include -#define IO_NODE_ALLOC_CACHE_MAX 32 - -#define IO_RSRC_TAG_TABLE_SHIFT (PAGE_SHIFT - 3) -#define IO_RSRC_TAG_TABLE_MAX (1U << IO_RSRC_TAG_TABLE_SHIFT) -#define IO_RSRC_TAG_TABLE_MASK (IO_RSRC_TAG_TABLE_MAX - 1) - enum { IORING_RSRC_FILE = 0, IORING_RSRC_BUFFER = 1, diff --git a/io_uring/rw.c b/io_uring/rw.c index 7aa1e4c9f64a3..e5528cebcd066 100644 --- a/io_uring/rw.c +++ b/io_uring/rw.c @@ -23,6 +23,9 @@ #include "poll.h" #include "rw.h" +static void io_complete_rw(struct kiocb *kiocb, long res); +static void io_complete_rw_iopoll(struct kiocb *kiocb, long res); + struct io_rw { /* NOTE: kiocb has the file as the first member, so don't do it here */ struct kiocb kiocb; @@ -289,6 +292,11 @@ static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe, rw->kiocb.dio_complete = NULL; rw->kiocb.ki_flags = 0; + if (req->ctx->flags & IORING_SETUP_IOPOLL) + rw->kiocb.ki_complete = io_complete_rw_iopoll; + else + rw->kiocb.ki_complete = io_complete_rw; + rw->addr = READ_ONCE(sqe->addr); rw->len = READ_ONCE(sqe->len); rw->flags = READ_ONCE(sqe->rw_flags); @@ -552,19 +560,20 @@ static void io_complete_rw_iopoll(struct kiocb *kiocb, long res) if (kiocb->ki_flags & IOCB_WRITE) io_req_end_write(req); if (unlikely(res != req->cqe.res)) { - if (res == -EAGAIN && io_rw_should_reissue(req)) { + if (res == -EAGAIN && io_rw_should_reissue(req)) req->flags |= REQ_F_REISSUE | REQ_F_BL_NO_RECYCLE; - return; - } - req->cqe.res = res; + else + req->cqe.res = res; } /* order with io_iopoll_complete() checking ->iopoll_completed */ smp_store_release(&req->iopoll_completed, 1); } -static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret) +static inline void io_rw_done(struct io_kiocb *req, ssize_t ret) { + struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); + /* IO was queued async, completion will happen later */ if (ret == -EIOCBQUEUED) return; @@ -586,8 +595,10 @@ static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret) } } - INDIRECT_CALL_2(kiocb->ki_complete, io_complete_rw_iopoll, - io_complete_rw, kiocb, ret); + if (req->ctx->flags & IORING_SETUP_IOPOLL) + io_complete_rw_iopoll(&rw->kiocb, ret); + else + io_complete_rw(&rw->kiocb, ret); } static int kiocb_done(struct io_kiocb *req, ssize_t ret, @@ -598,7 +609,7 @@ static int kiocb_done(struct io_kiocb *req, ssize_t ret, if (ret >= 0 && req->flags & REQ_F_CUR_POS) req->file->f_pos = rw->kiocb.ki_pos; - if (ret >= 0 && (rw->kiocb.ki_complete == io_complete_rw)) { + if (ret >= 0 && !(req->ctx->flags & IORING_SETUP_IOPOLL)) { __io_complete_rw_common(req, ret); /* * Safe to call io_end from here as we're inline @@ -609,7 +620,7 @@ static int kiocb_done(struct io_kiocb *req, ssize_t ret, io_req_rw_cleanup(req, issue_flags); return IOU_OK; } else { - io_rw_done(&rw->kiocb, ret); + io_rw_done(req, ret); } return 
IOU_ISSUE_SKIP_COMPLETE; @@ -813,10 +824,8 @@ static int io_rw_init_file(struct io_kiocb *req, fmode_t mode, int rw_type) if (ctx->flags & IORING_SETUP_IOPOLL) { if (!(kiocb->ki_flags & IOCB_DIRECT) || !file->f_op->iopoll) return -EOPNOTSUPP; - kiocb->private = NULL; kiocb->ki_flags |= IOCB_HIPRI; - kiocb->ki_complete = io_complete_rw_iopoll; req->iopoll_completed = 0; if (ctx->flags & IORING_SETUP_HYBRID_IOPOLL) { /* make sure every req only blocks once*/ @@ -826,7 +835,6 @@ static int io_rw_init_file(struct io_kiocb *req, fmode_t mode, int rw_type) } else { if (kiocb->ki_flags & IOCB_HIPRI) return -EINVAL; - kiocb->ki_complete = io_complete_rw; } if (req->flags & REQ_F_HAS_METADATA) { @@ -904,7 +912,8 @@ static int __io_read(struct io_kiocb *req, unsigned int issue_flags) } else if (ret == -EIOCBQUEUED) { return IOU_ISSUE_SKIP_COMPLETE; } else if (ret == req->cqe.res || ret <= 0 || !force_nonblock || - (req->flags & REQ_F_NOWAIT) || !need_complete_io(req)) { + (req->flags & REQ_F_NOWAIT) || !need_complete_io(req) || + (issue_flags & IO_URING_F_MULTISHOT)) { /* read all, failed, already did sync or don't want to retry */ goto done; } @@ -977,6 +986,8 @@ int io_read_mshot(struct io_kiocb *req, unsigned int issue_flags) if (!io_file_can_poll(req)) return -EBADFD; + /* make it sync, multishot doesn't support async execution */ + rw->kiocb.ki_complete = NULL; ret = __io_read(req, issue_flags); /* diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index 519e3f5e9c10f..91ce49b7ae28c 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -8625,6 +8625,7 @@ static int bpf_prog_type_to_kfunc_hook(enum bpf_prog_type prog_type) case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: case BPF_PROG_TYPE_CGROUP_SOCKOPT: case BPF_PROG_TYPE_CGROUP_SYSCTL: + case BPF_PROG_TYPE_SOCK_OPS: return BTF_KFUNC_HOOK_CGROUP; case BPF_PROG_TYPE_SCHED_ACT: return BTF_KFUNC_HOOK_SCHED_ACT; diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c index 774accbd4a223..67e8a2fc1a99d 100644 --- a/kernel/bpf/cpumap.c +++ b/kernel/bpf/cpumap.c @@ -33,8 +33,8 @@ #include #include -#include /* netif_receive_skb_list */ -#include /* eth_type_trans */ +#include +#include /* General idea: XDP packets getting XDP redirected to another CPU, * will maximum be stored/queued for one driver ->poll() call. 
It is @@ -68,6 +68,7 @@ struct bpf_cpu_map_entry { struct bpf_cpumap_val value; struct bpf_prog *prog; + struct gro_node gro; struct completion kthread_running; struct rcu_work free_work; @@ -133,22 +134,23 @@ static void __cpu_map_ring_cleanup(struct ptr_ring *ring) } } -static void cpu_map_bpf_prog_run_skb(struct bpf_cpu_map_entry *rcpu, - struct list_head *listp, - struct xdp_cpumap_stats *stats) +static u32 cpu_map_bpf_prog_run_skb(struct bpf_cpu_map_entry *rcpu, + void **skbs, u32 skb_n, + struct xdp_cpumap_stats *stats) { - struct sk_buff *skb, *tmp; struct xdp_buff xdp; - u32 act; + u32 act, pass = 0; int err; - list_for_each_entry_safe(skb, tmp, listp, list) { + for (u32 i = 0; i < skb_n; i++) { + struct sk_buff *skb = skbs[i]; + act = bpf_prog_run_generic_xdp(skb, &xdp, rcpu->prog); switch (act) { case XDP_PASS: + skbs[pass++] = skb; break; case XDP_REDIRECT: - skb_list_del_init(skb); err = xdp_do_generic_redirect(skb->dev, skb, &xdp, rcpu->prog); if (unlikely(err)) { @@ -157,7 +159,7 @@ static void cpu_map_bpf_prog_run_skb(struct bpf_cpu_map_entry *rcpu, } else { stats->redirect++; } - return; + break; default: bpf_warn_invalid_xdp_action(NULL, rcpu->prog, act); fallthrough; @@ -165,12 +167,15 @@ static void cpu_map_bpf_prog_run_skb(struct bpf_cpu_map_entry *rcpu, trace_xdp_exception(skb->dev, rcpu->prog, act); fallthrough; case XDP_DROP: - skb_list_del_init(skb); - kfree_skb(skb); + napi_consume_skb(skb, true); stats->drop++; - return; + break; } } + + stats->pass += pass; + + return pass; } static int cpu_map_bpf_prog_run_xdp(struct bpf_cpu_map_entry *rcpu, @@ -204,7 +209,6 @@ static int cpu_map_bpf_prog_run_xdp(struct bpf_cpu_map_entry *rcpu, stats->drop++; } else { frames[nframes++] = xdpf; - stats->pass++; } break; case XDP_REDIRECT: @@ -228,43 +232,65 @@ static int cpu_map_bpf_prog_run_xdp(struct bpf_cpu_map_entry *rcpu, } xdp_clear_return_frame_no_direct(); + stats->pass += nframes; return nframes; } #define CPUMAP_BATCH 8 -static int cpu_map_bpf_prog_run(struct bpf_cpu_map_entry *rcpu, void **frames, - int xdp_n, struct xdp_cpumap_stats *stats, - struct list_head *list) +struct cpu_map_ret { + u32 xdp_n; + u32 skb_n; +}; + +static void cpu_map_bpf_prog_run(struct bpf_cpu_map_entry *rcpu, void **frames, + void **skbs, struct cpu_map_ret *ret, + struct xdp_cpumap_stats *stats) { struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx; - int nframes; if (!rcpu->prog) - return xdp_n; + goto out; - rcu_read_lock_bh(); + rcu_read_lock(); bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx); - nframes = cpu_map_bpf_prog_run_xdp(rcpu, frames, xdp_n, stats); + ret->xdp_n = cpu_map_bpf_prog_run_xdp(rcpu, frames, ret->xdp_n, stats); + if (unlikely(ret->skb_n)) + ret->skb_n = cpu_map_bpf_prog_run_skb(rcpu, skbs, ret->skb_n, + stats); if (stats->redirect) xdp_do_flush(); - if (unlikely(!list_empty(list))) - cpu_map_bpf_prog_run_skb(rcpu, list, stats); - bpf_net_ctx_clear(bpf_net_ctx); - rcu_read_unlock_bh(); /* resched point, may call do_softirq() */ + rcu_read_unlock(); - return nframes; +out: + if (unlikely(ret->skb_n) && ret->xdp_n) + memmove(&skbs[ret->xdp_n], skbs, ret->skb_n * sizeof(*skbs)); +} + +static void cpu_map_gro_flush(struct bpf_cpu_map_entry *rcpu, bool empty) +{ + /* + * If the ring is not empty, there'll be a new iteration soon, and we + * only need to do a full flush if a tick is long (> 1 ms). + * If the ring is empty, to not hold GRO packets in the stack for too + * long, do a full flush. + * This is equivalent to how NAPI decides whether to perform a full + * flush. 
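+	 * An illustrative reading: with HZ < 1000 a tick exceeds 1 ms, so
+	 * the flush below is always a full one; with HZ >= 1000 and a busy
+	 * ring, segments that arrived within the current tick are kept
+	 * while older ones are flushed.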
+ */ + gro_flush(&rcpu->gro, !empty && HZ >= 1000); + gro_normal_list(&rcpu->gro); } static int cpu_map_kthread_run(void *data) { struct bpf_cpu_map_entry *rcpu = data; unsigned long last_qs = jiffies; + u32 packets = 0; complete(&rcpu->kthread_running); set_current_state(TASK_INTERRUPTIBLE); @@ -277,11 +303,11 @@ static int cpu_map_kthread_run(void *data) while (!kthread_should_stop() || !__ptr_ring_empty(rcpu->queue)) { struct xdp_cpumap_stats stats = {}; /* zero stats */ unsigned int kmem_alloc_drops = 0, sched = 0; - gfp_t gfp = __GFP_ZERO | GFP_ATOMIC; - int i, n, m, nframes, xdp_n; + struct cpu_map_ret ret = { }; void *frames[CPUMAP_BATCH]; void *skbs[CPUMAP_BATCH]; - LIST_HEAD(list); + u32 i, n, m; + bool empty; /* Release CPU reschedule checks */ if (__ptr_ring_empty(rcpu->queue)) { @@ -306,7 +332,7 @@ static int cpu_map_kthread_run(void *data) */ n = __ptr_ring_consume_batched(rcpu->queue, frames, CPUMAP_BATCH); - for (i = 0, xdp_n = 0; i < n; i++) { + for (i = 0; i < n; i++) { void *f = frames[i]; struct page *page; @@ -314,11 +340,11 @@ static int cpu_map_kthread_run(void *data) struct sk_buff *skb = f; __ptr_clear_bit(0, &skb); - list_add_tail(&skb->list, &list); + skbs[ret.skb_n++] = skb; continue; } - frames[xdp_n++] = f; + frames[ret.xdp_n++] = f; page = virt_to_page(f); /* Bring struct page memory area to curr CPU. Read by @@ -328,40 +354,51 @@ static int cpu_map_kthread_run(void *data) prefetchw(page); } + local_bh_disable(); + /* Support running another XDP prog on this CPU */ - nframes = cpu_map_bpf_prog_run(rcpu, frames, xdp_n, &stats, &list); - if (nframes) { - m = kmem_cache_alloc_bulk(net_hotdata.skbuff_cache, - gfp, nframes, skbs); - if (unlikely(m == 0)) { - for (i = 0; i < nframes; i++) - skbs[i] = NULL; /* effect: xdp_return_frame */ - kmem_alloc_drops += nframes; - } + cpu_map_bpf_prog_run(rcpu, frames, skbs, &ret, &stats); + if (!ret.xdp_n) + goto stats; + + m = napi_skb_cache_get_bulk(skbs, ret.xdp_n); + if (unlikely(m < ret.xdp_n)) { + for (i = m; i < ret.xdp_n; i++) + xdp_return_frame(frames[i]); + + if (ret.skb_n) + memmove(&skbs[m], &skbs[ret.xdp_n], + ret.skb_n * sizeof(*skbs)); + + kmem_alloc_drops += ret.xdp_n - m; + ret.xdp_n = m; } - local_bh_disable(); - for (i = 0; i < nframes; i++) { + for (i = 0; i < ret.xdp_n; i++) { struct xdp_frame *xdpf = frames[i]; - struct sk_buff *skb = skbs[i]; - skb = __xdp_build_skb_from_frame(xdpf, skb, - xdpf->dev_rx); - if (!skb) { - xdp_return_frame(xdpf); - continue; - } - - list_add_tail(&skb->list, &list); + /* Can fail only when !skb -- already handled above */ + __xdp_build_skb_from_frame(xdpf, skbs[i], xdpf->dev_rx); } +stats: /* Feedback loop via tracepoint. * NB: keep before recv to allow measuring enqueue/dequeue latency. 
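(Aside on the batching added below: NAPI_POLL_WEIGHT is 64, so with a
steadily busy ring cpu_map_gro_flush() runs roughly once per 64 frames,
and immediately whenever the ring drains; an illustrative reading of the
packets/empty condition.)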
*/ trace_xdp_cpumap_kthread(rcpu->map_id, n, kmem_alloc_drops, sched, &stats); - netif_receive_skb_list(&list); + for (i = 0; i < ret.xdp_n + ret.skb_n; i++) + gro_receive_skb(&rcpu->gro, skbs[i]); + + /* Flush either every 64 packets or in case of empty ring */ + packets += n; + empty = __ptr_ring_empty(rcpu->queue); + if (packets >= NAPI_POLL_WEIGHT || empty) { + cpu_map_gro_flush(rcpu, empty); + packets = 0; + } + local_bh_enable(); /* resched point, may call do_softirq() */ } __set_current_state(TASK_RUNNING); @@ -430,6 +467,7 @@ __cpu_map_entry_alloc(struct bpf_map *map, struct bpf_cpumap_val *value, rcpu->cpu = cpu; rcpu->map_id = map->id; rcpu->value.qsize = value->qsize; + gro_init(&rcpu->gro); if (fd > 0 && __cpu_map_load_bpf_program(rcpu, map, fd)) goto free_ptr_ring; @@ -458,6 +496,7 @@ __cpu_map_entry_alloc(struct bpf_map *map, struct bpf_cpumap_val *value, if (rcpu->prog) bpf_prog_put(rcpu->prog); free_ptr_ring: + gro_cleanup(&rcpu->gro); ptr_ring_cleanup(rcpu->queue, NULL); free_queue: kfree(rcpu->queue); @@ -487,6 +526,7 @@ static void __cpu_map_entry_free(struct work_struct *work) if (rcpu->prog) bpf_prog_put(rcpu->prog); + gro_cleanup(&rcpu->gro); /* The queue should be empty at this point */ __cpu_map_ring_cleanup(rcpu->queue); ptr_ring_cleanup(rcpu->queue, NULL); diff --git a/kernel/bpf/offload.c b/kernel/bpf/offload.c index 1a4fec330eaac..42ae8d595c2c2 100644 --- a/kernel/bpf/offload.c +++ b/kernel/bpf/offload.c @@ -25,6 +25,7 @@ #include #include #include +#include #include /* Protects offdevs, members of bpf_offload_netdev and offload members @@ -528,13 +529,14 @@ struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr) return ERR_PTR(-ENOMEM); bpf_map_init_from_attr(&offmap->map, attr); - rtnl_lock(); - down_write(&bpf_devs_lock); offmap->netdev = __dev_get_by_index(net, attr->map_ifindex); err = bpf_dev_offload_check(offmap->netdev); if (err) - goto err_unlock; + goto err_unlock_rtnl; + + netdev_lock_ops(offmap->netdev); + down_write(&bpf_devs_lock); ondev = bpf_offload_find_netdev(offmap->netdev); if (!ondev) { @@ -548,12 +550,15 @@ struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr) list_add_tail(&offmap->offloads, &ondev->maps); up_write(&bpf_devs_lock); + netdev_unlock_ops(offmap->netdev); rtnl_unlock(); return &offmap->map; err_unlock: up_write(&bpf_devs_lock); + netdev_unlock_ops(offmap->netdev); +err_unlock_rtnl: rtnl_unlock(); bpf_map_area_free(offmap); return ERR_PTR(err); diff --git a/kernel/cgroup/dmem.c b/kernel/cgroup/dmem.c index fbe34299673d3..10b63433f0573 100644 --- a/kernel/cgroup/dmem.c +++ b/kernel/cgroup/dmem.c @@ -220,60 +220,32 @@ dmem_cgroup_calculate_protection(struct dmem_cgroup_pool_state *limit_pool, struct dmem_cgroup_pool_state *test_pool) { struct page_counter *climit; - struct cgroup_subsys_state *css, *next_css; + struct cgroup_subsys_state *css; struct dmemcg_state *dmemcg_iter; - struct dmem_cgroup_pool_state *pool, *parent_pool; - bool found_descendant; + struct dmem_cgroup_pool_state *pool, *found_pool; climit = &limit_pool->cnt; rcu_read_lock(); - parent_pool = pool = limit_pool; - css = &limit_pool->cs->css; - /* - * This logic is roughly equivalent to css_foreach_descendant_pre, - * except we also track the parent pool to find out which pool we need - * to calculate protection values for. - * - * We can stop the traversal once we find test_pool among the - * descendants since we don't really care about any others. 
- */ - while (pool != test_pool) { - next_css = css_next_child(NULL, css); - if (next_css) { - parent_pool = pool; - } else { - while (css != &limit_pool->cs->css) { - next_css = css_next_child(css, css->parent); - if (next_css) - break; - css = css->parent; - parent_pool = pool_parent(parent_pool); - } - /* - * We can only hit this when test_pool is not a - * descendant of limit_pool. - */ - if (WARN_ON_ONCE(css == &limit_pool->cs->css)) - break; - } - css = next_css; - - found_descendant = false; + css_for_each_descendant_pre(css, &limit_pool->cs->css) { dmemcg_iter = container_of(css, struct dmemcg_state, css); + found_pool = NULL; list_for_each_entry_rcu(pool, &dmemcg_iter->pools, css_node) { - if (pool_parent(pool) == parent_pool) { - found_descendant = true; + if (pool->region == limit_pool->region) { + found_pool = pool; break; } } - if (!found_descendant) + if (!found_pool) continue; page_counter_calculate_protection( - climit, &pool->cnt, true); + climit, &found_pool->cnt, true); + + if (found_pool == test_pool) + break; } rcu_read_unlock(); } diff --git a/kernel/events/core.c b/kernel/events/core.c index bcb09e011e9e1..823aa08249161 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -4950,7 +4950,7 @@ static struct perf_event_pmu_context * find_get_pmu_context(struct pmu *pmu, struct perf_event_context *ctx, struct perf_event *event) { - struct perf_event_pmu_context *new = NULL, *epc; + struct perf_event_pmu_context *new = NULL, *pos = NULL, *epc; void *task_ctx_data = NULL; if (!ctx->task) { @@ -5007,12 +5007,19 @@ find_get_pmu_context(struct pmu *pmu, struct perf_event_context *ctx, atomic_inc(&epc->refcount); goto found_epc; } + /* Make sure the pmu_ctx_list is sorted by PMU type: */ + if (!pos && epc->pmu->type > pmu->type) + pos = epc; } epc = new; new = NULL; - list_add(&epc->pmu_ctx_entry, &ctx->pmu_ctx_list); + if (!pos) + list_add_tail(&epc->pmu_ctx_entry, &ctx->pmu_ctx_list); + else + list_add(&epc->pmu_ctx_entry, pos->pmu_ctx_entry.prev); + epc->ctx = ctx; found_epc: @@ -5962,14 +5969,15 @@ static int _perf_event_period(struct perf_event *event, u64 value) if (!value) return -EINVAL; - if (event->attr.freq && value > sysctl_perf_event_sample_rate) - return -EINVAL; - - if (perf_event_check_period(event, value)) - return -EINVAL; - - if (!event->attr.freq && (value & (1ULL << 63))) - return -EINVAL; + if (event->attr.freq) { + if (value > sysctl_perf_event_sample_rate) + return -EINVAL; + } else { + if (perf_event_check_period(event, value)) + return -EINVAL; + if (value & (1ULL << 63)) + return -EINVAL; + } event_function_call(event, __perf_event_period, &value); @@ -8321,7 +8329,8 @@ void perf_event_exec(void) perf_event_enable_on_exec(ctx); perf_event_remove_on_exec(ctx); - perf_iterate_ctx(ctx, perf_event_addr_filters_exec, NULL, true); + scoped_guard(rcu) + perf_iterate_ctx(ctx, perf_event_addr_filters_exec, NULL, true); perf_unpin_context(ctx); put_ctx(ctx); @@ -11821,6 +11830,21 @@ static int pmu_dev_alloc(struct pmu *pmu) static struct lock_class_key cpuctx_mutex; static struct lock_class_key cpuctx_lock; +static bool idr_cmpxchg(struct idr *idr, unsigned long id, void *old, void *new) +{ + void *tmp, *val = idr_find(idr, id); + + if (val != old) + return false; + + tmp = idr_replace(idr, new, id); + if (IS_ERR(tmp)) + return false; + + WARN_ON_ONCE(tmp != val); + return true; +} + int perf_pmu_register(struct pmu *pmu, const char *name, int type) { int cpu, ret, max = PERF_TYPE_MAX; @@ -11847,7 +11871,7 @@ int perf_pmu_register(struct pmu *pmu, 
const char *name, int type) if (type >= 0) max = type; - ret = idr_alloc(&pmu_idr, pmu, max, 0, GFP_KERNEL); + ret = idr_alloc(&pmu_idr, NULL, max, 0, GFP_KERNEL); if (ret < 0) goto free_pdc; @@ -11855,6 +11879,7 @@ int perf_pmu_register(struct pmu *pmu, const char *name, int type) type = ret; pmu->type = type; + atomic_set(&pmu->exclusive_cnt, 0); if (pmu_bus_running && !pmu->dev) { ret = pmu_dev_alloc(pmu); @@ -11903,14 +11928,22 @@ int perf_pmu_register(struct pmu *pmu, const char *name, int type) if (!pmu->event_idx) pmu->event_idx = perf_event_idx_default; + /* + * Now that the PMU is complete, make it visible to perf_try_init_event(). + */ + if (!idr_cmpxchg(&pmu_idr, pmu->type, NULL, pmu)) + goto free_context; list_add_rcu(&pmu->entry, &pmus); - atomic_set(&pmu->exclusive_cnt, 0); + ret = 0; unlock: mutex_unlock(&pmus_lock); return ret; +free_context: + free_percpu(pmu->cpu_pmu_context); + free_dev: if (pmu->dev && pmu->dev != PMU_NULL_DEV) { device_del(pmu->dev); @@ -11930,6 +11963,8 @@ void perf_pmu_unregister(struct pmu *pmu) { mutex_lock(&pmus_lock); list_del_rcu(&pmu->entry); + idr_remove(&pmu_idr, pmu->type); + mutex_unlock(&pmus_lock); /* * We dereference the pmu list under both SRCU and regular RCU, so @@ -11939,7 +11974,6 @@ void perf_pmu_unregister(struct pmu *pmu) synchronize_rcu(); free_percpu(pmu->pmu_disable_count); - idr_remove(&pmu_idr, pmu->type); if (pmu_bus_running && pmu->dev && pmu->dev != PMU_NULL_DEV) { if (pmu->nr_addr_filters) device_remove_file(pmu->dev, &dev_attr_nr_addr_filters); @@ -11947,7 +11981,6 @@ void perf_pmu_unregister(struct pmu *pmu) put_device(pmu->dev); } free_pmu_context(pmu); - mutex_unlock(&pmus_lock); } EXPORT_SYMBOL_GPL(perf_pmu_unregister); diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index 2ca797cbe465f..b4ca8898fe178 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -417,7 +417,7 @@ static void update_ref_ctr_warn(struct uprobe *uprobe, struct mm_struct *mm, short d) { pr_warn("ref_ctr %s failed for inode: 0x%lx offset: " - "0x%llx ref_ctr_offset: 0x%llx of mm: 0x%pK\n", + "0x%llx ref_ctr_offset: 0x%llx of mm: 0x%p\n", d > 0 ? "increment" : "decrement", uprobe->inode->i_ino, (unsigned long long) uprobe->offset, (unsigned long long) uprobe->ref_ctr_offset, mm); @@ -495,6 +495,11 @@ int uprobe_write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm, if (ret <= 0) goto put_old; + if (is_zero_page(old_page)) { + ret = -EINVAL; + goto put_old; + } + if (WARN(!is_register && PageCompound(old_page), "uprobe unregister should never work on compound page\n")) { ret = -EINVAL; @@ -762,10 +767,14 @@ static struct uprobe *hprobe_expire(struct hprobe *hprobe, bool get) enum hprobe_state hstate; /* - * return_instance's hprobe is protected by RCU. - * Underlying uprobe is itself protected from reuse by SRCU. + * Caller should guarantee that return_instance is not going to be + * freed from under us. This can be achieved either through holding + * rcu_read_lock() or by owning return_instance in the first place. + * + * Underlying uprobe is itself protected from reuse by SRCU, so ensure + * SRCU lock is held properly. 
*/ - lockdep_assert(rcu_read_lock_held() && srcu_read_lock_held(&uretprobes_srcu)); + lockdep_assert(srcu_read_lock_held(&uretprobes_srcu)); hstate = READ_ONCE(hprobe->state); switch (hstate) { diff --git a/kernel/locking/rtmutex_common.h b/kernel/locking/rtmutex_common.h index c38a2d2d4a7ee..78dd3d8c65544 100644 --- a/kernel/locking/rtmutex_common.h +++ b/kernel/locking/rtmutex_common.h @@ -59,8 +59,8 @@ struct rt_mutex_waiter { }; /** - * rt_wake_q_head - Wrapper around regular wake_q_head to support - * "sleeping" spinlocks on RT + * struct rt_wake_q_head - Wrapper around regular wake_q_head to support + * "sleeping" spinlocks on RT * @head: The regular wake_q_head for sleeping lock variants * @rtlock_task: Task pointer for RT lock (spin/rwlock) wakeups */ diff --git a/kernel/locking/semaphore.c b/kernel/locking/semaphore.c index 34bfae72f2952..de9117c0e671e 100644 --- a/kernel/locking/semaphore.c +++ b/kernel/locking/semaphore.c @@ -29,6 +29,7 @@ #include #include #include +#include #include #include #include @@ -38,7 +39,7 @@ static noinline void __down(struct semaphore *sem); static noinline int __down_interruptible(struct semaphore *sem); static noinline int __down_killable(struct semaphore *sem); static noinline int __down_timeout(struct semaphore *sem, long timeout); -static noinline void __up(struct semaphore *sem); +static noinline void __up(struct semaphore *sem, struct wake_q_head *wake_q); /** * down - acquire the semaphore @@ -183,13 +184,16 @@ EXPORT_SYMBOL(down_timeout); void __sched up(struct semaphore *sem) { unsigned long flags; + DEFINE_WAKE_Q(wake_q); raw_spin_lock_irqsave(&sem->lock, flags); if (likely(list_empty(&sem->wait_list))) sem->count++; else - __up(sem); + __up(sem, &wake_q); raw_spin_unlock_irqrestore(&sem->lock, flags); + if (!wake_q_empty(&wake_q)) + wake_up_q(&wake_q); } EXPORT_SYMBOL(up); @@ -269,11 +273,12 @@ static noinline int __sched __down_timeout(struct semaphore *sem, long timeout) return __down_common(sem, TASK_UNINTERRUPTIBLE, timeout); } -static noinline void __sched __up(struct semaphore *sem) +static noinline void __sched __up(struct semaphore *sem, + struct wake_q_head *wake_q) { struct semaphore_waiter *waiter = list_first_entry(&sem->wait_list, struct semaphore_waiter, list); list_del(&waiter->list); waiter->up = true; - wake_up_process(waiter->task); + wake_q_add(wake_q, waiter->task); } diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c index 8f6cfec87555a..7098ed44e717d 100644 --- a/kernel/pid_namespace.c +++ b/kernel/pid_namespace.c @@ -107,7 +107,7 @@ static struct pid_namespace *create_pid_namespace(struct user_namespace *user_ns goto out_free_idr; ns->ns.ops = &pidns_operations; - ns->pid_max = parent_pid_ns->pid_max; + ns->pid_max = PID_MAX_LIMIT; err = register_pidns_sysctls(ns); if (err) goto out_free_inum; diff --git a/kernel/rseq.c b/kernel/rseq.c index 442aba29bc4cf..2cb16091ec0ae 100644 --- a/kernel/rseq.c +++ b/kernel/rseq.c @@ -507,9 +507,6 @@ SYSCALL_DEFINE4(rseq, struct rseq __user *, rseq, u32, rseq_len, return -EINVAL; if (!access_ok(rseq, rseq_len)) return -EFAULT; - current->rseq = rseq; - current->rseq_len = rseq_len; - current->rseq_sig = sig; #ifdef CONFIG_DEBUG_RSEQ /* * Initialize the in-kernel rseq fields copy for validation of @@ -521,6 +518,14 @@ SYSCALL_DEFINE4(rseq, struct rseq __user *, rseq, u32, rseq_len, get_user(rseq_kernel_fields(current)->mm_cid, &rseq->mm_cid)) return -EFAULT; #endif + /* + * Activate the registration by setting the rseq area address, length + * and signature in the task 
struct. + */ + current->rseq = rseq; + current->rseq_len = rseq_len; + current->rseq_sig = sig; + /* * If rseq was previously inactive, and has just been * registered, ensure the cpu_id_start and cpu_id fields diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 9aecd914ac691..67189907214d3 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -7285,7 +7285,7 @@ void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task) #if !defined(CONFIG_PREEMPTION) || defined(CONFIG_PREEMPT_DYNAMIC) int __sched __cond_resched(void) { - if (should_resched(0)) { + if (should_resched(0) && !irqs_disabled()) { preempt_schedule_common(); return 1; } diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c index 5d9143dd08791..6dab4854c6c08 100644 --- a/kernel/sched/cputime.c +++ b/kernel/sched/cputime.c @@ -9,8 +9,6 @@ #ifdef CONFIG_IRQ_TIME_ACCOUNTING -DEFINE_STATIC_KEY_FALSE(sched_clock_irqtime); - /* * There are no locks covering percpu hardirq/softirq time. * They are only modified in vtime_account, on corresponding CPU @@ -24,14 +22,16 @@ DEFINE_STATIC_KEY_FALSE(sched_clock_irqtime); */ DEFINE_PER_CPU(struct irqtime, cpu_irqtime); +int sched_clock_irqtime; + void enable_sched_clock_irqtime(void) { - static_branch_enable(&sched_clock_irqtime); + sched_clock_irqtime = 1; } void disable_sched_clock_irqtime(void) { - static_branch_disable(&sched_clock_irqtime); + sched_clock_irqtime = 0; } static void irqtime_account_delta(struct irqtime *irqtime, u64 delta, diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c index 38e4537790af7..ff4df16b5186d 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c @@ -3189,7 +3189,7 @@ int sched_dl_global_validate(void) * value smaller than the currently allocated bandwidth in * any of the root_domains. */ - for_each_possible_cpu(cpu) { + for_each_online_cpu(cpu) { rcu_read_lock_sched(); if (dl_bw_visited(cpu, gen)) diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c index 5a81d9a1e31f2..7b9dfee858e79 100644 --- a/kernel/sched/ext.c +++ b/kernel/sched/ext.c @@ -3117,7 +3117,6 @@ static struct task_struct *pick_task_scx(struct rq *rq) { struct task_struct *prev = rq->curr; struct task_struct *p; - bool prev_on_scx = prev->sched_class == &ext_sched_class; bool keep_prev = rq->scx.flags & SCX_RQ_BAL_KEEP; bool kick_idle = false; @@ -3137,14 +3136,18 @@ static struct task_struct *pick_task_scx(struct rq *rq) * if pick_task_scx() is called without preceding balance_scx(). */ if (unlikely(rq->scx.flags & SCX_RQ_BAL_PENDING)) { - if (prev_on_scx) { + if (prev->scx.flags & SCX_TASK_QUEUED) { keep_prev = true; } else { keep_prev = false; kick_idle = true; } - } else if (unlikely(keep_prev && !prev_on_scx)) { - /* only allowed during transitions */ + } else if (unlikely(keep_prev && + prev->sched_class != &ext_sched_class)) { + /* + * Can happen while enabling as SCX_RQ_BAL_PENDING assertion is + * conditional on scx_enabled() and may have been skipped. 
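
Note the ordering fix in the rseq hunk: current->rseq and friends are now written only after the fallible CONFIG_DEBUG_RSEQ get_user() copies succeed, so an -EFAULT return leaves the task completely unregistered and nothing needs unwinding. A hedged sketch of that activate-last shape; all types and names are invented:

#include <errno.h>
#include <stddef.h>

struct task { void *rseq; unsigned rseq_len; unsigned rseq_sig; };

static int validate(const void *area, unsigned len)
{
        return (area && len >= 32) ? 0 : -EFAULT;  /* stand-in for get_user() checks */
}

static int rseq_register(struct task *t, void *area, unsigned len, unsigned sig)
{
        int err = validate(area, len);

        if (err)
                return err;        /* task state untouched: nothing to undo */

        /* Activate only after every fallible step has succeeded. */
        t->rseq = area;
        t->rseq_len = len;
        t->rseq_sig = sig;
        return 0;
}
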
+ */ WARN_ON_ONCE(scx_ops_enable_state() == SCX_OPS_ENABLED); keep_prev = false; } @@ -6419,6 +6422,9 @@ static bool check_builtin_idle_enabled(void) __bpf_kfunc s32 scx_bpf_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags, bool *is_idle) { + if (!ops_cpu_valid(prev_cpu, NULL)) + goto prev_cpu; + if (!check_builtin_idle_enabled()) goto prev_cpu; diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 1c0ef435a7aae..c798d27952431 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -4045,15 +4045,17 @@ static inline bool child_cfs_rq_on_list(struct cfs_rq *cfs_rq) { struct cfs_rq *prev_cfs_rq; struct list_head *prev; + struct rq *rq = rq_of(cfs_rq); if (cfs_rq->on_list) { prev = cfs_rq->leaf_cfs_rq_list.prev; } else { - struct rq *rq = rq_of(cfs_rq); - prev = rq->tmp_alone_branch; } + if (prev == &rq->leaf_cfs_rq_list) + return false; + prev_cfs_rq = container_of(prev, struct cfs_rq, leaf_cfs_rq_list); return (prev_cfs_rq->tg->parent == cfs_rq->tg); diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index b93c8c3dc05a5..023b844159c94 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -3259,11 +3259,11 @@ struct irqtime { }; DECLARE_PER_CPU(struct irqtime, cpu_irqtime); -DECLARE_STATIC_KEY_FALSE(sched_clock_irqtime); +extern int sched_clock_irqtime; static inline int irqtime_enabled(void) { - return static_branch_likely(&sched_clock_irqtime); + return sched_clock_irqtime; } /* @@ -3698,10 +3698,28 @@ static inline int __mm_cid_try_get(struct task_struct *t, struct mm_struct *mm) { struct cpumask *cidmask = mm_cidmask(mm); struct mm_cid __percpu *pcpu_cid = mm->pcpu_cid; - int cid = __this_cpu_read(pcpu_cid->recent_cid); + int cid, max_nr_cid, allowed_max_nr_cid; + /* + * After shrinking the number of threads or reducing the number + * of allowed cpus, reduce the value of max_nr_cid so expansion + * of cid allocation will preserve cache locality if the number + * of threads or allowed cpus increase again. + */ + max_nr_cid = atomic_read(&mm->max_nr_cid); + while ((allowed_max_nr_cid = min_t(int, READ_ONCE(mm->nr_cpus_allowed), + atomic_read(&mm->mm_users))), + max_nr_cid > allowed_max_nr_cid) { + /* atomic_try_cmpxchg loads previous mm->max_nr_cid into max_nr_cid. */ + if (atomic_try_cmpxchg(&mm->max_nr_cid, &max_nr_cid, allowed_max_nr_cid)) { + max_nr_cid = allowed_max_nr_cid; + break; + } + } /* Try to re-use recent cid. This improves cache locality. */ - if (!mm_cid_is_unset(cid) && !cpumask_test_and_set_cpu(cid, cidmask)) + cid = __this_cpu_read(pcpu_cid->recent_cid); + if (!mm_cid_is_unset(cid) && cid < max_nr_cid && + !cpumask_test_and_set_cpu(cid, cidmask)) return cid; /* * Expand cid allocation if the maximum number of concurrency @@ -3709,8 +3727,9 @@ static inline int __mm_cid_try_get(struct task_struct *t, struct mm_struct *mm) * and number of threads. Expanding cid allocation as much as * possible improves cache locality. */ - cid = atomic_read(&mm->max_nr_cid); + cid = max_nr_cid; while (cid < READ_ONCE(mm->nr_cpus_allowed) && cid < atomic_read(&mm->mm_users)) { + /* atomic_try_cmpxchg loads previous mm->max_nr_cid into cid. 
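
The clamp loop above depends on atomic_try_cmpxchg() reloading the latest value into max_nr_cid on failure, so the loop condition is re-evaluated against fresh state each iteration. The same idiom in portable C11, assuming nothing beyond stdatomic:

#include <stdatomic.h>

/* Shrink cap toward limit; returns the value of cap actually in effect. */
static int clamp_cap(_Atomic int *cap, int limit)
{
        int cur = atomic_load(cap);

        while (cur > limit) {
                /* On failure, cur is reloaded with the current value. */
                if (atomic_compare_exchange_weak(cap, &cur, limit))
                        return limit;
        }
        return cur;        /* already <= limit, possibly lowered by another CPU */
}

int main(void)
{
        _Atomic int cap = 8;

        return clamp_cap(&cap, 3) == 3 ? 0 : 1;
}
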
*/ if (!atomic_try_cmpxchg(&mm->max_nr_cid, &cid, cid + 1)) continue; if (!cpumask_test_and_set_cpu(cid, cidmask)) diff --git a/kernel/time/posix-clock.c b/kernel/time/posix-clock.c index 1af0bb2cc45c0..fe963384d5c2a 100644 --- a/kernel/time/posix-clock.c +++ b/kernel/time/posix-clock.c @@ -129,6 +129,7 @@ static int posix_clock_open(struct inode *inode, struct file *fp) goto out; } pccontext->clk = clk; + pccontext->fp = fp; if (clk->ops.open) { err = clk->ops.open(pccontext, fp->f_mode); if (err) { @@ -251,7 +252,7 @@ static int pc_clock_adjtime(clockid_t id, struct __kernel_timex *tx) if (err) return err; - if ((cd.fp->f_mode & FMODE_WRITE) == 0) { + if (tx->modes && (cd.fp->f_mode & FMODE_WRITE) == 0) { err = -EACCES; goto out; } diff --git a/kernel/trace/fprobe.c b/kernel/trace/fprobe.c index 2560b312ad576..33082c4e8154e 100644 --- a/kernel/trace/fprobe.c +++ b/kernel/trace/fprobe.c @@ -403,13 +403,12 @@ static void fprobe_graph_remove_ips(unsigned long *addrs, int num) lockdep_assert_held(&fprobe_mutex); fprobe_graph_active--; - if (!fprobe_graph_active) { - /* Q: should we unregister it ? */ + /* Q: should we unregister it ? */ + if (!fprobe_graph_active) unregister_ftrace_graph(&fprobe_graph_ops); - return; - } - ftrace_set_filter_ips(&fprobe_graph_ops.ops, addrs, num, 1, 0); + if (num) + ftrace_set_filter_ips(&fprobe_graph_ops.ops, addrs, num, 1, 0); } static int symbols_cmp(const void *a, const void *b) @@ -679,8 +678,7 @@ int unregister_fprobe(struct fprobe *fp) } del_fprobe_hash(fp); - if (count) - fprobe_graph_remove_ips(addrs, count); + fprobe_graph_remove_ips(addrs, count); kfree_rcu(hlist_array, rcu); fp->hlist_array = NULL; diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 728ecda6e8d4d..fc88e0688daf0 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -540,6 +540,7 @@ static int function_stat_show(struct seq_file *m, void *v) static struct trace_seq s; unsigned long long avg; unsigned long long stddev; + unsigned long long stddev_denom; #endif guard(mutex)(&ftrace_profile_lock); @@ -559,23 +560,19 @@ static int function_stat_show(struct seq_file *m, void *v) #ifdef CONFIG_FUNCTION_GRAPH_TRACER seq_puts(m, " "); - /* Sample standard deviation (s^2) */ - if (rec->counter <= 1) - stddev = 0; - else { - /* - * Apply Welford's method: - * s^2 = 1 / (n * (n-1)) * (n * \Sum (x_i)^2 - (\Sum x_i)^2) - */ + /* + * Variance formula: + * s^2 = 1 / (n * (n-1)) * (n * \Sum (x_i)^2 - (\Sum x_i)^2) + * Maybe Welford's method is better here? + * Divide only by 1000 for ns^2 -> us^2 conversion. + * trace_print_graph_duration will divide by 1000 again. + */ + stddev = 0; + stddev_denom = rec->counter * (rec->counter - 1) * 1000; + if (stddev_denom) { stddev = rec->counter * rec->time_squared - rec->time * rec->time; - - /* - * Divide only 1000 for ns^2 -> us^2 conversion. - * trace_print_graph_duration will divide 1000 again. - */ - stddev = div64_ul(stddev, - rec->counter * (rec->counter - 1) * 1000); + stddev = div64_ul(stddev, stddev_denom); } trace_seq_init(&s); @@ -3220,15 +3217,22 @@ static struct ftrace_hash *copy_hash(struct ftrace_hash *src) * The filter_hash updates uses just the append_hash() function * and the notrace_hash does not. 
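
The profiler only tracks n, sum(x_i) and sum(x_i^2), which suffices for the sample variance s^2 = (n*sum(x_i^2) - (sum x_i)^2) / (n*(n-1)); the restructured code folds the ns^2 -> us^2 conversion into stddev_denom and makes the n <= 1 case fall out of the denominator test. A quick stand-alone check of the formula against a two-pass computation:

#include <stdio.h>

int main(void)
{
        unsigned long long x[] = { 3, 7, 7, 19 };
        unsigned long long n = 4, sum = 0, sumsq = 0;

        for (unsigned long long i = 0; i < n; i++) {
                sum += x[i];
                sumsq += x[i] * x[i];
        }

        /* s^2 = (n*sum(x_i^2) - (sum x_i)^2) / (n*(n-1)); zero denominator => 0 */
        unsigned long long denom = n * (n - 1);
        unsigned long long s2 = denom ? (n * sumsq - sum * sum) / denom : 0;

        printf("s^2 = %llu\n", s2);        /* (4*468 - 1296)/12 = 48 */
        return 0;
}
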
*/ -static int append_hash(struct ftrace_hash **hash, struct ftrace_hash *new_hash) +static int append_hash(struct ftrace_hash **hash, struct ftrace_hash *new_hash, + int size_bits) { struct ftrace_func_entry *entry; int size; int i; - /* An empty hash does everything */ - if (ftrace_hash_empty(*hash)) - return 0; + if (*hash) { + /* An empty hash does everything */ + if (ftrace_hash_empty(*hash)) + return 0; + } else { + *hash = alloc_ftrace_hash(size_bits); + if (!*hash) + return -ENOMEM; + } /* If new_hash has everything make hash have everything */ if (ftrace_hash_empty(new_hash)) { @@ -3292,16 +3296,18 @@ static int intersect_hash(struct ftrace_hash **hash, struct ftrace_hash *new_has /* Return a new hash that has a union of all @ops->filter_hash entries */ static struct ftrace_hash *append_hashes(struct ftrace_ops *ops) { - struct ftrace_hash *new_hash; + struct ftrace_hash *new_hash = NULL; struct ftrace_ops *subops; + int size_bits; int ret; - new_hash = alloc_ftrace_hash(ops->func_hash->filter_hash->size_bits); - if (!new_hash) - return NULL; + if (ops->func_hash->filter_hash) + size_bits = ops->func_hash->filter_hash->size_bits; + else + size_bits = FTRACE_HASH_DEFAULT_BITS; list_for_each_entry(subops, &ops->subop_list, list) { - ret = append_hash(&new_hash, subops->func_hash->filter_hash); + ret = append_hash(&new_hash, subops->func_hash->filter_hash, size_bits); if (ret < 0) { free_ftrace_hash(new_hash); return NULL; @@ -3310,7 +3316,8 @@ static struct ftrace_hash *append_hashes(struct ftrace_ops *ops) if (ftrace_hash_empty(new_hash)) break; } - return new_hash; + /* Can't return NULL as that means this failed */ + return new_hash ? : EMPTY_HASH; } /* Make @ops trace evenything except what all its subops do not trace */ @@ -3505,7 +3512,8 @@ int ftrace_startup_subops(struct ftrace_ops *ops, struct ftrace_ops *subops, int filter_hash = alloc_and_copy_ftrace_hash(size_bits, ops->func_hash->filter_hash); if (!filter_hash) return -ENOMEM; - ret = append_hash(&filter_hash, subops->func_hash->filter_hash); + ret = append_hash(&filter_hash, subops->func_hash->filter_hash, + size_bits); if (ret < 0) { free_ftrace_hash(filter_hash); return ret; @@ -5707,6 +5715,9 @@ __ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove) return -ENOENT; free_hash_entry(hash, entry); return 0; + } else if (__ftrace_lookup_ip(hash, ip) != NULL) { + /* Already exists */ + return 0; } entry = add_hash_entry(hash, ip); diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index 4cb275316e51e..513de9ceb80ef 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c @@ -1591,6 +1591,13 @@ s_next(struct seq_file *m, void *v, loff_t *pos) return iter; #endif + /* + * The iter is allocated in s_start() and passed via the 'v' + * parameter. To stop the iterator, NULL must be returned. But + * the return value is what the 'v' parameter in s_stop() receives + * and frees. Free iter here as it will no longer be used. 
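
The kfree(iter) added in s_next() honors the seq_file contract: whatever s_next() returns becomes the v passed to s_show() and ultimately s_stop(), so once s_next() returns NULL to terminate iteration it must free the iterator itself, since s_stop() will only ever see (and kfree) NULL. A reduced user-space model of that hand-off, with hypothetical names:

#include <stdio.h>
#include <stdlib.h>

struct iter { int pos; };

static void *my_next(void *v, int *pos)
{
        struct iter *it = v;

        if (++it->pos < 3) {
                *pos = it->pos;
                return it;              /* handed back to show(), then stop() */
        }
        free(it);                       /* end of sequence: stop() only sees NULL */
        return NULL;
}

static void my_stop(void *v)
{
        free(v);                        /* free(NULL) is a no-op */
}

int main(void)
{
        struct iter *it = calloc(1, sizeof(*it));  /* start() */
        int pos = 0;
        void *v = it;

        while (v) {
                printf("show %d\n", pos);          /* show() */
                v = my_next(v, &pos);
        }
        my_stop(v);
        return 0;
}
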
+ */ + kfree(iter); return NULL; } @@ -1667,9 +1674,9 @@ static int s_show(struct seq_file *m, void *v) } #endif -static void s_stop(struct seq_file *m, void *p) +static void s_stop(struct seq_file *m, void *v) { - kfree(p); + kfree(v); t_stop(m, NULL); } diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c index 261163b00137a..53dc6719181e5 100644 --- a/kernel/trace/trace_events_hist.c +++ b/kernel/trace/trace_events_hist.c @@ -5689,12 +5689,16 @@ static int event_hist_open(struct inode *inode, struct file *file) guard(mutex)(&event_mutex); event_file = event_file_data(file); - if (!event_file) - return -ENODEV; + if (!event_file) { + ret = -ENODEV; + goto err; + } hist_file = kzalloc(sizeof(*hist_file), GFP_KERNEL); - if (!hist_file) - return -ENOMEM; + if (!hist_file) { + ret = -ENOMEM; + goto err; + } hist_file->file = file; hist_file->last_act = get_hist_hit_count(event_file); @@ -5702,9 +5706,14 @@ static int event_hist_open(struct inode *inode, struct file *file) /* Clear private_data to avoid warning in single_open() */ file->private_data = NULL; ret = single_open(file, hist_show, hist_file); - if (ret) + if (ret) { kfree(hist_file); + goto err; + } + return 0; +err: + tracing_release_file_tr(inode, file); return ret; } @@ -5979,7 +5988,10 @@ static int event_hist_debug_open(struct inode *inode, struct file *file) /* Clear private_data to avoid warning in single_open() */ file->private_data = NULL; - return single_open(file, hist_debug_show, file); + ret = single_open(file, hist_debug_show, file); + if (ret) + tracing_release_file_tr(inode, file); + return ret; } const struct file_operations event_hist_debug_fops = { @@ -6724,27 +6736,27 @@ static int event_hist_trigger_parse(struct event_command *cmd_ops, if (existing_hist_update_only(glob, trigger_data, file)) goto out_free; - ret = event_trigger_register(cmd_ops, file, glob, trigger_data); - if (ret < 0) - goto out_free; + if (!get_named_trigger_data(trigger_data)) { - if (get_named_trigger_data(trigger_data)) - goto enable; + ret = create_actions(hist_data); + if (ret) + goto out_free; - ret = create_actions(hist_data); - if (ret) - goto out_unreg; + if (has_hist_vars(hist_data) || hist_data->n_var_refs) { + ret = save_hist_vars(hist_data); + if (ret) + goto out_free; + } - if (has_hist_vars(hist_data) || hist_data->n_var_refs) { - ret = save_hist_vars(hist_data); + ret = tracing_map_init(hist_data->map); if (ret) - goto out_unreg; + goto out_free; } - ret = tracing_map_init(hist_data->map); - if (ret) - goto out_unreg; -enable: + ret = event_trigger_register(cmd_ops, file, glob, trigger_data); + if (ret < 0) + goto out_free; + ret = hist_trigger_enable(trigger_data, file); if (ret) goto out_unreg; diff --git a/kernel/trace/trace_fprobe.c b/kernel/trace/trace_fprobe.c index b8f3c4ba309b6..985ff98272da8 100644 --- a/kernel/trace/trace_fprobe.c +++ b/kernel/trace/trace_fprobe.c @@ -920,13 +920,8 @@ static void __find_tracepoint_module_cb(struct tracepoint *tp, struct module *mo if (!data->tpoint && !strcmp(data->tp_name, tp->name)) { data->tpoint = tp; - if (!data->mod) { + if (!data->mod) data->mod = mod; - if (!try_module_get(data->mod)) { - data->tpoint = NULL; - data->mod = NULL; - } - } } } @@ -938,13 +933,7 @@ static void __find_tracepoint_cb(struct tracepoint *tp, void *priv) data->tpoint = tp; } -/* - * Find a tracepoint from kernel and module. If the tracepoint is in a module, - * this increments the module refcount to prevent unloading until the - * trace_fprobe is registered to the list. 
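
event_hist_open() previously leaked the reference taken by the tracing file's open path whenever an early failure returned directly; routing every failure through one err label that calls tracing_release_file_tr() fixes that. The general shape of the pattern, with stand-in resource helpers:

#include <errno.h>
#include <stdlib.h>

struct res { int dummy; };

static struct res *acquire(void) { return calloc(1, sizeof(struct res)); }
static void release(struct res *r) { free(r); }

static int open_like(struct res **out)
{
        struct res *ref = acquire();       /* taken up front, must not leak */
        struct res *priv;
        int ret;

        priv = acquire();
        if (!priv) {
                ret = -ENOMEM;
                goto err;
        }

        *out = priv;
        return 0;                          /* success: ref kept until close() */
err:
        release(ref);                      /* every failure path drops it */
        return ret;
}
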
After registering the trace_fprobe - * on the trace_fprobe list, the module refcount is decremented because - * tracepoint_probe_module_cb will handle it. - */ +/* Find a tracepoint from kernel and module. */ static struct tracepoint *find_tracepoint(const char *tp_name, struct module **tp_mod) { @@ -973,6 +962,7 @@ static void reenable_trace_fprobe(struct trace_fprobe *tf) } } +/* Find a tracepoint from specified module. */ static struct tracepoint *find_tracepoint_in_module(struct module *mod, const char *tp_name) { @@ -1008,10 +998,13 @@ static int __tracepoint_probe_module_cb(struct notifier_block *self, reenable_trace_fprobe(tf); } } else if (val == MODULE_STATE_GOING && tp_mod->mod == tf->mod) { - tracepoint_probe_unregister(tf->tpoint, + unregister_fprobe(&tf->fp); + if (trace_fprobe_is_tracepoint(tf)) { + tracepoint_probe_unregister(tf->tpoint, tf->tpoint->probestub, NULL); - tf->tpoint = NULL; - tf->mod = NULL; + tf->tpoint = TRACEPOINT_STUB; + tf->mod = NULL; + } } } mutex_unlock(&event_mutex); @@ -1049,6 +1042,19 @@ static int parse_symbol_and_return(int argc, const char *argv[], if (*is_return) return 0; + if (is_tracepoint) { + tmp = *symbol; + while (*tmp && (isalnum(*tmp) || *tmp == '_')) + tmp++; + if (*tmp) { + /* find a wrong character. */ + trace_probe_log_err(tmp - *symbol, BAD_TP_NAME); + kfree(*symbol); + *symbol = NULL; + return -EINVAL; + } + } + /* If there is $retval, this should be a return fprobe. */ for (i = 2; i < argc; i++) { tmp = strstr(argv[i], "$retval"); @@ -1056,6 +1062,8 @@ static int parse_symbol_and_return(int argc, const char *argv[], if (is_tracepoint) { trace_probe_log_set_index(i); trace_probe_log_err(tmp - argv[i], RETVAL_ON_PROBE); + kfree(*symbol); + *symbol = NULL; return -EINVAL; } *is_return = true; @@ -1161,6 +1169,11 @@ static int trace_fprobe_create_internal(int argc, const char *argv[], if (is_tracepoint) { ctx->flags |= TPARG_FL_TPOINT; tpoint = find_tracepoint(symbol, &tp_mod); + /* lock module until register this tprobe. 
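
The new scan in parse_symbol_and_return() rejects tracepoint names containing anything outside [A-Za-z0-9_] and reports the offset of the first offending character (tmp - *symbol) for the BAD_TP_NAME error log. The check in isolation:

#include <ctype.h>
#include <stdio.h>

/* Returns -1 if the name is clean, else the index of the first bad char. */
static int bad_char_at(const char *name)
{
        const char *tmp = name;

        while (*tmp && (isalnum((unsigned char)*tmp) || *tmp == '_'))
                tmp++;
        return *tmp ? (int)(tmp - name) : -1;
}

int main(void)
{
        printf("%d\n", bad_char_at("sched_switch"));   /* -1: valid */
        printf("%d\n", bad_char_at("sched:switch"));   /* 5: ':' rejected */
        return 0;
}
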
*/ + if (tp_mod && !try_module_get(tp_mod)) { + tpoint = NULL; + tp_mod = NULL; + } if (tpoint) { ctx->funcname = kallsyms_lookup( (unsigned long)tpoint->probestub, @@ -1215,6 +1228,11 @@ static int trace_fprobe_create_internal(int argc, const char *argv[], if (is_return && tf->tp.entry_arg) { tf->fp.entry_handler = trace_fprobe_entry_handler; tf->fp.entry_data_size = traceprobe_get_entry_data_size(&tf->tp); + if (ALIGN(tf->fp.entry_data_size, sizeof(long)) > MAX_FPROBE_DATA_SIZE) { + trace_probe_log_set_index(2); + trace_probe_log_err(0, TOO_MANY_EARGS); + return -E2BIG; + } } ret = traceprobe_set_print_fmt(&tf->tp, diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c index d358c9935164d..df56f9b760109 100644 --- a/kernel/trace/trace_functions.c +++ b/kernel/trace/trace_functions.c @@ -216,7 +216,7 @@ function_trace_call(unsigned long ip, unsigned long parent_ip, parent_ip = function_get_true_parent_ip(parent_ip, fregs); - trace_ctx = tracing_gen_ctx(); + trace_ctx = tracing_gen_ctx_dec(); data = this_cpu_ptr(tr->array_buffer.data); if (!atomic_read(&data->disabled)) @@ -321,7 +321,6 @@ function_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip, struct trace_array *tr = op->private; struct trace_array_cpu *data; unsigned int trace_ctx; - unsigned long flags; int bit; if (unlikely(!tr->function_enabled)) @@ -347,8 +346,7 @@ function_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip, if (is_repeat_check(tr, last_info, ip, parent_ip)) goto out; - local_save_flags(flags); - trace_ctx = tracing_gen_ctx_flags(flags); + trace_ctx = tracing_gen_ctx_dec(); process_repeats(tr, ip, parent_ip, last_info, trace_ctx); trace_function(tr, ip, parent_ip, trace_ctx); diff --git a/kernel/trace/trace_probe.h b/kernel/trace/trace_probe.h index 5803e6a415705..96792bc4b0924 100644 --- a/kernel/trace/trace_probe.h +++ b/kernel/trace/trace_probe.h @@ -36,7 +36,6 @@ #define MAX_BTF_ARGS_LEN 128 #define MAX_DENTRY_ARGS_LEN 256 #define MAX_STRING_SIZE PATH_MAX -#define MAX_ARG_BUF_LEN (MAX_TRACE_ARGS * MAX_ARG_NAME_LEN) /* Reserved field names */ #define FIELD_STRING_IP "__probe_ip" @@ -481,6 +480,7 @@ extern int traceprobe_define_arg_fields(struct trace_event_call *event_call, C(NON_UNIQ_SYMBOL, "The symbol is not unique"), \ C(BAD_RETPROBE, "Retprobe address must be an function entry"), \ C(NO_TRACEPOINT, "Tracepoint is not found"), \ + C(BAD_TP_NAME, "Invalid character in tracepoint name"),\ C(BAD_ADDR_SUFFIX, "Invalid probed address suffix"), \ C(NO_GROUP_NAME, "Group name is not specified"), \ C(GROUP_TOO_LONG, "Group name is too long"), \ @@ -544,7 +544,8 @@ extern int traceprobe_define_arg_fields(struct trace_event_call *event_call, C(NO_BTF_FIELD, "This field is not found."), \ C(BAD_BTF_TID, "Failed to get BTF type info."),\ C(BAD_TYPE4STR, "This type does not fit for string."),\ - C(NEED_STRING_TYPE, "$comm and immediate-string only accepts string type"), + C(NEED_STRING_TYPE, "$comm and immediate-string only accepts string type"),\ + C(TOO_MANY_EARGS, "Too many entry arguments specified"), #undef C #define C(a, b) TP_ERR_##a diff --git a/kernel/vhost_task.c b/kernel/vhost_task.c index 8800f5acc0071..2ef2e1b800916 100644 --- a/kernel/vhost_task.c +++ b/kernel/vhost_task.c @@ -133,7 +133,7 @@ struct vhost_task *vhost_task_create(bool (*fn)(void *), vtsk = kzalloc(sizeof(*vtsk), GFP_KERNEL); if (!vtsk) - return NULL; + return ERR_PTR(-ENOMEM); init_completion(&vtsk->exited); mutex_init(&vtsk->exit_mutex); vtsk->data = arg; @@ -145,7 +145,7 @@ struct 
vhost_task *vhost_task_create(bool (*fn)(void *), tsk = copy_process(NULL, 0, NUMA_NO_NODE, &args); if (IS_ERR(tsk)) { kfree(vtsk); - return NULL; + return ERR_PTR(PTR_ERR(tsk)); } vtsk->task = tsk; diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 97152f2250fe7..bfe030b443e27 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -2254,8 +2254,10 @@ static void __queue_work(int cpu, struct workqueue_struct *wq, * queues a new work item to a wq after destroy_workqueue(wq). */ if (unlikely(wq->flags & (__WQ_DESTROYING | __WQ_DRAINING) && - WARN_ON_ONCE(!is_chained_work(wq)))) + WARN_ONCE(!is_chained_work(wq), "workqueue: cannot queue %ps on wq %s\n", + work->func, wq->name))) { return; + } rcu_read_lock(); retry: /* pwq which will be used unless @work is executing elsewhere */ diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 1af972a92d06f..16f2ef9478a68 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -2103,7 +2103,7 @@ config FAIL_SKB_REALLOC reallocated, catching possible invalid pointers to the skb. For more information, check - Documentation/dev-tools/fault-injection/fault-injection.rst + Documentation/fault-injection/fault-injection.rst config FAULT_INJECTION_CONFIGFS bool "Configfs interface for fault-injection capabilities" @@ -2481,7 +2481,6 @@ config TEST_IDA config TEST_MISC_MINOR tristate "Basic misc minor Kunit test" if !KUNIT_ALL_TESTS depends on KUNIT - default KUNIT_ALL_TESTS help Kunit test for the misc minor. It tests misc minor functions for dynamic and misc dynamic minor. @@ -2557,15 +2556,6 @@ config TEST_BPF If unsure, say N. -config TEST_BLACKHOLE_DEV - tristate "Test blackhole netdev functionality" - depends on m && NET - help - This builds the "test_blackhole_dev" module that validates the - data path through this blackhole netdev. - - If unsure, say N. - config FIND_BIT_BENCHMARK tristate "Test find_bit functions" help @@ -2888,6 +2878,17 @@ config USERCOPY_KUNIT_TEST on the copy_to/from_user infrastructure, making sure basic user/kernel boundary testing is working. +config BLACKHOLE_DEV_KUNIT_TEST + tristate "Test blackhole netdev functionality" if !KUNIT_ALL_TESTS + depends on NET + depends on KUNIT + default KUNIT_ALL_TESTS + help + This builds the "blackhole_dev_kunit" module that validates the + data path through this blackhole netdev. + + If unsure, say N. 
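
Returning ERR_PTR(-ENOMEM) and ERR_PTR(PTR_ERR(tsk)) instead of NULL lets vhost_task_create() callers see why creation failed, at the cost of every caller switching from a NULL test to IS_ERR()/PTR_ERR(). A user-space imitation of the encoding (the real macros live in include/linux/err.h; these reimplementations are illustrative only):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_ERRNO 4095
#define ERR_PTR(err) ((void *)(long)(err))
#define PTR_ERR(ptr) ((long)(ptr))
#define IS_ERR(ptr)  ((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

static void *create(int fail)
{
        if (fail)
                return ERR_PTR(-ENOMEM);   /* the reason travels with the pointer */
        return malloc(16);
}

int main(void)
{
        void *t = create(1);

        if (IS_ERR(t)) {
                printf("create failed: %ld\n", PTR_ERR(t));
                return 1;
        }
        free(t);
        return 0;
}
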
+ config TEST_UDELAY tristate "udelay test driver" help diff --git a/lib/Makefile b/lib/Makefile index d5cfc7afbbb82..19ff6993c2bc6 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -102,7 +102,6 @@ obj-$(CONFIG_TEST_RUNTIME) += tests/ obj-$(CONFIG_TEST_DEBUG_VIRTUAL) += test_debug_virtual.o obj-$(CONFIG_TEST_MEMCAT_P) += test_memcat_p.o obj-$(CONFIG_TEST_OBJAGG) += test_objagg.o -obj-$(CONFIG_TEST_BLACKHOLE_DEV) += test_blackhole_dev.o obj-$(CONFIG_TEST_MEMINIT) += test_meminit.o obj-$(CONFIG_TEST_LOCKUP) += test_lockup.o obj-$(CONFIG_TEST_HMM) += test_hmm.o @@ -393,6 +392,7 @@ obj-$(CONFIG_FORTIFY_KUNIT_TEST) += fortify_kunit.o obj-$(CONFIG_CRC_KUNIT_TEST) += crc_kunit.o obj-$(CONFIG_SIPHASH_KUNIT_TEST) += siphash_kunit.o obj-$(CONFIG_USERCOPY_KUNIT_TEST) += usercopy_kunit.o +obj-$(CONFIG_BLACKHOLE_DEV_KUNIT_TEST) += blackhole_dev_kunit.o obj-$(CONFIG_GENERIC_LIB_DEVMEM_IS_ALLOWED) += devmem_is_allowed.o diff --git a/lib/test_blackhole_dev.c b/lib/blackhole_dev_kunit.c similarity index 68% rename from lib/test_blackhole_dev.c rename to lib/blackhole_dev_kunit.c index ec290ac2a0d9a..06834ab35f43c 100644 --- a/lib/test_blackhole_dev.c +++ b/lib/blackhole_dev_kunit.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* - * This module tests the blackhole_dev that is created during the + * This tests the blackhole_dev that is created during the * net subsystem initialization. The test this module performs is * by injecting an skb into the stack with skb->dev as the * blackhole_dev and expects kernel to behave in a sane manner @@ -9,9 +9,8 @@ * Copyright (c) 2018, Mahesh Bandewar */ -#include +#include #include -#include #include #include #include @@ -25,17 +24,15 @@ #define UDP_PORT 1234 -static int __init test_blackholedev_init(void) +static void test_blackholedev(struct kunit *test) { struct ipv6hdr *ip6h; struct sk_buff *skb; struct udphdr *uh; int data_len; - int ret; skb = alloc_skb(SKB_SIZE, GFP_KERNEL); - if (!skb) - return -ENOMEM; + KUNIT_ASSERT_NOT_NULL(test, skb); /* Reserve head-room for the headers */ skb_reserve(skb, HEAD_SIZE); @@ -55,7 +52,7 @@ static int __init test_blackholedev_init(void) ip6h = (struct ipv6hdr *)skb_push(skb, sizeof(struct ipv6hdr)); skb_set_network_header(skb, 0); ip6h->hop_limit = 32; - ip6h->payload_len = data_len + sizeof(struct udphdr); + ip6h->payload_len = htons(data_len + sizeof(struct udphdr)); ip6h->nexthdr = IPPROTO_UDP; ip6h->saddr = in6addr_loopback; ip6h->daddr = in6addr_loopback; @@ -68,32 +65,20 @@ static int __init test_blackholedev_init(void) skb->dev = blackhole_netdev; /* Now attempt to send the packet */ - ret = dev_queue_xmit(skb); - - switch (ret) { - case NET_XMIT_SUCCESS: - pr_warn("dev_queue_xmit() returned NET_XMIT_SUCCESS\n"); - break; - case NET_XMIT_DROP: - pr_warn("dev_queue_xmit() returned NET_XMIT_DROP\n"); - break; - case NET_XMIT_CN: - pr_warn("dev_queue_xmit() returned NET_XMIT_CN\n"); - break; - default: - pr_err("dev_queue_xmit() returned UNKNOWN(%d)\n", ret); - } - - return 0; + KUNIT_EXPECT_EQ(test, dev_queue_xmit(skb), NET_XMIT_SUCCESS); } -static void __exit test_blackholedev_exit(void) -{ - pr_warn("test_blackholedev module terminating.\n"); -} +static struct kunit_case blackholedev_cases[] = { + KUNIT_CASE(test_blackholedev), + {}, +}; + +static struct kunit_suite blackholedev_suite = { + .name = "blackholedev", + .test_cases = blackholedev_cases, +}; -module_init(test_blackholedev_init); -module_exit(test_blackholedev_exit); +kunit_test_suite(blackholedev_suite); MODULE_AUTHOR("Mahesh Bandewar "); 
MODULE_DESCRIPTION("module test of the blackhole_dev"); diff --git a/lib/cpu_rmap.c b/lib/cpu_rmap.c index 4c348670da314..f03d9be3f06ba 100644 --- a/lib/cpu_rmap.c +++ b/lib/cpu_rmap.c @@ -73,7 +73,7 @@ static void cpu_rmap_release(struct kref *ref) * cpu_rmap_get - internal helper to get new ref on a cpu_rmap * @rmap: reverse-map allocated with alloc_cpu_rmap() */ -static inline void cpu_rmap_get(struct cpu_rmap *rmap) +void cpu_rmap_get(struct cpu_rmap *rmap) { kref_get(&rmap->refcount); } diff --git a/lib/dynamic_queue_limits.c b/lib/dynamic_queue_limits.c index c1b7638a594ac..f97a752e900a0 100644 --- a/lib/dynamic_queue_limits.c +++ b/lib/dynamic_queue_limits.c @@ -190,7 +190,7 @@ EXPORT_SYMBOL(dql_completed); void dql_reset(struct dql *dql) { /* Reset all dynamic values */ - dql->limit = 0; + dql->limit = dql->min_limit; dql->num_queued = 0; dql->num_completed = 0; dql->last_obj_cnt = 0; diff --git a/lib/iov_iter.c b/lib/iov_iter.c index 65f550cb5081b..8c7fdb7d8c8fa 100644 --- a/lib/iov_iter.c +++ b/lib/iov_iter.c @@ -1190,8 +1190,12 @@ static ssize_t __iov_iter_get_pages_alloc(struct iov_iter *i, if (!n) return -ENOMEM; p = *pages; - for (int k = 0; k < n; k++) - get_page(p[k] = page + k); + for (int k = 0; k < n; k++) { + struct folio *folio = page_folio(page); + p[k] = page + k; + if (!folio_test_slab(folio)) + folio_get(folio); + } maxsize = min_t(size_t, maxsize, n * PAGE_SIZE - *start); i->count -= maxsize; i->iov_offset += maxsize; diff --git a/lib/net_utils.c b/lib/net_utils.c index 42bb0473fb22f..215cda672fee1 100644 --- a/lib/net_utils.c +++ b/lib/net_utils.c @@ -7,11 +7,9 @@ bool mac_pton(const char *s, u8 *mac) { - size_t maxlen = 3 * ETH_ALEN - 1; int i; - /* XX:XX:XX:XX:XX:XX */ - if (strnlen(s, maxlen) < maxlen) + if (strnlen(s, MAC_ADDR_STR_LEN) < MAC_ADDR_STR_LEN) return false; /* Don't dirty result unless string is valid MAC. */ diff --git a/lib/rcuref.c b/lib/rcuref.c index 97f300eca927c..5bd726b71e393 100644 --- a/lib/rcuref.c +++ b/lib/rcuref.c @@ -220,6 +220,7 @@ EXPORT_SYMBOL_GPL(rcuref_get_slowpath); /** * rcuref_put_slowpath - Slowpath of __rcuref_put() * @ref: Pointer to the reference count + * @cnt: The resulting value of the fastpath decrement * * Invoked when the reference count is outside of the valid zone. * @@ -233,10 +234,8 @@ EXPORT_SYMBOL_GPL(rcuref_get_slowpath); * with a concurrent get()/put() pair. Caller is not allowed to * deconstruct the protected object. */ -bool rcuref_put_slowpath(rcuref_t *ref) +bool rcuref_put_slowpath(rcuref_t *ref, unsigned int cnt) { - unsigned int cnt = atomic_read(&ref->refcnt); - /* Did this drop the last reference? */ if (likely(cnt == RCUREF_NOREF)) { /* diff --git a/mm/compaction.c b/mm/compaction.c index 12ed8425fa175..a3203d97123ea 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -3181,6 +3181,7 @@ static int kcompactd(void *p) long default_timeout = msecs_to_jiffies(HPAGE_FRAG_CHECK_INTERVAL_MSEC); long timeout = default_timeout; + current->flags |= PF_KCOMPACTD; set_freezable(); pgdat->kcompactd_max_order = 0; @@ -3237,6 +3238,8 @@ static int kcompactd(void *p) pgdat->proactive_compact_trigger = false; } + current->flags &= ~PF_KCOMPACTD; + return 0; } diff --git a/mm/damon/core.c b/mm/damon/core.c index c7b9813088626..384935ef4e65e 100644 --- a/mm/damon/core.c +++ b/mm/damon/core.c @@ -373,6 +373,7 @@ struct damos *damon_new_scheme(struct damos_access_pattern *pattern, * or damon_attrs are updated. 
*/ scheme->next_apply_sis = 0; + scheme->walk_completed = false; INIT_LIST_HEAD(&scheme->filters); scheme->stat = (struct damos_stat){}; INIT_LIST_HEAD(&scheme->list); @@ -1429,9 +1430,13 @@ static bool damos_filter_out(struct damon_ctx *ctx, struct damon_target *t, { struct damos_filter *filter; + s->core_filters_allowed = false; damos_for_each_filter(filter, s) { - if (damos_filter_match(ctx, t, r, filter)) + if (damos_filter_match(ctx, t, r, filter)) { + if (filter->allow) + s->core_filters_allowed = true; return !filter->allow; + } } return false; } diff --git a/mm/damon/paddr.c b/mm/damon/paddr.c index 0f9ae14f884dd..c834aa2178352 100644 --- a/mm/damon/paddr.c +++ b/mm/damon/paddr.c @@ -236,6 +236,9 @@ static bool damos_pa_filter_out(struct damos *scheme, struct folio *folio) { struct damos_filter *filter; + if (scheme->core_filters_allowed) + return false; + damos_for_each_filter(filter, scheme) { if (damos_pa_filter_match(filter, folio)) return !filter->allow; diff --git a/mm/filemap.c b/mm/filemap.c index 804d7365680c1..e9404290f2c63 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -47,7 +47,6 @@ #include #include #include -#include #include #include #include "internal.h" @@ -445,7 +444,7 @@ EXPORT_SYMBOL(filemap_fdatawrite_range); * filemap_fdatawrite_range_kick - start writeback on a range * @mapping: target address_space * @start: index to start writeback on - * @end: last (non-inclusive) index for writeback + * @end: last (inclusive) index for writeback * * This is a non-integrity writeback helper, to start writing back folios * for the indicated range. @@ -1986,8 +1985,19 @@ struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index, if (err == -EEXIST) goto repeat; - if (err) + if (err) { + /* + * When NOWAIT I/O fails to allocate folios this could + * be due to a nonblocking memory allocation and not + * because the system actually is out of memory. + * Return -EAGAIN so that there caller retries in a + * blocking fashion instead of propagating -ENOMEM + * to the application. + */ + if ((fgp_flags & FGP_NOWAIT) && err == -ENOMEM) + err = -EAGAIN; return ERR_PTR(err); + } /* * filemap_add_folio locks the page, and for mmap * we expect an unlocked page. @@ -2897,8 +2907,7 @@ size_t splice_folio_into_pipe(struct pipe_inode_info *pipe, size = min(size, folio_size(folio) - offset); offset %= PAGE_SIZE; - while (spliced < size && - !pipe_full(pipe->head, pipe->tail, pipe->max_usage)) { + while (spliced < size && !pipe_is_full(pipe)) { struct pipe_buffer *buf = pipe_head_buf(pipe); size_t part = min_t(size_t, PAGE_SIZE - offset, size - spliced); @@ -2955,7 +2964,7 @@ ssize_t filemap_splice_read(struct file *in, loff_t *ppos, iocb.ki_pos = *ppos; /* Work out how much data we can actually add into the pipe */ - used = pipe_occupancy(pipe->head, pipe->tail); + used = pipe_buf_usage(pipe); npages = max_t(ssize_t, pipe->max_usage - used, 0); len = min_t(size_t, len, npages * PAGE_SIZE); @@ -3015,7 +3024,7 @@ ssize_t filemap_splice_read(struct file *in, loff_t *ppos, total_spliced += n; *ppos += n; in->f_ra.prev_pos = *ppos; - if (pipe_full(pipe->head, pipe->tail, pipe->max_usage)) + if (pipe_is_full(pipe)) goto out; } @@ -3199,14 +3208,6 @@ static struct file *do_sync_mmap_readahead(struct vm_fault *vmf) unsigned long vm_flags = vmf->vma->vm_flags; unsigned int mmap_miss; - /* - * If we have pre-content watches we need to disable readahead to make - * sure that we don't populate our mapping with 0 filled pages that we - * never emitted an event for. 
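
The filemap change treats -ENOMEM under FGP_NOWAIT as "could not allocate without blocking" rather than true memory exhaustion, translating it to -EAGAIN so the caller falls back to a blocking retry instead of surfacing ENOMEM to the application. The mapping, isolated into a testable helper (the flag value here is invented):

#include <errno.h>

#define FGP_NOWAIT 0x1

/* Stand-in for the allocation outcome inside __filemap_get_folio(). */
static int filter_err(int err, unsigned fgp_flags)
{
        /*
         * -ENOMEM under NOWAIT usually means the allocator refused to
         * block, not that the system is out of memory: report -EAGAIN
         * so the caller retries the I/O in a blocking fashion.
         */
        if ((fgp_flags & FGP_NOWAIT) && err == -ENOMEM)
                return -EAGAIN;
        return err;
}

int main(void)
{
        return filter_err(-ENOMEM, FGP_NOWAIT) == -EAGAIN ? 0 : 1;
}
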
- */ - if (unlikely(FMODE_FSNOTIFY_HSM(file->f_mode))) - return fpin; - #ifdef CONFIG_TRANSPARENT_HUGEPAGE /* Use the readahead code, even if readahead is disabled */ if ((vm_flags & VM_HUGEPAGE) && HPAGE_PMD_ORDER <= MAX_PAGECACHE_ORDER) { @@ -3275,10 +3276,6 @@ static struct file *do_async_mmap_readahead(struct vm_fault *vmf, struct file *fpin = NULL; unsigned int mmap_miss; - /* See comment in do_sync_mmap_readahead. */ - if (unlikely(FMODE_FSNOTIFY_HSM(file->f_mode))) - return fpin; - /* If we don't want any read-ahead, don't bother */ if (vmf->vma->vm_flags & VM_RAND_READ || !ra->ra_pages) return fpin; @@ -3337,48 +3334,6 @@ static vm_fault_t filemap_fault_recheck_pte_none(struct vm_fault *vmf) return ret; } -/** - * filemap_fsnotify_fault - maybe emit a pre-content event. - * @vmf: struct vm_fault containing details of the fault. - * - * If we have a pre-content watch on this file we will emit an event for this - * range. If we return anything the fault caller should return immediately, we - * will return VM_FAULT_RETRY if we had to emit an event, which will trigger the - * fault again and then the fault handler will run the second time through. - * - * Return: a bitwise-OR of %VM_FAULT_ codes, 0 if nothing happened. - */ -vm_fault_t filemap_fsnotify_fault(struct vm_fault *vmf) -{ - struct file *fpin = NULL; - int mask = (vmf->flags & FAULT_FLAG_WRITE) ? MAY_WRITE : MAY_ACCESS; - loff_t pos = vmf->pgoff >> PAGE_SHIFT; - size_t count = PAGE_SIZE; - int err; - - /* - * We already did this and now we're retrying with everything locked, - * don't emit the event and continue. - */ - if (vmf->flags & FAULT_FLAG_TRIED) - return 0; - - /* No watches, we're done. */ - if (likely(!FMODE_FSNOTIFY_HSM(vmf->vma->vm_file->f_mode))) - return 0; - - fpin = maybe_unlock_mmap_for_io(vmf, fpin); - if (!fpin) - return VM_FAULT_SIGBUS; - - err = fsnotify_file_area_perm(fpin, mask, &pos, count); - fput(fpin); - if (err) - return VM_FAULT_SIGBUS; - return VM_FAULT_RETRY; -} -EXPORT_SYMBOL_GPL(filemap_fsnotify_fault); - /** * filemap_fault - read in file data for page fault handling * @vmf: struct vm_fault containing details of the fault @@ -3482,37 +3437,6 @@ vm_fault_t filemap_fault(struct vm_fault *vmf) * or because readahead was otherwise unable to retrieve it. */ if (unlikely(!folio_test_uptodate(folio))) { - /* - * If this is a precontent file we have can now emit an event to - * try and populate the folio. - */ - if (!(vmf->flags & FAULT_FLAG_TRIED) && - unlikely(FMODE_FSNOTIFY_HSM(file->f_mode))) { - loff_t pos = folio_pos(folio); - size_t count = folio_size(folio); - - /* We're NOWAIT, we have to retry. */ - if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT) { - folio_unlock(folio); - goto out_retry; - } - - if (mapping_locked) - filemap_invalidate_unlock_shared(mapping); - mapping_locked = false; - - folio_unlock(folio); - fpin = maybe_unlock_mmap_for_io(vmf, fpin); - if (!fpin) - goto out_retry; - - error = fsnotify_file_area_perm(fpin, MAY_ACCESS, &pos, - count); - if (error) - ret = VM_FAULT_SIGBUS; - goto out_retry; - } - /* * If the invalidate lock is not held, the folio was in cache * and uptodate and now it is not. Strange but possible since we @@ -4170,17 +4094,6 @@ ssize_t generic_perform_write(struct kiocb *iocb, struct iov_iter *i) bytes = min(chunk - offset, bytes); balance_dirty_pages_ratelimited(mapping); - /* - * Bring in the user page that we will copy from _first_. 
- * Otherwise there's a nasty deadlock on copying from the - * same page as we're writing to, without it being marked - * up-to-date. - */ - if (unlikely(fault_in_iov_iter_readable(i, bytes) == bytes)) { - status = -EFAULT; - break; - } - if (fatal_signal_pending(current)) { status = -EINTR; break; @@ -4198,6 +4111,12 @@ ssize_t generic_perform_write(struct kiocb *iocb, struct iov_iter *i) if (mapping_writably_mapped(mapping)) flush_dcache_folio(folio); + /* + * Faults here on mmap()s can recurse into arbitrary + * filesystem code. Lots of locks are held that can + * deadlock. Use an atomic copy to avoid deadlocking + * in page fault handling. + */ copied = copy_folio_from_iter_atomic(folio, offset, bytes, i); flush_dcache_folio(folio); @@ -4223,6 +4142,16 @@ ssize_t generic_perform_write(struct kiocb *iocb, struct iov_iter *i) bytes = copied; goto retry; } + + /* + * 'folio' is now unlocked and faults on it can be + * handled. Ensure forward progress by trying to + * fault it in now. + */ + if (fault_in_iov_iter_readable(i, bytes) == bytes) { + status = -EFAULT; + break; + } } else { pos += status; written += status; diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 3d3ebdc002d59..373781b21e5ca 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -3304,7 +3304,7 @@ static void __split_huge_page(struct page *page, struct list_head *list, folio_account_cleaned(tail, inode_to_wb(folio->mapping->host)); __filemap_remove_folio(tail, NULL); - folio_put(tail); + folio_put_refs(tail, folio_nr_pages(tail)); } else if (!folio_test_anon(folio)) { __xa_store(&folio->mapping->i_pages, tail->index, tail, 0); diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 163190e89ea16..318624c965844 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -2135,6 +2135,8 @@ int dissolve_free_hugetlb_folio(struct folio *folio) if (!folio_ref_count(folio)) { struct hstate *h = folio_hstate(folio); + bool adjust_surplus = false; + if (!available_huge_pages(h)) goto out; @@ -2157,7 +2159,9 @@ int dissolve_free_hugetlb_folio(struct folio *folio) goto retry; } - remove_hugetlb_folio(h, folio, false); + if (h->surplus_huge_pages_node[folio_nid(folio)]) + adjust_surplus = true; + remove_hugetlb_folio(h, folio, adjust_surplus); h->max_huge_pages--; spin_unlock_irq(&hugetlb_lock); @@ -2177,7 +2181,7 @@ int dissolve_free_hugetlb_folio(struct folio *folio) rc = hugetlb_vmemmap_restore_folio(h, folio); if (rc) { spin_lock_irq(&hugetlb_lock); - add_hugetlb_folio(h, folio, false); + add_hugetlb_folio(h, folio, adjust_surplus); h->max_huge_pages++; goto out; } @@ -2943,6 +2947,14 @@ int replace_free_hugepage_folios(unsigned long start_pfn, unsigned long end_pfn) return ret; } +void wait_for_freed_hugetlb_folios(void) +{ + if (llist_empty(&hpage_freelist)) + return; + + flush_work(&free_hpage_work); +} + typedef enum { /* * For either 0/1: we checked the per-vma resv map, and one resv @@ -5447,7 +5459,7 @@ static void move_huge_pte(struct vm_area_struct *vma, unsigned long old_addr, if (src_ptl != dst_ptl) spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING); - pte = huge_ptep_get_and_clear(mm, old_addr, src_pte); + pte = huge_ptep_get_and_clear(mm, old_addr, src_pte, sz); if (need_clear_uffd_wp && pte_marker_uffd_wp(pte)) huge_pte_clear(mm, new_addr, dst_pte, sz); @@ -5622,7 +5634,7 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma, set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED); } - pte = huge_ptep_get_and_clear(mm, address, ptep); + pte = huge_ptep_get_and_clear(mm, address, ptep, sz); 
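
The generic_perform_write() rework inverts the old order: copy first with the atomic, non-faulting variant while the folio is locked, and only fault the source pages in after the folio is unlocked, then retry. That is what breaks the deadlock when the source of the write is an mmap of the destination page. A self-contained skeleton of the loop; every helper here is a stub standing in for the real pagecache calls:

#include <errno.h>
#include <stddef.h>
#include <string.h>

static void lock_folio(void) {}
static void unlock_folio(void) {}
static size_t copy_atomic(char *dst, const char *src, size_t n)
{ memcpy(dst, src, n); return n; }            /* stub: pretend it succeeds */
static size_t fault_in(const char *src, size_t n) { (void)src; (void)n; return 0; }

static long write_loop(char *dst, const char *src, size_t bytes)
{
        for (;;) {
                size_t copied;

                lock_folio();
                /* Must not fault here: src may be an mmap of this folio. */
                copied = copy_atomic(dst, src, bytes);
                unlock_folio();

                if (copied)
                        return (long)copied;   /* forward progress made */

                /* Folio lock dropped: faulting in src is now deadlock-free. */
                if (fault_in(src, bytes) == bytes)
                        return -EFAULT;        /* nothing could be faulted in */
        }
}
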
tlb_remove_huge_tlb_entry(h, tlb, ptep, address); if (huge_pte_dirty(pte)) set_page_dirty(page); diff --git a/mm/internal.h b/mm/internal.h index 109ef30fee11f..20b3535935a31 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -1115,7 +1115,7 @@ static inline int find_next_best_node(int node, nodemask_t *used_node_mask) * mm/memory-failure.c */ #ifdef CONFIG_MEMORY_FAILURE -void unmap_poisoned_folio(struct folio *folio, enum ttu_flags ttu); +int unmap_poisoned_folio(struct folio *folio, unsigned long pfn, bool must_kill); void shake_folio(struct folio *folio); extern int hwpoison_filter(struct page *p); @@ -1138,8 +1138,9 @@ unsigned long page_mapped_in_vma(const struct page *page, struct vm_area_struct *vma); #else -static inline void unmap_poisoned_folio(struct folio *folio, enum ttu_flags ttu) +static inline int unmap_poisoned_folio(struct folio *folio, unsigned long pfn, bool must_kill) { + return -EBUSY; } #endif diff --git a/mm/kmsan/hooks.c b/mm/kmsan/hooks.c index 3ea50f09311fd..3df45c25c1f62 100644 --- a/mm/kmsan/hooks.c +++ b/mm/kmsan/hooks.c @@ -357,6 +357,7 @@ void kmsan_handle_dma(struct page *page, size_t offset, size_t size, size -= to_go; } } +EXPORT_SYMBOL_GPL(kmsan_handle_dma); void kmsan_handle_dma_sg(struct scatterlist *sg, int nents, enum dma_data_direction dir) diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 4de6acb9b8ecb..a037ec92881d5 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -1921,9 +1921,18 @@ void drain_all_stock(struct mem_cgroup *root_memcg) static int memcg_hotplug_cpu_dead(unsigned int cpu) { struct memcg_stock_pcp *stock; + struct obj_cgroup *old; + unsigned long flags; stock = &per_cpu(memcg_stock, cpu); + + /* drain_obj_stock requires stock_lock */ + local_lock_irqsave(&memcg_stock.stock_lock, flags); + old = drain_obj_stock(stock); + local_unlock_irqrestore(&memcg_stock.stock_lock, flags); + drain_stock(stock); + obj_cgroup_put(old); return 0; } @@ -4993,7 +5002,7 @@ void mem_cgroup_swapout(struct folio *folio, swp_entry_t entry) mem_cgroup_id_get_many(swap_memcg, nr_entries - 1); mod_memcg_state(swap_memcg, MEMCG_SWAP, nr_entries); - swap_cgroup_record(folio, entry); + swap_cgroup_record(folio, mem_cgroup_id(swap_memcg), entry); folio_unqueue_deferred_split(folio); folio->memcg_data = 0; @@ -5055,7 +5064,7 @@ int __mem_cgroup_try_charge_swap(struct folio *folio, swp_entry_t entry) mem_cgroup_id_get_many(memcg, nr_pages - 1); mod_memcg_state(memcg, MEMCG_SWAP, nr_pages); - swap_cgroup_record(folio, entry); + swap_cgroup_record(folio, mem_cgroup_id(memcg), entry); return 0; } diff --git a/mm/memory-failure.c b/mm/memory-failure.c index 995a15eb67e2c..327e02fdc029d 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -1556,11 +1556,35 @@ static int get_hwpoison_page(struct page *p, unsigned long flags) return ret; } -void unmap_poisoned_folio(struct folio *folio, enum ttu_flags ttu) +int unmap_poisoned_folio(struct folio *folio, unsigned long pfn, bool must_kill) { - if (folio_test_hugetlb(folio) && !folio_test_anon(folio)) { - struct address_space *mapping; + enum ttu_flags ttu = TTU_IGNORE_MLOCK | TTU_SYNC | TTU_HWPOISON; + struct address_space *mapping; + + if (folio_test_swapcache(folio)) { + pr_err("%#lx: keeping poisoned page in swap cache\n", pfn); + ttu &= ~TTU_HWPOISON; + } + /* + * Propagate the dirty bit from PTEs to struct page first, because we + * need this to decide if we should kill or just drop the page. 
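
With the dirty-bit handling moved into unmap_poisoned_folio(), the kill-versus-drop policy lives in one place: a clean page with an intact backing copy can be dropped without TTU_HWPOISON, while everything else keeps the kill-on-access semantics. The decision reduced to plain C, with the inputs collapsed into booleans:

#include <stdbool.h>

#define TTU_HWPOISON 0x1

/* Reduced model of the clean-page special case in unmap_poisoned_folio(). */
static int unmap_flags(bool must_kill, bool dirty_after_mkclean, bool writeback_ok)
{
        int ttu = TTU_HWPOISON;            /* default: poison PTEs, kill on access */

        if (!must_kill && !dirty_after_mkclean && writeback_ok)
                ttu &= ~TTU_HWPOISON;      /* clean copy on disk: drop, no kill */
        return ttu;
}
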
+ * XXX: the dirty test could be racy: set_page_dirty() may not always + * be called inside page lock (it's recommended but not enforced). + */ + mapping = folio_mapping(folio); + if (!must_kill && !folio_test_dirty(folio) && mapping && + mapping_can_writeback(mapping)) { + if (folio_mkclean(folio)) { + folio_set_dirty(folio); + } else { + ttu &= ~TTU_HWPOISON; + pr_info("%#lx: corrupted page was clean: dropped without side effects\n", + pfn); + } + } + + if (folio_test_hugetlb(folio) && !folio_test_anon(folio)) { /* * For hugetlb folios in shared mappings, try_to_unmap * could potentially call huge_pmd_unshare. Because of @@ -1572,7 +1596,7 @@ void unmap_poisoned_folio(struct folio *folio, enum ttu_flags ttu) if (!mapping) { pr_info("%#lx: could not lock mapping for mapped hugetlb folio\n", folio_pfn(folio)); - return; + return -EBUSY; } try_to_unmap(folio, ttu|TTU_RMAP_LOCKED); @@ -1580,6 +1604,8 @@ void unmap_poisoned_folio(struct folio *folio, enum ttu_flags ttu) } else { try_to_unmap(folio, ttu); } + + return folio_mapped(folio) ? -EBUSY : 0; } /* @@ -1589,8 +1615,6 @@ void unmap_poisoned_folio(struct folio *folio, enum ttu_flags ttu) static bool hwpoison_user_mappings(struct folio *folio, struct page *p, unsigned long pfn, int flags) { - enum ttu_flags ttu = TTU_IGNORE_MLOCK | TTU_SYNC | TTU_HWPOISON; - struct address_space *mapping; LIST_HEAD(tokill); bool unmap_success; int forcekill; @@ -1613,29 +1637,6 @@ static bool hwpoison_user_mappings(struct folio *folio, struct page *p, if (!folio_mapped(folio)) return true; - if (folio_test_swapcache(folio)) { - pr_err("%#lx: keeping poisoned page in swap cache\n", pfn); - ttu &= ~TTU_HWPOISON; - } - - /* - * Propagate the dirty bit from PTEs to struct page first, because we - * need this to decide if we should kill or just drop the page. - * XXX: the dirty test could be racy: set_page_dirty() may not always - * be called inside page lock (it's recommended but not enforced). - */ - mapping = folio_mapping(folio); - if (!(flags & MF_MUST_KILL) && !folio_test_dirty(folio) && mapping && - mapping_can_writeback(mapping)) { - if (folio_mkclean(folio)) { - folio_set_dirty(folio); - } else { - ttu &= ~TTU_HWPOISON; - pr_info("%#lx: corrupted page was clean: dropped without side effects\n", - pfn); - } - } - /* * First collect all the processes that have the page * mapped in dirty form. 
This has to be done before try_to_unmap, @@ -1643,9 +1644,7 @@ static bool hwpoison_user_mappings(struct folio *folio, struct page *p, */ collect_procs(folio, p, &tokill, flags & MF_ACTION_REQUIRED); - unmap_poisoned_folio(folio, ttu); - - unmap_success = !folio_mapped(folio); + unmap_success = !unmap_poisoned_folio(folio, pfn, flags & MF_MUST_KILL); if (!unmap_success) pr_err("%#lx: failed to unmap page (folio mapcount=%d)\n", pfn, folio_mapcount(folio)); diff --git a/mm/memory.c b/mm/memory.c index 3b5d0ebd2a02d..3daefae7999bb 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -76,7 +76,6 @@ #include #include #include -#include #include @@ -3051,8 +3050,10 @@ static int __apply_to_page_range(struct mm_struct *mm, unsigned long addr, next = pgd_addr_end(addr, end); if (pgd_none(*pgd) && !create) continue; - if (WARN_ON_ONCE(pgd_leaf(*pgd))) - return -EINVAL; + if (WARN_ON_ONCE(pgd_leaf(*pgd))) { + err = -EINVAL; + break; + } if (!pgd_none(*pgd) && WARN_ON_ONCE(pgd_bad(*pgd))) { if (!create) continue; @@ -5183,7 +5184,11 @@ vm_fault_t finish_fault(struct vm_fault *vmf) bool is_cow = (vmf->flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED); int type, nr_pages; - unsigned long addr = vmf->address; + unsigned long addr; + bool needs_fallback = false; + +fallback: + addr = vmf->address; /* Did we COW the page? */ if (is_cow) @@ -5222,7 +5227,8 @@ vm_fault_t finish_fault(struct vm_fault *vmf) * approach also applies to non-anonymous-shmem faults to avoid * inflating the RSS of the process. */ - if (!vma_is_anon_shmem(vma) || unlikely(userfaultfd_armed(vma))) { + if (!vma_is_anon_shmem(vma) || unlikely(userfaultfd_armed(vma)) || + unlikely(needs_fallback)) { nr_pages = 1; } else if (nr_pages > 1) { pgoff_t idx = folio_page_idx(folio, page); @@ -5258,9 +5264,9 @@ vm_fault_t finish_fault(struct vm_fault *vmf) ret = VM_FAULT_NOPAGE; goto unlock; } else if (nr_pages > 1 && !pte_range_none(vmf->pte, nr_pages)) { - update_mmu_tlb_range(vma, addr, vmf->pte, nr_pages); - ret = VM_FAULT_NOPAGE; - goto unlock; + needs_fallback = true; + pte_unmap_unlock(vmf->pte, vmf->ptl); + goto fallback; } folio_ref_add(folio, nr_pages - 1); @@ -5743,17 +5749,8 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf) static inline vm_fault_t create_huge_pmd(struct vm_fault *vmf) { struct vm_area_struct *vma = vmf->vma; - if (vma_is_anonymous(vma)) return do_huge_pmd_anonymous_page(vmf); - /* - * Currently we just emit PAGE_SIZE for our fault events, so don't allow - * a huge fault if we have a pre content watch on this file. This would - * be trivial to support, but there would need to be tests to ensure - * this works properly and those don't exist currently. - */ - if (unlikely(FMODE_FSNOTIFY_HSM(vma->vm_file->f_mode))) - return VM_FAULT_FALLBACK; if (vma->vm_ops->huge_fault) return vma->vm_ops->huge_fault(vmf, PMD_ORDER); return VM_FAULT_FALLBACK; @@ -5777,9 +5774,6 @@ static inline vm_fault_t wp_huge_pmd(struct vm_fault *vmf) } if (vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) { - /* See comment in create_huge_pmd. */ - if (unlikely(FMODE_FSNOTIFY_HSM(vma->vm_file->f_mode))) - goto split; if (vma->vm_ops->huge_fault) { ret = vma->vm_ops->huge_fault(vmf, PMD_ORDER); if (!(ret & VM_FAULT_FALLBACK)) @@ -5802,9 +5796,6 @@ static vm_fault_t create_huge_pud(struct vm_fault *vmf) /* No support for anonymous transparent PUD pages yet */ if (vma_is_anonymous(vma)) return VM_FAULT_FALLBACK; - /* See comment in create_huge_pmd. 
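
do_migrate_range() now takes its reference with folio_try_get() before inspecting the folio, then re-checks page_folio(page) to detect a split or free that raced with the lookup. That get-then-revalidate idiom in miniature, with stub types:

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct folio { _Atomic int refs; };

/* Speculative get: fails instead of resurrecting a freed (refcount 0) folio. */
static bool folio_try_get_stub(struct folio *f)
{
        int old = atomic_load(&f->refs);

        while (old > 0)
                if (atomic_compare_exchange_weak(&f->refs, &old, old + 1))
                        return true;
        return false;
}

static struct folio *get_stable_folio(struct folio *(*lookup)(void))
{
        struct folio *f = lookup();

        if (!folio_try_get_stub(f))
                return NULL;
        if (lookup() != f) {               /* re-check: raced with split/free */
                atomic_fetch_sub(&f->refs, 1);
                return NULL;
        }
        return f;                          /* reference held, identity stable */
}
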
*/ - if (unlikely(FMODE_FSNOTIFY_HSM(vma->vm_file->f_mode))) - return VM_FAULT_FALLBACK; if (vma->vm_ops->huge_fault) return vma->vm_ops->huge_fault(vmf, PUD_ORDER); #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ @@ -5822,9 +5813,6 @@ static vm_fault_t wp_huge_pud(struct vm_fault *vmf, pud_t orig_pud) if (vma_is_anonymous(vma)) goto split; if (vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) { - /* See comment in create_huge_pmd. */ - if (unlikely(FMODE_FSNOTIFY_HSM(vma->vm_file->f_mode))) - goto split; if (vma->vm_ops->huge_fault) { ret = vma->vm_ops->huge_fault(vmf, PUD_ORDER); if (!(ret & VM_FAULT_FALLBACK)) diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index e3655f07dd6e3..16cf9e17077e3 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -1822,26 +1822,24 @@ static void do_migrate_range(unsigned long start_pfn, unsigned long end_pfn) if (folio_test_large(folio)) pfn = folio_pfn(folio) + folio_nr_pages(folio) - 1; - /* - * HWPoison pages have elevated reference counts so the migration would - * fail on them. It also doesn't make any sense to migrate them in the - * first place. Still try to unmap such a page in case it is still mapped - * (keep the unmap as the catch all safety net). - */ + if (!folio_try_get(folio)) + continue; + + if (unlikely(page_folio(page) != folio)) + goto put_folio; + if (folio_test_hwpoison(folio) || (folio_test_large(folio) && folio_test_has_hwpoisoned(folio))) { if (WARN_ON(folio_test_lru(folio))) folio_isolate_lru(folio); - if (folio_mapped(folio)) - unmap_poisoned_folio(folio, TTU_IGNORE_MLOCK); - continue; - } - - if (!folio_try_get(folio)) - continue; + if (folio_mapped(folio)) { + folio_lock(folio); + unmap_poisoned_folio(folio, pfn, false); + folio_unlock(folio); + } - if (unlikely(page_folio(page) != folio)) goto put_folio; + } if (!isolate_folio_to_list(folio, &source)) { if (__ratelimit(&migrate_rs)) { diff --git a/mm/migrate.c b/mm/migrate.c index fb19a18892c89..97f0edf0c0325 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -518,15 +518,13 @@ static int __folio_migrate_mapping(struct address_space *mapping, if (folio_test_anon(folio) && folio_test_large(folio)) mod_mthp_stat(folio_order(folio), MTHP_STAT_NR_ANON, 1); folio_ref_add(newfolio, nr); /* add cache reference */ - if (folio_test_swapbacked(folio)) { + if (folio_test_swapbacked(folio)) __folio_set_swapbacked(newfolio); - if (folio_test_swapcache(folio)) { - folio_set_swapcache(newfolio); - newfolio->private = folio_get_private(folio); - } + if (folio_test_swapcache(folio)) { + folio_set_swapcache(newfolio); + newfolio->private = folio_get_private(folio); entries = nr; } else { - VM_BUG_ON_FOLIO(folio_test_swapcache(folio), folio); entries = 1; } diff --git a/mm/nommu.c b/mm/nommu.c index 0e882a88367a1..529b6c201fea5 100644 --- a/mm/nommu.c +++ b/mm/nommu.c @@ -1613,13 +1613,6 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr, } EXPORT_SYMBOL(remap_vmalloc_range); -vm_fault_t filemap_fsnotify_fault(struct vm_fault *vmf) -{ - BUG(); - return 0; -} -EXPORT_SYMBOL_GPL(filemap_fsnotify_fault); - vm_fault_t filemap_fault(struct vm_fault *vmf) { BUG(); diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 579789600a3c7..542d25f77be80 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -4243,6 +4243,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, restart: compaction_retries = 0; no_progress_loops = 0; + compact_result = COMPACT_SKIPPED; compact_priority = DEF_COMPACT_PRIORITY; cpuset_mems_cookie = read_mems_allowed_begin(); zonelist_iter_cookie = 
zonelist_iter_begin(); @@ -5849,11 +5850,10 @@ static void setup_per_zone_lowmem_reserve(void) for (j = i + 1; j < MAX_NR_ZONES; j++) { struct zone *upper_zone = &pgdat->node_zones[j]; - bool empty = !zone_managed_pages(upper_zone); managed_pages += zone_managed_pages(upper_zone); - if (clear || empty) + if (clear) zone->lowmem_reserve[j] = 0; else zone->lowmem_reserve[j] = managed_pages / ratio; @@ -7004,7 +7004,7 @@ static inline bool has_unaccepted_memory(void) static bool cond_accept_memory(struct zone *zone, unsigned int order) { - long to_accept; + long to_accept, wmark; bool ret = false; if (!has_unaccepted_memory()) @@ -7013,8 +7013,18 @@ static bool cond_accept_memory(struct zone *zone, unsigned int order) if (list_empty(&zone->unaccepted_pages)) return false; + wmark = promo_wmark_pages(zone); + + /* + * Watermarks have not been initialized yet. + * + * Accepting one MAX_ORDER page to ensure progress. + */ + if (!wmark) + return try_to_accept_memory_one(zone); + /* How much to accept to get to promo watermark? */ - to_accept = promo_wmark_pages(zone) - + to_accept = wmark - (zone_page_state(zone, NR_FREE_PAGES) - __zone_watermark_unusable_free(zone, order, 0) - zone_page_state(zone, NR_UNACCEPTED)); diff --git a/mm/page_isolation.c b/mm/page_isolation.c index c608e9d728655..a051a29e95ad0 100644 --- a/mm/page_isolation.c +++ b/mm/page_isolation.c @@ -607,6 +607,16 @@ int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn, struct zone *zone; int ret; + /* + * Due to the deferred freeing of hugetlb folios, the hugepage folios may + * not immediately release to the buddy system. This can cause PageBuddy() + * to fail in __test_page_isolated_in_pageblock(). To ensure that the + * hugetlb folios are properly released back to the buddy system, we + * invoke the wait_for_freed_hugetlb_folios() function to wait for the + * release to complete. + */ + wait_for_freed_hugetlb_folios(); + /* * Note: pageblock_nr_pages != MAX_PAGE_ORDER. Then, chunks of free * pages are not aligned to pageblock_nr_pages. diff --git a/mm/readahead.c b/mm/readahead.c index 220155a5c9646..6a4e96b69702b 100644 --- a/mm/readahead.c +++ b/mm/readahead.c @@ -128,7 +128,6 @@ #include #include #include -#include #include "internal.h" @@ -558,15 +557,6 @@ void page_cache_sync_ra(struct readahead_control *ractl, unsigned long max_pages, contig_count; pgoff_t prev_index, miss; - /* - * If we have pre-content watches we need to disable readahead to make - * sure that we don't find 0 filled pages in cache that we never emitted - * events for. Filesystems supporting HSM must make sure to not call - * this function with ractl->file unset for files handled by HSM. - */ - if (ractl->file && unlikely(FMODE_FSNOTIFY_HSM(ractl->file->f_mode))) - return; - /* * Even if readahead is disabled, issue this request as readahead * as we'll need it to satisfy the requested range. The forced @@ -645,10 +635,6 @@ void page_cache_async_ra(struct readahead_control *ractl, if (!ra->ra_pages) return; - /* See the comment in page_cache_sync_ra. */ - if (ractl->file && unlikely(FMODE_FSNOTIFY_HSM(ractl->file->f_mode))) - return; - /* * Same bit is used for PG_readahead and PG_reclaim. 
*/ diff --git a/mm/shmem.c b/mm/shmem.c index 4ea6109a80431..1ede0800e8461 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -1548,7 +1548,7 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc) if (WARN_ON_ONCE(!wbc->for_reclaim)) goto redirty; - if (WARN_ON_ONCE((info->flags & VM_LOCKED) || sbinfo->noswap)) + if ((info->flags & VM_LOCKED) || sbinfo->noswap) goto redirty; if (!total_swap_pages) @@ -2253,7 +2253,7 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index, struct folio *folio = NULL; bool skip_swapcache = false; swp_entry_t swap; - int error, nr_pages; + int error, nr_pages, order, split_order; VM_BUG_ON(!*foliop || !xa_is_value(*foliop)); swap = radix_to_swp_entry(*foliop); @@ -2272,10 +2272,9 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index, /* Look it up and read it in.. */ folio = swap_cache_get_folio(swap, NULL, 0); + order = xa_get_order(&mapping->i_pages, index); if (!folio) { - int order = xa_get_order(&mapping->i_pages, index); bool fallback_order0 = false; - int split_order; /* Or update major stats only when swapin succeeds?? */ if (fault_type) { @@ -2339,6 +2338,29 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index, error = -ENOMEM; goto failed; } + } else if (order != folio_order(folio)) { + /* + * Swap readahead may swap in order 0 folios into the swapcache + * asynchronously, while the shmem mapping can still store + * large swap entries. In such cases, we should split the + * large swap entry to prevent possible data corruption. + */ + split_order = shmem_split_large_entry(inode, index, swap, gfp); + if (split_order < 0) { + error = split_order; + goto failed; + } + + /* + * If the large swap entry has already been split, it is + * necessary to recalculate the new swap entry based on + * the old order alignment.
+ */ + if (split_order > 0) { + pgoff_t offset = index - round_down(index, 1 << split_order); + + swap = swp_entry(swp_type(swap), swp_offset(swap) + offset); + } } alloced: @@ -2346,7 +2368,8 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index, folio_lock(folio); if ((!skip_swapcache && !folio_test_swapcache(folio)) || folio->swap.val != swap.val || - !shmem_confirm_swap(mapping, index, swap)) { + !shmem_confirm_swap(mapping, index, swap) || + xa_get_order(&mapping->i_pages, index) != folio_order(folio)) { error = -EEXIST; goto unlock; } @@ -3487,7 +3510,7 @@ static size_t splice_zeropage_into_pipe(struct pipe_inode_info *pipe, size = min_t(size_t, size, PAGE_SIZE - offset); - if (!pipe_full(pipe->head, pipe->tail, pipe->max_usage)) { + if (!pipe_is_full(pipe)) { struct pipe_buffer *buf = pipe_head_buf(pipe); *buf = (struct pipe_buffer) { @@ -3514,7 +3537,7 @@ static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos, int error = 0; /* Work out how much data we can actually add into the pipe */ - used = pipe_occupancy(pipe->head, pipe->tail); + used = pipe_buf_usage(pipe); npages = max_t(ssize_t, pipe->max_usage - used, 0); len = min_t(size_t, len, npages * PAGE_SIZE); @@ -3601,7 +3624,7 @@ static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos, total_spliced += n; *ppos += n; in->f_ra.prev_pos = *ppos; - if (pipe_full(pipe->head, pipe->tail, pipe->max_usage)) + if (pipe_is_full(pipe)) break; cond_resched(); diff --git a/mm/slab_common.c b/mm/slab_common.c index 4030907b6b7d8..4c9f0a87f733b 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -1304,6 +1304,8 @@ module_param(rcu_min_cached_objs, int, 0444); static int rcu_delay_page_cache_fill_msec = 5000; module_param(rcu_delay_page_cache_fill_msec, int, 0444); +static struct workqueue_struct *rcu_reclaim_wq; + /* Maximum number of jiffies to wait before draining a batch. */ #define KFREE_DRAIN_JIFFIES (5 * HZ) #define KFREE_N_BATCHES 2 @@ -1632,10 +1634,10 @@ __schedule_delayed_monitor_work(struct kfree_rcu_cpu *krcp) if (delayed_work_pending(&krcp->monitor_work)) { delay_left = krcp->monitor_work.timer.expires - jiffies; if (delay < delay_left) - mod_delayed_work(system_unbound_wq, &krcp->monitor_work, delay); + mod_delayed_work(rcu_reclaim_wq, &krcp->monitor_work, delay); return; } - queue_delayed_work(system_unbound_wq, &krcp->monitor_work, delay); + queue_delayed_work(rcu_reclaim_wq, &krcp->monitor_work, delay); } static void @@ -1733,7 +1735,7 @@ kvfree_rcu_queue_batch(struct kfree_rcu_cpu *krcp) // "free channels", the batch can handle. Break // the loop since it is done with this CPU thus // queuing an RCU work is _always_ success here. - queued = queue_rcu_work(system_unbound_wq, &krwp->rcu_work); + queued = queue_rcu_work(rcu_reclaim_wq, &krwp->rcu_work); WARN_ON_ONCE(!queued); break; } @@ -1883,7 +1885,7 @@ run_page_cache_worker(struct kfree_rcu_cpu *krcp) if (rcu_scheduler_active == RCU_SCHEDULER_RUNNING && !atomic_xchg(&krcp->work_in_progress, 1)) { if (atomic_read(&krcp->backoff_page_cache_fill)) { - queue_delayed_work(system_unbound_wq, + queue_delayed_work(rcu_reclaim_wq, &krcp->page_cache_work, msecs_to_jiffies(rcu_delay_page_cache_fill_msec)); } else { @@ -2120,6 +2122,10 @@ void __init kvfree_rcu_init(void) int i, j; struct shrinker *kfree_rcu_shrinker; + rcu_reclaim_wq = alloc_workqueue("kvfree_rcu_reclaim", + WQ_UNBOUND | WQ_MEM_RECLAIM, 0); + WARN_ON(!rcu_reclaim_wq); + /* Clamp it to [0:100] seconds interval. 
*/ if (rcu_delay_page_cache_fill_msec < 0 || rcu_delay_page_cache_fill_msec > 100 * MSEC_PER_SEC) { diff --git a/mm/swap_cgroup.c b/mm/swap_cgroup.c index be39078f255be..1007c30f12e2c 100644 --- a/mm/swap_cgroup.c +++ b/mm/swap_cgroup.c @@ -58,9 +58,11 @@ static unsigned short __swap_cgroup_id_xchg(struct swap_cgroup *map, * entries must not have been charged * * @folio: the folio that the swap entry belongs to + * @id: mem_cgroup ID to be recorded * @ent: the first swap entry to be recorded */ -void swap_cgroup_record(struct folio *folio, swp_entry_t ent) +void swap_cgroup_record(struct folio *folio, unsigned short id, + swp_entry_t ent) { unsigned int nr_ents = folio_nr_pages(folio); struct swap_cgroup *map; @@ -72,8 +74,7 @@ void swap_cgroup_record(struct folio *folio, swp_entry_t ent) map = swap_cgroup_ctrl[swp_type(ent)].map; do { - old = __swap_cgroup_id_xchg(map, offset, - mem_cgroup_id(folio_memcg(folio))); + old = __swap_cgroup_id_xchg(map, offset, id); VM_BUG_ON(old); } while (++offset != end); } diff --git a/mm/swapfile.c b/mm/swapfile.c index ba19430dd4ead..df7c4e8b089ca 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -653,7 +653,8 @@ static void relocate_cluster(struct swap_info_struct *si, return; if (!ci->count) { - free_cluster(si, ci); + if (ci->flags != CLUSTER_FLAG_FREE) + free_cluster(si, ci); } else if (ci->count != SWAPFILE_CLUSTER) { if (ci->flags != CLUSTER_FLAG_FRAG) move_cluster(si, ci, &si->frag_clusters[ci->order], @@ -858,6 +859,10 @@ static void swap_reclaim_full_clusters(struct swap_info_struct *si, bool force) offset++; } + /* in case no swap cache is reclaimed */ + if (ci->flags == CLUSTER_FLAG_NONE) + relocate_cluster(si, ci); + unlock_cluster(ci); if (to_scan <= 0) break; @@ -2641,7 +2646,6 @@ static void wait_for_allocation(struct swap_info_struct *si) for (offset = 0; offset < end; offset += SWAPFILE_CLUSTER) { ci = lock_cluster(si, offset); unlock_cluster(ci); - offset += SWAPFILE_CLUSTER; } } @@ -3542,6 +3546,10 @@ static int __swap_duplicate(swp_entry_t entry, unsigned char usage, int nr) int err, i; si = swp_swap_info(entry); + if (WARN_ON_ONCE(!si)) { + pr_err("%s%08lx\n", Bad_file, entry.val); + return -EINVAL; + } offset = swp_offset(entry); VM_WARN_ON(nr > SWAPFILE_CLUSTER - offset % SWAPFILE_CLUSTER); diff --git a/mm/truncate.c b/mm/truncate.c index e2e115adfbc58..76d8fcd89bd00 100644 --- a/mm/truncate.c +++ b/mm/truncate.c @@ -548,8 +548,6 @@ int folio_unmap_invalidate(struct address_space *mapping, struct folio *folio, VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); - if (folio_test_dirty(folio)) - return 0; if (folio_mapped(folio)) unmap_mapping_folio(folio); BUG_ON(folio_mapped(folio)); diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c index af3dfc3633dbe..d06453fa8abae 100644 --- a/mm/userfaultfd.c +++ b/mm/userfaultfd.c @@ -18,6 +18,7 @@ #include #include #include "internal.h" +#include "swap.h" static __always_inline bool validate_dst_vma(struct vm_area_struct *dst_vma, unsigned long dst_end) @@ -1076,16 +1077,14 @@ static int move_present_pte(struct mm_struct *mm, return err; } -static int move_swap_pte(struct mm_struct *mm, +static int move_swap_pte(struct mm_struct *mm, struct vm_area_struct *dst_vma, unsigned long dst_addr, unsigned long src_addr, pte_t *dst_pte, pte_t *src_pte, pte_t orig_dst_pte, pte_t orig_src_pte, pmd_t *dst_pmd, pmd_t dst_pmdval, - spinlock_t *dst_ptl, spinlock_t *src_ptl) + spinlock_t *dst_ptl, spinlock_t *src_ptl, + struct folio *src_folio) { - if (!pte_swp_exclusive(orig_src_pte)) - return -EBUSY; - 
double_pt_lock(dst_ptl, src_ptl); if (!is_pte_pages_stable(dst_pte, src_pte, orig_dst_pte, orig_src_pte, @@ -1094,6 +1093,16 @@ static int move_swap_pte(struct mm_struct *mm, return -EAGAIN; } + /* + * The src_folio resides in the swapcache, requiring an update to its + * index and mapping to align with the dst_vma, where a swap-in may + * occur and hit the swapcache after moving the PTE. + */ + if (src_folio) { + folio_move_anon_rmap(src_folio, dst_vma); + src_folio->index = linear_page_index(dst_vma, dst_addr); + } + orig_src_pte = ptep_get_and_clear(mm, src_addr, src_pte); set_pte_at(mm, dst_addr, dst_pte, orig_src_pte); double_pt_unlock(dst_ptl, src_ptl); @@ -1141,6 +1150,7 @@ static int move_pages_pte(struct mm_struct *mm, pmd_t *dst_pmd, pmd_t *src_pmd, __u64 mode) { swp_entry_t entry; + struct swap_info_struct *si = NULL; pte_t orig_src_pte, orig_dst_pte; pte_t src_folio_pte; spinlock_t *src_ptl, *dst_ptl; @@ -1240,6 +1250,7 @@ static int move_pages_pte(struct mm_struct *mm, pmd_t *dst_pmd, pmd_t *src_pmd, */ if (!src_folio) { struct folio *folio; + bool locked; /* * Pin the page while holding the lock to be sure the @@ -1259,14 +1270,28 @@ static int move_pages_pte(struct mm_struct *mm, pmd_t *dst_pmd, pmd_t *src_pmd, goto out; } + locked = folio_trylock(folio); + /* + * We avoid waiting for folio lock with a raised + * refcount for large folios because extra refcounts + * will result in split_folio() failing later and + * retrying. If multiple tasks are trying to move a + * large folio we can end up livelocking. + */ + if (!locked && folio_test_large(folio)) { + spin_unlock(src_ptl); + err = -EAGAIN; + goto out; + } + folio_get(folio); src_folio = folio; src_folio_pte = orig_src_pte; spin_unlock(src_ptl); - if (!folio_trylock(src_folio)) { - pte_unmap(&orig_src_pte); - pte_unmap(&orig_dst_pte); + if (!locked) { + pte_unmap(src_pte); + pte_unmap(dst_pte); src_pte = dst_pte = NULL; /* now we can block and wait */ folio_lock(src_folio); @@ -1282,8 +1307,8 @@ static int move_pages_pte(struct mm_struct *mm, pmd_t *dst_pmd, pmd_t *src_pmd, /* at this point we have src_folio locked */ if (folio_test_large(src_folio)) { /* split_folio() can block */ - pte_unmap(&orig_src_pte); - pte_unmap(&orig_dst_pte); + pte_unmap(src_pte); + pte_unmap(dst_pte); src_pte = dst_pte = NULL; err = split_folio(src_folio); if (err) @@ -1308,8 +1333,8 @@ static int move_pages_pte(struct mm_struct *mm, pmd_t *dst_pmd, pmd_t *src_pmd, goto out; } if (!anon_vma_trylock_write(src_anon_vma)) { - pte_unmap(&orig_src_pte); - pte_unmap(&orig_dst_pte); + pte_unmap(src_pte); + pte_unmap(dst_pte); src_pte = dst_pte = NULL; /* now we can block and wait */ anon_vma_lock_write(src_anon_vma); @@ -1322,11 +1347,13 @@ static int move_pages_pte(struct mm_struct *mm, pmd_t *dst_pmd, pmd_t *src_pmd, orig_dst_pte, orig_src_pte, dst_pmd, dst_pmdval, dst_ptl, src_ptl, src_folio); } else { + struct folio *folio = NULL; + entry = pte_to_swp_entry(orig_src_pte); if (non_swap_entry(entry)) { if (is_migration_entry(entry)) { - pte_unmap(&orig_src_pte); - pte_unmap(&orig_dst_pte); + pte_unmap(src_pte); + pte_unmap(dst_pte); src_pte = dst_pte = NULL; migration_entry_wait(mm, src_pmd, src_addr); err = -EAGAIN; @@ -1335,9 +1362,53 @@ static int move_pages_pte(struct mm_struct *mm, pmd_t *dst_pmd, pmd_t *src_pmd, goto out; } - err = move_swap_pte(mm, dst_addr, src_addr, dst_pte, src_pte, - orig_dst_pte, orig_src_pte, dst_pmd, - dst_pmdval, dst_ptl, src_ptl); + if (!pte_swp_exclusive(orig_src_pte)) { + err = -EBUSY; + goto out; + } + + si = 
get_swap_device(entry); + if (unlikely(!si)) { + err = -EAGAIN; + goto out; + } + /* + * Verify the existence of the swapcache. If present, the folio's + * index and mapping must be updated even when the PTE is a swap + * entry. The anon_vma lock is not taken during this process since + * the folio has already been unmapped, and the swap entry is + * exclusive, preventing rmap walks. + * + * For large folios, return -EBUSY immediately, as split_folio() + * also returns -EBUSY when attempting to split unmapped large + * folios in the swapcache. This issue needs to be resolved + * separately to allow proper handling. + */ + if (!src_folio) + folio = filemap_get_folio(swap_address_space(entry), + swap_cache_index(entry)); + if (!IS_ERR_OR_NULL(folio)) { + if (folio_test_large(folio)) { + err = -EBUSY; + folio_put(folio); + goto out; + } + src_folio = folio; + src_folio_pte = orig_src_pte; + if (!folio_trylock(src_folio)) { + pte_unmap(src_pte); + pte_unmap(dst_pte); + src_pte = dst_pte = NULL; + put_swap_device(si); + si = NULL; + /* now we can block and wait */ + folio_lock(src_folio); + goto retry; + } + } + err = move_swap_pte(mm, dst_vma, dst_addr, src_addr, dst_pte, src_pte, + orig_dst_pte, orig_src_pte, dst_pmd, dst_pmdval, + dst_ptl, src_ptl, src_folio); } out: @@ -1354,6 +1425,8 @@ static int move_pages_pte(struct mm_struct *mm, pmd_t *dst_pmd, pmd_t *src_pmd, if (src_pte) pte_unmap(src_pte); mmu_notifier_invalidate_range_end(&range); + if (si) + put_swap_device(si); return err; } diff --git a/mm/util.c b/mm/util.c index b6b9684a14388..8c965474d329f 100644 --- a/mm/util.c +++ b/mm/util.c @@ -23,6 +23,7 @@ #include #include #include +#include #include @@ -569,6 +570,8 @@ unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr, LIST_HEAD(uf); ret = security_mmap_file(file, prot, flag); + if (!ret) + ret = fsnotify_mmap_perm(file, prot, pgoff >> PAGE_SHIFT, len); if (!ret) { if (mmap_write_lock_killable(mm)) return -EINTR; diff --git a/mm/vma.c b/mm/vma.c index af1d549b179c9..71ca012c616c9 100644 --- a/mm/vma.c +++ b/mm/vma.c @@ -1509,24 +1509,28 @@ int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm, static struct vm_area_struct *vma_modify(struct vma_merge_struct *vmg) { struct vm_area_struct *vma = vmg->vma; + unsigned long start = vmg->start; + unsigned long end = vmg->end; struct vm_area_struct *merged; /* First, try to merge. */ merged = vma_merge_existing_range(vmg); if (merged) return merged; + if (vmg_nomem(vmg)) + return ERR_PTR(-ENOMEM); /* Split any preceding portion of the VMA. */ - if (vma->vm_start < vmg->start) { - int err = split_vma(vmg->vmi, vma, vmg->start, 1); + if (vma->vm_start < start) { + int err = split_vma(vmg->vmi, vma, start, 1); if (err) return ERR_PTR(err); } /* Split any trailing portion of the VMA. */ - if (vma->vm_end > vmg->end) { - int err = split_vma(vmg->vmi, vma, vmg->end, 0); + if (vma->vm_end > end) { + int err = split_vma(vmg->vmi, vma, end, 0); if (err) return ERR_PTR(err); @@ -2377,7 +2381,8 @@ static int __mmap_new_vma(struct mmap_state *map, struct vm_area_struct **vmap) * vma_merge_new_range() calls khugepaged_enter_vma() too, the below * call covers the non-merge case. 
*/ - khugepaged_enter_vma(vma, map->flags); + if (!vma_is_anonymous(vma)) + khugepaged_enter_vma(vma, map->flags); ksm_add_vma(vma); *vmap = vma; return 0; diff --git a/mm/vmalloc.c b/mm/vmalloc.c index a6e7acebe9adf..61981ee1c9d2f 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -586,13 +586,13 @@ static int vmap_small_pages_range_noflush(unsigned long addr, unsigned long end, mask |= PGTBL_PGD_MODIFIED; err = vmap_pages_p4d_range(pgd, addr, next, prot, pages, &nr, &mask); if (err) - return err; + break; } while (pgd++, addr = next, addr != end); if (mask & ARCH_PAGE_TABLE_SYNC_MASK) arch_sync_kernel_mappings(start, end); - return 0; + return err; } /* diff --git a/mm/zswap.c b/mm/zswap.c index ac9d299e7d0c1..23365e76a3ce3 100644 --- a/mm/zswap.c +++ b/mm/zswap.c @@ -43,7 +43,7 @@ * statistics **********************************/ /* The number of compressed pages currently stored in zswap */ -atomic_long_t zswap_stored_pages = ATOMIC_INIT(0); +atomic_long_t zswap_stored_pages = ATOMIC_LONG_INIT(0); /* * The statistics below are not protected from concurrent access for diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c index e45187b882206..41be38264493d 100644 --- a/net/8021q/vlan.c +++ b/net/8021q/vlan.c @@ -131,7 +131,8 @@ int vlan_check_real_dev(struct net_device *real_dev, { const char *name = real_dev->name; - if (real_dev->features & NETIF_F_VLAN_CHALLENGED) { + if (real_dev->features & NETIF_F_VLAN_CHALLENGED || + real_dev->type != ARPHRD_ETHER) { pr_info("VLANs not supported on %s\n", name); NL_SET_ERR_MSG_MOD(extack, "VLANs not supported on device"); return -EOPNOTSUPP; diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c index 91d134961357c..fbf296137b094 100644 --- a/net/8021q/vlan_dev.c +++ b/net/8021q/vlan_dev.c @@ -27,6 +27,7 @@ #include #include #include +#include #include "vlan.h" #include "vlanproc.h" @@ -273,17 +274,6 @@ static int vlan_dev_open(struct net_device *dev) goto out; } - if (dev->flags & IFF_ALLMULTI) { - err = dev_set_allmulti(real_dev, 1); - if (err < 0) - goto del_unicast; - } - if (dev->flags & IFF_PROMISC) { - err = dev_set_promiscuity(real_dev, 1); - if (err < 0) - goto clear_allmulti; - } - ether_addr_copy(vlan->real_dev_addr, real_dev->dev_addr); if (vlan->flags & VLAN_FLAG_GVRP) @@ -297,12 +287,6 @@ static int vlan_dev_open(struct net_device *dev) netif_carrier_on(dev); return 0; -clear_allmulti: - if (dev->flags & IFF_ALLMULTI) - dev_set_allmulti(real_dev, -1); -del_unicast: - if (!ether_addr_equal(dev->dev_addr, real_dev->dev_addr)) - dev_uc_del(real_dev, dev->dev_addr); out: netif_carrier_off(dev); return err; @@ -315,10 +299,6 @@ static int vlan_dev_stop(struct net_device *dev) dev_mc_unsync(real_dev, dev); dev_uc_unsync(real_dev, dev); - if (dev->flags & IFF_ALLMULTI) - dev_set_allmulti(real_dev, -1); - if (dev->flags & IFF_PROMISC) - dev_set_promiscuity(real_dev, -1); if (!ether_addr_equal(dev->dev_addr, real_dev->dev_addr)) dev_uc_del(real_dev, dev->dev_addr); @@ -377,7 +357,6 @@ static int vlan_hwtstamp_set(struct net_device *dev, static int vlan_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { struct net_device *real_dev = vlan_dev_priv(dev)->real_dev; - const struct net_device_ops *ops = real_dev->netdev_ops; struct ifreq ifrr; int err = -EOPNOTSUPP; @@ -388,8 +367,7 @@ static int vlan_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) case SIOCGMIIPHY: case SIOCGMIIREG: case SIOCSMIIREG: - if (netif_device_present(real_dev) && ops->ndo_eth_ioctl) - err = ops->ndo_eth_ioctl(real_dev, &ifrr, cmd); + err = 
dev_eth_ioctl(real_dev, &ifrr, cmd); break; } @@ -490,12 +468,10 @@ static void vlan_dev_change_rx_flags(struct net_device *dev, int change) { struct net_device *real_dev = vlan_dev_priv(dev)->real_dev; - if (dev->flags & IFF_UP) { - if (change & IFF_ALLMULTI) - dev_set_allmulti(real_dev, dev->flags & IFF_ALLMULTI ? 1 : -1); - if (change & IFF_PROMISC) - dev_set_promiscuity(real_dev, dev->flags & IFF_PROMISC ? 1 : -1); - } + if (change & IFF_ALLMULTI) + dev_set_allmulti(real_dev, dev->flags & IFF_ALLMULTI ? 1 : -1); + if (change & IFF_PROMISC) + dev_set_promiscuity(real_dev, dev->flags & IFF_PROMISC ? 1 : -1); } static void vlan_dev_set_rx_mode(struct net_device *vlan_dev) diff --git a/net/8021q/vlan_netlink.c b/net/8021q/vlan_netlink.c index 134419667d59e..a000b1ef05206 100644 --- a/net/8021q/vlan_netlink.c +++ b/net/8021q/vlan_netlink.c @@ -135,11 +135,14 @@ static int vlan_changelink(struct net_device *dev, struct nlattr *tb[], return 0; } -static int vlan_newlink(struct net *src_net, struct net_device *dev, - struct nlattr *tb[], struct nlattr *data[], +static int vlan_newlink(struct net_device *dev, + struct rtnl_newlink_params *params, struct netlink_ext_ack *extack) { + struct net *link_net = rtnl_newlink_link_net(params); struct vlan_dev_priv *vlan = vlan_dev_priv(dev); + struct nlattr **data = params->data; + struct nlattr **tb = params->tb; struct net_device *real_dev; unsigned int max_mtu; __be16 proto; @@ -155,7 +158,7 @@ static int vlan_newlink(struct net *src_net, struct net_device *dev, return -EINVAL; } - real_dev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK])); + real_dev = __dev_get_by_index(link_net, nla_get_u32(tb[IFLA_LINK])); if (!real_dev) { NL_SET_ERR_MSG_MOD(extack, "link does not exist"); return -ENODEV; diff --git a/net/atm/atm_sysfs.c b/net/atm/atm_sysfs.c index 54e7fb1a4ee50..726398fa848ea 100644 --- a/net/atm/atm_sysfs.c +++ b/net/atm/atm_sysfs.c @@ -16,7 +16,7 @@ static ssize_t type_show(struct device *cdev, { struct atm_dev *adev = to_atm_dev(cdev); - return scnprintf(buf, PAGE_SIZE, "%s\n", adev->type); + return sysfs_emit(buf, "%s\n", adev->type); } static ssize_t address_show(struct device *cdev, @@ -24,7 +24,7 @@ static ssize_t address_show(struct device *cdev, { struct atm_dev *adev = to_atm_dev(cdev); - return scnprintf(buf, PAGE_SIZE, "%pM\n", adev->esi); + return sysfs_emit(buf, "%pM\n", adev->esi); } static ssize_t atmaddress_show(struct device *cdev, @@ -37,13 +37,12 @@ static ssize_t atmaddress_show(struct device *cdev, spin_lock_irqsave(&adev->lock, flags); list_for_each_entry(aaddr, &adev->local, entry) { - count += scnprintf(buf + count, PAGE_SIZE - count, - "%1phN.%2phN.%10phN.%6phN.%1phN\n", - &aaddr->addr.sas_addr.prv[0], - &aaddr->addr.sas_addr.prv[1], - &aaddr->addr.sas_addr.prv[3], - &aaddr->addr.sas_addr.prv[13], - &aaddr->addr.sas_addr.prv[19]); + count += sysfs_emit_at(buf, count, "%1phN.%2phN.%10phN.%6phN.%1phN\n", + &aaddr->addr.sas_addr.prv[0], + &aaddr->addr.sas_addr.prv[1], + &aaddr->addr.sas_addr.prv[3], + &aaddr->addr.sas_addr.prv[13], + &aaddr->addr.sas_addr.prv[19]); } spin_unlock_irqrestore(&adev->lock, flags); @@ -55,7 +54,7 @@ static ssize_t atmindex_show(struct device *cdev, { struct atm_dev *adev = to_atm_dev(cdev); - return scnprintf(buf, PAGE_SIZE, "%d\n", adev->number); + return sysfs_emit(buf, "%d\n", adev->number); } static ssize_t carrier_show(struct device *cdev, @@ -63,8 +62,7 @@ static ssize_t carrier_show(struct device *cdev, { struct atm_dev *adev = to_atm_dev(cdev); - return scnprintf(buf, PAGE_SIZE, 
"%d\n", - adev->signal == ATM_PHY_SIG_LOST ? 0 : 1); + return sysfs_emit(buf, "%d\n", adev->signal == ATM_PHY_SIG_LOST ? 0 : 1); } static ssize_t link_rate_show(struct device *cdev, @@ -87,7 +85,7 @@ static ssize_t link_rate_show(struct device *cdev, default: link_rate = adev->link_rate * 8 * 53; } - return scnprintf(buf, PAGE_SIZE, "%d\n", link_rate); + return sysfs_emit(buf, "%d\n", link_rate); } static DEVICE_ATTR_RO(address); diff --git a/net/atm/lec.c b/net/atm/lec.c index ffef658862db1..a948dd47c3f34 100644 --- a/net/atm/lec.c +++ b/net/atm/lec.c @@ -181,6 +181,7 @@ static void lec_send(struct atm_vcc *vcc, struct sk_buff *skb) { struct net_device *dev = skb->dev; + unsigned int len = skb->len; ATM_SKB(skb)->vcc = vcc; atm_account_tx(vcc, skb); @@ -191,7 +192,7 @@ lec_send(struct atm_vcc *vcc, struct sk_buff *skb) } dev->stats.tx_packets++; - dev->stats.tx_bytes += skb->len; + dev->stats.tx_bytes += len; } static void lec_tx_timeout(struct net_device *dev, unsigned int txqueue) diff --git a/net/atm/mpc.c b/net/atm/mpc.c index 324e3ab96bb39..12da0269275c5 100644 --- a/net/atm/mpc.c +++ b/net/atm/mpc.c @@ -1314,6 +1314,8 @@ static void MPOA_cache_impos_rcvd(struct k_message *msg, holding_time = msg->content.eg_info.holding_time; dprintk("(%s) entry = %p, holding_time = %u\n", mpc->dev->name, entry, holding_time); + if (entry == NULL && !holding_time) + return; if (entry == NULL && holding_time) { entry = mpc->eg_ops->add_entry(msg, mpc); mpc->eg_ops->put(entry); diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c index 9f3b8b682adb2..3ee7dba343100 100644 --- a/net/ax25/af_ax25.c +++ b/net/ax25/af_ax25.c @@ -1270,28 +1270,18 @@ static int __must_check ax25_connect(struct socket *sock, } } - /* - * Must bind first - autobinding in this may or may not work. If - * the socket is already bound, check to see if the device has - * been filled in, error if it hasn't. - */ + /* Must bind first - autobinding does not work. */ if (sock_flag(sk, SOCK_ZAPPED)) { - /* check if we can remove this feature. It is broken. */ - printk(KERN_WARNING "ax25_connect(): %s uses autobind, please contact jreuter@yaina.de\n", - current->comm); - if ((err = ax25_rt_autobind(ax25, &fsa->fsa_ax25.sax25_call)) < 0) { - kfree(digi); - goto out_release; - } + kfree(digi); + err = -EINVAL; + goto out_release; + } - ax25_fillin_cb(ax25, ax25->ax25_dev); - ax25_cb_add(ax25); - } else { - if (ax25->ax25_dev == NULL) { - kfree(digi); - err = -EHOSTUNREACH; - goto out_release; - } + /* Check to see if the device has been filled in, error if it hasn't. */ + if (ax25->ax25_dev == NULL) { + kfree(digi); + err = -EHOSTUNREACH; + goto out_release; } if (sk->sk_type == SOCK_SEQPACKET && diff --git a/net/ax25/ax25_route.c b/net/ax25/ax25_route.c index 69de75db0c9c2..10577434f40bf 100644 --- a/net/ax25/ax25_route.c +++ b/net/ax25/ax25_route.c @@ -373,80 +373,6 @@ ax25_route *ax25_get_route(ax25_address *addr, struct net_device *dev) return ax25_rt; } -/* - * Adjust path: If you specify a default route and want to connect - * a target on the digipeater path but w/o having a special route - * set before, the path has to be truncated from your target on. - */ -static inline void ax25_adjust_path(ax25_address *addr, ax25_digi *digipeat) -{ - int k; - - for (k = 0; k < digipeat->ndigi; k++) { - if (ax25cmp(addr, &digipeat->calls[k]) == 0) - break; - } - - digipeat->ndigi = k; -} - - -/* - * Find which interface to use. 
- */ -int ax25_rt_autobind(ax25_cb *ax25, ax25_address *addr) -{ - ax25_uid_assoc *user; - ax25_route *ax25_rt; - int err = 0; - - ax25_route_lock_use(); - ax25_rt = ax25_get_route(addr, NULL); - if (!ax25_rt) { - ax25_route_lock_unuse(); - return -EHOSTUNREACH; - } - rcu_read_lock(); - if ((ax25->ax25_dev = ax25_dev_ax25dev(ax25_rt->dev)) == NULL) { - err = -EHOSTUNREACH; - goto put; - } - - user = ax25_findbyuid(current_euid()); - if (user) { - ax25->source_addr = user->call; - ax25_uid_put(user); - } else { - if (ax25_uid_policy && !capable(CAP_NET_BIND_SERVICE)) { - err = -EPERM; - goto put; - } - ax25->source_addr = *(ax25_address *)ax25->ax25_dev->dev->dev_addr; - } - - if (ax25_rt->digipeat != NULL) { - ax25->digipeat = kmemdup(ax25_rt->digipeat, sizeof(ax25_digi), - GFP_ATOMIC); - if (ax25->digipeat == NULL) { - err = -ENOMEM; - goto put; - } - ax25_adjust_path(addr, ax25->digipeat); - } - - if (ax25->sk != NULL) { - local_bh_disable(); - bh_lock_sock(ax25->sk); - sock_reset_flag(ax25->sk, SOCK_ZAPPED); - bh_unlock_sock(ax25->sk); - local_bh_enable(); - } - -put: - rcu_read_unlock(); - ax25_route_lock_unuse(); - return err; -} struct sk_buff *ax25_rt_build_path(struct sk_buff *skb, ax25_address *src, ax25_address *dest, ax25_digi *digi) diff --git a/net/batman-adv/Makefile b/net/batman-adv/Makefile index b51d8b071b562..1cc9be6de4569 100644 --- a/net/batman-adv/Makefile +++ b/net/batman-adv/Makefile @@ -19,6 +19,7 @@ batman-adv-y += hard-interface.o batman-adv-y += hash.o batman-adv-$(CONFIG_BATMAN_ADV_DEBUG) += log.o batman-adv-y += main.o +batman-adv-y += mesh-interface.o batman-adv-$(CONFIG_BATMAN_ADV_MCAST) += multicast.o batman-adv-$(CONFIG_BATMAN_ADV_MCAST) += multicast_forw.o batman-adv-y += netlink.o @@ -26,7 +27,6 @@ batman-adv-$(CONFIG_BATMAN_ADV_NC) += network-coding.o batman-adv-y += originator.o batman-adv-y += routing.o batman-adv-y += send.o -batman-adv-y += soft-interface.o batman-adv-$(CONFIG_BATMAN_ADV_TRACING) += trace.o batman-adv-y += tp_meter.o batman-adv-y += translation-table.o diff --git a/net/batman-adv/bat_algo.c b/net/batman-adv/bat_algo.c index 4eee53d19eb04..c0c982b6f0292 100644 --- a/net/batman-adv/bat_algo.c +++ b/net/batman-adv/bat_algo.c @@ -90,15 +90,15 @@ int batadv_algo_register(struct batadv_algo_ops *bat_algo_ops) } /** - * batadv_algo_select() - Select algorithm of soft interface - * @bat_priv: the bat priv with all the soft interface information + * batadv_algo_select() - Select algorithm of mesh interface + * @bat_priv: the bat priv with all the mesh interface information * @name: name of the algorithm to select * - * The algorithm callbacks for the soft interface will be set when the algorithm + * The algorithm callbacks for the mesh interface will be set when the algorithm * with the correct name was found. Any previous selected algorithm will not be * deinitialized and the new selected algorithm will also not be initialized. * It is therefore not allowed to call batadv_algo_select outside the creation - * function of the soft interface. + * function of the mesh interface. 
* * Return: 0 on success or negative error number in case of failure */ diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c index 07ae5dd1f150b..458879d21d663 100644 --- a/net/batman-adv/bat_iv_ogm.c +++ b/net/batman-adv/bat_iv_ogm.c @@ -23,6 +23,7 @@ #include #include #include +#include #include #include #include @@ -129,7 +130,7 @@ static u8 batadv_ring_buffer_avg(const u8 lq_recv[]) /** * batadv_iv_ogm_orig_get() - retrieve or create (if does not exist) an * originator - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @addr: mac address of the originator * * Return: the originator object corresponding to the passed mac address or NULL @@ -325,15 +326,14 @@ batadv_iv_ogm_aggr_packet(int buff_pos, int packet_len, /* check if there is enough space for the optional TVLV */ next_buff_pos += ntohs(ogm_packet->tvlv_len); - return (next_buff_pos <= packet_len) && - (next_buff_pos <= BATADV_MAX_AGGREGATION_BYTES); + return next_buff_pos <= packet_len; } /* send a batman ogm to a given interface */ static void batadv_iv_ogm_send_to_if(struct batadv_forw_packet *forw_packet, struct batadv_hard_iface *hard_iface) { - struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface); + struct batadv_priv *bat_priv = netdev_priv(hard_iface->mesh_iface); const char *fwd_str; u8 packet_num; s16 buff_pos; @@ -355,7 +355,7 @@ static void batadv_iv_ogm_send_to_if(struct batadv_forw_packet *forw_packet, /* we might have aggregated direct link packets with an * ordinary base packet */ - if (forw_packet->direct_link_flags & BIT(packet_num) && + if (test_bit(packet_num, forw_packet->direct_link_flags) && forw_packet->if_incoming == hard_iface) batadv_ogm_packet->flags |= BATADV_DIRECTLINK; else @@ -396,20 +396,20 @@ static void batadv_iv_ogm_send_to_if(struct batadv_forw_packet *forw_packet, /* send a batman ogm packet */ static void batadv_iv_ogm_emit(struct batadv_forw_packet *forw_packet) { - struct net_device *soft_iface; + struct net_device *mesh_iface; if (!forw_packet->if_incoming) { pr_err("Error - can't forward packet: incoming iface not specified\n"); return; } - soft_iface = forw_packet->if_incoming->soft_iface; + mesh_iface = forw_packet->if_incoming->mesh_iface; if (WARN_ON(!forw_packet->if_outgoing)) return; - if (forw_packet->if_outgoing->soft_iface != soft_iface) { - pr_warn("%s: soft interface switch for queued OGM\n", __func__); + if (forw_packet->if_outgoing->mesh_iface != mesh_iface) { + pr_warn("%s: mesh interface switch for queued OGM\n", __func__); return; } @@ -424,7 +424,7 @@ static void batadv_iv_ogm_emit(struct batadv_forw_packet *forw_packet) * batadv_iv_ogm_can_aggregate() - find out if an OGM can be aggregated on an * existing forward packet * @new_bat_ogm_packet: OGM packet to be aggregated - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @packet_len: (total) length of the OGM * @send_time: timestamp (jiffies) when the packet is to be sent * @directlink: true if this is a direct link packet @@ -444,28 +444,37 @@ batadv_iv_ogm_can_aggregate(const struct batadv_ogm_packet *new_bat_ogm_packet, const struct batadv_forw_packet *forw_packet) { struct batadv_ogm_packet *batadv_ogm_packet; - int aggregated_bytes = forw_packet->packet_len + packet_len; + unsigned int aggregated_bytes = forw_packet->packet_len + packet_len; struct batadv_hard_iface *primary_if = NULL; + u8 packet_num = 
forw_packet->num_packets; bool res = false; unsigned long aggregation_end_time; + unsigned int max_bytes; batadv_ogm_packet = (struct batadv_ogm_packet *)forw_packet->skb->data; aggregation_end_time = send_time; aggregation_end_time += msecs_to_jiffies(BATADV_MAX_AGGREGATION_MS); + max_bytes = min_t(unsigned int, if_outgoing->net_dev->mtu, + BATADV_MAX_AGGREGATION_BYTES); + /* we can aggregate the current packet to this aggregated packet * if: * * - the send time is within our MAX_AGGREGATION_MS time * - the resulting packet won't be bigger than - * MAX_AGGREGATION_BYTES + * MAX_AGGREGATION_BYTES and MTU of the outgoing interface + * - the number of packets is lower than MAX_AGGREGATION_PACKETS * otherwise aggregation is not possible */ if (!time_before(send_time, forw_packet->send_time) || !time_after_eq(aggregation_end_time, forw_packet->send_time)) return false; - if (aggregated_bytes > BATADV_MAX_AGGREGATION_BYTES) + if (aggregated_bytes > max_bytes) + return false; + + if (packet_num >= BATADV_MAX_AGGREGATION_PACKETS) return false; /* packet is not leaving on the same interface. */ @@ -540,16 +549,16 @@ static void batadv_iv_ogm_aggregate_new(const unsigned char *packet_buff, struct batadv_hard_iface *if_outgoing, int own_packet) { - struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface); + struct batadv_priv *bat_priv = netdev_priv(if_incoming->mesh_iface); struct batadv_forw_packet *forw_packet_aggr; struct sk_buff *skb; unsigned char *skb_buff; unsigned int skb_size; atomic_t *queue_left = own_packet ? NULL : &bat_priv->batman_queue_left; - if (atomic_read(&bat_priv->aggregated_ogms) && - packet_len < BATADV_MAX_AGGREGATION_BYTES) - skb_size = BATADV_MAX_AGGREGATION_BYTES; + if (atomic_read(&bat_priv->aggregated_ogms)) + skb_size = max_t(unsigned int, BATADV_MAX_AGGREGATION_BYTES, + packet_len); else skb_size = packet_len; @@ -574,12 +583,13 @@ static void batadv_iv_ogm_aggregate_new(const unsigned char *packet_buff, memcpy(skb_buff, packet_buff, packet_len); forw_packet_aggr->own = own_packet; - forw_packet_aggr->direct_link_flags = BATADV_NO_FLAGS; + bitmap_zero(forw_packet_aggr->direct_link_flags, + BATADV_MAX_AGGREGATION_PACKETS); forw_packet_aggr->send_time = send_time; /* save packet direct link flag status */ if (direct_link) - forw_packet_aggr->direct_link_flags |= 1; + set_bit(0, forw_packet_aggr->direct_link_flags); INIT_DELAYED_WORK(&forw_packet_aggr->delayed_work, batadv_iv_send_outstanding_bat_ogm_packet); @@ -592,22 +602,20 @@ static void batadv_iv_ogm_aggregate(struct batadv_forw_packet *forw_packet_aggr, const unsigned char *packet_buff, int packet_len, bool direct_link) { - unsigned long new_direct_link_flag; - skb_put_data(forw_packet_aggr->skb, packet_buff, packet_len); forw_packet_aggr->packet_len += packet_len; - forw_packet_aggr->num_packets++; /* save packet direct link flag status */ - if (direct_link) { - new_direct_link_flag = BIT(forw_packet_aggr->num_packets); - forw_packet_aggr->direct_link_flags |= new_direct_link_flag; - } + if (direct_link) + set_bit(forw_packet_aggr->num_packets, + forw_packet_aggr->direct_link_flags); + + forw_packet_aggr->num_packets++; } /** * batadv_iv_ogm_queue_add() - queue up an OGM for transmission - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @packet_buff: pointer to the OGM * @packet_len: (total) length of the OGM * @if_incoming: interface where the packet was received @@ -686,7 +694,7 @@ static void 
batadv_iv_ogm_forward(struct batadv_orig_node *orig_node, struct batadv_hard_iface *if_incoming, struct batadv_hard_iface *if_outgoing) { - struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface); + struct batadv_priv *bat_priv = netdev_priv(if_incoming->mesh_iface); u16 tvlv_len; if (batadv_ogm_packet->ttl <= 1) { @@ -739,7 +747,7 @@ static void batadv_iv_ogm_forward(struct batadv_orig_node *orig_node, static void batadv_iv_ogm_slide_own_bcast_window(struct batadv_hard_iface *hard_iface) { - struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface); + struct batadv_priv *bat_priv = netdev_priv(hard_iface->mesh_iface); struct batadv_hashtable *hash = bat_priv->orig_hash; struct hlist_head *head; struct batadv_orig_node *orig_node; @@ -778,7 +786,7 @@ batadv_iv_ogm_slide_own_bcast_window(struct batadv_hard_iface *hard_iface) */ static void batadv_iv_ogm_schedule_buff(struct batadv_hard_iface *hard_iface) { - struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface); + struct batadv_priv *bat_priv = netdev_priv(hard_iface->mesh_iface); unsigned char **ogm_buff = &hard_iface->bat_iv.ogm_buff; struct batadv_ogm_packet *batadv_ogm_packet; struct batadv_hard_iface *primary_if, *tmp_hard_iface; @@ -840,7 +848,7 @@ static void batadv_iv_ogm_schedule_buff(struct batadv_hard_iface *hard_iface) */ rcu_read_lock(); list_for_each_entry_rcu(tmp_hard_iface, &batadv_hardif_list, list) { - if (tmp_hard_iface->soft_iface != hard_iface->soft_iface) + if (tmp_hard_iface->mesh_iface != hard_iface->mesh_iface) continue; if (!kref_get_unless_zero(&tmp_hard_iface->refcount)) @@ -901,7 +909,7 @@ static u8 batadv_iv_orig_ifinfo_sum(struct batadv_orig_node *orig_node, /** * batadv_iv_ogm_orig_update() - use OGM to update corresponding data in an * originator - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @orig_node: the orig node who originally emitted the ogm packet * @orig_ifinfo: ifinfo for the outgoing interface of the orig_node * @ethhdr: Ethernet header of the OGM @@ -1065,7 +1073,7 @@ static bool batadv_iv_ogm_calc_tq(struct batadv_orig_node *orig_node, struct batadv_hard_iface *if_incoming, struct batadv_hard_iface *if_outgoing) { - struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface); + struct batadv_priv *bat_priv = netdev_priv(if_incoming->mesh_iface); struct batadv_neigh_node *neigh_node = NULL, *tmp_neigh_node; struct batadv_neigh_ifinfo *neigh_ifinfo; u8 total_count; @@ -1207,7 +1215,7 @@ batadv_iv_ogm_update_seqnos(const struct ethhdr *ethhdr, const struct batadv_hard_iface *if_incoming, struct batadv_hard_iface *if_outgoing) { - struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface); + struct batadv_priv *bat_priv = netdev_priv(if_incoming->mesh_iface); struct batadv_orig_node *orig_node; struct batadv_orig_ifinfo *orig_ifinfo = NULL; struct batadv_neigh_node *neigh_node; @@ -1309,7 +1317,7 @@ batadv_iv_ogm_process_per_outif(const struct sk_buff *skb, int ogm_offset, struct batadv_hard_iface *if_incoming, struct batadv_hard_iface *if_outgoing) { - struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface); + struct batadv_priv *bat_priv = netdev_priv(if_incoming->mesh_iface); struct batadv_hardif_neigh_node *hardif_neigh = NULL; struct batadv_neigh_node *router = NULL; struct batadv_neigh_node *router_router = NULL; @@ -1549,7 +1557,7 @@ static void batadv_iv_ogm_process_reply(struct batadv_ogm_packet *ogm_packet, static void 
batadv_iv_ogm_process(const struct sk_buff *skb, int ogm_offset, struct batadv_hard_iface *if_incoming) { - struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface); + struct batadv_priv *bat_priv = netdev_priv(if_incoming->mesh_iface); struct batadv_orig_node *orig_neigh_node, *orig_node; struct batadv_hard_iface *hard_iface; struct batadv_ogm_packet *ogm_packet; @@ -1599,7 +1607,7 @@ static void batadv_iv_ogm_process(const struct sk_buff *skb, int ogm_offset, if (hard_iface->if_status != BATADV_IF_ACTIVE) continue; - if (hard_iface->soft_iface != if_incoming->soft_iface) + if (hard_iface->mesh_iface != if_incoming->mesh_iface) continue; if (batadv_compare_eth(ethhdr->h_source, @@ -1664,7 +1672,7 @@ static void batadv_iv_ogm_process(const struct sk_buff *skb, int ogm_offset, if (hard_iface->if_status != BATADV_IF_ACTIVE) continue; - if (hard_iface->soft_iface != bat_priv->soft_iface) + if (hard_iface->mesh_iface != bat_priv->mesh_iface) continue; if (!kref_get_unless_zero(&hard_iface->refcount)) @@ -1690,7 +1698,7 @@ static void batadv_iv_send_outstanding_bat_ogm_packet(struct work_struct *work) delayed_work = to_delayed_work(work); forw_packet = container_of(delayed_work, struct batadv_forw_packet, delayed_work); - bat_priv = netdev_priv(forw_packet->if_incoming->soft_iface); + bat_priv = netdev_priv(forw_packet->if_incoming->mesh_iface); if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING) { dropped = true; @@ -1721,7 +1729,7 @@ static void batadv_iv_send_outstanding_bat_ogm_packet(struct work_struct *work) static int batadv_iv_ogm_receive(struct sk_buff *skb, struct batadv_hard_iface *if_incoming) { - struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface); + struct batadv_priv *bat_priv = netdev_priv(if_incoming->mesh_iface); struct batadv_ogm_packet *ogm_packet; u8 *packet_pos; int ogm_offset; @@ -1800,7 +1808,7 @@ batadv_iv_ogm_neigh_get_tq_avg(struct batadv_neigh_node *neigh_node, * @msg: Netlink message to dump into * @portid: Port making netlink request * @seq: Sequence number of netlink message - * @bat_priv: The bat priv with all the soft interface information + * @bat_priv: The bat priv with all the mesh interface information * @if_outgoing: Limit dump to entries with this outgoing interface * @orig_node: Originator to dump * @neigh_node: Single hops neighbour @@ -1863,7 +1871,7 @@ batadv_iv_ogm_orig_dump_subentry(struct sk_buff *msg, u32 portid, u32 seq, * @msg: Netlink message to dump into * @portid: Port making netlink request * @seq: Sequence number of netlink message - * @bat_priv: The bat priv with all the soft interface information + * @bat_priv: The bat priv with all the mesh interface information * @if_outgoing: Limit dump to entries with this outgoing interface * @orig_node: Originator to dump * @sub_s: Number of sub entries to skip @@ -1925,7 +1933,7 @@ batadv_iv_ogm_orig_dump_entry(struct sk_buff *msg, u32 portid, u32 seq, * @msg: Netlink message to dump into * @portid: Port making netlink request * @seq: Sequence number of netlink message - * @bat_priv: The bat priv with all the soft interface information + * @bat_priv: The bat priv with all the mesh interface information * @if_outgoing: Limit dump to entries with this outgoing interface * @head: Bucket to be dumped * @idx_s: Number of entries to be skipped @@ -1966,7 +1974,7 @@ batadv_iv_ogm_orig_dump_bucket(struct sk_buff *msg, u32 portid, u32 seq, * batadv_iv_ogm_orig_dump() - Dump the originators into a message * @msg: Netlink message to dump into * @cb: Control block 
containing additional options - * @bat_priv: The bat priv with all the soft interface information + * @bat_priv: The bat priv with all the mesh interface information * @if_outgoing: Limit dump to entries with this outgoing interface */ static void @@ -2088,7 +2096,7 @@ batadv_iv_ogm_neigh_dump_neigh(struct sk_buff *msg, u32 portid, u32 seq, * @msg: Netlink message to dump into * @portid: Port making netlink request * @seq: Sequence number of netlink message - * @bat_priv: The bat priv with all the soft interface information + * @bat_priv: The bat priv with all the mesh interface information * @hard_iface: Hard interface to dump the neighbours for * @idx_s: Number of entries to skip * @@ -2125,7 +2133,7 @@ batadv_iv_ogm_neigh_dump_hardif(struct sk_buff *msg, u32 portid, u32 seq, * batadv_iv_ogm_neigh_dump() - Dump the neighbours into a message * @msg: Netlink message to dump into * @cb: Control block containing additional options - * @bat_priv: The bat priv with all the soft interface information + * @bat_priv: The bat priv with all the mesh interface information * @single_hardif: Limit dump to this hard interface */ static void @@ -2152,7 +2160,7 @@ batadv_iv_ogm_neigh_dump(struct sk_buff *msg, struct netlink_callback *cb, } else { list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) { - if (hard_iface->soft_iface != bat_priv->soft_iface) + if (hard_iface->mesh_iface != bat_priv->mesh_iface) continue; if (i_hardif++ < i_hardif_s) @@ -2236,7 +2244,7 @@ static void batadv_iv_iface_enabled(struct batadv_hard_iface *hard_iface) /** * batadv_iv_init_sel_class() - initialize GW selection class - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information */ static void batadv_iv_init_sel_class(struct batadv_priv *bat_priv) { @@ -2391,7 +2399,7 @@ static bool batadv_iv_gw_is_eligible(struct batadv_priv *bat_priv, * @msg: Netlink message to dump into * @portid: Port making netlink request * @cb: Control block containing additional options - * @bat_priv: The bat priv with all the soft interface information + * @bat_priv: The bat priv with all the mesh interface information * @gw_node: Gateway to be dumped * * Return: Error code, or 0 on success @@ -2466,7 +2474,7 @@ static int batadv_iv_gw_dump_entry(struct sk_buff *msg, u32 portid, * batadv_iv_gw_dump() - Dump gateways into a message * @msg: Netlink message to dump into * @cb: Control block containing additional options - * @bat_priv: The bat priv with all the soft interface information + * @bat_priv: The bat priv with all the mesh interface information */ static void batadv_iv_gw_dump(struct sk_buff *msg, struct netlink_callback *cb, struct batadv_priv *bat_priv) diff --git a/net/batman-adv/bat_v.c b/net/batman-adv/bat_v.c index d35479c465e2c..c16c2e60889d2 100644 --- a/net/batman-adv/bat_v.c +++ b/net/batman-adv/bat_v.c @@ -43,7 +43,7 @@ static void batadv_v_iface_activate(struct batadv_hard_iface *hard_iface) { - struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface); + struct batadv_priv *bat_priv = netdev_priv(hard_iface->mesh_iface); struct batadv_hard_iface *primary_if; primary_if = batadv_primary_if_get_selected(bat_priv); @@ -97,7 +97,7 @@ static void batadv_v_primary_iface_set(struct batadv_hard_iface *hard_iface) */ static void batadv_v_iface_update_mac(struct batadv_hard_iface *hard_iface) { - struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface); + struct batadv_priv *bat_priv = netdev_priv(hard_iface->mesh_iface); struct 
batadv_hard_iface *primary_if; primary_if = batadv_primary_if_get_selected(bat_priv); @@ -166,7 +166,7 @@ batadv_v_neigh_dump_neigh(struct sk_buff *msg, u32 portid, u32 seq, * @msg: Netlink message to dump into * @portid: Port making netlink request * @seq: Sequence number of netlink message - * @bat_priv: The bat priv with all the soft interface information + * @bat_priv: The bat priv with all the mesh interface information * @hard_iface: The hard interface to be dumped * @idx_s: Entries to be skipped * @@ -203,7 +203,7 @@ batadv_v_neigh_dump_hardif(struct sk_buff *msg, u32 portid, u32 seq, * message * @msg: Netlink message to dump into * @cb: Control block containing additional options - * @bat_priv: The bat priv with all the soft interface information + * @bat_priv: The bat priv with all the mesh interface information * @single_hardif: Limit dumping to this hard interface */ static void @@ -228,7 +228,7 @@ batadv_v_neigh_dump(struct sk_buff *msg, struct netlink_callback *cb, } } else { list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) { - if (hard_iface->soft_iface != bat_priv->soft_iface) + if (hard_iface->mesh_iface != bat_priv->mesh_iface) continue; if (i_hardif++ < i_hardif_s) @@ -254,7 +254,7 @@ batadv_v_neigh_dump(struct sk_buff *msg, struct netlink_callback *cb, * @msg: Netlink message to dump into * @portid: Port making netlink request * @seq: Sequence number of netlink message - * @bat_priv: The bat priv with all the soft interface information + * @bat_priv: The bat priv with all the mesh interface information * @if_outgoing: Limit dump to entries with this outgoing interface * @orig_node: Originator to dump * @neigh_node: Single hops neighbour @@ -322,7 +322,7 @@ batadv_v_orig_dump_subentry(struct sk_buff *msg, u32 portid, u32 seq, * @msg: Netlink message to dump into * @portid: Port making netlink request * @seq: Sequence number of netlink message - * @bat_priv: The bat priv with all the soft interface information + * @bat_priv: The bat priv with all the mesh interface information * @if_outgoing: Limit dump to entries with this outgoing interface * @orig_node: Originator to dump * @sub_s: Number of sub entries to skip @@ -374,7 +374,7 @@ batadv_v_orig_dump_entry(struct sk_buff *msg, u32 portid, u32 seq, * @msg: Netlink message to dump into * @portid: Port making netlink request * @seq: Sequence number of netlink message - * @bat_priv: The bat priv with all the soft interface information + * @bat_priv: The bat priv with all the mesh interface information * @if_outgoing: Limit dump to entries with this outgoing interface * @head: Bucket to be dumped * @idx_s: Number of entries to be skipped @@ -414,7 +414,7 @@ batadv_v_orig_dump_bucket(struct sk_buff *msg, u32 portid, u32 seq, * batadv_v_orig_dump() - Dump the originators into a message * @msg: Netlink message to dump into * @cb: Control block containing additional options - * @bat_priv: The bat priv with all the soft interface information + * @bat_priv: The bat priv with all the mesh interface information * @if_outgoing: Limit dump to entries with this outgoing interface */ static void @@ -502,7 +502,7 @@ static bool batadv_v_neigh_is_sob(struct batadv_neigh_node *neigh1, /** * batadv_v_init_sel_class() - initialize GW selection class - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information */ static void batadv_v_init_sel_class(struct batadv_priv *bat_priv) { @@ -553,7 +553,7 @@ static int batadv_v_gw_throughput_get(struct 
batadv_gw_node *gw_node, u32 *bw) /** * batadv_v_gw_get_best_gw_node() - retrieve the best GW node - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * * Return: the GW node having the best GW-metric, NULL if no GW is known */ @@ -590,7 +590,7 @@ batadv_v_gw_get_best_gw_node(struct batadv_priv *bat_priv) /** * batadv_v_gw_is_eligible() - check if a originator would be selected as GW - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @curr_gw_orig: originator representing the currently selected GW * @orig_node: the originator representing the new candidate * @@ -647,7 +647,7 @@ static bool batadv_v_gw_is_eligible(struct batadv_priv *bat_priv, * @msg: Netlink message to dump into * @portid: Port making netlink request * @cb: Control block containing additional options - * @bat_priv: The bat priv with all the soft interface information + * @bat_priv: The bat priv with all the mesh interface information * @gw_node: Gateway to be dumped * * Return: Error code, or 0 on success @@ -746,7 +746,7 @@ static int batadv_v_gw_dump_entry(struct sk_buff *msg, u32 portid, * batadv_v_gw_dump() - Dump gateways into a message * @msg: Netlink message to dump into * @cb: Control block containing additional options - * @bat_priv: The bat priv with all the soft interface information + * @bat_priv: The bat priv with all the mesh interface information */ static void batadv_v_gw_dump(struct sk_buff *msg, struct netlink_callback *cb, struct batadv_priv *bat_priv) diff --git a/net/batman-adv/bat_v_elp.c b/net/batman-adv/bat_v_elp.c index b065578b4436e..70d6778da0d7b 100644 --- a/net/batman-adv/bat_v_elp.c +++ b/net/batman-adv/bat_v_elp.c @@ -82,7 +82,7 @@ static bool batadv_v_elp_get_throughput(struct batadv_hardif_neigh_node *neigh, u32 *pthroughput) { struct batadv_hard_iface *hard_iface = neigh->if_incoming; - struct net_device *soft_iface = hard_iface->soft_iface; + struct net_device *mesh_iface = hard_iface->mesh_iface; struct ethtool_link_ksettings link_settings; struct net_device *real_netdev; struct station_info sinfo; @@ -92,7 +92,7 @@ static bool batadv_v_elp_get_throughput(struct batadv_hardif_neigh_node *neigh, /* don't query throughput when no longer associated with any * batman-adv interface */ - if (!soft_iface) + if (!mesh_iface) return false; /* if the user specified a customised value for this interface, then @@ -180,7 +180,7 @@ static bool batadv_v_elp_get_throughput(struct batadv_hardif_neigh_node *neigh, default_throughput: if (!(hard_iface->bat_v.flags & BATADV_WARNING_DEFAULT)) { - batadv_info(soft_iface, + batadv_info(mesh_iface, "WiFi driver or ethtool info does not provide information about link speeds on interface %s, therefore defaulting to hardcoded throughput values of %u.%1u Mbps. 
Consider overriding the throughput manually or checking your driver.\n", hard_iface->net_dev->name, BATADV_THROUGHPUT_DEFAULT_VALUE / 10, @@ -226,7 +226,7 @@ static bool batadv_v_elp_wifi_neigh_probe(struct batadv_hardif_neigh_node *neigh) { struct batadv_hard_iface *hard_iface = neigh->if_incoming; - struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface); + struct batadv_priv *bat_priv = netdev_priv(hard_iface->mesh_iface); unsigned long last_tx_diff; struct sk_buff *skb; int probe_len, i; @@ -295,7 +295,7 @@ static void batadv_v_elp_periodic_work(struct work_struct *work) bat_v = container_of(work, struct batadv_hard_iface_bat_v, elp_wq.work); hard_iface = container_of(bat_v, struct batadv_hard_iface, bat_v); - bat_priv = netdev_priv(hard_iface->soft_iface); + bat_priv = netdev_priv(hard_iface->mesh_iface); if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING) goto out; @@ -476,7 +476,7 @@ void batadv_v_elp_primary_iface_set(struct batadv_hard_iface *primary_iface) /* update orig field of every elp iface belonging to this mesh */ rcu_read_lock(); list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) { - if (primary_iface->soft_iface != hard_iface->soft_iface) + if (primary_iface->mesh_iface != hard_iface->mesh_iface) continue; batadv_v_elp_iface_activate(primary_iface, hard_iface); @@ -486,7 +486,7 @@ void batadv_v_elp_primary_iface_set(struct batadv_hard_iface *primary_iface) /** * batadv_v_elp_neigh_update() - update an ELP neighbour node - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @neigh_addr: the neighbour interface address * @if_incoming: the interface the packet was received through * @elp_packet: the received ELP packet @@ -552,7 +552,7 @@ static void batadv_v_elp_neigh_update(struct batadv_priv *bat_priv, int batadv_v_elp_packet_recv(struct sk_buff *skb, struct batadv_hard_iface *if_incoming) { - struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface); + struct batadv_priv *bat_priv = netdev_priv(if_incoming->mesh_iface); struct batadv_elp_packet *elp_packet; struct batadv_hard_iface *primary_if; struct ethhdr *ethhdr; diff --git a/net/batman-adv/bat_v_ogm.c b/net/batman-adv/bat_v_ogm.c index e503ee0d896bd..b86bb647da5b7 100644 --- a/net/batman-adv/bat_v_ogm.c +++ b/net/batman-adv/bat_v_ogm.c @@ -45,7 +45,7 @@ /** * batadv_v_ogm_orig_get() - retrieve and possibly create an originator node - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @addr: the address of the originator * * Return: the orig_node corresponding to the specified address. 
If such an @@ -96,7 +96,7 @@ static void batadv_v_ogm_start_queue_timer(struct batadv_hard_iface *hard_iface) /** * batadv_v_ogm_start_timer() - restart the OGM sending timer - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information */ static void batadv_v_ogm_start_timer(struct batadv_priv *bat_priv) { @@ -121,7 +121,7 @@ static void batadv_v_ogm_start_timer(struct batadv_priv *bat_priv) static void batadv_v_ogm_send_to_if(struct sk_buff *skb, struct batadv_hard_iface *hard_iface) { - struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface); + struct batadv_priv *bat_priv = netdev_priv(hard_iface->mesh_iface); if (hard_iface->if_status != BATADV_IF_ACTIVE) { kfree_skb(skb); @@ -239,7 +239,7 @@ static void batadv_v_ogm_aggr_send(struct batadv_hard_iface *hard_iface) static void batadv_v_ogm_queue_on_if(struct sk_buff *skb, struct batadv_hard_iface *hard_iface) { - struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface); + struct batadv_priv *bat_priv = netdev_priv(hard_iface->mesh_iface); if (!atomic_read(&bat_priv->aggregated_ogms)) { batadv_v_ogm_send_to_if(skb, hard_iface); @@ -256,10 +256,10 @@ static void batadv_v_ogm_queue_on_if(struct sk_buff *skb, } /** - * batadv_v_ogm_send_softif() - periodic worker broadcasting the own OGM - * @bat_priv: the bat priv with all the soft interface information + * batadv_v_ogm_send_meshif() - periodic worker broadcasting the own OGM + * @bat_priv: the bat priv with all the mesh interface information */ -static void batadv_v_ogm_send_softif(struct batadv_priv *bat_priv) +static void batadv_v_ogm_send_meshif(struct batadv_priv *bat_priv) { struct batadv_hard_iface *hard_iface; struct batadv_ogm2_packet *ogm_packet; @@ -302,7 +302,7 @@ static void batadv_v_ogm_send_softif(struct batadv_priv *bat_priv) /* broadcast on every interface */ rcu_read_lock(); list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) { - if (hard_iface->soft_iface != bat_priv->soft_iface) + if (hard_iface->mesh_iface != bat_priv->mesh_iface) continue; if (!kref_get_unless_zero(&hard_iface->refcount)) @@ -373,7 +373,7 @@ static void batadv_v_ogm_send(struct work_struct *work) bat_priv = container_of(bat_v, struct batadv_priv, bat_v); mutex_lock(&bat_priv->bat_v.ogm_buff_mutex); - batadv_v_ogm_send_softif(bat_priv); + batadv_v_ogm_send_meshif(bat_priv); mutex_unlock(&bat_priv->bat_v.ogm_buff_mutex); } @@ -408,7 +408,7 @@ void batadv_v_ogm_aggr_work(struct work_struct *work) */ int batadv_v_ogm_iface_enable(struct batadv_hard_iface *hard_iface) { - struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface); + struct batadv_priv *bat_priv = netdev_priv(hard_iface->mesh_iface); batadv_v_ogm_start_queue_timer(hard_iface); batadv_v_ogm_start_timer(bat_priv); @@ -435,7 +435,7 @@ void batadv_v_ogm_iface_disable(struct batadv_hard_iface *hard_iface) */ void batadv_v_ogm_primary_iface_set(struct batadv_hard_iface *primary_iface) { - struct batadv_priv *bat_priv = netdev_priv(primary_iface->soft_iface); + struct batadv_priv *bat_priv = netdev_priv(primary_iface->mesh_iface); struct batadv_ogm2_packet *ogm_packet; mutex_lock(&bat_priv->bat_v.ogm_buff_mutex); @@ -452,7 +452,7 @@ void batadv_v_ogm_primary_iface_set(struct batadv_hard_iface *primary_iface) /** * batadv_v_forward_penalty() - apply a penalty to the throughput metric * forwarded with B.A.T.M.A.N. 
V OGMs - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @if_incoming: the interface where the OGM has been received * @if_outgoing: the interface where the OGM has to be forwarded to * @throughput: the current throughput @@ -505,7 +505,7 @@ static u32 batadv_v_forward_penalty(struct batadv_priv *bat_priv, /** * batadv_v_ogm_forward() - check conditions and forward an OGM to the given * outgoing interface - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @ogm_received: previously received OGM to be forwarded * @orig_node: the originator which has been updated * @neigh_node: the neigh_node through which the OGM has been received @@ -592,7 +592,7 @@ static void batadv_v_ogm_forward(struct batadv_priv *bat_priv, /** * batadv_v_ogm_metric_update() - update route metric based on OGM - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @ogm2: OGM2 structure * @orig_node: Originator structure for which the OGM has been received * @neigh_node: the neigh_node through which the OGM has been received @@ -675,7 +675,7 @@ static int batadv_v_ogm_metric_update(struct batadv_priv *bat_priv, /** * batadv_v_ogm_route_update() - update routes based on OGM - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @ethhdr: the Ethernet header of the OGM2 * @ogm2: OGM2 structure * @orig_node: Originator structure for which the OGM has been received @@ -770,7 +770,7 @@ static bool batadv_v_ogm_route_update(struct batadv_priv *bat_priv, /** * batadv_v_ogm_process_per_outif() - process a batman v OGM for an outgoing if - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @ethhdr: the Ethernet header of the OGM2 * @ogm2: OGM2 structure * @orig_node: Originator structure for which the OGM has been received @@ -839,8 +839,7 @@ batadv_v_ogm_aggr_packet(int buff_pos, int packet_len, /* check if there is enough space for the optional TVLV */ next_buff_pos += ntohs(ogm2_packet->tvlv_len); - return (next_buff_pos <= packet_len) && - (next_buff_pos <= BATADV_MAX_AGGREGATION_BYTES); + return next_buff_pos <= packet_len; } /** @@ -852,7 +851,7 @@ batadv_v_ogm_aggr_packet(int buff_pos, int packet_len, static void batadv_v_ogm_process(const struct sk_buff *skb, int ogm_offset, struct batadv_hard_iface *if_incoming) { - struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface); + struct batadv_priv *bat_priv = netdev_priv(if_incoming->mesh_iface); struct ethhdr *ethhdr; struct batadv_orig_node *orig_node = NULL; struct batadv_hardif_neigh_node *hardif_neigh = NULL; @@ -926,7 +925,7 @@ static void batadv_v_ogm_process(const struct sk_buff *skb, int ogm_offset, if (hard_iface->if_status != BATADV_IF_ACTIVE) continue; - if (hard_iface->soft_iface != bat_priv->soft_iface) + if (hard_iface->mesh_iface != bat_priv->mesh_iface) continue; if (!kref_get_unless_zero(&hard_iface->refcount)) @@ -985,7 +984,7 @@ static void batadv_v_ogm_process(const struct sk_buff *skb, int ogm_offset, int batadv_v_ogm_packet_recv(struct sk_buff *skb, struct batadv_hard_iface *if_incoming) { - struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface); + struct batadv_priv *bat_priv =
netdev_priv(if_incoming->mesh_iface); struct batadv_ogm2_packet *ogm_packet; struct ethhdr *ethhdr; int ogm_offset; @@ -1036,7 +1035,7 @@ int batadv_v_ogm_packet_recv(struct sk_buff *skb, /** * batadv_v_ogm_init() - initialise the OGM2 engine - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * * Return: 0 on success or a negative error code in case of failure */ @@ -1071,7 +1070,7 @@ int batadv_v_ogm_init(struct batadv_priv *bat_priv) /** * batadv_v_ogm_free() - free OGM private resources - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information */ void batadv_v_ogm_free(struct batadv_priv *bat_priv) { diff --git a/net/batman-adv/bitarray.c b/net/batman-adv/bitarray.c index 649c41f393e1a..2c49b27116503 100644 --- a/net/batman-adv/bitarray.c +++ b/net/batman-adv/bitarray.c @@ -23,7 +23,7 @@ static void batadv_bitmap_shift_left(unsigned long *seq_bits, s32 n) /** * batadv_bit_get_packet() - receive and process one packet within the sequence * number window - * @priv: the bat priv with all the soft interface information + * @priv: the bat priv with all the mesh interface information * @seq_bits: pointer to the sequence number receive packet * @seq_num_diff: difference between the current/received sequence number and * the last sequence number diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c index 8c814f790d177..747755647c6a4 100644 --- a/net/batman-adv/bridge_loop_avoidance.c +++ b/net/batman-adv/bridge_loop_avoidance.c @@ -209,7 +209,7 @@ static void batadv_claim_put(struct batadv_bla_claim *claim) /** * batadv_claim_hash_find() - looks for a claim in the claim hash - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @data: search data (may be local/static data) * * Return: claim if found or NULL otherwise. @@ -248,7 +248,7 @@ batadv_claim_hash_find(struct batadv_priv *bat_priv, /** * batadv_backbone_hash_find() - looks for a backbone gateway in the hash - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @addr: the address of the originator * @vid: the VLAN ID * @@ -332,7 +332,7 @@ batadv_bla_del_backbone_claims(struct batadv_bla_backbone_gw *backbone_gw) /** * batadv_bla_send_claim() - sends a claim frame according to the provided info - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @mac: the mac address to be announced within the claim * @vid: the VLAN ID * @claimtype: the type of the claim (CLAIM, UNCLAIM, ANNOUNCE, ...) 
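The change that dominates these batman-adv hunks is mechanical: the batadv_hard_iface member soft_iface has been renamed to mesh_iface, and every netdev_priv() lookup that derives the per-mesh private data from it follows suit. A minimal sketch of the idiom being rewritten, using stand-in declarations rather than the real ones from net/batman-adv/types.h and include/linux/netdevice.h; the helper name is invented for illustration:

	/* Sketch only: stand-in declarations, not the kernel definitions. */
	struct net_device;
	struct batadv_priv;

	/* the real netdev_priv() returns the private area allocated right
	 * behind struct net_device; batman-adv keeps batadv_priv there
	 */
	void *netdev_priv(const struct net_device *dev);

	struct batadv_hard_iface {
		struct net_device *mesh_iface;	/* was: soft_iface */
	};

	/* hypothetical helper naming the lookup every hunk rewrites */
	static struct batadv_priv *
	batadv_priv_of(const struct batadv_hard_iface *hard_iface)
	{
		return netdev_priv(hard_iface->mesh_iface);
	}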
@@ -343,7 +343,7 @@ static void batadv_bla_send_claim(struct batadv_priv *bat_priv, const u8 *mac, struct sk_buff *skb; struct ethhdr *ethhdr; struct batadv_hard_iface *primary_if; - struct net_device *soft_iface; + struct net_device *mesh_iface; u8 *hw_src; struct batadv_bla_claim_dst local_claim_dest; __be32 zeroip = 0; @@ -356,12 +356,12 @@ static void batadv_bla_send_claim(struct batadv_priv *bat_priv, const u8 *mac, sizeof(local_claim_dest)); local_claim_dest.type = claimtype; - soft_iface = primary_if->soft_iface; + mesh_iface = primary_if->mesh_iface; skb = arp_create(ARPOP_REPLY, ETH_P_ARP, /* IP DST: 0.0.0.0 */ zeroip, - primary_if->soft_iface, + primary_if->mesh_iface, /* IP SRC: 0.0.0.0 */ zeroip, /* Ethernet DST: Broadcast */ @@ -439,7 +439,7 @@ static void batadv_bla_send_claim(struct batadv_priv *bat_priv, const u8 *mac, } skb_reset_mac_header(skb); - skb->protocol = eth_type_trans(skb, soft_iface); + skb->protocol = eth_type_trans(skb, mesh_iface); batadv_inc_counter(bat_priv, BATADV_CNT_RX); batadv_add_counter(bat_priv, BATADV_CNT_RX_BYTES, skb->len + ETH_HLEN); @@ -466,7 +466,7 @@ static void batadv_bla_loopdetect_report(struct work_struct *work) report_work); bat_priv = backbone_gw->bat_priv; - batadv_info(bat_priv->soft_iface, + batadv_info(bat_priv->mesh_iface, "Possible loop on VLAN %d detected which can't be handled by BLA - please check your network setup!\n", batadv_print_vid(backbone_gw->vid)); snprintf(vid_str, sizeof(vid_str), "%d", @@ -481,7 +481,7 @@ static void batadv_bla_loopdetect_report(struct work_struct *work) /** * batadv_bla_get_backbone_gw() - finds or creates a backbone gateway - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @orig: the mac address of the originator * @vid: the VLAN ID * @own_backbone: set if the requested backbone is local @@ -554,7 +554,7 @@ batadv_bla_get_backbone_gw(struct batadv_priv *bat_priv, const u8 *orig, /** * batadv_bla_update_own_backbone_gw() - updates the own backbone gw for a VLAN - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @primary_if: the selected primary interface * @vid: VLAN identifier * @@ -580,7 +580,7 @@ batadv_bla_update_own_backbone_gw(struct batadv_priv *bat_priv, /** * batadv_bla_answer_request() - answer a bla request by sending own claims - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @primary_if: interface where the request came on * @vid: the vid where the request came on * @@ -657,7 +657,7 @@ static void batadv_bla_send_request(struct batadv_bla_backbone_gw *backbone_gw) /** * batadv_bla_send_announce() - Send an announcement frame - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @backbone_gw: our backbone gateway which should be announced */ static void batadv_bla_send_announce(struct batadv_priv *bat_priv, @@ -678,7 +678,7 @@ static void batadv_bla_send_announce(struct batadv_priv *bat_priv, /** * batadv_bla_add_claim() - Adds a claim in the claim hash - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @mac: the mac address of the claim * @vid: the VLAN ID of the frame * @backbone_gw: the backbone gateway which claims it @@ -788,7 +788,7 @@ 
batadv_bla_claim_get_backbone_gw(struct batadv_bla_claim *claim) /** * batadv_bla_del_claim() - delete a claim from the claim hash - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @mac: mac address of the claim to be removed * @vid: VLAN id for the claim to be removed */ @@ -826,7 +826,7 @@ static void batadv_bla_del_claim(struct batadv_priv *bat_priv, /** * batadv_handle_announce() - check for ANNOUNCE frame - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @an_addr: announcement mac address (ARP Sender HW address) * @backbone_addr: originator address of the sender (Ethernet source MAC) * @vid: the VLAN ID of the frame @@ -884,8 +884,8 @@ static bool batadv_handle_announce(struct batadv_priv *bat_priv, u8 *an_addr, /** * batadv_handle_request() - check for REQUEST frame - * @bat_priv: the bat priv with all the soft interface information - * @primary_if: the primary hard interface of this batman soft interface + * @bat_priv: the bat priv with all the mesh interface information + * @primary_if: the primary hard interface of this batman mesh interface * @backbone_addr: backbone address to be requested (ARP sender HW MAC) * @ethhdr: ethernet header of a packet * @vid: the VLAN ID of the frame @@ -917,8 +917,8 @@ static bool batadv_handle_request(struct batadv_priv *bat_priv, /** * batadv_handle_unclaim() - check for UNCLAIM frame - * @bat_priv: the bat priv with all the soft interface information - * @primary_if: the primary hard interface of this batman soft interface + * @bat_priv: the bat priv with all the mesh interface information + * @primary_if: the primary hard interface of this batman mesh interface * @backbone_addr: originator address of the backbone (Ethernet source) * @claim_addr: Client to be unclaimed (ARP sender HW MAC) * @vid: the VLAN ID of the frame @@ -955,8 +955,8 @@ static bool batadv_handle_unclaim(struct batadv_priv *bat_priv, /** * batadv_handle_claim() - check for CLAIM frame - * @bat_priv: the bat priv with all the soft interface information - * @primary_if: the primary hard interface of this batman soft interface + * @bat_priv: the bat priv with all the mesh interface information + * @primary_if: the primary hard interface of this batman mesh interface * @backbone_addr: originator address of the backbone (Ethernet Source) * @claim_addr: client mac address to be claimed (ARP sender HW MAC) * @vid: the VLAN ID of the frame @@ -992,7 +992,7 @@ static bool batadv_handle_claim(struct batadv_priv *bat_priv, /** * batadv_check_claim_group() - check for claim group membership - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @primary_if: the primary interface of this batman interface * @hw_src: the Hardware source in the ARP Header * @hw_dst: the Hardware destination in the ARP Header @@ -1067,8 +1067,8 @@ static int batadv_check_claim_group(struct batadv_priv *bat_priv, /** * batadv_bla_process_claim() - Check if this is a claim frame, and process it - * @bat_priv: the bat priv with all the soft interface information - * @primary_if: the primary hard interface of this batman soft interface + * @bat_priv: the bat priv with all the mesh interface information + * @primary_if: the primary hard interface of this batman mesh interface * @skb: the frame to be checked * * Return: true if it was a claim frame, otherwise 
return false to @@ -1210,7 +1210,7 @@ static bool batadv_bla_process_claim(struct batadv_priv *bat_priv, /** * batadv_bla_purge_backbone_gw() - Remove backbone gateways after a timeout or * immediately - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @now: whether the whole hash shall be wiped now * * Check when we last heard from other nodes, and remove them in case of @@ -1262,7 +1262,7 @@ static void batadv_bla_purge_backbone_gw(struct batadv_priv *bat_priv, int now) /** * batadv_bla_purge_claims() - Remove claims after a timeout or immediately - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @primary_if: the selected primary interface, may be NULL if now is set * @now: whether the whole hash shall be wiped now * @@ -1321,7 +1321,7 @@ static void batadv_bla_purge_claims(struct batadv_priv *bat_priv, /** * batadv_bla_update_orig_address() - Update the backbone gateways when the own * originator address changes - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @primary_if: the new selected primary_if * @oldif: the old primary interface, may be NULL */ @@ -1376,7 +1376,7 @@ void batadv_bla_update_orig_address(struct batadv_priv *bat_priv, /** * batadv_bla_send_loopdetect() - send a loopdetect frame - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @backbone_gw: the backbone gateway for which a loop should be detected * * To detect loops that the bridge loop avoidance can't handle, send a loop @@ -1396,7 +1396,7 @@ batadv_bla_send_loopdetect(struct batadv_priv *bat_priv, /** * batadv_bla_status_update() - purge bla interfaces if necessary - * @net_dev: the soft interface net device + * @net_dev: the mesh interface net device */ void batadv_bla_status_update(struct net_device *net_dev) { @@ -1520,7 +1520,7 @@ static struct lock_class_key batadv_backbone_hash_lock_class_key; /** * batadv_bla_init() - initialize all bla structures - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * * Return: 0 on success, < 0 on error. */ @@ -1586,7 +1586,7 @@ int batadv_bla_init(struct batadv_priv *bat_priv) /** * batadv_bla_check_duplist() - Check if a frame is in the broadcast dup. - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @skb: contains the multicast packet to be checked * @payload_ptr: pointer to position inside the head buffer of the skb * marking the start of the data to be CRC'ed @@ -1680,7 +1680,7 @@ static bool batadv_bla_check_duplist(struct batadv_priv *bat_priv, /** * batadv_bla_check_ucast_duplist() - Check if a frame is in the broadcast dup. - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @skb: contains the multicast packet to be checked, decapsulated from a * unicast_packet * @@ -1698,7 +1698,7 @@ static bool batadv_bla_check_ucast_duplist(struct batadv_priv *bat_priv, /** * batadv_bla_check_bcast_duplist() - Check if a frame is in the broadcast dup. 
- * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @skb: contains the bcast_packet to be checked * * Check if it is on our broadcast list. Another gateway might have sent the @@ -1723,7 +1723,7 @@ bool batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv, /** * batadv_bla_is_backbone_gw_orig() - Check if the originator is a gateway for * the VLAN identified by vid. - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @orig: originator mac address * @vid: VLAN identifier * @@ -1766,7 +1766,7 @@ bool batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, u8 *orig, * @orig_node: the orig_node of the frame * @hdr_size: maximum length of the frame * - * Return: true if the orig_node is also a gateway on the soft interface, + * Return: true if the orig_node is also a gateway on the mesh interface, * otherwise it returns false. */ bool batadv_bla_is_backbone_gw(struct sk_buff *skb, @@ -1796,9 +1796,9 @@ bool batadv_bla_is_backbone_gw(struct sk_buff *skb, /** * batadv_bla_free() - free all bla structures - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * - * for softinterface free or module unload + * for meshinterface free or module unload */ void batadv_bla_free(struct batadv_priv *bat_priv) { @@ -1822,7 +1822,7 @@ void batadv_bla_free(struct batadv_priv *bat_priv) /** * batadv_bla_loopdetect_check() - check and handle a detected loop - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @skb: the packet to check * @primary_if: interface where the request came on * @vid: the VLAN ID of the frame @@ -1877,7 +1877,7 @@ batadv_bla_loopdetect_check(struct batadv_priv *bat_priv, struct sk_buff *skb, /** * batadv_bla_rx() - check packets coming from the mesh. 
- * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @skb: the frame to be checked * @vid: the VLAN ID of the frame * @packet_type: the batman packet type this frame came in @@ -2010,7 +2010,7 @@ bool batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb, /** * batadv_bla_tx() - check packets going into the mesh - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @skb: the frame to be checked * @vid: the VLAN ID of the frame * @@ -2232,18 +2232,18 @@ int batadv_bla_claim_dump(struct sk_buff *msg, struct netlink_callback *cb) { struct batadv_hard_iface *primary_if = NULL; int portid = NETLINK_CB(cb->skb).portid; - struct net_device *soft_iface; + struct net_device *mesh_iface; struct batadv_hashtable *hash; struct batadv_priv *bat_priv; int bucket = cb->args[0]; int idx = cb->args[1]; int ret = 0; - soft_iface = batadv_netlink_get_softif(cb); - if (IS_ERR(soft_iface)) - return PTR_ERR(soft_iface); + mesh_iface = batadv_netlink_get_meshif(cb); + if (IS_ERR(mesh_iface)) + return PTR_ERR(mesh_iface); - bat_priv = netdev_priv(soft_iface); + bat_priv = netdev_priv(mesh_iface); hash = bat_priv->bla.claim_hash; primary_if = batadv_primary_if_get_selected(bat_priv); @@ -2267,7 +2267,7 @@ int batadv_bla_claim_dump(struct sk_buff *msg, struct netlink_callback *cb) out: batadv_hardif_put(primary_if); - dev_put(soft_iface); + dev_put(mesh_iface); return ret; } @@ -2393,18 +2393,18 @@ int batadv_bla_backbone_dump(struct sk_buff *msg, struct netlink_callback *cb) { struct batadv_hard_iface *primary_if = NULL; int portid = NETLINK_CB(cb->skb).portid; - struct net_device *soft_iface; + struct net_device *mesh_iface; struct batadv_hashtable *hash; struct batadv_priv *bat_priv; int bucket = cb->args[0]; int idx = cb->args[1]; int ret = 0; - soft_iface = batadv_netlink_get_softif(cb); - if (IS_ERR(soft_iface)) - return PTR_ERR(soft_iface); + mesh_iface = batadv_netlink_get_meshif(cb); + if (IS_ERR(mesh_iface)) + return PTR_ERR(mesh_iface); - bat_priv = netdev_priv(soft_iface); + bat_priv = netdev_priv(mesh_iface); hash = bat_priv->bla.backbone_hash; primary_if = batadv_primary_if_get_selected(bat_priv); @@ -2428,7 +2428,7 @@ int batadv_bla_backbone_dump(struct sk_buff *msg, struct netlink_callback *cb) out: batadv_hardif_put(primary_if); - dev_put(soft_iface); + dev_put(mesh_iface); return ret; } @@ -2437,7 +2437,7 @@ int batadv_bla_backbone_dump(struct sk_buff *msg, struct netlink_callback *cb) /** * batadv_bla_check_claim() - check if address is claimed * - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @addr: mac address of which the claim status is checked * @vid: the VLAN ID * diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c index e5a07152d4ecf..8b8132eb0a79f 100644 --- a/net/batman-adv/distributed-arp-table.c +++ b/net/batman-adv/distributed-arp-table.c @@ -96,7 +96,7 @@ static void batadv_dat_purge(struct work_struct *work); /** * batadv_dat_start_timer() - initialise the DAT periodic worker - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information */ static void batadv_dat_start_timer(struct batadv_priv *bat_priv) { @@ -145,7 +145,7 @@ static bool batadv_dat_to_purge(struct batadv_dat_entry *dat_entry) /** * 
__batadv_dat_purge() - delete entries from the DAT local storage - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @to_purge: function in charge to decide whether an entry has to be purged or * not. This function takes the dat_entry as argument and has to * return a boolean value: true if the entry has to be deleted, @@ -315,7 +315,7 @@ static u32 batadv_hash_dat(const void *data, u32 size) /** * batadv_dat_entry_hash_find() - look for a given dat_entry in the local hash * table - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @ip: search key * @vid: VLAN identifier * @@ -357,7 +357,7 @@ batadv_dat_entry_hash_find(struct batadv_priv *bat_priv, __be32 ip, /** * batadv_dat_entry_add() - add a new dat entry or update it if already exists - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @ip: ipv4 to add/edit * @mac_addr: mac address to assign to the given ipv4 * @vid: VLAN identifier @@ -414,7 +414,7 @@ static void batadv_dat_entry_add(struct batadv_priv *bat_priv, __be32 ip, /** * batadv_dbg_arp() - print a debug message containing all the ARP packet * details - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @skb: ARP packet * @hdr_size: size of the possible header before the ARP packet * @msg: message to print together with the debugging information @@ -549,7 +549,7 @@ static bool batadv_is_orig_node_eligible(struct batadv_dat_candidate *res, /** * batadv_choose_next_candidate() - select the next DHT candidate - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @cands: candidates array * @select: number of candidates already present in the array * @ip_key: key to look up in the DHT @@ -613,7 +613,7 @@ static void batadv_choose_next_candidate(struct batadv_priv *bat_priv, /** * batadv_dat_select_candidates() - select the nodes which the DHT message has * to be sent to - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @ip_dst: ipv4 to look up in the DHT * @vid: VLAN identifier * @@ -658,7 +658,7 @@ batadv_dat_select_candidates(struct batadv_priv *bat_priv, __be32 ip_dst, /** * batadv_dat_forward_data() - copy and send payload to the selected candidates - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @skb: payload to send * @ip: the DHT key * @vid: VLAN identifier @@ -734,7 +734,7 @@ static bool batadv_dat_forward_data(struct batadv_priv *bat_priv, /** * batadv_dat_tvlv_container_update() - update the dat tvlv container after dat * setting change - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information */ static void batadv_dat_tvlv_container_update(struct batadv_priv *bat_priv) { @@ -756,7 +756,7 @@ static void batadv_dat_tvlv_container_update(struct batadv_priv *bat_priv) /** * batadv_dat_status_update() - update the dat tvlv container after dat * setting change - * @net_dev: the soft interface net device + * @net_dev: the mesh interface net device */ void batadv_dat_status_update(struct net_device
*net_dev) { @@ -767,7 +767,7 @@ void batadv_dat_status_update(struct net_device *net_dev) /** * batadv_dat_tvlv_ogm_handler_v1() - process incoming dat tvlv container - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @orig: the orig_node of the ogm * @flags: flags indicating the tvlv state (see batadv_tvlv_handler_flags) * @tvlv_value: tvlv buffer containing the gateway data @@ -786,7 +786,7 @@ static void batadv_dat_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv, /** * batadv_dat_hash_free() - free the local DAT hash table - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information */ static void batadv_dat_hash_free(struct batadv_priv *bat_priv) { @@ -802,7 +802,7 @@ static void batadv_dat_hash_free(struct batadv_priv *bat_priv) /** * batadv_dat_init() - initialise the DAT internals - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * * Return: 0 in case of success, a negative error code otherwise */ @@ -828,7 +828,7 @@ int batadv_dat_init(struct batadv_priv *bat_priv) /** * batadv_dat_free() - free the DAT internals - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information */ void batadv_dat_free(struct batadv_priv *bat_priv) { @@ -936,18 +936,18 @@ int batadv_dat_cache_dump(struct sk_buff *msg, struct netlink_callback *cb) { struct batadv_hard_iface *primary_if = NULL; int portid = NETLINK_CB(cb->skb).portid; - struct net_device *soft_iface; + struct net_device *mesh_iface; struct batadv_hashtable *hash; struct batadv_priv *bat_priv; int bucket = cb->args[0]; int idx = cb->args[1]; int ret = 0; - soft_iface = batadv_netlink_get_softif(cb); - if (IS_ERR(soft_iface)) - return PTR_ERR(soft_iface); + mesh_iface = batadv_netlink_get_meshif(cb); + if (IS_ERR(mesh_iface)) + return PTR_ERR(mesh_iface); - bat_priv = netdev_priv(soft_iface); + bat_priv = netdev_priv(mesh_iface); hash = bat_priv->dat.hash; primary_if = batadv_primary_if_get_selected(bat_priv); @@ -973,14 +973,14 @@ int batadv_dat_cache_dump(struct sk_buff *msg, struct netlink_callback *cb) out: batadv_hardif_put(primary_if); - dev_put(soft_iface); + dev_put(mesh_iface); return ret; } /** * batadv_arp_get_type() - parse an ARP packet and get the type - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @skb: packet to analyse * @hdr_size: size of the possible header before the ARP packet in the skb * @@ -1080,7 +1080,7 @@ static unsigned short batadv_dat_get_vid(struct sk_buff *skb, int *hdr_size) /** * batadv_dat_arp_create_reply() - create an ARP Reply - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @ip_src: ARP sender IP * @ip_dst: ARP target IP * @hw_src: Ethernet source and ARP sender MAC @@ -1099,7 +1099,7 @@ batadv_dat_arp_create_reply(struct batadv_priv *bat_priv, __be32 ip_src, { struct sk_buff *skb; - skb = arp_create(ARPOP_REPLY, ETH_P_ARP, ip_dst, bat_priv->soft_iface, + skb = arp_create(ARPOP_REPLY, ETH_P_ARP, ip_dst, bat_priv->mesh_iface, ip_src, hw_dst, hw_src, hw_dst); if (!skb) return NULL; @@ -1116,7 +1116,7 @@ batadv_dat_arp_create_reply(struct batadv_priv *bat_priv, __be32 ip_src, /** *
batadv_dat_snoop_outgoing_arp_request() - snoop the ARP request and try to * answer using DAT - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @skb: packet to check * * Return: true if the message has been sent to the dht candidates, false @@ -1132,7 +1132,7 @@ bool batadv_dat_snoop_outgoing_arp_request(struct batadv_priv *bat_priv, bool ret = false; struct batadv_dat_entry *dat_entry = NULL; struct sk_buff *skb_new; - struct net_device *soft_iface = bat_priv->soft_iface; + struct net_device *mesh_iface = bat_priv->mesh_iface; int hdr_size = 0; unsigned short vid; @@ -1162,7 +1162,7 @@ bool batadv_dat_snoop_outgoing_arp_request(struct batadv_priv *bat_priv, * client will answer itself. DAT would only generate a * duplicate packet. * - * Moreover, if the soft-interface is enslaved into a bridge, an + * Moreover, if the mesh-interface is enslaved into a bridge, an * additional DAT answer may trigger kernel warnings about * a packet coming from the wrong port. */ @@ -1191,7 +1191,7 @@ bool batadv_dat_snoop_outgoing_arp_request(struct batadv_priv *bat_priv, if (!skb_new) goto out; - skb_new->protocol = eth_type_trans(skb_new, soft_iface); + skb_new->protocol = eth_type_trans(skb_new, mesh_iface); batadv_inc_counter(bat_priv, BATADV_CNT_RX); batadv_add_counter(bat_priv, BATADV_CNT_RX_BYTES, @@ -1213,7 +1213,7 @@ bool batadv_dat_snoop_outgoing_arp_request(struct batadv_priv *bat_priv, /** * batadv_dat_snoop_incoming_arp_request() - snoop the ARP request and try to * answer using the local DAT storage - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @skb: packet to check * @hdr_size: size of the encapsulation header * @@ -1281,7 +1281,7 @@ bool batadv_dat_snoop_incoming_arp_request(struct batadv_priv *bat_priv, /** * batadv_dat_snoop_outgoing_arp_reply() - snoop the ARP reply and fill the DHT - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @skb: packet to check */ void batadv_dat_snoop_outgoing_arp_reply(struct batadv_priv *bat_priv, @@ -1324,7 +1324,7 @@ void batadv_dat_snoop_outgoing_arp_reply(struct batadv_priv *bat_priv, /** * batadv_dat_snoop_incoming_arp_reply() - snoop the ARP reply and fill the * local DAT storage only - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @skb: packet to check * @hdr_size: size of the encapsulation header * @@ -1605,7 +1605,7 @@ static bool batadv_dat_get_dhcp_chaddr(struct sk_buff *skb, u8 *buf) /** * batadv_dat_put_dhcp() - puts addresses from a DHCP packet into the DHT and * DAT cache - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @chaddr: the DHCP client MAC address * @yiaddr: the DHCP client IP address * @hw_dst: the DHCP server MAC address @@ -1690,7 +1690,7 @@ batadv_dat_check_dhcp_ack(struct sk_buff *skb, __be16 proto, __be32 *ip_src, /** * batadv_dat_snoop_outgoing_dhcp_ack() - snoop DHCPACK and fill DAT with it - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @skb: the packet to snoop * @proto: ethernet protocol hint (behind a potential vlan) * @vid: VLAN identifier @@ -1723,7 +1723,7 @@ void 
batadv_dat_snoop_outgoing_dhcp_ack(struct batadv_priv *bat_priv, /** * batadv_dat_snoop_incoming_dhcp_ack() - snoop DHCPACK and fill DAT cache - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @skb: the packet to snoop * @hdr_size: header size, up to the tail of the batman-adv header * @@ -1771,7 +1771,7 @@ void batadv_dat_snoop_incoming_dhcp_ack(struct batadv_priv *bat_priv, /** * batadv_dat_drop_broadcast_packet() - check if an ARP request has to be * dropped (because the node has already obtained the reply via DAT) or not - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @forw_packet: the broadcast packet * * Return: true if the node can drop the packet, false otherwise. diff --git a/net/batman-adv/distributed-arp-table.h b/net/batman-adv/distributed-arp-table.h index bed7f3d208444..e7b75e82eb1d8 100644 --- a/net/batman-adv/distributed-arp-table.h +++ b/net/batman-adv/distributed-arp-table.h @@ -56,7 +56,7 @@ batadv_dat_init_orig_node_addr(struct batadv_orig_node *orig_node) /** * batadv_dat_init_own_addr() - assign a DAT address to the node itself - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @primary_if: a pointer to the primary interface */ static inline void @@ -77,7 +77,7 @@ int batadv_dat_cache_dump(struct sk_buff *msg, struct netlink_callback *cb); /** * batadv_dat_inc_counter() - increment the correct DAT packet counter - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @subtype: the 4addr subtype of the packet to be counted * * Updates the ethtool statistics for the received packet if it is a DAT subtype diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c index 757c084ac2d14..cc14bc41381e0 100644 --- a/net/batman-adv/fragmentation.c +++ b/net/batman-adv/fragmentation.c @@ -349,7 +349,7 @@ bool batadv_frag_skb_fwd(struct sk_buff *skb, struct batadv_hard_iface *recv_if, struct batadv_orig_node *orig_node_src) { - struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface); + struct batadv_priv *bat_priv = netdev_priv(recv_if->mesh_iface); struct batadv_neigh_node *neigh_node = NULL; struct batadv_frag_packet *packet; u16 total_size; diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c index f68e34ed1f627..7a11b245e9f40 100644 --- a/net/batman-adv/gateway_client.c +++ b/net/batman-adv/gateway_client.c @@ -71,7 +71,7 @@ void batadv_gw_node_release(struct kref *ref) /** * batadv_gw_get_selected_gw_node() - Get currently selected gateway - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * * Return: selected gateway (with increased refcnt), NULL on errors */ @@ -95,7 +95,7 @@ batadv_gw_get_selected_gw_node(struct batadv_priv *bat_priv) /** * batadv_gw_get_selected_orig() - Get originator of currently selected gateway - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * * Return: orig_node of selected gateway (with increased refcnt), NULL on errors */ @@ -144,7 +144,7 @@ static void batadv_gw_select(struct batadv_priv *bat_priv, /** * batadv_gw_reselect() - force a gateway reselection - * @bat_priv: the bat priv 
with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * * Set a flag to remind the GW component to perform a new gateway reselection. * However this function does not ensure that the current gateway is going to be @@ -160,7 +160,7 @@ void batadv_gw_reselect(struct batadv_priv *bat_priv) /** * batadv_gw_check_client_stop() - check if client mode has been switched off - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * * This function assumes the caller has checked that the gw state *is actually * changing*. This function is not supposed to be called when there is no state @@ -192,7 +192,7 @@ void batadv_gw_check_client_stop(struct batadv_priv *bat_priv) /** * batadv_gw_election() - Elect the best gateway - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information */ void batadv_gw_election(struct batadv_priv *bat_priv) { @@ -280,7 +280,7 @@ void batadv_gw_election(struct batadv_priv *bat_priv) /** * batadv_gw_check_election() - Elect orig node as best gateway when eligible - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @orig_node: orig node which is to be checked */ void batadv_gw_check_election(struct batadv_priv *bat_priv, @@ -314,7 +314,7 @@ void batadv_gw_check_election(struct batadv_priv *bat_priv, /** * batadv_gw_node_add() - add gateway node to list of available gateways - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @orig_node: originator announcing gateway capabilities * @gateway: announced bandwidth information * @@ -361,7 +361,7 @@ static void batadv_gw_node_add(struct batadv_priv *bat_priv, /** * batadv_gw_node_get() - retrieve gateway node from list of available gateways - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @orig_node: originator announcing gateway capabilities * * Return: gateway node if found or NULL otherwise. 
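The *_dump() netlink handlers touched in the hunk below and in the earlier bridge_loop_avoidance.c and distributed-arp-table.c hunks (batadv_gw_dump, batadv_bla_claim_dump, batadv_bla_backbone_dump, batadv_dat_cache_dump) share one skeleton: resolve the mesh interface from the netlink callback, derive bat_priv, walk the table, and drop the device reference on the way out. A condensed sketch under the names used by the patch; the handler itself is invented and the per-bucket walk is elided:

	static int batadv_example_dump(struct sk_buff *msg,
				       struct netlink_callback *cb)
	{
		struct net_device *mesh_iface;
		struct batadv_priv *bat_priv;
		int ret = 0;

		/* looks up the mesh device and takes a reference on it */
		mesh_iface = batadv_netlink_get_meshif(cb);
		if (IS_ERR(mesh_iface))
			return PTR_ERR(mesh_iface);

		bat_priv = netdev_priv(mesh_iface);

		/* ... emit table entries into msg, resuming via cb->args ... */

		dev_put(mesh_iface);	/* pairs with the lookup's reference */
		return ret;
	}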
@@ -391,7 +391,7 @@ struct batadv_gw_node *batadv_gw_node_get(struct batadv_priv *bat_priv, /** * batadv_gw_node_update() - update list of available gateways with changed * bandwidth information - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @orig_node: originator announcing gateway capabilities * @gateway: announced bandwidth information */ @@ -458,7 +458,7 @@ void batadv_gw_node_update(struct batadv_priv *bat_priv, /** * batadv_gw_node_delete() - Remove orig_node from gateway list - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @orig_node: orig node which is currently in process of being removed */ void batadv_gw_node_delete(struct batadv_priv *bat_priv, @@ -473,8 +473,8 @@ void batadv_gw_node_delete(struct batadv_priv *bat_priv, } /** - * batadv_gw_node_free() - Free gateway information from soft interface - * @bat_priv: the bat priv with all the soft interface information + * batadv_gw_node_free() - Free gateway information from mesh interface + * @bat_priv: the bat priv with all the mesh interface information */ void batadv_gw_node_free(struct batadv_priv *bat_priv) { @@ -501,15 +501,15 @@ void batadv_gw_node_free(struct batadv_priv *bat_priv) int batadv_gw_dump(struct sk_buff *msg, struct netlink_callback *cb) { struct batadv_hard_iface *primary_if = NULL; - struct net_device *soft_iface; + struct net_device *mesh_iface; struct batadv_priv *bat_priv; int ret; - soft_iface = batadv_netlink_get_softif(cb); - if (IS_ERR(soft_iface)) - return PTR_ERR(soft_iface); + mesh_iface = batadv_netlink_get_meshif(cb); + if (IS_ERR(mesh_iface)) + return PTR_ERR(mesh_iface); - bat_priv = netdev_priv(soft_iface); + bat_priv = netdev_priv(mesh_iface); primary_if = batadv_primary_if_get_selected(bat_priv); if (!primary_if || primary_if->if_status != BATADV_IF_ACTIVE) { @@ -528,7 +528,7 @@ int batadv_gw_dump(struct sk_buff *msg, struct netlink_callback *cb) out: batadv_hardif_put(primary_if); - dev_put(soft_iface); + dev_put(mesh_iface); return ret; } @@ -657,7 +657,7 @@ batadv_gw_dhcp_recipient_get(struct sk_buff *skb, unsigned int *header_len, /** * batadv_gw_out_of_range() - check if the dhcp request destination is the best * gateway - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @skb: the outgoing packet * * Check if the skb is a DHCP request and if it is sent to the current best GW diff --git a/net/batman-adv/gateway_common.c b/net/batman-adv/gateway_common.c index 2dd36ef03c841..315fa90f0c947 100644 --- a/net/batman-adv/gateway_common.c +++ b/net/batman-adv/gateway_common.c @@ -20,7 +20,7 @@ /** * batadv_gw_tvlv_container_update() - update the gw tvlv container after * gateway setting change - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information */ void batadv_gw_tvlv_container_update(struct batadv_priv *bat_priv) { @@ -48,7 +48,7 @@ void batadv_gw_tvlv_container_update(struct batadv_priv *bat_priv) /** * batadv_gw_tvlv_ogm_handler_v1() - process incoming gateway tvlv container - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @orig: the orig_node of the ogm * @flags: flags indicating the tvlv state (see batadv_tvlv_handler_flags) * @tvlv_value: tvlv buffer 
containing the gateway data @@ -89,7 +89,7 @@ static void batadv_gw_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv, /** * batadv_gw_init() - initialise the gateway handling internals - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information */ void batadv_gw_init(struct batadv_priv *bat_priv) { @@ -105,7 +105,7 @@ void batadv_gw_init(struct batadv_priv *bat_priv) /** * batadv_gw_free() - free the gateway handling internals - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information */ void batadv_gw_free(struct batadv_priv *bat_priv) { diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c index 96a412beab2de..f145f96626531 100644 --- a/net/batman-adv/hard-interface.c +++ b/net/batman-adv/hard-interface.c @@ -36,9 +36,9 @@ #include "distributed-arp-table.h" #include "gateway_client.h" #include "log.h" +#include "mesh-interface.h" #include "originator.h" #include "send.h" -#include "soft-interface.h" #include "translation-table.h" /** @@ -51,7 +51,7 @@ void batadv_hardif_release(struct kref *ref) struct batadv_hard_iface *hard_iface; hard_iface = container_of(ref, struct batadv_hard_iface, refcount); - dev_put(hard_iface->net_dev); + netdev_put(hard_iface->net_dev, &hard_iface->dev_tracker); kfree_rcu(hard_iface, rcu); } @@ -141,7 +141,7 @@ static bool batadv_mutual_parents(const struct net_device *dev1, * is important to prevent this new interface from being used to create a new * mesh network (this behaviour would lead to a batman-over-batman * configuration). This function recursively checks all the fathers of the - * device passed as argument looking for a batman-adv soft interface. + * device passed as argument looking for a batman-adv mesh interface. 
* * Return: true if the device is descendant of a batman-adv mesh interface (or * if it is a batman-adv interface itself), false otherwise @@ -155,7 +155,7 @@ static bool batadv_is_on_batman_iface(const struct net_device *net_dev) bool ret; /* check if this is a batman-adv mesh interface */ - if (batadv_softif_is_valid(net_dev)) + if (batadv_meshif_is_valid(net_dev)) return true; iflink = dev_get_iflink(net_dev); @@ -233,10 +233,10 @@ static struct net_device *batadv_get_real_netdevice(struct net_device *netdev) } hard_iface = batadv_hardif_get_by_netdev(netdev); - if (!hard_iface || !hard_iface->soft_iface) + if (!hard_iface || !hard_iface->mesh_iface) goto out; - net = dev_net(hard_iface->soft_iface); + net = dev_net(hard_iface->mesh_iface); real_net = batadv_getlink_net(netdev, net); /* iflink to itself, most likely physical device */ @@ -438,13 +438,13 @@ int batadv_hardif_no_broadcast(struct batadv_hard_iface *if_outgoing, } static struct batadv_hard_iface * -batadv_hardif_get_active(const struct net_device *soft_iface) +batadv_hardif_get_active(const struct net_device *mesh_iface) { struct batadv_hard_iface *hard_iface; rcu_read_lock(); list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) { - if (hard_iface->soft_iface != soft_iface) + if (hard_iface->mesh_iface != mesh_iface) continue; if (hard_iface->if_status == BATADV_IF_ACTIVE && @@ -532,9 +532,9 @@ static void batadv_check_known_mac_addr(const struct net_device *net_dev) /** * batadv_hardif_recalc_extra_skbroom() - Recalculate skbuff extra head/tailroom - * @soft_iface: netdev struct of the mesh interface + * @mesh_iface: netdev struct of the mesh interface */ -static void batadv_hardif_recalc_extra_skbroom(struct net_device *soft_iface) +static void batadv_hardif_recalc_extra_skbroom(struct net_device *mesh_iface) { const struct batadv_hard_iface *hard_iface; unsigned short lower_header_len = ETH_HLEN; @@ -547,7 +547,7 @@ static void batadv_hardif_recalc_extra_skbroom(struct net_device *soft_iface) if (hard_iface->if_status == BATADV_IF_NOT_IN_USE) continue; - if (hard_iface->soft_iface != soft_iface) + if (hard_iface->mesh_iface != mesh_iface) continue; lower_header_len = max_t(unsigned short, lower_header_len, @@ -567,20 +567,20 @@ static void batadv_hardif_recalc_extra_skbroom(struct net_device *soft_iface) /* fragmentation headers don't strip the unicast/... 
header */ needed_headroom += sizeof(struct batadv_frag_packet); - soft_iface->needed_headroom = needed_headroom; - soft_iface->needed_tailroom = lower_tailroom; + mesh_iface->needed_headroom = needed_headroom; + mesh_iface->needed_tailroom = lower_tailroom; } /** - * batadv_hardif_min_mtu() - Calculate maximum MTU for soft interface - * @soft_iface: netdev struct of the soft interface + * batadv_hardif_min_mtu() - Calculate maximum MTU for mesh interface + * @mesh_iface: netdev struct of the mesh interface * - * Return: MTU for the soft-interface (limited by the minimal MTU of all active + * Return: MTU for the mesh-interface (limited by the minimal MTU of all active * slave interfaces) */ -int batadv_hardif_min_mtu(struct net_device *soft_iface) +int batadv_hardif_min_mtu(struct net_device *mesh_iface) { - struct batadv_priv *bat_priv = netdev_priv(soft_iface); + struct batadv_priv *bat_priv = netdev_priv(mesh_iface); const struct batadv_hard_iface *hard_iface; int min_mtu = INT_MAX; @@ -590,7 +590,7 @@ int batadv_hardif_min_mtu(struct net_device *soft_iface) hard_iface->if_status != BATADV_IF_TO_BE_ACTIVATED) continue; - if (hard_iface->soft_iface != soft_iface) + if (hard_iface->mesh_iface != mesh_iface) continue; min_mtu = min_t(int, hard_iface->net_dev->mtu, min_mtu); @@ -616,26 +616,24 @@ int batadv_hardif_min_mtu(struct net_device *soft_iface) */ atomic_set(&bat_priv->packet_size_max, min_mtu); - /* the real soft-interface MTU is computed by removing the payload + /* the real mesh-interface MTU is computed by removing the payload * overhead from the maximum amount of bytes that was just computed. - * - * However batman-adv does not support MTUs bigger than ETH_DATA_LEN */ - return min_t(int, min_mtu - batadv_max_header_len(), ETH_DATA_LEN); + return min_t(int, min_mtu - batadv_max_header_len(), BATADV_MAX_MTU); } /** * batadv_update_min_mtu() - Adjusts the MTU if a new interface with a smaller * MTU appeared - * @soft_iface: netdev struct of the soft interface + * @mesh_iface: netdev struct of the mesh interface */ -void batadv_update_min_mtu(struct net_device *soft_iface) +void batadv_update_min_mtu(struct net_device *mesh_iface) { - struct batadv_priv *bat_priv = netdev_priv(soft_iface); + struct batadv_priv *bat_priv = netdev_priv(mesh_iface); int limit_mtu; int mtu; - mtu = batadv_hardif_min_mtu(soft_iface); + mtu = batadv_hardif_min_mtu(mesh_iface); if (bat_priv->mtu_set_by_user) limit_mtu = bat_priv->mtu_set_by_user; @@ -643,12 +641,12 @@ void batadv_update_min_mtu(struct net_device *soft_iface) limit_mtu = ETH_DATA_LEN; mtu = min(mtu, limit_mtu); - dev_set_mtu(soft_iface, mtu); + dev_set_mtu(mesh_iface, mtu); /* Check if the local translate table should be cleaned up to match a * new (and smaller) MTU. 
*/ - batadv_tt_local_resize_to_mtu(soft_iface); + batadv_tt_local_resize_to_mtu(mesh_iface); } static void @@ -660,7 +658,7 @@ batadv_hardif_activate_interface(struct batadv_hard_iface *hard_iface) if (hard_iface->if_status != BATADV_IF_INACTIVE) goto out; - bat_priv = netdev_priv(hard_iface->soft_iface); + bat_priv = netdev_priv(hard_iface->mesh_iface); bat_priv->algo_ops->iface.update_mac(hard_iface); hard_iface->if_status = BATADV_IF_TO_BE_ACTIVATED; @@ -672,10 +670,10 @@ batadv_hardif_activate_interface(struct batadv_hard_iface *hard_iface) if (!primary_if) batadv_primary_if_select(bat_priv, hard_iface); - batadv_info(hard_iface->soft_iface, "Interface activated: %s\n", + batadv_info(hard_iface->mesh_iface, "Interface activated: %s\n", hard_iface->net_dev->name); - batadv_update_min_mtu(hard_iface->soft_iface); + batadv_update_min_mtu(hard_iface->mesh_iface); if (bat_priv->algo_ops->iface.activate) bat_priv->algo_ops->iface.activate(hard_iface); @@ -693,21 +691,21 @@ batadv_hardif_deactivate_interface(struct batadv_hard_iface *hard_iface) hard_iface->if_status = BATADV_IF_INACTIVE; - batadv_info(hard_iface->soft_iface, "Interface deactivated: %s\n", + batadv_info(hard_iface->mesh_iface, "Interface deactivated: %s\n", hard_iface->net_dev->name); - batadv_update_min_mtu(hard_iface->soft_iface); + batadv_update_min_mtu(hard_iface->mesh_iface); } /** - * batadv_hardif_enable_interface() - Enslave hard interface to soft interface - * @hard_iface: hard interface to add to soft interface - * @soft_iface: netdev struct of the mesh interface + * batadv_hardif_enable_interface() - Enslave hard interface to mesh interface + * @hard_iface: hard interface to add to mesh interface + * @mesh_iface: netdev struct of the mesh interface * * Return: 0 on success or negative error number in case of failure */ int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface, - struct net_device *soft_iface) + struct net_device *mesh_iface) { struct batadv_priv *bat_priv; __be16 ethertype = htons(ETH_P_BATMAN); @@ -717,7 +715,7 @@ int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface, int ret; hardif_mtu = READ_ONCE(hard_iface->net_dev->mtu); - required_mtu = READ_ONCE(soft_iface->mtu) + max_header_len; + required_mtu = READ_ONCE(mesh_iface->mtu) + max_header_len; if (hardif_mtu < ETH_MIN_MTU + max_header_len) return -EINVAL; @@ -727,12 +725,13 @@ int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface, kref_get(&hard_iface->refcount); - dev_hold(soft_iface); - hard_iface->soft_iface = soft_iface; - bat_priv = netdev_priv(hard_iface->soft_iface); + dev_hold(mesh_iface); + netdev_hold(mesh_iface, &hard_iface->meshif_dev_tracker, GFP_ATOMIC); + hard_iface->mesh_iface = mesh_iface; + bat_priv = netdev_priv(hard_iface->mesh_iface); ret = netdev_master_upper_dev_link(hard_iface->net_dev, - soft_iface, NULL, NULL, NULL); + mesh_iface, NULL, NULL, NULL); if (ret) goto err_dev; @@ -748,19 +747,19 @@ int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface, hard_iface->batman_adv_ptype.dev = hard_iface->net_dev; dev_add_pack(&hard_iface->batman_adv_ptype); - batadv_info(hard_iface->soft_iface, "Adding interface: %s\n", + batadv_info(hard_iface->mesh_iface, "Adding interface: %s\n", hard_iface->net_dev->name); if (atomic_read(&bat_priv->fragmentation) && hardif_mtu < required_mtu) - batadv_info(hard_iface->soft_iface, + batadv_info(hard_iface->mesh_iface, "The MTU of interface %s is too small (%i) to handle the transport of batman-adv packets. 
Packets going over this interface will be fragmented on layer2 which could impact the performance. Setting the MTU to %i would solve the problem.\n", hard_iface->net_dev->name, hardif_mtu, required_mtu); if (!atomic_read(&bat_priv->fragmentation) && hardif_mtu < required_mtu) - batadv_info(hard_iface->soft_iface, + batadv_info(hard_iface->mesh_iface, "The MTU of interface %s is too small (%i) to handle the transport of batman-adv packets. If you experience problems getting traffic through try increasing the MTU to %i.\n", hard_iface->net_dev->name, hardif_mtu, required_mtu); @@ -768,11 +767,11 @@ int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface, if (batadv_hardif_is_iface_up(hard_iface)) batadv_hardif_activate_interface(hard_iface); else - batadv_err(hard_iface->soft_iface, + batadv_err(hard_iface->mesh_iface, "Not using interface %s (retrying later): interface not active\n", hard_iface->net_dev->name); - batadv_hardif_recalc_extra_skbroom(soft_iface); + batadv_hardif_recalc_extra_skbroom(mesh_iface); if (bat_priv->algo_ops->iface.enabled) bat_priv->algo_ops->iface.enabled(hard_iface); @@ -781,17 +780,17 @@ int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface, return 0; err_upper: - netdev_upper_dev_unlink(hard_iface->net_dev, soft_iface); + netdev_upper_dev_unlink(hard_iface->net_dev, mesh_iface); err_dev: - hard_iface->soft_iface = NULL; - dev_put(soft_iface); + hard_iface->mesh_iface = NULL; + netdev_put(mesh_iface, &hard_iface->meshif_dev_tracker); batadv_hardif_put(hard_iface); return ret; } /** - * batadv_hardif_cnt() - get number of interfaces enslaved to soft interface - * @soft_iface: soft interface to check + * batadv_hardif_cnt() - get number of interfaces enslaved to mesh interface + * @mesh_iface: mesh interface to check * * This function is only using RCU for locking - the result can therefore be * off when another function is modifying the list at the same time. 
The @@ -799,14 +798,14 @@ int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface, * * Return: number of connected/enslaved hard interfaces */ -static size_t batadv_hardif_cnt(const struct net_device *soft_iface) +static size_t batadv_hardif_cnt(const struct net_device *mesh_iface) { struct batadv_hard_iface *hard_iface; size_t count = 0; rcu_read_lock(); list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) { - if (hard_iface->soft_iface != soft_iface) + if (hard_iface->mesh_iface != mesh_iface) continue; count++; @@ -817,12 +816,12 @@ static size_t batadv_hardif_cnt(const struct net_device *soft_iface) } /** - * batadv_hardif_disable_interface() - Remove hard interface from soft interface + * batadv_hardif_disable_interface() - Remove hard interface from mesh interface * @hard_iface: hard interface to be removed */ void batadv_hardif_disable_interface(struct batadv_hard_iface *hard_iface) { - struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface); + struct batadv_priv *bat_priv = netdev_priv(hard_iface->mesh_iface); struct batadv_hard_iface *primary_if = NULL; batadv_hardif_deactivate_interface(hard_iface); @@ -830,7 +829,7 @@ void batadv_hardif_disable_interface(struct batadv_hard_iface *hard_iface) if (hard_iface->if_status != BATADV_IF_INACTIVE) goto out; - batadv_info(hard_iface->soft_iface, "Removing interface: %s\n", + batadv_info(hard_iface->mesh_iface, "Removing interface: %s\n", hard_iface->net_dev->name); dev_remove_pack(&hard_iface->batman_adv_ptype); batadv_hardif_put(hard_iface); @@ -839,7 +838,7 @@ void batadv_hardif_disable_interface(struct batadv_hard_iface *hard_iface) if (hard_iface == primary_if) { struct batadv_hard_iface *new_if; - new_if = batadv_hardif_get_active(hard_iface->soft_iface); + new_if = batadv_hardif_get_active(hard_iface->mesh_iface); batadv_primary_if_select(bat_priv, new_if); batadv_hardif_put(new_if); @@ -851,16 +850,16 @@ void batadv_hardif_disable_interface(struct batadv_hard_iface *hard_iface) /* delete all references to this hard_iface */ batadv_purge_orig_ref(bat_priv); batadv_purge_outstanding_packets(bat_priv, hard_iface); - dev_put(hard_iface->soft_iface); + netdev_put(hard_iface->mesh_iface, &hard_iface->meshif_dev_tracker); - netdev_upper_dev_unlink(hard_iface->net_dev, hard_iface->soft_iface); - batadv_hardif_recalc_extra_skbroom(hard_iface->soft_iface); + netdev_upper_dev_unlink(hard_iface->net_dev, hard_iface->mesh_iface); + batadv_hardif_recalc_extra_skbroom(hard_iface->mesh_iface); /* nobody uses this interface anymore */ - if (batadv_hardif_cnt(hard_iface->soft_iface) <= 1) + if (batadv_hardif_cnt(hard_iface->mesh_iface) <= 1) batadv_gw_check_client_stop(bat_priv); - hard_iface->soft_iface = NULL; + hard_iface->mesh_iface = NULL; batadv_hardif_put(hard_iface); out: @@ -875,16 +874,16 @@ batadv_hardif_add_interface(struct net_device *net_dev) ASSERT_RTNL(); if (!batadv_is_valid_iface(net_dev)) - goto out; - - dev_hold(net_dev); + return NULL; hard_iface = kzalloc(sizeof(*hard_iface), GFP_ATOMIC); if (!hard_iface) - goto release_dev; + return NULL; + netdev_hold(net_dev, &hard_iface->dev_tracker, GFP_ATOMIC); hard_iface->net_dev = net_dev; - hard_iface->soft_iface = NULL; + + hard_iface->mesh_iface = NULL; hard_iface->if_status = BATADV_IF_NOT_IN_USE; INIT_LIST_HEAD(&hard_iface->list); @@ -909,11 +908,6 @@ batadv_hardif_add_interface(struct net_device *net_dev) batadv_hardif_generation++; return hard_iface; - -release_dev: - dev_put(net_dev); -out: - return NULL; } static void 
batadv_hardif_remove_interface(struct batadv_hard_iface *hard_iface) @@ -932,13 +926,13 @@ static void batadv_hardif_remove_interface(struct batadv_hard_iface *hard_iface) } /** - * batadv_hard_if_event_softif() - Handle events for soft interfaces + * batadv_hard_if_event_meshif() - Handle events for mesh interfaces * @event: NETDEV_* event to handle * @net_dev: net_device which generated an event * * Return: NOTIFY_* result */ -static int batadv_hard_if_event_softif(unsigned long event, +static int batadv_hard_if_event_meshif(unsigned long event, struct net_device *net_dev) { struct batadv_priv *bat_priv; @@ -946,7 +940,7 @@ static int batadv_hard_if_event_softif(unsigned long event, switch (event) { case NETDEV_REGISTER: bat_priv = netdev_priv(net_dev); - batadv_softif_create_vlan(bat_priv, BATADV_NO_FLAGS); + batadv_meshif_create_vlan(bat_priv, BATADV_NO_FLAGS); break; } @@ -961,8 +955,8 @@ static int batadv_hard_if_event(struct notifier_block *this, struct batadv_hard_iface *primary_if = NULL; struct batadv_priv *bat_priv; - if (batadv_softif_is_valid(net_dev)) - return batadv_hard_if_event_softif(event, net_dev); + if (batadv_meshif_is_valid(net_dev)) + return batadv_hard_if_event_meshif(event, net_dev); hard_iface = batadv_hardif_get_by_netdev(net_dev); if (!hard_iface && (event == NETDEV_REGISTER || @@ -988,8 +982,8 @@ static int batadv_hard_if_event(struct notifier_block *this, batadv_hardif_remove_interface(hard_iface); break; case NETDEV_CHANGEMTU: - if (hard_iface->soft_iface) - batadv_update_min_mtu(hard_iface->soft_iface); + if (hard_iface->mesh_iface) + batadv_update_min_mtu(hard_iface->mesh_iface); break; case NETDEV_CHANGEADDR: if (hard_iface->if_status == BATADV_IF_NOT_IN_USE) @@ -997,7 +991,7 @@ static int batadv_hard_if_event(struct notifier_block *this, batadv_check_known_mac_addr(hard_iface->net_dev); - bat_priv = netdev_priv(hard_iface->soft_iface); + bat_priv = netdev_priv(hard_iface->mesh_iface); bat_priv->algo_ops->iface.update_mac(hard_iface); primary_if = batadv_primary_if_get_selected(bat_priv); diff --git a/net/batman-adv/hard-interface.h b/net/batman-adv/hard-interface.h index 64f660dbbe542..262a783647427 100644 --- a/net/batman-adv/hard-interface.h +++ b/net/batman-adv/hard-interface.h @@ -23,12 +23,12 @@ enum batadv_hard_if_state { /** * @BATADV_IF_NOT_IN_USE: interface is not used as slave interface of a - * batman-adv soft interface + * batman-adv mesh interface */ BATADV_IF_NOT_IN_USE, /** - * @BATADV_IF_TO_BE_REMOVED: interface will be removed from soft + * @BATADV_IF_TO_BE_REMOVED: interface will be removed from mesh * interface */ BATADV_IF_TO_BE_REMOVED, @@ -74,10 +74,10 @@ bool batadv_is_wifi_hardif(struct batadv_hard_iface *hard_iface); struct batadv_hard_iface* batadv_hardif_get_by_netdev(const struct net_device *net_dev); int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface, - struct net_device *soft_iface); + struct net_device *mesh_iface); void batadv_hardif_disable_interface(struct batadv_hard_iface *hard_iface); -int batadv_hardif_min_mtu(struct net_device *soft_iface); -void batadv_update_min_mtu(struct net_device *soft_iface); +int batadv_hardif_min_mtu(struct net_device *mesh_iface); +void batadv_update_min_mtu(struct net_device *mesh_iface); void batadv_hardif_release(struct kref *ref); int batadv_hardif_no_broadcast(struct batadv_hard_iface *if_outgoing, u8 *orig_addr, u8 *orig_neigh); @@ -97,7 +97,7 @@ static inline void batadv_hardif_put(struct batadv_hard_iface *hard_iface) /** * batadv_primary_if_get_selected() - 
Get reference to primary interface - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * * Return: primary interface (with increased refcnt), otherwise NULL */ diff --git a/net/batman-adv/log.c b/net/batman-adv/log.c index 7a93a1e94c40e..c19d07eeb0701 100644 --- a/net/batman-adv/log.c +++ b/net/batman-adv/log.c @@ -13,7 +13,7 @@ /** * batadv_debug_log() - Add debug log entry - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @fmt: format string * * Return: 0 on success or negative error number in case of failure diff --git a/net/batman-adv/log.h b/net/batman-adv/log.h index 6717c965f0faf..567afaa8df998 100644 --- a/net/batman-adv/log.h +++ b/net/batman-adv/log.h @@ -71,7 +71,7 @@ __printf(2, 3); /** * _batadv_dbg() - Store debug output with(out) rate limiting * @type: type of debug message - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @ratelimited: whether output should be rate limited * @fmt: format string * @arg: variable arguments @@ -97,7 +97,7 @@ static inline void _batadv_dbg(int type __always_unused, /** * batadv_dbg() - Store debug output without rate limiting * @type: type of debug message - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @arg: format string and variable arguments */ #define batadv_dbg(type, bat_priv, arg...) \ @@ -106,7 +106,7 @@ static inline void _batadv_dbg(int type __always_unused, /** * batadv_dbg_ratelimited() - Store debug output with rate limiting * @type: type of debug message - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @arg: format string and variable arguments */ #define batadv_dbg_ratelimited(type, bat_priv, arg...) 
\ @@ -114,7 +114,7 @@ static inline void _batadv_dbg(int type __always_unused, /** * batadv_info() - Store message in debug buffer and print it to kmsg buffer - * @net_dev: the soft interface net device + * @net_dev: the mesh interface net device * @fmt: format string * @arg: variable arguments */ @@ -128,7 +128,7 @@ static inline void _batadv_dbg(int type __always_unused, /** * batadv_err() - Store error in debug buffer and print it to kmsg buffer - * @net_dev: the soft interface net device + * @net_dev: the mesh interface net device * @fmt: format string * @arg: variable arguments */ diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c index 333e947afcce7..a08132888a3d4 100644 --- a/net/batman-adv/main.c +++ b/net/batman-adv/main.c @@ -51,13 +51,13 @@ #include "gateway_common.h" #include "hard-interface.h" #include "log.h" +#include "mesh-interface.h" #include "multicast.h" #include "netlink.h" #include "network-coding.h" #include "originator.h" #include "routing.h" #include "send.h" -#include "soft-interface.h" #include "tp_meter.h" #include "translation-table.h" @@ -143,14 +143,14 @@ static void __exit batadv_exit(void) } /** - * batadv_mesh_init() - Initialize soft interface - * @soft_iface: netdev struct of the soft interface + * batadv_mesh_init() - Initialize mesh interface + * @mesh_iface: netdev struct of the mesh interface * * Return: 0 on success or negative error number in case of failure */ -int batadv_mesh_init(struct net_device *soft_iface) +int batadv_mesh_init(struct net_device *mesh_iface) { - struct batadv_priv *bat_priv = netdev_priv(soft_iface); + struct batadv_priv *bat_priv = netdev_priv(mesh_iface); int ret; spin_lock_init(&bat_priv->forw_bat_list_lock); @@ -167,7 +167,7 @@ int batadv_mesh_init(struct net_device *soft_iface) #endif spin_lock_init(&bat_priv->tvlv.container_list_lock); spin_lock_init(&bat_priv->tvlv.handler_list_lock); - spin_lock_init(&bat_priv->softif_vlan_list_lock); + spin_lock_init(&bat_priv->meshif_vlan_list_lock); spin_lock_init(&bat_priv->tp_list_lock); INIT_HLIST_HEAD(&bat_priv->forw_bat_list); @@ -186,7 +186,7 @@ int batadv_mesh_init(struct net_device *soft_iface) #endif INIT_HLIST_HEAD(&bat_priv->tvlv.container_list); INIT_HLIST_HEAD(&bat_priv->tvlv.handler_list); - INIT_HLIST_HEAD(&bat_priv->softif_vlan_list); + INIT_HLIST_HEAD(&bat_priv->meshif_vlan_list); INIT_HLIST_HEAD(&bat_priv->tp_list); bat_priv->gw.generation = 0; @@ -253,12 +253,12 @@ int batadv_mesh_init(struct net_device *soft_iface) } /** - * batadv_mesh_free() - Deinitialize soft interface - * @soft_iface: netdev struct of the soft interface + * batadv_mesh_free() - Deinitialize mesh interface + * @mesh_iface: netdev struct of the mesh interface */ -void batadv_mesh_free(struct net_device *soft_iface) +void batadv_mesh_free(struct net_device *mesh_iface) { - struct batadv_priv *bat_priv = netdev_priv(soft_iface); + struct batadv_priv *bat_priv = netdev_priv(mesh_iface); atomic_set(&bat_priv->mesh_state, BATADV_MESH_DEACTIVATING); @@ -297,7 +297,7 @@ void batadv_mesh_free(struct net_device *soft_iface) /** * batadv_is_my_mac() - check if the given mac address belongs to any of the * real interfaces in the current mesh - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @addr: the address to check * * Return: 'true' if the mac address was found, false otherwise. 
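Aside from the soft-to-mesh rename, the hard-interface.c hunks earlier in this patch also migrate the mesh interface reference from the plain dev_hold()/dev_put() pair to netdev_hold()/netdev_put(). The tracked variants thread a netdevice_tracker through the lifetime of the reference, so a leaked or double-dropped reference can be attributed to its owner when CONFIG_NET_DEV_REFCNT_TRACKER is enabled. Below is a minimal sketch of the pairing; struct holder and its attach/detach helpers are hypothetical, while the patch itself embeds the trackers as dev_tracker and meshif_dev_tracker inside struct batadv_hard_iface:

#include <linux/netdevice.h>

/* hypothetical holder; stands in for struct batadv_hard_iface */
struct holder {
	struct net_device *dev;
	netdevice_tracker dev_tracker;
};

static void holder_attach(struct holder *h, struct net_device *dev)
{
	/* take a reference and register it with the refcount tracker;
	 * GFP_ATOMIC because the batman-adv callers may run in atomic context
	 */
	netdev_hold(dev, &h->dev_tracker, GFP_ATOMIC);
	h->dev = dev;
}

static void holder_detach(struct holder *h)
{
	/* the put must name the same tracker that took the reference */
	netdev_put(h->dev, &h->dev_tracker);
	h->dev = NULL;
}

The conversion is behavior-neutral when the tracker infrastructure is disabled; with it enabled, an unbalanced put is reported against the tracker that took the reference instead of showing up as an anonymous refcount underflow.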
@@ -312,7 +312,7 @@ bool batadv_is_my_mac(struct batadv_priv *bat_priv, const u8 *addr) if (hard_iface->if_status != BATADV_IF_ACTIVE) continue; - if (hard_iface->soft_iface != bat_priv->soft_iface) + if (hard_iface->mesh_iface != bat_priv->mesh_iface) continue; if (batadv_compare_eth(hard_iface->net_dev->dev_addr, addr)) { @@ -457,10 +457,10 @@ int batadv_batman_skb_recv(struct sk_buff *skb, struct net_device *dev, if (unlikely(skb->mac_len != ETH_HLEN || !skb_mac_header(skb))) goto err_free; - if (!hard_iface->soft_iface) + if (!hard_iface->mesh_iface) goto err_free; - bat_priv = netdev_priv(hard_iface->soft_iface); + bat_priv = netdev_priv(hard_iface->mesh_iface); if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE) goto err_free; @@ -651,7 +651,7 @@ unsigned short batadv_get_vid(struct sk_buff *skb, size_t header_len) /** * batadv_vlan_ap_isola_get() - return AP isolation status for the given vlan - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @vid: the VLAN identifier for which the AP isolation attributed as to be * looked up * @@ -661,15 +661,15 @@ unsigned short batadv_get_vid(struct sk_buff *skb, size_t header_len) bool batadv_vlan_ap_isola_get(struct batadv_priv *bat_priv, unsigned short vid) { bool ap_isolation_enabled = false; - struct batadv_softif_vlan *vlan; + struct batadv_meshif_vlan *vlan; /* if the AP isolation is requested on a VLAN, then check for its * setting in the proper VLAN private data structure */ - vlan = batadv_softif_vlan_get(bat_priv, vid); + vlan = batadv_meshif_vlan_get(bat_priv, vid); if (vlan) { ap_isolation_enabled = atomic_read(&vlan->ap_isolation); - batadv_softif_vlan_put(vlan); + batadv_meshif_vlan_put(vlan); } return ap_isolation_enabled; @@ -677,7 +677,7 @@ bool batadv_vlan_ap_isola_get(struct batadv_priv *bat_priv, unsigned short vid) /** * batadv_throw_uevent() - Send an uevent with batman-adv specific env data - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @type: subsystem type of event. Stored in uevent's BATTYPE * @action: action type of event. Stored in uevent's BATACTION * @data: string with additional information to the event (ignored for @@ -692,7 +692,7 @@ int batadv_throw_uevent(struct batadv_priv *bat_priv, enum batadv_uev_type type, struct kobject *bat_kobj; char *uevent_env[4] = { NULL, NULL, NULL, NULL }; - bat_kobj = &bat_priv->soft_iface->dev.kobj; + bat_kobj = &bat_priv->mesh_iface->dev.kobj; uevent_env[0] = kasprintf(GFP_ATOMIC, "%s%s", BATADV_UEV_TYPE_VAR, diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h index 964f3088af5b5..67af435ee04e1 100644 --- a/net/batman-adv/main.h +++ b/net/batman-adv/main.h @@ -13,7 +13,7 @@ #define BATADV_DRIVER_DEVICE "batman-adv" #ifndef BATADV_SOURCE_VERSION -#define BATADV_SOURCE_VERSION "2025.0" +#define BATADV_SOURCE_VERSION "2025.1" #endif /* B.A.T.M.A.N. 
parameters */ @@ -22,6 +22,8 @@ #define BATADV_THROUGHPUT_MAX_VALUE 0xFFFFFFFF #define BATADV_JITTER 20 +#define BATADV_MAX_MTU (ETH_MAX_MTU - batadv_max_header_len()) + /* Time To Live of broadcast messages */ #define BATADV_TTL 50 @@ -102,9 +104,7 @@ */ #define BATADV_TQ_SIMILARITY_THRESHOLD 50 -/* should not be bigger than 512 bytes or change the size of - * forw_packet->direct_link_flags - */ +#define BATADV_MAX_AGGREGATION_PACKETS 32 #define BATADV_MAX_AGGREGATION_BYTES 512 #define BATADV_MAX_AGGREGATION_MS 100 @@ -129,10 +129,10 @@ #define BATADV_TP_MAX_NUM 5 /** - * enum batadv_mesh_state - State of a soft interface + * enum batadv_mesh_state - State of a mesh interface */ enum batadv_mesh_state { - /** @BATADV_MESH_INACTIVE: soft interface is not yet running */ + /** @BATADV_MESH_INACTIVE: mesh interface is not yet running */ BATADV_MESH_INACTIVE, /** @BATADV_MESH_ACTIVE: interface is up and running */ @@ -238,8 +238,8 @@ extern unsigned int batadv_hardif_generation; extern unsigned char batadv_broadcast_addr[]; extern struct workqueue_struct *batadv_event_workqueue; -int batadv_mesh_init(struct net_device *soft_iface); -void batadv_mesh_free(struct net_device *soft_iface); +int batadv_mesh_init(struct net_device *mesh_iface); +void batadv_mesh_free(struct net_device *mesh_iface); bool batadv_is_my_mac(struct batadv_priv *bat_priv, const u8 *addr); int batadv_max_header_len(void); void batadv_skb_set_priority(struct sk_buff *skb, int offset); @@ -345,8 +345,8 @@ static inline bool batadv_has_timed_out(unsigned long timestamp, #define batadv_seq_after(x, y) batadv_seq_before(y, x) /** - * batadv_add_counter() - Add to per cpu statistics counter of soft interface - * @bat_priv: the bat priv with all the soft interface information + * batadv_add_counter() - Add to per cpu statistics counter of mesh interface + * @bat_priv: the bat priv with all the mesh interface information * @idx: counter index which should be modified * @count: value to increase counter by * @@ -359,8 +359,8 @@ static inline void batadv_add_counter(struct batadv_priv *bat_priv, size_t idx, } /** - * batadv_inc_counter() - Increase per cpu statistics counter of soft interface - * @b: the bat priv with all the soft interface information + * batadv_inc_counter() - Increase per cpu statistics counter of mesh interface + * @b: the bat priv with all the mesh interface information * @i: counter index which should be modified */ #define batadv_inc_counter(b, i) batadv_add_counter(b, i, 1) diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/mesh-interface.c similarity index 83% rename from net/batman-adv/soft-interface.c rename to net/batman-adv/mesh-interface.c index 822d788a5f86b..59e7b5aacbc93 100644 --- a/net/batman-adv/soft-interface.c +++ b/net/batman-adv/mesh-interface.c @@ -4,7 +4,7 @@ * Marek Lindner, Simon Wunderlich */ -#include "soft-interface.h" +#include "mesh-interface.h" #include "main.h" #include @@ -91,7 +91,7 @@ static int batadv_interface_release(struct net_device *dev) /** * batadv_sum_counter() - Sum the cpu-local counters for index 'idx' - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @idx: index of counter to sum up * * Return: sum of all cpu-local counters @@ -125,7 +125,7 @@ static struct net_device_stats *batadv_interface_stats(struct net_device *dev) static int batadv_interface_set_mac_addr(struct net_device *dev, void *p) { struct batadv_priv *bat_priv = netdev_priv(dev); - struct 
batadv_softif_vlan *vlan; + struct batadv_meshif_vlan *vlan; struct sockaddr *addr = p; u8 old_addr[ETH_ALEN]; @@ -140,7 +140,7 @@ static int batadv_interface_set_mac_addr(struct net_device *dev, void *p) return 0; rcu_read_lock(); - hlist_for_each_entry_rcu(vlan, &bat_priv->softif_vlan_list, list) { + hlist_for_each_entry_rcu(vlan, &bat_priv->meshif_vlan_list, list) { batadv_tt_local_remove(bat_priv, old_addr, vlan->vid, "mac address changed", false); batadv_tt_local_add(dev, addr->sa_data, vlan->vid, @@ -170,7 +170,7 @@ static int batadv_interface_change_mtu(struct net_device *dev, int new_mtu) * @dev: registered network device to modify * * We do not actually need to set any rx filters for the virtual batman - * soft interface. However a dummy handler enables a user to set static + * mesh interface. However a dummy handler enables a user to set static * multicast listeners for instance. */ static void batadv_interface_set_rx_mode(struct net_device *dev) @@ -178,10 +178,10 @@ static void batadv_interface_set_rx_mode(struct net_device *dev) } static netdev_tx_t batadv_interface_tx(struct sk_buff *skb, - struct net_device *soft_iface) + struct net_device *mesh_iface) { struct ethhdr *ethhdr; - struct batadv_priv *bat_priv = netdev_priv(soft_iface); + struct batadv_priv *bat_priv = netdev_priv(mesh_iface); struct batadv_hard_iface *primary_if = NULL; struct batadv_bcast_packet *bcast_packet; static const u8 stp_addr[ETH_ALEN] = {0x01, 0x80, 0xC2, 0x00, @@ -209,7 +209,7 @@ static netdev_tx_t batadv_interface_tx(struct sk_buff *skb, /* reset control block to avoid left overs from previous users */ memset(skb->cb, 0, sizeof(struct batadv_skb_cb)); - netif_trans_update(soft_iface); + netif_trans_update(mesh_iface); vid = batadv_get_vid(skb, 0); skb_reset_mac_header(skb); @@ -246,7 +246,7 @@ static netdev_tx_t batadv_interface_tx(struct sk_buff *skb, /* Register the client MAC in the transtable */ if (!is_multicast_ether_addr(ethhdr->h_source) && !batadv_bla_is_loopdetect_mac(ethhdr->h_source)) { - client_added = batadv_tt_local_add(soft_iface, ethhdr->h_source, + client_added = batadv_tt_local_add(mesh_iface, ethhdr->h_source, vid, skb->skb_iif, skb->mark); if (!client_added) @@ -397,12 +397,12 @@ static netdev_tx_t batadv_interface_tx(struct sk_buff *skb, /** * batadv_interface_rx() - receive ethernet frame on local batman-adv interface - * @soft_iface: local interface which will receive the ethernet frame - * @skb: ethernet frame for @soft_iface + * @mesh_iface: local interface which will receive the ethernet frame + * @skb: ethernet frame for @mesh_iface * @hdr_size: size of already parsed batman-adv header * @orig_node: originator from which the batman-adv packet was sent * - * Sends an ethernet frame to the receive path of the local @soft_iface. + * Sends an ethernet frame to the receive path of the local @mesh_iface. * skb->data still points to the batman-adv header with the size @hdr_size. * The caller has to have parsed this header already and made sure that at least * @hdr_size bytes are still available for pull in @skb. @@ -412,12 +412,12 @@ static netdev_tx_t batadv_interface_tx(struct sk_buff *skb, * unicast packets will be dropped directly when they were sent between two * isolated clients.
*/ -void batadv_interface_rx(struct net_device *soft_iface, +void batadv_interface_rx(struct net_device *mesh_iface, struct sk_buff *skb, int hdr_size, struct batadv_orig_node *orig_node) { struct batadv_bcast_packet *batadv_bcast_packet; - struct batadv_priv *bat_priv = netdev_priv(soft_iface); + struct batadv_priv *bat_priv = netdev_priv(mesh_iface); struct vlan_ethhdr *vhdr; struct ethhdr *ethhdr; unsigned short vid; @@ -457,7 +457,7 @@ void batadv_interface_rx(struct net_device *soft_iface, } /* skb->dev & skb->pkt_type are set here */ - skb->protocol = eth_type_trans(skb, soft_iface); + skb->protocol = eth_type_trans(skb, mesh_iface); skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN); batadv_inc_counter(bat_priv, BATADV_CNT_RX); @@ -502,38 +502,38 @@ void batadv_interface_rx(struct net_device *soft_iface, } /** - * batadv_softif_vlan_release() - release vlan from lists and queue for free + * batadv_meshif_vlan_release() - release vlan from lists and queue for free * after rcu grace period * @ref: kref pointer of the vlan object */ -void batadv_softif_vlan_release(struct kref *ref) +void batadv_meshif_vlan_release(struct kref *ref) { - struct batadv_softif_vlan *vlan; + struct batadv_meshif_vlan *vlan; - vlan = container_of(ref, struct batadv_softif_vlan, refcount); + vlan = container_of(ref, struct batadv_meshif_vlan, refcount); - spin_lock_bh(&vlan->bat_priv->softif_vlan_list_lock); + spin_lock_bh(&vlan->bat_priv->meshif_vlan_list_lock); hlist_del_rcu(&vlan->list); - spin_unlock_bh(&vlan->bat_priv->softif_vlan_list_lock); + spin_unlock_bh(&vlan->bat_priv->meshif_vlan_list_lock); kfree_rcu(vlan, rcu); } /** - * batadv_softif_vlan_get() - get the vlan object for a specific vid - * @bat_priv: the bat priv with all the soft interface information + * batadv_meshif_vlan_get() - get the vlan object for a specific vid + * @bat_priv: the bat priv with all the mesh interface information * @vid: the identifier of the vlan object to retrieve * * Return: the private data of the vlan matching the vid passed as argument or * NULL otherwise. The refcounter of the returned object is incremented by 1. */ -struct batadv_softif_vlan *batadv_softif_vlan_get(struct batadv_priv *bat_priv, +struct batadv_meshif_vlan *batadv_meshif_vlan_get(struct batadv_priv *bat_priv, unsigned short vid) { - struct batadv_softif_vlan *vlan_tmp, *vlan = NULL; + struct batadv_meshif_vlan *vlan_tmp, *vlan = NULL; rcu_read_lock(); - hlist_for_each_entry_rcu(vlan_tmp, &bat_priv->softif_vlan_list, list) { + hlist_for_each_entry_rcu(vlan_tmp, &bat_priv->meshif_vlan_list, list) { if (vlan_tmp->vid != vid) continue; @@ -549,28 +549,28 @@ struct batadv_softif_vlan *batadv_softif_vlan_get(struct batadv_priv *bat_priv, } /** - * batadv_softif_create_vlan() - allocate the needed resources for a new vlan - * @bat_priv: the bat priv with all the soft interface information + * batadv_meshif_create_vlan() - allocate the needed resources for a new vlan + * @bat_priv: the bat priv with all the mesh interface information * @vid: the VLAN identifier * * Return: 0 on success, a negative error otherwise. 
*/ -int batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid) +int batadv_meshif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid) { - struct batadv_softif_vlan *vlan; + struct batadv_meshif_vlan *vlan; - spin_lock_bh(&bat_priv->softif_vlan_list_lock); + spin_lock_bh(&bat_priv->meshif_vlan_list_lock); - vlan = batadv_softif_vlan_get(bat_priv, vid); + vlan = batadv_meshif_vlan_get(bat_priv, vid); if (vlan) { - batadv_softif_vlan_put(vlan); - spin_unlock_bh(&bat_priv->softif_vlan_list_lock); + batadv_meshif_vlan_put(vlan); + spin_unlock_bh(&bat_priv->meshif_vlan_list_lock); return -EEXIST; } vlan = kzalloc(sizeof(*vlan), GFP_ATOMIC); if (!vlan) { - spin_unlock_bh(&bat_priv->softif_vlan_list_lock); + spin_unlock_bh(&bat_priv->meshif_vlan_list_lock); return -ENOMEM; } @@ -581,37 +581,37 @@ int batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid) atomic_set(&vlan->ap_isolation, 0); kref_get(&vlan->refcount); - hlist_add_head_rcu(&vlan->list, &bat_priv->softif_vlan_list); - spin_unlock_bh(&bat_priv->softif_vlan_list_lock); + hlist_add_head_rcu(&vlan->list, &bat_priv->meshif_vlan_list); + spin_unlock_bh(&bat_priv->meshif_vlan_list_lock); /* add a new TT local entry. This one will be marked with the NOPURGE * flag */ - batadv_tt_local_add(bat_priv->soft_iface, - bat_priv->soft_iface->dev_addr, vid, + batadv_tt_local_add(bat_priv->mesh_iface, + bat_priv->mesh_iface->dev_addr, vid, BATADV_NULL_IFINDEX, BATADV_NO_MARK); - /* don't return reference to new softif_vlan */ - batadv_softif_vlan_put(vlan); + /* don't return reference to new meshif_vlan */ + batadv_meshif_vlan_put(vlan); return 0; } /** - * batadv_softif_destroy_vlan() - remove and destroy a softif_vlan object - * @bat_priv: the bat priv with all the soft interface information + * batadv_meshif_destroy_vlan() - remove and destroy a meshif_vlan object + * @bat_priv: the bat priv with all the mesh interface information * @vlan: the object to remove */ -static void batadv_softif_destroy_vlan(struct batadv_priv *bat_priv, - struct batadv_softif_vlan *vlan) +static void batadv_meshif_destroy_vlan(struct batadv_priv *bat_priv, + struct batadv_meshif_vlan *vlan) { /* explicitly remove the associated TT local entry because it is marked * with the NOPURGE flag */ - batadv_tt_local_remove(bat_priv, bat_priv->soft_iface->dev_addr, + batadv_tt_local_remove(bat_priv, bat_priv->mesh_iface->dev_addr, vlan->vid, "vlan interface destroyed", false); - batadv_softif_vlan_put(vlan); + batadv_meshif_vlan_put(vlan); } /** @@ -629,7 +629,7 @@ static int batadv_interface_add_vid(struct net_device *dev, __be16 proto, unsigned short vid) { struct batadv_priv *bat_priv = netdev_priv(dev); - struct batadv_softif_vlan *vlan; + struct batadv_meshif_vlan *vlan; /* only 802.1Q vlans are supported. * batman-adv does not know how to handle other types @@ -648,21 +648,21 @@ static int batadv_interface_add_vid(struct net_device *dev, __be16 proto, vid |= BATADV_VLAN_HAS_TAG; /* if a new vlan is getting created and it already exists, it means that - * it was not deleted yet. batadv_softif_vlan_get() increases the + * it was not deleted yet. batadv_meshif_vlan_get() increases the * refcount in order to revive the object. * * if it does not exist then create it. */ - vlan = batadv_softif_vlan_get(bat_priv, vid); + vlan = batadv_meshif_vlan_get(bat_priv, vid); if (!vlan) - return batadv_softif_create_vlan(bat_priv, vid); + return batadv_meshif_create_vlan(bat_priv, vid); /* add a new TT local entry. 
This one will be marked with the NOPURGE * flag. This must be added again, even if the vlan object already * exists, because the entry was deleted by kill_vid() */ - batadv_tt_local_add(bat_priv->soft_iface, - bat_priv->soft_iface->dev_addr, vid, + batadv_tt_local_add(bat_priv->mesh_iface, + bat_priv->mesh_iface->dev_addr, vid, BATADV_NULL_IFINDEX, BATADV_NO_MARK); return 0; @@ -684,7 +684,7 @@ static int batadv_interface_kill_vid(struct net_device *dev, __be16 proto, unsigned short vid) { struct batadv_priv *bat_priv = netdev_priv(dev); - struct batadv_softif_vlan *vlan; + struct batadv_meshif_vlan *vlan; /* only 802.1Q vlans are supported. batman-adv does not know how to * handle other types @@ -693,19 +693,19 @@ static int batadv_interface_kill_vid(struct net_device *dev, __be16 proto, return -EINVAL; /* "priority tag" frames are handled like "untagged" frames - * and no softif_vlan needs to be destroyed + * and no meshif_vlan needs to be destroyed */ if (vid == 0) return 0; - vlan = batadv_softif_vlan_get(bat_priv, vid | BATADV_VLAN_HAS_TAG); + vlan = batadv_meshif_vlan_get(bat_priv, vid | BATADV_VLAN_HAS_TAG); if (!vlan) return -ENOENT; - batadv_softif_destroy_vlan(bat_priv, vlan); + batadv_meshif_destroy_vlan(bat_priv, vlan); /* finally free the vlan object */ - batadv_softif_vlan_put(vlan); + batadv_meshif_vlan_put(vlan); return 0; } @@ -741,12 +741,12 @@ static void batadv_set_lockdep_class(struct net_device *dev) } /** - * batadv_softif_init_late() - late stage initialization of soft interface + * batadv_meshif_init_late() - late stage initialization of mesh interface * @dev: registered network device to modify * * Return: error code on failures */ -static int batadv_softif_init_late(struct net_device *dev) +static int batadv_meshif_init_late(struct net_device *dev) { struct batadv_priv *bat_priv; u32 random_seqno; @@ -756,7 +756,7 @@ static int batadv_softif_init_late(struct net_device *dev) batadv_set_lockdep_class(dev); bat_priv = netdev_priv(dev); - bat_priv->soft_iface = dev; + bat_priv->mesh_iface = dev; /* batadv_interface_stats() needs to be available as soon as * register_netdevice() has been called @@ -790,7 +790,7 @@ static int batadv_softif_init_late(struct net_device *dev) atomic_set(&bat_priv->log_level, 0); #endif atomic_set(&bat_priv->fragmentation, 1); - atomic_set(&bat_priv->packet_size_max, ETH_DATA_LEN); + atomic_set(&bat_priv->packet_size_max, BATADV_MAX_MTU); atomic_set(&bat_priv->bcast_queue_left, BATADV_BCAST_QUEUE_LEN); atomic_set(&bat_priv->batman_queue_left, BATADV_BATMAN_QUEUE_LEN); @@ -837,14 +837,14 @@ static int batadv_softif_init_late(struct net_device *dev) } /** - * batadv_softif_slave_add() - Add a slave interface to a batadv_soft_interface - * @dev: batadv_soft_interface used as master interface + * batadv_meshif_slave_add() - Add a slave interface to a batadv_mesh_interface + * @dev: batadv_mesh_interface used as master interface * @slave_dev: net_device which should become the slave interface * @extack: extended ACK report struct * * Return: 0 if successful or error otherwise. 
*/ -static int batadv_softif_slave_add(struct net_device *dev, +static int batadv_meshif_slave_add(struct net_device *dev, struct net_device *slave_dev, struct netlink_ext_ack *extack) { @@ -852,7 +852,7 @@ static int batadv_softif_slave_add(struct net_device *dev, int ret = -EINVAL; hard_iface = batadv_hardif_get_by_netdev(slave_dev); - if (!hard_iface || hard_iface->soft_iface) + if (!hard_iface || hard_iface->mesh_iface) goto out; ret = batadv_hardif_enable_interface(hard_iface, dev); @@ -863,13 +863,13 @@ static int batadv_softif_slave_add(struct net_device *dev, } /** - * batadv_softif_slave_del() - Delete a slave iface from a batadv_soft_interface - * @dev: batadv_soft_interface used as master interface + * batadv_meshif_slave_del() - Delete a slave iface from a batadv_mesh_interface + * @dev: batadv_mesh_interface used as master interface * @slave_dev: net_device which should be removed from the master interface * * Return: 0 if successful or error otherwise. */ -static int batadv_softif_slave_del(struct net_device *dev, +static int batadv_meshif_slave_del(struct net_device *dev, struct net_device *slave_dev) { struct batadv_hard_iface *hard_iface; @@ -877,7 +877,7 @@ static int batadv_softif_slave_del(struct net_device *dev, hard_iface = batadv_hardif_get_by_netdev(slave_dev); - if (!hard_iface || hard_iface->soft_iface != dev) + if (!hard_iface || hard_iface->mesh_iface != dev) goto out; batadv_hardif_disable_interface(hard_iface); @@ -889,7 +889,7 @@ static int batadv_softif_slave_del(struct net_device *dev, } static const struct net_device_ops batadv_netdev_ops = { - .ndo_init = batadv_softif_init_late, + .ndo_init = batadv_meshif_init_late, .ndo_open = batadv_interface_open, .ndo_stop = batadv_interface_release, .ndo_get_stats = batadv_interface_stats, @@ -900,8 +900,8 @@ static const struct net_device_ops batadv_netdev_ops = { .ndo_set_rx_mode = batadv_interface_set_rx_mode, .ndo_start_xmit = batadv_interface_tx, .ndo_validate_addr = eth_validate_addr, - .ndo_add_slave = batadv_softif_slave_add, - .ndo_del_slave = batadv_softif_slave_del, + .ndo_add_slave = batadv_meshif_slave_add, + .ndo_del_slave = batadv_meshif_slave_del, }; static void batadv_get_drvinfo(struct net_device *dev, @@ -1009,10 +1009,10 @@ static const struct ethtool_ops batadv_ethtool_ops = { }; /** - * batadv_softif_free() - Deconstructor of batadv_soft_interface + * batadv_meshif_free() - Deconstructor of batadv_mesh_interface * @dev: Device to cleanup and remove */ -static void batadv_softif_free(struct net_device *dev) +static void batadv_meshif_free(struct net_device *dev) { batadv_mesh_free(dev); @@ -1024,25 +1024,26 @@ static void batadv_softif_free(struct net_device *dev) } /** - * batadv_softif_init_early() - early stage initialization of soft interface + * batadv_meshif_init_early() - early stage initialization of mesh interface * @dev: registered network device to modify */ -static void batadv_softif_init_early(struct net_device *dev) +static void batadv_meshif_init_early(struct net_device *dev) { ether_setup(dev); dev->netdev_ops = &batadv_netdev_ops; dev->needs_free_netdev = true; - dev->priv_destructor = batadv_softif_free; + dev->priv_destructor = batadv_meshif_free; dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; dev->priv_flags |= IFF_NO_QUEUE; dev->lltx = true; - dev->netns_local = true; + dev->netns_immutable = true; /* can't call min_mtu, because the needed variables * have not been initialized yet */ dev->mtu = ETH_DATA_LEN; + dev->max_mtu = BATADV_MAX_MTU; /* generate random address */ 
eth_hw_addr_random(dev); @@ -1051,14 +1052,14 @@ static void batadv_softif_init_early(struct net_device *dev) } /** - * batadv_softif_validate() - validate configuration of new batadv link + * batadv_meshif_validate() - validate configuration of new batadv link * @tb: IFLA_INFO_DATA netlink attributes * @data: enum batadv_ifla_attrs attributes * @extack: extended ACK report struct * * Return: 0 if successful or error otherwise. */ -static int batadv_softif_validate(struct nlattr *tb[], struct nlattr *data[], +static int batadv_meshif_validate(struct nlattr *tb[], struct nlattr *data[], struct netlink_ext_ack *extack) { struct batadv_algo_ops *algo_ops; @@ -1076,20 +1077,19 @@ static int batadv_softif_validate(struct nlattr *tb[], struct nlattr *data[], } /** - * batadv_softif_newlink() - pre-initialize and register new batadv link - * @src_net: the applicable net namespace + * batadv_meshif_newlink() - pre-initialize and register new batadv link * @dev: network device to register - * @tb: IFLA_INFO_DATA netlink attributes - * @data: enum batadv_ifla_attrs attributes + * @params: rtnl newlink parameters * @extack: extended ACK report struct * * Return: 0 if successful or error otherwise. */ -static int batadv_softif_newlink(struct net *src_net, struct net_device *dev, - struct nlattr *tb[], struct nlattr *data[], +static int batadv_meshif_newlink(struct net_device *dev, + struct rtnl_newlink_params *params, struct netlink_ext_ack *extack) { struct batadv_priv *bat_priv = netdev_priv(dev); + struct nlattr **data = params->data; const char *algo_name; int err; @@ -1104,40 +1104,40 @@ static int batadv_softif_newlink(struct net *src_net, struct net_device *dev, } /** - * batadv_softif_destroy_netlink() - deletion of batadv_soft_interface via + * batadv_meshif_destroy_netlink() - deletion of batadv_mesh_interface via * netlink - * @soft_iface: the to-be-removed batman-adv interface + * @mesh_iface: the to-be-removed batman-adv interface * @head: list pointer */ -static void batadv_softif_destroy_netlink(struct net_device *soft_iface, +static void batadv_meshif_destroy_netlink(struct net_device *mesh_iface, struct list_head *head) { - struct batadv_priv *bat_priv = netdev_priv(soft_iface); + struct batadv_priv *bat_priv = netdev_priv(mesh_iface); struct batadv_hard_iface *hard_iface; - struct batadv_softif_vlan *vlan; + struct batadv_meshif_vlan *vlan; list_for_each_entry(hard_iface, &batadv_hardif_list, list) { - if (hard_iface->soft_iface == soft_iface) + if (hard_iface->mesh_iface == mesh_iface) batadv_hardif_disable_interface(hard_iface); } /* destroy the "untagged" VLAN */ - vlan = batadv_softif_vlan_get(bat_priv, BATADV_NO_FLAGS); + vlan = batadv_meshif_vlan_get(bat_priv, BATADV_NO_FLAGS); if (vlan) { - batadv_softif_destroy_vlan(bat_priv, vlan); - batadv_softif_vlan_put(vlan); + batadv_meshif_destroy_vlan(bat_priv, vlan); + batadv_meshif_vlan_put(vlan); } - unregister_netdevice_queue(soft_iface, head); + unregister_netdevice_queue(mesh_iface, head); } /** - * batadv_softif_is_valid() - Check whether device is a batadv soft interface + * batadv_meshif_is_valid() - Check whether device is a batadv mesh interface * @net_dev: device which should be checked * * Return: true when net_dev is a batman-adv interface, false otherwise */ -bool batadv_softif_is_valid(const struct net_device *net_dev) +bool batadv_meshif_is_valid(const struct net_device *net_dev) { if (net_dev->netdev_ops->ndo_start_xmit == batadv_interface_tx) return true; @@ -1152,10 +1152,10 @@ static const struct nla_policy 
batadv_ifla_policy[IFLA_BATADV_MAX + 1] = { struct rtnl_link_ops batadv_link_ops __read_mostly = { .kind = "batadv", .priv_size = sizeof(struct batadv_priv), - .setup = batadv_softif_init_early, + .setup = batadv_meshif_init_early, .maxtype = IFLA_BATADV_MAX, .policy = batadv_ifla_policy, - .validate = batadv_softif_validate, - .newlink = batadv_softif_newlink, - .dellink = batadv_softif_destroy_netlink, + .validate = batadv_meshif_validate, + .newlink = batadv_meshif_newlink, + .dellink = batadv_meshif_destroy_netlink, }; diff --git a/net/batman-adv/soft-interface.h b/net/batman-adv/mesh-interface.h similarity index 50% rename from net/batman-adv/soft-interface.h rename to net/batman-adv/mesh-interface.h index 9f2003f1a4972..7ba055b2bc269 100644 --- a/net/batman-adv/soft-interface.h +++ b/net/batman-adv/mesh-interface.h @@ -4,8 +4,8 @@ * Marek Lindner */ -#ifndef _NET_BATMAN_ADV_SOFT_INTERFACE_H_ -#define _NET_BATMAN_ADV_SOFT_INTERFACE_H_ +#ifndef _NET_BATMAN_ADV_MESH_INTERFACE_H_ +#define _NET_BATMAN_ADV_MESH_INTERFACE_H_ #include "main.h" @@ -16,27 +16,27 @@ #include int batadv_skb_head_push(struct sk_buff *skb, unsigned int len); -void batadv_interface_rx(struct net_device *soft_iface, +void batadv_interface_rx(struct net_device *mesh_iface, struct sk_buff *skb, int hdr_size, struct batadv_orig_node *orig_node); -bool batadv_softif_is_valid(const struct net_device *net_dev); +bool batadv_meshif_is_valid(const struct net_device *net_dev); extern struct rtnl_link_ops batadv_link_ops; -int batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid); -void batadv_softif_vlan_release(struct kref *ref); -struct batadv_softif_vlan *batadv_softif_vlan_get(struct batadv_priv *bat_priv, +int batadv_meshif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid); +void batadv_meshif_vlan_release(struct kref *ref); +struct batadv_meshif_vlan *batadv_meshif_vlan_get(struct batadv_priv *bat_priv, unsigned short vid); /** - * batadv_softif_vlan_put() - decrease the vlan object refcounter and + * batadv_meshif_vlan_put() - decrease the vlan object refcounter and * possibly release it * @vlan: the vlan object to release */ -static inline void batadv_softif_vlan_put(struct batadv_softif_vlan *vlan) +static inline void batadv_meshif_vlan_put(struct batadv_meshif_vlan *vlan) { if (!vlan) return; - kref_put(&vlan->refcount, batadv_softif_vlan_release); + kref_put(&vlan->refcount, batadv_meshif_vlan_release); } -#endif /* _NET_BATMAN_ADV_SOFT_INTERFACE_H_ */ +#endif /* _NET_BATMAN_ADV_MESH_INTERFACE_H_ */ diff --git a/net/batman-adv/multicast.c b/net/batman-adv/multicast.c index d95c418484fa2..5786680aff30c 100644 --- a/net/batman-adv/multicast.c +++ b/net/batman-adv/multicast.c @@ -63,7 +63,7 @@ static void batadv_mcast_mla_update(struct work_struct *work); /** * batadv_mcast_start_timer() - schedule the multicast periodic worker - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information */ static void batadv_mcast_start_timer(struct batadv_priv *bat_priv) { @@ -72,18 +72,18 @@ static void batadv_mcast_start_timer(struct batadv_priv *bat_priv) } /** - * batadv_mcast_get_bridge() - get the bridge on top of the softif if it exists - * @soft_iface: netdev struct of the mesh interface + * batadv_mcast_get_bridge() - get the bridge on top of the meshif if it exists + * @mesh_iface: netdev struct of the mesh interface * - * If the given soft interface has a bridge on top then the refcount + * If the given mesh 
interface has a bridge on top then the refcount * of the according net device is increased. * * Return: NULL if no such bridge exists. Otherwise the net device of the * bridge. */ -static struct net_device *batadv_mcast_get_bridge(struct net_device *soft_iface) +static struct net_device *batadv_mcast_get_bridge(struct net_device *mesh_iface) { - struct net_device *upper = soft_iface; + struct net_device *upper = mesh_iface; rcu_read_lock(); do { @@ -97,7 +97,7 @@ static struct net_device *batadv_mcast_get_bridge(struct net_device *soft_iface) } /** - * batadv_mcast_mla_rtr_flags_softif_get_ipv4() - get mcast router flags from + * batadv_mcast_mla_rtr_flags_meshif_get_ipv4() - get mcast router flags from * node for IPv4 * @dev: the interface to check * @@ -107,7 +107,7 @@ static struct net_device *batadv_mcast_get_bridge(struct net_device *soft_iface) * * Return: BATADV_NO_FLAGS if present, BATADV_MCAST_WANT_NO_RTR4 otherwise. */ -static u8 batadv_mcast_mla_rtr_flags_softif_get_ipv4(struct net_device *dev) +static u8 batadv_mcast_mla_rtr_flags_meshif_get_ipv4(struct net_device *dev) { struct in_device *in_dev = __in_dev_get_rcu(dev); @@ -118,7 +118,7 @@ static u8 batadv_mcast_mla_rtr_flags_softif_get_ipv4(struct net_device *dev) } /** - * batadv_mcast_mla_rtr_flags_softif_get_ipv6() - get mcast router flags from + * batadv_mcast_mla_rtr_flags_meshif_get_ipv6() - get mcast router flags from * node for IPv6 * @dev: the interface to check * @@ -129,7 +129,7 @@ static u8 batadv_mcast_mla_rtr_flags_softif_get_ipv4(struct net_device *dev) * Return: BATADV_NO_FLAGS if present, BATADV_MCAST_WANT_NO_RTR6 otherwise. */ #if IS_ENABLED(CONFIG_IPV6_MROUTE) -static u8 batadv_mcast_mla_rtr_flags_softif_get_ipv6(struct net_device *dev) +static u8 batadv_mcast_mla_rtr_flags_meshif_get_ipv6(struct net_device *dev) { struct inet6_dev *in6_dev = __in6_dev_get(dev); @@ -140,16 +140,16 @@ static u8 batadv_mcast_mla_rtr_flags_softif_get_ipv6(struct net_device *dev) } #else static inline u8 -batadv_mcast_mla_rtr_flags_softif_get_ipv6(struct net_device *dev) +batadv_mcast_mla_rtr_flags_meshif_get_ipv6(struct net_device *dev) { return BATADV_MCAST_WANT_NO_RTR6; } #endif /** - * batadv_mcast_mla_rtr_flags_softif_get() - get mcast router flags from node - * @bat_priv: the bat priv with all the soft interface information - * @bridge: bridge interface on top of the soft_iface if present, + * batadv_mcast_mla_rtr_flags_meshif_get() - get mcast router flags from node + * @bat_priv: the bat priv with all the mesh interface information + * @bridge: bridge interface on top of the mesh_iface if present, * otherwise pass NULL * * Checks the presence of IPv4 and IPv6 multicast routers on this @@ -161,16 +161,16 @@ batadv_mcast_mla_rtr_flags_softif_get_ipv6(struct net_device *dev) * BATADV_MCAST_WANT_NO_RTR6: No IPv6 multicast router is present * The former two OR'd: no multicast router is present */ -static u8 batadv_mcast_mla_rtr_flags_softif_get(struct batadv_priv *bat_priv, +static u8 batadv_mcast_mla_rtr_flags_meshif_get(struct batadv_priv *bat_priv, struct net_device *bridge) { - struct net_device *dev = bridge ? bridge : bat_priv->soft_iface; + struct net_device *dev = bridge ? 
bridge : bat_priv->mesh_iface; u8 flags = BATADV_NO_FLAGS; rcu_read_lock(); - flags |= batadv_mcast_mla_rtr_flags_softif_get_ipv4(dev); - flags |= batadv_mcast_mla_rtr_flags_softif_get_ipv6(dev); + flags |= batadv_mcast_mla_rtr_flags_meshif_get_ipv4(dev); + flags |= batadv_mcast_mla_rtr_flags_meshif_get_ipv6(dev); rcu_read_unlock(); @@ -179,8 +179,8 @@ static u8 batadv_mcast_mla_rtr_flags_softif_get(struct batadv_priv *bat_priv, /** * batadv_mcast_mla_rtr_flags_bridge_get() - get mcast router flags from bridge - * @bat_priv: the bat priv with all the soft interface information - * @bridge: bridge interface on top of the soft_iface if present, + * @bat_priv: the bat priv with all the mesh interface information + * @bridge: bridge interface on top of the mesh_iface if present, * otherwise pass NULL * * Checks the presence of IPv4 and IPv6 multicast routers behind a bridge. @@ -194,7 +194,7 @@ static u8 batadv_mcast_mla_rtr_flags_softif_get(struct batadv_priv *bat_priv, static u8 batadv_mcast_mla_rtr_flags_bridge_get(struct batadv_priv *bat_priv, struct net_device *bridge) { - struct net_device *dev = bat_priv->soft_iface; + struct net_device *dev = bat_priv->mesh_iface; u8 flags = BATADV_NO_FLAGS; if (!bridge) @@ -210,8 +210,8 @@ static u8 batadv_mcast_mla_rtr_flags_bridge_get(struct batadv_priv *bat_priv, /** * batadv_mcast_mla_rtr_flags_get() - get multicast router flags - * @bat_priv: the bat priv with all the soft interface information - * @bridge: bridge interface on top of the soft_iface if present, + * @bat_priv: the bat priv with all the mesh interface information + * @bridge: bridge interface on top of the mesh_iface if present, * otherwise pass NULL * * Checks the presence of IPv4 and IPv6 multicast routers on this @@ -228,7 +228,7 @@ static u8 batadv_mcast_mla_rtr_flags_get(struct batadv_priv *bat_priv, { u8 flags = BATADV_MCAST_WANT_NO_RTR4 | BATADV_MCAST_WANT_NO_RTR6; - flags &= batadv_mcast_mla_rtr_flags_softif_get(bat_priv, bridge); + flags &= batadv_mcast_mla_rtr_flags_meshif_get(bat_priv, bridge); flags &= batadv_mcast_mla_rtr_flags_bridge_get(bat_priv, bridge); return flags; @@ -236,7 +236,7 @@ static u8 batadv_mcast_mla_rtr_flags_get(struct batadv_priv *bat_priv, /** * batadv_mcast_mla_forw_flags_get() - get multicast forwarding flags - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * * Checks if all active hard interfaces have an MTU larger or equal to 1280 * bytes (IPv6 minimum MTU). @@ -252,7 +252,7 @@ static u8 batadv_mcast_mla_forw_flags_get(struct batadv_priv *bat_priv) if (hard_iface->if_status != BATADV_IF_ACTIVE) continue; - if (hard_iface->soft_iface != bat_priv->soft_iface) + if (hard_iface->mesh_iface != bat_priv->mesh_iface) continue; if (hard_iface->net_dev->mtu < IPV6_MIN_MTU) { @@ -267,7 +267,7 @@ static u8 batadv_mcast_mla_forw_flags_get(struct batadv_priv *bat_priv) /** * batadv_mcast_mla_flags_get() - get the new multicast flags - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * * Return: A set of flags for the current/next TVLV, querier and * bridge state. 
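As the hunks above show, batadv_mcast_mla_rtr_flags_get() composes its per-source results purely with bitwise AND: it starts from "no IPv4 and no IPv6 multicast router present anywhere" and lets each probe (the mesh interface itself, then the bridge on top, if any) only clear bits, never set them, so a single detected router on either side removes the corresponding WANT_NO_RTR bit for good. A standalone model of that masking; the helper name and the flag values are illustrative, not the kernel's BATADV_MCAST_WANT_NO_RTR* definitions:

#include <stdint.h>
#include <stdio.h>

#define WANT_NO_RTR4 0x10	/* illustrative bit values */
#define WANT_NO_RTR6 0x20

/* each argument is the subset of WANT_NO_RTR* bits that one probe can
 * still guarantee; AND-ing them keeps a bit only if every probe agrees
 */
static uint8_t rtr_flags_get(uint8_t meshif_flags, uint8_t bridge_flags)
{
	uint8_t flags = WANT_NO_RTR4 | WANT_NO_RTR6;

	flags &= meshif_flags;
	flags &= bridge_flags;

	return flags;
}

int main(void)
{
	/* the bridge sees an IPv4 router, so WANT_NO_RTR4 must not survive */
	uint8_t f = rtr_flags_get(WANT_NO_RTR4 | WANT_NO_RTR6, WANT_NO_RTR6);

	printf("flags = 0x%02x\n", f);	/* 0x20: only WANT_NO_RTR6 remains */
	return 0;
}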
@@ -275,7 +275,7 @@ static u8 batadv_mcast_mla_forw_flags_get(struct batadv_priv *bat_priv) static struct batadv_mcast_mla_flags batadv_mcast_mla_flags_get(struct batadv_priv *bat_priv) { - struct net_device *dev = bat_priv->soft_iface; + struct net_device *dev = bat_priv->mesh_iface; struct batadv_mcast_querier_state *qr4, *qr6; struct batadv_mcast_mla_flags mla_flags; struct net_device *bridge; @@ -351,13 +351,13 @@ static bool batadv_mcast_mla_is_duplicate(u8 *mcast_addr, } /** - * batadv_mcast_mla_softif_get_ipv4() - get softif IPv4 multicast listeners + * batadv_mcast_mla_meshif_get_ipv4() - get meshif IPv4 multicast listeners * @dev: the device to collect multicast addresses from * @mcast_list: a list to put found addresses into * @flags: flags indicating the new multicast state * * Collects multicast addresses of IPv4 multicast listeners residing - * on this kernel on the given soft interface, dev, in + * on this kernel on the given mesh interface, dev, in * the given mcast_list. In general, multicast listeners provided by * your multicast receiving applications run directly on this node. * @@ -365,7 +365,7 @@ static bool batadv_mcast_mla_is_duplicate(u8 *mcast_addr, * items added to the mcast_list otherwise. */ static int -batadv_mcast_mla_softif_get_ipv4(struct net_device *dev, +batadv_mcast_mla_meshif_get_ipv4(struct net_device *dev, struct hlist_head *mcast_list, struct batadv_mcast_mla_flags *flags) { @@ -417,13 +417,13 @@ batadv_mcast_mla_softif_get_ipv4(struct net_device *dev, } /** - * batadv_mcast_mla_softif_get_ipv6() - get softif IPv6 multicast listeners + * batadv_mcast_mla_meshif_get_ipv6() - get meshif IPv6 multicast listeners * @dev: the device to collect multicast addresses from * @mcast_list: a list to put found addresses into * @flags: flags indicating the new multicast state * * Collects multicast addresses of IPv6 multicast listeners residing - * on this kernel on the given soft interface, dev, in + * on this kernel on the given mesh interface, dev, in * the given mcast_list. In general, multicast listeners provided by * your multicast receiving applications run directly on this node. * @@ -432,7 +432,7 @@ batadv_mcast_mla_softif_get_ipv4(struct net_device *dev, */ #if IS_ENABLED(CONFIG_IPV6) static int -batadv_mcast_mla_softif_get_ipv6(struct net_device *dev, +batadv_mcast_mla_meshif_get_ipv6(struct net_device *dev, struct hlist_head *mcast_list, struct batadv_mcast_mla_flags *flags) { @@ -490,7 +490,7 @@ batadv_mcast_mla_softif_get_ipv6(struct net_device *dev, } #else static inline int -batadv_mcast_mla_softif_get_ipv6(struct net_device *dev, +batadv_mcast_mla_meshif_get_ipv6(struct net_device *dev, struct hlist_head *mcast_list, struct batadv_mcast_mla_flags *flags) { @@ -499,13 +499,13 @@ batadv_mcast_mla_softif_get_ipv6(struct net_device *dev, #endif /** - * batadv_mcast_mla_softif_get() - get softif multicast listeners + * batadv_mcast_mla_meshif_get() - get meshif multicast listeners * @dev: the device to collect multicast addresses from * @mcast_list: a list to put found addresses into * @flags: flags indicating the new multicast state * * Collects multicast addresses of multicast listeners residing - * on this kernel on the given soft interface, dev, in + * on this kernel on the given mesh interface, dev, in * the given mcast_list. In general, multicast listeners provided by * your multicast receiving applications run directly on this node. 
* @@ -518,7 +518,7 @@ batadv_mcast_mla_softif_get_ipv6(struct net_device *dev, * items added to the mcast_list otherwise. */ static int -batadv_mcast_mla_softif_get(struct net_device *dev, +batadv_mcast_mla_meshif_get(struct net_device *dev, struct hlist_head *mcast_list, struct batadv_mcast_mla_flags *flags) { @@ -528,11 +528,11 @@ batadv_mcast_mla_softif_get(struct net_device *dev, if (bridge) dev = bridge; - ret4 = batadv_mcast_mla_softif_get_ipv4(dev, mcast_list, flags); + ret4 = batadv_mcast_mla_meshif_get_ipv4(dev, mcast_list, flags); if (ret4 < 0) goto out; - ret6 = batadv_mcast_mla_softif_get_ipv6(dev, mcast_list, flags); + ret6 = batadv_mcast_mla_meshif_get_ipv6(dev, mcast_list, flags); if (ret6 < 0) { ret4 = 0; goto out; @@ -576,7 +576,7 @@ static void batadv_mcast_mla_br_addr_cpy(char *dst, const struct br_ip *src) * * Collects multicast addresses of multicast listeners residing * on foreign, non-mesh devices which we gave access to our mesh via - * a bridge on top of the given soft interface, dev, in the given + * a bridge on top of the given mesh interface, dev, in the given * mcast_list. * * Return: -ENOMEM on memory allocation error or the number of @@ -672,7 +672,7 @@ static void batadv_mcast_mla_list_free(struct hlist_head *mcast_list) /** * batadv_mcast_mla_tt_retract() - clean up multicast listener announcements - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @mcast_list: a list of addresses which should _not_ be removed * * Retracts the announcement of any multicast listener from the @@ -704,7 +704,7 @@ static void batadv_mcast_mla_tt_retract(struct batadv_priv *bat_priv, /** * batadv_mcast_mla_tt_add() - add multicast listener announcements - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @mcast_list: a list of addresses which are going to get added * * Adds multicast listener announcements from the given mcast_list to the @@ -724,7 +724,7 @@ static void batadv_mcast_mla_tt_add(struct batadv_priv *bat_priv, &bat_priv->mcast.mla_list)) continue; - if (!batadv_tt_local_add(bat_priv->soft_iface, + if (!batadv_tt_local_add(bat_priv->mesh_iface, mcast_entry->addr, BATADV_NO_FLAGS, BATADV_NULL_IFINDEX, BATADV_NO_MARK)) continue; @@ -737,7 +737,7 @@ static void batadv_mcast_mla_tt_add(struct batadv_priv *bat_priv, /** * batadv_mcast_querier_log() - debug output regarding the querier status on * link - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @str_proto: a string for the querier protocol (e.g. "IGMP" or "MLD") * @old_state: the previous querier state on our link * @new_state: the new querier state on our link @@ -754,7 +754,7 @@ static void batadv_mcast_mla_tt_add(struct batadv_priv *bat_priv, * potentially shadowing listeners from us then. * * This is only interesting for nodes with a bridge on top of their - * soft interface. + * mesh interface. 
*/ static void batadv_mcast_querier_log(struct batadv_priv *bat_priv, char *str_proto, @@ -762,14 +762,14 @@ batadv_mcast_querier_log(struct batadv_priv *bat_priv, char *str_proto, struct batadv_mcast_querier_state *new_state) { if (!old_state->exists && new_state->exists) - batadv_info(bat_priv->soft_iface, "%s Querier appeared\n", + batadv_info(bat_priv->mesh_iface, "%s Querier appeared\n", str_proto); else if (old_state->exists && !new_state->exists) - batadv_info(bat_priv->soft_iface, + batadv_info(bat_priv->mesh_iface, "%s Querier disappeared - multicast optimizations disabled\n", str_proto); else if (!bat_priv->mcast.mla_flags.bridged && !new_state->exists) - batadv_info(bat_priv->soft_iface, + batadv_info(bat_priv->mesh_iface, "No %s Querier present - multicast optimizations disabled\n", str_proto); @@ -789,7 +789,7 @@ batadv_mcast_querier_log(struct batadv_priv *bat_priv, char *str_proto, /** * batadv_mcast_bridge_log() - debug output for topology changes in bridged * setups - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @new_flags: flags indicating the new multicast state * * If no bridges are ever used on this node, then this function does nothing. @@ -798,7 +798,7 @@ batadv_mcast_querier_log(struct batadv_priv *bat_priv, char *str_proto, * which might be relevant to our multicast optimizations. * * More precisely, it outputs information when a bridge interface is added or - * removed from a soft interface. And when a bridge is present, it further + * removed from a mesh interface. And when a bridge is present, it further * outputs information about the querier state which is relevant for the * multicast flags this node is going to set. */ @@ -827,7 +827,7 @@ batadv_mcast_bridge_log(struct batadv_priv *bat_priv, /** * batadv_mcast_flags_log() - output debug information about mcast flag changes - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @flags: TVLV flags indicating the new multicast state * * Whenever the multicast TVLV flags this node announces change, this function @@ -860,7 +860,7 @@ static void batadv_mcast_flags_log(struct batadv_priv *bat_priv, u8 flags) /** * batadv_mcast_mla_flags_update() - update multicast flags - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @flags: flags indicating the new multicast state * * Updates the own multicast tvlv with our current multicast related settings, @@ -889,7 +889,7 @@ batadv_mcast_mla_flags_update(struct batadv_priv *bat_priv, /** * __batadv_mcast_mla_update() - update the own MLAs - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * * Updates the own multicast listener announcements in the translation * table as well as the own, announced multicast tvlv container. 
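The listener-collection helpers renamed above all funnel addresses into a caller-provided hlist, which __batadv_mcast_mla_update() (next hunk) then retracts from or adds to the translation table before freeing it. A condensed sketch of that collect-and-dispose lifecycle; the entry type is modelled on batman-adv's batadv_hw_addr, while the helper names, allocation flags, and the omitted duplicate check are simplifications for illustration:

#include <linux/etherdevice.h>
#include <linux/list.h>
#include <linux/slab.h>

/* modelled on struct batadv_hw_addr */
struct listener_addr {
	struct hlist_node list;
	u8 addr[ETH_ALEN];
};

/* collect: allocate one node per listener address and chain it */
static int mcast_list_collect(struct hlist_head *mcast_list, const u8 *addr)
{
	struct listener_addr *new;

	new = kmalloc(sizeof(*new), GFP_ATOMIC);
	if (!new)
		return -ENOMEM;

	ether_addr_copy(new->addr, addr);
	hlist_add_head(&new->list, mcast_list);

	return 0;
}

/* dispose: the _safe iterator is required since entries are unlinked
 * while walking the list
 */
static void mcast_list_free(struct hlist_head *mcast_list)
{
	struct listener_addr *entry;
	struct hlist_node *tmp;

	hlist_for_each_entry_safe(entry, tmp, mcast_list, list) {
		hlist_del(&entry->list);
		kfree(entry);
	}
}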
@@ -901,18 +901,18 @@ batadv_mcast_mla_flags_update(struct batadv_priv *bat_priv, */ static void __batadv_mcast_mla_update(struct batadv_priv *bat_priv) { - struct net_device *soft_iface = bat_priv->soft_iface; + struct net_device *mesh_iface = bat_priv->mesh_iface; struct hlist_head mcast_list = HLIST_HEAD_INIT; struct batadv_mcast_mla_flags flags; int ret; flags = batadv_mcast_mla_flags_get(bat_priv); - ret = batadv_mcast_mla_softif_get(soft_iface, &mcast_list, &flags); + ret = batadv_mcast_mla_meshif_get(mesh_iface, &mcast_list, &flags); if (ret < 0) goto out; - ret = batadv_mcast_mla_bridge_get(soft_iface, &mcast_list, &flags); + ret = batadv_mcast_mla_bridge_get(mesh_iface, &mcast_list, &flags); if (ret < 0) goto out; @@ -977,7 +977,7 @@ static bool batadv_mcast_is_report_ipv4(struct sk_buff *skb) /** * batadv_mcast_forw_mode_check_ipv4() - check for optimized forwarding * potential - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @skb: the IPv4 packet to check * @is_unsnoopable: stores whether the destination is snoopable * @is_routable: stores whether the destination is routable @@ -1042,7 +1042,7 @@ static bool batadv_mcast_is_report_ipv6(struct sk_buff *skb) /** * batadv_mcast_forw_mode_check_ipv6() - check for optimized forwarding * potential - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @skb: the IPv6 packet to check * @is_unsnoopable: stores whether the destination is snoopable * @is_routable: stores whether the destination is routable @@ -1084,7 +1084,7 @@ static int batadv_mcast_forw_mode_check_ipv6(struct batadv_priv *bat_priv, /** * batadv_mcast_forw_mode_check() - check for optimized forwarding potential - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @skb: the multicast frame to check * @is_unsnoopable: stores whether the destination is snoopable * @is_routable: stores whether the destination is routable @@ -1124,7 +1124,7 @@ static int batadv_mcast_forw_mode_check(struct batadv_priv *bat_priv, /** * batadv_mcast_forw_want_all_ip_count() - count nodes with unspecific mcast * interest - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @ethhdr: ethernet header of a packet * * Return: the number of nodes which want all IPv4 multicast traffic if the @@ -1147,7 +1147,7 @@ static int batadv_mcast_forw_want_all_ip_count(struct batadv_priv *bat_priv, /** * batadv_mcast_forw_rtr_count() - count nodes with a multicast router - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @protocol: the ethernet protocol type to count multicast routers for * * Return: the number of nodes which want all routable IPv4 multicast traffic @@ -1170,7 +1170,7 @@ static int batadv_mcast_forw_rtr_count(struct batadv_priv *bat_priv, /** * batadv_mcast_forw_mode_by_count() - get forwarding mode by count - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @skb: the multicast packet to check * @vid: the vlan identifier * @is_routable: stores whether the destination is routable @@ -1214,7 +1214,7 @@ batadv_mcast_forw_mode_by_count(struct batadv_priv *bat_priv, /** * 
batadv_mcast_forw_mode() - check on how to forward a multicast packet - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @skb: the multicast packet to check * @vid: the vlan identifier * @is_routable: stores whether the destination is routable @@ -1259,7 +1259,7 @@ batadv_mcast_forw_mode(struct batadv_priv *bat_priv, struct sk_buff *skb, /** * batadv_mcast_forw_send_orig() - send a multicast packet to an originator - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @skb: the multicast packet to send * @vid: the vlan identifier * @orig_node: the originator to send the packet to @@ -1288,7 +1288,7 @@ static int batadv_mcast_forw_send_orig(struct batadv_priv *bat_priv, /** * batadv_mcast_forw_tt() - forwards a packet to multicast listeners - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @skb: the multicast packet to transmit * @vid: the vlan identifier * @@ -1336,7 +1336,7 @@ batadv_mcast_forw_tt(struct batadv_priv *bat_priv, struct sk_buff *skb, /** * batadv_mcast_forw_want_all_ipv4() - forward to nodes with want-all-ipv4 - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @skb: the multicast packet to transmit * @vid: the vlan identifier * @@ -1373,7 +1373,7 @@ batadv_mcast_forw_want_all_ipv4(struct batadv_priv *bat_priv, /** * batadv_mcast_forw_want_all_ipv6() - forward to nodes with want-all-ipv6 - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @skb: The multicast packet to transmit * @vid: the vlan identifier * @@ -1410,7 +1410,7 @@ batadv_mcast_forw_want_all_ipv6(struct batadv_priv *bat_priv, /** * batadv_mcast_forw_want_all() - forward packet to nodes in a want-all list - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @skb: the multicast packet to transmit * @vid: the vlan identifier * @@ -1439,7 +1439,7 @@ batadv_mcast_forw_want_all(struct batadv_priv *bat_priv, /** * batadv_mcast_forw_want_all_rtr4() - forward to nodes with want-all-rtr4 - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @skb: the multicast packet to transmit * @vid: the vlan identifier * @@ -1476,7 +1476,7 @@ batadv_mcast_forw_want_all_rtr4(struct batadv_priv *bat_priv, /** * batadv_mcast_forw_want_all_rtr6() - forward to nodes with want-all-rtr6 - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @skb: The multicast packet to transmit * @vid: the vlan identifier * @@ -1513,7 +1513,7 @@ batadv_mcast_forw_want_all_rtr6(struct batadv_priv *bat_priv, /** * batadv_mcast_forw_want_rtr() - forward packet to nodes in a want-all-rtr list - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @skb: the multicast packet to transmit * @vid: the vlan identifier * @@ -1542,7 +1542,7 @@ batadv_mcast_forw_want_rtr(struct batadv_priv *bat_priv, /** * batadv_mcast_forw_send() - send packet to any detected multicast recipient - * @bat_priv: the bat priv with all 
the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @skb: the multicast packet to transmit * @vid: the vlan identifier * @is_routable: stores whether the destination is routable @@ -1590,7 +1590,7 @@ int batadv_mcast_forw_send(struct batadv_priv *bat_priv, struct sk_buff *skb, /** * batadv_mcast_want_unsnoop_update() - update unsnoop counter and list - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @orig: the orig_node which multicast state might have changed of * @mcast_flags: flags indicating the new multicast state * @@ -1636,7 +1636,7 @@ static void batadv_mcast_want_unsnoop_update(struct batadv_priv *bat_priv, /** * batadv_mcast_want_ipv4_update() - update want-all-ipv4 counter and list - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @orig: the orig_node which multicast state might have changed of * @mcast_flags: flags indicating the new multicast state * @@ -1681,7 +1681,7 @@ static void batadv_mcast_want_ipv4_update(struct batadv_priv *bat_priv, /** * batadv_mcast_want_ipv6_update() - update want-all-ipv6 counter and list - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @orig: the orig_node which multicast state might have changed of * @mcast_flags: flags indicating the new multicast state * @@ -1726,7 +1726,7 @@ static void batadv_mcast_want_ipv6_update(struct batadv_priv *bat_priv, /** * batadv_mcast_want_rtr4_update() - update want-all-rtr4 counter and list - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @orig: the orig_node which multicast state might have changed of * @mcast_flags: flags indicating the new multicast state * @@ -1771,7 +1771,7 @@ static void batadv_mcast_want_rtr4_update(struct batadv_priv *bat_priv, /** * batadv_mcast_want_rtr6_update() - update want-all-rtr6 counter and list - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @orig: the orig_node which multicast state might have changed of * @mcast_flags: flags indicating the new multicast state * @@ -1816,7 +1816,7 @@ static void batadv_mcast_want_rtr6_update(struct batadv_priv *bat_priv, /** * batadv_mcast_have_mc_ptype_update() - update multicast packet type counter - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @orig: the orig_node which multicast state might have changed of * @mcast_flags: flags indicating the new multicast state * @@ -1872,7 +1872,7 @@ batadv_mcast_tvlv_flags_get(bool enabled, void *tvlv_value, u16 tvlv_value_len) /** * batadv_mcast_tvlv_ogm_handler() - process incoming multicast tvlv container - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @orig: the orig_node of the ogm * @flags: flags indicating the tvlv state (see batadv_tvlv_handler_flags) * @tvlv_value: tvlv buffer containing the multicast data @@ -1915,7 +1915,7 @@ static void batadv_mcast_tvlv_ogm_handler(struct batadv_priv *bat_priv, /** * batadv_mcast_init() - initialize the multicast optimizations structures - * @bat_priv: the bat priv with all the soft interface 
information + * @bat_priv: the bat priv with all the mesh interface information */ void batadv_mcast_init(struct batadv_priv *bat_priv) { @@ -1934,7 +1934,7 @@ void batadv_mcast_init(struct batadv_priv *bat_priv) /** * batadv_mcast_mesh_info_put() - put multicast info into a netlink message * @msg: buffer for the message - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * * Return: 0 or error code. */ @@ -2060,7 +2060,7 @@ batadv_mcast_flags_dump_bucket(struct sk_buff *msg, u32 portid, * @msg: buffer for the message * @portid: netlink port * @cb: Control block containing additional options - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @bucket: current bucket to dump * @idx: index in current bucket to the next entry to dump * @@ -2103,15 +2103,15 @@ batadv_mcast_netlink_get_primary(struct netlink_callback *cb, struct batadv_hard_iface **primary_if) { struct batadv_hard_iface *hard_iface = NULL; - struct net_device *soft_iface; + struct net_device *mesh_iface; struct batadv_priv *bat_priv; int ret = 0; - soft_iface = batadv_netlink_get_softif(cb); - if (IS_ERR(soft_iface)) - return PTR_ERR(soft_iface); + mesh_iface = batadv_netlink_get_meshif(cb); + if (IS_ERR(mesh_iface)) + return PTR_ERR(mesh_iface); - bat_priv = netdev_priv(soft_iface); + bat_priv = netdev_priv(mesh_iface); hard_iface = batadv_primary_if_get_selected(bat_priv); if (!hard_iface || hard_iface->if_status != BATADV_IF_ACTIVE) { @@ -2120,7 +2120,7 @@ batadv_mcast_netlink_get_primary(struct netlink_callback *cb, } out: - dev_put(soft_iface); + dev_put(mesh_iface); if (!ret && primary_if) *primary_if = hard_iface; @@ -2150,7 +2150,7 @@ int batadv_mcast_flags_dump(struct sk_buff *msg, struct netlink_callback *cb) if (ret) return ret; - bat_priv = netdev_priv(primary_if->soft_iface); + bat_priv = netdev_priv(primary_if->mesh_iface); ret = __batadv_mcast_flags_dump(msg, portid, cb, bat_priv, bucket, idx); batadv_hardif_put(primary_if); @@ -2159,7 +2159,7 @@ int batadv_mcast_flags_dump(struct sk_buff *msg, struct netlink_callback *cb) /** * batadv_mcast_free() - free the multicast optimizations structures - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information */ void batadv_mcast_free(struct batadv_priv *bat_priv) { diff --git a/net/batman-adv/multicast_forw.c b/net/batman-adv/multicast_forw.c index fafd6ba8c056d..b8668a80b94a1 100644 --- a/net/batman-adv/multicast_forw.c +++ b/net/batman-adv/multicast_forw.c @@ -131,7 +131,7 @@ batadv_mcast_forw_orig_entry(struct hlist_node *node, /** * batadv_mcast_forw_push_dest() - push an originator MAC address onto an skb - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @skb: the skb to push the destination address onto * @vid: the vlan identifier * @orig_node: the originator node to get the MAC address from @@ -174,7 +174,7 @@ static bool batadv_mcast_forw_push_dest(struct batadv_priv *bat_priv, /** * batadv_mcast_forw_push_dests_list() - push originators from list onto an skb - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @skb: the skb to push the destination addresses onto * @vid: the vlan identifier * @head: the list to gather originators from @@ -215,7 
+215,7 @@ static int batadv_mcast_forw_push_dests_list(struct batadv_priv *bat_priv, /** * batadv_mcast_forw_push_tt() - push originators with interest through TT - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @skb: the skb to push the destination addresses onto * @vid: the vlan identifier * @num_dests: a pointer to store the number of pushed addresses in @@ -262,7 +262,7 @@ batadv_mcast_forw_push_tt(struct batadv_priv *bat_priv, struct sk_buff *skb, /** * batadv_mcast_forw_push_want_all() - push originators with want-all flag - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @skb: the skb to push the destination addresses onto * @vid: the vlan identifier * @num_dests: a pointer to store the number of pushed addresses in @@ -308,7 +308,7 @@ static bool batadv_mcast_forw_push_want_all(struct batadv_priv *bat_priv, /** * batadv_mcast_forw_push_want_rtr() - push originators with want-router flag - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @skb: the skb to push the destination addresses onto * @vid: the vlan identifier * @num_dests: a pointer to store the number of pushed addresses in @@ -475,7 +475,7 @@ batadv_mcast_forw_push_adjust_padding(struct sk_buff *skb, int *count, /** * batadv_mcast_forw_push_dests() - push originator addresses onto an skb - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @skb: the skb to push the destination addresses onto * @vid: the vlan identifier * @is_routable: indicates whether the destination is routable @@ -567,7 +567,7 @@ static int batadv_mcast_forw_push_tracker(struct sk_buff *skb, int num_dests, /** * batadv_mcast_forw_push_tvlvs() - push a multicast tracker TVLV onto an skb - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @skb: the skb to push the tracker TVLV onto * @vid: the vlan identifier * @is_routable: indicates whether the destination is routable @@ -634,7 +634,7 @@ batadv_mcast_forw_push_hdr(struct sk_buff *skb, unsigned short tvlv_len) /** * batadv_mcast_forw_scrub_dests() - scrub destinations in a tracker TVLV - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @comp_neigh: next hop neighbor to scrub+collect destinations for * @dest: start MAC entry in original skb's tracker TVLV * @next_dest: start MAC entry in to be sent skb's tracker TVLV @@ -905,7 +905,7 @@ static void batadv_mcast_forw_shrink_tracker(struct sk_buff *skb) /** * batadv_mcast_forw_packet() - forward a batman-adv multicast packet - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @skb: the received or locally generated batman-adv multicast packet * @local_xmit: indicates that the packet was locally generated and not received * @@ -920,7 +920,7 @@ static void batadv_mcast_forw_shrink_tracker(struct sk_buff *skb) * * Return: NET_RX_SUCCESS or NET_RX_DROP on success or a negative error * code on failure. NET_RX_SUCCESS if the received packet is supposed to be - * decapsulated and forwarded to the own soft interface, NET_RX_DROP otherwise. 
+ * decapsulated and forwarded to the own mesh interface, NET_RX_DROP otherwise. */ static int batadv_mcast_forw_packet(struct batadv_priv *bat_priv, struct sk_buff *skb, bool local_xmit) @@ -1028,7 +1028,7 @@ static int batadv_mcast_forw_packet(struct batadv_priv *bat_priv, /** * batadv_mcast_forw_tracker_tvlv_handler() - handle an mcast tracker tvlv - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @skb: the received batman-adv multicast packet * * Parses the tracker TVLV of an incoming batman-adv multicast packet and @@ -1042,7 +1042,7 @@ static int batadv_mcast_forw_packet(struct batadv_priv *bat_priv, * * Return: NET_RX_SUCCESS or NET_RX_DROP on success or a negative error * code on failure. NET_RX_SUCCESS if the received packet is supposed to be - * decapsulated and forwarded to the own soft interface, NET_RX_DROP otherwise. + * decapsulated and forwarded to the own mesh interface, NET_RX_DROP otherwise. */ int batadv_mcast_forw_tracker_tvlv_handler(struct batadv_priv *bat_priv, struct sk_buff *skb) @@ -1075,7 +1075,7 @@ unsigned int batadv_mcast_forw_packet_hdrlen(unsigned int num_dests) /** * batadv_mcast_forw_expand_head() - expand headroom for an mcast packet - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @skb: the multicast packet to send * * Tries to expand an skb's headroom so that its head to tail is 1298 @@ -1110,7 +1110,7 @@ static int batadv_mcast_forw_expand_head(struct batadv_priv *bat_priv, /** * batadv_mcast_forw_push() - encapsulate skb in a batman-adv multicast packet - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @skb: the multicast packet to encapsulate and send * @vid: the vlan identifier * @is_routable: indicates whether the destination is routable @@ -1154,7 +1154,7 @@ bool batadv_mcast_forw_push(struct batadv_priv *bat_priv, struct sk_buff *skb, /** * batadv_mcast_forw_mcsend() - send a self prepared batman-adv multicast packet - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @skb: the multicast packet to encapsulate and send * * Transmits a batman-adv multicast packet that was locally prepared and diff --git a/net/batman-adv/netlink.c b/net/batman-adv/netlink.c index eefba5600ded3..e7c8f9f2bb1f9 100644 --- a/net/batman-adv/netlink.c +++ b/net/batman-adv/netlink.c @@ -43,10 +43,10 @@ #include "gateway_common.h" #include "hard-interface.h" #include "log.h" +#include "mesh-interface.h" #include "multicast.h" #include "network-coding.h" #include "originator.h" -#include "soft-interface.h" #include "tp_meter.h" #include "translation-table.h" @@ -63,7 +63,7 @@ enum batadv_netlink_multicast_groups { */ enum batadv_genl_ops_flags { /** - * @BATADV_FLAG_NEED_MESH: request requires valid soft interface in + * @BATADV_FLAG_NEED_MESH: request requires valid mesh interface in * attribute BATADV_ATTR_MESH_IFINDEX and expects a pointer to it to be * saved in info->user_ptr[0] */ @@ -166,24 +166,24 @@ static int batadv_netlink_get_ifindex(const struct nlmsghdr *nlh, int attrtype) } /** - * batadv_netlink_mesh_fill_ap_isolation() - Add ap_isolation softif attribute + * batadv_netlink_mesh_fill_ap_isolation() - Add ap_isolation meshif attribute * @msg: Netlink message to dump into - * @bat_priv: the bat priv with all the soft 
interface information + * @bat_priv: the bat priv with all the mesh interface information * * Return: 0 on success or negative error number in case of failure */ static int batadv_netlink_mesh_fill_ap_isolation(struct sk_buff *msg, struct batadv_priv *bat_priv) { - struct batadv_softif_vlan *vlan; + struct batadv_meshif_vlan *vlan; u8 ap_isolation; - vlan = batadv_softif_vlan_get(bat_priv, BATADV_NO_FLAGS); + vlan = batadv_meshif_vlan_get(bat_priv, BATADV_NO_FLAGS); if (!vlan) return 0; ap_isolation = atomic_read(&vlan->ap_isolation); - batadv_softif_vlan_put(vlan); + batadv_meshif_vlan_put(vlan); return nla_put_u8(msg, BATADV_ATTR_AP_ISOLATION_ENABLED, !!ap_isolation); @@ -192,21 +192,21 @@ static int batadv_netlink_mesh_fill_ap_isolation(struct sk_buff *msg, /** * batadv_netlink_set_mesh_ap_isolation() - Set ap_isolation from genl msg * @attr: parsed BATADV_ATTR_AP_ISOLATION_ENABLED attribute - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * * Return: 0 on success or negative error number in case of failure */ static int batadv_netlink_set_mesh_ap_isolation(struct nlattr *attr, struct batadv_priv *bat_priv) { - struct batadv_softif_vlan *vlan; + struct batadv_meshif_vlan *vlan; - vlan = batadv_softif_vlan_get(bat_priv, BATADV_NO_FLAGS); + vlan = batadv_meshif_vlan_get(bat_priv, BATADV_NO_FLAGS); if (!vlan) return -ENOENT; atomic_set(&vlan->ap_isolation, !!nla_get_u8(attr)); - batadv_softif_vlan_put(vlan); + batadv_meshif_vlan_put(vlan); return 0; } @@ -214,7 +214,7 @@ static int batadv_netlink_set_mesh_ap_isolation(struct nlattr *attr, /** * batadv_netlink_mesh_fill() - Fill message with mesh attributes * @msg: Netlink message to dump into - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @cmd: type of message to generate * @portid: Port making netlink request * @seq: sequence number for message @@ -227,7 +227,7 @@ static int batadv_netlink_mesh_fill(struct sk_buff *msg, enum batadv_nl_commands cmd, u32 portid, u32 seq, int flags) { - struct net_device *soft_iface = bat_priv->soft_iface; + struct net_device *mesh_iface = bat_priv->mesh_iface; struct batadv_hard_iface *primary_if = NULL; struct net_device *hard_iface; void *hdr; @@ -239,10 +239,10 @@ static int batadv_netlink_mesh_fill(struct sk_buff *msg, if (nla_put_string(msg, BATADV_ATTR_VERSION, BATADV_SOURCE_VERSION) || nla_put_string(msg, BATADV_ATTR_ALGO_NAME, bat_priv->algo_ops->name) || - nla_put_u32(msg, BATADV_ATTR_MESH_IFINDEX, soft_iface->ifindex) || - nla_put_string(msg, BATADV_ATTR_MESH_IFNAME, soft_iface->name) || + nla_put_u32(msg, BATADV_ATTR_MESH_IFINDEX, mesh_iface->ifindex) || + nla_put_string(msg, BATADV_ATTR_MESH_IFNAME, mesh_iface->name) || nla_put(msg, BATADV_ATTR_MESH_ADDRESS, ETH_ALEN, - soft_iface->dev_addr) || + mesh_iface->dev_addr) || nla_put_u8(msg, BATADV_ATTR_TT_TTVN, (u8)atomic_read(&bat_priv->tt.vn))) goto nla_put_failure; @@ -369,8 +369,8 @@ static int batadv_netlink_mesh_fill(struct sk_buff *msg, } /** - * batadv_netlink_notify_mesh() - send softif attributes to listener - * @bat_priv: the bat priv with all the soft interface information + * batadv_netlink_notify_mesh() - send meshif attributes to listener + * @bat_priv: the bat priv with all the mesh interface information * * Return: 0 on success, < 0 on error */ @@ -391,14 +391,14 @@ static int batadv_netlink_notify_mesh(struct batadv_priv *bat_priv) } 
genlmsg_multicast_netns(&batadv_netlink_family, - dev_net(bat_priv->soft_iface), msg, 0, + dev_net(bat_priv->mesh_iface), msg, 0, BATADV_NL_MCGRP_CONFIG, GFP_KERNEL); return 0; } /** - * batadv_netlink_get_mesh() - Get softif attributes + * batadv_netlink_get_mesh() - Get meshif attributes * @skb: Netlink message with request data * @info: receiver information * @@ -427,7 +427,7 @@ static int batadv_netlink_get_mesh(struct sk_buff *skb, struct genl_info *info) } /** - * batadv_netlink_set_mesh() - Set softif attributes + * batadv_netlink_set_mesh() - Set meshif attributes * @skb: Netlink message with request data * @info: receiver information * @@ -474,7 +474,7 @@ static int batadv_netlink_set_mesh(struct sk_buff *skb, struct genl_info *info) atomic_set(&bat_priv->bridge_loop_avoidance, !!nla_get_u8(attr)); - batadv_bla_status_update(bat_priv->soft_iface); + batadv_bla_status_update(bat_priv->mesh_iface); } #endif /* CONFIG_BATMAN_ADV_BLA */ @@ -484,7 +484,7 @@ static int batadv_netlink_set_mesh(struct sk_buff *skb, struct genl_info *info) atomic_set(&bat_priv->distributed_arp_table, !!nla_get_u8(attr)); - batadv_dat_status_update(bat_priv->soft_iface); + batadv_dat_status_update(bat_priv->mesh_iface); } #endif /* CONFIG_BATMAN_ADV_DAT */ @@ -494,7 +494,7 @@ static int batadv_netlink_set_mesh(struct sk_buff *skb, struct genl_info *info) atomic_set(&bat_priv->fragmentation, !!nla_get_u8(attr)); rtnl_lock(); - batadv_update_min_mtu(bat_priv->soft_iface); + batadv_update_min_mtu(bat_priv->mesh_iface); rtnl_unlock(); } @@ -594,7 +594,7 @@ static int batadv_netlink_set_mesh(struct sk_buff *skb, struct genl_info *info) attr = info->attrs[BATADV_ATTR_NETWORK_CODING_ENABLED]; atomic_set(&bat_priv->network_coding, !!nla_get_u8(attr)); - batadv_nc_status_update(bat_priv->soft_iface); + batadv_nc_status_update(bat_priv->mesh_iface); } #endif /* CONFIG_BATMAN_ADV_NC */ @@ -633,7 +633,7 @@ batadv_netlink_tp_meter_put(struct sk_buff *msg, u32 cookie) /** * batadv_netlink_tpmeter_notify() - send tp_meter result via netlink to client - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @dst: destination of tp_meter session * @result: reason for tp meter session stop * @test_time: total time of the tp_meter session @@ -680,7 +680,7 @@ int batadv_netlink_tpmeter_notify(struct batadv_priv *bat_priv, const u8 *dst, genlmsg_end(msg, hdr); genlmsg_multicast_netns(&batadv_netlink_family, - dev_net(bat_priv->soft_iface), msg, 0, + dev_net(bat_priv->mesh_iface), msg, 0, BATADV_NL_MCGRP_TPMETER, GFP_KERNEL); return 0; @@ -778,7 +778,7 @@ batadv_netlink_tp_meter_cancel(struct sk_buff *skb, struct genl_info *info) /** * batadv_netlink_hardif_fill() - Fill message with hardif attributes * @msg: Netlink message to dump into - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @hard_iface: hard interface which was modified * @cmd: type of message to generate * @portid: Port making netlink request @@ -806,11 +806,11 @@ static int batadv_netlink_hardif_fill(struct sk_buff *msg, genl_dump_check_consistent(cb, hdr); if (nla_put_u32(msg, BATADV_ATTR_MESH_IFINDEX, - bat_priv->soft_iface->ifindex)) + bat_priv->mesh_iface->ifindex)) goto nla_put_failure; if (nla_put_string(msg, BATADV_ATTR_MESH_IFNAME, - bat_priv->soft_iface->name)) + bat_priv->mesh_iface->name)) goto nla_put_failure; if (nla_put_u32(msg, BATADV_ATTR_HARD_IFINDEX, @@ -850,7 +850,7 @@ static int 
batadv_netlink_hardif_fill(struct sk_buff *msg, /** * batadv_netlink_notify_hardif() - send hardif attributes to listener - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @hard_iface: hard interface which was modified * * Return: 0 on success, < 0 on error @@ -873,7 +873,7 @@ static int batadv_netlink_notify_hardif(struct batadv_priv *bat_priv, } genlmsg_multicast_netns(&batadv_netlink_family, - dev_net(bat_priv->soft_iface), msg, 0, + dev_net(bat_priv->mesh_iface), msg, 0, BATADV_NL_MCGRP_CONFIG, GFP_KERNEL); return 0; @@ -963,24 +963,24 @@ static int batadv_netlink_set_hardif(struct sk_buff *skb, static int batadv_netlink_dump_hardif(struct sk_buff *msg, struct netlink_callback *cb) { - struct net_device *soft_iface; + struct net_device *mesh_iface; struct batadv_hard_iface *hard_iface; struct batadv_priv *bat_priv; int portid = NETLINK_CB(cb->skb).portid; int skip = cb->args[0]; int i = 0; - soft_iface = batadv_netlink_get_softif(cb); - if (IS_ERR(soft_iface)) - return PTR_ERR(soft_iface); + mesh_iface = batadv_netlink_get_meshif(cb); + if (IS_ERR(mesh_iface)) + return PTR_ERR(mesh_iface); - bat_priv = netdev_priv(soft_iface); + bat_priv = netdev_priv(mesh_iface); rtnl_lock(); cb->seq = batadv_hardif_generation << 1 | 1; list_for_each_entry(hard_iface, &batadv_hardif_list, list) { - if (hard_iface->soft_iface != soft_iface) + if (hard_iface->mesh_iface != mesh_iface) continue; if (i++ < skip) @@ -997,7 +997,7 @@ batadv_netlink_dump_hardif(struct sk_buff *msg, struct netlink_callback *cb) rtnl_unlock(); - dev_put(soft_iface); + dev_put(mesh_iface); cb->args[0] = i; @@ -1007,7 +1007,7 @@ batadv_netlink_dump_hardif(struct sk_buff *msg, struct netlink_callback *cb) /** * batadv_netlink_vlan_fill() - Fill message with vlan attributes * @msg: Netlink message to dump into - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @vlan: vlan which was modified * @cmd: type of message to generate * @portid: Port making netlink request @@ -1018,7 +1018,7 @@ batadv_netlink_dump_hardif(struct sk_buff *msg, struct netlink_callback *cb) */ static int batadv_netlink_vlan_fill(struct sk_buff *msg, struct batadv_priv *bat_priv, - struct batadv_softif_vlan *vlan, + struct batadv_meshif_vlan *vlan, enum batadv_nl_commands cmd, u32 portid, u32 seq, int flags) { @@ -1029,11 +1029,11 @@ static int batadv_netlink_vlan_fill(struct sk_buff *msg, return -ENOBUFS; if (nla_put_u32(msg, BATADV_ATTR_MESH_IFINDEX, - bat_priv->soft_iface->ifindex)) + bat_priv->mesh_iface->ifindex)) goto nla_put_failure; if (nla_put_string(msg, BATADV_ATTR_MESH_IFNAME, - bat_priv->soft_iface->name)) + bat_priv->mesh_iface->name)) goto nla_put_failure; if (nla_put_u32(msg, BATADV_ATTR_VLANID, vlan->vid & VLAN_VID_MASK)) @@ -1053,13 +1053,13 @@ static int batadv_netlink_vlan_fill(struct sk_buff *msg, /** * batadv_netlink_notify_vlan() - send vlan attributes to listener - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @vlan: vlan which was modified * * Return: 0 on success, < 0 on error */ static int batadv_netlink_notify_vlan(struct batadv_priv *bat_priv, - struct batadv_softif_vlan *vlan) + struct batadv_meshif_vlan *vlan) { struct sk_buff *msg; int ret; @@ -1076,7 +1076,7 @@ static int batadv_netlink_notify_vlan(struct batadv_priv *bat_priv, } 
genlmsg_multicast_netns(&batadv_netlink_family, - dev_net(bat_priv->soft_iface), msg, 0, + dev_net(bat_priv->mesh_iface), msg, 0, BATADV_NL_MCGRP_CONFIG, GFP_KERNEL); return 0; @@ -1091,7 +1091,7 @@ static int batadv_netlink_notify_vlan(struct batadv_priv *bat_priv, */ static int batadv_netlink_get_vlan(struct sk_buff *skb, struct genl_info *info) { - struct batadv_softif_vlan *vlan = info->user_ptr[1]; + struct batadv_meshif_vlan *vlan = info->user_ptr[1]; struct batadv_priv *bat_priv = info->user_ptr[0]; struct sk_buff *msg; int ret; @@ -1121,7 +1121,7 @@ static int batadv_netlink_get_vlan(struct sk_buff *skb, struct genl_info *info) */ static int batadv_netlink_set_vlan(struct sk_buff *skb, struct genl_info *info) { - struct batadv_softif_vlan *vlan = info->user_ptr[1]; + struct batadv_meshif_vlan *vlan = info->user_ptr[1]; struct batadv_priv *bat_priv = info->user_ptr[0]; struct nlattr *attr; @@ -1137,43 +1137,43 @@ static int batadv_netlink_set_vlan(struct sk_buff *skb, struct genl_info *info) } /** - * batadv_netlink_get_softif_from_ifindex() - Get soft-iface from ifindex + * batadv_netlink_get_meshif_from_ifindex() - Get mesh-iface from ifindex * @net: the applicable net namespace - * @ifindex: index of the soft interface + * @ifindex: index of the mesh interface * - * Return: Pointer to soft interface (with increased refcnt) on success, error + * Return: Pointer to mesh interface (with increased refcnt) on success, error * pointer on error */ static struct net_device * -batadv_netlink_get_softif_from_ifindex(struct net *net, int ifindex) +batadv_netlink_get_meshif_from_ifindex(struct net *net, int ifindex) { - struct net_device *soft_iface; + struct net_device *mesh_iface; - soft_iface = dev_get_by_index(net, ifindex); - if (!soft_iface) + mesh_iface = dev_get_by_index(net, ifindex); + if (!mesh_iface) return ERR_PTR(-ENODEV); - if (!batadv_softif_is_valid(soft_iface)) - goto err_put_softif; + if (!batadv_meshif_is_valid(mesh_iface)) + goto err_put_meshif; - return soft_iface; + return mesh_iface; -err_put_softif: - dev_put(soft_iface); +err_put_meshif: + dev_put(mesh_iface); return ERR_PTR(-EINVAL); } /** - * batadv_netlink_get_softif_from_info() - Get soft-iface from genl attributes + * batadv_netlink_get_meshif_from_info() - Get mesh-iface from genl attributes * @net: the applicable net namespace * @info: receiver information * - * Return: Pointer to soft interface (with increased refcnt) on success, error + * Return: Pointer to mesh interface (with increased refcnt) on success, error * pointer on error */ static struct net_device * -batadv_netlink_get_softif_from_info(struct net *net, struct genl_info *info) +batadv_netlink_get_meshif_from_info(struct net *net, struct genl_info *info) { int ifindex; @@ -1182,30 +1182,30 @@ batadv_netlink_get_softif_from_info(struct net *net, struct genl_info *info) ifindex = nla_get_u32(info->attrs[BATADV_ATTR_MESH_IFINDEX]); - return batadv_netlink_get_softif_from_ifindex(net, ifindex); + return batadv_netlink_get_meshif_from_ifindex(net, ifindex); } /** - * batadv_netlink_get_softif() - Retrieve soft interface from netlink callback + * batadv_netlink_get_meshif() - Retrieve mesh interface from netlink callback * @cb: callback structure containing arguments * - * Return: Pointer to soft interface (with increased refcnt) on success, error + * Return: Pointer to mesh interface (with increased refcnt) on success, error * pointer on error */ -struct net_device *batadv_netlink_get_softif(struct netlink_callback *cb) +struct net_device 
*batadv_netlink_get_meshif(struct netlink_callback *cb) { int ifindex = batadv_netlink_get_ifindex(cb->nlh, BATADV_ATTR_MESH_IFINDEX); if (!ifindex) return ERR_PTR(-ENONET); - return batadv_netlink_get_softif_from_ifindex(sock_net(cb->skb->sk), + return batadv_netlink_get_meshif_from_ifindex(sock_net(cb->skb->sk), ifindex); } /** * batadv_netlink_get_hardif_from_ifindex() - Get hard-iface from ifindex - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @net: the applicable net namespace * @ifindex: index of the hard interface * @@ -1227,7 +1227,7 @@ batadv_netlink_get_hardif_from_ifindex(struct batadv_priv *bat_priv, if (!hard_iface) goto err_put_harddev; - if (hard_iface->soft_iface != bat_priv->soft_iface) + if (hard_iface->mesh_iface != bat_priv->mesh_iface) goto err_put_hardif; /* hard_dev is referenced by hard_iface and not needed here */ @@ -1245,7 +1245,7 @@ batadv_netlink_get_hardif_from_ifindex(struct batadv_priv *bat_priv, /** * batadv_netlink_get_hardif_from_info() - Get hard-iface from genl attributes - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @net: the applicable net namespace * @info: receiver information * @@ -1268,7 +1268,7 @@ batadv_netlink_get_hardif_from_info(struct batadv_priv *bat_priv, /** * batadv_netlink_get_hardif() - Retrieve hard interface from netlink callback - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @cb: callback structure containing arguments * * Return: Pointer to hard interface (with increased refcnt) on success, error @@ -1290,18 +1290,18 @@ batadv_netlink_get_hardif(struct batadv_priv *bat_priv, /** * batadv_get_vlan_from_info() - Retrieve vlan from genl attributes - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @net: the applicable net namespace * @info: receiver information * * Return: Pointer to vlan on success (with increased refcnt), error pointer * on error */ -static struct batadv_softif_vlan * +static struct batadv_meshif_vlan * batadv_get_vlan_from_info(struct batadv_priv *bat_priv, struct net *net, struct genl_info *info) { - struct batadv_softif_vlan *vlan; + struct batadv_meshif_vlan *vlan; u16 vid; if (!info->attrs[BATADV_ATTR_VLANID]) @@ -1309,7 +1309,7 @@ batadv_get_vlan_from_info(struct batadv_priv *bat_priv, struct net *net, vid = nla_get_u16(info->attrs[BATADV_ATTR_VLANID]); - vlan = batadv_softif_vlan_get(bat_priv, vid | BATADV_VLAN_HAS_TAG); + vlan = batadv_meshif_vlan_get(bat_priv, vid | BATADV_VLAN_HAS_TAG); if (!vlan) return ERR_PTR(-ENOENT); @@ -1331,8 +1331,8 @@ static int batadv_pre_doit(const struct genl_split_ops *ops, struct net *net = genl_info_net(info); struct batadv_hard_iface *hard_iface; struct batadv_priv *bat_priv = NULL; - struct batadv_softif_vlan *vlan; - struct net_device *soft_iface; + struct batadv_meshif_vlan *vlan; + struct net_device *mesh_iface; u8 user_ptr1_flags; u8 mesh_dep_flags; int ret; @@ -1347,11 +1347,11 @@ static int batadv_pre_doit(const struct genl_split_ops *ops, return -EINVAL; if (ops->internal_flags & BATADV_FLAG_NEED_MESH) { - soft_iface = batadv_netlink_get_softif_from_info(net, info); - if (IS_ERR(soft_iface)) - return PTR_ERR(soft_iface); + mesh_iface = batadv_netlink_get_meshif_from_info(net, info); + if (IS_ERR(mesh_iface)) + 
return PTR_ERR(mesh_iface); - bat_priv = netdev_priv(soft_iface); + bat_priv = netdev_priv(mesh_iface); info->user_ptr[0] = bat_priv; } @@ -1360,7 +1360,7 @@ static int batadv_pre_doit(const struct genl_split_ops *ops, info); if (IS_ERR(hard_iface)) { ret = PTR_ERR(hard_iface); - goto err_put_softif; + goto err_put_meshif; } info->user_ptr[1] = hard_iface; @@ -1370,7 +1370,7 @@ static int batadv_pre_doit(const struct genl_split_ops *ops, vlan = batadv_get_vlan_from_info(bat_priv, net, info); if (IS_ERR(vlan)) { ret = PTR_ERR(vlan); - goto err_put_softif; + goto err_put_meshif; } info->user_ptr[1] = vlan; @@ -1378,9 +1378,9 @@ static int batadv_pre_doit(const struct genl_split_ops *ops, return 0; -err_put_softif: +err_put_meshif: if (bat_priv) - dev_put(bat_priv->soft_iface); + dev_put(bat_priv->mesh_iface); return ret; } @@ -1396,7 +1396,7 @@ static void batadv_post_doit(const struct genl_split_ops *ops, struct genl_info *info) { struct batadv_hard_iface *hard_iface; - struct batadv_softif_vlan *vlan; + struct batadv_meshif_vlan *vlan; struct batadv_priv *bat_priv; if (ops->internal_flags & BATADV_FLAG_NEED_HARDIF && @@ -1408,12 +1408,12 @@ static void batadv_post_doit(const struct genl_split_ops *ops, if (ops->internal_flags & BATADV_FLAG_NEED_VLAN && info->user_ptr[1]) { vlan = info->user_ptr[1]; - batadv_softif_vlan_put(vlan); + batadv_meshif_vlan_put(vlan); } if (ops->internal_flags & BATADV_FLAG_NEED_MESH && info->user_ptr[0]) { bat_priv = info->user_ptr[0]; - dev_put(bat_priv->soft_iface); + dev_put(bat_priv->mesh_iface); } } @@ -1567,7 +1567,7 @@ void __init batadv_netlink_register(void) ret = genl_register_family(&batadv_netlink_family); if (ret) - pr_warn("unable to register netlink family"); + pr_warn("unable to register netlink family\n"); } /** diff --git a/net/batman-adv/netlink.h b/net/batman-adv/netlink.h index 2097c2ae98f16..fe4548b974bb0 100644 --- a/net/batman-adv/netlink.h +++ b/net/batman-adv/netlink.h @@ -15,7 +15,7 @@ void batadv_netlink_register(void); void batadv_netlink_unregister(void); -struct net_device *batadv_netlink_get_softif(struct netlink_callback *cb); +struct net_device *batadv_netlink_get_meshif(struct netlink_callback *cb); struct batadv_hard_iface * batadv_netlink_get_hardif(struct batadv_priv *bat_priv, struct netlink_callback *cb); diff --git a/net/batman-adv/network-coding.c b/net/batman-adv/network-coding.c index 71ebd0284f95d..9f56308779cc3 100644 --- a/net/batman-adv/network-coding.c +++ b/net/batman-adv/network-coding.c @@ -65,7 +65,7 @@ int __init batadv_nc_init(void) /** * batadv_nc_start_timer() - initialise the nc periodic worker - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information */ static void batadv_nc_start_timer(struct batadv_priv *bat_priv) { @@ -76,7 +76,7 @@ static void batadv_nc_start_timer(struct batadv_priv *bat_priv) /** * batadv_nc_tvlv_container_update() - update the network coding tvlv container * after network coding setting change - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information */ static void batadv_nc_tvlv_container_update(struct batadv_priv *bat_priv) { @@ -98,7 +98,7 @@ static void batadv_nc_tvlv_container_update(struct batadv_priv *bat_priv) /** * batadv_nc_status_update() - update the network coding tvlv container after * network coding setting change - * @net_dev: the soft interface net device + * @net_dev: the mesh interface net device */ void 
batadv_nc_status_update(struct net_device *net_dev) { @@ -109,7 +109,7 @@ void batadv_nc_status_update(struct net_device *net_dev) /** * batadv_nc_tvlv_ogm_handler_v1() - process incoming nc tvlv container - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @orig: the orig_node of the ogm * @flags: flags indicating the tvlv state (see batadv_tvlv_handler_flags) * @tvlv_value: tvlv buffer containing the gateway data @@ -128,7 +128,7 @@ static void batadv_nc_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv, /** * batadv_nc_mesh_init() - initialise coding hash table and start housekeeping - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * * Return: 0 on success or negative error number in case of failure */ @@ -171,7 +171,7 @@ int batadv_nc_mesh_init(struct batadv_priv *bat_priv) /** * batadv_nc_init_bat_priv() - initialise the nc specific bat_priv variables - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information */ void batadv_nc_init_bat_priv(struct batadv_priv *bat_priv) { @@ -267,7 +267,7 @@ static void batadv_nc_packet_free(struct batadv_nc_packet *nc_packet, /** * batadv_nc_to_purge_nc_node() - checks whether an nc node has to be purged - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @nc_node: the nc node to check * * Return: true if the entry has to be purged now, false otherwise @@ -283,7 +283,7 @@ static bool batadv_nc_to_purge_nc_node(struct batadv_priv *bat_priv, /** * batadv_nc_to_purge_nc_path_coding() - checks whether an nc path has timed out - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @nc_path: the nc path to check * * Return: true if the entry has to be purged now, false otherwise @@ -304,7 +304,7 @@ static bool batadv_nc_to_purge_nc_path_coding(struct batadv_priv *bat_priv, /** * batadv_nc_to_purge_nc_path_decoding() - checks whether an nc path has timed * out - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @nc_path: the nc path to check * * Return: true if the entry has to be purged now, false otherwise @@ -325,7 +325,7 @@ static bool batadv_nc_to_purge_nc_path_decoding(struct batadv_priv *bat_priv, /** * batadv_nc_purge_orig_nc_nodes() - go through list of nc nodes and purge stale * entries - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @list: list of nc nodes * @lock: nc node list lock * @to_purge: function in charge to decide whether an entry has to be purged or @@ -363,7 +363,7 @@ batadv_nc_purge_orig_nc_nodes(struct batadv_priv *bat_priv, /** * batadv_nc_purge_orig() - purges all nc node data attached of the given * originator - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @orig_node: orig_node with the nc node entries to be purged * @to_purge: function in charge to decide whether an entry has to be purged or * not. 
This function takes the nc node as argument and has to return @@ -389,7 +389,7 @@ void batadv_nc_purge_orig(struct batadv_priv *bat_priv, /** * batadv_nc_purge_orig_hash() - traverse entire originator hash to check if * they have timed out nc nodes - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information */ static void batadv_nc_purge_orig_hash(struct batadv_priv *bat_priv) { @@ -416,7 +416,7 @@ static void batadv_nc_purge_orig_hash(struct batadv_priv *bat_priv) /** * batadv_nc_purge_paths() - traverse all nc paths part of the hash and remove * unused ones - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @hash: hash table containing the nc paths to check * @to_purge: function in charge to decide whether an entry has to be purged or * not. This function takes the nc node as argument and has to return @@ -579,7 +579,7 @@ static void batadv_nc_send_packet(struct batadv_nc_packet *nc_packet) /** * batadv_nc_sniffed_purge() - Checks timestamp of given sniffed nc_packet. - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @nc_path: the nc path the packet belongs to * @nc_packet: the nc packet to be checked * @@ -618,7 +618,7 @@ static bool batadv_nc_sniffed_purge(struct batadv_priv *bat_priv, /** * batadv_nc_fwd_flush() - Checks the timestamp of the given nc packet. - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @nc_path: the nc path the packet belongs to * @nc_packet: the nc packet to be checked * @@ -657,7 +657,7 @@ static bool batadv_nc_fwd_flush(struct batadv_priv *bat_priv, /** * batadv_nc_process_nc_paths() - traverse given nc packet pool and free timed * out nc packets - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @hash: to be processed hash table * @process_fn: Function called to process given nc packet. Should return true * to encourage this function to proceed with the next packet. 
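
The purge helpers documented above all share one idiom: a traversal routine walks a collection and defers the keep-or-drop decision to a caller-supplied @to_purge predicate that returns true when an entry has timed out. The in-kernel version walks an hlist under a spinlock and drops kref references; the userspace sketch below (stand-in types, arbitrary 10s timeout) keeps only that predicate-driven shape:

/* Hedged sketch of the @to_purge callback idiom -- not batman-adv code. */
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

struct nc_entry {
	time_t last_seen;
	struct nc_entry *next;
};

/* returns true when the entry has to be purged now, false otherwise */
typedef bool (*to_purge_fn)(const struct nc_entry *entry);

static bool purge_timed_out(const struct nc_entry *entry)
{
	/* 10 second timeout chosen for the sketch only */
	return time(NULL) - entry->last_seen > 10;
}

static void purge_entries(struct nc_entry **head, to_purge_fn to_purge)
{
	struct nc_entry **cur = head;

	while (*cur) {
		if (to_purge(*cur))
			*cur = (*cur)->next;	/* unlink; the real code also drops refs */
		else
			cur = &(*cur)->next;
	}
}

int main(void)
{
	struct nc_entry stale = { .last_seen = 0, .next = NULL };
	struct nc_entry fresh = { .last_seen = time(NULL), .next = &stale };
	struct nc_entry *list = &fresh;

	purge_entries(&list, purge_timed_out);
	printf("stale entry purged: %s\n", fresh.next == NULL ? "yes" : "no");
	return 0;
}

Passing the policy in as a function pointer is what lets batadv_nc_purge_orig_nc_nodes(), batadv_nc_purge_paths() and batadv_nc_process_nc_paths() reuse one traversal for coding and decoding paths alike, as the hunks above show.
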
@@ -744,7 +744,7 @@ static void batadv_nc_worker(struct work_struct *work) /** * batadv_can_nc_with_orig() - checks whether the given orig node is suitable * for coding or not - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @orig_node: neighboring orig node which may be used as nc candidate * @ogm_packet: incoming ogm packet also used for the checks * @@ -825,7 +825,7 @@ batadv_nc_find_nc_node(struct batadv_orig_node *orig_node, /** * batadv_nc_get_nc_node() - retrieves an nc node or creates the entry if it was * not found - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @orig_node: orig node originating the ogm packet * @orig_neigh_node: neighboring orig node from which we received the ogm packet * (can be equal to orig_node) @@ -888,7 +888,7 @@ batadv_nc_get_nc_node(struct batadv_priv *bat_priv, /** * batadv_nc_update_nc_node() - updates stored incoming and outgoing nc node * structs (best called on incoming OGMs) - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @orig_node: orig node originating the ogm packet * @orig_neigh_node: neighboring orig node from which we received the ogm packet * (can be equal to orig_node) @@ -940,7 +940,7 @@ void batadv_nc_update_nc_node(struct batadv_priv *bat_priv, /** * batadv_nc_get_path() - get existing nc_path or allocate a new one - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @hash: hash table containing the nc path * @src: ethernet source address - first half of the nc path search key * @dst: ethernet destination address - second half of the nc path search key @@ -1032,7 +1032,7 @@ static void batadv_nc_memxor(char *dst, const char *src, unsigned int len) /** * batadv_nc_code_packets() - code a received unicast_packet with an nc packet * into a coded_packet and send it - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @skb: data skb to forward * @ethhdr: pointer to the ethernet header inside the skb * @nc_packet: structure containing the packet to the skb can be coded with @@ -1245,7 +1245,7 @@ static bool batadv_nc_skb_coding_possible(struct sk_buff *skb, u8 *dst, u8 *src) /** * batadv_nc_path_search() - Find the coding path matching in_nc_node and * out_nc_node to retrieve a buffered packet that can be used for coding. - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @in_nc_node: pointer to skb next hop's neighbor nc node * @out_nc_node: pointer to skb source's neighbor nc node * @skb: data skb to forward @@ -1313,7 +1313,7 @@ batadv_nc_path_search(struct batadv_priv *bat_priv, /** * batadv_nc_skb_src_search() - Loops through the list of neighboring nodes of * the skb's sender (may be equal to the originator). 
- * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @skb: data skb to forward * @eth_dst: next hop mac address of skb * @eth_src: source mac address of skb @@ -1359,7 +1359,7 @@ batadv_nc_skb_src_search(struct batadv_priv *bat_priv, /** * batadv_nc_skb_store_before_coding() - set the ethernet src and dst of the * unicast skb before it is stored for use in later decoding - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @skb: data skb to store * @eth_dst_new: new destination mac address of skb */ @@ -1408,7 +1408,7 @@ static bool batadv_nc_skb_dst_search(struct sk_buff *skb, struct batadv_neigh_node *neigh_node, struct ethhdr *ethhdr) { - struct net_device *netdev = neigh_node->if_incoming->soft_iface; + struct net_device *netdev = neigh_node->if_incoming->mesh_iface; struct batadv_priv *bat_priv = netdev_priv(netdev); struct batadv_orig_node *orig_node = neigh_node->orig_node; struct batadv_nc_node *nc_node; @@ -1495,7 +1495,7 @@ static bool batadv_nc_skb_add_to_path(struct sk_buff *skb, bool batadv_nc_skb_forward(struct sk_buff *skb, struct batadv_neigh_node *neigh_node) { - const struct net_device *netdev = neigh_node->if_incoming->soft_iface; + const struct net_device *netdev = neigh_node->if_incoming->mesh_iface; struct batadv_priv *bat_priv = netdev_priv(netdev); struct batadv_unicast_packet *packet; struct batadv_nc_path *nc_path; @@ -1544,7 +1544,7 @@ bool batadv_nc_skb_forward(struct sk_buff *skb, /** * batadv_nc_skb_store_for_decoding() - save a clone of the skb which can be * used when decoding coded packets - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @skb: data skb to store */ void batadv_nc_skb_store_for_decoding(struct batadv_priv *bat_priv, @@ -1605,7 +1605,7 @@ void batadv_nc_skb_store_for_decoding(struct batadv_priv *bat_priv, /** * batadv_nc_skb_store_sniffed_unicast() - check if a received unicast packet * should be saved in the decoding buffer and, if so, store it there - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @skb: unicast skb to store */ void batadv_nc_skb_store_sniffed_unicast(struct batadv_priv *bat_priv, @@ -1625,7 +1625,7 @@ void batadv_nc_skb_store_sniffed_unicast(struct batadv_priv *bat_priv, /** * batadv_nc_skb_decode_packet() - decode given skb using the decode data stored * in nc_packet - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @skb: unicast skb to decode * @nc_packet: decode data needed to decode the skb * @@ -1719,7 +1719,7 @@ batadv_nc_skb_decode_packet(struct batadv_priv *bat_priv, struct sk_buff *skb, /** * batadv_nc_find_decoding_packet() - search through buffered decoding data to * find the data needed to decode the coded packet - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @ethhdr: pointer to the ethernet header inside the coded packet * @coded: coded packet we try to find decode data for * @@ -1793,7 +1793,7 @@ batadv_nc_find_decoding_packet(struct batadv_priv *bat_priv, static int batadv_nc_recv_coded_packet(struct sk_buff *skb, struct batadv_hard_iface *recv_if) { - struct batadv_priv *bat_priv = 
netdev_priv(recv_if->soft_iface); + struct batadv_priv *bat_priv = netdev_priv(recv_if->mesh_iface); struct batadv_unicast_packet *unicast_packet; struct batadv_coded_packet *coded_packet; struct batadv_nc_packet *nc_packet; @@ -1858,7 +1858,7 @@ static int batadv_nc_recv_coded_packet(struct sk_buff *skb, /** * batadv_nc_mesh_free() - clean up network coding memory - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information */ void batadv_nc_mesh_free(struct batadv_priv *bat_priv) { diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c index bcc2e20e0cd6c..d9cfc5c6b2088 100644 --- a/net/batman-adv/originator.c +++ b/net/batman-adv/originator.c @@ -47,7 +47,7 @@ static struct lock_class_key batadv_orig_hash_lock_class_key; /** * batadv_orig_hash_find() - Find and return originator from orig_hash - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @data: mac address of the originator * * Return: orig_node (with increased refcnt), NULL on errors @@ -213,7 +213,7 @@ void batadv_orig_node_vlan_release(struct kref *ref) /** * batadv_originator_init() - Initialize all originator structures - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * * Return: 0 on success or negative error number in case of failure */ @@ -338,7 +338,7 @@ batadv_orig_router_get(struct batadv_orig_node *orig_node, /** * batadv_orig_to_router() - get next hop neighbor to an orig address - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @orig_addr: the originator MAC address to search the best next hop router for * @if_outgoing: the interface where the payload packet has been received or * the OGM should be sent to @@ -567,7 +567,7 @@ batadv_hardif_neigh_create(struct batadv_hard_iface *hard_iface, const u8 *neigh_addr, struct batadv_orig_node *orig_node) { - struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface); + struct batadv_priv *bat_priv = netdev_priv(hard_iface->mesh_iface); struct batadv_hardif_neigh_node *hardif_neigh; spin_lock_bh(&hard_iface->neigh_list_lock); @@ -754,20 +754,20 @@ batadv_neigh_node_get_or_create(struct batadv_orig_node *orig_node, int batadv_hardif_neigh_dump(struct sk_buff *msg, struct netlink_callback *cb) { struct batadv_hard_iface *primary_if, *hard_iface; - struct net_device *soft_iface; + struct net_device *mesh_iface; struct batadv_priv *bat_priv; int ret; - soft_iface = batadv_netlink_get_softif(cb); - if (IS_ERR(soft_iface)) - return PTR_ERR(soft_iface); + mesh_iface = batadv_netlink_get_meshif(cb); + if (IS_ERR(mesh_iface)) + return PTR_ERR(mesh_iface); - bat_priv = netdev_priv(soft_iface); + bat_priv = netdev_priv(mesh_iface); primary_if = batadv_primary_if_get_selected(bat_priv); if (!primary_if || primary_if->if_status != BATADV_IF_ACTIVE) { ret = -ENOENT; - goto out_put_soft_iface; + goto out_put_mesh_iface; } hard_iface = batadv_netlink_get_hardif(bat_priv, cb); @@ -794,8 +794,8 @@ int batadv_hardif_neigh_dump(struct sk_buff *msg, struct netlink_callback *cb) batadv_hardif_put(hard_iface); out_put_primary_if: batadv_hardif_put(primary_if); -out_put_soft_iface: - dev_put(soft_iface); +out_put_mesh_iface: + dev_put(mesh_iface); return ret; } @@ -892,7 +892,7 @@ void batadv_orig_node_release(struct kref *ref) /** * 
batadv_originator_free() - Free all originator structures - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information */ void batadv_originator_free(struct batadv_priv *bat_priv) { @@ -928,7 +928,7 @@ void batadv_originator_free(struct batadv_priv *bat_priv) /** * batadv_orig_node_new() - creates a new orig_node - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @addr: the mac address of the originator * * Creates a new originator object and initialises all the generic fields. @@ -1009,7 +1009,7 @@ struct batadv_orig_node *batadv_orig_node_new(struct batadv_priv *bat_priv, /** * batadv_purge_neigh_ifinfo() - purge obsolete ifinfo entries from neighbor - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @neigh: orig node which is to be checked */ static void @@ -1050,7 +1050,7 @@ batadv_purge_neigh_ifinfo(struct batadv_priv *bat_priv, /** * batadv_purge_orig_ifinfo() - purge obsolete ifinfo entries from originator - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @orig_node: orig node which is to be checked * * Return: true if any ifinfo entry was purged, false otherwise. @@ -1102,7 +1102,7 @@ batadv_purge_orig_ifinfo(struct batadv_priv *bat_priv, /** * batadv_purge_orig_neighbors() - purges neighbors from originator - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @orig_node: orig node which is to be checked * * Return: true if any neighbor was purged, false otherwise @@ -1160,7 +1160,7 @@ batadv_purge_orig_neighbors(struct batadv_priv *bat_priv, /** * batadv_find_best_neighbor() - finds the best neighbor after purging - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @orig_node: orig node which is to be checked * @if_outgoing: the interface for which the metric should be compared * @@ -1194,7 +1194,7 @@ batadv_find_best_neighbor(struct batadv_priv *bat_priv, /** * batadv_purge_orig_node() - purges obsolete information from an orig_node - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @orig_node: orig node which is to be checked * * This function checks if the orig_node or substructures of it have become @@ -1236,7 +1236,7 @@ static bool batadv_purge_orig_node(struct batadv_priv *bat_priv, if (hard_iface->if_status != BATADV_IF_ACTIVE) continue; - if (hard_iface->soft_iface != bat_priv->soft_iface) + if (hard_iface->mesh_iface != bat_priv->mesh_iface) continue; if (!kref_get_unless_zero(&hard_iface->refcount)) @@ -1258,7 +1258,7 @@ static bool batadv_purge_orig_node(struct batadv_priv *bat_priv, /** * batadv_purge_orig_ref() - Purge all outdated originators - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information */ void batadv_purge_orig_ref(struct batadv_priv *bat_priv) { @@ -1325,20 +1325,20 @@ static void batadv_purge_orig(struct work_struct *work) int batadv_orig_dump(struct sk_buff *msg, struct netlink_callback *cb) { struct batadv_hard_iface *primary_if, *hard_iface; - struct net_device *soft_iface; + struct 
net_device *mesh_iface; struct batadv_priv *bat_priv; int ret; - soft_iface = batadv_netlink_get_softif(cb); - if (IS_ERR(soft_iface)) - return PTR_ERR(soft_iface); + mesh_iface = batadv_netlink_get_meshif(cb); + if (IS_ERR(mesh_iface)) + return PTR_ERR(mesh_iface); - bat_priv = netdev_priv(soft_iface); + bat_priv = netdev_priv(mesh_iface); primary_if = batadv_primary_if_get_selected(bat_priv); if (!primary_if || primary_if->if_status != BATADV_IF_ACTIVE) { ret = -ENOENT; - goto out_put_soft_iface; + goto out_put_mesh_iface; } hard_iface = batadv_netlink_get_hardif(bat_priv, cb); @@ -1365,8 +1365,8 @@ int batadv_orig_dump(struct sk_buff *msg, struct netlink_callback *cb) batadv_hardif_put(hard_iface); out_put_primary_if: batadv_hardif_put(primary_if); -out_put_soft_iface: - dev_put(soft_iface); +out_put_mesh_iface: + dev_put(mesh_iface); return ret; } diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c index f1061985149fc..35d8c57839998 100644 --- a/net/batman-adv/routing.c +++ b/net/batman-adv/routing.c @@ -30,10 +30,10 @@ #include "fragmentation.h" #include "hard-interface.h" #include "log.h" +#include "mesh-interface.h" #include "network-coding.h" #include "originator.h" #include "send.h" -#include "soft-interface.h" #include "tp_meter.h" #include "translation-table.h" #include "tvlv.h" @@ -43,7 +43,7 @@ static int batadv_route_unicast_packet(struct sk_buff *skb, /** * _batadv_update_route() - set the router for this originator - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @orig_node: orig node which is to be configured * @recv_if: the receive interface for which this route is set * @neigh_node: neighbor which should be the next router @@ -106,7 +106,7 @@ static void _batadv_update_route(struct batadv_priv *bat_priv, /** * batadv_update_route() - set the router for this originator - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @orig_node: orig node which is to be configured * @recv_if: the receive interface for which this route is set * @neigh_node: neighbor which should be the next router @@ -133,7 +133,7 @@ void batadv_update_route(struct batadv_priv *bat_priv, /** * batadv_window_protected() - checks whether the host restarted and is in the * protection time. 
- * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @seq_num_diff: difference between the current/received sequence number and * the last sequence number * @seq_old_max_diff: maximum age of sequence number not considered as restart @@ -207,7 +207,7 @@ bool batadv_check_management_packet(struct sk_buff *skb, /** * batadv_recv_my_icmp_packet() - receive an icmp packet locally - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @skb: icmp packet to process * * Return: NET_RX_SUCCESS if the packet has been consumed or NET_RX_DROP @@ -338,7 +338,7 @@ static int batadv_recv_icmp_ttl_exceeded(struct batadv_priv *bat_priv, int batadv_recv_icmp_packet(struct sk_buff *skb, struct batadv_hard_iface *recv_if) { - struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface); + struct batadv_priv *bat_priv = netdev_priv(recv_if->mesh_iface); struct batadv_icmp_header *icmph; struct batadv_icmp_packet_rr *icmp_packet_rr; struct ethhdr *ethhdr; @@ -428,7 +428,7 @@ int batadv_recv_icmp_packet(struct sk_buff *skb, /** * batadv_check_unicast_packet() - Check for malformed unicast packets - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @skb: packet to check * @hdr_size: size of header to pull * @@ -511,7 +511,7 @@ batadv_last_bonding_replace(struct batadv_orig_node *orig_node, /** * batadv_find_router() - find a suitable router for this originator - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @orig_node: the destination node * @recv_if: pointer to interface this packet was received on * @@ -656,7 +656,7 @@ batadv_find_router(struct batadv_priv *bat_priv, static int batadv_route_unicast_packet(struct sk_buff *skb, struct batadv_hard_iface *recv_if) { - struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface); + struct batadv_priv *bat_priv = netdev_priv(recv_if->mesh_iface); struct batadv_orig_node *orig_node = NULL; struct batadv_unicast_packet *unicast_packet; struct ethhdr *ethhdr = eth_hdr(skb); @@ -727,7 +727,7 @@ static int batadv_route_unicast_packet(struct sk_buff *skb, /** * batadv_reroute_unicast_packet() - update the unicast header for re-routing - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @skb: unicast packet to process * @unicast_packet: the unicast header to be updated * @dst_addr: the payload destination @@ -879,7 +879,7 @@ static bool batadv_check_unicast_ttvn(struct batadv_priv *bat_priv, return false; /* update the header in order to let the packet be delivered to this - * node's soft interface + * node's mesh interface */ primary_if = batadv_primary_if_get_selected(bat_priv); if (!primary_if) @@ -909,7 +909,7 @@ int batadv_recv_unhandled_unicast_packet(struct sk_buff *skb, struct batadv_hard_iface *recv_if) { struct batadv_unicast_packet *unicast_packet; - struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface); + struct batadv_priv *bat_priv = netdev_priv(recv_if->mesh_iface); int check, hdr_size = sizeof(*unicast_packet); check = batadv_check_unicast_packet(bat_priv, skb, hdr_size); @@ -938,7 +938,7 @@ int batadv_recv_unhandled_unicast_packet(struct sk_buff *skb, int batadv_recv_unicast_packet(struct sk_buff *skb, struct 
batadv_hard_iface *recv_if) { - struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface); + struct batadv_priv *bat_priv = netdev_priv(recv_if->mesh_iface); struct batadv_unicast_packet *unicast_packet; struct batadv_unicast_4addr_packet *unicast_4addr_packet; u8 *orig_addr, *orig_addr_gw; @@ -1017,7 +1017,7 @@ int batadv_recv_unicast_packet(struct sk_buff *skb, batadv_dat_snoop_incoming_dhcp_ack(bat_priv, skb, hdr_size); - batadv_interface_rx(recv_if->soft_iface, skb, hdr_size, + batadv_interface_rx(recv_if->mesh_iface, skb, hdr_size, orig_node); rx_success: @@ -1047,7 +1047,7 @@ int batadv_recv_unicast_packet(struct sk_buff *skb, int batadv_recv_unicast_tvlv(struct sk_buff *skb, struct batadv_hard_iface *recv_if) { - struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface); + struct batadv_priv *bat_priv = netdev_priv(recv_if->mesh_iface); struct batadv_unicast_tvlv_packet *unicast_tvlv_packet; unsigned char *tvlv_buff; u16 tvlv_buff_len; @@ -1103,7 +1103,7 @@ int batadv_recv_unicast_tvlv(struct sk_buff *skb, int batadv_recv_frag_packet(struct sk_buff *skb, struct batadv_hard_iface *recv_if) { - struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface); + struct batadv_priv *bat_priv = netdev_priv(recv_if->mesh_iface); struct batadv_orig_node *orig_node_src = NULL; struct batadv_frag_packet *frag_packet; int ret = NET_RX_DROP; @@ -1165,7 +1165,7 @@ int batadv_recv_frag_packet(struct sk_buff *skb, int batadv_recv_bcast_packet(struct sk_buff *skb, struct batadv_hard_iface *recv_if) { - struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface); + struct batadv_priv *bat_priv = netdev_priv(recv_if->mesh_iface); struct batadv_orig_node *orig_node = NULL; struct batadv_bcast_packet *bcast_packet; struct ethhdr *ethhdr; @@ -1255,7 +1255,7 @@ int batadv_recv_bcast_packet(struct sk_buff *skb, batadv_dat_snoop_incoming_dhcp_ack(bat_priv, skb, hdr_size); /* broadcast for me */ - batadv_interface_rx(recv_if->soft_iface, skb, hdr_size, orig_node); + batadv_interface_rx(recv_if->mesh_iface, skb, hdr_size, orig_node); rx_success: ret = NET_RX_SUCCESS; @@ -1279,14 +1279,14 @@ int batadv_recv_bcast_packet(struct sk_buff *skb, * * Parses the given, received batman-adv multicast packet. Depending on the * contents of its TVLV forwards it and/or decapsulates it to hand it to the - * soft interface. + * mesh interface. * * Return: NET_RX_DROP if the skb is not consumed, NET_RX_SUCCESS otherwise. 
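Every receive handler in the hunks above recovers its batadv_priv the same way: batman-adv allocates the mesh interface with enough private room for struct batadv_priv, so netdev_priv() on the net_device yields the per-mesh state without any lookup or extra reference. A minimal sketch of that pairing, not taken from this patch (the interface name and the batadv_meshif_init_early setup callback are assumed here for illustration):

static struct net_device *example_meshif_alloc(void)
{
	struct net_device *mesh_iface;
	struct batadv_priv *bat_priv;

	/* reserve sizeof(struct batadv_priv) bytes behind net_device */
	mesh_iface = alloc_netdev(sizeof(struct batadv_priv), "bat%d",
				  NET_NAME_UNKNOWN, batadv_meshif_init_early);
	if (!mesh_iface)
		return NULL;

	/* points into the same allocation; no refcount is taken */
	bat_priv = netdev_priv(mesh_iface);
	atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);

	return mesh_iface;
}

This is why the handlers may call netdev_priv(recv_if->mesh_iface) from any context that already holds a valid hard interface reference.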
*/ int batadv_recv_mcast_packet(struct sk_buff *skb, struct batadv_hard_iface *recv_if) { - struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface); + struct batadv_priv *bat_priv = netdev_priv(recv_if->mesh_iface); struct batadv_mcast_packet *mcast_packet; int hdr_size = sizeof(*mcast_packet); unsigned char *tvlv_buff; @@ -1329,7 +1329,7 @@ int batadv_recv_mcast_packet(struct sk_buff *skb, batadv_add_counter(bat_priv, BATADV_CNT_MCAST_RX_LOCAL_BYTES, skb->len - hdr_size); - batadv_interface_rx(bat_priv->soft_iface, skb, hdr_size, NULL); + batadv_interface_rx(bat_priv->mesh_iface, skb, hdr_size, NULL); /* skb was consumed */ skb = NULL; } diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c index 0379b126865d7..735ac80778219 100644 --- a/net/batman-adv/send.c +++ b/net/batman-adv/send.c @@ -34,10 +34,10 @@ #include "gateway_client.h" #include "hard-interface.h" #include "log.h" +#include "mesh-interface.h" #include "network-coding.h" #include "originator.h" #include "routing.h" -#include "soft-interface.h" #include "translation-table.h" static void batadv_send_outstanding_bcast_packet(struct work_struct *work); @@ -68,7 +68,7 @@ int batadv_send_skb_packet(struct sk_buff *skb, struct ethhdr *ethhdr; int ret; - bat_priv = netdev_priv(hard_iface->soft_iface); + bat_priv = netdev_priv(hard_iface->mesh_iface); if (hard_iface->if_status != BATADV_IF_ACTIVE) goto send_skb_err; @@ -272,7 +272,7 @@ static bool batadv_send_skb_prepare_unicast(struct sk_buff *skb, /** * batadv_send_skb_prepare_unicast_4addr() - encapsulate an skb with a * unicast 4addr header - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @skb: the skb containing the payload to encapsulate * @orig: the destination node * @packet_subtype: the unicast 4addr packet subtype to use @@ -314,7 +314,7 @@ bool batadv_send_skb_prepare_unicast_4addr(struct batadv_priv *bat_priv, /** * batadv_send_skb_unicast() - encapsulate and send an skb via unicast - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @skb: payload to send * @packet_type: the batman unicast packet type to use * @packet_subtype: the unicast 4addr packet subtype (only relevant for unicast @@ -384,7 +384,7 @@ int batadv_send_skb_unicast(struct batadv_priv *bat_priv, /** * batadv_send_skb_via_tt_generic() - send an skb via TT lookup - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @skb: payload to send * @packet_type: the batman unicast packet type to use * @packet_subtype: the unicast 4addr packet subtype (only relevant for unicast @@ -430,7 +430,7 @@ int batadv_send_skb_via_tt_generic(struct batadv_priv *bat_priv, /** * batadv_send_skb_via_gw() - send an skb via gateway lookup - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @skb: payload to send * @vid: the vid to be used to search the translation table * @@ -532,7 +532,7 @@ batadv_forw_packet_alloc(struct batadv_hard_iface *if_incoming, forw_packet->queue_left = queue_left; forw_packet->if_incoming = if_incoming; forw_packet->if_outgoing = if_outgoing; forw_packet->num_packets = 0; return forw_packet; @@ -695,7 +695,7 @@ static void batadv_forw_packet_queue(struct batadv_forw_packet *forw_packet, /** *
batadv_forw_packet_bcast_queue() - try to queue a broadcast packet - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @forw_packet: the forwarding packet to queue * @send_time: timestamp (jiffies) when the packet is to be sent * @@ -714,7 +714,7 @@ batadv_forw_packet_bcast_queue(struct batadv_priv *bat_priv, /** * batadv_forw_packet_ogmv1_queue() - try to queue an OGMv1 packet - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @forw_packet: the forwarding packet to queue * @send_time: timestamp (jiffies) when the packet is to be sent * @@ -732,7 +732,7 @@ void batadv_forw_packet_ogmv1_queue(struct batadv_priv *bat_priv, /** * batadv_forw_bcast_packet_to_list() - queue broadcast packet for transmissions - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @skb: broadcast packet to add * @delay: number of jiffies to wait before sending * @own_packet: true if it is a self-generated broadcast packet @@ -787,7 +787,7 @@ static int batadv_forw_bcast_packet_to_list(struct batadv_priv *bat_priv, /** * batadv_forw_bcast_packet_if() - forward and queue a broadcast packet - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @skb: broadcast packet to add * @delay: number of jiffies to wait before sending * @own_packet: true if it is a self-generated broadcast packet @@ -838,7 +838,7 @@ static int batadv_forw_bcast_packet_if(struct batadv_priv *bat_priv, /** * batadv_send_no_broadcast() - check whether (re)broadcast is necessary - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @skb: broadcast packet to check * @own_packet: true if it is a self-generated broadcast packet * @if_out: the outgoing interface checked and considered for (re)broadcast @@ -900,7 +900,7 @@ static bool batadv_send_no_broadcast(struct batadv_priv *bat_priv, /** * __batadv_forw_bcast_packet() - forward and queue a broadcast packet - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @skb: broadcast packet to add * @delay: number of jiffies to wait before sending * @own_packet: true if it is a self-generated broadcast packet @@ -930,7 +930,7 @@ static int __batadv_forw_bcast_packet(struct batadv_priv *bat_priv, rcu_read_lock(); list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) { - if (hard_iface->soft_iface != bat_priv->soft_iface) + if (hard_iface->mesh_iface != bat_priv->mesh_iface) continue; if (!kref_get_unless_zero(&hard_iface->refcount)) @@ -958,7 +958,7 @@ static int __batadv_forw_bcast_packet(struct batadv_priv *bat_priv, /** * batadv_forw_bcast_packet() - forward and queue a broadcast packet - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @skb: broadcast packet to add * @delay: number of jiffies to wait before sending * @own_packet: true if it is a self-generated broadcast packet @@ -979,7 +979,7 @@ int batadv_forw_bcast_packet(struct batadv_priv *bat_priv, /** * batadv_send_bcast_packet() - send and queue a broadcast packet - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat 
priv with all the mesh interface information * @skb: broadcast packet to add * @delay: number of jiffies to wait before sending * @own_packet: true if it is a self-generated broadcast packet @@ -1060,7 +1060,7 @@ static void batadv_send_outstanding_bcast_packet(struct work_struct *work) delayed_work = to_delayed_work(work); forw_packet = container_of(delayed_work, struct batadv_forw_packet, delayed_work); - bat_priv = netdev_priv(forw_packet->if_incoming->soft_iface); + bat_priv = netdev_priv(forw_packet->if_incoming->mesh_iface); if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING) { dropped = true; @@ -1095,7 +1095,7 @@ static void batadv_send_outstanding_bcast_packet(struct work_struct *work) /** * batadv_purge_outstanding_packets() - stop/purge scheduled bcast/OGMv1 packets - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @hard_iface: the hard interface to cancel and purge bcast/ogm packets on * * This method cancels and purges any broadcast and OGMv1 packet on the given diff --git a/net/batman-adv/send.h b/net/batman-adv/send.h index 08af251b765c2..3415afec4a0c2 100644 --- a/net/batman-adv/send.h +++ b/net/batman-adv/send.h @@ -68,7 +68,7 @@ int batadv_send_skb_via_gw(struct batadv_priv *bat_priv, struct sk_buff *skb, /** * batadv_send_skb_via_tt() - send an skb via TT lookup - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @skb: the payload to send * @dst_hint: can be used to override the destination contained in the skb * @vid: the vid to be used to search the translation table @@ -89,7 +89,7 @@ static inline int batadv_send_skb_via_tt(struct batadv_priv *bat_priv, /** * batadv_send_skb_via_tt_4addr() - send an skb via TT lookup - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @skb: the payload to send * @packet_subtype: the unicast 4addr packet subtype to use * @dst_hint: can be used to override the destination contained in the skb diff --git a/net/batman-adv/tp_meter.c b/net/batman-adv/tp_meter.c index 7f3dd3c393e07..9fb14e40e1562 100644 --- a/net/batman-adv/tp_meter.c +++ b/net/batman-adv/tp_meter.c @@ -206,7 +206,7 @@ static void batadv_tp_update_rto(struct batadv_tp_vars *tp_vars, * batadv_tp_batctl_notify() - send client status result to client * @reason: reason for tp meter session stop * @dst: destination of tp_meter session - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @start_time: start of transmission in jiffies * @total_sent: bytes acked to the receiver * @cookie: cookie of tp_meter session @@ -238,7 +238,7 @@ static void batadv_tp_batctl_notify(enum batadv_tp_meter_reason reason, * batadv_tp_batctl_error_notify() - send client error result to client * @reason: reason for tp meter session stop * @dst: destination of tp_meter session - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @cookie: cookie of tp_meter session */ static void batadv_tp_batctl_error_notify(enum batadv_tp_meter_reason reason, @@ -251,7 +251,7 @@ static void batadv_tp_batctl_error_notify(enum batadv_tp_meter_reason reason, /** * batadv_tp_list_find() - find a tp_vars object in the global list - * @bat_priv: the bat priv with all the soft interface 
information + * @bat_priv: the bat priv with all the mesh interface information * @dst: the other endpoint MAC address to look for * * Look for a tp_vars object matching dst as end_point and return it after @@ -287,7 +287,7 @@ static struct batadv_tp_vars *batadv_tp_list_find(struct batadv_priv *bat_priv, /** * batadv_tp_list_find_session() - find tp_vars session object in the global * list - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @dst: the other endpoint MAC address to look for * @session: session identifier * @@ -366,7 +366,7 @@ static void batadv_tp_vars_put(struct batadv_tp_vars *tp_vars) /** * batadv_tp_sender_cleanup() - cleanup sender data and drop and timer - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @tp_vars: the private data of the current TP meter session to cleanup */ static void batadv_tp_sender_cleanup(struct batadv_priv *bat_priv, @@ -396,7 +396,7 @@ static void batadv_tp_sender_cleanup(struct batadv_priv *bat_priv, /** * batadv_tp_sender_end() - print info about ended session and inform client - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @tp_vars: the private data of the current TP meter session */ static void batadv_tp_sender_end(struct batadv_priv *bat_priv, @@ -619,7 +619,7 @@ static int batadv_tp_send_msg(struct batadv_tp_vars *tp_vars, const u8 *src, /** * batadv_tp_recv_ack() - ACK receiving function - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @skb: the buffer containing the received packet * * Process a received TP ACK packet @@ -832,7 +832,7 @@ static int batadv_tp_send(void *arg) } /* assume that all the hard_interfaces have a correctly - * configured MTU, so use the soft_iface MTU as MSS. + * configured MTU, so use the mesh_iface MTU as MSS. * This might not be true and in that case the fragmentation * should be used. * Now, try to send the packet as it is @@ -927,7 +927,7 @@ static void batadv_tp_start_kthread(struct batadv_tp_vars *tp_vars) /** * batadv_tp_start() - start a new tp meter session - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @dst: the receiver MAC address * @test_length: test length in milliseconds * @cookie: session cookie @@ -993,7 +993,7 @@ void batadv_tp_start(struct batadv_priv *bat_priv, const u8 *dst, /* initialise the CWND to 3*MSS (Section 3.1 in RFC5681). 
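A quick check of the arithmetic behind the "3*MSS" figure above: RFC 5681, Section 3.1 bounds the initial congestion window as IW = min(4*SMSS, max(2*SMSS, 4380 bytes)). Worked through for the classic Ethernet-derived SMSS of 1460 bytes (an illustrative value, not one from this patch):

	IW = min(4 * 1460, max(2 * 1460, 4380))
	   = min(5840, 4380)
	   = 4380
	   = 3 * 1460

which is where the customary "3*MSS" shorthand comes from; batman-adv substitutes its own payload size for the MSS, as the continuation of the comment explains.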
* For batman-adv the MSS is the size of the payload received by the - * soft_interface, hence its MTU + * mesh_interface, hence its MTU */ tp_vars->cwnd = BATADV_TP_PLEN * 3; /* at the beginning initialise the SS threshold to the biggest possible @@ -1052,7 +1052,7 @@ void batadv_tp_start(struct batadv_priv *bat_priv, const u8 *dst, /** * batadv_tp_stop() - stop currently running tp meter session - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @dst: the receiver MAC address * @return_value: reason for tp meter session stop */ @@ -1141,7 +1141,7 @@ static void batadv_tp_receiver_shutdown(struct timer_list *t) /** * batadv_tp_send_ack() - send an ACK packet - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @dst: the mac address of the destination originator * @seq: the sequence number to ACK * @timestamp: the timestamp to echo back in the ACK @@ -1320,7 +1320,7 @@ static void batadv_tp_ack_unordered(struct batadv_tp_vars *tp_vars) /** * batadv_tp_init_recv() - return matching or create new receiver tp_vars - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @icmp: received icmp tp msg * * Return: corresponding tp_vars or NULL on errors @@ -1373,7 +1373,7 @@ batadv_tp_init_recv(struct batadv_priv *bat_priv, /** * batadv_tp_recv_msg() - process a single data message - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @skb: the buffer containing the received packet * * Process a received TP MSG packet @@ -1457,7 +1457,7 @@ static void batadv_tp_recv_msg(struct batadv_priv *bat_priv, /** * batadv_tp_meter_recv() - main TP Meter receiving function - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @skb: the buffer containing the received packet */ void batadv_tp_meter_recv(struct batadv_priv *bat_priv, struct sk_buff *skb) diff --git a/net/batman-adv/trace.h b/net/batman-adv/trace.h index 6b816cf1a953b..7da692ec38e90 100644 --- a/net/batman-adv/trace.h +++ b/net/batman-adv/trace.h @@ -34,7 +34,7 @@ TRACE_EVENT(batadv_dbg, TP_ARGS(bat_priv, vaf), TP_STRUCT__entry( - __string(device, bat_priv->soft_iface->name) + __string(device, bat_priv->mesh_iface->name) __string(driver, KBUILD_MODNAME) __vstring(msg, vaf->fmt, vaf->va) ), diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c index d4b71d34310f4..4a3165920de19 100644 --- a/net/batman-adv/translation-table.c +++ b/net/batman-adv/translation-table.c @@ -47,9 +47,9 @@ #include "hard-interface.h" #include "hash.h" #include "log.h" +#include "mesh-interface.h" #include "netlink.h" #include "originator.h" -#include "soft-interface.h" #include "tvlv.h" static struct kmem_cache *batadv_tl_cache __read_mostly; @@ -161,7 +161,7 @@ batadv_tt_hash_find(struct batadv_hashtable *hash, const u8 *addr, /** * batadv_tt_local_hash_find() - search the local table for a given client - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @addr: the mac address of the client to look for * @vid: VLAN identifier * @@ -186,7 +186,7 @@ batadv_tt_local_hash_find(struct batadv_priv *bat_priv, const u8 *addr, /** * 
batadv_tt_global_hash_find() - search the global table for a given client - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @addr: the mac address of the client to look for * @vid: VLAN identifier * @@ -221,7 +221,7 @@ static void batadv_tt_local_entry_release(struct kref *ref) tt_local_entry = container_of(ref, struct batadv_tt_local_entry, common.refcount); - batadv_softif_vlan_put(tt_local_entry->vlan); + batadv_meshif_vlan_put(tt_local_entry->vlan); kfree_rcu(tt_local_entry, common.rcu); } @@ -260,7 +260,7 @@ void batadv_tt_global_entry_release(struct kref *ref) /** * batadv_tt_global_hash_count() - count the number of orig entries - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @addr: the mac address of the client to count entries for * @vid: VLAN identifier * @@ -286,28 +286,28 @@ int batadv_tt_global_hash_count(struct batadv_priv *bat_priv, /** * batadv_tt_local_size_mod() - change the size by v of the local table * identified by vid - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @vid: the VLAN identifier of the sub-table to change * @v: the amount to sum to the local table size */ static void batadv_tt_local_size_mod(struct batadv_priv *bat_priv, unsigned short vid, int v) { - struct batadv_softif_vlan *vlan; + struct batadv_meshif_vlan *vlan; - vlan = batadv_softif_vlan_get(bat_priv, vid); + vlan = batadv_meshif_vlan_get(bat_priv, vid); if (!vlan) return; atomic_add(v, &vlan->tt.num_entries); - batadv_softif_vlan_put(vlan); + batadv_meshif_vlan_put(vlan); } /** * batadv_tt_local_size_inc() - increase by one the local table size for the * given vid - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @vid: the VLAN identifier */ static void batadv_tt_local_size_inc(struct batadv_priv *bat_priv, @@ -319,7 +319,7 @@ static void batadv_tt_local_size_inc(struct batadv_priv *bat_priv, /** * batadv_tt_local_size_dec() - decrease by one the local table size for the * given vid - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @vid: the VLAN identifier */ static void batadv_tt_local_size_dec(struct batadv_priv *bat_priv, @@ -412,7 +412,7 @@ batadv_tt_orig_list_entry_put(struct batadv_tt_orig_list_entry *orig_entry) /** * batadv_tt_local_event() - store a local TT event (ADD/DEL) - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @tt_local_entry: the TT entry involved in the event * @event_flags: flags to store in the event structure */ @@ -504,7 +504,7 @@ static u16 batadv_tt_entries(u16 tt_len) /** * batadv_tt_local_table_transmit_size() - calculates the local translation * table size when transmitted over the air - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * * Return: local translation table size in bytes. 
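The helper whose diff follows totals a fixed TVLV encapsulation header, one descriptor per announced VLAN, and one change record per local client. A hedged sketch of that arithmetic (the struct names are the batman-adv TVLV structures referenced elsewhere in this patch; the exact composition is illustrative, not copied verbatim from the source):

static int example_tt_transmit_size(u16 num_vlan, u16 num_entries)
{
	int hdr_size;

	/* fixed encapsulation: unicast tvlv packet + tvlv header + tt data */
	hdr_size = sizeof(struct batadv_unicast_tvlv_packet);
	hdr_size += sizeof(struct batadv_tvlv_hdr);
	hdr_size += sizeof(struct batadv_tvlv_tt_data);

	/* one descriptor (vid + crc) per announced VLAN */
	hdr_size += num_vlan * sizeof(struct batadv_tvlv_tt_vlan_data);

	/* one change record per local client, as batadv_tt_len() scales */
	return hdr_size + num_entries * sizeof(struct batadv_tvlv_tt_change);
}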
*/ @@ -512,11 +512,11 @@ static int batadv_tt_local_table_transmit_size(struct batadv_priv *bat_priv) { u16 num_vlan = 0; u16 tt_local_entries = 0; - struct batadv_softif_vlan *vlan; + struct batadv_meshif_vlan *vlan; int hdr_size; rcu_read_lock(); - hlist_for_each_entry_rcu(vlan, &bat_priv->softif_vlan_list, list) { + hlist_for_each_entry_rcu(vlan, &bat_priv->meshif_vlan_list, list) { num_vlan++; tt_local_entries += atomic_read(&vlan->tt.num_entries); } @@ -576,7 +576,7 @@ static void batadv_tt_global_free(struct batadv_priv *bat_priv, /** * batadv_tt_local_add() - add a new client to the local table or update an * existing client - * @soft_iface: netdev struct of the mesh interface + * @mesh_iface: netdev struct of the mesh interface * @addr: the mac address of the client to add * @vid: VLAN identifier * @ifindex: index of the interface where the client is connected to (useful to @@ -586,14 +586,14 @@ static void batadv_tt_global_free(struct batadv_priv *bat_priv, * * Return: true if the client was successfully added, false otherwise. */ -bool batadv_tt_local_add(struct net_device *soft_iface, const u8 *addr, +bool batadv_tt_local_add(struct net_device *mesh_iface, const u8 *addr, unsigned short vid, int ifindex, u32 mark) { - struct batadv_priv *bat_priv = netdev_priv(soft_iface); + struct batadv_priv *bat_priv = netdev_priv(mesh_iface); struct batadv_tt_local_entry *tt_local; struct batadv_tt_global_entry *tt_global = NULL; - struct net *net = dev_net(soft_iface); - struct batadv_softif_vlan *vlan; + struct net *net = dev_net(mesh_iface); + struct batadv_meshif_vlan *vlan; struct net_device *in_dev = NULL; struct batadv_hard_iface *in_hardif = NULL; struct hlist_head *head; @@ -650,7 +650,7 @@ bool batadv_tt_local_add(struct net_device *soft_iface, const u8 *addr, table_size += batadv_tt_len(1); packet_size_max = atomic_read(&bat_priv->packet_size_max); if (table_size > packet_size_max) { - net_ratelimited_function(batadv_info, soft_iface, + net_ratelimited_function(batadv_info, mesh_iface, "Local translation table size (%i) exceeds maximum packet size (%i); Ignoring new local tt entry: %pM\n", table_size, packet_size_max, addr); goto out; @@ -661,9 +661,9 @@ bool batadv_tt_local_add(struct net_device *soft_iface, const u8 *addr, goto out; /* increase the refcounter of the related vlan */ - vlan = batadv_softif_vlan_get(bat_priv, vid); + vlan = batadv_meshif_vlan_get(bat_priv, vid); if (!vlan) { - net_ratelimited_function(batadv_info, soft_iface, + net_ratelimited_function(batadv_info, mesh_iface, "adding TT local entry %pM to non-existent VLAN %d\n", addr, batadv_print_vid(vid)); kmem_cache_free(batadv_tl_cache, tt_local); @@ -693,7 +693,7 @@ bool batadv_tt_local_add(struct net_device *soft_iface, const u8 *addr, /* the batman interface mac and multicast addresses should never be * purged */ - if (batadv_compare_eth(addr, soft_iface->dev_addr) || + if (batadv_compare_eth(addr, mesh_iface->dev_addr) || is_multicast_ether_addr(addr)) tt_local->common.flags |= BATADV_TT_CLIENT_NOPURGE; @@ -849,7 +849,7 @@ batadv_tt_prepare_tvlv_global_data(struct batadv_orig_node *orig_node, /** * batadv_tt_prepare_tvlv_local_data() - allocate and prepare the TT TVLV for * this node - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @tt_data: uninitialised pointer to the address of the TVLV buffer * @tt_change: uninitialised pointer to the address of the area where the TT * changes can be stored @@ -871,7 +871,7 @@ 
batadv_tt_prepare_tvlv_local_data(struct batadv_priv *bat_priv, s32 *tt_len) { struct batadv_tvlv_tt_vlan_data *tt_vlan; - struct batadv_softif_vlan *vlan; + struct batadv_meshif_vlan *vlan; u16 num_vlan = 0; u16 vlan_entries = 0; u16 total_entries = 0; @@ -879,8 +879,8 @@ batadv_tt_prepare_tvlv_local_data(struct batadv_priv *bat_priv, u8 *tt_change_ptr; int change_offset; - spin_lock_bh(&bat_priv->softif_vlan_list_lock); - hlist_for_each_entry(vlan, &bat_priv->softif_vlan_list, list) { + spin_lock_bh(&bat_priv->meshif_vlan_list_lock); + hlist_for_each_entry(vlan, &bat_priv->meshif_vlan_list, list) { vlan_entries = atomic_read(&vlan->tt.num_entries); if (vlan_entries < 1) continue; @@ -909,7 +909,7 @@ batadv_tt_prepare_tvlv_local_data(struct batadv_priv *bat_priv, (*tt_data)->num_vlan = htons(num_vlan); tt_vlan = (*tt_data)->vlan_data; - hlist_for_each_entry(vlan, &bat_priv->softif_vlan_list, list) { + hlist_for_each_entry(vlan, &bat_priv->meshif_vlan_list, list) { vlan_entries = atomic_read(&vlan->tt.num_entries); if (vlan_entries < 1) continue; @@ -925,14 +925,14 @@ batadv_tt_prepare_tvlv_local_data(struct batadv_priv *bat_priv, *tt_change = (struct batadv_tvlv_tt_change *)tt_change_ptr; out: - spin_unlock_bh(&bat_priv->softif_vlan_list_lock); + spin_unlock_bh(&bat_priv->meshif_vlan_list_lock); return tvlv_len; } /** * batadv_tt_tvlv_container_update() - update the translation table tvlv * container after local tt changes have been committed - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information */ static void batadv_tt_tvlv_container_update(struct batadv_priv *bat_priv) { @@ -956,7 +956,7 @@ static void batadv_tt_tvlv_container_update(struct batadv_priv *bat_priv) * The local change history should still be cleaned up so the next * TT round can start again with a clean state. 
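Put differently: once the accumulated diff outgrows the mesh interface MTU (the check immediately below), the node stops distributing incremental changes for this round. Receivers still see the translation table version number advance without a usable diff, and batman-adv's normal recovery path then has them request the full table, which is why dropping the queued changes here is safe.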
*/ - if (tt_diff_len > bat_priv->soft_iface->mtu) { + if (tt_diff_len > bat_priv->mesh_iface->mtu) { tt_diff_len = 0; tt_diff_entries_num = 0; drop_changes = true; @@ -1025,7 +1025,7 @@ static void batadv_tt_tvlv_container_update(struct batadv_priv *bat_priv) * @msg :Netlink message to dump into * @portid: Port making netlink request * @cb: Control block containing additional options - * @bat_priv: The bat priv with all the soft interface information + * @bat_priv: The bat priv with all the mesh interface information * @common: tt local & tt global common data * * Return: Error code, or 0 on success @@ -1037,7 +1037,7 @@ batadv_tt_local_dump_entry(struct sk_buff *msg, u32 portid, struct batadv_tt_common_entry *common) { void *hdr; - struct batadv_softif_vlan *vlan; + struct batadv_meshif_vlan *vlan; struct batadv_tt_local_entry *local; unsigned int last_seen_msecs; u32 crc; @@ -1045,13 +1045,13 @@ batadv_tt_local_dump_entry(struct sk_buff *msg, u32 portid, local = container_of(common, struct batadv_tt_local_entry, common); last_seen_msecs = jiffies_to_msecs(jiffies - local->last_seen); - vlan = batadv_softif_vlan_get(bat_priv, common->vid); + vlan = batadv_meshif_vlan_get(bat_priv, common->vid); if (!vlan) return 0; crc = vlan->tt.crc; - batadv_softif_vlan_put(vlan); + batadv_meshif_vlan_put(vlan); hdr = genlmsg_put(msg, portid, cb->nlh->nlmsg_seq, &batadv_netlink_family, NLM_F_MULTI, @@ -1084,7 +1084,7 @@ batadv_tt_local_dump_entry(struct sk_buff *msg, u32 portid, * @msg: Netlink message to dump into * @portid: Port making netlink request * @cb: Control block containing additional options - * @bat_priv: The bat priv with all the soft interface information + * @bat_priv: The bat priv with all the mesh interface information * @hash: hash to dump * @bucket: bucket index to dump * @idx_s: Number of entries to skip @@ -1130,7 +1130,7 @@ batadv_tt_local_dump_bucket(struct sk_buff *msg, u32 portid, */ int batadv_tt_local_dump(struct sk_buff *msg, struct netlink_callback *cb) { - struct net_device *soft_iface; + struct net_device *mesh_iface; struct batadv_priv *bat_priv; struct batadv_hard_iface *primary_if = NULL; struct batadv_hashtable *hash; @@ -1139,11 +1139,11 @@ int batadv_tt_local_dump(struct sk_buff *msg, struct netlink_callback *cb) int idx = cb->args[1]; int portid = NETLINK_CB(cb->skb).portid; - soft_iface = batadv_netlink_get_softif(cb); - if (IS_ERR(soft_iface)) - return PTR_ERR(soft_iface); + mesh_iface = batadv_netlink_get_meshif(cb); + if (IS_ERR(mesh_iface)) + return PTR_ERR(mesh_iface); - bat_priv = netdev_priv(soft_iface); + bat_priv = netdev_priv(mesh_iface); primary_if = batadv_primary_if_get_selected(bat_priv); if (!primary_if || primary_if->if_status != BATADV_IF_ACTIVE) { @@ -1165,7 +1165,7 @@ int batadv_tt_local_dump(struct sk_buff *msg, struct netlink_callback *cb) out: batadv_hardif_put(primary_if); - dev_put(soft_iface); + dev_put(mesh_iface); cb->args[0] = bucket; cb->args[1] = idx; @@ -1194,7 +1194,7 @@ batadv_tt_local_set_pending(struct batadv_priv *bat_priv, /** * batadv_tt_local_remove() - logically remove an entry from the local table - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @addr: the MAC address of the client to remove * @vid: VLAN identifier * @message: message to append to the log on deletion @@ -1259,7 +1259,7 @@ u16 batadv_tt_local_remove(struct batadv_priv *bat_priv, const u8 *addr, /** * batadv_tt_local_purge_list() - purge inactive tt local entries - * 
@bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @head: pointer to the list containing the local tt entries * @timeout: parameter deciding whether a given tt local entry is considered * inactive or not @@ -1294,7 +1294,7 @@ static void batadv_tt_local_purge_list(struct batadv_priv *bat_priv, /** * batadv_tt_local_purge() - purge inactive tt local entries - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @timeout: parameter deciding whether a given tt local entry is considered * inactive or not */ @@ -1529,7 +1529,7 @@ batadv_tt_global_orig_entry_add(struct batadv_tt_global_entry *tt_global, /** * batadv_tt_global_add() - add a new TT global entry or update an existing one - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @orig_node: the originator announcing the client * @tt_addr: the mac address of the non-mesh client * @vid: VLAN identifier @@ -1702,7 +1702,7 @@ static bool batadv_tt_global_add(struct batadv_priv *bat_priv, /** * batadv_transtable_best_orig() - Get best originator list entry from tt entry - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @tt_global_entry: global translation table entry to be analyzed * * This function assumes the caller holds rcu_read_lock(). @@ -1809,7 +1809,7 @@ batadv_tt_global_dump_subentry(struct sk_buff *msg, u32 portid, u32 seq, * @msg: Netlink message to dump into * @portid: Port making netlink request * @seq: Sequence number of netlink message - * @bat_priv: The bat priv with all the soft interface information + * @bat_priv: The bat priv with all the mesh interface information * @common: tt local & tt global common data * @sub_s: Number of entries to skip * @@ -1854,7 +1854,7 @@ batadv_tt_global_dump_entry(struct sk_buff *msg, u32 portid, u32 seq, * @msg: Netlink message to dump into * @portid: Port making netlink request * @seq: Sequence number of netlink message - * @bat_priv: The bat priv with all the soft interface information + * @bat_priv: The bat priv with all the mesh interface information * @head: Pointer to the list containing the global tt entries * @idx_s: Number of entries to skip * @sub: Number of entries to skip @@ -1897,7 +1897,7 @@ batadv_tt_global_dump_bucket(struct sk_buff *msg, u32 portid, u32 seq, */ int batadv_tt_global_dump(struct sk_buff *msg, struct netlink_callback *cb) { - struct net_device *soft_iface; + struct net_device *mesh_iface; struct batadv_priv *bat_priv; struct batadv_hard_iface *primary_if = NULL; struct batadv_hashtable *hash; @@ -1908,11 +1908,11 @@ int batadv_tt_global_dump(struct sk_buff *msg, struct netlink_callback *cb) int sub = cb->args[2]; int portid = NETLINK_CB(cb->skb).portid; - soft_iface = batadv_netlink_get_softif(cb); - if (IS_ERR(soft_iface)) - return PTR_ERR(soft_iface); + mesh_iface = batadv_netlink_get_meshif(cb); + if (IS_ERR(mesh_iface)) + return PTR_ERR(mesh_iface); - bat_priv = netdev_priv(soft_iface); + bat_priv = netdev_priv(mesh_iface); primary_if = batadv_primary_if_get_selected(bat_priv); if (!primary_if || primary_if->if_status != BATADV_IF_ACTIVE) { @@ -1937,7 +1937,7 @@ int batadv_tt_global_dump(struct sk_buff *msg, struct netlink_callback *cb) out: batadv_hardif_put(primary_if); - dev_put(soft_iface); + dev_put(mesh_iface); 
cb->args[0] = bucket; cb->args[1] = idx; @@ -1990,7 +1990,7 @@ batadv_tt_global_del_orig_list(struct batadv_tt_global_entry *tt_global_entry) /** * batadv_tt_global_del_orig_node() - remove orig_node from a global tt entry - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @tt_global_entry: the global entry to remove the orig_node from * @orig_node: the originator announcing the client * @message: message to append to the log on deletion @@ -2069,7 +2069,7 @@ batadv_tt_global_del_roaming(struct batadv_priv *bat_priv, /** * batadv_tt_global_del() - remove a client from the global table - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @orig_node: an originator serving this client * @addr: the mac address of the client * @vid: VLAN identifier @@ -2134,7 +2134,7 @@ static void batadv_tt_global_del(struct batadv_priv *bat_priv, /** * batadv_tt_global_del_orig() - remove all the TT global entries belonging to * the given originator matching the provided vid - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @orig_node: the originator owning the entries to remove * @match_vid: the VLAN identifier to match. If negative all the entries will be * removed @@ -2305,7 +2305,7 @@ _batadv_is_ap_isolated(struct batadv_tt_local_entry *tt_local_entry, /** * batadv_transtable_search() - get the mesh destination for a given client - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @src: mac address of the source client * @addr: mac address of the destination client * @vid: VLAN identifier @@ -2364,7 +2364,7 @@ struct batadv_orig_node *batadv_transtable_search(struct batadv_priv *bat_priv, /** * batadv_tt_global_crc() - calculates the checksum of the local table belonging * to the given orig_node - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @orig_node: originator for which the CRC should be computed * @vid: VLAN identifier for which the CRC32 has to be computed * @@ -2458,7 +2458,7 @@ static u32 batadv_tt_global_crc(struct batadv_priv *bat_priv, /** * batadv_tt_local_crc() - calculates the checksum of the local table - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @vid: VLAN identifier for which the CRC32 has to be computed * * For details about the computation, please refer to the documentation for @@ -2593,7 +2593,7 @@ static void batadv_tt_req_purge(struct batadv_priv *bat_priv) /** * batadv_tt_req_node_new() - search and possibly create a tt_req_node object - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @orig_node: orig node this request is being issued for * * Return: the pointer to the new tt_req_node struct if no request @@ -2689,7 +2689,7 @@ static bool batadv_tt_global_valid(const void *entry_ptr, /** * batadv_tt_tvlv_generate() - fill the tvlv buff with the tt entries from the * specified tt hash - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @hash: hash table containing the tt entries * @tt_len: 
expected tvlv tt data buffer length in number of bytes * @tvlv_buff: pointer to the buffer to fill with the TT data @@ -2810,15 +2810,15 @@ static bool batadv_tt_global_check_crc(struct batadv_orig_node *orig_node, /** * batadv_tt_local_update_crc() - update all the local CRCs - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information */ static void batadv_tt_local_update_crc(struct batadv_priv *bat_priv) { - struct batadv_softif_vlan *vlan; + struct batadv_meshif_vlan *vlan; /* recompute the global CRC for each VLAN */ rcu_read_lock(); - hlist_for_each_entry_rcu(vlan, &bat_priv->softif_vlan_list, list) { + hlist_for_each_entry_rcu(vlan, &bat_priv->meshif_vlan_list, list) { vlan->tt.crc = batadv_tt_local_crc(bat_priv, vlan->vid); } rcu_read_unlock(); @@ -2826,7 +2826,7 @@ static void batadv_tt_local_update_crc(struct batadv_priv *bat_priv) /** * batadv_tt_global_update_crc() - update all the global CRCs for this orig_node - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @orig_node: the orig_node for which the CRCs have to be updated */ static void batadv_tt_global_update_crc(struct batadv_priv *bat_priv, @@ -2853,7 +2853,7 @@ static void batadv_tt_global_update_crc(struct batadv_priv *bat_priv, /** * batadv_send_tt_request() - send a TT Request message to a given node - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @dst_orig_node: the destination of the message * @ttvn: the version number that the source of the message is looking for * @tt_vlan: pointer to the first tvlv VLAN object to request @@ -2938,7 +2938,7 @@ static bool batadv_send_tt_request(struct batadv_priv *bat_priv, /** * batadv_send_other_tt_response() - send reply to tt request concerning another * node's translation table - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @tt_data: tt data containing the tt request information * @req_src: mac address of tt request sender * @req_dst: mac address of tt request recipient @@ -3029,7 +3029,7 @@ static bool batadv_send_other_tt_response(struct batadv_priv *bat_priv, /* Don't send the response, if larger than fragmented packet. 
*/ tt_len = sizeof(struct batadv_unicast_tvlv_packet) + tvlv_len; if (tt_len > atomic_read(&bat_priv->packet_size_max)) { - net_ratelimited_function(batadv_info, bat_priv->soft_iface, + net_ratelimited_function(batadv_info, bat_priv->mesh_iface, "Ignoring TT_REQUEST from %pM; Response size exceeds max packet size.\n", res_dst_orig_node->orig); goto out; @@ -3068,7 +3068,7 @@ static bool batadv_send_other_tt_response(struct batadv_priv *bat_priv, /** * batadv_send_my_tt_response() - send reply to tt request concerning this * node's translation table - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @tt_data: tt data containing the tt request information * @req_src: mac address of tt request sender * @@ -3185,7 +3185,7 @@ static bool batadv_send_my_tt_response(struct batadv_priv *bat_priv, /** * batadv_send_tt_response() - send reply to tt request - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @tt_data: tt data containing the tt request information * @req_src: mac address of tt request sender * @req_dst: mac address of tt request recipient @@ -3280,7 +3280,7 @@ static void batadv_tt_update_changes(struct batadv_priv *bat_priv, /** * batadv_is_my_client() - check if a client is served by the local node - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @addr: the mac address of the client to check * @vid: VLAN identifier * @@ -3309,7 +3309,7 @@ bool batadv_is_my_client(struct batadv_priv *bat_priv, const u8 *addr, /** * batadv_handle_tt_response() - process incoming tt reply - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @tt_data: tt data containing the tt request information * @resp_src: mac address of tt reply sender * @num_entries: number of tt change entries appended to the tt data @@ -3397,7 +3397,7 @@ static void batadv_tt_roam_purge(struct batadv_priv *bat_priv) /** * batadv_tt_check_roam_count() - check if a client has roamed too frequently - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @client: mac address of the roaming client * * This function checks whether the client already reached the @@ -3452,7 +3452,7 @@ static bool batadv_tt_check_roam_count(struct batadv_priv *bat_priv, u8 *client) /** * batadv_send_roam_adv() - send a roaming advertisement message - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @client: mac address of the roaming client * @vid: VLAN identifier * @orig_node: message destination @@ -3516,8 +3516,8 @@ static void batadv_tt_purge(struct work_struct *work) } /** - * batadv_tt_free() - Free translation table of soft interface - * @bat_priv: the bat priv with all the soft interface information + * batadv_tt_free() - Free translation table of mesh interface + * @bat_priv: the bat priv with all the mesh interface information */ void batadv_tt_free(struct batadv_priv *bat_priv) { @@ -3540,7 +3540,7 @@ void batadv_tt_free(struct batadv_priv *bat_priv) /** * batadv_tt_local_set_flags() - set or unset the specified flags on the local * table and possibly count them in the TT size - * @bat_priv: the bat priv with all the soft interface 
information + * @bat_priv: the bat priv with all the mesh interface information * @flags: the flag to switch * @enable: whether to set or unset the flag * @count: whether to increase the TT size by the number of changed entries @@ -3626,7 +3626,7 @@ static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv) /** * batadv_tt_local_commit_changes_nolock() - commit all pending local tt changes * which have been queued in the time since the last commit - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * * Caller must hold tt->commit_lock. */ @@ -3659,7 +3659,7 @@ static void batadv_tt_local_commit_changes_nolock(struct batadv_priv *bat_priv) /** * batadv_tt_local_commit_changes() - commit all pending local tt changes which * have been queued in the time since the last commit - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information */ void batadv_tt_local_commit_changes(struct batadv_priv *bat_priv) { @@ -3670,7 +3670,7 @@ void batadv_tt_local_commit_changes(struct batadv_priv *bat_priv) /** * batadv_is_ap_isolated() - Check if packet from upper layer should be dropped - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @src: source mac address of packet * @dst: destination mac address of packet * @vid: vlan id of packet @@ -3682,10 +3682,10 @@ bool batadv_is_ap_isolated(struct batadv_priv *bat_priv, u8 *src, u8 *dst, { struct batadv_tt_local_entry *tt_local_entry; struct batadv_tt_global_entry *tt_global_entry; - struct batadv_softif_vlan *vlan; + struct batadv_meshif_vlan *vlan; bool ret = false; - vlan = batadv_softif_vlan_get(bat_priv, vid); + vlan = batadv_meshif_vlan_get(bat_priv, vid); if (!vlan) return false; @@ -3707,14 +3707,14 @@ bool batadv_is_ap_isolated(struct batadv_priv *bat_priv, u8 *src, u8 *dst, local_entry_put: batadv_tt_local_entry_put(tt_local_entry); vlan_put: - batadv_softif_vlan_put(vlan); + batadv_meshif_vlan_put(vlan); return ret; } /** * batadv_tt_update_orig() - update global translation table with new tt * information received via ogms - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @orig_node: the orig_node of the ogm * @tt_buff: pointer to the first tvlv VLAN entry * @tt_num_vlan: number of tvlv VLAN entries @@ -3798,7 +3798,7 @@ static void batadv_tt_update_orig(struct batadv_priv *bat_priv, /** * batadv_tt_global_client_is_roaming() - check if a client is marked as roaming - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @addr: the mac address of the client to check * @vid: VLAN identifier * @@ -3824,7 +3824,7 @@ bool batadv_tt_global_client_is_roaming(struct batadv_priv *bat_priv, /** * batadv_tt_local_client_is_roaming() - tells whether the client is roaming - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @addr: the mac address of the local client to query * @vid: VLAN identifier * @@ -3850,7 +3850,7 @@ bool batadv_tt_local_client_is_roaming(struct batadv_priv *bat_priv, /** * batadv_tt_add_temporary_global_entry() - Add temporary entry to global TT - * @bat_priv: the bat priv with all the soft interface information + * 
@bat_priv: the bat priv with all the mesh interface information * @orig_node: orig node which the temporary entry should be associated with * @addr: mac address of the client * @vid: VLAN id of the new temporary global translation table @@ -3883,14 +3883,14 @@ bool batadv_tt_add_temporary_global_entry(struct batadv_priv *bat_priv, /** * batadv_tt_local_resize_to_mtu() - resize the local translation table fit the * maximum packet size that can be transported through the mesh - * @soft_iface: netdev struct of the mesh interface + * @mesh_iface: netdev struct of the mesh interface * * Remove entries older than 'timeout' and half timeout if more entries need * to be removed. */ -void batadv_tt_local_resize_to_mtu(struct net_device *soft_iface) +void batadv_tt_local_resize_to_mtu(struct net_device *mesh_iface) { - struct batadv_priv *bat_priv = netdev_priv(soft_iface); + struct batadv_priv *bat_priv = netdev_priv(mesh_iface); int packet_size_max = atomic_read(&bat_priv->packet_size_max); int table_size, timeout = BATADV_TT_LOCAL_TIMEOUT / 2; bool reduced = false; @@ -3907,7 +3907,7 @@ void batadv_tt_local_resize_to_mtu(struct net_device *soft_iface) timeout /= 2; reduced = true; - net_ratelimited_function(batadv_info, soft_iface, + net_ratelimited_function(batadv_info, mesh_iface, "Forced to purge local tt entries to fit new maximum fragment MTU (%i)\n", packet_size_max); } @@ -3923,7 +3923,7 @@ void batadv_tt_local_resize_to_mtu(struct net_device *soft_iface) /** * batadv_tt_tvlv_ogm_handler_v1() - process incoming tt tvlv container - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @orig: the orig_node of the ogm * @flags: flags indicating the tvlv state (see batadv_tvlv_handler_flags) * @tvlv_value: tvlv buffer containing the gateway data @@ -3962,7 +3962,7 @@ static void batadv_tt_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv, /** * batadv_tt_tvlv_unicast_handler_v1() - process incoming (unicast) tt tvlv * container - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @src: mac address of tt tvlv sender * @dst: mac address of tt tvlv recipient * @tvlv_value: tvlv buffer containing the tt data @@ -4044,7 +4044,7 @@ static int batadv_tt_tvlv_unicast_handler_v1(struct batadv_priv *bat_priv, /** * batadv_roam_tvlv_unicast_handler_v1() - process incoming tt roam tvlv * container - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @src: mac address of tt tvlv sender * @dst: mac address of tt tvlv recipient * @tvlv_value: tvlv buffer containing the tt data @@ -4093,7 +4093,7 @@ static int batadv_roam_tvlv_unicast_handler_v1(struct batadv_priv *bat_priv, /** * batadv_tt_init() - initialise the translation table internals - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * * Return: 0 on success or negative error number in case of failure. 
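For orientation, the handlers renamed in the preceding hunks are the ones batadv_tt_init() plugs into the TVLV dispatcher: the OGM handler for TT containers appended to OGMs, and the unicast handlers for TT request/response and roaming advertisements. Roughly, as a sketch hedged against signature drift (the third NULL slot for a multicast handler only exists in newer kernels):

	batadv_tvlv_handler_register(bat_priv, batadv_tt_tvlv_ogm_handler_v1,
				     batadv_tt_tvlv_unicast_handler_v1, NULL,
				     BATADV_TVLV_TT, 1, BATADV_NO_FLAGS);

	batadv_tvlv_handler_register(bat_priv, NULL,
				     batadv_roam_tvlv_unicast_handler_v1, NULL,
				     BATADV_TVLV_ROAM, 1, BATADV_NO_FLAGS);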
*/ @@ -4131,7 +4131,7 @@ int batadv_tt_init(struct batadv_priv *bat_priv) /** * batadv_tt_global_is_isolated() - check if a client is marked as isolated - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @addr: the mac address of the client * @vid: the identifier of the VLAN where this client is connected * diff --git a/net/batman-adv/translation-table.h b/net/batman-adv/translation-table.h index d18740d9a22b6..618d9dbca5eac 100644 --- a/net/batman-adv/translation-table.h +++ b/net/batman-adv/translation-table.h @@ -16,7 +16,7 @@ #include int batadv_tt_init(struct batadv_priv *bat_priv); -bool batadv_tt_local_add(struct net_device *soft_iface, const u8 *addr, +bool batadv_tt_local_add(struct net_device *mesh_iface, const u8 *addr, unsigned short vid, int ifindex, u32 mark); u16 batadv_tt_local_remove(struct batadv_priv *bat_priv, const u8 *addr, unsigned short vid, @@ -45,7 +45,7 @@ bool batadv_tt_global_client_is_roaming(struct batadv_priv *bat_priv, u8 *addr, unsigned short vid); bool batadv_tt_local_client_is_roaming(struct batadv_priv *bat_priv, u8 *addr, unsigned short vid); -void batadv_tt_local_resize_to_mtu(struct net_device *soft_iface); +void batadv_tt_local_resize_to_mtu(struct net_device *mesh_iface); bool batadv_tt_add_temporary_global_entry(struct batadv_priv *bat_priv, struct batadv_orig_node *orig_node, const unsigned char *addr, diff --git a/net/batman-adv/tvlv.c b/net/batman-adv/tvlv.c index 2a583215d439b..76dff1f9c559a 100644 --- a/net/batman-adv/tvlv.c +++ b/net/batman-adv/tvlv.c @@ -59,7 +59,7 @@ static void batadv_tvlv_handler_put(struct batadv_tvlv_handler *tvlv_handler) /** * batadv_tvlv_handler_get() - retrieve tvlv handler from the tvlv handler list * based on the provided type and version (both need to match) - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @type: tvlv handler type to look for * @version: tvlv handler version to look for * @@ -118,7 +118,7 @@ static void batadv_tvlv_container_put(struct batadv_tvlv_container *tvlv) /** * batadv_tvlv_container_get() - retrieve tvlv container from the tvlv container * list based on the provided type and version (both need to match) - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @type: tvlv container type to look for * @version: tvlv container version to look for * @@ -152,7 +152,7 @@ batadv_tvlv_container_get(struct batadv_priv *bat_priv, u8 type, u8 version) /** * batadv_tvlv_container_list_size() - calculate the size of the tvlv container * list entries - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * * Has to be called with the appropriate locks being acquired * (tvlv.container_list_lock). 
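/* Editor's sketch of the shape shared by batadv_tvlv_handler_get() and
 * batadv_tvlv_container_get() above: walk a list, match on (type, version),
 * and only return an entry whose refcount could still be raised. The struct
 * and names here are illustrative, not the batman-adv ones.
 */
struct tvlv_entry {
	struct hlist_node list;
	struct kref refcount;
	u8 type;
	u8 version;
};

static struct tvlv_entry *tvlv_find(struct hlist_head *head,
				    u8 type, u8 version)
{
	struct tvlv_entry *entry, *found = NULL;

	rcu_read_lock();
	hlist_for_each_entry_rcu(entry, head, list) {
		if (entry->type != type || entry->version != version)
			continue;
		/* skip entries already on their way to being freed */
		if (kref_get_unless_zero(&entry->refcount))
			found = entry;
		break;
	}
	rcu_read_unlock();
	return found;
}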
@@ -177,7 +177,7 @@ static u16 batadv_tvlv_container_list_size(struct batadv_priv *bat_priv) /** * batadv_tvlv_container_remove() - remove tvlv container from the tvlv * container list - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @tvlv: the to be removed tvlv container * * Has to be called with the appropriate locks being acquired @@ -201,7 +201,7 @@ static void batadv_tvlv_container_remove(struct batadv_priv *bat_priv, /** * batadv_tvlv_container_unregister() - unregister tvlv container based on the * provided type and version (both need to match) - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @type: tvlv container type to unregister * @version: tvlv container type to unregister */ @@ -219,7 +219,7 @@ void batadv_tvlv_container_unregister(struct batadv_priv *bat_priv, /** * batadv_tvlv_container_register() - register tvlv type, version and content * to be propagated with each (primary interface) OGM - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @type: tvlv container type * @version: tvlv container version * @tvlv_value: tvlv container content @@ -297,7 +297,7 @@ static bool batadv_tvlv_realloc_packet_buff(unsigned char **packet_buff, /** * batadv_tvlv_container_ogm_append() - append tvlv container content to given * OGM packet buffer - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @packet_buff: ogm packet buffer * @packet_buff_len: ogm packet buffer size including ogm header and tvlv * content @@ -350,7 +350,7 @@ u16 batadv_tvlv_container_ogm_append(struct batadv_priv *bat_priv, /** * batadv_tvlv_call_handler() - parse the given tvlv buffer to call the * appropriate handlers - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @tvlv_handler: tvlv callback function handling the tvlv content * @packet_type: indicates for which packet type the TVLV handler is called * @orig_node: orig node emitting the ogm packet @@ -421,7 +421,7 @@ static int batadv_tvlv_call_handler(struct batadv_priv *bat_priv, /** * batadv_tvlv_containers_process() - parse the given tvlv buffer to call the * appropriate handlers - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @packet_type: indicates for which packet type the TVLV handler is called * @orig_node: orig node emitting the ogm packet * @skb: the skb the TVLV handler is called for @@ -490,7 +490,7 @@ int batadv_tvlv_containers_process(struct batadv_priv *bat_priv, /** * batadv_tvlv_ogm_receive() - process an incoming ogm and call the appropriate * handlers - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @batadv_ogm_packet: ogm packet containing the tvlv containers * @orig_node: orig node emitting the ogm packet */ @@ -518,7 +518,7 @@ void batadv_tvlv_ogm_receive(struct batadv_priv *bat_priv, * batadv_tvlv_handler_register() - register tvlv handler based on the provided * type and version (both need to match) for ogm tvlv payload and/or unicast * payload - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat 
priv with all the mesh interface information * @optr: ogm tvlv handler callback function. This function receives the orig * node, flags and the tvlv content as argument to process. * @uptr: unicast tvlv handler callback function. This function receives the @@ -583,7 +583,7 @@ void batadv_tvlv_handler_register(struct batadv_priv *bat_priv, /** * batadv_tvlv_handler_unregister() - unregister tvlv handler based on the * provided type and version (both need to match) - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @type: tvlv handler type to be unregistered * @version: tvlv handler version to be unregistered */ @@ -606,7 +606,7 @@ void batadv_tvlv_handler_unregister(struct batadv_priv *bat_priv, /** * batadv_tvlv_unicast_send() - send a unicast packet with tvlv payload to the * specified host - * @bat_priv: the bat priv with all the soft interface information + * @bat_priv: the bat priv with all the mesh interface information * @src: source mac address of the unicast packet * @dst: destination mac address of the unicast packet * @type: tvlv type diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h index fe89f08533fe6..0ca0fc072fc9c 100644 --- a/net/batman-adv/types.h +++ b/net/batman-adv/types.h @@ -186,6 +186,9 @@ struct batadv_hard_iface { /** @net_dev: pointer to the net_device */ struct net_device *net_dev; + /** @dev_tracker: device tracker for @net_dev */ + netdevice_tracker dev_tracker; + /** @refcount: number of contexts the object is used */ struct kref refcount; @@ -196,10 +199,13 @@ struct batadv_hard_iface { struct packet_type batman_adv_ptype; /** - * @soft_iface: the batman-adv interface which uses this network + * @mesh_iface: the batman-adv interface which uses this network * interface */ - struct net_device *soft_iface; + struct net_device *mesh_iface; + + /** @meshif_dev_tracker: device tracker for @mesh_iface */ + netdevice_tracker meshif_dev_tracker; /** @rcu: struct used for freeing in an RCU-safe manner */ struct rcu_head rcu; @@ -487,7 +493,7 @@ struct batadv_orig_node { /** @hash_entry: hlist node for &batadv_priv.orig_hash */ struct hlist_node hash_entry; - /** @bat_priv: pointer to soft_iface this orig node belongs to */ + /** @bat_priv: pointer to mesh_iface this orig node belongs to */ struct batadv_priv *bat_priv; /** @bcast_seqno_lock: lock protecting bcast_bits & last_bcast_seqno */ @@ -899,13 +905,13 @@ enum batadv_counters { /** * @BATADV_CNT_MCAST_RX_LOCAL: counter for received batman-adv multicast - * packets which were forwarded to the local soft interface + * packets which were forwarded to the local mesh interface */ BATADV_CNT_MCAST_RX_LOCAL, /** * @BATADV_CNT_MCAST_RX_LOCAL_BYTES: bytes counter for received - * batman-adv multicast packets which were forwarded to the local soft + * batman-adv multicast packets which were forwarded to the local mesh * interface */ BATADV_CNT_MCAST_RX_LOCAL_BYTES, @@ -1137,29 +1143,6 @@ struct batadv_priv_bla { }; #endif -#ifdef CONFIG_BATMAN_ADV_DEBUG - -/** - * struct batadv_priv_debug_log - debug logging data - */ -struct batadv_priv_debug_log { - /** @log_buff: buffer holding the logs (ring buffer) */ - char log_buff[BATADV_LOG_BUF_LEN]; - - /** @log_start: index of next character to read */ - unsigned long log_start; - - /** @log_end: index of next character to write */ - unsigned long log_end; - - /** @lock: lock protecting log_buff, log_start & log_end */ - spinlock_t lock; - - /** @queue_wait: log reader's wait queue 
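/* Editor's sketch of how the @dev_tracker and @meshif_dev_tracker fields
 * added to struct batadv_hard_iface above are typically used:
 * netdev_hold()/netdev_put() tie each net_device reference to a tracker so
 * that leaks can be attributed when CONFIG_NET_DEV_REFCNT_TRACKER is
 * enabled. struct my_ctx and its helpers are illustrative only.
 */
struct my_ctx {
	struct net_device *dev;
	netdevice_tracker dev_tracker;
};

static void my_ctx_attach(struct my_ctx *ctx, struct net_device *dev)
{
	ctx->dev = dev;
	netdev_hold(dev, &ctx->dev_tracker, GFP_KERNEL);
}

static void my_ctx_detach(struct my_ctx *ctx)
{
	netdev_put(ctx->dev, &ctx->dev_tracker);	/* pairs with the hold */
	ctx->dev = NULL;
}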
*/ - wait_queue_head_t queue_wait; -}; -#endif - /** * struct batadv_priv_gw - per mesh interface gateway data */ @@ -1264,7 +1247,7 @@ struct batadv_mcast_mla_flags { /** @enabled: whether the multicast tvlv is currently enabled */ unsigned char enabled:1; - /** @bridged: whether the soft interface has a bridge on top */ + /** @bridged: whether the mesh interface has a bridge on top */ unsigned char bridged:1; /** @tvlv_flags: the flags we have last sent in our mcast tvlv */ @@ -1400,7 +1383,7 @@ struct batadv_priv_nc { /** * @decoding_hash: Hash table used to buffer skbs that might be needed * to decode a received coded skb. The buffer is used for 1) skbs - * arriving on the soft-interface; 2) skbs overheard on the + * arriving on the mesh-interface; 2) skbs overheard on the * hard-interface; and 3) skbs forwarded by batman-adv. */ struct batadv_hashtable *decoding_hash; @@ -1553,9 +1536,9 @@ struct batadv_tp_vars { }; /** - * struct batadv_softif_vlan - per VLAN attributes set + * struct batadv_meshif_vlan - per VLAN attributes set */ -struct batadv_softif_vlan { +struct batadv_meshif_vlan { /** @bat_priv: pointer to the mesh object */ struct batadv_priv *bat_priv; @@ -1568,7 +1551,7 @@ struct batadv_softif_vlan { /** @tt: TT private attributes (VLAN specific) */ struct batadv_vlan_tt tt; - /** @list: list node for &bat_priv.softif_vlan_list */ + /** @list: list node for &bat_priv.meshif_vlan_list */ struct hlist_node list; /** @@ -1581,7 +1564,7 @@ struct batadv_softif_vlan { }; /** - * struct batadv_priv_bat_v - B.A.T.M.A.N. V per soft-interface private data + * struct batadv_priv_bat_v - B.A.T.M.A.N. V per mesh-interface private data */ struct batadv_priv_bat_v { /** @ogm_buff: buffer holding the OGM packet */ @@ -1610,8 +1593,8 @@ struct batadv_priv { */ atomic_t mesh_state; - /** @soft_iface: net device which holds this struct as private data */ - struct net_device *soft_iface; + /** @mesh_iface: net device which holds this struct as private data */ + struct net_device *mesh_iface; /** * @mtu_set_by_user: MTU was set once by user @@ -1760,24 +1743,19 @@ struct batadv_priv { struct batadv_algo_ops *algo_ops; /** - * @softif_vlan_list: a list of softif_vlan structs, one per VLAN + * @meshif_vlan_list: a list of meshif_vlan structs, one per VLAN * created on top of the mesh interface represented by this object */ - struct hlist_head softif_vlan_list; + struct hlist_head meshif_vlan_list; - /** @softif_vlan_list_lock: lock protecting softif_vlan_list */ - spinlock_t softif_vlan_list_lock; + /** @meshif_vlan_list_lock: lock protecting meshif_vlan_list */ + spinlock_t meshif_vlan_list_lock; #ifdef CONFIG_BATMAN_ADV_BLA /** @bla: bridge loop avoidance data */ struct batadv_priv_bla bla; #endif -#ifdef CONFIG_BATMAN_ADV_DEBUG - /** @debug_log: holding debug logging relevant data */ - struct batadv_priv_debug_log *debug_log; -#endif - /** @gw: gateway data */ struct batadv_priv_gw gw; @@ -1808,7 +1786,7 @@ struct batadv_priv { #endif /* CONFIG_BATMAN_ADV_NC */ #ifdef CONFIG_BATMAN_ADV_BATMAN_V - /** @bat_v: B.A.T.M.A.N. V per soft-interface private data */ + /** @bat_v: B.A.T.M.A.N. 
V per mesh-interface private data */ struct batadv_priv_bat_v bat_v; #endif }; @@ -1831,7 +1809,7 @@ struct batadv_bla_backbone_gw { /** @hash_entry: hlist node for &batadv_priv_bla.backbone_hash */ struct hlist_node hash_entry; - /** @bat_priv: pointer to soft_iface this backbone gateway belongs to */ + /** @bat_priv: pointer to mesh_iface this backbone gateway belongs to */ struct batadv_priv *bat_priv; /** @lasttime: last time we heard of this backbone gw */ @@ -1936,8 +1914,8 @@ struct batadv_tt_local_entry { /** @last_seen: timestamp used for purging stale tt local entries */ unsigned long last_seen; - /** @vlan: soft-interface vlan of the entry */ - struct batadv_softif_vlan *vlan; + /** @vlan: mesh-interface vlan of the entry */ + struct batadv_meshif_vlan *vlan; }; /** @@ -2161,7 +2139,7 @@ struct batadv_forw_packet { u16 packet_len; /** @direct_link_flags: direct link flags for aggregated OGM packets */ - u32 direct_link_flags; + DECLARE_BITMAP(direct_link_flags, BATADV_MAX_AGGREGATION_PACKETS); /** @num_packets: counter for aggregated OGMv1 packets */ u8 num_packets; diff --git a/net/bluetooth/6lowpan.c b/net/bluetooth/6lowpan.c index 50cfec8ccac4f..f0c862091bff2 100644 --- a/net/bluetooth/6lowpan.c +++ b/net/bluetooth/6lowpan.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include @@ -443,7 +444,7 @@ static int send_pkt(struct l2cap_chan *chan, struct sk_buff *skb, memset(&msg, 0, sizeof(msg)); iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, &iv, 1, skb->len); - err = l2cap_chan_send(chan, &msg, skb->len); + err = l2cap_chan_send(chan, &msg, skb->len, NULL); if (err > 0) { netdev->stats.tx_bytes += err; netdev->stats.tx_packets++; @@ -825,11 +826,16 @@ static struct sk_buff *chan_alloc_skb_cb(struct l2cap_chan *chan, unsigned long hdr_len, unsigned long len, int nb) { + struct sk_buff *skb; + /* Note that we must allocate using GFP_ATOMIC here as * this function is called originally from netdev hard xmit * function in atomic context. 
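/* Editor's sketch of the DECLARE_BITMAP() conversion in
 * struct batadv_forw_packet a few hunks above: per-packet flags move from
 * open-coded shifts on a u32 to the generic bitmap API, so the storage
 * follows BATADV_MAX_AGGREGATION_PACKETS instead of being capped at 32
 * bits. MAX_PKTS and the helpers are illustrative only.
 */
#define MAX_PKTS 64

static DECLARE_BITMAP(direct_link, MAX_PKTS);

static void mark_direct(unsigned int idx)
{
	set_bit(idx, direct_link);		/* was: flags |= BIT(idx) */
}

static bool is_direct(unsigned int idx)
{
	return test_bit(idx, direct_link);	/* was: flags & BIT(idx) */
}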
*/ - return bt_skb_alloc(hdr_len + len, GFP_ATOMIC); + skb = bt_skb_alloc(hdr_len + len, GFP_ATOMIC); + if (!skb) + return ERR_PTR(-ENOMEM); + return skb; } static void chan_suspend_cb(struct l2cap_chan *chan) diff --git a/net/bluetooth/coredump.c b/net/bluetooth/coredump.c index c18df3a086075..819eacb387622 100644 --- a/net/bluetooth/coredump.c +++ b/net/bluetooth/coredump.c @@ -240,6 +240,26 @@ static void hci_devcd_handle_pkt_pattern(struct hci_dev *hdev, bt_dev_dbg(hdev, "Failed to set pattern"); } +static void hci_devcd_dump(struct hci_dev *hdev) +{ + struct sk_buff *skb; + u32 size; + + bt_dev_dbg(hdev, "state %d", hdev->dump.state); + + size = hdev->dump.tail - hdev->dump.head; + + /* Emit a devcoredump with the available data */ + dev_coredumpv(&hdev->dev, hdev->dump.head, size, GFP_KERNEL); + + /* Send a copy to monitor as a diagnostic packet */ + skb = bt_skb_alloc(size, GFP_ATOMIC); + if (skb) { + skb_put_data(skb, hdev->dump.head, size); + hci_recv_diag(hdev, skb); + } +} + static void hci_devcd_handle_pkt_complete(struct hci_dev *hdev, struct sk_buff *skb) { @@ -256,7 +276,7 @@ static void hci_devcd_handle_pkt_complete(struct hci_dev *hdev, bt_dev_dbg(hdev, "complete with size %u (expect %zu)", dump_size, hdev->dump.alloc_size); - dev_coredumpv(&hdev->dev, hdev->dump.head, dump_size, GFP_KERNEL); + hci_devcd_dump(hdev); } static void hci_devcd_handle_pkt_abort(struct hci_dev *hdev, @@ -275,8 +295,7 @@ static void hci_devcd_handle_pkt_abort(struct hci_dev *hdev, bt_dev_dbg(hdev, "aborted with size %u (expect %zu)", dump_size, hdev->dump.alloc_size); - /* Emit a devcoredump with the available data */ - dev_coredumpv(&hdev->dev, hdev->dump.head, dump_size, GFP_KERNEL); + hci_devcd_dump(hdev); } /* Bluetooth devcoredump state machine. @@ -391,8 +410,7 @@ void hci_devcd_timeout(struct work_struct *work) bt_dev_dbg(hdev, "timeout with size %u (expect %zu)", dump_size, hdev->dump.alloc_size); - /* Emit a devcoredump with the available data */ - dev_coredumpv(&hdev->dev, hdev->dump.head, dump_size, GFP_KERNEL); + hci_devcd_dump(hdev); hci_devcd_reset(hdev); diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c index d097e308a7554..95972fd4c7843 100644 --- a/net/bluetooth/hci_conn.c +++ b/net/bluetooth/hci_conn.c @@ -27,6 +27,7 @@ #include #include +#include #include #include @@ -1002,6 +1003,7 @@ static struct hci_conn *__hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t } skb_queue_head_init(&conn->data_q); + skb_queue_head_init(&conn->tx_q.queue); INIT_LIST_HEAD(&conn->chan_list); INIT_LIST_HEAD(&conn->link_list); @@ -1155,6 +1157,7 @@ void hci_conn_del(struct hci_conn *conn) } skb_queue_purge(&conn->data_q); + skb_queue_purge(&conn->tx_q.queue); /* Remove the connection from the list and cleanup its remaining * state. This is a separate function since for some cases like @@ -3064,3 +3067,122 @@ int hci_abort_conn(struct hci_conn *conn, u8 reason) */ return hci_cmd_sync_run_once(hdev, abort_conn_sync, conn, NULL); } + +void hci_setup_tx_timestamp(struct sk_buff *skb, size_t key_offset, + const struct sockcm_cookie *sockc) +{ + struct sock *sk = skb ? skb->sk : NULL; + + /* This shall be called on a single skb of those generated by user + * sendmsg(), and only when the sendmsg() does not return error to + * user. This is required for keeping the tskey that increments here in + * sync with possible sendmsg() counting by user. + * + * Stream sockets shall set key_offset to sendmsg() length in bytes + * and call with the last fragment, others to 1 and first fragment. 
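/* Editor's sketch of the key_offset contract described above, with
 * illustrative variables: datagram-style callers stamp the first fragment
 * and advance the key by one, while stream-style callers stamp the last
 * fragment and advance the key by the full sendmsg() byte count, keeping
 * sk_tskey in step with what the application counted.
 */
	if (sk->sk_type == SOCK_STREAM)
		hci_setup_tx_timestamp(last_skb, total_len, &sockc);
	else
		hci_setup_tx_timestamp(first_skb, 1, &sockc);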
+ */ + + if (!skb || !sockc || !sk || !key_offset) + return; + + sock_tx_timestamp(sk, sockc, &skb_shinfo(skb)->tx_flags); + + if (sockc->tsflags & SOF_TIMESTAMPING_OPT_ID && + sockc->tsflags & SOF_TIMESTAMPING_TX_RECORD_MASK) { + if (sockc->tsflags & SOCKCM_FLAG_TS_OPT_ID) { + skb_shinfo(skb)->tskey = sockc->ts_opt_id; + } else { + int key = atomic_add_return(key_offset, &sk->sk_tskey); + + skb_shinfo(skb)->tskey = key - 1; + } + } +} + +void hci_conn_tx_queue(struct hci_conn *conn, struct sk_buff *skb) +{ + struct tx_queue *comp = &conn->tx_q; + bool track = false; + + /* Emit SND now, ie. just before sending to driver */ + if (skb_shinfo(skb)->tx_flags & SKBTX_SW_TSTAMP) + __skb_tstamp_tx(skb, NULL, NULL, skb->sk, SCM_TSTAMP_SND); + + /* COMPLETION tstamp is emitted for tracked skb later in Number of + * Completed Packets event. Available only for flow controlled cases. + * + * TODO: SCO support without flowctl (needs to be done in drivers) + */ + switch (conn->type) { + case ISO_LINK: + case ACL_LINK: + case LE_LINK: + break; + case SCO_LINK: + case ESCO_LINK: + if (!hci_dev_test_flag(conn->hdev, HCI_SCO_FLOWCTL)) + return; + break; + default: + return; + } + + if (skb->sk && (skb_shinfo(skb)->tx_flags & SKBTX_COMPLETION_TSTAMP)) + track = true; + + /* If nothing is tracked, just count extra skbs at the queue head */ + if (!track && !comp->tracked) { + comp->extra++; + return; + } + + if (track) { + skb = skb_clone_sk(skb); + if (!skb) + goto count_only; + + comp->tracked++; + } else { + skb = skb_clone(skb, GFP_KERNEL); + if (!skb) + goto count_only; + } + + skb_queue_tail(&comp->queue, skb); + return; + +count_only: + /* Stop tracking skbs, and only count. This will not emit timestamps for + * the packets, but if we get here something is more seriously wrong. + */ + comp->tracked = 0; + comp->extra += skb_queue_len(&comp->queue) + 1; + skb_queue_purge(&comp->queue); +} + +void hci_conn_tx_dequeue(struct hci_conn *conn) +{ + struct tx_queue *comp = &conn->tx_q; + struct sk_buff *skb; + + /* If there are tracked skbs, the counted extra go before dequeuing real + * skbs, to keep ordering. When nothing is tracked, the ordering doesn't + * matter so dequeue real skbs first to get rid of them ASAP. 
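/* Editor's note on the queueing path above: tracked packets are cloned
 * with skb_clone_sk() so the clone keeps a reference to the owning socket,
 * which __skb_tstamp_tx() needs in skb->sk to deliver the COMPLETION
 * timestamp to that socket's error queue. Untracked packets sitting behind
 * tracked ones use a plain skb_clone() (or are merely counted in ->extra)
 * since no timestamp will ever be emitted for them.
 */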
+ */ + if (comp->extra && (comp->tracked || skb_queue_empty(&comp->queue))) { + comp->extra--; + return; + } + + skb = skb_dequeue(&comp->queue); + if (!skb) + return; + + if (skb->sk) { + comp->tracked--; + __skb_tstamp_tx(skb, NULL, NULL, skb->sk, + SCM_TSTAMP_COMPLETION); + } + + kfree_skb(skb); +} diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index e7ec12437c8b1..5eb0600bbd03c 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -57,6 +57,7 @@ DEFINE_RWLOCK(hci_dev_list_lock); /* HCI callback list */ LIST_HEAD(hci_cb_list); +DEFINE_MUTEX(hci_cb_list_lock); /* HCI ID Numbering */ static DEFINE_IDA(hci_index_ida); @@ -2972,7 +2973,9 @@ int hci_register_cb(struct hci_cb *cb) { BT_DBG("%p name %s", cb, cb->name); - list_add_tail_rcu(&cb->list, &hci_cb_list); + mutex_lock(&hci_cb_list_lock); + list_add_tail(&cb->list, &hci_cb_list); + mutex_unlock(&hci_cb_list_lock); return 0; } @@ -2982,8 +2985,9 @@ int hci_unregister_cb(struct hci_cb *cb) { BT_DBG("%p name %s", cb, cb->name); - list_del_rcu(&cb->list); - synchronize_rcu(); + mutex_lock(&hci_cb_list_lock); + list_del(&cb->list); + mutex_unlock(&hci_cb_list_lock); return 0; } @@ -3025,6 +3029,13 @@ static int hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb) return 0; } +static int hci_send_conn_frame(struct hci_dev *hdev, struct hci_conn *conn, + struct sk_buff *skb) +{ + hci_conn_tx_queue(conn, skb); + return hci_send_frame(hdev, skb); +} + /* Send HCI command */ int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, const void *param) @@ -3548,51 +3559,45 @@ static void __check_timeout(struct hci_dev *hdev, unsigned int cnt, u8 type) } /* Schedule SCO */ -static void hci_sched_sco(struct hci_dev *hdev) +static void hci_sched_sco(struct hci_dev *hdev, __u8 type) { struct hci_conn *conn; struct sk_buff *skb; - int quote; + int quote, *cnt; + unsigned int pkts = hdev->sco_pkts; - BT_DBG("%s", hdev->name); + bt_dev_dbg(hdev, "type %u", type); - if (!hci_conn_num(hdev, SCO_LINK)) + if (!hci_conn_num(hdev, type) || !pkts) return; - while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, "e))) { - while (quote-- && (skb = skb_dequeue(&conn->data_q))) { - BT_DBG("skb %p len %d", skb, skb->len); - hci_send_frame(hdev, skb); - - conn->sent++; - if (conn->sent == ~0) - conn->sent = 0; - } - } -} - -static void hci_sched_esco(struct hci_dev *hdev) -{ - struct hci_conn *conn; - struct sk_buff *skb; - int quote; - - BT_DBG("%s", hdev->name); - - if (!hci_conn_num(hdev, ESCO_LINK)) - return; + /* Use sco_pkts if flow control has not been enabled which will limit + * the amount of buffer sent in a row. + */ + if (!hci_dev_test_flag(hdev, HCI_SCO_FLOWCTL)) + cnt = &pkts; + else + cnt = &hdev->sco_cnt; - while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, - "e))) { + while (*cnt && (conn = hci_low_sent(hdev, type, "e))) { while (quote-- && (skb = skb_dequeue(&conn->data_q))) { BT_DBG("skb %p len %d", skb, skb->len); - hci_send_frame(hdev, skb); + hci_send_conn_frame(hdev, conn, skb); conn->sent++; if (conn->sent == ~0) conn->sent = 0; + (*cnt)--; } } + + /* Rescheduled if all packets were sent and flow control is not enabled + * as there could be more packets queued that could not be sent and + * since no HCI_EV_NUM_COMP_PKTS event will be generated the reschedule + * needs to be forced. 
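/* Editor's sketch of what the hci_cb_list conversion above buys: with a
 * mutex instead of list_add_tail_rcu()/synchronize_rcu(), the registered
 * callbacks themselves may now sleep. The resulting iteration pattern
 * (simplified; the real inline helpers live in hci_core.h) looks like:
 */
static void call_connect_cfm(struct hci_conn *conn, __u8 status)
{
	struct hci_cb *cb;

	mutex_lock(&hci_cb_list_lock);
	list_for_each_entry(cb, &hci_cb_list, list) {
		if (cb->connect_cfm)
			cb->connect_cfm(conn, status);	/* may sleep now */
	}
	mutex_unlock(&hci_cb_list_lock);
}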
+ */ + if (!pkts && !hci_dev_test_flag(hdev, HCI_SCO_FLOWCTL)) + queue_work(hdev->workqueue, &hdev->tx_work); } static void hci_sched_acl_pkt(struct hci_dev *hdev) @@ -3620,7 +3625,7 @@ static void hci_sched_acl_pkt(struct hci_dev *hdev) hci_conn_enter_active_mode(chan->conn, bt_cb(skb)->force_active); - hci_send_frame(hdev, skb); + hci_send_conn_frame(hdev, chan->conn, skb); hdev->acl_last_tx = jiffies; hdev->acl_cnt--; @@ -3628,8 +3633,8 @@ static void hci_sched_acl_pkt(struct hci_dev *hdev) chan->conn->sent++; /* Send pending SCO packets right away */ - hci_sched_sco(hdev); - hci_sched_esco(hdev); + hci_sched_sco(hdev, SCO_LINK); + hci_sched_sco(hdev, ESCO_LINK); } } @@ -3676,7 +3681,7 @@ static void hci_sched_le(struct hci_dev *hdev) skb = skb_dequeue(&chan->data_q); - hci_send_frame(hdev, skb); + hci_send_conn_frame(hdev, chan->conn, skb); hdev->le_last_tx = jiffies; (*cnt)--; @@ -3684,8 +3689,8 @@ static void hci_sched_le(struct hci_dev *hdev) chan->conn->sent++; /* Send pending SCO packets right away */ - hci_sched_sco(hdev); - hci_sched_esco(hdev); + hci_sched_sco(hdev, SCO_LINK); + hci_sched_sco(hdev, ESCO_LINK); } } @@ -3710,7 +3715,7 @@ static void hci_sched_iso(struct hci_dev *hdev) while (*cnt && (conn = hci_low_sent(hdev, ISO_LINK, "e))) { while (quote-- && (skb = skb_dequeue(&conn->data_q))) { BT_DBG("skb %p len %d", skb, skb->len); - hci_send_frame(hdev, skb); + hci_send_conn_frame(hdev, conn, skb); conn->sent++; if (conn->sent == ~0) @@ -3730,8 +3735,8 @@ static void hci_tx_work(struct work_struct *work) if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) { /* Schedule queues and send stuff to HCI driver */ - hci_sched_sco(hdev); - hci_sched_esco(hdev); + hci_sched_sco(hdev, SCO_LINK); + hci_sched_sco(hdev, ESCO_LINK); hci_sched_iso(hdev); hci_sched_acl(hdev); hci_sched_le(hdev); diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index 2cc7a93063501..83990c975c1f6 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -151,7 +151,7 @@ static u8 hci_cc_exit_periodic_inq(struct hci_dev *hdev, void *data, static u8 hci_cc_remote_name_req_cancel(struct hci_dev *hdev, void *data, struct sk_buff *skb) { - struct hci_ev_status *rp = data; + struct hci_rp_remote_name_req_cancel *rp = data; bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); @@ -930,6 +930,9 @@ static u8 hci_cc_read_buffer_size(struct hci_dev *hdev, void *data, hdev->sco_pkts = 8; } + if (!read_voice_setting_capable(hdev)) + hdev->sco_pkts = 0; + hdev->acl_cnt = hdev->acl_pkts; hdev->sco_cnt = hdev->sco_pkts; @@ -3391,23 +3394,30 @@ static void hci_disconn_complete_evt(struct hci_dev *hdev, void *data, hci_update_scan(hdev); } - params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type); - if (params) { - switch (params->auto_connect) { - case HCI_AUTO_CONN_LINK_LOSS: - if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT) + /* Re-enable passive scanning if disconnected device is marked + * as auto-connectable. 
+ */ + if (conn->type == LE_LINK) { + params = hci_conn_params_lookup(hdev, &conn->dst, + conn->dst_type); + if (params) { + switch (params->auto_connect) { + case HCI_AUTO_CONN_LINK_LOSS: + if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT) + break; + fallthrough; + + case HCI_AUTO_CONN_DIRECT: + case HCI_AUTO_CONN_ALWAYS: + hci_pend_le_list_del_init(params); + hci_pend_le_list_add(params, + &hdev->pend_le_conns); + hci_update_passive_scan(hdev); break; - fallthrough; - case HCI_AUTO_CONN_DIRECT: - case HCI_AUTO_CONN_ALWAYS: - hci_pend_le_list_del_init(params); - hci_pend_le_list_add(params, &hdev->pend_le_conns); - hci_update_passive_scan(hdev); - break; - - default: - break; + default: + break; + } } } @@ -4005,8 +4015,8 @@ static const struct hci_cc { HCI_CC_STATUS(HCI_OP_INQUIRY_CANCEL, hci_cc_inquiry_cancel), HCI_CC_STATUS(HCI_OP_PERIODIC_INQ, hci_cc_periodic_inq), HCI_CC_STATUS(HCI_OP_EXIT_PERIODIC_INQ, hci_cc_exit_periodic_inq), - HCI_CC_STATUS(HCI_OP_REMOTE_NAME_REQ_CANCEL, - hci_cc_remote_name_req_cancel), + HCI_CC(HCI_OP_REMOTE_NAME_REQ_CANCEL, hci_cc_remote_name_req_cancel, + sizeof(struct hci_rp_remote_name_req_cancel)), HCI_CC(HCI_OP_ROLE_DISCOVERY, hci_cc_role_discovery, sizeof(struct hci_rp_role_discovery)), HCI_CC(HCI_OP_READ_LINK_POLICY, hci_cc_read_link_policy, @@ -4405,6 +4415,7 @@ static void hci_num_comp_pkts_evt(struct hci_dev *hdev, void *data, struct hci_comp_pkts_info *info = &ev->handles[i]; struct hci_conn *conn; __u16 handle, count; + unsigned int i; handle = __le16_to_cpu(info->handle); count = __le16_to_cpu(info->count); @@ -4415,6 +4426,9 @@ static void hci_num_comp_pkts_evt(struct hci_dev *hdev, void *data, conn->sent -= count; + for (i = 0; i < count; ++i) + hci_conn_tx_dequeue(conn); + switch (conn->type) { case ACL_LINK: hdev->acl_cnt += count; @@ -4435,9 +4449,11 @@ static void hci_num_comp_pkts_evt(struct hci_dev *hdev, void *data, break; case SCO_LINK: + case ESCO_LINK: hdev->sco_cnt += count; if (hdev->sco_cnt > hdev->sco_pkts) hdev->sco_cnt = hdev->sco_pkts; + break; case ISO_LINK: diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c index dd770ef5ec368..609b035e5c904 100644 --- a/net/bluetooth/hci_sync.c +++ b/net/bluetooth/hci_sync.c @@ -1910,7 +1910,7 @@ int hci_schedule_adv_instance_sync(struct hci_dev *hdev, u8 instance, hdev->adv_instance_timeout = timeout; queue_delayed_work(hdev->req_workqueue, &hdev->adv_instance_expire, - msecs_to_jiffies(timeout * 1000)); + secs_to_jiffies(timeout)); } /* If we're just re-scheduling the same instance again then do not @@ -3696,6 +3696,9 @@ static int hci_read_local_name_sync(struct hci_dev *hdev) /* Read Voice Setting */ static int hci_read_voice_setting_sync(struct hci_dev *hdev) { + if (!read_voice_setting_capable(hdev)) + return 0; + return __hci_cmd_sync_status(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL, HCI_CMD_TIMEOUT); } @@ -3766,6 +3769,28 @@ static int hci_write_ca_timeout_sync(struct hci_dev *hdev) sizeof(param), ¶m, HCI_CMD_TIMEOUT); } +/* Enable SCO flow control if supported */ +static int hci_write_sync_flowctl_sync(struct hci_dev *hdev) +{ + struct hci_cp_write_sync_flowctl cp; + int err; + + /* Check if the controller supports SCO and HCI_OP_WRITE_SYNC_FLOWCTL */ + if (!lmp_sco_capable(hdev) || !(hdev->commands[10] & BIT(4)) || + !test_bit(HCI_QUIRK_SYNC_FLOWCTL_SUPPORTED, &hdev->quirks)) + return 0; + + memset(&cp, 0, sizeof(cp)); + cp.enable = 0x01; + + err = __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SYNC_FLOWCTL, + sizeof(cp), &cp, HCI_CMD_TIMEOUT); + if (!err) + 
hci_dev_set_flag(hdev, HCI_SCO_FLOWCTL); + + return err; +} + /* BR Controller init stage 2 command sequence */ static const struct hci_init_stage br_init2[] = { /* HCI_OP_READ_BUFFER_SIZE */ @@ -3784,6 +3809,8 @@ static const struct hci_init_stage br_init2[] = { HCI_INIT(hci_clear_event_filter_sync), /* HCI_OP_WRITE_CA_TIMEOUT */ HCI_INIT(hci_write_ca_timeout_sync), + /* HCI_OP_WRITE_SYNC_FLOWCTL */ + HCI_INIT(hci_write_sync_flowctl_sync), {} }; @@ -4129,7 +4156,8 @@ static int hci_read_page_scan_type_sync(struct hci_dev *hdev) * support the Read Page Scan Type command. Check support for * this command in the bit mask of supported commands. */ - if (!(hdev->commands[13] & 0x01)) + if (!(hdev->commands[13] & 0x01) || + test_bit(HCI_QUIRK_BROKEN_READ_PAGE_SCAN_TYPE, &hdev->quirks)) return 0; return __hci_cmd_sync_status(hdev, HCI_OP_READ_PAGE_SCAN_TYPE, diff --git a/net/bluetooth/iso.c b/net/bluetooth/iso.c index 44acddf58a0cd..3501a991f1c64 100644 --- a/net/bluetooth/iso.c +++ b/net/bluetooth/iso.c @@ -518,7 +518,8 @@ static struct bt_iso_qos *iso_sock_get_qos(struct sock *sk) return &iso_pi(sk)->qos; } -static int iso_send_frame(struct sock *sk, struct sk_buff *skb) +static int iso_send_frame(struct sock *sk, struct sk_buff *skb, + const struct sockcm_cookie *sockc) { struct iso_conn *conn = iso_pi(sk)->conn; struct bt_iso_qos *qos = iso_sock_get_qos(sk); @@ -538,10 +539,12 @@ static int iso_send_frame(struct sock *sk, struct sk_buff *skb) hdr->slen = cpu_to_le16(hci_iso_data_len_pack(len, HCI_ISO_STATUS_VALID)); - if (sk->sk_state == BT_CONNECTED) + if (sk->sk_state == BT_CONNECTED) { + hci_setup_tx_timestamp(skb, 1, sockc); hci_send_iso(conn->hcon, skb); - else + } else { len = -ENOTCONN; + } return len; } @@ -1348,6 +1351,7 @@ static int iso_sock_sendmsg(struct socket *sock, struct msghdr *msg, { struct sock *sk = sock->sk; struct sk_buff *skb, **frag; + struct sockcm_cookie sockc; size_t mtu; int err; @@ -1360,6 +1364,14 @@ static int iso_sock_sendmsg(struct socket *sock, struct msghdr *msg, if (msg->msg_flags & MSG_OOB) return -EOPNOTSUPP; + hci_sockcm_init(&sockc, sk); + + if (msg->msg_controllen) { + err = sock_cmsg_send(sk, msg, &sockc); + if (err) + return err; + } + lock_sock(sk); if (sk->sk_state != BT_CONNECTED) { @@ -1405,7 +1417,7 @@ static int iso_sock_sendmsg(struct socket *sock, struct msghdr *msg, lock_sock(sk); if (sk->sk_state == BT_CONNECTED) - err = iso_send_frame(sk, skb); + err = iso_send_frame(sk, skb, &sockc); else err = -ENOTCONN; @@ -1474,6 +1486,10 @@ static int iso_sock_recvmsg(struct socket *sock, struct msghdr *msg, BT_DBG("sk %p", sk); + if (unlikely(flags & MSG_ERRQUEUE)) + return sock_recv_errqueue(sk, msg, len, SOL_BLUETOOTH, + BT_SCM_ERROR); + if (test_and_clear_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) { sock_hold(sk); lock_sock(sk); @@ -2187,11 +2203,6 @@ int iso_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags) return HCI_LM_ACCEPT; } -static bool iso_match(struct hci_conn *hcon) -{ - return hcon->type == ISO_LINK || hcon->type == LE_LINK; -} - static void iso_connect_cfm(struct hci_conn *hcon, __u8 status) { if (hcon->type != ISO_LINK) { @@ -2373,7 +2384,6 @@ void iso_recv(struct hci_conn *hcon, struct sk_buff *skb, u16 flags) static struct hci_cb iso_cb = { .name = "ISO", - .match = iso_match, .connect_cfm = iso_connect_cfm, .disconn_cfm = iso_disconn_cfm, }; diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c index fec11e576f310..c7b66b2ea9f21 100644 --- a/net/bluetooth/l2cap_core.c +++ 
b/net/bluetooth/l2cap_core.c @@ -282,7 +282,7 @@ static void __set_retrans_timer(struct l2cap_chan *chan) if (!delayed_work_pending(&chan->monitor_timer) && chan->retrans_timeout) { l2cap_set_timer(chan, &chan->retrans_timer, - msecs_to_jiffies(chan->retrans_timeout)); + secs_to_jiffies(chan->retrans_timeout)); } } @@ -291,7 +291,7 @@ static void __set_monitor_timer(struct l2cap_chan *chan) __clear_retrans_timer(chan); if (chan->monitor_timeout) { l2cap_set_timer(chan, &chan->monitor_timer, - msecs_to_jiffies(chan->monitor_timeout)); + secs_to_jiffies(chan->monitor_timeout)); } } @@ -632,7 +632,8 @@ void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan) test_bit(FLAG_HOLD_HCI_CONN, &chan->flags)) hci_conn_hold(conn->hcon); - list_add(&chan->list, &conn->chan_l); + /* Append to the list since the order matters for ECRED */ + list_add_tail(&chan->list, &conn->chan_l); } void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan) @@ -2514,7 +2515,33 @@ static void l2cap_le_flowctl_send(struct l2cap_chan *chan) skb_queue_len(&chan->tx_q)); } -int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len) +static void l2cap_tx_timestamp(struct sk_buff *skb, + const struct sockcm_cookie *sockc, + size_t len) +{ + struct sock *sk = skb ? skb->sk : NULL; + + if (sk && sk->sk_type == SOCK_STREAM) + hci_setup_tx_timestamp(skb, len, sockc); + else + hci_setup_tx_timestamp(skb, 1, sockc); +} + +static void l2cap_tx_timestamp_seg(struct sk_buff_head *queue, + const struct sockcm_cookie *sockc, + size_t len) +{ + struct sk_buff *skb = skb_peek(queue); + struct sock *sk = skb ? skb->sk : NULL; + + if (sk && sk->sk_type == SOCK_STREAM) + l2cap_tx_timestamp(skb_peek_tail(queue), sockc, len); + else + l2cap_tx_timestamp(skb, sockc, len); +} + +int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len, + const struct sockcm_cookie *sockc) { struct sk_buff *skb; int err; @@ -2529,6 +2556,8 @@ int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len) if (IS_ERR(skb)) return PTR_ERR(skb); + l2cap_tx_timestamp(skb, sockc, len); + l2cap_do_send(chan, skb); return len; } @@ -2552,6 +2581,8 @@ int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len) if (err) return err; + l2cap_tx_timestamp_seg(&seg_queue, sockc, len); + skb_queue_splice_tail_init(&seg_queue, &chan->tx_q); l2cap_le_flowctl_send(chan); @@ -2573,6 +2604,8 @@ int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len) if (IS_ERR(skb)) return PTR_ERR(skb); + l2cap_tx_timestamp(skb, sockc, len); + l2cap_do_send(chan, skb); err = len; break; @@ -2596,10 +2629,13 @@ int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len) if (err) break; - if (chan->mode == L2CAP_MODE_ERTM) + if (chan->mode == L2CAP_MODE_ERTM) { + /* TODO: ERTM mode timestamping */ l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST); - else + } else { + l2cap_tx_timestamp_seg(&seg_queue, sockc, len); l2cap_streaming_send(chan, &seg_queue); + } err = len; @@ -3771,7 +3807,11 @@ static void l2cap_ecred_rsp_defer(struct l2cap_chan *chan, void *data) struct l2cap_ecred_conn_rsp *rsp_flex = container_of(&rsp->pdu.rsp, struct l2cap_ecred_conn_rsp, hdr); - if (test_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags)) + /* Check if channel for outgoing connection or if it wasn't deferred + * since in those cases it must be skipped. 
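/* Editor's sketch of the sendmsg()-side pattern that the l2cap, iso and
 * sco hunks in this series share: seed a sockcm_cookie with the socket
 * defaults, let any cmsg headers override it, and only then build and
 * stamp the skb. Illustrative only; the Bluetooth code uses its own
 * hci_sockcm_init() wrapper around the generic sockcm_init().
 */
static int xmit_prologue(struct sock *sk, struct msghdr *msg,
			 struct sockcm_cookie *sockc)
{
	sockcm_init(sockc, sk);			/* defaults from sk */

	if (msg->msg_controllen)		/* SO_MARK, tsflags, ... */
		return sock_cmsg_send(sk, msg, sockc);

	return 0;
}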
+ */ + if (test_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags) || + !test_and_clear_bit(FLAG_DEFER_SETUP, &chan->flags)) return; /* Reset ident so only one response is sent */ @@ -7177,11 +7217,6 @@ static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c, return NULL; } -static bool l2cap_match(struct hci_conn *hcon) -{ - return hcon->type == ACL_LINK || hcon->type == LE_LINK; -} - static void l2cap_connect_cfm(struct hci_conn *hcon, u8 status) { struct hci_dev *hdev = hcon->hdev; @@ -7189,6 +7224,9 @@ static void l2cap_connect_cfm(struct hci_conn *hcon, u8 status) struct l2cap_chan *pchan; u8 dst_type; + if (hcon->type != ACL_LINK && hcon->type != LE_LINK) + return; + BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status); if (status) { @@ -7253,6 +7291,9 @@ int l2cap_disconn_ind(struct hci_conn *hcon) static void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason) { + if (hcon->type != ACL_LINK && hcon->type != LE_LINK) + return; + BT_DBG("hcon %p reason %d", hcon, reason); l2cap_conn_del(hcon, bt_to_errno(reason)); @@ -7560,7 +7601,6 @@ void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags) static struct hci_cb l2cap_cb = { .name = "L2CAP", - .match = l2cap_match, .connect_cfm = l2cap_connect_cfm, .disconn_cfm = l2cap_disconn_cfm, .security_cfm = l2cap_security_cfm, diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c index acd11b268b98a..5aa55fa695943 100644 --- a/net/bluetooth/l2cap_sock.c +++ b/net/bluetooth/l2cap_sock.c @@ -1106,6 +1106,7 @@ static int l2cap_sock_sendmsg(struct socket *sock, struct msghdr *msg, { struct sock *sk = sock->sk; struct l2cap_chan *chan = l2cap_pi(sk)->chan; + struct sockcm_cookie sockc; int err; BT_DBG("sock %p, sk %p", sock, sk); @@ -1120,6 +1121,14 @@ static int l2cap_sock_sendmsg(struct socket *sock, struct msghdr *msg, if (sk->sk_state != BT_CONNECTED) return -ENOTCONN; + hci_sockcm_init(&sockc, sk); + + if (msg->msg_controllen) { + err = sock_cmsg_send(sk, msg, &sockc); + if (err) + return err; + } + lock_sock(sk); err = bt_sock_wait_ready(sk, msg->msg_flags); release_sock(sk); @@ -1127,7 +1136,7 @@ static int l2cap_sock_sendmsg(struct socket *sock, struct msghdr *msg, return err; l2cap_chan_lock(chan); - err = l2cap_chan_send(chan, msg, len); + err = l2cap_chan_send(chan, msg, len, &sockc); l2cap_chan_unlock(chan); return err; @@ -1168,6 +1177,10 @@ static int l2cap_sock_recvmsg(struct socket *sock, struct msghdr *msg, struct l2cap_pinfo *pi = l2cap_pi(sk); int err; + if (unlikely(flags & MSG_ERRQUEUE)) + return sock_recv_errqueue(sk, msg, len, SOL_BLUETOOTH, + BT_SCM_ERROR); + lock_sock(sk); if (sk->sk_state == BT_CONNECT2 && test_bit(BT_SK_DEFER_SETUP, diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c index f53304cb09dbe..4fd30ba243be5 100644 --- a/net/bluetooth/mgmt.c +++ b/net/bluetooth/mgmt.c @@ -1533,7 +1533,7 @@ static void mgmt_set_discoverable_complete(struct hci_dev *hdev, void *data, if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) && hdev->discov_timeout > 0) { - int to = msecs_to_jiffies(hdev->discov_timeout * 1000); + int to = secs_to_jiffies(hdev->discov_timeout); queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to); } @@ -1641,7 +1641,7 @@ static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data, hdev->discov_timeout = timeout; if (cp->val && hdev->discov_timeout > 0) { - int to = msecs_to_jiffies(hdev->discov_timeout * 1000); + int to = secs_to_jiffies(hdev->discov_timeout); queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, 
to); } @@ -2534,7 +2534,7 @@ static int send_hci_cmd_sync(struct hci_dev *hdev, void *data) skb = __hci_cmd_sync_ev(hdev, le16_to_cpu(cp->opcode), le16_to_cpu(cp->params_len), cp->params, cp->event, cp->timeout ? - msecs_to_jiffies(cp->timeout * 1000) : + secs_to_jiffies(cp->timeout) : HCI_CMD_TIMEOUT); if (IS_ERR(skb)) { mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_HCI_CMD_SYNC, @@ -5743,29 +5743,6 @@ static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev, return err; } -void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status) -{ - struct mgmt_pending_cmd *cmd; - - bt_dev_dbg(hdev, "status %u", status); - - hci_dev_lock(hdev); - - cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev); - if (!cmd) - cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev); - - if (!cmd) - cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev); - - if (cmd) { - cmd->cmd_complete(cmd, mgmt_status(status)); - mgmt_pending_remove(cmd); - } - - hci_dev_unlock(hdev); -} - static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type, uint8_t *mgmt_status) { @@ -6018,23 +5995,6 @@ static int start_service_discovery(struct sock *sk, struct hci_dev *hdev, return err; } -void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status) -{ - struct mgmt_pending_cmd *cmd; - - bt_dev_dbg(hdev, "status %u", status); - - hci_dev_lock(hdev); - - cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev); - if (cmd) { - cmd->cmd_complete(cmd, mgmt_status(status)); - mgmt_pending_remove(cmd); - } - - hci_dev_unlock(hdev); -} - static void stop_discovery_complete(struct hci_dev *hdev, void *data, int err) { struct mgmt_pending_cmd *cmd = data; @@ -9660,6 +9620,9 @@ void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn, sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0) + eir_precalc_len(sizeof(conn->dev_class))); + if (!skb) + return; + ev = skb_put(skb, sizeof(*ev)); bacpy(&ev->addr.bdaddr, &conn->dst); ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type); @@ -10413,6 +10376,8 @@ void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND, sizeof(*ev) + (name ? 
eir_precalc_len(name_len) : 0)); + if (!skb) + return; ev = skb_put(skb, sizeof(*ev)); bacpy(&ev->addr.bdaddr, bdaddr); diff --git a/net/bluetooth/mgmt_util.c b/net/bluetooth/mgmt_util.c index 17ab909a7c07f..e5ff65e424b5b 100644 --- a/net/bluetooth/mgmt_util.c +++ b/net/bluetooth/mgmt_util.c @@ -229,23 +229,6 @@ struct mgmt_pending_cmd *mgmt_pending_find(unsigned short channel, u16 opcode, return NULL; } -struct mgmt_pending_cmd *mgmt_pending_find_data(unsigned short channel, - u16 opcode, - struct hci_dev *hdev, - const void *data) -{ - struct mgmt_pending_cmd *cmd; - - list_for_each_entry(cmd, &hdev->mgmt_pending, list) { - if (cmd->user_data != data) - continue; - if (cmd->opcode == opcode) - return cmd; - } - - return NULL; -} - void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev, void (*cb)(struct mgmt_pending_cmd *cmd, void *data), void *data) diff --git a/net/bluetooth/mgmt_util.h b/net/bluetooth/mgmt_util.h index bdf978605d5a8..f2ba994ab1d84 100644 --- a/net/bluetooth/mgmt_util.h +++ b/net/bluetooth/mgmt_util.h @@ -54,10 +54,6 @@ int mgmt_cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status, struct mgmt_pending_cmd *mgmt_pending_find(unsigned short channel, u16 opcode, struct hci_dev *hdev); -struct mgmt_pending_cmd *mgmt_pending_find_data(unsigned short channel, - u16 opcode, - struct hci_dev *hdev, - const void *data); void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev, void (*cb)(struct mgmt_pending_cmd *cmd, void *data), void *data); diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c index 4c56ca5a216c6..ad5177e3a69b7 100644 --- a/net/bluetooth/rfcomm/core.c +++ b/net/bluetooth/rfcomm/core.c @@ -2134,11 +2134,6 @@ static int rfcomm_run(void *unused) return 0; } -static bool rfcomm_match(struct hci_conn *hcon) -{ - return hcon->type == ACL_LINK; -} - static void rfcomm_security_cfm(struct hci_conn *conn, u8 status, u8 encrypt) { struct rfcomm_session *s; @@ -2185,7 +2180,6 @@ static void rfcomm_security_cfm(struct hci_conn *conn, u8 status, u8 encrypt) static struct hci_cb rfcomm_cb = { .name = "RFCOMM", - .match = rfcomm_match, .security_cfm = rfcomm_security_cfm }; diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c index aa7bfe26cb40f..2945d27e75dce 100644 --- a/net/bluetooth/sco.c +++ b/net/bluetooth/sco.c @@ -107,6 +107,14 @@ static void sco_conn_put(struct sco_conn *conn) kref_put(&conn->ref, sco_conn_free); } +static struct sco_conn *sco_conn_hold(struct sco_conn *conn) +{ + BT_DBG("conn %p refcnt %u", conn, kref_read(&conn->ref)); + + kref_get(&conn->ref); + return conn; +} + static struct sco_conn *sco_conn_hold_unless_zero(struct sco_conn *conn) { if (!conn) @@ -370,7 +378,8 @@ static int sco_connect(struct sock *sk) return err; } -static int sco_send_frame(struct sock *sk, struct sk_buff *skb) +static int sco_send_frame(struct sock *sk, struct sk_buff *skb, + const struct sockcm_cookie *sockc) { struct sco_conn *conn = sco_pi(sk)->conn; int len = skb->len; @@ -381,6 +390,7 @@ static int sco_send_frame(struct sock *sk, struct sk_buff *skb) BT_DBG("sk %p len %d", sk, len); + hci_setup_tx_timestamp(skb, 1, sockc); hci_send_sco(conn->hcon, skb); return len; @@ -776,6 +786,7 @@ static int sco_sock_sendmsg(struct socket *sock, struct msghdr *msg, { struct sock *sk = sock->sk; struct sk_buff *skb; + struct sockcm_cookie sockc; int err; BT_DBG("sock %p, sk %p", sock, sk); @@ -787,6 +798,14 @@ static int sco_sock_sendmsg(struct socket *sock, struct msghdr *msg, if (msg->msg_flags & MSG_OOB) return -EOPNOTSUPP; + 
hci_sockcm_init(&sockc, sk); + + if (msg->msg_controllen) { + err = sock_cmsg_send(sk, msg, &sockc); + if (err) + return err; + } + skb = bt_skb_sendmsg(sk, msg, len, len, 0, 0); if (IS_ERR(skb)) return PTR_ERR(skb); @@ -794,7 +813,7 @@ static int sco_sock_sendmsg(struct socket *sock, struct msghdr *msg, lock_sock(sk); if (sk->sk_state == BT_CONNECTED) - err = sco_send_frame(sk, skb); + err = sco_send_frame(sk, skb, &sockc); else err = -ENOTCONN; @@ -860,6 +879,10 @@ static int sco_sock_recvmsg(struct socket *sock, struct msghdr *msg, struct sock *sk = sock->sk; struct sco_pinfo *pi = sco_pi(sk); + if (unlikely(flags & MSG_ERRQUEUE)) + return sock_recv_errqueue(sk, msg, len, SOL_BLUETOOTH, + BT_SCM_ERROR); + lock_sock(sk); if (sk->sk_state == BT_CONNECT2 && @@ -1353,6 +1376,7 @@ static void sco_conn_ready(struct sco_conn *conn) bacpy(&sco_pi(sk)->src, &conn->hcon->src); bacpy(&sco_pi(sk)->dst, &conn->hcon->dst); + sco_conn_hold(conn); hci_conn_hold(conn->hcon); __sco_chan_add(conn, sk, parent); @@ -1398,27 +1422,30 @@ int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags) return lm; } -static bool sco_match(struct hci_conn *hcon) -{ - return hcon->type == SCO_LINK || hcon->type == ESCO_LINK; -} - static void sco_connect_cfm(struct hci_conn *hcon, __u8 status) { + if (hcon->type != SCO_LINK && hcon->type != ESCO_LINK) + return; + BT_DBG("hcon %p bdaddr %pMR status %u", hcon, &hcon->dst, status); if (!status) { struct sco_conn *conn; conn = sco_conn_add(hcon); - if (conn) + if (conn) { sco_conn_ready(conn); + sco_conn_put(conn); + } } else sco_conn_del(hcon, bt_to_errno(status)); } static void sco_disconn_cfm(struct hci_conn *hcon, __u8 reason) { + if (hcon->type != SCO_LINK && hcon->type != ESCO_LINK) + return; + BT_DBG("hcon %p reason %d", hcon, reason); sco_conn_del(hcon, bt_to_errno(reason)); @@ -1444,7 +1471,6 @@ void sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb) static struct hci_cb sco_cb = { .name = "SCO", - .match = sco_match, .connect_cfm = sco_connect_cfm, .disconn_cfm = sco_disconn_cfm, }; diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c index 8b9724fd752a1..47f359f24d1fd 100644 --- a/net/bluetooth/smp.c +++ b/net/bluetooth/smp.c @@ -55,7 +55,7 @@ /* Keys which are not distributed with Secure Connections */ #define SMP_SC_NO_DIST (SMP_DIST_ENC_KEY | SMP_DIST_LINK_KEY) -#define SMP_TIMEOUT msecs_to_jiffies(30000) +#define SMP_TIMEOUT secs_to_jiffies(30) #define ID_ADDR_TIMEOUT msecs_to_jiffies(200) @@ -608,7 +608,7 @@ static void smp_send_cmd(struct l2cap_conn *conn, u8 code, u16 len, void *data) iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, iv, 2, 1 + len); - l2cap_chan_send(chan, &msg, 1 + len); + l2cap_chan_send(chan, &msg, 1 + len, NULL); if (!chan->data) return; diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c index 0ab4613aa07ad..a818fdc22da9a 100644 --- a/net/bridge/br_device.c +++ b/net/bridge/br_device.c @@ -16,6 +16,8 @@ #include #include +#include + #include "br_private.h" #define COMMON_FEATURES (NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA | \ @@ -488,7 +490,7 @@ void br_dev_setup(struct net_device *dev) SET_NETDEV_DEVTYPE(dev, &br_type); dev->priv_flags = IFF_EBRIDGE | IFF_NO_QUEUE; dev->lltx = true; - dev->netns_local = true; + dev->netns_immutable = true; dev->features = COMMON_FEATURES | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX; diff --git a/net/bridge/br_ioctl.c b/net/bridge/br_ioctl.c index f213ed1083618..6bc0a11f2ed3e 100644 --- a/net/bridge/br_ioctl.c +++ b/net/bridge/br_ioctl.c @@ -394,10 +394,26 @@ 
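/* Editor's sketch (userspace C, not kernel code): with the sendmsg() and
 * MSG_ERRQUEUE hunks above, a Bluetooth SCO/L2CAP/ISO socket can request
 * software TX timestamps and read them back from the error queue much
 * like a UDP socket. Assumes the SOF_TIMESTAMPING_TX_COMPLETION flag
 * introduced by this series is available in linux/net_tstamp.h.
 */
#include <linux/net_tstamp.h>
#include <sys/socket.h>

static int enable_tx_tstamps(int fd)
{
	unsigned int val = SOF_TIMESTAMPING_SOFTWARE |
			   SOF_TIMESTAMPING_TX_SOFTWARE |
			   SOF_TIMESTAMPING_TX_COMPLETION |
			   SOF_TIMESTAMPING_OPT_ID;

	return setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING,
			  &val, sizeof(val));
}

/* Completions are then read with recvmsg(fd, &msg, MSG_ERRQUEUE); the
 * kernel side answers via sock_recv_errqueue(..., SOL_BLUETOOTH,
 * BT_SCM_ERROR) as added above.
 */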
static int old_deviceless(struct net *net, void __user *data) return -EOPNOTSUPP; } -int br_ioctl_stub(struct net *net, struct net_bridge *br, unsigned int cmd, - struct ifreq *ifr, void __user *uarg) +int br_ioctl_stub(struct net *net, unsigned int cmd, void __user *uarg) { int ret = -EOPNOTSUPP; + struct ifreq ifr; + + if (cmd == SIOCBRADDIF || cmd == SIOCBRDELIF) { + void __user *data; + char *colon; + + if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) + return -EPERM; + + if (get_user_ifreq(&ifr, &data, uarg)) + return -EFAULT; + + ifr.ifr_name[IFNAMSIZ - 1] = 0; + colon = strchr(ifr.ifr_name, ':'); + if (colon) + *colon = 0; + } rtnl_lock(); @@ -430,7 +446,21 @@ int br_ioctl_stub(struct net *net, struct net_bridge *br, unsigned int cmd, break; case SIOCBRADDIF: case SIOCBRDELIF: - ret = add_del_if(br, ifr->ifr_ifindex, cmd == SIOCBRADDIF); + { + struct net_device *dev; + + dev = __dev_get_by_name(net, ifr.ifr_name); + if (!dev || !netif_device_present(dev)) { + ret = -ENODEV; + break; + } + if (!netif_is_bridge_master(dev)) { + ret = -EOPNOTSUPP; + break; + } + + ret = add_del_if(netdev_priv(dev), ifr.ifr_ifindex, cmd == SIOCBRADDIF); + } break; } diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c index 1a52a0bca086d..7e1ad229e1330 100644 --- a/net/bridge/br_mdb.c +++ b/net/bridge/br_mdb.c @@ -1040,7 +1040,7 @@ static int br_mdb_add_group(const struct br_mdb_config *cfg, /* host join */ if (!port) { - if (mp->host_joined) { + if (mp->host_joined && !(cfg->nlflags & NLM_F_REPLACE)) { NL_SET_ERR_MSG_MOD(extack, "Group is already joined by host"); return -EEXIST; } diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c index 3e0f47203f2a1..6e337937d0d7b 100644 --- a/net/bridge/br_netlink.c +++ b/net/bridge/br_netlink.c @@ -1553,11 +1553,13 @@ static int br_changelink(struct net_device *brdev, struct nlattr *tb[], return 0; } -static int br_dev_newlink(struct net *src_net, struct net_device *dev, - struct nlattr *tb[], struct nlattr *data[], +static int br_dev_newlink(struct net_device *dev, + struct rtnl_newlink_params *params, struct netlink_ext_ack *extack) { struct net_bridge *br = netdev_priv(dev); + struct nlattr **data = params->data; + struct nlattr **tb = params->tb; int err; err = register_netdevice(dev); diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index 1054b8a88edc4..d5b3c5936a79e 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h @@ -949,8 +949,7 @@ br_port_get_check_rtnl(const struct net_device *dev) /* br_ioctl.c */ int br_dev_siocdevprivate(struct net_device *dev, struct ifreq *rq, void __user *data, int cmd); -int br_ioctl_stub(struct net *net, struct net_bridge *br, unsigned int cmd, - struct ifreq *ifr, void __user *uarg); +int br_ioctl_stub(struct net *net, unsigned int cmd, void __user *uarg); /* br_multicast.c */ #ifdef CONFIG_BRIDGE_IGMP_SNOOPING diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c index 94ad09e36df2a..fa6a3c2634a8c 100644 --- a/net/caif/chnl_net.c +++ b/net/caif/chnl_net.c @@ -438,10 +438,11 @@ static void caif_netlink_parms(struct nlattr *data[], } } -static int ipcaif_newlink(struct net *src_net, struct net_device *dev, - struct nlattr *tb[], struct nlattr *data[], +static int ipcaif_newlink(struct net_device *dev, + struct rtnl_newlink_params *params, struct netlink_ext_ack *extack) { + struct nlattr **data = params->data; int ret; struct chnl_net *caifdev; ASSERT_RTNL(); diff --git a/net/can/af_can.c b/net/can/af_can.c index 01f3fbb3b67dc..4c059e41c8316 100644 --- a/net/can/af_can.c +++ 
b/net/can/af_can.c @@ -172,6 +172,8 @@ static int can_create(struct net *net, struct socket *sock, int protocol, sock_orphan(sk); sock_put(sk); sock->sk = NULL; + } else { + sock_prot_inuse_add(net, sk->sk_prot, 1); } errout: @@ -287,8 +289,8 @@ int can_send(struct sk_buff *skb, int loop) netif_rx(newskb); /* update statistics */ - pkg_stats->tx_frames++; - pkg_stats->tx_frames_delta++; + atomic_long_inc(&pkg_stats->tx_frames); + atomic_long_inc(&pkg_stats->tx_frames_delta); return 0; @@ -647,8 +649,8 @@ static void can_receive(struct sk_buff *skb, struct net_device *dev) int matches; /* update statistics */ - pkg_stats->rx_frames++; - pkg_stats->rx_frames_delta++; + atomic_long_inc(&pkg_stats->rx_frames); + atomic_long_inc(&pkg_stats->rx_frames_delta); /* create non-zero unique skb identifier together with *skb */ while (!(can_skb_prv(skb)->skbcnt)) @@ -669,8 +671,8 @@ static void can_receive(struct sk_buff *skb, struct net_device *dev) consume_skb(skb); if (matches > 0) { - pkg_stats->matches++; - pkg_stats->matches_delta++; + atomic_long_inc(&pkg_stats->matches); + atomic_long_inc(&pkg_stats->matches_delta); } } diff --git a/net/can/af_can.h b/net/can/af_can.h index 7c2d9161e2245..22f3352c77fec 100644 --- a/net/can/af_can.h +++ b/net/can/af_can.h @@ -66,9 +66,9 @@ struct receiver { struct can_pkg_stats { unsigned long jiffies_init; - unsigned long rx_frames; - unsigned long tx_frames; - unsigned long matches; + atomic_long_t rx_frames; + atomic_long_t tx_frames; + atomic_long_t matches; unsigned long total_rx_rate; unsigned long total_tx_rate; @@ -82,9 +82,9 @@ struct can_pkg_stats { unsigned long max_tx_rate; unsigned long max_rx_match_ratio; - unsigned long rx_frames_delta; - unsigned long tx_frames_delta; - unsigned long matches_delta; + atomic_long_t rx_frames_delta; + atomic_long_t tx_frames_delta; + atomic_long_t matches_delta; }; /* persistent statistics */ diff --git a/net/can/bcm.c b/net/can/bcm.c index 217049fa496e9..6dc041e054ba1 100644 --- a/net/can/bcm.c +++ b/net/can/bcm.c @@ -1625,6 +1625,7 @@ static int bcm_release(struct socket *sock) sock->sk = NULL; release_sock(sk); + sock_prot_inuse_add(net, sk->sk_prot, -1); sock_put(sk); return 0; diff --git a/net/can/isotp.c b/net/can/isotp.c index 16046931542a5..789583c62f980 100644 --- a/net/can/isotp.c +++ b/net/can/isotp.c @@ -1239,6 +1239,7 @@ static int isotp_release(struct socket *sock) sock->sk = NULL; release_sock(sk); + sock_prot_inuse_add(net, sk->sk_prot, -1); sock_put(sk); return 0; diff --git a/net/can/proc.c b/net/can/proc.c index bbce97825f13f..25fdf060e30d0 100644 --- a/net/can/proc.c +++ b/net/can/proc.c @@ -118,6 +118,13 @@ void can_stat_update(struct timer_list *t) struct can_pkg_stats *pkg_stats = net->can.pkg_stats; unsigned long j = jiffies; /* snapshot */ + long rx_frames = atomic_long_read(&pkg_stats->rx_frames); + long tx_frames = atomic_long_read(&pkg_stats->tx_frames); + long matches = atomic_long_read(&pkg_stats->matches); + long rx_frames_delta = atomic_long_read(&pkg_stats->rx_frames_delta); + long tx_frames_delta = atomic_long_read(&pkg_stats->tx_frames_delta); + long matches_delta = atomic_long_read(&pkg_stats->matches_delta); + /* restart counting in timer context on user request */ if (user_reset) can_init_stats(net); @@ -127,35 +134,33 @@ void can_stat_update(struct timer_list *t) can_init_stats(net); /* prevent overflow in calc_rate() */ - if (pkg_stats->rx_frames > (ULONG_MAX / HZ)) + if (rx_frames > (LONG_MAX / HZ)) can_init_stats(net); /* prevent overflow in calc_rate() */ - if 
(pkg_stats->tx_frames > (ULONG_MAX / HZ)) + if (tx_frames > (LONG_MAX / HZ)) can_init_stats(net); /* matches overflow - very improbable */ - if (pkg_stats->matches > (ULONG_MAX / 100)) + if (matches > (LONG_MAX / 100)) can_init_stats(net); /* calc total values */ - if (pkg_stats->rx_frames) - pkg_stats->total_rx_match_ratio = (pkg_stats->matches * 100) / - pkg_stats->rx_frames; + if (rx_frames) + pkg_stats->total_rx_match_ratio = (matches * 100) / rx_frames; pkg_stats->total_tx_rate = calc_rate(pkg_stats->jiffies_init, j, - pkg_stats->tx_frames); + tx_frames); pkg_stats->total_rx_rate = calc_rate(pkg_stats->jiffies_init, j, - pkg_stats->rx_frames); + rx_frames); /* calc current values */ - if (pkg_stats->rx_frames_delta) + if (rx_frames_delta) pkg_stats->current_rx_match_ratio = - (pkg_stats->matches_delta * 100) / - pkg_stats->rx_frames_delta; + (matches_delta * 100) / rx_frames_delta; - pkg_stats->current_tx_rate = calc_rate(0, HZ, pkg_stats->tx_frames_delta); - pkg_stats->current_rx_rate = calc_rate(0, HZ, pkg_stats->rx_frames_delta); + pkg_stats->current_tx_rate = calc_rate(0, HZ, tx_frames_delta); + pkg_stats->current_rx_rate = calc_rate(0, HZ, rx_frames_delta); /* check / update maximum values */ if (pkg_stats->max_tx_rate < pkg_stats->current_tx_rate) @@ -168,9 +173,9 @@ void can_stat_update(struct timer_list *t) pkg_stats->max_rx_match_ratio = pkg_stats->current_rx_match_ratio; /* clear values for 'current rate' calculation */ - pkg_stats->tx_frames_delta = 0; - pkg_stats->rx_frames_delta = 0; - pkg_stats->matches_delta = 0; + atomic_long_set(&pkg_stats->tx_frames_delta, 0); + atomic_long_set(&pkg_stats->rx_frames_delta, 0); + atomic_long_set(&pkg_stats->matches_delta, 0); /* restart timer (one second) */ mod_timer(&net->can.stattimer, round_jiffies(jiffies + HZ)); @@ -214,9 +219,12 @@ static int can_stats_proc_show(struct seq_file *m, void *v) struct can_rcv_lists_stats *rcv_lists_stats = net->can.rcv_lists_stats; seq_putc(m, '\n'); - seq_printf(m, " %8ld transmitted frames (TXF)\n", pkg_stats->tx_frames); - seq_printf(m, " %8ld received frames (RXF)\n", pkg_stats->rx_frames); - seq_printf(m, " %8ld matched frames (RXMF)\n", pkg_stats->matches); + seq_printf(m, " %8ld transmitted frames (TXF)\n", + atomic_long_read(&pkg_stats->tx_frames)); + seq_printf(m, " %8ld received frames (RXF)\n", + atomic_long_read(&pkg_stats->rx_frames)); + seq_printf(m, " %8ld matched frames (RXMF)\n", + atomic_long_read(&pkg_stats->matches)); seq_putc(m, '\n'); diff --git a/net/can/raw.c b/net/can/raw.c index 46e8ed9d64da3..020f21430b1d8 100644 --- a/net/can/raw.c +++ b/net/can/raw.c @@ -397,11 +397,13 @@ static int raw_release(struct socket *sock) { struct sock *sk = sock->sk; struct raw_sock *ro; + struct net *net; if (!sk) return 0; ro = raw_sk(sk); + net = sock_net(sk); spin_lock(&raw_notifier_lock); while (raw_busy_notifier == ro) { @@ -421,7 +423,7 @@ static int raw_release(struct socket *sock) raw_disable_allfilters(dev_net(ro->dev), ro->dev, sk); netdev_put(ro->dev, &ro->dev_tracker); } else { - raw_disable_allfilters(sock_net(sk), NULL, sk); + raw_disable_allfilters(net, NULL, sk); } } @@ -440,6 +442,7 @@ static int raw_release(struct socket *sock) release_sock(sk); rtnl_unlock(); + sock_prot_inuse_add(net, sk->sk_prot, -1); sock_put(sk); return 0; @@ -963,7 +966,7 @@ static int raw_sendmsg(struct socket *sock, struct msghdr *msg, size_t size) skb->dev = dev; skb->priority = sockc.priority; - skb->mark = READ_ONCE(sk->sk_mark); + skb->mark = sockc.mark; skb->tstamp = sockc.transmit_time; 
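The can/af_can.c and can/proc.c hunks above pair the atomic_long_t conversion with a snapshot discipline in can_stat_update(): every counter is loaded exactly once into a local before any rate or ratio is derived, so all values describe the same instant. A minimal user-space sketch of that pattern, with C11 atomics standing in for atomic_long_t; the counter names mirror the patch, the rest is illustrative:

#include <stdatomic.h>
#include <stdio.h>

static atomic_long rx_frames; /* stands in for pkg_stats->rx_frames */
static atomic_long matches;   /* stands in for pkg_stats->matches   */

/* hot path: lock-free increments, as in can_receive() above */
static void rx_hot_path(int matched)
{
        atomic_fetch_add(&rx_frames, 1);
        if (matched)
                atomic_fetch_add(&matches, 1);
}

/* periodic pass: load each counter once, then derive everything */
static void stats_pass(void)
{
        long rx = atomic_load(&rx_frames);
        long m = atomic_load(&matches);

        /* Re-reading the atomics at each use could race with the hot
         * path and report, say, a match ratio above 100%. */
        if (rx)
                printf("match ratio: %ld%%\n", (m * 100) / rx);
}

int main(void)
{
        rx_hot_path(1);
        rx_hot_path(0);
        stats_pass(); /* prints "match ratio: 50%" */
        return 0;
}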
skb_setup_tx_timestamp(skb, &sockc); diff --git a/net/core/Makefile b/net/core/Makefile index d9326600e289b..a10c3bd96798d 100644 --- a/net/core/Makefile +++ b/net/core/Makefile @@ -9,7 +9,7 @@ obj-y := sock.o request_sock.o skbuff.o datagram.o stream.o scm.o \ obj-$(CONFIG_SYSCTL) += sysctl_net_core.o -obj-y += dev.o dev_addr_lists.o dst.o netevent.o \ +obj-y += dev.o dev_api.o dev_addr_lists.o dst.o netevent.o \ neighbour.o rtnetlink.o utils.o link_watch.o filter.o \ sock_diag.o dev_ioctl.o tso.o sock_reuseport.o \ fib_notifier.o xdp.o flow_offload.o gro.o \ diff --git a/net/core/dev.c b/net/core/dev.c index 1b252e9459fdb..72b0c5ff96898 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -78,6 +78,7 @@ #include #include #include +#include #include #include #include @@ -156,9 +157,11 @@ #include #include #include +#include #include #include #include +#include #include #include @@ -570,10 +573,18 @@ static inline void netdev_set_addr_lockdep_class(struct net_device *dev) static inline struct list_head *ptype_head(const struct packet_type *pt) { - if (pt->type == htons(ETH_P_ALL)) - return pt->dev ? &pt->dev->ptype_all : &net_hotdata.ptype_all; - else - return pt->dev ? &pt->dev->ptype_specific : + if (pt->type == htons(ETH_P_ALL)) { + if (!pt->af_packet_net && !pt->dev) + return NULL; + + return pt->dev ? &pt->dev->ptype_all : + &pt->af_packet_net->ptype_all; + } + + if (pt->dev) + return &pt->dev->ptype_specific; + + return pt->af_packet_net ? &pt->af_packet_net->ptype_specific : &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK]; } @@ -594,6 +605,9 @@ void dev_add_pack(struct packet_type *pt) { struct list_head *head = ptype_head(pt); + if (WARN_ON_ONCE(!head)) + return; + spin_lock(&ptype_lock); list_add_rcu(&pt->list, head); spin_unlock(&ptype_lock); @@ -618,6 +632,9 @@ void __dev_remove_pack(struct packet_type *pt) struct list_head *head = ptype_head(pt); struct packet_type *pt1; + if (!head) + return; + spin_lock(&ptype_lock); list_for_each_entry(pt1, head, list) { @@ -1007,7 +1024,7 @@ struct net_device *dev_get_by_napi_id(unsigned int napi_id) WARN_ON_ONCE(!rcu_read_lock_held()); - if (napi_id < MIN_NAPI_ID) + if (!napi_id_valid(napi_id)) return NULL; napi = napi_by_id(napi_id); @@ -1035,6 +1052,18 @@ struct net_device *__netdev_put_lock(struct net_device *dev) return dev; } +static struct net_device *__netdev_put_lock_ops_compat(struct net_device *dev) +{ + netdev_lock_ops_compat(dev); + if (dev->reg_state > NETREG_REGISTERED) { + netdev_unlock_ops_compat(dev); + dev_put(dev); + return NULL; + } + dev_put(dev); + return dev; +} + /** * netdev_get_by_index_lock() - find a device by its ifindex * @net: the applicable net namespace @@ -1057,6 +1086,18 @@ struct net_device *netdev_get_by_index_lock(struct net *net, int ifindex) return __netdev_put_lock(dev); } +struct net_device * +netdev_get_by_index_lock_ops_compat(struct net *net, int ifindex) +{ + struct net_device *dev; + + dev = dev_get_by_index(net, ifindex); + if (!dev) + return NULL; + + return __netdev_put_lock_ops_compat(dev); +} + struct net_device * netdev_xa_find_lock(struct net *net, struct net_device *dev, unsigned long *index) @@ -1082,6 +1123,31 @@ netdev_xa_find_lock(struct net *net, struct net_device *dev, } while (true); } +struct net_device * +netdev_xa_find_lock_ops_compat(struct net *net, struct net_device *dev, + unsigned long *index) +{ + if (dev) + netdev_unlock_ops_compat(dev); + + do { + rcu_read_lock(); + dev = xa_find(&net->dev_by_index, index, ULONG_MAX, XA_PRESENT); + if (!dev) { + rcu_read_unlock(); + 
return NULL; + } + dev_hold(dev); + rcu_read_unlock(); + + dev = __netdev_put_lock_ops_compat(dev); + if (dev) + return dev; + + (*index)++; + } while (true); +} + static DEFINE_SEQLOCK(netdev_rename_lock); void netdev_copy_name(struct net_device *dev, char *name) @@ -1370,15 +1436,7 @@ static int dev_get_valid_name(struct net *net, struct net_device *dev, return ret < 0 ? ret : 0; } -/** - * dev_change_name - change name of a device - * @dev: device - * @newname: name (or format string) must be at least IFNAMSIZ - * - * Change name of a device, can pass format strings "eth%d". - * for wildcarding. - */ -int dev_change_name(struct net_device *dev, const char *newname) +int netif_change_name(struct net_device *dev, const char *newname) { struct net *net = dev_net(dev); unsigned char old_assign_type; @@ -1448,15 +1506,7 @@ int dev_change_name(struct net_device *dev, const char *newname) return err; } -/** - * dev_set_alias - change ifalias of a device - * @dev: device - * @alias: name up to IFALIASZ - * @len: limit of bytes to copy from info - * - * Set ifalias for a device, - */ -int dev_set_alias(struct net_device *dev, const char *alias, size_t len) +int netif_set_alias(struct net_device *dev, const char *alias, size_t len) { struct dev_ifalias *new_alias = NULL; @@ -1482,7 +1532,6 @@ int dev_set_alias(struct net_device *dev, const char *alias, size_t len) return len; } -EXPORT_SYMBOL(dev_set_alias); /** * dev_get_alias - get ifalias of a device @@ -1628,6 +1677,8 @@ static int __dev_open(struct net_device *dev, struct netlink_ext_ack *extack) set_bit(__LINK_STATE_START, &dev->state); + netdev_ops_assert_locked(dev); + if (ops->ndo_validate_addr) ret = ops->ndo_validate_addr(dev); @@ -1648,20 +1699,7 @@ static int __dev_open(struct net_device *dev, struct netlink_ext_ack *extack) return ret; } -/** - * dev_open - prepare an interface for use. - * @dev: device to open - * @extack: netlink extended ack - * - * Takes a device from down to up state. The device's private open - * function is invoked and then the multicast lists are loaded. Finally - * the device is moved into the up state and a %NETDEV_UP message is - * sent to the netdev notifier chain. - * - * Calling this function on an active interface is a nop. On a failure - * a negative errno code is returned. - */ -int dev_open(struct net_device *dev, struct netlink_ext_ack *extack) +int netif_open(struct net_device *dev, struct netlink_ext_ack *extack) { int ret; @@ -1677,7 +1715,6 @@ int dev_open(struct net_device *dev, struct netlink_ext_ack *extack) return ret; } -EXPORT_SYMBOL(dev_open); static void __dev_close_many(struct list_head *head) { @@ -1715,6 +1752,9 @@ static void __dev_close_many(struct list_head *head) * We allow it to be called even after a DETACH hot-plug * event. */ + + netdev_ops_assert_locked(dev); + if (ops->ndo_stop) ops->ndo_stop(dev); @@ -1752,16 +1792,7 @@ void dev_close_many(struct list_head *head, bool unlink) } EXPORT_SYMBOL(dev_close_many); -/** - * dev_close - shutdown an interface. - * @dev: device to shutdown - * - * This function moves an active device into down state. A - * %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device - * is then deactivated and finally a %NETDEV_DOWN is sent to the notifier - * chain. 
- */ -void dev_close(struct net_device *dev) +void netif_close(struct net_device *dev) { if (dev->flags & IFF_UP) { LIST_HEAD(single); @@ -1771,18 +1802,9 @@ void dev_close(struct net_device *dev) list_del(&single); } } -EXPORT_SYMBOL(dev_close); +EXPORT_SYMBOL(netif_close); - -/** - * dev_disable_lro - disable Large Receive Offload on a device - * @dev: device - * - * Disable Large Receive Offload (LRO) on a net device. Must be - * called under RTNL. This is needed if received packets may be - * forwarded to another interface. - */ -void dev_disable_lro(struct net_device *dev) +void netif_disable_lro(struct net_device *dev) { struct net_device *lower_dev; struct list_head *iter; @@ -1793,10 +1815,12 @@ void dev_disable_lro(struct net_device *dev) if (unlikely(dev->features & NETIF_F_LRO)) netdev_WARN(dev, "failed to disable LRO!\n"); - netdev_for_each_lower_dev(dev, lower_dev, iter) - dev_disable_lro(lower_dev); + netdev_for_each_lower_dev(dev, lower_dev, iter) { + netdev_lock_ops(lower_dev); + netif_disable_lro(lower_dev); + netdev_unlock_ops(lower_dev); + } } -EXPORT_SYMBOL(dev_disable_lro); /** * dev_disable_gro_hw - disable HW Generic Receive Offload on a device @@ -1874,7 +1898,9 @@ static void call_netdevice_unregister_notifiers(struct notifier_block *nb, dev); call_netdevice_notifier(nb, NETDEV_DOWN, dev); } + netdev_lock_ops(dev); call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev); + netdev_unlock_ops(dev); } static int call_netdevice_register_net_notifiers(struct notifier_block *nb, @@ -2141,21 +2167,15 @@ int register_netdevice_notifier_dev_net(struct net_device *dev, struct notifier_block *nb, struct netdev_net_notifier *nn) { - struct net *net = dev_net(dev); int err; - /* rtnl_net_lock() assumes dev is not yet published by - * register_netdevice(). - */ - DEBUG_NET_WARN_ON_ONCE(!list_empty(&dev->dev_list)); - - rtnl_net_lock(net); - err = __register_netdevice_notifier_net(net, nb, false); + rtnl_net_dev_lock(dev); + err = __register_netdevice_notifier_net(dev_net(dev), nb, false); if (!err) { nn->nb = nb; list_add(&nn->list, &dev->net_notifier_list); } - rtnl_net_unlock(net); + rtnl_net_dev_unlock(dev); return err; } @@ -2487,16 +2507,21 @@ static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb) } /** - * dev_nit_active - return true if any network interface taps are in use + * dev_nit_active_rcu - return true if any network interface taps are in use + * + * The caller must hold the RCU lock * * @dev: network device to check for the presence of taps */ -bool dev_nit_active(struct net_device *dev) +bool dev_nit_active_rcu(const struct net_device *dev) { - return !list_empty(&net_hotdata.ptype_all) || + /* Callers may hold either RCU or RCU BH lock */ + WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held()); + + return !list_empty(&dev_net(dev)->ptype_all) || !list_empty(&dev->ptype_all); } -EXPORT_SYMBOL_GPL(dev_nit_active); +EXPORT_SYMBOL_GPL(dev_nit_active_rcu); /* * Support routine. 
Sends outgoing frames to any network @@ -2505,11 +2530,12 @@ EXPORT_SYMBOL_GPL(dev_nit_active); void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev) { - struct list_head *ptype_list = &net_hotdata.ptype_all; struct packet_type *ptype, *pt_prev = NULL; + struct list_head *ptype_list; struct sk_buff *skb2 = NULL; rcu_read_lock(); + ptype_list = &dev_net_rcu(dev)->ptype_all; again: list_for_each_entry_rcu(ptype, ptype_list, list) { if (READ_ONCE(ptype->ignore_outgoing)) @@ -2553,7 +2579,7 @@ void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev) pt_prev = ptype; } - if (ptype_list == &net_hotdata.ptype_all) { + if (ptype_list != &dev->ptype_all) { ptype_list = &dev->ptype_all; goto again; } @@ -3156,6 +3182,7 @@ int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq) if (dev->reg_state == NETREG_REGISTERED || dev->reg_state == NETREG_UNREGISTERING) { ASSERT_RTNL(); + netdev_ops_assert_locked(dev); rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues, txq); @@ -3186,7 +3213,6 @@ int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq) } EXPORT_SYMBOL(netif_set_real_num_tx_queues); -#ifdef CONFIG_SYSFS /** * netif_set_real_num_rx_queues - set actual number of RX queues used * @dev: Network device @@ -3206,6 +3232,7 @@ int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq) if (dev->reg_state == NETREG_REGISTERED) { ASSERT_RTNL(); + netdev_ops_assert_locked(dev); rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues, rxq); @@ -3217,7 +3244,6 @@ int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq) return 0; } EXPORT_SYMBOL(netif_set_real_num_rx_queues); -#endif /** * netif_set_real_num_queues - set actual number of RX and TX queues used @@ -3798,7 +3824,7 @@ static int xmit_one(struct sk_buff *skb, struct net_device *dev, unsigned int len; int rc; - if (dev_nit_active(dev)) + if (dev_nit_active_rcu(dev)) dev_queue_xmit_nit(skb, dev); len = skb->len; @@ -3878,6 +3904,9 @@ static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device { netdev_features_t features; + if (!skb_frags_readable(skb)) + goto out_kfree_skb; + features = netif_skb_features(skb); skb = validate_xmit_vlan(skb, features); if (unlikely(!skb)) @@ -4571,7 +4600,8 @@ int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev) skb_reset_mac_header(skb); skb_assert_len(skb); - if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_SCHED_TSTAMP)) + if (unlikely(skb_shinfo(skb)->tx_flags & + (SKBTX_SCHED_TSTAMP | SKBTX_BPF))) __skb_tstamp_tx(skb, NULL, NULL, skb->sk, SCM_TSTAMP_SCHED); /* Disable soft irqs for various locks below. Also @@ -4763,7 +4793,7 @@ static inline void ____napi_schedule(struct softnet_data *sd, * we have to raise NET_RX_SOFTIRQ. 
*/ if (!sd->in_net_rx_action) - __raise_softirq_irqoff(NET_RX_SOFTIRQ); + raise_softirq_irqoff(NET_RX_SOFTIRQ); } #ifdef CONFIG_RPS @@ -4773,6 +4803,11 @@ EXPORT_SYMBOL(rps_needed); struct static_key_false rfs_needed __read_mostly; EXPORT_SYMBOL(rfs_needed); +static u32 rfs_slot(u32 hash, const struct rps_dev_flow_table *flow_table) +{ + return hash_32(hash, flow_table->log); +} + static struct rps_dev_flow * set_rps_cpu(struct net_device *dev, struct sk_buff *skb, struct rps_dev_flow *rflow, u16 next_cpu) @@ -4799,7 +4834,7 @@ set_rps_cpu(struct net_device *dev, struct sk_buff *skb, flow_table = rcu_dereference(rxqueue->rps_flow_table); if (!flow_table) goto out; - flow_id = skb_get_hash(skb) & flow_table->mask; + flow_id = rfs_slot(skb_get_hash(skb), flow_table); rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb, rxq_index, flow_id); if (rc < 0) @@ -4878,7 +4913,7 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb, /* OK, now we know there is a match, * we can look at the local (per receive queue) flow table */ - rflow = &flow_table->flows[hash & flow_table->mask]; + rflow = &flow_table->flows[rfs_slot(hash, flow_table)]; tcpu = rflow->cpu; /* @@ -4945,13 +4980,13 @@ bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, rcu_read_lock(); flow_table = rcu_dereference(rxqueue->rps_flow_table); - if (flow_table && flow_id <= flow_table->mask) { + if (flow_table && flow_id < (1UL << flow_table->log)) { rflow = &flow_table->flows[flow_id]; cpu = READ_ONCE(rflow->cpu); if (READ_ONCE(rflow->filter) == filter_id && cpu < nr_cpu_ids && ((int)(READ_ONCE(per_cpu(softnet_data, cpu).input_queue_head) - READ_ONCE(rflow->last_qtail)) < - (int)(10 * flow_table->mask))) + (int)(10 << flow_table->log))) expire = false; } rcu_read_unlock(); @@ -5738,7 +5773,8 @@ static int __netif_receive_skb_core(struct sk_buff **pskb, bool pfmemalloc, if (pfmemalloc) goto skip_taps; - list_for_each_entry_rcu(ptype, &net_hotdata.ptype_all, list) { + list_for_each_entry_rcu(ptype, &dev_net_rcu(skb->dev)->ptype_all, + list) { if (pt_prev) ret = deliver_skb(skb, pt_prev, orig_dev); pt_prev = ptype; @@ -5850,6 +5886,14 @@ static int __netif_receive_skb_core(struct sk_buff **pskb, bool pfmemalloc, deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type, &ptype_base[ntohs(type) & PTYPE_HASH_MASK]); + + /* orig_dev and skb->dev could belong to different netns; + * Even in such case we need to traverse only the list + * coming from skb->dev, as the ptype owner (packet socket) + * will use dev_net(skb->dev) to do namespace filtering. 
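rfs_slot() above replaces low-bits masking (hash & flow_table->mask) with hash_32(), which multiplies by the 32-bit golden ratio and keeps the top bits, so slot choice draws on the whole flow hash rather than only its low bits. A user-space sketch; GOLDEN_RATIO_32 and the shift follow include/linux/hash.h, the sample hashes are arbitrary:

#include <stdint.h>
#include <stdio.h>

#define GOLDEN_RATIO_32 0x61C88647u /* as in include/linux/hash.h */

/* user-space rendition of the kernel's hash_32() */
static uint32_t hash_32(uint32_t val, unsigned int bits)
{
        return (val * GOLDEN_RATIO_32) >> (32 - bits);
}

/* mirrors the new rfs_slot(): the table holds 1 << log flows */
static uint32_t rfs_slot(uint32_t hash, unsigned int log)
{
        return hash_32(hash, log);
}

int main(void)
{
        unsigned int log = 9; /* 512-entry flow table */
        uint32_t h1 = 0x12340000, h2 = 0x56780000;

        /* identical low bits: the old masking collides (0 vs 0),
         * the multiplicative hash spreads the two flows apart */
        printf("mask:    %u vs %u\n",
               h1 & ((1u << log) - 1), h2 & ((1u << log) - 1));
        printf("hash_32: %u vs %u\n", rfs_slot(h1, log), rfs_slot(h2, log));
        return 0;
}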
+ */ + deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type, + &dev_net_rcu(skb->dev)->ptype_specific); } deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type, @@ -6060,7 +6104,7 @@ static int generic_xdp_install(struct net_device *dev, struct netdev_bpf *xdp) static_branch_dec(&generic_xdp_needed_key); } else if (new && !old) { static_branch_inc(&generic_xdp_needed_key); - dev_disable_lro(dev); + netif_disable_lro(dev); dev_disable_gro_hw(dev); } break; @@ -6190,16 +6234,18 @@ EXPORT_SYMBOL(netif_receive_skb_list); static void flush_backlog(struct work_struct *work) { struct sk_buff *skb, *tmp; + struct sk_buff_head list; struct softnet_data *sd; + __skb_queue_head_init(&list); local_bh_disable(); sd = this_cpu_ptr(&softnet_data); backlog_lock_irq_disable(sd); skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) { - if (skb->dev->reg_state == NETREG_UNREGISTERING) { + if (READ_ONCE(skb->dev->reg_state) == NETREG_UNREGISTERING) { __skb_unlink(skb, &sd->input_pkt_queue); - dev_kfree_skb_irq(skb); + __skb_queue_tail(&list, skb); rps_input_queue_head_incr(sd); } } @@ -6207,14 +6253,16 @@ static void flush_backlog(struct work_struct *work) local_lock_nested_bh(&softnet_data.process_queue_bh_lock); skb_queue_walk_safe(&sd->process_queue, skb, tmp) { - if (skb->dev->reg_state == NETREG_UNREGISTERING) { + if (READ_ONCE(skb->dev->reg_state) == NETREG_UNREGISTERING) { __skb_unlink(skb, &sd->process_queue); - kfree_skb(skb); + __skb_queue_tail(&list, skb); rps_input_queue_head_incr(sd); } } local_unlock_nested_bh(&softnet_data.process_queue_bh_lock); local_bh_enable(); + + __skb_queue_purge_reason(&list, SKB_DROP_REASON_DEV_READY); } static bool flush_required(int cpu) @@ -6474,11 +6522,12 @@ bool napi_complete_done(struct napi_struct *n, int work_done) * the guarantee we will be called later. */ if (unlikely(n->state & (NAPIF_STATE_NPSVC | - NAPIF_STATE_IN_BUSY_POLL))) + NAPIF_STATE_IN_BUSY_POLL | + NAPIF_STATE_SCHED_THREADED_BUSY_POLL))) return false; if (work_done) { - if (n->gro_bitmask) + if (n->gro.bitmask) timeout = napi_get_gro_flush_timeout(n); n->defer_hard_irqs_count = napi_get_defer_hard_irqs(n); } @@ -6488,15 +6537,14 @@ bool napi_complete_done(struct napi_struct *n, int work_done) if (timeout) ret = false; } - if (n->gro_bitmask) { - /* When the NAPI instance uses a timeout and keeps postponing - * it, we need to bound somehow the time packets are kept in - * the GRO layer - */ - napi_gro_flush(n, !!timeout); - } - gro_normal_list(n); + /* + * When the NAPI instance uses a timeout and keeps postponing + * it, we need to bound somehow the time packets are kept in + * the GRO layer. + */ + gro_flush(&n->gro, !!timeout); + gro_normal_list(&n->gro); if (unlikely(!list_empty(&n->poll_list))) { /* If n->poll_list is not empty, we need to mask irqs */ @@ -6555,24 +6603,25 @@ static void skb_defer_free_flush(struct softnet_data *sd) } } +static void __napi_gro_flush_helper(struct napi_struct *napi) +{ + /* Flush too old packets. If HZ < 1000, flush all packets */ + gro_flush(&napi->gro, HZ >= 1000); + gro_normal_list(&napi->gro); +} + #if defined(CONFIG_NET_RX_BUSY_POLL) static void __busy_poll_stop(struct napi_struct *napi, bool skip_schedule) { if (!skip_schedule) { - gro_normal_list(napi); + gro_normal_list(&napi->gro); __napi_schedule(napi); return; } - if (napi->gro_bitmask) { - /* flush too old packets - * If HZ < 1000, flush all packets. 
- */ - napi_gro_flush(napi, HZ >= 1000); - } + __napi_gro_flush_helper(napi); - gro_normal_list(napi); clear_bit(NAPI_STATE_SCHED, &napi->state); } @@ -6679,7 +6728,7 @@ static void __napi_busy_loop(unsigned int napi_id, } work = napi_poll(napi, budget); trace_napi_poll(napi, work, budget); - gro_normal_list(napi); + gro_normal_list(&napi->gro); count: if (work > 0) __NET_ADD_STATS(dev_net(napi->dev), @@ -6779,6 +6828,8 @@ void napi_resume_irqs(unsigned int napi_id) static void __napi_hash_add_with_id(struct napi_struct *napi, unsigned int napi_id) { + napi->gro.cached_napi_id = napi_id; + WRITE_ONCE(napi->napi_id, napi_id); hlist_add_head_rcu(&napi->napi_hash_node, &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]); @@ -6806,7 +6857,7 @@ static void napi_hash_add(struct napi_struct *napi) /* 0..NR_CPUS range is reserved for sender_cpu use */ do { - if (unlikely(++napi_gen_id < MIN_NAPI_ID)) + if (unlikely(!napi_id_valid(++napi_gen_id))) napi_gen_id = MIN_NAPI_ID; } while (napi_by_id(napi_gen_id)); @@ -6847,20 +6898,48 @@ static enum hrtimer_restart napi_watchdog(struct hrtimer *timer) return HRTIMER_NORESTART; } -static void init_gro_hash(struct napi_struct *napi) +static void napi_set_threaded_state(struct napi_struct *napi, + enum netdev_napi_threaded threaded) { - int i; + unsigned long val; - for (i = 0; i < GRO_HASH_BUCKETS; i++) { - INIT_LIST_HEAD(&napi->gro_hash[i].list); - napi->gro_hash[i].count = 0; + val = 0; + if (threaded == NETDEV_NAPI_THREADED_BUSY_POLL_ENABLE) + val |= NAPIF_STATE_THREADED_BUSY_POLL; + if (threaded) + val |= NAPIF_STATE_THREADED; + set_mask_bits(&napi->state, NAPIF_STATE_THREADED_BUSY_POLL_MASK, val); +} + +int napi_set_threaded(struct napi_struct *napi, + enum netdev_napi_threaded threaded) +{ + if (napi->dev->threaded) + return -EINVAL; + + if (threaded) { + if (!napi->thread) { + int err = napi_kthread_create(napi); + + if (err) + return err; + } } - napi->gro_bitmask = 0; + + if (napi->config) + napi->config->threaded = threaded; + + /* Make sure kthread is created before THREADED bit is set. */ + smp_mb__before_atomic(); + napi_set_threaded_state(napi, threaded); + + return 0; } -int dev_set_threaded(struct net_device *dev, bool threaded) +int dev_set_threaded(struct net_device *dev, enum netdev_napi_threaded threaded) { struct napi_struct *napi; + unsigned long val; int err = 0; netdev_assert_locked_or_invisible(dev); @@ -6868,12 +6947,22 @@ int dev_set_threaded(struct net_device *dev, bool threaded) if (dev->threaded == threaded) return 0; + val = 0; if (threaded) { + /* Check if threaded is set at napi level already */ + list_for_each_entry(napi, &dev->napi_list, dev_list) + if (test_bit(NAPI_STATE_THREADED, &napi->state)) + return -EINVAL; + + val |= NAPIF_STATE_THREADED; + if (threaded == NETDEV_NAPI_THREADED_BUSY_POLL_ENABLE) + val |= NAPIF_STATE_THREADED_BUSY_POLL; + list_for_each_entry(napi, &dev->napi_list, dev_list) { if (!napi->thread) { err = napi_kthread_create(napi); if (err) { - threaded = false; + threaded = NETDEV_NAPI_THREADED_DISABLE; break; } } @@ -6892,9 +6981,13 @@ int dev_set_threaded(struct net_device *dev, bool threaded) * polled. In this case, the switch between threaded mode and * softirq mode will happen in the next round of napi_schedule(). * This should not cause hiccups/stalls to the live traffic. + * + * Switch to busy_poll threaded napi will occur after the threaded + * napi is scheduled. 
*/ list_for_each_entry(napi, &dev->napi_list, dev_list) - assign_bit(NAPI_STATE_THREADED, &napi->state, threaded); + set_mask_bits(&napi->state, + NAPIF_STATE_THREADED_BUSY_POLL_MASK, val); return err; } @@ -6919,8 +7012,7 @@ void netif_queue_set_napi(struct net_device *dev, unsigned int queue_index, if (WARN_ON_ONCE(napi && !napi->dev)) return; - if (dev->reg_state >= NETREG_REGISTERED) - ASSERT_RTNL(); + netdev_ops_assert_locked_or_invisible(dev); switch (type) { case NETDEV_QUEUE_TYPE_RX: @@ -6937,11 +7029,175 @@ void netif_queue_set_napi(struct net_device *dev, unsigned int queue_index, } EXPORT_SYMBOL(netif_queue_set_napi); +static void +netif_napi_irq_notify(struct irq_affinity_notify *notify, + const cpumask_t *mask) +{ + struct napi_struct *napi = + container_of(notify, struct napi_struct, notify); +#ifdef CONFIG_RFS_ACCEL + struct cpu_rmap *rmap = napi->dev->rx_cpu_rmap; + int err; +#endif + + if (napi->config && napi->dev->irq_affinity_auto) + cpumask_copy(&napi->config->affinity_mask, mask); + +#ifdef CONFIG_RFS_ACCEL + if (napi->dev->rx_cpu_rmap_auto) { + err = cpu_rmap_update(rmap, napi->napi_rmap_idx, mask); + if (err) + netdev_warn(napi->dev, "RMAP update failed (%d)\n", + err); + } +#endif +} + +#ifdef CONFIG_RFS_ACCEL +static void netif_napi_affinity_release(struct kref *ref) +{ + struct napi_struct *napi = + container_of(ref, struct napi_struct, notify.kref); + struct cpu_rmap *rmap = napi->dev->rx_cpu_rmap; + + netdev_assert_locked(napi->dev); + WARN_ON(test_and_clear_bit(NAPI_STATE_HAS_NOTIFIER, + &napi->state)); + + if (!napi->dev->rx_cpu_rmap_auto) + return; + rmap->obj[napi->napi_rmap_idx] = NULL; + napi->napi_rmap_idx = -1; + cpu_rmap_put(rmap); +} + +int netif_enable_cpu_rmap(struct net_device *dev, unsigned int num_irqs) +{ + if (dev->rx_cpu_rmap_auto) + return 0; + + dev->rx_cpu_rmap = alloc_irq_cpu_rmap(num_irqs); + if (!dev->rx_cpu_rmap) + return -ENOMEM; + + dev->rx_cpu_rmap_auto = true; + return 0; +} +EXPORT_SYMBOL(netif_enable_cpu_rmap); + +static void netif_del_cpu_rmap(struct net_device *dev) +{ + struct cpu_rmap *rmap = dev->rx_cpu_rmap; + + if (!dev->rx_cpu_rmap_auto) + return; + + /* Free the rmap */ + cpu_rmap_put(rmap); + dev->rx_cpu_rmap = NULL; + dev->rx_cpu_rmap_auto = false; +} + +#else +static void netif_napi_affinity_release(struct kref *ref) +{ +} + +int netif_enable_cpu_rmap(struct net_device *dev, unsigned int num_irqs) +{ + return 0; +} +EXPORT_SYMBOL(netif_enable_cpu_rmap); + +static void netif_del_cpu_rmap(struct net_device *dev) +{ +} +#endif + +void netif_set_affinity_auto(struct net_device *dev) +{ + unsigned int i, maxqs, numa; + + maxqs = max(dev->num_tx_queues, dev->num_rx_queues); + numa = dev_to_node(&dev->dev); + + for (i = 0; i < maxqs; i++) + cpumask_set_cpu(cpumask_local_spread(i, numa), + &dev->napi_config[i].affinity_mask); + + dev->irq_affinity_auto = true; +} +EXPORT_SYMBOL(netif_set_affinity_auto); + +void netif_napi_set_irq_locked(struct napi_struct *napi, int irq) +{ + int rc; + + netdev_assert_locked_or_invisible(napi->dev); + + if (napi->irq == irq) + return; + + /* Remove existing resources */ + if (test_and_clear_bit(NAPI_STATE_HAS_NOTIFIER, &napi->state)) + irq_set_affinity_notifier(napi->irq, NULL); + + napi->irq = irq; + if (irq < 0 || + (!napi->dev->rx_cpu_rmap_auto && !napi->dev->irq_affinity_auto)) + return; + + /* Abort for buggy drivers */ + if (napi->dev->irq_affinity_auto && WARN_ON_ONCE(!napi->config)) + return; + +#ifdef CONFIG_RFS_ACCEL + if (napi->dev->rx_cpu_rmap_auto) { + rc = 
cpu_rmap_add(napi->dev->rx_cpu_rmap, napi); + if (rc < 0) + return; + + cpu_rmap_get(napi->dev->rx_cpu_rmap); + napi->napi_rmap_idx = rc; + } +#endif + + /* Use core IRQ notifier */ + napi->notify.notify = netif_napi_irq_notify; + napi->notify.release = netif_napi_affinity_release; + rc = irq_set_affinity_notifier(irq, &napi->notify); + if (rc) { + netdev_warn(napi->dev, "Unable to set IRQ notifier (%d)\n", + rc); + goto put_rmap; + } + + set_bit(NAPI_STATE_HAS_NOTIFIER, &napi->state); + return; + +put_rmap: +#ifdef CONFIG_RFS_ACCEL + if (napi->dev->rx_cpu_rmap_auto) { + napi->dev->rx_cpu_rmap->obj[napi->napi_rmap_idx] = NULL; + cpu_rmap_put(napi->dev->rx_cpu_rmap); + napi->napi_rmap_idx = -1; + } +#endif + napi->notify.notify = NULL; + napi->notify.release = NULL; +} +EXPORT_SYMBOL(netif_napi_set_irq_locked); + static void napi_restore_config(struct napi_struct *n) { n->defer_hard_irqs = n->config->defer_hard_irqs; n->gro_flush_timeout = n->config->gro_flush_timeout; n->irq_suspend_timeout = n->config->irq_suspend_timeout; + + if (n->dev->irq_affinity_auto && + test_bit(NAPI_STATE_HAS_NOTIFIER, &n->state)) + irq_set_affinity(n->irq, &n->config->affinity_mask); + /* a NAPI ID might be stored in the config, if so use it. if not, use * napi_hash_add to generate one for us. */ @@ -6951,6 +7207,8 @@ static void napi_restore_config(struct napi_struct *n) napi_hash_add(n); n->config->napi_id = n->napi_id; } + + napi_set_threaded(n, n->config->threaded); } static void napi_save_config(struct napi_struct *n) @@ -6977,7 +7235,7 @@ netif_napi_dev_list_add(struct net_device *dev, struct napi_struct *napi) higher = &dev->napi_list; list_for_each_entry(pos, &dev->napi_list, dev_list) { - if (pos->napi_id >= MIN_NAPI_ID) + if (napi_id_valid(pos->napi_id)) pos_id = pos->napi_id; else if (pos->config) pos_id = pos->config->napi_id; @@ -7021,10 +7279,8 @@ void netif_napi_add_weight_locked(struct net_device *dev, INIT_HLIST_NODE(&napi->napi_hash_node); hrtimer_init(&napi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED); napi->timer.function = napi_watchdog; - init_gro_hash(napi); + gro_init(&napi->gro); napi->skb = NULL; - INIT_LIST_HEAD(&napi->rx_list); - napi->rx_count = 0; napi->poll = poll; if (weight > NAPI_POLL_WEIGHT) netdev_err_once(dev, "%s() called with weight %d\n", __func__, @@ -7050,8 +7306,12 @@ void netif_napi_add_weight_locked(struct net_device *dev, * Clear dev->threaded if kthread creation failed so that * threaded mode will not be enabled in napi_enable(). 
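Taken together, netif_enable_cpu_rmap(), netif_set_affinity_auto() and netif_napi_set_irq_locked() let a driver hand IRQ affinity and aRFS rmap maintenance to the core. A probe-time sketch under stated assumptions: the mydrv_* names, the fixed array sizes and the one-NAPI-per-vector layout are hypothetical, and the NAPIs are assumed to have been added with a config slot already:

#include <linux/netdevice.h>

/* Hypothetical per-device state: one NAPI and one MSI-X vector per
 * RX queue, with the vector numbers saved at request time. */
struct mydrv_priv {
        struct napi_struct napi[8];
        int irq[8];
};

static int mydrv_init_irqs(struct net_device *dev, struct mydrv_priv *priv,
                           unsigned int nvec)
{
        unsigned int i;
        int err;

        err = netif_enable_cpu_rmap(dev, nvec); /* allocates dev->rx_cpu_rmap */
        if (err)
                return err;

        netif_set_affinity_auto(dev); /* core seeds per-NAPI affinity masks */

        netdev_lock(dev);
        for (i = 0; i < nvec; i++)
                /* registers the core IRQ notifier: affinity changes now
                 * update napi->config->affinity_mask and the rmap */
                netif_napi_set_irq_locked(&priv->napi[i], priv->irq[i]);
        netdev_unlock(dev);

        return 0;
}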
*/ - if (dev->threaded && napi_kthread_create(napi)) - dev->threaded = false; + if (dev->threaded) { + if (napi_kthread_create(napi)) + dev->threaded = false; + else + napi_set_threaded_state(napi, dev->threaded); + } netif_napi_set_irq_locked(napi, -1); } EXPORT_SYMBOL(netif_napi_add_weight_locked); @@ -7073,7 +7333,9 @@ void napi_disable_locked(struct napi_struct *n) } new = val | NAPIF_STATE_SCHED | NAPIF_STATE_NPSVC; - new &= ~(NAPIF_STATE_THREADED | NAPIF_STATE_PREFER_BUSY_POLL); + new &= ~(NAPIF_STATE_THREADED + | NAPIF_STATE_THREADED_BUSY_POLL + | NAPIF_STATE_PREFER_BUSY_POLL); } while (!try_cmpxchg(&n->state, &val, new)); hrtimer_cancel(&n->timer); @@ -7117,7 +7379,7 @@ void napi_enable_locked(struct napi_struct *n) new = val & ~(NAPIF_STATE_SCHED | NAPIF_STATE_NPSVC); if (n->dev->threaded && n->thread) - new |= NAPIF_STATE_THREADED; + napi_set_threaded_state(n, n->dev->threaded); } while (!try_cmpxchg(&n->state, &val, new)); } EXPORT_SYMBOL(napi_enable_locked); @@ -7138,19 +7400,6 @@ void napi_enable(struct napi_struct *n) } EXPORT_SYMBOL(napi_enable); -static void flush_gro_hash(struct napi_struct *napi) -{ - int i; - - for (i = 0; i < GRO_HASH_BUCKETS; i++) { - struct sk_buff *skb, *n; - - list_for_each_entry_safe(skb, n, &napi->gro_hash[i].list, list) - kfree_skb(skb); - napi->gro_hash[i].count = 0; - } -} - /* Must be called in process context */ void __netif_napi_del_locked(struct napi_struct *napi) { @@ -7159,6 +7408,12 @@ void __netif_napi_del_locked(struct napi_struct *napi) if (!test_and_clear_bit(NAPI_STATE_LISTED, &napi->state)) return; + /* Make sure NAPI is disabled (or was never enabled). */ + WARN_ON(!test_bit(NAPI_STATE_SCHED, &napi->state)); + + if (test_and_clear_bit(NAPI_STATE_HAS_NOTIFIER, &napi->state)) + irq_set_affinity_notifier(napi->irq, NULL); + if (napi->config) { napi->index = -1; napi->config = NULL; @@ -7167,8 +7422,7 @@ void __netif_napi_del_locked(struct napi_struct *napi) list_del_rcu(&napi->dev_list); napi_free_frags(napi); - flush_gro_hash(napi); - napi->gro_bitmask = 0; + gro_cleanup(&napi->gro); if (napi->thread) { kthread_stop(napi->thread); @@ -7227,14 +7481,7 @@ static int __napi_poll(struct napi_struct *n, bool *repoll) return work; } - if (n->gro_bitmask) { - /* flush too old packets - * If HZ < 1000, flush all packets. - */ - napi_gro_flush(n, HZ >= 1000); - } - - gro_normal_list(n); + __napi_gro_flush_helper(n); /* Some drivers may have called napi_schedule * prior to exhausting their budget. @@ -7294,7 +7541,7 @@ static int napi_thread_wait(struct napi_struct *napi) return -1; } -static void napi_threaded_poll_loop(struct napi_struct *napi) +static void napi_threaded_poll_loop(struct napi_struct *napi, bool busy_poll) { struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx; struct softnet_data *sd; @@ -7323,22 +7570,53 @@ static void napi_threaded_poll_loop(struct napi_struct *napi) } skb_defer_free_flush(sd); bpf_net_ctx_clear(bpf_net_ctx); + + /* Push the skbs up the stack if busy polling. */ + if (busy_poll) + __napi_gro_flush_helper(napi); local_bh_enable(); - if (!repoll) + /* If busy polling then do not break here because we need to + * call cond_resched and rcu_softirq_qs_periodic to prevent + * watchdog warnings. 
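The WARN_ON added to __netif_napi_del_locked() above encodes an ordering rule: a NAPI must be disabled, or never have been enabled, before it is deleted. A sketch of a conforming teardown path; mydrv_teardown_napi() is hypothetical:

#include <linux/netdevice.h>

static void mydrv_teardown_napi(struct napi_struct *napi)
{
        napi_disable(napi);   /* parks pollers, leaves NAPI_STATE_SCHED set */
        netif_napi_del(napi); /* passes the new WARN_ON; also stops the
                               * kthread and runs gro_cleanup() */
}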
+ */ + if (!repoll && !busy_poll) break; rcu_softirq_qs_periodic(last_qs); cond_resched(); + + if (!repoll) + break; } } static int napi_threaded_poll(void *data) { struct napi_struct *napi = data; + bool busy_poll_sched; + unsigned long val; + bool busy_poll; + + while (!napi_thread_wait(napi)) { + /* Once woken up, this means that we are scheduled as threaded + * napi and this thread owns the napi context, if busy poll + * state is set then we busy poll this napi. + */ + val = READ_ONCE(napi->state); + busy_poll = val & NAPIF_STATE_THREADED_BUSY_POLL; + busy_poll_sched = val & NAPIF_STATE_SCHED_THREADED_BUSY_POLL; - while (!napi_thread_wait(napi)) - napi_threaded_poll_loop(napi); + /* Do not busy poll if napi is disabled. */ + if (unlikely(val & NAPIF_STATE_DISABLE)) + busy_poll = false; + + if (busy_poll != busy_poll_sched) + assign_bit(NAPI_STATE_SCHED_THREADED_BUSY_POLL, + &napi->state, busy_poll); + + napi_threaded_poll_loop(napi, busy_poll); + } return 0; } @@ -9093,7 +9371,7 @@ int dev_set_promiscuity(struct net_device *dev, int inc) } EXPORT_SYMBOL(dev_set_promiscuity); -static int __dev_set_allmulti(struct net_device *dev, int inc, bool notify) +int netif_set_allmulti(struct net_device *dev, int inc, bool notify) { unsigned int old_flags = dev->flags, old_gflags = dev->gflags; unsigned int allmulti, flags; @@ -9128,25 +9406,6 @@ static int __dev_set_allmulti(struct net_device *dev, int inc, bool notify) return 0; } -/** - * dev_set_allmulti - update allmulti count on a device - * @dev: device - * @inc: modifier - * - * Add or remove reception of all multicast frames to a device. While the - * count in the device remains above zero the interface remains listening - * to all interfaces. Once it hits zero the device reverts back to normal - * filtering operation. A negative @inc value is used to drop the counter - * when releasing a resource needing all multicasts. - * Return 0 if successful or a negative errno code on error. - */ - -int dev_set_allmulti(struct net_device *dev, int inc) -{ - return __dev_set_allmulti(dev, inc, true); -} -EXPORT_SYMBOL(dev_set_allmulti); - /* * Upload unicast and multicast address lists to device and * configure RX filtering. When the device doesn't support unicast @@ -9262,7 +9521,7 @@ int __dev_change_flags(struct net_device *dev, unsigned int flags, if ((flags ^ dev->gflags) & IFF_PROMISC) { int inc = (flags & IFF_PROMISC) ? 1 : -1; - unsigned int old_flags = dev->flags; + old_flags = dev->flags; dev->gflags ^= IFF_PROMISC; @@ -9279,7 +9538,7 @@ int __dev_change_flags(struct net_device *dev, unsigned int flags, int inc = (flags & IFF_ALLMULTI) ? 1 : -1; dev->gflags ^= IFF_ALLMULTI; - __dev_set_allmulti(dev, inc, false); + netif_set_allmulti(dev, inc, false); } return ret; @@ -9314,17 +9573,8 @@ void __dev_notify_flags(struct net_device *dev, unsigned int old_flags, } } -/** - * dev_change_flags - change device settings - * @dev: device - * @flags: device state flags - * @extack: netlink extended ack - * - * Change settings on device based state flags. The flags are - * in the userspace exported format. 
- */ -int dev_change_flags(struct net_device *dev, unsigned int flags, - struct netlink_ext_ack *extack) +int netif_change_flags(struct net_device *dev, unsigned int flags, + struct netlink_ext_ack *extack) { int ret; unsigned int changes, old_flags = dev->flags, old_gflags = dev->gflags; @@ -9337,7 +9587,6 @@ int dev_change_flags(struct net_device *dev, unsigned int flags, __dev_notify_flags(dev, old_flags, changes, 0, NULL); return ret; } -EXPORT_SYMBOL(dev_change_flags); int __dev_set_mtu(struct net_device *dev, int new_mtu) { @@ -9369,15 +9618,15 @@ int dev_validate_mtu(struct net_device *dev, int new_mtu, } /** - * dev_set_mtu_ext - Change maximum transfer unit + * netif_set_mtu_ext - Change maximum transfer unit * @dev: device * @new_mtu: new transfer unit * @extack: netlink extended ack * * Change the maximum transfer size of the network device. */ -int dev_set_mtu_ext(struct net_device *dev, int new_mtu, - struct netlink_ext_ack *extack) +int netif_set_mtu_ext(struct net_device *dev, int new_mtu, + struct netlink_ext_ack *extack) { int err, orig_mtu; @@ -9415,25 +9664,20 @@ int dev_set_mtu_ext(struct net_device *dev, int new_mtu, return err; } -int dev_set_mtu(struct net_device *dev, int new_mtu) +int netif_set_mtu(struct net_device *dev, int new_mtu) { struct netlink_ext_ack extack; int err; memset(&extack, 0, sizeof(extack)); - err = dev_set_mtu_ext(dev, new_mtu, &extack); + err = netif_set_mtu_ext(dev, new_mtu, &extack); if (err && extack._msg) net_err_ratelimited("%s: %s\n", dev->name, extack._msg); return err; } -EXPORT_SYMBOL(dev_set_mtu); +EXPORT_SYMBOL(netif_set_mtu); -/** - * dev_change_tx_queue_len - Change TX queue length of a netdevice - * @dev: device - * @new_len: new tx queue length - */ -int dev_change_tx_queue_len(struct net_device *dev, unsigned long new_len) +int netif_change_tx_queue_len(struct net_device *dev, unsigned long new_len) { unsigned int orig_len = dev->tx_queue_len; int res; @@ -9460,12 +9704,7 @@ int dev_change_tx_queue_len(struct net_device *dev, unsigned long new_len) return res; } -/** - * dev_set_group - Change group this device belongs to - * @dev: device - * @new_group: group this device should belong to - */ -void dev_set_group(struct net_device *dev, int new_group) +void netif_set_group(struct net_device *dev, int new_group) { dev->group = new_group; } @@ -9491,16 +9730,8 @@ int dev_pre_changeaddr_notify(struct net_device *dev, const char *addr, } EXPORT_SYMBOL(dev_pre_changeaddr_notify); -/** - * dev_set_mac_address - Change Media Access Control Address - * @dev: device - * @sa: new address - * @extack: netlink extended ack - * - * Change the hardware (MAC) address of the device - */ -int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa, - struct netlink_ext_ack *extack) +int netif_set_mac_address(struct net_device *dev, struct sockaddr *sa, + struct netlink_ext_ack *extack) { const struct net_device_ops *ops = dev->netdev_ops; int err; @@ -9524,22 +9755,9 @@ int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa, add_device_randomness(dev->dev_addr, dev->addr_len); return 0; } -EXPORT_SYMBOL(dev_set_mac_address); DECLARE_RWSEM(dev_addr_sem); -int dev_set_mac_address_user(struct net_device *dev, struct sockaddr *sa, - struct netlink_ext_ack *extack) -{ - int ret; - - down_write(&dev_addr_sem); - ret = dev_set_mac_address(dev, sa, extack); - up_write(&dev_addr_sem); - return ret; -} -EXPORT_SYMBOL(dev_set_mac_address_user); - int dev_get_mac_address(struct sockaddr *sa, struct net *net, char *dev_name) { size_t 
size = sizeof(sa->sa_data_min); @@ -9568,14 +9786,7 @@ int dev_get_mac_address(struct sockaddr *sa, struct net *net, char *dev_name) } EXPORT_SYMBOL(dev_get_mac_address); -/** - * dev_change_carrier - Change device carrier - * @dev: device - * @new_carrier: new value - * - * Change device carrier - */ -int dev_change_carrier(struct net_device *dev, bool new_carrier) +int netif_change_carrier(struct net_device *dev, bool new_carrier) { const struct net_device_ops *ops = dev->netdev_ops; @@ -9686,13 +9897,7 @@ bool netdev_port_same_parent_id(struct net_device *a, struct net_device *b) } EXPORT_SYMBOL(netdev_port_same_parent_id); -/** - * dev_change_proto_down - set carrier according to proto_down. - * - * @dev: device - * @proto_down: new value - */ -int dev_change_proto_down(struct net_device *dev, bool proto_down) +int netif_change_proto_down(struct net_device *dev, bool proto_down) { if (!dev->change_proto_down) return -EOPNOTSUPP; @@ -9707,14 +9912,14 @@ int dev_change_proto_down(struct net_device *dev, bool proto_down) } /** - * dev_change_proto_down_reason - proto down reason + * netdev_change_proto_down_reason_locked - proto down reason * * @dev: device * @mask: proto down mask * @value: proto down value */ -void dev_change_proto_down_reason(struct net_device *dev, unsigned long mask, - u32 value) +void netdev_change_proto_down_reason_locked(struct net_device *dev, + unsigned long mask, u32 value) { u32 proto_down_reason; int b; @@ -9803,7 +10008,7 @@ u8 dev_xdp_sb_prog_count(struct net_device *dev) return count; } -int dev_xdp_propagate(struct net_device *dev, struct netdev_bpf *bpf) +int netif_xdp_propagate(struct net_device *dev, struct netdev_bpf *bpf) { if (!dev->netdev_ops->ndo_bpf) return -EOPNOTSUPP; @@ -9823,7 +10028,6 @@ int dev_xdp_propagate(struct net_device *dev, struct netdev_bpf *bpf) return dev->netdev_ops->ndo_bpf(dev, bpf); } -EXPORT_SYMBOL_GPL(dev_xdp_propagate); u32 dev_xdp_prog_id(struct net_device *dev, enum bpf_xdp_mode mode) { @@ -9853,6 +10057,8 @@ static int dev_xdp_install(struct net_device *dev, enum bpf_xdp_mode mode, struct netdev_bpf xdp; int err; + netdev_ops_assert_locked(dev); + if (dev->cfg->hds_config == ETHTOOL_TCP_DATA_SPLIT_ENABLED && prog && !prog->aux->xdp_has_frags) { NL_SET_ERR_MSG(extack, "unable to install XDP to device using tcp-data-split"); @@ -10085,7 +10291,9 @@ static void bpf_xdp_link_release(struct bpf_link *link) * already NULL, in which case link was already auto-detached */ if (xdp_link->dev) { + netdev_lock_ops(xdp_link->dev); WARN_ON(dev_xdp_detach_link(xdp_link->dev, NULL, xdp_link)); + netdev_unlock_ops(xdp_link->dev); xdp_link->dev = NULL; } @@ -10167,10 +10375,12 @@ static int bpf_xdp_link_update(struct bpf_link *link, struct bpf_prog *new_prog, goto out_unlock; } + netdev_lock_ops(xdp_link->dev); mode = dev_xdp_mode(xdp_link->dev, xdp_link->flags); bpf_op = dev_xdp_bpf_op(xdp_link->dev, mode); err = dev_xdp_install(xdp_link->dev, mode, bpf_op, NULL, xdp_link->flags, new_prog); + netdev_unlock_ops(xdp_link->dev); if (err) goto out_unlock; @@ -10296,7 +10506,7 @@ u32 dev_get_min_mp_channel_count(const struct net_device *dev) { int i; - ASSERT_RTNL(); + netdev_ops_assert_locked(dev); for (i = dev->real_num_rx_queues - 1; i >= 0; i--) if (dev->_rx[i].mp_params.mp_priv) @@ -10522,6 +10732,7 @@ int __netdev_update_features(struct net_device *dev) int err = -1; ASSERT_RTNL(); + netdev_ops_assert_locked(dev); features = netdev_get_wanted_features(dev); @@ -10955,7 +11166,9 @@ int register_netdevice(struct net_device *dev) if (ret) 
goto err_uninit_notify; + netdev_lock_ops(dev); __netdev_update_features(dev); + netdev_unlock_ops(dev); /* * Default initial state at registry is that the @@ -11114,8 +11327,11 @@ static struct net_device *netdev_wait_allrefs_any(struct list_head *list) rtnl_lock(); /* Rebroadcast unregister notification */ - list_for_each_entry(dev, list, todo_list) + list_for_each_entry(dev, list, todo_list) { + netdev_lock_ops(dev); call_netdevice_notifiers(NETDEV_UNREGISTER, dev); + netdev_unlock_ops(dev); + } __rtnl_unlock(); rcu_barrier(); @@ -11200,9 +11416,8 @@ void netdev_run_todo(void) list_replace_init(&net_unlink_list, &unlink_list); while (!list_empty(&unlink_list)) { - struct net_device *dev = list_first_entry(&unlink_list, - struct net_device, - unlink_list); + dev = list_first_entry(&unlink_list, struct net_device, + unlink_list); list_del_init(&dev->unlink_list); dev->nested_level = dev->lower_level - 1; } @@ -11712,6 +11927,8 @@ void free_netdev(struct net_device *dev) netdev_napi_exit(dev); + netif_del_cpu_rmap(dev); + ref_tracker_dir_exit(&dev->refcnt_tracker); #ifdef CONFIG_PCPU_DEV_REFCNT free_percpu(dev->pcpu_refcnt); @@ -11826,6 +12043,19 @@ void unregister_netdevice_queue(struct net_device *dev, struct list_head *head) } EXPORT_SYMBOL(unregister_netdevice_queue); +static void dev_memory_provider_uninstall(struct net_device *dev) +{ + unsigned int i; + + for (i = 0; i < dev->real_num_rx_queues; i++) { + struct netdev_rx_queue *rxq = &dev->_rx[i]; + struct pp_memory_provider_params *p = &rxq->mp_params; + + if (p->mp_ops && p->mp_ops->uninstall) + p->mp_ops->uninstall(rxq->mp_params.mp_priv, rxq); + } +} + void unregister_netdevice_many_notify(struct list_head *head, u32 portid, const struct nlmsghdr *nlh) { @@ -11857,11 +12087,14 @@ void unregister_netdevice_many_notify(struct list_head *head, } /* If device is running, close it first. */ - list_for_each_entry(dev, head, unreg_list) + list_for_each_entry(dev, head, unreg_list) { list_add_tail(&dev->close_list, &close_head); + netdev_lock_ops(dev); + } dev_close_many(&close_head, true); list_for_each_entry(dev, head, unreg_list) { + netdev_unlock_ops(dev); /* And unlink it from device chain. */ unlist_netdevice(dev); netdev_lock(dev); @@ -11878,16 +12111,20 @@ void unregister_netdevice_many_notify(struct list_head *head, /* Shutdown queueing discipline. */ dev_shutdown(dev); dev_tcx_uninstall(dev); + netdev_lock_ops(dev); dev_xdp_uninstall(dev); + dev_memory_provider_uninstall(dev); + netdev_unlock_ops(dev); bpf_dev_bound_netdev_unregister(dev); - dev_dmabuf_uninstall(dev); netdev_offload_xstats_disable_all(dev); /* Notify protocols, that we are about to destroy * this device. They should clean all the things. */ + netdev_lock_ops(dev); call_netdevice_notifiers(NETDEV_UNREGISTER, dev); + netdev_unlock_ops(dev); if (!dev->rtnl_link_ops || dev->rtnl_link_state == RTNL_LINK_INITIALIZED) @@ -11974,24 +12211,9 @@ void unregister_netdev(struct net_device *dev) } EXPORT_SYMBOL(unregister_netdev); -/** - * __dev_change_net_namespace - move device to different nethost namespace - * @dev: device - * @net: network namespace - * @pat: If not NULL name pattern to try if the current device name - * is already taken in the destination network namespace. - * @new_ifindex: If not zero, specifies device index in the target - * namespace. - * - * This function shuts down a device interface and moves it - * to a new network namespace. On success 0 is returned, on - * a failure a netagive errno code is returned. 
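dev_memory_provider_uninstall() above gives page-pool memory providers a per-queue teardown hook at unregister time. A sketch of the provider side; the ops-table type name and the uninstall() signature are inferred from the call site, and the my_mp_* names are hypothetical:

#include <linux/slab.h>
#include <net/netdev_rx_queue.h>

struct my_mp_state {
        void *pool; /* whatever the provider pinned at bind time */
};

static void my_mp_uninstall(void *mp_priv, struct netdev_rx_queue *rxq)
{
        struct my_mp_state *state = mp_priv;

        /* Called once per RX queue still carrying mp_params when the
         * netdev unregisters, under the instance ops lock. */
        kfree(state);
}

static const struct memory_provider_ops my_mp_ops = {
        .uninstall = my_mp_uninstall,
};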
- * - * Callers must hold the rtnl semaphore. - */ - -int __dev_change_net_namespace(struct net_device *dev, struct net *net, - const char *pat, int new_ifindex) +int netif_change_net_namespace(struct net_device *dev, struct net *net, + const char *pat, int new_ifindex, + struct netlink_ext_ack *extack) { struct netdev_name_node *name_node; struct net *net_old = dev_net(dev); @@ -12002,12 +12224,16 @@ int __dev_change_net_namespace(struct net_device *dev, struct net *net, /* Don't allow namespace local devices to be moved. */ err = -EINVAL; - if (dev->netns_local) + if (dev->netns_immutable) { + NL_SET_ERR_MSG(extack, "The interface netns is immutable"); goto out; + } /* Ensure the device has been registered */ - if (dev->reg_state != NETREG_REGISTERED) + if (dev->reg_state != NETREG_REGISTERED) { + NL_SET_ERR_MSG(extack, "The interface isn't registered"); goto out; + } /* Get out if there is nothing todo */ err = 0; @@ -12020,30 +12246,49 @@ int __dev_change_net_namespace(struct net_device *dev, struct net *net, err = -EEXIST; if (netdev_name_in_use(net, dev->name)) { /* We get here if we can't use the current device name */ - if (!pat) + if (!pat) { + NL_SET_ERR_MSG(extack, + "An interface with the same name exists in the target netns"); goto out; + } err = dev_prep_valid_name(net, dev, pat, new_name, EEXIST); - if (err < 0) + if (err < 0) { + NL_SET_ERR_MSG_FMT(extack, + "Unable to use '%s' for the new interface name in the target netns", + pat); goto out; + } } /* Check that none of the altnames conflicts. */ err = -EEXIST; - netdev_for_each_altname(dev, name_node) - if (netdev_name_in_use(net, name_node->name)) + netdev_for_each_altname(dev, name_node) { + if (netdev_name_in_use(net, name_node->name)) { + NL_SET_ERR_MSG_FMT(extack, + "An interface with the altname %s exists in the target netns", + name_node->name); goto out; + } + } /* Check that new_ifindex isn't used yet. */ if (new_ifindex) { err = dev_index_reserve(net, new_ifindex); - if (err < 0) + if (err < 0) { + NL_SET_ERR_MSG_FMT(extack, + "The ifindex %d is not available in the target netns", + new_ifindex); goto out; + } } else { /* If there is an ifindex conflict assign a new one */ err = dev_index_reserve(net, dev->ifindex); if (err == -EBUSY) err = dev_index_reserve(net, 0); - if (err < 0) + if (err < 0) { + NL_SET_ERR_MSG(extack, + "Unable to allocate a new ifindex in the target netns"); goto out; + } new_ifindex = err; } @@ -12052,7 +12297,7 @@ int __dev_change_net_namespace(struct net_device *dev, struct net *net, */ /* If device is running close it first. */ - dev_close(dev); + netif_close(dev); /* And unlink it from device chain */ unlist_netdevice(dev); @@ -12134,7 +12379,6 @@ int __dev_change_net_namespace(struct net_device *dev, struct net *net, out: return err; } -EXPORT_SYMBOL_GPL(__dev_change_net_namespace); static int dev_cpu_dead(unsigned int oldcpu) { @@ -12249,7 +12493,7 @@ static struct hlist_head * __net_init netdev_create_hash(void) static int __net_init netdev_init(struct net *net) { BUILD_BUG_ON(GRO_HASH_BUCKETS > - 8 * sizeof_field(struct napi_struct, gro_bitmask)); + BITS_PER_BYTE * sizeof_field(struct gro_node, bitmask)); INIT_LIST_HEAD(&net->dev_base_head); @@ -12384,7 +12628,7 @@ static void __net_exit default_device_exit_net(struct net *net) char fb_name[IFNAMSIZ]; /* Ignore unmoveable devices (i.e. 
loopback) */ - if (dev->netns_local) + if (dev->netns_immutable) continue; /* Leave virtual devices for the generic cleanup */ @@ -12545,7 +12789,7 @@ static void run_backlog_napi(unsigned int cpu) { struct softnet_data *sd = per_cpu_ptr(&softnet_data, cpu); - napi_threaded_poll_loop(&sd->backlog); + napi_threaded_poll_loop(&sd->backlog, false); } static void backlog_napi_setup(unsigned int cpu) @@ -12614,7 +12858,7 @@ static int __init net_dev_init(void) INIT_CSD(&sd->defer_csd, trigger_rx_softirq, sd); spin_lock_init(&sd->defer_lock); - init_gro_hash(&sd->backlog); + gro_init(&sd->backlog.gro); sd->backlog.poll = process_backlog; sd->backlog.weight = weight_p; INIT_LIST_HEAD(&sd->backlog.poll_list); diff --git a/net/core/dev.h b/net/core/dev.h index a5b166bbd169a..c4b645120d72b 100644 --- a/net/core/dev.h +++ b/net/core/dev.h @@ -6,6 +6,7 @@ #include #include #include +#include struct net; struct netlink_ext_ack; @@ -40,6 +41,21 @@ DEFINE_FREE(netdev_unlock, struct net_device *, if (_T) netdev_unlock(_T)); (var_name = netdev_xa_find_lock(net, var_name, &ifindex)); \ ifindex++) +struct net_device * +netdev_get_by_index_lock_ops_compat(struct net *net, int ifindex); +struct net_device * +netdev_xa_find_lock_ops_compat(struct net *net, struct net_device *dev, + unsigned long *index); + +DEFINE_FREE(netdev_unlock_ops_compat, struct net_device *, + if (_T) netdev_unlock_ops_compat(_T)); + +#define for_each_netdev_lock_ops_compat_scoped(net, var_name, ifindex) \ + for (struct net_device *var_name __free(netdev_unlock_ops_compat) = NULL; \ + (var_name = netdev_xa_find_lock_ops_compat(net, var_name, \ + &ifindex)); \ + ifindex++) + #ifdef CONFIG_PROC_FS int __init dev_proc_init(void); #else @@ -85,6 +101,7 @@ struct netdev_name_node { }; int netdev_get_name(struct net *net, char *name, int ifindex); +int netif_change_name(struct net_device *dev, const char *newname); int dev_change_name(struct net_device *dev, const char *newname); #define netdev_for_each_altname(dev, namenode) \ @@ -98,24 +115,28 @@ int netdev_name_node_alt_destroy(struct net_device *dev, const char *name); int dev_validate_mtu(struct net_device *dev, int mtu, struct netlink_ext_ack *extack); -int dev_set_mtu_ext(struct net_device *dev, int mtu, - struct netlink_ext_ack *extack); +int netif_set_mtu_ext(struct net_device *dev, int new_mtu, + struct netlink_ext_ack *extack); int dev_get_phys_port_id(struct net_device *dev, struct netdev_phys_item_id *ppid); int dev_get_phys_port_name(struct net_device *dev, char *name, size_t len); +int netif_change_proto_down(struct net_device *dev, bool proto_down); int dev_change_proto_down(struct net_device *dev, bool proto_down); -void dev_change_proto_down_reason(struct net_device *dev, unsigned long mask, - u32 value); +void netdev_change_proto_down_reason_locked(struct net_device *dev, + unsigned long mask, u32 value); typedef int (*bpf_op_t)(struct net_device *dev, struct netdev_bpf *bpf); int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack, int fd, int expected_fd, u32 flags); +int netif_change_tx_queue_len(struct net_device *dev, unsigned long new_len); int dev_change_tx_queue_len(struct net_device *dev, unsigned long new_len); +void netif_set_group(struct net_device *dev, int new_group); void dev_set_group(struct net_device *dev, int new_group); +int netif_change_carrier(struct net_device *dev, bool new_carrier); int dev_change_carrier(struct net_device *dev, bool new_carrier); void __dev_set_rx_mode(struct net_device *dev); @@ -134,9 +155,11 @@ static inline 
void netif_set_up(struct net_device *dev, bool value) else dev->flags &= ~IFF_UP; - netdev_lock(dev); + if (!netdev_need_ops_lock(dev)) + netdev_lock(dev); dev->up = value; - netdev_unlock(dev); + if (!netdev_need_ops_lock(dev)) + netdev_unlock(dev); } static inline void netif_set_gso_max_size(struct net_device *dev, @@ -299,6 +322,18 @@ void xdp_do_check_flushed(struct napi_struct *napi); static inline void xdp_do_check_flushed(struct napi_struct *napi) { } #endif +/* Best effort check that NAPI is not idle (can't be scheduled to run) */ +static inline void napi_assert_will_not_race(const struct napi_struct *napi) +{ + /* uninitialized instance, can't race */ + if (!napi->poll_list.next) + return; + + /* SCHED bit is set on disabled instances */ + WARN_ON(!test_bit(NAPI_STATE_SCHED, &napi->state)); + WARN_ON(READ_ONCE(napi->list_owner) != -1); +} + void kick_defer_list_purge(struct softnet_data *sd, unsigned int cpu); #define XMIT_RECURSION_LIMIT 8 diff --git a/net/core/dev_api.c b/net/core/dev_api.c new file mode 100644 index 0000000000000..8dbc606121007 --- /dev/null +++ b/net/core/dev_api.c @@ -0,0 +1,335 @@ +// SPDX-License-Identifier: GPL-2.0-or-later + +#include +#include + +#include "dev.h" + +/** + * dev_change_name() - change name of a device + * @dev: device + * @newname: name (or format string) must be at least IFNAMSIZ + * + * Change name of a device, can pass format strings "eth%d" + * for wildcarding. + * + * Return: 0 on success, -errno on failure. + */ +int dev_change_name(struct net_device *dev, const char *newname) +{ + int ret; + + netdev_lock_ops(dev); + ret = netif_change_name(dev, newname); + netdev_unlock_ops(dev); + + return ret; +} + +/** + * dev_set_alias() - change ifalias of a device + * @dev: device + * @alias: name up to IFALIASZ + * @len: limit of bytes to copy from info + * + * Set ifalias for a device. + * + * Return: 0 on success, -errno on failure. + */ +int dev_set_alias(struct net_device *dev, const char *alias, size_t len) +{ + int ret; + + netdev_lock_ops(dev); + ret = netif_set_alias(dev, alias, len); + netdev_unlock_ops(dev); + + return ret; +} +EXPORT_SYMBOL(dev_set_alias); + +/** + * dev_change_flags() - change device settings + * @dev: device + * @flags: device state flags + * @extack: netlink extended ack + * + * Change settings on device based on state flags. The flags are + * in the userspace exported format.
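The new dev_api.c follows one convention throughout: the netif_*() variant assumes the per-device ops lock is held, and the thin dev_*() wrapper takes and releases it around the call. A sketch of the shape any future pair would take; dev_do_something() and netif_do_something() are hypothetical placeholders, not part of the patch:

#include <linux/netdevice.h>

#include "dev.h"

/* hypothetical locked variant, living next to its peers in net/core/dev.c */
int netif_do_something(struct net_device *dev, int arg);

/**
 * dev_do_something() - example unlocked wrapper
 * @dev: device
 * @arg: argument passed through
 *
 * Return: 0 on success, -errno on failure.
 */
int dev_do_something(struct net_device *dev, int arg)
{
        int ret;

        netdev_lock_ops(dev);
        ret = netif_do_something(dev, arg);
        netdev_unlock_ops(dev);

        return ret;
}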
+ */ +int dev_change_flags(struct net_device *dev, unsigned int flags, + struct netlink_ext_ack *extack) +{ + int ret; + + netdev_lock_ops(dev); + ret = netif_change_flags(dev, flags, extack); + netdev_unlock_ops(dev); + + return ret; +} +EXPORT_SYMBOL(dev_change_flags); + +/** + * dev_set_group() - change group this device belongs to + * @dev: device + * @new_group: group this device should belong to + */ +void dev_set_group(struct net_device *dev, int new_group) +{ + netdev_lock_ops(dev); + netif_set_group(dev, new_group); + netdev_unlock_ops(dev); +} + +int dev_set_mac_address_user(struct net_device *dev, struct sockaddr *sa, + struct netlink_ext_ack *extack) +{ + int ret; + + down_write(&dev_addr_sem); + netdev_lock_ops(dev); + ret = netif_set_mac_address(dev, sa, extack); + netdev_unlock_ops(dev); + up_write(&dev_addr_sem); + + return ret; +} +EXPORT_SYMBOL(dev_set_mac_address_user); + +/** + * dev_change_net_namespace() - move device to a different network namespace + * @dev: device + * @net: network namespace + * @pat: If not NULL, name pattern to try if the current device name + * is already taken in the destination network namespace. + * + * This function shuts down a device interface and moves it + * to a new network namespace. On success 0 is returned, on + * a failure a negative errno code is returned. + * + * Callers must hold the rtnl semaphore. + * + * Return: 0 on success, -errno on failure. + */ +int dev_change_net_namespace(struct net_device *dev, struct net *net, + const char *pat) +{ + int ret; + + netdev_lock_ops(dev); + ret = netif_change_net_namespace(dev, net, pat, 0, NULL); + netdev_unlock_ops(dev); + + return ret; +} +EXPORT_SYMBOL_GPL(dev_change_net_namespace); + +/** + * dev_change_carrier() - change device carrier + * @dev: device + * @new_carrier: new value + * + * Change device carrier. + * + * Return: 0 on success, -errno on failure. + */ +int dev_change_carrier(struct net_device *dev, bool new_carrier) +{ + int ret; + + netdev_lock_ops(dev); + ret = netif_change_carrier(dev, new_carrier); + netdev_unlock_ops(dev); + + return ret; +} + +/** + * dev_change_tx_queue_len() - change TX queue length of a netdevice + * @dev: device + * @new_len: new tx queue length + * + * Return: 0 on success, -errno on failure. + */ +int dev_change_tx_queue_len(struct net_device *dev, unsigned long new_len) +{ + int ret; + + netdev_lock_ops(dev); + ret = netif_change_tx_queue_len(dev, new_len); + netdev_unlock_ops(dev); + + return ret; +} + +/** + * dev_change_proto_down() - set carrier according to proto_down + * @dev: device + * @proto_down: new value + * + * Return: 0 on success, -errno on failure. + */ +int dev_change_proto_down(struct net_device *dev, bool proto_down) +{ + int ret; + + netdev_lock_ops(dev); + ret = netif_change_proto_down(dev, proto_down); + netdev_unlock_ops(dev); + + return ret; +} + +/** + * dev_open() - prepare an interface for use + * @dev: device to open + * @extack: netlink extended ack + * + * Takes a device from down to up state. The device's private open + * function is invoked and then the multicast lists are loaded. Finally + * the device is moved into the up state and a %NETDEV_UP message is + * sent to the netdev notifier chain. + * + * Calling this function on an active interface is a nop. On a failure + * a negative errno code is returned. + * + * Return: 0 on success, -errno on failure.
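Note the lock ordering dev_set_mac_address_user() above establishes for user-triggered MAC changes: the global dev_addr_sem is the outer lock and the per-device ops lock the inner one. A minimal sketch of the order any new caller would presumably have to follow (the address mutation itself is elided):

        down_write(&dev_addr_sem);      /* outer: global MAC-address lock */
        netdev_lock_ops(dev);           /* inner: per-device ops lock */
        /* ... update the address, e.g. via netif_set_mac_address() ... */
        netdev_unlock_ops(dev);
        up_write(&dev_addr_sem);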
+ */ +int dev_open(struct net_device *dev, struct netlink_ext_ack *extack) +{ + int ret; + + netdev_lock_ops(dev); + ret = netif_open(dev, extack); + netdev_unlock_ops(dev); + + return ret; +} +EXPORT_SYMBOL(dev_open); + +/** + * dev_close() - shutdown an interface + * @dev: device to shutdown + * + * This function moves an active device into down state. A + * %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device + * is then deactivated and finally a %NETDEV_DOWN is sent to the notifier + * chain. + */ +void dev_close(struct net_device *dev) +{ + netdev_lock_ops(dev); + netif_close(dev); + netdev_unlock_ops(dev); +} +EXPORT_SYMBOL(dev_close); + +int dev_eth_ioctl(struct net_device *dev, + struct ifreq *ifr, unsigned int cmd) +{ + const struct net_device_ops *ops = dev->netdev_ops; + int ret = -ENODEV; + + if (!ops->ndo_eth_ioctl) + return -EOPNOTSUPP; + + netdev_lock_ops(dev); + if (netif_device_present(dev)) + ret = ops->ndo_eth_ioctl(dev, ifr, cmd); + netdev_unlock_ops(dev); + + return ret; +} +EXPORT_SYMBOL(dev_eth_ioctl); + +int dev_set_mtu(struct net_device *dev, int new_mtu) +{ + int ret; + + netdev_lock_ops(dev); + ret = netif_set_mtu(dev, new_mtu); + netdev_unlock_ops(dev); + + return ret; +} +EXPORT_SYMBOL(dev_set_mtu); + +/** + * dev_disable_lro() - disable Large Receive Offload on a device + * @dev: device + * + * Disable Large Receive Offload (LRO) on a net device. Must be + * called under RTNL. This is needed if received packets may be + * forwarded to another interface. + */ +void dev_disable_lro(struct net_device *dev) +{ + netdev_lock_ops(dev); + netif_disable_lro(dev); + netdev_unlock_ops(dev); +} +EXPORT_SYMBOL(dev_disable_lro); + +/** + * dev_set_allmulti() - update allmulti count on a device + * @dev: device + * @inc: modifier + * + * Add or remove reception of all multicast frames to a device. While the + * count in the device remains above zero the interface remains listening + * to all multicast frames. Once it hits zero the device reverts back to normal + * filtering operation. A negative @inc value is used to drop the counter + * when releasing a resource needing all multicasts. + * + * Return: 0 on success, -errno on failure. + */ +int dev_set_allmulti(struct net_device *dev, int inc) +{ + int ret; + + netdev_lock_ops(dev); + ret = netif_set_allmulti(dev, inc, true); + netdev_unlock_ops(dev); + + return ret; +} +EXPORT_SYMBOL(dev_set_allmulti); + +/** + * dev_set_mac_address() - change Media Access Control Address + * @dev: device + * @sa: new address + * @extack: netlink extended ack + * + * Change the hardware (MAC) address of the device. + * + * Return: 0 on success, -errno on failure.
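Every wrapper in dev_api.c follows the same shape: take the per-device ops lock, call the locked netif_*() helper, drop the lock, and pass the result through. A minimal sketch of the pattern, where dev_do_thing()/netif_do_thing() are hypothetical names rather than real kernel symbols:

        int dev_do_thing(struct net_device *dev)
        {
                int ret;

                netdev_lock_ops(dev);           /* may be a no-op for devices not using the ops lock */
                ret = netif_do_thing(dev);      /* the locked implementation */
                netdev_unlock_ops(dev);

                return ret;
        }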
+ */ +int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa, + struct netlink_ext_ack *extack) +{ + int ret; + + netdev_lock_ops(dev); + ret = netif_set_mac_address(dev, sa, extack); + netdev_unlock_ops(dev); + + return ret; +} +EXPORT_SYMBOL(dev_set_mac_address); + +int dev_xdp_propagate(struct net_device *dev, struct netdev_bpf *bpf) +{ + int ret; + + netdev_lock_ops(dev); + ret = netif_xdp_propagate(dev, bpf); + netdev_unlock_ops(dev); + + return ret; +} +EXPORT_SYMBOL_GPL(dev_xdp_propagate); diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c index 4c2098ac9d724..fff13a8b48f18 100644 --- a/net/core/dev_ioctl.c +++ b/net/core/dev_ioctl.c @@ -10,6 +10,7 @@ #include #include #include +#include #include #include "dev.h" @@ -110,7 +111,7 @@ static int dev_getifmap(struct net_device *dev, struct ifreq *ifr) return 0; } -static int dev_setifmap(struct net_device *dev, struct ifreq *ifr) +static int netif_setifmap(struct net_device *dev, struct ifreq *ifr) { struct compat_ifmap *cifmap = (struct compat_ifmap *)&ifr->ifr_map; @@ -240,20 +241,6 @@ int net_hwtstamp_validate(const struct kernel_hwtstamp_config *cfg) return 0; } -static int dev_eth_ioctl(struct net_device *dev, - struct ifreq *ifr, unsigned int cmd) -{ - const struct net_device_ops *ops = dev->netdev_ops; - - if (!ops->ndo_eth_ioctl) - return -EOPNOTSUPP; - - if (!netif_device_present(dev)) - return -ENODEV; - - return ops->ndo_eth_ioctl(dev, ifr, cmd); -} - /** * dev_get_hwtstamp_phylib() - Get hardware timestamping settings of NIC * or of attached phylib PHY @@ -305,7 +292,9 @@ static int dev_get_hwtstamp(struct net_device *dev, struct ifreq *ifr) return -ENODEV; kernel_cfg.ifr = ifr; + netdev_lock_ops(dev); err = dev_get_hwtstamp_phylib(dev, &kernel_cfg); + netdev_unlock_ops(dev); if (err) return err; @@ -429,7 +418,9 @@ static int dev_set_hwtstamp(struct net_device *dev, struct ifreq *ifr) if (!netif_device_present(dev)) return -ENODEV; + netdev_lock_ops(dev); err = dev_set_hwtstamp_phylib(dev, &kernel_cfg, &extack); + netdev_unlock_ops(dev); if (err) return err; @@ -504,10 +495,14 @@ static int dev_siocbond(struct net_device *dev, const struct net_device_ops *ops = dev->netdev_ops; if (ops->ndo_siocbond) { + int ret = -ENODEV; + + netdev_lock_ops(dev); if (netif_device_present(dev)) - return ops->ndo_siocbond(dev, ifr, cmd); - else - return -ENODEV; + ret = ops->ndo_siocbond(dev, ifr, cmd); + netdev_unlock_ops(dev); + + return ret; } return -EOPNOTSUPP; @@ -519,10 +514,14 @@ static int dev_siocdevprivate(struct net_device *dev, struct ifreq *ifr, const struct net_device_ops *ops = dev->netdev_ops; if (ops->ndo_siocdevprivate) { + int ret = -ENODEV; + + netdev_lock_ops(dev); if (netif_device_present(dev)) - return ops->ndo_siocdevprivate(dev, ifr, data, cmd); - else - return -ENODEV; + ret = ops->ndo_siocdevprivate(dev, ifr, data, cmd); + netdev_unlock_ops(dev); + + return ret; } return -EOPNOTSUPP; @@ -533,10 +532,14 @@ static int dev_siocwandev(struct net_device *dev, struct if_settings *ifs) const struct net_device_ops *ops = dev->netdev_ops; if (ops->ndo_siocwandev) { + int ret = -ENODEV; + + netdev_lock_ops(dev); if (netif_device_present(dev)) - return ops->ndo_siocwandev(dev, ifs); - else - return -ENODEV; + ret = ops->ndo_siocwandev(dev, ifs); + netdev_unlock_ops(dev); + + return ret; } return -EOPNOTSUPP; @@ -551,7 +554,6 @@ static int dev_ifsioc(struct net *net, struct ifreq *ifr, void __user *data, int err; struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name); const struct net_device_ops 
*ops; - netdevice_tracker dev_tracker; if (!dev) return -ENODEV; @@ -580,11 +582,16 @@ static int dev_ifsioc(struct net *net, struct ifreq *ifr, void __user *data, memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data, min(sizeof(ifr->ifr_hwaddr.sa_data_min), (size_t)dev->addr_len)); + netdev_lock_ops(dev); call_netdevice_notifiers(NETDEV_CHANGEADDR, dev); + netdev_unlock_ops(dev); return 0; case SIOCSIFMAP: - return dev_setifmap(dev, ifr); + netdev_lock_ops(dev); + err = netif_setifmap(dev, ifr); + netdev_unlock_ops(dev); + return err; case SIOCADDMULTI: if (!ops->ndo_set_rx_mode || @@ -592,7 +599,10 @@ static int dev_ifsioc(struct net *net, struct ifreq *ifr, void __user *data, return -EINVAL; if (!netif_device_present(dev)) return -ENODEV; - return dev_mc_add_global(dev, ifr->ifr_hwaddr.sa_data); + netdev_lock_ops(dev); + err = dev_mc_add_global(dev, ifr->ifr_hwaddr.sa_data); + netdev_unlock_ops(dev); + return err; case SIOCDELMULTI: if (!ops->ndo_set_rx_mode || @@ -600,7 +610,10 @@ static int dev_ifsioc(struct net *net, struct ifreq *ifr, void __user *data, return -EINVAL; if (!netif_device_present(dev)) return -ENODEV; - return dev_mc_del_global(dev, ifr->ifr_hwaddr.sa_data); + netdev_lock_ops(dev); + err = dev_mc_del_global(dev, ifr->ifr_hwaddr.sa_data); + netdev_unlock_ops(dev); + return err; case SIOCSIFTXQLEN: if (ifr->ifr_qlen < 0) @@ -614,22 +627,6 @@ static int dev_ifsioc(struct net *net, struct ifreq *ifr, void __user *data, case SIOCWANDEV: return dev_siocwandev(dev, &ifr->ifr_settings); - case SIOCBRADDIF: - case SIOCBRDELIF: - if (!netif_device_present(dev)) - return -ENODEV; - if (!netif_is_bridge_master(dev)) - return -EOPNOTSUPP; - - netdev_hold(dev, &dev_tracker, GFP_KERNEL); - rtnl_net_unlock(net); - - err = br_ioctl_call(net, netdev_priv(dev), cmd, ifr, NULL); - - netdev_put(dev, &dev_tracker); - rtnl_net_lock(net); - return err; - case SIOCDEVPRIVATE ... 
SIOCDEVPRIVATE + 15: return dev_siocdevprivate(dev, ifr, data, cmd); @@ -812,8 +809,6 @@ int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr, case SIOCBONDRELEASE: case SIOCBONDSETHWADDR: case SIOCBONDCHANGEACTIVE: - case SIOCBRADDIF: - case SIOCBRDELIF: case SIOCSHWTSTAMP: if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) return -EPERM; diff --git a/net/core/devmem.c b/net/core/devmem.c index 3bba3f018df03..ee145a2aa41c2 100644 --- a/net/core/devmem.c +++ b/net/core/devmem.c @@ -16,6 +16,7 @@ #include #include #include +#include #include #include "devmem.h" @@ -24,23 +25,30 @@ /* Device memory support */ -/* Protected by rtnl_lock() */ static DEFINE_XARRAY_FLAGS(net_devmem_dmabuf_bindings, XA_FLAGS_ALLOC1); +static const struct memory_provider_ops dmabuf_devmem_ops; + +bool net_is_devmem_iov(struct net_iov *niov) +{ + return niov->pp->mp_ops == &dmabuf_devmem_ops; +} + static void net_devmem_dmabuf_free_chunk_owner(struct gen_pool *genpool, struct gen_pool_chunk *chunk, void *not_used) { struct dmabuf_genpool_chunk_owner *owner = chunk->owner; - kvfree(owner->niovs); + kvfree(owner->area.niovs); kfree(owner); } static dma_addr_t net_devmem_get_dma_addr(const struct net_iov *niov) { - struct dmabuf_genpool_chunk_owner *owner = net_iov_owner(niov); + struct dmabuf_genpool_chunk_owner *owner; + owner = net_devmem_iov_to_chunk_owner(niov); return owner->base_dma_addr + ((dma_addr_t)net_iov_idx(niov) << PAGE_SHIFT); } @@ -83,7 +91,7 @@ net_devmem_alloc_dmabuf(struct net_devmem_dmabuf_binding *binding) offset = dma_addr - owner->base_dma_addr; index = offset / PAGE_SIZE; - niov = &owner->niovs[index]; + niov = &owner->area.niovs[index]; niov->pp_magic = 0; niov->pp = NULL; @@ -94,7 +102,7 @@ net_devmem_alloc_dmabuf(struct net_devmem_dmabuf_binding *binding) void net_devmem_free_dmabuf(struct net_iov *niov) { - struct net_devmem_dmabuf_binding *binding = net_iov_binding(niov); + struct net_devmem_dmabuf_binding *binding = net_devmem_iov_binding(niov); unsigned long dma_addr = net_devmem_get_dma_addr(niov); if (WARN_ON(!gen_pool_has_addr(binding->chunk_pool, dma_addr, @@ -109,6 +117,7 @@ void net_devmem_unbind_dmabuf(struct net_devmem_dmabuf_binding *binding) struct netdev_rx_queue *rxq; unsigned long xa_idx; unsigned int rxq_idx; + int err; if (binding->list.next) list_del(&binding->list); @@ -117,10 +126,12 @@ void net_devmem_unbind_dmabuf(struct net_devmem_dmabuf_binding *binding) WARN_ON(rxq->mp_params.mp_priv != binding); rxq->mp_params.mp_priv = NULL; + rxq->mp_params.mp_ops = NULL; rxq_idx = get_netdev_rx_queue_index(rxq); - WARN_ON(netdev_rx_queue_restart(binding->dev, rxq_idx)); + err = netdev_rx_queue_restart(binding->dev, rxq_idx); + WARN_ON(err && err != -ENETDOWN); } xa_erase(&net_devmem_dmabuf_bindings, binding->id); @@ -152,7 +163,7 @@ int net_devmem_bind_dmabuf_to_queue(struct net_device *dev, u32 rxq_idx, } rxq = __netif_get_rx_queue(dev, rxq_idx); - if (rxq->mp_params.mp_priv) { + if (rxq->mp_params.mp_ops) { NL_SET_ERR_MSG(extack, "designated queue already memory provider bound"); return -EEXIST; } @@ -170,6 +181,7 @@ int net_devmem_bind_dmabuf_to_queue(struct net_device *dev, u32 rxq_idx, return err; rxq->mp_params.mp_priv = binding; + rxq->mp_params.mp_ops = &dmabuf_devmem_ops; err = netdev_rx_queue_restart(dev, rxq_idx); if (err) @@ -179,6 +191,7 @@ int net_devmem_bind_dmabuf_to_queue(struct net_device *dev, u32 rxq_idx, err_xa_erase: rxq->mp_params.mp_priv = NULL; + rxq->mp_params.mp_ops = NULL; xa_erase(&binding->bound_rxqs, xa_idx); return err; @@ -261,9 +274,9 
@@ net_devmem_bind_dmabuf(struct net_device *dev, unsigned int dmabuf_fd, goto err_free_chunks; } - owner->base_virtual = virtual; + owner->area.base_virtual = virtual; owner->base_dma_addr = dma_addr; - owner->num_niovs = len / PAGE_SIZE; + owner->area.num_niovs = len / PAGE_SIZE; owner->binding = binding; err = gen_pool_add_owner(binding->chunk_pool, dma_addr, @@ -275,17 +288,17 @@ net_devmem_bind_dmabuf(struct net_device *dev, unsigned int dmabuf_fd, goto err_free_chunks; } - owner->niovs = kvmalloc_array(owner->num_niovs, - sizeof(*owner->niovs), - GFP_KERNEL); - if (!owner->niovs) { + owner->area.niovs = kvmalloc_array(owner->area.num_niovs, + sizeof(*owner->area.niovs), + GFP_KERNEL); + if (!owner->area.niovs) { err = -ENOMEM; goto err_free_chunks; } - for (i = 0; i < owner->num_niovs; i++) { - niov = &owner->niovs[i]; - niov->owner = owner; + for (i = 0; i < owner->area.num_niovs; i++) { + niov = &owner->area.niovs[i]; + niov->owner = &owner->area; page_pool_set_dma_addr_netmem(net_iov_to_netmem(niov), net_devmem_get_dma_addr(niov)); } @@ -313,26 +326,6 @@ net_devmem_bind_dmabuf(struct net_device *dev, unsigned int dmabuf_fd, return ERR_PTR(err); } -void dev_dmabuf_uninstall(struct net_device *dev) -{ - struct net_devmem_dmabuf_binding *binding; - struct netdev_rx_queue *rxq; - unsigned long xa_idx; - unsigned int i; - - for (i = 0; i < dev->real_num_rx_queues; i++) { - binding = dev->_rx[i].mp_params.mp_priv; - if (!binding) - continue; - - xa_for_each(&binding->bound_rxqs, xa_idx, rxq) - if (rxq == &dev->_rx[i]) { - xa_erase(&binding->bound_rxqs, xa_idx); - break; - } - } -} - /*** "Dmabuf devmem memory provider" ***/ int mp_dmabuf_devmem_init(struct page_pool *pool) @@ -398,3 +391,36 @@ bool mp_dmabuf_devmem_release_page(struct page_pool *pool, netmem_ref netmem) /* We don't want the page pool put_page()ing our net_iovs. */ return false; } + +static int mp_dmabuf_devmem_nl_fill(void *mp_priv, struct sk_buff *rsp, + struct netdev_rx_queue *rxq) +{ + const struct net_devmem_dmabuf_binding *binding = mp_priv; + int type = rxq ? NETDEV_A_QUEUE_DMABUF : NETDEV_A_PAGE_POOL_DMABUF; + + return nla_put_u32(rsp, type, binding->id); +} + +static void mp_dmabuf_devmem_uninstall(void *mp_priv, + struct netdev_rx_queue *rxq) +{ + struct net_devmem_dmabuf_binding *binding = mp_priv; + struct netdev_rx_queue *bound_rxq; + unsigned long xa_idx; + + xa_for_each(&binding->bound_rxqs, xa_idx, bound_rxq) { + if (bound_rxq == rxq) { + xa_erase(&binding->bound_rxqs, xa_idx); + break; + } + } +} + +static const struct memory_provider_ops dmabuf_devmem_ops = { + .init = mp_dmabuf_devmem_init, + .destroy = mp_dmabuf_devmem_destroy, + .alloc_netmems = mp_dmabuf_devmem_alloc_netmems, + .release_netmem = mp_dmabuf_devmem_release_page, + .nl_fill = mp_dmabuf_devmem_nl_fill, + .uninstall = mp_dmabuf_devmem_uninstall, +}; diff --git a/net/core/devmem.h b/net/core/devmem.h index 76099ef9c482f..7fc158d527293 100644 --- a/net/core/devmem.h +++ b/net/core/devmem.h @@ -10,6 +10,8 @@ #ifndef _NET_DEVMEM_H #define _NET_DEVMEM_H +#include + struct netlink_ext_ack; struct net_devmem_dmabuf_binding { @@ -51,17 +53,11 @@ struct net_devmem_dmabuf_binding { * allocations from this chunk. */ struct dmabuf_genpool_chunk_owner { - /* Offset into the dma-buf where this chunk starts. */ - unsigned long base_virtual; + struct net_iov_area area; + struct net_devmem_dmabuf_binding *binding; /* dma_addr of the start of the chunk. */ dma_addr_t base_dma_addr; - - /* Array of net_iovs for this chunk. 
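The devmem rework above embeds a generic struct net_iov_area as the first member of dmabuf_genpool_chunk_owner, so the devmem-specific owner can be recovered from the generic area with container_of(); net_devmem_iov_to_chunk_owner() in the hunk below is the in-tree accessor. A condensed sketch of the idiom (area_to_owner() is an illustrative name):

        static inline struct dmabuf_genpool_chunk_owner *
        area_to_owner(struct net_iov_area *area)
        {
                /* 'area' points at owner->area, so step back to the owner */
                return container_of(area, struct dmabuf_genpool_chunk_owner,
                                    area);
        }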
*/ - struct net_iov *niovs; - size_t num_niovs; - - struct net_devmem_dmabuf_binding *binding; }; void __net_devmem_dmabuf_binding_free(struct net_devmem_dmabuf_binding *binding); @@ -72,38 +68,34 @@ void net_devmem_unbind_dmabuf(struct net_devmem_dmabuf_binding *binding); int net_devmem_bind_dmabuf_to_queue(struct net_device *dev, u32 rxq_idx, struct net_devmem_dmabuf_binding *binding, struct netlink_ext_ack *extack); -void dev_dmabuf_uninstall(struct net_device *dev); static inline struct dmabuf_genpool_chunk_owner * -net_iov_owner(const struct net_iov *niov) +net_devmem_iov_to_chunk_owner(const struct net_iov *niov) { - return niov->owner; + struct net_iov_area *owner = net_iov_owner(niov); + + return container_of(owner, struct dmabuf_genpool_chunk_owner, area); } -static inline unsigned int net_iov_idx(const struct net_iov *niov) +static inline struct net_devmem_dmabuf_binding * +net_devmem_iov_binding(const struct net_iov *niov) { - return niov - net_iov_owner(niov)->niovs; + return net_devmem_iov_to_chunk_owner(niov)->binding; } -static inline struct net_devmem_dmabuf_binding * -net_iov_binding(const struct net_iov *niov) +static inline u32 net_devmem_iov_binding_id(const struct net_iov *niov) { - return net_iov_owner(niov)->binding; + return net_devmem_iov_binding(niov)->id; } static inline unsigned long net_iov_virtual_addr(const struct net_iov *niov) { - struct dmabuf_genpool_chunk_owner *owner = net_iov_owner(niov); + struct net_iov_area *owner = net_iov_owner(niov); return owner->base_virtual + ((unsigned long)net_iov_idx(niov) << PAGE_SHIFT); } -static inline u32 net_iov_binding_id(const struct net_iov *niov) -{ - return net_iov_owner(niov)->binding->id; -} - static inline void net_devmem_dmabuf_binding_get(struct net_devmem_dmabuf_binding *binding) { @@ -123,6 +115,8 @@ struct net_iov * net_devmem_alloc_dmabuf(struct net_devmem_dmabuf_binding *binding); void net_devmem_free_dmabuf(struct net_iov *ppiov); +bool net_is_devmem_iov(struct net_iov *niov); + #else struct net_devmem_dmabuf_binding; @@ -152,10 +146,6 @@ net_devmem_bind_dmabuf_to_queue(struct net_device *dev, u32 rxq_idx, return -EOPNOTSUPP; } -static inline void dev_dmabuf_uninstall(struct net_device *dev) -{ -} - static inline struct net_iov * net_devmem_alloc_dmabuf(struct net_devmem_dmabuf_binding *binding) { @@ -171,10 +161,15 @@ static inline unsigned long net_iov_virtual_addr(const struct net_iov *niov) return 0; } -static inline u32 net_iov_binding_id(const struct net_iov *niov) +static inline u32 net_devmem_iov_binding_id(const struct net_iov *niov) { return 0; } + +static inline bool net_is_devmem_iov(struct net_iov *niov) +{ + return false; +} #endif #endif /* _NET_DEVMEM_H */ diff --git a/net/core/dst.c b/net/core/dst.c index 9552a90d4772d..c99b95cf9cbb1 100644 --- a/net/core/dst.c +++ b/net/core/dst.c @@ -286,7 +286,8 @@ struct metadata_dst *metadata_dst_alloc(u8 optslen, enum metadata_type type, { struct metadata_dst *md_dst; - md_dst = kmalloc(sizeof(*md_dst) + optslen, flags); + md_dst = kmalloc(struct_size(md_dst, u.tun_info.options, optslen), + flags); if (!md_dst) return NULL; @@ -314,7 +315,8 @@ metadata_dst_alloc_percpu(u8 optslen, enum metadata_type type, gfp_t flags) int cpu; struct metadata_dst __percpu *md_dst; - md_dst = __alloc_percpu_gfp(sizeof(struct metadata_dst) + optslen, + md_dst = __alloc_percpu_gfp(struct_size(md_dst, u.tun_info.options, + optslen), __alignof__(struct metadata_dst), flags); if (!md_dst) return NULL; diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c index 
94a7872ab2318..4bc64d912a1c0 100644 --- a/net/core/fib_rules.c +++ b/net/core/fib_rules.c @@ -373,7 +373,8 @@ static int call_fib_rule_notifiers(struct net *net, .rule = rule, }; - ASSERT_RTNL(); + ASSERT_RTNL_NET(net); + /* Paired with READ_ONCE() in fib_rules_seq() */ WRITE_ONCE(ops->fib_rules_seq, ops->fib_rules_seq + 1); return call_fib_notifiers(net, event_type, &info.info); @@ -461,9 +462,6 @@ static struct fib_rule *rule_find(struct fib_rules_ops *ops, if (rule->tun_id && r->tun_id != rule->tun_id) continue; - if (r->fr_net != rule->fr_net) - continue; - if (rule->l3mdev && r->l3mdev != rule->l3mdev) continue; @@ -483,11 +481,17 @@ static struct fib_rule *rule_find(struct fib_rules_ops *ops, &rule->sport_range)) continue; + if (rule->sport_mask && r->sport_mask != rule->sport_mask) + continue; + if (fib_rule_port_range_set(&rule->dport_range) && !fib_rule_port_range_compare(&r->dport_range, &rule->dport_range)) continue; + if (rule->dport_mask && r->dport_mask != rule->dport_mask) + continue; + if (!ops->compare(r, frh, tb)) continue; return r; @@ -517,14 +521,40 @@ static int fib_nl2rule_l3mdev(struct nlattr *nla, struct fib_rule *nlrule, } #endif -static int fib_nl2rule(struct sk_buff *skb, struct nlmsghdr *nlh, +static int fib_nl2rule_port_mask(const struct nlattr *mask_attr, + const struct fib_rule_port_range *range, + u16 *port_mask, + struct netlink_ext_ack *extack) +{ + if (!fib_rule_port_range_valid(range)) { + NL_SET_ERR_MSG_ATTR(extack, mask_attr, + "Cannot specify port mask without port value"); + return -EINVAL; + } + + if (fib_rule_port_is_range(range)) { + NL_SET_ERR_MSG_ATTR(extack, mask_attr, + "Cannot specify port mask for port range"); + return -EINVAL; + } + + if (range->start & ~nla_get_u16(mask_attr)) { + NL_SET_ERR_MSG_ATTR(extack, mask_attr, "Invalid port mask"); + return -EINVAL; + } + + *port_mask = nla_get_u16(mask_attr); + + return 0; +} + +static int fib_nl2rule(struct net *net, struct nlmsghdr *nlh, struct netlink_ext_ack *extack, struct fib_rules_ops *ops, struct nlattr *tb[], struct fib_rule **rule, bool *user_priority) { - struct net *net = sock_net(skb->sk); struct fib_rule_hdr *frh = nlmsg_data(nlh); struct fib_rule *nlrule = NULL; int err = -EINVAL; @@ -556,30 +586,18 @@ static int fib_nl2rule(struct sk_buff *skb, struct nlmsghdr *nlh, if (tb[FRA_PRIORITY]) { nlrule->pref = nla_get_u32(tb[FRA_PRIORITY]); *user_priority = true; - } else { - nlrule->pref = fib_default_rule_pref(ops); } nlrule->proto = nla_get_u8_default(tb[FRA_PROTOCOL], RTPROT_UNSPEC); if (tb[FRA_IIFNAME]) { - struct net_device *dev; - nlrule->iifindex = -1; nla_strscpy(nlrule->iifname, tb[FRA_IIFNAME], IFNAMSIZ); - dev = __dev_get_by_name(net, nlrule->iifname); - if (dev) - nlrule->iifindex = dev->ifindex; } if (tb[FRA_OIFNAME]) { - struct net_device *dev; - nlrule->oifindex = -1; nla_strscpy(nlrule->oifname, tb[FRA_OIFNAME], IFNAMSIZ); - dev = __dev_get_by_name(net, nlrule->oifname); - if (dev) - nlrule->oifindex = dev->ifindex; } if (tb[FRA_FWMARK]) { @@ -621,11 +639,6 @@ static int fib_nl2rule(struct sk_buff *skb, struct nlmsghdr *nlh, } nlrule->target = nla_get_u32(tb[FRA_GOTO]); - /* Backward jumps are prohibited to avoid endless loops */ - if (nlrule->target <= nlrule->pref) { - NL_SET_ERR_MSG(extack, "Backward goto not supported"); - goto errout_free; - } } else if (nlrule->action == FR_ACT_GOTO) { NL_SET_ERR_MSG(extack, "Missing goto target for action goto"); goto errout_free; @@ -664,6 +677,16 @@ static int fib_nl2rule(struct sk_buff *skb, struct nlmsghdr *nlh, 
NL_SET_ERR_MSG(extack, "Invalid sport range"); goto errout_free; } + if (!fib_rule_port_is_range(&nlrule->sport_range)) + nlrule->sport_mask = U16_MAX; + } + + if (tb[FRA_SPORT_MASK]) { + err = fib_nl2rule_port_mask(tb[FRA_SPORT_MASK], + &nlrule->sport_range, + &nlrule->sport_mask, extack); + if (err) + goto errout_free; } if (tb[FRA_DPORT_RANGE]) { @@ -673,6 +696,16 @@ static int fib_nl2rule(struct sk_buff *skb, struct nlmsghdr *nlh, NL_SET_ERR_MSG(extack, "Invalid dport range"); goto errout_free; } + if (!fib_rule_port_is_range(&nlrule->dport_range)) + nlrule->dport_mask = U16_MAX; + } + + if (tb[FRA_DPORT_MASK]) { + err = fib_nl2rule_port_mask(tb[FRA_DPORT_MASK], + &nlrule->dport_range, + &nlrule->dport_mask, extack); + if (err) + goto errout_free; } *rule = nlrule; @@ -685,6 +718,39 @@ static int fib_nl2rule(struct sk_buff *skb, struct nlmsghdr *nlh, return err; } +static int fib_nl2rule_rtnl(struct fib_rule *nlrule, + struct fib_rules_ops *ops, + struct nlattr *tb[], + struct netlink_ext_ack *extack) +{ + if (!tb[FRA_PRIORITY]) + nlrule->pref = fib_default_rule_pref(ops); + + /* Backward jumps are prohibited to avoid endless loops */ + if (tb[FRA_GOTO] && nlrule->target <= nlrule->pref) { + NL_SET_ERR_MSG(extack, "Backward goto not supported"); + return -EINVAL; + } + + if (tb[FRA_IIFNAME]) { + struct net_device *dev; + + dev = __dev_get_by_name(nlrule->fr_net, nlrule->iifname); + if (dev) + nlrule->iifindex = dev->ifindex; + } + + if (tb[FRA_OIFNAME]) { + struct net_device *dev; + + dev = __dev_get_by_name(nlrule->fr_net, nlrule->oifname); + if (dev) + nlrule->oifindex = dev->ifindex; + } + + return 0; +} + static int rule_exists(struct fib_rules_ops *ops, struct fib_rule_hdr *frh, struct nlattr **tb, struct fib_rule *rule) { @@ -721,9 +787,6 @@ static int rule_exists(struct fib_rules_ops *ops, struct fib_rule_hdr *frh, if (r->tun_id != rule->tun_id) continue; - if (r->fr_net != rule->fr_net) - continue; - if (r->l3mdev != rule->l3mdev) continue; @@ -741,10 +804,16 @@ static int rule_exists(struct fib_rules_ops *ops, struct fib_rule_hdr *frh, &rule->sport_range)) continue; + if (r->sport_mask != rule->sport_mask) + continue; + if (!fib_rule_port_range_compare(&r->dport_range, &rule->dport_range)) continue; + if (r->dport_mask != rule->dport_mask) + continue; + if (!ops->compare(r, frh, tb)) continue; return 1; @@ -774,17 +843,19 @@ static const struct nla_policy fib_rule_policy[FRA_MAX + 1] = { [FRA_DSCP] = NLA_POLICY_MAX(NLA_U8, INET_DSCP_MASK >> 2), [FRA_FLOWLABEL] = { .type = NLA_BE32 }, [FRA_FLOWLABEL_MASK] = { .type = NLA_BE32 }, + [FRA_SPORT_MASK] = { .type = NLA_U16 }, + [FRA_DPORT_MASK] = { .type = NLA_U16 }, + [FRA_DSCP_MASK] = NLA_POLICY_MASK(NLA_U8, INET_DSCP_MASK >> 2), }; -int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh, - struct netlink_ext_ack *extack) +int fib_newrule(struct net *net, struct sk_buff *skb, struct nlmsghdr *nlh, + struct netlink_ext_ack *extack, bool rtnl_held) { - struct net *net = sock_net(skb->sk); + struct fib_rule *rule = NULL, *r, *last = NULL; struct fib_rule_hdr *frh = nlmsg_data(nlh); + int err = -EINVAL, unresolved = 0; struct fib_rules_ops *ops = NULL; - struct fib_rule *rule = NULL, *r, *last = NULL; struct nlattr *tb[FRA_MAX + 1]; - int err = -EINVAL, unresolved = 0; bool user_priority = false; if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*frh))) { @@ -806,10 +877,17 @@ int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh, goto errout; } - err = fib_nl2rule(skb, nlh, extack, ops, tb, &rule, &user_priority); + err = 
fib_nl2rule(net, nlh, extack, ops, tb, &rule, &user_priority); if (err) goto errout; + if (!rtnl_held) + rtnl_net_lock(net); + + err = fib_nl2rule_rtnl(rule, ops, tb, extack); + if (err) + goto errout_free; + if ((nlh->nlmsg_flags & NLM_F_EXCL) && rule_exists(ops, frh, tb, rule)) { err = -EEXIST; @@ -871,29 +949,42 @@ int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh, if (rule->tun_id) ip_tunnel_need_metadata(); + fib_rule_get(rule); + + if (!rtnl_held) + rtnl_net_unlock(net); + notify_rule_change(RTM_NEWRULE, rule, ops, nlh, NETLINK_CB(skb).portid); + fib_rule_put(rule); flush_route_cache(ops); rules_ops_put(ops); return 0; errout_free: + if (!rtnl_held) + rtnl_net_unlock(net); kfree(rule); errout: rules_ops_put(ops); return err; } -EXPORT_SYMBOL_GPL(fib_nl_newrule); +EXPORT_SYMBOL_GPL(fib_newrule); -int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr *nlh, - struct netlink_ext_ack *extack) +static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh, + struct netlink_ext_ack *extack) { - struct net *net = sock_net(skb->sk); + return fib_newrule(sock_net(skb->sk), skb, nlh, extack, false); +} + +int fib_delrule(struct net *net, struct sk_buff *skb, struct nlmsghdr *nlh, + struct netlink_ext_ack *extack, bool rtnl_held) +{ + struct fib_rule *rule = NULL, *nlrule = NULL; struct fib_rule_hdr *frh = nlmsg_data(nlh); struct fib_rules_ops *ops = NULL; - struct fib_rule *rule = NULL, *r, *nlrule = NULL; struct nlattr *tb[FRA_MAX+1]; - int err = -EINVAL; bool user_priority = false; + int err = -EINVAL; if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*frh))) { NL_SET_ERR_MSG(extack, "Invalid msg length"); @@ -914,25 +1005,32 @@ int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr *nlh, goto errout; } - err = fib_nl2rule(skb, nlh, extack, ops, tb, &nlrule, &user_priority); + err = fib_nl2rule(net, nlh, extack, ops, tb, &nlrule, &user_priority); if (err) goto errout; + if (!rtnl_held) + rtnl_net_lock(net); + + err = fib_nl2rule_rtnl(nlrule, ops, tb, extack); + if (err) + goto errout_free; + rule = rule_find(ops, frh, tb, nlrule, user_priority); if (!rule) { err = -ENOENT; - goto errout; + goto errout_free; } if (rule->flags & FIB_RULE_PERMANENT) { err = -EPERM; - goto errout; + goto errout_free; } if (ops->delete) { err = ops->delete(rule); if (err) - goto errout; + goto errout_free; } if (rule->tun_id) @@ -954,7 +1052,7 @@ int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr *nlh, * current if it is goto rule, have actually been added. 
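fib_nl2rule_port_mask() earlier in this file encodes three constraints: a mask requires a port value, a mask cannot be combined with a true range, and the port value must be fully covered by the mask. A standalone restatement of the check, with illustrative names and the range-validity test simplified:

        #include <stdbool.h>
        #include <stdint.h>

        static bool port_mask_ok(uint16_t start, uint16_t end, uint16_t mask)
        {
                if (start == 0 && end == 0)     /* no port value supplied */
                        return false;
                if (start != end)               /* true ranges take no mask */
                        return false;
                return (start & ~mask) == 0;    /* value covered by mask */
        }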
*/ if (ops->nr_goto_rules > 0) { - struct fib_rule *n; + struct fib_rule *n, *r; n = list_next_entry(rule, list); if (&n->list == &ops->rules_list || n->pref != rule->pref) @@ -968,22 +1066,33 @@ int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr *nlh, } } - call_fib_rule_notifiers(net, FIB_EVENT_RULE_DEL, rule, ops, - NULL); - notify_rule_change(RTM_DELRULE, rule, ops, nlh, - NETLINK_CB(skb).portid); + call_fib_rule_notifiers(net, FIB_EVENT_RULE_DEL, rule, ops, NULL); + + if (!rtnl_held) + rtnl_net_unlock(net); + + notify_rule_change(RTM_DELRULE, rule, ops, nlh, NETLINK_CB(skb).portid); fib_rule_put(rule); flush_route_cache(ops); rules_ops_put(ops); kfree(nlrule); return 0; -errout: +errout_free: + if (!rtnl_held) + rtnl_net_unlock(net); kfree(nlrule); +errout: rules_ops_put(ops); return err; } -EXPORT_SYMBOL_GPL(fib_nl_delrule); +EXPORT_SYMBOL_GPL(fib_delrule); + +static int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr *nlh, + struct netlink_ext_ack *extack) +{ + return fib_delrule(sock_net(skb->sk), skb, nlh, extack, false); +} static inline size_t fib_rule_nlmsg_size(struct fib_rules_ops *ops, struct fib_rule *rule) @@ -1002,7 +1111,9 @@ static inline size_t fib_rule_nlmsg_size(struct fib_rules_ops *ops, + nla_total_size(1) /* FRA_PROTOCOL */ + nla_total_size(1) /* FRA_IP_PROTO */ + nla_total_size(sizeof(struct fib_rule_port_range)) /* FRA_SPORT_RANGE */ - + nla_total_size(sizeof(struct fib_rule_port_range)); /* FRA_DPORT_RANGE */ + + nla_total_size(sizeof(struct fib_rule_port_range)) /* FRA_DPORT_RANGE */ + + nla_total_size(2) /* FRA_SPORT_MASK */ + + nla_total_size(2); /* FRA_DPORT_MASK */ if (ops->nlmsg_payload) payload += ops->nlmsg_payload(rule); @@ -1070,8 +1181,12 @@ static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule, nla_put_uid_range(skb, &rule->uid_range)) || (fib_rule_port_range_set(&rule->sport_range) && nla_put_port_range(skb, FRA_SPORT_RANGE, &rule->sport_range)) || + (rule->sport_mask && nla_put_u16(skb, FRA_SPORT_MASK, + rule->sport_mask)) || (fib_rule_port_range_set(&rule->dport_range) && nla_put_port_range(skb, FRA_DPORT_RANGE, &rule->dport_range)) || + (rule->dport_mask && nla_put_u16(skb, FRA_DPORT_MASK, + rule->dport_mask)) || (rule->ip_proto && nla_put_u8(skb, FRA_IP_PROTO, rule->ip_proto))) goto nla_put_failure; @@ -1295,8 +1410,10 @@ static struct pernet_operations fib_rules_net_ops = { }; static const struct rtnl_msg_handler fib_rules_rtnl_msg_handlers[] __initconst = { - {.msgtype = RTM_NEWRULE, .doit = fib_nl_newrule}, - {.msgtype = RTM_DELRULE, .doit = fib_nl_delrule}, + {.msgtype = RTM_NEWRULE, .doit = fib_nl_newrule, + .flags = RTNL_FLAG_DOIT_PERNET}, + {.msgtype = RTM_DELRULE, .doit = fib_nl_delrule, + .flags = RTNL_FLAG_DOIT_PERNET}, {.msgtype = RTM_GETRULE, .dumpit = fib_nl_dumprule, .flags = RTNL_FLAG_DUMP_UNLOCKED}, }; diff --git a/net/core/filter.c b/net/core/filter.c index dcc53ac5c5458..bc6828761a47c 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -5222,6 +5222,25 @@ static const struct bpf_func_proto bpf_get_socket_uid_proto = { .arg1_type = ARG_PTR_TO_CTX, }; +static int sk_bpf_set_get_cb_flags(struct sock *sk, char *optval, bool getopt) +{ + u32 sk_bpf_cb_flags; + + if (getopt) { + *(u32 *)optval = sk->sk_bpf_cb_flags; + return 0; + } + + sk_bpf_cb_flags = *(u32 *)optval; + + if (sk_bpf_cb_flags & ~SK_BPF_CB_MASK) + return -EINVAL; + + sk->sk_bpf_cb_flags = sk_bpf_cb_flags; + + return 0; +} + static int sol_socket_sockopt(struct sock *sk, int optname, char *optval, int *optlen, bool getopt) @@ -5238,6 
+5257,7 @@ static int sol_socket_sockopt(struct sock *sk, int optname, case SO_MAX_PACING_RATE: case SO_BINDTOIFINDEX: case SO_TXREHASH: + case SK_BPF_CB_FLAGS: if (*optlen != sizeof(int)) return -EINVAL; break; @@ -5247,6 +5267,9 @@ static int sol_socket_sockopt(struct sock *sk, int optname, return -EINVAL; } + if (optname == SK_BPF_CB_FLAGS) + return sk_bpf_set_get_cb_flags(sk, optval, getopt); + if (getopt) { if (optname == SO_BINDTODEVICE) return -EINVAL; @@ -5259,6 +5282,38 @@ static int sol_socket_sockopt(struct sock *sk, int optname, KERNEL_SOCKPTR(optval), *optlen); } +static int bpf_sol_tcp_getsockopt(struct sock *sk, int optname, + char *optval, int optlen) +{ + if (optlen != sizeof(int)) + return -EINVAL; + + switch (optname) { + case TCP_BPF_SOCK_OPS_CB_FLAGS: { + int cb_flags = tcp_sk(sk)->bpf_sock_ops_cb_flags; + + memcpy(optval, &cb_flags, optlen); + break; + } + case TCP_BPF_RTO_MIN: { + int rto_min_us = jiffies_to_usecs(inet_csk(sk)->icsk_rto_min); + + memcpy(optval, &rto_min_us, optlen); + break; + } + case TCP_BPF_DELACK_MAX: { + int delack_max_us = jiffies_to_usecs(inet_csk(sk)->icsk_delack_max); + + memcpy(optval, &delack_max_us, optlen); + break; + } + default: + return -EINVAL; + } + + return 0; +} + static int bpf_sol_tcp_setsockopt(struct sock *sk, int optname, char *optval, int optlen) { @@ -5382,6 +5437,7 @@ static int sol_tcp_sockopt(struct sock *sk, int optname, case TCP_USER_TIMEOUT: case TCP_NOTSENT_LOWAT: case TCP_SAVE_SYN: + case TCP_RTO_MAX_MS: if (*optlen != sizeof(int)) return -EINVAL; break; @@ -5391,20 +5447,9 @@ static int sol_tcp_sockopt(struct sock *sk, int optname, if (*optlen < 1) return -EINVAL; break; - case TCP_BPF_SOCK_OPS_CB_FLAGS: - if (*optlen != sizeof(int)) - return -EINVAL; - if (getopt) { - struct tcp_sock *tp = tcp_sk(sk); - int cb_flags = tp->bpf_sock_ops_cb_flags; - - memcpy(optval, &cb_flags, *optlen); - return 0; - } - return bpf_sol_tcp_setsockopt(sk, optname, optval, *optlen); default: if (getopt) - return -EINVAL; + return bpf_sol_tcp_getsockopt(sk, optname, optval, *optlen); return bpf_sol_tcp_setsockopt(sk, optname, optval, *optlen); } @@ -5500,6 +5545,11 @@ static int __bpf_setsockopt(struct sock *sk, int level, int optname, return -EINVAL; } +static bool is_locked_tcp_sock_ops(struct bpf_sock_ops_kern *bpf_sock) +{ + return bpf_sock->op <= BPF_SOCK_OPS_WRITE_HDR_OPT_CB; +} + static int _bpf_setsockopt(struct sock *sk, int level, int optname, char *optval, int optlen) { @@ -5650,6 +5700,9 @@ static const struct bpf_func_proto bpf_sock_addr_getsockopt_proto = { BPF_CALL_5(bpf_sock_ops_setsockopt, struct bpf_sock_ops_kern *, bpf_sock, int, level, int, optname, char *, optval, int, optlen) { + if (!is_locked_tcp_sock_ops(bpf_sock)) + return -EOPNOTSUPP; + return _bpf_setsockopt(bpf_sock->sk, level, optname, optval, optlen); } @@ -5735,6 +5788,9 @@ static int bpf_sock_ops_get_syn(struct bpf_sock_ops_kern *bpf_sock, BPF_CALL_5(bpf_sock_ops_getsockopt, struct bpf_sock_ops_kern *, bpf_sock, int, level, int, optname, char *, optval, int, optlen) { + if (!is_locked_tcp_sock_ops(bpf_sock)) + return -EOPNOTSUPP; + if (IS_ENABLED(CONFIG_INET) && level == SOL_TCP && optname >= TCP_BPF_SYN && optname <= TCP_BPF_SYN_MAC) { int ret, copy_len = 0; @@ -5777,6 +5833,9 @@ BPF_CALL_2(bpf_sock_ops_cb_flags_set, struct bpf_sock_ops_kern *, bpf_sock, struct sock *sk = bpf_sock->sk; int val = argval & BPF_SOCK_OPS_ALL_CB_FLAGS; + if (!is_locked_tcp_sock_ops(bpf_sock)) + return -EOPNOTSUPP; + if (!IS_ENABLED(CONFIG_INET) || !sk_fullsock(sk)) return 
-EINVAL; @@ -7586,6 +7645,9 @@ BPF_CALL_4(bpf_sock_ops_load_hdr_opt, struct bpf_sock_ops_kern *, bpf_sock, u8 search_kind, search_len, copy_len, magic_len; int ret; + if (!is_locked_tcp_sock_ops(bpf_sock)) + return -EOPNOTSUPP; + /* 2 byte is the minimal option len except TCPOPT_NOP and * TCPOPT_EOL which are useless for the bpf prog to learn * and this helper disallow loading them also. @@ -10360,10 +10422,10 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type, } \ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \ struct bpf_sock_ops_kern, \ - is_fullsock), \ + is_locked_tcp_sock), \ fullsock_reg, si->src_reg, \ offsetof(struct bpf_sock_ops_kern, \ - is_fullsock)); \ + is_locked_tcp_sock)); \ *insn++ = BPF_JMP_IMM(BPF_JEQ, fullsock_reg, 0, jmp); \ if (si->dst_reg == si->src_reg) \ *insn++ = BPF_LDX_MEM(BPF_DW, reg, si->src_reg, \ @@ -10448,10 +10510,10 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type, temp)); \ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \ struct bpf_sock_ops_kern, \ - is_fullsock), \ + is_locked_tcp_sock), \ reg, si->dst_reg, \ offsetof(struct bpf_sock_ops_kern, \ - is_fullsock)); \ + is_locked_tcp_sock)); \ *insn++ = BPF_JMP_IMM(BPF_JEQ, reg, 0, 2); \ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \ struct bpf_sock_ops_kern, sk),\ @@ -12064,6 +12126,25 @@ __bpf_kfunc int bpf_sk_assign_tcp_reqsk(struct __sk_buff *s, struct sock *sk, #endif } +__bpf_kfunc int bpf_sock_ops_enable_tx_tstamp(struct bpf_sock_ops_kern *skops, + u64 flags) +{ + struct sk_buff *skb; + + if (skops->op != BPF_SOCK_OPS_TSTAMP_SENDMSG_CB) + return -EOPNOTSUPP; + + if (flags) + return -EINVAL; + + skb = skops->skb; + skb_shinfo(skb)->tx_flags |= SKBTX_BPF; + TCP_SKB_CB(skb)->txstamp_ack |= TSTAMP_ACK_BPF; + skb_shinfo(skb)->tskey = TCP_SKB_CB(skb)->seq + skb->len - 1; + + return 0; +} + __bpf_kfunc_end_defs(); int bpf_dynptr_from_skb_rdonly(struct __sk_buff *skb, u64 flags, @@ -12097,6 +12178,10 @@ BTF_KFUNCS_START(bpf_kfunc_check_set_tcp_reqsk) BTF_ID_FLAGS(func, bpf_sk_assign_tcp_reqsk, KF_TRUSTED_ARGS) BTF_KFUNCS_END(bpf_kfunc_check_set_tcp_reqsk) +BTF_KFUNCS_START(bpf_kfunc_check_set_sock_ops) +BTF_ID_FLAGS(func, bpf_sock_ops_enable_tx_tstamp, KF_TRUSTED_ARGS) +BTF_KFUNCS_END(bpf_kfunc_check_set_sock_ops) + static const struct btf_kfunc_id_set bpf_kfunc_set_skb = { .owner = THIS_MODULE, .set = &bpf_kfunc_check_set_skb, @@ -12117,6 +12202,11 @@ static const struct btf_kfunc_id_set bpf_kfunc_set_tcp_reqsk = { .set = &bpf_kfunc_check_set_tcp_reqsk, }; +static const struct btf_kfunc_id_set bpf_kfunc_set_sock_ops = { + .owner = THIS_MODULE, + .set = &bpf_kfunc_check_set_sock_ops, +}; + static int __init bpf_kfunc_init(void) { int ret; @@ -12135,7 +12225,8 @@ static int __init bpf_kfunc_init(void) ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_XDP, &bpf_kfunc_set_xdp); ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_CGROUP_SOCK_ADDR, &bpf_kfunc_set_sock_addr); - return ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &bpf_kfunc_set_tcp_reqsk); + ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &bpf_kfunc_set_tcp_reqsk); + return ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SOCK_OPS, &bpf_kfunc_set_sock_ops); } late_initcall(bpf_kfunc_init); diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c index 9cd8de6bebb54..1b61bb25ba0e5 100644 --- a/net/core/flow_dissector.c +++ b/net/core/flow_dissector.c @@ -106,7 +106,7 @@ int flow_dissector_bpf_prog_attach_check(struct net *net, #endif /* CONFIG_BPF_SYSCALL */ /** - * __skb_flow_get_ports - 
extract the upper layer ports and return them + * skb_flow_get_ports - extract the upper layer ports and return them * @skb: sk_buff to extract the ports from * @thoff: transport header offset * @ip_proto: protocol for which to get port offset @@ -116,8 +116,8 @@ int flow_dissector_bpf_prog_attach_check(struct net *net, * The function will try to retrieve the ports at offset thoff + poff where poff * is the protocol port offset returned from proto_ports_offset */ -__be32 __skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto, - const void *data, int hlen) +__be32 skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto, + const void *data, int hlen) { int poff = proto_ports_offset(ip_proto); @@ -137,7 +137,7 @@ __be32 __skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto, return 0; } -EXPORT_SYMBOL(__skb_flow_get_ports); +EXPORT_SYMBOL(skb_flow_get_ports); static bool icmp_has_id(u8 type) { @@ -870,7 +870,7 @@ __skb_flow_dissect_ports(const struct sk_buff *skb, if (!key_ports && !key_ports_range) return; - ports = __skb_flow_get_ports(skb, nhoff, ip_proto, data, hlen); + ports = skb_flow_get_ports(skb, nhoff, ip_proto, data, hlen); if (key_ports) key_ports->ports = ports; diff --git a/net/core/gro.c b/net/core/gro.c index 78b320b631744..b350e5b69549d 100644 --- a/net/core/gro.c +++ b/net/core/gro.c @@ -250,8 +250,7 @@ int skb_gro_receive_list(struct sk_buff *p, struct sk_buff *skb) return 0; } - -static void napi_gro_complete(struct napi_struct *napi, struct sk_buff *skb) +static void gro_complete(struct gro_node *gro, struct sk_buff *skb) { struct list_head *head = &net_hotdata.offload_base; struct packet_offload *ptype; @@ -284,43 +283,43 @@ static void napi_gro_complete(struct napi_struct *napi, struct sk_buff *skb) } out: - gro_normal_one(napi, skb, NAPI_GRO_CB(skb)->count); + gro_normal_one(gro, skb, NAPI_GRO_CB(skb)->count); } -static void __napi_gro_flush_chain(struct napi_struct *napi, u32 index, - bool flush_old) +static void __gro_flush_chain(struct gro_node *gro, u32 index, bool flush_old) { - struct list_head *head = &napi->gro_hash[index].list; + struct list_head *head = &gro->hash[index].list; struct sk_buff *skb, *p; list_for_each_entry_safe_reverse(skb, p, head, list) { if (flush_old && NAPI_GRO_CB(skb)->age == jiffies) return; skb_list_del_init(skb); - napi_gro_complete(napi, skb); - napi->gro_hash[index].count--; + gro_complete(gro, skb); + gro->hash[index].count--; } - if (!napi->gro_hash[index].count) - __clear_bit(index, &napi->gro_bitmask); + if (!gro->hash[index].count) + __clear_bit(index, &gro->bitmask); } -/* napi->gro_hash[].list contains packets ordered by age. +/* + * gro->hash[].list contains packets ordered by age. * youngest packets at the head of it. * Complete skbs in reverse order to reduce latencies. 
*/ -void napi_gro_flush(struct napi_struct *napi, bool flush_old) +void __gro_flush(struct gro_node *gro, bool flush_old) { - unsigned long bitmask = napi->gro_bitmask; + unsigned long bitmask = gro->bitmask; unsigned int i, base = ~0U; while ((i = ffs(bitmask)) != 0) { bitmask >>= i; base += i; - __napi_gro_flush_chain(napi, base, flush_old); + __gro_flush_chain(gro, base, flush_old); } } -EXPORT_SYMBOL(napi_gro_flush); +EXPORT_SYMBOL(__gro_flush); static unsigned long gro_list_prepare_tc_ext(const struct sk_buff *skb, const struct sk_buff *p, @@ -439,7 +438,7 @@ static void gro_try_pull_from_frag0(struct sk_buff *skb) gro_pull_from_frag0(skb, grow); } -static void gro_flush_oldest(struct napi_struct *napi, struct list_head *head) +static void gro_flush_oldest(struct gro_node *gro, struct list_head *head) { struct sk_buff *oldest; @@ -455,14 +454,15 @@ static void gro_flush_oldest(struct napi_struct *napi, struct list_head *head) * SKB to the chain. */ skb_list_del_init(oldest); - napi_gro_complete(napi, oldest); + gro_complete(gro, oldest); } -static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb) +static enum gro_result dev_gro_receive(struct gro_node *gro, + struct sk_buff *skb) { u32 bucket = skb_get_hash_raw(skb) & (GRO_HASH_BUCKETS - 1); - struct gro_list *gro_list = &napi->gro_hash[bucket]; struct list_head *head = &net_hotdata.offload_base; + struct gro_list *gro_list = &gro->hash[bucket]; struct packet_offload *ptype; __be16 type = skb->protocol; struct sk_buff *pp = NULL; @@ -526,7 +526,7 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff if (pp) { skb_list_del_init(pp); - napi_gro_complete(napi, pp); + gro_complete(gro, pp); gro_list->count--; } @@ -537,7 +537,7 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff goto normal; if (unlikely(gro_list->count >= MAX_GRO_SKBS)) - gro_flush_oldest(napi, &gro_list->list); + gro_flush_oldest(gro, &gro_list->list); else gro_list->count++; @@ -551,10 +551,10 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff ret = GRO_HELD; ok: if (gro_list->count) { - if (!test_bit(bucket, &napi->gro_bitmask)) - __set_bit(bucket, &napi->gro_bitmask); - } else if (test_bit(bucket, &napi->gro_bitmask)) { - __clear_bit(bucket, &napi->gro_bitmask); + if (!test_bit(bucket, &gro->bitmask)) + __set_bit(bucket, &gro->bitmask); + } else if (test_bit(bucket, &gro->bitmask)) { + __clear_bit(bucket, &gro->bitmask); } return ret; @@ -593,13 +593,12 @@ struct packet_offload *gro_find_complete_by_type(__be16 type) } EXPORT_SYMBOL(gro_find_complete_by_type); -static gro_result_t napi_skb_finish(struct napi_struct *napi, - struct sk_buff *skb, - gro_result_t ret) +static gro_result_t gro_skb_finish(struct gro_node *gro, struct sk_buff *skb, + gro_result_t ret) { switch (ret) { case GRO_NORMAL: - gro_normal_one(napi, skb, 1); + gro_normal_one(gro, skb, 1); break; case GRO_MERGED_FREE: @@ -620,21 +619,21 @@ static gro_result_t napi_skb_finish(struct napi_struct *napi, return ret; } -gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb) +gro_result_t gro_receive_skb(struct gro_node *gro, struct sk_buff *skb) { gro_result_t ret; - skb_mark_napi_id(skb, napi); + __skb_mark_napi_id(skb, gro); trace_napi_gro_receive_entry(skb); skb_gro_reset_offset(skb, 0); - ret = napi_skb_finish(napi, skb, dev_gro_receive(napi, skb)); + ret = gro_skb_finish(gro, skb, dev_gro_receive(gro, skb)); trace_napi_gro_receive_exit(ret); return ret; } 
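The per-bucket bitmask in struct gro_node is what lets __gro_flush() above skip empty hash chains entirely: ffs() finds the next set bit, the mask is shifted past it, and base accumulates the bucket index (starting at ~0U so the first hit lands on the right index). A worked trace of that loop:

        /*
         * bitmask = 0b10100 (buckets 2 and 4 non-empty), base = ~0U
         *   ffs() -> 3: bitmask >>= 3 (now 0b10), base += 3 -> base = 2
         *   ffs() -> 2: bitmask >>= 2 (now 0),    base += 2 -> base = 4
         *   ffs() -> 0: loop ends; buckets 2 and 4 have been flushed
         */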
-EXPORT_SYMBOL(napi_gro_receive); +EXPORT_SYMBOL(gro_receive_skb); static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb) { @@ -653,6 +652,7 @@ static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb) skb->pkt_type = PACKET_HOST; skb->encapsulation = 0; + skb->ip_summed = CHECKSUM_NONE; skb_shinfo(skb)->gso_type = 0; skb_shinfo(skb)->gso_size = 0; if (unlikely(skb->slow_gro)) { @@ -690,7 +690,7 @@ static gro_result_t napi_frags_finish(struct napi_struct *napi, __skb_push(skb, ETH_HLEN); skb->protocol = eth_type_trans(skb, skb->dev); if (ret == GRO_NORMAL) - gro_normal_one(napi, skb, 1); + gro_normal_one(&napi->gro, skb, 1); break; case GRO_MERGED_FREE: @@ -759,7 +759,7 @@ gro_result_t napi_gro_frags(struct napi_struct *napi) trace_napi_gro_frags_entry(skb); - ret = napi_frags_finish(napi, skb, dev_gro_receive(napi, skb)); + ret = napi_frags_finish(napi, skb, dev_gro_receive(&napi->gro, skb)); trace_napi_gro_frags_exit(ret); return ret; @@ -791,3 +791,37 @@ __sum16 __skb_gro_checksum_complete(struct sk_buff *skb) return sum; } EXPORT_SYMBOL(__skb_gro_checksum_complete); + +void gro_init(struct gro_node *gro) +{ + for (u32 i = 0; i < GRO_HASH_BUCKETS; i++) { + INIT_LIST_HEAD(&gro->hash[i].list); + gro->hash[i].count = 0; + } + + gro->bitmask = 0; + gro->cached_napi_id = 0; + + INIT_LIST_HEAD(&gro->rx_list); + gro->rx_count = 0; +} + +void gro_cleanup(struct gro_node *gro) +{ + struct sk_buff *skb, *n; + + for (u32 i = 0; i < GRO_HASH_BUCKETS; i++) { + list_for_each_entry_safe(skb, n, &gro->hash[i].list, list) + kfree_skb(skb); + + gro->hash[i].count = 0; + } + + gro->bitmask = 0; + gro->cached_napi_id = 0; + + list_for_each_entry_safe(skb, n, &gro->rx_list, list) + kfree_skb(skb); + + gro->rx_count = 0; +} diff --git a/net/core/hotdata.c b/net/core/hotdata.c index d0aaaaa556f22..0bc893d5f07b0 100644 --- a/net/core/hotdata.c +++ b/net/core/hotdata.c @@ -7,7 +7,6 @@ struct net_hotdata net_hotdata __cacheline_aligned = { .offload_base = LIST_HEAD_INIT(net_hotdata.offload_base), - .ptype_all = LIST_HEAD_INIT(net_hotdata.ptype_all), .gro_normal_batch = 8, .netdev_budget = 300, diff --git a/net/core/lwtunnel.c b/net/core/lwtunnel.c index 711cd3b4347a7..e39a459540ec0 100644 --- a/net/core/lwtunnel.c +++ b/net/core/lwtunnel.c @@ -23,6 +23,8 @@ #include #include +#include "dev.h" + DEFINE_STATIC_KEY_FALSE(nf_hooks_lwtunnel_enabled); EXPORT_SYMBOL_GPL(nf_hooks_lwtunnel_enabled); @@ -147,7 +149,8 @@ int lwtunnel_build_state(struct net *net, u16 encap_type, } EXPORT_SYMBOL_GPL(lwtunnel_build_state); -int lwtunnel_valid_encap_type(u16 encap_type, struct netlink_ext_ack *extack) +int lwtunnel_valid_encap_type(u16 encap_type, struct netlink_ext_ack *extack, + bool rtnl_is_held) { const struct lwtunnel_encap_ops *ops; int ret = -EINVAL; @@ -158,21 +161,19 @@ int lwtunnel_valid_encap_type(u16 encap_type, struct netlink_ext_ack *extack) return ret; } - rcu_read_lock(); - ops = rcu_dereference(lwtun_encaps[encap_type]); - rcu_read_unlock(); + ops = rcu_access_pointer(lwtun_encaps[encap_type]); #ifdef CONFIG_MODULES if (!ops) { const char *encap_type_str = lwtunnel_encap_str(encap_type); if (encap_type_str) { - __rtnl_unlock(); + if (rtnl_is_held) + __rtnl_unlock(); request_module("rtnl-lwt-%s", encap_type_str); - rtnl_lock(); + if (rtnl_is_held) + rtnl_lock(); - rcu_read_lock(); - ops = rcu_dereference(lwtun_encaps[encap_type]); - rcu_read_unlock(); + ops = rcu_access_pointer(lwtun_encaps[encap_type]); } } #endif @@ -185,7 +186,8 @@ int lwtunnel_valid_encap_type(u16 
encap_type, struct netlink_ext_ack *extack) EXPORT_SYMBOL_GPL(lwtunnel_valid_encap_type); int lwtunnel_valid_encap_type_attr(struct nlattr *attr, int remaining, - struct netlink_ext_ack *extack) + struct netlink_ext_ack *extack, + bool rtnl_is_held) { struct rtnexthop *rtnh = (struct rtnexthop *)attr; struct nlattr *nla_entype; @@ -207,7 +209,8 @@ int lwtunnel_valid_encap_type_attr(struct nlattr *attr, int remaining, encap_type = nla_get_u16(nla_entype); if (lwtunnel_valid_encap_type(encap_type, - extack) != 0) + extack, + rtnl_is_held) != 0) return -EOPNOTSUPP; } } @@ -325,13 +328,23 @@ EXPORT_SYMBOL_GPL(lwtunnel_cmp_encap); int lwtunnel_output(struct net *net, struct sock *sk, struct sk_buff *skb) { - struct dst_entry *dst = skb_dst(skb); const struct lwtunnel_encap_ops *ops; struct lwtunnel_state *lwtstate; - int ret = -EINVAL; + struct dst_entry *dst; + int ret; - if (!dst) + if (dev_xmit_recursion()) { + net_crit_ratelimited("%s(): recursion limit reached on datapath\n", + __func__); + ret = -ENETDOWN; goto drop; + } + + dst = skb_dst(skb); + if (!dst) { + ret = -EINVAL; + goto drop; + } lwtstate = dst->lwtstate; if (lwtstate->type == LWTUNNEL_ENCAP_NONE || @@ -341,8 +354,11 @@ int lwtunnel_output(struct net *net, struct sock *sk, struct sk_buff *skb) ret = -EOPNOTSUPP; rcu_read_lock(); ops = rcu_dereference(lwtun_encaps[lwtstate->type]); - if (likely(ops && ops->output)) + if (likely(ops && ops->output)) { + dev_xmit_recursion_inc(); ret = ops->output(net, sk, skb); + dev_xmit_recursion_dec(); + } rcu_read_unlock(); if (ret == -EOPNOTSUPP) @@ -359,13 +375,23 @@ EXPORT_SYMBOL_GPL(lwtunnel_output); int lwtunnel_xmit(struct sk_buff *skb) { - struct dst_entry *dst = skb_dst(skb); const struct lwtunnel_encap_ops *ops; struct lwtunnel_state *lwtstate; - int ret = -EINVAL; + struct dst_entry *dst; + int ret; - if (!dst) + if (dev_xmit_recursion()) { + net_crit_ratelimited("%s(): recursion limit reached on datapath\n", + __func__); + ret = -ENETDOWN; goto drop; + } + + dst = skb_dst(skb); + if (!dst) { + ret = -EINVAL; + goto drop; + } lwtstate = dst->lwtstate; @@ -376,8 +402,11 @@ int lwtunnel_xmit(struct sk_buff *skb) ret = -EOPNOTSUPP; rcu_read_lock(); ops = rcu_dereference(lwtun_encaps[lwtstate->type]); - if (likely(ops && ops->xmit)) + if (likely(ops && ops->xmit)) { + dev_xmit_recursion_inc(); ret = ops->xmit(skb); + dev_xmit_recursion_dec(); + } rcu_read_unlock(); if (ret == -EOPNOTSUPP) @@ -394,13 +423,23 @@ EXPORT_SYMBOL_GPL(lwtunnel_xmit); int lwtunnel_input(struct sk_buff *skb) { - struct dst_entry *dst = skb_dst(skb); const struct lwtunnel_encap_ops *ops; struct lwtunnel_state *lwtstate; - int ret = -EINVAL; + struct dst_entry *dst; + int ret; + + if (dev_xmit_recursion()) { + net_crit_ratelimited("%s(): recursion limit reached on datapath\n", + __func__); + ret = -ENETDOWN; + goto drop; + } - if (!dst) + dst = skb_dst(skb); + if (!dst) { + ret = -EINVAL; goto drop; + } lwtstate = dst->lwtstate; if (lwtstate->type == LWTUNNEL_ENCAP_NONE || @@ -410,8 +449,11 @@ int lwtunnel_input(struct sk_buff *skb) ret = -EOPNOTSUPP; rcu_read_lock(); ops = rcu_dereference(lwtun_encaps[lwtstate->type]); - if (likely(ops && ops->input)) + if (likely(ops && ops->input)) { + dev_xmit_recursion_inc(); ret = ops->input(skb); + dev_xmit_recursion_dec(); + } rcu_read_unlock(); if (ret == -EOPNOTSUPP) diff --git a/net/core/neighbour.c b/net/core/neighbour.c index bd0251bd74a1f..0738aa6cca25f 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c @@ -518,7 +518,7 @@ static struct neigh_hash_table 
*neigh_hash_alloc(unsigned int shift) if (!ret) return NULL; - hash_heads = kvzalloc(size, GFP_ATOMIC); + hash_heads = kzalloc(size, GFP_ATOMIC); if (!hash_heads) { kfree(ret); return NULL; @@ -536,7 +536,7 @@ static void neigh_hash_free_rcu(struct rcu_head *head) struct neigh_hash_table, rcu); - kvfree(nht->hash_heads); + kfree(nht->hash_heads); kfree(nht); } @@ -832,12 +832,10 @@ static int pneigh_ifdown_and_unlock(struct neigh_table *tbl, return -ENOENT; } -static void neigh_parms_destroy(struct neigh_parms *parms); - static inline void neigh_parms_put(struct neigh_parms *parms) { if (refcount_dec_and_test(&parms->refcnt)) - neigh_parms_destroy(parms); + kfree(parms); } /* @@ -1713,11 +1711,6 @@ void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms) } EXPORT_SYMBOL(neigh_parms_release); -static void neigh_parms_destroy(struct neigh_parms *parms) -{ - kfree(parms); -} - static struct lock_class_key neigh_table_proxy_queue_class; static struct neigh_table __rcu *neigh_tables[NEIGH_NR_TABLES] __read_mostly; @@ -2250,6 +2243,7 @@ static const struct nla_policy nl_neightbl_policy[NDTA_MAX+1] = { static const struct nla_policy nl_ntbl_parm_policy[NDTPA_MAX+1] = { [NDTPA_IFINDEX] = { .type = NLA_U32 }, [NDTPA_QUEUE_LEN] = { .type = NLA_U32 }, + [NDTPA_QUEUE_LENBYTES] = { .type = NLA_U32 }, [NDTPA_PROXY_QLEN] = { .type = NLA_U32 }, [NDTPA_APP_PROBES] = { .type = NLA_U32 }, [NDTPA_UCAST_PROBES] = { .type = NLA_U32 }, diff --git a/net/core/net-procfs.c b/net/core/net-procfs.c index fa6d3969734a6..3e92bf0f9060b 100644 --- a/net/core/net-procfs.c +++ b/net/core/net-procfs.c @@ -185,7 +185,13 @@ static void *ptype_get_idx(struct seq_file *seq, loff_t pos) } } - list_for_each_entry_rcu(pt, &net_hotdata.ptype_all, list) { + list_for_each_entry_rcu(pt, &seq_file_net(seq)->ptype_all, list) { + if (i == pos) + return pt; + ++i; + } + + list_for_each_entry_rcu(pt, &seq_file_net(seq)->ptype_specific, list) { if (i == pos) return pt; ++i; @@ -210,6 +216,7 @@ static void *ptype_seq_start(struct seq_file *seq, loff_t *pos) static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos) { + struct net *net = seq_file_net(seq); struct net_device *dev; struct packet_type *pt; struct list_head *nxt; @@ -232,15 +239,22 @@ static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos) goto found; } } - - nxt = net_hotdata.ptype_all.next; - goto ptype_all; + nxt = net->ptype_all.next; + goto net_ptype_all; } - if (pt->type == htons(ETH_P_ALL)) { -ptype_all: - if (nxt != &net_hotdata.ptype_all) + if (pt->af_packet_net) { +net_ptype_all: + if (nxt != &net->ptype_all && nxt != &net->ptype_specific) goto found; + + if (nxt == &net->ptype_all) { + /* continue with ->ptype_specific if it's not empty */ + nxt = net->ptype_specific.next; + if (nxt != &net->ptype_specific) + goto found; + } + hash = 0; nxt = ptype_base[0].next; } else diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c index 07cb99b114bdd..0b7624236896f 100644 --- a/net/core/net-sysfs.c +++ b/net/core/net-sysfs.c @@ -23,6 +23,7 @@ #include #include #include +#include #include #include @@ -42,6 +43,87 @@ static inline int dev_isalive(const struct net_device *dev) return READ_ONCE(dev->reg_state) <= NETREG_REGISTERED; } +/* There is a possible ABBA deadlock between rtnl_lock and kernfs_node->active, + * when unregistering a net device and accessing associated sysfs files. 
The + * potential deadlock is as follow: + * + * CPU 0 CPU 1 + * + * rtnl_lock vfs_read + * unregister_netdevice_many kernfs_seq_start + * device_del / kobject_put kernfs_get_active (kn->active++) + * kernfs_drain sysfs_kf_seq_show + * wait_event( rtnl_lock + * kn->active == KN_DEACTIVATED_BIAS) -> waits on CPU 0 to release + * -> waits on CPU 1 to decrease kn->active the rtnl lock. + * + * The historical fix was to use rtnl_trylock with restart_syscall to bail out + * of sysfs operations when the lock couldn't be taken. This fixed the above + * issue as it allowed CPU 1 to bail out of the ABBA situation. + * + * But it came with performances issues, as syscalls are being restarted in + * loops when there was contention on the rtnl lock, with huge slow downs in + * specific scenarios (e.g. lots of virtual interfaces created and userspace + * daemons querying their attributes). + * + * The idea below is to bail out of the active kernfs_node protection + * (kn->active) while trying to take the rtnl lock. + * + * This replaces rtnl_lock() and still has to be used with rtnl_unlock(). The + * net device is guaranteed to be alive if this returns successfully. + */ +static int sysfs_rtnl_lock(struct kobject *kobj, struct attribute *attr, + struct net_device *ndev) +{ + struct kernfs_node *kn; + int ret = 0; + + /* First, we hold a reference to the net device as the unregistration + * path might run in parallel. This will ensure the net device and the + * associated sysfs objects won't be freed while we try to take the rtnl + * lock. + */ + dev_hold(ndev); + /* sysfs_break_active_protection was introduced to allow self-removal of + * devices and their associated sysfs files by bailing out of the + * sysfs/kernfs protection. We do this here to allow the unregistration + * path to complete in parallel. The following takes a reference on the + * kobject and the kernfs_node being accessed. + * + * This works because we hold a reference onto the net device and the + * unregistration path will wait for us eventually in netdev_run_todo + * (outside an rtnl lock section). + */ + kn = sysfs_break_active_protection(kobj, attr); + /* We can now try to take the rtnl lock. This can't deadlock us as the + * unregistration path is able to drain sysfs files (kernfs_node) thanks + * to the above dance. + */ + if (rtnl_lock_interruptible()) { + ret = -ERESTARTSYS; + goto unbreak; + } + /* Check dismantle on the device hasn't started, otherwise deny the + * operation. + */ + if (!dev_isalive(ndev)) { + rtnl_unlock(); + ret = -ENODEV; + goto unbreak; + } + /* We are now sure the device dismantle hasn't started nor that it can + * start before we exit the locking section as we hold the rtnl lock. + * There's no need to keep unbreaking the sysfs protection nor to hold + * a net device reference from that point; that was only needed to take + * the rtnl lock. 
+ */ +unbreak: + sysfs_unbreak_active_protection(kn); + dev_put(ndev); + + return ret; +} + /* use same locking rules as GIF* ioctl's */ static ssize_t netdev_show(const struct device *dev, struct device_attribute *attr, char *buf, @@ -95,14 +177,14 @@ static ssize_t netdev_store(struct device *dev, struct device_attribute *attr, if (ret) goto err; - if (!rtnl_trylock()) - return restart_syscall(); + ret = sysfs_rtnl_lock(&dev->kobj, &attr->attr, netdev); + if (ret) + goto err; + + ret = (*set)(netdev, new); + if (ret == 0) + ret = len; - if (dev_isalive(netdev)) { - ret = (*set)(netdev, new); - if (ret == 0) - ret = len; - } rtnl_unlock(); err: return ret; @@ -220,7 +302,7 @@ static ssize_t carrier_store(struct device *dev, struct device_attribute *attr, struct net_device *netdev = to_net_dev(dev); /* The check is also done in change_carrier; this helps returning early - * without hitting the trylock/restart in netdev_store. + * without hitting the locking section in netdev_store. */ if (!netdev->netdev_ops->ndo_change_carrier) return -EOPNOTSUPP; @@ -232,11 +314,13 @@ static ssize_t carrier_show(struct device *dev, struct device_attribute *attr, char *buf) { struct net_device *netdev = to_net_dev(dev); - int ret = -EINVAL; + int ret; - if (!rtnl_trylock()) - return restart_syscall(); + ret = sysfs_rtnl_lock(&dev->kobj, &attr->attr, netdev); + if (ret) + return ret; + ret = -EINVAL; if (netif_running(netdev)) { /* Synchronize carrier state with link watch, * see also rtnl_getlink(). @@ -245,8 +329,8 @@ static ssize_t carrier_show(struct device *dev, ret = sysfs_emit(buf, fmt_dec, !!netif_carrier_ok(netdev)); } - rtnl_unlock(); + rtnl_unlock(); return ret; } static DEVICE_ATTR_RW(carrier); @@ -258,14 +342,16 @@ static ssize_t speed_show(struct device *dev, int ret = -EINVAL; /* The check is also done in __ethtool_get_link_ksettings; this helps - * returning early without hitting the trylock/restart below. + * returning early without hitting the locking section below. */ if (!netdev->ethtool_ops->get_link_ksettings) return ret; - if (!rtnl_trylock()) - return restart_syscall(); + ret = sysfs_rtnl_lock(&dev->kobj, &attr->attr, netdev); + if (ret) + return ret; + ret = -EINVAL; if (netif_running(netdev)) { struct ethtool_link_ksettings cmd; @@ -284,14 +370,16 @@ static ssize_t duplex_show(struct device *dev, int ret = -EINVAL; /* The check is also done in __ethtool_get_link_ksettings; this helps - * returning early without hitting the trylock/restart below. + * returning early without hitting the locking section below. 
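
Every handler converted below follows the same recipe: call sysfs_rtnl_lock() instead of the old rtnl_trylock()/restart_syscall() pair, drop the separate dev_isalive() check (the helper already guarantees the device is alive on success), and pair the call with a plain rtnl_unlock(). As a sketch, a new read-only attribute handler under this scheme would look roughly like this ("foo" is a hypothetical attribute, not part of the patch):

static ssize_t foo_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	ssize_t ret;

	/* Takes the rtnl lock while dodging the ABBA deadlock described
	 * above; the netdev is guaranteed alive when this returns 0.
	 */
	ret = sysfs_rtnl_lock(&dev->kobj, &attr->attr, netdev);
	if (ret)
		return ret;

	ret = sysfs_emit(buf, "%d\n", netdev->ifindex);

	rtnl_unlock();
	return ret;
}
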
*/ if (!netdev->ethtool_ops->get_link_ksettings) return ret; - if (!rtnl_trylock()) - return restart_syscall(); + ret = sysfs_rtnl_lock(&dev->kobj, &attr->attr, netdev); + if (ret) + return ret; + ret = -EINVAL; if (netif_running(netdev)) { struct ethtool_link_ksettings cmd; @@ -481,7 +569,7 @@ static ssize_t ifalias_store(struct device *dev, struct device_attribute *attr, struct net_device *netdev = to_net_dev(dev); struct net *net = dev_net(netdev); size_t count = len; - ssize_t ret = 0; + ssize_t ret; if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) return -EPERM; @@ -490,16 +578,15 @@ static ssize_t ifalias_store(struct device *dev, struct device_attribute *attr, if (len > 0 && buf[len - 1] == '\n') --count; - if (!rtnl_trylock()) - return restart_syscall(); + ret = sysfs_rtnl_lock(&dev->kobj, &attr->attr, netdev); + if (ret) + return ret; - if (dev_isalive(netdev)) { - ret = dev_set_alias(netdev, buf, count); - if (ret < 0) - goto err; - ret = len; - netdev_state_change(netdev); - } + ret = dev_set_alias(netdev, buf, count); + if (ret < 0) + goto err; + ret = len; + netdev_state_change(netdev); err: rtnl_unlock(); @@ -511,7 +598,7 @@ static ssize_t ifalias_show(struct device *dev, { const struct net_device *netdev = to_net_dev(dev); char tmp[IFALIASZ]; - ssize_t ret = 0; + ssize_t ret; ret = dev_get_alias(netdev, tmp, sizeof(tmp)); if (ret > 0) @@ -551,24 +638,23 @@ static ssize_t phys_port_id_show(struct device *dev, struct device_attribute *attr, char *buf) { struct net_device *netdev = to_net_dev(dev); - ssize_t ret = -EINVAL; + struct netdev_phys_item_id ppid; + ssize_t ret; /* The check is also done in dev_get_phys_port_id; this helps returning - * early without hitting the trylock/restart below. + * early without hitting the locking section below. */ if (!netdev->netdev_ops->ndo_get_phys_port_id) return -EOPNOTSUPP; - if (!rtnl_trylock()) - return restart_syscall(); + ret = sysfs_rtnl_lock(&dev->kobj, &attr->attr, netdev); + if (ret) + return ret; - if (dev_isalive(netdev)) { - struct netdev_phys_item_id ppid; + ret = dev_get_phys_port_id(netdev, &ppid); + if (!ret) + ret = sysfs_emit(buf, "%*phN\n", ppid.id_len, ppid.id); - ret = dev_get_phys_port_id(netdev, &ppid); - if (!ret) - ret = sysfs_emit(buf, "%*phN\n", ppid.id_len, ppid.id); - } rtnl_unlock(); return ret; @@ -579,25 +665,24 @@ static ssize_t phys_port_name_show(struct device *dev, struct device_attribute *attr, char *buf) { struct net_device *netdev = to_net_dev(dev); - ssize_t ret = -EINVAL; + char name[IFNAMSIZ]; + ssize_t ret; /* The checks are also done in dev_get_phys_port_name; this helps - * returning early without hitting the trylock/restart below. + * returning early without hitting the locking section below. 
*/ if (!netdev->netdev_ops->ndo_get_phys_port_name && !netdev->devlink_port) return -EOPNOTSUPP; - if (!rtnl_trylock()) - return restart_syscall(); + ret = sysfs_rtnl_lock(&dev->kobj, &attr->attr, netdev); + if (ret) + return ret; - if (dev_isalive(netdev)) { - char name[IFNAMSIZ]; + ret = dev_get_phys_port_name(netdev, name, sizeof(name)); + if (!ret) + ret = sysfs_emit(buf, "%s\n", name); - ret = dev_get_phys_port_name(netdev, name, sizeof(name)); - if (!ret) - ret = sysfs_emit(buf, "%s\n", name); - } rtnl_unlock(); return ret; @@ -608,26 +693,25 @@ static ssize_t phys_switch_id_show(struct device *dev, struct device_attribute *attr, char *buf) { struct net_device *netdev = to_net_dev(dev); - ssize_t ret = -EINVAL; + struct netdev_phys_item_id ppid = { }; + ssize_t ret; /* The checks are also done in dev_get_phys_port_name; this helps - * returning early without hitting the trylock/restart below. This works + * returning early without hitting the locking section below. This works * because recurse is false when calling dev_get_port_parent_id. */ if (!netdev->netdev_ops->ndo_get_port_parent_id && !netdev->devlink_port) return -EOPNOTSUPP; - if (!rtnl_trylock()) - return restart_syscall(); + ret = sysfs_rtnl_lock(&dev->kobj, &attr->attr, netdev); + if (ret) + return ret; - if (dev_isalive(netdev)) { - struct netdev_phys_item_id ppid = { }; + ret = dev_get_port_parent_id(netdev, &ppid, false); + if (!ret) + ret = sysfs_emit(buf, "%*phN\n", ppid.id_len, ppid.id); - ret = dev_get_port_parent_id(netdev, &ppid, false); - if (!ret) - ret = sysfs_emit(buf, "%*phN\n", ppid.id_len, ppid.id); - } rtnl_unlock(); return ret; @@ -657,7 +741,7 @@ static int modify_napi_threaded(struct net_device *dev, unsigned long val) if (list_empty(&dev->napi_list)) return -EOPNOTSUPP; - if (val != 0 && val != 1) + if (val > NETDEV_NAPI_THREADED_BUSY_POLL_ENABLE) return -EOPNOTSUPP; ret = dev_set_threaded(dev, val); @@ -972,7 +1056,7 @@ static ssize_t show_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue, rcu_read_lock(); flow_table = rcu_dereference(queue->rps_flow_table); if (flow_table) - val = (unsigned long)flow_table->mask + 1; + val = 1UL << flow_table->log; rcu_read_unlock(); return sysfs_emit(buf, "%lu\n", val); @@ -1025,7 +1109,7 @@ static ssize_t store_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue, if (!table) return -ENOMEM; - table->mask = mask; + table->log = ilog2(mask) + 1; for (count = 0; count <= mask; count++) table->flows[count].cpu = RPS_NO_CPU; } else { @@ -1108,7 +1192,6 @@ static void rx_queue_get_ownership(const struct kobject *kobj, static const struct kobj_type rx_queue_ktype = { .sysfs_ops = &rx_queue_sysfs_ops, .release = rx_queue_release, - .default_groups = rx_queue_default_groups, .namespace = rx_queue_namespace, .get_ownership = rx_queue_get_ownership, }; @@ -1131,6 +1214,22 @@ static int rx_queue_add_kobject(struct net_device *dev, int index) struct kobject *kobj = &queue->kobj; int error = 0; + /* Rx queues are cleared in rx_queue_release to allow later + * re-registration. This is triggered when their kobj refcount is + * dropped. + * + * If a queue is removed while both a read (or write) operation and + * the re-addition of the same queue are pending (waiting on rtnl_lock), + * the re-addition might execute before the read, preventing the + * initial removal from ever happening (the queue's kobj refcount + * won't drop enough because of the pending read). In such a rare case, + * return to allow the removal operation to complete.
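
The rps_dev_flow_table change above swaps the stored mask for a log2 table size: the store path records table->log = ilog2(mask) + 1 and the show path recovers the entry count as 1UL << flow_table->log. Since the store path only produces power-of-two sizes, the two encodings are interchangeable; a quick user-space sketch checking that (this ilog2() is a stand-in for the kernel macro):

#include <assert.h>

static unsigned int ilog2(unsigned long v)
{
	unsigned int l = 0;

	while (v >>= 1)
		l++;
	return l;
}

int main(void)
{
	for (unsigned long size = 2; size <= 65536; size <<= 1) {
		unsigned long mask = size - 1;		/* old encoding */
		unsigned int log = ilog2(mask) + 1;	/* new encoding */

		/* e.g. size 512: mask 511, log 9, 1UL << 9 == 512 */
		assert((1UL << log) == mask + 1);
	}
	return 0;
}
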
+ */ + if (unlikely(kobj->state_initialized)) { + netdev_warn_once(dev, "Cannot re-add rx queues before their removal completed"); + return -EAGAIN; + } + /* Kobject_put later will trigger rx_queue_release call which * decreases dev refcount: Take that reference here */ @@ -1142,20 +1241,27 @@ static int rx_queue_add_kobject(struct net_device *dev, int index) if (error) goto err; + queue->groups = rx_queue_default_groups; + error = sysfs_create_groups(kobj, queue->groups); + if (error) + goto err; + if (dev->sysfs_rx_queue_group) { error = sysfs_create_group(kobj, dev->sysfs_rx_queue_group); if (error) - goto err; + goto err_default_groups; } error = rx_queue_default_mask(dev, queue); if (error) - goto err; + goto err_default_groups; kobject_uevent(kobj, KOBJ_ADD); return error; +err_default_groups: + sysfs_remove_groups(kobj, queue->groups); err: kobject_put(kobj); return error; @@ -1200,12 +1306,14 @@ net_rx_queue_update_kobjects(struct net_device *dev, int old_num, int new_num) } while (--i >= new_num) { - struct kobject *kobj = &dev->_rx[i].kobj; + struct netdev_rx_queue *queue = &dev->_rx[i]; + struct kobject *kobj = &queue->kobj; if (!refcount_read(&dev_net(dev)->ns.count)) kobj->uevent_suppress = 1; if (dev->sysfs_rx_queue_group) sysfs_remove_group(kobj, dev->sysfs_rx_queue_group); + sysfs_remove_groups(kobj, queue->groups); kobject_put(kobj); } @@ -1244,9 +1352,11 @@ static int net_rx_queue_change_owner(struct net_device *dev, int num, */ struct netdev_queue_attribute { struct attribute attr; - ssize_t (*show)(struct netdev_queue *queue, char *buf); - ssize_t (*store)(struct netdev_queue *queue, - const char *buf, size_t len); + ssize_t (*show)(struct kobject *kobj, struct attribute *attr, + struct netdev_queue *queue, char *buf); + ssize_t (*store)(struct kobject *kobj, struct attribute *attr, + struct netdev_queue *queue, const char *buf, + size_t len); }; #define to_netdev_queue_attr(_attr) \ container_of(_attr, struct netdev_queue_attribute, attr) @@ -1263,7 +1373,7 @@ static ssize_t netdev_queue_attr_show(struct kobject *kobj, if (!attribute->show) return -EIO; - return attribute->show(queue, buf); + return attribute->show(kobj, attr, queue, buf); } static ssize_t netdev_queue_attr_store(struct kobject *kobj, @@ -1277,7 +1387,7 @@ static ssize_t netdev_queue_attr_store(struct kobject *kobj, if (!attribute->store) return -EIO; - return attribute->store(queue, buf, count); + return attribute->store(kobj, attr, queue, buf, count); } static const struct sysfs_ops netdev_queue_sysfs_ops = { @@ -1285,7 +1395,8 @@ static const struct sysfs_ops netdev_queue_sysfs_ops = { .store = netdev_queue_attr_store, }; -static ssize_t tx_timeout_show(struct netdev_queue *queue, char *buf) +static ssize_t tx_timeout_show(struct kobject *kobj, struct attribute *attr, + struct netdev_queue *queue, char *buf) { unsigned long trans_timeout = atomic_long_read(&queue->trans_timeout); @@ -1303,18 +1414,18 @@ static unsigned int get_netdev_queue_index(struct netdev_queue *queue) return i; } -static ssize_t traffic_class_show(struct netdev_queue *queue, - char *buf) +static ssize_t traffic_class_show(struct kobject *kobj, struct attribute *attr, + struct netdev_queue *queue, char *buf) { struct net_device *dev = queue->dev; - int num_tc, tc; - int index; + int num_tc, tc, index, ret; if (!netif_is_multiqueue(dev)) return -ENOENT; - if (!rtnl_trylock()) - return restart_syscall(); + ret = sysfs_rtnl_lock(kobj, attr, queue->dev); + if (ret) + return ret; index = get_netdev_queue_index(queue); @@ -1341,24 
+1452,25 @@ static ssize_t traffic_class_show(struct netdev_queue *queue, } #ifdef CONFIG_XPS -static ssize_t tx_maxrate_show(struct netdev_queue *queue, - char *buf) +static ssize_t tx_maxrate_show(struct kobject *kobj, struct attribute *attr, + struct netdev_queue *queue, char *buf) { return sysfs_emit(buf, "%lu\n", queue->tx_maxrate); } -static ssize_t tx_maxrate_store(struct netdev_queue *queue, - const char *buf, size_t len) +static ssize_t tx_maxrate_store(struct kobject *kobj, struct attribute *attr, + struct netdev_queue *queue, const char *buf, + size_t len) { - struct net_device *dev = queue->dev; int err, index = get_netdev_queue_index(queue); + struct net_device *dev = queue->dev; u32 rate = 0; if (!capable(CAP_NET_ADMIN)) return -EPERM; /* The check is also done later; this helps returning early without - * hitting the trylock/restart below. + * hitting the locking section below. */ if (!dev->netdev_ops->ndo_set_tx_maxrate) return -EOPNOTSUPP; @@ -1367,18 +1479,23 @@ static ssize_t tx_maxrate_store(struct netdev_queue *queue, if (err < 0) return err; - if (!rtnl_trylock()) - return restart_syscall(); + err = sysfs_rtnl_lock(kobj, attr, dev); + if (err) + return err; err = -EOPNOTSUPP; + netdev_lock_ops(dev); if (dev->netdev_ops->ndo_set_tx_maxrate) err = dev->netdev_ops->ndo_set_tx_maxrate(dev, index, rate); + netdev_unlock_ops(dev); - rtnl_unlock(); if (!err) { queue->tx_maxrate = rate; + rtnl_unlock(); return len; } + + rtnl_unlock(); return err; } @@ -1422,16 +1539,17 @@ static ssize_t bql_set(const char *buf, const size_t count, return count; } -static ssize_t bql_show_hold_time(struct netdev_queue *queue, - char *buf) +static ssize_t bql_show_hold_time(struct kobject *kobj, struct attribute *attr, + struct netdev_queue *queue, char *buf) { struct dql *dql = &queue->dql; return sysfs_emit(buf, "%u\n", jiffies_to_msecs(dql->slack_hold_time)); } -static ssize_t bql_set_hold_time(struct netdev_queue *queue, - const char *buf, size_t len) +static ssize_t bql_set_hold_time(struct kobject *kobj, struct attribute *attr, + struct netdev_queue *queue, const char *buf, + size_t len) { struct dql *dql = &queue->dql; unsigned int value; @@ -1450,15 +1568,17 @@ static struct netdev_queue_attribute bql_hold_time_attribute __ro_after_init = __ATTR(hold_time, 0644, bql_show_hold_time, bql_set_hold_time); -static ssize_t bql_show_stall_thrs(struct netdev_queue *queue, char *buf) +static ssize_t bql_show_stall_thrs(struct kobject *kobj, struct attribute *attr, + struct netdev_queue *queue, char *buf) { struct dql *dql = &queue->dql; return sysfs_emit(buf, "%u\n", jiffies_to_msecs(dql->stall_thrs)); } -static ssize_t bql_set_stall_thrs(struct netdev_queue *queue, - const char *buf, size_t len) +static ssize_t bql_set_stall_thrs(struct kobject *kobj, struct attribute *attr, + struct netdev_queue *queue, const char *buf, + size_t len) { struct dql *dql = &queue->dql; unsigned int value; @@ -1484,13 +1604,15 @@ static ssize_t bql_set_stall_thrs(struct netdev_queue *queue, static struct netdev_queue_attribute bql_stall_thrs_attribute __ro_after_init = __ATTR(stall_thrs, 0644, bql_show_stall_thrs, bql_set_stall_thrs); -static ssize_t bql_show_stall_max(struct netdev_queue *queue, char *buf) +static ssize_t bql_show_stall_max(struct kobject *kobj, struct attribute *attr, + struct netdev_queue *queue, char *buf) { return sysfs_emit(buf, "%u\n", READ_ONCE(queue->dql.stall_max)); } -static ssize_t bql_set_stall_max(struct netdev_queue *queue, - const char *buf, size_t len) +static ssize_t 
bql_set_stall_max(struct kobject *kobj, struct attribute *attr, + struct netdev_queue *queue, const char *buf, + size_t len) { WRITE_ONCE(queue->dql.stall_max, 0); return len; @@ -1499,7 +1621,8 @@ static ssize_t bql_set_stall_max(struct netdev_queue *queue, static struct netdev_queue_attribute bql_stall_max_attribute __ro_after_init = __ATTR(stall_max, 0644, bql_show_stall_max, bql_set_stall_max); -static ssize_t bql_show_stall_cnt(struct netdev_queue *queue, char *buf) +static ssize_t bql_show_stall_cnt(struct kobject *kobj, struct attribute *attr, + struct netdev_queue *queue, char *buf) { struct dql *dql = &queue->dql; @@ -1509,8 +1632,8 @@ static ssize_t bql_show_stall_cnt(struct netdev_queue *queue, char *buf) static struct netdev_queue_attribute bql_stall_cnt_attribute __ro_after_init = __ATTR(stall_cnt, 0444, bql_show_stall_cnt, NULL); -static ssize_t bql_show_inflight(struct netdev_queue *queue, - char *buf) +static ssize_t bql_show_inflight(struct kobject *kobj, struct attribute *attr, + struct netdev_queue *queue, char *buf) { struct dql *dql = &queue->dql; @@ -1521,13 +1644,16 @@ static struct netdev_queue_attribute bql_inflight_attribute __ro_after_init = __ATTR(inflight, 0444, bql_show_inflight, NULL); #define BQL_ATTR(NAME, FIELD) \ -static ssize_t bql_show_ ## NAME(struct netdev_queue *queue, \ - char *buf) \ +static ssize_t bql_show_ ## NAME(struct kobject *kobj, \ + struct attribute *attr, \ + struct netdev_queue *queue, char *buf) \ { \ return bql_show(buf, queue->dql.FIELD); \ } \ \ -static ssize_t bql_set_ ## NAME(struct netdev_queue *queue, \ +static ssize_t bql_set_ ## NAME(struct kobject *kobj, \ + struct attribute *attr, \ + struct netdev_queue *queue, \ const char *buf, size_t len) \ { \ return bql_set(buf, len, &queue->dql.FIELD); \ @@ -1613,19 +1739,21 @@ static ssize_t xps_queue_show(struct net_device *dev, unsigned int index, return len < PAGE_SIZE ? len : -EINVAL; } -static ssize_t xps_cpus_show(struct netdev_queue *queue, char *buf) +static ssize_t xps_cpus_show(struct kobject *kobj, struct attribute *attr, + struct netdev_queue *queue, char *buf) { struct net_device *dev = queue->dev; unsigned int index; - int len, tc; + int len, tc, ret; if (!netif_is_multiqueue(dev)) return -ENOENT; index = get_netdev_queue_index(queue); - if (!rtnl_trylock()) - return restart_syscall(); + ret = sysfs_rtnl_lock(kobj, attr, queue->dev); + if (ret) + return ret; /* If queue belongs to subordinate dev use its map */ dev = netdev_get_tx_queue(dev, index)->sb_dev ? : dev; @@ -1636,18 +1764,21 @@ static ssize_t xps_cpus_show(struct netdev_queue *queue, char *buf) return -EINVAL; } - /* Make sure the subordinate device can't be freed */ - get_device(&dev->dev); + /* Increase the net device refcnt to make sure it won't be freed while + * xps_queue_show is running. 
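
With the widened signatures, the BQL_ATTR() macro above stamps out a show/set pair (plus the attribute definition) per dql field. For instance, its use for the dql limit field, BQL_ATTR(limit, limit), would expand roughly to the following; the expansion is reconstructed here purely for illustration:

static ssize_t bql_show_limit(struct kobject *kobj, struct attribute *attr,
			      struct netdev_queue *queue, char *buf)
{
	return bql_show(buf, queue->dql.limit);
}

static ssize_t bql_set_limit(struct kobject *kobj, struct attribute *attr,
			     struct netdev_queue *queue, const char *buf,
			     size_t len)
{
	return bql_set(buf, len, &queue->dql.limit);
}

/* ...followed by an attribute definition in the style of the others: */
static struct netdev_queue_attribute bql_limit_attribute __ro_after_init =
	__ATTR(limit, 0644, bql_show_limit, bql_set_limit);
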
+ */ + dev_hold(dev); rtnl_unlock(); len = xps_queue_show(dev, index, tc, buf, XPS_CPUS); - put_device(&dev->dev); + dev_put(dev); return len; } -static ssize_t xps_cpus_store(struct netdev_queue *queue, - const char *buf, size_t len) +static ssize_t xps_cpus_store(struct kobject *kobj, struct attribute *attr, + struct netdev_queue *queue, const char *buf, + size_t len) { struct net_device *dev = queue->dev; unsigned int index; @@ -1671,9 +1802,10 @@ static ssize_t xps_cpus_store(struct netdev_queue *queue, return err; } - if (!rtnl_trylock()) { + err = sysfs_rtnl_lock(kobj, attr, dev); + if (err) { free_cpumask_var(mask); - return restart_syscall(); + return err; } err = netif_set_xps_queue(dev, mask, index); @@ -1687,26 +1819,34 @@ static struct netdev_queue_attribute xps_cpus_attribute __ro_after_init = __ATTR_RW(xps_cpus); -static ssize_t xps_rxqs_show(struct netdev_queue *queue, char *buf) +static ssize_t xps_rxqs_show(struct kobject *kobj, struct attribute *attr, + struct netdev_queue *queue, char *buf) { struct net_device *dev = queue->dev; unsigned int index; - int tc; + int tc, ret; index = get_netdev_queue_index(queue); - if (!rtnl_trylock()) - return restart_syscall(); + ret = sysfs_rtnl_lock(kobj, attr, dev); + if (ret) + return ret; tc = netdev_txq_to_tc(dev, index); + + /* Increase the net device refcnt to make sure it won't be freed while + * xps_queue_show is running. + */ + dev_hold(dev); rtnl_unlock(); - if (tc < 0) - return -EINVAL; - return xps_queue_show(dev, index, tc, buf, XPS_RXQS); + ret = tc >= 0 ? xps_queue_show(dev, index, tc, buf, XPS_RXQS) : -EINVAL; + dev_put(dev); + return ret; } -static ssize_t xps_rxqs_store(struct netdev_queue *queue, const char *buf, +static ssize_t xps_rxqs_store(struct kobject *kobj, struct attribute *attr, + struct netdev_queue *queue, const char *buf, size_t len) { struct net_device *dev = queue->dev; @@ -1730,9 +1870,10 @@ static ssize_t xps_rxqs_store(struct netdev_queue *queue, const char *buf, return err; } - if (!rtnl_trylock()) { + err = sysfs_rtnl_lock(kobj, attr, dev); + if (err) { bitmap_free(mask); - return restart_syscall(); + return err; } cpus_read_lock(); @@ -1792,7 +1933,6 @@ static void netdev_queue_get_ownership(const struct kobject *kobj, static const struct kobj_type netdev_queue_ktype = { .sysfs_ops = &netdev_queue_sysfs_ops, .release = netdev_queue_release, - .default_groups = netdev_queue_default_groups, .namespace = netdev_queue_namespace, .get_ownership = netdev_queue_get_ownership, }; @@ -1811,6 +1951,22 @@ static int netdev_queue_add_kobject(struct net_device *dev, int index) struct kobject *kobj = &queue->kobj; int error = 0; + /* Tx queues are cleared in netdev_queue_release to allow later + * re-registration. This is triggered when their kobj refcount is + * dropped. + * + * If a queue is removed while both a read (or write) operation and + * the re-addition of the same queue are pending (waiting on rtnl_lock), + * the re-addition might execute before the read, preventing the + * initial removal from ever happening (the queue's kobj refcount + * won't drop enough because of the pending read). In such a rare case, + * return to allow the removal operation to complete.
+ */ + if (unlikely(kobj->state_initialized)) { + netdev_warn_once(dev, "Cannot re-add tx queues before their removal completed"); + return -EAGAIN; + } + /* Kobject_put later will trigger netdev_queue_release call * which decreases dev refcount: Take that reference here */ @@ -1822,15 +1978,22 @@ static int netdev_queue_add_kobject(struct net_device *dev, int index) if (error) goto err; + queue->groups = netdev_queue_default_groups; + error = sysfs_create_groups(kobj, queue->groups); + if (error) + goto err; + if (netdev_uses_bql(dev)) { error = sysfs_create_group(kobj, &dql_group); if (error) - goto err; + goto err_default_groups; } kobject_uevent(kobj, KOBJ_ADD); return 0; +err_default_groups: + sysfs_remove_groups(kobj, queue->groups); err: kobject_put(kobj); return error; @@ -1885,6 +2048,7 @@ netdev_queue_update_kobjects(struct net_device *dev, int old_num, int new_num) if (netdev_uses_bql(dev)) sysfs_remove_group(&queue->kobj, &dql_group); + sysfs_remove_groups(&queue->kobj, queue->groups); kobject_put(&queue->kobj); } @@ -1984,8 +2148,10 @@ static void remove_queue_kobjects(struct net_device *dev) net_rx_queue_update_kobjects(dev, real_rx, 0); netdev_queue_update_kobjects(dev, real_tx, 0); + netdev_lock_ops(dev); dev->real_num_rx_queues = 0; dev->real_num_tx_queues = 0; + netdev_unlock_ops(dev); #ifdef CONFIG_SYSFS kset_unregister(dev->queues_kset); #endif diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c index 4303f2a492624..587c6672d64f3 100644 --- a/net/core/net_namespace.c +++ b/net/core/net_namespace.c @@ -317,6 +317,27 @@ static __net_init void preinit_net_sysctl(struct net *net) net->core.sysctl_optmem_max = 128 * 1024; net->core.sysctl_txrehash = SOCK_TXREHASH_ENABLED; net->core.sysctl_tstamp_allow_data = 1; + + /* + * net.core.{r,w}mem_{default,max} used to be non-namespaced. + * For backward compatibility, inherit values from the current netns + * when creating a new one, so that setting them in init_net + * affects new namespaces like it used to. This avoids causing + * surprising performance regressions for namespaced applications + * relying on tuned rmem/wmem. + */ + if (net == &init_net) { + net->core.sysctl_wmem_max = SK_WMEM_MAX; + net->core.sysctl_rmem_max = SK_RMEM_MAX; + net->core.sysctl_wmem_default = SK_WMEM_MAX; + net->core.sysctl_rmem_default = SK_RMEM_MAX; + } else { + struct net *current_net = current->nsproxy->net_ns; + net->core.sysctl_wmem_max = current_net->core.sysctl_wmem_max; + net->core.sysctl_rmem_max = current_net->core.sysctl_rmem_max; + net->core.sysctl_wmem_default = current_net->core.sysctl_wmem_default; + net->core.sysctl_rmem_default = current_net->core.sysctl_rmem_default; + } } /* init code that must occur even if setup_net() is not called. 
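
The inheritance rule above is easy to observe from user space: a namespace created after tuning net.core.rmem_max sees the tuned value rather than the compiled-in default. A minimal sketch (assumes CAP_SYS_ADMIN and a mounted /proc; error handling trimmed):

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

static long read_rmem_max(void)
{
	FILE *f = fopen("/proc/sys/net/core/rmem_max", "r");
	long v = -1;

	if (f) {
		if (fscanf(f, "%ld", &v) != 1)
			v = -1;
		fclose(f);
	}
	return v;
}

int main(void)
{
	printf("current netns rmem_max: %ld\n", read_rmem_max());

	/* the new namespace copies the values from the creator's netns */
	if (unshare(CLONE_NEWNET)) {
		perror("unshare");
		return 1;
	}
	printf("new netns rmem_max:     %ld\n", read_rmem_max());
	return 0;
}
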
*/ @@ -340,6 +361,8 @@ static __net_init void preinit_net(struct net *net, struct user_namespace *user_ lock_set_cmp_fn(&net->rtnl_mutex, rtnl_net_lock_cmp_fn, NULL); #endif + INIT_LIST_HEAD(&net->ptype_all); + INIT_LIST_HEAD(&net->ptype_specific); preinit_net_sysctl(net); } @@ -1512,3 +1535,154 @@ const struct proc_ns_operations netns_operations = { .owner = netns_owner, }; #endif + +#ifdef CONFIG_DEBUG_FS +#ifdef CONFIG_NET_NS_REFCNT_TRACKER + +#include + +static struct dentry *ns_debug_dir; +static unsigned int ns_debug_net_id; + +struct ns_debug_net { + struct dentry *netdir; + struct dentry *refcnt; + struct dentry *notrefcnt; +}; + +#define MAX_NS_DEBUG_BUFSIZE (32 * PAGE_SIZE) + +static int +ns_debug_tracker_show(struct seq_file *f, void *v) +{ + struct ref_tracker_dir *tracker = f->private; + int len, bufsize = PAGE_SIZE; + char *buf; + + for (;;) { + buf = kvmalloc(bufsize, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + len = ref_tracker_dir_snprint(tracker, buf, bufsize); + if (len < bufsize) + break; + + kvfree(buf); + bufsize *= 2; + if (bufsize > MAX_NS_DEBUG_BUFSIZE) + return -ENOBUFS; + } + seq_write(f, buf, len); + kvfree(buf); + return 0; +} + +static int +ns_debug_ref_open(struct inode *inode, struct file *filp) +{ + int ret; + struct net *net = inode->i_private; + + ret = single_open(filp, ns_debug_tracker_show, &net->refcnt_tracker); + if (!ret) + net_passive_inc(net); + return ret; +} + +static int +ns_debug_notref_open(struct inode *inode, struct file *filp) +{ + int ret; + struct net *net = inode->i_private; + + ret = single_open(filp, ns_debug_tracker_show, &net->notrefcnt_tracker); + if (!ret) + net_passive_inc(net); + return ret; +} + +static int +ns_debug_ref_release(struct inode *inode, struct file *filp) +{ + struct net *net = inode->i_private; + + net_passive_dec(net); + return single_release(inode, filp); +} + +static const struct file_operations ns_debug_ref_fops = { + .owner = THIS_MODULE, + .open = ns_debug_ref_open, + .read = seq_read, + .llseek = seq_lseek, + .release = ns_debug_ref_release, +}; + +static const struct file_operations ns_debug_notref_fops = { + .owner = THIS_MODULE, + .open = ns_debug_notref_open, + .read = seq_read, + .llseek = seq_lseek, + .release = ns_debug_ref_release, +}; + +static int +ns_debug_init_net(struct net *net) +{ + struct ns_debug_net *dnet = net_generic(net, ns_debug_net_id); + char name[11]; /* 10 decimal digits + NULL term */ + int len; + + len = snprintf(name, sizeof(name), "%u", net->ns.inum); + if (len >= sizeof(name)) + return -EOVERFLOW; + + dnet->netdir = debugfs_create_dir(name, ns_debug_dir); + if (IS_ERR(dnet->netdir)) + return PTR_ERR(dnet->netdir); + + dnet->refcnt = debugfs_create_file("refcnt", S_IFREG | 0400, dnet->netdir, + net, &ns_debug_ref_fops); + if (IS_ERR(dnet->refcnt)) { + debugfs_remove(dnet->netdir); + return PTR_ERR(dnet->refcnt); + } + + dnet->notrefcnt = debugfs_create_file("notrefcnt", S_IFREG | 0400, dnet->netdir, + net, &ns_debug_notref_fops); + if (IS_ERR(dnet->notrefcnt)) { + debugfs_remove_recursive(dnet->netdir); + return PTR_ERR(dnet->notrefcnt); + } + + return 0; +} + +static void +ns_debug_exit_net(struct net *net) +{ + struct ns_debug_net *dnet = net_generic(net, ns_debug_net_id); + + debugfs_remove_recursive(dnet->netdir); +} + +static struct pernet_operations ns_debug_net_ops = { + .init = ns_debug_init_net, + .exit = ns_debug_exit_net, + .id = &ns_debug_net_id, + .size = sizeof(struct ns_debug_net), +}; + +static int __init ns_debug_init(void) +{ + ns_debug_dir = 
debugfs_create_dir("net_ns", NULL); + if (IS_ERR(ns_debug_dir)) + return PTR_ERR(ns_debug_dir); + + register_pernet_subsys(&ns_debug_net_ops); + return 0; +} +late_initcall(ns_debug_init); +#endif /* CONFIG_NET_NS_REFCNT_TRACKER */ +#endif /* CONFIG_DEBUG_FS */ diff --git a/net/core/netdev-genl-gen.c b/net/core/netdev-genl-gen.c index 996ac6a449eba..1dbe5f19a192b 100644 --- a/net/core/netdev-genl-gen.c +++ b/net/core/netdev-genl-gen.c @@ -9,7 +9,7 @@ #include "netdev-genl-gen.h" #include -#include +#include /* Integer value ranges */ static const struct netlink_range_validation netdev_a_page_pool_id_range = { @@ -92,11 +92,12 @@ static const struct nla_policy netdev_bind_rx_nl_policy[NETDEV_A_DMABUF_FD + 1] }; /* NETDEV_CMD_NAPI_SET - do */ -static const struct nla_policy netdev_napi_set_nl_policy[NETDEV_A_NAPI_IRQ_SUSPEND_TIMEOUT + 1] = { +static const struct nla_policy netdev_napi_set_nl_policy[NETDEV_A_NAPI_THREADED + 1] = { [NETDEV_A_NAPI_ID] = { .type = NLA_U32, }, [NETDEV_A_NAPI_DEFER_HARD_IRQS] = NLA_POLICY_FULL_RANGE(NLA_U32, &netdev_a_napi_defer_hard_irqs_range), [NETDEV_A_NAPI_GRO_FLUSH_TIMEOUT] = { .type = NLA_UINT, }, [NETDEV_A_NAPI_IRQ_SUSPEND_TIMEOUT] = { .type = NLA_UINT, }, + [NETDEV_A_NAPI_THREADED] = NLA_POLICY_MAX(NLA_U32, 2), }; /* Ops table for netdev */ @@ -187,7 +188,7 @@ static const struct genl_split_ops netdev_nl_ops[] = { .cmd = NETDEV_CMD_NAPI_SET, .doit = netdev_nl_napi_set_doit, .policy = netdev_napi_set_nl_policy, - .maxattr = NETDEV_A_NAPI_IRQ_SUSPEND_TIMEOUT, + .maxattr = NETDEV_A_NAPI_THREADED, .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO, }, }; @@ -217,7 +218,7 @@ struct genl_family netdev_nl_family __ro_after_init = { .n_split_ops = ARRAY_SIZE(netdev_nl_ops), .mcgrps = netdev_nl_mcgrps, .n_mcgrps = ARRAY_SIZE(netdev_nl_mcgrps), - .sock_priv_size = sizeof(struct list_head), + .sock_priv_size = sizeof(struct netdev_nl_sock), .sock_priv_init = __netdev_nl_sock_priv_init, .sock_priv_destroy = __netdev_nl_sock_priv_destroy, }; diff --git a/net/core/netdev-genl-gen.h b/net/core/netdev-genl-gen.h index e09dd7539ff2b..17d39fd64c948 100644 --- a/net/core/netdev-genl-gen.h +++ b/net/core/netdev-genl-gen.h @@ -10,7 +10,7 @@ #include #include -#include +#include /* Common nested types */ extern const struct nla_policy netdev_page_pool_info_nl_policy[NETDEV_A_PAGE_POOL_IFINDEX + 1]; @@ -42,7 +42,7 @@ enum { extern struct genl_family netdev_nl_family; -void netdev_nl_sock_priv_init(struct list_head *priv); -void netdev_nl_sock_priv_destroy(struct list_head *priv); +void netdev_nl_sock_priv_init(struct netdev_nl_sock *priv); +void netdev_nl_sock_priv_destroy(struct netdev_nl_sock *priv); #endif /* _LINUX_NETDEV_GEN_H */ diff --git a/net/core/netdev-genl.c b/net/core/netdev-genl.c index 715f85c6b62ea..42b449db35ac2 100644 --- a/net/core/netdev-genl.c +++ b/net/core/netdev-genl.c @@ -10,6 +10,7 @@ #include #include #include +#include #include "dev.h" #include "devmem.h" @@ -52,6 +53,8 @@ XDP_METADATA_KFUNC_xxx xsk_features |= NETDEV_XSK_FLAGS_TX_TIMESTAMP; if (netdev->xsk_tx_metadata_ops->tmo_request_checksum) xsk_features |= NETDEV_XSK_FLAGS_TX_CHECKSUM; + if (netdev->xsk_tx_metadata_ops->tmo_request_launch_time) + xsk_features |= NETDEV_XSK_FLAGS_TX_LAUNCH_TIME_FIFO; } if (nla_put_u32(rsp, NETDEV_A_DEV_IFINDEX, netdev->ifindex) || @@ -183,6 +186,9 @@ netdev_nl_napi_fill_one(struct sk_buff *rsp, struct napi_struct *napi, if (napi->irq >= 0 && nla_put_u32(rsp, NETDEV_A_NAPI_IRQ, napi->irq)) goto nla_put_failure; + if (nla_put_u32(rsp, NETDEV_A_NAPI_THREADED, 
!!napi->thread)) + goto nla_put_failure; + if (napi->thread) { pid = task_pid_nr(napi->thread); if (nla_put_u32(rsp, NETDEV_A_NAPI_PID, pid)) @@ -266,7 +272,7 @@ netdev_nl_napi_dump_one(struct net_device *netdev, struct sk_buff *rsp, prev_id = UINT_MAX; list_for_each_entry(napi, &netdev->napi_list, dev_list) { - if (napi->napi_id < MIN_NAPI_ID) + if (!napi_id_valid(napi->napi_id)) continue; /* Dump continuation below depends on the list being sorted */ @@ -321,8 +327,14 @@ netdev_nl_napi_set_config(struct napi_struct *napi, struct genl_info *info) { u64 irq_suspend_timeout = 0; u64 gro_flush_timeout = 0; + u32 threaded = 0; u32 defer = 0; + if (info->attrs[NETDEV_A_NAPI_THREADED]) { + threaded = nla_get_u32(info->attrs[NETDEV_A_NAPI_THREADED]); + napi_set_threaded(napi, threaded); + } + if (info->attrs[NETDEV_A_NAPI_DEFER_HARD_IRQS]) { defer = nla_get_u32(info->attrs[NETDEV_A_NAPI_DEFER_HARD_IRQS]); napi_set_defer_hard_irqs(napi, defer); @@ -364,11 +376,18 @@ int netdev_nl_napi_set_doit(struct sk_buff *skb, struct genl_info *info) return err; } +static int nla_put_napi_id(struct sk_buff *skb, const struct napi_struct *napi) +{ + if (napi && napi_id_valid(napi->napi_id)) + return nla_put_u32(skb, NETDEV_A_QUEUE_NAPI_ID, napi->napi_id); + return 0; +} + static int netdev_nl_queue_fill_one(struct sk_buff *rsp, struct net_device *netdev, u32 q_idx, u32 q_type, const struct genl_info *info) { - struct net_devmem_dmabuf_binding *binding; + struct pp_memory_provider_params *params; struct netdev_rx_queue *rxq; struct netdev_queue *txq; void *hdr; @@ -385,21 +404,30 @@ netdev_nl_queue_fill_one(struct sk_buff *rsp, struct net_device *netdev, switch (q_type) { case NETDEV_QUEUE_TYPE_RX: rxq = __netif_get_rx_queue(netdev, q_idx); - if (rxq->napi && nla_put_u32(rsp, NETDEV_A_QUEUE_NAPI_ID, - rxq->napi->napi_id)) + if (nla_put_napi_id(rsp, rxq->napi)) goto nla_put_failure; - binding = rxq->mp_params.mp_priv; - if (binding && - nla_put_u32(rsp, NETDEV_A_QUEUE_DMABUF, binding->id)) + params = &rxq->mp_params; + if (params->mp_ops && + params->mp_ops->nl_fill(params->mp_priv, rsp, rxq)) goto nla_put_failure; +#ifdef CONFIG_XDP_SOCKETS + if (rxq->pool) + if (nla_put_empty_nest(rsp, NETDEV_A_QUEUE_XSK)) + goto nla_put_failure; +#endif break; case NETDEV_QUEUE_TYPE_TX: txq = netdev_get_tx_queue(netdev, q_idx); - if (txq->napi && nla_put_u32(rsp, NETDEV_A_QUEUE_NAPI_ID, - txq->napi->napi_id)) + if (nla_put_napi_id(rsp, txq->napi)) goto nla_put_failure; +#ifdef CONFIG_XDP_SOCKETS + if (txq->pool) + if (nla_put_empty_nest(rsp, NETDEV_A_QUEUE_XSK)) + goto nla_put_failure; +#endif + break; } genlmsg_end(rsp, hdr); @@ -462,18 +490,15 @@ int netdev_nl_queue_get_doit(struct sk_buff *skb, struct genl_info *info) if (!rsp) return -ENOMEM; - rtnl_lock(); - - netdev = netdev_get_by_index_lock(genl_info_net(info), ifindex); + netdev = netdev_get_by_index_lock_ops_compat(genl_info_net(info), + ifindex); if (netdev) { err = netdev_nl_queue_fill(rsp, netdev, q_id, q_type, info); - netdev_unlock(netdev); + netdev_unlock_ops_compat(netdev); } else { err = -ENODEV; } - rtnl_unlock(); - if (err) goto err_free_msg; @@ -522,17 +547,17 @@ int netdev_nl_queue_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb) if (info->attrs[NETDEV_A_QUEUE_IFINDEX]) ifindex = nla_get_u32(info->attrs[NETDEV_A_QUEUE_IFINDEX]); - rtnl_lock(); if (ifindex) { - netdev = netdev_get_by_index_lock(net, ifindex); + netdev = netdev_get_by_index_lock_ops_compat(net, ifindex); if (netdev) { err = netdev_nl_queue_dump_one(netdev, skb, info, ctx); - 
netdev_unlock(netdev); + netdev_unlock_ops_compat(netdev); } else { err = -ENODEV; } } else { - for_each_netdev_lock_scoped(net, netdev, ctx->ifindex) { + for_each_netdev_lock_ops_compat_scoped(net, netdev, + ctx->ifindex) { err = netdev_nl_queue_dump_one(netdev, skb, info, ctx); if (err < 0) break; @@ -540,7 +565,6 @@ int netdev_nl_queue_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb) ctx->txq_idx = 0; } } - rtnl_unlock(); return err; } @@ -576,6 +600,7 @@ netdev_nl_stats_write_rx(struct sk_buff *rsp, struct netdev_queue_stats_rx *rx) netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_ALLOC_FAIL, rx->alloc_fail) || netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_DROPS, rx->hw_drops) || netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_DROP_OVERRUNS, rx->hw_drop_overruns) || + netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_CSUM_COMPLETE, rx->csum_complete) || netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_CSUM_UNNECESSARY, rx->csum_unnecessary) || netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_CSUM_NONE, rx->csum_none) || netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_CSUM_BAD, rx->csum_bad) || @@ -809,8 +834,8 @@ int netdev_nl_bind_rx_doit(struct sk_buff *skb, struct genl_info *info) { struct nlattr *tb[ARRAY_SIZE(netdev_queue_id_nl_policy)]; struct net_devmem_dmabuf_binding *binding; - struct list_head *sock_binding_list; u32 ifindex, dmabuf_fd, rxq_idx; + struct netdev_nl_sock *priv; struct net_device *netdev; struct sk_buff *rsp; struct nlattr *attr; @@ -825,10 +850,9 @@ int netdev_nl_bind_rx_doit(struct sk_buff *skb, struct genl_info *info) ifindex = nla_get_u32(info->attrs[NETDEV_A_DEV_IFINDEX]); dmabuf_fd = nla_get_u32(info->attrs[NETDEV_A_DMABUF_FD]); - sock_binding_list = genl_sk_priv_get(&netdev_nl_family, - NETLINK_CB(skb).sk); - if (IS_ERR(sock_binding_list)) - return PTR_ERR(sock_binding_list); + priv = genl_sk_priv_get(&netdev_nl_family, NETLINK_CB(skb).sk); + if (IS_ERR(priv)) + return PTR_ERR(priv); rsp = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!rsp) @@ -840,11 +864,18 @@ int netdev_nl_bind_rx_doit(struct sk_buff *skb, struct genl_info *info) goto err_genlmsg_free; } - rtnl_lock(); + mutex_lock(&priv->lock); - netdev = __dev_get_by_index(genl_info_net(info), ifindex); + netdev = netdev_get_by_index_lock(genl_info_net(info), ifindex); if (!netdev || !netif_device_present(netdev)) { err = -ENODEV; + goto err_unlock_sock; + } + + if (!netdev_need_ops_lock(netdev)) { + err = -EOPNOTSUPP; + NL_SET_BAD_ATTR(info->extack, + info->attrs[NETDEV_A_DEV_IFINDEX]); goto err_unlock; } @@ -889,7 +920,7 @@ int netdev_nl_bind_rx_doit(struct sk_buff *skb, struct genl_info *info) goto err_unbind; } - list_add(&binding->list, sock_binding_list); + list_add(&binding->list, &priv->bindings); nla_put_u32(rsp, NETDEV_A_DMABUF_ID, binding->id); genlmsg_end(rsp, hdr); @@ -898,34 +929,41 @@ int netdev_nl_bind_rx_doit(struct sk_buff *skb, struct genl_info *info) if (err) goto err_unbind; - rtnl_unlock(); + netdev_unlock(netdev); + + mutex_unlock(&priv->lock); return 0; err_unbind: net_devmem_unbind_dmabuf(binding); err_unlock: - rtnl_unlock(); + netdev_unlock(netdev); +err_unlock_sock: + mutex_unlock(&priv->lock); err_genlmsg_free: nlmsg_free(rsp); return err; } -void netdev_nl_sock_priv_init(struct list_head *priv) +void netdev_nl_sock_priv_init(struct netdev_nl_sock *priv) { - INIT_LIST_HEAD(priv); + INIT_LIST_HEAD(&priv->bindings); + mutex_init(&priv->lock); } -void netdev_nl_sock_priv_destroy(struct list_head *priv) +void netdev_nl_sock_priv_destroy(struct netdev_nl_sock *priv) { struct net_devmem_dmabuf_binding *binding; 
struct net_devmem_dmabuf_binding *temp; - list_for_each_entry_safe(binding, temp, priv, list) { - rtnl_lock(); + mutex_lock(&priv->lock); + list_for_each_entry_safe(binding, temp, &priv->bindings, list) { + netdev_lock(binding->dev); net_devmem_unbind_dmabuf(binding); - rtnl_unlock(); + netdev_unlock(binding->dev); } + mutex_unlock(&priv->lock); } static int netdev_genl_netdevice_event(struct notifier_block *nb, diff --git a/net/core/netdev_rx_queue.c b/net/core/netdev_rx_queue.c index db82786fa0c40..3af716f77a13b 100644 --- a/net/core/netdev_rx_queue.c +++ b/net/core/netdev_rx_queue.c @@ -1,36 +1,37 @@ // SPDX-License-Identifier: GPL-2.0-or-later #include +#include #include #include +#include #include "page_pool_priv.h" int netdev_rx_queue_restart(struct net_device *dev, unsigned int rxq_idx) { struct netdev_rx_queue *rxq = __netif_get_rx_queue(dev, rxq_idx); + const struct netdev_queue_mgmt_ops *qops = dev->queue_mgmt_ops; void *new_mem, *old_mem; int err; - if (!dev->queue_mgmt_ops || !dev->queue_mgmt_ops->ndo_queue_stop || - !dev->queue_mgmt_ops->ndo_queue_mem_free || - !dev->queue_mgmt_ops->ndo_queue_mem_alloc || - !dev->queue_mgmt_ops->ndo_queue_start) + if (!qops || !qops->ndo_queue_stop || !qops->ndo_queue_mem_free || + !qops->ndo_queue_mem_alloc || !qops->ndo_queue_start) return -EOPNOTSUPP; - ASSERT_RTNL(); + netdev_assert_locked(dev); - new_mem = kvzalloc(dev->queue_mgmt_ops->ndo_queue_mem_size, GFP_KERNEL); + new_mem = kvzalloc(qops->ndo_queue_mem_size, GFP_KERNEL); if (!new_mem) return -ENOMEM; - old_mem = kvzalloc(dev->queue_mgmt_ops->ndo_queue_mem_size, GFP_KERNEL); + old_mem = kvzalloc(qops->ndo_queue_mem_size, GFP_KERNEL); if (!old_mem) { err = -ENOMEM; goto err_free_new_mem; } - err = dev->queue_mgmt_ops->ndo_queue_mem_alloc(dev, new_mem, rxq_idx); + err = qops->ndo_queue_mem_alloc(dev, new_mem, rxq_idx); if (err) goto err_free_old_mem; @@ -38,15 +39,19 @@ int netdev_rx_queue_restart(struct net_device *dev, unsigned int rxq_idx) if (err) goto err_free_new_queue_mem; - err = dev->queue_mgmt_ops->ndo_queue_stop(dev, old_mem, rxq_idx); - if (err) - goto err_free_new_queue_mem; + if (netif_running(dev)) { + err = qops->ndo_queue_stop(dev, old_mem, rxq_idx); + if (err) + goto err_free_new_queue_mem; - err = dev->queue_mgmt_ops->ndo_queue_start(dev, new_mem, rxq_idx); - if (err) - goto err_start_queue; + err = qops->ndo_queue_start(dev, new_mem, rxq_idx); + if (err) + goto err_start_queue; + } else { + swap(new_mem, old_mem); + } - dev->queue_mgmt_ops->ndo_queue_mem_free(dev, old_mem); + qops->ndo_queue_mem_free(dev, old_mem); kvfree(old_mem); kvfree(new_mem); @@ -61,15 +66,15 @@ int netdev_rx_queue_restart(struct net_device *dev, unsigned int rxq_idx) * WARN if we fail to recover the old rx queue, and at least free * old_mem so we don't also leak that. */ - if (dev->queue_mgmt_ops->ndo_queue_start(dev, old_mem, rxq_idx)) { + if (qops->ndo_queue_start(dev, old_mem, rxq_idx)) { WARN(1, "Failed to restart old queue in error path. 
RX queue %d may be unhealthy.", rxq_idx); - dev->queue_mgmt_ops->ndo_queue_mem_free(dev, old_mem); + qops->ndo_queue_mem_free(dev, old_mem); } err_free_new_queue_mem: - dev->queue_mgmt_ops->ndo_queue_mem_free(dev, new_mem); + qops->ndo_queue_mem_free(dev, new_mem); err_free_old_mem: kvfree(old_mem); @@ -80,3 +85,74 @@ int netdev_rx_queue_restart(struct net_device *dev, unsigned int rxq_idx) return err; } EXPORT_SYMBOL_NS_GPL(netdev_rx_queue_restart, "NETDEV_INTERNAL"); + +static int __net_mp_open_rxq(struct net_device *dev, unsigned ifq_idx, + struct pp_memory_provider_params *p) +{ + struct netdev_rx_queue *rxq; + int ret; + + if (!netdev_need_ops_lock(dev)) + return -EOPNOTSUPP; + + if (ifq_idx >= dev->real_num_rx_queues) + return -EINVAL; + ifq_idx = array_index_nospec(ifq_idx, dev->real_num_rx_queues); + + rxq = __netif_get_rx_queue(dev, ifq_idx); + if (rxq->mp_params.mp_ops) + return -EEXIST; + + rxq->mp_params = *p; + ret = netdev_rx_queue_restart(dev, ifq_idx); + if (ret) { + rxq->mp_params.mp_ops = NULL; + rxq->mp_params.mp_priv = NULL; + } + return ret; +} + +int net_mp_open_rxq(struct net_device *dev, unsigned ifq_idx, + struct pp_memory_provider_params *p) +{ + int ret; + + netdev_lock(dev); + ret = __net_mp_open_rxq(dev, ifq_idx, p); + netdev_unlock(dev); + return ret; +} + +static void __net_mp_close_rxq(struct net_device *dev, unsigned ifq_idx, + struct pp_memory_provider_params *old_p) +{ + struct netdev_rx_queue *rxq; + + if (WARN_ON_ONCE(ifq_idx >= dev->real_num_rx_queues)) + return; + + rxq = __netif_get_rx_queue(dev, ifq_idx); + + /* Callers holding a netdev ref may get here after we already + * went thru shutdown via dev_memory_provider_uninstall(). + */ + if (dev->reg_state > NETREG_REGISTERED && + !rxq->mp_params.mp_ops) + return; + + if (WARN_ON_ONCE(rxq->mp_params.mp_ops != old_p->mp_ops || + rxq->mp_params.mp_priv != old_p->mp_priv)) + return; + + rxq->mp_params.mp_ops = NULL; + rxq->mp_params.mp_priv = NULL; + WARN_ON(netdev_rx_queue_restart(dev, ifq_idx)); +} + +void net_mp_close_rxq(struct net_device *dev, unsigned ifq_idx, + struct pp_memory_provider_params *old_p) +{ + netdev_lock(dev); + __net_mp_close_rxq(dev, ifq_idx, old_p); + netdev_unlock(dev); +} diff --git a/net/core/netpoll.c b/net/core/netpoll.c index 62b4041aae1ae..4ddb7490df4b8 100644 --- a/net/core/netpoll.c +++ b/net/core/netpoll.c @@ -284,12 +284,13 @@ static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve) struct sk_buff *skb; zap_completion_queue(); - refill_skbs(np); repeat: skb = alloc_skb(len, GFP_ATOMIC); - if (!skb) + if (!skb) { skb = skb_dequeue(&np->skb_pool); + schedule_work(&np->refill_wq); + } if (!skb) { if (++count < 10) { @@ -319,6 +320,7 @@ static int netpoll_owner_active(struct net_device *dev) static netdev_tx_t __netpoll_send_skb(struct netpoll *np, struct sk_buff *skb) { netdev_tx_t status = NETDEV_TX_BUSY; + netdev_tx_t ret = NET_XMIT_DROP; struct net_device *dev; unsigned long tries; /* It is up to the caller to keep npinfo alive. 
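
The parsing change above lets the netpoll device selector carry either an interface name or a MAC address, discriminated simply by the presence of ':'. A user-space sketch of that decision rule; this mac_pton() is a loose stand-in, the kernel helper is stricter about format and length:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool mac_pton(const char *s, unsigned char mac[6])
{
	return sscanf(s, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx",
		      &mac[0], &mac[1], &mac[2],
		      &mac[3], &mac[4], &mac[5]) == 6;
}

int main(void)
{
	const char *selectors[] = { "eth0", "00:11:22:33:44:55" };
	unsigned char mac[6];

	for (int i = 0; i < 2; i++) {
		if (!strchr(selectors[i], ':'))
			printf("'%s' -> interface name\n", selectors[i]);
		else if (mac_pton(selectors[i], mac))
			printf("'%s' -> device MAC\n", selectors[i]);
		else
			printf("'%s' -> parse failed\n", selectors[i]);
	}
	return 0;
}
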
*/ @@ -327,11 +329,12 @@ static netdev_tx_t __netpoll_send_skb(struct netpoll *np, struct sk_buff *skb) lockdep_assert_irqs_disabled(); dev = np->dev; + rcu_read_lock(); npinfo = rcu_dereference_bh(dev->npinfo); if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) { dev_kfree_skb_irq(skb); - return NET_XMIT_DROP; + goto out; } /* don't get messages out of order, and no recursion */ @@ -370,7 +373,10 @@ static netdev_tx_t __netpoll_send_skb(struct netpoll *np, struct sk_buff *skb) skb_queue_tail(&npinfo->txq, skb); schedule_delayed_work(&npinfo->tx_work,0); } - return NETDEV_TX_OK; + ret = NETDEV_TX_OK; +out: + rcu_read_unlock(); + return ret; } netdev_tx_t netpoll_send_skb(struct netpoll *np, struct sk_buff *skb) @@ -427,7 +433,6 @@ int netpoll_send_udp(struct netpoll *np, const char *msg, int len) udph->len = htons(udp_len); if (np->ipv6) { - udph->check = 0; udph->check = csum_ipv6_magic(&np->local_ip.in6, &np->remote_ip.in6, udp_len, IPPROTO_UDP, @@ -501,7 +506,8 @@ void netpoll_print_options(struct netpoll *np) np_info(np, "local IPv6 address %pI6c\n", &np->local_ip.in6); else np_info(np, "local IPv4 address %pI4\n", &np->local_ip.ip); - np_info(np, "interface '%s'\n", np->dev_name); + np_info(np, "interface name '%s'\n", np->dev_name); + np_info(np, "local ethernet address '%pM'\n", np->dev_mac); np_info(np, "remote port %d\n", np->remote_port); if (np->ipv6) np_info(np, "remote IPv6 address %pI6c\n", &np->remote_ip.in6); @@ -535,6 +541,7 @@ static void skb_pool_flush(struct netpoll *np) { struct sk_buff_head *skb_pool; + cancel_work_sync(&np->refill_wq); skb_pool = &np->skb_pool; skb_queue_purge_reason(skb_pool, SKB_CONSUMED); } @@ -570,11 +577,18 @@ int netpoll_parse_options(struct netpoll *np, char *opt) cur++; if (*cur != ',') { - /* parse out dev name */ + /* parse out dev_name or dev_mac */ if ((delim = strchr(cur, ',')) == NULL) goto parse_failed; *delim = 0; - strscpy(np->dev_name, cur, sizeof(np->dev_name)); + + np->dev_name[0] = '\0'; + eth_broadcast_addr(np->dev_mac); + if (!strchr(cur, ':')) + strscpy(np->dev_name, cur, sizeof(np->dev_name)); + else if (!mac_pton(cur, np->dev_mac)) + goto parse_failed; + cur = delim; } cur++; @@ -621,6 +635,14 @@ int netpoll_parse_options(struct netpoll *np, char *opt) } EXPORT_SYMBOL(netpoll_parse_options); +static void refill_skbs_work_handler(struct work_struct *work) +{ + struct netpoll *np = + container_of(work, struct netpoll, refill_wq); + + refill_skbs(np); +} + int __netpoll_setup(struct netpoll *np, struct net_device *ndev) { struct netpoll_info *npinfo; @@ -666,6 +688,7 @@ int __netpoll_setup(struct netpoll *np, struct net_device *ndev) /* fill up the skb queue */ refill_skbs(np); + INIT_WORK(&np->refill_wq, refill_skbs_work_handler); /* last thing to do is link it to the net device structure */ rcu_assign_pointer(ndev->npinfo, npinfo); @@ -679,27 +702,45 @@ int __netpoll_setup(struct netpoll *np, struct net_device *ndev) } EXPORT_SYMBOL_GPL(__netpoll_setup); +/* + * Returns a pointer to a string representation of the identifier used + * to select the egress interface for the given netpoll instance. buf + * must be a buffer of length at least MAC_ADDR_STR_LEN + 1. 
+ */ +static char *egress_dev(struct netpoll *np, char *buf) +{ + if (np->dev_name[0]) + return np->dev_name; + + snprintf(buf, MAC_ADDR_STR_LEN, "%pM", np->dev_mac); + return buf; +} + int netpoll_setup(struct netpoll *np) { + struct net *net = current->nsproxy->net_ns; + char buf[MAC_ADDR_STR_LEN + 1]; struct net_device *ndev = NULL; bool ip_overwritten = false; struct in_device *in_dev; int err; rtnl_lock(); - if (np->dev_name[0]) { - struct net *net = current->nsproxy->net_ns; + if (np->dev_name[0]) ndev = __dev_get_by_name(net, np->dev_name); - } + else if (is_valid_ether_addr(np->dev_mac)) + ndev = dev_getbyhwaddr(net, ARPHRD_ETHER, np->dev_mac); + if (!ndev) { - np_err(np, "%s doesn't exist, aborting\n", np->dev_name); + np_err(np, "%s doesn't exist, aborting\n", egress_dev(np, buf)); err = -ENODEV; goto unlock; } netdev_hold(ndev, &np->dev_tracker, GFP_KERNEL); if (netdev_master_upper_dev_get(ndev)) { - np_err(np, "%s is a slave device, aborting\n", np->dev_name); + np_err(np, "%s is a slave device, aborting\n", + egress_dev(np, buf)); err = -EBUSY; goto put; } @@ -707,7 +748,8 @@ int netpoll_setup(struct netpoll *np) if (!netif_running(ndev)) { unsigned long atmost; - np_info(np, "device %s not up yet, forcing it\n", np->dev_name); + np_info(np, "device %s not up yet, forcing it\n", + egress_dev(np, buf)); err = dev_open(ndev, NULL); @@ -741,7 +783,7 @@ int netpoll_setup(struct netpoll *np) if (!ifa) { put_noaddr: np_err(np, "no IP address for %s, aborting\n", - np->dev_name); + egress_dev(np, buf)); err = -EDESTADDRREQ; goto put; } @@ -772,13 +814,13 @@ int netpoll_setup(struct netpoll *np) } if (err) { np_err(np, "no IPv6 address for %s, aborting\n", - np->dev_name); + egress_dev(np, buf)); goto put; } else np_info(np, "local IPv6 %pI6c\n", &np->local_ip.in6); #else np_err(np, "IPv6 is not supported %s, aborting\n", - np->dev_name); + egress_dev(np, buf)); err = -EINVAL; goto put; #endif diff --git a/net/core/page_pool.c b/net/core/page_pool.c index f5e908c9e7ad8..7745ad924ae2d 100644 --- a/net/core/page_pool.c +++ b/net/core/page_pool.c @@ -11,8 +11,10 @@ #include #include +#include #include #include +#include #include #include @@ -25,6 +27,7 @@ #include +#include "dev.h" #include "mp_dmabuf_devmem.h" #include "netmem_priv.h" #include "page_pool_priv.h" @@ -277,21 +280,23 @@ static int page_pool_init(struct page_pool *pool, get_device(pool->p.dev); if (pool->slow.flags & PP_FLAG_ALLOW_UNREADABLE_NETMEM) { - /* We rely on rtnl_lock()ing to make sure netdev_rx_queue - * configuration doesn't change while we're initializing - * the page_pool. 
- */ - ASSERT_RTNL(); + netdev_assert_locked(pool->slow.netdev); rxq = __netif_get_rx_queue(pool->slow.netdev, pool->slow.queue_idx); pool->mp_priv = rxq->mp_params.mp_priv; + pool->mp_ops = rxq->mp_params.mp_ops; } - if (pool->mp_priv) { + if (pool->mp_ops) { if (!pool->dma_map || !pool->dma_sync) return -EOPNOTSUPP; - err = mp_dmabuf_devmem_init(pool); + if (WARN_ON(!is_kernel_rodata((unsigned long)pool->mp_ops))) { + err = -EFAULT; + goto free_ptr_ring; + } + + err = pool->mp_ops->init(pool); if (err) { pr_warn("%s() mem-provider init failed %d\n", __func__, err); @@ -587,8 +592,8 @@ netmem_ref page_pool_alloc_netmems(struct page_pool *pool, gfp_t gfp) return netmem; /* Slow-path: cache empty, do real allocation */ - if (static_branch_unlikely(&page_pool_mem_providers) && pool->mp_priv) - netmem = mp_dmabuf_devmem_alloc_netmems(pool, gfp); + if (static_branch_unlikely(&page_pool_mem_providers) && pool->mp_ops) + netmem = pool->mp_ops->alloc_netmems(pool, gfp); else netmem = __page_pool_alloc_pages_slow(pool, gfp); return netmem; @@ -679,8 +684,8 @@ void page_pool_return_page(struct page_pool *pool, netmem_ref netmem) bool put; put = true; - if (static_branch_unlikely(&page_pool_mem_providers) && pool->mp_priv) - put = mp_dmabuf_devmem_release_page(pool, netmem); + if (static_branch_unlikely(&page_pool_mem_providers) && pool->mp_ops) + put = pool->mp_ops->release_netmem(pool, netmem); else __page_pool_release_page_dma(pool, netmem); @@ -1048,8 +1053,8 @@ static void __page_pool_destroy(struct page_pool *pool) page_pool_unlist(pool); page_pool_uninit(pool); - if (pool->mp_priv) { - mp_dmabuf_devmem_destroy(pool); + if (pool->mp_ops) { + pool->mp_ops->destroy(pool); static_branch_dec(&page_pool_mem_providers); } @@ -1104,7 +1109,13 @@ static void page_pool_release_retry(struct work_struct *wq) int inflight; inflight = page_pool_release(pool); - if (!inflight) + /* In rare cases, a driver bug may cause inflight to go negative. + * Don't reschedule release if inflight is 0 or negative. + * - If 0, the page_pool has been destroyed + * - if negative, we will never recover + * in both cases no reschedule is necessary. + */ + if (inflight <= 0) return; /* Periodic warning for page pools the user can't see */ @@ -1140,11 +1151,7 @@ void page_pool_disable_direct_recycling(struct page_pool *pool) if (!pool->p.napi) return; - /* To avoid races with recycling and additional barriers make sure - * pool and NAPI are unlinked when NAPI is disabled. - */ - WARN_ON(!test_bit(NAPI_STATE_SCHED, &pool->p.napi->state)); - WARN_ON(READ_ONCE(pool->p.napi->list_owner) != -1); + napi_assert_will_not_race(pool->p.napi); mutex_lock(&page_pools_lock); WRITE_ONCE(pool->p.napi, NULL); @@ -1190,3 +1197,31 @@ void page_pool_update_nid(struct page_pool *pool, int new_nid) } } EXPORT_SYMBOL(page_pool_update_nid); + +bool net_mp_niov_set_dma_addr(struct net_iov *niov, dma_addr_t addr) +{ + return page_pool_set_dma_addr_netmem(net_iov_to_netmem(niov), addr); +} + +/* Associate a niov with a page pool. Should follow with a matching + * net_mp_niov_clear_page_pool() + */ +void net_mp_niov_set_page_pool(struct page_pool *pool, struct net_iov *niov) +{ + netmem_ref netmem = net_iov_to_netmem(niov); + + page_pool_set_pp_info(pool, netmem); + + pool->pages_state_hold_cnt++; + trace_page_pool_state_hold(pool, netmem, pool->pages_state_hold_cnt); +} + +/* Disassociate a niov from a page pool. Should only be used in the + * ->release_netmem() path. 
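
With the mp_priv/mp_ops split above, a memory provider is a const ops table (page_pool_init() insists it live in kernel rodata via is_kernel_rodata()) plus whatever state it parks in mp_priv. A skeletal, purely illustrative provider, with hook signatures taken from the call sites above (the struct tag memory_provider_ops is assumed from the new header):

static int example_mp_init(struct page_pool *pool)
{
	/* validate pool->mp_priv, take references on backing memory */
	return 0;
}

static netmem_ref example_mp_alloc_netmems(struct page_pool *pool, gfp_t gfp)
{
	/* hand out provider-backed netmem; 0 means allocation failure */
	return 0;
}

static bool example_mp_release_netmem(struct page_pool *pool,
				      netmem_ref netmem)
{
	/* return true if the provider consumed the reference */
	return true;
}

static void example_mp_destroy(struct page_pool *pool)
{
	/* drop whatever init() took */
}

static int example_mp_nl_fill(void *mp_priv, struct sk_buff *rsp,
			      struct netdev_rx_queue *rxq)
{
	/* describe this provider in the netlink response */
	return 0;
}

static const struct memory_provider_ops example_mp_ops = {
	.init		= example_mp_init,
	.alloc_netmems	= example_mp_alloc_netmems,
	.release_netmem	= example_mp_release_netmem,
	.destroy	= example_mp_destroy,
	.nl_fill	= example_mp_nl_fill,
};
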
+ */ +void net_mp_niov_clear_page_pool(struct net_iov *niov) +{ + netmem_ref netmem = net_iov_to_netmem(niov); + + page_pool_clear_pp_info(netmem); +} diff --git a/net/core/page_pool_user.c b/net/core/page_pool_user.c index 6677e0c2e2565..c82a95beceff8 100644 --- a/net/core/page_pool_user.c +++ b/net/core/page_pool_user.c @@ -8,9 +8,9 @@ #include #include #include +#include #include -#include "devmem.h" #include "page_pool_priv.h" #include "netdev-genl-gen.h" @@ -216,7 +216,6 @@ static int page_pool_nl_fill(struct sk_buff *rsp, const struct page_pool *pool, const struct genl_info *info) { - struct net_devmem_dmabuf_binding *binding = pool->mp_priv; size_t inflight, refsz; unsigned int napi_id; void *hdr; @@ -234,7 +233,7 @@ page_pool_nl_fill(struct sk_buff *rsp, const struct page_pool *pool, goto err_cancel; napi_id = pool->p.napi ? READ_ONCE(pool->p.napi->napi_id) : 0; - if (napi_id >= MIN_NAPI_ID && + if (napi_id_valid(napi_id) && nla_put_uint(rsp, NETDEV_A_PAGE_POOL_NAPI_ID, napi_id)) goto err_cancel; @@ -249,7 +248,7 @@ page_pool_nl_fill(struct sk_buff *rsp, const struct page_pool *pool, pool->user.detach_time)) goto err_cancel; - if (binding && nla_put_u32(rsp, NETDEV_A_PAGE_POOL_DMABUF, binding->id)) + if (pool->mp_ops && pool->mp_ops->nl_fill(pool->mp_priv, rsp, NULL)) goto err_cancel; genlmsg_end(rsp, hdr); @@ -356,7 +355,7 @@ void page_pool_unlist(struct page_pool *pool) int page_pool_check_memory_provider(struct net_device *dev, struct netdev_rx_queue *rxq) { - struct net_devmem_dmabuf_binding *binding = rxq->mp_params.mp_priv; + void *binding = rxq->mp_params.mp_priv; struct page_pool *pool; struct hlist_node *n; diff --git a/net/core/pktgen.c b/net/core/pktgen.c index 82b6a2c3c141f..fe7fdefab994c 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c @@ -158,9 +158,7 @@ #include #include #include -#ifdef CONFIG_XFRM #include -#endif #include #include #include @@ -517,21 +515,23 @@ static ssize_t pgctrl_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { char data[128]; + size_t max; struct pktgen_net *pn = net_generic(current->nsproxy->net_ns, pg_net_id); if (!capable(CAP_NET_ADMIN)) return -EPERM; - if (count == 0) + if (count < 1) return -EINVAL; - if (count > sizeof(data)) - count = sizeof(data); - - if (copy_from_user(data, buf, count)) + max = min(count, sizeof(data) - 1); + if (copy_from_user(data, buf, max)) return -EFAULT; - data[count - 1] = 0; /* Strip trailing '\n' and terminate string */ + if (data[max - 1] == '\n') + data[max - 1] = 0; /* strip trailing '\n', terminate string */ + else + data[max] = 0; /* terminate string */ if (!strcmp(data, "stop")) pktgen_stop_all_threads(pn); @@ -744,31 +744,32 @@ static int pktgen_if_show(struct seq_file *seq, void *v) } -static int hex32_arg(const char __user *user_buffer, unsigned long maxlen, - __u32 *num) +static ssize_t hex32_arg(const char __user *user_buffer, size_t maxlen, + __u32 *num) { - int i = 0; + size_t i = 0; + *num = 0; for (; i < maxlen; i++) { int value; char c; - *num <<= 4; if (get_user(c, &user_buffer[i])) return -EFAULT; value = hex_to_bin(c); - if (value >= 0) + if (value >= 0) { + *num <<= 4; *num |= value; - else + } else { break; + } } return i; } -static int count_trail_chars(const char __user * user_buffer, - unsigned int maxlen) +static ssize_t count_trail_chars(const char __user *user_buffer, size_t maxlen) { - int i; + size_t i; for (i = 0; i < maxlen; i++) { char c; @@ -790,10 +791,10 @@ static int count_trail_chars(const char __user * user_buffer, return i; } -static 
long num_arg(const char __user *user_buffer, unsigned long maxlen, - unsigned long *num) +static ssize_t num_arg(const char __user *user_buffer, size_t maxlen, + unsigned long *num) { - int i; + size_t i; *num = 0; for (i = 0; i < maxlen; i++) { @@ -809,9 +810,9 @@ static long num_arg(const char __user *user_buffer, unsigned long maxlen, return i; } -static int strn_len(const char __user * user_buffer, unsigned int maxlen) +static ssize_t strn_len(const char __user *user_buffer, size_t maxlen) { - int i; + size_t i; for (i = 0; i < maxlen; i++) { char c; @@ -823,6 +824,7 @@ static int strn_len(const char __user * user_buffer, unsigned int maxlen) case '\r': case '\t': case ' ': + case '=': goto done_str; default: break; @@ -838,11 +840,11 @@ static int strn_len(const char __user * user_buffer, unsigned int maxlen) * "size1,weight_1 size2,weight_2 ... size_n,weight_n" for example. */ static ssize_t get_imix_entries(const char __user *buffer, + size_t maxlen, struct pktgen_dev *pkt_dev) { - const int max_digits = 10; - int i = 0; - long len; + size_t i = 0, max; + ssize_t len; char c; pkt_dev->n_imix_entries = 0; @@ -854,21 +856,30 @@ static ssize_t get_imix_entries(const char __user *buffer, if (pkt_dev->n_imix_entries >= MAX_IMIX_ENTRIES) return -E2BIG; - len = num_arg(&buffer[i], max_digits, &size); + if (i >= maxlen) + return -EINVAL; + + max = min(10, maxlen - i); + len = num_arg(&buffer[i], max, &size); if (len < 0) return len; i += len; + if (i >= maxlen) + return -EINVAL; if (get_user(c, &buffer[i])) return -EFAULT; /* Check for comma between size_i and weight_i */ if (c != ',') return -EINVAL; i++; + if (i >= maxlen) + return -EINVAL; if (size < 14 + 20 + 8) size = 14 + 20 + 8; - len = num_arg(&buffer[i], max_digits, &weight); + max = min(10, maxlen - i); + len = num_arg(&buffer[i], max, &weight); if (len < 0) return len; if (weight <= 0) @@ -878,39 +889,55 @@ static ssize_t get_imix_entries(const char __user *buffer, pkt_dev->imix_entries[pkt_dev->n_imix_entries].weight = weight; i += len; + pkt_dev->n_imix_entries++; + + if (i >= maxlen) + break; if (get_user(c, &buffer[i])) return -EFAULT; - i++; - pkt_dev->n_imix_entries++; } while (c == ' '); return i; } -static ssize_t get_labels(const char __user *buffer, struct pktgen_dev *pkt_dev) +static ssize_t get_labels(const char __user *buffer, + size_t maxlen, struct pktgen_dev *pkt_dev) { unsigned int n = 0; + size_t i = 0, max; + ssize_t len; char c; - ssize_t i = 0; - int len; pkt_dev->nr_labels = 0; do { __u32 tmp; - len = hex32_arg(&buffer[i], 8, &tmp); - if (len <= 0) + + if (n >= MAX_MPLS_LABELS) + return -E2BIG; + + if (i >= maxlen) + return -EINVAL; + + max = min(8, maxlen - i); + len = hex32_arg(&buffer[i], max, &tmp); + if (len < 0) return len; + + /* return empty list in case of invalid input or zero value */ + if (len == 0 || tmp == 0) + return maxlen; + pkt_dev->labels[n] = htonl(tmp); if (pkt_dev->labels[n] & MPLS_STACK_BOTTOM) pkt_dev->flags |= F_MPLS_RND; i += len; + n++; + if (i >= maxlen) + break; if (get_user(c, &buffer[i])) return -EFAULT; i++; - n++; - if (n >= MAX_MPLS_LABELS) - return -E2BIG; } while (c == ','); pkt_dev->nr_labels = n; @@ -952,11 +979,11 @@ static ssize_t pktgen_if_write(struct file *file, { struct seq_file *seq = file->private_data; struct pktgen_dev *pkt_dev = seq->private; - int i, max, len; + size_t i, max; + ssize_t len; char name[16], valstr[32]; unsigned long value = 0; char *pg_result = NULL; - int tmp = 0; char buf[128]; pg_result = &(pkt_dev->result[0]); @@ -967,16 +994,16 @@ static 
ssize_t pktgen_if_write(struct file *file, } max = count; - tmp = count_trail_chars(user_buffer, max); - if (tmp < 0) { + len = count_trail_chars(user_buffer, max); + if (len < 0) { pr_warn("illegal format\n"); - return tmp; + return len; } - i = tmp; + i = len; /* Read variable name */ - - len = strn_len(&user_buffer[i], sizeof(name) - 1); + max = min(sizeof(name) - 1, count - i); + len = strn_len(&user_buffer[i], max); if (len < 0) return len; @@ -1004,11 +1031,11 @@ static ssize_t pktgen_if_write(struct file *file, } if (!strcmp(name, "min_pkt_size")) { - len = num_arg(&user_buffer[i], 10, &value); + max = min(10, count - i); + len = num_arg(&user_buffer[i], max, &value); if (len < 0) return len; - i += len; if (value < 14 + 20 + 8) value = 14 + 20 + 8; if (value != pkt_dev->min_pkt_size) { @@ -1021,11 +1048,11 @@ static ssize_t pktgen_if_write(struct file *file, } if (!strcmp(name, "max_pkt_size")) { - len = num_arg(&user_buffer[i], 10, &value); + max = min(10, count - i); + len = num_arg(&user_buffer[i], max, &value); if (len < 0) return len; - i += len; if (value < 14 + 20 + 8) value = 14 + 20 + 8; if (value != pkt_dev->max_pkt_size) { @@ -1040,11 +1067,11 @@ static ssize_t pktgen_if_write(struct file *file, /* Shortcut for min = max */ if (!strcmp(name, "pkt_size")) { - len = num_arg(&user_buffer[i], 10, &value); + max = min(10, count - i); + len = num_arg(&user_buffer[i], max, &value); if (len < 0) return len; - i += len; if (value < 14 + 20 + 8) value = 14 + 20 + 8; if (value != pkt_dev->min_pkt_size) { @@ -1060,43 +1087,43 @@ static ssize_t pktgen_if_write(struct file *file, if (pkt_dev->clone_skb > 0) return -EINVAL; - len = get_imix_entries(&user_buffer[i], pkt_dev); + max = count - i; + len = get_imix_entries(&user_buffer[i], max, pkt_dev); if (len < 0) return len; fill_imix_distribution(pkt_dev); - i += len; return count; } if (!strcmp(name, "debug")) { - len = num_arg(&user_buffer[i], 10, &value); + max = min(10, count - i); + len = num_arg(&user_buffer[i], max, &value); if (len < 0) return len; - i += len; debug = value; sprintf(pg_result, "OK: debug=%u", debug); return count; } if (!strcmp(name, "frags")) { - len = num_arg(&user_buffer[i], 10, &value); + max = min(10, count - i); + len = num_arg(&user_buffer[i], max, &value); if (len < 0) return len; - i += len; pkt_dev->nfrags = value; sprintf(pg_result, "OK: frags=%d", pkt_dev->nfrags); return count; } if (!strcmp(name, "delay")) { - len = num_arg(&user_buffer[i], 10, &value); + max = min(10, count - i); + len = num_arg(&user_buffer[i], max, &value); if (len < 0) return len; - i += len; if (value == 0x7FFFFFFF) pkt_dev->delay = ULLONG_MAX; else @@ -1107,13 +1134,13 @@ static ssize_t pktgen_if_write(struct file *file, return count; } if (!strcmp(name, "rate")) { - len = num_arg(&user_buffer[i], 10, &value); + max = min(10, count - i); + len = num_arg(&user_buffer[i], max, &value); if (len < 0) return len; - i += len; if (!value) - return len; + return -EINVAL; pkt_dev->delay = pkt_dev->min_pkt_size*8*NSEC_PER_USEC/value; if (debug) pr_info("Delay set at: %llu ns\n", pkt_dev->delay); @@ -1122,13 +1149,13 @@ static ssize_t pktgen_if_write(struct file *file, return count; } if (!strcmp(name, "ratep")) { - len = num_arg(&user_buffer[i], 10, &value); + max = min(10, count - i); + len = num_arg(&user_buffer[i], max, &value); if (len < 0) return len; - i += len; if (!value) - return len; + return -EINVAL; pkt_dev->delay = NSEC_PER_SEC/value; if (debug) pr_info("Delay set at: %llu ns\n", pkt_dev->delay); @@ -1137,11 +1164,11 @@ 
static ssize_t pktgen_if_write(struct file *file, return count; } if (!strcmp(name, "udp_src_min")) { - len = num_arg(&user_buffer[i], 10, &value); + max = min(10, count - i); + len = num_arg(&user_buffer[i], max, &value); if (len < 0) return len; - i += len; if (value != pkt_dev->udp_src_min) { pkt_dev->udp_src_min = value; pkt_dev->cur_udp_src = value; @@ -1150,11 +1177,11 @@ static ssize_t pktgen_if_write(struct file *file, return count; } if (!strcmp(name, "udp_dst_min")) { - len = num_arg(&user_buffer[i], 10, &value); + max = min(10, count - i); + len = num_arg(&user_buffer[i], max, &value); if (len < 0) return len; - i += len; if (value != pkt_dev->udp_dst_min) { pkt_dev->udp_dst_min = value; pkt_dev->cur_udp_dst = value; @@ -1163,11 +1190,11 @@ static ssize_t pktgen_if_write(struct file *file, return count; } if (!strcmp(name, "udp_src_max")) { - len = num_arg(&user_buffer[i], 10, &value); + max = min(10, count - i); + len = num_arg(&user_buffer[i], max, &value); if (len < 0) return len; - i += len; if (value != pkt_dev->udp_src_max) { pkt_dev->udp_src_max = value; pkt_dev->cur_udp_src = value; @@ -1176,11 +1203,11 @@ static ssize_t pktgen_if_write(struct file *file, return count; } if (!strcmp(name, "udp_dst_max")) { - len = num_arg(&user_buffer[i], 10, &value); + max = min(10, count - i); + len = num_arg(&user_buffer[i], max, &value); if (len < 0) return len; - i += len; if (value != pkt_dev->udp_dst_max) { pkt_dev->udp_dst_max = value; pkt_dev->cur_udp_dst = value; @@ -1189,7 +1216,8 @@ static ssize_t pktgen_if_write(struct file *file, return count; } if (!strcmp(name, "clone_skb")) { - len = num_arg(&user_buffer[i], 10, &value); + max = min(10, count - i); + len = num_arg(&user_buffer[i], max, &value); if (len < 0) return len; /* clone_skb is not supported for netif_receive xmit_mode and @@ -1198,34 +1226,33 @@ static ssize_t pktgen_if_write(struct file *file, if ((value > 0) && ((pkt_dev->xmit_mode == M_NETIF_RECEIVE) || !(pkt_dev->odev->priv_flags & IFF_TX_SKB_SHARING))) - return -ENOTSUPP; + return -EOPNOTSUPP; if (value > 0 && (pkt_dev->n_imix_entries > 0 || !(pkt_dev->flags & F_SHARED))) return -EINVAL; - i += len; pkt_dev->clone_skb = value; sprintf(pg_result, "OK: clone_skb=%d", pkt_dev->clone_skb); return count; } if (!strcmp(name, "count")) { - len = num_arg(&user_buffer[i], 10, &value); + max = min(10, count - i); + len = num_arg(&user_buffer[i], max, &value); if (len < 0) return len; - i += len; pkt_dev->count = value; sprintf(pg_result, "OK: count=%llu", (unsigned long long)pkt_dev->count); return count; } if (!strcmp(name, "src_mac_count")) { - len = num_arg(&user_buffer[i], 10, &value); + max = min(10, count - i); + len = num_arg(&user_buffer[i], max, &value); if (len < 0) return len; - i += len; if (pkt_dev->src_mac_count != value) { pkt_dev->src_mac_count = value; pkt_dev->cur_src_mac_offset = 0; @@ -1235,11 +1262,11 @@ static ssize_t pktgen_if_write(struct file *file, return count; } if (!strcmp(name, "dst_mac_count")) { - len = num_arg(&user_buffer[i], 10, &value); + max = min(10, count - i); + len = num_arg(&user_buffer[i], max, &value); if (len < 0) return len; - i += len; if (pkt_dev->dst_mac_count != value) { pkt_dev->dst_mac_count = value; pkt_dev->cur_dst_mac_offset = 0; @@ -1249,16 +1276,16 @@ static ssize_t pktgen_if_write(struct file *file, return count; } if (!strcmp(name, "burst")) { - len = num_arg(&user_buffer[i], 10, &value); + max = min(10, count - i); + len = num_arg(&user_buffer[i], max, &value); if (len < 0) return len; - i += len; if ((value 
> 1) && ((pkt_dev->xmit_mode == M_QUEUE_XMIT) || ((pkt_dev->xmit_mode == M_START_XMIT) && (!(pkt_dev->odev->priv_flags & IFF_TX_SKB_SHARING))))) - return -ENOTSUPP; + return -EOPNOTSUPP; if (value > 1 && !(pkt_dev->flags & F_SHARED)) return -EINVAL; @@ -1268,12 +1295,11 @@ static ssize_t pktgen_if_write(struct file *file, return count; } if (!strcmp(name, "node")) { - len = num_arg(&user_buffer[i], 10, &value); + max = min(10, count - i); + len = num_arg(&user_buffer[i], max, &value); if (len < 0) return len; - i += len; - if (node_possible(value)) { pkt_dev->node = value; sprintf(pg_result, "OK: node=%d", pkt_dev->node); @@ -1289,21 +1315,21 @@ static ssize_t pktgen_if_write(struct file *file, if (!strcmp(name, "xmit_mode")) { char f[32]; - memset(f, 0, 32); - len = strn_len(&user_buffer[i], sizeof(f) - 1); + max = min(sizeof(f) - 1, count - i); + len = strn_len(&user_buffer[i], max); if (len < 0) return len; + memset(f, 0, sizeof(f)); if (copy_from_user(f, &user_buffer[i], len)) return -EFAULT; - i += len; if (strcmp(f, "start_xmit") == 0) { pkt_dev->xmit_mode = M_START_XMIT; } else if (strcmp(f, "netif_receive") == 0) { /* clone_skb set earlier, not supported in this mode */ if (pkt_dev->clone_skb > 0) - return -ENOTSUPP; + return -EOPNOTSUPP; pkt_dev->xmit_mode = M_NETIF_RECEIVE; @@ -1329,14 +1355,14 @@ static ssize_t pktgen_if_write(struct file *file, char f[32]; char *end; - memset(f, 0, 32); - len = strn_len(&user_buffer[i], sizeof(f) - 1); + max = min(sizeof(f) - 1, count - i); + len = strn_len(&user_buffer[i], max); if (len < 0) return len; + memset(f, 0, 32); if (copy_from_user(f, &user_buffer[i], len)) return -EFAULT; - i += len; flag = pktgen_read_flag(f, &disable); if (flag) { @@ -1378,7 +1404,8 @@ static ssize_t pktgen_if_write(struct file *file, return count; } if (!strcmp(name, "dst_min") || !strcmp(name, "dst")) { - len = strn_len(&user_buffer[i], sizeof(pkt_dev->dst_min) - 1); + max = min(sizeof(pkt_dev->dst_min) - 1, count - i); + len = strn_len(&user_buffer[i], max); if (len < 0) return len; @@ -1393,12 +1420,13 @@ static ssize_t pktgen_if_write(struct file *file, } if (debug) pr_debug("dst_min set to: %s\n", pkt_dev->dst_min); - i += len; + sprintf(pg_result, "OK: dst_min=%s", pkt_dev->dst_min); return count; } if (!strcmp(name, "dst_max")) { - len = strn_len(&user_buffer[i], sizeof(pkt_dev->dst_max) - 1); + max = min(sizeof(pkt_dev->dst_max) - 1, count - i); + len = strn_len(&user_buffer[i], max); if (len < 0) return len; @@ -1413,12 +1441,13 @@ static ssize_t pktgen_if_write(struct file *file, } if (debug) pr_debug("dst_max set to: %s\n", pkt_dev->dst_max); - i += len; + sprintf(pg_result, "OK: dst_max=%s", pkt_dev->dst_max); return count; } if (!strcmp(name, "dst6")) { - len = strn_len(&user_buffer[i], sizeof(buf) - 1); + max = min(sizeof(buf) - 1, count - i); + len = strn_len(&user_buffer[i], max); if (len < 0) return len; @@ -1436,12 +1465,12 @@ static ssize_t pktgen_if_write(struct file *file, if (debug) pr_debug("dst6 set to: %s\n", buf); - i += len; sprintf(pg_result, "OK: dst6=%s", buf); return count; } if (!strcmp(name, "dst6_min")) { - len = strn_len(&user_buffer[i], sizeof(buf) - 1); + max = min(sizeof(buf) - 1, count - i); + len = strn_len(&user_buffer[i], max); if (len < 0) return len; @@ -1458,12 +1487,12 @@ static ssize_t pktgen_if_write(struct file *file, if (debug) pr_debug("dst6_min set to: %s\n", buf); - i += len; sprintf(pg_result, "OK: dst6_min=%s", buf); return count; } if (!strcmp(name, "dst6_max")) { - len = strn_len(&user_buffer[i], 
sizeof(buf) - 1); + max = min(sizeof(buf) - 1, count - i); + len = strn_len(&user_buffer[i], max); if (len < 0) return len; @@ -1479,12 +1508,12 @@ static ssize_t pktgen_if_write(struct file *file, if (debug) pr_debug("dst6_max set to: %s\n", buf); - i += len; sprintf(pg_result, "OK: dst6_max=%s", buf); return count; } if (!strcmp(name, "src6")) { - len = strn_len(&user_buffer[i], sizeof(buf) - 1); + max = min(sizeof(buf) - 1, count - i); + len = strn_len(&user_buffer[i], max); if (len < 0) return len; @@ -1502,12 +1531,12 @@ static ssize_t pktgen_if_write(struct file *file, if (debug) pr_debug("src6 set to: %s\n", buf); - i += len; sprintf(pg_result, "OK: src6=%s", buf); return count; } if (!strcmp(name, "src_min")) { - len = strn_len(&user_buffer[i], sizeof(pkt_dev->src_min) - 1); + max = min(sizeof(pkt_dev->src_min) - 1, count - i); + len = strn_len(&user_buffer[i], max); if (len < 0) return len; @@ -1522,12 +1551,13 @@ static ssize_t pktgen_if_write(struct file *file, } if (debug) pr_debug("src_min set to: %s\n", pkt_dev->src_min); - i += len; + sprintf(pg_result, "OK: src_min=%s", pkt_dev->src_min); return count; } if (!strcmp(name, "src_max")) { - len = strn_len(&user_buffer[i], sizeof(pkt_dev->src_max) - 1); + max = min(sizeof(pkt_dev->src_max) - 1, count - i); + len = strn_len(&user_buffer[i], max); if (len < 0) return len; @@ -1542,12 +1572,13 @@ static ssize_t pktgen_if_write(struct file *file, } if (debug) pr_debug("src_max set to: %s\n", pkt_dev->src_max); - i += len; + sprintf(pg_result, "OK: src_max=%s", pkt_dev->src_max); return count; } if (!strcmp(name, "dst_mac")) { - len = strn_len(&user_buffer[i], sizeof(valstr) - 1); + max = min(sizeof(valstr) - 1, count - i); + len = strn_len(&user_buffer[i], max); if (len < 0) return len; @@ -1564,7 +1595,8 @@ static ssize_t pktgen_if_write(struct file *file, return count; } if (!strcmp(name, "src_mac")) { - len = strn_len(&user_buffer[i], sizeof(valstr) - 1); + max = min(sizeof(valstr) - 1, count - i); + len = strn_len(&user_buffer[i], max); if (len < 0) return len; @@ -1588,11 +1620,11 @@ static ssize_t pktgen_if_write(struct file *file, } if (!strcmp(name, "flows")) { - len = num_arg(&user_buffer[i], 10, &value); + max = min(10, count - i); + len = num_arg(&user_buffer[i], max, &value); if (len < 0) return len; - i += len; if (value > MAX_CFLOWS) value = MAX_CFLOWS; @@ -1602,44 +1634,44 @@ static ssize_t pktgen_if_write(struct file *file, } #ifdef CONFIG_XFRM if (!strcmp(name, "spi")) { - len = num_arg(&user_buffer[i], 10, &value); + max = min(10, count - i); + len = num_arg(&user_buffer[i], max, &value); if (len < 0) return len; - i += len; pkt_dev->spi = value; sprintf(pg_result, "OK: spi=%u", pkt_dev->spi); return count; } #endif if (!strcmp(name, "flowlen")) { - len = num_arg(&user_buffer[i], 10, &value); + max = min(10, count - i); + len = num_arg(&user_buffer[i], max, &value); if (len < 0) return len; - i += len; pkt_dev->lflow = value; sprintf(pg_result, "OK: flowlen=%u", pkt_dev->lflow); return count; } if (!strcmp(name, "queue_map_min")) { - len = num_arg(&user_buffer[i], 5, &value); + max = min(5, count - i); + len = num_arg(&user_buffer[i], max, &value); if (len < 0) return len; - i += len; pkt_dev->queue_map_min = value; sprintf(pg_result, "OK: queue_map_min=%u", pkt_dev->queue_map_min); return count; } if (!strcmp(name, "queue_map_max")) { - len = num_arg(&user_buffer[i], 5, &value); + max = min(5, count - i); + len = num_arg(&user_buffer[i], max, &value); if (len < 0) return len; - i += len; pkt_dev->queue_map_max = 
value; sprintf(pg_result, "OK: queue_map_max=%u", pkt_dev->queue_map_max); return count; @@ -1648,10 +1680,11 @@ static ssize_t pktgen_if_write(struct file *file, if (!strcmp(name, "mpls")) { unsigned int n, cnt; - len = get_labels(&user_buffer[i], pkt_dev); + max = count - i; + len = get_labels(&user_buffer[i], max, pkt_dev); if (len < 0) return len; - i += len; + cnt = sprintf(pg_result, "OK: mpls="); for (n = 0; n < pkt_dev->nr_labels; n++) cnt += sprintf(pg_result + cnt, @@ -1669,11 +1702,11 @@ static ssize_t pktgen_if_write(struct file *file, } if (!strcmp(name, "vlan_id")) { - len = num_arg(&user_buffer[i], 4, &value); + max = min(4, count - i); + len = num_arg(&user_buffer[i], max, &value); if (len < 0) return len; - i += len; if (value <= 4095) { pkt_dev->vlan_id = value; /* turn on VLAN */ @@ -1696,11 +1729,11 @@ static ssize_t pktgen_if_write(struct file *file, } if (!strcmp(name, "vlan_p")) { - len = num_arg(&user_buffer[i], 1, &value); + max = min(1, count - i); + len = num_arg(&user_buffer[i], max, &value); if (len < 0) return len; - i += len; if ((value <= 7) && (pkt_dev->vlan_id != 0xffff)) { pkt_dev->vlan_p = value; sprintf(pg_result, "OK: vlan_p=%u", pkt_dev->vlan_p); @@ -1711,11 +1744,11 @@ static ssize_t pktgen_if_write(struct file *file, } if (!strcmp(name, "vlan_cfi")) { - len = num_arg(&user_buffer[i], 1, &value); + max = min(1, count - i); + len = num_arg(&user_buffer[i], max, &value); if (len < 0) return len; - i += len; if ((value <= 1) && (pkt_dev->vlan_id != 0xffff)) { pkt_dev->vlan_cfi = value; sprintf(pg_result, "OK: vlan_cfi=%u", pkt_dev->vlan_cfi); @@ -1726,11 +1759,11 @@ static ssize_t pktgen_if_write(struct file *file, } if (!strcmp(name, "svlan_id")) { - len = num_arg(&user_buffer[i], 4, &value); + max = min(4, count - i); + len = num_arg(&user_buffer[i], max, &value); if (len < 0) return len; - i += len; if ((value <= 4095) && ((pkt_dev->vlan_id != 0xffff))) { pkt_dev->svlan_id = value; /* turn on SVLAN */ @@ -1753,11 +1786,11 @@ static ssize_t pktgen_if_write(struct file *file, } if (!strcmp(name, "svlan_p")) { - len = num_arg(&user_buffer[i], 1, &value); + max = min(1, count - i); + len = num_arg(&user_buffer[i], max, &value); if (len < 0) return len; - i += len; if ((value <= 7) && (pkt_dev->svlan_id != 0xffff)) { pkt_dev->svlan_p = value; sprintf(pg_result, "OK: svlan_p=%u", pkt_dev->svlan_p); @@ -1768,11 +1801,11 @@ static ssize_t pktgen_if_write(struct file *file, } if (!strcmp(name, "svlan_cfi")) { - len = num_arg(&user_buffer[i], 1, &value); + max = min(1, count - i); + len = num_arg(&user_buffer[i], max, &value); if (len < 0) return len; - i += len; if ((value <= 1) && (pkt_dev->svlan_id != 0xffff)) { pkt_dev->svlan_cfi = value; sprintf(pg_result, "OK: svlan_cfi=%u", pkt_dev->svlan_cfi); @@ -1783,12 +1816,13 @@ static ssize_t pktgen_if_write(struct file *file, } if (!strcmp(name, "tos")) { - __u32 tmp_value = 0; - len = hex32_arg(&user_buffer[i], 2, &tmp_value); + __u32 tmp_value; + + max = min(2, count - i); + len = hex32_arg(&user_buffer[i], max, &tmp_value); if (len < 0) return len; - i += len; if (len == 2) { pkt_dev->tos = tmp_value; sprintf(pg_result, "OK: tos=0x%02x", pkt_dev->tos); @@ -1799,12 +1833,13 @@ static ssize_t pktgen_if_write(struct file *file, } if (!strcmp(name, "traffic_class")) { - __u32 tmp_value = 0; - len = hex32_arg(&user_buffer[i], 2, &tmp_value); + __u32 tmp_value; + + max = min(2, count - i); + len = hex32_arg(&user_buffer[i], max, &tmp_value); if (len < 0) return len; - i += len; if (len == 2) { 
pkt_dev->traffic_class = tmp_value; sprintf(pg_result, "OK: traffic_class=0x%02x", pkt_dev->traffic_class); @@ -1815,11 +1850,11 @@ static ssize_t pktgen_if_write(struct file *file, } if (!strcmp(name, "skb_priority")) { - len = num_arg(&user_buffer[i], 9, &value); + max = min(9, count - i); + len = num_arg(&user_buffer[i], max, &value); if (len < 0) return len; - i += len; pkt_dev->skb_priority = value; sprintf(pg_result, "OK: skb_priority=%i", pkt_dev->skb_priority); @@ -1879,7 +1914,8 @@ static ssize_t pktgen_thread_write(struct file *file, { struct seq_file *seq = file->private_data; struct pktgen_thread *t = seq->private; - int i, max, len, ret; + size_t i, max; + ssize_t len, ret; char name[40]; char *pg_result; @@ -1896,8 +1932,8 @@ static ssize_t pktgen_thread_write(struct file *file, i = len; /* Read variable name */ - - len = strn_len(&user_buffer[i], sizeof(name) - 1); + max = min(sizeof(name) - 1, count - i); + len = strn_len(&user_buffer[i], max); if (len < 0) return len; @@ -1927,14 +1963,15 @@ static ssize_t pktgen_thread_write(struct file *file, if (!strcmp(name, "add_device")) { char f[32]; memset(f, 0, 32); - len = strn_len(&user_buffer[i], sizeof(f) - 1); + max = min(sizeof(f) - 1, count - i); + len = strn_len(&user_buffer[i], max); if (len < 0) { ret = len; goto out; } if (copy_from_user(f, &user_buffer[i], len)) return -EFAULT; - i += len; + mutex_lock(&pktgen_thread_lock); ret = pktgen_add_device(t, f); mutex_unlock(&pktgen_thread_lock); @@ -2358,13 +2395,13 @@ static inline int f_pick(struct pktgen_dev *pkt_dev) } -#ifdef CONFIG_XFRM /* If there was already an IPSEC SA, we keep it as is, else * we go look for it ... */ #define DUMMY_MARK 0 static void get_ipsec_sa(struct pktgen_dev *pkt_dev, int flow) { +#ifdef CONFIG_XFRM struct xfrm_state *x = pkt_dev->flows[flow].x; struct pktgen_net *pn = net_generic(dev_net(pkt_dev->odev), pg_net_id); if (!x) { @@ -2390,11 +2427,10 @@ static void get_ipsec_sa(struct pktgen_dev *pkt_dev, int flow) } } -} #endif +} static void set_cur_queue_map(struct pktgen_dev *pkt_dev) { - if (pkt_dev->flags & F_QUEUE_MAP_CPU) pkt_dev->cur_queue_map = smp_processor_id(); @@ -2569,10 +2605,8 @@ static void mod_cur_headers(struct pktgen_dev *pkt_dev) pkt_dev->flows[flow].flags |= F_INIT; pkt_dev->flows[flow].cur_daddr = pkt_dev->cur_daddr; -#ifdef CONFIG_XFRM if (pkt_dev->flags & F_IPSEC) get_ipsec_sa(pkt_dev, flow); -#endif pkt_dev->nflows++; } } diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index d1e559fce918d..5a24a30dfc2d1 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -53,6 +53,7 @@ #include #include #include +#include #include #if IS_ENABLED(CONFIG_IPV6) #include @@ -80,6 +81,11 @@ void rtnl_lock(void) } EXPORT_SYMBOL(rtnl_lock); +int rtnl_lock_interruptible(void) +{ + return mutex_lock_interruptible(&rtnl_mutex); +} + int rtnl_lock_killable(void) { return mutex_lock_killable(&rtnl_mutex); @@ -1287,6 +1293,7 @@ static noinline size_t if_nlmsg_size(const struct net_device *dev, + nla_total_size(4) /* IFLA_TSO_MAX_SEGS */ + nla_total_size(1) /* IFLA_OPERSTATE */ + nla_total_size(1) /* IFLA_LINKMODE */ + + nla_total_size(1) /* IFLA_NETNS_IMMUTABLE */ + nla_total_size(4) /* IFLA_CARRIER_CHANGES */ + nla_total_size(4) /* IFLA_LINK_NETNSID */ + nla_total_size(4) /* IFLA_GROUP */ @@ -2041,6 +2048,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, netif_running(dev) ? 
READ_ONCE(dev->operstate) : IF_OPER_DOWN) || nla_put_u8(skb, IFLA_LINKMODE, READ_ONCE(dev->link_mode)) || + nla_put_u8(skb, IFLA_NETNS_IMMUTABLE, dev->netns_immutable) || nla_put_u32(skb, IFLA_MTU, READ_ONCE(dev->mtu)) || nla_put_u32(skb, IFLA_MIN_MTU, READ_ONCE(dev->min_mtu)) || nla_put_u32(skb, IFLA_MAX_MTU, READ_ONCE(dev->max_mtu)) || @@ -2229,6 +2237,7 @@ static const struct nla_policy ifla_policy[IFLA_MAX+1] = { [IFLA_ALLMULTI] = { .type = NLA_REJECT }, [IFLA_GSO_IPV4_MAX_SIZE] = NLA_POLICY_MIN(NLA_U32, MAX_TCP_HEADER + 1), [IFLA_GRO_IPV4_MAX_SIZE] = { .type = NLA_U32 }, + [IFLA_NETNS_IMMUTABLE] = { .type = NLA_REJECT }, }; static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = { @@ -2904,12 +2913,19 @@ static int do_set_master(struct net_device *dev, int ifindex, const struct net_device_ops *ops; int err; + /* Release the lower lock, the upper is responsible for locking + * the lower if needed. None of the existing upper devices + * use netdev instance lock, so don't grab it. + */ + if (upper_dev) { if (upper_dev->ifindex == ifindex) return 0; ops = upper_dev->netdev_ops; if (ops->ndo_del_slave) { + netdev_unlock_ops(dev); err = ops->ndo_del_slave(upper_dev, dev); + netdev_lock_ops(dev); if (err) return err; } else { @@ -2923,7 +2939,9 @@ static int do_set_master(struct net_device *dev, int ifindex, return -EINVAL; ops = upper_dev->netdev_ops; if (ops->ndo_add_slave) { + netdev_unlock_ops(dev); err = ops->ndo_add_slave(upper_dev, dev, extack); + netdev_lock_ops(dev); if (err) return err; } else { @@ -2973,7 +2991,7 @@ static int do_set_proto_down(struct net_device *dev, if (pdreason[IFLA_PROTO_DOWN_REASON_MASK]) mask = nla_get_u32(pdreason[IFLA_PROTO_DOWN_REASON_MASK]); - dev_change_proto_down_reason(dev, mask, value); + netdev_change_proto_down_reason_locked(dev, mask, value); } if (nl_proto_down) { @@ -2984,8 +3002,7 @@ static int do_set_proto_down(struct net_device *dev, NL_SET_ERR_MSG(extack, "Cannot clear protodown, active reasons"); return -EBUSY; } - err = dev_change_proto_down(dev, - proto_down); + err = netif_change_proto_down(dev, proto_down); if (err) return err; } @@ -3005,6 +3022,8 @@ static int do_setlink(const struct sk_buff *skb, struct net_device *dev, char ifname[IFNAMSIZ]; int err; + netdev_lock_ops(dev); + err = validate_linkmsg(dev, tb, extack); if (err < 0) goto errout; @@ -3020,7 +3039,8 @@ static int do_setlink(const struct sk_buff *skb, struct net_device *dev, new_ifindex = nla_get_s32_default(tb[IFLA_NEW_IFINDEX], 0); - err = __dev_change_net_namespace(dev, tgt_net, pat, new_ifindex); + err = netif_change_net_namespace(dev, tgt_net, pat, + new_ifindex, extack); if (err) goto errout; @@ -3068,24 +3088,35 @@ static int do_setlink(const struct sk_buff *skb, struct net_device *dev, goto errout; } sa->sa_family = dev->type; + + netdev_unlock_ops(dev); + + /* dev_addr_sem is an outer lock, enforce proper ordering */ + down_write(&dev_addr_sem); + netdev_lock_ops(dev); + memcpy(sa->sa_data, nla_data(tb[IFLA_ADDRESS]), dev->addr_len); - err = dev_set_mac_address_user(dev, sa, extack); + err = netif_set_mac_address(dev, sa, extack); kfree(sa); - if (err) + if (err) { + up_write(&dev_addr_sem); goto errout; + } status |= DO_SETLINK_MODIFIED; + + up_write(&dev_addr_sem); } if (tb[IFLA_MTU]) { - err = dev_set_mtu_ext(dev, nla_get_u32(tb[IFLA_MTU]), extack); + err = netif_set_mtu_ext(dev, nla_get_u32(tb[IFLA_MTU]), extack); if (err < 0) goto errout; status |= DO_SETLINK_MODIFIED; } if (tb[IFLA_GROUP]) { - dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP])); + 
netif_set_group(dev, nla_get_u32(tb[IFLA_GROUP])); status |= DO_SETLINK_NOTIFY; } @@ -3095,15 +3126,15 @@ static int do_setlink(const struct sk_buff *skb, struct net_device *dev, * requested. */ if (ifm->ifi_index > 0 && ifname[0]) { - err = dev_change_name(dev, ifname); + err = netif_change_name(dev, ifname); if (err < 0) goto errout; status |= DO_SETLINK_MODIFIED; } if (tb[IFLA_IFALIAS]) { - err = dev_set_alias(dev, nla_data(tb[IFLA_IFALIAS]), - nla_len(tb[IFLA_IFALIAS])); + err = netif_set_alias(dev, nla_data(tb[IFLA_IFALIAS]), + nla_len(tb[IFLA_IFALIAS])); if (err < 0) goto errout; status |= DO_SETLINK_NOTIFY; @@ -3115,8 +3146,8 @@ static int do_setlink(const struct sk_buff *skb, struct net_device *dev, } if (ifm->ifi_flags || ifm->ifi_change) { - err = dev_change_flags(dev, rtnl_dev_combine_flags(dev, ifm), - extack); + err = netif_change_flags(dev, rtnl_dev_combine_flags(dev, ifm), + extack); if (err < 0) goto errout; } @@ -3129,7 +3160,7 @@ static int do_setlink(const struct sk_buff *skb, struct net_device *dev, } if (tb[IFLA_CARRIER]) { - err = dev_change_carrier(dev, nla_get_u8(tb[IFLA_CARRIER])); + err = netif_change_carrier(dev, nla_get_u8(tb[IFLA_CARRIER])); if (err) goto errout; status |= DO_SETLINK_MODIFIED; @@ -3138,7 +3169,7 @@ static int do_setlink(const struct sk_buff *skb, struct net_device *dev, if (tb[IFLA_TXQLEN]) { unsigned int value = nla_get_u32(tb[IFLA_TXQLEN]); - err = dev_change_tx_queue_len(dev, value); + err = netif_change_tx_queue_len(dev, value); if (err) goto errout; status |= DO_SETLINK_MODIFIED; @@ -3369,6 +3400,8 @@ static int do_setlink(const struct sk_buff *skb, struct net_device *dev, dev->name); } + netdev_unlock_ops(dev); + return err; } @@ -3762,7 +3795,13 @@ static int rtnl_newlink_create(struct sk_buff *skb, struct ifinfomsg *ifm, struct netlink_ext_ack *extack) { unsigned char name_assign_type = NET_NAME_USER; - struct net *net = sock_net(skb->sk); + struct rtnl_newlink_params params = { + .src_net = sock_net(skb->sk), + .link_net = link_net, + .peer_net = peer_net, + .tb = tb, + .data = data, + }; u32 portid = NETLINK_CB(skb).portid; struct net_device *dev; char ifname[IFNAMSIZ]; @@ -3778,8 +3817,8 @@ static int rtnl_newlink_create(struct sk_buff *skb, struct ifinfomsg *ifm, name_assign_type = NET_NAME_ENUM; } - dev = rtnl_create_link(link_net ? 
: tgt_net, ifname, - name_assign_type, ops, tb, extack); + dev = rtnl_create_link(tgt_net, ifname, name_assign_type, ops, tb, + extack); if (IS_ERR(dev)) { err = PTR_ERR(dev); goto out; @@ -3787,13 +3826,8 @@ static int rtnl_newlink_create(struct sk_buff *skb, struct ifinfomsg *ifm, dev->ifindex = ifm->ifi_index; - if (link_net) - net = link_net; - if (peer_net) - net = peer_net; - if (ops->newlink) - err = ops->newlink(net, dev, tb, data, extack); + err = ops->newlink(dev, &params, extack); else err = register_netdevice(dev); if (err < 0) { @@ -3801,22 +3835,22 @@ static int rtnl_newlink_create(struct sk_buff *skb, struct ifinfomsg *ifm, goto out; } + netdev_lock_ops(dev); + err = rtnl_configure_link(dev, ifm, portid, nlh); if (err < 0) goto out_unregister; - if (link_net) { - err = dev_change_net_namespace(dev, tgt_net, ifname); - if (err < 0) - goto out_unregister; - } if (tb[IFLA_MASTER]) { err = do_set_master(dev, nla_get_u32(tb[IFLA_MASTER]), extack); if (err) goto out_unregister; } + + netdev_unlock_ops(dev); out: return err; out_unregister: + netdev_unlock_ops(dev); if (ops->newlink) { LIST_HEAD(list_kill); @@ -3862,20 +3896,26 @@ static int __rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh, { struct nlattr ** const tb = tbs->tb; struct net *net = sock_net(skb->sk); + struct net *device_net; struct net_device *dev; struct ifinfomsg *ifm; bool link_specified; + /* When creating, look up the existing device in the target net namespace */ + device_net = (nlh->nlmsg_flags & NLM_F_CREATE) && (nlh->nlmsg_flags & NLM_F_EXCL) ? tgt_net : net; + ifm = nlmsg_data(nlh); if (ifm->ifi_index > 0) { link_specified = true; - dev = __dev_get_by_index(net, ifm->ifi_index); + dev = __dev_get_by_index(device_net, ifm->ifi_index); } else if (ifm->ifi_index < 0) { NL_SET_ERR_MSG(extack, "ifindex can't be negative"); return -EINVAL; } else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME]) { link_specified = true; - dev = rtnl_dev_get(net, tb); + dev = rtnl_dev_get(device_net, tb); } else { link_specified = false; dev = NULL; diff --git a/net/core/scm.c b/net/core/scm.c index 4f6a14babe5ae..733c0cbd393d2 100644 --- a/net/core/scm.c +++ b/net/core/scm.c @@ -282,6 +282,16 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data) } EXPORT_SYMBOL(put_cmsg); +int put_cmsg_notrunc(struct msghdr *msg, int level, int type, int len, + void *data) +{ + /* Don't produce truncated CMSGs */ + if (!msg->msg_control || msg->msg_controllen < CMSG_LEN(len)) + return -ETOOSMALL; + + return put_cmsg(msg, level, type, len, data); +} + void put_cmsg_scm_timestamping64(struct msghdr *msg, struct scm_timestamping_internal *tss_internal) { struct scm_timestamping64 tss; diff --git a/net/core/secure_seq.c b/net/core/secure_seq.c index b0ff6153be623..568779d5a0efa 100644 --- a/net/core/secure_seq.c +++ b/net/core/secure_seq.c @@ -71,7 +71,7 @@ u32 secure_tcpv6_ts_off(const struct net *net, return siphash(&combined, offsetofend(typeof(combined), daddr), &ts_secret); } -EXPORT_SYMBOL(secure_tcpv6_ts_off); +EXPORT_IPV6_MOD(secure_tcpv6_ts_off); u32 secure_tcpv6_seq(const __be32 *saddr, const __be32 *daddr, __be16 sport, __be16 dport) diff --git a/net/core/selftests.c b/net/core/selftests.c index 8f801e6e3b91b..e99ae983fca9b 100644 --- a/net/core/selftests.c +++ b/net/core/selftests.c @@ -299,7 +299,7 @@ static int net_test_phy_loopback_enable(struct net_device *ndev) if (!ndev->phydev) return -EOPNOTSUPP; - return phy_loopback(ndev->phydev, true); + return phy_loopback(ndev->phydev, true, 0); } static int
net_test_phy_loopback_disable(struct net_device *ndev) @@ -307,7 +307,7 @@ static int net_test_phy_loopback_disable(struct net_device *ndev) if (!ndev->phydev) return -EOPNOTSUPP; - return phy_loopback(ndev->phydev, false); + return phy_loopback(ndev->phydev, false, 0); } static int net_test_phy_loopback_udp(struct net_device *ndev) diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 7b03b64fdcb27..b7acb83083a8c 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -295,6 +295,68 @@ static struct sk_buff *napi_skb_cache_get(void) return skb; } +/** + * napi_skb_cache_get_bulk - obtain a number of zeroed skb heads from the cache + * @skbs: pointer to an at least @n-sized array to fill with skb pointers + * @n: number of entries to provide + * + * Tries to obtain @n &sk_buff entries from the NAPI percpu cache and writes + * the pointers into the provided array @skbs. If fewer entries are + * available, tries to replenish the cache and bulk-allocates the difference + * from the MM layer if needed. + * The heads are zeroed with either memset() or %__GFP_ZERO, so they are + * ready for {,__}build_skb_around() and don't have any data buffers attached. + * Must be called *only* from the BH context. + * + * Return: number of successfully allocated skbs (@n if no actual allocation + * was needed or kmem_cache_alloc_bulk() didn't fail). + */ +u32 napi_skb_cache_get_bulk(void **skbs, u32 n) +{ + struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache); + u32 bulk, total = n; + + local_lock_nested_bh(&napi_alloc_cache.bh_lock); + + if (nc->skb_count >= n) + goto get; + + /* Not enough cached skbs. Try refilling the cache first */ + bulk = min(NAPI_SKB_CACHE_SIZE - nc->skb_count, NAPI_SKB_CACHE_BULK); + nc->skb_count += kmem_cache_alloc_bulk(net_hotdata.skbuff_cache, + GFP_ATOMIC | __GFP_NOWARN, bulk, + &nc->skb_cache[nc->skb_count]); + if (likely(nc->skb_count >= n)) + goto get; + + /* Still not enough.
Bulk-allocate the missing part directly, zeroed */ + n -= kmem_cache_alloc_bulk(net_hotdata.skbuff_cache, + GFP_ATOMIC | __GFP_ZERO | __GFP_NOWARN, + n - nc->skb_count, &skbs[nc->skb_count]); + if (likely(nc->skb_count >= n)) + goto get; + + /* kmem_cache didn't allocate the number we need, limit the output */ + total -= n - nc->skb_count; + n = nc->skb_count; + +get: + for (u32 base = nc->skb_count - n, i = 0; i < n; i++) { + u32 cache_size = kmem_cache_size(net_hotdata.skbuff_cache); + + skbs[i] = nc->skb_cache[base + i]; + + kasan_mempool_unpoison_object(skbs[i], cache_size); + memset(skbs[i], 0, offsetof(struct sk_buff, tail)); + } + + nc->skb_count -= n; + local_unlock_nested_bh(&napi_alloc_cache.bh_lock); + + return total; +} +EXPORT_SYMBOL_GPL(napi_skb_cache_get_bulk); + static inline void __finalize_skb_around(struct sk_buff *skb, void *data, unsigned int size) { @@ -3177,7 +3239,7 @@ static int sendmsg_unlocked(struct sock *sk, struct msghdr *msg) typedef int (*sendmsg_func)(struct sock *sk, struct msghdr *msg); static int __skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset, - int len, sendmsg_func sendmsg) + int len, sendmsg_func sendmsg, int flags) { unsigned int orig_len = len; struct sk_buff *head = skb; @@ -3195,7 +3257,7 @@ static int __skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset, kv.iov_base = skb->data + offset; kv.iov_len = slen; memset(&msg, 0, sizeof(msg)); - msg.msg_flags = MSG_DONTWAIT; + msg.msg_flags = MSG_DONTWAIT | flags; iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, &kv, 1, slen); ret = INDIRECT_CALL_2(sendmsg, sendmsg_locked, @@ -3232,7 +3294,8 @@ static int __skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset, while (slen) { struct bio_vec bvec; struct msghdr msg = { - .msg_flags = MSG_SPLICE_PAGES | MSG_DONTWAIT, + .msg_flags = MSG_SPLICE_PAGES | MSG_DONTWAIT | + flags, }; bvec_set_page(&bvec, skb_frag_page(frag), slen, @@ -3278,14 +3341,21 @@ static int __skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset, int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset, int len) { - return __skb_send_sock(sk, skb, offset, len, sendmsg_locked); + return __skb_send_sock(sk, skb, offset, len, sendmsg_locked, 0); } EXPORT_SYMBOL_GPL(skb_send_sock_locked); +int skb_send_sock_locked_with_flags(struct sock *sk, struct sk_buff *skb, + int offset, int len, int flags) +{ + return __skb_send_sock(sk, skb, offset, len, sendmsg_locked, flags); +} +EXPORT_SYMBOL_GPL(skb_send_sock_locked_with_flags); + /* Send skb data on a socket. Socket must be unlocked. */ int skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset, int len) { - return __skb_send_sock(sk, skb, offset, len, sendmsg_unlocked); + return __skb_send_sock(sk, skb, offset, len, sendmsg_unlocked, 0); } /** @@ -5449,6 +5519,54 @@ void skb_complete_tx_timestamp(struct sk_buff *skb, } EXPORT_SYMBOL_GPL(skb_complete_tx_timestamp); +static bool skb_tstamp_tx_report_so_timestamping(struct sk_buff *skb, + struct skb_shared_hwtstamps *hwtstamps, + int tstype) +{ + switch (tstype) { + case SCM_TSTAMP_SCHED: + return skb_shinfo(skb)->tx_flags & SKBTX_SCHED_TSTAMP; + case SCM_TSTAMP_SND: + return skb_shinfo(skb)->tx_flags & (hwtstamps ? 
SKBTX_HW_TSTAMP_NOBPF : + SKBTX_SW_TSTAMP); + case SCM_TSTAMP_ACK: + return TCP_SKB_CB(skb)->txstamp_ack & TSTAMP_ACK_SK; + case SCM_TSTAMP_COMPLETION: + return skb_shinfo(skb)->tx_flags & SKBTX_COMPLETION_TSTAMP; + } + + return false; +} + +static void skb_tstamp_tx_report_bpf_timestamping(struct sk_buff *skb, + struct skb_shared_hwtstamps *hwtstamps, + struct sock *sk, + int tstype) +{ + int op; + + switch (tstype) { + case SCM_TSTAMP_SCHED: + op = BPF_SOCK_OPS_TSTAMP_SCHED_CB; + break; + case SCM_TSTAMP_SND: + if (hwtstamps) { + op = BPF_SOCK_OPS_TSTAMP_SND_HW_CB; + *skb_hwtstamps(skb) = *hwtstamps; + } else { + op = BPF_SOCK_OPS_TSTAMP_SND_SW_CB; + } + break; + case SCM_TSTAMP_ACK: + op = BPF_SOCK_OPS_TSTAMP_ACK_CB; + break; + default: + return; + } + + bpf_skops_tx_timestamping(sk, skb, op); +} + void __skb_tstamp_tx(struct sk_buff *orig_skb, const struct sk_buff *ack_skb, struct skb_shared_hwtstamps *hwtstamps, @@ -5461,6 +5579,13 @@ void __skb_tstamp_tx(struct sk_buff *orig_skb, if (!sk) return; + if (skb_shinfo(orig_skb)->tx_flags & SKBTX_BPF) + skb_tstamp_tx_report_bpf_timestamping(orig_skb, hwtstamps, + sk, tstype); + + if (!skb_tstamp_tx_report_so_timestamping(orig_skb, hwtstamps, tstype)) + return; + tsflags = READ_ONCE(sk->sk_tsflags); if (!hwtstamps && !(tsflags & SOF_TIMESTAMPING_OPT_TX_SWHW) && skb_shinfo(orig_skb)->tx_flags & SKBTX_IN_PROGRESS) @@ -6033,11 +6158,11 @@ void skb_scrub_packet(struct sk_buff *skb, bool xnet) skb->offload_fwd_mark = 0; skb->offload_l3_fwd_mark = 0; #endif + ipvs_reset(skb); if (!xnet) return; - ipvs_reset(skb); skb->mark = 0; skb_clear_tstamp(skb); } diff --git a/net/core/sock.c b/net/core/sock.c index eae2ae70a2e03..88694cd59abdf 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -278,14 +278,6 @@ static struct lock_class_key af_wlock_keys[AF_MAX]; static struct lock_class_key af_elock_keys[AF_MAX]; static struct lock_class_key af_kern_callback_keys[AF_MAX]; -/* Run time adjustable parameters. */ -__u32 sysctl_wmem_max __read_mostly = SK_WMEM_MAX; -EXPORT_SYMBOL(sysctl_wmem_max); -__u32 sysctl_rmem_max __read_mostly = SK_RMEM_MAX; -EXPORT_SYMBOL(sysctl_rmem_max); -__u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX; -__u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX; - DEFINE_STATIC_KEY_FALSE(memalloc_socks_key); EXPORT_SYMBOL_GPL(memalloc_socks_key); @@ -938,6 +930,7 @@ int sock_set_timestamping(struct sock *sk, int optname, WRITE_ONCE(sk->sk_tsflags, val); sock_valbool_flag(sk, SOCK_TSTAMP_NEW, optname == SO_TIMESTAMPING_NEW); + sock_valbool_flag(sk, SOCK_TIMESTAMPING_ANY, !!(val & TSFLAGS_ANY)); if (val & SOF_TIMESTAMPING_RX_SOFTWARE) sock_enable_timestamp(sk, @@ -948,6 +941,20 @@ int sock_set_timestamping(struct sock *sk, int optname, return 0; } +#if defined(CONFIG_CGROUP_BPF) +void bpf_skops_tx_timestamping(struct sock *sk, struct sk_buff *skb, int op) +{ + struct bpf_sock_ops_kern sock_ops; + + memset(&sock_ops, 0, offsetof(struct bpf_sock_ops_kern, temp)); + sock_ops.op = op; + sock_ops.is_fullsock = 1; + sock_ops.sk = sk; + bpf_skops_init_skb(&sock_ops, skb, 0); + __cgroup_bpf_run_filter_sock_ops(sk, &sock_ops, CGROUP_SOCK_OPS); +} +#endif + void sock_set_keepalive(struct sock *sk) { lock_sock(sk); @@ -1318,7 +1325,7 @@ int sk_setsockopt(struct sock *sk, int level, int optname, * play 'guess the biggest size' games. 
RCVBUF/SNDBUF * are treated in BSD as hints */ - val = min_t(u32, val, READ_ONCE(sysctl_wmem_max)); + val = min_t(u32, val, READ_ONCE(sock_net(sk)->core.sysctl_wmem_max)); set_sndbuf: /* Ensure val * 2 fits into an int, to prevent max_t() * from treating it as a negative value. @@ -1350,7 +1357,7 @@ int sk_setsockopt(struct sock *sk, int level, int optname, * play 'guess the biggest size' games. RCVBUF/SNDBUF * are treated in BSD as hints */ - __sock_set_rcvbuf(sk, min_t(u32, val, READ_ONCE(sysctl_rmem_max))); + __sock_set_rcvbuf(sk, min_t(u32, val, READ_ONCE(sock_net(sk)->core.sysctl_rmem_max))); break; case SO_RCVBUFFORCE: @@ -2041,7 +2048,7 @@ int sk_getsockopt(struct sock *sk, int level, int optname, v.val = READ_ONCE(sk->sk_napi_id); /* aggregate non-NAPI IDs down to 0 */ - if (v.val < MIN_NAPI_ID) + if (!napi_id_valid(v.val)) v.val = 0; break; @@ -2246,6 +2253,7 @@ struct sock *sk_alloc(struct net *net, int family, gfp_t priority, get_net_track(net, &sk->ns_tracker, priority); sock_inuse_add(net, 1); } else { + net_passive_inc(net); __netns_tracker_alloc(net, &sk->ns_tracker, false, priority); } @@ -2270,6 +2278,7 @@ EXPORT_SYMBOL(sk_alloc); static void __sk_destruct(struct rcu_head *head) { struct sock *sk = container_of(head, struct sock, sk_rcu); + struct net *net = sock_net(sk); struct sk_filter *filter; if (sk->sk_destruct) @@ -2301,14 +2310,28 @@ static void __sk_destruct(struct rcu_head *head) put_cred(sk->sk_peer_cred); put_pid(sk->sk_peer_pid); - if (likely(sk->sk_net_refcnt)) - put_net_track(sock_net(sk), &sk->ns_tracker); - else - __netns_tracker_free(sock_net(sk), &sk->ns_tracker, false); - + if (likely(sk->sk_net_refcnt)) { + put_net_track(net, &sk->ns_tracker); + } else { + __netns_tracker_free(net, &sk->ns_tracker, false); + net_passive_dec(net); + } sk_prot_free(sk->sk_prot_creator, sk); } +void sk_net_refcnt_upgrade(struct sock *sk) +{ + struct net *net = sock_net(sk); + + WARN_ON_ONCE(sk->sk_net_refcnt); + __netns_tracker_free(net, &sk->ns_tracker, false); + net_passive_dec(net); + sk->sk_net_refcnt = 1; + get_net_track(net, &sk->ns_tracker, GFP_KERNEL); + sock_inuse_add(net, 1); +} +EXPORT_SYMBOL_GPL(sk_net_refcnt_upgrade); + void sk_destruct(struct sock *sk) { bool use_call_rcu = sock_flag(sk, SOCK_RCU_FREE); @@ -2405,6 +2428,7 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority) * is not properly dismantling its kernel sockets at netns * destroy time. */ + net_passive_inc(sock_net(newsk)); __netns_tracker_alloc(sock_net(newsk), &newsk->ns_tracker, false, priority); } @@ -2533,8 +2557,12 @@ void sk_setup_caps(struct sock *sk, struct dst_entry *dst) u32 max_segs = 1; sk->sk_route_caps = dst->dev->features; - if (sk_is_tcp(sk)) + if (sk_is_tcp(sk)) { + struct inet_connection_sock *icsk = inet_csk(sk); + sk->sk_route_caps |= NETIF_F_GSO; + icsk->icsk_ack.dst_quick_ack = dst_metric(dst, RTAX_QUICKACK); + } if (sk->sk_route_caps & NETIF_F_GSO) sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE; if (unlikely(sk->sk_gso_disabled)) @@ -2804,6 +2832,22 @@ void *sock_kmalloc(struct sock *sk, int size, gfp_t priority) } EXPORT_SYMBOL(sock_kmalloc); +/* + * Duplicate the input "src" memory block using the socket's + * option memory buffer. + */ +void *sock_kmemdup(struct sock *sk, const void *src, + int size, gfp_t priority) +{ + void *mem; + + mem = sock_kmalloc(sk, size, priority); + if (mem) + memcpy(mem, src, size); + return mem; +} +EXPORT_SYMBOL(sock_kmemdup); + /* Free an option memory block. 
Note, we actually want the inline * here as this allows gcc to detect the nullify and fold away the * condition entirely. @@ -3566,8 +3610,8 @@ void sock_init_data_uid(struct socket *sock, struct sock *sk, kuid_t uid) timer_setup(&sk->sk_timer, NULL, 0); sk->sk_allocation = GFP_KERNEL; - sk->sk_rcvbuf = READ_ONCE(sysctl_rmem_default); - sk->sk_sndbuf = READ_ONCE(sysctl_wmem_default); + sk->sk_rcvbuf = READ_ONCE(sock_net(sk)->core.sysctl_rmem_default); + sk->sk_sndbuf = READ_ONCE(sock_net(sk)->core.sysctl_wmem_default); sk->sk_state = TCP_CLOSE; sk->sk_use_task_frag = true; sk_set_socket(sk, sock); @@ -3881,7 +3925,7 @@ void sk_get_meminfo(const struct sock *sk, u32 *mem) mem[SK_MEMINFO_RCVBUF] = READ_ONCE(sk->sk_rcvbuf); mem[SK_MEMINFO_WMEM_ALLOC] = sk_wmem_alloc_get(sk); mem[SK_MEMINFO_SNDBUF] = READ_ONCE(sk->sk_sndbuf); - mem[SK_MEMINFO_FWD_ALLOC] = sk_forward_alloc_get(sk); + mem[SK_MEMINFO_FWD_ALLOC] = READ_ONCE(sk->sk_forward_alloc); mem[SK_MEMINFO_WMEM_QUEUED] = READ_ONCE(sk->sk_wmem_queued); mem[SK_MEMINFO_OPTMEM] = atomic_read(&sk->sk_omem_alloc); mem[SK_MEMINFO_BACKLOG] = READ_ONCE(sk->sk_backlog.len); diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c index ad2741f1346af..aedc249bf0e23 100644 --- a/net/core/sysctl_net_core.c +++ b/net/core/sysctl_net_core.c @@ -34,6 +34,7 @@ static int min_sndbuf = SOCK_MIN_SNDBUF; static int min_rcvbuf = SOCK_MIN_RCVBUF; static int max_skb_frags = MAX_SKB_FRAGS; static int min_mem_pcpu_rsv = SK_MEMORY_PCPU_RESERVE; +static int netdev_budget_usecs_min = 2 * USEC_PER_SEC / HZ; static int net_msg_warn; /* Unused, but still a sysctl */ @@ -587,7 +588,7 @@ static struct ctl_table net_core_table[] = { .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec_minmax, - .extra1 = SYSCTL_ZERO, + .extra1 = &netdev_budget_usecs_min, }, { .procname = "fb_tunnels_only_for_init_net", @@ -675,21 +676,9 @@ static struct ctl_table netns_core_table[] = { .extra2 = SYSCTL_ONE, .proc_handler = proc_dou8vec_minmax, }, - { - .procname = "tstamp_allow_data", - .data = &init_net.core.sysctl_tstamp_allow_data, - .maxlen = sizeof(u8), - .mode = 0644, - .proc_handler = proc_dou8vec_minmax, - .extra1 = SYSCTL_ZERO, - .extra2 = SYSCTL_ONE - }, - /* sysctl_core_net_init() will set the values after this - * to readonly in network namespaces - */ { .procname = "wmem_max", - .data = &sysctl_wmem_max, + .data = &init_net.core.sysctl_wmem_max, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, @@ -697,7 +686,7 @@ static struct ctl_table netns_core_table[] = { }, { .procname = "rmem_max", - .data = &sysctl_rmem_max, + .data = &init_net.core.sysctl_rmem_max, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, @@ -705,7 +694,7 @@ static struct ctl_table netns_core_table[] = { }, { .procname = "wmem_default", - .data = &sysctl_wmem_default, + .data = &init_net.core.sysctl_wmem_default, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, @@ -713,7 +702,7 @@ static struct ctl_table netns_core_table[] = { }, { .procname = "rmem_default", - .data = &sysctl_rmem_default, + .data = &init_net.core.sysctl_rmem_default, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, @@ -747,13 +736,8 @@ static __net_init int sysctl_core_net_init(struct net *net) goto err_dup; for (i = 0; i < table_size; ++i) { - if (tbl[i].data == &sysctl_wmem_max) - break; - tbl[i].data += (char *)net - (char *)&init_net; } - for (; i < table_size; ++i) - tbl[i].mode &= ~0222; } 
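/* Context for the rebasing loop above (illustrative comment, not part of
 * the patch): with wmem_max and friends now stored per-netns, each table
 * entry's .data initially points into init_net, and adding the byte delta
 * ((char *)net - (char *)&init_net) retargets it at the same field inside
 * this netns, e.g.:
 *
 *   tbl[i].data         == &init_net.core.sysctl_wmem_max
 *   tbl[i].data + delta == &net->core.sysctl_wmem_max
 *
 * The old early-break and read-only (~0222) loop is gone because every
 * remaining entry is now legitimately writable per namespace.
 */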
net->core.sysctl_hdr = register_net_sysctl_sz(net, "net/core", tbl, table_size); diff --git a/net/core/xdp.c b/net/core/xdp.c index 2c6ab6fb452f7..f86eedad586a7 100644 --- a/net/core/xdp.c +++ b/net/core/xdp.c @@ -618,16 +618,6 @@ void xdp_warn(const char *msg, const char *func, const int line) }; EXPORT_SYMBOL_GPL(xdp_warn); -int xdp_alloc_skb_bulk(void **skbs, int n_skb, gfp_t gfp) -{ - n_skb = kmem_cache_alloc_bulk(net_hotdata.skbuff_cache, gfp, n_skb, skbs); - if (unlikely(!n_skb)) - return -ENOMEM; - - return 0; -} -EXPORT_SYMBOL_GPL(xdp_alloc_skb_bulk); - /** * xdp_build_skb_from_buff - create an skb from &xdp_buff * @xdp: &xdp_buff to convert to an skb diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c index be515ba821e2d..2045ddac0fe9f 100644 --- a/net/dccp/ipv4.c +++ b/net/dccp/ipv4.c @@ -426,9 +426,6 @@ struct sock *dccp_v4_request_recv_sock(const struct sock *sk, newinet = inet_sk(newsk); ireq = inet_rsk(req); - sk_daddr_set(newsk, ireq->ir_rmt_addr); - sk_rcv_saddr_set(newsk, ireq->ir_loc_addr); - newinet->inet_saddr = ireq->ir_loc_addr; RCU_INIT_POINTER(newinet->inet_opt, rcu_dereference(ireq->ireq_opt)); newinet->mc_index = inet_iif(skb); newinet->mc_ttl = ip_hdr(skb)->ttl; @@ -937,8 +934,6 @@ static const struct inet_connection_sock_af_ops dccp_ipv4_af_ops = { .net_header_len = sizeof(struct iphdr), .setsockopt = ip_setsockopt, .getsockopt = ip_getsockopt, - .addr2sockaddr = inet_csk_addr2sockaddr, - .sockaddr_len = sizeof(struct sockaddr_in), }; static int dccp_v4_init_sock(struct sock *sk) diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c index d6649246188d7..e24dbffabfc19 100644 --- a/net/dccp/ipv6.c +++ b/net/dccp/ipv6.c @@ -365,6 +365,9 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb) ireq = inet_rsk(req); ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr; ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr; + ireq->ir_rmt_addr = LOOPBACK4_IPV6; + ireq->ir_loc_addr = LOOPBACK4_IPV6; + ireq->ireq_family = AF_INET6; ireq->ir_mark = inet_request_mark(sk, skb); @@ -504,10 +507,7 @@ static struct sock *dccp_v6_request_recv_sock(const struct sock *sk, memcpy(newnp, np, sizeof(struct ipv6_pinfo)); - newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr; newnp->saddr = ireq->ir_v6_loc_addr; - newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr; - newsk->sk_bound_dev_if = ireq->ir_iif; /* Now IPv6 options... 
@@ -546,9 +546,6 @@ static struct sock *dccp_v6_request_recv_sock(const struct sock *sk, dccp_sync_mss(newsk, dst_mtu(dst)); - newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6; - newinet->inet_rcv_saddr = LOOPBACK4_IPV6; - if (__inet_inherit_port(sk, newsk) < 0) { inet_csk_prepare_forced_close(newsk); dccp_done(newsk); @@ -991,8 +988,6 @@ static const struct inet_connection_sock_af_ops dccp_ipv6_af_ops = { .net_header_len = sizeof(struct ipv6hdr), .setsockopt = ipv6_setsockopt, .getsockopt = ipv6_getsockopt, - .addr2sockaddr = inet6_csk_addr2sockaddr, - .sockaddr_len = sizeof(struct sockaddr_in6), }; /* @@ -1007,8 +1002,6 @@ static const struct inet_connection_sock_af_ops dccp_ipv6_mapped = { .net_header_len = sizeof(struct iphdr), .setsockopt = ipv6_setsockopt, .getsockopt = ipv6_getsockopt, - .addr2sockaddr = inet6_csk_addr2sockaddr, - .sockaddr_len = sizeof(struct sockaddr_in6), }; static void dccp_v6_sk_destruct(struct sock *sk) diff --git a/net/dccp/output.c b/net/dccp/output.c index 5c2e24f3c39b7..39cf3430177ac 100644 --- a/net/dccp/output.c +++ b/net/dccp/output.c @@ -627,11 +627,10 @@ void dccp_send_delayed_ack(struct sock *sk) return; } - if (!time_before(timeout, icsk->icsk_ack.timeout)) - timeout = icsk->icsk_ack.timeout; + if (!time_before(timeout, icsk_delack_timeout(icsk))) + timeout = icsk_delack_timeout(icsk); } icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER; - icsk->icsk_ack.timeout = timeout; sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout); } #endif diff --git a/net/dccp/timer.c b/net/dccp/timer.c index a4cfb47b60e52..232ac4ae0a73f 100644 --- a/net/dccp/timer.c +++ b/net/dccp/timer.c @@ -139,9 +139,9 @@ static void dccp_write_timer(struct timer_list *t) if (sk->sk_state == DCCP_CLOSED || !icsk->icsk_pending) goto out; - if (time_after(icsk->icsk_timeout, jiffies)) { + if (time_after(icsk_timeout(icsk), jiffies)) { sk_reset_timer(sk, &icsk->icsk_retransmit_timer, - icsk->icsk_timeout); + icsk_timeout(icsk)); goto out; } @@ -185,9 +185,9 @@ static void dccp_delack_timer(struct timer_list *t) if (sk->sk_state == DCCP_CLOSED || !(icsk->icsk_ack.pending & ICSK_ACK_TIMER)) goto out; - if (time_after(icsk->icsk_ack.timeout, jiffies)) { + if (time_after(icsk_delack_timeout(icsk), jiffies)) { sk_reset_timer(sk, &icsk->icsk_delack_timer, - icsk->icsk_ack.timeout); + icsk_delack_timeout(icsk)); goto out; } diff --git a/net/devlink/core.c b/net/devlink/core.c index f49cd83f1955f..7203c39532fcc 100644 --- a/net/devlink/core.c +++ b/net/devlink/core.c @@ -117,7 +117,7 @@ static struct devlink_rel *devlink_rel_alloc(void) err = xa_alloc_cyclic(&devlink_rels, &rel->index, rel, xa_limit_32b, &next, GFP_KERNEL); - if (err) { + if (err < 0) { kfree(rel); return ERR_PTR(err); } diff --git a/net/devlink/dev.c b/net/devlink/dev.c index d6e3db300acbe..6c442549722c7 100644 --- a/net/devlink/dev.c +++ b/net/devlink/dev.c @@ -763,6 +763,15 @@ int devlink_info_board_serial_number_put(struct devlink_info_req *req, } EXPORT_SYMBOL_GPL(devlink_info_board_serial_number_put); +int devlink_info_function_uid_put(struct devlink_info_req *req, + const char *fuid) +{ + if (!req->msg) + return 0; + return nla_put_string(req->msg, DEVLINK_ATTR_INFO_FUNCTION_UID, fuid); +} +EXPORT_SYMBOL_GPL(devlink_info_function_uid_put); + static int devlink_info_version_put(struct devlink_info_req *req, int attr, const char *version_name, const char *version_value, diff --git a/net/dsa/conduit.c b/net/dsa/conduit.c index 3dfdb3cb47dc7..4ae255cfb23f8 100644 --- a/net/dsa/conduit.c +++ 
b/net/dsa/conduit.c @@ -10,6 +10,7 @@ #include #include #include +#include #include "conduit.h" #include "dsa.h" @@ -26,7 +27,9 @@ static int dsa_conduit_get_regs_len(struct net_device *dev) int len; if (ops->get_regs_len) { + netdev_lock_ops(dev); len = ops->get_regs_len(dev); + netdev_unlock_ops(dev); if (len < 0) return len; ret += len; @@ -57,11 +60,15 @@ static void dsa_conduit_get_regs(struct net_device *dev, int len; if (ops->get_regs_len && ops->get_regs) { + netdev_lock_ops(dev); len = ops->get_regs_len(dev); - if (len < 0) + if (len < 0) { + netdev_unlock_ops(dev); return; + } regs->len = len; ops->get_regs(dev, regs, data); + netdev_unlock_ops(dev); data += regs->len; } @@ -91,8 +98,10 @@ static void dsa_conduit_get_ethtool_stats(struct net_device *dev, int count = 0; if (ops->get_sset_count && ops->get_ethtool_stats) { + netdev_lock_ops(dev); count = ops->get_sset_count(dev, ETH_SS_STATS); ops->get_ethtool_stats(dev, stats, data); + netdev_unlock_ops(dev); } if (ds->ops->get_ethtool_stats) @@ -114,8 +123,10 @@ static void dsa_conduit_get_ethtool_phy_stats(struct net_device *dev, if (count >= 0) phy_ethtool_get_stats(dev->phydev, stats, data); } else if (ops->get_sset_count && ops->get_ethtool_phy_stats) { + netdev_lock_ops(dev); count = ops->get_sset_count(dev, ETH_SS_PHY_STATS); ops->get_ethtool_phy_stats(dev, stats, data); + netdev_unlock_ops(dev); } if (count < 0) @@ -132,11 +143,13 @@ static int dsa_conduit_get_sset_count(struct net_device *dev, int sset) struct dsa_switch *ds = cpu_dp->ds; int count = 0; + netdev_lock_ops(dev); if (sset == ETH_SS_PHY_STATS && dev->phydev && !ops->get_ethtool_phy_stats) count = phy_ethtool_get_sset_count(dev->phydev); else if (ops->get_sset_count) count = ops->get_sset_count(dev, sset); + netdev_unlock_ops(dev); if (count < 0) count = 0; @@ -163,6 +176,7 @@ static void dsa_conduit_get_strings(struct net_device *dev, uint32_t stringset, /* We do not want to be NULL-terminated, since this is a prefix */ pfx[sizeof(pfx) - 1] = '_'; + netdev_lock_ops(dev); if (stringset == ETH_SS_PHY_STATS && dev->phydev && !ops->get_ethtool_phy_stats) { mcount = phy_ethtool_get_sset_count(dev->phydev); @@ -176,6 +190,7 @@ static void dsa_conduit_get_strings(struct net_device *dev, uint32_t stringset, mcount = 0; ops->get_strings(dev, stringset, data); } + netdev_unlock_ops(dev); if (ds->ops->get_strings) { ndata = data + mcount * len; diff --git a/net/dsa/user.c b/net/dsa/user.c index 291ab1b4acc4d..804dc7dac4f2f 100644 --- a/net/dsa/user.c +++ b/net/dsa/user.c @@ -897,7 +897,7 @@ static void dsa_skb_tx_timestamp(struct dsa_user_priv *p, { struct dsa_switch *ds = p->dp->ds; - if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) + if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP_NOBPF)) return; if (!ds->ops->port_txtstamp) @@ -1243,16 +1243,25 @@ static int dsa_user_set_eee(struct net_device *dev, struct ethtool_keee *e) if (!ds->ops->support_eee || !ds->ops->support_eee(ds, dp->index)) return -EOPNOTSUPP; - /* Port's PHY and MAC both need to be EEE capable */ - if (!dev->phydev) - return -ENODEV; + /* If the port is using phylink managed EEE, then an unimplemented + * set_mac_eee() is permissible. 
+ */ + if (!phylink_mac_implements_lpi(ds->phylink_mac_ops)) { + /* Port's PHY and MAC both need to be EEE capable */ + if (!dev->phydev) + return -ENODEV; - if (!ds->ops->set_mac_eee) - return -EOPNOTSUPP; + if (!ds->ops->set_mac_eee) + return -EOPNOTSUPP; - ret = ds->ops->set_mac_eee(ds, dp->index, e); - if (ret) - return ret; + ret = ds->ops->set_mac_eee(ds, dp->index, e); + if (ret) + return ret; + } else if (ds->ops->set_mac_eee) { + ret = ds->ops->set_mac_eee(ds, dp->index, e); + if (ret) + return ret; + } return phylink_ethtool_set_eee(dp->pl, e); } diff --git a/net/ethtool/cabletest.c b/net/ethtool/cabletest.c index f22051f33868a..0364b8fb577bc 100644 --- a/net/ethtool/cabletest.c +++ b/net/ethtool/cabletest.c @@ -2,6 +2,7 @@ #include #include +#include #include "netlink.h" #include "common.h" @@ -72,23 +73,24 @@ int ethnl_act_cable_test(struct sk_buff *skb, struct genl_info *info) dev = req_info.dev; rtnl_lock(); - phydev = ethnl_req_get_phydev(&req_info, - tb[ETHTOOL_A_CABLE_TEST_HEADER], + netdev_lock_ops(dev); + phydev = ethnl_req_get_phydev(&req_info, tb, + ETHTOOL_A_CABLE_TEST_HEADER, info->extack); if (IS_ERR_OR_NULL(phydev)) { ret = -EOPNOTSUPP; - goto out_rtnl; + goto out_unlock; } ops = ethtool_phy_ops; if (!ops || !ops->start_cable_test) { ret = -EOPNOTSUPP; - goto out_rtnl; + goto out_unlock; } ret = ethnl_ops_begin(dev); if (ret < 0) - goto out_rtnl; + goto out_unlock; ret = ops->start_cable_test(phydev, info->extack); @@ -97,7 +99,8 @@ int ethnl_act_cable_test(struct sk_buff *skb, struct genl_info *info) if (!ret) ethnl_cable_test_started(phydev, ETHTOOL_MSG_CABLE_TEST_NTF); -out_rtnl: +out_unlock: + netdev_unlock_ops(dev); rtnl_unlock(); ethnl_parse_header_dev_put(&req_info); return ret; @@ -339,23 +342,24 @@ int ethnl_act_cable_test_tdr(struct sk_buff *skb, struct genl_info *info) goto out_dev_put; rtnl_lock(); - phydev = ethnl_req_get_phydev(&req_info, - tb[ETHTOOL_A_CABLE_TEST_TDR_HEADER], + netdev_lock_ops(dev); + phydev = ethnl_req_get_phydev(&req_info, tb, + ETHTOOL_A_CABLE_TEST_TDR_HEADER, info->extack); if (IS_ERR_OR_NULL(phydev)) { ret = -EOPNOTSUPP; - goto out_rtnl; + goto out_unlock; } ops = ethtool_phy_ops; if (!ops || !ops->start_cable_test_tdr) { ret = -EOPNOTSUPP; - goto out_rtnl; + goto out_unlock; } ret = ethnl_ops_begin(dev); if (ret < 0) - goto out_rtnl; + goto out_unlock; ret = ops->start_cable_test_tdr(phydev, info->extack, &cfg); @@ -365,7 +369,8 @@ int ethnl_act_cable_test_tdr(struct sk_buff *skb, struct genl_info *info) ethnl_cable_test_started(phydev, ETHTOOL_MSG_CABLE_TEST_TDR_NTF); -out_rtnl: +out_unlock: + netdev_unlock_ops(dev); rtnl_unlock(); out_dev_put: ethnl_parse_header_dev_put(&req_info); diff --git a/net/ethtool/cmis_fw_update.c b/net/ethtool/cmis_fw_update.c index 48aef6220f009..df5f344209c47 100644 --- a/net/ethtool/cmis_fw_update.c +++ b/net/ethtool/cmis_fw_update.c @@ -2,6 +2,7 @@ #include #include +#include #include "common.h" #include "module_fw.h" @@ -418,8 +419,13 @@ cmis_fw_update_commit_image(struct ethtool_cmis_cdb *cdb, static int cmis_fw_update_reset(struct net_device *dev) { __u32 reset_data = ETH_RESET_PHY; + int ret; - return dev->ethtool_ops->reset(dev, &reset_data); + netdev_lock_ops(dev); + ret = dev->ethtool_ops->reset(dev, &reset_data); + netdev_unlock_ops(dev); + + return ret; } void diff --git a/net/ethtool/common.c b/net/ethtool/common.c index d88e9080643b8..0cb6da1f692a0 100644 --- a/net/ethtool/common.c +++ b/net/ethtool/common.c @@ -6,6 +6,7 @@ #include #include #include +#include #include "netlink.h" 
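[Editor's note] The dsa/conduit.c hunks above, and the ethtool ones that follow, all apply the same transformation: every invocation of a dev->ethtool_ops callback is bracketed by netdev_lock_ops()/netdev_unlock_ops(). A minimal sketch of the pattern, assuming, as this series does, that the lock must be dropped on every exit path including early error returns; example_get_regs_len() is not a function from the patch:

/* Sketch only; mirrors the dsa_conduit_get_regs_len() conversion above. */
static int example_get_regs_len(struct net_device *dev)
{
	const struct ethtool_ops *ops = dev->ethtool_ops;
	int len = -EOPNOTSUPP;

	netdev_lock_ops(dev);		/* take the per-netdev ops lock */
	if (ops->get_regs_len)
		len = ops->get_regs_len(dev);
	netdev_unlock_ops(dev);		/* drop it on every path out */

	return len;
}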
#include "common.h" @@ -35,6 +36,7 @@ const char netdev_features_strings[NETDEV_FEATURE_COUNT][ETH_GSTRING_LEN] = { [NETIF_F_TSO_BIT] = "tx-tcp-segmentation", [NETIF_F_GSO_ROBUST_BIT] = "tx-gso-robust", [NETIF_F_TSO_ECN_BIT] = "tx-tcp-ecn-segmentation", + [NETIF_F_GSO_ACCECN_BIT] = "tx-tcp-accecn-segmentation", [NETIF_F_TSO_MANGLEID_BIT] = "tx-tcp-mangleid-segmentation", [NETIF_F_TSO6_BIT] = "tx-tcp6-segmentation", [NETIF_F_FSO_BIT] = "tx-fcoe-segmentation", @@ -213,6 +215,24 @@ const char link_mode_names[][ETH_GSTRING_LEN] = { __DEFINE_LINK_MODE_NAME(10, T1S, Half), __DEFINE_LINK_MODE_NAME(10, T1S_P2MP, Half), __DEFINE_LINK_MODE_NAME(10, T1BRR, Full), + __DEFINE_LINK_MODE_NAME(200000, CR, Full), + __DEFINE_LINK_MODE_NAME(200000, KR, Full), + __DEFINE_LINK_MODE_NAME(200000, DR, Full), + __DEFINE_LINK_MODE_NAME(200000, DR_2, Full), + __DEFINE_LINK_MODE_NAME(200000, SR, Full), + __DEFINE_LINK_MODE_NAME(200000, VR, Full), + __DEFINE_LINK_MODE_NAME(400000, CR2, Full), + __DEFINE_LINK_MODE_NAME(400000, KR2, Full), + __DEFINE_LINK_MODE_NAME(400000, DR2, Full), + __DEFINE_LINK_MODE_NAME(400000, DR2_2, Full), + __DEFINE_LINK_MODE_NAME(400000, SR2, Full), + __DEFINE_LINK_MODE_NAME(400000, VR2, Full), + __DEFINE_LINK_MODE_NAME(800000, CR4, Full), + __DEFINE_LINK_MODE_NAME(800000, KR4, Full), + __DEFINE_LINK_MODE_NAME(800000, DR4, Full), + __DEFINE_LINK_MODE_NAME(800000, DR4_2, Full), + __DEFINE_LINK_MODE_NAME(800000, SR4, Full), + __DEFINE_LINK_MODE_NAME(800000, VR4, Full), }; static_assert(ARRAY_SIZE(link_mode_names) == __ETHTOOL_LINK_MODE_MASK_NBITS); @@ -221,8 +241,11 @@ static_assert(ARRAY_SIZE(link_mode_names) == __ETHTOOL_LINK_MODE_MASK_NBITS); #define __LINK_MODE_LANES_CR4 4 #define __LINK_MODE_LANES_CR8 8 #define __LINK_MODE_LANES_DR 1 +#define __LINK_MODE_LANES_DR_2 1 #define __LINK_MODE_LANES_DR2 2 +#define __LINK_MODE_LANES_DR2_2 2 #define __LINK_MODE_LANES_DR4 4 +#define __LINK_MODE_LANES_DR4_2 4 #define __LINK_MODE_LANES_DR8 8 #define __LINK_MODE_LANES_KR 1 #define __LINK_MODE_LANES_KR2 2 @@ -251,6 +274,9 @@ static_assert(ARRAY_SIZE(link_mode_names) == __ETHTOOL_LINK_MODE_MASK_NBITS); #define __LINK_MODE_LANES_T1L 1 #define __LINK_MODE_LANES_T1S 1 #define __LINK_MODE_LANES_T1S_P2MP 1 +#define __LINK_MODE_LANES_VR 1 +#define __LINK_MODE_LANES_VR2 2 +#define __LINK_MODE_LANES_VR4 4 #define __LINK_MODE_LANES_VR8 8 #define __LINK_MODE_LANES_DR8_2 8 #define __LINK_MODE_LANES_T1BRR 1 @@ -378,8 +404,27 @@ const struct link_mode_info link_mode_params[] = { __DEFINE_LINK_MODE_PARAMS(10, T1S, Half), __DEFINE_LINK_MODE_PARAMS(10, T1S_P2MP, Half), __DEFINE_LINK_MODE_PARAMS(10, T1BRR, Full), + __DEFINE_LINK_MODE_PARAMS(200000, CR, Full), + __DEFINE_LINK_MODE_PARAMS(200000, KR, Full), + __DEFINE_LINK_MODE_PARAMS(200000, DR, Full), + __DEFINE_LINK_MODE_PARAMS(200000, DR_2, Full), + __DEFINE_LINK_MODE_PARAMS(200000, SR, Full), + __DEFINE_LINK_MODE_PARAMS(200000, VR, Full), + __DEFINE_LINK_MODE_PARAMS(400000, CR2, Full), + __DEFINE_LINK_MODE_PARAMS(400000, KR2, Full), + __DEFINE_LINK_MODE_PARAMS(400000, DR2, Full), + __DEFINE_LINK_MODE_PARAMS(400000, DR2_2, Full), + __DEFINE_LINK_MODE_PARAMS(400000, SR2, Full), + __DEFINE_LINK_MODE_PARAMS(400000, VR2, Full), + __DEFINE_LINK_MODE_PARAMS(800000, CR4, Full), + __DEFINE_LINK_MODE_PARAMS(800000, KR4, Full), + __DEFINE_LINK_MODE_PARAMS(800000, DR4, Full), + __DEFINE_LINK_MODE_PARAMS(800000, DR4_2, Full), + __DEFINE_LINK_MODE_PARAMS(800000, SR4, Full), + __DEFINE_LINK_MODE_PARAMS(800000, VR4, Full), }; static_assert(ARRAY_SIZE(link_mode_params) == 
__ETHTOOL_LINK_MODE_MASK_NBITS); +EXPORT_SYMBOL_GPL(link_mode_params); const char netif_msg_class_names[][ETH_GSTRING_LEN] = { [NETIF_MSG_DRV_BIT] = "drv", @@ -431,6 +476,7 @@ const char sof_timestamping_names[][ETH_GSTRING_LEN] = { [const_ilog2(SOF_TIMESTAMPING_BIND_PHC)] = "bind-phc", [const_ilog2(SOF_TIMESTAMPING_OPT_ID_TCP)] = "option-id-tcp", [const_ilog2(SOF_TIMESTAMPING_OPT_RX_FILTER)] = "option-rx-filter", + [const_ilog2(SOF_TIMESTAMPING_TX_COMPLETION)] = "tx-completion", }; static_assert(ARRAY_SIZE(sof_timestamping_names) == __SOF_TIMESTAMPING_CNT); @@ -771,6 +817,21 @@ int ethtool_check_ops(const struct ethtool_ops *ops) return 0; } +void ethtool_ringparam_get_cfg(struct net_device *dev, + struct ethtool_ringparam *param, + struct kernel_ethtool_ringparam *kparam, + struct netlink_ext_ack *extack) +{ + memset(param, 0, sizeof(*param)); + memset(kparam, 0, sizeof(*kparam)); + + param->cmd = ETHTOOL_GRINGPARAM; + dev->ethtool_ops->get_ringparam(dev, param, kparam, extack); + + /* Driver gives us current state, we want to return current config */ + kparam->tcp_data_split = dev->cfg->hds_config; +} + static void ethtool_init_tsinfo(struct kernel_ethtool_ts_info *info) { memset(info, 0, sizeof(*info)); diff --git a/net/ethtool/common.h b/net/ethtool/common.h index 58e9e7db06f90..b4683d286a5a5 100644 --- a/net/ethtool/common.h +++ b/net/ethtool/common.h @@ -15,12 +15,6 @@ #define __SOF_TIMESTAMPING_CNT (const_ilog2(SOF_TIMESTAMPING_LAST) + 1) #define __HWTSTAMP_FLAG_CNT (const_ilog2(HWTSTAMP_FLAG_LAST) + 1) -struct link_mode_info { - int speed; - u8 lanes; - u8 duplex; -}; - struct genl_info; struct hwtstamp_provider_desc; @@ -33,7 +27,6 @@ tunable_strings[__ETHTOOL_TUNABLE_COUNT][ETH_GSTRING_LEN]; extern const char phy_tunable_strings[__ETHTOOL_PHY_TUNABLE_COUNT][ETH_GSTRING_LEN]; extern const char link_mode_names[][ETH_GSTRING_LEN]; -extern const struct link_mode_info link_mode_params[]; extern const char netif_msg_class_names[][ETH_GSTRING_LEN]; extern const char wol_mode_names[][ETH_GSTRING_LEN]; extern const char sof_timestamping_names[][ETH_GSTRING_LEN]; @@ -51,6 +44,12 @@ int ethtool_check_max_channel(struct net_device *dev, struct ethtool_channels channels, struct genl_info *info); int ethtool_check_rss_ctx_busy(struct net_device *dev, u32 rss_context); + +void ethtool_ringparam_get_cfg(struct net_device *dev, + struct ethtool_ringparam *param, + struct kernel_ethtool_ringparam *kparam, + struct netlink_ext_ack *extack); + int __ethtool_get_ts_info(struct net_device *dev, struct kernel_ethtool_ts_info *info); int ethtool_get_ts_info_by_phc(struct net_device *dev, struct kernel_ethtool_ts_info *info, diff --git a/net/ethtool/features.c b/net/ethtool/features.c index b6cb101d7f19e..f2217983be2bd 100644 --- a/net/ethtool/features.c +++ b/net/ethtool/features.c @@ -1,5 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-only +#include + #include "netlink.h" #include "common.h" #include "bitset.h" @@ -234,9 +236,10 @@ int ethnl_set_features(struct sk_buff *skb, struct genl_info *info) dev = req_info.dev; rtnl_lock(); + netdev_lock_ops(dev); ret = ethnl_ops_begin(dev); if (ret < 0) - goto out_rtnl; + goto out_unlock; ethnl_features_to_bitmap(old_active, dev->features); ethnl_features_to_bitmap(old_wanted, dev->wanted_features); ret = ethnl_parse_bitset(req_wanted, req_mask, NETDEV_FEATURE_COUNT, @@ -286,7 +289,8 @@ int ethnl_set_features(struct sk_buff *skb, struct genl_info *info) out_ops: ethnl_ops_complete(dev); -out_rtnl: +out_unlock: + netdev_unlock_ops(dev); rtnl_unlock(); 
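[Editor's note] ethtool_ringparam_get_cfg() above is introduced so ring-parameter readers report the configured tcp_data_split (dev->cfg->hds_config) rather than the driver's momentary state; the ioctl and netlink set-paths below are both converted to it. A sketch of the intended call pattern; example_validate_rx_pending() is hypothetical:

/* Hypothetical caller; mirrors the converted set_ringparam handlers. */
static int example_validate_rx_pending(struct net_device *dev, u32 new_rx)
{
	struct kernel_ethtool_ringparam kparam;
	struct ethtool_ringparam param;

	/* Zeroes both structs, queries the driver, then overwrites
	 * kparam.tcp_data_split with the requested hds_config.
	 */
	ethtool_ringparam_get_cfg(dev, &param, &kparam, NULL);

	/* Validate a new ring size against the driver-reported maximum. */
	return new_rx > param.rx_max_pending ? -EINVAL : 0;
}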
ethnl_parse_header_dev_put(&req_info); return ret; diff --git a/net/ethtool/ioctl.c b/net/ethtool/ioctl.c index 7609ce2b2c5e2..221639407c724 100644 --- a/net/ethtool/ioctl.c +++ b/net/ethtool/ioctl.c @@ -31,6 +31,7 @@ #include #include #include +#include #include #include "common.h" @@ -992,11 +993,17 @@ static noinline_for_stack int ethtool_set_rxnfc(struct net_device *dev, if (rc) return rc; - /* Nonzero ring with RSS only makes sense if NIC adds them together */ - if (cmd == ETHTOOL_SRXCLSRLINS && info.fs.flow_type & FLOW_RSS && - !ops->cap_rss_rxnfc_adds && - ethtool_get_flow_spec_ring(info.fs.ring_cookie)) - return -EINVAL; + if (cmd == ETHTOOL_SRXCLSRLINS && info.fs.flow_type & FLOW_RSS) { + /* Nonzero ring with RSS only makes sense + * if NIC adds them together + */ + if (!ops->cap_rss_rxnfc_adds && + ethtool_get_flow_spec_ring(info.fs.ring_cookie)) + return -EINVAL; + + if (!xa_load(&dev->ethtool->rss_ctx, info.rss_context)) + return -EINVAL; + } if (cmd == ETHTOOL_SRXFH && ops->get_rxfh) { struct ethtool_rxfh_param rxfh = {}; @@ -1005,11 +1012,11 @@ static noinline_for_stack int ethtool_set_rxnfc(struct net_device *dev, if (rc) return rc; - /* Sanity check: if symmetric-xor is set, then: + /* Sanity check: if symmetric-xor/symmetric-or-xor is set, then: * 1 - no other fields besides IP src/dst and/or L4 src/dst * 2 - If src is set, dst must also be set */ - if ((rxfh.input_xfrm & RXH_XFRM_SYM_XOR) && + if ((rxfh.input_xfrm & (RXH_XFRM_SYM_XOR | RXH_XFRM_SYM_OR_XOR)) && ((info.data & ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3)) || (!!(info.data & RXH_IP_SRC) ^ !!(info.data & RXH_IP_DST)) || @@ -1382,11 +1389,11 @@ static noinline_for_stack int ethtool_set_rxfh(struct net_device *dev, return -EOPNOTSUPP; /* Check input data transformation capabilities */ if (rxfh.input_xfrm && rxfh.input_xfrm != RXH_XFRM_SYM_XOR && + rxfh.input_xfrm != RXH_XFRM_SYM_OR_XOR && rxfh.input_xfrm != RXH_XFRM_NO_CHANGE) return -EINVAL; if (rxfh.input_xfrm != RXH_XFRM_NO_CHANGE && - (rxfh.input_xfrm & RXH_XFRM_SYM_XOR) && - !ops->cap_rss_sym_xor_supported) + rxfh.input_xfrm & ~ops->supported_input_xfrm) return -EOPNOTSUPP; create = rxfh.rss_context == ETH_RXFH_CONTEXT_ALLOC; @@ -2059,8 +2066,8 @@ static int ethtool_get_ringparam(struct net_device *dev, void __user *useraddr) static int ethtool_set_ringparam(struct net_device *dev, void __user *useraddr) { - struct ethtool_ringparam ringparam, max = { .cmd = ETHTOOL_GRINGPARAM }; struct kernel_ethtool_ringparam kernel_ringparam; + struct ethtool_ringparam ringparam, max; int ret; if (!dev->ethtool_ops->set_ringparam || !dev->ethtool_ops->get_ringparam) @@ -2069,7 +2076,7 @@ static int ethtool_set_ringparam(struct net_device *dev, void __user *useraddr) if (copy_from_user(&ringparam, useraddr, sizeof(ringparam))) return -EFAULT; - dev->ethtool_ops->get_ringparam(dev, &max, &kernel_ringparam, NULL); + ethtool_ringparam_get_cfg(dev, &max, &kernel_ringparam, NULL); /* ensure new ring parameters are within the maximums */ if (ringparam.rx_pending > max.rx_max_pending || @@ -2311,6 +2318,7 @@ static int ethtool_phys_id(struct net_device *dev, void __user *useraddr) */ busy = true; netdev_hold(dev, &dev_tracker, GFP_KERNEL); + netdev_unlock_ops(dev); rtnl_unlock(); if (rc == 0) { @@ -2325,8 +2333,10 @@ static int ethtool_phys_id(struct net_device *dev, void __user *useraddr) do { rtnl_lock(); + netdev_lock_ops(dev); rc = ops->set_phys_id(dev, (i++ & 1) ? 
ETHTOOL_ID_OFF : ETHTOOL_ID_ON); + netdev_unlock_ops(dev); rtnl_unlock(); if (rc) break; @@ -2335,6 +2345,7 @@ static int ethtool_phys_id(struct net_device *dev, void __user *useraddr) } rtnl_lock(); + netdev_lock_ops(dev); netdev_put(dev, &dev_tracker); busy = false; @@ -3134,6 +3145,7 @@ __dev_ethtool(struct net *net, struct ifreq *ifr, void __user *useraddr, return -EPERM; } + netdev_lock_ops(dev); if (dev->dev.parent) pm_runtime_get_sync(dev->dev.parent); @@ -3367,6 +3379,7 @@ __dev_ethtool(struct net *net, struct ifreq *ifr, void __user *useraddr, out: if (dev->dev.parent) pm_runtime_put(dev->dev.parent); + netdev_unlock_ops(dev); return rc; } diff --git a/net/ethtool/linkstate.c b/net/ethtool/linkstate.c index af19e1bed303f..05a5f72c99fab 100644 --- a/net/ethtool/linkstate.c +++ b/net/ethtool/linkstate.c @@ -103,7 +103,7 @@ static int linkstate_prepare_data(const struct ethnl_req_info *req_base, struct phy_device *phydev; int ret; - phydev = ethnl_req_get_phydev(req_base, tb[ETHTOOL_A_LINKSTATE_HEADER], + phydev = ethnl_req_get_phydev(req_base, tb, ETHTOOL_A_LINKSTATE_HEADER, info->extack); if (IS_ERR(phydev)) { ret = PTR_ERR(phydev); diff --git a/net/ethtool/module.c b/net/ethtool/module.c index 6988e07bdcd6d..4d4e0a82579a2 100644 --- a/net/ethtool/module.c +++ b/net/ethtool/module.c @@ -4,6 +4,7 @@ #include #include #include +#include #include "netlink.h" #include "common.h" @@ -419,19 +420,21 @@ int ethnl_act_module_fw_flash(struct sk_buff *skb, struct genl_info *info) dev = req_info.dev; rtnl_lock(); + netdev_lock_ops(dev); ret = ethnl_ops_begin(dev); if (ret < 0) - goto out_rtnl; + goto out_unlock; ret = ethnl_module_fw_flash_validate(dev, info->extack); if (ret < 0) - goto out_rtnl; + goto out_unlock; ret = module_flash_fw(dev, tb, skb, info); ethnl_ops_complete(dev); -out_rtnl: +out_unlock: + netdev_unlock_ops(dev); rtnl_unlock(); ethnl_parse_header_dev_put(&req_info); return ret; diff --git a/net/ethtool/netlink.c b/net/ethtool/netlink.c index b4c45207fa32e..217024af68fd3 100644 --- a/net/ethtool/netlink.c +++ b/net/ethtool/netlink.c @@ -1,5 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only +#include #include #include #include @@ -90,6 +91,8 @@ int ethnl_ops_begin(struct net_device *dev) if (dev->dev.parent) pm_runtime_get_sync(dev->dev.parent); + netdev_ops_assert_locked(dev); + if (!netif_device_present(dev) || dev->reg_state >= NETREG_UNREGISTERING) { ret = -ENODEV; @@ -211,7 +214,7 @@ int ethnl_parse_header_dev_get(struct ethnl_req_info *req_info, } struct phy_device *ethnl_req_get_phydev(const struct ethnl_req_info *req_info, - const struct nlattr *header, + struct nlattr **tb, unsigned int header, struct netlink_ext_ack *extack) { struct phy_device *phydev; @@ -225,8 +228,8 @@ struct phy_device *ethnl_req_get_phydev(const struct ethnl_req_info *req_info, return req_info->dev->phydev; phydev = phy_link_topo_get_phy(req_info->dev, req_info->phy_index); - if (!phydev) { - NL_SET_ERR_MSG_ATTR(extack, header, + if (!phydev && tb) { + NL_SET_ERR_MSG_ATTR(extack, tb[header], "no phy matching phyindex"); return ERR_PTR(-ENODEV); } @@ -336,24 +339,6 @@ int ethnl_multicast(struct sk_buff *skb, struct net_device *dev) /* GET request helpers */ -/** - * struct ethnl_dump_ctx - context structure for generic dumpit() callback - * @ops: request ops of currently processed message type - * @req_info: parsed request header of processed request - * @reply_data: data needed to compose the reply - * @pos_ifindex: saved iteration position - ifindex - * - * These parameters are kept in struct 
netlink_callback as context preserved - * between iterations. They are initialized by ethnl_default_start() and used - * in ethnl_default_dumpit() and ethnl_default_done(). - */ -struct ethnl_dump_ctx { - const struct ethnl_request_ops *ops; - struct ethnl_req_info *req_info; - struct ethnl_reply_data *reply_data; - unsigned long pos_ifindex; -}; - static const struct ethnl_request_ops * ethnl_default_requests[__ETHTOOL_MSG_USER_CNT] = { [ETHTOOL_MSG_STRSET_GET] = &ethnl_strset_request_ops, @@ -397,6 +382,7 @@ ethnl_default_requests[__ETHTOOL_MSG_USER_CNT] = { [ETHTOOL_MSG_MM_SET] = &ethnl_mm_request_ops, [ETHTOOL_MSG_TSCONFIG_GET] = &ethnl_tsconfig_request_ops, [ETHTOOL_MSG_TSCONFIG_SET] = &ethnl_tsconfig_request_ops, + [ETHTOOL_MSG_PHY_GET] = &ethnl_phy_request_ops, }; static struct ethnl_dump_ctx *ethnl_dump_context(struct netlink_callback *cb) @@ -490,7 +476,11 @@ static int ethnl_default_doit(struct sk_buff *skb, struct genl_info *info) ethnl_init_reply_data(reply_data, ops, req_info->dev); rtnl_lock(); + if (req_info->dev) + netdev_lock_ops(req_info->dev); ret = ops->prepare_data(req_info, reply_data, info); + if (req_info->dev) + netdev_unlock_ops(req_info->dev); rtnl_unlock(); if (ret < 0) goto err_cleanup; @@ -533,7 +523,7 @@ static int ethnl_default_doit(struct sk_buff *skb, struct genl_info *info) return ret; } -static int ethnl_default_dump_one(struct sk_buff *skb, struct net_device *dev, +static int ethnl_default_dump_one(struct sk_buff *skb, const struct ethnl_dump_ctx *ctx, const struct genl_info *info) { @@ -546,13 +536,15 @@ static int ethnl_default_dump_one(struct sk_buff *skb, struct net_device *dev, if (!ehdr) return -EMSGSIZE; - ethnl_init_reply_data(ctx->reply_data, ctx->ops, dev); rtnl_lock(); + netdev_lock_ops(ctx->reply_data->dev); ret = ctx->ops->prepare_data(ctx->req_info, ctx->reply_data, info); + netdev_unlock_ops(ctx->reply_data->dev); rtnl_unlock(); if (ret < 0) goto out; - ret = ethnl_fill_reply_header(skb, dev, ctx->ops->hdr_attr); + ret = ethnl_fill_reply_header(skb, ctx->reply_data->dev, + ctx->ops->hdr_attr); if (ret < 0) goto out; ret = ctx->ops->fill_reply(skb, ctx->req_info, ctx->reply_data); @@ -560,17 +552,111 @@ static int ethnl_default_dump_one(struct sk_buff *skb, struct net_device *dev, out: if (ctx->ops->cleanup_data) ctx->ops->cleanup_data(ctx->reply_data); - ctx->reply_data->dev = NULL; + if (ret < 0) genlmsg_cancel(skb, ehdr); else genlmsg_end(skb, ehdr); + return ret; } -/* Default ->dumpit() handler for GET requests. */ -static int ethnl_default_dumpit(struct sk_buff *skb, - struct netlink_callback *cb) +/* Specific context for phy-targeting command DUMP operations. We keep in context + * the latest phy_index we dumped, in case of an interrupted DUMP. + */ +struct ethnl_dump_ctx_perphy { + unsigned long phy_index; +}; + +/** + * ethnl_dump_start_perphy() - Initialize a dump for PHY cmds + * @ctx: Generic ethnl dump context whose cmd_ctx will be initialized + * + * Initializes a dump context for ethnl commands that may return + * one message per PHY on a single netdev. + * + * Returns: 0 for success, a negative value for errors.
+ */ +int ethnl_dump_start_perphy(struct ethnl_dump_ctx *ctx) +{ + struct ethnl_dump_ctx_perphy *dump_ctx; + + dump_ctx = kzalloc(sizeof(*dump_ctx), GFP_KERNEL); + if (!dump_ctx) + return -ENOMEM; + + ctx->cmd_ctx = dump_ctx; + + return 0; +} + +/** + * ethnl_dump_done_perphy() - Releases the per-phy dump context + * @ctx: Generic ethnl dump context whose cmd_ctx will be released + */ +void ethnl_dump_done_perphy(struct ethnl_dump_ctx *ctx) +{ + kfree(ctx->cmd_ctx); +} + +/** + * ethnl_dump_one_dev_perphy() - Dump all PHY-related messages for one netdev + * @skb: skb containing the DUMP result + * @ctx: Dump context. Will be kept across the DUMP operation. + * @info: Genl receive info + * + * Some commands are related to PHY devices attached to netdevs. As there may be + * multiple PHYs, this DUMP handler will populate the reply with one message per + * PHY on a single netdev. + * + * Returns: 0 for success or when nothing to do, a negative value otherwise. + */ +int ethnl_dump_one_dev_perphy(struct sk_buff *skb, + struct ethnl_dump_ctx *ctx, + const struct genl_info *info) +{ + struct ethnl_dump_ctx_perphy *dump_ctx = ctx->cmd_ctx; + struct net_device *dev = ctx->reply_data->dev; + struct phy_device_node *pdn; + int ret = 0; + + if (!dev->link_topo) + return 0; + + xa_for_each_start(&dev->link_topo->phys, dump_ctx->phy_index, + pdn, dump_ctx->phy_index) { + ctx->req_info->phy_index = dump_ctx->phy_index; + + /* We can re-use the original dump_one, as ->prepare_data in + * these commands uses ethnl_req_get_phydev(), which gets the PHY + * from what's in req_info. + */ + ret = ethnl_default_dump_one(skb, ctx, info); + if (ret) + break; + } + + return ret; +} + +static int ethnl_default_dump_one_dev(struct sk_buff *skb, struct net_device *dev, + struct ethnl_dump_ctx *ctx, + const struct genl_info *info) +{ + int ret; + + ethnl_init_reply_data(ctx->reply_data, ctx->ops, dev); + + if (ctx->ops->dump_one_dev) + ret = ctx->ops->dump_one_dev(skb, ctx, info); + else + ret = ethnl_default_dump_one(skb, ctx, info); + + ctx->reply_data->dev = NULL; + return ret; +} + +static int ethnl_dump_all(struct sk_buff *skb, struct netlink_callback *cb) { struct ethnl_dump_ctx *ctx = ethnl_dump_context(cb); struct net *net = sock_net(skb->sk); @@ -582,7 +668,9 @@ static int ethnl_default_dumpit(struct sk_buff *skb, dev_hold(dev); rcu_read_unlock(); - ret = ethnl_default_dump_one(skb, dev, ctx, genl_info_dump(cb)); + ctx->req_info->dev = dev; + ret = ethnl_default_dump_one_dev(skb, dev, ctx, + genl_info_dump(cb)); rcu_read_lock(); dev_put(dev); @@ -599,6 +687,31 @@ static int ethnl_default_dumpit(struct sk_buff *skb, return ret; } +/* Default ->dumpit() handler for GET requests. */ +static int ethnl_default_dumpit(struct sk_buff *skb, + struct netlink_callback *cb) +{ + struct ethnl_dump_ctx *ctx = ethnl_dump_context(cb); + int ret; + + if (ctx->req_info->dev) { + /* Filtered DUMP request targeted to a single netdev.
We already + * hold a ref to the netdev from ->start() + */ + ret = ethnl_default_dump_one_dev(skb, ctx->req_info->dev, ctx, + genl_info_dump(cb)); + netdev_put(ctx->req_info->dev, &ctx->req_info->dev_tracker); + + if (ret < 0 && ret != -EOPNOTSUPP && likely(skb->len)) + ret = skb->len; + + } else { + ret = ethnl_dump_all(skb, cb); + } + + return ret; +} + /* generic ->start() handler for GET requests */ static int ethnl_default_start(struct netlink_callback *cb) { @@ -626,10 +739,10 @@ static int ethnl_default_start(struct netlink_callback *cb) } ret = ethnl_default_parse(req_info, &info->info, ops, false); - if (req_info->dev) { - /* We ignore device specification in dump requests but as the - * same parser as for non-dump (doit) requests is used, it - * would take reference to the device if it finds one + if (req_info->dev && !ops->allow_pernetdev_dump) { + /* We ignore device specification in unfiltered dump requests + * but as the same parser as for non-dump (doit) requests is + * used, it would take a reference to the device if it finds one */ netdev_put(req_info->dev, &req_info->dev_tracker); req_info->dev = NULL; @@ -642,6 +755,12 @@ ctx->reply_data = reply_data; ctx->pos_ifindex = 0; + if (ctx->ops->dump_start) { + ret = ctx->ops->dump_start(ctx); + if (ret) + goto free_reply_data; + } + return 0; free_reply_data: @@ -657,6 +776,9 @@ static int ethnl_default_done(struct netlink_callback *cb) { struct ethnl_dump_ctx *ctx = ethnl_dump_context(cb); + if (ctx->ops->dump_done) + ctx->ops->dump_done(ctx); + kfree(ctx->reply_data); kfree(ctx->req_info); @@ -693,6 +815,7 @@ static int ethnl_default_set_doit(struct sk_buff *skb, struct genl_info *info) dev = req_info.dev; rtnl_lock(); + netdev_lock_ops(dev); dev->cfg_pending = kmemdup(dev->cfg, sizeof(*dev->cfg), GFP_KERNEL_ACCOUNT); if (!dev->cfg_pending) { @@ -720,6 +843,7 @@ static int ethnl_default_set_doit(struct sk_buff *skb, struct genl_info *info) kfree(dev->cfg_pending); out_tie_cfg: dev->cfg_pending = dev->cfg; + netdev_unlock_ops(dev); rtnl_unlock(); out_dev: ethnl_parse_header_dev_put(&req_info); @@ -777,6 +901,8 @@ static void ethnl_default_notify(struct net_device *dev, unsigned int cmd, req_info->dev = dev; req_info->flags |= ETHTOOL_FLAG_COMPACT_BITSETS; + netdev_ops_assert_locked(dev); + ethnl_init_reply_data(reply_data, ops, dev); ret = ops->prepare_data(req_info, reply_data, &info); if (ret < 0) @@ -1256,10 +1382,10 @@ static const struct genl_ops ethtool_genl_ops[] = { }, { .cmd = ETHTOOL_MSG_PHY_GET, - .doit = ethnl_phy_doit, - .start = ethnl_phy_start, - .dumpit = ethnl_phy_dumpit, - .done = ethnl_phy_done, + .doit = ethnl_default_doit, + .start = ethnl_default_start, + .dumpit = ethnl_default_dumpit, + .done = ethnl_default_done, .policy = ethnl_phy_get_policy, .maxattr = ARRAY_SIZE(ethnl_phy_get_policy) - 1, }, diff --git a/net/ethtool/netlink.h b/net/ethtool/netlink.h index ff69ca0715dea..60f16090640f8 100644 --- a/net/ethtool/netlink.h +++ b/net/ethtool/netlink.h @@ -275,7 +275,8 @@ static inline void ethnl_parse_header_dev_put(struct ethnl_req_info *req_info) * ethnl_req_get_phydev() - Gets the phy_device targeted by this request, * if any. Must be called under rtnl_lock(). * @req_info: The ethnl request to get the phy from. - * @header: The netlink header, used for error reporting. + * @tb: The netlink attributes array, for error reporting. + * @header: The netlink header index, used for error reporting.
* @extack: The netlink extended ACK, for error reporting. * * The caller must hold RTNL, until it's done interacting with the returned @@ -289,7 +290,7 @@ static inline void ethnl_parse_header_dev_put(struct ethnl_req_info *req_info) * is returned. */ struct phy_device *ethnl_req_get_phydev(const struct ethnl_req_info *req_info, - const struct nlattr *header, + struct nlattr **tb, unsigned int header, struct netlink_ext_ack *extack); /** @@ -306,6 +307,32 @@ struct ethnl_reply_data { struct net_device *dev; }; +/** + * struct ethnl_dump_ctx - context structure for generic dumpit() callback + * @ops: request ops of currently processed message type + * @req_info: parsed request header of processed request + * @reply_data: data needed to compose the reply + * @pos_ifindex: saved iteration position - ifindex + * @cmd_ctx: command-specific context to maintain across the dump. + * + * These parameters are kept in struct netlink_callback as context preserved + * between iterations. They are initialized by ethnl_default_start() and used + * in ethnl_default_dumpit() and ethnl_default_done(). + */ +struct ethnl_dump_ctx { + const struct ethnl_request_ops *ops; + struct ethnl_req_info *req_info; + struct ethnl_reply_data *reply_data; + unsigned long pos_ifindex; + void *cmd_ctx; +}; + +/* Generic callbacks to be used by PHY targeting commands */ +int ethnl_dump_start_perphy(struct ethnl_dump_ctx *ctx); +int ethnl_dump_one_dev_perphy(struct sk_buff *skb, struct ethnl_dump_ctx *ctx, + const struct genl_info *info); +void ethnl_dump_done_perphy(struct ethnl_dump_ctx *ctx); + int ethnl_ops_begin(struct net_device *dev); void ethnl_ops_complete(struct net_device *dev); @@ -330,6 +357,7 @@ int ethnl_sock_priv_set(struct sk_buff *skb, struct net_device *dev, u32 portid, * @req_info_size: size of request info * @reply_data_size: size of reply data * @allow_nodev_do: allow non-dump request with no device identification + * @allow_pernetdev_dump: allow filtering dump requests with ifname/ifindex * @set_ntf_cmd: notification to generate on changes (SET) * @parse_request: * Parse request except common header (struct ethnl_req_info). Common @@ -371,6 +399,15 @@ int ethnl_sock_priv_set(struct sk_buff *skb, struct net_device *dev, u32 portid, * - 0 if no configuration has changed * - 1 if configuration changed and notification should be generated * - negative errno on errors + * @dump_start: + * Optional callback to prepare a dump operation, should there be a need + * to maintain some context across the dump. + * @dump_one_dev: + * Optional callback to generate all messages for a given netdev. This + * is relevant only when a request can produce different results for the + * same netdev depending on command-specific attributes. + * @dump_done: + * Optional callback to cleanup any context allocated in ->dump_start() * * Description of variable parts of GET request handling when using the * unified infrastructure. 
When used, a pointer to an instance of this @@ -387,6 +424,7 @@ struct ethnl_request_ops { unsigned int req_info_size; unsigned int reply_data_size; bool allow_nodev_do; + bool allow_pernetdev_dump; u8 set_ntf_cmd; int (*parse_request)(struct ethnl_req_info *req_info, @@ -406,6 +444,12 @@ struct ethnl_request_ops { struct genl_info *info); int (*set)(struct ethnl_req_info *req_info, struct genl_info *info); + + int (*dump_start)(struct ethnl_dump_ctx *ctx); + int (*dump_one_dev)(struct sk_buff *skb, + struct ethnl_dump_ctx *ctx, + const struct genl_info *info); + void (*dump_done)(struct ethnl_dump_ctx *ctx); }; /* request handlers */ @@ -498,10 +542,6 @@ int ethnl_tunnel_info_dumpit(struct sk_buff *skb, struct netlink_callback *cb); int ethnl_act_module_fw_flash(struct sk_buff *skb, struct genl_info *info); int ethnl_rss_dump_start(struct netlink_callback *cb); int ethnl_rss_dumpit(struct sk_buff *skb, struct netlink_callback *cb); -int ethnl_phy_start(struct netlink_callback *cb); -int ethnl_phy_doit(struct sk_buff *skb, struct genl_info *info); -int ethnl_phy_dumpit(struct sk_buff *skb, struct netlink_callback *cb); -int ethnl_phy_done(struct netlink_callback *cb); int ethnl_tsinfo_start(struct netlink_callback *cb); int ethnl_tsinfo_dumpit(struct sk_buff *skb, struct netlink_callback *cb); int ethnl_tsinfo_done(struct netlink_callback *cb); diff --git a/net/ethtool/phy.c b/net/ethtool/phy.c index ed8f690f6bac8..88343fed73666 100644 --- a/net/ethtool/phy.c +++ b/net/ethtool/phy.c @@ -9,298 +9,163 @@ #include #include #include +#include struct phy_req_info { - struct ethnl_req_info base; - struct phy_device_node *pdn; + struct ethnl_req_info base; }; -#define PHY_REQINFO(__req_base) \ - container_of(__req_base, struct phy_req_info, base) +struct phy_reply_data { + struct ethnl_reply_data base; + u32 phyindex; + char *drvname; + char *name; + unsigned int upstream_type; + char *upstream_sfp_name; + unsigned int upstream_index; + char *downstream_sfp_name; +}; + +#define PHY_REPDATA(__reply_base) \ + container_of(__reply_base, struct phy_reply_data, base) const struct nla_policy ethnl_phy_get_policy[ETHTOOL_A_PHY_HEADER + 1] = { [ETHTOOL_A_PHY_HEADER] = NLA_POLICY_NESTED(ethnl_header_policy), }; -/* Caller holds rtnl */ -static ssize_t -ethnl_phy_reply_size(const struct ethnl_req_info *req_base, - struct netlink_ext_ack *extack) +static int phy_reply_size(const struct ethnl_req_info *req_info, + const struct ethnl_reply_data *reply_data) { - struct phy_req_info *req_info = PHY_REQINFO(req_base); - struct phy_device_node *pdn = req_info->pdn; - struct phy_device *phydev = pdn->phy; + struct phy_reply_data *rep_data = PHY_REPDATA(reply_data); size_t size = 0; - ASSERT_RTNL(); - /* ETHTOOL_A_PHY_INDEX */ size += nla_total_size(sizeof(u32)); /* ETHTOOL_A_DRVNAME */ - if (phydev->drv) - size += nla_total_size(strlen(phydev->drv->name) + 1); + if (rep_data->drvname) + size += nla_total_size(strlen(rep_data->drvname) + 1); /* ETHTOOL_A_NAME */ - size += nla_total_size(strlen(dev_name(&phydev->mdio.dev)) + 1); + size += nla_total_size(strlen(rep_data->name) + 1); /* ETHTOOL_A_PHY_UPSTREAM_TYPE */ size += nla_total_size(sizeof(u32)); - if (phy_on_sfp(phydev)) { - const char *upstream_sfp_name = sfp_get_name(pdn->parent_sfp_bus); - - /* ETHTOOL_A_PHY_UPSTREAM_SFP_NAME */ - if (upstream_sfp_name) - size += nla_total_size(strlen(upstream_sfp_name) + 1); + /* ETHTOOL_A_PHY_UPSTREAM_SFP_NAME */ + if (rep_data->upstream_sfp_name) + size += nla_total_size(strlen(rep_data->upstream_sfp_name) + 1); - /* 
ETHTOOL_A_PHY_UPSTREAM_INDEX */ + /* ETHTOOL_A_PHY_UPSTREAM_INDEX */ + if (rep_data->upstream_index) size += nla_total_size(sizeof(u32)); - } /* ETHTOOL_A_PHY_DOWNSTREAM_SFP_NAME */ - if (phydev->sfp_bus) { - const char *sfp_name = sfp_get_name(phydev->sfp_bus); - - if (sfp_name) - size += nla_total_size(strlen(sfp_name) + 1); - } + if (rep_data->downstream_sfp_name) + size += nla_total_size(strlen(rep_data->downstream_sfp_name) + 1); return size; } -static int -ethnl_phy_fill_reply(const struct ethnl_req_info *req_base, struct sk_buff *skb) +static int phy_prepare_data(const struct ethnl_req_info *req_info, + struct ethnl_reply_data *reply_data, + const struct genl_info *info) { - struct phy_req_info *req_info = PHY_REQINFO(req_base); - struct phy_device_node *pdn = req_info->pdn; - struct phy_device *phydev = pdn->phy; - enum phy_upstream ptype; + struct phy_link_topology *topo = reply_data->dev->link_topo; + struct phy_reply_data *rep_data = PHY_REPDATA(reply_data); + struct nlattr **tb = info->attrs; + struct phy_device_node *pdn; + struct phy_device *phydev; - ptype = pdn->upstream_type; + /* RTNL is held by the caller */ + phydev = ethnl_req_get_phydev(req_info, tb, ETHTOOL_A_PHY_HEADER, + info->extack); + if (IS_ERR_OR_NULL(phydev)) + return -EOPNOTSUPP; - if (nla_put_u32(skb, ETHTOOL_A_PHY_INDEX, phydev->phyindex) || - nla_put_string(skb, ETHTOOL_A_PHY_NAME, dev_name(&phydev->mdio.dev)) || - nla_put_u32(skb, ETHTOOL_A_PHY_UPSTREAM_TYPE, ptype)) - return -EMSGSIZE; + pdn = xa_load(&topo->phys, phydev->phyindex); + if (!pdn) + return -EOPNOTSUPP; - if (phydev->drv && - nla_put_string(skb, ETHTOOL_A_PHY_DRVNAME, phydev->drv->name)) - return -EMSGSIZE; + rep_data->phyindex = phydev->phyindex; + rep_data->name = kstrdup(dev_name(&phydev->mdio.dev), GFP_KERNEL); + rep_data->drvname = kstrdup(phydev->drv->name, GFP_KERNEL); + rep_data->upstream_type = pdn->upstream_type; - if (ptype == PHY_UPSTREAM_PHY) { + if (pdn->upstream_type == PHY_UPSTREAM_PHY) { struct phy_device *upstream = pdn->upstream.phydev; - const char *sfp_upstream_name; - - /* Parent index */ - if (nla_put_u32(skb, ETHTOOL_A_PHY_UPSTREAM_INDEX, upstream->phyindex)) - return -EMSGSIZE; - - if (pdn->parent_sfp_bus) { - sfp_upstream_name = sfp_get_name(pdn->parent_sfp_bus); - if (sfp_upstream_name && - nla_put_string(skb, ETHTOOL_A_PHY_UPSTREAM_SFP_NAME, - sfp_upstream_name)) - return -EMSGSIZE; - } + rep_data->upstream_index = upstream->phyindex; } - if (phydev->sfp_bus) { - const char *sfp_name = sfp_get_name(phydev->sfp_bus); + if (pdn->parent_sfp_bus) + rep_data->upstream_sfp_name = kstrdup(sfp_get_name(pdn->parent_sfp_bus), + GFP_KERNEL); - if (sfp_name && - nla_put_string(skb, ETHTOOL_A_PHY_DOWNSTREAM_SFP_NAME, - sfp_name)) - return -EMSGSIZE; - } + if (phydev->sfp_bus) + rep_data->downstream_sfp_name = kstrdup(sfp_get_name(phydev->sfp_bus), + GFP_KERNEL); return 0; } -static int ethnl_phy_parse_request(struct ethnl_req_info *req_base, - struct nlattr **tb, - struct netlink_ext_ack *extack) +static int phy_fill_reply(struct sk_buff *skb, + const struct ethnl_req_info *req_info, + const struct ethnl_reply_data *reply_data) { - struct phy_link_topology *topo = req_base->dev->link_topo; - struct phy_req_info *req_info = PHY_REQINFO(req_base); - struct phy_device *phydev; - - phydev = ethnl_req_get_phydev(req_base, tb[ETHTOOL_A_PHY_HEADER], - extack); - if (!phydev) - return 0; - - if (IS_ERR(phydev)) - return PTR_ERR(phydev); - - if (!topo) - return 0; - - req_info->pdn = xa_load(&topo->phys, phydev->phyindex); - - return 
0; -} - -int ethnl_phy_doit(struct sk_buff *skb, struct genl_info *info) -{ - struct phy_req_info req_info = {}; - struct nlattr **tb = info->attrs; - struct sk_buff *rskb; - void *reply_payload; - int reply_len; - int ret; - - ret = ethnl_parse_header_dev_get(&req_info.base, - tb[ETHTOOL_A_PHY_HEADER], - genl_info_net(info), info->extack, - true); - if (ret < 0) - return ret; - - rtnl_lock(); - - ret = ethnl_phy_parse_request(&req_info.base, tb, info->extack); - if (ret < 0) - goto err_unlock_rtnl; - - /* No PHY, return early */ - if (!req_info.pdn) - goto err_unlock_rtnl; - - ret = ethnl_phy_reply_size(&req_info.base, info->extack); - if (ret < 0) - goto err_unlock_rtnl; - reply_len = ret + ethnl_reply_header_size(); - - rskb = ethnl_reply_init(reply_len, req_info.base.dev, - ETHTOOL_MSG_PHY_GET_REPLY, - ETHTOOL_A_PHY_HEADER, - info, &reply_payload); - if (!rskb) { - ret = -ENOMEM; - goto err_unlock_rtnl; - } + struct phy_reply_data *rep_data = PHY_REPDATA(reply_data); - ret = ethnl_phy_fill_reply(&req_info.base, rskb); - if (ret) - goto err_free_msg; - - rtnl_unlock(); - ethnl_parse_header_dev_put(&req_info.base); - genlmsg_end(rskb, reply_payload); - - return genlmsg_reply(rskb, info); - -err_free_msg: - nlmsg_free(rskb); -err_unlock_rtnl: - rtnl_unlock(); - ethnl_parse_header_dev_put(&req_info.base); - return ret; -} - -struct ethnl_phy_dump_ctx { - struct phy_req_info *phy_req_info; - unsigned long ifindex; - unsigned long phy_index; -}; - -int ethnl_phy_start(struct netlink_callback *cb) -{ - const struct genl_info *info = genl_info_dump(cb); - struct ethnl_phy_dump_ctx *ctx = (void *)cb->ctx; - int ret; + if (nla_put_u32(skb, ETHTOOL_A_PHY_INDEX, rep_data->phyindex) || + nla_put_string(skb, ETHTOOL_A_PHY_NAME, rep_data->name) || + nla_put_u32(skb, ETHTOOL_A_PHY_UPSTREAM_TYPE, rep_data->upstream_type)) + return -EMSGSIZE; - BUILD_BUG_ON(sizeof(*ctx) > sizeof(cb->ctx)); + if (rep_data->drvname && + nla_put_string(skb, ETHTOOL_A_PHY_DRVNAME, rep_data->drvname)) + return -EMSGSIZE; - ctx->phy_req_info = kzalloc(sizeof(*ctx->phy_req_info), GFP_KERNEL); - if (!ctx->phy_req_info) - return -ENOMEM; + if (rep_data->upstream_index && + nla_put_u32(skb, ETHTOOL_A_PHY_UPSTREAM_INDEX, + rep_data->upstream_index)) + return -EMSGSIZE; - ret = ethnl_parse_header_dev_get(&ctx->phy_req_info->base, - info->attrs[ETHTOOL_A_PHY_HEADER], - sock_net(cb->skb->sk), cb->extack, - false); - ctx->ifindex = 0; - ctx->phy_index = 0; + if (rep_data->upstream_sfp_name && + nla_put_string(skb, ETHTOOL_A_PHY_UPSTREAM_SFP_NAME, + rep_data->upstream_sfp_name)) + return -EMSGSIZE; - if (ret) - kfree(ctx->phy_req_info); + if (rep_data->downstream_sfp_name && + nla_put_string(skb, ETHTOOL_A_PHY_DOWNSTREAM_SFP_NAME, + rep_data->downstream_sfp_name)) + return -EMSGSIZE; - return ret; + return 0; } -int ethnl_phy_done(struct netlink_callback *cb) +static void phy_cleanup_data(struct ethnl_reply_data *reply_data) { - struct ethnl_phy_dump_ctx *ctx = (void *)cb->ctx; - - if (ctx->phy_req_info->base.dev) - ethnl_parse_header_dev_put(&ctx->phy_req_info->base); - - kfree(ctx->phy_req_info); + struct phy_reply_data *rep_data = PHY_REPDATA(reply_data); - return 0; + kfree(rep_data->drvname); + kfree(rep_data->name); + kfree(rep_data->upstream_sfp_name); + kfree(rep_data->downstream_sfp_name); } -static int ethnl_phy_dump_one_dev(struct sk_buff *skb, struct net_device *dev, - struct netlink_callback *cb) -{ - struct ethnl_phy_dump_ctx *ctx = (void *)cb->ctx; - struct phy_req_info *pri = ctx->phy_req_info; - struct phy_device_node 
*pdn; - int ret = 0; - void *ehdr; - - if (!dev->link_topo) - return 0; - - xa_for_each_start(&dev->link_topo->phys, ctx->phy_index, pdn, ctx->phy_index) { - ehdr = ethnl_dump_put(skb, cb, ETHTOOL_MSG_PHY_GET_REPLY); - if (!ehdr) { - ret = -EMSGSIZE; - break; - } - - ret = ethnl_fill_reply_header(skb, dev, ETHTOOL_A_PHY_HEADER); - if (ret < 0) { - genlmsg_cancel(skb, ehdr); - break; - } - - pri->pdn = pdn; - ret = ethnl_phy_fill_reply(&pri->base, skb); - if (ret < 0) { - genlmsg_cancel(skb, ehdr); - break; - } - - genlmsg_end(skb, ehdr); - } +const struct ethnl_request_ops ethnl_phy_request_ops = { + .request_cmd = ETHTOOL_MSG_PHY_GET, + .reply_cmd = ETHTOOL_MSG_PHY_GET_REPLY, + .hdr_attr = ETHTOOL_A_PHY_HEADER, + .req_info_size = sizeof(struct phy_req_info), + .reply_data_size = sizeof(struct phy_reply_data), - return ret; -} + .prepare_data = phy_prepare_data, + .reply_size = phy_reply_size, + .fill_reply = phy_fill_reply, + .cleanup_data = phy_cleanup_data, -int ethnl_phy_dumpit(struct sk_buff *skb, struct netlink_callback *cb) -{ - struct ethnl_phy_dump_ctx *ctx = (void *)cb->ctx; - struct net *net = sock_net(skb->sk); - struct net_device *dev; - int ret = 0; - - rtnl_lock(); - - if (ctx->phy_req_info->base.dev) { - ret = ethnl_phy_dump_one_dev(skb, ctx->phy_req_info->base.dev, cb); - } else { - for_each_netdev_dump(net, dev, ctx->ifindex) { - ret = ethnl_phy_dump_one_dev(skb, dev, cb); - if (ret) - break; - - ctx->phy_index = 0; - } - } - rtnl_unlock(); + .dump_start = ethnl_dump_start_perphy, + .dump_one_dev = ethnl_dump_one_dev_perphy, + .dump_done = ethnl_dump_done_perphy, - return ret; -} + .allow_pernetdev_dump = true, +}; diff --git a/net/ethtool/plca.c b/net/ethtool/plca.c index d95d92f173a6d..2148ff6075617 100644 --- a/net/ethtool/plca.c +++ b/net/ethtool/plca.c @@ -62,7 +62,7 @@ static int plca_get_cfg_prepare_data(const struct ethnl_req_info *req_base, struct phy_device *phydev; int ret; - phydev = ethnl_req_get_phydev(req_base, tb[ETHTOOL_A_PLCA_HEADER], + phydev = ethnl_req_get_phydev(req_base, tb, ETHTOOL_A_PLCA_HEADER, info->extack); // check that the PHY device is available and connected if (IS_ERR_OR_NULL(phydev)) { @@ -152,7 +152,7 @@ ethnl_set_plca(struct ethnl_req_info *req_info, struct genl_info *info) bool mod = false; int ret; - phydev = ethnl_req_get_phydev(req_info, tb[ETHTOOL_A_PLCA_HEADER], + phydev = ethnl_req_get_phydev(req_info, tb, ETHTOOL_A_PLCA_HEADER, info->extack); // check that the PHY device is available and connected if (IS_ERR_OR_NULL(phydev)) @@ -191,6 +191,12 @@ const struct ethnl_request_ops ethnl_plca_cfg_request_ops = { .set = ethnl_set_plca, .set_ntf_cmd = ETHTOOL_MSG_PLCA_NTF, + + .dump_start = ethnl_dump_start_perphy, + .dump_one_dev = ethnl_dump_one_dev_perphy, + .dump_done = ethnl_dump_done_perphy, + + .allow_pernetdev_dump = true, }; // PLCA get status message -------------------------------------------------- // @@ -211,7 +217,7 @@ static int plca_get_status_prepare_data(const struct ethnl_req_info *req_base, struct phy_device *phydev; int ret; - phydev = ethnl_req_get_phydev(req_base, tb[ETHTOOL_A_PLCA_HEADER], + phydev = ethnl_req_get_phydev(req_base, tb, ETHTOOL_A_PLCA_HEADER, info->extack); // check that the PHY device is available and connected if (IS_ERR_OR_NULL(phydev)) { @@ -268,4 +274,10 @@ const struct ethnl_request_ops ethnl_plca_status_request_ops = { .prepare_data = plca_get_status_prepare_data, .reply_size = plca_get_status_reply_size, .fill_reply = plca_get_status_fill_reply, + + .dump_start = ethnl_dump_start_perphy, + 
.dump_one_dev = ethnl_dump_one_dev_perphy, + .dump_done = ethnl_dump_done_perphy, + + .allow_pernetdev_dump = true, }; diff --git a/net/ethtool/pse-pd.c b/net/ethtool/pse-pd.c index 2819e2ba6be2d..f3d14be8bdd9b 100644 --- a/net/ethtool/pse-pd.c +++ b/net/ethtool/pse-pd.c @@ -64,7 +64,7 @@ static int pse_prepare_data(const struct ethnl_req_info *req_base, if (ret < 0) return ret; - phydev = ethnl_req_get_phydev(req_base, tb[ETHTOOL_A_PSE_HEADER], + phydev = ethnl_req_get_phydev(req_base, tb, ETHTOOL_A_PSE_HEADER, info->extack); if (IS_ERR(phydev)) return -ENODEV; @@ -261,7 +261,7 @@ ethnl_set_pse(struct ethnl_req_info *req_info, struct genl_info *info) struct phy_device *phydev; int ret; - phydev = ethnl_req_get_phydev(req_info, tb[ETHTOOL_A_PSE_HEADER], + phydev = ethnl_req_get_phydev(req_info, tb, ETHTOOL_A_PSE_HEADER, info->extack); ret = ethnl_set_pse_validate(phydev, info); if (ret) @@ -314,4 +314,10 @@ const struct ethnl_request_ops ethnl_pse_request_ops = { .set = ethnl_set_pse, /* PSE has no notification */ + + .dump_start = ethnl_dump_start_perphy, + .dump_one_dev = ethnl_dump_one_dev_perphy, + .dump_done = ethnl_dump_done_perphy, + + .allow_pernetdev_dump = true, }; diff --git a/net/ethtool/rings.c b/net/ethtool/rings.c index 7839bfd1ac6a0..aeedd5ec6b8cd 100644 --- a/net/ethtool/rings.c +++ b/net/ethtool/rings.c @@ -215,17 +215,16 @@ ethnl_set_rings_validate(struct ethnl_req_info *req_info, static int ethnl_set_rings(struct ethnl_req_info *req_info, struct genl_info *info) { - struct kernel_ethtool_ringparam kernel_ringparam = {}; - struct ethtool_ringparam ringparam = {}; + struct kernel_ethtool_ringparam kernel_ringparam; struct net_device *dev = req_info->dev; + struct ethtool_ringparam ringparam; struct nlattr **tb = info->attrs; const struct nlattr *err_attr; bool mod = false; int ret; - dev->ethtool_ops->get_ringparam(dev, &ringparam, - &kernel_ringparam, info->extack); - kernel_ringparam.tcp_data_split = dev->cfg->hds_config; + ethtool_ringparam_get_cfg(dev, &ringparam, &kernel_ringparam, + info->extack); ethnl_update_u32(&ringparam.rx_pending, tb[ETHTOOL_A_RINGS_RX], &mod); ethnl_update_u32(&ringparam.rx_mini_pending, diff --git a/net/ethtool/rss.c b/net/ethtool/rss.c index 58df9ad02ce8a..6d9b1769896ba 100644 --- a/net/ethtool/rss.c +++ b/net/ethtool/rss.c @@ -1,5 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-only +#include + #include "netlink.h" #include "common.h" @@ -345,7 +347,9 @@ int ethnl_rss_dumpit(struct sk_buff *skb, struct netlink_callback *cb) if (ctx->match_ifindex && ctx->match_ifindex != ctx->ifindex) break; + netdev_lock_ops(dev); ret = rss_dump_one_dev(skb, cb, dev); + netdev_unlock_ops(dev); if (ret) break; } diff --git a/net/ethtool/stats.c b/net/ethtool/stats.c index 038a2558f0520..3ca8eb2a3b314 100644 --- a/net/ethtool/stats.c +++ b/net/ethtool/stats.c @@ -138,7 +138,7 @@ static int stats_prepare_data(const struct ethnl_req_info *req_base, struct phy_device *phydev; int ret; - phydev = ethnl_req_get_phydev(req_base, tb[ETHTOOL_A_STATS_HEADER], + phydev = ethnl_req_get_phydev(req_base, tb, ETHTOOL_A_STATS_HEADER, info->extack); if (IS_ERR(phydev)) return PTR_ERR(phydev); diff --git a/net/ethtool/strset.c b/net/ethtool/strset.c index 6b76c05caba4d..f6a67109beda1 100644 --- a/net/ethtool/strset.c +++ b/net/ethtool/strset.c @@ -309,7 +309,7 @@ static int strset_prepare_data(const struct ethnl_req_info *req_base, return 0; } - phydev = ethnl_req_get_phydev(req_base, tb[ETHTOOL_A_HEADER_FLAGS], + phydev = ethnl_req_get_phydev(req_base, tb, 
ETHTOOL_A_HEADER_FLAGS, info->extack); /* phydev can be NULL, check for errors only */ diff --git a/net/ethtool/tsinfo.c b/net/ethtool/tsinfo.c index 691be6c445b38..8130b406ef107 100644 --- a/net/ethtool/tsinfo.c +++ b/net/ethtool/tsinfo.c @@ -4,6 +4,7 @@ #include #include #include +#include #include "netlink.h" #include "common.h" @@ -290,7 +291,8 @@ static void *ethnl_tsinfo_prepare_dump(struct sk_buff *skb, reply_data = ctx->reply_data; memset(reply_data, 0, sizeof(*reply_data)); reply_data->base.dev = dev; - memset(&reply_data->ts_info, 0, sizeof(reply_data->ts_info)); + reply_data->ts_info.cmd = ETHTOOL_GET_TS_INFO; + reply_data->ts_info.phc_index = -1; return ehdr; } @@ -448,12 +450,15 @@ int ethnl_tsinfo_dumpit(struct sk_buff *skb, struct netlink_callback *cb) rtnl_lock(); if (ctx->req_info->base.dev) { - ret = ethnl_tsinfo_dump_one_net_topo(skb, - ctx->req_info->base.dev, - cb); + dev = ctx->req_info->base.dev; + netdev_lock_ops(dev); + ret = ethnl_tsinfo_dump_one_net_topo(skb, dev, cb); + netdev_unlock_ops(dev); } else { for_each_netdev_dump(net, dev, ctx->pos_ifindex) { + netdev_lock_ops(dev); ret = ethnl_tsinfo_dump_one_net_topo(skb, dev, cb); + netdev_unlock_ops(dev); if (ret < 0 && ret != -EOPNOTSUPP) break; ctx->pos_phyindex = 0; diff --git a/net/hsr/Kconfig b/net/hsr/Kconfig index 1b048c17b6c83..fcacdf4f0ffc3 100644 --- a/net/hsr/Kconfig +++ b/net/hsr/Kconfig @@ -38,3 +38,21 @@ config HSR relying on this code in a safety critical system! If unsure, say N. + +if HSR + +config PRP_DUP_DISCARD_KUNIT_TEST + tristate "PRP duplicate discard KUnit tests" if !KUNIT_ALL_TESTS + depends on KUNIT + default KUNIT_ALL_TESTS + help + Covers the PRP duplicate discard algorithm. + Only useful for kernel devs running the KUnit test harness; not + intended for inclusion in a production build. + + For more information on KUnit and unit tests in general please refer + to the KUnit documentation in Documentation/dev-tools/kunit/. + + If unsure, say N. + +endif diff --git a/net/hsr/Makefile b/net/hsr/Makefile index 75df90d3b4162..34e581db5c413 100644 --- a/net/hsr/Makefile +++ b/net/hsr/Makefile @@ -8,3 +8,5 @@ obj-$(CONFIG_HSR) += hsr.o hsr-y := hsr_main.o hsr_framereg.o hsr_device.o \ hsr_netlink.o hsr_slave.o hsr_forward.o hsr-$(CONFIG_DEBUG_FS) += hsr_debugfs.o + +obj-$(CONFIG_PRP_DUP_DISCARD_KUNIT_TEST) += prp_dup_discard_test.o diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c index b6fb18469439a..439cfb7ad5d14 100644 --- a/net/hsr/hsr_device.c +++ b/net/hsr/hsr_device.c @@ -616,6 +616,7 @@ static struct hsr_proto_ops hsr_ops = { .drop_frame = hsr_drop_frame, .fill_frame_info = hsr_fill_frame_info, .invalid_dan_ingress_frame = hsr_invalid_dan_ingress_frame, + .register_frame_out = hsr_register_frame_out, }; static struct hsr_proto_ops prp_ops = { @@ -626,6 +627,7 @@ static struct hsr_proto_ops prp_ops = { .fill_frame_info = prp_fill_frame_info, .handle_san_frame = prp_handle_san_frame, .update_san_info = prp_update_san_info, + .register_frame_out = prp_register_frame_out, }; void hsr_dev_setup(struct net_device *dev) @@ -643,7 +645,7 @@ void hsr_dev_setup(struct net_device *dev) /* Not sure about this. Taken from bridge code. netdevice.h says * it means "Does not change network namespaces".
*/ - dev->netns_local = true; + dev->netns_immutable = true; dev->needs_free_netdev = true; diff --git a/net/hsr/hsr_forward.c b/net/hsr/hsr_forward.c index a4bacf1985558..c67c0d35921de 100644 --- a/net/hsr/hsr_forward.c +++ b/net/hsr/hsr_forward.c @@ -536,8 +536,8 @@ static void hsr_forward_do(struct hsr_frame_info *frame) * Also for SAN, this shouldn't be done. */ if (!frame->is_from_san && - hsr_register_frame_out(port, frame->node_src, - frame->sequence_nr)) + hsr->proto_ops->register_frame_out && + hsr->proto_ops->register_frame_out(port, frame)) continue; if (frame->is_supervision && port->type == HSR_PT_MASTER && diff --git a/net/hsr/hsr_framereg.c b/net/hsr/hsr_framereg.c index 73bc6f659812f..4ce471a2f3870 100644 --- a/net/hsr/hsr_framereg.c +++ b/net/hsr/hsr_framereg.c @@ -35,6 +35,7 @@ static bool seq_nr_after(u16 a, u16 b) #define seq_nr_before(a, b) seq_nr_after((b), (a)) #define seq_nr_before_or_eq(a, b) (!seq_nr_after((a), (b))) +#define PRP_DROP_WINDOW_LEN 32768 bool hsr_addr_is_redbox(struct hsr_priv *hsr, unsigned char *addr) { @@ -176,8 +177,11 @@ static struct hsr_node *hsr_add_node(struct hsr_priv *hsr, new_node->time_in[i] = now; new_node->time_out[i] = now; } - for (i = 0; i < HSR_PT_PORTS; i++) + for (i = 0; i < HSR_PT_PORTS; i++) { new_node->seq_out[i] = seq_out; + new_node->seq_expected[i] = seq_out + 1; + new_node->seq_start[i] = seq_out + 1; + } if (san && hsr->proto_ops->handle_san_frame) hsr->proto_ops->handle_san_frame(san, rx_port, new_node); @@ -482,9 +486,11 @@ void hsr_register_frame_in(struct hsr_node *node, struct hsr_port *port, * 0 otherwise, or * negative error code on error */ -int hsr_register_frame_out(struct hsr_port *port, struct hsr_node *node, - u16 sequence_nr) +int hsr_register_frame_out(struct hsr_port *port, struct hsr_frame_info *frame) { + struct hsr_node *node = frame->node_src; + u16 sequence_nr = frame->sequence_nr; + spin_lock_bh(&node->seq_out_lock); if (seq_nr_before_or_eq(sequence_nr, node->seq_out[port->type]) && time_is_after_jiffies(node->time_out[port->type] + @@ -499,6 +505,93 @@ int hsr_register_frame_out(struct hsr_port *port, struct hsr_node *node, return 0; } +/* Adaptation of the PRP duplicate discard algorithm described in wireshark + * wiki (https://wiki.wireshark.org/PRP) + * + * A drop window is maintained for both LANs with start sequence set to the + * first sequence accepted on the LAN that has not been seen on the other LAN, + * and expected sequence set to the latest received sequence number plus one. + * + * When a frame is received on either LAN it is compared against the received + * frames on the other LAN. If it is outside the drop window of the other LAN + * the frame is accepted and the drop window is updated. + * The drop window for the other LAN is reset. 
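+ *
+ * [Editor's worked example, not part of the original comment; it just
+ * traces the rules above.] Assume both windows start empty. Frame seq=5
+ * arrives on LAN A: with both windows empty it is forwarded, A's window
+ * becomes [5, 6) (seq_start[A]=5, seq_expected[A]=6) and B's window is
+ * reset. The duplicate seq=5 then arrives on LAN B: 5 lies inside A's
+ * window [5, 6), so it is dropped, the consumed sequence is removed from
+ * A's window (seq_start[A] advances to 6) and B's window is reset. Had
+ * seq=6 arrived on LAN B instead, it would lie outside A's window and
+ * be forwarded.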
+ * + * 'port' is the outgoing interface + * 'frame' is the frame to be sent + * + * Return: + * 1 if frame can be shown to have been sent recently on this interface, + * 0 otherwise + */ +int prp_register_frame_out(struct hsr_port *port, struct hsr_frame_info *frame) +{ + enum hsr_port_type other_port; + enum hsr_port_type rcv_port; + struct hsr_node *node; + u16 sequence_diff; + u16 sequence_exp; + u16 sequence_nr; + + /* out-going frames are always in order + * and can be checked the same way as for HSR + */ + if (frame->port_rcv->type == HSR_PT_MASTER) + return hsr_register_frame_out(port, frame); + + /* for PRP we should only forward frames from the slave ports + * to the master port + */ + if (port->type != HSR_PT_MASTER) + return 1; + + node = frame->node_src; + sequence_nr = frame->sequence_nr; + sequence_exp = sequence_nr + 1; + rcv_port = frame->port_rcv->type; + other_port = rcv_port == HSR_PT_SLAVE_A ? HSR_PT_SLAVE_B : + HSR_PT_SLAVE_A; + + spin_lock_bh(&node->seq_out_lock); + if (time_is_before_jiffies(node->time_out[port->type] + + msecs_to_jiffies(HSR_ENTRY_FORGET_TIME)) || + (node->seq_start[rcv_port] == node->seq_expected[rcv_port] && + node->seq_start[other_port] == node->seq_expected[other_port])) { + /* the node hasn't been sending for a while + * or both drop windows are empty, forward the frame + */ + node->seq_start[rcv_port] = sequence_nr; + } else if (seq_nr_before(sequence_nr, node->seq_expected[other_port]) && + seq_nr_before_or_eq(node->seq_start[other_port], sequence_nr)) { + /* drop the frame, update the drop window for the other port + * and reset our drop window + */ + node->seq_start[other_port] = sequence_exp; + node->seq_expected[rcv_port] = sequence_exp; + node->seq_start[rcv_port] = node->seq_expected[rcv_port]; + spin_unlock_bh(&node->seq_out_lock); + return 1; + } + + /* update the drop window for the port where this frame was received + * and clear the drop window for the other port + */ + node->seq_start[other_port] = node->seq_expected[other_port]; + node->seq_expected[rcv_port] = sequence_exp; + sequence_diff = sequence_exp - node->seq_start[rcv_port]; + if (sequence_diff > PRP_DROP_WINDOW_LEN) + node->seq_start[rcv_port] = sequence_exp - PRP_DROP_WINDOW_LEN; + + node->time_out[port->type] = jiffies; + node->seq_out[port->type] = sequence_nr; + spin_unlock_bh(&node->seq_out_lock); + return 0; +} + +#if IS_MODULE(CONFIG_PRP_DUP_DISCARD_KUNIT_TEST) +EXPORT_SYMBOL(prp_register_frame_out); +#endif + static struct hsr_port *get_late_port(struct hsr_priv *hsr, struct hsr_node *node) { diff --git a/net/hsr/hsr_framereg.h b/net/hsr/hsr_framereg.h index 993fa950d8144..b04948659d84d 100644 --- a/net/hsr/hsr_framereg.h +++ b/net/hsr/hsr_framereg.h @@ -44,8 +44,7 @@ void hsr_addr_subst_dest(struct hsr_node *node_src, struct sk_buff *skb, void hsr_register_frame_in(struct hsr_node *node, struct hsr_port *port, u16 sequence_nr); -int hsr_register_frame_out(struct hsr_port *port, struct hsr_node *node, - u16 sequence_nr); +int hsr_register_frame_out(struct hsr_port *port, struct hsr_frame_info *frame); void hsr_prune_nodes(struct timer_list *t); void hsr_prune_proxy_nodes(struct timer_list *t); @@ -73,6 +72,8 @@ void prp_update_san_info(struct hsr_node *node, bool is_sup); bool hsr_is_node_in_db(struct list_head *node_db, const unsigned char addr[ETH_ALEN]); +int prp_register_frame_out(struct hsr_port *port, struct hsr_frame_info *frame); + struct hsr_node { struct list_head mac_list; /* Protect R/W access to seq_out */ @@ -89,6 +90,9 @@ struct hsr_node { bool 
san_b; u16 seq_out[HSR_PT_PORTS]; bool removed; + /* PRP specific duplicate handling */ + u16 seq_expected[HSR_PT_PORTS]; + u16 seq_start[HSR_PT_PORTS]; struct rcu_head rcu_head; }; diff --git a/net/hsr/hsr_main.h b/net/hsr/hsr_main.h index 7561845b8bf6f..1bc47b17a2968 100644 --- a/net/hsr/hsr_main.h +++ b/net/hsr/hsr_main.h @@ -175,6 +175,8 @@ struct hsr_proto_ops { struct hsr_frame_info *frame); bool (*invalid_dan_ingress_frame)(__be16 protocol); void (*update_san_info)(struct hsr_node *node, bool is_sup); + int (*register_frame_out)(struct hsr_port *port, + struct hsr_frame_info *frame); }; struct hsr_self_node { diff --git a/net/hsr/hsr_netlink.c b/net/hsr/hsr_netlink.c index b68f2f71d0e1f..b120470246cc5 100644 --- a/net/hsr/hsr_netlink.c +++ b/net/hsr/hsr_netlink.c @@ -29,10 +29,12 @@ static const struct nla_policy hsr_policy[IFLA_HSR_MAX + 1] = { /* Here, it seems a netdevice has already been allocated for us, and the * hsr_dev_setup routine has been executed. Nice! */ -static int hsr_newlink(struct net *src_net, struct net_device *dev, - struct nlattr *tb[], struct nlattr *data[], +static int hsr_newlink(struct net_device *dev, + struct rtnl_newlink_params *params, struct netlink_ext_ack *extack) { + struct net *link_net = rtnl_newlink_link_net(params); + struct nlattr **data = params->data; enum hsr_version proto_version; unsigned char multicast_spec; u8 proto = HSR_PROTOCOL_HSR; @@ -46,7 +48,7 @@ static int hsr_newlink(struct net *src_net, struct net_device *dev, NL_SET_ERR_MSG_MOD(extack, "Slave1 device not specified"); return -EINVAL; } - link[0] = __dev_get_by_index(src_net, + link[0] = __dev_get_by_index(link_net, nla_get_u32(data[IFLA_HSR_SLAVE1])); if (!link[0]) { NL_SET_ERR_MSG_MOD(extack, "Slave1 does not exist"); @@ -56,7 +58,7 @@ static int hsr_newlink(struct net *src_net, struct net_device *dev, NL_SET_ERR_MSG_MOD(extack, "Slave2 device not specified"); return -EINVAL; } - link[1] = __dev_get_by_index(src_net, + link[1] = __dev_get_by_index(link_net, nla_get_u32(data[IFLA_HSR_SLAVE2])); if (!link[1]) { NL_SET_ERR_MSG_MOD(extack, "Slave2 does not exist"); @@ -69,7 +71,7 @@ static int hsr_newlink(struct net *src_net, struct net_device *dev, } if (data[IFLA_HSR_INTERLINK]) - interlink = __dev_get_by_index(src_net, + interlink = __dev_get_by_index(link_net, nla_get_u32(data[IFLA_HSR_INTERLINK])); if (interlink && interlink == link[0]) { diff --git a/net/hsr/prp_dup_discard_test.c b/net/hsr/prp_dup_discard_test.c new file mode 100644 index 0000000000000..e86b7b633ae89 --- /dev/null +++ b/net/hsr/prp_dup_discard_test.c @@ -0,0 +1,212 @@ +// SPDX-License-Identifier: GPL-2.0 +#include + +#include "hsr_main.h" +#include "hsr_framereg.h" + +struct prp_test_data { + struct hsr_port port; + struct hsr_port port_rcv; + struct hsr_frame_info frame; + struct hsr_node node; +}; + +static struct prp_test_data *build_prp_test_data(struct kunit *test) +{ + struct prp_test_data *data = kunit_kzalloc(test, + sizeof(struct prp_test_data), GFP_USER); + KUNIT_EXPECT_NOT_ERR_OR_NULL(test, data); + + data->frame.node_src = &data->node; + data->frame.port_rcv = &data->port_rcv; + data->port_rcv.type = HSR_PT_SLAVE_A; + data->node.seq_start[HSR_PT_SLAVE_A] = 1; + data->node.seq_expected[HSR_PT_SLAVE_A] = 1; + data->node.seq_start[HSR_PT_SLAVE_B] = 1; + data->node.seq_expected[HSR_PT_SLAVE_B] = 1; + data->node.seq_out[HSR_PT_MASTER] = 0; + data->node.time_out[HSR_PT_MASTER] = jiffies; + data->port.type = HSR_PT_MASTER; + + return data; +} + +static void check_prp_counters(struct kunit *test, + 
struct prp_test_data *data, + u16 seq_start_a, u16 seq_expected_a, + u16 seq_start_b, u16 seq_expected_b) +{ + KUNIT_EXPECT_EQ(test, data->node.seq_start[HSR_PT_SLAVE_A], + seq_start_a); + KUNIT_EXPECT_EQ(test, data->node.seq_start[HSR_PT_SLAVE_B], + seq_start_b); + KUNIT_EXPECT_EQ(test, data->node.seq_expected[HSR_PT_SLAVE_A], + seq_expected_a); + KUNIT_EXPECT_EQ(test, data->node.seq_expected[HSR_PT_SLAVE_B], + seq_expected_b); +} + +static void prp_dup_discard_forward(struct kunit *test) +{ + /* Normal situation, both LANs in sync. Next frame is forwarded */ + struct prp_test_data *data = build_prp_test_data(test); + + data->frame.sequence_nr = 2; + KUNIT_EXPECT_EQ(test, 0, + prp_register_frame_out(&data->port, &data->frame)); + KUNIT_EXPECT_EQ(test, data->frame.sequence_nr, + data->node.seq_out[HSR_PT_MASTER]); + KUNIT_EXPECT_EQ(test, jiffies, data->node.time_out[HSR_PT_MASTER]); + check_prp_counters(test, data, data->frame.sequence_nr, + data->frame.sequence_nr + 1, 1, 1); +} + +static void prp_dup_discard_inside_dropwindow(struct kunit *test) +{ + /* Normal situation, other LAN ahead by one. Frame is dropped */ + struct prp_test_data *data = build_prp_test_data(test); + unsigned long time = jiffies - 10; + + data->frame.sequence_nr = 1; + data->node.seq_expected[HSR_PT_SLAVE_B] = 3; + data->node.seq_out[HSR_PT_MASTER] = 2; + data->node.time_out[HSR_PT_MASTER] = time; + + KUNIT_EXPECT_EQ(test, 1, + prp_register_frame_out(&data->port, &data->frame)); + KUNIT_EXPECT_EQ(test, 2, data->node.seq_out[HSR_PT_MASTER]); + KUNIT_EXPECT_EQ(test, time, data->node.time_out[HSR_PT_MASTER]); + check_prp_counters(test, data, 2, 2, 2, 3); +} + +static void prp_dup_discard_node_timeout(struct kunit *test) +{ + /* Timeout situation, node hasn't sent anything for a while */ + struct prp_test_data *data = build_prp_test_data(test); + + data->frame.sequence_nr = 7; + data->node.seq_start[HSR_PT_SLAVE_A] = 1234; + data->node.seq_expected[HSR_PT_SLAVE_A] = 1235; + data->node.seq_start[HSR_PT_SLAVE_B] = 1234; + data->node.seq_expected[HSR_PT_SLAVE_B] = 1234; + data->node.seq_out[HSR_PT_MASTER] = 1234; + data->node.time_out[HSR_PT_MASTER] = + jiffies - msecs_to_jiffies(HSR_ENTRY_FORGET_TIME) - 1; + + KUNIT_EXPECT_EQ(test, 0, + prp_register_frame_out(&data->port, &data->frame)); + KUNIT_EXPECT_EQ(test, data->frame.sequence_nr, + data->node.seq_out[HSR_PT_MASTER]); + KUNIT_EXPECT_EQ(test, jiffies, data->node.time_out[HSR_PT_MASTER]); + check_prp_counters(test, data, data->frame.sequence_nr, + data->frame.sequence_nr + 1, 1234, 1234); +} + +static void prp_dup_discard_out_of_sequence(struct kunit *test) +{ + /* One frame is received out of sequence on both LANs */ + struct prp_test_data *data = build_prp_test_data(test); + + data->node.seq_start[HSR_PT_SLAVE_A] = 10; + data->node.seq_expected[HSR_PT_SLAVE_A] = 10; + data->node.seq_start[HSR_PT_SLAVE_B] = 10; + data->node.seq_expected[HSR_PT_SLAVE_B] = 10; + data->node.seq_out[HSR_PT_MASTER] = 9; + + /* 1st old frame, should be accepted */ + data->frame.sequence_nr = 8; + KUNIT_EXPECT_EQ(test, 0, + prp_register_frame_out(&data->port, &data->frame)); + KUNIT_EXPECT_EQ(test, data->frame.sequence_nr, + data->node.seq_out[HSR_PT_MASTER]); + check_prp_counters(test, data, data->frame.sequence_nr, + data->frame.sequence_nr + 1, 10, 10); + + /* 2nd frame should be dropped */ + data->frame.sequence_nr = 8; + data->port_rcv.type = HSR_PT_SLAVE_B; + KUNIT_EXPECT_EQ(test, 1, + prp_register_frame_out(&data->port, &data->frame)); + check_prp_counters(test, data, 
data->frame.sequence_nr + 1, + data->frame.sequence_nr + 1, + data->frame.sequence_nr + 1, + data->frame.sequence_nr + 1); + + /* Next frame, this is forwarded */ + data->frame.sequence_nr = 10; + data->port_rcv.type = HSR_PT_SLAVE_A; + KUNIT_EXPECT_EQ(test, 0, + prp_register_frame_out(&data->port, &data->frame)); + KUNIT_EXPECT_EQ(test, data->frame.sequence_nr, + data->node.seq_out[HSR_PT_MASTER]); + check_prp_counters(test, data, data->frame.sequence_nr, + data->frame.sequence_nr + 1, 9, 9); + + /* and next one is dropped */ + data->frame.sequence_nr = 10; + data->port_rcv.type = HSR_PT_SLAVE_B; + KUNIT_EXPECT_EQ(test, 1, + prp_register_frame_out(&data->port, &data->frame)); + check_prp_counters(test, data, data->frame.sequence_nr + 1, + data->frame.sequence_nr + 1, + data->frame.sequence_nr + 1, + data->frame.sequence_nr + 1); +} + +static void prp_dup_discard_lan_b_late(struct kunit *test) +{ + /* LAN B is behind */ + struct prp_test_data *data = build_prp_test_data(test); + + data->node.seq_start[HSR_PT_SLAVE_A] = 9; + data->node.seq_expected[HSR_PT_SLAVE_A] = 9; + data->node.seq_start[HSR_PT_SLAVE_B] = 9; + data->node.seq_expected[HSR_PT_SLAVE_B] = 9; + data->node.seq_out[HSR_PT_MASTER] = 8; + + data->frame.sequence_nr = 9; + KUNIT_EXPECT_EQ(test, 0, + prp_register_frame_out(&data->port, &data->frame)); + KUNIT_EXPECT_EQ(test, data->frame.sequence_nr, + data->node.seq_out[HSR_PT_MASTER]); + check_prp_counters(test, data, 9, 10, 9, 9); + + data->frame.sequence_nr = 10; + KUNIT_EXPECT_EQ(test, 0, + prp_register_frame_out(&data->port, &data->frame)); + KUNIT_EXPECT_EQ(test, data->frame.sequence_nr, + data->node.seq_out[HSR_PT_MASTER]); + check_prp_counters(test, data, 9, 11, 9, 9); + + data->frame.sequence_nr = 9; + data->port_rcv.type = HSR_PT_SLAVE_B; + KUNIT_EXPECT_EQ(test, 1, + prp_register_frame_out(&data->port, &data->frame)); + check_prp_counters(test, data, 10, 11, 10, 10); + + data->frame.sequence_nr = 10; + data->port_rcv.type = HSR_PT_SLAVE_B; + KUNIT_EXPECT_EQ(test, 1, + prp_register_frame_out(&data->port, &data->frame)); + check_prp_counters(test, data, 11, 11, 11, 11); +} + +static struct kunit_case prp_dup_discard_test_cases[] = { + KUNIT_CASE(prp_dup_discard_forward), + KUNIT_CASE(prp_dup_discard_inside_dropwindow), + KUNIT_CASE(prp_dup_discard_node_timeout), + KUNIT_CASE(prp_dup_discard_out_of_sequence), + KUNIT_CASE(prp_dup_discard_lan_b_late), + {} +}; + +static struct kunit_suite prp_dup_discard_suite = { + .name = "prp_duplicate_discard", + .test_cases = prp_dup_discard_test_cases, +}; + +kunit_test_suite(prp_dup_discard_suite); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("KUnit tests for PRP duplicate discard"); +MODULE_AUTHOR("Jaakko Karrenpalo "); diff --git a/net/ieee802154/6lowpan/core.c b/net/ieee802154/6lowpan/core.c index 175efd860f7b4..018929563c6bc 100644 --- a/net/ieee802154/6lowpan/core.c +++ b/net/ieee802154/6lowpan/core.c @@ -50,6 +50,7 @@ #include #include +#include #include "6lowpan_i.h" @@ -116,7 +117,7 @@ static void lowpan_setup(struct net_device *ldev) ldev->netdev_ops = &lowpan_netdev_ops; ldev->header_ops = &lowpan_header_ops; ldev->needs_free_netdev = true; - ldev->netns_local = true; + ldev->netns_immutable = true; } static int lowpan_validate(struct nlattr *tb[], struct nlattr *data[], @@ -129,10 +130,11 @@ static int lowpan_validate(struct nlattr *tb[], struct nlattr *data[], return 0; } -static int lowpan_newlink(struct net *src_net, struct net_device *ldev, - struct nlattr *tb[], struct nlattr *data[], +static int lowpan_newlink(struct 
net_device *ldev, + struct rtnl_newlink_params *params, struct netlink_ext_ack *extack) { + struct nlattr **tb = params->tb; struct net_device *wdev; int ret; @@ -142,6 +144,8 @@ static int lowpan_newlink(struct net *src_net, struct net_device *ldev, if (!tb[IFLA_LINK]) return -EINVAL; + if (params->link_net && !net_eq(params->link_net, dev_net(ldev))) + return -EINVAL; /* find and hold wpan device */ wdev = dev_get_by_index(dev_net(ldev), nla_get_u32(tb[IFLA_LINK])); if (!wdev) diff --git a/net/ieee802154/6lowpan/reassembly.c b/net/ieee802154/6lowpan/reassembly.c index 867d637d86f08..d4b983d170382 100644 --- a/net/ieee802154/6lowpan/reassembly.c +++ b/net/ieee802154/6lowpan/reassembly.c @@ -31,7 +31,8 @@ static const char lowpan_frags_cache_name[] = "lowpan-frags"; static struct inet_frags lowpan_frags; static int lowpan_frag_reasm(struct lowpan_frag_queue *fq, struct sk_buff *skb, - struct sk_buff *prev, struct net_device *ldev); + struct sk_buff *prev, struct net_device *ldev, + int *refs); static void lowpan_frag_init(struct inet_frag_queue *q, const void *a) { @@ -45,6 +46,7 @@ static void lowpan_frag_expire(struct timer_list *t) { struct inet_frag_queue *frag = from_timer(frag, t, timer); struct frag_queue *fq; + int refs = 1; fq = container_of(frag, struct frag_queue, q); @@ -53,10 +55,10 @@ static void lowpan_frag_expire(struct timer_list *t) if (fq->q.flags & INET_FRAG_COMPLETE) goto out; - inet_frag_kill(&fq->q); + inet_frag_kill(&fq->q, &refs); out: spin_unlock(&fq->q.lock); - inet_frag_put(&fq->q); + inet_frag_putn(&fq->q, refs); } static inline struct lowpan_frag_queue * @@ -82,7 +84,8 @@ fq_find(struct net *net, const struct lowpan_802154_cb *cb, } static int lowpan_frag_queue(struct lowpan_frag_queue *fq, - struct sk_buff *skb, u8 frag_type) + struct sk_buff *skb, u8 frag_type, + int *refs) { struct sk_buff *prev_tail; struct net_device *ldev; @@ -143,7 +146,7 @@ static int lowpan_frag_queue(struct lowpan_frag_queue *fq, unsigned long orefdst = skb->_skb_refdst; skb->_skb_refdst = 0UL; - res = lowpan_frag_reasm(fq, skb, prev_tail, ldev); + res = lowpan_frag_reasm(fq, skb, prev_tail, ldev, refs); skb->_skb_refdst = orefdst; return res; } @@ -162,11 +165,12 @@ static int lowpan_frag_queue(struct lowpan_frag_queue *fq, * the last and the first frames arrived and all the bits are here. 
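 *
 * With the reference batching introduced in this series, 'refs'
 * accumulates the reference drops performed while fq->q.lock is held,
 * and the caller releases them in one go after unlocking. A sketch of
 * the calling pattern (as used by lowpan_frag_rcv() further down):
 *
 *	int ret, refs = 0;
 *
 *	spin_lock(&fq->q.lock);
 *	ret = lowpan_frag_queue(fq, skb, frag_type, &refs);
 *	spin_unlock(&fq->q.lock);
 *	inet_frag_putn(&fq->q, refs);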
*/ static int lowpan_frag_reasm(struct lowpan_frag_queue *fq, struct sk_buff *skb, - struct sk_buff *prev_tail, struct net_device *ldev) + struct sk_buff *prev_tail, struct net_device *ldev, + int *refs) { void *reasm_data; - inet_frag_kill(&fq->q); + inet_frag_kill(&fq->q, refs); reasm_data = inet_frag_reasm_prepare(&fq->q, skb, prev_tail); if (!reasm_data) @@ -300,17 +304,20 @@ int lowpan_frag_rcv(struct sk_buff *skb, u8 frag_type) goto err; } + rcu_read_lock(); fq = fq_find(net, cb, &hdr.source, &hdr.dest); if (fq != NULL) { - int ret; + int ret, refs = 0; spin_lock(&fq->q.lock); - ret = lowpan_frag_queue(fq, skb, frag_type); + ret = lowpan_frag_queue(fq, skb, frag_type, &refs); spin_unlock(&fq->q.lock); - inet_frag_put(&fq->q); + rcu_read_unlock(); + inet_frag_putn(&fq->q, refs); return ret; } + rcu_read_unlock(); err: kfree_skb(skb); diff --git a/net/ieee802154/core.c b/net/ieee802154/core.c index 88adb04e40722..89b671b12600f 100644 --- a/net/ieee802154/core.c +++ b/net/ieee802154/core.c @@ -226,11 +226,11 @@ int cfg802154_switch_netns(struct cfg802154_registered_device *rdev, list_for_each_entry(wpan_dev, &rdev->wpan_dev_list, list) { if (!wpan_dev->netdev) continue; - wpan_dev->netdev->netns_local = false; + wpan_dev->netdev->netns_immutable = false; err = dev_change_net_namespace(wpan_dev->netdev, net, "wpan%d"); if (err) break; - wpan_dev->netdev->netns_local = true; + wpan_dev->netdev->netns_immutable = true; } if (err) { @@ -242,11 +242,11 @@ int cfg802154_switch_netns(struct cfg802154_registered_device *rdev, list) { if (!wpan_dev->netdev) continue; - wpan_dev->netdev->netns_local = false; + wpan_dev->netdev->netns_immutable = false; err = dev_change_net_namespace(wpan_dev->netdev, net, "wpan%d"); WARN_ON(err); - wpan_dev->netdev->netns_local = true; + wpan_dev->netdev->netns_immutable = true; } return err; @@ -291,7 +291,7 @@ static int cfg802154_netdev_notifier_call(struct notifier_block *nb, switch (state) { /* TODO NETDEV_DEVTYPE */ case NETDEV_REGISTER: - dev->netns_local = true; + dev->netns_immutable = true; wpan_dev->identifier = ++rdev->wpan_dev_id; list_add_rcu(&wpan_dev->list, &rdev->wpan_dev_list); rdev->devlist_generation++; diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index 21f46ee7b6e95..5df1f1325259d 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -153,7 +153,7 @@ void inet_sock_destruct(struct sock *sk) WARN_ON_ONCE(atomic_read(&sk->sk_rmem_alloc)); WARN_ON_ONCE(refcount_read(&sk->sk_wmem_alloc)); WARN_ON_ONCE(sk->sk_wmem_queued); - WARN_ON_ONCE(sk_forward_alloc_get(sk)); + WARN_ON_ONCE(sk->sk_forward_alloc); kfree(rcu_dereference_protected(inet->inet_opt, 1)); dst_release(rcu_dereference_protected(sk->sk_dst_cache, 1)); diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c index 814300eee39de..a648fff71ea7d 100644 --- a/net/ipv4/arp.c +++ b/net/ipv4/arp.c @@ -1064,8 +1064,8 @@ static int arp_req_set_proxy(struct net *net, struct net_device *dev, int on) IPV4_DEVCONF_ALL(net, PROXY_ARP) = on; return 0; } - if (__in_dev_get_rtnl(dev)) { - IN_DEV_CONF_SET(__in_dev_get_rtnl(dev), PROXY_ARP, on); + if (__in_dev_get_rtnl_net(dev)) { + IN_DEV_CONF_SET(__in_dev_get_rtnl_net(dev), PROXY_ARP, on); return 0; } return -ENXIO; @@ -1295,14 +1295,14 @@ int arp_ioctl(struct net *net, unsigned int cmd, void __user *arg) switch (cmd) { case SIOCDARP: - rtnl_lock(); + rtnl_net_lock(net); err = arp_req_delete(net, &r); - rtnl_unlock(); + rtnl_net_unlock(net); break; case SIOCSARP: - rtnl_lock(); + rtnl_net_lock(net); err = arp_req_set(net, &r); - rtnl_unlock(); + 
rtnl_net_unlock(net); break; case SIOCGARP: rcu_read_lock(); diff --git a/net/ipv4/bpf_tcp_ca.c b/net/ipv4/bpf_tcp_ca.c index 554804774628e..e01492234b0b3 100644 --- a/net/ipv4/bpf_tcp_ca.c +++ b/net/ipv4/bpf_tcp_ca.c @@ -121,7 +121,7 @@ static int bpf_tcp_ca_btf_struct_access(struct bpf_verifier_log *log, BPF_CALL_2(bpf_tcp_send_ack, struct tcp_sock *, tp, u32, rcv_nxt) { /* bpf_tcp_ca prog cannot have NULL tp */ - __tcp_send_ack((struct sock *)tp, rcv_nxt); + __tcp_send_ack((struct sock *)tp, rcv_nxt, 0); return 0; } diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c index 55b8151759bc9..754f60fb6e252 100644 --- a/net/ipv4/devinet.c +++ b/net/ipv4/devinet.c @@ -46,6 +46,7 @@ #include #include #include +#include "igmp_internal.h" #include #include #ifdef CONFIG_SYSCTL @@ -107,15 +108,6 @@ static const struct nla_policy ifa_ipv4_policy[IFA_MAX+1] = { [IFA_PROTO] = { .type = NLA_U8 }, }; -struct inet_fill_args { - u32 portid; - u32 seq; - int event; - unsigned int flags; - int netnsid; - int ifindex; -}; - #define IN4_ADDR_HSIZE_SHIFT 8 #define IN4_ADDR_HSIZE (1U << IN4_ADDR_HSIZE_SHIFT) @@ -1847,9 +1839,38 @@ static int inet_valid_dump_ifaddr_req(const struct nlmsghdr *nlh, return 0; } -static int in_dev_dump_addr(struct in_device *in_dev, struct sk_buff *skb, - struct netlink_callback *cb, int *s_ip_idx, - struct inet_fill_args *fillargs) +static int in_dev_dump_ifmcaddr(struct in_device *in_dev, struct sk_buff *skb, + struct netlink_callback *cb, int *s_ip_idx, + struct inet_fill_args *fillargs) +{ + struct ip_mc_list *im; + int ip_idx = 0; + int err; + + for (im = rcu_dereference(in_dev->mc_list); + im; + im = rcu_dereference(im->next_rcu)) { + if (ip_idx < *s_ip_idx) { + ip_idx++; + continue; + } + err = inet_fill_ifmcaddr(skb, in_dev->dev, im, fillargs); + if (err < 0) + goto done; + + nl_dump_check_consistent(cb, nlmsg_hdr(skb)); + ip_idx++; + } + err = 0; + ip_idx = 0; +done: + *s_ip_idx = ip_idx; + return err; +} + +static int in_dev_dump_ifaddr(struct in_device *in_dev, struct sk_buff *skb, + struct netlink_callback *cb, int *s_ip_idx, + struct inet_fill_args *fillargs) { struct in_ifaddr *ifa; int ip_idx = 0; @@ -1875,6 +1896,21 @@ static int in_dev_dump_addr(struct in_device *in_dev, struct sk_buff *skb, return err; } +static int in_dev_dump_addr(struct in_device *in_dev, struct sk_buff *skb, + struct netlink_callback *cb, int *s_ip_idx, + struct inet_fill_args *fillargs) +{ + switch (fillargs->event) { + case RTM_NEWADDR: + return in_dev_dump_ifaddr(in_dev, skb, cb, s_ip_idx, fillargs); + case RTM_GETMULTICAST: + return in_dev_dump_ifmcaddr(in_dev, skb, cb, s_ip_idx, + fillargs); + default: + return -EINVAL; + } +} + /* Combine dev_addr_genid and dev_base_seq to detect changes. 
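 *
 * (Sketch of the idea, assuming the body elided by this hunk matches
 *  mainline: res = atomic_read(&net->ipv4.dev_addr_genid) +
 *  net->dev_base_seq; a change to either counter changes the netlink
 *  dump consistency cookie, so nl_dump_check_consistent() marks
 *  in-flight dumps with NLM_F_DUMP_INTR.)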
*/ static u32 inet_base_seq(const struct net *net) @@ -1890,13 +1926,14 @@ static u32 inet_base_seq(const struct net *net) return res; } -static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb) +static int inet_dump_addr(struct sk_buff *skb, struct netlink_callback *cb, + int event) { const struct nlmsghdr *nlh = cb->nlh; struct inet_fill_args fillargs = { .portid = NETLINK_CB(cb->skb).portid, .seq = nlh->nlmsg_seq, - .event = RTM_NEWADDR, + .event = event, .flags = NLM_F_MULTI, .netnsid = -1, }; @@ -1950,6 +1987,16 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb) return err; } +static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb) +{ + return inet_dump_addr(skb, cb, RTM_NEWADDR); +} + +static int inet_dump_ifmcaddr(struct sk_buff *skb, struct netlink_callback *cb) +{ + return inet_dump_addr(skb, cb, RTM_GETMULTICAST); +} + static void rtmsg_ifa(int event, struct in_ifaddr *ifa, struct nlmsghdr *nlh, u32 portid) { @@ -2846,6 +2893,8 @@ static const struct rtnl_msg_handler devinet_rtnl_msg_handlers[] __initconst = { {.protocol = PF_INET, .msgtype = RTM_GETNETCONF, .doit = inet_netconf_get_devconf, .dumpit = inet_netconf_dump_devconf, .flags = RTNL_FLAG_DOIT_UNLOCKED | RTNL_FLAG_DUMP_UNLOCKED}, + {.owner = THIS_MODULE, .protocol = PF_INET, .msgtype = RTM_GETMULTICAST, + .dumpit = inet_dump_ifmcaddr, .flags = RTNL_FLAG_DUMP_UNLOCKED}, }; void __init devinet_init(void) diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c index 272e42d813230..3f4e629998fab 100644 --- a/net/ipv4/fib_frontend.c +++ b/net/ipv4/fib_frontend.c @@ -553,18 +553,16 @@ static int rtentry_to_fib_config(struct net *net, int cmd, struct rtentry *rt, const struct in_ifaddr *ifa; struct in_device *in_dev; - in_dev = __in_dev_get_rtnl(dev); + in_dev = __in_dev_get_rtnl_net(dev); if (!in_dev) return -ENODEV; *colon = ':'; - rcu_read_lock(); - in_dev_for_each_ifa_rcu(ifa, in_dev) { + in_dev_for_each_ifa_rtnl_net(net, ifa, in_dev) { if (strcmp(ifa->ifa_label, devname) == 0) break; } - rcu_read_unlock(); if (!ifa) return -ENODEV; @@ -635,7 +633,7 @@ int ip_rt_ioctl(struct net *net, unsigned int cmd, struct rtentry *rt) if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) return -EPERM; - rtnl_lock(); + rtnl_net_lock(net); err = rtentry_to_fib_config(net, cmd, rt, &cfg); if (err == 0) { struct fib_table *tb; @@ -659,7 +657,7 @@ int ip_rt_ioctl(struct net *net, unsigned int cmd, struct rtentry *rt) /* allocated by rtentry_to_fib_config() */ kfree(cfg.fc_mx); } - rtnl_unlock(); + rtnl_net_unlock(net); return err; } return -EINVAL; @@ -809,7 +807,7 @@ static int rtm_to_fib_config(struct net *net, struct sk_buff *skb, case RTA_MULTIPATH: err = lwtunnel_valid_encap_type_attr(nla_data(attr), nla_len(attr), - extack); + extack, false); if (err < 0) goto errout; cfg->fc_mp = nla_data(attr); @@ -827,7 +825,7 @@ static int rtm_to_fib_config(struct net *net, struct sk_buff *skb, case RTA_ENCAP_TYPE: cfg->fc_encap_type = nla_get_u16(attr); err = lwtunnel_valid_encap_type(cfg->fc_encap_type, - extack); + extack, false); if (err < 0) goto errout; break; @@ -837,19 +835,33 @@ static int rtm_to_fib_config(struct net *net, struct sk_buff *skb, } } + if (cfg->fc_dst_len > 32) { + NL_SET_ERR_MSG(extack, "Invalid prefix length"); + err = -EINVAL; + goto errout; + } + + if (cfg->fc_dst_len < 32 && (ntohl(cfg->fc_dst) << cfg->fc_dst_len)) { + NL_SET_ERR_MSG(extack, "Invalid prefix for given prefix length"); + err = -EINVAL; + goto errout; + } + if (cfg->fc_nh_id) { if 
(cfg->fc_oif || cfg->fc_gw_family || cfg->fc_encap || cfg->fc_mp) { NL_SET_ERR_MSG(extack, "Nexthop specification and nexthop id are mutually exclusive"); - return -EINVAL; + err = -EINVAL; + goto errout; } } if (has_gw && has_via) { NL_SET_ERR_MSG(extack, "Nexthop configuration can not contain both GATEWAY and VIA"); - return -EINVAL; + err = -EINVAL; + goto errout; } if (!cfg->fc_table) @@ -872,20 +884,24 @@ static int inet_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh, if (err < 0) goto errout; + rtnl_net_lock(net); + if (cfg.fc_nh_id && !nexthop_find_by_id(net, cfg.fc_nh_id)) { NL_SET_ERR_MSG(extack, "Nexthop id does not exist"); err = -EINVAL; - goto errout; + goto unlock; } tb = fib_get_table(net, cfg.fc_table); if (!tb) { NL_SET_ERR_MSG(extack, "FIB table does not exist"); err = -ESRCH; - goto errout; + goto unlock; } err = fib_table_delete(net, tb, &cfg, extack); +unlock: + rtnl_net_unlock(net); errout: return err; } @@ -902,15 +918,20 @@ static int inet_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh, if (err < 0) goto errout; + rtnl_net_lock(net); + tb = fib_new_table(net, cfg.fc_table); if (!tb) { err = -ENOBUFS; - goto errout; + goto unlock; } err = fib_table_insert(net, tb, &cfg, extack); if (!err && cfg.fc_type == RTN_LOCAL) net->ipv4.fib_has_custom_local_routes = true; + +unlock: + rtnl_net_unlock(net); errout: return err; } @@ -1450,7 +1471,7 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event, fib_sync_up(dev, RTNH_F_DEAD); #endif atomic_inc(&net->ipv4.dev_addr_genid); - rt_cache_flush(dev_net(dev)); + rt_cache_flush(net); break; case NETDEV_DOWN: fib_del_ifaddr(ifa, NULL); @@ -1461,7 +1482,7 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event, */ fib_disable_ip(dev, event, true); } else { - rt_cache_flush(dev_net(dev)); + rt_cache_flush(net); } break; } @@ -1575,7 +1596,7 @@ static void ip_fib_net_exit(struct net *net) { int i; - ASSERT_RTNL(); + ASSERT_RTNL_NET(net); #ifdef CONFIG_IP_MULTIPLE_TABLES RCU_INIT_POINTER(net->ipv4.fib_main, NULL); RCU_INIT_POINTER(net->ipv4.fib_default, NULL); @@ -1615,9 +1636,15 @@ static int __net_init fib_net_init(struct net *net) error = ip_fib_net_init(net); if (error < 0) goto out; + + error = fib4_semantics_init(net); + if (error) + goto out_semantics; + error = nl_fib_lookup_init(net); if (error < 0) goto out_nlfl; + error = fib_proc_init(net); if (error < 0) goto out_proc; @@ -1627,9 +1654,11 @@ static int __net_init fib_net_init(struct net *net) out_proc: nl_fib_lookup_exit(net); out_nlfl: - rtnl_lock(); + fib4_semantics_exit(net); +out_semantics: + rtnl_net_lock(net); ip_fib_net_exit(net); - rtnl_unlock(); + rtnl_net_unlock(net); goto out; } @@ -1644,10 +1673,15 @@ static void __net_exit fib_net_exit_batch(struct list_head *net_list) struct net *net; rtnl_lock(); - list_for_each_entry(net, net_list, exit_list) + list_for_each_entry(net, net_list, exit_list) { + __rtnl_net_lock(net); ip_fib_net_exit(net); - + __rtnl_net_unlock(net); + } rtnl_unlock(); + + list_for_each_entry(net, net_list, exit_list) + fib4_semantics_exit(net); } static struct pernet_operations fib_net_ops = { @@ -1658,9 +1692,9 @@ static struct pernet_operations fib_net_ops = { static const struct rtnl_msg_handler fib_rtnl_msg_handlers[] __initconst = { {.protocol = PF_INET, .msgtype = RTM_NEWROUTE, - .doit = inet_rtm_newroute}, + .doit = inet_rtm_newroute, .flags = RTNL_FLAG_DOIT_PERNET}, {.protocol = PF_INET, .msgtype = RTM_DELROUTE, - .doit = inet_rtm_delroute}, + .doit = 
inet_rtm_delroute, .flags = RTNL_FLAG_DOIT_PERNET}, {.protocol = PF_INET, .msgtype = RTM_GETROUTE, .dumpit = inet_dump_fib, .flags = RTNL_FLAG_DUMP_UNLOCKED | RTNL_FLAG_DUMP_SPLIT_NLM_DONE}, }; diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c index 9517b8667e000..fa58d6620ed67 100644 --- a/net/ipv4/fib_rules.c +++ b/net/ipv4/fib_rules.c @@ -37,6 +37,7 @@ struct fib4_rule { u8 dst_len; u8 src_len; dscp_t dscp; + dscp_t dscp_mask; u8 dscp_full:1; /* DSCP or TOS selector */ __be32 src; __be32 srcmask; @@ -192,7 +193,8 @@ INDIRECT_CALLABLE_SCOPE int fib4_rule_match(struct fib_rule *rule, * to mask the upper three DSCP bits prior to matching to maintain * legacy behavior. */ - if (r->dscp_full && r->dscp != inet_dsfield_to_dscp(fl4->flowi4_tos)) + if (r->dscp_full && + (r->dscp ^ inet_dsfield_to_dscp(fl4->flowi4_tos)) & r->dscp_mask) return 0; else if (!r->dscp_full && r->dscp && !fib_dscp_masked_match(r->dscp, fl4)) @@ -201,12 +203,12 @@ INDIRECT_CALLABLE_SCOPE int fib4_rule_match(struct fib_rule *rule, if (rule->ip_proto && (rule->ip_proto != fl4->flowi4_proto)) return 0; - if (fib_rule_port_range_set(&rule->sport_range) && - !fib_rule_port_inrange(&rule->sport_range, fl4->fl4_sport)) + if (!fib_rule_port_match(&rule->sport_range, rule->sport_mask, + fl4->fl4_sport)) return 0; - if (fib_rule_port_range_set(&rule->dport_range) && - !fib_rule_port_inrange(&rule->dport_range, fl4->fl4_dport)) + if (!fib_rule_port_match(&rule->dport_range, rule->dport_mask, + fl4->fl4_dport)) return 0; return 1; @@ -235,19 +237,43 @@ static int fib4_nl2rule_dscp(const struct nlattr *nla, struct fib4_rule *rule4, } rule4->dscp = inet_dsfield_to_dscp(nla_get_u8(nla) << 2); + rule4->dscp_mask = inet_dsfield_to_dscp(INET_DSCP_MASK); rule4->dscp_full = true; return 0; } +static int fib4_nl2rule_dscp_mask(const struct nlattr *nla, + struct fib4_rule *rule4, + struct netlink_ext_ack *extack) +{ + dscp_t dscp_mask; + + if (!rule4->dscp_full) { + NL_SET_ERR_MSG_ATTR(extack, nla, + "Cannot specify DSCP mask without DSCP value"); + return -EINVAL; + } + + dscp_mask = inet_dsfield_to_dscp(nla_get_u8(nla) << 2); + if (rule4->dscp & ~dscp_mask) { + NL_SET_ERR_MSG_ATTR(extack, nla, "Invalid DSCP mask"); + return -EINVAL; + } + + rule4->dscp_mask = dscp_mask; + + return 0; +} + static int fib4_rule_configure(struct fib_rule *rule, struct sk_buff *skb, struct fib_rule_hdr *frh, struct nlattr **tb, struct netlink_ext_ack *extack) { - struct net *net = sock_net(skb->sk); + struct fib4_rule *rule4 = (struct fib4_rule *)rule; + struct net *net = rule->fr_net; int err = -EINVAL; - struct fib4_rule *rule4 = (struct fib4_rule *) rule; if (tb[FRA_FLOWLABEL] || tb[FRA_FLOWLABEL_MASK]) { NL_SET_ERR_MSG(extack, @@ -271,6 +297,10 @@ static int fib4_rule_configure(struct fib_rule *rule, struct sk_buff *skb, fib4_nl2rule_dscp(tb[FRA_DSCP], rule4, extack) < 0) goto errout; + if (tb[FRA_DSCP_MASK] && + fib4_nl2rule_dscp_mask(tb[FRA_DSCP_MASK], rule4, extack) < 0) + goto errout; + /* split local/main if they are not already split */ err = fib_unmerge(net); if (err) @@ -366,6 +396,14 @@ static int fib4_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh, return 0; } + if (tb[FRA_DSCP_MASK]) { + dscp_t dscp_mask; + + dscp_mask = inet_dsfield_to_dscp(nla_get_u8(tb[FRA_DSCP_MASK]) << 2); + if (!rule4->dscp_full || rule4->dscp_mask != dscp_mask) + return 0; + } + #ifdef CONFIG_IP_ROUTE_CLASSID if (tb[FRA_FLOW] && (rule4->tclassid != nla_get_u32(tb[FRA_FLOW]))) return 0; @@ -391,7 +429,9 @@ static int fib4_rule_fill(struct fib_rule 
*rule, struct sk_buff *skb, if (rule4->dscp_full) { frh->tos = 0; if (nla_put_u8(skb, FRA_DSCP, - inet_dscp_to_dsfield(rule4->dscp) >> 2)) + inet_dscp_to_dsfield(rule4->dscp) >> 2) || + nla_put_u8(skb, FRA_DSCP_MASK, + inet_dscp_to_dsfield(rule4->dscp_mask) >> 2)) goto nla_put_failure; } else { frh->tos = inet_dscp_to_dsfield(rule4->dscp); @@ -418,7 +458,8 @@ static size_t fib4_rule_nlmsg_payload(struct fib_rule *rule) return nla_total_size(4) /* dst */ + nla_total_size(4) /* src */ + nla_total_size(4) /* flow */ - + nla_total_size(1); /* dscp */ + + nla_total_size(1) /* dscp */ + + nla_total_size(1); /* dscp mask */ } static void fib4_rule_flush_cache(struct fib_rules_ops *ops) diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c index d2cee5c314f5e..f68bb9e34c34c 100644 --- a/net/ipv4/fib_semantics.c +++ b/net/ipv4/fib_semantics.c @@ -50,12 +50,6 @@ #include "fib_lookup.h" -static struct hlist_head *fib_info_hash; -static struct hlist_head *fib_info_laddrhash; -static unsigned int fib_info_hash_size; -static unsigned int fib_info_hash_bits; -static unsigned int fib_info_cnt; - /* for_nexthops and change_nexthops only used when nexthop object * is not set in a fib_info. The logic within can reference fib_nh. */ @@ -258,8 +252,7 @@ void fib_release_info(struct fib_info *fi) ASSERT_RTNL(); if (fi && refcount_dec_and_test(&fi->fib_treeref)) { hlist_del(&fi->fib_hash); - - fib_info_cnt--; + fi->fib_net->ipv4.fib_info_cnt--; if (fi->fib_prefsrc) hlist_del(&fi->fib_lhash); @@ -335,11 +328,12 @@ static unsigned int fib_info_hashfn_1(int init_val, u8 protocol, u8 scope, static unsigned int fib_info_hashfn_result(const struct net *net, unsigned int val) { - return hash_32(val ^ net_hash_mix(net), fib_info_hash_bits); + return hash_32(val ^ net_hash_mix(net), net->ipv4.fib_info_hash_bits); } -static inline unsigned int fib_info_hashfn(struct fib_info *fi) +static struct hlist_head *fib_info_hash_bucket(struct fib_info *fi) { + struct net *net = fi->fib_net; unsigned int val; val = fib_info_hashfn_1(fi->fib_nhs, fi->fib_protocol, @@ -354,7 +348,70 @@ static inline unsigned int fib_info_hashfn(struct fib_info *fi) } endfor_nexthops(fi) } - return fib_info_hashfn_result(fi->fib_net, val); + return &net->ipv4.fib_info_hash[fib_info_hashfn_result(net, val)]; +} + +static struct hlist_head *fib_info_laddrhash_bucket(const struct net *net, + __be32 val) +{ + unsigned int hash_bits = net->ipv4.fib_info_hash_bits; + u32 slot; + + slot = hash_32(net_hash_mix(net) ^ (__force u32)val, hash_bits); + + return &net->ipv4.fib_info_hash[(1 << hash_bits) + slot]; +} + +static struct hlist_head *fib_info_hash_alloc(unsigned int hash_bits) +{ + /* The second half is used for prefsrc */ + return kvcalloc((1 << hash_bits) * 2, sizeof(struct hlist_head *), + GFP_KERNEL); +} + +static void fib_info_hash_free(struct hlist_head *head) +{ + kvfree(head); +} + +static void fib_info_hash_grow(struct net *net) +{ + unsigned int old_size = 1 << net->ipv4.fib_info_hash_bits; + struct hlist_head *new_info_hash, *old_info_hash; + unsigned int i; + + if (net->ipv4.fib_info_cnt < old_size) + return; + + new_info_hash = fib_info_hash_alloc(net->ipv4.fib_info_hash_bits + 1); + if (!new_info_hash) + return; + + old_info_hash = net->ipv4.fib_info_hash; + net->ipv4.fib_info_hash = new_info_hash; + net->ipv4.fib_info_hash_bits += 1; + + for (i = 0; i < old_size; i++) { + struct hlist_head *head = &old_info_hash[i]; + struct hlist_node *n; + struct fib_info *fi; + + hlist_for_each_entry_safe(fi, n, head, fib_hash) + 
hlist_add_head(&fi->fib_hash, fib_info_hash_bucket(fi)); + } + + for (i = 0; i < old_size; i++) { + struct hlist_head *lhead = &old_info_hash[old_size + i]; + struct hlist_node *n; + struct fib_info *fi; + + hlist_for_each_entry_safe(fi, n, lhead, fib_lhash) + hlist_add_head(&fi->fib_lhash, + fib_info_laddrhash_bucket(fi->fib_net, + fi->fib_prefsrc)); + } + + fib_info_hash_free(old_info_hash); } /* no metrics, only nexthop id */ @@ -370,13 +427,12 @@ static struct fib_info *fib_find_info_nh(struct net *net, (__force u32)cfg->fc_prefsrc, cfg->fc_priority); hash = fib_info_hashfn_result(net, hash); - head = &fib_info_hash[hash]; + head = &net->ipv4.fib_info_hash[hash]; hlist_for_each_entry(fi, head, fib_hash) { - if (!net_eq(fi->fib_net, net)) - continue; if (!fi->nh || fi->nh->id != cfg->fc_nh_id) continue; + if (cfg->fc_protocol == fi->fib_protocol && cfg->fc_scope == fi->fib_scope && cfg->fc_prefsrc == fi->fib_prefsrc && @@ -392,18 +448,13 @@ static struct fib_info *fib_find_info_nh(struct net *net, static struct fib_info *fib_find_info(struct fib_info *nfi) { - struct hlist_head *head; + struct hlist_head *head = fib_info_hash_bucket(nfi); struct fib_info *fi; - unsigned int hash; - - hash = fib_info_hashfn(nfi); - head = &fib_info_hash[hash]; hlist_for_each_entry(fi, head, fib_hash) { - if (!net_eq(fi->fib_net, nfi->fib_net)) - continue; if (fi->fib_nhs != nfi->fib_nhs) continue; + if (nfi->fib_protocol == fi->fib_protocol && nfi->fib_scope == fi->fib_scope && nfi->fib_prefsrc == fi->fib_prefsrc && @@ -1239,64 +1290,6 @@ int fib_check_nh(struct net *net, struct fib_nh *nh, u32 table, u8 scope, return err; } -static struct hlist_head * -fib_info_laddrhash_bucket(const struct net *net, __be32 val) -{ - u32 slot = hash_32(net_hash_mix(net) ^ (__force u32)val, - fib_info_hash_bits); - - return &fib_info_laddrhash[slot]; -} - -static void fib_info_hash_move(struct hlist_head *new_info_hash, - struct hlist_head *new_laddrhash, - unsigned int new_size) -{ - struct hlist_head *old_info_hash, *old_laddrhash; - unsigned int old_size = fib_info_hash_size; - unsigned int i; - - ASSERT_RTNL(); - old_info_hash = fib_info_hash; - old_laddrhash = fib_info_laddrhash; - fib_info_hash_size = new_size; - fib_info_hash_bits = ilog2(new_size); - - for (i = 0; i < old_size; i++) { - struct hlist_head *head = &fib_info_hash[i]; - struct hlist_node *n; - struct fib_info *fi; - - hlist_for_each_entry_safe(fi, n, head, fib_hash) { - struct hlist_head *dest; - unsigned int new_hash; - - new_hash = fib_info_hashfn(fi); - dest = &new_info_hash[new_hash]; - hlist_add_head(&fi->fib_hash, dest); - } - } - fib_info_hash = new_info_hash; - - fib_info_laddrhash = new_laddrhash; - for (i = 0; i < old_size; i++) { - struct hlist_head *lhead = &old_laddrhash[i]; - struct hlist_node *n; - struct fib_info *fi; - - hlist_for_each_entry_safe(fi, n, lhead, fib_lhash) { - struct hlist_head *ldest; - - ldest = fib_info_laddrhash_bucket(fi->fib_net, - fi->fib_prefsrc); - hlist_add_head(&fi->fib_lhash, ldest); - } - } - - kvfree(old_info_hash); - kvfree(old_laddrhash); -} - __be32 fib_info_update_nhc_saddr(struct net *net, struct fib_nh_common *nhc, unsigned char scope) { @@ -1409,32 +1402,14 @@ struct fib_info *fib_create_info(struct fib_config *cfg, } #endif - err = -ENOBUFS; - - if (fib_info_cnt >= fib_info_hash_size) { - unsigned int new_size = fib_info_hash_size << 1; - struct hlist_head *new_info_hash; - struct hlist_head *new_laddrhash; - size_t bytes; - - if (!new_size) - new_size = 16; - bytes = (size_t)new_size * 
sizeof(struct hlist_head *); - new_info_hash = kvzalloc(bytes, GFP_KERNEL); - new_laddrhash = kvzalloc(bytes, GFP_KERNEL); - if (!new_info_hash || !new_laddrhash) { - kvfree(new_info_hash); - kvfree(new_laddrhash); - } else { - fib_info_hash_move(new_info_hash, new_laddrhash, new_size); - } - if (!fib_info_hash_size) - goto failure; - } + fib_info_hash_grow(net); fi = kzalloc(struct_size(fi, fib_nh, nhs), GFP_KERNEL); - if (!fi) + if (!fi) { + err = -ENOBUFS; goto failure; + } + fi->fib_metrics = ip_fib_metrics_init(cfg->fc_mx, cfg->fc_mx_len, extack); if (IS_ERR(fi->fib_metrics)) { err = PTR_ERR(fi->fib_metrics); @@ -1571,9 +1546,9 @@ struct fib_info *fib_create_info(struct fib_config *cfg, refcount_set(&fi->fib_treeref, 1); refcount_set(&fi->fib_clntref, 1); - fib_info_cnt++; - hlist_add_head(&fi->fib_hash, - &fib_info_hash[fib_info_hashfn(fi)]); + net->ipv4.fib_info_cnt++; + hlist_add_head(&fi->fib_hash, fib_info_hash_bucket(fi)); + if (fi->fib_prefsrc) { struct hlist_head *head; @@ -1855,7 +1830,7 @@ int fib_sync_down_addr(struct net_device *dev, __be32 local) struct fib_info *fi; int ret = 0; - if (!fib_info_laddrhash || local == 0) + if (!local) return 0; head = fib_info_laddrhash_bucket(net, local); @@ -2257,3 +2232,22 @@ void fib_select_path(struct net *net, struct fib_result *res, fl4->saddr = inet_select_addr(l3mdev, 0, RT_SCOPE_LINK); } } + +int __net_init fib4_semantics_init(struct net *net) +{ + unsigned int hash_bits = 4; + + net->ipv4.fib_info_hash = fib_info_hash_alloc(hash_bits); + if (!net->ipv4.fib_info_hash) + return -ENOMEM; + + net->ipv4.fib_info_hash_bits = hash_bits; + net->ipv4.fib_info_cnt = 0; + + return 0; +} + +void __net_exit fib4_semantics_exit(struct net *net) +{ + fib_info_hash_free(net->ipv4.fib_info_hash); +} diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c index d6411ac810961..59a6f0a9638f9 100644 --- a/net/ipv4/fib_trie.c +++ b/net/ipv4/fib_trie.c @@ -1187,22 +1187,6 @@ static int fib_insert_alias(struct trie *t, struct key_vector *tp, return 0; } -static bool fib_valid_key_len(u32 key, u8 plen, struct netlink_ext_ack *extack) -{ - if (plen > KEYLENGTH) { - NL_SET_ERR_MSG(extack, "Invalid prefix length"); - return false; - } - - if ((plen < KEYLENGTH) && (key << plen)) { - NL_SET_ERR_MSG(extack, - "Invalid prefix for given prefix length"); - return false; - } - - return true; -} - static void fib_remove_alias(struct trie *t, struct key_vector *tp, struct key_vector *l, struct fib_alias *old); @@ -1223,9 +1207,6 @@ int fib_table_insert(struct net *net, struct fib_table *tb, key = ntohl(cfg->fc_dst); - if (!fib_valid_key_len(key, plen, extack)) - return -EINVAL; - pr_debug("Insert table=%u %08x/%d\n", tb->tb_id, key, plen); fi = fib_create_info(cfg, extack); @@ -1717,9 +1698,6 @@ int fib_table_delete(struct net *net, struct fib_table *tb, key = ntohl(cfg->fc_dst); - if (!fib_valid_key_len(key, plen, extack)) - return -EINVAL; - l = fib_find_node(t, &tp, key); if (!l) return -ESRCH; diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c index 5482edb5aade2..717cb7d3607a1 100644 --- a/net/ipv4/icmp.c +++ b/net/ipv4/icmp.c @@ -405,7 +405,6 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb) struct ipcm_cookie ipc; struct flowi4 fl4; struct sock *sk; - struct inet_sock *inet; __be32 daddr, saddr; u32 mark = IP4_REPLY_MARK(net, skb->mark); int type = icmp_param->data.icmph.type; @@ -424,12 +423,11 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb) sk = icmp_xmit_lock(net); if (!sk) goto out_bh_enable; - inet = 
inet_sk(sk); icmp_param->data.icmph.checksum = 0; ipcm_init(&ipc); - inet->tos = ip_hdr(skb)->tos; + ipc.tos = ip_hdr(skb)->tos; ipc.sockc.mark = mark; daddr = ipc.addr = ip_hdr(skb)->saddr; saddr = fib_compute_spec_dst(skb); @@ -737,8 +735,8 @@ void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info, icmp_param.data.icmph.checksum = 0; icmp_param.skb = skb_in; icmp_param.offset = skb_network_offset(skb_in); - inet_sk(sk)->tos = tos; ipcm_init(&ipc); + ipc.tos = tos; ipc.addr = iph->saddr; ipc.opt = &icmp_param.replyopts.opt; ipc.sockc.mark = mark; @@ -1250,22 +1248,6 @@ int icmp_rcv(struct sk_buff *skb) goto reason_check; } - if (icmph->type == ICMP_EXT_ECHOREPLY) { - reason = ping_rcv(skb); - goto reason_check; - } - - /* - * 18 is the highest 'known' ICMP type. Anything else is a mystery - * - * RFC 1122: 3.2.2 Unknown ICMP messages types MUST be silently - * discarded. - */ - if (icmph->type > NR_ICMP_TYPES) { - reason = SKB_DROP_REASON_UNHANDLED_PROTO; - goto error; - } - /* * Parse the ICMP message */ @@ -1292,6 +1274,23 @@ int icmp_rcv(struct sk_buff *skb) } } + if (icmph->type == ICMP_EXT_ECHOREPLY || + icmph->type == ICMP_ECHOREPLY) { + reason = ping_rcv(skb); + return reason ? NET_RX_DROP : NET_RX_SUCCESS; + } + + /* + * 18 is the highest 'known' ICMP type. Anything else is a mystery + * + * RFC 1122: 3.2.2 Unknown ICMP messages types MUST be silently + * discarded. + */ + if (icmph->type > NR_ICMP_TYPES) { + reason = SKB_DROP_REASON_UNHANDLED_PROTO; + goto error; + } + reason = icmp_pointers[icmph->type].handler(skb); reason_check: if (!reason) { diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c index 3da126cea884d..2c394c364cb9a 100644 --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c @@ -81,6 +81,7 @@ #include #include #include +#include "igmp_internal.h" #include #include #include @@ -1432,14 +1433,16 @@ static void ip_mc_hash_remove(struct in_device *in_dev, *mc_hash = im->next_hash; } -static int inet_fill_ifmcaddr(struct sk_buff *skb, struct net_device *dev, - const struct ip_mc_list *im, int event) +int inet_fill_ifmcaddr(struct sk_buff *skb, struct net_device *dev, + const struct ip_mc_list *im, + struct inet_fill_args *args) { struct ifa_cacheinfo ci; struct ifaddrmsg *ifm; struct nlmsghdr *nlh; - nlh = nlmsg_put(skb, 0, 0, event, sizeof(struct ifaddrmsg), 0); + nlh = nlmsg_put(skb, args->portid, args->seq, args->event, + sizeof(struct ifaddrmsg), args->flags); if (!nlh) return -EMSGSIZE; @@ -1468,6 +1471,9 @@ static int inet_fill_ifmcaddr(struct sk_buff *skb, struct net_device *dev, static void inet_ifmcaddr_notify(struct net_device *dev, const struct ip_mc_list *im, int event) { + struct inet_fill_args fillargs = { + .event = event, + }; struct net *net = dev_net(dev); struct sk_buff *skb; int err = -ENOMEM; @@ -1479,7 +1485,7 @@ static void inet_ifmcaddr_notify(struct net_device *dev, if (!skb) goto error; - err = inet_fill_ifmcaddr(skb, dev, im, event); + err = inet_fill_ifmcaddr(skb, dev, im, &fillargs); if (err < 0) { WARN_ON_ONCE(err == -EMSGSIZE); nlmsg_free(skb); diff --git a/net/ipv4/igmp_internal.h b/net/ipv4/igmp_internal.h new file mode 100644 index 0000000000000..0a1bcc8ec8e15 --- /dev/null +++ b/net/ipv4/igmp_internal.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +#ifndef _LINUX_IGMP_INTERNAL_H +#define _LINUX_IGMP_INTERNAL_H + +struct inet_fill_args { + u32 portid; + u32 seq; + int event; + unsigned int flags; + int netnsid; + int ifindex; +}; + +int inet_fill_ifmcaddr(struct sk_buff *skb, struct net_device *dev, + const 
struct ip_mc_list *im, + struct inet_fill_args *args); +#endif diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index e4decfb270fa1..dd5cf8914a28d 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c @@ -157,12 +157,10 @@ static bool inet_use_bhash2_on_bind(const struct sock *sk) { #if IS_ENABLED(CONFIG_IPV6) if (sk->sk_family == AF_INET6) { - int addr_type = ipv6_addr_type(&sk->sk_v6_rcv_saddr); - - if (addr_type == IPV6_ADDR_ANY) + if (ipv6_addr_any(&sk->sk_v6_rcv_saddr)) return false; - if (addr_type != IPV6_ADDR_MAPPED) + if (!ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr)) return true; } #endif @@ -600,7 +598,7 @@ int inet_csk_get_port(struct sock *sk, unsigned short snum) if (bhash2_created) inet_bind2_bucket_destroy(hinfo->bind2_bucket_cachep, tb2); if (bhash_created) - inet_bind_bucket_destroy(hinfo->bind_bucket_cachep, tb); + inet_bind_bucket_destroy(tb); } if (head2_lock_acquired) spin_unlock(&head2->lock); @@ -799,18 +797,6 @@ void inet_csk_clear_xmit_timers_sync(struct sock *sk) sk_stop_timer_sync(sk, &sk->sk_timer); } -void inet_csk_delete_keepalive_timer(struct sock *sk) -{ - sk_stop_timer(sk, &sk->sk_timer); -} -EXPORT_SYMBOL(inet_csk_delete_keepalive_timer); - -void inet_csk_reset_keepalive_timer(struct sock *sk, unsigned long len) -{ - sk_reset_timer(sk, &sk->sk_timer, jiffies + len); -} -EXPORT_SYMBOL(inet_csk_reset_keepalive_timer); - struct dst_entry *inet_csk_route_req(const struct sock *sk, struct flowi4 *fl4, const struct request_sock *req) @@ -1249,39 +1235,59 @@ struct sock *inet_csk_clone_lock(const struct sock *sk, const gfp_t priority) { struct sock *newsk = sk_clone_lock(sk, priority); + struct inet_connection_sock *newicsk; + struct inet_request_sock *ireq; + struct inet_sock *newinet; - if (newsk) { - struct inet_connection_sock *newicsk = inet_csk(newsk); + if (!newsk) + return NULL; - inet_sk_set_state(newsk, TCP_SYN_RECV); - newicsk->icsk_bind_hash = NULL; - newicsk->icsk_bind2_hash = NULL; + newicsk = inet_csk(newsk); + newinet = inet_sk(newsk); + ireq = inet_rsk(req); - inet_sk(newsk)->inet_dport = inet_rsk(req)->ir_rmt_port; - inet_sk(newsk)->inet_num = inet_rsk(req)->ir_num; - inet_sk(newsk)->inet_sport = htons(inet_rsk(req)->ir_num); + newicsk->icsk_bind_hash = NULL; + newicsk->icsk_bind2_hash = NULL; - /* listeners have SOCK_RCU_FREE, not the children */ - sock_reset_flag(newsk, SOCK_RCU_FREE); + newinet->inet_dport = ireq->ir_rmt_port; + newinet->inet_num = ireq->ir_num; + newinet->inet_sport = htons(ireq->ir_num); - inet_sk(newsk)->mc_list = NULL; + newsk->sk_bound_dev_if = ireq->ir_iif; - newsk->sk_mark = inet_rsk(req)->ir_mark; - atomic64_set(&newsk->sk_cookie, - atomic64_read(&inet_rsk(req)->ir_cookie)); + newsk->sk_daddr = ireq->ir_rmt_addr; + newsk->sk_rcv_saddr = ireq->ir_loc_addr; + newinet->inet_saddr = ireq->ir_loc_addr; - newicsk->icsk_retransmits = 0; - newicsk->icsk_backoff = 0; - newicsk->icsk_probes_out = 0; - newicsk->icsk_probes_tstamp = 0; +#if IS_ENABLED(CONFIG_IPV6) + newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr; + newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr; +#endif - /* Deinitialize accept_queue to trap illegal accesses. 
*/ - memset(&newicsk->icsk_accept_queue, 0, sizeof(newicsk->icsk_accept_queue)); + /* listeners have SOCK_RCU_FREE, not the children */ + sock_reset_flag(newsk, SOCK_RCU_FREE); - inet_clone_ulp(req, newsk, priority); + inet_sk(newsk)->mc_list = NULL; + + newsk->sk_mark = inet_rsk(req)->ir_mark; + atomic64_set(&newsk->sk_cookie, + atomic64_read(&inet_rsk(req)->ir_cookie)); + + newicsk->icsk_retransmits = 0; + newicsk->icsk_backoff = 0; + newicsk->icsk_probes_out = 0; + newicsk->icsk_probes_tstamp = 0; + + /* Deinitialize accept_queue to trap illegal accesses. */ + memset(&newicsk->icsk_accept_queue, 0, + sizeof(newicsk->icsk_accept_queue)); + + inet_sk_set_state(newsk, TCP_SYN_RECV); + + inet_clone_ulp(req, newsk, priority); + + security_inet_csk_clone(newsk, req); - security_inet_csk_clone(newsk, req); - } return newsk; } EXPORT_SYMBOL_GPL(inet_csk_clone_lock); @@ -1547,17 +1553,6 @@ void inet_csk_listen_stop(struct sock *sk) } EXPORT_SYMBOL_GPL(inet_csk_listen_stop); -void inet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr) -{ - struct sockaddr_in *sin = (struct sockaddr_in *)uaddr; - const struct inet_sock *inet = inet_sk(sk); - - sin->sin_family = AF_INET; - sin->sin_addr.s_addr = inet->inet_daddr; - sin->sin_port = inet->inet_dport; -} -EXPORT_SYMBOL_GPL(inet_csk_addr2sockaddr); - static struct dst_entry *inet_csk_rebuild_route(struct sock *sk, struct flowi *fl) { const struct inet_sock *inet = inet_sk(sk); diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c index 321acc8abf17e..c2bb91d9e9ffd 100644 --- a/net/ipv4/inet_diag.c +++ b/net/ipv4/inet_diag.c @@ -282,7 +282,7 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk, struct inet_diag_meminfo minfo = { .idiag_rmem = sk_rmem_alloc_get(sk), .idiag_wmem = READ_ONCE(sk->sk_wmem_queued), - .idiag_fmem = sk_forward_alloc_get(sk), + .idiag_fmem = READ_ONCE(sk->sk_forward_alloc), .idiag_tmem = sk_wmem_alloc_get(sk), }; @@ -315,12 +315,12 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk, r->idiag_timer = 1; r->idiag_retrans = icsk->icsk_retransmits; r->idiag_expires = - jiffies_delta_to_msecs(icsk->icsk_timeout - jiffies); + jiffies_delta_to_msecs(icsk_timeout(icsk) - jiffies); } else if (icsk_pending == ICSK_TIME_PROBE0) { r->idiag_timer = 4; r->idiag_retrans = icsk->icsk_probes_out; r->idiag_expires = - jiffies_delta_to_msecs(icsk->icsk_timeout - jiffies); + jiffies_delta_to_msecs(icsk_timeout(icsk) - jiffies); } else if (timer_pending(&sk->sk_timer)) { r->idiag_timer = 2; r->idiag_retrans = icsk->icsk_probes_out; diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c index d179a2c842227..19fae4811ab28 100644 --- a/net/ipv4/inet_fragment.c +++ b/net/ipv4/inet_fragment.c @@ -145,8 +145,7 @@ static void inet_frags_free_cb(void *ptr, void *arg) } spin_unlock_bh(&fq->lock); - if (refcount_sub_and_test(count, &fq->refcnt)) - inet_frag_destroy(fq); + inet_frag_putn(fq, count); } static LLIST_HEAD(fqdir_free_list); @@ -226,10 +225,10 @@ void fqdir_exit(struct fqdir *fqdir) } EXPORT_SYMBOL(fqdir_exit); -void inet_frag_kill(struct inet_frag_queue *fq) +void inet_frag_kill(struct inet_frag_queue *fq, int *refs) { if (del_timer(&fq->timer)) - refcount_dec(&fq->refcnt); + (*refs)++; if (!(fq->flags & INET_FRAG_COMPLETE)) { struct fqdir *fqdir = fq->fqdir; @@ -244,7 +243,7 @@ void inet_frag_kill(struct inet_frag_queue *fq) if (!READ_ONCE(fqdir->dead)) { rhashtable_remove_fast(&fqdir->rhashtable, &fq->node, fqdir->f->rhash_params); - refcount_dec(&fq->refcnt); + (*refs)++; } 
else { fq->flags |= INET_FRAG_HASH_DEAD; } @@ -328,7 +327,8 @@ static struct inet_frag_queue *inet_frag_alloc(struct fqdir *fqdir, timer_setup(&q->timer, f->frag_expire, 0); spin_lock_init(&q->lock); - refcount_set(&q->refcnt, 3); + /* One reference for the timer, one for the hash table. */ + refcount_set(&q->refcnt, 2); return q; } @@ -350,15 +350,20 @@ static struct inet_frag_queue *inet_frag_create(struct fqdir *fqdir, *prev = rhashtable_lookup_get_insert_key(&fqdir->rhashtable, &q->key, &q->node, f->rhash_params); if (*prev) { + /* We could not insert in the hash table, + * we need to cancel what inet_frag_alloc() + * anticipated. + */ + int refs = 1; + q->flags |= INET_FRAG_COMPLETE; - inet_frag_kill(q); - inet_frag_destroy(q); + inet_frag_kill(q, &refs); + inet_frag_putn(q, refs); return NULL; } return q; } -/* TODO : call from rcu_read_lock() and no longer use refcount_inc_not_zero() */ struct inet_frag_queue *inet_frag_find(struct fqdir *fqdir, void *key) { /* This pairs with WRITE_ONCE() in fqdir_pre_exit(). */ @@ -368,17 +373,11 @@ struct inet_frag_queue *inet_frag_find(struct fqdir *fqdir, void *key) if (!high_thresh || frag_mem_limit(fqdir) > high_thresh) return NULL; - rcu_read_lock(); - prev = rhashtable_lookup(&fqdir->rhashtable, key, fqdir->f->rhash_params); if (!prev) fq = inet_frag_create(fqdir, key, &prev); - if (!IS_ERR_OR_NULL(prev)) { + if (!IS_ERR_OR_NULL(prev)) fq = prev; - if (!refcount_inc_not_zero(&fq->refcnt)) - fq = NULL; - } - rcu_read_unlock(); return fq; } EXPORT_SYMBOL(inet_frag_find); diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c index 9bfcfd016e182..5bf163f756e95 100644 --- a/net/ipv4/inet_hashtables.c +++ b/net/ipv4/inet_hashtables.c @@ -35,8 +35,8 @@ u32 inet_ehashfn(const struct net *net, const __be32 laddr, { net_get_random_once(&inet_ehash_secret, sizeof(inet_ehash_secret)); - return __inet_ehashfn(laddr, lport, faddr, fport, - inet_ehash_secret + net_hash_mix(net)); + return lport + __inet_ehashfn(laddr, 0, faddr, fport, + inet_ehash_secret + net_hash_mix(net)); } EXPORT_SYMBOL_GPL(inet_ehashfn); @@ -76,7 +76,7 @@ struct inet_bind_bucket *inet_bind_bucket_create(struct kmem_cache *cachep, tb->fastreuse = 0; tb->fastreuseport = 0; INIT_HLIST_HEAD(&tb->bhash2); - hlist_add_head(&tb->node, &head->chain); + hlist_add_head_rcu(&tb->node, &head->chain); } return tb; } @@ -84,11 +84,11 @@ struct inet_bind_bucket *inet_bind_bucket_create(struct kmem_cache *cachep, /* * Caller must hold hashbucket lock for this tb with local BH disabled */ -void inet_bind_bucket_destroy(struct kmem_cache *cachep, struct inet_bind_bucket *tb) +void inet_bind_bucket_destroy(struct inet_bind_bucket *tb) { if (hlist_empty(&tb->bhash2)) { - __hlist_del(&tb->node); - kmem_cache_free(cachep, tb); + hlist_del_rcu(&tb->node); + kfree_rcu(tb, rcu); } } @@ -201,7 +201,7 @@ static void __inet_put_port(struct sock *sk) } spin_unlock(&head2->lock); - inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb); + inet_bind_bucket_destroy(tb); spin_unlock(&head->lock); } @@ -285,7 +285,7 @@ int __inet_inherit_port(const struct sock *sk, struct sock *child) error: if (created_inet_bind_bucket) - inet_bind_bucket_destroy(table->bind_bucket_cachep, tb); + inet_bind_bucket_destroy(tb); spin_unlock(&head2->lock); spin_unlock(&head->lock); return -ENOMEM; @@ -537,7 +537,9 @@ EXPORT_SYMBOL_GPL(__inet_lookup_established); /* called with local bh disabled */ static int __inet_check_established(struct inet_timewait_death_row *death_row, struct sock *sk, __u16 lport, - struct 
inet_timewait_sock **twp) + struct inet_timewait_sock **twp, + bool rcu_lookup, + u32 hash) { struct inet_hashinfo *hinfo = death_row->hashinfo; struct inet_sock *inet = inet_sk(sk); @@ -548,14 +550,25 @@ static int __inet_check_established(struct inet_timewait_death_row *death_row, int sdif = l3mdev_master_ifindex_by_index(net, dif); INET_ADDR_COOKIE(acookie, saddr, daddr); const __portpair ports = INET_COMBINED_PORTS(inet->inet_dport, lport); - unsigned int hash = inet_ehashfn(net, daddr, lport, - saddr, inet->inet_dport); struct inet_ehash_bucket *head = inet_ehash_bucket(hinfo, hash); - spinlock_t *lock = inet_ehash_lockp(hinfo, hash); - struct sock *sk2; - const struct hlist_nulls_node *node; struct inet_timewait_sock *tw = NULL; + const struct hlist_nulls_node *node; + struct sock *sk2; + spinlock_t *lock; + if (rcu_lookup) { + sk_nulls_for_each(sk2, node, &head->chain) { + if (sk2->sk_hash != hash || + !inet_match(net, sk2, acookie, ports, dif, sdif)) + continue; + if (sk2->sk_state == TCP_TIME_WAIT) + break; + return -EADDRNOTAVAIL; + } + return 0; + } + + lock = inet_ehash_lockp(hinfo, hash); spin_lock(lock); sk_nulls_for_each(sk2, node, &head->chain) { @@ -993,8 +1006,10 @@ static u32 *table_perturb; int __inet_hash_connect(struct inet_timewait_death_row *death_row, struct sock *sk, u64 port_offset, + u32 hash_port0, int (*check_established)(struct inet_timewait_death_row *, - struct sock *, __u16, struct inet_timewait_sock **)) + struct sock *, __u16, struct inet_timewait_sock **, + bool rcu_lookup, u32 hash)) { struct inet_hashinfo *hinfo = death_row->hashinfo; struct inet_bind_hashbucket *head, *head2; @@ -1012,7 +1027,8 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row, if (port) { local_bh_disable(); - ret = check_established(death_row, sk, port, NULL); + ret = check_established(death_row, sk, port, NULL, false, + hash_port0 + port); local_bh_enable(); return ret; } @@ -1048,6 +1064,22 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row, continue; head = &hinfo->bhash[inet_bhashfn(net, port, hinfo->bhash_size)]; + rcu_read_lock(); + hlist_for_each_entry_rcu(tb, &head->chain, node) { + if (!inet_bind_bucket_match(tb, net, port, l3mdev)) + continue; + if (tb->fastreuse >= 0 || tb->fastreuseport >= 0) { + rcu_read_unlock(); + goto next_port; + } + if (!check_established(death_row, sk, port, &tw, true, + hash_port0 + port)) + break; + rcu_read_unlock(); + goto next_port; + } + rcu_read_unlock(); + spin_lock_bh(&head->lock); /* Does not bother with rcv_saddr checks, because @@ -1057,12 +1089,13 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row, if (inet_bind_bucket_match(tb, net, port, l3mdev)) { if (tb->fastreuse >= 0 || tb->fastreuseport >= 0) - goto next_port; + goto next_port_unlock; WARN_ON(hlist_empty(&tb->bhash2)); if (!check_established(death_row, sk, - port, &tw)) + port, &tw, false, + hash_port0 + port)) goto ok; - goto next_port; + goto next_port_unlock; } } @@ -1076,8 +1109,9 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row, tb->fastreuse = -1; tb->fastreuseport = -1; goto ok; -next_port: +next_port_unlock: spin_unlock_bh(&head->lock); +next_port: cond_resched(); } @@ -1149,7 +1183,7 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row, spin_unlock(&head2->lock); if (tb_created) - inet_bind_bucket_destroy(hinfo->bind_bucket_cachep, tb); + inet_bind_bucket_destroy(tb); spin_unlock(&head->lock); if (tw) @@ -1166,11 +1200,18 @@ int __inet_hash_connect(struct inet_timewait_death_row 
*death_row, int inet_hash_connect(struct inet_timewait_death_row *death_row, struct sock *sk) { + const struct inet_sock *inet = inet_sk(sk); + const struct net *net = sock_net(sk); u64 port_offset = 0; + u32 hash_port0; if (!inet_sk(sk)->inet_num) port_offset = inet_sk_port_offset(sk); - return __inet_hash_connect(death_row, sk, port_offset, + + hash_port0 = inet_ehashfn(net, inet->inet_rcv_saddr, 0, + inet->inet_daddr, inet->inet_dport); + + return __inet_hash_connect(death_row, sk, port_offset, hash_port0, __inet_check_established); } EXPORT_SYMBOL_GPL(inet_hash_connect); @@ -1230,22 +1271,37 @@ int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo) { unsigned int locksz = sizeof(spinlock_t); unsigned int i, nblocks = 1; + spinlock_t *ptr = NULL; - if (locksz != 0) { - /* allocate 2 cache lines or at least one spinlock per cpu */ - nblocks = max(2U * L1_CACHE_BYTES / locksz, 1U); - nblocks = roundup_pow_of_two(nblocks * num_possible_cpus()); + if (locksz == 0) + goto set_mask; - /* no more locks than number of hash buckets */ - nblocks = min(nblocks, hashinfo->ehash_mask + 1); + /* Allocate 2 cache lines or at least one spinlock per cpu. */ + nblocks = max(2U * L1_CACHE_BYTES / locksz, 1U) * num_possible_cpus(); - hashinfo->ehash_locks = kvmalloc_array(nblocks, locksz, GFP_KERNEL); - if (!hashinfo->ehash_locks) - return -ENOMEM; + /* At least one page per NUMA node. */ + nblocks = max(nblocks, num_online_nodes() * PAGE_SIZE / locksz); + + nblocks = roundup_pow_of_two(nblocks); + + /* No more locks than number of hash buckets. */ + nblocks = min(nblocks, hashinfo->ehash_mask + 1); - for (i = 0; i < nblocks; i++) - spin_lock_init(&hashinfo->ehash_locks[i]); + if (num_online_nodes() > 1) { + /* Use vmalloc() to allow NUMA policy to spread pages + * on all available nodes if desired. 
+ */ + ptr = vmalloc_array(nblocks, locksz); + } + if (!ptr) { + ptr = kvmalloc_array(nblocks, locksz, GFP_KERNEL); + if (!ptr) + return -ENOMEM; } + for (i = 0; i < nblocks; i++) + spin_lock_init(&ptr[i]); + hashinfo->ehash_locks = ptr; +set_mask: hashinfo->ehash_locks_mask = nblocks - 1; return 0; } diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c index 337390ba85b40..aded4bf1bc16d 100644 --- a/net/ipv4/inet_timewait_sock.c +++ b/net/ipv4/inet_timewait_sock.c @@ -39,7 +39,7 @@ void inet_twsk_bind_unhash(struct inet_timewait_sock *tw, tw->tw_tb = NULL; tw->tw_tb2 = NULL; inet_bind2_bucket_destroy(hashinfo->bind2_bucket_cachep, tb2); - inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb); + inet_bind_bucket_destroy(tb); __sock_put((struct sock *)tw); } diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c index b8b23a77ceb4f..7b1e0a2d69066 100644 --- a/net/ipv4/inetpeer.c +++ b/net/ipv4/inetpeer.c @@ -60,7 +60,7 @@ void inet_peer_base_init(struct inet_peer_base *bp) seqlock_init(&bp->lock); bp->total = 0; } -EXPORT_SYMBOL_GPL(inet_peer_base_init); +EXPORT_IPV6_MOD_GPL(inet_peer_base_init); #define PEER_MAX_GC 32 @@ -218,7 +218,7 @@ struct inet_peer *inet_getpeer(struct inet_peer_base *base, return p; } -EXPORT_SYMBOL_GPL(inet_getpeer); +EXPORT_IPV6_MOD_GPL(inet_getpeer); void inet_putpeer(struct inet_peer *p) { @@ -269,7 +269,7 @@ bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout) WRITE_ONCE(peer->rate_tokens, token); return rc; } -EXPORT_SYMBOL(inet_peer_xrlim_allow); +EXPORT_IPV6_MOD(inet_peer_xrlim_allow); void inetpeer_invalidate_tree(struct inet_peer_base *base) { @@ -286,4 +286,4 @@ void inetpeer_invalidate_tree(struct inet_peer_base *base) base->total = 0; } -EXPORT_SYMBOL(inetpeer_invalidate_tree); +EXPORT_IPV6_MOD(inetpeer_invalidate_tree); diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c index 7a435746a22de..77f395b28ec74 100644 --- a/net/ipv4/ip_fragment.c +++ b/net/ipv4/ip_fragment.c @@ -76,7 +76,8 @@ static u8 ip4_frag_ecn(u8 tos) static struct inet_frags ip4_frags; static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb, - struct sk_buff *prev_tail, struct net_device *dev); + struct sk_buff *prev_tail, struct net_device *dev, + int *refs); static void ip4_frag_init(struct inet_frag_queue *q, const void *a) @@ -107,22 +108,6 @@ static void ip4_frag_free(struct inet_frag_queue *q) inet_putpeer(qp->peer); } - -/* Destruction primitives. */ - -static void ipq_put(struct ipq *ipq) -{ - inet_frag_put(&ipq->q); -} - -/* Kill ipq entry. It is not destroyed immediately, - * because caller (and someone more) holds reference count. 
- */ -static void ipq_kill(struct ipq *ipq) -{ - inet_frag_kill(&ipq->q); -} - static bool frag_expire_skip_icmp(u32 user) { return user == IP_DEFRAG_AF_PACKET || @@ -143,6 +128,7 @@ static void ip_expire(struct timer_list *t) struct sk_buff *head = NULL; struct net *net; struct ipq *qp; + int refs = 1; qp = container_of(frag, struct ipq, q); net = qp->q.fqdir->net; @@ -159,7 +145,7 @@ static void ip_expire(struct timer_list *t) goto out; qp->q.flags |= INET_FRAG_DROP; - ipq_kill(qp); + inet_frag_kill(&qp->q, &refs); __IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS); __IP_INC_STATS(net, IPSTATS_MIB_REASMTIMEOUT); @@ -202,7 +188,7 @@ static void ip_expire(struct timer_list *t) out_rcu_unlock: rcu_read_unlock(); kfree_skb_reason(head, reason); - ipq_put(qp); + inet_frag_putn(&qp->q, refs); } /* Find the correct entry in the "incomplete datagrams" queue for @@ -278,7 +264,7 @@ static int ip_frag_reinit(struct ipq *qp) } /* Add new segment to existing queue. */ -static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb) +static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb, int *refs) { struct net *net = qp->q.fqdir->net; int ihl, end, flags, offset; @@ -298,7 +284,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb) if (!(IPCB(skb)->flags & IPSKB_FRAG_COMPLETE) && unlikely(ip_frag_too_far(qp)) && unlikely(err = ip_frag_reinit(qp))) { - ipq_kill(qp); + inet_frag_kill(&qp->q, refs); goto err; } @@ -382,10 +368,10 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb) unsigned long orefdst = skb->_skb_refdst; skb->_skb_refdst = 0UL; - err = ip_frag_reasm(qp, skb, prev_tail, dev); + err = ip_frag_reasm(qp, skb, prev_tail, dev, refs); skb->_skb_refdst = orefdst; if (err) - inet_frag_kill(&qp->q); + inet_frag_kill(&qp->q, refs); return err; } @@ -402,7 +388,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb) err = -EINVAL; __IP_INC_STATS(net, IPSTATS_MIB_REASM_OVERLAPS); discard_qp: - inet_frag_kill(&qp->q); + inet_frag_kill(&qp->q, refs); __IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS); err: kfree_skb_reason(skb, reason); @@ -416,7 +402,8 @@ static bool ip_frag_coalesce_ok(const struct ipq *qp) /* Build a new IP datagram from all its fragments. 
*/ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb, - struct sk_buff *prev_tail, struct net_device *dev) + struct sk_buff *prev_tail, struct net_device *dev, + int *refs) { struct net *net = qp->q.fqdir->net; struct iphdr *iph; @@ -424,7 +411,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb, int len, err; u8 ecn; - ipq_kill(qp); + inet_frag_kill(&qp->q, refs); ecn = ip_frag_ecn_table[qp->ecn]; if (unlikely(ecn == 0xff)) { @@ -496,18 +483,21 @@ int ip_defrag(struct net *net, struct sk_buff *skb, u32 user) __IP_INC_STATS(net, IPSTATS_MIB_REASMREQDS); /* Lookup (or create) queue header */ + rcu_read_lock(); qp = ip_find(net, ip_hdr(skb), user, vif); if (qp) { - int ret; + int ret, refs = 0; spin_lock(&qp->q.lock); - ret = ip_frag_queue(qp, skb); + ret = ip_frag_queue(qp, skb, &refs); spin_unlock(&qp->q.lock); - ipq_put(qp); + rcu_read_unlock(); + inet_frag_putn(&qp->q, refs); return ret; } + rcu_read_unlock(); __IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS); kfree_skb(skb); diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index ed1b6b44faf80..26d15f907551e 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c @@ -141,7 +141,6 @@ static int ipgre_err(struct sk_buff *skb, u32 info, const struct iphdr *iph; const int type = icmp_hdr(skb)->type; const int code = icmp_hdr(skb)->code; - unsigned int data_len = 0; struct ip_tunnel *t; if (tpi->proto == htons(ETH_P_TEB)) @@ -182,7 +181,6 @@ static int ipgre_err(struct sk_buff *skb, u32 info, case ICMP_TIME_EXCEEDED: if (code != ICMP_EXC_TTL) return 0; - data_len = icmp_hdr(skb)->un.reserved[1] * 4; /* RFC 4884 4.1 */ break; case ICMP_REDIRECT: @@ -190,10 +188,16 @@ static int ipgre_err(struct sk_buff *skb, u32 info, } #if IS_ENABLED(CONFIG_IPV6) - if (tpi->proto == htons(ETH_P_IPV6) && - !ip6_err_gen_icmpv6_unreach(skb, iph->ihl * 4 + tpi->hdr_len, - type, data_len)) - return 0; + if (tpi->proto == htons(ETH_P_IPV6)) { + unsigned int data_len = 0; + + if (type == ICMP_TIME_EXCEEDED) + data_len = icmp_hdr(skb)->un.reserved[1] * 4; /* RFC 4884 4.1 */ + + if (!ip6_err_gen_icmpv6_unreach(skb, iph->ihl * 4 + tpi->hdr_len, + type, data_len)) + return 0; + } #endif if (t->parms.iph.daddr == 0 || @@ -1392,10 +1396,12 @@ ipgre_newlink_encap_setup(struct net_device *dev, struct nlattr *data[]) return 0; } -static int ipgre_newlink(struct net *src_net, struct net_device *dev, - struct nlattr *tb[], struct nlattr *data[], +static int ipgre_newlink(struct net_device *dev, + struct rtnl_newlink_params *params, struct netlink_ext_ack *extack) { + struct nlattr **data = params->data; + struct nlattr **tb = params->tb; struct ip_tunnel_parm_kern p; __u32 fwmark = 0; int err; @@ -1407,13 +1413,16 @@ static int ipgre_newlink(struct net *src_net, struct net_device *dev, err = ipgre_netlink_parms(dev, data, tb, &p, &fwmark); if (err < 0) return err; - return ip_tunnel_newlink(dev, tb, &p, fwmark); + return ip_tunnel_newlink(params->link_net ? 
: dev_net(dev), dev, tb, &p, + fwmark); } -static int erspan_newlink(struct net *src_net, struct net_device *dev, - struct nlattr *tb[], struct nlattr *data[], +static int erspan_newlink(struct net_device *dev, + struct rtnl_newlink_params *params, struct netlink_ext_ack *extack) { + struct nlattr **data = params->data; + struct nlattr **tb = params->tb; struct ip_tunnel_parm_kern p; __u32 fwmark = 0; int err; @@ -1425,7 +1434,8 @@ static int erspan_newlink(struct net *src_net, struct net_device *dev, err = erspan_netlink_parms(dev, data, tb, &p, &fwmark); if (err) return err; - return ip_tunnel_newlink(dev, tb, &p, fwmark); + return ip_tunnel_newlink(params->link_net ? : dev_net(dev), dev, tb, &p, + fwmark); } static int ipgre_changelink(struct net_device *dev, struct nlattr *tb[], @@ -1693,6 +1703,7 @@ static struct rtnl_link_ops erspan_link_ops __read_mostly = { struct net_device *gretap_fb_dev_create(struct net *net, const char *name, u8 name_assign_type) { + struct rtnl_newlink_params params = { .src_net = net }; struct nlattr *tb[IFLA_MAX + 1]; struct net_device *dev; LIST_HEAD(list_kill); @@ -1700,6 +1711,7 @@ struct net_device *gretap_fb_dev_create(struct net *net, const char *name, int err; memset(&tb, 0, sizeof(tb)); + params.tb = tb; dev = rtnl_create_link(net, name, name_assign_type, &ipgre_tap_ops, tb, NULL); @@ -1710,7 +1722,7 @@ struct net_device *gretap_fb_dev_create(struct net *net, const char *name, t = netdev_priv(dev); t->collect_md = true; - err = ipgre_newlink(net, dev, tb, NULL, NULL); + err = ipgre_newlink(dev, ¶ms, NULL); if (err < 0) { free_netdev(dev); return ERR_PTR(err); diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index ea7a260bec8aa..7b7c5e4ea5d81 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -75,7 +75,6 @@ #include #include #include -#include #include #include #include @@ -1640,11 +1639,11 @@ void ip_send_unicast_reply(struct sock *sk, const struct sock *orig_sk, if (IS_ERR(rt)) return; - inet_sk(sk)->tos = arg->tos & ~INET_ECN_MASK; + inet_sk(sk)->tos = arg->tos; sk->sk_protocol = ip_hdr(skb)->protocol; sk->sk_bound_dev_if = arg->bound_dev_if; - sk->sk_sndbuf = READ_ONCE(sysctl_wmem_default); + sk->sk_sndbuf = READ_ONCE(net->core.sysctl_wmem_default); ipc.sockc.mark = fl4.flowi4_mark; err = ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base, len, 0, &ipc, &rt, MSG_DONTWAIT); diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c index 09b73acf037ae..1024f961ec9ad 100644 --- a/net/ipv4/ip_tunnel.c +++ b/net/ipv4/ip_tunnel.c @@ -40,6 +40,7 @@ #include #include #include +#include #include #include #include @@ -1162,7 +1163,7 @@ int ip_tunnel_init_net(struct net *net, unsigned int ip_tnl_net_id, * Allowing to move it to another netns is clearly unsafe. 
*/ if (!IS_ERR(itn->fb_tunnel_dev)) { - itn->fb_tunnel_dev->netns_local = true; + itn->fb_tunnel_dev->netns_immutable = true; itn->fb_tunnel_dev->mtu = ip_tunnel_bind_dev(itn->fb_tunnel_dev); ip_tunnel_add(itn, netdev_priv(itn->fb_tunnel_dev)); itn->type = itn->fb_tunnel_dev->type; @@ -1213,11 +1214,11 @@ void ip_tunnel_delete_nets(struct list_head *net_list, unsigned int id, } EXPORT_SYMBOL_GPL(ip_tunnel_delete_nets); -int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[], - struct ip_tunnel_parm_kern *p, __u32 fwmark) +int ip_tunnel_newlink(struct net *net, struct net_device *dev, + struct nlattr *tb[], struct ip_tunnel_parm_kern *p, + __u32 fwmark) { struct ip_tunnel *nt; - struct net *net = dev_net(dev); struct ip_tunnel_net *itn; int mtu; int err; @@ -1326,7 +1327,6 @@ int ip_tunnel_init(struct net_device *dev) } tunnel->dev = dev; - tunnel->net = dev_net(dev); strscpy(tunnel->parms.name, dev->name); iph->version = 4; iph->ihl = 5; diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c index f0b4419cef349..159b4473290e0 100644 --- a/net/ipv4/ip_vti.c +++ b/net/ipv4/ip_vti.c @@ -575,15 +575,18 @@ static void vti_netlink_parms(struct nlattr *data[], *fwmark = nla_get_u32(data[IFLA_VTI_FWMARK]); } -static int vti_newlink(struct net *src_net, struct net_device *dev, - struct nlattr *tb[], struct nlattr *data[], +static int vti_newlink(struct net_device *dev, + struct rtnl_newlink_params *params, struct netlink_ext_ack *extack) { + struct nlattr **data = params->data; struct ip_tunnel_parm_kern parms; + struct nlattr **tb = params->tb; __u32 fwmark = 0; vti_netlink_parms(data, &parms, &fwmark); - return ip_tunnel_newlink(dev, tb, &parms, fwmark); + return ip_tunnel_newlink(params->link_net ? : dev_net(dev), dev, tb, + &parms, fwmark); } static int vti_changelink(struct net_device *dev, struct nlattr *tb[], diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c index dc0db5895e0e0..bab0bf90c908d 100644 --- a/net/ipv4/ipip.c +++ b/net/ipv4/ipip.c @@ -436,11 +436,13 @@ static void ipip_netlink_parms(struct nlattr *data[], *fwmark = nla_get_u32(data[IFLA_IPTUN_FWMARK]); } -static int ipip_newlink(struct net *src_net, struct net_device *dev, - struct nlattr *tb[], struct nlattr *data[], +static int ipip_newlink(struct net_device *dev, + struct rtnl_newlink_params *params, struct netlink_ext_ack *extack) { struct ip_tunnel *t = netdev_priv(dev); + struct nlattr **data = params->data; + struct nlattr **tb = params->tb; struct ip_tunnel_encap ipencap; struct ip_tunnel_parm_kern p; __u32 fwmark = 0; @@ -453,7 +455,8 @@ static int ipip_newlink(struct net *src_net, struct net_device *dev, } ipip_netlink_parms(data, &p, &t->collect_md, &fwmark); - return ip_tunnel_newlink(dev, tb, &p, fwmark); + return ip_tunnel_newlink(params->link_net ? 
: dev_net(dev), dev, tb, &p, + fwmark); } static int ipip_changelink(struct net_device *dev, struct nlattr *tb[], diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index 21ae7594a8525..b81c8131e23fc 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c @@ -563,7 +563,7 @@ static void reg_vif_setup(struct net_device *dev) dev->flags = IFF_NOARP; dev->netdev_ops = ®_vif_netdev_ops; dev->needs_free_netdev = true; - dev->netns_local = true; + dev->netns_immutable = true; } static struct net_device *ipmr_reg_vif(struct net *net, struct mr_table *mrt) diff --git a/net/ipv4/netfilter/nft_fib_ipv4.c b/net/ipv4/netfilter/nft_fib_ipv4.c index 625adbc420370..9082ca17e845c 100644 --- a/net/ipv4/netfilter/nft_fib_ipv4.c +++ b/net/ipv4/netfilter/nft_fib_ipv4.c @@ -71,6 +71,11 @@ void nft_fib4_eval(const struct nft_expr *expr, struct nft_regs *regs, const struct net_device *oif; const struct net_device *found; + if (nft_fib_can_skip(pkt)) { + nft_fib_store_result(dest, priv, nft_in(pkt)); + return; + } + /* * Do not set flowi4_oif, it restricts results (for example, asking * for oif 3 will get RTN_UNICAST result even if the daddr exits @@ -85,12 +90,6 @@ void nft_fib4_eval(const struct nft_expr *expr, struct nft_regs *regs, else oif = NULL; - if (nft_hook(pkt) == NF_INET_PRE_ROUTING && - nft_fib_is_loopback(pkt->skb, nft_in(pkt))) { - nft_fib_store_result(dest, priv, nft_in(pkt)); - return; - } - iph = skb_header_pointer(pkt->skb, noff, sizeof(_iph), &_iph); if (!iph) { regs->verdict.code = NFT_BREAK; diff --git a/net/ipv4/nexthop.c b/net/ipv4/nexthop.c index 09a3d73b45baa..467151517023d 100644 --- a/net/ipv4/nexthop.c +++ b/net/ipv4/nexthop.c @@ -1272,10 +1272,8 @@ static int nh_check_attr_group(struct net *net, u16 nh_grp_type, struct netlink_ext_ack *extack) { unsigned int len = nla_len(tb[NHA_GROUP]); - u8 nh_family = AF_UNSPEC; struct nexthop_grp *nhg; unsigned int i, j; - u8 nhg_fdb = 0; if (!len || len & (sizeof(struct nexthop_grp) - 1)) { NL_SET_ERR_MSG(extack, @@ -1307,10 +1305,41 @@ static int nh_check_attr_group(struct net *net, } } - if (tb[NHA_FDB]) - nhg_fdb = 1; nhg = nla_data(tb[NHA_GROUP]); - for (i = 0; i < len; ++i) { + for (i = NHA_GROUP_TYPE + 1; i < tb_size; ++i) { + if (!tb[i]) + continue; + switch (i) { + case NHA_HW_STATS_ENABLE: + case NHA_FDB: + continue; + case NHA_RES_GROUP: + if (nh_grp_type == NEXTHOP_GRP_TYPE_RES) + continue; + break; + } + NL_SET_ERR_MSG(extack, + "No other attributes can be set in nexthop groups"); + return -EINVAL; + } + + return 0; +} + +static int nh_check_attr_group_rtnl(struct net *net, struct nlattr *tb[], + struct netlink_ext_ack *extack) +{ + u8 nh_family = AF_UNSPEC; + struct nexthop_grp *nhg; + unsigned int len; + unsigned int i; + u8 nhg_fdb; + + len = nla_len(tb[NHA_GROUP]) / sizeof(*nhg); + nhg = nla_data(tb[NHA_GROUP]); + nhg_fdb = !!tb[NHA_FDB]; + + for (i = 0; i < len; i++) { struct nexthop *nh; bool is_fdb_nh; @@ -1330,22 +1359,6 @@ static int nh_check_attr_group(struct net *net, return -EINVAL; } } - for (i = NHA_GROUP_TYPE + 1; i < tb_size; ++i) { - if (!tb[i]) - continue; - switch (i) { - case NHA_HW_STATS_ENABLE: - case NHA_FDB: - continue; - case NHA_RES_GROUP: - if (nh_grp_type == NEXTHOP_GRP_TYPE_RES) - continue; - break; - } - NL_SET_ERR_MSG(extack, - "No other attributes can be set in nexthop groups"); - return -EINVAL; - } return 0; } @@ -2679,9 +2692,6 @@ static struct nexthop *nexthop_create_group(struct net *net, int err; int i; - if (WARN_ON(!num_nh)) - return ERR_PTR(-EINVAL); - nh = nexthop_alloc(); if (!nh) return 
ERR_PTR(-ENOMEM); @@ -2915,11 +2925,6 @@ static struct nexthop *nexthop_add(struct net *net, struct nh_config *cfg, struct nexthop *nh; int err; - if (cfg->nlflags & NLM_F_REPLACE && !cfg->nh_id) { - NL_SET_ERR_MSG(extack, "Replace requires nexthop id"); - return ERR_PTR(-EINVAL); - } - if (!cfg->nh_id) { cfg->nh_id = nh_find_unused_id(net); if (!cfg->nh_id) { @@ -3016,19 +3021,13 @@ static int rtm_to_nh_config_grp_res(struct nlattr *res, struct nh_config *cfg, } static int rtm_to_nh_config(struct net *net, struct sk_buff *skb, - struct nlmsghdr *nlh, struct nh_config *cfg, + struct nlmsghdr *nlh, struct nlattr **tb, + struct nh_config *cfg, struct netlink_ext_ack *extack) { struct nhmsg *nhm = nlmsg_data(nlh); - struct nlattr *tb[ARRAY_SIZE(rtm_nh_policy_new)]; int err; - err = nlmsg_parse(nlh, sizeof(*nhm), tb, - ARRAY_SIZE(rtm_nh_policy_new) - 1, - rtm_nh_policy_new, extack); - if (err < 0) - return err; - err = -EINVAL; if (nhm->resvd || nhm->nh_scope) { NL_SET_ERR_MSG(extack, "Invalid values in ancillary header"); @@ -3093,7 +3092,8 @@ static int rtm_to_nh_config(struct net *net, struct sk_buff *skb, NL_SET_ERR_MSG(extack, "Invalid group type"); goto out; } - err = nh_check_attr_group(net, tb, ARRAY_SIZE(tb), + + err = nh_check_attr_group(net, tb, ARRAY_SIZE(rtm_nh_policy_new), cfg->nh_grp_type, extack); if (err) goto out; @@ -3126,25 +3126,6 @@ static int rtm_to_nh_config(struct net *net, struct sk_buff *skb, goto out; } - if (!cfg->nh_fdb && tb[NHA_OIF]) { - cfg->nh_ifindex = nla_get_u32(tb[NHA_OIF]); - if (cfg->nh_ifindex) - cfg->dev = __dev_get_by_index(net, cfg->nh_ifindex); - - if (!cfg->dev) { - NL_SET_ERR_MSG(extack, "Invalid device index"); - goto out; - } else if (!(cfg->dev->flags & IFF_UP)) { - NL_SET_ERR_MSG(extack, "Nexthop device is not up"); - err = -ENETDOWN; - goto out; - } else if (!netif_carrier_ok(cfg->dev)) { - NL_SET_ERR_MSG(extack, "Carrier for nexthop device is down"); - err = -ENETDOWN; - goto out; - } - } - err = -EINVAL; if (tb[NHA_GATEWAY]) { struct nlattr *gwa = tb[NHA_GATEWAY]; @@ -3187,7 +3168,8 @@ static int rtm_to_nh_config(struct net *net, struct sk_buff *skb, } cfg->nh_encap_type = nla_get_u16(tb[NHA_ENCAP_TYPE]); - err = lwtunnel_valid_encap_type(cfg->nh_encap_type, extack); + err = lwtunnel_valid_encap_type(cfg->nh_encap_type, + extack, false); if (err < 0) goto out; @@ -3206,22 +3188,76 @@ static int rtm_to_nh_config(struct net *net, struct sk_buff *skb, return err; } +static int rtm_to_nh_config_rtnl(struct net *net, struct nlattr **tb, + struct nh_config *cfg, + struct netlink_ext_ack *extack) +{ + if (tb[NHA_GROUP]) + return nh_check_attr_group_rtnl(net, tb, extack); + + if (tb[NHA_OIF]) { + cfg->nh_ifindex = nla_get_u32(tb[NHA_OIF]); + if (cfg->nh_ifindex) + cfg->dev = __dev_get_by_index(net, cfg->nh_ifindex); + + if (!cfg->dev) { + NL_SET_ERR_MSG(extack, "Invalid device index"); + return -EINVAL; + } + + if (!(cfg->dev->flags & IFF_UP)) { + NL_SET_ERR_MSG(extack, "Nexthop device is not up"); + return -ENETDOWN; + } + + if (!netif_carrier_ok(cfg->dev)) { + NL_SET_ERR_MSG(extack, "Carrier for nexthop device is down"); + return -ENETDOWN; + } + } + + return 0; +} + /* rtnl */ static int rtm_new_nexthop(struct sk_buff *skb, struct nlmsghdr *nlh, struct netlink_ext_ack *extack) { + struct nlattr *tb[ARRAY_SIZE(rtm_nh_policy_new)]; struct net *net = sock_net(skb->sk); struct nh_config cfg; struct nexthop *nh; int err; - err = rtm_to_nh_config(net, skb, nlh, &cfg, extack); - if (!err) { - nh = nexthop_add(net, &cfg, extack); - if (IS_ERR(nh)) - err 
= PTR_ERR(nh); + err = nlmsg_parse(nlh, sizeof(struct nhmsg), tb, + ARRAY_SIZE(rtm_nh_policy_new) - 1, + rtm_nh_policy_new, extack); + if (err < 0) + goto out; + + err = rtm_to_nh_config(net, skb, nlh, tb, &cfg, extack); + if (err) + goto out; + + if (cfg.nlflags & NLM_F_REPLACE && !cfg.nh_id) { + NL_SET_ERR_MSG(extack, "Replace requires nexthop id"); + err = -EINVAL; + goto out; } + rtnl_net_lock(net); + + err = rtm_to_nh_config_rtnl(net, tb, &cfg, extack); + if (err) + goto unlock; + + nh = nexthop_add(net, &cfg, extack); + if (IS_ERR(nh)) + err = PTR_ERR(nh); + +unlock: + rtnl_net_unlock(net); +out: return err; } @@ -3278,13 +3314,17 @@ static int rtm_del_nexthop(struct sk_buff *skb, struct nlmsghdr *nlh, if (err) return err; + rtnl_net_lock(net); + nh = nexthop_find_by_id(net, id); - if (!nh) - return -ENOENT; + if (nh) + remove_nexthop(net, nh, &nlinfo); + else + err = -ENOENT; - remove_nexthop(net, nh, &nlinfo); + rtnl_net_unlock(net); - return 0; + return err; } /* rtnl */ @@ -4036,18 +4076,20 @@ static struct pernet_operations nexthop_net_ops = { }; static const struct rtnl_msg_handler nexthop_rtnl_msg_handlers[] __initconst = { - {.msgtype = RTM_NEWNEXTHOP, .doit = rtm_new_nexthop}, - {.msgtype = RTM_DELNEXTHOP, .doit = rtm_del_nexthop}, + {.msgtype = RTM_NEWNEXTHOP, .doit = rtm_new_nexthop, + .flags = RTNL_FLAG_DOIT_PERNET}, + {.msgtype = RTM_DELNEXTHOP, .doit = rtm_del_nexthop, + .flags = RTNL_FLAG_DOIT_PERNET}, {.msgtype = RTM_GETNEXTHOP, .doit = rtm_get_nexthop, .dumpit = rtm_dump_nexthop}, {.msgtype = RTM_GETNEXTHOPBUCKET, .doit = rtm_get_nexthop_bucket, .dumpit = rtm_dump_nexthop_bucket}, {.protocol = PF_INET, .msgtype = RTM_NEWNEXTHOP, - .doit = rtm_new_nexthop}, + .doit = rtm_new_nexthop, .flags = RTNL_FLAG_DOIT_PERNET}, {.protocol = PF_INET, .msgtype = RTM_GETNEXTHOP, .dumpit = rtm_dump_nexthop}, {.protocol = PF_INET6, .msgtype = RTM_NEWNEXTHOP, - .doit = rtm_new_nexthop}, + .doit = rtm_new_nexthop, .flags = RTNL_FLAG_DOIT_PERNET}, {.protocol = PF_INET6, .msgtype = RTM_GETNEXTHOP, .dumpit = rtm_dump_nexthop}, }; diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c index 619ddc087957f..c14baa6589c74 100644 --- a/net/ipv4/ping.c +++ b/net/ipv4/ping.c @@ -705,7 +705,7 @@ static int ping_v4_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) struct ip_options_data opt_copy; int free = 0; __be32 saddr, daddr, faddr; - u8 tos, scope; + u8 scope; int err; pr_debug("ping_v4_sendmsg(sk=%p,sk->num=%u)\n", inet, inet->inet_num); @@ -768,7 +768,6 @@ static int ping_v4_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) } faddr = ipc.opt->opt.faddr; } - tos = get_rttos(&ipc, inet); scope = ip_sendmsg_scope(inet, &ipc, msg); if (ipv4_is_multicast(daddr)) { @@ -779,7 +778,8 @@ static int ping_v4_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) } else if (!ipc.oif) ipc.oif = READ_ONCE(inet->uc_index); - flowi4_init_output(&fl4, ipc.oif, ipc.sockc.mark, tos, scope, + flowi4_init_output(&fl4, ipc.oif, ipc.sockc.mark, + ipc.tos & INET_DSCP_MASK, scope, sk->sk_protocol, inet_sk_flowi_flags(sk), faddr, saddr, 0, 0, sk->sk_uid); @@ -966,10 +966,9 @@ EXPORT_SYMBOL_GPL(ping_queue_rcv_skb); enum skb_drop_reason ping_rcv(struct sk_buff *skb) { - enum skb_drop_reason reason = SKB_DROP_REASON_NO_SOCKET; - struct sock *sk; struct net *net = dev_net(skb->dev); struct icmphdr *icmph = icmp_hdr(skb); + struct sock *sk; /* We assume the packet has already been checked by icmp_rcv */ @@ -980,20 +979,11 @@ enum skb_drop_reason ping_rcv(struct sk_buff *skb) skb_push(skb, skb->data - (u8 
*)icmph); sk = ping_lookup(net, skb, ntohs(icmph->un.echo.id)); - if (sk) { - struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); - - pr_debug("rcv on socket %p\n", sk); - if (skb2) - reason = __ping_queue_rcv_skb(sk, skb2); - else - reason = SKB_DROP_REASON_NOMEM; - } - - if (reason) - pr_debug("no socket, dropping\n"); + if (sk) + return __ping_queue_rcv_skb(sk, skb); - return reason; + kfree_skb_reason(skb, SKB_DROP_REASON_NO_SOCKET); + return SKB_DROP_REASON_NO_SOCKET; } EXPORT_SYMBOL_GPL(ping_rcv); diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c index affd21a0f5728..10cbeb76c2745 100644 --- a/net/ipv4/proc.c +++ b/net/ipv4/proc.c @@ -189,6 +189,7 @@ static const struct snmp_mib snmp4_net_list[] = { SNMP_MIB_ITEM("TWKilled", LINUX_MIB_TIMEWAITKILLED), SNMP_MIB_ITEM("PAWSActive", LINUX_MIB_PAWSACTIVEREJECTED), SNMP_MIB_ITEM("PAWSEstab", LINUX_MIB_PAWSESTABREJECTED), + SNMP_MIB_ITEM("TSEcrRejected", LINUX_MIB_TSECRREJECTED), SNMP_MIB_ITEM("PAWSOldAck", LINUX_MIB_PAWS_OLD_ACK), SNMP_MIB_ITEM("DelayedACKs", LINUX_MIB_DELAYEDACKS), SNMP_MIB_ITEM("DelayedACKLocked", LINUX_MIB_DELAYEDACKLOCKED), diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c index 4304a68d1db05..6aace4d55733e 100644 --- a/net/ipv4/raw.c +++ b/net/ipv4/raw.c @@ -486,7 +486,7 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) struct ipcm_cookie ipc; struct rtable *rt = NULL; struct flowi4 fl4; - u8 tos, scope; + u8 scope; int free = 0; __be32 daddr; __be32 saddr; @@ -581,7 +581,6 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) daddr = ipc.opt->opt.faddr; } } - tos = get_rttos(&ipc, inet); scope = ip_sendmsg_scope(inet, &ipc, msg); uc_index = READ_ONCE(inet->uc_index); @@ -606,7 +605,8 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) } } - flowi4_init_output(&fl4, ipc.oif, ipc.sockc.mark, tos, scope, + flowi4_init_output(&fl4, ipc.oif, ipc.sockc.mark, + ipc.tos & INET_DSCP_MASK, scope, hdrincl ? ipc.protocol : sk->sk_protocol, inet_sk_flowi_flags(sk) | (hdrincl ? 
FLOWI_FLAG_KNOWN_NH : 0), diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c index 1948d15f1f281..5459a78b98095 100644 --- a/net/ipv4/syncookies.c +++ b/net/ipv4/syncookies.c @@ -222,7 +222,7 @@ struct sock *tcp_get_cookie_sock(struct sock *sk, struct sk_buff *skb, return NULL; } -EXPORT_SYMBOL(tcp_get_cookie_sock); +EXPORT_IPV6_MOD(tcp_get_cookie_sock); /* * when syncookies are in effect and tcp timestamps are enabled we stored @@ -259,7 +259,7 @@ bool cookie_timestamp_decode(const struct net *net, return READ_ONCE(net->ipv4.sysctl_tcp_window_scaling) != 0; } -EXPORT_SYMBOL(cookie_timestamp_decode); +EXPORT_IPV6_MOD(cookie_timestamp_decode); static int cookie_tcp_reqsk_init(struct sock *sk, struct sk_buff *skb, struct request_sock *req) @@ -279,6 +279,7 @@ static int cookie_tcp_reqsk_init(struct sock *sk, struct sk_buff *skb, ireq->smc_ok = 0; treq->snt_synack = 0; + treq->snt_tsval_first = 0; treq->tfo_listener = false; treq->txhash = net_tx_rndhash(); treq->rcv_isn = ntohl(th->seq) - 1; @@ -310,7 +311,7 @@ struct request_sock *cookie_bpf_check(struct sock *sk, struct sk_buff *skb) return req; } -EXPORT_SYMBOL_GPL(cookie_bpf_check); +EXPORT_IPV6_MOD_GPL(cookie_bpf_check); #endif struct request_sock *cookie_tcp_reqsk_alloc(const struct request_sock_ops *ops, @@ -351,7 +352,7 @@ struct request_sock *cookie_tcp_reqsk_alloc(const struct request_sock_ops *ops, return req; } -EXPORT_SYMBOL_GPL(cookie_tcp_reqsk_alloc); +EXPORT_IPV6_MOD_GPL(cookie_tcp_reqsk_alloc); static struct request_sock *cookie_tcp_check(struct net *net, struct sock *sk, struct sk_buff *skb) diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c index 42cb5dc9cb245..3a43010d726fb 100644 --- a/net/ipv4/sysctl_net_ipv4.c +++ b/net/ipv4/sysctl_net_ipv4.c @@ -28,6 +28,7 @@ static int tcp_adv_win_scale_max = 31; static int tcp_app_win_max = 31; static int tcp_min_snd_mss_min = TCP_MIN_SND_MSS; static int tcp_min_snd_mss_max = 65535; +static int tcp_rto_max_max = TCP_RTO_MAX_SEC * MSEC_PER_SEC; static int ip_privileged_port_min; static int ip_privileged_port_max = 65535; static int ip_ttl_min = 1; @@ -1583,6 +1584,15 @@ static struct ctl_table ipv4_net_table[] = { .proc_handler = proc_dointvec_minmax, .extra1 = SYSCTL_ONE, }, + { + .procname = "tcp_rto_max_ms", + .data = &init_net.ipv4.sysctl_tcp_rto_max_ms, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = SYSCTL_ONE_THOUSAND, + .extra2 = &tcp_rto_max_max, + }, }; static __net_init int ipv4_sysctl_init_net(struct net *net) diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 285678d8ce077..ea8de00f669d0 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -300,10 +300,10 @@ DEFINE_PER_CPU(u32, tcp_tw_isn); EXPORT_PER_CPU_SYMBOL_GPL(tcp_tw_isn); long sysctl_tcp_mem[3] __read_mostly; -EXPORT_SYMBOL(sysctl_tcp_mem); +EXPORT_IPV6_MOD(sysctl_tcp_mem); atomic_long_t tcp_memory_allocated ____cacheline_aligned_in_smp; /* Current allocated memory. */ -EXPORT_SYMBOL(tcp_memory_allocated); +EXPORT_IPV6_MOD(tcp_memory_allocated); DEFINE_PER_CPU(int, tcp_memory_per_cpu_fw_alloc); EXPORT_PER_CPU_SYMBOL_GPL(tcp_memory_per_cpu_fw_alloc); @@ -316,7 +316,7 @@ EXPORT_SYMBOL(tcp_have_smc); * Current number of TCP sockets. 
*/ struct percpu_counter tcp_sockets_allocated ____cacheline_aligned_in_smp; -EXPORT_SYMBOL(tcp_sockets_allocated); +EXPORT_IPV6_MOD(tcp_sockets_allocated); /* * TCP splice context @@ -349,7 +349,7 @@ void tcp_enter_memory_pressure(struct sock *sk) if (!cmpxchg(&tcp_memory_pressure, 0, val)) NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMEMORYPRESSURES); } -EXPORT_SYMBOL_GPL(tcp_enter_memory_pressure); +EXPORT_IPV6_MOD_GPL(tcp_enter_memory_pressure); void tcp_leave_memory_pressure(struct sock *sk) { @@ -362,7 +362,7 @@ void tcp_leave_memory_pressure(struct sock *sk) NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPMEMORYPRESSURESCHRONO, jiffies_to_msecs(jiffies - val)); } -EXPORT_SYMBOL_GPL(tcp_leave_memory_pressure); +EXPORT_IPV6_MOD_GPL(tcp_leave_memory_pressure); /* Convert seconds to retransmits based on initial and max timeout */ static u8 secs_to_retrans(int seconds, int timeout, int rto_max) @@ -423,7 +423,7 @@ void tcp_init_sock(struct sock *sk) { struct inet_connection_sock *icsk = inet_csk(sk); struct tcp_sock *tp = tcp_sk(sk); - int rto_min_us; + int rto_min_us, rto_max_ms; tp->out_of_order_queue = RB_ROOT; sk->tcp_rtx_queue = RB_ROOT; @@ -432,6 +432,10 @@ void tcp_init_sock(struct sock *sk) INIT_LIST_HEAD(&tp->tsorted_sent_queue); icsk->icsk_rto = TCP_TIMEOUT_INIT; + + rto_max_ms = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rto_max_ms); + icsk->icsk_rto_max = msecs_to_jiffies(rto_max_ms); + rto_min_us = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rto_min_us); icsk->icsk_rto_min = usecs_to_jiffies(rto_min_us); icsk->icsk_delack_max = TCP_DELACK_MAX; @@ -475,7 +479,7 @@ void tcp_init_sock(struct sock *sk) sk_sockets_allocated_inc(sk); xa_init_flags(&sk->sk_user_frags, XA_FLAGS_ALLOC1); } -EXPORT_SYMBOL(tcp_init_sock); +EXPORT_IPV6_MOD(tcp_init_sock); static void tcp_tx_timestamp(struct sock *sk, struct sockcm_cookie *sockc) { @@ -488,10 +492,14 @@ static void tcp_tx_timestamp(struct sock *sk, struct sockcm_cookie *sockc) sock_tx_timestamp(sk, sockc, &shinfo->tx_flags); if (tsflags & SOF_TIMESTAMPING_TX_ACK) - tcb->txstamp_ack = 1; + tcb->txstamp_ack |= TSTAMP_ACK_SK; if (tsflags & SOF_TIMESTAMPING_TX_RECORD_MASK) shinfo->tskey = TCP_SKB_CB(skb)->seq + skb->len - 1; } + + if (cgroup_bpf_enabled(CGROUP_SOCK_OPS) && + SK_BPF_CB_FLAG_TEST(sk, SK_BPF_CB_TX_TIMESTAMPING) && skb) + bpf_skops_tx_timestamping(sk, skb, BPF_SOCK_OPS_TSTAMP_SENDMSG_CB); } static bool tcp_stream_is_readable(struct sock *sk, int target) @@ -660,7 +668,7 @@ int tcp_ioctl(struct sock *sk, int cmd, int *karg) *karg = answ; return 0; } -EXPORT_SYMBOL(tcp_ioctl); +EXPORT_IPV6_MOD(tcp_ioctl); void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb) { @@ -876,7 +884,7 @@ ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos, return ret; } -EXPORT_SYMBOL(tcp_splice_read); +EXPORT_IPV6_MOD(tcp_splice_read); struct sk_buff *tcp_stream_alloc_skb(struct sock *sk, gfp_t gfp, bool force_schedule) @@ -1123,7 +1131,7 @@ int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size) /* 'common' sending to sendq */ } - sockcm_init(&sockc, sk); + sockc = (struct sockcm_cookie) { .tsflags = READ_ONCE(sk->sk_tsflags)}; if (msg->msg_controllen) { err = sock_cmsg_send(sk, msg, &sockc); if (unlikely(err)) { @@ -1376,7 +1384,7 @@ void tcp_splice_eof(struct socket *sock) tcp_push(sk, 0, mss_now, tp->nonagle, size_goal); release_sock(sk); } -EXPORT_SYMBOL_GPL(tcp_splice_eof); +EXPORT_IPV6_MOD_GPL(tcp_splice_eof); /* * Handle reading urgent data. 
BSD has very simple semantics for @@ -1517,11 +1525,25 @@ void tcp_cleanup_rbuf(struct sock *sk, int copied) __tcp_cleanup_rbuf(sk, copied); } +/* private version of sock_rfree() avoiding one atomic_sub() */ +void tcp_sock_rfree(struct sk_buff *skb) +{ + struct sock *sk = skb->sk; + unsigned int len = skb->truesize; + + sock_owned_by_me(sk); + atomic_set(&sk->sk_rmem_alloc, + atomic_read(&sk->sk_rmem_alloc) - len); + + sk_forward_alloc_add(sk, len); + sk_mem_reclaim(sk); +} + static void tcp_eat_recv_skb(struct sock *sk, struct sk_buff *skb) { __skb_unlink(skb, &sk->sk_receive_queue); - if (likely(skb->destructor == sock_rfree)) { - sock_rfree(skb); + if (likely(skb->destructor == tcp_sock_rfree)) { + tcp_sock_rfree(skb); skb->destructor = NULL; skb->sk = NULL; return skb_attempt_defer_free(skb); @@ -1686,7 +1708,7 @@ int tcp_read_skb(struct sock *sk, skb_read_actor_t recv_actor) } return copied; } -EXPORT_SYMBOL(tcp_read_skb); +EXPORT_IPV6_MOD(tcp_read_skb); void tcp_read_done(struct sock *sk, size_t len) { @@ -1731,7 +1753,7 @@ int tcp_peek_len(struct socket *sock) { return tcp_inq(sock->sk); } -EXPORT_SYMBOL(tcp_peek_len); +EXPORT_IPV6_MOD(tcp_peek_len); /* Make sure sk_rcvbuf is big enough to satisfy SO_RCVLOWAT hint */ int tcp_set_rcvlowat(struct sock *sk, int val) @@ -1758,7 +1780,7 @@ int tcp_set_rcvlowat(struct sock *sk, int val) } return 0; } -EXPORT_SYMBOL(tcp_set_rcvlowat); +EXPORT_IPV6_MOD(tcp_set_rcvlowat); void tcp_update_recv_tstamps(struct sk_buff *skb, struct scm_timestamping_internal *tss) @@ -1791,7 +1813,7 @@ int tcp_mmap(struct file *file, struct socket *sock, vma->vm_ops = &tcp_vm_ops; return 0; } -EXPORT_SYMBOL(tcp_mmap); +EXPORT_IPV6_MOD(tcp_mmap); static skb_frag_t *skb_advance_to_frag(struct sk_buff *skb, u32 offset_skb, u32 *offset_frag) @@ -2457,14 +2479,12 @@ static int tcp_recvmsg_dmabuf(struct sock *sk, const struct sk_buff *skb, */ memset(&dmabuf_cmsg, 0, sizeof(dmabuf_cmsg)); dmabuf_cmsg.frag_size = copy; - err = put_cmsg(msg, SOL_SOCKET, SO_DEVMEM_LINEAR, - sizeof(dmabuf_cmsg), &dmabuf_cmsg); - if (err || msg->msg_flags & MSG_CTRUNC) { - msg->msg_flags &= ~MSG_CTRUNC; - if (!err) - err = -ETOOSMALL; + err = put_cmsg_notrunc(msg, SOL_SOCKET, + SO_DEVMEM_LINEAR, + sizeof(dmabuf_cmsg), + &dmabuf_cmsg); + if (err) goto out; - } sent += copy; @@ -2495,6 +2515,11 @@ static int tcp_recvmsg_dmabuf(struct sock *sk, const struct sk_buff *skb, } niov = skb_frag_net_iov(frag); + if (!net_is_devmem_iov(niov)) { + err = -ENODEV; + goto out; + } + end = start + skb_frag_size(frag); copy = end - offset; @@ -2513,21 +2538,17 @@ static int tcp_recvmsg_dmabuf(struct sock *sk, const struct sk_buff *skb, /* Will perform the exchange later */ dmabuf_cmsg.frag_token = tcp_xa_pool.tokens[tcp_xa_pool.idx]; - dmabuf_cmsg.dmabuf_id = net_iov_binding_id(niov); + dmabuf_cmsg.dmabuf_id = net_devmem_iov_binding_id(niov); offset += copy; remaining_len -= copy; - err = put_cmsg(msg, SOL_SOCKET, - SO_DEVMEM_DMABUF, - sizeof(dmabuf_cmsg), - &dmabuf_cmsg); - if (err || msg->msg_flags & MSG_CTRUNC) { - msg->msg_flags &= ~MSG_CTRUNC; - if (!err) - err = -ETOOSMALL; + err = put_cmsg_notrunc(msg, SOL_SOCKET, + SO_DEVMEM_DMABUF, + sizeof(dmabuf_cmsg), + &dmabuf_cmsg); + if (err) goto out; - } atomic_long_inc(&niov->pp_ref_count); tcp_xa_pool.netmems[tcp_xa_pool.idx++] = skb_frag_netmem(frag); @@ -2883,7 +2904,7 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int flags, } return ret; } -EXPORT_SYMBOL(tcp_recvmsg); +EXPORT_IPV6_MOD(tcp_recvmsg); void tcp_set_state(struct sock 
*sk, int state) { @@ -3013,7 +3034,7 @@ void tcp_shutdown(struct sock *sk, int how) tcp_send_fin(sk); } } -EXPORT_SYMBOL(tcp_shutdown); +EXPORT_IPV6_MOD(tcp_shutdown); int tcp_orphan_count_sum(void) { @@ -3193,7 +3214,7 @@ void __tcp_close(struct sock *sk, long timeout) const int tmo = tcp_fin_time(sk); if (tmo > TCP_TIMEWAIT_LEN) { - inet_csk_reset_keepalive_timer(sk, + tcp_reset_keepalive_timer(sk, tmo - TCP_TIMEWAIT_LEN); } else { tcp_time_wait(sk, TCP_FIN_WAIT2, tmo); @@ -3345,8 +3366,8 @@ int tcp_disconnect(struct sock *sk, int flags) icsk->icsk_probes_out = 0; icsk->icsk_probes_tstamp = 0; icsk->icsk_rto = TCP_TIMEOUT_INIT; - icsk->icsk_rto_min = TCP_RTO_MIN; - icsk->icsk_delack_max = TCP_DELACK_MAX; + WRITE_ONCE(icsk->icsk_rto_min, TCP_RTO_MIN); + WRITE_ONCE(icsk->icsk_delack_max, TCP_DELACK_MAX); tp->snd_ssthresh = TCP_INFINITE_SSTHRESH; tcp_snd_cwnd_set(tp, TCP_INIT_CWND); tp->snd_cwnd_cnt = 0; @@ -3512,7 +3533,7 @@ static int tcp_repair_options_est(struct sock *sk, sockptr_t optbuf, } DEFINE_STATIC_KEY_FALSE(tcp_tx_delay_enabled); -EXPORT_SYMBOL(tcp_tx_delay_enabled); +EXPORT_IPV6_MOD(tcp_tx_delay_enabled); static void tcp_enable_tx_delay(void) { @@ -3646,7 +3667,7 @@ int tcp_sock_set_keepidle_locked(struct sock *sk, int val) elapsed = tp->keepalive_time - elapsed; else elapsed = 0; - inet_csk_reset_keepalive_timer(sk, elapsed); + tcp_reset_keepalive_timer(sk, elapsed); } return 0; @@ -3686,32 +3707,32 @@ EXPORT_SYMBOL(tcp_sock_set_keepcnt); int tcp_set_window_clamp(struct sock *sk, int val) { + u32 old_window_clamp, new_window_clamp, new_rcv_ssthresh; struct tcp_sock *tp = tcp_sk(sk); if (!val) { if (sk->sk_state != TCP_CLOSE) return -EINVAL; WRITE_ONCE(tp->window_clamp, 0); - } else { - u32 new_rcv_ssthresh, old_window_clamp = tp->window_clamp; - u32 new_window_clamp = val < SOCK_MIN_RCVBUF / 2 ? - SOCK_MIN_RCVBUF / 2 : val; + return 0; + } - if (new_window_clamp == old_window_clamp) - return 0; + old_window_clamp = tp->window_clamp; + new_window_clamp = max_t(int, SOCK_MIN_RCVBUF / 2, val); - WRITE_ONCE(tp->window_clamp, new_window_clamp); - if (new_window_clamp < old_window_clamp) { - /* need to apply the reserved mem provisioning only - * when shrinking the window clamp - */ - __tcp_adjust_rcv_ssthresh(sk, tp->window_clamp); + if (new_window_clamp == old_window_clamp) + return 0; - } else { - new_rcv_ssthresh = min(tp->rcv_wnd, tp->window_clamp); - tp->rcv_ssthresh = max(new_rcv_ssthresh, - tp->rcv_ssthresh); - } + WRITE_ONCE(tp->window_clamp, new_window_clamp); + + /* Need to apply the reserved mem provisioning only + * when shrinking the window clamp. 
+ */ + if (new_window_clamp < old_window_clamp) { + __tcp_adjust_rcv_ssthresh(sk, new_window_clamp); + } else { + new_rcv_ssthresh = min(tp->rcv_wnd, new_window_clamp); + tp->rcv_ssthresh = max(new_rcv_ssthresh, tp->rcv_ssthresh); } return 0; } @@ -3821,6 +3842,27 @@ int do_tcp_setsockopt(struct sock *sk, int level, int optname, secs_to_retrans(val, TCP_TIMEOUT_INIT / HZ, TCP_RTO_MAX / HZ)); return 0; + case TCP_RTO_MAX_MS: + if (val < MSEC_PER_SEC || val > TCP_RTO_MAX_SEC * MSEC_PER_SEC) + return -EINVAL; + WRITE_ONCE(inet_csk(sk)->icsk_rto_max, msecs_to_jiffies(val)); + return 0; + case TCP_RTO_MIN_US: { + int rto_min = usecs_to_jiffies(val); + + if (rto_min > TCP_RTO_MIN || rto_min < TCP_TIMEOUT_MIN) + return -EINVAL; + WRITE_ONCE(inet_csk(sk)->icsk_rto_min, rto_min); + return 0; + } + case TCP_DELACK_MAX_US: { + int delack_max = usecs_to_jiffies(val); + + if (delack_max > TCP_DELACK_MAX || delack_max < TCP_TIMEOUT_MIN) + return -EINVAL; + WRITE_ONCE(inet_csk(sk)->icsk_delack_max, delack_max); + return 0; + } } sockopt_lock_sock(sk); @@ -4050,7 +4092,7 @@ int tcp_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval, optval, optlen); return do_tcp_setsockopt(sk, level, optname, optval, optlen); } -EXPORT_SYMBOL(tcp_setsockopt); +EXPORT_IPV6_MOD(tcp_setsockopt); static void tcp_get_info_chrono_stats(const struct tcp_sock *tp, struct tcp_info *info) @@ -4126,7 +4168,7 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info) info->tcpi_rcv_wscale = tp->rx_opt.rcv_wscale; } - if (tp->ecn_flags & TCP_ECN_OK) + if (tcp_ecn_mode_any(tp)) info->tcpi_options |= TCPI_OPT_ECN; if (tp->ecn_flags & TCP_ECN_SEEN) info->tcpi_options |= TCPI_OPT_ECN_SEEN; @@ -4657,6 +4699,15 @@ int do_tcp_getsockopt(struct sock *sk, int level, case TCP_IS_MPTCP: val = 0; break; + case TCP_RTO_MAX_MS: + val = jiffies_to_msecs(tcp_rto_max(sk)); + break; + case TCP_RTO_MIN_US: + val = jiffies_to_usecs(READ_ONCE(inet_csk(sk)->icsk_rto_min)); + break; + case TCP_DELACK_MAX_US: + val = jiffies_to_usecs(READ_ONCE(inet_csk(sk)->icsk_delack_max)); + break; default: return -ENOPROTOOPT; } @@ -4678,7 +4729,7 @@ bool tcp_bpf_bypass_getsockopt(int level, int optname) return false; } -EXPORT_SYMBOL(tcp_bpf_bypass_getsockopt); +EXPORT_IPV6_MOD(tcp_bpf_bypass_getsockopt); int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen) @@ -4692,11 +4743,11 @@ int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval, return do_tcp_getsockopt(sk, level, optname, USER_SOCKPTR(optval), USER_SOCKPTR(optlen)); } -EXPORT_SYMBOL(tcp_getsockopt); +EXPORT_IPV6_MOD(tcp_getsockopt); #ifdef CONFIG_TCP_MD5SIG int tcp_md5_sigpool_id = -1; -EXPORT_SYMBOL_GPL(tcp_md5_sigpool_id); +EXPORT_IPV6_MOD_GPL(tcp_md5_sigpool_id); int tcp_md5_alloc_sigpool(void) { @@ -4742,7 +4793,7 @@ int tcp_md5_hash_key(struct tcp_sigpool *hp, */ return data_race(crypto_ahash_update(hp->req)); } -EXPORT_SYMBOL(tcp_md5_hash_key); +EXPORT_IPV6_MOD(tcp_md5_hash_key); /* Called with rcu_read_lock() */ static enum skb_drop_reason @@ -4862,7 +4913,7 @@ tcp_inbound_hash(struct sock *sk, const struct request_sock *req, return tcp_inbound_md5_hash(sk, skb, saddr, daddr, family, l3index, md5_location); } -EXPORT_SYMBOL_GPL(tcp_inbound_hash); +EXPORT_IPV6_MOD_GPL(tcp_inbound_hash); void tcp_done(struct sock *sk) { @@ -5011,7 +5062,12 @@ static void __init tcp_struct_check(void) CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, rtt_min); CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, 
tcp_sock_read_rx, out_of_order_queue); CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, snd_ssthresh); +#if IS_ENABLED(CONFIG_TLS_DEVICE) + CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, tcp_clean_acked); + CACHELINE_ASSERT_GROUP_SIZE(struct tcp_sock, tcp_sock_read_rx, 77); +#else CACHELINE_ASSERT_GROUP_SIZE(struct tcp_sock, tcp_sock_read_rx, 69); +#endif /* TX read-write hotpath cache lines */ CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, segs_out); diff --git a/net/ipv4/tcp_dctcp.c b/net/ipv4/tcp_dctcp.c index 8a45a4aea9335..03abe0848420d 100644 --- a/net/ipv4/tcp_dctcp.c +++ b/net/ipv4/tcp_dctcp.c @@ -90,7 +90,7 @@ __bpf_kfunc static void dctcp_init(struct sock *sk) { const struct tcp_sock *tp = tcp_sk(sk); - if ((tp->ecn_flags & TCP_ECN_OK) || + if (tcp_ecn_mode_any(tp) || (sk->sk_state == TCP_LISTEN || sk->sk_state == TCP_CLOSE)) { struct dctcp *ca = inet_csk_ca(sk); diff --git a/net/ipv4/tcp_dctcp.h b/net/ipv4/tcp_dctcp.h index d69a77cbd0c75..4b0259111d816 100644 --- a/net/ipv4/tcp_dctcp.h +++ b/net/ipv4/tcp_dctcp.h @@ -28,7 +28,7 @@ static inline void dctcp_ece_ack_update(struct sock *sk, enum tcp_ca_event evt, */ if (inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER) { dctcp_ece_ack_cwr(sk, *ce_state); - __tcp_send_ack(sk, *prior_rcv_nxt); + __tcp_send_ack(sk, *prior_rcv_nxt, 0); } inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_NOW; } diff --git a/net/ipv4/tcp_diag.c b/net/ipv4/tcp_diag.c index f428ecf9120f2..45e174b8cd221 100644 --- a/net/ipv4/tcp_diag.c +++ b/net/ipv4/tcp_diag.c @@ -83,7 +83,7 @@ static int tcp_diag_put_md5sig(struct sk_buff *skb, #endif static int tcp_diag_put_ulp(struct sk_buff *skb, struct sock *sk, - const struct tcp_ulp_ops *ulp_ops) + const struct tcp_ulp_ops *ulp_ops, bool net_admin) { struct nlattr *nest; int err; @@ -97,7 +97,7 @@ static int tcp_diag_put_ulp(struct sk_buff *skb, struct sock *sk, goto nla_failure; if (ulp_ops->get_info) - err = ulp_ops->get_info(sk, skb); + err = ulp_ops->get_info(sk, skb, net_admin); if (err) goto nla_failure; @@ -113,6 +113,7 @@ static int tcp_diag_get_aux(struct sock *sk, bool net_admin, struct sk_buff *skb) { struct inet_connection_sock *icsk = inet_csk(sk); + const struct tcp_ulp_ops *ulp_ops; int err = 0; #ifdef CONFIG_TCP_MD5SIG @@ -129,15 +130,13 @@ static int tcp_diag_get_aux(struct sock *sk, bool net_admin, } #endif - if (net_admin) { - const struct tcp_ulp_ops *ulp_ops; - - ulp_ops = icsk->icsk_ulp_ops; - if (ulp_ops) - err = tcp_diag_put_ulp(skb, sk, ulp_ops); - if (err) + ulp_ops = icsk->icsk_ulp_ops; + if (ulp_ops) { + err = tcp_diag_put_ulp(skb, sk, ulp_ops, net_admin); + if (err < 0) return err; } + return 0; } @@ -164,7 +163,7 @@ static size_t tcp_diag_get_aux_size(struct sock *sk, bool net_admin) } #endif - if (net_admin && sk_fullsock(sk)) { + if (sk_fullsock(sk)) { const struct tcp_ulp_ops *ulp_ops; ulp_ops = icsk->icsk_ulp_ops; @@ -172,7 +171,7 @@ static size_t tcp_diag_get_aux_size(struct sock *sk, bool net_admin) size += nla_total_size(0) + nla_total_size(TCP_ULP_NAME_MAX); if (ulp_ops->get_info_size) - size += ulp_ops->get_info_size(sk); + size += ulp_ops->get_info_size(sk, net_admin); } } return size; diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c index 32b28fc21b63c..ca40665145c69 100644 --- a/net/ipv4/tcp_fastopen.c +++ b/net/ipv4/tcp_fastopen.c @@ -189,7 +189,7 @@ void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb) tcp_segs_in(tp, skb); __skb_pull(skb, tcp_hdrlen(skb)); sk_forced_mem_schedule(sk, skb->truesize); - 
skb_set_owner_r(skb, sk); + tcp_skb_set_owner_r(skb, sk); TCP_SKB_CB(skb)->seq++; TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_SYN; @@ -274,8 +274,8 @@ static struct sock *tcp_fastopen_create_child(struct sock *sk, * because it's been added to the accept queue directly. */ req->timeout = tcp_timeout_init(child); - inet_csk_reset_xmit_timer(child, ICSK_TIME_RETRANS, - req->timeout, TCP_RTO_MAX); + tcp_reset_xmit_timer(child, ICSK_TIME_RETRANS, + req->timeout, false); refcount_set(&req->rsk_refcnt, 2); @@ -468,7 +468,7 @@ bool tcp_fastopen_defer_connect(struct sock *sk, int *err) } return false; } -EXPORT_SYMBOL(tcp_fastopen_defer_connect); +EXPORT_IPV6_MOD(tcp_fastopen_defer_connect); /* * The following code block is to deal with middle box issues with TFO: diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 0cbf81bf3d451..e1f952fbac48d 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -102,6 +102,7 @@ int sysctl_tcp_max_orphans __read_mostly = NR_FILE; #define FLAG_NO_CHALLENGE_ACK 0x8000 /* do not call tcp_send_challenge_ack() */ #define FLAG_ACK_MAYBE_DELAYED 0x10000 /* Likely a delayed ACK */ #define FLAG_DSACK_TLP 0x20000 /* DSACK for tail loss probe */ +#define FLAG_TS_PROGRESS 0x40000 /* Positive timestamp delta */ #define FLAG_ACKED (FLAG_DATA_ACKED|FLAG_SYN_ACKED) #define FLAG_NOT_DUP (FLAG_DATA|FLAG_WIN_UPDATE|FLAG_ACKED) @@ -118,18 +119,18 @@ int sysctl_tcp_max_orphans __read_mostly = NR_FILE; #if IS_ENABLED(CONFIG_TLS_DEVICE) static DEFINE_STATIC_KEY_DEFERRED_FALSE(clean_acked_data_enabled, HZ); -void clean_acked_data_enable(struct inet_connection_sock *icsk, +void clean_acked_data_enable(struct tcp_sock *tp, void (*cad)(struct sock *sk, u32 ack_seq)) { - icsk->icsk_clean_acked = cad; + tp->tcp_clean_acked = cad; static_branch_deferred_inc(&clean_acked_data_enabled); } EXPORT_SYMBOL_GPL(clean_acked_data_enable); -void clean_acked_data_disable(struct inet_connection_sock *icsk) +void clean_acked_data_disable(struct tcp_sock *tp) { static_branch_slow_dec_deferred(&clean_acked_data_enabled); - icsk->icsk_clean_acked = NULL; + tp->tcp_clean_acked = NULL; } EXPORT_SYMBOL_GPL(clean_acked_data_disable); @@ -169,6 +170,7 @@ static void bpf_skops_parse_hdr(struct sock *sk, struct sk_buff *skb) memset(&sock_ops, 0, offsetof(struct bpf_sock_ops_kern, temp)); sock_ops.op = BPF_SOCK_OPS_PARSE_HDR_OPT_CB; sock_ops.is_fullsock = 1; + sock_ops.is_locked_tcp_sock = 1; sock_ops.sk = sk; bpf_skops_init_skb(&sock_ops, skb, tcp_hdrlen(skb)); @@ -185,6 +187,7 @@ static void bpf_skops_established(struct sock *sk, int bpf_op, memset(&sock_ops, 0, offsetof(struct bpf_sock_ops_kern, temp)); sock_ops.op = bpf_op; sock_ops.is_fullsock = 1; + sock_ops.is_locked_tcp_sock = 1; sock_ops.sk = sk; /* sk with TCP_REPAIR_ON does not have skb in tcp_finish_connect */ if (skb) @@ -331,15 +334,14 @@ static void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks) static bool tcp_in_quickack_mode(struct sock *sk) { const struct inet_connection_sock *icsk = inet_csk(sk); - const struct dst_entry *dst = __sk_dst_get(sk); - return (dst && dst_metric(dst, RTAX_QUICKACK)) || + return icsk->icsk_ack.dst_quick_ack || (icsk->icsk_ack.quick && !inet_csk_in_pingpong_mode(sk)); } static void tcp_ecn_queue_cwr(struct tcp_sock *tp) { - if (tp->ecn_flags & TCP_ECN_OK) + if (tcp_ecn_mode_rfc3168(tp)) tp->ecn_flags |= TCP_ECN_QUEUE_CWR; } @@ -362,10 +364,13 @@ static void tcp_ecn_withdraw_cwr(struct tcp_sock *tp) tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR; } -static void __tcp_ecn_check_ce(struct sock *sk, 
const struct sk_buff *skb) +static void tcp_data_ecn_check(struct sock *sk, const struct sk_buff *skb) { struct tcp_sock *tp = tcp_sk(sk); + if (tcp_ecn_disabled(tp)) + return; + switch (TCP_SKB_CB(skb)->ip_dsfield & INET_ECN_MASK) { case INET_ECN_NOT_ECT: /* Funny extension: if ECT is not set on a segment, @@ -394,31 +399,39 @@ static void __tcp_ecn_check_ce(struct sock *sk, const struct sk_buff *skb) } } -static void tcp_ecn_check_ce(struct sock *sk, const struct sk_buff *skb) -{ - if (tcp_sk(sk)->ecn_flags & TCP_ECN_OK) - __tcp_ecn_check_ce(sk, skb); -} - static void tcp_ecn_rcv_synack(struct tcp_sock *tp, const struct tcphdr *th) { - if ((tp->ecn_flags & TCP_ECN_OK) && (!th->ece || th->cwr)) - tp->ecn_flags &= ~TCP_ECN_OK; + if (tcp_ecn_mode_rfc3168(tp) && (!th->ece || th->cwr)) + tcp_ecn_mode_set(tp, TCP_ECN_DISABLED); } static void tcp_ecn_rcv_syn(struct tcp_sock *tp, const struct tcphdr *th) { - if ((tp->ecn_flags & TCP_ECN_OK) && (!th->ece || !th->cwr)) - tp->ecn_flags &= ~TCP_ECN_OK; + if (tcp_ecn_mode_rfc3168(tp) && (!th->ece || !th->cwr)) + tcp_ecn_mode_set(tp, TCP_ECN_DISABLED); } static bool tcp_ecn_rcv_ecn_echo(const struct tcp_sock *tp, const struct tcphdr *th) { - if (th->ece && !th->syn && (tp->ecn_flags & TCP_ECN_OK)) + if (th->ece && !th->syn && tcp_ecn_mode_rfc3168(tp)) return true; return false; } +static void tcp_count_delivered_ce(struct tcp_sock *tp, u32 ecn_count) +{ + tp->delivered_ce += ecn_count; +} + +/* Updates the delivered and delivered_ce counts */ +static void tcp_count_delivered(struct tcp_sock *tp, u32 delivered, + bool ece_ack) +{ + tp->delivered += delivered; + if (ece_ack) + tcp_count_delivered_ce(tp, delivered); +} + /* Buffer size and advertised window tuning. * * 1. Tuning sk->sk_sndbuf, when connection enters established state. @@ -636,7 +649,7 @@ void tcp_initialize_rcv_mss(struct sock *sk) inet_csk(sk)->icsk_ack.rcv_mss = hint; } -EXPORT_SYMBOL(tcp_initialize_rcv_mss); +EXPORT_IPV6_MOD(tcp_initialize_rcv_mss); /* Receiver "autotuning" code. * @@ -857,7 +870,7 @@ static void tcp_event_data_recv(struct sock *sk, struct sk_buff *skb) icsk->icsk_ack.lrcvtime = now; tcp_save_lrcv_flowlabel(sk, skb); - tcp_ecn_check_ce(sk, skb); + tcp_data_ecn_check(sk, skb); if (skb->len >= 128) tcp_grow_window(sk, skb, true); @@ -1154,15 +1167,6 @@ void tcp_mark_skb_lost(struct sock *sk, struct sk_buff *skb) } } -/* Updates the delivered and delivered_ce counts */ -static void tcp_count_delivered(struct tcp_sock *tp, u32 delivered, - bool ece_ack) -{ - tp->delivered += delivered; - if (ece_ack) - tp->delivered_ce += delivered; -} - /* This procedure tags the retransmission queue when SACKs arrive. * * We have three tag bits: SACKED(S), RETRANS(R) and LOST(L). 
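
The tcp_dctcp.c and tcp_input.c hunks above replace open-coded tests of "tp->ecn_flags & TCP_ECN_OK" with mode accessors - tcp_ecn_mode_rfc3168(), tcp_ecn_mode_any(), tcp_ecn_disabled() and tcp_ecn_mode_set() - whose definitions live on the header side of this series and are not visible in this excerpt. A minimal sketch of how such accessors could look, assuming a hypothetical two-bit mode field in the low bits of ecn_flags (kernel context, struct tcp_sock from <linux/tcp.h>; the real bit values may differ):

/* Hypothetical encoding; the actual values come from the header
 * change that is not part of this excerpt.
 */
#define TCP_ECN_MODE_RFC3168	0x1	/* classic RFC 3168 ECN negotiated */
#define TCP_ECN_MODE_ACCECN	0x2	/* room for a future accurate-ECN mode */
#define TCP_ECN_MODE_MASK	(TCP_ECN_MODE_RFC3168 | TCP_ECN_MODE_ACCECN)
#define TCP_ECN_DISABLED	0	/* as in tcp_ecn_mode_set(tp, TCP_ECN_DISABLED) */

static inline bool tcp_ecn_mode_any(const struct tcp_sock *tp)
{
	/* true when any ECN mode was negotiated for this connection */
	return tp->ecn_flags & TCP_ECN_MODE_MASK;
}

static inline bool tcp_ecn_mode_rfc3168(const struct tcp_sock *tp)
{
	return (tp->ecn_flags & TCP_ECN_MODE_MASK) == TCP_ECN_MODE_RFC3168;
}

static inline bool tcp_ecn_disabled(const struct tcp_sock *tp)
{
	return !tcp_ecn_mode_any(tp);
}

static inline void tcp_ecn_mode_set(struct tcp_sock *tp, u8 mode)
{
	/* replace only the mode bits, leaving TCP_ECN_QUEUE_CWR etc. intact */
	tp->ecn_flags = (tp->ecn_flags & ~TCP_ECN_MODE_MASK) | mode;
}

With accessors like these, call sites such as tcp_ecn_rcv_synack() no longer depend on how the mode is encoded, which is presumably what lets a later patch introduce additional ECN modes without touching them again.
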
@@ -2258,8 +2262,7 @@ static bool tcp_check_sack_reneging(struct sock *sk, int *ack_flag) unsigned long delay = max(usecs_to_jiffies(tp->srtt_us >> 4), msecs_to_jiffies(10)); - inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, - delay, TCP_RTO_MAX); + tcp_reset_xmit_timer(sk, ICSK_TIME_RETRANS, delay, false); *ack_flag &= ~FLAG_SET_XMIT_TIMER; return true; } @@ -2716,6 +2719,8 @@ void tcp_cwnd_reduction(struct sock *sk, int newly_acked_sacked, int newly_lost, if (newly_acked_sacked <= 0 || WARN_ON_ONCE(!tp->prior_cwnd)) return; + trace_tcp_cwnd_reduction_tp(sk, newly_acked_sacked, newly_lost, flag); + tp->prr_delivered += newly_acked_sacked; if (delta < 0) { u64 dividend = (u64)tp->snd_ssthresh * tp->prr_delivered + @@ -2898,7 +2903,7 @@ void tcp_simple_retransmit(struct sock *sk) */ tcp_non_congestion_loss_retransmit(sk); } -EXPORT_SYMBOL(tcp_simple_retransmit); +EXPORT_IPV6_MOD(tcp_simple_retransmit); void tcp_enter_recovery(struct sock *sk, bool ece_ack) { @@ -3288,8 +3293,7 @@ void tcp_rearm_rto(struct sock *sk) */ rto = usecs_to_jiffies(max_t(int, delta_us, 1)); } - tcp_reset_xmit_timer(sk, ICSK_TIME_RETRANS, rto, - TCP_RTO_MAX); + tcp_reset_xmit_timer(sk, ICSK_TIME_RETRANS, rto, true); } } @@ -3566,10 +3570,10 @@ static void tcp_ack_probe(struct sock *sk) * This function is not for random using! */ } else { - unsigned long when = tcp_probe0_when(sk, TCP_RTO_MAX); + unsigned long when = tcp_probe0_when(sk, tcp_rto_max(sk)); when = tcp_clamp_probe0_to_user_timeout(sk, when); - tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0, when, TCP_RTO_MAX); + tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0, when, true); } } @@ -3814,8 +3818,16 @@ static void tcp_store_ts_recent(struct tcp_sock *tp) tp->rx_opt.ts_recent_stamp = ktime_get_seconds(); } -static void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq) +static int __tcp_replace_ts_recent(struct tcp_sock *tp, s32 tstamp_delta) +{ + tcp_store_ts_recent(tp); + return tstamp_delta > 0 ? FLAG_TS_PROGRESS : 0; +} + +static int tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq) { + s32 delta; + if (tp->rx_opt.saw_tstamp && !after(seq, tp->rcv_wup)) { /* PAWS bug workaround wrt. ACK frames, the PAWS discard * extra check below makes sure this can only happen @@ -3824,9 +3836,13 @@ static void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq) * Not only, also it occurs for expired timestamps. */ - if (tcp_paws_check(&tp->rx_opt, 0)) - tcp_store_ts_recent(tp); + if (tcp_paws_check(&tp->rx_opt, 0)) { + delta = tp->rx_opt.rcv_tsval - tp->rx_opt.ts_recent; + return __tcp_replace_ts_recent(tp, delta); + } } + + return 0; } /* This routine deals with acks during a TLP episode and ends an episode by @@ -3862,12 +3878,23 @@ static void tcp_process_tlp_ack(struct sock *sk, u32 ack, int flag) } } -static inline void tcp_in_ack_event(struct sock *sk, u32 flags) +static void tcp_in_ack_event(struct sock *sk, int flag) { const struct inet_connection_sock *icsk = inet_csk(sk); - if (icsk->icsk_ca_ops->in_ack_event) - icsk->icsk_ca_ops->in_ack_event(sk, flags); + if (icsk->icsk_ca_ops->in_ack_event) { + u32 ack_ev_flags = 0; + + if (flag & FLAG_WIN_UPDATE) + ack_ev_flags |= CA_ACK_WIN_UPDATE; + if (flag & FLAG_SLOWPATH) { + ack_ev_flags |= CA_ACK_SLOWPATH; + if (flag & FLAG_ECE) + ack_ev_flags |= CA_ACK_ECE; + } + + icsk->icsk_ca_ops->in_ack_event(sk, ack_ev_flags); + } } /* Congestion control has updated the cwnd already. 
So if we're in @@ -3960,8 +3987,8 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) #if IS_ENABLED(CONFIG_TLS_DEVICE) if (static_branch_unlikely(&clean_acked_data_enabled.key)) - if (icsk->icsk_clean_acked) - icsk->icsk_clean_acked(sk, ack); + if (tp->tcp_clean_acked) + tp->tcp_clean_acked(sk, ack); #endif } @@ -3972,7 +3999,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) * is in window. */ if (flag & FLAG_UPDATE_TS_RECENT) - tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq); + flag |= tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq); if ((flag & (FLAG_SLOWPATH | FLAG_SND_UNA_ADVANCED)) == FLAG_SND_UNA_ADVANCED) { @@ -3984,12 +4011,8 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) tcp_snd_una_update(tp, ack); flag |= FLAG_WIN_UPDATE; - tcp_in_ack_event(sk, CA_ACK_WIN_UPDATE); - NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPHPACKS); } else { - u32 ack_ev_flags = CA_ACK_SLOWPATH; - if (ack_seq != TCP_SKB_CB(skb)->end_seq) flag |= FLAG_DATA; else @@ -4001,19 +4024,12 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una, &sack_state); - if (tcp_ecn_rcv_ecn_echo(tp, tcp_hdr(skb))) { + if (tcp_ecn_rcv_ecn_echo(tp, tcp_hdr(skb))) flag |= FLAG_ECE; - ack_ev_flags |= CA_ACK_ECE; - } if (sack_state.sack_delivered) tcp_count_delivered(tp, sack_state.sack_delivered, flag & FLAG_ECE); - - if (flag & FLAG_WIN_UPDATE) - ack_ev_flags |= CA_ACK_WIN_UPDATE; - - tcp_in_ack_event(sk, ack_ev_flags); } /* This is a deviation from RFC3168 since it states that: @@ -4040,6 +4056,8 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) tcp_rack_update_reo_wnd(sk, &rs); + tcp_in_ack_event(sk, flag); + if (tp->tlp_high_seq) tcp_process_tlp_ack(sk, ack, flag); @@ -4071,6 +4089,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) return 1; no_queue: + tcp_in_ack_event(sk, flag); /* If data was DSACKed, see if we can undo a cwnd reduction. */ if (flag & FLAG_DSACKING_ACK) { tcp_fastretrans_alert(sk, prior_snd_una, num_dupack, &flag, @@ -4180,7 +4199,6 @@ u16 tcp_parse_mss_option(const struct tcphdr *th, u16 user_mss) } return mss; } -EXPORT_SYMBOL_GPL(tcp_parse_mss_option); /* Look for tcp options. Normally only called on SYN and SYNACK packets. * But, this can also be called on packets in the established flow when @@ -4530,7 +4548,7 @@ void tcp_done_with_error(struct sock *sk, int err) if (!sock_flag(sk, SOCK_DEAD)) sk_error_report(sk); } -EXPORT_SYMBOL(tcp_done_with_error); +EXPORT_IPV6_MOD(tcp_done_with_error); /* When we get a reset we do this. 
*/ void tcp_reset(struct sock *sk, struct sk_buff *skb) @@ -5019,7 +5037,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb) bool fragstolen; tcp_save_lrcv_flowlabel(sk, skb); - tcp_ecn_check_ce(sk, skb); + tcp_data_ecn_check(sk, skb); if (unlikely(tcp_try_rmem_schedule(sk, skb, skb->truesize))) { NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFODROP); @@ -5153,7 +5171,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb) if (tcp_is_sack(tp)) tcp_grow_window(sk, skb, false); skb_condense(skb); - skb_set_owner_r(skb, sk); + tcp_skb_set_owner_r(skb, sk); } } @@ -5169,7 +5187,7 @@ static int __must_check tcp_queue_rcv(struct sock *sk, struct sk_buff *skb, tcp_rcv_nxt_update(tcp_sk(sk), TCP_SKB_CB(skb)->end_seq); if (!eaten) { tcp_add_receive_queue(sk, skb); - skb_set_owner_r(skb, sk); + tcp_skb_set_owner_r(skb, sk); } return eaten; } @@ -5486,7 +5504,7 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list, struct rb_root *root, __skb_queue_before(list, skb, nskb); else __skb_queue_tail(&tmp, nskb); /* defer rbtree insertion */ - skb_set_owner_r(nskb, sk); + tcp_skb_set_owner_r(nskb, sk); mptcp_skb_ext_move(nskb, skb); /* Copy data, releasing collapsed skbs. */ @@ -6156,6 +6174,8 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb) TCP_SKB_CB(skb)->seq == tp->rcv_nxt && !after(TCP_SKB_CB(skb)->ack_seq, tp->snd_nxt)) { int tcp_header_len = tp->tcp_header_len; + s32 delta = 0; + int flag = 0; /* Timestamp header prediction: tcp_header_len * is automatically equal to th->doff*4 due to pred_flags @@ -6168,8 +6188,10 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb) if (!tcp_parse_aligned_timestamp(tp, th)) goto slow_path; + delta = tp->rx_opt.rcv_tsval - + tp->rx_opt.ts_recent; /* If PAWS failed, check it more carefully in slow path */ - if ((s32)(tp->rx_opt.rcv_tsval - tp->rx_opt.ts_recent) < 0) + if (delta < 0) goto slow_path; /* DO NOT update ts_recent here, if checksum fails @@ -6189,12 +6211,13 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb) if (tcp_header_len == (sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED) && tp->rcv_nxt == tp->rcv_wup) - tcp_store_ts_recent(tp); + flag |= __tcp_replace_ts_recent(tp, + delta); /* We know that such packets are checksummed * on entry. */ - tcp_ack(sk, skb, 0); + tcp_ack(sk, skb, flag); __kfree_skb(skb); tcp_data_snd_check(sk); /* When receiving pure ack in fast path, update @@ -6225,7 +6248,8 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb) if (tcp_header_len == (sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED) && tp->rcv_nxt == tp->rcv_wup) - tcp_store_ts_recent(tp); + flag |= __tcp_replace_ts_recent(tp, + delta); tcp_rcv_rtt_measure_ts(sk, skb); @@ -6240,7 +6264,7 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb) if (TCP_SKB_CB(skb)->ack_seq != tp->snd_una) { /* Well, only one small jumplet in fast path... 
*/ - tcp_ack(sk, skb, FLAG_DATA); + tcp_ack(sk, skb, flag | FLAG_DATA); tcp_data_snd_check(sk); if (!inet_csk_ack_scheduled(sk)) goto no_ack; @@ -6300,7 +6324,7 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb) discard: tcp_drop_reason(sk, skb, reason); } -EXPORT_SYMBOL(tcp_rcv_established); +EXPORT_IPV6_MOD(tcp_rcv_established); void tcp_init_transfer(struct sock *sk, int bpf_op, struct sk_buff *skb) { @@ -6353,7 +6377,7 @@ void tcp_finish_connect(struct sock *sk, struct sk_buff *skb) tp->lsndtime = tcp_jiffies32; if (sock_flag(sk, SOCK_KEEPOPEN)) - inet_csk_reset_keepalive_timer(sk, keepalive_time_when(tp)); + tcp_reset_keepalive_timer(sk, keepalive_time_when(tp)); if (!tp->rx_opt.snd_wscale) __tcp_fast_path_on(tp, tp->snd_wnd); @@ -6476,9 +6500,8 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb, after(TCP_SKB_CB(skb)->ack_seq, tp->snd_nxt)) { /* Previous FIN/ACK or RST/ACK might be ignored. */ if (icsk->icsk_retransmits == 0) - inet_csk_reset_xmit_timer(sk, - ICSK_TIME_RETRANS, - TCP_TIMEOUT_MIN, TCP_RTO_MAX); + tcp_reset_xmit_timer(sk, ICSK_TIME_RETRANS, + TCP_TIMEOUT_MIN, false); SKB_DR_SET(reason, TCP_INVALID_ACK_SEQUENCE); goto reset_and_undo; } @@ -6593,8 +6616,8 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb, */ inet_csk_schedule_ack(sk); tcp_enter_quickack_mode(sk, TCP_MAX_QUICKACKS); - inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, - TCP_DELACK_MAX, TCP_RTO_MAX); + tcp_reset_xmit_timer(sk, ICSK_TIME_DACK, + TCP_DELACK_MAX, false); goto consume; } tcp_send_ack(sk); @@ -6812,10 +6835,9 @@ tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb) WARN_ON_ONCE(sk->sk_state != TCP_SYN_RECV && sk->sk_state != TCP_FIN_WAIT1); - if (!tcp_check_req(sk, skb, req, true, &req_stolen)) { - SKB_DR_SET(reason, TCP_FASTOPEN); + SKB_DR_SET(reason, TCP_FASTOPEN); + if (!tcp_check_req(sk, skb, req, true, &req_stolen, &reason)) goto discard; - } } if (!th->ack && !th->rst && !th->syn) { @@ -6928,7 +6950,7 @@ tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb) tmo = tcp_fin_time(sk); if (tmo > TCP_TIMEWAIT_LEN) { - inet_csk_reset_keepalive_timer(sk, tmo - TCP_TIMEWAIT_LEN); + tcp_reset_keepalive_timer(sk, tmo - TCP_TIMEWAIT_LEN); } else if (th->fin || sock_owned_by_user(sk)) { /* Bad case. We could lose such FIN otherwise. * It is not a big problem, but it looks confusing @@ -6936,7 +6958,7 @@ tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb) * if it spins in bh_lock_sock(), but it is really * marginal case. */ - inet_csk_reset_keepalive_timer(sk, tmo); + tcp_reset_keepalive_timer(sk, tmo); } else { tcp_time_wait(sk, TCP_FIN_WAIT2, tmo); goto consume; @@ -7014,7 +7036,7 @@ tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb) __kfree_skb(skb); return 0; } -EXPORT_SYMBOL(tcp_rcv_state_process); +EXPORT_IPV6_MOD(tcp_rcv_state_process); static inline void pr_drop_req(struct request_sock *req, __u16 port, int family) { @@ -7081,6 +7103,7 @@ static void tcp_openreq_init(struct request_sock *req, tcp_rsk(req)->rcv_isn = TCP_SKB_CB(skb)->seq; tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->seq + 1; tcp_rsk(req)->snt_synack = 0; + tcp_rsk(req)->snt_tsval_first = 0; tcp_rsk(req)->last_oow_ack_time = 0; req->mss = rx_opt->mss_clamp; req->ts_recent = rx_opt->saw_tstamp ? 
rx_opt->rcv_tsval : 0; @@ -7196,7 +7219,7 @@ u16 tcp_get_syncookie_mss(struct request_sock_ops *rsk_ops, return mss; } -EXPORT_SYMBOL_GPL(tcp_get_syncookie_mss); +EXPORT_IPV6_MOD_GPL(tcp_get_syncookie_mss); int tcp_conn_request(struct request_sock_ops *rsk_ops, const struct tcp_request_sock_ops *af_ops, @@ -7377,4 +7400,4 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops, tcp_listendrop(sk); return 0; } -EXPORT_SYMBOL(tcp_conn_request); +EXPORT_IPV6_MOD(tcp_conn_request); diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 2632844d2c356..8cce0d5489dae 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -66,6 +66,7 @@ #include #include #include +#include #include #include #include @@ -92,7 +93,6 @@ static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key, #endif struct inet_hashinfo tcp_hashinfo; -EXPORT_SYMBOL(tcp_hashinfo); static DEFINE_PER_CPU(struct sock_bh_locked, ipv4_tcp_sk) = { .bh_lock = INIT_LOCAL_LOCK(bh_lock), @@ -199,7 +199,7 @@ int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp) return 0; } -EXPORT_SYMBOL_GPL(tcp_twsk_unique); +EXPORT_IPV6_MOD_GPL(tcp_twsk_unique); static int tcp_v4_pre_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) @@ -359,7 +359,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) inet->inet_dport = 0; return err; } -EXPORT_SYMBOL(tcp_v4_connect); +EXPORT_IPV6_MOD(tcp_v4_connect); /* * This routine reacts to ICMP_FRAG_NEEDED mtu indications as defined in RFC1191. @@ -400,7 +400,7 @@ void tcp_v4_mtu_reduced(struct sock *sk) tcp_simple_retransmit(sk); } /* else let the usual retransmit timer handle it */ } -EXPORT_SYMBOL(tcp_v4_mtu_reduced); +EXPORT_IPV6_MOD(tcp_v4_mtu_reduced); static void do_redirect(struct sk_buff *skb, struct sock *sk) { @@ -434,7 +434,7 @@ void tcp_req_err(struct sock *sk, u32 seq, bool abort) } reqsk_put(req); } -EXPORT_SYMBOL(tcp_req_err); +EXPORT_IPV6_MOD(tcp_req_err); /* TCP-LD (RFC 6069) logic */ void tcp_ld_RTO_revert(struct sock *sk, u32 seq) @@ -458,15 +458,14 @@ void tcp_ld_RTO_revert(struct sock *sk, u32 seq) icsk->icsk_backoff--; icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) : TCP_TIMEOUT_INIT; - icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX); + icsk->icsk_rto = inet_csk_rto_backoff(icsk, tcp_rto_max(sk)); tcp_mstamp_refresh(tp); delta_us = (u32)(tp->tcp_mstamp - tcp_skb_timestamp_us(skb)); remaining = icsk->icsk_rto - usecs_to_jiffies(delta_us); if (remaining > 0) { - inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, - remaining, TCP_RTO_MAX); + tcp_reset_xmit_timer(sk, ICSK_TIME_RETRANS, remaining, false); } else { /* RTO revert clocked out retransmission. * Will retransmit now. 
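One change repeats across tcp_input.c, tcp_ipv4.c, tcp_output.c and tcp_timer.c: call sites stop passing the global TCP_RTO_MAX bound to inet_csk_reset_xmit_timer() and instead call tcp_reset_xmit_timer() with a boolean, while the remaining TCP_RTO_MAX reads become tcp_rto_max(sk) (backed by the per-netns sysctl_tcp_rto_max_ms initialized later in tcp_sk_init()). The helper's body is not part of this diff; the standalone sketch below models only the apparent semantics, a per-socket cap plus an optional pacing delay selected by the flag, and every name in it is illustrative rather than the kernel's.

#include <stdbool.h>
#include <stdio.h>

struct sock_sketch {
	unsigned long rto_max;      /* per-socket cap, e.g. derived from a sysctl */
	unsigned long pacing_delay; /* extra delay applied when pacing is active  */
};

/* Arm-the-timer analogue: clamp against the per-socket maximum instead of
 * a compile-time constant, optionally folding in the pacing delay first. */
static unsigned long reset_xmit_timer(const struct sock_sketch *sk,
				      unsigned long when, bool pace_delay)
{
	if (pace_delay)
		when += sk->pacing_delay;
	return when < sk->rto_max ? when : sk->rto_max;
}

int main(void)
{
	struct sock_sketch sk = { .rto_max = 120, .pacing_delay = 30 };

	printf("plain: %lu\n", reset_xmit_timer(&sk, 100, false)); /* 100           */
	printf("paced: %lu\n", reset_xmit_timer(&sk, 100, true));  /* 130 -> 120    */
	return 0;
}

Whatever the exact kernel definition, the design point of the conversion is the same: the maximum RTO becomes a per-socket (and per-namespace) property, so no call site needs to mention it anymore.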
@@ -474,7 +473,7 @@ void tcp_ld_RTO_revert(struct sock *sk, u32 seq) tcp_retransmit_timer(sk); } } -EXPORT_SYMBOL(tcp_ld_RTO_revert); +EXPORT_IPV6_MOD(tcp_ld_RTO_revert); /* * This routine is called by the ICMP module when it gets some @@ -496,14 +495,14 @@ int tcp_v4_err(struct sk_buff *skb, u32 info) { const struct iphdr *iph = (const struct iphdr *)skb->data; struct tcphdr *th = (struct tcphdr *)(skb->data + (iph->ihl << 2)); - struct tcp_sock *tp; + struct net *net = dev_net_rcu(skb->dev); const int type = icmp_hdr(skb)->type; const int code = icmp_hdr(skb)->code; - struct sock *sk; struct request_sock *fastopen; + struct tcp_sock *tp; u32 seq, snd_una; + struct sock *sk; int err; - struct net *net = dev_net(skb->dev); sk = __inet_lookup_established(net, net->ipv4.tcp_death_row.hashinfo, iph->daddr, th->dest, iph->saddr, @@ -676,7 +675,7 @@ void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb) __tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr); } -EXPORT_SYMBOL(tcp_v4_send_check); +EXPORT_IPV6_MOD(tcp_v4_send_check); #define REPLY_OPTIONS_LEN (MAX_TCP_OPTION_SPACE / sizeof(__be32)) @@ -788,7 +787,7 @@ static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb, arg.iov[0].iov_base = (unsigned char *)&rep; arg.iov[0].iov_len = sizeof(rep.th); - net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev); + net = sk ? sock_net(sk) : dev_net_rcu(skb_dst(skb)->dev); /* Invalid TCP option size or twice included auth */ if (tcp_parse_auth_options(tcp_hdr(skb), &md5_hash_location, &aoh)) @@ -889,7 +888,8 @@ static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb, BUILD_BUG_ON(offsetof(struct sock, sk_bound_dev_if) != offsetof(struct inet_timewait_sock, tw_bound_dev_if)); - arg.tos = ip_hdr(skb)->tos; + /* ECN bits of TW reset are cleared */ + arg.tos = ip_hdr(skb)->tos & ~INET_ECN_MASK; arg.uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL); local_bh_disable(); local_lock_nested_bh(&ipv4_tcp_sk.bh_lock); @@ -1035,11 +1035,21 @@ static void tcp_v4_send_ack(const struct sock *sk, local_bh_enable(); } -static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb) +static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb, + enum tcp_tw_status tw_status) { struct inet_timewait_sock *tw = inet_twsk(sk); struct tcp_timewait_sock *tcptw = tcp_twsk(sk); struct tcp_key key = {}; + u8 tos = tw->tw_tos; + + /* Clear only the ECN bits of TW ACKs sent for out-of-window data or + * PAWS-rejected segments, and leave the ECN bits of other TW ACKs + * intact, so that those ACKs are not placed in a different service + * queue (Classic rather than L4S). + */ + if (tw_status == TCP_TW_ACK_OOW) + tos &= ~INET_ECN_MASK; + #ifdef CONFIG_TCP_AO struct tcp_ao_info *ao_info; @@ -1083,7 +1093,7 @@ static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb) READ_ONCE(tcptw->tw_ts_recent), tw->tw_bound_dev_if, &key, tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0, - tw->tw_tos, + tos, tw->tw_txhash); inet_twsk_put(tw); @@ -1153,14 +1163,15 @@ static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb, key.type = TCP_KEY_MD5; } + /* Clear the ECN bits of ACKs sent for out-of-window or PAWS-rejected data */ tcp_v4_send_ack(sk, skb, seq, tcp_rsk(req)->rcv_nxt, tcp_synack_window(req) >> inet_rsk(req)->rcv_wscale, tcp_rsk_tsval(tcp_rsk(req)), - READ_ONCE(req->ts_recent), + req->ts_recent, 0, &key, inet_rsk(req)->no_srccheck ?
IP_REPLY_ARG_NOSRCCHECK : 0, - ip_hdr(skb)->tos, + ip_hdr(skb)->tos & ~INET_ECN_MASK, READ_ONCE(tcp_rsk(req)->txhash)); if (tcp_key_is_ao(&key)) kfree(key.traffic_key); @@ -1231,7 +1242,7 @@ static void tcp_v4_reqsk_destructor(struct request_sock *req) */ DEFINE_STATIC_KEY_DEFERRED_FALSE(tcp_md5_needed, HZ); -EXPORT_SYMBOL(tcp_md5_needed); +EXPORT_IPV6_MOD(tcp_md5_needed); static bool better_md5_match(struct tcp_md5sig_key *old, struct tcp_md5sig_key *new) { @@ -1290,7 +1301,7 @@ struct tcp_md5sig_key *__tcp_md5_do_lookup(const struct sock *sk, int l3index, } return best_match; } -EXPORT_SYMBOL(__tcp_md5_do_lookup); +EXPORT_IPV6_MOD(__tcp_md5_do_lookup); static struct tcp_md5sig_key *tcp_md5_do_lookup_exact(const struct sock *sk, const union tcp_md5_addr *addr, @@ -1337,7 +1348,7 @@ struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk, addr = (const union tcp_md5_addr *)&addr_sk->sk_daddr; return tcp_md5_do_lookup(sk, l3index, addr, AF_INET); } -EXPORT_SYMBOL(tcp_v4_md5_lookup); +EXPORT_IPV6_MOD(tcp_v4_md5_lookup); static int tcp_md5sig_info_add(struct sock *sk, gfp_t gfp) { @@ -1433,7 +1444,7 @@ int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr, return __tcp_md5_do_add(sk, addr, family, prefixlen, l3index, flags, newkey, newkeylen, GFP_KERNEL); } -EXPORT_SYMBOL(tcp_md5_do_add); +EXPORT_IPV6_MOD(tcp_md5_do_add); int tcp_md5_key_copy(struct sock *sk, const union tcp_md5_addr *addr, int family, u8 prefixlen, int l3index, @@ -1465,7 +1476,7 @@ int tcp_md5_key_copy(struct sock *sk, const union tcp_md5_addr *addr, key->flags, key->key, key->keylen, sk_gfp_mask(sk, GFP_ATOMIC)); } -EXPORT_SYMBOL(tcp_md5_key_copy); +EXPORT_IPV6_MOD(tcp_md5_key_copy); int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family, u8 prefixlen, int l3index, u8 flags) @@ -1480,7 +1491,7 @@ int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family, kfree_rcu(key, rcu); return 0; } -EXPORT_SYMBOL(tcp_md5_do_del); +EXPORT_IPV6_MOD(tcp_md5_do_del); void tcp_clear_md5_list(struct sock *sk) { @@ -1659,7 +1670,7 @@ int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key, memset(md5_hash, 0, 16); return 1; } -EXPORT_SYMBOL(tcp_v4_md5_hash_skb); +EXPORT_IPV6_MOD(tcp_v4_md5_hash_skb); #endif @@ -1732,7 +1743,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) tcp_listendrop(sk); return 0; } -EXPORT_SYMBOL(tcp_v4_conn_request); +EXPORT_IPV6_MOD(tcp_v4_conn_request); /* @@ -1770,10 +1781,6 @@ struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb, newtp = tcp_sk(newsk); newinet = inet_sk(newsk); ireq = inet_rsk(req); - sk_daddr_set(newsk, ireq->ir_rmt_addr); - sk_rcv_saddr_set(newsk, ireq->ir_loc_addr); - newsk->sk_bound_dev_if = ireq->ir_iif; - newinet->inet_saddr = ireq->ir_loc_addr; inet_opt = rcu_dereference(ireq->ireq_opt); RCU_INIT_POINTER(newinet->inet_opt, inet_opt); newinet->mc_index = inet_iif(skb); @@ -1856,7 +1863,7 @@ struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb, tcp_done(newsk); goto exit; } -EXPORT_SYMBOL(tcp_v4_syn_recv_sock); +EXPORT_IPV6_MOD(tcp_v4_syn_recv_sock); static struct sock *tcp_v4_cookie_check(struct sock *sk, struct sk_buff *skb) { @@ -1967,7 +1974,7 @@ EXPORT_SYMBOL(tcp_v4_do_rcv); int tcp_v4_early_demux(struct sk_buff *skb) { - struct net *net = dev_net(skb->dev); + struct net *net = dev_net_rcu(skb->dev); const struct iphdr *iph; const struct tcphdr *th; struct sock *sk; @@ -2057,7 +2064,8 @@ bool tcp_add_backlog(struct sock *sk, struct sk_buff 
*skb, !((TCP_SKB_CB(tail)->tcp_flags & TCP_SKB_CB(skb)->tcp_flags) & TCPHDR_ACK) || ((TCP_SKB_CB(tail)->tcp_flags ^ - TCP_SKB_CB(skb)->tcp_flags) & (TCPHDR_ECE | TCPHDR_CWR)) || + TCP_SKB_CB(skb)->tcp_flags) & + (TCPHDR_ECE | TCPHDR_CWR | TCPHDR_AE)) || !tcp_skb_can_collapse_rx(tail, skb) || thtail->doff != th->doff || memcmp(thtail + 1, th + 1, hdrlen - sizeof(*th))) @@ -2135,7 +2143,7 @@ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb, } return false; } -EXPORT_SYMBOL(tcp_add_backlog); +EXPORT_IPV6_MOD(tcp_add_backlog); int tcp_filter(struct sock *sk, struct sk_buff *skb) { @@ -2143,7 +2151,7 @@ int tcp_filter(struct sock *sk, struct sk_buff *skb) return sk_filter_trim_cap(sk, skb, th->doff * 4); } -EXPORT_SYMBOL(tcp_filter); +EXPORT_IPV6_MOD(tcp_filter); static void tcp_v4_restore_cb(struct sk_buff *skb) { @@ -2165,7 +2173,7 @@ static void tcp_v4_fill_cb(struct sk_buff *skb, const struct iphdr *iph, TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin + skb->len - th->doff * 4); TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq); - TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th); + TCP_SKB_CB(skb)->tcp_flags = tcp_flags_ntohs(th); TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph); TCP_SKB_CB(skb)->sacked = 0; TCP_SKB_CB(skb)->has_rxtstamp = @@ -2178,8 +2186,9 @@ static void tcp_v4_fill_cb(struct sk_buff *skb, const struct iphdr *iph, int tcp_v4_rcv(struct sk_buff *skb) { - struct net *net = dev_net(skb->dev); + struct net *net = dev_net_rcu(skb->dev); enum skb_drop_reason drop_reason; + enum tcp_tw_status tw_status; int sdif = inet_sdif(skb); int dif = inet_iif(skb); const struct iphdr *iph; @@ -2271,7 +2280,8 @@ int tcp_v4_rcv(struct sk_buff *skb) th = (const struct tcphdr *)skb->data; iph = ip_hdr(skb); tcp_v4_fill_cb(skb, iph, th); - nsk = tcp_check_req(sk, skb, req, false, &req_stolen); + nsk = tcp_check_req(sk, skb, req, false, &req_stolen, + &drop_reason); } else { drop_reason = SKB_DROP_REASON_SOCKET_FILTER; } @@ -2406,7 +2416,9 @@ int tcp_v4_rcv(struct sk_buff *skb) inet_twsk_put(inet_twsk(sk)); goto csum_error; } - switch (tcp_timewait_state_process(inet_twsk(sk), skb, th, &isn)) { + + tw_status = tcp_timewait_state_process(inet_twsk(sk), skb, th, &isn); + switch (tw_status) { case TCP_TW_SYN: { struct sock *sk2 = inet_lookup_listener(net, net->ipv4.tcp_death_row.hashinfo, @@ -2427,7 +2439,8 @@ int tcp_v4_rcv(struct sk_buff *skb) /* to ACK */ fallthrough; case TCP_TW_ACK: - tcp_v4_timewait_ack(sk, skb); + case TCP_TW_ACK_OOW: + tcp_v4_timewait_ack(sk, skb, tw_status); break; case TCP_TW_RST: tcp_v4_send_reset(sk, skb, SK_RST_REASON_TCP_TIMEWAIT_SOCKET); @@ -2452,7 +2465,7 @@ void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb) sk->sk_rx_dst_ifindex = skb->skb_iif; } } -EXPORT_SYMBOL(inet_sk_rx_dst_set); +EXPORT_IPV6_MOD(inet_sk_rx_dst_set); const struct inet_connection_sock_af_ops ipv4_specific = { .queue_xmit = ip_queue_xmit, @@ -2464,11 +2477,9 @@ const struct inet_connection_sock_af_ops ipv4_specific = { .net_header_len = sizeof(struct iphdr), .setsockopt = ip_setsockopt, .getsockopt = ip_getsockopt, - .addr2sockaddr = inet_csk_addr2sockaddr, - .sockaddr_len = sizeof(struct sockaddr_in), .mtu_reduced = tcp_v4_mtu_reduced, }; -EXPORT_SYMBOL(ipv4_specific); +EXPORT_IPV6_MOD(ipv4_specific); #if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO) static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = { @@ -2578,7 +2589,7 @@ void tcp_v4_destroy_sock(struct sock *sk) sk_sockets_allocated_dec(sk); } -EXPORT_SYMBOL(tcp_v4_destroy_sock); 
+EXPORT_IPV6_MOD(tcp_v4_destroy_sock); #ifdef CONFIG_PROC_FS /* Proc filesystem TCP sock list dumping. */ @@ -2814,7 +2825,7 @@ void *tcp_seq_start(struct seq_file *seq, loff_t *pos) st->last_pos = *pos; return rc; } -EXPORT_SYMBOL(tcp_seq_start); +EXPORT_IPV6_MOD(tcp_seq_start); void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos) { @@ -2845,7 +2856,7 @@ void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos) st->last_pos = *pos; return rc; } -EXPORT_SYMBOL(tcp_seq_next); +EXPORT_IPV6_MOD(tcp_seq_next); void tcp_seq_stop(struct seq_file *seq, void *v) { @@ -2863,7 +2874,7 @@ void tcp_seq_stop(struct seq_file *seq, void *v) break; } } -EXPORT_SYMBOL(tcp_seq_stop); +EXPORT_IPV6_MOD(tcp_seq_stop); static void get_openreq4(const struct request_sock *req, struct seq_file *f, int i) @@ -2912,10 +2923,10 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i) icsk_pending == ICSK_TIME_REO_TIMEOUT || icsk_pending == ICSK_TIME_LOSS_PROBE) { timer_active = 1; - timer_expires = icsk->icsk_timeout; + timer_expires = icsk_timeout(icsk); } else if (icsk_pending == ICSK_TIME_PROBE0) { timer_active = 4; - timer_expires = icsk->icsk_timeout; + timer_expires = icsk_timeout(icsk); } else if (timer_pending(&sk->sk_timer)) { timer_active = 2; timer_expires = sk->sk_timer.expires; @@ -3533,6 +3544,7 @@ static int __net_init tcp_sk_init(struct net *net) net->ipv4.sysctl_tcp_pingpong_thresh = 1; net->ipv4.sysctl_tcp_rto_min_us = jiffies_to_usecs(TCP_RTO_MIN); + net->ipv4.sysctl_tcp_rto_max_ms = TCP_RTO_MAX_SEC * MSEC_PER_SEC; return 0; } diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c index 95669935494ef..4251670e328c8 100644 --- a/net/ipv4/tcp_metrics.c +++ b/net/ipv4/tcp_metrics.c @@ -170,7 +170,7 @@ static struct tcp_metrics_block *tcpm_new(struct dst_entry *dst, bool reclaim = false; spin_lock_bh(&tcp_metrics_lock); - net = dev_net(dst->dev); + net = dev_net_rcu(dst->dev); /* While waiting for the spin-lock the cache might have been populated * with this entry and so we have to check again. @@ -273,7 +273,7 @@ static struct tcp_metrics_block *__tcp_get_metrics_req(struct request_sock *req, return NULL; } - net = dev_net(dst->dev); + net = dev_net_rcu(dst->dev); hash ^= net_hash_mix(net); hash = hash_32(hash, tcp_metrics_hash_log); @@ -318,7 +318,7 @@ static struct tcp_metrics_block *tcp_get_metrics(struct sock *sk, else return NULL; - net = dev_net(dst->dev); + net = dev_net_rcu(dst->dev); hash ^= net_hash_mix(net); hash = hash_32(hash, tcp_metrics_hash_log); diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c index b089b08e96178..fb9349be36b80 100644 --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c @@ -44,7 +44,7 @@ tcp_timewait_check_oow_rate_limit(struct inet_timewait_sock *tw, /* Send ACK. Note, we do not put the bucket, * it will be released by caller. */ - return TCP_TW_ACK; + return TCP_TW_ACK_OOW; } /* We are rate-limiting, so just release the tw sock and drop skb. 
*/ @@ -264,7 +264,7 @@ tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb, inet_twsk_put(tw); return TCP_TW_SUCCESS; } -EXPORT_SYMBOL(tcp_timewait_state_process); +EXPORT_IPV6_MOD(tcp_timewait_state_process); static void tcp_time_wait_init(struct sock *sk, struct tcp_timewait_sock *tcptw) { @@ -398,7 +398,7 @@ void tcp_twsk_destructor(struct sock *sk) #endif tcp_ao_destroy_sock(sk, true); } -EXPORT_SYMBOL_GPL(tcp_twsk_destructor); +EXPORT_IPV6_MOD_GPL(tcp_twsk_destructor); void tcp_twsk_purge(struct list_head *net_exit_list) { @@ -457,12 +457,13 @@ void tcp_openreq_init_rwin(struct request_sock *req, rcv_wnd); ireq->rcv_wscale = rcv_wscale; } -EXPORT_SYMBOL(tcp_openreq_init_rwin); static void tcp_ecn_openreq_child(struct tcp_sock *tp, const struct request_sock *req) { - tp->ecn_flags = inet_rsk(req)->ecn_ok ? TCP_ECN_OK : 0; + tcp_ecn_mode_set(tp, inet_rsk(req)->ecn_ok ? + TCP_ECN_MODE_RFC3168 : + TCP_ECN_DISABLED); } void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst) @@ -492,7 +493,7 @@ void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst) tcp_set_ca_state(sk, TCP_CA_Open); } -EXPORT_SYMBOL_GPL(tcp_ca_openreq_child); +EXPORT_IPV6_MOD_GPL(tcp_ca_openreq_child); static void smc_check_reset_syn_req(const struct tcp_sock *oldtp, struct request_sock *req, @@ -566,8 +567,7 @@ struct sock *tcp_create_openreq_child(const struct sock *sk, WRITE_ONCE(newtp->write_seq, newtp->pushed_seq = treq->snt_isn + 1); if (sock_flag(newsk, SOCK_KEEPOPEN)) - inet_csk_reset_keepalive_timer(newsk, - keepalive_time_when(newtp)); + tcp_reset_keepalive_timer(newsk, keepalive_time_when(newtp)); newtp->rx_opt.tstamp_ok = ireq->tstamp_ok; newtp->rx_opt.sack_ok = ireq->sack_ok; @@ -587,7 +587,7 @@ struct sock *tcp_create_openreq_child(const struct sock *sk, if (newtp->rx_opt.tstamp_ok) { newtp->tcp_usec_ts = treq->req_usec_ts; - newtp->rx_opt.ts_recent = READ_ONCE(req->ts_recent); + newtp->rx_opt.ts_recent = req->ts_recent; newtp->rx_opt.ts_recent_stamp = ktime_get_seconds(); newtp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED; } else { @@ -659,12 +659,14 @@ EXPORT_SYMBOL(tcp_create_openreq_child); struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb, struct request_sock *req, - bool fastopen, bool *req_stolen) + bool fastopen, bool *req_stolen, + enum skb_drop_reason *drop_reason) { struct tcp_options_received tmp_opt; struct sock *child; const struct tcphdr *th = tcp_hdr(skb); __be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK); + bool tsecr_reject = false; bool paws_reject = false; bool own_req; @@ -673,9 +675,14 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb, tcp_parse_options(sock_net(sk), skb, &tmp_opt, 0, NULL); if (tmp_opt.saw_tstamp) { - tmp_opt.ts_recent = READ_ONCE(req->ts_recent); - if (tmp_opt.rcv_tsecr) + tmp_opt.ts_recent = req->ts_recent; + if (tmp_opt.rcv_tsecr) { + if (inet_rsk(req)->tstamp_ok && !fastopen) + tsecr_reject = !between(tmp_opt.rcv_tsecr, + tcp_rsk(req)->snt_tsval_first, + READ_ONCE(tcp_rsk(req)->snt_tsval_last)); tmp_opt.rcv_tsecr -= tcp_rsk(req)->ts_off; + } /* We do not store true stamp, but it is not required, * it can be estimated (approximately) * from another data. 
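With snt_tsval_first recorded at the first SYN-ACK and snt_tsval_last at the latest one, tcp_check_req() above can reject an ACK whose echoed timestamp (TSecr) was never actually sent for this request. The range test relies on the kernel's wraparound-safe between() helper; the standalone program below reproduces that arithmetic with made-up timestamp values to show why it keeps working across a 32-bit clock wrap. (The companion tcp_output.c hunk further down records a genuine tsval of 0 as ~0U, since 0 doubles as the "nothing sent yet" sentinel.)

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Mirrors the kernel's between(): true iff seq2 <= seq1 <= seq3 modulo 2^32. */
static int between(uint32_t seq1, uint32_t seq2, uint32_t seq3)
{
	return seq3 - seq2 >= seq1 - seq2;
}

int main(void)
{
	uint32_t first = 0xfffffff0u; /* tsval on the first SYN-ACK (made up)     */
	uint32_t last  = 0x00000010u; /* tsval on the latest retransmit (made up) */

	assert(between(0xfffffff8u, first, last));  /* inside, before the wrap */
	assert(between(0x00000008u, first, last));  /* inside, after the wrap  */
	assert(!between(0x00000100u, first, last)); /* too new: tsecr_reject   */
	assert(!between(0xffffff00u, first, last)); /* too old: tsecr_reject   */
	puts("tsecr range checks ok");
	return 0;
}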
@@ -790,37 +797,34 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb, tcp_rsk(req)->snt_isn + 1)) return sk; - /* Also, it would be not so bad idea to check rcv_tsecr, which - * is essentially ACK extension and too early or too late values - * should cause reset in unsynchronized states. - */ - /* RFC793: "first check sequence number". */ - if (paws_reject || !tcp_in_window(TCP_SKB_CB(skb)->seq, - TCP_SKB_CB(skb)->end_seq, - tcp_rsk(req)->rcv_nxt, - tcp_rsk(req)->rcv_nxt + - tcp_synack_window(req))) { + if (paws_reject || tsecr_reject || + !tcp_in_window(TCP_SKB_CB(skb)->seq, + TCP_SKB_CB(skb)->end_seq, + tcp_rsk(req)->rcv_nxt, + tcp_rsk(req)->rcv_nxt + + tcp_synack_window(req))) { /* Out of window: send ACK and drop. */ if (!(flg & TCP_FLAG_RST) && !tcp_oow_rate_limited(sock_net(sk), skb, LINUX_MIB_TCPACKSKIPPEDSYNRECV, &tcp_rsk(req)->last_oow_ack_time)) req->rsk_ops->send_ack(sk, skb, req); - if (paws_reject) + if (paws_reject) { + SKB_DR_SET(*drop_reason, TCP_RFC7323_PAWS); NET_INC_STATS(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED); + } else if (tsecr_reject) { + SKB_DR_SET(*drop_reason, TCP_RFC7323_TSECR); + NET_INC_STATS(sock_net(sk), LINUX_MIB_TSECRREJECTED); + } else { + SKB_DR_SET(*drop_reason, TCP_OVERWINDOW); + } return NULL; } /* In sequence, PAWS is OK. */ - /* TODO: We probably should defer ts_recent change once - * we take ownership of @req. - */ - if (tmp_opt.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_nxt)) - WRITE_ONCE(req->ts_recent, tmp_opt.rcv_tsval); - if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn) { /* Truncate SYN, it is out of window starting at tcp_rsk(req)->rcv_isn + 1. */ @@ -869,6 +873,10 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb, if (!child) goto listen_overflow; + if (own_req && tmp_opt.saw_tstamp && + !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_nxt)) + tcp_sk(child)->rx_opt.ts_recent = tmp_opt.rcv_tsval; + if (own_req && rsk_drop_req(req)) { reqsk_queue_removed(&inet_csk(req->rsk_listener)->icsk_accept_queue, req); inet_csk_reqsk_queue_drop_and_put(req->rsk_listener, req); @@ -881,6 +889,7 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb, return inet_csk_complete_hashdance(sk, child, req, own_req); listen_overflow: + SKB_DR_SET(*drop_reason, TCP_LISTEN_OVERFLOW); if (sk != req->rsk_listener) __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMIGRATEREQFAILURE); @@ -910,7 +919,7 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb, } return NULL; } -EXPORT_SYMBOL(tcp_check_req); +EXPORT_IPV6_MOD(tcp_check_req); /* * Queue segment on the new socket if the new socket is active, @@ -952,4 +961,4 @@ enum skb_drop_reason tcp_child_process(struct sock *parent, struct sock *child, sock_put(child); return reason; } -EXPORT_SYMBOL(tcp_child_process); +EXPORT_IPV6_MOD(tcp_child_process); diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c index 2308665b51c53..934f777f29d36 100644 --- a/net/ipv4/tcp_offload.c +++ b/net/ipv4/tcp_offload.c @@ -13,12 +13,15 @@ #include #include -static void tcp_gso_tstamp(struct sk_buff *skb, unsigned int ts_seq, +static void tcp_gso_tstamp(struct sk_buff *skb, struct sk_buff *gso_skb, unsigned int seq, unsigned int mss) { + u32 flags = skb_shinfo(gso_skb)->tx_flags & SKBTX_ANY_TSTAMP; + u32 ts_seq = skb_shinfo(gso_skb)->tskey; + while (skb) { if (before(ts_seq, seq + mss)) { - skb_shinfo(skb)->tx_flags |= SKBTX_SW_TSTAMP; + skb_shinfo(skb)->tx_flags |= flags; skb_shinfo(skb)->tskey = ts_seq; return; } @@ -139,6 +142,7 @@ struct sk_buff 
*tcp_gso_segment(struct sk_buff *skb, struct sk_buff *gso_skb = skb; __sum16 newcheck; bool ooo_okay, copy_destructor; + bool ecn_cwr_mask; __wsum delta; th = tcp_hdr(skb); @@ -193,11 +197,13 @@ struct sk_buff *tcp_gso_segment(struct sk_buff *skb, th = tcp_hdr(skb); seq = ntohl(th->seq); - if (unlikely(skb_shinfo(gso_skb)->tx_flags & SKBTX_SW_TSTAMP)) - tcp_gso_tstamp(segs, skb_shinfo(gso_skb)->tskey, seq, mss); + if (unlikely(skb_shinfo(gso_skb)->tx_flags & SKBTX_ANY_TSTAMP)) + tcp_gso_tstamp(segs, gso_skb, seq, mss); newcheck = ~csum_fold(csum_add(csum_unfold(th->check), delta)); + ecn_cwr_mask = !!(skb_shinfo(gso_skb)->gso_type & SKB_GSO_TCP_ACCECN); + while (skb->next) { th->fin = th->psh = 0; th->check = newcheck; @@ -217,7 +223,8 @@ struct sk_buff *tcp_gso_segment(struct sk_buff *skb, th = tcp_hdr(skb); th->seq = htonl(seq); - th->cwr = 0; + + th->cwr &= ecn_cwr_mask; } /* Following permits TCP Small Queues to work well with GSO : @@ -325,7 +332,7 @@ struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb, th2 = tcp_hdr(p); flush = (__force int)(flags & TCP_FLAG_CWR); flush |= (__force int)((flags ^ tcp_flag_word(th2)) & - ~(TCP_FLAG_CWR | TCP_FLAG_FIN | TCP_FLAG_PSH)); + ~(TCP_FLAG_FIN | TCP_FLAG_PSH)); flush |= (__force int)(th->ack_seq ^ th2->ack_seq); for (i = sizeof(*th); i < thlen; i += 4) flush |= *(u32 *)((u8 *)th + i) ^ @@ -401,7 +408,7 @@ void tcp_gro_complete(struct sk_buff *skb) shinfo->gso_segs = NAPI_GRO_CB(skb)->count; if (th->cwr) - shinfo->gso_type |= SKB_GSO_TCP_ECN; + shinfo->gso_type |= SKB_GSO_TCP_ACCECN; } EXPORT_SYMBOL(tcp_gro_complete); @@ -425,7 +432,7 @@ static void tcp4_check_fraglist_gro(struct list_head *head, struct sk_buff *skb, inet_get_iif_sdif(skb, &iif, &sdif); iph = skb_gro_network_header(skb); - net = dev_net(skb->dev); + net = dev_net_rcu(skb->dev); sk = __inet_lookup_established(net, net->ipv4.tcp_death_row.hashinfo, iph->saddr, th->source, iph->daddr, ntohs(th->dest), diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index bc95d2a5924fd..d8dc4d121df16 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -241,7 +241,7 @@ void tcp_select_initial_window(const struct sock *sk, int __space, __u32 mss, if (wscale_ok) { /* Set window scaling on max possible window */ space = max_t(u32, space, READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[2])); - space = max_t(u32, space, READ_ONCE(sysctl_rmem_max)); + space = max_t(u32, space, READ_ONCE(sock_net(sk)->core.sysctl_rmem_max)); space = min_t(u32, space, window_clamp); *rcv_wscale = clamp_t(int, ilog2(space) - 15, 0, TCP_MAX_WSCALE); @@ -250,7 +250,7 @@ void tcp_select_initial_window(const struct sock *sk, int __space, __u32 mss, WRITE_ONCE(*__window_clamp, min_t(__u32, U16_MAX << (*rcv_wscale), window_clamp)); } -EXPORT_SYMBOL(tcp_select_initial_window); +EXPORT_IPV6_MOD(tcp_select_initial_window); /* Chose a new window to advertise, update state in tcp_sock for the * socket, and return result with RFC1323 scaling applied. 
The return @@ -325,7 +325,7 @@ static void tcp_ecn_send_synack(struct sock *sk, struct sk_buff *skb) const struct tcp_sock *tp = tcp_sk(sk); TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_CWR; - if (!(tp->ecn_flags & TCP_ECN_OK)) + if (tcp_ecn_disabled(tp)) TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_ECE; else if (tcp_ca_needs_ecn(sk) || tcp_bpf_ca_needs_ecn(sk)) @@ -351,7 +351,7 @@ static void tcp_ecn_send_syn(struct sock *sk, struct sk_buff *skb) if (use_ecn) { TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ECE | TCPHDR_CWR; - tp->ecn_flags = TCP_ECN_OK; + tcp_ecn_mode_set(tp, TCP_ECN_MODE_RFC3168); if (tcp_ca_needs_ecn(sk) || bpf_needs_ecn) INET_ECN_xmit(sk); } @@ -381,7 +381,7 @@ static void tcp_ecn_send(struct sock *sk, struct sk_buff *skb, { struct tcp_sock *tp = tcp_sk(sk); - if (tp->ecn_flags & TCP_ECN_OK) { + if (tcp_ecn_mode_rfc3168(tp)) { /* Not-retransmitted data segment: set ECT and inject CWR. */ if (skb->len != tcp_header_len && !before(TCP_SKB_CB(skb)->seq, tp->snd_nxt)) { @@ -403,7 +403,7 @@ static void tcp_ecn_send(struct sock *sk, struct sk_buff *skb, /* Constructs common control bits of non-data skb. If SYN/FIN is present, * auto increment end seqno. */ -static void tcp_init_nondata_skb(struct sk_buff *skb, u32 seq, u8 flags) +static void tcp_init_nondata_skb(struct sk_buff *skb, u32 seq, u16 flags) { skb->ip_summed = CHECKSUM_PARTIAL; @@ -525,6 +525,7 @@ static void bpf_skops_hdr_opt_len(struct sock *sk, struct sk_buff *skb, sock_owned_by_me(sk); sock_ops.is_fullsock = 1; + sock_ops.is_locked_tcp_sock = 1; sock_ops.sk = sk; } @@ -570,6 +571,7 @@ static void bpf_skops_write_hdr_opt(struct sock *sk, struct sk_buff *skb, sock_owned_by_me(sk); sock_ops.is_fullsock = 1; + sock_ops.is_locked_tcp_sock = 1; sock_ops.sk = sk; } @@ -941,7 +943,13 @@ static unsigned int tcp_synack_options(const struct sock *sk, opts->options |= OPTION_TS; opts->tsval = tcp_skb_timestamp_ts(tcp_rsk(req)->req_usec_ts, skb) + tcp_rsk(req)->ts_off; - opts->tsecr = READ_ONCE(req->ts_recent); + if (!tcp_rsk(req)->snt_tsval_first) { + if (!opts->tsval) + opts->tsval = ~0U; + tcp_rsk(req)->snt_tsval_first = opts->tsval; + } + WRITE_ONCE(tcp_rsk(req)->snt_tsval_last, opts->tsval); + opts->tsecr = req->ts_recent; remaining -= TCPOLEN_TSTAMP_ALIGNED; } if (likely(ireq->sack_ok)) { @@ -1171,7 +1179,7 @@ void tcp_release_cb(struct sock *sk) if ((flags & TCPF_ACK_DEFERRED) && inet_csk_ack_scheduled(sk)) tcp_send_ack(sk); } -EXPORT_SYMBOL(tcp_release_cb); +EXPORT_IPV6_MOD(tcp_release_cb); void __init tcp_tasklet_init(void) { @@ -1387,7 +1395,7 @@ static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, th->seq = htonl(tcb->seq); th->ack_seq = htonl(rcv_nxt); *(((__be16 *)th) + 6) = htons(((tcp_header_size >> 2) << 12) | - tcb->tcp_flags); + (tcb->tcp_flags & TCPHDR_FLAGS_MASK)); th->check = 0; th->urg_ptr = 0; @@ -1608,8 +1616,8 @@ int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue, struct sk_buff *buff; int old_factor; long limit; + u16 flags; int nlen; - u8 flags; if (WARN_ON(len > skb->len)) return -EINVAL; @@ -1783,7 +1791,7 @@ int tcp_mtu_to_mss(struct sock *sk, int pmtu) return __tcp_mtu_to_mss(sk, pmtu) - (tcp_sk(sk)->tcp_header_len - sizeof(struct tcphdr)); } -EXPORT_SYMBOL(tcp_mtu_to_mss); +EXPORT_IPV6_MOD(tcp_mtu_to_mss); /* Inverse of above */ int tcp_mss_to_mtu(struct sock *sk, int mss) @@ -1813,7 +1821,6 @@ void tcp_mtup_init(struct sock *sk) if (icsk->icsk_mtup.enabled) icsk->icsk_mtup.probe_timestamp = tcp_jiffies32; } -EXPORT_SYMBOL(tcp_mtup_init); /* This function synchronize snd mss to current 
pmtu/exthdr set. @@ -1857,7 +1864,7 @@ unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu) return mss_now; } -EXPORT_SYMBOL(tcp_sync_mss); +EXPORT_IPV6_MOD(tcp_sync_mss); /* Compute the current effective MSS, taking SACKs and IP options, * and even PMTU discovery events into account. @@ -2164,7 +2171,7 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len, { int nlen = skb->len - len; struct sk_buff *buff; - u8 flags; + u16 flags; /* All of a TSO frame must be composed of paged data. */ DEBUG_NET_WARN_ON_ONCE(skb->len != skb->data_len); @@ -2911,7 +2918,7 @@ bool tcp_schedule_loss_probe(struct sock *sk, bool advancing_rto) if (rto_delta_us > 0) timeout = min_t(u32, timeout, usecs_to_jiffies(rto_delta_us)); - tcp_reset_xmit_timer(sk, ICSK_TIME_LOSS_PROBE, timeout, TCP_RTO_MAX); + tcp_reset_xmit_timer(sk, ICSK_TIME_LOSS_PROBE, timeout, true); return true; } @@ -3545,8 +3552,7 @@ void tcp_xmit_retransmit_queue(struct sock *sk) } if (rearm_timer) tcp_reset_xmit_timer(sk, ICSK_TIME_RETRANS, - inet_csk(sk)->icsk_rto, - TCP_RTO_MAX); + inet_csk(sk)->icsk_rto, true); } /* We allow to exceed memory limits for FIN packets to expedite @@ -3853,7 +3859,7 @@ struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst, return skb; } -EXPORT_SYMBOL(tcp_make_synack); +EXPORT_IPV6_MOD(tcp_make_synack); static void tcp_ca_dst_init(struct sock *sk, const struct dst_entry *dst) { @@ -4163,8 +4169,8 @@ int tcp_connect(struct sock *sk) TCP_INC_STATS(sock_net(sk), TCP_MIB_ACTIVEOPENS); /* Timer for repeating the SYN until an answer. */ - inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, - inet_csk(sk)->icsk_rto, TCP_RTO_MAX); + tcp_reset_xmit_timer(sk, ICSK_TIME_RETRANS, + inet_csk(sk)->icsk_rto, false); return 0; } EXPORT_SYMBOL(tcp_connect); @@ -4173,7 +4179,7 @@ u32 tcp_delack_max(const struct sock *sk) { u32 delack_from_rto_min = max(tcp_rto_min(sk), 2) - 1; - return min(inet_csk(sk)->icsk_delack_max, delack_from_rto_min); + return min(READ_ONCE(inet_csk(sk)->icsk_delack_max), delack_from_rto_min); } /* Send out a delayed ack, the caller does the policy checking @@ -4219,22 +4225,21 @@ void tcp_send_delayed_ack(struct sock *sk) /* Use new timeout only if there wasn't a older one earlier. */ if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) { /* If delack timer is about to expire, send ACK now. */ - if (time_before_eq(icsk->icsk_ack.timeout, jiffies + (ato >> 2))) { + if (time_before_eq(icsk_delack_timeout(icsk), jiffies + (ato >> 2))) { tcp_send_ack(sk); return; } - if (!time_before(timeout, icsk->icsk_ack.timeout)) - timeout = icsk->icsk_ack.timeout; + if (!time_before(timeout, icsk_delack_timeout(icsk))) + timeout = icsk_delack_timeout(icsk); } smp_store_release(&icsk->icsk_ack.pending, icsk->icsk_ack.pending | ICSK_ACK_SCHED | ICSK_ACK_TIMER); - icsk->icsk_ack.timeout = timeout; sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout); } /* This routine sends an ack and also updates the window. 
*/ -void __tcp_send_ack(struct sock *sk, u32 rcv_nxt) +void __tcp_send_ack(struct sock *sk, u32 rcv_nxt, u16 flags) { struct sk_buff *buff; @@ -4253,17 +4258,17 @@ void __tcp_send_ack(struct sock *sk, u32 rcv_nxt) unsigned long delay; delay = TCP_DELACK_MAX << icsk->icsk_ack.retry; - if (delay < TCP_RTO_MAX) + if (delay < tcp_rto_max(sk)) icsk->icsk_ack.retry++; inet_csk_schedule_ack(sk); icsk->icsk_ack.ato = TCP_ATO_MIN; - inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, delay, TCP_RTO_MAX); + tcp_reset_xmit_timer(sk, ICSK_TIME_DACK, delay, false); return; } /* Reserve space for headers and prepare control bits. */ skb_reserve(buff, MAX_TCP_HEADER); - tcp_init_nondata_skb(buff, tcp_acceptable_seq(sk), TCPHDR_ACK); + tcp_init_nondata_skb(buff, tcp_acceptable_seq(sk), TCPHDR_ACK | flags); /* We do not want pure acks influencing TCP Small Queues or fq/pacing * too much. @@ -4278,7 +4283,7 @@ EXPORT_SYMBOL_GPL(__tcp_send_ack); void tcp_send_ack(struct sock *sk) { - __tcp_send_ack(sk, tcp_sk(sk)->rcv_nxt); + __tcp_send_ack(sk, tcp_sk(sk)->rcv_nxt, 0); } /* This routine sends a packet with an out of date sequence @@ -4393,7 +4398,7 @@ void tcp_send_probe0(struct sock *sk) if (err <= 0) { if (icsk->icsk_backoff < READ_ONCE(net->ipv4.sysctl_tcp_retries2)) icsk->icsk_backoff++; - timeout = tcp_probe0_when(sk, TCP_RTO_MAX); + timeout = tcp_probe0_when(sk, tcp_rto_max(sk)); } else { /* If packet was not sent due to local congestion, * Let senders fight for local resources conservatively. @@ -4402,7 +4407,7 @@ void tcp_send_probe0(struct sock *sk) } timeout = tcp_clamp_probe0_to_user_timeout(sk, timeout); - tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0, timeout, TCP_RTO_MAX); + tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0, timeout, true); } int tcp_rtx_synack(const struct sock *sk, struct request_sock *req) @@ -4430,4 +4435,4 @@ int tcp_rtx_synack(const struct sock *sk, struct request_sock *req) } return res; } -EXPORT_SYMBOL(tcp_rtx_synack); +EXPORT_IPV6_MOD(tcp_rtx_synack); diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c index b412ed88ccd9a..d64383b06a295 100644 --- a/net/ipv4/tcp_timer.c +++ b/net/ipv4/tcp_timer.c @@ -109,7 +109,7 @@ static int tcp_out_of_resources(struct sock *sk, bool do_reset) /* If peer does not open window for long time, or did not transmit * anything for long time, penalize it. */ - if ((s32)(tcp_jiffies32 - tp->lsndtime) > 2*TCP_RTO_MAX || !do_reset) + if ((s32)(tcp_jiffies32 - tp->lsndtime) > 2*tcp_rto_max(sk) || !do_reset) shift++; /* If some dubious ICMP arrived, penalize even more. 
*/ @@ -189,12 +189,12 @@ static unsigned int tcp_model_timeout(struct sock *sk, { unsigned int linear_backoff_thresh, timeout; - linear_backoff_thresh = ilog2(TCP_RTO_MAX / rto_base); + linear_backoff_thresh = ilog2(tcp_rto_max(sk) / rto_base); if (boundary <= linear_backoff_thresh) timeout = ((2 << boundary) - 1) * rto_base; else timeout = ((2 << linear_backoff_thresh) - 1) * rto_base + - (boundary - linear_backoff_thresh) * TCP_RTO_MAX; + (boundary - linear_backoff_thresh) * tcp_rto_max(sk); return jiffies_to_msecs(timeout); } /** @@ -268,7 +268,7 @@ static int tcp_write_timeout(struct sock *sk) retry_until = READ_ONCE(net->ipv4.sysctl_tcp_retries2); if (sock_flag(sk, SOCK_DEAD)) { - const bool alive = icsk->icsk_rto < TCP_RTO_MAX; + const bool alive = icsk->icsk_rto < tcp_rto_max(sk); retry_until = tcp_orphan_retries(sk, alive); do_reset = alive || @@ -322,8 +322,9 @@ void tcp_delack_timer_handler(struct sock *sk) if (!(icsk->icsk_ack.pending & ICSK_ACK_TIMER)) return; - if (time_after(icsk->icsk_ack.timeout, jiffies)) { - sk_reset_timer(sk, &icsk->icsk_delack_timer, icsk->icsk_ack.timeout); + if (time_after(icsk_delack_timeout(icsk), jiffies)) { + sk_reset_timer(sk, &icsk->icsk_delack_timer, + icsk_delack_timeout(icsk)); return; } icsk->icsk_ack.pending &= ~ICSK_ACK_TIMER; @@ -416,7 +417,8 @@ static void tcp_probe_timer(struct sock *sk) } max_probes = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_retries2); if (sock_flag(sk, SOCK_DEAD)) { - const bool alive = inet_csk_rto_backoff(icsk, TCP_RTO_MAX) < TCP_RTO_MAX; + unsigned int rto_max = tcp_rto_max(sk); + const bool alive = inet_csk_rto_backoff(icsk, rto_max) < rto_max; max_probes = tcp_orphan_retries(sk, alive); if (!alive && icsk->icsk_backoff >= max_probes) @@ -481,8 +483,8 @@ static void tcp_fastopen_synack_timer(struct sock *sk, struct request_sock *req) tcp_update_rto_stats(sk); if (!tp->retrans_stamp) tp->retrans_stamp = tcp_time_stamp_ts(tp); - inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, - req->timeout << req->num_timeout, TCP_RTO_MAX); + tcp_reset_xmit_timer(sk, ICSK_TIME_RETRANS, + req->timeout << req->num_timeout, false); } static bool tcp_rtx_probe0_timed_out(const struct sock *sk, @@ -492,7 +494,7 @@ static bool tcp_rtx_probe0_timed_out(const struct sock *sk, const struct inet_connection_sock *icsk = inet_csk(sk); u32 user_timeout = READ_ONCE(icsk->icsk_user_timeout); const struct tcp_sock *tp = tcp_sk(sk); - int timeout = TCP_RTO_MAX * 2; + int timeout = tcp_rto_max(sk) * 2; s32 rcv_delta; if (user_timeout) { @@ -508,7 +510,7 @@ static bool tcp_rtx_probe0_timed_out(const struct sock *sk, * and tp->rcv_tstamp might very well have been written recently. * rcv_delta can thus be negative. */ - rcv_delta = icsk->icsk_timeout - tp->rcv_tstamp; + rcv_delta = icsk_timeout(icsk) - tp->rcv_tstamp; if (rcv_delta <= timeout) return false; @@ -626,9 +628,9 @@ void tcp_retransmit_timer(struct sock *sk) /* Retransmission failed because of local congestion, * Let senders fight for local resources conservatively. 
*/ - inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, - TCP_RESOURCE_PROBE_INTERVAL, - TCP_RTO_MAX); + tcp_reset_xmit_timer(sk, ICSK_TIME_RETRANS, + TCP_RESOURCE_PROBE_INTERVAL, + false); goto out; } @@ -665,7 +667,7 @@ void tcp_retransmit_timer(struct sock *sk) icsk->icsk_backoff = 0; icsk->icsk_rto = clamp(__tcp_set_rto(tp), tcp_rto_min(sk), - TCP_RTO_MAX); + tcp_rto_max(sk)); } else if (sk->sk_state != TCP_SYN_SENT || tp->total_rto > READ_ONCE(net->ipv4.sysctl_tcp_syn_linear_timeouts)) { @@ -673,10 +675,10 @@ void tcp_retransmit_timer(struct sock *sk) * activated. */ icsk->icsk_backoff++; - icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX); + icsk->icsk_rto = min(icsk->icsk_rto << 1, tcp_rto_max(sk)); } - inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, - tcp_clamp_rto_to_user_timeout(sk), TCP_RTO_MAX); + tcp_reset_xmit_timer(sk, ICSK_TIME_RETRANS, + tcp_clamp_rto_to_user_timeout(sk), false); if (retransmits_timed_out(sk, READ_ONCE(net->ipv4.sysctl_tcp_retries1) + 1, 0)) __sk_dst_reset(sk); @@ -684,7 +686,8 @@ out:; } /* Called with bottom-half processing disabled. - Called by tcp_write_timer() */ + * Called by tcp_write_timer() and tcp_release_cb(). + */ void tcp_write_timer_handler(struct sock *sk) { struct inet_connection_sock *icsk = inet_csk(sk); @@ -694,11 +697,11 @@ void tcp_write_timer_handler(struct sock *sk) !icsk->icsk_pending) return; - if (time_after(icsk->icsk_timeout, jiffies)) { - sk_reset_timer(sk, &icsk->icsk_retransmit_timer, icsk->icsk_timeout); + if (time_after(icsk_timeout(icsk), jiffies)) { + sk_reset_timer(sk, &icsk->icsk_retransmit_timer, + icsk_timeout(icsk)); return; } - tcp_mstamp_refresh(tcp_sk(sk)); event = icsk->icsk_pending; @@ -749,7 +752,17 @@ void tcp_write_timer_handler(struct sock *sk) __NET_INC_STATS(net, LINUX_MIB_TCPTIMEOUTS); } -EXPORT_SYMBOL(tcp_syn_ack_timeout); +EXPORT_IPV6_MOD(tcp_syn_ack_timeout); + +void tcp_reset_keepalive_timer(struct sock *sk, unsigned long len) +{ + sk_reset_timer(sk, &sk->sk_timer, jiffies + len); +} + +static void tcp_delete_keepalive_timer(struct sock *sk) +{ + sk_stop_timer(sk, &sk->sk_timer); +} void tcp_set_keepalive(struct sock *sk, int val) { @@ -757,14 +770,13 @@ void tcp_set_keepalive(struct sock *sk, int val) return; if (val && !sock_flag(sk, SOCK_KEEPOPEN)) - inet_csk_reset_keepalive_timer(sk, keepalive_time_when(tcp_sk(sk))); + tcp_reset_keepalive_timer(sk, keepalive_time_when(tcp_sk(sk))); else if (!val) - inet_csk_delete_keepalive_timer(sk); + tcp_delete_keepalive_timer(sk); } -EXPORT_SYMBOL_GPL(tcp_set_keepalive); - +EXPORT_IPV6_MOD_GPL(tcp_set_keepalive); -static void tcp_keepalive_timer (struct timer_list *t) +static void tcp_keepalive_timer(struct timer_list *t) { struct sock *sk = from_timer(sk, t, sk_timer); struct inet_connection_sock *icsk = inet_csk(sk); @@ -775,7 +787,7 @@ static void tcp_keepalive_timer (struct timer_list *t) bh_lock_sock(sk); if (sock_owned_by_user(sk)) { /* Try again later.
*/ - inet_csk_reset_keepalive_timer (sk, HZ/20); + tcp_reset_keepalive_timer(sk, HZ/20); goto out; } @@ -841,7 +853,7 @@ static void tcp_keepalive_timer (struct timer_list *t) } resched: - inet_csk_reset_keepalive_timer (sk, elapsed); + tcp_reset_keepalive_timer(sk, elapsed); goto out; death: diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index a9bb9ce5438ea..79efbf465fb04 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -121,13 +121,12 @@ #endif struct udp_table udp_table __read_mostly; -EXPORT_SYMBOL(udp_table); long sysctl_udp_mem[3] __read_mostly; -EXPORT_SYMBOL(sysctl_udp_mem); +EXPORT_IPV6_MOD(sysctl_udp_mem); atomic_long_t udp_memory_allocated ____cacheline_aligned_in_smp; -EXPORT_SYMBOL(udp_memory_allocated); +EXPORT_IPV6_MOD(udp_memory_allocated); DEFINE_PER_CPU(int, udp_memory_per_cpu_fw_alloc); EXPORT_PER_CPU_SYMBOL_GPL(udp_memory_per_cpu_fw_alloc); @@ -352,7 +351,7 @@ int udp_lib_get_port(struct sock *sk, unsigned short snum, fail: return error; } -EXPORT_SYMBOL(udp_lib_get_port); +EXPORT_IPV6_MOD(udp_lib_get_port); int udp_v4_get_port(struct sock *sk, unsigned short snum) { @@ -418,7 +417,7 @@ u32 udp_ehashfn(const struct net *net, const __be32 laddr, const __u16 lport, return __inet_ehashfn(laddr, lport, faddr, fport, udp_ehash_secret + net_hash_mix(net)); } -EXPORT_SYMBOL(udp_ehashfn); +EXPORT_IPV6_MOD(udp_ehashfn); /** * udp4_lib_lookup1() - Simplified lookup using primary hash (destination port) @@ -653,7 +652,7 @@ void udp_lib_hash4(struct sock *sk, u16 hash) spin_unlock_bh(&hslot->lock); } -EXPORT_SYMBOL(udp_lib_hash4); +EXPORT_IPV6_MOD(udp_lib_hash4); /* call with sock lock */ void udp4_hash4(struct sock *sk) @@ -669,7 +668,7 @@ void udp4_hash4(struct sock *sk) udp_lib_hash4(sk, hash); } -EXPORT_SYMBOL(udp4_hash4); +EXPORT_IPV6_MOD(udp4_hash4); #endif /* CONFIG_BASE_SMALL */ /* UDP is nearly always wildcards out the wazoo, it makes no sense to try @@ -809,11 +808,11 @@ static inline bool __udp_is_mcast_sock(struct net *net, const struct sock *sk, } DEFINE_STATIC_KEY_FALSE(udp_encap_needed_key); -EXPORT_SYMBOL(udp_encap_needed_key); +EXPORT_IPV6_MOD(udp_encap_needed_key); #if IS_ENABLED(CONFIG_IPV6) DEFINE_STATIC_KEY_FALSE(udpv6_encap_needed_key); -EXPORT_SYMBOL(udpv6_encap_needed_key); +EXPORT_IPV6_MOD(udpv6_encap_needed_key); #endif void udp_encap_enable(void) @@ -1041,7 +1040,7 @@ void udp_flush_pending_frames(struct sock *sk) ip_flush_pending_frames(sk); } } -EXPORT_SYMBOL(udp_flush_pending_frames); +EXPORT_IPV6_MOD(udp_flush_pending_frames); /** * udp4_hwcsum - handle outgoing HW checksumming @@ -1229,7 +1228,7 @@ int udp_push_pending_frames(struct sock *sk) WRITE_ONCE(up->pending, 0); return err; } -EXPORT_SYMBOL(udp_push_pending_frames); +EXPORT_IPV6_MOD(udp_push_pending_frames); static int __udp_cmsg_send(struct cmsghdr *cmsg, u16 *gso_size) { @@ -1266,7 +1265,7 @@ int udp_cmsg_send(struct sock *sk, struct msghdr *msg, u16 *gso_size) return need_ip; } -EXPORT_SYMBOL_GPL(udp_cmsg_send); +EXPORT_IPV6_MOD_GPL(udp_cmsg_send); int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) { @@ -1281,7 +1280,7 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) int free = 0; int connected = 0; __be32 daddr, faddr, saddr; - u8 tos, scope; + u8 scope; __be16 dport; int err, is_udplite = IS_UDPLITE(sk); int corkreq = udp_test_bit(CORK, sk) || msg->msg_flags & MSG_MORE; @@ -1405,7 +1404,6 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) faddr = ipc.opt->opt.faddr; connected = 0; } - tos = get_rttos(&ipc, inet); scope = 
ip_sendmsg_scope(inet, &ipc, msg); if (scope == RT_SCOPE_LINK) connected = 0; @@ -1442,7 +1440,8 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) fl4 = &fl4_stack; - flowi4_init_output(fl4, ipc.oif, ipc.sockc.mark, tos, scope, + flowi4_init_output(fl4, ipc.oif, ipc.sockc.mark, + ipc.tos & INET_DSCP_MASK, scope, sk->sk_protocol, flow_flags, faddr, saddr, dport, inet->inet_sport, sk->sk_uid); @@ -1561,7 +1560,7 @@ void udp_splice_eof(struct socket *sock) udp_push_pending_frames(sk); release_sock(sk); } -EXPORT_SYMBOL_GPL(udp_splice_eof); +EXPORT_IPV6_MOD_GPL(udp_splice_eof); #define UDP_SKB_IS_STATELESS 0x80000000 @@ -1678,7 +1677,7 @@ void udp_skb_destructor(struct sock *sk, struct sk_buff *skb) prefetch(&skb->data); udp_rmem_release(sk, udp_skb_truesize(skb), 1, false); } -EXPORT_SYMBOL(udp_skb_destructor); +EXPORT_IPV6_MOD(udp_skb_destructor); /* as above, but the caller held the rx queue lock, too */ static void udp_skb_dtor_locked(struct sock *sk, struct sk_buff *skb) @@ -1785,7 +1784,7 @@ int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb) busylock_release(busy); return err; } -EXPORT_SYMBOL_GPL(__udp_enqueue_schedule_skb); +EXPORT_IPV6_MOD_GPL(__udp_enqueue_schedule_skb); void udp_destruct_common(struct sock *sk) { @@ -1801,7 +1800,7 @@ void udp_destruct_common(struct sock *sk) } udp_rmem_release(sk, total, 0, true); } -EXPORT_SYMBOL_GPL(udp_destruct_common); +EXPORT_IPV6_MOD_GPL(udp_destruct_common); static void udp_destruct_sock(struct sock *sk) { @@ -1832,7 +1831,7 @@ void skb_consume_udp(struct sock *sk, struct sk_buff *skb, int len) skb_release_head_state(skb); __consume_stateless_skb(skb); } -EXPORT_SYMBOL_GPL(skb_consume_udp); +EXPORT_IPV6_MOD_GPL(skb_consume_udp); static struct sk_buff *__first_packet_length(struct sock *sk, struct sk_buff_head *rcvq, @@ -1849,7 +1848,7 @@ static struct sk_buff *__first_packet_length(struct sock *sk, atomic_inc(&sk->sk_drops); __skb_unlink(skb, rcvq); *total += skb->truesize; - kfree_skb(skb); + kfree_skb_reason(skb, SKB_DROP_REASON_UDP_CSUM); } else { udp_skb_csum_unnecessary_set(skb); break; @@ -1914,7 +1913,7 @@ int udp_ioctl(struct sock *sk, int cmd, int *karg) return 0; } -EXPORT_SYMBOL(udp_ioctl); +EXPORT_IPV6_MOD(udp_ioctl); struct sk_buff *__skb_recv_udp(struct sock *sk, unsigned int flags, int *off, int *err) @@ -2003,14 +2002,14 @@ int udp_read_skb(struct sock *sk, skb_read_actor_t recv_actor) __UDP_INC_STATS(net, UDP_MIB_CSUMERRORS, is_udplite); __UDP_INC_STATS(net, UDP_MIB_INERRORS, is_udplite); atomic_inc(&sk->sk_drops); - kfree_skb(skb); + kfree_skb_reason(skb, SKB_DROP_REASON_UDP_CSUM); goto try_again; } WARN_ON_ONCE(!skb_set_owner_sk_safe(skb, sk)); return recv_actor(sk, skb); } -EXPORT_SYMBOL(udp_read_skb); +EXPORT_IPV6_MOD(udp_read_skb); /* * This should be easy, if there is something there we @@ -2118,7 +2117,7 @@ int udp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int flags, UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite); UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite); } - kfree_skb(skb); + kfree_skb_reason(skb, SKB_DROP_REASON_UDP_CSUM); /* starting over for a new packet, but check if we need to yield */ cond_resched(); @@ -2137,7 +2136,7 @@ int udp_pre_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) return BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr, &addr_len); } -EXPORT_SYMBOL(udp_pre_connect); +EXPORT_IPV6_MOD(udp_pre_connect); static int udp_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) { @@ -2186,7 +2185,7 @@ 
int udp_disconnect(struct sock *sk, int flags) release_sock(sk); return 0; } -EXPORT_SYMBOL(udp_disconnect); +EXPORT_IPV6_MOD(udp_disconnect); void udp_lib_unhash(struct sock *sk) { @@ -2216,7 +2215,7 @@ void udp_lib_unhash(struct sock *sk) spin_unlock_bh(&hslot->lock); } } -EXPORT_SYMBOL(udp_lib_unhash); +EXPORT_IPV6_MOD(udp_lib_unhash); /* * inet_rcv_saddr was changed, we must rehash secondary hash @@ -2280,7 +2279,7 @@ void udp_lib_rehash(struct sock *sk, u16 newhash, u16 newhash4) } } } -EXPORT_SYMBOL(udp_lib_rehash); +EXPORT_IPV6_MOD(udp_lib_rehash); void udp_v4_rehash(struct sock *sk) { @@ -2485,7 +2484,7 @@ bool udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst) } return false; } -EXPORT_SYMBOL(udp_sk_rx_dst_set); +EXPORT_IPV6_MOD(udp_sk_rx_dst_set); /* * Multicasts and broadcasts go to each listener. @@ -2892,8 +2891,10 @@ void udp_destroy_sock(struct sock *sk) if (encap_destroy) encap_destroy(sk); } - if (udp_test_bit(ENCAP_ENABLED, sk)) + if (udp_test_bit(ENCAP_ENABLED, sk)) { static_branch_dec(&udp_encap_needed_key); + udp_tunnel_cleanup_gro(sk); + } } } @@ -2902,10 +2903,15 @@ static void set_xfrm_gro_udp_encap_rcv(__u16 encap_type, unsigned short family, { #ifdef CONFIG_XFRM if (udp_test_bit(GRO_ENABLED, sk) && encap_type == UDP_ENCAP_ESPINUDP) { + bool old_enabled = !!udp_sk(sk)->gro_receive; + if (family == AF_INET) WRITE_ONCE(udp_sk(sk)->gro_receive, xfrm4_gro_udp_encap_rcv); else if (IS_ENABLED(CONFIG_IPV6) && family == AF_INET6) WRITE_ONCE(udp_sk(sk)->gro_receive, ipv6_stub->xfrm6_gro_udp_encap_rcv); + + if (!old_enabled && udp_sk(sk)->gro_receive) + udp_tunnel_update_gro_rcv(sk, true); } #endif } @@ -3041,7 +3047,7 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname, return err; } -EXPORT_SYMBOL(udp_lib_setsockopt); +EXPORT_IPV6_MOD(udp_lib_setsockopt); int udp_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval, unsigned int optlen) @@ -3112,7 +3118,7 @@ int udp_lib_getsockopt(struct sock *sk, int level, int optname, return -EFAULT; return 0; } -EXPORT_SYMBOL(udp_lib_getsockopt); +EXPORT_IPV6_MOD(udp_lib_getsockopt); int udp_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen) @@ -3154,7 +3160,7 @@ __poll_t udp_poll(struct file *file, struct socket *sock, poll_table *wait) return mask; } -EXPORT_SYMBOL(udp_poll); +EXPORT_IPV6_MOD(udp_poll); int udp_abort(struct sock *sk, int err) { @@ -3177,7 +3183,7 @@ int udp_abort(struct sock *sk, int err) return 0; } -EXPORT_SYMBOL_GPL(udp_abort); +EXPORT_IPV6_MOD_GPL(udp_abort); struct proto udp_prot = { .name = "UDP", @@ -3311,7 +3317,7 @@ void *udp_seq_start(struct seq_file *seq, loff_t *pos) return *pos ? 
udp_get_idx(seq, *pos-1) : SEQ_START_TOKEN; } -EXPORT_SYMBOL(udp_seq_start); +EXPORT_IPV6_MOD(udp_seq_start); void *udp_seq_next(struct seq_file *seq, void *v, loff_t *pos) { @@ -3325,7 +3331,7 @@ void *udp_seq_next(struct seq_file *seq, void *v, loff_t *pos) ++*pos; return sk; } -EXPORT_SYMBOL(udp_seq_next); +EXPORT_IPV6_MOD(udp_seq_next); void udp_seq_stop(struct seq_file *seq, void *v) { @@ -3337,7 +3343,7 @@ void udp_seq_stop(struct seq_file *seq, void *v) if (state->bucket <= udptable->mask) spin_unlock_bh(&udptable->hash[state->bucket].lock); } -EXPORT_SYMBOL(udp_seq_stop); +EXPORT_IPV6_MOD(udp_seq_stop); /* ------------------------------------------------------------------------ */ static void udp4_format_sock(struct sock *sp, struct seq_file *f, @@ -3616,7 +3622,7 @@ const struct seq_operations udp_seq_ops = { .stop = udp_seq_stop, .show = udp4_seq_show, }; -EXPORT_SYMBOL(udp_seq_ops); +EXPORT_IPV6_MOD(udp_seq_ops); static struct udp_seq_afinfo udp4_seq_afinfo = { .family = AF_INET, @@ -3805,6 +3811,15 @@ static void __net_init udp_set_table(struct net *net) static int __net_init udp_pernet_init(struct net *net) { +#if IS_ENABLED(CONFIG_NET_UDP_TUNNEL) + int i; + + /* No tunnel is configured */ + for (i = 0; i < ARRAY_SIZE(net->ipv4.udp_tunnel_gro); ++i) { + INIT_HLIST_HEAD(&net->ipv4.udp_tunnel_gro[i].list); + RCU_INIT_POINTER(net->ipv4.udp_tunnel_gro[i].sk, NULL); + } +#endif udp_sysctl_init(net); udp_set_table(net); diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c index a5be6e4ed326f..b124355a36aee 100644 --- a/net/ipv4/udp_offload.c +++ b/net/ipv4/udp_offload.c @@ -12,6 +12,167 @@ #include #include #include +#include + +#if IS_ENABLED(CONFIG_NET_UDP_TUNNEL) + +/* + * Dummy GRO tunnel callback, exists mainly to avoid dangling/NULL + * values for the udp tunnel static call. 
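dummy_gro_rcv(), defined just below, gives the UDP tunnel static call a valid target even when no tunnel type is registered. A standalone model of the dispatch this enables, with a plain function pointer and a bool standing in for DEFINE_STATIC_CALL() and the static key (hypothetical names, not the kernel API):

    #include <stdbool.h>
    #include <stdio.h>

    typedef int (*rcv_fn)(int pkt);

    static int dummy_rcv(int pkt) { (void)pkt; return -1; }  /* "flush" */
    static int vxlan_rcv(int pkt) { return pkt + 1; }

    static rcv_fn tunnel_rcv = dummy_rcv;  /* DEFINE_STATIC_CALL(..., dummy) */
    static bool single_tunnel;             /* DEFINE_STATIC_KEY_FALSE(...) */

    static int rx(int pkt)
    {
            if (single_tunnel)               /* static_branch_likely() */
                    return tunnel_rcv(pkt);  /* static_call() */
            return -1;                       /* slow path: per-socket hook */
    }

    int main(void)
    {
            printf("no tunnel registered: %d\n", rx(41));
            tunnel_rcv = vxlan_rcv;        /* static_call_update() */
            single_tunnel = true;          /* static_branch_enable() */
            printf("single tunnel type:   %d\n", rx(41));
            return 0;
    }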
+ */
+static struct sk_buff *dummy_gro_rcv(struct sock *sk,
+				      struct list_head *head,
+				      struct sk_buff *skb)
+{
+	NAPI_GRO_CB(skb)->flush = 1;
+	return NULL;
+}
+
+typedef struct sk_buff *(*udp_tunnel_gro_rcv_t)(struct sock *sk,
+						struct list_head *head,
+						struct sk_buff *skb);
+
+struct udp_tunnel_type_entry {
+	udp_tunnel_gro_rcv_t gro_receive;
+	refcount_t count;
+};
+
+/* vxlan, fou and xfrm have 2 different gro_receive hooks each */
+#define UDP_MAX_TUNNEL_TYPES (IS_ENABLED(CONFIG_GENEVE) + \
+			      IS_ENABLED(CONFIG_VXLAN) * 2 + \
+			      IS_ENABLED(CONFIG_NET_FOU) * 2 + \
+			      IS_ENABLED(CONFIG_XFRM) * 2)
+
+DEFINE_STATIC_CALL(udp_tunnel_gro_rcv, dummy_gro_rcv);
+static DEFINE_STATIC_KEY_FALSE(udp_tunnel_static_call);
+static struct mutex udp_tunnel_gro_type_lock;
+static struct udp_tunnel_type_entry udp_tunnel_gro_types[UDP_MAX_TUNNEL_TYPES];
+static unsigned int udp_tunnel_gro_type_nr;
+static DEFINE_SPINLOCK(udp_tunnel_gro_lock);
+
+void udp_tunnel_update_gro_lookup(struct net *net, struct sock *sk, bool add)
+{
+	bool is_ipv6 = sk->sk_family == AF_INET6;
+	struct udp_sock *tup, *up = udp_sk(sk);
+	struct udp_tunnel_gro *udp_tunnel_gro;
+
+	spin_lock(&udp_tunnel_gro_lock);
+
+	udp_tunnel_gro = &net->ipv4.udp_tunnel_gro[is_ipv6];
+	if (add)
+		hlist_add_head(&up->tunnel_list, &udp_tunnel_gro->list);
+	else if (up->tunnel_list.pprev)
+		hlist_del_init(&up->tunnel_list);
+
+	if (udp_tunnel_gro->list.first &&
+	    !udp_tunnel_gro->list.first->next) {
+		tup = hlist_entry(udp_tunnel_gro->list.first, struct udp_sock,
+				  tunnel_list);
+
+		rcu_assign_pointer(udp_tunnel_gro->sk, (struct sock *)tup);
+	} else {
+		RCU_INIT_POINTER(udp_tunnel_gro->sk, NULL);
+	}
+
+	spin_unlock(&udp_tunnel_gro_lock);
+}
+EXPORT_SYMBOL_GPL(udp_tunnel_update_gro_lookup);
+
+void udp_tunnel_update_gro_rcv(struct sock *sk, bool add)
+{
+	struct udp_tunnel_type_entry *cur = NULL;
+	struct udp_sock *up = udp_sk(sk);
+	int i, old_gro_type_nr;
+
+	if (!UDP_MAX_TUNNEL_TYPES || !up->gro_receive)
+		return;
+
+	mutex_lock(&udp_tunnel_gro_type_lock);
+	for (i = 0; i < udp_tunnel_gro_type_nr; i++)
+		if (udp_tunnel_gro_types[i].gro_receive == up->gro_receive)
+			cur = &udp_tunnel_gro_types[i];
+
+	old_gro_type_nr = udp_tunnel_gro_type_nr;
+	if (add) {
+		/*
+		 * Update the matching entry, if found, or add a new one
+		 * if needed
+		 */
+		if (cur) {
+			refcount_inc(&cur->count);
+			goto out;
+		}
+
+		if (unlikely(udp_tunnel_gro_type_nr == UDP_MAX_TUNNEL_TYPES)) {
+			pr_err_once("Too many UDP tunnel types, please increase UDP_MAX_TUNNEL_TYPES\n");
+			/* Ensure static call will never be enabled */
+			udp_tunnel_gro_type_nr = UDP_MAX_TUNNEL_TYPES + 2;
+			goto out;
+		}
+
+		cur = &udp_tunnel_gro_types[udp_tunnel_gro_type_nr++];
+		refcount_set(&cur->count, 1);
+		cur->gro_receive = up->gro_receive;
+	} else {
+		/*
+		 * The stack only cleans up successfully added tunnels, so
+		 * the lookup on removal should never fail.
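udp_tunnel_update_gro_rcv() above keeps one slot per distinct gro_receive hook and refcounts how many sockets use it; the removal path continues just below. A standalone model of that registry, with a plain int for refcount_t and hypothetical names:

    #include <stdio.h>

    #define MAX_TYPES 4

    struct type_entry { void (*hook)(void); int count; };

    static struct type_entry types[MAX_TYPES];
    static int type_nr;

    static void hook_a(void) { }

    static int register_hook(void (*hook)(void))
    {
            for (int i = 0; i < type_nr; i++) {
                    if (types[i].hook == hook) {
                            types[i].count++;  /* refcount_inc() */
                            return i;
                    }
            }
            if (type_nr == MAX_TYPES)
                    return -1;                 /* "too many types" */
            types[type_nr].hook = hook;
            types[type_nr].count = 1;          /* refcount_set(&cur->count, 1) */
            return type_nr++;
    }

    int main(void)
    {
            printf("first add:  slot %d\n", register_hook(hook_a));
            printf("second add: slot %d, count %d\n",
                   register_hook(hook_a), types[0].count);
            return 0;
    }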
+ */
+		if (WARN_ON_ONCE(!cur))
+			goto out;
+
+		if (!refcount_dec_and_test(&cur->count))
+			goto out;
+
+		/* avoid gaps, so that the enabled tunnel always has id 0 */
+		*cur = udp_tunnel_gro_types[--udp_tunnel_gro_type_nr];
+	}
+
+	if (udp_tunnel_gro_type_nr == 1) {
+		static_call_update(udp_tunnel_gro_rcv,
+				   udp_tunnel_gro_types[0].gro_receive);
+		static_branch_enable(&udp_tunnel_static_call);
+	} else if (old_gro_type_nr == 1) {
+		static_branch_disable(&udp_tunnel_static_call);
+		static_call_update(udp_tunnel_gro_rcv, dummy_gro_rcv);
+	}
+
+out:
+	mutex_unlock(&udp_tunnel_gro_type_lock);
+}
+EXPORT_SYMBOL_GPL(udp_tunnel_update_gro_rcv);
+
+static void udp_tunnel_gro_init(void)
+{
+	mutex_init(&udp_tunnel_gro_type_lock);
+}
+
+static struct sk_buff *udp_tunnel_gro_rcv(struct sock *sk,
+					  struct list_head *head,
+					  struct sk_buff *skb)
+{
+	if (static_branch_likely(&udp_tunnel_static_call)) {
+		if (unlikely(gro_recursion_inc_test(skb))) {
+			NAPI_GRO_CB(skb)->flush |= 1;
+			return NULL;
+		}
+		return static_call(udp_tunnel_gro_rcv)(sk, head, skb);
+	}
+	return call_gro_receive_sk(udp_sk(sk)->gro_receive, sk, head, skb);
+}
+
+#else
+
+static void udp_tunnel_gro_init(void) {}
+
+static struct sk_buff *udp_tunnel_gro_rcv(struct sock *sk,
+					  struct list_head *head,
+					  struct sk_buff *skb)
+{
+	return call_gro_receive_sk(udp_sk(sk)->gro_receive, sk, head, skb);
+}
+
+#endif
 
 static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb,
 						netdev_features_t features,
@@ -321,13 +482,17 @@ struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb,
 
 	/* clear destructor to avoid skb_segment assigning it to tail */
 	copy_dtor = gso_skb->destructor == sock_wfree;
-	if (copy_dtor)
+	if (copy_dtor) {
 		gso_skb->destructor = NULL;
+		gso_skb->sk = NULL;
+	}
 
 	segs = skb_segment(gso_skb, features);
 	if (IS_ERR_OR_NULL(segs)) {
-		if (copy_dtor)
+		if (copy_dtor) {
 			gso_skb->destructor = sock_wfree;
+			gso_skb->sk = sk;
+		}
 		return segs;
 	}
@@ -618,7 +783,7 @@ struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb,
 	skb_gro_pull(skb, sizeof(struct udphdr)); /* pull encapsulating udp header */
 	skb_gro_postpull_rcsum(skb, uh, sizeof(struct udphdr));
-	pp = call_gro_receive_sk(udp_sk(sk)->gro_receive, sk, head, skb);
+	pp = udp_tunnel_gro_rcv(sk, head, skb);
 
 out:
 	skb_gro_flush_final(skb, pp, flush);
@@ -630,9 +795,14 @@ static struct sock *udp4_gro_lookup_skb(struct sk_buff *skb, __be16 sport,
 					__be16 dport)
 {
 	const struct iphdr *iph = skb_gro_network_header(skb);
-	struct net *net = dev_net(skb->dev);
+	struct net *net = dev_net_rcu(skb->dev);
+	struct sock *sk;
 	int iif, sdif;
 
+	sk = udp_tunnel_sk(net, false);
+	if (sk && dport == htons(sk->sk_num))
+		return sk;
+
 	inet_get_iif_sdif(skb, &iif, &sdif);
 
 	return __udp4_lib_lookup(net, iph->saddr, sport,
@@ -763,5 +933,7 @@ int __init udpv4_offload_init(void)
 			.gro_complete = udp4_gro_complete,
 		},
 	};
+
+	udp_tunnel_gro_init();
 	return inet_add_offload(&net_hotdata.udpv4_offload, IPPROTO_UDP);
 }
diff --git a/net/ipv4/udp_tunnel_core.c b/net/ipv4/udp_tunnel_core.c
index 619a53eb672da..2326548997d3f 100644
--- a/net/ipv4/udp_tunnel_core.c
+++ b/net/ipv4/udp_tunnel_core.c
@@ -58,6 +58,15 @@ int udp_sock_create4(struct net *net, struct udp_port_cfg *cfg,
 }
 EXPORT_SYMBOL(udp_sock_create4);
 
+static bool sk_saddr_any(struct sock *sk)
+{
+#if IS_ENABLED(CONFIG_IPV6)
+	return ipv6_addr_any(&sk->sk_v6_rcv_saddr);
+#else
+	return !sk->sk_rcv_saddr;
+#endif
+}
+
 void setup_udp_tunnel_sock(struct net *net, struct socket *sock,
 			   struct udp_tunnel_sock_cfg *cfg)
 {
@@ -80,6 +89,12 @@
void setup_udp_tunnel_sock(struct net *net, struct socket *sock, udp_sk(sk)->gro_complete = cfg->gro_complete; udp_tunnel_encap_enable(sk); + + udp_tunnel_update_gro_rcv(sk, true); + + if (!sk->sk_dport && !sk->sk_bound_dev_if && sk_saddr_any(sk) && + sk->sk_kern_sock) + udp_tunnel_update_gro_lookup(net, sk, true); } EXPORT_SYMBOL_GPL(setup_udp_tunnel_sock); diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index f60ec8b0f8ea4..3e812187e125c 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c @@ -715,6 +715,7 @@ const struct proto_ops inet6_stream_ops = { #endif .set_rcvlowat = tcp_set_rcvlowat, }; +EXPORT_SYMBOL_GPL(inet6_stream_ops); const struct proto_ops inet6_dgram_ops = { .family = PF_INET6, diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c index 6789623b2b0d1..457de0745a338 100644 --- a/net/ipv6/exthdrs.c +++ b/net/ipv6/exthdrs.c @@ -1204,10 +1204,9 @@ ipv6_dup_options(struct sock *sk, struct ipv6_txoptions *opt) { struct ipv6_txoptions *opt2; - opt2 = sock_kmalloc(sk, opt->tot_len, GFP_ATOMIC); + opt2 = sock_kmemdup(sk, opt, opt->tot_len, GFP_ATOMIC); if (opt2) { long dif = (char *)opt2 - (char *)opt; - memcpy(opt2, opt, opt->tot_len); if (opt2->hopopt) *((char **)&opt2->hopopt) += dif; if (opt2->dst0opt) diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c index 67d39114d9a63..fd5f7112a51ff 100644 --- a/net/ipv6/fib6_rules.c +++ b/net/ipv6/fib6_rules.c @@ -29,6 +29,7 @@ struct fib6_rule { __be32 flowlabel; __be32 flowlabel_mask; dscp_t dscp; + dscp_t dscp_mask; u8 dscp_full:1; /* DSCP or TOS selector */ }; @@ -331,7 +332,7 @@ INDIRECT_CALLABLE_SCOPE int fib6_rule_match(struct fib_rule *rule, return 0; } - if (r->dscp && r->dscp != ip6_dscp(fl6->flowlabel)) + if ((r->dscp ^ ip6_dscp(fl6->flowlabel)) & r->dscp_mask) return 0; if ((r->flowlabel ^ flowi6_get_flowlabel(fl6)) & r->flowlabel_mask) @@ -340,12 +341,12 @@ INDIRECT_CALLABLE_SCOPE int fib6_rule_match(struct fib_rule *rule, if (rule->ip_proto && (rule->ip_proto != fl6->flowi6_proto)) return 0; - if (fib_rule_port_range_set(&rule->sport_range) && - !fib_rule_port_inrange(&rule->sport_range, fl6->fl6_sport)) + if (!fib_rule_port_match(&rule->sport_range, rule->sport_mask, + fl6->fl6_sport)) return 0; - if (fib_rule_port_range_set(&rule->dport_range) && - !fib_rule_port_inrange(&rule->dport_range, fl6->fl6_dport)) + if (!fib_rule_port_match(&rule->dport_range, rule->dport_mask, + fl6->fl6_dport)) return 0; return 1; @@ -360,11 +361,35 @@ static int fib6_nl2rule_dscp(const struct nlattr *nla, struct fib6_rule *rule6, } rule6->dscp = inet_dsfield_to_dscp(nla_get_u8(nla) << 2); + rule6->dscp_mask = inet_dsfield_to_dscp(INET_DSCP_MASK); rule6->dscp_full = true; return 0; } +static int fib6_nl2rule_dscp_mask(const struct nlattr *nla, + struct fib6_rule *rule6, + struct netlink_ext_ack *extack) +{ + dscp_t dscp_mask; + + if (!rule6->dscp_full) { + NL_SET_ERR_MSG_ATTR(extack, nla, + "Cannot specify DSCP mask without DSCP value"); + return -EINVAL; + } + + dscp_mask = inet_dsfield_to_dscp(nla_get_u8(nla) << 2); + if (rule6->dscp & ~dscp_mask) { + NL_SET_ERR_MSG_ATTR(extack, nla, "Invalid DSCP mask"); + return -EINVAL; + } + + rule6->dscp_mask = dscp_mask; + + return 0; +} + static int fib6_nl2rule_flowlabel(struct nlattr **tb, struct fib6_rule *rule6, struct netlink_ext_ack *extack) { @@ -399,9 +424,9 @@ static int fib6_rule_configure(struct fib_rule *rule, struct sk_buff *skb, struct nlattr **tb, struct netlink_ext_ack *extack) { + struct fib6_rule *rule6 = (struct fib6_rule *)rule; + struct net *net = rule->fr_net; 
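The fib6_rule_match() and port changes above replace exact compares with masked ones, and the netlink paths convert between the 6-bit DSCP codepoint and the dsfield byte with a two-bit shift. A standalone check of both identities (DSFIELD_DSCP_MASK is a local stand-in for the kernel's INET_DSCP_MASK):

    #include <assert.h>
    #include <stdio.h>

    #define DSFIELD_DSCP_MASK 0xfcu /* upper six bits; ECN sits in the low two */

    static int masked_match(unsigned int rule, unsigned int pkt, unsigned int mask)
    {
            /* zero exactly when rule and pkt agree on every masked bit */
            return ((rule ^ pkt) & mask) == 0;
    }

    int main(void)
    {
            /* full mask: behaves like the old equality test */
            assert(masked_match(0x2e, 0x2e, 0x3f) && !masked_match(0x2e, 0x2c, 0x3f));
            /* partial mask: only the selected bits must agree */
            assert(masked_match(0x20, 0x2f, 0x30));
            /* empty mask (selector unset): matches everything */
            assert(masked_match(0x00, 0x15, 0x00));

            /* dscp <-> dsfield conversion used by the FRA_DSCP attributes */
            unsigned int dscp = 46; /* EF */
            unsigned int dsfield = (dscp << 2) & DSFIELD_DSCP_MASK;
            assert(dsfield == 0xb8 && dsfield >> 2 == 46);

            printf("masked-match and DSCP shift identities hold\n");
            return 0;
    }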
int err = -EINVAL; - struct net *net = sock_net(skb->sk); - struct fib6_rule *rule6 = (struct fib6_rule *) rule; if (!inet_validate_dscp(frh->tos)) { NL_SET_ERR_MSG(extack, @@ -409,10 +434,15 @@ static int fib6_rule_configure(struct fib_rule *rule, struct sk_buff *skb, goto errout; } rule6->dscp = inet_dsfield_to_dscp(frh->tos); + rule6->dscp_mask = frh->tos ? inet_dsfield_to_dscp(INET_DSCP_MASK) : 0; if (tb[FRA_DSCP] && fib6_nl2rule_dscp(tb[FRA_DSCP], rule6, extack) < 0) goto errout; + if (tb[FRA_DSCP_MASK] && + fib6_nl2rule_dscp_mask(tb[FRA_DSCP_MASK], rule6, extack) < 0) + goto errout; + if ((tb[FRA_FLOWLABEL] || tb[FRA_FLOWLABEL_MASK]) && fib6_nl2rule_flowlabel(tb, rule6, extack) < 0) goto errout; @@ -482,6 +512,14 @@ static int fib6_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh, return 0; } + if (tb[FRA_DSCP_MASK]) { + dscp_t dscp_mask; + + dscp_mask = inet_dsfield_to_dscp(nla_get_u8(tb[FRA_DSCP_MASK]) << 2); + if (!rule6->dscp_full || rule6->dscp_mask != dscp_mask) + return 0; + } + if (tb[FRA_FLOWLABEL] && nla_get_be32(tb[FRA_FLOWLABEL]) != rule6->flowlabel) return 0; @@ -512,7 +550,9 @@ static int fib6_rule_fill(struct fib_rule *rule, struct sk_buff *skb, if (rule6->dscp_full) { frh->tos = 0; if (nla_put_u8(skb, FRA_DSCP, - inet_dscp_to_dsfield(rule6->dscp) >> 2)) + inet_dscp_to_dsfield(rule6->dscp) >> 2) || + nla_put_u8(skb, FRA_DSCP_MASK, + inet_dscp_to_dsfield(rule6->dscp_mask) >> 2)) goto nla_put_failure; } else { frh->tos = inet_dscp_to_dsfield(rule6->dscp); @@ -539,6 +579,7 @@ static size_t fib6_rule_nlmsg_payload(struct fib_rule *rule) return nla_total_size(16) /* dst */ + nla_total_size(16) /* src */ + nla_total_size(1) /* dscp */ + + nla_total_size(1) /* dscp mask */ + nla_total_size(4) /* flowlabel */ + nla_total_size(4); /* flowlabel mask */ } diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c index 4d14ab7f7e99f..3fd19a84b358d 100644 --- a/net/ipv6/icmp.c +++ b/net/ipv6/icmp.c @@ -957,12 +957,9 @@ static int icmpv6_rcv(struct sk_buff *skb) break; case ICMPV6_ECHO_REPLY: - reason = ping_rcv(skb); - break; - case ICMPV6_EXT_ECHO_REPLY: - reason = ping_rcv(skb); - break; + ping_rcv(skb); + return 0; case ICMPV6_PKT_TOOBIG: /* BUGGG_FUTURE: if packet contains rthdr, we cannot update diff --git a/net/ipv6/ila/ila_lwt.c b/net/ipv6/ila/ila_lwt.c index ff7e734e335b0..7d574f5132e2f 100644 --- a/net/ipv6/ila/ila_lwt.c +++ b/net/ipv6/ila/ila_lwt.c @@ -88,13 +88,15 @@ static int ila_output(struct net *net, struct sock *sk, struct sk_buff *skb) goto drop; } - if (ilwt->connected) { + /* cache only if we don't create a dst reference loop */ + if (ilwt->connected && orig_dst->lwtstate != dst->lwtstate) { local_bh_disable(); dst_cache_set_ip6(&ilwt->dst_cache, dst, &fl6.saddr); local_bh_enable(); } } + skb_dst_drop(skb); skb_dst_set(skb, dst); return dst_output(net, sk, skb); diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c index 80043e46117c5..dbcf556a35bbc 100644 --- a/net/ipv6/inet6_connection_sock.c +++ b/net/ipv6/inet6_connection_sock.c @@ -56,20 +56,6 @@ struct dst_entry *inet6_csk_route_req(const struct sock *sk, } EXPORT_SYMBOL(inet6_csk_route_req); -void inet6_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr) -{ - struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) uaddr; - - sin6->sin6_family = AF_INET6; - sin6->sin6_addr = sk->sk_v6_daddr; - sin6->sin6_port = inet_sk(sk)->inet_dport; - /* We do not store received flowlabel for TCP */ - sin6->sin6_flowinfo = 0; - sin6->sin6_scope_id = ipv6_iface_scope_id(&sin6->sin6_addr, - 
sk->sk_bound_dev_if); -} -EXPORT_SYMBOL_GPL(inet6_csk_addr2sockaddr); - static inline struct dst_entry *__inet6_csk_dst_check(struct sock *sk, u32 cookie) { diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c index 9ec05e354baa6..76ee521189eb7 100644 --- a/net/ipv6/inet6_hashtables.c +++ b/net/ipv6/inet6_hashtables.c @@ -35,8 +35,8 @@ u32 inet6_ehashfn(const struct net *net, lhash = (__force u32)laddr->s6_addr32[3]; fhash = __ipv6_addr_jhash(faddr, tcp_ipv6_hash_secret); - return __inet6_ehashfn(lhash, lport, fhash, fport, - inet6_ehash_secret + net_hash_mix(net)); + return lport + __inet6_ehashfn(lhash, 0, fhash, fport, + inet6_ehash_secret + net_hash_mix(net)); } EXPORT_SYMBOL_GPL(inet6_ehashfn); @@ -263,7 +263,9 @@ EXPORT_SYMBOL_GPL(inet6_lookup); static int __inet6_check_established(struct inet_timewait_death_row *death_row, struct sock *sk, const __u16 lport, - struct inet_timewait_sock **twp) + struct inet_timewait_sock **twp, + bool rcu_lookup, + u32 hash) { struct inet_hashinfo *hinfo = death_row->hashinfo; struct inet_sock *inet = inet_sk(sk); @@ -273,14 +275,26 @@ static int __inet6_check_established(struct inet_timewait_death_row *death_row, struct net *net = sock_net(sk); const int sdif = l3mdev_master_ifindex_by_index(net, dif); const __portpair ports = INET_COMBINED_PORTS(inet->inet_dport, lport); - const unsigned int hash = inet6_ehashfn(net, daddr, lport, saddr, - inet->inet_dport); struct inet_ehash_bucket *head = inet_ehash_bucket(hinfo, hash); - spinlock_t *lock = inet_ehash_lockp(hinfo, hash); - struct sock *sk2; - const struct hlist_nulls_node *node; struct inet_timewait_sock *tw = NULL; + const struct hlist_nulls_node *node; + struct sock *sk2; + spinlock_t *lock; + + if (rcu_lookup) { + sk_nulls_for_each(sk2, node, &head->chain) { + if (sk2->sk_hash != hash || + !inet6_match(net, sk2, saddr, daddr, + ports, dif, sdif)) + continue; + if (sk2->sk_state == TCP_TIME_WAIT) + break; + return -EADDRNOTAVAIL; + } + return 0; + } + lock = inet_ehash_lockp(hinfo, hash); spin_lock(lock); sk_nulls_for_each(sk2, node, &head->chain) { @@ -339,11 +353,19 @@ static u64 inet6_sk_port_offset(const struct sock *sk) int inet6_hash_connect(struct inet_timewait_death_row *death_row, struct sock *sk) { + const struct in6_addr *daddr = &sk->sk_v6_rcv_saddr; + const struct in6_addr *saddr = &sk->sk_v6_daddr; + const struct inet_sock *inet = inet_sk(sk); + const struct net *net = sock_net(sk); u64 port_offset = 0; + u32 hash_port0; if (!inet_sk(sk)->inet_num) port_offset = inet6_sk_port_offset(sk); - return __inet_hash_connect(death_row, sk, port_offset, + + hash_port0 = inet6_ehashfn(net, daddr, 0, saddr, inet->inet_dport); + + return __inet_hash_connect(death_row, sk, port_offset, hash_port0, __inet6_check_established); } EXPORT_SYMBOL_GPL(inet6_hash_connect); diff --git a/net/ipv6/ioam6_iptunnel.c b/net/ipv6/ioam6_iptunnel.c index 2c383c12a4315..09065187378e6 100644 --- a/net/ipv6/ioam6_iptunnel.c +++ b/net/ipv6/ioam6_iptunnel.c @@ -337,7 +337,6 @@ static int ioam6_do_encap(struct net *net, struct sk_buff *skb, static int ioam6_output(struct net *net, struct sock *sk, struct sk_buff *skb) { struct dst_entry *dst = skb_dst(skb), *cache_dst = NULL; - struct in6_addr orig_daddr; struct ioam6_lwt *ilwt; int err = -EINVAL; u32 pkt_cnt; @@ -352,8 +351,6 @@ static int ioam6_output(struct net *net, struct sock *sk, struct sk_buff *skb) if (pkt_cnt % ilwt->freq.n >= ilwt->freq.k) goto out; - orig_daddr = ipv6_hdr(skb)->daddr; - local_bh_disable(); cache_dst = 
dst_cache_get(&ilwt->cache); local_bh_enable(); @@ -422,7 +419,10 @@ static int ioam6_output(struct net *net, struct sock *sk, struct sk_buff *skb) goto drop; } - if (!ipv6_addr_equal(&orig_daddr, &ipv6_hdr(skb)->daddr)) { + /* avoid lwtunnel_output() reentry loop when destination is the same + * after transformation (e.g., with the inline mode) + */ + if (dst->lwtstate != cache_dst->lwtstate) { skb_dst_drop(skb); skb_dst_set(skb, cache_dst); return dst_output(net, sk, skb); diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index 235808cfec705..957ca98fa70f9 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c @@ -43,6 +43,7 @@ #include #include #include +#include #include #include @@ -1498,7 +1499,6 @@ static int ip6gre_tunnel_init_common(struct net_device *dev) tunnel = netdev_priv(dev); tunnel->dev = dev; - tunnel->net = dev_net(dev); strcpy(tunnel->parms.name, dev->name); ret = dst_cache_init(&tunnel->dst_cache, GFP_KERNEL); @@ -1621,7 +1621,7 @@ static int __net_init ip6gre_init_net(struct net *net) /* FB netdevice is special: we have one, and only one per netns. * Allowing to move it to another netns is clearly unsafe. */ - ign->fb_tunnel_dev->netns_local = true; + ign->fb_tunnel_dev->netns_immutable = true; ip6gre_fb_tunnel_init(ign->fb_tunnel_dev); ign->fb_tunnel_dev->rtnl_link_ops = &ip6gre_link_ops; @@ -1882,7 +1882,6 @@ static int ip6erspan_tap_init(struct net_device *dev) tunnel = netdev_priv(dev); tunnel->dev = dev; - tunnel->net = dev_net(dev); strcpy(tunnel->parms.name, dev->name); ret = dst_cache_init(&tunnel->dst_cache, GFP_KERNEL); @@ -1971,7 +1970,7 @@ static bool ip6gre_netlink_encap_parms(struct nlattr *data[], return ret; } -static int ip6gre_newlink_common(struct net *src_net, struct net_device *dev, +static int ip6gre_newlink_common(struct net *link_net, struct net_device *dev, struct nlattr *tb[], struct nlattr *data[], struct netlink_ext_ack *extack) { @@ -1992,7 +1991,7 @@ static int ip6gre_newlink_common(struct net *src_net, struct net_device *dev, eth_hw_addr_random(dev); nt->dev = dev; - nt->net = dev_net(dev); + nt->net = link_net; err = register_netdevice(dev); if (err) @@ -2005,12 +2004,14 @@ static int ip6gre_newlink_common(struct net *src_net, struct net_device *dev, return err; } -static int ip6gre_newlink(struct net *src_net, struct net_device *dev, - struct nlattr *tb[], struct nlattr *data[], +static int ip6gre_newlink(struct net_device *dev, + struct rtnl_newlink_params *params, struct netlink_ext_ack *extack) { + struct net *net = params->link_net ? : dev_net(dev); struct ip6_tnl *nt = netdev_priv(dev); - struct net *net = dev_net(dev); + struct nlattr **data = params->data; + struct nlattr **tb = params->tb; struct ip6gre_net *ign; int err; @@ -2025,7 +2026,7 @@ static int ip6gre_newlink(struct net *src_net, struct net_device *dev, return -EEXIST; } - err = ip6gre_newlink_common(src_net, dev, tb, data, extack); + err = ip6gre_newlink_common(net, dev, tb, data, extack); if (!err) { ip6gre_tnl_link_config(nt, !tb[IFLA_MTU]); ip6gre_tunnel_link_md(ign, nt); @@ -2241,12 +2242,14 @@ static void ip6erspan_tap_setup(struct net_device *dev) netif_keep_dst(dev); } -static int ip6erspan_newlink(struct net *src_net, struct net_device *dev, - struct nlattr *tb[], struct nlattr *data[], +static int ip6erspan_newlink(struct net_device *dev, + struct rtnl_newlink_params *params, struct netlink_ext_ack *extack) { + struct net *net = params->link_net ? 
: dev_net(dev); struct ip6_tnl *nt = netdev_priv(dev); - struct net *net = dev_net(dev); + struct nlattr **data = params->data; + struct nlattr **tb = params->tb; struct ip6gre_net *ign; int err; @@ -2262,7 +2265,7 @@ static int ip6erspan_newlink(struct net *src_net, struct net_device *dev, return -EEXIST; } - err = ip6gre_newlink_common(src_net, dev, tb, data, extack); + err = ip6gre_newlink_common(net, dev, tb, data, extack); if (!err) { ip6erspan_tnl_link_config(nt, !tb[IFLA_MTU]); ip6erspan_tunnel_link_md(ign, nt); diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index d577bf2f30538..581bc62890818 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -1386,6 +1386,7 @@ static int ip6_setup_cork(struct sock *sk, struct inet_cork_full *cork, } v6_cork->hop_limit = ipc6->hlimit; v6_cork->tclass = ipc6->tclass; + v6_cork->dontfrag = ipc6->dontfrag; if (rt->dst.flags & DST_XFRM_TUNNEL) mtu = READ_ONCE(np->pmtudisc) >= IPV6_PMTUDISC_PROBE ? READ_ONCE(rt->dst.dev->mtu) : dst_mtu(&rt->dst); @@ -1421,7 +1422,7 @@ static int __ip6_append_data(struct sock *sk, int getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb), void *from, size_t length, int transhdrlen, - unsigned int flags, struct ipcm6_cookie *ipc6) + unsigned int flags) { struct sk_buff *skb, *skb_prev = NULL; struct inet_cork *cork = &cork_full->base; @@ -1475,7 +1476,7 @@ static int __ip6_append_data(struct sock *sk, if (headersize + transhdrlen > mtu) goto emsgsize; - if (cork->length + length > mtu - headersize && ipc6->dontfrag && + if (cork->length + length > mtu - headersize && v6_cork->dontfrag && (sk->sk_protocol == IPPROTO_UDP || sk->sk_protocol == IPPROTO_ICMPV6 || sk->sk_protocol == IPPROTO_RAW)) { @@ -1855,7 +1856,7 @@ int ip6_append_data(struct sock *sk, return __ip6_append_data(sk, &sk->sk_write_queue, &inet->cork, &np->cork, sk_page_frag(sk), getfrag, - from, length, transhdrlen, flags, ipc6); + from, length, transhdrlen, flags); } EXPORT_SYMBOL_GPL(ip6_append_data); @@ -2054,13 +2055,11 @@ struct sk_buff *ip6_make_skb(struct sock *sk, ip6_cork_release(cork, &v6_cork); return ERR_PTR(err); } - if (ipc6->dontfrag < 0) - ipc6->dontfrag = inet6_test_bit(DONTFRAG, sk); err = __ip6_append_data(sk, &queue, cork, &v6_cork, ¤t->task_frag, getfrag, from, length + exthdrlen, transhdrlen + exthdrlen, - flags, ipc6); + flags); if (err) { __ip6_flush_pending_frames(sk, &queue, cork, &v6_cork); return ERR_PTR(err); diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index 48fd53b989726..a04dd1bb4b195 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c @@ -52,6 +52,7 @@ #include #include #include +#include #include #include @@ -253,8 +254,7 @@ static void ip6_dev_free(struct net_device *dev) static int ip6_tnl_create2(struct net_device *dev) { struct ip6_tnl *t = netdev_priv(dev); - struct net *net = dev_net(dev); - struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id); + struct ip6_tnl_net *ip6n = net_generic(t->net, ip6_tnl_net_id); int err; dev->rtnl_link_ops = &ip6_link_ops; @@ -1878,7 +1878,6 @@ ip6_tnl_dev_init_gen(struct net_device *dev) int t_hlen; t->dev = dev; - t->net = dev_net(dev); ret = dst_cache_init(&t->dst_cache, GFP_KERNEL); if (ret) @@ -1940,6 +1939,7 @@ static int __net_init ip6_fb_tnl_dev_init(struct net_device *dev) struct net *net = dev_net(dev); struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id); + t->net = net; t->parms.proto = IPPROTO_IPV6; rcu_assign_pointer(ip6n->tnls_wc[0], t); @@ -2002,17 +2002,22 @@ static void 
ip6_tnl_netlink_parms(struct nlattr *data[], parms->fwmark = nla_get_u32(data[IFLA_IPTUN_FWMARK]); } -static int ip6_tnl_newlink(struct net *src_net, struct net_device *dev, - struct nlattr *tb[], struct nlattr *data[], +static int ip6_tnl_newlink(struct net_device *dev, + struct rtnl_newlink_params *params, struct netlink_ext_ack *extack) { - struct net *net = dev_net(dev); - struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id); + struct nlattr **data = params->data; + struct nlattr **tb = params->tb; struct ip_tunnel_encap ipencap; + struct ip6_tnl_net *ip6n; struct ip6_tnl *nt, *t; + struct net *net; int err; + net = params->link_net ? : dev_net(dev); + ip6n = net_generic(net, ip6_tnl_net_id); nt = netdev_priv(dev); + nt->net = net; if (ip_tunnel_netlink_encap_parms(data, &ipencap)) { err = ip6_tnl_encap_setup(nt, &ipencap); @@ -2261,7 +2266,7 @@ static int __net_init ip6_tnl_init_net(struct net *net) /* FB netdevice is special: we have one, and only one per netns. * Allowing to move it to another netns is clearly unsafe. */ - ip6n->fb_tnl_dev->netns_local = true; + ip6n->fb_tnl_dev->netns_immutable = true; err = ip6_fb_tnl_dev_init(ip6n->fb_tnl_dev); if (err < 0) diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c index 590737c275379..09ec4b0ad7dc6 100644 --- a/net/ipv6/ip6_vti.c +++ b/net/ipv6/ip6_vti.c @@ -45,6 +45,7 @@ #include #include #include +#include #include #define IP6_VTI_HASH_SIZE_SHIFT 5 @@ -177,8 +178,7 @@ vti6_tnl_unlink(struct vti6_net *ip6n, struct ip6_tnl *t) static int vti6_tnl_create2(struct net_device *dev) { struct ip6_tnl *t = netdev_priv(dev); - struct net *net = dev_net(dev); - struct vti6_net *ip6n = net_generic(net, vti6_net_id); + struct vti6_net *ip6n = net_generic(t->net, vti6_net_id); int err; dev->rtnl_link_ops = &vti6_link_ops; @@ -925,7 +925,6 @@ static inline int vti6_dev_init_gen(struct net_device *dev) struct ip6_tnl *t = netdev_priv(dev); t->dev = dev; - t->net = dev_net(dev); netdev_hold(dev, &t->dev_tracker, GFP_KERNEL); netdev_lockdep_set_classes(dev); return 0; @@ -958,6 +957,7 @@ static int __net_init vti6_fb_tnl_dev_init(struct net_device *dev) struct net *net = dev_net(dev); struct vti6_net *ip6n = net_generic(net, vti6_net_id); + t->net = net; t->parms.proto = IPPROTO_IPV6; rcu_assign_pointer(ip6n->tnls_wc[0], t); @@ -997,17 +997,20 @@ static void vti6_netlink_parms(struct nlattr *data[], parms->fwmark = nla_get_u32(data[IFLA_VTI_FWMARK]); } -static int vti6_newlink(struct net *src_net, struct net_device *dev, - struct nlattr *tb[], struct nlattr *data[], +static int vti6_newlink(struct net_device *dev, + struct rtnl_newlink_params *params, struct netlink_ext_ack *extack) { - struct net *net = dev_net(dev); + struct nlattr **data = params->data; struct ip6_tnl *nt; + struct net *net; + net = params->link_net ? 
: dev_net(dev); nt = netdev_priv(dev); vti6_netlink_parms(data, &nt->parms); nt->parms.proto = IPPROTO_IPV6; + nt->net = net; if (vti6_locate(net, &nt->parms, 0)) return -EEXIST; diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c index 535e9f72514c0..e8ade93a0f0eb 100644 --- a/net/ipv6/ip6mr.c +++ b/net/ipv6/ip6mr.c @@ -668,7 +668,7 @@ static void reg_vif_setup(struct net_device *dev) dev->flags = IFF_NOARP; dev->netdev_ops = ®_vif_netdev_ops; dev->needs_free_netdev = true; - dev->netns_local = true; + dev->netns_immutable = true; } static struct net_device *ip6mr_reg_vif(struct net *net, struct mr_table *mrt) diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c index 8699d1a188dc4..ecb5c4b8518fd 100644 --- a/net/ipv6/ndisc.c +++ b/net/ipv6/ndisc.c @@ -1680,7 +1680,7 @@ static void ndisc_fill_redirect_hdr_option(struct sk_buff *skb, void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target) { struct net_device *dev = skb->dev; - struct net *net = dev_net(dev); + struct net *net = dev_net_rcu(dev); struct sock *sk = net->ipv6.ndisc_sk; int optlen = 0; struct inet_peer *peer; @@ -1695,8 +1695,8 @@ void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target) ops_data_buf[NDISC_OPS_REDIRECT_DATA_SPACE], *ops_data = NULL; bool ret; - if (netif_is_l3_master(skb->dev)) { - dev = dev_get_by_index_rcu(dev_net(skb->dev), IPCB(skb)->iif); + if (netif_is_l3_master(dev)) { + dev = dev_get_by_index_rcu(net, IPCB(skb)->iif); if (!dev) return; } @@ -1734,10 +1734,8 @@ void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target) goto release; } - rcu_read_lock(); peer = inet_getpeer_v6(net->ipv6.peers, &ipv6_hdr(skb)->saddr); ret = inet_peer_xrlim_allow(peer, 1*HZ); - rcu_read_unlock(); if (!ret) goto release; diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c index 4120e67a8ce6b..d6bd8f7079bb7 100644 --- a/net/ipv6/netfilter/nf_conntrack_reasm.c +++ b/net/ipv6/netfilter/nf_conntrack_reasm.c @@ -123,7 +123,8 @@ static void __net_exit nf_ct_frags6_sysctl_unregister(struct net *net) #endif static int nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *skb, - struct sk_buff *prev_tail, struct net_device *dev); + struct sk_buff *prev_tail, struct net_device *dev, + int *refs); static inline u8 ip6_frag_ecn(const struct ipv6hdr *ipv6h) { @@ -167,7 +168,8 @@ static struct frag_queue *fq_find(struct net *net, __be32 id, u32 user, static int nf_ct_frag6_queue(struct frag_queue *fq, struct sk_buff *skb, - const struct frag_hdr *fhdr, int nhoff) + const struct frag_hdr *fhdr, int nhoff, + int *refs) { unsigned int payload_len; struct net_device *dev; @@ -221,7 +223,7 @@ static int nf_ct_frag6_queue(struct frag_queue *fq, struct sk_buff *skb, * this case. 
-DaveM */ pr_debug("end of fragment not rounded to 8 bytes.\n"); - inet_frag_kill(&fq->q); + inet_frag_kill(&fq->q, refs); return -EPROTO; } if (end > fq->q.len) { @@ -287,7 +289,7 @@ static int nf_ct_frag6_queue(struct frag_queue *fq, struct sk_buff *skb, unsigned long orefdst = skb->_skb_refdst; skb->_skb_refdst = 0UL; - err = nf_ct_frag6_reasm(fq, skb, prev, dev); + err = nf_ct_frag6_reasm(fq, skb, prev, dev, refs); skb->_skb_refdst = orefdst; /* After queue has assumed skb ownership, only 0 or @@ -301,7 +303,7 @@ static int nf_ct_frag6_queue(struct frag_queue *fq, struct sk_buff *skb, return -EINPROGRESS; insert_error: - inet_frag_kill(&fq->q); + inet_frag_kill(&fq->q, refs); err: skb_dst_drop(skb); return -EINVAL; @@ -315,13 +317,14 @@ static int nf_ct_frag6_queue(struct frag_queue *fq, struct sk_buff *skb, * the last and the first frames arrived and all the bits are here. */ static int nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *skb, - struct sk_buff *prev_tail, struct net_device *dev) + struct sk_buff *prev_tail, struct net_device *dev, + int *refs) { void *reasm_data; int payload_len; u8 ecn; - inet_frag_kill(&fq->q); + inet_frag_kill(&fq->q, refs); ecn = ip_frag_ecn_table[fq->ecn]; if (unlikely(ecn == 0xff)) @@ -372,7 +375,7 @@ static int nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *skb, return 0; err: - inet_frag_kill(&fq->q); + inet_frag_kill(&fq->q, refs); return -EINVAL; } @@ -447,6 +450,7 @@ int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user) struct frag_hdr *fhdr; struct frag_queue *fq; struct ipv6hdr *hdr; + int refs = 0; u8 prevhdr; /* Jumbo payload inhibits frag. header */ @@ -473,23 +477,26 @@ int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user) hdr = ipv6_hdr(skb); fhdr = (struct frag_hdr *)skb_transport_header(skb); + rcu_read_lock(); fq = fq_find(net, fhdr->identification, user, hdr, skb->dev ? skb->dev->ifindex : 0); if (fq == NULL) { + rcu_read_unlock(); pr_debug("Can't find and can't create new queue\n"); return -ENOMEM; } spin_lock_bh(&fq->q.lock); - ret = nf_ct_frag6_queue(fq, skb, fhdr, nhoff); + ret = nf_ct_frag6_queue(fq, skb, fhdr, nhoff, &refs); if (ret == -EPROTO) { skb->transport_header = savethdr; ret = 0; } spin_unlock_bh(&fq->q.lock); - inet_frag_put(&fq->q); + rcu_read_unlock(); + inet_frag_putn(&fq->q, refs); return ret; } EXPORT_SYMBOL_GPL(nf_ct_frag6_gather); diff --git a/net/ipv6/netfilter/nf_socket_ipv6.c b/net/ipv6/netfilter/nf_socket_ipv6.c index a7690ec623259..9ea5ef56cb270 100644 --- a/net/ipv6/netfilter/nf_socket_ipv6.c +++ b/net/ipv6/netfilter/nf_socket_ipv6.c @@ -103,6 +103,10 @@ struct sock *nf_sk_lookup_slow_v6(struct net *net, const struct sk_buff *skb, struct sk_buff *data_skb = NULL; int doff = 0; int thoff = 0, tproto; +#if IS_ENABLED(CONFIG_NF_CONNTRACK) + enum ip_conntrack_info ctinfo; + struct nf_conn const *ct; +#endif tproto = ipv6_find_hdr(skb, &thoff, -1, NULL, NULL); if (tproto < 0) { @@ -136,6 +140,25 @@ struct sock *nf_sk_lookup_slow_v6(struct net *net, const struct sk_buff *skb, return NULL; } +#if IS_ENABLED(CONFIG_NF_CONNTRACK) + /* Do the lookup with the original socket address in + * case this is a reply packet of an established + * SNAT-ted connection. 
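The nf_sk_lookup_slow_v6() hunk continuing below rewrites the lookup key from the conntrack entry's original tuple, so a reply belonging to an SNAT-ted connection finds the real local socket instead of missing on the on-wire address. A standalone model of that key rewrite (simplified tuples, hypothetical types):

    #include <stdio.h>

    struct tuple { unsigned int addr; unsigned short port; };

    struct conn {
            struct tuple orig_src;  /* pre-NAT source, as conntrack recorded it */
            struct tuple snat_src;  /* on-wire source after SNAT */
    };

    static const struct tuple *lookup_key(const struct conn *ct,
                                          const struct tuple *on_wire)
    {
            /* a reply is addressed to the SNAT address: translate it back
             * before doing the socket lookup */
            if (ct && on_wire->addr == ct->snat_src.addr &&
                on_wire->port == ct->snat_src.port)
                    return &ct->orig_src;
            return on_wire;
    }

    int main(void)
    {
            struct conn ct = {
                    .orig_src = { 0x0a000001, 5000 },  /* 10.0.0.1:5000 */
                    .snat_src = { 0xc0a80001, 6000 },  /* 192.168.0.1:6000 */
            };
            const struct tuple *key = lookup_key(&ct, &ct.snat_src);

            printf("lookup uses port %u\n", key->port); /* 5000 */
            return 0;
    }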
+ */ + ct = nf_ct_get(skb, &ctinfo); + if (ct && + ((tproto != IPPROTO_ICMPV6 && + ctinfo == IP_CT_ESTABLISHED_REPLY) || + (tproto == IPPROTO_ICMPV6 && + ctinfo == IP_CT_RELATED_REPLY)) && + (ct->status & IPS_SRC_NAT_DONE)) { + daddr = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.in6; + dport = (tproto == IPPROTO_TCP) ? + ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u.tcp.port : + ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u.udp.port; + } +#endif + return nf_socket_get_sock_v6(net, data_skb, doff, tproto, saddr, daddr, sport, dport, indev); } diff --git a/net/ipv6/netfilter/nft_fib_ipv6.c b/net/ipv6/netfilter/nft_fib_ipv6.c index c9f1634b3838a..7fd9d7b21cd42 100644 --- a/net/ipv6/netfilter/nft_fib_ipv6.c +++ b/net/ipv6/netfilter/nft_fib_ipv6.c @@ -170,6 +170,11 @@ void nft_fib6_eval(const struct nft_expr *expr, struct nft_regs *regs, struct rt6_info *rt; int lookup_flags; + if (nft_fib_can_skip(pkt)) { + nft_fib_store_result(dest, priv, nft_in(pkt)); + return; + } + if (priv->flags & NFTA_FIB_F_IIF) oif = nft_in(pkt); else if (priv->flags & NFTA_FIB_F_OIF) @@ -181,17 +186,13 @@ void nft_fib6_eval(const struct nft_expr *expr, struct nft_regs *regs, return; } - lookup_flags = nft_fib6_flowi_init(&fl6, priv, pkt, oif, iph); - - if (nft_hook(pkt) == NF_INET_PRE_ROUTING || - nft_hook(pkt) == NF_INET_INGRESS) { - if (nft_fib_is_loopback(pkt->skb, nft_in(pkt)) || - nft_fib_v6_skip_icmpv6(pkt->skb, pkt->tprot, iph)) { - nft_fib_store_result(dest, priv, nft_in(pkt)); - return; - } + if (nft_fib_v6_skip_icmpv6(pkt->skb, pkt->tprot, iph)) { + nft_fib_store_result(dest, priv, nft_in(pkt)); + return; } + lookup_flags = nft_fib6_flowi_init(&fl6, priv, pkt, oif, iph); + *dest = 0; rt = (void *)ip6_route_lookup(nft_net(pkt), &fl6, pkt->skb, lookup_flags); diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c index 46b8adf6e7f8e..84d90dd8b3f0f 100644 --- a/net/ipv6/ping.c +++ b/net/ipv6/ping.c @@ -119,9 +119,6 @@ static int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) return -EINVAL; ipcm6_init_sk(&ipc6, sk); - ipc6.sockc.priority = READ_ONCE(sk->sk_priority); - ipc6.sockc.tsflags = READ_ONCE(sk->sk_tsflags); - ipc6.sockc.mark = READ_ONCE(sk->sk_mark); fl6.flowi6_oif = oif; diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index a45aba090aa41..fda640ebd53f8 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c @@ -769,19 +769,16 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) hdrincl = inet_test_bit(HDRINCL, sk); + ipcm6_init_sk(&ipc6, sk); + /* * Get and verify the address. 
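ping_v6_sendmsg() above and rawv6_sendmsg() here drop their open-coded READ_ONCE() reads of sk_priority, sk_tsflags and sk_mark: ipcm6_init_sk() now seeds the cookie from the socket in one place, so call sites cannot drift apart. A standalone model of that refactor (hypothetical structures, not the kernel helper):

    #include <stdio.h>

    struct sock_opts { unsigned int mark, priority, tsflags; };
    struct cookie    { unsigned int mark, priority, tsflags; };

    /* one initializer instead of per-field reads at every call site */
    static void cookie_init_sk(struct cookie *c, const struct sock_opts *sk)
    {
            c->mark = sk->mark;
            c->priority = sk->priority;
            c->tsflags = sk->tsflags;
    }

    int main(void)
    {
            struct sock_opts sk = { .mark = 7, .priority = 6, .tsflags = 1 };
            struct cookie c;

            cookie_init_sk(&c, &sk);
            printf("mark=%u priority=%u tsflags=%u\n",
                   c.mark, c.priority, c.tsflags);
            return 0;
    }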
*/ memset(&fl6, 0, sizeof(fl6)); - fl6.flowi6_mark = READ_ONCE(sk->sk_mark); + fl6.flowi6_mark = ipc6.sockc.mark; fl6.flowi6_uid = sk->sk_uid; - ipcm6_init(&ipc6); - ipc6.sockc.tsflags = READ_ONCE(sk->sk_tsflags); - ipc6.sockc.mark = fl6.flowi6_mark; - ipc6.sockc.priority = READ_ONCE(sk->sk_priority); - if (sin6) { if (addr_len < SIN6_LEN_RFC2133) return -EINVAL; @@ -891,9 +888,6 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) if (hdrincl) fl6.flowi6_flags |= FLOWI_FLAG_KNOWN_NH; - if (ipc6.tclass < 0) - ipc6.tclass = np->tclass; - fl6.flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6.flowlabel); dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p); @@ -904,9 +898,6 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) if (ipc6.hlimit < 0) ipc6.hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst); - if (ipc6.dontfrag < 0) - ipc6.dontfrag = inet6_test_bit(DONTFRAG, sk); - if (msg->msg_flags&MSG_CONFIRM) goto do_confirm; diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c index a48be617a8ab5..49740898bc137 100644 --- a/net/ipv6/reassembly.c +++ b/net/ipv6/reassembly.c @@ -68,7 +68,8 @@ static u8 ip6_frag_ecn(const struct ipv6hdr *ipv6h) static struct inet_frags ip6_frags; static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *skb, - struct sk_buff *prev_tail, struct net_device *dev); + struct sk_buff *prev_tail, struct net_device *dev, + int *refs); static void ip6_frag_expire(struct timer_list *t) { @@ -105,7 +106,7 @@ fq_find(struct net *net, __be32 id, const struct ipv6hdr *hdr, int iif) static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb, struct frag_hdr *fhdr, int nhoff, - u32 *prob_offset) + u32 *prob_offset, int *refs) { struct net *net = dev_net(skb_dst(skb)->dev); int offset, end, fragsize; @@ -220,7 +221,7 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb, unsigned long orefdst = skb->_skb_refdst; skb->_skb_refdst = 0UL; - err = ip6_frag_reasm(fq, skb, prev_tail, dev); + err = ip6_frag_reasm(fq, skb, prev_tail, dev, refs); skb->_skb_refdst = orefdst; return err; } @@ -238,7 +239,7 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb, __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASM_OVERLAPS); discard_fq: - inet_frag_kill(&fq->q); + inet_frag_kill(&fq->q, refs); __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMFAILS); err: @@ -254,7 +255,8 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb, * the last and the first frames arrived and all the bits are here. 
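The reassembly changes above and below thread an int *refs through the frag-queue paths: inet_frag_kill() records references to drop instead of putting them under the bucket lock, and the caller releases the whole batch once via inet_frag_putn() after unlocking. A standalone model of the deferred-put pattern:

    #include <stdio.h>

    struct obj { int refcount; };

    static void kill_obj(struct obj *o, int *refs)
    {
            (void)o;        /* unlink from the hash would happen here */
            (*refs)++;      /* defer the actual put to the caller */
    }

    static void put_n(struct obj *o, int refs)
    {
            o->refcount -= refs;    /* one batched decrement */
            printf("released %d ref(s), %d left\n", refs, o->refcount);
    }

    int main(void)
    {
            struct obj o = { .refcount = 3 };
            int refs = 0;

            /* "under the bucket lock": possibly several kill/unlink events */
            kill_obj(&o, &refs);
            kill_obj(&o, &refs);

            /* "after unlock": a single release of the batch */
            put_n(&o, refs);
            return 0;
    }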
*/ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *skb, - struct sk_buff *prev_tail, struct net_device *dev) + struct sk_buff *prev_tail, struct net_device *dev, + int *refs) { struct net *net = fq->q.fqdir->net; unsigned int nhoff; @@ -262,7 +264,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *skb, int payload_len; u8 ecn; - inet_frag_kill(&fq->q); + inet_frag_kill(&fq->q, refs); ecn = ip_frag_ecn_table[fq->ecn]; if (unlikely(ecn == 0xff)) @@ -303,9 +305,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *skb, skb_postpush_rcsum(skb, skb_network_header(skb), skb_network_header_len(skb)); - rcu_read_lock(); __IP6_INC_STATS(net, __in6_dev_stats_get(dev, skb), IPSTATS_MIB_REASMOKS); - rcu_read_unlock(); fq->q.rb_fragments = RB_ROOT; fq->q.fragments_tail = NULL; fq->q.last_run_head = NULL; @@ -317,10 +317,8 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *skb, out_oom: net_dbg_ratelimited("ip6_frag_reasm: no memory for reassembly\n"); out_fail: - rcu_read_lock(); __IP6_INC_STATS(net, __in6_dev_stats_get(dev, skb), IPSTATS_MIB_REASMFAILS); - rcu_read_unlock(); - inet_frag_kill(&fq->q); + inet_frag_kill(&fq->q, refs); return -1; } @@ -377,19 +375,21 @@ static int ipv6_frag_rcv(struct sk_buff *skb) } iif = skb->dev ? skb->dev->ifindex : 0; + rcu_read_lock(); fq = fq_find(net, fhdr->identification, hdr, iif); if (fq) { u32 prob_offset = 0; - int ret; + int ret, refs = 0; spin_lock(&fq->q.lock); fq->iif = iif; ret = ip6_frag_queue(fq, skb, fhdr, IP6CB(skb)->nhoff, - &prob_offset); + &prob_offset, &refs); spin_unlock(&fq->q.lock); - inet_frag_put(&fq->q); + rcu_read_unlock(); + inet_frag_putn(&fq->q, refs); if (prob_offset) { __IP6_INC_STATS(net, __in6_dev_get_safely(skb->dev), IPSTATS_MIB_INHDRERRORS); @@ -398,6 +398,7 @@ static int ipv6_frag_rcv(struct sk_buff *skb) } return ret; } + rcu_read_unlock(); __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMFAILS); kfree_skb(skb); diff --git a/net/ipv6/route.c b/net/ipv6/route.c index ef2d23a1e3d53..3ac67e1c846c3 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -2193,6 +2193,12 @@ int fib6_table_lookup(struct net *net, struct fib6_table *table, int oif, redo_rt6_select: rt6_select(net, fn, oif, res, strict); + if (ipv6_addr_loopback(&fl6->daddr)) { + struct fib6_info *rt = res->f6i; + + if (!rt || !(rt->fib6_flags & RTF_LOCAL)) + res->f6i = net->ipv6.fib6_null_entry; + } if (res->f6i == net->ipv6.fib6_null_entry) { fn = fib6_backtrack(fn, &fl6->saddr); if (fn) @@ -3644,7 +3650,8 @@ int fib6_nh_init(struct net *net, struct fib6_nh *fib6_nh, in6_dev_put(idev); if (err) { - lwtstate_put(fib6_nh->fib_nh_lws); + fib_nh_common_release(&fib6_nh->nh_common); + fib6_nh->nh_common.nhc_pcpu_rth_output = NULL; fib6_nh->fib_nh_lws = NULL; netdev_put(dev, dev_tracker); } @@ -3802,10 +3809,12 @@ static struct fib6_info *ip6_route_info_create(struct fib6_config *cfg, if (nh) { if (rt->fib6_src.plen) { NL_SET_ERR_MSG(extack, "Nexthops can not be used with source routing"); + err = -EINVAL; goto out_free; } if (!nexthop_get(nh)) { NL_SET_ERR_MSG(extack, "Nexthop has been deleted"); + err = -ENOENT; goto out_free; } rt->nh = nh; @@ -5128,7 +5137,8 @@ static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh, cfg->fc_mp_len = nla_len(tb[RTA_MULTIPATH]); err = lwtunnel_valid_encap_type_attr(cfg->fc_mp, - cfg->fc_mp_len, extack); + cfg->fc_mp_len, + extack, true); if (err < 0) goto errout; } @@ -5147,7 +5157,8 @@ static int rtm_to_fib6_config(struct sk_buff *skb, 
struct nlmsghdr *nlh, if (tb[RTA_ENCAP_TYPE]) { cfg->fc_encap_type = nla_get_u16(tb[RTA_ENCAP_TYPE]); - err = lwtunnel_valid_encap_type(cfg->fc_encap_type, extack); + err = lwtunnel_valid_encap_type(cfg->fc_encap_type, + extack, true); if (err < 0) goto errout; } diff --git a/net/ipv6/rpl_iptunnel.c b/net/ipv6/rpl_iptunnel.c index 0ac4283acdf20..7c05ac846646f 100644 --- a/net/ipv6/rpl_iptunnel.c +++ b/net/ipv6/rpl_iptunnel.c @@ -262,10 +262,18 @@ static int rpl_input(struct sk_buff *skb) { struct dst_entry *orig_dst = skb_dst(skb); struct dst_entry *dst = NULL; + struct lwtunnel_state *lwtst; struct rpl_lwt *rlwt; int err; - rlwt = rpl_lwt_lwtunnel(orig_dst->lwtstate); + /* We cannot dereference "orig_dst" once ip6_route_input() or + * skb_dst_drop() is called. However, in order to detect a dst loop, we + * need the address of its lwtstate. So, save the address of lwtstate + * now and use it later as a comparison. + */ + lwtst = orig_dst->lwtstate; + + rlwt = rpl_lwt_lwtunnel(lwtst); local_bh_disable(); dst = dst_cache_get(&rlwt->cache); @@ -280,7 +288,9 @@ static int rpl_input(struct sk_buff *skb) if (!dst) { ip6_route_input(skb); dst = skb_dst(skb); - if (!dst->error) { + + /* cache only if we don't create a dst reference loop */ + if (!dst->error && lwtst != dst->lwtstate) { local_bh_disable(); dst_cache_set_ip6(&rlwt->cache, dst, &ipv6_hdr(skb)->saddr); diff --git a/net/ipv6/seg6_iptunnel.c b/net/ipv6/seg6_iptunnel.c index 33833b2064c07..51583461ae29b 100644 --- a/net/ipv6/seg6_iptunnel.c +++ b/net/ipv6/seg6_iptunnel.c @@ -472,10 +472,18 @@ static int seg6_input_core(struct net *net, struct sock *sk, { struct dst_entry *orig_dst = skb_dst(skb); struct dst_entry *dst = NULL; + struct lwtunnel_state *lwtst; struct seg6_lwt *slwt; int err; - slwt = seg6_lwt_lwtunnel(orig_dst->lwtstate); + /* We cannot dereference "orig_dst" once ip6_route_input() or + * skb_dst_drop() is called. However, in order to detect a dst loop, we + * need the address of its lwtstate. So, save the address of lwtstate + * now and use it later as a comparison. 
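rpl_input() above and seg6_input_core() just below apply the same fix: remember the lwtstate pointer of the incoming dst and cache the route from ip6_route_input() only when its lwtstate differs, because an identical lwtstate means the lookup resolved straight back into this tunnel. A standalone model of the check:

    #include <stdio.h>

    struct lwtstate { int id; };
    struct dst { struct lwtstate *lwt; };

    static int may_cache(const struct lwtstate *in, const struct dst *out)
    {
            /* same lwtstate => the lookup resolved back to this tunnel */
            return out->lwt != in;
    }

    int main(void)
    {
            struct lwtstate tun = { 1 }, other = { 2 };
            struct dst loop = { &tun }, fwd = { &other };

            printf("loop route cached?    %d\n", may_cache(&tun, &loop)); /* 0 */
            printf("forward route cached? %d\n", may_cache(&tun, &fwd));  /* 1 */
            return 0;
    }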
+ */ + lwtst = orig_dst->lwtstate; + + slwt = seg6_lwt_lwtunnel(lwtst); local_bh_disable(); dst = dst_cache_get(&slwt->cache); @@ -490,7 +498,9 @@ static int seg6_input_core(struct net *net, struct sock *sk, if (!dst) { ip6_route_input(skb); dst = skb_dst(skb); - if (!dst->error) { + + /* cache only if we don't create a dst reference loop */ + if (!dst->error && lwtst != dst->lwtstate) { local_bh_disable(); dst_cache_set_ip6(&slwt->cache, dst, &ipv6_hdr(skb)->saddr); diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index 39bd8951bfca1..9a0f32acb7500 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c @@ -51,6 +51,7 @@ #include #include #include +#include #include /* @@ -201,8 +202,7 @@ static void ipip6_tunnel_clone_6rd(struct net_device *dev, struct sit_net *sitn) static int ipip6_tunnel_create(struct net_device *dev) { struct ip_tunnel *t = netdev_priv(dev); - struct net *net = dev_net(dev); - struct sit_net *sitn = net_generic(net, sit_net_id); + struct sit_net *sitn = net_generic(t->net, sit_net_id); int err; __dev_addr_set(dev, &t->parms.iph.saddr, 4); @@ -269,6 +269,7 @@ static struct ip_tunnel *ipip6_tunnel_locate(struct net *net, nt = netdev_priv(dev); + nt->net = net; nt->parms = *parms; if (ipip6_tunnel_create(dev) < 0) goto failed_free; @@ -1449,7 +1450,6 @@ static int ipip6_tunnel_init(struct net_device *dev) int err; tunnel->dev = dev; - tunnel->net = dev_net(dev); strcpy(tunnel->parms.name, dev->name); ipip6_tunnel_bind_dev(dev); @@ -1550,19 +1550,23 @@ static bool ipip6_netlink_6rd_parms(struct nlattr *data[], } #endif -static int ipip6_newlink(struct net *src_net, struct net_device *dev, - struct nlattr *tb[], struct nlattr *data[], +static int ipip6_newlink(struct net_device *dev, + struct rtnl_newlink_params *params, struct netlink_ext_ack *extack) { - struct net *net = dev_net(dev); + struct nlattr **data = params->data; + struct nlattr **tb = params->tb; struct ip_tunnel *nt; struct ip_tunnel_encap ipencap; #ifdef CONFIG_IPV6_SIT_6RD struct ip_tunnel_6rd ip6rd; #endif + struct net *net; int err; + net = params->link_net ? : dev_net(dev); nt = netdev_priv(dev); + nt->net = net; if (ip_tunnel_netlink_encap_parms(data, &ipencap)) { err = ip_tunnel_encap_setup(nt, &ipencap); @@ -1856,7 +1860,10 @@ static int __net_init sit_init_net(struct net *net) /* FB netdevice is special: we have one, and only one per netns. * Allowing to move it to another netns is clearly unsafe. 
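The comment above belongs to sit_init_net(); right below, its FB device switches from netns_local to the new netns_immutable flag, a rename applied across this series (ip6_gre, ip6_tunnel, ip6mr). Both names mark a device that must never be moved to another network namespace. A standalone model of the semantics (hypothetical types):

    #include <stdbool.h>
    #include <stdio.h>

    struct netns { int id; };
    struct device { struct netns *ns; bool netns_immutable; };

    static int dev_change_netns(struct device *dev, struct netns *target)
    {
            if (dev->netns_immutable)
                    return -1;      /* "clearly unsafe": refuse the move */
            dev->ns = target;
            return 0;
    }

    int main(void)
    {
            struct netns init_ns = { 0 }, other = { 1 };
            struct device fb = { .ns = &init_ns, .netns_immutable = true };

            printf("move fb tunnel: %d\n", dev_change_netns(&fb, &other));
            return 0;
    }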
 */
-	sitn->fb_tunnel_dev->netns_local = true;
+	sitn->fb_tunnel_dev->netns_immutable = true;
+
+	t = netdev_priv(sitn->fb_tunnel_dev);
+	t->net = net;
 
 	err = register_netdev(sitn->fb_tunnel_dev);
 	if (err)
@@ -1865,8 +1872,6 @@ static int __net_init sit_init_net(struct net *net)
 	ipip6_tunnel_clone_6rd(sitn->fb_tunnel_dev, sitn);
 
 	ipip6_fb_tunnel_init(sitn->fb_tunnel_dev);
 
-	t = netdev_priv(sitn->fb_tunnel_dev);
-	strcpy(t->parms.name, sitn->fb_tunnel_dev->name);
 
 	return 0;
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 2debdf085a3b4..b03c223eda4fa 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -376,7 +376,7 @@ static int tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 {
 	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
 	const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
-	struct net *net = dev_net(skb->dev);
+	struct net *net = dev_net_rcu(skb->dev);
 	struct request_sock *fastopen;
 	struct ipv6_pinfo *np;
 	struct tcp_sock *tp;
@@ -798,6 +798,8 @@ static void tcp_v6_init_req(struct request_sock *req,
 
 	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
 	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
+	ireq->ir_rmt_addr = LOOPBACK4_IPV6;
+	ireq->ir_loc_addr = LOOPBACK4_IPV6;
 
 	/* So that link locals have meaning */
 	if ((!sk_listener->sk_bound_dev_if || l3_slave) &&
@@ -864,16 +866,16 @@ static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32
 			      int oif, int rst, u8 tclass, __be32 label,
 			      u32 priority, u32 txhash, struct tcp_key *key)
 {
-	const struct tcphdr *th = tcp_hdr(skb);
-	struct tcphdr *t1;
-	struct sk_buff *buff;
-	struct flowi6 fl6;
-	struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
-	struct sock *ctl_sk = net->ipv6.tcp_sk;
+	struct net *net = sk ? sock_net(sk) : dev_net_rcu(skb_dst(skb)->dev);
 	unsigned int tot_len = sizeof(struct tcphdr);
+	struct sock *ctl_sk = net->ipv6.tcp_sk;
+	const struct tcphdr *th = tcp_hdr(skb);
 	__be32 mrst = 0, *topt;
 	struct dst_entry *dst;
-	__u32 mark = 0;
+	struct sk_buff *buff;
+	struct tcphdr *t1;
+	struct flowi6 fl6;
+	u32 mark = 0;
 
 	if (tsecr)
 		tot_len += TCPOLEN_TSTAMP_ALIGNED;
@@ -997,7 +999,7 @@ static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32
 	if (!IS_ERR(dst)) {
 		skb_dst_set(buff, dst);
 		ip6_xmit(ctl_sk, buff, &fl6, fl6.flowi6_mark, NULL,
-			 tclass & ~INET_ECN_MASK, priority);
+			 tclass, priority);
 		TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
 		if (rst)
 			TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
@@ -1039,7 +1041,7 @@ static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb,
 	if (!sk && !ipv6_unicast_destination(skb))
 		return;
 
-	net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
+	net = sk ? sock_net(sk) : dev_net_rcu(skb_dst(skb)->dev);
 	/* Invalid TCP option size or twice included auth */
 	if (tcp_parse_auth_options(th, &md5_hash_location, &aoh))
 		return;
@@ -1133,7 +1135,8 @@ static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb,
 		trace_tcp_send_reset(sk, skb, reason);
 
 	tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, 1,
-			     ipv6_get_dsfield(ipv6h), label, priority, txhash,
+			     ipv6_get_dsfield(ipv6h) & ~INET_ECN_MASK,
+			     label, priority, txhash,
 			     &key);
 
 #if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO)
@@ -1153,11 +1156,16 @@ static void tcp_v6_send_ack(const struct sock *sk, struct sk_buff *skb, u32 seq,
 			    tclass, label, priority, txhash, key);
 }
 
-static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
+static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb,
+				enum tcp_tw_status tw_status)
 {
 	struct inet_timewait_sock *tw = inet_twsk(sk);
 	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
+	u8 tclass = tw->tw_tclass;
 	struct tcp_key key = {};
+
+	if (tw_status == TCP_TW_ACK_OOW)
+		tclass &= ~INET_ECN_MASK;
 
 #ifdef CONFIG_TCP_AO
 	struct tcp_ao_info *ao_info;
@@ -1201,7 +1209,7 @@ static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
 			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
 			tcp_tw_tsval(tcptw),
 			READ_ONCE(tcptw->tw_ts_recent), tw->tw_bound_dev_if,
-			&key, tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel),
+			&key, tclass, cpu_to_be32(tw->tw_flowlabel),
 			tw->tw_priority, tw->tw_txhash);
 
 #ifdef CONFIG_TCP_AO
@@ -1277,8 +1285,9 @@ static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
 			tcp_rsk(req)->rcv_nxt,
 			tcp_synack_window(req) >> inet_rsk(req)->rcv_wscale,
 			tcp_rsk_tsval(tcp_rsk(req)),
-			READ_ONCE(req->ts_recent), sk->sk_bound_dev_if,
-			&key, ipv6_get_dsfield(ipv6_hdr(skb)), 0,
+			req->ts_recent, sk->sk_bound_dev_if,
+			&key, ipv6_get_dsfield(ipv6_hdr(skb)) & ~INET_ECN_MASK,
+			0,
 			READ_ONCE(sk->sk_priority),
 			READ_ONCE(tcp_rsk(req)->txhash));
 	if (tcp_key_is_ao(&key))
@@ -1451,10 +1460,7 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
 
 		ip6_dst_store(newsk, dst, NULL, NULL);
 
-		newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
 		newnp->saddr = ireq->ir_v6_loc_addr;
-		newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
-		newsk->sk_bound_dev_if = ireq->ir_iif;
 
 		/* Now IPv6 options...
@@ -1507,9 +1513,6 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
 
 	tcp_initialize_rcv_mss(newsk);
 
-	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
-	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;
-
 #ifdef CONFIG_TCP_MD5SIG
 	l3index = l3mdev_master_ifindex_by_index(sock_net(sk), ireq->ir_iif);
 
@@ -1735,7 +1738,7 @@ static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
 	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
 				    skb->len - th->doff*4);
 	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
-	TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
+	TCP_SKB_CB(skb)->tcp_flags = tcp_flags_ntohs(th);
 	TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
 	TCP_SKB_CB(skb)->sacked = 0;
 	TCP_SKB_CB(skb)->has_rxtstamp =
@@ -1744,7 +1747,9 @@ static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
 
 INDIRECT_CALLABLE_SCOPE int tcp_v6_rcv(struct sk_buff *skb)
 {
+	struct net *net = dev_net_rcu(skb->dev);
 	enum skb_drop_reason drop_reason;
+	enum tcp_tw_status tw_status;
 	int sdif = inet6_sdif(skb);
 	int dif = inet6_iif(skb);
 	const struct tcphdr *th;
@@ -1753,7 +1758,6 @@ INDIRECT_CALLABLE_SCOPE int tcp_v6_rcv(struct sk_buff *skb)
 	bool refcounted;
 	int ret;
 	u32 isn;
-	struct net *net = dev_net(skb->dev);
 
 	drop_reason = SKB_DROP_REASON_NOT_SPECIFIED;
 	if (skb->pkt_type != PACKET_HOST)
@@ -1832,7 +1836,8 @@ INDIRECT_CALLABLE_SCOPE int tcp_v6_rcv(struct sk_buff *skb)
 			th = (const struct tcphdr *)skb->data;
 			hdr = ipv6_hdr(skb);
 			tcp_v6_fill_cb(skb, hdr, th);
-			nsk = tcp_check_req(sk, skb, req, false, &req_stolen);
+			nsk = tcp_check_req(sk, skb, req, false, &req_stolen,
+					    &drop_reason);
 		} else {
 			drop_reason = SKB_DROP_REASON_SOCKET_FILTER;
 		}
@@ -1965,7 +1970,8 @@ INDIRECT_CALLABLE_SCOPE int tcp_v6_rcv(struct sk_buff *skb)
 		goto csum_error;
 	}
 
-	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th, &isn)) {
+	tw_status = tcp_timewait_state_process(inet_twsk(sk), skb, th, &isn);
+	switch (tw_status) {
 	case TCP_TW_SYN:
 	{
 		struct sock *sk2;
@@ -1990,7 +1996,8 @@ INDIRECT_CALLABLE_SCOPE int tcp_v6_rcv(struct sk_buff *skb)
 		/* to ACK */
 		fallthrough;
 	case TCP_TW_ACK:
-		tcp_v6_timewait_ack(sk, skb);
+	case TCP_TW_ACK_OOW:
+		tcp_v6_timewait_ack(sk, skb, tw_status);
 		break;
 	case TCP_TW_RST:
 		tcp_v6_send_reset(sk, skb, SK_RST_REASON_TCP_TIMEWAIT_SOCKET);
@@ -2004,7 +2011,7 @@ INDIRECT_CALLABLE_SCOPE int tcp_v6_rcv(struct sk_buff *skb)
 
 void tcp_v6_early_demux(struct sk_buff *skb)
 {
-	struct net *net = dev_net(skb->dev);
+	struct net *net = dev_net_rcu(skb->dev);
 	const struct ipv6hdr *hdr;
 	const struct tcphdr *th;
 	struct sock *sk;
@@ -2061,8 +2068,6 @@ const struct inet_connection_sock_af_ops ipv6_specific = {
 	.net_header_len	   = sizeof(struct ipv6hdr),
 	.setsockopt	   = ipv6_setsockopt,
 	.getsockopt	   = ipv6_getsockopt,
-	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
-	.sockaddr_len	   = sizeof(struct sockaddr_in6),
 	.mtu_reduced	   = tcp_v6_mtu_reduced,
 };
 
@@ -2095,8 +2100,6 @@ static const struct inet_connection_sock_af_ops ipv6_mapped = {
 	.net_header_len	   = sizeof(struct iphdr),
 	.setsockopt	   = ipv6_setsockopt,
 	.getsockopt	   = ipv6_getsockopt,
-	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
-	.sockaddr_len	   = sizeof(struct sockaddr_in6),
 	.mtu_reduced	   = tcp_v4_mtu_reduced,
 };
 
@@ -2192,10 +2195,10 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
 	    icsk_pending == ICSK_TIME_REO_TIMEOUT ||
 	    icsk_pending == ICSK_TIME_LOSS_PROBE) {
 		timer_active	= 1;
-		timer_expires	= icsk->icsk_timeout;
+		timer_expires	= icsk_timeout(icsk);
 	} else if (icsk_pending == ICSK_TIME_PROBE0) {
 		timer_active	= 4;
-		timer_expires	= icsk->icsk_timeout;
+		timer_expires	= icsk_timeout(icsk);
 	} else if (timer_pending(&sp->sk_timer)) {
 		timer_active	= 2;
 		timer_expires	= sp->sk_timer.expires;
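A quick aside on the dev_net() -> dev_net_rcu() conversions in the tcp_ipv6.c hunks above: the new helper documents (and, with lockdep/PROVE_RCU, verifies) that the caller is inside an RCU read-side critical section, which is already the case in these softirq receive paths. A minimal sketch of the intended pattern -- the surrounding function here is illustrative only, not kernel code:

static void example_handle_rx(struct sk_buff *skb)
{
	struct net *net;

	rcu_read_lock();
	net = dev_net_rcu(skb->dev);	/* checked RCU-safe netns access */
	/* ... per-netns lookups, SNMP counters, etc. ... */
	rcu_read_unlock();
}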
diff --git a/net/ipv6/tcpv6_offload.c b/net/ipv6/tcpv6_offload.c
index a45bf17cb2a17..d9b11fe41bf0c 100644
--- a/net/ipv6/tcpv6_offload.c
+++ b/net/ipv6/tcpv6_offload.c
@@ -35,7 +35,7 @@ static void tcp6_check_fraglist_gro(struct list_head *head, struct sk_buff *skb,
 	inet6_get_iif_sdif(skb, &iif, &sdif);
 	hdr = skb_gro_network_header(skb);
-	net = dev_net(skb->dev);
+	net = dev_net_rcu(skb->dev);
 	sk = __inet6_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
 					&hdr->saddr, th->source,
 					&hdr->daddr, ntohs(th->dest),
@@ -94,14 +94,23 @@ INDIRECT_CALLABLE_SCOPE int tcp6_gro_complete(struct sk_buff *skb, int thoff)
 }
 
 static void __tcpv6_gso_segment_csum(struct sk_buff *seg,
+				     struct in6_addr *oldip,
+				     const struct in6_addr *newip,
 				     __be16 *oldport, __be16 newport)
 {
-	struct tcphdr *th;
+	struct tcphdr *th = tcp_hdr(seg);
+
+	if (!ipv6_addr_equal(oldip, newip)) {
+		inet_proto_csum_replace16(&th->check, seg,
+					  oldip->s6_addr32,
+					  newip->s6_addr32,
+					  true);
+		*oldip = *newip;
+	}
 
 	if (*oldport == newport)
 		return;
 
-	th = tcp_hdr(seg);
 	inet_proto_csum_replace2(&th->check, seg, *oldport, newport, false);
 	*oldport = newport;
 }
@@ -129,10 +138,10 @@ static struct sk_buff *__tcpv6_gso_segment_list_csum(struct sk_buff *segs)
 		th2 = tcp_hdr(seg);
 		iph2 = ipv6_hdr(seg);
 
-		iph2->saddr = iph->saddr;
-		iph2->daddr = iph->daddr;
-		__tcpv6_gso_segment_csum(seg, &th2->source, th->source);
-		__tcpv6_gso_segment_csum(seg, &th2->dest, th->dest);
+		__tcpv6_gso_segment_csum(seg, &iph2->saddr, &iph->saddr,
+					 &th2->source, th->source);
+		__tcpv6_gso_segment_csum(seg, &iph2->daddr, &iph->daddr,
+					 &th2->dest, th->dest);
 	}
 
 	return segs;
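The address-aware fixup above relies on incremental checksum updates: inet_proto_csum_replace16()/replace2() adjust the TCP checksum for the changed pseudo-header bytes instead of recomputing it over the whole segment. A self-contained sketch of the underlying RFC 1624 arithmetic for a single 16-bit field (standalone illustration, not the kernel helper):

/* HC' = ~(~HC + ~m + m'), folded to 16 bits */
static unsigned short csum_replace_u16(unsigned short check,
				       unsigned short old_val,
				       unsigned short new_val)
{
	unsigned long sum = (unsigned short)~check;

	sum += (unsigned short)~old_val;
	sum += new_val;
	while (sum >> 16)			/* fold the carries back in */
		sum = (sum & 0xffff) + (sum >> 16);
	return (unsigned short)~sum;
}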
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index c6ea438b5c758..02a8ec4fa5d35 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -46,6 +46,7 @@
 #include
 #include
 #include
+#include <net/udp_tunnel.h>
 #include
 #include
 #include
@@ -586,7 +587,7 @@ int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
 			SNMP_INC_STATS(mib, UDP_MIB_CSUMERRORS);
 			SNMP_INC_STATS(mib, UDP_MIB_INERRORS);
 		}
-		kfree_skb(skb);
+		kfree_skb_reason(skb, SKB_DROP_REASON_UDP_CSUM);
 
 		/* starting over for a new packet, but check if we need to yield */
 		cond_resched();
@@ -1494,11 +1495,8 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
 	int is_udplite = IS_UDPLITE(sk);
 	int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);
 
-	ipcm6_init(&ipc6);
+	ipcm6_init_sk(&ipc6, sk);
 	ipc6.gso_size = READ_ONCE(up->gso_size);
-	ipc6.sockc.tsflags = READ_ONCE(sk->sk_tsflags);
-	ipc6.sockc.mark = READ_ONCE(sk->sk_mark);
-	ipc6.sockc.priority = READ_ONCE(sk->sk_priority);
 
 	/* destination address check */
 	if (sin6) {
@@ -1704,9 +1702,6 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
 
 	security_sk_classify_flow(sk, flowi6_to_flowi_common(fl6));
 
-	if (ipc6.tclass < 0)
-		ipc6.tclass = np->tclass;
-
 	fl6->flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6->flowlabel);
 
 	dst = ip6_sk_dst_lookup_flow(sk, fl6, final_p, connected);
@@ -1752,8 +1747,6 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
 	WRITE_ONCE(up->pending, AF_INET6);
 
 do_append_data:
-	if (ipc6.dontfrag < 0)
-		ipc6.dontfrag = inet6_test_bit(DONTFRAG, sk);
 	up->len += ulen;
 	err = ip6_append_data(sk, getfrag, msg, ulen, sizeof(struct udphdr),
 			      &ipc6, fl6, dst_rt6_info(dst),
@@ -1833,6 +1826,7 @@ void udpv6_destroy_sock(struct sock *sk)
 		if (udp_test_bit(ENCAP_ENABLED, sk)) {
 			static_branch_dec(&udpv6_encap_needed_key);
 			udp_encap_disable();
+			udp_tunnel_cleanup_gro(sk);
 		}
 	}
 }
@@ -1941,6 +1935,7 @@ struct proto udpv6_prot = {
 	.h.udp_table		= NULL,
 	.diag_destroy		= udp_abort,
 };
+EXPORT_SYMBOL_GPL(udpv6_prot);
 
 static struct inet_protosw udpv6_protosw = {
 	.type =      SOCK_DGRAM,
diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c
index b41152dd42469..d8445ac1b2e43 100644
--- a/net/ipv6/udp_offload.c
+++ b/net/ipv6/udp_offload.c
@@ -117,9 +117,14 @@ static struct sock *udp6_gro_lookup_skb(struct sk_buff *skb, __be16 sport,
 					__be16 dport)
 {
 	const struct ipv6hdr *iph = skb_gro_network_header(skb);
-	struct net *net = dev_net(skb->dev);
+	struct net *net = dev_net_rcu(skb->dev);
+	struct sock *sk;
 	int iif, sdif;
 
+	sk = udp_tunnel_sk(net, true);
+	if (sk && dport == htons(sk->sk_num))
+		return sk;
+
 	inet6_get_iif_sdif(skb, &iif, &sdif);
 
 	return __udp6_lib_lookup(net, &iph->saddr, sport,
diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c
index e836910734967..cf0b66f4fb29b 100644
--- a/net/l2tp/l2tp_eth.c
+++ b/net/l2tp/l2tp_eth.c
@@ -25,6 +25,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
index f4c1da0708269..b98d13584c81f 100644
--- a/net/l2tp/l2tp_ip6.c
+++ b/net/l2tp/l2tp_ip6.c
@@ -547,7 +547,7 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
 	fl6.flowi6_mark = READ_ONCE(sk->sk_mark);
 	fl6.flowi6_uid = sk->sk_uid;
 
-	ipcm6_init(&ipc6);
+	ipcm6_init_sk(&ipc6, sk);
 
 	if (lsa) {
 		if (addr_len < SIN6_LEN_RFC2133)
@@ -634,9 +634,6 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
 
 	security_sk_classify_flow(sk, flowi6_to_flowi_common(&fl6));
 
-	if (ipc6.tclass < 0)
-		ipc6.tclass = np->tclass;
-
 	fl6.flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6.flowlabel);
 
 	dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p);
@@ -648,9 +645,6 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
 	if (ipc6.hlimit < 0)
 		ipc6.hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);
 
-	if (ipc6.dontfrag < 0)
-		ipc6.dontfrag = inet6_test_bit(DONTFRAG, sk);
-
 	if (msg->msg_flags & MSG_CONFIRM)
 		goto do_confirm;
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index 53baf2dd5d5da..fc5c2fd8f34c7 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -806,6 +806,7 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
 	po->chan.private = sk;
 	po->chan.ops	 = &pppol2tp_chan_ops;
 	po->chan.mtu	 = pppol2tp_tunnel_mtu(tunnel);
+	po->chan.direct_xmit = true;
 
 	error = ppp_register_net_channel(sock_net(sk), &po->chan);
 	if (error) {
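Both udpv6_sendmsg() and l2tp_ip6_sendmsg() can drop their hand-rolled defaults above because ipcm6_init_sk() seeds the cookie from socket state in one place. Roughly, and only as an illustration (the real helper lives in include/net/ipv6.h and may differ in detail):

static void example_ipcm6_init_sk(struct ipcm6_cookie *ipc6,
				  const struct sock *sk)
{
	*ipc6 = (struct ipcm6_cookie) {
		.hlimit = -1,		/* "unset": resolved against the dst */
		.tclass = inet6_sk(sk)->tclass,
		.dontfrag = inet6_test_bit(DONTFRAG, sk),
	};
	sockcm_init(&ipc6->sockc, sk);	/* mark, tsflags, priority, ... */
}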
diff --git a/net/llc/llc_s_ac.c b/net/llc/llc_s_ac.c
index 06fb8e6944b06..7a0cae9a81114 100644
--- a/net/llc/llc_s_ac.c
+++ b/net/llc/llc_s_ac.c
@@ -24,7 +24,7 @@
 #include
 #include
 #include
-
+#include <net/sock.h>
 
 /**
  *	llc_sap_action_unitdata_ind - forward UI PDU to network layer
@@ -40,6 +40,26 @@ int llc_sap_action_unitdata_ind(struct llc_sap *sap, struct sk_buff *skb)
 	return 0;
 }
 
+static int llc_prepare_and_xmit(struct sk_buff *skb)
+{
+	struct llc_sap_state_ev *ev = llc_sap_ev(skb);
+	struct sk_buff *nskb;
+	int rc;
+
+	rc = llc_mac_hdr_init(skb, ev->saddr.mac, ev->daddr.mac);
+	if (rc)
+		return rc;
+
+	nskb = skb_clone(skb, GFP_ATOMIC);
+	if (!nskb)
+		return -ENOMEM;
+
+	if (skb->sk)
+		skb_set_owner_w(nskb, skb->sk);
+
+	return dev_queue_xmit(nskb);
+}
+
 /**
  *	llc_sap_action_send_ui - sends UI PDU resp to UNITDATA REQ to MAC layer
  *	@sap: SAP
@@ -52,17 +72,12 @@ int llc_sap_action_unitdata_ind(struct llc_sap *sap, struct sk_buff *skb)
 int llc_sap_action_send_ui(struct llc_sap *sap, struct sk_buff *skb)
 {
 	struct llc_sap_state_ev *ev = llc_sap_ev(skb);
-	int rc;
 
 	llc_pdu_header_init(skb, LLC_PDU_TYPE_U, ev->saddr.lsap,
 			    ev->daddr.lsap, LLC_PDU_CMD);
 	llc_pdu_init_as_ui_cmd(skb);
-	rc = llc_mac_hdr_init(skb, ev->saddr.mac, ev->daddr.mac);
-	if (likely(!rc)) {
-		skb_get(skb);
-		rc = dev_queue_xmit(skb);
-	}
-	return rc;
+
+	return llc_prepare_and_xmit(skb);
 }
 
 /**
@@ -77,17 +92,12 @@ int llc_sap_action_send_ui(struct llc_sap *sap, struct sk_buff *skb)
 int llc_sap_action_send_xid_c(struct llc_sap *sap, struct sk_buff *skb)
 {
 	struct llc_sap_state_ev *ev = llc_sap_ev(skb);
-	int rc;
 
 	llc_pdu_header_init(skb, LLC_PDU_TYPE_U_XID, ev->saddr.lsap,
 			    ev->daddr.lsap, LLC_PDU_CMD);
 	llc_pdu_init_as_xid_cmd(skb, LLC_XID_NULL_CLASS_2, 0);
-	rc = llc_mac_hdr_init(skb, ev->saddr.mac, ev->daddr.mac);
-	if (likely(!rc)) {
-		skb_get(skb);
-		rc = dev_queue_xmit(skb);
-	}
-	return rc;
+
+	return llc_prepare_and_xmit(skb);
 }
 
 /**
@@ -133,17 +143,12 @@ int llc_sap_action_send_xid_r(struct llc_sap *sap, struct sk_buff *skb)
 int llc_sap_action_send_test_c(struct llc_sap *sap, struct sk_buff *skb)
 {
 	struct llc_sap_state_ev *ev = llc_sap_ev(skb);
-	int rc;
 
 	llc_pdu_header_init(skb, LLC_PDU_TYPE_U, ev->saddr.lsap,
 			    ev->daddr.lsap, LLC_PDU_CMD);
 	llc_pdu_init_as_test_cmd(skb);
-	rc = llc_mac_hdr_init(skb, ev->saddr.mac, ev->daddr.mac);
-	if (likely(!rc)) {
-		skb_get(skb);
-		rc = dev_queue_xmit(skb);
-	}
-	return rc;
+
+	return llc_prepare_and_xmit(skb);
 }
 
 int llc_sap_action_send_test_r(struct llc_sap *sap, struct sk_buff *skb)
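Worth spelling out why the LLC refactor clones rather than takes a reference: dev_queue_xmit() consumes its skb and may modify it in flight, so skb_get() left two owners sharing one mutable buffer. Cloning hands the stack a private copy. An illustrative contrast (not kernel code):

/* Broken pattern:
 *	skb_get(skb);		// ref++, but both refs share skb state
 *	dev_queue_xmit(skb);	// stack may mangle skb the caller reuses
 *
 * Safer pattern, as used by llc_prepare_and_xmit() above:
 */
static int xmit_private_copy(struct sk_buff *skb)
{
	struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);

	if (!nskb)
		return -ENOMEM;
	if (skb->sk)
		skb_set_owner_w(nskb, skb->sk);	/* keep socket accounting */
	return dev_queue_xmit(nskb);		/* stack owns nskb outright */
}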
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
index f3fbe5a4395e5..aeb99d102c6e9 100644
--- a/net/mac80211/agg-rx.c
+++ b/net/mac80211/agg-rx.c
@@ -9,7 +9,7 @@
  * Copyright 2007, Michael Wu
  * Copyright 2007-2010, Intel Corporation
  * Copyright(c) 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2023 Intel Corporation
+ * Copyright (C) 2018-2024 Intel Corporation
 */
 
 /**
@@ -206,17 +206,19 @@ u8 ieee80211_retrieve_addba_ext_data(struct sta_info *sta,
 
 	elems = ieee802_11_parse_elems(elem_data, elem_len, true, NULL);
 
-	if (elems && !elems->parse_error && elems->addba_ext_ie) {
-		data = elems->addba_ext_ie->data;
+	if (!elems || elems->parse_error || !elems->addba_ext_ie)
+		goto free;
 
-		if (!sta->sta.deflink.eht_cap.has_eht || !buf_size)
-			goto free;
+	data = elems->addba_ext_ie->data;
 
+	if (buf_size &&
+	    (sta->sta.valid_links || sta->sta.deflink.eht_cap.has_eht)) {
 		buf_size_1k = u8_get_bits(elems->addba_ext_ie->data,
 					  IEEE80211_ADDBA_EXT_BUF_SIZE_MASK);
 		*buf_size |= (u16)buf_size_1k <<
 			     IEEE80211_ADDBA_EXT_BUF_SIZE_SHIFT;
 	}
+
 free:
 	kfree(elems);
@@ -258,7 +260,7 @@ static void ieee80211_send_addba_resp(struct sta_info *sta, u8 *da, u16 tid,
 	mgmt->u.action.u.addba_resp.timeout = cpu_to_le16(timeout);
 	mgmt->u.action.u.addba_resp.status = cpu_to_le16(status);
 
-	if (sta->sta.deflink.he_cap.has_he)
+	if (sta->sta.valid_links || sta->sta.deflink.he_cap.has_he)
 		ieee80211_add_addbaext(skb, req_addba_ext_data, buf_size);
 
 	ieee80211_tx_skb(sdata, skb);
@@ -293,7 +295,8 @@ void __ieee80211_start_rx_ba_session(struct sta_info *sta,
 		goto end;
 	}
 
-	if (!sta->sta.deflink.ht_cap.ht_supported &&
+	if (!sta->sta.valid_links &&
+	    !sta->sta.deflink.ht_cap.ht_supported &&
 	    !sta->sta.deflink.he_cap.has_he) {
 		ht_dbg(sta->sdata,
 		       "STA %pM erroneously requests BA session on tid %d w/o HT\n",
@@ -309,7 +312,7 @@ void __ieee80211_start_rx_ba_session(struct sta_info *sta,
 		goto end;
 	}
 
-	if (sta->sta.deflink.eht_cap.has_eht)
+	if (sta->sta.valid_links || sta->sta.deflink.eht_cap.has_eht)
 		max_buf_size = IEEE80211_MAX_AMPDU_BUF_EHT;
 	else if (sta->sta.deflink.he_cap.has_he)
 		max_buf_size = IEEE80211_MAX_AMPDU_BUF_HE;
@@ -321,7 +324,8 @@ void __ieee80211_start_rx_ba_session(struct sta_info *sta,
 	 * and if buffer size does not exceeds max value */
 	/* XXX: check own ht delayed BA capability?? */
 	if (((ba_policy != 1) &&
-	     (!(sta->sta.deflink.ht_cap.cap & IEEE80211_HT_CAP_DELAY_BA))) ||
+	     (sta->sta.valid_links ||
+	      !(sta->sta.deflink.ht_cap.cap & IEEE80211_HT_CAP_DELAY_BA))) ||
 	    (buf_size > max_buf_size)) {
 		status = WLAN_STATUS_INVALID_QOS_PARAM;
 		ht_dbg_ratelimited(sta->sdata,
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index 61f2cac37728c..63a5e48291ac3 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -9,7 +9,7 @@
  * Copyright 2007, Michael Wu
  * Copyright 2007-2010, Intel Corporation
  * Copyright(c) 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018 - 2023 Intel Corporation
+ * Copyright (C) 2018 - 2024 Intel Corporation
 */
 
 #include
@@ -464,7 +464,9 @@ static void ieee80211_send_addba_with_timeout(struct sta_info *sta,
 	sta->ampdu_mlme.addba_req_num[tid]++;
 	spin_unlock_bh(&sta->lock);
 
-	if (sta->sta.deflink.eht_cap.has_eht) {
+	if (sta->sta.valid_links ||
+	    sta->sta.deflink.eht_cap.has_eht ||
+	    ieee80211_hw_check(&local->hw, STRICT)) {
 		buf_size = local->hw.max_tx_aggregation_subframes;
 	} else if (sta->sta.deflink.he_cap.has_he) {
 		buf_size = min_t(u16, local->hw.max_tx_aggregation_subframes,
@@ -608,7 +610,8 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
 		    "Requested to start BA session on reserved tid=%d", tid))
 		return -EINVAL;
 
-	if (!pubsta->deflink.ht_cap.ht_supported &&
+	if (!pubsta->valid_links &&
+	    !pubsta->deflink.ht_cap.ht_supported &&
 	    !pubsta->deflink.vht_cap.vht_supported &&
 	    !pubsta->deflink.he_cap.has_he &&
 	    !pubsta->deflink.eht_cap.has_eht)
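For reference, the buffer-size arithmetic in ieee80211_retrieve_addba_ext_data(): the ADDBA request carries a 10-bit buffer size, and the ADDBA extension element contributes the bits above that. Assuming the usual mask/shift definitions (bits 6-7 of the extension byte, shifted up by 10), an EHT peer advertising 1024 subframes encodes it as:

u16 base = 0;		/* the low 10 bits of 1024 are all zero */
u8 ext_data = 0x40;	/* bits 6-7 of the ADDBA extension element */
u8 hi = u8_get_bits(ext_data, IEEE80211_ADDBA_EXT_BUF_SIZE_MASK);   /* 1 */
u16 buf_size = base | ((u16)hi << IEEE80211_ADDBA_EXT_BUF_SIZE_SHIFT);
/* buf_size == 1024 == IEEE80211_MAX_AMPDU_BUF_EHT */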
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 9351c64608a99..9f683f838431d 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -5,7 +5,7 @@
  * Copyright 2006-2010	Johannes Berg
  * Copyright 2013-2015  Intel Mobile Communications GmbH
  * Copyright (C) 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2024 Intel Corporation
+ * Copyright (C) 2018-2025 Intel Corporation
 */
 
 #include
@@ -89,15 +89,14 @@ static int ieee80211_set_mon_options(struct ieee80211_sub_if_data *sdata,
 
 	/* check flags first */
 	if (params->flags && ieee80211_sdata_running(sdata)) {
-		u32 mask = MONITOR_FLAG_COOK_FRAMES | MONITOR_FLAG_ACTIVE;
+		u32 mask = MONITOR_FLAG_ACTIVE;
 
 		/*
-		 * Prohibit MONITOR_FLAG_COOK_FRAMES and
-		 * MONITOR_FLAG_ACTIVE to be changed while the
-		 * interface is up.
+		 * Prohibit MONITOR_FLAG_ACTIVE from being changed
+		 * while the interface is up.
 		 * Else we would need to add a lot of cruft
 		 * to update everything:
-		 *	cooked_mntrs, monitor and all fif_* counters
+		 *	monitor and all fif_* counters
 		 *	reconfigure hardware
 		 */
 		if ((params->flags & mask) != (sdata->u.mntr.flags & mask))
@@ -920,7 +919,7 @@ static int ieee80211_set_monitor_channel(struct wiphy *wiphy,
 	sdata = IEEE80211_DEV_TO_SUB_IF(dev);
 
 	if (!ieee80211_hw_check(&local->hw, NO_VIRTUAL_MONITOR)) {
 		if (cfg80211_chandef_identical(&local->monitor_chanreq.oper,
-						&chanreq.oper))
+					       &chanreq.oper))
 			return 0;
 
 		sdata = wiphy_dereference(wiphy, local->monitor_sdata);
@@ -929,7 +928,7 @@ static int ieee80211_set_monitor_channel(struct wiphy *wiphy,
 	}
 
 	if (rcu_access_pointer(sdata->deflink.conf->chanctx_conf) &&
-	     cfg80211_chandef_identical(&sdata->vif.bss_conf.chanreq.oper,
+	    cfg80211_chandef_identical(&sdata->vif.bss_conf.chanreq.oper,
 				       &chanreq.oper))
 		return 0;
@@ -1908,12 +1907,12 @@ static int sta_link_apply_parameters(struct ieee80211_local *local,
 	}
 
 	if (params->supported_rates &&
-	    params->supported_rates_len) {
-		ieee80211_parse_bitrates(link->conf->chanreq.oper.width,
-					 sband, params->supported_rates,
-					 params->supported_rates_len,
-					 &link_sta->pub->supp_rates[sband->band]);
-	}
+	    params->supported_rates_len &&
+	    !ieee80211_parse_bitrates(link->conf->chanreq.oper.width,
+				      sband, params->supported_rates,
+				      params->supported_rates_len,
+				      &link_sta->pub->supp_rates[sband->band]))
+		return -EINVAL;
 
 	if (params->ht_capa)
 		ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband,
@@ -4371,9 +4370,8 @@ static int ieee80211_cfg_get_channel(struct wiphy *wiphy,
 	if (chanctx_conf) {
 		*chandef = link->conf->chanreq.oper;
 		ret = 0;
-	} else if (!ieee80211_hw_check(&local->hw, NO_VIRTUAL_MONITOR) &&
-		   local->open_count > 0 &&
-		   local->open_count == local->monitors &&
+	} else if (local->open_count > 0 &&
+		   local->open_count == local->virt_monitors &&
 		   sdata->vif.type == NL80211_IFTYPE_MONITOR) {
 		*chandef = local->monitor_chanreq.oper;
 		ret = 0;
@@ -5187,14 +5185,21 @@ ieee80211_set_ttlm(struct wiphy *wiphy, struct net_device *dev,
 
 static int
 ieee80211_assoc_ml_reconf(struct wiphy *wiphy, struct net_device *dev,
-			  struct cfg80211_assoc_link *add_links,
-			  u16 rem_links)
+			  struct cfg80211_ml_reconf_req *req)
 {
 	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
 
 	lockdep_assert_wiphy(sdata->local->hw.wiphy);
 
-	return ieee80211_mgd_assoc_ml_reconf(sdata, add_links, rem_links);
+	return ieee80211_mgd_assoc_ml_reconf(sdata, req);
+}
+
+static int
+ieee80211_set_epcs(struct wiphy *wiphy, struct net_device *dev, bool enable)
+{
+	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+
+	return ieee80211_mgd_set_epcs(sdata, enable);
 }
 
 const struct cfg80211_ops mac80211_config_ops = {
@@ -5312,4 +5317,5 @@ const struct cfg80211_ops mac80211_config_ops = {
 	.set_ttlm = ieee80211_set_ttlm,
 	.get_radio_mask = ieee80211_get_radio_mask,
 	.assoc_ml_reconf = ieee80211_assoc_ml_reconf,
+	.set_epcs = ieee80211_set_epcs,
 };
diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c
index dc28f2b0957ae..c3bfac58151f6 100644
--- a/net/mac80211/chan.c
+++ b/net/mac80211/chan.c
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
  * mac80211 - channel management
- * Copyright 2020 - 2024 Intel Corporation
+ * Copyright 2020 - 2025 Intel Corporation
 */
 
 #include
@@ -2178,3 +2178,21 @@ void ieee80211_iter_chan_contexts_atomic(
 	rcu_read_unlock();
 }
 EXPORT_SYMBOL_GPL(ieee80211_iter_chan_contexts_atomic);
+
+void ieee80211_iter_chan_contexts_mtx(
+	struct ieee80211_hw *hw,
+	void (*iter)(struct ieee80211_hw *hw,
+		     struct ieee80211_chanctx_conf *chanctx_conf,
+		     void *data),
+	void *iter_data)
+{
+	struct ieee80211_local *local = hw_to_local(hw);
+	struct ieee80211_chanctx *ctx;
+
+	lockdep_assert_wiphy(hw->wiphy);
+
+	list_for_each_entry(ctx, &local->chanctx_list, list)
+		if (ctx->driver_present)
+			iter(hw, &ctx->conf, iter_data);
+}
+EXPORT_SYMBOL_GPL(ieee80211_iter_chan_contexts_mtx);
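The new wiphy-locked iterator complements the existing _atomic variant for callers that already hold the wiphy mutex. A hypothetical driver-side usage sketch (the driver function names are invented for illustration; the iterator signature matches the export above):

static void drv_count_ctx_iter(struct ieee80211_hw *hw,
			       struct ieee80211_chanctx_conf *conf,
			       void *data)
{
	int *n = data;

	(*n)++;		/* conf is stable here: the wiphy mutex is held */
}

static int drv_count_chanctx(struct ieee80211_hw *hw)
{
	int n = 0;

	lockdep_assert_wiphy(hw->wiphy);
	ieee80211_iter_chan_contexts_mtx(hw, drv_count_ctx_iter, &n);
	return n;
}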
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c
index bf0a2902d93c6..69e03630f64c9 100644
--- a/net/mac80211/debugfs.c
+++ b/net/mac80211/debugfs.c
@@ -492,6 +492,7 @@ static const char *hw_flag_names[] = {
 	FLAG(DISALLOW_PUNCTURING),
 	FLAG(DISALLOW_PUNCTURING_5GHZ),
 	FLAG(HANDLES_QUIET_CSA),
+	FLAG(STRICT),
 #undef FLAG
 };
 
@@ -524,6 +525,46 @@ static ssize_t hwflags_read(struct file *file, char __user *user_buf,
 	return rv;
 }
 
+static ssize_t hwflags_write(struct file *file, const char __user *user_buf,
+			     size_t count, loff_t *ppos)
+{
+	struct ieee80211_local *local = file->private_data;
+	char buf[100];
+	int val;
+
+	if (count >= sizeof(buf))
+		return -EINVAL;
+
+	if (copy_from_user(buf, user_buf, count))
+		return -EFAULT;
+
+	if (count && buf[count - 1] == '\n')
+		buf[count - 1] = '\0';
+	else
+		buf[count] = '\0';
+
+	if (sscanf(buf, "strict=%d", &val) == 1) {
+		switch (val) {
+		case 0:
+			__clear_bit(IEEE80211_HW_STRICT, local->hw.flags);
+			return count;
+		case 1:
+			ieee80211_hw_set(&local->hw, STRICT);
+			return count;
+		default:
+			return -EINVAL;
+		}
+	}
+
+	return -EINVAL;
+}
+
+static const struct file_operations hwflags_ops = {
+	.open = simple_open,
+	.read = hwflags_read,
+	.write = hwflags_write,
+};
+
 static ssize_t misc_read(struct file *file, char __user *user_buf,
 			 size_t count, loff_t *ppos)
 {
@@ -574,7 +615,6 @@ static ssize_t queues_read(struct file *file, char __user *user_buf,
 	return simple_read_from_buffer(user_buf, count, ppos, buf, res);
 }
 
-DEBUGFS_READONLY_FILE_OPS(hwflags);
 DEBUGFS_READONLY_FILE_OPS(queues);
 DEBUGFS_READONLY_FILE_OPS(misc);
 
@@ -651,7 +691,7 @@ void debugfs_hw_add(struct ieee80211_local *local)
 #ifdef CONFIG_PM
 	DEBUGFS_ADD_MODE(reset, 0200);
 #endif
-	DEBUGFS_ADD(hwflags);
+	DEBUGFS_ADD_MODE(hwflags, 0600);
 	DEBUGFS_ADD(user_power);
 	DEBUGFS_ADD(power);
 	DEBUGFS_ADD(hw_conf);
diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
index a67a9d3160086..a8948f4d983e5 100644
--- a/net/mac80211/debugfs_sta.c
+++ b/net/mac80211/debugfs_sta.c
@@ -457,11 +457,12 @@ static ssize_t link_sta_addr_read(struct file *file, char __user *userbuf,
 				  size_t count, loff_t *ppos)
 {
 	struct link_sta_info *link_sta = file->private_data;
-	u8 mac[3 * ETH_ALEN + 1];
+	u8 mac[MAC_ADDR_STR_LEN + 2];
 
 	snprintf(mac, sizeof(mac), "%pM\n", link_sta->pub->addr);
 
-	return simple_read_from_buffer(userbuf, count, ppos, mac, 3 * ETH_ALEN);
+	return simple_read_from_buffer(userbuf, count, ppos, mac,
+				       MAC_ADDR_STR_LEN + 1);
 }
 
 LINK_STA_OPS(addr);
@@ -1240,7 +1241,7 @@ void ieee80211_sta_debugfs_add(struct sta_info *sta)
 	struct ieee80211_local *local = sta->local;
 	struct ieee80211_sub_if_data *sdata = sta->sdata;
 	struct dentry *stations_dir = sta->sdata->debugfs.subdir_stations;
-	u8 mac[3*ETH_ALEN];
+	u8 mac[MAC_ADDR_STR_LEN + 1];
 
 	if (!stations_dir)
 		return;
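The MAC_ADDR_STR_LEN conversion above makes the buffer arithmetic explicit: "%pM" formats six octets as 17 characters. A worked sizing example (addr here is a hypothetical u8[ETH_ALEN]):

char buf[MAC_ADDR_STR_LEN + 2];	/* 17 chars + '\n' + trailing NUL */

snprintf(buf, sizeof(buf), "%pM\n", addr);
/* "aa:bb:cc:dd:ee:ff" is 6 * 3 - 1 = 17 chars; the reader is shown
 * MAC_ADDR_STR_LEN + 1 = 18 bytes (string plus newline, not the NUL).
 */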
diff --git a/net/mac80211/driver-ops.c b/net/mac80211/driver-ops.c
index 299d38e9e8630..35349a7f16cb4 100644
--- a/net/mac80211/driver-ops.c
+++ b/net/mac80211/driver-ops.c
@@ -116,8 +116,14 @@ void drv_remove_interface(struct ieee80211_local *local,
 
 	sdata->flags &= ~IEEE80211_SDATA_IN_DRIVER;
 
-	/* Remove driver debugfs entries */
-	ieee80211_debugfs_recreate_netdev(sdata, sdata->vif.valid_links);
+	/*
+	 * Remove driver debugfs entries.
+	 * The virtual monitor interface doesn't get a debugfs
+	 * entry, so it's exempt here.
+	 */
+	if (sdata != rcu_access_pointer(local->monitor_sdata))
+		ieee80211_debugfs_recreate_netdev(sdata,
+						  sdata->vif.valid_links);
 
 	trace_drv_remove_interface(local, sdata);
 	local->ops->remove_interface(&local->hw, &sdata->vif);
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index 5acecc7bd4a99..307587c8a0037 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -2,7 +2,7 @@
 /*
 * Portions of this file
 * Copyright(c) 2016 Intel Deutschland GmbH
-* Copyright (C) 2018-2019, 2021-2024 Intel Corporation
+* Copyright (C) 2018-2019, 2021-2025 Intel Corporation
 */
 
 #ifndef __MAC80211_DRIVER_OPS
@@ -955,6 +955,7 @@ static inline void drv_mgd_complete_tx(struct ieee80211_local *local,
 		return;
 	WARN_ON_ONCE(sdata->vif.type != NL80211_IFTYPE_STATION);
 
+	info->link_id = info->link_id < 0 ? 0 : info->link_id;
 	trace_drv_mgd_complete_tx(local, sdata, info->duration,
 				  info->subtype, info->success);
 	if (local->ops->mgd_complete_tx)
diff --git a/net/mac80211/drop.h b/net/mac80211/drop.h
index 59e3ec4dc9607..eb9ab310f91ca 100644
--- a/net/mac80211/drop.h
+++ b/net/mac80211/drop.h
@@ -11,12 +11,6 @@
 
 typedef unsigned int __bitwise ieee80211_rx_result;
 
-#define MAC80211_DROP_REASONS_MONITOR(R)	\
-	R(RX_DROP_M_UNEXPECTED_4ADDR_FRAME)	\
-	R(RX_DROP_M_BAD_BCN_KEYIDX)		\
-	R(RX_DROP_M_BAD_MGMT_KEYIDX)		\
-/* this line for the trailing \ - add before this */
-
 #define MAC80211_DROP_REASONS_UNUSABLE(R)	\
 	/* 0x00 == ___RX_DROP_UNUSABLE */	\
 	R(RX_DROP_U_MIC_FAIL)			\
@@ -66,6 +60,10 @@ typedef unsigned int __bitwise ieee80211_rx_result;
 	R(RX_DROP_U_UNEXPECTED_STA_4ADDR)	\
 	R(RX_DROP_U_UNEXPECTED_VLAN_MCAST)	\
 	R(RX_DROP_U_NOT_PORT_CONTROL)		\
+	R(RX_DROP_U_UNEXPECTED_4ADDR_FRAME)	\
+	R(RX_DROP_U_BAD_BCN_KEYIDX)		\
+	/* 0x30 */				\
+	R(RX_DROP_U_BAD_MGMT_KEYIDX)		\
 	R(RX_DROP_U_UNKNOWN_ACTION_REJECTED)	\
 /* this line for the trailing \ - add before this */
 
@@ -78,10 +76,6 @@ enum ___mac80211_drop_reason {
 	___RX_QUEUED	= SKB_NOT_DROPPED_YET,
 
 #define ENUM(x) ___ ## x,
-	___RX_DROP_MONITOR = SKB_DROP_REASON_SUBSYS_MAC80211_MONITOR <<
-		SKB_DROP_REASON_SUBSYS_SHIFT,
-	MAC80211_DROP_REASONS_MONITOR(ENUM)
-
 	___RX_DROP_UNUSABLE = SKB_DROP_REASON_SUBSYS_MAC80211_UNUSABLE <<
 		SKB_DROP_REASON_SUBSYS_SHIFT,
 	MAC80211_DROP_REASONS_UNUSABLE(ENUM)
@@ -89,11 +83,10 @@ enum ___mac80211_drop_reason {
 };
 
 enum mac80211_drop_reason {
-	RX_CONTINUE	= (__force ieee80211_rx_result)___RX_CONTINUE,
-	RX_QUEUED	= (__force ieee80211_rx_result)___RX_QUEUED,
-	RX_DROP_MONITOR	= (__force ieee80211_rx_result)___RX_DROP_MONITOR,
+	RX_CONTINUE	= (__force ieee80211_rx_result)___RX_CONTINUE,
+	RX_QUEUED	= (__force ieee80211_rx_result)___RX_QUEUED,
+	RX_DROP		= (__force ieee80211_rx_result)___RX_DROP_UNUSABLE,
 #define DEF(x) x = (__force ieee80211_rx_result)___ ## x,
-	MAC80211_DROP_REASONS_MONITOR(DEF)
 	MAC80211_DROP_REASONS_UNUSABLE(DEF)
 #undef DEF
 };
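With the monitor subsystem removed, all mac80211 drop reasons live in the "unusable" numberspace. How the subsystem-tagged values are laid out (simplified; the real constants are in include/net/dropreason.h, where SKB_DROP_REASON_SUBSYS_SHIFT is 16):

/*
 *	value = (SKB_DROP_REASON_SUBSYS_MAC80211_UNUSABLE
 *			<< SKB_DROP_REASON_SUBSYS_SHIFT) + index;
 *
 * Each subsystem gets its own 16-bit-indexed window of reason codes,
 * and RX_DROP above is simply index 0 of mac80211's window
 * (___RX_DROP_UNUSABLE), so the former RX_DROP_M_* codes could be
 * folded into the same list as new RX_DROP_U_* entries.
 */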
diff --git a/net/mac80211/eht.c b/net/mac80211/eht.c
index 7a3116c36df9f..fd41046e3b681 100644
--- a/net/mac80211/eht.c
+++ b/net/mac80211/eht.c
@@ -2,7 +2,7 @@
 /*
 * EHT handling
 *
- * Copyright(c) 2021-2024 Intel Corporation
+ * Copyright(c) 2021-2025 Intel Corporation
 */
 
 #include "ieee80211_i.h"
@@ -76,6 +76,13 @@ ieee80211_eht_cap_ie_to_sta_eht_cap(struct ieee80211_sub_if_data *sdata,
 	link_sta->cur_max_bandwidth = ieee80211_sta_cap_rx_bw(link_sta);
 	link_sta->pub->bandwidth = ieee80211_sta_cur_vht_bw(link_sta);
 
+	/*
+	 * The MPDU length bits are reserved on all but 2.4 GHz and get set via
+	 * VHT (5 GHz) or HE (6 GHz) capabilities.
+	 */
+	if (sband->band != NL80211_BAND_2GHZ)
+		return;
+
 	switch (u8_get_bits(eht_cap->eht_cap_elem.mac_cap_info[0],
 			    IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_MASK)) {
 	case IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_11454:
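The early return works because the EHT maximum-MPDU-length field is only meaningful on 2.4 GHz; on other bands the limit comes from VHT or HE. For reference, a sketch of the 2-bit decode the return guards (constants from ieee80211.h; the value-to-length mapping follows the EHT MAC capabilities definitions):

u8 len_bits = u8_get_bits(eht_cap->eht_cap_elem.mac_cap_info[0],
			  IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_MASK);
u32 max_mpdu_len = len_bits == IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_11454 ?
			11454 :
		   len_bits == IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_7991 ?
			7991 : 3895;	/* octets */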
diff --git a/net/mac80211/ethtool.c b/net/mac80211/ethtool.c
index 42f7ee142ce3f..0397755a3bd1c 100644
--- a/net/mac80211/ethtool.c
+++ b/net/mac80211/ethtool.c
@@ -158,7 +158,7 @@ static void ieee80211_get_stats(struct net_device *dev,
 	if (chanctx_conf)
 		channel = chanctx_conf->def.chan;
 	else if (local->open_count > 0 &&
-		 local->open_count == local->monitors &&
+		 local->open_count == local->virt_monitors &&
 		 sdata->vif.type == NL80211_IFTYPE_MONITOR)
 		channel = local->monitor_chanreq.oper.chan;
 	else
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index e7dc3f0cfc9a9..fb05f3cd37ec4 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -5,7 +5,7 @@
  * Copyright 2006-2007	Jiri Benc
  * Copyright 2007-2010	Johannes Berg
  * Copyright 2013-2015  Intel Mobile Communications GmbH
- * Copyright (C) 2018-2024 Intel Corporation
+ * Copyright (C) 2018-2025 Intel Corporation
 */
 
 #ifndef IEEE80211_I_H
@@ -200,7 +200,6 @@ enum ieee80211_packet_rx_flags {
 /**
  * enum ieee80211_rx_flags - RX data flags
  *
- * @IEEE80211_RX_CMNTR: received on cooked monitor already
 * @IEEE80211_RX_BEACON_REPORTED: This frame was already reported
 *	to cfg80211_report_obss_beacon().
 *
@@ -208,8 +207,7 @@ enum ieee80211_packet_rx_flags {
 *	for a single frame.
 */
 enum ieee80211_rx_flags {
-	IEEE80211_RX_CMNTR		= BIT(0),
-	IEEE80211_RX_BEACON_REPORTED	= BIT(1),
+	IEEE80211_RX_BEACON_REPORTED	= BIT(0),
 };
 
 struct ieee80211_rx_data {
@@ -446,8 +444,6 @@ struct ieee80211_mgd_assoc_data {
 	const u8 *supp_rates;
 	u8 supp_rates_len;
 
-	unsigned long userspace_selectors[BITS_TO_LONGS(128)];
-
 	unsigned long timeout;
 	int tries;
 
@@ -462,7 +458,9 @@ struct ieee80211_mgd_assoc_data {
 	bool s1g;
 	bool spp_amsdu;
 
-	unsigned int assoc_link_id;
+	s8 assoc_link_id;
+
+	__le16 ext_mld_capa_ops;
 
 	u8 fils_nonces[2 * FILS_NONCE_LEN];
 	u8 fils_kek[FILS_MAX_KEK_LEN];
@@ -524,6 +522,8 @@ struct ieee80211_if_managed {
 	struct ieee80211_mgd_auth_data *auth_data;
 	struct ieee80211_mgd_assoc_data *assoc_data;
 
+	unsigned long userspace_selectors[BITS_TO_LONGS(128)];
+
 	bool powersave; /* powersave requested for this iface */
 	bool broken_ap; /* AP is broken -- turn off powersave */
 
@@ -615,6 +615,12 @@ struct ieee80211_if_managed {
 		u16 added_links;
 		u8 dialog_token;
 	} reconf;
+
+	/* Support for epcs */
+	struct {
+		bool enabled;
+		u8 dialog_token;
+	} epcs;
 };
 
 struct ieee80211_if_ibss {
@@ -1380,7 +1386,7 @@ struct ieee80211_local {
 	spinlock_t queue_stop_reason_lock;
 
 	int open_count;
-	int monitors, cooked_mntrs, tx_mntrs;
+	int monitors, virt_monitors, tx_mntrs;
 	/* number of interfaces with corresponding FIF_ flags */
 	int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
 	    fif_probe_req;
@@ -1492,7 +1498,7 @@ struct ieee80211_local {
 
 	/* see iface.c */
 	struct list_head interfaces;
-	struct list_head mon_list; /* only that are IFF_UP && !cooked */
+	struct list_head mon_list; /* only that are IFF_UP */
 	struct mutex iflist_mtx;
 
 	/* Scanning and BSS list */
@@ -2090,8 +2096,7 @@ struct sk_buff *
 ieee80211_build_data_template(struct ieee80211_sub_if_data *sdata,
 			      struct sk_buff *skb, u32 info_flags);
 void ieee80211_tx_monitor(struct ieee80211_local *local, struct sk_buff *skb,
-			  int retry_count, bool send_to_cooked,
-			  struct ieee80211_tx_status *status);
+			  int retry_count, struct ieee80211_tx_status *status);
 
 void ieee80211_check_fast_xmit(struct sta_info *sta);
 void ieee80211_check_fast_xmit_all(struct ieee80211_local *local);
@@ -2774,14 +2779,19 @@ void ieee80211_process_neg_ttlm_res(struct ieee80211_sub_if_data *sdata,
 				    struct ieee80211_mgmt *mgmt, size_t len);
 int ieee80211_req_neg_ttlm(struct ieee80211_sub_if_data *sdata,
 			   struct cfg80211_ttlm_params *params);
+void ieee80211_process_ttlm_teardown(struct ieee80211_sub_if_data *sdata);
 
 void ieee80211_check_wbrf_support(struct ieee80211_local *local);
 void ieee80211_add_wbrf(struct ieee80211_local *local, struct cfg80211_chan_def *chandef);
 void ieee80211_remove_wbrf(struct ieee80211_local *local, struct cfg80211_chan_def *chandef);
+int ieee80211_mgd_set_epcs(struct ieee80211_sub_if_data *sdata, bool enable);
+void ieee80211_process_epcs_ena_resp(struct ieee80211_sub_if_data *sdata,
+				     struct ieee80211_mgmt *mgmt, size_t len);
+void ieee80211_process_epcs_teardown(struct ieee80211_sub_if_data *sdata,
+				     struct ieee80211_mgmt *mgmt, size_t len);
 
 int ieee80211_mgd_assoc_ml_reconf(struct ieee80211_sub_if_data *sdata,
-				  struct cfg80211_assoc_link *add_links,
-				  u16 rem_links);
+				  struct cfg80211_ml_reconf_req *req);
 
 void ieee80211_process_ml_reconf_resp(struct ieee80211_sub_if_data *sdata,
 				      struct ieee80211_mgmt *mgmt, size_t len);
@@ -2795,6 +2805,13 @@ int ieee80211_calc_chandef_subchan_offset(const struct cfg80211_chan_def *ap,
 void ieee80211_rearrange_tpe_psd(struct ieee80211_parsed_tpe_psd *psd,
 				 const struct cfg80211_chan_def *ap,
 				 const struct cfg80211_chan_def *used);
+struct ieee802_11_elems *
+ieee80211_determine_chan_mode(struct ieee80211_sub_if_data *sdata,
+			      struct ieee80211_conn_settings *conn,
+			      struct cfg80211_bss *cbss, int link_id,
+			      struct ieee80211_chan_req *chanreq,
+			      struct cfg80211_chan_def *ap_chandef,
+			      unsigned long *userspace_selectors);
 #else
 #define EXPORT_SYMBOL_IF_MAC80211_KUNIT(sym)
 #define VISIBLE_IF_MAC80211_KUNIT static
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 0ea7e77860b73..b0423046028c5 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -8,7 +8,7 @@
 * Copyright 2008, Johannes Berg
 * Copyright 2013-2014  Intel Mobile Communications GmbH
 * Copyright (c) 2016        Intel Deutschland GmbH
- * Copyright (C) 2018-2024 Intel Corporation
+ * Copyright (C) 2018-2025 Intel Corporation
 */
 #include
 #include
@@ -483,8 +483,6 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, bool going_do
 		ieee80211_ibss_stop(sdata);
 		break;
 	case NL80211_IFTYPE_MONITOR:
-		if (sdata->u.mntr.flags & MONITOR_FLAG_COOK_FRAMES)
-			break;
 		list_del_rcu(&sdata->u.mntr.list);
 		break;
 	default:
@@ -584,18 +582,19 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, bool going_do
 		/* no need to tell driver */
 		break;
 	case NL80211_IFTYPE_MONITOR:
-		if (sdata->u.mntr.flags & MONITOR_FLAG_COOK_FRAMES) {
-			local->cooked_mntrs--;
-			break;
-		}
+		local->monitors--;
 
-		local->monitors--;
-		if (local->monitors == 0) {
-			local->hw.conf.flags &= ~IEEE80211_CONF_MONITOR;
-			hw_reconf_flags |= IEEE80211_CONF_CHANGE_MONITOR;
-		}
+		if (!(sdata->u.mntr.flags & MONITOR_FLAG_ACTIVE) &&
+		    !ieee80211_hw_check(&local->hw, NO_VIRTUAL_MONITOR)) {
 
-		ieee80211_adjust_monitor_flags(sdata, -1);
+			local->virt_monitors--;
+			if (local->virt_monitors == 0) {
+				local->hw.conf.flags &= ~IEEE80211_CONF_MONITOR;
+				hw_reconf_flags |= IEEE80211_CONF_CHANGE_MONITOR;
+			}
+
+			ieee80211_adjust_monitor_flags(sdata, -1);
+		}
 		break;
 	case NL80211_IFTYPE_NAN:
 		/* clean all the functions */
@@ -686,7 +685,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, bool going_do
 	case NL80211_IFTYPE_AP_VLAN:
 		break;
 	case NL80211_IFTYPE_MONITOR:
-		if (local->monitors == 0)
+		if (local->virt_monitors == 0)
 			ieee80211_del_virtual_monitor(local);
 
 		ieee80211_recalc_idle(local);
@@ -723,7 +722,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, bool going_do
 		ieee80211_configure_filter(local);
 		ieee80211_hw_config(local, hw_reconf_flags);
 
-		if (local->monitors == local->open_count)
+		if (local->virt_monitors == local->open_count)
 			ieee80211_add_virtual_monitor(local);
 	}
 
@@ -807,6 +806,9 @@ static void ieee80211_set_multicast_list(struct net_device *dev)
 */
 static void ieee80211_teardown_sdata(struct ieee80211_sub_if_data *sdata)
 {
+	if (WARN_ON(!list_empty(&sdata->work.entry)))
+		wiphy_work_cancel(sdata->local->hw.wiphy, &sdata->work);
+
 	/* free extra data */
 	ieee80211_free_keys(sdata, false);
 
@@ -979,7 +981,7 @@ static bool ieee80211_set_sdata_offload_flags(struct ieee80211_sub_if_data *sdat
 		    local->hw.wiphy->frag_threshold != (u32)-1)
 			flags &= ~IEEE80211_OFFLOAD_ENCAP_ENABLED;
 
-		if (local->monitors)
+		if (local->virt_monitors)
 			flags &= ~IEEE80211_OFFLOAD_ENCAP_ENABLED;
 	} else {
 		flags &= ~IEEE80211_OFFLOAD_ENCAP_ENABLED;
@@ -989,7 +991,7 @@ static bool ieee80211_set_sdata_offload_flags(struct ieee80211_sub_if_data *sdat
 	    ieee80211_iftype_supports_hdr_offload(sdata->vif.type)) {
 		flags |= IEEE80211_OFFLOAD_DECAP_ENABLED;
 
-		if (local->monitors &&
+		if (local->virt_monitors &&
 		    !ieee80211_hw_check(&local->hw, SUPPORTS_CONC_MON_RX_DECAP))
 			flags &= ~IEEE80211_OFFLOAD_DECAP_ENABLED;
 	} else {
@@ -1206,16 +1208,17 @@ void ieee80211_del_virtual_monitor(struct ieee80211_local *local)
 		return;
 	}
 
-	RCU_INIT_POINTER(local->monitor_sdata, NULL);
-	mutex_unlock(&local->iflist_mtx);
-
-	synchronize_net();
-
+	clear_bit(SDATA_STATE_RUNNING, &sdata->state);
 	ieee80211_link_release_channel(&sdata->deflink);
 
 	if (ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF))
 		drv_remove_interface(local, sdata);
 
+	RCU_INIT_POINTER(local->monitor_sdata, NULL);
+	mutex_unlock(&local->iflist_mtx);
+
+	synchronize_net();
+
 	kfree(sdata);
 }
 
@@ -1326,28 +1329,27 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
 		}
 		break;
 	case NL80211_IFTYPE_MONITOR:
-		if (sdata->u.mntr.flags & MONITOR_FLAG_COOK_FRAMES) {
-			local->cooked_mntrs++;
-			break;
-		}
-
 		if ((sdata->u.mntr.flags & MONITOR_FLAG_ACTIVE) ||
 		    ieee80211_hw_check(&local->hw, NO_VIRTUAL_MONITOR)) {
 			res = drv_add_interface(local, sdata);
 			if (res)
 				goto err_stop;
-		} else if (local->monitors == 0 && local->open_count == 0) {
-			res = ieee80211_add_virtual_monitor(local);
-			if (res)
-				goto err_stop;
+		} else {
+			if (local->virt_monitors == 0 && local->open_count == 0) {
+				res = ieee80211_add_virtual_monitor(local);
+				if (res)
+					goto err_stop;
+			}
+			local->virt_monitors++;
+
+			/* must be before the call to ieee80211_configure_filter */
+			if (local->virt_monitors == 1) {
+				local->hw.conf.flags |= IEEE80211_CONF_MONITOR;
+				hw_reconf_flags |= IEEE80211_CONF_CHANGE_MONITOR;
+			}
 		}
 
-		/* must be before the call to ieee80211_configure_filter */
 		local->monitors++;
-		if (local->monitors == 1) {
-			local->hw.conf.flags |= IEEE80211_CONF_MONITOR;
-			hw_reconf_flags |= IEEE80211_CONF_CHANGE_MONITOR;
-		}
 
 		ieee80211_adjust_monitor_flags(sdata, 1);
 		ieee80211_configure_filter(local);
@@ -1423,8 +1425,6 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
 		rcu_assign_pointer(local->p2p_sdata, sdata);
 		break;
 	case NL80211_IFTYPE_MONITOR:
-		if (sdata->u.mntr.flags & MONITOR_FLAG_COOK_FRAMES)
-			break;
 		list_add_tail_rcu(&sdata->u.mntr.list, &local->mon_list);
 		break;
 	default:
@@ -1560,10 +1560,21 @@ static void ieee80211_iface_process_skb(struct ieee80211_local *local,
 				ieee80211_process_neg_ttlm_res(sdata, mgmt,
 							       skb->len);
 				break;
+			case WLAN_PROTECTED_EHT_ACTION_TTLM_TEARDOWN:
+				ieee80211_process_ttlm_teardown(sdata);
+				break;
 			case WLAN_PROTECTED_EHT_ACTION_LINK_RECONFIG_RESP:
 				ieee80211_process_ml_reconf_resp(sdata, mgmt,
 								 skb->len);
 				break;
+			case WLAN_PROTECTED_EHT_ACTION_EPCS_ENABLE_RESP:
+				ieee80211_process_epcs_ena_resp(sdata, mgmt,
+								skb->len);
+				break;
+			case WLAN_PROTECTED_EHT_ACTION_EPCS_ENABLE_TEARDOWN:
+				ieee80211_process_epcs_teardown(sdata, mgmt,
+								skb->len);
+				break;
 			default:
 				break;
 			}
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 53e5aee468856..741e6c7edcb7c 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -1744,18 +1744,7 @@ void ieee80211_free_hw(struct ieee80211_hw *hw)
 	wiphy_free(local->hw.wiphy);
 }
 EXPORT_SYMBOL(ieee80211_free_hw);
-
-static const char * const drop_reasons_monitor[] = {
-#define V(x) #x,
-	[0] = "RX_DROP_MONITOR",
-	MAC80211_DROP_REASONS_MONITOR(V)
-};
-
-static struct drop_reason_list drop_reason_list_monitor = {
-	.reasons = drop_reasons_monitor,
-	.n_reasons = ARRAY_SIZE(drop_reasons_monitor),
-};
-
+#define V(x) #x,
 static const char * const drop_reasons_unusable[] = {
 	[0] = "RX_DROP_UNUSABLE",
 	MAC80211_DROP_REASONS_UNUSABLE(V)
@@ -1784,8 +1773,6 @@ static int __init ieee80211_init(void)
 	if (ret)
 		goto err_netdev;
 
-	drop_reasons_register_subsys(SKB_DROP_REASON_SUBSYS_MAC80211_MONITOR,
-				     &drop_reason_list_monitor);
 	drop_reasons_register_subsys(SKB_DROP_REASON_SUBSYS_MAC80211_UNUSABLE,
 				     &drop_reason_list_unusable);
 
@@ -1804,7 +1791,6 @@ static void __exit ieee80211_exit(void)
 
 	ieee80211_iface_exit();
 
-	drop_reasons_unregister_subsys(SKB_DROP_REASON_SUBSYS_MAC80211_MONITOR);
 	drop_reasons_unregister_subsys(SKB_DROP_REASON_SUBSYS_MAC80211_UNUSABLE);
 
 	rcu_barrier();
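Since iface.c now maintains two counters, it is worth spelling out the invariant the open/stop paths keep in balance (a summary of the code above, not a new mechanism):

/*
 *	do_open():  local->monitors++;		always, for any monitor
 *		    local->virt_monitors++;	only if !MONITOR_FLAG_ACTIVE
 *						and !NO_VIRTUAL_MONITOR
 *	do_stop():  local->monitors--;		always
 *		    local->virt_monitors--;	same condition as do_open()
 *
 * so 0 <= virt_monitors <= monitors holds at all times, and the shared
 * virtual monitor interface exists exactly while virt_monitors > 0.
 */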
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index 4e9546e998b6d..c94a9c7ca960e 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -367,6 +367,12 @@ u32 airtime_link_metric_get(struct ieee80211_local *local,
 	return (u32)result;
 }
 
+/* Check that the first metric is at least 10% better than the second one */
+static bool is_metric_better(u32 x, u32 y)
+{
+	return (x < y) && (x < (y - x / 10));
+}
+
 /**
 * hwmp_route_info_get - Update routing info to originator and transmitter
 *
@@ -458,8 +464,8 @@ static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata,
 		    (mpath->sn == orig_sn &&
 		     (rcu_access_pointer(mpath->next_hop) != sta ?
-			      mult_frac(new_metric, 10, 9) :
-			      new_metric) >= mpath->metric)) {
+			      !is_metric_better(new_metric, mpath->metric) :
+			      new_metric >= mpath->metric))) {
 			process = false;
 			fresh_info = false;
 		}
@@ -533,8 +539,8 @@ static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata,
 			if ((mpath->flags & MESH_PATH_FIXED) ||
 			    ((mpath->flags & MESH_PATH_ACTIVE) &&
 			     ((rcu_access_pointer(mpath->next_hop) != sta ?
-				      mult_frac(last_hop_metric, 10, 9) :
-				      last_hop_metric) > mpath->metric)))
+				      !is_metric_better(last_hop_metric, mpath->metric) :
+				      last_hop_metric > mpath->metric))))
 				fresh_info = false;
 		} else {
 			mpath = mesh_path_add(sdata, ta);
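is_metric_better() replaces the old mult_frac(x, 10, 9) comparison with an explicit "at least ~10% better" test in unsigned arithmetic. Two worked cases:

/*
 *	x = 900, y = 1000:  x < y, and 900 < 1000 - 900/10 = 910  -> better
 *	x = 950, y = 1000:  x < y, but 950 < 1000 - 95 = 905 fails -> not better
 *
 * The leading x < y test also guarantees y - x / 10 cannot wrap:
 * since x / 10 <= x < y, the subtraction always stays positive.
 */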
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index f8d52b3b0d0e4..c010bb3d24e3f 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -8,7 +8,7 @@
 * Copyright 2007, Michael Wu
 * Copyright 2013-2014  Intel Mobile Communications GmbH
 * Copyright (C) 2015 - 2017 Intel Deutschland GmbH
- * Copyright (C) 2018 - 2024 Intel Corporation
+ * Copyright (C) 2018 - 2025 Intel Corporation
 */
 
 #include
@@ -168,6 +169,9 @@ ieee80211_determine_ap_chan(struct ieee80211_sub_if_data *sdata,
 	bool no_vht = false;
 	u32 ht_cfreq;
 
+	if (ieee80211_hw_check(&sdata->local->hw, STRICT))
+		ignore_ht_channel_mismatch = false;
+
 	*chandef = (struct cfg80211_chan_def) {
 		.chan = channel,
 		.width = NL80211_CHAN_WIDTH_20_NOHT,
 	};
@@ -342,6 +345,115 @@ ieee80211_determine_ap_chan(struct ieee80211_sub_if_data *sdata,
 	return IEEE80211_CONN_MODE_EHT;
 }
 
+static bool
+ieee80211_verify_sta_ht_mcs_support(struct ieee80211_sub_if_data *sdata,
+				    struct ieee80211_supported_band *sband,
+				    const struct ieee80211_ht_operation *ht_op)
+{
+	struct ieee80211_sta_ht_cap sta_ht_cap;
+	int i;
+
+	if (sband->band == NL80211_BAND_6GHZ)
+		return true;
+
+	if (!ht_op)
+		return false;
+
+	memcpy(&sta_ht_cap, &sband->ht_cap, sizeof(sta_ht_cap));
+	ieee80211_apply_htcap_overrides(sdata, &sta_ht_cap);
+
+	/*
+	 * P802.11REVme/D7.0 - 6.5.4.2.4
+	 * ...
+	 * If the MLME of an HT STA receives an MLME-JOIN.request primitive
+	 * with the SelectedBSS parameter containing a Basic HT-MCS Set field
+	 * in the HT Operation parameter that contains any unsupported MCSs,
+	 * the MLME response in the resulting MLME-JOIN.confirm primitive shall
+	 * contain a ResultCode parameter that is not set to the value SUCCESS.
+	 * ...
+	 */
+
+	/* Simply check that all basic rates are in the STA RX mask */
+	for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++) {
+		if ((ht_op->basic_set[i] & sta_ht_cap.mcs.rx_mask[i]) !=
+		    ht_op->basic_set[i])
+			return false;
+	}
+
+	return true;
+}
+
+static bool
+ieee80211_verify_sta_vht_mcs_support(struct ieee80211_sub_if_data *sdata,
+				     int link_id,
+				     struct ieee80211_supported_band *sband,
+				     const struct ieee80211_vht_operation *vht_op)
+{
+	struct ieee80211_sta_vht_cap sta_vht_cap;
+	u16 ap_min_req_set, sta_rx_mcs_map, sta_tx_mcs_map;
+	int nss;
+
+	if (sband->band != NL80211_BAND_5GHZ)
+		return true;
+
+	if (!vht_op)
+		return false;
+
+	memcpy(&sta_vht_cap, &sband->vht_cap, sizeof(sta_vht_cap));
+	ieee80211_apply_vhtcap_overrides(sdata, &sta_vht_cap);
+
+	ap_min_req_set = le16_to_cpu(vht_op->basic_mcs_set);
+	sta_rx_mcs_map = le16_to_cpu(sta_vht_cap.vht_mcs.rx_mcs_map);
+	sta_tx_mcs_map = le16_to_cpu(sta_vht_cap.vht_mcs.tx_mcs_map);
+
+	/*
+	 * Many APs are incorrectly advertising an all-zero value here,
+	 * which really means MCS 0-7 are required for 1-8 streams, but
+	 * they don't really mean it that way.
+	 * Some other APs are incorrectly advertising 3 spatial streams
+	 * with MCS 0-7 are required, but don't really mean it that way
+	 * and we'll connect only with HT, rather than even HE.
+	 * As a result, unfortunately the VHT basic MCS/NSS set cannot
+	 * be used at all, so check it only in strict mode.
+	 */
+	if (!ieee80211_hw_check(&sdata->local->hw, STRICT))
+		return true;
+
+	/*
+	 * P802.11REVme/D7.0 - 6.5.4.2.4
+	 * ...
+	 * If the MLME of a VHT STA receives an MLME-JOIN.request primitive
+	 * with a SelectedBSS parameter containing a Basic VHT-MCS And NSS Set
+	 * field in the VHT Operation parameter that contains any unsupported
+	 * <VHT-MCS, NSS> tuple, the MLME response in the resulting
+	 * MLME-JOIN.confirm primitive shall contain a ResultCode parameter
+	 * that is not set to the value SUCCESS.
+	 * ...
+	 */
+	for (nss = 8; nss > 0; nss--) {
+		u8 ap_op_val = (ap_min_req_set >> (2 * (nss - 1))) & 3;
+		u8 sta_rx_val;
+		u8 sta_tx_val;
+
+		if (ap_op_val == IEEE80211_VHT_MCS_NOT_SUPPORTED)
+			continue;
+
+		sta_rx_val = (sta_rx_mcs_map >> (2 * (nss - 1))) & 3;
+		sta_tx_val = (sta_tx_mcs_map >> (2 * (nss - 1))) & 3;
+
+		if (sta_rx_val == IEEE80211_VHT_MCS_NOT_SUPPORTED ||
+		    sta_tx_val == IEEE80211_VHT_MCS_NOT_SUPPORTED ||
+		    sta_rx_val < ap_op_val || sta_tx_val < ap_op_val) {
+			link_id_info(sdata, link_id,
+				     "Missing mandatory rates for %d Nss, rx %d, tx %d oper %d, disable VHT\n",
+				     nss, sta_rx_val, sta_tx_val, ap_op_val);
+			return false;
+		}
+	}
+
+	return true;
+}
+
 static bool
 ieee80211_verify_peer_he_mcs_support(struct ieee80211_sub_if_data *sdata,
 				     int link_id,
@@ -388,7 +500,7 @@ ieee80211_verify_peer_he_mcs_support(struct ieee80211_sub_if_data *sdata,
 	 * zeroes, which is nonsense, and completely inconsistent with itself
 	 * (it doesn't have 8 streams). Accept the settings in this case anyway.
 	 */
-	if (!ap_min_req_set)
+	if (!ieee80211_hw_check(&sdata->local->hw, STRICT) && !ap_min_req_set)
 		return true;
 
 	/* make sure the AP is consistent with itself
@@ -448,7 +560,7 @@ ieee80211_verify_sta_he_mcs_support(struct ieee80211_sub_if_data *sdata,
 	 * zeroes, which is nonsense, and completely inconsistent with itself
 	 * (it doesn't have 8 streams). Accept the settings in this case anyway.
 	 */
-	if (!ap_min_req_set)
+	if (!ieee80211_hw_check(&sdata->local->hw, STRICT) && !ap_min_req_set)
 		return true;
 
 	/* Need to go over for 80MHz, 160MHz and for 80+80 */
@@ -676,7 +788,7 @@ static int ieee80211_chandef_num_subchans(const struct cfg80211_chan_def *c)
 	if (c->width == NL80211_CHAN_WIDTH_80P80)
 		return 4 + 4;
 
-	return nl80211_chan_width_to_mhz(c->width) / 20;
+	return cfg80211_chandef_get_width(c) / 20;
 }
 
 static int ieee80211_chandef_num_widths(const struct cfg80211_chan_def *c)
@@ -877,7 +989,7 @@ static void ieee80211_set_chanreq_ap(struct ieee80211_sub_if_data *sdata,
 	chanreq->ap = *ap_chandef;
 }
 
-static struct ieee802_11_elems *
+VISIBLE_IF_MAC80211_KUNIT struct ieee802_11_elems *
 ieee80211_determine_chan_mode(struct ieee80211_sub_if_data *sdata,
 			      struct ieee80211_conn_settings *conn,
 			      struct cfg80211_bss *cbss, int link_id,
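A worked example of the 2-bit-per-NSS map decode used by the VHT verification helper above. For a typical 2x2 STA with rx_mcs_map = 0xfffa:

/*
 *	NSS 1: (0xfffa >> 0) & 3 = 2  -> MCS 0-9 supported
 *	NSS 2: (0xfffa >> 2) & 3 = 2  -> MCS 0-9 supported
 *	NSS 3-8: all bit pairs are 3  -> IEEE80211_VHT_MCS_NOT_SUPPORTED
 *
 * An AP basic set of 0x0000 would demand MCS 0-7 on all 8 spatial
 * streams, which is exactly the broken advertisement the comment above
 * describes -- hence the check is only honored in strict mode.
 */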
link_id, (void *)elems->he_cap, @@ -1082,6 +1214,7 @@ ieee80211_determine_chan_mode(struct ieee80211_sub_if_data *sdata, kfree(elems); return ERR_PTR(ret); } +EXPORT_SYMBOL_IF_MAC80211_KUNIT(ieee80211_determine_chan_mode); static int ieee80211_config_bw(struct ieee80211_link_data *link, struct ieee802_11_elems *elems, @@ -1313,13 +1446,15 @@ static bool ieee80211_add_vht_ie(struct ieee80211_sub_if_data *sdata, * Some APs apparently get confused if our capabilities are better * than theirs, so restrict what we advertise in the assoc request. */ - if (!(ap_vht_cap->vht_cap_info & - cpu_to_le32(IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE))) - cap &= ~(IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE | - IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE); - else if (!(ap_vht_cap->vht_cap_info & - cpu_to_le32(IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE))) - cap &= ~IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE; + if (!ieee80211_hw_check(&local->hw, STRICT)) { + if (!(ap_vht_cap->vht_cap_info & + cpu_to_le32(IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE))) + cap &= ~(IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE | + IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE); + else if (!(ap_vht_cap->vht_cap_info & + cpu_to_le32(IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE))) + cap &= ~IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE; + } /* * If some other vif is using the MU-MIMO capability we cannot associate @@ -1361,14 +1496,16 @@ static bool ieee80211_add_vht_ie(struct ieee80211_sub_if_data *sdata, return mu_mimo_owner; } -static void ieee80211_assoc_add_rates(struct sk_buff *skb, +static void ieee80211_assoc_add_rates(struct ieee80211_local *local, + struct sk_buff *skb, enum nl80211_chan_width width, struct ieee80211_supported_band *sband, struct ieee80211_mgd_assoc_data *assoc_data) { u32 rates; - if (assoc_data->supp_rates_len) { + if (assoc_data->supp_rates_len && + !ieee80211_hw_check(&local->hw, STRICT)) { /* * Get all rates supported by the device and the AP as * some APs don't like getting a superset of their rates @@ -1584,7 +1721,7 @@ ieee80211_add_link_elems(struct ieee80211_sub_if_data *sdata, *capab |= WLAN_CAPABILITY_SPECTRUM_MGMT; if (sband->band != NL80211_BAND_S1GHZ) - ieee80211_assoc_add_rates(skb, width, sband, assoc_data); + ieee80211_assoc_add_rates(local, skb, width, sband, assoc_data); if (*capab & WLAN_CAPABILITY_SPECTRUM_MGMT || *capab & WLAN_CAPABILITY_RADIO_MEASURE) { @@ -1806,6 +1943,21 @@ ieee80211_assoc_add_ml_elem(struct ieee80211_sub_if_data *sdata, } skb_put_data(skb, &mld_capa_ops, sizeof(mld_capa_ops)); + /* Many APs have broken parsing of the extended MLD capa/ops field, + * dropping (re-)association request frames or replying with association + * response with a failure status if it's present. Without a clear + * indication as to whether the AP supports parsing this field or not do + * not include it in the common information unless strict mode is set. 
+ */ + if (ieee80211_hw_check(&local->hw, STRICT) && + assoc_data->ext_mld_capa_ops) { + ml_elem->control |= + cpu_to_le16(IEEE80211_MLC_BASIC_PRES_EXT_MLD_CAPA_OP); + common->len += 2; + skb_put_data(skb, &assoc_data->ext_mld_capa_ops, + sizeof(assoc_data->ext_mld_capa_ops)); + } + for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) { u16 link_present_elems[PRESENT_ELEMS_MAX] = {}; const u8 *extra_elems; @@ -1975,6 +2127,7 @@ static int ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata) /* max common info field in basic multi-link element */ size += sizeof(struct ieee80211_mle_basic_common_info) + 2 + /* capa & op */ + 2 + /* ext capa & op */ 2; /* EML capa */ /* @@ -2051,7 +2204,8 @@ static int ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata) * for some reason check it and want it to be set, set the bit for all * pre-EHT connections as we used to do. */ - if (link->u.mgd.conn.mode < IEEE80211_CONN_MODE_EHT) + if (link->u.mgd.conn.mode < IEEE80211_CONN_MODE_EHT && + !ieee80211_hw_check(&local->hw, STRICT)) capab |= WLAN_CAPABILITY_ESS; /* add the elements for the assoc (main) link */ @@ -3375,10 +3529,10 @@ void ieee80211_mgd_set_link_qos_params(struct ieee80211_link_data *link) /* MLME */ static bool -ieee80211_sta_wmm_params(struct ieee80211_local *local, - struct ieee80211_link_data *link, - const u8 *wmm_param, size_t wmm_param_len, - const struct ieee80211_mu_edca_param_set *mu_edca) +_ieee80211_sta_wmm_params(struct ieee80211_local *local, + struct ieee80211_link_data *link, + const u8 *wmm_param, size_t wmm_param_len, + const struct ieee80211_mu_edca_param_set *mu_edca) { struct ieee80211_sub_if_data *sdata = link->sdata; struct ieee80211_tx_queue_params params[IEEE80211_NUM_ACS]; @@ -3507,6 +3661,19 @@ ieee80211_sta_wmm_params(struct ieee80211_local *local, for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) link->tx_conf[ac] = params[ac]; + return true; +} + +static bool +ieee80211_sta_wmm_params(struct ieee80211_local *local, + struct ieee80211_link_data *link, + const u8 *wmm_param, size_t wmm_param_len, + const struct ieee80211_mu_edca_param_set *mu_edca) +{ + if (!_ieee80211_sta_wmm_params(local, link, wmm_param, wmm_param_len, + mu_edca)) + return false; + ieee80211_mgd_set_link_qos_params(link); /* enable WMM or activate new settings */ @@ -3779,8 +3946,34 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata, ifmgd->associated = false; + if (tx) { + bool tx_link_found = false; + + for (link_id = 0; + link_id < ARRAY_SIZE(sdata->link); + link_id++) { + struct ieee80211_link_data *link; + + if (!ieee80211_vif_link_active(&sdata->vif, link_id)) + continue; + + link = sdata_dereference(sdata->link[link_id], sdata); + if (WARN_ON_ONCE(!link)) + continue; + + if (link->u.mgd.csa.blocked_tx) + continue; + + tx_link_found = true; + break; + } + + tx = tx_link_found; + } + /* other links will be destroyed */ sdata->deflink.conf->bss = NULL; + sdata->deflink.conf->epcs_support = false; sdata->deflink.smps_mode = IEEE80211_SMPS_OFF; netif_carrier_off(sdata->dev); @@ -3808,23 +4001,24 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata, * insist sending these frames which can take time and delay * the disconnection and possible the roaming. 
*/ - if (tx) - ieee80211_flush_queues(local, sdata, true); + ieee80211_flush_queues(local, sdata, true); - /* deauthenticate/disassociate now */ - if (tx || frame_buf) { + if (tx) { drv_mgd_prepare_tx(sdata->local, sdata, &info); ieee80211_send_deauth_disassoc(sdata, sdata->vif.cfg.ap_addr, sdata->vif.cfg.ap_addr, stype, - reason, tx, frame_buf); - } + reason, true, frame_buf); - /* flush out frame - make sure the deauth was actually sent */ - if (tx) + /* flush out frame - make sure the deauth was actually sent */ ieee80211_flush_queues(local, sdata, false); - drv_mgd_complete_tx(sdata->local, sdata, &info); + drv_mgd_complete_tx(sdata->local, sdata, &info); + } else if (frame_buf) { + ieee80211_send_deauth_disassoc(sdata, sdata->vif.cfg.ap_addr, + sdata->vif.cfg.ap_addr, stype, + reason, false, frame_buf); + } /* clear AP addr only after building the needed mgmt frames */ eth_zero_addr(sdata->deflink.u.mgd.bssid); @@ -3958,15 +4152,21 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata, wiphy_work_cancel(sdata->local->hw.wiphy, &ifmgd->teardown_ttlm_work); - ieee80211_vif_set_links(sdata, 0, 0); - - ifmgd->mcast_seq_last = IEEE80211_SN_MODULO; - /* if disconnection happens in the middle of the ML reconfiguration * flow, cfg80211 must called to release the BSS references obtained * when the flow started. */ ieee80211_ml_reconf_reset(sdata); + + ieee80211_vif_set_links(sdata, 0, 0); + + ifmgd->mcast_seq_last = IEEE80211_SN_MODULO; + + ifmgd->epcs.enabled = false; + ifmgd->epcs.dialog_token = 0; + + memset(ifmgd->userspace_selectors, 0, + sizeof(ifmgd->userspace_selectors)); } static void ieee80211_reset_ap_probe(struct ieee80211_sub_if_data *sdata) @@ -4247,33 +4447,12 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata) struct ieee80211_local *local = sdata->local; struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN]; - bool tx = false; lockdep_assert_wiphy(local->hw.wiphy); if (!ifmgd->associated) return; - /* only transmit if we have a link that makes that worthwhile */ - for (unsigned int link_id = 0; - link_id < ARRAY_SIZE(sdata->link); - link_id++) { - struct ieee80211_link_data *link; - - if (!ieee80211_vif_link_active(&sdata->vif, link_id)) - continue; - - link = sdata_dereference(sdata->link[link_id], sdata); - if (WARN_ON_ONCE(!link)) - continue; - - if (link->u.mgd.csa.blocked_tx) - continue; - - tx = true; - break; - } - if (!ifmgd->driver_disconnect) { unsigned int link_id; @@ -4290,7 +4469,7 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata) struct ieee80211_link_data *link; link = sdata_dereference(sdata->link[link_id], sdata); - if (!link) + if (!link || !link->conf->bss) continue; cfg80211_unlink_bss(local->hw.wiphy, link->conf->bss); link->conf->bss = NULL; @@ -4301,14 +4480,14 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata) ifmgd->driver_disconnect ? 
WLAN_REASON_DEAUTH_LEAVING : WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY, - tx, frame_buf); + true, frame_buf); /* the other links will be destroyed */ sdata->vif.bss_conf.csa_active = false; sdata->deflink.u.mgd.csa.waiting_bcn = false; sdata->deflink.u.mgd.csa.blocked_tx = false; ieee80211_vif_unblock_queues_csa(sdata); - ieee80211_report_disconnect(sdata, frame_buf, sizeof(frame_buf), tx, + ieee80211_report_disconnect(sdata, frame_buf, sizeof(frame_buf), true, WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY, ifmgd->reconnect); ifmgd->reconnect = false; @@ -4570,6 +4749,8 @@ static void ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata, auth_transaction = le16_to_cpu(mgmt->u.auth.auth_transaction); status_code = le16_to_cpu(mgmt->u.auth.status_code); + info.link_id = ifmgd->auth_data->link_id; + if (auth_alg != ifmgd->auth_data->algorithm || (auth_alg != WLAN_AUTH_SAE && auth_transaction != ifmgd->auth_data->expected_transaction) || @@ -4835,6 +5016,82 @@ static bool ieee80211_twt_bcast_support(struct ieee80211_sub_if_data *sdata, IEEE80211_HE_MAC_CAP2_BCAST_TWT); } +static void ieee80211_epcs_changed(struct ieee80211_sub_if_data *sdata, + bool enabled) +{ + /* whenever this is called, the dialog token should be reset */ + sdata->u.mgd.epcs.dialog_token = 0; + + if (sdata->u.mgd.epcs.enabled == enabled) + return; + + sdata->u.mgd.epcs.enabled = enabled; + cfg80211_epcs_changed(sdata->dev, enabled); +} + +static void ieee80211_epcs_teardown(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_local *local = sdata->local; + u8 link_id; + + if (!sdata->u.mgd.epcs.enabled) + return; + + lockdep_assert_wiphy(local->hw.wiphy); + + for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) { + struct ieee802_11_elems *elems; + struct ieee80211_link_data *link; + const struct cfg80211_bss_ies *ies; + bool ret; + + rcu_read_lock(); + + link = sdata_dereference(sdata->link[link_id], sdata); + if (!link || !link->conf || !link->conf->bss) { + rcu_read_unlock(); + continue; + } + + if (link->u.mgd.disable_wmm_tracking) { + rcu_read_unlock(); + ieee80211_set_wmm_default(link, false, false); + continue; + } + + ies = rcu_dereference(link->conf->bss->beacon_ies); + if (!ies) { + rcu_read_unlock(); + ieee80211_set_wmm_default(link, false, false); + continue; + } + + elems = ieee802_11_parse_elems(ies->data, ies->len, false, + NULL); + if (!elems) { + rcu_read_unlock(); + ieee80211_set_wmm_default(link, false, false); + continue; + } + + ret = _ieee80211_sta_wmm_params(local, link, + elems->wmm_param, + elems->wmm_param_len, + elems->mu_edca_param_set); + + kfree(elems); + rcu_read_unlock(); + + if (!ret) { + ieee80211_set_wmm_default(link, false, false); + continue; + } + + ieee80211_mgd_set_link_qos_params(link); + ieee80211_link_info_change_notify(sdata, link, BSS_CHANGED_QOS); + } +} + static bool ieee80211_assoc_config_link(struct ieee80211_link_data *link, struct link_sta_info *link_sta, struct cfg80211_bss *cbss, @@ -4936,7 +5193,7 @@ static bool ieee80211_assoc_config_link(struct ieee80211_link_data *link, * 2G/3G/4G wifi routers, reported models include the "Onda PN51T", * "Vodafone PocketWiFi 2", "ZTE MF60" and a similar T-Mobile device.
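 *
 * [Editor's note: ieee80211_epcs_teardown() above restores the
 * pre-EPCS EDCA configuration by re-parsing the AP's beacon elements
 * per link, falling back to ieee80211_set_wmm_default() whenever a
 * step fails. The EPCS code added further below also uses the
 * scoped-cleanup idiom
 *
 *	u8 *scratch __free(kfree) = kzalloc(scratch_len, GFP_KERNEL);
 *
 * so the buffer is freed automatically when it goes out of scope.]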
*/ - if (!is_6ghz && + if (!ieee80211_hw_check(&local->hw, STRICT) && !is_6ghz && ((assoc_data->wmm && !elems->wmm_param) || (link->u.mgd.conn.mode >= IEEE80211_CONN_MODE_HT && (!elems->ht_cap_elem || !elems->ht_operation)) || @@ -4959,6 +5216,7 @@ static bool ieee80211_assoc_config_link(struct ieee80211_link_data *link, parse_params.start = bss_ies->data; parse_params.len = bss_ies->len; parse_params.bss = cbss; + parse_params.link_id = -1; bss_elems = ieee802_11_parse_elems_full(&parse_params); if (!bss_elems) { ret = false; @@ -5071,6 +5329,15 @@ static bool ieee80211_assoc_config_link(struct ieee80211_link_data *link, bss_vht_cap = (const void *)elem->data; } + if (ieee80211_hw_check(&local->hw, STRICT) && + (!bss_vht_cap || memcmp(bss_vht_cap, elems->vht_cap_elem, + sizeof(*bss_vht_cap)))) { + rcu_read_unlock(); + ret = false; + link_info(link, "VHT capabilities mismatch\n"); + goto out; + } + ieee80211_vht_cap_ie_to_sta_vht_cap(sdata, sband, elems->vht_cap_elem, bss_vht_cap, link_sta); @@ -5108,14 +5375,27 @@ static bool ieee80211_assoc_config_link(struct ieee80211_link_data *link, link_sta); bss_conf->eht_support = link_sta->pub->eht_cap.has_eht; + bss_conf->epcs_support = bss_conf->eht_support && + !!(elems->eht_cap->fixed.mac_cap_info[0] & + IEEE80211_EHT_MAC_CAP0_EPCS_PRIO_ACCESS); + + /* EPCS might already be enabled, but a newly added link + * might not support EPCS. This should not really happen + * in practice. + */ + if (sdata->u.mgd.epcs.enabled && + !bss_conf->epcs_support) + ieee80211_epcs_teardown(sdata); } else { bss_conf->eht_support = false; + bss_conf->epcs_support = false; } } else { bss_conf->he_support = false; bss_conf->twt_requester = false; bss_conf->twt_protected = false; bss_conf->eht_support = false; + bss_conf->epcs_support = false; } bss_conf->twt_broadcast = @@ -5860,7 +6140,7 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata, err = ieee80211_prep_channel(sdata, link, link_id, cbss, true, &link->u.mgd.conn, - assoc_data->userspace_selectors); + sdata->u.mgd.userspace_selectors); if (err) { link_info(link, "prep_channel failed\n"); goto out_err; } @@ -7146,7 +7426,8 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_link_data *link, ieee80211_mgd_update_bss_param_ch_cnt(sdata, bss_conf, elems); - if (!link->u.mgd.disable_wmm_tracking && + if (!sdata->u.mgd.epcs.enabled && + !link->u.mgd.disable_wmm_tracking && ieee80211_sta_wmm_params(local, link, elems->wmm_param, elems->wmm_param_len, elems->mu_edca_param_set)) @@ -7598,13 +7879,9 @@ void ieee80211_process_neg_ttlm_res(struct ieee80211_sub_if_data *sdata, __ieee80211_disconnect(sdata); } -static void ieee80211_teardown_ttlm_work(struct wiphy *wiphy, - struct wiphy_work *work) +void ieee80211_process_ttlm_teardown(struct ieee80211_sub_if_data *sdata) { u16 new_dormant_links; - struct ieee80211_sub_if_data *sdata = - container_of(work, struct ieee80211_sub_if_data, - u.mgd.teardown_ttlm_work); if (!sdata->vif.neg_ttlm.valid) return; @@ -7619,6 +7896,16 @@ static void ieee80211_teardown_ttlm_work(struct wiphy *wiphy, BSS_CHANGED_MLD_VALID_LINKS); } +static void ieee80211_teardown_ttlm_work(struct wiphy *wiphy, + struct wiphy_work *work) +{ + struct ieee80211_sub_if_data *sdata = + container_of(work, struct ieee80211_sub_if_data, + u.mgd.teardown_ttlm_work); + + ieee80211_process_ttlm_teardown(sdata); +} + void ieee80211_send_teardown_neg_ttlm(struct ieee80211_vif *vif) { struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); @@ -9099,6 +9386,8 @@ int ieee80211_mgd_assoc(struct
ieee80211_sub_if_data *sdata, else memcpy(assoc_data->ap_addr, cbss->bssid, ETH_ALEN); + assoc_data->ext_mld_capa_ops = cpu_to_le16(req->ext_mld_capa_ops); + if (ifmgd->associated) { u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN]; @@ -9115,7 +9404,9 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata, false); } - ieee80211_parse_cfg_selectors(assoc_data->userspace_selectors, + memset(sdata->u.mgd.userspace_selectors, 0, + sizeof(sdata->u.mgd.userspace_selectors)); + ieee80211_parse_cfg_selectors(sdata->u.mgd.userspace_selectors, req->supported_selectors, req->supported_selectors_len); @@ -9366,7 +9657,7 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata, err = ieee80211_prep_channel(sdata, NULL, i, assoc_data->link[i].bss, true, &assoc_data->link[i].conn, - assoc_data->userspace_selectors); + sdata->u.mgd.userspace_selectors); if (err) { req->links[i].error = err; goto err_clear; @@ -9383,7 +9674,7 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata, req->ap_mld_addr, true, &assoc_data->link[assoc_link_id].conn, override, - assoc_data->userspace_selectors); + sdata->u.mgd.userspace_selectors); if (err) goto err_clear; @@ -9489,7 +9780,6 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata, ieee80211_report_disconnect(sdata, frame_buf, sizeof(frame_buf), true, req->reason_code, false); - drv_mgd_complete_tx(sdata->local, sdata, &info); return 0; } @@ -9628,16 +9918,6 @@ void ieee80211_disable_rssi_reports(struct ieee80211_vif *vif) } EXPORT_SYMBOL(ieee80211_disable_rssi_reports); -static void ieee80211_ml_reconf_selectors(unsigned long *userspace_selectors) -{ - *userspace_selectors = 0; - - /* these selectors are mandatory for ML reconfiguration */ - set_bit(BSS_MEMBERSHIP_SELECTOR_SAE_H2E, userspace_selectors); - set_bit(BSS_MEMBERSHIP_SELECTOR_HE_PHY, userspace_selectors); - set_bit(BSS_MEMBERSHIP_SELECTOR_EHT_PHY, userspace_selectors); -} - void ieee80211_process_ml_reconf_resp(struct ieee80211_sub_if_data *sdata, struct ieee80211_mgmt *mgmt, size_t len) { @@ -9651,7 +9931,6 @@ void ieee80211_process_ml_reconf_resp(struct ieee80211_sub_if_data *sdata, sdata->u.mgd.reconf.removed_links; u16 link_mask, valid_links; unsigned int link_id; - unsigned long userspace_selectors; size_t orig_len = len; u8 i, group_key_data_len; u8 *pos; @@ -9759,7 +10038,6 @@ void ieee80211_process_ml_reconf_resp(struct ieee80211_sub_if_data *sdata, } ieee80211_vif_set_links(sdata, valid_links, sdata->vif.dormant_links); - ieee80211_ml_reconf_selectors(&userspace_selectors); link_mask = 0; for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) { struct cfg80211_bss *cbss = add_links_data->link[link_id].bss; @@ -9805,7 +10083,7 @@ void ieee80211_process_ml_reconf_resp(struct ieee80211_sub_if_data *sdata, link->u.mgd.conn = add_links_data->link[link_id].conn; if (ieee80211_prep_channel(sdata, link, link_id, cbss, true, &link->u.mgd.conn, - &userspace_selectors)) { + sdata->u.mgd.userspace_selectors)) { link_info(link, "mlo: reconf: prep_channel failed\n"); goto disconnect; } @@ -9853,8 +10131,11 @@ void ieee80211_process_ml_reconf_resp(struct ieee80211_sub_if_data *sdata, done_data.len = orig_len; done_data.added_links = link_mask; - for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) + for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) { done_data.links[link_id].bss = add_links_data->link[link_id].bss; + done_data.links[link_id].addr = + add_links_data->link[link_id].addr; + } cfg80211_mlo_reconf_add_done(sdata->dev, &done_data); 
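	/* [Editor's note: the userspace_selectors bitmap replacing the
	 * ieee80211_ml_reconf_selectors() helper removed above now lives
	 * on sdata->u.mgd and is filled once at association time. A
	 * hedged sketch of the bitmap idiom involved, using the generic
	 * kernel set_bit()/test_bit() helpers:
	 *
	 *	unsigned long sel[BITS_TO_LONGS(128)] = {};
	 *
	 *	set_bit(BSS_MEMBERSHIP_SELECTOR_SAE_H2E, sel);
	 *	if (test_bit(BSS_MEMBERSHIP_SELECTOR_SAE_H2E, sel))
	 *		;	// selector is handled by userspace
	 * ]
	 */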
kfree(sdata->u.mgd.reconf.add_links_data); @@ -9870,7 +10151,7 @@ void ieee80211_process_ml_reconf_resp(struct ieee80211_sub_if_data *sdata, static struct sk_buff * ieee80211_build_ml_reconf_req(struct ieee80211_sub_if_data *sdata, struct ieee80211_mgd_assoc_data *add_links_data, - u16 removed_links) + u16 removed_links, __le16 ext_mld_capa_ops) { struct ieee80211_local *local = sdata->local; struct ieee80211_mgmt *mgmt; @@ -9919,6 +10200,9 @@ ieee80211_build_ml_reconf_req(struct ieee80211_sub_if_data *sdata, var_common_size += 2; } + if (ext_mld_capa_ops) + var_common_size += 2; + /* Add the common information length */ size += common_size + var_common_size; @@ -9945,8 +10229,8 @@ ieee80211_build_ml_reconf_req(struct ieee80211_sub_if_data *sdata, size += 2 + sizeof(struct ieee80211_mle_per_sta_profile) + ETH_ALEN; - /* SSID element + WMM */ - size += 2 + sdata->vif.cfg.ssid_len + 9; + /* WMM */ + size += 9; size += ieee80211_link_common_elems_size(sdata, iftype, cbss, elems_len); } @@ -10003,6 +10287,12 @@ ieee80211_build_ml_reconf_req(struct ieee80211_sub_if_data *sdata, skb_put_data(skb, &mld_capa_ops, sizeof(mld_capa_ops)); } + if (ext_mld_capa_ops) { + ml_elem->control |= + cpu_to_le16(IEEE80211_MLC_RECONF_PRES_EXT_MLD_CAPA_OP); + skb_put_data(skb, &ext_mld_capa_ops, sizeof(ext_mld_capa_ops)); + } + if (sdata->u.mgd.flags & IEEE80211_STA_ENABLE_RRM) capab |= WLAN_CAPABILITY_RADIO_MEASURE; @@ -10052,11 +10342,6 @@ ieee80211_build_ml_reconf_req(struct ieee80211_sub_if_data *sdata, capab_pos = skb_put(skb, 2); - skb_put_u8(skb, WLAN_EID_SSID); - skb_put_u8(skb, sdata->vif.cfg.ssid_len); - skb_put_data(skb, sdata->vif.cfg.ssid, - sdata->vif.cfg.ssid_len); - extra_used = ieee80211_add_link_elems(sdata, skb, &capab, NULL, add_links_data->link[link_id].elems, @@ -10096,8 +10381,7 @@ ieee80211_build_ml_reconf_req(struct ieee80211_sub_if_data *sdata, } int ieee80211_mgd_assoc_ml_reconf(struct ieee80211_sub_if_data *sdata, - struct cfg80211_assoc_link *add_links, - u16 rem_links) + struct cfg80211_ml_reconf_req *req) { struct ieee80211_local *local = sdata->local; struct ieee80211_mgd_assoc_data *data = NULL; @@ -10117,9 +10401,8 @@ int ieee80211_mgd_assoc_ml_reconf(struct ieee80211_sub_if_data *sdata, return -EBUSY; added_links = 0; - for (link_id = 0; add_links && link_id < IEEE80211_MLD_MAX_NUM_LINKS; - link_id++) { - if (!add_links[link_id].bss) + for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) { + if (!req->add_links[link_id].bss) continue; added_links |= BIT(link_id); @@ -10129,9 +10412,6 @@ int ieee80211_mgd_assoc_ml_reconf(struct ieee80211_sub_if_data *sdata, if (WARN_ON(!sta)) return -ENOLINK; - if (rem_links & BIT(sta->sta.deflink.link_id)) - return -EINVAL; - /* Adding links to the set of valid link is done only after a successful * ML reconfiguration frame exchange. 
Here prepare the data for the ML * reconfiguration frame construction and allocate the required @@ -10139,18 +10419,20 @@ int ieee80211_mgd_assoc_ml_reconf(struct ieee80211_sub_if_data *sdata, */ if (added_links) { bool uapsd_supported; - unsigned long userspace_selectors; data = kzalloc(sizeof(*data), GFP_KERNEL); if (!data) return -ENOMEM; + data->assoc_link_id = -1; + data->wmm = true; + uapsd_supported = true; - ieee80211_ml_reconf_selectors(&userspace_selectors); for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) { struct ieee80211_supported_band *sband; - struct cfg80211_bss *link_cbss = add_links[link_id].bss; + struct cfg80211_bss *link_cbss = + req->add_links[link_id].bss; struct ieee80211_bss *bss; if (!link_cbss) @@ -10180,11 +10462,11 @@ int ieee80211_mgd_assoc_ml_reconf(struct ieee80211_sub_if_data *sdata, data->link[link_id].bss = link_cbss; data->link[link_id].disabled = - add_links[link_id].disabled; + req->add_links[link_id].disabled; data->link[link_id].elems = - (u8 *)add_links[link_id].elems; + (u8 *)req->add_links[link_id].elems; data->link[link_id].elems_len = - add_links[link_id].elems_len; + req->add_links[link_id].elems_len; if (!bss->uapsd_supported) uapsd_supported = false; @@ -10203,12 +10485,11 @@ int ieee80211_mgd_assoc_ml_reconf(struct ieee80211_sub_if_data *sdata, } } - /* Require U-APSD support to be similar to the current valid - * links - */ - if (uapsd_supported != - !!(sdata->u.mgd.flags & IEEE80211_STA_UAPSD_ENABLED)) { + /* Require U-APSD support if we enabled it */ + if (sdata->u.mgd.flags & IEEE80211_STA_UAPSD_ENABLED && + !uapsd_supported) { err = -EINVAL; + sdata_info(sdata, "U-APSD on but not available on (all) new links\n"); goto err_free; } @@ -10222,7 +10503,7 @@ int ieee80211_mgd_assoc_ml_reconf(struct ieee80211_sub_if_data *sdata, data->link[link_id].bss, true, &data->link[link_id].conn, - &userspace_selectors); + sdata->u.mgd.userspace_selectors); if (err) goto err_free; } @@ -10234,10 +10515,11 @@ int ieee80211_mgd_assoc_ml_reconf(struct ieee80211_sub_if_data *sdata, * Section 35.3.6.4 in Draft P802.11be_D7.0 the AP MLD should accept the * link removal request. */ - if (rem_links) { - u16 new_active_links = sdata->vif.active_links & ~rem_links; + if (req->rem_links) { + u16 new_active_links = + sdata->vif.active_links & ~req->rem_links; - new_valid_links = sdata->vif.valid_links & ~rem_links; + new_valid_links = sdata->vif.valid_links & ~req->rem_links; /* Should not be left with no valid links to perform the * ML reconfiguration @@ -10272,14 +10554,16 @@ int ieee80211_mgd_assoc_ml_reconf(struct ieee80211_sub_if_data *sdata, * is expected to send the ML reconfiguration response frame on the link * on which the request was received. 
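 *
 * [Editor's note: ext_mld_capa_ops travels in little-endian wire
 * format, so the caller converts exactly once before the value is
 * copied into the frame; a hedged sketch mirroring the call below:
 *
 *	__le16 wire = cpu_to_le16(req->ext_mld_capa_ops);
 *
 *	skb_put_data(skb, &wire, sizeof(wire));
 * ]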
*/ - skb = ieee80211_build_ml_reconf_req(sdata, data, rem_links); + skb = ieee80211_build_ml_reconf_req(sdata, data, req->rem_links, + cpu_to_le16(req->ext_mld_capa_ops)); if (!skb) { err = -ENOMEM; goto err_free; } - if (rem_links) { - u16 new_dormant_links = sdata->vif.dormant_links & ~rem_links; + if (req->rem_links) { + u16 new_dormant_links = + sdata->vif.dormant_links & ~req->rem_links; err = ieee80211_vif_set_links(sdata, new_valid_links, new_dormant_links); @@ -10292,7 +10576,7 @@ int ieee80211_mgd_assoc_ml_reconf(struct ieee80211_sub_if_data *sdata, for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) { - if (!(rem_links & BIT(link_id))) + if (!(req->rem_links & BIT(link_id))) continue; ieee80211_sta_remove_link(sta, link_id); @@ -10301,17 +10585,17 @@ int ieee80211_mgd_assoc_ml_reconf(struct ieee80211_sub_if_data *sdata, /* notify the driver and upper layers */ ieee80211_vif_cfg_change_notify(sdata, BSS_CHANGED_MLD_VALID_LINKS); - cfg80211_links_removed(sdata->dev, rem_links); + cfg80211_links_removed(sdata->dev, req->rem_links); } sdata_info(sdata, "mlo: reconf: adding=0x%x, removed=0x%x\n", - added_links, rem_links); + added_links, req->rem_links); ieee80211_tx_skb(sdata, skb); sdata->u.mgd.reconf.added_links = added_links; sdata->u.mgd.reconf.add_links_data = data; - sdata->u.mgd.reconf.removed_links = rem_links; + sdata->u.mgd.reconf.removed_links = req->rem_links; wiphy_delayed_work_queue(sdata->local->hw.wiphy, &sdata->u.mgd.reconf.wk, IEEE80211_ASSOC_TIMEOUT_SHORT); @@ -10321,3 +10605,198 @@ int ieee80211_mgd_assoc_ml_reconf(struct ieee80211_sub_if_data *sdata, kfree(data); return err; } + +static bool ieee80211_mgd_epcs_supp(struct ieee80211_sub_if_data *sdata) +{ + unsigned long valid_links = sdata->vif.valid_links; + u8 link_id; + + lockdep_assert_wiphy(sdata->local->hw.wiphy); + + if (!ieee80211_vif_is_mld(&sdata->vif)) + return false; + + for_each_set_bit(link_id, &valid_links, IEEE80211_MLD_MAX_NUM_LINKS) { + struct ieee80211_bss_conf *bss_conf = + sdata_dereference(sdata->vif.link_conf[link_id], sdata); + + if (WARN_ON(!bss_conf) || !bss_conf->epcs_support) + return false; + } + + return true; +} + +int ieee80211_mgd_set_epcs(struct ieee80211_sub_if_data *sdata, bool enable) +{ + struct ieee80211_local *local = sdata->local; + struct ieee80211_mgmt *mgmt; + struct sk_buff *skb; + int frame_len = offsetofend(struct ieee80211_mgmt, + u.action.u.epcs) + (enable ? 1 : 0); + + if (!ieee80211_mgd_epcs_supp(sdata)) + return -EINVAL; + + if (sdata->u.mgd.epcs.enabled == enable && + !sdata->u.mgd.epcs.dialog_token) + return 0; + + /* Do not allow enabling EPCS if the AP didn't respond yet. + * However, allow disabling EPCS in such a case. 
+ */ + if (sdata->u.mgd.epcs.dialog_token && enable) + return -EALREADY; + + skb = dev_alloc_skb(local->hw.extra_tx_headroom + frame_len); + if (!skb) + return -ENOBUFS; + + skb_reserve(skb, local->hw.extra_tx_headroom); + mgmt = skb_put_zero(skb, frame_len); + mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | + IEEE80211_STYPE_ACTION); + memcpy(mgmt->da, sdata->vif.cfg.ap_addr, ETH_ALEN); + memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN); + memcpy(mgmt->bssid, sdata->vif.cfg.ap_addr, ETH_ALEN); + + mgmt->u.action.category = WLAN_CATEGORY_PROTECTED_EHT; + if (enable) { + u8 *pos = mgmt->u.action.u.epcs.variable; + + mgmt->u.action.u.epcs.action_code = + WLAN_PROTECTED_EHT_ACTION_EPCS_ENABLE_REQ; + + *pos = ++sdata->u.mgd.dialog_token_alloc; + sdata->u.mgd.epcs.dialog_token = *pos; + } else { + mgmt->u.action.u.epcs.action_code = + WLAN_PROTECTED_EHT_ACTION_EPCS_ENABLE_TEARDOWN; + + ieee80211_epcs_teardown(sdata); + ieee80211_epcs_changed(sdata, false); + } + + ieee80211_tx_skb(sdata, skb); + return 0; +} + +static void ieee80211_ml_epcs(struct ieee80211_sub_if_data *sdata, + struct ieee802_11_elems *elems) +{ + const struct element *sub; + size_t scratch_len = elems->ml_epcs_len; + u8 *scratch __free(kfree) = kzalloc(scratch_len, GFP_KERNEL); + + lockdep_assert_wiphy(sdata->local->hw.wiphy); + + if (!ieee80211_vif_is_mld(&sdata->vif) || !elems->ml_epcs) + return; + + if (WARN_ON(!scratch)) + return; + + /* Directly parse the sub elements as the common information doesn't + * hold any useful information. + */ + for_each_mle_subelement(sub, (const u8 *)elems->ml_epcs, + elems->ml_epcs_len) { + struct ieee80211_link_data *link; + struct ieee802_11_elems *link_elems __free(kfree); + u8 *pos = (void *)sub->data; + u16 control; + ssize_t len; + u8 link_id; + + if (sub->id != IEEE80211_MLE_SUBELEM_PER_STA_PROFILE) + continue; + + if (sub->datalen < sizeof(control)) + break; + + control = get_unaligned_le16(pos); + link_id = control & IEEE80211_MLE_STA_EPCS_CONTROL_LINK_ID; + + link = sdata_dereference(sdata->link[link_id], sdata); + if (!link) + continue; + + len = cfg80211_defragment_element(sub, (u8 *)elems->ml_epcs, + elems->ml_epcs_len, + scratch, scratch_len, + IEEE80211_MLE_SUBELEM_FRAGMENT); + if (len < (ssize_t)sizeof(control)) + continue; + + pos = scratch + sizeof(control); + len -= sizeof(control); + + link_elems = ieee802_11_parse_elems(pos, len, false, NULL); + if (!link_elems) + continue; + + if (ieee80211_sta_wmm_params(sdata->local, link, + link_elems->wmm_param, + link_elems->wmm_param_len, + link_elems->mu_edca_param_set)) + ieee80211_link_info_change_notify(sdata, link, + BSS_CHANGED_QOS); + } +} + +void ieee80211_process_epcs_ena_resp(struct ieee80211_sub_if_data *sdata, + struct ieee80211_mgmt *mgmt, size_t len) +{ + struct ieee802_11_elems *elems __free(kfree) = NULL; + size_t ies_len; + u16 status_code; + u8 *pos, dialog_token; + + if (!ieee80211_mgd_epcs_supp(sdata)) + return; + + /* Handle dialog token and status code */ + pos = mgmt->u.action.u.epcs.variable; + dialog_token = *pos; + status_code = get_unaligned_le16(pos + 1); + + /* An EPCS enable response with dialog token == 0 is an unsolicited + * notification from the AP MLD. 
In such a case, EPCS should already be + * enabled and status must be success + */ + if (!dialog_token && + (!sdata->u.mgd.epcs.enabled || + status_code != WLAN_STATUS_SUCCESS)) + return; + + if (sdata->u.mgd.epcs.dialog_token != dialog_token) + return; + + sdata->u.mgd.epcs.dialog_token = 0; + + if (status_code != WLAN_STATUS_SUCCESS) + return; + + pos += IEEE80211_EPCS_ENA_RESP_BODY_LEN; + ies_len = len - offsetof(struct ieee80211_mgmt, + u.action.u.epcs.variable) - + IEEE80211_EPCS_ENA_RESP_BODY_LEN; + + elems = ieee802_11_parse_elems(pos, ies_len, true, NULL); + if (!elems) + return; + + ieee80211_ml_epcs(sdata, elems); + ieee80211_epcs_changed(sdata, true); +} + +void ieee80211_process_epcs_teardown(struct ieee80211_sub_if_data *sdata, + struct ieee80211_mgmt *mgmt, size_t len) +{ + if (!ieee80211_vif_is_mld(&sdata->vif) || + !sdata->u.mgd.epcs.enabled) + return; + + ieee80211_epcs_teardown(sdata); + ieee80211_epcs_changed(sdata, false); +} diff --git a/net/mac80211/parse.c b/net/mac80211/parse.c index cd318c1c67bec..6da39c864f45b 100644 --- a/net/mac80211/parse.c +++ b/net/mac80211/parse.c @@ -47,6 +47,9 @@ struct ieee80211_elems_parse { /* The EPCS Multi-Link element in the original elements */ const struct element *ml_epcs_elem; + bool multi_link_inner; + bool skip_vendor; + /* * scratch buffer that can be used for various element parsing related * tasks, e.g., element de-fragmentation etc. @@ -152,12 +155,11 @@ ieee80211_parse_extension_element(u32 *crc, switch (le16_get_bits(mle->control, IEEE80211_ML_CONTROL_TYPE)) { case IEEE80211_ML_CONTROL_TYPE_BASIC: - if (elems_parse->ml_basic_elem) { + if (elems_parse->multi_link_inner) { elems->parse_error |= IEEE80211_PARSE_ERR_DUP_NEST_ML_BASIC; break; } - elems_parse->ml_basic_elem = elem; break; case IEEE80211_ML_CONTROL_TYPE_RECONF: elems_parse->ml_reconf_elem = elem; @@ -399,6 +401,9 @@ _ieee802_11_parse_elems_full(struct ieee80211_elems_parse_params *params, IEEE80211_PARSE_ERR_BAD_ELEM_SIZE; break; case WLAN_EID_VENDOR_SPECIFIC: + if (elems_parse->skip_vendor) + break; + if (elen >= 4 && pos[0] == 0x00 && pos[1] == 0x50 && pos[2] == 0xf2) { /* Microsoft OUI (00:50:F2) */ @@ -866,21 +871,36 @@ ieee80211_mle_get_sta_prof(struct ieee80211_elems_parse *elems_parse, } } -static void ieee80211_mle_parse_link(struct ieee80211_elems_parse *elems_parse, - struct ieee80211_elems_parse_params *params) +static const struct element * +ieee80211_prep_mle_link_parse(struct ieee80211_elems_parse *elems_parse, + struct ieee80211_elems_parse_params *params, + struct ieee80211_elems_parse_params *sub) { struct ieee802_11_elems *elems = &elems_parse->elems; struct ieee80211_mle_per_sta_profile *prof; - struct ieee80211_elems_parse_params sub = { - .mode = params->mode, - .action = params->action, - .from_ap = params->from_ap, - .link_id = -1, - }; - ssize_t ml_len = elems->ml_basic_len; - const struct element *non_inherit = NULL; + const struct element *tmp; + ssize_t ml_len; const u8 *end; + if (params->mode < IEEE80211_CONN_MODE_EHT) + return NULL; + + for_each_element_extid(tmp, WLAN_EID_EXT_EHT_MULTI_LINK, + elems->ie_start, elems->total_len) { + const struct ieee80211_multi_link_elem *mle = + (void *)tmp->data + 1; + + if (!ieee80211_mle_size_ok(tmp->data + 1, tmp->datalen - 1)) + continue; + + if (le16_get_bits(mle->control, IEEE80211_ML_CONTROL_TYPE) != + IEEE80211_ML_CONTROL_TYPE_BASIC) + continue; + + elems_parse->ml_basic_elem = tmp; + break; + } + ml_len = cfg80211_defragment_element(elems_parse->ml_basic_elem, elems->ie_start, 
elems->total_len, @@ -891,26 +911,26 @@ static void ieee80211_mle_parse_link(struct ieee80211_elems_parse *elems_parse, WLAN_EID_FRAGMENT); if (ml_len < 0) - return; + return NULL; elems->ml_basic = (const void *)elems_parse->scratch_pos; elems->ml_basic_len = ml_len; elems_parse->scratch_pos += ml_len; if (params->link_id == -1) - return; + return NULL; ieee80211_mle_get_sta_prof(elems_parse, params->link_id); prof = elems->prof; if (!prof) - return; + return NULL; /* check if we have the 4 bytes for the fixed part in assoc response */ if (elems->sta_prof_len < sizeof(*prof) + prof->sta_info_len - 1 + 4) { elems->prof = NULL; elems->sta_prof_len = 0; - return; + return NULL; } /* @@ -919,13 +939,17 @@ static void ieee80211_mle_parse_link(struct ieee80211_elems_parse *elems_parse, * the -1 is because the 'sta_info_len' is accounted to as part of the * per-STA profile, but not part of the 'u8 variable[]' portion. */ - sub.start = prof->variable + prof->sta_info_len - 1 + 4; + sub->start = prof->variable + prof->sta_info_len - 1 + 4; end = (const u8 *)prof + elems->sta_prof_len; - sub.len = end - sub.start; + sub->len = end - sub->start; - non_inherit = cfg80211_find_ext_elem(WLAN_EID_EXT_NON_INHERITANCE, - sub.start, sub.len); - _ieee802_11_parse_elems_full(&sub, elems_parse, non_inherit); + sub->mode = params->mode; + sub->action = params->action; + sub->from_ap = params->from_ap; + sub->link_id = -1; + + return cfg80211_find_ext_elem(WLAN_EID_EXT_NON_INHERITANCE, + sub->start, sub->len); } static void @@ -973,15 +997,19 @@ ieee80211_mle_defrag_epcs(struct ieee80211_elems_parse *elems_parse) struct ieee802_11_elems * ieee802_11_parse_elems_full(struct ieee80211_elems_parse_params *params) { + struct ieee80211_elems_parse_params sub = {}; struct ieee80211_elems_parse *elems_parse; - struct ieee802_11_elems *elems; const struct element *non_inherit = NULL; - u8 *nontransmitted_profile; - int nontransmitted_profile_len = 0; + struct ieee802_11_elems *elems; size_t scratch_len = 3 * params->len; + bool multi_link_inner = false; BUILD_BUG_ON(offsetof(typeof(*elems_parse), elems) != 0); + /* cannot parse for both a specific link and non-transmitted BSS */ + if (WARN_ON(params->link_id >= 0 && params->bss)) + return NULL; + elems_parse = kzalloc(struct_size(elems_parse, scratch, scratch_len), GFP_ATOMIC); if (!elems_parse) @@ -998,34 +1026,51 @@ ieee802_11_parse_elems_full(struct ieee80211_elems_parse_params *params) ieee80211_clear_tpe(&elems->tpe); ieee80211_clear_tpe(&elems->csa_tpe); - nontransmitted_profile = elems_parse->scratch_pos; - nontransmitted_profile_len = - ieee802_11_find_bssid_profile(params->start, params->len, - elems, params->bss, - nontransmitted_profile); - elems_parse->scratch_pos += nontransmitted_profile_len; - non_inherit = cfg80211_find_ext_elem(WLAN_EID_EXT_NON_INHERITANCE, - nontransmitted_profile, - nontransmitted_profile_len); + /* + * If we're looking for a non-transmitted BSS then we cannot at + * the same time be looking for a second link as the two can only + * appear in the same frame carrying info for different BSSes. + * + * In any case, we only look for one at a time, as encoded by + * the WARN_ON above. 
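 *
 * [Editor's note: callers therefore request at most one of the two
 * parse variants; a hypothetical caller wanting the per-STA profile
 * of link 2 would leave bss unset:
 *
 *	struct ieee80211_elems_parse_params p = {
 *		.start = ies,
 *		.len = ies_len,
 *		.link_id = 2,
 *		.bss = NULL,
 *	};
 *	elems = ieee802_11_parse_elems_full(&p);
 * ]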
+ */ + if (params->bss) { + int nontx_len = + ieee802_11_find_bssid_profile(params->start, + params->len, + elems, params->bss, + elems_parse->scratch_pos); + sub.start = elems_parse->scratch_pos; + sub.mode = params->mode; + sub.len = nontx_len; + sub.action = params->action; + sub.link_id = params->link_id; + + /* consume the space used for non-transmitted profile */ + elems_parse->scratch_pos += nontx_len; + + non_inherit = cfg80211_find_ext_elem(WLAN_EID_EXT_NON_INHERITANCE, + sub.start, nontx_len); + } else { + /* must always parse to get elems_parse->ml_basic_elem */ + non_inherit = ieee80211_prep_mle_link_parse(elems_parse, params, + &sub); + multi_link_inner = true; + } + elems_parse->skip_vendor = + cfg80211_find_elem(WLAN_EID_VENDOR_SPECIFIC, + sub.start, sub.len); elems->crc = _ieee802_11_parse_elems_full(params, elems_parse, non_inherit); - /* Override with nontransmitted profile, if found */ - if (nontransmitted_profile_len) { - struct ieee80211_elems_parse_params sub = { - .mode = params->mode, - .start = nontransmitted_profile, - .len = nontransmitted_profile_len, - .action = params->action, - .link_id = params->link_id, - }; - + /* Override with nontransmitted/per-STA profile if found */ + if (sub.len) { + elems_parse->multi_link_inner = multi_link_inner; + elems_parse->skip_vendor = false; _ieee802_11_parse_elems_full(&sub, elems_parse, NULL); } - ieee80211_mle_parse_link(elems_parse, params); - ieee80211_mle_defrag_reconf(elems_parse); ieee80211_mle_defrag_epcs(elems_parse); diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 1e28efe4203c0..f7f89cd1b7d7d 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -6,7 +6,7 @@ * Copyright 2007-2010 Johannes Berg * Copyright 2013-2014 Intel Mobile Communications GmbH * Copyright(c) 2015 - 2017 Intel Deutschland GmbH - * Copyright (C) 2018-2024 Intel Corporation + * Copyright (C) 2018-2025 Intel Corporation */ #include @@ -1045,14 +1045,14 @@ static ieee80211_rx_result ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx) if (is_multicast_ether_addr(hdr->addr1)) { if (ieee80211_has_tods(hdr->frame_control) || !ieee80211_has_fromds(hdr->frame_control)) - return RX_DROP_MONITOR; + return RX_DROP; if (ether_addr_equal(hdr->addr3, dev_addr)) - return RX_DROP_MONITOR; + return RX_DROP; } else { if (!ieee80211_has_a4(hdr->frame_control)) - return RX_DROP_MONITOR; + return RX_DROP; if (ether_addr_equal(hdr->addr4, dev_addr)) - return RX_DROP_MONITOR; + return RX_DROP; } } @@ -1064,20 +1064,20 @@ static ieee80211_rx_result ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx) struct ieee80211_mgmt *mgmt; if (!ieee80211_is_mgmt(hdr->frame_control)) - return RX_DROP_MONITOR; + return RX_DROP; if (ieee80211_is_action(hdr->frame_control)) { u8 category; /* make sure category field is present */ if (rx->skb->len < IEEE80211_MIN_ACTION_SIZE) - return RX_DROP_MONITOR; + return RX_DROP; mgmt = (struct ieee80211_mgmt *)hdr; category = mgmt->u.action.category; if (category != WLAN_CATEGORY_MESH_ACTION && category != WLAN_CATEGORY_SELF_PROTECTED) - return RX_DROP_MONITOR; + return RX_DROP; return RX_CONTINUE; } @@ -1087,7 +1087,7 @@ static ieee80211_rx_result ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx) ieee80211_is_auth(hdr->frame_control)) return RX_CONTINUE; - return RX_DROP_MONITOR; + return RX_DROP; } return RX_CONTINUE; @@ -1513,7 +1513,7 @@ ieee80211_rx_h_check(struct ieee80211_rx_data *rx) hdrlen = ieee80211_hdrlen(hdr->frame_control); if (rx->skb->len < hdrlen + 8) - return RX_DROP_MONITOR; + return RX_DROP; 
+ return RX_DROP; skb_copy_bits(rx->skb, hdrlen + 6, &ethertype, 2); if (ethertype == rx->sdata->control_port_protocol) @@ -1526,7 +1526,7 @@ ieee80211_rx_h_check(struct ieee80211_rx_data *rx) GFP_ATOMIC)) return RX_DROP_U_SPURIOUS; - return RX_DROP_MONITOR; + return RX_DROP; } return RX_CONTINUE; @@ -1862,7 +1862,7 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx) cfg80211_rx_unexpected_4addr_frame( rx->sdata->dev, sta->sta.addr, GFP_ATOMIC); - return RX_DROP_M_UNEXPECTED_4ADDR_FRAME; + return RX_DROP_U_UNEXPECTED_4ADDR_FRAME; } /* * Update counter and free packet here to avoid @@ -1997,7 +1997,7 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx) cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev, skb->data, skb->len); - return RX_DROP_M_BAD_BCN_KEYIDX; + return RX_DROP_U_BAD_BCN_KEYIDX; } rx->key = ieee80211_rx_get_bigtk(rx, mmie_keyidx); @@ -2011,11 +2011,11 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx) if (mmie_keyidx < NUM_DEFAULT_KEYS || mmie_keyidx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS) - return RX_DROP_M_BAD_MGMT_KEYIDX; /* unexpected BIP keyidx */ + return RX_DROP_U_BAD_MGMT_KEYIDX; /* unexpected BIP keyidx */ if (rx->link_sta) { if (ieee80211_is_group_privacy_action(skb) && test_sta_flag(rx->sta, WLAN_STA_MFP)) - return RX_DROP_MONITOR; + return RX_DROP; rx->key = rcu_dereference(rx->link_sta->gtk[mmie_keyidx]); } @@ -2100,11 +2100,11 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx) if (rx->key) { if (unlikely(rx->key->flags & KEY_FLAG_TAINTED)) - return RX_DROP_MONITOR; + return RX_DROP; /* TODO: add threshold stuff again */ } else { - return RX_DROP_MONITOR; + return RX_DROP; } switch (rx->key->conf.cipher) { @@ -2278,7 +2278,7 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx) goto out; if (is_multicast_ether_addr(hdr->addr1)) - return RX_DROP_MONITOR; + return RX_DROP; I802_DEBUG_INC(rx->local->rx_handlers_fragments); @@ -2333,7 +2333,7 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx) rx->seqno_idx, hdr); if (!entry) { I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag); - return RX_DROP_MONITOR; + return RX_DROP; } /* "The receiver shall discard MSDUs and MMPDUs whose constituent @@ -2855,25 +2855,25 @@ ieee80211_rx_mesh_data(struct ieee80211_sub_if_data *sdata, struct sta_info *sta return RX_CONTINUE; if (!pskb_may_pull(skb, sizeof(*eth) + 6)) - return RX_DROP_MONITOR; + return RX_DROP; mesh_hdr = (struct ieee80211s_hdr *)(skb->data + sizeof(*eth)); mesh_hdrlen = ieee80211_get_mesh_hdrlen(mesh_hdr); if (!pskb_may_pull(skb, sizeof(*eth) + mesh_hdrlen)) - return RX_DROP_MONITOR; + return RX_DROP; eth = (struct ethhdr *)skb->data; multicast = is_multicast_ether_addr(eth->h_dest); mesh_hdr = (struct ieee80211s_hdr *)(eth + 1); if (!mesh_hdr->ttl) - return RX_DROP_MONITOR; + return RX_DROP; /* frame is in RMC, don't forward */ if (is_multicast_ether_addr(eth->h_dest) && mesh_rmc_check(sdata, eth->h_source, mesh_hdr)) - return RX_DROP_MONITOR; + return RX_DROP; /* forward packet */ if (sdata->crypto_tx_tailroom_needed_cnt) @@ -2890,7 +2890,7 @@ ieee80211_rx_mesh_data(struct ieee80211_sub_if_data *sdata, struct sta_info *sta /* has_a4 already checked in ieee80211_rx_mesh_check */ proxied_addr = mesh_hdr->eaddr2; else - return RX_DROP_MONITOR; + return RX_DROP; rcu_read_lock(); mppath = mpp_path_lookup(sdata, proxied_addr); @@ -2922,14 +2922,14 @@ ieee80211_rx_mesh_data(struct ieee80211_sub_if_data *sdata, struct sta_info *sta goto rx_accept; IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_ttl); - return RX_DROP_MONITOR; + return RX_DROP; } if
(!ifmsh->mshcfg.dot11MeshForwarding) { if (is_multicast_ether_addr(eth->h_dest)) goto rx_accept; - return RX_DROP_MONITOR; + return RX_DROP; } skb_set_queue_mapping(skb, ieee802_1d_to_ac[skb->priority]); @@ -3122,7 +3122,7 @@ ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx) return RX_CONTINUE; if (unlikely(!ieee80211_is_data_present(fc))) - return RX_DROP_MONITOR; + return RX_DROP; if (unlikely(ieee80211_has_a4(hdr->frame_control))) { switch (rx->sdata->vif.type) { @@ -3179,19 +3179,16 @@ ieee80211_rx_h_data(struct ieee80211_rx_data *rx) return RX_CONTINUE; if (unlikely(!ieee80211_is_data_present(hdr->frame_control))) - return RX_DROP_MONITOR; + return RX_DROP; - /* - * Send unexpected-4addr-frame event to hostapd. For older versions, - * also drop the frame to cooked monitor interfaces. - */ + /* Send unexpected-4addr-frame event to hostapd */ if (ieee80211_has_a4(hdr->frame_control) && sdata->vif.type == NL80211_IFTYPE_AP) { if (rx->sta && !test_and_set_sta_flag(rx->sta, WLAN_STA_4ADDR_EVENT)) cfg80211_rx_unexpected_4addr_frame( rx->sdata->dev, rx->sta->sta.addr, GFP_ATOMIC); - return RX_DROP_MONITOR; + return RX_DROP; } res = __ieee80211_data_to_8023(rx, &port_control); @@ -3203,7 +3200,7 @@ ieee80211_rx_h_data(struct ieee80211_rx_data *rx) return res; if (!ieee80211_frame_allowed(rx, fc)) - return RX_DROP_MONITOR; + return RX_DROP; /* directly handle TDLS channel switch requests/responses */ if (unlikely(((struct ethhdr *)rx->skb->data)->h_proto == @@ -3268,11 +3265,11 @@ ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx, struct sk_buff_head *frames) }; if (!rx->sta) - return RX_DROP_MONITOR; + return RX_DROP; if (skb_copy_bits(skb, offsetof(struct ieee80211_bar, control), &bar_data, sizeof(bar_data))) - return RX_DROP_MONITOR; + return RX_DROP; tid = le16_to_cpu(bar_data.control) >> 12; @@ -3284,7 +3281,7 @@ ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx, struct sk_buff_head *frames) tid_agg_rx = rcu_dereference(rx->sta->ampdu_mlme.tid_rx[tid]); if (!tid_agg_rx) - return RX_DROP_MONITOR; + return RX_DROP; start_seq_num = le16_to_cpu(bar_data.start_seq_num) >> 4; event.u.ba.tid = tid; @@ -3308,12 +3305,7 @@ ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx, struct sk_buff_head *frames) return RX_QUEUED; } - /* - * After this point, we only want management frames, - * so we can drop all remaining control frames to - * cooked monitor interfaces. - */ - return RX_DROP_MONITOR; + return RX_DROP; } static void ieee80211_process_sa_query_req(struct ieee80211_sub_if_data *sdata, @@ -3329,8 +3321,8 @@ static void ieee80211_process_sa_query_req(struct ieee80211_sub_if_data *sdata, return; } - if (!ether_addr_equal(mgmt->sa, sdata->deflink.u.mgd.bssid) || - !ether_addr_equal(mgmt->bssid, sdata->deflink.u.mgd.bssid)) { + if (!ether_addr_equal(mgmt->sa, sdata->vif.cfg.ap_addr) || + !ether_addr_equal(mgmt->bssid, sdata->vif.cfg.ap_addr)) { /* Not from the current AP or not associated yet. 
*/ return; } @@ -3346,9 +3338,9 @@ static void ieee80211_process_sa_query_req(struct ieee80211_sub_if_data *sdata, skb_reserve(skb, local->hw.extra_tx_headroom); resp = skb_put_zero(skb, 24); - memcpy(resp->da, mgmt->sa, ETH_ALEN); + memcpy(resp->da, sdata->vif.cfg.ap_addr, ETH_ALEN); memcpy(resp->sa, sdata->vif.addr, ETH_ALEN); - memcpy(resp->bssid, sdata->deflink.u.mgd.bssid, ETH_ALEN); + memcpy(resp->bssid, sdata->vif.cfg.ap_addr, ETH_ALEN); resp->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_ACTION); skb_put(skb, 1 + sizeof(resp->u.action.u.sa_query)); @@ -3422,10 +3414,10 @@ ieee80211_rx_h_mgmt_check(struct ieee80211_rx_data *rx) * and unknown (reserved) frames are useless. */ if (rx->skb->len < 24) - return RX_DROP_MONITOR; + return RX_DROP; if (!ieee80211_is_mgmt(mgmt->frame_control)) - return RX_DROP_MONITOR; + return RX_DROP; /* drop too small action frames */ if (ieee80211_is_action(mgmt->frame_control) && @@ -3819,6 +3811,14 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx) u.action.u.ttlm_res)) goto invalid; goto queue; + case WLAN_PROTECTED_EHT_ACTION_TTLM_TEARDOWN: + if (sdata->vif.type != NL80211_IFTYPE_STATION) + break; + + if (len < offsetofend(typeof(*mgmt), + u.action.u.ttlm_tear_down)) + goto invalid; + goto queue; case WLAN_PROTECTED_EHT_ACTION_LINK_RECONFIG_RESP: if (sdata->vif.type != NL80211_IFTYPE_STATION) break; @@ -3831,6 +3831,23 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx) u.action.u.ml_reconf_resp) + 3) goto invalid; goto queue; + case WLAN_PROTECTED_EHT_ACTION_EPCS_ENABLE_RESP: + if (sdata->vif.type != NL80211_IFTYPE_STATION) + break; + + if (len < offsetofend(typeof(*mgmt), + u.action.u.epcs) + + IEEE80211_EPCS_ENA_RESP_BODY_LEN) + goto invalid; + goto queue; + case WLAN_PROTECTED_EHT_ACTION_EPCS_ENABLE_TEARDOWN: + if (sdata->vif.type != NL80211_IFTYPE_STATION) + break; + + if (len < offsetofend(typeof(*mgmt), + u.action.u.epcs)) + goto invalid; + goto queue; default: break; } @@ -3951,17 +3968,16 @@ ieee80211_rx_h_action_return(struct ieee80211_rx_data *rx) * ones. For all other modes we will return them to the sender, * setting the 0x80 bit in the action category, as required by * 802.11-2012 9.24.4. - * Newer versions of hostapd shall also use the management frame - * registration mechanisms, but older ones still use cooked - * monitor interfaces so push all frames there. + * Newer versions of hostapd use the management frame registration + * mechanisms and the old cooked monitor interface is no longer supported.
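 *
 * [Editor's note: with cooked monitor removed throughout this file,
 * every former RX_DROP_MONITOR path now resolves to RX_DROP, which
 * still encodes an skb drop reason; dropped frames therefore remain
 * observable via kfree_skb_reason(), as in the handler result path
 * below:
 *
 *	kfree_skb_reason(rx->skb, (__force u32)res);
 * ]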
*/ if (!(status->rx_flags & IEEE80211_RX_MALFORMED_ACTION_FRM) && (sdata->vif.type == NL80211_IFTYPE_AP || sdata->vif.type == NL80211_IFTYPE_AP_VLAN)) - return RX_DROP_MONITOR; + return RX_DROP; if (is_multicast_ether_addr(mgmt->da)) - return RX_DROP_MONITOR; + return RX_DROP; /* do not return rejected action frames */ if (mgmt->u.action.category & 0x80) @@ -4006,7 +4022,7 @@ ieee80211_rx_h_ext(struct ieee80211_rx_data *rx) return RX_CONTINUE; if (sdata->vif.type != NL80211_IFTYPE_STATION) - return RX_DROP_MONITOR; + return RX_DROP; /* for now only beacons are ext, so queue them */ ieee80211_queue_skb_to_iface(sdata, rx->link_id, rx->sta, rx->skb); @@ -4027,7 +4043,7 @@ ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx) sdata->vif.type != NL80211_IFTYPE_ADHOC && sdata->vif.type != NL80211_IFTYPE_OCB && sdata->vif.type != NL80211_IFTYPE_STATION) - return RX_DROP_MONITOR; + return RX_DROP; switch (stype) { case cpu_to_le16(IEEE80211_STYPE_AUTH): @@ -4038,32 +4054,32 @@ ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx) case cpu_to_le16(IEEE80211_STYPE_DEAUTH): if (is_multicast_ether_addr(mgmt->da) && !is_broadcast_ether_addr(mgmt->da)) - return RX_DROP_MONITOR; + return RX_DROP; /* process only for station/IBSS */ if (sdata->vif.type != NL80211_IFTYPE_STATION && sdata->vif.type != NL80211_IFTYPE_ADHOC) - return RX_DROP_MONITOR; + return RX_DROP; break; case cpu_to_le16(IEEE80211_STYPE_ASSOC_RESP): case cpu_to_le16(IEEE80211_STYPE_REASSOC_RESP): case cpu_to_le16(IEEE80211_STYPE_DISASSOC): if (is_multicast_ether_addr(mgmt->da) && !is_broadcast_ether_addr(mgmt->da)) - return RX_DROP_MONITOR; + return RX_DROP; /* process only for station */ if (sdata->vif.type != NL80211_IFTYPE_STATION) - return RX_DROP_MONITOR; + return RX_DROP; break; case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ): /* process only for ibss and mesh */ if (sdata->vif.type != NL80211_IFTYPE_ADHOC && sdata->vif.type != NL80211_IFTYPE_MESH_POINT) - return RX_DROP_MONITOR; + return RX_DROP; break; default: - return RX_DROP_MONITOR; + return RX_DROP; } ieee80211_queue_skb_to_iface(sdata, rx->link_id, rx->sta, rx->skb); @@ -4071,82 +4087,9 @@ ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx) return RX_QUEUED; } -static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx, - struct ieee80211_rate *rate, - ieee80211_rx_result reason) -{ - struct ieee80211_sub_if_data *sdata; - struct ieee80211_local *local = rx->local; - struct sk_buff *skb = rx->skb, *skb2; - struct net_device *prev_dev = NULL; - struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); - int needed_headroom; - - /* - * If cooked monitor has been processed already, then - * don't do it again. If not, set the flag. 
- */ - if (rx->flags & IEEE80211_RX_CMNTR) - goto out_free_skb; - rx->flags |= IEEE80211_RX_CMNTR; - - /* If there are no cooked monitor interfaces, just free the SKB */ - if (!local->cooked_mntrs) - goto out_free_skb; - - /* room for the radiotap header based on driver features */ - needed_headroom = ieee80211_rx_radiotap_hdrlen(local, status, skb); - - if (skb_headroom(skb) < needed_headroom && - pskb_expand_head(skb, needed_headroom, 0, GFP_ATOMIC)) - goto out_free_skb; - - /* prepend radiotap information */ - ieee80211_add_rx_radiotap_header(local, skb, rate, needed_headroom, - false); - - skb_reset_mac_header(skb); - skb->ip_summed = CHECKSUM_UNNECESSARY; - skb->pkt_type = PACKET_OTHERHOST; - skb->protocol = htons(ETH_P_802_2); - - list_for_each_entry_rcu(sdata, &local->interfaces, list) { - if (!ieee80211_sdata_running(sdata)) - continue; - - if (sdata->vif.type != NL80211_IFTYPE_MONITOR || - !(sdata->u.mntr.flags & MONITOR_FLAG_COOK_FRAMES)) - continue; - - if (prev_dev) { - skb2 = skb_clone(skb, GFP_ATOMIC); - if (skb2) { - skb2->dev = prev_dev; - netif_receive_skb(skb2); - } - } - - prev_dev = sdata->dev; - dev_sw_netstats_rx_add(sdata->dev, skb->len); - } - - if (prev_dev) { - skb->dev = prev_dev; - netif_receive_skb(skb); - return; - } - - out_free_skb: - kfree_skb_reason(skb, (__force u32)reason); -} - static void ieee80211_rx_handlers_result(struct ieee80211_rx_data *rx, ieee80211_rx_result res) { - struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); - struct ieee80211_supported_band *sband; - struct ieee80211_rate *rate = NULL; - if (res == RX_QUEUED) { I802_DEBUG_INC(rx->sdata->local->rx_handlers_queued); return; @@ -4158,23 +4101,13 @@ static void ieee80211_rx_handlers_result(struct ieee80211_rx_data *rx, rx->link_sta->rx_stats.dropped++; } - if (u32_get_bits((__force u32)res, SKB_DROP_REASON_SUBSYS_MASK) == - SKB_DROP_REASON_SUBSYS_MAC80211_UNUSABLE) { - kfree_skb_reason(rx->skb, (__force u32)res); - return; - } - - sband = rx->local->hw.wiphy->bands[status->band]; - if (status->encoding == RX_ENC_LEGACY) - rate = &sband->bitrates[status->rate_idx]; - - ieee80211_rx_cooked_monitor(rx, rate, res); + kfree_skb_reason(rx->skb, (__force u32)res); } static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx, struct sk_buff_head *frames) { - ieee80211_rx_result res = RX_DROP_MONITOR; + ieee80211_rx_result res = RX_DROP; struct sk_buff *skb; #define CALL_RXH(rxh) \ @@ -4238,7 +4171,7 @@ static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx, static void ieee80211_invoke_rx_handlers(struct ieee80211_rx_data *rx) { struct sk_buff_head reorder_release; - ieee80211_rx_result res = RX_DROP_MONITOR; + ieee80211_rx_result res = RX_DROP; __skb_queue_head_init(&reorder_release); diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index f83268fa9f928..30cdc783999db 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c @@ -4,7 +4,7 @@ * Copyright 2006-2007 Jiri Benc * Copyright 2013-2014 Intel Mobile Communications GmbH * Copyright (C) 2015 - 2017 Intel Deutschland GmbH - * Copyright (C) 2018-2023 Intel Corporation + * Copyright (C) 2018-2024 Intel Corporation */ #include @@ -1335,9 +1335,13 @@ static int _sta_info_move_state(struct sta_info *sta, sta->sta.addr, new_state); /* notify the driver before the actual changes so it can - * fail the transition + * fail the transition if the state is increasing. 
+ * The driver is required not to fail when the transition + * is decreasing the state, so first, do all the preparation + * work and only then, notify the driver. */ - if (test_sta_flag(sta, WLAN_STA_INSERTED)) { + if (new_state > sta->sta_state && + test_sta_flag(sta, WLAN_STA_INSERTED)) { int err = drv_sta_state(sta->local, sta->sdata, sta, sta->sta_state, new_state); if (err) @@ -1413,6 +1417,16 @@ static int _sta_info_move_state(struct sta_info *sta, break; } + if (new_state < sta->sta_state && + test_sta_flag(sta, WLAN_STA_INSERTED)) { + int err = drv_sta_state(sta->local, sta->sdata, sta, + sta->sta_state, new_state); + + WARN_ONCE(err, + "Driver is not allowed to fail if the sta_state is transitioning down the list: %d\n", + err); + } + sta->sta_state = new_state; return 0; @@ -2584,6 +2598,39 @@ static inline u64 sta_get_stats_bytes(struct ieee80211_sta_rx_stats *rxstats) return value; } +#ifdef CONFIG_MAC80211_MESH +static void sta_set_mesh_sinfo(struct sta_info *sta, + struct station_info *sinfo) +{ + struct ieee80211_local *local = sta->sdata->local; + + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_LLID) | + BIT_ULL(NL80211_STA_INFO_PLID) | + BIT_ULL(NL80211_STA_INFO_PLINK_STATE) | + BIT_ULL(NL80211_STA_INFO_LOCAL_PM) | + BIT_ULL(NL80211_STA_INFO_PEER_PM) | + BIT_ULL(NL80211_STA_INFO_NONPEER_PM) | + BIT_ULL(NL80211_STA_INFO_CONNECTED_TO_GATE) | + BIT_ULL(NL80211_STA_INFO_CONNECTED_TO_AS); + + sinfo->llid = sta->mesh->llid; + sinfo->plid = sta->mesh->plid; + sinfo->plink_state = sta->mesh->plink_state; + if (test_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN)) { + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_T_OFFSET); + sinfo->t_offset = sta->mesh->t_offset; + } + sinfo->local_pm = sta->mesh->local_pm; + sinfo->peer_pm = sta->mesh->peer_pm; + sinfo->nonpeer_pm = sta->mesh->nonpeer_pm; + sinfo->connected_to_gate = sta->mesh->connected_to_gate; + sinfo->connected_to_as = sta->mesh->connected_to_as; + + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_AIRTIME_LINK_METRIC); + sinfo->airtime_link_metric = airtime_link_metric_get(local, sta); +} +#endif + void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo, bool tidstats) { @@ -2768,31 +2815,10 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo, sta_set_tidstats(sta, &sinfo->pertid[i], i); } - if (ieee80211_vif_is_mesh(&sdata->vif)) { #ifdef CONFIG_MAC80211_MESH - sinfo->filled |= BIT_ULL(NL80211_STA_INFO_LLID) | - BIT_ULL(NL80211_STA_INFO_PLID) | - BIT_ULL(NL80211_STA_INFO_PLINK_STATE) | - BIT_ULL(NL80211_STA_INFO_LOCAL_PM) | - BIT_ULL(NL80211_STA_INFO_PEER_PM) | - BIT_ULL(NL80211_STA_INFO_NONPEER_PM) | - BIT_ULL(NL80211_STA_INFO_CONNECTED_TO_GATE) | - BIT_ULL(NL80211_STA_INFO_CONNECTED_TO_AS); - - sinfo->llid = sta->mesh->llid; - sinfo->plid = sta->mesh->plid; - sinfo->plink_state = sta->mesh->plink_state; - if (test_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN)) { - sinfo->filled |= BIT_ULL(NL80211_STA_INFO_T_OFFSET); - sinfo->t_offset = sta->mesh->t_offset; - } - sinfo->local_pm = sta->mesh->local_pm; - sinfo->peer_pm = sta->mesh->peer_pm; - sinfo->nonpeer_pm = sta->mesh->nonpeer_pm; - sinfo->connected_to_gate = sta->mesh->connected_to_gate; - sinfo->connected_to_as = sta->mesh->connected_to_as; + if (ieee80211_vif_is_mesh(&sdata->vif)) + sta_set_mesh_sinfo(sta, sinfo); #endif - } sinfo->bss_param.flags = 0; if (sdata->vif.bss_conf.use_cts_prot) @@ -2848,12 +2874,6 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo, sinfo->filled |= BIT_ULL(NL80211_STA_INFO_ACK_SIGNAL_AVG); } - - if 
(ieee80211_vif_is_mesh(&sdata->vif)) { - sinfo->filled |= BIT_ULL(NL80211_STA_INFO_AIRTIME_LINK_METRIC); - sinfo->airtime_link_metric = - airtime_link_metric_get(local, sta); - } } u32 sta_get_expected_throughput(struct sta_info *sta) diff --git a/net/mac80211/status.c b/net/mac80211/status.c index 5f28f3633fa0a..b17b3cc7fb903 100644 --- a/net/mac80211/status.c +++ b/net/mac80211/status.c @@ -895,8 +895,7 @@ static int ieee80211_tx_get_rates(struct ieee80211_hw *hw, } void ieee80211_tx_monitor(struct ieee80211_local *local, struct sk_buff *skb, - int retry_count, bool send_to_cooked, - struct ieee80211_tx_status *status) + int retry_count, struct ieee80211_tx_status *status) { struct sk_buff *skb2; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); @@ -930,10 +929,6 @@ void ieee80211_tx_monitor(struct ieee80211_local *local, struct sk_buff *skb, if (sdata->u.mntr.flags & MONITOR_FLAG_SKIP_TX) continue; - if ((sdata->u.mntr.flags & MONITOR_FLAG_COOK_FRAMES) && - !send_to_cooked) - continue; - if (prev_dev) { skb2 = skb_clone(skb, GFP_ATOMIC); if (skb2) { @@ -964,7 +959,6 @@ static void __ieee80211_tx_status(struct ieee80211_hw *hw, struct ieee80211_tx_info *info = status->info; struct sta_info *sta; __le16 fc; - bool send_to_cooked; bool acked; bool noack_success; struct ieee80211_bar *bar; @@ -1091,28 +1085,10 @@ static void __ieee80211_tx_status(struct ieee80211_hw *hw, ieee80211_report_used_skb(local, skb, false, status->ack_hwtstamp); - /* this was a transmitted frame, but now we want to reuse it */ - skb_orphan(skb); - - /* Need to make a copy before skb->cb gets cleared */ - send_to_cooked = !!(info->flags & IEEE80211_TX_CTL_INJECTED) || - !(ieee80211_is_data(fc)); - - /* - * This is a bit racy but we can avoid a lot of work - * with this test... 
- */ - if (!local->tx_mntrs && (!send_to_cooked || !local->cooked_mntrs)) { - if (status->free_list) - list_add_tail(&skb->list, status->free_list); - else - dev_kfree_skb(skb); - return; - } - - /* send to monitor interfaces */ - ieee80211_tx_monitor(local, skb, retry_count, - send_to_cooked, status); + if (status->free_list) + list_add_tail(&skb->list, status->free_list); + else + dev_kfree_skb(skb); } void ieee80211_tx_status_skb(struct ieee80211_hw *hw, struct sk_buff *skb) diff --git a/net/mac80211/tests/Makefile b/net/mac80211/tests/Makefile index 0f5336bc7314e..3b0c08356fc55 100644 --- a/net/mac80211/tests/Makefile +++ b/net/mac80211/tests/Makefile @@ -1,3 +1,3 @@ -mac80211-tests-y += module.o util.o elems.o mfp.o tpe.o +mac80211-tests-y += module.o util.o elems.o mfp.o tpe.o chan-mode.o obj-$(CONFIG_MAC80211_KUNIT_TEST) += mac80211-tests.o diff --git a/net/mac80211/tests/chan-mode.c b/net/mac80211/tests/chan-mode.c new file mode 100644 index 0000000000000..96c7b3ab27444 --- /dev/null +++ b/net/mac80211/tests/chan-mode.c @@ -0,0 +1,254 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * KUnit tests for channel mode functions + * + * Copyright (C) 2024 Intel Corporation + */ +#include +#include + +#include "util.h" + +MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING"); + +static const struct determine_chan_mode_case { + const char *desc; + u8 extra_supp_rate; + enum ieee80211_conn_mode conn_mode; + enum ieee80211_conn_mode expected_mode; + bool strict; + u8 userspace_selector; + struct ieee80211_ht_cap ht_capa_mask; + struct ieee80211_vht_cap vht_capa; + struct ieee80211_vht_cap vht_capa_mask; + u8 vht_basic_mcs_1_4_set:1, + vht_basic_mcs_5_8_set:1, + he_basic_mcs_1_4_set:1, + he_basic_mcs_5_8_set:1; + u8 vht_basic_mcs_1_4, vht_basic_mcs_5_8; + u8 he_basic_mcs_1_4, he_basic_mcs_5_8; + u8 eht_mcs7_min_nss; + int error; +} determine_chan_mode_cases[] = { + { + .desc = "Normal case, EHT is working", + .conn_mode = IEEE80211_CONN_MODE_EHT, + .expected_mode = IEEE80211_CONN_MODE_EHT, + }, { + .desc = "Requiring EHT support is fine", + .conn_mode = IEEE80211_CONN_MODE_EHT, + .expected_mode = IEEE80211_CONN_MODE_EHT, + .extra_supp_rate = 0x80 | BSS_MEMBERSHIP_SELECTOR_EHT_PHY, + }, { + .desc = "Lowering the mode limits us", + .conn_mode = IEEE80211_CONN_MODE_VHT, + .expected_mode = IEEE80211_CONN_MODE_VHT, + }, { + .desc = "Requesting a basic rate/selector that we do not support", + .conn_mode = IEEE80211_CONN_MODE_EHT, + .extra_supp_rate = 0x80 | (BSS_MEMBERSHIP_SELECTOR_MIN - 1), + .error = EINVAL, + }, { + .desc = "As before, but userspace says it is taking care of it", + .conn_mode = IEEE80211_CONN_MODE_EHT, + .userspace_selector = BSS_MEMBERSHIP_SELECTOR_MIN - 1, + .extra_supp_rate = 0x80 | (BSS_MEMBERSHIP_SELECTOR_MIN - 1), + .expected_mode = IEEE80211_CONN_MODE_EHT, + }, { + .desc = "Masking out a supported rate in HT capabilities", + .conn_mode = IEEE80211_CONN_MODE_EHT, + .expected_mode = IEEE80211_CONN_MODE_LEGACY, + .ht_capa_mask = { + .mcs.rx_mask[0] = 0xf7, + }, + }, { + .desc = "Masking out a RX rate in VHT capabilities", + .conn_mode = IEEE80211_CONN_MODE_EHT, + .expected_mode = IEEE80211_CONN_MODE_HT, + /* Only one RX stream at MCS 0-7 */ + .vht_capa = { + .supp_mcs.rx_mcs_map = + cpu_to_le16(IEEE80211_VHT_MCS_SUPPORT_0_7), + }, + .vht_capa_mask = { + .supp_mcs.rx_mcs_map = cpu_to_le16(0xffff), + }, + .strict = true, + }, { + .desc = "Masking out a TX rate in VHT capabilities", + .conn_mode = IEEE80211_CONN_MODE_EHT, + .expected_mode = IEEE80211_CONN_MODE_HT, + /* Only one TX 
stream at MCS 0-7 */ + .vht_capa = { + .supp_mcs.tx_mcs_map = + cpu_to_le16(IEEE80211_VHT_MCS_SUPPORT_0_7), + }, + .vht_capa_mask = { + .supp_mcs.tx_mcs_map = cpu_to_le16(0xffff), + }, + .strict = true, + }, { + .desc = "AP has higher VHT requirement than client", + .conn_mode = IEEE80211_CONN_MODE_EHT, + .expected_mode = IEEE80211_CONN_MODE_HT, + .vht_basic_mcs_5_8_set = 1, + .vht_basic_mcs_5_8 = 0xFE, /* require 5th stream */ + .strict = true, + }, { + .desc = "all zero VHT basic rates are ignored (many APs broken)", + .conn_mode = IEEE80211_CONN_MODE_VHT, + .expected_mode = IEEE80211_CONN_MODE_VHT, + .vht_basic_mcs_1_4_set = 1, + .vht_basic_mcs_5_8_set = 1, + }, { + .desc = "AP requires 3 HE streams but client only has two", + .conn_mode = IEEE80211_CONN_MODE_EHT, + .expected_mode = IEEE80211_CONN_MODE_VHT, + .he_basic_mcs_1_4 = 0b11001010, + .he_basic_mcs_1_4_set = 1, + }, { + .desc = "all zero HE basic rates are ignored (iPhone workaround)", + .conn_mode = IEEE80211_CONN_MODE_HE, + .expected_mode = IEEE80211_CONN_MODE_HE, + .he_basic_mcs_1_4_set = 1, + .he_basic_mcs_5_8_set = 1, + }, { + .desc = "AP requires too many RX streams with EHT MCS 7", + .conn_mode = IEEE80211_CONN_MODE_EHT, + .expected_mode = IEEE80211_CONN_MODE_HE, + .eht_mcs7_min_nss = 0x15, + }, { + .desc = "AP requires too many TX streams with EHT MCS 7", + .conn_mode = IEEE80211_CONN_MODE_EHT, + .expected_mode = IEEE80211_CONN_MODE_HE, + .eht_mcs7_min_nss = 0x51, + }, { + .desc = "AP requires too many RX streams with EHT MCS 7 and EHT is required", + .extra_supp_rate = 0x80 | BSS_MEMBERSHIP_SELECTOR_EHT_PHY, + .conn_mode = IEEE80211_CONN_MODE_EHT, + .eht_mcs7_min_nss = 0x15, + .error = EINVAL, + } +}; +KUNIT_ARRAY_PARAM_DESC(determine_chan_mode, determine_chan_mode_cases, desc) + +static void test_determine_chan_mode(struct kunit *test) +{ + const struct determine_chan_mode_case *params = test->param_value; + struct t_sdata *t_sdata = T_SDATA(test); + struct ieee80211_conn_settings conn = { + .mode = params->conn_mode, + .bw_limit = IEEE80211_CONN_BW_LIMIT_20, + }; + struct cfg80211_bss cbss = { + .channel = &t_sdata->band_5ghz.channels[0], + }; + unsigned long userspace_selectors[BITS_TO_LONGS(128)] = {}; + u8 bss_ies[] = { + /* Supported Rates */ + WLAN_EID_SUPP_RATES, 0x08, + 0x82, 0x84, 0x8b, 0x96, 0xc, 0x12, 0x18, 0x24, + /* Extended Supported Rates */ + WLAN_EID_EXT_SUPP_RATES, 0x05, + 0x30, 0x48, 0x60, 0x6c, params->extra_supp_rate, + /* HT Capabilities */ + WLAN_EID_HT_CAPABILITY, 0x1a, + 0x0c, 0x00, 0x1b, 0xff, 0xff, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, + /* HT Information (0xff for 1 stream) */ + WLAN_EID_HT_OPERATION, 0x16, + 0x24, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + /* VHT Capabilities */ + WLAN_EID_VHT_CAPABILITY, 0xc, + 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00, + 0xff, 0xff, 0x00, 0x00, + /* VHT Operation */ + WLAN_EID_VHT_OPERATION, 0x05, + 0x00, 0x00, 0x00, + params->vht_basic_mcs_1_4_set ? + params->vht_basic_mcs_1_4 : + le16_get_bits(t_sdata->band_5ghz.vht_cap.vht_mcs.rx_mcs_map, 0xff), + params->vht_basic_mcs_5_8_set ? 
+ params->vht_basic_mcs_5_8 : + le16_get_bits(t_sdata->band_5ghz.vht_cap.vht_mcs.rx_mcs_map, 0xff00), + /* HE Capabilities */ + WLAN_EID_EXTENSION, 0x16, WLAN_EID_EXT_HE_CAPABILITY, + 0x01, 0x78, 0xc8, 0x1a, 0x40, 0x00, 0x00, 0xbf, + 0xce, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0xfa, 0xff, 0xfa, 0xff, + /* HE Operation (permit overriding values) */ + WLAN_EID_EXTENSION, 0x07, WLAN_EID_EXT_HE_OPERATION, + 0xf0, 0x3f, 0x00, 0xb0, + params->he_basic_mcs_1_4_set ? params->he_basic_mcs_1_4 : 0xfc, + params->he_basic_mcs_5_8_set ? params->he_basic_mcs_5_8 : 0xff, + /* EHT Capabilities */ + WLAN_EID_EXTENSION, 0x12, WLAN_EID_EXT_EHT_CAPABILITY, + 0x07, 0x00, 0x1c, 0x00, 0x00, 0xfe, 0xff, 0xff, + 0x7f, 0x01, 0x00, 0x88, 0x88, 0x88, 0x00, 0x00, + 0x00, + /* EHT Operation */ + WLAN_EID_EXTENSION, 0x09, WLAN_EID_EXT_EHT_OPERATION, + 0x01, params->eht_mcs7_min_nss ? params->eht_mcs7_min_nss : 0x11, + 0x00, 0x00, 0x00, 0x00, 0x24, 0x00, + }; + struct ieee80211_chan_req chanreq = {}; + struct cfg80211_chan_def ap_chandef = {}; + struct ieee802_11_elems *elems; + + if (params->strict) + set_bit(IEEE80211_HW_STRICT, t_sdata->local.hw.flags); + else + clear_bit(IEEE80211_HW_STRICT, t_sdata->local.hw.flags); + + t_sdata->sdata->u.mgd.ht_capa_mask = params->ht_capa_mask; + t_sdata->sdata->u.mgd.vht_capa = params->vht_capa; + t_sdata->sdata->u.mgd.vht_capa_mask = params->vht_capa_mask; + + if (params->userspace_selector) + set_bit(params->userspace_selector, userspace_selectors); + + rcu_assign_pointer(cbss.ies, + kunit_kzalloc(test, + sizeof(cbss) + sizeof(bss_ies), + GFP_KERNEL)); + KUNIT_ASSERT_NOT_NULL(test, rcu_access_pointer(cbss.ies)); + ((struct cfg80211_bss_ies *)rcu_access_pointer(cbss.ies))->len = sizeof(bss_ies); + + memcpy((void *)rcu_access_pointer(cbss.ies)->data, bss_ies, + sizeof(bss_ies)); + + rcu_read_lock(); + elems = ieee80211_determine_chan_mode(t_sdata->sdata, &conn, &cbss, + 0, &chanreq, &ap_chandef, + userspace_selectors); + rcu_read_unlock(); + + /* We do not need elems, free them if they are valid. 
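+ * (They are only returned so the caller can reuse the parsed elements; + * this test just checks the resulting connection mode and error code.)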
*/ + if (!IS_ERR_OR_NULL(elems)) + kfree(elems); + + if (params->error) { + KUNIT_ASSERT_TRUE(test, IS_ERR(elems)); + KUNIT_ASSERT_EQ(test, PTR_ERR(elems), -params->error); + } else { + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, elems); + KUNIT_ASSERT_EQ(test, conn.mode, params->expected_mode); + } +} + +static struct kunit_case chan_mode_cases[] = { + KUNIT_CASE_PARAM(test_determine_chan_mode, + determine_chan_mode_gen_params), + {} +}; + +static struct kunit_suite chan_mode = { + .name = "mac80211-mlme-chan-mode", + .test_cases = chan_mode_cases, +}; + +kunit_test_suite(chan_mode); diff --git a/net/mac80211/tests/util.c b/net/mac80211/tests/util.c index 0936a73e36175..9c2d63a5cd2bf 100644 --- a/net/mac80211/tests/util.c +++ b/net/mac80211/tests/util.c @@ -266,11 +266,7 @@ int t_sdata_init(struct kunit_resource *resource, void *ctx) cpu_to_le16(IEEE80211_VHT_MCS_SUPPORT_0_9 << 0 | IEEE80211_VHT_MCS_SUPPORT_0_9 << 2 | IEEE80211_VHT_MCS_SUPPORT_0_9 << 4 | - IEEE80211_VHT_MCS_SUPPORT_0_9 << 6 | - IEEE80211_VHT_MCS_SUPPORT_0_9 << 8 | - IEEE80211_VHT_MCS_SUPPORT_0_9 << 10 | - IEEE80211_VHT_MCS_SUPPORT_0_9 << 12 | - IEEE80211_VHT_MCS_SUPPORT_0_9 << 14); + IEEE80211_VHT_MCS_SUPPORT_0_9 << 6); sband->vht_cap.vht_mcs.tx_mcs_map = sband->vht_cap.vht_mcs.rx_mcs_map; break; diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index a24636bda6793..20179db88c4a6 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -1182,7 +1182,8 @@ void ieee80211_aggr_check(struct ieee80211_sub_if_data *sdata, if (!ref || !(ref->ops->capa & RATE_CTRL_CAPA_AMPDU_TRIGGER)) return; - if (!sta || !sta->sta.deflink.ht_cap.ht_supported || + if (!sta || + (!sta->sta.valid_links && !sta->sta.deflink.ht_cap.ht_supported) || !sta->sta.wme || skb_get_queue_mapping(skb) == IEEE80211_AC_VO || skb->protocol == sdata->control_port_protocol) return; @@ -5617,7 +5618,7 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw, if (!copy) return bcn; - ieee80211_tx_monitor(hw_to_local(hw), copy, 1, false, NULL); + ieee80211_tx_monitor(hw_to_local(hw), copy, 1, NULL); return bcn; } diff --git a/net/mac80211/util.c b/net/mac80211/util.c index f6b631faf4f7f..dec6e16b8c7d2 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -6,7 +6,7 @@ * Copyright 2007 Johannes Berg * Copyright 2013-2014 Intel Mobile Communications GmbH * Copyright (C) 2015-2017 Intel Deutschland GmbH - * Copyright (C) 2018-2024 Intel Corporation + * Copyright (C) 2018-2025 Intel Corporation * * utilities for mac80211 */ @@ -687,7 +687,7 @@ void __ieee80211_flush_queues(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, unsigned int queues, bool drop) { - if (!local->ops->flush) + if (!local->ops->flush && !drop) return; /* @@ -714,7 +714,8 @@ void __ieee80211_flush_queues(struct ieee80211_local *local, } } - drv_flush(local, sdata, queues, drop); + if (local->ops->flush) + drv_flush(local, sdata, queues, drop); ieee80211_wake_queues_by_reason(&local->hw, queues, IEEE80211_QUEUE_STOP_REASON_FLUSH, @@ -2155,7 +2156,8 @@ int ieee80211_reconfig(struct ieee80211_local *local) wake_up: - if (local->monitors == local->open_count && local->monitors > 0) + if (local->virt_monitors > 0 && + local->virt_monitors == local->open_count) ieee80211_add_virtual_monitor(local); /* @@ -2192,8 +2194,10 @@ int ieee80211_reconfig(struct ieee80211_local *local) ieee80211_reconfig_roc(local); /* Requeue all works */ - list_for_each_entry(sdata, &local->interfaces, list) - wiphy_work_queue(local->hw.wiphy, &sdata->work); + list_for_each_entry(sdata, 
&local->interfaces, list) { + if (ieee80211_sdata_running(sdata)) + wiphy_work_queue(local->hw.wiphy, &sdata->work); + } } ieee80211_wake_queues_by_reason(hw, IEEE80211_MAX_QUEUE_MAP, diff --git a/net/mac80211/wbrf.c b/net/mac80211/wbrf.c index 3a86123091373..478b34b81919b 100644 --- a/net/mac80211/wbrf.c +++ b/net/mac80211/wbrf.c @@ -2,6 +2,7 @@ /* * Wifi Band Exclusion Interface for WLAN * Copyright (C) 2023 Advanced Micro Devices + * Copyright (C) 2025 Intel Corporation * */ @@ -45,7 +46,7 @@ static void get_ranges_from_chandef(struct cfg80211_chan_def *chandef, u64 start_freq2, end_freq2; int bandwidth; - bandwidth = nl80211_chan_width_to_mhz(chandef->width); + bandwidth = cfg80211_chandef_get_width(chandef); get_chan_freq_boundary(chandef->center_freq1, bandwidth, &start_freq1, &end_freq1); diff --git a/net/mctp/route.c b/net/mctp/route.c index 3f2bd65ff5e3c..4c460160914f0 100644 --- a/net/mctp/route.c +++ b/net/mctp/route.c @@ -332,8 +332,14 @@ static int mctp_frag_queue(struct mctp_sk_key *key, struct sk_buff *skb) & MCTP_HDR_SEQ_MASK; if (!key->reasm_head) { - key->reasm_head = skb; - key->reasm_tailp = &(skb_shinfo(skb)->frag_list); + /* Since we're manipulating the shared frag_list, ensure it isn't + * shared with any other SKBs. + */ + key->reasm_head = skb_unshare(skb, GFP_ATOMIC); + if (!key->reasm_head) + return -ENOMEM; + + key->reasm_tailp = &(skb_shinfo(key->reasm_head)->frag_list); key->last_seq = this_seq; return 0; } diff --git a/net/mctp/test/route-test.c b/net/mctp/test/route-test.c index 17165b86ce22d..06c1897b685a8 100644 --- a/net/mctp/test/route-test.c +++ b/net/mctp/test/route-test.c @@ -921,6 +921,114 @@ static void mctp_test_route_input_sk_fail_frag(struct kunit *test) __mctp_route_test_fini(test, dev, rt, sock); } +/* Input route to socket, using a fragmented message created from clones. 
+ */ +static void mctp_test_route_input_cloned_frag(struct kunit *test) +{ + /* 5 packet fragments, forming 2 complete messages */ + const struct mctp_hdr hdrs[5] = { + RX_FRAG(FL_S, 0), + RX_FRAG(0, 1), + RX_FRAG(FL_E, 2), + RX_FRAG(FL_S, 0), + RX_FRAG(FL_E, 1), + }; + struct mctp_test_route *rt; + struct mctp_test_dev *dev; + struct sk_buff *skb[5]; + struct sk_buff *rx_skb; + struct socket *sock; + size_t data_len; + u8 compare[100]; + u8 flat[100]; + size_t total; + void *p; + int rc; + + /* Arbitrary length */ + data_len = 3; + total = data_len + sizeof(struct mctp_hdr); + + __mctp_route_test_init(test, &dev, &rt, &sock, MCTP_NET_ANY); + + /* Create a single skb initially with concatenated packets */ + skb[0] = mctp_test_create_skb(&hdrs[0], 5 * total); + mctp_test_skb_set_dev(skb[0], dev); + memset(skb[0]->data, 0 * 0x11, skb[0]->len); + memcpy(skb[0]->data, &hdrs[0], sizeof(struct mctp_hdr)); + + /* Extract and populate packets */ + for (int i = 1; i < 5; i++) { + skb[i] = skb_clone(skb[i - 1], GFP_ATOMIC); + KUNIT_ASSERT_TRUE(test, skb[i]); + p = skb_pull(skb[i], total); + KUNIT_ASSERT_TRUE(test, p); + skb_reset_network_header(skb[i]); + memcpy(skb[i]->data, &hdrs[i], sizeof(struct mctp_hdr)); + memset(&skb[i]->data[sizeof(struct mctp_hdr)], i * 0x11, data_len); + } + for (int i = 0; i < 5; i++) + skb_trim(skb[i], total); + + /* SOM packets have a type byte to match the socket */ + skb[0]->data[4] = 0; + skb[3]->data[4] = 0; + + skb_dump("pkt1 ", skb[0], false); + skb_dump("pkt2 ", skb[1], false); + skb_dump("pkt3 ", skb[2], false); + skb_dump("pkt4 ", skb[3], false); + skb_dump("pkt5 ", skb[4], false); + + for (int i = 0; i < 5; i++) { + KUNIT_EXPECT_EQ(test, refcount_read(&skb[i]->users), 1); + /* Take a reference so we can check refcounts at the end */ + skb_get(skb[i]); + } + + /* Feed the fragments into MCTP core */ + for (int i = 0; i < 5; i++) { + rc = mctp_route_input(&rt->rt, skb[i]); + KUNIT_EXPECT_EQ(test, rc, 0); + } + + /* Receive first reassembled message */ + rx_skb = skb_recv_datagram(sock->sk, MSG_DONTWAIT, &rc); + KUNIT_EXPECT_EQ(test, rc, 0); + KUNIT_EXPECT_EQ(test, rx_skb->len, 3 * data_len); + rc = skb_copy_bits(rx_skb, 0, flat, rx_skb->len); + for (int i = 0; i < rx_skb->len; i++) + compare[i] = (i / data_len) * 0x11; + /* Set type byte */ + compare[0] = 0; + + KUNIT_EXPECT_MEMEQ(test, flat, compare, rx_skb->len); + KUNIT_EXPECT_EQ(test, refcount_read(&rx_skb->users), 1); + kfree_skb(rx_skb); + + /* Receive second reassembled message */ + rx_skb = skb_recv_datagram(sock->sk, MSG_DONTWAIT, &rc); + KUNIT_EXPECT_EQ(test, rc, 0); + KUNIT_EXPECT_EQ(test, rx_skb->len, 2 * data_len); + rc = skb_copy_bits(rx_skb, 0, flat, rx_skb->len); + for (int i = 0; i < rx_skb->len; i++) + compare[i] = (i / data_len + 3) * 0x11; + /* Set type byte */ + compare[0] = 0; + + KUNIT_EXPECT_MEMEQ(test, flat, compare, rx_skb->len); + KUNIT_EXPECT_EQ(test, refcount_read(&rx_skb->users), 1); + kfree_skb(rx_skb); + + /* Check input skb refcounts */ + for (int i = 0; i < 5; i++) { + KUNIT_EXPECT_EQ(test, refcount_read(&skb[i]->users), 1); + kfree_skb(skb[i]); + } + + __mctp_route_test_fini(test, dev, rt, sock); +} + #if IS_ENABLED(CONFIG_MCTP_FLOWS) static void mctp_test_flow_init(struct kunit *test, @@ -1144,6 +1252,7 @@ static struct kunit_case mctp_test_cases[] = { KUNIT_CASE(mctp_test_packet_flow), KUNIT_CASE(mctp_test_fragment_flow), KUNIT_CASE(mctp_test_route_output_key_create), + KUNIT_CASE(mctp_test_route_input_cloned_frag), {} }; diff --git a/net/mptcp/Makefile b/net/mptcp/Makefile
index bcf1dbf3a432f..89bf6c47c818d 100644 --- a/net/mptcp/Makefile +++ b/net/mptcp/Makefile @@ -3,7 +3,7 @@ obj-$(CONFIG_MPTCP) += mptcp.o mptcp-y := protocol.o subflow.o options.o token.o crypto.o ctrl.o pm.o diag.o \ mib.o pm_netlink.o sockopt.o pm_userspace.o fastopen.o sched.o \ - mptcp_pm_gen.o + mptcp_pm_gen.o pm_kernel.o obj-$(CONFIG_SYN_COOKIES) += syncookies.o obj-$(CONFIG_INET_MPTCP_DIAG) += mptcp_diag.o diff --git a/net/mptcp/ctrl.c b/net/mptcp/ctrl.c index 2dd81e6c26bdb..d9290c5bb6c79 100644 --- a/net/mptcp/ctrl.c +++ b/net/mptcp/ctrl.c @@ -39,6 +39,7 @@ struct mptcp_pernet { u8 allow_join_initial_addr_port; u8 pm_type; char scheduler[MPTCP_SCHED_NAME_MAX]; + char path_manager[MPTCP_PM_NAME_MAX]; }; static struct mptcp_pernet *mptcp_get_pernet(const struct net *net) @@ -83,6 +84,11 @@ int mptcp_get_pm_type(const struct net *net) return mptcp_get_pernet(net)->pm_type; } +const char *mptcp_get_path_manager(const struct net *net) +{ + return mptcp_get_pernet(net)->path_manager; +} + const char *mptcp_get_scheduler(const struct net *net) { return mptcp_get_pernet(net)->scheduler; @@ -101,6 +107,7 @@ static void mptcp_pernet_set_defaults(struct mptcp_pernet *pernet) pernet->stale_loss_cnt = 4; pernet->pm_type = MPTCP_PM_TYPE_KERNEL; strscpy(pernet->scheduler, "default", sizeof(pernet->scheduler)); + strscpy(pernet->path_manager, "kernel", sizeof(pernet->path_manager)); } #ifdef CONFIG_SYSCTL @@ -174,6 +181,96 @@ static int proc_blackhole_detect_timeout(const struct ctl_table *table, return ret; } +static int mptcp_set_path_manager(char *path_manager, const char *name) +{ + struct mptcp_pm_ops *pm_ops; + int ret = 0; + + rcu_read_lock(); + pm_ops = mptcp_pm_find(name); + if (pm_ops) + strscpy(path_manager, name, MPTCP_PM_NAME_MAX); + else + ret = -ENOENT; + rcu_read_unlock(); + + return ret; +} + +static int proc_path_manager(const struct ctl_table *ctl, int write, + void *buffer, size_t *lenp, loff_t *ppos) +{ + struct mptcp_pernet *pernet = container_of(ctl->data, + struct mptcp_pernet, + path_manager); + char (*path_manager)[MPTCP_PM_NAME_MAX] = ctl->data; + char pm_name[MPTCP_PM_NAME_MAX]; + const struct ctl_table tbl = { + .data = pm_name, + .maxlen = MPTCP_PM_NAME_MAX, + }; + int ret; + + strscpy(pm_name, *path_manager, MPTCP_PM_NAME_MAX); + + ret = proc_dostring(&tbl, write, buffer, lenp, ppos); + if (write && ret == 0) { + ret = mptcp_set_path_manager(*path_manager, pm_name); + if (ret == 0) { + u8 pm_type = __MPTCP_PM_TYPE_NR; + + if (strncmp(pm_name, "kernel", MPTCP_PM_NAME_MAX) == 0) + pm_type = MPTCP_PM_TYPE_KERNEL; + else if (strncmp(pm_name, "userspace", MPTCP_PM_NAME_MAX) == 0) + pm_type = MPTCP_PM_TYPE_USERSPACE; + pernet->pm_type = pm_type; + } + } + + return ret; +} + +static int proc_pm_type(const struct ctl_table *ctl, int write, + void *buffer, size_t *lenp, loff_t *ppos) +{ + struct mptcp_pernet *pernet = container_of(ctl->data, + struct mptcp_pernet, + pm_type); + int ret; + + ret = proc_dou8vec_minmax(ctl, write, buffer, lenp, ppos); + if (write && ret == 0) { + u8 pm_type = READ_ONCE(*(u8 *)ctl->data); + char *pm_name = ""; + + if (pm_type == MPTCP_PM_TYPE_KERNEL) + pm_name = "kernel"; + else if (pm_type == MPTCP_PM_TYPE_USERSPACE) + pm_name = "userspace"; + mptcp_set_path_manager(pernet->path_manager, pm_name); + } + + return ret; +} + +static int proc_available_path_managers(const struct ctl_table *ctl, + int write, void *buffer, + size_t *lenp, loff_t *ppos) +{ + struct ctl_table tbl = { .maxlen = MPTCP_PM_BUF_MAX, }; + int ret; + + tbl.data = 
kmalloc(tbl.maxlen, GFP_USER); + if (!tbl.data) + return -ENOMEM; + + mptcp_pm_get_available(tbl.data, MPTCP_PM_BUF_MAX); + ret = proc_dostring(&tbl, write, buffer, lenp, ppos); + kfree(tbl.data); + + return ret; +} + static struct ctl_table mptcp_sysctl_table[] = { { .procname = "enabled", @@ -218,7 +315,7 @@ static struct ctl_table mptcp_sysctl_table[] = { .procname = "pm_type", .maxlen = sizeof(u8), .mode = 0644, - .proc_handler = proc_dou8vec_minmax, + .proc_handler = proc_pm_type, .extra1 = SYSCTL_ZERO, .extra2 = &mptcp_pm_type_max }, @@ -253,6 +350,18 @@ static struct ctl_table mptcp_sysctl_table[] = { .mode = 0644, .proc_handler = proc_dou8vec_minmax, }, + { + .procname = "path_manager", + .maxlen = MPTCP_PM_NAME_MAX, + .mode = 0644, + .proc_handler = proc_path_manager, + }, + { + .procname = "available_path_managers", + .maxlen = MPTCP_PM_BUF_MAX, + .mode = 0444, + .proc_handler = proc_available_path_managers, + }, }; static int mptcp_pernet_new_table(struct net *net, struct mptcp_pernet *pernet) @@ -278,6 +387,8 @@ static int mptcp_pernet_new_table(struct net *net, struct mptcp_pernet *pernet) table[8].data = &pernet->close_timeout; table[9].data = &pernet->blackhole_timeout; table[10].data = &pernet->syn_retrans_before_tcp_fallback; + table[11].data = &pernet->path_manager; + /* table[12] is for available_path_managers which is read-only info */ hdr = register_net_sysctl_sz(net, MPTCP_SYSCTL_PATH, table, ARRAY_SIZE(mptcp_sysctl_table)); @@ -401,26 +512,30 @@ void mptcp_active_enable(struct sock *sk) void mptcp_active_detect_blackhole(struct sock *ssk, bool expired) { struct mptcp_subflow_context *subflow; + u8 timeouts, to_max; + struct net *net; - if (!sk_is_mptcp(ssk)) + /* Only check MPTCP SYN ... */ + if (likely(!sk_is_mptcp(ssk) || ssk->sk_state != TCP_SYN_SENT)) return; subflow = mptcp_subflow_ctx(ssk); - if (subflow->request_mptcp && ssk->sk_state == TCP_SYN_SENT) { - struct net *net = sock_net(ssk); - u8 timeouts, to_max; + /* ... 
+ MP_CAPABLE */ + if (!subflow->request_mptcp) { + /* Mark as blackhole only if the 1st non-MPTCP SYN is accepted */ + subflow->mpc_drop = 0; + return; + } - timeouts = inet_csk(ssk)->icsk_retransmits; - to_max = mptcp_get_pernet(net)->syn_retrans_before_tcp_fallback; + net = sock_net(ssk); + timeouts = inet_csk(ssk)->icsk_retransmits; + to_max = mptcp_get_pernet(net)->syn_retrans_before_tcp_fallback; - if (timeouts == to_max || (timeouts < to_max && expired)) { - MPTCP_INC_STATS(net, MPTCP_MIB_MPCAPABLEACTIVEDROP); - subflow->mpc_drop = 1; - mptcp_subflow_early_fallback(mptcp_sk(subflow->conn), subflow); - } - } else if (ssk->sk_state == TCP_SYN_SENT) { - subflow->mpc_drop = 0; + if (timeouts == to_max || (timeouts < to_max && expired)) { + MPTCP_INC_STATS(net, MPTCP_MIB_MPCAPABLEACTIVEDROP); + subflow->mpc_drop = 1; + mptcp_subflow_early_fallback(mptcp_sk(subflow->conn), subflow); } } diff --git a/net/mptcp/diag.c b/net/mptcp/diag.c index 02205f7994d75..70cf9ebce8338 100644 --- a/net/mptcp/diag.c +++ b/net/mptcp/diag.c @@ -12,7 +12,7 @@ #include #include "protocol.h" -static int subflow_get_info(struct sock *sk, struct sk_buff *skb) +static int subflow_get_info(struct sock *sk, struct sk_buff *skb, bool net_admin) { struct mptcp_subflow_context *sf; struct nlattr *start; @@ -56,15 +56,6 @@ static int subflow_get_info(struct sock *sk, struct sk_buff *skb) if (nla_put_u32(skb, MPTCP_SUBFLOW_ATTR_TOKEN_REM, sf->remote_token) || nla_put_u32(skb, MPTCP_SUBFLOW_ATTR_TOKEN_LOC, sf->token) || - nla_put_u32(skb, MPTCP_SUBFLOW_ATTR_RELWRITE_SEQ, - sf->rel_write_seq) || - nla_put_u64_64bit(skb, MPTCP_SUBFLOW_ATTR_MAP_SEQ, sf->map_seq, - MPTCP_SUBFLOW_ATTR_PAD) || - nla_put_u32(skb, MPTCP_SUBFLOW_ATTR_MAP_SFSEQ, - sf->map_subflow_seq) || - nla_put_u32(skb, MPTCP_SUBFLOW_ATTR_SSN_OFFSET, sf->ssn_offset) || - nla_put_u16(skb, MPTCP_SUBFLOW_ATTR_MAP_DATALEN, - sf->map_data_len) || nla_put_u32(skb, MPTCP_SUBFLOW_ATTR_FLAGS, flags) || nla_put_u8(skb, MPTCP_SUBFLOW_ATTR_ID_REM, sf->remote_id) || nla_put_u8(skb, MPTCP_SUBFLOW_ATTR_ID_LOC, subflow_get_local_id(sf))) { @@ -72,6 +63,21 @@ static int subflow_get_info(struct sock *sk, struct sk_buff *skb) err = -EMSGSIZE; goto nla_failure; } + /* Only export seq-related counters to users with CAP_NET_ADMIN */ + if (net_admin && + (nla_put_u32(skb, MPTCP_SUBFLOW_ATTR_RELWRITE_SEQ, + sf->rel_write_seq) || + nla_put_u64_64bit(skb, MPTCP_SUBFLOW_ATTR_MAP_SEQ, sf->map_seq, + MPTCP_SUBFLOW_ATTR_PAD) || + nla_put_u32(skb, MPTCP_SUBFLOW_ATTR_MAP_SFSEQ, + sf->map_subflow_seq) || + nla_put_u32(skb, MPTCP_SUBFLOW_ATTR_SSN_OFFSET, sf->ssn_offset) || + nla_put_u16(skb, MPTCP_SUBFLOW_ATTR_MAP_DATALEN, + sf->map_data_len))) { + err = -EMSGSIZE; + goto nla_failure; + } + rcu_read_unlock(); unlock_sock_fast(sk, slow); nla_nest_end(skb, start); @@ -84,22 +90,26 @@ static int subflow_get_info(struct sock *sk, struct sk_buff *skb) return err; } -static size_t subflow_get_info_size(const struct sock *sk) +static size_t subflow_get_info_size(const struct sock *sk, bool net_admin) { size_t size = 0; size += nla_total_size(0) + /* INET_ULP_INFO_MPTCP */ nla_total_size(4) + /* MPTCP_SUBFLOW_ATTR_TOKEN_REM */ nla_total_size(4) + /* MPTCP_SUBFLOW_ATTR_TOKEN_LOC */ - nla_total_size(4) + /* MPTCP_SUBFLOW_ATTR_RELWRITE_SEQ */ - nla_total_size_64bit(8) + /* MPTCP_SUBFLOW_ATTR_MAP_SEQ */ - nla_total_size(4) + /* MPTCP_SUBFLOW_ATTR_MAP_SFSEQ */ - nla_total_size(4) + /* MPTCP_SUBFLOW_ATTR_SSN_OFFSET */ - nla_total_size(2) + /* MPTCP_SUBFLOW_ATTR_MAP_DATALEN */ nla_total_size(4) + /* MPTCP_SUBFLOW_ATTR_FLAGS */
nla_total_size(1) + /* MPTCP_SUBFLOW_ATTR_ID_REM */ nla_total_size(1) + /* MPTCP_SUBFLOW_ATTR_ID_LOC */ 0; + + if (net_admin) + size += nla_total_size(4) + /* MPTCP_SUBFLOW_ATTR_RELWRITE_SEQ */ + nla_total_size_64bit(8) + /* MPTCP_SUBFLOW_ATTR_MAP_SEQ */ + nla_total_size(4) + /* MPTCP_SUBFLOW_ATTR_MAP_SFSEQ */ + nla_total_size(4) + /* MPTCP_SUBFLOW_ATTR_SSN_OFFSET */ + nla_total_size(2) + /* MPTCP_SUBFLOW_ATTR_MAP_DATALEN */ + 0; + return size; } diff --git a/net/mptcp/fastopen.c b/net/mptcp/fastopen.c index a29ff901df758..b9e4511979028 100644 --- a/net/mptcp/fastopen.c +++ b/net/mptcp/fastopen.c @@ -40,17 +40,17 @@ void mptcp_fastopen_subflow_synack_set_params(struct mptcp_subflow_context *subf tp->copied_seq += skb->len; subflow->ssn_offset += skb->len; - /* initialize a dummy sequence number, we will update it at MPC - * completion, if needed - */ + /* Only the sequence delta is relevant */ MPTCP_SKB_CB(skb)->map_seq = -skb->len; MPTCP_SKB_CB(skb)->end_seq = 0; MPTCP_SKB_CB(skb)->offset = 0; MPTCP_SKB_CB(skb)->has_rxtstamp = TCP_SKB_CB(skb)->has_rxtstamp; + MPTCP_SKB_CB(skb)->cant_coalesce = 1; mptcp_data_lock(sk); + DEBUG_NET_WARN_ON_ONCE(sock_owned_by_user_nocheck(sk)); - mptcp_set_owner_r(skb, sk); + skb_set_owner_r(skb, sk); __skb_queue_tail(&sk->sk_receive_queue, skb); mptcp_sk(sk)->bytes_received += skb->len; @@ -58,22 +58,3 @@ void mptcp_fastopen_subflow_synack_set_params(struct mptcp_subflow_context *subf mptcp_data_unlock(sk); } - -void __mptcp_fastopen_gen_msk_ackseq(struct mptcp_sock *msk, struct mptcp_subflow_context *subflow, - const struct mptcp_options_received *mp_opt) -{ - struct sock *sk = (struct sock *)msk; - struct sk_buff *skb; - - skb = skb_peek_tail(&sk->sk_receive_queue); - if (skb) { - WARN_ON_ONCE(MPTCP_SKB_CB(skb)->end_seq); - pr_debug("msk %p moving seq %llx -> %llx end_seq %llx -> %llx\n", sk, - MPTCP_SKB_CB(skb)->map_seq, MPTCP_SKB_CB(skb)->map_seq + msk->ack_seq, - MPTCP_SKB_CB(skb)->end_seq, MPTCP_SKB_CB(skb)->end_seq + msk->ack_seq); - MPTCP_SKB_CB(skb)->map_seq += msk->ack_seq; - MPTCP_SKB_CB(skb)->end_seq += msk->ack_seq; - } - - pr_debug("msk=%p ack_seq=%llx\n", msk, msk->ack_seq); -} diff --git a/net/mptcp/options.c b/net/mptcp/options.c index fd2de185bc939..421ced0312890 100644 --- a/net/mptcp/options.c +++ b/net/mptcp/options.c @@ -432,7 +432,6 @@ static void clear_3rdack_retransmission(struct sock *sk) struct inet_connection_sock *icsk = inet_csk(sk); sk_stop_timer(sk, &icsk->icsk_delack_timer); - icsk->icsk_ack.timeout = 0; icsk->icsk_ack.ato = 0; icsk->icsk_ack.pending &= ~(ICSK_ACK_SCHED | ICSK_ACK_TIMER); } @@ -651,6 +650,7 @@ static bool mptcp_established_options_add_addr(struct sock *sk, struct sk_buff * struct mptcp_sock *msk = mptcp_sk(subflow->conn); bool drop_other_suboptions = false; unsigned int opt_size = *size; + struct mptcp_addr_info addr; bool echo; int len; @@ -659,7 +659,7 @@ static bool mptcp_established_options_add_addr(struct sock *sk, struct sk_buff * */ if (!mptcp_pm_should_add_signal(msk) || (opts->suboptions & (OPTION_MPTCP_MPJ_ACK | OPTION_MPTCP_MPC_ACK)) || - !mptcp_pm_add_addr_signal(msk, skb, opt_size, remaining, &opts->addr, + !mptcp_pm_add_addr_signal(msk, skb, opt_size, remaining, &addr, &echo, &drop_other_suboptions)) return false; @@ -672,7 +672,7 @@ static bool mptcp_established_options_add_addr(struct sock *sk, struct sk_buff * else if (opts->suboptions & OPTION_MPTCP_DSS) return false; - len = mptcp_add_addr_len(opts->addr.family, echo, !!opts->addr.port); + len = mptcp_add_addr_len(addr.family, echo, 
!!addr.port); if (remaining < len) return false; @@ -689,6 +689,7 @@ static bool mptcp_established_options_add_addr(struct sock *sk, struct sk_buff * opts->ahmac = 0; *size -= opt_size; } + opts->addr = addr; opts->suboptions |= OPTION_MPTCP_ADD_ADDR; if (!echo) { MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_ADDADDRTX); diff --git a/net/mptcp/pm.c b/net/mptcp/pm.c index 16c336c519403..18b19dbccbba7 100644 --- a/net/mptcp/pm.c +++ b/net/mptcp/pm.c @@ -5,12 +5,390 @@ */ #define pr_fmt(fmt) "MPTCP: " fmt -#include -#include +#include +#include #include "protocol.h" - #include "mib.h" +#define ADD_ADDR_RETRANS_MAX 3 + +struct mptcp_pm_add_entry { + struct list_head list; + struct mptcp_addr_info addr; + u8 retrans_times; + struct timer_list add_timer; + struct mptcp_sock *sock; +}; + +static DEFINE_SPINLOCK(mptcp_pm_list_lock); +static LIST_HEAD(mptcp_pm_list); + +/* path manager helpers */ + +/* if sk is ipv4 or ipv6_only allows only same-family local and remote addresses, + * otherwise allow any matching local/remote pair + */ +bool mptcp_pm_addr_families_match(const struct sock *sk, + const struct mptcp_addr_info *loc, + const struct mptcp_addr_info *rem) +{ + bool mptcp_is_v4 = sk->sk_family == AF_INET; + +#if IS_ENABLED(CONFIG_MPTCP_IPV6) + bool loc_is_v4 = loc->family == AF_INET || ipv6_addr_v4mapped(&loc->addr6); + bool rem_is_v4 = rem->family == AF_INET || ipv6_addr_v4mapped(&rem->addr6); + + if (mptcp_is_v4) + return loc_is_v4 && rem_is_v4; + + if (ipv6_only_sock(sk)) + return !loc_is_v4 && !rem_is_v4; + + return loc_is_v4 == rem_is_v4; +#else + return mptcp_is_v4 && loc->family == AF_INET && rem->family == AF_INET; +#endif +} + +bool mptcp_addresses_equal(const struct mptcp_addr_info *a, + const struct mptcp_addr_info *b, bool use_port) +{ + bool addr_equals = false; + + if (a->family == b->family) { + if (a->family == AF_INET) + addr_equals = a->addr.s_addr == b->addr.s_addr; +#if IS_ENABLED(CONFIG_MPTCP_IPV6) + else + addr_equals = ipv6_addr_equal(&a->addr6, &b->addr6); + } else if (a->family == AF_INET) { + if (ipv6_addr_v4mapped(&b->addr6)) + addr_equals = a->addr.s_addr == b->addr6.s6_addr32[3]; + } else if (b->family == AF_INET) { + if (ipv6_addr_v4mapped(&a->addr6)) + addr_equals = a->addr6.s6_addr32[3] == b->addr.s_addr; +#endif + } + + if (!addr_equals) + return false; + if (!use_port) + return true; + + return a->port == b->port; +} + +void mptcp_local_address(const struct sock_common *skc, + struct mptcp_addr_info *addr) +{ + addr->family = skc->skc_family; + addr->port = htons(skc->skc_num); + if (addr->family == AF_INET) + addr->addr.s_addr = skc->skc_rcv_saddr; +#if IS_ENABLED(CONFIG_MPTCP_IPV6) + else if (addr->family == AF_INET6) + addr->addr6 = skc->skc_v6_rcv_saddr; +#endif +} + +void mptcp_remote_address(const struct sock_common *skc, + struct mptcp_addr_info *addr) +{ + addr->family = skc->skc_family; + addr->port = skc->skc_dport; + if (addr->family == AF_INET) + addr->addr.s_addr = skc->skc_daddr; +#if IS_ENABLED(CONFIG_MPTCP_IPV6) + else if (addr->family == AF_INET6) + addr->addr6 = skc->skc_v6_daddr; +#endif +} + +static bool mptcp_pm_is_init_remote_addr(struct mptcp_sock *msk, + const struct mptcp_addr_info *remote) +{ + struct mptcp_addr_info mpc_remote; + + mptcp_remote_address((struct sock_common *)msk, &mpc_remote); + return mptcp_addresses_equal(&mpc_remote, remote, remote->port); +} + +bool mptcp_lookup_subflow_by_saddr(const struct list_head *list, + const struct mptcp_addr_info *saddr) +{ + struct mptcp_subflow_context *subflow; + struct mptcp_addr_info 
cur; + struct sock_common *skc; + + list_for_each_entry(subflow, list, node) { + skc = (struct sock_common *)mptcp_subflow_tcp_sock(subflow); + + mptcp_local_address(skc, &cur); + if (mptcp_addresses_equal(&cur, saddr, saddr->port)) + return true; + } + + return false; +} + +static struct mptcp_pm_add_entry * +mptcp_lookup_anno_list_by_saddr(const struct mptcp_sock *msk, + const struct mptcp_addr_info *addr) +{ + struct mptcp_pm_add_entry *entry; + + lockdep_assert_held(&msk->pm.lock); + + list_for_each_entry(entry, &msk->pm.anno_list, list) { + if (mptcp_addresses_equal(&entry->addr, addr, true)) + return entry; + } + + return NULL; +} + +bool mptcp_remove_anno_list_by_saddr(struct mptcp_sock *msk, + const struct mptcp_addr_info *addr) +{ + struct mptcp_pm_add_entry *entry; + + entry = mptcp_pm_del_add_timer(msk, addr, false); + kfree(entry); + return entry; +} + +bool mptcp_pm_sport_in_anno_list(struct mptcp_sock *msk, const struct sock *sk) +{ + struct mptcp_pm_add_entry *entry; + struct mptcp_addr_info saddr; + bool ret = false; + + mptcp_local_address((struct sock_common *)sk, &saddr); + + spin_lock_bh(&msk->pm.lock); + list_for_each_entry(entry, &msk->pm.anno_list, list) { + if (mptcp_addresses_equal(&entry->addr, &saddr, true)) { + ret = true; + goto out; + } + } + +out: + spin_unlock_bh(&msk->pm.lock); + return ret; +} + +static void __mptcp_pm_send_ack(struct mptcp_sock *msk, + struct mptcp_subflow_context *subflow, + bool prio, bool backup) +{ + struct sock *ssk = mptcp_subflow_tcp_sock(subflow); + bool slow; + + pr_debug("send ack for %s\n", + prio ? "mp_prio" : + (mptcp_pm_should_add_signal(msk) ? "add_addr" : "rm_addr")); + + slow = lock_sock_fast(ssk); + if (prio) { + subflow->send_mp_prio = 1; + subflow->request_bkup = backup; + } + + __mptcp_subflow_send_ack(ssk); + unlock_sock_fast(ssk, slow); +} + +void mptcp_pm_send_ack(struct mptcp_sock *msk, + struct mptcp_subflow_context *subflow, + bool prio, bool backup) +{ + spin_unlock_bh(&msk->pm.lock); + __mptcp_pm_send_ack(msk, subflow, prio, backup); + spin_lock_bh(&msk->pm.lock); +} + +void mptcp_pm_addr_send_ack(struct mptcp_sock *msk) +{ + struct mptcp_subflow_context *subflow, *alt = NULL; + + msk_owned_by_me(msk); + lockdep_assert_held(&msk->pm.lock); + + if (!mptcp_pm_should_add_signal(msk) && + !mptcp_pm_should_rm_signal(msk)) + return; + + mptcp_for_each_subflow(msk, subflow) { + if (__mptcp_subflow_active(subflow)) { + if (!subflow->stale) { + mptcp_pm_send_ack(msk, subflow, false, false); + return; + } + + if (!alt) + alt = subflow; + } + } + + if (alt) + mptcp_pm_send_ack(msk, alt, false, false); +} + +int mptcp_pm_mp_prio_send_ack(struct mptcp_sock *msk, + struct mptcp_addr_info *addr, + struct mptcp_addr_info *rem, + u8 bkup) +{ + struct mptcp_subflow_context *subflow; + + pr_debug("bkup=%d\n", bkup); + + mptcp_for_each_subflow(msk, subflow) { + struct sock *ssk = mptcp_subflow_tcp_sock(subflow); + struct mptcp_addr_info local, remote; + + mptcp_local_address((struct sock_common *)ssk, &local); + if (!mptcp_addresses_equal(&local, addr, addr->port)) + continue; + + if (rem && rem->family != AF_UNSPEC) { + mptcp_remote_address((struct sock_common *)ssk, &remote); + if (!mptcp_addresses_equal(&remote, rem, rem->port)) + continue; + } + + __mptcp_pm_send_ack(msk, subflow, true, bkup); + return 0; + } + + return -EINVAL; +} + +static void mptcp_pm_add_timer(struct timer_list *timer) +{ + struct mptcp_pm_add_entry *entry = from_timer(entry, timer, add_timer); + struct mptcp_sock *msk = entry->sock; + struct sock *sk = 
(struct sock *)msk; + + pr_debug("msk=%p\n", msk); + + if (!msk) + return; + + if (inet_sk_state_load(sk) == TCP_CLOSE) + return; + + if (!entry->addr.id) + return; + + if (mptcp_pm_should_add_signal_addr(msk)) { + sk_reset_timer(sk, timer, jiffies + TCP_RTO_MAX / 8); + goto out; + } + + spin_lock_bh(&msk->pm.lock); + + if (!mptcp_pm_should_add_signal_addr(msk)) { + pr_debug("retransmit ADD_ADDR id=%d\n", entry->addr.id); + mptcp_pm_announce_addr(msk, &entry->addr, false); + mptcp_pm_add_addr_send_ack(msk); + entry->retrans_times++; + } + + if (entry->retrans_times < ADD_ADDR_RETRANS_MAX) + sk_reset_timer(sk, timer, + jiffies + mptcp_get_add_addr_timeout(sock_net(sk))); + + spin_unlock_bh(&msk->pm.lock); + + if (entry->retrans_times == ADD_ADDR_RETRANS_MAX) + mptcp_pm_subflow_established(msk); + +out: + __sock_put(sk); +} + +struct mptcp_pm_add_entry * +mptcp_pm_del_add_timer(struct mptcp_sock *msk, + const struct mptcp_addr_info *addr, bool check_id) +{ + struct mptcp_pm_add_entry *entry; + struct sock *sk = (struct sock *)msk; + struct timer_list *add_timer = NULL; + + spin_lock_bh(&msk->pm.lock); + entry = mptcp_lookup_anno_list_by_saddr(msk, addr); + if (entry && (!check_id || entry->addr.id == addr->id)) { + entry->retrans_times = ADD_ADDR_RETRANS_MAX; + add_timer = &entry->add_timer; + } + if (!check_id && entry) + list_del(&entry->list); + spin_unlock_bh(&msk->pm.lock); + + /* no lock, because sk_stop_timer_sync() is calling del_timer_sync() */ + if (add_timer) + sk_stop_timer_sync(sk, add_timer); + + return entry; +} + +bool mptcp_pm_alloc_anno_list(struct mptcp_sock *msk, + const struct mptcp_addr_info *addr) +{ + struct mptcp_pm_add_entry *add_entry = NULL; + struct sock *sk = (struct sock *)msk; + struct net *net = sock_net(sk); + + lockdep_assert_held(&msk->pm.lock); + + add_entry = mptcp_lookup_anno_list_by_saddr(msk, addr); + + if (add_entry) { + if (WARN_ON_ONCE(mptcp_pm_is_kernel(msk))) + return false; + + sk_reset_timer(sk, &add_entry->add_timer, + jiffies + mptcp_get_add_addr_timeout(net)); + return true; + } + + add_entry = kmalloc(sizeof(*add_entry), GFP_ATOMIC); + if (!add_entry) + return false; + + list_add(&add_entry->list, &msk->pm.anno_list); + + add_entry->addr = *addr; + add_entry->sock = msk; + add_entry->retrans_times = 0; + + timer_setup(&add_entry->add_timer, mptcp_pm_add_timer, 0); + sk_reset_timer(sk, &add_entry->add_timer, + jiffies + mptcp_get_add_addr_timeout(net)); + + return true; +} + +static void mptcp_pm_free_anno_list(struct mptcp_sock *msk) +{ + struct mptcp_pm_add_entry *entry, *tmp; + struct sock *sk = (struct sock *)msk; + LIST_HEAD(free_list); + + pr_debug("msk=%p\n", msk); + + spin_lock_bh(&msk->pm.lock); + list_splice_init(&msk->pm.anno_list, &free_list); + spin_unlock_bh(&msk->pm.lock); + + list_for_each_entry_safe(entry, tmp, &free_list, list) { + sk_stop_timer_sync(sk, &entry->add_timer); + kfree(entry); + } +} + /* path manager command handlers */ int mptcp_pm_announce_addr(struct mptcp_sock *msk, @@ -56,7 +434,7 @@ int mptcp_pm_remove_addr(struct mptcp_sock *msk, const struct mptcp_rm_list *rm_ msk->pm.rm_list_tx = *rm_list; rm_addr |= BIT(MPTCP_RM_ADDR_SIGNAL); WRITE_ONCE(msk->pm.addr_signal, rm_addr); - mptcp_pm_nl_addr_send_ack(msk); + mptcp_pm_addr_send_ack(msk); return 0; } @@ -138,13 +516,13 @@ void mptcp_pm_fully_established(struct mptcp_sock *msk, const struct sock *ssk) * be sure to serve this event only once. 
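* The MPTCP_PM_ALREADY_ESTABLISHED bit below is what provides that * guarantee: the first caller schedules the worker and announces, while * later callers find the bit already set and skip both.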
*/ if (READ_ONCE(pm->work_pending) && - !(msk->pm.status & BIT(MPTCP_PM_ALREADY_ESTABLISHED))) + !(pm->status & BIT(MPTCP_PM_ALREADY_ESTABLISHED))) mptcp_pm_schedule_work(msk, MPTCP_PM_ESTABLISHED); - if ((msk->pm.status & BIT(MPTCP_PM_ALREADY_ESTABLISHED)) == 0) + if ((pm->status & BIT(MPTCP_PM_ALREADY_ESTABLISHED)) == 0) announce = true; - msk->pm.status |= BIT(MPTCP_PM_ALREADY_ESTABLISHED); + pm->status |= BIT(MPTCP_PM_ALREADY_ESTABLISHED); spin_unlock_bh(&pm->lock); if (announce) @@ -230,7 +608,7 @@ void mptcp_pm_add_addr_received(const struct sock *ssk, __MPTCP_INC_STATS(sock_net((struct sock *)msk), MPTCP_MIB_ADDADDRDROP); } /* id0 should not have a different address */ - } else if ((addr->id == 0 && !mptcp_pm_nl_is_init_remote_addr(msk, addr)) || + } else if ((addr->id == 0 && !mptcp_pm_is_init_remote_addr(msk, addr)) || (addr->id > 0 && !READ_ONCE(pm->accept_addr))) { mptcp_pm_announce_addr(msk, addr, true); mptcp_pm_add_addr_send_ack(msk); @@ -250,6 +628,9 @@ void mptcp_pm_add_addr_echoed(struct mptcp_sock *msk, pr_debug("msk=%p\n", msk); + if (!READ_ONCE(pm->work_pending)) + return; + spin_lock_bh(&pm->lock); if (mptcp_lookup_anno_list_by_saddr(msk, addr) && READ_ONCE(pm->work_pending)) @@ -266,6 +647,80 @@ void mptcp_pm_add_addr_send_ack(struct mptcp_sock *msk) mptcp_pm_schedule_work(msk, MPTCP_PM_ADD_ADDR_SEND_ACK); } +static void mptcp_pm_rm_addr_or_subflow(struct mptcp_sock *msk, + const struct mptcp_rm_list *rm_list, + enum linux_mptcp_mib_field rm_type) +{ + struct mptcp_subflow_context *subflow, *tmp; + struct sock *sk = (struct sock *)msk; + u8 i; + + pr_debug("%s rm_list_nr %d\n", + rm_type == MPTCP_MIB_RMADDR ? "address" : "subflow", rm_list->nr); + + msk_owned_by_me(msk); + + if (sk->sk_state == TCP_LISTEN) + return; + + if (!rm_list->nr) + return; + + if (list_empty(&msk->conn_list)) + return; + + for (i = 0; i < rm_list->nr; i++) { + u8 rm_id = rm_list->ids[i]; + bool removed = false; + + mptcp_for_each_subflow_safe(msk, subflow, tmp) { + struct sock *ssk = mptcp_subflow_tcp_sock(subflow); + u8 remote_id = READ_ONCE(subflow->remote_id); + int how = RCV_SHUTDOWN | SEND_SHUTDOWN; + u8 id = subflow_get_local_id(subflow); + + if ((1 << inet_sk_state_load(ssk)) & + (TCPF_FIN_WAIT1 | TCPF_FIN_WAIT2 | TCPF_CLOSING | TCPF_CLOSE)) + continue; + if (rm_type == MPTCP_MIB_RMADDR && remote_id != rm_id) + continue; + if (rm_type == MPTCP_MIB_RMSUBFLOW && id != rm_id) + continue; + + pr_debug(" -> %s rm_list_ids[%d]=%u local_id=%u remote_id=%u mpc_id=%u\n", + rm_type == MPTCP_MIB_RMADDR ? 
"address" : "subflow", + i, rm_id, id, remote_id, msk->mpc_endpoint_id); + spin_unlock_bh(&msk->pm.lock); + mptcp_subflow_shutdown(sk, ssk, how); + removed |= subflow->request_join; + + /* the following takes care of updating the subflows counter */ + mptcp_close_ssk(sk, ssk, subflow); + spin_lock_bh(&msk->pm.lock); + + if (rm_type == MPTCP_MIB_RMSUBFLOW) + __MPTCP_INC_STATS(sock_net(sk), rm_type); + } + + if (rm_type == MPTCP_MIB_RMADDR) { + __MPTCP_INC_STATS(sock_net(sk), rm_type); + if (removed && mptcp_pm_is_kernel(msk)) + mptcp_pm_nl_rm_addr(msk, rm_id); + } + } +} + +static void mptcp_pm_rm_addr_recv(struct mptcp_sock *msk) +{ + mptcp_pm_rm_addr_or_subflow(msk, &msk->pm.rm_list_rx, MPTCP_MIB_RMADDR); +} + +void mptcp_pm_rm_subflow(struct mptcp_sock *msk, + const struct mptcp_rm_list *rm_list) +{ + mptcp_pm_rm_addr_or_subflow(msk, rm_list, MPTCP_MIB_RMSUBFLOW); +} + void mptcp_pm_rm_addr_received(struct mptcp_sock *msk, const struct mptcp_rm_list *rm_list) { @@ -321,8 +776,6 @@ void mptcp_pm_mp_fail_received(struct sock *sk, u64 fail_seq) } } -/* path manager helpers */ - bool mptcp_pm_add_addr_signal(struct mptcp_sock *msk, const struct sk_buff *skb, unsigned int opt_size, unsigned int remaining, struct mptcp_addr_info *addr, bool *echo, @@ -402,7 +855,7 @@ bool mptcp_pm_rm_addr_signal(struct mptcp_sock *msk, unsigned int remaining, int mptcp_pm_get_local_id(struct mptcp_sock *msk, struct sock_common *skc) { - struct mptcp_addr_info skc_local; + struct mptcp_pm_addr_entry skc_local = { 0 }; struct mptcp_addr_info msk_local; if (WARN_ON_ONCE(!msk)) @@ -412,10 +865,13 @@ int mptcp_pm_get_local_id(struct mptcp_sock *msk, struct sock_common *skc) * addr */ mptcp_local_address((struct sock_common *)msk, &msk_local); - mptcp_local_address((struct sock_common *)skc, &skc_local); - if (mptcp_addresses_equal(&msk_local, &skc_local, false)) + mptcp_local_address((struct sock_common *)skc, &skc_local.addr); + if (mptcp_addresses_equal(&msk_local, &skc_local.addr, false)) return 0; + skc_local.addr.id = 0; + skc_local.flags = MPTCP_PM_ADDR_FLAG_IMPLICIT; + if (mptcp_pm_is_userspace(msk)) return mptcp_userspace_pm_get_local_id(msk, &skc_local); return mptcp_pm_nl_get_local_id(msk, &skc_local); @@ -433,27 +889,41 @@ bool mptcp_pm_is_backup(struct mptcp_sock *msk, struct sock_common *skc) return mptcp_pm_nl_is_backup(msk, &skc_local); } -int mptcp_pm_get_addr(struct sk_buff *skb, struct genl_info *info) -{ - if (info->attrs[MPTCP_PM_ATTR_TOKEN]) - return mptcp_userspace_pm_get_addr(skb, info); - return mptcp_pm_nl_get_addr(skb, info); -} - -int mptcp_pm_dump_addr(struct sk_buff *msg, struct netlink_callback *cb) +static void mptcp_pm_subflows_chk_stale(const struct mptcp_sock *msk, struct sock *ssk) { - const struct genl_info *info = genl_info_dump(cb); - - if (info->attrs[MPTCP_PM_ATTR_TOKEN]) - return mptcp_userspace_pm_dump_addr(msg, cb); - return mptcp_pm_nl_dump_addr(msg, cb); -} + struct mptcp_subflow_context *iter, *subflow = mptcp_subflow_ctx(ssk); + struct sock *sk = (struct sock *)msk; + unsigned int active_max_loss_cnt; + struct net *net = sock_net(sk); + unsigned int stale_loss_cnt; + bool slow; + + stale_loss_cnt = mptcp_stale_loss_cnt(net); + if (subflow->stale || !stale_loss_cnt || subflow->stale_count <= stale_loss_cnt) + return; -int mptcp_pm_set_flags(struct sk_buff *skb, struct genl_info *info) -{ - if (info->attrs[MPTCP_PM_ATTR_TOKEN]) - return mptcp_userspace_pm_set_flags(skb, info); - return mptcp_pm_nl_set_flags(skb, info); + /* look for another available subflow not in loss 
state */ + active_max_loss_cnt = max_t(int, stale_loss_cnt - 1, 1); + mptcp_for_each_subflow(msk, iter) { + if (iter != subflow && mptcp_subflow_active(iter) && + iter->stale_count < active_max_loss_cnt) { + /* we have some alternatives, try to mark this subflow as idle ...*/ + slow = lock_sock_fast(ssk); + if (!tcp_rtx_and_write_queues_empty(ssk)) { + subflow->stale = 1; + __mptcp_retransmit_pending_data(sk); + MPTCP_INC_STATS(net, MPTCP_MIB_SUBFLOWSTALE); + } + unlock_sock_fast(ssk, slow); + + /* always try to push the pending data regardless of re-injections: + * we can possibly use backup subflows now, and subflow selection + * is cheap under the msk socket lock + */ + __mptcp_push_pending(sk, 0); + return; + } + } } void mptcp_pm_subflow_chk_stale(const struct mptcp_sock *msk, struct sock *ssk) @@ -468,36 +938,44 @@ void mptcp_pm_subflow_chk_stale(const struct mptcp_sock *msk, struct sock *ssk) } else if (subflow->stale_rcv_tstamp == rcv_tstamp) { if (subflow->stale_count < U8_MAX) subflow->stale_count++; - mptcp_pm_nl_subflow_chk_stale(msk, ssk); + mptcp_pm_subflows_chk_stale(msk, ssk); } else { subflow->stale_count = 0; mptcp_subflow_set_active(subflow); } } -/* if sk is ipv4 or ipv6_only allows only same-family local and remote addresses, - * otherwise allow any matching local/remote pair - */ -bool mptcp_pm_addr_families_match(const struct sock *sk, - const struct mptcp_addr_info *loc, - const struct mptcp_addr_info *rem) +void mptcp_pm_worker(struct mptcp_sock *msk) { - bool mptcp_is_v4 = sk->sk_family == AF_INET; + struct mptcp_pm_data *pm = &msk->pm; -#if IS_ENABLED(CONFIG_MPTCP_IPV6) - bool loc_is_v4 = loc->family == AF_INET || ipv6_addr_v4mapped(&loc->addr6); - bool rem_is_v4 = rem->family == AF_INET || ipv6_addr_v4mapped(&rem->addr6); + msk_owned_by_me(msk); - if (mptcp_is_v4) - return loc_is_v4 && rem_is_v4; + if (!(pm->status & MPTCP_PM_WORK_MASK)) + return; - if (ipv6_only_sock(sk)) - return !loc_is_v4 && !rem_is_v4; + spin_lock_bh(&msk->pm.lock); - return loc_is_v4 == rem_is_v4; -#else - return mptcp_is_v4 && loc->family == AF_INET && rem->family == AF_INET; -#endif + pr_debug("msk=%p status=%x\n", msk, pm->status); + if (pm->status & BIT(MPTCP_PM_ADD_ADDR_SEND_ACK)) { + pm->status &= ~BIT(MPTCP_PM_ADD_ADDR_SEND_ACK); + mptcp_pm_addr_send_ack(msk); + } + if (pm->status & BIT(MPTCP_PM_RM_ADDR_RECEIVED)) { + pm->status &= ~BIT(MPTCP_PM_RM_ADDR_RECEIVED); + mptcp_pm_rm_addr_recv(msk); + } + __mptcp_pm_kernel_worker(msk); + + spin_unlock_bh(&msk->pm.lock); +} + +void mptcp_pm_destroy(struct mptcp_sock *msk) +{ + mptcp_pm_free_anno_list(msk); + + if (mptcp_pm_is_userspace(msk)) + mptcp_userspace_pm_free_local_addr_list(msk); } void mptcp_pm_data_reset(struct mptcp_sock *msk) @@ -505,10 +983,7 @@ void mptcp_pm_data_reset(struct mptcp_sock *msk) u8 pm_type = mptcp_get_pm_type(sock_net((struct sock *)msk)); struct mptcp_pm_data *pm = &msk->pm; - pm->add_addr_signaled = 0; - pm->add_addr_accepted = 0; - pm->local_addr_used = 0; - pm->subflows = 0; + memset(&pm->reset, 0, sizeof(pm->reset)); pm->rm_list_tx.nr = 0; pm->rm_list_rx.nr = 0; WRITE_ONCE(pm->pm_type, pm_type); @@ -527,16 +1002,9 @@ void mptcp_pm_data_reset(struct mptcp_sock *msk) !!mptcp_pm_get_add_addr_accept_max(msk) && subflows_allowed); WRITE_ONCE(pm->accept_subflow, subflows_allowed); - } else { - WRITE_ONCE(pm->work_pending, 0); - WRITE_ONCE(pm->accept_addr, 0); - WRITE_ONCE(pm->accept_subflow, 0); - } - WRITE_ONCE(pm->addr_signal, 0); - WRITE_ONCE(pm->remote_deny_join_id0, false); - pm->status = 0; - 
bitmap_fill(msk->pm.id_avail_bitmap, MPTCP_PM_MAX_ADDR_ID + 1); + bitmap_fill(pm->id_avail_bitmap, MPTCP_PM_MAX_ADDR_ID + 1); + } } void mptcp_pm_data_init(struct mptcp_sock *msk) @@ -549,5 +1017,75 @@ void mptcp_pm_data_init(struct mptcp_sock *msk) void __init mptcp_pm_init(void) { + mptcp_pm_kernel_register(); + mptcp_pm_userspace_register(); mptcp_pm_nl_init(); } + +/* Must be called with rcu read lock held */ +struct mptcp_pm_ops *mptcp_pm_find(const char *name) +{ + struct mptcp_pm_ops *pm_ops; + + list_for_each_entry_rcu(pm_ops, &mptcp_pm_list, list) { + if (!strcmp(pm_ops->name, name)) + return pm_ops; + } + + return NULL; +} + +int mptcp_pm_validate(struct mptcp_pm_ops *pm_ops) +{ + return 0; +} + +int mptcp_pm_register(struct mptcp_pm_ops *pm_ops) +{ + int ret; + + ret = mptcp_pm_validate(pm_ops); + if (ret) + return ret; + + spin_lock(&mptcp_pm_list_lock); + if (mptcp_pm_find(pm_ops->name)) { + spin_unlock(&mptcp_pm_list_lock); + return -EEXIST; + } + list_add_tail_rcu(&pm_ops->list, &mptcp_pm_list); + spin_unlock(&mptcp_pm_list_lock); + + pr_debug("%s registered\n", pm_ops->name); + return 0; +} + +void mptcp_pm_unregister(struct mptcp_pm_ops *pm_ops) +{ + /* skip unregistering the default path manager */ + if (WARN_ON_ONCE(pm_ops == &mptcp_pm_kernel)) + return; + + spin_lock(&mptcp_pm_list_lock); + list_del_rcu(&pm_ops->list); + spin_unlock(&mptcp_pm_list_lock); +} + +/* Build string with list of available path manager values. + * Similar to tcp_get_available_congestion_control() + */ +void mptcp_pm_get_available(char *buf, size_t maxlen) +{ + struct mptcp_pm_ops *pm_ops; + size_t offs = 0; + + rcu_read_lock(); + list_for_each_entry_rcu(pm_ops, &mptcp_pm_list, list) { + offs += snprintf(buf + offs, maxlen - offs, "%s%s", + offs == 0 ? "" : " ", pm_ops->name); + + if (WARN_ON_ONCE(offs >= maxlen)) + break; + } + rcu_read_unlock(); +} diff --git a/net/mptcp/pm_kernel.c b/net/mptcp/pm_kernel.c new file mode 100644 index 0000000000000..d39e7c1784608 --- /dev/null +++ b/net/mptcp/pm_kernel.c @@ -0,0 +1,1412 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Multipath TCP + * + * Copyright (c) 2025, Matthieu Baerts. 
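+ * + * In-kernel path manager: announces the endpoints configured over + * netlink and creates new subflows within the per-netns limits below.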
+ */ + +#define pr_fmt(fmt) "MPTCP: " fmt + +#include + +#include "protocol.h" +#include "mib.h" +#include "mptcp_pm_gen.h" + +static int pm_nl_pernet_id; + +struct pm_nl_pernet { + /* protects pernet updates */ + spinlock_t lock; + struct list_head local_addr_list; + unsigned int addrs; + unsigned int stale_loss_cnt; + unsigned int add_addr_signal_max; + unsigned int add_addr_accept_max; + unsigned int local_addr_max; + unsigned int subflows_max; + unsigned int next_id; + DECLARE_BITMAP(id_bitmap, MPTCP_PM_MAX_ADDR_ID + 1); +}; + +#define MPTCP_PM_ADDR_MAX 8 + +static struct pm_nl_pernet *pm_nl_get_pernet(const struct net *net) +{ + return net_generic(net, pm_nl_pernet_id); +} + +static struct pm_nl_pernet * +pm_nl_get_pernet_from_msk(const struct mptcp_sock *msk) +{ + return pm_nl_get_pernet(sock_net((struct sock *)msk)); +} + +static struct pm_nl_pernet *genl_info_pm_nl(struct genl_info *info) +{ + return pm_nl_get_pernet(genl_info_net(info)); +} + +unsigned int mptcp_pm_get_add_addr_signal_max(const struct mptcp_sock *msk) +{ + const struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk); + + return READ_ONCE(pernet->add_addr_signal_max); +} +EXPORT_SYMBOL_GPL(mptcp_pm_get_add_addr_signal_max); + +unsigned int mptcp_pm_get_add_addr_accept_max(const struct mptcp_sock *msk) +{ + struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk); + + return READ_ONCE(pernet->add_addr_accept_max); +} +EXPORT_SYMBOL_GPL(mptcp_pm_get_add_addr_accept_max); + +unsigned int mptcp_pm_get_subflows_max(const struct mptcp_sock *msk) +{ + struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk); + + return READ_ONCE(pernet->subflows_max); +} +EXPORT_SYMBOL_GPL(mptcp_pm_get_subflows_max); + +unsigned int mptcp_pm_get_local_addr_max(const struct mptcp_sock *msk) +{ + struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk); + + return READ_ONCE(pernet->local_addr_max); +} +EXPORT_SYMBOL_GPL(mptcp_pm_get_local_addr_max); + +static bool lookup_subflow_by_daddr(const struct list_head *list, + const struct mptcp_addr_info *daddr) +{ + struct mptcp_subflow_context *subflow; + struct mptcp_addr_info cur; + + list_for_each_entry(subflow, list, node) { + struct sock *ssk = mptcp_subflow_tcp_sock(subflow); + + if (!((1 << inet_sk_state_load(ssk)) & + (TCPF_ESTABLISHED | TCPF_SYN_SENT | TCPF_SYN_RECV))) + continue; + + mptcp_remote_address((struct sock_common *)ssk, &cur); + if (mptcp_addresses_equal(&cur, daddr, daddr->port)) + return true; + } + + return false; +} + +static bool +select_local_address(const struct pm_nl_pernet *pernet, + const struct mptcp_sock *msk, + struct mptcp_pm_local *new_local) +{ + struct mptcp_pm_addr_entry *entry; + bool found = false; + + msk_owned_by_me(msk); + + rcu_read_lock(); + list_for_each_entry_rcu(entry, &pernet->local_addr_list, list) { + if (!(entry->flags & MPTCP_PM_ADDR_FLAG_SUBFLOW)) + continue; + + if (!test_bit(entry->addr.id, msk->pm.id_avail_bitmap)) + continue; + + new_local->addr = entry->addr; + new_local->flags = entry->flags; + new_local->ifindex = entry->ifindex; + found = true; + break; + } + rcu_read_unlock(); + + return found; +} + +static bool +select_signal_address(struct pm_nl_pernet *pernet, const struct mptcp_sock *msk, + struct mptcp_pm_local *new_local) +{ + struct mptcp_pm_addr_entry *entry; + bool found = false; + + rcu_read_lock(); + /* do not keep any additional per socket state, just signal + * the address list in order. 
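+ * Entries already used by this msk are skipped via the per-msk + * id_avail_bitmap, so each address is only signalled once.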
+ * Note: removal from the local address list during the msk life-cycle + * can lead to additional addresses not being announced. + */ + list_for_each_entry_rcu(entry, &pernet->local_addr_list, list) { + if (!test_bit(entry->addr.id, msk->pm.id_avail_bitmap)) + continue; + + if (!(entry->flags & MPTCP_PM_ADDR_FLAG_SIGNAL)) + continue; + + new_local->addr = entry->addr; + new_local->flags = entry->flags; + new_local->ifindex = entry->ifindex; + found = true; + break; + } + rcu_read_unlock(); + + return found; +} + +/* Fill all the remote addresses into the array addrs[], + * and return the array size. + */ +static unsigned int fill_remote_addresses_vec(struct mptcp_sock *msk, + struct mptcp_addr_info *local, + bool fullmesh, + struct mptcp_addr_info *addrs) +{ + bool deny_id0 = READ_ONCE(msk->pm.remote_deny_join_id0); + struct sock *sk = (struct sock *)msk, *ssk; + struct mptcp_subflow_context *subflow; + struct mptcp_addr_info remote = { 0 }; + unsigned int subflows_max; + int i = 0; + + subflows_max = mptcp_pm_get_subflows_max(msk); + mptcp_remote_address((struct sock_common *)sk, &remote); + + /* Non-fullmesh endpoint, fill in the single entry + * corresponding to the primary MPC subflow remote address + */ + if (!fullmesh) { + if (deny_id0) + return 0; + + if (!mptcp_pm_addr_families_match(sk, local, &remote)) + return 0; + + msk->pm.subflows++; + addrs[i++] = remote; + } else { + DECLARE_BITMAP(unavail_id, MPTCP_PM_MAX_ADDR_ID + 1); + + /* Forbid creation of new subflows matching existing + * ones, possibly already created by incoming ADD_ADDR + */ + bitmap_zero(unavail_id, MPTCP_PM_MAX_ADDR_ID + 1); + mptcp_for_each_subflow(msk, subflow) + if (READ_ONCE(subflow->local_id) == local->id) + __set_bit(subflow->remote_id, unavail_id); + + mptcp_for_each_subflow(msk, subflow) { + ssk = mptcp_subflow_tcp_sock(subflow); + mptcp_remote_address((struct sock_common *)ssk, &addrs[i]); + addrs[i].id = READ_ONCE(subflow->remote_id); + if (deny_id0 && !addrs[i].id) + continue; + + if (test_bit(addrs[i].id, unavail_id)) + continue; + + if (!mptcp_pm_addr_families_match(sk, local, &addrs[i])) + continue; + + if (msk->pm.subflows < subflows_max) { + /* forbid creating multiple address towards + * this id + */ + __set_bit(addrs[i].id, unavail_id); + msk->pm.subflows++; + i++; + } + } + } + + return i; +} + +static struct mptcp_pm_addr_entry * +__lookup_addr_by_id(struct pm_nl_pernet *pernet, unsigned int id) +{ + struct mptcp_pm_addr_entry *entry; + + list_for_each_entry_rcu(entry, &pernet->local_addr_list, list, + lockdep_is_held(&pernet->lock)) { + if (entry->addr.id == id) + return entry; + } + return NULL; +} + +static struct mptcp_pm_addr_entry * +__lookup_addr(struct pm_nl_pernet *pernet, const struct mptcp_addr_info *info) +{ + struct mptcp_pm_addr_entry *entry; + + list_for_each_entry_rcu(entry, &pernet->local_addr_list, list, + lockdep_is_held(&pernet->lock)) { + if (mptcp_addresses_equal(&entry->addr, info, entry->addr.port)) + return entry; + } + return NULL; +} + +static void mptcp_pm_create_subflow_or_signal_addr(struct mptcp_sock *msk) +{ + struct sock *sk = (struct sock *)msk; + unsigned int add_addr_signal_max; + bool signal_and_subflow = false; + unsigned int local_addr_max; + struct pm_nl_pernet *pernet; + struct mptcp_pm_local local; + unsigned int subflows_max; + + pernet = pm_nl_get_pernet(sock_net(sk)); + + add_addr_signal_max = mptcp_pm_get_add_addr_signal_max(msk); + local_addr_max = mptcp_pm_get_local_addr_max(msk); + subflows_max = mptcp_pm_get_subflows_max(msk); + + /* do lazy 
endpoint usage accounting for the MPC subflows */ + if (unlikely(!(msk->pm.status & BIT(MPTCP_PM_MPC_ENDPOINT_ACCOUNTED))) && msk->first) { + struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(msk->first); + struct mptcp_pm_addr_entry *entry; + struct mptcp_addr_info mpc_addr; + bool backup = false; + + mptcp_local_address((struct sock_common *)msk->first, &mpc_addr); + rcu_read_lock(); + entry = __lookup_addr(pernet, &mpc_addr); + if (entry) { + __clear_bit(entry->addr.id, msk->pm.id_avail_bitmap); + msk->mpc_endpoint_id = entry->addr.id; + backup = !!(entry->flags & MPTCP_PM_ADDR_FLAG_BACKUP); + } + rcu_read_unlock(); + + if (backup) + mptcp_pm_send_ack(msk, subflow, true, backup); + + msk->pm.status |= BIT(MPTCP_PM_MPC_ENDPOINT_ACCOUNTED); + } + + pr_debug("local %d:%d signal %d:%d subflows %d:%d\n", + msk->pm.local_addr_used, local_addr_max, + msk->pm.add_addr_signaled, add_addr_signal_max, + msk->pm.subflows, subflows_max); + + /* check first for announce */ + if (msk->pm.add_addr_signaled < add_addr_signal_max) { + /* due to racing events on both ends we can reach here while + * previous add address is still running: if we invoke now + * mptcp_pm_announce_addr(), that will fail and the + * corresponding id will be marked as used. + * Instead let the PM machinery reschedule us when the + * current address announce will be completed. + */ + if (msk->pm.addr_signal & BIT(MPTCP_ADD_ADDR_SIGNAL)) + return; + + if (!select_signal_address(pernet, msk, &local)) + goto subflow; + + /* If the alloc fails, we are on memory pressure, not worth + * continuing, and trying to create subflows. + */ + if (!mptcp_pm_alloc_anno_list(msk, &local.addr)) + return; + + __clear_bit(local.addr.id, msk->pm.id_avail_bitmap); + msk->pm.add_addr_signaled++; + + /* Special case for ID0: set the correct ID */ + if (local.addr.id == msk->mpc_endpoint_id) + local.addr.id = 0; + + mptcp_pm_announce_addr(msk, &local.addr, false); + mptcp_pm_addr_send_ack(msk); + + if (local.flags & MPTCP_PM_ADDR_FLAG_SUBFLOW) + signal_and_subflow = true; + } + +subflow: + /* check if should create a new subflow */ + while (msk->pm.local_addr_used < local_addr_max && + msk->pm.subflows < subflows_max) { + struct mptcp_addr_info addrs[MPTCP_PM_ADDR_MAX]; + bool fullmesh; + int i, nr; + + if (signal_and_subflow) + signal_and_subflow = false; + else if (!select_local_address(pernet, msk, &local)) + break; + + fullmesh = !!(local.flags & MPTCP_PM_ADDR_FLAG_FULLMESH); + + __clear_bit(local.addr.id, msk->pm.id_avail_bitmap); + + /* Special case for ID0: set the correct ID */ + if (local.addr.id == msk->mpc_endpoint_id) + local.addr.id = 0; + else /* local_addr_used is not decr for ID 0 */ + msk->pm.local_addr_used++; + + nr = fill_remote_addresses_vec(msk, &local.addr, fullmesh, addrs); + if (nr == 0) + continue; + + spin_unlock_bh(&msk->pm.lock); + for (i = 0; i < nr; i++) + __mptcp_subflow_connect(sk, &local, &addrs[i]); + spin_lock_bh(&msk->pm.lock); + } + mptcp_pm_nl_check_work_pending(msk); +} + +static void mptcp_pm_nl_fully_established(struct mptcp_sock *msk) +{ + mptcp_pm_create_subflow_or_signal_addr(msk); +} + +static void mptcp_pm_nl_subflow_established(struct mptcp_sock *msk) +{ + mptcp_pm_create_subflow_or_signal_addr(msk); +} + +/* Fill all the local addresses into the array addrs[], + * and return the array size. 
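+ * Only fullmesh endpoints whose family matches @remote are used; when + * none matches, a single zeroed 'any' address is returned so that the + * routing configuration picks the local source address.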
+ */ +static unsigned int fill_local_addresses_vec(struct mptcp_sock *msk, + struct mptcp_addr_info *remote, + struct mptcp_pm_local *locals) +{ + struct sock *sk = (struct sock *)msk; + struct mptcp_pm_addr_entry *entry; + struct mptcp_addr_info mpc_addr; + struct pm_nl_pernet *pernet; + unsigned int subflows_max; + int i = 0; + + pernet = pm_nl_get_pernet_from_msk(msk); + subflows_max = mptcp_pm_get_subflows_max(msk); + + mptcp_local_address((struct sock_common *)msk, &mpc_addr); + + rcu_read_lock(); + list_for_each_entry_rcu(entry, &pernet->local_addr_list, list) { + if (!(entry->flags & MPTCP_PM_ADDR_FLAG_FULLMESH)) + continue; + + if (!mptcp_pm_addr_families_match(sk, &entry->addr, remote)) + continue; + + if (msk->pm.subflows < subflows_max) { + locals[i].addr = entry->addr; + locals[i].flags = entry->flags; + locals[i].ifindex = entry->ifindex; + + /* Special case for ID0: set the correct ID */ + if (mptcp_addresses_equal(&locals[i].addr, &mpc_addr, locals[i].addr.port)) + locals[i].addr.id = 0; + + msk->pm.subflows++; + i++; + } + } + rcu_read_unlock(); + + /* If the array is empty, fill in the single + * 'IPADDRANY' local address + */ + if (!i) { + memset(&locals[i], 0, sizeof(locals[i])); + locals[i].addr.family = +#if IS_ENABLED(CONFIG_MPTCP_IPV6) + remote->family == AF_INET6 && + ipv6_addr_v4mapped(&remote->addr6) ? AF_INET : +#endif + remote->family; + + if (!mptcp_pm_addr_families_match(sk, &locals[i].addr, remote)) + return 0; + + msk->pm.subflows++; + i++; + } + + return i; +} + +static void mptcp_pm_nl_add_addr_received(struct mptcp_sock *msk) +{ + struct mptcp_pm_local locals[MPTCP_PM_ADDR_MAX]; + struct sock *sk = (struct sock *)msk; + unsigned int add_addr_accept_max; + struct mptcp_addr_info remote; + unsigned int subflows_max; + bool sf_created = false; + int i, nr; + + add_addr_accept_max = mptcp_pm_get_add_addr_accept_max(msk); + subflows_max = mptcp_pm_get_subflows_max(msk); + + pr_debug("accepted %d:%d remote family %d\n", + msk->pm.add_addr_accepted, add_addr_accept_max, + msk->pm.remote.family); + + remote = msk->pm.remote; + mptcp_pm_announce_addr(msk, &remote, true); + mptcp_pm_addr_send_ack(msk); + + if (lookup_subflow_by_daddr(&msk->conn_list, &remote)) + return; + + /* if the ADD_ADDR carried no port, fall back to the port of the + * ID0 (initial subflow) remote address + */ + if (!remote.port) + remote.port = sk->sk_dport; + + /* connect to the specified remote address, using whatever + * local address the routing configuration will pick. + */ + nr = fill_local_addresses_vec(msk, &remote, locals); + if (nr == 0) + return; + + spin_unlock_bh(&msk->pm.lock); + for (i = 0; i < nr; i++) + if (__mptcp_subflow_connect(sk, &locals[i], &remote) == 0) + sf_created = true; + spin_lock_bh(&msk->pm.lock); + + if (sf_created) { + /* add_addr_accepted is not decremented for ID 0 */ + if (remote.id) + msk->pm.add_addr_accepted++; + if (msk->pm.add_addr_accepted >= add_addr_accept_max || + msk->pm.subflows >= subflows_max) + WRITE_ONCE(msk->pm.accept_addr, false); + } +} + +void mptcp_pm_nl_rm_addr(struct mptcp_sock *msk, u8 rm_id) +{ + if (rm_id && !WARN_ON_ONCE(msk->pm.add_addr_accepted == 0)) { + /* Note: if the subflow has been closed before, this + * add_addr_accepted counter will not be decremented.
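+ * The counter may therefore over-count; the only consequence is + * that accept_addr is re-enabled later than strictly necessary.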
+ */ + if (--msk->pm.add_addr_accepted < mptcp_pm_get_add_addr_accept_max(msk)) + WRITE_ONCE(msk->pm.accept_addr, true); + } +} + +static bool address_use_port(struct mptcp_pm_addr_entry *entry) +{ + return (entry->flags & + (MPTCP_PM_ADDR_FLAG_SIGNAL | MPTCP_PM_ADDR_FLAG_SUBFLOW)) == + MPTCP_PM_ADDR_FLAG_SIGNAL; +} + +/* caller must ensure the RCU grace period is already elapsed */ +static void __mptcp_pm_release_addr_entry(struct mptcp_pm_addr_entry *entry) +{ + if (entry->lsk) + sock_release(entry->lsk); + kfree(entry); +} + +static int mptcp_pm_nl_append_new_local_addr(struct pm_nl_pernet *pernet, + struct mptcp_pm_addr_entry *entry, + bool needs_id, bool replace) +{ + struct mptcp_pm_addr_entry *cur, *del_entry = NULL; + unsigned int addr_max; + int ret = -EINVAL; + + spin_lock_bh(&pernet->lock); + /* to keep the code simple, don't do IDR-like allocation for address ID, + * just bail when we exceed limits + */ + if (pernet->next_id == MPTCP_PM_MAX_ADDR_ID) + pernet->next_id = 1; + if (pernet->addrs >= MPTCP_PM_ADDR_MAX) { + ret = -ERANGE; + goto out; + } + if (test_bit(entry->addr.id, pernet->id_bitmap)) { + ret = -EBUSY; + goto out; + } + + /* do not insert duplicate address, differentiate on port only + * singled addresses + */ + if (!address_use_port(entry)) + entry->addr.port = 0; + list_for_each_entry(cur, &pernet->local_addr_list, list) { + if (mptcp_addresses_equal(&cur->addr, &entry->addr, + cur->addr.port || entry->addr.port)) { + /* allow replacing the exiting endpoint only if such + * endpoint is an implicit one and the user-space + * did not provide an endpoint id + */ + if (!(cur->flags & MPTCP_PM_ADDR_FLAG_IMPLICIT)) { + ret = -EEXIST; + goto out; + } + if (entry->addr.id) + goto out; + + /* allow callers that only need to look up the local + * addr's id to skip replacement. This allows them to + * avoid calling synchronize_rcu in the packet recv + * path. 
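+ * mptcp_pm_nl_get_local_id() is one such caller: it passes + * replace=false and only consumes the returned address ID.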
+ */ + if (!replace) { + kfree(entry); + ret = cur->addr.id; + goto out; + } + + pernet->addrs--; + entry->addr.id = cur->addr.id; + list_del_rcu(&cur->list); + del_entry = cur; + break; + } + } + + if (!entry->addr.id && needs_id) { +find_next: + entry->addr.id = find_next_zero_bit(pernet->id_bitmap, + MPTCP_PM_MAX_ADDR_ID + 1, + pernet->next_id); + if (!entry->addr.id && pernet->next_id != 1) { + pernet->next_id = 1; + goto find_next; + } + } + + if (!entry->addr.id && needs_id) + goto out; + + __set_bit(entry->addr.id, pernet->id_bitmap); + if (entry->addr.id > pernet->next_id) + pernet->next_id = entry->addr.id; + + if (entry->flags & MPTCP_PM_ADDR_FLAG_SIGNAL) { + addr_max = pernet->add_addr_signal_max; + WRITE_ONCE(pernet->add_addr_signal_max, addr_max + 1); + } + if (entry->flags & MPTCP_PM_ADDR_FLAG_SUBFLOW) { + addr_max = pernet->local_addr_max; + WRITE_ONCE(pernet->local_addr_max, addr_max + 1); + } + + pernet->addrs++; + if (!entry->addr.port) + list_add_tail_rcu(&entry->list, &pernet->local_addr_list); + else + list_add_rcu(&entry->list, &pernet->local_addr_list); + ret = entry->addr.id; + +out: + spin_unlock_bh(&pernet->lock); + + /* just replaced an existing entry, free it */ + if (del_entry) { + synchronize_rcu(); + __mptcp_pm_release_addr_entry(del_entry); + } + return ret; +} + +static struct lock_class_key mptcp_slock_keys[2]; +static struct lock_class_key mptcp_keys[2]; + +static int mptcp_pm_nl_create_listen_socket(struct sock *sk, + struct mptcp_pm_addr_entry *entry) +{ + bool is_ipv6 = sk->sk_family == AF_INET6; + int addrlen = sizeof(struct sockaddr_in); + struct sockaddr_storage addr; + struct sock *newsk, *ssk; + int backlog = 1024; + int err; + + err = sock_create_kern(sock_net(sk), entry->addr.family, + SOCK_STREAM, IPPROTO_MPTCP, &entry->lsk); + if (err) + return err; + + newsk = entry->lsk->sk; + if (!newsk) + return -EINVAL; + + /* The subflow socket lock is acquired in a nested to the msk one + * in several places, even by the TCP stack, and this msk is a kernel + * socket: lockdep complains. Instead of propagating the _nested + * modifiers in several places, re-init the lock class for the msk + * socket to an mptcp specific one. + */ + sock_lock_init_class_and_name(newsk, + is_ipv6 ? "mlock-AF_INET6" : "mlock-AF_INET", + &mptcp_slock_keys[is_ipv6], + is_ipv6 ? "msk_lock-AF_INET6" : "msk_lock-AF_INET", + &mptcp_keys[is_ipv6]); + + lock_sock(newsk); + ssk = __mptcp_nmpc_sk(mptcp_sk(newsk)); + release_sock(newsk); + if (IS_ERR(ssk)) + return PTR_ERR(ssk); + + mptcp_info2sockaddr(&entry->addr, &addr, entry->addr.family); +#if IS_ENABLED(CONFIG_MPTCP_IPV6) + if (entry->addr.family == AF_INET6) + addrlen = sizeof(struct sockaddr_in6); +#endif + if (ssk->sk_family == AF_INET) + err = inet_bind_sk(ssk, (struct sockaddr *)&addr, addrlen); +#if IS_ENABLED(CONFIG_MPTCP_IPV6) + else if (ssk->sk_family == AF_INET6) + err = inet6_bind_sk(ssk, (struct sockaddr *)&addr, addrlen); +#endif + if (err) + return err; + + /* We don't use mptcp_set_state() here because it needs to be called + * under the msk socket lock. For the moment, that will not bring + * anything more than only calling inet_sk_state_store(), because the + * old status is known (TCP_CLOSE). 
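+ * Moving to TCP_LISTEN before __inet_listen_sk() keeps the msk state + * in sync with the listening subflow set up just below.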
+ */ + inet_sk_state_store(newsk, TCP_LISTEN); + lock_sock(ssk); + WRITE_ONCE(mptcp_subflow_ctx(ssk)->pm_listener, true); + err = __inet_listen_sk(ssk, backlog); + if (!err) + mptcp_event_pm_listener(ssk, MPTCP_EVENT_LISTENER_CREATED); + release_sock(ssk); + return err; +} + +int mptcp_pm_nl_get_local_id(struct mptcp_sock *msk, + struct mptcp_pm_addr_entry *skc) +{ + struct mptcp_pm_addr_entry *entry; + struct pm_nl_pernet *pernet; + int ret; + + pernet = pm_nl_get_pernet_from_msk(msk); + + rcu_read_lock(); + entry = __lookup_addr(pernet, &skc->addr); + ret = entry ? entry->addr.id : -1; + rcu_read_unlock(); + if (ret >= 0) + return ret; + + /* address not found, add to local list */ + entry = kmemdup(skc, sizeof(*skc), GFP_ATOMIC); + if (!entry) + return -ENOMEM; + + entry->addr.port = 0; + ret = mptcp_pm_nl_append_new_local_addr(pernet, entry, true, false); + if (ret < 0) + kfree(entry); + + return ret; +} + +bool mptcp_pm_nl_is_backup(struct mptcp_sock *msk, struct mptcp_addr_info *skc) +{ + struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk); + struct mptcp_pm_addr_entry *entry; + bool backup; + + rcu_read_lock(); + entry = __lookup_addr(pernet, skc); + backup = entry && !!(entry->flags & MPTCP_PM_ADDR_FLAG_BACKUP); + rcu_read_unlock(); + + return backup; +} + +static int mptcp_nl_add_subflow_or_signal_addr(struct net *net, + struct mptcp_addr_info *addr) +{ + struct mptcp_sock *msk; + long s_slot = 0, s_num = 0; + + while ((msk = mptcp_token_iter_next(net, &s_slot, &s_num)) != NULL) { + struct sock *sk = (struct sock *)msk; + struct mptcp_addr_info mpc_addr; + + if (!READ_ONCE(msk->fully_established) || + mptcp_pm_is_userspace(msk)) + goto next; + + /* if the endp linked to the init sf is re-added with a != ID */ + mptcp_local_address((struct sock_common *)msk, &mpc_addr); + + lock_sock(sk); + spin_lock_bh(&msk->pm.lock); + if (mptcp_addresses_equal(addr, &mpc_addr, addr->port)) + msk->mpc_endpoint_id = addr->id; + mptcp_pm_create_subflow_or_signal_addr(msk); + spin_unlock_bh(&msk->pm.lock); + release_sock(sk); + +next: + sock_put(sk); + cond_resched(); + } + + return 0; +} + +static bool mptcp_pm_has_addr_attr_id(const struct nlattr *attr, + struct genl_info *info) +{ + struct nlattr *tb[MPTCP_PM_ADDR_ATTR_MAX + 1]; + + if (!nla_parse_nested_deprecated(tb, MPTCP_PM_ADDR_ATTR_MAX, attr, + mptcp_pm_address_nl_policy, info->extack) && + tb[MPTCP_PM_ADDR_ATTR_ID]) + return true; + return false; +} + +/* Add an MPTCP endpoint */ +int mptcp_pm_nl_add_addr_doit(struct sk_buff *skb, struct genl_info *info) +{ + struct pm_nl_pernet *pernet = genl_info_pm_nl(info); + struct mptcp_pm_addr_entry addr, *entry; + struct nlattr *attr; + int ret; + + if (GENL_REQ_ATTR_CHECK(info, MPTCP_PM_ENDPOINT_ADDR)) + return -EINVAL; + + attr = info->attrs[MPTCP_PM_ENDPOINT_ADDR]; + ret = mptcp_pm_parse_entry(attr, info, true, &addr); + if (ret < 0) + return ret; + + if (addr.addr.port && !address_use_port(&addr)) { + NL_SET_ERR_MSG_ATTR(info->extack, attr, + "flags must have signal and not subflow when using port"); + return -EINVAL; + } + + if (addr.flags & MPTCP_PM_ADDR_FLAG_SIGNAL && + addr.flags & MPTCP_PM_ADDR_FLAG_FULLMESH) { + NL_SET_ERR_MSG_ATTR(info->extack, attr, + "flags mustn't have both signal and fullmesh"); + return -EINVAL; + } + + if (addr.flags & MPTCP_PM_ADDR_FLAG_IMPLICIT) { + NL_SET_ERR_MSG_ATTR(info->extack, attr, + "can't create IMPLICIT endpoint"); + return -EINVAL; + } + + entry = kmemdup(&addr, sizeof(addr), GFP_KERNEL_ACCOUNT); + if (!entry) { + GENL_SET_ERR_MSG(info, "can't 
allocate addr"); + return -ENOMEM; + } + + if (entry->addr.port) { + ret = mptcp_pm_nl_create_listen_socket(skb->sk, entry); + if (ret) { + GENL_SET_ERR_MSG_FMT(info, "create listen socket error: %d", ret); + goto out_free; + } + } + ret = mptcp_pm_nl_append_new_local_addr(pernet, entry, + !mptcp_pm_has_addr_attr_id(attr, info), + true); + if (ret < 0) { + GENL_SET_ERR_MSG_FMT(info, "too many addresses or duplicate one: %d", ret); + goto out_free; + } + + mptcp_nl_add_subflow_or_signal_addr(sock_net(skb->sk), &entry->addr); + return 0; + +out_free: + __mptcp_pm_release_addr_entry(entry); + return ret; +} + +static u8 mptcp_endp_get_local_id(struct mptcp_sock *msk, + const struct mptcp_addr_info *addr) +{ + return msk->mpc_endpoint_id == addr->id ? 0 : addr->id; +} + +static bool mptcp_pm_remove_anno_addr(struct mptcp_sock *msk, + const struct mptcp_addr_info *addr, + bool force) +{ + struct mptcp_rm_list list = { .nr = 0 }; + bool ret; + + list.ids[list.nr++] = mptcp_endp_get_local_id(msk, addr); + + ret = mptcp_remove_anno_list_by_saddr(msk, addr); + if (ret || force) { + spin_lock_bh(&msk->pm.lock); + if (ret) { + __set_bit(addr->id, msk->pm.id_avail_bitmap); + msk->pm.add_addr_signaled--; + } + mptcp_pm_remove_addr(msk, &list); + spin_unlock_bh(&msk->pm.lock); + } + return ret; +} + +static void __mark_subflow_endp_available(struct mptcp_sock *msk, u8 id) +{ + /* If it was marked as used, and not ID 0, decrement local_addr_used */ + if (!__test_and_set_bit(id ? : msk->mpc_endpoint_id, msk->pm.id_avail_bitmap) && + id && !WARN_ON_ONCE(msk->pm.local_addr_used == 0)) + msk->pm.local_addr_used--; +} + +static int mptcp_nl_remove_subflow_and_signal_addr(struct net *net, + const struct mptcp_pm_addr_entry *entry) +{ + const struct mptcp_addr_info *addr = &entry->addr; + struct mptcp_rm_list list = { .nr = 1 }; + long s_slot = 0, s_num = 0; + struct mptcp_sock *msk; + + pr_debug("remove_id=%d\n", addr->id); + + while ((msk = mptcp_token_iter_next(net, &s_slot, &s_num)) != NULL) { + struct sock *sk = (struct sock *)msk; + bool remove_subflow; + + if (mptcp_pm_is_userspace(msk)) + goto next; + + lock_sock(sk); + remove_subflow = mptcp_lookup_subflow_by_saddr(&msk->conn_list, addr); + mptcp_pm_remove_anno_addr(msk, addr, remove_subflow && + !(entry->flags & MPTCP_PM_ADDR_FLAG_IMPLICIT)); + + list.ids[0] = mptcp_endp_get_local_id(msk, addr); + if (remove_subflow) { + spin_lock_bh(&msk->pm.lock); + mptcp_pm_rm_subflow(msk, &list); + spin_unlock_bh(&msk->pm.lock); + } + + if (entry->flags & MPTCP_PM_ADDR_FLAG_SUBFLOW) { + spin_lock_bh(&msk->pm.lock); + __mark_subflow_endp_available(msk, list.ids[0]); + spin_unlock_bh(&msk->pm.lock); + } + + if (msk->mpc_endpoint_id == entry->addr.id) + msk->mpc_endpoint_id = 0; + release_sock(sk); + +next: + sock_put(sk); + cond_resched(); + } + + return 0; +} + +static int mptcp_nl_remove_id_zero_address(struct net *net, + struct mptcp_addr_info *addr) +{ + struct mptcp_rm_list list = { .nr = 0 }; + long s_slot = 0, s_num = 0; + struct mptcp_sock *msk; + + list.ids[list.nr++] = 0; + + while ((msk = mptcp_token_iter_next(net, &s_slot, &s_num)) != NULL) { + struct sock *sk = (struct sock *)msk; + struct mptcp_addr_info msk_local; + + if (list_empty(&msk->conn_list) || mptcp_pm_is_userspace(msk)) + goto next; + + mptcp_local_address((struct sock_common *)msk, &msk_local); + if (!mptcp_addresses_equal(&msk_local, addr, addr->port)) + goto next; + + lock_sock(sk); + spin_lock_bh(&msk->pm.lock); + mptcp_pm_remove_addr(msk, &list); + mptcp_pm_rm_subflow(msk, &list); + 
__mark_subflow_endp_available(msk, 0); + spin_unlock_bh(&msk->pm.lock); + release_sock(sk); + +next: + sock_put(sk); + cond_resched(); + } + + return 0; +} + +/* Remove an MPTCP endpoint */ +int mptcp_pm_nl_del_addr_doit(struct sk_buff *skb, struct genl_info *info) +{ + struct pm_nl_pernet *pernet = genl_info_pm_nl(info); + struct mptcp_pm_addr_entry addr, *entry; + unsigned int addr_max; + struct nlattr *attr; + int ret; + + if (GENL_REQ_ATTR_CHECK(info, MPTCP_PM_ENDPOINT_ADDR)) + return -EINVAL; + + attr = info->attrs[MPTCP_PM_ENDPOINT_ADDR]; + ret = mptcp_pm_parse_entry(attr, info, false, &addr); + if (ret < 0) + return ret; + + /* the zero id address is special: the first address used by the msk + * always gets such an id, so different subflows can have different zero + * id addresses. Additionally zero id is not accounted for in id_bitmap. + * Let's use an 'mptcp_rm_list' instead of the common remove code. + */ + if (addr.addr.id == 0) + return mptcp_nl_remove_id_zero_address(sock_net(skb->sk), &addr.addr); + + spin_lock_bh(&pernet->lock); + entry = __lookup_addr_by_id(pernet, addr.addr.id); + if (!entry) { + NL_SET_ERR_MSG_ATTR(info->extack, attr, "address not found"); + spin_unlock_bh(&pernet->lock); + return -EINVAL; + } + if (entry->flags & MPTCP_PM_ADDR_FLAG_SIGNAL) { + addr_max = pernet->add_addr_signal_max; + WRITE_ONCE(pernet->add_addr_signal_max, addr_max - 1); + } + if (entry->flags & MPTCP_PM_ADDR_FLAG_SUBFLOW) { + addr_max = pernet->local_addr_max; + WRITE_ONCE(pernet->local_addr_max, addr_max - 1); + } + + pernet->addrs--; + list_del_rcu(&entry->list); + __clear_bit(entry->addr.id, pernet->id_bitmap); + spin_unlock_bh(&pernet->lock); + + mptcp_nl_remove_subflow_and_signal_addr(sock_net(skb->sk), entry); + synchronize_rcu(); + __mptcp_pm_release_addr_entry(entry); + + return ret; +} + +static void mptcp_pm_flush_addrs_and_subflows(struct mptcp_sock *msk, + struct list_head *rm_list) +{ + struct mptcp_rm_list alist = { .nr = 0 }, slist = { .nr = 0 }; + struct mptcp_pm_addr_entry *entry; + + list_for_each_entry(entry, rm_list, list) { + if (slist.nr < MPTCP_RM_IDS_MAX && + mptcp_lookup_subflow_by_saddr(&msk->conn_list, &entry->addr)) + slist.ids[slist.nr++] = mptcp_endp_get_local_id(msk, &entry->addr); + + if (alist.nr < MPTCP_RM_IDS_MAX && + mptcp_remove_anno_list_by_saddr(msk, &entry->addr)) + alist.ids[alist.nr++] = mptcp_endp_get_local_id(msk, &entry->addr); + } + + spin_lock_bh(&msk->pm.lock); + if (alist.nr) { + msk->pm.add_addr_signaled -= alist.nr; + mptcp_pm_remove_addr(msk, &alist); + } + if (slist.nr) + mptcp_pm_rm_subflow(msk, &slist); + /* Reset counters: maybe some subflows have been removed before */ + bitmap_fill(msk->pm.id_avail_bitmap, MPTCP_PM_MAX_ADDR_ID + 1); + msk->pm.local_addr_used = 0; + spin_unlock_bh(&msk->pm.lock); +} + +static void mptcp_nl_flush_addrs_list(struct net *net, + struct list_head *rm_list) +{ + long s_slot = 0, s_num = 0; + struct mptcp_sock *msk; + + if (list_empty(rm_list)) + return; + + while ((msk = mptcp_token_iter_next(net, &s_slot, &s_num)) != NULL) { + struct sock *sk = (struct sock *)msk; + + if (!mptcp_pm_is_userspace(msk)) { + lock_sock(sk); + mptcp_pm_flush_addrs_and_subflows(msk, rm_list); + release_sock(sk); + } + + sock_put(sk); + cond_resched(); + } +} + +/* caller must ensure the RCU grace period is already elapsed */ +static void __flush_addrs(struct list_head *list) +{ + while (!list_empty(list)) { + struct mptcp_pm_addr_entry *cur; + + cur = list_entry(list->next, + struct mptcp_pm_addr_entry, list); + 
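/* the entry may still own a listener socket; the common helper + * below releases entry->lsk as well + */ + 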
list_del_rcu(&cur->list); + __mptcp_pm_release_addr_entry(cur); + } +} + +static void __reset_counters(struct pm_nl_pernet *pernet) +{ + WRITE_ONCE(pernet->add_addr_signal_max, 0); + WRITE_ONCE(pernet->add_addr_accept_max, 0); + WRITE_ONCE(pernet->local_addr_max, 0); + pernet->addrs = 0; +} + +int mptcp_pm_nl_flush_addrs_doit(struct sk_buff *skb, struct genl_info *info) +{ + struct pm_nl_pernet *pernet = genl_info_pm_nl(info); + LIST_HEAD(free_list); + + spin_lock_bh(&pernet->lock); + list_splice_init(&pernet->local_addr_list, &free_list); + __reset_counters(pernet); + pernet->next_id = 1; + bitmap_zero(pernet->id_bitmap, MPTCP_PM_MAX_ADDR_ID + 1); + spin_unlock_bh(&pernet->lock); + mptcp_nl_flush_addrs_list(sock_net(skb->sk), &free_list); + synchronize_rcu(); + __flush_addrs(&free_list); + return 0; +} + +int mptcp_pm_nl_get_addr(u8 id, struct mptcp_pm_addr_entry *addr, + struct genl_info *info) +{ + struct pm_nl_pernet *pernet = genl_info_pm_nl(info); + struct mptcp_pm_addr_entry *entry; + int ret = -EINVAL; + + rcu_read_lock(); + entry = __lookup_addr_by_id(pernet, id); + if (entry) { + *addr = *entry; + ret = 0; + } + rcu_read_unlock(); + + return ret; +} + +int mptcp_pm_nl_dump_addr(struct sk_buff *msg, + struct netlink_callback *cb) +{ + struct net *net = sock_net(msg->sk); + struct mptcp_pm_addr_entry *entry; + struct pm_nl_pernet *pernet; + int id = cb->args[0]; + int i; + + pernet = pm_nl_get_pernet(net); + + rcu_read_lock(); + for (i = id; i < MPTCP_PM_MAX_ADDR_ID + 1; i++) { + if (test_bit(i, pernet->id_bitmap)) { + entry = __lookup_addr_by_id(pernet, i); + if (!entry) + break; + + if (entry->addr.id <= id) + continue; + + if (mptcp_pm_genl_fill_addr(msg, cb, entry) < 0) + break; + + id = entry->addr.id; + } + } + rcu_read_unlock(); + + cb->args[0] = id; + return msg->len; +} + +static int parse_limit(struct genl_info *info, int id, unsigned int *limit) +{ + struct nlattr *attr = info->attrs[id]; + + if (!attr) + return 0; + + *limit = nla_get_u32(attr); + if (*limit > MPTCP_PM_ADDR_MAX) { + NL_SET_ERR_MSG_ATTR_FMT(info->extack, attr, + "limit greater than maximum (%u)", + MPTCP_PM_ADDR_MAX); + return -EINVAL; + } + return 0; +} + +int mptcp_pm_nl_set_limits_doit(struct sk_buff *skb, struct genl_info *info) +{ + struct pm_nl_pernet *pernet = genl_info_pm_nl(info); + unsigned int rcv_addrs, subflows; + int ret; + + spin_lock_bh(&pernet->lock); + rcv_addrs = pernet->add_addr_accept_max; + ret = parse_limit(info, MPTCP_PM_ATTR_RCV_ADD_ADDRS, &rcv_addrs); + if (ret) + goto unlock; + + subflows = pernet->subflows_max; + ret = parse_limit(info, MPTCP_PM_ATTR_SUBFLOWS, &subflows); + if (ret) + goto unlock; + + WRITE_ONCE(pernet->add_addr_accept_max, rcv_addrs); + WRITE_ONCE(pernet->subflows_max, subflows); + +unlock: + spin_unlock_bh(&pernet->lock); + return ret; +} + +int mptcp_pm_nl_get_limits_doit(struct sk_buff *skb, struct genl_info *info) +{ + struct pm_nl_pernet *pernet = genl_info_pm_nl(info); + struct sk_buff *msg; + void *reply; + + msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + reply = genlmsg_put_reply(msg, info, &mptcp_genl_family, 0, + MPTCP_PM_CMD_GET_LIMITS); + if (!reply) + goto fail; + + if (nla_put_u32(msg, MPTCP_PM_ATTR_RCV_ADD_ADDRS, + READ_ONCE(pernet->add_addr_accept_max))) + goto fail; + + if (nla_put_u32(msg, MPTCP_PM_ATTR_SUBFLOWS, + READ_ONCE(pernet->subflows_max))) + goto fail; + + genlmsg_end(msg, reply); + return genlmsg_reply(msg, info); + +fail: + GENL_SET_ERR_MSG(info, "not enough space in Netlink message"); + 
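/* either the reply header or one of the two u32 attributes did + * not fit: drop the partially built message + */ + 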
nlmsg_free(msg); + return -EMSGSIZE; +} + +static void mptcp_pm_nl_fullmesh(struct mptcp_sock *msk, + struct mptcp_addr_info *addr) +{ + struct mptcp_rm_list list = { .nr = 0 }; + + list.ids[list.nr++] = mptcp_endp_get_local_id(msk, addr); + + spin_lock_bh(&msk->pm.lock); + mptcp_pm_rm_subflow(msk, &list); + __mark_subflow_endp_available(msk, list.ids[0]); + mptcp_pm_create_subflow_or_signal_addr(msk); + spin_unlock_bh(&msk->pm.lock); +} + +static void mptcp_pm_nl_set_flags_all(struct net *net, + struct mptcp_pm_addr_entry *local, + u8 changed) +{ + u8 is_subflow = !!(local->flags & MPTCP_PM_ADDR_FLAG_SUBFLOW); + u8 bkup = !!(local->flags & MPTCP_PM_ADDR_FLAG_BACKUP); + long s_slot = 0, s_num = 0; + struct mptcp_sock *msk; + + if (changed == MPTCP_PM_ADDR_FLAG_FULLMESH && !is_subflow) + return; + + while ((msk = mptcp_token_iter_next(net, &s_slot, &s_num)) != NULL) { + struct sock *sk = (struct sock *)msk; + + if (list_empty(&msk->conn_list) || mptcp_pm_is_userspace(msk)) + goto next; + + lock_sock(sk); + if (changed & MPTCP_PM_ADDR_FLAG_BACKUP) + mptcp_pm_mp_prio_send_ack(msk, &local->addr, NULL, bkup); + /* Subflows will only be recreated if the SUBFLOW flag is set */ + if (is_subflow && (changed & MPTCP_PM_ADDR_FLAG_FULLMESH)) + mptcp_pm_nl_fullmesh(msk, &local->addr); + release_sock(sk); + +next: + sock_put(sk); + cond_resched(); + } +} + +int mptcp_pm_nl_set_flags(struct mptcp_pm_addr_entry *local, + struct genl_info *info) +{ + struct nlattr *attr = info->attrs[MPTCP_PM_ATTR_ADDR]; + u8 changed, mask = MPTCP_PM_ADDR_FLAG_BACKUP | + MPTCP_PM_ADDR_FLAG_FULLMESH; + struct net *net = genl_info_net(info); + struct mptcp_pm_addr_entry *entry; + struct pm_nl_pernet *pernet; + u8 lookup_by_id = 0; + + pernet = pm_nl_get_pernet(net); + + if (local->addr.family == AF_UNSPEC) { + lookup_by_id = 1; + if (!local->addr.id) { + NL_SET_ERR_MSG_ATTR(info->extack, attr, + "missing address ID"); + return -EOPNOTSUPP; + } + } + + spin_lock_bh(&pernet->lock); + entry = lookup_by_id ? 
__lookup_addr_by_id(pernet, local->addr.id) : + __lookup_addr(pernet, &local->addr); + if (!entry) { + spin_unlock_bh(&pernet->lock); + NL_SET_ERR_MSG_ATTR(info->extack, attr, "address not found"); + return -EINVAL; + } + if ((local->flags & MPTCP_PM_ADDR_FLAG_FULLMESH) && + (entry->flags & (MPTCP_PM_ADDR_FLAG_SIGNAL | + MPTCP_PM_ADDR_FLAG_IMPLICIT))) { + spin_unlock_bh(&pernet->lock); + NL_SET_ERR_MSG_ATTR(info->extack, attr, "invalid addr flags"); + return -EINVAL; + } + + changed = (local->flags ^ entry->flags) & mask; + entry->flags = (entry->flags & ~mask) | (local->flags & mask); + *local = *entry; + spin_unlock_bh(&pernet->lock); + + mptcp_pm_nl_set_flags_all(net, local, changed); + return 0; +} + +bool mptcp_pm_nl_check_work_pending(struct mptcp_sock *msk) +{ + struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk); + + if (msk->pm.subflows == mptcp_pm_get_subflows_max(msk) || + (find_next_and_bit(pernet->id_bitmap, msk->pm.id_avail_bitmap, + MPTCP_PM_MAX_ADDR_ID + 1, 0) == MPTCP_PM_MAX_ADDR_ID + 1)) { + WRITE_ONCE(msk->pm.work_pending, false); + return false; + } + return true; +} + +/* Called under PM lock */ +void __mptcp_pm_kernel_worker(struct mptcp_sock *msk) +{ + struct mptcp_pm_data *pm = &msk->pm; + + if (pm->status & BIT(MPTCP_PM_ADD_ADDR_RECEIVED)) { + pm->status &= ~BIT(MPTCP_PM_ADD_ADDR_RECEIVED); + mptcp_pm_nl_add_addr_received(msk); + } + if (pm->status & BIT(MPTCP_PM_ESTABLISHED)) { + pm->status &= ~BIT(MPTCP_PM_ESTABLISHED); + mptcp_pm_nl_fully_established(msk); + } + if (pm->status & BIT(MPTCP_PM_SUBFLOW_ESTABLISHED)) { + pm->status &= ~BIT(MPTCP_PM_SUBFLOW_ESTABLISHED); + mptcp_pm_nl_subflow_established(msk); + } +} + +static int __net_init pm_nl_init_net(struct net *net) +{ + struct pm_nl_pernet *pernet = pm_nl_get_pernet(net); + + INIT_LIST_HEAD_RCU(&pernet->local_addr_list); + + /* Cit. 2 subflows ought to be enough for anybody. */ + pernet->subflows_max = 2; + pernet->next_id = 1; + pernet->stale_loss_cnt = 4; + spin_lock_init(&pernet->lock); + + /* No need to initialize other pernet fields, the struct is zeroed at + * allocation time. + */ + + return 0; +} + +static void __net_exit pm_nl_exit_net(struct list_head *net_list) +{ + struct net *net; + + list_for_each_entry(net, net_list, exit_list) { + struct pm_nl_pernet *pernet = pm_nl_get_pernet(net); + + /* net is removed from namespace list, can't race with + * other modifiers, also netns core already waited for a + * RCU grace period. 
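+ * Entries can thus be freed in place, honoring the "grace period + * already elapsed" contract of __flush_addrs() without an extra + * synchronize_rcu().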
+ */ + __flush_addrs(&pernet->local_addr_list); + } +} + +static struct pernet_operations mptcp_pm_pernet_ops = { + .init = pm_nl_init_net, + .exit_batch = pm_nl_exit_net, + .id = &pm_nl_pernet_id, + .size = sizeof(struct pm_nl_pernet), +}; + +struct mptcp_pm_ops mptcp_pm_kernel = { + .name = "kernel", + .owner = THIS_MODULE, +}; + +void __init mptcp_pm_kernel_register(void) +{ + if (register_pernet_subsys(&mptcp_pm_pernet_ops) < 0) + panic("Failed to register MPTCP PM pernet subsystem.\n"); + + mptcp_pm_register(&mptcp_pm_kernel); +} diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c index 572d160edca33..50aaf259959ae 100644 --- a/net/mptcp/pm_netlink.c +++ b/net/mptcp/pm_netlink.c @@ -6,1185 +6,8 @@ #define pr_fmt(fmt) "MPTCP: " fmt -#include -#include -#include -#include -#include - -#include "protocol.h" -#include "mib.h" -#include "mptcp_pm_gen.h" - -static int pm_nl_pernet_id; - -struct mptcp_pm_add_entry { - struct list_head list; - struct mptcp_addr_info addr; - u8 retrans_times; - struct timer_list add_timer; - struct mptcp_sock *sock; -}; - -struct pm_nl_pernet { - /* protects pernet updates */ - spinlock_t lock; - struct list_head local_addr_list; - unsigned int addrs; - unsigned int stale_loss_cnt; - unsigned int add_addr_signal_max; - unsigned int add_addr_accept_max; - unsigned int local_addr_max; - unsigned int subflows_max; - unsigned int next_id; - DECLARE_BITMAP(id_bitmap, MPTCP_PM_MAX_ADDR_ID + 1); -}; - -#define MPTCP_PM_ADDR_MAX 8 -#define ADD_ADDR_RETRANS_MAX 3 - -static struct pm_nl_pernet *pm_nl_get_pernet(const struct net *net) -{ - return net_generic(net, pm_nl_pernet_id); -} - -static struct pm_nl_pernet * -pm_nl_get_pernet_from_msk(const struct mptcp_sock *msk) -{ - return pm_nl_get_pernet(sock_net((struct sock *)msk)); -} - -bool mptcp_addresses_equal(const struct mptcp_addr_info *a, - const struct mptcp_addr_info *b, bool use_port) -{ - bool addr_equals = false; - - if (a->family == b->family) { - if (a->family == AF_INET) - addr_equals = a->addr.s_addr == b->addr.s_addr; -#if IS_ENABLED(CONFIG_MPTCP_IPV6) - else - addr_equals = !ipv6_addr_cmp(&a->addr6, &b->addr6); - } else if (a->family == AF_INET) { - if (ipv6_addr_v4mapped(&b->addr6)) - addr_equals = a->addr.s_addr == b->addr6.s6_addr32[3]; - } else if (b->family == AF_INET) { - if (ipv6_addr_v4mapped(&a->addr6)) - addr_equals = a->addr6.s6_addr32[3] == b->addr.s_addr; -#endif - } - - if (!addr_equals) - return false; - if (!use_port) - return true; - - return a->port == b->port; -} - -void mptcp_local_address(const struct sock_common *skc, struct mptcp_addr_info *addr) -{ - addr->family = skc->skc_family; - addr->port = htons(skc->skc_num); - if (addr->family == AF_INET) - addr->addr.s_addr = skc->skc_rcv_saddr; -#if IS_ENABLED(CONFIG_MPTCP_IPV6) - else if (addr->family == AF_INET6) - addr->addr6 = skc->skc_v6_rcv_saddr; -#endif -} - -static void remote_address(const struct sock_common *skc, - struct mptcp_addr_info *addr) -{ - addr->family = skc->skc_family; - addr->port = skc->skc_dport; - if (addr->family == AF_INET) - addr->addr.s_addr = skc->skc_daddr; -#if IS_ENABLED(CONFIG_MPTCP_IPV6) - else if (addr->family == AF_INET6) - addr->addr6 = skc->skc_v6_daddr; -#endif -} - -bool mptcp_lookup_subflow_by_saddr(const struct list_head *list, - const struct mptcp_addr_info *saddr) -{ - struct mptcp_subflow_context *subflow; - struct mptcp_addr_info cur; - struct sock_common *skc; - - list_for_each_entry(subflow, list, node) { - skc = (struct sock_common *)mptcp_subflow_tcp_sock(subflow); - - 
mptcp_local_address(skc, &cur); - if (mptcp_addresses_equal(&cur, saddr, saddr->port)) - return true; - } - - return false; -} - -static bool lookup_subflow_by_daddr(const struct list_head *list, - const struct mptcp_addr_info *daddr) -{ - struct mptcp_subflow_context *subflow; - struct mptcp_addr_info cur; - - list_for_each_entry(subflow, list, node) { - struct sock *ssk = mptcp_subflow_tcp_sock(subflow); - - if (!((1 << inet_sk_state_load(ssk)) & - (TCPF_ESTABLISHED | TCPF_SYN_SENT | TCPF_SYN_RECV))) - continue; - - remote_address((struct sock_common *)ssk, &cur); - if (mptcp_addresses_equal(&cur, daddr, daddr->port)) - return true; - } - - return false; -} - -static bool -select_local_address(const struct pm_nl_pernet *pernet, - const struct mptcp_sock *msk, - struct mptcp_pm_local *new_local) -{ - struct mptcp_pm_addr_entry *entry; - bool found = false; - - msk_owned_by_me(msk); - - rcu_read_lock(); - list_for_each_entry_rcu(entry, &pernet->local_addr_list, list) { - if (!(entry->flags & MPTCP_PM_ADDR_FLAG_SUBFLOW)) - continue; - - if (!test_bit(entry->addr.id, msk->pm.id_avail_bitmap)) - continue; - - new_local->addr = entry->addr; - new_local->flags = entry->flags; - new_local->ifindex = entry->ifindex; - found = true; - break; - } - rcu_read_unlock(); - - return found; -} - -static bool -select_signal_address(struct pm_nl_pernet *pernet, const struct mptcp_sock *msk, - struct mptcp_pm_local *new_local) -{ - struct mptcp_pm_addr_entry *entry; - bool found = false; - - rcu_read_lock(); - /* do not keep any additional per socket state, just signal - * the address list in order. - * Note: removal from the local address list during the msk life-cycle - * can lead to additional addresses not being announced. - */ - list_for_each_entry_rcu(entry, &pernet->local_addr_list, list) { - if (!test_bit(entry->addr.id, msk->pm.id_avail_bitmap)) - continue; - - if (!(entry->flags & MPTCP_PM_ADDR_FLAG_SIGNAL)) - continue; - - new_local->addr = entry->addr; - new_local->flags = entry->flags; - new_local->ifindex = entry->ifindex; - found = true; - break; - } - rcu_read_unlock(); - - return found; -} - -unsigned int mptcp_pm_get_add_addr_signal_max(const struct mptcp_sock *msk) -{ - const struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk); - - return READ_ONCE(pernet->add_addr_signal_max); -} -EXPORT_SYMBOL_GPL(mptcp_pm_get_add_addr_signal_max); - -unsigned int mptcp_pm_get_add_addr_accept_max(const struct mptcp_sock *msk) -{ - struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk); - - return READ_ONCE(pernet->add_addr_accept_max); -} -EXPORT_SYMBOL_GPL(mptcp_pm_get_add_addr_accept_max); - -unsigned int mptcp_pm_get_subflows_max(const struct mptcp_sock *msk) -{ - struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk); - - return READ_ONCE(pernet->subflows_max); -} -EXPORT_SYMBOL_GPL(mptcp_pm_get_subflows_max); - -unsigned int mptcp_pm_get_local_addr_max(const struct mptcp_sock *msk) -{ - struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk); - - return READ_ONCE(pernet->local_addr_max); -} -EXPORT_SYMBOL_GPL(mptcp_pm_get_local_addr_max); - -bool mptcp_pm_nl_check_work_pending(struct mptcp_sock *msk) -{ - struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk); - - if (msk->pm.subflows == mptcp_pm_get_subflows_max(msk) || - (find_next_and_bit(pernet->id_bitmap, msk->pm.id_avail_bitmap, - MPTCP_PM_MAX_ADDR_ID + 1, 0) == MPTCP_PM_MAX_ADDR_ID + 1)) { - WRITE_ONCE(msk->pm.work_pending, false); - return false; - } - return true; -} - -struct mptcp_pm_add_entry * 
-mptcp_lookup_anno_list_by_saddr(const struct mptcp_sock *msk, - const struct mptcp_addr_info *addr) -{ - struct mptcp_pm_add_entry *entry; - - lockdep_assert_held(&msk->pm.lock); - - list_for_each_entry(entry, &msk->pm.anno_list, list) { - if (mptcp_addresses_equal(&entry->addr, addr, true)) - return entry; - } - - return NULL; -} - -bool mptcp_pm_sport_in_anno_list(struct mptcp_sock *msk, const struct sock *sk) -{ - struct mptcp_pm_add_entry *entry; - struct mptcp_addr_info saddr; - bool ret = false; - - mptcp_local_address((struct sock_common *)sk, &saddr); - - spin_lock_bh(&msk->pm.lock); - list_for_each_entry(entry, &msk->pm.anno_list, list) { - if (mptcp_addresses_equal(&entry->addr, &saddr, true)) { - ret = true; - goto out; - } - } - -out: - spin_unlock_bh(&msk->pm.lock); - return ret; -} - -static void mptcp_pm_add_timer(struct timer_list *timer) -{ - struct mptcp_pm_add_entry *entry = from_timer(entry, timer, add_timer); - struct mptcp_sock *msk = entry->sock; - struct sock *sk = (struct sock *)msk; - - pr_debug("msk=%p\n", msk); - - if (!msk) - return; - - if (inet_sk_state_load(sk) == TCP_CLOSE) - return; - - if (!entry->addr.id) - return; - - if (mptcp_pm_should_add_signal_addr(msk)) { - sk_reset_timer(sk, timer, jiffies + TCP_RTO_MAX / 8); - goto out; - } - - spin_lock_bh(&msk->pm.lock); - - if (!mptcp_pm_should_add_signal_addr(msk)) { - pr_debug("retransmit ADD_ADDR id=%d\n", entry->addr.id); - mptcp_pm_announce_addr(msk, &entry->addr, false); - mptcp_pm_add_addr_send_ack(msk); - entry->retrans_times++; - } - - if (entry->retrans_times < ADD_ADDR_RETRANS_MAX) - sk_reset_timer(sk, timer, - jiffies + mptcp_get_add_addr_timeout(sock_net(sk))); - - spin_unlock_bh(&msk->pm.lock); - - if (entry->retrans_times == ADD_ADDR_RETRANS_MAX) - mptcp_pm_subflow_established(msk); - -out: - __sock_put(sk); -} - -struct mptcp_pm_add_entry * -mptcp_pm_del_add_timer(struct mptcp_sock *msk, - const struct mptcp_addr_info *addr, bool check_id) -{ - struct mptcp_pm_add_entry *entry; - struct sock *sk = (struct sock *)msk; - struct timer_list *add_timer = NULL; - - spin_lock_bh(&msk->pm.lock); - entry = mptcp_lookup_anno_list_by_saddr(msk, addr); - if (entry && (!check_id || entry->addr.id == addr->id)) { - entry->retrans_times = ADD_ADDR_RETRANS_MAX; - add_timer = &entry->add_timer; - } - if (!check_id && entry) - list_del(&entry->list); - spin_unlock_bh(&msk->pm.lock); - - /* no lock, because sk_stop_timer_sync() is calling del_timer_sync() */ - if (add_timer) - sk_stop_timer_sync(sk, add_timer); - - return entry; -} - -bool mptcp_pm_alloc_anno_list(struct mptcp_sock *msk, - const struct mptcp_addr_info *addr) -{ - struct mptcp_pm_add_entry *add_entry = NULL; - struct sock *sk = (struct sock *)msk; - struct net *net = sock_net(sk); - - lockdep_assert_held(&msk->pm.lock); - - add_entry = mptcp_lookup_anno_list_by_saddr(msk, addr); - - if (add_entry) { - if (WARN_ON_ONCE(mptcp_pm_is_kernel(msk))) - return false; - - sk_reset_timer(sk, &add_entry->add_timer, - jiffies + mptcp_get_add_addr_timeout(net)); - return true; - } - - add_entry = kmalloc(sizeof(*add_entry), GFP_ATOMIC); - if (!add_entry) - return false; - - list_add(&add_entry->list, &msk->pm.anno_list); - - add_entry->addr = *addr; - add_entry->sock = msk; - add_entry->retrans_times = 0; - - timer_setup(&add_entry->add_timer, mptcp_pm_add_timer, 0); - sk_reset_timer(sk, &add_entry->add_timer, - jiffies + mptcp_get_add_addr_timeout(net)); - - return true; -} - -void mptcp_pm_free_anno_list(struct mptcp_sock *msk) -{ - struct 
mptcp_pm_add_entry *entry, *tmp; - struct sock *sk = (struct sock *)msk; - LIST_HEAD(free_list); - - pr_debug("msk=%p\n", msk); - - spin_lock_bh(&msk->pm.lock); - list_splice_init(&msk->pm.anno_list, &free_list); - spin_unlock_bh(&msk->pm.lock); - - list_for_each_entry_safe(entry, tmp, &free_list, list) { - sk_stop_timer_sync(sk, &entry->add_timer); - kfree(entry); - } -} - -/* Fill all the remote addresses into the array addrs[], - * and return the array size. - */ -static unsigned int fill_remote_addresses_vec(struct mptcp_sock *msk, - struct mptcp_addr_info *local, - bool fullmesh, - struct mptcp_addr_info *addrs) -{ - bool deny_id0 = READ_ONCE(msk->pm.remote_deny_join_id0); - struct sock *sk = (struct sock *)msk, *ssk; - struct mptcp_subflow_context *subflow; - struct mptcp_addr_info remote = { 0 }; - unsigned int subflows_max; - int i = 0; - - subflows_max = mptcp_pm_get_subflows_max(msk); - remote_address((struct sock_common *)sk, &remote); - - /* Non-fullmesh endpoint, fill in the single entry - * corresponding to the primary MPC subflow remote address - */ - if (!fullmesh) { - if (deny_id0) - return 0; - - if (!mptcp_pm_addr_families_match(sk, local, &remote)) - return 0; - - msk->pm.subflows++; - addrs[i++] = remote; - } else { - DECLARE_BITMAP(unavail_id, MPTCP_PM_MAX_ADDR_ID + 1); - - /* Forbid creation of new subflows matching existing - * ones, possibly already created by incoming ADD_ADDR - */ - bitmap_zero(unavail_id, MPTCP_PM_MAX_ADDR_ID + 1); - mptcp_for_each_subflow(msk, subflow) - if (READ_ONCE(subflow->local_id) == local->id) - __set_bit(subflow->remote_id, unavail_id); - - mptcp_for_each_subflow(msk, subflow) { - ssk = mptcp_subflow_tcp_sock(subflow); - remote_address((struct sock_common *)ssk, &addrs[i]); - addrs[i].id = READ_ONCE(subflow->remote_id); - if (deny_id0 && !addrs[i].id) - continue; - - if (test_bit(addrs[i].id, unavail_id)) - continue; - - if (!mptcp_pm_addr_families_match(sk, local, &addrs[i])) - continue; - - if (msk->pm.subflows < subflows_max) { - /* forbid creating multiple address towards - * this id - */ - __set_bit(addrs[i].id, unavail_id); - msk->pm.subflows++; - i++; - } - } - } - - return i; -} - -static void __mptcp_pm_send_ack(struct mptcp_sock *msk, struct mptcp_subflow_context *subflow, - bool prio, bool backup) -{ - struct sock *ssk = mptcp_subflow_tcp_sock(subflow); - bool slow; - - pr_debug("send ack for %s\n", - prio ? "mp_prio" : (mptcp_pm_should_add_signal(msk) ? 
"add_addr" : "rm_addr")); - - slow = lock_sock_fast(ssk); - if (prio) { - subflow->send_mp_prio = 1; - subflow->request_bkup = backup; - } - - __mptcp_subflow_send_ack(ssk); - unlock_sock_fast(ssk, slow); -} - -static void mptcp_pm_send_ack(struct mptcp_sock *msk, struct mptcp_subflow_context *subflow, - bool prio, bool backup) -{ - spin_unlock_bh(&msk->pm.lock); - __mptcp_pm_send_ack(msk, subflow, prio, backup); - spin_lock_bh(&msk->pm.lock); -} - -static struct mptcp_pm_addr_entry * -__lookup_addr_by_id(struct pm_nl_pernet *pernet, unsigned int id) -{ - struct mptcp_pm_addr_entry *entry; - - list_for_each_entry_rcu(entry, &pernet->local_addr_list, list, - lockdep_is_held(&pernet->lock)) { - if (entry->addr.id == id) - return entry; - } - return NULL; -} - -static struct mptcp_pm_addr_entry * -__lookup_addr(struct pm_nl_pernet *pernet, const struct mptcp_addr_info *info) -{ - struct mptcp_pm_addr_entry *entry; - - list_for_each_entry_rcu(entry, &pernet->local_addr_list, list, - lockdep_is_held(&pernet->lock)) { - if (mptcp_addresses_equal(&entry->addr, info, entry->addr.port)) - return entry; - } - return NULL; -} - -static void mptcp_pm_create_subflow_or_signal_addr(struct mptcp_sock *msk) -{ - struct sock *sk = (struct sock *)msk; - unsigned int add_addr_signal_max; - bool signal_and_subflow = false; - unsigned int local_addr_max; - struct pm_nl_pernet *pernet; - struct mptcp_pm_local local; - unsigned int subflows_max; - - pernet = pm_nl_get_pernet(sock_net(sk)); - - add_addr_signal_max = mptcp_pm_get_add_addr_signal_max(msk); - local_addr_max = mptcp_pm_get_local_addr_max(msk); - subflows_max = mptcp_pm_get_subflows_max(msk); - - /* do lazy endpoint usage accounting for the MPC subflows */ - if (unlikely(!(msk->pm.status & BIT(MPTCP_PM_MPC_ENDPOINT_ACCOUNTED))) && msk->first) { - struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(msk->first); - struct mptcp_pm_addr_entry *entry; - struct mptcp_addr_info mpc_addr; - bool backup = false; - - mptcp_local_address((struct sock_common *)msk->first, &mpc_addr); - rcu_read_lock(); - entry = __lookup_addr(pernet, &mpc_addr); - if (entry) { - __clear_bit(entry->addr.id, msk->pm.id_avail_bitmap); - msk->mpc_endpoint_id = entry->addr.id; - backup = !!(entry->flags & MPTCP_PM_ADDR_FLAG_BACKUP); - } - rcu_read_unlock(); - - if (backup) - mptcp_pm_send_ack(msk, subflow, true, backup); - - msk->pm.status |= BIT(MPTCP_PM_MPC_ENDPOINT_ACCOUNTED); - } - - pr_debug("local %d:%d signal %d:%d subflows %d:%d\n", - msk->pm.local_addr_used, local_addr_max, - msk->pm.add_addr_signaled, add_addr_signal_max, - msk->pm.subflows, subflows_max); - - /* check first for announce */ - if (msk->pm.add_addr_signaled < add_addr_signal_max) { - /* due to racing events on both ends we can reach here while - * previous add address is still running: if we invoke now - * mptcp_pm_announce_addr(), that will fail and the - * corresponding id will be marked as used. - * Instead let the PM machinery reschedule us when the - * current address announce will be completed. - */ - if (msk->pm.addr_signal & BIT(MPTCP_ADD_ADDR_SIGNAL)) - return; - - if (!select_signal_address(pernet, msk, &local)) - goto subflow; - - /* If the alloc fails, we are on memory pressure, not worth - * continuing, and trying to create subflows. 
- */ - if (!mptcp_pm_alloc_anno_list(msk, &local.addr)) - return; - - __clear_bit(local.addr.id, msk->pm.id_avail_bitmap); - msk->pm.add_addr_signaled++; - - /* Special case for ID0: set the correct ID */ - if (local.addr.id == msk->mpc_endpoint_id) - local.addr.id = 0; - - mptcp_pm_announce_addr(msk, &local.addr, false); - mptcp_pm_nl_addr_send_ack(msk); - - if (local.flags & MPTCP_PM_ADDR_FLAG_SUBFLOW) - signal_and_subflow = true; - } - -subflow: - /* check if should create a new subflow */ - while (msk->pm.local_addr_used < local_addr_max && - msk->pm.subflows < subflows_max) { - struct mptcp_addr_info addrs[MPTCP_PM_ADDR_MAX]; - bool fullmesh; - int i, nr; - - if (signal_and_subflow) - signal_and_subflow = false; - else if (!select_local_address(pernet, msk, &local)) - break; - - fullmesh = !!(local.flags & MPTCP_PM_ADDR_FLAG_FULLMESH); - - __clear_bit(local.addr.id, msk->pm.id_avail_bitmap); - - /* Special case for ID0: set the correct ID */ - if (local.addr.id == msk->mpc_endpoint_id) - local.addr.id = 0; - else /* local_addr_used is not decr for ID 0 */ - msk->pm.local_addr_used++; - - nr = fill_remote_addresses_vec(msk, &local.addr, fullmesh, addrs); - if (nr == 0) - continue; - - spin_unlock_bh(&msk->pm.lock); - for (i = 0; i < nr; i++) - __mptcp_subflow_connect(sk, &local, &addrs[i]); - spin_lock_bh(&msk->pm.lock); - } - mptcp_pm_nl_check_work_pending(msk); -} - -static void mptcp_pm_nl_fully_established(struct mptcp_sock *msk) -{ - mptcp_pm_create_subflow_or_signal_addr(msk); -} - -static void mptcp_pm_nl_subflow_established(struct mptcp_sock *msk) -{ - mptcp_pm_create_subflow_or_signal_addr(msk); -} - -/* Fill all the local addresses into the array addrs[], - * and return the array size. - */ -static unsigned int fill_local_addresses_vec(struct mptcp_sock *msk, - struct mptcp_addr_info *remote, - struct mptcp_pm_local *locals) -{ - struct sock *sk = (struct sock *)msk; - struct mptcp_pm_addr_entry *entry; - struct mptcp_addr_info mpc_addr; - struct pm_nl_pernet *pernet; - unsigned int subflows_max; - int i = 0; - - pernet = pm_nl_get_pernet_from_msk(msk); - subflows_max = mptcp_pm_get_subflows_max(msk); - - mptcp_local_address((struct sock_common *)msk, &mpc_addr); - - rcu_read_lock(); - list_for_each_entry_rcu(entry, &pernet->local_addr_list, list) { - if (!(entry->flags & MPTCP_PM_ADDR_FLAG_FULLMESH)) - continue; - - if (!mptcp_pm_addr_families_match(sk, &entry->addr, remote)) - continue; - - if (msk->pm.subflows < subflows_max) { - locals[i].addr = entry->addr; - locals[i].flags = entry->flags; - locals[i].ifindex = entry->ifindex; - - /* Special case for ID0: set the correct ID */ - if (mptcp_addresses_equal(&locals[i].addr, &mpc_addr, locals[i].addr.port)) - locals[i].addr.id = 0; - - msk->pm.subflows++; - i++; - } - } - rcu_read_unlock(); - - /* If the array is empty, fill in the single - * 'IPADDRANY' local address - */ - if (!i) { - memset(&locals[i], 0, sizeof(locals[i])); - locals[i].addr.family = -#if IS_ENABLED(CONFIG_MPTCP_IPV6) - remote->family == AF_INET6 && - ipv6_addr_v4mapped(&remote->addr6) ? 
AF_INET : -#endif - remote->family; - - if (!mptcp_pm_addr_families_match(sk, &locals[i].addr, remote)) - return 0; - - msk->pm.subflows++; - i++; - } - - return i; -} - -static void mptcp_pm_nl_add_addr_received(struct mptcp_sock *msk) -{ - struct mptcp_pm_local locals[MPTCP_PM_ADDR_MAX]; - struct sock *sk = (struct sock *)msk; - unsigned int add_addr_accept_max; - struct mptcp_addr_info remote; - unsigned int subflows_max; - bool sf_created = false; - int i, nr; - - add_addr_accept_max = mptcp_pm_get_add_addr_accept_max(msk); - subflows_max = mptcp_pm_get_subflows_max(msk); - - pr_debug("accepted %d:%d remote family %d\n", - msk->pm.add_addr_accepted, add_addr_accept_max, - msk->pm.remote.family); - - remote = msk->pm.remote; - mptcp_pm_announce_addr(msk, &remote, true); - mptcp_pm_nl_addr_send_ack(msk); - - if (lookup_subflow_by_daddr(&msk->conn_list, &remote)) - return; - - /* pick id 0 port, if none is provided the remote address */ - if (!remote.port) - remote.port = sk->sk_dport; - - /* connect to the specified remote address, using whatever - * local address the routing configuration will pick. - */ - nr = fill_local_addresses_vec(msk, &remote, locals); - if (nr == 0) - return; - - spin_unlock_bh(&msk->pm.lock); - for (i = 0; i < nr; i++) - if (__mptcp_subflow_connect(sk, &locals[i], &remote) == 0) - sf_created = true; - spin_lock_bh(&msk->pm.lock); - - if (sf_created) { - /* add_addr_accepted is not decr for ID 0 */ - if (remote.id) - msk->pm.add_addr_accepted++; - if (msk->pm.add_addr_accepted >= add_addr_accept_max || - msk->pm.subflows >= subflows_max) - WRITE_ONCE(msk->pm.accept_addr, false); - } -} - -bool mptcp_pm_nl_is_init_remote_addr(struct mptcp_sock *msk, - const struct mptcp_addr_info *remote) -{ - struct mptcp_addr_info mpc_remote; - - remote_address((struct sock_common *)msk, &mpc_remote); - return mptcp_addresses_equal(&mpc_remote, remote, remote->port); -} - -void mptcp_pm_nl_addr_send_ack(struct mptcp_sock *msk) -{ - struct mptcp_subflow_context *subflow, *alt = NULL; - - msk_owned_by_me(msk); - lockdep_assert_held(&msk->pm.lock); - - if (!mptcp_pm_should_add_signal(msk) && - !mptcp_pm_should_rm_signal(msk)) - return; - - mptcp_for_each_subflow(msk, subflow) { - if (__mptcp_subflow_active(subflow)) { - if (!subflow->stale) { - mptcp_pm_send_ack(msk, subflow, false, false); - return; - } - - if (!alt) - alt = subflow; - } - } - - if (alt) - mptcp_pm_send_ack(msk, alt, false, false); -} - -int mptcp_pm_nl_mp_prio_send_ack(struct mptcp_sock *msk, - struct mptcp_addr_info *addr, - struct mptcp_addr_info *rem, - u8 bkup) -{ - struct mptcp_subflow_context *subflow; - - pr_debug("bkup=%d\n", bkup); - - mptcp_for_each_subflow(msk, subflow) { - struct sock *ssk = mptcp_subflow_tcp_sock(subflow); - struct mptcp_addr_info local, remote; - - mptcp_local_address((struct sock_common *)ssk, &local); - if (!mptcp_addresses_equal(&local, addr, addr->port)) - continue; - - if (rem && rem->family != AF_UNSPEC) { - remote_address((struct sock_common *)ssk, &remote); - if (!mptcp_addresses_equal(&remote, rem, rem->port)) - continue; - } - - __mptcp_pm_send_ack(msk, subflow, true, bkup); - return 0; - } - - return -EINVAL; -} - -static void mptcp_pm_nl_rm_addr_or_subflow(struct mptcp_sock *msk, - const struct mptcp_rm_list *rm_list, - enum linux_mptcp_mib_field rm_type) -{ - struct mptcp_subflow_context *subflow, *tmp; - struct sock *sk = (struct sock *)msk; - u8 i; - - pr_debug("%s rm_list_nr %d\n", - rm_type == MPTCP_MIB_RMADDR ? 
"address" : "subflow", rm_list->nr); - - msk_owned_by_me(msk); - - if (sk->sk_state == TCP_LISTEN) - return; - - if (!rm_list->nr) - return; - - if (list_empty(&msk->conn_list)) - return; - - for (i = 0; i < rm_list->nr; i++) { - u8 rm_id = rm_list->ids[i]; - bool removed = false; - - mptcp_for_each_subflow_safe(msk, subflow, tmp) { - struct sock *ssk = mptcp_subflow_tcp_sock(subflow); - u8 remote_id = READ_ONCE(subflow->remote_id); - int how = RCV_SHUTDOWN | SEND_SHUTDOWN; - u8 id = subflow_get_local_id(subflow); - - if ((1 << inet_sk_state_load(ssk)) & - (TCPF_FIN_WAIT1 | TCPF_FIN_WAIT2 | TCPF_CLOSING | TCPF_CLOSE)) - continue; - if (rm_type == MPTCP_MIB_RMADDR && remote_id != rm_id) - continue; - if (rm_type == MPTCP_MIB_RMSUBFLOW && id != rm_id) - continue; - - pr_debug(" -> %s rm_list_ids[%d]=%u local_id=%u remote_id=%u mpc_id=%u\n", - rm_type == MPTCP_MIB_RMADDR ? "address" : "subflow", - i, rm_id, id, remote_id, msk->mpc_endpoint_id); - spin_unlock_bh(&msk->pm.lock); - mptcp_subflow_shutdown(sk, ssk, how); - removed |= subflow->request_join; - - /* the following takes care of updating the subflows counter */ - mptcp_close_ssk(sk, ssk, subflow); - spin_lock_bh(&msk->pm.lock); - - if (rm_type == MPTCP_MIB_RMSUBFLOW) - __MPTCP_INC_STATS(sock_net(sk), rm_type); - } - - if (rm_type == MPTCP_MIB_RMADDR) - __MPTCP_INC_STATS(sock_net(sk), rm_type); - - if (!removed) - continue; - - if (!mptcp_pm_is_kernel(msk)) - continue; - - if (rm_type == MPTCP_MIB_RMADDR && rm_id && - !WARN_ON_ONCE(msk->pm.add_addr_accepted == 0)) { - /* Note: if the subflow has been closed before, this - * add_addr_accepted counter will not be decremented. - */ - if (--msk->pm.add_addr_accepted < mptcp_pm_get_add_addr_accept_max(msk)) - WRITE_ONCE(msk->pm.accept_addr, true); - } - } -} - -static void mptcp_pm_nl_rm_addr_received(struct mptcp_sock *msk) -{ - mptcp_pm_nl_rm_addr_or_subflow(msk, &msk->pm.rm_list_rx, MPTCP_MIB_RMADDR); -} - -static void mptcp_pm_nl_rm_subflow_received(struct mptcp_sock *msk, - const struct mptcp_rm_list *rm_list) -{ - mptcp_pm_nl_rm_addr_or_subflow(msk, rm_list, MPTCP_MIB_RMSUBFLOW); -} - -void mptcp_pm_nl_work(struct mptcp_sock *msk) -{ - struct mptcp_pm_data *pm = &msk->pm; - - msk_owned_by_me(msk); - - if (!(pm->status & MPTCP_PM_WORK_MASK)) - return; - - spin_lock_bh(&msk->pm.lock); - - pr_debug("msk=%p status=%x\n", msk, pm->status); - if (pm->status & BIT(MPTCP_PM_ADD_ADDR_RECEIVED)) { - pm->status &= ~BIT(MPTCP_PM_ADD_ADDR_RECEIVED); - mptcp_pm_nl_add_addr_received(msk); - } - if (pm->status & BIT(MPTCP_PM_ADD_ADDR_SEND_ACK)) { - pm->status &= ~BIT(MPTCP_PM_ADD_ADDR_SEND_ACK); - mptcp_pm_nl_addr_send_ack(msk); - } - if (pm->status & BIT(MPTCP_PM_RM_ADDR_RECEIVED)) { - pm->status &= ~BIT(MPTCP_PM_RM_ADDR_RECEIVED); - mptcp_pm_nl_rm_addr_received(msk); - } - if (pm->status & BIT(MPTCP_PM_ESTABLISHED)) { - pm->status &= ~BIT(MPTCP_PM_ESTABLISHED); - mptcp_pm_nl_fully_established(msk); - } - if (pm->status & BIT(MPTCP_PM_SUBFLOW_ESTABLISHED)) { - pm->status &= ~BIT(MPTCP_PM_SUBFLOW_ESTABLISHED); - mptcp_pm_nl_subflow_established(msk); - } - - spin_unlock_bh(&msk->pm.lock); -} - -static bool address_use_port(struct mptcp_pm_addr_entry *entry) -{ - return (entry->flags & - (MPTCP_PM_ADDR_FLAG_SIGNAL | MPTCP_PM_ADDR_FLAG_SUBFLOW)) == - MPTCP_PM_ADDR_FLAG_SIGNAL; -} - -/* caller must ensure the RCU grace period is already elapsed */ -static void __mptcp_pm_release_addr_entry(struct mptcp_pm_addr_entry *entry) -{ - if (entry->lsk) - sock_release(entry->lsk); - kfree(entry); -} - -static int 
mptcp_pm_nl_append_new_local_addr(struct pm_nl_pernet *pernet, - struct mptcp_pm_addr_entry *entry, - bool needs_id) -{ - struct mptcp_pm_addr_entry *cur, *del_entry = NULL; - unsigned int addr_max; - int ret = -EINVAL; - - spin_lock_bh(&pernet->lock); - /* to keep the code simple, don't do IDR-like allocation for address ID, - * just bail when we exceed limits - */ - if (pernet->next_id == MPTCP_PM_MAX_ADDR_ID) - pernet->next_id = 1; - if (pernet->addrs >= MPTCP_PM_ADDR_MAX) { - ret = -ERANGE; - goto out; - } - if (test_bit(entry->addr.id, pernet->id_bitmap)) { - ret = -EBUSY; - goto out; - } - - /* do not insert duplicate address, differentiate on port only - * singled addresses - */ - if (!address_use_port(entry)) - entry->addr.port = 0; - list_for_each_entry(cur, &pernet->local_addr_list, list) { - if (mptcp_addresses_equal(&cur->addr, &entry->addr, - cur->addr.port || entry->addr.port)) { - /* allow replacing the exiting endpoint only if such - * endpoint is an implicit one and the user-space - * did not provide an endpoint id - */ - if (!(cur->flags & MPTCP_PM_ADDR_FLAG_IMPLICIT)) { - ret = -EEXIST; - goto out; - } - if (entry->addr.id) - goto out; - - pernet->addrs--; - entry->addr.id = cur->addr.id; - list_del_rcu(&cur->list); - del_entry = cur; - break; - } - } - - if (!entry->addr.id && needs_id) { -find_next: - entry->addr.id = find_next_zero_bit(pernet->id_bitmap, - MPTCP_PM_MAX_ADDR_ID + 1, - pernet->next_id); - if (!entry->addr.id && pernet->next_id != 1) { - pernet->next_id = 1; - goto find_next; - } - } - - if (!entry->addr.id && needs_id) - goto out; - - __set_bit(entry->addr.id, pernet->id_bitmap); - if (entry->addr.id > pernet->next_id) - pernet->next_id = entry->addr.id; - - if (entry->flags & MPTCP_PM_ADDR_FLAG_SIGNAL) { - addr_max = pernet->add_addr_signal_max; - WRITE_ONCE(pernet->add_addr_signal_max, addr_max + 1); - } - if (entry->flags & MPTCP_PM_ADDR_FLAG_SUBFLOW) { - addr_max = pernet->local_addr_max; - WRITE_ONCE(pernet->local_addr_max, addr_max + 1); - } - - pernet->addrs++; - if (!entry->addr.port) - list_add_tail_rcu(&entry->list, &pernet->local_addr_list); - else - list_add_rcu(&entry->list, &pernet->local_addr_list); - ret = entry->addr.id; - -out: - spin_unlock_bh(&pernet->lock); - - /* just replaced an existing entry, free it */ - if (del_entry) { - synchronize_rcu(); - __mptcp_pm_release_addr_entry(del_entry); - } - return ret; -} - -static struct lock_class_key mptcp_slock_keys[2]; -static struct lock_class_key mptcp_keys[2]; - -static int mptcp_pm_nl_create_listen_socket(struct sock *sk, - struct mptcp_pm_addr_entry *entry) -{ - bool is_ipv6 = sk->sk_family == AF_INET6; - int addrlen = sizeof(struct sockaddr_in); - struct sockaddr_storage addr; - struct sock *newsk, *ssk; - int backlog = 1024; - int err; - - err = sock_create_kern(sock_net(sk), entry->addr.family, - SOCK_STREAM, IPPROTO_MPTCP, &entry->lsk); - if (err) - return err; - - newsk = entry->lsk->sk; - if (!newsk) - return -EINVAL; - - /* The subflow socket lock is acquired in a nested to the msk one - * in several places, even by the TCP stack, and this msk is a kernel - * socket: lockdep complains. Instead of propagating the _nested - * modifiers in several places, re-init the lock class for the msk - * socket to an mptcp specific one. - */ - sock_lock_init_class_and_name(newsk, - is_ipv6 ? "mlock-AF_INET6" : "mlock-AF_INET", - &mptcp_slock_keys[is_ipv6], - is_ipv6 ? 
"msk_lock-AF_INET6" : "msk_lock-AF_INET", - &mptcp_keys[is_ipv6]); - - lock_sock(newsk); - ssk = __mptcp_nmpc_sk(mptcp_sk(newsk)); - release_sock(newsk); - if (IS_ERR(ssk)) - return PTR_ERR(ssk); - - mptcp_info2sockaddr(&entry->addr, &addr, entry->addr.family); -#if IS_ENABLED(CONFIG_MPTCP_IPV6) - if (entry->addr.family == AF_INET6) - addrlen = sizeof(struct sockaddr_in6); -#endif - if (ssk->sk_family == AF_INET) - err = inet_bind_sk(ssk, (struct sockaddr *)&addr, addrlen); -#if IS_ENABLED(CONFIG_MPTCP_IPV6) - else if (ssk->sk_family == AF_INET6) - err = inet6_bind_sk(ssk, (struct sockaddr *)&addr, addrlen); -#endif - if (err) - return err; - - /* We don't use mptcp_set_state() here because it needs to be called - * under the msk socket lock. For the moment, that will not bring - * anything more than only calling inet_sk_state_store(), because the - * old status is known (TCP_CLOSE). - */ - inet_sk_state_store(newsk, TCP_LISTEN); - lock_sock(ssk); - WRITE_ONCE(mptcp_subflow_ctx(ssk)->pm_listener, true); - err = __inet_listen_sk(ssk, backlog); - if (!err) - mptcp_event_pm_listener(ssk, MPTCP_EVENT_LISTENER_CREATED); - release_sock(ssk); - return err; -} - -int mptcp_pm_nl_get_local_id(struct mptcp_sock *msk, struct mptcp_addr_info *skc) -{ - struct mptcp_pm_addr_entry *entry; - struct pm_nl_pernet *pernet; - int ret; - - pernet = pm_nl_get_pernet_from_msk(msk); - - rcu_read_lock(); - entry = __lookup_addr(pernet, skc); - ret = entry ? entry->addr.id : -1; - rcu_read_unlock(); - if (ret >= 0) - return ret; - - /* address not found, add to local list */ - entry = kmalloc(sizeof(*entry), GFP_ATOMIC); - if (!entry) - return -ENOMEM; - - entry->addr = *skc; - entry->addr.id = 0; - entry->addr.port = 0; - entry->ifindex = 0; - entry->flags = MPTCP_PM_ADDR_FLAG_IMPLICIT; - entry->lsk = NULL; - ret = mptcp_pm_nl_append_new_local_addr(pernet, entry, true); - if (ret < 0) - kfree(entry); - - return ret; -} - -bool mptcp_pm_nl_is_backup(struct mptcp_sock *msk, struct mptcp_addr_info *skc) -{ - struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk); - struct mptcp_pm_addr_entry *entry; - bool backup; - - rcu_read_lock(); - entry = __lookup_addr(pernet, skc); - backup = entry && !!(entry->flags & MPTCP_PM_ADDR_FLAG_BACKUP); - rcu_read_unlock(); - - return backup; -} +#include "protocol.h" +#include "mptcp_pm_gen.h" #define MPTCP_PM_CMD_GRP_OFFSET 0 #define MPTCP_PM_EV_GRP_OFFSET 1 @@ -1196,43 +19,6 @@ static const struct genl_multicast_group mptcp_pm_mcgrps[] = { }, }; -void mptcp_pm_nl_subflow_chk_stale(const struct mptcp_sock *msk, struct sock *ssk) -{ - struct mptcp_subflow_context *iter, *subflow = mptcp_subflow_ctx(ssk); - struct sock *sk = (struct sock *)msk; - unsigned int active_max_loss_cnt; - struct net *net = sock_net(sk); - unsigned int stale_loss_cnt; - bool slow; - - stale_loss_cnt = mptcp_stale_loss_cnt(net); - if (subflow->stale || !stale_loss_cnt || subflow->stale_count <= stale_loss_cnt) - return; - - /* look for another available subflow not in loss state */ - active_max_loss_cnt = max_t(int, stale_loss_cnt - 1, 1); - mptcp_for_each_subflow(msk, iter) { - if (iter != subflow && mptcp_subflow_active(iter) && - iter->stale_count < active_max_loss_cnt) { - /* we have some alternatives, try to mark this subflow as idle ...*/ - slow = lock_sock_fast(ssk); - if (!tcp_rtx_and_write_queues_empty(ssk)) { - subflow->stale = 1; - __mptcp_retransmit_pending_data(sk); - MPTCP_INC_STATS(net, MPTCP_MIB_SUBFLOWSTALE); - } - unlock_sock_fast(ssk, slow); - - /* always try to push the pending data 
regardless of re-injections: - * we can possibly use backup subflows now, and subflow selection - * is cheap under the msk socket lock - */ - __mptcp_push_pending(sk, 0); - return; - } - } -} - static int mptcp_pm_family_to_addr(int family) { #if IS_ENABLED(CONFIG_MPTCP_IPV6) @@ -1341,390 +127,8 @@ int mptcp_pm_parse_entry(struct nlattr *attr, struct genl_info *info, return 0; } -static struct pm_nl_pernet *genl_info_pm_nl(struct genl_info *info) -{ - return pm_nl_get_pernet(genl_info_net(info)); -} - -static int mptcp_nl_add_subflow_or_signal_addr(struct net *net, - struct mptcp_addr_info *addr) -{ - struct mptcp_sock *msk; - long s_slot = 0, s_num = 0; - - while ((msk = mptcp_token_iter_next(net, &s_slot, &s_num)) != NULL) { - struct sock *sk = (struct sock *)msk; - struct mptcp_addr_info mpc_addr; - - if (!READ_ONCE(msk->fully_established) || - mptcp_pm_is_userspace(msk)) - goto next; - - /* if the endp linked to the init sf is re-added with a != ID */ - mptcp_local_address((struct sock_common *)msk, &mpc_addr); - - lock_sock(sk); - spin_lock_bh(&msk->pm.lock); - if (mptcp_addresses_equal(addr, &mpc_addr, addr->port)) - msk->mpc_endpoint_id = addr->id; - mptcp_pm_create_subflow_or_signal_addr(msk); - spin_unlock_bh(&msk->pm.lock); - release_sock(sk); - -next: - sock_put(sk); - cond_resched(); - } - - return 0; -} - -static bool mptcp_pm_has_addr_attr_id(const struct nlattr *attr, - struct genl_info *info) -{ - struct nlattr *tb[MPTCP_PM_ADDR_ATTR_MAX + 1]; - - if (!nla_parse_nested_deprecated(tb, MPTCP_PM_ADDR_ATTR_MAX, attr, - mptcp_pm_address_nl_policy, info->extack) && - tb[MPTCP_PM_ADDR_ATTR_ID]) - return true; - return false; -} - -int mptcp_pm_nl_add_addr_doit(struct sk_buff *skb, struct genl_info *info) -{ - struct nlattr *attr = info->attrs[MPTCP_PM_ENDPOINT_ADDR]; - struct pm_nl_pernet *pernet = genl_info_pm_nl(info); - struct mptcp_pm_addr_entry addr, *entry; - int ret; - - ret = mptcp_pm_parse_entry(attr, info, true, &addr); - if (ret < 0) - return ret; - - if (addr.addr.port && !address_use_port(&addr)) { - GENL_SET_ERR_MSG(info, "flags must have signal and not subflow when using port"); - return -EINVAL; - } - - if (addr.flags & MPTCP_PM_ADDR_FLAG_SIGNAL && - addr.flags & MPTCP_PM_ADDR_FLAG_FULLMESH) { - GENL_SET_ERR_MSG(info, "flags mustn't have both signal and fullmesh"); - return -EINVAL; - } - - if (addr.flags & MPTCP_PM_ADDR_FLAG_IMPLICIT) { - GENL_SET_ERR_MSG(info, "can't create IMPLICIT endpoint"); - return -EINVAL; - } - - entry = kzalloc(sizeof(*entry), GFP_KERNEL_ACCOUNT); - if (!entry) { - GENL_SET_ERR_MSG(info, "can't allocate addr"); - return -ENOMEM; - } - - *entry = addr; - if (entry->addr.port) { - ret = mptcp_pm_nl_create_listen_socket(skb->sk, entry); - if (ret) { - GENL_SET_ERR_MSG_FMT(info, "create listen socket error: %d", ret); - goto out_free; - } - } - ret = mptcp_pm_nl_append_new_local_addr(pernet, entry, - !mptcp_pm_has_addr_attr_id(attr, info)); - if (ret < 0) { - GENL_SET_ERR_MSG_FMT(info, "too many addresses or duplicate one: %d", ret); - goto out_free; - } - - mptcp_nl_add_subflow_or_signal_addr(sock_net(skb->sk), &entry->addr); - return 0; - -out_free: - __mptcp_pm_release_addr_entry(entry); - return ret; -} - -bool mptcp_remove_anno_list_by_saddr(struct mptcp_sock *msk, - const struct mptcp_addr_info *addr) -{ - struct mptcp_pm_add_entry *entry; - - entry = mptcp_pm_del_add_timer(msk, addr, false); - if (entry) { - kfree(entry); - return true; - } - - return false; -} - -static u8 mptcp_endp_get_local_id(struct mptcp_sock *msk, - const struct 
mptcp_addr_info *addr) -{ - return msk->mpc_endpoint_id == addr->id ? 0 : addr->id; -} - -static bool mptcp_pm_remove_anno_addr(struct mptcp_sock *msk, - const struct mptcp_addr_info *addr, - bool force) -{ - struct mptcp_rm_list list = { .nr = 0 }; - bool ret; - - list.ids[list.nr++] = mptcp_endp_get_local_id(msk, addr); - - ret = mptcp_remove_anno_list_by_saddr(msk, addr); - if (ret || force) { - spin_lock_bh(&msk->pm.lock); - if (ret) { - __set_bit(addr->id, msk->pm.id_avail_bitmap); - msk->pm.add_addr_signaled--; - } - mptcp_pm_remove_addr(msk, &list); - spin_unlock_bh(&msk->pm.lock); - } - return ret; -} - -static void __mark_subflow_endp_available(struct mptcp_sock *msk, u8 id) -{ - /* If it was marked as used, and not ID 0, decrement local_addr_used */ - if (!__test_and_set_bit(id ? : msk->mpc_endpoint_id, msk->pm.id_avail_bitmap) && - id && !WARN_ON_ONCE(msk->pm.local_addr_used == 0)) - msk->pm.local_addr_used--; -} - -static int mptcp_nl_remove_subflow_and_signal_addr(struct net *net, - const struct mptcp_pm_addr_entry *entry) -{ - const struct mptcp_addr_info *addr = &entry->addr; - struct mptcp_rm_list list = { .nr = 1 }; - long s_slot = 0, s_num = 0; - struct mptcp_sock *msk; - - pr_debug("remove_id=%d\n", addr->id); - - while ((msk = mptcp_token_iter_next(net, &s_slot, &s_num)) != NULL) { - struct sock *sk = (struct sock *)msk; - bool remove_subflow; - - if (mptcp_pm_is_userspace(msk)) - goto next; - - if (list_empty(&msk->conn_list)) { - mptcp_pm_remove_anno_addr(msk, addr, false); - goto next; - } - - lock_sock(sk); - remove_subflow = mptcp_lookup_subflow_by_saddr(&msk->conn_list, addr); - mptcp_pm_remove_anno_addr(msk, addr, remove_subflow && - !(entry->flags & MPTCP_PM_ADDR_FLAG_IMPLICIT)); - - list.ids[0] = mptcp_endp_get_local_id(msk, addr); - if (remove_subflow) { - spin_lock_bh(&msk->pm.lock); - mptcp_pm_nl_rm_subflow_received(msk, &list); - spin_unlock_bh(&msk->pm.lock); - } - - if (entry->flags & MPTCP_PM_ADDR_FLAG_SUBFLOW) { - spin_lock_bh(&msk->pm.lock); - __mark_subflow_endp_available(msk, list.ids[0]); - spin_unlock_bh(&msk->pm.lock); - } - - if (msk->mpc_endpoint_id == entry->addr.id) - msk->mpc_endpoint_id = 0; - release_sock(sk); - -next: - sock_put(sk); - cond_resched(); - } - - return 0; -} - -static int mptcp_nl_remove_id_zero_address(struct net *net, - struct mptcp_addr_info *addr) -{ - struct mptcp_rm_list list = { .nr = 0 }; - long s_slot = 0, s_num = 0; - struct mptcp_sock *msk; - - list.ids[list.nr++] = 0; - - while ((msk = mptcp_token_iter_next(net, &s_slot, &s_num)) != NULL) { - struct sock *sk = (struct sock *)msk; - struct mptcp_addr_info msk_local; - - if (list_empty(&msk->conn_list) || mptcp_pm_is_userspace(msk)) - goto next; - - mptcp_local_address((struct sock_common *)msk, &msk_local); - if (!mptcp_addresses_equal(&msk_local, addr, addr->port)) - goto next; - - lock_sock(sk); - spin_lock_bh(&msk->pm.lock); - mptcp_pm_remove_addr(msk, &list); - mptcp_pm_nl_rm_subflow_received(msk, &list); - __mark_subflow_endp_available(msk, 0); - spin_unlock_bh(&msk->pm.lock); - release_sock(sk); - -next: - sock_put(sk); - cond_resched(); - } - - return 0; -} - -int mptcp_pm_nl_del_addr_doit(struct sk_buff *skb, struct genl_info *info) -{ - struct nlattr *attr = info->attrs[MPTCP_PM_ENDPOINT_ADDR]; - struct pm_nl_pernet *pernet = genl_info_pm_nl(info); - struct mptcp_pm_addr_entry addr, *entry; - unsigned int addr_max; - int ret; - - ret = mptcp_pm_parse_entry(attr, info, false, &addr); - if (ret < 0) - return ret; - - /* the zero id address is special: the 
first address used by the msk - * always gets such an id, so different subflows can have different zero - * id addresses. Additionally zero id is not accounted for in id_bitmap. - * Let's use an 'mptcp_rm_list' instead of the common remove code. - */ - if (addr.addr.id == 0) - return mptcp_nl_remove_id_zero_address(sock_net(skb->sk), &addr.addr); - - spin_lock_bh(&pernet->lock); - entry = __lookup_addr_by_id(pernet, addr.addr.id); - if (!entry) { - GENL_SET_ERR_MSG(info, "address not found"); - spin_unlock_bh(&pernet->lock); - return -EINVAL; - } - if (entry->flags & MPTCP_PM_ADDR_FLAG_SIGNAL) { - addr_max = pernet->add_addr_signal_max; - WRITE_ONCE(pernet->add_addr_signal_max, addr_max - 1); - } - if (entry->flags & MPTCP_PM_ADDR_FLAG_SUBFLOW) { - addr_max = pernet->local_addr_max; - WRITE_ONCE(pernet->local_addr_max, addr_max - 1); - } - - pernet->addrs--; - list_del_rcu(&entry->list); - __clear_bit(entry->addr.id, pernet->id_bitmap); - spin_unlock_bh(&pernet->lock); - - mptcp_nl_remove_subflow_and_signal_addr(sock_net(skb->sk), entry); - synchronize_rcu(); - __mptcp_pm_release_addr_entry(entry); - - return ret; -} - -static void mptcp_pm_flush_addrs_and_subflows(struct mptcp_sock *msk, - struct list_head *rm_list) -{ - struct mptcp_rm_list alist = { .nr = 0 }, slist = { .nr = 0 }; - struct mptcp_pm_addr_entry *entry; - - list_for_each_entry(entry, rm_list, list) { - if (slist.nr < MPTCP_RM_IDS_MAX && - mptcp_lookup_subflow_by_saddr(&msk->conn_list, &entry->addr)) - slist.ids[slist.nr++] = mptcp_endp_get_local_id(msk, &entry->addr); - - if (alist.nr < MPTCP_RM_IDS_MAX && - mptcp_remove_anno_list_by_saddr(msk, &entry->addr)) - alist.ids[alist.nr++] = mptcp_endp_get_local_id(msk, &entry->addr); - } - - spin_lock_bh(&msk->pm.lock); - if (alist.nr) { - msk->pm.add_addr_signaled -= alist.nr; - mptcp_pm_remove_addr(msk, &alist); - } - if (slist.nr) - mptcp_pm_nl_rm_subflow_received(msk, &slist); - /* Reset counters: maybe some subflows have been removed before */ - bitmap_fill(msk->pm.id_avail_bitmap, MPTCP_PM_MAX_ADDR_ID + 1); - msk->pm.local_addr_used = 0; - spin_unlock_bh(&msk->pm.lock); -} - -static void mptcp_nl_flush_addrs_list(struct net *net, - struct list_head *rm_list) -{ - long s_slot = 0, s_num = 0; - struct mptcp_sock *msk; - - if (list_empty(rm_list)) - return; - - while ((msk = mptcp_token_iter_next(net, &s_slot, &s_num)) != NULL) { - struct sock *sk = (struct sock *)msk; - - if (!mptcp_pm_is_userspace(msk)) { - lock_sock(sk); - mptcp_pm_flush_addrs_and_subflows(msk, rm_list); - release_sock(sk); - } - - sock_put(sk); - cond_resched(); - } -} - -/* caller must ensure the RCU grace period is already elapsed */ -static void __flush_addrs(struct list_head *list) -{ - while (!list_empty(list)) { - struct mptcp_pm_addr_entry *cur; - - cur = list_entry(list->next, - struct mptcp_pm_addr_entry, list); - list_del_rcu(&cur->list); - __mptcp_pm_release_addr_entry(cur); - } -} - -static void __reset_counters(struct pm_nl_pernet *pernet) -{ - WRITE_ONCE(pernet->add_addr_signal_max, 0); - WRITE_ONCE(pernet->add_addr_accept_max, 0); - WRITE_ONCE(pernet->local_addr_max, 0); - pernet->addrs = 0; -} - -int mptcp_pm_nl_flush_addrs_doit(struct sk_buff *skb, struct genl_info *info) -{ - struct pm_nl_pernet *pernet = genl_info_pm_nl(info); - LIST_HEAD(free_list); - - spin_lock_bh(&pernet->lock); - list_splice_init(&pernet->local_addr_list, &free_list); - __reset_counters(pernet); - pernet->next_id = 1; - bitmap_zero(pernet->id_bitmap, MPTCP_PM_MAX_ADDR_ID + 1); - spin_unlock_bh(&pernet->lock); - 
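/* [Editor's note] The endpoint removal paths above (the single-address
 * delete and the flush path) follow the classic RCU list-removal
 * discipline: unlink the entry under the pernet spinlock, then wait one
 * grace period before freeing, so lockless readers such as __lookup_addr()
 * can never touch freed memory. A minimal self-contained sketch of the same
 * pattern; struct endp and endp_del() are illustrative names only, not the
 * MPTCP structures:
 */
#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct endp {
	struct list_head list;
	int id;
};

static LIST_HEAD(endp_list);
static DEFINE_SPINLOCK(endp_lock);

static void endp_del(struct endp *e)
{
	spin_lock_bh(&endp_lock);
	list_del_rcu(&e->list);	/* unlink; RCU readers may still see the entry */
	spin_unlock_bh(&endp_lock);

	synchronize_rcu();	/* wait for every in-flight RCU read-side section */
	kfree(e);		/* now no reader can hold a stale reference */
}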
mptcp_nl_flush_addrs_list(sock_net(skb->sk), &free_list); - synchronize_rcu(); - __flush_addrs(&free_list); - return 0; -} - -int mptcp_nl_fill_addr(struct sk_buff *skb, - struct mptcp_pm_addr_entry *entry) +static int mptcp_nl_fill_addr(struct sk_buff *skb, + struct mptcp_pm_addr_entry *entry) { struct mptcp_addr_info *addr = &entry->addr; struct nlattr *attr; @@ -1762,15 +166,26 @@ int mptcp_nl_fill_addr(struct sk_buff *skb, return -EMSGSIZE; } -int mptcp_pm_nl_get_addr(struct sk_buff *skb, struct genl_info *info) +static int mptcp_pm_get_addr(u8 id, struct mptcp_pm_addr_entry *addr, + struct genl_info *info) +{ + if (info->attrs[MPTCP_PM_ATTR_TOKEN]) + return mptcp_userspace_pm_get_addr(id, addr, info); + return mptcp_pm_nl_get_addr(id, addr, info); +} + +int mptcp_pm_nl_get_addr_doit(struct sk_buff *skb, struct genl_info *info) { - struct nlattr *attr = info->attrs[MPTCP_PM_ENDPOINT_ADDR]; - struct pm_nl_pernet *pernet = genl_info_pm_nl(info); - struct mptcp_pm_addr_entry addr, *entry; + struct mptcp_pm_addr_entry addr; + struct nlattr *attr; struct sk_buff *msg; void *reply; int ret; + if (GENL_REQ_ATTR_CHECK(info, MPTCP_PM_ENDPOINT_ADDR)) + return -EINVAL; + + attr = info->attrs[MPTCP_PM_ENDPOINT_ADDR]; ret = mptcp_pm_parse_entry(attr, info, false, &addr); if (ret < 0) return ret; @@ -1787,258 +202,83 @@ int mptcp_pm_nl_get_addr(struct sk_buff *skb, struct genl_info *info) goto fail; } - rcu_read_lock(); - entry = __lookup_addr_by_id(pernet, addr.addr.id); - if (!entry) { - GENL_SET_ERR_MSG(info, "address not found"); - ret = -EINVAL; - goto unlock_fail; + ret = mptcp_pm_get_addr(addr.addr.id, &addr, info); + if (ret) { + NL_SET_ERR_MSG_ATTR(info->extack, attr, "address not found"); + goto fail; } - ret = mptcp_nl_fill_addr(msg, entry); + ret = mptcp_nl_fill_addr(msg, &addr); if (ret) - goto unlock_fail; + goto fail; genlmsg_end(msg, reply); ret = genlmsg_reply(msg, info); - rcu_read_unlock(); return ret; -unlock_fail: - rcu_read_unlock(); - fail: nlmsg_free(msg); return ret; } -int mptcp_pm_nl_get_addr_doit(struct sk_buff *skb, struct genl_info *info) -{ - return mptcp_pm_get_addr(skb, info); -} - -int mptcp_pm_nl_dump_addr(struct sk_buff *msg, - struct netlink_callback *cb) +int mptcp_pm_genl_fill_addr(struct sk_buff *msg, + struct netlink_callback *cb, + struct mptcp_pm_addr_entry *entry) { - struct net *net = sock_net(msg->sk); - struct mptcp_pm_addr_entry *entry; - struct pm_nl_pernet *pernet; - int id = cb->args[0]; void *hdr; - int i; - - pernet = pm_nl_get_pernet(net); - - rcu_read_lock(); - for (i = id; i < MPTCP_PM_MAX_ADDR_ID + 1; i++) { - if (test_bit(i, pernet->id_bitmap)) { - entry = __lookup_addr_by_id(pernet, i); - if (!entry) - break; - - if (entry->addr.id <= id) - continue; - - hdr = genlmsg_put(msg, NETLINK_CB(cb->skb).portid, - cb->nlh->nlmsg_seq, &mptcp_genl_family, - NLM_F_MULTI, MPTCP_PM_CMD_GET_ADDR); - if (!hdr) - break; - - if (mptcp_nl_fill_addr(msg, entry) < 0) { - genlmsg_cancel(msg, hdr); - break; - } - - id = entry->addr.id; - genlmsg_end(msg, hdr); - } - } - rcu_read_unlock(); - - cb->args[0] = id; - return msg->len; -} - -int mptcp_pm_nl_get_addr_dumpit(struct sk_buff *msg, - struct netlink_callback *cb) -{ - return mptcp_pm_dump_addr(msg, cb); -} - -static int parse_limit(struct genl_info *info, int id, unsigned int *limit) -{ - struct nlattr *attr = info->attrs[id]; - if (!attr) - return 0; + hdr = genlmsg_put(msg, NETLINK_CB(cb->skb).portid, + cb->nlh->nlmsg_seq, &mptcp_genl_family, + NLM_F_MULTI, MPTCP_PM_CMD_GET_ADDR); + if (!hdr) + return 
-EINVAL; - *limit = nla_get_u32(attr); - if (*limit > MPTCP_PM_ADDR_MAX) { - GENL_SET_ERR_MSG(info, "limit greater than maximum"); + if (mptcp_nl_fill_addr(msg, entry) < 0) { + genlmsg_cancel(msg, hdr); return -EINVAL; } - return 0; -} - -int mptcp_pm_nl_set_limits_doit(struct sk_buff *skb, struct genl_info *info) -{ - struct pm_nl_pernet *pernet = genl_info_pm_nl(info); - unsigned int rcv_addrs, subflows; - int ret; - spin_lock_bh(&pernet->lock); - rcv_addrs = pernet->add_addr_accept_max; - ret = parse_limit(info, MPTCP_PM_ATTR_RCV_ADD_ADDRS, &rcv_addrs); - if (ret) - goto unlock; - - subflows = pernet->subflows_max; - ret = parse_limit(info, MPTCP_PM_ATTR_SUBFLOWS, &subflows); - if (ret) - goto unlock; - - WRITE_ONCE(pernet->add_addr_accept_max, rcv_addrs); - WRITE_ONCE(pernet->subflows_max, subflows); - -unlock: - spin_unlock_bh(&pernet->lock); - return ret; + genlmsg_end(msg, hdr); + return 0; } -int mptcp_pm_nl_get_limits_doit(struct sk_buff *skb, struct genl_info *info) +static int mptcp_pm_dump_addr(struct sk_buff *msg, struct netlink_callback *cb) { - struct pm_nl_pernet *pernet = genl_info_pm_nl(info); - struct sk_buff *msg; - void *reply; - - msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); - if (!msg) - return -ENOMEM; - - reply = genlmsg_put_reply(msg, info, &mptcp_genl_family, 0, - MPTCP_PM_CMD_GET_LIMITS); - if (!reply) - goto fail; + const struct genl_info *info = genl_info_dump(cb); - if (nla_put_u32(msg, MPTCP_PM_ATTR_RCV_ADD_ADDRS, - READ_ONCE(pernet->add_addr_accept_max))) - goto fail; - - if (nla_put_u32(msg, MPTCP_PM_ATTR_SUBFLOWS, - READ_ONCE(pernet->subflows_max))) - goto fail; - - genlmsg_end(msg, reply); - return genlmsg_reply(msg, info); - -fail: - GENL_SET_ERR_MSG(info, "not enough space in Netlink message"); - nlmsg_free(msg); - return -EMSGSIZE; + if (info->attrs[MPTCP_PM_ATTR_TOKEN]) + return mptcp_userspace_pm_dump_addr(msg, cb); + return mptcp_pm_nl_dump_addr(msg, cb); } -static void mptcp_pm_nl_fullmesh(struct mptcp_sock *msk, - struct mptcp_addr_info *addr) +int mptcp_pm_nl_get_addr_dumpit(struct sk_buff *msg, + struct netlink_callback *cb) { - struct mptcp_rm_list list = { .nr = 0 }; - - list.ids[list.nr++] = mptcp_endp_get_local_id(msk, addr); - - spin_lock_bh(&msk->pm.lock); - mptcp_pm_nl_rm_subflow_received(msk, &list); - __mark_subflow_endp_available(msk, list.ids[0]); - mptcp_pm_create_subflow_or_signal_addr(msk); - spin_unlock_bh(&msk->pm.lock); + return mptcp_pm_dump_addr(msg, cb); } -static int mptcp_nl_set_flags(struct net *net, - struct mptcp_addr_info *addr, - u8 bkup, u8 changed) +static int mptcp_pm_set_flags(struct genl_info *info) { - long s_slot = 0, s_num = 0; - struct mptcp_sock *msk; + struct mptcp_pm_addr_entry loc = { .addr = { .family = AF_UNSPEC }, }; + struct nlattr *attr_loc; int ret = -EINVAL; - while ((msk = mptcp_token_iter_next(net, &s_slot, &s_num)) != NULL) { - struct sock *sk = (struct sock *)msk; - - if (list_empty(&msk->conn_list) || mptcp_pm_is_userspace(msk)) - goto next; - - lock_sock(sk); - if (changed & MPTCP_PM_ADDR_FLAG_BACKUP) - ret = mptcp_pm_nl_mp_prio_send_ack(msk, addr, NULL, bkup); - if (changed & MPTCP_PM_ADDR_FLAG_FULLMESH) - mptcp_pm_nl_fullmesh(msk, addr); - release_sock(sk); - -next: - sock_put(sk); - cond_resched(); - } - - return ret; -} - -int mptcp_pm_nl_set_flags(struct sk_buff *skb, struct genl_info *info) -{ - struct mptcp_pm_addr_entry addr = { .addr = { .family = AF_UNSPEC }, }; - struct nlattr *attr = info->attrs[MPTCP_PM_ATTR_ADDR]; - u8 changed, mask = MPTCP_PM_ADDR_FLAG_BACKUP | - 
MPTCP_PM_ADDR_FLAG_FULLMESH; - struct net *net = sock_net(skb->sk); - struct mptcp_pm_addr_entry *entry; - struct pm_nl_pernet *pernet; - u8 lookup_by_id = 0; - u8 bkup = 0; - int ret; - - pernet = pm_nl_get_pernet(net); + if (GENL_REQ_ATTR_CHECK(info, MPTCP_PM_ATTR_ADDR)) + return ret; - ret = mptcp_pm_parse_entry(attr, info, false, &addr); + attr_loc = info->attrs[MPTCP_PM_ATTR_ADDR]; + ret = mptcp_pm_parse_entry(attr_loc, info, false, &loc); if (ret < 0) return ret; - if (addr.addr.family == AF_UNSPEC) { - lookup_by_id = 1; - if (!addr.addr.id) { - GENL_SET_ERR_MSG(info, "missing required inputs"); - return -EOPNOTSUPP; - } - } - - if (addr.flags & MPTCP_PM_ADDR_FLAG_BACKUP) - bkup = 1; - - spin_lock_bh(&pernet->lock); - entry = lookup_by_id ? __lookup_addr_by_id(pernet, addr.addr.id) : - __lookup_addr(pernet, &addr.addr); - if (!entry) { - spin_unlock_bh(&pernet->lock); - GENL_SET_ERR_MSG(info, "address not found"); - return -EINVAL; - } - if ((addr.flags & MPTCP_PM_ADDR_FLAG_FULLMESH) && - (entry->flags & (MPTCP_PM_ADDR_FLAG_SIGNAL | - MPTCP_PM_ADDR_FLAG_IMPLICIT))) { - spin_unlock_bh(&pernet->lock); - GENL_SET_ERR_MSG(info, "invalid addr flags"); - return -EINVAL; - } - - changed = (addr.flags ^ entry->flags) & mask; - entry->flags = (entry->flags & ~mask) | (addr.flags & mask); - addr = *entry; - spin_unlock_bh(&pernet->lock); - - mptcp_nl_set_flags(net, &addr.addr, bkup, changed); - return 0; + if (info->attrs[MPTCP_PM_ATTR_TOKEN]) + return mptcp_userspace_pm_set_flags(&loc, info); + return mptcp_pm_nl_set_flags(&loc, info); } int mptcp_pm_nl_set_flags_doit(struct sk_buff *skb, struct genl_info *info) { - return mptcp_pm_set_flags(skb, info); + return mptcp_pm_set_flags(info); } static void mptcp_nl_mcast_send(struct net *net, struct sk_buff *nlskb, gfp_t gfp) @@ -2071,9 +311,7 @@ static int mptcp_event_add_subflow(struct sk_buff *skb, const struct sock *ssk) break; #if IS_ENABLED(CONFIG_MPTCP_IPV6) case AF_INET6: { - const struct ipv6_pinfo *np = inet6_sk(ssk); - - if (nla_put_in6_addr(skb, MPTCP_ATTR_SADDR6, &np->saddr)) + if (nla_put_in6_addr(skb, MPTCP_ATTR_SADDR6, &issk->pinet6->saddr)) return -EMSGSIZE; if (nla_put_in6_addr(skb, MPTCP_ATTR_DADDR6, &ssk->sk_v6_daddr)) return -EMSGSIZE; @@ -2300,9 +538,7 @@ void mptcp_event_pm_listener(const struct sock *ssk, break; #if IS_ENABLED(CONFIG_MPTCP_IPV6) case AF_INET6: { - const struct ipv6_pinfo *np = inet6_sk(ssk); - - if (nla_put_in6_addr(skb, MPTCP_ATTR_SADDR6, &np->saddr)) + if (nla_put_in6_addr(skb, MPTCP_ATTR_SADDR6, &issk->pinet6->saddr)) goto nla_put_failure; break; } @@ -2390,52 +626,8 @@ struct genl_family mptcp_genl_family __ro_after_init = { .n_mcgrps = ARRAY_SIZE(mptcp_pm_mcgrps), }; -static int __net_init pm_nl_init_net(struct net *net) -{ - struct pm_nl_pernet *pernet = pm_nl_get_pernet(net); - - INIT_LIST_HEAD_RCU(&pernet->local_addr_list); - - /* Cit. 2 subflows ought to be enough for anybody. */ - pernet->subflows_max = 2; - pernet->next_id = 1; - pernet->stale_loss_cnt = 4; - spin_lock_init(&pernet->lock); - - /* No need to initialize other pernet fields, the struct is zeroed at - * allocation time. - */ - - return 0; -} - -static void __net_exit pm_nl_exit_net(struct list_head *net_list) -{ - struct net *net; - - list_for_each_entry(net, net_list, exit_list) { - struct pm_nl_pernet *pernet = pm_nl_get_pernet(net); - - /* net is removed from namespace list, can't race with - * other modifiers, also netns core already waited for a - * RCU grace period. 
- */ - __flush_addrs(&pernet->local_addr_list); - } -} - -static struct pernet_operations mptcp_pm_pernet_ops = { - .init = pm_nl_init_net, - .exit_batch = pm_nl_exit_net, - .id = &pm_nl_pernet_id, - .size = sizeof(struct pm_nl_pernet), -}; - void __init mptcp_pm_nl_init(void) { - if (register_pernet_subsys(&mptcp_pm_pernet_ops) < 0) - panic("Failed to register MPTCP PM pernet subsystem.\n"); - if (genl_register_family(&mptcp_genl_family)) panic("Failed to register MPTCP PM netlink family\n"); } diff --git a/net/mptcp/pm_userspace.c b/net/mptcp/pm_userspace.c index a3d477059b11c..2cb62f026b1f4 100644 --- a/net/mptcp/pm_userspace.c +++ b/net/mptcp/pm_userspace.c @@ -12,15 +12,12 @@ list_for_each_entry(__entry, \ &((__msk)->pm.userspace_pm_local_addr_list), list) -void mptcp_free_local_addr_list(struct mptcp_sock *msk) +void mptcp_userspace_pm_free_local_addr_list(struct mptcp_sock *msk) { struct mptcp_pm_addr_entry *entry, *tmp; struct sock *sk = (struct sock *)msk; LIST_HEAD(free_list); - if (!mptcp_pm_is_userspace(msk)) - return; - spin_lock_bh(&msk->pm.lock); list_splice_init(&msk->pm.userspace_pm_local_addr_list, &free_list); spin_unlock_bh(&msk->pm.lock); @@ -48,7 +45,6 @@ static int mptcp_userspace_pm_append_new_local_addr(struct mptcp_sock *msk, bool needs_id) { DECLARE_BITMAP(id_bitmap, MPTCP_PM_MAX_ADDR_ID + 1); - struct mptcp_pm_addr_entry *match = NULL; struct sock *sk = (struct sock *)msk; struct mptcp_pm_addr_entry *e; bool addr_match = false; @@ -63,26 +59,21 @@ static int mptcp_userspace_pm_append_new_local_addr(struct mptcp_sock *msk, if (addr_match && entry->addr.id == 0 && needs_id) entry->addr.id = e->addr.id; id_match = (e->addr.id == entry->addr.id); - if (addr_match && id_match) { - match = e; - break; - } else if (addr_match || id_match) { + if (addr_match || id_match) break; - } __set_bit(e->addr.id, id_bitmap); } - if (!match && !addr_match && !id_match) { + if (!addr_match && !id_match) { /* Memory for the entry is allocated from the * sock option buffer. 
*/ - e = sock_kmalloc(sk, sizeof(*e), GFP_ATOMIC); + e = sock_kmemdup(sk, entry, sizeof(*entry), GFP_ATOMIC); if (!e) { ret = -ENOMEM; goto append_err; } - *e = *entry; if (!e->addr.id && needs_id) e->addr.id = find_next_zero_bit(id_bitmap, MPTCP_PM_MAX_ADDR_ID + 1, @@ -90,7 +81,7 @@ static int mptcp_userspace_pm_append_new_local_addr(struct mptcp_sock *msk, list_add_tail_rcu(&e->list, &msk->pm.userspace_pm_local_addr_list); msk->pm.local_addr_used++; ret = e->addr.id; - } else if (match) { + } else if (addr_match && id_match) { ret = entry->addr.id; } @@ -136,27 +127,22 @@ mptcp_userspace_pm_lookup_addr_by_id(struct mptcp_sock *msk, unsigned int id) } int mptcp_userspace_pm_get_local_id(struct mptcp_sock *msk, - struct mptcp_addr_info *skc) + struct mptcp_pm_addr_entry *skc) { - struct mptcp_pm_addr_entry *entry = NULL, new_entry; __be16 msk_sport = ((struct inet_sock *) inet_sk((struct sock *)msk))->inet_sport; + struct mptcp_pm_addr_entry *entry; spin_lock_bh(&msk->pm.lock); - entry = mptcp_userspace_pm_lookup_addr(msk, skc); + entry = mptcp_userspace_pm_lookup_addr(msk, &skc->addr); spin_unlock_bh(&msk->pm.lock); if (entry) return entry->addr.id; - memset(&new_entry, 0, sizeof(struct mptcp_pm_addr_entry)); - new_entry.addr = *skc; - new_entry.addr.id = 0; - new_entry.flags = MPTCP_PM_ADDR_FLAG_IMPLICIT; - - if (new_entry.addr.port == msk_sport) - new_entry.addr.port = 0; + if (skc->addr.port == msk_sport) + skc->addr.port = 0; - return mptcp_userspace_pm_append_new_local_addr(msk, &new_entry, true); + return mptcp_userspace_pm_append_new_local_addr(msk, skc, true); } bool mptcp_userspace_pm_is_backup(struct mptcp_sock *msk, @@ -175,14 +161,13 @@ bool mptcp_userspace_pm_is_backup(struct mptcp_sock *msk, static struct mptcp_sock *mptcp_userspace_pm_get_sock(const struct genl_info *info) { - struct nlattr *token = info->attrs[MPTCP_PM_ATTR_TOKEN]; struct mptcp_sock *msk; + struct nlattr *token; - if (!token) { - GENL_SET_ERR_MSG(info, "missing required token"); + if (GENL_REQ_ATTR_CHECK(info, MPTCP_PM_ATTR_TOKEN)) return NULL; - } + token = info->attrs[MPTCP_PM_ATTR_TOKEN]; msk = mptcp_token_get_sock(genl_info_net(info), nla_get_u32(token)); if (!msk) { NL_SET_ERR_MSG_ATTR(info->extack, token, "invalid token"); @@ -190,7 +175,8 @@ static struct mptcp_sock *mptcp_userspace_pm_get_sock(const struct genl_info *in } if (!mptcp_pm_is_userspace(msk)) { - GENL_SET_ERR_MSG(info, "invalid request; userspace PM not selected"); + NL_SET_ERR_MSG_ATTR(info->extack, token, + "userspace PM not selected"); sock_put((struct sock *)msk); return NULL; } @@ -200,16 +186,14 @@ static struct mptcp_sock *mptcp_userspace_pm_get_sock(const struct genl_info *in int mptcp_pm_nl_announce_doit(struct sk_buff *skb, struct genl_info *info) { - struct nlattr *addr = info->attrs[MPTCP_PM_ATTR_ADDR]; struct mptcp_pm_addr_entry addr_val; struct mptcp_sock *msk; + struct nlattr *addr; int err = -EINVAL; struct sock *sk; - if (!addr) { - GENL_SET_ERR_MSG(info, "missing required address"); + if (GENL_REQ_ATTR_CHECK(info, MPTCP_PM_ATTR_ADDR)) return err; - } msk = mptcp_userspace_pm_get_sock(info); if (!msk) @@ -217,21 +201,27 @@ int mptcp_pm_nl_announce_doit(struct sk_buff *skb, struct genl_info *info) sk = (struct sock *)msk; + addr = info->attrs[MPTCP_PM_ATTR_ADDR]; err = mptcp_pm_parse_entry(addr, info, true, &addr_val); - if (err < 0) { - GENL_SET_ERR_MSG(info, "error parsing local address"); + if (err < 0) + goto announce_err; + + if (addr_val.addr.id == 0) { + NL_SET_ERR_MSG_ATTR(info->extack, addr, "invalid addr id"); 
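/* [Editor's note] The conversions in this hunk replace open-coded
 * "attribute missing" checks with GENL_REQ_ATTR_CHECK(), which both rejects
 * the request and fills extack with a standard message, while
 * NL_SET_ERR_MSG_ATTR() pins any later error text to the offending
 * attribute. A hedged sketch of that shape in a generic doit handler;
 * DEMO_ATTR_ADDR and the length check are made up for illustration:
 */
#include <linux/in.h>
#include <net/genetlink.h>

static int demo_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr *attr;

	/* fails the request and sets a standard extack message if absent */
	if (GENL_REQ_ATTR_CHECK(info, DEMO_ATTR_ADDR))
		return -EINVAL;

	attr = info->attrs[DEMO_ATTR_ADDR];
	if (nla_len(attr) < sizeof(struct in_addr)) {
		/* anchor the error text to the offending attribute */
		NL_SET_ERR_MSG_ATTR(info->extack, attr, "address too short");
		return -EINVAL;
	}

	return 0;
}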
+ err = -EINVAL; goto announce_err; } - if (addr_val.addr.id == 0 || !(addr_val.flags & MPTCP_PM_ADDR_FLAG_SIGNAL)) { - GENL_SET_ERR_MSG(info, "invalid addr id or flags"); + if (!(addr_val.flags & MPTCP_PM_ADDR_FLAG_SIGNAL)) { + NL_SET_ERR_MSG_ATTR(info->extack, addr, "invalid addr flags"); err = -EINVAL; goto announce_err; } err = mptcp_userspace_pm_append_new_local_addr(msk, &addr_val, false); if (err < 0) { - GENL_SET_ERR_MSG(info, "did not match address and id"); + NL_SET_ERR_MSG_ATTR(info->extack, addr, + "did not match address and id"); goto announce_err; } @@ -241,7 +231,7 @@ int mptcp_pm_nl_announce_doit(struct sk_buff *skb, struct genl_info *info) if (mptcp_pm_alloc_anno_list(msk, &addr_val.addr)) { msk->pm.add_addr_signaled++; mptcp_pm_announce_addr(msk, &addr_val.addr, false); - mptcp_pm_nl_addr_send_ack(msk); + mptcp_pm_addr_send_ack(msk); } spin_unlock_bh(&msk->pm.lock); @@ -253,8 +243,7 @@ int mptcp_pm_nl_announce_doit(struct sk_buff *skb, struct genl_info *info) return err; } -static int mptcp_userspace_pm_remove_id_zero_address(struct mptcp_sock *msk, - struct genl_info *info) +static int mptcp_userspace_pm_remove_id_zero_address(struct mptcp_sock *msk) { struct mptcp_rm_list list = { .nr = 0 }; struct mptcp_subflow_context *subflow; @@ -269,10 +258,8 @@ static int mptcp_userspace_pm_remove_id_zero_address(struct mptcp_sock *msk, break; } } - if (!has_id_0) { - GENL_SET_ERR_MSG(info, "address with id 0 not found"); + if (!has_id_0) goto remove_err; - } list.ids[list.nr++] = 0; @@ -309,18 +296,17 @@ void mptcp_pm_remove_addr_entry(struct mptcp_sock *msk, int mptcp_pm_nl_remove_doit(struct sk_buff *skb, struct genl_info *info) { - struct nlattr *id = info->attrs[MPTCP_PM_ATTR_LOC_ID]; struct mptcp_pm_addr_entry *match; struct mptcp_sock *msk; + struct nlattr *id; int err = -EINVAL; struct sock *sk; u8 id_val; - if (!id) { - GENL_SET_ERR_MSG(info, "missing required ID"); + if (GENL_REQ_ATTR_CHECK(info, MPTCP_PM_ATTR_LOC_ID)) return err; - } + id = info->attrs[MPTCP_PM_ATTR_LOC_ID]; id_val = nla_get_u8(id); msk = mptcp_userspace_pm_get_sock(info); @@ -330,7 +316,7 @@ int mptcp_pm_nl_remove_doit(struct sk_buff *skb, struct genl_info *info) sk = (struct sock *)msk; if (id_val == 0) { - err = mptcp_userspace_pm_remove_id_zero_address(msk, info); + err = mptcp_userspace_pm_remove_id_zero_address(msk); goto out; } @@ -339,7 +325,6 @@ int mptcp_pm_nl_remove_doit(struct sk_buff *skb, struct genl_info *info) spin_lock_bh(&msk->pm.lock); match = mptcp_userspace_pm_lookup_addr_by_id(msk, id_val); if (!match) { - GENL_SET_ERR_MSG(info, "address with specified id not found"); spin_unlock_bh(&msk->pm.lock); release_sock(sk); goto out; @@ -356,25 +341,28 @@ int mptcp_pm_nl_remove_doit(struct sk_buff *skb, struct genl_info *info) err = 0; out: + if (err) + NL_SET_ERR_MSG_ATTR_FMT(info->extack, id, + "address with id %u not found", + id_val); + sock_put(sk); return err; } int mptcp_pm_nl_subflow_create_doit(struct sk_buff *skb, struct genl_info *info) { - struct nlattr *raddr = info->attrs[MPTCP_PM_ATTR_ADDR_REMOTE]; - struct nlattr *laddr = info->attrs[MPTCP_PM_ATTR_ADDR]; struct mptcp_pm_addr_entry entry = { 0 }; struct mptcp_addr_info addr_r; + struct nlattr *raddr, *laddr; struct mptcp_pm_local local; struct mptcp_sock *msk; int err = -EINVAL; struct sock *sk; - if (!laddr || !raddr) { - GENL_SET_ERR_MSG(info, "missing required address(es)"); + if (GENL_REQ_ATTR_CHECK(info, MPTCP_PM_ATTR_ADDR) || + GENL_REQ_ATTR_CHECK(info, MPTCP_PM_ATTR_ADDR_REMOTE)) return err; - } msk = 
mptcp_userspace_pm_get_sock(info); if (!msk) @@ -382,24 +370,22 @@ int mptcp_pm_nl_subflow_create_doit(struct sk_buff *skb, struct genl_info *info) sk = (struct sock *)msk; + laddr = info->attrs[MPTCP_PM_ATTR_ADDR]; err = mptcp_pm_parse_entry(laddr, info, true, &entry); - if (err < 0) { - NL_SET_ERR_MSG_ATTR(info->extack, laddr, "error parsing local addr"); + if (err < 0) goto create_err; - } if (entry.flags & MPTCP_PM_ADDR_FLAG_SIGNAL) { - GENL_SET_ERR_MSG(info, "invalid addr flags"); + NL_SET_ERR_MSG_ATTR(info->extack, laddr, "invalid addr flags"); err = -EINVAL; goto create_err; } entry.flags |= MPTCP_PM_ADDR_FLAG_SUBFLOW; + raddr = info->attrs[MPTCP_PM_ATTR_ADDR_REMOTE]; err = mptcp_pm_parse_addr(raddr, info, &addr_r); - if (err < 0) { - NL_SET_ERR_MSG_ATTR(info->extack, raddr, "error parsing remote addr"); + if (err < 0) goto create_err; - } if (!mptcp_pm_addr_families_match(sk, &entry.addr, &addr_r)) { GENL_SET_ERR_MSG(info, "families mismatch"); @@ -409,7 +395,8 @@ int mptcp_pm_nl_subflow_create_doit(struct sk_buff *skb, struct genl_info *info) err = mptcp_userspace_pm_append_new_local_addr(msk, &entry, false); if (err < 0) { - GENL_SET_ERR_MSG(info, "did not match address and id"); + NL_SET_ERR_MSG_ATTR(info->extack, laddr, + "did not match address and id"); goto create_err; } @@ -421,6 +408,9 @@ int mptcp_pm_nl_subflow_create_doit(struct sk_buff *skb, struct genl_info *info) err = __mptcp_subflow_connect(sk, &local, &addr_r); release_sock(sk); + if (err) + GENL_SET_ERR_MSG_FMT(info, "connect error: %d", err); + spin_lock_bh(&msk->pm.lock); if (err) mptcp_userspace_pm_delete_local_addr(msk, &entry); @@ -461,9 +451,7 @@ static struct sock *mptcp_nl_find_ssk(struct mptcp_sock *msk, break; #if IS_ENABLED(CONFIG_MPTCP_IPV6) case AF_INET6: { - const struct ipv6_pinfo *pinfo = inet6_sk(ssk); - - if (!ipv6_addr_equal(&local->addr6, &pinfo->saddr) || + if (!ipv6_addr_equal(&local->addr6, &issk->pinet6->saddr) || !ipv6_addr_equal(&remote->addr6, &ssk->sk_v6_daddr)) continue; break; @@ -483,18 +471,16 @@ static struct sock *mptcp_nl_find_ssk(struct mptcp_sock *msk, int mptcp_pm_nl_subflow_destroy_doit(struct sk_buff *skb, struct genl_info *info) { - struct nlattr *raddr = info->attrs[MPTCP_PM_ATTR_ADDR_REMOTE]; - struct nlattr *laddr = info->attrs[MPTCP_PM_ATTR_ADDR]; struct mptcp_pm_addr_entry addr_l; struct mptcp_addr_info addr_r; + struct nlattr *raddr, *laddr; struct mptcp_sock *msk; struct sock *sk, *ssk; int err = -EINVAL; - if (!laddr || !raddr) { - GENL_SET_ERR_MSG(info, "missing required address(es)"); + if (GENL_REQ_ATTR_CHECK(info, MPTCP_PM_ATTR_ADDR) || + GENL_REQ_ATTR_CHECK(info, MPTCP_PM_ATTR_ADDR_REMOTE)) return err; - } msk = mptcp_userspace_pm_get_sock(info); if (!msk) @@ -502,17 +488,15 @@ int mptcp_pm_nl_subflow_destroy_doit(struct sk_buff *skb, struct genl_info *info sk = (struct sock *)msk; + laddr = info->attrs[MPTCP_PM_ATTR_ADDR]; err = mptcp_pm_parse_entry(laddr, info, true, &addr_l); - if (err < 0) { - NL_SET_ERR_MSG_ATTR(info->extack, laddr, "error parsing local addr"); + if (err < 0) goto destroy_err; - } + raddr = info->attrs[MPTCP_PM_ATTR_ADDR_REMOTE]; err = mptcp_pm_parse_addr(raddr, info, &addr_r); - if (err < 0) { - NL_SET_ERR_MSG_ATTR(info->extack, raddr, "error parsing remote addr"); + if (err < 0) goto destroy_err; - } #if IS_ENABLED(CONFIG_MPTCP_IPV6) if (addr_l.addr.family == AF_INET && ipv6_addr_v4mapped(&addr_r.addr6)) { @@ -530,8 +514,14 @@ int mptcp_pm_nl_subflow_destroy_doit(struct sk_buff *skb, struct genl_info *info goto destroy_err; } - if 
(!addr_l.addr.port || !addr_r.port) { - GENL_SET_ERR_MSG(info, "missing local or remote port"); + if (!addr_l.addr.port) { + NL_SET_ERR_MSG_ATTR(info->extack, laddr, "missing local port"); + err = -EINVAL; + goto destroy_err; + } + + if (!addr_r.port) { + NL_SET_ERR_MSG_ATTR(info->extack, raddr, "missing remote port"); err = -EINVAL; goto destroy_err; } @@ -539,6 +529,7 @@ int mptcp_pm_nl_subflow_destroy_doit(struct sk_buff *skb, struct genl_info *info lock_sock(sk); ssk = mptcp_nl_find_ssk(msk, &addr_l.addr, &addr_r); if (!ssk) { + GENL_SET_ERR_MSG(info, "subflow not found"); err = -ESRCH; goto release_sock; } @@ -557,46 +548,51 @@ int mptcp_pm_nl_subflow_destroy_doit(struct sk_buff *skb, struct genl_info *info return err; } -int mptcp_userspace_pm_set_flags(struct sk_buff *skb, struct genl_info *info) +int mptcp_userspace_pm_set_flags(struct mptcp_pm_addr_entry *local, + struct genl_info *info) { - struct mptcp_pm_addr_entry loc = { .addr = { .family = AF_UNSPEC }, }; - struct mptcp_pm_addr_entry rem = { .addr = { .family = AF_UNSPEC }, }; - struct nlattr *attr_rem = info->attrs[MPTCP_PM_ATTR_ADDR_REMOTE]; - struct nlattr *attr = info->attrs[MPTCP_PM_ATTR_ADDR]; + struct mptcp_addr_info rem = { .family = AF_UNSPEC, }; struct mptcp_pm_addr_entry *entry; + struct nlattr *attr, *attr_rem; struct mptcp_sock *msk; int ret = -EINVAL; struct sock *sk; u8 bkup = 0; + if (GENL_REQ_ATTR_CHECK(info, MPTCP_PM_ATTR_ADDR_REMOTE)) + return ret; + msk = mptcp_userspace_pm_get_sock(info); if (!msk) return ret; sk = (struct sock *)msk; - ret = mptcp_pm_parse_entry(attr, info, false, &loc); - if (ret < 0) + attr = info->attrs[MPTCP_PM_ATTR_ADDR]; + if (local->addr.family == AF_UNSPEC) { + NL_SET_ERR_MSG_ATTR(info->extack, attr, + "invalid local address family"); + ret = -EINVAL; goto set_flags_err; - - if (attr_rem) { - ret = mptcp_pm_parse_entry(attr_rem, info, false, &rem); - if (ret < 0) - goto set_flags_err; } - if (loc.addr.family == AF_UNSPEC || - rem.addr.family == AF_UNSPEC) { - GENL_SET_ERR_MSG(info, "invalid address families"); + attr_rem = info->attrs[MPTCP_PM_ATTR_ADDR_REMOTE]; + ret = mptcp_pm_parse_addr(attr_rem, info, &rem); + if (ret < 0) + goto set_flags_err; + + if (rem.family == AF_UNSPEC) { + NL_SET_ERR_MSG_ATTR(info->extack, attr_rem, + "invalid remote address family"); ret = -EINVAL; goto set_flags_err; } - if (loc.flags & MPTCP_PM_ADDR_FLAG_BACKUP) + if (local->flags & MPTCP_PM_ADDR_FLAG_BACKUP) bkup = 1; spin_lock_bh(&msk->pm.lock); - entry = mptcp_userspace_pm_lookup_addr(msk, &loc.addr); + entry = mptcp_userspace_pm_lookup_addr(msk, &local->addr); if (entry) { if (bkup) entry->flags |= MPTCP_PM_ADDR_FLAG_BACKUP; @@ -606,9 +602,13 @@ int mptcp_userspace_pm_set_flags(struct sk_buff *skb, struct genl_info *info) spin_unlock_bh(&msk->pm.lock); lock_sock(sk); - ret = mptcp_pm_nl_mp_prio_send_ack(msk, &loc.addr, &rem.addr, bkup); + ret = mptcp_pm_mp_prio_send_ack(msk, &local->addr, &rem, bkup); release_sock(sk); + /* mptcp_pm_mp_prio_send_ack() only fails in one case */ + if (ret < 0) + GENL_SET_ERR_MSG(info, "subflow not found"); + set_flags_err: sock_put(sk); return ret; @@ -625,7 +625,8 @@ int mptcp_userspace_pm_dump_addr(struct sk_buff *msg, struct mptcp_sock *msk; int ret = -EINVAL; struct sock *sk; - void *hdr; + + BUILD_BUG_ON(sizeof(struct id_bitmap) > sizeof(cb->ctx)); bitmap = (struct id_bitmap *)cb->ctx; @@ -641,19 +642,10 @@ int mptcp_userspace_pm_dump_addr(struct sk_buff *msg, if (test_bit(entry->addr.id, bitmap->map)) continue; - hdr = genlmsg_put(msg, 
NETLINK_CB(cb->skb).portid, - cb->nlh->nlmsg_seq, &mptcp_genl_family, - NLM_F_MULTI, MPTCP_PM_CMD_GET_ADDR); - if (!hdr) + if (mptcp_pm_genl_fill_addr(msg, cb, entry) < 0) break; - if (mptcp_nl_fill_addr(msg, entry) < 0) { - genlmsg_cancel(msg, hdr); - break; - } - __set_bit(entry->addr.id, bitmap->map); - genlmsg_end(msg, hdr); } spin_unlock_bh(&msk->pm.lock); release_sock(sk); @@ -663,16 +655,13 @@ int mptcp_userspace_pm_dump_addr(struct sk_buff *msg, return ret; } -int mptcp_userspace_pm_get_addr(struct sk_buff *skb, +int mptcp_userspace_pm_get_addr(u8 id, struct mptcp_pm_addr_entry *addr, struct genl_info *info) { - struct nlattr *attr = info->attrs[MPTCP_PM_ENDPOINT_ADDR]; - struct mptcp_pm_addr_entry addr, *entry; + struct mptcp_pm_addr_entry *entry; struct mptcp_sock *msk; - struct sk_buff *msg; int ret = -EINVAL; struct sock *sk; - void *reply; msk = mptcp_userspace_pm_get_sock(info); if (!msk) @@ -680,50 +669,26 @@ int mptcp_userspace_pm_get_addr(struct sk_buff *skb, sk = (struct sock *)msk; - ret = mptcp_pm_parse_entry(attr, info, false, &addr); - if (ret < 0) - goto out; - - msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); - if (!msg) { - ret = -ENOMEM; - goto out; - } - - reply = genlmsg_put_reply(msg, info, &mptcp_genl_family, 0, - info->genlhdr->cmd); - if (!reply) { - GENL_SET_ERR_MSG(info, "not enough space in Netlink message"); - ret = -EMSGSIZE; - goto fail; - } - lock_sock(sk); spin_lock_bh(&msk->pm.lock); - entry = mptcp_userspace_pm_lookup_addr_by_id(msk, addr.addr.id); - if (!entry) { - GENL_SET_ERR_MSG(info, "address not found"); - ret = -EINVAL; - goto unlock_fail; + entry = mptcp_userspace_pm_lookup_addr_by_id(msk, id); + if (entry) { + *addr = *entry; + ret = 0; } - - ret = mptcp_nl_fill_addr(msg, entry); - if (ret) - goto unlock_fail; - - genlmsg_end(msg, reply); - ret = genlmsg_reply(msg, info); spin_unlock_bh(&msk->pm.lock); release_sock(sk); - sock_put(sk); - return ret; -unlock_fail: - spin_unlock_bh(&msk->pm.lock); - release_sock(sk); -fail: - nlmsg_free(msg); -out: sock_put(sk); return ret; } + +static struct mptcp_pm_ops mptcp_pm_userspace = { + .name = "userspace", + .owner = THIS_MODULE, +}; + +void __init mptcp_pm_userspace_register(void) +{ + mptcp_pm_register(&mptcp_pm_userspace); +} diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c index 6bd8190474706..44f7ab463d755 100644 --- a/net/mptcp/protocol.c +++ b/net/mptcp/protocol.c @@ -118,24 +118,14 @@ static void mptcp_drop(struct sock *sk, struct sk_buff *skb) __kfree_skb(skb); } -static void mptcp_rmem_fwd_alloc_add(struct sock *sk, int size) -{ - WRITE_ONCE(mptcp_sk(sk)->rmem_fwd_alloc, - mptcp_sk(sk)->rmem_fwd_alloc + size); -} - -static void mptcp_rmem_charge(struct sock *sk, int size) -{ - mptcp_rmem_fwd_alloc_add(sk, -size); -} - static bool mptcp_try_coalesce(struct sock *sk, struct sk_buff *to, struct sk_buff *from) { bool fragstolen; int delta; - if (MPTCP_SKB_CB(from)->offset || + if (unlikely(MPTCP_SKB_CB(to)->cant_coalesce) || + MPTCP_SKB_CB(from)->offset || ((to->len + from->len) > (sk->sk_rcvbuf >> 3)) || !skb_try_coalesce(to, from, &fragstolen, &delta)) return false; @@ -150,7 +140,7 @@ static bool mptcp_try_coalesce(struct sock *sk, struct sk_buff *to, * negative one */ atomic_add(delta, &sk->sk_rmem_alloc); - mptcp_rmem_charge(sk, delta); + sk_mem_charge(sk, delta); kfree_skb_partial(from, fragstolen); return true; @@ -165,44 +155,6 @@ static bool mptcp_ooo_try_coalesce(struct mptcp_sock *msk, struct sk_buff *to, return mptcp_try_coalesce((struct sock *)msk, to, from); } -static 
void __mptcp_rmem_reclaim(struct sock *sk, int amount) -{ - amount >>= PAGE_SHIFT; - mptcp_rmem_charge(sk, amount << PAGE_SHIFT); - __sk_mem_reduce_allocated(sk, amount); -} - -static void mptcp_rmem_uncharge(struct sock *sk, int size) -{ - struct mptcp_sock *msk = mptcp_sk(sk); - int reclaimable; - - mptcp_rmem_fwd_alloc_add(sk, size); - reclaimable = msk->rmem_fwd_alloc - sk_unused_reserved_mem(sk); - - /* see sk_mem_uncharge() for the rationale behind the following schema */ - if (unlikely(reclaimable >= PAGE_SIZE)) - __mptcp_rmem_reclaim(sk, reclaimable); -} - -static void mptcp_rfree(struct sk_buff *skb) -{ - unsigned int len = skb->truesize; - struct sock *sk = skb->sk; - - atomic_sub(len, &sk->sk_rmem_alloc); - mptcp_rmem_uncharge(sk, len); -} - -void mptcp_set_owner_r(struct sk_buff *skb, struct sock *sk) -{ - skb_orphan(skb); - skb->sk = sk; - skb->destructor = mptcp_rfree; - atomic_add(skb->truesize, &sk->sk_rmem_alloc); - mptcp_rmem_charge(sk, skb->truesize); -} - /* "inspired" by tcp_data_queue_ofo(), main differences: * - use mptcp seqs * - don't cope with sacks @@ -315,25 +267,7 @@ static void mptcp_data_queue_ofo(struct mptcp_sock *msk, struct sk_buff *skb) end: skb_condense(skb); - mptcp_set_owner_r(skb, sk); -} - -static bool mptcp_rmem_schedule(struct sock *sk, struct sock *ssk, int size) -{ - struct mptcp_sock *msk = mptcp_sk(sk); - int amt, amount; - - if (size <= msk->rmem_fwd_alloc) - return true; - - size -= msk->rmem_fwd_alloc; - amt = sk_mem_pages(size); - amount = amt << PAGE_SHIFT; - if (!__sk_mem_raise_allocated(sk, size, amt, SK_MEM_RECV)) - return false; - - mptcp_rmem_fwd_alloc_add(sk, amount); - return true; + skb_set_owner_r(skb, sk); } static bool __mptcp_move_skb(struct mptcp_sock *msk, struct sock *ssk, @@ -351,7 +285,7 @@ static bool __mptcp_move_skb(struct mptcp_sock *msk, struct sock *ssk, skb_orphan(skb); /* try to fetch required memory from subflow */ - if (!mptcp_rmem_schedule(sk, ssk, skb->truesize)) { + if (!sk_rmem_schedule(sk, skb, skb->truesize)) { MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_RCVPRUNED); goto drop; } @@ -366,6 +300,7 @@ static bool __mptcp_move_skb(struct mptcp_sock *msk, struct sock *ssk, MPTCP_SKB_CB(skb)->end_seq = MPTCP_SKB_CB(skb)->map_seq + copy_len; MPTCP_SKB_CB(skb)->offset = offset; MPTCP_SKB_CB(skb)->has_rxtstamp = has_rxtstamp; + MPTCP_SKB_CB(skb)->cant_coalesce = 0; if (MPTCP_SKB_CB(skb)->map_seq == msk->ack_seq) { /* in sequence */ @@ -375,7 +310,7 @@ static bool __mptcp_move_skb(struct mptcp_sock *msk, struct sock *ssk, if (tail && mptcp_try_coalesce(sk, tail, skb)) return true; - mptcp_set_owner_r(skb, sk); + skb_set_owner_r(skb, sk); __skb_queue_tail(&sk->sk_receive_queue, skb); return true; } else if (after64(MPTCP_SKB_CB(skb)->map_seq, msk->ack_seq)) { @@ -487,7 +422,7 @@ static long mptcp_timeout_from_subflow(const struct mptcp_subflow_context *subfl const struct sock *ssk = mptcp_subflow_tcp_sock(subflow); return inet_csk(ssk)->icsk_pending && !subflow->stale_count ? 
- inet_csk(ssk)->icsk_timeout - jiffies : 0; + icsk_timeout(inet_csk(ssk)) - jiffies : 0; } static void mptcp_set_timeout(struct sock *sk) @@ -561,7 +496,7 @@ static void mptcp_cleanup_rbuf(struct mptcp_sock *msk, int copied) bool cleanup, rx_empty; cleanup = (space > 0) && (space >= (old_space << 1)) && copied; - rx_empty = !__mptcp_rmem(sk) && copied; + rx_empty = !sk_rmem_alloc_get(sk) && copied; mptcp_for_each_subflow(msk, subflow) { struct sock *ssk = mptcp_subflow_tcp_sock(subflow); @@ -634,27 +569,13 @@ static void mptcp_dss_corruption(struct mptcp_sock *msk, struct sock *ssk) } static bool __mptcp_move_skbs_from_subflow(struct mptcp_sock *msk, - struct sock *ssk, - unsigned int *bytes) + struct sock *ssk) { struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); struct sock *sk = (struct sock *)msk; - unsigned int moved = 0; bool more_data_avail; struct tcp_sock *tp; - bool done = false; - int sk_rbuf; - - sk_rbuf = READ_ONCE(sk->sk_rcvbuf); - - if (!(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) { - int ssk_rbuf = READ_ONCE(ssk->sk_rcvbuf); - - if (unlikely(ssk_rbuf > sk_rbuf)) { - WRITE_ONCE(sk->sk_rcvbuf, ssk_rbuf); - sk_rbuf = ssk_rbuf; - } - } + bool ret = false; pr_debug("msk=%p ssk=%p\n", msk, ssk); tp = tcp_sk(ssk); @@ -664,20 +585,16 @@ static bool __mptcp_move_skbs_from_subflow(struct mptcp_sock *msk, struct sk_buff *skb; bool fin; + if (sk_rmem_alloc_get(sk) > sk->sk_rcvbuf) + break; + /* try to move as much data as available */ map_remaining = subflow->map_data_len - mptcp_subflow_get_map_offset(subflow); skb = skb_peek(&ssk->sk_receive_queue); - if (!skb) { - /* With racing move_skbs_to_msk() and __mptcp_move_skbs(), - * a different CPU can have already processed the pending - * data, stop here or we can enter an infinite loop - */ - if (!moved) - done = true; + if (unlikely(!skb)) break; - } if (__mptcp_check_fallback(msk)) { /* Under fallback skbs have no MPTCP extension and TCP could @@ -690,19 +607,13 @@ static bool __mptcp_move_skbs_from_subflow(struct mptcp_sock *msk, offset = seq - TCP_SKB_CB(skb)->seq; fin = TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN; - if (fin) { - done = true; + if (fin) seq++; - } if (offset < skb->len) { size_t len = skb->len - offset; - if (tp->urg_data) - done = true; - - if (__mptcp_move_skb(msk, ssk, skb, offset, len)) - moved += len; + ret = __mptcp_move_skb(msk, ssk, skb, offset, len) || ret; seq += len; if (unlikely(map_remaining < len)) { @@ -716,22 +627,16 @@ static bool __mptcp_move_skbs_from_subflow(struct mptcp_sock *msk, } sk_eat_skb(ssk, skb); - done = true; } WRITE_ONCE(tp->copied_seq, seq); more_data_avail = mptcp_subflow_data_available(ssk); - if (atomic_read(&sk->sk_rmem_alloc) > sk_rbuf) { - done = true; - break; - } } while (more_data_avail); - if (moved > 0) + if (ret) msk->last_data_recv = tcp_jiffies32; - *bytes += moved; - return done; + return ret; } static bool __mptcp_ofo_queue(struct mptcp_sock *msk) @@ -825,9 +730,9 @@ void __mptcp_error_report(struct sock *sk) static bool move_skbs_to_msk(struct mptcp_sock *msk, struct sock *ssk) { struct sock *sk = (struct sock *)msk; - unsigned int moved = 0; + bool moved; - __mptcp_move_skbs_from_subflow(msk, ssk, &moved); + moved = __mptcp_move_skbs_from_subflow(msk, ssk); __mptcp_ofo_queue(msk); if (unlikely(ssk->sk_err)) { if (!sock_owned_by_user(sk)) @@ -843,14 +748,29 @@ static bool move_skbs_to_msk(struct mptcp_sock *msk, struct sock *ssk) */ if (mptcp_pending_data_fin(sk, NULL)) mptcp_schedule_work(sk); - return moved > 0; + return moved; +} + +static void 
__mptcp_rcvbuf_update(struct sock *sk, struct sock *ssk) +{ + if (unlikely(ssk->sk_rcvbuf > sk->sk_rcvbuf)) + WRITE_ONCE(sk->sk_rcvbuf, ssk->sk_rcvbuf); +} + +static void __mptcp_data_ready(struct sock *sk, struct sock *ssk) +{ + struct mptcp_sock *msk = mptcp_sk(sk); + + __mptcp_rcvbuf_update(sk, ssk); + + /* Wake-up the reader only for in-sequence data */ + if (move_skbs_to_msk(msk, ssk) && mptcp_epollin_ready(sk)) + sk->sk_data_ready(sk); } void mptcp_data_ready(struct sock *sk, struct sock *ssk) { struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); - struct mptcp_sock *msk = mptcp_sk(sk); - int sk_rbuf, ssk_rbuf; /* The peer can send data while we are shutting down this * subflow at msk destruction time, but we must avoid enqueuing @@ -859,19 +779,11 @@ void mptcp_data_ready(struct sock *sk, struct sock *ssk) if (unlikely(subflow->disposable)) return; - ssk_rbuf = READ_ONCE(ssk->sk_rcvbuf); - sk_rbuf = READ_ONCE(sk->sk_rcvbuf); - if (unlikely(ssk_rbuf > sk_rbuf)) - sk_rbuf = ssk_rbuf; - - /* over limit? can't append more skbs to msk, Also, no need to wake-up*/ - if (__mptcp_rmem(sk) > sk_rbuf) - return; - - /* Wake-up the reader only for in-sequence data */ mptcp_data_lock(sk); - if (move_skbs_to_msk(msk, ssk) && mptcp_epollin_ready(sk)) - sk->sk_data_ready(sk); + if (!sock_owned_by_user(sk)) + __mptcp_data_ready(sk, ssk); + else + __set_bit(MPTCP_DEQUEUE, &mptcp_sk(sk)->cb_flags); mptcp_data_unlock(sk); } @@ -950,20 +862,6 @@ bool mptcp_schedule_work(struct sock *sk) return false; } -static struct sock *mptcp_subflow_recv_lookup(const struct mptcp_sock *msk) -{ - struct mptcp_subflow_context *subflow; - - msk_owned_by_me(msk); - - mptcp_for_each_subflow(msk, subflow) { - if (READ_ONCE(subflow->data_avail)) - return mptcp_subflow_tcp_sock(subflow); - } - - return NULL; -} - static bool mptcp_skb_can_collapse_to(u64 write_seq, const struct sk_buff *skb, const struct mptcp_ext *mpext) @@ -1944,16 +1842,17 @@ static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) static void mptcp_rcv_space_adjust(struct mptcp_sock *msk, int copied); -static int __mptcp_recvmsg_mskq(struct mptcp_sock *msk, +static int __mptcp_recvmsg_mskq(struct sock *sk, struct msghdr *msg, size_t len, int flags, struct scm_timestamping_internal *tss, int *cmsg_flags) { + struct mptcp_sock *msk = mptcp_sk(sk); struct sk_buff *skb, *tmp; int copied = 0; - skb_queue_walk_safe(&msk->receive_queue, skb, tmp) { + skb_queue_walk_safe(&sk->sk_receive_queue, skb, tmp) { u32 offset = MPTCP_SKB_CB(skb)->offset; u32 data_len = skb->len - offset; u32 count = min_t(size_t, len - copied, data_len); @@ -1985,10 +1884,11 @@ static int __mptcp_recvmsg_mskq(struct mptcp_sock *msk, } if (!(flags & MSG_PEEK)) { - /* we will bulk release the skb memory later */ + /* avoid the indirect call, we know the destructor is sock_rfree */ skb->destructor = NULL; - WRITE_ONCE(msk->rmem_released, msk->rmem_released + skb->truesize); - __skb_unlink(skb, &msk->receive_queue); + atomic_sub(skb->truesize, &sk->sk_rmem_alloc); + sk_mem_uncharge(sk, skb->truesize); + __skb_unlink(skb, &sk->sk_receive_queue); __kfree_skb(skb); msk->bytes_consumed += count; } @@ -2101,66 +2001,65 @@ static void mptcp_rcv_space_adjust(struct mptcp_sock *msk, int copied) msk->rcvq_space.time = mstamp; }
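/* [Editor's note] With the rx rework above, incoming data is dequeued
 * immediately only when the msk socket is not owned by user space;
 * otherwise mptcp_data_ready() just sets MPTCP_DEQUEUE in cb_flags and the
 * work is replayed from mptcp_release_cb() at release_sock() time. The
 * generic shape of that idiom is sketched below; demo_sock, demo_process()
 * and DEMO_DEFERRED are hypothetical stand-ins:
 */
#include <net/sock.h>

#define DEMO_DEFERRED	0	/* hypothetical deferred-work flag bit */

struct demo_sock {
	struct sock sk;
	unsigned long cb_flags;
};

static void demo_process(struct sock *sk)
{
	/* the actual work: dequeue data, wake up readers, ... */
}

static void demo_event(struct sock *sk)	/* runs from softirq */
{
	struct demo_sock *dsk = (struct demo_sock *)sk;

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk))
		demo_process(sk);	/* fast path: handle it right away */
	else
		/* slow path: the release callback will replay the event */
		__set_bit(DEMO_DEFERRED, &dsk->cb_flags);
	bh_unlock_sock(sk);
}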
-static void __mptcp_update_rmem(struct sock *sk) +static struct mptcp_subflow_context * +__mptcp_first_ready_from(struct mptcp_sock *msk, + struct mptcp_subflow_context *subflow) { - struct mptcp_sock *msk = mptcp_sk(sk); - - if (!msk->rmem_released) - return; + struct mptcp_subflow_context *start_subflow = subflow; - atomic_sub(msk->rmem_released, &sk->sk_rmem_alloc); - mptcp_rmem_uncharge(sk, msk->rmem_released); - WRITE_ONCE(msk->rmem_released, 0); + while (!READ_ONCE(subflow->data_avail)) { + subflow = mptcp_next_subflow(msk, subflow); + if (subflow == start_subflow) + return NULL; + } + return subflow; } -static void __mptcp_splice_receive_queue(struct sock *sk) +static bool __mptcp_move_skbs(struct sock *sk) { + struct mptcp_subflow_context *subflow; struct mptcp_sock *msk = mptcp_sk(sk); + bool ret = false; - skb_queue_splice_tail_init(&sk->sk_receive_queue, &msk->receive_queue); -} + if (list_empty(&msk->conn_list)) + return false; -static bool __mptcp_move_skbs(struct mptcp_sock *msk) -{ - struct sock *sk = (struct sock *)msk; - unsigned int moved = 0; - bool ret, done; + /* verify we can move any data from the subflow, eventually updating the msk rcvbuf size */ + if (!(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) + mptcp_for_each_subflow(msk, subflow) + __mptcp_rcvbuf_update(sk, subflow->tcp_sock); - do { - struct sock *ssk = mptcp_subflow_recv_lookup(msk); + subflow = list_first_entry(&msk->conn_list, + struct mptcp_subflow_context, node); + for (;;) { + struct sock *ssk; bool slowpath; - /* we can have data pending in the subflows only if the msk - * receive buffer was full at subflow_data_ready() time, - * that is an unlikely slow path. + /* + * As an optimization avoid traversing the subflows list + * and eventually acquiring the subflow socket lock before bailing out */ - if (likely(!ssk)) + if (sk_rmem_alloc_get(sk) > sk->sk_rcvbuf) break; - slowpath = lock_sock_fast(ssk); - mptcp_data_lock(sk); - __mptcp_update_rmem(sk); - done = __mptcp_move_skbs_from_subflow(msk, ssk, &moved); - mptcp_data_unlock(sk); + subflow = __mptcp_first_ready_from(msk, subflow); + if (!subflow) + break; + ssk = mptcp_subflow_tcp_sock(subflow); + slowpath = lock_sock_fast(ssk); + ret = __mptcp_move_skbs_from_subflow(msk, ssk) || ret; if (unlikely(ssk->sk_err)) __mptcp_error_report(sk); unlock_sock_fast(ssk, slowpath); - } while (!done); - /* acquire the data lock only if some input data is pending */ - ret = moved > 0; - if (!RB_EMPTY_ROOT(&msk->out_of_order_queue) || - !skb_queue_empty_lockless(&sk->sk_receive_queue)) { - mptcp_data_lock(sk); - __mptcp_update_rmem(sk); - ret |= __mptcp_ofo_queue(msk); - __mptcp_splice_receive_queue(sk); - mptcp_data_unlock(sk); + subflow = mptcp_next_subflow(msk, subflow); } + + __mptcp_ofo_queue(msk); if (ret) mptcp_check_data_fin((struct sock *)msk); - return !skb_queue_empty(&msk->receive_queue); + return ret; } static unsigned int mptcp_inq_hint(const struct sock *sk) @@ -2168,7 +2067,7 @@ static unsigned int mptcp_inq_hint(const struct sock *sk) const struct mptcp_sock *msk = mptcp_sk(sk); const struct sk_buff *skb; - skb = skb_peek(&msk->receive_queue); + skb = skb_peek(&sk->sk_receive_queue); if (skb) { u64 hint_val = READ_ONCE(msk->ack_seq) - MPTCP_SKB_CB(skb)->map_seq; @@ -2214,7 +2113,7 @@ static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, while (copied < len) { int err, bytes_read; - bytes_read = __mptcp_recvmsg_mskq(msk, msg, len - copied, flags, &tss, &cmsg_flags); + bytes_read = __mptcp_recvmsg_mskq(sk, msg, len - copied, flags, &tss, &cmsg_flags); if (unlikely(bytes_read < 0)) { if (!copied) copied = bytes_read; @@ -2223,7 +2122,7 @@ static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, copied += bytes_read; - if (skb_queue_empty(&msk->receive_queue) && 
__mptcp_move_skbs(msk)) + if (skb_queue_empty(&sk->sk_receive_queue) && __mptcp_move_skbs(sk)) continue; /* only the MPTCP socket status is relevant here. The exit @@ -2249,7 +2148,7 @@ static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, /* race breaker: the shutdown could be after the * previous receive queue check */ - if (__mptcp_move_skbs(msk)) + if (__mptcp_move_skbs(sk)) continue; break; } @@ -2293,9 +2192,8 @@ static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, } } - pr_debug("msk=%p rx queue empty=%d:%d copied=%d\n", - msk, skb_queue_empty_lockless(&sk->sk_receive_queue), - skb_queue_empty(&msk->receive_queue), copied); + pr_debug("msk=%p rx queue empty=%d copied=%d\n", + msk, skb_queue_empty(&sk->sk_receive_queue), copied); release_sock(sk); return copied; @@ -2783,7 +2681,7 @@ static void mptcp_worker(struct work_struct *work) mptcp_check_fastclose(msk); - mptcp_pm_nl_work(msk); + mptcp_pm_worker(msk); mptcp_check_send_data_fin(sk); mptcp_check_data_fin_ack(sk); @@ -2822,11 +2720,8 @@ static void __mptcp_init_sock(struct sock *sk) INIT_LIST_HEAD(&msk->join_list); INIT_LIST_HEAD(&msk->rtx_queue); INIT_WORK(&msk->work, mptcp_worker); - __skb_queue_head_init(&msk->receive_queue); msk->out_of_order_queue = RB_ROOT; msk->first_pending = NULL; - WRITE_ONCE(msk->rmem_fwd_alloc, 0); - WRITE_ONCE(msk->rmem_released, 0); msk->timer_ival = TCP_RTO_MIN; msk->scaling_ratio = TCP_DEFAULT_SCALING_RATIO; @@ -3052,8 +2947,6 @@ static void __mptcp_destroy_sock(struct sock *sk) sk->sk_prot->destroy(sk); - WARN_ON_ONCE(READ_ONCE(msk->rmem_fwd_alloc)); - WARN_ON_ONCE(msk->rmem_released); sk_stream_kill_queues(sk); xfrm_sk_free_policy(sk); @@ -3285,12 +3178,9 @@ static void mptcp_copy_ip_options(struct sock *newsk, const struct sock *sk) rcu_read_lock(); inet_opt = rcu_dereference(inet->inet_opt); if (inet_opt) { - newopt = sock_kmalloc(newsk, sizeof(*inet_opt) + + newopt = sock_kmemdup(newsk, inet_opt, sizeof(*inet_opt) + inet_opt->opt.optlen, GFP_ATOMIC); - if (newopt) - memcpy(newopt, inet_opt, sizeof(*inet_opt) + - inet_opt->opt.optlen); - else + if (!newopt) net_warn_ratelimited("%s: Failed to copy ip options\n", __func__); } RCU_INIT_POINTER(newinet->inet_opt, newopt); @@ -3405,21 +3295,14 @@ void mptcp_destroy_common(struct mptcp_sock *msk, unsigned int flags) mptcp_for_each_subflow_safe(msk, subflow, tmp) __mptcp_close_ssk(sk, mptcp_subflow_tcp_sock(subflow), subflow, flags); - /* move to sk_receive_queue, sk_stream_kill_queues will purge it */ - mptcp_data_lock(sk); - skb_queue_splice_tail_init(&msk->receive_queue, &sk->sk_receive_queue); __skb_queue_purge(&sk->sk_receive_queue); skb_rbtree_purge(&msk->out_of_order_queue); - mptcp_data_unlock(sk); /* move all the rx fwd alloc into the sk_mem_reclaim_final in * inet_sock_destruct() will dispose it */ - sk_forward_alloc_add(sk, msk->rmem_fwd_alloc); - WRITE_ONCE(msk->rmem_fwd_alloc, 0); mptcp_token_destroy(msk); - mptcp_pm_free_anno_list(msk); - mptcp_free_local_addr_list(msk); + mptcp_pm_destroy(msk); } static void mptcp_destroy(struct sock *sk) @@ -3453,7 +3336,8 @@ void __mptcp_check_push(struct sock *sk, struct sock *ssk) #define MPTCP_FLAGS_PROCESS_CTX_NEED (BIT(MPTCP_PUSH_PENDING) | \ BIT(MPTCP_RETRANSMIT) | \ - BIT(MPTCP_FLUSH_JOIN_LIST)) + BIT(MPTCP_FLUSH_JOIN_LIST) | \ + BIT(MPTCP_DEQUEUE)) /* processes deferred events and flush wmem */ static void mptcp_release_cb(struct sock *sk) @@ -3487,6 +3371,11 @@ static void mptcp_release_cb(struct sock *sk) __mptcp_push_pending(sk, 0); if (flags & 
BIT(MPTCP_RETRANSMIT)) __mptcp_retrans(sk); + if ((flags & BIT(MPTCP_DEQUEUE)) && __mptcp_move_skbs(sk)) { + /* notify ack seq update */ + mptcp_cleanup_rbuf(msk, 0); + sk->sk_data_ready(sk); + } cond_resched(); spin_lock_bh(&sk->sk_lock.slock); @@ -3506,8 +3395,6 @@ static void mptcp_release_cb(struct sock *sk) if (__test_and_clear_bit(MPTCP_SYNC_SNDBUF, &msk->cb_flags)) __mptcp_sync_sndbuf(sk); } - - __mptcp_update_rmem(sk); } /* MP_JOIN client subflow must wait for 4th ack before sending any data: @@ -3533,7 +3420,6 @@ static void schedule_3rdack_retransmission(struct sock *ssk) WARN_ON_ONCE(icsk->icsk_ack.pending & ICSK_ACK_TIMER); smp_store_release(&icsk->icsk_ack.pending, icsk->icsk_ack.pending | ICSK_ACK_SCHED | ICSK_ACK_TIMER); - icsk->icsk_ack.timeout = timeout; sk_reset_timer(ssk, &icsk->icsk_delack_timer, timeout); } @@ -3678,12 +3564,6 @@ static void mptcp_shutdown(struct sock *sk, int how) __mptcp_wr_shutdown(sk); } -static int mptcp_forward_alloc_get(const struct sock *sk) -{ - return READ_ONCE(sk->sk_forward_alloc) + - READ_ONCE(mptcp_sk(sk)->rmem_fwd_alloc); -} - static int mptcp_ioctl_outq(const struct mptcp_sock *msk, u64 v) { const struct sock *sk = (void *)msk; @@ -3724,7 +3604,8 @@ static int mptcp_ioctl(struct sock *sk, int cmd, int *karg) return -EINVAL; lock_sock(sk); - __mptcp_move_skbs(msk); + if (__mptcp_move_skbs(sk)) + mptcp_cleanup_rbuf(msk, 0); *karg = mptcp_inq_hint(sk); release_sock(sk); break; @@ -3841,7 +3722,6 @@ static struct proto mptcp_prot = { .hash = mptcp_hash, .unhash = mptcp_unhash, .get_port = mptcp_get_port, - .forward_alloc_get = mptcp_forward_alloc_get, .stream_memory_free = mptcp_stream_memory_free, .sockets_allocated = &mptcp_sockets_allocated, diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h index f6a207958459d..d409586b5977f 100644 --- a/net/mptcp/protocol.h +++ b/net/mptcp/protocol.h @@ -124,12 +124,14 @@ #define MPTCP_FLUSH_JOIN_LIST 5 #define MPTCP_SYNC_STATE 6 #define MPTCP_SYNC_SNDBUF 7 +#define MPTCP_DEQUEUE 8 struct mptcp_skb_cb { u64 map_seq; u64 end_seq; u32 offset; - u8 has_rxtstamp:1; + u8 has_rxtstamp; + u8 cant_coalesce; }; #define MPTCP_SKB_CB(__skb) ((struct mptcp_skb_cb *)&((__skb)->cb[0])) @@ -221,6 +223,8 @@ struct mptcp_pm_data { spinlock_t lock; /*protects the whole PM data */ + struct_group(reset, + u8 addr_signal; bool server_side; bool work_pending; @@ -233,6 +237,9 @@ struct mptcp_pm_data { u8 pm_type; u8 subflows; u8 status; + + ); + DECLARE_BITMAP(id_avail_bitmap, MPTCP_PM_MAX_ADDR_ID + 1); struct mptcp_rm_list rm_list_tx; struct mptcp_rm_list rm_list_rx; @@ -279,7 +286,6 @@ struct mptcp_sock { u64 rcv_data_fin_seq; u64 bytes_retrans; u64 bytes_consumed; - int rmem_fwd_alloc; int snd_burst; int old_wspace; u64 recovery_snd_nxt; /* in recovery mode accept up to this seq; @@ -294,7 +300,6 @@ struct mptcp_sock { u32 last_ack_recv; unsigned long timer_ival; u32 token; - int rmem_released; unsigned long flags; unsigned long cb_flags; bool recovery; /* closing subflow write queue reinjected */ @@ -324,7 +329,6 @@ struct mptcp_sock { struct work_struct work; struct sk_buff *ooo_last_skb; struct rb_root out_of_order_queue; - struct sk_buff_head receive_queue; struct list_head conn_list; struct list_head rtx_queue; struct mptcp_data_frag *first_pending; @@ -355,6 +359,8 @@ struct mptcp_sock { list_for_each_entry(__subflow, &((__msk)->conn_list), node) #define mptcp_for_each_subflow_safe(__msk, __subflow, __tmp) \ list_for_each_entry_safe(__subflow, __tmp, &((__msk)->conn_list), node) +#define 
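/*
 * Circular-iteration note: list_next_entry_circular() yields the next
 * entry or wraps back to the first one, so the scan in __mptcp_move_skbs()
 * can resume from the subflow it stopped at on the previous pass instead
 * of always restarting from the head of msk->conn_list.
 */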
mptcp_next_subflow(__msk, __subflow) \ + list_next_entry_circular(__subflow, &((__msk)->conn_list), node) extern struct genl_family mptcp_genl_family; @@ -381,14 +387,6 @@ static inline void msk_owned_by_me(const struct mptcp_sock *msk) #define mptcp_sk(ptr) container_of_const(ptr, struct mptcp_sock, sk.icsk_inet.sk) #endif -/* the msk socket don't use the backlog, also account for the bulk - * free memory - */ -static inline int __mptcp_rmem(const struct sock *sk) -{ - return atomic_read(&sk->sk_rmem_alloc) - READ_ONCE(mptcp_sk(sk)->rmem_released); -} - static inline int mptcp_win_from_space(const struct sock *sk, int space) { return __tcp_win_from_space(mptcp_sk(sk)->scaling_ratio, space); @@ -401,7 +399,8 @@ static inline int mptcp_space_from_win(const struct sock *sk, int win) static inline int __mptcp_space(const struct sock *sk) { - return mptcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf) - __mptcp_rmem(sk)); + return mptcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf) - + sk_rmem_alloc_get(sk)); } static inline struct mptcp_data_frag *mptcp_send_head(const struct sock *sk) @@ -700,6 +699,7 @@ int mptcp_allow_join_id0(const struct net *net); unsigned int mptcp_stale_loss_cnt(const struct net *net); unsigned int mptcp_close_timeout(const struct sock *sk); int mptcp_get_pm_type(const struct net *net); +const char *mptcp_get_path_manager(const struct net *net); const char *mptcp_get_scheduler(const struct net *net); void mptcp_active_disable(struct sock *sk); @@ -726,12 +726,14 @@ struct sock *__mptcp_nmpc_sk(struct mptcp_sock *msk); bool __mptcp_close(struct sock *sk, long timeout); void mptcp_cancel_work(struct sock *sk); void __mptcp_unaccepted_force_close(struct sock *sk); -void mptcp_set_owner_r(struct sk_buff *skb, struct sock *sk); void mptcp_set_state(struct sock *sk, int state); bool mptcp_addresses_equal(const struct mptcp_addr_info *a, const struct mptcp_addr_info *b, bool use_port); -void mptcp_local_address(const struct sock_common *skc, struct mptcp_addr_info *addr); +void mptcp_local_address(const struct sock_common *skc, + struct mptcp_addr_info *addr); +void mptcp_remote_address(const struct sock_common *skc, + struct mptcp_addr_info *addr); /* called with sk socket lock held */ int __mptcp_subflow_connect(struct sock *sk, const struct mptcp_pm_local *local, @@ -990,6 +992,7 @@ __sum16 __mptcp_make_csum(u64 data_seq, u32 subflow_seq, u16 data_len, __wsum su void __init mptcp_pm_init(void); void mptcp_pm_data_init(struct mptcp_sock *msk); void mptcp_pm_data_reset(struct mptcp_sock *msk); +void mptcp_pm_destroy(struct mptcp_sock *msk); int mptcp_pm_parse_addr(struct nlattr *attr, struct genl_info *info, struct mptcp_addr_info *addr); int mptcp_pm_parse_entry(struct nlattr *attr, struct genl_info *info, @@ -999,7 +1002,6 @@ bool mptcp_pm_addr_families_match(const struct sock *sk, const struct mptcp_addr_info *loc, const struct mptcp_addr_info *rem); void mptcp_pm_subflow_chk_stale(const struct mptcp_sock *msk, struct sock *ssk); -void mptcp_pm_nl_subflow_chk_stale(const struct mptcp_sock *msk, struct sock *ssk); void mptcp_pm_new_connection(struct mptcp_sock *msk, const struct sock *ssk, int server_side); void mptcp_pm_fully_established(struct mptcp_sock *msk, const struct sock *ssk); bool mptcp_pm_allow_new_subflow(struct mptcp_sock *msk); @@ -1013,34 +1015,35 @@ void mptcp_pm_add_addr_received(const struct sock *ssk, void mptcp_pm_add_addr_echoed(struct mptcp_sock *msk, const struct mptcp_addr_info *addr); void mptcp_pm_add_addr_send_ack(struct mptcp_sock *msk); -bool 
mptcp_pm_nl_is_init_remote_addr(struct mptcp_sock *msk, - const struct mptcp_addr_info *remote); -void mptcp_pm_nl_addr_send_ack(struct mptcp_sock *msk); +void mptcp_pm_send_ack(struct mptcp_sock *msk, + struct mptcp_subflow_context *subflow, + bool prio, bool backup); +void mptcp_pm_addr_send_ack(struct mptcp_sock *msk); +void mptcp_pm_nl_rm_addr(struct mptcp_sock *msk, u8 rm_id); +void mptcp_pm_rm_subflow(struct mptcp_sock *msk, + const struct mptcp_rm_list *rm_list); void mptcp_pm_rm_addr_received(struct mptcp_sock *msk, const struct mptcp_rm_list *rm_list); void mptcp_pm_mp_prio_received(struct sock *sk, u8 bkup); void mptcp_pm_mp_fail_received(struct sock *sk, u64 fail_seq); -int mptcp_pm_nl_mp_prio_send_ack(struct mptcp_sock *msk, - struct mptcp_addr_info *addr, - struct mptcp_addr_info *rem, - u8 bkup); +int mptcp_pm_mp_prio_send_ack(struct mptcp_sock *msk, + struct mptcp_addr_info *addr, + struct mptcp_addr_info *rem, + u8 bkup); bool mptcp_pm_alloc_anno_list(struct mptcp_sock *msk, const struct mptcp_addr_info *addr); -void mptcp_pm_free_anno_list(struct mptcp_sock *msk); bool mptcp_pm_sport_in_anno_list(struct mptcp_sock *msk, const struct sock *sk); struct mptcp_pm_add_entry * mptcp_pm_del_add_timer(struct mptcp_sock *msk, const struct mptcp_addr_info *addr, bool check_id); -struct mptcp_pm_add_entry * -mptcp_lookup_anno_list_by_saddr(const struct mptcp_sock *msk, - const struct mptcp_addr_info *addr); bool mptcp_lookup_subflow_by_saddr(const struct list_head *list, const struct mptcp_addr_info *saddr); bool mptcp_remove_anno_list_by_saddr(struct mptcp_sock *msk, const struct mptcp_addr_info *addr); -int mptcp_pm_set_flags(struct sk_buff *skb, struct genl_info *info); -int mptcp_pm_nl_set_flags(struct sk_buff *skb, struct genl_info *info); -int mptcp_userspace_pm_set_flags(struct sk_buff *skb, struct genl_info *info); +int mptcp_pm_nl_set_flags(struct mptcp_pm_addr_entry *local, + struct genl_info *info); +int mptcp_userspace_pm_set_flags(struct mptcp_pm_addr_entry *local, + struct genl_info *info); int mptcp_pm_announce_addr(struct mptcp_sock *msk, const struct mptcp_addr_info *addr, bool echo); @@ -1048,7 +1051,16 @@ int mptcp_pm_remove_addr(struct mptcp_sock *msk, const struct mptcp_rm_list *rm_ void mptcp_pm_remove_addr_entry(struct mptcp_sock *msk, struct mptcp_pm_addr_entry *entry); -void mptcp_free_local_addr_list(struct mptcp_sock *msk); +/* the default path manager, used in mptcp_pm_unregister */ +extern struct mptcp_pm_ops mptcp_pm_kernel; + +struct mptcp_pm_ops *mptcp_pm_find(const char *name); +int mptcp_pm_register(struct mptcp_pm_ops *pm_ops); +void mptcp_pm_unregister(struct mptcp_pm_ops *pm_ops); +int mptcp_pm_validate(struct mptcp_pm_ops *pm_ops); +void mptcp_pm_get_available(char *buf, size_t maxlen); + +void mptcp_userspace_pm_free_local_addr_list(struct mptcp_sock *msk); void mptcp_event(enum mptcp_event_type type, const struct mptcp_sock *msk, const struct sock *ssk, gfp_t gfp); @@ -1058,12 +1070,11 @@ void mptcp_event_pm_listener(const struct sock *ssk, enum mptcp_event_type event); bool mptcp_userspace_pm_active(const struct mptcp_sock *msk); -void __mptcp_fastopen_gen_msk_ackseq(struct mptcp_sock *msk, struct mptcp_subflow_context *subflow, - const struct mptcp_options_received *mp_opt); void mptcp_fastopen_subflow_synack_set_params(struct mptcp_subflow_context *subflow, struct request_sock *req); -int mptcp_nl_fill_addr(struct sk_buff *skb, - struct mptcp_pm_addr_entry *entry); +int mptcp_pm_genl_fill_addr(struct sk_buff *msg, + struct netlink_callback 
*cb, + struct mptcp_pm_addr_entry *entry); static inline bool mptcp_pm_should_add_signal(struct mptcp_sock *msk) { @@ -1126,19 +1137,20 @@ bool mptcp_pm_add_addr_signal(struct mptcp_sock *msk, const struct sk_buff *skb, bool mptcp_pm_rm_addr_signal(struct mptcp_sock *msk, unsigned int remaining, struct mptcp_rm_list *rm_list); int mptcp_pm_get_local_id(struct mptcp_sock *msk, struct sock_common *skc); -int mptcp_pm_nl_get_local_id(struct mptcp_sock *msk, struct mptcp_addr_info *skc); -int mptcp_userspace_pm_get_local_id(struct mptcp_sock *msk, struct mptcp_addr_info *skc); +int mptcp_pm_nl_get_local_id(struct mptcp_sock *msk, + struct mptcp_pm_addr_entry *skc); +int mptcp_userspace_pm_get_local_id(struct mptcp_sock *msk, + struct mptcp_pm_addr_entry *skc); bool mptcp_pm_is_backup(struct mptcp_sock *msk, struct sock_common *skc); bool mptcp_pm_nl_is_backup(struct mptcp_sock *msk, struct mptcp_addr_info *skc); bool mptcp_userspace_pm_is_backup(struct mptcp_sock *msk, struct mptcp_addr_info *skc); -int mptcp_pm_dump_addr(struct sk_buff *msg, struct netlink_callback *cb); int mptcp_pm_nl_dump_addr(struct sk_buff *msg, struct netlink_callback *cb); int mptcp_userspace_pm_dump_addr(struct sk_buff *msg, struct netlink_callback *cb); -int mptcp_pm_get_addr(struct sk_buff *skb, struct genl_info *info); -int mptcp_pm_nl_get_addr(struct sk_buff *skb, struct genl_info *info); -int mptcp_userspace_pm_get_addr(struct sk_buff *skb, +int mptcp_pm_nl_get_addr(u8 id, struct mptcp_pm_addr_entry *addr, + struct genl_info *info); +int mptcp_userspace_pm_get_addr(u8 id, struct mptcp_pm_addr_entry *addr, struct genl_info *info); static inline u8 subflow_get_local_id(const struct mptcp_subflow_context *subflow) @@ -1150,8 +1162,11 @@ static inline u8 subflow_get_local_id(const struct mptcp_subflow_context *subflo return local_id; } +void __init mptcp_pm_kernel_register(void); +void __init mptcp_pm_userspace_register(void); void __init mptcp_pm_nl_init(void); -void mptcp_pm_nl_work(struct mptcp_sock *msk); +void mptcp_pm_worker(struct mptcp_sock *msk); +void __mptcp_pm_kernel_worker(struct mptcp_sock *msk); unsigned int mptcp_pm_get_add_addr_signal_max(const struct mptcp_sock *msk); unsigned int mptcp_pm_get_add_addr_accept_max(const struct mptcp_sock *msk); unsigned int mptcp_pm_get_subflows_max(const struct mptcp_sock *msk); @@ -1199,6 +1214,8 @@ static inline void __mptcp_do_fallback(struct mptcp_sock *msk) pr_debug("TCP fallback already done (msk=%p)\n", msk); return; } + if (WARN_ON_ONCE(!READ_ONCE(msk->allow_infinite_fallback))) + return; set_bit(MPTCP_FALLBACK_DONE, &msk->flags); } diff --git a/net/mptcp/sched.c b/net/mptcp/sched.c index df7dbcfa3b713..c16c6fbd4ba2f 100644 --- a/net/mptcp/sched.c +++ b/net/mptcp/sched.c @@ -16,13 +16,25 @@ static DEFINE_SPINLOCK(mptcp_sched_list_lock); static LIST_HEAD(mptcp_sched_list); -static int mptcp_sched_default_get_subflow(struct mptcp_sock *msk, +static int mptcp_sched_default_get_send(struct mptcp_sock *msk, + struct mptcp_sched_data *data) +{ + struct sock *ssk; + + ssk = mptcp_subflow_get_send(msk); + if (!ssk) + return -EINVAL; + + mptcp_subflow_set_scheduled(mptcp_subflow_ctx(ssk), true); + return 0; +} + +static int mptcp_sched_default_get_retrans(struct mptcp_sock *msk, struct mptcp_sched_data *data) { struct sock *ssk; - ssk = data->reinject ? 
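/*
 * Sketch of the split scheduler contract (my_sched and my_get_send are
 * illustrative names, not part of this patch): get_send() is mandatory,
 * get_retrans() is optional and falls back to get_send() when NULL:
 *
 *	static struct mptcp_sched_ops my_sched = {
 *		.get_send	= my_get_send,	// required by registration
 *		.name		= "my_sched",
 *		.owner		= THIS_MODULE,
 *	};
 */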
mptcp_subflow_get_retrans(msk) : - mptcp_subflow_get_send(msk); + ssk = mptcp_subflow_get_retrans(msk); if (!ssk) return -EINVAL; @@ -31,7 +43,8 @@ static int mptcp_sched_default_get_subflow(struct mptcp_sock *msk, } static struct mptcp_sched_ops mptcp_sched_default = { - .get_subflow = mptcp_sched_default_get_subflow, + .get_send = mptcp_sched_default_get_send, + .get_retrans = mptcp_sched_default_get_retrans, .name = "default", .owner = THIS_MODULE, }; @@ -73,7 +86,7 @@ void mptcp_get_available_schedulers(char *buf, size_t maxlen) int mptcp_register_scheduler(struct mptcp_sched_ops *sched) { - if (!sched->get_subflow) + if (!sched->get_send) return -EINVAL; spin_lock(&mptcp_sched_list_lock); @@ -144,7 +157,7 @@ void mptcp_subflow_set_scheduled(struct mptcp_subflow_context *subflow, int mptcp_sched_get_send(struct mptcp_sock *msk) { struct mptcp_subflow_context *subflow; - struct mptcp_sched_data data; + struct mptcp_sched_data *data = NULL; msk_owned_by_me(msk); @@ -164,16 +177,15 @@ int mptcp_sched_get_send(struct mptcp_sock *msk) return 0; } - data.reinject = false; if (msk->sched == &mptcp_sched_default || !msk->sched) - return mptcp_sched_default_get_subflow(msk, &data); - return msk->sched->get_subflow(msk, &data); + return mptcp_sched_default_get_send(msk, data); + return msk->sched->get_send(msk, data); } int mptcp_sched_get_retrans(struct mptcp_sock *msk) { struct mptcp_subflow_context *subflow; - struct mptcp_sched_data data; + struct mptcp_sched_data *data = NULL; msk_owned_by_me(msk); @@ -186,8 +198,9 @@ int mptcp_sched_get_retrans(struct mptcp_sock *msk) return 0; } - data.reinject = true; if (msk->sched == &mptcp_sched_default || !msk->sched) - return mptcp_sched_default_get_subflow(msk, &data); - return msk->sched->get_subflow(msk, &data); + return mptcp_sched_default_get_retrans(msk, data); + if (msk->sched->get_retrans) + return msk->sched->get_retrans(msk, data); + return msk->sched->get_send(msk, data); } diff --git a/net/mptcp/sockopt.c b/net/mptcp/sockopt.c index 505445a9598fa..3caa0a9d3b388 100644 --- a/net/mptcp/sockopt.c +++ b/net/mptcp/sockopt.c @@ -1419,6 +1419,12 @@ static int mptcp_getsockopt_v4(struct mptcp_sock *msk, int optname, switch (optname) { case IP_TOS: return mptcp_put_int_option(msk, optval, optlen, READ_ONCE(inet_sk(sk)->tos)); + case IP_FREEBIND: + return mptcp_put_int_option(msk, optval, optlen, + inet_test_bit(FREEBIND, sk)); + case IP_TRANSPARENT: + return mptcp_put_int_option(msk, optval, optlen, + inet_test_bit(TRANSPARENT, sk)); case IP_BIND_ADDRESS_NO_PORT: return mptcp_put_int_option(msk, optval, optlen, inet_test_bit(BIND_ADDRESS_NO_PORT, sk)); @@ -1430,6 +1436,26 @@ static int mptcp_getsockopt_v4(struct mptcp_sock *msk, int optname, return -EOPNOTSUPP; } +static int mptcp_getsockopt_v6(struct mptcp_sock *msk, int optname, + char __user *optval, int __user *optlen) +{ + struct sock *sk = (void *)msk; + + switch (optname) { + case IPV6_V6ONLY: + return mptcp_put_int_option(msk, optval, optlen, + sk->sk_ipv6only); + case IPV6_TRANSPARENT: + return mptcp_put_int_option(msk, optval, optlen, + inet_test_bit(TRANSPARENT, sk)); + case IPV6_FREEBIND: + return mptcp_put_int_option(msk, optval, optlen, + inet_test_bit(FREEBIND, sk)); + } + + return -EOPNOTSUPP; +} + static int mptcp_getsockopt_sol_mptcp(struct mptcp_sock *msk, int optname, char __user *optval, int __user *optlen) { @@ -1469,6 +1495,8 @@ int mptcp_getsockopt(struct sock *sk, int level, int optname, if (level == SOL_IP) return mptcp_getsockopt_v4(msk, optname, optval, option); + if 
(level == SOL_IPV6) + return mptcp_getsockopt_v6(msk, optname, optval, option); if (level == SOL_TCP) return mptcp_getsockopt_sol_tcp(msk, optname, optval, option); if (level == SOL_MPTCP) diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c index fd021cf8286ef..efe8d86496dbd 100644 --- a/net/mptcp/subflow.c +++ b/net/mptcp/subflow.c @@ -802,9 +802,6 @@ void __mptcp_subflow_fully_established(struct mptcp_sock *msk, subflow_set_remote_key(msk, subflow, mp_opt); WRITE_ONCE(subflow->fully_established, true); WRITE_ONCE(msk->fully_established, true); - - if (subflow->is_mptfo) - __mptcp_fastopen_gen_msk_ackseq(msk, subflow, mp_opt); } static struct sock *subflow_syn_recv_sock(const struct sock *sk, @@ -1142,7 +1139,6 @@ static enum mapping_status get_mapping_status(struct sock *ssk, if (data_len == 0) { pr_debug("infinite mapping received\n"); MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_INFINITEMAPRX); - subflow->map_data_len = 0; return MAPPING_INVALID; } @@ -1271,7 +1267,12 @@ static void mptcp_subflow_discard_data(struct sock *ssk, struct sk_buff *skb, subflow->map_valid = 0; } -/* sched mptcp worker to remove the subflow if no more data is pending */ +static bool subflow_is_done(const struct sock *sk) +{ + return sk->sk_shutdown & RCV_SHUTDOWN || sk->sk_state == TCP_CLOSE; +} + +/* sched mptcp worker for subflow cleanup if no more data is pending */ static void subflow_sched_work_if_closed(struct mptcp_sock *msk, struct sock *ssk) { struct sock *sk = (struct sock *)msk; @@ -1281,21 +1282,19 @@ static void subflow_sched_work_if_closed(struct mptcp_sock *msk, struct sock *ss inet_sk_state_load(sk) != TCP_ESTABLISHED))) return; - if (skb_queue_empty(&ssk->sk_receive_queue) && - !test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags)) - mptcp_schedule_work(sk); -} + if (!skb_queue_empty(&ssk->sk_receive_queue)) + return; -static bool subflow_can_fallback(struct mptcp_subflow_context *subflow) -{ - struct mptcp_sock *msk = mptcp_sk(subflow->conn); + if (!test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags)) + mptcp_schedule_work(sk); - if (subflow->mp_join) - return false; - else if (READ_ONCE(msk->csum_enabled)) - return !subflow->valid_csum_seen; - else - return READ_ONCE(msk->allow_infinite_fallback); + /* when the fallback subflow closes the rx side, trigger a 'dummy' + * ingress data fin, so that the msk state will follow along + */ + if (__mptcp_check_fallback(msk) && subflow_is_done(ssk) && + msk->first == ssk && + mptcp_update_rcv_data_fin(msk, READ_ONCE(msk->ack_seq), true)) + mptcp_schedule_work(sk); } static void mptcp_subflow_fail(struct mptcp_sock *msk, struct sock *ssk) @@ -1393,7 +1392,7 @@ static bool subflow_check_data_avail(struct sock *ssk) return true; } - if (!subflow_can_fallback(subflow) && subflow->map_data_len) { + if (!READ_ONCE(msk->allow_infinite_fallback)) { /* fatal protocol error, close the socket. * subflow_error_report() will introduce the appropriate barriers */ @@ -1772,10 +1771,7 @@ int mptcp_subflow_create_socket(struct sock *sk, unsigned short family, * needs it. * Update ns_tracker to current stack trace and refcounted tracker. 
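 * sk_net_refcnt_upgrade() makes a kernel socket hold a full netns
 * reference; its effect is roughly (sketch, matching the lines it
 * replaces here):
 *
 *	sk->sk_net_refcnt = 1;
 *	get_net_track(net, &sk->ns_tracker, GFP_KERNEL);
 *	sock_inuse_add(net, 1);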
*/ - __netns_tracker_free(net, &sf->sk->ns_tracker, false); - sf->sk->sk_net_refcnt = 1; - get_net_track(net, &sf->sk->ns_tracker, GFP_KERNEL); - sock_inuse_add(net, 1); + sk_net_refcnt_upgrade(sf->sk); err = tcp_set_ulp(sf->sk, "mptcp"); if (err) goto err_free; @@ -1842,11 +1838,6 @@ static void __subflow_state_change(struct sock *sk) rcu_read_unlock(); } -static bool subflow_is_done(const struct sock *sk) -{ - return sk->sk_shutdown & RCV_SHUTDOWN || sk->sk_state == TCP_CLOSE; -} - static void subflow_state_change(struct sock *sk) { struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); @@ -1873,13 +1864,6 @@ static void subflow_state_change(struct sock *sk) subflow_error_report(sk); subflow_sched_work_if_closed(mptcp_sk(parent), sk); - - /* when the fallback subflow closes the rx side, trigger a 'dummy' - * ingress data fin, so that the msk state will follow along - */ - if (__mptcp_check_fallback(msk) && subflow_is_done(sk) && msk->first == sk && - mptcp_update_rcv_data_fin(msk, READ_ONCE(msk->ack_seq), true)) - mptcp_schedule_work(parent); } void mptcp_subflow_queue_clean(struct sock *listener_sk, struct sock *listener_ssk) diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c index 7d13110ce1882..0633276d96bfb 100644 --- a/net/netfilter/ipvs/ip_vs_ctl.c +++ b/net/netfilter/ipvs/ip_vs_ctl.c @@ -3091,12 +3091,12 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) case IP_VS_SO_GET_SERVICES: { struct ip_vs_get_services *get; - int size; + size_t size; get = (struct ip_vs_get_services *)arg; size = struct_size(get, entrytable, get->num_services); if (*len != size) { - pr_err("length: %u != %u\n", *len, size); + pr_err("length: %u != %zu\n", *len, size); ret = -EINVAL; goto out; } @@ -3132,12 +3132,12 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) case IP_VS_SO_GET_DESTS: { struct ip_vs_get_dests *get; - int size; + size_t size; get = (struct ip_vs_get_dests *)arg; size = struct_size(get, entrytable, get->num_dests); if (*len != size) { - pr_err("length: %u != %u\n", *len, size); + pr_err("length: %u != %zu\n", *len, size); ret = -EINVAL; goto out; } diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c index 3402675bf5215..62f30d5c25c7e 100644 --- a/net/netfilter/ipvs/ip_vs_sync.c +++ b/net/netfilter/ipvs/ip_vs_sync.c @@ -1280,12 +1280,12 @@ static void set_sock_size(struct sock *sk, int mode, int val) lock_sock(sk); if (mode) { val = clamp_t(int, val, (SOCK_MIN_SNDBUF + 1) / 2, - READ_ONCE(sysctl_wmem_max)); + READ_ONCE(sock_net(sk)->core.sysctl_wmem_max)); sk->sk_sndbuf = val * 2; sk->sk_userlocks |= SOCK_SNDBUF_LOCK; } else { val = clamp_t(int, val, (SOCK_MIN_RCVBUF + 1) / 2, - READ_ONCE(sysctl_rmem_max)); + READ_ONCE(sock_net(sk)->core.sysctl_rmem_max)); sk->sk_rcvbuf = val * 2; sk->sk_userlocks |= SOCK_RCVBUF_LOCK; } diff --git a/net/netfilter/nf_conncount.c b/net/netfilter/nf_conncount.c index 4890af4dc263f..913ede2f57f9a 100644 --- a/net/netfilter/nf_conncount.c +++ b/net/netfilter/nf_conncount.c @@ -132,7 +132,7 @@ static int __nf_conncount_add(struct net *net, struct nf_conn *found_ct; unsigned int collect = 0; - if (time_is_after_eq_jiffies((unsigned long)list->last_gc)) + if ((u32)jiffies == list->last_gc) goto add_new_node; /* check the saved connections */ @@ -234,7 +234,7 @@ bool nf_conncount_gc_list(struct net *net, bool ret = false; /* don't bother if we just did GC */ - if (time_is_after_eq_jiffies((unsigned long)READ_ONCE(list->last_gc))) + if ((u32)jiffies == 
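/*
 * last_gc holds a truncated (u32)jiffies stamped when a GC pass runs; an
 * equality test only skips GC within the current tick and, unlike a
 * time_is_after_eq_jiffies() comparison on the truncated value, stays
 * correct once jiffies wraps.
 */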
READ_ONCE(list->last_gc)) return false; /* don't bother if other cpu is already doing GC */ @@ -377,6 +377,8 @@ insert_tree(struct net *net, conn->tuple = *tuple; conn->zone = *zone; + conn->cpu = raw_smp_processor_id(); + conn->jiffies32 = (u32)jiffies; memcpy(rbconn->key, key, sizeof(u32) * data->keylen); nf_conncount_list_init(&rbconn->list); diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c index 502cf10aab41d..2f666751c7e7c 100644 --- a/net/netfilter/nf_conntrack_standalone.c +++ b/net/netfilter/nf_conntrack_standalone.c @@ -618,7 +618,9 @@ static struct ctl_table nf_ct_sysctl_table[] = { .data = &nf_conntrack_max, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = proc_dointvec, + .proc_handler = proc_dointvec_minmax, + .extra1 = SYSCTL_ZERO, + .extra2 = SYSCTL_INT_MAX, }, [NF_SYSCTL_CT_COUNT] = { .procname = "nf_conntrack_count", @@ -654,7 +656,9 @@ static struct ctl_table nf_ct_sysctl_table[] = { .data = &nf_ct_expect_max, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = proc_dointvec, + .proc_handler = proc_dointvec_minmax, + .extra1 = SYSCTL_ONE, + .extra2 = SYSCTL_INT_MAX, }, [NF_SYSCTL_CT_ACCT] = { .procname = "nf_conntrack_acct", @@ -947,7 +951,9 @@ static struct ctl_table nf_ct_netfilter_table[] = { .data = &nf_conntrack_max, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = proc_dointvec, + .proc_handler = proc_dointvec_minmax, + .extra1 = SYSCTL_ZERO, + .extra2 = SYSCTL_INT_MAX, }, }; diff --git a/net/netfilter/nf_log_syslog.c b/net/netfilter/nf_log_syslog.c index 58402226045e8..86d5fc5d28e3b 100644 --- a/net/netfilter/nf_log_syslog.c +++ b/net/netfilter/nf_log_syslog.c @@ -216,7 +216,9 @@ nf_log_dump_tcp_header(struct nf_log_buf *m, /* Max length: 9 "RES=0x3C " */ nf_log_buf_add(m, "RES=0x%02x ", (u_int8_t)(ntohl(tcp_flag_word(th) & TCP_RESERVED_BITS) >> 22)); - /* Max length: 32 "CWR ECE URG ACK PSH RST SYN FIN " */ + /* Max length: 35 "AE CWR ECE URG ACK PSH RST SYN FIN " */ + if (th->ae) + nf_log_buf_add(m, "AE "); if (th->cwr) nf_log_buf_add(m, "CWR "); if (th->ece) @@ -516,7 +518,7 @@ dump_ipv4_packet(struct net *net, struct nf_log_buf *m, /* Proto Max log string length */ /* IP: 40+46+6+11+127 = 230 */ - /* TCP: 10+max(25,20+30+13+9+32+11+127) = 252 */ + /* TCP: 10+max(25,20+30+13+9+35+11+127) = 255 */ /* UDP: 10+max(25,20) = 35 */ /* UDPLITE: 14+max(25,20) = 39 */ /* ICMP: 11+max(25, 18+25+max(19,14,24+3+n+10,3+n+10)) = 91+n */ @@ -526,7 +528,7 @@ dump_ipv4_packet(struct net *net, struct nf_log_buf *m, /* (ICMP allows recursion one level deep) */ /* maxlen = IP + ICMP + IP + max(TCP,UDP,ICMP,unknown) */ - /* maxlen = 230+ 91 + 230 + 252 = 803 */ + /* maxlen = 230+ 91 + 230 + 255 = 806 */ } static noinline_for_stack void diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index a34de9c17cf10..c2df81b7e9505 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -34,7 +34,6 @@ unsigned int nf_tables_net_id __read_mostly; static LIST_HEAD(nf_tables_expressions); static LIST_HEAD(nf_tables_objects); static LIST_HEAD(nf_tables_flowtables); -static LIST_HEAD(nf_tables_destroy_list); static LIST_HEAD(nf_tables_gc_list); static DEFINE_SPINLOCK(nf_tables_destroy_list_lock); static DEFINE_SPINLOCK(nf_tables_gc_list_lock); @@ -125,7 +124,6 @@ static void nft_validate_state_update(struct nft_table *table, u8 new_validate_s table->validate_state = new_validate_state; } static void nf_tables_trans_destroy_work(struct work_struct *w); -static 
DECLARE_WORK(trans_destroy_work, nf_tables_trans_destroy_work); static void nft_trans_gc_work(struct work_struct *work); static DECLARE_WORK(trans_gc_work, nft_trans_gc_work); @@ -10006,11 +10004,12 @@ static void nft_commit_release(struct nft_trans *trans) static void nf_tables_trans_destroy_work(struct work_struct *w) { + struct nftables_pernet *nft_net = container_of(w, struct nftables_pernet, destroy_work); struct nft_trans *trans, *next; LIST_HEAD(head); spin_lock(&nf_tables_destroy_list_lock); - list_splice_init(&nf_tables_destroy_list, &head); + list_splice_init(&nft_net->destroy_list, &head); spin_unlock(&nf_tables_destroy_list_lock); if (list_empty(&head)) @@ -10024,9 +10023,11 @@ static void nf_tables_trans_destroy_work(struct work_struct *w) } } -void nf_tables_trans_destroy_flush_work(void) +void nf_tables_trans_destroy_flush_work(struct net *net) { - flush_work(&trans_destroy_work); + struct nftables_pernet *nft_net = nft_pernet(net); + + flush_work(&nft_net->destroy_work); } EXPORT_SYMBOL_GPL(nf_tables_trans_destroy_flush_work); @@ -10484,11 +10485,11 @@ static void nf_tables_commit_release(struct net *net) trans->put_net = true; spin_lock(&nf_tables_destroy_list_lock); - list_splice_tail_init(&nft_net->commit_list, &nf_tables_destroy_list); + list_splice_tail_init(&nft_net->commit_list, &nft_net->destroy_list); spin_unlock(&nf_tables_destroy_list_lock); nf_tables_module_autoload_cleanup(net); - schedule_work(&trans_destroy_work); + schedule_work(&nft_net->destroy_work); mutex_unlock(&nft_net->commit_mutex); } @@ -11853,7 +11854,7 @@ static int nft_rcv_nl_event(struct notifier_block *this, unsigned long event, gc_seq = nft_gc_seq_begin(nft_net); - nf_tables_trans_destroy_flush_work(); + nf_tables_trans_destroy_flush_work(net); again: list_for_each_entry(table, &nft_net->tables, list) { if (nft_table_has_owner(table) && @@ -11895,6 +11896,7 @@ static int __net_init nf_tables_init_net(struct net *net) INIT_LIST_HEAD(&nft_net->tables); INIT_LIST_HEAD(&nft_net->commit_list); + INIT_LIST_HEAD(&nft_net->destroy_list); INIT_LIST_HEAD(&nft_net->commit_set_list); INIT_LIST_HEAD(&nft_net->binding_list); INIT_LIST_HEAD(&nft_net->module_list); @@ -11903,6 +11905,7 @@ static int __net_init nf_tables_init_net(struct net *net) nft_net->base_seq = 1; nft_net->gc_seq = 0; nft_net->validate_state = NFT_VALIDATE_SKIP; + INIT_WORK(&nft_net->destroy_work, nf_tables_trans_destroy_work); return 0; } @@ -11931,14 +11934,17 @@ static void __net_exit nf_tables_exit_net(struct net *net) if (!list_empty(&nft_net->module_list)) nf_tables_module_autoload_cleanup(net); + cancel_work_sync(&nft_net->destroy_work); __nft_release_tables(net); nft_gc_seq_end(nft_net, gc_seq); mutex_unlock(&nft_net->commit_mutex); + WARN_ON_ONCE(!list_empty(&nft_net->tables)); WARN_ON_ONCE(!list_empty(&nft_net->module_list)); WARN_ON_ONCE(!list_empty(&nft_net->notify_list)); + WARN_ON_ONCE(!list_empty(&nft_net->destroy_list)); } static void nf_tables_exit_batch(struct list_head *net_exit_list) @@ -12029,10 +12035,8 @@ static void __exit nf_tables_module_exit(void) unregister_netdevice_notifier(&nf_tables_flowtable_notifier); nft_chain_filter_fini(); nft_chain_route_fini(); - nf_tables_trans_destroy_flush_work(); unregister_pernet_subsys(&nf_tables_net_ops); cancel_work_sync(&trans_gc_work); - cancel_work_sync(&trans_destroy_work); rcu_barrier(); rhltable_destroy(&nft_objname_ht); nf_tables_core_module_exit(); diff --git a/net/netfilter/nf_tables_core.c b/net/netfilter/nf_tables_core.c index 75598520b0fa0..6557a4018c099 100644 
--- a/net/netfilter/nf_tables_core.c +++ b/net/netfilter/nf_tables_core.c @@ -21,25 +21,22 @@ #include #include -#if defined(CONFIG_MITIGATION_RETPOLINE) && defined(CONFIG_X86) - +#ifdef CONFIG_MITIGATION_RETPOLINE static struct static_key_false nf_tables_skip_direct_calls; -static bool nf_skip_indirect_calls(void) +static inline bool nf_skip_indirect_calls(void) { return static_branch_likely(&nf_tables_skip_direct_calls); } -static void __init nf_skip_indirect_calls_enable(void) +static inline void __init nf_skip_indirect_calls_enable(void) { if (!cpu_feature_enabled(X86_FEATURE_RETPOLINE)) static_branch_enable(&nf_tables_skip_direct_calls); } #else -static inline bool nf_skip_indirect_calls(void) { return false; } - static inline void nf_skip_indirect_calls_enable(void) { } -#endif +#endif /* CONFIG_MITIGATION_RETPOLINE */ static noinline void __nft_trace_packet(const struct nft_pktinfo *pkt, const struct nft_verdict *verdict, diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c index 5c913987901ab..8b7b39d8a1091 100644 --- a/net/netfilter/nfnetlink_queue.c +++ b/net/netfilter/nfnetlink_queue.c @@ -567,7 +567,7 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue, enum ip_conntrack_info ctinfo = 0; const struct nfnl_ct_hook *nfnl_ct; bool csum_verify; - struct lsm_context ctx; + struct lsm_context ctx = { NULL, 0, 0 }; int seclen = 0; ktime_t tstamp; diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c index 7ca4f0d21fe2a..72711d62fddfa 100644 --- a/net/netfilter/nft_compat.c +++ b/net/netfilter/nft_compat.c @@ -228,7 +228,7 @@ static int nft_parse_compat(const struct nlattr *attr, u16 *proto, bool *inv) return 0; } -static void nft_compat_wait_for_destructors(void) +static void nft_compat_wait_for_destructors(struct net *net) { /* xtables matches or targets can have side effects, e.g. * creation/destruction of /proc files. @@ -236,7 +236,7 @@ static void nft_compat_wait_for_destructors(void) * work queue. If we have pending invocations we thus * need to wait for those to finish.
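 * With the destroy list now per netns, the flush only waits on this
 * namespace's work item; the callee (see nf_tables_api.c above) reduces
 * to (sketch):
 *
 *	struct nftables_pernet *nft_net = nft_pernet(net);
 *	flush_work(&nft_net->destroy_work);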
*/ - nf_tables_trans_destroy_flush_work(); + nf_tables_trans_destroy_flush_work(net); } static int @@ -262,7 +262,7 @@ nft_target_init(const struct nft_ctx *ctx, const struct nft_expr *expr, nft_target_set_tgchk_param(&par, ctx, target, info, &e, proto, inv); - nft_compat_wait_for_destructors(); + nft_compat_wait_for_destructors(ctx->net); ret = xt_check_target(&par, size, proto, inv); if (ret < 0) { @@ -515,7 +515,7 @@ __nft_match_init(const struct nft_ctx *ctx, const struct nft_expr *expr, nft_match_set_mtchk_param(&par, ctx, match, info, &e, proto, inv); - nft_compat_wait_for_destructors(); + nft_compat_wait_for_destructors(ctx->net); return xt_check_match(&par, size, proto, inv); } diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c index 2e59aba681a13..d526e69a2a2b8 100644 --- a/net/netfilter/nft_ct.c +++ b/net/netfilter/nft_ct.c @@ -230,6 +230,7 @@ static void nft_ct_set_zone_eval(const struct nft_expr *expr, enum ip_conntrack_info ctinfo; u16 value = nft_reg_load16(®s->data[priv->sreg]); struct nf_conn *ct; + int oldcnt; ct = nf_ct_get(skb, &ctinfo); if (ct) /* already tracked */ @@ -250,10 +251,11 @@ static void nft_ct_set_zone_eval(const struct nft_expr *expr, ct = this_cpu_read(nft_ct_pcpu_template); - if (likely(refcount_read(&ct->ct_general.use) == 1)) { - refcount_inc(&ct->ct_general.use); + __refcount_inc(&ct->ct_general.use, &oldcnt); + if (likely(oldcnt == 1)) { nf_ct_zone_add(ct, &zone); } else { + refcount_dec(&ct->ct_general.use); /* previous skb got queued to userspace, allocate temporary * one until percpu template can be reused. */ diff --git a/net/netfilter/nft_exthdr.c b/net/netfilter/nft_exthdr.c index b8d03364566c1..c74012c991255 100644 --- a/net/netfilter/nft_exthdr.c +++ b/net/netfilter/nft_exthdr.c @@ -85,7 +85,6 @@ static int ipv4_find_option(struct net *net, struct sk_buff *skb, unsigned char optbuf[sizeof(struct ip_options) + 40]; struct ip_options *opt = (struct ip_options *)optbuf; struct iphdr *iph, _iph; - unsigned int start; bool found = false; __be32 info; int optlen; @@ -93,7 +92,6 @@ static int ipv4_find_option(struct net *net, struct sk_buff *skb, iph = skb_header_pointer(skb, 0, sizeof(_iph), &_iph); if (!iph) return -EBADMSG; - start = sizeof(struct iphdr); optlen = iph->ihl * 4 - (int)sizeof(struct iphdr); if (optlen <= 0) @@ -103,7 +101,7 @@ static int ipv4_find_option(struct net *net, struct sk_buff *skb, /* Copy the options since __ip_options_compile() modifies * the options. */ - if (skb_copy_bits(skb, start, opt->__data, optlen)) + if (skb_copy_bits(skb, sizeof(struct iphdr), opt->__data, optlen)) return -EBADMSG; opt->optlen = optlen; @@ -118,18 +116,18 @@ static int ipv4_find_option(struct net *net, struct sk_buff *skb, found = target == IPOPT_SSRR ? 
opt->is_strictroute : !opt->is_strictroute; if (found) - *offset = opt->srr + start; + *offset = opt->srr; break; case IPOPT_RR: if (!opt->rr) break; - *offset = opt->rr + start; + *offset = opt->rr; found = true; break; case IPOPT_RA: if (!opt->router_alert) break; - *offset = opt->router_alert + start; + *offset = opt->router_alert; found = true; break; default: diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c index fa02aab567245..3b507694e81e5 100644 --- a/net/netfilter/xt_hashlimit.c +++ b/net/netfilter/xt_hashlimit.c @@ -15,7 +15,6 @@ #include #include #include -#include #include #include #include @@ -294,8 +293,7 @@ static int htable_create(struct net *net, struct hashlimit_cfg3 *cfg, if (size < 16) size = 16; } - /* FIXME: don't use vmalloc() here or anywhere else -HW */ - hinfo = vmalloc(struct_size(hinfo, hash, size)); + hinfo = kvmalloc(struct_size(hinfo, hash, size), GFP_KERNEL); if (hinfo == NULL) return -ENOMEM; *out_hinfo = hinfo; @@ -303,7 +301,7 @@ static int htable_create(struct net *net, struct hashlimit_cfg3 *cfg, /* copy match config into hashtable config */ ret = cfg_copy(&hinfo->cfg, (void *)cfg, 3); if (ret) { - vfree(hinfo); + kvfree(hinfo); return ret; } @@ -322,7 +320,7 @@ static int htable_create(struct net *net, struct hashlimit_cfg3 *cfg, hinfo->rnd_initialized = false; hinfo->name = kstrdup(name, GFP_KERNEL); if (!hinfo->name) { - vfree(hinfo); + kvfree(hinfo); return -ENOMEM; } spin_lock_init(&hinfo->lock); @@ -344,7 +342,7 @@ static int htable_create(struct net *net, struct hashlimit_cfg3 *cfg, ops, hinfo); if (hinfo->pde == NULL) { kfree(hinfo->name); - vfree(hinfo); + kvfree(hinfo); return -ENOMEM; } hinfo->net = net; @@ -433,7 +431,7 @@ static void htable_put(struct xt_hashlimit_htable *hinfo) cancel_delayed_work_sync(&hinfo->gc_work); htable_selective_cleanup(hinfo, true); kfree(hinfo->name); - vfree(hinfo); + kvfree(hinfo); } } diff --git a/net/netfilter/xt_repldata.h b/net/netfilter/xt_repldata.h index 5d1fb7018dbac..600060ca940af 100644 --- a/net/netfilter/xt_repldata.h +++ b/net/netfilter/xt_repldata.h @@ -29,7 +29,7 @@ if (tbl == NULL) \ return NULL; \ term = (struct type##_error *)&(((char *)tbl)[term_offset]); \ - strscpy_pad(tbl->repl.name, info->name, sizeof(tbl->repl.name)); \ + strscpy(tbl->repl.name, info->name); \ *term = (struct type##_error)typ2##_ERROR_INIT; \ tbl->repl.valid_hooks = hook_mask; \ tbl->repl.num_entries = nhooks + 1; \ diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 85311226183a2..e8972a857e51e 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -771,6 +771,7 @@ static int netlink_release(struct socket *sock) nlk->cb.done(&nlk->cb); module_put(nlk->cb.module); kfree_skb(nlk->cb.skb); + WRITE_ONCE(nlk->cb_running, false); } module_put(nlk->module); @@ -795,16 +796,6 @@ static int netlink_release(struct socket *sock) sock_prot_inuse_add(sock_net(sk), &netlink_proto, -1); - /* Because struct net might disappear soon, do not keep a pointer. */ - if (!sk->sk_net_refcnt && sock_net(sk) != &init_net) { - __netns_tracker_free(sock_net(sk), &sk->ns_tracker, false); - /* Because of deferred_put_nlk_sk and use of work queue, - * it is possible netns will be freed before this socket. 
- */ - sock_net_set(sk, &init_net); - __netns_tracker_alloc(&init_net, &sk->ns_tracker, - false, GFP_KERNEL); - } call_rcu(&nlk->rcu, deferred_put_nlk_sk); return 0; } diff --git a/net/nfc/hci/llc.c b/net/nfc/hci/llc.c index ba91284f4086e..e6cf4eb06b46c 100644 --- a/net/nfc/hci/llc.c +++ b/net/nfc/hci/llc.c @@ -78,17 +78,6 @@ static struct nfc_llc_engine *nfc_llc_name_to_engine(const char *name) return NULL; } -void nfc_llc_unregister(const char *name) -{ - struct nfc_llc_engine *llc_engine; - - llc_engine = nfc_llc_name_to_engine(name); - if (llc_engine == NULL) - return; - - nfc_llc_del_engine(llc_engine); -} - struct nfc_llc *nfc_llc_allocate(const char *name, struct nfc_hci_dev *hdev, xmit_to_drv_t xmit_to_drv, rcv_to_hci_t rcv_to_hci, int tx_headroom, diff --git a/net/nfc/hci/llc.h b/net/nfc/hci/llc.h index d66271d211a51..09914608ec43e 100644 --- a/net/nfc/hci/llc.h +++ b/net/nfc/hci/llc.h @@ -40,7 +40,6 @@ struct nfc_llc { void *nfc_llc_get_data(struct nfc_llc *llc); int nfc_llc_register(const char *name, const struct nfc_llc_ops *ops); -void nfc_llc_unregister(const char *name); int nfc_llc_nop_register(void); diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c index 3bb4810234aac..e573e92213029 100644 --- a/net/openvswitch/conntrack.c +++ b/net/openvswitch/conntrack.c @@ -1368,8 +1368,11 @@ bool ovs_ct_verify(struct net *net, enum ovs_key_attr attr) attr == OVS_KEY_ATTR_CT_MARK) return true; if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) && - attr == OVS_KEY_ATTR_CT_LABELS) - return true; + attr == OVS_KEY_ATTR_CT_LABELS) { + struct ovs_net *ovs_net = net_generic(net, ovs_net_id); + + return ovs_net->xt_label; + } return false; } @@ -1378,7 +1381,6 @@ int ovs_ct_copy_action(struct net *net, const struct nlattr *attr, const struct sw_flow_key *key, struct sw_flow_actions **sfa, bool log) { - unsigned int n_bits = sizeof(struct ovs_key_ct_labels) * BITS_PER_BYTE; struct ovs_conntrack_info ct_info; const char *helper = NULL; u16 family; @@ -1407,12 +1409,6 @@ int ovs_ct_copy_action(struct net *net, const struct nlattr *attr, return -ENOMEM; } - if (nf_connlabels_get(net, n_bits - 1)) { - nf_ct_tmpl_free(ct_info.ct); - OVS_NLERR(log, "Failed to set connlabel length"); - return -EOPNOTSUPP; - } - if (ct_info.timeout[0]) { if (nf_ct_set_timeout(net, ct_info.ct, family, key->ip.proto, ct_info.timeout)) @@ -1581,7 +1577,6 @@ static void __ovs_ct_free_action(struct ovs_conntrack_info *ct_info) if (ct_info->ct) { if (ct_info->timeout[0]) nf_ct_destroy_timeout(ct_info->ct); - nf_connlabels_put(nf_ct_net(ct_info->ct)); nf_ct_tmpl_free(ct_info->ct); } } @@ -2006,9 +2001,17 @@ struct genl_family dp_ct_limit_genl_family __ro_after_init = { int ovs_ct_init(struct net *net) { -#if IS_ENABLED(CONFIG_NETFILTER_CONNCOUNT) + unsigned int n_bits = sizeof(struct ovs_key_ct_labels) * BITS_PER_BYTE; struct ovs_net *ovs_net = net_generic(net, ovs_net_id); + if (nf_connlabels_get(net, n_bits - 1)) { + ovs_net->xt_label = false; + OVS_NLERR(true, "Failed to set connlabel length"); + } else { + ovs_net->xt_label = true; + } + +#if IS_ENABLED(CONFIG_NETFILTER_CONNCOUNT) return ovs_ct_limit_init(net, ovs_net); #else return 0; @@ -2017,9 +2020,12 @@ int ovs_ct_init(struct net *net) void ovs_ct_exit(struct net *net) { -#if IS_ENABLED(CONFIG_NETFILTER_CONNCOUNT) struct ovs_net *ovs_net = net_generic(net, ovs_net_id); +#if IS_ENABLED(CONFIG_NETFILTER_CONNCOUNT) ovs_ct_limit_exit(net, ovs_net); #endif + + if (ovs_net->xt_label) + nf_connlabels_put(net); } diff --git a/net/openvswitch/datapath.h 
b/net/openvswitch/datapath.h index 365b9bb7f546e..384ca77f4e794 100644 --- a/net/openvswitch/datapath.h +++ b/net/openvswitch/datapath.h @@ -29,8 +29,8 @@ * datapath. * @n_hit: Number of received packets for which a matching flow was found in * the flow table. - * @n_miss: Number of received packets that had no matching flow in the flow - * table. The sum of @n_hit and @n_miss is the number of packets that have + * @n_missed: Number of received packets that had no matching flow in the flow + * table. The sum of @n_hit and @n_missed is the number of packets that have * been received by the datapath. * @n_lost: Number of received packets that had no matching flow in the flow * table that could not be sent to userspace (normally due to an overflow in @@ -40,6 +40,7 @@ * up per packet. * @n_cache_hit: The number of received packets that had their mask found using * the mask cache. + * @syncp: Synchronization point for 64bit counters. */ struct dp_stats_percpu { u64 n_hit; @@ -74,8 +75,10 @@ struct dp_nlsk_pids { * ovs_mutex and RCU. * @stats_percpu: Per-CPU datapath statistics. * @net: Reference to net namespace. - * @max_headroom: the maximum headroom of all vports in this datapath; it will + * @user_features: Bitmap of enabled %OVS_DP_F_* features. + * @max_headroom: The maximum headroom of all vports in this datapath; it will * be used by all the internal vports in this dp. + * @meter_tbl: Meter table. * @upcall_portids: RCU protected 'struct dp_nlsk_pids'. * * Context: See the comment on locking at the top of datapath.c for additional @@ -128,10 +131,13 @@ struct ovs_skb_cb { #define OVS_CB(skb) ((struct ovs_skb_cb *)(skb)->cb) /** - * struct dp_upcall - metadata to include with a packet to send to userspace + * struct dp_upcall_info - metadata to include with a packet sent to userspace * @cmd: One of %OVS_PACKET_CMD_*. * @userdata: If nonnull, its variable-length value is passed to userspace as * %OVS_PACKET_ATTR_USERDATA. + * @actions: If nonnull, its variable-length value is passed to userspace as + * %OVS_PACKET_ATTR_ACTIONS. + * @actions_len: The length of the @actions. * @portid: Netlink portid to which packet should be sent. If @portid is 0 * then no packet is sent and the packet is accounted in the datapath's @n_lost * counter. @@ -152,6 +158,10 @@ struct dp_upcall_info { * struct ovs_net - Per net-namespace data for ovs. * @dps: List of datapaths to enable dumping them all out. * Protected by genl_mutex. + * @dp_notify_work: A work notifier to handle port unregistering. + * @masks_rebalance: A work to periodically optimize flow table caches. + * @ct_limit_info: A hash table of conntrack zone connection limits. + * @xt_label: Whether connlabels are configured for the network or not.
*/ struct ovs_net { struct list_head dps; @@ -160,6 +170,7 @@ struct ovs_net { #if IS_ENABLED(CONFIG_NETFILTER_CONNCOUNT) struct ovs_ct_limit_info *ct_limit_info; #endif + bool xt_label; }; /** diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c index 881ddd3696d54..95e0dd14dc1a3 100644 --- a/net/openvswitch/flow_netlink.c +++ b/net/openvswitch/flow_netlink.c @@ -2317,14 +2317,10 @@ int ovs_nla_put_mask(const struct sw_flow *flow, struct sk_buff *skb) OVS_FLOW_ATTR_MASK, true, skb); } -#define MAX_ACTIONS_BUFSIZE (32 * 1024) - static struct sw_flow_actions *nla_alloc_flow_actions(int size) { struct sw_flow_actions *sfa; - WARN_ON_ONCE(size > MAX_ACTIONS_BUFSIZE); - sfa = kmalloc(kmalloc_size_roundup(sizeof(*sfa) + size), GFP_KERNEL); if (!sfa) return ERR_PTR(-ENOMEM); @@ -2480,15 +2476,6 @@ static struct nlattr *reserve_sfa_size(struct sw_flow_actions **sfa, new_acts_size = max(next_offset + req_size, ksize(*sfa) * 2); - if (new_acts_size > MAX_ACTIONS_BUFSIZE) { - if ((next_offset + req_size) > MAX_ACTIONS_BUFSIZE) { - OVS_NLERR(log, "Flow action size exceeds max %u", - MAX_ACTIONS_BUFSIZE); - return ERR_PTR(-EMSGSIZE); - } - new_acts_size = MAX_ACTIONS_BUFSIZE; - } - acts = nla_alloc_flow_actions(new_acts_size); if (IS_ERR(acts)) return ERR_CAST(acts); @@ -3545,7 +3532,7 @@ int ovs_nla_copy_actions(struct net *net, const struct nlattr *attr, int err; u32 mpls_label_count = 0; - *sfa = nla_alloc_flow_actions(min(nla_len(attr), MAX_ACTIONS_BUFSIZE)); + *sfa = nla_alloc_flow_actions(nla_len(attr)); if (IS_ERR(*sfa)) return PTR_ERR(*sfa); diff --git a/net/openvswitch/vport-internal_dev.c b/net/openvswitch/vport-internal_dev.c index 2412d7813d244..125d310871e93 100644 --- a/net/openvswitch/vport-internal_dev.c +++ b/net/openvswitch/vport-internal_dev.c @@ -149,7 +149,7 @@ static struct vport *internal_dev_create(const struct vport_parms *parms) /* Restrict bridge port to current netns. */ if (vport->port_no == OVSP_LOCAL) - vport->dev->netns_local = true; + vport->dev->netns_immutable = true; rtnl_lock(); err = register_netdevice(vport->dev); diff --git a/net/openvswitch/vport.h b/net/openvswitch/vport.h index 3e71ca8ad8a78..9f67b9dd49f98 100644 --- a/net/openvswitch/vport.h +++ b/net/openvswitch/vport.h @@ -97,6 +97,8 @@ struct vport { * @desired_ifindex: New vport's ifindex. * @dp: New vport's datapath. * @port_no: New vport's port number. + * @upcall_portids: %OVS_VPORT_ATTR_UPCALL_PID attribute from Netlink message, + * %NULL if none was supplied. */ struct vport_parms { const char *name; @@ -125,6 +127,8 @@ struct vport_parms { * have any configuration. * @send: Send a packet on the device. * zero for dropped packets or negative for error. + * @owner: Module that implements this vport type. + * @list: List entry in the global list of vport types. */ struct vport_ops { enum ovs_vport_type type; @@ -144,6 +148,7 @@ struct vport_ops { /** * struct vport_upcall_stats_percpu - per-cpu packet upcall statistics for * a given vport. + * @syncp: Synchronization point for 64bit counters. * @n_success: Number of packets that upcall to userspace succeed. * @n_fail: Number of packets that upcall to userspace failed. */ @@ -164,6 +169,8 @@ void ovs_vport_free(struct vport *); * * @vport: vport to access * + * Returns: A void pointer to a private data allocated in the @vport. + * * If a nonzero size was passed in priv_size of vport_alloc() a private data * area was allocated on creation. 
This allows that area to be accessed and * used for any purpose needed by the vport implementer. @@ -178,6 +185,8 @@ static inline void *vport_priv(const struct vport *vport) * * @priv: Start of private data area. * + * Returns: A reference to a vport structure that contains @priv. + * * It is sometimes useful to translate from a pointer to the private data * area to the vport, such as in the case where the private data pointer is * the result of a hash table lookup. @priv must point to the start of the diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index c131e5ceea374..3e9ddf72cd031 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -2102,8 +2102,8 @@ static int packet_sendmsg_spkt(struct socket *sock, struct msghdr *msg, skb->protocol = proto; skb->dev = dev; - skb->priority = READ_ONCE(sk->sk_priority); - skb->mark = READ_ONCE(sk->sk_mark); + skb->priority = sockc.priority; + skb->mark = sockc.mark; skb_set_delivery_type_by_clockid(skb, sockc.transmit_time, sk->sk_clockid); skb_setup_tx_timestamp(skb, &sockc); @@ -2634,8 +2634,8 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb, skb->protocol = proto; skb->dev = dev; - skb->priority = READ_ONCE(po->sk.sk_priority); - skb->mark = READ_ONCE(po->sk.sk_mark); + skb->priority = sockc->priority; + skb->mark = sockc->mark; skb_set_delivery_type_by_clockid(skb, sockc->transmit_time, po->sk.sk_clockid); skb_setup_tx_timestamp(skb, sockc); skb_zcopy_set_nouarg(skb, ph.raw); @@ -3039,7 +3039,6 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len) goto out_unlock; sockcm_init(&sockc, sk); - sockc.mark = READ_ONCE(sk->sk_mark); if (msg->msg_controllen) { err = sock_cmsg_send(sk, msg, &sockc); if (unlikely(err)) diff --git a/net/rds/stats.c b/net/rds/stats.c index 9e87da43c0045..cb2e3d2cdf738 100644 --- a/net/rds/stats.c +++ b/net/rds/stats.c @@ -89,8 +89,7 @@ void rds_stats_info_copy(struct rds_info_iterator *iter, for (i = 0; i < nr; i++) { BUG_ON(strlen(names[i]) >= sizeof(ctr.name)); - strncpy(ctr.name, names[i], sizeof(ctr.name) - 1); - ctr.name[sizeof(ctr.name) - 1] = '\0'; + strscpy_pad(ctr.name, names[i]); ctr.value = values[i]; rds_info_copy(iter, &ctr, sizeof(ctr)); diff --git a/net/rds/tcp.c b/net/rds/tcp.c index 0581c53e65170..3cc2f303bf786 100644 --- a/net/rds/tcp.c +++ b/net/rds/tcp.c @@ -504,12 +504,8 @@ bool rds_tcp_tune(struct socket *sock) release_sock(sk); return false; } - /* Update ns_tracker to current stack trace and refcounted tracker */ - __netns_tracker_free(net, &sk->ns_tracker, false); - - sk->sk_net_refcnt = 1; - netns_tracker_alloc(net, &sk->ns_tracker, GFP_KERNEL); - sock_inuse_add(net, 1); + sk_net_refcnt_upgrade(sk); + put_net(net); } rtn = net_generic(net, rds_tcp_netid); if (rtn->sndbuf_size > 0) { diff --git a/net/rfkill/rfkill-gpio.c b/net/rfkill/rfkill-gpio.c index 9fa019e0dcadd..41e657e977618 100644 --- a/net/rfkill/rfkill-gpio.c +++ b/net/rfkill/rfkill-gpio.c @@ -162,6 +162,9 @@ static int rfkill_gpio_probe(struct platform_device *pdev) if (!rfkill->rfkill_dev) return -ENOMEM; + if (device_property_present(&pdev->dev, "default-blocked")) + rfkill_init_sw_state(rfkill->rfkill_dev, true); + ret = rfkill_register(rfkill->rfkill_dev); if (ret < 0) goto err_destroy; diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h index 5e740c4862034..a64a0cab1bf7f 100644 --- a/net/rxrpc/ar-internal.h +++ b/net/rxrpc/ar-internal.h @@ -360,7 +360,6 @@ struct rxrpc_peer { u8 pmtud_jumbo; /* Max jumbo packets for the MTU */ bool 
ackr_adv_pmtud; /* T if the peer advertises path-MTU */ unsigned int ackr_max_data; /* Maximum data advertised by peer */ - seqcount_t mtu_lock; /* Lockless MTU access management */ unsigned int if_mtu; /* Local interface MTU (- hdrsize) for this peer */ unsigned int max_data; /* Maximum packet data capacity for this peer */ unsigned short hdrsize; /* header size (IP + UDP + RxRPC) */ diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c index 9047ba13bd31e..24aceb183c2c3 100644 --- a/net/rxrpc/input.c +++ b/net/rxrpc/input.c @@ -810,9 +810,7 @@ static void rxrpc_input_ack_trailer(struct rxrpc_call *call, struct sk_buff *skb if (max_mtu < peer->max_data) { trace_rxrpc_pmtud_reduce(peer, sp->hdr.serial, max_mtu, rxrpc_pmtud_reduce_ack); - write_seqcount_begin(&peer->mtu_lock); peer->max_data = max_mtu; - write_seqcount_end(&peer->mtu_lock); } max_data = umin(max_mtu, peer->max_data); diff --git a/net/rxrpc/peer_event.c b/net/rxrpc/peer_event.c index bc283da9ee402..7f4729234957e 100644 --- a/net/rxrpc/peer_event.c +++ b/net/rxrpc/peer_event.c @@ -130,9 +130,7 @@ static void rxrpc_adjust_mtu(struct rxrpc_peer *peer, unsigned int mtu) peer->pmtud_bad = max_data + 1; trace_rxrpc_pmtud_reduce(peer, 0, max_data, rxrpc_pmtud_reduce_icmp); - write_seqcount_begin(&peer->mtu_lock); peer->max_data = max_data; - write_seqcount_end(&peer->mtu_lock); } } @@ -408,13 +406,8 @@ void rxrpc_input_probe_for_pmtud(struct rxrpc_connection *conn, rxrpc_serial_t a } max_data = umin(max_data, peer->ackr_max_data); - if (max_data != peer->max_data) { - preempt_disable(); - write_seqcount_begin(&peer->mtu_lock); + if (max_data != peer->max_data) peer->max_data = max_data; - write_seqcount_end(&peer->mtu_lock); - preempt_enable(); - } jumbo = max_data + sizeof(struct rxrpc_jumbo_header); jumbo /= RXRPC_JUMBO_SUBPKTLEN; diff --git a/net/rxrpc/peer_object.c b/net/rxrpc/peer_object.c index 0fcc87f0409f9..56e09d161a97f 100644 --- a/net/rxrpc/peer_object.c +++ b/net/rxrpc/peer_object.c @@ -235,7 +235,6 @@ struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *local, gfp_t gfp, peer->service_conns = RB_ROOT; seqlock_init(&peer->service_conn_lock); spin_lock_init(&peer->lock); - seqcount_init(&peer->mtu_lock); peer->debug_id = atomic_inc_return(&rxrpc_debug_id); peer->recent_srtt_us = UINT_MAX; peer->cong_ssthresh = RXRPC_TX_MAX_WINDOW; @@ -325,10 +324,10 @@ void rxrpc_new_incoming_peer(struct rxrpc_local *local, struct rxrpc_peer *peer) hash_key = rxrpc_peer_hash_key(local, &peer->srx); rxrpc_init_peer(local, peer, hash_key); - spin_lock_bh(&rxnet->peer_hash_lock); + spin_lock(&rxnet->peer_hash_lock); hash_add_rcu(rxnet->peer_hash, &peer->hash_link, hash_key); list_add_tail(&peer->keepalive_link, &rxnet->peer_keepalive_new); - spin_unlock_bh(&rxnet->peer_hash_lock); + spin_unlock(&rxnet->peer_hash_lock); } /* diff --git a/net/rxrpc/rxperf.c b/net/rxrpc/rxperf.c index 7ef93407be830..e848a4777b8c7 100644 --- a/net/rxrpc/rxperf.c +++ b/net/rxrpc/rxperf.c @@ -478,6 +478,18 @@ static int rxperf_deliver_request(struct rxperf_call *call) call->unmarshal++; fallthrough; case 2: + ret = rxperf_extract_data(call, true); + if (ret < 0) + return ret; + + /* Deal with the terminal magic cookie. 
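 * State 2 pulls the bulk payload with rxperf_extract_data(call, true)
 * (the bool being a want_more style flag, more data expected); this final
 * state then reads the trailing 4-byte cookie with the flag cleared so
 * the receive phase is seen as complete.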
*/ + call->iov_len = 4; + call->kvec[0].iov_len = call->iov_len; + call->kvec[0].iov_base = call->tmp; + iov_iter_kvec(&call->iter, READ, call->kvec, 1, call->iov_len); + call->unmarshal++; + fallthrough; + case 3: ret = rxperf_extract_data(call, false); if (ret < 0) return ret; diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c index af7c998459488..ae5dea7c48a82 100644 --- a/net/sched/act_tunnel_key.c +++ b/net/sched/act_tunnel_key.c @@ -571,8 +571,8 @@ static void tunnel_key_release(struct tc_action *a) static int tunnel_key_geneve_opts_dump(struct sk_buff *skb, const struct ip_tunnel_info *info) { + const u8 *src = ip_tunnel_info_opts(info); int len = info->options_len; - u8 *src = (u8 *)(info + 1); struct nlattr *start; start = nla_nest_start_noflag(skb, TCA_TUNNEL_KEY_ENC_OPTS_GENEVE); @@ -580,7 +580,7 @@ static int tunnel_key_geneve_opts_dump(struct sk_buff *skb, return -EMSGSIZE; while (len > 0) { - struct geneve_opt *opt = (struct geneve_opt *)src; + const struct geneve_opt *opt = (const struct geneve_opt *)src; if (nla_put_be16(skb, TCA_TUNNEL_KEY_ENC_OPT_GENEVE_CLASS, opt->opt_class) || @@ -603,7 +603,7 @@ static int tunnel_key_geneve_opts_dump(struct sk_buff *skb, static int tunnel_key_vxlan_opts_dump(struct sk_buff *skb, const struct ip_tunnel_info *info) { - struct vxlan_metadata *md = (struct vxlan_metadata *)(info + 1); + const struct vxlan_metadata *md = ip_tunnel_info_opts(info); struct nlattr *start; start = nla_nest_start_noflag(skb, TCA_TUNNEL_KEY_ENC_OPTS_VXLAN); @@ -622,7 +622,7 @@ static int tunnel_key_vxlan_opts_dump(struct sk_buff *skb, static int tunnel_key_erspan_opts_dump(struct sk_buff *skb, const struct ip_tunnel_info *info) { - struct erspan_metadata *md = (struct erspan_metadata *)(info + 1); + const struct erspan_metadata *md = ip_tunnel_info_opts(info); struct nlattr *start; start = nla_nest_start_noflag(skb, TCA_TUNNEL_KEY_ENC_OPTS_ERSPAN); diff --git a/net/sched/em_meta.c b/net/sched/em_meta.c index 8996c73c9779b..3f2e707a11d18 100644 --- a/net/sched/em_meta.c +++ b/net/sched/em_meta.c @@ -460,7 +460,7 @@ META_COLLECTOR(int_sk_fwd_alloc) *err = -1; return; } - dst->value = sk_forward_alloc_get(sk); + dst->value = READ_ONCE(sk->sk_forward_alloc); } META_COLLECTOR(int_sk_sndbuf) diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index e3e91cf867eb9..85ddb811780cf 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c @@ -26,6 +26,7 @@ #include #include +#include #include #include #include @@ -1279,9 +1280,13 @@ static struct Qdisc *qdisc_create(struct net_device *dev, * We replay the request because the device may * go away in the mean time. */ + dev_hold(dev); + netdev_unlock_ops(dev); rtnl_unlock(); request_module(NET_SCH_ALIAS_PREFIX "%s", name); rtnl_lock(); + netdev_lock_ops(dev); + dev_put(dev); ops = qdisc_lookup_ops(kind); if (ops != NULL) { /* We will try again qdisc_lookup_ops, @@ -1505,27 +1510,18 @@ const struct nla_policy rtm_tca_policy[TCA_MAX + 1] = { * Delete/get qdisc. 
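 * Netlink parsing stays lockless in the tc_get_qdisc() wrapper while the
 * qdisc lookup and grafting run under the per-device ops lock (sketch):
 *
 *	netdev_lock_ops(dev);
 *	err = __tc_get_qdisc(skb, n, extack, dev, tca, tcm);
 *	netdev_unlock_ops(dev);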
*/ -static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, - struct netlink_ext_ack *extack) +static int __tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, + struct netlink_ext_ack *extack, + struct net_device *dev, + struct nlattr *tca[TCA_MAX + 1], + struct tcmsg *tcm) { struct net *net = sock_net(skb->sk); - struct tcmsg *tcm = nlmsg_data(n); - struct nlattr *tca[TCA_MAX + 1]; - struct net_device *dev; - u32 clid; struct Qdisc *q = NULL; struct Qdisc *p = NULL; + u32 clid; int err; - err = nlmsg_parse_deprecated(n, sizeof(*tcm), tca, TCA_MAX, - rtm_tca_policy, extack); - if (err < 0) - return err; - - dev = __dev_get_by_index(net, tcm->tcm_ifindex); - if (!dev) - return -ENODEV; - clid = tcm->tcm_parent; if (clid) { if (clid != TC_H_ROOT) { @@ -1582,6 +1578,31 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, return 0; } +static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, + struct netlink_ext_ack *extack) +{ + struct net *net = sock_net(skb->sk); + struct tcmsg *tcm = nlmsg_data(n); + struct nlattr *tca[TCA_MAX + 1]; + struct net_device *dev; + int err; + + err = nlmsg_parse_deprecated(n, sizeof(*tcm), tca, TCA_MAX, + rtm_tca_policy, extack); + if (err < 0) + return err; + + dev = __dev_get_by_index(net, tcm->tcm_ifindex); + if (!dev) + return -ENODEV; + + netdev_lock_ops(dev); + err = __tc_get_qdisc(skb, n, extack, dev, tca, tcm); + netdev_unlock_ops(dev); + + return err; +} + static bool req_create_or_replace(struct nlmsghdr *n) { return (n->nlmsg_flags & NLM_F_CREATE && @@ -1601,35 +1622,19 @@ static bool req_change(struct nlmsghdr *n) !(n->nlmsg_flags & NLM_F_EXCL)); } -/* - * Create/change qdisc. - */ -static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n, - struct netlink_ext_ack *extack) +static int __tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n, + struct netlink_ext_ack *extack, + struct net_device *dev, + struct nlattr *tca[TCA_MAX + 1], + struct tcmsg *tcm, + bool *replay) { - struct net *net = sock_net(skb->sk); - struct tcmsg *tcm; - struct nlattr *tca[TCA_MAX + 1]; - struct net_device *dev; + struct Qdisc *q = NULL; + struct Qdisc *p = NULL; u32 clid; - struct Qdisc *q, *p; int err; -replay: - /* Reinit, just in case something touches this. */ - err = nlmsg_parse_deprecated(n, sizeof(*tcm), tca, TCA_MAX, - rtm_tca_policy, extack); - if (err < 0) - return err; - - tcm = nlmsg_data(n); clid = tcm->tcm_parent; - q = p = NULL; - - dev = __dev_get_by_index(net, tcm->tcm_ifindex); - if (!dev) - return -ENODEV; - if (clid) { if (clid != TC_H_ROOT) { @@ -1755,7 +1760,7 @@ static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n, } err = qdisc_change(q, tca, extack); if (err == 0) - qdisc_notify(net, skb, n, clid, NULL, q, extack); + qdisc_notify(sock_net(skb->sk), skb, n, clid, NULL, q, extack); return err; create_n_graft: @@ -1788,8 +1793,10 @@ static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n, tca, &err, extack); } if (q == NULL) { - if (err == -EAGAIN) - goto replay; + if (err == -EAGAIN) { + *replay = true; + return 0; + } return err; } @@ -1804,6 +1811,41 @@ static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n, return 0; } +/* + * Create/change qdisc. + */ +static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n, + struct netlink_ext_ack *extack) +{ + struct net *net = sock_net(skb->sk); + struct nlattr *tca[TCA_MAX + 1]; + struct net_device *dev; + struct tcmsg *tcm; + bool replay; + int err; + +replay: + /* Reinit, just in case something touches this. 
*/ + err = nlmsg_parse_deprecated(n, sizeof(*tcm), tca, TCA_MAX, + rtm_tca_policy, extack); + if (err < 0) + return err; + + tcm = nlmsg_data(n); + dev = __dev_get_by_index(net, tcm->tcm_ifindex); + if (!dev) + return -ENODEV; + + replay = false; + netdev_lock_ops(dev); + err = __tc_modify_qdisc(skb, n, extack, dev, tca, tcm, &replay); + netdev_unlock_ops(dev); + if (replay) + goto replay; + + return err; +} + static int tc_dump_qdisc_root(struct Qdisc *root, struct sk_buff *skb, struct netlink_callback *cb, int *q_idx_p, int s_q_idx, bool recur, @@ -1888,17 +1930,23 @@ static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb) s_q_idx = 0; q_idx = 0; + netdev_lock_ops(dev); if (tc_dump_qdisc_root(rtnl_dereference(dev->qdisc), skb, cb, &q_idx, s_q_idx, - true, tca[TCA_DUMP_INVISIBLE]) < 0) + true, tca[TCA_DUMP_INVISIBLE]) < 0) { + netdev_unlock_ops(dev); goto done; + } dev_queue = dev_ingress_queue(dev); if (dev_queue && tc_dump_qdisc_root(rtnl_dereference(dev_queue->qdisc_sleeping), skb, cb, &q_idx, s_q_idx, false, - tca[TCA_DUMP_INVISIBLE]) < 0) + tca[TCA_DUMP_INVISIBLE]) < 0) { + netdev_unlock_ops(dev); goto done; + } + netdev_unlock_ops(dev); cont: idx++; @@ -2135,15 +2183,15 @@ static void tc_bind_tclass(struct Qdisc *q, u32 portid, u32 clid, #endif -static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, - struct netlink_ext_ack *extack) +static int __tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, + struct netlink_ext_ack *extack, + struct net_device *dev, + struct nlattr *tca[TCA_MAX + 1], + struct tcmsg *tcm) { struct net *net = sock_net(skb->sk); - struct tcmsg *tcm = nlmsg_data(n); - struct nlattr *tca[TCA_MAX + 1]; - struct net_device *dev; - struct Qdisc *q = NULL; const struct Qdisc_class_ops *cops; + struct Qdisc *q = NULL; unsigned long cl = 0; unsigned long new_cl; u32 portid; @@ -2151,15 +2199,6 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, u32 qid; int err; - err = nlmsg_parse_deprecated(n, sizeof(*tcm), tca, TCA_MAX, - rtm_tca_policy, extack); - if (err < 0) - return err; - - dev = __dev_get_by_index(net, tcm->tcm_ifindex); - if (!dev) - return -ENODEV; - /* parent == TC_H_UNSPEC - unspecified parent. parent == TC_H_ROOT - class is root, which has no parent. 
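The sch_api.c changes on either side of this point all apply the same refactoring pattern: each RTM handler (tc_get_qdisc, tc_modify_qdisc, tc_ctl_tclass, tc_dump_tclass) is split into a thin entry point that parses the netlink message and resolves the target device, plus a double-underscore helper that runs with the netdev instance lock held via netdev_lock_ops()/netdev_unlock_ops(). A minimal sketch of that pattern, reusing only helpers visible in the hunks; tc_example/__tc_example are illustrative names, not functions from this patch:

/*
 * Sketch of the wrapper/helper split used by the qdisc and tclass
 * handlers above. The helper body is elided; the point is that it
 * runs entirely under the device's instance lock.
 */
static int __tc_example(struct sk_buff *skb, struct nlmsghdr *n,
			struct netlink_ext_ack *extack,
			struct net_device *dev,
			struct nlattr *tca[TCA_MAX + 1],
			struct tcmsg *tcm)
{
	/* Runs under netdev_lock_ops(dev): safe to touch qdisc state. */
	return 0;
}

static int tc_example(struct sk_buff *skb, struct nlmsghdr *n,
		      struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	struct net_device *dev;
	struct tcmsg *tcm;
	int err;

	err = nlmsg_parse_deprecated(n, sizeof(*tcm), tca, TCA_MAX,
				     rtm_tca_policy, extack);
	if (err < 0)
		return err;

	tcm = nlmsg_data(n);
	dev = __dev_get_by_index(net, tcm->tcm_ifindex);
	if (!dev)
		return -ENODEV;

	netdev_lock_ops(dev);
	err = __tc_example(skb, n, extack, dev, tca, tcm);
	netdev_unlock_ops(dev);

	return err;
}

The one wrinkle is tc_modify_qdisc: its old "goto replay" after request_module() would otherwise jump back into the locked region, so the helper instead reports replay through a bool and the wrapper re-parses and re-locks from the top, as the hunks above show.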
@@ -2254,6 +2293,12 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, return -EOPNOTSUPP; } + /* Prevent creation of traffic classes with classid TC_H_ROOT */ + if (clid == TC_H_ROOT) { + NL_SET_ERR_MSG(extack, "Cannot create traffic class with classid TC_H_ROOT"); + return -EINVAL; + } + new_cl = cl; err = -EOPNOTSUPP; if (cops->change) @@ -2268,6 +2313,31 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, return err; } +static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, + struct netlink_ext_ack *extack) +{ + struct net *net = sock_net(skb->sk); + struct tcmsg *tcm = nlmsg_data(n); + struct nlattr *tca[TCA_MAX + 1]; + struct net_device *dev; + int err; + + err = nlmsg_parse_deprecated(n, sizeof(*tcm), tca, TCA_MAX, + rtm_tca_policy, extack); + if (err < 0) + return err; + + dev = __dev_get_by_index(net, tcm->tcm_ifindex); + if (!dev) + return -ENODEV; + + netdev_lock_ops(dev); + err = __tc_ctl_tclass(skb, n, extack, dev, tca, tcm); + netdev_unlock_ops(dev); + + return err; +} + struct qdisc_dump_args { struct qdisc_walker w; struct sk_buff *skb; @@ -2344,20 +2414,12 @@ static int tc_dump_tclass_root(struct Qdisc *root, struct sk_buff *skb, return 0; } -static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb) +static int __tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb, + struct tcmsg *tcm, struct net_device *dev) { - struct tcmsg *tcm = nlmsg_data(cb->nlh); - struct net *net = sock_net(skb->sk); struct netdev_queue *dev_queue; - struct net_device *dev; int t, s_t; - if (nlmsg_len(cb->nlh) < sizeof(*tcm)) - return 0; - dev = dev_get_by_index(net, tcm->tcm_ifindex); - if (!dev) - return 0; - s_t = cb->args[0]; t = 0; @@ -2374,10 +2436,32 @@ static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb) done: cb->args[0] = t; - dev_put(dev); return skb->len; } +static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb) +{ + struct tcmsg *tcm = nlmsg_data(cb->nlh); + struct net *net = sock_net(skb->sk); + struct net_device *dev; + int err; + + if (nlmsg_len(cb->nlh) < sizeof(*tcm)) + return 0; + + dev = dev_get_by_index(net, tcm->tcm_ifindex); + if (!dev) + return 0; + + netdev_lock_ops(dev); + err = __tc_dump_tclass(skb, cb, tcm, dev); + netdev_unlock_ops(dev); + + dev_put(dev); + + return err; +} + #ifdef CONFIG_PROC_FS static int psched_show(struct seq_file *seq, void *v) { diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c index ab6234b4fcd54..532fde548b88f 100644 --- a/net/sched/sch_gred.c +++ b/net/sched/sch_gred.c @@ -913,7 +913,8 @@ static void gred_destroy(struct Qdisc *sch) for (i = 0; i < table->DPs; i++) gred_destroy_vq(table->tab[i]); - gred_offload(sch, TC_GRED_DESTROY); + if (table->opt) + gred_offload(sch, TC_GRED_DESTROY); kfree(table->opt); } diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c index 6a07cdbdb9e12..2cfbc977fe6d0 100644 --- a/net/sched/sch_qfq.c +++ b/net/sched/sch_qfq.c @@ -447,7 +447,7 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, if (q->wsum + delta_w > QFQ_MAX_WSUM) { NL_SET_ERR_MSG_FMT_MOD(extack, - "total weight out of range (%d + %u)\n", + "total weight out of range (%d + %u)", delta_w, q->wsum); return -EINVAL; } diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c index 29727ed1008ed..5407a3922101d 100644 --- a/net/sctp/protocol.c +++ b/net/sctp/protocol.c @@ -185,12 +185,9 @@ static void sctp_v4_copy_ip_options(struct sock *sk, struct sock *newsk) rcu_read_lock(); inet_opt = 
rcu_dereference(inet->inet_opt); if (inet_opt) { - newopt = sock_kmalloc(newsk, sizeof(*inet_opt) + + newopt = sock_kmemdup(newsk, inet_opt, sizeof(*inet_opt) + inet_opt->opt.optlen, GFP_ATOMIC); - if (newopt) - memcpy(newopt, inet_opt, sizeof(*inet_opt) + - inet_opt->opt.optlen); - else + if (!newopt) pr_err("%s: Failed to copy ip options\n", __func__); } RCU_INIT_POINTER(newinet->inet_opt, newopt); diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c index ca6984541edbd..3e6cb35baf25a 100644 --- a/net/smc/af_smc.c +++ b/net/smc/af_smc.c @@ -3337,10 +3337,7 @@ int smc_create_clcsk(struct net *net, struct sock *sk, int family) * which need net ref. */ sk = smc->clcsock->sk; - __netns_tracker_free(net, &sk->ns_tracker, false); - sk->sk_net_refcnt = 1; - get_net_track(net, &sk->ns_tracker, GFP_KERNEL); - sock_inuse_add(net, 1); + sk_net_refcnt_upgrade(sk); return 0; } diff --git a/net/smc/smc_pnet.c b/net/smc/smc_pnet.c index 716808f374a8d..b391c2ef463f2 100644 --- a/net/smc/smc_pnet.c +++ b/net/smc/smc_pnet.c @@ -1079,14 +1079,16 @@ static void smc_pnet_find_roce_by_pnetid(struct net_device *ndev, struct smc_init_info *ini) { u8 ndev_pnetid[SMC_MAX_PNETID_LEN]; + struct net_device *base_ndev; struct net *net; - ndev = pnet_find_base_ndev(ndev); + base_ndev = pnet_find_base_ndev(ndev); net = dev_net(ndev); - if (smc_pnetid_by_dev_port(ndev->dev.parent, ndev->dev_port, + if (smc_pnetid_by_dev_port(base_ndev->dev.parent, base_ndev->dev_port, ndev_pnetid) && + smc_pnet_find_ndev_pnetid_by_table(base_ndev, ndev_pnetid) && smc_pnet_find_ndev_pnetid_by_table(ndev, ndev_pnetid)) { - smc_pnet_find_rdma_dev(ndev, ini); + smc_pnet_find_rdma_dev(base_ndev, ini); return; /* pnetid could not be determined */ } _smc_pnet_find_roce_by_pnetid(ndev_pnetid, ini, NULL, net); diff --git a/net/socket.c b/net/socket.c index 28bae5a942341..9a0e720f08598 100644 --- a/net/socket.c +++ b/net/socket.c @@ -680,17 +680,8 @@ void __sock_tx_timestamp(__u32 tsflags, __u8 *tx_flags) { u8 flags = *tx_flags; - if (tsflags & SOF_TIMESTAMPING_TX_HARDWARE) { - flags |= SKBTX_HW_TSTAMP; - - /* PTP hardware clocks can provide a free running cycle counter - * as a time base for virtual clocks. Tell driver to use the - * free running cycle counter for timestamp if socket is bound - * to virtual clock. 
- */ - if (tsflags & SOF_TIMESTAMPING_BIND_PHC) - flags |= SKBTX_HW_TSTAMP_USE_CYCLES; - } + if (tsflags & SOF_TIMESTAMPING_TX_HARDWARE) + flags |= SKBTX_HW_TSTAMP_NOBPF; if (tsflags & SOF_TIMESTAMPING_TX_SOFTWARE) flags |= SKBTX_SW_TSTAMP; @@ -698,6 +689,9 @@ void __sock_tx_timestamp(__u32 tsflags, __u8 *tx_flags) if (tsflags & SOF_TIMESTAMPING_TX_SCHED) flags |= SKBTX_SCHED_TSTAMP; + if (tsflags & SOF_TIMESTAMPING_TX_COMPLETION) + flags |= SKBTX_COMPLETION_TSTAMP; + *tx_flags = flags; } EXPORT_SYMBOL(__sock_tx_timestamp); @@ -1145,12 +1139,10 @@ static ssize_t sock_write_iter(struct kiocb *iocb, struct iov_iter *from) */ static DEFINE_MUTEX(br_ioctl_mutex); -static int (*br_ioctl_hook)(struct net *net, struct net_bridge *br, - unsigned int cmd, struct ifreq *ifr, +static int (*br_ioctl_hook)(struct net *net, unsigned int cmd, void __user *uarg); -void brioctl_set(int (*hook)(struct net *net, struct net_bridge *br, - unsigned int cmd, struct ifreq *ifr, +void brioctl_set(int (*hook)(struct net *net, unsigned int cmd, void __user *uarg)) { mutex_lock(&br_ioctl_mutex); @@ -1159,8 +1151,7 @@ void brioctl_set(int (*hook)(struct net *net, struct net_bridge *br, } EXPORT_SYMBOL(brioctl_set); -int br_ioctl_call(struct net *net, struct net_bridge *br, unsigned int cmd, - struct ifreq *ifr, void __user *uarg) +int br_ioctl_call(struct net *net, unsigned int cmd, void __user *uarg) { int err = -ENOPKG; @@ -1169,7 +1160,7 @@ int br_ioctl_call(struct net *net, struct net_bridge *br, unsigned int cmd, mutex_lock(&br_ioctl_mutex); if (br_ioctl_hook) - err = br_ioctl_hook(net, br, cmd, ifr, uarg); + err = br_ioctl_hook(net, cmd, uarg); mutex_unlock(&br_ioctl_mutex); return err; @@ -1269,7 +1260,9 @@ static long sock_ioctl(struct file *file, unsigned cmd, unsigned long arg) case SIOCSIFBR: case SIOCBRADDBR: case SIOCBRDELBR: - err = br_ioctl_call(net, NULL, cmd, NULL, argp); + case SIOCBRADDIF: + case SIOCBRDELIF: + err = br_ioctl_call(net, cmd, argp); break; case SIOCGIFVLAN: case SIOCSIFVLAN: @@ -3429,6 +3422,8 @@ static int compat_sock_ioctl_trans(struct file *file, struct socket *sock, case SIOCGPGRP: case SIOCBRADDBR: case SIOCBRDELBR: + case SIOCBRADDIF: + case SIOCBRDELIF: case SIOCGIFVLAN: case SIOCSIFVLAN: case SIOCGSKNS: @@ -3468,8 +3463,6 @@ static int compat_sock_ioctl_trans(struct file *file, struct socket *sock, case SIOCGIFPFLAGS: case SIOCGIFTXQLEN: case SIOCSIFTXQLEN: - case SIOCBRADDIF: - case SIOCBRDELIF: case SIOCGIFNAME: case SIOCSIFNAME: case SIOCGMIIPHY: diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c index cb279eb9ac4ba..7ce5e28a6c031 100644 --- a/net/sunrpc/cache.c +++ b/net/sunrpc/cache.c @@ -1674,12 +1674,14 @@ static void remove_cache_proc_entries(struct cache_detail *cd) } } -#ifdef CONFIG_PROC_FS static int create_cache_proc_entries(struct cache_detail *cd, struct net *net) { struct proc_dir_entry *p; struct sunrpc_net *sn; + if (!IS_ENABLED(CONFIG_PROC_FS)) + return 0; + sn = net_generic(net, sunrpc_net_id); cd->procfs = proc_mkdir(cd->name, sn->proc_net_rpc); if (cd->procfs == NULL) @@ -1707,12 +1709,6 @@ static int create_cache_proc_entries(struct cache_detail *cd, struct net *net) remove_cache_proc_entries(cd); return -ENOMEM; } -#else /* CONFIG_PROC_FS */ -static int create_cache_proc_entries(struct cache_detail *cd, struct net *net) -{ - return 0; -} -#endif void __init cache_initialize(void) { diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c index cef623ea15060..9b45fbdc90cab 100644 --- a/net/sunrpc/sched.c +++ b/net/sunrpc/sched.c @@ -864,8 +864,6 @@ void 
rpc_signal_task(struct rpc_task *task) if (!rpc_task_set_rpc_status(task, -ERESTARTSYS)) return; trace_rpc_task_signalled(task, task->tk_action); - set_bit(RPC_TASK_SIGNALLED, &task->tk_runstate); - smp_mb__after_atomic(); queue = READ_ONCE(task->tk_waitqueue); if (queue) rpc_wake_up_queued_task(queue, task); diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c index cb3bd12f5818b..72e5a01df3d35 100644 --- a/net/sunrpc/svcsock.c +++ b/net/sunrpc/svcsock.c @@ -1541,10 +1541,7 @@ static struct svc_xprt *svc_create_socket(struct svc_serv *serv, newlen = error; if (protocol == IPPROTO_TCP) { - __netns_tracker_free(net, &sock->sk->ns_tracker, false); - sock->sk->sk_net_refcnt = 1; - get_net_track(net, &sock->sk->ns_tracker, GFP_KERNEL); - sock_inuse_add(net, 1); + sk_net_refcnt_upgrade(sock->sk); if ((error = kernel_listen(sock, 64)) < 0) goto bummer; } diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index c60936d8cef71..83cc095846d35 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -1941,12 +1941,8 @@ static struct socket *xs_create_sock(struct rpc_xprt *xprt, goto out; } - if (protocol == IPPROTO_TCP) { - __netns_tracker_free(xprt->xprt_net, &sock->sk->ns_tracker, false); - sock->sk->sk_net_refcnt = 1; - get_net_track(xprt->xprt_net, &sock->sk->ns_tracker, GFP_KERNEL); - sock_inuse_add(xprt->xprt_net, 1); - } + if (protocol == IPPROTO_TCP) + sk_net_refcnt_upgrade(sock->sk); filp = sock_alloc_file(sock, O_NONBLOCK, NULL); if (IS_ERR(filp)) @@ -2581,7 +2577,15 @@ static void xs_tls_handshake_done(void *data, int status, key_serial_t peerid) struct sock_xprt *lower_transport = container_of(lower_xprt, struct sock_xprt, xprt); - lower_transport->xprt_err = status ? -EACCES : 0; + switch (status) { + case 0: + case -EACCES: + case -ETIMEDOUT: + lower_transport->xprt_err = status; + break; + default: + lower_transport->xprt_err = -EACCES; + } complete(&lower_transport->handshake_done); xprt_put(lower_xprt); } diff --git a/net/switchdev/switchdev.c b/net/switchdev/switchdev.c index 6488ead9e4645..4d5fbacef496f 100644 --- a/net/switchdev/switchdev.c +++ b/net/switchdev/switchdev.c @@ -472,7 +472,7 @@ bool switchdev_port_obj_act_is_deferred(struct net_device *dev, EXPORT_SYMBOL_GPL(switchdev_port_obj_act_is_deferred); static ATOMIC_NOTIFIER_HEAD(switchdev_notif_chain); -static BLOCKING_NOTIFIER_HEAD(switchdev_blocking_notif_chain); +static RAW_NOTIFIER_HEAD(switchdev_blocking_notif_chain); /** * register_switchdev_notifier - Register notifier @@ -518,17 +518,27 @@ EXPORT_SYMBOL_GPL(call_switchdev_notifiers); int register_switchdev_blocking_notifier(struct notifier_block *nb) { - struct blocking_notifier_head *chain = &switchdev_blocking_notif_chain; + struct raw_notifier_head *chain = &switchdev_blocking_notif_chain; + int err; + + rtnl_lock(); + err = raw_notifier_chain_register(chain, nb); + rtnl_unlock(); - return blocking_notifier_chain_register(chain, nb); + return err; } EXPORT_SYMBOL_GPL(register_switchdev_blocking_notifier); int unregister_switchdev_blocking_notifier(struct notifier_block *nb) { - struct blocking_notifier_head *chain = &switchdev_blocking_notif_chain; + struct raw_notifier_head *chain = &switchdev_blocking_notif_chain; + int err; - return blocking_notifier_chain_unregister(chain, nb); + rtnl_lock(); + err = raw_notifier_chain_unregister(chain, nb); + rtnl_unlock(); + + return err; } EXPORT_SYMBOL_GPL(unregister_switchdev_blocking_notifier); @@ -536,10 +546,11 @@ int call_switchdev_blocking_notifiers(unsigned long val, struct net_device *dev, 
struct switchdev_notifier_info *info, struct netlink_ext_ack *extack) { + ASSERT_RTNL(); info->dev = dev; info->extack = extack; - return blocking_notifier_call_chain(&switchdev_blocking_notif_chain, - val, info); + return raw_notifier_call_chain(&switchdev_blocking_notif_chain, + val, info); } EXPORT_SYMBOL_GPL(call_switchdev_blocking_notifiers); diff --git a/net/tipc/link.c b/net/tipc/link.c index 5c2088a469cea..50c2e0846ea4d 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c @@ -1951,7 +1951,6 @@ void tipc_link_create_dummy_tnl_msg(struct tipc_link *l, void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl, int mtyp, struct sk_buff_head *xmitq) { - struct sk_buff_head *fdefq = &tnl->failover_deferdq; struct sk_buff *skb, *tnlskb; struct tipc_msg *hdr, tnlhdr; struct sk_buff_head *queue = &l->transmq; @@ -2078,6 +2077,8 @@ void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl, tipc_link_xmit(tnl, &tnlq, xmitq); if (mtyp == FAILOVER_MSG) { + struct sk_buff_head *fdefq = &tnl->failover_deferdq; + tnl->drop_point = l->rcv_nxt; tnl->failover_reasm_skb = l->reasm_buf; l->reasm_buf = NULL; diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c index e50b6e71df13b..f672a62a9a52f 100644 --- a/net/tls/tls_device.c +++ b/net/tls/tls_device.c @@ -157,7 +157,7 @@ static void delete_all_records(struct tls_offload_context_tx *offload_ctx) offload_ctx->retransmit_hint = NULL; } -static void tls_icsk_clean_acked(struct sock *sk, u32 acked_seq) +static void tls_tcp_clean_acked(struct sock *sk, u32 acked_seq) { struct tls_context *tls_ctx = tls_get_ctx(sk); struct tls_record_info *info, *temp; @@ -204,7 +204,7 @@ void tls_device_sk_destruct(struct sock *sk) destroy_record(ctx->open_record); delete_all_records(ctx); crypto_free_aead(ctx->aead_send); - clean_acked_data_disable(inet_csk(sk)); + clean_acked_data_disable(tcp_sk(sk)); } tls_device_queue_ctx_destruction(tls_ctx); @@ -1126,7 +1126,7 @@ int tls_set_device_offload(struct sock *sk) start_marker_record->num_frags = 0; list_add_tail(&start_marker_record->list, &offload_ctx->records_list); - clean_acked_data_enable(inet_csk(sk), &tls_icsk_clean_acked); + clean_acked_data_enable(tcp_sk(sk), &tls_tcp_clean_acked); ctx->push_pending_record = tls_device_push_pending_record; /* TLS offload is greatly simplified if we don't send @@ -1172,7 +1172,7 @@ int tls_set_device_offload(struct sock *sk) release_lock: up_read(&device_offload_lock); - clean_acked_data_disable(inet_csk(sk)); + clean_acked_data_disable(tcp_sk(sk)); crypto_free_aead(offload_ctx->aead_send); free_offload_ctx: kfree(offload_ctx); diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c index 99ca4465f7021..cb86b0bf9a53e 100644 --- a/net/tls/tls_main.c +++ b/net/tls/tls_main.c @@ -1057,7 +1057,7 @@ static u16 tls_user_config(struct tls_context *ctx, bool tx) return 0; } -static int tls_get_info(struct sock *sk, struct sk_buff *skb) +static int tls_get_info(struct sock *sk, struct sk_buff *skb, bool net_admin) { u16 version, cipher_type; struct tls_context *ctx; @@ -1115,7 +1115,7 @@ static int tls_get_info(struct sock *sk, struct sk_buff *skb) return err; } -static size_t tls_get_info_size(const struct sock *sk) +static size_t tls_get_info_size(const struct sock *sk, bool net_admin) { size_t size = 0; diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 34945de1fb1fa..f78a2492826f9 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -77,46 +77,37 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt -#include -#include -#include -#include -#include 
-#include -#include +#include +#include #include -#include -#include -#include +#include #include +#include #include -#include -#include -#include -#include #include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include #include -#include -#include +#include #include -#include +#include +#include +#include +#include #include +#include +#include +#include +#include #include -#include -#include -#include -#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "af_unix.h" static atomic_long_t unix_nr_socks; static struct hlist_head bsd_socket_buckets[UNIX_HASH_SIZE / 2]; @@ -1508,7 +1499,6 @@ static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr, } static long unix_wait_for_peer(struct sock *other, long timeo) - __releases(&unix_sk(other)->lock) { struct unix_sock *u = unix_sk(other); int sched; @@ -2102,6 +2092,7 @@ static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg, goto out_sock_put; } + sock_put(other); goto lookup; } diff --git a/net/unix/af_unix.h b/net/unix/af_unix.h new file mode 100644 index 0000000000000..ed4aedc428139 --- /dev/null +++ b/net/unix/af_unix.h @@ -0,0 +1,75 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __AF_UNIX_H +#define __AF_UNIX_H + +#include + +#define UNIX_HASH_MOD (256 - 1) +#define UNIX_HASH_SIZE (256 * 2) +#define UNIX_HASH_BITS 8 + +#define unix_state_lock(s) spin_lock(&unix_sk(s)->lock) +#define unix_state_unlock(s) spin_unlock(&unix_sk(s)->lock) + +struct sock *unix_peer_get(struct sock *sk); + +struct unix_skb_parms { + struct pid *pid; /* skb credentials */ + kuid_t uid; + kgid_t gid; + struct scm_fp_list *fp; /* Passed files */ +#ifdef CONFIG_SECURITY_NETWORK + u32 secid; /* Security ID */ +#endif + u32 consumed; +} __randomize_layout; + +#define UNIXCB(skb) (*(struct unix_skb_parms *)&((skb)->cb)) + +/* GC for SCM_RIGHTS */ +extern unsigned int unix_tot_inflight; +void unix_add_edges(struct scm_fp_list *fpl, struct unix_sock *receiver); +void unix_del_edges(struct scm_fp_list *fpl); +void unix_update_edges(struct unix_sock *receiver); +int unix_prepare_fpl(struct scm_fp_list *fpl); +void unix_destroy_fpl(struct scm_fp_list *fpl); +void unix_gc(void); +void wait_for_unix_gc(struct scm_fp_list *fpl); + +/* SOCK_DIAG */ +long unix_inq_len(struct sock *sk); +long unix_outq_len(struct sock *sk); + +/* sysctl */ +#ifdef CONFIG_SYSCTL +int unix_sysctl_register(struct net *net); +void unix_sysctl_unregister(struct net *net); +#else +static inline int unix_sysctl_register(struct net *net) +{ + return 0; +} + +static inline void unix_sysctl_unregister(struct net *net) +{ +} +#endif + +/* BPF SOCKMAP */ +int __unix_dgram_recvmsg(struct sock *sk, struct msghdr *msg, size_t size, int flags); +int __unix_stream_recvmsg(struct sock *sk, struct msghdr *msg, size_t size, int flags); + +#ifdef CONFIG_BPF_SYSCALL +extern struct proto unix_dgram_proto; +extern struct proto unix_stream_proto; + +int unix_dgram_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore); +int unix_stream_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore); +void __init unix_bpf_build_proto(void); +#else +static inline void __init unix_bpf_build_proto(void) +{ +} +#endif + +#endif diff --git a/net/unix/diag.c b/net/unix/diag.c index 9138af8b465e0..79b182d0e62ae 100644 --- a/net/unix/diag.c +++ b/net/unix/diag.c @@ -1,15 +1,17 @@ // SPDX-License-Identifier: GPL-2.0-only -#include -#include -#include -#include -#include 
+ +#include #include -#include -#include +#include +#include +#include +#include #include +#include #include -#include +#include + +#include "af_unix.h" static int sk_diag_dump_name(struct sock *sk, struct sk_buff *nlskb) { diff --git a/net/unix/garbage.c b/net/unix/garbage.c index 9848b7b787017..01e2b9452c75b 100644 --- a/net/unix/garbage.c +++ b/net/unix/garbage.c @@ -63,24 +63,33 @@ * wrt receive and holding up unrelated socket operations. */ -#include -#include -#include -#include -#include #include +#include #include -#include -#include -#include -#include -#include - -#include +#include +#include #include #include #include +#include "af_unix.h" + +struct unix_vertex { + struct list_head edges; + struct list_head entry; + struct list_head scc_entry; + unsigned long out_degree; + unsigned long index; + unsigned long scc_index; +}; + +struct unix_edge { + struct unix_sock *predecessor; + struct unix_sock *successor; + struct list_head vertex_entry; + struct list_head stack_entry; +}; + struct unix_sock *unix_get_socket(struct file *filp) { struct inode *inode = file_inode(filp); diff --git a/net/unix/sysctl_net_unix.c b/net/unix/sysctl_net_unix.c index 357b3e5f38472..e02ed6e3955c0 100644 --- a/net/unix/sysctl_net_unix.c +++ b/net/unix/sysctl_net_unix.c @@ -5,11 +5,13 @@ * Authors: Mike Shaver. */ -#include #include +#include #include - #include +#include + +#include "af_unix.h" static struct ctl_table unix_table[] = { { diff --git a/net/unix/unix_bpf.c b/net/unix/unix_bpf.c index bca2d86ba97d8..e0d30d6d22acb 100644 --- a/net/unix/unix_bpf.c +++ b/net/unix/unix_bpf.c @@ -1,11 +1,12 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2021 Cong Wang */ -#include #include -#include +#include #include +#include "af_unix.h" + #define unix_sk_has_data(__sk, __psock) \ ({ !skb_queue_empty(&__sk->sk_receive_queue) || \ !skb_queue_empty(&__psock->ingress_skb) || \ diff --git a/net/wireless/chan.c b/net/wireless/chan.c index 9f918b77b40e2..193734b7f9dc5 100644 --- a/net/wireless/chan.c +++ b/net/wireless/chan.c @@ -6,7 +6,7 @@ * * Copyright 2009 Johannes Berg * Copyright 2013-2014 Intel Mobile Communications GmbH - * Copyright 2018-2024 Intel Corporation + * Copyright 2018-2025 Intel Corporation */ #include @@ -55,11 +55,6 @@ void cfg80211_chandef_create(struct cfg80211_chan_def *chandef, } EXPORT_SYMBOL(cfg80211_chandef_create); -static int cfg80211_chandef_get_width(const struct cfg80211_chan_def *c) -{ - return nl80211_chan_width_to_mhz(c->width); -} - static u32 cfg80211_get_start_freq(const struct cfg80211_chan_def *chandef, u32 cf) { @@ -1497,6 +1492,12 @@ bool cfg80211_reg_check_beaconing(struct wiphy *wiphy, if (cfg->reg_power == IEEE80211_REG_VLP_AP) permitting_flags |= IEEE80211_CHAN_ALLOW_6GHZ_VLP_AP; + if ((cfg->iftype == NL80211_IFTYPE_P2P_GO || + cfg->iftype == NL80211_IFTYPE_AP) && + (chandef->width == NL80211_CHAN_WIDTH_20_NOHT || + chandef->width == NL80211_CHAN_WIDTH_20)) + permitting_flags |= IEEE80211_CHAN_ALLOW_20MHZ_ACTIVITY; + return _cfg80211_reg_can_beacon(wiphy, chandef, cfg->iftype, check_no_ir ? 
IEEE80211_CHAN_NO_IR : 0, permitting_flags); diff --git a/net/wireless/core.c b/net/wireless/core.c index 12b780de8779c..9e6b319031210 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c @@ -5,7 +5,7 @@ * Copyright 2006-2010 Johannes Berg * Copyright 2013-2014 Intel Mobile Communications GmbH * Copyright 2015-2017 Intel Deutschland GmbH - * Copyright (C) 2018-2024 Intel Corporation + * Copyright (C) 2018-2025 Intel Corporation */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt @@ -162,11 +162,11 @@ int cfg80211_switch_netns(struct cfg80211_registered_device *rdev, list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) { if (!wdev->netdev) continue; - wdev->netdev->netns_local = false; + wdev->netdev->netns_immutable = false; err = dev_change_net_namespace(wdev->netdev, net, "wlan%d"); if (err) break; - wdev->netdev->netns_local = true; + wdev->netdev->netns_immutable = true; } if (err) { @@ -178,11 +178,11 @@ int cfg80211_switch_netns(struct cfg80211_registered_device *rdev, list) { if (!wdev->netdev) continue; - wdev->netdev->netns_local = false; + wdev->netdev->netns_immutable = false; err = dev_change_net_namespace(wdev->netdev, net, "wlan%d"); WARN_ON(err); - wdev->netdev->netns_local = true; + wdev->netdev->netns_immutable = true; } return err; @@ -546,6 +546,9 @@ struct wiphy *wiphy_new_nm(const struct cfg80211_ops *ops, int sizeof_priv, INIT_WORK(&rdev->mgmt_registrations_update_wk, cfg80211_mgmt_registrations_update_wk); spin_lock_init(&rdev->mgmt_registrations_lock); + INIT_WORK(&rdev->wiphy_work, cfg80211_wiphy_work); + INIT_LIST_HEAD(&rdev->wiphy_work_list); + spin_lock_init(&rdev->wiphy_work_lock); #ifdef CONFIG_CFG80211_DEFAULT_PS rdev->wiphy.flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT; @@ -563,9 +566,6 @@ struct wiphy *wiphy_new_nm(const struct cfg80211_ops *ops, int sizeof_priv, return NULL; } - INIT_WORK(&rdev->wiphy_work, cfg80211_wiphy_work); - INIT_LIST_HEAD(&rdev->wiphy_work_list); - spin_lock_init(&rdev->wiphy_work_lock); INIT_WORK(&rdev->rfkill_block, cfg80211_rfkill_block_work); INIT_WORK(&rdev->conn_work, cfg80211_conn_work); INIT_WORK(&rdev->event_work, cfg80211_event_work); @@ -793,6 +793,7 @@ int wiphy_register(struct wiphy *wiphy) BIT(NL80211_CHAN_WIDTH_80) | BIT(NL80211_CHAN_WIDTH_80P80) | BIT(NL80211_CHAN_WIDTH_160) | + BIT(NL80211_CHAN_WIDTH_320) | BIT(NL80211_CHAN_WIDTH_5) | BIT(NL80211_CHAN_WIDTH_10)))) return -EINVAL; @@ -1191,6 +1192,13 @@ void cfg80211_dev_free(struct cfg80211_registered_device *rdev) { struct cfg80211_internal_bss *scan, *tmp; struct cfg80211_beacon_registration *reg, *treg; + unsigned long flags; + + spin_lock_irqsave(&rdev->wiphy_work_lock, flags); + WARN_ON(!list_empty(&rdev->wiphy_work_list)); + spin_unlock_irqrestore(&rdev->wiphy_work_lock, flags); + cancel_work_sync(&rdev->wiphy_work); + rfkill_destroy(rdev->wiphy.rfkill); list_for_each_entry_safe(reg, treg, &rdev->beacon_registrations, list) { list_del(®->list); @@ -1513,7 +1521,7 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb, SET_NETDEV_DEVTYPE(dev, &wiphy_type); wdev->netdev = dev; /* can only change netns with wiphy */ - dev->netns_local = true; + dev->netns_immutable = true; cfg80211_init_wdev(wdev); break; diff --git a/net/wireless/core.h b/net/wireless/core.h index 826299f3d7813..c56a35040caa1 100644 --- a/net/wireless/core.h +++ b/net/wireless/core.h @@ -3,7 +3,7 @@ * Wireless configuration interface internals. 
* * Copyright 2006-2010 Johannes Berg - * Copyright (C) 2018-2024 Intel Corporation + * Copyright (C) 2018-2025 Intel Corporation */ #ifndef __NET_WIRELESS_CORE_H #define __NET_WIRELESS_CORE_H @@ -180,7 +180,6 @@ struct cfg80211_internal_bss { struct list_head list; struct list_head hidden_list; struct rb_node rbn; - u64 ts_boottime; unsigned long ts; unsigned long refcount; atomic_t hold; @@ -569,8 +568,8 @@ void cfg80211_wdev_release_link_bsses(struct wireless_dev *wdev, u16 link_mask); int cfg80211_assoc_ml_reconf(struct cfg80211_registered_device *rdev, struct net_device *dev, - struct cfg80211_assoc_link *links, - u16 rem_links); + struct cfg80211_ml_reconf_req *req); + /** * struct cfg80211_colocated_ap - colocated AP information * diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c index e10f2b3b4b7f6..05d44a4435189 100644 --- a/net/wireless/mlme.c +++ b/net/wireless/mlme.c @@ -4,7 +4,7 @@ * * Copyright (c) 2009, Jouni Malinen * Copyright (c) 2015 Intel Deutschland GmbH - * Copyright (C) 2019-2020, 2022-2024 Intel Corporation + * Copyright (C) 2019-2020, 2022-2025 Intel Corporation */ #include @@ -1297,25 +1297,24 @@ void cfg80211_stop_background_radar_detection(struct wireless_dev *wdev) int cfg80211_assoc_ml_reconf(struct cfg80211_registered_device *rdev, struct net_device *dev, - struct cfg80211_assoc_link *links, - u16 rem_links) + struct cfg80211_ml_reconf_req *req) { struct wireless_dev *wdev = dev->ieee80211_ptr; int err; lockdep_assert_wiphy(wdev->wiphy); - err = rdev_assoc_ml_reconf(rdev, dev, links, rem_links); + err = rdev_assoc_ml_reconf(rdev, dev, req); if (!err) { int link_id; for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) { - if (!links[link_id].bss) + if (!req->add_links[link_id].bss) continue; - cfg80211_ref_bss(&rdev->wiphy, links[link_id].bss); - cfg80211_hold_bss(bss_from_pub(links[link_id].bss)); + cfg80211_ref_bss(&rdev->wiphy, req->add_links[link_id].bss); + cfg80211_hold_bss(bss_from_pub(req->add_links[link_id].bss)); } } @@ -1361,6 +1360,10 @@ void cfg80211_mlo_reconf_add_done(struct net_device *dev, if (data->added_links & BIT(link_id)) { wdev->links[link_id].client.current_bss = bss_from_pub(bss); + + memcpy(wdev->links[link_id].addr, + data->links[link_id].addr, + ETH_ALEN); } else { cfg80211_unhold_bss(bss_from_pub(bss)); cfg80211_put_bss(wiphy, bss); diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index d7d3da0f6833d..f039a7d0d6f73 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -5,7 +5,7 @@ * Copyright 2006-2010 Johannes Berg * Copyright 2013-2014 Intel Mobile Communications GmbH * Copyright 2015-2017 Intel Deutschland GmbH - * Copyright (C) 2018-2024 Intel Corporation + * Copyright (C) 2018-2025 Intel Corporation */ #include @@ -850,6 +850,7 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = { NL80211_MAX_SUPP_SELECTORS), [NL80211_ATTR_MLO_RECONF_REM_LINKS] = { .type = NLA_U16 }, [NL80211_ATTR_EPCS] = { .type = NLA_FLAG }, + [NL80211_ATTR_ASSOC_MLD_EXT_CAPA_OPS] = { .type = NLA_U16 }, }; /* policy for the key attributes */ @@ -1234,6 +1235,10 @@ static int nl80211_msg_put_channel(struct sk_buff *msg, struct wiphy *wiphy, if ((chan->flags & IEEE80211_CHAN_ALLOW_6GHZ_VLP_AP) && nla_put_flag(msg, NL80211_FREQUENCY_ATTR_ALLOW_6GHZ_VLP_AP)) goto nla_put_failure; + if ((chan->flags & IEEE80211_CHAN_ALLOW_20MHZ_ACTIVITY) && + nla_put_flag(msg, + NL80211_FREQUENCY_ATTR_ALLOW_20MHZ_ACTIVITY)) + goto nla_put_failure; } if (nla_put_u32(msg, NL80211_FREQUENCY_ATTR_MAX_TX_POWER, @@ -2768,6 
+2773,7 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *rdev, CMD(update_ft_ies, UPDATE_FT_IES); if (rdev->wiphy.sar_capa) CMD(set_sar_specs, SET_SAR_SPECS); + CMD(assoc_ml_reconf, ASSOC_MLO_RECONF); } #undef CMD @@ -4220,6 +4226,11 @@ static int parse_monitor_flags(struct nlattr *nla, u32 *mntrflags) if (flags[flag]) *mntrflags |= (1<flags & MONITOR_FLAG_COOK_FRAMES) + return -EOPNOTSUPP; + if (params->flags & MONITOR_FLAG_ACTIVE && !(rdev->wiphy.features & NL80211_FEATURE_ACTIVE_MONITOR)) return -EOPNOTSUPP; @@ -6746,9 +6761,6 @@ static int nl80211_send_station(struct sk_buff *msg, u32 cmd, u32 portid, PUT_SINFO_U64(RX_BYTES64, rx_bytes); PUT_SINFO_U64(TX_BYTES64, tx_bytes); - PUT_SINFO(LLID, llid, u16); - PUT_SINFO(PLID, plid, u16); - PUT_SINFO(PLINK_STATE, plink_state, u8); PUT_SINFO_U64(RX_DURATION, rx_duration); PUT_SINFO_U64(TX_DURATION, tx_duration); @@ -6792,13 +6804,18 @@ static int nl80211_send_station(struct sk_buff *msg, u32 cmd, u32 portid, PUT_SINFO(TX_RETRIES, tx_retries, u32); PUT_SINFO(TX_FAILED, tx_failed, u32); PUT_SINFO(EXPECTED_THROUGHPUT, expected_throughput, u32); - PUT_SINFO(AIRTIME_LINK_METRIC, airtime_link_metric, u32); PUT_SINFO(BEACON_LOSS, beacon_loss_count, u32); + + PUT_SINFO(LLID, llid, u16); + PUT_SINFO(PLID, plid, u16); + PUT_SINFO(PLINK_STATE, plink_state, u8); + PUT_SINFO(AIRTIME_LINK_METRIC, airtime_link_metric, u32); PUT_SINFO(LOCAL_PM, local_pm, u32); PUT_SINFO(PEER_PM, peer_pm, u32); PUT_SINFO(NONPEER_PM, nonpeer_pm, u32); PUT_SINFO(CONNECTED_TO_GATE, connected_to_gate, u8); PUT_SINFO(CONNECTED_TO_AS, connected_to_as, u8); + PUT_SINFO_U64(T_OFFSET, t_offset); if (sinfo->filled & BIT_ULL(NL80211_STA_INFO_BSS_PARAM)) { bss_param = nla_nest_start_noflag(msg, @@ -6826,7 +6843,6 @@ static int nl80211_send_station(struct sk_buff *msg, u32 cmd, u32 portid, &sinfo->sta_flags)) goto nla_put_failure; - PUT_SINFO_U64(T_OFFSET, t_offset); PUT_SINFO_U64(RX_DROP_MISC, rx_dropped_misc); PUT_SINFO_U64(BEACON_RX, rx_beacon); PUT_SINFO(BEACON_SIGNAL_AVG, rx_beacon_signal_avg, u8); @@ -10167,7 +10183,7 @@ static int nl80211_start_radar_detection(struct sk_buff *skb, switch (wdev->iftype) { case NL80211_IFTYPE_AP: case NL80211_IFTYPE_P2P_GO: - wdev->links[0].ap.chandef = chandef; + wdev->links[link_id].ap.chandef = chandef; break; case NL80211_IFTYPE_ADHOC: wdev->u.ibss.chandef = chandef; @@ -10515,9 +10531,9 @@ static int nl80211_send_bss(struct sk_buff *msg, struct netlink_callback *cb, intbss->parent_bssid))) goto nla_put_failure; - if (intbss->ts_boottime && + if (res->ts_boottime && nla_put_u64_64bit(msg, NL80211_BSS_LAST_SEEN_BOOTTIME, - intbss->ts_boottime, NL80211_BSS_PAD)) + res->ts_boottime, NL80211_BSS_PAD)) goto nla_put_failure; if (!nl80211_put_signal(msg, intbss->pub.chains, @@ -11118,6 +11134,7 @@ static struct cfg80211_bss *nl80211_assoc_bss(struct cfg80211_registered_device static int nl80211_process_links(struct cfg80211_registered_device *rdev, struct cfg80211_assoc_link *links, + int assoc_link_id, const u8 *ssid, int ssid_len, struct genl_info *info) { @@ -11148,7 +11165,7 @@ static int nl80211_process_links(struct cfg80211_registered_device *rdev, } links[link_id].bss = nl80211_assoc_bss(rdev, ssid, ssid_len, attrs, - link_id, link_id); + assoc_link_id, link_id); if (IS_ERR(links[link_id].bss)) { err = PTR_ERR(links[link_id].bss); links[link_id].bss = NULL; @@ -11345,8 +11362,8 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info) req.ap_mld_addr = nla_data(info->attrs[NL80211_ATTR_MLD_ADDR]); ap_addr = 
req.ap_mld_addr; - err = nl80211_process_links(rdev, req.links, ssid, ssid_len, - info); + err = nl80211_process_links(rdev, req.links, req.link_id, + ssid, ssid_len, info); if (err) goto free; @@ -11368,6 +11385,10 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info) err = -EINVAL; goto free; } + + if (info->attrs[NL80211_ATTR_ASSOC_MLD_EXT_CAPA_OPS]) + req.ext_mld_capa_ops = + nla_get_u16(info->attrs[NL80211_ATTR_ASSOC_MLD_EXT_CAPA_OPS]); } else { if (req.link_id >= 0) return -EINVAL; @@ -11377,6 +11398,9 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info) if (IS_ERR(req.bss)) return PTR_ERR(req.bss); ap_addr = req.bss->bssid; + + if (info->attrs[NL80211_ATTR_ASSOC_MLD_EXT_CAPA_OPS]) + return -EINVAL; } err = nl80211_crypto_settings(rdev, info, &req.crypto, 1); @@ -16483,9 +16507,9 @@ static int nl80211_assoc_ml_reconf(struct sk_buff *skb, struct genl_info *info) struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct net_device *dev = info->user_ptr[1]; struct wireless_dev *wdev = dev->ieee80211_ptr; - struct cfg80211_assoc_link links[IEEE80211_MLD_MAX_NUM_LINKS] = {}; + struct cfg80211_ml_reconf_req req = {}; unsigned int link_id; - u16 add_links, rem_links; + u16 add_links; int err; if (!wdev->valid_links) @@ -16501,39 +16525,44 @@ static int nl80211_assoc_ml_reconf(struct sk_buff *skb, struct genl_info *info) add_links = 0; if (info->attrs[NL80211_ATTR_MLO_LINKS]) { - err = nl80211_process_links(rdev, links, NULL, 0, info); + err = nl80211_process_links(rdev, req.add_links, + /* mark as MLO, but not assoc */ + IEEE80211_MLD_MAX_NUM_LINKS, + NULL, 0, info); if (err) return err; for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) { - if (!links[link_id].bss) + if (!req.add_links[link_id].bss) continue; add_links |= BIT(link_id); } } if (info->attrs[NL80211_ATTR_MLO_RECONF_REM_LINKS]) - rem_links = + req.rem_links = nla_get_u16(info->attrs[NL80211_ATTR_MLO_RECONF_REM_LINKS]); - else - rem_links = 0; /* Validate that existing links are not added, removed links are valid * and don't allow adding and removing the same links */ - if ((add_links & rem_links) || !(add_links | rem_links) || + if ((add_links & req.rem_links) || !(add_links | req.rem_links) || (wdev->valid_links & add_links) || - ((wdev->valid_links & rem_links) != rem_links)) { + ((wdev->valid_links & req.rem_links) != req.rem_links)) { err = -EINVAL; goto out; } - err = cfg80211_assoc_ml_reconf(rdev, dev, links, rem_links); + if (info->attrs[NL80211_ATTR_ASSOC_MLD_EXT_CAPA_OPS]) + req.ext_mld_capa_ops = + nla_get_u16(info->attrs[NL80211_ATTR_ASSOC_MLD_EXT_CAPA_OPS]); + + err = cfg80211_assoc_ml_reconf(rdev, dev, &req); out: - for (link_id = 0; link_id < ARRAY_SIZE(links); link_id++) - cfg80211_put_bss(&rdev->wiphy, links[link_id].bss); + for (link_id = 0; link_id < ARRAY_SIZE(req.add_links); link_id++) + cfg80211_put_bss(&rdev->wiphy, req.add_links[link_id].bss); return err; } diff --git a/net/wireless/rdev-ops.h b/net/wireless/rdev-ops.h index 759da16233428..9f4783c2354c9 100644 --- a/net/wireless/rdev-ops.h +++ b/net/wireless/rdev-ops.h @@ -2,7 +2,7 @@ /* * Portions of this file * Copyright(c) 2016-2017 Intel Deutschland GmbH - * Copyright (C) 2018, 2021-2024 Intel Corporation + * Copyright (C) 2018, 2021-2025 Intel Corporation */ #ifndef __CFG80211_RDEV_OPS #define __CFG80211_RDEV_OPS @@ -1551,16 +1551,14 @@ rdev_get_radio_mask(struct cfg80211_registered_device *rdev, static inline int rdev_assoc_ml_reconf(struct cfg80211_registered_device *rdev, 
struct net_device *dev, - struct cfg80211_assoc_link *add_links, - u16 rem_links) + struct cfg80211_ml_reconf_req *req) { struct wiphy *wiphy = &rdev->wiphy; int ret = -EOPNOTSUPP; - trace_rdev_assoc_ml_reconf(wiphy, dev, add_links, rem_links); + trace_rdev_assoc_ml_reconf(wiphy, dev, req); if (rdev->ops->assoc_ml_reconf) - ret = rdev->ops->assoc_ml_reconf(wiphy, dev, add_links, - rem_links); + ret = rdev->ops->assoc_ml_reconf(wiphy, dev, req); trace_rdev_return_int(wiphy, ret); return ret; diff --git a/net/wireless/reg.c b/net/wireless/reg.c index 2dd0533e76605..c1752b31734fa 100644 --- a/net/wireless/reg.c +++ b/net/wireless/reg.c @@ -5,7 +5,7 @@ * Copyright 2008-2011 Luis R. Rodriguez * Copyright 2013-2014 Intel Mobile Communications GmbH * Copyright 2017 Intel Deutschland GmbH - * Copyright (C) 2018 - 2024 Intel Corporation + * Copyright (C) 2018 - 2025 Intel Corporation * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -407,7 +407,8 @@ static bool is_an_alpha2(const char *alpha2) { if (!alpha2) return false; - return isalpha(alpha2[0]) && isalpha(alpha2[1]); + return isascii(alpha2[0]) && isalpha(alpha2[0]) && + isascii(alpha2[1]) && isalpha(alpha2[1]); } static bool alpha2_equal(const char *alpha2_x, const char *alpha2_y) @@ -1602,6 +1603,8 @@ static u32 map_regdom_flags(u32 rd_flags) channel_flags |= IEEE80211_CHAN_PSD; if (rd_flags & NL80211_RRF_ALLOW_6GHZ_VLP_AP) channel_flags |= IEEE80211_CHAN_ALLOW_6GHZ_VLP_AP; + if (rd_flags & NL80211_RRF_ALLOW_20MHZ_ACTIVITY) + channel_flags |= IEEE80211_CHAN_ALLOW_20MHZ_ACTIVITY; return channel_flags; } diff --git a/net/wireless/scan.c b/net/wireless/scan.c index cd21243295210..9865f305275da 100644 --- a/net/wireless/scan.c +++ b/net/wireless/scan.c @@ -5,7 +5,7 @@ * Copyright 2008 Johannes Berg * Copyright 2013-2014 Intel Mobile Communications GmbH * Copyright 2016 Intel Deutschland GmbH - * Copyright (C) 2018-2024 Intel Corporation + * Copyright (C) 2018-2025 Intel Corporation */ #include #include @@ -1365,7 +1365,7 @@ void cfg80211_bss_age(struct cfg80211_registered_device *rdev, unsigned long age_secs) { struct cfg80211_internal_bss *bss; - unsigned long age_jiffies = msecs_to_jiffies(age_secs * MSEC_PER_SEC); + unsigned long age_jiffies = secs_to_jiffies(age_secs); spin_lock_bh(&rdev->bss_lock); list_for_each_entry(bss, &rdev->bss_list, list) @@ -1934,7 +1934,7 @@ cfg80211_update_known_bss(struct cfg80211_registered_device *rdev, known->pub.signal = new->pub.signal; known->pub.capability = new->pub.capability; known->ts = new->ts; - known->ts_boottime = new->ts_boottime; + known->pub.ts_boottime = new->pub.ts_boottime; known->parent_tsf = new->parent_tsf; known->pub.chains = new->pub.chains; memcpy(known->pub.chain_signal, new->pub.chain_signal, @@ -2291,7 +2291,7 @@ cfg80211_inform_single_bss_data(struct wiphy *wiphy, tmp.pub.signal = 0; tmp.pub.beacon_interval = data->beacon_interval; tmp.pub.capability = data->capability; - tmp.ts_boottime = drv_data->boottime_ns; + tmp.pub.ts_boottime = drv_data->boottime_ns; tmp.parent_tsf = drv_data->parent_tsf; ether_addr_copy(tmp.parent_bssid, drv_data->parent_bssid); tmp.pub.chains = drv_data->chains; diff --git a/net/wireless/trace.h b/net/wireless/trace.h index 4f0abd5d49df8..4ed9fada4ec0e 100644 --- a/net/wireless/trace.h +++ b/net/wireless/trace.h @@ -2,7 +2,7 @@ /* * Portions of this file * Copyright(c) 2016-2017 Intel Deutschland GmbH - * Copyright (C) 2018, 2020-2024 Intel Corporation + 
* Copyright (C) 2018, 2020-2025 Intel Corporation */ #undef TRACE_SYSTEM #define TRACE_SYSTEM cfg80211 @@ -1378,6 +1378,7 @@ TRACE_EVENT(rdev_assoc, __dynamic_array(u8, fils_kek, req->fils_kek_len) __dynamic_array(u8, fils_nonces, req->fils_nonces ? 2 * FILS_NONCE_LEN : 0) + __field(u16, ext_mld_capa_ops) ), TP_fast_assign( WIPHY_ASSIGN; @@ -1404,6 +1405,7 @@ TRACE_EVENT(rdev_assoc, if (req->fils_nonces) memcpy(__get_dynamic_array(fils_nonces), req->fils_nonces, 2 * FILS_NONCE_LEN); + __entry->ext_mld_capa_ops = req->ext_mld_capa_ops; ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", bssid: %pM" ", previous bssid: %pM, use mfp: %s, flags: 0x%x", @@ -4118,7 +4120,7 @@ TRACE_EVENT(cfg80211_links_removed, NETDEV_ASSIGN; __entry->link_mask = link_mask; ), - TP_printk(NETDEV_PR_FMT ", link_mask:%u", NETDEV_PR_ARG, + TP_printk(NETDEV_PR_FMT ", link_mask:0x%x", NETDEV_PR_ARG, __entry->link_mask) ); @@ -4142,14 +4144,14 @@ TRACE_EVENT(cfg80211_mlo_reconf_add_done, TRACE_EVENT(rdev_assoc_ml_reconf, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, - struct cfg80211_assoc_link *add_links, - u16 rem_links), - TP_ARGS(wiphy, netdev, add_links, rem_links), + struct cfg80211_ml_reconf_req *req), + TP_ARGS(wiphy, netdev, req), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY __field(u16, add_links) __field(u16, rem_links) + __field(u16, ext_mld_capa_ops) ), TP_fast_assign( WIPHY_ASSIGN; @@ -4157,10 +4159,11 @@ TRACE_EVENT(rdev_assoc_ml_reconf, u32 i; __entry->add_links = 0; - __entry->rem_links = rem_links; - for (i = 0; add_links && i < IEEE80211_MLD_MAX_NUM_LINKS; i++) - if (add_links[i].bss) + __entry->rem_links = req->rem_links; + for (i = 0; i < IEEE80211_MLD_MAX_NUM_LINKS; i++) + if (req->add_links[i].bss) __entry->add_links |= BIT(i); + __entry->ext_mld_capa_ops = req->ext_mld_capa_ops; ), TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", add_links=0x%x, rem_links=0x%x", WIPHY_PR_ARG, NETDEV_PR_ARG, diff --git a/net/wireless/util.c b/net/wireless/util.c index 60157943d3519..ed868c0f7ca8e 100644 --- a/net/wireless/util.c +++ b/net/wireless/util.c @@ -5,7 +5,7 @@ * Copyright 2007-2009 Johannes Berg * Copyright 2013-2014 Intel Mobile Communications GmbH * Copyright 2017 Intel Deutschland GmbH - * Copyright (C) 2018-2023 Intel Corporation + * Copyright (C) 2018-2023, 2025 Intel Corporation */ #include #include @@ -2908,7 +2908,7 @@ bool cfg80211_radio_chandef_valid(const struct wiphy_radio *radio, u32 freq, width; freq = ieee80211_chandef_to_khz(chandef); - width = nl80211_chan_width_to_mhz(chandef->width); + width = cfg80211_chandef_get_width(chandef); if (!ieee80211_radio_freq_range_valid(radio, freq, width)) return false; diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c index 89d2bef964698..de8bf97b2cb9f 100644 --- a/net/xdp/xsk.c +++ b/net/xdp/xsk.c @@ -25,6 +25,7 @@ #include #include #include +#include #include #include @@ -309,6 +310,18 @@ static bool xsk_is_bound(struct xdp_sock *xs) return false; } +static void __xsk_mark_napi_id_once(struct sock *sk, struct net_device *dev, u32 qid) +{ + struct netdev_rx_queue *rxq; + + if (qid >= dev->real_num_rx_queues) + return; + + rxq = __netif_get_rx_queue(dev, qid); + if (rxq->napi) + __sk_mark_napi_id_once(sk, rxq->napi->napi_id); +} + static int xsk_rcv_check(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len) { if (!xsk_is_bound(xs)) @@ -322,6 +335,7 @@ static int xsk_rcv_check(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len) return -ENOSPC; } + __xsk_mark_napi_id_once(&xs->sk, xs->dev, xs->queue_id); return 0; } @@ -742,6 +756,9 @@ static struct sk_buff 
*xsk_build_skb(struct xdp_sock *xs, goto free_err; } } + + if (meta->flags & XDP_TXMD_FLAGS_LAUNCH_TIME) + skb->skb_mstamp_ns = meta->request.launch_time; } } @@ -875,7 +892,7 @@ static bool xsk_no_wakeup(struct sock *sk) #ifdef CONFIG_NET_RX_BUSY_POLL /* Prefer busy-polling, skip the wakeup. */ return READ_ONCE(sk->sk_prefer_busy_poll) && READ_ONCE(sk->sk_ll_usec) && - READ_ONCE(sk->sk_napi_id) >= MIN_NAPI_ID; + napi_id_valid(READ_ONCE(sk->sk_napi_id)); #else return false; #endif @@ -1178,6 +1195,8 @@ static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len) goto out_release; } + netdev_lock_ops(dev); + if (!xs->rx && !xs->tx) { err = -EINVAL; goto out_unlock; @@ -1294,13 +1313,8 @@ static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len) xs->queue_id = qid; xp_add_xsk(xs->pool, xs); - if (xs->zc && qid < dev->real_num_rx_queues) { - struct netdev_rx_queue *rxq; - - rxq = __netif_get_rx_queue(dev, qid); - if (rxq->napi) - __sk_mark_napi_id_once(sk, rxq->napi->napi_id); - } + if (xs->zc) + __xsk_mark_napi_id_once(sk, dev, qid); out_unlock: if (err) { @@ -1312,6 +1326,7 @@ static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len) smp_wmb(); WRITE_ONCE(xs->state, XSK_BOUND); } + netdev_unlock_ops(dev); out_release: mutex_unlock(&xs->mutex); rtnl_unlock(); diff --git a/net/xdp/xsk_buff_pool.c b/net/xdp/xsk_buff_pool.c index 1f7975b496579..c7e50fd86c6a4 100644 --- a/net/xdp/xsk_buff_pool.c +++ b/net/xdp/xsk_buff_pool.c @@ -1,5 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 +#include +#include #include #include #include @@ -105,7 +107,7 @@ struct xsk_buff_pool *xp_create_and_assign_umem(struct xdp_sock *xs, if (pool->unaligned) pool->free_heads[i] = xskb; else - xp_init_xskb_addr(xskb, pool, i * pool->chunk_size); + xp_init_xskb_addr(xskb, pool, (u64)i * pool->chunk_size); } return pool; @@ -219,6 +221,7 @@ int xp_assign_dev(struct xsk_buff_pool *pool, bpf.xsk.pool = pool; bpf.xsk.queue_id = queue_id; + netdev_ops_assert_locked(netdev); err = netdev->netdev_ops->ndo_bpf(netdev, &bpf); if (err) goto err_unreg_pool; @@ -276,9 +279,14 @@ static void xp_release_deferred(struct work_struct *work) { struct xsk_buff_pool *pool = container_of(work, struct xsk_buff_pool, work); + struct net_device *netdev = pool->netdev; rtnl_lock(); - xp_clear_dev(pool); + if (netdev) { + netdev_lock_ops(netdev); + xp_clear_dev(pool); + netdev_unlock_ops(netdev); + } rtnl_unlock(); if (pool->fq) { @@ -699,18 +707,56 @@ void xp_free(struct xdp_buff_xsk *xskb) } EXPORT_SYMBOL(xp_free); -void *xp_raw_get_data(struct xsk_buff_pool *pool, u64 addr) +static u64 __xp_raw_get_addr(const struct xsk_buff_pool *pool, u64 addr) +{ + return pool->unaligned ? xp_unaligned_add_offset_to_addr(addr) : addr; +} + +static void *__xp_raw_get_data(const struct xsk_buff_pool *pool, u64 addr) { - addr = pool->unaligned ? xp_unaligned_add_offset_to_addr(addr) : addr; return pool->addrs + addr; } + +void *xp_raw_get_data(struct xsk_buff_pool *pool, u64 addr) +{ + return __xp_raw_get_data(pool, __xp_raw_get_addr(pool, addr)); +} EXPORT_SYMBOL(xp_raw_get_data); -dma_addr_t xp_raw_get_dma(struct xsk_buff_pool *pool, u64 addr) +static dma_addr_t __xp_raw_get_dma(const struct xsk_buff_pool *pool, u64 addr) { - addr = pool->unaligned ? 
xp_unaligned_add_offset_to_addr(addr) : addr; return (pool->dma_pages[addr >> PAGE_SHIFT] & ~XSK_NEXT_PG_CONTIG_MASK) + (addr & ~PAGE_MASK); } + +dma_addr_t xp_raw_get_dma(struct xsk_buff_pool *pool, u64 addr) +{ + return __xp_raw_get_dma(pool, __xp_raw_get_addr(pool, addr)); +} EXPORT_SYMBOL(xp_raw_get_dma); + +/** + * xp_raw_get_ctx - get &xdp_desc context + * @pool: XSk buff pool desc address belongs to + * @addr: desc address (from userspace) + * + * Helper for getting desc's DMA address and metadata pointer, if present. + * Saves one call on hotpath, double calculation of the actual address, + * and inline checks for metadata presence and sanity. + * + * Return: new &xdp_desc_ctx struct containing desc's DMA address and metadata + * pointer, if it is present and valid (initialized to %NULL otherwise). + */ +struct xdp_desc_ctx xp_raw_get_ctx(const struct xsk_buff_pool *pool, u64 addr) +{ + struct xdp_desc_ctx ret; + + addr = __xp_raw_get_addr(pool, addr); + + ret.dma = __xp_raw_get_dma(pool, addr); + ret.meta = __xsk_buff_get_metadata(pool, __xp_raw_get_data(pool, addr)); + + return ret; +} +EXPORT_SYMBOL(xp_raw_get_ctx); diff --git a/net/xfrm/xfrm_device.c b/net/xfrm/xfrm_device.c index d1fa94e52ceae..d62f76161d83e 100644 --- a/net/xfrm/xfrm_device.c +++ b/net/xfrm/xfrm_device.c @@ -244,11 +244,6 @@ int xfrm_dev_state_add(struct net *net, struct xfrm_state *x, xfrm_address_t *daddr; bool is_packet_offload; - if (!x->type_offload) { - NL_SET_ERR_MSG(extack, "Type doesn't support offload"); - return -EINVAL; - } - if (xuo->flags & ~(XFRM_OFFLOAD_IPV6 | XFRM_OFFLOAD_INBOUND | XFRM_OFFLOAD_PACKET)) { NL_SET_ERR_MSG(extack, "Unrecognized flags in offload request"); @@ -310,6 +305,13 @@ int xfrm_dev_state_add(struct net *net, struct xfrm_state *x, return -EINVAL; } + xfrm_set_type_offload(x); + if (!x->type_offload) { + NL_SET_ERR_MSG(extack, "Type doesn't support offload"); + dev_put(dev); + return -EINVAL; + } + xso->dev = dev; netdev_tracker_alloc(dev, &xso->dev_tracker, GFP_ATOMIC); xso->real_dev = dev; @@ -332,6 +334,7 @@ int xfrm_dev_state_add(struct net *net, struct xfrm_state *x, netdev_put(dev, &xso->dev_tracker); xso->type = XFRM_DEV_OFFLOAD_UNSPECIFIED; + xfrm_unset_type_offload(x); /* User explicitly requested packet offload mode and configured * policy in addition to the XFRM state. So be civil to users, * and return an error instead of taking fallback path. 
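As a usage note for the xp_raw_get_ctx() helper documented above: a zero-copy driver's Tx hot path can now fetch a descriptor's DMA address and its optional metadata pointer with a single call, instead of invoking xp_raw_get_dma() and resolving the metadata separately. A sketch under the assumptions stated in the kernel-doc (ctx.meta is NULL unless metadata is present and valid); fill_tx_desc() and apply_launch_time() are hypothetical driver hooks, not real APIs:

/* Hypothetical zero-copy Tx helper built on xp_raw_get_ctx(). */
static void example_xmit_desc(struct xsk_buff_pool *pool,
			      const struct xdp_desc *desc)
{
	struct xdp_desc_ctx ctx = xp_raw_get_ctx(pool, desc->addr);

	/* ctx.dma already has any unaligned-chunk offset applied. */
	fill_tx_desc(ctx.dma, desc->len);

	/* ctx.meta is NULL unless Tx metadata is present and valid. */
	if (ctx.meta)
		apply_launch_time(ctx.meta->request.launch_time);
}

The launch_time field here is the same one the xsk_build_skb() hunk above copies into skb->skb_mstamp_ns for the copy-mode path.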
@@ -415,14 +418,12 @@ bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x) struct dst_entry *dst = skb_dst(skb); struct xfrm_dst *xdst = (struct xfrm_dst *)dst; struct net_device *dev = x->xso.dev; + bool check_tunnel_size; - if (!x->type_offload || - (x->xso.type == XFRM_DEV_OFFLOAD_UNSPECIFIED && x->encap)) + if (x->xso.type == XFRM_DEV_OFFLOAD_UNSPECIFIED) return false; - if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET || - ((!dev || (dev == xfrm_dst_path(dst)->dev)) && - !xdst->child->xfrm)) { + if ((dev == xfrm_dst_path(dst)->dev) && !xdst->child->xfrm) { mtu = xfrm_state_mtu(x, xdst->child_mtu_cached); if (skb->len <= mtu) goto ok; @@ -434,8 +435,29 @@ bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x) return false; ok: - if (dev && dev->xfrmdev_ops && dev->xfrmdev_ops->xdo_dev_offload_ok) - return x->xso.dev->xfrmdev_ops->xdo_dev_offload_ok(skb, x); + check_tunnel_size = x->xso.type == XFRM_DEV_OFFLOAD_PACKET && + x->props.mode == XFRM_MODE_TUNNEL; + switch (x->props.family) { + case AF_INET: + /* Check for IPv4 options */ + if (ip_hdr(skb)->ihl != 5) + return false; + if (check_tunnel_size && xfrm4_tunnel_check_size(skb)) + return false; + break; + case AF_INET6: + /* Check for IPv6 extensions */ + if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr)) + return false; + if (check_tunnel_size && xfrm6_tunnel_check_size(skb)) + return false; + break; + default: + break; + } + + if (dev->xfrmdev_ops->xdo_dev_offload_ok) + return dev->xfrmdev_ops->xdo_dev_offload_ok(skb, x); return true; } diff --git a/net/xfrm/xfrm_interface_core.c b/net/xfrm/xfrm_interface_core.c index c397eb99d8679..622445f041d32 100644 --- a/net/xfrm/xfrm_interface_core.c +++ b/net/xfrm/xfrm_interface_core.c @@ -242,10 +242,9 @@ static void xfrmi_dev_free(struct net_device *dev) gro_cells_destroy(&xi->gro_cells); } -static int xfrmi_create(struct net_device *dev) +static int xfrmi_create(struct net *net, struct net_device *dev) { struct xfrm_if *xi = netdev_priv(dev); - struct net *net = dev_net(dev); struct xfrmi_net *xfrmn = net_generic(net, xfrmi_net_id); int err; @@ -814,15 +813,17 @@ static void xfrmi_netlink_parms(struct nlattr *data[], parms->collect_md = true; } -static int xfrmi_newlink(struct net *src_net, struct net_device *dev, - struct nlattr *tb[], struct nlattr *data[], - struct netlink_ext_ack *extack) +static int xfrmi_newlink(struct net_device *dev, + struct rtnl_newlink_params *params, + struct netlink_ext_ack *extack) { - struct net *net = dev_net(dev); + struct nlattr **data = params->data; struct xfrm_if_parms p = {}; struct xfrm_if *xi; + struct net *net; int err; + net = params->link_net ? 
: dev_net(dev); xfrmi_netlink_parms(data, &p); if (p.collect_md) { struct xfrmi_net *xfrmn = net_generic(net, xfrmi_net_id); @@ -851,7 +852,7 @@ static int xfrmi_newlink(struct net *src_net, struct net_device *dev, xi->net = net; xi->dev = dev; - err = xfrmi_create(dev); + err = xfrmi_create(net, dev); return err; } diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c index f7abd42c077d5..9077730ff7d0e 100644 --- a/net/xfrm/xfrm_output.c +++ b/net/xfrm/xfrm_output.c @@ -612,6 +612,40 @@ int xfrm_output_resume(struct sock *sk, struct sk_buff *skb, int err) } EXPORT_SYMBOL_GPL(xfrm_output_resume); +static int xfrm_dev_direct_output(struct sock *sk, struct xfrm_state *x, + struct sk_buff *skb) +{ + struct dst_entry *dst = skb_dst(skb); + struct net *net = xs_net(x); + int err; + + dst = skb_dst_pop(skb); + if (!dst) { + XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR); + kfree_skb(skb); + return -EHOSTUNREACH; + } + skb_dst_set(skb, dst); + nf_reset_ct(skb); + + err = skb_dst(skb)->ops->local_out(net, sk, skb); + if (unlikely(err != 1)) { + kfree_skb(skb); + return err; + } + + /* In transport mode, network destination is + * directly reachable, while in tunnel mode, + * inner packet network may not be. In packet + * offload type, HW is responsible for hard + * header packet mangling so directly xmit skb + * to netdevice. + */ + skb->dev = x->xso.dev; + __skb_push(skb, skb->dev->hard_header_len); + return dev_queue_xmit(skb); +} + static int xfrm_output2(struct net *net, struct sock *sk, struct sk_buff *skb) { return xfrm_output_resume(sk, skb, 1); @@ -735,6 +769,13 @@ int xfrm_output(struct sock *sk, struct sk_buff *skb) return -EHOSTUNREACH; } + /* Exclusive direct xmit for tunnel mode, as + * some filtering or matching rules may apply + * in transport mode. 
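+	 * (for example, netfilter hooks on the regular output path that
+	 * still need to see transport-mode packets before encryption).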
+ */ + if (x->props.mode == XFRM_MODE_TUNNEL) + return xfrm_dev_direct_output(sk, x, skb); + return xfrm_output_resume(sk, skb, 0); } @@ -758,7 +799,7 @@ int xfrm_output(struct sock *sk, struct sk_buff *skb) skb->encapsulation = 1; if (skb_is_gso(skb)) { - if (skb->inner_protocol) + if (skb->inner_protocol && x->props.mode == XFRM_MODE_TUNNEL) return xfrm_output_gso(net, sk, skb); skb_shinfo(skb)->gso_type |= SKB_GSO_ESP; @@ -786,7 +827,7 @@ int xfrm_output(struct sock *sk, struct sk_buff *skb) } EXPORT_SYMBOL_GPL(xfrm_output); -static int xfrm4_tunnel_check_size(struct sk_buff *skb) +int xfrm4_tunnel_check_size(struct sk_buff *skb) { int mtu, ret = 0; @@ -812,6 +853,7 @@ static int xfrm4_tunnel_check_size(struct sk_buff *skb) out: return ret; } +EXPORT_SYMBOL_GPL(xfrm4_tunnel_check_size); static int xfrm4_extract_output(struct xfrm_state *x, struct sk_buff *skb) { @@ -834,7 +876,7 @@ static int xfrm4_extract_output(struct xfrm_state *x, struct sk_buff *skb) } #if IS_ENABLED(CONFIG_IPV6) -static int xfrm6_tunnel_check_size(struct sk_buff *skb) +int xfrm6_tunnel_check_size(struct sk_buff *skb) { int mtu, ret = 0; struct dst_entry *dst = skb_dst(skb); @@ -864,6 +906,7 @@ static int xfrm6_tunnel_check_size(struct sk_buff *skb) out: return ret; } +EXPORT_SYMBOL_GPL(xfrm6_tunnel_check_size); #endif static int xfrm6_extract_output(struct xfrm_state *x, struct sk_buff *skb) diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 6551e588fe526..30970d40a454f 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -3294,7 +3294,7 @@ struct dst_entry *xfrm_lookup_with_ifid(struct net *net, ok: xfrm_pols_put(pols, drop_pols); - if (dst && dst->xfrm && + if (dst->xfrm && (dst->xfrm->props.mode == XFRM_MODE_TUNNEL || dst->xfrm->props.mode == XFRM_MODE_IPTFS)) dst->flags |= DST_XFRM_TUNNEL; diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index ad2202fa82f34..07545944a5369 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c @@ -424,18 +424,18 @@ void xfrm_unregister_type_offload(const struct xfrm_type_offload *type, } EXPORT_SYMBOL(xfrm_unregister_type_offload); -static const struct xfrm_type_offload * -xfrm_get_type_offload(u8 proto, unsigned short family, bool try_load) +void xfrm_set_type_offload(struct xfrm_state *x) { const struct xfrm_type_offload *type = NULL; struct xfrm_state_afinfo *afinfo; + bool try_load = true; retry: - afinfo = xfrm_state_get_afinfo(family); + afinfo = xfrm_state_get_afinfo(x->props.family); if (unlikely(afinfo == NULL)) - return NULL; + goto out; - switch (proto) { + switch (x->id.proto) { case IPPROTO_ESP: type = afinfo->type_offload_esp; break; @@ -449,18 +449,16 @@ xfrm_get_type_offload(u8 proto, unsigned short family, bool try_load) rcu_read_unlock(); if (!type && try_load) { - request_module("xfrm-offload-%d-%d", family, proto); + request_module("xfrm-offload-%d-%d", x->props.family, + x->id.proto); try_load = false; goto retry; } - return type; -} - -static void xfrm_put_type_offload(const struct xfrm_type_offload *type) -{ - module_put(type->owner); +out: + x->type_offload = type; } +EXPORT_SYMBOL(xfrm_set_type_offload); static const struct xfrm_mode xfrm4_mode_map[XFRM_MODE_MAX] = { [XFRM_MODE_BEET] = { @@ -609,8 +607,6 @@ static void ___xfrm_state_destroy(struct xfrm_state *x) kfree(x->coaddr); kfree(x->replay_esn); kfree(x->preplay_esn); - if (x->type_offload) - xfrm_put_type_offload(x->type_offload); if (x->type) { x->type->destructor(x); xfrm_put_type(x->type); @@ -784,6 +780,8 @@ void xfrm_dev_state_free(struct 
xfrm_state *x) struct xfrm_dev_offload *xso = &x->xso; struct net_device *dev = READ_ONCE(xso->dev); + xfrm_unset_type_offload(x); + if (dev && dev->xfrmdev_ops) { spin_lock_bh(&xfrm_state_dev_gc_lock); if (!hlist_unhashed(&x->dev_gclist)) @@ -2315,12 +2313,12 @@ xfrm_state_lookup_byaddr(struct net *net, u32 mark, struct xfrm_hash_state_ptrs state_ptrs; struct xfrm_state *x; - spin_lock_bh(&net->xfrm.xfrm_state_lock); + rcu_read_lock(); xfrm_hash_ptrs_get(net, &state_ptrs); x = __xfrm_state_lookup_byaddr(&state_ptrs, mark, daddr, saddr, proto, family); - spin_unlock_bh(&net->xfrm.xfrm_state_lock); + rcu_read_unlock(); return x; } EXPORT_SYMBOL(xfrm_state_lookup_byaddr); @@ -3122,8 +3120,7 @@ u32 xfrm_state_mtu(struct xfrm_state *x, int mtu) } EXPORT_SYMBOL_GPL(xfrm_state_mtu); -int __xfrm_init_state(struct xfrm_state *x, bool init_replay, bool offload, - struct netlink_ext_ack *extack) +int __xfrm_init_state(struct xfrm_state *x, struct netlink_ext_ack *extack) { const struct xfrm_mode *inner_mode; const struct xfrm_mode *outer_mode; @@ -3178,8 +3175,6 @@ int __xfrm_init_state(struct xfrm_state *x, bool init_replay, bool offload, goto error; } - x->type_offload = xfrm_get_type_offload(x->id.proto, family, offload); - err = x->type->init_state(x, extack); if (err) goto error; @@ -3192,12 +3187,6 @@ int __xfrm_init_state(struct xfrm_state *x, bool init_replay, bool offload, } x->outer_mode = *outer_mode; - if (init_replay) { - err = xfrm_init_replay(x, extack); - if (err) - goto error; - } - if (x->nat_keepalive_interval) { if (x->dir != XFRM_SA_DIR_OUT) { NL_SET_ERR_MSG(extack, "NAT keepalive is only supported for outbound SAs"); @@ -3229,11 +3218,16 @@ int xfrm_init_state(struct xfrm_state *x) { int err; - err = __xfrm_init_state(x, true, false, NULL); - if (!err) - x->km.state = XFRM_STATE_VALID; + err = __xfrm_init_state(x, NULL); + if (err) + return err; - return err; + err = xfrm_init_replay(x, NULL); + if (err) + return err; + + x->km.state = XFRM_STATE_VALID; + return 0; } EXPORT_SYMBOL(xfrm_init_state); diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index 08c6d6f0179fb..784a2d124749f 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c @@ -178,6 +178,12 @@ static inline int verify_replay(struct xfrm_usersa_info *p, "Replay seq and seq_hi should be 0 for output SA"); return -EINVAL; } + if (rs->oseq_hi && !(p->flags & XFRM_STATE_ESN)) { + NL_SET_ERR_MSG( + extack, + "Replay oseq_hi should be 0 in non-ESN mode for output SA"); + return -EINVAL; + } if (rs->bmp_len) { NL_SET_ERR_MSG(extack, "Replay bmp_len should 0 for output SA"); return -EINVAL; @@ -190,6 +196,12 @@ static inline int verify_replay(struct xfrm_usersa_info *p, "Replay oseq and oseq_hi should be 0 for input SA"); return -EINVAL; } + if (rs->seq_hi && !(p->flags & XFRM_STATE_ESN)) { + NL_SET_ERR_MSG( + extack, + "Replay seq_hi should be 0 in non-ESN mode for input SA"); + return -EINVAL; + } } return 0; @@ -907,7 +919,7 @@ static struct xfrm_state *xfrm_state_construct(struct net *net, goto error; } - err = __xfrm_init_state(x, false, attrs[XFRMA_OFFLOAD_DEV], extack); + err = __xfrm_init_state(x, extack); if (err) goto error; diff --git a/rust/kernel/alloc/allocator_test.rs b/rust/kernel/alloc/allocator_test.rs index e3240d16040bd..c37d4c0c64e9f 100644 --- a/rust/kernel/alloc/allocator_test.rs +++ b/rust/kernel/alloc/allocator_test.rs @@ -62,6 +62,24 @@ unsafe impl Allocator for Cmalloc { )); } + // ISO C (ISO/IEC 9899:2011) defines `aligned_alloc`: + // + // > The value of alignment shall be a valid 
alignment supported by the implementation + // [...]. + // + // As an example of the "supported by the implementation" requirement, POSIX.1-2001 (IEEE + // 1003.1-2001) defines `posix_memalign`: + // + // > The value of alignment shall be a power of two multiple of sizeof (void *). + // + // and POSIX-based implementations of `aligned_alloc` inherit this requirement. At the time + // of writing, this is known to be the case on macOS (but not in glibc). + // + // Satisfy the stricter requirement to avoid spurious test failures on some platforms. + let min_align = core::mem::size_of::<*const crate::ffi::c_void>(); + let layout = layout.align_to(min_align).map_err(|_| AllocError)?; + let layout = layout.pad_to_align(); + // SAFETY: Returns either NULL or a pointer to a memory allocation that satisfies or // exceeds the given size and alignment requirements. let dst = unsafe { libc_aligned_alloc(layout.align(), layout.size()) } as *mut u8; diff --git a/rust/kernel/error.rs b/rust/kernel/error.rs index f6ecf09cb65f4..a194d83e6835c 100644 --- a/rust/kernel/error.rs +++ b/rust/kernel/error.rs @@ -107,7 +107,7 @@ impl Error { } else { // TODO: Make it a `WARN_ONCE` once available. crate::pr_warn!( - "attempted to create `Error` with out of range `errno`: {}", + "attempted to create `Error` with out of range `errno`: {}\n", errno ); code::EINVAL diff --git a/rust/kernel/init.rs b/rust/kernel/init.rs index 7fd1ea8265a55..e25d047f3c827 100644 --- a/rust/kernel/init.rs +++ b/rust/kernel/init.rs @@ -259,7 +259,7 @@ pub mod macros; /// }, /// })); /// let foo: Pin<&mut Foo> = foo; -/// pr_info!("a: {}", &*foo.a.lock()); +/// pr_info!("a: {}\n", &*foo.a.lock()); /// ``` /// /// # Syntax @@ -319,7 +319,7 @@ macro_rules! stack_pin_init { /// }, GFP_KERNEL)?, /// })); /// let foo = foo.unwrap(); -/// pr_info!("a: {}", &*foo.a.lock()); +/// pr_info!("a: {}\n", &*foo.a.lock()); /// ``` /// /// ```rust,ignore @@ -352,7 +352,7 @@ macro_rules! stack_pin_init { /// x: 64, /// }, GFP_KERNEL)?, /// })); -/// pr_info!("a: {}", &*foo.a.lock()); +/// pr_info!("a: {}\n", &*foo.a.lock()); /// # Ok::<_, AllocError>(()) /// ``` /// @@ -882,7 +882,7 @@ pub unsafe trait PinInit<T: ?Sized, E = Infallible>: Sized { /// /// impl Foo { /// fn setup(self: Pin<&mut Self>) { - /// pr_info!("Setting up foo"); + /// pr_info!("Setting up foo\n"); /// } /// } /// @@ -986,7 +986,7 @@ pub unsafe trait Init<T: ?Sized, E = Infallible>: PinInit<T, E> { /// /// impl Foo { /// fn setup(&mut self) { - /// pr_info!("Setting up foo"); + /// pr_info!("Setting up foo\n"); /// } /// } /// @@ -1336,7 +1336,7 @@ impl<T> InPlaceWrite<T> for UniqueArc<MaybeUninit<T>> { /// #[pinned_drop] /// impl PinnedDrop for Foo { /// fn drop(self: Pin<&mut Self>) { -/// pr_info!("Foo is being dropped!"); +/// pr_info!("Foo is being dropped!\n"); /// } /// } /// ``` @@ -1418,17 +1418,14 @@ impl_zeroable! { // SAFETY: `T: Zeroable` and `UnsafeCell` is `repr(transparent)`. {<T: ?Sized + Zeroable>} UnsafeCell<T>, - // SAFETY: All zeros is equivalent to `None` (option layout optimization guarantee). + // SAFETY: All zeros is equivalent to `None` (option layout optimization guarantee: + // https://doc.rust-lang.org/stable/std/option/index.html#representation). Option<NonZeroU8>, Option<NonZeroU16>, Option<NonZeroU32>, Option<NonZeroU64>, Option<NonZeroU128>, Option<NonZeroUsize>, Option<NonZeroI8>, Option<NonZeroI16>, Option<NonZeroI32>, Option<NonZeroI64>, Option<NonZeroI128>, Option<NonZeroIsize>, - - // SAFETY: All zeros is equivalent to `None` (option layout optimization guarantee). - // - // In this case we are allowed to use `T: ?Sized`, since all zeros is the `None` variant. - {<T: ?Sized>} Option<NonNull<T>>, - {<T: ?Sized>} Option<KBox<T>>, + {<T: ?Sized>} Option<NonNull<T>>, + {<T: ?Sized>} Option<KBox<T>>, // SAFETY: `null` pointer is valid.
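As an aside on the allocator_test.rs change above, the same normalization in plain C, for reference only: raise the alignment to the posix_memalign() floor of sizeof(void *) and pad the size to a multiple of the alignment. This sketch assumes align is already a power of two (Rust's Layout guarantees that); strict_aligned_alloc() is an illustrative name, not a libc or kernel API.

```c
#include <stdlib.h>

static void *strict_aligned_alloc(size_t align, size_t size)
{
	if (align < sizeof(void *))
		align = sizeof(void *);		/* POSIX floor for alignment */
	size = (size + align - 1) & ~(align - 1); /* pad_to_align() analogue */
	return aligned_alloc(align, size);	/* ISO C11 */
}
```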
// diff --git a/rust/kernel/init/macros.rs b/rust/kernel/init/macros.rs index 1fd146a832416..b7213962a6a5a 100644 --- a/rust/kernel/init/macros.rs +++ b/rust/kernel/init/macros.rs @@ -45,7 +45,7 @@ //! #[pinned_drop] //! impl PinnedDrop for Foo { //! fn drop(self: Pin<&mut Self>) { -//! pr_info!("{self:p} is getting dropped."); +//! pr_info!("{self:p} is getting dropped.\n"); //! } //! } //! @@ -412,7 +412,7 @@ //! #[pinned_drop] //! impl PinnedDrop for Foo { //! fn drop(self: Pin<&mut Self>) { -//! pr_info!("{self:p} is getting dropped."); +//! pr_info!("{self:p} is getting dropped.\n"); //! } //! } //! ``` @@ -423,7 +423,7 @@ //! // `unsafe`, full path and the token parameter are added, everything else stays the same. //! unsafe impl ::kernel::init::PinnedDrop for Foo { //! fn drop(self: Pin<&mut Self>, _: ::kernel::init::__internal::OnlyCallFromDrop) { -//! pr_info!("{self:p} is getting dropped."); +//! pr_info!("{self:p} is getting dropped.\n"); //! } //! } //! ``` diff --git a/rust/kernel/lib.rs b/rust/kernel/lib.rs index 398242f92a961..7697c60b2d1a6 100644 --- a/rust/kernel/lib.rs +++ b/rust/kernel/lib.rs @@ -6,7 +6,7 @@ //! usage by Rust code in the kernel and is shared by all of them. //! //! In other words, all the rest of the Rust code in the kernel (e.g. kernel -//! modules written in Rust) depends on [`core`], [`alloc`] and this crate. +//! modules written in Rust) depends on [`core`] and this crate. //! //! If you need a kernel C API that is not ported or wrapped yet here, then //! do so first instead of bypassing this crate. diff --git a/rust/kernel/sync.rs b/rust/kernel/sync.rs index 3498fb344dc93..16eab9138b2ba 100644 --- a/rust/kernel/sync.rs +++ b/rust/kernel/sync.rs @@ -30,28 +30,20 @@ pub struct LockClassKey(Opaque<bindings::lock_class_key>); unsafe impl Sync for LockClassKey {} impl LockClassKey { - /// Creates a new lock class key. - pub const fn new() -> Self { - Self(Opaque::uninit()) - } - pub(crate) fn as_ptr(&self) -> *mut bindings::lock_class_key { self.0.get() } } -impl Default for LockClassKey { - fn default() -> Self { - Self::new() - } -} - /// Defines a new static lock class and returns a pointer to it. #[doc(hidden)] #[macro_export] macro_rules! static_lock_class { () => {{ - static CLASS: $crate::sync::LockClassKey = $crate::sync::LockClassKey::new(); + static CLASS: $crate::sync::LockClassKey = + // SAFETY: lockdep expects uninitialized memory when it's handed a statically allocated + // lock_class_key + unsafe { ::core::mem::MaybeUninit::uninit().assume_init() }; &CLASS }}; } diff --git a/rust/kernel/sync/locked_by.rs b/rust/kernel/sync/locked_by.rs index a7b244675c2b9..61f100a45b350 100644 --- a/rust/kernel/sync/locked_by.rs +++ b/rust/kernel/sync/locked_by.rs @@ -55,7 +55,7 @@ use core::{cell::UnsafeCell, mem::size_of, ptr}; /// fn print_bytes_used(dir: &Directory, file: &File) { /// let guard = dir.inner.lock(); /// let inner_file = file.inner.access(&guard); -/// pr_info!("{} {}", guard.bytes_used, inner_file.bytes_used); +/// pr_info!("{} {}\n", guard.bytes_used, inner_file.bytes_used); /// } /// /// /// Increments `bytes_used` for both the directory and file. diff --git a/rust/kernel/task.rs b/rust/kernel/task.rs index 07bc22a7645c0..38da555a2bdbb 100644 --- a/rust/kernel/task.rs +++ b/rust/kernel/task.rs @@ -320,7 +320,7 @@ impl Task { /// Wakes up the task. pub fn wake_up(&self) { - // SAFETY: It's always safe to call `signal_pending` on a valid task, even if the task + // SAFETY: It's always safe to call `wake_up_process` on a valid task, even if the task // running.
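// (`wake_up_process` takes the task's `pi_lock` internally, so the
// caller needs no additional synchronization here.)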
unsafe { bindings::wake_up_process(self.as_ptr()) }; } diff --git a/rust/kernel/workqueue.rs b/rust/kernel/workqueue.rs index 0cd100d2aefb4..b7be224cdf4bb 100644 --- a/rust/kernel/workqueue.rs +++ b/rust/kernel/workqueue.rs @@ -60,7 +60,7 @@ //! type Pointer = Arc<MyStruct>; //! //! fn run(this: Arc<MyStruct>) { -//! pr_info!("The value is: {}", this.value); +//! pr_info!("The value is: {}\n", this.value); //! } //! } //! @@ -108,7 +108,7 @@ //! type Pointer = Arc<MyStruct>; //! //! fn run(this: Arc<MyStruct>) { -//! pr_info!("The value is: {}", this.value_1); +//! pr_info!("The value is: {}\n", this.value_1); //! } //! } //! @@ -116,7 +116,7 @@ //! type Pointer = Arc<MyStruct>; //! //! fn run(this: Arc<MyStruct>) { -//! pr_info!("The second value is: {}", this.value_2); +//! pr_info!("The second value is: {}\n", this.value_2); //! } //! } //! diff --git a/scripts/coccinelle/misc/newline_in_nl_msg.cocci b/scripts/coccinelle/misc/newline_in_nl_msg.cocci index 9baffe55d9170..2814f6b205b9a 100644 --- a/scripts/coccinelle/misc/newline_in_nl_msg.cocci +++ b/scripts/coccinelle/misc/newline_in_nl_msg.cocci @@ -1,7 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /// -/// Catch strings ending in newline with GENL_SET_ERR_MSG, NL_SET_ERR_MSG, -/// NL_SET_ERR_MSG_MOD. +/// Catch strings ending in newline with (GE)NL_SET_ERR_MSG*. /// // Confidence: Very High // Copyright: (C) 2020 Intel Corporation @@ -17,7 +16,11 @@ expression e; constant m; position p; @@ - \(GENL_SET_ERR_MSG\|NL_SET_ERR_MSG\|NL_SET_ERR_MSG_MOD\)(e,m@p) + \(GENL_SET_ERR_MSG\|GENL_SET_ERR_MSG_FMT\|NL_SET_ERR_MSG\|NL_SET_ERR_MSG_MOD\| + NL_SET_ERR_MSG_FMT\|NL_SET_ERR_MSG_FMT_MOD\|NL_SET_ERR_MSG_WEAK\| + NL_SET_ERR_MSG_WEAK_MOD\|NL_SET_ERR_MSG_ATTR_POL\| + NL_SET_ERR_MSG_ATTR_POL_FMT\|NL_SET_ERR_MSG_ATTR\| + NL_SET_ERR_MSG_ATTR_FMT\)(e,m@p,...) @script:python@ m << r.m; @@ @@ -32,7 +35,7 @@ expression r.e; constant r.m; position r.p; @@ - fname(e,m@p) + fname(e,m@p,...) //---------------------------------------------------------- // For context mode @@ -43,7 +46,7 @@ identifier r1.fname; expression r.e; constant r.m; @@ -* fname(e,m) +* fname(e,m,...) //---------------------------------------------------------- // For org mode diff --git a/scripts/generate_rust_analyzer.py b/scripts/generate_rust_analyzer.py index aa8ea1a4dbe5f..adae71544cbd6 100755 --- a/scripts/generate_rust_analyzer.py +++ b/scripts/generate_rust_analyzer.py @@ -57,14 +57,26 @@ def append_crate(display_name, root_module, deps, cfg=[], is_workspace_member=Tr crates_indexes[display_name] = len(crates) crates.append(crate) - # First, the ones in `rust/` since they are a bit special. - append_crate( - "core", - sysroot_src / "core" / "src" / "lib.rs", - [], - cfg=crates_cfgs.get("core", []), - is_workspace_member=False, - ) + def append_sysroot_crate( + display_name, + deps, + cfg=[], + ): + append_crate( + display_name, + sysroot_src / display_name / "src" / "lib.rs", + deps, + cfg, + is_workspace_member=False, + ) + + # NB: sysroot crates reexport items from one another so setting up our transitive dependencies + # here is important for ensuring that rust-analyzer can resolve symbols. The sources of truth + # for this dependency graph are `(sysroot_src / crate / "Cargo.toml" for crate in crates)`.
+ append_sysroot_crate("core", [], cfg=crates_cfgs.get("core", [])) + append_sysroot_crate("alloc", ["core"]) + append_sysroot_crate("std", ["alloc", "core"]) + append_sysroot_crate("proc_macro", ["core", "std"]) append_crate( "compiler_builtins", @@ -75,7 +87,7 @@ def append_crate(display_name, root_module, deps, cfg=[], is_workspace_member=Tr append_crate( "macros", srctree / "rust" / "macros" / "lib.rs", - [], + ["std", "proc_macro"], is_proc_macro=True, ) @@ -85,27 +97,28 @@ def append_crate(display_name, root_module, deps, cfg=[], is_workspace_member=Tr ["core", "compiler_builtins"], ) - append_crate( - "bindings", - srctree / "rust"/ "bindings" / "lib.rs", - ["core"], - cfg=cfg, - ) - crates[-1]["env"]["OBJTREE"] = str(objtree.resolve(True)) + def append_crate_with_generated( + display_name, + deps, + ): + append_crate( + display_name, + srctree / "rust"/ display_name / "lib.rs", + deps, + cfg=cfg, + ) + crates[-1]["env"]["OBJTREE"] = str(objtree.resolve(True)) + crates[-1]["source"] = { + "include_dirs": [ + str(srctree / "rust" / display_name), + str(objtree / "rust") + ], + "exclude_dirs": [], + } - append_crate( - "kernel", - srctree / "rust" / "kernel" / "lib.rs", - ["core", "macros", "build_error", "bindings"], - cfg=cfg, - ) - crates[-1]["source"] = { - "include_dirs": [ - str(srctree / "rust" / "kernel"), - str(objtree / "rust") - ], - "exclude_dirs": [], - } + append_crate_with_generated("bindings", ["core"]) + append_crate_with_generated("uapi", ["core"]) + append_crate_with_generated("kernel", ["core", "macros", "build_error", "bindings", "uapi"]) def is_root_crate(build_file, target): try: diff --git a/scripts/package/install-extmod-build b/scripts/package/install-extmod-build index 2966473b46609..b96538787f3d9 100755 --- a/scripts/package/install-extmod-build +++ b/scripts/package/install-extmod-build @@ -63,7 +63,7 @@ if [ "${CC}" != "${HOSTCC}" ]; then # Clear VPATH and srcroot because the source files reside in the output # directory. # shellcheck disable=SC2016 # $(MAKE) and $(build) will be expanded by Make - "${MAKE}" run-command KBUILD_RUN_COMMAND='+$(MAKE) HOSTCC='"${CC}"' VPATH= srcroot=. $(build)='"${destdir}"/scripts + "${MAKE}" run-command KBUILD_RUN_COMMAND='+$(MAKE) HOSTCC='"${CC}"' VPATH= srcroot=. $(build)='"$(realpath --relative-base=. "${destdir}")"/scripts rm -f "${destdir}/scripts/Kbuild" fi diff --git a/scripts/rustdoc_test_gen.rs b/scripts/rustdoc_test_gen.rs index 5ebd42ae4a3fd..76aaa8329413d 100644 --- a/scripts/rustdoc_test_gen.rs +++ b/scripts/rustdoc_test_gen.rs @@ -15,8 +15,8 @@ //! - Test code should be able to define functions and call them, without having to carry //! the context. //! -//! - Later on, we may want to be able to test non-kernel code (e.g. `core`, `alloc` or -//! third-party crates) which likely use the standard library `assert*!` macros. +//! - Later on, we may want to be able to test non-kernel code (e.g. `core` or third-party +//! crates) which likely use the standard library `assert*!` macros. //! //! For this reason, instead of the passed context, `kunit_get_current_test()` is used instead //! (i.e. `current->kunit_test`). diff --git a/security/integrity/evm/evm_crypto.c b/security/integrity/evm/evm_crypto.c index 7c06ffd633d24..a5e730ffda57f 100644 --- a/security/integrity/evm/evm_crypto.c +++ b/security/integrity/evm/evm_crypto.c @@ -180,7 +180,7 @@ static void hmac_add_misc(struct shash_desc *desc, struct inode *inode, } /* - * Dump large security xattr values as a continuous ascii hexademical string. 
+ * Dump large security xattr values as a continuous ascii hexadecimal string. * (pr_debug is limited to 64 bytes.) */ static void dump_security_xattr_l(const char *prefix, const void *src, diff --git a/security/integrity/evm/evm_main.c b/security/integrity/evm/evm_main.c index 377e57e9084f0..0add782e73ba2 100644 --- a/security/integrity/evm/evm_main.c +++ b/security/integrity/evm/evm_main.c @@ -169,7 +169,7 @@ static int is_unsupported_hmac_fs(struct dentry *dentry) * and compare it against the stored security.evm xattr. * * For performance: - * - use the previoulsy retrieved xattr value and length to calculate the + * - use the previously retrieved xattr value and length to calculate the * HMAC.) * - cache the verification result in the iint, when available. * diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h index 24d09ea91b877..a4f284bd846c1 100644 --- a/security/integrity/ima/ima.h +++ b/security/integrity/ima/ima.h @@ -149,6 +149,9 @@ struct ima_kexec_hdr { #define IMA_CHECK_BLACKLIST 0x40000000 #define IMA_VERITY_REQUIRED 0x80000000 +/* Exclude non-action flags which are not rule-specific. */ +#define IMA_NONACTION_RULE_FLAGS (IMA_NONACTION_FLAGS & ~IMA_NEW_FILE) + #define IMA_DO_MASK (IMA_MEASURE | IMA_APPRAISE | IMA_AUDIT | \ IMA_HASH | IMA_APPRAISE_SUBMASK) #define IMA_DONE_MASK (IMA_MEASURED | IMA_APPRAISED | IMA_AUDITED | \ diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c index 9f9897a7c217e..28b8b0db6f9bb 100644 --- a/security/integrity/ima/ima_main.c +++ b/security/integrity/ima/ima_main.c @@ -269,10 +269,13 @@ static int process_measurement(struct file *file, const struct cred *cred, mutex_lock(&iint->mutex); if (test_and_clear_bit(IMA_CHANGE_ATTR, &iint->atomic_flags)) - /* reset appraisal flags if ima_inode_post_setattr was called */ + /* + * Reset appraisal flags (action and non-action rule-specific) + * if ima_inode_post_setattr was called. + */ iint->flags &= ~(IMA_APPRAISE | IMA_APPRAISED | IMA_APPRAISE_SUBMASK | IMA_APPRAISED_SUBMASK | - IMA_NONACTION_FLAGS); + IMA_NONACTION_RULE_FLAGS); /* * Re-evaulate the file if either the xattr has changed or the @@ -1011,9 +1014,9 @@ int process_buffer_measurement(struct mnt_idmap *idmap, } /* - * Both LSM hooks and auxilary based buffer measurements are - * based on policy. To avoid code duplication, differentiate - * between the LSM hooks and auxilary buffer measurements, + * Both LSM hooks and auxiliary based buffer measurements are + * based on policy. To avoid code duplication, differentiate + * between the LSM hooks and auxiliary buffer measurements, * retrieving the policy rule information only for the LSM hook * buffer measurements. */ diff --git a/security/landlock/net.c b/security/landlock/net.c index d5dcc4407a197..104b6c01fe503 100644 --- a/security/landlock/net.c +++ b/security/landlock/net.c @@ -63,8 +63,7 @@ static int current_check_access_socket(struct socket *const sock, if (WARN_ON_ONCE(dom->num_layers < 1)) return -EACCES; - /* Checks if it's a (potential) TCP socket. */ - if (sock->type != SOCK_STREAM) + if (!sk_is_tcp(sock->sk)) return 0; /* Checks for minimal header length to safely read sa_family. 
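 * (the address length is supplied by userspace and may be smaller than
 * sizeof(struct sockaddr), so it must at least cover sa_family)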
*/ diff --git a/security/landlock/ruleset.c b/security/landlock/ruleset.c index 241ce44375b6a..bff4e40a3093c 100644 --- a/security/landlock/ruleset.c +++ b/security/landlock/ruleset.c @@ -124,7 +124,7 @@ create_rule(const struct landlock_id id, return ERR_PTR(-ENOMEM); RB_CLEAR_NODE(&new_rule->node); if (is_object_pointer(id.type)) { - /* This should be catched by insert_rule(). */ + /* This should have been caught by insert_rule(). */ WARN_ON_ONCE(!id.key.object); landlock_get_object(id.key.object); } diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c index cb66ec42a3f8a..706f53e39b53c 100644 --- a/sound/core/seq/seq_clientmgr.c +++ b/sound/core/seq/seq_clientmgr.c @@ -106,7 +106,7 @@ static struct snd_seq_client *clientptr(int clientid) return clienttab[clientid]; } -struct snd_seq_client *snd_seq_client_use_ptr(int clientid) +static struct snd_seq_client *client_use_ptr(int clientid, bool load_module) { unsigned long flags; struct snd_seq_client *client; @@ -126,7 +126,7 @@ struct snd_seq_client *snd_seq_client_use_ptr(int clientid) } spin_unlock_irqrestore(&clients_lock, flags); #ifdef CONFIG_MODULES - if (!in_interrupt()) { + if (load_module) { static DECLARE_BITMAP(client_requested, SNDRV_SEQ_GLOBAL_CLIENTS); static DECLARE_BITMAP(card_requested, SNDRV_CARDS); @@ -168,6 +168,20 @@ struct snd_seq_client *snd_seq_client_use_ptr(int clientid) return client; } +/* get snd_seq_client object for the given id quickly */ +struct snd_seq_client *snd_seq_client_use_ptr(int clientid) +{ + return client_use_ptr(clientid, false); +} + +/* get snd_seq_client object for the given id; + * if not found, retry after loading the modules + */ +static struct snd_seq_client *client_load_and_use_ptr(int clientid) +{ + return client_use_ptr(clientid, IS_ENABLED(CONFIG_MODULES)); +} + /* Take refcount and perform ioctl_mutex lock on the given client; * used only for OSS sequencer * Unlock via snd_seq_client_ioctl_unlock() below @@ -176,7 +190,7 @@ bool snd_seq_client_ioctl_lock(int clientid) { struct snd_seq_client *client; - client = snd_seq_client_use_ptr(clientid); + client = client_load_and_use_ptr(clientid); if (!client) return false; mutex_lock(&client->ioctl_mutex); @@ -1195,7 +1209,7 @@ static int snd_seq_ioctl_running_mode(struct snd_seq_client *client, void *arg) int err = 0; /* requested client number */ - cptr = snd_seq_client_use_ptr(info->client); + cptr = client_load_and_use_ptr(info->client); if (cptr == NULL) return -ENOENT; /* don't change !!! */ @@ -1257,7 +1271,7 @@ static int snd_seq_ioctl_get_client_info(struct snd_seq_client *client, struct snd_seq_client *cptr; /* requested client number */ - cptr = snd_seq_client_use_ptr(client_info->client); + cptr = client_load_and_use_ptr(client_info->client); if (cptr == NULL) return -ENOENT; /* don't change !!! 
*/ @@ -1396,7 +1410,7 @@ static int snd_seq_ioctl_get_port_info(struct snd_seq_client *client, void *arg) struct snd_seq_client *cptr; struct snd_seq_client_port *port; - cptr = snd_seq_client_use_ptr(info->addr.client); + cptr = client_load_and_use_ptr(info->addr.client); if (cptr == NULL) return -ENXIO; @@ -1503,10 +1517,10 @@ static int snd_seq_ioctl_subscribe_port(struct snd_seq_client *client, struct snd_seq_client *receiver = NULL, *sender = NULL; struct snd_seq_client_port *sport = NULL, *dport = NULL; - receiver = snd_seq_client_use_ptr(subs->dest.client); + receiver = client_load_and_use_ptr(subs->dest.client); if (!receiver) goto __end; - sender = snd_seq_client_use_ptr(subs->sender.client); + sender = client_load_and_use_ptr(subs->sender.client); if (!sender) goto __end; sport = snd_seq_port_use_ptr(sender, subs->sender.port); @@ -1871,7 +1885,7 @@ static int snd_seq_ioctl_get_client_pool(struct snd_seq_client *client, struct snd_seq_client_pool *info = arg; struct snd_seq_client *cptr; - cptr = snd_seq_client_use_ptr(info->client); + cptr = client_load_and_use_ptr(info->client); if (cptr == NULL) return -ENOENT; memset(info, 0, sizeof(*info)); @@ -1975,7 +1989,7 @@ static int snd_seq_ioctl_get_subscription(struct snd_seq_client *client, struct snd_seq_client_port *sport = NULL; result = -EINVAL; - sender = snd_seq_client_use_ptr(subs->sender.client); + sender = client_load_and_use_ptr(subs->sender.client); if (!sender) goto __end; sport = snd_seq_port_use_ptr(sender, subs->sender.port); @@ -2006,7 +2020,7 @@ static int snd_seq_ioctl_query_subs(struct snd_seq_client *client, void *arg) struct list_head *p; int i; - cptr = snd_seq_client_use_ptr(subs->root.client); + cptr = client_load_and_use_ptr(subs->root.client); if (!cptr) goto __end; port = snd_seq_port_use_ptr(cptr, subs->root.port); @@ -2073,7 +2087,7 @@ static int snd_seq_ioctl_query_next_client(struct snd_seq_client *client, if (info->client < 0) info->client = 0; for (; info->client < SNDRV_SEQ_MAX_CLIENTS; info->client++) { - cptr = snd_seq_client_use_ptr(info->client); + cptr = client_load_and_use_ptr(info->client); if (cptr) break; /* found */ } @@ -2096,7 +2110,7 @@ static int snd_seq_ioctl_query_next_port(struct snd_seq_client *client, struct snd_seq_client *cptr; struct snd_seq_client_port *port = NULL; - cptr = snd_seq_client_use_ptr(info->addr.client); + cptr = client_load_and_use_ptr(info->addr.client); if (cptr == NULL) return -ENXIO; @@ -2193,7 +2207,7 @@ static int snd_seq_ioctl_client_ump_info(struct snd_seq_client *caller, size = sizeof(struct snd_ump_endpoint_info); else size = sizeof(struct snd_ump_block_info); - cptr = snd_seq_client_use_ptr(client); + cptr = client_load_and_use_ptr(client); if (!cptr) return -ENOENT; @@ -2475,7 +2489,7 @@ int snd_seq_kernel_client_enqueue(int client, struct snd_seq_event *ev, if (check_event_type_and_length(ev)) return -EINVAL; - cptr = snd_seq_client_use_ptr(client); + cptr = client_load_and_use_ptr(client); if (cptr == NULL) return -EINVAL; @@ -2707,7 +2721,7 @@ void snd_seq_info_clients_read(struct snd_info_entry *entry, /* list the client table */ for (c = 0; c < SNDRV_SEQ_MAX_CLIENTS; c++) { - client = snd_seq_client_use_ptr(c); + client = client_load_and_use_ptr(c); if (client == NULL) continue; if (client->type == NO_CLIENT) { diff --git a/sound/pci/hda/Kconfig b/sound/pci/hda/Kconfig index e393578cbe684..84ebf19f28836 100644 --- a/sound/pci/hda/Kconfig +++ b/sound/pci/hda/Kconfig @@ -222,6 +222,7 @@ comment "Set to Y if you want auto-loading the side codec 
driver" config SND_HDA_CODEC_REALTEK tristate "Build Realtek HD-audio codec support" + depends on INPUT select SND_HDA_GENERIC select SND_HDA_GENERIC_LEDS select SND_HDA_SCODEC_COMPONENT diff --git a/sound/pci/hda/cs35l56_hda_spi.c b/sound/pci/hda/cs35l56_hda_spi.c index d4ee5bb7c4866..9035784669053 100644 --- a/sound/pci/hda/cs35l56_hda_spi.c +++ b/sound/pci/hda/cs35l56_hda_spi.c @@ -22,6 +22,9 @@ static int cs35l56_hda_spi_probe(struct spi_device *spi) return -ENOMEM; cs35l56->base.dev = &spi->dev; + ret = cs35l56_init_config_for_spi(&cs35l56->base, spi); + if (ret) + return ret; #ifdef CS35L56_WAKE_HOLD_TIME_US cs35l56->base.can_hibernate = true; diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 67540e0373099..e67c22c59f02b 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -2232,6 +2232,8 @@ static const struct snd_pci_quirk power_save_denylist[] = { SND_PCI_QUIRK(0x1631, 0xe017, "Packard Bell NEC IMEDIA 5204", 0), /* KONTRON SinglePC may cause a stall at runtime resume */ SND_PCI_QUIRK(0x1734, 0x1232, "KONTRON SinglePC", 0), + /* Dell ALC3271 */ + SND_PCI_QUIRK(0x1028, 0x0962, "Dell ALC3271", 0), {} }; diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 224616fbec4fa..a84857a3c2bfb 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -3843,6 +3843,79 @@ static void alc225_shutup(struct hda_codec *codec) } } +static void alc222_init(struct hda_codec *codec) +{ + struct alc_spec *spec = codec->spec; + hda_nid_t hp_pin = alc_get_hp_pin(spec); + bool hp1_pin_sense, hp2_pin_sense; + + if (!hp_pin) + return; + + msleep(30); + + hp1_pin_sense = snd_hda_jack_detect(codec, hp_pin); + hp2_pin_sense = snd_hda_jack_detect(codec, 0x14); + + if (hp1_pin_sense || hp2_pin_sense) { + msleep(2); + + if (hp1_pin_sense) + snd_hda_codec_write(codec, hp_pin, 0, + AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT); + if (hp2_pin_sense) + snd_hda_codec_write(codec, 0x14, 0, + AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT); + msleep(75); + + if (hp1_pin_sense) + snd_hda_codec_write(codec, hp_pin, 0, + AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE); + if (hp2_pin_sense) + snd_hda_codec_write(codec, 0x14, 0, + AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE); + + msleep(75); + } +} + +static void alc222_shutup(struct hda_codec *codec) +{ + struct alc_spec *spec = codec->spec; + hda_nid_t hp_pin = alc_get_hp_pin(spec); + bool hp1_pin_sense, hp2_pin_sense; + + if (!hp_pin) + hp_pin = 0x21; + + hp1_pin_sense = snd_hda_jack_detect(codec, hp_pin); + hp2_pin_sense = snd_hda_jack_detect(codec, 0x14); + + if (hp1_pin_sense || hp2_pin_sense) { + msleep(2); + + if (hp1_pin_sense) + snd_hda_codec_write(codec, hp_pin, 0, + AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE); + if (hp2_pin_sense) + snd_hda_codec_write(codec, 0x14, 0, + AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE); + + msleep(75); + + if (hp1_pin_sense) + snd_hda_codec_write(codec, hp_pin, 0, + AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0); + if (hp2_pin_sense) + snd_hda_codec_write(codec, 0x14, 0, + AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0); + + msleep(75); + } + alc_auto_setup_eapd(codec, false); + alc_shutup_pins(codec); +} + static void alc_default_init(struct hda_codec *codec) { struct alc_spec *spec = codec->spec; @@ -4717,6 +4790,21 @@ static void alc236_fixup_hp_coef_micmute_led(struct hda_codec *codec, } } +static void alc295_fixup_hp_mute_led_coefbit11(struct hda_codec *codec, + const struct hda_fixup *fix, int action) +{ + struct alc_spec *spec = codec->spec; + + if (action == 
HDA_FIXUP_ACT_PRE_PROBE) { + spec->mute_led_polarity = 0; + spec->mute_led_coef.idx = 0xb; + spec->mute_led_coef.mask = 3 << 3; + spec->mute_led_coef.on = 1 << 3; + spec->mute_led_coef.off = 1 << 4; + snd_hda_gen_add_mute_led_cdev(codec, coef_mute_led_set); + } +} + static void alc285_fixup_hp_mute_led(struct hda_codec *codec, const struct hda_fixup *fix, int action) { @@ -4927,7 +5015,6 @@ static void alc298_fixup_samsung_amp_v2_4_amps(struct hda_codec *codec, alc298_samsung_v2_init_amps(codec, 4); } -#if IS_REACHABLE(CONFIG_INPUT) static void gpio2_mic_hotkey_event(struct hda_codec *codec, struct hda_jack_callback *event) { @@ -5036,10 +5123,6 @@ static void alc233_fixup_lenovo_line2_mic_hotkey(struct hda_codec *codec, spec->kb_dev = NULL; } } -#else /* INPUT */ -#define alc280_fixup_hp_gpio2_mic_hotkey NULL -#define alc233_fixup_lenovo_line2_mic_hotkey NULL -#endif /* INPUT */ static void alc269_fixup_hp_line1_mic1_led(struct hda_codec *codec, const struct hda_fixup *fix, int action) @@ -5053,6 +5136,16 @@ static void alc269_fixup_hp_line1_mic1_led(struct hda_codec *codec, } } +static void alc233_fixup_lenovo_low_en_micmute_led(struct hda_codec *codec, + const struct hda_fixup *fix, int action) +{ + struct alc_spec *spec = codec->spec; + + if (action == HDA_FIXUP_ACT_PRE_PROBE) + spec->micmute_led_polarity = 1; + alc233_fixup_lenovo_line2_mic_hotkey(codec, fix, action); +} + static void alc_hp_mute_disable(struct hda_codec *codec, unsigned int delay) { if (delay <= 0) @@ -7578,6 +7671,7 @@ enum { ALC290_FIXUP_MONO_SPEAKERS_HSJACK, ALC290_FIXUP_SUBWOOFER, ALC290_FIXUP_SUBWOOFER_HSJACK, + ALC295_FIXUP_HP_MUTE_LED_COEFBIT11, ALC269_FIXUP_THINKPAD_ACPI, ALC269_FIXUP_LENOVO_XPAD_ACPI, ALC269_FIXUP_DMIC_THINKPAD_ACPI, @@ -7621,6 +7715,7 @@ enum { ALC275_FIXUP_DELL_XPS, ALC293_FIXUP_LENOVO_SPK_NOISE, ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY, + ALC233_FIXUP_LENOVO_L2MH_LOW_ENLED, ALC255_FIXUP_DELL_SPK_NOISE, ALC225_FIXUP_DISABLE_MIC_VREF, ALC225_FIXUP_DELL1_MIC_NO_PRESENCE, @@ -7690,7 +7785,6 @@ enum { ALC285_FIXUP_THINKPAD_X1_GEN7, ALC285_FIXUP_THINKPAD_HEADSET_JACK, ALC294_FIXUP_ASUS_ALLY, - ALC294_FIXUP_ASUS_ALLY_X, ALC294_FIXUP_ASUS_ALLY_PINS, ALC294_FIXUP_ASUS_ALLY_VERBS, ALC294_FIXUP_ASUS_ALLY_SPEAKER, @@ -8616,6 +8710,10 @@ static const struct hda_fixup alc269_fixups[] = { .type = HDA_FIXUP_FUNC, .v.func = alc233_fixup_lenovo_line2_mic_hotkey, }, + [ALC233_FIXUP_LENOVO_L2MH_LOW_ENLED] = { + .type = HDA_FIXUP_FUNC, + .v.func = alc233_fixup_lenovo_low_en_micmute_led, + }, [ALC233_FIXUP_INTEL_NUC8_DMIC] = { .type = HDA_FIXUP_FUNC, .v.func = alc_fixup_inv_dmic, @@ -9138,12 +9236,6 @@ static const struct hda_fixup alc269_fixups[] = { .chained = true, .chain_id = ALC294_FIXUP_ASUS_ALLY_PINS }, - [ALC294_FIXUP_ASUS_ALLY_X] = { - .type = HDA_FIXUP_FUNC, - .v.func = tas2781_fixup_i2c, - .chained = true, - .chain_id = ALC294_FIXUP_ASUS_ALLY_PINS - }, [ALC294_FIXUP_ASUS_ALLY_PINS] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { @@ -9325,6 +9417,10 @@ static const struct hda_fixup alc269_fixups[] = { .chained = true, .chain_id = ALC283_FIXUP_INT_MIC, }, + [ALC295_FIXUP_HP_MUTE_LED_COEFBIT11] = { + .type = HDA_FIXUP_FUNC, + .v.func = alc295_fixup_hp_mute_led_coefbit11, + }, [ALC298_FIXUP_SAMSUNG_AMP] = { .type = HDA_FIXUP_FUNC, .v.func = alc298_fixup_samsung_amp, @@ -10375,6 +10471,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x103c, 0x84e7, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3), SND_PCI_QUIRK(0x103c, 0x8519, "HP Spectre x360 15-df0xxx", 
ALC285_FIXUP_HP_SPECTRE_X360), SND_PCI_QUIRK(0x103c, 0x8537, "HP ProBook 440 G6", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF), + SND_PCI_QUIRK(0x103c, 0x85c6, "HP Pavilion x360 Convertible 14-dy1xxx", ALC295_FIXUP_HP_MUTE_LED_COEFBIT11), SND_PCI_QUIRK(0x103c, 0x85de, "HP Envy x360 13-ar0xxx", ALC285_FIXUP_HP_ENVY_X360), SND_PCI_QUIRK(0x103c, 0x860f, "HP ZBook 15 G6", ALC285_FIXUP_HP_GPIO_AMP_INIT), SND_PCI_QUIRK(0x103c, 0x861f, "HP Elite Dragonfly G1", ALC285_FIXUP_HP_GPIO_AMP_INIT), @@ -10600,7 +10697,9 @@ static const struct hda_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x103c, 0x8e1a, "HP ZBook Firefly 14 G12A", ALC285_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC), SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300), + SND_PCI_QUIRK(0x1043, 0x1054, "ASUS G614FH/FM/FP", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), + SND_PCI_QUIRK(0x1043, 0x1074, "ASUS G614PH/PM/PP", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x1043, 0x10a1, "ASUS UX391UA", ALC294_FIXUP_ASUS_SPK), SND_PCI_QUIRK(0x1043, 0x10a4, "ASUS TP3407SA", ALC287_FIXUP_TAS2781_I2C), SND_PCI_QUIRK(0x1043, 0x10c0, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC), @@ -10608,21 +10707,25 @@ static const struct hda_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1043, 0x10d3, "ASUS K6500ZC", ALC294_FIXUP_ASUS_SPK), SND_PCI_QUIRK(0x1043, 0x1154, "ASUS TP3607SH", ALC287_FIXUP_TAS2781_I2C), SND_PCI_QUIRK(0x1043, 0x115d, "Asus 1015E", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), + SND_PCI_QUIRK(0x1043, 0x1194, "ASUS UM3406KA", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x1043, 0x11c0, "ASUS X556UR", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1043, 0x1204, "ASUS Strix G615JHR_JMR_JPR", ALC287_FIXUP_TAS2781_I2C), SND_PCI_QUIRK(0x1043, 0x1214, "ASUS Strix G615LH_LM_LP", ALC287_FIXUP_TAS2781_I2C), SND_PCI_QUIRK(0x1043, 0x125e, "ASUS Q524UQK", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1043, 0x1271, "ASUS X430UN", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1043, 0x1290, "ASUS X441SA", ALC233_FIXUP_EAPD_COEF_AND_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1043, 0x1294, "ASUS B3405CVA", ALC245_FIXUP_CS35L41_SPI_2), SND_PCI_QUIRK(0x1043, 0x12a0, "ASUS X441UV", ALC233_FIXUP_EAPD_COEF_AND_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1043, 0x12a3, "Asus N7691ZM", ALC269_FIXUP_ASUS_N7601ZM), SND_PCI_QUIRK(0x1043, 0x12af, "ASUS UX582ZS", ALC245_FIXUP_CS35L41_SPI_2), + SND_PCI_QUIRK(0x1043, 0x12b4, "ASUS B3405CCA / P3405CCA", ALC245_FIXUP_CS35L41_SPI_2), SND_PCI_QUIRK(0x1043, 0x12e0, "ASUS X541SA", ALC256_FIXUP_ASUS_MIC), SND_PCI_QUIRK(0x1043, 0x12f0, "ASUS X541UV", ALC256_FIXUP_ASUS_MIC), SND_PCI_QUIRK(0x1043, 0x1313, "Asus K42JZ", ALC269VB_FIXUP_ASUS_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1043, 0x13b0, "ASUS Z550SA", ALC256_FIXUP_ASUS_MIC), SND_PCI_QUIRK(0x1043, 0x1427, "Asus Zenbook UX31E", ALC269VB_FIXUP_ASUS_ZENBOOK), SND_PCI_QUIRK(0x1043, 0x1433, "ASUS GX650PY/PZ/PV/PU/PYV/PZV/PIV/PVV", ALC285_FIXUP_ASUS_I2C_HEADSET_MIC), + SND_PCI_QUIRK(0x1043, 0x1460, "Asus VivoBook 15", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1043, 0x1463, "Asus GA402X/GA402N", ALC285_FIXUP_ASUS_I2C_HEADSET_MIC), SND_PCI_QUIRK(0x1043, 0x1473, "ASUS GU604VI/VC/VE/VG/VJ/VQ/VU/VV/VY/VZ", ALC285_FIXUP_ASUS_HEADSET_MIC), SND_PCI_QUIRK(0x1043, 0x1483, "ASUS GU603VQ/VU/VV/VJ/VI", ALC285_FIXUP_ASUS_HEADSET_MIC), @@ -10644,7 +10747,6 @@ static const struct hda_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1043, 0x1740, "ASUS UX430UA", ALC295_FIXUP_ASUS_DACS), SND_PCI_QUIRK(0x1043, 
0x17d1, "ASUS UX431FL", ALC294_FIXUP_ASUS_DUAL_SPK), SND_PCI_QUIRK(0x1043, 0x17f3, "ROG Ally NR2301L/X", ALC294_FIXUP_ASUS_ALLY), - SND_PCI_QUIRK(0x1043, 0x1eb3, "ROG Ally X RC72LA", ALC294_FIXUP_ASUS_ALLY_X), SND_PCI_QUIRK(0x1043, 0x1863, "ASUS UX6404VI/VV", ALC245_FIXUP_CS35L41_SPI_2), SND_PCI_QUIRK(0x1043, 0x1881, "ASUS Zephyrus S/M", ALC294_FIXUP_ASUS_GX502_PINS), SND_PCI_QUIRK(0x1043, 0x18b1, "Asus MJ401TA", ALC256_FIXUP_ASUS_HEADSET_MIC), @@ -10656,7 +10758,6 @@ static const struct hda_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1043, 0x19ce, "ASUS B9450FA", ALC294_FIXUP_ASUS_HPE), SND_PCI_QUIRK(0x1043, 0x19e1, "ASUS UX581LV", ALC295_FIXUP_ASUS_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW), - SND_PCI_QUIRK(0x1043, 0x1a30, "ASUS X705UD", ALC256_FIXUP_ASUS_MIC), SND_PCI_QUIRK(0x1043, 0x1a63, "ASUS UX3405MA", ALC245_FIXUP_CS35L41_SPI_2), SND_PCI_QUIRK(0x1043, 0x1a83, "ASUS UM5302LA", ALC294_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x1043, 0x1a8f, "ASUS UX582ZS", ALC245_FIXUP_CS35L41_SPI_2), @@ -10699,14 +10800,28 @@ static const struct hda_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1043, 0x1f12, "ASUS UM5302", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x1043, 0x1f1f, "ASUS H7604JI/JV/J3D", ALC245_FIXUP_CS35L41_SPI_2), SND_PCI_QUIRK(0x1043, 0x1f62, "ASUS UX7602ZM", ALC245_FIXUP_CS35L41_SPI_2), + SND_PCI_QUIRK(0x1043, 0x1f63, "ASUS P5405CSA", ALC245_FIXUP_CS35L41_SPI_2), SND_PCI_QUIRK(0x1043, 0x1f92, "ASUS ROG Flow X16", ALC289_FIXUP_ASUS_GA401), + SND_PCI_QUIRK(0x1043, 0x1fb3, "ASUS ROG Flow Z13 GZ302EA", ALC287_FIXUP_CS35L41_I2C_2), + SND_PCI_QUIRK(0x1043, 0x3011, "ASUS B5605CVA", ALC245_FIXUP_CS35L41_SPI_2), SND_PCI_QUIRK(0x1043, 0x3030, "ASUS ZN270IE", ALC256_FIXUP_ASUS_AIO_GPIO2), + SND_PCI_QUIRK(0x1043, 0x3061, "ASUS B3405CCA", ALC245_FIXUP_CS35L41_SPI_2), + SND_PCI_QUIRK(0x1043, 0x3071, "ASUS B5405CCA", ALC245_FIXUP_CS35L41_SPI_2), + SND_PCI_QUIRK(0x1043, 0x30c1, "ASUS B3605CCA / P3605CCA", ALC245_FIXUP_CS35L41_SPI_2), + SND_PCI_QUIRK(0x1043, 0x30d1, "ASUS B5405CCA", ALC245_FIXUP_CS35L41_SPI_2), + SND_PCI_QUIRK(0x1043, 0x30e1, "ASUS B5605CCA", ALC245_FIXUP_CS35L41_SPI_2), SND_PCI_QUIRK(0x1043, 0x31d0, "ASUS Zen AIO 27 Z272SD_A272SD", ALC274_FIXUP_ASUS_ZEN_AIO_27), + SND_PCI_QUIRK(0x1043, 0x31e1, "ASUS B5605CCA", ALC245_FIXUP_CS35L41_SPI_2), + SND_PCI_QUIRK(0x1043, 0x31f1, "ASUS B3605CCA", ALC245_FIXUP_CS35L41_SPI_2), SND_PCI_QUIRK(0x1043, 0x3a20, "ASUS G614JZR", ALC285_FIXUP_ASUS_SPI_REAR_SPEAKERS), SND_PCI_QUIRK(0x1043, 0x3a30, "ASUS G814JVR/JIR", ALC285_FIXUP_ASUS_SPI_REAR_SPEAKERS), SND_PCI_QUIRK(0x1043, 0x3a40, "ASUS G814JZR", ALC285_FIXUP_ASUS_SPI_REAR_SPEAKERS), SND_PCI_QUIRK(0x1043, 0x3a50, "ASUS G834JYR/JZR", ALC285_FIXUP_ASUS_SPI_REAR_SPEAKERS), SND_PCI_QUIRK(0x1043, 0x3a60, "ASUS G634JYR/JZR", ALC285_FIXUP_ASUS_SPI_REAR_SPEAKERS), + SND_PCI_QUIRK(0x1043, 0x3d78, "ASUS GA603KH", ALC287_FIXUP_CS35L41_I2C_2), + SND_PCI_QUIRK(0x1043, 0x3d88, "ASUS GA603KM", ALC287_FIXUP_CS35L41_I2C_2), + SND_PCI_QUIRK(0x1043, 0x3e00, "ASUS G814FH/FM/FP", ALC287_FIXUP_CS35L41_I2C_2), + SND_PCI_QUIRK(0x1043, 0x3e20, "ASUS G814PH/PM/PP", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x1043, 0x3e30, "ASUS TP3607SA", ALC287_FIXUP_TAS2781_I2C), SND_PCI_QUIRK(0x1043, 0x3ee0, "ASUS Strix G815_JHR_JMR_JPR", ALC287_FIXUP_TAS2781_I2C), SND_PCI_QUIRK(0x1043, 0x3ef0, "ASUS Strix G635LR_LW_LX", ALC287_FIXUP_TAS2781_I2C), @@ -10714,6 +10829,8 @@ static const struct hda_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1043, 0x3f10, "ASUS Strix G835LR_LW_LX", 
ALC287_FIXUP_TAS2781_I2C), SND_PCI_QUIRK(0x1043, 0x3f20, "ASUS Strix G615LR_LW", ALC287_FIXUP_TAS2781_I2C), SND_PCI_QUIRK(0x1043, 0x3f30, "ASUS Strix G815LR_LW", ALC287_FIXUP_TAS2781_I2C), + SND_PCI_QUIRK(0x1043, 0x3fd0, "ASUS B3605CVA", ALC245_FIXUP_CS35L41_SPI_2), + SND_PCI_QUIRK(0x1043, 0x3ff0, "ASUS B5405CVA", ALC245_FIXUP_CS35L41_SPI_2), SND_PCI_QUIRK(0x1043, 0x831a, "ASUS P901", ALC269_FIXUP_STEREO_DMIC), SND_PCI_QUIRK(0x1043, 0x834a, "ASUS S101", ALC269_FIXUP_STEREO_DMIC), SND_PCI_QUIRK(0x1043, 0x8398, "ASUS P1005", ALC269_FIXUP_STEREO_DMIC), @@ -10912,6 +11029,9 @@ static const struct hda_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x17aa, 0x3178, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC), SND_PCI_QUIRK(0x17aa, 0x31af, "ThinkCentre Station", ALC623_FIXUP_LENOVO_THINKSTATION_P340), SND_PCI_QUIRK(0x17aa, 0x334b, "Lenovo ThinkCentre M70 Gen5", ALC283_FIXUP_HEADSET_MIC), + SND_PCI_QUIRK(0x17aa, 0x3384, "ThinkCentre M90a PRO", ALC233_FIXUP_LENOVO_L2MH_LOW_ENLED), + SND_PCI_QUIRK(0x17aa, 0x3386, "ThinkCentre M90a Gen6", ALC233_FIXUP_LENOVO_L2MH_LOW_ENLED), + SND_PCI_QUIRK(0x17aa, 0x3387, "ThinkCentre M70a Gen6", ALC233_FIXUP_LENOVO_L2MH_LOW_ENLED), SND_PCI_QUIRK(0x17aa, 0x3801, "Lenovo Yoga9 14IAP7", ALC287_FIXUP_YOGA9_14IAP7_BASS_SPK_PIN), HDA_CODEC_QUIRK(0x17aa, 0x3802, "DuetITL 2021", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS), SND_PCI_QUIRK(0x17aa, 0x3802, "Lenovo Yoga Pro 9 14IRP8", ALC287_FIXUP_TAS2781_I2C), @@ -11900,8 +12020,11 @@ static int patch_alc269(struct hda_codec *codec) spec->codec_variant = ALC269_TYPE_ALC300; spec->gen.mixer_nid = 0; /* no loopback on ALC300 */ break; + case 0x10ec0222: case 0x10ec0623: spec->codec_variant = ALC269_TYPE_ALC623; + spec->shutup = alc222_shutup; + spec->init_hook = alc222_init; break; case 0x10ec0700: case 0x10ec0701: diff --git a/sound/soc/amd/yc/acp6x-mach.c b/sound/soc/amd/yc/acp6x-mach.c index b16587d8f97a8..a7637056972aa 100644 --- a/sound/soc/amd/yc/acp6x-mach.c +++ b/sound/soc/amd/yc/acp6x-mach.c @@ -248,6 +248,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "21M5"), } }, + { + .driver_data = &acp6x_card, + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "21M6"), + } + }, { .driver_data = &acp6x_card, .matches = { diff --git a/sound/soc/codecs/cs35l56-shared.c b/sound/soc/codecs/cs35l56-shared.c index e0ed4fc11155a..e28bfefa72f33 100644 --- a/sound/soc/codecs/cs35l56-shared.c +++ b/sound/soc/codecs/cs35l56-shared.c @@ -10,6 +10,7 @@ #include #include #include +#include #include #include @@ -303,6 +304,79 @@ void cs35l56_wait_min_reset_pulse(void) } EXPORT_SYMBOL_NS_GPL(cs35l56_wait_min_reset_pulse, "SND_SOC_CS35L56_SHARED"); +static const struct { + u32 addr; + u32 value; +} cs35l56_spi_system_reset_stages[] = { + { .addr = CS35L56_DSP_VIRTUAL1_MBOX_1, .value = CS35L56_MBOX_CMD_SYSTEM_RESET }, + /* The next write is necessary to delimit the soft reset */ + { .addr = CS35L56_DSP_MBOX_1_RAW, .value = CS35L56_MBOX_CMD_PING }, +}; + +static void cs35l56_spi_issue_bus_locked_reset(struct cs35l56_base *cs35l56_base, + struct spi_device *spi) +{ + struct cs35l56_spi_payload *buf = cs35l56_base->spi_payload_buf; + struct spi_transfer t = { + .tx_buf = buf, + .len = sizeof(*buf), + }; + struct spi_message m; + int i, ret; + + for (i = 0; i < ARRAY_SIZE(cs35l56_spi_system_reset_stages); i++) { + buf->addr = cpu_to_be32(cs35l56_spi_system_reset_stages[i].addr); + buf->value = cpu_to_be32(cs35l56_spi_system_reset_stages[i].value); + 
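+		/* Each stage goes out as its own transfer on the locked bus;
+		 * the usleep_range() below gives the amp time to come out of
+		 * soft reset before the delimiting PING write follows.
+		 */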
spi_message_init_with_transfers(&m, &t, 1); + ret = spi_sync_locked(spi, &m); + if (ret) + dev_warn(cs35l56_base->dev, "spi_sync failed: %d\n", ret); + + usleep_range(CS35L56_SPI_RESET_TO_PORT_READY_US, + 2 * CS35L56_SPI_RESET_TO_PORT_READY_US); + } +} + +static void cs35l56_spi_system_reset(struct cs35l56_base *cs35l56_base) +{ + struct spi_device *spi = to_spi_device(cs35l56_base->dev); + unsigned int val; + int read_ret, ret; + + /* + * There must not be any other SPI bus activity while the amp is + * soft-resetting. + */ + ret = spi_bus_lock(spi->controller); + if (ret) { + dev_warn(cs35l56_base->dev, "spi_bus_lock failed: %d\n", ret); + return; + } + + cs35l56_spi_issue_bus_locked_reset(cs35l56_base, spi); + spi_bus_unlock(spi->controller); + + /* + * Check firmware boot by testing for a response in MBOX_2. + * HALO_STATE cannot be trusted yet because the reset sequence + * can leave it with stale state. But MBOX is reset. + * The regmap must remain in cache-only until the chip has + * booted, so use a bypassed read. + */ + ret = read_poll_timeout(regmap_read_bypassed, read_ret, + (val > 0) && (val < 0xffffffff), + CS35L56_HALO_STATE_POLL_US, + CS35L56_HALO_STATE_TIMEOUT_US, + false, + cs35l56_base->regmap, + CS35L56_DSP_VIRTUAL1_MBOX_2, + &val); + if (ret) { + dev_err(cs35l56_base->dev, "SPI reboot timed out(%d): MBOX2=%#x\n", + read_ret, val); + } +} + static const struct reg_sequence cs35l56_system_reset_seq[] = { REG_SEQ0(CS35L56_DSP1_HALO_STATE, 0), REG_SEQ0(CS35L56_DSP_VIRTUAL1_MBOX_1, CS35L56_MBOX_CMD_SYSTEM_RESET), @@ -315,6 +389,12 @@ void cs35l56_system_reset(struct cs35l56_base *cs35l56_base, bool is_soundwire) * accesses other than the controlled system reset sequence below. */ regcache_cache_only(cs35l56_base->regmap, true); + + if (cs35l56_is_spi(cs35l56_base)) { + cs35l56_spi_system_reset(cs35l56_base); + return; + } + regmap_multi_reg_write_bypassed(cs35l56_base->regmap, cs35l56_system_reset_seq, ARRAY_SIZE(cs35l56_system_reset_seq)); diff --git a/sound/soc/codecs/cs35l56-spi.c b/sound/soc/codecs/cs35l56-spi.c index c101134e85328..ca6c03a8766d3 100644 --- a/sound/soc/codecs/cs35l56-spi.c +++ b/sound/soc/codecs/cs35l56-spi.c @@ -33,6 +33,9 @@ static int cs35l56_spi_probe(struct spi_device *spi) cs35l56->base.dev = &spi->dev; cs35l56->base.can_hibernate = true; + ret = cs35l56_init_config_for_spi(&cs35l56->base, spi); + if (ret) + return ret; ret = cs35l56_common_probe(cs35l56); if (ret != 0) diff --git a/sound/soc/codecs/cs42l43-jack.c b/sound/soc/codecs/cs42l43-jack.c index d9ab003e166bf..ac19a572fe70c 100644 --- a/sound/soc/codecs/cs42l43-jack.c +++ b/sound/soc/codecs/cs42l43-jack.c @@ -167,7 +167,7 @@ int cs42l43_set_jack(struct snd_soc_component *component, autocontrol |= 0x3 << CS42L43_JACKDET_MODE_SHIFT; ret = cs42l43_find_index(priv, "cirrus,tip-fall-db-ms", 500, - NULL, cs42l43_accdet_db_ms, + &priv->tip_fall_db_ms, cs42l43_accdet_db_ms, ARRAY_SIZE(cs42l43_accdet_db_ms)); if (ret < 0) goto error; @@ -175,7 +175,7 @@ int cs42l43_set_jack(struct snd_soc_component *component, tip_deb |= ret << CS42L43_TIPSENSE_FALLING_DB_TIME_SHIFT; ret = cs42l43_find_index(priv, "cirrus,tip-rise-db-ms", 500, - NULL, cs42l43_accdet_db_ms, + &priv->tip_rise_db_ms, cs42l43_accdet_db_ms, ARRAY_SIZE(cs42l43_accdet_db_ms)); if (ret < 0) goto error; @@ -764,6 +764,8 @@ void cs42l43_tip_sense_work(struct work_struct *work) error: mutex_unlock(&priv->jack_lock); + priv->suspend_jack_debounce = false; + pm_runtime_mark_last_busy(priv->dev); pm_runtime_put_autosuspend(priv->dev); } @@ 
-771,14 +773,19 @@ void cs42l43_tip_sense_work(struct work_struct *work) irqreturn_t cs42l43_tip_sense(int irq, void *data) { struct cs42l43_codec *priv = data; + unsigned int db_delay = priv->tip_debounce_ms; cancel_delayed_work(&priv->bias_sense_timeout); cancel_delayed_work(&priv->tip_sense_work); cancel_delayed_work(&priv->button_press_work); cancel_work(&priv->button_release_work); + // Ensure delay after suspend is long enough to avoid false detection + if (priv->suspend_jack_debounce) + db_delay += priv->tip_fall_db_ms + priv->tip_rise_db_ms; + queue_delayed_work(system_long_wq, &priv->tip_sense_work, - msecs_to_jiffies(priv->tip_debounce_ms)); + msecs_to_jiffies(db_delay)); return IRQ_HANDLED; } diff --git a/sound/soc/codecs/cs42l43.c b/sound/soc/codecs/cs42l43.c index d2a2daefc2ec6..ea84ac64c775e 100644 --- a/sound/soc/codecs/cs42l43.c +++ b/sound/soc/codecs/cs42l43.c @@ -1146,7 +1146,7 @@ static const struct snd_kcontrol_new cs42l43_controls[] = { SOC_DOUBLE_R_SX_TLV("ADC Volume", CS42L43_ADC_B_CTRL1, CS42L43_ADC_B_CTRL2, CS42L43_ADC_PGA_GAIN_SHIFT, - 0xF, 5, cs42l43_adc_tlv), + 0xF, 4, cs42l43_adc_tlv), SOC_DOUBLE("PDM1 Invert Switch", CS42L43_DMIC_PDM_CTRL, CS42L43_PDM1L_INV_SHIFT, CS42L43_PDM1R_INV_SHIFT, 1, 0), @@ -2402,9 +2402,22 @@ static int cs42l43_codec_runtime_resume(struct device *dev) return 0; } +static int cs42l43_codec_runtime_force_suspend(struct device *dev) +{ + struct cs42l43_codec *priv = dev_get_drvdata(dev); + + dev_dbg(priv->dev, "Runtime suspend\n"); + + priv->suspend_jack_debounce = true; + + pm_runtime_force_suspend(dev); + + return 0; +} + static const struct dev_pm_ops cs42l43_codec_pm_ops = { RUNTIME_PM_OPS(NULL, cs42l43_codec_runtime_resume, NULL) - SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume) + SYSTEM_SLEEP_PM_OPS(cs42l43_codec_runtime_force_suspend, pm_runtime_force_resume) }; static const struct platform_device_id cs42l43_codec_id_table[] = { diff --git a/sound/soc/codecs/cs42l43.h b/sound/soc/codecs/cs42l43.h index 9c144e129535f..1cd9d8a71c439 100644 --- a/sound/soc/codecs/cs42l43.h +++ b/sound/soc/codecs/cs42l43.h @@ -78,6 +78,8 @@ struct cs42l43_codec { bool use_ring_sense; unsigned int tip_debounce_ms; + unsigned int tip_fall_db_ms; + unsigned int tip_rise_db_ms; unsigned int bias_low; unsigned int bias_sense_ua; unsigned int bias_ramp_ms; @@ -95,6 +97,7 @@ struct cs42l43_codec { bool button_detect_running; bool jack_present; int jack_override; + bool suspend_jack_debounce; struct work_struct hp_ilimit_work; struct delayed_work hp_ilimit_clear_work; diff --git a/sound/soc/codecs/es8328.c b/sound/soc/codecs/es8328.c index f3c97da798dc8..76159c45e6b52 100644 --- a/sound/soc/codecs/es8328.c +++ b/sound/soc/codecs/es8328.c @@ -233,7 +233,6 @@ static const struct snd_kcontrol_new es8328_right_line_controls = /* Left Mixer */ static const struct snd_kcontrol_new es8328_left_mixer_controls[] = { - SOC_DAPM_SINGLE("Playback Switch", ES8328_DACCONTROL17, 7, 1, 0), SOC_DAPM_SINGLE("Left Bypass Switch", ES8328_DACCONTROL17, 6, 1, 0), SOC_DAPM_SINGLE("Right Playback Switch", ES8328_DACCONTROL18, 7, 1, 0), SOC_DAPM_SINGLE("Right Bypass Switch", ES8328_DACCONTROL18, 6, 1, 0), @@ -243,7 +242,6 @@ static const struct snd_kcontrol_new es8328_left_mixer_controls[] = { static const struct snd_kcontrol_new es8328_right_mixer_controls[] = { SOC_DAPM_SINGLE("Left Playback Switch", ES8328_DACCONTROL19, 7, 1, 0), SOC_DAPM_SINGLE("Left Bypass Switch", ES8328_DACCONTROL19, 6, 1, 0), - SOC_DAPM_SINGLE("Playback Switch", ES8328_DACCONTROL20, 
7, 1, 0), SOC_DAPM_SINGLE("Right Bypass Switch", ES8328_DACCONTROL20, 6, 1, 0), }; @@ -336,10 +334,10 @@ static const struct snd_soc_dapm_widget es8328_dapm_widgets[] = { SND_SOC_DAPM_DAC("Left DAC", "Left Playback", ES8328_DACPOWER, ES8328_DACPOWER_LDAC_OFF, 1), - SND_SOC_DAPM_MIXER("Left Mixer", SND_SOC_NOPM, 0, 0, + SND_SOC_DAPM_MIXER("Left Mixer", ES8328_DACCONTROL17, 7, 0, &es8328_left_mixer_controls[0], ARRAY_SIZE(es8328_left_mixer_controls)), - SND_SOC_DAPM_MIXER("Right Mixer", SND_SOC_NOPM, 0, 0, + SND_SOC_DAPM_MIXER("Right Mixer", ES8328_DACCONTROL20, 7, 0, &es8328_right_mixer_controls[0], ARRAY_SIZE(es8328_right_mixer_controls)), @@ -418,19 +416,14 @@ static const struct snd_soc_dapm_route es8328_dapm_routes[] = { { "Right Line Mux", "PGA", "Right PGA Mux" }, { "Right Line Mux", "Differential", "Differential Mux" }, - { "Left Out 1", NULL, "Left DAC" }, - { "Right Out 1", NULL, "Right DAC" }, - { "Left Out 2", NULL, "Left DAC" }, - { "Right Out 2", NULL, "Right DAC" }, - - { "Left Mixer", "Playback Switch", "Left DAC" }, + { "Left Mixer", NULL, "Left DAC" }, { "Left Mixer", "Left Bypass Switch", "Left Line Mux" }, { "Left Mixer", "Right Playback Switch", "Right DAC" }, { "Left Mixer", "Right Bypass Switch", "Right Line Mux" }, { "Right Mixer", "Left Playback Switch", "Left DAC" }, { "Right Mixer", "Left Bypass Switch", "Left Line Mux" }, - { "Right Mixer", "Playback Switch", "Right DAC" }, + { "Right Mixer", NULL, "Right DAC" }, { "Right Mixer", "Right Bypass Switch", "Right Line Mux" }, { "DAC DIG", NULL, "DAC STM" }, diff --git a/sound/soc/codecs/rt1320-sdw.c b/sound/soc/codecs/rt1320-sdw.c index 3510c3819074b..d83b236a04503 100644 --- a/sound/soc/codecs/rt1320-sdw.c +++ b/sound/soc/codecs/rt1320-sdw.c @@ -535,6 +535,9 @@ static int rt1320_read_prop(struct sdw_slave *slave) /* set the timeout values */ prop->clk_stop_timeout = 64; + /* BIOS may set wake_capable. Make sure it is 0 as wake events are disabled. */ + prop->wake_capable = 0; + return 0; } diff --git a/sound/soc/codecs/rt722-sdca-sdw.c b/sound/soc/codecs/rt722-sdca-sdw.c index 25fc13687bc83..4d3043627bd04 100644 --- a/sound/soc/codecs/rt722-sdca-sdw.c +++ b/sound/soc/codecs/rt722-sdca-sdw.c @@ -86,6 +86,10 @@ static bool rt722_sdca_mbq_readable_register(struct device *dev, unsigned int re case 0x6100067: case 0x6100070 ... 0x610007c: case 0x6100080: + case SDW_SDCA_CTL(FUNC_NUM_MIC_ARRAY, RT722_SDCA_ENT_FU15, RT722_SDCA_CTL_FU_CH_GAIN, + CH_01) ... 
+ SDW_SDCA_CTL(FUNC_NUM_MIC_ARRAY, RT722_SDCA_ENT_FU15, RT722_SDCA_CTL_FU_CH_GAIN, + CH_04): case SDW_SDCA_CTL(FUNC_NUM_MIC_ARRAY, RT722_SDCA_ENT_USER_FU1E, RT722_SDCA_CTL_FU_VOLUME, CH_01): case SDW_SDCA_CTL(FUNC_NUM_MIC_ARRAY, RT722_SDCA_ENT_USER_FU1E, RT722_SDCA_CTL_FU_VOLUME, diff --git a/sound/soc/codecs/tas2764.c b/sound/soc/codecs/tas2764.c index d482cd194c08c..58315eab492a1 100644 --- a/sound/soc/codecs/tas2764.c +++ b/sound/soc/codecs/tas2764.c @@ -365,7 +365,7 @@ static int tas2764_set_fmt(struct snd_soc_dai *dai, unsigned int fmt) { struct snd_soc_component *component = dai->component; struct tas2764_priv *tas2764 = snd_soc_component_get_drvdata(component); - u8 tdm_rx_start_slot = 0, asi_cfg_0 = 0, asi_cfg_1 = 0; + u8 tdm_rx_start_slot = 0, asi_cfg_0 = 0, asi_cfg_1 = 0, asi_cfg_4 = 0; int ret; switch (fmt & SND_SOC_DAIFMT_INV_MASK) { @@ -374,12 +374,14 @@ static int tas2764_set_fmt(struct snd_soc_dai *dai, unsigned int fmt) fallthrough; case SND_SOC_DAIFMT_NB_NF: asi_cfg_1 = TAS2764_TDM_CFG1_RX_RISING; + asi_cfg_4 = TAS2764_TDM_CFG4_TX_FALLING; break; case SND_SOC_DAIFMT_IB_IF: asi_cfg_0 ^= TAS2764_TDM_CFG0_FRAME_START; fallthrough; case SND_SOC_DAIFMT_IB_NF: asi_cfg_1 = TAS2764_TDM_CFG1_RX_FALLING; + asi_cfg_4 = TAS2764_TDM_CFG4_TX_RISING; break; } @@ -389,6 +391,12 @@ static int tas2764_set_fmt(struct snd_soc_dai *dai, unsigned int fmt) if (ret < 0) return ret; + ret = snd_soc_component_update_bits(component, TAS2764_TDM_CFG4, + TAS2764_TDM_CFG4_TX_MASK, + asi_cfg_4); + if (ret < 0) + return ret; + switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { case SND_SOC_DAIFMT_I2S: asi_cfg_0 ^= TAS2764_TDM_CFG0_FRAME_START; diff --git a/sound/soc/codecs/tas2764.h b/sound/soc/codecs/tas2764.h index 168af772a898f..9490f2686e389 100644 --- a/sound/soc/codecs/tas2764.h +++ b/sound/soc/codecs/tas2764.h @@ -25,7 +25,7 @@ /* Power Control */ #define TAS2764_PWR_CTRL TAS2764_REG(0X0, 0x02) -#define TAS2764_PWR_CTRL_MASK GENMASK(1, 0) +#define TAS2764_PWR_CTRL_MASK GENMASK(2, 0) #define TAS2764_PWR_CTRL_ACTIVE 0x0 #define TAS2764_PWR_CTRL_MUTE BIT(0) #define TAS2764_PWR_CTRL_SHUTDOWN BIT(1) @@ -79,6 +79,12 @@ #define TAS2764_TDM_CFG3_RXS_SHIFT 0x4 #define TAS2764_TDM_CFG3_MASK GENMASK(3, 0) +/* TDM Configuration Reg4 */ +#define TAS2764_TDM_CFG4 TAS2764_REG(0X0, 0x0d) +#define TAS2764_TDM_CFG4_TX_MASK BIT(0) +#define TAS2764_TDM_CFG4_TX_RISING 0x0 +#define TAS2764_TDM_CFG4_TX_FALLING BIT(0) + /* TDM Configuration Reg5 */ #define TAS2764_TDM_CFG5 TAS2764_REG(0X0, 0x0e) #define TAS2764_TDM_CFG5_VSNS_MASK BIT(6) diff --git a/sound/soc/codecs/tas2770.c b/sound/soc/codecs/tas2770.c index 9f93b230652a5..863c3f672ba98 100644 --- a/sound/soc/codecs/tas2770.c +++ b/sound/soc/codecs/tas2770.c @@ -506,7 +506,7 @@ static int tas2770_codec_probe(struct snd_soc_component *component) } static DECLARE_TLV_DB_SCALE(tas2770_digital_tlv, 1100, 50, 0); -static DECLARE_TLV_DB_SCALE(tas2770_playback_volume, -12750, 50, 0); +static DECLARE_TLV_DB_SCALE(tas2770_playback_volume, -10050, 50, 0); static const struct snd_kcontrol_new tas2770_snd_controls[] = { SOC_SINGLE_TLV("Speaker Playback Volume", TAS2770_PLAY_CFG_REG2, diff --git a/sound/soc/codecs/wm0010.c b/sound/soc/codecs/wm0010.c index edd2cb185c42c..9e67fbfc2ccaf 100644 --- a/sound/soc/codecs/wm0010.c +++ b/sound/soc/codecs/wm0010.c @@ -920,7 +920,7 @@ static int wm0010_spi_probe(struct spi_device *spi) if (ret) { dev_err(wm0010->dev, "Failed to set IRQ %d as wake source: %d\n", irq, ret); - return ret; + goto free_irq; } if (spi->max_speed_hz) @@ -932,9 +932,18 @@ 
static int wm0010_spi_probe(struct spi_device *spi) &soc_component_dev_wm0010, wm0010_dai, ARRAY_SIZE(wm0010_dai)); if (ret < 0) - return ret; + goto disable_irq_wake; return 0; + +disable_irq_wake: + irq_set_irq_wake(wm0010->irq, 0); + +free_irq: + if (wm0010->irq) + free_irq(wm0010->irq, wm0010); + + return ret; } static void wm0010_spi_remove(struct spi_device *spi) diff --git a/sound/soc/codecs/wsa884x.c b/sound/soc/codecs/wsa884x.c index 86df5152c547b..560a2c04b6955 100644 --- a/sound/soc/codecs/wsa884x.c +++ b/sound/soc/codecs/wsa884x.c @@ -1875,7 +1875,7 @@ static int wsa884x_get_temp(struct wsa884x_priv *wsa884x, long *temp) * Reading temperature is possible only when Power Amplifier is * off. Report last cached data. */ - *temp = wsa884x->temperature; + *temp = wsa884x->temperature * 1000; return 0; } @@ -1934,7 +1934,7 @@ static int wsa884x_get_temp(struct wsa884x_priv *wsa884x, long *temp) if ((val > WSA884X_LOW_TEMP_THRESHOLD) && (val < WSA884X_HIGH_TEMP_THRESHOLD)) { wsa884x->temperature = val; - *temp = val; + *temp = val * 1000; ret = 0; } else { ret = -EAGAIN; diff --git a/sound/soc/fsl/fsl_sai.c b/sound/soc/fsl/fsl_sai.c index c4eb87c5d39e4..9f33dd11d47f6 100644 --- a/sound/soc/fsl/fsl_sai.c +++ b/sound/soc/fsl/fsl_sai.c @@ -994,10 +994,10 @@ static struct snd_soc_dai_driver fsl_sai_dai_template[] = { { .name = "sai-tx", .playback = { - .stream_name = "CPU-Playback", + .stream_name = "SAI-Playback", .channels_min = 1, .channels_max = 32, - .rate_min = 8000, + .rate_min = 8000, .rate_max = 2822400, .rates = SNDRV_PCM_RATE_KNOT, .formats = FSL_SAI_FORMATS, @@ -1007,7 +1007,7 @@ static struct snd_soc_dai_driver fsl_sai_dai_template[] = { { .name = "sai-rx", .capture = { - .stream_name = "CPU-Capture", + .stream_name = "SAI-Capture", .channels_min = 1, .channels_max = 32, .rate_min = 8000, diff --git a/sound/soc/fsl/imx-audmix.c b/sound/soc/fsl/imx-audmix.c index 50ecc5f51100e..dac5d4ddacd6e 100644 --- a/sound/soc/fsl/imx-audmix.c +++ b/sound/soc/fsl/imx-audmix.c @@ -119,8 +119,8 @@ static const struct snd_soc_ops imx_audmix_be_ops = { static const char *name[][3] = { {"HiFi-AUDMIX-FE-0", "HiFi-AUDMIX-FE-1", "HiFi-AUDMIX-FE-2"}, {"sai-tx", "sai-tx", "sai-rx"}, - {"AUDMIX-Playback-0", "AUDMIX-Playback-1", "CPU-Capture"}, - {"CPU-Playback", "CPU-Playback", "AUDMIX-Capture-0"}, + {"AUDMIX-Playback-0", "AUDMIX-Playback-1", "SAI-Capture"}, + {"SAI-Playback", "SAI-Playback", "AUDMIX-Capture-0"}, }; static int imx_audmix_probe(struct platform_device *pdev) diff --git a/sound/soc/intel/boards/sof_sdw.c b/sound/soc/intel/boards/sof_sdw.c index 203b07d4d833c..90dafa810b2ec 100644 --- a/sound/soc/intel/boards/sof_sdw.c +++ b/sound/soc/intel/boards/sof_sdw.c @@ -803,7 +803,9 @@ static int create_sdw_dailink(struct snd_soc_card *card, int *be_id, struct snd_soc_codec_conf **codec_conf) { struct device *dev = card->dev; + struct snd_soc_acpi_mach *mach = dev_get_platdata(card->dev); struct asoc_sdw_mc_private *ctx = snd_soc_card_get_drvdata(card); + struct snd_soc_acpi_mach_params *mach_params = &mach->mach_params; struct intel_mc_ctx *intel_ctx = (struct intel_mc_ctx *)ctx->private; struct asoc_sdw_endpoint *sof_end; int stream; @@ -900,6 +902,11 @@ static int create_sdw_dailink(struct snd_soc_card *card, codecs[j].name = sof_end->codec_name; codecs[j].dai_name = sof_end->dai_info->dai_name; + if (sof_end->dai_info->dai_type == SOC_SDW_DAI_TYPE_MIC && + mach_params->dmic_num > 0) { + dev_warn(dev, + "Both SDW DMIC and PCH DMIC are present, if incorrect, please set kernel params 
snd_sof_intel_hda_generic dmic_num=0 to disable PCH DMIC\n"); + } j++; } @@ -947,7 +954,7 @@ static int create_sdw_dailinks(struct snd_soc_card *card, /* generate DAI links by each sdw link */ while (sof_dais->initialised) { - int current_be_id; + int current_be_id = 0; ret = create_sdw_dailink(card, sof_dais, dai_links, &current_be_id, codec_conf); diff --git a/sound/soc/soc-ops.c b/sound/soc/soc-ops.c index 19928f098d8dc..b0e4e4168f38d 100644 --- a/sound/soc/soc-ops.c +++ b/sound/soc/soc-ops.c @@ -337,7 +337,7 @@ int snd_soc_put_volsw(struct snd_kcontrol *kcontrol, if (ucontrol->value.integer.value[0] < 0) return -EINVAL; val = ucontrol->value.integer.value[0]; - if (mc->platform_max && ((int)val + min) > mc->platform_max) + if (mc->platform_max && val > mc->platform_max) return -EINVAL; if (val > max - min) return -EINVAL; @@ -350,7 +350,7 @@ int snd_soc_put_volsw(struct snd_kcontrol *kcontrol, if (ucontrol->value.integer.value[1] < 0) return -EINVAL; val2 = ucontrol->value.integer.value[1]; - if (mc->platform_max && ((int)val2 + min) > mc->platform_max) + if (mc->platform_max && val2 > mc->platform_max) return -EINVAL; if (val2 > max - min) return -EINVAL; @@ -503,17 +503,16 @@ int snd_soc_info_volsw_range(struct snd_kcontrol *kcontrol, { struct soc_mixer_control *mc = (struct soc_mixer_control *)kcontrol->private_value; - int platform_max; - int min = mc->min; + int max; - if (!mc->platform_max) - mc->platform_max = mc->max; - platform_max = mc->platform_max; + max = mc->max - mc->min; + if (mc->platform_max && mc->platform_max < max) + max = mc->platform_max; uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = snd_soc_volsw_is_stereo(mc) ? 2 : 1; uinfo->value.integer.min = 0; - uinfo->value.integer.max = platform_max - min; + uinfo->value.integer.max = max; return 0; } diff --git a/sound/soc/sof/intel/hda.c b/sound/soc/sof/intel/hda.c index be689f6e10c81..a1ccd95da8bb7 100644 --- a/sound/soc/sof/intel/hda.c +++ b/sound/soc/sof/intel/hda.c @@ -1312,22 +1312,8 @@ struct snd_soc_acpi_mach *hda_machine_select(struct snd_sof_dev *sdev) /* report to machine driver if any DMICs are found */ mach->mach_params.dmic_num = check_dmic_num(sdev); - if (sdw_mach_found) { - /* - * DMICs use up to 4 pins and are typically pin-muxed with SoundWire - * link 2 and 3, or link 1 and 2, thus we only try to enable dmics - * if all conditions are true: - * a) 2 or fewer links are used by SoundWire - * b) the NHLT table reports the presence of microphones - */ - if (hweight_long(mach->link_mask) <= 2) - dmic_fixup = true; - else - mach->mach_params.dmic_num = 0; - } else { - if (mach->tplg_quirk_mask & SND_SOC_ACPI_TPLG_INTEL_DMIC_NUMBER) - dmic_fixup = true; - } + if (sdw_mach_found || mach->tplg_quirk_mask & SND_SOC_ACPI_TPLG_INTEL_DMIC_NUMBER) + dmic_fixup = true; if (tplg_fixup && dmic_fixup && diff --git a/sound/soc/tegra/tegra210_adx.c b/sound/soc/tegra/tegra210_adx.c index 3e6e8f51f380b..0aa93b948378f 100644 --- a/sound/soc/tegra/tegra210_adx.c +++ b/sound/soc/tegra/tegra210_adx.c @@ -264,7 +264,7 @@ static const struct snd_soc_dai_ops tegra210_adx_out_dai_ops = { .rates = SNDRV_PCM_RATE_8000_192000, \ .formats = SNDRV_PCM_FMTBIT_S8 | \ SNDRV_PCM_FMTBIT_S16_LE | \ - SNDRV_PCM_FMTBIT_S16_LE | \ + SNDRV_PCM_FMTBIT_S24_LE | \ SNDRV_PCM_FMTBIT_S32_LE, \ }, \ .capture = { \ @@ -274,7 +274,7 @@ static const struct snd_soc_dai_ops tegra210_adx_out_dai_ops = { .rates = SNDRV_PCM_RATE_8000_192000, \ .formats = SNDRV_PCM_FMTBIT_S8 | \ SNDRV_PCM_FMTBIT_S16_LE | \ - SNDRV_PCM_FMTBIT_S16_LE | \ +
SNDRV_PCM_FMTBIT_S24_LE | \ SNDRV_PCM_FMTBIT_S32_LE, \ }, \ .ops = &tegra210_adx_out_dai_ops, \ diff --git a/sound/usb/midi.c b/sound/usb/midi.c index 737dd00e97b14..779d97d31f170 100644 --- a/sound/usb/midi.c +++ b/sound/usb/midi.c @@ -1145,7 +1145,7 @@ static int snd_usbmidi_output_close(struct snd_rawmidi_substream *substream) { struct usbmidi_out_port *port = substream->runtime->private_data; - cancel_work_sync(&port->ep->work); + flush_work(&port->ep->work); return substream_open(substream, 0, 0); } diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c index a97efb7b131ea..09210fb4ac60c 100644 --- a/sound/usb/quirks.c +++ b/sound/usb/quirks.c @@ -1868,6 +1868,7 @@ void snd_usb_set_format_quirk(struct snd_usb_substream *subs, case USB_ID(0x534d, 0x2109): /* MacroSilicon MS2109 */ subs->stream_offset_adj = 2; break; + case USB_ID(0x2b73, 0x000a): /* Pioneer DJM-900NXS2 */ case USB_ID(0x2b73, 0x0013): /* Pioneer DJM-450 */ pioneer_djm_set_format_quirk(subs, 0x0082); break; diff --git a/sound/usb/usx2y/usbusx2y.c b/sound/usb/usx2y/usbusx2y.c index 5f81c68fd42b6..5756ff3528a2d 100644 --- a/sound/usb/usx2y/usbusx2y.c +++ b/sound/usb/usx2y/usbusx2y.c @@ -151,6 +151,12 @@ static int snd_usx2y_card_used[SNDRV_CARDS]; static void snd_usx2y_card_private_free(struct snd_card *card); static void usx2y_unlinkseq(struct snd_usx2y_async_seq *s); +#ifdef USX2Y_NRPACKS_VARIABLE +int nrpacks = USX2Y_NRPACKS; /* number of packets per urb */ +module_param(nrpacks, int, 0444); +MODULE_PARM_DESC(nrpacks, "Number of packets per URB."); +#endif + /* * pipe 4 is used for switching the lamps, setting samplerate, volumes .... */ @@ -432,6 +438,11 @@ static int snd_usx2y_probe(struct usb_interface *intf, struct snd_card *card; int err; +#ifdef USX2Y_NRPACKS_VARIABLE + if (nrpacks < 0 || nrpacks > USX2Y_NRPACKS_MAX) + return -EINVAL; +#endif + if (le16_to_cpu(device->descriptor.idVendor) != 0x1604 || (le16_to_cpu(device->descriptor.idProduct) != USB_ID_US122 && le16_to_cpu(device->descriptor.idProduct) != USB_ID_US224 && diff --git a/sound/usb/usx2y/usbusx2y.h b/sound/usb/usx2y/usbusx2y.h index 391fd7b4ed5ef..6a76d04bf1c7d 100644 --- a/sound/usb/usx2y/usbusx2y.h +++ b/sound/usb/usx2y/usbusx2y.h @@ -7,6 +7,32 @@ #define NRURBS 2 +/* Default value used for nr of packs per urb. + * 1 to 4 have been tested ok on uhci. + * To use 3 on ohci, you'd need a patch: + * look for "0000425-linux-2.6.9-rc4-mm1_ohci-hcd.patch.gz" on + * "https://bugtrack.alsa-project.org/alsa-bug/bug_view_page.php?bug_id=0000425" + * + * 1, 2 and 4 work out of the box on ohci, if I recall correctly. + * Bigger is safer operation, smaller gives lower latencies. + */ +#define USX2Y_NRPACKS 4 + +#define USX2Y_NRPACKS_MAX 1024 + +/* If your system works ok with this module's parameter + * nrpacks set to 1, you might as well comment + * this define out, and thereby produce smaller, faster code. + * You'd also set USX2Y_NRPACKS to 1 then. + */ +#define USX2Y_NRPACKS_VARIABLE 1 + +#ifdef USX2Y_NRPACKS_VARIABLE +extern int nrpacks; +#define nr_of_packs() nrpacks +#else +#define nr_of_packs() USX2Y_NRPACKS +#endif #define URBS_ASYNC_SEQ 10 #define URB_DATA_LEN_ASYNC_SEQ 32 diff --git a/sound/usb/usx2y/usbusx2yaudio.c b/sound/usb/usx2y/usbusx2yaudio.c index f540f46a0b143..acca8bead82e5 100644 --- a/sound/usb/usx2y/usbusx2yaudio.c +++ b/sound/usb/usx2y/usbusx2yaudio.c @@ -28,33 +28,6 @@ #include "usx2y.h" #include "usbusx2y.h" -/* Default value used for nr of packs per urb. - * 1 to 4 have been tested ok on uhci. 
- * To use 3 on ohci, you'd need a patch: - * look for "0000425-linux-2.6.9-rc4-mm1_ohci-hcd.patch.gz" on - * "https://bugtrack.alsa-project.org/alsa-bug/bug_view_page.php?bug_id=0000425" - * - * 1, 2 and 4 work out of the box on ohci, if I recall correctly. - * Bigger is safer operation, smaller gives lower latencies. - */ -#define USX2Y_NRPACKS 4 - -/* If your system works ok with this module's parameter - * nrpacks set to 1, you might as well comment - * this define out, and thereby produce smaller, faster code. - * You'd also set USX2Y_NRPACKS to 1 then. - */ -#define USX2Y_NRPACKS_VARIABLE 1 - -#ifdef USX2Y_NRPACKS_VARIABLE -static int nrpacks = USX2Y_NRPACKS; /* number of packets per urb */ -#define nr_of_packs() nrpacks -module_param(nrpacks, int, 0444); -MODULE_PARM_DESC(nrpacks, "Number of packets per URB."); -#else -#define nr_of_packs() USX2Y_NRPACKS -#endif - static int usx2y_urb_capt_retire(struct snd_usx2y_substream *subs) { struct urb *urb = subs->completed_urb; diff --git a/tools/arch/arm64/tools/Makefile b/tools/arch/arm64/tools/Makefile index 7b42feedf6471..de4f1b66ef014 100644 --- a/tools/arch/arm64/tools/Makefile +++ b/tools/arch/arm64/tools/Makefile @@ -13,12 +13,6 @@ AWK ?= awk MKDIR ?= mkdir RM ?= rm -ifeq ($(V),1) -Q = -else -Q = @ -endif - arm64_tools_dir = $(top_srcdir)/arch/arm64/tools arm64_sysreg_tbl = $(arm64_tools_dir)/sysreg arm64_gen_sysreg = $(arm64_tools_dir)/gen-sysreg.awk diff --git a/tools/bpf/Makefile b/tools/bpf/Makefile index 243b79f2b451e..062bbd6cd048e 100644 --- a/tools/bpf/Makefile +++ b/tools/bpf/Makefile @@ -27,12 +27,6 @@ srctree := $(patsubst %/,%,$(dir $(CURDIR))) srctree := $(patsubst %/,%,$(dir $(srctree))) endif -ifeq ($(V),1) - Q = -else - Q = @ -endif - FEATURE_USER = .bpf FEATURE_TESTS = libbfd disassembler-four-args disassembler-init-styled FEATURE_DISPLAY = libbfd diff --git a/tools/bpf/bpftool/Documentation/Makefile b/tools/bpf/bpftool/Documentation/Makefile index 4315652678b9f..bf843f328812e 100644 --- a/tools/bpf/bpftool/Documentation/Makefile +++ b/tools/bpf/bpftool/Documentation/Makefile @@ -5,12 +5,6 @@ INSTALL ?= install RM ?= rm -f RMDIR ?= rmdir --ignore-fail-on-non-empty -ifeq ($(V),1) - Q = -else - Q = @ -endif - prefix ?= /usr/local mandir ?= $(prefix)/man man8dir = $(mandir)/man8 diff --git a/tools/bpf/bpftool/Makefile b/tools/bpf/bpftool/Makefile index efe867dd28215..9e9a5f006cd2a 100644 --- a/tools/bpf/bpftool/Makefile +++ b/tools/bpf/bpftool/Makefile @@ -7,12 +7,6 @@ srctree := $(patsubst %/,%,$(dir $(srctree))) srctree := $(patsubst %/,%,$(dir $(srctree))) endif -ifeq ($(V),1) - Q = -else - Q = @ -endif - BPF_DIR = $(srctree)/tools/lib/bpf ifneq ($(OUTPUT),) diff --git a/tools/bpf/resolve_btfids/Makefile b/tools/bpf/resolve_btfids/Makefile index 4b8079f294f65..afbddea3a39c6 100644 --- a/tools/bpf/resolve_btfids/Makefile +++ b/tools/bpf/resolve_btfids/Makefile @@ -5,10 +5,8 @@ include ../../scripts/Makefile.arch srctree := $(abspath $(CURDIR)/../../../) ifeq ($(V),1) - Q = msg = else - Q = @ ifeq ($(silent),1) msg = else diff --git a/tools/bpf/runqslower/Makefile b/tools/bpf/runqslower/Makefile index 5613b5736d938..78a436c4072e3 100644 --- a/tools/bpf/runqslower/Makefile +++ b/tools/bpf/runqslower/Makefile @@ -27,10 +27,7 @@ VMLINUX_BTF_PATHS := $(if $(O),$(O)/vmlinux) \ VMLINUX_BTF_PATH := $(or $(VMLINUX_BTF),$(firstword \ $(wildcard $(VMLINUX_BTF_PATHS)))) -ifeq ($(V),1) -Q = -else -Q = @ +ifneq ($(V),1) MAKEFLAGS += --no-print-directory submake_extras := feature_display=0 endif diff --git a/tools/build/Makefile 
b/tools/build/Makefile index 18ad131f6ea74..63ef218787616 100644 --- a/tools/build/Makefile +++ b/tools/build/Makefile @@ -17,13 +17,7 @@ $(call allow-override,LD,$(CROSS_COMPILE)ld) export HOSTCC HOSTLD HOSTAR -ifeq ($(V),1) - Q = -else - Q = @ -endif - -export Q srctree CC LD +export srctree CC LD MAKEFLAGS := --no-print-directory build := -f $(srctree)/tools/build/Makefile.build dir=. obj diff --git a/tools/include/uapi/asm-generic/socket.h b/tools/include/uapi/asm-generic/socket.h index ffff554a52300..aa5016ff3d911 100644 --- a/tools/include/uapi/asm-generic/socket.h +++ b/tools/include/uapi/asm-generic/socket.h @@ -119,14 +119,31 @@ #define SO_DETACH_REUSEPORT_BPF 68 +#define SO_PREFER_BUSY_POLL 69 +#define SO_BUSY_POLL_BUDGET 70 + +#define SO_NETNS_COOKIE 71 + +#define SO_BUF_LOCK 72 + +#define SO_RESERVE_MEM 73 + +#define SO_TXREHASH 74 + #define SO_RCVMARK 75 #define SO_PASSPIDFD 76 #define SO_PEERPIDFD 77 -#define SCM_TS_OPT_ID 78 +#define SO_DEVMEM_LINEAR 78 +#define SCM_DEVMEM_LINEAR SO_DEVMEM_LINEAR +#define SO_DEVMEM_DMABUF 79 +#define SCM_DEVMEM_DMABUF SO_DEVMEM_DMABUF +#define SO_DEVMEM_DONTNEED 80 + +#define SCM_TS_OPT_ID 81 -#define SO_RCVPRIORITY 79 +#define SO_RCVPRIORITY 82 #if !defined(__KERNEL__) diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 661de24449654..28705ae677849 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -6921,6 +6921,12 @@ enum { BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7F, }; +enum { + SK_BPF_CB_TX_TIMESTAMPING = 1<<0, + SK_BPF_CB_MASK = (SK_BPF_CB_TX_TIMESTAMPING - 1) | + SK_BPF_CB_TX_TIMESTAMPING +}; + /* List of known BPF sock_ops operators. * New entries can only be added at the end */ @@ -7033,6 +7039,29 @@ enum { * by the kernel or the * earlier bpf-progs. */ + BPF_SOCK_OPS_TSTAMP_SCHED_CB, /* Called when skb is passing + * through dev layer when + * SK_BPF_CB_TX_TIMESTAMPING + * feature is on. + */ + BPF_SOCK_OPS_TSTAMP_SND_SW_CB, /* Called when skb is about to send + * to the nic when SK_BPF_CB_TX_TIMESTAMPING + * feature is on. + */ + BPF_SOCK_OPS_TSTAMP_SND_HW_CB, /* Called in hardware phase when + * SK_BPF_CB_TX_TIMESTAMPING feature + * is on. + */ + BPF_SOCK_OPS_TSTAMP_ACK_CB, /* Called when all the skbs in the + * same sendmsg call are acked + * when SK_BPF_CB_TX_TIMESTAMPING + * feature is on. + */ + BPF_SOCK_OPS_TSTAMP_SENDMSG_CB, /* Called when every sendmsg syscall + * is triggered. It's used to correlate + * sendmsg timestamp with corresponding + * tskey. + */ }; /* List of TCP states. There is a build check in net/ipv4/tcp.c to detect @@ -7099,6 +7128,7 @@ enum { TCP_BPF_SYN_IP = 1006, /* Copy the IP[46] and TCP header */ TCP_BPF_SYN_MAC = 1007, /* Copy the MAC, IP[46], and TCP header */ TCP_BPF_SOCK_OPS_CB_FLAGS = 1008, /* Get or Set TCP sock ops flags */ + SK_BPF_CB_FLAGS = 1009, /* Get or set sock ops flags in socket */ }; enum { diff --git a/tools/include/uapi/linux/if_xdp.h b/tools/include/uapi/linux/if_xdp.h index 42ec5ddaab8dc..42869770776ec 100644 --- a/tools/include/uapi/linux/if_xdp.h +++ b/tools/include/uapi/linux/if_xdp.h @@ -127,6 +127,12 @@ struct xdp_options { */ #define XDP_TXMD_FLAGS_CHECKSUM (1 << 1) +/* Request launch time hardware offload. The device will schedule the packet for + * transmission at a pre-determined time called launch time. The value of + * launch time is communicated via launch_time field of struct xsk_tx_metadata. + */ +#define XDP_TXMD_FLAGS_LAUNCH_TIME (1 << 2) + /* AF_XDP offloads request. 
'request' union member is consumed by the driver * when the packet is being transmitted. 'completion' union member is * filled by the driver when the transmit completion arrives. @@ -142,6 +148,10 @@ struct xsk_tx_metadata { __u16 csum_start; /* Offset from csum_start where checksum should be stored. */ __u16 csum_offset; + + /* XDP_TXMD_FLAGS_LAUNCH_TIME */ + /* Launch time in nanosecond against the PTP HW Clock */ + __u64 launch_time; } request; struct { diff --git a/tools/include/uapi/linux/netdev.h b/tools/include/uapi/linux/netdev.h index e4be227d3ad64..b9b59d60957ff 100644 --- a/tools/include/uapi/linux/netdev.h +++ b/tools/include/uapi/linux/netdev.h @@ -59,10 +59,13 @@ enum netdev_xdp_rx_metadata { * by the driver. * @NETDEV_XSK_FLAGS_TX_CHECKSUM: L3 checksum HW offload is supported by the * driver. + * @NETDEV_XSK_FLAGS_TX_LAUNCH_TIME_FIFO: Launch time HW offload is supported + * by the driver. */ enum netdev_xsk_flags { NETDEV_XSK_FLAGS_TX_TIMESTAMP = 1, NETDEV_XSK_FLAGS_TX_CHECKSUM = 2, + NETDEV_XSK_FLAGS_TX_LAUNCH_TIME_FIFO = 4, }; enum netdev_queue_type { @@ -74,6 +77,12 @@ enum netdev_qstats_scope { NETDEV_QSTATS_SCOPE_QUEUE = 1, }; +enum netdev_napi_threaded { + NETDEV_NAPI_THREADED_DISABLE, + NETDEV_NAPI_THREADED_ENABLE, + NETDEV_NAPI_THREADED_BUSY_POLL_ENABLE, +}; + enum { NETDEV_A_DEV_IFINDEX = 1, NETDEV_A_DEV_PAD, @@ -86,6 +95,11 @@ enum { NETDEV_A_DEV_MAX = (__NETDEV_A_DEV_MAX - 1) }; +enum { + __NETDEV_A_IO_URING_PROVIDER_INFO_MAX, + NETDEV_A_IO_URING_PROVIDER_INFO_MAX = (__NETDEV_A_IO_URING_PROVIDER_INFO_MAX - 1) +}; + enum { NETDEV_A_PAGE_POOL_ID = 1, NETDEV_A_PAGE_POOL_IFINDEX, @@ -94,6 +108,7 @@ enum { NETDEV_A_PAGE_POOL_INFLIGHT_MEM, NETDEV_A_PAGE_POOL_DETACH_TIME, NETDEV_A_PAGE_POOL_DMABUF, + NETDEV_A_PAGE_POOL_IO_URING, __NETDEV_A_PAGE_POOL_MAX, NETDEV_A_PAGE_POOL_MAX = (__NETDEV_A_PAGE_POOL_MAX - 1) @@ -125,17 +140,25 @@ enum { NETDEV_A_NAPI_DEFER_HARD_IRQS, NETDEV_A_NAPI_GRO_FLUSH_TIMEOUT, NETDEV_A_NAPI_IRQ_SUSPEND_TIMEOUT, + NETDEV_A_NAPI_THREADED, __NETDEV_A_NAPI_MAX, NETDEV_A_NAPI_MAX = (__NETDEV_A_NAPI_MAX - 1) }; +enum { + __NETDEV_A_XSK_INFO_MAX, + NETDEV_A_XSK_INFO_MAX = (__NETDEV_A_XSK_INFO_MAX - 1) +}; + enum { NETDEV_A_QUEUE_ID = 1, NETDEV_A_QUEUE_IFINDEX, NETDEV_A_QUEUE_TYPE, NETDEV_A_QUEUE_NAPI_ID, NETDEV_A_QUEUE_DMABUF, + NETDEV_A_QUEUE_IO_URING, + NETDEV_A_QUEUE_XSK, __NETDEV_A_QUEUE_MAX, NETDEV_A_QUEUE_MAX = (__NETDEV_A_QUEUE_MAX - 1) diff --git a/tools/lib/bpf/Makefile b/tools/lib/bpf/Makefile index 857a5f7b413d6..168140f8e6461 100644 --- a/tools/lib/bpf/Makefile +++ b/tools/lib/bpf/Makefile @@ -53,13 +53,6 @@ include $(srctree)/tools/scripts/Makefile.include # copy a bit from Linux kbuild -ifeq ("$(origin V)", "command line") - VERBOSE = $(V) -endif -ifndef VERBOSE - VERBOSE = 0 -endif - INCLUDES = -I$(or $(OUTPUT),.) \ -I$(srctree)/tools/include -I$(srctree)/tools/include/uapi \ -I$(srctree)/tools/arch/$(SRCARCH)/include @@ -96,12 +89,6 @@ override CFLAGS += $(CLANG_CROSS_FLAGS) # flags specific for shared library SHLIB_FLAGS := -DSHARED -fPIC -ifeq ($(VERBOSE),1) - Q = -else - Q = @ -endif - # Disable command line variables (CFLAGS) override from top # level Makefile (perf), otherwise build Makefile will get # the same command line setup. 
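The XDP_TXMD_FLAGS_LAUNCH_TIME flag added to if_xdp.h above is consumed through the same per-frame metadata area as the existing timestamp and checksum offloads. As a minimal illustrative sketch (not part of the patch), assuming the umem was registered with tx_metadata_len set to sizeof(struct xsk_tx_metadata) so the metadata sits immediately in front of each frame, and that the frame's TX descriptor carries the XDP_TX_METADATA option bit, a user-space transmitter could request launch-time offload as follows (the helper name request_launch_time is hypothetical):

#include <linux/if_xdp.h>

/* 'frame' points at the first byte of packet data inside the umem;
 * the xsk_tx_metadata region immediately precedes it.
 */
static void request_launch_time(void *frame, __u64 launch_time_ns)
{
	struct xsk_tx_metadata *meta =
		(struct xsk_tx_metadata *)((char *)frame - sizeof(*meta));

	meta->flags = XDP_TXMD_FLAGS_LAUNCH_TIME;
	/* Nanoseconds against the NIC's PTP hardware clock */
	meta->request.launch_time = launch_time_ns;
}

Drivers advertise support through the new NETDEV_XSK_FLAGS_TX_LAUNCH_TIME_FIFO flag (see the netdev.h hunk above), so a portable application would check that bit before setting the metadata flag.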
diff --git a/tools/lib/perf/Makefile b/tools/lib/perf/Makefile index 3a9b2140aa048..e9a7ac2c062e2 100644 --- a/tools/lib/perf/Makefile +++ b/tools/lib/perf/Makefile @@ -39,19 +39,6 @@ libdir = $(prefix)/$(libdir_relative) libdir_SQ = $(subst ','\'',$(libdir)) libdir_relative_SQ = $(subst ','\'',$(libdir_relative)) -ifeq ("$(origin V)", "command line") - VERBOSE = $(V) -endif -ifndef VERBOSE - VERBOSE = 0 -endif - -ifeq ($(VERBOSE),1) - Q = -else - Q = @ -endif - TEST_ARGS := $(if $(V),-v) # Set compile option CFLAGS diff --git a/tools/lib/thermal/Makefile b/tools/lib/thermal/Makefile index 8890fd57b110c..a1f5e388644d3 100644 --- a/tools/lib/thermal/Makefile +++ b/tools/lib/thermal/Makefile @@ -39,19 +39,6 @@ libdir = $(prefix)/$(libdir_relative) libdir_SQ = $(subst ','\'',$(libdir)) libdir_relative_SQ = $(subst ','\'',$(libdir_relative)) -ifeq ("$(origin V)", "command line") - VERBOSE = $(V) -endif -ifndef VERBOSE - VERBOSE = 0 -endif - -ifeq ($(VERBOSE),1) - Q = -else - Q = @ -endif - # Set compile option CFLAGS ifdef EXTRA_CFLAGS CFLAGS := $(EXTRA_CFLAGS) diff --git a/tools/net/ynl/Makefile.deps b/tools/net/ynl/Makefile.deps index 0712b5e82eb79..f3269ce39e5b7 100644 --- a/tools/net/ynl/Makefile.deps +++ b/tools/net/ynl/Makefile.deps @@ -17,10 +17,13 @@ get_hdr_inc=-D$(1) -include $(UAPI_PATH)/linux/$(2) CFLAGS_devlink:=$(call get_hdr_inc,_LINUX_DEVLINK_H_,devlink.h) CFLAGS_dpll:=$(call get_hdr_inc,_LINUX_DPLL_H,dpll.h) CFLAGS_ethtool:=$(call get_hdr_inc,_LINUX_ETHTOOL_H,ethtool.h) \ - $(call get_hdr_inc,_LINUX_ETHTOOL_NETLINK_H_,ethtool_netlink.h) + $(call get_hdr_inc,_LINUX_ETHTOOL_NETLINK_H_,ethtool_netlink.h) \ + $(call get_hdr_inc,_LINUX_ETHTOOL_NETLINK_GENERATED_H,ethtool_netlink_generated.h) CFLAGS_handshake:=$(call get_hdr_inc,_LINUX_HANDSHAKE_H,handshake.h) CFLAGS_mptcp_pm:=$(call get_hdr_inc,_LINUX_MPTCP_PM_H,mptcp_pm.h) +CFLAGS_net_shaper:=$(call get_hdr_inc,_LINUX_NET_SHAPER_H,net_shaper.h) CFLAGS_netdev:=$(call get_hdr_inc,_LINUX_NETDEV_H,netdev.h) +CFLAGS_nl80211:=$(call get_hdr_inc,__LINUX_NL80211_H,nl80211.h) CFLAGS_nlctrl:=$(call get_hdr_inc,__LINUX_GENERIC_NETLINK_H,genetlink.h) CFLAGS_nfsd:=$(call get_hdr_inc,_LINUX_NFSD_NETLINK_H,nfsd_netlink.h) CFLAGS_ovs_datapath:=$(call get_hdr_inc,__LINUX_OPENVSWITCH_H,openvswitch.h) diff --git a/tools/net/ynl/pyynl/lib/ynl.py b/tools/net/ynl/pyynl/lib/ynl.py index 08f8bf89cfc20..dcc2c6b298d60 100644 --- a/tools/net/ynl/pyynl/lib/ynl.py +++ b/tools/net/ynl/pyynl/lib/ynl.py @@ -536,9 +536,11 @@ def _get_scalar(self, attr_spec, value): try: return int(value) except (ValueError, TypeError) as e: - if 'enum' not in attr_spec: - raise e - return self._encode_enum(attr_spec, value) + if 'enum' in attr_spec: + return self._encode_enum(attr_spec, value) + if attr_spec.display_hint: + return self._from_string(value, attr_spec) + raise e def _add_attr(self, space, name, value, search_attrs): try: @@ -571,7 +573,10 @@ def _add_attr(self, space, name, value, search_attrs): if isinstance(value, bytes): attr_payload = value elif isinstance(value, str): - attr_payload = bytes.fromhex(value) + if attr.display_hint: + attr_payload = self._from_string(value, attr) + else: + attr_payload = bytes.fromhex(value) elif isinstance(value, dict) and attr.struct_name: attr_payload = self._encode_struct(attr.struct_name, value) else: @@ -627,6 +632,11 @@ def _decode_binary(self, attr, attr_spec): decoded = self._decode_struct(attr.raw, attr_spec.struct_name) elif attr_spec.sub_type: decoded = attr.as_c_array(attr_spec.sub_type) if 'enum' in attr_spec:
decoded = [ self._decode_enum(x, attr_spec) for x in decoded ] + elif attr_spec.display_hint: + decoded = [ self._formatted_string(x, attr_spec.display_hint) + for x in decoded ] else: decoded = attr.as_bin() if attr_spec.display_hint: @@ -644,15 +654,17 @@ def _decode_array_attr(self, attr, attr_spec): subattrs = self._decode(NlAttrs(item.raw), attr_spec['nested-attributes']) decoded.append({ item.type: subattrs }) elif attr_spec["sub-type"] == 'binary': - subattrs = item.as_bin() + subattr = item.as_bin() if attr_spec.display_hint: - subattrs = self._formatted_string(subattrs, attr_spec.display_hint) - decoded.append(subattrs) + subattr = self._formatted_string(subattr, attr_spec.display_hint) + decoded.append(subattr) elif attr_spec["sub-type"] in NlAttr.type_formats: - subattrs = item.as_scalar(attr_spec['sub-type'], attr_spec.byte_order) - if attr_spec.display_hint: - subattrs = self._formatted_string(subattrs, attr_spec.display_hint) - decoded.append(subattrs) + subattr = item.as_scalar(attr_spec['sub-type'], attr_spec.byte_order) + if 'enum' in attr_spec: + subattr = self._decode_enum(subattr, attr_spec) + elif attr_spec.display_hint: + subattr = self._formatted_string(subattr, attr_spec.display_hint) + decoded.append(subattr) else: raise Exception(f'Unknown {attr_spec["sub-type"]} with name {attr_spec["name"]}') return decoded @@ -899,6 +911,18 @@ def _formatted_string(self, raw, display_hint): formatted = raw return formatted + def _from_string(self, string, attr_spec): + if attr_spec.display_hint in ['ipv4', 'ipv6']: + ip = ipaddress.ip_address(string) + if attr_spec['type'] == 'binary': + raw = ip.packed + else: + raw = int(ip) + else: + raise Exception(f"Display hint '{attr_spec.display_hint}' not implemented" + f" when parsing '{attr_spec['name']}'") + return raw + def handle_ntf(self, decoded): msg = dict() if self.include_raw: diff --git a/tools/net/ynl/pyynl/ynl_gen_c.py b/tools/net/ynl/pyynl/ynl_gen_c.py index c2eabc90dce8c..a1427c5370302 100755 --- a/tools/net/ynl/pyynl/ynl_gen_c.py +++ b/tools/net/ynl/pyynl/ynl_gen_c.py @@ -74,6 +74,8 @@ def __init__(self, family, attr_set, attr, value): self.c_name = c_lower(self.name) if self.c_name in _C_KW: self.c_name += '_' + if self.c_name[0].isdigit(): + self.c_name = '_' + self.c_name # Added by resolve(): self.enum_name = None @@ -100,7 +102,7 @@ def get_limit(self, limit, default=None): if isinstance(value, int): return value if value in self.family.consts: - raise Exception("Resolving family constants not implemented, yet") + return self.family.consts[value]["value"] return limit_to_number(value) def get_limit_str(self, limit, default=None, suffix=''): @@ -110,6 +112,9 @@ def get_limit_str(self, limit, default=None, suffix=''): if isinstance(value, int): return str(value) + suffix if value in self.family.consts: + const = self.family.consts[value] + if const.get('header'): + return c_upper(value) return c_upper(f"{self.family['name']}-{value}") return c_upper(value) @@ -683,7 +688,10 @@ def _complex_member_type(self, ri): raise Exception(f"Sub-type {self.attr['sub-type']} not supported yet") def _attr_typol(self): - return f'.type = YNL_PT_NEST, .nest = &{self.nested_render_name}_nest, ' + if self.attr['sub-type'] in scalars: + return f'.type = YNL_PT_U{c_upper(self.sub_type[1:])}, ' + else: + return f'.type = YNL_PT_NEST, .nest = &{self.nested_render_name}_nest, ' def _attr_get(self, ri, var): local_vars = ['const struct nlattr *attr2;'] @@ -885,7 +893,7 @@ def new_attr(self, elem, value): elif elem['type'] == 'nest': t = 
TypeNest(self.family, self, elem, value) elif elem['type'] == 'indexed-array' and 'sub-type' in elem: - if elem["sub-type"] == 'nest': + if elem["sub-type"] in ['nest', 'u32']: t = TypeArrayNest(self.family, self, elem, value) else: raise Exception(f'new_attr: unsupported sub-type {elem["sub-type"]}') @@ -1437,7 +1445,7 @@ def ifdef_block(self, config): self._ifdef_block = config_option -scalars = {'u8', 'u16', 'u32', 'u64', 's32', 's64', 'uint', 'sint'} +scalars = {'u8', 'u16', 'u32', 'u64', 's8', 's16', 's32', 's64', 'uint', 'sint'} direction_to_suffix = { 'reply': '_rsp', @@ -1669,6 +1677,9 @@ def _multi_parse(ri, struct, init_lines, local_vars): if aspec["sub-type"] == 'nest': local_vars.append(f'const struct nlattr *attr_{aspec.c_name};') array_nests.add(arg) + elif aspec['sub-type'] in scalars: + local_vars.append(f'const struct nlattr *attr_{aspec.c_name};') + array_nests.add(arg) else: raise Exception(f'Not supported sub-type {aspec["sub-type"]}') if 'multi-attr' in aspec: @@ -1724,11 +1735,17 @@ def _multi_parse(ri, struct, init_lines, local_vars): ri.cw.p(f"dst->{aspec.c_name} = calloc(n_{aspec.c_name}, sizeof(*dst->{aspec.c_name}));") ri.cw.p(f"dst->n_{aspec.c_name} = n_{aspec.c_name};") ri.cw.p('i = 0;') - ri.cw.p(f"parg.rsp_policy = &{aspec.nested_render_name}_nest;") + if 'nested-attributes' in aspec: + ri.cw.p(f"parg.rsp_policy = &{aspec.nested_render_name}_nest;") ri.cw.block_start(line=f"ynl_attr_for_each_nested(attr, attr_{aspec.c_name})") - ri.cw.p(f"parg.data = &dst->{aspec.c_name}[i];") - ri.cw.p(f"if ({aspec.nested_render_name}_parse(&parg, attr, ynl_attr_type(attr)))") - ri.cw.p('return YNL_PARSE_CB_ERROR;') + if 'nested-attributes' in aspec: + ri.cw.p(f"parg.data = &dst->{aspec.c_name}[i];") + ri.cw.p(f"if ({aspec.nested_render_name}_parse(&parg, attr, ynl_attr_type(attr)))") + ri.cw.p('return YNL_PARSE_CB_ERROR;') + elif aspec.sub_type in scalars: + ri.cw.p(f"dst->{aspec.c_name}[i] = ynl_attr_get_{aspec.sub_type}(attr);") + else: + raise Exception(f"Nest parsing type not supported in {aspec['name']}") ri.cw.p('i++;') ri.cw.block_end() ri.cw.block_end() @@ -2549,6 +2566,9 @@ def render_uapi(family, cw): defines = [] for const in family['definitions']: + if const.get('header'): + continue + if const['type'] != 'const': cw.writes_defines(defines) defines = [] diff --git a/tools/objtool/Makefile b/tools/objtool/Makefile index f56e277275341..7a65948892e56 100644 --- a/tools/objtool/Makefile +++ b/tools/objtool/Makefile @@ -46,12 +46,6 @@ HOST_OVERRIDES := CC="$(HOSTCC)" LD="$(HOSTLD)" AR="$(HOSTAR)" AWK = awk MKDIR = mkdir -ifeq ($(V),1) - Q = -else - Q = @ -endif - BUILD_ORC := n ifeq ($(SRCARCH),x86) diff --git a/tools/objtool/check.c b/tools/objtool/check.c index be18a04893030..ce973d9d8e6d8 100644 --- a/tools/objtool/check.c +++ b/tools/objtool/check.c @@ -2472,13 +2472,14 @@ static void mark_rodata(struct objtool_file *file) * * - .rodata: can contain GCC switch tables * - .rodata.: same, if -fdata-sections is being used - * - .rodata..c_jump_table: contains C annotated jump tables + * - .data.rel.ro.c_jump_table: contains C annotated jump tables * * .rodata.str1.* sections are ignored; they don't contain jump tables. 
*/ for_each_sec(file, sec) { - if (!strncmp(sec->name, ".rodata", 7) && - !strstr(sec->name, ".str1.")) { + if ((!strncmp(sec->name, ".rodata", 7) && + !strstr(sec->name, ".str1.")) || + !strncmp(sec->name, ".data.rel.ro", 12)) { sec->rodata = true; found = true; } diff --git a/tools/objtool/include/objtool/special.h b/tools/objtool/include/objtool/special.h index e7ee7ffccefd4..e049679bb17b2 100644 --- a/tools/objtool/include/objtool/special.h +++ b/tools/objtool/include/objtool/special.h @@ -10,7 +10,7 @@ #include #include -#define C_JUMP_TABLE_SECTION ".rodata..c_jump_table" +#define C_JUMP_TABLE_SECTION ".data.rel.ro.c_jump_table" struct special_alt { struct list_head list; diff --git a/tools/objtool/noreturns.h b/tools/objtool/noreturns.h index b2174894f9f71..6bb7edda3094d 100644 --- a/tools/objtool/noreturns.h +++ b/tools/objtool/noreturns.h @@ -19,7 +19,7 @@ NORETURN(__x64_sys_exit_group) NORETURN(arch_cpu_idle_dead) NORETURN(bch2_trans_in_restart_error) NORETURN(bch2_trans_restart_error) -NORETURN(bch2_trans_unlocked_error) +NORETURN(bch2_trans_unlocked_or_in_restart_error) NORETURN(cpu_bringup_and_idle) NORETURN(cpu_startup_entry) NORETURN(do_exit) diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf index 55d6ce9ea52fb..05c083bb11220 100644 --- a/tools/perf/Makefile.perf +++ b/tools/perf/Makefile.perf @@ -161,47 +161,6 @@ export VPATH SOURCE := $(shell ln -sf $(srctree)/tools/perf $(OUTPUT)/source) endif -# Beautify output -# --------------------------------------------------------------------------- -# -# Most of build commands in Kbuild start with "cmd_". You can optionally define -# "quiet_cmd_*". If defined, the short log is printed. Otherwise, no log from -# that command is printed by default. -# -# e.g.) -# quiet_cmd_depmod = DEPMOD $(MODLIB) -# cmd_depmod = $(srctree)/scripts/depmod.sh $(DEPMOD) $(KERNELRELEASE) -# -# A simple variant is to prefix commands with $(Q) - that's useful -# for commands that shall be hidden in non-verbose mode. -# -# $(Q)$(MAKE) $(build)=scripts/basic -# -# To put more focus on warnings, be less verbose as default -# Use 'make V=1' to see the full commands - -ifeq ($(V),1) - quiet = - Q = -else - quiet=quiet_ - Q=@ -endif - -# If the user is running make -s (silent mode), suppress echoing of commands -# make-4.0 (and later) keep single letter options in the 1st word of MAKEFLAGS. -ifeq ($(filter 3.%,$(MAKE_VERSION)),) -short-opts := $(firstword -$(MAKEFLAGS)) -else -short-opts := $(filter-out --%,$(MAKEFLAGS)) -endif - -ifneq ($(findstring s,$(short-opts)),) - quiet=silent_ -endif - -export quiet Q - # Do not use make's built-in rules # (this improves performance and avoids hard-to-debug behaviour); MAKEFLAGS += -r diff --git a/tools/scripts/Makefile.include b/tools/scripts/Makefile.include index 0aa4005017c72..45f4abef70640 100644 --- a/tools/scripts/Makefile.include +++ b/tools/scripts/Makefile.include @@ -136,6 +136,33 @@ else NO_SUBDIR = : endif +# Beautify output +# --------------------------------------------------------------------------- +# +# Most of build commands in Kbuild start with "cmd_". You can optionally define +# "quiet_cmd_*". If defined, the short log is printed. Otherwise, no log from +# that command is printed by default. +# +# e.g.) +# quiet_cmd_depmod = DEPMOD $(MODLIB) +# cmd_depmod = $(srctree)/scripts/depmod.sh $(DEPMOD) $(KERNELRELEASE) +# +# A simple variant is to prefix commands with $(Q) - that's useful +# for commands that shall be hidden in non-verbose mode. 
+# +# $(Q)$(MAKE) $(build)=scripts/basic +# +# To put more focus on warnings, be less verbose as default +# Use 'make V=1' to see the full commands + +ifeq ($(V),1) + quiet = + Q = +else + quiet = quiet_ + Q = @ +endif + # If the user is running make -s (silent mode), suppress echoing of commands # make-4.0 (and later) keep single letter options in the 1st word of MAKEFLAGS. ifeq ($(filter 3.%,$(MAKE_VERSION)),) @@ -146,8 +173,11 @@ endif ifneq ($(findstring s,$(short-opts)),) silent=1 + quiet=silent_ endif +export quiet Q + # # Define a callable command for descending to a new directory # diff --git a/tools/sound/dapm-graph b/tools/sound/dapm-graph index f14bdfedee8f1..b6196ee5065a4 100755 --- a/tools/sound/dapm-graph +++ b/tools/sound/dapm-graph @@ -10,7 +10,7 @@ set -eu STYLE_COMPONENT_ON="color=dodgerblue;style=bold" STYLE_COMPONENT_OFF="color=gray40;style=filled;fillcolor=gray90" -STYLE_NODE_ON="shape=box,style=bold,color=green4" +STYLE_NODE_ON="shape=box,style=bold,color=green4,fillcolor=white" STYLE_NODE_OFF="shape=box,style=filled,color=gray30,fillcolor=gray95" # Print usage and exit diff --git a/tools/testing/kunit/configs/all_tests.config b/tools/testing/kunit/configs/all_tests.config index b0049be00c706..81f59d41202b8 100644 --- a/tools/testing/kunit/configs/all_tests.config +++ b/tools/testing/kunit/configs/all_tests.config @@ -35,17 +35,8 @@ CONFIG_MAC80211=y CONFIG_WLAN_VENDOR_INTEL=y CONFIG_IWLWIFI=y -CONFIG_DAMON=y -CONFIG_DAMON_VADDR=y -CONFIG_DAMON_PADDR=y - CONFIG_REGMAP_BUILD=y CONFIG_SECURITY=y CONFIG_SECURITY_APPARMOR=y CONFIG_SECURITY_LANDLOCK=y - -CONFIG_SOUND=y -CONFIG_SND=y -CONFIG_SND_SOC=y -CONFIG_SND_SOC_TOPOLOGY_BUILD=y diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile index 8daac70c2f9d2..042be9a42b2e2 100644 --- a/tools/testing/selftests/Makefile +++ b/tools/testing/selftests/Makefile @@ -69,6 +69,7 @@ TARGETS += net/hsr TARGETS += net/mptcp TARGETS += net/netfilter TARGETS += net/openvswitch +TARGETS += net/ovpn TARGETS += net/packetdrill TARGETS += net/rds TARGETS += net/tcp_ao diff --git a/tools/testing/selftests/bpf/Makefile.docs b/tools/testing/selftests/bpf/Makefile.docs index eb6a4fea8c794..f7f9e7088bb38 100644 --- a/tools/testing/selftests/bpf/Makefile.docs +++ b/tools/testing/selftests/bpf/Makefile.docs @@ -7,12 +7,6 @@ INSTALL ?= install RM ?= rm -f RMDIR ?= rmdir --ignore-fail-on-non-empty -ifeq ($(V),1) - Q = -else - Q = @ -endif - prefix ?= /usr/local mandir ?= $(prefix)/man man2dir = $(mandir)/man2 diff --git a/tools/testing/selftests/bpf/network_helpers.c b/tools/testing/selftests/bpf/network_helpers.c index 97fb7caf95f45..72b5c174ab3bb 100644 --- a/tools/testing/selftests/bpf/network_helpers.c +++ b/tools/testing/selftests/bpf/network_helpers.c @@ -565,6 +565,34 @@ void close_netns(struct nstoken *token) free(token); } +int open_tuntap(const char *dev_name, bool need_mac) +{ + int err = 0; + struct ifreq ifr; + int fd = open("/dev/net/tun", O_RDWR); + + if (!ASSERT_GE(fd, 0, "open(/dev/net/tun)")) + return -1; + + ifr.ifr_flags = IFF_NO_PI | (need_mac ? 
IFF_TAP : IFF_TUN); + strncpy(ifr.ifr_name, dev_name, IFNAMSIZ - 1); + ifr.ifr_name[IFNAMSIZ - 1] = '\0'; + + err = ioctl(fd, TUNSETIFF, &ifr); + if (!ASSERT_OK(err, "ioctl(TUNSETIFF)")) { + close(fd); + return -1; + } + + err = fcntl(fd, F_SETFL, O_NONBLOCK); + if (!ASSERT_OK(err, "fcntl(O_NONBLOCK)")) { + close(fd); + return -1; + } + + return fd; +} + int get_socket_local_port(int sock_fd) { struct sockaddr_storage addr; diff --git a/tools/testing/selftests/bpf/network_helpers.h b/tools/testing/selftests/bpf/network_helpers.h index a4e20c12c57ef..ef208eefd571a 100644 --- a/tools/testing/selftests/bpf/network_helpers.h +++ b/tools/testing/selftests/bpf/network_helpers.h @@ -8,6 +8,7 @@ typedef __u16 __sum16; #include #include +#include #include #include #include @@ -86,6 +87,8 @@ int get_socket_local_port(int sock_fd); int get_hw_ring_size(char *ifname, struct ethtool_ringparam *ring_param); int set_hw_ring_size(char *ifname, struct ethtool_ringparam *ring_param); +int open_tuntap(const char *dev_name, bool need_mac); + struct nstoken; /** * open_netns() - Switch to specified network namespace by name. diff --git a/tools/testing/selftests/bpf/prog_tests/lwt_helpers.h b/tools/testing/selftests/bpf/prog_tests/lwt_helpers.h index fb1eb8c673617..ccec0fcdabc11 100644 --- a/tools/testing/selftests/bpf/prog_tests/lwt_helpers.h +++ b/tools/testing/selftests/bpf/prog_tests/lwt_helpers.h @@ -5,7 +5,6 @@ #include #include -#include #include #include "test_progs.h" @@ -37,34 +36,6 @@ static inline int netns_delete(void) return system("ip netns del " NETNS ">/dev/null 2>&1"); } -static int open_tuntap(const char *dev_name, bool need_mac) -{ - int err = 0; - struct ifreq ifr; - int fd = open("/dev/net/tun", O_RDWR); - - if (!ASSERT_GT(fd, 0, "open(/dev/net/tun)")) - return -1; - - ifr.ifr_flags = IFF_NO_PI | (need_mac ? 
IFF_TAP : IFF_TUN); - strncpy(ifr.ifr_name, dev_name, IFNAMSIZ - 1); - ifr.ifr_name[IFNAMSIZ - 1] = '\0'; - - err = ioctl(fd, TUNSETIFF, &ifr); - if (!ASSERT_OK(err, "ioctl(TUNSETIFF)")) { - close(fd); - return -1; - } - - err = fcntl(fd, F_SETFL, O_NONBLOCK); - if (!ASSERT_OK(err, "fcntl(O_NONBLOCK)")) { - close(fd); - return -1; - } - - return fd; -} - #define ICMP_PAYLOAD_SIZE 100 /* Match an ICMP packet with payload len ICMP_PAYLOAD_SIZE */ diff --git a/tools/testing/selftests/bpf/prog_tests/net_timestamping.c b/tools/testing/selftests/bpf/prog_tests/net_timestamping.c new file mode 100644 index 0000000000000..dbfd87499b6b5 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/net_timestamping.c @@ -0,0 +1,239 @@ +#include +#include +#include +#include "test_progs.h" +#include "network_helpers.h" +#include "net_timestamping.skel.h" + +#define CG_NAME "/net-timestamping-test" +#define NSEC_PER_SEC 1000000000LL + +static const char addr4_str[] = "127.0.0.1"; +static const char addr6_str[] = "::1"; +static struct net_timestamping *skel; +static const int cfg_payload_len = 30; +static struct timespec usr_ts; +static u64 delay_tolerance_nsec = 10000000000; /* 10 seconds */ +int SK_TS_SCHED; +int SK_TS_TXSW; +int SK_TS_ACK; + +static int64_t timespec_to_ns64(struct timespec *ts) +{ + return ts->tv_sec * NSEC_PER_SEC + ts->tv_nsec; +} + +static void validate_key(int tskey, int tstype) +{ + static int expected_tskey = -1; + + if (tstype == SCM_TSTAMP_SCHED) + expected_tskey = cfg_payload_len - 1; + + ASSERT_EQ(expected_tskey, tskey, "tskey mismatch"); + + expected_tskey = tskey; +} + +static void validate_timestamp(struct timespec *cur, struct timespec *prev) +{ + int64_t cur_ns, prev_ns; + + cur_ns = timespec_to_ns64(cur); + prev_ns = timespec_to_ns64(prev); + + ASSERT_LT(cur_ns - prev_ns, delay_tolerance_nsec, "latency"); +} + +static void test_socket_timestamp(struct scm_timestamping *tss, int tstype, + int tskey) +{ + static struct timespec prev_ts; + + validate_key(tskey, tstype); + + switch (tstype) { + case SCM_TSTAMP_SCHED: + validate_timestamp(&tss->ts[0], &usr_ts); + SK_TS_SCHED += 1; + break; + case SCM_TSTAMP_SND: + validate_timestamp(&tss->ts[0], &prev_ts); + SK_TS_TXSW += 1; + break; + case SCM_TSTAMP_ACK: + validate_timestamp(&tss->ts[0], &prev_ts); + SK_TS_ACK += 1; + break; + } + + prev_ts = tss->ts[0]; +} + +static void test_recv_errmsg_cmsg(struct msghdr *msg) +{ + struct sock_extended_err *serr = NULL; + struct scm_timestamping *tss = NULL; + struct cmsghdr *cm; + + for (cm = CMSG_FIRSTHDR(msg); + cm && cm->cmsg_len; + cm = CMSG_NXTHDR(msg, cm)) { + if (cm->cmsg_level == SOL_SOCKET && + cm->cmsg_type == SCM_TIMESTAMPING) { + tss = (void *)CMSG_DATA(cm); + } else if ((cm->cmsg_level == SOL_IP && + cm->cmsg_type == IP_RECVERR) || + (cm->cmsg_level == SOL_IPV6 && + cm->cmsg_type == IPV6_RECVERR) || + (cm->cmsg_level == SOL_PACKET && + cm->cmsg_type == PACKET_TX_TIMESTAMP)) { + serr = (void *)CMSG_DATA(cm); + ASSERT_EQ(serr->ee_origin, SO_EE_ORIGIN_TIMESTAMPING, + "cmsg type"); + } + + if (serr && tss) + test_socket_timestamp(tss, serr->ee_info, + serr->ee_data); + } +} + +static bool socket_recv_errmsg(int fd) +{ + static char ctrl[1024 /* overprovision*/]; + char data[cfg_payload_len]; + static struct msghdr msg; + struct iovec entry; + int n = 0; + + memset(&msg, 0, sizeof(msg)); + memset(&entry, 0, sizeof(entry)); + memset(ctrl, 0, sizeof(ctrl)); + + entry.iov_base = data; + entry.iov_len = cfg_payload_len; + msg.msg_iov = &entry; + msg.msg_iovlen = 1; + msg.msg_name = 
NULL; + msg.msg_namelen = 0; + msg.msg_control = ctrl; + msg.msg_controllen = sizeof(ctrl); + + n = recvmsg(fd, &msg, MSG_ERRQUEUE); + if (n == -1) + ASSERT_EQ(errno, EAGAIN, "recvmsg MSG_ERRQUEUE"); + + if (n >= 0) + test_recv_errmsg_cmsg(&msg); + + return n == -1; +} + +static void test_socket_timestamping(int fd) +{ + while (!socket_recv_errmsg(fd)); + + ASSERT_EQ(SK_TS_SCHED, 1, "SCM_TSTAMP_SCHED"); + ASSERT_EQ(SK_TS_TXSW, 1, "SCM_TSTAMP_SND"); + ASSERT_EQ(SK_TS_ACK, 1, "SCM_TSTAMP_ACK"); + + SK_TS_SCHED = 0; + SK_TS_TXSW = 0; + SK_TS_ACK = 0; +} + +static void test_tcp(int family, bool enable_socket_timestamping) +{ + struct net_timestamping__bss *bss; + char buf[cfg_payload_len]; + int sfd = -1, cfd = -1; + unsigned int sock_opt; + struct netns_obj *ns; + int cg_fd; + int ret; + + cg_fd = test__join_cgroup(CG_NAME); + if (!ASSERT_OK_FD(cg_fd, "join cgroup")) + return; + + ns = netns_new("net_timestamping_ns", true); + if (!ASSERT_OK_PTR(ns, "create ns")) + goto out; + + skel = net_timestamping__open_and_load(); + if (!ASSERT_OK_PTR(skel, "open and load skel")) + goto out; + + if (!ASSERT_OK(net_timestamping__attach(skel), "attach skel")) + goto out; + + skel->links.skops_sockopt = + bpf_program__attach_cgroup(skel->progs.skops_sockopt, cg_fd); + if (!ASSERT_OK_PTR(skel->links.skops_sockopt, "attach cgroup")) + goto out; + + bss = skel->bss; + memset(bss, 0, sizeof(*bss)); + + skel->bss->monitored_pid = getpid(); + + sfd = start_server(family, SOCK_STREAM, + family == AF_INET6 ? addr6_str : addr4_str, 0, 0); + if (!ASSERT_OK_FD(sfd, "start_server")) + goto out; + + cfd = connect_to_fd(sfd, 0); + if (!ASSERT_OK_FD(cfd, "connect_to_fd_server")) + goto out; + + if (enable_socket_timestamping) { + sock_opt = SOF_TIMESTAMPING_SOFTWARE | + SOF_TIMESTAMPING_OPT_ID | + SOF_TIMESTAMPING_TX_SCHED | + SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_TX_ACK; + ret = setsockopt(cfd, SOL_SOCKET, SO_TIMESTAMPING, + (char *) &sock_opt, sizeof(sock_opt)); + if (!ASSERT_OK(ret, "setsockopt SO_TIMESTAMPING")) + goto out; + + ret = clock_gettime(CLOCK_REALTIME, &usr_ts); + if (!ASSERT_OK(ret, "get user time")) + goto out; + } + + ret = write(cfd, buf, sizeof(buf)); + if (!ASSERT_EQ(ret, sizeof(buf), "send to server")) + goto out; + + if (enable_socket_timestamping) + test_socket_timestamping(cfd); + + ASSERT_EQ(bss->nr_active, 1, "nr_active"); + ASSERT_EQ(bss->nr_snd, 2, "nr_snd"); + ASSERT_EQ(bss->nr_sched, 1, "nr_sched"); + ASSERT_EQ(bss->nr_txsw, 1, "nr_txsw"); + ASSERT_EQ(bss->nr_ack, 1, "nr_ack"); + +out: + if (sfd >= 0) + close(sfd); + if (cfd >= 0) + close(cfd); + net_timestamping__destroy(skel); + netns_free(ns); + close(cg_fd); +} + +void test_net_timestamping(void) +{ + if (test__start_subtest("INET4: bpf timestamping")) + test_tcp(AF_INET, false); + if (test__start_subtest("INET4: bpf and socket timestamping")) + test_tcp(AF_INET, true); + if (test__start_subtest("INET6: bpf timestamping")) + test_tcp(AF_INET6, false); + if (test__start_subtest("INET6: bpf and socket timestamping")) + test_tcp(AF_INET6, true); +} diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_context_test_run.c b/tools/testing/selftests/bpf/prog_tests/xdp_context_test_run.c index 937da9b7532a5..b9d9f0a502cea 100644 --- a/tools/testing/selftests/bpf/prog_tests/xdp_context_test_run.c +++ b/tools/testing/selftests/bpf/prog_tests/xdp_context_test_run.c @@ -4,12 +4,20 @@ #include "test_xdp_context_test_run.skel.h" #include "test_xdp_meta.skel.h" -#define TX_ADDR "10.0.0.1" -#define RX_ADDR "10.0.0.2" #define RX_NAME 
"veth0" #define TX_NAME "veth1" #define TX_NETNS "xdp_context_tx" #define RX_NETNS "xdp_context_rx" +#define TAP_NAME "tap0" +#define TAP_NETNS "xdp_context_tuntap" + +#define TEST_PAYLOAD_LEN 32 +static const __u8 test_payload[TEST_PAYLOAD_LEN] = { + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, + 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, + 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, + 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, +}; void test_xdp_context_error(int prog_fd, struct bpf_test_run_opts opts, __u32 data_meta, __u32 data, __u32 data_end, @@ -112,7 +120,59 @@ void test_xdp_context_test_run(void) test_xdp_context_test_run__destroy(skel); } -void test_xdp_context_functional(void) +static int send_test_packet(int ifindex) +{ + int n, sock = -1; + __u8 packet[sizeof(struct ethhdr) + TEST_PAYLOAD_LEN]; + + /* The ethernet header is not relevant for this test and doesn't need to + * be meaningful. + */ + struct ethhdr eth = { 0 }; + + memcpy(packet, ð, sizeof(eth)); + memcpy(packet + sizeof(eth), test_payload, TEST_PAYLOAD_LEN); + + sock = socket(AF_PACKET, SOCK_RAW, IPPROTO_RAW); + if (!ASSERT_GE(sock, 0, "socket")) + goto err; + + struct sockaddr_ll saddr = { + .sll_family = PF_PACKET, + .sll_ifindex = ifindex, + .sll_halen = ETH_ALEN + }; + n = sendto(sock, packet, sizeof(packet), 0, (struct sockaddr *)&saddr, + sizeof(saddr)); + if (!ASSERT_EQ(n, sizeof(packet), "sendto")) + goto err; + + close(sock); + return 0; + +err: + if (sock >= 0) + close(sock); + return -1; +} + +static void assert_test_result(struct test_xdp_meta *skel) +{ + int err; + __u32 map_key = 0; + __u8 map_value[TEST_PAYLOAD_LEN]; + + err = bpf_map__lookup_elem(skel->maps.test_result, &map_key, + sizeof(map_key), &map_value, + TEST_PAYLOAD_LEN, BPF_ANY); + if (!ASSERT_OK(err, "lookup test_result")) + return; + + ASSERT_MEMEQ(&map_value, &test_payload, TEST_PAYLOAD_LEN, + "test_result map contains test payload"); +} + +void test_xdp_context_veth(void) { LIBBPF_OPTS(bpf_tc_hook, tc_hook, .attach_point = BPF_TC_INGRESS); LIBBPF_OPTS(bpf_tc_opts, tc_opts, .handle = 1, .priority = 1); @@ -120,7 +180,7 @@ void test_xdp_context_functional(void) struct bpf_program *tc_prog, *xdp_prog; struct test_xdp_meta *skel = NULL; struct nstoken *nstoken = NULL; - int rx_ifindex; + int rx_ifindex, tx_ifindex; int ret; tx_ns = netns_new(TX_NETNS, false); @@ -138,7 +198,6 @@ void test_xdp_context_functional(void) if (!ASSERT_OK_PTR(nstoken, "setns rx_ns")) goto close; - SYS(close, "ip addr add " RX_ADDR "/24 dev " RX_NAME); SYS(close, "ip link set dev " RX_NAME " up"); skel = test_xdp_meta__open_and_load(); @@ -179,9 +238,17 @@ void test_xdp_context_functional(void) if (!ASSERT_OK_PTR(nstoken, "setns tx_ns")) goto close; - SYS(close, "ip addr add " TX_ADDR "/24 dev " TX_NAME); SYS(close, "ip link set dev " TX_NAME " up"); - ASSERT_OK(SYS_NOFAIL("ping -c 1 " RX_ADDR), "ping"); + + tx_ifindex = if_nametoindex(TX_NAME); + if (!ASSERT_GE(tx_ifindex, 0, "if_nametoindex tx")) + goto close; + + ret = send_test_packet(tx_ifindex); + if (!ASSERT_OK(ret, "send_test_packet")) + goto close; + + assert_test_result(skel); close: close_netns(nstoken); @@ -190,3 +257,67 @@ void test_xdp_context_functional(void) netns_free(tx_ns); } +void test_xdp_context_tuntap(void) +{ + LIBBPF_OPTS(bpf_tc_hook, tc_hook, .attach_point = BPF_TC_INGRESS); + LIBBPF_OPTS(bpf_tc_opts, tc_opts, .handle = 1, .priority = 1); + struct netns_obj *ns = NULL; + struct test_xdp_meta *skel = NULL; + __u8 packet[sizeof(struct ethhdr) + TEST_PAYLOAD_LEN]; + int 
tap_fd = -1; + int tap_ifindex; + int ret; + + ns = netns_new(TAP_NETNS, true); + if (!ASSERT_OK_PTR(ns, "create and open ns")) + return; + + tap_fd = open_tuntap(TAP_NAME, true); + if (!ASSERT_GE(tap_fd, 0, "open_tuntap")) + goto close; + + SYS(close, "ip link set dev " TAP_NAME " up"); + + skel = test_xdp_meta__open_and_load(); + if (!ASSERT_OK_PTR(skel, "open and load skeleton")) + goto close; + + tap_ifindex = if_nametoindex(TAP_NAME); + if (!ASSERT_GE(tap_ifindex, 0, "if_nametoindex")) + goto close; + + tc_hook.ifindex = tap_ifindex; + ret = bpf_tc_hook_create(&tc_hook); + if (!ASSERT_OK(ret, "bpf_tc_hook_create")) + goto close; + + tc_opts.prog_fd = bpf_program__fd(skel->progs.ing_cls); + ret = bpf_tc_attach(&tc_hook, &tc_opts); + if (!ASSERT_OK(ret, "bpf_tc_attach")) + goto close; + + ret = bpf_xdp_attach(tap_ifindex, bpf_program__fd(skel->progs.ing_xdp), + 0, NULL); + if (!ASSERT_GE(ret, 0, "bpf_xdp_attach")) + goto close; + + /* The ethernet header is not relevant for this test and doesn't need to + * be meaningful. + */ + struct ethhdr eth = { 0 }; + + memcpy(packet, &eth, sizeof(eth)); + memcpy(packet + sizeof(eth), test_payload, TEST_PAYLOAD_LEN); + + ret = write(tap_fd, packet, sizeof(packet)); + if (!ASSERT_EQ(ret, sizeof(packet), "write packet")) + goto close; + + assert_test_result(skel); + +close: + if (tap_fd >= 0) + close(tap_fd); + test_xdp_meta__destroy(skel); + netns_free(ns); +} diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_tcp4.c b/tools/testing/selftests/bpf/progs/bpf_iter_tcp4.c index d22449c69363a..164640db3a29c 100644 --- a/tools/testing/selftests/bpf/progs/bpf_iter_tcp4.c +++ b/tools/testing/selftests/bpf/progs/bpf_iter_tcp4.c @@ -99,10 +99,10 @@ static int dump_tcp_sock(struct seq_file *seq, struct tcp_sock *tp, icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT || icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) { timer_active = 1; - timer_expires = icsk->icsk_timeout; + timer_expires = icsk->icsk_retransmit_timer.expires; } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) { timer_active = 4; - timer_expires = icsk->icsk_timeout; + timer_expires = icsk->icsk_retransmit_timer.expires; } else if (timer_pending(&sp->sk_timer)) { timer_active = 2; timer_expires = sp->sk_timer.expires; diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_tcp6.c b/tools/testing/selftests/bpf/progs/bpf_iter_tcp6.c index 8b072666f9d9a..591c703f5032f 100644 --- a/tools/testing/selftests/bpf/progs/bpf_iter_tcp6.c +++ b/tools/testing/selftests/bpf/progs/bpf_iter_tcp6.c @@ -99,10 +99,10 @@ static int dump_tcp6_sock(struct seq_file *seq, struct tcp6_sock *tp, icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT || icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) { timer_active = 1; - timer_expires = icsk->icsk_timeout; + timer_expires = icsk->icsk_retransmit_timer.expires; } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) { timer_active = 4; - timer_expires = icsk->icsk_timeout; + timer_expires = icsk->icsk_retransmit_timer.expires; } else if (timer_pending(&sp->sk_timer)) { timer_active = 2; timer_expires = sp->sk_timer.expires; diff --git a/tools/testing/selftests/bpf/progs/bpf_tracing_net.h b/tools/testing/selftests/bpf/progs/bpf_tracing_net.h index bcd44d5018bf7..659694162739e 100644 --- a/tools/testing/selftests/bpf/progs/bpf_tracing_net.h +++ b/tools/testing/selftests/bpf/progs/bpf_tracing_net.h @@ -53,6 +53,7 @@ #define TCP_SAVED_SYN 28 #define TCP_CA_NAME_MAX 16 #define TCP_NAGLE_OFF 1 +#define TCP_RTO_MAX_MS 44 #define TCP_ECN_OK 1 #define TCP_ECN_QUEUE_CWR 2 diff --git
a/tools/testing/selftests/bpf/progs/net_timestamping.c b/tools/testing/selftests/bpf/progs/net_timestamping.c new file mode 100644 index 0000000000000..b4c2f0f2be114 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/net_timestamping.c @@ -0,0 +1,248 @@ +#include "vmlinux.h" +#include "bpf_tracing_net.h" +#include +#include +#include "bpf_misc.h" +#include "bpf_kfuncs.h" +#include + +__u32 monitored_pid = 0; + +int nr_active; +int nr_snd; +int nr_passive; +int nr_sched; +int nr_txsw; +int nr_ack; + +struct sk_stg { + __u64 sendmsg_ns; /* record ts when sendmsg is called */ +}; + +struct sk_tskey { + u64 cookie; + u32 tskey; +}; + +struct delay_info { + u64 sendmsg_ns; /* record ts when sendmsg is called */ + u32 sched_delay; /* SCHED_CB - sendmsg_ns */ + u32 snd_sw_delay; /* SND_SW_CB - SCHED_CB */ + u32 ack_delay; /* ACK_CB - SND_SW_CB */ +}; + +struct { + __uint(type, BPF_MAP_TYPE_SK_STORAGE); + __uint(map_flags, BPF_F_NO_PREALLOC); + __type(key, int); + __type(value, struct sk_stg); +} sk_stg_map SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __type(key, struct sk_tskey); + __type(value, struct delay_info); + __uint(max_entries, 1024); +} time_map SEC(".maps"); + +static u64 delay_tolerance_nsec = 10000000000; /* 10 seconds as an example */ + +extern int bpf_sock_ops_enable_tx_tstamp(struct bpf_sock_ops_kern *skops, u64 flags) __ksym; + +static int bpf_test_sockopt(void *ctx, const struct sock *sk, int expected) +{ + int tmp, new = SK_BPF_CB_TX_TIMESTAMPING; + int opt = SK_BPF_CB_FLAGS; + int level = SOL_SOCKET; + + if (bpf_setsockopt(ctx, level, opt, &new, sizeof(new)) != expected) + return 1; + + if (bpf_getsockopt(ctx, level, opt, &tmp, sizeof(tmp)) != expected || + (!expected && tmp != new)) + return 1; + + return 0; +} + +static bool bpf_test_access_sockopt(void *ctx, const struct sock *sk) +{ + if (bpf_test_sockopt(ctx, sk, -EOPNOTSUPP)) + return true; + return false; +} + +static bool bpf_test_access_load_hdr_opt(struct bpf_sock_ops *skops) +{ + u8 opt[3] = {0}; + int load_flags = 0; + int ret; + + ret = bpf_load_hdr_opt(skops, opt, sizeof(opt), load_flags); + if (ret != -EOPNOTSUPP) + return true; + + return false; +} + +static bool bpf_test_access_cb_flags_set(struct bpf_sock_ops *skops) +{ + int ret; + + ret = bpf_sock_ops_cb_flags_set(skops, 0); + if (ret != -EOPNOTSUPP) + return true; + + return false; +} + +/* In the timestamping callbacks, we're not allowed to call the following + * BPF CALLs for safety reasons. Return false if they all behave as expected.
+ */ +static bool bpf_test_access_bpf_calls(struct bpf_sock_ops *skops, + const struct sock *sk) +{ + if (bpf_test_access_sockopt(skops, sk)) + return true; + + if (bpf_test_access_load_hdr_opt(skops)) + return true; + + if (bpf_test_access_cb_flags_set(skops)) + return true; + + return false; +} + +static bool bpf_test_delay(struct bpf_sock_ops *skops, const struct sock *sk) +{ + struct bpf_sock_ops_kern *skops_kern; + u64 timestamp = bpf_ktime_get_ns(); + struct skb_shared_info *shinfo; + struct delay_info dinfo = {0}; + struct sk_tskey key = {0}; + struct delay_info *val; + struct sk_buff *skb; + struct sk_stg *stg; + u64 prior_ts, delay; + + if (bpf_test_access_bpf_calls(skops, sk)) + return false; + + skops_kern = bpf_cast_to_kern_ctx(skops); + skb = skops_kern->skb; + shinfo = bpf_core_cast(skb->head + skb->end, struct skb_shared_info); + + key.cookie = bpf_get_socket_cookie(skops); + if (!key.cookie) + return false; + + if (skops->op == BPF_SOCK_OPS_TSTAMP_SENDMSG_CB) { + stg = bpf_sk_storage_get(&sk_stg_map, (void *)sk, 0, 0); + if (!stg) + return false; + dinfo.sendmsg_ns = stg->sendmsg_ns; + bpf_sock_ops_enable_tx_tstamp(skops_kern, 0); + key.tskey = shinfo->tskey; + if (!key.tskey) + return false; + bpf_map_update_elem(&time_map, &key, &dinfo, BPF_ANY); + return true; + } + + key.tskey = shinfo->tskey; + if (!key.tskey) + return false; + + val = bpf_map_lookup_elem(&time_map, &key); + if (!val) + return false; + + switch (skops->op) { + case BPF_SOCK_OPS_TSTAMP_SCHED_CB: + val->sched_delay = timestamp - val->sendmsg_ns; + delay = val->sched_delay; + break; + case BPF_SOCK_OPS_TSTAMP_SND_SW_CB: + prior_ts = val->sched_delay + val->sendmsg_ns; + val->snd_sw_delay = timestamp - prior_ts; + delay = val->snd_sw_delay; + break; + case BPF_SOCK_OPS_TSTAMP_ACK_CB: + prior_ts = val->snd_sw_delay + val->sched_delay + val->sendmsg_ns; + val->ack_delay = timestamp - prior_ts; + delay = val->ack_delay; + break; + } + + if (delay >= delay_tolerance_nsec) + return false; + + /* Since it's the last one, remove from the map after latency check */ + if (skops->op == BPF_SOCK_OPS_TSTAMP_ACK_CB) + bpf_map_delete_elem(&time_map, &key); + + return true; +} + +SEC("fentry/tcp_sendmsg_locked") +int BPF_PROG(trace_tcp_sendmsg_locked, struct sock *sk, struct msghdr *msg, + size_t size) +{ + __u32 pid = bpf_get_current_pid_tgid() >> 32; + u64 timestamp = bpf_ktime_get_ns(); + u32 flag = sk->sk_bpf_cb_flags; + struct sk_stg *stg; + + if (pid != monitored_pid || !flag) + return 0; + + stg = bpf_sk_storage_get(&sk_stg_map, sk, 0, + BPF_SK_STORAGE_GET_F_CREATE); + if (!stg) + return 0; + + stg->sendmsg_ns = timestamp; + nr_snd += 1; + return 0; +} + +SEC("sockops") +int skops_sockopt(struct bpf_sock_ops *skops) +{ + struct bpf_sock *bpf_sk = skops->sk; + const struct sock *sk; + + if (!bpf_sk) + return 1; + + sk = (struct sock *)bpf_skc_to_tcp_sock(bpf_sk); + if (!sk) + return 1; + + switch (skops->op) { + case BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB: + nr_active += !bpf_test_sockopt(skops, sk, 0); + break; + case BPF_SOCK_OPS_TSTAMP_SENDMSG_CB: + if (bpf_test_delay(skops, sk)) + nr_snd += 1; + break; + case BPF_SOCK_OPS_TSTAMP_SCHED_CB: + if (bpf_test_delay(skops, sk)) + nr_sched += 1; + break; + case BPF_SOCK_OPS_TSTAMP_SND_SW_CB: + if (bpf_test_delay(skops, sk)) + nr_txsw += 1; + break; + case BPF_SOCK_OPS_TSTAMP_ACK_CB: + if (bpf_test_delay(skops, sk)) + nr_ack += 1; + break; + } + + return 1; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/setget_sockopt.c 
b/tools/testing/selftests/bpf/progs/setget_sockopt.c index 6dd4318debbfb..0107a24b7522b 100644 --- a/tools/testing/selftests/bpf/progs/setget_sockopt.c +++ b/tools/testing/selftests/bpf/progs/setget_sockopt.c @@ -61,6 +61,9 @@ static const struct sockopt_test sol_tcp_tests[] = { { .opt = TCP_NOTSENT_LOWAT, .new = 1314, .expected = 1314, }, { .opt = TCP_BPF_SOCK_OPS_CB_FLAGS, .new = BPF_SOCK_OPS_ALL_CB_FLAGS, .expected = BPF_SOCK_OPS_ALL_CB_FLAGS, }, + { .opt = TCP_BPF_DELACK_MAX, .new = 30000, .expected = 30000, }, + { .opt = TCP_BPF_RTO_MIN, .new = 30000, .expected = 30000, }, + { .opt = TCP_RTO_MAX_MS, .new = 2000, .expected = 2000, }, { .opt = 0, }, }; diff --git a/tools/testing/selftests/bpf/progs/test_xdp_meta.c b/tools/testing/selftests/bpf/progs/test_xdp_meta.c index fe2d71ae0e717..fcf6ca14f2ea2 100644 --- a/tools/testing/selftests/bpf/progs/test_xdp_meta.c +++ b/tools/testing/selftests/bpf/progs/test_xdp_meta.c @@ -4,37 +4,50 @@ #include -#define __round_mask(x, y) ((__typeof__(x))((y) - 1)) -#define round_up(x, y) ((((x) - 1) | __round_mask(x, y)) + 1) +#define META_SIZE 32 + #define ctx_ptr(ctx, mem) (void *)(unsigned long)ctx->mem +/* Demonstrates how metadata can be passed from an XDP program to a TC program + * using bpf_xdp_adjust_meta. + * For the sake of testing the metadata support in drivers, the XDP program uses + * a fixed-size payload after the Ethernet header as metadata. The TC program + * copies the metadata it receives into a map so it can be checked from + * userspace. + */ + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(max_entries, 1); + __type(key, __u32); + __uint(value_size, META_SIZE); +} test_result SEC(".maps"); + SEC("tc") int ing_cls(struct __sk_buff *ctx) { - __u8 *data, *data_meta, *data_end; - __u32 diff = 0; + __u8 *data, *data_meta; + __u32 key = 0; data_meta = ctx_ptr(ctx, data_meta); - data_end = ctx_ptr(ctx, data_end); data = ctx_ptr(ctx, data); - if (data + ETH_ALEN > data_end || - data_meta + round_up(ETH_ALEN, 4) > data) + if (data_meta + META_SIZE > data) return TC_ACT_SHOT; - diff |= ((__u32 *)data_meta)[0] ^ ((__u32 *)data)[0]; - diff |= ((__u16 *)data_meta)[2] ^ ((__u16 *)data)[2]; + bpf_map_update_elem(&test_result, &key, data_meta, BPF_ANY); - return diff ? TC_ACT_SHOT : TC_ACT_OK; + return TC_ACT_SHOT; } SEC("xdp") int ing_xdp(struct xdp_md *ctx) { - __u8 *data, *data_meta, *data_end; + __u8 *data, *data_meta, *data_end, *payload; + struct ethhdr *eth; int ret; - ret = bpf_xdp_adjust_meta(ctx, -round_up(ETH_ALEN, 4)); + ret = bpf_xdp_adjust_meta(ctx, -META_SIZE); if (ret < 0) return XDP_DROP; @@ -42,11 +55,21 @@ int ing_xdp(struct xdp_md *ctx) data_end = ctx_ptr(ctx, data_end); data = ctx_ptr(ctx, data); - if (data + ETH_ALEN > data_end || - data_meta + round_up(ETH_ALEN, 4) > data) + eth = (struct ethhdr *)data; + payload = data + sizeof(struct ethhdr); + + if (payload + META_SIZE > data_end || + data_meta + META_SIZE > data) + return XDP_DROP; + + /* The Linux networking stack may send other packets on the test + * interface that interfere with the test. Just drop them. + * The test packets can be recognized by their ethertype of zero. 
+ */ + if (eth->h_proto != 0) return XDP_DROP; - __builtin_memcpy(data_meta, data, ETH_ALEN); + __builtin_memcpy(data_meta, payload, META_SIZE); return XDP_PASS; } diff --git a/tools/testing/selftests/bpf/xdp_hw_metadata.c b/tools/testing/selftests/bpf/xdp_hw_metadata.c index 6f7b15d6c6ed2..3d8de0d4c96a7 100644 --- a/tools/testing/selftests/bpf/xdp_hw_metadata.c +++ b/tools/testing/selftests/bpf/xdp_hw_metadata.c @@ -13,6 +13,7 @@ * - UDP 9091 packets trigger TX reply * - TX HW timestamp is requested and reported back upon completion * - TX checksum is requested + * - TX launch time HW offload is requested for transmission */ #include @@ -37,6 +38,15 @@ #include #include #include +#include +#include +#include +#include +#include +#include +#include +#include +#include #include "xdp_metadata.h" @@ -64,6 +74,18 @@ int rxq; bool skip_tx; __u64 last_hw_rx_timestamp; __u64 last_xdp_rx_timestamp; +__u64 last_launch_time; +__u64 launch_time_delta_to_hw_rx_timestamp; +int launch_time_queue; + +#define run_command(cmd, ...) \ +({ \ + char command[1024]; \ + memset(command, 0, sizeof(command)); \ + snprintf(command, sizeof(command), cmd, ##__VA_ARGS__); \ + fprintf(stderr, "Running: %s\n", command); \ + system(command); \ +}) void test__fail(void) { /* for network_helpers.c */ } @@ -298,6 +320,12 @@ static bool complete_tx(struct xsk *xsk, clockid_t clock_id) if (meta->completion.tx_timestamp) { __u64 ref_tstamp = gettime(clock_id); + if (launch_time_delta_to_hw_rx_timestamp) { + print_tstamp_delta("HW Launch-time", + "HW TX-complete-time", + last_launch_time, + meta->completion.tx_timestamp); + } print_tstamp_delta("HW TX-complete-time", "User TX-complete-time", meta->completion.tx_timestamp, ref_tstamp); print_tstamp_delta("XDP RX-time", "User TX-complete-time", @@ -395,6 +423,17 @@ static void ping_pong(struct xsk *xsk, void *rx_packet, clockid_t clock_id) xsk, ntohs(udph->check), ntohs(want_csum), meta->request.csum_start, meta->request.csum_offset); + /* Set the value of launch time */ + if (launch_time_delta_to_hw_rx_timestamp) { + meta->flags |= XDP_TXMD_FLAGS_LAUNCH_TIME; + meta->request.launch_time = last_hw_rx_timestamp + + launch_time_delta_to_hw_rx_timestamp; + last_launch_time = meta->request.launch_time; + print_tstamp_delta("HW RX-time", "HW Launch-time", + last_hw_rx_timestamp, + meta->request.launch_time); + } + memcpy(data, rx_packet, len); /* don't share umem chunk for simplicity */ tx_desc->options |= XDP_TX_METADATA; tx_desc->len = len; @@ -407,6 +446,7 @@ static int verify_metadata(struct xsk *rx_xsk, int rxq, int server_fd, clockid_t const struct xdp_desc *rx_desc; struct pollfd fds[rxq + 1]; __u64 comp_addr; + __u64 deadline; __u64 addr; __u32 idx = 0; int ret; @@ -477,9 +517,15 @@ static int verify_metadata(struct xsk *rx_xsk, int rxq, int server_fd, clockid_t if (ret) printf("kick_tx ret=%d\n", ret); - for (int j = 0; j < 500; j++) { + /* wait 1 second + cover launch time */ + deadline = gettime(clock_id) + + NANOSEC_PER_SEC + + launch_time_delta_to_hw_rx_timestamp; + while (true) { if (complete_tx(xsk, clock_id)) break; + if (gettime(clock_id) >= deadline) + break; usleep(10); } } @@ -608,6 +654,10 @@ static void print_usage(void) " -h Display this help and exit\n\n" " -m Enable multi-buffer XDP for larger MTU\n" " -r Don't generate AF_XDP reply (rx metadata only)\n" + " -l Delta of launch time relative to HW RX-time in ns\n" + " default: 0 ns (launch time request is disabled)\n" + " -L Tx Queue to be enabled with launch time offload\n" + " default: 0 (Tx Queue 0)\n" 
"Generate test packets on the other machine with:\n" " echo -n xdp | nc -u -q1 9091\n"; @@ -618,7 +668,7 @@ static void read_args(int argc, char *argv[]) { int opt; - while ((opt = getopt(argc, argv, "chmr")) != -1) { + while ((opt = getopt(argc, argv, "chmrl:L:")) != -1) { switch (opt) { case 'c': bind_flags &= ~XDP_USE_NEED_WAKEUP; @@ -634,6 +684,12 @@ static void read_args(int argc, char *argv[]) case 'r': skip_tx = true; break; + case 'l': + launch_time_delta_to_hw_rx_timestamp = atoll(optarg); + break; + case 'L': + launch_time_queue = atoll(optarg); + break; case '?': if (isprint(optopt)) fprintf(stderr, "Unknown option: -%c\n", optopt); @@ -657,23 +713,118 @@ static void read_args(int argc, char *argv[]) error(-1, errno, "Invalid interface name"); } +void clean_existing_configurations(void) +{ + /* Check and delete root qdisc if exists */ + if (run_command("sudo tc qdisc show dev %s | grep -q 'qdisc mqprio 8001:'", ifname) == 0) + run_command("sudo tc qdisc del dev %s root", ifname); + + /* Check and delete ingress qdisc if exists */ + if (run_command("sudo tc qdisc show dev %s | grep -q 'qdisc ingress ffff:'", ifname) == 0) + run_command("sudo tc qdisc del dev %s ingress", ifname); + + /* Check and delete ethtool filters if any exist */ + if (run_command("sudo ethtool -n %s | grep -q 'Filter:'", ifname) == 0) { + run_command("sudo ethtool -n %s | grep 'Filter:' | awk '{print $2}' | xargs -n1 sudo ethtool -N %s delete >&2", + ifname, ifname); + } +} + +#define MAX_TC 16 + int main(int argc, char *argv[]) { clockid_t clock_id = CLOCK_TAI; + struct bpf_program *prog; int server_fd = -1; + size_t map_len = 0; + size_t que_len = 0; + char *buf = NULL; + char *map = NULL; + char *que = NULL; + char *tmp = NULL; + int tc = 0; int ret; int i; - struct bpf_program *prog; - read_args(argc, argv); rxq = rxq_num(ifname); - printf("rxq: %d\n", rxq); + if (launch_time_queue >= rxq || launch_time_queue < 0) + error(1, 0, "Invalid launch_time_queue."); + + clean_existing_configurations(); + sleep(1); + + /* Enable tx and rx hardware timestamping */ hwtstamp_enable(ifname); + /* Prepare priority to traffic class map for tc-mqprio */ + for (i = 0; i < MAX_TC; i++) { + if (i < rxq) + tc = i; + + if (asprintf(&buf, "%d ", tc) == -1) { + printf("Failed to malloc buf for tc map.\n"); + goto free_mem; + } + + map_len += strlen(buf); + tmp = realloc(map, map_len + 1); + if (!tmp) { + printf("Failed to realloc tc map.\n"); + goto free_mem; + } + map = tmp; + strcat(map, buf); + free(buf); + buf = NULL; + } + + /* Prepare traffic class to hardware queue map for tc-mqprio */ + for (i = 0; i <= tc; i++) { + if (asprintf(&buf, "1@%d ", i) == -1) { + printf("Failed to malloc buf for tc queues.\n"); + goto free_mem; + } + + que_len += strlen(buf); + tmp = realloc(que, que_len + 1); + if (!tmp) { + printf("Failed to realloc tc queues.\n"); + goto free_mem; + } + que = tmp; + strcat(que, buf); + free(buf); + buf = NULL; + } + + /* Add mqprio qdisc */ + run_command("sudo tc qdisc add dev %s handle 8001: parent root mqprio num_tc %d map %squeues %shw 0", + ifname, tc + 1, map, que); + + /* To test launch time, send UDP packet with VLAN priority 1 to port 9091 */ + if (launch_time_delta_to_hw_rx_timestamp) { + /* Enable launch time hardware offload on launch_time_queue */ + run_command("sudo tc qdisc replace dev %s parent 8001:%d etf offload clockid CLOCK_TAI delta 500000", + ifname, launch_time_queue + 1); + sleep(1); + + /* Route incoming packet with VLAN priority 1 into launch_time_queue */ + if (run_command("sudo 
ethtool -N %s flow-type ether vlan 0x2000 vlan-mask 0x1FFF action %d", + ifname, launch_time_queue)) { + run_command("sudo tc qdisc add dev %s ingress", ifname); + run_command("sudo tc filter add dev %s parent ffff: protocol 802.1Q flower vlan_prio 1 hw_tc %d", + ifname, launch_time_queue); + } + + /* Enable VLAN tag stripping offload */ + run_command("sudo ethtool -K %s rxvlan on", ifname); + } + rx_xsk = malloc(sizeof(struct xsk) * rxq); if (!rx_xsk) error(1, ENOMEM, "malloc"); @@ -733,4 +884,11 @@ int main(int argc, char *argv[]) cleanup(); if (ret) error(1, -ret, "verify_metadata"); + + clean_existing_configurations(); + +free_mem: + free(buf); + free(map); + free(que); } diff --git a/tools/testing/selftests/damon/damon_nr_regions.py b/tools/testing/selftests/damon/damon_nr_regions.py index 2e8a74aff5431..58f3291fed12a 100755 --- a/tools/testing/selftests/damon/damon_nr_regions.py +++ b/tools/testing/selftests/damon/damon_nr_regions.py @@ -65,6 +65,7 @@ def test_nr_regions(real_nr_regions, min_nr_regions, max_nr_regions): test_name = 'nr_regions test with %d/%d/%d real/min/max nr_regions' % ( real_nr_regions, min_nr_regions, max_nr_regions) + collected_nr_regions.sort() if (collected_nr_regions[0] < min_nr_regions or collected_nr_regions[-1] > max_nr_regions): print('fail %s' % test_name) @@ -109,6 +110,7 @@ def main(): attrs = kdamonds.kdamonds[0].contexts[0].monitoring_attrs attrs.min_nr_regions = 3 attrs.max_nr_regions = 7 + attrs.update_us = 100000 err = kdamonds.kdamonds[0].commit() if err is not None: proc.terminate() diff --git a/tools/testing/selftests/damon/damos_quota.py b/tools/testing/selftests/damon/damos_quota.py index 7d4c6bb2e3cd2..57c4937aaed28 100755 --- a/tools/testing/selftests/damon/damos_quota.py +++ b/tools/testing/selftests/damon/damos_quota.py @@ -51,16 +51,19 @@ def main(): nr_quota_exceeds = scheme.stats.qt_exceeds wss_collected.sort() + nr_expected_quota_exceeds = 0 for wss in wss_collected: if wss > sz_quota: print('quota is not kept: %s > %s' % (wss, sz_quota)) print('collected samples are as below') print('\n'.join(['%d' % wss for wss in wss_collected])) exit(1) + if wss == sz_quota: + nr_expected_quota_exceeds += 1 - if nr_quota_exceeds < len(wss_collected): - print('quota is not always exceeded: %d > %d' % - (len(wss_collected), nr_quota_exceeds)) + if nr_quota_exceeds < nr_expected_quota_exceeds: + print('quota is exceeded less than expected: %d < %d' % - (nr_quota_exceeds, nr_expected_quota_exceeds)) exit(1) if __name__ == '__main__': diff --git a/tools/testing/selftests/damon/damos_quota_goal.py b/tools/testing/selftests/damon/damos_quota_goal.py index 18246f3b62f7e..f76e0412b564c 100755 --- a/tools/testing/selftests/damon/damos_quota_goal.py +++ b/tools/testing/selftests/damon/damos_quota_goal.py @@ -63,6 +63,9 @@ def main(): if last_effective_bytes != 0 else -1.0)) if last_effective_bytes == goal.effective_bytes: + # effective quota was already at the minimum and cannot be reduced further + if expect_increase is False and last_effective_bytes == 1: + continue print('effective bytes not changed: %d' % goal.effective_bytes) exit(1) diff --git a/tools/testing/selftests/drivers/net/.gitignore b/tools/testing/selftests/drivers/net/.gitignore new file mode 100644 index 0000000000000..ec746f374e858 --- /dev/null +++ b/tools/testing/selftests/drivers/net/.gitignore @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0-only +xdp_helper diff --git a/tools/testing/selftests/drivers/net/Makefile index
137470bdee0c7..0c95bd944d56d 100644 --- a/tools/testing/selftests/drivers/net/Makefile +++ b/tools/testing/selftests/drivers/net/Makefile @@ -1,13 +1,18 @@ # SPDX-License-Identifier: GPL-2.0 +CFLAGS += $(KHDR_INCLUDES) TEST_INCLUDES := $(wildcard lib/py/*.py) \ $(wildcard lib/sh/*.sh) \ ../../net/net_helper.sh \ ../../net/lib.sh \ +TEST_GEN_FILES := xdp_helper + TEST_PROGS := \ netcons_basic.sh \ + netcons_fragmented_msg.sh \ netcons_overflow.sh \ + netcons_sysdata.sh \ ping.py \ queues.py \ stats.py \ diff --git a/tools/testing/selftests/drivers/net/README.rst b/tools/testing/selftests/drivers/net/README.rst index 3b6a29e6564b9..eb838ae948441 100644 --- a/tools/testing/selftests/drivers/net/README.rst +++ b/tools/testing/selftests/drivers/net/README.rst @@ -107,7 +107,7 @@ On the target machine, running the tests will use netdevsim by default:: 1..1 # timeout set to 45 # selftests: drivers/net: ping.py - # KTAP version 1 + # TAP version 13 # 1..3 # ok 1 ping.test_v4 # ok 2 ping.test_v6 @@ -128,7 +128,7 @@ Create a config with remote info:: Run the test:: [/root] # ./ksft-net-drv/drivers/net/ping.py - KTAP version 1 + TAP version 13 1..3 ok 1 ping.test_v4 ok 2 ping.test_v6 # SKIP Test requires IPv6 connectivity diff --git a/tools/testing/selftests/drivers/net/bonding/bond_options.sh b/tools/testing/selftests/drivers/net/bonding/bond_options.sh index edc56e2cc6069..7bc148889ca72 100755 --- a/tools/testing/selftests/drivers/net/bonding/bond_options.sh +++ b/tools/testing/selftests/drivers/net/bonding/bond_options.sh @@ -11,8 +11,8 @@ ALL_TESTS=" lib_dir=$(dirname "$0") source ${lib_dir}/bond_topo_3d1c.sh -c_maddr="33:33:00:00:00:10" -g_maddr="33:33:00:00:02:54" +c_maddr="33:33:ff:00:00:10" +g_maddr="33:33:ff:00:02:54" skip_prio() { diff --git a/tools/testing/selftests/drivers/net/config b/tools/testing/selftests/drivers/net/config index a2d8af60876df..f27172ddee0a1 100644 --- a/tools/testing/selftests/drivers/net/config +++ b/tools/testing/selftests/drivers/net/config @@ -4,3 +4,4 @@ CONFIG_CONFIGFS_FS=y CONFIG_NETCONSOLE=m CONFIG_NETCONSOLE_DYNAMIC=y CONFIG_NETCONSOLE_EXTENDED_LOG=y +CONFIG_XDP_SOCKETS=y diff --git a/tools/testing/selftests/drivers/net/hds.py b/tools/testing/selftests/drivers/net/hds.py index 394971b25c0b1..7cc74faed7430 100755 --- a/tools/testing/selftests/drivers/net/hds.py +++ b/tools/testing/selftests/drivers/net/hds.py @@ -2,17 +2,53 @@ # SPDX-License-Identifier: GPL-2.0 import errno +import os from lib.py import ksft_run, ksft_exit, ksft_eq, ksft_raises, KsftSkipEx -from lib.py import EthtoolFamily, NlError +from lib.py import CmdExitFailure, EthtoolFamily, NlError from lib.py import NetDrvEnv +from lib.py import defer, ethtool, ip -def get_hds(cfg, netnl) -> None: + +def _get_hds_mode(cfg, netnl) -> str: try: rings = netnl.rings_get({'header': {'dev-index': cfg.ifindex}}) except NlError as e: raise KsftSkipEx('ring-get not supported by device') if 'tcp-data-split' not in rings: raise KsftSkipEx('tcp-data-split not supported by device') + return rings['tcp-data-split'] + + +def _xdp_onoff(cfg): + prog = cfg.rpath("../../net/lib/xdp_dummy.bpf.o") + ip("link set dev %s xdp obj %s sec xdp" % + (cfg.ifname, prog)) + ip("link set dev %s xdp off" % cfg.ifname) + + +def _ioctl_ringparam_modify(cfg, netnl) -> None: + """ + Helper for performing a hopefully unimportant IOCTL SET. + IOCTL does not support HDS, so it should not affect the HDS config. 
+ """ + try: + rings = netnl.rings_get({'header': {'dev-index': cfg.ifindex}}) + except NlError as e: + raise KsftSkipEx('ring-get not supported by device') + + if 'tx' not in rings: + raise KsftSkipEx('setting Tx ring size not supported') + + try: + ethtool(f"--disable-netlink -G {cfg.ifname} tx {rings['tx'] // 2}") + except CmdExitFailure as e: + ethtool(f"--disable-netlink -G {cfg.ifname} tx {rings['tx'] * 2}") + defer(ethtool, f"-G {cfg.ifname} tx {rings['tx']}") + + +def get_hds(cfg, netnl) -> None: + _get_hds_mode(cfg, netnl) + def get_hds_thresh(cfg, netnl) -> None: try: @@ -104,6 +140,103 @@ def set_hds_thresh_gt(cfg, netnl) -> None: netnl.rings_set({'header': {'dev-index': cfg.ifindex}, 'hds-thresh': hds_gt}) ksft_eq(e.exception.nl_msg.error, -errno.EINVAL) + +def set_xdp(cfg, netnl) -> None: + """ + Enable single-buffer XDP on the device. + When HDS is in "auto" / UNKNOWN mode, XDP installation should work. + """ + mode = _get_hds_mode(cfg, netnl) + if mode == 'enabled': + netnl.rings_set({'header': {'dev-index': cfg.ifindex}, + 'tcp-data-split': 'unknown'}) + + _xdp_onoff(cfg) + + +def enabled_set_xdp(cfg, netnl) -> None: + """ + Enable single-buffer XDP on the device. + When HDS is in "enabled" mode, XDP installation should not work. + """ + _get_hds_mode(cfg, netnl)  # Trigger skip if not supported + + netnl.rings_set({'header': {'dev-index': cfg.ifindex}, + 'tcp-data-split': 'enabled'}) + defer(netnl.rings_set, {'header': {'dev-index': cfg.ifindex}, + 'tcp-data-split': 'unknown'}) + + with ksft_raises(CmdExitFailure) as e: + _xdp_onoff(cfg) + + +def ioctl(cfg, netnl) -> None: + mode1 = _get_hds_mode(cfg, netnl) + _ioctl_ringparam_modify(cfg, netnl) + mode2 = _get_hds_mode(cfg, netnl) + + ksft_eq(mode1, mode2) + + +def ioctl_set_xdp(cfg, netnl) -> None: + """ + Like set_xdp(), but we perturb the settings via the legacy ioctl. + """ + mode = _get_hds_mode(cfg, netnl) + if mode == 'enabled': + netnl.rings_set({'header': {'dev-index': cfg.ifindex}, + 'tcp-data-split': 'unknown'}) + + _ioctl_ringparam_modify(cfg, netnl) + + _xdp_onoff(cfg) + + +def ioctl_enabled_set_xdp(cfg, netnl) -> None: + """ + Enable single-buffer XDP on the device. + When HDS is in "enabled" mode, XDP installation should not work.
+ """ + _get_hds_mode(cfg, netnl)  # Trigger skip if not supported + + netnl.rings_set({'header': {'dev-index': cfg.ifindex}, + 'tcp-data-split': 'enabled'}) + defer(netnl.rings_set, {'header': {'dev-index': cfg.ifindex}, + 'tcp-data-split': 'unknown'}) + + with ksft_raises(CmdExitFailure) as e: + _xdp_onoff(cfg) + + def main() -> None: with NetDrvEnv(__file__, queue_count=3) as cfg: ksft_run([get_hds, @@ -112,7 +245,12 @@ def main() -> None: set_hds_enable, set_hds_thresh_zero, set_hds_thresh_max, - set_hds_thresh_gt], + set_hds_thresh_gt, + set_xdp, + enabled_set_xdp, + ioctl, + ioctl_set_xdp, + ioctl_enabled_set_xdp], args=(cfg, )) ksft_exit() diff --git a/tools/testing/selftests/drivers/net/hw/Makefile b/tools/testing/selftests/drivers/net/hw/Makefile index 21ba64ce1e34b..cde5814ff9a75 100644 --- a/tools/testing/selftests/drivers/net/hw/Makefile +++ b/tools/testing/selftests/drivers/net/hw/Makefile @@ -10,11 +10,14 @@ TEST_PROGS = \ ethtool_rmon.sh \ hw_stats_l3.sh \ hw_stats_l3_gre.sh \ + irq.py \ loopback.sh \ nic_link_layer.py \ nic_performance.py \ pp_alloc_fail.py \ rss_ctx.py \ + rss_input_xfrm.py \ + tso.py \ # TEST_FILES := \ @@ -32,9 +35,12 @@ TEST_INCLUDES := \ # YNL files, must be before "include ..lib.mk" YNL_GEN_FILES := ncdevmem TEST_GEN_FILES += $(YNL_GEN_FILES) +TEST_GEN_FILES += $(patsubst %.c,%.o,$(wildcard *.bpf.c)) include ../../../lib.mk # YNL build YNL_GENS := ethtool netdev include ../../../net/ynl.mk + +include ../../../net/bpf.mk diff --git a/tools/testing/selftests/drivers/net/hw/csum.py b/tools/testing/selftests/drivers/net/hw/csum.py index cb40497faee44..701aca1361e0e 100755 --- a/tools/testing/selftests/drivers/net/hw/csum.py +++ b/tools/testing/selftests/drivers/net/hw/csum.py @@ -9,15 +9,12 @@ from lib.py import EthtoolFamily, NetDrvEpEnv from lib.py import bkg, cmd, wait_port_listen -def test_receive(cfg, ipv4=False, extra_args=None): +def test_receive(cfg, ipver="6", extra_args=None): """Test local nic checksum receive. Remote host sends crafted packets.""" if not cfg.have_rx_csum: raise KsftSkipEx(f"Test requires rx checksum offload on {cfg.ifname}") - if ipv4: - ip_args = f"-4 -S {cfg.remote_v4} -D {cfg.v4}" - else: - ip_args = f"-6 -S {cfg.remote_v6} -D {cfg.v6}" + ip_args = f"-{ipver} -S {cfg.remote_addr_v[ipver]} -D {cfg.addr_v[ipver]}" rx_cmd = f"{cfg.bin_local} -i {cfg.ifname} -n 100 {ip_args} -r 1 -R {extra_args}" tx_cmd = f"{cfg.bin_remote} -i {cfg.ifname} -n 100 {ip_args} -r 1 -T {extra_args}" @@ -27,17 +24,14 @@ def test_receive(cfg, ipv4=False, extra_args=None): cmd(tx_cmd, host=cfg.remote) -def test_transmit(cfg, ipv4=False, extra_args=None): +def test_transmit(cfg, ipver="6", extra_args=None): """Test local nic checksum transmit.
Remote host verifies packets.""" if (not cfg.have_tx_csum_generic and - not (cfg.have_tx_csum_ipv4 and ipv4) and - not (cfg.have_tx_csum_ipv6 and not ipv4)): + not (cfg.have_tx_csum_ipv4 and ipver == "4") and + not (cfg.have_tx_csum_ipv6 and ipver == "6")): raise KsftSkipEx(f"Test requires tx checksum offload on {cfg.ifname}") - if ipv4: - ip_args = f"-4 -S {cfg.v4} -D {cfg.remote_v4}" - else: - ip_args = f"-6 -S {cfg.v6} -D {cfg.remote_v6}" + ip_args = f"-{ipver} -S {cfg.addr_v[ipver]} -D {cfg.remote_addr_v[ipver]}" # Cannot randomize input when calculating zero checksum if extra_args != "-U -Z": @@ -51,26 +45,20 @@ def test_transmit(cfg, ipv4=False, extra_args=None): cmd(tx_cmd) -def test_builder(name, cfg, ipv4=False, tx=False, extra_args=""): +def test_builder(name, cfg, ipver="6", tx=False, extra_args=""): """Construct specific tests from the common template. Most tests follow the same basic pattern, differing only in Direction of the test and optional flags passed to csum.""" def f(cfg): - if ipv4: - cfg.require_v4() - else: - cfg.require_v6() + cfg.require_ipver(ipver) if tx: - test_transmit(cfg, ipv4, extra_args) + test_transmit(cfg, ipver, extra_args) else: - test_receive(cfg, ipv4, extra_args) + test_receive(cfg, ipver, extra_args) - if ipv4: - f.__name__ = "ipv4_" + name - else: - f.__name__ = "ipv6_" + name + f.__name__ = f"ipv{ipver}_" + name return f @@ -100,19 +88,19 @@ def main() -> None: with NetDrvEpEnv(__file__, nsim_test=False) as cfg: check_nic_features(cfg) - cfg.bin_local = path.abspath(path.dirname(__file__) + "/../../../net/lib/csum") + cfg.bin_local = cfg.rpath("../../../net/lib/csum") cfg.bin_remote = cfg.remote.deploy(cfg.bin_local) cases = [] - for ipv4 in [True, False]: - cases.append(test_builder("rx_tcp", cfg, ipv4, False, "-t")) - cases.append(test_builder("rx_tcp_invalid", cfg, ipv4, False, "-t -E")) + for ipver in ["4", "6"]: + cases.append(test_builder("rx_tcp", cfg, ipver, False, "-t")) + cases.append(test_builder("rx_tcp_invalid", cfg, ipver, False, "-t -E")) - cases.append(test_builder("rx_udp", cfg, ipv4, False, "")) - cases.append(test_builder("rx_udp_invalid", cfg, ipv4, False, "-E")) + cases.append(test_builder("rx_udp", cfg, ipver, False, "")) + cases.append(test_builder("rx_udp_invalid", cfg, ipver, False, "-E")) - cases.append(test_builder("tx_udp_csum_offload", cfg, ipv4, True, "-U")) - cases.append(test_builder("tx_udp_zero_checksum", cfg, ipv4, True, "-U -Z")) + cases.append(test_builder("tx_udp_csum_offload", cfg, ipver, True, "-U")) + cases.append(test_builder("tx_udp_zero_checksum", cfg, ipver, True, "-U -Z")) ksft_run(cases=cases, args=(cfg, )) ksft_exit() diff --git a/tools/testing/selftests/drivers/net/hw/devmem.py b/tools/testing/selftests/drivers/net/hw/devmem.py index 1223f0f5c10c3..3947e91571159 100755 --- a/tools/testing/selftests/drivers/net/hw/devmem.py +++ b/tools/testing/selftests/drivers/net/hw/devmem.py @@ -21,15 +21,15 @@ def require_devmem(cfg): @ksft_disruptive def check_rx(cfg) -> None: - cfg.require_v6() + cfg.require_ipver("6") require_devmem(cfg) port = rand_port() - listen_cmd = f"./ncdevmem -l -f {cfg.ifname} -s {cfg.v6} -p {port}" + listen_cmd = f"./ncdevmem -l -f {cfg.ifname} -s {cfg.addr_v['6']} -p {port}" with bkg(listen_cmd) as socat: wait_port_listen(port) - cmd(f"echo -e \"hello\\nworld\"| socat -u - TCP6:[{cfg.v6}]:{port}", host=cfg.remote, shell=True) + cmd(f"echo -e \"hello\\nworld\"| socat -u - TCP6:[{cfg.addr_v['6']}]:{port}", host=cfg.remote, shell=True) ksft_eq(socat.stdout.strip(), "hello\nworld") 
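The csum.py refactor above threads a single ipver string ("4" or "6") from test_builder() down into the dict-based address lookups, and names each generated case via f.__name__. Below is a minimal, self-contained sketch of that closure-factory pattern; the cfg dict and the print body are illustrative stand-ins, not part of the patch:

    def make_case(name, ipver, tx):
        # Mirror csum.py's test_builder(): bake the parameters into a
        # closure and set f.__name__ so the ksft harness reports the case.
        def f(cfg):
            direction = "tx" if tx else "rx"
            print(f"{f.__name__}: {direction} using {cfg[ipver]}")
        f.__name__ = f"ipv{ipver}_{name}"
        return f

    cfg = {"4": "192.0.2.1", "6": "2001:db8::1"}  # stand-in addresses
    for case in [make_case("rx_tcp", v, False) for v in ("4", "6")]:
        case(cfg)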
diff --git a/tools/testing/selftests/drivers/net/hw/irq.py b/tools/testing/selftests/drivers/net/hw/irq.py new file mode 100755 index 0000000000000..42ab983702458 --- /dev/null +++ b/tools/testing/selftests/drivers/net/hw/irq.py @@ -0,0 +1,99 @@ +#!/usr/bin/env python3 +# SPDX-License-Identifier: GPL-2.0 + +from lib.py import ksft_run, ksft_exit + from lib.py import ksft_ge, ksft_eq + from lib.py import KsftSkipEx + from lib.py import ksft_disruptive + from lib.py import EthtoolFamily, NetdevFamily + from lib.py import NetDrvEnv + from lib.py import cmd, ip, defer + + +def read_affinity(irq) -> str: + with open(f'/proc/irq/{irq}/smp_affinity', 'r') as fp: + return fp.read().lstrip("0,").strip() + + +def write_affinity(irq, what) -> str: + if what != read_affinity(irq): + with open(f'/proc/irq/{irq}/smp_affinity', 'w') as fp: + fp.write(what) + + +def check_irqs_reported(cfg) -> None: + """ Check that device reports IRQs for NAPI instances """ + napis = cfg.netnl.napi_get({"ifindex": cfg.ifindex}, dump=True) + irqs = sum(['irq' in x for x in napis]) + + ksft_ge(irqs, 1) + ksft_eq(irqs, len(napis)) + + +def _check_reconfig(cfg, reconfig_cb) -> None: + napis = cfg.netnl.napi_get({"ifindex": cfg.ifindex}, dump=True) + for n in reversed(napis): + if 'irq' in n: + break + else: + raise KsftSkipEx(f"Device has no NAPI with IRQ attribute (#napis: {len(napis)})") + + old = read_affinity(n['irq']) + # pick an affinity that's not the current one + new = "3" if old != "3" else "5" + write_affinity(n['irq'], new) + defer(write_affinity, n['irq'], old) + + reconfig_cb(cfg) + + ksft_eq(read_affinity(n['irq']), new, comment="IRQ affinity changed after reconfig") + + +def check_reconfig_queues(cfg) -> None: + def reconfig(cfg) -> None: + channels = cfg.ethnl.channels_get({'header': {'dev-index': cfg.ifindex}}) + if channels['combined-count'] == 0: + rx_type = 'rx' + else: + rx_type = 'combined' + cur_queue_cnt = channels[f'{rx_type}-count'] + max_queue_cnt = channels[f'{rx_type}-max'] + + cmd(f"ethtool -L {cfg.ifname} {rx_type} 1") + cmd(f"ethtool -L {cfg.ifname} {rx_type} {max_queue_cnt}") + cmd(f"ethtool -L {cfg.ifname} {rx_type} {cur_queue_cnt}") + + _check_reconfig(cfg, reconfig) + + +def check_reconfig_xdp(cfg) -> None: + def reconfig(cfg) -> None: + ip("link set dev %s xdp obj %s sec xdp" % + (cfg.ifname, cfg.rpath("xdp_dummy.bpf.o"))) + ip("link set dev %s xdp off" % cfg.ifname) + + _check_reconfig(cfg, reconfig) + + +@ksft_disruptive +def check_down(cfg) -> None: + def reconfig(cfg) -> None: + ip("link set dev %s down" % cfg.ifname) + ip("link set dev %s up" % cfg.ifname) + + _check_reconfig(cfg, reconfig) + + +def main() -> None: + with NetDrvEnv(__file__, nsim_test=False) as cfg: + cfg.ethnl = EthtoolFamily() + cfg.netnl = NetdevFamily() + + ksft_run([check_irqs_reported, check_reconfig_queues, + check_reconfig_xdp, check_down], + args=(cfg, )) + ksft_exit() + + +if __name__ == "__main__": + main() diff --git a/tools/testing/selftests/drivers/net/hw/ncdevmem.c b/tools/testing/selftests/drivers/net/hw/ncdevmem.c index 19a6969643f46..2bf14ac2b8c62 100644 --- a/tools/testing/selftests/drivers/net/hw/ncdevmem.c +++ b/tools/testing/selftests/drivers/net/hw/ncdevmem.c @@ -50,7 +50,6 @@ #include #include #include -#include #include #include #include diff --git a/tools/testing/selftests/drivers/net/hw/rss_ctx.py b/tools/testing/selftests/drivers/net/hw/rss_ctx.py index 319aaa004c407..ca60ae325c22e 100755 --- a/tools/testing/selftests/drivers/net/hw/rss_ctx.py +++
b/tools/testing/selftests/drivers/net/hw/rss_ctx.py @@ -4,7 +4,8 @@ import datetime import random import re -from lib.py import ksft_run, ksft_pr, ksft_exit, ksft_eq, ksft_ne, ksft_ge, ksft_lt, ksft_true +from lib.py import ksft_run, ksft_pr, ksft_exit +from lib.py import ksft_eq, ksft_ne, ksft_ge, ksft_in, ksft_lt, ksft_true, ksft_raises from lib.py import NetDrvEpEnv from lib.py import EthtoolFamily, NetdevFamily from lib.py import KsftSkipEx, KsftFailEx @@ -58,6 +59,14 @@ def require_ntuple(cfg): raise KsftSkipEx("Ntuple filters not enabled on the device: " + str(features["ntuple-filters"])) +def require_context_cnt(cfg, need_cnt): + # There's no good API to get the context count, so the tests + # which try to add a lot opportunistically set the count they + # discovered. Careful with test ordering! + if need_cnt and cfg.context_cnt and cfg.context_cnt < need_cnt: + raise KsftSkipEx(f"Test requires at least {need_cnt} contexts, but device only has {cfg.context_cnt}") + + # Get Rx packet counts for all queues, as a simple list of integers # if @prev is specified the prev counts will be subtracted def _get_rx_cnts(cfg, prev=None): @@ -383,7 +392,7 @@ def test_rss_context_dump(cfg): # Sanity-check the results for data in ctxs: - ksft_ne(set(data['indir']), {0}, "indir table is all zero") + ksft_ne(set(data.get('indir', [1])), {0}, "indir table is all zero") ksft_ne(set(data.get('hkey', [1])), {0}, "key is all zero") # More specific checks @@ -456,6 +465,8 @@ def test_rss_context(cfg, ctx_cnt=1, create_with_cfg=None): raise ksft_pr(f"Failed to create context {i + 1}, trying to test what we got") ctx_cnt = i + if cfg.context_cnt is None: + cfg.context_cnt = ctx_cnt break _rss_key_check(cfg, context=ctx_id) @@ -511,8 +522,7 @@ def test_rss_context_out_of_order(cfg, ctx_cnt=4): """ require_ntuple(cfg) - - requested_ctx_cnt = ctx_cnt + require_context_cnt(cfg, 4) # Try to allocate more queues when necessary qcnt = len(_get_rx_cnts(cfg)) @@ -577,9 +587,6 @@ def check_traffic(): remove_ctx(-1) check_traffic() - if requested_ctx_cnt != ctx_cnt: - raise KsftSkipEx(f"Tested only {ctx_cnt} contexts, wanted {requested_ctx_cnt}") - def test_rss_context_overlap(cfg, other_ctx=0): """ @@ -588,6 +595,8 @@ def test_rss_context_overlap(cfg, other_ctx=0): """ require_ntuple(cfg) + if other_ctx: + require_context_cnt(cfg, 2) queue_cnt = len(_get_rx_cnts(cfg)) if queue_cnt < 4: @@ -649,6 +658,29 @@ def test_rss_context_overlap2(cfg): test_rss_context_overlap(cfg, True) + +def test_flow_add_context_missing(cfg): + """ + Test that we are not allowed to add a rule pointing to an RSS context + which was never created.
+ """ + + require_ntuple(cfg) + + # Find a context which doesn't exist + for ctx_id in range(1, 100): + try: + get_rss(cfg, context=ctx_id) + except CmdExitFailure: + break + + with ksft_raises(CmdExitFailure) as cm: + flow = f"flow-type tcp{cfg.addr_ipver} dst-ip {cfg.addr} dst-port 1234 context {ctx_id}" + ntuple_id = ethtool_create(cfg, "-N", flow) + ethtool(f"-N {cfg.ifname} delete {ntuple_id}") + if cm.exception: + ksft_in('Invalid argument', cm.exception.cmd.stderr) + + def test_delete_rss_context_busy(cfg): """ Test that deletion returns -EBUSY when an rss context is being used @@ -717,6 +749,7 @@ def test_rss_ntuple_addition(cfg): def main() -> None: with NetDrvEpEnv(__file__, nsim_test=False) as cfg: + cfg.context_cnt = None cfg.ethnl = EthtoolFamily() cfg.netdevnl = NetdevFamily() @@ -726,6 +759,7 @@ def main() -> None: test_rss_context_dump, test_rss_context_queue_reconfigure, test_rss_context_overlap, test_rss_context_overlap2, test_rss_context_out_of_order, test_rss_context4_create_with_cfg, + test_flow_add_context_missing, test_delete_rss_context_busy, test_rss_ntuple_addition], args=(cfg, )) ksft_exit() diff --git a/tools/testing/selftests/drivers/net/hw/rss_input_xfrm.py b/tools/testing/selftests/drivers/net/hw/rss_input_xfrm.py new file mode 100755 index 0000000000000..53bb08cc29ec3 --- /dev/null +++ b/tools/testing/selftests/drivers/net/hw/rss_input_xfrm.py @@ -0,0 +1,87 @@ +#!/usr/bin/env python3 +# SPDX-License-Identifier: GPL-2.0 + +import multiprocessing +import socket +from lib.py import ksft_run, ksft_exit, ksft_eq, ksft_ge, cmd, fd_read_timeout +from lib.py import NetDrvEpEnv +from lib.py import EthtoolFamily, NetdevFamily +from lib.py import KsftSkipEx, KsftFailEx +from lib.py import rand_port + + +def traffic(cfg, local_port, remote_port, ipver): + af_inet = socket.AF_INET if ipver == "4" else socket.AF_INET6 + sock = socket.socket(af_inet, socket.SOCK_DGRAM) + sock.bind(("", local_port)) + sock.connect((cfg.remote_addr_v[ipver], remote_port)) + tgt = f"{ipver}:[{cfg.addr_v[ipver]}]:{local_port},sourceport={remote_port}" + cmd("echo a | socat - UDP" + tgt, host=cfg.remote) + fd_read_timeout(sock.fileno(), 5) + return sock.getsockopt(socket.SOL_SOCKET, socket.SO_INCOMING_CPU) + + +def test_rss_input_xfrm(cfg, ipver): + """ + Test symmetric input_xfrm. + If symmetric RSS hash is configured, send traffic twice, swapping the + src/dst UDP ports, and verify that the same queue is receiving the traffic + in both cases (IPs are constant).
+ """ + + if multiprocessing.cpu_count() < 2: + raise KsftSkipEx("Need at least two CPUs to test symmetric RSS hash") + + input_xfrm = cfg.ethnl.rss_get( + {'header': {'dev-name': cfg.ifname}}).get('input_xfrm') + + # Check for symmetric xor/or-xor + if not input_xfrm or (input_xfrm != 1 and input_xfrm != 2): + raise KsftSkipEx("Symmetric RSS hash not requested") + + cpus = set() + successful = 0 + for _ in range(100): + try: + port1 = rand_port(socket.SOCK_DGRAM) + port2 = rand_port(socket.SOCK_DGRAM) + cpu1 = traffic(cfg, port1, port2, ipver) + cpu2 = traffic(cfg, port2, port1, ipver) + cpus.update([cpu1, cpu2]) + ksft_eq( + cpu1, cpu2, comment=f"Received traffic on different cpus with ports ({port1 = }, {port2 = }) while symmetric hash is configured") + + successful += 1 + if successful == 10: + break + except: + continue + else: + raise KsftFailEx("Failed to run traffic") + + ksft_ge(len(cpus), 2, + comment=f"Received traffic on less than two cpus {cpus = }") + + +def test_rss_input_xfrm_ipv4(cfg): + cfg.require_ipver("4") + test_rss_input_xfrm(cfg, "4") + + +def test_rss_input_xfrm_ipv6(cfg): + cfg.require_ipver("6") + test_rss_input_xfrm(cfg, "6") + + +def main() -> None: + with NetDrvEpEnv(__file__, nsim_test=False) as cfg: + cfg.ethnl = EthtoolFamily() + cfg.netdevnl = NetdevFamily() + + ksft_run([test_rss_input_xfrm_ipv4, test_rss_input_xfrm_ipv6], + args=(cfg, )) + ksft_exit() + + +if __name__ == "__main__": + main() diff --git a/tools/testing/selftests/drivers/net/hw/tso.py b/tools/testing/selftests/drivers/net/hw/tso.py new file mode 100755 index 0000000000000..e1ecb92f79d9b --- /dev/null +++ b/tools/testing/selftests/drivers/net/hw/tso.py @@ -0,0 +1,241 @@ +#!/usr/bin/env python3 +# SPDX-License-Identifier: GPL-2.0 + +"""Run the tools/testing/selftests/net/csum testsuite.""" + +import fcntl +import socket +import struct +import termios +import time + +from lib.py import ksft_pr, ksft_run, ksft_exit, KsftSkipEx, KsftXfailEx +from lib.py import ksft_eq, ksft_ge, ksft_lt +from lib.py import EthtoolFamily, NetdevFamily, NetDrvEpEnv +from lib.py import bkg, cmd, defer, ethtool, ip, rand_port, wait_port_listen + + +def sock_wait_drain(sock, max_wait=1000): + """Wait for all pending write data on the socket to get ACKed.""" + for _ in range(max_wait): + one = b'\0' * 4 + outq = fcntl.ioctl(sock.fileno(), termios.TIOCOUTQ, one) + outq = struct.unpack("I", outq)[0] + if outq == 0: + break + time.sleep(0.01) + ksft_eq(outq, 0) + + +def tcp_sock_get_retrans(sock): + """Get the number of retransmissions for the TCP socket.""" + info = sock.getsockopt(socket.SOL_TCP, socket.TCP_INFO, 512) + return struct.unpack("I", info[100:104])[0] + + +def run_one_stream(cfg, ipver, remote_v4, remote_v6, should_lso): + cfg.require_cmd("socat", remote=True) + + port = rand_port() + listen_cmd = f"socat -{ipver} -t 2 -u TCP-LISTEN:{port},reuseport /dev/null,ignoreeof" + + with bkg(listen_cmd, host=cfg.remote) as nc: + wait_port_listen(port, host=cfg.remote) + + if ipver == "4": + sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + sock.connect((remote_v4, port)) + else: + sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM) + sock.connect((remote_v6, port)) + + # Small send to make sure the connection is working. + sock.send("ping".encode()) + sock_wait_drain(sock) + + # Send 4MB of data, record the LSO packet count. 
+ qstat_old = cfg.netnl.qstats_get({"ifindex": cfg.ifindex}, dump=True)[0] + buf = b"0" * 1024 * 1024 * 4 + sock.send(buf) + sock_wait_drain(sock) + qstat_new = cfg.netnl.qstats_get({"ifindex": cfg.ifindex}, dump=True)[0] + + # No math behind the 10 here, but try to catch cases where + # TCP falls back to non-LSO. + ksft_lt(tcp_sock_get_retrans(sock), 10) + sock.close() + + # Check that at least 90% of the data was sent as LSO packets. + # System noise may cause false negatives. Also header overheads + # will add up to 5% of extra packets... The check is best effort. + total_lso_wire = len(buf) * 0.90 // cfg.dev["mtu"] + total_lso_super = len(buf) * 0.90 // cfg.dev["tso_max_size"] + if should_lso: + if cfg.have_stat_super_count: + ksft_ge(qstat_new['tx-hw-gso-packets'] - + qstat_old['tx-hw-gso-packets'], + total_lso_super, + comment="Number of LSO super-packets with LSO enabled") + if cfg.have_stat_wire_count: + ksft_ge(qstat_new['tx-hw-gso-wire-packets'] - + qstat_old['tx-hw-gso-wire-packets'], + total_lso_wire, + comment="Number of LSO wire-packets with LSO enabled") + else: + if cfg.have_stat_super_count: + ksft_lt(qstat_new['tx-hw-gso-packets'] - + qstat_old['tx-hw-gso-packets'], + 15, comment="Number of LSO super-packets with LSO disabled") + if cfg.have_stat_wire_count: + ksft_lt(qstat_new['tx-hw-gso-wire-packets'] - + qstat_old['tx-hw-gso-wire-packets'], + 500, comment="Number of LSO wire-packets with LSO disabled") + + +def build_tunnel(cfg, outer_ipver, tun_info): + local_v4 = NetDrvEpEnv.nsim_v4_pfx + "1" + local_v6 = NetDrvEpEnv.nsim_v6_pfx + "1" + remote_v4 = NetDrvEpEnv.nsim_v4_pfx + "2" + remote_v6 = NetDrvEpEnv.nsim_v6_pfx + "2" + + local_addr = cfg.addr_v[outer_ipver] + remote_addr = cfg.remote_addr_v[outer_ipver] + + tun_type = tun_info[0] + tun_arg = tun_info[2] + ip(f"link add {tun_type}-ksft type {tun_type} {tun_arg} local {local_addr} remote {remote_addr} dev {cfg.ifname}") + defer(ip, f"link del {tun_type}-ksft") + ip(f"link set dev {tun_type}-ksft up") + ip(f"addr add {local_v4}/24 dev {tun_type}-ksft") + ip(f"addr add {local_v6}/64 dev {tun_type}-ksft") + + ip(f"link add {tun_type}-ksft type {tun_type} {tun_arg} local {remote_addr} remote {local_addr} dev {cfg.remote_ifname}", + host=cfg.remote) + defer(ip, f"link del {tun_type}-ksft", host=cfg.remote) + ip(f"link set dev {tun_type}-ksft up", host=cfg.remote) + ip(f"addr add {remote_v4}/24 dev {tun_type}-ksft", host=cfg.remote) + ip(f"addr add {remote_v6}/64 dev {tun_type}-ksft", host=cfg.remote) + + return remote_v4, remote_v6 + + +def test_builder(name, cfg, outer_ipver, feature, tun=None, inner_ipver=None): + """Construct specific tests from the common template.""" + def f(cfg): + cfg.require_ipver(outer_ipver) + + if not cfg.have_stat_super_count and \ + not cfg.have_stat_wire_count: + raise KsftSkipEx("Device does not support LSO queue stats") + + ipver = outer_ipver + if tun: + remote_v4, remote_v6 = build_tunnel(cfg, ipver, tun) + ipver = inner_ipver + else: + remote_v4 = cfg.remote_addr_v["4"] + remote_v6 = cfg.remote_addr_v["6"] + + tun_partial = tun and tun[1] + # Tunnel which can silently fall back to gso-partial + has_gso_partial = tun and 'tx-gso-partial' in cfg.features + + # For TSO4 via partial we need mangleid + if ipver == "4" and feature in cfg.partial_features: + ksft_pr("Testing with mangleid enabled") + if 'tx-tcp-mangleid-segmentation' not in cfg.features: + ethtool(f"-K {cfg.ifname} tx-tcp-mangleid-segmentation on") + defer(ethtool, f"-K {cfg.ifname} tx-tcp-mangleid-segmentation off") + + #
First test without the feature enabled. + ethtool(f"-K {cfg.ifname} {feature} off") + if has_gso_partial: + ethtool(f"-K {cfg.ifname} tx-gso-partial off") + run_one_stream(cfg, ipver, remote_v4, remote_v6, should_lso=False) + + # Now test with the feature enabled. + # For compatible tunnels only - just GSO partial, not specific feature. + if has_gso_partial: + ethtool(f"-K {cfg.ifname} tx-gso-partial on") + run_one_stream(cfg, ipver, remote_v4, remote_v6, + should_lso=tun_partial) + + # Full feature enabled. + if feature in cfg.features: + ethtool(f"-K {cfg.ifname} {feature} on") + run_one_stream(cfg, ipver, remote_v4, remote_v6, should_lso=True) + else: + raise KsftXfailEx(f"Device does not support {feature}") + + f.__name__ = name + ((outer_ipver + "_") if tun else "") + "ipv" + inner_ipver + return f + + +def query_nic_features(cfg) -> None: + """Query and cache the NIC features.""" + cfg.have_stat_super_count = False + cfg.have_stat_wire_count = False + + cfg.features = set() + features = cfg.ethnl.features_get({"header": {"dev-index": cfg.ifindex}}) + for f in features["active"]["bits"]["bit"]: + cfg.features.add(f["name"]) + + # Check which features are supported via GSO partial + cfg.partial_features = set() + if 'tx-gso-partial' in cfg.features: + ethtool(f"-K {cfg.ifname} tx-gso-partial off") + + no_partial = set() + features = cfg.ethnl.features_get({"header": {"dev-index": cfg.ifindex}}) + for f in features["active"]["bits"]["bit"]: + no_partial.add(f["name"]) + cfg.partial_features = cfg.features - no_partial + ethtool(f"-K {cfg.ifname} tx-gso-partial on") + + stats = cfg.netnl.qstats_get({"ifindex": cfg.ifindex}, dump=True) + if stats: + if 'tx-hw-gso-packets' in stats[0]: + ksft_pr("Detected qstat for LSO super-packets") + cfg.have_stat_super_count = True + if 'tx-hw-gso-wire-packets' in stats[0]: + ksft_pr("Detected qstat for LSO wire-packets") + cfg.have_stat_wire_count = True + + +def main() -> None: + with NetDrvEpEnv(__file__, nsim_test=False) as cfg: + cfg.ethnl = EthtoolFamily() + cfg.netnl = NetdevFamily() + + query_nic_features(cfg) + + test_info = ( + # name, v4/v6 ethtool_feature tun:(type, partial, args) + ("", "4", "tx-tcp-segmentation", None), + ("", "6", "tx-tcp6-segmentation", None), + ("vxlan", "", "tx-udp_tnl-segmentation", ("vxlan", True, "id 100 dstport 4789 noudpcsum")), + ("vxlan_csum", "", "tx-udp_tnl-csum-segmentation", ("vxlan", False, "id 100 dstport 4789 udpcsum")), + ("gre", "4", "tx-gre-segmentation", ("ipgre", False, "")), + ("gre", "6", "tx-gre-segmentation", ("ip6gre", False, "")), + ) + + cases = [] + for outer_ipver in ["4", "6"]: + for info in test_info: + # Skip tests which only work for a specific IP version + if info[1] and outer_ipver != info[1]: + continue + + cases.append(test_builder(info[0], cfg, outer_ipver, info[2], + tun=info[3], inner_ipver="4")) + if info[3]: + cases.append(test_builder(info[0], cfg, outer_ipver, info[2], + tun=info[3], inner_ipver="6")) + + ksft_run(cases=cases, args=(cfg, )) + ksft_exit() + + +if __name__ == "__main__": + main() diff --git a/tools/testing/selftests/drivers/net/hw/xdp_dummy.bpf.c b/tools/testing/selftests/drivers/net/hw/xdp_dummy.bpf.c new file mode 100644 index 0000000000000..d988b2e0cee84 --- /dev/null +++ b/tools/testing/selftests/drivers/net/hw/xdp_dummy.bpf.c @@ -0,0 +1,13 @@ +// SPDX-License-Identifier: GPL-2.0 + +#define KBUILD_MODNAME "xdp_dummy" +#include +#include + +SEC("xdp") +int xdp_dummy_prog(struct xdp_md *ctx) +{ + return XDP_PASS; +} + +char _license[] SEC("license") =
"GPL"; diff --git a/tools/testing/selftests/drivers/net/lib/py/env.py b/tools/testing/selftests/drivers/net/lib/py/env.py index 987e452d3a457..fd4d674e6c72e 100644 --- a/tools/testing/selftests/drivers/net/lib/py/env.py +++ b/tools/testing/selftests/drivers/net/lib/py/env.py @@ -10,42 +10,68 @@ from .remote import Remote -def _load_env_file(src_path): - env = os.environ.copy() +class NetDrvEnvBase: + """ + Base class for a NIC / host envirnoments + """ + def __init__(self, src_path): + self.src_path = src_path + self.env = self._load_env_file() - src_dir = Path(src_path).parent.resolve() - if not (src_dir / "net.config").exists(): + def rpath(self, path): + """ + Get an absolute path to a file based on a path relative to the directory + containing the test which constructed env. + + For example, if the test.py is in the same directory as + a binary (built from helper.c), the test can use env.rpath("helper") + to get the absolute path to the binary + """ + src_dir = Path(self.src_path).parent.resolve() + return (src_dir / path).as_posix() + + def _load_env_file(self): + env = os.environ.copy() + + src_dir = Path(self.src_path).parent.resolve() + if not (src_dir / "net.config").exists(): + return ksft_setup(env) + + with open((src_dir / "net.config").as_posix(), 'r') as fp: + for line in fp.readlines(): + full_file = line + # Strip comments + pos = line.find("#") + if pos >= 0: + line = line[:pos] + line = line.strip() + if not line: + continue + pair = line.split('=', maxsplit=1) + if len(pair) != 2: + raise Exception("Can't parse configuration line:", full_file) + env[pair[0]] = pair[1] return ksft_setup(env) - with open((src_dir / "net.config").as_posix(), 'r') as fp: - for line in fp.readlines(): - full_file = line - # Strip comments - pos = line.find("#") - if pos >= 0: - line = line[:pos] - line = line.strip() - if not line: - continue - pair = line.split('=', maxsplit=1) - if len(pair) != 2: - raise Exception("Can't parse configuration line:", full_file) - env[pair[0]] = pair[1] - return ksft_setup(env) - - -class NetDrvEnv: + +class NetDrvEnv(NetDrvEnvBase): """ Class for a single NIC / host env, with no remote end """ - def __init__(self, src_path, **kwargs): - self._ns = None + def __init__(self, src_path, nsim_test=None, **kwargs): + super().__init__(src_path) - self.env = _load_env_file(src_path) + self._ns = None if 'NETIF' in self.env: - self.dev = ip("link show dev " + self.env['NETIF'], json=True)[0] + if nsim_test is True: + raise KsftXfailEx("Test only works on netdevsim") + + self.dev = ip("-d link show dev " + self.env['NETIF'], json=True)[0] else: + if nsim_test is False: + raise KsftXfailEx("Test does not work on netdevsim") + self._ns = NetdevSimDev(**kwargs) self.dev = self._ns.nsims[0].dev self.ifname = self.dev['ifname'] @@ -68,7 +94,7 @@ def __del__(self): self._ns = None -class NetDrvEpEnv: +class NetDrvEpEnv(NetDrvEnvBase): """ Class for an environment with a local device and "remote endpoint" which can be used to send traffic in. 
@@ -82,8 +108,7 @@ class NetDrvEpEnv: nsim_v6_pfx = "2001:db8::" def __init__(self, src_path, nsim_test=None): - - self.env = _load_env_file(src_path) + super().__init__(src_path) self._stats_settle_time = None @@ -94,17 +119,20 @@ def __init__(self, src_path, nsim_test=None): self._ns = None self._ns_peer = None + self.addr_v = { "4": None, "6": None } + self.remote_addr_v = { "4": None, "6": None } + if "NETIF" in self.env: if nsim_test is True: raise KsftXfailEx("Test only works on netdevsim") self._check_env() - self.dev = ip("link show dev " + self.env['NETIF'], json=True)[0] + self.dev = ip("-d link show dev " + self.env['NETIF'], json=True)[0] - self.v4 = self.env.get("LOCAL_V4") - self.v6 = self.env.get("LOCAL_V6") - self.remote_v4 = self.env.get("REMOTE_V4") - self.remote_v6 = self.env.get("REMOTE_V6") + self.addr_v["4"] = self.env.get("LOCAL_V4") + self.addr_v["6"] = self.env.get("LOCAL_V6") + self.remote_addr_v["4"] = self.env.get("REMOTE_V4") + self.remote_addr_v["6"] = self.env.get("REMOTE_V6") kind = self.env["REMOTE_TYPE"] args = self.env["REMOTE_ARGS"] else: @@ -115,26 +143,29 @@ def __init__(self, src_path, nsim_test=None): self.dev = self._ns.nsims[0].dev - self.v4 = self.nsim_v4_pfx + "1" - self.v6 = self.nsim_v6_pfx + "1" - self.remote_v4 = self.nsim_v4_pfx + "2" - self.remote_v6 = self.nsim_v6_pfx + "2" + self.addr_v["4"] = self.nsim_v4_pfx + "1" + self.addr_v["6"] = self.nsim_v6_pfx + "1" + self.remote_addr_v["4"] = self.nsim_v4_pfx + "2" + self.remote_addr_v["6"] = self.nsim_v6_pfx + "2" kind = "netns" args = self._netns.name self.remote = Remote(kind, args, src_path) - self.addr = self.v6 if self.v6 else self.v4 - self.remote_addr = self.remote_v6 if self.remote_v6 else self.remote_v4 + self.addr_ipver = "6" if self.addr_v["6"] else "4" + self.addr = self.addr_v[self.addr_ipver] + self.remote_addr = self.remote_addr_v[self.addr_ipver] - self.addr_ipver = "6" if self.v6 else "4" # Bracketed addresses, some commands need IPv6 to be inside [] - self.baddr = f"[{self.v6}]" if self.v6 else self.v4 - self.remote_baddr = f"[{self.remote_v6}]" if self.remote_v6 else self.remote_v4 + self.baddr = f"[{self.addr_v['6']}]" if self.addr_v["6"] else self.addr_v["4"] + self.remote_baddr = f"[{self.remote_addr_v['6']}]" if self.remote_addr_v["6"] else self.remote_addr_v["4"] self.ifname = self.dev['ifname'] self.ifindex = self.dev['ifindex'] + # resolve remote interface name + self.remote_ifname = self.resolve_remote_ifc() + self._required_cmd = {} def create_local(self): @@ -181,6 +212,18 @@ def _check_env(self): raise Exception("Invalid environment, missing configuration:", missing, "Please see tools/testing/selftests/drivers/net/README.rst") + def resolve_remote_ifc(self): + v4 = v6 = None + if self.remote_addr_v["4"]: + v4 = ip("addr show to " + self.remote_addr_v["4"], json=True, host=self.remote) + if self.remote_addr_v["6"]: + v6 = ip("addr show to " + self.remote_addr_v["6"], json=True, host=self.remote) + if v4 and v6 and v4[0]["ifname"] != v6[0]["ifname"]: + raise Exception("Can't resolve remote interface name, v4 and v6 don't match") + if (v4 and len(v4) > 1) or (v6 and len(v6) > 1): + raise Exception("Can't resolve remote interface name, multiple interfaces match") + return v6[0]["ifname"] if v6 else v4[0]["ifname"] + def __enter__(self): return self @@ -204,13 +247,9 @@ def __del__(self): del self.remote self.remote = None - def require_v4(self): - if not self.v4 or not self.remote_v4: - raise KsftSkipEx("Test requires IPv4 connectivity") - - def require_v6(self): - if 
not self.v6 or not self.remote_v6: - raise KsftSkipEx("Test requires IPv6 connectivity") + def require_ipver(self, ipver): + if not self.addr_v[ipver] or not self.remote_addr_v[ipver]: + raise KsftSkipEx(f"Test requires IPv{ipver} connectivity") def _require_cmd(self, comm, key, host=None): cached = self._required_cmd.get(comm, {}) diff --git a/tools/testing/selftests/drivers/net/lib/sh/lib_netcons.sh b/tools/testing/selftests/drivers/net/lib/sh/lib_netcons.sh index 3acaba41ac7b2..3c96b022954db 100644 --- a/tools/testing/selftests/drivers/net/lib/sh/lib_netcons.sh +++ b/tools/testing/selftests/drivers/net/lib/sh/lib_netcons.sh @@ -110,6 +110,13 @@ function create_dynamic_target() { echo 1 > "${NETCONS_PATH}"/enabled } +# Do not append the release to the header of the message +function disable_release_append() { + echo 0 > "${NETCONS_PATH}"/enabled + echo 0 > "${NETCONS_PATH}"/release + echo 1 > "${NETCONS_PATH}"/enabled +} + function cleanup() { local NSIM_DEV_SYS_DEL="/sys/bus/netdevsim/del_device" @@ -223,3 +230,20 @@ function check_for_dependencies() { exit "${ksft_skip}" fi } + +function check_for_taskset() { + if ! which taskset > /dev/null ; then + echo "SKIP: taskset(1) is not available" >&2 + exit "${ksft_skip}" + fi +} + +# This is necessary when running multiple tests in a row +function pkill_socat() { + PROCESS_NAME="socat UDP-LISTEN:6666,fork ${OUTPUT_FILE}" + # socat runs under timeout(1), kill it if it is still alive + # do not fail if socat doesn't exist anymore + set +e + pkill -f "${PROCESS_NAME}" + set -e +} diff --git a/tools/testing/selftests/drivers/net/netcons_fragmented_msg.sh b/tools/testing/selftests/drivers/net/netcons_fragmented_msg.sh new file mode 100755 index 0000000000000..4a71e01a230c7 --- /dev/null +++ b/tools/testing/selftests/drivers/net/netcons_fragmented_msg.sh @@ -0,0 +1,122 @@ +#!/usr/bin/env bash +# SPDX-License-Identifier: GPL-2.0 + +# Test netconsole's message fragmentation functionality. +# +# When a message exceeds the maximum packet size, netconsole splits it into +# multiple fragments for transmission. This test verifies: +# - Correct fragmentation of large messages +# - Proper reassembly of fragments at the receiver +# - Preservation of userdata across fragments +# - Behavior with and without kernel release version appending +# +# Author: Breno Leitao + +set -euo pipefail + +SCRIPTDIR=$(dirname "$(readlink -e "${BASH_SOURCE[0]}")") + +source "${SCRIPTDIR}"/lib/sh/lib_netcons.sh + +modprobe netdevsim 2> /dev/null || true +modprobe netconsole 2> /dev/null || true + +# The content of kmsg will be saved to the following file +OUTPUT_FILE="/tmp/${TARGET}" + +# set userdata to a long value. In this case, it is "1-2-3-4...60-" +USERDATA_VALUE=$(printf -- '%.2s-' {1..60}) + +# Convert the header string into a regexp, so we can remove +# the second header as well. +# A header looks like "13,468,514729715,-,ncfrag=0/1135;". If +# release is appended, you might find something like: +# "6.13.0-04048-g4f561a87745a,13,468,514729715,-,ncfrag=0/1135;" +function header_to_regex() { + # header is everything before ; + local HEADER="${1}" + REGEX=$(echo "${HEADER}" | cut -d'=' -f1) + echo "${REGEX}=[0-9]*\/[0-9]*;" +} + +# We have two headers in the message. Remove both to extract the +# full message. +function extract_msg() { + local MSGFILE="${1}" + # Extract the header, which is the very first thing that arrives in the + # first line.
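+	# For illustration, with the sample header shown above (the field
+	# values vary per message), header_to_regex turns
+	#	"13,468,514729715,-,ncfrag=0/1135"
+	# into
+	#	13,468,514729715,-,ncfrag=[0-9]*\/[0-9]*;
+	# which matches both fragment headers of the message, since they
+	# differ only in the ncfrag offset/total values.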
+ HEADER=$(sed -n '1p' "${MSGFILE}" | cut -d';' -f1) + HEADER_REGEX=$(header_to_regex "${HEADER}") + + # Remove the two headers from the received message + # This will return the message without any header, similarly to what + # was sent. + sed "s/""${HEADER_REGEX}""//g" "${MSGFILE}" +} + +# Validate the message, which has two fragments glued together. +# Unwrap them to make sure all the characters were transmitted. +# File will look like the following: +# 13,468,514729715,-,ncfrag=0/1135; +# key=-13,468,514729715,-,ncfrag=967/1135; +function validate_fragmented_result() { + # Discard the netconsole headers, and assemble the full message + RCVMSG=$(extract_msg "${1}") + + # check for the main message + if ! echo "${RCVMSG}" | grep -q "${MSG}"; then + echo "Message body doesn't match." >&2 + echo "msg received=" "${RCVMSG}" >&2 + exit "${ksft_fail}" + fi + + # check userdata + if ! echo "${RCVMSG}" | grep -q "${USERDATA_VALUE}"; then + echo "message userdata doesn't match" >&2 + echo "msg received=" "${RCVMSG}" >&2 + exit "${ksft_fail}" + fi + # test passed. hooray +} + +# Check for basic system dependency and exit if not found +check_for_dependencies +# Set current loglevel to KERN_INFO(6), and default to KERN_NOTICE(5) +echo "6 5" > /proc/sys/kernel/printk +# Remove the namespace, interfaces and netconsole target on exit +trap cleanup EXIT +# Create one namespace and two interfaces +set_network +# Create a dynamic target for netconsole +create_dynamic_target +# Set userdata "key" to the "value" value +set_user_data + + +# TEST 1: Send message and userdata. They will fragment +# ======= +MSG=$(printf -- 'MSG%.3s=' {1..150}) + +# Listen for netconsole port inside the namespace and destination interface +listen_port_and_save_to "${OUTPUT_FILE}" & +# Wait for socat to start and listen to the port. +wait_local_port_listen "${NAMESPACE}" "${PORT}" udp +# Send the message +echo "${MSG}: ${TARGET}" > /dev/kmsg +# Wait until socat saves the file to disk +busywait "${BUSYWAIT_TIMEOUT}" test -s "${OUTPUT_FILE}" +# Check if the message was not corrupted +validate_fragmented_result "${OUTPUT_FILE}" + +# TEST 2: Test with smaller message, and without release appended +# ======= +MSG=$(printf -- 'FOOBAR%.3s=' {1..100}) +# Let's disable release and test again. +disable_release_append + +listen_port_and_save_to "${OUTPUT_FILE}" & +wait_local_port_listen "${NAMESPACE}" "${PORT}" udp +echo "${MSG}: ${TARGET}" > /dev/kmsg +busywait "${BUSYWAIT_TIMEOUT}" test -s "${OUTPUT_FILE}" +validate_fragmented_result "${OUTPUT_FILE}" +exit "${ksft_pass}" diff --git a/tools/testing/selftests/drivers/net/netcons_sysdata.sh b/tools/testing/selftests/drivers/net/netcons_sysdata.sh new file mode 100755 index 0000000000000..a737e377bf085 --- /dev/null +++ b/tools/testing/selftests/drivers/net/netcons_sysdata.sh @@ -0,0 +1,242 @@ +#!/usr/bin/env bash +# SPDX-License-Identifier: GPL-2.0 + +# A test that makes sure that sysdata runtime CPU data is properly set +# when a message is sent. +# +# There are 3 different tests, each one sending a message from a random CPU. +# - Test #1 +# * Only enable cpu_nr sysdata feature. +# - Test #2 +# * Keep cpu_nr sysdata feature enabled and enable userdata. +# - Test #3 +# * Keep userdata enabled, and disable the sysdata cpu_nr feature. +# +# Author: Breno Leitao + +set -euo pipefail + +SCRIPTDIR=$(dirname "$(readlink -e "${BASH_SOURCE[0]}")") + +source "${SCRIPTDIR}"/lib/sh/lib_netcons.sh + +# Enable the sysdata cpu_nr feature +function set_cpu_nr() { + if [[ !
-f "${NETCONS_PATH}/userdata/cpu_nr_enabled" ]] + then + echo "Populate CPU configfs path not available in ${NETCONS_PATH}/userdata/cpu_nr_enabled" >&2 + exit "${ksft_skip}" + fi + + echo 1 > "${NETCONS_PATH}/userdata/cpu_nr_enabled" +} + +# Enable the taskname to be appended to sysdata +function set_taskname() { + if [[ ! -f "${NETCONS_PATH}/userdata/taskname_enabled" ]] + then + echo "Not able to enable taskname sysdata append. Configfs not available in ${NETCONS_PATH}/userdata/taskname_enabled" >&2 + exit "${ksft_skip}" + fi + + echo 1 > "${NETCONS_PATH}/userdata/taskname_enabled" +} + +# Enable the release to be appended to sysdata +function set_release() { + if [[ ! -f "${NETCONS_PATH}/userdata/release_enabled" ]] + then + echo "Not able to enable release sysdata append. Configfs not available in ${NETCONS_PATH}/userdata/release_enabled" >&2 + exit "${ksft_skip}" + fi + + echo 1 > "${NETCONS_PATH}/userdata/release_enabled" +} + +# Disable the sysdata cpu_nr feature +function unset_cpu_nr() { + echo 0 > "${NETCONS_PATH}/userdata/cpu_nr_enabled" +} + +# Once called, taskname=<..> will not be appended anymore +function unset_taskname() { + echo 0 > "${NETCONS_PATH}/userdata/taskname_enabled" +} + +function unset_release() { + echo 0 > "${NETCONS_PATH}/userdata/release_enabled" +} + +# Test if MSG contains sysdata +function validate_sysdata() { + # OUTPUT_FILE will contain something like: + # 6.11.1-0_fbk0_rc13_509_g30d75cea12f7,13,1822,115075213798,-;netconsole selftest: netcons_gtJHM + # userdatakey=userdatavalue + # cpu=X + # taskname= + + # Echo is what this test uses to create the message. See runtest() + # function + SENDER="echo" + + if [ ! -f "$OUTPUT_FILE" ]; then + echo "FAIL: File was not generated." >&2 + exit "${ksft_fail}" + fi + + if ! grep -q "${MSG}" "${OUTPUT_FILE}"; then + echo "FAIL: ${MSG} not found in ${OUTPUT_FILE}" >&2 + cat "${OUTPUT_FILE}" >&2 + exit "${ksft_fail}" + fi + + # Check if cpu=XX exists in the file and matches the one used + # in taskset(1) + if ! grep -q "cpu=${CPU}\+" "${OUTPUT_FILE}"; then + echo "FAIL: 'cpu=${CPU}' not found in ${OUTPUT_FILE}" >&2 + cat "${OUTPUT_FILE}" >&2 + exit "${ksft_fail}" + fi + + if ! grep -q "taskname=${SENDER}" "${OUTPUT_FILE}"; then + echo "FAIL: 'taskname=echo' not found in ${OUTPUT_FILE}" >&2 + cat "${OUTPUT_FILE}" >&2 + exit "${ksft_fail}" + fi + + rm "${OUTPUT_FILE}" + pkill_socat +} + +function validate_release() { + RELEASE=$(uname -r) + + if [ ! -f "$OUTPUT_FILE" ]; then + echo "FAIL: File was not generated." >&2 + exit "${ksft_fail}" + fi + + if ! grep -q "release=${RELEASE}" "${OUTPUT_FILE}"; then + echo "FAIL: 'release=${RELEASE}' not found in ${OUTPUT_FILE}" >&2 + cat "${OUTPUT_FILE}" >&2 + exit "${ksft_fail}" + fi +} + +# Test if MSG content exists in OUTPUT_FILE but no `cpu=` and `taskname=` +# strings +function validate_no_sysdata() { + if [ ! -f "$OUTPUT_FILE" ]; then + echo "FAIL: File was not generated." >&2 + exit "${ksft_fail}" + fi + + if ! 
grep -q "${MSG}" "${OUTPUT_FILE}"; then + echo "FAIL: ${MSG} not found in ${OUTPUT_FILE}" >&2 + cat "${OUTPUT_FILE}" >&2 + exit "${ksft_fail}" + fi + + if grep -q "cpu=" "${OUTPUT_FILE}"; then + echo "FAIL: 'cpu= found in ${OUTPUT_FILE}" >&2 + cat "${OUTPUT_FILE}" >&2 + exit "${ksft_fail}" + fi + + if grep -q "taskname=" "${OUTPUT_FILE}"; then + echo "FAIL: 'taskname= found in ${OUTPUT_FILE}" >&2 + cat "${OUTPUT_FILE}" >&2 + exit "${ksft_fail}" + fi + + if grep -q "release=" "${OUTPUT_FILE}"; then + echo "FAIL: 'release= found in ${OUTPUT_FILE}" >&2 + cat "${OUTPUT_FILE}" >&2 + exit "${ksft_fail}" + fi + + rm "${OUTPUT_FILE}" +} + +# Start socat, send the message and wait for the file to show up in the file +# system +function runtest { + # Listen for netconsole port inside the namespace and destination + # interface + listen_port_and_save_to "${OUTPUT_FILE}" & + # Wait for socat to start and listen to the port. + wait_local_port_listen "${NAMESPACE}" "${PORT}" udp + # Send the message + taskset -c "${CPU}" echo "${MSG}: ${TARGET}" > /dev/kmsg + # Wait until socat saves the file to disk + busywait "${BUSYWAIT_TIMEOUT}" test -s "${OUTPUT_FILE}" +} + +# ========== # +# Start here # +# ========== # + +modprobe netdevsim 2> /dev/null || true +modprobe netconsole 2> /dev/null || true + +# Check for basic system dependency and exit if not found +check_for_dependencies +# This test also depends on taskset(1). Check for it before starting the test +check_for_taskset + +# Set current loglevel to KERN_INFO(6), and default to KERN_NOTICE(5) +echo "6 5" > /proc/sys/kernel/printk +# Remove the namespace, interfaces and netconsole target on exit +trap cleanup EXIT +# Create one namespace and two interfaces +set_network +# Create a dynamic target for netconsole +create_dynamic_target + +#==================================================== +# TEST #1 +# Send message from a random CPU +#==================================================== +# Random CPU in the system +CPU=$((RANDOM % $(nproc))) +OUTPUT_FILE="/tmp/${TARGET}_1" +MSG="Test #1 from CPU${CPU}" +# Enable the auto population of cpu_nr +set_cpu_nr +# Enable taskname to be appended to sysdata +set_taskname +set_release +runtest +# Make sure the message was received in the dst part +# and exit +validate_release +validate_sysdata + +#==================================================== +# TEST #2 +# This test now adds userdata together with sysdata +# =================================================== +# Get a new random CPU +CPU=$((RANDOM % $(nproc))) +OUTPUT_FILE="/tmp/${TARGET}_2" +MSG="Test #2 from CPU${CPU}" +set_user_data +runtest +validate_release +validate_sysdata + +# =================================================== +# TEST #3 +# Unset all sysdata, fail if any userdata is set +# =================================================== +CPU=$((RANDOM % $(nproc))) +OUTPUT_FILE="/tmp/${TARGET}_3" +MSG="Test #3 from CPU${CPU}" +unset_cpu_nr +unset_taskname +unset_release +runtest +# At this time, cpu= shouldn't be present in the msg +validate_no_sysdata + +exit "${ksft_pass}" diff --git a/tools/testing/selftests/drivers/net/ping.py b/tools/testing/selftests/drivers/net/ping.py index eb83e7b487978..93120e86e1024 100755 --- a/tools/testing/selftests/drivers/net/ping.py +++ b/tools/testing/selftests/drivers/net/ping.py @@ -1,49 +1,219 @@ #!/usr/bin/env python3 # SPDX-License-Identifier: GPL-2.0 +import os +import random, string, time from lib.py import ksft_run, ksft_exit -from lib.py import ksft_eq -from lib.py import NetDrvEpEnv +from lib.py 
import ksft_eq, KsftSkipEx, KsftFailEx +from lib.py import EthtoolFamily, NetDrvEpEnv from lib.py import bkg, cmd, wait_port_listen, rand_port +from lib.py import defer, ethtool, ip +remote_ifname="" +no_sleep=False -def test_v4(cfg) -> None: - cfg.require_v4() +def _test_v4(cfg) -> None: + cfg.require_ipver("4") - cmd(f"ping -c 1 -W0.5 {cfg.remote_v4}") - cmd(f"ping -c 1 -W0.5 {cfg.v4}", host=cfg.remote) + cmd("ping -c 1 -W0.5 " + cfg.remote_addr_v["4"]) + cmd("ping -c 1 -W0.5 " + cfg.addr_v["4"], host=cfg.remote) + cmd("ping -s 65000 -c 1 -W0.5 " + cfg.remote_addr_v["4"]) + cmd("ping -s 65000 -c 1 -W0.5 " + cfg.addr_v["4"], host=cfg.remote) +def _test_v6(cfg) -> None: + cfg.require_ipver("6") -def test_v6(cfg) -> None: - cfg.require_v6() + cmd("ping -c 1 -W5 " + cfg.remote_addr_v["6"]) + cmd("ping -c 1 -W5 " + cfg.addr_v["6"], host=cfg.remote) + cmd("ping -s 65000 -c 1 -W0.5 " + cfg.remote_addr_v["6"]) + cmd("ping -s 65000 -c 1 -W0.5 " + cfg.addr_v["6"], host=cfg.remote) - cmd(f"ping -c 1 -W0.5 {cfg.remote_v6}") - cmd(f"ping -c 1 -W0.5 {cfg.v6}", host=cfg.remote) - - -def test_tcp(cfg) -> None: +def _test_tcp(cfg) -> None: cfg.require_cmd("socat", remote=True) port = rand_port() listen_cmd = f"socat -{cfg.addr_ipver} -t 2 -u TCP-LISTEN:{port},reuseport STDOUT" + test_string = ''.join(random.choice(string.ascii_lowercase) for _ in range(65536)) with bkg(listen_cmd, exit_wait=True) as nc: wait_port_listen(port) - cmd(f"echo ping | socat -t 2 -u STDIN TCP:{cfg.baddr}:{port}", + cmd(f"echo {test_string} | socat -t 2 -u STDIN TCP:{cfg.baddr}:{port}", shell=True, host=cfg.remote) - ksft_eq(nc.stdout.strip(), "ping") + ksft_eq(nc.stdout.strip(), test_string) + test_string = ''.join(random.choice(string.ascii_lowercase) for _ in range(65536)) with bkg(listen_cmd, host=cfg.remote, exit_wait=True) as nc: wait_port_listen(port, host=cfg.remote) - cmd(f"echo ping | socat -t 2 -u STDIN TCP:{cfg.remote_baddr}:{port}", shell=True) - ksft_eq(nc.stdout.strip(), "ping") - + cmd(f"echo {test_string} | socat -t 2 -u STDIN TCP:{cfg.remote_baddr}:{port}", shell=True) + ksft_eq(nc.stdout.strip(), test_string) + +def _set_offload_checksum(cfg, netnl, on) -> None: + try: + ethtool(f" -K {cfg.ifname} rx {on} tx {on} ") + except: + return + +def _set_xdp_generic_sb_on(cfg) -> None: + test_dir = os.path.dirname(os.path.realpath(__file__)) + prog = test_dir + "/../../net/lib/xdp_dummy.bpf.o" + cmd(f"ip link set dev {remote_ifname} mtu 1500", shell=True, host=cfg.remote) + cmd(f"ip link set dev {cfg.ifname} mtu 1500 xdpgeneric obj {prog} sec xdp", shell=True) + defer(cmd, f"ip link set dev {cfg.ifname} xdpgeneric off") + + if no_sleep != True: + time.sleep(10) + +def _set_xdp_generic_mb_on(cfg) -> None: + test_dir = os.path.dirname(os.path.realpath(__file__)) + prog = test_dir + "/../../net/lib/xdp_dummy.bpf.o" + cmd(f"ip link set dev {remote_ifname} mtu 9000", shell=True, host=cfg.remote) + defer(ip, f"link set dev {remote_ifname} mtu 1500", host=cfg.remote) + ip("link set dev %s mtu 9000 xdpgeneric obj %s sec xdp.frags" % (cfg.ifname, prog)) + defer(ip, f"link set dev {cfg.ifname} mtu 1500 xdpgeneric off") + + if no_sleep != True: + time.sleep(10) + +def _set_xdp_native_sb_on(cfg) -> None: + test_dir = os.path.dirname(os.path.realpath(__file__)) + prog = test_dir + "/../../net/lib/xdp_dummy.bpf.o" + cmd(f"ip link set dev {remote_ifname} mtu 1500", shell=True, host=cfg.remote) + cmd(f"ip -j link set dev {cfg.ifname} mtu 1500 xdp obj {prog} sec xdp", shell=True) + defer(ip, f"link set dev {cfg.ifname} mtu 1500 xdp 
off") + xdp_info = ip("-d link show %s" % (cfg.ifname), json=True)[0] + if xdp_info['xdp']['mode'] != 1: + """ + If the interface doesn't support native-mode, it falls back to generic mode. + The mode value 1 is native and 2 is generic. + So it raises an exception if mode is not 1(native mode). + """ + raise KsftSkipEx('device does not support native-XDP') + + if no_sleep != True: + time.sleep(10) + +def _set_xdp_native_mb_on(cfg) -> None: + test_dir = os.path.dirname(os.path.realpath(__file__)) + prog = test_dir + "/../../net/lib/xdp_dummy.bpf.o" + cmd(f"ip link set dev {remote_ifname} mtu 9000", shell=True, host=cfg.remote) + defer(ip, f"link set dev {remote_ifname} mtu 1500", host=cfg.remote) + try: + cmd(f"ip link set dev {cfg.ifname} mtu 9000 xdp obj {prog} sec xdp.frags", shell=True) + defer(ip, f"link set dev {cfg.ifname} mtu 1500 xdp off") + except Exception as e: + raise KsftSkipEx('device does not support native-multi-buffer XDP') + + if no_sleep != True: + time.sleep(10) + +def _set_xdp_offload_on(cfg) -> None: + test_dir = os.path.dirname(os.path.realpath(__file__)) + prog = test_dir + "/../../net/lib/xdp_dummy.bpf.o" + cmd(f"ip link set dev {cfg.ifname} mtu 1500", shell=True) + try: + cmd(f"ip link set dev {cfg.ifname} xdpoffload obj {prog} sec xdp", shell=True) + except Exception as e: + raise KsftSkipEx('device does not support offloaded XDP') + defer(ip, f"link set dev {cfg.ifname} xdpoffload off") + cmd(f"ip link set dev {remote_ifname} mtu 1500", shell=True, host=cfg.remote) + + if no_sleep != True: + time.sleep(10) + +def get_interface_info(cfg) -> None: + global remote_ifname + global no_sleep + + remote_info = cmd(f"ip -4 -o addr show to {cfg.remote_addr_v['4']} | awk '{{print $2}}'", shell=True, host=cfg.remote).stdout + remote_ifname = remote_info.rstrip('\n') + if remote_ifname == "": + raise KsftFailEx('Can not get remote interface') + local_info = ip("-d link show %s" % (cfg.ifname), json=True)[0] + if 'parentbus' in local_info and local_info['parentbus'] == "netdevsim": + no_sleep=True + if 'linkinfo' in local_info and local_info['linkinfo']['info_kind'] == "veth": + no_sleep=True + +def set_interface_init(cfg) -> None: + cmd(f"ip link set dev {cfg.ifname} mtu 1500", shell=True) + cmd(f"ip link set dev {cfg.ifname} xdp off ", shell=True) + cmd(f"ip link set dev {cfg.ifname} xdpgeneric off ", shell=True) + cmd(f"ip link set dev {cfg.ifname} xdpoffload off", shell=True) + cmd(f"ip link set dev {remote_ifname} mtu 1500", shell=True, host=cfg.remote) + +def test_default(cfg, netnl) -> None: + _set_offload_checksum(cfg, netnl, "off") + _test_v4(cfg) + _test_v6(cfg) + _test_tcp(cfg) + _set_offload_checksum(cfg, netnl, "on") + _test_v4(cfg) + _test_v6(cfg) + _test_tcp(cfg) + +def test_xdp_generic_sb(cfg, netnl) -> None: + _set_xdp_generic_sb_on(cfg) + _set_offload_checksum(cfg, netnl, "off") + _test_v4(cfg) + _test_v6(cfg) + _test_tcp(cfg) + _set_offload_checksum(cfg, netnl, "on") + _test_v4(cfg) + _test_v6(cfg) + _test_tcp(cfg) + +def test_xdp_generic_mb(cfg, netnl) -> None: + _set_xdp_generic_mb_on(cfg) + _set_offload_checksum(cfg, netnl, "off") + _test_v4(cfg) + _test_v6(cfg) + _test_tcp(cfg) + _set_offload_checksum(cfg, netnl, "on") + _test_v4(cfg) + _test_v6(cfg) + _test_tcp(cfg) + +def test_xdp_native_sb(cfg, netnl) -> None: + _set_xdp_native_sb_on(cfg) + _set_offload_checksum(cfg, netnl, "off") + _test_v4(cfg) + _test_v6(cfg) + _test_tcp(cfg) + _set_offload_checksum(cfg, netnl, "on") + _test_v4(cfg) + _test_v6(cfg) + _test_tcp(cfg) + +def test_xdp_native_mb(cfg, 
netnl) -> None: + _set_xdp_native_mb_on(cfg) + _set_offload_checksum(cfg, netnl, "off") + _test_v4(cfg) + _test_v6(cfg) + _test_tcp(cfg) + _set_offload_checksum(cfg, netnl, "on") + _test_v4(cfg) + _test_v6(cfg) + _test_tcp(cfg) + +def test_xdp_offload(cfg, netnl) -> None: + _set_xdp_offload_on(cfg) + _test_v4(cfg) + _test_v6(cfg) + _test_tcp(cfg) def main() -> None: with NetDrvEpEnv(__file__) as cfg: - ksft_run(globs=globals(), case_pfx={"test_"}, args=(cfg, )) + get_interface_info(cfg) + set_interface_init(cfg) + ksft_run([test_default, + test_xdp_generic_sb, + test_xdp_generic_mb, + test_xdp_native_sb, + test_xdp_native_mb, + test_xdp_offload], + args=(cfg, EthtoolFamily())) ksft_exit() diff --git a/tools/testing/selftests/drivers/net/queues.py b/tools/testing/selftests/drivers/net/queues.py index 38303da957ee5..cae923f84f699 100755 --- a/tools/testing/selftests/drivers/net/queues.py +++ b/tools/testing/selftests/drivers/net/queues.py @@ -2,13 +2,15 @@ # SPDX-License-Identifier: GPL-2.0 from lib.py import ksft_disruptive, ksft_exit, ksft_run -from lib.py import ksft_eq, ksft_raises, KsftSkipEx +from lib.py import ksft_eq, ksft_not_in, ksft_raises, KsftSkipEx, KsftFailEx from lib.py import EthtoolFamily, NetdevFamily, NlError from lib.py import NetDrvEnv -from lib.py import cmd, defer, ip +from lib.py import bkg, cmd, defer, ip import errno import glob - +import os +import socket +import struct def sys_get_queues(ifname, qtype='rx') -> int: folders = glob.glob(f'/sys/class/net/{ifname}/queues/{qtype}-*') @@ -22,6 +24,40 @@ def nl_get_queues(cfg, nl, qtype='rx'): return None +def check_xsk(cfg, nl, xdp_queue_id=0) -> None: + # Probe for support + xdp = cmd(cfg.rpath("xdp_helper") + ' - -', fail=False) + if xdp.ret == 255: + raise KsftSkipEx('AF_XDP unsupported') + elif xdp.ret > 0: + raise KsftFailEx('unable to create AF_XDP socket') + + with bkg(f'{cfg.rpath("xdp_helper")} {cfg.ifindex} {xdp_queue_id}', + ksft_wait=3): + + rx = tx = False + + queues = nl.queue_get({'ifindex': cfg.ifindex}, dump=True) + if not queues: + raise KsftSkipEx("Netlink reports no queues") + + for q in queues: + if q['id'] == 0: + if q['type'] == 'rx': + rx = True + if q['type'] == 'tx': + tx = True + + ksft_eq(q.get('xsk', None), {}, + comment="xsk attr on queue we configured") + else: + ksft_not_in('xsk', q, + comment="xsk attr on queue we didn't configure") + + ksft_eq(rx, True) + ksft_eq(tx, True) + + def get_queues(cfg, nl) -> None: snl = NetdevFamily(recv_size=4096) @@ -45,10 +81,9 @@ def addremove_queues(cfg, nl) -> None: netnl = EthtoolFamily() channels = netnl.channels_get({'header': {'dev-index': cfg.ifindex}}) - if channels['combined-count'] == 0: - rx_type = 'rx' - else: - rx_type = 'combined' + rx_type = 'rx' + if channels.get('combined-count', 0) > 0: + rx_type = 'combined' expected = curr_queues - 1 cmd(f"ethtool -L {cfg.dev['ifname']} {rx_type} {expected}", timeout=10) @@ -81,7 +116,8 @@ def check_down(cfg, nl) -> None: def main() -> None: with NetDrvEnv(__file__, queue_count=100) as cfg: - ksft_run([get_queues, addremove_queues, check_down], args=(cfg, NetdevFamily())) + ksft_run([get_queues, addremove_queues, check_down, check_xsk], + args=(cfg, NetdevFamily())) ksft_exit() diff --git a/tools/testing/selftests/drivers/net/settings b/tools/testing/selftests/drivers/net/settings new file mode 100644 index 0000000000000..a953c96aa16e1 --- /dev/null +++ b/tools/testing/selftests/drivers/net/settings @@ -0,0 +1 @@ +timeout=180 diff --git a/tools/testing/selftests/drivers/net/xdp_helper.c 
b/tools/testing/selftests/drivers/net/xdp_helper.c new file mode 100644 index 0000000000000..aeed259141045 --- /dev/null +++ b/tools/testing/selftests/drivers/net/xdp_helper.c @@ -0,0 +1,151 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define UMEM_SZ (1U << 16) +#define NUM_DESC (UMEM_SZ / 2048) + +/* Move this to a common header when reused! */ +static void ksft_ready(void) +{ + const char msg[7] = "ready\n"; + char *env_str; + int fd; + + env_str = getenv("KSFT_READY_FD"); + if (env_str) { + fd = atoi(env_str); + if (!fd) { + fprintf(stderr, "invalid KSFT_READY_FD = '%s'\n", + env_str); + return; + } + } else { + fd = STDOUT_FILENO; + } + + write(fd, msg, sizeof(msg)); + if (fd != STDOUT_FILENO) + close(fd); +} + +static void ksft_wait(void) +{ + char *env_str; + char byte; + int fd; + + env_str = getenv("KSFT_WAIT_FD"); + if (env_str) { + fd = atoi(env_str); + if (!fd) { + fprintf(stderr, "invalid KSFT_WAIT_FD = '%s'\n", + env_str); + return; + } + } else { + /* Not running in KSFT env, wait for input from STDIN instead */ + fd = STDIN_FILENO; + } + + read(fd, &byte, sizeof(byte)); + if (fd != STDIN_FILENO) + close(fd); +} + +/* this is a simple helper program that creates an XDP socket and does the + * minimum necessary to get bind() to succeed. + * + * this test program is not intended to actually process packets, but could be + * extended in the future if that is actually needed. + * + * it is used by queues.py to ensure the xsk netlinux attribute is set + * correctly. + */ +int main(int argc, char **argv) +{ + struct xdp_umem_reg umem_reg = { 0 }; + struct sockaddr_xdp sxdp = { 0 }; + int num_desc = NUM_DESC; + void *umem_area; + int ifindex; + int sock_fd; + int queue; + + if (argc != 3) { + fprintf(stderr, "Usage: %s ifindex queue_id\n", argv[0]); + return 1; + } + + sock_fd = socket(AF_XDP, SOCK_RAW, 0); + if (sock_fd < 0) { + perror("socket creation failed"); + /* if the kernel doesn't support AF_XDP, let the test program + * know with -1. All other error paths return 1. 
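+	 * (a -1 return from main() is reported to the parent as exit
+	 * status 255, which is what check_xsk() in queues.py probes for
+	 * before skipping)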
+ */ + if (errno == EAFNOSUPPORT) + return -1; + return 1; + } + + /* "Probing mode", just checking if AF_XDP sockets are supported */ + if (!strcmp(argv[1], "-") && !strcmp(argv[2], "-")) { + printf("AF_XDP support detected\n"); + close(sock_fd); + return 0; + } + + ifindex = atoi(argv[1]); + queue = atoi(argv[2]); + + umem_area = mmap(NULL, UMEM_SZ, PROT_READ | PROT_WRITE, MAP_PRIVATE | + MAP_ANONYMOUS, -1, 0); + if (umem_area == MAP_FAILED) { + perror("mmap failed"); + return 1; + } + + umem_reg.addr = (uintptr_t)umem_area; + umem_reg.len = UMEM_SZ; + umem_reg.chunk_size = 2048; + umem_reg.headroom = 0; + + setsockopt(sock_fd, SOL_XDP, XDP_UMEM_REG, &umem_reg, + sizeof(umem_reg)); + setsockopt(sock_fd, SOL_XDP, XDP_UMEM_FILL_RING, &num_desc, + sizeof(num_desc)); + setsockopt(sock_fd, SOL_XDP, XDP_UMEM_COMPLETION_RING, &num_desc, + sizeof(num_desc)); + setsockopt(sock_fd, SOL_XDP, XDP_RX_RING, &num_desc, sizeof(num_desc)); + + sxdp.sxdp_family = AF_XDP; + sxdp.sxdp_ifindex = ifindex; + sxdp.sxdp_queue_id = queue; + sxdp.sxdp_flags = 0; + + if (bind(sock_fd, (struct sockaddr *)&sxdp, sizeof(sxdp)) != 0) { + munmap(umem_area, UMEM_SZ); + perror("bind failed"); + close(sock_fd); + return 1; + } + + ksft_ready(); + ksft_wait(); + + /* parent program will write a byte to stdin when its ready for this + * helper to exit + */ + + close(sock_fd); + return 0; +} diff --git a/tools/testing/selftests/ftrace/test.d/dynevent/add_remove_fprobe.tc b/tools/testing/selftests/ftrace/test.d/dynevent/add_remove_fprobe.tc index dc25bcf4f9e2c..73f6c6fcecabe 100644 --- a/tools/testing/selftests/ftrace/test.d/dynevent/add_remove_fprobe.tc +++ b/tools/testing/selftests/ftrace/test.d/dynevent/add_remove_fprobe.tc @@ -7,12 +7,42 @@ echo 0 > events/enable echo > dynamic_events PLACE=$FUNCTION_FORK +PLACE2="kmem_cache_free" +PLACE3="schedule_timeout" + +# Some functions may have BPF programs attached, therefore +# count already enabled_functions before tests start +ocnt=`cat enabled_functions | wc -l` echo "f:myevent1 $PLACE" >> dynamic_events + +# Make sure the event is attached and is the only one +grep -q $PLACE enabled_functions +cnt=`cat enabled_functions | wc -l` +if [ $cnt -ne $((ocnt + 1)) ]; then + exit_fail +fi + echo "f:myevent2 $PLACE%return" >> dynamic_events +# It should till be the only attached function +cnt=`cat enabled_functions | wc -l` +if [ $cnt -ne $((ocnt + 1)) ]; then + exit_fail +fi + +# add another event +echo "f:myevent3 $PLACE2" >> dynamic_events + +grep -q $PLACE2 enabled_functions +cnt=`cat enabled_functions | wc -l` +if [ $cnt -ne $((ocnt + 2)) ]; then + exit_fail +fi + grep -q myevent1 dynamic_events grep -q myevent2 dynamic_events +grep -q myevent3 dynamic_events test -d events/fprobes/myevent1 test -d events/fprobes/myevent2 @@ -21,6 +51,34 @@ echo "-:myevent2" >> dynamic_events grep -q myevent1 dynamic_events ! 
grep -q myevent2 dynamic_events +# should still have 2 left +cnt=`cat enabled_functions | wc -l` +if [ $cnt -ne $((ocnt + 2)) ]; then + exit_fail +fi + echo > dynamic_events +# Should have none left +cnt=`cat enabled_functions | wc -l` +if [ $cnt -ne $ocnt ]; then + exit_fail +fi + +echo "f:myevent4 $PLACE" >> dynamic_events + +# Should only have one enabled +cnt=`cat enabled_functions | wc -l` +if [ $cnt -ne $((ocnt + 1)) ]; then + exit_fail +fi + +echo > dynamic_events + +# Should have none left +cnt=`cat enabled_functions | wc -l` +if [ $cnt -ne $ocnt ]; then + exit_fail +fi + clear_trace diff --git a/tools/testing/selftests/hid/Makefile b/tools/testing/selftests/hid/Makefile index 0336353bd15f0..2839d2612ce3a 100644 --- a/tools/testing/selftests/hid/Makefile +++ b/tools/testing/selftests/hid/Makefile @@ -43,10 +43,8 @@ TEST_GEN_PROGS = hid_bpf hidraw # $3 - target (assumed to be file); only file name will be emitted; # $4 - optional extra arg, emitted as-is, if provided. ifeq ($(V),1) -Q = msg = else -Q = @ msg = @printf ' %-8s%s %s%s\n' "$(1)" "$(if $(2), [$(2)])" "$(notdir $(3))" "$(if $(4), $(4))"; MAKEFLAGS += --no-print-directory submake_extras := feature_display=0 diff --git a/tools/testing/selftests/kselftest/prefix.pl b/tools/testing/selftests/kselftest/prefix.pl index 12a7f4ca2684d..b8e4d697e3b3a 100755 --- a/tools/testing/selftests/kselftest/prefix.pl +++ b/tools/testing/selftests/kselftest/prefix.pl @@ -4,12 +4,15 @@ # to have unbuffering forced with "stdbuf -i0 -o0 -e0 $cmd". use strict; use IO::Handle; +use Time::HiRes qw( time ); binmode STDIN; binmode STDOUT; STDOUT->autoflush(1); +my $start_time = time(); +my $prev_time = $start_time; my $needed = 1; while (1) { my $char; @@ -17,6 +20,11 @@ exit 0 if ($bytes == 0); if ($needed) { print "# "; + if ($ENV{kselftest_profile}) { + my $now = time(); + printf("%.2f [+%.2f] ", $now - $start_time, $now - $prev_time); + $prev_time = $now; + } $needed = 0; } print $char; diff --git a/tools/testing/selftests/kselftest/runner.sh b/tools/testing/selftests/kselftest/runner.sh index 2c3c58e65a419..e65bffb9981f8 100644 --- a/tools/testing/selftests/kselftest/runner.sh +++ b/tools/testing/selftests/kselftest/runner.sh @@ -89,7 +89,7 @@ run_one() fi field=$(echo "$line" | cut -d= -f1) value=$(echo "$line" | cut -d= -f2-) - eval "kselftest_$field"="$value" + eval "export kselftest_$field"="$value" done < "$settings" fi diff --git a/tools/testing/selftests/kvm/mmu_stress_test.c b/tools/testing/selftests/kvm/mmu_stress_test.c index d9c76b4c0d88a..6a437d2be9fa4 100644 --- a/tools/testing/selftests/kvm/mmu_stress_test.c +++ b/tools/testing/selftests/kvm/mmu_stress_test.c @@ -18,6 +18,7 @@ #include "ucall_common.h" static bool mprotect_ro_done; +static bool all_vcpus_hit_ro_fault; static void guest_code(uint64_t start_gpa, uint64_t end_gpa, uint64_t stride) { @@ -36,9 +37,9 @@ static void guest_code(uint64_t start_gpa, uint64_t end_gpa, uint64_t stride) /* * Write to the region while mprotect(PROT_READ) is underway. Keep - * looping until the memory is guaranteed to be read-only, otherwise - * vCPUs may complete their writes and advance to the next stage - * prematurely. + * looping until the memory is guaranteed to be read-only and a fault + * has occurred, otherwise vCPUs may complete their writes and advance + * to the next stage prematurely. 
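+	 * (the last vCPU worker to take the expected EFAULT sets
+	 * all_vcpus_hit_ro_fault, see the nr_ro_faults accounting in
+	 * vcpu_worker() below)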
* * For architectures that support skipping the faulting instruction, * generate the store via inline assembly to ensure the exact length @@ -56,7 +57,7 @@ static void guest_code(uint64_t start_gpa, uint64_t end_gpa, uint64_t stride) #else vcpu_arch_put_guest(*((volatile uint64_t *)gpa), gpa); #endif - } while (!READ_ONCE(mprotect_ro_done)); + } while (!READ_ONCE(mprotect_ro_done) || !READ_ONCE(all_vcpus_hit_ro_fault)); /* * Only architectures that write the entire range can explicitly sync, @@ -81,6 +82,7 @@ struct vcpu_info { static int nr_vcpus; static atomic_t rendezvous; +static atomic_t nr_ro_faults; static void rendezvous_with_boss(void) { @@ -148,12 +150,16 @@ static void *vcpu_worker(void *data) * be stuck on the faulting instruction for other architectures. Go to * stage 3 without a rendezvous */ - do { - r = _vcpu_run(vcpu); - } while (!r); + r = _vcpu_run(vcpu); TEST_ASSERT(r == -1 && errno == EFAULT, "Expected EFAULT on write to RO memory, got r = %d, errno = %d", r, errno); + atomic_inc(&nr_ro_faults); + if (atomic_read(&nr_ro_faults) == nr_vcpus) { + WRITE_ONCE(all_vcpus_hit_ro_fault, true); + sync_global_to_guest(vm, all_vcpus_hit_ro_fault); + } + #if defined(__x86_64__) || defined(__aarch64__) /* * Verify *all* writes from the guest hit EFAULT due to the VMA now @@ -378,7 +384,6 @@ int main(int argc, char *argv[]) rendezvous_with_vcpus(&time_run2, "run 2"); mprotect(mem, slot_size, PROT_READ); - usleep(10); mprotect_ro_done = true; sync_global_to_guest(vm, mprotect_ro_done); diff --git a/tools/testing/selftests/kvm/x86/nested_exceptions_test.c b/tools/testing/selftests/kvm/x86/nested_exceptions_test.c index 3eb0313ffa397..3641a42934acb 100644 --- a/tools/testing/selftests/kvm/x86/nested_exceptions_test.c +++ b/tools/testing/selftests/kvm/x86/nested_exceptions_test.c @@ -85,6 +85,7 @@ static void svm_run_l2(struct svm_test_data *svm, void *l2_code, int vector, GUEST_ASSERT_EQ(ctrl->exit_code, (SVM_EXIT_EXCP_BASE + vector)); GUEST_ASSERT_EQ(ctrl->exit_info_1, error_code); + GUEST_ASSERT(!ctrl->int_state); } static void l1_svm_code(struct svm_test_data *svm) @@ -122,6 +123,7 @@ static void vmx_run_l2(void *l2_code, int vector, uint32_t error_code) GUEST_ASSERT_EQ(vmreadz(VM_EXIT_REASON), EXIT_REASON_EXCEPTION_NMI); GUEST_ASSERT_EQ((vmreadz(VM_EXIT_INTR_INFO) & 0xff), vector); GUEST_ASSERT_EQ(vmreadz(VM_EXIT_INTR_ERROR_CODE), error_code); + GUEST_ASSERT(!vmreadz(GUEST_INTERRUPTIBILITY_INFO)); } static void l1_vmx_code(struct vmx_pages *vmx) diff --git a/tools/testing/selftests/kvm/x86/sev_smoke_test.c b/tools/testing/selftests/kvm/x86/sev_smoke_test.c index a1a688e752666..d97816dc476a2 100644 --- a/tools/testing/selftests/kvm/x86/sev_smoke_test.c +++ b/tools/testing/selftests/kvm/x86/sev_smoke_test.c @@ -52,7 +52,8 @@ static void compare_xsave(u8 *from_host, u8 *from_guest) bool bad = false; for (i = 0; i < 4095; i++) { if (from_host[i] != from_guest[i]) { - printf("mismatch at %02hhx | %02hhx %02hhx\n", i, from_host[i], from_guest[i]); + printf("mismatch at %u | %02hhx %02hhx\n", + i, from_host[i], from_guest[i]); bad = true; } } diff --git a/tools/testing/selftests/landlock/.gitignore b/tools/testing/selftests/landlock/.gitignore index 470203a7cd737..335b2b1a3463a 100644 --- a/tools/testing/selftests/landlock/.gitignore +++ b/tools/testing/selftests/landlock/.gitignore @@ -1,2 +1,4 @@ /*_test +/sandbox-and-launch /true +/wait-pipe diff --git a/tools/testing/selftests/landlock/common.h b/tools/testing/selftests/landlock/common.h index a604ea5d8297c..6064c9ac05329 100644 --- 
a/tools/testing/selftests/landlock/common.h +++ b/tools/testing/selftests/landlock/common.h @@ -207,6 +207,7 @@ enforce_ruleset(struct __test_metadata *const _metadata, const int ruleset_fd) struct protocol_variant { int domain; int type; + int protocol; }; struct service_fixture { diff --git a/tools/testing/selftests/landlock/config b/tools/testing/selftests/landlock/config index 29af19c4e9f98..425de4c20271c 100644 --- a/tools/testing/selftests/landlock/config +++ b/tools/testing/selftests/landlock/config @@ -1,8 +1,11 @@ +CONFIG_AF_UNIX_OOB=y CONFIG_CGROUPS=y CONFIG_CGROUP_SCHED=y CONFIG_INET=y CONFIG_IPV6=y CONFIG_KEYS=y +CONFIG_MPTCP=y +CONFIG_MPTCP_IPV6=y CONFIG_NET=y CONFIG_NET_NS=y CONFIG_OVERLAY_FS=y diff --git a/tools/testing/selftests/landlock/net_test.c b/tools/testing/selftests/landlock/net_test.c index 4e0aeb53b225a..d9de0ee49ebc2 100644 --- a/tools/testing/selftests/landlock/net_test.c +++ b/tools/testing/selftests/landlock/net_test.c @@ -85,18 +85,18 @@ static void setup_loopback(struct __test_metadata *const _metadata) clear_ambient_cap(_metadata, CAP_NET_ADMIN); } +static bool prot_is_tcp(const struct protocol_variant *const prot) +{ + return (prot->domain == AF_INET || prot->domain == AF_INET6) && + prot->type == SOCK_STREAM && + (prot->protocol == IPPROTO_TCP || prot->protocol == IPPROTO_IP); +} + static bool is_restricted(const struct protocol_variant *const prot, const enum sandbox_type sandbox) { - switch (prot->domain) { - case AF_INET: - case AF_INET6: - switch (prot->type) { - case SOCK_STREAM: - return sandbox == TCP_SANDBOX; - } - break; - } + if (sandbox == TCP_SANDBOX) + return prot_is_tcp(prot); return false; } @@ -105,7 +105,7 @@ static int socket_variant(const struct service_fixture *const srv) int ret; ret = socket(srv->protocol.domain, srv->protocol.type | SOCK_CLOEXEC, - 0); + srv->protocol.protocol); if (ret < 0) return -errno; return ret; @@ -290,22 +290,70 @@ FIXTURE_TEARDOWN(protocol) } /* clang-format off */ -FIXTURE_VARIANT_ADD(protocol, no_sandbox_with_ipv4_tcp) { +FIXTURE_VARIANT_ADD(protocol, no_sandbox_with_ipv4_tcp1) { /* clang-format on */ .sandbox = NO_SANDBOX, .prot = { .domain = AF_INET, .type = SOCK_STREAM, + /* IPPROTO_IP == 0 */ + .protocol = IPPROTO_IP, }, }; /* clang-format off */ -FIXTURE_VARIANT_ADD(protocol, no_sandbox_with_ipv6_tcp) { +FIXTURE_VARIANT_ADD(protocol, no_sandbox_with_ipv4_tcp2) { + /* clang-format on */ + .sandbox = NO_SANDBOX, + .prot = { + .domain = AF_INET, + .type = SOCK_STREAM, + .protocol = IPPROTO_TCP, + }, +}; + +/* clang-format off */ +FIXTURE_VARIANT_ADD(protocol, no_sandbox_with_ipv4_mptcp) { + /* clang-format on */ + .sandbox = NO_SANDBOX, + .prot = { + .domain = AF_INET, + .type = SOCK_STREAM, + .protocol = IPPROTO_MPTCP, + }, +}; + +/* clang-format off */ +FIXTURE_VARIANT_ADD(protocol, no_sandbox_with_ipv6_tcp1) { + /* clang-format on */ + .sandbox = NO_SANDBOX, + .prot = { + .domain = AF_INET6, + .type = SOCK_STREAM, + /* IPPROTO_IP == 0 */ + .protocol = IPPROTO_IP, + }, +}; + +/* clang-format off */ +FIXTURE_VARIANT_ADD(protocol, no_sandbox_with_ipv6_tcp2) { /* clang-format on */ .sandbox = NO_SANDBOX, .prot = { .domain = AF_INET6, .type = SOCK_STREAM, + .protocol = IPPROTO_TCP, + }, +}; + +/* clang-format off */ +FIXTURE_VARIANT_ADD(protocol, no_sandbox_with_ipv6_mptcp) { + /* clang-format on */ + .sandbox = NO_SANDBOX, + .prot = { + .domain = AF_INET6, + .type = SOCK_STREAM, + .protocol = IPPROTO_MPTCP, }, }; @@ -350,22 +398,70 @@ FIXTURE_VARIANT_ADD(protocol, no_sandbox_with_unix_datagram) { }; /* 
clang-format off */ -FIXTURE_VARIANT_ADD(protocol, tcp_sandbox_with_ipv4_tcp) { +FIXTURE_VARIANT_ADD(protocol, tcp_sandbox_with_ipv4_tcp1) { + /* clang-format on */ + .sandbox = TCP_SANDBOX, + .prot = { + .domain = AF_INET, + .type = SOCK_STREAM, + /* IPPROTO_IP == 0 */ + .protocol = IPPROTO_IP, + }, +}; + +/* clang-format off */ +FIXTURE_VARIANT_ADD(protocol, tcp_sandbox_with_ipv4_tcp2) { + /* clang-format on */ + .sandbox = TCP_SANDBOX, + .prot = { + .domain = AF_INET, + .type = SOCK_STREAM, + .protocol = IPPROTO_TCP, + }, +}; + +/* clang-format off */ +FIXTURE_VARIANT_ADD(protocol, tcp_sandbox_with_ipv4_mptcp) { /* clang-format on */ .sandbox = TCP_SANDBOX, .prot = { .domain = AF_INET, .type = SOCK_STREAM, + .protocol = IPPROTO_MPTCP, + }, +}; + +/* clang-format off */ +FIXTURE_VARIANT_ADD(protocol, tcp_sandbox_with_ipv6_tcp1) { + /* clang-format on */ + .sandbox = TCP_SANDBOX, + .prot = { + .domain = AF_INET6, + .type = SOCK_STREAM, + /* IPPROTO_IP == 0 */ + .protocol = IPPROTO_IP, + }, +}; + +/* clang-format off */ +FIXTURE_VARIANT_ADD(protocol, tcp_sandbox_with_ipv6_tcp2) { + /* clang-format on */ + .sandbox = TCP_SANDBOX, + .prot = { + .domain = AF_INET6, + .type = SOCK_STREAM, + .protocol = IPPROTO_TCP, }, }; /* clang-format off */ -FIXTURE_VARIANT_ADD(protocol, tcp_sandbox_with_ipv6_tcp) { +FIXTURE_VARIANT_ADD(protocol, tcp_sandbox_with_ipv6_mptcp) { /* clang-format on */ .sandbox = TCP_SANDBOX, .prot = { .domain = AF_INET6, .type = SOCK_STREAM, + .protocol = IPPROTO_MPTCP, }, }; diff --git a/tools/testing/selftests/mm/hugepage-mremap.c b/tools/testing/selftests/mm/hugepage-mremap.c index ada9156cc497b..c463d1c09c9b4 100644 --- a/tools/testing/selftests/mm/hugepage-mremap.c +++ b/tools/testing/selftests/mm/hugepage-mremap.c @@ -15,7 +15,7 @@ #define _GNU_SOURCE #include #include -#include +#include #include #include #include /* Definition of O_* constants */ diff --git a/tools/testing/selftests/mm/ksm_functional_tests.c b/tools/testing/selftests/mm/ksm_functional_tests.c index 66b4e111b5a27..b61803e36d1cf 100644 --- a/tools/testing/selftests/mm/ksm_functional_tests.c +++ b/tools/testing/selftests/mm/ksm_functional_tests.c @@ -11,7 +11,7 @@ #include #include #include -#include +#include #include #include #include @@ -369,6 +369,7 @@ static void test_unmerge_discarded(void) munmap(map, size); } +#ifdef __NR_userfaultfd static void test_unmerge_uffd_wp(void) { struct uffdio_writeprotect uffd_writeprotect; @@ -429,6 +430,7 @@ static void test_unmerge_uffd_wp(void) unmap: munmap(map, size); } +#endif /* Verify that KSM can be enabled / queried with prctl. */ static void test_prctl(void) @@ -684,7 +686,9 @@ int main(int argc, char **argv) exit(test_child_ksm()); } +#ifdef __NR_userfaultfd tests++; +#endif ksft_print_header(); ksft_set_plan(tests); @@ -696,7 +700,9 @@ int main(int argc, char **argv) test_unmerge(); test_unmerge_zero_pages(); test_unmerge_discarded(); +#ifdef __NR_userfaultfd test_unmerge_uffd_wp(); +#endif test_prot_none(); diff --git a/tools/testing/selftests/mm/memfd_secret.c b/tools/testing/selftests/mm/memfd_secret.c index 74c911aa3aea9..9a0597310a765 100644 --- a/tools/testing/selftests/mm/memfd_secret.c +++ b/tools/testing/selftests/mm/memfd_secret.c @@ -17,7 +17,7 @@ #include #include -#include +#include #include #include #include @@ -28,6 +28,8 @@ #define pass(fmt, ...) ksft_test_result_pass(fmt, ##__VA_ARGS__) #define skip(fmt, ...) 
ksft_test_result_skip(fmt, ##__VA_ARGS__) +#ifdef __NR_memfd_secret + #define PATTERN 0x55 static const int prot = PROT_READ | PROT_WRITE; @@ -332,3 +334,13 @@ int main(int argc, char *argv[]) ksft_finished(); } + +#else /* __NR_memfd_secret */ + +int main(int argc, char *argv[]) +{ + printf("skip: skipping memfd_secret test (missing __NR_memfd_secret)\n"); + return KSFT_SKIP; +} + +#endif /* __NR_memfd_secret */ diff --git a/tools/testing/selftests/mm/mkdirty.c b/tools/testing/selftests/mm/mkdirty.c index af2fce496912b..09feeb4536460 100644 --- a/tools/testing/selftests/mm/mkdirty.c +++ b/tools/testing/selftests/mm/mkdirty.c @@ -9,7 +9,7 @@ */ #include #include -#include +#include #include #include #include @@ -265,6 +265,7 @@ static void test_pte_mapped_thp(void) munmap(mmap_mem, mmap_size); } +#ifdef __NR_userfaultfd static void test_uffdio_copy(void) { struct uffdio_register uffdio_register; @@ -322,6 +323,7 @@ static void test_uffdio_copy(void) munmap(dst, pagesize); free(src); } +#endif /* __NR_userfaultfd */ int main(void) { @@ -334,7 +336,9 @@ int main(void) thpsize / 1024); tests += 3; } +#ifdef __NR_userfaultfd tests += 1; +#endif /* __NR_userfaultfd */ ksft_print_header(); ksft_set_plan(tests); @@ -364,7 +368,9 @@ int main(void) if (thpsize) test_pte_mapped_thp(); /* Placing a fresh page via userfaultfd may set the PTE dirty. */ +#ifdef __NR_userfaultfd test_uffdio_copy(); +#endif /* __NR_userfaultfd */ err = ksft_get_fail_cnt(); if (err) diff --git a/tools/testing/selftests/mm/mlock2.h b/tools/testing/selftests/mm/mlock2.h index 1e5731bab499a..4417eaa5cfb78 100644 --- a/tools/testing/selftests/mm/mlock2.h +++ b/tools/testing/selftests/mm/mlock2.h @@ -3,7 +3,6 @@ #include #include #include -#include static int mlock2_(void *start, size_t len, int flags) { diff --git a/tools/testing/selftests/mm/protection_keys.c b/tools/testing/selftests/mm/protection_keys.c index a4683f2476f27..35565af308af6 100644 --- a/tools/testing/selftests/mm/protection_keys.c +++ b/tools/testing/selftests/mm/protection_keys.c @@ -42,7 +42,7 @@ #include #include #include -#include +#include #include #include diff --git a/tools/testing/selftests/mm/run_vmtests.sh b/tools/testing/selftests/mm/run_vmtests.sh index da7e266681031..7cc71d942f83b 100755 --- a/tools/testing/selftests/mm/run_vmtests.sh +++ b/tools/testing/selftests/mm/run_vmtests.sh @@ -304,7 +304,9 @@ uffd_stress_bin=./uffd-stress CATEGORY="userfaultfd" run_test ${uffd_stress_bin} anon 20 16 # Hugetlb tests require source and destination huge pages. Pass in half # the size of the free pages we have, which is used for *each*. -half_ufd_size_MB=$((freepgs / 2)) +# uffd-stress expects a region expressed in MiB, so we adjust +# half_ufd_size_MB accordingly. 
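+# For example, 64 free hugepages of 2048 KiB each yield
+# (64 * 2048) / 1024 / 2 = 64 MiB per region.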
+half_ufd_size_MB=$(((freepgs * hpgsize_KB) / 1024 / 2)) CATEGORY="userfaultfd" run_test ${uffd_stress_bin} hugetlb "$half_ufd_size_MB" 32 CATEGORY="userfaultfd" run_test ${uffd_stress_bin} hugetlb-private "$half_ufd_size_MB" 32 CATEGORY="userfaultfd" run_test ${uffd_stress_bin} shmem 20 16 diff --git a/tools/testing/selftests/mm/uffd-common.c b/tools/testing/selftests/mm/uffd-common.c index 717539eddf987..7ad6ba660c7d6 100644 --- a/tools/testing/selftests/mm/uffd-common.c +++ b/tools/testing/selftests/mm/uffd-common.c @@ -673,7 +673,11 @@ int uffd_open_dev(unsigned int flags) int uffd_open_sys(unsigned int flags) { +#ifdef __NR_userfaultfd return syscall(__NR_userfaultfd, flags); +#else + return -1; +#endif } int uffd_open(unsigned int flags) diff --git a/tools/testing/selftests/mm/uffd-stress.c b/tools/testing/selftests/mm/uffd-stress.c index a4b83280998ab..944d559ade21f 100644 --- a/tools/testing/selftests/mm/uffd-stress.c +++ b/tools/testing/selftests/mm/uffd-stress.c @@ -33,10 +33,11 @@ * pthread_mutex_lock will also verify the atomicity of the memory * transfer (UFFDIO_COPY). */ -#include + #include "uffd-common.h" uint64_t features; +#ifdef __NR_userfaultfd #define BOUNCE_RANDOM (1<<0) #define BOUNCE_RACINGFAULTS (1<<1) @@ -471,3 +472,15 @@ int main(int argc, char **argv) nr_pages, nr_pages_per_cpu); return userfaultfd_stress(); } + +#else /* __NR_userfaultfd */ + +#warning "missing __NR_userfaultfd definition" + +int main(void) +{ + printf("skip: Skipping userfaultfd test (missing __NR_userfaultfd)\n"); + return KSFT_SKIP; +} + +#endif /* __NR_userfaultfd */ diff --git a/tools/testing/selftests/mm/uffd-unit-tests.c b/tools/testing/selftests/mm/uffd-unit-tests.c index 9ff71fa1f9bf0..74c8bc02b5063 100644 --- a/tools/testing/selftests/mm/uffd-unit-tests.c +++ b/tools/testing/selftests/mm/uffd-unit-tests.c @@ -5,11 +5,12 @@ * Copyright (C) 2015-2023 Red Hat, Inc. */ -#include #include "uffd-common.h" #include "../../../../mm/gup_test.h" +#ifdef __NR_userfaultfd + /* The unit test doesn't need a large or random size, make it 32MB for now */ #define UFFD_TEST_MEM_SIZE (32UL << 20) @@ -1558,3 +1559,14 @@ int main(int argc, char *argv[]) return ksft_get_fail_cnt() ? 
KSFT_FAIL : KSFT_PASS; } +#else /* __NR_userfaultfd */ + +#warning "missing __NR_userfaultfd definition" + +int main(void) +{ + printf("Skipping %s (missing __NR_userfaultfd)\n", __FILE__); + return KSFT_SKIP; +} + +#endif /* __NR_userfaultfd */ diff --git a/tools/testing/selftests/net/.gitignore b/tools/testing/selftests/net/.gitignore index 28a715a8ef2b1..679542f565a48 100644 --- a/tools/testing/selftests/net/.gitignore +++ b/tools/testing/selftests/net/.gitignore @@ -21,6 +21,7 @@ msg_oob msg_zerocopy netlink-dumps nettest +proc_net_pktgen psock_fanout psock_snd psock_tpacket @@ -42,6 +43,7 @@ socket so_incoming_cpu so_netns_cookie so_txtime +so_rcv_listener stress_reuseport_listen tap tcp_fastopen_backup_key diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile index 73ee88d6b0430..6d718b478ed83 100644 --- a/tools/testing/selftests/net/Makefile +++ b/tools/testing/selftests/net/Makefile @@ -7,7 +7,7 @@ CFLAGS += -I../../../../usr/include/ $(KHDR_INCLUDES) CFLAGS += -I../ TEST_PROGS := run_netsocktests run_afpackettests test_bpf.sh netdevice.sh \ - rtnetlink.sh xfrm_policy.sh test_blackhole_dev.sh + rtnetlink.sh xfrm_policy.sh TEST_PROGS += fib_tests.sh fib-onlink-tests.sh pmtu.sh udpgso.sh ip_defrag.sh TEST_PROGS += udpgso_bench.sh fib_rule_tests.sh msg_zerocopy.sh psock_snd.sh TEST_PROGS += udpgro_bench.sh udpgro.sh test_vxlan_under_vrf.sh reuseport_addr_any.sh @@ -33,9 +33,12 @@ TEST_PROGS += gro.sh TEST_PROGS += gre_gso.sh TEST_PROGS += cmsg_so_mark.sh TEST_PROGS += cmsg_so_priority.sh -TEST_PROGS += cmsg_time.sh cmsg_ipv6.sh +TEST_PROGS += test_so_rcv.sh +TEST_PROGS += cmsg_time.sh cmsg_ip.sh TEST_PROGS += netns-name.sh +TEST_PROGS += link_netns.py TEST_PROGS += nl_netdev.py +TEST_PROGS += rtnetlink.py TEST_PROGS += srv6_end_dt46_l3vpn_test.sh TEST_PROGS += srv6_end_dt4_l3vpn_test.sh TEST_PROGS += srv6_end_dt6_l3vpn_test.sh @@ -75,6 +78,7 @@ TEST_GEN_PROGS += reuseport_dualstack reuseaddr_conflict tls tun tap epoll_busy_ TEST_GEN_FILES += toeplitz TEST_GEN_FILES += cmsg_sender TEST_GEN_FILES += stress_reuseport_listen +TEST_GEN_FILES += so_rcv_listener TEST_PROGS += test_vxlan_vnifiltering.sh TEST_GEN_FILES += io_uring_zerocopy_tx TEST_PROGS += io_uring_zerocopy_tx.sh @@ -100,6 +104,8 @@ TEST_PROGS += vlan_bridge_binding.sh TEST_PROGS += bpf_offload.py TEST_PROGS += ipv6_route_update_soft_lockup.sh TEST_PROGS += busy_poll_test.sh +TEST_GEN_PROGS += proc_net_pktgen +TEST_PROGS += lwt_dst_cache_ref_loop.sh # YNL files, must be before "include ..lib.mk" YNL_GEN_FILES := busy_poller netlink-dumps diff --git a/tools/testing/selftests/net/bpf_offload.py b/tools/testing/selftests/net/bpf_offload.py index fd0d959914e47..b2c271b79240b 100755 --- a/tools/testing/selftests/net/bpf_offload.py +++ b/tools/testing/selftests/net/bpf_offload.py @@ -207,9 +207,11 @@ def bpftool_prog_list_wait(expected=0, n_retry=20): raise Exception("Time out waiting for program counts to stabilize want %d, have %d" % (expected, nprogs)) def bpftool_map_list_wait(expected=0, n_retry=20, ns=""): + nmaps = None for i in range(n_retry): maps = bpftool_map_list(ns=ns) - if len(maps) == expected: + nmaps = len(maps) + if nmaps == expected: return maps time.sleep(0.05) raise Exception("Time out waiting for map counts to stabilize want %d, have %d" % (expected, nmaps)) @@ -710,6 +712,7 @@ def test_multi_prog(simdev, sim, obj, modename, modeid): base_map_names = [ 'pid_iter.rodata', # created on each bpftool invocation 'libbpf_det_bind', # created on each bpftool invocation +
'libbpf_global', ] # Check netdevsim diff --git a/tools/testing/selftests/net/busy_poll_test.sh b/tools/testing/selftests/net/busy_poll_test.sh index 7db292ec4884a..aeca610dc9892 100755 --- a/tools/testing/selftests/net/busy_poll_test.sh +++ b/tools/testing/selftests/net/busy_poll_test.sh @@ -27,6 +27,9 @@ NAPI_DEFER_HARD_IRQS=100 GRO_FLUSH_TIMEOUT=50000 SUSPEND_TIMEOUT=20000000 +# NAPI threaded busy poll config +NAPI_THREADED_POLL=2 + setup_ns() { set -e @@ -62,6 +65,9 @@ cleanup_ns() test_busypoll() { suspend_value=${1:-0} + napi_threaded_value=${2:-0} + prefer_busy_poll_value=${3:-$PREFER_BUSY_POLL} + tmp_file=$(mktemp) out_file=$(mktemp) @@ -73,10 +79,11 @@ test_busypoll() -b${SERVER_IP} \ -m${MAX_EVENTS} \ -u${BUSY_POLL_USECS} \ - -P${PREFER_BUSY_POLL} \ + -P${prefer_busy_poll_value} \ -g${BUSY_POLL_BUDGET} \ -i${NSIM_SV_IFIDX} \ -s${suspend_value} \ + -t${napi_threaded_value} \ -o${out_file}& wait_local_port_listen nssv ${SERVER_PORT} tcp @@ -109,6 +116,15 @@ test_busypoll_with_suspend() return $? } +test_busypoll_with_napi_threaded() +{ + # Only enable napi threaded poll. Set suspend timeout and prefer busy + # poll to 0. + test_busypoll 0 ${NAPI_THREADED_POLL} 0 + + return $? +} + ### ### Code start ### @@ -154,6 +170,13 @@ if [ $? -ne 0 ]; then exit 1 fi +test_busypoll_with_napi_threaded +if [ $? -ne 0 ]; then + echo "test_busypoll_with_napi_threaded failed" + cleanup_ns + exit 1 +fi + echo "$NSIM_SV_FD:$NSIM_SV_IFIDX" > $NSIM_DEV_SYS_UNLINK echo $NSIM_CL_ID > $NSIM_DEV_SYS_DEL diff --git a/tools/testing/selftests/net/busy_poller.c b/tools/testing/selftests/net/busy_poller.c index 04c7ff577bb89..f7407f09f635e 100644 --- a/tools/testing/selftests/net/busy_poller.c +++ b/tools/testing/selftests/net/busy_poller.c @@ -65,15 +65,16 @@ static uint32_t cfg_busy_poll_usecs; static uint16_t cfg_busy_poll_budget; static uint8_t cfg_prefer_busy_poll; -/* IRQ params */ +/* NAPI params */ static uint32_t cfg_defer_hard_irqs; static uint64_t cfg_gro_flush_timeout; static uint64_t cfg_irq_suspend_timeout; +static enum netdev_napi_threaded cfg_napi_threaded_poll = NETDEV_NAPI_THREADED_DISABLE; static void usage(const char *filepath) { error(1, 0, - "Usage: %s -p -b -m -u -P -g -o -d -r -s -i", + "Usage: %s -p -b -m -u -P -g -o -d -r -s -t -i", filepath); } @@ -86,7 +87,7 @@ static void parse_opts(int argc, char **argv) if (argc <= 1) usage(argv[0]); - while ((c = getopt(argc, argv, "p:m:b:u:P:g:o:d:r:s:i:")) != -1) { + while ((c = getopt(argc, argv, "p:m:b:u:P:g:o:d:r:s:i:t:")) != -1) { /* most options take integer values, except o and b, so reduce * code duplication a bit for the common case by calling * strtoull here and leave bounds checking and casting per @@ -168,6 +169,12 @@ static void parse_opts(int argc, char **argv) cfg_ifindex = (int)tmp; break; + case 't': + if (tmp == ULLONG_MAX || tmp > 2) + error(1, ERANGE, "napi threaded poll value must be 0-2"); + + cfg_napi_threaded_poll = (enum netdev_napi_threaded)tmp; + break; } } @@ -246,6 +253,7 @@ static void setup_queue(void) cfg_gro_flush_timeout); netdev_napi_set_req_set_irq_suspend_timeout(set_req, cfg_irq_suspend_timeout); + netdev_napi_set_req_set_threaded(set_req, cfg_napi_threaded_poll); if (netdev_napi_set(ys, set_req)) error(1, 0, "can't set NAPI params: %s\n", yerr.msg); diff --git a/tools/testing/selftests/net/cmsg_ip.sh b/tools/testing/selftests/net/cmsg_ip.sh new file mode 100755 index 0000000000000..b55680e081ad7 --- /dev/null +++ b/tools/testing/selftests/net/cmsg_ip.sh @@ -0,0 +1,187 @@ +#!/bin/bash +# 
SPDX-License-Identifier: GPL-2.0 + +source lib.sh + +IP4=172.16.0.1/24 +TGT4=172.16.0.2 +IP6=2001:db8:1::1/64 +TGT6=2001:db8:1::2 +TMPF=$(mktemp --suffix ".pcap") + +cleanup() +{ + rm -f $TMPF + cleanup_ns $NS +} + +trap cleanup EXIT + +tcpdump -h | grep immediate-mode >> /dev/null +if [ $? -ne 0 ]; then + echo "SKIP - tcpdump with --immediate-mode option required" + exit $ksft_skip +fi + +# Namespaces +setup_ns NS +NSEXE="ip netns exec $NS" + +$NSEXE sysctl -w net.ipv4.ping_group_range='0 2147483647' > /dev/null + +# Connectivity +ip -netns $NS link add type dummy +ip -netns $NS link set dev dummy0 up +ip -netns $NS addr add $IP4 dev dummy0 +ip -netns $NS addr add $IP6 dev dummy0 + +# Test +BAD=0 +TOTAL=0 + +check_result() { + ((TOTAL++)) + if [ $1 -ne $2 ]; then + echo " Case $3 returned $1, expected $2" + ((BAD++)) + fi +} + +# IPV6_DONTFRAG +for ovr in setsock cmsg both diff; do + for df in 0 1; do + for p in u U i r; do + [ $p == "u" ] && prot=UDP + [ $p == "U" ] && prot=UDP + [ $p == "i" ] && prot=ICMP + [ $p == "r" ] && prot=RAW + + [ $ovr == "setsock" ] && m="-F $df" + [ $ovr == "cmsg" ] && m="-f $df" + [ $ovr == "both" ] && m="-F $df -f $df" + [ $ovr == "diff" ] && m="-F $((1 - df)) -f $df" + + $NSEXE ./cmsg_sender -s -S 2000 -6 -p $p $m $TGT6 1234 + check_result $? $df "DONTFRAG $prot $ovr" + done + done +done + +# IP_TOS + IPV6_TCLASS + +test_dscp() { + local -r IPVER=$1 + local -r TGT=$2 + local -r MATCH=$3 + + local -r TOS=0x10 + local -r TOS2=0x20 + local -r ECN=0x3 + + ip $IPVER -netns $NS rule add tos $TOS lookup 300 + ip $IPVER -netns $NS route add table 300 prohibit any + + for ovr in setsock cmsg both diff; do + for p in u U i r; do + [ $p == "u" ] && prot=UDP + [ $p == "U" ] && prot=UDP + [ $p == "i" ] && prot=ICMP + [ $p == "r" ] && prot=RAW + + [ $ovr == "setsock" ] && m="-C" + [ $ovr == "cmsg" ] && m="-c" + [ $ovr == "both" ] && m="-C $((TOS2)) -c" + [ $ovr == "diff" ] && m="-C $((TOS )) -c" + + $NSEXE nohup tcpdump --immediate-mode -p -ni dummy0 -w $TMPF -c 4 2> /dev/null & + BG=$! + sleep 0.05 + + $NSEXE ./cmsg_sender $IPVER -p $p $m $((TOS2)) $TGT 1234 + check_result $? 0 "$MATCH $prot $ovr - pass" + + while [ -d /proc/$BG ]; do + $NSEXE ./cmsg_sender $IPVER -p $p $m $((TOS2)) $TGT 1234 + done + + tcpdump -r $TMPF -v 2>&1 | grep "$MATCH $TOS2" >> /dev/null + check_result $? 0 "$MATCH $prot $ovr - packet data" + rm $TMPF + + [ $ovr == "both" ] && m="-C $((TOS )) -c" + [ $ovr == "diff" ] && m="-C $((TOS2)) -c" + + # Match prohibit rule: expect failure + $NSEXE ./cmsg_sender $IPVER -p $p $m $((TOS)) -s $TGT 1234 + check_result $? 1 "$MATCH $prot $ovr - rejection" + + # Match prohibit rule: IPv4 masks ECN: expect failure + if [[ "$IPVER" == "-4" ]]; then + $NSEXE ./cmsg_sender $IPVER -p $p $m "$((TOS | ECN))" -s $TGT 1234 + check_result $? 1 "$MATCH $prot $ovr - rejection (ECN)" + fi + done + done +} + +test_dscp -4 $TGT4 tos +test_dscp -6 $TGT6 class + +# IP_TTL + IPV6_HOPLIMIT +test_ttl_hoplimit() { + local -r IPVER=$1 + local -r TGT=$2 + local -r MATCH=$3 + + local -r LIM=4 + + for ovr in setsock cmsg both diff; do + for p in u U i r; do + [ $p == "u" ] && prot=UDP + [ $p == "U" ] && prot=UDP + [ $p == "i" ] && prot=ICMP + [ $p == "r" ] && prot=RAW + + [ $ovr == "setsock" ] && m="-L" + [ $ovr == "cmsg" ] && m="-l" + [ $ovr == "both" ] && m="-L $LIM -l" + [ $ovr == "diff" ] && m="-L $((LIM + 1)) -l" + + $NSEXE nohup tcpdump --immediate-mode -p -ni dummy0 -w $TMPF -c 4 2> /dev/null & + BG=$! 
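+		# Give tcpdump a moment to start capturing before sending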
+ sleep 0.05 + + $NSEXE ./cmsg_sender $IPVER -p $p $m $LIM $TGT 1234 + check_result $? 0 "$MATCH $prot $ovr - pass" + + while [ -d /proc/$BG ]; do + $NSEXE ./cmsg_sender $IPVER -p $p $m $LIM $TGT 1234 + done + + tcpdump -r $TMPF -v 2>&1 | grep "$MATCH $LIM[^0-9]" >> /dev/null + check_result $? 0 "$MATCH $prot $ovr - packet data" + rm $TMPF + done + done +} + +test_ttl_hoplimit -4 $TGT4 ttl +test_ttl_hoplimit -6 $TGT6 hlim + +# IPV6 exthdr +for p in u U i r; do + # Very basic "does it crash" test + for h in h d r; do + $NSEXE ./cmsg_sender -p $p -6 -H $h $TGT6 1234 + check_result $? 0 "ExtHdr $prot $ovr - pass" + done +done + +# Summary +if [ $BAD -ne 0 ]; then + echo "FAIL - $BAD/$TOTAL cases failed" + exit 1 +else + echo "OK" + exit 0 +fi diff --git a/tools/testing/selftests/net/cmsg_ipv6.sh b/tools/testing/selftests/net/cmsg_ipv6.sh deleted file mode 100755 index 8bc23fb4c82b7..0000000000000 --- a/tools/testing/selftests/net/cmsg_ipv6.sh +++ /dev/null @@ -1,154 +0,0 @@ -#!/bin/bash -# SPDX-License-Identifier: GPL-2.0 - -source lib.sh - -IP6=2001:db8:1::1/64 -TGT6=2001:db8:1::2 -TMPF=$(mktemp --suffix ".pcap") - -cleanup() -{ - rm -f $TMPF - cleanup_ns $NS -} - -trap cleanup EXIT - -tcpdump -h | grep immediate-mode >> /dev/null -if [ $? -ne 0 ]; then - echo "SKIP - tcpdump with --immediate-mode option required" - exit $ksft_skip -fi - -# Namespaces -setup_ns NS -NSEXE="ip netns exec $NS" - -$NSEXE sysctl -w net.ipv4.ping_group_range='0 2147483647' > /dev/null - -# Connectivity -ip -netns $NS link add type dummy -ip -netns $NS link set dev dummy0 up -ip -netns $NS addr add $IP6 dev dummy0 - -# Test -BAD=0 -TOTAL=0 - -check_result() { - ((TOTAL++)) - if [ $1 -ne $2 ]; then - echo " Case $3 returned $1, expected $2" - ((BAD++)) - fi -} - -# IPV6_DONTFRAG -for ovr in setsock cmsg both diff; do - for df in 0 1; do - for p in u i r; do - [ $p == "u" ] && prot=UDP - [ $p == "i" ] && prot=ICMP - [ $p == "r" ] && prot=RAW - - [ $ovr == "setsock" ] && m="-F $df" - [ $ovr == "cmsg" ] && m="-f $df" - [ $ovr == "both" ] && m="-F $df -f $df" - [ $ovr == "diff" ] && m="-F $((1 - df)) -f $df" - - $NSEXE ./cmsg_sender -s -S 2000 -6 -p $p $m $TGT6 1234 - check_result $? $df "DONTFRAG $prot $ovr" - done - done -done - -# IPV6_TCLASS -TOS=0x10 -TOS2=0x20 - -ip -6 -netns $NS rule add tos $TOS lookup 300 -ip -6 -netns $NS route add table 300 prohibit any - -for ovr in setsock cmsg both diff; do - for p in u i r; do - [ $p == "u" ] && prot=UDP - [ $p == "i" ] && prot=ICMP - [ $p == "r" ] && prot=RAW - - [ $ovr == "setsock" ] && m="-C" - [ $ovr == "cmsg" ] && m="-c" - [ $ovr == "both" ] && m="-C $((TOS2)) -c" - [ $ovr == "diff" ] && m="-C $((TOS )) -c" - - $NSEXE nohup tcpdump --immediate-mode -p -ni dummy0 -w $TMPF -c 4 2> /dev/null & - BG=$! - sleep 0.05 - - $NSEXE ./cmsg_sender -6 -p $p $m $((TOS2)) $TGT6 1234 - check_result $? 0 "TCLASS $prot $ovr - pass" - - while [ -d /proc/$BG ]; do - $NSEXE ./cmsg_sender -6 -p $p $m $((TOS2)) $TGT6 1234 - done - - tcpdump -r $TMPF -v 2>&1 | grep "class $TOS2" >> /dev/null - check_result $? 0 "TCLASS $prot $ovr - packet data" - rm $TMPF - - [ $ovr == "both" ] && m="-C $((TOS )) -c" - [ $ovr == "diff" ] && m="-C $((TOS2)) -c" - - $NSEXE ./cmsg_sender -6 -p $p $m $((TOS)) -s $TGT6 1234 - check_result $? 
1 "TCLASS $prot $ovr - rejection" - done -done - -# IPV6_HOPLIMIT -LIM=4 - -for ovr in setsock cmsg both diff; do - for p in u i r; do - [ $p == "u" ] && prot=UDP - [ $p == "i" ] && prot=ICMP - [ $p == "r" ] && prot=RAW - - [ $ovr == "setsock" ] && m="-L" - [ $ovr == "cmsg" ] && m="-l" - [ $ovr == "both" ] && m="-L $LIM -l" - [ $ovr == "diff" ] && m="-L $((LIM + 1)) -l" - - $NSEXE nohup tcpdump --immediate-mode -p -ni dummy0 -w $TMPF -c 4 2> /dev/null & - BG=$! - sleep 0.05 - - $NSEXE ./cmsg_sender -6 -p $p $m $LIM $TGT6 1234 - check_result $? 0 "HOPLIMIT $prot $ovr - pass" - - while [ -d /proc/$BG ]; do - $NSEXE ./cmsg_sender -6 -p $p $m $LIM $TGT6 1234 - done - - tcpdump -r $TMPF -v 2>&1 | grep "hlim $LIM[^0-9]" >> /dev/null - check_result $? 0 "HOPLIMIT $prot $ovr - packet data" - rm $TMPF - done -done - -# IPV6 exthdr -for p in u i r; do - # Very basic "does it crash" test - for h in h d r; do - $NSEXE ./cmsg_sender -p $p -6 -H $h $TGT6 1234 - check_result $? 0 "ExtHdr $prot $ovr - pass" - done -done - -# Summary -if [ $BAD -ne 0 ]; then - echo "FAIL - $BAD/$TOTAL cases failed" - exit 1 -else - echo "OK" - exit 0 -fi diff --git a/tools/testing/selftests/net/cmsg_sender.c b/tools/testing/selftests/net/cmsg_sender.c index bc314382e4e1b..a825e628aee73 100644 --- a/tools/testing/selftests/net/cmsg_sender.c +++ b/tools/testing/selftests/net/cmsg_sender.c @@ -33,6 +33,7 @@ enum { ERN_RECVERR, ERN_CMSG_RD, ERN_CMSG_RCV, + ERN_SEND_MORE, }; struct option_cmsg_u32 { @@ -46,6 +47,7 @@ struct options { const char *service; unsigned int size; unsigned int num_pkt; + bool msg_more; struct { unsigned int mark; unsigned int dontfrag; @@ -72,7 +74,7 @@ struct options { struct option_cmsg_u32 tclass; struct option_cmsg_u32 hlimit; struct option_cmsg_u32 exthdr; - } v6; + } cmsg; } opt = { .size = 13, .num_pkt = 1, @@ -94,7 +96,8 @@ static void __attribute__((noreturn)) cs_usage(const char *bin) "\t\t-S send() size\n" "\t\t-4/-6 Force IPv4 / IPv6 only\n" "\t\t-p prot Socket protocol\n" - "\t\t (u = UDP (default); i = ICMP; r = RAW)\n" + "\t\t (u = UDP (default); i = ICMP; r = RAW;\n" + "\t\t U = UDP with MSG_MORE)\n" "\n" "\t\t-m val Set SO_MARK with given value\n" "\t\t-M val Set SO_MARK via setsockopt\n" @@ -104,13 +107,13 @@ static void __attribute__((noreturn)) cs_usage(const char *bin) "\t\t-t Enable time stamp reporting\n" "\t\t-f val Set don't fragment via cmsg\n" "\t\t-F val Set don't fragment via setsockopt\n" - "\t\t-c val Set TCLASS via cmsg\n" - "\t\t-C val Set TCLASS via setsockopt\n" - "\t\t-l val Set HOPLIMIT via cmsg\n" - "\t\t-L val Set HOPLIMIT via setsockopt\n" + "\t\t-c val Set TOS/TCLASS via cmsg\n" + "\t\t-C val Set TOS/TCLASS via setsockopt\n" + "\t\t-l val Set TTL/HOPLIMIT via cmsg\n" + "\t\t-L val Set TTL/HOPLIMIT via setsockopt\n" "\t\t-H type Add an IPv6 header option\n" - "\t\t (h = HOP; d = DST; r = RTDST)" - ""); + "\t\t (h = HOP; d = DST; r = RTDST)\n" + "\n"); exit(ERN_HELP); } @@ -133,8 +136,11 @@ static void cs_parse_args(int argc, char *argv[]) opt.sock.family = AF_INET6; break; case 'p': - if (*optarg == 'u' || *optarg == 'U') { + if (*optarg == 'u') { opt.sock.proto = IPPROTO_UDP; + } else if (*optarg == 'U') { + opt.sock.proto = IPPROTO_UDP; + opt.msg_more = true; } else if (*optarg == 'i' || *optarg == 'I') { opt.sock.proto = IPPROTO_ICMP; } else if (*optarg == 'r') { @@ -169,37 +175,37 @@ static void cs_parse_args(int argc, char *argv[]) opt.ts.ena = true; break; case 'f': - opt.v6.dontfrag.ena = true; - opt.v6.dontfrag.val = atoi(optarg); + opt.cmsg.dontfrag.ena 
= true; + opt.cmsg.dontfrag.val = atoi(optarg); break; case 'F': opt.sockopt.dontfrag = atoi(optarg); break; case 'c': - opt.v6.tclass.ena = true; - opt.v6.tclass.val = atoi(optarg); + opt.cmsg.tclass.ena = true; + opt.cmsg.tclass.val = atoi(optarg); break; case 'C': opt.sockopt.tclass = atoi(optarg); break; case 'l': - opt.v6.hlimit.ena = true; - opt.v6.hlimit.val = atoi(optarg); + opt.cmsg.hlimit.ena = true; + opt.cmsg.hlimit.val = atoi(optarg); break; case 'L': opt.sockopt.hlimit = atoi(optarg); break; case 'H': - opt.v6.exthdr.ena = true; + opt.cmsg.exthdr.ena = true; switch (optarg[0]) { case 'h': - opt.v6.exthdr.val = IPV6_HOPOPTS; + opt.cmsg.exthdr.val = IPV6_HOPOPTS; break; case 'd': - opt.v6.exthdr.val = IPV6_DSTOPTS; + opt.cmsg.exthdr.val = IPV6_DSTOPTS; break; case 'r': - opt.v6.exthdr.val = IPV6_RTHDRDSTOPTS; + opt.cmsg.exthdr.val = IPV6_RTHDRDSTOPTS; break; default: printf("Error: hdr type: %s\n", optarg); @@ -261,12 +267,20 @@ cs_write_cmsg(int fd, struct msghdr *msg, char *cbuf, size_t cbuf_sz) SOL_SOCKET, SO_MARK, &opt.mark); ca_write_cmsg_u32(cbuf, cbuf_sz, &cmsg_len, SOL_SOCKET, SO_PRIORITY, &opt.priority); - ca_write_cmsg_u32(cbuf, cbuf_sz, &cmsg_len, - SOL_IPV6, IPV6_DONTFRAG, &opt.v6.dontfrag); - ca_write_cmsg_u32(cbuf, cbuf_sz, &cmsg_len, - SOL_IPV6, IPV6_TCLASS, &opt.v6.tclass); - ca_write_cmsg_u32(cbuf, cbuf_sz, &cmsg_len, - SOL_IPV6, IPV6_HOPLIMIT, &opt.v6.hlimit); + + if (opt.sock.family == AF_INET) { + ca_write_cmsg_u32(cbuf, cbuf_sz, &cmsg_len, + SOL_IP, IP_TOS, &opt.cmsg.tclass); + ca_write_cmsg_u32(cbuf, cbuf_sz, &cmsg_len, + SOL_IP, IP_TTL, &opt.cmsg.hlimit); + } else { + ca_write_cmsg_u32(cbuf, cbuf_sz, &cmsg_len, + SOL_IPV6, IPV6_DONTFRAG, &opt.cmsg.dontfrag); + ca_write_cmsg_u32(cbuf, cbuf_sz, &cmsg_len, + SOL_IPV6, IPV6_TCLASS, &opt.cmsg.tclass); + ca_write_cmsg_u32(cbuf, cbuf_sz, &cmsg_len, + SOL_IPV6, IPV6_HOPLIMIT, &opt.cmsg.hlimit); + } if (opt.txtime.ena) { __u64 txtime; @@ -297,14 +311,14 @@ cs_write_cmsg(int fd, struct msghdr *msg, char *cbuf, size_t cbuf_sz) *(__u32 *)CMSG_DATA(cmsg) = SOF_TIMESTAMPING_TX_SCHED | SOF_TIMESTAMPING_TX_SOFTWARE; } - if (opt.v6.exthdr.ena) { + if (opt.cmsg.exthdr.ena) { cmsg = (struct cmsghdr *)(cbuf + cmsg_len); cmsg_len += CMSG_SPACE(8); if (cbuf_sz < cmsg_len) error(ERN_CMSG_WR, EFAULT, "cmsg buffer too small"); cmsg->cmsg_level = SOL_IPV6; - cmsg->cmsg_type = opt.v6.exthdr.val; + cmsg->cmsg_type = opt.cmsg.exthdr.val; cmsg->cmsg_len = CMSG_LEN(8); *(__u64 *)CMSG_DATA(cmsg) = 0; } @@ -405,23 +419,35 @@ static void ca_set_sockopts(int fd) setsockopt(fd, SOL_SOCKET, SO_MARK, &opt.sockopt.mark, sizeof(opt.sockopt.mark))) error(ERN_SOCKOPT, errno, "setsockopt SO_MARK"); - if (opt.sockopt.dontfrag && - setsockopt(fd, SOL_IPV6, IPV6_DONTFRAG, - &opt.sockopt.dontfrag, sizeof(opt.sockopt.dontfrag))) - error(ERN_SOCKOPT, errno, "setsockopt IPV6_DONTFRAG"); - if (opt.sockopt.tclass && - setsockopt(fd, SOL_IPV6, IPV6_TCLASS, - &opt.sockopt.tclass, sizeof(opt.sockopt.tclass))) - error(ERN_SOCKOPT, errno, "setsockopt IPV6_TCLASS"); - if (opt.sockopt.hlimit && - setsockopt(fd, SOL_IPV6, IPV6_UNICAST_HOPS, - &opt.sockopt.hlimit, sizeof(opt.sockopt.hlimit))) - error(ERN_SOCKOPT, errno, "setsockopt IPV6_HOPLIMIT"); if (opt.sockopt.priority && setsockopt(fd, SOL_SOCKET, SO_PRIORITY, &opt.sockopt.priority, sizeof(opt.sockopt.priority))) error(ERN_SOCKOPT, errno, "setsockopt SO_PRIORITY"); + if (opt.sock.family == AF_INET) { + if (opt.sockopt.tclass && + setsockopt(fd, SOL_IP, IP_TOS, + &opt.sockopt.tclass, 
sizeof(opt.sockopt.tclass))) + error(ERN_SOCKOPT, errno, "setsockopt IP_TOS"); + if (opt.sockopt.hlimit && + setsockopt(fd, SOL_IP, IP_TTL, + &opt.sockopt.hlimit, sizeof(opt.sockopt.hlimit))) + error(ERN_SOCKOPT, errno, "setsockopt IP_TTL"); + } else { + if (opt.sockopt.dontfrag && + setsockopt(fd, SOL_IPV6, IPV6_DONTFRAG, + &opt.sockopt.dontfrag, sizeof(opt.sockopt.dontfrag))) + error(ERN_SOCKOPT, errno, "setsockopt IPV6_DONTFRAG"); + if (opt.sockopt.tclass && + setsockopt(fd, SOL_IPV6, IPV6_TCLASS, + &opt.sockopt.tclass, sizeof(opt.sockopt.tclass))) + error(ERN_SOCKOPT, errno, "setsockopt IPV6_TCLASS"); + if (opt.sockopt.hlimit && + setsockopt(fd, SOL_IPV6, IPV6_UNICAST_HOPS, + &opt.sockopt.hlimit, sizeof(opt.sockopt.hlimit))) + error(ERN_SOCKOPT, errno, "setsockopt IPV6_HOPLIMIT"); + } + if (opt.txtime.ena) { struct sock_txtime so_txtime = { .clockid = CLOCK_MONOTONIC, @@ -511,7 +537,7 @@ int main(int argc, char *argv[]) cs_write_cmsg(fd, &msg, cbuf, sizeof(cbuf)); for (i = 0; i < opt.num_pkt; i++) { - err = sendmsg(fd, &msg, 0); + err = sendmsg(fd, &msg, opt.msg_more ? MSG_MORE : 0); if (err < 0) { if (!opt.silent_send) fprintf(stderr, "send failed: %s\n", strerror(errno)); @@ -522,6 +548,14 @@ int main(int argc, char *argv[]) err = ERN_SEND_SHORT; goto err_out; } + if (opt.msg_more) { + err = write(fd, NULL, 0); + if (err < 0) { + fprintf(stderr, "send more: %s\n", strerror(errno)); + err = ERN_SEND_MORE; + goto err_out; + } + } } err = ERN_SUCCESS; diff --git a/tools/testing/selftests/net/config b/tools/testing/selftests/net/config index 5b9baf7089506..130d532b7e67a 100644 --- a/tools/testing/selftests/net/config +++ b/tools/testing/selftests/net/config @@ -18,6 +18,8 @@ CONFIG_DUMMY=y CONFIG_BRIDGE_VLAN_FILTERING=y CONFIG_BRIDGE=y CONFIG_CRYPTO_CHACHA20POLY1305=m +CONFIG_DEBUG_INFO_BTF=y +CONFIG_DEBUG_INFO_BTF_MODULES=n CONFIG_VLAN_8021Q=y CONFIG_GENEVE=m CONFIG_IFB=y @@ -107,3 +109,11 @@ CONFIG_XFRM_INTERFACE=m CONFIG_XFRM_USER=m CONFIG_IP_NF_MATCH_RPFILTER=m CONFIG_IP6_NF_MATCH_RPFILTER=m +CONFIG_IPVLAN=m +CONFIG_CAN=m +CONFIG_CAN_DEV=m +CONFIG_CAN_VXCAN=m +CONFIG_NETKIT=y +CONFIG_NET_PKTGEN=m +CONFIG_IPV6_ILA=m +CONFIG_IPV6_RPL_LWTUNNEL=y diff --git a/tools/testing/selftests/net/fcnal-test.sh b/tools/testing/selftests/net/fcnal-test.sh index 899dbad0104bf..4fcc38907e48e 100755 --- a/tools/testing/selftests/net/fcnal-test.sh +++ b/tools/testing/selftests/net/fcnal-test.sh @@ -3667,7 +3667,7 @@ ipv6_addr_bind_novrf() # when it really should not a=${NSA_LO_IP6} log_start - show_hint "Tecnically should fail since address is not on device but kernel allows" + show_hint "Technically should fail since address is not on device but kernel allows" run_cmd nettest -6 -s -l ${a} -I ${NSA_DEV} -t1 -b log_test_addr ${a} $? 0 "TCP socket bind to out of scope local address" } @@ -3724,7 +3724,7 @@ ipv6_addr_bind_vrf() # passes when it really should not a=${VRF_IP6} log_start - show_hint "Tecnically should fail since address is not on device but kernel allows" + show_hint "Technically should fail since address is not on device but kernel allows" run_cmd nettest -6 -s -l ${a} -I ${NSA_DEV} -t1 -b log_test_addr ${a} $? 
0 "TCP socket bind to VRF address with device bind" diff --git a/tools/testing/selftests/net/fdb_flush.sh b/tools/testing/selftests/net/fdb_flush.sh index d5e3abb8658c3..9931a1e36e3db 100755 --- a/tools/testing/selftests/net/fdb_flush.sh +++ b/tools/testing/selftests/net/fdb_flush.sh @@ -583,7 +583,7 @@ vxlan_test_flush_by_remote_attributes() $IP link del dev vx10 $IP link add name vx10 type vxlan dstport "$VXPORT" external - # For multicat FDB entries, the VXLAN driver stores a linked list of + # For multicast FDB entries, the VXLAN driver stores a linked list of # remotes for a given key. Verify that only the expected remotes are # flushed. multicast_fdb_entries_add diff --git a/tools/testing/selftests/net/fib_nexthops.sh b/tools/testing/selftests/net/fib_nexthops.sh index 77c83d9508d3b..b39f748c25722 100755 --- a/tools/testing/selftests/net/fib_nexthops.sh +++ b/tools/testing/selftests/net/fib_nexthops.sh @@ -76,11 +76,13 @@ log_test() printf "TEST: %-60s [ OK ]\n" "${msg}" nsuccess=$((nsuccess+1)) else - ret=1 - nfail=$((nfail+1)) if [[ $rc -eq $ksft_skip ]]; then + [[ $ret -eq 0 ]] && ret=$ksft_skip + nskip=$((nskip+1)) printf "TEST: %-60s [SKIP]\n" "${msg}" else + ret=1 + nfail=$((nfail+1)) printf "TEST: %-60s [FAIL]\n" "${msg}" fi @@ -741,7 +743,7 @@ ipv6_fcnal() run_cmd "$IP nexthop add id 52 via 2001:db8:92::3" log_test $? 2 "Create nexthop - gw only" - # gw is not reachable throught given dev + # gw is not reachable through given dev run_cmd "$IP nexthop add id 53 via 2001:db8:3::3 dev veth1" log_test $? 2 "Create nexthop - invalid gw+dev combination" @@ -2528,6 +2530,7 @@ done if [ "$TESTS" != "none" ]; then printf "\nTests passed: %3d\n" ${nsuccess} printf "Tests failed: %3d\n" ${nfail} + printf "Tests skipped: %2d\n" ${nskip} fi exit $ret diff --git a/tools/testing/selftests/net/fib_rule_tests.sh b/tools/testing/selftests/net/fib_rule_tests.sh index 847936363a12b..b866bab1d92a1 100755 --- a/tools/testing/selftests/net/fib_rule_tests.sh +++ b/tools/testing/selftests/net/fib_rule_tests.sh @@ -256,6 +256,24 @@ fib_rule6_test() fib_rule6_test_match_n_redirect "$match" "$match" \ "$getnomatch" "sport and dport redirect to table" \ "sport and dport no redirect to table" + + match="sport 100-200 dport 300-400" + getmatch="sport 100 dport 400" + getnomatch="sport 100 dport 401" + fib_rule6_test_match_n_redirect "$match" "$getmatch" \ + "$getnomatch" \ + "sport and dport range redirect to table" \ + "sport and dport range no redirect to table" + fi + + ip rule help 2>&1 | grep sport | grep -q MASK + if [ $? -eq 0 ]; then + match="sport 0x0f00/0xff00 dport 0x000f/0x00ff" + getmatch="sport 0x0f11 dport 0x220f" + getnomatch="sport 0x1f11 dport 0x221f" + fib_rule6_test_match_n_redirect "$match" "$getmatch" \ + "$getnomatch" "sport and dport masked redirect to table" \ + "sport and dport masked no redirect to table" fi fib_check_iproute_support "ipproto" "ipproto" @@ -292,6 +310,25 @@ fib_rule6_test() "iif dscp no redirect to table" fi + ip rule help 2>&1 | grep -q "DSCP\[/MASK\]" + if [ $? 
-eq 0 ]; then + match="dscp 0x0f/0x0f" + tosmatch=$(printf 0x"%x" $((0x1f << 2))) + tosnomatch=$(printf 0x"%x" $((0x1e << 2))) + getmatch="tos $tosmatch" + getnomatch="tos $tosnomatch" + fib_rule6_test_match_n_redirect "$match" "$getmatch" \ + "$getnomatch" "dscp masked redirect to table" \ + "dscp masked no redirect to table" + + match="dscp 0x0f/0x0f" + getmatch="from $SRC_IP6 iif $DEV tos $tosmatch" + getnomatch="from $SRC_IP6 iif $DEV tos $tosnomatch" + fib_rule6_test_match_n_redirect "$match" "$getmatch" \ + "$getnomatch" "iif dscp masked redirect to table" \ + "iif dscp masked no redirect to table" + fi + fib_check_iproute_support "flowlabel" "flowlabel" if [ $? -eq 0 ]; then match="flowlabel 0xfffff" @@ -525,6 +562,24 @@ fib_rule4_test() fib_rule4_test_match_n_redirect "$match" "$match" \ "$getnomatch" "sport and dport redirect to table" \ "sport and dport no redirect to table" + + match="sport 100-200 dport 300-400" + getmatch="sport 100 dport 400" + getnomatch="sport 100 dport 401" + fib_rule4_test_match_n_redirect "$match" "$getmatch" \ + "$getnomatch" \ + "sport and dport range redirect to table" \ + "sport and dport range no redirect to table" + fi + + ip rule help 2>&1 | grep sport | grep -q MASK + if [ $? -eq 0 ]; then + match="sport 0x0f00/0xff00 dport 0x000f/0x00ff" + getmatch="sport 0x0f11 dport 0x220f" + getnomatch="sport 0x1f11 dport 0x221f" + fib_rule4_test_match_n_redirect "$match" "$getmatch" \ + "$getnomatch" "sport and dport masked redirect to table" \ + "sport and dport masked no redirect to table" fi fib_check_iproute_support "ipproto" "ipproto" @@ -561,6 +616,25 @@ fib_rule4_test() "$getnomatch" "iif dscp redirect to table" \ "iif dscp no redirect to table" fi + + ip rule help 2>&1 | grep -q "DSCP\[/MASK\]" + if [ $? -eq 0 ]; then + match="dscp 0x0f/0x0f" + tosmatch=$(printf 0x"%x" $((0x1f << 2))) + tosnomatch=$(printf 0x"%x" $((0x1e << 2))) + getmatch="tos $tosmatch" + getnomatch="tos $tosnomatch" + fib_rule4_test_match_n_redirect "$match" "$getmatch" \ + "$getnomatch" "dscp masked redirect to table" \ + "dscp masked no redirect to table" + + match="dscp 0x0f/0x0f" + getmatch="from $SRC_IP iif $DEV tos $tosmatch" + getnomatch="from $SRC_IP iif $DEV tos $tosnomatch" + fib_rule4_test_match_n_redirect "$match" "$getmatch" \ + "$getnomatch" "iif dscp masked redirect to table" \ + "iif dscp masked no redirect to table" + fi } fib_rule4_vrf_test() diff --git a/tools/testing/selftests/net/forwarding/README b/tools/testing/selftests/net/forwarding/README index a652429bfd53c..7b41cff993adb 100644 --- a/tools/testing/selftests/net/forwarding/README +++ b/tools/testing/selftests/net/forwarding/README @@ -6,7 +6,7 @@ to easily create and test complex environments. Unfortunately, these namespaces can not be used with actual switching ASICs, as their ports can not be migrated to other network namespaces -(dev->netns_local) and most of them probably do not support the +(dev->netns_immutable) and most of them probably do not support the L1-separation provided by namespaces. However, a similar kind of flexibility can be achieved by using VRFs and diff --git a/tools/testing/selftests/net/forwarding/bridge_mdb.sh b/tools/testing/selftests/net/forwarding/bridge_mdb.sh index d9d587454d207..8c1597ebc2d38 100755 --- a/tools/testing/selftests/net/forwarding/bridge_mdb.sh +++ b/tools/testing/selftests/net/forwarding/bridge_mdb.sh @@ -149,7 +149,7 @@ cfg_test_host_common() check_err $? 
"Failed to add $name host entry" bridge mdb replace dev br0 port br0 grp $grp $state vid 10 &> /dev/null - check_fail $? "Managed to replace $name host entry" + check_err $? "Failed to replace $name host entry" bridge mdb del dev br0 port br0 grp $grp $state vid 10 bridge mdb get dev br0 grp $grp vid 10 &> /dev/null diff --git a/tools/testing/selftests/net/forwarding/lib.sh b/tools/testing/selftests/net/forwarding/lib.sh index 8de80acf249ea..508f3c700d719 100644 --- a/tools/testing/selftests/net/forwarding/lib.sh +++ b/tools/testing/selftests/net/forwarding/lib.sh @@ -291,16 +291,6 @@ if [[ "$CHECK_TC" = "yes" ]]; then check_tc_version fi -require_command() -{ - local cmd=$1; shift - - if [[ ! -x "$(command -v "$cmd")" ]]; then - echo "SKIP: $cmd not installed" - exit $ksft_skip - fi -} - # IPv6 support was added in v3.0 check_mtools_version() { diff --git a/tools/testing/selftests/net/forwarding/settings b/tools/testing/selftests/net/forwarding/settings index e7b9417537fbc..ff3de4936a009 100644 --- a/tools/testing/selftests/net/forwarding/settings +++ b/tools/testing/selftests/net/forwarding/settings @@ -1 +1,2 @@ -timeout=0 +timeout=10800 +profile=1 diff --git a/tools/testing/selftests/net/forwarding/tc_actions.sh b/tools/testing/selftests/net/forwarding/tc_actions.sh index ea89e558672db..5823d94b19e9b 100755 --- a/tools/testing/selftests/net/forwarding/tc_actions.sh +++ b/tools/testing/selftests/net/forwarding/tc_actions.sh @@ -223,19 +223,32 @@ mirred_egress_to_ingress_tcp_test() ip_proto icmp \ action drop - ip vrf exec v$h1 ncat --recv-only -w10 -l -p 12345 -o $mirred_e2i_tf2 & - local rpid=$! - ip vrf exec v$h1 ncat -w1 --send-only 192.0.2.2 12345 <$mirred_e2i_tf1 - wait -n $rpid - cmp -s $mirred_e2i_tf1 $mirred_e2i_tf2 - check_err $? "server output check failed" + echo P2 $MZ $h1 -c 10 -p 64 -a $h1mac -b $h1mac -A 192.0.2.1 -B 192.0.2.1 \ -t icmp "ping,id=42,seq=5" -q + echo P2.1 tc_check_packets "dev $h1 egress" 101 10 check_err $? "didn't mirred redirect ICMP" + echo P2.2 tc_check_packets "dev $h1 ingress" 102 10 check_err $? "didn't drop mirred ICMP" + echo P2.3 + + echo P1 + + ip vrf exec v$h1 ncat --recv-only -w10 -l -p 12345 -o $mirred_e2i_tf2 & + local rpid=$! + sleep 0.2 + echo P1.1 + ip vrf exec v$h1 ncat -w1 --send-only 192.0.2.2 12345 <$mirred_e2i_tf1 + echo P1.2 + sleep 0.2 + wait -n $rpid + cmp -s $mirred_e2i_tf1 $mirred_e2i_tf2 + check_err $? "server output check failed" + + echo P3 tc filter del dev $h1 egress protocol ip pref 100 handle 100 flower tc filter del dev $h1 egress protocol ip pref 101 handle 101 flower @@ -359,4 +372,5 @@ else tests_run fi +dmesg exit $EXIT_STATUS diff --git a/tools/testing/selftests/net/forwarding/vxlan_bridge_1d.sh b/tools/testing/selftests/net/forwarding/vxlan_bridge_1d.sh index 3f9d50f1ef9ec..b43816dd998ca 100755 --- a/tools/testing/selftests/net/forwarding/vxlan_bridge_1d.sh +++ b/tools/testing/selftests/net/forwarding/vxlan_bridge_1d.sh @@ -428,6 +428,14 @@ __test_flood() test_flood() { __test_flood de:ad:be:ef:13:37 192.0.2.100 "flood" + + # Add an entry with arbitrary destination IP. Verify that packets are + # not duplicated (this can happen if hardware floods the packets, and + # then traps them due to misconfiguration, so software data path repeats + # flooding and resends packets). 
+ bridge fdb append dev vx1 00:00:00:00:00:00 dst 198.51.100.1 self + __test_flood de:ad:be:ef:13:37 192.0.2.100 "flood, unresolved FDB entry" + bridge fdb del dev vx1 00:00:00:00:00:00 dst 198.51.100.1 self } vxlan_fdb_add_del() @@ -740,6 +748,8 @@ test_learning() vxlan_flood_test $mac $dst 0 10 0 + # The entry should age out when it only forwards traffic + $MZ $h1 -c 50 -d 1sec -p 64 -b $mac -B $dst -t icmp -q & sleep 60 bridge fdb show brport vx1 | grep $mac | grep -q self diff --git a/tools/testing/selftests/net/forwarding/vxlan_bridge_1q.sh b/tools/testing/selftests/net/forwarding/vxlan_bridge_1q.sh index fb9a34cb50c66..afc65647f6731 100755 --- a/tools/testing/selftests/net/forwarding/vxlan_bridge_1q.sh +++ b/tools/testing/selftests/net/forwarding/vxlan_bridge_1q.sh @@ -539,6 +539,21 @@ test_flood() 10 10 0 10 0 __test_flood ca:fe:be:ef:13:37 198.51.100.100 20 "flood vlan 20" \ 10 0 10 0 10 + + # Add entries with arbitrary destination IP. Verify that packets are + # not duplicated (this can happen if hardware floods the packets, and + # then traps them due to misconfiguration, so software data path repeats + # flooding and resends packets). + bridge fdb append dev vx10 00:00:00:00:00:00 dst 203.0.113.1 self + bridge fdb append dev vx20 00:00:00:00:00:00 dst 203.0.113.2 self + + __test_flood de:ad:be:ef:13:37 192.0.2.100 10 \ + "flood vlan 10, unresolved FDB entry" 10 10 0 10 0 + __test_flood ca:fe:be:ef:13:37 198.51.100.100 20 \ + "flood vlan 20, unresolved FDB entry" 10 0 10 0 10 + + bridge fdb del dev vx20 00:00:00:00:00:00 dst 203.0.113.2 self + bridge fdb del dev vx10 00:00:00:00:00:00 dst 203.0.113.1 self } vxlan_fdb_add_del() diff --git a/tools/testing/selftests/net/gro.c b/tools/testing/selftests/net/gro.c index b2184847e3881..d5824eadea109 100644 --- a/tools/testing/selftests/net/gro.c +++ b/tools/testing/selftests/net/gro.c @@ -1318,11 +1318,13 @@ int main(int argc, char **argv) read_MAC(src_mac, smac); read_MAC(dst_mac, dmac); - if (tx_socket) + if (tx_socket) { gro_sender(); - else + } else { + /* Only the receiver exit status determines test success. */ gro_receiver(); + fprintf(stderr, "Gro::%s test passed.\n", testname); + } - fprintf(stderr, "Gro::%s test passed.\n", testname); return 0; } diff --git a/tools/testing/selftests/net/gro.sh b/tools/testing/selftests/net/gro.sh index 02c21ff4ca81f..9e3f186bc2a14 100755 --- a/tools/testing/selftests/net/gro.sh +++ b/tools/testing/selftests/net/gro.sh @@ -18,10 +18,10 @@ run_test() { "--smac" "${CLIENT_MAC}" "--test" "${test}" "--verbose" ) setup_ns - # Each test is run 3 times to deflake, because given the receive timing, + # Each test is run 6 times to deflake, because given the receive timing, # not all packets that should coalesce will be considered in the same flow # on every try. 
- for tries in {1..3}; do + for tries in {1..6}; do # Actual test starts here ip netns exec $server_ns ./gro "${ARGS[@]}" "--rx" "--iface" "server" \ 1>>log.txt & @@ -100,5 +100,6 @@ trap cleanup EXIT if [[ "${test}" == "all" ]]; then run_all_tests else - run_test "${proto}" "${test}" + exit_code=$(run_test "${proto}" "${test}") + exit $exit_code fi; diff --git a/tools/testing/selftests/net/ip_local_port_range.sh b/tools/testing/selftests/net/ip_local_port_range.sh index 6c6ad346eaa0c..4ff746db12564 100755 --- a/tools/testing/selftests/net/ip_local_port_range.sh +++ b/tools/testing/selftests/net/ip_local_port_range.sh @@ -2,4 +2,6 @@ # SPDX-License-Identifier: GPL-2.0 ./in_netns.sh \ - sh -c 'sysctl -q -w net.ipv4.ip_local_port_range="40000 49999" && ./ip_local_port_range' + sh -c 'sysctl -q -w net.mptcp.enabled=1 && \ + sysctl -q -w net.ipv4.ip_local_port_range="40000 49999" && \ + ./ip_local_port_range' diff --git a/tools/testing/selftests/net/lib.sh b/tools/testing/selftests/net/lib.sh index 0bd9a038a1f0e..975be4fdbcdbe 100644 --- a/tools/testing/selftests/net/lib.sh +++ b/tools/testing/selftests/net/lib.sh @@ -450,6 +450,25 @@ kill_process() { kill $pid && wait $pid; } 2>/dev/null } +check_command() +{ + local cmd=$1; shift + + if [[ ! -x "$(command -v "$cmd")" ]]; then + log_test_skip "$cmd not installed" + return $EXIT_STATUS + fi +} + +require_command() +{ + local cmd=$1; shift + + if ! check_command "$cmd"; then + exit $EXIT_STATUS + fi +} + ip_link_add() { local name=$1; shift diff --git a/tools/testing/selftests/net/lib/Makefile b/tools/testing/selftests/net/lib/Makefile index bc6b6762baf3e..c22623b9a2a5f 100644 --- a/tools/testing/selftests/net/lib/Makefile +++ b/tools/testing/selftests/net/lib/Makefile @@ -9,7 +9,10 @@ TEST_FILES := ../../../../../Documentation/netlink/specs TEST_FILES += ../../../../net/ynl TEST_GEN_FILES += csum +TEST_GEN_FILES += $(patsubst %.c,%.o,$(wildcard *.bpf.c)) TEST_INCLUDES := $(wildcard py/*.py sh/*.sh) include ../../lib.mk + +include ../bpf.mk diff --git a/tools/testing/selftests/net/lib/py/__init__.py b/tools/testing/selftests/net/lib/py/__init__.py index 54d8f5eba8101..8697bd27dc305 100644 --- a/tools/testing/selftests/net/lib/py/__init__.py +++ b/tools/testing/selftests/net/lib/py/__init__.py @@ -2,8 +2,8 @@ from .consts import KSRC from .ksft import * -from .netns import NetNS +from .netns import NetNS, NetNSEnter from .nsim import * from .utils import * -from .ynl import NlError, YnlFamily, EthtoolFamily, NetdevFamily, RtnlFamily +from .ynl import NlError, YnlFamily, EthtoolFamily, NetdevFamily, RtnlFamily, RtnlAddrFamily from .ynl import NetshaperFamily diff --git a/tools/testing/selftests/net/lib/py/ksft.py b/tools/testing/selftests/net/lib/py/ksft.py index 3efe005436cd1..3cfad0fd45703 100644 --- a/tools/testing/selftests/net/lib/py/ksft.py +++ b/tools/testing/selftests/net/lib/py/ksft.py @@ -71,6 +71,11 @@ def ksft_in(a, b, comment=""): _fail("Check failed", a, "not in", b, comment) +def ksft_not_in(a, b, comment=""): + if a in b: + _fail("Check failed", a, "in", b, comment) + + def ksft_is(a, b, comment=""): if a is not b: _fail("Check failed", a, "is not", b, comment) @@ -202,7 +207,7 @@ def ksft_run(cases=None, globs=None, case_pfx=None, args=()): totals = {"pass": 0, "fail": 0, "skip": 0, "xfail": 0} - print("KTAP version 1") + print("TAP version 13") print("1.." 
+ str(len(cases)))
 
     global KSFT_RESULT
diff --git a/tools/testing/selftests/net/lib/py/netns.py b/tools/testing/selftests/net/lib/py/netns.py
index ecff85f9074fd..8e9317044eefe 100644
--- a/tools/testing/selftests/net/lib/py/netns.py
+++ b/tools/testing/selftests/net/lib/py/netns.py
@@ -1,9 +1,12 @@
 # SPDX-License-Identifier: GPL-2.0
 
 from .utils import ip
+import ctypes
 import random
 import string
 
+libc = ctypes.cdll.LoadLibrary('libc.so.6')
+
 
 class NetNS:
     def __init__(self, name=None):
@@ -29,3 +32,18 @@ def __str__(self):
 
     def __repr__(self):
         return f"NetNS({self.name})"
+
+
+class NetNSEnter:
+    def __init__(self, ns_name):
+        self.ns_path = f"/run/netns/{ns_name}"
+
+    def __enter__(self):
+        self.saved = open("/proc/thread-self/ns/net")
+        with open(self.ns_path) as ns_file:
+            libc.setns(ns_file.fileno(), 0)
+        return self
+
+    def __exit__(self, exc_type, exc_value, traceback):
+        libc.setns(self.saved.fileno(), 0)
+        self.saved.close()
diff --git a/tools/testing/selftests/net/lib/py/utils.py b/tools/testing/selftests/net/lib/py/utils.py
index 9e3bcddcf3e83..34470d65d871a 100644
--- a/tools/testing/selftests/net/lib/py/utils.py
+++ b/tools/testing/selftests/net/lib/py/utils.py
@@ -2,8 +2,10 @@
 
 import errno
 import json as _json
+import os
 import random
 import re
+import select
 import socket
 import subprocess
 import time
@@ -15,21 +17,56 @@ def __init__(self, msg, cmd_obj):
         self.cmd = cmd_obj
 
 
+def fd_read_timeout(fd, timeout):
+    rlist, _, _ = select.select([fd], [], [], timeout)
+    if rlist:
+        return os.read(fd, 1024)
+    else:
+        raise TimeoutError("Timeout waiting for fd read")
+
+
 class cmd:
-    def __init__(self, comm, shell=True, fail=True, ns=None, background=False, host=None, timeout=5):
+    """
+    Execute a command on local or remote host.
+
+    Use bkg() instead to run a command in the background.
+    """
+    def __init__(self, comm, shell=True, fail=True, ns=None, background=False,
+                 host=None, timeout=5, ksft_wait=None):
         if ns:
             comm = f'ip netns exec {ns} ' + comm
 
         self.stdout = None
         self.stderr = None
         self.ret = None
+        self.ksft_term_fd = None
 
         self.comm = comm
         if host:
             self.proc = host.cmd(comm)
         else:
+            # ksft_wait lets us wait for the background process to fully start:
+            # we pass an FD to the child process and wait for it to write back.
+            # Similarly, term_fd tells the child it's time to exit.
+            pass_fds = ()
+            env = os.environ.copy()
+            if ksft_wait is not None:
+                rfd, ready_fd = os.pipe()
+                wait_fd, self.ksft_term_fd = os.pipe()
+                pass_fds = (ready_fd, wait_fd, )
+                env["KSFT_READY_FD"] = str(ready_fd)
+                env["KSFT_WAIT_FD"] = str(wait_fd)
+
             self.proc = subprocess.Popen(comm, shell=shell, stdout=subprocess.PIPE,
-                                         stderr=subprocess.PIPE)
+                                         stderr=subprocess.PIPE, pass_fds=pass_fds,
+                                         env=env)
+            if ksft_wait is not None:
+                os.close(ready_fd)
+                os.close(wait_fd)
+                msg = fd_read_timeout(rfd, ksft_wait)
+                os.close(rfd)
+                if not msg:
+                    raise Exception("Did not receive ready message")
         if not background:
             self.process(terminate=False, fail=fail, timeout=timeout)
 
@@ -37,6 +74,8 @@ def process(self, terminate=True, fail=None, timeout=5):
         if fail is None:
             fail = not terminate
 
+        if self.ksft_term_fd:
+            os.write(self.ksft_term_fd, b"1")
         if terminate:
             self.proc.terminate()
         stdout, stderr = self.proc.communicate(timeout)
@@ -54,13 +93,36 @@
 
 
 class bkg(cmd):
+    """
+    Run a command in the background.
+
+    Example usage:
+
+    Run a command on a remote host, and wait for it to finish.
+    This is usually paired with wait_port_listen() to make sure
+    the command has initialized:
+
+    with bkg("socat ...", exit_wait=True, host=cfg.remote) as nc:
+        ...
+
+    Run a command and expect it to let us know that it's ready
+    by writing to a special file descriptor passed via KSFT_READY_FD.
+    The command will be terminated when we exit the context manager:
+
+    with bkg("my_binary", ksft_wait=5):
+    """
     def __init__(self, comm, shell=True, fail=None, ns=None, host=None,
-                 exit_wait=False):
+                 exit_wait=False, ksft_wait=None):
         super().__init__(comm, background=True,
-                         shell=shell, fail=fail, ns=ns, host=host)
-        self.terminate = not exit_wait
+                         shell=shell, fail=fail, ns=ns, host=host,
+                         ksft_wait=ksft_wait)
+        self.terminate = not exit_wait and not ksft_wait
         self.check_fail = fail
 
+        if shell and self.terminate:
+            print("# Warning: combining shell and terminate is risky!")
+            print("# SIGTERM may not reach the child on zsh/ksh!")
+
     def __enter__(self):
         return self
@@ -123,20 +185,13 @@ def ethtool(args, json=None, ns=None, host=None):
     return tool('ethtool', args, json=json, ns=ns, host=host)
 
 
-def rand_port():
+def rand_port(type=socket.SOCK_STREAM):
     """
-    Get a random unprivileged port, try to make sure it's not already used.
+    Get a random unprivileged port.
     """
-    for _ in range(1000):
-        port = random.randint(10000, 65535)
-        try:
-            with socket.socket(socket.AF_INET6, socket.SOCK_STREAM) as s:
-                s.bind(("", port))
-                return port
-        except OSError as e:
-            if e.errno != errno.EADDRINUSE:
-                raise
-    raise Exception("Can't find any free unprivileged port")
+    with socket.socket(socket.AF_INET6, type) as s:
+        s.bind(("", 0))
+        return s.getsockname()[1]
 
 
 def wait_port_listen(port, proto="tcp", ns=None, host=None, sleep=0.005, deadline=5):
diff --git a/tools/testing/selftests/net/lib/py/ynl.py b/tools/testing/selftests/net/lib/py/ynl.py
index ad1e36baee2a1..8986c584cb371 100644
--- a/tools/testing/selftests/net/lib/py/ynl.py
+++ b/tools/testing/selftests/net/lib/py/ynl.py
@@ -42,6 +42,10 @@ def __init__(self, recv_size=0):
         super().__init__((SPEC_PATH / Path('rt_link.yaml')).as_posix(),
                          schema='', recv_size=recv_size)
 
+class RtnlAddrFamily(YnlFamily):
+    def __init__(self, recv_size=0):
+        super().__init__((SPEC_PATH / Path('rt_addr.yaml')).as_posix(),
+                         schema='', recv_size=recv_size)
 
 class NetdevFamily(YnlFamily):
     def __init__(self, recv_size=0):
diff --git a/tools/testing/selftests/net/lib/xdp_dummy.bpf.c b/tools/testing/selftests/net/lib/xdp_dummy.bpf.c
new file mode 100644
index 0000000000000..e73fab3edd9f7
--- /dev/null
+++ b/tools/testing/selftests/net/lib/xdp_dummy.bpf.c
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#define KBUILD_MODNAME "xdp_dummy"
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+SEC("xdp")
+int xdp_dummy_prog(struct xdp_md *ctx)
+{
+	return XDP_PASS;
+}
+
+SEC("xdp.frags")
+int xdp_dummy_prog_frags(struct xdp_md *ctx)
+{
+	return XDP_PASS;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/net/link_netns.py b/tools/testing/selftests/net/link_netns.py
new file mode 100755
index 0000000000000..aab043c59d695
--- /dev/null
+++ b/tools/testing/selftests/net/link_netns.py
@@ -0,0 +1,141 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0
+
+import time
+
+from lib.py import ksft_run, ksft_exit, ksft_true
+from lib.py import ip
+from lib.py import NetNS, NetNSEnter
+from lib.py import RtnlFamily
+
+
+LINK_NETNSID = 100
+
+
+def test_event() -> None:
+    with NetNS() as ns1, NetNS() as ns2:
+        with NetNSEnter(str(ns2)):
+            rtnl = RtnlFamily()
+
rtnl.ntf_subscribe("rtnlgrp-link") + + ip(f"netns set {ns2} {LINK_NETNSID}", ns=str(ns1)) + ip(f"link add netns {ns1} link-netnsid {LINK_NETNSID} dummy1 type dummy") + ip(f"link add netns {ns1} dummy2 type dummy", ns=str(ns2)) + + ip("link del dummy1", ns=str(ns1)) + ip("link del dummy2", ns=str(ns1)) + + time.sleep(1) + rtnl.check_ntf() + ksft_true(rtnl.async_msg_queue.empty(), + "Received unexpected link notification") + + +def validate_link_netns(netns, ifname, link_netnsid) -> bool: + link_info = ip(f"-d link show dev {ifname}", ns=netns, json=True) + if not link_info: + return False + return link_info[0].get("link_netnsid") == link_netnsid + + +def test_link_net() -> None: + configs = [ + # type, common args, type args, fallback to dev_net + ("ipvlan", "link dummy1", "", False), + ("macsec", "link dummy1", "", False), + ("macvlan", "link dummy1", "", False), + ("macvtap", "link dummy1", "", False), + ("vlan", "link dummy1", "id 100", False), + ("gre", "", "local 192.0.2.1", True), + ("vti", "", "local 192.0.2.1", True), + ("ipip", "", "local 192.0.2.1", True), + ("ip6gre", "", "local 2001:db8::1", True), + ("ip6tnl", "", "local 2001:db8::1", True), + ("vti6", "", "local 2001:db8::1", True), + ("sit", "", "local 192.0.2.1", True), + ("xfrm", "", "if_id 1", True), + ] + + with NetNS() as ns1, NetNS() as ns2, NetNS() as ns3: + net1, net2, net3 = str(ns1), str(ns2), str(ns3) + + # prepare link netnsid and a dummy link needed by certain drivers + ip(f"netns set {net3} {LINK_NETNSID}", ns=str(net2)) + ip("link add dummy1 type dummy", ns=net3) + + cases = [ + # source, "netns", "link-netns", expected link-netns + (net3, None, None, None, None), + (net3, net2, None, None, LINK_NETNSID), + (net2, None, net3, LINK_NETNSID, LINK_NETNSID), + (net1, net2, net3, LINK_NETNSID, LINK_NETNSID), + ] + + for src_net, netns, link_netns, exp1, exp2 in cases: + tgt_net = netns or src_net + for typ, cargs, targs, fb_dev_net in configs: + cmd = "link add" + if netns: + cmd += f" netns {netns}" + if link_netns: + cmd += f" link-netns {link_netns}" + cmd += f" {cargs} foo type {typ} {targs}" + ip(cmd, ns=src_net) + if fb_dev_net: + ksft_true(validate_link_netns(tgt_net, "foo", exp1), + f"{typ} link_netns validation failed") + else: + ksft_true(validate_link_netns(tgt_net, "foo", exp2), + f"{typ} link_netns validation failed") + ip(f"link del foo", ns=tgt_net) + + +def test_peer_net() -> None: + types = [ + "vxcan", + "netkit", + "veth", + ] + + with NetNS() as ns1, NetNS() as ns2, NetNS() as ns3, NetNS() as ns4: + net1, net2, net3, net4 = str(ns1), str(ns2), str(ns3), str(ns4) + + ip(f"netns set {net3} {LINK_NETNSID}", ns=str(net2)) + + cases = [ + # source, "netns", "link-netns", "peer netns", expected + (net1, None, None, None, None), + (net1, net2, None, None, None), + (net2, None, net3, None, LINK_NETNSID), + (net1, net2, net3, None, None), + (net2, None, None, net3, LINK_NETNSID), + (net1, net2, None, net3, LINK_NETNSID), + (net2, None, net2, net3, LINK_NETNSID), + (net1, net2, net4, net3, LINK_NETNSID), + ] + + for src_net, netns, link_netns, peer_netns, exp in cases: + tgt_net = netns or src_net + for typ in types: + cmd = "link add" + if netns: + cmd += f" netns {netns}" + if link_netns: + cmd += f" link-netns {link_netns}" + cmd += f" foo type {typ}" + if peer_netns: + cmd += f" peer netns {peer_netns}" + ip(cmd, ns=src_net) + ksft_true(validate_link_netns(tgt_net, "foo", exp), + f"{typ} peer_netns validation failed") + ip(f"link del foo", ns=tgt_net) + + +def main() -> None: + ksft_run([test_event, 
test_link_net, test_peer_net]) + ksft_exit() + + +if __name__ == "__main__": + main() diff --git a/tools/testing/selftests/net/lwt_dst_cache_ref_loop.sh b/tools/testing/selftests/net/lwt_dst_cache_ref_loop.sh new file mode 100755 index 0000000000000..881eb399798f7 --- /dev/null +++ b/tools/testing/selftests/net/lwt_dst_cache_ref_loop.sh @@ -0,0 +1,246 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0+ +# +# Author: Justin Iurman +# +# WARNING +# ------- +# This is just a dummy script that triggers encap cases with possible dst cache +# reference loops in affected lwt users (see list below). Some cases are +# pathological configurations for simplicity, others are valid. Overall, we +# don't want this issue to happen, no matter what. In order to catch any +# reference loops, kmemleak MUST be used. The results alone are always blindly +# successful, don't rely on them. Note that the following tests may crash the +# kernel if the fix to prevent lwtunnel_{input|output|xmit}() reentry loops is +# not present. +# +# Affected lwt users so far (please update accordingly if needed): +# - ila_lwt (output only) +# - ioam6_iptunnel (output only) +# - rpl_iptunnel (both input and output) +# - seg6_iptunnel (both input and output) + +source lib.sh + +check_compatibility() +{ + setup_ns tmp_node &>/dev/null + if [ $? != 0 ]; then + echo "SKIP: Cannot create netns." + exit $ksft_skip + fi + + ip link add name veth0 netns $tmp_node type veth \ + peer name veth1 netns $tmp_node &>/dev/null + local ret=$? + + ip -netns $tmp_node link set veth0 up &>/dev/null + ret=$((ret + $?)) + + ip -netns $tmp_node link set veth1 up &>/dev/null + ret=$((ret + $?)) + + if [ $ret != 0 ]; then + echo "SKIP: Cannot configure links." + cleanup_ns $tmp_node + exit $ksft_skip + fi + + lsmod 2>/dev/null | grep -q "ila" + ila_lsmod=$? + [ $ila_lsmod != 0 ] && modprobe ila &>/dev/null + + ip -netns $tmp_node route add 2001:db8:1::/64 \ + encap ila 1:2:3:4 csum-mode no-action ident-type luid \ + hook-type output \ + dev veth0 &>/dev/null + + ip -netns $tmp_node route add 2001:db8:2::/64 \ + encap ioam6 trace prealloc type 0x800000 ns 0 size 4 \ + dev veth0 &>/dev/null + + ip -netns $tmp_node route add 2001:db8:3::/64 \ + encap rpl segs 2001:db8:3::1 dev veth0 &>/dev/null + + ip -netns $tmp_node route add 2001:db8:4::/64 \ + encap seg6 mode inline segs 2001:db8:4::1 dev veth0 &>/dev/null + + ip -netns $tmp_node -6 route 2>/dev/null | grep -q "encap ila" + skip_ila=$? + + ip -netns $tmp_node -6 route 2>/dev/null | grep -q "encap ioam6" + skip_ioam6=$? + + ip -netns $tmp_node -6 route 2>/dev/null | grep -q "encap rpl" + skip_rpl=$? + + ip -netns $tmp_node -6 route 2>/dev/null | grep -q "encap seg6" + skip_seg6=$? 
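+	# A non-zero skip_* flag means the probe route above was rejected,
+	# i.e. this kernel lacks that encap type, so the matching test is
+	# skipped below.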
+ + cleanup_ns $tmp_node +} + +setup() +{ + setup_ns alpha beta gamma &>/dev/null + + ip link add name veth-alpha netns $alpha type veth \ + peer name veth-betaL netns $beta &>/dev/null + + ip link add name veth-betaR netns $beta type veth \ + peer name veth-gamma netns $gamma &>/dev/null + + ip -netns $alpha link set veth-alpha name veth0 &>/dev/null + ip -netns $beta link set veth-betaL name veth0 &>/dev/null + ip -netns $beta link set veth-betaR name veth1 &>/dev/null + ip -netns $gamma link set veth-gamma name veth0 &>/dev/null + + ip -netns $alpha addr add 2001:db8:1::2/64 dev veth0 &>/dev/null + ip -netns $alpha link set veth0 up &>/dev/null + ip -netns $alpha link set lo up &>/dev/null + ip -netns $alpha route add 2001:db8:2::/64 \ + via 2001:db8:1::1 dev veth0 &>/dev/null + + ip -netns $beta addr add 2001:db8:1::1/64 dev veth0 &>/dev/null + ip -netns $beta addr add 2001:db8:2::1/64 dev veth1 &>/dev/null + ip -netns $beta link set veth0 up &>/dev/null + ip -netns $beta link set veth1 up &>/dev/null + ip -netns $beta link set lo up &>/dev/null + ip -netns $beta route del 2001:db8:2::/64 + ip -netns $beta route add 2001:db8:2::/64 dev veth1 + ip netns exec $beta \ + sysctl -wq net.ipv6.conf.all.forwarding=1 &>/dev/null + + ip -netns $gamma addr add 2001:db8:2::2/64 dev veth0 &>/dev/null + ip -netns $gamma link set veth0 up &>/dev/null + ip -netns $gamma link set lo up &>/dev/null + ip -netns $gamma route add 2001:db8:1::/64 \ + via 2001:db8:2::1 dev veth0 &>/dev/null + + sleep 1 + + ip netns exec $alpha ping6 -c 5 -W 1 2001:db8:2::2 &>/dev/null + if [ $? != 0 ]; then + echo "SKIP: Setup failed." + exit $ksft_skip + fi + + sleep 1 +} + +cleanup() +{ + cleanup_ns $alpha $beta $gamma + [ $ila_lsmod != 0 ] && modprobe -r ila &>/dev/null +} + +run_ila() +{ + if [ $skip_ila != 0 ]; then + echo "SKIP: ila (output)" + return + fi + + ip -netns $beta route del 2001:db8:2::/64 + ip -netns $beta route add 2001:db8:2:0:0:0:0:2/128 \ + encap ila 2001:db8:2:0 csum-mode no-action ident-type luid \ + hook-type output \ + dev veth1 &>/dev/null + sleep 1 + + echo "TEST: ila (output)" + ip netns exec $beta ping6 -c 2 -W 1 2001:db8:2::2 &>/dev/null + sleep 1 + + ip -netns $beta route del 2001:db8:2:0:0:0:0:2/128 + ip -netns $beta route add 2001:db8:2::/64 dev veth1 + sleep 1 +} + +run_ioam6() +{ + if [ $skip_ioam6 != 0 ]; then + echo "SKIP: ioam6 (output)" + return + fi + + ip -netns $beta route change 2001:db8:2::/64 \ + encap ioam6 trace prealloc type 0x800000 ns 1 size 4 \ + dev veth1 &>/dev/null + sleep 1 + + echo "TEST: ioam6 (output)" + ip netns exec $beta ping6 -c 2 -W 1 2001:db8:2::2 &>/dev/null + sleep 1 +} + +run_rpl() +{ + if [ $skip_rpl != 0 ]; then + echo "SKIP: rpl (input)" + echo "SKIP: rpl (output)" + return + fi + + ip -netns $beta route change 2001:db8:2::/64 \ + encap rpl segs 2001:db8:2::2 \ + dev veth1 &>/dev/null + sleep 1 + + echo "TEST: rpl (input)" + ip netns exec $alpha ping6 -c 2 -W 1 2001:db8:2::2 &>/dev/null + sleep 1 + + echo "TEST: rpl (output)" + ip netns exec $beta ping6 -c 2 -W 1 2001:db8:2::2 &>/dev/null + sleep 1 +} + +run_seg6() +{ + if [ $skip_seg6 != 0 ]; then + echo "SKIP: seg6 (input)" + echo "SKIP: seg6 (output)" + return + fi + + ip -netns $beta route change 2001:db8:2::/64 \ + encap seg6 mode inline segs 2001:db8:2::2 \ + dev veth1 &>/dev/null + sleep 1 + + echo "TEST: seg6 (input)" + ip netns exec $alpha ping6 -c 2 -W 1 2001:db8:2::2 &>/dev/null + sleep 1 + + echo "TEST: seg6 (output)" + ip netns exec $beta ping6 -c 2 -W 1 2001:db8:2::2 &>/dev/null + sleep 1 
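+	# As for the other encaps, the ping result is deliberately ignored;
+	# only a kmemleak scan can reveal a leaked dst cache reference.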
+}
+
+run()
+{
+	run_ila
+	run_ioam6
+	run_rpl
+	run_seg6
+}
+
+if [ "$(id -u)" -ne 0 ]; then
+	echo "SKIP: Need root privileges."
+	exit $ksft_skip
+fi
+
+if [ ! -x "$(command -v ip)" ]; then
+	echo "SKIP: Could not run test without ip tool."
+	exit $ksft_skip
+fi
+
+check_compatibility
+
+trap cleanup EXIT
+
+setup
+run
+
+exit $ksft_pass
diff --git a/tools/testing/selftests/net/mptcp/Makefile b/tools/testing/selftests/net/mptcp/Makefile
index c76525fe2b84d..340e1a777e16a 100644
--- a/tools/testing/selftests/net/mptcp/Makefile
+++ b/tools/testing/selftests/net/mptcp/Makefile
@@ -7,7 +7,7 @@ CFLAGS += -Wall -Wl,--no-as-needed -O2 -g -I$(top_srcdir)/usr/include $(KHDR_INC
 TEST_PROGS := mptcp_connect.sh pm_netlink.sh mptcp_join.sh diag.sh \
 	      simult_flows.sh mptcp_sockopt.sh userspace_pm.sh
 
-TEST_GEN_FILES = mptcp_connect pm_nl_ctl mptcp_sockopt mptcp_inq
+TEST_GEN_FILES = mptcp_connect pm_nl_ctl mptcp_sockopt mptcp_inq mptcp_diag
 
 TEST_FILES := mptcp_lib.sh settings
diff --git a/tools/testing/selftests/net/mptcp/diag.sh b/tools/testing/selftests/net/mptcp/diag.sh
index 2bd0c1eb70c5b..4f55477ffe087 100755
--- a/tools/testing/selftests/net/mptcp/diag.sh
+++ b/tools/testing/selftests/net/mptcp/diag.sh
@@ -200,6 +200,32 @@ chk_msk_cestab()
 		"${expected}" "${msg}" ""
 }
 
+chk_dump_one()
+{
+	local ss_token
+	local token
+	local msg
+
+	ss_token="$(ss -inmHMN $ns | grep 'token:' |\
+		    head -n 1 |\
+		    sed 's/.*token:\([0-9a-f]*\).*/\1/')"
+
+	token="$(ip netns exec $ns ./mptcp_diag -t $ss_token |\
+		 awk -F':[ \t]+' '/^token/ {print $2}')"
+
+	msg="....chk dump_one"
+
+	mptcp_lib_print_title "$msg"
+	if [ -n "$ss_token" ] && [ "$ss_token" = "$token" ]; then
+		mptcp_lib_pr_ok
+		mptcp_lib_result_pass "${msg}"
+	else
+		mptcp_lib_pr_fail "expected $ss_token found $token"
+		mptcp_lib_result_fail "${msg}"
+		ret=${KSFT_FAIL}
+	fi
+}
+
 msk_info_get_value()
 {
 	local port="${1}"
@@ -290,6 +316,7 @@ chk_msk_remote_key_nr 2 "....chk remote_key"
 chk_msk_fallback_nr 0 "....chk no fallback"
 chk_msk_inuse 2
 chk_msk_cestab 2
+chk_dump_one
 flush_pids
 
 chk_msk_inuse 0 "2->0"
diff --git a/tools/testing/selftests/net/mptcp/mptcp_diag.c b/tools/testing/selftests/net/mptcp/mptcp_diag.c
new file mode 100644
index 0000000000000..284286c524cfe
--- /dev/null
+++ b/tools/testing/selftests/net/mptcp/mptcp_diag.c
@@ -0,0 +1,272 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025, Kylin Software */
+
+#include <linux/netlink.h>
+#include <linux/rtnetlink.h>
+#include <linux/inet_diag.h>
+#include <linux/sock_diag.h>
+#include <linux/types.h>
+#include <sys/socket.h>
+#include <netinet/in.h>
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <errno.h>
+
+#ifndef IPPROTO_MPTCP
+#define IPPROTO_MPTCP 262
+#endif
+
+struct mptcp_info {
+	__u8	mptcpi_subflows;
+	__u8	mptcpi_add_addr_signal;
+	__u8	mptcpi_add_addr_accepted;
+	__u8	mptcpi_subflows_max;
+	__u8	mptcpi_add_addr_signal_max;
+	__u8	mptcpi_add_addr_accepted_max;
+	__u32	mptcpi_flags;
+	__u32	mptcpi_token;
+	__u64	mptcpi_write_seq;
+	__u64	mptcpi_snd_una;
+	__u64	mptcpi_rcv_nxt;
+	__u8	mptcpi_local_addr_used;
+	__u8	mptcpi_local_addr_max;
+	__u8	mptcpi_csum_enabled;
+	__u32	mptcpi_retransmits;
+	__u64	mptcpi_bytes_retrans;
+	__u64	mptcpi_bytes_sent;
+	__u64	mptcpi_bytes_received;
+	__u64	mptcpi_bytes_acked;
+	__u8	mptcpi_subflows_total;
+	__u8	reserved[3];
+	__u32	mptcpi_last_data_sent;
+	__u32	mptcpi_last_data_recv;
+	__u32	mptcpi_last_ack_recv;
+};
+
+static void die_perror(const char *msg)
+{
+	perror(msg);
+	exit(1);
+}
+
+static void die_usage(int r)
+{
+	fprintf(stderr, "Usage: mptcp_diag -t <token>\n");
+	exit(r);
+}
+
+static void send_query(int fd, __u32 token)
+{
+	struct sockaddr_nl nladdr = {
+		.nl_family = AF_NETLINK
+	};
+	struct {
+		struct nlmsghdr nlh;
+		struct inet_diag_req_v2 r;
+	} req = {
+		.nlh = {
+			.nlmsg_len = sizeof(req),
+			.nlmsg_type = SOCK_DIAG_BY_FAMILY,
+			.nlmsg_flags = NLM_F_REQUEST
+		},
+		.r = {
+			.sdiag_family = AF_INET,
+			/* Real proto is set via INET_DIAG_REQ_PROTOCOL */
+			.sdiag_protocol = IPPROTO_TCP,
+			.id.idiag_cookie[0] = token,
+		}
+	};
+	struct rtattr rta_proto;
+	struct iovec iov[6];
+	int iovlen = 1;
+	__u32 proto;
+
+	req.r.idiag_ext |= (1 << (INET_DIAG_INFO - 1));
+	proto = IPPROTO_MPTCP;
+	rta_proto.rta_type = INET_DIAG_REQ_PROTOCOL;
+	rta_proto.rta_len = RTA_LENGTH(sizeof(proto));
+
+	iov[0] = (struct iovec) {
+		.iov_base = &req,
+		.iov_len = sizeof(req)
+	};
+	iov[iovlen] = (struct iovec){ &rta_proto, sizeof(rta_proto)};
+	iov[iovlen + 1] = (struct iovec){ &proto, sizeof(proto)};
+	req.nlh.nlmsg_len += RTA_LENGTH(sizeof(proto));
+	iovlen += 2;
+	struct msghdr msg = {
+		.msg_name = &nladdr,
+		.msg_namelen = sizeof(nladdr),
+		.msg_iov = iov,
+		.msg_iovlen = iovlen
+	};
+
+	for (;;) {
+		if (sendmsg(fd, &msg, 0) < 0) {
+			if (errno == EINTR)
+				continue;
+			die_perror("sendmsg");
+		}
+		break;
+	}
+}
+
+static void parse_rtattr_flags(struct rtattr *tb[], int max, struct rtattr *rta,
+			       int len, unsigned short flags)
+{
+	unsigned short type;
+
+	memset(tb, 0, sizeof(struct rtattr *) * (max + 1));
+	while (RTA_OK(rta, len)) {
+		type = rta->rta_type & ~flags;
+		if (type <= max && !tb[type])
+			tb[type] = rta;
+		rta = RTA_NEXT(rta, len);
+	}
+}
+
+static void print_info_msg(struct mptcp_info *info)
+{
+	printf("Token & Flags\n");
+	printf("token: %x\n", info->mptcpi_token);
+	printf("flags: %x\n", info->mptcpi_flags);
+	printf("csum_enabled: %u\n", info->mptcpi_csum_enabled);
+
+	printf("\nBasic Info\n");
+	printf("subflows: %u\n", info->mptcpi_subflows);
+	printf("subflows_max: %u\n", info->mptcpi_subflows_max);
+	printf("subflows_total: %u\n", info->mptcpi_subflows_total);
+	printf("local_addr_used: %u\n", info->mptcpi_local_addr_used);
+	printf("local_addr_max: %u\n", info->mptcpi_local_addr_max);
+	printf("add_addr_signal: %u\n", info->mptcpi_add_addr_signal);
+	printf("add_addr_accepted: %u\n", info->mptcpi_add_addr_accepted);
+	printf("add_addr_signal_max: %u\n", info->mptcpi_add_addr_signal_max);
+	printf("add_addr_accepted_max: %u\n", info->mptcpi_add_addr_accepted_max);
+
+	printf("\nTransmission Info\n");
+	printf("write_seq: %llu\n", info->mptcpi_write_seq);
+	printf("snd_una: %llu\n", info->mptcpi_snd_una);
+	printf("rcv_nxt: %llu\n", info->mptcpi_rcv_nxt);
+	printf("last_data_sent: %u\n", info->mptcpi_last_data_sent);
+	printf("last_data_recv: %u\n", info->mptcpi_last_data_recv);
+	printf("last_ack_recv: %u\n", info->mptcpi_last_ack_recv);
+	printf("retransmits: %u\n", info->mptcpi_retransmits);
+	printf("retransmit bytes: %llu\n", info->mptcpi_bytes_retrans);
+	printf("bytes_sent: %llu\n", info->mptcpi_bytes_sent);
+	printf("bytes_received: %llu\n", info->mptcpi_bytes_received);
+	printf("bytes_acked: %llu\n", info->mptcpi_bytes_acked);
+}
+
+static void parse_nlmsg(struct nlmsghdr *nlh)
+{
+	struct inet_diag_msg *r = NLMSG_DATA(nlh);
+	struct rtattr *tb[INET_DIAG_MAX + 1];
+
+	parse_rtattr_flags(tb, INET_DIAG_MAX, (struct rtattr *)(r + 1),
+			   nlh->nlmsg_len - NLMSG_LENGTH(sizeof(*r)),
+			   NLA_F_NESTED);
+
+	if (tb[INET_DIAG_INFO]) {
+		int len = RTA_PAYLOAD(tb[INET_DIAG_INFO]);
+		struct mptcp_info *info;
+
+		/* workaround for older kernels with fewer fields */
+		if (len < sizeof(*info)) {
+			info = alloca(sizeof(*info));
+			memcpy(info,
RTA_DATA(tb[INET_DIAG_INFO]), len); + memset((char *)info + len, 0, sizeof(*info) - len); + } else { + info = RTA_DATA(tb[INET_DIAG_INFO]); + } + print_info_msg(info); + } +} + +static void recv_nlmsg(int fd, struct nlmsghdr *nlh) +{ + char rcv_buff[8192]; + struct sockaddr_nl rcv_nladdr = { + .nl_family = AF_NETLINK + }; + struct iovec rcv_iov = { + .iov_base = rcv_buff, + .iov_len = sizeof(rcv_buff) + }; + struct msghdr rcv_msg = { + .msg_name = &rcv_nladdr, + .msg_namelen = sizeof(rcv_nladdr), + .msg_iov = &rcv_iov, + .msg_iovlen = 1 + }; + int len; + + len = recvmsg(fd, &rcv_msg, 0); + nlh = (struct nlmsghdr *)rcv_buff; + + while (NLMSG_OK(nlh, len)) { + if (nlh->nlmsg_type == NLMSG_DONE) { + printf("NLMSG_DONE\n"); + break; + } else if (nlh->nlmsg_type == NLMSG_ERROR) { + struct nlmsgerr *err; + + err = (struct nlmsgerr *)NLMSG_DATA(nlh); + printf("Error %d:%s\n", + -(err->error), strerror(-(err->error))); + break; + } + parse_nlmsg(nlh); + nlh = NLMSG_NEXT(nlh, len); + } +} + +static void get_mptcpinfo(__u32 token) +{ + struct nlmsghdr *nlh = NULL; + int fd; + + fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_SOCK_DIAG); + if (fd < 0) + die_perror("Netlink socket"); + + send_query(fd, token); + recv_nlmsg(fd, nlh); + + close(fd); +} + +static void parse_opts(int argc, char **argv, __u32 *target_token) +{ + int c; + + if (argc < 2) + die_usage(1); + + while ((c = getopt(argc, argv, "ht:")) != -1) { + switch (c) { + case 'h': + die_usage(0); + break; + case 't': + sscanf(optarg, "%x", target_token); + break; + default: + die_usage(1); + break; + } + } +} + +int main(int argc, char *argv[]) +{ + __u32 target_token; + + parse_opts(argc, argv, &target_token); + get_mptcpinfo(target_token); + + return 0; +} + diff --git a/tools/testing/selftests/net/mptcp/simult_flows.sh b/tools/testing/selftests/net/mptcp/simult_flows.sh index 9c2a415976cbf..2329c2f8519b7 100755 --- a/tools/testing/selftests/net/mptcp/simult_flows.sh +++ b/tools/testing/selftests/net/mptcp/simult_flows.sh @@ -28,7 +28,7 @@ size=0 usage() { echo "Usage: $0 [ -b ] [ -c ] [ -d ] [ -i]" - echo -e "\t-b: bail out after first error, otherwise runs al testcases" + echo -e "\t-b: bail out after first error, otherwise runs all testcases" echo -e "\t-c: capture packets for each test using tcpdump (default: no capture)" echo -e "\t-d: debug this script" echo -e "\t-i: use 'ip mptcp' instead of 'pm_nl_ctl'" diff --git a/tools/testing/selftests/net/mptcp/userspace_pm.sh b/tools/testing/selftests/net/mptcp/userspace_pm.sh index 3651f73451cf8..333064b0b5ac0 100755 --- a/tools/testing/selftests/net/mptcp/userspace_pm.sh +++ b/tools/testing/selftests/net/mptcp/userspace_pm.sh @@ -117,7 +117,36 @@ cleanup() trap cleanup EXIT # Create and configure network namespaces for testing +print_title "Init" mptcp_lib_ns_init ns1 ns2 + +# check path_manager and pm_type sysctl mapping +if [ -f /proc/sys/net/mptcp/path_manager ]; then + ip netns exec "$ns1" sysctl -q net.mptcp.path_manager=userspace + pm_type="$(ip netns exec "$ns1" sysctl -n net.mptcp.pm_type)" + if [ "${pm_type}" != "1" ]; then + test_fail "unexpected pm_type: ${pm_type}" + mptcp_lib_result_print_all_tap + exit ${KSFT_FAIL} + fi + + ip netns exec "$ns1" sysctl -q net.mptcp.path_manager=error 2>/dev/null + pm_type="$(ip netns exec "$ns1" sysctl -n net.mptcp.pm_type)" + if [ "${pm_type}" != "1" ]; then + test_fail "unexpected pm_type after error: ${pm_type}" + mptcp_lib_result_print_all_tap + exit ${KSFT_FAIL} + fi + + ip netns exec "$ns1" sysctl -q net.mptcp.pm_type=0 + pm_name="$(ip netns 
exec "$ns1" sysctl -n net.mptcp.path_manager)" + if [ "${pm_name}" != "kernel" ]; then + test_fail "unexpected path-manager: ${pm_name}" + mptcp_lib_result_print_all_tap + exit ${KSFT_FAIL} + fi +fi + for i in "$ns1" "$ns2" ;do ip netns exec "$i" sysctl -q net.mptcp.pm_type=1 done @@ -152,7 +181,6 @@ mptcp_lib_events "${ns1}" "${server_evts}" server_evts_pid sleep 0.5 mptcp_lib_subtests_last_ts_reset -print_title "Init" print_test "Created network namespaces ns1, ns2" test_pass diff --git a/tools/testing/selftests/net/netfilter/br_netfilter.sh b/tools/testing/selftests/net/netfilter/br_netfilter.sh index c28379a965d83..1559ba275105e 100755 --- a/tools/testing/selftests/net/netfilter/br_netfilter.sh +++ b/tools/testing/selftests/net/netfilter/br_netfilter.sh @@ -13,6 +13,12 @@ source lib.sh checktool "nft --version" "run test without nft tool" +read t < /proc/sys/kernel/tainted +if [ "$t" -ne 0 ];then + echo SKIP: kernel is tainted + exit $ksft_skip +fi + cleanup() { cleanup_all_ns } @@ -165,6 +171,7 @@ if [ "$t" -eq 0 ];then echo PASS: kernel not tainted else echo ERROR: kernel is tainted + dmesg ret=1 fi diff --git a/tools/testing/selftests/net/netfilter/br_netfilter_queue.sh b/tools/testing/selftests/net/netfilter/br_netfilter_queue.sh index 6a764d70ab06f..4788641717d93 100755 --- a/tools/testing/selftests/net/netfilter/br_netfilter_queue.sh +++ b/tools/testing/selftests/net/netfilter/br_netfilter_queue.sh @@ -4,6 +4,12 @@ source lib.sh checktool "nft --version" "run test without nft tool" +read t < /proc/sys/kernel/tainted +if [ "$t" -ne 0 ];then + echo SKIP: kernel is tainted + exit $ksft_skip +fi + cleanup() { cleanup_all_ns } @@ -72,6 +78,7 @@ if [ "$t" -eq 0 ];then echo PASS: kernel not tainted else echo ERROR: kernel is tainted + dmesg exit 1 fi diff --git a/tools/testing/selftests/net/netfilter/nft_queue.sh b/tools/testing/selftests/net/netfilter/nft_queue.sh index 785e3875a6da4..784d1b46912b0 100755 --- a/tools/testing/selftests/net/netfilter/nft_queue.sh +++ b/tools/testing/selftests/net/netfilter/nft_queue.sh @@ -593,6 +593,7 @@ EOF echo "PASS: queue program exiting while packets queued" else echo "TAINT: queue program exiting while packets queued" + dmesg ret=1 fi } diff --git a/tools/testing/selftests/net/netns-name.sh b/tools/testing/selftests/net/netns-name.sh index 6974474c26f3e..0be1905d1f2f1 100755 --- a/tools/testing/selftests/net/netns-name.sh +++ b/tools/testing/selftests/net/netns-name.sh @@ -78,6 +78,16 @@ ip -netns $NS link show dev $ALT_NAME 2> /dev/null && fail "Can still find alt-name after move" ip -netns $test_ns link del $DEV || fail +# +# Test no conflict of the same name/ifindex in different netns +# +ip -netns $NS link add name $DEV index 100 type dummy || fail +ip -netns $NS link add netns $test_ns name $DEV index 100 type dummy || + fail "Can create in netns without moving" +ip -netns $test_ns link show dev $DEV >> /dev/null || fail "Device not found" +ip -netns $NS link del $DEV || fail +ip -netns $test_ns link del $DEV || fail + echo -ne "$(basename $0) \t\t\t\t" if [ $RET_CODE -eq 0 ]; then echo "[ OK ]" diff --git a/tools/testing/selftests/net/netns-sysctl.sh b/tools/testing/selftests/net/netns-sysctl.sh index 45c34a3b9aae3..bb6f173a28e25 100755 --- a/tools/testing/selftests/net/netns-sysctl.sh +++ b/tools/testing/selftests/net/netns-sysctl.sh @@ -20,21 +20,31 @@ fail() { setup_ns test_ns for sc in {r,w}mem_{default,max}; do - # check that this is writable in a netns + initial_value="$(sysctl -n "net.core.$sc")" + + # check that this is writable in 
the init netns [ -w "/proc/sys/net/core/$sc" ] || fail "$sc isn't writable in the init netns!" - # change the value in the host netns + # change the value in the init netns sysctl -qw "net.core.$sc=300000" || fail "Can't write $sc in init netns!" - # check that the value is read from the init netns - [ "$(ip netns exec $test_ns sysctl -n "net.core.$sc")" -eq 300000 ] || + # check that the value did not change in the test netns + [ "$(ip netns exec $test_ns sysctl -n "net.core.$sc")" -eq "$initial_value" ] || fail "Value for $sc mismatch!" - # check that this isn't writable in a netns - ip netns exec $test_ns [ -w "/proc/sys/net/core/$sc" ] && - fail "$sc is writable in a netns!" + # check that this is also writable in the test netns + ip netns exec $test_ns [ -w "/proc/sys/net/core/$sc" ] || + fail "$sc isn't writable in the test netns!" + + # change the value in the test netns + ip netns exec $test_ns sysctl -qw "net.core.$sc=200000" || + fail "Can't write $sc in test netns!" + + # check that the value is read from the test netns + [ "$(ip netns exec $test_ns sysctl -n "net.core.$sc")" -eq 200000 ] || + fail "Value for $sc mismatch!" done echo 'Test passed OK' diff --git a/tools/testing/selftests/net/nl_netdev.py b/tools/testing/selftests/net/nl_netdev.py index 93e8cb671c3d9..beaee5e4e2aab 100755 --- a/tools/testing/selftests/net/nl_netdev.py +++ b/tools/testing/selftests/net/nl_netdev.py @@ -35,6 +35,21 @@ def napi_list_check(nf) -> None: comment=f"queue count after reset queue {q} mode {i}") +def nsim_rxq_reset_down(nf) -> None: + """ + Test that the queue API supports resetting a queue + while the interface is down. We should convert this + test to testing real HW once more devices support + queue API. + """ + with NetdevSimDev(queue_count=4) as nsimdev: + nsim = nsimdev.nsims[0] + + ip(f"link set dev {nsim.ifname} down") + for i in [0, 2, 3]: + nsim.dfs_write("queue_reset", f"1 {i}") + + def page_pool_check(nf) -> None: with NetdevSimDev() as nsimdev: nsim = nsimdev.nsims[0] @@ -106,7 +121,8 @@ def get_pp(): def main() -> None: nf = NetdevFamily() - ksft_run([empty_check, lo_check, page_pool_check, napi_list_check], + ksft_run([empty_check, lo_check, page_pool_check, napi_list_check, + nsim_rxq_reset_down], args=(nf, )) ksft_exit() diff --git a/tools/testing/selftests/net/openvswitch/openvswitch.sh b/tools/testing/selftests/net/openvswitch/openvswitch.sh index 960e1ab4dd04b..3c8d3455d8e7f 100755 --- a/tools/testing/selftests/net/openvswitch/openvswitch.sh +++ b/tools/testing/selftests/net/openvswitch/openvswitch.sh @@ -330,6 +330,11 @@ test_psample() { # - drop packets and verify the right drop reason is reported test_drop_reason() { which perf >/dev/null 2>&1 || return $ksft_skip + which pahole >/dev/null 2>&1 || return $ksft_skip + + ovs_drop_subsys=$(pahole -C skb_drop_reason_subsys | + awk '/OPENVSWITCH/ { print $3; }' | + tr -d ,) sbx_add "test_drop_reason" || return $? @@ -373,7 +378,7 @@ test_drop_reason() { "in_port(2),eth(),eth_type(0x0800),ipv4(src=172.31.110.20,proto=1),icmp()" 'drop' ovs_drop_record_and_run "test_drop_reason" ip netns exec client ping -c 2 172.31.110.20 - ovs_drop_reason_count 0x30001 # OVS_DROP_FLOW_ACTION + ovs_drop_reason_count 0x${ovs_drop_subsys}0001 # OVS_DROP_FLOW_ACTION if [[ "$?" -ne "2" ]]; then info "Did not detect expected drops: $?" 
return 1 @@ -390,7 +395,7 @@ test_drop_reason() { ovs_drop_record_and_run \ "test_drop_reason" ip netns exec client nc -i 1 -zuv 172.31.110.20 6000 - ovs_drop_reason_count 0x30004 # OVS_DROP_EXPLICIT_ACTION_ERROR + ovs_drop_reason_count 0x${ovs_drop_subsys}0004 # OVS_DROP_EXPLICIT_ACTION_ERROR if [[ "$?" -ne "1" ]]; then info "Did not detect expected explicit error drops: $?" return 1 @@ -398,7 +403,7 @@ test_drop_reason() { ovs_drop_record_and_run \ "test_drop_reason" ip netns exec client nc -i 1 -zuv 172.31.110.20 7000 - ovs_drop_reason_count 0x30003 # OVS_DROP_EXPLICIT_ACTION + ovs_drop_reason_count 0x${ovs_drop_subsys}0003 # OVS_DROP_EXPLICIT_ACTION if [[ "$?" -ne "1" ]]; then info "Did not detect expected explicit drops: $?" return 1 diff --git a/tools/testing/selftests/net/ovpn/.gitignore b/tools/testing/selftests/net/ovpn/.gitignore new file mode 100644 index 0000000000000..ee44c081ca7c0 --- /dev/null +++ b/tools/testing/selftests/net/ovpn/.gitignore @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0+ +ovpn-cli diff --git a/tools/testing/selftests/net/ovpn/Makefile b/tools/testing/selftests/net/ovpn/Makefile new file mode 100644 index 0000000000000..2d102878cb6dd --- /dev/null +++ b/tools/testing/selftests/net/ovpn/Makefile @@ -0,0 +1,31 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (C) 2020-2025 OpenVPN, Inc. +# +CFLAGS = -pedantic -Wextra -Wall -Wl,--no-as-needed -g -O0 -ggdb $(KHDR_INCLUDES) +VAR_CFLAGS = $(shell pkg-config --cflags libnl-3.0 libnl-genl-3.0 2>/dev/null) +ifeq ($(VAR_CFLAGS),) +VAR_CFLAGS = -I/usr/include/libnl3 +endif +CFLAGS += $(VAR_CFLAGS) + + +LDLIBS = -lmbedtls -lmbedcrypto +VAR_LDLIBS = $(shell pkg-config --libs libnl-3.0 libnl-genl-3.0 2>/dev/null) +ifeq ($(VAR_LDLIBS),) +VAR_LDLIBS = -lnl-genl-3 -lnl-3 +endif +LDLIBS += $(VAR_LDLIBS) + + +TEST_FILES = common.sh + +TEST_PROGS = test.sh \ + test-chachapoly.sh \ + test-tcp.sh \ + test-float.sh \ + test-close-socket.sh \ + test-close-socket-tcp.sh + +TEST_GEN_FILES := ovpn-cli + +include ../../lib.mk diff --git a/tools/testing/selftests/net/ovpn/common.sh b/tools/testing/selftests/net/ovpn/common.sh new file mode 100644 index 0000000000000..7502292a1ee03 --- /dev/null +++ b/tools/testing/selftests/net/ovpn/common.sh @@ -0,0 +1,92 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 +# Copyright (C) 2020-2025 OpenVPN, Inc. 
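+# +# Shared helpers sourced by the ovpn test scripts: they create one netns +# per peer, wire the namespaces together with veth pairs and drive +# ovpn-cli to set up interfaces, peers and data-channel keys +# (see setup_ns/add_peer/cleanup below).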
+# +# Author: Antonio Quartulli + +UDP_PEERS_FILE=${UDP_PEERS_FILE:-udp_peers.txt} +TCP_PEERS_FILE=${TCP_PEERS_FILE:-tcp_peers.txt} +OVPN_CLI=${OVPN_CLI:-./ovpn-cli} +ALG=${ALG:-aes} +PROTO=${PROTO:-UDP} +FLOAT=${FLOAT:-0} + +create_ns() { + ip netns add peer${1} +} + +setup_ns() { + MODE="P2P" + + if [ ${1} -eq 0 ]; then + MODE="MP" + for p in $(seq 1 ${NUM_PEERS}); do + ip link add veth${p} netns peer0 type veth peer name veth${p} netns peer${p} + + ip -n peer0 addr add 10.10.${p}.1/24 dev veth${p} + ip -n peer0 link set veth${p} up + + ip -n peer${p} addr add 10.10.${p}.2/24 dev veth${p} + ip -n peer${p} link set veth${p} up + done + fi + + ip netns exec peer${1} ${OVPN_CLI} new_iface tun${1} $MODE + ip -n peer${1} addr add ${2} dev tun${1} + ip -n peer${1} link set tun${1} up +} + +add_peer() { + if [ "${PROTO}" == "UDP" ]; then + if [ ${1} -eq 0 ]; then + ip netns exec peer0 ${OVPN_CLI} new_multi_peer tun0 1 ${UDP_PEERS_FILE} + + for p in $(seq 1 ${NUM_PEERS}); do + ip netns exec peer0 ${OVPN_CLI} new_key tun0 ${p} 1 0 ${ALG} 0 \ + data64.key + done + else + ip netns exec peer${1} ${OVPN_CLI} new_peer tun${1} ${1} 1 10.10.${1}.1 1 + ip netns exec peer${1} ${OVPN_CLI} new_key tun${1} ${1} 1 0 ${ALG} 1 \ + data64.key + fi + else + if [ ${1} -eq 0 ]; then + (ip netns exec peer0 ${OVPN_CLI} listen tun0 1 ${TCP_PEERS_FILE} && { + for p in $(seq 1 ${NUM_PEERS}); do + ip netns exec peer0 ${OVPN_CLI} new_key tun0 ${p} 1 0 \ + ${ALG} 0 data64.key + done + }) & + sleep 5 + else + ip netns exec peer${1} ${OVPN_CLI} connect tun${1} ${1} 10.10.${1}.1 1 \ + data64.key + fi + fi +} + +cleanup() { + # some ovpn-cli processes sleep in background so they need manual poking + killall $(basename ${OVPN_CLI}) 2>/dev/null || true + + # netns peer0 is deleted without erasing ifaces first + for p in $(seq 1 10); do + ip -n peer${p} link set tun${p} down 2>/dev/null || true + ip netns exec peer${p} ${OVPN_CLI} del_iface tun${p} 2>/dev/null || true + done + for p in $(seq 1 10); do + ip -n peer0 link del veth${p} 2>/dev/null || true + done + for p in $(seq 0 10); do + ip netns del peer${p} 2>/dev/null || true + done +} + +if [ "${PROTO}" == "UDP" ]; then + NUM_PEERS=${NUM_PEERS:-$(wc -l ${UDP_PEERS_FILE} | awk '{print $1}')} +else + NUM_PEERS=${NUM_PEERS:-$(wc -l ${TCP_PEERS_FILE} | awk '{print $1}')} +fi + + diff --git a/tools/testing/selftests/net/ovpn/config b/tools/testing/selftests/net/ovpn/config new file mode 100644 index 0000000000000..71946ba9fa175 --- /dev/null +++ b/tools/testing/selftests/net/ovpn/config @@ -0,0 +1,10 @@ +CONFIG_NET=y +CONFIG_INET=y +CONFIG_STREAM_PARSER=y +CONFIG_NET_UDP_TUNNEL=y +CONFIG_DST_CACHE=y +CONFIG_CRYPTO=y +CONFIG_CRYPTO_AES=y +CONFIG_CRYPTO_GCM=y +CONFIG_CRYPTO_CHACHA20POLY1305=y +CONFIG_OVPN=m diff --git a/tools/testing/selftests/net/ovpn/data64.key b/tools/testing/selftests/net/ovpn/data64.key new file mode 100644 index 0000000000000..a99e88c4e290f --- /dev/null +++ b/tools/testing/selftests/net/ovpn/data64.key @@ -0,0 +1,5 @@ +jRqMACN7d7/aFQNT8S7jkrBD8uwrgHbG5OQZP2eu4R1Y7tfpS2bf5RHv06Vi163CGoaIiTX99R3B +ia9ycAH8Wz1+9PWv51dnBLur9jbShlgZ2QHLtUc4a/gfT7zZwULXuuxdLnvR21DDeMBaTbkgbai9 +uvAa7ne1liIgGFzbv+Bas4HDVrygxIxuAnP5Qgc3648IJkZ0QEXPF+O9f0n5+QIvGCxkAUVx+5K6 +KIs+SoeWXnAopELmoGSjUpFtJbagXK82HfdqpuUxT2Tnuef0/14SzVE/vNleBNu2ZbyrSAaah8tE +BofkPJUBFY+YQcfZNM5Dgrw3i+Bpmpq/gpdg5w== diff --git a/tools/testing/selftests/net/ovpn/ovpn-cli.c b/tools/testing/selftests/net/ovpn/ovpn-cli.c new file mode 100644 index 0000000000000..b6948b23e6b7f --- /dev/null +++ 
b/tools/testing/selftests/net/ovpn/ovpn-cli.c @@ -0,0 +1,2395 @@ +// SPDX-License-Identifier: GPL-2.0 +/* OpenVPN data channel accelerator + * + * Copyright (C) 2020-2025 OpenVPN, Inc. + * + * Author: Antonio Quartulli + */ + +#include <stdio.h> +#include <inttypes.h> +#include <stdbool.h> +#include <stdlib.h> +#include <string.h> +#include <errno.h> +#include <time.h> +#include <unistd.h> +#include <getopt.h> +#include <netdb.h> + +#include <sys/socket.h> +#include <arpa/inet.h> +#include <net/if.h> + +#include <netlink/socket.h> +#include <netlink/netlink.h> +#include <netlink/genl/genl.h> +#include <netlink/genl/ctrl.h> +#include <netlink/genl/family.h> + +#include <mbedtls/base64.h> +#include <mbedtls/error.h> + +#include <linux/ovpn.h> + +/* defines to make checkpatch happy */ +#define strscpy strncpy +#define __always_unused __attribute__((__unused__)) + +/* libnl < 3.5.0 does not set the NLA_F_NESTED on its own, therefore we + * have to explicitly do it to prevent the kernel from failing upon + * parsing of the message + */ +#define nla_nest_start(_msg, _type) \ + nla_nest_start(_msg, (_type) | NLA_F_NESTED) + +/* libnl < 3.11.0 does not implement nla_get_uint() */ +uint64_t ovpn_nla_get_uint(struct nlattr *attr) +{ + if (nla_len(attr) == sizeof(uint32_t)) + return nla_get_u32(attr); + else + return nla_get_u64(attr); +} + +typedef int (*ovpn_nl_cb)(struct nl_msg *msg, void *arg); + +enum ovpn_key_direction { + KEY_DIR_IN = 0, + KEY_DIR_OUT, +}; + +#define KEY_LEN (256 / 8) +#define NONCE_LEN 8 + +#define PEER_ID_UNDEF 0x00FFFFFF +#define MAX_PEERS 10 + +struct nl_ctx { + struct nl_sock *nl_sock; + struct nl_msg *nl_msg; + struct nl_cb *nl_cb; + + int ovpn_dco_id; +}; + +enum ovpn_cmd { + CMD_INVALID, + CMD_NEW_IFACE, + CMD_DEL_IFACE, + CMD_LISTEN, + CMD_CONNECT, + CMD_NEW_PEER, + CMD_NEW_MULTI_PEER, + CMD_SET_PEER, + CMD_DEL_PEER, + CMD_GET_PEER, + CMD_NEW_KEY, + CMD_DEL_KEY, + CMD_GET_KEY, + CMD_SWAP_KEYS, + CMD_LISTEN_MCAST, +}; + +struct ovpn_ctx { + enum ovpn_cmd cmd; + + __u8 key_enc[KEY_LEN]; + __u8 key_dec[KEY_LEN]; + __u8 nonce[NONCE_LEN]; + + enum ovpn_cipher_alg cipher; + + sa_family_t sa_family; + + unsigned long peer_id; + unsigned long lport; + + union { + struct sockaddr_in in4; + struct sockaddr_in6 in6; + } remote; + + union { + struct sockaddr_in in4; + struct sockaddr_in6 in6; + } peer_ip; + + bool peer_ip_set; + + unsigned int ifindex; + char ifname[IFNAMSIZ]; + enum ovpn_mode mode; + bool mode_set; + + int socket; + int cli_sockets[MAX_PEERS]; + + __u32 keepalive_interval; + __u32 keepalive_timeout; + + enum ovpn_key_direction key_dir; + enum ovpn_key_slot key_slot; + int key_id; + + const char *peers_file; +}; + +static int ovpn_nl_recvmsgs(struct nl_ctx *ctx) +{ + int ret; + + ret = nl_recvmsgs(ctx->nl_sock, ctx->nl_cb); + + switch (ret) { + case -NLE_INTR: + fprintf(stderr, + "netlink received interrupt due to signal - ignoring\n"); + break; + case -NLE_NOMEM: + fprintf(stderr, "netlink out of memory error\n"); + break; + case -NLE_AGAIN: + fprintf(stderr, + "netlink reports blocking read - aborting wait\n"); + break; + default: + if (ret) + fprintf(stderr, "netlink reports error (%d): %s\n", + ret, nl_geterror(-ret)); + break; + } + + return ret; +} + +static struct nl_ctx *nl_ctx_alloc_flags(struct ovpn_ctx *ovpn, int cmd, + int flags) +{ + struct nl_ctx *ctx; + int err, ret; + + ctx = calloc(1, sizeof(*ctx)); + if (!ctx) + return NULL; + + ctx->nl_sock = nl_socket_alloc(); + if (!ctx->nl_sock) { + fprintf(stderr, "cannot allocate netlink socket\n"); + goto err_free; + } + + nl_socket_set_buffer_size(ctx->nl_sock, 8192, 8192); + + ret = genl_connect(ctx->nl_sock); + if (ret) { + fprintf(stderr, "cannot connect to generic netlink: %s\n", + nl_geterror(ret)); + goto err_sock; + } + + /* enable Extended ACK for detailed error reporting */ + err = 1; + 
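+ /* best effort: the result is not checked, on kernels without + * extended ACK support we only lose error message detail + */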
setsockopt(nl_socket_get_fd(ctx->nl_sock), SOL_NETLINK, NETLINK_EXT_ACK, + &err, sizeof(err)); + + ctx->ovpn_dco_id = genl_ctrl_resolve(ctx->nl_sock, OVPN_FAMILY_NAME); + if (ctx->ovpn_dco_id < 0) { + fprintf(stderr, "cannot find ovpn_dco netlink component: %d\n", + ctx->ovpn_dco_id); + goto err_free; + } + + ctx->nl_msg = nlmsg_alloc(); + if (!ctx->nl_msg) { + fprintf(stderr, "cannot allocate netlink message\n"); + goto err_sock; + } + + ctx->nl_cb = nl_cb_alloc(NL_CB_DEFAULT); + if (!ctx->nl_cb) { + fprintf(stderr, "failed to allocate netlink callback\n"); + goto err_msg; + } + + nl_socket_set_cb(ctx->nl_sock, ctx->nl_cb); + + genlmsg_put(ctx->nl_msg, 0, 0, ctx->ovpn_dco_id, 0, flags, cmd, 0); + + if (ovpn->ifindex > 0) + NLA_PUT_U32(ctx->nl_msg, OVPN_A_IFINDEX, ovpn->ifindex); + + return ctx; +nla_put_failure: +err_msg: + nlmsg_free(ctx->nl_msg); +err_sock: + nl_socket_free(ctx->nl_sock); +err_free: + free(ctx); + return NULL; +} + +static struct nl_ctx *nl_ctx_alloc(struct ovpn_ctx *ovpn, int cmd) +{ + return nl_ctx_alloc_flags(ovpn, cmd, 0); +} + +static void nl_ctx_free(struct nl_ctx *ctx) +{ + if (!ctx) + return; + + nl_socket_free(ctx->nl_sock); + nlmsg_free(ctx->nl_msg); + nl_cb_put(ctx->nl_cb); + free(ctx); +} + +static int ovpn_nl_cb_error(struct sockaddr_nl (*nla)__always_unused, + struct nlmsgerr *err, void *arg) +{ + struct nlmsghdr *nlh = (struct nlmsghdr *)err - 1; + struct nlattr *tb_msg[NLMSGERR_ATTR_MAX + 1]; + int len = nlh->nlmsg_len; + struct nlattr *attrs; + int *ret = arg; + int ack_len = sizeof(*nlh) + sizeof(int) + sizeof(*nlh); + + *ret = err->error; + + if (!(nlh->nlmsg_flags & NLM_F_ACK_TLVS)) + return NL_STOP; + + if (!(nlh->nlmsg_flags & NLM_F_CAPPED)) + ack_len += err->msg.nlmsg_len - sizeof(*nlh); + + if (len <= ack_len) + return NL_STOP; + + attrs = (void *)((uint8_t *)nlh + ack_len); + len -= ack_len; + + nla_parse(tb_msg, NLMSGERR_ATTR_MAX, attrs, len, NULL); + if (tb_msg[NLMSGERR_ATTR_MSG]) { + len = strnlen((char *)nla_data(tb_msg[NLMSGERR_ATTR_MSG]), + nla_len(tb_msg[NLMSGERR_ATTR_MSG])); + fprintf(stderr, "kernel error: %*s\n", len, + (char *)nla_data(tb_msg[NLMSGERR_ATTR_MSG])); + } + + if (tb_msg[NLMSGERR_ATTR_MISS_NEST]) { + fprintf(stderr, "missing required nesting type %u\n", + nla_get_u32(tb_msg[NLMSGERR_ATTR_MISS_NEST])); + } + + if (tb_msg[NLMSGERR_ATTR_MISS_TYPE]) { + fprintf(stderr, "missing required attribute type %u\n", + nla_get_u32(tb_msg[NLMSGERR_ATTR_MISS_TYPE])); + } + + return NL_STOP; +} + +static int ovpn_nl_cb_finish(struct nl_msg (*msg)__always_unused, + void *arg) +{ + int *status = arg; + + *status = 0; + return NL_SKIP; +} + +static int ovpn_nl_cb_ack(struct nl_msg (*msg)__always_unused, + void *arg) +{ + int *status = arg; + + *status = 0; + return NL_STOP; +} + +static int ovpn_nl_msg_send(struct nl_ctx *ctx, ovpn_nl_cb cb) +{ + int status = 1; + + nl_cb_err(ctx->nl_cb, NL_CB_CUSTOM, ovpn_nl_cb_error, &status); + nl_cb_set(ctx->nl_cb, NL_CB_FINISH, NL_CB_CUSTOM, ovpn_nl_cb_finish, + &status); + nl_cb_set(ctx->nl_cb, NL_CB_ACK, NL_CB_CUSTOM, ovpn_nl_cb_ack, &status); + + if (cb) + nl_cb_set(ctx->nl_cb, NL_CB_VALID, NL_CB_CUSTOM, cb, ctx); + + nl_send_auto_complete(ctx->nl_sock, ctx->nl_msg); + + while (status == 1) + ovpn_nl_recvmsgs(ctx); + + if (status < 0) + fprintf(stderr, "failed to send netlink message: %s (%d)\n", + strerror(-status), status); + + return status; +} + +static int ovpn_parse_key(const char *file, struct ovpn_ctx *ctx) +{ + int idx_enc, idx_dec, ret = -1; + unsigned char *ckey = NULL; + __u8 *bkey = 
NULL; + size_t olen = 0; + long ckey_len; + FILE *fp; + + fp = fopen(file, "r"); + if (!fp) { + fprintf(stderr, "cannot open: %s\n", file); + return -1; + } + + /* get file size */ + fseek(fp, 0L, SEEK_END); + ckey_len = ftell(fp); + rewind(fp); + + /* if the file is longer, let's just read a portion */ + if (ckey_len > 256) + ckey_len = 256; + + ckey = malloc(ckey_len); + if (!ckey) + goto err; + + ret = fread(ckey, 1, ckey_len, fp); + if (ret != ckey_len) { + fprintf(stderr, + "couldn't read enough data from key file: %d bytes read\n", + ret); + goto err; + } + + olen = 0; + ret = mbedtls_base64_decode(NULL, 0, &olen, ckey, ckey_len); + if (ret != MBEDTLS_ERR_BASE64_BUFFER_TOO_SMALL) { + char buf[256]; + + mbedtls_strerror(ret, buf, sizeof(buf)); + fprintf(stderr, "unexpected base64 error1: %s (%d)\n", buf, + ret); + + goto err; + } + + bkey = malloc(olen); + if (!bkey) { + fprintf(stderr, "cannot allocate binary key buffer\n"); + goto err; + } + + ret = mbedtls_base64_decode(bkey, olen, &olen, ckey, ckey_len); + if (ret) { + char buf[256]; + + mbedtls_strerror(ret, buf, sizeof(buf)); + fprintf(stderr, "unexpected base64 error2: %s (%d)\n", buf, + ret); + + goto err; + } + + if (olen < 2 * KEY_LEN + NONCE_LEN) { + fprintf(stderr, + "not enough data in key file, found %zdB but needs %dB\n", + olen, 2 * KEY_LEN + NONCE_LEN); + goto err; + } + + switch (ctx->key_dir) { + case KEY_DIR_IN: + idx_enc = 0; + idx_dec = 1; + break; + case KEY_DIR_OUT: + idx_enc = 1; + idx_dec = 0; + break; + default: + goto err; + } + + memcpy(ctx->key_enc, bkey + KEY_LEN * idx_enc, KEY_LEN); + memcpy(ctx->key_dec, bkey + KEY_LEN * idx_dec, KEY_LEN); + memcpy(ctx->nonce, bkey + 2 * KEY_LEN, NONCE_LEN); + + ret = 0; + +err: + fclose(fp); + free(bkey); + free(ckey); + + return ret; +} + +static int ovpn_parse_cipher(const char *cipher, struct ovpn_ctx *ctx) +{ + if (strcmp(cipher, "aes") == 0) + ctx->cipher = OVPN_CIPHER_ALG_AES_GCM; + else if (strcmp(cipher, "chachapoly") == 0) + ctx->cipher = OVPN_CIPHER_ALG_CHACHA20_POLY1305; + else if (strcmp(cipher, "none") == 0) + ctx->cipher = OVPN_CIPHER_ALG_NONE; + else + return -ENOTSUP; + + return 0; +} + +static int ovpn_parse_key_direction(const char *dir, struct ovpn_ctx *ctx) +{ + int in_dir; + + in_dir = strtoll(dir, NULL, 10); + switch (in_dir) { + case KEY_DIR_IN: + case KEY_DIR_OUT: + ctx->key_dir = in_dir; + break; + default: + fprintf(stderr, + "invalid key direction provided. 
Can be 0 or 1 only\n"); + return -1; + } + + return 0; +} + +static int ovpn_socket(struct ovpn_ctx *ctx, sa_family_t family, int proto) +{ + struct sockaddr_storage local_sock = { 0 }; + struct sockaddr_in6 *in6; + struct sockaddr_in *in; + int ret, s, sock_type; + size_t sock_len; + + if (proto == IPPROTO_UDP) + sock_type = SOCK_DGRAM; + else if (proto == IPPROTO_TCP) + sock_type = SOCK_STREAM; + else + return -EINVAL; + + s = socket(family, sock_type, 0); + if (s < 0) { + perror("cannot create socket"); + return -1; + } + + switch (family) { + case AF_INET: + in = (struct sockaddr_in *)&local_sock; + in->sin_family = family; + in->sin_port = htons(ctx->lport); + in->sin_addr.s_addr = htonl(INADDR_ANY); + sock_len = sizeof(*in); + break; + case AF_INET6: + in6 = (struct sockaddr_in6 *)&local_sock; + in6->sin6_family = family; + in6->sin6_port = htons(ctx->lport); + in6->sin6_addr = in6addr_any; + sock_len = sizeof(*in6); + break; + default: + return -1; + } + + int opt = 1; + + ret = setsockopt(s, SOL_SOCKET, SO_REUSEADDR, &opt, sizeof(opt)); + + if (ret < 0) { + perror("setsockopt for SO_REUSEADDR"); + return ret; + } + + ret = setsockopt(s, SOL_SOCKET, SO_REUSEPORT, &opt, sizeof(opt)); + if (ret < 0) { + perror("setsockopt for SO_REUSEPORT"); + return ret; + } + + if (family == AF_INET6) { + opt = 0; + if (setsockopt(s, IPPROTO_IPV6, IPV6_V6ONLY, &opt, + sizeof(opt))) { + perror("failed to set IPV6_V6ONLY"); + return -1; + } + } + + ret = bind(s, (struct sockaddr *)&local_sock, sock_len); + if (ret < 0) { + perror("cannot bind socket"); + goto err_socket; + } + + ctx->socket = s; + ctx->sa_family = family; + return 0; + +err_socket: + close(s); + return -1; +} + +static int ovpn_udp_socket(struct ovpn_ctx *ctx, sa_family_t family) +{ + return ovpn_socket(ctx, family, IPPROTO_UDP); +} + +static int ovpn_listen(struct ovpn_ctx *ctx, sa_family_t family) +{ + int ret; + + ret = ovpn_socket(ctx, family, IPPROTO_TCP); + if (ret < 0) + return ret; + + ret = listen(ctx->socket, 10); + if (ret < 0) { + perror("listen"); + close(ctx->socket); + return -1; + } + + return 0; +} + +static int ovpn_accept(struct ovpn_ctx *ctx) +{ + socklen_t socklen; + int ret; + + socklen = sizeof(ctx->remote); + ret = accept(ctx->socket, (struct sockaddr *)&ctx->remote, &socklen); + if (ret < 0) { + perror("accept"); + goto err; + } + + fprintf(stderr, "Connection received!\n"); + + switch (socklen) { + case sizeof(struct sockaddr_in): + case sizeof(struct sockaddr_in6): + break; + default: + fprintf(stderr, "error: expecting IPv4 or IPv6 connection\n"); + close(ret); + ret = -EINVAL; + goto err; + } + + return ret; +err: + close(ctx->socket); + return ret; +} + +static int ovpn_connect(struct ovpn_ctx *ovpn) +{ + socklen_t socklen; + int s, ret; + + s = socket(ovpn->remote.in4.sin_family, SOCK_STREAM, 0); + if (s < 0) { + perror("cannot create socket"); + return -1; + } + + switch (ovpn->remote.in4.sin_family) { + case AF_INET: + socklen = sizeof(struct sockaddr_in); + break; + case AF_INET6: + socklen = sizeof(struct sockaddr_in6); + break; + default: + return -EOPNOTSUPP; + } + + ret = connect(s, (struct sockaddr *)&ovpn->remote, socklen); + if (ret < 0) { + perror("connect"); + goto err; + } + + fprintf(stderr, "connected\n"); + + ovpn->socket = s; + + return 0; +err: + close(s); + return ret; +} + +static int ovpn_new_peer(struct ovpn_ctx *ovpn, bool is_tcp) +{ + struct nlattr *attr; + struct nl_ctx *ctx; + int ret = -1; + + ctx = nl_ctx_alloc(ovpn, OVPN_CMD_PEER_NEW); + if (!ctx) + return -ENOMEM; + + attr = 
nla_nest_start(ctx->nl_msg, OVPN_A_PEER); + NLA_PUT_U32(ctx->nl_msg, OVPN_A_PEER_ID, ovpn->peer_id); + NLA_PUT_U32(ctx->nl_msg, OVPN_A_PEER_SOCKET, ovpn->socket); + + if (!is_tcp) { + switch (ovpn->remote.in4.sin_family) { + case AF_INET: + NLA_PUT_U32(ctx->nl_msg, OVPN_A_PEER_REMOTE_IPV4, + ovpn->remote.in4.sin_addr.s_addr); + NLA_PUT_U16(ctx->nl_msg, OVPN_A_PEER_REMOTE_PORT, + ovpn->remote.in4.sin_port); + break; + case AF_INET6: + NLA_PUT(ctx->nl_msg, OVPN_A_PEER_REMOTE_IPV6, + sizeof(ovpn->remote.in6.sin6_addr), + &ovpn->remote.in6.sin6_addr); + NLA_PUT_U32(ctx->nl_msg, + OVPN_A_PEER_REMOTE_IPV6_SCOPE_ID, + ovpn->remote.in6.sin6_scope_id); + NLA_PUT_U16(ctx->nl_msg, OVPN_A_PEER_REMOTE_PORT, + ovpn->remote.in6.sin6_port); + break; + default: + fprintf(stderr, + "Invalid family for remote socket address\n"); + goto nla_put_failure; + } + } + + if (ovpn->peer_ip_set) { + switch (ovpn->peer_ip.in4.sin_family) { + case AF_INET: + NLA_PUT_U32(ctx->nl_msg, OVPN_A_PEER_VPN_IPV4, + ovpn->peer_ip.in4.sin_addr.s_addr); + break; + case AF_INET6: + NLA_PUT(ctx->nl_msg, OVPN_A_PEER_VPN_IPV6, + sizeof(struct in6_addr), + &ovpn->peer_ip.in6.sin6_addr); + break; + default: + fprintf(stderr, "Invalid family for peer address\n"); + goto nla_put_failure; + } + } + + nla_nest_end(ctx->nl_msg, attr); + + ret = ovpn_nl_msg_send(ctx, NULL); +nla_put_failure: + nl_ctx_free(ctx); + return ret; +} + +static int ovpn_set_peer(struct ovpn_ctx *ovpn) +{ + struct nlattr *attr; + struct nl_ctx *ctx; + int ret = -1; + + ctx = nl_ctx_alloc(ovpn, OVPN_CMD_PEER_SET); + if (!ctx) + return -ENOMEM; + + attr = nla_nest_start(ctx->nl_msg, OVPN_A_PEER); + NLA_PUT_U32(ctx->nl_msg, OVPN_A_PEER_ID, ovpn->peer_id); + NLA_PUT_U32(ctx->nl_msg, OVPN_A_PEER_KEEPALIVE_INTERVAL, + ovpn->keepalive_interval); + NLA_PUT_U32(ctx->nl_msg, OVPN_A_PEER_KEEPALIVE_TIMEOUT, + ovpn->keepalive_timeout); + nla_nest_end(ctx->nl_msg, attr); + + ret = ovpn_nl_msg_send(ctx, NULL); +nla_put_failure: + nl_ctx_free(ctx); + return ret; +} + +static int ovpn_del_peer(struct ovpn_ctx *ovpn) +{ + struct nlattr *attr; + struct nl_ctx *ctx; + int ret = -1; + + ctx = nl_ctx_alloc(ovpn, OVPN_CMD_PEER_DEL); + if (!ctx) + return -ENOMEM; + + attr = nla_nest_start(ctx->nl_msg, OVPN_A_PEER); + NLA_PUT_U32(ctx->nl_msg, OVPN_A_PEER_ID, ovpn->peer_id); + nla_nest_end(ctx->nl_msg, attr); + + ret = ovpn_nl_msg_send(ctx, NULL); +nla_put_failure: + nl_ctx_free(ctx); + return ret; +} + +static int ovpn_handle_peer(struct nl_msg *msg, void (*arg)__always_unused) +{ + struct nlattr *pattrs[OVPN_A_PEER_MAX + 1]; + struct genlmsghdr *gnlh = nlmsg_data(nlmsg_hdr(msg)); + struct nlattr *attrs[OVPN_A_MAX + 1]; + __u16 rport = 0, lport = 0; + + nla_parse(attrs, OVPN_A_MAX, genlmsg_attrdata(gnlh, 0), + genlmsg_attrlen(gnlh, 0), NULL); + + if (!attrs[OVPN_A_PEER]) { + fprintf(stderr, "no packet content in netlink message\n"); + return NL_SKIP; + } + + nla_parse(pattrs, OVPN_A_PEER_MAX, nla_data(attrs[OVPN_A_PEER]), + nla_len(attrs[OVPN_A_PEER]), NULL); + + if (pattrs[OVPN_A_PEER_ID]) + fprintf(stderr, "* Peer %u\n", + nla_get_u32(pattrs[OVPN_A_PEER_ID])); + + if (pattrs[OVPN_A_PEER_SOCKET_NETNSID]) + fprintf(stderr, "\tsocket NetNS ID: %d\n", + nla_get_s32(pattrs[OVPN_A_PEER_SOCKET_NETNSID])); + + if (pattrs[OVPN_A_PEER_VPN_IPV4]) { + char buf[INET_ADDRSTRLEN]; + + inet_ntop(AF_INET, nla_data(pattrs[OVPN_A_PEER_VPN_IPV4]), + buf, sizeof(buf)); + fprintf(stderr, "\tVPN IPv4: %s\n", buf); + } + + if (pattrs[OVPN_A_PEER_VPN_IPV6]) { + char buf[INET6_ADDRSTRLEN]; + + inet_ntop(AF_INET6, 
nla_data(pattrs[OVPN_A_PEER_VPN_IPV6]), + buf, sizeof(buf)); + fprintf(stderr, "\tVPN IPv6: %s\n", buf); + } + + if (pattrs[OVPN_A_PEER_LOCAL_PORT]) + lport = ntohs(nla_get_u16(pattrs[OVPN_A_PEER_LOCAL_PORT])); + + if (pattrs[OVPN_A_PEER_REMOTE_PORT]) + rport = ntohs(nla_get_u16(pattrs[OVPN_A_PEER_REMOTE_PORT])); + + if (pattrs[OVPN_A_PEER_REMOTE_IPV6]) { + void *ip = pattrs[OVPN_A_PEER_REMOTE_IPV6]; + char buf[INET6_ADDRSTRLEN]; + int scope_id = -1; + + if (pattrs[OVPN_A_PEER_REMOTE_IPV6_SCOPE_ID]) { + void *p = pattrs[OVPN_A_PEER_REMOTE_IPV6_SCOPE_ID]; + + scope_id = nla_get_u32(p); + } + + inet_ntop(AF_INET6, nla_data(ip), buf, sizeof(buf)); + fprintf(stderr, "\tRemote: %s:%hu (scope-id: %u)\n", buf, rport, + scope_id); + + if (pattrs[OVPN_A_PEER_LOCAL_IPV6]) { + void *ip = pattrs[OVPN_A_PEER_LOCAL_IPV6]; + + inet_ntop(AF_INET6, nla_data(ip), buf, sizeof(buf)); + fprintf(stderr, "\tLocal: %s:%hu\n", buf, lport); + } + } + + if (pattrs[OVPN_A_PEER_REMOTE_IPV4]) { + void *ip = pattrs[OVPN_A_PEER_REMOTE_IPV4]; + char buf[INET_ADDRSTRLEN]; + + inet_ntop(AF_INET, nla_data(ip), buf, sizeof(buf)); + fprintf(stderr, "\tRemote: %s:%hu\n", buf, rport); + + if (pattrs[OVPN_A_PEER_LOCAL_IPV4]) { + void *p = pattrs[OVPN_A_PEER_LOCAL_IPV4]; + + inet_ntop(AF_INET, nla_data(p), buf, sizeof(buf)); + fprintf(stderr, "\tLocal: %s:%hu\n", buf, lport); + } + } + + if (pattrs[OVPN_A_PEER_KEEPALIVE_INTERVAL]) { + void *p = pattrs[OVPN_A_PEER_KEEPALIVE_INTERVAL]; + + fprintf(stderr, "\tKeepalive interval: %u sec\n", + nla_get_u32(p)); + } + + if (pattrs[OVPN_A_PEER_KEEPALIVE_TIMEOUT]) + fprintf(stderr, "\tKeepalive timeout: %u sec\n", + nla_get_u32(pattrs[OVPN_A_PEER_KEEPALIVE_TIMEOUT])); + + if (pattrs[OVPN_A_PEER_VPN_RX_BYTES]) + fprintf(stderr, "\tVPN RX bytes: %" PRIu64 "\n", + ovpn_nla_get_uint(pattrs[OVPN_A_PEER_VPN_RX_BYTES])); + + if (pattrs[OVPN_A_PEER_VPN_TX_BYTES]) + fprintf(stderr, "\tVPN TX bytes: %" PRIu64 "\n", + ovpn_nla_get_uint(pattrs[OVPN_A_PEER_VPN_TX_BYTES])); + + if (pattrs[OVPN_A_PEER_VPN_RX_PACKETS]) + fprintf(stderr, "\tVPN RX packets: %" PRIu64 "\n", + ovpn_nla_get_uint(pattrs[OVPN_A_PEER_VPN_RX_PACKETS])); + + if (pattrs[OVPN_A_PEER_VPN_TX_PACKETS]) + fprintf(stderr, "\tVPN TX packets: %" PRIu64 "\n", + ovpn_nla_get_uint(pattrs[OVPN_A_PEER_VPN_TX_PACKETS])); + + if (pattrs[OVPN_A_PEER_LINK_RX_BYTES]) + fprintf(stderr, "\tLINK RX bytes: %" PRIu64 "\n", + ovpn_nla_get_uint(pattrs[OVPN_A_PEER_LINK_RX_BYTES])); + + if (pattrs[OVPN_A_PEER_LINK_TX_BYTES]) + fprintf(stderr, "\tLINK TX bytes: %" PRIu64 "\n", + ovpn_nla_get_uint(pattrs[OVPN_A_PEER_LINK_TX_BYTES])); + + if (pattrs[OVPN_A_PEER_LINK_RX_PACKETS]) + fprintf(stderr, "\tLINK RX packets: %" PRIu64 "\n", + ovpn_nla_get_uint(pattrs[OVPN_A_PEER_LINK_RX_PACKETS])); + + if (pattrs[OVPN_A_PEER_LINK_TX_PACKETS]) + fprintf(stderr, "\tLINK TX packets: %" PRIu64 "\n", + ovpn_nla_get_uint(pattrs[OVPN_A_PEER_LINK_TX_PACKETS])); + + return NL_SKIP; +} + +static int ovpn_get_peer(struct ovpn_ctx *ovpn) +{ + int flags = 0, ret = -1; + struct nlattr *attr; + struct nl_ctx *ctx; + + if (ovpn->peer_id == PEER_ID_UNDEF) + flags = NLM_F_DUMP; + + ctx = nl_ctx_alloc_flags(ovpn, OVPN_CMD_PEER_GET, flags); + if (!ctx) + return -ENOMEM; + + if (ovpn->peer_id != PEER_ID_UNDEF) { + attr = nla_nest_start(ctx->nl_msg, OVPN_A_PEER); + NLA_PUT_U32(ctx->nl_msg, OVPN_A_PEER_ID, ovpn->peer_id); + nla_nest_end(ctx->nl_msg, attr); + } + + ret = ovpn_nl_msg_send(ctx, ovpn_handle_peer); +nla_put_failure: + nl_ctx_free(ctx); + return ret; +} + +static int 
ovpn_new_key(struct ovpn_ctx *ovpn) +{ + struct nlattr *keyconf, *key_dir; + struct nl_ctx *ctx; + int ret = -1; + + ctx = nl_ctx_alloc(ovpn, OVPN_CMD_KEY_NEW); + if (!ctx) + return -ENOMEM; + + keyconf = nla_nest_start(ctx->nl_msg, OVPN_A_KEYCONF); + NLA_PUT_U32(ctx->nl_msg, OVPN_A_KEYCONF_PEER_ID, ovpn->peer_id); + NLA_PUT_U32(ctx->nl_msg, OVPN_A_KEYCONF_SLOT, ovpn->key_slot); + NLA_PUT_U32(ctx->nl_msg, OVPN_A_KEYCONF_KEY_ID, ovpn->key_id); + NLA_PUT_U32(ctx->nl_msg, OVPN_A_KEYCONF_CIPHER_ALG, ovpn->cipher); + + key_dir = nla_nest_start(ctx->nl_msg, OVPN_A_KEYCONF_ENCRYPT_DIR); + NLA_PUT(ctx->nl_msg, OVPN_A_KEYDIR_CIPHER_KEY, KEY_LEN, ovpn->key_enc); + NLA_PUT(ctx->nl_msg, OVPN_A_KEYDIR_NONCE_TAIL, NONCE_LEN, ovpn->nonce); + nla_nest_end(ctx->nl_msg, key_dir); + + key_dir = nla_nest_start(ctx->nl_msg, OVPN_A_KEYCONF_DECRYPT_DIR); + NLA_PUT(ctx->nl_msg, OVPN_A_KEYDIR_CIPHER_KEY, KEY_LEN, ovpn->key_dec); + NLA_PUT(ctx->nl_msg, OVPN_A_KEYDIR_NONCE_TAIL, NONCE_LEN, ovpn->nonce); + nla_nest_end(ctx->nl_msg, key_dir); + + nla_nest_end(ctx->nl_msg, keyconf); + + ret = ovpn_nl_msg_send(ctx, NULL); +nla_put_failure: + nl_ctx_free(ctx); + return ret; +} + +static int ovpn_del_key(struct ovpn_ctx *ovpn) +{ + struct nlattr *keyconf; + struct nl_ctx *ctx; + int ret = -1; + + ctx = nl_ctx_alloc(ovpn, OVPN_CMD_KEY_DEL); + if (!ctx) + return -ENOMEM; + + keyconf = nla_nest_start(ctx->nl_msg, OVPN_A_KEYCONF); + NLA_PUT_U32(ctx->nl_msg, OVPN_A_KEYCONF_PEER_ID, ovpn->peer_id); + NLA_PUT_U32(ctx->nl_msg, OVPN_A_KEYCONF_SLOT, ovpn->key_slot); + nla_nest_end(ctx->nl_msg, keyconf); + + ret = ovpn_nl_msg_send(ctx, NULL); +nla_put_failure: + nl_ctx_free(ctx); + return ret; +} + +static int ovpn_handle_key(struct nl_msg *msg, void (*arg)__always_unused) +{ + struct nlattr *kattrs[OVPN_A_KEYCONF_MAX + 1]; + struct genlmsghdr *gnlh = nlmsg_data(nlmsg_hdr(msg)); + struct nlattr *attrs[OVPN_A_MAX + 1]; + + nla_parse(attrs, OVPN_A_MAX, genlmsg_attrdata(gnlh, 0), + genlmsg_attrlen(gnlh, 0), NULL); + + if (!attrs[OVPN_A_KEYCONF]) { + fprintf(stderr, "no packet content in netlink message\n"); + return NL_SKIP; + } + + nla_parse(kattrs, OVPN_A_KEYCONF_MAX, nla_data(attrs[OVPN_A_KEYCONF]), + nla_len(attrs[OVPN_A_KEYCONF]), NULL); + + if (kattrs[OVPN_A_KEYCONF_PEER_ID]) + fprintf(stderr, "* Peer %u\n", + nla_get_u32(kattrs[OVPN_A_KEYCONF_PEER_ID])); + if (kattrs[OVPN_A_KEYCONF_SLOT]) { + fprintf(stderr, "\t- Slot: "); + switch (nla_get_u32(kattrs[OVPN_A_KEYCONF_SLOT])) { + case OVPN_KEY_SLOT_PRIMARY: + fprintf(stderr, "primary\n"); + break; + case OVPN_KEY_SLOT_SECONDARY: + fprintf(stderr, "secondary\n"); + break; + default: + fprintf(stderr, "invalid (%u)\n", + nla_get_u32(kattrs[OVPN_A_KEYCONF_SLOT])); + break; + } + } + if (kattrs[OVPN_A_KEYCONF_KEY_ID]) + fprintf(stderr, "\t- Key ID: %u\n", + nla_get_u32(kattrs[OVPN_A_KEYCONF_KEY_ID])); + if (kattrs[OVPN_A_KEYCONF_CIPHER_ALG]) { + fprintf(stderr, "\t- Cipher: "); + switch (nla_get_u32(kattrs[OVPN_A_KEYCONF_CIPHER_ALG])) { + case OVPN_CIPHER_ALG_NONE: + fprintf(stderr, "none\n"); + break; + case OVPN_CIPHER_ALG_AES_GCM: + fprintf(stderr, "aes-gcm\n"); + break; + case OVPN_CIPHER_ALG_CHACHA20_POLY1305: + fprintf(stderr, "chacha20poly1305\n"); + break; + default: + fprintf(stderr, "invalid (%u)\n", + nla_get_u32(kattrs[OVPN_A_KEYCONF_CIPHER_ALG])); + break; + } + } + + return NL_SKIP; +} + +static int ovpn_get_key(struct ovpn_ctx *ovpn) +{ + struct nlattr *keyconf; + struct nl_ctx *ctx; + int ret = -1; + + ctx = nl_ctx_alloc(ovpn, OVPN_CMD_KEY_GET); + if (!ctx) + return 
-ENOMEM; + + keyconf = nla_nest_start(ctx->nl_msg, OVPN_A_KEYCONF); + NLA_PUT_U32(ctx->nl_msg, OVPN_A_KEYCONF_PEER_ID, ovpn->peer_id); + NLA_PUT_U32(ctx->nl_msg, OVPN_A_KEYCONF_SLOT, ovpn->key_slot); + nla_nest_end(ctx->nl_msg, keyconf); + + ret = ovpn_nl_msg_send(ctx, ovpn_handle_key); +nla_put_failure: + nl_ctx_free(ctx); + return ret; +} + +static int ovpn_swap_keys(struct ovpn_ctx *ovpn) +{ + struct nl_ctx *ctx; + struct nlattr *kc; + int ret = -1; + + ctx = nl_ctx_alloc(ovpn, OVPN_CMD_KEY_SWAP); + if (!ctx) + return -ENOMEM; + + kc = nla_nest_start(ctx->nl_msg, OVPN_A_KEYCONF); + NLA_PUT_U32(ctx->nl_msg, OVPN_A_KEYCONF_PEER_ID, ovpn->peer_id); + nla_nest_end(ctx->nl_msg, kc); + + ret = ovpn_nl_msg_send(ctx, NULL); +nla_put_failure: + nl_ctx_free(ctx); + return ret; +} + +/* Helper function used to easily add attributes to a rtnl message */ +static int ovpn_addattr(struct nlmsghdr *n, int maxlen, int type, + const void *data, int alen) +{ + int len = RTA_LENGTH(alen); + struct rtattr *rta; + + if ((int)(NLMSG_ALIGN(n->nlmsg_len) + RTA_ALIGN(len)) > maxlen) { + fprintf(stderr, "%s: rtnl: message exceeded bound of %d\n", + __func__, maxlen); + return -EMSGSIZE; + } + + rta = nlmsg_tail(n); + rta->rta_type = type; + rta->rta_len = len; + + if (!data) + memset(RTA_DATA(rta), 0, alen); + else + memcpy(RTA_DATA(rta), data, alen); + + n->nlmsg_len = NLMSG_ALIGN(n->nlmsg_len) + RTA_ALIGN(len); + + return 0; +} + +static struct rtattr *ovpn_nest_start(struct nlmsghdr *msg, size_t max_size, + int attr) +{ + struct rtattr *nest = nlmsg_tail(msg); + + if (ovpn_addattr(msg, max_size, attr, NULL, 0) < 0) + return NULL; + + return nest; +} + +static void ovpn_nest_end(struct nlmsghdr *msg, struct rtattr *nest) +{ + nest->rta_len = (uint8_t *)nlmsg_tail(msg) - (uint8_t *)nest; +} + +#define RT_SNDBUF_SIZE (1024 * 2) +#define RT_RCVBUF_SIZE (1024 * 4) + +/* Open RTNL socket */ +static int ovpn_rt_socket(void) +{ + int sndbuf = RT_SNDBUF_SIZE, rcvbuf = RT_RCVBUF_SIZE, fd; + + fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE); + if (fd < 0) { + fprintf(stderr, "%s: cannot open netlink socket\n", __func__); + return fd; + } + + if (setsockopt(fd, SOL_SOCKET, SO_SNDBUF, &sndbuf, + sizeof(sndbuf)) < 0) { + fprintf(stderr, "%s: SO_SNDBUF\n", __func__); + close(fd); + return -1; + } + + if (setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &rcvbuf, + sizeof(rcvbuf)) < 0) { + fprintf(stderr, "%s: SO_RCVBUF\n", __func__); + close(fd); + return -1; + } + + return fd; +} + +/* Bind socket to Netlink subsystem */ +static int ovpn_rt_bind(int fd, uint32_t groups) +{ + struct sockaddr_nl local = { 0 }; + socklen_t addr_len; + + local.nl_family = AF_NETLINK; + local.nl_groups = groups; + + if (bind(fd, (struct sockaddr *)&local, sizeof(local)) < 0) { + fprintf(stderr, "%s: cannot bind netlink socket: %d\n", + __func__, errno); + return -errno; + } + + addr_len = sizeof(local); + if (getsockname(fd, (struct sockaddr *)&local, &addr_len) < 0) { + fprintf(stderr, "%s: cannot getsockname: %d\n", __func__, + errno); + return -errno; + } + + if (addr_len != sizeof(local)) { + fprintf(stderr, "%s: wrong address length %d\n", __func__, + addr_len); + return -EINVAL; + } + + if (local.nl_family != AF_NETLINK) { + fprintf(stderr, "%s: wrong address family %d\n", __func__, + local.nl_family); + return -EINVAL; + } + + return 0; +} + +typedef int (*ovpn_parse_reply_cb)(struct nlmsghdr *msg, void *arg); + +/* Send Netlink message and run callback on reply (if specified) */ +static int ovpn_rt_send(struct nlmsghdr *payload, pid_t peer, + 
unsigned int groups, ovpn_parse_reply_cb cb, + void *arg_cb) +{ + int len, rem_len, fd, ret, rcv_len; + struct sockaddr_nl nladdr = { 0 }; + struct nlmsgerr *err; + struct nlmsghdr *h; + char buf[1024 * 16]; + struct iovec iov = { + .iov_base = payload, + .iov_len = payload->nlmsg_len, + }; + struct msghdr nlmsg = { + .msg_name = &nladdr, + .msg_namelen = sizeof(nladdr), + .msg_iov = &iov, + .msg_iovlen = 1, + }; + + nladdr.nl_family = AF_NETLINK; + nladdr.nl_pid = peer; + nladdr.nl_groups = groups; + + payload->nlmsg_seq = time(NULL); + + /* no need to send reply */ + if (!cb) + payload->nlmsg_flags |= NLM_F_ACK; + + fd = ovpn_rt_socket(); + if (fd < 0) { + fprintf(stderr, "%s: can't open rtnl socket\n", __func__); + return -errno; + } + + ret = ovpn_rt_bind(fd, 0); + if (ret < 0) { + fprintf(stderr, "%s: can't bind rtnl socket\n", __func__); + ret = -errno; + goto out; + } + + ret = sendmsg(fd, &nlmsg, 0); + if (ret < 0) { + fprintf(stderr, "%s: rtnl: error on sendmsg()\n", __func__); + ret = -errno; + goto out; + } + + /* prepare buffer to store RTNL replies */ + memset(buf, 0, sizeof(buf)); + iov.iov_base = buf; + + while (1) { + /* + * iov_len is modified by recvmsg(), therefore has to be initialized before + * using it again + */ + iov.iov_len = sizeof(buf); + rcv_len = recvmsg(fd, &nlmsg, 0); + if (rcv_len < 0) { + if (errno == EINTR || errno == EAGAIN) { + fprintf(stderr, "%s: interrupted call\n", + __func__); + continue; + } + fprintf(stderr, "%s: rtnl: error on recvmsg()\n", + __func__); + ret = -errno; + goto out; + } + + if (rcv_len == 0) { + fprintf(stderr, + "%s: rtnl: socket reached unexpected EOF\n", + __func__); + ret = -EIO; + goto out; + } + + if (nlmsg.msg_namelen != sizeof(nladdr)) { + fprintf(stderr, + "%s: sender address length: %u (expected %zu)\n", + __func__, nlmsg.msg_namelen, sizeof(nladdr)); + ret = -EIO; + goto out; + } + + h = (struct nlmsghdr *)buf; + while (rcv_len >= (int)sizeof(*h)) { + len = h->nlmsg_len; + rem_len = len - sizeof(*h); + + if (rem_len < 0 || len > rcv_len) { + if (nlmsg.msg_flags & MSG_TRUNC) { + fprintf(stderr, "%s: truncated message\n", + __func__); + ret = -EIO; + goto out; + } + fprintf(stderr, "%s: malformed message: len=%d\n", + __func__, len); + ret = -EIO; + goto out; + } + + if (h->nlmsg_type == NLMSG_DONE) { + ret = 0; + goto out; + } + + if (h->nlmsg_type == NLMSG_ERROR) { + err = (struct nlmsgerr *)NLMSG_DATA(h); + if (rem_len < (int)sizeof(struct nlmsgerr)) { + fprintf(stderr, "%s: ERROR truncated\n", + __func__); + ret = -EIO; + goto out; + } + + if (err->error) { + fprintf(stderr, "%s: (%d) %s\n", + __func__, err->error, + strerror(-err->error)); + ret = err->error; + goto out; + } + + ret = 0; + if (cb) { + int r = cb(h, arg_cb); + + if (r <= 0) + ret = r; + } + goto out; + } + + if (cb) { + int r = cb(h, arg_cb); + + if (r <= 0) { + ret = r; + goto out; + } + } else { + fprintf(stderr, "%s: RTNL: unexpected reply\n", + __func__); + } + + rcv_len -= NLMSG_ALIGN(len); + h = (struct nlmsghdr *)((uint8_t *)h + + NLMSG_ALIGN(len)); + } + + if (nlmsg.msg_flags & MSG_TRUNC) { + fprintf(stderr, "%s: message truncated\n", __func__); + continue; + } + + if (rcv_len) { + fprintf(stderr, "%s: rtnl: %d not parsed bytes\n", + __func__, rcv_len); + ret = -1; + goto out; + } + } +out: + close(fd); + + return ret; +} + +struct ovpn_link_req { + struct nlmsghdr n; + struct ifinfomsg i; + char buf[256]; +}; + +static int ovpn_new_iface(struct ovpn_ctx *ovpn) +{ + struct rtattr *linkinfo, *data; + struct ovpn_link_req req = { 0 }; + int ret 
= -1; + + fprintf(stdout, "Creating interface %s with mode %u\n", ovpn->ifname, + ovpn->mode); + + req.n.nlmsg_len = NLMSG_LENGTH(sizeof(req.i)); + req.n.nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE | NLM_F_EXCL; + req.n.nlmsg_type = RTM_NEWLINK; + + if (ovpn_addattr(&req.n, sizeof(req), IFLA_IFNAME, ovpn->ifname, + strlen(ovpn->ifname) + 1) < 0) + goto err; + + linkinfo = ovpn_nest_start(&req.n, sizeof(req), IFLA_LINKINFO); + if (!linkinfo) + goto err; + + if (ovpn_addattr(&req.n, sizeof(req), IFLA_INFO_KIND, OVPN_FAMILY_NAME, + strlen(OVPN_FAMILY_NAME) + 1) < 0) + goto err; + + if (ovpn->mode_set) { + data = ovpn_nest_start(&req.n, sizeof(req), IFLA_INFO_DATA); + if (!data) + goto err; + + if (ovpn_addattr(&req.n, sizeof(req), IFLA_OVPN_MODE, + &ovpn->mode, sizeof(uint8_t)) < 0) + goto err; + + ovpn_nest_end(&req.n, data); + } + + ovpn_nest_end(&req.n, linkinfo); + + req.i.ifi_family = AF_PACKET; + + ret = ovpn_rt_send(&req.n, 0, 0, NULL, NULL); +err: + return ret; +} + +static int ovpn_del_iface(struct ovpn_ctx *ovpn) +{ + struct ovpn_link_req req = { 0 }; + + fprintf(stdout, "Deleting interface %s ifindex %u\n", ovpn->ifname, + ovpn->ifindex); + + req.n.nlmsg_len = NLMSG_LENGTH(sizeof(req.i)); + req.n.nlmsg_flags = NLM_F_REQUEST; + req.n.nlmsg_type = RTM_DELLINK; + + req.i.ifi_family = AF_PACKET; + req.i.ifi_index = ovpn->ifindex; + + return ovpn_rt_send(&req.n, 0, 0, NULL, NULL); +} + +static int nl_seq_check(struct nl_msg (*msg)__always_unused, + void (*arg)__always_unused) +{ + return NL_OK; +} + +struct mcast_handler_args { + const char *group; + int id; +}; + +static int mcast_family_handler(struct nl_msg *msg, void *arg) +{ + struct mcast_handler_args *grp = arg; + struct nlattr *tb[CTRL_ATTR_MAX + 1]; + struct genlmsghdr *gnlh = nlmsg_data(nlmsg_hdr(msg)); + struct nlattr *mcgrp; + int rem_mcgrp; + + nla_parse(tb, CTRL_ATTR_MAX, genlmsg_attrdata(gnlh, 0), + genlmsg_attrlen(gnlh, 0), NULL); + + if (!tb[CTRL_ATTR_MCAST_GROUPS]) + return NL_SKIP; + + nla_for_each_nested(mcgrp, tb[CTRL_ATTR_MCAST_GROUPS], rem_mcgrp) { + struct nlattr *tb_mcgrp[CTRL_ATTR_MCAST_GRP_MAX + 1]; + + nla_parse(tb_mcgrp, CTRL_ATTR_MCAST_GRP_MAX, + nla_data(mcgrp), nla_len(mcgrp), NULL); + + if (!tb_mcgrp[CTRL_ATTR_MCAST_GRP_NAME] || + !tb_mcgrp[CTRL_ATTR_MCAST_GRP_ID]) + continue; + if (strncmp(nla_data(tb_mcgrp[CTRL_ATTR_MCAST_GRP_NAME]), + grp->group, nla_len(tb_mcgrp[CTRL_ATTR_MCAST_GRP_NAME]))) + continue; + grp->id = nla_get_u32(tb_mcgrp[CTRL_ATTR_MCAST_GRP_ID]); + break; + } + + return NL_SKIP; +} + +static int mcast_error_handler(struct sockaddr_nl (*nla)__always_unused, + struct nlmsgerr *err, void *arg) +{ + int *ret = arg; + + *ret = err->error; + return NL_STOP; +} + +static int mcast_ack_handler(struct nl_msg (*msg)__always_unused, void *arg) +{ + int *ret = arg; + + *ret = 0; + return NL_STOP; +} + +static int ovpn_handle_msg(struct nl_msg *msg, void *arg) +{ + struct genlmsghdr *gnlh = nlmsg_data(nlmsg_hdr(msg)); + struct nlattr *attrs[OVPN_A_MAX + 1]; + struct nlmsghdr *nlh = nlmsg_hdr(msg); + //enum ovpn_del_peer_reason reason; + char ifname[IF_NAMESIZE]; + int *ret = arg; + __u32 ifindex; + + fprintf(stderr, "received message from ovpn-dco\n"); + + *ret = -1; + + if (!genlmsg_valid_hdr(nlh, 0)) { + fprintf(stderr, "invalid header\n"); + return NL_STOP; + } + + if (nla_parse(attrs, OVPN_A_MAX, genlmsg_attrdata(gnlh, 0), + genlmsg_attrlen(gnlh, 0), NULL)) { + fprintf(stderr, "received bogus data from ovpn-dco\n"); + return NL_STOP; + } + + if (!attrs[OVPN_A_IFINDEX]) { + fprintf(stderr, "no 
ifindex in this message\n"); + return NL_STOP; + } + + ifindex = nla_get_u32(attrs[OVPN_A_IFINDEX]); + if (!if_indextoname(ifindex, ifname)) { + fprintf(stderr, "cannot resolve ifname for ifindex: %u\n", + ifindex); + return NL_STOP; + } + + switch (gnlh->cmd) { + case OVPN_CMD_PEER_DEL_NTF: + /*if (!attrs[OVPN_A_DEL_PEER_REASON]) { + * fprintf(stderr, "no reason in DEL_PEER message\n"); + * return NL_STOP; + *} + * + *reason = nla_get_u8(attrs[OVPN_A_DEL_PEER_REASON]); + *fprintf(stderr, + * "received CMD_DEL_PEER, ifname: %s reason: %d\n", + * ifname, reason); + */ + fprintf(stdout, "received CMD_PEER_DEL_NTF\n"); + break; + case OVPN_CMD_KEY_SWAP_NTF: + fprintf(stdout, "received CMD_KEY_SWAP_NTF\n"); + break; + default: + fprintf(stderr, "received unknown command: %d\n", gnlh->cmd); + return NL_STOP; + } + + *ret = 0; + return NL_OK; +} + +static int ovpn_get_mcast_id(struct nl_sock *sock, const char *family, + const char *group) +{ + struct nl_msg *msg; + struct nl_cb *cb; + int ret, ctrlid; + struct mcast_handler_args grp = { + .group = group, + .id = -ENOENT, + }; + + msg = nlmsg_alloc(); + if (!msg) + return -ENOMEM; + + cb = nl_cb_alloc(NL_CB_DEFAULT); + if (!cb) { + ret = -ENOMEM; + goto out_fail_cb; + } + + ctrlid = genl_ctrl_resolve(sock, "nlctrl"); + + genlmsg_put(msg, 0, 0, ctrlid, 0, 0, CTRL_CMD_GETFAMILY, 0); + + ret = -ENOBUFS; + NLA_PUT_STRING(msg, CTRL_ATTR_FAMILY_NAME, family); + + ret = nl_send_auto_complete(sock, msg); + if (ret < 0) + goto nla_put_failure; + + ret = 1; + + nl_cb_err(cb, NL_CB_CUSTOM, mcast_error_handler, &ret); + nl_cb_set(cb, NL_CB_ACK, NL_CB_CUSTOM, mcast_ack_handler, &ret); + nl_cb_set(cb, NL_CB_VALID, NL_CB_CUSTOM, mcast_family_handler, &grp); + + while (ret > 0) + nl_recvmsgs(sock, cb); + + if (ret == 0) + ret = grp.id; + nla_put_failure: + nl_cb_put(cb); + out_fail_cb: + nlmsg_free(msg); + return ret; +} + +static int ovpn_listen_mcast(void) +{ + struct nl_sock *sock; + struct nl_cb *cb; + int mcid, ret; + + sock = nl_socket_alloc(); + if (!sock) { + fprintf(stderr, "cannot allocate netlink socket\n"); + goto err_free; + } + + nl_socket_set_buffer_size(sock, 8192, 8192); + + ret = genl_connect(sock); + if (ret < 0) { + fprintf(stderr, "cannot connect to generic netlink: %s\n", + nl_geterror(ret)); + goto err_free; + } + + mcid = ovpn_get_mcast_id(sock, OVPN_FAMILY_NAME, OVPN_MCGRP_PEERS); + if (mcid < 0) { + fprintf(stderr, "cannot get mcast group: %s\n", + nl_geterror(mcid)); + goto err_free; + } + + ret = nl_socket_add_membership(sock, mcid); + if (ret) { + fprintf(stderr, "failed to join mcast group: %d\n", ret); + goto err_free; + } + + ret = 1; + cb = nl_cb_alloc(NL_CB_DEFAULT); + nl_cb_set(cb, NL_CB_SEQ_CHECK, NL_CB_CUSTOM, nl_seq_check, NULL); + nl_cb_set(cb, NL_CB_VALID, NL_CB_CUSTOM, ovpn_handle_msg, &ret); + nl_cb_err(cb, NL_CB_CUSTOM, ovpn_nl_cb_error, &ret); + + while (ret == 1) { + int err = nl_recvmsgs(sock, cb); + + if (err < 0) { + fprintf(stderr, + "cannot receive netlink message: (%d) %s\n", + err, nl_geterror(-err)); + ret = -1; + break; + } + } + + nl_cb_put(cb); +err_free: + nl_socket_free(sock); + return ret; +} + +static void usage(const char *cmd) +{ + fprintf(stderr, + "Usage %s <command> <iface> [arguments..]\n", + cmd); + fprintf(stderr, "where <command> can be one of the following\n\n"); + + fprintf(stderr, "* new_iface <iface> [mode]: create new ovpn interface\n"); + fprintf(stderr, "\tiface: ovpn interface name\n"); + fprintf(stderr, "\tmode:\n"); + fprintf(stderr, "\t\t- P2P for peer-to-peer mode (i.e. client)\n"); + fprintf(stderr, "\t\t- MP for multi-peer mode (i.e. server)\n"); + + fprintf(stderr, "* del_iface <iface>: delete ovpn interface\n"); + fprintf(stderr, "\tiface: ovpn interface name\n"); + + fprintf(stderr, + "* listen <iface> <lport> <peers_file> [ipv6]: listen for incoming peer TCP connections\n"); + fprintf(stderr, "\tiface: ovpn interface name\n"); + fprintf(stderr, "\tlport: TCP port to listen to\n"); + fprintf(stderr, + "\tpeers_file: file containing one peer per line: Line format:\n"); + fprintf(stderr, "\t\t<peer_id> <vpnaddr>\n"); + fprintf(stderr, + "\tipv6: whether the socket should listen to the IPv6 wildcard address\n"); + + fprintf(stderr, + "* connect <iface> <peer_id> <raddr> <rport> [key_file]: start connecting peer of TCP-based VPN session\n"); + fprintf(stderr, "\tiface: ovpn interface name\n"); + fprintf(stderr, "\tpeer_id: peer ID of the connecting peer\n"); + fprintf(stderr, "\traddr: peer IP address to connect to\n"); + fprintf(stderr, "\trport: peer TCP port to connect to\n"); + fprintf(stderr, + "\tkey_file: file containing the symmetric key for encryption\n"); + + fprintf(stderr, + "* new_peer <iface> <lport> <peer_id> <raddr> <rport> [vpnaddr]: add new peer\n"); + fprintf(stderr, "\tiface: ovpn interface name\n"); + fprintf(stderr, "\tlport: local UDP port to bind to\n"); + fprintf(stderr, + "\tpeer_id: peer ID to be used in data packets to/from this peer\n"); + fprintf(stderr, "\traddr: peer IP address\n"); + fprintf(stderr, "\trport: peer UDP port\n"); + fprintf(stderr, "\tvpnaddr: peer VPN IP\n"); + + fprintf(stderr, + "* new_multi_peer <iface> <lport> <peers_file>: add multiple peers as listed in the file\n"); + fprintf(stderr, "\tiface: ovpn interface name\n"); + fprintf(stderr, "\tlport: local UDP port to bind to\n"); + fprintf(stderr, + "\tpeers_file: text file containing one peer per line. Line format:\n"); + fprintf(stderr, "\t\t<peer_id> <raddr> <rport> <vpnaddr>\n"); + + fprintf(stderr, + "* set_peer <iface> <peer_id> <keepalive_interval> <keepalive_timeout>: set peer attributes\n"); + fprintf(stderr, "\tiface: ovpn interface name\n"); + fprintf(stderr, "\tpeer_id: peer ID of the peer to modify\n"); + fprintf(stderr, + "\tkeepalive_interval: interval for sending ping messages\n"); + fprintf(stderr, + "\tkeepalive_timeout: time after which a peer is timed out\n"); + + fprintf(stderr, "* del_peer <iface> <peer_id>: delete peer\n"); + fprintf(stderr, "\tiface: ovpn interface name\n"); + fprintf(stderr, "\tpeer_id: peer ID of the peer to delete\n"); + + fprintf(stderr, "* get_peer <iface> [peer_id]: retrieve peer(s) status\n"); + fprintf(stderr, "\tiface: ovpn interface name\n"); + fprintf(stderr, + "\tpeer_id: peer ID of the peer to query. All peers are returned if omitted\n"); + + fprintf(stderr, + "* new_key <iface> <peer_id> <slot> <key_id> <cipher> <key_dir> <key_file>: set data channel key\n"); + fprintf(stderr, "\tiface: ovpn interface name\n"); + fprintf(stderr, + "\tpeer_id: peer ID of the peer to configure the key for\n"); + fprintf(stderr, "\tslot: either 1 (primary) or 2 (secondary)\n"); + fprintf(stderr, "\tkey_id: an ID from 0 to 7\n"); + fprintf(stderr, + "\tcipher: cipher to use, supported: aes (AES-GCM), chachapoly (CHACHA20POLY1305)\n"); + fprintf(stderr, + "\tkey_dir: key direction, must be 0 on one host and 1 on the other\n"); + fprintf(stderr, "\tkey_file: file containing the pre-shared key\n"); + + fprintf(stderr, + "* del_key <iface> <peer_id> [slot]: erase existing data channel key\n"); + fprintf(stderr, "\tiface: ovpn interface name\n"); + fprintf(stderr, "\tpeer_id: peer ID of the peer to modify\n"); + fprintf(stderr, "\tslot: slot to erase. PRIMARY if omitted\n"); + + fprintf(stderr, + "* get_key <iface> <peer_id> <slot>: retrieve non-sensitive key data\n"); + fprintf(stderr, "\tiface: ovpn interface name\n"); + fprintf(stderr, "\tpeer_id: peer ID of the peer to query\n"); + fprintf(stderr, "\tslot: either 1 (primary) or 2 (secondary)\n"); + + fprintf(stderr, + "* swap_keys <iface> <peer_id>: swap content of primary and secondary key slots\n"); + fprintf(stderr, "\tiface: ovpn interface name\n"); + fprintf(stderr, "\tpeer_id: peer ID of the peer to modify\n"); + + fprintf(stderr, + "* listen_mcast: listen to ovpn netlink multicast messages\n"); +} + +static int ovpn_parse_remote(struct ovpn_ctx *ovpn, const char *host, + const char *service, const char *vpnip) +{ + int ret; + struct addrinfo *result; + struct addrinfo hints = { + .ai_family = ovpn->sa_family, + .ai_socktype = SOCK_DGRAM, + .ai_protocol = IPPROTO_UDP + }; + + if (host) { + ret = getaddrinfo(host, service, &hints, &result); + if (ret == EAI_NONAME || ret == EAI_FAIL) + return -1; + + if (!(result->ai_family == AF_INET && + result->ai_addrlen == sizeof(struct sockaddr_in)) && + !(result->ai_family == AF_INET6 && + result->ai_addrlen == sizeof(struct sockaddr_in6))) { + ret = -EINVAL; + goto out; + } + + memcpy(&ovpn->remote, result->ai_addr, result->ai_addrlen); + } + + if (vpnip) { + ret = getaddrinfo(vpnip, NULL, &hints, &result); + if (ret == EAI_NONAME || ret == EAI_FAIL) + return -1; + + if (!(result->ai_family == AF_INET && + result->ai_addrlen == sizeof(struct sockaddr_in)) && + !(result->ai_family == AF_INET6 && + result->ai_addrlen == sizeof(struct sockaddr_in6))) { + ret = -EINVAL; + goto out; + } + + memcpy(&ovpn->peer_ip, result->ai_addr, result->ai_addrlen); + ovpn->sa_family = result->ai_family; + + ovpn->peer_ip_set = true; + } + + ret = 0; +out: + freeaddrinfo(result); + return ret; +} + +static int ovpn_parse_new_peer(struct ovpn_ctx *ovpn, const char *peer_id, + const char *raddr, const char *rport, + const char *vpnip) +{ + ovpn->peer_id = strtoul(peer_id, NULL, 10); + if (errno == ERANGE || ovpn->peer_id > PEER_ID_UNDEF) { + fprintf(stderr, "peer ID value out of range\n"); + return -1; + } + + return ovpn_parse_remote(ovpn, raddr, rport, vpnip); +} + +static int ovpn_parse_key_slot(const char *arg, struct ovpn_ctx *ovpn) +{ + int slot = strtoul(arg, NULL, 10); + + if (errno == ERANGE || slot < 1 || slot > 2) { + fprintf(stderr, "key slot out of range\n"); + return -1; + } + + switch (slot) { + case 1: + ovpn->key_slot = OVPN_KEY_SLOT_PRIMARY; + break; + case 2: + ovpn->key_slot = OVPN_KEY_SLOT_SECONDARY; + break; + } + + return 0; +} + +static int ovpn_send_tcp_data(int socket) +{ + uint16_t len = htons(1000); + uint8_t buf[1002]; + int ret; + + memcpy(buf, &len, sizeof(len)); + memset(buf + sizeof(len), 0x86, sizeof(buf) - sizeof(len)); + + ret = send(socket, buf, sizeof(buf), MSG_NOSIGNAL); + + fprintf(stdout, "Sent %u bytes over TCP socket\n", ret); + + return ret > 0 ? 
0 : ret; +} + +static int ovpn_recv_tcp_data(int socket) +{ + uint8_t buf[1002]; + uint16_t len; + int ret; + + ret = recv(socket, buf, sizeof(buf), MSG_NOSIGNAL); + + if (ret < 2) { + fprintf(stderr, ">>>> Error while reading TCP data: %d\n", ret); + return ret; + } + + memcpy(&len, buf, sizeof(len)); + len = ntohs(len); + + fprintf(stdout, ">>>> Received %d bytes over TCP socket, header: %u\n", + ret, len); + +/* int i; + * for (i = 2; i < ret; i++) { + * fprintf(stdout, "0x%.2x ", buf[i]); + * if (i && !((i - 2) % 16)) + * fprintf(stdout, "\n"); + * } + * fprintf(stdout, "\n"); + */ + return 0; +} + +static enum ovpn_cmd ovpn_parse_cmd(const char *cmd) +{ + if (!strcmp(cmd, "new_iface")) + return CMD_NEW_IFACE; + + if (!strcmp(cmd, "del_iface")) + return CMD_DEL_IFACE; + + if (!strcmp(cmd, "listen")) + return CMD_LISTEN; + + if (!strcmp(cmd, "connect")) + return CMD_CONNECT; + + if (!strcmp(cmd, "new_peer")) + return CMD_NEW_PEER; + + if (!strcmp(cmd, "new_multi_peer")) + return CMD_NEW_MULTI_PEER; + + if (!strcmp(cmd, "set_peer")) + return CMD_SET_PEER; + + if (!strcmp(cmd, "del_peer")) + return CMD_DEL_PEER; + + if (!strcmp(cmd, "get_peer")) + return CMD_GET_PEER; + + if (!strcmp(cmd, "new_key")) + return CMD_NEW_KEY; + + if (!strcmp(cmd, "del_key")) + return CMD_DEL_KEY; + + if (!strcmp(cmd, "get_key")) + return CMD_GET_KEY; + + if (!strcmp(cmd, "swap_keys")) + return CMD_SWAP_KEYS; + + if (!strcmp(cmd, "listen_mcast")) + return CMD_LISTEN_MCAST; + + return CMD_INVALID; +} + +/* Send the process to the background and wait for a signal. + * + * This helper is called at the end of commands + * creating sockets, so that the sockets stay alive + * along with the process that created them. + * + * A signal is expected to be delivered in order to + * terminate the waiting processes. + */ +static void ovpn_waitbg(void) +{ + daemon(1, 1); + pause(); +} + +static int ovpn_run_cmd(struct ovpn_ctx *ovpn) +{ + char peer_id[10], vpnip[INET6_ADDRSTRLEN], raddr[128], rport[10]; + int n, ret; + FILE *fp; + + switch (ovpn->cmd) { + case CMD_NEW_IFACE: + ret = ovpn_new_iface(ovpn); + break; + case CMD_DEL_IFACE: + ret = ovpn_del_iface(ovpn); + break; + case CMD_LISTEN: + ret = ovpn_listen(ovpn, ovpn->sa_family); + if (ret < 0) { + fprintf(stderr, "cannot listen on TCP socket\n"); + return ret; + } + + fp = fopen(ovpn->peers_file, "r"); + if (!fp) { + fprintf(stderr, "cannot open file: %s\n", + ovpn->peers_file); + return -1; + } + + int num_peers = 0; + + while ((n = fscanf(fp, "%s %s\n", peer_id, vpnip)) == 2) { + struct ovpn_ctx peer_ctx = { 0 }; + + if (num_peers == MAX_PEERS) { + fprintf(stderr, "max peers reached!\n"); + return -E2BIG; + } + + peer_ctx.ifindex = ovpn->ifindex; + peer_ctx.sa_family = ovpn->sa_family; + + peer_ctx.socket = ovpn_accept(ovpn); + if (peer_ctx.socket < 0) { + fprintf(stderr, "cannot accept connection!\n"); + return -1; + } + + /* store peer sockets to test TCP I/O */ + ovpn->cli_sockets[num_peers] = peer_ctx.socket; + + ret = ovpn_parse_new_peer(&peer_ctx, peer_id, NULL, + NULL, vpnip); + if (ret < 0) { + fprintf(stderr, "error while parsing line\n"); + return -1; + } + + ret = ovpn_new_peer(&peer_ctx, true); + if (ret < 0) { + fprintf(stderr, + "cannot add peer to VPN: %s %s\n", + peer_id, vpnip); + return ret; + } + num_peers++; + } + + for (int i = 0; i < num_peers; i++) { + ret = ovpn_recv_tcp_data(ovpn->cli_sockets[i]); + if (ret < 0) + break; + } + ovpn_waitbg(); + break; + case CMD_CONNECT: + ret = ovpn_connect(ovpn); + if (ret < 0) { + fprintf(stderr, "cannot connect TCP 
socket\n"); + return ret; + } + + ret = ovpn_new_peer(ovpn, true); + if (ret < 0) { + fprintf(stderr, "cannot add peer to VPN\n"); + close(ovpn->socket); + return ret; + } + + if (ovpn->cipher != OVPN_CIPHER_ALG_NONE) { + ret = ovpn_new_key(ovpn); + if (ret < 0) { + fprintf(stderr, "cannot set key\n"); + return ret; + } + } + + ret = ovpn_send_tcp_data(ovpn->socket); + ovpn_waitbg(); + break; + case CMD_NEW_PEER: + ret = ovpn_udp_socket(ovpn, AF_INET6); //ovpn->sa_family ? + if (ret < 0) + return ret; + + ret = ovpn_new_peer(ovpn, false); + ovpn_waitbg(); + break; + case CMD_NEW_MULTI_PEER: + ret = ovpn_udp_socket(ovpn, AF_INET6); + if (ret < 0) + return ret; + + fp = fopen(ovpn->peers_file, "r"); + if (!fp) { + fprintf(stderr, "cannot open file: %s\n", + ovpn->peers_file); + return -1; + } + + while ((n = fscanf(fp, "%s %s %s %s\n", peer_id, raddr, rport, + vpnip)) == 4) { + struct ovpn_ctx peer_ctx = { 0 }; + + peer_ctx.ifindex = ovpn->ifindex; + peer_ctx.socket = ovpn->socket; + peer_ctx.sa_family = AF_UNSPEC; + + ret = ovpn_parse_new_peer(&peer_ctx, peer_id, raddr, + rport, vpnip); + if (ret < 0) { + fprintf(stderr, "error while parsing line\n"); + return -1; + } + + ret = ovpn_new_peer(&peer_ctx, false); + if (ret < 0) { + fprintf(stderr, + "cannot add peer to VPN: %s %s %s %s\n", + peer_id, raddr, rport, vpnip); + return ret; + } + } + ovpn_waitbg(); + break; + case CMD_SET_PEER: + ret = ovpn_set_peer(ovpn); + break; + case CMD_DEL_PEER: + ret = ovpn_del_peer(ovpn); + break; + case CMD_GET_PEER: + if (ovpn->peer_id == PEER_ID_UNDEF) + fprintf(stderr, "List of peers connected to: %s\n", + ovpn->ifname); + + ret = ovpn_get_peer(ovpn); + break; + case CMD_NEW_KEY: + ret = ovpn_new_key(ovpn); + break; + case CMD_DEL_KEY: + ret = ovpn_del_key(ovpn); + break; + case CMD_GET_KEY: + ret = ovpn_get_key(ovpn); + break; + case CMD_SWAP_KEYS: + ret = ovpn_swap_keys(ovpn); + break; + case CMD_LISTEN_MCAST: + ret = ovpn_listen_mcast(); + break; + case CMD_INVALID: + break; + } + + return ret; +} + +static int ovpn_parse_cmd_args(struct ovpn_ctx *ovpn, int argc, char *argv[]) +{ + int ret; + + /* no args required for LISTEN_MCAST */ + if (ovpn->cmd == CMD_LISTEN_MCAST) + return 0; + + /* all commands need an ifname */ + if (argc < 3) + return -EINVAL; + + strscpy(ovpn->ifname, argv[2], IFNAMSIZ - 1); + ovpn->ifname[IFNAMSIZ - 1] = '\0'; + + /* all commands, except NEW_IFNAME, needs an ifindex */ + if (ovpn->cmd != CMD_NEW_IFACE) { + ovpn->ifindex = if_nametoindex(ovpn->ifname); + if (!ovpn->ifindex) { + fprintf(stderr, "cannot find interface: %s\n", + strerror(errno)); + return -1; + } + } + + switch (ovpn->cmd) { + case CMD_NEW_IFACE: + if (argc < 4) + break; + + if (!strcmp(argv[3], "P2P")) { + ovpn->mode = OVPN_MODE_P2P; + } else if (!strcmp(argv[3], "MP")) { + ovpn->mode = OVPN_MODE_MP; + } else { + fprintf(stderr, "Cannot parse iface mode: %s\n", + argv[3]); + return -1; + } + ovpn->mode_set = true; + break; + case CMD_DEL_IFACE: + break; + case CMD_LISTEN: + if (argc < 5) + return -EINVAL; + + ovpn->lport = strtoul(argv[3], NULL, 10); + if (errno == ERANGE || ovpn->lport > 65535) { + fprintf(stderr, "lport value out of range\n"); + return -1; + } + + ovpn->peers_file = argv[4]; + + if (argc > 5 && !strcmp(argv[5], "ipv6")) + ovpn->sa_family = AF_INET6; + break; + case CMD_CONNECT: + if (argc < 6) + return -EINVAL; + + ovpn->sa_family = AF_INET; + + ret = ovpn_parse_new_peer(ovpn, argv[3], argv[4], argv[5], + NULL); + if (ret < 0) { + fprintf(stderr, "Cannot parse remote peer data\n"); + 
+ return -1; + } + + if (argc > 6) { + ovpn->key_slot = OVPN_KEY_SLOT_PRIMARY; + ovpn->key_id = 0; + ovpn->cipher = OVPN_CIPHER_ALG_AES_GCM; + ovpn->key_dir = KEY_DIR_OUT; + + ret = ovpn_parse_key(argv[6], ovpn); + if (ret) + return -1; + } + break; + case CMD_NEW_PEER: + if (argc < 7) + return -EINVAL; + + ovpn->lport = strtoul(argv[4], NULL, 10); + if (errno == ERANGE || ovpn->lport > 65535) { + fprintf(stderr, "lport value out of range\n"); + return -1; + } + + const char *vpnip = (argc > 7) ? argv[7] : NULL; + + ret = ovpn_parse_new_peer(ovpn, argv[3], argv[5], argv[6], + vpnip); + if (ret < 0) + return -1; + break; + case CMD_NEW_MULTI_PEER: + if (argc < 5) + return -EINVAL; + + ovpn->lport = strtoul(argv[3], NULL, 10); + if (errno == ERANGE || ovpn->lport > 65535) { + fprintf(stderr, "lport value out of range\n"); + return -1; + } + + ovpn->peers_file = argv[4]; + break; + case CMD_SET_PEER: + if (argc < 6) + return -EINVAL; + + ovpn->peer_id = strtoul(argv[3], NULL, 10); + if (errno == ERANGE || ovpn->peer_id > PEER_ID_UNDEF) { + fprintf(stderr, "peer ID value out of range\n"); + return -1; + } + + ovpn->keepalive_interval = strtoul(argv[4], NULL, 10); + if (errno == ERANGE) { + fprintf(stderr, + "keepalive interval value out of range\n"); + return -1; + } + + ovpn->keepalive_timeout = strtoul(argv[5], NULL, 10); + if (errno == ERANGE) { + fprintf(stderr, + "keepalive timeout value out of range\n"); + return -1; + } + break; + case CMD_DEL_PEER: + if (argc < 4) + return -EINVAL; + + ovpn->peer_id = strtoul(argv[3], NULL, 10); + if (errno == ERANGE || ovpn->peer_id > PEER_ID_UNDEF) { + fprintf(stderr, "peer ID value out of range\n"); + return -1; + } + break; + case CMD_GET_PEER: + ovpn->peer_id = PEER_ID_UNDEF; + if (argc > 3) { + ovpn->peer_id = strtoul(argv[3], NULL, 10); + if (errno == ERANGE || ovpn->peer_id > PEER_ID_UNDEF) { + fprintf(stderr, "peer ID value out of range\n"); + return -1; + } + } + break; + case CMD_NEW_KEY: + if (argc < 9) + return -EINVAL; + + ovpn->peer_id = strtoul(argv[3], NULL, 10); + if (errno == ERANGE) { + fprintf(stderr, "peer ID value out of range\n"); + return -1; + } + + ret = ovpn_parse_key_slot(argv[4], ovpn); + if (ret) + return -1; + + ovpn->key_id = strtoul(argv[5], NULL, 10); + if (errno == ERANGE || ovpn->key_id > 2) { + fprintf(stderr, "key ID out of range\n"); + return -1; + } + + ret = ovpn_parse_cipher(argv[6], ovpn); + if (ret < 0) + return -1; + + ret = ovpn_parse_key_direction(argv[7], ovpn); + if (ret < 0) + return -1; + + ret = ovpn_parse_key(argv[8], ovpn); + if (ret) + return -1; + break; + case CMD_DEL_KEY: + if (argc < 4) + return -EINVAL; + + ovpn->peer_id = strtoul(argv[3], NULL, 10); + if (errno == ERANGE) { + fprintf(stderr, "peer ID value out of range\n"); + return -1; + } + + ret = ovpn_parse_key_slot(argv[4], ovpn); + if (ret) + return ret; + break; + case CMD_GET_KEY: + if (argc < 5) + return -EINVAL; + + ovpn->peer_id = strtoul(argv[3], NULL, 10); + if (errno == ERANGE) { + fprintf(stderr, "peer ID value out of range\n"); + return -1; + } + + ret = ovpn_parse_key_slot(argv[4], ovpn); + if (ret) + return ret; + break; + case CMD_SWAP_KEYS: + if (argc < 4) + return -EINVAL; + + ovpn->peer_id = strtoul(argv[3], NULL, 10); + if (errno == ERANGE) { + fprintf(stderr, "peer ID value out of range\n"); + return -1; + } + break; + case CMD_LISTEN_MCAST: + break; + case CMD_INVALID: + break; + } + + return 0; +} + +int main(int argc, char *argv[]) +{ + struct ovpn_ctx ovpn; + int ret; + + if (argc < 2) { + usage(argv[0]); + return
-1; + } + + memset(&ovpn, 0, sizeof(ovpn)); + ovpn.sa_family = AF_INET; + ovpn.cipher = OVPN_CIPHER_ALG_NONE; + + ovpn.cmd = ovpn_parse_cmd(argv[1]); + if (ovpn.cmd == CMD_INVALID) { + fprintf(stderr, "Error: unknown command.\n\n"); + usage(argv[0]); + return -1; + } + + ret = ovpn_parse_cmd_args(&ovpn, argc, argv); + if (ret < 0) { + fprintf(stderr, "Error: invalid arguments.\n\n"); + if (ret == -EINVAL) + usage(argv[0]); + return ret; + } + + ret = ovpn_run_cmd(&ovpn); + if (ret) + fprintf(stderr, "Cannot execute command: %s (%d)\n", + strerror(-ret), ret); + + return ret; +} diff --git a/tools/testing/selftests/net/ovpn/tcp_peers.txt b/tools/testing/selftests/net/ovpn/tcp_peers.txt new file mode 100644 index 0000000000000..d753eebe8716e --- /dev/null +++ b/tools/testing/selftests/net/ovpn/tcp_peers.txt @@ -0,0 +1,5 @@ +1 5.5.5.2 +2 5.5.5.3 +3 5.5.5.4 +4 5.5.5.5 +5 5.5.5.6 diff --git a/tools/testing/selftests/net/ovpn/test-chachapoly.sh b/tools/testing/selftests/net/ovpn/test-chachapoly.sh new file mode 100755 index 0000000000000..32504079a2b89 --- /dev/null +++ b/tools/testing/selftests/net/ovpn/test-chachapoly.sh @@ -0,0 +1,9 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 +# Copyright (C) 2025 OpenVPN, Inc. +# +# Author: Antonio Quartulli + +ALG="chachapoly" + +source test.sh diff --git a/tools/testing/selftests/net/ovpn/test-close-socket-tcp.sh b/tools/testing/selftests/net/ovpn/test-close-socket-tcp.sh new file mode 100755 index 0000000000000..093d44772ffdf --- /dev/null +++ b/tools/testing/selftests/net/ovpn/test-close-socket-tcp.sh @@ -0,0 +1,9 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 +# Copyright (C) 2025 OpenVPN, Inc. +# +# Author: Antonio Quartulli + +PROTO="TCP" + +source test-close-socket.sh diff --git a/tools/testing/selftests/net/ovpn/test-close-socket.sh b/tools/testing/selftests/net/ovpn/test-close-socket.sh new file mode 100755 index 0000000000000..5e48a8b679287 --- /dev/null +++ b/tools/testing/selftests/net/ovpn/test-close-socket.sh @@ -0,0 +1,45 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 +# Copyright (C) 2020-2025 OpenVPN, Inc. +# +# Author: Antonio Quartulli + +#set -x +set -e + +source ./common.sh + +cleanup + +modprobe -q ovpn || true + +for p in $(seq 0 ${NUM_PEERS}); do + create_ns ${p} +done + +for p in $(seq 0 ${NUM_PEERS}); do + setup_ns ${p} 5.5.5.$((${p} + 1))/24 +done + +for p in $(seq 0 ${NUM_PEERS}); do + add_peer ${p} +done + +for p in $(seq 1 ${NUM_PEERS}); do + ip netns exec peer0 ${OVPN_CLI} set_peer tun0 ${p} 60 120 + ip netns exec peer${p} ${OVPN_CLI} set_peer tun${p} ${p} 60 120 +done + +sleep 1 + +for p in $(seq 1 ${NUM_PEERS}); do + ip netns exec peer0 ping -qfc 500 -w 3 5.5.5.$((${p} + 1)) +done + +ip netns exec peer0 iperf3 -1 -s & +sleep 1 +ip netns exec peer1 iperf3 -Z -t 3 -c 5.5.5.1 + +cleanup + +modprobe -r ovpn || true diff --git a/tools/testing/selftests/net/ovpn/test-float.sh b/tools/testing/selftests/net/ovpn/test-float.sh new file mode 100755 index 0000000000000..ba5d725e18b07 --- /dev/null +++ b/tools/testing/selftests/net/ovpn/test-float.sh @@ -0,0 +1,9 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 +# Copyright (C) 2025 OpenVPN, Inc. 
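+# +# Setting FLOAT=1 makes the shared test.sh exercise peer floating: after the +# initial data exchange every client moves to a new transport address and +# traffic is verified again (see the "make clients float" block in test.sh).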
+# +# Author: Antonio Quartulli + +FLOAT="1" + +source test.sh diff --git a/tools/testing/selftests/net/ovpn/test-tcp.sh b/tools/testing/selftests/net/ovpn/test-tcp.sh new file mode 100755 index 0000000000000..ba3f1f315a349 --- /dev/null +++ b/tools/testing/selftests/net/ovpn/test-tcp.sh @@ -0,0 +1,9 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 +# Copyright (C) 2025 OpenVPN, Inc. +# +# Author: Antonio Quartulli + +PROTO="TCP" + +source test.sh diff --git a/tools/testing/selftests/net/ovpn/test.sh b/tools/testing/selftests/net/ovpn/test.sh new file mode 100755 index 0000000000000..7b62897b02408 --- /dev/null +++ b/tools/testing/selftests/net/ovpn/test.sh @@ -0,0 +1,113 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 +# Copyright (C) 2020-2025 OpenVPN, Inc. +# +# Author: Antonio Quartulli + +#set -x +set -e + +source ./common.sh + +cleanup + +modprobe -q ovpn || true + +for p in $(seq 0 ${NUM_PEERS}); do + create_ns ${p} +done + +for p in $(seq 0 ${NUM_PEERS}); do + setup_ns ${p} 5.5.5.$((${p} + 1))/24 +done + +for p in $(seq 0 ${NUM_PEERS}); do + add_peer ${p} +done + +for p in $(seq 1 ${NUM_PEERS}); do + ip netns exec peer0 ${OVPN_CLI} set_peer tun0 ${p} 60 120 + ip netns exec peer${p} ${OVPN_CLI} set_peer tun${p} ${p} 60 120 +done + +sleep 1 + +for p in $(seq 1 ${NUM_PEERS}); do + ip netns exec peer0 ping -qfc 500 -w 3 5.5.5.$((${p} + 1)) +done + +if [ "$FLOAT" == "1" ]; then + # make clients float.. + for p in $(seq 1 ${NUM_PEERS}); do + ip -n peer${p} addr del 10.10.${p}.2/24 dev veth${p} + ip -n peer${p} addr add 10.10.${p}.3/24 dev veth${p} + done + for p in $(seq 1 ${NUM_PEERS}); do + ip netns exec peer${p} ping -qfc 500 -w 3 5.5.5.1 + done +fi + +ip netns exec peer0 iperf3 -1 -s & +sleep 1 +ip netns exec peer1 iperf3 -Z -t 3 -c 5.5.5.1 + +echo "Adding secondary key and then swap:" +for p in $(seq 1 ${NUM_PEERS}); do + ip netns exec peer0 ${OVPN_CLI} new_key tun0 ${p} 2 1 ${ALG} 0 data64.key + ip netns exec peer${p} ${OVPN_CLI} new_key tun${p} ${p} 2 1 ${ALG} 1 data64.key + ip netns exec peer${p} ${OVPN_CLI} swap_keys tun${p} ${p} +done + +sleep 1 + +echo "Querying all peers:" +ip netns exec peer0 ${OVPN_CLI} get_peer tun0 +ip netns exec peer1 ${OVPN_CLI} get_peer tun1 + +echo "Querying peer 1:" +ip netns exec peer0 ${OVPN_CLI} get_peer tun0 1 + +echo "Querying non-existent peer 10:" +ip netns exec peer0 ${OVPN_CLI} get_peer tun0 10 || true + +echo "Deleting peer 1:" +ip netns exec peer0 ${OVPN_CLI} del_peer tun0 1 +ip netns exec peer1 ${OVPN_CLI} del_peer tun1 1 + +echo "Querying keys:" +for p in $(seq 2 ${NUM_PEERS}); do + ip netns exec peer${p} ${OVPN_CLI} get_key tun${p} ${p} 1 + ip netns exec peer${p} ${OVPN_CLI} get_key tun${p} ${p} 2 +done + +echo "Deleting peer while sending traffic:" +(ip netns exec peer2 ping -qf -w 4 5.5.5.1)& +sleep 2 +ip netns exec peer0 ${OVPN_CLI} del_peer tun0 2 +# following command fails in TCP mode +# (both ends get conn reset when one peer disconnects) +ip netns exec peer2 ${OVPN_CLI} del_peer tun2 2 || true + +echo "Deleting keys:" +for p in $(seq 3 ${NUM_PEERS}); do + ip netns exec peer${p} ${OVPN_CLI} del_key tun${p} ${p} 1 + ip netns exec peer${p} ${OVPN_CLI} del_key tun${p} ${p} 2 +done + +echo "Setting timeout to 3s MP:" +for p in $(seq 3 ${NUM_PEERS}); do + ip netns exec peer0 ${OVPN_CLI} set_peer tun0 ${p} 3 3 || true + ip netns exec peer${p} ${OVPN_CLI} set_peer tun${p} ${p} 0 0 +done +# wait for peers to timeout +sleep 5 + +echo "Setting timeout to 3s P2P:" +for p in $(seq 3 ${NUM_PEERS}); do + ip netns exec peer${p} 
${OVPN_CLI} set_peer tun${p} ${p} 3 3 +done +sleep 5 + +cleanup + +modprobe -r ovpn || true diff --git a/tools/testing/selftests/net/ovpn/udp_peers.txt b/tools/testing/selftests/net/ovpn/udp_peers.txt new file mode 100644 index 0000000000000..32f14bd9347a6 --- /dev/null +++ b/tools/testing/selftests/net/ovpn/udp_peers.txt @@ -0,0 +1,5 @@ +1 10.10.1.2 1 5.5.5.2 +2 10.10.2.2 1 5.5.5.3 +3 10.10.3.2 1 5.5.5.4 +4 10.10.4.2 1 5.5.5.5 +5 10.10.5.2 1 5.5.5.6 diff --git a/tools/testing/selftests/net/proc_net_pktgen.c b/tools/testing/selftests/net/proc_net_pktgen.c new file mode 100644 index 0000000000000..69444fb295778 --- /dev/null +++ b/tools/testing/selftests/net/proc_net_pktgen.c @@ -0,0 +1,690 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * proc_net_pktgen: kselftest for /proc/net/pktgen interface + * + * Copyright (c) 2025 Peter Seiderer + * + */ +#include <errno.h> +#include <fcntl.h> +#include <stdlib.h> +#include <unistd.h> + +#include "../kselftest_harness.h" + +static const char ctrl_cmd_stop[] = "stop"; +static const char ctrl_cmd_start[] = "start"; +static const char ctrl_cmd_reset[] = "reset"; + +static const char wrong_ctrl_cmd[] = "0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789"; + +static const char thr_cmd_add_loopback_0[] = "add_device lo@0"; +static const char thr_cmd_rm_loopback_0[] = "rem_device_all"; + +static const char wrong_thr_cmd[] = "forsureawrongcommand"; +static const char legacy_thr_cmd[] = "max_before_softirq"; + +static const char wrong_dev_cmd[] = "forsurewrongcommand"; +static const char dev_cmd_min_pkt_size_0[] = "min_pkt_size"; +static const char dev_cmd_min_pkt_size_1[] = "min_pkt_size "; +static const char dev_cmd_min_pkt_size_2[] = "min_pkt_size 0"; +static const char dev_cmd_min_pkt_size_3[] = "min_pkt_size 1"; +static const char dev_cmd_min_pkt_size_4[] = "min_pkt_size 100"; +static const char dev_cmd_min_pkt_size_5[] = "min_pkt_size=1001"; +static const char dev_cmd_min_pkt_size_6[] = "min_pkt_size =2002"; +static const char dev_cmd_min_pkt_size_7[] = "min_pkt_size= 3003"; +static const char dev_cmd_min_pkt_size_8[] = "min_pkt_size = 4004"; +static const char dev_cmd_max_pkt_size_0[] = "max_pkt_size 200"; +static const char dev_cmd_pkt_size_0[] = "pkt_size 300"; +static const char dev_cmd_imix_weights_0[] = "imix_weights 0,7 576,4 1500,1"; +static const char dev_cmd_imix_weights_1[] = "imix_weights 101,1 102,2 103,3 104,4 105,5 106,6 107,7 108,8 109,9 110,10 111,11 112,12 113,13 114,14 115,15 116,16 117,17 118,18 119,19 120,20"; +static const char dev_cmd_imix_weights_2[] = "imix_weights 100,1 102,2 103,3 104,4 105,5 106,6 107,7 108,8 109,9 110,10 111,11 112,12 113,13 114,14 115,15 116,16 117,17 118,18 119,19 120,20 121,21"; +static const char dev_cmd_imix_weights_3[] = "imix_weights"; +static const char dev_cmd_imix_weights_4[] = "imix_weights "; +static const char dev_cmd_imix_weights_5[] = "imix_weights 0"; +static const char dev_cmd_imix_weights_6[] = "imix_weights 0,"; +static const char dev_cmd_debug_0[] = "debug 1"; +static const char dev_cmd_debug_1[] = "debug 0"; +static const char dev_cmd_frags_0[] = "frags 100"; +static const char dev_cmd_delay_0[] = "delay 100"; +static const char dev_cmd_delay_1[] = "delay 2147483647"; +static const char dev_cmd_rate_0[] = "rate 0"; +static const char dev_cmd_rate_1[] = "rate 100"; +static const char dev_cmd_ratep_0[] = "ratep 0"; +static const char dev_cmd_ratep_1[] = "ratep 200"; +static const char dev_cmd_udp_src_min_0[] = "udp_src_min 1"; +static const char
dev_cmd_udp_dst_min_0[] = "udp_dst_min 2"; +static const char dev_cmd_udp_src_max_0[] = "udp_src_max 3"; +static const char dev_cmd_udp_dst_max_0[] = "udp_dst_max 4"; +static const char dev_cmd_clone_skb_0[] = "clone_skb 1"; +static const char dev_cmd_clone_skb_1[] = "clone_skb 0"; +static const char dev_cmd_count_0[] = "count 100"; +static const char dev_cmd_src_mac_count_0[] = "src_mac_count 100"; +static const char dev_cmd_dst_mac_count_0[] = "dst_mac_count 100"; +static const char dev_cmd_burst_0[] = "burst 0"; +static const char dev_cmd_node_0[] = "node 100"; +static const char dev_cmd_xmit_mode_0[] = "xmit_mode start_xmit"; +static const char dev_cmd_xmit_mode_1[] = "xmit_mode netif_receive"; +static const char dev_cmd_xmit_mode_2[] = "xmit_mode queue_xmit"; +static const char dev_cmd_xmit_mode_3[] = "xmit_mode nonsense"; +static const char dev_cmd_flag_0[] = "flag UDPCSUM"; +static const char dev_cmd_flag_1[] = "flag !UDPCSUM"; +static const char dev_cmd_flag_2[] = "flag nonsense"; +static const char dev_cmd_dst_min_0[] = "dst_min 101.102.103.104"; +static const char dev_cmd_dst_0[] = "dst 101.102.103.104"; +static const char dev_cmd_dst_max_0[] = "dst_max 201.202.203.204"; +static const char dev_cmd_dst6_0[] = "dst6 2001:db38:1234:0000:0000:0000:0000:0000"; +static const char dev_cmd_dst6_min_0[] = "dst6_min 2001:db8:1234:0000:0000:0000:0000:0000"; +static const char dev_cmd_dst6_max_0[] = "dst6_max 2001:db8:1234:0000:0000:0000:0000:0000"; +static const char dev_cmd_src6_0[] = "src6 2001:db38:1234:0000:0000:0000:0000:0000"; +static const char dev_cmd_src_min_0[] = "src_min 101.102.103.104"; +static const char dev_cmd_src_max_0[] = "src_max 201.202.203.204"; +static const char dev_cmd_dst_mac_0[] = "dst_mac 01:02:03:04:05:06"; +static const char dev_cmd_src_mac_0[] = "src_mac 11:12:13:14:15:16"; +static const char dev_cmd_clear_counters_0[] = "clear_counters"; +static const char dev_cmd_flows_0[] = "flows 100"; +static const char dev_cmd_spi_0[] = "spi 100"; +static const char dev_cmd_flowlen_0[] = "flowlen 100"; +static const char dev_cmd_queue_map_min_0[] = "queue_map_min 1"; +static const char dev_cmd_queue_map_max_0[] = "queue_map_max 2"; +static const char dev_cmd_mpls_0[] = "mpls 00000001"; +static const char dev_cmd_mpls_1[] = "mpls 00000001,000000f2"; +static const char dev_cmd_mpls_2[] = "mpls 00000f00,00000f01,00000f02,00000f03,00000f04,00000f05,00000f06,00000f07,00000f08,00000f09,00000f0a,00000f0b,00000f0c,00000f0d,00000f0e,00000f0f"; +static const char dev_cmd_mpls_3[] = "mpls 00000f00,00000f01,00000f02,00000f03,00000f04,00000f05,00000f06,00000f07,00000f08,00000f09,00000f0a,00000f0b,00000f0c,00000f0d,00000f0e,00000f0f,00000f10"; +static const char dev_cmd_vlan_id_0[] = "vlan_id 1"; +static const char dev_cmd_vlan_p_0[] = "vlan_p 1"; +static const char dev_cmd_vlan_cfi_0[] = "vlan_cfi 1"; +static const char dev_cmd_vlan_id_1[] = "vlan_id 4096"; +static const char dev_cmd_svlan_id_0[] = "svlan_id 1"; +static const char dev_cmd_svlan_p_0[] = "svlan_p 1"; +static const char dev_cmd_svlan_cfi_0[] = "svlan_cfi 1"; +static const char dev_cmd_svlan_id_1[] = "svlan_id 4096"; +static const char dev_cmd_tos_0[] = "tos 0"; +static const char dev_cmd_tos_1[] = "tos 0f"; +static const char dev_cmd_tos_2[] = "tos 0ff"; +static const char dev_cmd_traffic_class_0[] = "traffic_class f0"; +static const char dev_cmd_skb_priority_0[] = "skb_priority 999"; + +FIXTURE(proc_net_pktgen) { + int ctrl_fd; + int thr_fd; + int dev_fd; +}; + +FIXTURE_SETUP(proc_net_pktgen) { + int r; + ssize_t len; + 
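+ /* pktgen exposes three kinds of files under /proc/net/pktgen: the + * global control file (pgctrl), one file per kernel thread + * (kpktgend_N) and one file per assigned device (e.g. lo@0). Open + * all three so each test below can write commands directly to them. + */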
+ r = system("modprobe pktgen"); + ASSERT_EQ(r, 0) TH_LOG("CONFIG_NET_PKTGEN not enabled, module pktgen not loaded?"); + + self->ctrl_fd = open("/proc/net/pktgen/pgctrl", O_RDWR); + ASSERT_GE(self->ctrl_fd, 0) TH_LOG("CONFIG_NET_PKTGEN not enabled, module pktgen not loaded?"); + + self->thr_fd = open("/proc/net/pktgen/kpktgend_0", O_RDWR); + ASSERT_GE(self->thr_fd, 0) TH_LOG("CONFIG_NET_PKTGEN not enabled, module pktgen not loaded?"); + + len = write(self->thr_fd, thr_cmd_add_loopback_0, sizeof(thr_cmd_add_loopback_0)); + ASSERT_EQ(len, sizeof(thr_cmd_add_loopback_0)) TH_LOG("device lo@0 already registered?"); + + self->dev_fd = open("/proc/net/pktgen/lo@0", O_RDWR); + ASSERT_GE(self->dev_fd, 0) TH_LOG("device entry for lo@0 missing?"); +} + +FIXTURE_TEARDOWN(proc_net_pktgen) { + int ret; + ssize_t len; + + ret = close(self->dev_fd); + EXPECT_EQ(ret, 0); + + len = write(self->thr_fd, thr_cmd_rm_loopback_0, sizeof(thr_cmd_rm_loopback_0)); + EXPECT_EQ(len, sizeof(thr_cmd_rm_loopback_0)); + + ret = close(self->thr_fd); + EXPECT_EQ(ret, 0); + + ret = close(self->ctrl_fd); + EXPECT_EQ(ret, 0); +} + +TEST_F(proc_net_pktgen, wrong_ctrl_cmd) { + for (int i = 0; i <= sizeof(wrong_ctrl_cmd); i++) { + ssize_t len; + + len = write(self->ctrl_fd, wrong_ctrl_cmd, i); + EXPECT_EQ(len, -1); + EXPECT_EQ(errno, EINVAL); + } +} + +TEST_F(proc_net_pktgen, ctrl_cmd) { + ssize_t len; + + len = write(self->ctrl_fd, ctrl_cmd_stop, sizeof(ctrl_cmd_stop)); + EXPECT_EQ(len, sizeof(ctrl_cmd_stop)); + + len = write(self->ctrl_fd, ctrl_cmd_stop, sizeof(ctrl_cmd_stop) - 1); + EXPECT_EQ(len, sizeof(ctrl_cmd_stop) - 1); + + len = write(self->ctrl_fd, ctrl_cmd_start, sizeof(ctrl_cmd_start)); + EXPECT_EQ(len, sizeof(ctrl_cmd_start)); + + len = write(self->ctrl_fd, ctrl_cmd_start, sizeof(ctrl_cmd_start) - 1); + EXPECT_EQ(len, sizeof(ctrl_cmd_start) - 1); + + len = write(self->ctrl_fd, ctrl_cmd_reset, sizeof(ctrl_cmd_reset)); + EXPECT_EQ(len, sizeof(ctrl_cmd_reset)); + + len = write(self->ctrl_fd, ctrl_cmd_reset, sizeof(ctrl_cmd_reset) - 1); + EXPECT_EQ(len, sizeof(ctrl_cmd_reset) - 1); +} + +TEST_F(proc_net_pktgen, wrong_thr_cmd) { + for (int i = 0; i <= sizeof(wrong_thr_cmd); i++) { + ssize_t len; + + len = write(self->thr_fd, wrong_thr_cmd, i); + EXPECT_EQ(len, -1); + EXPECT_EQ(errno, EINVAL); + } +} + +TEST_F(proc_net_pktgen, legacy_thr_cmd) { + for (int i = 0; i <= sizeof(legacy_thr_cmd); i++) { + ssize_t len; + + len = write(self->thr_fd, legacy_thr_cmd, i); + if (i < (sizeof(legacy_thr_cmd) - 1)) { + /* incomplete command string */ + EXPECT_EQ(len, -1); + EXPECT_EQ(errno, EINVAL); + } else { + /* complete command string without/with trailing '\0' */ + EXPECT_EQ(len, i); + } + } +} + +TEST_F(proc_net_pktgen, wrong_dev_cmd) { + for (int i = 0; i <= sizeof(wrong_dev_cmd); i++) { + ssize_t len; + + len = write(self->dev_fd, wrong_dev_cmd, i); + EXPECT_EQ(len, -1); + EXPECT_EQ(errno, EINVAL); + } +} + +TEST_F(proc_net_pktgen, dev_cmd_min_pkt_size) { + ssize_t len; + + /* with trailing '\0' */ + len = write(self->dev_fd, dev_cmd_min_pkt_size_0, sizeof(dev_cmd_min_pkt_size_0)); + EXPECT_EQ(len, sizeof(dev_cmd_min_pkt_size_0)); + + /* without trailing '\0' */ + len = write(self->dev_fd, dev_cmd_min_pkt_size_0, sizeof(dev_cmd_min_pkt_size_0) - 1); + EXPECT_EQ(len, sizeof(dev_cmd_min_pkt_size_0) - 1); + + /* with trailing '\0' */ + len = write(self->dev_fd, dev_cmd_min_pkt_size_1, sizeof(dev_cmd_min_pkt_size_1)); + EXPECT_EQ(len, sizeof(dev_cmd_min_pkt_size_1)); + + /* without trailing '\0' */ + len = write(self->dev_fd, 
dev_cmd_min_pkt_size_1, sizeof(dev_cmd_min_pkt_size_1) - 1); + EXPECT_EQ(len, sizeof(dev_cmd_min_pkt_size_1) - 1); + + /* with trailing '\0' */ + len = write(self->dev_fd, dev_cmd_min_pkt_size_2, sizeof(dev_cmd_min_pkt_size_2)); + EXPECT_EQ(len, sizeof(dev_cmd_min_pkt_size_2)); + + /* without trailing '\0' */ + len = write(self->dev_fd, dev_cmd_min_pkt_size_2, sizeof(dev_cmd_min_pkt_size_2) - 1); + EXPECT_EQ(len, sizeof(dev_cmd_min_pkt_size_2) - 1); + + len = write(self->dev_fd, dev_cmd_min_pkt_size_3, sizeof(dev_cmd_min_pkt_size_3)); + EXPECT_EQ(len, sizeof(dev_cmd_min_pkt_size_3)); + + len = write(self->dev_fd, dev_cmd_min_pkt_size_4, sizeof(dev_cmd_min_pkt_size_4)); + EXPECT_EQ(len, sizeof(dev_cmd_min_pkt_size_4)); + + len = write(self->dev_fd, dev_cmd_min_pkt_size_5, sizeof(dev_cmd_min_pkt_size_5)); + EXPECT_EQ(len, sizeof(dev_cmd_min_pkt_size_5)); + + len = write(self->dev_fd, dev_cmd_min_pkt_size_6, sizeof(dev_cmd_min_pkt_size_6)); + EXPECT_EQ(len, sizeof(dev_cmd_min_pkt_size_6)); + + len = write(self->dev_fd, dev_cmd_min_pkt_size_7, sizeof(dev_cmd_min_pkt_size_7)); + EXPECT_EQ(len, sizeof(dev_cmd_min_pkt_size_7)); + + len = write(self->dev_fd, dev_cmd_min_pkt_size_8, sizeof(dev_cmd_min_pkt_size_8)); + EXPECT_EQ(len, sizeof(dev_cmd_min_pkt_size_8)); +} + +TEST_F(proc_net_pktgen, dev_cmd_max_pkt_size) { + ssize_t len; + + len = write(self->dev_fd, dev_cmd_max_pkt_size_0, sizeof(dev_cmd_max_pkt_size_0)); + EXPECT_EQ(len, sizeof(dev_cmd_max_pkt_size_0)); +} + +TEST_F(proc_net_pktgen, dev_cmd_pkt_size) { + ssize_t len; + + len = write(self->dev_fd, dev_cmd_pkt_size_0, sizeof(dev_cmd_pkt_size_0)); + EXPECT_EQ(len, sizeof(dev_cmd_pkt_size_0)); +} + +TEST_F(proc_net_pktgen, dev_cmd_imix_weights) { + ssize_t len; + + len = write(self->dev_fd, dev_cmd_imix_weights_0, sizeof(dev_cmd_imix_weights_0)); + EXPECT_EQ(len, sizeof(dev_cmd_imix_weights_0)); + + len = write(self->dev_fd, dev_cmd_imix_weights_1, sizeof(dev_cmd_imix_weights_1)); + EXPECT_EQ(len, sizeof(dev_cmd_imix_weights_1)); + + len = write(self->dev_fd, dev_cmd_imix_weights_2, sizeof(dev_cmd_imix_weights_2)); + EXPECT_EQ(len, -1); + EXPECT_EQ(errno, E2BIG); + + /* with trailing '\0' */ + len = write(self->dev_fd, dev_cmd_imix_weights_3, sizeof(dev_cmd_imix_weights_3)); + EXPECT_EQ(len, -1); + EXPECT_EQ(errno, EINVAL); + + /* without trailing '\0' */ + len = write(self->dev_fd, dev_cmd_imix_weights_3, sizeof(dev_cmd_imix_weights_3) - 1); + EXPECT_EQ(len, -1); + EXPECT_EQ(errno, EINVAL); + + /* with trailing '\0' */ + len = write(self->dev_fd, dev_cmd_imix_weights_4, sizeof(dev_cmd_imix_weights_4)); + EXPECT_EQ(len, -1); + EXPECT_EQ(errno, EINVAL); + + /* without trailing '\0' */ + len = write(self->dev_fd, dev_cmd_imix_weights_4, sizeof(dev_cmd_imix_weights_4) - 1); + EXPECT_EQ(len, -1); + EXPECT_EQ(errno, EINVAL); + + /* with trailing '\0' */ + len = write(self->dev_fd, dev_cmd_imix_weights_5, sizeof(dev_cmd_imix_weights_5)); + EXPECT_EQ(len, -1); + EXPECT_EQ(errno, EINVAL); + + /* without trailing '\0' */ + len = write(self->dev_fd, dev_cmd_imix_weights_5, sizeof(dev_cmd_imix_weights_5) - 1); + EXPECT_EQ(len, -1); + EXPECT_EQ(errno, EINVAL); + + /* with trailing '\0' */ + len = write(self->dev_fd, dev_cmd_imix_weights_6, sizeof(dev_cmd_imix_weights_6)); + EXPECT_EQ(len, -1); + EXPECT_EQ(errno, EINVAL); + + /* without trailing '\0' */ + len = write(self->dev_fd, dev_cmd_imix_weights_6, sizeof(dev_cmd_imix_weights_6) - 1); + EXPECT_EQ(len, -1); + EXPECT_EQ(errno, EINVAL); +} + +TEST_F(proc_net_pktgen, dev_cmd_debug) { + ssize_t len; + 
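+ /* "debug 1"/"debug 0" only toggle pktgen's verbose logging for this + * device, so both writes are expected to succeed. + */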
+ /* debug on */ + len = write(self->dev_fd, dev_cmd_debug_0, sizeof(dev_cmd_debug_0)); + EXPECT_EQ(len, sizeof(dev_cmd_debug_0)); + + /* debug off */ + len = write(self->dev_fd, dev_cmd_debug_1, sizeof(dev_cmd_debug_1)); + EXPECT_EQ(len, sizeof(dev_cmd_debug_1)); +} + +TEST_F(proc_net_pktgen, dev_cmd_frags) { + ssize_t len; + + len = write(self->dev_fd, dev_cmd_frags_0, sizeof(dev_cmd_frags_0)); + EXPECT_EQ(len, sizeof(dev_cmd_frags_0)); +} + +TEST_F(proc_net_pktgen, dev_cmd_delay) { + ssize_t len; + + len = write(self->dev_fd, dev_cmd_delay_0, sizeof(dev_cmd_delay_0)); + EXPECT_EQ(len, sizeof(dev_cmd_delay_0)); + + len = write(self->dev_fd, dev_cmd_delay_1, sizeof(dev_cmd_delay_1)); + EXPECT_EQ(len, sizeof(dev_cmd_delay_1)); +} + +TEST_F(proc_net_pktgen, dev_cmd_rate) { + ssize_t len; + + len = write(self->dev_fd, dev_cmd_rate_0, sizeof(dev_cmd_rate_0)); + EXPECT_EQ(len, -1); + EXPECT_EQ(errno, EINVAL); + + len = write(self->dev_fd, dev_cmd_rate_1, sizeof(dev_cmd_rate_1)); + EXPECT_EQ(len, sizeof(dev_cmd_rate_1)); +} + +TEST_F(proc_net_pktgen, dev_cmd_ratep) { + ssize_t len; + + len = write(self->dev_fd, dev_cmd_ratep_0, sizeof(dev_cmd_ratep_0)); + EXPECT_EQ(len, -1); + EXPECT_EQ(errno, EINVAL); + + len = write(self->dev_fd, dev_cmd_ratep_1, sizeof(dev_cmd_ratep_1)); + EXPECT_EQ(len, sizeof(dev_cmd_ratep_1)); +} + +TEST_F(proc_net_pktgen, dev_cmd_udp_src_min) { + ssize_t len; + + len = write(self->dev_fd, dev_cmd_udp_src_min_0, sizeof(dev_cmd_udp_src_min_0)); + EXPECT_EQ(len, sizeof(dev_cmd_udp_src_min_0)); +} + +TEST_F(proc_net_pktgen, dev_cmd_udp_dst_min) { + ssize_t len; + + len = write(self->dev_fd, dev_cmd_udp_dst_min_0, sizeof(dev_cmd_udp_dst_min_0)); + EXPECT_EQ(len, sizeof(dev_cmd_udp_dst_min_0)); +} + +TEST_F(proc_net_pktgen, dev_cmd_udp_src_max) { + ssize_t len; + + len = write(self->dev_fd, dev_cmd_udp_src_max_0, sizeof(dev_cmd_udp_src_max_0)); + EXPECT_EQ(len, sizeof(dev_cmd_udp_src_max_0)); +} + +TEST_F(proc_net_pktgen, dev_cmd_udp_dst_max) { + ssize_t len; + + len = write(self->dev_fd, dev_cmd_udp_dst_max_0, sizeof(dev_cmd_udp_dst_max_0)); + EXPECT_EQ(len, sizeof(dev_cmd_udp_dst_max_0)); +} + +TEST_F(proc_net_pktgen, dev_cmd_clone_skb) { + ssize_t len; + + /* clone_skb on (gives EOPNOTSUPP on lo device) */ + len = write(self->dev_fd, dev_cmd_clone_skb_0, sizeof(dev_cmd_clone_skb_0)); + EXPECT_EQ(len, -1); + EXPECT_EQ(errno, EOPNOTSUPP); + + /* clone_skb off */ + len = write(self->dev_fd, dev_cmd_clone_skb_1, sizeof(dev_cmd_clone_skb_1)); + EXPECT_EQ(len, sizeof(dev_cmd_clone_skb_1)); +} + +TEST_F(proc_net_pktgen, dev_cmd_count) { + ssize_t len; + + len = write(self->dev_fd, dev_cmd_count_0, sizeof(dev_cmd_count_0)); + EXPECT_EQ(len, sizeof(dev_cmd_count_0)); +} + +TEST_F(proc_net_pktgen, dev_cmd_src_mac_count) { + ssize_t len; + + len = write(self->dev_fd, dev_cmd_src_mac_count_0, sizeof(dev_cmd_src_mac_count_0)); + EXPECT_EQ(len, sizeof(dev_cmd_src_mac_count_0)); +} + +TEST_F(proc_net_pktgen, dev_cmd_dst_mac_count) { + ssize_t len; + + len = write(self->dev_fd, dev_cmd_dst_mac_count_0, sizeof(dev_cmd_dst_mac_count_0)); + EXPECT_EQ(len, sizeof(dev_cmd_dst_mac_count_0)); +} + +TEST_F(proc_net_pktgen, dev_cmd_burst) { + ssize_t len; + + /* burst off */ + len = write(self->dev_fd, dev_cmd_burst_0, sizeof(dev_cmd_burst_0)); + EXPECT_EQ(len, sizeof(dev_cmd_burst_0)); +} + +TEST_F(proc_net_pktgen, dev_cmd_node) { + ssize_t len; + + len = write(self->dev_fd, dev_cmd_node_0, sizeof(dev_cmd_node_0)); + EXPECT_EQ(len, sizeof(dev_cmd_node_0)); +} + +TEST_F(proc_net_pktgen, 
dev_cmd_xmit_mode) { + ssize_t len; + + len = write(self->dev_fd, dev_cmd_xmit_mode_0, sizeof(dev_cmd_xmit_mode_0)); + EXPECT_EQ(len, sizeof(dev_cmd_xmit_mode_0)); + + len = write(self->dev_fd, dev_cmd_xmit_mode_1, sizeof(dev_cmd_xmit_mode_1)); + EXPECT_EQ(len, sizeof(dev_cmd_xmit_mode_1)); + + len = write(self->dev_fd, dev_cmd_xmit_mode_2, sizeof(dev_cmd_xmit_mode_2)); + EXPECT_EQ(len, sizeof(dev_cmd_xmit_mode_2)); + + len = write(self->dev_fd, dev_cmd_xmit_mode_3, sizeof(dev_cmd_xmit_mode_3)); + EXPECT_EQ(len, sizeof(dev_cmd_xmit_mode_3)); +} + +TEST_F(proc_net_pktgen, dev_cmd_flag) { + ssize_t len; + + /* flag UDPCSUM on */ + len = write(self->dev_fd, dev_cmd_flag_0, sizeof(dev_cmd_flag_0)); + EXPECT_EQ(len, sizeof(dev_cmd_flag_0)); + + /* flag UDPCSUM off */ + len = write(self->dev_fd, dev_cmd_flag_1, sizeof(dev_cmd_flag_1)); + EXPECT_EQ(len, sizeof(dev_cmd_flag_1)); + + /* flag invalid */ + len = write(self->dev_fd, dev_cmd_flag_2, sizeof(dev_cmd_flag_2)); + EXPECT_EQ(len, sizeof(dev_cmd_flag_2)); +} + +TEST_F(proc_net_pktgen, dev_cmd_dst_min) { + ssize_t len; + + len = write(self->dev_fd, dev_cmd_dst_min_0, sizeof(dev_cmd_dst_min_0)); + EXPECT_EQ(len, sizeof(dev_cmd_dst_min_0)); +} + +TEST_F(proc_net_pktgen, dev_cmd_dst) { + ssize_t len; + + len = write(self->dev_fd, dev_cmd_dst_0, sizeof(dev_cmd_dst_0)); + EXPECT_EQ(len, sizeof(dev_cmd_dst_0)); +} + +TEST_F(proc_net_pktgen, dev_cmd_dst_max) { + ssize_t len; + + len = write(self->dev_fd, dev_cmd_dst_max_0, sizeof(dev_cmd_dst_max_0)); + EXPECT_EQ(len, sizeof(dev_cmd_dst_max_0)); +} + +TEST_F(proc_net_pktgen, dev_cmd_dst6) { + ssize_t len; + + len = write(self->dev_fd, dev_cmd_dst6_0, sizeof(dev_cmd_dst6_0)); + EXPECT_EQ(len, sizeof(dev_cmd_dst6_0)); +} + +TEST_F(proc_net_pktgen, dev_cmd_dst6_min) { + ssize_t len; + + len = write(self->dev_fd, dev_cmd_dst6_min_0, sizeof(dev_cmd_dst6_min_0)); + EXPECT_EQ(len, sizeof(dev_cmd_dst6_min_0)); +} + +TEST_F(proc_net_pktgen, dev_cmd_dst6_max) { + ssize_t len; + + len = write(self->dev_fd, dev_cmd_dst6_max_0, sizeof(dev_cmd_dst6_max_0)); + EXPECT_EQ(len, sizeof(dev_cmd_dst6_max_0)); +} + +TEST_F(proc_net_pktgen, dev_cmd_src6) { + ssize_t len; + + len = write(self->dev_fd, dev_cmd_src6_0, sizeof(dev_cmd_src6_0)); + EXPECT_EQ(len, sizeof(dev_cmd_src6_0)); +} + +TEST_F(proc_net_pktgen, dev_cmd_src_min) { + ssize_t len; + + len = write(self->dev_fd, dev_cmd_src_min_0, sizeof(dev_cmd_src_min_0)); + EXPECT_EQ(len, sizeof(dev_cmd_src_min_0)); +} + +TEST_F(proc_net_pktgen, dev_cmd_src_max) { + ssize_t len; + + len = write(self->dev_fd, dev_cmd_src_max_0, sizeof(dev_cmd_src_max_0)); + EXPECT_EQ(len, sizeof(dev_cmd_src_max_0)); +} + +TEST_F(proc_net_pktgen, dev_cmd_dst_mac) { + ssize_t len; + + len = write(self->dev_fd, dev_cmd_dst_mac_0, sizeof(dev_cmd_dst_mac_0)); + EXPECT_EQ(len, sizeof(dev_cmd_dst_mac_0)); +} + +TEST_F(proc_net_pktgen, dev_cmd_src_mac) { + ssize_t len; + + len = write(self->dev_fd, dev_cmd_src_mac_0, sizeof(dev_cmd_src_mac_0)); + EXPECT_EQ(len, sizeof(dev_cmd_src_mac_0)); +} + +TEST_F(proc_net_pktgen, dev_cmd_clear_counters) { + ssize_t len; + + len = write(self->dev_fd, dev_cmd_clear_counters_0, sizeof(dev_cmd_clear_counters_0)); + EXPECT_EQ(len, sizeof(dev_cmd_clear_counters_0)); +} + +TEST_F(proc_net_pktgen, dev_cmd_flows) { + ssize_t len; + + len = write(self->dev_fd, dev_cmd_flows_0, sizeof(dev_cmd_flows_0)); + EXPECT_EQ(len, sizeof(dev_cmd_flows_0)); +} + +TEST_F(proc_net_pktgen, dev_cmd_spi) { + ssize_t len; + + len = write(self->dev_fd, dev_cmd_spi_0, sizeof(dev_cmd_spi_0)); + 
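/* "spi" is only parsed when pktgen was built with CONFIG_XFRM */ +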
EXPECT_EQ(len, sizeof(dev_cmd_spi_0)) TH_LOG("CONFIG_XFRM not enabled?"); +} + +TEST_F(proc_net_pktgen, dev_cmd_flowlen) { + ssize_t len; + + len = write(self->dev_fd, dev_cmd_flowlen_0, sizeof(dev_cmd_flowlen_0)); + EXPECT_EQ(len, sizeof(dev_cmd_flowlen_0)); +} + +TEST_F(proc_net_pktgen, dev_cmd_queue_map_min) { + ssize_t len; + + len = write(self->dev_fd, dev_cmd_queue_map_min_0, sizeof(dev_cmd_queue_map_min_0)); + EXPECT_EQ(len, sizeof(dev_cmd_queue_map_min_0)); +} + +TEST_F(proc_net_pktgen, dev_cmd_queue_map_max) { + ssize_t len; + + len = write(self->dev_fd, dev_cmd_queue_map_max_0, sizeof(dev_cmd_queue_map_max_0)); + EXPECT_EQ(len, sizeof(dev_cmd_queue_map_max_0)); +} + +TEST_F(proc_net_pktgen, dev_cmd_mpls) { + ssize_t len; + + len = write(self->dev_fd, dev_cmd_mpls_0, sizeof(dev_cmd_mpls_0)); + EXPECT_EQ(len, sizeof(dev_cmd_mpls_0)); + + len = write(self->dev_fd, dev_cmd_mpls_1, sizeof(dev_cmd_mpls_1)); + EXPECT_EQ(len, sizeof(dev_cmd_mpls_1)); + + len = write(self->dev_fd, dev_cmd_mpls_2, sizeof(dev_cmd_mpls_2)); + EXPECT_EQ(len, sizeof(dev_cmd_mpls_2)); + + len = write(self->dev_fd, dev_cmd_mpls_3, sizeof(dev_cmd_mpls_3)); + EXPECT_EQ(len, -1); + EXPECT_EQ(errno, E2BIG); +} + +TEST_F(proc_net_pktgen, dev_cmd_vlan_id) { + ssize_t len; + + len = write(self->dev_fd, dev_cmd_vlan_id_0, sizeof(dev_cmd_vlan_id_0)); + EXPECT_EQ(len, sizeof(dev_cmd_vlan_id_0)); + + len = write(self->dev_fd, dev_cmd_vlan_p_0, sizeof(dev_cmd_vlan_p_0)); + EXPECT_EQ(len, sizeof(dev_cmd_vlan_p_0)); + + len = write(self->dev_fd, dev_cmd_vlan_cfi_0, sizeof(dev_cmd_vlan_cfi_0)); + EXPECT_EQ(len, sizeof(dev_cmd_vlan_cfi_0)); + + len = write(self->dev_fd, dev_cmd_vlan_id_1, sizeof(dev_cmd_vlan_id_1)); + EXPECT_EQ(len, sizeof(dev_cmd_vlan_id_1)); +} + +TEST_F(proc_net_pktgen, dev_cmd_svlan_id) { + ssize_t len; + + len = write(self->dev_fd, dev_cmd_svlan_id_0, sizeof(dev_cmd_svlan_id_0)); + EXPECT_EQ(len, sizeof(dev_cmd_svlan_id_0)); + + len = write(self->dev_fd, dev_cmd_svlan_p_0, sizeof(dev_cmd_svlan_p_0)); + EXPECT_EQ(len, sizeof(dev_cmd_svlan_p_0)); + + len = write(self->dev_fd, dev_cmd_svlan_cfi_0, sizeof(dev_cmd_svlan_cfi_0)); + EXPECT_EQ(len, sizeof(dev_cmd_svlan_cfi_0)); + + len = write(self->dev_fd, dev_cmd_svlan_id_1, sizeof(dev_cmd_svlan_id_1)); + EXPECT_EQ(len, sizeof(dev_cmd_svlan_id_1)); +} + + +TEST_F(proc_net_pktgen, dev_cmd_tos) { + ssize_t len; + + len = write(self->dev_fd, dev_cmd_tos_0, sizeof(dev_cmd_tos_0)); + EXPECT_EQ(len, sizeof(dev_cmd_tos_0)); + + len = write(self->dev_fd, dev_cmd_tos_1, sizeof(dev_cmd_tos_1)); + EXPECT_EQ(len, sizeof(dev_cmd_tos_1)); + + len = write(self->dev_fd, dev_cmd_tos_2, sizeof(dev_cmd_tos_2)); + EXPECT_EQ(len, sizeof(dev_cmd_tos_2)); +} + + +TEST_F(proc_net_pktgen, dev_cmd_traffic_class) { + ssize_t len; + + len = write(self->dev_fd, dev_cmd_traffic_class_0, sizeof(dev_cmd_traffic_class_0)); + EXPECT_EQ(len, sizeof(dev_cmd_traffic_class_0)); +} + +TEST_F(proc_net_pktgen, dev_cmd_skb_priority) { + ssize_t len; + + len = write(self->dev_fd, dev_cmd_skb_priority_0, sizeof(dev_cmd_skb_priority_0)); + EXPECT_EQ(len, sizeof(dev_cmd_skb_priority_0)); +} + +TEST_HARNESS_MAIN diff --git a/tools/testing/selftests/net/psock_tpacket.c b/tools/testing/selftests/net/psock_tpacket.c index 404a2ce759ab6..221270cee3eaa 100644 --- a/tools/testing/selftests/net/psock_tpacket.c +++ b/tools/testing/selftests/net/psock_tpacket.c @@ -12,7 +12,7 @@ * * Datapath: * Open a pair of packet sockets and send resp. 
receive an a priori known - * packet pattern accross the sockets and check if it was received resp. + * packet pattern across the sockets and check if it was received resp. * sent correctly. Fanout in combination with RX_RING is currently not * tested here. * diff --git a/tools/testing/selftests/net/reuseaddr_ports_exhausted.c b/tools/testing/selftests/net/reuseaddr_ports_exhausted.c index 066efd30e2946..7b9bf8a7bbe1c 100644 --- a/tools/testing/selftests/net/reuseaddr_ports_exhausted.c +++ b/tools/testing/selftests/net/reuseaddr_ports_exhausted.c @@ -112,7 +112,7 @@ TEST(reuseaddr_ports_exhausted_reusable_same_euid) ASSERT_NE(-1, fd[0]) TH_LOG("failed to bind."); if (opts->reuseport[0] && opts->reuseport[1]) { - EXPECT_EQ(-1, fd[1]) TH_LOG("should fail to bind because both sockets succeed to be listened."); + EXPECT_EQ(-1, fd[1]) TH_LOG("should fail to bind because both sockets successfully listened."); } else { EXPECT_NE(-1, fd[1]) TH_LOG("should succeed to bind to connect to different destinations."); } diff --git a/tools/testing/selftests/net/rtnetlink.py b/tools/testing/selftests/net/rtnetlink.py new file mode 100755 index 0000000000000..80950888800be --- /dev/null +++ b/tools/testing/selftests/net/rtnetlink.py @@ -0,0 +1,30 @@ +#!/usr/bin/env python3 +# SPDX-License-Identifier: GPL-2.0 + +from lib.py import ksft_exit, ksft_run, ksft_ge, RtnlAddrFamily +import socket + +IPV4_ALL_HOSTS_MULTICAST = b'\xe0\x00\x00\x01' + +def dump_mcaddr_check(rtnl: RtnlAddrFamily) -> None: + """ + Verify that at least one interface has the IPv4 all-hosts multicast address. + At least the loopback interface should have this address. + """ + + addresses = rtnl.getmaddrs({"ifa-family": socket.AF_INET}, dump=True) + + all_host_multicasts = [ + addr for addr in addresses if addr['ifa-multicast'] == IPV4_ALL_HOSTS_MULTICAST + ] + + ksft_ge(len(all_host_multicasts), 1, + "No interface found with the IPv4 all-hosts multicast address") + +def main() -> None: + rtnl = RtnlAddrFamily() + ksft_run([dump_mcaddr_check], args=(rtnl, )) + ksft_exit() + +if __name__ == "__main__": + main() diff --git a/tools/testing/selftests/net/settings b/tools/testing/selftests/net/settings index ed8418e8217a0..a38764182822e 100644 --- a/tools/testing/selftests/net/settings +++ b/tools/testing/selftests/net/settings @@ -1 +1,2 @@ timeout=3600 +profile=1 diff --git a/tools/testing/selftests/net/setup_veth.sh b/tools/testing/selftests/net/setup_veth.sh index 1f78a87f6f37e..152bf4c657478 100644 --- a/tools/testing/selftests/net/setup_veth.sh +++ b/tools/testing/selftests/net/setup_veth.sh @@ -11,7 +11,8 @@ setup_veth_ns() { local -r ns_mac="$4" [[ -e /var/run/netns/"${ns_name}" ]] || ip netns add "${ns_name}" - echo 1000000 > "/sys/class/net/${ns_dev}/gro_flush_timeout" + echo 200000 > "/sys/class/net/${ns_dev}/gro_flush_timeout" + echo 1 > "/sys/class/net/${ns_dev}/napi_defer_hard_irqs" ip link set dev "${ns_dev}" netns "${ns_name}" mtu 65535 ip -netns "${ns_name}" link set dev "${ns_dev}" up diff --git a/tools/testing/selftests/net/so_rcv_listener.c b/tools/testing/selftests/net/so_rcv_listener.c new file mode 100644 index 0000000000000..bc5841192aa6b --- /dev/null +++ b/tools/testing/selftests/net/so_rcv_listener.c @@ -0,0 +1,168 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifndef SO_RCVPRIORITY +#define SO_RCVPRIORITY 82 +#endif + +struct options { + __u32 val; + int name; + int rcvname; + const char *host; + const char 
*service; +} opt; + +static void __attribute__((noreturn)) usage(const char *bin) +{ + printf("Usage: %s [opts] \n", bin); + printf("Options:\n" + "\t\t-M val Test SO_RCVMARK\n" + "\t\t-P val Test SO_RCVPRIORITY\n" + ""); + exit(EXIT_FAILURE); +} + +static void parse_args(int argc, char *argv[]) +{ + int o; + + while ((o = getopt(argc, argv, "M:P:")) != -1) { + switch (o) { + case 'M': + opt.val = atoi(optarg); + opt.name = SO_MARK; + opt.rcvname = SO_RCVMARK; + break; + case 'P': + opt.val = atoi(optarg); + opt.name = SO_PRIORITY; + opt.rcvname = SO_RCVPRIORITY; + break; + default: + usage(argv[0]); + break; + } + } + + if (optind != argc - 2) + usage(argv[0]); + + opt.host = argv[optind]; + opt.service = argv[optind + 1]; +} + +int main(int argc, char *argv[]) +{ + int err = 0; + int recv_fd = -1; + int ret_value = 0; + __u32 recv_val; + struct cmsghdr *cmsg; + char cbuf[CMSG_SPACE(sizeof(__u32))]; + char recv_buf[CMSG_SPACE(sizeof(__u32))]; + struct iovec iov[1]; + struct msghdr msg; + struct sockaddr_in recv_addr4; + struct sockaddr_in6 recv_addr6; + + parse_args(argc, argv); + + int family = strchr(opt.host, ':') ? AF_INET6 : AF_INET; + + recv_fd = socket(family, SOCK_DGRAM, IPPROTO_UDP); + if (recv_fd < 0) { + perror("Can't open recv socket"); + ret_value = -errno; + goto cleanup; + } + + err = setsockopt(recv_fd, SOL_SOCKET, opt.rcvname, &opt.val, sizeof(opt.val)); + if (err < 0) { + perror("Recv setsockopt error"); + ret_value = -errno; + goto cleanup; + } + + if (family == AF_INET) { + memset(&recv_addr4, 0, sizeof(recv_addr4)); + recv_addr4.sin_family = family; + recv_addr4.sin_port = htons(atoi(opt.service)); + + if (inet_pton(family, opt.host, &recv_addr4.sin_addr) <= 0) { + perror("Invalid IPV4 address"); + ret_value = -errno; + goto cleanup; + } + + err = bind(recv_fd, (struct sockaddr *)&recv_addr4, sizeof(recv_addr4)); + } else { + memset(&recv_addr6, 0, sizeof(recv_addr6)); + recv_addr6.sin6_family = family; + recv_addr6.sin6_port = htons(atoi(opt.service)); + + if (inet_pton(family, opt.host, &recv_addr6.sin6_addr) <= 0) { + perror("Invalid IPV6 address"); + ret_value = -errno; + goto cleanup; + } + + err = bind(recv_fd, (struct sockaddr *)&recv_addr6, sizeof(recv_addr6)); + } + + if (err < 0) { + perror("Recv bind error"); + ret_value = -errno; + goto cleanup; + } + + iov[0].iov_base = recv_buf; + iov[0].iov_len = sizeof(recv_buf); + + memset(&msg, 0, sizeof(msg)); + msg.msg_iov = iov; + msg.msg_iovlen = 1; + msg.msg_control = cbuf; + msg.msg_controllen = sizeof(cbuf); + + err = recvmsg(recv_fd, &msg, 0); + if (err < 0) { + perror("Message receive error"); + ret_value = -errno; + goto cleanup; + } + + for (cmsg = CMSG_FIRSTHDR(&msg); cmsg != NULL; cmsg = CMSG_NXTHDR(&msg, cmsg)) { + if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == opt.name) { + recv_val = *(__u32 *)CMSG_DATA(cmsg); + printf("Received value: %u\n", recv_val); + + if (recv_val != opt.val) { + fprintf(stderr, "Error: expected value: %u, got: %u\n", + opt.val, recv_val); + ret_value = -EINVAL; + } + goto cleanup; + } + } + + fprintf(stderr, "Error: No matching cmsg received\n"); + ret_value = -ENOMSG; + +cleanup: + if (recv_fd >= 0) + close(recv_fd); + + return ret_value; +} diff --git a/tools/testing/selftests/net/tcp_ao/connect-deny.c b/tools/testing/selftests/net/tcp_ao/connect-deny.c index d418162d335f4..93b61e9a36f1f 100644 --- a/tools/testing/selftests/net/tcp_ao/connect-deny.c +++ b/tools/testing/selftests/net/tcp_ao/connect-deny.c @@ -4,6 +4,7 @@ #include "aolib.h" #define fault(type) (inj == 
FAULT_ ## type) +static volatile int sk_pair; static inline int test_add_key_maclen(int sk, const char *key, uint8_t maclen, union tcp_addr in_addr, uint8_t prefix, @@ -34,10 +35,10 @@ static void try_accept(const char *tst_name, unsigned int port, const char *pwd, const char *cnt_name, test_cnt cnt_expected, fault_t inj) { - struct tcp_ao_counters ao_cnt1, ao_cnt2; + struct tcp_counters cnt1, cnt2; uint64_t before_cnt = 0, after_cnt = 0; /* silence GCC */ + test_cnt poll_cnt = (cnt_expected == TEST_CNT_GOOD) ? 0 : cnt_expected; int lsk, err, sk = 0; - time_t timeout; lsk = test_listen_socket(this_ip_addr, port, 1); @@ -46,21 +47,24 @@ static void try_accept(const char *tst_name, unsigned int port, const char *pwd, if (cnt_name) before_cnt = netstat_get_one(cnt_name, NULL); - if (pwd && test_get_tcp_ao_counters(lsk, &ao_cnt1)) - test_error("test_get_tcp_ao_counters()"); + if (pwd && test_get_tcp_counters(lsk, &cnt1)) + test_error("test_get_tcp_counters()"); synchronize_threads(); /* preparations done */ - timeout = fault(TIMEOUT) ? TEST_RETRANSMIT_SEC : TEST_TIMEOUT_SEC; - err = test_wait_fd(lsk, timeout, 0); + err = test_skpair_wait_poll(lsk, 0, poll_cnt, &sk_pair); if (err == -ETIMEDOUT) { + sk_pair = err; if (!fault(TIMEOUT)) - test_fail("timed out for accept()"); + test_fail("%s: timed out for accept()", tst_name); + } else if (err == -EKEYREJECTED) { + if (!fault(KEYREJECT)) + test_fail("%s: key was rejected", tst_name); } else if (err < 0) { - test_error("test_wait_fd()"); + test_error("test_skpair_wait_poll()"); } else { if (fault(TIMEOUT)) - test_fail("ready to accept"); + test_fail("%s: ready to accept", tst_name); sk = accept(lsk, NULL, NULL); if (sk < 0) { @@ -72,13 +76,13 @@ static void try_accept(const char *tst_name, unsigned int port, const char *pwd, } synchronize_threads(); /* before counter checks */ - if (pwd && test_get_tcp_ao_counters(lsk, &ao_cnt2)) - test_error("test_get_tcp_ao_counters()"); + if (pwd && test_get_tcp_counters(lsk, &cnt2)) + test_error("test_get_tcp_counters()"); close(lsk); if (pwd) - test_tcp_ao_counters_cmp(tst_name, &ao_cnt1, &ao_cnt2, cnt_expected); + test_assert_counters(tst_name, &cnt1, &cnt2, cnt_expected); if (!cnt_name) goto out; @@ -109,7 +113,7 @@ static void *server_fn(void *arg) try_accept("Non-AO server + AO client", port++, NULL, this_ip_dest, -1, 100, 100, 0, - "TCPAOKeyNotFound", 0, FAULT_TIMEOUT); + "TCPAOKeyNotFound", TEST_CNT_NS_KEY_NOT_FOUND, FAULT_TIMEOUT); try_accept("AO server + Non-AO client", port++, DEFAULT_TEST_PASSWORD, this_ip_dest, -1, 100, 100, 0, @@ -135,8 +139,9 @@ static void *server_fn(void *arg) wrong_addr, -1, 100, 100, 0, "TCPAOKeyNotFound", TEST_CNT_AO_KEY_NOT_FOUND, FAULT_TIMEOUT); + /* Key rejected by the other side, failing short through skpair */ try_accept("Client: Wrong addr", port++, NULL, - this_ip_dest, -1, 100, 100, 0, NULL, 0, FAULT_TIMEOUT); + this_ip_dest, -1, 100, 100, 0, NULL, 0, FAULT_KEYREJECT); try_accept("rcv id != snd id", port++, DEFAULT_TEST_PASSWORD, this_ip_dest, -1, 200, 100, 0, @@ -163,8 +168,7 @@ static void try_connect(const char *tst_name, unsigned int port, uint8_t sndid, uint8_t rcvid, test_cnt cnt_expected, fault_t inj) { - struct tcp_ao_counters ao_cnt1, ao_cnt2; - time_t timeout; + struct tcp_counters cnt1, cnt2; int sk, ret; sk = socket(test_family, SOCK_STREAM, IPPROTO_TCP); @@ -174,16 +178,15 @@ static void try_connect(const char *tst_name, unsigned int port, if (pwd && test_add_key(sk, pwd, addr, prefix, sndid, rcvid)) test_error("setsockopt(TCP_AO_ADD_KEY)"); - if (pwd && 
test_get_tcp_ao_counters(sk, &ao_cnt1)) - test_error("test_get_tcp_ao_counters()"); + if (pwd && test_get_tcp_counters(sk, &cnt1)) + test_error("test_get_tcp_counters()"); synchronize_threads(); /* preparations done */ - timeout = fault(TIMEOUT) ? TEST_RETRANSMIT_SEC : TEST_TIMEOUT_SEC; - ret = _test_connect_socket(sk, this_ip_dest, port, timeout); - + ret = test_skpair_connect_poll(sk, this_ip_dest, port, cnt_expected, &sk_pair); synchronize_threads(); /* before counter checks */ if (ret < 0) { + sk_pair = ret; if (fault(KEYREJECT) && ret == -EKEYREJECTED) { test_ok("%s: connect() was prevented", tst_name); } else if (ret == -ETIMEDOUT && fault(TIMEOUT)) { @@ -202,9 +205,11 @@ static void try_connect(const char *tst_name, unsigned int port, else test_ok("%s: connected", tst_name); if (pwd && ret > 0) { - if (test_get_tcp_ao_counters(sk, &ao_cnt2)) - test_error("test_get_tcp_ao_counters()"); - test_tcp_ao_counters_cmp(tst_name, &ao_cnt1, &ao_cnt2, cnt_expected); + if (test_get_tcp_counters(sk, &cnt2)) + test_error("test_get_tcp_counters()"); + test_assert_counters(tst_name, &cnt1, &cnt2, cnt_expected); + } else if (pwd) { + test_tcp_counters_free(&cnt1); } out: synchronize_threads(); /* close() */ @@ -241,6 +246,11 @@ static void *client_fn(void *arg) try_connect("Wrong rcv id", port++, DEFAULT_TEST_PASSWORD, this_ip_dest, -1, 100, 100, 0, FAULT_TIMEOUT); + /* + * XXX: The test doesn't increase any counters, see tcp_make_synack(). + * Potentially, it can be speed up by setting sk_pair = -ETIMEDOUT + * but the price would be increased complexity of the tracer thread. + */ trace_ao_event_sk_expect(TCP_AO_SYNACK_NO_KEY, this_ip_dest, addr_any, port, 0, 100, 100); try_connect("Wrong snd id", port++, DEFAULT_TEST_PASSWORD, diff --git a/tools/testing/selftests/net/tcp_ao/connect.c b/tools/testing/selftests/net/tcp_ao/connect.c index f1d8d29e393f6..340f00e979eaf 100644 --- a/tools/testing/selftests/net/tcp_ao/connect.c +++ b/tools/testing/selftests/net/tcp_ao/connect.c @@ -35,7 +35,7 @@ static void *client_fn(void *arg) uint64_t before_aogood, after_aogood; const size_t nr_packets = 20; struct netstat *ns_before, *ns_after; - struct tcp_ao_counters ao1, ao2; + struct tcp_counters ao1, ao2; if (sk < 0) test_error("socket()"); @@ -50,18 +50,18 @@ static void *client_fn(void *arg) ns_before = netstat_read(); before_aogood = netstat_get(ns_before, "TCPAOGood", NULL); - if (test_get_tcp_ao_counters(sk, &ao1)) - test_error("test_get_tcp_ao_counters()"); + if (test_get_tcp_counters(sk, &ao1)) + test_error("test_get_tcp_counters()"); - if (test_client_verify(sk, 100, nr_packets, TEST_TIMEOUT_SEC)) { + if (test_client_verify(sk, 100, nr_packets)) { test_fail("verify failed"); return NULL; } ns_after = netstat_read(); after_aogood = netstat_get(ns_after, "TCPAOGood", NULL); - if (test_get_tcp_ao_counters(sk, &ao2)) - test_error("test_get_tcp_ao_counters()"); + if (test_get_tcp_counters(sk, &ao2)) + test_error("test_get_tcp_counters()"); netstat_print_diff(ns_before, ns_after); netstat_free(ns_before); netstat_free(ns_after); @@ -71,14 +71,14 @@ static void *client_fn(void *arg) nr_packets, after_aogood, before_aogood); return NULL; } - if (test_tcp_ao_counters_cmp("connect", &ao1, &ao2, TEST_CNT_GOOD)) + if (test_assert_counters("connect", &ao1, &ao2, TEST_CNT_GOOD)) return NULL; test_ok("connect TCPAOGood %" PRIu64 "/%" PRIu64 "/%" PRIu64 " => %" PRIu64 "/%" PRIu64 "/%" PRIu64 ", sent %zu", - before_aogood, ao1.ao_info_pkt_good, - ao1.key_cnts[0].pkt_good, - after_aogood, ao2.ao_info_pkt_good, - 
ao2.key_cnts[0].pkt_good, + before_aogood, ao1.ao.ao_info_pkt_good, + ao1.ao.key_cnts[0].pkt_good, + after_aogood, ao2.ao.ao_info_pkt_good, + ao2.ao.key_cnts[0].pkt_good, nr_packets); return NULL; } diff --git a/tools/testing/selftests/net/tcp_ao/icmps-discard.c b/tools/testing/selftests/net/tcp_ao/icmps-discard.c index a1614f0d8c448..85c1a1e958c68 100644 --- a/tools/testing/selftests/net/tcp_ao/icmps-discard.c +++ b/tools/testing/selftests/net/tcp_ao/icmps-discard.c @@ -53,7 +53,7 @@ static void serve_interfered(int sk) ssize_t test_quota = packet_size * packets_nr * 10; uint64_t dest_unreach_a, dest_unreach_b; uint64_t icmp_ignored_a, icmp_ignored_b; - struct tcp_ao_counters ao_cnt1, ao_cnt2; + struct tcp_counters cnt1, cnt2; bool counter_not_found; struct netstat *ns_after, *ns_before; ssize_t bytes; @@ -61,16 +61,16 @@ static void serve_interfered(int sk) ns_before = netstat_read(); dest_unreach_a = netstat_get(ns_before, dst_unreach, NULL); icmp_ignored_a = netstat_get(ns_before, tcpao_icmps, NULL); - if (test_get_tcp_ao_counters(sk, &ao_cnt1)) - test_error("test_get_tcp_ao_counters()"); + if (test_get_tcp_counters(sk, &cnt1)) + test_error("test_get_tcp_counters()"); bytes = test_server_run(sk, test_quota, 0); ns_after = netstat_read(); netstat_print_diff(ns_before, ns_after); dest_unreach_b = netstat_get(ns_after, dst_unreach, NULL); icmp_ignored_b = netstat_get(ns_after, tcpao_icmps, &counter_not_found); - if (test_get_tcp_ao_counters(sk, &ao_cnt2)) - test_error("test_get_tcp_ao_counters()"); + if (test_get_tcp_counters(sk, &cnt2)) + test_error("test_get_tcp_counters()"); netstat_free(ns_before); netstat_free(ns_after); @@ -91,9 +91,9 @@ static void serve_interfered(int sk) return; } #ifdef TEST_ICMPS_ACCEPT - test_tcp_ao_counters_cmp(NULL, &ao_cnt1, &ao_cnt2, TEST_CNT_GOOD); + test_assert_counters(NULL, &cnt1, &cnt2, TEST_CNT_GOOD); #else - test_tcp_ao_counters_cmp(NULL, &ao_cnt1, &ao_cnt2, TEST_CNT_GOOD | TEST_CNT_AO_DROPPED_ICMP); + test_assert_counters(NULL, &cnt1, &cnt2, TEST_CNT_GOOD | TEST_CNT_AO_DROPPED_ICMP); #endif if (icmp_ignored_a >= icmp_ignored_b) { test_icmps_fail("%s counter didn't change: %" PRIu64 " >= %" PRIu64, @@ -395,7 +395,6 @@ static void icmp_interfere(const size_t nr, uint32_t rcv_nxt, void *src, void *d static void send_interfered(int sk) { - const unsigned int timeout = TEST_TIMEOUT_SEC; struct sockaddr_in6 src, dst; socklen_t addr_sz; @@ -409,7 +408,7 @@ static void send_interfered(int sk) while (1) { uint32_t rcv_nxt; - if (test_client_verify(sk, packet_size, packets_nr, timeout)) { + if (test_client_verify(sk, packet_size, packets_nr)) { test_fail("client: connection is broken"); return; } diff --git a/tools/testing/selftests/net/tcp_ao/key-management.c b/tools/testing/selftests/net/tcp_ao/key-management.c index d4385b52c10b6..69d9a7a05d5c1 100644 --- a/tools/testing/selftests/net/tcp_ao/key-management.c +++ b/tools/testing/selftests/net/tcp_ao/key-management.c @@ -629,11 +629,11 @@ static int key_collection_socket(bool server, unsigned int port) } static void verify_counters(const char *tst_name, bool is_listen_sk, bool server, - struct tcp_ao_counters *a, struct tcp_ao_counters *b) + struct tcp_counters *a, struct tcp_counters *b) { unsigned int i; - __test_tcp_ao_counters_cmp(tst_name, a, b, TEST_CNT_GOOD); + test_assert_counters_sk(tst_name, a, b, TEST_CNT_GOOD); for (i = 0; i < collection.nr_keys; i++) { struct test_key *key = &collection.keys[i]; @@ -652,12 +652,12 @@ static void verify_counters(const char *tst_name, bool is_listen_sk, bool 
server rx_cnt_expected = key->used_on_server_tx; } - test_tcp_ao_key_counters_cmp(tst_name, a, b, - rx_cnt_expected ? TEST_CNT_KEY_GOOD : 0, - sndid, rcvid); + test_assert_counters_key(tst_name, &a->ao, &b->ao, + rx_cnt_expected ? TEST_CNT_KEY_GOOD : 0, + sndid, rcvid); } - test_tcp_ao_counters_free(a); - test_tcp_ao_counters_free(b); + test_tcp_counters_free(a); + test_tcp_counters_free(b); test_ok("%s: passed counters checks", tst_name); } @@ -791,17 +791,17 @@ static void verify_keys(const char *tst_name, int sk, } static int start_server(const char *tst_name, unsigned int port, size_t quota, - struct tcp_ao_counters *begin, + struct tcp_counters *begin, unsigned int current_index, unsigned int rnext_index) { - struct tcp_ao_counters lsk_c1, lsk_c2; + struct tcp_counters lsk_c1, lsk_c2; ssize_t bytes; int sk, lsk; synchronize_threads(); /* 1: key collection initialized */ lsk = key_collection_socket(true, port); - if (test_get_tcp_ao_counters(lsk, &lsk_c1)) - test_error("test_get_tcp_ao_counters()"); + if (test_get_tcp_counters(lsk, &lsk_c1)) + test_error("test_get_tcp_counters()"); synchronize_threads(); /* 2: MKTs added => connect() */ if (test_wait_fd(lsk, TEST_TIMEOUT_SEC, 0)) test_error("test_wait_fd()"); @@ -809,12 +809,12 @@ static int start_server(const char *tst_name, unsigned int port, size_t quota, sk = accept(lsk, NULL, NULL); if (sk < 0) test_error("accept()"); - if (test_get_tcp_ao_counters(sk, begin)) - test_error("test_get_tcp_ao_counters()"); + if (test_get_tcp_counters(sk, begin)) + test_error("test_get_tcp_counters()"); synchronize_threads(); /* 3: accepted => send data */ - if (test_get_tcp_ao_counters(lsk, &lsk_c2)) - test_error("test_get_tcp_ao_counters()"); + if (test_get_tcp_counters(lsk, &lsk_c2)) + test_error("test_get_tcp_counters()"); verify_keys(tst_name, lsk, true, true); close(lsk); @@ -830,12 +830,12 @@ static int start_server(const char *tst_name, unsigned int port, size_t quota, } static void end_server(const char *tst_name, int sk, - struct tcp_ao_counters *begin) + struct tcp_counters *begin) { - struct tcp_ao_counters end; + struct tcp_counters end; - if (test_get_tcp_ao_counters(sk, &end)) - test_error("test_get_tcp_ao_counters()"); + if (test_get_tcp_counters(sk, &end)) + test_error("test_get_tcp_counters()"); verify_keys(tst_name, sk, false, true); synchronize_threads(); /* 4: verified => closed */ @@ -848,7 +848,7 @@ static void end_server(const char *tst_name, int sk, static void try_server_run(const char *tst_name, unsigned int port, size_t quota, unsigned int current_index, unsigned int rnext_index) { - struct tcp_ao_counters tmp; + struct tcp_counters tmp; int sk; sk = start_server(tst_name, port, quota, &tmp, @@ -860,7 +860,7 @@ static void server_rotations(const char *tst_name, unsigned int port, size_t quota, unsigned int rotations, unsigned int current_index, unsigned int rnext_index) { - struct tcp_ao_counters tmp; + struct tcp_counters tmp; unsigned int i; int sk; @@ -886,7 +886,7 @@ static void server_rotations(const char *tst_name, unsigned int port, static int run_client(const char *tst_name, unsigned int port, unsigned int nr_keys, int current_index, int rnext_index, - struct tcp_ao_counters *before, + struct tcp_counters *before, const size_t msg_sz, const size_t msg_nr) { int sk; @@ -904,8 +904,8 @@ static int run_client(const char *tst_name, unsigned int port, if (test_set_key(sk, sndid, rcvid)) test_error("failed to set current/rnext keys"); } - if (before && test_get_tcp_ao_counters(sk, before)) - 
test_error("test_get_tcp_ao_counters()"); + if (before && test_get_tcp_counters(sk, before)) + test_error("test_get_tcp_counters()"); synchronize_threads(); /* 2: MKTs added => connect() */ if (test_connect_socket(sk, this_ip_dest, port++) <= 0) @@ -918,11 +918,11 @@ static int run_client(const char *tst_name, unsigned int port, collection.keys[rnext_index].used_on_server_tx = 1; synchronize_threads(); /* 3: accepted => send data */ - if (test_client_verify(sk, msg_sz, msg_nr, TEST_TIMEOUT_SEC)) { + if (test_client_verify(sk, msg_sz, msg_nr)) { test_fail("verify failed"); close(sk); if (before) - test_tcp_ao_counters_free(before); + test_tcp_counters_free(before); return -1; } @@ -931,7 +931,7 @@ static int run_client(const char *tst_name, unsigned int port, static int start_client(const char *tst_name, unsigned int port, unsigned int nr_keys, int current_index, int rnext_index, - struct tcp_ao_counters *before, + struct tcp_counters *before, const size_t msg_sz, const size_t msg_nr) { if (init_default_key_collection(nr_keys, true)) @@ -943,9 +943,9 @@ static int start_client(const char *tst_name, unsigned int port, static void end_client(const char *tst_name, int sk, unsigned int nr_keys, int current_index, int rnext_index, - struct tcp_ao_counters *start) + struct tcp_counters *start) { - struct tcp_ao_counters end; + struct tcp_counters end; /* Some application may become dependent on this kernel choice */ if (current_index < 0) @@ -955,8 +955,8 @@ static void end_client(const char *tst_name, int sk, unsigned int nr_keys, verify_current_rnext(tst_name, sk, collection.keys[current_index].client_keyid, collection.keys[rnext_index].server_keyid); - if (start && test_get_tcp_ao_counters(sk, &end)) - test_error("test_get_tcp_ao_counters()"); + if (start && test_get_tcp_counters(sk, &end)) + test_error("test_get_tcp_counters()"); verify_keys(tst_name, sk, false, false); synchronize_threads(); /* 4: verify => closed */ close(sk); @@ -1016,7 +1016,7 @@ static void try_unmatched_keys(int sk, int *rnext_index, unsigned int port) trace_ao_event_expect(TCP_AO_RNEXT_REQUEST, this_ip_addr, this_ip_dest, -1, port, 0, -1, -1, -1, -1, -1, -1, key->server_keyid, -1); - if (test_client_verify(sk, msg_len, nr_packets, TEST_TIMEOUT_SEC)) + if (test_client_verify(sk, msg_len, nr_packets)) test_fail("verify failed"); *rnext_index = i; } @@ -1048,7 +1048,7 @@ static void check_current_back(const char *tst_name, unsigned int port, unsigned int current_index, unsigned int rnext_index, unsigned int rotate_to_index) { - struct tcp_ao_counters tmp; + struct tcp_counters tmp; int sk; sk = start_client(tst_name, port, nr_keys, current_index, rnext_index, @@ -1061,7 +1061,7 @@ static void check_current_back(const char *tst_name, unsigned int port, port, -1, 0, -1, -1, -1, -1, -1, collection.keys[rotate_to_index].client_keyid, collection.keys[current_index].client_keyid, -1); - if (test_client_verify(sk, msg_len, nr_packets, TEST_TIMEOUT_SEC)) + if (test_client_verify(sk, msg_len, nr_packets)) test_fail("verify failed"); /* There is a race here: between setting the current_key with * setsockopt(TCP_AO_INFO) and starting to send some data - there @@ -1081,7 +1081,7 @@ static void roll_over_keys(const char *tst_name, unsigned int port, unsigned int nr_keys, unsigned int rotations, unsigned int current_index, unsigned int rnext_index) { - struct tcp_ao_counters tmp; + struct tcp_counters tmp; unsigned int i; int sk; @@ -1099,10 +1099,10 @@ static void roll_over_keys(const char *tst_name, unsigned int port, 
collection.keys[i].server_keyid, -1); if (test_set_key(sk, -1, collection.keys[i].server_keyid)) test_error("Can't change the Rnext key"); - if (test_client_verify(sk, msg_len, nr_packets, TEST_TIMEOUT_SEC)) { + if (test_client_verify(sk, msg_len, nr_packets)) { test_fail("verify failed"); close(sk); - test_tcp_ao_counters_free(&tmp); + test_tcp_counters_free(&tmp); return; } verify_current_rnext(tst_name, sk, -1, @@ -1116,7 +1116,7 @@ static void roll_over_keys(const char *tst_name, unsigned int port, static void try_client_run(const char *tst_name, unsigned int port, unsigned int nr_keys, int current_index, int rnext_index) { - struct tcp_ao_counters tmp; + struct tcp_counters tmp; int sk; sk = start_client(tst_name, port, nr_keys, current_index, rnext_index, diff --git a/tools/testing/selftests/net/tcp_ao/lib/aolib.h b/tools/testing/selftests/net/tcp_ao/lib/aolib.h index 5db2f65cddc4e..ebb2899c12fe0 100644 --- a/tools/testing/selftests/net/tcp_ao/lib/aolib.h +++ b/tools/testing/selftests/net/tcp_ao/lib/aolib.h @@ -289,7 +289,7 @@ extern int link_set_up(const char *intf); extern const unsigned int test_server_port; extern int test_wait_fd(int sk, time_t sec, bool write); extern int __test_connect_socket(int sk, const char *device, - void *addr, size_t addr_sz, time_t timeout); + void *addr, size_t addr_sz, bool async); extern int __test_listen_socket(int backlog, void *addr, size_t addr_sz); static inline int test_listen_socket(const union tcp_addr taddr, @@ -331,25 +331,26 @@ static inline int test_listen_socket(const union tcp_addr taddr, * If set to 0 - kernel will try to retransmit SYN number of times, set in * /proc/sys/net/ipv4/tcp_syn_retries * By default set to 1 to make tests pass faster on non-busy machine. + * [in process of removal, don't use in new tests] */ #ifndef TEST_RETRANSMIT_SEC #define TEST_RETRANSMIT_SEC 1 #endif static inline int _test_connect_socket(int sk, const union tcp_addr taddr, - unsigned int port, time_t timeout) + unsigned int port, bool async) { sockaddr_af addr; tcp_addr_to_sockaddr_in(&addr, &taddr, htons(port)); return __test_connect_socket(sk, veth_name, - (void *)&addr, sizeof(addr), timeout); + (void *)&addr, sizeof(addr), async); } static inline int test_connect_socket(int sk, const union tcp_addr taddr, unsigned int port) { - return _test_connect_socket(sk, taddr, port, TEST_TIMEOUT_SEC); + return _test_connect_socket(sk, taddr, port, false); } extern int __test_set_md5(int sk, void *addr, size_t addr_sz, @@ -483,10 +484,7 @@ static inline int test_set_ao_flags(int sk, bool ao_required, bool accept_icmps) } extern ssize_t test_server_run(int sk, ssize_t quota, time_t timeout_sec); -extern ssize_t test_client_loop(int sk, char *buf, size_t buf_sz, - const size_t msg_len, time_t timeout_sec); -extern int test_client_verify(int sk, const size_t msg_len, const size_t nr, - time_t timeout_sec); +extern int test_client_verify(int sk, const size_t msg_len, const size_t nr); struct tcp_ao_key_counters { uint8_t sndid; @@ -512,7 +510,15 @@ struct tcp_ao_counters { size_t nr_keys; struct tcp_ao_key_counters *key_cnts; }; -extern int test_get_tcp_ao_counters(int sk, struct tcp_ao_counters *out); + +struct tcp_counters { + struct tcp_ao_counters ao; + uint64_t netns_md5_notfound; + uint64_t netns_md5_unexpected; + uint64_t netns_md5_failure; +}; + +extern int test_get_tcp_counters(int sk, struct tcp_counters *out); #define TEST_CNT_KEY_GOOD BIT(0) #define TEST_CNT_KEY_BAD BIT(1) @@ -526,8 +532,31 @@ extern int test_get_tcp_ao_counters(int sk, struct 
tcp_ao_counters *out); #define TEST_CNT_NS_KEY_NOT_FOUND BIT(9) #define TEST_CNT_NS_AO_REQUIRED BIT(10) #define TEST_CNT_NS_DROPPED_ICMP BIT(11) +#define TEST_CNT_NS_MD5_NOT_FOUND BIT(12) +#define TEST_CNT_NS_MD5_UNEXPECTED BIT(13) +#define TEST_CNT_NS_MD5_FAILURE BIT(14) typedef uint16_t test_cnt; +#define _for_each_counter(f) \ +do { \ + /* per-netns */ \ + f(ao.netns_ao_good, TEST_CNT_NS_GOOD); \ + f(ao.netns_ao_bad, TEST_CNT_NS_BAD); \ + f(ao.netns_ao_key_not_found, TEST_CNT_NS_KEY_NOT_FOUND); \ + f(ao.netns_ao_required, TEST_CNT_NS_AO_REQUIRED); \ + f(ao.netns_ao_dropped_icmp, TEST_CNT_NS_DROPPED_ICMP); \ + /* per-socket */ \ + f(ao.ao_info_pkt_good, TEST_CNT_SOCK_GOOD); \ + f(ao.ao_info_pkt_bad, TEST_CNT_SOCK_BAD); \ + f(ao.ao_info_pkt_key_not_found, TEST_CNT_SOCK_KEY_NOT_FOUND); \ + f(ao.ao_info_pkt_ao_required, TEST_CNT_SOCK_AO_REQUIRED); \ + f(ao.ao_info_pkt_dropped_icmp, TEST_CNT_SOCK_DROPPED_ICMP); \ + /* non-AO */ \ + f(netns_md5_notfound, TEST_CNT_NS_MD5_NOT_FOUND); \ + f(netns_md5_unexpected, TEST_CNT_NS_MD5_UNEXPECTED); \ + f(netns_md5_failure, TEST_CNT_NS_MD5_FAILURE); \ +} while (0) + #define TEST_CNT_AO_GOOD (TEST_CNT_SOCK_GOOD | TEST_CNT_NS_GOOD) #define TEST_CNT_AO_BAD (TEST_CNT_SOCK_BAD | TEST_CNT_NS_BAD) #define TEST_CNT_AO_KEY_NOT_FOUND (TEST_CNT_SOCK_KEY_NOT_FOUND | \ @@ -539,34 +568,71 @@ typedef uint16_t test_cnt; #define TEST_CNT_GOOD (TEST_CNT_KEY_GOOD | TEST_CNT_AO_GOOD) #define TEST_CNT_BAD (TEST_CNT_KEY_BAD | TEST_CNT_AO_BAD) -extern int __test_tcp_ao_counters_cmp(const char *tst_name, - struct tcp_ao_counters *before, struct tcp_ao_counters *after, +extern test_cnt test_cmp_counters(struct tcp_counters *before, + struct tcp_counters *after); +extern int test_assert_counters_sk(const char *tst_name, + struct tcp_counters *before, struct tcp_counters *after, test_cnt expected); -extern int test_tcp_ao_key_counters_cmp(const char *tst_name, +extern int test_assert_counters_key(const char *tst_name, struct tcp_ao_counters *before, struct tcp_ao_counters *after, test_cnt expected, int sndid, int rcvid); -extern void test_tcp_ao_counters_free(struct tcp_ao_counters *cnts); +extern void test_tcp_counters_free(struct tcp_counters *cnts); + +/* + * Polling for netns and socket counters during select()/connect() and also + * during client/server messaging. Instead of a constant timeout on the + * underlying select(), check the counters and return early. This lets tests + * that expect a timeout pass without waiting out the full fixed timeout + * (a test speed-up). Previously, shorter timeouts were used for tests that + * were expected to time out, but that led to sporadic false positives in the + * counter checks, as one-second timeouts aren't enough for a TCP retransmit. + * + * The two sides of the socketpair (client/server) should synchronize + * failures through the shared variable *err, so that each can detect the + * other side's failure.
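 + * + * A minimal usage sketch (illustrative only, not part of the API contract; it mirrors how try_connect() and try_accept() in unsigned-md5.c pair up over a shared "static volatile int sk_pair" used as the *err variable): + * + * // client thread: connect() and poll for the expected counters + * ret = test_skpair_connect_poll(sk, this_ip_dest, port, + * cnt_expected, &sk_pair); + * if (ret < 0) + * sk_pair = ret; // let the server thread fail early, too + * + * // server thread: poll the listening socket the same way + * err = test_skpair_wait_poll(lsk, 0, poll_cnt, &sk_pair); + * + * Note that both helpers call synchronize_threads() internally to pair up + * with the peer thread before polling, so client and server must enter + * them in lockstep.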
+ */ +extern int test_skpair_wait_poll(int sk, bool write, test_cnt cond, + volatile int *err); +extern int _test_skpair_connect_poll(int sk, const char *device, + void *addr, size_t addr_sz, + test_cnt cond, volatile int *err); +static inline int test_skpair_connect_poll(int sk, const union tcp_addr taddr, + unsigned int port, + test_cnt cond, volatile int *err) +{ + sockaddr_af addr; + + tcp_addr_to_sockaddr_in(&addr, &taddr, htons(port)); + return _test_skpair_connect_poll(sk, veth_name, + (void *)&addr, sizeof(addr), cond, err); +} + +extern int test_skpair_client(int sk, const size_t msg_len, const size_t nr, + test_cnt cond, volatile int *err); +extern int test_skpair_server(int sk, ssize_t quota, + test_cnt cond, volatile int *err); + /* - * Frees buffers allocated in test_get_tcp_ao_counters(). + * Frees buffers allocated in test_get_tcp_counters(). * The function doesn't expect new keys or keys removed between calls - * to test_get_tcp_ao_counters(). Check key counters manually if they + * to test_get_tcp_counters(). Check key counters manually if they * may change. */ -static inline int test_tcp_ao_counters_cmp(const char *tst_name, - struct tcp_ao_counters *before, - struct tcp_ao_counters *after, - test_cnt expected) +static inline int test_assert_counters(const char *tst_name, + struct tcp_counters *before, + struct tcp_counters *after, + test_cnt expected) { int ret; - ret = __test_tcp_ao_counters_cmp(tst_name, before, after, expected); + ret = test_assert_counters_sk(tst_name, before, after, expected); if (ret) goto out; - ret = test_tcp_ao_key_counters_cmp(tst_name, before, after, - expected, -1, -1); + ret = test_assert_counters_key(tst_name, &before->ao, &after->ao, + expected, -1, -1); out: - test_tcp_ao_counters_free(before); - test_tcp_ao_counters_free(after); + test_tcp_counters_free(before); + test_tcp_counters_free(after); return ret; } diff --git a/tools/testing/selftests/net/tcp_ao/lib/ftrace-tcp.c b/tools/testing/selftests/net/tcp_ao/lib/ftrace-tcp.c index 24380c68fec6b..27403f8750547 100644 --- a/tools/testing/selftests/net/tcp_ao/lib/ftrace-tcp.c +++ b/tools/testing/selftests/net/tcp_ao/lib/ftrace-tcp.c @@ -427,11 +427,8 @@ static void dump_trace_event(struct expected_trace_point *e) test_print("trace event filter %s [%s:%d => %s:%d, L3index %d, flags: %s%s%s%s%s, keyid: %d, rnext: %d, maclen: %d, sne: %d] = %zu", trace_event_names[e->type], src, e->src_port, dst, e->dst_port, e->L3index, - (e->fin > 0) ? "F" : (e->fin == 0) ? "!F" : "", - (e->syn > 0) ? "S" : (e->syn == 0) ? "!S" : "", - (e->rst > 0) ? "R" : (e->rst == 0) ? "!R" : "", - (e->psh > 0) ? "P" : (e->psh == 0) ? "!P" : "", - (e->ack > 0) ? "." : (e->ack == 0) ? "!." : "", + e->fin ? "F" : "", e->syn ? "S" : "", e->rst ? "R" : "", + e->psh ? "P" : "", e->ack ? "." 
: "", e->keyid, e->rnext, e->maclen, e->sne, e->matched); } diff --git a/tools/testing/selftests/net/tcp_ao/lib/sock.c b/tools/testing/selftests/net/tcp_ao/lib/sock.c index 0ffda966c677b..ef8e9031d47a3 100644 --- a/tools/testing/selftests/net/tcp_ao/lib/sock.c +++ b/tools/testing/selftests/net/tcp_ao/lib/sock.c @@ -34,10 +34,8 @@ int __test_listen_socket(int backlog, void *addr, size_t addr_sz) return sk; } -int test_wait_fd(int sk, time_t sec, bool write) +static int __test_wait_fd(int sk, struct timeval *tv, bool write) { - struct timeval tv = { .tv_sec = sec }; - struct timeval *ptv = NULL; fd_set fds, efds; int ret; socklen_t slen = sizeof(ret); @@ -47,14 +45,11 @@ int test_wait_fd(int sk, time_t sec, bool write) FD_ZERO(&efds); FD_SET(sk, &efds); - if (sec) - ptv = &tv; - errno = 0; if (write) - ret = select(sk + 1, NULL, &fds, &efds, ptv); + ret = select(sk + 1, NULL, &fds, &efds, tv); else - ret = select(sk + 1, &fds, NULL, &efds, ptv); + ret = select(sk + 1, &fds, NULL, &efds, tv); if (ret < 0) return -errno; if (ret == 0) { @@ -69,8 +64,54 @@ int test_wait_fd(int sk, time_t sec, bool write) return 0; } +int test_wait_fd(int sk, time_t sec, bool write) +{ + struct timeval tv = { .tv_sec = sec, }; + + return __test_wait_fd(sk, sec ? &tv : NULL, write); +} + +static bool __skpair_poll_should_stop(int sk, struct tcp_counters *c, + test_cnt condition) +{ + struct tcp_counters c2; + test_cnt diff; + + if (test_get_tcp_counters(sk, &c2)) + test_error("test_get_tcp_counters()"); + + diff = test_cmp_counters(c, &c2); + test_tcp_counters_free(&c2); + return (diff & condition) == condition; +} + +/* How often wake up and check netns counters & paired (*err) */ +#define POLL_USEC 150 +static int __test_skpair_poll(int sk, bool write, uint64_t timeout, + struct tcp_counters *c, test_cnt cond, + volatile int *err) +{ + uint64_t t; + + for (t = 0; t <= timeout * 1000000; t += POLL_USEC) { + struct timeval tv = { .tv_usec = POLL_USEC, }; + int ret; + + ret = __test_wait_fd(sk, &tv, write); + if (ret != -ETIMEDOUT) + return ret; + if (c && cond && __skpair_poll_should_stop(sk, c, cond)) + break; + if (err && *err) + return *err; + } + if (err) + *err = -ETIMEDOUT; + return -ETIMEDOUT; +} + int __test_connect_socket(int sk, const char *device, - void *addr, size_t addr_sz, time_t timeout) + void *addr, size_t addr_sz, bool async) { long flags; int err; @@ -82,15 +123,6 @@ int __test_connect_socket(int sk, const char *device, test_error("setsockopt(SO_BINDTODEVICE, %s)", device); } - if (!timeout) { - err = connect(sk, addr, addr_sz); - if (err) { - err = -errno; - goto out; - } - return 0; - } - flags = fcntl(sk, F_GETFL); if ((flags < 0) || (fcntl(sk, F_SETFL, flags | O_NONBLOCK) < 0)) test_error("fcntl()"); @@ -100,9 +132,9 @@ int __test_connect_socket(int sk, const char *device, err = -errno; goto out; } - if (timeout < 0) + if (async) return sk; - err = test_wait_fd(sk, timeout, 1); + err = test_wait_fd(sk, TEST_TIMEOUT_SEC, 1); if (err) goto out; } @@ -113,6 +145,45 @@ int __test_connect_socket(int sk, const char *device, return err; } +int test_skpair_wait_poll(int sk, bool write, + test_cnt cond, volatile int *err) +{ + struct tcp_counters c; + int ret; + + *err = 0; + if (test_get_tcp_counters(sk, &c)) + test_error("test_get_tcp_counters()"); + synchronize_threads(); /* 1: init skpair & read nscounters */ + + ret = __test_skpair_poll(sk, write, TEST_TIMEOUT_SEC, &c, cond, err); + test_tcp_counters_free(&c); + return ret; +} + +int _test_skpair_connect_poll(int sk, const char *device, + void 
*addr, size_t addr_sz, + test_cnt condition, volatile int *err) +{ + struct tcp_counters c; + int ret; + + *err = 0; + if (test_get_tcp_counters(sk, &c)) + test_error("test_get_tcp_counters()"); + synchronize_threads(); /* 1: init skpair & read nscounters */ + ret = __test_connect_socket(sk, device, addr, addr_sz, true); + if (ret < 0) { + test_tcp_counters_free(&c); + return (*err = ret); + } + ret = __test_skpair_poll(sk, 1, TEST_TIMEOUT_SEC, &c, condition, err); + if (ret < 0) + close(sk); + test_tcp_counters_free(&c); + return ret; +} + int __test_set_md5(int sk, void *addr, size_t addr_sz, uint8_t prefix, int vrf, const char *password) { @@ -333,12 +404,12 @@ do { \ return 0; } -int test_get_tcp_ao_counters(int sk, struct tcp_ao_counters *out) +int test_get_tcp_counters(int sk, struct tcp_counters *out) { struct tcp_ao_getsockopt *key_dump; socklen_t key_dump_sz = sizeof(*key_dump); struct tcp_ao_info_opt info = {}; - bool c1, c2, c3, c4, c5; + bool c1, c2, c3, c4, c5, c6, c7, c8; struct netstat *ns; int err, nr_keys; @@ -346,25 +417,30 @@ int test_get_tcp_ao_counters(int sk, struct tcp_ao_counters *out) /* per-netns */ ns = netstat_read(); - out->netns_ao_good = netstat_get(ns, "TCPAOGood", &c1); - out->netns_ao_bad = netstat_get(ns, "TCPAOBad", &c2); - out->netns_ao_key_not_found = netstat_get(ns, "TCPAOKeyNotFound", &c3); - out->netns_ao_required = netstat_get(ns, "TCPAORequired", &c4); - out->netns_ao_dropped_icmp = netstat_get(ns, "TCPAODroppedIcmps", &c5); + out->ao.netns_ao_good = netstat_get(ns, "TCPAOGood", &c1); + out->ao.netns_ao_bad = netstat_get(ns, "TCPAOBad", &c2); + out->ao.netns_ao_key_not_found = netstat_get(ns, "TCPAOKeyNotFound", &c3); + out->ao.netns_ao_required = netstat_get(ns, "TCPAORequired", &c4); + out->ao.netns_ao_dropped_icmp = netstat_get(ns, "TCPAODroppedIcmps", &c5); + out->netns_md5_notfound = netstat_get(ns, "TCPMD5NotFound", &c6); + out->netns_md5_unexpected = netstat_get(ns, "TCPMD5Unexpected", &c7); + out->netns_md5_failure = netstat_get(ns, "TCPMD5Failure", &c8); netstat_free(ns); - if (c1 || c2 || c3 || c4 || c5) + if (c1 || c2 || c3 || c4 || c5 || c6 || c7 || c8) return -EOPNOTSUPP; err = test_get_ao_info(sk, &info); + if (err == -ENOENT) + return 0; if (err) return err; /* per-socket */ - out->ao_info_pkt_good = info.pkt_good; - out->ao_info_pkt_bad = info.pkt_bad; - out->ao_info_pkt_key_not_found = info.pkt_key_not_found; - out->ao_info_pkt_ao_required = info.pkt_ao_required; - out->ao_info_pkt_dropped_icmp = info.pkt_dropped_icmp; + out->ao.ao_info_pkt_good = info.pkt_good; + out->ao.ao_info_pkt_bad = info.pkt_bad; + out->ao.ao_info_pkt_key_not_found = info.pkt_key_not_found; + out->ao.ao_info_pkt_ao_required = info.pkt_ao_required; + out->ao.ao_info_pkt_dropped_icmp = info.pkt_dropped_icmp; /* per-key */ nr_keys = test_get_ao_keys_nr(sk); @@ -372,7 +448,7 @@ int test_get_tcp_ao_counters(int sk, struct tcp_ao_counters *out) return nr_keys; if (nr_keys == 0) test_error("test_get_ao_keys_nr() == 0"); - out->nr_keys = (size_t)nr_keys; + out->ao.nr_keys = (size_t)nr_keys; key_dump = calloc(nr_keys, key_dump_sz); if (!key_dump) return -errno; @@ -386,72 +462,84 @@ int test_get_tcp_ao_counters(int sk, struct tcp_ao_counters *out) return -errno; } - out->key_cnts = calloc(nr_keys, sizeof(out->key_cnts[0])); - if (!out->key_cnts) { + out->ao.key_cnts = calloc(nr_keys, sizeof(out->ao.key_cnts[0])); + if (!out->ao.key_cnts) { free(key_dump); return -errno; } while (nr_keys--) { - out->key_cnts[nr_keys].sndid = key_dump[nr_keys].sndid; - 
out->key_cnts[nr_keys].rcvid = key_dump[nr_keys].rcvid; - out->key_cnts[nr_keys].pkt_good = key_dump[nr_keys].pkt_good; - out->key_cnts[nr_keys].pkt_bad = key_dump[nr_keys].pkt_bad; + out->ao.key_cnts[nr_keys].sndid = key_dump[nr_keys].sndid; + out->ao.key_cnts[nr_keys].rcvid = key_dump[nr_keys].rcvid; + out->ao.key_cnts[nr_keys].pkt_good = key_dump[nr_keys].pkt_good; + out->ao.key_cnts[nr_keys].pkt_bad = key_dump[nr_keys].pkt_bad; } free(key_dump); return 0; } -int __test_tcp_ao_counters_cmp(const char *tst_name, - struct tcp_ao_counters *before, - struct tcp_ao_counters *after, - test_cnt expected) +test_cnt test_cmp_counters(struct tcp_counters *before, + struct tcp_counters *after) +{ +#define __cmp(cnt, e_cnt) \ +do { \ + if (before->cnt > after->cnt) \ + test_error("counter " __stringify(cnt) " decreased"); \ + if (before->cnt != after->cnt) \ + ret |= e_cnt; \ +} while (0) + + test_cnt ret = 0; + size_t i; + + if (before->ao.nr_keys != after->ao.nr_keys) + test_error("the number of keys has changed"); + + _for_each_counter(__cmp); + + i = before->ao.nr_keys; + while (i--) { + __cmp(ao.key_cnts[i].pkt_good, TEST_CNT_KEY_GOOD); + __cmp(ao.key_cnts[i].pkt_bad, TEST_CNT_KEY_BAD); + } +#undef __cmp + return ret; +} + +int test_assert_counters_sk(const char *tst_name, + struct tcp_counters *before, + struct tcp_counters *after, + test_cnt expected) { -#define __cmp_ao(cnt, expecting_inc) \ +#define __cmp_ao(cnt, e_cnt) \ do { \ if (before->cnt > after->cnt) { \ test_fail("%s: Decreased counter " __stringify(cnt) " %" PRIu64 " > %" PRIu64, \ - tst_name ?: "", before->cnt, after->cnt); \ + tst_name ?: "", before->cnt, after->cnt); \ return -1; \ } \ - if ((before->cnt != after->cnt) != (expecting_inc)) { \ + if ((before->cnt != after->cnt) != !!(expected & e_cnt)) { \ test_fail("%s: Counter " __stringify(cnt) " was %sexpected to increase %" PRIu64 " => %" PRIu64, \ - tst_name ?: "", (expecting_inc) ? "" : "not ", \ + tst_name ?: "", (expected & e_cnt) ? 
"" : "not ", \ before->cnt, after->cnt); \ return -1; \ } \ -} while(0) +} while (0) errno = 0; - /* per-netns */ - __cmp_ao(netns_ao_good, !!(expected & TEST_CNT_NS_GOOD)); - __cmp_ao(netns_ao_bad, !!(expected & TEST_CNT_NS_BAD)); - __cmp_ao(netns_ao_key_not_found, - !!(expected & TEST_CNT_NS_KEY_NOT_FOUND)); - __cmp_ao(netns_ao_required, !!(expected & TEST_CNT_NS_AO_REQUIRED)); - __cmp_ao(netns_ao_dropped_icmp, - !!(expected & TEST_CNT_NS_DROPPED_ICMP)); - /* per-socket */ - __cmp_ao(ao_info_pkt_good, !!(expected & TEST_CNT_SOCK_GOOD)); - __cmp_ao(ao_info_pkt_bad, !!(expected & TEST_CNT_SOCK_BAD)); - __cmp_ao(ao_info_pkt_key_not_found, - !!(expected & TEST_CNT_SOCK_KEY_NOT_FOUND)); - __cmp_ao(ao_info_pkt_ao_required, !!(expected & TEST_CNT_SOCK_AO_REQUIRED)); - __cmp_ao(ao_info_pkt_dropped_icmp, - !!(expected & TEST_CNT_SOCK_DROPPED_ICMP)); + _for_each_counter(__cmp_ao); return 0; #undef __cmp_ao } -int test_tcp_ao_key_counters_cmp(const char *tst_name, - struct tcp_ao_counters *before, - struct tcp_ao_counters *after, - test_cnt expected, - int sndid, int rcvid) +int test_assert_counters_key(const char *tst_name, + struct tcp_ao_counters *before, + struct tcp_ao_counters *after, + test_cnt expected, int sndid, int rcvid) { size_t i; -#define __cmp_ao(i, cnt, expecting_inc) \ +#define __cmp_ao(i, cnt, e_cnt) \ do { \ if (before->key_cnts[i].cnt > after->key_cnts[i].cnt) { \ test_fail("%s: Decreased counter " __stringify(cnt) " %" PRIu64 " > %" PRIu64 " for key %u:%u", \ @@ -461,16 +549,16 @@ do { \ before->key_cnts[i].rcvid); \ return -1; \ } \ - if ((before->key_cnts[i].cnt != after->key_cnts[i].cnt) != (expecting_inc)) { \ + if ((before->key_cnts[i].cnt != after->key_cnts[i].cnt) != !!(expected & e_cnt)) { \ test_fail("%s: Counter " __stringify(cnt) " was %sexpected to increase %" PRIu64 " => %" PRIu64 " for key %u:%u", \ - tst_name ?: "", (expecting_inc) ? "" : "not ",\ + tst_name ?: "", (expected & e_cnt) ? 
"" : "not ",\ before->key_cnts[i].cnt, \ after->key_cnts[i].cnt, \ before->key_cnts[i].sndid, \ before->key_cnts[i].rcvid); \ return -1; \ } \ -} while(0) +} while (0) if (before->nr_keys != after->nr_keys) { test_fail("%s: Keys changed on the socket %zu != %zu", @@ -485,20 +573,22 @@ do { \ continue; if (rcvid >= 0 && before->key_cnts[i].rcvid != rcvid) continue; - __cmp_ao(i, pkt_good, !!(expected & TEST_CNT_KEY_GOOD)); - __cmp_ao(i, pkt_bad, !!(expected & TEST_CNT_KEY_BAD)); + __cmp_ao(i, pkt_good, TEST_CNT_KEY_GOOD); + __cmp_ao(i, pkt_bad, TEST_CNT_KEY_BAD); } return 0; #undef __cmp_ao } -void test_tcp_ao_counters_free(struct tcp_ao_counters *cnts) +void test_tcp_counters_free(struct tcp_counters *cnts) { - free(cnts->key_cnts); + free(cnts->ao.key_cnts); } #define TEST_BUF_SIZE 4096 -ssize_t test_server_run(int sk, ssize_t quota, time_t timeout_sec) +static ssize_t _test_server_run(int sk, ssize_t quota, struct tcp_counters *c, + test_cnt cond, volatile int *err, + time_t timeout_sec) { ssize_t total = 0; @@ -507,7 +597,7 @@ ssize_t test_server_run(int sk, ssize_t quota, time_t timeout_sec) ssize_t bytes, sent; int ret; - ret = test_wait_fd(sk, timeout_sec, 0); + ret = __test_skpair_poll(sk, 0, timeout_sec, c, cond, err); if (ret) return ret; @@ -518,7 +608,7 @@ ssize_t test_server_run(int sk, ssize_t quota, time_t timeout_sec) if (bytes == 0) break; - ret = test_wait_fd(sk, timeout_sec, 1); + ret = __test_skpair_poll(sk, 1, timeout_sec, c, cond, err); if (ret) return ret; @@ -533,13 +623,41 @@ ssize_t test_server_run(int sk, ssize_t quota, time_t timeout_sec) return total; } -ssize_t test_client_loop(int sk, char *buf, size_t buf_sz, - const size_t msg_len, time_t timeout_sec) +ssize_t test_server_run(int sk, ssize_t quota, time_t timeout_sec) +{ + return _test_server_run(sk, quota, NULL, 0, NULL, + timeout_sec ?: TEST_TIMEOUT_SEC); +} + +int test_skpair_server(int sk, ssize_t quota, test_cnt cond, volatile int *err) +{ + struct tcp_counters c; + ssize_t ret; + + *err = 0; + if (test_get_tcp_counters(sk, &c)) + test_error("test_get_tcp_counters()"); + synchronize_threads(); /* 1: init skpair & read nscounters */ + + ret = _test_server_run(sk, quota, &c, cond, err, TEST_TIMEOUT_SEC); + test_tcp_counters_free(&c); + return ret; +} + +static ssize_t test_client_loop(int sk, size_t buf_sz, const size_t msg_len, + struct tcp_counters *c, test_cnt cond, + volatile int *err) { char msg[msg_len]; int nodelay = 1; + char *buf; size_t i; + buf = alloca(buf_sz); + if (!buf) + return -ENOMEM; + randomize_buffer(buf, buf_sz); + if (setsockopt(sk, IPPROTO_TCP, TCP_NODELAY, &nodelay, sizeof(nodelay))) test_error("setsockopt(TCP_NODELAY)"); @@ -547,7 +665,7 @@ ssize_t test_client_loop(int sk, char *buf, size_t buf_sz, size_t sent, bytes = min(msg_len, buf_sz - i); int ret; - ret = test_wait_fd(sk, timeout_sec, 1); + ret = __test_skpair_poll(sk, 1, TEST_TIMEOUT_SEC, c, cond, err); if (ret) return ret; @@ -561,7 +679,8 @@ ssize_t test_client_loop(int sk, char *buf, size_t buf_sz, do { ssize_t got; - ret = test_wait_fd(sk, timeout_sec, 0); + ret = __test_skpair_poll(sk, 0, TEST_TIMEOUT_SEC, + c, cond, err); if (ret) return ret; @@ -580,15 +699,31 @@ ssize_t test_client_loop(int sk, char *buf, size_t buf_sz, return i; } -int test_client_verify(int sk, const size_t msg_len, const size_t nr, - time_t timeout_sec) +int test_client_verify(int sk, const size_t msg_len, const size_t nr) { size_t buf_sz = msg_len * nr; - char *buf = alloca(buf_sz); ssize_t ret; - randomize_buffer(buf, buf_sz); - ret = 
test_client_loop(sk, buf, buf_sz, msg_len, timeout_sec); + ret = test_client_loop(sk, buf_sz, msg_len, NULL, 0, NULL); + if (ret < 0) + return (int)ret; + return ret != buf_sz ? -1 : 0; +} + +int test_skpair_client(int sk, const size_t msg_len, const size_t nr, + test_cnt cond, volatile int *err) +{ + struct tcp_counters c; + size_t buf_sz = msg_len * nr; + ssize_t ret; + + *err = 0; + if (test_get_tcp_counters(sk, &c)) + test_error("test_get_tcp_counters()"); + synchronize_threads(); /* 1: init skpair & read nscounters */ + + ret = test_client_loop(sk, buf_sz, msg_len, &c, cond, err); + test_tcp_counters_free(&c); if (ret < 0) return (int)ret; return ret != buf_sz ? -1 : 0; diff --git a/tools/testing/selftests/net/tcp_ao/restore.c b/tools/testing/selftests/net/tcp_ao/restore.c index ecc6f1e3a4141..9a059b6c45231 100644 --- a/tools/testing/selftests/net/tcp_ao/restore.c +++ b/tools/testing/selftests/net/tcp_ao/restore.c @@ -16,11 +16,11 @@ const size_t quota = nr_packets * msg_len; static void try_server_run(const char *tst_name, unsigned int port, fault_t inj, test_cnt cnt_expected) { + test_cnt poll_cnt = (cnt_expected == TEST_CNT_GOOD) ? 0 : cnt_expected; const char *cnt_name = "TCPAOGood"; - struct tcp_ao_counters ao1, ao2; + struct tcp_counters cnt1, cnt2; uint64_t before_cnt, after_cnt; - int sk, lsk; - time_t timeout; + int sk, lsk, dummy; ssize_t bytes; if (fault(TIMEOUT)) @@ -48,11 +48,10 @@ static void try_server_run(const char *tst_name, unsigned int port, } before_cnt = netstat_get_one(cnt_name, NULL); - if (test_get_tcp_ao_counters(sk, &ao1)) - test_error("test_get_tcp_ao_counters()"); + if (test_get_tcp_counters(sk, &cnt1)) + test_error("test_get_tcp_counters()"); - timeout = fault(TIMEOUT) ? TEST_RETRANSMIT_SEC : TEST_TIMEOUT_SEC; - bytes = test_server_run(sk, quota, timeout); + bytes = test_skpair_server(sk, quota, poll_cnt, &dummy); if (fault(TIMEOUT)) { if (bytes > 0) test_fail("%s: server served: %zd", tst_name, bytes); @@ -65,17 +64,17 @@ static void try_server_run(const char *tst_name, unsigned int port, test_ok("%s: server alive", tst_name); } synchronize_threads(); /* 3: counters checks */ - if (test_get_tcp_ao_counters(sk, &ao2)) - test_error("test_get_tcp_ao_counters()"); + if (test_get_tcp_counters(sk, &cnt2)) + test_error("test_get_tcp_counters()"); after_cnt = netstat_get_one(cnt_name, NULL); - test_tcp_ao_counters_cmp(tst_name, &ao1, &ao2, cnt_expected); + test_assert_counters(tst_name, &cnt1, &cnt2, cnt_expected); if (after_cnt <= before_cnt) { - test_fail("%s: %s counter did not increase: %" PRIu64 " <= %" PRIu64, - tst_name, cnt_name, after_cnt, before_cnt); + test_fail("%s(server): %s counter did not increase: %" PRIu64 " <= %" PRIu64, + tst_name, cnt_name, after_cnt, before_cnt); } else { - test_ok("%s: counter %s increased %" PRIu64 " => %" PRIu64, + test_ok("%s(server): counter %s increased %" PRIu64 " => %" PRIu64, tst_name, cnt_name, before_cnt, after_cnt); } @@ -92,16 +91,16 @@ static void *server_fn(void *arg) { unsigned int port = test_server_port; - try_server_run("TCP-AO migrate to another socket", port++, + try_server_run("TCP-AO migrate to another socket (server)", port++, 0, TEST_CNT_GOOD); - try_server_run("TCP-AO with wrong send ISN", port++, + try_server_run("TCP-AO with wrong send ISN (server)", port++, FAULT_TIMEOUT, TEST_CNT_BAD); - try_server_run("TCP-AO with wrong receive ISN", port++, + try_server_run("TCP-AO with wrong receive ISN (server)", port++, FAULT_TIMEOUT, TEST_CNT_BAD); - try_server_run("TCP-AO with wrong send SEQ ext number", 
port++, + try_server_run("TCP-AO with wrong send SEQ ext number (server)", port++, FAULT_TIMEOUT, TEST_CNT_BAD); - try_server_run("TCP-AO with wrong receive SEQ ext number", port++, - FAULT_TIMEOUT, TEST_CNT_NS_BAD | TEST_CNT_GOOD); + try_server_run("TCP-AO with wrong receive SEQ ext number (server)", + port++, FAULT_TIMEOUT, TEST_CNT_NS_BAD | TEST_CNT_GOOD); synchronize_threads(); /* don't race to exit: client exits */ return NULL; @@ -125,7 +124,7 @@ static void test_get_sk_checkpoint(unsigned int server_port, sockaddr_af *saddr, test_error("failed to connect()"); synchronize_threads(); /* 2: accepted => send data */ - if (test_client_verify(sk, msg_len, nr_packets, TEST_TIMEOUT_SEC)) + if (test_client_verify(sk, msg_len, nr_packets)) test_fail("pre-migrate verify failed"); test_enable_repair(sk); @@ -139,11 +138,11 @@ static void test_sk_restore(const char *tst_name, unsigned int server_port, struct tcp_ao_repair *ao_img, fault_t inj, test_cnt cnt_expected) { + test_cnt poll_cnt = (cnt_expected == TEST_CNT_GOOD) ? 0 : cnt_expected; const char *cnt_name = "TCPAOGood"; - struct tcp_ao_counters ao1, ao2; + struct tcp_counters cnt1, cnt2; uint64_t before_cnt, after_cnt; - time_t timeout; - int sk; + int sk, dummy; if (fault(TIMEOUT)) cnt_name = "TCPAOBad"; @@ -159,30 +158,30 @@ static void test_sk_restore(const char *tst_name, unsigned int server_port, test_error("setsockopt(TCP_AO_ADD_KEY)"); test_ao_restore(sk, ao_img); - if (test_get_tcp_ao_counters(sk, &ao1)) - test_error("test_get_tcp_ao_counters()"); + if (test_get_tcp_counters(sk, &cnt1)) + test_error("test_get_tcp_counters()"); test_disable_repair(sk); test_sock_state_free(img); - timeout = fault(TIMEOUT) ? TEST_RETRANSMIT_SEC : TEST_TIMEOUT_SEC; - if (test_client_verify(sk, msg_len, nr_packets, timeout)) { + if (test_skpair_client(sk, msg_len, nr_packets, poll_cnt, &dummy)) { if (fault(TIMEOUT)) test_ok("%s: post-migrate connection is broken", tst_name); else test_fail("%s: post-migrate connection is working", tst_name); } else { if (fault(TIMEOUT)) - test_fail("%s: post-migrate connection still working", tst_name); + test_fail("%s: post-migrate connection is working", tst_name); else test_ok("%s: post-migrate connection is alive", tst_name); } + synchronize_threads(); /* 3: counters checks */ - if (test_get_tcp_ao_counters(sk, &ao2)) - test_error("test_get_tcp_ao_counters()"); + if (test_get_tcp_counters(sk, &cnt2)) + test_error("test_get_tcp_counters()"); after_cnt = netstat_get_one(cnt_name, NULL); - test_tcp_ao_counters_cmp(tst_name, &ao1, &ao2, cnt_expected); + test_assert_counters(tst_name, &cnt1, &cnt2, cnt_expected); if (after_cnt <= before_cnt) { test_fail("%s: %s counter did not increase: %" PRIu64 " <= %" PRIu64, @@ -203,7 +202,7 @@ static void *client_fn(void *arg) sockaddr_af saddr; test_get_sk_checkpoint(port, &saddr, &tcp_img, &ao_img); - test_sk_restore("TCP-AO migrate to another socket", port++, + test_sk_restore("TCP-AO migrate to another socket (client)", port++, &saddr, &tcp_img, &ao_img, 0, TEST_CNT_GOOD); test_get_sk_checkpoint(port, &saddr, &tcp_img, &ao_img); @@ -212,7 +211,7 @@ static void *client_fn(void *arg) -1, port, 0, -1, -1, -1, -1, -1, 100, 100, -1); trace_ao_event_expect(TCP_AO_MISMATCH, this_ip_dest, this_ip_addr, port, -1, 0, -1, -1, -1, -1, -1, 100, 100, -1); - test_sk_restore("TCP-AO with wrong send ISN", port++, + test_sk_restore("TCP-AO with wrong send ISN (client)", port++, &saddr, &tcp_img, &ao_img, FAULT_TIMEOUT, TEST_CNT_BAD); test_get_sk_checkpoint(port, &saddr, &tcp_img, &ao_img); @@ 
-221,7 +220,7 @@ static void *client_fn(void *arg) -1, port, 0, -1, -1, -1, -1, -1, 100, 100, -1); trace_ao_event_expect(TCP_AO_MISMATCH, this_ip_dest, this_ip_addr, port, -1, 0, -1, -1, -1, -1, -1, 100, 100, -1); - test_sk_restore("TCP-AO with wrong receive ISN", port++, + test_sk_restore("TCP-AO with wrong receive ISN (client)", port++, &saddr, &tcp_img, &ao_img, FAULT_TIMEOUT, TEST_CNT_BAD); test_get_sk_checkpoint(port, &saddr, &tcp_img, &ao_img); @@ -229,8 +228,8 @@ static void *client_fn(void *arg) trace_ao_event_expect(TCP_AO_MISMATCH, this_ip_addr, this_ip_dest, -1, port, 0, -1, -1, -1, -1, -1, 100, 100, -1); /* not expecting server => client mismatches as only snd sne is broken */ - test_sk_restore("TCP-AO with wrong send SEQ ext number", port++, - &saddr, &tcp_img, &ao_img, FAULT_TIMEOUT, + test_sk_restore("TCP-AO with wrong send SEQ ext number (client)", + port++, &saddr, &tcp_img, &ao_img, FAULT_TIMEOUT, TEST_CNT_NS_BAD | TEST_CNT_GOOD); test_get_sk_checkpoint(port, &saddr, &tcp_img, &ao_img); @@ -238,8 +237,8 @@ static void *client_fn(void *arg) /* not expecting client => server mismatches as only rcv sne is broken */ trace_ao_event_expect(TCP_AO_MISMATCH, this_ip_dest, this_ip_addr, port, -1, 0, -1, -1, -1, -1, -1, 100, 100, -1); - test_sk_restore("TCP-AO with wrong receive SEQ ext number", port++, - &saddr, &tcp_img, &ao_img, FAULT_TIMEOUT, + test_sk_restore("TCP-AO with wrong receive SEQ ext number (client)", + port++, &saddr, &tcp_img, &ao_img, FAULT_TIMEOUT, TEST_CNT_NS_GOOD | TEST_CNT_BAD); return NULL; diff --git a/tools/testing/selftests/net/tcp_ao/rst.c b/tools/testing/selftests/net/tcp_ao/rst.c index 6364facaa63ed..883cddf377cff 100644 --- a/tools/testing/selftests/net/tcp_ao/rst.c +++ b/tools/testing/selftests/net/tcp_ao/rst.c @@ -84,15 +84,15 @@ static void close_forced(int sk) static void test_server_active_rst(unsigned int port) { - struct tcp_ao_counters cnt1, cnt2; + struct tcp_counters cnt1, cnt2; ssize_t bytes; int sk, lsk; lsk = test_listen_socket(this_ip_addr, port, backlog); if (test_add_key(lsk, DEFAULT_TEST_PASSWORD, this_ip_dest, -1, 100, 100)) test_error("setsockopt(TCP_AO_ADD_KEY)"); - if (test_get_tcp_ao_counters(lsk, &cnt1)) - test_error("test_get_tcp_ao_counters()"); + if (test_get_tcp_counters(lsk, &cnt1)) + test_error("test_get_tcp_counters()"); synchronize_threads(); /* 1: MKT added */ if (test_wait_fd(lsk, TEST_TIMEOUT_SEC, 0)) @@ -103,8 +103,8 @@ static void test_server_active_rst(unsigned int port) test_error("accept()"); synchronize_threads(); /* 2: connection accept()ed, another queued */ - if (test_get_tcp_ao_counters(lsk, &cnt2)) - test_error("test_get_tcp_ao_counters()"); + if (test_get_tcp_counters(lsk, &cnt2)) + test_error("test_get_tcp_counters()"); synchronize_threads(); /* 3: close listen socket */ close(lsk); @@ -120,7 +120,7 @@ static void test_server_active_rst(unsigned int port) synchronize_threads(); /* 5: closed active sk */ synchronize_threads(); /* 6: counters checks */ - if (test_tcp_ao_counters_cmp("active RST server", &cnt1, &cnt2, TEST_CNT_GOOD)) + if (test_assert_counters("active RST server", &cnt1, &cnt2, TEST_CNT_GOOD)) test_fail("MKT counters (server) have not only good packets"); else test_ok("MKT counters are good on server"); @@ -128,7 +128,7 @@ static void test_server_active_rst(unsigned int port) static void test_server_passive_rst(unsigned int port) { - struct tcp_ao_counters ao1, ao2; + struct tcp_counters cnt1, cnt2; int sk, lsk; ssize_t bytes; @@ -147,8 +147,8 @@ static void test_server_passive_rst(unsigned 
int port) synchronize_threads(); /* 2: accepted => send data */ close(lsk); - if (test_get_tcp_ao_counters(sk, &ao1)) - test_error("test_get_tcp_ao_counters()"); + if (test_get_tcp_counters(sk, &cnt1)) + test_error("test_get_tcp_counters()"); bytes = test_server_run(sk, quota, TEST_TIMEOUT_SEC); if (bytes != quota) { @@ -160,12 +160,12 @@ static void test_server_passive_rst(unsigned int port) synchronize_threads(); /* 3: checkpoint the client */ synchronize_threads(); /* 4: close the server, creating twsk */ - if (test_get_tcp_ao_counters(sk, &ao2)) - test_error("test_get_tcp_ao_counters()"); + if (test_get_tcp_counters(sk, &cnt2)) + test_error("test_get_tcp_counters()"); close(sk); synchronize_threads(); /* 5: restore the socket, send more data */ - test_tcp_ao_counters_cmp("passive RST server", &ao1, &ao2, TEST_CNT_GOOD); + test_assert_counters("passive RST server", &cnt1, &cnt2, TEST_CNT_GOOD); synchronize_threads(); /* 6: server exits */ } @@ -271,8 +271,7 @@ static void test_client_active_rst(unsigned int port) synchronize_threads(); /* 1: MKT added */ for (i = 0; i < last; i++) { - err = _test_connect_socket(sk[i], this_ip_dest, port, - (i == 0) ? TEST_TIMEOUT_SEC : -1); + err = _test_connect_socket(sk[i], this_ip_dest, port, i != 0); if (err < 0) test_error("failed to connect()"); } @@ -283,12 +282,12 @@ static void test_client_active_rst(unsigned int port) test_error("test_wait_fds(): %d", err); /* async connect() with third sk to get into request_sock_queue */ - err = _test_connect_socket(sk[last], this_ip_dest, port, -1); + err = _test_connect_socket(sk[last], this_ip_dest, port, 1); if (err < 0) test_error("failed to connect()"); synchronize_threads(); /* 3: close listen socket */ - if (test_client_verify(sk[0], packet_sz, quota / packet_sz, TEST_TIMEOUT_SEC)) + if (test_client_verify(sk[0], packet_sz, quota / packet_sz)) test_fail("Failed to send data on connected socket"); else test_ok("Verified established tcp connection"); @@ -323,7 +322,7 @@ static void test_client_active_rst(unsigned int port) static void test_client_passive_rst(unsigned int port) { - struct tcp_ao_counters ao1, ao2; + struct tcp_counters cnt1, cnt2; struct tcp_ao_repair ao_img; struct tcp_sock_state img; sockaddr_af saddr; @@ -341,7 +340,7 @@ static void test_client_passive_rst(unsigned int port) test_error("failed to connect()"); synchronize_threads(); /* 2: accepted => send data */ - if (test_client_verify(sk, packet_sz, quota / packet_sz, TEST_TIMEOUT_SEC)) + if (test_client_verify(sk, packet_sz, quota / packet_sz)) test_fail("Failed to send data on connected socket"); else test_ok("Verified established tcp connection"); @@ -397,8 +396,8 @@ static void test_client_passive_rst(unsigned int port) test_error("setsockopt(TCP_AO_ADD_KEY)"); test_ao_restore(sk, &ao_img); - if (test_get_tcp_ao_counters(sk, &ao1)) - test_error("test_get_tcp_ao_counters()"); + if (test_get_tcp_counters(sk, &cnt1)) + test_error("test_get_tcp_counters()"); test_disable_repair(sk); test_sock_state_free(&img); @@ -417,7 +416,7 @@ static void test_client_passive_rst(unsigned int port) * IP 10.0.254.1.7011 > 10.0.1.1.59772: Flags [R], seq 3215596252, win 0, * options [tcp-ao keyid 100 rnextkeyid 100 mac 0x0bcfbbf497bce844312304b2], length 0 */ - err = test_client_verify(sk, packet_sz, quota / packet_sz, 2 * TEST_TIMEOUT_SEC); + err = test_client_verify(sk, packet_sz, quota / packet_sz); /* Make sure that the connection was reset, not timeouted */ if (err && err == -ECONNRESET) test_ok("client sock was passively reset 
post-seq-adjust"); @@ -426,12 +425,12 @@ static void test_client_passive_rst(unsigned int port) else test_fail("client sock is yet connected post-seq-adjust"); - if (test_get_tcp_ao_counters(sk, &ao2)) - test_error("test_get_tcp_ao_counters()"); + if (test_get_tcp_counters(sk, &cnt2)) + test_error("test_get_tcp_counters()"); synchronize_threads(); /* 6: server exits */ close(sk); - test_tcp_ao_counters_cmp("client passive RST", &ao1, &ao2, TEST_CNT_GOOD); + test_assert_counters("client passive RST", &cnt1, &cnt2, TEST_CNT_GOOD); } static void *client_fn(void *arg) diff --git a/tools/testing/selftests/net/tcp_ao/self-connect.c b/tools/testing/selftests/net/tcp_ao/self-connect.c index 3ecd2b58de6a3..73b2f2276f3f5 100644 --- a/tools/testing/selftests/net/tcp_ao/self-connect.c +++ b/tools/testing/selftests/net/tcp_ao/self-connect.c @@ -30,7 +30,7 @@ static void setup_lo_intf(const char *lo_intf) static void tcp_self_connect(const char *tst, unsigned int port, bool different_keyids, bool check_restore) { - struct tcp_ao_counters before_ao, after_ao; + struct tcp_counters before, after; uint64_t before_aogood, after_aogood; struct netstat *ns_before, *ns_after; const size_t nr_packets = 20; @@ -60,17 +60,17 @@ static void tcp_self_connect(const char *tst, unsigned int port, ns_before = netstat_read(); before_aogood = netstat_get(ns_before, "TCPAOGood", NULL); - if (test_get_tcp_ao_counters(sk, &before_ao)) - test_error("test_get_tcp_ao_counters()"); + if (test_get_tcp_counters(sk, &before)) + test_error("test_get_tcp_counters()"); if (__test_connect_socket(sk, "lo", (struct sockaddr *)&addr, - sizeof(addr), TEST_TIMEOUT_SEC) < 0) { + sizeof(addr), 0) < 0) { ns_after = netstat_read(); netstat_print_diff(ns_before, ns_after); test_error("failed to connect()"); } - if (test_client_verify(sk, 100, nr_packets, TEST_TIMEOUT_SEC)) { + if (test_client_verify(sk, 100, nr_packets)) { test_fail("%s: tcp connection verify failed", tst); close(sk); return; @@ -78,8 +78,8 @@ static void tcp_self_connect(const char *tst, unsigned int port, ns_after = netstat_read(); after_aogood = netstat_get(ns_after, "TCPAOGood", NULL); - if (test_get_tcp_ao_counters(sk, &after_ao)) - test_error("test_get_tcp_ao_counters()"); + if (test_get_tcp_counters(sk, &after)) + test_error("test_get_tcp_counters()"); if (!check_restore) { /* to debug: netstat_print_diff(ns_before, ns_after); */ netstat_free(ns_before); @@ -93,7 +93,7 @@ static void tcp_self_connect(const char *tst, unsigned int port, return; } - if (test_tcp_ao_counters_cmp(tst, &before_ao, &after_ao, TEST_CNT_GOOD)) { + if (test_assert_counters(tst, &before, &after, TEST_CNT_GOOD)) { close(sk); return; } @@ -136,7 +136,7 @@ static void tcp_self_connect(const char *tst, unsigned int port, test_ao_restore(sk, &ao_img); test_disable_repair(sk); test_sock_state_free(&img); - if (test_client_verify(sk, 100, nr_packets, TEST_TIMEOUT_SEC)) { + if (test_client_verify(sk, 100, nr_packets)) { test_fail("%s: tcp connection verify failed", tst); close(sk); return; diff --git a/tools/testing/selftests/net/tcp_ao/seq-ext.c b/tools/testing/selftests/net/tcp_ao/seq-ext.c index 8901a6785dc81..f00245263b20d 100644 --- a/tools/testing/selftests/net/tcp_ao/seq-ext.c +++ b/tools/testing/selftests/net/tcp_ao/seq-ext.c @@ -40,7 +40,7 @@ static void test_adjust_seqs(struct tcp_sock_state *img, static int test_sk_restore(struct tcp_sock_state *img, struct tcp_ao_repair *ao_img, sockaddr_af *saddr, const union tcp_addr daddr, unsigned int dport, - struct tcp_ao_counters *cnt) + struct 
tcp_counters *cnt) { int sk; @@ -54,8 +54,8 @@ static int test_sk_restore(struct tcp_sock_state *img, test_error("setsockopt(TCP_AO_ADD_KEY)"); test_ao_restore(sk, ao_img); - if (test_get_tcp_ao_counters(sk, cnt)) - test_error("test_get_tcp_ao_counters()"); + if (test_get_tcp_counters(sk, cnt)) + test_error("test_get_tcp_counters()"); test_disable_repair(sk); test_sock_state_free(img); @@ -65,7 +65,7 @@ static int test_sk_restore(struct tcp_sock_state *img, static void *server_fn(void *arg) { uint64_t before_good, after_good, after_bad; - struct tcp_ao_counters ao1, ao2; + struct tcp_counters cnt1, cnt2; struct tcp_sock_state img; struct tcp_ao_repair ao_img; sockaddr_af saddr; @@ -114,7 +114,7 @@ static void *server_fn(void *arg) test_adjust_seqs(&img, &ao_img, true); synchronize_threads(); /* 4: dump finished */ sk = test_sk_restore(&img, &ao_img, &saddr, this_ip_dest, - client_new_port, &ao1); + client_new_port, &cnt1); trace_ao_event_sne_expect(TCP_AO_SND_SNE_UPDATE, this_ip_addr, this_ip_dest, test_server_port + 1, client_new_port, 1); @@ -136,11 +136,11 @@ static void *server_fn(void *arg) } synchronize_threads(); /* 6: verify counters after SEQ-number rollover */ - if (test_get_tcp_ao_counters(sk, &ao2)) - test_error("test_get_tcp_ao_counters()"); + if (test_get_tcp_counters(sk, &cnt2)) + test_error("test_get_tcp_counters()"); after_good = netstat_get_one("TCPAOGood", NULL); - test_tcp_ao_counters_cmp(NULL, &ao1, &ao2, TEST_CNT_GOOD); + test_assert_counters(NULL, &cnt1, &cnt2, TEST_CNT_GOOD); if (after_good <= before_good) { test_fail("TCPAOGood counter did not increase: %" PRIu64 " <= %" PRIu64, @@ -173,7 +173,7 @@ static void *server_fn(void *arg) static void *client_fn(void *arg) { uint64_t before_good, after_good, after_bad; - struct tcp_ao_counters ao1, ao2; + struct tcp_counters cnt1, cnt2; struct tcp_sock_state img; struct tcp_ao_repair ao_img; sockaddr_af saddr; @@ -191,7 +191,7 @@ static void *client_fn(void *arg) test_error("failed to connect()"); synchronize_threads(); /* 2: accepted => send data */ - if (test_client_verify(sk, msg_len, nr_packets, TEST_TIMEOUT_SEC)) { + if (test_client_verify(sk, msg_len, nr_packets)) { test_fail("pre-migrate verify failed"); return NULL; } @@ -213,20 +213,20 @@ static void *client_fn(void *arg) test_adjust_seqs(&img, &ao_img, false); synchronize_threads(); /* 4: dump finished */ sk = test_sk_restore(&img, &ao_img, &saddr, this_ip_dest, - test_server_port + 1, &ao1); + test_server_port + 1, &cnt1); synchronize_threads(); /* 5: verify the connection during SEQ-number rollover */ - if (test_client_verify(sk, msg_len, nr_packets, TEST_TIMEOUT_SEC)) + if (test_client_verify(sk, msg_len, nr_packets)) test_fail("post-migrate verify failed"); else test_ok("post-migrate connection alive"); synchronize_threads(); /* 5: verify counters after SEQ-number rollover */ - if (test_get_tcp_ao_counters(sk, &ao2)) - test_error("test_get_tcp_ao_counters()"); + if (test_get_tcp_counters(sk, &cnt2)) + test_error("test_get_tcp_counters()"); after_good = netstat_get_one("TCPAOGood", NULL); - test_tcp_ao_counters_cmp(NULL, &ao1, &ao2, TEST_CNT_GOOD); + test_assert_counters(NULL, &cnt1, &cnt2, TEST_CNT_GOOD); if (after_good <= before_good) { test_fail("TCPAOGood counter did not increase: %" PRIu64 " <= %" PRIu64, diff --git a/tools/testing/selftests/net/tcp_ao/unsigned-md5.c b/tools/testing/selftests/net/tcp_ao/unsigned-md5.c index f779e5892bc16..a1467b64390ad 100644 --- a/tools/testing/selftests/net/tcp_ao/unsigned-md5.c +++ 
b/tools/testing/selftests/net/tcp_ao/unsigned-md5.c @@ -6,6 +6,7 @@ #define fault(type) (inj == FAULT_ ## type) static const char *md5_password = "Some evil genius, enemy to mankind, must have been the first contriver."; static const char *ao_password = DEFAULT_TEST_PASSWORD; +static volatile int sk_pair; static union tcp_addr client2; static union tcp_addr client3; @@ -41,10 +42,10 @@ static void try_accept(const char *tst_name, unsigned int port, const char *cnt_name, test_cnt cnt_expected, int needs_tcp_md5, fault_t inj) { - struct tcp_ao_counters ao_cnt1, ao_cnt2; + struct tcp_counters cnt1, cnt2; uint64_t before_cnt = 0, after_cnt = 0; /* silence GCC */ - int lsk, err, sk = 0; - time_t timeout; + test_cnt poll_cnt = (cnt_expected == TEST_CNT_GOOD) ? 0 : cnt_expected; + int lsk, err, sk = -1; if (needs_tcp_md5 && should_skip_test(tst_name, KCONFIG_TCP_MD5)) return; @@ -63,22 +64,25 @@ static void try_accept(const char *tst_name, unsigned int port, if (cnt_name) before_cnt = netstat_get_one(cnt_name, NULL); - if (ao_addr && test_get_tcp_ao_counters(lsk, &ao_cnt1)) - test_error("test_get_tcp_ao_counters()"); + if (ao_addr && test_get_tcp_counters(lsk, &cnt1)) + test_error("test_get_tcp_counters()"); synchronize_threads(); /* preparations done */ - timeout = fault(TIMEOUT) ? TEST_RETRANSMIT_SEC : TEST_TIMEOUT_SEC; - err = test_wait_fd(lsk, timeout, 0); + err = test_skpair_wait_poll(lsk, 0, poll_cnt, &sk_pair); synchronize_threads(); /* connect()/accept() timeouts */ if (err == -ETIMEDOUT) { + sk_pair = err; if (!fault(TIMEOUT)) - test_fail("timed out for accept()"); + test_fail("%s: timed out for accept()", tst_name); + } else if (err == -EKEYREJECTED) { + if (!fault(KEYREJECT)) + test_fail("%s: key was rejected", tst_name); } else if (err < 0) { - test_error("test_wait_fd()"); + test_error("test_skpair_wait_poll()"); } else { if (fault(TIMEOUT)) - test_fail("ready to accept"); + test_fail("%s: ready to accept", tst_name); sk = accept(lsk, NULL, NULL); if (sk < 0) { @@ -89,8 +93,8 @@ static void try_accept(const char *tst_name, unsigned int port, } } - if (ao_addr && test_get_tcp_ao_counters(lsk, &ao_cnt2)) - test_error("test_get_tcp_ao_counters()"); + if (ao_addr && test_get_tcp_counters(lsk, &cnt2)) + test_error("test_get_tcp_counters()"); close(lsk); if (!cnt_name) { @@ -108,11 +112,11 @@ static void try_accept(const char *tst_name, unsigned int port, tst_name, cnt_name, before_cnt, after_cnt); } if (ao_addr) - test_tcp_ao_counters_cmp(tst_name, &ao_cnt1, &ao_cnt2, cnt_expected); + test_assert_counters(tst_name, &cnt1, &cnt2, cnt_expected); out: synchronize_threads(); /* test_kill_sk() */ - if (sk > 0) + if (sk >= 0) test_kill_sk(sk); } @@ -153,78 +157,82 @@ static void *server_fn(void *arg) server_add_routes(); - try_accept("AO server (INADDR_ANY): AO client", port++, NULL, 0, + try_accept("[server] AO server (INADDR_ANY): AO client", port++, NULL, 0, &addr_any, 0, 0, 100, 100, 0, "TCPAOGood", TEST_CNT_GOOD, 0, 0); - try_accept("AO server (INADDR_ANY): MD5 client", port++, NULL, 0, + try_accept("[server] AO server (INADDR_ANY): MD5 client", port++, NULL, 0, &addr_any, 0, 0, 100, 100, 0, "TCPMD5Unexpected", - 0, 1, FAULT_TIMEOUT); - try_accept("AO server (INADDR_ANY): no sign client", port++, NULL, 0, + TEST_CNT_NS_MD5_UNEXPECTED, 1, FAULT_TIMEOUT); + try_accept("[server] AO server (INADDR_ANY): no sign client", port++, NULL, 0, &addr_any, 0, 0, 100, 100, 0, "TCPAORequired", TEST_CNT_AO_REQUIRED, 0, FAULT_TIMEOUT); - try_accept("AO server (AO_REQUIRED): AO client", port++, NULL, 0, + 
try_accept("[server] AO server (AO_REQUIRED): AO client", port++, NULL, 0, &this_ip_dest, TEST_PREFIX, true, 100, 100, 0, "TCPAOGood", TEST_CNT_GOOD, 0, 0); - try_accept("AO server (AO_REQUIRED): unsigned client", port++, NULL, 0, + try_accept("[server] AO server (AO_REQUIRED): unsigned client", port++, NULL, 0, &this_ip_dest, TEST_PREFIX, true, 100, 100, 0, "TCPAORequired", TEST_CNT_AO_REQUIRED, 0, FAULT_TIMEOUT); - try_accept("MD5 server (INADDR_ANY): AO client", port++, &addr_any, 0, + try_accept("[server] MD5 server (INADDR_ANY): AO client", port++, &addr_any, 0, NULL, 0, 0, 0, 0, 0, "TCPAOKeyNotFound", - 0, 1, FAULT_TIMEOUT); - try_accept("MD5 server (INADDR_ANY): MD5 client", port++, &addr_any, 0, + TEST_CNT_NS_KEY_NOT_FOUND, 1, FAULT_TIMEOUT); + try_accept("[server] MD5 server (INADDR_ANY): MD5 client", port++, &addr_any, 0, NULL, 0, 0, 0, 0, 0, NULL, 0, 1, 0); - try_accept("MD5 server (INADDR_ANY): no sign client", port++, &addr_any, + try_accept("[server] MD5 server (INADDR_ANY): no sign client", port++, &addr_any, 0, NULL, 0, 0, 0, 0, 0, "TCPMD5NotFound", - 0, 1, FAULT_TIMEOUT); + TEST_CNT_NS_MD5_NOT_FOUND, 1, FAULT_TIMEOUT); - try_accept("no sign server: AO client", port++, NULL, 0, + try_accept("[server] no sign server: AO client", port++, NULL, 0, NULL, 0, 0, 0, 0, 0, "TCPAOKeyNotFound", - TEST_CNT_AO_KEY_NOT_FOUND, 0, FAULT_TIMEOUT); - try_accept("no sign server: MD5 client", port++, NULL, 0, + TEST_CNT_NS_KEY_NOT_FOUND, 0, FAULT_TIMEOUT); + try_accept("[server] no sign server: MD5 client", port++, NULL, 0, NULL, 0, 0, 0, 0, 0, "TCPMD5Unexpected", - 0, 1, FAULT_TIMEOUT); - try_accept("no sign server: no sign client", port++, NULL, 0, + TEST_CNT_NS_MD5_UNEXPECTED, 1, FAULT_TIMEOUT); + try_accept("[server] no sign server: no sign client", port++, NULL, 0, NULL, 0, 0, 0, 0, 0, "CurrEstab", 0, 0, 0); - try_accept("AO+MD5 server: AO client (matching)", port++, + try_accept("[server] AO+MD5 server: AO client (matching)", port++, &this_ip_dest, TEST_PREFIX, &client2, TEST_PREFIX, 0, 100, 100, 0, "TCPAOGood", TEST_CNT_GOOD, 1, 0); - try_accept("AO+MD5 server: AO client (misconfig, matching MD5)", port++, + try_accept("[server] AO+MD5 server: AO client (misconfig, matching MD5)", port++, &this_ip_dest, TEST_PREFIX, &client2, TEST_PREFIX, 0, 100, 100, 0, "TCPAOKeyNotFound", TEST_CNT_AO_KEY_NOT_FOUND, 1, FAULT_TIMEOUT); - try_accept("AO+MD5 server: AO client (misconfig, non-matching)", port++, + try_accept("[server] AO+MD5 server: AO client (misconfig, non-matching)", port++, &this_ip_dest, TEST_PREFIX, &client2, TEST_PREFIX, 0, 100, 100, 0, "TCPAOKeyNotFound", TEST_CNT_AO_KEY_NOT_FOUND, 1, FAULT_TIMEOUT); - try_accept("AO+MD5 server: MD5 client (matching)", port++, + try_accept("[server] AO+MD5 server: MD5 client (matching)", port++, &this_ip_dest, TEST_PREFIX, &client2, TEST_PREFIX, 0, 100, 100, 0, NULL, 0, 1, 0); - try_accept("AO+MD5 server: MD5 client (misconfig, matching AO)", port++, + try_accept("[server] AO+MD5 server: MD5 client (misconfig, matching AO)", port++, &this_ip_dest, TEST_PREFIX, &client2, TEST_PREFIX, 0, - 100, 100, 0, "TCPMD5Unexpected", 0, 1, FAULT_TIMEOUT); - try_accept("AO+MD5 server: MD5 client (misconfig, non-matching)", port++, + 100, 100, 0, "TCPMD5Unexpected", + TEST_CNT_NS_MD5_UNEXPECTED, 1, FAULT_TIMEOUT); + try_accept("[server] AO+MD5 server: MD5 client (misconfig, non-matching)", port++, &this_ip_dest, TEST_PREFIX, &client2, TEST_PREFIX, 0, - 100, 100, 0, "TCPMD5Unexpected", 0, 1, FAULT_TIMEOUT); - try_accept("AO+MD5 server: no sign client (unmatched)", 
+		   100, 100, 0, "TCPMD5Unexpected",
+		   TEST_CNT_NS_MD5_UNEXPECTED, 1, FAULT_TIMEOUT);
+	try_accept("[server] AO+MD5 server: no sign client (unmatched)", port++,
 		   &this_ip_dest, TEST_PREFIX, &client2, TEST_PREFIX, 0,
 		   100, 100, 0, "CurrEstab", 0, 1, 0);
-	try_accept("AO+MD5 server: no sign client (misconfig, matching AO)",
+	try_accept("[server] AO+MD5 server: no sign client (misconfig, matching AO)",
 		   port++, &this_ip_dest, TEST_PREFIX, &client2, TEST_PREFIX, 0,
 		   100, 100, 0, "TCPAORequired",
 		   TEST_CNT_AO_REQUIRED, 1, FAULT_TIMEOUT);
-	try_accept("AO+MD5 server: no sign client (misconfig, matching MD5)",
+	try_accept("[server] AO+MD5 server: no sign client (misconfig, matching MD5)",
 		   port++, &this_ip_dest, TEST_PREFIX, &client2, TEST_PREFIX, 0,
-		   100, 100, 0, "TCPMD5NotFound", 0, 1, FAULT_TIMEOUT);
+		   100, 100, 0, "TCPMD5NotFound",
+		   TEST_CNT_NS_MD5_NOT_FOUND, 1, FAULT_TIMEOUT);
-	try_accept("AO+MD5 server: client with both [TCP-MD5] and TCP-AO keys",
+	/* Key rejected by the other side, failing fast through skpair */
+	try_accept("[server] AO+MD5 server: client with both [TCP-MD5] and TCP-AO keys",
 		   port++, &this_ip_dest, TEST_PREFIX, &client2, TEST_PREFIX, 0,
-		   100, 100, 0, NULL, 0, 1, FAULT_TIMEOUT);
-	try_accept("AO+MD5 server: client with both TCP-MD5 and [TCP-AO] keys",
+		   100, 100, 0, NULL, 0, 1, FAULT_KEYREJECT);
+	try_accept("[server] AO+MD5 server: client with both TCP-MD5 and [TCP-AO] keys",
 		   port++, &this_ip_dest, TEST_PREFIX, &client2, TEST_PREFIX, 0,
-		   100, 100, 0, NULL, 0, 1, FAULT_TIMEOUT);
+		   100, 100, 0, NULL, 0, 1, FAULT_KEYREJECT);
 
 	server_add_fail_tests(&port);
 
@@ -259,7 +267,6 @@ static void try_connect(const char *tst_name, unsigned int port,
 			uint8_t sndid, uint8_t rcvid, uint8_t vrf,
 			fault_t inj, int needs_tcp_md5, union tcp_addr *bind_addr)
 {
-	time_t timeout;
 	int sk, ret;
 
 	if (needs_tcp_md5 && should_skip_test(tst_name, KCONFIG_TCP_MD5))
@@ -281,11 +288,10 @@ static void try_connect(const char *tst_name, unsigned int port,
 
 	synchronize_threads(); /* preparations done */
 
-	timeout = fault(TIMEOUT) ? TEST_RETRANSMIT_SEC : TEST_TIMEOUT_SEC;
-	ret = _test_connect_socket(sk, this_ip_dest, port, timeout);
-
+	ret = test_skpair_connect_poll(sk, this_ip_dest, port, 0, &sk_pair);
 	synchronize_threads(); /* connect()/accept() timeouts */
 	if (ret < 0) {
+		sk_pair = ret;
 		if (fault(KEYREJECT) && ret == -EKEYREJECTED)
 			test_ok("%s: connect() was prevented", tst_name);
 		else if (ret == -ETIMEDOUT && fault(TIMEOUT))
@@ -305,8 +311,7 @@ static void try_connect(const char *tst_name, unsigned int port,
 
 out:
 	synchronize_threads(); /* test_kill_sk() */
-	/* _test_connect_socket() cleans up on failure */
-	if (ret > 0)
+	if (ret > 0) /* test_skpair_connect_poll() cleans up on failure */
 		test_kill_sk(sk);
 }
 
@@ -437,7 +442,6 @@ static void try_to_add(const char *tst_name, unsigned int port,
 			int ao_vrf, uint8_t sndid, uint8_t rcvid,
 			int needs_tcp_md5, fault_t inj)
 {
-	time_t timeout;
 	int sk, ret;
 
 	if (needs_tcp_md5 && should_skip_test(tst_name, KCONFIG_TCP_MD5))
@@ -450,11 +454,10 @@ static void try_to_add(const char *tst_name, unsigned int port,
 
 	synchronize_threads(); /* preparations done */
 
-	timeout = fault(TIMEOUT) ? TEST_RETRANSMIT_SEC : TEST_TIMEOUT_SEC;
-	ret = _test_connect_socket(sk, this_ip_dest, port, timeout);
+	ret = test_skpair_connect_poll(sk, this_ip_dest, port, 0, &sk_pair);
 
 	synchronize_threads(); /* connect()/accept() timeouts */
-	if (ret <= 0) {
+	if (ret < 0) {
 		test_error("%s: connect() returned %d", tst_name, ret);
 		goto out;
 	}
@@ -490,8 +493,7 @@ static void try_to_add(const char *tst_name, unsigned int port,
 
 out:
 	synchronize_threads(); /* test_kill_sk() */
-	/* _test_connect_socket() cleans up on failure */
-	if (ret > 0)
+	if (ret > 0) /* test_skpair_connect_poll() cleans up on failure */
 		test_kill_sk(sk);
 }
 
diff --git a/tools/testing/selftests/net/test_blackhole_dev.sh b/tools/testing/selftests/net/test_blackhole_dev.sh
deleted file mode 100755
index 3119b80e711fd..0000000000000
--- a/tools/testing/selftests/net/test_blackhole_dev.sh
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/bin/sh
-# SPDX-License-Identifier: GPL-2.0
-# Runs blackhole-dev test using blackhole-dev kernel module
-
-if /sbin/modprobe -q test_blackhole_dev ; then
-	/sbin/modprobe -q -r test_blackhole_dev;
-	echo "test_blackhole_dev: ok";
-else
-	echo "test_blackhole_dev: [FAIL]";
-	exit 1;
-fi
diff --git a/tools/testing/selftests/net/test_so_rcv.sh b/tools/testing/selftests/net/test_so_rcv.sh
new file mode 100755
index 0000000000000..d8aa4362879da
--- /dev/null
+++ b/tools/testing/selftests/net/test_so_rcv.sh
@@ -0,0 +1,73 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+source lib.sh
+
+HOSTS=("127.0.0.1" "::1")
+PORT=1234
+TOTAL_TESTS=0
+FAILED_TESTS=0
+
+declare -A TESTS=(
+	["SO_RCVPRIORITY"]="-P 2"
+	["SO_RCVMARK"]="-M 3"
+)
+
+check_result() {
+	((TOTAL_TESTS++))
+	if [ "$1" -ne 0 ]; then
+		((FAILED_TESTS++))
+	fi
+}
+
+cleanup()
+{
+	cleanup_ns $NS
+}
+
+trap cleanup EXIT
+
+setup_ns NS
+
+for HOST in "${HOSTS[@]}"; do
+	PROTOCOL="IPv4"
+	if [[ "$HOST" == "::1" ]]; then
+		PROTOCOL="IPv6"
+	fi
+	for test_name in "${!TESTS[@]}"; do
+		echo "Running $test_name test, $PROTOCOL"
+		arg=${TESTS[$test_name]}
+
+		ip netns exec $NS ./so_rcv_listener $arg $HOST $PORT &
+		LISTENER_PID=$!
+
+		sleep 0.5
+
+		if ! ip netns exec $NS ./cmsg_sender $arg $HOST $PORT; then
+			echo "Sender failed for $test_name, $PROTOCOL"
+			kill "$LISTENER_PID" 2>/dev/null
+			wait "$LISTENER_PID"
+			check_result 1
+			continue
+		fi
+
+		wait "$LISTENER_PID"
+		LISTENER_EXIT_CODE=$?
+
+		if [ "$LISTENER_EXIT_CODE" -eq 0 ]; then
+			echo "Rcv test OK for $test_name, $PROTOCOL"
+			check_result 0
+		else
+			echo "Rcv test FAILED for $test_name, $PROTOCOL"
+			check_result 1
+		fi
+	done
+done
+
+if [ "$FAILED_TESTS" -ne 0 ]; then
+	echo "FAIL - $FAILED_TESTS/$TOTAL_TESTS tests failed"
+	exit ${KSFT_FAIL}
+else
+	echo "OK - All $TOTAL_TESTS tests passed"
+	exit ${KSFT_PASS}
+fi
diff --git a/tools/testing/selftests/net/test_vxlan_fdb_changelink.sh b/tools/testing/selftests/net/test_vxlan_fdb_changelink.sh
index 2d442cdab11e1..062f957950af3 100755
--- a/tools/testing/selftests/net/test_vxlan_fdb_changelink.sh
+++ b/tools/testing/selftests/net/test_vxlan_fdb_changelink.sh
@@ -1,29 +1,114 @@
 #!/bin/bash
 # SPDX-License-Identifier: GPL-2.0
 
-# Check FDB default-remote handling across "ip link set".
+ALL_TESTS="
+	test_set_remote
+	test_change_mc_remote
+"
+source lib.sh
 
 check_remotes()
 {
 	local what=$1; shift
 
 	local N=$(bridge fdb sh dev vx | grep 00:00:00:00:00:00 | wc -l)
-	echo -ne "expected two remotes after $what\t"
-	if [[ $N != 2 ]]; then
-		echo "[FAIL]"
-		EXIT_STATUS=1
+	((N == 2))
+	check_err $? "expected 2 remotes after $what, got $N"
+}
+
+# Check FDB default-remote handling across "ip link set".
+test_set_remote()
+{
+	RET=0
+
+	ip_link_add vx up type vxlan id 2000 dstport 4789
+	bridge fdb ap dev vx 00:00:00:00:00:00 dst 192.0.2.20 self permanent
+	bridge fdb ap dev vx 00:00:00:00:00:00 dst 192.0.2.30 self permanent
+	check_remotes "fdb append"
+
+	ip link set dev vx type vxlan remote 192.0.2.30
+	check_remotes "link set"
+
+	log_test 'FDB default-remote handling across "ip link set"'
+}
+
+fmt_remote()
+{
+	local addr=$1; shift
+
+	if [[ $addr == 224.* ]]; then
+		echo "group $addr"
 	else
-		echo "[ OK ]"
+		echo "remote $addr"
 	fi
 }
 
-ip link add name vx up type vxlan id 2000 dstport 4789
-bridge fdb ap dev vx 00:00:00:00:00:00 dst 192.0.2.20 self permanent
-bridge fdb ap dev vx 00:00:00:00:00:00 dst 192.0.2.30 self permanent
-check_remotes "fdb append"
+change_remote()
+{
+	local remote=$1; shift
+
+	ip link set dev vx type vxlan $(fmt_remote $remote) dev v1
+}
+
+check_membership()
+{
+	local check_vec=("$@")
+
+	local memberships
+	memberships=$(
+	    netstat -n --groups |
+		sed -n '/^v1\b/p' |
+		grep -o '[^ ]*$'
+	)
+	check_err $? "Couldn't obtain group memberships"
+
+	local item
+	for item in "${check_vec[@]}"; do
+		eval "local $item"
+		echo "$memberships" | grep -q "\b$group\b"
+		check_err_fail $fail $? "$group reported in IGMP query response"
+	done
+}
+
+test_change_mc_remote()
+{
+	check_command netstat || return
+
+	ip_link_add v1 up type veth peer name v2
+	ip_link_set_up v2
+
+	RET=0
+
+	ip_link_add vx up type vxlan dstport 4789 \
+		local 192.0.2.1 $(fmt_remote 224.1.1.1) dev v1 vni 1000
+
+	check_membership "group=224.1.1.1 fail=0" \
+			 "group=224.1.1.2 fail=1" \
+			 "group=224.1.1.3 fail=1"
+
+	log_test "MC group report after VXLAN creation"
+
+	RET=0
+
+	change_remote 224.1.1.2
+	check_membership "group=224.1.1.1 fail=1" \
+			 "group=224.1.1.2 fail=0" \
+			 "group=224.1.1.3 fail=1"
+
+	log_test "MC group report after changing VXLAN remote MC->MC"
+
+	RET=0
+
+	change_remote 192.0.2.2
+	check_membership "group=224.1.1.1 fail=1" \
+			 "group=224.1.1.2 fail=1" \
+			 "group=224.1.1.3 fail=1"
+
+	log_test "MC group report after changing VXLAN remote MC->UC"
+}
+
+trap defer_scopes_cleanup EXIT
 
-ip link set dev vx type vxlan remote 192.0.2.30
-check_remotes "link set"
+tests_run
 
-ip link del dev vx
 exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/ynl.mk b/tools/testing/selftests/net/ynl.mk
index 12e7cae251be3..e907c2751956f 100644
--- a/tools/testing/selftests/net/ynl.mk
+++ b/tools/testing/selftests/net/ynl.mk
@@ -27,7 +27,8 @@ $(OUTPUT)/.libynl-$(YNL_GENS_HASH).sig:
 
 $(OUTPUT)/libynl.a: $(YNL_SPECS) $(OUTPUT)/.libynl-$(YNL_GENS_HASH).sig
 	$(Q)rm -f $(top_srcdir)/tools/net/ynl/libynl.a
-	$(Q)$(MAKE) -C $(top_srcdir)/tools/net/ynl GENS="$(YNL_GENS)" libynl.a
+	$(Q)$(MAKE) -C $(top_srcdir)/tools/net/ynl \
+		GENS="$(YNL_GENS)" RSTS="" libynl.a
 	$(Q)cp $(top_srcdir)/tools/net/ynl/libynl.a $(OUTPUT)/libynl.a
 
 EXTRA_CLEAN += \
diff --git a/tools/testing/selftests/ptp/testptp.c b/tools/testing/selftests/ptp/testptp.c
index 58064151f2c89..edc08a4433fd4 100644
--- a/tools/testing/selftests/ptp/testptp.c
+++ b/tools/testing/selftests/ptp/testptp.c
@@ -140,6 +140,7 @@ static void usage(char *progname)
 		" -H val     set output phase to 'val' nanoseconds (requires -p)\n"
 		" -w val     set output pulse width to 'val' nanoseconds (requires -p)\n"
 		" -P val     enable or disable (val=1|0) the system clock PPS\n"
+		" -r         open the ptp clock in readonly mode\n"
 		" -s         set the ptp clock time from the system time\n"
 		" -S         set the system time from the ptp clock time\n"
 		" -t val     shift the ptp clock time by 'val' seconds\n"
@@ -188,6 +189,7 @@ int main(int argc, char *argv[])
 	int pin_index = -1, pin_func;
 	int pps = -1;
 	int seconds = 0;
+	int readonly = 0;
 	int settime = 0;
 	int channel = -1;
 	clockid_t ext_clockid = CLOCK_REALTIME;
@@ -200,7 +202,7 @@ int main(int argc, char *argv[])
 	progname = strrchr(argv[0], '/');
 	progname = progname ? 1+progname : argv[0];
 
-	while (EOF != (c = getopt(argc, argv, "cd:e:f:F:ghH:i:k:lL:n:o:p:P:sSt:T:w:x:Xy:z"))) {
+	while (EOF != (c = getopt(argc, argv, "cd:e:f:F:ghH:i:k:lL:n:o:p:P:rsSt:T:w:x:Xy:z"))) {
 		switch (c) {
 		case 'c':
 			capabilities = 1;
@@ -252,6 +254,9 @@ int main(int argc, char *argv[])
 		case 'P':
 			pps = atoi(optarg);
 			break;
+		case 'r':
+			readonly = 1;
+			break;
 		case 's':
 			settime = 1;
 			break;
@@ -308,7 +313,7 @@ int main(int argc, char *argv[])
 		}
 	}
 
-	fd = open(device, O_RDWR);
+	fd = open(device, readonly ? O_RDONLY : O_RDWR);
 	if (fd < 0) {
 		fprintf(stderr, "opening %s: %s\n", device, strerror(errno));
 		return -1;
@@ -436,14 +441,16 @@ int main(int argc, char *argv[])
 	}
 
 	if (extts) {
-		memset(&extts_request, 0, sizeof(extts_request));
-		extts_request.index = index;
-		extts_request.flags = PTP_ENABLE_FEATURE;
-		if (ioctl(fd, PTP_EXTTS_REQUEST, &extts_request)) {
-			perror("PTP_EXTTS_REQUEST");
-			extts = 0;
-		} else {
-			puts("external time stamp request okay");
+		if (!readonly) {
+			memset(&extts_request, 0, sizeof(extts_request));
+			extts_request.index = index;
+			extts_request.flags = PTP_ENABLE_FEATURE;
+			if (ioctl(fd, PTP_EXTTS_REQUEST, &extts_request)) {
+				perror("PTP_EXTTS_REQUEST");
+				extts = 0;
+			} else {
+				puts("external time stamp request okay");
+			}
 		}
 		for (; extts; extts--) {
 			cnt = read(fd, &event, sizeof(event));
@@ -455,10 +462,12 @@ int main(int argc, char *argv[])
 				event.t.sec, event.t.nsec);
 			fflush(stdout);
 		}
-		/* Disable the feature again. */
-		extts_request.flags = 0;
-		if (ioctl(fd, PTP_EXTTS_REQUEST, &extts_request)) {
-			perror("PTP_EXTTS_REQUEST");
+		if (!readonly) {
+			/* Disable the feature again. */
+			extts_request.flags = 0;
+			if (ioctl(fd, PTP_EXTTS_REQUEST, &extts_request)) {
+				perror("PTP_EXTTS_REQUEST");
+			}
 		}
 	}
 
diff --git a/tools/testing/selftests/rseq/rseq-riscv-bits.h b/tools/testing/selftests/rseq/rseq-riscv-bits.h
index de31a0143139b..f02f411d550d1 100644
--- a/tools/testing/selftests/rseq/rseq-riscv-bits.h
+++ b/tools/testing/selftests/rseq/rseq-riscv-bits.h
@@ -243,7 +243,7 @@ int RSEQ_TEMPLATE_IDENTIFIER(rseq_offset_deref_addv)(intptr_t *ptr, off_t off, i
 #ifdef RSEQ_COMPARE_TWICE
 				  RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, "%l[error1]")
 #endif
-				  RSEQ_ASM_OP_R_DEREF_ADDV(ptr, off, 3)
+				  RSEQ_ASM_OP_R_DEREF_ADDV(ptr, off, inc, 3)
 				  RSEQ_INJECT_ASM(4)
 				  RSEQ_ASM_DEFINE_ABORT(4, abort)
 				  : /* gcc asm goto does not allow outputs */
@@ -251,8 +251,8 @@ int RSEQ_TEMPLATE_IDENTIFIER(rseq_offset_deref_addv)(intptr_t *ptr, off_t off, i
 				    [current_cpu_id]	"m" (rseq_get_abi()->RSEQ_TEMPLATE_CPU_ID_FIELD),
 				    [rseq_cs]		"m" (rseq_get_abi()->rseq_cs.arch.ptr),
 				    [ptr]		"r" (ptr),
-				    [off]		"er" (off),
-				    [inc]		"er" (inc)
+				    [off]		"r" (off),
+				    [inc]		"r" (inc)
 				    RSEQ_INJECT_INPUT
 				  : "memory", RSEQ_ASM_TMP_REG_1
 				    RSEQ_INJECT_CLOBBER
diff --git a/tools/testing/selftests/rseq/rseq-riscv.h b/tools/testing/selftests/rseq/rseq-riscv.h
index 37e598d0a365e..67d544aaa9a3b 100644
--- a/tools/testing/selftests/rseq/rseq-riscv.h
+++ b/tools/testing/selftests/rseq/rseq-riscv.h
@@ -158,7 +158,7 @@ do {									\
 	"bnez " RSEQ_ASM_TMP_REG_1 ", 222b\n"				\
 	"333:\n"
 
-#define RSEQ_ASM_OP_R_DEREF_ADDV(ptr, off, post_commit_label)		\
+#define RSEQ_ASM_OP_R_DEREF_ADDV(ptr, off, inc, post_commit_label)	\
 	"mv " RSEQ_ASM_TMP_REG_1 ", %[" __rseq_str(ptr) "]\n"		\
 	RSEQ_ASM_OP_R_ADD(off)						\
 	REG_L RSEQ_ASM_TMP_REG_1 ", 0(" RSEQ_ASM_TMP_REG_1 ")\n"	\
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/police.json b/tools/testing/selftests/tc-testing/tc-tests/actions/police.json
index dd8109768f8f9..5596f4df0e9f6 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/police.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/police.json
@@ -689,7 +689,7 @@
         "cmdUnderTest": "$TC actions add action police rate 7mbit burst 1m continue index 1",
         "expExitCode": "0",
         "verifyCmd": "$TC actions get action police index 1",
-        "matchPattern": "action order [0-9]*: police 0x1 rate 7Mbit burst 1024Kb mtu 2Kb action continue",
+        "matchPattern": "action order [0-9]*: police 0x1 rate 7Mbit burst (1024Kb|1Mb) mtu 2Kb action continue",
         "matchCount": "1",
         "teardown": [
            "$TC actions flush action police"
@@ -716,7 +716,7 @@
         "cmdUnderTest": "$TC actions add action police rate 7mbit burst 1m drop index 1",
         "expExitCode": "0",
         "verifyCmd": "$TC actions ls action police",
-        "matchPattern": "action order [0-9]*: police 0x1 rate 7Mbit burst 1024Kb mtu 2Kb action drop",
+        "matchPattern": "action order [0-9]*: police 0x1 rate 7Mbit burst (1024Kb|1Mb) mtu 2Kb action drop",
         "matchCount": "1",
         "teardown": [
            "$TC actions flush action police"
@@ -743,7 +743,7 @@
         "cmdUnderTest": "$TC actions add action police rate 7mbit burst 1m ok index 1",
         "expExitCode": "0",
         "verifyCmd": "$TC actions ls action police",
-        "matchPattern": "action order [0-9]*: police 0x1 rate 7Mbit burst 1024Kb mtu 2Kb action pass",
+        "matchPattern": "action order [0-9]*: police 0x1 rate 7Mbit burst (1024Kb|1Mb) mtu 2Kb action pass",
         "matchCount": "1",
         "teardown": [
            "$TC actions flush action police"
@@ -770,7 +770,7 @@
         "cmdUnderTest": "$TC actions add action police rate 7mbit burst 1m reclassify index 1",
         "expExitCode": "0",
         "verifyCmd": "$TC actions get action police index 1",
-        "matchPattern": "action order [0-9]*: police 0x1 rate 7Mbit burst 1024Kb mtu 2Kb action reclassify",
+        "matchPattern": "action order [0-9]*: police 0x1 rate 7Mbit burst (1024Kb|1Mb) mtu 2Kb action reclassify",
         "matchCount": "1",
         "teardown": [
            "$TC actions flush action police"
@@ -797,7 +797,7 @@
         "cmdUnderTest": "$TC actions add action police rate 7mbit burst 1m pipe index 1",
         "expExitCode": "0",
         "verifyCmd": "$TC actions ls action police",
-        "matchPattern": "action order [0-9]*: police 0x1 rate 7Mbit burst 1024Kb mtu 2Kb action pipe",
+        "matchPattern": "action order [0-9]*: police 0x1 rate 7Mbit burst (1024Kb|1Mb) mtu 2Kb action pipe",
         "matchCount": "1",
         "teardown": [
            "$TC actions flush action police"
diff --git a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/drr.json b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/drr.json
index 7126ec3485cbd..2b61d8d79bde8 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/drr.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/drr.json
@@ -61,5 +61,30 @@
         "teardown": [
             "$TC qdisc del dev $DUMMY handle 1: root"
         ]
+    },
+    {
+        "id": "4009",
+        "name": "Reject creation of DRR class with classid TC_H_ROOT",
+        "category": [
+            "qdisc",
+            "drr"
+        ],
+        "plugins": {
+            "requires": "nsPlugin"
+        },
+        "setup": [
+            "$TC qdisc add dev $DUMMY root handle ffff: drr",
+            "$TC filter add dev $DUMMY parent ffff: basic classid ffff:1",
+            "$TC class add dev $DUMMY parent ffff: classid ffff:1 drr",
+            "$TC filter add dev $DUMMY parent ffff: prio 1 u32 match u16 0x0000 0xfe00 at 2 flowid ffff:ffff"
+        ],
+        "cmdUnderTest": "$TC class add dev $DUMMY parent ffff: classid ffff:ffff drr",
+        "expExitCode": "2",
+        "verifyCmd": "$TC class show dev $DUMMY",
+        "matchPattern": "class drr ffff:ffff",
+        "matchCount": "0",
+        "teardown": [
+            "$TC qdisc del dev $DUMMY root"
+        ]
     }
 ]
diff --git a/tools/testing/selftests/vDSO/parse_vdso.c b/tools/testing/selftests/vDSO/parse_vdso.c
index 2fe5e983cb22f..f89d052c730eb 100644
--- a/tools/testing/selftests/vDSO/parse_vdso.c
+++ b/tools/testing/selftests/vDSO/parse_vdso.c
@@ -53,7 +53,7 @@ static struct vdso_info
 	/* Symbol table */
 	ELF(Sym) *symtab;
 	const char *symstrings;
-	ELF(Word) *gnu_hash;
+	ELF(Word) *gnu_hash, *gnu_bucket;
 	ELF_HASH_ENTRY *bucket, *chain;
 	ELF_HASH_ENTRY nbucket, nchain;
 
@@ -185,8 +185,8 @@ void vdso_init_from_sysinfo_ehdr(uintptr_t base)
 		/* The bucket array is located after the header (4 uint32) and the bloom
 		 * filter (size_t array of gnu_hash[2] elements).
 		 */
-		vdso_info.bucket = vdso_info.gnu_hash + 4 +
-				   sizeof(size_t) / 4 * vdso_info.gnu_hash[2];
+		vdso_info.gnu_bucket = vdso_info.gnu_hash + 4 +
+				       sizeof(size_t) / 4 * vdso_info.gnu_hash[2];
 	} else {
 		vdso_info.nbucket = hash[0];
 		vdso_info.nchain = hash[1];
@@ -268,11 +268,11 @@ void *vdso_sym(const char *version, const char *name)
 	if (vdso_info.gnu_hash) {
 		uint32_t h1 = gnu_hash(name), h2, *hashval;
 
-		i = vdso_info.bucket[h1 % vdso_info.nbucket];
+		i = vdso_info.gnu_bucket[h1 % vdso_info.nbucket];
 		if (i == 0)
 			return 0;
 		h1 |= 1;
-		hashval = vdso_info.bucket + vdso_info.nbucket +
+		hashval = vdso_info.gnu_bucket + vdso_info.nbucket +
 			  (i - vdso_info.gnu_hash[1]);
 		for (;; i++) {
 			ELF(Sym) *sym = &vdso_info.symtab[i];
diff --git a/tools/thermal/lib/Makefile b/tools/thermal/lib/Makefile
index f2552f73a64c7..056d212f25cf5 100644
--- a/tools/thermal/lib/Makefile
+++ b/tools/thermal/lib/Makefile
@@ -39,19 +39,6 @@ libdir = $(prefix)/$(libdir_relative)
 libdir_SQ = $(subst ','\'',$(libdir))
 libdir_relative_SQ = $(subst ','\'',$(libdir_relative))
 
-ifeq ("$(origin V)", "command line")
-  VERBOSE = $(V)
-endif
-ifndef VERBOSE
-  VERBOSE = 0
-endif
-
-ifeq ($(VERBOSE),1)
-  Q =
-else
-  Q = @
-endif
-
 # Set compile option CFLAGS
 ifdef EXTRA_CFLAGS
   CFLAGS := $(EXTRA_CFLAGS)
diff --git a/tools/tracing/latency/Makefile b/tools/tracing/latency/Makefile
index 6518b03e05c71..257a56b1899f2 100644
--- a/tools/tracing/latency/Makefile
+++ b/tools/tracing/latency/Makefile
@@ -37,12 +37,6 @@ FEATURE_TESTS += libtracefs
 FEATURE_DISPLAY := libtraceevent
 FEATURE_DISPLAY += libtracefs
 
-ifeq ($(V),1)
-  Q =
-else
-  Q = @
-endif
-
 all: $(LATENCY-COLLECTOR)
 
 include $(srctree)/tools/build/Makefile.include
diff --git a/tools/tracing/rtla/Makefile b/tools/tracing/rtla/Makefile
index 8b5101457c70b..0b61208db604e 100644
--- a/tools/tracing/rtla/Makefile
+++ b/tools/tracing/rtla/Makefile
@@ -37,12 +37,6 @@ FEATURE_DISPLAY := libtraceevent
 FEATURE_DISPLAY += libtracefs
 FEATURE_DISPLAY += libcpupower
 
-ifeq ($(V),1)
-  Q =
-else
-  Q = @
-endif
-
 all: $(RTLA)
 
 include $(srctree)/tools/build/Makefile.include
diff --git a/tools/verification/rv/Makefile b/tools/verification/rv/Makefile
index 411d62b3d8eb9..5b898360ba481 100644
--- a/tools/verification/rv/Makefile
+++ b/tools/verification/rv/Makefile
@@ -35,12 +35,6 @@ FEATURE_TESTS += libtracefs
 FEATURE_DISPLAY := libtraceevent
 FEATURE_DISPLAY += libtracefs
 
-ifeq ($(V),1)
-  Q =
-else
-  Q = @
-endif
-
 all: $(RV)
 
 include $(srctree)/tools/build/Makefile.include
diff --git a/usr/include/Makefile b/usr/include/Makefile
index 6c6de1b1622b1..e3d6b03527fec 100644
--- a/usr/include/Makefile
+++ b/usr/include/Makefile
@@ -10,7 +10,7 @@ UAPI_CFLAGS := -std=c90 -Wall -Werror=implicit-function-declaration
 
 # In theory, we do not care -m32 or -m64 for header compile tests.
 # It is here just because CONFIG_CC_CAN_LINK is tested with -m32 or -m64.
-UAPI_CFLAGS += $(filter -m32 -m64 --target=%, $(KBUILD_CFLAGS))
+UAPI_CFLAGS += $(filter -m32 -m64 --target=%, $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS))
 
 # USERCFLAGS might contain sysroot location for CC.
 UAPI_CFLAGS += $(USERCFLAGS)
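
The tcp_ao hunks above drop the fixed connect() timeout in favour of test_skpair_connect_poll(), which watches the connecting socket together with a socket-pair channel so the peer thread can fail the test immediately instead of waiting out a retransmit timer. A minimal sketch of that pattern follows; the helper name and error-code choices here are illustrative, not the selftest's actual implementation:

    #include <errno.h>
    #include <poll.h>
    #include <sys/socket.h>

    /* Wait for a non-blocking connect() to complete while also watching
     * an "abort" fd (one end of a socketpair held by the peer thread).
     * Returns the socket on success, a negative errno on failure.
     */
    static int connect_poll_sketch(int sk, int abort_fd, int timeout_ms)
    {
            struct pollfd pfd[2] = {
                    { .fd = sk,       .events = POLLOUT },
                    { .fd = abort_fd, .events = POLLIN },
            };
            socklen_t len = sizeof(int);
            int err, nr;

            nr = poll(pfd, 2, timeout_ms);
            if (nr == 0)
                    return -ETIMEDOUT;
            if (nr < 0)
                    return -errno;
            if (pfd[1].revents & POLLIN)
                    return -ECONNABORTED;   /* the peer gave up first */
            /* connect() finished: fetch its result from SO_ERROR */
            if (getsockopt(sk, SOL_SOCKET, SO_ERROR, &err, &len))
                    return -errno;
            return err ? -err : sk;
    }

Returning the socket on success matches the "ret > 0 means cleanup is still needed" convention visible in the hunks.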
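The new test_so_rcv.sh drives a listener that expects the sender's priority and mark to come back as ancillary data. For reference, the receiving side of that mechanism in C looks roughly like the sketch below; it assumes a libc/kernel new enough to define SO_RCVMARK and SO_RCVPRIORITY, and that the values are delivered as SOL_SOCKET cmsgs of type SO_MARK and SO_PRIORITY:

    #include <stdio.h>
    #include <sys/socket.h>
    #include <sys/uio.h>

    static void dump_rcv_cmsgs(int sk)
    {
            int one = 1;
            char data[64], cbuf[256];
            struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
            struct msghdr msg = {
                    .msg_iov = &iov, .msg_iovlen = 1,
                    .msg_control = cbuf, .msg_controllen = sizeof(cbuf),
            };
            struct cmsghdr *cmsg;

            /* Ask the kernel to attach skb->mark and skb->priority cmsgs */
            setsockopt(sk, SOL_SOCKET, SO_RCVMARK, &one, sizeof(one));
            setsockopt(sk, SOL_SOCKET, SO_RCVPRIORITY, &one, sizeof(one));

            if (recvmsg(sk, &msg, 0) < 0)
                    return;

            for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
                    if (cmsg->cmsg_level != SOL_SOCKET)
                            continue;
                    if (cmsg->cmsg_type == SO_MARK)
                            printf("mark %u\n", *(unsigned int *)CMSG_DATA(cmsg));
                    else if (cmsg->cmsg_type == SO_PRIORITY)
                            printf("priority %u\n", *(unsigned int *)CMSG_DATA(cmsg));
            }
    }

This mirrors what the script exercises end to end: cmsg_sender sets the values with -P/-M, and so_rcv_listener verifies they arrive.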
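test_change_mc_remote() verifies the joins indirectly: a VXLAN device with a multicast remote makes the kernel join that group on the underlay device, which is what `netstat -n --groups` reports. For context, the same kind of membership can be created from user space, as in this sketch (the test itself opens no socket; the join there is done by the vxlan driver):

    #include <arpa/inet.h>
    #include <net/if.h>
    #include <netinet/in.h>
    #include <string.h>
    #include <sys/socket.h>

    /* Join 224.1.1.1 on the given interface; while the socket lives,
     * the group shows up in `netstat -n --groups` for that interface.
     */
    static int join_group(int sk, const char *ifname)
    {
            struct ip_mreqn mreq;

            memset(&mreq, 0, sizeof(mreq));
            inet_pton(AF_INET, "224.1.1.1", &mreq.imr_multiaddr);
            mreq.imr_ifindex = if_nametoindex(ifname);

            return setsockopt(sk, IPPROTO_IP, IP_ADD_MEMBERSHIP,
                              &mreq, sizeof(mreq));
    }

The test's MC->MC and MC->UC transitions then check that exactly one such membership exists, moves with the remote, and disappears when the remote becomes unicast.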
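The testptp.c change adds a -r flag so the clock can be opened O_RDONLY and the PTP_EXTTS_REQUEST enable/disable ioctls are skipped: a second, read-only process can then consume external-timestamp events that another process armed. Stripped of the option parsing, the read-only path reduces to roughly this (a condensed restatement of the patched code, not an addition to it):

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>
    #include <linux/ptp_clock.h>

    /* Read-only consumer: assumes some other process already enabled
     * external timestamps via PTP_EXTTS_REQUEST on this clock.
     */
    static int read_extts_events(const char *device, int count)
    {
            struct ptp_extts_event event;
            int fd = open(device, O_RDONLY);

            if (fd < 0)
                    return -1;
            while (count-- > 0) {
                    if (read(fd, &event, sizeof(event)) != sizeof(event))
                            break;
                    printf("event index %u at %lld.%09u\n", event.index,
                           event.t.sec, event.t.nsec);
            }
            close(fd);
            return 0;
    }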
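The rseq-riscv change passes inc into RSEQ_ASM_OP_R_DEREF_ADDV explicitly and drops the 'e' from the "er" constraints, so off and inc are always materialized in registers. The distinction matters whenever an asm template uses an operand in a register-only position; a toy illustration, separate from the selftest code:

    /* An operand consumed as a register inside the template (here by
     * 'add') must be constrained "r". An immediate alternative in the
     * constraint string would let the compiler substitute a constant
     * into a slot the template cannot accept. Build for riscv64 to
     * see the effect.
     */
    static inline long add_via_asm(long base, long inc)
    {
            long out = base;

            __asm__ volatile ("add %0, %0, %1"
                              : "+r" (out)
                              : "r" (inc));
            return out;
    }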
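The parse_vdso.c fix gives the DT_GNU_HASH bucket array its own gnu_bucket pointer instead of reusing the ELF-hash bucket field, which the two lookups had been silently sharing. For context, the section layout the patched arithmetic walks over is the standard GNU hash format, summarized here as a sketch rather than code from the patch:

    #include <stdint.h>

    /* Header of a DT_GNU_HASH section. The bloom filter that follows
     * holds bloom_size machine words; the bucket array comes after
     * that, which is what gnu_bucket now points at.
     */
    struct gnu_hash_header {
            uint32_t nbucket;
            uint32_t symoffset;     /* index of the first hashed symbol */
            uint32_t bloom_size;    /* in words, a power of two */
            uint32_t bloom_shift;
    };

    /* The hash function used to pick a bucket: h = h * 33 + c */
    static uint32_t gnu_hash_name(const char *name)
    {
            uint32_t h = 5381;

            while (*name)
                    h = (h << 5) + h + (unsigned char)*name++;
            return h;
    }

With the separate pointer, `gnu_bucket[h1 % nbucket]` indexes this array correctly even when the vDSO also carries a classic DT_HASH table.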